-rw-r--r--.clang-format218
-rw-r--r--.mailmap1
-rw-r--r--Documentation/ABI/stable/sysfs-bus-mhi10
-rw-r--r--Documentation/ABI/testing/configfs-usb-gadget-uvc1
-rw-r--r--Documentation/ABI/testing/debugfs-driver-habanalabs38
-rw-r--r--Documentation/ABI/testing/debugfs-hisi-hpre14
-rw-r--r--Documentation/ABI/testing/debugfs-hisi-sec14
-rw-r--r--Documentation/ABI/testing/debugfs-hisi-zip14
-rw-r--r--Documentation/ABI/testing/sysfs-bus-thunderbolt10
-rw-r--r--Documentation/ABI/testing/sysfs-class-cxl4
-rw-r--r--Documentation/ABI/testing/sysfs-class-firmware77
-rw-r--r--Documentation/ABI/testing/sysfs-devices-physical_location42
-rw-r--r--Documentation/ABI/testing/sysfs-driver-ccp87
-rw-r--r--Documentation/PCI/pci.rst4
-rw-r--r--Documentation/admin-guide/blockdev/index.rst6
-rw-r--r--Documentation/admin-guide/bootconfig.rst31
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt20
-rw-r--r--Documentation/admin-guide/nfs/nfs-client.rst15
-rw-r--r--Documentation/admin-guide/pm/intel-speed-select.rst22
-rw-r--r--Documentation/arch.rst1
-rw-r--r--Documentation/arm/marvell.rst7
-rw-r--r--Documentation/conf.py2
-rw-r--r--Documentation/devicetree/bindings/arm/hpe,gxp.yaml27
-rw-r--r--Documentation/devicetree/bindings/arm/intel,socfpga.yaml1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-clock.yaml56
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml54
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra-ccplex-cluster.yaml52
-rw-r--r--Documentation/devicetree/bindings/clock/airoha,en7523-scu.yaml58
-rw-r--r--Documentation/devicetree/bindings/clock/clock-bindings.txt188
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml4
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-apq8084.yaml42
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-sc8280xp.yaml128
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,mmcc.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,rpmcc.txt63
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml75
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml1
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.yaml11
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml20
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,px30-cru.txt70
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,px30-cru.yaml119
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt56
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.yaml72
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3188-cru.txt61
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3188-cru.yaml78
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3228-cru.txt58
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3228-cru.yaml74
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3288-cru.txt67
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3288-cru.yaml85
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3308-cru.txt60
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3308-cru.yaml76
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3368-cru.txt61
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3368-cru.yaml78
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3399-cru.yaml33
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3568-cru.yaml13
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rv1108-cru.txt59
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rv1108-cru.yaml75
-rw-r--r--Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/stericsson,u8500-clks.yaml57
-rw-r--r--Documentation/devicetree/bindings/clock/ti,am654-ehrpwm-tbclk.yaml1
-rw-r--r--Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt7
-rw-r--r--Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml1
-rw-r--r--Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml9
-rw-r--r--Documentation/devicetree/bindings/dma/altr,msgdma.yaml2
-rw-r--r--Documentation/devicetree/bindings/dma/arm,pl330.yaml3
-rw-r--r--Documentation/devicetree/bindings/dma/mmp-dma.txt10
-rw-r--r--Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml110
-rw-r--r--Documentation/devicetree/bindings/dma/qcom,gpi.yaml3
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml10
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rzn1-dmamux.yaml51
-rw-r--r--Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml19
-rw-r--r--Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml8
-rw-r--r--Documentation/devicetree/bindings/dma/sprd-dma.txt7
-rw-r--r--Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt6
-rw-r--r--Documentation/devicetree/bindings/extcon/siliconmitus,sm5502-muic.yaml5
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-altera.txt5
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,rcar-i2c.yaml2
-rw-r--r--Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt43
-rw-r--r--Documentation/devicetree/bindings/i3c/cdns,i3c-master.yaml60
-rw-r--r--Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt41
-rw-r--r--Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml52
-rw-r--r--Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/adc/sprd,sc2720-adc.yaml60
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml7
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/dac/lltc,ltc2632.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml34
-rw-r--r--Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml38
-rw-r--r--Documentation/devicetree/bindings/iio/light/stk33xx.yaml6
-rw-r--r--Documentation/devicetree/bindings/iio/potentiometer/microchip,mcp4131.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/st,st-sensors.yaml1
-rw-r--r--Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml22
-rw-r--r--Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml960
-rw-r--r--Documentation/devicetree/bindings/input/google,cros-ec-keyb.yaml36
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml2
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml18
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt1
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu.yaml25
-rw-r--r--Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml34
-rw-r--r--Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml10
-rw-r--r--Documentation/devicetree/bindings/leds/kinetic,ktd2692.yaml87
-rw-r--r--Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml2
-rw-r--r--Documentation/devicetree/bindings/leds/leds-ktd2692.txt50
-rw-r--r--Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml79
-rw-r--r--Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml174
-rw-r--r--Documentation/devicetree/bindings/leds/regulator-led.yaml55
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/ingenic,nemc-peripherals.yaml46
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml32
-rw-r--r--Documentation/devicetree/bindings/mfd/da9063.txt9
-rw-r--r--Documentation/devicetree/bindings/mfd/google,cros-ec.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/rk808.txt465
-rw-r--r--Documentation/devicetree/bindings/mfd/rockchip,rk805.yaml219
-rw-r--r--Documentation/devicetree/bindings/mfd/rockchip,rk808.yaml257
-rw-r--r--Documentation/devicetree/bindings/mfd/rockchip,rk809.yaml284
-rw-r--r--Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml330
-rw-r--r--Documentation/devicetree/bindings/mfd/rockchip,rk818.yaml282
-rw-r--r--Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/syscon.yaml8
-rw-r--r--Documentation/devicetree/bindings/mfd/wlf,arizona.yaml1
-rw-r--r--Documentation/devicetree/bindings/mtd/ingenic,nand.yaml1
-rw-r--r--Documentation/devicetree/bindings/mtd/spi-nand.yaml1
-rw-r--r--Documentation/devicetree/bindings/mux/reg-mux.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/adi,adin.yaml5
-rw-r--r--Documentation/devicetree/bindings/net/cdns,macb.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml115
-rw-r--r--Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/dsa/realtek.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/mediatek,net.yaml3
-rw-r--r--Documentation/devicetree/bindings/net/mediatek-dwmac.yaml3
-rw-r--r--Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml2
-rw-r--r--Documentation/devicetree/bindings/nvmem/apple,efuses.yaml50
-rw-r--r--Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml30
-rw-r--r--Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml56
-rw-r--r--Documentation/devicetree/bindings/pci/apple,pcie.yaml5
-rw-r--r--Documentation/devicetree/bindings/pci/layerscape-pci.txt65
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie.txt397
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie.yaml714
-rw-r--r--Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml12
-rw-r--r--Documentation/devicetree/bindings/pci/socionext,uniphier-pcie.yaml23
-rw-r--r--Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml10
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml12
-rw-r--r--Documentation/devicetree/bindings/phy/marvell,armada-3700-utmi-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.txt29
-rw-r--r--Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml107
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml4
-rw-r--r--Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml91
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml47
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-usb2-phy.yaml3
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml89
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml98
-rw-r--r--Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml81
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt87
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.yaml113
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imxrt1170.yaml77
-rw-r--r--Documentation/devicetree/bindings/pinctrl/marvell,ac5-pinctrl.yaml72
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,pinctrl-mt6795.yaml224
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.txt42
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.yaml116
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mt8192.yaml155
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml287
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,qcm2290-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml115
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sm6115-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sm8250-lpass-lpi-pinctrl.yaml (renamed from Documentation/devicetree/bindings/pinctrl/qcom,lpass-lpi-pinctrl.yaml)2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,tlmm-common.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/ralink,mt7620-pinctrl.yaml91
-rw-r--r--Documentation/devicetree/bindings/pinctrl/ralink,mt7621-pinctrl.yaml (renamed from Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinmux.yaml)27
-rw-r--r--Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinctrl.yaml68
-rw-r--r--Documentation/devicetree/bindings/pinctrl/ralink,rt305x-pinctrl.yaml92
-rw-r--r--Documentation/devicetree/bindings/pinctrl/ralink,rt3883-pinctrl.yaml71
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml4
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/cache_sram.txt20
-rw-r--r--Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml47
-rw-r--r--Documentation/devicetree/bindings/pwm/atmel-pwm.txt35
-rw-r--r--Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.yaml9
-rw-r--r--Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml75
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-mediatek.txt1
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt45
-rw-r--r--Documentation/devicetree/bindings/pwm/sunplus,sp7021-pwm.yaml42
-rw-r--r--Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml4
-rw-r--r--Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml9
-rw-r--r--Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml57
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml31
-rw-r--r--Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml16
-rw-r--r--Documentation/devicetree/bindings/reset/qcom,aoss-reset.yaml2
-rw-r--r--Documentation/devicetree/bindings/reset/qcom,pdc-global.yaml2
-rw-r--r--Documentation/devicetree/bindings/riscv/microchip.yaml2
-rw-r--r--Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt1
-rw-r--r--Documentation/devicetree/bindings/rtc/renesas,rzn1-rtc.yaml70
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-lpuart.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/qcom,serial-geni-qcom.yaml86
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,em-uart.yaml37
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,hscif.yaml9
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,scif.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/rs485.yaml5
-rw-r--r--Documentation/devicetree/bindings/serial/socionext,uniphier-uart.yaml5
-rw-r--r--Documentation/devicetree/bindings/soc/imx/fsl,imx8mp-media-blk-ctrl.yaml2
-rw-r--r--Documentation/devicetree/bindings/soc/intel/intel,hps-copy-engine.yaml51
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml4
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml4
-rw-r--r--Documentation/devicetree/bindings/soundwire/qcom,sdw.txt14
-rw-r--r--Documentation/devicetree/bindings/timer/hpe,gxp-timer.yaml47
-rw-r--r--Documentation/devicetree/bindings/timer/xlnx,xps-timer.yaml92
-rw-r--r--Documentation/devicetree/bindings/timestamp/hardware-timestamps-common.yaml29
-rw-r--r--Documentation/devicetree/bindings/timestamp/hte-consumer.yaml39
-rw-r--r--Documentation/devicetree/bindings/timestamp/nvidia,tegra194-hte.yaml88
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml4
-rw-r--r--Documentation/devicetree/bindings/usb/am33xx-usb.txt7
-rw-r--r--Documentation/devicetree/bindings/usb/da8xx-usb.txt5
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.yaml7
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml4
-rw-r--r--Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml72
-rw-r--r--Documentation/devicetree/bindings/usb/generic-ehci.yaml1
-rw-r--r--Documentation/devicetree/bindings/usb/generic-ohci.yaml1
-rw-r--r--Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml1
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,dwc3.yaml227
-rw-r--r--Documentation/devicetree/bindings/usb/renesas,usbhs.yaml4
-rw-r--r--Documentation/devicetree/bindings/usb/samsung,exynos-usb2.yaml15
-rw-r--r--Documentation/devicetree/bindings/usb/snps,dwc3.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/ti,am62-usb.yaml103
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml2
-rw-r--r--Documentation/devicetree/bindings/watchdog/da9062-wdt.txt6
-rw-r--r--Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.txt22
-rw-r--r--Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.yaml67
-rw-r--r--Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml1
-rw-r--r--Documentation/devicetree/bindings/watchdog/mtk-wdt.txt1
-rw-r--r--Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml39
-rw-r--r--Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml12
-rw-r--r--Documentation/devicetree/bindings/watchdog/socionext,uniphier-wdt.yaml2
-rw-r--r--Documentation/devicetree/bindings/watchdog/sunplus,sp7021-wdt.yaml47
-rw-r--r--Documentation/driver-api/dmaengine/provider.rst8
-rw-r--r--Documentation/driver-api/firmware/fw_upload.rst126
-rw-r--r--Documentation/driver-api/firmware/index.rst1
-rw-r--r--Documentation/driver-api/gpio/driver.rst30
-rw-r--r--Documentation/driver-api/index.rst1
-rw-r--r--Documentation/driver-api/pwm.rst6
-rw-r--r--Documentation/driver-api/serial/driver.rst2
-rw-r--r--Documentation/driver-api/serial/index.rst2
-rw-r--r--Documentation/driver-api/serial/n_gsm.rst159
-rw-r--r--Documentation/driver-api/tty/index.rst (renamed from Documentation/tty/index.rst)22
-rw-r--r--Documentation/driver-api/tty/moxa-smartio.rst (renamed from Documentation/driver-api/serial/moxa-smartio.rst)0
-rw-r--r--Documentation/driver-api/tty/n_gsm.rst153
-rw-r--r--Documentation/driver-api/tty/n_tty.rst (renamed from Documentation/tty/n_tty.rst)0
-rw-r--r--Documentation/driver-api/tty/tty_buffer.rst (renamed from Documentation/tty/tty_buffer.rst)0
-rw-r--r--Documentation/driver-api/tty/tty_driver.rst (renamed from Documentation/tty/tty_driver.rst)0
-rw-r--r--Documentation/driver-api/tty/tty_internals.rst (renamed from Documentation/tty/tty_internals.rst)0
-rw-r--r--Documentation/driver-api/tty/tty_ldisc.rst (renamed from Documentation/tty/tty_ldisc.rst)0
-rw-r--r--Documentation/driver-api/tty/tty_port.rst (renamed from Documentation/tty/tty_port.rst)0
-rw-r--r--Documentation/driver-api/tty/tty_struct.rst (renamed from Documentation/tty/tty_struct.rst)0
-rw-r--r--Documentation/driver-api/vfio-mediated-device.rst4
-rw-r--r--Documentation/filesystems/erofs.rst64
-rw-r--r--Documentation/filesystems/nfs/client-identifier.rst216
-rw-r--r--Documentation/filesystems/nfs/index.rst2
-rw-r--r--Documentation/firmware-guide/acpi/enumeration.rst25
-rw-r--r--Documentation/fpga/dfl.rst5
-rw-r--r--Documentation/hte/hte.rst79
-rw-r--r--Documentation/hte/index.rst22
-rw-r--r--Documentation/hte/tegra194-hte.rst49
-rw-r--r--Documentation/i2c/writing-clients.rst13
-rw-r--r--Documentation/images/COPYING-logo (renamed from Documentation/COPYING-logo)8
-rw-r--r--Documentation/images/logo.gif (renamed from Documentation/logo.gif) bin 16335 -> 16335 bytes
-rw-r--r--Documentation/images/logo.svg2040
-rw-r--r--Documentation/index.rst2
-rw-r--r--Documentation/input/input-programming.rst6
-rw-r--r--Documentation/leds/leds-qcom-lpg.rst78
-rw-r--r--Documentation/loongarch/features.rst3
-rw-r--r--Documentation/loongarch/index.rst21
-rw-r--r--Documentation/loongarch/introduction.rst387
-rw-r--r--Documentation/loongarch/irq-chip-model.rst156
-rw-r--r--Documentation/misc-devices/index.rst1
-rw-r--r--Documentation/misc-devices/oxsemi-tornado.rst131
-rw-r--r--Documentation/networking/ip-sysctl.rst23
-rw-r--r--Documentation/powerpc/dawr-power9.rst26
-rw-r--r--Documentation/powerpc/kasan.txt58
-rw-r--r--Documentation/riscv/vm-layout.rst36
-rw-r--r--Documentation/tools/rtla/Makefile14
-rw-r--r--Documentation/trace/ftrace.rst12
-rw-r--r--Documentation/trace/timerlat-tracer.rst5
-rw-r--r--Documentation/translations/zh_CN/index.rst1
-rw-r--r--Documentation/translations/zh_CN/loongarch/features.rst8
-rw-r--r--Documentation/translations/zh_CN/loongarch/index.rst26
-rw-r--r--Documentation/translations/zh_CN/loongarch/introduction.rst351
-rw-r--r--Documentation/translations/zh_CN/loongarch/irq-chip-model.rst155
-rw-r--r--Documentation/usb/gadget-testing.rst1
-rw-r--r--MAINTAINERS183
-rw-r--r--Makefile4
-rw-r--r--arch/Kconfig6
-rw-r--r--arch/alpha/include/uapi/asm/termbits.h214
-rw-r--r--arch/alpha/kernel/osf_sys.c1
-rw-r--r--arch/alpha/kernel/process.c15
-rw-r--r--arch/alpha/lib/csum_partial_copy.c1
-rw-r--r--arch/arc/kernel/process.c13
-rw-r--r--arch/arm/Kconfig38
-rw-r--r--arch/arm/Makefile2
-rw-r--r--arch/arm/boot/dts/Makefile3
-rw-r--r--arch/arm/boot/dts/aspeed-ast2600-evb.dts39
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-facebook-bletchley.dts182
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-nuvia-dc-scm.dts190
-rw-r--r--arch/arm/boot/dts/aspeed-g4.dtsi16
-rw-r--r--arch/arm/boot/dts/aspeed-g5.dtsi16
-rw-r--r--arch/arm/boot/dts/aspeed-g6.dtsi38
-rw-r--r--arch/arm/boot/dts/at91-sama7g5ek.dts21
-rw-r--r--arch/arm/boot/dts/at91sam9261ek.dts4
-rw-r--r--arch/arm/boot/dts/at91sam9263ek.dts4
-rw-r--r--arch/arm/boot/dts/at91sam9rlek.dts4
-rw-r--r--arch/arm/boot/dts/da850.dtsi2
-rw-r--r--arch/arm/boot/dts/hpe-bmc-dl360gen10.dts26
-rw-r--r--arch/arm/boot/dts/hpe-gxp.dtsi127
-rw-r--r--arch/arm/boot/dts/mmp2.dtsi2
-rw-r--r--arch/arm/boot/dts/pxa25x.dtsi5
-rw-r--r--arch/arm/boot/dts/pxa27x.dtsi5
-rw-r--r--arch/arm/boot/dts/pxa3xx.dtsi5
-rw-r--r--arch/arm/boot/dts/qcom-ipq4019.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom-sdx55.dtsi11
-rw-r--r--arch/arm/boot/dts/rk3036.dtsi2
-rw-r--r--arch/arm/boot/dts/rk3066a.dtsi3
-rw-r--r--arch/arm/boot/dts/rk3188.dtsi3
-rw-r--r--arch/arm/boot/dts/rk322x.dtsi2
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi2
-rw-r--r--arch/arm/boot/dts/rv1108.dtsi31
-rw-r--r--arch/arm/boot/dts/sam9x60.dtsi2
-rw-r--r--arch/arm/boot/dts/sama7g5.dtsi26
-rw-r--r--arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_socdk.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_sodia.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts4
-rw-r--r--arch/arm/common/locomo.c1
-rw-r--r--arch/arm/common/sa1111.c5
-rw-r--r--arch/arm/configs/am200epdkit_defconfig1
-rw-r--r--arch/arm/configs/cm_x300_defconfig1
-rw-r--r--arch/arm/configs/colibri_pxa270_defconfig1
-rw-r--r--arch/arm/configs/colibri_pxa300_defconfig1
-rw-r--r--arch/arm/configs/corgi_defconfig1
-rw-r--r--arch/arm/configs/eseries_pxa_defconfig1
-rw-r--r--arch/arm/configs/ezx_defconfig1
-rw-r--r--arch/arm/configs/h5000_defconfig1
-rw-r--r--arch/arm/configs/lpd270_defconfig1
-rw-r--r--arch/arm/configs/lubbock_defconfig1
-rw-r--r--arch/arm/configs/magician_defconfig1
-rw-r--r--arch/arm/configs/mainstone_defconfig1
-rw-r--r--arch/arm/configs/multi_v7_defconfig16
-rw-r--r--arch/arm/configs/omap1_defconfig3
-rw-r--r--arch/arm/configs/palmz72_defconfig1
-rw-r--r--arch/arm/configs/pcm027_defconfig1
-rw-r--r--arch/arm/configs/pxa255-idp_defconfig1
-rw-r--r--arch/arm/configs/pxa3xx_defconfig1
-rw-r--r--arch/arm/configs/pxa_defconfig1
-rw-r--r--arch/arm/configs/spitz_defconfig1
-rw-r--r--arch/arm/configs/trizeps4_defconfig1
-rw-r--r--arch/arm/configs/viper_defconfig1
-rw-r--r--arch/arm/configs/xcep_defconfig1
-rw-r--r--arch/arm/configs/zeus_defconfig1
-rw-r--r--arch/arm/include/asm/hardware/sa1111.h2
-rw-r--r--arch/arm/include/asm/io.h27
-rw-r--r--arch/arm/kernel/crash_dump.c27
-rw-r--r--arch/arm/kernel/ftrace.c6
-rw-r--r--arch/arm/kernel/process.c12
-rw-r--r--arch/arm/kernel/reboot.c4
-rw-r--r--arch/arm/mach-at91/Kconfig2
-rw-r--r--arch/arm/mach-ep93xx/clock.c10
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c2
-rw-r--r--arch/arm/mach-hpe/Kconfig23
-rw-r--r--arch/arm/mach-hpe/Makefile1
-rw-r--r--arch/arm/mach-hpe/gxp.c16
-rw-r--r--arch/arm/mach-mmp/Kconfig10
-rw-r--r--arch/arm/mach-mmp/Makefile3
-rw-r--r--arch/arm/mach-mmp/devices.c2
-rw-r--r--arch/arm/mach-mmp/devices.h10
-rw-r--r--arch/arm/mach-mmp/mfp.h2
-rw-r--r--arch/arm/mach-mmp/mmp2.h48
-rw-r--r--arch/arm/mach-mmp/pxa168.h60
-rw-r--r--arch/arm/mach-mmp/pxa910.h38
-rw-r--r--arch/arm/mach-mmp/tavorevb.c113
-rw-r--r--arch/arm/mach-mmp/ttc_dkb.c6
-rw-r--r--arch/arm/mach-omap1/Kconfig16
-rw-r--r--arch/arm/mach-omap1/clock.c693
-rw-r--r--arch/arm/mach-omap1/clock.h139
-rw-r--r--arch/arm/mach-omap1/clock_data.c483
-rw-r--r--arch/arm/mach-omap1/hardware.h2
-rw-r--r--arch/arm/mach-omap1/include/mach/uncompress.h117
-rw-r--r--arch/arm/mach-omap1/io.c7
-rw-r--r--arch/arm/mach-omap1/serial.c3
-rw-r--r--arch/arm/mach-omap1/serial.h (renamed from arch/arm/mach-omap1/include/mach/serial.h)0
-rw-r--r--arch/arm/mach-omap1/time.c5
-rw-r--r--arch/arm/mach-pxa/Kconfig15
-rw-r--r--arch/arm/mach-pxa/Makefile18
-rw-r--r--arch/arm/mach-pxa/addr-map.h (renamed from arch/arm/mach-pxa/include/mach/addr-map.h)0
-rw-r--r--arch/arm/mach-pxa/am300epd.c2
-rw-r--r--arch/arm/mach-pxa/balloon3-pcmcia.c (renamed from drivers/pcmcia/pxa2xx_balloon3.c)4
-rw-r--r--arch/arm/mach-pxa/balloon3.c4
-rw-r--r--arch/arm/mach-pxa/balloon3.h (renamed from arch/arm/mach-pxa/include/mach/balloon3.h)0
-rw-r--r--arch/arm/mach-pxa/cm-x300.c12
-rw-r--r--arch/arm/mach-pxa/colibri-evalboard.c1
-rw-r--r--arch/arm/mach-pxa/colibri-pcmcia.c (renamed from drivers/pcmcia/pxa2xx_colibri.c)2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa270-income.c1
-rw-r--r--arch/arm/mach-pxa/colibri-pxa270.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa300.c3
-rw-r--r--arch/arm/mach-pxa/colibri-pxa320.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa3xx.c3
-rw-r--r--arch/arm/mach-pxa/colibri.h2
-rw-r--r--arch/arm/mach-pxa/corgi.c23
-rw-r--r--arch/arm/mach-pxa/corgi.h (renamed from arch/arm/mach-pxa/include/mach/corgi.h)0
-rw-r--r--arch/arm/mach-pxa/corgi_pm.c5
-rw-r--r--arch/arm/mach-pxa/csb726.c5
-rw-r--r--arch/arm/mach-pxa/csb726.h2
-rw-r--r--arch/arm/mach-pxa/devices.c17
-rw-r--r--arch/arm/mach-pxa/e740-pcmcia.c (renamed from drivers/pcmcia/pxa2xx_e740.c)4
-rw-r--r--arch/arm/mach-pxa/eseries-gpio.h (renamed from arch/arm/mach-pxa/include/mach/eseries-gpio.h)0
-rw-r--r--arch/arm/mach-pxa/eseries.c38
-rw-r--r--arch/arm/mach-pxa/ezx.c1
-rw-r--r--arch/arm/mach-pxa/generic.c62
-rw-r--r--arch/arm/mach-pxa/generic.h9
-rw-r--r--arch/arm/mach-pxa/gumstix.c1
-rw-r--r--arch/arm/mach-pxa/gumstix.h2
-rw-r--r--arch/arm/mach-pxa/h5000.c2
-rw-r--r--arch/arm/mach-pxa/hx4700-pcmcia.c (renamed from drivers/pcmcia/pxa2xx_hx4700.c)4
-rw-r--r--arch/arm/mach-pxa/hx4700.c18
-rw-r--r--arch/arm/mach-pxa/hx4700.h (renamed from arch/arm/mach-pxa/include/mach/hx4700.h)0
-rw-r--r--arch/arm/mach-pxa/idp.c2
-rw-r--r--arch/arm/mach-pxa/idp.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/bitfield.h114
-rw-r--r--arch/arm/mach-pxa/include/mach/dma.h17
-rw-r--r--arch/arm/mach-pxa/include/mach/generic.h1
-rw-r--r--arch/arm/mach-pxa/include/mach/mtd-xip.h36
-rw-r--r--arch/arm/mach-pxa/include/mach/uncompress.h70
-rw-r--r--arch/arm/mach-pxa/irq.c5
-rw-r--r--arch/arm/mach-pxa/irqs.h (renamed from arch/arm/mach-pxa/include/mach/irqs.h)0
-rw-r--r--arch/arm/mach-pxa/littleton.c1
-rw-r--r--arch/arm/mach-pxa/lpd270.c6
-rw-r--r--arch/arm/mach-pxa/lubbock.c17
-rw-r--r--arch/arm/mach-pxa/lubbock.h (renamed from arch/arm/mach-pxa/include/mach/lubbock.h)4
-rw-r--r--arch/arm/mach-pxa/magician.c56
-rw-r--r--arch/arm/mach-pxa/magician.h (renamed from arch/arm/mach-pxa/include/mach/magician.h)2
-rw-r--r--arch/arm/mach-pxa/mainstone.c17
-rw-r--r--arch/arm/mach-pxa/mainstone.h (renamed from arch/arm/mach-pxa/include/mach/mainstone.h)4
-rw-r--r--arch/arm/mach-pxa/mfp-pxa2xx.c3
-rw-r--r--arch/arm/mach-pxa/mfp-pxa2xx.h2
-rw-r--r--arch/arm/mach-pxa/mfp-pxa3xx.c3
-rw-r--r--arch/arm/mach-pxa/mfp-pxa3xx.h2
-rw-r--r--arch/arm/mach-pxa/mfp.h (renamed from arch/arm/mach-pxa/include/mach/mfp.h)2
-rw-r--r--arch/arm/mach-pxa/mioa701.c4
-rw-r--r--arch/arm/mach-pxa/mxm8x10.c8
-rw-r--r--arch/arm/mach-pxa/palm27x.c2
-rw-r--r--arch/arm/mach-pxa/palmld-pcmcia.c (renamed from drivers/pcmcia/pxa2xx_palmld.c)5
-rw-r--r--arch/arm/mach-pxa/palmld.c23
-rw-r--r--arch/arm/mach-pxa/palmld.h (renamed from arch/arm/mach-pxa/include/mach/palmld.h)0
-rw-r--r--arch/arm/mach-pxa/palmt5.c11
-rw-r--r--arch/arm/mach-pxa/palmt5.h2
-rw-r--r--arch/arm/mach-pxa/palmtc-pcmcia.c (renamed from drivers/pcmcia/pxa2xx_palmtc.c)4
-rw-r--r--arch/arm/mach-pxa/palmtc.c4
-rw-r--r--arch/arm/mach-pxa/palmtc.h (renamed from arch/arm/mach-pxa/include/mach/palmtc.h)0
-rw-r--r--arch/arm/mach-pxa/palmte2.c2
-rw-r--r--arch/arm/mach-pxa/palmtreo.c4
-rw-r--r--arch/arm/mach-pxa/palmtx-pcmcia.c (renamed from drivers/pcmcia/pxa2xx_palmtx.c)4
-rw-r--r--arch/arm/mach-pxa/palmtx.c13
-rw-r--r--arch/arm/mach-pxa/palmtx.h (renamed from arch/arm/mach-pxa/include/mach/palmtx.h)0
-rw-r--r--arch/arm/mach-pxa/palmz72.c2
-rw-r--r--arch/arm/mach-pxa/pcm027.h2
-rw-r--r--arch/arm/mach-pxa/pcm990-baseboard.c2
-rw-r--r--arch/arm/mach-pxa/pcm990_baseboard.h2
-rw-r--r--arch/arm/mach-pxa/poodle.c31
-rw-r--r--arch/arm/mach-pxa/poodle.h (renamed from arch/arm/mach-pxa/include/mach/poodle.h)2
-rw-r--r--arch/arm/mach-pxa/pxa-dt.c2
-rw-r--r--arch/arm/mach-pxa/pxa-regs.h52
-rw-r--r--arch/arm/mach-pxa/pxa25x.c12
-rw-r--r--arch/arm/mach-pxa/pxa25x.h6
-rw-r--r--arch/arm/mach-pxa/pxa27x-udc.h2
-rw-r--r--arch/arm/mach-pxa/pxa27x.c12
-rw-r--r--arch/arm/mach-pxa/pxa27x.h6
-rw-r--r--arch/arm/mach-pxa/pxa2xx-regs.h (renamed from arch/arm/mach-pxa/include/mach/pxa2xx-regs.h)47
-rw-r--r--arch/arm/mach-pxa/pxa2xx.c32
-rw-r--r--arch/arm/mach-pxa/pxa300.c1
-rw-r--r--arch/arm/mach-pxa/pxa320.c1
-rw-r--r--arch/arm/mach-pxa/pxa3xx-regs.h (renamed from arch/arm/mach-pxa/include/mach/pxa3xx-regs.h)71
-rw-r--r--arch/arm/mach-pxa/pxa3xx-ulpi.c2
-rw-r--r--arch/arm/mach-pxa/pxa3xx.c19
-rw-r--r--arch/arm/mach-pxa/pxa3xx.h6
-rw-r--r--arch/arm/mach-pxa/pxa930.c1
-rw-r--r--arch/arm/mach-pxa/regs-ost.h (renamed from arch/arm/mach-pxa/include/mach/regs-ost.h)4
-rw-r--r--arch/arm/mach-pxa/regs-rtc.h2
-rw-r--r--arch/arm/mach-pxa/regs-u2d.h2
-rw-r--r--arch/arm/mach-pxa/regs-uart.h (renamed from arch/arm/mach-pxa/include/mach/regs-uart.h)2
-rw-r--r--arch/arm/mach-pxa/reset.c9
-rw-r--r--arch/arm/mach-pxa/reset.h (renamed from arch/arm/mach-pxa/include/mach/reset.h)2
-rw-r--r--arch/arm/mach-pxa/sharpsl_pm.c2
-rw-r--r--arch/arm/mach-pxa/sleep.S9
-rw-r--r--arch/arm/mach-pxa/smemc.c13
-rw-r--r--arch/arm/mach-pxa/smemc.h (renamed from arch/arm/mach-pxa/include/mach/smemc.h)0
-rw-r--r--arch/arm/mach-pxa/spitz.c37
-rw-r--r--arch/arm/mach-pxa/spitz.h (renamed from arch/arm/mach-pxa/include/mach/spitz.h)0
-rw-r--r--arch/arm/mach-pxa/spitz_pm.c3
-rw-r--r--arch/arm/mach-pxa/standby.S3
-rw-r--r--arch/arm/mach-pxa/tosa.c80
-rw-r--r--arch/arm/mach-pxa/tosa.h (renamed from arch/arm/mach-pxa/include/mach/tosa.h)18
-rw-r--r--arch/arm/mach-pxa/trizeps4-pcmcia.c (renamed from drivers/pcmcia/pxa2xx_trizeps4.c)6
-rw-r--r--arch/arm/mach-pxa/trizeps4.c6
-rw-r--r--arch/arm/mach-pxa/trizeps4.h (renamed from arch/arm/mach-pxa/include/mach/trizeps4.h)1
-rw-r--r--arch/arm/mach-pxa/viper-pcmcia.c (renamed from drivers/pcmcia/pxa2xx_viper.c)6
-rw-r--r--arch/arm/mach-pxa/viper-pcmcia.h (renamed from include/linux/platform_data/pcmcia-pxa2xx_viper.h)0
-rw-r--r--arch/arm/mach-pxa/viper.c20
-rw-r--r--arch/arm/mach-pxa/vpac270-pcmcia.c (renamed from drivers/pcmcia/pxa2xx_vpac270.c)4
-rw-r--r--arch/arm/mach-pxa/vpac270.c4
-rw-r--r--arch/arm/mach-pxa/vpac270.h (renamed from arch/arm/mach-pxa/include/mach/vpac270.h)0
-rw-r--r--arch/arm/mach-pxa/xcep.c4
-rw-r--r--arch/arm/mach-pxa/z2.c13
-rw-r--r--arch/arm/mach-pxa/z2.h (renamed from arch/arm/mach-pxa/include/mach/z2.h)0
-rw-r--r--arch/arm/mach-pxa/zeus.c20
-rw-r--r--arch/arm/mach-pxa/zylonite.c34
-rw-r--r--arch/arm/mach-pxa/zylonite.h2
-rw-r--r--arch/arm/mach-pxa/zylonite_pxa300.c1
-rw-r--r--arch/arm/mach-pxa/zylonite_pxa320.c1
-rw-r--r--arch/arm/mach-sa1100/generic.c6
-rw-r--r--arch/arm/mach-sa1100/include/mach/reset.h1
-rw-r--r--arch/arm/mm/copypage-xsc3.c2
-rw-r--r--arch/arm/mm/ioremap.c2
-rw-r--r--arch/arm64/Kconfig4
-rw-r--r--arch/arm64/boot/dts/intel/Makefile3
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex_n6000.dts66
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-db820c.dts18
-rw-r--r--arch/arm64/boot/dts/qcom/ipq6018.dtsi12
-rw-r--r--arch/arm64/boot/dts/qcom/ipq8074.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/msm8953.dtsi11
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994.dtsi7
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi20
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi23
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998.dtsi13
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404-evb.dtsi7
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180.dtsi13
-rw-r--r--arch/arm64/boot/dts/qcom/sc7280.dtsi68
-rw-r--r--arch/arm64/boot/dts/qcom/sdm630.dtsi12
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845.dtsi26
-rw-r--r--arch/arm64/boot/dts/qcom/sm6125.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/sm6350.dtsi11
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150.dtsi22
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250.dtsi20
-rw-r--r--arch/arm64/boot/dts/qcom/sm8350.dtsi21
-rw-r--r--arch/arm64/boot/dts/qcom/sm8450.dtsi20
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308.dtsi5
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk356x.dtsi2
-rw-r--r--arch/arm64/boot/dts/sprd/whale2.dtsi4
-rw-r--r--arch/arm64/crypto/Kconfig16
-rw-r--r--arch/arm64/crypto/Makefile8
-rw-r--r--arch/arm64/crypto/sm4-ce-cipher-core.S36
-rw-r--r--arch/arm64/crypto/sm4-ce-cipher-glue.c82
-rw-r--r--arch/arm64/crypto/sm4-ce-core.S688
-rw-r--r--arch/arm64/crypto/sm4-ce-glue.c386
-rw-r--r--arch/arm64/crypto/sm4-neon-core.S487
-rw-r--r--arch/arm64/crypto/sm4-neon-glue.c442
-rw-r--r--arch/arm64/include/asm/compat.h93
-rw-r--r--arch/arm64/include/asm/processor.h4
-rw-r--r--arch/arm64/include/asm/unistd.h1
-rw-r--r--arch/arm64/kernel/crash_dump.c29
-rw-r--r--arch/arm64/kernel/hibernate.c2
-rw-r--r--arch/arm64/kernel/process.c15
-rw-r--r--arch/arm64/kernel/setup.c7
-rw-r--r--arch/arm64/kernel/signal.c2
-rw-r--r--arch/arm64/mm/hugetlbpage.c5
-rw-r--r--arch/csky/kernel/power.c6
-rw-r--r--arch/csky/kernel/process.c15
-rw-r--r--arch/hexagon/kernel/process.c12
-rw-r--r--arch/ia64/include/asm/ptrace.h4
-rw-r--r--arch/ia64/kernel/crash_dump.c32
-rw-r--r--arch/ia64/kernel/kprobes.c64
-rw-r--r--arch/ia64/kernel/mca.c1
-rw-r--r--arch/ia64/kernel/palinfo.c2
-rw-r--r--arch/ia64/kernel/process.c19
-rw-r--r--arch/ia64/kernel/ptrace.c59
-rw-r--r--arch/ia64/kernel/setup.c2
-rw-r--r--arch/ia64/kernel/smpboot.c4
-rw-r--r--arch/ia64/kernel/traps.c2
-rw-r--r--arch/ia64/mm/init.c2
-rw-r--r--arch/ia64/mm/tlb.c4
-rw-r--r--arch/loongarch/Kbuild6
-rw-r--r--arch/loongarch/Kconfig438
-rw-r--r--arch/loongarch/Kconfig.debug0
-rw-r--r--arch/loongarch/Makefile100
-rw-r--r--arch/loongarch/boot/.gitignore (renamed from arch/arm/mach-pxa/Makefile.boot)3
-rw-r--r--arch/loongarch/boot/Makefile16
-rw-r--r--arch/loongarch/boot/dts/Makefile4
-rw-r--r--arch/loongarch/configs/loongson3_defconfig771
-rw-r--r--arch/loongarch/include/asm/Kbuild30
-rw-r--r--arch/loongarch/include/asm/acenv.h18
-rw-r--r--arch/loongarch/include/asm/acpi.h38
-rw-r--r--arch/loongarch/include/asm/addrspace.h112
-rw-r--r--arch/loongarch/include/asm/asm-offsets.h5
-rw-r--r--arch/loongarch/include/asm/asm-prototypes.h7
-rw-r--r--arch/loongarch/include/asm/asm.h191
-rw-r--r--arch/loongarch/include/asm/asmmacro.h289
-rw-r--r--arch/loongarch/include/asm/atomic.h362
-rw-r--r--arch/loongarch/include/asm/barrier.h159
-rw-r--r--arch/loongarch/include/asm/bitops.h33
-rw-r--r--arch/loongarch/include/asm/bitrev.h34
-rw-r--r--arch/loongarch/include/asm/bootinfo.h43
-rw-r--r--arch/loongarch/include/asm/branch.h21
-rw-r--r--arch/loongarch/include/asm/bug.h23
-rw-r--r--arch/loongarch/include/asm/cache.h13
-rw-r--r--arch/loongarch/include/asm/cacheflush.h80
-rw-r--r--arch/loongarch/include/asm/cacheops.h37
-rw-r--r--arch/loongarch/include/asm/clocksource.h12
-rw-r--r--arch/loongarch/include/asm/cmpxchg.h123
-rw-r--r--arch/loongarch/include/asm/compiler.h15
-rw-r--r--arch/loongarch/include/asm/cpu-features.h73
-rw-r--r--arch/loongarch/include/asm/cpu-info.h116
-rw-r--r--arch/loongarch/include/asm/cpu.h127
-rw-r--r--arch/loongarch/include/asm/cpufeature.h24
-rw-r--r--arch/loongarch/include/asm/delay.h26
-rw-r--r--arch/loongarch/include/asm/dma-direct.h11
-rw-r--r--arch/loongarch/include/asm/dmi.h24
-rw-r--r--arch/loongarch/include/asm/efi.h41
-rw-r--r--arch/loongarch/include/asm/elf.h301
-rw-r--r--arch/loongarch/include/asm/entry-common.h13
-rw-r--r--arch/loongarch/include/asm/exec.h10
-rw-r--r--arch/loongarch/include/asm/fb.h23
-rw-r--r--arch/loongarch/include/asm/fixmap.h13
-rw-r--r--arch/loongarch/include/asm/fpregdef.h53
-rw-r--r--arch/loongarch/include/asm/fpu.h129
-rw-r--r--arch/loongarch/include/asm/futex.h108
-rw-r--r--arch/loongarch/include/asm/hardirq.h26
-rw-r--r--arch/loongarch/include/asm/hugetlb.h83
-rw-r--r--arch/loongarch/include/asm/hw_irq.h17
-rw-r--r--arch/loongarch/include/asm/idle.h9
-rw-r--r--arch/loongarch/include/asm/inst.h117
-rw-r--r--arch/loongarch/include/asm/io.h129
-rw-r--r--arch/loongarch/include/asm/irq.h132
-rw-r--r--arch/loongarch/include/asm/irq_regs.h27
-rw-r--r--arch/loongarch/include/asm/irqflags.h78
-rw-r--r--arch/loongarch/include/asm/kdebug.h23
-rw-r--r--arch/loongarch/include/asm/linkage.h36
-rw-r--r--arch/loongarch/include/asm/local.h138
-rw-r--r--arch/loongarch/include/asm/loongarch.h1516
-rw-r--r--arch/loongarch/include/asm/loongson.h153
-rw-r--r--arch/loongarch/include/asm/mmu.h16
-rw-r--r--arch/loongarch/include/asm/mmu_context.h152
-rw-r--r--arch/loongarch/include/asm/mmzone.h18
-rw-r--r--arch/loongarch/include/asm/module.h80
-rw-r--r--arch/loongarch/include/asm/module.lds.h7
-rw-r--r--arch/loongarch/include/asm/numa.h67
-rw-r--r--arch/loongarch/include/asm/page.h115
-rw-r--r--arch/loongarch/include/asm/percpu.h214
-rw-r--r--arch/loongarch/include/asm/perf_event.h10
-rw-r--r--arch/loongarch/include/asm/pgalloc.h103
-rw-r--r--arch/loongarch/include/asm/pgtable-bits.h131
-rw-r--r--arch/loongarch/include/asm/pgtable.h565
-rw-r--r--arch/loongarch/include/asm/prefetch.h29
-rw-r--r--arch/loongarch/include/asm/processor.h209
-rw-r--r--arch/loongarch/include/asm/ptrace.h152
-rw-r--r--arch/loongarch/include/asm/reboot.h10
-rw-r--r--arch/loongarch/include/asm/regdef.h41
-rw-r--r--arch/loongarch/include/asm/seccomp.h20
-rw-r--r--arch/loongarch/include/asm/serial.h11
-rw-r--r--arch/loongarch/include/asm/setup.h21
-rw-r--r--arch/loongarch/include/asm/shmparam.h12
-rw-r--r--arch/loongarch/include/asm/smp.h124
-rw-r--r--arch/loongarch/include/asm/sparsemem.h23
-rw-r--r--arch/loongarch/include/asm/stackframe.h219
-rw-r--r--arch/loongarch/include/asm/stacktrace.h74
-rw-r--r--arch/loongarch/include/asm/string.h12
-rw-r--r--arch/loongarch/include/asm/switch_to.h37
-rw-r--r--arch/loongarch/include/asm/syscall.h74
-rw-r--r--arch/loongarch/include/asm/thread_info.h106
-rw-r--r--arch/loongarch/include/asm/time.h50
-rw-r--r--arch/loongarch/include/asm/timex.h33
-rw-r--r--arch/loongarch/include/asm/tlb.h180
-rw-r--r--arch/loongarch/include/asm/tlbflush.h48
-rw-r--r--arch/loongarch/include/asm/topology.h41
-rw-r--r--arch/loongarch/include/asm/types.h19
-rw-r--r--arch/loongarch/include/asm/uaccess.h269
-rw-r--r--arch/loongarch/include/asm/unistd.h11
-rw-r--r--arch/loongarch/include/asm/vdso.h38
-rw-r--r--arch/loongarch/include/asm/vdso/clocksource.h8
-rw-r--r--arch/loongarch/include/asm/vdso/gettimeofday.h99
-rw-r--r--arch/loongarch/include/asm/vdso/processor.h14
-rw-r--r--arch/loongarch/include/asm/vdso/vdso.h30
-rw-r--r--arch/loongarch/include/asm/vdso/vsyscall.h27
-rw-r--r--arch/loongarch/include/asm/vermagic.h19
-rw-r--r--arch/loongarch/include/asm/vmalloc.h4
-rw-r--r--arch/loongarch/include/uapi/asm/Kbuild (renamed from drivers/staging/vme/Makefile)2
-rw-r--r--arch/loongarch/include/uapi/asm/auxvec.h17
-rw-r--r--arch/loongarch/include/uapi/asm/bitsperlong.h9
-rw-r--r--arch/loongarch/include/uapi/asm/break.h23
-rw-r--r--arch/loongarch/include/uapi/asm/byteorder.h13
-rw-r--r--arch/loongarch/include/uapi/asm/hwcap.h20
-rw-r--r--arch/loongarch/include/uapi/asm/ptrace.h52
-rw-r--r--arch/loongarch/include/uapi/asm/reg.h59
-rw-r--r--arch/loongarch/include/uapi/asm/sigcontext.h44
-rw-r--r--arch/loongarch/include/uapi/asm/signal.h13
-rw-r--r--arch/loongarch/include/uapi/asm/ucontext.h35
-rw-r--r--arch/loongarch/include/uapi/asm/unistd.h5
-rw-r--r--arch/loongarch/kernel/.gitignore2
-rw-r--r--arch/loongarch/kernel/Makefile25
-rw-r--r--arch/loongarch/kernel/access-helper.h13
-rw-r--r--arch/loongarch/kernel/acpi.c333
-rw-r--r--arch/loongarch/kernel/asm-offsets.c264
-rw-r--r--arch/loongarch/kernel/cacheinfo.c122
-rw-r--r--arch/loongarch/kernel/cpu-probe.c292
-rw-r--r--arch/loongarch/kernel/dma.c40
-rw-r--r--arch/loongarch/kernel/efi.c72
-rw-r--r--arch/loongarch/kernel/elf.c30
-rw-r--r--arch/loongarch/kernel/entry.S89
-rw-r--r--arch/loongarch/kernel/env.c101
-rw-r--r--arch/loongarch/kernel/fpu.S261
-rw-r--r--arch/loongarch/kernel/genex.S95
-rw-r--r--arch/loongarch/kernel/head.S98
-rw-r--r--arch/loongarch/kernel/idle.c16
-rw-r--r--arch/loongarch/kernel/inst.c40
-rw-r--r--arch/loongarch/kernel/io.c94
-rw-r--r--arch/loongarch/kernel/irq.c88
-rw-r--r--arch/loongarch/kernel/mem.c64
-rw-r--r--arch/loongarch/kernel/module-sections.c121
-rw-r--r--arch/loongarch/kernel/module.c375
-rw-r--r--arch/loongarch/kernel/numa.c466
-rw-r--r--arch/loongarch/kernel/proc.c127
-rw-r--r--arch/loongarch/kernel/process.c267
-rw-r--r--arch/loongarch/kernel/ptrace.c431
-rw-r--r--arch/loongarch/kernel/reset.c102
-rw-r--r--arch/loongarch/kernel/setup.c374
-rw-r--r--arch/loongarch/kernel/signal.c566
-rw-r--r--arch/loongarch/kernel/smp.c751
-rw-r--r--arch/loongarch/kernel/switch.S35
-rw-r--r--arch/loongarch/kernel/syscall.c63
-rw-r--r--arch/loongarch/kernel/time.c214
-rw-r--r--arch/loongarch/kernel/topology.c52
-rw-r--r--arch/loongarch/kernel/traps.c725
-rw-r--r--arch/loongarch/kernel/vdso.c138
-rw-r--r--arch/loongarch/kernel/vmlinux.lds.S120
-rw-r--r--arch/loongarch/lib/Makefile6
-rw-r--r--arch/loongarch/lib/clear_user.S43
-rw-r--r--arch/loongarch/lib/copy_user.S47
-rw-r--r--arch/loongarch/lib/delay.c43
-rw-r--r--arch/loongarch/lib/dump_tlb.c111
-rw-r--r--arch/loongarch/mm/Makefile9
-rw-r--r--arch/loongarch/mm/cache.c141
-rw-r--r--arch/loongarch/mm/extable.c22
-rw-r--r--arch/loongarch/mm/fault.c261
-rw-r--r--arch/loongarch/mm/hugetlbpage.c87
-rw-r--r--arch/loongarch/mm/init.c178
-rw-r--r--arch/loongarch/mm/ioremap.c27
-rw-r--r--arch/loongarch/mm/maccess.c10
-rw-r--r--arch/loongarch/mm/mmap.c125
-rw-r--r--arch/loongarch/mm/page.S84
-rw-r--r--arch/loongarch/mm/pgtable.c130
-rw-r--r--arch/loongarch/mm/tlb.c305
-rw-r--r--arch/loongarch/mm/tlbex.S546
-rw-r--r--arch/loongarch/pci/Makefile7
-rw-r--r--arch/loongarch/vdso/.gitignore2
-rw-r--r--arch/loongarch/vdso/Makefile96
-rw-r--r--arch/loongarch/vdso/elf.S15
-rwxr-xr-xarch/loongarch/vdso/gen_vdso_offsets.sh13
-rw-r--r--arch/loongarch/vdso/sigreturn.S24
-rw-r--r--arch/loongarch/vdso/vdso.S22
-rw-r--r--arch/loongarch/vdso/vdso.lds.S72
-rw-r--r--arch/loongarch/vdso/vgettimeofday.c25
-rw-r--r--arch/m68k/Kconfig.bus10
-rw-r--r--arch/m68k/Kconfig.cpu2
-rw-r--r--arch/m68k/Kconfig.machine1
-rw-r--r--arch/m68k/coldfire/Makefile2
-rw-r--r--arch/m68k/coldfire/dma.c43
-rw-r--r--arch/m68k/coldfire/intc.c2
-rw-r--r--arch/m68k/coldfire/m53xx.c2
-rw-r--r--arch/m68k/coldfire/pci.c2
-rw-r--r--arch/m68k/emu/natfeat.c3
-rw-r--r--arch/m68k/hp300/config.c7
-rw-r--r--arch/m68k/include/asm/dma.h483
-rw-r--r--arch/m68k/include/asm/elf.h9
-rw-r--r--arch/m68k/include/asm/machdep.h2
-rw-r--r--arch/m68k/include/asm/mmu.h4
-rw-r--r--arch/m68k/include/asm/pgtable_no.h3
-rw-r--r--arch/m68k/include/uapi/asm/ptrace.h5
-rw-r--r--arch/m68k/kernel/process.c17
-rw-r--r--arch/m68k/kernel/ptrace.c58
-rw-r--r--arch/m68k/kernel/setup_mm.c10
-rw-r--r--arch/m68k/kernel/setup_no.c2
-rw-r--r--arch/m68k/kernel/time.c9
-rw-r--r--arch/m68k/lib/checksum.c2
-rw-r--r--arch/m68k/mac/config.c4
-rw-r--r--arch/m68k/mm/motorola.c1
-rw-r--r--arch/m68k/q40/config.c7
-rw-r--r--arch/m68k/virt/config.c4
-rw-r--r--arch/microblaze/include/asm/string.h2
-rw-r--r--arch/microblaze/kernel/kgdb.c2
-rw-r--r--arch/microblaze/kernel/process.c12
-rw-r--r--arch/microblaze/kernel/timer.c4
-rw-r--r--arch/microblaze/lib/memcpy.c18
-rw-r--r--arch/microblaze/lib/memmove.c31
-rw-r--r--arch/microblaze/lib/memset.c33
-rw-r--r--arch/microblaze/mm/init.c5
-rw-r--r--arch/mips/Kconfig17
-rw-r--r--arch/mips/alchemy/common/dbdma.c2
-rw-r--r--arch/mips/alchemy/devboards/db1300.c9
-rw-r--r--arch/mips/bmips/dma.c1
-rw-r--r--arch/mips/boot/dts/brcm/bcm97358svmb.dts2
-rw-r--r--arch/mips/boot/dts/brcm/bcm97360svmb.dts2
-rw-r--r--arch/mips/boot/dts/brcm/bcm97425svmb.dts2
-rw-r--r--arch/mips/boot/dts/ingenic/cu1000-neo.dts77
-rw-r--r--arch/mips/boot/dts/ingenic/cu1830-neo.dts76
-rw-r--r--arch/mips/boot/dts/ingenic/jz4780.dtsi2
-rw-r--r--arch/mips/boot/dts/ingenic/x1000.dtsi34
-rw-r--r--arch/mips/boot/dts/ingenic/x1830.dtsi55
-rw-r--r--arch/mips/boot/dts/mscc/jaguar2_pcb110.dts10
-rw-r--r--arch/mips/boot/dts/mscc/jaguar2_pcb111.dts10
-rw-r--r--arch/mips/boot/dts/mscc/jaguar2_pcb118.dts6
-rw-r--r--arch/mips/boot/dts/mscc/ocelot.dtsi4
-rw-r--r--arch/mips/boot/dts/mscc/ocelot_pcb120.dts6
-rw-r--r--arch/mips/boot/dts/mscc/serval_common.dtsi14
-rw-r--r--arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts2
-rw-r--r--arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts26
-rw-r--r--arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts30
-rw-r--r--arch/mips/boot/dts/ralink/mt7621.dtsi4
-rw-r--r--arch/mips/boot/tools/relocs.c2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-bootmem.c2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c5
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper.c6
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-pko.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-usb.c2
-rw-r--r--arch/mips/configs/cu1000-neo_defconfig2
-rw-r--r--arch/mips/configs/cu1830-neo_defconfig2
-rw-r--r--arch/mips/dec/ioasic-irq.c4
-rw-r--r--arch/mips/dec/setup.c2
-rw-r--r--arch/mips/fw/arc/memory.c2
-rw-r--r--arch/mips/generic/board-ingenic.c26
-rw-r--r--arch/mips/include/asm/checksum.h79
-rw-r--r--arch/mips/include/asm/compat.h41
-rw-r--r--arch/mips/include/asm/cpu-features.h3
-rw-r--r--arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-ralink/spaces.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-bootinfo.h2
-rw-r--r--arch/mips/include/asm/unistd.h2
-rw-r--r--arch/mips/include/uapi/asm/fcntl.h30
-rw-r--r--arch/mips/include/uapi/asm/stat.h12
-rw-r--r--arch/mips/include/uapi/asm/termbits.h249
-rw-r--r--arch/mips/jazz/irq.c2
-rw-r--r--arch/mips/kernel/cmpxchg.c2
-rw-r--r--arch/mips/kernel/cpu-probe.c2
-rw-r--r--arch/mips/kernel/crash_dump.c27
-rw-r--r--arch/mips/kernel/idle.c2
-rw-r--r--arch/mips/kernel/kprobes.c36
-rw-r--r--arch/mips/kernel/mips-cpc.c1
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c2
-rw-r--r--arch/mips/kernel/process.c13
-rw-r--r--arch/mips/kernel/reset.c3
-rw-r--r--arch/mips/kernel/setup.c17
-rw-r--r--arch/mips/kernel/smp.c6
-rw-r--r--arch/mips/kvm/tlb.c2
-rw-r--r--arch/mips/loongson32/Kconfig2
-rw-r--r--arch/mips/mm/fault.c6
-rw-r--r--arch/mips/net/bpf_jit_comp32.c2
-rw-r--r--arch/mips/pci/pcie-octeon.c4
-rw-r--r--arch/mips/pic32/pic32mzda/config.c2
-rw-r--r--arch/mips/sgi-ip22/ip22-reset.c11
-rw-r--r--arch/mips/sgi-ip27/ip27-xtalk.c4
-rw-r--r--arch/mips/sgi-ip30/ip30-xtalk.c4
-rw-r--r--arch/mips/sibyte/bcm1480/setup.c4
-rw-r--r--arch/mips/tools/loongson3-llsc-check.c2
-rw-r--r--arch/mips/txx9/generic/pci.c2
-rw-r--r--arch/mips/vr41xx/common/cmu.c2
-rw-r--r--arch/nios2/kernel/process.c12
-rw-r--r--arch/openrisc/kernel/process.c12
-rw-r--r--arch/parisc/Kconfig4
-rw-r--r--arch/parisc/Makefile1
-rw-r--r--arch/parisc/include/asm/assembly.h12
-rw-r--r--arch/parisc/include/asm/cache.h1
-rw-r--r--arch/parisc/include/asm/compat.h45
-rw-r--r--arch/parisc/include/asm/fb.h4
-rw-r--r--arch/parisc/include/asm/fixmap.h25
-rw-r--r--arch/parisc/include/asm/unistd.h3
-rw-r--r--arch/parisc/include/uapi/asm/termbits.h241
-rw-r--r--arch/parisc/kernel/cache.c3
-rw-r--r--arch/parisc/kernel/entry.S22
-rw-r--r--arch/parisc/kernel/pacache.S94
-rw-r--r--arch/parisc/kernel/process.c22
-rw-r--r--arch/parisc/kernel/processor.c2
-rw-r--r--arch/parisc/kernel/topology.c16
-rw-r--r--arch/parisc/mm/init.c6
-rw-r--r--arch/parisc/nm6
-rw-r--r--arch/powerpc/Kconfig30
-rw-r--r--arch/powerpc/Kconfig.debug3
-rw-r--r--arch/powerpc/Makefile12
-rw-r--r--arch/powerpc/boot/Makefile10
-rw-r--r--arch/powerpc/boot/crt0.S45
-rw-r--r--arch/powerpc/boot/cuboot-hotfoot.c2
-rw-r--r--arch/powerpc/boot/dts/fsl/p2020si-post.dtsi5
-rw-r--r--arch/powerpc/boot/dts/microwatt.dts2
-rw-r--r--arch/powerpc/boot/ops.h6
-rwxr-xr-xarch/powerpc/boot/wrapper2
-rw-r--r--arch/powerpc/crypto/aes-spe-glue.c2
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash.h4
-rw-r--r--arch/powerpc/include/asm/book3s/64/hugetlb.h4
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h1
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h6
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h3
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h12
-rw-r--r--arch/powerpc/include/asm/book3s/64/slice.h26
-rw-r--r--arch/powerpc/include/asm/checksum.h26
-rw-r--r--arch/powerpc/include/asm/code-patching.h67
-rw-r--r--arch/powerpc/include/asm/compat.h50
-rw-r--r--arch/powerpc/include/asm/cputable.h16
-rw-r--r--arch/powerpc/include/asm/drmem.h3
-rw-r--r--arch/powerpc/include/asm/eeh.h6
-rw-r--r--arch/powerpc/include/asm/elf.h14
-rw-r--r--arch/powerpc/include/asm/fadump-internal.h2
-rw-r--r--arch/powerpc/include/asm/fsl_85xx_cache_sram.h35
-rw-r--r--arch/powerpc/include/asm/ftrace.h8
-rw-r--r--arch/powerpc/include/asm/hugetlb.h2
-rw-r--r--arch/powerpc/include/asm/inst.h13
-rw-r--r--arch/powerpc/include/asm/interrupt.h52
-rw-r--r--arch/powerpc/include/asm/io.h2
-rw-r--r--arch/powerpc/include/asm/iommu.h6
-rw-r--r--arch/powerpc/include/asm/kasan.h22
-rw-r--r--arch/powerpc/include/asm/kup.h1
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h3
-rw-r--r--arch/powerpc/include/asm/kvm_host.h10
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h14
-rw-r--r--arch/powerpc/include/asm/linkage.h2
-rw-r--r--arch/powerpc/include/asm/livepatch.h10
-rw-r--r--arch/powerpc/include/asm/mmu_context.h5
-rw-r--r--arch/powerpc/include/asm/module.h2
-rw-r--r--arch/powerpc/include/asm/nohash/tlbflush.h12
-rw-r--r--arch/powerpc/include/asm/paca.h8
-rw-r--r--arch/powerpc/include/asm/page.h8
-rw-r--r--arch/powerpc/include/asm/parport.h2
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h14
-rw-r--r--arch/powerpc/include/asm/pnv-pci.h1
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h109
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h4
-rw-r--r--arch/powerpc/include/asm/probes.h36
-rw-r--r--arch/powerpc/include/asm/processor.h2
-rw-r--r--arch/powerpc/include/asm/ptrace.h2
-rw-r--r--arch/powerpc/include/asm/reg.h3
-rw-r--r--arch/powerpc/include/asm/signal.h5
-rw-r--r--arch/powerpc/include/asm/slice.h46
-rw-r--r--arch/powerpc/include/asm/smp.h2
-rw-r--r--arch/powerpc/include/asm/svm.h2
-rw-r--r--arch/powerpc/include/asm/switch_to.h9
-rw-r--r--arch/powerpc/include/asm/task_size_64.h8
-rw-r--r--arch/powerpc/include/asm/time.h1
-rw-r--r--arch/powerpc/include/asm/topology.h8
-rw-r--r--arch/powerpc/include/asm/types.h8
-rw-r--r--arch/powerpc/include/asm/unistd.h1
-rw-r--r--arch/powerpc/include/asm/vas.h2
-rw-r--r--arch/powerpc/include/uapi/asm/auxvec.h4
-rw-r--r--arch/powerpc/include/uapi/asm/signal.h5
-rw-r--r--arch/powerpc/include/uapi/asm/stat.h4
-rw-r--r--arch/powerpc/include/uapi/asm/termbits.h182
-rw-r--r--arch/powerpc/kernel/Makefile13
-rw-r--r--arch/powerpc/kernel/btext.c5
-rw-r--r--arch/powerpc/kernel/cacheinfo.c1
-rw-r--r--arch/powerpc/kernel/cputable.c28
-rw-r--r--arch/powerpc/kernel/crash_dump.c37
-rw-r--r--arch/powerpc/kernel/dawr.c2
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c10
-rw-r--r--arch/powerpc/kernel/eeh.c4
-rw-r--r--arch/powerpc/kernel/eeh_driver.c1
-rw-r--r--arch/powerpc/kernel/eeh_event.c2
-rw-r--r--arch/powerpc/kernel/eeh_pe.c3
-rw-r--r--arch/powerpc/kernel/eeh_sysfs.c1
-rw-r--r--arch/powerpc/kernel/entry_32.S49
-rw-r--r--arch/powerpc/kernel/entry_64.S150
-rw-r--r--arch/powerpc/kernel/fadump.c52
-rw-r--r--arch/powerpc/kernel/head_64.S4
-rw-r--r--arch/powerpc/kernel/idle.c2
-rw-r--r--arch/powerpc/kernel/interrupt_64.S12
-rw-r--r--arch/powerpc/kernel/iommu.c5
-rw-r--r--arch/powerpc/kernel/irq.c87
-rw-r--r--arch/powerpc/kernel/isa-bridge.c2
-rw-r--r--arch/powerpc/kernel/kprobes.c10
-rw-r--r--arch/powerpc/kernel/legacy_serial.c2
-rw-r--r--arch/powerpc/kernel/misc_64.S2
-rw-r--r--arch/powerpc/kernel/module.c4
-rw-r--r--arch/powerpc/kernel/module_32.c40
-rw-r--r--arch/powerpc/kernel/module_64.c11
-rw-r--r--arch/powerpc/kernel/nvram_64.c2
-rw-r--r--arch/powerpc/kernel/paca.c5
-rw-r--r--arch/powerpc/kernel/pci-common.c6
-rw-r--r--arch/powerpc/kernel/pci-hotplug.c1
-rw-r--r--arch/powerpc/kernel/pci_32.c1
-rw-r--r--arch/powerpc/kernel/pci_64.c11
-rw-r--r--arch/powerpc/kernel/pci_dn.c2
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c4
-rw-r--r--arch/powerpc/kernel/proc_powerpc.c2
-rw-r--r--arch/powerpc/kernel/process.c61
-rw-r--r--arch/powerpc/kernel/prom.c1
-rw-r--r--arch/powerpc/kernel/prom_init.c4
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-view.c2
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace.c6
-rw-r--r--arch/powerpc/kernel/rtas-proc.c9
-rw-r--r--arch/powerpc/kernel/rtas-rtc.c1
-rw-r--r--arch/powerpc/kernel/rtas.c21
-rw-r--r--arch/powerpc/kernel/rtas_entry.S172
-rw-r--r--arch/powerpc/kernel/rtas_flash.c2
-rw-r--r--arch/powerpc/kernel/rtas_pci.c3
-rw-r--r--arch/powerpc/kernel/rtasd.c1
-rw-r--r--arch/powerpc/kernel/setup-common.c83
-rw-r--r--arch/powerpc/kernel/setup_32.c3
-rw-r--r--arch/powerpc/kernel/setup_64.c5
-rw-r--r--arch/powerpc/kernel/signal.c15
-rw-r--r--arch/powerpc/kernel/signal_32.c6
-rw-r--r--arch/powerpc/kernel/signal_64.c7
-rw-r--r--arch/powerpc/kernel/smp.c27
-rw-r--r--arch/powerpc/kernel/syscalls.c2
-rw-r--r--arch/powerpc/kernel/sysfs.c2
-rw-r--r--arch/powerpc/kernel/time.c15
-rw-r--r--arch/powerpc/kernel/trace/Makefile5
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c383
-rw-r--r--arch/powerpc/kernel/traps.c6
-rw-r--r--arch/powerpc/kernel/uprobes.c5
-rw-r--r--arch/powerpc/kernel/vdso.c1
-rw-r--r--arch/powerpc/kernel/vdso/Makefile1
-rw-r--r--arch/powerpc/kernel/vdso/vdso32.lds.S1
-rw-r--r--arch/powerpc/kernel/vdso/vdso64.lds.S1
-rw-r--r--arch/powerpc/kernel/watchdog.c2
-rw-r--r--arch/powerpc/kexec/Makefile2
-rw-r--r--arch/powerpc/kexec/core.c1
-rw-r--r--arch/powerpc/kexec/core_64.c4
-rw-r--r--arch/powerpc/kexec/crash.c1
-rw-r--r--arch/powerpc/kvm/Makefile10
-rw-r--r--arch/powerpc/kvm/book3s_64_entry.S2
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c42
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c43
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c672
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv.c74
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c64
-rw-r--r--arch/powerpc/kvm/book3s_hv_nested.c137
-rw-r--r--arch/powerpc/kvm/book3s_hv_p9_entry.c17
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c7
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xive.c46
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S30
-rw-r--r--arch/powerpc/kvm/book3s_hv_uvmem.c10
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S2
-rw-r--r--arch/powerpc/kvm/book3s_pr.c2
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c6
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S2
-rw-r--r--arch/powerpc/kvm/book3s_xics.c2
-rw-r--r--arch/powerpc/kvm/book3s_xive.c655
-rw-r--r--arch/powerpc/kvm/book3s_xive.h7
-rw-r--r--arch/powerpc/kvm/book3s_xive_native.c2
-rw-r--r--arch/powerpc/kvm/book3s_xive_template.c636
-rw-r--r--arch/powerpc/kvm/e500mc.c3
-rw-r--r--arch/powerpc/kvm/powerpc.c31
-rw-r--r--arch/powerpc/kvm/trace_hv.h8
-rw-r--r--arch/powerpc/lib/Makefile3
-rw-r--r--arch/powerpc/lib/checksum_wrappers.c2
-rw-r--r--arch/powerpc/lib/code-patching.c61
-rw-r--r--arch/powerpc/lib/feature-fixups.c2
-rw-r--r--arch/powerpc/lib/sstep.c52
-rw-r--r--arch/powerpc/mm/Makefile3
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c1
-rw-r--r--arch/powerpc/mm/book3s64/Makefile11
-rw-r--r--arch/powerpc/mm/book3s64/hash_pgtable.c2
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c39
-rw-r--r--arch/powerpc/mm/book3s64/iommu_api.c68
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c2
-rw-r--r--arch/powerpc/mm/book3s64/radix_hugetlbpage.c55
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c2
-rw-r--r--arch/powerpc/mm/book3s64/radix_tlb.c2
-rw-r--r--arch/powerpc/mm/book3s64/slb.c4
-rw-r--r--arch/powerpc/mm/book3s64/slice.c (renamed from arch/powerpc/mm/slice.c)71
-rw-r--r--arch/powerpc/mm/cacheflush.c2
-rw-r--r--arch/powerpc/mm/drmem.c2
-rw-r--r--arch/powerpc/mm/hugetlbpage.c34
-rw-r--r--arch/powerpc/mm/init_32.c1
-rw-r--r--arch/powerpc/mm/init_64.c7
-rw-r--r--arch/powerpc/mm/kasan/Makefile3
-rw-r--r--arch/powerpc/mm/kasan/init_32.c (renamed from arch/powerpc/mm/kasan/kasan_init_32.c)0
-rw-r--r--arch/powerpc/mm/kasan/init_book3s_64.c102
-rw-r--r--arch/powerpc/mm/mem.c4
-rw-r--r--arch/powerpc/mm/mmap.c256
-rw-r--r--arch/powerpc/mm/mmu_decl.h4
-rw-r--r--arch/powerpc/mm/nohash/40x.c1
-rw-r--r--arch/powerpc/mm/nohash/book3e_hugetlbpage.c2
-rw-r--r--arch/powerpc/mm/nohash/fsl_book3e.c22
-rw-r--r--arch/powerpc/mm/nohash/kaslr_booke.c5
-rw-r--r--arch/powerpc/mm/nohash/mmu_context.c9
-rw-r--r--arch/powerpc/mm/nohash/tlb.c6
-rw-r--r--arch/powerpc/mm/numa.c36
-rw-r--r--arch/powerpc/mm/pageattr.c1
-rw-r--r--arch/powerpc/mm/pgtable-frag.c2
-rw-r--r--arch/powerpc/mm/pgtable.c2
-rw-r--r--arch/powerpc/mm/pgtable_64.c1
-rw-r--r--arch/powerpc/mm/ptdump/ptdump.c3
-rw-r--r--arch/powerpc/net/bpf_jit.h4
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c2
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c4
-rw-r--r--arch/powerpc/perf/8xx-pmu.c2
-rw-r--r--arch/powerpc/perf/core-book3s.c6
-rw-r--r--arch/powerpc/perf/hv-24x7.c40
-rw-r--r--arch/powerpc/perf/imc-pmu.c5
-rw-r--r--arch/powerpc/perf/isa207-common.c18
-rw-r--r--arch/powerpc/perf/power9-pmu.c4
-rw-r--r--arch/powerpc/platforms/40x/ppc40x_simple.c1
-rw-r--r--arch/powerpc/platforms/44x/canyonlands.c1
-rw-r--r--arch/powerpc/platforms/44x/fsp2.c2
-rw-r--r--arch/powerpc/platforms/44x/ppc44x_simple.c1
-rw-r--r--arch/powerpc/platforms/44x/ppc476.c2
-rw-r--r--arch/powerpc/platforms/44x/sam440ep.c1
-rw-r--r--arch/powerpc/platforms/44x/warp.c3
-rw-r--r--arch/powerpc/platforms/4xx/cpm.c2
-rw-r--r--arch/powerpc/platforms/4xx/hsta_msi.c1
-rw-r--r--arch/powerpc/platforms/4xx/pci.c1
-rw-r--r--arch/powerpc/platforms/4xx/uic.c3
-rw-r--r--arch/powerpc/platforms/512x/clock-commonclk.c2
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads.c1
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads_cpld.c3
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_generic.c1
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c4
-rw-r--r--arch/powerpc/platforms/52xx/efika.c1
-rw-r--r--arch/powerpc/platforms/52xx/lite5200.c1
-rw-r--r--arch/powerpc/platforms/52xx/lite5200_pm.c2
-rw-r--r--arch/powerpc/platforms/52xx/media5200.c3
-rw-r--r--arch/powerpc/platforms/52xx/mpc5200_simple.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_common.c4
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c8
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c5
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pci.c22
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pic.c3
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pm.c2
-rw-r--r--arch/powerpc/platforms/82xx/ep8248e.c1
-rw-r--r--arch/powerpc/platforms/82xx/km82xx.c1
-rw-r--r--arch/powerpc/platforms/82xx/pq2ads-pci-pic.c2
-rw-r--r--arch/powerpc/platforms/83xx/km83xx.c1
-rw-r--r--arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c15
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_mds.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_rdb.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_itx.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_mds.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_mds.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_rdk.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_mds.c2
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c7
-rw-r--r--arch/powerpc/platforms/83xx/usb.c2
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig9
-rw-r--r--arch/powerpc/platforms/85xx/corenet_generic.c1
-rw-r--r--arch/powerpc/platforms/85xx/ge_imp3a.c2
-rw-r--r--arch/powerpc/platforms/85xx/ksi8560.c1
-rw-r--r--arch/powerpc/platforms/85xx/mpc8536_ds.c1
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c5
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ds.c2
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c1
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_rdb.c1
-rw-r--r--arch/powerpc/platforms/85xx/p1010rdb.c1
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c1
-rw-r--r--arch/powerpc/platforms/85xx/p1022_rdk.c1
-rw-r--r--arch/powerpc/platforms/85xx/p1023_rdb.c2
-rw-r--r--arch/powerpc/platforms/85xx/qemu_e500.c1
-rw-r--r--arch/powerpc/platforms/85xx/smp.c2
-rw-r--r--arch/powerpc/platforms/85xx/socrates.c1
-rw-r--r--arch/powerpc/platforms/85xx/stx_gp3.c1
-rw-r--r--arch/powerpc/platforms/85xx/tqm85xx.c1
-rw-r--r--arch/powerpc/platforms/85xx/xes_mpc85xx.c2
-rw-r--r--arch/powerpc/platforms/86xx/gef_ppc9a.c4
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc310.c4
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc610.c4
-rw-r--r--arch/powerpc/platforms/86xx/mpc8610_hpcd.c3
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_hpcn.c1
-rw-r--r--arch/powerpc/platforms/86xx/mvme7100.c1
-rw-r--r--arch/powerpc/platforms/8xx/Makefile2
-rw-r--r--arch/powerpc/platforms/8xx/adder875.c4
-rw-r--r--arch/powerpc/platforms/8xx/cpm1-ic.c188
-rw-r--r--arch/powerpc/platforms/8xx/cpm1.c142
-rw-r--r--arch/powerpc/platforms/8xx/ep88xc.c3
-rw-r--r--arch/powerpc/platforms/8xx/m8xx_setup.c31
-rw-r--r--arch/powerpc/platforms/8xx/mpc86xads_setup.c3
-rw-r--r--arch/powerpc/platforms/8xx/mpc885ads_setup.c3
-rw-r--r--arch/powerpc/platforms/8xx/mpc8xx.h1
-rw-r--r--arch/powerpc/platforms/8xx/pic.c20
-rw-r--r--arch/powerpc/platforms/8xx/pic.h2
-rw-r--r--arch/powerpc/platforms/8xx/tqm8xx_setup.c3
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype11
-rw-r--r--arch/powerpc/platforms/amigaone/setup.c1
-rw-r--r--arch/powerpc/platforms/book3s/vas-api.c2
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c2
-rw-r--r--arch/powerpc/platforms/cell/cbe_powerbutton.c2
-rw-r--r--arch/powerpc/platforms/cell/cbe_regs.c4
-rw-r--r--arch/powerpc/platforms/cell/cbe_thermal.c1
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c3
-rw-r--r--arch/powerpc/platforms/cell/iommu.c4
-rw-r--r--arch/powerpc/platforms/cell/pervasive.c1
-rw-r--r--arch/powerpc/platforms/cell/ras.c2
-rw-r--r--arch/powerpc/platforms/cell/setup.c1
-rw-r--r--arch/powerpc/platforms/cell/smp.c1
-rw-r--r--arch/powerpc/platforms/cell/spider-pci.c3
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c3
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c1
-rw-r--r--arch/powerpc/platforms/cell/spu_manage.c5
-rw-r--r--arch/powerpc/platforms/cell/spu_priv1_mmio.c1
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c2
-rw-r--r--arch/powerpc/platforms/chrp/nvram.c2
-rw-r--r--arch/powerpc/platforms/chrp/pci.c2
-rw-r--r--arch/powerpc/platforms/chrp/setup.c6
-rw-r--r--arch/powerpc/platforms/chrp/smp.c1
-rw-r--r--arch/powerpc/platforms/chrp/time.c4
-rw-r--r--arch/powerpc/platforms/embedded6xx/gamecube.c1
-rw-r--r--arch/powerpc/platforms/embedded6xx/holly.c3
-rw-r--r--arch/powerpc/platforms/embedded6xx/linkstation.c1
-rw-r--r--arch/powerpc/platforms/embedded6xx/ls_uart.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/mvme5100.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/storcenter.c1
-rw-r--r--arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c3
-rw-r--r--arch/powerpc/platforms/embedded6xx/wii.c2
-rw-r--r--arch/powerpc/platforms/fsl_uli1575.c1
-rw-r--r--arch/powerpc/platforms/maple/pci.c2
-rw-r--r--arch/powerpc/platforms/maple/setup.c2
-rw-r--r--arch/powerpc/platforms/maple/time.c2
-rw-r--r--arch/powerpc/platforms/pasemi/dma_lib.c2
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c1
-rw-r--r--arch/powerpc/platforms/pasemi/misc.c1
-rw-r--r--arch/powerpc/platforms/pasemi/msi.c2
-rw-r--r--arch/powerpc/platforms/pasemi/pci.c1
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c2
-rw-r--r--arch/powerpc/platforms/powermac/backlight.c1
-rw-r--r--arch/powerpc/platforms/powermac/bootx_init.c3
-rw-r--r--arch/powerpc/platforms/powermac/feature.c1
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c4
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c4
-rw-r--r--arch/powerpc/platforms/powermac/pci.c3
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_core.c4
-rw-r--r--arch/powerpc/platforms/powermac/pic.c6
-rw-r--r--arch/powerpc/platforms/powermac/pmac.h2
-rw-r--r--arch/powerpc/platforms/powermac/setup.c5
-rw-r--r--arch/powerpc/platforms/powermac/smp.c4
-rw-r--r--arch/powerpc/platforms/powermac/time.c2
-rw-r--r--arch/powerpc/platforms/powermac/udbg_adb.c2
-rw-r--r--arch/powerpc/platforms/powermac/udbg_scc.c2
-rw-r--r--arch/powerpc/platforms/powernv/Makefile8
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c29
-rw-r--r--arch/powerpc/platforms/powernv/idle.c4
-rw-r--r--arch/powerpc/platforms/powernv/ocxl.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-fadump.c102
-rw-r--r--arch/powerpc/platforms/powernv/opal-fadump.h10
-rw-r--r--arch/powerpc/platforms/powernv/opal-flash.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-imc.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-lpc.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-memory-errors.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-cxl.c1
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda-tce.c5
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c51
-rw-r--r--arch/powerpc/platforms/powernv/pci-sriov.c4
-rw-r--r--arch/powerpc/platforms/powernv/pci.c1
-rw-r--r--arch/powerpc/platforms/powernv/pci.h3
-rw-r--r--arch/powerpc/platforms/powernv/setup.c9
-rw-r--r--arch/powerpc/platforms/powernv/smp.c2
-rw-r--r--arch/powerpc/platforms/powernv/ultravisor.c1
-rw-r--r--arch/powerpc/platforms/powernv/vas-fault.c2
-rw-r--r--arch/powerpc/platforms/powernv/vas-window.c4
-rw-r--r--arch/powerpc/platforms/powernv/vas.h2
-rw-r--r--arch/powerpc/platforms/ps3/Kconfig2
-rw-r--r--arch/powerpc/platforms/ps3/htab.c1
-rw-r--r--arch/powerpc/platforms/ps3/mm.c3
-rw-r--r--arch/powerpc/platforms/ps3/os-area.c2
-rw-r--r--arch/powerpc/platforms/ps3/setup.c2
-rw-r--r--arch/powerpc/platforms/ps3/system-bus.c2
-rw-r--r--arch/powerpc/platforms/pseries/Makefile4
-rw-r--r--arch/powerpc/platforms/pseries/cmm.c4
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c3
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c9
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c2
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c1
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c5
-rw-r--r--arch/powerpc/platforms/pseries/kexec.c8
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c1
-rw-r--r--arch/powerpc/platforms/pseries/lparcfg.c1
-rw-r--r--arch/powerpc/platforms/pseries/msi.c1
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c2
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c54
-rw-r--r--arch/powerpc/platforms/pseries/pci.c1
-rw-r--r--arch/powerpc/platforms/pseries/pmem.c1
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h1
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c1
-rw-r--r--arch/powerpc/platforms/pseries/rtas-fadump.c17
-rw-r--r--arch/powerpc/platforms/pseries/setup.c18
-rw-r--r--arch/powerpc/platforms/pseries/smp.c1
-rw-r--r--arch/powerpc/platforms/pseries/vas-sysfs.c18
-rw-r--r--arch/powerpc/platforms/pseries/vas.c2
-rw-r--r--arch/powerpc/platforms/pseries/vio.c1
-rw-r--r--arch/powerpc/sysdev/Makefile1
-rw-r--r--arch/powerpc/sysdev/cpm2_pic.c2
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c8
-rw-r--r--arch/powerpc/sysdev/dcr.c2
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h88
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_cache_sram.c147
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_l2ctlr.c216
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c5
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c4
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c5
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c2
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c1
-rw-r--r--arch/powerpc/sysdev/ge/ge_pic.c6
-rw-r--r--arch/powerpc/sysdev/grackle.c2
-rw-r--r--arch/powerpc/sysdev/i8259.c2
-rw-r--r--arch/powerpc/sysdev/indirect_pci.c1
-rw-r--r--arch/powerpc/sysdev/ipic.c3
-rw-r--r--arch/powerpc/sysdev/mmio_nvram.c2
-rw-r--r--arch/powerpc/sysdev/mpic.c2
-rw-r--r--arch/powerpc/sysdev/mpic_msgr.c5
-rw-r--r--arch/powerpc/sysdev/mpic_msi.c5
-rw-r--r--arch/powerpc/sysdev/mpic_timer.c2
-rw-r--r--arch/powerpc/sysdev/mpic_u3msi.c4
-rw-r--r--arch/powerpc/sysdev/msi_bitmap.c1
-rw-r--r--arch/powerpc/sysdev/pmi.c3
-rw-r--r--arch/powerpc/sysdev/rtc_cmos_setup.c2
-rw-r--r--arch/powerpc/sysdev/tsi108_dev.c3
-rw-r--r--arch/powerpc/sysdev/tsi108_pci.c3
-rw-r--r--arch/powerpc/sysdev/xics/icp-native.c3
-rw-r--r--arch/powerpc/sysdev/xics/icp-opal.c1
-rw-r--r--arch/powerpc/sysdev/xics/ics-native.c2
-rw-r--r--arch/powerpc/sysdev/xics/ics-opal.c1
-rw-r--r--arch/powerpc/sysdev/xics/ics-rtas.c1
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c6
-rw-r--r--arch/powerpc/sysdev/xive/common.c6
-rw-r--r--arch/powerpc/sysdev/xive/native.c4
-rw-r--r--arch/powerpc/sysdev/xive/spapr.c9
-rw-r--r--arch/powerpc/xmon/ppc-opc.c2
-rw-r--r--arch/powerpc/xmon/xmon.c19
-rw-r--r--arch/riscv/Kbuild4
-rw-r--r--arch/riscv/Kconfig75
-rw-r--r--arch/riscv/Kconfig.erratas34
-rw-r--r--arch/riscv/Kconfig.socs1
-rw-r--r--arch/riscv/Makefile12
-rw-r--r--arch/riscv/boot/.gitignore1
-rw-r--r--arch/riscv/boot/dts/microchip/Makefile3
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi (renamed from arch/riscv/boot/dts/microchip/microchip-mpfs-fabric.dtsi)2
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts (renamed from arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts)105
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi16
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-polarberry.dts99
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs.dtsi (renamed from arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi)11
-rw-r--r--arch/riscv/boot/dts/sifive/fu540-c000.dtsi3
-rw-r--r--arch/riscv/errata/Makefile2
-rw-r--r--arch/riscv/errata/alternative.c75
-rw-r--r--arch/riscv/errata/sifive/errata.c20
-rw-r--r--arch/riscv/errata/thead/Makefile11
-rw-r--r--arch/riscv/errata/thead/errata.c82
-rw-r--r--arch/riscv/include/asm/alternative-macros.h133
-rw-r--r--arch/riscv/include/asm/alternative.h25
-rw-r--r--arch/riscv/include/asm/asm.h26
-rw-r--r--arch/riscv/include/asm/atomic.h102
-rw-r--r--arch/riscv/include/asm/cmpxchg.h12
-rw-r--r--arch/riscv/include/asm/compat.h129
-rw-r--r--arch/riscv/include/asm/csr.h7
-rw-r--r--arch/riscv/include/asm/elf.h50
-rw-r--r--arch/riscv/include/asm/errata_list.h59
-rw-r--r--arch/riscv/include/asm/fixmap.h2
-rw-r--r--arch/riscv/include/asm/hwcap.h1
-rw-r--r--arch/riscv/include/asm/irq_work.h2
-rw-r--r--arch/riscv/include/asm/kexec.h11
-rw-r--r--arch/riscv/include/asm/mmu.h1
-rw-r--r--arch/riscv/include/asm/pgtable-32.h17
-rw-r--r--arch/riscv/include/asm/pgtable-64.h79
-rw-r--r--arch/riscv/include/asm/pgtable-bits.h10
-rw-r--r--arch/riscv/include/asm/pgtable.h68
-rw-r--r--arch/riscv/include/asm/processor.h6
-rw-r--r--arch/riscv/include/asm/signal32.h18
-rw-r--r--arch/riscv/include/asm/syscall.h1
-rw-r--r--arch/riscv/include/asm/thread_info.h1
-rw-r--r--arch/riscv/include/asm/unistd.h12
-rw-r--r--arch/riscv/include/asm/vdso.h9
-rw-r--r--arch/riscv/include/asm/vendorid_list.h1
-rw-r--r--arch/riscv/include/asm/xip_fixup.h31
-rw-r--r--arch/riscv/include/uapi/asm/unistd.h3
-rw-r--r--arch/riscv/kernel/Makefile19
-rw-r--r--arch/riscv/kernel/alternative.c118
-rw-r--r--arch/riscv/kernel/compat_signal.c243
-rw-r--r--arch/riscv/kernel/compat_syscall_table.c19
-rw-r--r--arch/riscv/kernel/compat_vdso/.gitignore2
-rw-r--r--arch/riscv/kernel/compat_vdso/Makefile78
-rw-r--r--arch/riscv/kernel/compat_vdso/compat_vdso.S8
-rw-r--r--arch/riscv/kernel/compat_vdso/compat_vdso.lds.S3
-rw-r--r--arch/riscv/kernel/compat_vdso/flush_icache.S3
-rwxr-xr-xarch/riscv/kernel/compat_vdso/gen_compat_vdso_offsets.sh5
-rw-r--r--arch/riscv/kernel/compat_vdso/getcpu.S3
-rw-r--r--arch/riscv/kernel/compat_vdso/note.S3
-rw-r--r--arch/riscv/kernel/compat_vdso/rt_sigreturn.S3
-rw-r--r--arch/riscv/kernel/cpu.c5
-rw-r--r--arch/riscv/kernel/cpufeature.c87
-rw-r--r--arch/riscv/kernel/crash_dump.c26
-rw-r--r--arch/riscv/kernel/efi.c2
-rw-r--r--arch/riscv/kernel/elf_kexec.c448
-rw-r--r--arch/riscv/kernel/entry.S18
-rw-r--r--arch/riscv/kernel/ftrace.c6
-rw-r--r--arch/riscv/kernel/head.S2
-rw-r--r--arch/riscv/kernel/machine_kexec.c4
-rw-r--r--arch/riscv/kernel/machine_kexec_file.c14
-rw-r--r--arch/riscv/kernel/module.c29
-rw-r--r--arch/riscv/kernel/process.c49
-rw-r--r--arch/riscv/kernel/ptrace.c87
-rw-r--r--arch/riscv/kernel/reset.c12
-rw-r--r--arch/riscv/kernel/setup.c6
-rw-r--r--arch/riscv/kernel/signal.c7
-rw-r--r--arch/riscv/kernel/smpboot.c4
-rw-r--r--arch/riscv/kernel/suspend_entry.S1
-rw-r--r--arch/riscv/kernel/sys_riscv.c6
-rw-r--r--arch/riscv/kernel/traps.c2
-rw-r--r--arch/riscv/kernel/vdso.c114
-rw-r--r--arch/riscv/kernel/vdso/vdso.S6
-rw-r--r--arch/riscv/mm/fault.c10
-rw-r--r--arch/riscv/mm/init.c81
-rw-r--r--arch/riscv/purgatory/.gitignore4
-rw-r--r--arch/riscv/purgatory/Makefile95
-rw-r--r--arch/riscv/purgatory/entry.S47
-rw-r--r--arch/riscv/purgatory/purgatory.c45
-rw-r--r--arch/s390/Kconfig11
-rw-r--r--arch/s390/Kconfig.debug12
-rw-r--r--arch/s390/crypto/aes_s390.c4
-rw-r--r--arch/s390/crypto/chacha-glue.c34
-rw-r--r--arch/s390/include/asm/asm-extable.h91
-rw-r--r--arch/s390/include/asm/compat.h99
-rw-r--r--arch/s390/include/asm/kexec.h12
-rw-r--r--arch/s390/include/asm/livepatch.h22
-rw-r--r--arch/s390/include/asm/processor.h6
-rw-r--r--arch/s390/include/asm/stacktrace.h11
-rw-r--r--arch/s390/include/asm/uaccess.h217
-rw-r--r--arch/s390/include/asm/unistd.h1
-rw-r--r--arch/s390/kernel/Makefile2
-rw-r--r--arch/s390/kernel/asm-offsets.c26
-rw-r--r--arch/s390/kernel/crash_dump.c13
-rw-r--r--arch/s390/kernel/early.c5
-rw-r--r--arch/s390/kernel/earlypgm.S (renamed from arch/s390/kernel/base.S)33
-rw-r--r--arch/s390/kernel/entry.S23
-rw-r--r--arch/s390/kernel/entry.h2
-rw-r--r--arch/s390/kernel/ftrace.c3
-rw-r--r--arch/s390/kernel/perf_event.c2
-rw-r--r--arch/s390/kernel/process.c12
-rw-r--r--arch/s390/kvm/kvm-s390.c10
-rw-r--r--arch/s390/mm/extable.c39
-rw-r--r--arch/s390/mm/gmap.c14
-rw-r--r--arch/s390/mm/pgtable.c2
-rw-r--r--arch/sh/kernel/crash_dump.c29
-rw-r--r--arch/sh/kernel/process_32.c12
-rw-r--r--arch/sh/kernel/reboot.c3
-rw-r--r--arch/sparc/Kconfig5
-rw-r--r--arch/sparc/include/asm/compat.h61
-rw-r--r--arch/sparc/include/asm/unistd.h1
-rw-r--r--arch/sparc/include/uapi/asm/stat.h4
-rw-r--r--arch/sparc/include/uapi/asm/termbits.h223
-rw-r--r--arch/sparc/kernel/process_32.c12
-rw-r--r--arch/sparc/kernel/process_64.c12
-rw-r--r--arch/um/Kconfig1
-rw-r--r--arch/um/drivers/Kconfig15
-rw-r--r--arch/um/drivers/Makefile2
-rw-r--r--arch/um/drivers/chan_kern.c10
-rw-r--r--arch/um/drivers/chan_user.c9
-rw-r--r--arch/um/drivers/daemon_kern.c2
-rw-r--r--arch/um/drivers/line.c22
-rw-r--r--arch/um/drivers/line.h4
-rw-r--r--arch/um/drivers/ssl.c2
-rw-r--r--arch/um/drivers/stdio_console.c2
-rw-r--r--arch/um/drivers/virtio_uml.c33
-rw-r--r--arch/um/drivers/xterm.c7
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/um/include/asm/irq.h22
-rw-r--r--arch/um/include/asm/thread_info.h2
-rw-r--r--arch/um/kernel/exec.c2
-rw-r--r--arch/um/kernel/process.c17
-rw-r--r--arch/um/kernel/ptrace.c8
-rw-r--r--arch/um/kernel/signal.c4
-rw-r--r--arch/x86/Kconfig5
-rw-r--r--arch/x86/boot/header.S2
-rw-r--r--arch/x86/crypto/blowfish_glue.c8
-rw-r--r--arch/x86/crypto/camellia_glue.c8
-rw-r--r--arch/x86/crypto/serpent_avx2_glue.c8
-rw-r--r--arch/x86/crypto/twofish_glue.c8
-rw-r--r--arch/x86/crypto/twofish_glue_3way.c8
-rw-r--r--arch/x86/events/Kconfig12
-rw-r--r--arch/x86/events/intel/core.c2
-rw-r--r--arch/x86/include/asm/compat.h104
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/e820/api.h5
-rw-r--r--arch/x86/include/asm/efi.h9
-rw-r--r--arch/x86/include/asm/extable.h8
-rw-r--r--arch/x86/include/asm/fpu/sched.h2
-rw-r--r--arch/x86/include/asm/ftrace.h7
-rw-r--r--arch/x86/include/asm/kexec.h8
-rw-r--r--arch/x86/include/asm/livepatch.h20
-rw-r--r--arch/x86/include/asm/pci_x86.h2
-rw-r--r--arch/x86/include/asm/processor.h4
-rw-r--r--arch/x86/include/asm/set_memory.h52
-rw-r--r--arch/x86/include/asm/switch_to.h8
-rw-r--r--arch/x86/include/asm/unistd.h1
-rw-r--r--arch/x86/include/asm/xen/page.h3
-rw-r--r--arch/x86/kernel/cpu/mce/core.c6
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c2
-rw-r--r--arch/x86/kernel/crash_dump_32.c29
-rw-r--r--arch/x86/kernel/crash_dump_64.c48
-rw-r--r--arch/x86/kernel/fpu/core.c4
-rw-r--r--arch/x86/kernel/ftrace.c6
-rw-r--r--arch/x86/kernel/process.c18
-rw-r--r--arch/x86/kernel/reboot.c4
-rw-r--r--arch/x86/kernel/resource.c25
-rw-r--r--arch/x86/kernel/step.c3
-rw-r--r--arch/x86/kernel/tracepoint.c6
-rw-r--r--arch/x86/kvm/hyperv.c12
-rw-r--r--arch/x86/lib/csum-wrappers_64.c2
-rw-r--r--arch/x86/mm/pat/set_memory.c49
-rw-r--r--arch/x86/pci/acpi.c93
-rw-r--r--arch/x86/pci/common.c8
-rw-r--r--arch/x86/um/ldt.c6
-rw-r--r--arch/x86/xen/enlighten_pv.c4
-rw-r--r--arch/xtensa/kernel/process.c17
-rw-r--r--arch/xtensa/kernel/ptrace.c4
-rw-r--r--arch/xtensa/kernel/signal.c4
-rw-r--r--block/bio.c9
-rw-r--r--block/blk-cgroup.c8
-rw-r--r--block/blk-core.c2
-rw-r--r--block/blk-ia-ranges.c7
-rw-r--r--block/blk-iolatency.c122
-rw-r--r--block/blk-mq-tag.c1
-rw-r--r--block/blk-mq.c123
-rw-r--r--block/genhd.c2
-rw-r--r--crypto/Kconfig18
-rw-r--r--crypto/Makefile6
-rw-r--r--crypto/cryptd.c23
-rw-r--r--crypto/crypto_engine.c1
-rw-r--r--crypto/ecrdsa.c8
-rw-r--r--crypto/sm3.c (renamed from lib/crypto/sm3.c)0
-rw-r--r--crypto/sm4.c (renamed from lib/crypto/sm4.c)10
-rw-r--r--crypto/testmgr.c75
-rw-r--r--drivers/Kconfig6
-rw-r--r--drivers/Makefile5
-rw-r--r--drivers/accessibility/speakup/fakekey.c4
-rw-r--r--drivers/accessibility/speakup/serialio.c2
-rw-r--r--drivers/accessibility/speakup/speakup_acntpc.c2
-rw-r--r--drivers/accessibility/speakup/speakup_acntsa.c2
-rw-r--r--drivers/accessibility/speakup/speakup_apollo.c2
-rw-r--r--drivers/accessibility/speakup/speakup_audptr.c2
-rw-r--r--drivers/accessibility/speakup/speakup_bns.c2
-rw-r--r--drivers/accessibility/speakup/speakup_decext.c2
-rw-r--r--drivers/accessibility/speakup/speakup_dectlk.c2
-rw-r--r--drivers/accessibility/speakup/speakup_dtlk.c2
-rw-r--r--drivers/accessibility/speakup/speakup_dummy.c2
-rw-r--r--drivers/accessibility/speakup/speakup_keypc.c2
-rw-r--r--drivers/accessibility/speakup/speakup_ltlk.c2
-rw-r--r--drivers/accessibility/speakup/speakup_soft.c3
-rw-r--r--drivers/accessibility/speakup/speakup_spkout.c2
-rw-r--r--drivers/accessibility/speakup/speakup_txprt.c2
-rw-r--r--drivers/acpi/ac.c4
-rw-r--r--drivers/acpi/acpi_video.c31
-rw-r--r--drivers/acpi/battery.c24
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/cppc_acpi.c2
-rw-r--r--drivers/acpi/dptf/dptf_pch_fivr.c1
-rw-r--r--drivers/acpi/dptf/dptf_power.c2
-rw-r--r--drivers/acpi/dptf/int340x_thermal.c6
-rw-r--r--drivers/acpi/fan.h1
-rw-r--r--drivers/acpi/glue.c16
-rw-r--r--drivers/acpi/nfit/core.c30
-rw-r--r--drivers/acpi/nfit/mce.c4
-rw-r--r--drivers/acpi/nfit/nfit.h24
-rw-r--r--drivers/acpi/osl.c86
-rw-r--r--drivers/acpi/pci_root.c238
-rw-r--r--drivers/acpi/processor_idle.c10
-rw-r--r--drivers/acpi/sleep.c16
-rw-r--r--drivers/amba/bus.c65
-rw-r--r--drivers/android/binder.c199
-rw-r--r--drivers/android/binder_alloc.c22
-rw-r--r--drivers/android/binder_internal.h5
-rw-r--r--drivers/android/binderfs.c8
-rw-r--r--drivers/ata/pata_palmld.c3
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/arch_topology.c5
-rw-r--r--drivers/base/base.h1
-rw-r--r--drivers/base/bus.c4
-rw-r--r--drivers/base/core.c18
-rw-r--r--drivers/base/dd.c49
-rw-r--r--drivers/base/driver.c70
-rw-r--r--drivers/base/firmware_loader/Kconfig43
-rw-r--r--drivers/base/firmware_loader/Makefile2
-rw-r--r--drivers/base/firmware_loader/fallback.c430
-rw-r--r--drivers/base/firmware_loader/fallback.h46
-rw-r--r--drivers/base/firmware_loader/firmware.h16
-rw-r--r--drivers/base/firmware_loader/main.c94
-rw-r--r--drivers/base/firmware_loader/sysfs.c422
-rw-r--r--drivers/base/firmware_loader/sysfs.h117
-rw-r--r--drivers/base/firmware_loader/sysfs_upload.c397
-rw-r--r--drivers/base/firmware_loader/sysfs_upload.h41
-rw-r--r--drivers/base/physical_location.c143
-rw-r--r--drivers/base/physical_location.h16
-rw-r--r--drivers/base/platform.c55
-rw-r--r--drivers/base/property.c96
-rw-r--r--drivers/block/loop.c8
-rw-r--r--drivers/block/nbd.c114
-rw-r--r--drivers/block/null_blk/main.c6
-rw-r--r--drivers/block/null_blk/null_blk.h7
-rw-r--r--drivers/block/null_blk/zoned.c6
-rw-r--r--drivers/block/rbd.c13
-rw-r--r--drivers/block/sx8.c4
-rw-r--r--drivers/block/virtio_blk.c220
-rw-r--r--drivers/block/xen-blkfront.c6
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c49
-rw-r--r--drivers/bus/mhi/Kconfig1
-rw-r--r--drivers/bus/mhi/Makefile3
-rw-r--r--drivers/bus/mhi/common.h22
-rw-r--r--drivers/bus/mhi/ep/Kconfig10
-rw-r--r--drivers/bus/mhi/ep/Makefile2
-rw-r--r--drivers/bus/mhi/ep/internal.h218
-rw-r--r--drivers/bus/mhi/ep/main.c1591
-rw-r--r--drivers/bus/mhi/ep/mmio.c273
-rw-r--r--drivers/bus/mhi/ep/ring.c207
-rw-r--r--drivers/bus/mhi/ep/sm.c148
-rw-r--r--drivers/bus/mhi/host/boot.c22
-rw-r--r--drivers/bus/mhi/host/init.c89
-rw-r--r--drivers/bus/mhi/host/internal.h7
-rw-r--r--drivers/bus/mhi/host/main.c18
-rw-r--r--drivers/bus/mhi/host/pci_generic.c133
-rw-r--r--drivers/bus/mhi/host/pm.c24
-rw-r--r--drivers/bus/ti-sysc.c4
-rw-r--r--drivers/char/Kconfig3
-rw-r--r--drivers/char/hw_random/Kconfig15
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/cn10k-rng.c31
-rw-r--r--drivers/char/hw_random/mpfs-rng.c104
-rw-r--r--drivers/char/hw_random/omap3-rom-rng.c2
-rw-r--r--drivers/char/hw_random/optee-rng.c2
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/misc.c24
-rw-r--r--drivers/char/pcmcia/synclink_cs.c10
-rw-r--r--drivers/char/ttyprintk.c16
-rw-r--r--drivers/char/xillybus/xillybus_class.c26
-rw-r--r--drivers/char/xillybus/xillyusb.c1
-rw-r--r--drivers/clk/Kconfig14
-rw-r--r--drivers/clk/Makefile2
-rw-r--r--drivers/clk/actions/owl-pll.c2
-rw-r--r--drivers/clk/bcm/clk-raspberrypi.c2
-rw-r--r--drivers/clk/clk-cdce706.c5
-rw-r--r--drivers/clk/clk-cdce925.c24
-rw-r--r--drivers/clk/clk-cs2000-cp.c5
-rw-r--r--drivers/clk/clk-en7523.c351
-rw-r--r--drivers/clk/clk-fixed-rate.c2
-rw-r--r--drivers/clk/clk-max9485.c5
-rw-r--r--drivers/clk/clk-mux.c4
-rw-r--r--drivers/clk/clk-renesas-pcie.c4
-rw-r--r--drivers/clk/clk-si514.c5
-rw-r--r--drivers/clk/clk-si5341.c5
-rw-r--r--drivers/clk/clk-si5351.c24
-rw-r--r--drivers/clk/clk-si544.c22
-rw-r--r--drivers/clk/clk-si570.c24
-rw-r--r--drivers/clk/clk.c9
-rw-r--r--drivers/clk/imx/clk-composite-8m.c19
-rw-r--r--drivers/clk/imx/clk-imx7d.c1
-rw-r--r--drivers/clk/imx/clk-imx8mm.c3
-rw-r--r--drivers/clk/imx/clk-imx8mn.c41
-rw-r--r--drivers/clk/imx/clk-imx8mp.c24
-rw-r--r--drivers/clk/imx/clk-imx8mq.c5
-rw-r--r--drivers/clk/imx/clk-scu.c13
-rw-r--r--drivers/clk/imx/clk.c5
-rw-r--r--drivers/clk/imx/clk.h1
-rw-r--r--drivers/clk/ingenic/cgu.c2
-rw-r--r--drivers/clk/ingenic/cgu.h3
-rw-r--r--drivers/clk/ingenic/jz4725b-cgu.c10
-rw-r--r--drivers/clk/ingenic/jz4740-cgu.c10
-rw-r--r--drivers/clk/ingenic/jz4760-cgu.c10
-rw-r--r--drivers/clk/ingenic/jz4770-cgu.c5
-rw-r--r--drivers/clk/ingenic/jz4780-cgu.c15
-rw-r--r--drivers/clk/ingenic/tcu.c35
-rw-r--r--drivers/clk/ingenic/x1000-cgu.c15
-rw-r--r--drivers/clk/ingenic/x1830-cgu.c11
-rw-r--r--drivers/clk/keystone/syscon-clk.c11
-rw-r--r--drivers/clk/mediatek/Kconfig8
-rw-r--r--drivers/clk/mediatek/Makefile5
-rw-r--r--drivers/clk/mediatek/clk-apmixed.c12
-rw-r--r--drivers/clk/mediatek/clk-cpumux.c50
-rw-r--r--drivers/clk/mediatek/clk-cpumux.h6
-rw-r--r--drivers/clk/mediatek/clk-gate.c52
-rw-r--r--drivers/clk/mediatek/clk-gate.h8
-rw-r--r--drivers/clk/mediatek/clk-mt2701-aud.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2701-bdp.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2701-eth.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2701-g3d.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2701-hif.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2701-img.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2701-mm.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2701-vdec.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2701.c34
-rw-r--r--drivers/clk/mediatek/clk-mt2712-bdp.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2712-img.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2712-jpgdec.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2712-mfg.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2712-mm.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2712-vdec.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2712-venc.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2712.c58
-rw-r--r--drivers/clk/mediatek/clk-mt6765-audio.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6765-cam.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6765-img.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6765-mipi0a.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6765-mm.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6765-vcodec.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6765.c32
-rw-r--r--drivers/clk/mediatek/clk-mt6779-aud.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-cam.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-img.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-ipe.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-mfg.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-mm.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-vdec.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-venc.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779.c36
-rw-r--r--drivers/clk/mediatek/clk-mt6797-img.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6797-mm.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6797-vdec.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6797-venc.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6797.c42
-rw-r--r--drivers/clk/mediatek/clk-mt7622-aud.c4
-rw-r--r--drivers/clk/mediatek/clk-mt7622-eth.c8
-rw-r--r--drivers/clk/mediatek/clk-mt7622-hif.c8
-rw-r--r--drivers/clk/mediatek/clk-mt7622.c48
-rw-r--r--drivers/clk/mediatek/clk-mt7629-eth.c8
-rw-r--r--drivers/clk/mediatek/clk-mt7629-hif.c8
-rw-r--r--drivers/clk/mediatek/clk-mt7629.c42
-rw-r--r--drivers/clk/mediatek/clk-mt7986-apmixed.c22
-rw-r--r--drivers/clk/mediatek/clk-mt7986-eth.c14
-rw-r--r--drivers/clk/mediatek/clk-mt7986-infracfg.c4
-rw-r--r--drivers/clk/mediatek/clk-mt7986-topckgen.c16
-rw-r--r--drivers/clk/mediatek/clk-mt8135.c38
-rw-r--r--drivers/clk/mediatek/clk-mt8167-aud.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8167-img.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8167-mfgcfg.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8167-mm.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8167-vdec.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8167.c28
-rw-r--r--drivers/clk/mediatek/clk-mt8173-mm.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8173.c97
-rw-r--r--drivers/clk/mediatek/clk-mt8183-audio.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8183-cam.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8183-img.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu0.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu1.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu_adl.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu_conn.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8183-mfgcfg.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8183-mm.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8183-vdec.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8183-venc.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c47
-rw-r--r--drivers/clk/mediatek/clk-mt8186-apmixedsys.c133
-rw-r--r--drivers/clk/mediatek/clk-mt8186-cam.c90
-rw-r--r--drivers/clk/mediatek/clk-mt8186-img.c68
-rw-r--r--drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c67
-rw-r--r--drivers/clk/mediatek/clk-mt8186-infra_ao.c216
-rw-r--r--drivers/clk/mediatek/clk-mt8186-ipe.c55
-rw-r--r--drivers/clk/mediatek/clk-mt8186-mcu.c108
-rw-r--r--drivers/clk/mediatek/clk-mt8186-mdp.c80
-rw-r--r--drivers/clk/mediatek/clk-mt8186-mfg.c48
-rw-r--r--drivers/clk/mediatek/clk-mt8186-mm.c111
-rw-r--r--drivers/clk/mediatek/clk-mt8186-topckgen.c780
-rw-r--r--drivers/clk/mediatek/clk-mt8186-vdec.c88
-rw-r--r--drivers/clk/mediatek/clk-mt8186-venc.c51
-rw-r--r--drivers/clk/mediatek/clk-mt8186-wpe.c51
-rw-r--r--drivers/clk/mediatek/clk-mt8192-aud.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8192-mm.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8192.c21
-rw-r--r--drivers/clk/mediatek/clk-mt8195-apmixedsys.c6
-rw-r--r--drivers/clk/mediatek/clk-mt8195-apusys_pll.c6
-rw-r--r--drivers/clk/mediatek/clk-mt8195-topckgen.c6
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vdo0.c6
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vdo1.c6
-rw-r--r--drivers/clk/mediatek/clk-mt8516-aud.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8516.c24
-rw-r--r--drivers/clk/mediatek/clk-mtk.c173
-rw-r--r--drivers/clk/mediatek/clk-mtk.h25
-rw-r--r--drivers/clk/mediatek/clk-mux.c50
-rw-r--r--drivers/clk/mediatek/clk-mux.h6
-rw-r--r--drivers/clk/mediatek/clk-pll.c64
-rw-r--r--drivers/clk/mediatek/clk-pll.h6
-rw-r--r--drivers/clk/pxa/clk-pxa.c8
-rw-r--r--drivers/clk/pxa/clk-pxa.h9
-rw-r--r--drivers/clk/pxa/clk-pxa25x.c46
-rw-r--r--drivers/clk/pxa/clk-pxa27x.c68
-rw-r--r--drivers/clk/pxa/clk-pxa2xx.h58
-rw-r--r--drivers/clk/pxa/clk-pxa3xx.c139
-rw-r--r--drivers/clk/qcom/Kconfig19
-rw-r--r--drivers/clk/qcom/Makefile2
-rw-r--r--drivers/clk/qcom/clk-rcg.h2
-rw-r--r--drivers/clk/qcom/clk-rcg2.c126
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c8
-rw-r--r--drivers/clk/qcom/gcc-msm8976.c7
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c56
-rw-r--r--drivers/clk/qcom/gcc-sc8280xp.c7488
-rw-r--r--drivers/clk/qcom/lpassaudiocc-sc7280.c838
-rw-r--r--drivers/clk/qcom/lpasscorecc-sc7280.c431
-rw-r--r--drivers/clk/renesas/Kconfig17
-rw-r--r--drivers/clk/renesas/Makefile3
-rw-r--r--drivers/clk/renesas/r8a774a1-cpg-mssr.c9
-rw-r--r--drivers/clk/renesas/r8a774b1-cpg-mssr.c9
-rw-r--r--drivers/clk/renesas/r8a774c0-cpg-mssr.c8
-rw-r--r--drivers/clk/renesas/r8a774e1-cpg-mssr.c9
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c9
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c9
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c9
-rw-r--r--drivers/clk/renesas/r8a77980-cpg-mssr.c10
-rw-r--r--drivers/clk/renesas/r8a77990-cpg-mssr.c7
-rw-r--r--drivers/clk/renesas/r8a77995-cpg-mssr.c11
-rw-r--r--drivers/clk/renesas/r8a779a0-cpg-mssr.c23
-rw-r--r--drivers/clk/renesas/r8a779f0-cpg-mssr.c30
-rw-r--r--drivers/clk/renesas/r8a779g0-cpg-mssr.c218
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c42
-rw-r--r--drivers/clk/renesas/r9a07g043-cpg.c320
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c96
-rw-r--r--drivers/clk/renesas/r9a09g011-cpg.c172
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h5
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.c5
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.h3
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c6
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h1
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.c464
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.h87
-rw-r--r--drivers/clk/rockchip/clk-rk3568.c1
-rw-r--r--drivers/clk/samsung/Makefile1
-rw-r--r--drivers/clk/samsung/clk-exynosautov9.c1733
-rw-r--r--drivers/clk/stm32/Makefile1
-rw-r--r--drivers/clk/stm32/clk-stm32-core.c695
-rw-r--r--drivers/clk/stm32/clk-stm32-core.h188
-rw-r--r--drivers/clk/stm32/clk-stm32mp13.c1620
-rw-r--r--drivers/clk/stm32/reset-stm32.c122
-rw-r--r--drivers/clk/stm32/reset-stm32.h8
-rw-r--r--drivers/clk/stm32/stm32mp13_rcc.h1748
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h616.c8
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h616.h2
-rw-r--r--drivers/clk/tegra/clk-bpmp.c87
-rw-r--r--drivers/clk/tegra/clk-dfll.c20
-rw-r--r--drivers/clk/ti/clkctrl.c13
-rw-r--r--drivers/clk/ti/composite.c2
-rw-r--r--drivers/clk/ux500/clk-prcmu.c252
-rw-r--r--drivers/clk/ux500/clk.h70
-rw-r--r--drivers/clk/ux500/reset-prcc.c2
-rw-r--r--drivers/clk/ux500/u8500_of_clk.c350
-rw-r--r--drivers/clocksource/Kconfig8
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/timer-gxp.c209
-rw-r--r--drivers/comedi/drivers.c2
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c20
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c636
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c6
-rw-r--r--drivers/cpufreq/pxa3xx-cpufreq.c65
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c246
-rw-r--r--drivers/crypto/Kconfig4
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c22
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h1
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c102
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c54
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c130
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c6
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h19
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c180
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c92
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c385
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c6
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h33
-rw-r--r--drivers/crypto/atmel-ecc.c2
-rw-r--r--drivers/crypto/atmel-i2c.c30
-rw-r--r--drivers/crypto/atmel-i2c.h1
-rw-r--r--drivers/crypto/atmel-sha204a.c11
-rw-r--r--drivers/crypto/caam/Kconfig8
-rw-r--r--drivers/crypto/caam/Makefile1
-rw-r--r--drivers/crypto/caam/caamprng.c235
-rw-r--r--drivers/crypto/caam/ctrl.c18
-rw-r--r--drivers/crypto/caam/intern.h15
-rw-r--r--drivers/crypto/caam/jr.c3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c10
-rw-r--r--drivers/crypto/ccp/psp-dev.c49
-rw-r--r--drivers/crypto/ccp/psp-dev.h22
-rw-r--r--drivers/crypto/ccp/sev-dev.c32
-rw-r--r--drivers/crypto/ccp/sp-pci.c62
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c27
-rw-r--r--drivers/crypto/ccree/cc_driver.c24
-rw-r--r--drivers/crypto/hisilicon/Kconfig1
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c222
-rw-r--r--drivers/crypto/hisilicon/qm.c282
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c2
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c108
-rw-r--r--drivers/crypto/hisilicon/sgl.c6
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c2
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c185
-rw-r--r--drivers/crypto/inside-secure/safexcel.c9
-rw-r--r--drivers/crypto/keembay/keembay-ocs-aes-core.c9
-rw-r--r--drivers/crypto/marvell/cesa/cipher.c1
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c7
-rw-r--r--drivers/crypto/nx/nx-common-powernv.c2
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_drv.c8
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c15
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h4
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c15
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h4
-rw-r--r--drivers/crypto/qat/qat_common/Makefile1
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h6
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h18
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_hw_data.c13
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_hw_data.h6
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_pfvf.c78
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen4_pfvf.c61
-rw-r--r--drivers/crypto/qat/qat_common/adf_isr.c21
-rw-r--r--drivers/crypto/qat/qat_common/adf_pfvf_msg.h4
-rw-r--r--drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c6
-rw-r--r--drivers/crypto/qat/qat_common/adf_sriov.c16
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport.c11
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport.h1
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport_internal.h1
-rw-r--r--drivers/crypto/qat/qat_common/adf_vf_isr.c1
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c153
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs_send.c86
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs_send.h11
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c307
-rw-r--r--drivers/crypto/qat/qat_common/qat_crypto.c10
-rw-r--r--drivers/crypto/qat/qat_common/qat_crypto.h44
-rw-r--r--drivers/crypto/qat/qat_common/qat_hal.c1
-rw-r--r--drivers/crypto/qat/qat_common/qat_uclo.c3
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c126
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h4
-rw-r--r--drivers/crypto/sa2ul.c1
-rw-r--r--drivers/crypto/talitos.c10
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c4
-rw-r--r--drivers/crypto/virtio/virtio_crypto_akcipher_algs.c95
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h21
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c55
-rw-r--r--drivers/crypto/virtio/virtio_crypto_skcipher_algs.c140
-rw-r--r--drivers/crypto/vmx/Makefile17
-rw-r--r--drivers/cxl/Kconfig4
-rw-r--r--drivers/cxl/Makefile2
-rw-r--r--drivers/cxl/acpi.c13
-rw-r--r--drivers/cxl/core/Makefile1
-rw-r--r--drivers/cxl/core/mbox.c334
-rw-r--r--drivers/cxl/core/memdev.c3
-rw-r--r--drivers/cxl/core/pci.c364
-rw-r--r--drivers/cxl/core/pmem.c10
-rw-r--r--drivers/cxl/core/port.c68
-rw-r--r--drivers/cxl/core/suspend.c24
-rw-r--r--drivers/cxl/cxl.h78
-rw-r--r--drivers/cxl/cxlmem.h75
-rw-r--r--drivers/cxl/cxlpci.h2
-rw-r--r--drivers/cxl/mem.c148
-rw-r--r--drivers/cxl/pci.c175
-rw-r--r--drivers/cxl/pmem.c13
-rw-r--r--drivers/cxl/port.c28
-rw-r--r--drivers/dax/super.c14
-rw-r--r--drivers/dio/dio.c5
-rw-r--r--drivers/dma/Kconfig14
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/amba-pl08x.c11
-rw-r--r--drivers/dma/at_hdmac.c10
-rw-r--r--drivers/dma/at_xdmac.c9
-rw-r--r--drivers/dma/bestcomm/bestcomm.c2
-rw-r--r--drivers/dma/dma-jz4780.c9
-rw-r--r--drivers/dma/dmaengine.c7
-rw-r--r--drivers/dma/dmatest.c13
-rw-r--r--drivers/dma/dw/Kconfig9
-rw-r--r--drivers/dma/dw/Makefile2
-rw-r--r--drivers/dma/dw/platform.c1
-rw-r--r--drivers/dma/dw/rzn1-dmamux.c155
-rw-r--r--drivers/dma/ep93xx_dma.c2
-rw-r--r--drivers/dma/idxd/cdev.c18
-rw-r--r--drivers/dma/idxd/device.c151
-rw-r--r--drivers/dma/idxd/dma.c65
-rw-r--r--drivers/dma/idxd/idxd.h20
-rw-r--r--drivers/dma/idxd/init.c30
-rw-r--r--drivers/dma/idxd/registers.h1
-rw-r--r--drivers/dma/idxd/sysfs.c12
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c12
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c13
-rw-r--r--drivers/dma/mmp_pdma.c14
-rw-r--r--drivers/dma/mv_xor_v2.c4
-rw-r--r--drivers/dma/nbpfaxi.c14
-rw-r--r--drivers/dma/plx_dma.c4
-rw-r--r--drivers/dma/ptdma/ptdma-dev.c36
-rw-r--r--drivers/dma/ptdma/ptdma-dmaengine.c16
-rw-r--r--drivers/dma/ptdma/ptdma.h13
-rw-r--r--drivers/dma/pxa_dma.c13
-rw-r--r--drivers/dma/qcom/gpi.c21
-rw-r--r--drivers/dma/qcom/hidma.c13
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c24
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.h8
-rw-r--r--drivers/dma/sh/Kconfig2
-rw-r--r--drivers/dma/sprd-dma.c6
-rw-r--r--drivers/dma/stm32-dma.c311
-rw-r--r--drivers/dma/stm32-dmamux.c2
-rw-r--r--drivers/dma/stm32-mdma.c53
-rw-r--r--drivers/dma/sun6i-dma.c92
-rw-r--r--drivers/dma/tegra186-gpc-dma.c1498
-rw-r--r--drivers/dma/ti/cppi41.c6
-rw-r--r--drivers/dma/ti/k3-psil-am62.c8
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c17
-rw-r--r--drivers/extcon/Kconfig3
-rw-r--r--drivers/extcon/extcon-axp288.c4
-rw-r--r--drivers/extcon/extcon-intel-int3496.c54
-rw-r--r--drivers/extcon/extcon-ptn5150.c36
-rw-r--r--drivers/extcon/extcon-sm5502.c2
-rw-r--r--drivers/extcon/extcon-usb-gpio.c15
-rw-r--r--drivers/extcon/extcon-usbc-cros-ec.c2
-rw-r--r--drivers/extcon/extcon.c37
-rw-r--r--drivers/firmware/Makefile3
-rw-r--r--drivers/firmware/dmi-sysfs.c2
-rw-r--r--drivers/firmware/edd.c3
-rw-r--r--drivers/firmware/efi/Kconfig52
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c4
-rw-r--r--drivers/firmware/stratix10-svc.c12
-rw-r--r--drivers/firmware/xilinx/zynqmp.c131
-rw-r--r--drivers/fpga/Makefile6
-rw-r--r--drivers/fpga/dfl-pci.c9
-rw-r--r--drivers/fpga/dfl.c38
-rw-r--r--drivers/fpga/dfl.h1
-rw-r--r--drivers/fpga/fpga-mgr.c13
-rw-r--r--drivers/fpga/fpga-region.c6
-rw-r--r--drivers/fpga/of-fpga-region.c22
-rw-r--r--drivers/gpio/gpio-adp5588.c19
-rw-r--r--drivers/gpio/gpio-pca953x.c19
-rw-r--r--drivers/gpio/gpio-rockchip.c29
-rw-r--r--drivers/gpio/gpio-tegra186.c81
-rw-r--r--drivers/gpio/gpiolib-cdev.c252
-rw-r--r--drivers/gpio/gpiolib-of.c5
-rw-r--r--drivers/gpio/gpiolib.c58
-rw-r--r--drivers/gpio/gpiolib.h1
-rw-r--r--drivers/gpu/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c223
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h2964
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm394
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm244
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c22
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c248
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c516
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c617
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h67
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h2
-rwxr-xr-xdrivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h22
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c57
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c49
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c62
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c3
-rw-r--r--drivers/gpu/drm/drm_vm.c2
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_modeset.c5
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_proto.c23
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h8
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c115
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c8
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c9
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c57
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c2
-rw-r--r--drivers/gpu/host1x/Kconfig5
-rw-r--r--drivers/gpu/host1x/Makefile1
-rw-r--r--drivers/gpu/host1x/context_bus.c31
-rw-r--r--drivers/hid/usbhid/hid-core.c2
-rw-r--r--drivers/hid/usbhid/usbkbd.c2
-rw-r--r--drivers/hid/usbhid/usbmouse.c2
-rw-r--r--drivers/hte/Kconfig33
-rw-r--r--drivers/hte/Makefile3
-rw-r--r--drivers/hte/hte-tegra194-test.c238
-rw-r--r--drivers/hte/hte-tegra194.c730
-rw-r--r--drivers/hte/hte.c947
-rw-r--r--drivers/hv/channel.c116
-rw-r--r--drivers/hv/channel_mgmt.c40
-rw-r--r--drivers/hv/connection.c6
-rw-r--r--drivers/hv/hv_balloon.c21
-rw-r--r--drivers/hv/hyperv_vmbus.h2
-rw-r--r--drivers/hv/ring_buffer.c46
-rw-r--r--drivers/hv/vmbus_drv.c88
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c33
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c136
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c180
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h120
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c11
-rw-r--r--drivers/i2c/busses/i2c-cadence.c12
-rw-r--r--drivers/i2c/busses/i2c-davinci.c12
-rw-r--r--drivers/i2c/busses/i2c-designware-amdpsp.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c2
-rw-r--r--drivers/i2c/busses/i2c-ismt.c3
-rw-r--r--drivers/i2c/busses/i2c-meson.c115
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c11
-rw-r--r--drivers/i2c/busses/i2c-mt7621.c5
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c122
-rw-r--r--drivers/i2c/busses/i2c-powermac.c2
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c6
-rw-r--r--drivers/i2c/busses/i2c-rcar.c217
-rw-r--r--drivers/i2c/busses/i2c-xiic.c84
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c7
-rw-r--r--drivers/i3c/master/svc-i3c-master.c3
-rw-r--r--drivers/iio/accel/Kconfig1
-rw-r--r--drivers/iio/accel/adxl355_core.c7
-rw-r--r--drivers/iio/accel/adxl367.c1
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c4
-rw-r--r--drivers/iio/accel/dmard09.c2
-rw-r--r--drivers/iio/accel/fxls8962af-core.c1
-rw-r--r--drivers/iio/accel/kxsd9-spi.c4
-rw-r--r--drivers/iio/accel/mma8452.c1
-rw-r--r--drivers/iio/accel/sca3000.c1
-rw-r--r--drivers/iio/accel/ssp_accel_sensor.c1
-rw-r--r--drivers/iio/accel/st_accel.h28
-rw-r--r--drivers/iio/accel/st_accel_core.c14
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/accel/st_accel_spi.c5
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/adc/ad7124.c86
-rw-r--r--drivers/iio/adc/ad7192.c68
-rw-r--r--drivers/iio/adc/ad7266.c44
-rw-r--r--drivers/iio/adc/ad7280a.c2
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c143
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c4
-rw-r--r--drivers/iio/adc/ina2xx-adc.c3
-rw-r--r--drivers/iio/adc/palmas_gpadc.c3
-rw-r--r--drivers/iio/adc/sc27xx_adc.c470
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c5
-rw-r--r--drivers/iio/adc/stmpe-adc.c21
-rw-r--r--drivers/iio/adc/ti-ads1015.c398
-rw-r--r--drivers/iio/adc/ti-ads8688.c1
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c4
-rw-r--r--drivers/iio/afe/Kconfig1
-rw-r--r--drivers/iio/afe/iio-rescale.c5
-rw-r--r--drivers/iio/buffer/kfifo_buf.c10
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c5
-rw-r--r--drivers/iio/common/scmi_sensors/scmi_iio.c1
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_spi.c13
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c50
-rw-r--r--drivers/iio/dac/Kconfig4
-rw-r--r--drivers/iio/dac/ad5064.c2
-rw-r--r--drivers/iio/dac/ad5360.c2
-rw-r--r--drivers/iio/dac/ad5380.c2
-rw-r--r--drivers/iio/dac/ad5446.c2
-rw-r--r--drivers/iio/dac/ad5504.c2
-rw-r--r--drivers/iio/dac/ad5624r_spi.c2
-rw-r--r--drivers/iio/dac/ad5686.c2
-rw-r--r--drivers/iio/dac/ad5755.c2
-rw-r--r--drivers/iio/dac/ad5791.c2
-rw-r--r--drivers/iio/dac/ad7303.c2
-rw-r--r--drivers/iio/dac/ltc2632.c8
-rw-r--r--drivers/iio/dac/ltc2688.c19
-rw-r--r--drivers/iio/dac/max5821.c2
-rw-r--r--drivers/iio/dac/mcp4725.c4
-rw-r--r--drivers/iio/dac/stm32-dac.c2
-rw-r--r--drivers/iio/dac/ti-dac082s085.c2
-rw-r--r--drivers/iio/dac/ti-dac5571.c2
-rw-r--r--drivers/iio/dac/ti-dac7311.c2
-rw-r--r--drivers/iio/dummy/iio_simple_dummy.c20
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_buffer.c48
-rw-r--r--drivers/iio/frequency/ad9523.c2
-rw-r--r--drivers/iio/gyro/fxas21002c_core.c8
-rw-r--r--drivers/iio/gyro/mpu3050-core.c14
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c4
-rw-r--r--drivers/iio/gyro/mpu3050.h2
-rw-r--r--drivers/iio/gyro/ssp_gyro_sensor.c1
-rw-r--r--drivers/iio/gyro/st_gyro_core.c15
-rw-r--r--drivers/iio/health/max30100.c1
-rw-r--r--drivers/iio/health/max30102.c1
-rw-r--r--drivers/iio/imu/adis16480.c91
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c27
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c13
-rw-r--r--drivers/iio/imu/bmi160/bmi160_spi.c18
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c1
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c1
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c9
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c5
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig6
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c4
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c6
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c5
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c5
-rw-r--r--drivers/iio/industrialio-buffer.c42
-rw-r--r--drivers/iio/industrialio-core.c46
-rw-r--r--drivers/iio/industrialio-event.c2
-rw-r--r--drivers/iio/industrialio-trigger.c2
-rw-r--r--drivers/iio/light/Kconfig1
-rw-r--r--drivers/iio/light/apds9960.c1
-rw-r--r--drivers/iio/light/stk3310.c25
-rw-r--r--drivers/iio/light/tsl2772.c25
-rw-r--r--drivers/iio/magnetometer/Kconfig1
-rw-r--r--drivers/iio/magnetometer/rm3100-core.c15
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c15
-rw-r--r--drivers/iio/multiplexer/Kconfig1
-rw-r--r--drivers/iio/multiplexer/iio-mux.c49
-rw-r--r--drivers/iio/pressure/st_pressure_core.c8
-rw-r--r--drivers/iio/proximity/mb1232.c8
-rw-r--r--drivers/iio/proximity/ping.c5
-rw-r--r--drivers/iio/proximity/vl53l0x-i2c.c7
-rw-r--r--drivers/iio/temperature/ltc2983.c236
-rw-r--r--drivers/iio/temperature/max31856.c6
-rw-r--r--drivers/iio/temperature/max31865.c4
-rw-r--r--drivers/iio/trigger/iio-trig-sysfs.c11
-rw-r--r--drivers/input/input.c4
-rw-r--r--drivers/input/joystick/Kconfig11
-rw-r--r--drivers/input/joystick/Makefile1
-rw-r--r--drivers/input/joystick/sensehat-joystick.c137
-rw-r--r--drivers/input/keyboard/bcm-keypad.c3
-rw-r--r--drivers/input/keyboard/clps711x-keypad.c3
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c29
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c7
-rw-r--r--drivers/input/keyboard/gpio_keys.c2
-rw-r--r--drivers/input/keyboard/mt6779-keypad.c10
-rw-r--r--drivers/input/keyboard/sun4i-lradc-keys.c63
-rw-r--r--drivers/input/misc/Kconfig10
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ati_remote2.c2
-rw-r--r--drivers/input/misc/cm109.c2
-rw-r--r--drivers/input/misc/iqs7222.c2446
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c127
-rw-r--r--drivers/input/misc/powermate.c2
-rw-r--r--drivers/input/misc/sparcspkr.c1
-rw-r--r--drivers/input/misc/xen-kbdfront.c4
-rw-r--r--drivers/input/misc/yealink.c2
-rw-r--r--drivers/input/mouse/cypress_ps2.c2
-rw-r--r--drivers/input/mouse/psmouse-smbus.c11
-rw-r--r--drivers/input/mouse/pxa930_trkball.c1
-rw-r--r--drivers/input/mouse/vmmouse.c14
-rw-r--r--drivers/input/rmi4/rmi_f54.c1
-rw-r--r--drivers/input/tablet/acecad.c2
-rw-r--r--drivers/input/tablet/aiptek.c3
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c2
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c130
-rw-r--r--drivers/input/touchscreen/stmfts.c16
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c42
-rw-r--r--drivers/input/touchscreen/zylonite-wm97xx.c43
-rw-r--r--drivers/interconnect/qcom/Kconfig18
-rw-r--r--drivers/interconnect/qcom/Makefile4
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c16
-rw-r--r--drivers/interconnect/qcom/icc-rpm.h6
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c2
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.h6
-rw-r--r--drivers/interconnect/qcom/msm8916.c12
-rw-r--r--drivers/interconnect/qcom/msm8939.c16
-rw-r--r--drivers/interconnect/qcom/msm8974.c28
-rw-r--r--drivers/interconnect/qcom/msm8996.c16
-rw-r--r--drivers/interconnect/qcom/osm-l3.c16
-rw-r--r--drivers/interconnect/qcom/qcm2290.c24
-rw-r--r--drivers/interconnect/qcom/qcs404.c12
-rw-r--r--drivers/interconnect/qcom/sc7180.c66
-rw-r--r--drivers/interconnect/qcom/sc7280.c72
-rw-r--r--drivers/interconnect/qcom/sc8180x.c1895
-rw-r--r--drivers/interconnect/qcom/sc8180x.h7
-rw-r--r--drivers/interconnect/qcom/sc8280xp.c2438
-rw-r--r--drivers/interconnect/qcom/sc8280xp.h209
-rw-r--r--drivers/interconnect/qcom/sdm660.c24
-rw-r--r--drivers/interconnect/qcom/sdm845.c32
-rw-r--r--drivers/interconnect/qcom/sdx55.c12
-rw-r--r--drivers/interconnect/qcom/sdx65.c231
-rw-r--r--drivers/interconnect/qcom/sdx65.h65
-rw-r--r--drivers/interconnect/qcom/sm8150.c66
-rw-r--r--drivers/interconnect/qcom/sm8250.c66
-rw-r--r--drivers/interconnect/qcom/sm8350.c60
-rw-r--r--drivers/interconnect/qcom/sm8450.c68
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h4
-rw-r--r--drivers/iommu/amd/init.c8
-rw-r--r--drivers/iommu/amd/iommu.c19
-rw-r--r--drivers/iommu/amd/iommu_v2.c12
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c13
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c2
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-impl.c3
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c1
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c8
-rw-r--r--drivers/iommu/dma-iommu.c25
-rw-r--r--drivers/iommu/fsl_pamu.c3
-rw-r--r--drivers/iommu/fsl_pamu_domain.c1
-rw-r--r--drivers/iommu/intel/iommu.c216
-rw-r--r--drivers/iommu/intel/pasid.c45
-rw-r--r--drivers/iommu/intel/pasid.h2
-rw-r--r--drivers/iommu/iommu.c354
-rw-r--r--drivers/iommu/msm_iommu.c22
-rw-r--r--drivers/iommu/mtk_iommu.c980
-rw-r--r--drivers/iommu/mtk_iommu.h101
-rw-r--r--drivers/iommu/mtk_iommu_v1.c242
-rw-r--r--drivers/iommu/s390-iommu.c15
-rw-r--r--drivers/irqchip/Kconfig6
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c6
-rw-r--r--drivers/leds/Kconfig3
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/flash/leds-ktd2692.c15
-rw-r--r--drivers/leds/leds-is31fl32xx.c8
-rw-r--r--drivers/leds/leds-locomo.c1
-rw-r--r--drivers/leds/leds-lp50xx.c4
-rw-r--r--drivers/leds/leds-pca9532.c11
-rw-r--r--drivers/leds/leds-regulator.c47
-rw-r--r--drivers/leds/rgb/Kconfig29
-rw-r--r--drivers/leds/rgb/Makefile4
-rw-r--r--drivers/leds/rgb/leds-pwm-multicolor.c186
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c1451
-rw-r--r--drivers/macintosh/Kconfig6
-rw-r--r--drivers/macintosh/Makefile3
-rw-r--r--drivers/macintosh/adb.c2
-rw-r--r--drivers/macintosh/adbhid.c9
-rw-r--r--drivers/macintosh/ams/ams-core.c2
-rw-r--r--drivers/macintosh/ams/ams-i2c.c6
-rw-r--r--drivers/macintosh/ans-lcd.c2
-rw-r--r--drivers/macintosh/macio-adb.c5
-rw-r--r--drivers/macintosh/macio_asic.c9
-rw-r--r--drivers/macintosh/macio_sysfs.c2
-rw-r--r--drivers/macintosh/mediabay.c2
-rw-r--r--drivers/macintosh/rack-meter.c1
-rw-r--r--drivers/macintosh/smu.c7
-rw-r--r--drivers/macintosh/therm_adt746x.c1
-rw-r--r--drivers/macintosh/therm_windtunnel.c1
-rw-r--r--drivers/macintosh/via-cuda.c10
-rw-r--r--drivers/macintosh/via-pmu-backlight.c1
-rw-r--r--drivers/macintosh/via-pmu-led.c2
-rw-r--r--drivers/macintosh/via-pmu.c9
-rw-r--r--drivers/macintosh/windfarm_ad7417_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_core.c2
-rw-r--r--drivers/macintosh/windfarm_cpufreq_clamp.c2
-rw-r--r--drivers/macintosh/windfarm_fcu_controls.c2
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c1
-rw-r--r--drivers/macintosh/windfarm_lm87_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_mpu.h2
-rw-r--r--drivers/macintosh/windfarm_pm112.c4
-rw-r--r--drivers/macintosh/windfarm_pm121.c3
-rw-r--r--drivers/macintosh/windfarm_pm72.c2
-rw-r--r--drivers/macintosh/windfarm_pm81.c3
-rw-r--r--drivers/macintosh/windfarm_pm91.c3
-rw-r--r--drivers/macintosh/windfarm_rm31.c2
-rw-r--r--drivers/macintosh/windfarm_smu_controls.c3
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c2
-rw-r--r--drivers/macintosh/windfarm_smu_sensors.c3
-rw-r--r--drivers/md/bcache/bcache.h7
-rw-r--r--drivers/md/bcache/btree.c59
-rw-r--r--drivers/md/bcache/btree.h2
-rw-r--r--drivers/md/bcache/journal.c31
-rw-r--r--drivers/md/bcache/journal.h2
-rw-r--r--drivers/md/bcache/request.c6
-rw-r--r--drivers/md/bcache/super.c1
-rw-r--r--drivers/md/bcache/writeback.c133
-rw-r--r--drivers/md/bcache/writeback.h2
-rw-r--r--drivers/md/dm-linear.c15
-rw-r--r--drivers/md/dm-log-writes.c15
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-stripe.c15
-rw-r--r--drivers/md/dm-table.c19
-rw-r--r--drivers/md/dm-target.c4
-rw-r--r--drivers/md/dm-verity-target.c1
-rw-r--r--drivers/md/dm-writecache.c7
-rw-r--r--drivers/md/dm.c25
-rw-r--r--drivers/md/md-linear.c5
-rw-r--r--drivers/md/md-multipath.c15
-rw-r--r--drivers/md/md.c185
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/raid0.c29
-rw-r--r--drivers/md/raid1.c24
-rw-r--r--drivers/md/raid10.c54
-rw-r--r--drivers/md/raid5-cache.c5
-rw-r--r--drivers/md/raid5-ppl.c27
-rw-r--r--drivers/md/raid5.c37
-rw-r--r--drivers/media/rc/ati_remote.c4
-rw-r--r--drivers/media/rc/mceusb.c2
-rw-r--r--drivers/media/rc/streamzap.c2
-rw-r--r--drivers/media/rc/xbox_remote.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-input.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c2
-rw-r--r--drivers/memory/emif.c2
-rw-r--r--drivers/mfd/cros_ec_dev.c19
-rw-r--r--drivers/mfd/davinci_voicecodec.c6
-rw-r--r--drivers/mfd/hi655x-pmic.c27
-rw-r--r--drivers/mfd/intel-lpss-pci.c2
-rw-r--r--drivers/mfd/ipaq-micro.c2
-rw-r--r--drivers/mfd/mt6397-core.c13
-rw-r--r--drivers/mfd/rt4831.c7
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c3
-rw-r--r--drivers/mfd/tc6393xb.c130
-rw-r--r--drivers/mfd/twl-core.c8
-rw-r--r--drivers/mfd/twl-core.h4
-rw-r--r--drivers/mfd/twl4030-irq.c7
-rw-r--r--drivers/mfd/twl6030-irq.c3
-rw-r--r--drivers/misc/altera-stapl/altera.c56
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_msg.c29
-rw-r--r--drivers/misc/cardreader/alcor_pci.c6
-rw-r--r--drivers/misc/cardreader/rts5261.c115
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c1
-rw-r--r--drivers/misc/cxl/api.c1
-rw-r--r--drivers/misc/cxl/cxl.h2
-rw-r--r--drivers/misc/cxl/cxllib.c1
-rw-r--r--drivers/misc/cxl/flash.c1
-rw-r--r--drivers/misc/cxl/guest.c2
-rw-r--r--drivers/misc/cxl/irq.c1
-rw-r--r--drivers/misc/cxl/main.c1
-rw-r--r--drivers/misc/cxl/native.c1
-rw-r--r--drivers/misc/fastrpc.c18
-rw-r--r--drivers/misc/habanalabs/common/Makefile2
-rw-r--r--drivers/misc/habanalabs/common/command_buffer.c413
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c89
-rw-r--r--drivers/misc/habanalabs/common/context.c4
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c304
-rw-r--r--drivers/misc/habanalabs/common/device.c280
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c86
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h415
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c44
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c108
-rw-r--r--drivers/misc/habanalabs/common/irq.c14
-rw-r--r--drivers/misc/habanalabs/common/memory.c289
-rw-r--r--drivers/misc/habanalabs/common/memory_mgr.c349
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu.c296
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu_v1.c297
-rw-r--r--drivers/misc/habanalabs/common/pci/pci.c10
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c412
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h4
-rw-r--r--drivers/misc/habanalabs/goya/goya.c363
-rw-r--r--drivers/misc/habanalabs/include/common/cpucp_if.h70
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h10
-rw-r--r--drivers/misc/lkdtm/bugs.c96
-rw-r--r--drivers/misc/lkdtm/cfi.c145
-rw-r--r--drivers/misc/lkdtm/core.c138
-rw-r--r--drivers/misc/lkdtm/fortify.c17
-rw-r--r--drivers/misc/lkdtm/heap.c48
-rw-r--r--drivers/misc/lkdtm/lkdtm.h142
-rw-r--r--drivers/misc/lkdtm/perms.c47
-rw-r--r--drivers/misc/lkdtm/powerpc.c11
-rw-r--r--drivers/misc/lkdtm/refcount.c65
-rw-r--r--drivers/misc/lkdtm/stackleak.c13
-rw-r--r--drivers/misc/lkdtm/usercopy.c146
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c2
-rw-r--r--drivers/misc/mei/pxp/mei_pxp.c2
-rw-r--r--drivers/misc/ocxl/afu_irq.c1
-rw-r--r--drivers/misc/ocxl/file.c2
-rw-r--r--drivers/misc/ocxl/link.c1
-rw-r--r--drivers/misc/pvpanic/pvpanic.c10
-rw-r--r--drivers/misc/vmw_balloon.c4
-rw-r--r--drivers/misc/vmw_vmci/Kconfig2
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.c15
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c12
-rw-r--r--drivers/mmc/host/Kconfig2
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c2
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c121
-rw-r--r--drivers/mtd/ubi/fastmap.c11
-rw-r--r--drivers/mtd/ubi/ubi.h4
-rw-r--r--drivers/mtd/ubi/vmt.c1
-rw-r--r--drivers/mtd/ubi/wl.c33
-rw-r--r--drivers/mtd/ubi/wl.h2
-rw-r--r--drivers/net/amt.c6
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/bonding/bond_netlink.c5
-rw-r--r--drivers/net/bonding/bond_options.c10
-rw-r--r--drivers/net/bonding/bond_procfs.c15
-rw-r--r--drivers/net/dsa/b53/b53_common.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c31
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h31
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c32
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h32
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c32
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c4
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h58
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c188
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c9
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c32
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c45
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c2
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c6
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c8
-rw-r--r--drivers/net/ipa/ipa_endpoint.c9
-rw-r--r--drivers/net/macsec.c7
-rw-r--r--drivers/net/phy/at803x.c33
-rw-r--r--drivers/net/phy/fixed_phy.c6
-rw-r--r--drivers/net/usb/cdc_ncm.c4
-rw-r--r--drivers/net/usb/lan78xx.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/usb/usbnet.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c34
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/host.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c10
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h1
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx.c10
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c2
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c20
-rw-r--r--drivers/net/xen-netback/netback.c2
-rw-r--r--drivers/net/xen-netfront.c7
-rw-r--r--drivers/nvdimm/btt_devs.c23
-rw-r--r--drivers/nvdimm/bus.c38
-rw-r--r--drivers/nvdimm/core.c19
-rw-r--r--drivers/nvdimm/dax_devs.c4
-rw-r--r--drivers/nvdimm/dimm_devs.c12
-rw-r--r--drivers/nvdimm/namespace_devs.c46
-rw-r--r--drivers/nvdimm/nd-core.h68
-rw-r--r--drivers/nvdimm/pfn_devs.c31
-rw-r--r--drivers/nvdimm/pmem.c205
-rw-r--r--drivers/nvdimm/pmem.h5
-rw-r--r--drivers/nvdimm/region.c2
-rw-r--r--drivers/nvdimm/region_devs.c20
-rw-r--r--drivers/nvdimm/security.c5
-rw-r--r--drivers/nvme/host/core.c13
-rw-r--r--drivers/nvme/host/fc.c18
-rw-r--r--drivers/nvme/host/ioctl.c3
-rw-r--r--drivers/nvme/host/pci.c12
-rw-r--r--drivers/nvme/target/passthru.c5
-rw-r--r--drivers/nvmem/Kconfig13
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/apple-efuses.c80
-rw-r--r--drivers/nvmem/bcm-ocotp.c2
-rw-r--r--drivers/nvmem/brcm_nvram.c2
-rw-r--r--drivers/nvmem/core.c1
-rw-r--r--drivers/nvmem/layerscape-sfp.c36
-rw-r--r--drivers/nvmem/qfprom.c3
-rw-r--r--drivers/nvmem/sunplus-ocotp.c4
-rw-r--r--drivers/opp/core.c339
-rw-r--r--drivers/opp/debugfs.c10
-rw-r--r--drivers/opp/of.c2
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c3
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c21
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c10
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h7
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c23
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c3
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c119
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c91
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c23
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c9
-rw-r--r--drivers/pci/controller/pci-hyperv.c243
-rw-r--r--drivers/pci/controller/pci-mvebu.c97
-rw-r--r--drivers/pci/controller/pci-versatile.c3
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c257
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c8
-rw-r--r--drivers/pci/controller/pcie-mediatek.c1
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c18
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c3
-rw-r--r--drivers/pci/controller/vmd.c7
-rw-r--r--drivers/pci/hotplug/pnv_php.c1
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c1
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c2
-rw-r--r--drivers/pci/hotplug/rpaphp_pci.c1
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c1
-rw-r--r--drivers/pci/of.c78
-rw-r--r--drivers/pci/p2pdma.c25
-rw-r--r--drivers/pci/pci-acpi.c41
-rw-r--r--drivers/pci/pci-driver.c63
-rw-r--r--drivers/pci/pci-stub.c1
-rw-r--r--drivers/pci/pci-sysfs.c28
-rw-r--r--drivers/pci/pci.c355
-rw-r--r--drivers/pci/pci.h15
-rw-r--r--drivers/pci/pcie/aer.c7
-rw-r--r--drivers/pci/pcie/portdrv_pci.c2
-rw-r--r--drivers/pci/quirks.c47
-rw-r--r--drivers/pcmcia/Kconfig2
-rw-r--r--drivers/pcmcia/Makefile13
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.c9
-rw-r--r--drivers/pcmcia/pxa2xx_base.c48
-rw-r--r--drivers/pcmcia/pxa2xx_sharpsl.c3
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c2
-rw-r--r--drivers/pcmcia/sa1111_generic.c1
-rw-r--r--drivers/pcmcia/sa1111_lubbock.c1
-rw-r--r--drivers/pcmcia/soc_common.c19
-rw-r--r--drivers/pcmcia/soc_common.h120
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/allwinner/phy-sun6i-mipi-dphy.c166
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c193
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c276
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c10
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.c50
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.c29
-rw-r--r--drivers/phy/phy-can-transceiver.c24
-rw-r--r--drivers/phy/phy-core.c44
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c124
-rw-r--r--drivers/phy/rockchip/phy-rockchip-dphy-rx0.c7
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c129
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c18
-rw-r--r--drivers/pinctrl/berlin/berlin-bg4ct.c3
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imxrt1170.c349
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c42
-rw-r--r--drivers/pinctrl/intel/pinctrl-broxton.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c66
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c45
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h2
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c26
-rw-r--r--drivers/pinctrl/mediatek/Kconfig8
-rw-r--r--drivers/pinctrl/mediatek/Makefile1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6795.c623
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt6795.h1698
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-s4.c3
-rw-r--r--drivers/pinctrl/mvebu/Kconfig4
-rw-r--r--drivers/pinctrl/mvebu/Makefile1
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-ac5.c261
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c2
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c7
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c4
-rw-r--r--drivers/pinctrl/pinctrl-apple-gpio.c2
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c11
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.h10
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c118
-rw-r--r--drivers/pinctrl/pinctrl-max77620.c1
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c16
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c22
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c532
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h177
-rw-r--r--drivers/pinctrl/pinctrl-starfive.c11
-rw-r--r--drivers/pinctrl/pinctrl-thunderbay.c7
-rw-r--r--drivers/pinctrl/qcom/Kconfig19
-rw-r--r--drivers/pinctrl/qcom/Makefile2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c309
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.h86
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c167
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8150.c22
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c163
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c2
-rw-r--r--drivers/pinctrl/ralink/Kconfig28
-rw-r--r--drivers/pinctrl/ralink/Makefile4
-rw-r--r--drivers/pinctrl/ralink/pinctrl-mt7620.c302
-rw-r--r--drivers/pinctrl/ralink/pinctrl-mt7621.c76
-rw-r--r--drivers/pinctrl/ralink/pinctrl-ralink.c349
-rw-r--r--drivers/pinctrl/ralink/pinctrl-ralink.h (renamed from drivers/pinctrl/ralink/pinmux.h)16
-rw-r--r--drivers/pinctrl/ralink/pinctrl-rt2880.c381
-rw-r--r--drivers/pinctrl/ralink/pinctrl-rt288x.c60
-rw-r--r--drivers/pinctrl/ralink/pinctrl-rt305x.c66
-rw-r--r--drivers/pinctrl/ralink/pinctrl-rt3883.c50
-rw-r--r--drivers/pinctrl/renesas/Kconfig7
-rw-r--r--drivers/pinctrl/renesas/core.c117
-rw-r--r--drivers/pinctrl/renesas/gpio.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-emev2.c60
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a73a4.c58
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7740.c74
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77470.c176
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7778.c98
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7779.c82
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7790.c110
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7791.c111
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7792.c231
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7794.c97
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77950.c170
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77951.c169
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7796.c166
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77965.c166
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77970.c136
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77980.c107
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77990.c252
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77995.c229
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779a0.c246
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779f0.c121
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7203.c53
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7264.c104
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7269.c82
-rw-r--r--drivers/pinctrl/renesas/pfc-sh73a0.c87
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7720.c57
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7722.c203
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7723.c71
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7724.c8
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7734.c116
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7757.c96
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7785.c61
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7786.c22
-rw-r--r--drivers/pinctrl/renesas/pfc-shx3.c1
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c200
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzn1.c10
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c1
-rw-r--r--drivers/pinctrl/renesas/sh_pfc.h24
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c198
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.h15
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32mp135.c3
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32mp157.c2
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra194.c9
-rw-r--r--drivers/platform/mips/cpu_hwmon.c127
-rw-r--r--drivers/power/supply/ab8500_fg.c19
-rw-r--r--drivers/power/supply/axp288_charger.c17
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c41
-rw-r--r--drivers/power/supply/bq24190_charger.c63
-rw-r--r--drivers/power/supply/bq27xxx_battery.c60
-rw-r--r--drivers/power/supply/charger-manager.c7
-rw-r--r--drivers/power/supply/max8997_charger.c8
-rw-r--r--drivers/power/supply/power_supply_core.c2
-rw-r--r--drivers/power/supply/tosa_battery.c172
-rw-r--r--drivers/ptp/ptp_clockmatrix.c2
-rw-r--r--drivers/pwm/Kconfig25
-rw-r--r--drivers/pwm/Makefile2
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c14
-rw-r--r--drivers/pwm/pwm-clps711x.c68
-rw-r--r--drivers/pwm/pwm-cros-ec.c82
-rw-r--r--drivers/pwm/pwm-lp3943.c42
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c43
-rw-r--r--drivers/pwm/pwm-lpc32xx.c29
-rw-r--r--drivers/pwm/pwm-mediatek.c36
-rw-r--r--drivers/pwm/pwm-raspberrypi-poe.c2
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c317
-rw-r--r--drivers/pwm/pwm-samsung.c54
-rw-r--r--drivers/pwm/pwm-sifive.c5
-rw-r--r--drivers/pwm/pwm-sti.c29
-rw-r--r--drivers/pwm/pwm-stmpe.c29
-rw-r--r--drivers/pwm/pwm-sun4i.c18
-rw-r--r--drivers/pwm/pwm-sunplus.c232
-rw-r--r--drivers/pwm/pwm-tegra.c40
-rw-r--r--drivers/pwm/pwm-twl-led.c76
-rw-r--r--drivers/pwm/pwm-xilinx.c321
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c4
-rw-r--r--drivers/regulator/pfuze100-regulator.c42
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c105
-rw-r--r--drivers/remoteproc/imx_rproc.c36
-rw-r--r--drivers/remoteproc/mtk_common.h2
-rw-r--r--drivers/remoteproc/mtk_scp.c76
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c34
-rw-r--r--drivers/remoteproc/remoteproc_cdev.c11
-rw-r--r--drivers/remoteproc/remoteproc_core.c15
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c17
-rw-r--r--drivers/remoteproc/remoteproc_elf_loader.c2
-rw-r--r--drivers/remoteproc/remoteproc_sysfs.c11
-rw-r--r--drivers/rpmsg/qcom_smd.c4
-rw-r--r--drivers/rpmsg/rpmsg_core.c42
-rw-r--r--drivers/rpmsg/rpmsg_internal.h5
-rw-r--r--drivers/rpmsg/rpmsg_ns.c4
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c9
-rw-r--r--drivers/rtc/Kconfig7
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-ftrtc010.c34
-rw-r--r--drivers/rtc/rtc-gamecube.c1
-rw-r--r--drivers/rtc/rtc-meson.c2
-rw-r--r--drivers/rtc/rtc-mt6397.c2
-rw-r--r--drivers/rtc/rtc-mxc.c2
-rw-r--r--drivers/rtc/rtc-pcf85063.c2
-rw-r--r--drivers/rtc/rtc-pxa.c2
-rw-r--r--drivers/rtc/rtc-rx8025.c7
-rw-r--r--drivers/rtc/rtc-rzn1.c418
-rw-r--r--drivers/rtc/rtc-sun6i.c42
-rw-r--r--drivers/s390/block/dcssblk.c9
-rw-r--r--drivers/s390/cio/cio.h6
-rw-r--r--drivers/s390/cio/css.c28
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c47
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.h4
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c3
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c7
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c50
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h3
-rw-r--r--drivers/s390/virtio/virtio_ccw.c34
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/esas2r/esas2r_flash.c2
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/lpfc/Makefile2
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h22
-rw-r--r--drivers/scsi/lpfc/lpfc_ids.h30
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c89
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c45
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c263
-rw-r--r--drivers/scsi/lpfc/lpfc_vmid.c288
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c50
-rw-r--r--drivers/scsi/myrb.c11
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/qedf/qedf_io.c2
-rw-r--r--drivers/scsi/qla1280.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c7
-rw-r--r--drivers/scsi/scsi_error.c5
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c1
-rw-r--r--drivers/scsi/sd.c5
-rw-r--r--drivers/scsi/sd.h4
-rw-r--r--drivers/scsi/sd_zbc.c26
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h2
-rw-r--r--drivers/scsi/st.c3
-rw-r--r--drivers/scsi/storvsc_drv.c193
-rw-r--r--drivers/slimbus/qcom-ctrl.c4
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c23
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-qmgr.c2
-rw-r--r--drivers/soc/pxa/Kconfig (renamed from arch/arm/plat-pxa/Kconfig)5
-rw-r--r--drivers/soc/pxa/Makefile (renamed from arch/arm/plat-pxa/Makefile)4
-rw-r--r--drivers/soc/pxa/mfp.c (renamed from arch/arm/plat-pxa/mfp.c)2
-rw-r--r--drivers/soc/pxa/ssp.c (renamed from arch/arm/plat-pxa/ssp.c)0
-rw-r--r--drivers/soc/rockchip/grf.c2
-rw-r--r--drivers/soc/tegra/pmc.c87
-rw-r--r--drivers/soc/xilinx/xlnx_event_manager.c203
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c7
-rw-r--r--drivers/soundwire/bus.c27
-rw-r--r--drivers/soundwire/cadence_master.c42
-rw-r--r--drivers/soundwire/intel.c11
-rw-r--r--drivers/soundwire/qcom.c22
-rw-r--r--drivers/soundwire/stream.c1
-rw-r--r--drivers/spi/spi-fsi.c12
-rw-r--r--drivers/spi/spi.c29
-rw-r--r--drivers/staging/Kconfig3
-rw-r--r--drivers/staging/Makefile3
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c2
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c2
-rw-r--r--drivers/staging/greybus/arche-platform.c2
-rw-r--r--drivers/staging/greybus/audio_codec.c32
-rw-r--r--drivers/staging/greybus/pwm.c1
-rw-r--r--drivers/staging/greybus/tools/loopback_test.c2
-rw-r--r--drivers/staging/iio/cdc/ad7746.c2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c1
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c1
-rw-r--r--drivers/staging/ks7010/ks_hostif.c19
-rw-r--r--drivers/staging/ks7010/ks_wlan.h2
-rw-r--r--drivers/staging/most/dim2/dim2.c29
-rw-r--r--drivers/staging/qlge/qlge.h1
-rw-r--r--drivers/staging/r8188eu/core/rtw_ap.c3
-rw-r--r--drivers/staging/r8188eu/core/rtw_br_ext.c76
-rw-r--r--drivers/staging/r8188eu/core/rtw_cmd.c337
-rw-r--r--drivers/staging/r8188eu/core/rtw_fw.c163
-rw-r--r--drivers/staging/r8188eu/core/rtw_ieee80211.c45
-rw-r--r--drivers/staging/r8188eu/core/rtw_ioctl_set.c4
-rw-r--r--drivers/staging/r8188eu/core/rtw_iol.c4
-rw-r--r--drivers/staging/r8188eu/core/rtw_led.c10
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme.c321
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme_ext.c755
-rw-r--r--drivers/staging/r8188eu/core/rtw_p2p.c70
-rw-r--r--drivers/staging/r8188eu/core/rtw_pwrctrl.c109
-rw-r--r--drivers/staging/r8188eu/core/rtw_recv.c319
-rw-r--r--drivers/staging/r8188eu/core/rtw_security.c6
-rw-r--r--drivers/staging/r8188eu/core/rtw_sta_mgt.c4
-rw-r--r--drivers/staging/r8188eu/core/rtw_wlan_util.c135
-rw-r--r--drivers/staging/r8188eu/core/rtw_xmit.c104
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c6
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c2
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c2
-rw-r--r--drivers/staging/r8188eu/hal/HalPwrSeqCmd.c22
-rw-r--r--drivers/staging/r8188eu/hal/hal_com.c4
-rw-r--r--drivers/staging/r8188eu/hal/odm_HWConfig.c8
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_cmd.c49
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_hal_init.c15
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_phycfg.c8
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c7
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_xmit.c8
-rw-r--r--drivers/staging/r8188eu/hal/usb_halinit.c355
-rw-r--r--drivers/staging/r8188eu/hal/usb_ops_linux.c10
-rw-r--r--drivers/staging/r8188eu/include/HalVerDef.h5
-rw-r--r--drivers/staging/r8188eu/include/basic_types.h73
-rw-r--r--drivers/staging/r8188eu/include/drv_types.h7
-rw-r--r--drivers/staging/r8188eu/include/hal_intf.h34
-rw-r--r--drivers/staging/r8188eu/include/ieee80211.h63
-rw-r--r--drivers/staging/r8188eu/include/odm.h29
-rw-r--r--drivers/staging/r8188eu/include/osdep_service.h47
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_hal.h3
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_spec.h10
-rw-r--r--drivers/staging/r8188eu/include/rtw_debug.h55
-rw-r--r--drivers/staging/r8188eu/include/rtw_eeprom.h3
-rw-r--r--drivers/staging/r8188eu/include/rtw_fw.h5
-rw-r--r--drivers/staging/r8188eu/include/rtw_ioctl.h79
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme.h11
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme_ext.h62
-rw-r--r--drivers/staging/r8188eu/include/rtw_pwrctrl.h23
-rw-r--r--drivers/staging/r8188eu/include/rtw_recv.h3
-rw-r--r--drivers/staging/r8188eu/include/rtw_xmit.h4
-rw-r--r--drivers/staging/r8188eu/include/sta_info.h2
-rw-r--r--drivers/staging/r8188eu/include/usb_ops.h22
-rw-r--r--drivers/staging/r8188eu/include/usb_osintf.h4
-rw-r--r--drivers/staging/r8188eu/include/usb_vendor_req.h35
-rw-r--r--drivers/staging/r8188eu/include/wifi.h60
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c295
-rw-r--r--drivers/staging/r8188eu/os_dep/mlme_linux.c1
-rw-r--r--drivers/staging/r8188eu/os_dep/os_intfs.c45
-rw-r--r--drivers/staging/r8188eu/os_dep/osdep_service.c27
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_intf.c5
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_ops_linux.c19
-rw-r--r--drivers/staging/r8188eu/os_dep/xmit_linux.c16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c52
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c22
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c20
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c5
-rw-r--r--drivers/staging/rtl8192e/rtllib.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c10
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c38
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c22
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c63
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c30
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c8
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c15
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c2
-rw-r--r--drivers/staging/rtl8712/drv_types.h3
-rw-r--r--drivers/staging/rtl8712/ieee80211.c4
-rw-r--r--drivers/staging/rtl8712/os_intfs.c1
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h1
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.h4
-rw-r--r--drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h3
-rw-r--r--drivers/staging/rtl8712/rtl8712_macsetting_regdef.h2
-rw-r--r--drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h1
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c16
-rw-r--r--drivers/staging/rtl8712/rtl8712_security_bitdef.h1
-rw-r--r--drivers/staging/rtl8712/rtl8712_spec.h3
-rw-r--r--drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h4
-rw-r--r--drivers/staging/rtl8712/rtl8712_syscfg_regdef.h2
-rw-r--r--drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h1
-rw-r--r--drivers/staging/rtl8712/rtl8712_wmac_bitdef.h1
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h3
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl.h1
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c24
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_rtl.c1
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c3
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c102
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.h1
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h3
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c1
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c1
-rw-r--r--drivers/staging/rtl8712/sta_info.h1
-rw-r--r--drivers/staging/rtl8712/usb_intf.c14
-rw-r--r--drivers/staging/rtl8712/usb_ops.c27
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c21
-rw-r--r--drivers/staging/rtl8712/wifi.h1
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c8
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c24
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c12
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c54
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c44
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c66
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c194
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_rf.c56
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h11
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c8
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c15
-rw-r--r--drivers/staging/rtl8723bs/include/HalVerDef.h10
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h1
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_reg.h295
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_ioctl.h72
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c2
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c12
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c1
-rw-r--r--drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset89
-rw-r--r--drivers/staging/unisys/Documentation/overview.txt337
-rw-r--r--drivers/staging/unisys/Kconfig16
-rw-r--r--drivers/staging/unisys/MAINTAINERS5
-rw-r--r--drivers/staging/unisys/Makefile7
-rw-r--r--drivers/staging/unisys/TODO16
-rw-r--r--drivers/staging/unisys/include/iochannel.h571
-rw-r--r--drivers/staging/unisys/visorhba/Kconfig15
-rw-r--r--drivers/staging/unisys/visorhba/Makefile10
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c1142
-rw-r--r--drivers/staging/unisys/visorinput/Kconfig16
-rw-r--r--drivers/staging/unisys/visorinput/Makefile7
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c788
-rw-r--r--drivers/staging/unisys/visornic/Kconfig16
-rw-r--r--drivers/staging/unisys/visornic/Makefile10
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c2148
-rw-r--r--drivers/staging/vc04_services/Kconfig1
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/Kconfig8
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/TODO10
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c86
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c5
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.c33
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h2
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Kconfig4
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c24
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c33
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c26
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h1
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c13
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h7
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h6
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h15
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c11
-rw-r--r--drivers/staging/vme_user/Kconfig (renamed from drivers/staging/vme/devices/Kconfig)2
-rw-r--r--drivers/staging/vme_user/Makefile (renamed from drivers/staging/vme/devices/Makefile)0
-rw-r--r--drivers/staging/vme_user/vme_user.c (renamed from drivers/staging/vme/devices/vme_user.c)2
-rw-r--r--drivers/staging/vme_user/vme_user.h (renamed from drivers/staging/vme/devices/vme_user.h)0
-rw-r--r--drivers/staging/vt6655/baseband.c15
-rw-r--r--drivers/staging/vt6655/card.c38
-rw-r--r--drivers/staging/vt6655/card.h2
-rw-r--r--drivers/staging/vt6655/channel.c6
-rw-r--r--drivers/staging/vt6655/device_main.c37
-rw-r--r--drivers/staging/vt6655/key.c1
-rw-r--r--drivers/staging/vt6655/mac.c1
-rw-r--r--drivers/staging/vt6655/mac.h271
-rw-r--r--drivers/staging/vt6655/rf.c10
-rw-r--r--drivers/staging/vt6655/rxtx.c6
-rw-r--r--drivers/staging/vt6655/srom.c19
-rw-r--r--drivers/staging/vt6655/tmacro.h43
-rw-r--r--drivers/staging/vt6655/upc.h25
-rw-r--r--drivers/staging/vt6656/channel.c1
-rw-r--r--drivers/staging/vt6656/rf.c1
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c10
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h4
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c34
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c8
-rw-r--r--drivers/target/target_core_pscsi.c3
-rw-r--r--drivers/tee/optee/call.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c1
-rw-r--r--drivers/thunderbolt/ctl.c15
-rw-r--r--drivers/thunderbolt/domain.c12
-rw-r--r--drivers/thunderbolt/nhi.c46
-rw-r--r--drivers/thunderbolt/path.c6
-rw-r--r--drivers/thunderbolt/switch.c109
-rw-r--r--drivers/thunderbolt/tb.c25
-rw-r--r--drivers/thunderbolt/tb.h6
-rw-r--r--drivers/thunderbolt/tb_msgs.h39
-rw-r--r--drivers/thunderbolt/tb_regs.h5
-rw-r--r--drivers/thunderbolt/test.c108
-rw-r--r--drivers/thunderbolt/tunnel.c18
-rw-r--r--drivers/thunderbolt/tunnel.h4
-rw-r--r--drivers/thunderbolt/usb4_port.c38
-rw-r--r--drivers/thunderbolt/xdomain.c609
-rw-r--r--drivers/tty/amiserial.c2
-rw-r--r--drivers/tty/goldfish.c2
-rw-r--r--drivers/tty/hvc/Kconfig19
-rw-r--r--drivers/tty/hvc/hvc_dcc.c194
-rw-r--r--drivers/tty/hvc/hvc_opal.c6
-rw-r--r--drivers/tty/hvc/hvc_vio.c2
-rw-r--r--drivers/tty/hvc/hvc_xen.c2
-rw-r--r--drivers/tty/hvc/hvcs.c5
-rw-r--r--drivers/tty/hvc/hvsi.c2
-rw-r--r--drivers/tty/mxser.c5
-rw-r--r--drivers/tty/n_gsm.c37
-rw-r--r--drivers/tty/n_tty.c73
-rw-r--r--drivers/tty/serial/8250/8250.h41
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c2
-rw-r--r--drivers/tty/serial/8250/8250_core.c1
-rw-r--r--drivers/tty/serial/8250/8250_dma.c7
-rw-r--r--drivers/tty/serial/8250/8250_dw.c229
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c116
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.h51
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c8
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c7
-rw-r--r--drivers/tty/serial/8250/8250_of.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c480
-rw-r--r--drivers/tty/serial/8250/8250_port.c140
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c1
-rw-r--r--drivers/tty/serial/8250/Kconfig2
-rw-r--r--drivers/tty/serial/Kconfig9
-rw-r--r--drivers/tty/serial/altera_jtaguart.c6
-rw-r--r--drivers/tty/serial/amba-pl011.c48
-rw-r--r--drivers/tty/serial/amba-pl011.h35
-rw-r--r--drivers/tty/serial/atmel_serial.c4
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart.h2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c1
-rw-r--r--drivers/tty/serial/digicolor-usart.c2
-rw-r--r--drivers/tty/serial/fsl_lpuart.c66
-rw-r--r--drivers/tty/serial/icom.c538
-rw-r--r--drivers/tty/serial/icom.h274
-rw-r--r--drivers/tty/serial/imx.c2
-rw-r--r--drivers/tty/serial/jsm/jsm_cls.c8
-rw-r--r--drivers/tty/serial/jsm/jsm_neo.c8
-rw-r--r--drivers/tty/serial/max310x.c1
-rw-r--r--drivers/tty/serial/men_z135_uart.c1
-rw-r--r--drivers/tty/serial/meson_uart.c40
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c5
-rw-r--r--drivers/tty/serial/msm_serial.c5
-rw-r--r--drivers/tty/serial/omap-serial.c13
-rw-r--r--drivers/tty/serial/owl-uart.c7
-rw-r--r--drivers/tty/serial/pch_uart.c77
-rw-r--r--drivers/tty/serial/pic32_uart.c159
-rw-r--r--drivers/tty/serial/pic32_uart.h125
-rw-r--r--drivers/tty/serial/pmac_zilog.c69
-rw-r--r--drivers/tty/serial/pmac_zilog.h11
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c58
-rw-r--r--drivers/tty/serial/rda-uart.c2
-rw-r--r--drivers/tty/serial/sa1100.c4
-rw-r--r--drivers/tty/serial/samsung_tty.c13
-rw-r--r--drivers/tty/serial/sc16is7xx.c10
-rw-r--r--drivers/tty/serial/serial_core.c89
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c6
-rw-r--r--drivers/tty/serial/sifive.c20
-rw-r--r--drivers/tty/serial/st-asc.c4
-rw-r--r--drivers/tty/serial/stm32-usart.c225
-rw-r--r--drivers/tty/serial/stm32-usart.h3
-rw-r--r--drivers/tty/serial/sunplus-uart.c2
-rw-r--r--drivers/tty/serial/sunsu.c2
-rw-r--r--drivers/tty/serial/uartlite.c3
-rw-r--r--drivers/tty/serial/xilinx_uartps.c46
-rw-r--r--drivers/tty/serial/zs.c2
-rw-r--r--drivers/tty/synclink_gt.c2
-rw-r--r--drivers/tty/sysrq.c27
-rw-r--r--drivers/tty/tty_baudrate.c35
-rw-r--r--drivers/tty/tty_buffer.c3
-rw-r--r--drivers/tty/tty_ioctl.c2
-rw-r--r--drivers/tty/tty_jobctrl.c4
-rw-r--r--drivers/ufs/Kconfig30
-rw-r--r--drivers/ufs/Makefile5
-rw-r--r--drivers/ufs/core/Kconfig60
-rw-r--r--drivers/ufs/core/Makefile10
-rw-r--r--drivers/ufs/core/ufs-debugfs.c (renamed from drivers/scsi/ufs/ufs-debugfs.c)2
-rw-r--r--drivers/ufs/core/ufs-debugfs.h (renamed from drivers/scsi/ufs/ufs-debugfs.h)0
-rw-r--r--drivers/ufs/core/ufs-fault-injection.c (renamed from drivers/scsi/ufs/ufs-fault-injection.c)0
-rw-r--r--drivers/ufs/core/ufs-fault-injection.h (renamed from drivers/scsi/ufs/ufs-fault-injection.h)0
-rw-r--r--drivers/ufs/core/ufs-hwmon.c (renamed from drivers/scsi/ufs/ufs-hwmon.c)2
-rw-r--r--drivers/ufs/core/ufs-sysfs.c (renamed from drivers/scsi/ufs/ufs-sysfs.c)2
-rw-r--r--drivers/ufs/core/ufs-sysfs.h (renamed from drivers/scsi/ufs/ufs-sysfs.h)0
-rw-r--r--drivers/ufs/core/ufs_bsg.c (renamed from drivers/scsi/ufs/ufs_bsg.c)2
-rw-r--r--drivers/ufs/core/ufs_bsg.h (renamed from drivers/scsi/ufs/ufs_bsg.h)0
-rw-r--r--drivers/ufs/core/ufshcd-crypto.c (renamed from drivers/scsi/ufs/ufshcd-crypto.c)2
-rw-r--r--drivers/ufs/core/ufshcd-crypto.h (renamed from drivers/scsi/ufs/ufshcd-crypto.h)4
-rw-r--r--drivers/ufs/core/ufshcd-priv.h (renamed from drivers/scsi/ufs/ufshcd-priv.h)2
-rw-r--r--drivers/ufs/core/ufshcd.c (renamed from drivers/scsi/ufs/ufshcd.c)9
-rw-r--r--drivers/ufs/core/ufshpb.c (renamed from drivers/scsi/ufs/ufshpb.c)8
-rw-r--r--drivers/ufs/core/ufshpb.h (renamed from drivers/scsi/ufs/ufshpb.h)0
-rw-r--r--drivers/ufs/host/Kconfig (renamed from drivers/scsi/ufs/Kconfig)75
-rw-r--r--drivers/ufs/host/Makefile (renamed from drivers/scsi/ufs/Makefile)12
-rw-r--r--drivers/ufs/host/cdns-pltfrm.c (renamed from drivers/scsi/ufs/cdns-pltfrm.c)0
-rw-r--r--drivers/ufs/host/tc-dwc-g210-pci.c (renamed from drivers/scsi/ufs/tc-dwc-g210-pci.c)2
-rw-r--r--drivers/ufs/host/tc-dwc-g210-pltfrm.c (renamed from drivers/scsi/ufs/tc-dwc-g210-pltfrm.c)0
-rw-r--r--drivers/ufs/host/tc-dwc-g210.c (renamed from drivers/scsi/ufs/tc-dwc-g210.c)4
-rw-r--r--drivers/ufs/host/tc-dwc-g210.h (renamed from drivers/scsi/ufs/tc-dwc-g210.h)0
-rw-r--r--drivers/ufs/host/ti-j721e-ufs.c (renamed from drivers/scsi/ufs/ti-j721e-ufs.c)0
-rw-r--r--drivers/ufs/host/ufs-exynos.c (renamed from drivers/scsi/ufs/ufs-exynos.c)6
-rw-r--r--drivers/ufs/host/ufs-exynos.h (renamed from drivers/scsi/ufs/ufs-exynos.h)0
-rw-r--r--drivers/ufs/host/ufs-hisi.c (renamed from drivers/scsi/ufs/ufs-hisi.c)8
-rw-r--r--drivers/ufs/host/ufs-hisi.h (renamed from drivers/scsi/ufs/ufs-hisi.h)0
-rw-r--r--drivers/ufs/host/ufs-mediatek-trace.h (renamed from drivers/scsi/ufs/ufs-mediatek-trace.h)2
-rw-r--r--drivers/ufs/host/ufs-mediatek.c (renamed from drivers/scsi/ufs/ufs-mediatek.c)6
-rw-r--r--drivers/ufs/host/ufs-mediatek.h (renamed from drivers/scsi/ufs/ufs-mediatek.h)0
-rw-r--r--drivers/ufs/host/ufs-qcom-ice.c (renamed from drivers/scsi/ufs/ufs-qcom-ice.c)0
-rw-r--r--drivers/ufs/host/ufs-qcom.c (renamed from drivers/scsi/ufs/ufs-qcom.c)8
-rw-r--r--drivers/ufs/host/ufs-qcom.h (renamed from drivers/scsi/ufs/ufs-qcom.h)2
-rw-r--r--drivers/ufs/host/ufshcd-dwc.c (renamed from drivers/scsi/ufs/ufshcd-dwc.c)4
-rw-r--r--drivers/ufs/host/ufshcd-dwc.h (renamed from drivers/scsi/ufs/ufshcd-dwc.h)2
-rw-r--r--drivers/ufs/host/ufshcd-pci.c (renamed from drivers/scsi/ufs/ufshcd-pci.c)2
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c (renamed from drivers/scsi/ufs/ufshcd-pltfrm.c)4
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.h (renamed from drivers/scsi/ufs/ufshcd-pltfrm.h)2
-rw-r--r--drivers/ufs/host/ufshci-dwc.h (renamed from drivers/scsi/ufs/ufshci-dwc.h)0
-rw-r--r--drivers/uio/uio_dfl.c2
-rw-r--r--drivers/usb/atm/usbatm.c2
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c6
-rw-r--r--drivers/usb/c67x00/c67x00-sched.c4
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c47
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.h9
-rw-r--r--drivers/usb/class/cdc-acm.h8
-rw-r--r--drivers/usb/core/devices.c47
-rw-r--r--drivers/usb/core/driver.c25
-rw-r--r--drivers/usb/core/hcd-pci.c5
-rw-r--r--drivers/usb/core/hcd.c29
-rw-r--r--drivers/usb/core/hub.c10
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/usb-acpi.c7
-rw-r--r--drivers/usb/dwc2/core.c9
-rw-r--r--drivers/usb/dwc2/core.h5
-rw-r--r--drivers/usb/dwc2/gadget.c1
-rw-r--r--drivers/usb/dwc2/params.c50
-rw-r--r--drivers/usb/dwc3/Kconfig9
-rw-r--r--drivers/usb/dwc3/Makefile1
-rw-r--r--drivers/usb/dwc3/core.c81
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/drd.c50
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c332
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c17
-rw-r--r--drivers/usb/dwc3/ep0.c14
-rw-r--r--drivers/usb/dwc3/gadget.c190
-rw-r--r--drivers/usb/dwc3/gadget.h2
-rw-r--r--drivers/usb/dwc3/host.c2
-rw-r--r--drivers/usb/gadget/composite.c2
-rw-r--r--drivers/usb/gadget/configfs.c2
-rw-r--r--drivers/usb/gadget/function/f_acm.c10
-rw-r--r--drivers/usb/gadget/function/f_uvc.c5
-rw-r--r--drivers/usb/gadget/function/u_audio.c4
-rw-r--r--drivers/usb/gadget/function/u_uvc.h1
-rw-r--r--drivers/usb/gadget/function/uvc.h1
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c189
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.h120
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c30
-rw-r--r--drivers/usb/gadget/function/uvc_queue.h3
-rw-r--r--drivers/usb/gadget/function/uvc_video.c17
-rw-r--r--drivers/usb/gadget/legacy/dbgp.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c4
-rw-r--r--drivers/usb/gadget/udc/core.c289
-rw-r--r--drivers/usb/gadget/udc/net2272.c6
-rw-r--r--drivers/usb/gadget/udc/net2280.c14
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c16
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c37
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.h7
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.h2
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c4
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c4
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c2
-rw-r--r--drivers/usb/host/ehci-omap.c5
-rw-r--r--drivers/usb/host/ehci-platform.c1
-rw-r--r--drivers/usb/host/ehci-q.c4
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c12
-rw-r--r--drivers/usb/host/fhci-hcd.c3
-rw-r--r--drivers/usb/host/fotg210-hcd.c2
-rw-r--r--drivers/usb/host/isp116x-hcd.c11
-rw-r--r--drivers/usb/host/isp1362-hcd.c6
-rw-r--r--drivers/usb/host/max3421-hcd.c6
-rw-r--r--drivers/usb/host/ohci-hcd.c3
-rw-r--r--drivers/usb/host/ohci-omap.c18
-rw-r--r--drivers/usb/host/ohci-platform.c1
-rw-r--r--drivers/usb/host/ohci-ppc-of.c3
-rw-r--r--drivers/usb/host/ohci-pxa27x.c3
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c16
-rw-r--r--drivers/usb/host/r8a66597-hcd.c3
-rw-r--r--drivers/usb/host/sl811-hcd.c6
-rw-r--r--drivers/usb/host/xhci-hub.c3
-rw-r--r--drivers/usb/host/xhci-mem.c23
-rw-r--r--drivers/usb/host/xhci-pci.c5
-rw-r--r--drivers/usb/host/xhci-plat.c46
-rw-r--r--drivers/usb/host/xhci-ring.c144
-rw-r--r--drivers/usb/host/xhci.c175
-rw-r--r--drivers/usb/host/xhci.h30
-rw-r--r--drivers/usb/isp1760/isp1760-core.c8
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c6
-rw-r--r--drivers/usb/misc/ftdi-elan.c15
-rw-r--r--drivers/usb/misc/lvstest.c2
-rw-r--r--drivers/usb/musb/mediatek.c73
-rw-r--r--drivers/usb/musb/omap2430.c1
-rw-r--r--drivers/usb/phy/phy-omap-otg.c4
-rw-r--r--drivers/usb/serial/ark3116.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/serial/pl2303.c3
-rw-r--r--drivers/usb/serial/whiteheat.c4
-rw-r--r--drivers/usb/storage/alauda.c4
-rw-r--r--drivers/usb/storage/isd200.c8
-rw-r--r--drivers/usb/storage/karma.c15
-rw-r--r--drivers/usb/storage/onetouch.c2
-rw-r--r--drivers/usb/storage/shuttle_usbat.c28
-rw-r--r--drivers/usb/storage/transport.c2
-rw-r--r--drivers/usb/typec/bus.c2
-rw-r--r--drivers/usb/typec/mux.c271
-rw-r--r--drivers/usb/typec/mux.h12
-rw-r--r--drivers/usb/typec/mux/Kconfig10
-rw-r--r--drivers/usb/typec/mux/Makefile1
-rw-r--r--drivers/usb/typec/mux/fsa4480.c218
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c29
-rw-r--r--drivers/usb/typec/mux/pi3usb30532.c8
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c4
-rw-r--r--drivers/usb/typec/tipd/core.c32
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c85
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h6
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c23
-rw-r--r--drivers/usb/usbip/stub_dev.c2
-rw-r--r--drivers/usb/usbip/stub_rx.c2
-rw-r--r--drivers/vdpa/alibaba/eni_vdpa.c2
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c23
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h2
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c489
-rw-r--r--drivers/vdpa/vdpa.c286
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c105
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.h3
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c169
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c3
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c161
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc.c1
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c16
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c236
-rw-r--r--drivers/vfio/pci/mlx5/cmd.h52
-rw-r--r--drivers/vfio/pci/mlx5/main.c136
-rw-r--r--drivers/vfio/pci/vfio_pci.c7
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c56
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c254
-rw-r--r--drivers/vfio/platform/vfio_amba.c1
-rw-r--r--drivers/vfio/platform/vfio_platform.c1
-rw-r--r--drivers/vfio/vfio.c1013
-rw-r--r--drivers/vfio/vfio_iommu_type1.c30
-rw-r--r--drivers/vhost/iotlb.c23
-rw-r--r--drivers/vhost/net.c11
-rw-r--r--drivers/vhost/scsi.c4
-rw-r--r--drivers/vhost/test.c14
-rw-r--r--drivers/vhost/vdpa.c271
-rw-r--r--drivers/vhost/vhost.c45
-rw-r--r--drivers/vhost/vhost.h7
-rw-r--r--drivers/vhost/vsock.c7
-rw-r--r--drivers/video/console/sticon.c5
-rw-r--r--drivers/video/console/sticore.c53
-rw-r--r--drivers/video/fbdev/amba-clcd.c5
-rw-r--r--drivers/video/fbdev/hyperv_fb.c42
-rw-r--r--drivers/video/fbdev/omap/omapfb.h4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c8
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c12
-rw-r--r--drivers/video/fbdev/pxa3xx-regs.h (renamed from arch/arm/mach-pxa/include/mach/regs-lcd.h)24
-rw-r--r--drivers/video/fbdev/pxafb.c4
-rw-r--r--drivers/video/fbdev/sticore.h3
-rw-r--r--drivers/video/fbdev/stifb.c4
-rw-r--r--drivers/video/fbdev/vesafb.c5
-rw-r--r--drivers/video/fbdev/xen-fbfront.c1
-rw-r--r--drivers/virt/fsl_hypervisor.c3
-rw-r--r--drivers/virtio/virtio.c32
-rw-r--r--drivers/virtio/virtio_balloon.c12
-rw-r--r--drivers/virtio/virtio_mmio.c27
-rw-r--r--drivers/virtio/virtio_pci_common.c15
-rw-r--r--drivers/virtio/virtio_pci_common.h10
-rw-r--r--drivers/virtio/virtio_pci_legacy.c11
-rw-r--r--drivers/virtio/virtio_pci_modern.c14
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c6
-rw-r--r--drivers/virtio/virtio_ring.c55
-rw-r--r--drivers/virtio/virtio_vdpa.c12
-rw-r--r--drivers/visorbus/Kconfig15
-rw-r--r--drivers/visorbus/Makefile10
-rw-r--r--drivers/visorbus/controlvmchannel.h650
-rw-r--r--drivers/visorbus/vbuschannel.h95
-rw-r--r--drivers/visorbus/visorbus_main.c1234
-rw-r--r--drivers/visorbus/visorbus_private.h48
-rw-r--r--drivers/visorbus/visorchannel.c434
-rw-r--r--drivers/visorbus/visorchipset.c1691
-rw-r--r--drivers/vme/Kconfig2
-rw-r--r--drivers/w1/masters/ds2490.c124
-rw-r--r--drivers/watchdog/Kconfig30
-rw-r--r--drivers/watchdog/Makefile3
-rw-r--r--drivers/watchdog/bcm7038_wdt.c1
-rw-r--r--drivers/watchdog/da9063_wdt.c36
-rw-r--r--drivers/watchdog/gxp-wdt.c174
-rw-r--r--drivers/watchdog/iTCO_wdt.c20
-rw-r--r--drivers/watchdog/mtk_wdt.c12
-rw-r--r--drivers/watchdog/rti_wdt.c10
-rw-r--r--drivers/watchdog/rzg2l_wdt.c83
-rw-r--r--drivers/watchdog/rzn1_wdt.c203
-rw-r--r--drivers/watchdog/sa1100_wdt.c88
-rw-r--r--drivers/watchdog/sp805_wdt.c1
-rw-r--r--drivers/watchdog/sunplus_wdt.c220
-rw-r--r--drivers/watchdog/ts4800_wdt.c5
-rw-r--r--drivers/watchdog/wdat_wdt.c6
-rw-r--r--drivers/xen/gntalloc.c9
-rw-r--r--drivers/xen/gntdev-dmabuf.c2
-rw-r--r--drivers/xen/grant-table.c14
-rw-r--r--drivers/xen/pvcalls-front.c6
-rw-r--r--drivers/xen/xen-front-pgdir-shbuf.c2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c8
-rw-r--r--fs/Kconfig.binfmt2
-rw-r--r--fs/afs/dir.c5
-rw-r--r--fs/ceph/addr.c42
-rw-r--r--fs/ceph/caps.c75
-rw-r--r--fs/ceph/inode.c35
-rw-r--r--fs/ceph/mds_client.c121
-rw-r--r--fs/ceph/mds_client.h2
-rw-r--r--fs/ceph/quota.c19
-rw-r--r--fs/ceph/super.c1
-rw-r--r--fs/ceph/super.h29
-rw-r--r--fs/ceph/xattr.c10
-rw-r--r--fs/cifs/Makefile4
-rw-r--r--fs/cifs/cifs_debug.c11
-rw-r--r--fs/cifs/cifs_swn.c4
-rw-r--r--fs/cifs/cifsencrypt.c8
-rw-r--r--fs/cifs/cifsfs.c12
-rw-r--r--fs/cifs/cifsfs.h5
-rw-r--r--fs/cifs/cifsglob.h148
-rw-r--r--fs/cifs/cifsproto.h9
-rw-r--r--fs/cifs/cifssmb.c5
-rw-r--r--fs/cifs/connect.c134
-rw-r--r--fs/cifs/dfs_cache.c96
-rw-r--r--fs/cifs/file.c13
-rw-r--r--fs/cifs/fs_context.c33
-rw-r--r--fs/cifs/fs_context.h4
-rw-r--r--fs/cifs/misc.c11
-rw-r--r--fs/cifs/readdir.c179
-rw-r--r--fs/cifs/sess.c39
-rw-r--r--fs/cifs/smb1ops.c6
-rw-r--r--fs/cifs/smb2inode.c7
-rw-r--r--fs/cifs/smb2misc.c12
-rw-r--r--fs/cifs/smb2ops.c41
-rw-r--r--fs/cifs/smb2pdu.c11
-rw-r--r--fs/cifs/smb2pdu.h22
-rw-r--r--fs/cifs/smb2transport.c7
-rw-r--r--fs/cifs/smbdirect.c6
-rw-r--r--fs/cifs/trace.h2
-rw-r--r--fs/cifs/transport.c48
-rw-r--r--fs/dax.c22
-rw-r--r--fs/erofs/fscache.c1
-rw-r--r--fs/erofs/inode.c5
-rw-r--r--fs/erofs/zdata.c167
-rw-r--r--fs/erofs/zdata.h50
-rw-r--r--fs/exec.c8
-rw-r--r--fs/exportfs/expfs.c5
-rw-r--r--fs/f2fs/checkpoint.c16
-rw-r--r--fs/f2fs/data.c213
-rw-r--r--fs/f2fs/debug.c18
-rw-r--r--fs/f2fs/dir.c3
-rw-r--r--fs/f2fs/f2fs.h133
-rw-r--r--fs/f2fs/file.c307
-rw-r--r--fs/f2fs/gc.c186
-rw-r--r--fs/f2fs/hash.c11
-rw-r--r--fs/f2fs/inline.c29
-rw-r--r--fs/f2fs/inode.c34
-rw-r--r--fs/f2fs/namei.c38
-rw-r--r--fs/f2fs/node.c29
-rw-r--r--fs/f2fs/node.h1
-rw-r--r--fs/f2fs/segment.c460
-rw-r--r--fs/f2fs/segment.h40
-rw-r--r--fs/f2fs/super.c88
-rw-r--r--fs/f2fs/verity.c2
-rw-r--r--fs/fat/fat.h14
-rw-r--r--fs/fat/fatent.c7
-rw-r--r--fs/fat/file.c14
-rw-r--r--fs/fat/inode.c19
-rw-r--r--fs/fat/misc.c78
-rw-r--r--fs/fat/namei_vfat.c4
-rw-r--r--fs/file.c110
-rw-r--r--fs/file_table.c9
-rw-r--r--fs/freevxfs/vxfs.h27
-rw-r--r--fs/freevxfs/vxfs_bmap.c26
-rw-r--r--fs/freevxfs/vxfs_dir.h27
-rw-r--r--fs/freevxfs/vxfs_extern.h27
-rw-r--r--fs/freevxfs/vxfs_fshead.c26
-rw-r--r--fs/freevxfs/vxfs_fshead.h27
-rw-r--r--fs/freevxfs/vxfs_immed.c26
-rw-r--r--fs/freevxfs/vxfs_inode.c26
-rw-r--r--fs/freevxfs/vxfs_inode.h27
-rw-r--r--fs/freevxfs/vxfs_lookup.c26
-rw-r--r--fs/freevxfs/vxfs_olt.c26
-rw-r--r--fs/freevxfs/vxfs_olt.h27
-rw-r--r--fs/freevxfs/vxfs_subr.c26
-rw-r--r--fs/freevxfs/vxfs_super.c26
-rw-r--r--fs/fsopen.c4
-rw-r--r--fs/fuse/dax.c4
-rw-r--r--fs/fuse/virtio_fs.c6
-rw-r--r--fs/hugetlbfs/inode.c23
-rw-r--r--fs/internal.h3
-rw-r--r--fs/io_uring.c356
-rw-r--r--fs/jffs2/erase.c6
-rw-r--r--fs/jffs2/fs.c1
-rw-r--r--fs/jfs/Makefile2
-rw-r--r--fs/jfs/inode.c18
-rw-r--r--fs/jfs/jfs_dmap.c71
-rw-r--r--fs/jfs/jfs_dtree.c298
-rw-r--r--fs/jfs/jfs_extent.c255
-rw-r--r--fs/jfs/jfs_logmgr.c8
-rw-r--r--fs/jfs/jfs_mount.c4
-rw-r--r--fs/jfs/jfs_txnmgr.c34
-rw-r--r--fs/jfs/jfs_xtree.c961
-rw-r--r--fs/jfs/jfs_xtree.h4
-rw-r--r--fs/kernfs/dir.c31
-rw-r--r--fs/kernfs/file.c47
-rw-r--r--fs/ksmbd/connection.c22
-rw-r--r--fs/ksmbd/connection.h27
-rw-r--r--fs/ksmbd/ksmbd_netlink.h3
-rw-r--r--fs/ksmbd/misc.c10
-rw-r--r--fs/ksmbd/smb2misc.c2
-rw-r--r--fs/ksmbd/smb2pdu.c126
-rw-r--r--fs/ksmbd/smb_common.c4
-rw-r--r--fs/ksmbd/smbacl.c1
-rw-r--r--fs/ksmbd/transport_ipc.c3
-rw-r--r--fs/ksmbd/transport_rdma.c363
-rw-r--r--fs/ksmbd/transport_rdma.h8
-rw-r--r--fs/namei.c89
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/nfs/file.c50
-rw-r--r--fs/nfs/filelayout/filelayout.c7
-rw-r--r--fs/nfs/fscache.c7
-rw-r--r--fs/nfs/internal.h1
-rw-r--r--fs/nfs/nfs4file.c4
-rw-r--r--fs/nfs/nfs4namespace.c9
-rw-r--r--fs/nfs/nfs4proc.c182
-rw-r--r--fs/nfs/nfs4state.c29
-rw-r--r--fs/nfs/nfs4xdr.c99
-rw-r--r--fs/nfs/pagelist.c3
-rw-r--r--fs/nfs/pnfs.c2
-rw-r--r--fs/nfs/unlink.c8
-rw-r--r--fs/nfs/write.c54
-rw-r--r--fs/ntfs/file.c4
-rw-r--r--fs/ntfs3/file.c12
-rw-r--r--fs/ntfs3/frecord.c10
-rw-r--r--fs/ntfs3/fslog.c12
-rw-r--r--fs/ntfs3/inode.c9
-rw-r--r--fs/ntfs3/super.c10
-rw-r--r--fs/ntfs3/xattr.c136
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c12
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c21
-rw-r--r--fs/ocfs2/dlmfs/userdlm.c17
-rw-r--r--fs/ocfs2/inode.c4
-rw-r--r--fs/ocfs2/journal.c33
-rw-r--r--fs/ocfs2/journal.h2
-rw-r--r--fs/ocfs2/quota_local.c10
-rw-r--r--fs/ocfs2/reservations.c4
-rw-r--r--fs/ocfs2/reservations.h9
-rw-r--r--fs/ocfs2/super.c180
-rw-r--r--fs/open.c24
-rw-r--r--fs/overlayfs/copy_up.c90
-rw-r--r--fs/overlayfs/dir.c147
-rw-r--r--fs/overlayfs/export.c5
-rw-r--r--fs/overlayfs/file.c43
-rw-r--r--fs/overlayfs/inode.c68
-rw-r--r--fs/overlayfs/namei.c53
-rw-r--r--fs/overlayfs/overlayfs.h232
-rw-r--r--fs/overlayfs/ovl_entry.h7
-rw-r--r--fs/overlayfs/readdir.c48
-rw-r--r--fs/overlayfs/super.c57
-rw-r--r--fs/overlayfs/util.c103
-rw-r--r--fs/pipe.c33
-rw-r--r--fs/proc/generic.c3
-rw-r--r--fs/proc/kcore.c14
-rw-r--r--fs/proc/proc_net.c3
-rw-r--r--fs/proc/vmcore.c130
-rw-r--r--fs/read_write.c16
-rw-r--r--fs/smbfs_common/smb2pdu.h108
-rw-r--r--fs/smbfs_common/smbfsctl.h6
-rw-r--r--fs/stat.c2
-rw-r--r--fs/sync.c9
-rw-r--r--fs/sysv/super.c4
-rw-r--r--fs/ubifs/budget.c7
-rw-r--r--fs/ubifs/xattr.c2
-rw-r--r--fs/xfs/libxfs/xfs_ag.c3
-rw-r--r--fs/xfs/libxfs/xfs_attr.c198
-rw-r--r--fs/xfs/libxfs/xfs_attr.h63
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c6
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.h6
-rw-r--r--fs/xfs/libxfs/xfs_btree.c63
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c11
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.h1
-rw-r--r--fs/xfs/libxfs/xfs_defer.c67
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h18
-rw-r--r--fs/xfs/libxfs/xfs_log_recover.h14
-rw-r--r--fs/xfs/libxfs/xfs_symlink_remote.c2
-rw-r--r--fs/xfs/scrub/scrub.c17
-rw-r--r--fs/xfs/xfs_acl.c3
-rw-r--r--fs/xfs/xfs_attr_item.c364
-rw-r--r--fs/xfs/xfs_attr_item.h22
-rw-r--r--fs/xfs/xfs_buf_item_recover.c66
-rw-r--r--fs/xfs/xfs_file.c2
-rw-r--r--fs/xfs/xfs_fsops.c7
-rw-r--r--fs/xfs/xfs_inode.c2
-rw-r--r--fs/xfs/xfs_ioctl.c3
-rw-r--r--fs/xfs/xfs_iops.c3
-rw-r--r--fs/xfs/xfs_log.c41
-rw-r--r--fs/xfs/xfs_log.h7
-rw-r--r--fs/xfs/xfs_log_priv.h3
-rw-r--r--fs/xfs/xfs_log_recover.c93
-rw-r--r--fs/xfs/xfs_message.h6
-rw-r--r--fs/xfs/xfs_mount.c1
-rw-r--r--fs/xfs/xfs_mount.h18
-rw-r--r--fs/xfs/xfs_qm.c9
-rw-r--r--fs/xfs/xfs_super.c20
-rw-r--r--fs/xfs/xfs_super.h1
-rw-r--r--fs/xfs/xfs_xattr.c79
-rw-r--r--fs/xfs/xfs_xattr.h13
-rw-r--r--include/acpi/acpi_bus.h12
-rw-r--r--include/asm-generic/compat.h113
-rw-r--r--include/clocksource/timer-xilinx.h73
-rw-r--r--include/crypto/sm4.h4
-rw-r--r--include/drm/drm_cache.h8
-rw-r--r--include/drm/drm_edid.h6
-rw-r--r--include/dt-bindings/clock/en7523-clk.h17
-rw-r--r--include/dt-bindings/clock/imx8mn-clock.h16
-rw-r--r--include/dt-bindings/clock/imx8mp-clock.h9
-rw-r--r--include/dt-bindings/clock/mt8186-clk.h445
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8976.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc8280xp.h496
-rw-r--r--include/dt-bindings/clock/samsung,exynosautov9.h14
-rw-r--r--include/dt-bindings/clock/ste-db8500-clkout.h17
-rw-r--r--include/dt-bindings/clock/stm32mp13-clks.h229
-rw-r--r--include/dt-bindings/clock/sun50i-h6-r-ccu.h1
-rw-r--r--include/dt-bindings/clock/sun50i-h616-ccu.h1
-rw-r--r--include/dt-bindings/interconnect/qcom,sc8180x.h7
-rw-r--r--include/dt-bindings/interconnect/qcom,sc8280xp.h232
-rw-r--r--include/dt-bindings/interconnect/qcom,sdx65.h67
-rw-r--r--include/dt-bindings/memory/mt8186-memory-port.h217
-rw-r--r--include/dt-bindings/memory/mt8195-memory-port.h408
-rw-r--r--include/dt-bindings/memory/mtk-memory-port.h2
-rw-r--r--include/dt-bindings/mfd/cros_ec.h18
-rw-r--r--include/dt-bindings/pinctrl/mt6795-pinfunc.h908
-rw-r--r--include/dt-bindings/reset/mt7986-resets.h55
-rw-r--r--include/dt-bindings/reset/mt8186-resets.h36
-rw-r--r--include/dt-bindings/reset/stm32mp13-resets.h100
-rw-r--r--include/linux/acpi.h45
-rw-r--r--include/linux/amba/bus.h14
-rw-r--r--include/linux/bitmap.h56
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/blk_types.h4
-rw-r--r--include/linux/blkdev.h1
-rw-r--r--include/linux/bootconfig.h10
-rw-r--r--include/linux/clk/pxa.h16
-rw-r--r--include/linux/compat.h68
-rw-r--r--include/linux/context_tracking_state.h8
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/crash_dump.h19
-rw-r--r--include/linux/dax.h22
-rw-r--r--include/linux/device-mapper.h13
-rw-r--r--include/linux/device.h121
-rw-r--r--include/linux/device/bus.h3
-rw-r--r--include/linux/device/driver.h2
-rw-r--r--include/linux/dmaengine.h9
-rw-r--r--include/linux/export.h7
-rw-r--r--include/linux/extcon.h2
-rw-r--r--include/linux/fdtable.h2
-rw-r--r--include/linux/file.h2
-rw-r--r--include/linux/find.h6
-rw-r--r--include/linux/firmware.h82
-rw-r--r--include/linux/firmware/xlnx-event-manager.h4
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h11
-rw-r--r--include/linux/fpga/fpga-region.h6
-rw-r--r--include/linux/fs.h15
-rw-r--r--include/linux/fsl/mc.h14
-rw-r--r--include/linux/ftrace.h4
-rw-r--r--include/linux/gpio/consumer.h16
-rw-r--r--include/linux/gpio/driver.h22
-rw-r--r--include/linux/gpio/machine.h12
-rw-r--r--include/linux/hisi_acc_qm.h23
-rw-r--r--include/linux/host1x_context_bus.h15
-rw-r--r--include/linux/hte.h271
-rw-r--r--include/linux/hugetlb.h5
-rw-r--r--include/linux/hyperv.h103
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h38
-rw-r--r--include/linux/iio/common/st_sensors.h3
-rw-r--r--include/linux/iio/iio-opaque.h4
-rw-r--r--include/linux/iio/iio.h70
-rw-r--r--include/linux/iio/kfifo_buf.h5
-rw-r--r--include/linux/intel-iommu.h3
-rw-r--r--include/linux/intel-svm.h2
-rw-r--r--include/linux/iommu.h69
-rw-r--r--include/linux/ipc_namespace.h37
-rw-r--r--include/linux/ipv6.h2
-rw-r--r--include/linux/jump_label.h4
-rw-r--r--include/linux/kexec.h48
-rw-r--r--include/linux/kprobes.h2
-rw-r--r--include/linux/list.h6
-rw-r--r--include/linux/livepatch.h2
-rw-r--r--include/linux/lockdep.h6
-rw-r--r--include/linux/mfd/hi655x-pmic.h4
-rw-r--r--include/linux/mfd/mt6359/registers.h2
-rw-r--r--include/linux/mfd/tc6393xb.h3
-rw-r--r--include/linux/mfd/tps65218.h2
-rw-r--r--include/linux/mhi_ep.h277
-rw-r--r--include/linux/mlx5/driver.h12
-rw-r--r--include/linux/mlx5/mlx5_ifc.h6
-rw-r--r--include/linux/mlx5/mlx5_ifc_vdpa.h39
-rw-r--r--include/linux/mm.h9
-rw-r--r--include/linux/mod_devicetable.h2
-rw-r--r--include/linux/mount.h29
-rw-r--r--include/linux/namei.h6
-rw-r--r--include/linux/nfs4.h2
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/nfs_xdr.h12
-rw-r--r--include/linux/nodemask.h38
-rw-r--r--include/linux/notifier.h7
-rw-r--r--include/linux/nvme-fc-driver.h14
-rw-r--r--include/linux/nvmem-consumer.h1
-rw-r--r--include/linux/of_irq.h6
-rw-r--r--include/linux/pci.h18
-rw-r--r--include/linux/phy/phy-lvds.h32
-rw-r--r--include/linux/phy/phy.h4
-rw-r--r--include/linux/pipe_fs_i.h2
-rw-r--r--include/linux/platform_data/asoc-poodle.h16
-rw-r--r--include/linux/platform_data/asoc-pxa.h (renamed from arch/arm/mach-pxa/include/mach/audio.h)4
-rw-r--r--include/linux/platform_data/video-pxafb.h22
-rw-r--r--include/linux/platform_device.h16
-rw-r--r--include/linux/pm.h10
-rw-r--r--include/linux/pm_opp.h41
-rw-r--r--include/linux/property.h5
-rw-r--r--include/linux/ptrace.h7
-rw-r--r--include/linux/reboot.h91
-rw-r--r--include/linux/rpmsg.h14
-rw-r--r--include/linux/rtsx_pci.h3
-rw-r--r--include/linux/sched.h10
-rw-r--r--include/linux/sched/jobctl.h8
-rw-r--r--include/linux/sched/mm.h11
-rw-r--r--include/linux/sched/signal.h20
-rw-r--r--include/linux/sched/task.h8
-rw-r--r--include/linux/serial_core.h1
-rw-r--r--include/linux/serial_s3c.h3
-rw-r--r--include/linux/set_memory.h10
-rw-r--r--include/linux/signal.h3
-rw-r--r--include/linux/siphash.h5
-rw-r--r--include/linux/skbuff.h9
-rw-r--r--include/linux/soc/pxa/cpu.h (renamed from arch/arm/mach-pxa/include/mach/hardware.h)61
-rw-r--r--include/linux/soc/pxa/mfp.h (renamed from arch/arm/plat-pxa/include/plat/mfp.h)6
-rw-r--r--include/linux/soc/pxa/smemc.h13
-rw-r--r--include/linux/soc/renesas/r9a06g032-sysctrl.h11
-rw-r--r--include/linux/spi/spi.h2
-rw-r--r--include/linux/swap.h7
-rw-r--r--include/linux/swapops.h10
-rw-r--r--include/linux/thunderbolt.h21
-rw-r--r--include/linux/usb.h17
-rw-r--r--include/linux/usb/gadget.h28
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/linux/usb/typec_mux.h22
-rw-r--r--include/linux/vdpa.h65
-rw-r--r--include/linux/vfio.h44
-rw-r--r--include/linux/vfio_pci_core.h3
-rw-r--r--include/linux/vhost_iotlb.h2
-rw-r--r--include/linux/virtio.h1
-rw-r--r--include/linux/virtio_config.h47
-rw-r--r--include/linux/wm97xx.h4
-rw-r--r--include/net/amt.h2
-rw-r--r--include/net/ax25.h1
-rw-r--r--include/net/bonding.h6
-rw-r--r--include/net/netfilter/nf_conntrack_core.h7
-rw-r--r--include/net/sch_generic.h42
-rw-r--r--include/pcmcia/soc_common.h125
-rw-r--r--include/sound/pxa2xx-lib.h4
-rw-r--r--include/trace/events/f2fs.h141
-rw-r--r--include/trace/events/thermal_pressure.h29
-rw-r--r--include/uapi/asm-generic/fcntl.h23
-rw-r--r--include/uapi/asm-generic/termbits-common.h65
-rw-r--r--include/uapi/asm-generic/termbits.h239
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/drm/amdgpu_drm.h8
-rw-r--r--include/uapi/linux/acct.h3
-rw-r--r--include/uapi/linux/android/binder.h18
-rw-r--r--include/uapi/linux/audit.h2
-rw-r--r--include/uapi/linux/cxl_mem.h14
-rw-r--r--include/uapi/linux/elf-em.h1
-rw-r--r--include/uapi/linux/elf.h7
-rw-r--r--include/uapi/linux/gpio.h3
-rw-r--r--include/uapi/linux/idxd.h31
-rw-r--r--include/uapi/linux/io_uring.h6
-rw-r--r--include/uapi/linux/ipv6.h2
-rw-r--r--include/uapi/linux/kexec.h1
-rw-r--r--include/uapi/linux/pci_regs.h1
-rw-r--r--include/uapi/linux/socket.h2
-rw-r--r--include/uapi/linux/taskstats.h24
-rw-r--r--include/uapi/linux/vdpa.h6
-rw-r--r--include/uapi/linux/vfio.h4
-rw-r--r--include/uapi/linux/vhost.h26
-rw-r--r--include/uapi/linux/vhost_types.h11
-rw-r--r--include/uapi/misc/habanalabs.h87
-rw-r--r--include/ufs/ufs.h (renamed from drivers/scsi/ufs/ufs.h)0
-rw-r--r--include/ufs/ufs_quirks.h (renamed from drivers/scsi/ufs/ufs_quirks.h)0
-rw-r--r--include/ufs/ufshcd.h (renamed from drivers/scsi/ufs/ufshcd.h)8
-rw-r--r--include/ufs/ufshci.h (renamed from drivers/scsi/ufs/ufshci.h)0
-rw-r--r--include/ufs/unipro.h (renamed from drivers/scsi/ufs/unipro.h)0
-rw-r--r--include/video/radeon.h2
-rw-r--r--include/xen/arm/page.h3
-rw-r--r--include/xen/grant_table.h6
-rw-r--r--init/Kconfig93
-rw-r--r--init/initramfs.c78
-rw-r--r--init/main.c40
-rw-r--r--ipc/ipc_sysctl.c205
-rw-r--r--ipc/mq_sysctl.c121
-rw-r--r--ipc/mqueue.c24
-rw-r--r--ipc/namespace.c10
-rw-r--r--ipc/sem.c25
-rw-r--r--kernel/bpf/core.c14
-rw-r--r--kernel/crash_core.c3
-rw-r--r--kernel/events/core.c1
-rw-r--r--kernel/fork.c46
-rw-r--r--kernel/hung_task.c2
-rw-r--r--kernel/kcov.c14
-rw-r--r--kernel/kexec_core.c2
-rw-r--r--kernel/kexec_file.c38
-rw-r--r--kernel/kprobes.c144
-rw-r--r--kernel/livepatch/patch.c2
-rw-r--r--kernel/module/signing.c3
-rw-r--r--kernel/notifier.c101
-rw-r--r--kernel/pid_namespace.c2
-rw-r--r--kernel/power/hibernate.c2
-rw-r--r--kernel/power/main.c5
-rw-r--r--kernel/power/suspend.c3
-rw-r--r--kernel/printk/printk.c2
-rw-r--r--kernel/ptrace.c93
-rw-r--r--kernel/reboot.c348
-rw-r--r--kernel/relay.c2
-rw-r--r--kernel/sched/core.c5
-rw-r--r--kernel/sched/fair.c2
-rw-r--r--kernel/signal.c140
-rw-r--r--kernel/taskstats.c24
-rw-r--r--kernel/time/posix-cpu-timers.c6
-rw-r--r--kernel/trace/Makefile4
-rw-r--r--kernel/trace/fgraph.c2
-rw-r--r--kernel/trace/ftrace.c210
-rw-r--r--kernel/trace/pid_list.c4
-rw-r--r--kernel/trace/ring_buffer.c81
-rw-r--r--kernel/trace/trace.c72
-rw-r--r--kernel/trace/trace.h26
-rw-r--r--kernel/trace/trace_boot.c2
-rw-r--r--kernel/trace/trace_dynevent.c9
-rw-r--r--kernel/trace/trace_eprobe.c24
-rw-r--r--kernel/trace/trace_events.c69
-rw-r--r--kernel/trace/trace_events_filter.c2
-rw-r--r--kernel/trace/trace_events_hist.c191
-rw-r--r--kernel/trace/trace_events_trigger.c324
-rw-r--r--kernel/trace/trace_kprobe.c15
-rw-r--r--kernel/trace/trace_osnoise.c22
-rw-r--r--kernel/trace/trace_output.c25
-rw-r--r--kernel/trace/trace_recursion_record.c7
-rw-r--r--kernel/trace/trace_selftest.c3
-rw-r--r--kernel/trace/trace_syscalls.c35
-rw-r--r--kernel/trace/tracing_map.c3
-rw-r--r--kernel/tsacct.c10
-rw-r--r--kernel/umh.c6
-rw-r--r--kernel/usermode_driver.c4
-rw-r--r--kernel/watchdog.c4
-rw-r--r--lib/.gitignore1
-rw-r--r--lib/Kconfig.debug79
-rw-r--r--lib/Makefile10
-rw-r--r--lib/assoc_array.c8
-rw-r--r--lib/bitmap.c117
-rw-r--r--lib/bootconfig-data.S10
-rw-r--r--lib/bootconfig.c13
-rw-r--r--lib/crypto/Kconfig6
-rw-r--r--lib/crypto/Makefile6
-rw-r--r--lib/glob.c2
-rw-r--r--lib/nodemask.c4
-rw-r--r--lib/siphash.c5
-rw-r--r--lib/string.c25
-rw-r--r--lib/string_helpers.c3
-rw-r--r--lib/test_bitmap.c25
-rw-r--r--lib/test_firmware.c381
-rw-r--r--lib/test_meminit.c12
-rw-r--r--lib/test_siphash.c7
-rw-r--r--lib/test_string.c33
-rw-r--r--mm/Kconfig56
-rw-r--r--mm/Kconfig.debug33
-rw-r--r--mm/cma.c4
-rw-r--r--mm/fadvise.c11
-rw-r--r--mm/hugetlb.c9
-rw-r--r--mm/internal.h4
-rw-r--r--mm/kasan/report.c2
-rw-r--r--mm/madvise.c18
-rw-r--r--mm/memory.c5
-rw-r--r--mm/mmap.c35
-rw-r--r--mm/page_alloc.c36
-rw-r--r--mm/page_isolation.c36
-rw-r--r--mm/page_table_check.c2
-rw-r--r--mm/readahead.c7
-rw-r--r--mm/shmem.c41
-rw-r--r--mm/swap_state.c3
-rw-r--r--mm/swapfile.c21
-rw-r--r--mm/util.c2
-rw-r--r--mm/vmstat.c4
-rw-r--r--mm/z3fold.c97
-rw-r--r--mm/zsmalloc.c37
-rw-r--r--net/9p/trans_xen.c8
-rw-r--r--net/Kconfig.debug2
-rw-r--r--net/ax25/af_ax25.c27
-rw-r--r--net/ax25/ax25_dev.c1
-rw-r--r--net/ax25/ax25_subr.c2
-rw-r--r--net/ceph/crush/mapper.c5
-rw-r--r--net/core/neighbour.c2
-rw-r--r--net/ipv4/tcp_input.c11
-rw-r--r--net/ipv4/tcp_ipv4.c4
-rw-r--r--net/ipv4/tcp_output.c4
-rw-r--r--net/ipv6/addrconf.c6
-rw-r--r--net/ipv6/ndisc.c42
-rw-r--r--net/ipv6/ping.c8
-rw-r--r--net/key/af_key.c10
-rw-r--r--net/mac80211/chan.c7
-rw-r--r--net/netfilter/nf_tables_api.c106
-rw-r--r--net/netfilter/nfnetlink.c24
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c5
-rw-r--r--net/netfilter/nft_flow_offload.c6
-rw-r--r--net/netfilter/nft_limit.c2
-rw-r--r--net/nfc/core.c4
-rw-r--r--net/packet/af_packet.c6
-rw-r--r--net/sched/act_ct.c2
-rw-r--r--net/smc/af_smc.c1
-rw-r--r--net/smc/smc_cdc.c2
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c5
-rw-r--r--net/tipc/bearer.c3
-rw-r--r--net/vmw_vsock/hyperv_transport.c21
-rw-r--r--net/xfrm/xfrm_output.c3
-rw-r--r--scripts/Kbuild.include10
-rw-r--r--scripts/Makefile.build110
-rw-r--r--scripts/Makefile.lib31
-rw-r--r--scripts/Makefile.modfinal5
-rw-r--r--scripts/Makefile.modpost12
-rw-r--r--scripts/Makefile.vmlinux_o87
-rwxr-xr-xscripts/bloat-o-meter1
-rwxr-xr-xscripts/check-local-export65
-rwxr-xr-xscripts/decode_stacktrace.sh27
-rwxr-xr-xscripts/get_abi.pl4
-rwxr-xr-xscripts/get_maintainer.pl1
-rw-r--r--scripts/kallsyms.c2
-rw-r--r--scripts/kconfig/nconf.c8
-rwxr-xr-xscripts/link-vmlinux.sh126
-rw-r--r--scripts/mod/file2alias.c12
-rw-r--r--scripts/mod/modpost.c161
-rw-r--r--scripts/mod/modpost.h3
-rwxr-xr-xscripts/objdiff6
-rw-r--r--scripts/sorttable.c5
-rw-r--r--scripts/spdxcheck-test.sh2
-rwxr-xr-xscripts/spdxcheck.py175
-rw-r--r--scripts/spdxexclude18
-rw-r--r--scripts/subarch.include2
-rwxr-xr-xscripts/tags.sh11
-rw-r--r--security/smack/smackfs.c1
-rw-r--r--sound/arm/pxa2xx-ac97-lib.c145
-rw-r--r--sound/arm/pxa2xx-ac97-regs.h (renamed from arch/arm/mach-pxa/include/mach/regs-ac97.h)42
-rw-r--r--sound/arm/pxa2xx-ac97.c3
-rw-r--r--sound/core/Makefile2
-rw-r--r--sound/isa/Kconfig2
-rw-r--r--sound/pci/hda/patch_realtek.c11
-rw-r--r--sound/pci/hda/patch_via.c2
-rw-r--r--sound/soc/amd/acp/acp-pci.c1
-rw-r--r--sound/soc/codecs/da7219-aad.c18
-rw-r--r--sound/soc/codecs/rt5640.c11
-rw-r--r--sound/soc/codecs/rt5640.h2
-rw-r--r--sound/soc/fsl/fsl_sai.h4
-rw-r--r--sound/soc/intel/avs/board_selection.c3
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-adl-match.c2
-rw-r--r--sound/soc/pxa/corgi.c43
-rw-r--r--sound/soc/pxa/e740_wm9705.c37
-rw-r--r--sound/soc/pxa/e750_wm9705.c33
-rw-r--r--sound/soc/pxa/e800_wm9712.c33
-rw-r--r--sound/soc/pxa/em-x270.c2
-rw-r--r--sound/soc/pxa/hx4700.c37
-rw-r--r--sound/soc/pxa/magician.c141
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c2
-rw-r--r--sound/soc/pxa/palm27x.c2
-rw-r--r--sound/soc/pxa/poodle.c51
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c24
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c112
-rw-r--r--sound/soc/pxa/spitz.c58
-rw-r--r--sound/soc/pxa/tosa.c25
-rw-r--r--sound/soc/pxa/z2.c8
-rw-r--r--sound/soc/soc-pcm.c2
-rw-r--r--sound/usb/clock.c12
-rw-r--r--sound/usb/line6/pcm.c4
-rw-r--r--sound/usb/midi.c7
-rw-r--r--sound/usb/mixer_maps.c30
-rw-r--r--sound/usb/usx2y/usb_stream.c6
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c2
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c2
-rw-r--r--tools/accounting/.gitignore1
-rw-r--r--tools/accounting/Makefile2
-rw-r--r--tools/accounting/procacct.c417
-rw-r--r--tools/arch/arm64/include/uapi/asm/perf_regs.h7
-rw-r--r--tools/arch/x86/include/asm/msr-index.h19
-rw-r--r--tools/build/Makefile.feature4
-rw-r--r--tools/build/feature/Makefile20
-rw-r--r--tools/build/feature/test-libbpf-bpf_map_create.c8
-rw-r--r--tools/build/feature/test-libbpf-bpf_object__next_map.c8
-rw-r--r--tools/build/feature/test-libbpf-bpf_object__next_program.c8
-rw-r--r--tools/build/feature/test-libbpf-bpf_prog_load.c9
-rw-r--r--tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c5
-rw-r--r--tools/build/feature/test-libbpf-btf__raw_data.c8
-rw-r--r--tools/gpio/gpio-event-mon.c6
-rw-r--r--tools/include/linux/bitmap.h17
-rw-r--r--tools/include/uapi/asm-generic/fcntl.h21
-rw-r--r--tools/include/uapi/asm-generic/unistd.h4
-rw-r--r--tools/include/uapi/asm/bitsperlong.h2
-rw-r--r--tools/lib/bitmap.c20
-rw-r--r--tools/lib/perf/evlist.c71
-rw-r--r--tools/lib/perf/include/internal/evsel.h11
-rw-r--r--tools/objtool/check.c4
-rw-r--r--tools/perf/Documentation/perf-record.txt12
-rw-r--r--tools/perf/Documentation/perf-stat.txt2
-rw-r--r--tools/perf/Documentation/perf-top.txt2
-rw-r--r--tools/perf/Makefile.config25
-rw-r--r--tools/perf/Makefile.perf1
-rw-r--r--tools/perf/arch/arm64/util/mem-events.c6
-rw-r--r--tools/perf/arch/arm64/util/perf_regs.c38
-rw-r--r--tools/perf/arch/arm64/util/unwind-libunwind.c73
-rw-r--r--tools/perf/arch/x86/util/evsel.c5
-rw-r--r--tools/perf/arch/x86/util/evsel.h7
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c31
-rw-r--r--tools/perf/arch/x86/util/topdown.c21
-rw-r--r--tools/perf/builtin-c2c.c10
-rw-r--r--tools/perf/builtin-lock.c2
-rw-r--r--tools/perf/builtin-record.c64
-rw-r--r--tools/perf/builtin-stat.c5
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z10/basic.json48
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z10/crypto.json64
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z10/extended.json36
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/basic.json48
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/crypto.json64
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/extended.json100
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/basic.json32
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/crypto.json64
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/extended.json102
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z15/basic.json32
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z15/crypto.json114
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json112
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z15/extended.json108
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z16/basic.json58
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z16/crypto6.json142
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z16/extended.json492
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z16/transaction.json7
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z196/basic.json48
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z196/crypto.json64
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z196/extended.json44
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/basic.json48
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json64
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/extended.json66
-rw-r--r--tools/perf/pmu-events/arch/s390/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json163
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json530
-rw-r--r--tools/perf/pmu-events/jevents.c2
-rwxr-xr-xtools/perf/scripts/python/arm-cs-trace-disasm.py272
-rw-r--r--tools/perf/tests/shell/lib/perf_csv_output_lint.py48
-rwxr-xr-xtools/perf/tests/shell/record_offcpu.sh60
-rwxr-xr-xtools/perf/tests/shell/stat+csv_output.sh147
-rwxr-xr-xtools/perf/tests/shell/test_arm_spe_fork.sh92
-rwxr-xr-xtools/perf/tests/shell/test_intel_pt.sh71
-rw-r--r--tools/perf/util/Build1
-rw-r--r--tools/perf/util/auxtrace.c15
-rw-r--r--tools/perf/util/auxtrace.h13
-rw-r--r--tools/perf/util/bpf-event.c24
-rw-r--r--tools/perf/util/bpf_counter.c6
-rw-r--r--tools/perf/util/bpf_off_cpu.c338
-rw-r--r--tools/perf/util/bpf_skel/off_cpu.bpf.c229
-rw-r--r--tools/perf/util/dso.h2
-rw-r--r--tools/perf/util/evlist.c61
-rw-r--r--tools/perf/util/evlist.h5
-rw-r--r--tools/perf/util/evsel.c7
-rw-r--r--tools/perf/util/libunwind/arm64.c2
-rw-r--r--tools/perf/util/mmap.c4
-rw-r--r--tools/perf/util/off_cpu.h29
-rw-r--r--tools/perf/util/parse-events.c2
-rw-r--r--tools/perf/util/perf_regs.c2
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c21
-rw-r--r--tools/perf/util/unwind-libunwind-local.c105
-rw-r--r--tools/testing/crypto/chacha20-s390/Makefile12
-rw-r--r--tools/testing/crypto/chacha20-s390/run-tests.sh34
-rw-r--r--tools/testing/crypto/chacha20-s390/test-cipher.c372
-rw-r--r--tools/testing/cxl/Kbuild3
-rw-r--r--tools/testing/cxl/mock_mem.c10
-rw-r--r--tools/testing/cxl/test/mem.c17
-rw-r--r--tools/testing/cxl/test/mock.c29
-rw-r--r--tools/testing/memblock/TODO3
-rw-r--r--tools/testing/memblock/tests/basic_api.c392
-rw-r--r--tools/testing/nvdimm/pmem-dax.c4
-rw-r--r--tools/testing/nvdimm/test/iomap.c18
-rw-r--r--tools/testing/nvdimm/test/nfit.c3
-rw-r--r--tools/testing/selftests/alsa/Makefile3
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c2
-rw-r--r--tools/testing/selftests/cgroup/memcg_protection.m89
-rw-r--r--tools/testing/selftests/cgroup/test_memcontrol.c247
-rw-r--r--tools/testing/selftests/filesystems/binderfs/binderfs_test.c1
-rw-r--r--tools/testing/selftests/firmware/Makefile2
-rw-r--r--tools/testing/selftests/firmware/config1
-rwxr-xr-xtools/testing/selftests/firmware/fw_filesystem.sh170
-rwxr-xr-xtools/testing/selftests/firmware/fw_lib.sh19
-rwxr-xr-xtools/testing/selftests/firmware/fw_run_tests.sh4
-rwxr-xr-xtools/testing/selftests/firmware/fw_upload.sh214
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc2
-rw-r--r--tools/testing/selftests/lkdtm/config4
-rw-r--r--tools/testing/selftests/lkdtm/tests.txt9
-rwxr-xr-xtools/testing/selftests/net/ndisc_unsolicited_na_test.sh23
-rw-r--r--tools/testing/selftests/net/psock_snd.c2
-rw-r--r--tools/testing/selftests/powerpc/include/utils.h5
-rw-r--r--tools/testing/selftests/powerpc/math/Makefile4
-rw-r--r--tools/testing/selftests/powerpc/math/mma.S33
-rw-r--r--tools/testing/selftests/powerpc/math/mma.c48
-rw-r--r--tools/testing/selftests/powerpc/mm/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/mm/Makefile4
-rw-r--r--tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c156
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S43
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c2
-rw-r--r--tools/testing/selftests/powerpc/security/spectre_v2.c32
-rw-r--r--tools/tracing/rtla/Makefile40
-rw-r--r--tools/tracing/rtla/README.txt13
-rw-r--r--tools/tracing/rtla/src/osnoise_hist.c5
-rw-r--r--tools/tracing/rtla/src/osnoise_top.c9
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c11
-rw-r--r--tools/tracing/rtla/src/timerlat_top.c11
-rw-r--r--tools/tracing/rtla/src/utils.c108
-rw-r--r--tools/tracing/rtla/src/utils.h3
-rw-r--r--tools/usb/testusb.c2
-rw-r--r--usr/gen_init_cpio.c92
-rw-r--r--virt/kvm/vfio.c329
4159 files changed, 146228 insertions, 62026 deletions
diff --git a/.clang-format b/.clang-format
index fa959436bcfd..9b87ea1fc16e 100644
--- a/.clang-format
+++ b/.clang-format
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
#
-# clang-format configuration file. Intended for clang-format >= 4.
+# clang-format configuration file. Intended for clang-format >= 11.
#
# For more information, see:
#
@@ -13,7 +13,7 @@ AccessModifierOffset: -4
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
-#AlignEscapedNewlines: Left # Unknown to clang-format-4.0
+AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: false
AllowAllParametersOfDeclarationOnNextLine: false
@@ -37,24 +37,24 @@ BraceWrapping:
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
- #AfterExternBlock: false # Unknown to clang-format-5.0
+ AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
- #SplitEmptyFunction: true # Unknown to clang-format-4.0
- #SplitEmptyRecord: true # Unknown to clang-format-4.0
- #SplitEmptyNamespace: true # Unknown to clang-format-4.0
+ SplitEmptyFunction: true
+ SplitEmptyRecord: true
+ SplitEmptyNamespace: true
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Custom
-#BreakBeforeInheritanceComma: false # Unknown to clang-format-4.0
+BreakBeforeInheritanceComma: false
BreakBeforeTernaryOperators: false
BreakConstructorInitializersBeforeComma: false
-#BreakConstructorInitializers: BeforeComma # Unknown to clang-format-4.0
+BreakConstructorInitializers: BeforeComma
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: false
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
-#CompactNamespaces: false # Unknown to clang-format-4.0
+CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 8
ContinuationIndentWidth: 8
@@ -62,39 +62,56 @@ Cpp11BracedListStyle: false
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
-#FixNamespaceComments: false # Unknown to clang-format-4.0
+FixNamespaceComments: false
# Taken from:
-# git grep -h '^#define [^[:space:]]*for_each[^[:space:]]*(' include/ \
+# git grep -h '^#define [^[:space:]]*for_each[^[:space:]]*(' include/ tools/ \
# | sed "s,^#define \([^[:space:]]*for_each[^[:space:]]*\)(.*$, - '\1'," \
-# | sort | uniq
+# | LC_ALL=C sort -u
ForEachMacros:
+ - '__ata_qc_for_each'
+ - '__bio_for_each_bvec'
+ - '__bio_for_each_segment'
+ - '__evlist__for_each_entry'
+ - '__evlist__for_each_entry_continue'
+ - '__evlist__for_each_entry_from'
+ - '__evlist__for_each_entry_reverse'
+ - '__evlist__for_each_entry_safe'
+ - '__for_each_mem_range'
+ - '__for_each_mem_range_rev'
+ - '__for_each_thread'
+ - '__hlist_for_each_rcu'
+ - '__map__for_each_symbol_by_name'
+ - '__perf_evlist__for_each_entry'
+ - '__perf_evlist__for_each_entry_reverse'
+ - '__perf_evlist__for_each_entry_safe'
+ - '__rq_for_each_bio'
+ - '__shost_for_each_device'
- 'apei_estatus_for_each_section'
- 'ata_for_each_dev'
- 'ata_for_each_link'
- - '__ata_qc_for_each'
- 'ata_qc_for_each'
- 'ata_qc_for_each_raw'
- 'ata_qc_for_each_with_internal'
- 'ax25_for_each'
- 'ax25_uid_for_each'
- - '__bio_for_each_bvec'
- 'bio_for_each_bvec'
- 'bio_for_each_bvec_all'
+ - 'bio_for_each_folio_all'
- 'bio_for_each_integrity_vec'
- - '__bio_for_each_segment'
- 'bio_for_each_segment'
- 'bio_for_each_segment_all'
- 'bio_list_for_each'
- 'bip_for_each_vec'
- - 'bitmap_for_each_clear_region'
- - 'bitmap_for_each_set_region'
- - 'blkg_for_each_descendant_post'
- - 'blkg_for_each_descendant_pre'
- - 'blk_queue_for_each_rl'
- 'bond_for_each_slave'
- 'bond_for_each_slave_rcu'
+ - 'bpf__perf_for_each_map'
+ - 'bpf__perf_for_each_map_named'
- 'bpf_for_each_spilled_reg'
+ - 'bpf_object__for_each_map'
+ - 'bpf_object__for_each_program'
+ - 'bpf_object__for_each_safe'
+ - 'bpf_perf_object__for_each'
- 'btree_for_each_safe128'
- 'btree_for_each_safe32'
- 'btree_for_each_safe64'
@@ -102,6 +119,7 @@ ForEachMacros:
- 'card_for_each_dev'
- 'cgroup_taskset_for_each'
- 'cgroup_taskset_for_each_leader'
+ - 'cpufreq_for_each_efficient_entry_idx'
- 'cpufreq_for_each_entry'
- 'cpufreq_for_each_entry_idx'
- 'cpufreq_for_each_valid_entry'
@@ -109,9 +127,22 @@ ForEachMacros:
- 'css_for_each_child'
- 'css_for_each_descendant_post'
- 'css_for_each_descendant_pre'
+ - 'damon_for_each_region'
+ - 'damon_for_each_region_safe'
+ - 'damon_for_each_scheme'
+ - 'damon_for_each_scheme_safe'
+ - 'damon_for_each_target'
+ - 'damon_for_each_target_safe'
+ - 'data__for_each_file'
+ - 'data__for_each_file_new'
+ - 'data__for_each_file_start'
- 'device_for_each_child_node'
- 'displayid_iter_for_each'
+ - 'dma_fence_array_for_each'
- 'dma_fence_chain_for_each'
+ - 'dma_fence_unwrap_for_each'
+ - 'dma_resv_for_each_fence'
+ - 'dma_resv_for_each_fence_unlocked'
- 'do_for_each_ftrace_op'
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
@@ -135,6 +166,25 @@ ForEachMacros:
- 'drm_mm_for_each_node'
- 'drm_mm_for_each_node_in_range'
- 'drm_mm_for_each_node_safe'
+ - 'dsa_switch_for_each_available_port'
+ - 'dsa_switch_for_each_cpu_port'
+ - 'dsa_switch_for_each_port'
+ - 'dsa_switch_for_each_port_continue_reverse'
+ - 'dsa_switch_for_each_port_safe'
+ - 'dsa_switch_for_each_user_port'
+ - 'dsa_tree_for_each_user_port'
+ - 'dso__for_each_symbol'
+ - 'dsos__for_each_with_build_id'
+ - 'elf_hash_for_each_possible'
+ - 'elf_section__for_each_rel'
+ - 'elf_section__for_each_rela'
+ - 'elf_symtab__for_each_symbol'
+ - 'evlist__for_each_cpu'
+ - 'evlist__for_each_entry'
+ - 'evlist__for_each_entry_continue'
+ - 'evlist__for_each_entry_from'
+ - 'evlist__for_each_entry_reverse'
+ - 'evlist__for_each_entry_safe'
- 'flow_action_for_each'
- 'for_each_acpi_dev_match'
- 'for_each_active_dev_scope'
@@ -142,8 +192,11 @@ ForEachMacros:
- 'for_each_active_iommu'
- 'for_each_aggr_pgid'
- 'for_each_available_child_of_node'
+ - 'for_each_bench'
- 'for_each_bio'
- 'for_each_board_func_rsrc'
+ - 'for_each_btf_ext_rec'
+ - 'for_each_btf_ext_sec'
- 'for_each_bvec'
- 'for_each_card_auxs'
- 'for_each_card_auxs_safe'
@@ -159,17 +212,22 @@ ForEachMacros:
- 'for_each_child_of_node'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
+ - 'for_each_clear_bitrange'
+ - 'for_each_clear_bitrange_from'
+ - 'for_each_cmd'
- 'for_each_cmsghdr'
+ - 'for_each_collection'
+ - 'for_each_comp_order'
- 'for_each_compatible_node'
- 'for_each_component_dais'
- 'for_each_component_dais_safe'
- - 'for_each_comp_order'
- 'for_each_console'
- 'for_each_cpu'
- 'for_each_cpu_and'
- 'for_each_cpu_not'
- 'for_each_cpu_wrap'
- 'for_each_dapm_widgets'
+ - 'for_each_dedup_cand'
- 'for_each_dev_addr'
- 'for_each_dev_scope'
- 'for_each_dma_cap_mask'
@@ -179,13 +237,14 @@ ForEachMacros:
- 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
- - 'for_each_dtpm_table'
- 'for_each_efi_memory_desc'
- 'for_each_efi_memory_desc_in_map'
- 'for_each_element'
- 'for_each_element_extid'
- 'for_each_element_id'
- 'for_each_endpoint_of_node'
+ - 'for_each_event'
+ - 'for_each_event_tps'
- 'for_each_evictable_lru'
- 'for_each_fib6_node_rt_rcu'
- 'for_each_fib6_walker_rt'
@@ -194,30 +253,35 @@ ForEachMacros:
- 'for_each_free_mem_range'
- 'for_each_free_mem_range_reverse'
- 'for_each_func_rsrc'
+ - 'for_each_group_evsel'
+ - 'for_each_group_member'
- 'for_each_hstate'
- 'for_each_if'
+ - 'for_each_inject_fn'
+ - 'for_each_insn'
+ - 'for_each_insn_prefix'
+ - 'for_each_intid'
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
+ - 'for_each_lang'
- 'for_each_link_codecs'
- 'for_each_link_cpus'
- 'for_each_link_platforms'
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
- - 'for_each_member'
- - 'for_each_memcg_cache_index'
- 'for_each_mem_pfn_range'
- - '__for_each_mem_range'
- 'for_each_mem_range'
- - '__for_each_mem_range_rev'
- 'for_each_mem_range_rev'
- 'for_each_mem_region'
+ - 'for_each_member'
+ - 'for_each_memory'
- 'for_each_migratetype_order'
- - 'for_each_msi_entry'
- - 'for_each_msi_entry_safe'
+ - 'for_each_missing_reg'
- 'for_each_net'
- 'for_each_net_continue_reverse'
+ - 'for_each_net_rcu'
- 'for_each_netdev'
- 'for_each_netdev_continue'
- 'for_each_netdev_continue_rcu'
@@ -227,12 +291,13 @@ ForEachMacros:
- 'for_each_netdev_rcu'
- 'for_each_netdev_reverse'
- 'for_each_netdev_safe'
- - 'for_each_net_rcu'
- 'for_each_new_connector_in_state'
- 'for_each_new_crtc_in_state'
- 'for_each_new_mst_mgr_in_state'
- 'for_each_new_plane_in_state'
+ - 'for_each_new_plane_in_state_reverse'
- 'for_each_new_private_obj_in_state'
+ - 'for_each_new_reg'
- 'for_each_node'
- 'for_each_node_by_name'
- 'for_each_node_by_type'
@@ -248,20 +313,20 @@ ForEachMacros:
- 'for_each_old_connector_in_state'
- 'for_each_old_crtc_in_state'
- 'for_each_old_mst_mgr_in_state'
+ - 'for_each_old_plane_in_state'
+ - 'for_each_old_private_obj_in_state'
- 'for_each_oldnew_connector_in_state'
- 'for_each_oldnew_crtc_in_state'
- 'for_each_oldnew_mst_mgr_in_state'
- 'for_each_oldnew_plane_in_state'
- 'for_each_oldnew_plane_in_state_reverse'
- 'for_each_oldnew_private_obj_in_state'
- - 'for_each_old_plane_in_state'
- - 'for_each_old_private_obj_in_state'
- 'for_each_online_cpu'
- 'for_each_online_node'
- 'for_each_online_pgdat'
+ - 'for_each_path'
- 'for_each_pci_bridge'
- 'for_each_pci_dev'
- - 'for_each_pci_msi_entry'
- 'for_each_pcm_streams'
- 'for_each_physmem_range'
- 'for_each_populated_zone'
@@ -269,6 +334,7 @@ ForEachMacros:
- 'for_each_present_cpu'
- 'for_each_prime_number'
- 'for_each_prime_number_from'
+ - 'for_each_probe_cache_entry'
- 'for_each_process'
- 'for_each_process_thread'
- 'for_each_prop_codec_conf'
@@ -278,6 +344,8 @@ ForEachMacros:
- 'for_each_prop_dlc_cpus'
- 'for_each_prop_dlc_platforms'
- 'for_each_property_of_node'
+ - 'for_each_reg'
+ - 'for_each_reg_filtered'
- 'for_each_registered_fb'
- 'for_each_requested_gpio'
- 'for_each_requested_gpio_in_range'
@@ -287,8 +355,12 @@ ForEachMacros:
- 'for_each_rtd_components'
- 'for_each_rtd_cpu_dais'
- 'for_each_rtd_dais'
+ - 'for_each_script'
+ - 'for_each_sec'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
+ - 'for_each_set_bitrange'
+ - 'for_each_set_bitrange_from'
- 'for_each_set_clump8'
- 'for_each_sg'
- 'for_each_sg_dma_page'
@@ -297,18 +369,25 @@ ForEachMacros:
- 'for_each_sgtable_dma_sg'
- 'for_each_sgtable_page'
- 'for_each_sgtable_sg'
+ - 'for_each_shell_test'
- 'for_each_sibling_event'
- 'for_each_subelement'
- 'for_each_subelement_extid'
- 'for_each_subelement_id'
- - '__for_each_thread'
+ - 'for_each_sublist'
+ - 'for_each_subsystem'
+ - 'for_each_supported_activate_fn'
+ - 'for_each_supported_inject_fn'
+ - 'for_each_test'
- 'for_each_thread'
+ - 'for_each_token'
- 'for_each_unicast_dest_pgid'
- 'for_each_vsi'
- 'for_each_wakeup_source'
- 'for_each_zone'
- 'for_each_zone_zonelist'
- 'for_each_zone_zonelist_nodemask'
+ - 'func_for_each_insn'
- 'fwnode_for_each_available_child_node'
- 'fwnode_for_each_child_node'
- 'fwnode_graph_for_each_endpoint'
@@ -322,7 +401,13 @@ ForEachMacros:
- 'hash_for_each_possible_safe'
- 'hash_for_each_rcu'
- 'hash_for_each_safe'
+ - 'hashmap__for_each_entry'
+ - 'hashmap__for_each_entry_safe'
+ - 'hashmap__for_each_key_entry'
+ - 'hashmap__for_each_key_entry_safe'
- 'hctx_for_each_ctx'
+ - 'hists__for_each_format'
+ - 'hists__for_each_sort_list'
- 'hlist_bl_for_each_entry'
- 'hlist_bl_for_each_entry_rcu'
- 'hlist_bl_for_each_entry_safe'
@@ -338,7 +423,6 @@ ForEachMacros:
- 'hlist_for_each_entry_rcu_notrace'
- 'hlist_for_each_entry_safe'
- 'hlist_for_each_entry_srcu'
- - '__hlist_for_each_rcu'
- 'hlist_for_each_safe'
- 'hlist_nulls_for_each_entry'
- 'hlist_nulls_for_each_entry_from'
@@ -346,9 +430,6 @@ ForEachMacros:
- 'hlist_nulls_for_each_entry_safe'
- 'i3c_bus_for_each_i2cdev'
- 'i3c_bus_for_each_i3cdev'
- - 'ide_host_for_each_port'
- - 'ide_port_for_each_dev'
- - 'ide_port_for_each_present_dev'
- 'idr_for_each_entry'
- 'idr_for_each_entry_continue'
- 'idr_for_each_entry_continue_ul'
@@ -356,7 +437,12 @@ ForEachMacros:
- 'in_dev_for_each_ifa_rcu'
- 'in_dev_for_each_ifa_rtnl'
- 'inet_bind_bucket_for_each'
+ - 'inet_lhash2_for_each_icsk'
+ - 'inet_lhash2_for_each_icsk_continue'
- 'inet_lhash2_for_each_icsk_rcu'
+ - 'intlist__for_each_entry'
+ - 'intlist__for_each_entry_safe'
+ - 'kcore_copy__for_each_phdr'
- 'key_for_each'
- 'key_for_each_safe'
- 'klp_for_each_func'
@@ -367,7 +453,9 @@ ForEachMacros:
- 'klp_for_each_object_static'
- 'kunit_suite_for_each_test_case'
- 'kvm_for_each_memslot'
+ - 'kvm_for_each_memslot_in_gfn_range'
- 'kvm_for_each_vcpu'
+ - 'libbpf_nla_for_each_attr'
- 'list_for_each'
- 'list_for_each_codec'
- 'list_for_each_codec_safe'
@@ -387,6 +475,7 @@ ForEachMacros:
- 'list_for_each_entry_safe_from'
- 'list_for_each_entry_safe_reverse'
- 'list_for_each_entry_srcu'
+ - 'list_for_each_from'
- 'list_for_each_prev'
- 'list_for_each_prev_safe'
- 'list_for_each_safe'
@@ -394,11 +483,18 @@ ForEachMacros:
- 'llist_for_each_entry'
- 'llist_for_each_entry_safe'
- 'llist_for_each_safe'
+ - 'map__for_each_symbol'
+ - 'map__for_each_symbol_by_name'
+ - 'map_for_each_event'
+ - 'map_for_each_metric'
+ - 'maps__for_each_entry'
+ - 'maps__for_each_entry_safe'
- 'mci_for_each_dimm'
- 'media_device_for_each_entity'
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
+ - 'msi_for_each_desc'
- 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
@@ -423,6 +519,20 @@ ForEachMacros:
- 'pcl_for_each_chunk'
- 'pcl_for_each_segment'
- 'pcm_for_each_format'
+ - 'perf_config_items__for_each_entry'
+ - 'perf_config_sections__for_each_entry'
+ - 'perf_config_set__for_each_entry'
+ - 'perf_cpu_map__for_each_cpu'
+ - 'perf_evlist__for_each_entry'
+ - 'perf_evlist__for_each_entry_reverse'
+ - 'perf_evlist__for_each_entry_safe'
+ - 'perf_evlist__for_each_evsel'
+ - 'perf_evlist__for_each_mmap'
+ - 'perf_hpp_list__for_each_format'
+ - 'perf_hpp_list__for_each_format_safe'
+ - 'perf_hpp_list__for_each_sort_list'
+ - 'perf_hpp_list__for_each_sort_list_safe'
+ - 'perf_pmu__for_each_hybrid_pmu'
- 'ping_portaddr_for_each_entry'
- 'plist_for_each'
- 'plist_for_each_continue'
@@ -442,6 +552,7 @@ ForEachMacros:
- 'rdma_for_each_block'
- 'rdma_for_each_port'
- 'rdma_umem_for_each_dma_block'
+ - 'resort_rb__for_each_entry'
- 'resource_list_for_each_entry'
- 'resource_list_for_each_entry_safe'
- 'rhl_for_each_entry_rcu'
@@ -455,15 +566,18 @@ ForEachMacros:
- 'rht_for_each_from'
- 'rht_for_each_rcu'
- 'rht_for_each_rcu_from'
- - '__rq_for_each_bio'
- 'rq_for_each_bvec'
- 'rq_for_each_segment'
+ - 'rq_list_for_each'
+ - 'rq_list_for_each_safe'
- 'scsi_for_each_prot_sg'
- 'scsi_for_each_sg'
- 'sctp_for_each_hentry'
- 'sctp_skb_for_each'
+ - 'sec_for_each_insn'
+ - 'sec_for_each_insn_continue'
+ - 'sec_for_each_insn_from'
- 'shdma_for_each_chan'
- - '__shost_for_each_device'
- 'shost_for_each_device'
- 'sk_for_each'
- 'sk_for_each_bound'
@@ -480,7 +594,13 @@ ForEachMacros:
- 'snd_soc_dapm_widget_for_each_path_safe'
- 'snd_soc_dapm_widget_for_each_sink_path'
- 'snd_soc_dapm_widget_for_each_source_path'
+ - 'strlist__for_each_entry'
+ - 'strlist__for_each_entry_safe'
+ - 'sym_for_each_insn'
+ - 'sym_for_each_insn_continue_reverse'
+ - 'symbols__for_each_entry'
- 'tb_property_for_each'
+ - 'tcf_act_for_each_action'
- 'tcf_exts_for_each_action'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
@@ -504,15 +624,17 @@ ForEachMacros:
- 'xbc_node_for_each_array_value'
- 'xbc_node_for_each_child'
- 'xbc_node_for_each_key_value'
+ - 'xbc_node_for_each_subkey'
- 'zorro_for_each_dev'
-#IncludeBlocks: Preserve # Unknown to clang-format-5.0
+IncludeBlocks: Preserve
IncludeCategories:
- Regex: '.*'
Priority: 1
IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: false
-#IndentPPDirectives: None # Unknown to clang-format-5.0
+IndentGotoLabels: false
+IndentPPDirectives: None
IndentWidth: 8
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
@@ -522,13 +644,13 @@ MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
-#ObjCBinPackProtocolList: Auto # Unknown to clang-format-5.0
+ObjCBinPackProtocolList: Auto
ObjCBlockIndentWidth: 8
ObjCSpaceAfterProperty: true
ObjCSpaceBeforeProtocolList: true
# Taken from git's rules
-#PenaltyBreakAssignment: 10 # Unknown to clang-format-4.0
+PenaltyBreakAssignment: 10
PenaltyBreakBeforeFirstCallParameter: 30
PenaltyBreakComment: 10
PenaltyBreakFirstLessLess: 0
@@ -539,14 +661,14 @@ PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
ReflowComments: false
SortIncludes: false
-#SortUsingDeclarations: false # Unknown to clang-format-4.0
+SortUsingDeclarations: false
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
-#SpaceBeforeCtorInitializerColon: true # Unknown to clang-format-5.0
-#SpaceBeforeInheritanceColon: true # Unknown to clang-format-5.0
-SpaceBeforeParens: ControlStatements
-#SpaceBeforeRangeBasedForLoopColon: true # Unknown to clang-format-5.0
+SpaceBeforeCtorInitializerColon: true
+SpaceBeforeInheritanceColon: true
+SpaceBeforeParens: ControlStatementsExceptForEachMacros
+SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
diff --git a/.mailmap b/.mailmap
index 6d484937f901..9ba38a82aba4 100644
--- a/.mailmap
+++ b/.mailmap
@@ -236,6 +236,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
<linux-hardening@vger.kernel.org> <kernel-hardening@lists.openwall.com>
Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
+Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>
diff --git a/Documentation/ABI/stable/sysfs-bus-mhi b/Documentation/ABI/stable/sysfs-bus-mhi
index ecfe7662f8d0..96ccc3385a2b 100644
--- a/Documentation/ABI/stable/sysfs-bus-mhi
+++ b/Documentation/ABI/stable/sysfs-bus-mhi
@@ -19,3 +19,13 @@ Description: The file holds the OEM PK Hash value of the endpoint device
read without having the device power on at least once, the file
will read all 0's.
Users: Any userspace application or clients interested in device info.
+
+What: /sys/bus/mhi/devices/.../soc_reset
+Date: April 2022
+KernelVersion: 5.19
+Contact: mhi@lists.linux.dev
+Description: Initiates a SoC reset on the MHI controller. A SoC reset is
+ a reset of last resort, and will require a complete re-init.
+ This can be useful as a method of recovery if the device is
+ non-responsive, or as a means of loading new firmware as a
+ system administration task.
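A minimal shell sketch of exercising the new soc_reset attribute; the device name mhi0 and the write value are illustrative assumptions, as the entry above does not name them:

    # Request a SoC reset as a last-resort recovery step; the controller
    # requires a complete re-initialization afterwards (value is illustrative).
    echo 1 > /sys/bus/mhi/devices/mhi0/soc_reset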
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uvc b/Documentation/ABI/testing/configfs-usb-gadget-uvc
index 889ed45be4ca..611b23e6488d 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uvc
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc
@@ -7,6 +7,7 @@ Description: UVC function directory
streaming_maxburst 0..15 (ss only)
streaming_maxpacket 1..1023 (fs), 1..3072 (hs/ss)
streaming_interval 1..16
+ function_name string [32]
=================== =============================
What: /config/usb-gadget/gadget/functions/uvc.name/control
diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs
index bcf6915987e4..0f8d20fe343f 100644
--- a/Documentation/ABI/testing/debugfs-driver-habanalabs
+++ b/Documentation/ABI/testing/debugfs-driver-habanalabs
@@ -170,6 +170,20 @@ KernelVersion: 5.1
Contact: ogabbay@kernel.org
Description: Sets the state of the third S/W led on the device
+What: /sys/kernel/debug/habanalabs/hl<n>/memory_scrub
+Date: May 2022
+KernelVersion: 5.19
+Contact: dhirschfeld@habana.ai
+Description: Allows the root user to scrub the dram memory. The scrubbing
+ value can be set using the debugfs file memory_scrub_val.
+
+What: /sys/kernel/debug/habanalabs/hl<n>/memory_scrub_val
+Date: May 2022
+KernelVersion: 5.19
+Contact: dhirschfeld@habana.ai
+Description: The value to which the dram will be set when the user
+ scrubs the dram using the 'memory_scrub' debugfs file.
+
What: /sys/kernel/debug/habanalabs/hl<n>/mmu
Date: Jan 2019
KernelVersion: 5.1
@@ -190,6 +204,30 @@ Description: Check and display page fault or access violation mmu errors for
echo "0x200" > /sys/kernel/debug/habanalabs/hl0/mmu_error
cat /sys/kernel/debug/habanalabs/hl0/mmu_error
+What: /sys/kernel/debug/habanalabs/hl<n>/monitor_dump
+Date: Mar 2022
+KernelVersion: 5.19
+Contact: osharabi@habana.ai
+Description: Allows the root user to dump the monitors' status from the device's
+ protected config space.
+ This property is a binary blob that contains the result of the
+ monitor registers dump.
+ This custom interface is needed (instead of using the generic
+ Linux user-space PCI mapping) because this space is protected
+ and cannot be accessed using PCI read.
+ This interface doesn't support concurrency in the same device.
+ Only supported on GAUDI.
+
+What: /sys/kernel/debug/habanalabs/hl<n>/monitor_dump_trig
+Date: Mar 2022
+KernelVersion: 5.19
+Contact: osharabi@habana.ai
+Description: Triggers a dump of monitor data. The value to trigger the operation
+ must be 1. Triggering the monitor dump operation initiates a dump of
+ the current register values of all monitors.
+ When the write is finished, the user can read the "monitor_dump"
+ blob.
+
What: /sys/kernel/debug/habanalabs/hl<n>/set_power_state
Date: Jan 2019
KernelVersion: 5.1
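A rough usage sketch for the new habanalabs debugfs entries, assuming device index 0 and root privileges; the value written to memory_scrub is an assumption, while monitor_dump_trig is documented to require 1:

    # Set the pattern the DRAM will be scrubbed to, then start the scrub
    echo 0 > /sys/kernel/debug/habanalabs/hl0/memory_scrub_val
    echo 1 > /sys/kernel/debug/habanalabs/hl0/memory_scrub

    # Trigger a monitor dump, then read back the binary blob
    echo 1 > /sys/kernel/debug/habanalabs/hl0/monitor_dump_trig
    cat /sys/kernel/debug/habanalabs/hl0/monitor_dump > monitor_dump.bin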
diff --git a/Documentation/ABI/testing/debugfs-hisi-hpre b/Documentation/ABI/testing/debugfs-hisi-hpre
index 396de7bc735d..82abf92df429 100644
--- a/Documentation/ABI/testing/debugfs-hisi-hpre
+++ b/Documentation/ABI/testing/debugfs-hisi-hpre
@@ -104,6 +104,20 @@ Description: Dump the status of the QM.
Four states: initiated, started, stopped and closed.
Available for both PF and VF, and take no other effect on HPRE.
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/diff_regs
+Date: Mar 2022
+Contact: linux-crypto@vger.kernel.org
+Description: QM debug registers (regs) read the hardware register values. This
+ node is used to show the change of the QM register values. This
+ node can help users to check the change of register values.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/diff_regs
+Date: Mar 2022
+Contact: linux-crypto@vger.kernel.org
+Description: HPRE debug registers (regs) read the hardware register values. This
+ node is used to show the change of the register values. This
+ node can help users to check the change of register values.
+
What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
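A possible way to read the new diff_regs nodes (the <bdf> placeholder follows the convention used above):

    # Show how the QM and HPRE register values have changed
    cat /sys/kernel/debug/hisi_hpre/<bdf>/qm/diff_regs
    cat /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/diff_regs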
diff --git a/Documentation/ABI/testing/debugfs-hisi-sec b/Documentation/ABI/testing/debugfs-hisi-sec
index 2bf84ced484b..93c530d1bf0f 100644
--- a/Documentation/ABI/testing/debugfs-hisi-sec
+++ b/Documentation/ABI/testing/debugfs-hisi-sec
@@ -84,6 +84,20 @@ Description: Dump the status of the QM.
Four states: initiated, started, stopped and closed.
Available for both PF and VF, and take no other effect on SEC.
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/diff_regs
+Date: Mar 2022
+Contact: linux-crypto@vger.kernel.org
+Description: QM debug registers (regs) read the hardware register values. This
+ node is used to show the change of the QM register values. This
+ node can help users to check the change of register values.
+
+What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/diff_regs
+Date: Mar 2022
+Contact: linux-crypto@vger.kernel.org
+Description: SEC debug registers (regs) read the hardware register values. This
+ node is used to show the change of the register values. This
+ node can help users to check the change of register values.
+
What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/send_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
diff --git a/Documentation/ABI/testing/debugfs-hisi-zip b/Documentation/ABI/testing/debugfs-hisi-zip
index bf1258bc6495..fd3f314cf8d1 100644
--- a/Documentation/ABI/testing/debugfs-hisi-zip
+++ b/Documentation/ABI/testing/debugfs-hisi-zip
@@ -97,6 +97,20 @@ Description: Dump the status of the QM.
Four states: initiated, started, stopped and closed.
Available for both PF and VF, and take no other effect on ZIP.
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/diff_regs
+Date: Mar 2022
+Contact: linux-crypto@vger.kernel.org
+Description: QM debug registers (regs) read the hardware register values. This
+ node is used to show the change of the QM register values. This
+ node can help users to check the change of register values.
+
+What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/diff_regs
+Date: Mar 2022
+Contact: linux-crypto@vger.kernel.org
+Description: ZIP debug registers (regs) read the hardware register values. This
+ node is used to show the change of the register values. This
+ node can help users to check the change of register values.
+
What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/send_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt
index b7e87f6c7d47..f7570c240ce8 100644
--- a/Documentation/ABI/testing/sysfs-bus-thunderbolt
+++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt
@@ -293,6 +293,16 @@ Contact: thunderbolt-software@lists.01.org
Description: This contains XDomain service specific settings as
bitmask. Format: %x
+What: /sys/bus/thunderbolt/devices/usb4_portX/connector
+Date: April 2022
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Symlink to the USB Type-C connector. This link is only
+ created when USB Type-C Connector Class is enabled,
+ and only if the system firmware is capable of
+ describing the connection between a port and its
+ connector.
+
What: /sys/bus/thunderbolt/devices/usb4_portX/link
Date: Sep 2021
KernelVersion: v5.14
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
index 3c77677e0ca7..594fda254130 100644
--- a/Documentation/ABI/testing/sysfs-class-cxl
+++ b/Documentation/ABI/testing/sysfs-class-cxl
@@ -103,8 +103,8 @@ What: /sys/class/cxl/<afu>/api_version_compatible
Date: September 2014
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
- Decimal value of the the lowest version of the userspace API
- this this kernel supports.
+ Decimal value of the lowest version of the userspace API
+ this kernel supports.
Users: https://github.com/ibm-capi/libcxl
diff --git a/Documentation/ABI/testing/sysfs-class-firmware b/Documentation/ABI/testing/sysfs-class-firmware
new file mode 100644
index 000000000000..978d3d500400
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-firmware
@@ -0,0 +1,77 @@
+What: /sys/class/firmware/.../data
+Date: July 2022
+KernelVersion: 5.19
+Contact: Russ Weight <russell.h.weight@intel.com>
+Description: The data sysfs file is used for firmware-fallback and for
+ firmware uploads. Cat a firmware image to this sysfs file
+ after you echo 1 to the loading sysfs file. When the firmware
+ image write is complete, echo 0 to the loading sysfs file. This
+ sequence will signal the completion of the firmware write and
+ signal the lower-level driver that the firmware data is
+ available.
+
+What: /sys/class/firmware/.../cancel
+Date: July 2022
+KernelVersion: 5.19
+Contact: Russ Weight <russell.h.weight@intel.com>
+Description: Write-only. For firmware uploads, write a "1" to this file to
+ request that the transfer of firmware data to the lower-level
+ device be canceled. This request will be rejected (EBUSY) if
+ the update cannot be canceled (e.g. a FLASH write is in
+ progress) or (ENODEV) if there is no firmware update in progress.
+
+What: /sys/class/firmware/.../error
+Date: July 2022
+KernelVersion: 5.19
+Contact: Russ Weight <russell.h.weight@intel.com>
+Description: Read-only. Returns a string describing a failed firmware
+ upload. This string will be in the form of <STATUS>:<ERROR>,
+ where <STATUS> will be one of the status strings described
+ for the status sysfs file and <ERROR> will be one of the
+ following: "hw-error", "timeout", "user-abort", "device-busy",
+ "invalid-file-size", "read-write-error", "flash-wearout". The
+ error sysfs file is only meaningful when the current firmware
+ upload status is "idle". If this file is read while a firmware
+ transfer is in progress, then the read will fail with EBUSY.
+
+What: /sys/class/firmware/.../loading
+Date: July 2022
+KernelVersion: 5.19
+Contact: Russ Weight <russell.h.weight@intel.com>
+Description: The loading sysfs file is used for both firmware-fallback and
+ for firmware uploads. Echo 1 onto the loading file to indicate
+ you are writing a firmware file to the data sysfs node. Echo
+ -1 onto this file to abort the data write or echo 0 onto this
+ file to indicate that the write is complete. For firmware
+ uploads, the zero value also triggers the transfer of the
+ firmware data to the lower-level device driver.
+
+What: /sys/class/firmware/.../remaining_size
+Date: July 2022
+KernelVersion: 5.19
+Contact: Russ Weight <russell.h.weight@intel.com>
+Description: Read-only. For firmware upload, this file contains the size
+ of the firmware data that remains to be transferred to the
+ lower-level device driver. The size value is initialized to
+ the full size of the firmware image that was previously
+ written to the data sysfs file. This value is periodically
+ updated during the "transferring" phase of the firmware
+ upload.
+ Format: "%u".
+
+What: /sys/class/firmware/.../status
+Date: July 2022
+KernelVersion: 5.19
+Contact: Russ Weight <russell.h.weight@intel.com>
+Description: Read-only. Returns a string describing the current status of
+ a firmware upload. The string will be one of the following:
+ "idle", "receiving", "preparing", "transferring", "programming".
+
+What: /sys/class/firmware/.../timeout
+Date: July 2022
+KernelVersion: 5.19
+Contact: Russ Weight <russell.h.weight@intel.com>
+Description: This file supports the timeout mechanism for firmware
+ fallback. This file has no effect on firmware uploads. For
+ more information on timeouts, please see the documentation
+ for firmware fallback.
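A hedged sketch of the upload sequence described by the loading and data entries, using a hypothetical device name fw-dev and image file image.bin:

    # Begin the write, stream the image, then complete it; writing 0
    # hands the data to the lower-level driver.
    echo 1 > /sys/class/firmware/fw-dev/loading
    cat image.bin > /sys/class/firmware/fw-dev/data
    echo 0 > /sys/class/firmware/fw-dev/loading

    # Monitor progress and check for errors afterwards.
    cat /sys/class/firmware/fw-dev/status
    cat /sys/class/firmware/fw-dev/remaining_size
    cat /sys/class/firmware/fw-dev/error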
diff --git a/Documentation/ABI/testing/sysfs-devices-physical_location b/Documentation/ABI/testing/sysfs-devices-physical_location
new file mode 100644
index 000000000000..202324b87083
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-physical_location
@@ -0,0 +1,42 @@
+What: /sys/devices/.../physical_location
+Date: March 2022
+Contact: Won Chung <wonchung@google.com>
+Description:
+ This directory contains information on physical location of
+ the device connection point with respect to the system's
+ housing.
+
+What: /sys/devices/.../physical_location/panel
+Date: March 2022
+Contact: Won Chung <wonchung@google.com>
+Description:
+ Describes which panel surface of the system’s housing the
+ device connection point resides on.
+
+What: /sys/devices/.../physical_location/vertical_position
+Date: March 2022
+Contact: Won Chung <wonchung@google.com>
+Description:
+ Describes vertical position of the device connection point on
+ the panel surface.
+
+What: /sys/devices/.../physical_location/horizontal_position
+Date: March 2022
+Contact: Won Chung <wonchung@google.com>
+Description:
+ Describes horizontal position of the device connection point on
+ the panel surface.
+
+What: /sys/devices/.../physical_location/dock
+Date: March 2022
+Contact: Won Chung <wonchung@google.com>
+Description:
+ "Yes" if the device connection point resides in a docking
+ station or a port replicator. "No" otherwise.
+
+What: /sys/devices/.../physical_location/lid
+Date: March 2022
+Contact: Won Chung <wonchung@google.com>
+Description:
+ "Yes" if the device connection point resides on the lid of
+ a laptop system. "No" otherwise.
diff --git a/Documentation/ABI/testing/sysfs-driver-ccp b/Documentation/ABI/testing/sysfs-driver-ccp
new file mode 100644
index 000000000000..7aded9b75553
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-ccp
@@ -0,0 +1,87 @@
+What: /sys/bus/pci/devices/<BDF>/fused_part
+Date: June 2022
+KernelVersion: 5.19
+Contact: mario.limonciello@amd.com
+Description:
+ The /sys/bus/pci/devices/<BDF>/fused_part file reports
+ whether the CPU or APU has been fused to prevent tampering.
+ 0: Not fused
+ 1: Fused
+
+What: /sys/bus/pci/devices/<BDF>/debug_lock_on
+Date: June 2022
+KernelVersion: 5.19
+Contact: mario.limonciello@amd.com
+Description:
+ The /sys/bus/pci/devices/<BDF>/debug_lock_on reports
+ whether the AMD CPU or APU has been unlocked for debugging.
+ Possible values:
+ 0: Not locked
+ 1: Locked
+
+What: /sys/bus/pci/devices/<BDF>/tsme_status
+Date: June 2022
+KernelVersion: 5.19
+Contact: mario.limonciello@amd.com
+Description:
+ The /sys/bus/pci/devices/<BDF>/tsme_status file reports
+ the status of transparent secure memory encryption on AMD systems.
+ Possible values:
+ 0: Not active
+ 1: Active
+
+What: /sys/bus/pci/devices/<BDF>/anti_rollback_status
+Date: June 2022
+KernelVersion: 5.19
+Contact: mario.limonciello@amd.com
+Description:
+ The /sys/bus/pci/devices/<BDF>/anti_rollback_status file reports
+ whether the PSP is enforcing rollback protection.
+ Possible values:
+ 0: Not enforcing
+ 1: Enforcing
+
+What: /sys/bus/pci/devices/<BDF>/rpmc_production_enabled
+Date: June 2022
+KernelVersion: 5.19
+Contact: mario.limonciello@amd.com
+Description:
+ The /sys/bus/pci/devices/<BDF>/rpmc_production_enabled file reports
+ whether Replay Protected Monotonic Counter support has been enabled.
+ Possible values:
+ 0: Not enabled
+ 1: Enabled
+
+What: /sys/bus/pci/devices/<BDF>/rpmc_spirom_available
+Date: June 2022
+KernelVersion: 5.19
+Contact: mario.limonciello@amd.com
+Description:
+ The /sys/bus/pci/devices/<BDF>/rpmc_spirom_available file reports
+ whether an SPI device that supports Replay Protected Monotonic
+ Counters is installed on the system.
+ Possible values:
+ 0: Not present
+ 1: Present
+
+What: /sys/bus/pci/devices/<BDF>/hsp_tpm_available
+Date: June 2022
+KernelVersion: 5.19
+Contact: mario.limonciello@amd.com
+Description:
+ The /sys/bus/pci/devices/<BDF>/hsp_tpm_available file reports
+ whether the HSP TPM has been activated.
+ Possible values:
+ 0: Not activated or present
+ 1: Activated
+
+What: /sys/bus/pci/devices/<BDF>/rom_armor_enforced
+Date: June 2022
+KernelVersion: 5.19
+Contact: mario.limonciello@amd.com
+Description:
+ The /sys/bus/pci/devices/<BDF>/rom_armor_enforced file reports
+ whether RomArmor SPI protection is enforced.
+ Possible values:
+ 0: Not enforced
+ 1: Enforced
diff --git a/Documentation/PCI/pci.rst b/Documentation/PCI/pci.rst
index 67a850b55617..cced568d78e9 100644
--- a/Documentation/PCI/pci.rst
+++ b/Documentation/PCI/pci.rst
@@ -273,12 +273,12 @@ Set the DMA mask size
While all drivers should explicitly indicate the DMA capability
(e.g. 32 or 64 bit) of the PCI bus master, devices with more than
32-bit bus master capability for streaming data need the driver
-to "register" this capability by calling pci_set_dma_mask() with
+to "register" this capability by calling dma_set_mask() with
appropriate parameters. In general this allows more efficient DMA
on systems where System RAM exists above 4G _physical_ address.
Drivers for all PCI-X and PCIe compliant devices must call
-set_dma_mask() as they are 64-bit DMA devices.
+dma_set_mask() as they are 64-bit DMA devices.
Similarly, drivers must also "register" this capability if the device
can directly address "coherent memory" in System RAM above 4G physical
diff --git a/Documentation/admin-guide/blockdev/index.rst b/Documentation/admin-guide/blockdev/index.rst
index b903cf152091..957ccf617797 100644
--- a/Documentation/admin-guide/blockdev/index.rst
+++ b/Documentation/admin-guide/blockdev/index.rst
@@ -1,8 +1,8 @@
.. SPDX-License-Identifier: GPL-2.0
-===========================
-The Linux RapidIO Subsystem
-===========================
+=============
+Block Devices
+=============
.. toctree::
:maxdepth: 1
diff --git a/Documentation/admin-guide/bootconfig.rst b/Documentation/admin-guide/bootconfig.rst
index a1860fc0ca88..d99994345d41 100644
--- a/Documentation/admin-guide/bootconfig.rst
+++ b/Documentation/admin-guide/bootconfig.rst
@@ -158,9 +158,15 @@ Each key-value pair is shown in each line with following style::
Boot Kernel With a Boot Config
==============================
-Since the boot configuration file is loaded with initrd, it will be added
-to the end of the initrd (initramfs) image file with padding, size,
-checksum and 12-byte magic word as below.
+There are two options to boot the kernel with bootconfig: attaching the
+bootconfig to the initrd image or embedding it in the kernel itself.
+
+Attaching a Boot Config to Initrd
+---------------------------------
+
+Since the boot configuration file is loaded with initrd by default,
+it will be added to the end of the initrd (initramfs) image file with
+padding, size, checksum and 12-byte magic word as below.
[initrd][bootconfig][padding][size(le32)][checksum(le32)][#BOOTCONFIG\n]
@@ -196,6 +202,25 @@ To remove the config from the image, you can use -d option as below::
Then add "bootconfig" on the normal kernel command line to tell the
kernel to look for the bootconfig at the end of the initrd file.
+Embedding a Boot Config into Kernel
+-----------------------------------
+
+If you cannot use an initrd, you can instead embed the bootconfig file in
+the kernel via Kconfig options. In this case, you need to recompile the kernel
+with the following configs::
+
+ CONFIG_BOOT_CONFIG_EMBED=y
+ CONFIG_BOOT_CONFIG_EMBED_FILE="/PATH/TO/BOOTCONFIG/FILE"
+
+``CONFIG_BOOT_CONFIG_EMBED_FILE`` requires an absolute path, or a path
+relative to the source tree or object tree, of the bootconfig file.
+The kernel will embed it as the default bootconfig.
+
+Just as when attaching the bootconfig to the initrd, you need the
+``bootconfig`` option on the kernel command line to enable the embedded
+bootconfig.
+
+Note that even if you set this option, you can still override the embedded
+bootconfig with another bootconfig attached to the initrd.
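+
+As a rough, illustrative sketch (the kernel image path and boot loader entry
+are placeholders), enabling and then verifying the embedded bootconfig could
+look like this::
+
+ # boot loader entry (paths are examples): pass "bootconfig" on the
+ # kernel command line
+ linux /boot/vmlinuz root=/dev/sda1 ro bootconfig
+
+ # after boot, dump the bootconfig that was actually applied
+ cat /proc/bootconfig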
Kernel parameters via Boot Config
=================================
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 32073f873662..8090130b544b 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -979,8 +979,10 @@
[KNL] Debugging option to set a timeout in seconds for
deferred probe to give up waiting on dependencies to
probe. Only specific dependencies (subsystems or
- drivers) that have opted in will be ignored. A timeout of 0
- will timeout at the end of initcalls. This option will also
+ drivers) that have opted in will be ignored. A timeout
+ of 0 will time out at the end of initcalls. If the
+ timeout hasn't expired, it'll be restarted by each
+ successful driver registration. This option will also
dump out devices still on the deferred probe list after
retrying.
@@ -1101,7 +1103,10 @@
driver later using sysfs.
driver_async_probe= [KNL]
- List of driver names to be probed asynchronously.
+ List of driver names to be probed asynchronously. "*"
+ matches all driver names. If "*" is specified, the
+ other driver names listed are excluded from the match,
+ i.e. they will not be probed asynchronously (e.g.
+ driver_async_probe=*,mydriver asynchronously probes
+ every driver except mydriver).
Format: <driver_name1>,<driver_name2>...
drm.edid_firmware=[<connector>:]<file>[,[<connector>:]<file>]
@@ -4099,6 +4104,15 @@
please report a bug.
nocrs [X86] Ignore PCI host bridge windows from ACPI.
If you need to use this, please report a bug.
+ use_e820 [X86] Use E820 reservations to exclude parts of
+ PCI host bridge windows. This is a workaround
+ for BIOS defects in host bridge _CRS methods.
+ If you need to use this, please report a bug to
+ <linux-pci@vger.kernel.org>.
+ no_e820 [X86] Ignore E820 reservations for PCI host
+ bridge windows. This is the default on modern
+ hardware. If you need to use this, please report
+ a bug to <linux-pci@vger.kernel.org>.
routeirq Do IRQ routing for all PCI devices.
This is normally done in pci_enable_device(),
so this option is a temporary workaround
diff --git a/Documentation/admin-guide/nfs/nfs-client.rst b/Documentation/admin-guide/nfs/nfs-client.rst
index 6adb6457bc69..36760685dd34 100644
--- a/Documentation/admin-guide/nfs/nfs-client.rst
+++ b/Documentation/admin-guide/nfs/nfs-client.rst
@@ -36,10 +36,9 @@ administrative requirements that require particular behavior that does not
work well as part of an nfs_client_id4 string.
The nfs.nfs4_unique_id boot parameter specifies a unique string that can be
-used instead of a system's node name when an NFS client identifies itself to
-a server. Thus, if the system's node name is not unique, or it changes, its
-nfs.nfs4_unique_id stays the same, preventing collision with other clients
-or loss of state during NFS reboot recovery or transparent state migration.
+used together with a system's node name when an NFS client identifies itself to
+a server. Thus, if the system's node name is not unique, its
+nfs.nfs4_unique_id can help prevent collisions with other clients.
The nfs.nfs4_unique_id string is typically a UUID, though it can contain
anything that is believed to be unique across all NFS clients. An
@@ -53,8 +52,12 @@ outstanding NFSv4 state has expired, to prevent loss of NFSv4 state.
This string can be stored in an NFS client's grub.conf, or it can be provided
via a net boot facility such as PXE. It may also be specified as an nfs.ko
-module parameter. Specifying a uniquifier string is not support for NFS
-clients running in containers.
+module parameter.
+
+This uniquifier string will be the same for all NFS clients running in
+containers unless it is overridden by a value written to
+/sys/fs/nfs/net/nfs_client/identifier, which is local to the network
+namespace of the process that writes it.
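+
+For example (a minimal sketch only; integrating this into container start-up
+tooling is left out), a per-namespace uniquifier could be assigned with::
+
+ uuidgen > /sys/fs/nfs/net/nfs_client/identifier
+
+Here uuidgen is just one convenient way to produce a unique string.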
The DNS resolver
diff --git a/Documentation/admin-guide/pm/intel-speed-select.rst b/Documentation/admin-guide/pm/intel-speed-select.rst
index 0a1fbdb54bfe..a2bfb971654f 100644
--- a/Documentation/admin-guide/pm/intel-speed-select.rst
+++ b/Documentation/admin-guide/pm/intel-speed-select.rst
@@ -262,6 +262,28 @@ Which shows that the base frequency now increased from 2600 MHz at performance
level 0 to 2800 MHz at performance level 4. As a result, any workload, which can
use fewer CPUs, can see a boost of 200 MHz compared to performance level 0.
+Changing performance level via BMC Interface
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is possible to change the SST-PP level using an out of band (OOB) agent
+(via some remote management console, through the BMC "Baseboard Management
+Controller" interface). This mode is supported from the Sapphire Rapids
+processor generation onward. The kernel and tool changes to support this mode
+were added in Linux kernel version 5.18. To enable this feature, the kernel
+config "CONFIG_INTEL_HFI_THERMAL" is required. The minimum tool version that
+supports this feature is "v1.12", which is part of Linux kernel version 5.18.
+
+To support such a configuration, this tool can be used as a daemon. Add
+the command line option --oob::
+
+ # intel-speed-select --oob
+ Intel(R) Speed Select Technology
+ Executing on CPU model:143[0x8f]
+ OOB mode is enabled and will run as daemon
+
+In this mode the tool will online/offline CPUs based on the new performance
+level.
+
Check presence of other Intel(R) SST features
---------------------------------------------
diff --git a/Documentation/arch.rst b/Documentation/arch.rst
index 14bcd8294b93..41a66a8b38e4 100644
--- a/Documentation/arch.rst
+++ b/Documentation/arch.rst
@@ -13,6 +13,7 @@ implementation.
arm/index
arm64/index
ia64/index
+ loongarch/index
m68k/index
mips/index
nios2/index
diff --git a/Documentation/arm/marvell.rst b/Documentation/arm/marvell.rst
index 2f41caa0096c..370721518987 100644
--- a/Documentation/arm/marvell.rst
+++ b/Documentation/arm/marvell.rst
@@ -374,8 +374,6 @@ PXA 2xx/3xx/93x/95x family
Linux kernel mach directory:
arch/arm/mach-pxa
- Linux kernel plat directory:
- arch/arm/plat-pxa
MMP/MMP2/MMP3 family (communication processor)
----------------------------------------------
@@ -429,8 +427,6 @@ MMP/MMP2/MMP3 family (communication processor)
Linux kernel mach directory:
arch/arm/mach-mmp
- Linux kernel plat directory:
- arch/arm/plat-pxa
Berlin family (Multimedia Solutions)
-------------------------------------
@@ -518,9 +514,6 @@ Long-term plans
Business Unit) in a single mach-<foo> directory. The plat-orion/
would therefore disappear.
- * Unify the mach-mmp/ and mach-pxa/ into the same mach-pxa
- directory. The plat-pxa/ would therefore disappear.
-
Credits
-------
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 072ee31a301d..934727e23e0e 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -161,7 +161,7 @@ finally:
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
-language = None
+language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
diff --git a/Documentation/devicetree/bindings/arm/hpe,gxp.yaml b/Documentation/devicetree/bindings/arm/hpe,gxp.yaml
new file mode 100644
index 000000000000..224bbcb93f95
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/hpe,gxp.yaml
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/hpe,gxp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HPE BMC GXP platforms
+
+maintainers:
+ - Nick Hawkins <nick.hawkins@hpe.com>
+ - Jean-Marie Verdun <verdun@hpe.com>
+
+properties:
+ compatible:
+ oneOf:
+ - description: GXP Based Boards
+ items:
+ - enum:
+ - hpe,gxp-dl360gen10
+ - const: hpe,gxp
+
+required:
+ - compatible
+
+additionalProperties: true
+
+...
diff --git a/Documentation/devicetree/bindings/arm/intel,socfpga.yaml b/Documentation/devicetree/bindings/arm/intel,socfpga.yaml
index 6e043459fcd5..61a454a40e87 100644
--- a/Documentation/devicetree/bindings/arm/intel,socfpga.yaml
+++ b/Documentation/devicetree/bindings/arm/intel,socfpga.yaml
@@ -18,6 +18,7 @@ properties:
items:
- enum:
- intel,n5x-socdk
+ - intel,socfpga-agilex-n6000
- intel,socfpga-agilex-socdk
- const: intel,socfpga-agilex
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-clock.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-clock.yaml
new file mode 100644
index 000000000000..cf1002c3efa6
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-clock.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/mediatek/mediatek,mt8186-clock.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: MediaTek Functional Clock Controller for MT8186
+
+maintainers:
+ - Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+description: |
+ The clock architecture in MediaTek SoCs is shown below:
+ PLLs -->
+ dividers -->
+ muxes
+ -->
+ clock gate
+
+ The devices provide clock gate control in different IP blocks.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - mediatek,mt8186-imp_iic_wrap
+ - mediatek,mt8186-mfgsys
+ - mediatek,mt8186-wpesys
+ - mediatek,mt8186-imgsys1
+ - mediatek,mt8186-imgsys2
+ - mediatek,mt8186-vdecsys
+ - mediatek,mt8186-vencsys
+ - mediatek,mt8186-camsys
+ - mediatek,mt8186-camsys_rawa
+ - mediatek,mt8186-camsys_rawb
+ - mediatek,mt8186-mdpsys
+ - mediatek,mt8186-ipesys
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ imp_iic_wrap: clock-controller@11017000 {
+ compatible = "mediatek,mt8186-imp_iic_wrap";
+ reg = <0x11017000 0x1000>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml
new file mode 100644
index 000000000000..0886e2e335bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/mediatek/mediatek,mt8186-sys-clock.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: MediaTek System Clock Controller for MT8186
+
+maintainers:
+ - Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+description: |
+ The clock architecture in MediaTek SoCs is shown below:
+ PLLs -->
+ dividers -->
+ muxes
+ -->
+ clock gate
+
+ The apmixedsys provides most of the PLLs, which are generated from the SoC 26m clock.
+ The topckgen provides the dividers and muxes which supply the clock source to other IP blocks.
+ The infracfg_ao provides clock gates in peripheral and infrastructure IP blocks.
+ The mcusys provides the mux control to select the clock source in the AP MCU.
+ The device nodes also provide the system control capability for configuration.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - mediatek,mt8186-mcusys
+ - mediatek,mt8186-topckgen
+ - mediatek,mt8186-infracfg_ao
+ - mediatek,mt8186-apmixedsys
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ topckgen: syscon@10000000 {
+ compatible = "mediatek,mt8186-topckgen", "syscon";
+ reg = <0x10000000 0x1000>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra-ccplex-cluster.yaml b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra-ccplex-cluster.yaml
new file mode 100644
index 000000000000..8c6543b5c0dc
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra-ccplex-cluster.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/tegra/nvidia,tegra-ccplex-cluster.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: NVIDIA Tegra CPU COMPLEX CLUSTER area device tree bindings
+
+maintainers:
+ - Sumit Gupta <sumitg@nvidia.com>
+ - Mikko Perttunen <mperttunen@nvidia.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+description: |+
+ The Tegra CPU COMPLEX CLUSTER area contains memory-mapped
+ registers that initiate CPU frequency/voltage transitions.
+
+properties:
+ $nodename:
+ pattern: "ccplex@([0-9a-f]+)$"
+
+ compatible:
+ enum:
+ - nvidia,tegra186-ccplex-cluster
+ - nvidia,tegra234-ccplex-cluster
+
+ reg:
+ maxItems: 1
+
+ nvidia,bpmp:
+ $ref: '/schemas/types.yaml#/definitions/phandle'
+ description: |
+ Specifies the BPMP node that needs to be queried to get
+ operating point data for all CPUs.
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - nvidia,bpmp
+ - status
+
+examples:
+ - |
+ ccplex@e000000 {
+ compatible = "nvidia,tegra234-ccplex-cluster";
+ reg = <0x0e000000 0x5ffff>;
+ nvidia,bpmp = <&bpmp>;
+ status = "okay";
+ };
diff --git a/Documentation/devicetree/bindings/clock/airoha,en7523-scu.yaml b/Documentation/devicetree/bindings/clock/airoha,en7523-scu.yaml
new file mode 100644
index 000000000000..d60e74654809
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/airoha,en7523-scu.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/airoha,en7523-scu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: EN7523 Clock Device Tree Bindings
+
+maintainers:
+ - Felix Fietkau <nbd@nbd.name>
+ - John Crispin <nbd@nbd.name>
+
+description: |
+ This node defines the System Control Unit of the EN7523 SoC,
+ a collection of registers configuring many different aspects of the SoC.
+
+ The clock driver uses it to read and configure settings of the
+ PLL controller, which provides clocks for the CPU, the bus and
+ other SoC internal peripherals.
+
+ Each clock is assigned an identifier and client nodes use this identifier
+ to specify which clock they consume.
+
+ All these identifiers can be found in:
+ [1]: <include/dt-bindings/clock/en7523-clk.h>.
+
+ The clocks are provided inside a system controller node.
+
+properties:
+ compatible:
+ items:
+ - const: airoha,en7523-scu
+
+ reg:
+ maxItems: 2
+
+ "#clock-cells":
+ description:
+ The first cell indicates the clock number, see [1] for available
+ clocks.
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/en7523-clk.h>
+ scu: system-controller@1fa20000 {
+ compatible = "airoha,en7523-scu";
+ reg = <0x1fa20000 0x400>,
+ <0x1fb00000 0x1000>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/clock-bindings.txt b/Documentation/devicetree/bindings/clock/clock-bindings.txt
index f2ea53832ac6..6fe541368889 100644
--- a/Documentation/devicetree/bindings/clock/clock-bindings.txt
+++ b/Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -1,186 +1,2 @@
-This binding is a work-in-progress, and are based on some experimental
-work by benh[1].
-
-Sources of clock signal can be represented by any node in the device
-tree. Those nodes are designated as clock providers. Clock consumer
-nodes use a phandle and clock specifier pair to connect clock provider
-outputs to clock inputs. Similar to the gpio specifiers, a clock
-specifier is an array of zero, one or more cells identifying the clock
-output on a device. The length of a clock specifier is defined by the
-value of a #clock-cells property in the clock provider node.
-
-[1] https://patchwork.ozlabs.org/patch/31551/
-
-==Clock providers==
-
-Required properties:
-#clock-cells: Number of cells in a clock specifier; Typically 0 for nodes
- with a single clock output and 1 for nodes with multiple
- clock outputs.
-
-Optional properties:
-clock-output-names: Recommended to be a list of strings of clock output signal
- names indexed by the first cell in the clock specifier.
- However, the meaning of clock-output-names is domain
- specific to the clock provider, and is only provided to
- encourage using the same meaning for the majority of clock
- providers. This format may not work for clock providers
- using a complex clock specifier format. In those cases it
- is recommended to omit this property and create a binding
- specific names property.
-
- Clock consumer nodes must never directly reference
- the provider's clock-output-names property.
-
-For example:
-
- oscillator {
- #clock-cells = <1>;
- clock-output-names = "ckil", "ckih";
- };
-
-- this node defines a device with two clock outputs, the first named
- "ckil" and the second named "ckih". Consumer nodes always reference
- clocks by index. The names should reflect the clock output signal
- names for the device.
-
-clock-indices: If the identifying number for the clocks in the node
- is not linear from zero, then this allows the mapping of
- identifiers into the clock-output-names array.
-
-For example, if we have two clocks <&oscillator 1> and <&oscillator 3>:
-
- oscillator {
- compatible = "myclocktype";
- #clock-cells = <1>;
- clock-indices = <1>, <3>;
- clock-output-names = "clka", "clkb";
- }
-
- This ensures we do not have any empty strings in clock-output-names
-
-
-==Clock consumers==
-
-Required properties:
-clocks: List of phandle and clock specifier pairs, one pair
- for each clock input to the device. Note: if the
- clock provider specifies '0' for #clock-cells, then
- only the phandle portion of the pair will appear.
-
-Optional properties:
-clock-names: List of clock input name strings sorted in the same
- order as the clocks property. Consumers drivers
- will use clock-names to match clock input names
- with clocks specifiers.
-clock-ranges: Empty property indicating that child nodes can inherit named
- clocks from this node. Useful for bus nodes to provide a
- clock to their children.
-
-For example:
-
- device {
- clocks = <&osc 1>, <&ref 0>;
- clock-names = "baud", "register";
- };
-
-
-This represents a device with two clock inputs, named "baud" and "register".
-The baud clock is connected to output 1 of the &osc device, and the register
-clock is connected to output 0 of the &ref.
-
-==Example==
-
- /* external oscillator */
- osc: oscillator {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32678>;
- clock-output-names = "osc";
- };
-
- /* phase-locked-loop device, generates a higher frequency clock
- * from the external oscillator reference */
- pll: pll@4c000 {
- compatible = "vendor,some-pll-interface"
- #clock-cells = <1>;
- clocks = <&osc 0>;
- clock-names = "ref";
- reg = <0x4c000 0x1000>;
- clock-output-names = "pll", "pll-switched";
- };
-
- /* UART, using the low frequency oscillator for the baud clock,
- * and the high frequency switched PLL output for register
- * clocking */
- uart@a000 {
- compatible = "fsl,imx-uart";
- reg = <0xa000 0x1000>;
- interrupts = <33>;
- clocks = <&osc 0>, <&pll 1>;
- clock-names = "baud", "register";
- };
-
-This DT fragment defines three devices: an external oscillator to provide a
-low-frequency reference clock, a PLL device to generate a higher frequency
-clock signal, and a UART.
-
-* The oscillator is fixed-frequency, and provides one clock output, named "osc".
-* The PLL is both a clock provider and a clock consumer. It uses the clock
- signal generated by the external oscillator, and provides two output signals
- ("pll" and "pll-switched").
-* The UART has its baud clock connected the external oscillator and its
- register clock connected to the PLL clock (the "pll-switched" signal)
-
-==Assigned clock parents and rates==
-
-Some platforms may require initial configuration of default parent clocks
-and clock frequencies. Such a configuration can be specified in a device tree
-node through assigned-clocks, assigned-clock-parents and assigned-clock-rates
-properties. The assigned-clock-parents property should contain a list of parent
-clocks in the form of a phandle and clock specifier pair and the
-assigned-clock-rates property should contain a list of frequencies in Hz. Both
-these properties should correspond to the clocks listed in the assigned-clocks
-property.
-
-To skip setting parent or rate of a clock its corresponding entry should be
-set to 0, or can be omitted if it is not followed by any non-zero entry.
-
- uart@a000 {
- compatible = "fsl,imx-uart";
- reg = <0xa000 0x1000>;
- ...
- clocks = <&osc 0>, <&pll 1>;
- clock-names = "baud", "register";
-
- assigned-clocks = <&clkcon 0>, <&pll 2>;
- assigned-clock-parents = <&pll 2>;
- assigned-clock-rates = <0>, <460800>;
- };
-
-In this example the <&pll 2> clock is set as parent of clock <&clkcon 0> and
-the <&pll 2> clock is assigned a frequency value of 460800 Hz.
-
-Configuring a clock's parent and rate through the device node that consumes
-the clock can be done only for clocks that have a single user. Specifying
-conflicting parent or rate configuration in multiple consumer nodes for
-a shared clock is forbidden.
-
-Configuration of common clocks, which affect multiple consumer devices can
-be similarly specified in the clock provider node.
-
-==Protected clocks==
-
-Some platforms or firmwares may not fully expose all the clocks to the OS, such
-as in situations where those clks are used by drivers running in ARM secure
-execution levels. Such a configuration can be specified in device tree with the
-protected-clocks property in the form of a clock specifier list. This property should
-only be specified in the node that is providing the clocks being protected:
-
- clock-controller@a000f000 {
- compatible = "vendor,clk95;
- reg = <0xa000f000 0x1000>
- #clocks-cells = <1>;
- ...
- protected-clocks = <UART3_CLK>, <SPI5_CLK>;
- };
+This file has moved to the clock binding schema:
+https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/clock/clock.yaml
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
index 97936411b6b4..9fafcb080069 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
@@ -20,12 +20,10 @@ description: |
See also:
- dt-bindings/clock/qcom,gcc-msm8960.h
- dt-bindings/reset/qcom,gcc-msm8960.h
- - dt-bindings/clock/qcom,gcc-apq8084.h
- - dt-bindings/reset/qcom,gcc-apq8084.h
properties:
compatible:
- const: qcom,gcc-apq8084
+ const: qcom,gcc-apq8064
nvmem-cells:
minItems: 1
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8084.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8084.yaml
new file mode 100644
index 000000000000..397fb918e032
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8084.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-apq8084.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding for APQ8084
+
+maintainers:
+ - Stephen Boyd <sboyd@kernel.org>
+ - Taniya Das <quic_tdas@quicinc.com>
+
+description: |
+ Qualcomm global clock control module which supports the clocks, resets and
+ power domains on APQ8084.
+
+ See also:
+ - dt-bindings/clock/qcom,gcc-apq8084.h
+ - dt-bindings/reset/qcom,gcc-apq8084.h
+
+allOf:
+ - $ref: qcom,gcc.yaml#
+
+properties:
+ compatible:
+ const: qcom,gcc-apq8084
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ clock-controller@fc400000 {
+ compatible = "qcom,gcc-apq8084";
+ reg = <0xfc400000 0x4000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sc8280xp.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sc8280xp.yaml
new file mode 100644
index 000000000000..0bcdc69c6f89
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sc8280xp.yaml
@@ -0,0 +1,128 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-sc8280xp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding for SC8280xp
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: |
+ Qualcomm global clock control module which supports the clocks, resets and
+ power domains on SC8280xp.
+
+ See also:
+ - include/dt-bindings/clock/qcom,gcc-sc8280xp.h
+
+properties:
+ compatible:
+ const: qcom,gcc-sc8280xp
+
+ clocks:
+ items:
+ - description: XO reference clock
+ - description: Sleep clock
+ - description: UFS memory first RX symbol clock
+ - description: UFS memory second RX symbol clock
+ - description: UFS memory first TX symbol clock
+ - description: UFS card first RX symbol clock
+ - description: UFS card second RX symbol clock
+ - description: UFS card first TX symbol clock
+ - description: Primary USB SuperSpeed pipe clock
+ - description: USB4 PHY pipegmux clock source
+ - description: USB4 PHY DP gmux clock source
+ - description: USB4 PHY sys pipegmux clock source
+ - description: USB4 PHY PCIe pipe clock
+ - description: USB4 PHY router max pipe clock
+ - description: Primary USB4 RX0 clock
+ - description: Primary USB4 RX1 clock
+ - description: Secondary USB SuperSpeed pipe clock
+ - description: Second USB4 PHY pipegmux clock source
+ - description: Second USB4 PHY DP gmux clock source
+ - description: Second USB4 PHY sys pipegmux clock source
+ - description: Second USB4 PHY PCIe pipe clock
+ - description: Second USB4 PHY router max pipe clock
+ - description: Secondary USB4 RX0 clock
+ - description: Secondary USB4 RX1 clock
+ - description: Multiport USB first SuperSpeed pipe clock
+ - description: Multiport USB second SuperSpeed pipe clock
+ - description: PCIe 2a pipe clock
+ - description: PCIe 2b pipe clock
+ - description: PCIe 3a pipe clock
+ - description: PCIe 3b pipe clock
+ - description: PCIe 4 pipe clock
+ - description: First EMAC controller reference clock
+ - description: Second EMAC controller reference clock
+
+ '#clock-cells':
+ const: 1
+
+ '#reset-cells':
+ const: 1
+
+ '#power-domain-cells':
+ const: 1
+
+ reg:
+ maxItems: 1
+
+ protected-clocks:
+ maxItems: 389
+
+required:
+ - compatible
+ - clocks
+ - reg
+ - '#clock-cells'
+ - '#reset-cells'
+ - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ clock-controller@100000 {
+ compatible = "qcom,gcc-sc8280xp";
+ reg = <0x00100000 0x1f0000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&sleep_clk>,
+ <&ufs_phy_rx_symbol_0_clk>,
+ <&ufs_phy_rx_symbol_1_clk>,
+ <&ufs_phy_tx_symbol_0_clk>,
+ <&ufs_card_rx_symbol_0_clk>,
+ <&ufs_card_rx_symbol_1_clk>,
+ <&ufs_card_tx_symbol_0_clk>,
+ <&usb_0_ssphy>,
+ <&gcc_usb4_phy_pipegmux_clk_src>,
+ <&gcc_usb4_phy_dp_gmux_clk_src>,
+ <&gcc_usb4_phy_sys_pipegmux_clk_src>,
+ <&usb4_phy_gcc_usb4_pcie_pipe_clk>,
+ <&usb4_phy_gcc_usb4rtr_max_pipe_clk>,
+ <&qusb4phy_gcc_usb4_rx0_clk>,
+ <&qusb4phy_gcc_usb4_rx1_clk>,
+ <&usb_1_ssphy>,
+ <&gcc_usb4_1_phy_pipegmux_clk_src>,
+ <&gcc_usb4_1_phy_dp_gmux_clk_src>,
+ <&gcc_usb4_1_phy_sys_pipegmux_clk_src>,
+ <&usb4_1_phy_gcc_usb4_pcie_pipe_clk>,
+ <&usb4_1_phy_gcc_usb4rtr_max_pipe_clk>,
+ <&qusb4phy_1_gcc_usb4_rx0_clk>,
+ <&qusb4phy_1_gcc_usb4_rx1_clk>,
+ <&usb_2_ssphy>,
+ <&usb_3_ssphy>,
+ <&pcie2a_lane>,
+ <&pcie2b_lane>,
+ <&pcie3a_lane>,
+ <&pcie3b_lane>,
+ <&pcie4_lane>,
+ <&rxc0_ref_clk>,
+ <&rxc1_ref_clk>;
+
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
index 4b79e89fd174..32e87014bb55 100644
--- a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Multimedia Clock & Reset Controller Binding
maintainers:
- - Jeffrey Hugo <jhugo@codeaurora.org>
+ - Jeffrey Hugo <quic_jhugo@quicinc.com>
- Taniya Das <tdas@codeaurora.org>
description: |
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
deleted file mode 100644
index da295c3c004b..000000000000
--- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Qualcomm RPM Clock Controller Binding
-------------------------------------------------
-The RPM is a dedicated hardware engine for managing the shared
-SoC resources in order to keep the lowest power profile. It
-communicates with other hardware subsystems via shared memory
-and accepts clock requests, aggregates the requests and turns
-the clocks on/off or scales them on demand.
-
-Required properties :
-- compatible : shall contain only one of the following. The generic
- compatible "qcom,rpmcc" should be also included.
-
- "qcom,rpmcc-mdm9607", "qcom,rpmcc"
- "qcom,rpmcc-msm8660", "qcom,rpmcc"
- "qcom,rpmcc-apq8060", "qcom,rpmcc"
- "qcom,rpmcc-msm8226", "qcom,rpmcc"
- "qcom,rpmcc-msm8916", "qcom,rpmcc"
- "qcom,rpmcc-msm8936", "qcom,rpmcc"
- "qcom,rpmcc-msm8953", "qcom,rpmcc"
- "qcom,rpmcc-msm8974", "qcom,rpmcc"
- "qcom,rpmcc-msm8976", "qcom,rpmcc"
- "qcom,rpmcc-apq8064", "qcom,rpmcc"
- "qcom,rpmcc-ipq806x", "qcom,rpmcc"
- "qcom,rpmcc-msm8992",·"qcom,rpmcc"
- "qcom,rpmcc-msm8994",·"qcom,rpmcc"
- "qcom,rpmcc-msm8996", "qcom,rpmcc"
- "qcom,rpmcc-msm8998", "qcom,rpmcc"
- "qcom,rpmcc-qcm2290", "qcom,rpmcc"
- "qcom,rpmcc-qcs404", "qcom,rpmcc"
- "qcom,rpmcc-sdm660", "qcom,rpmcc"
- "qcom,rpmcc-sm6115", "qcom,rpmcc"
- "qcom,rpmcc-sm6125", "qcom,rpmcc"
-
-- #clock-cells : shall contain 1
-
-The clock enumerators are defined in <dt-bindings/clock/qcom,rpmcc.h>
-and come in pairs: FOO_CLK followed by FOO_A_CLK. The latter clock
-is an "active" clock, which means that the consumer only care that the
-clock is available when the apps CPU subsystem is active, i.e. not
-suspended or in deep idle. If it is important that the clock keeps running
-during system suspend, you need to specify the non-active clock, the one
-not containing *_A_* in the enumerator name.
-
-Example:
- smd {
- compatible = "qcom,smd";
-
- rpm {
- interrupts = <0 168 1>;
- qcom,ipc = <&apcs 8 0>;
- qcom,smd-edge = <15>;
-
- rpm_requests {
- compatible = "qcom,rpm-msm8916";
- qcom,smd-channels = "rpm_requests";
-
- rpmcc: clock-controller {
- compatible = "qcom,rpmcc-msm8916", "qcom,rpmcc";
- #clock-cells = <1>;
- };
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml b/Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml
new file mode 100644
index 000000000000..9d296b89a8d0
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,rpmcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm RPM Clock Controller
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+ - Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+description: |
+ The clock enumerators are defined in <dt-bindings/clock/qcom,rpmcc.h> and
+ come in pairs: FOO_CLK followed by FOO_A_CLK. The latter clock is
+ an "active" clock, which means that the consumer only cares that the clock is
+ available when the apps CPU subsystem is active, i.e. not suspended or in
+ deep idle. If it is important that the clock keeps running during system
+ suspend, you need to specify the non-active clock, the one not containing
+ *_A_* in the enumerator name.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - qcom,rpmcc-apq8060
+ - qcom,rpmcc-apq8064
+ - qcom,rpmcc-ipq806x
+ - qcom,rpmcc-mdm9607
+ - qcom,rpmcc-msm8226
+ - qcom,rpmcc-msm8660
+ - qcom,rpmcc-msm8916
+ - qcom,rpmcc-msm8936
+ - qcom,rpmcc-msm8953
+ - qcom,rpmcc-msm8974
+ - qcom,rpmcc-msm8976
+ - qcom,rpmcc-msm8992
+ - qcom,rpmcc-msm8994
+ - qcom,rpmcc-msm8996
+ - qcom,rpmcc-msm8998
+ - qcom,rpmcc-qcm2290
+ - qcom,rpmcc-qcs404
+ - qcom,rpmcc-sdm660
+ - qcom,rpmcc-sm6115
+ - qcom,rpmcc-sm6125
+ - const: qcom,rpmcc
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: xo
+
+required:
+ - compatible
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ rpm {
+ rpm-requests {
+ compatible = "qcom,rpm-msm8916";
+ qcom,smd-channels = "rpm_requests";
+
+ clock-controller {
+ compatible = "qcom,rpmcc-msm8916", "qcom,rpmcc";
+ #clock-cells = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
index e0b86214f0f5..e57bc40d307a 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
@@ -49,6 +49,7 @@ properties:
- renesas,r8a77995-cpg-mssr # R-Car D3
- renesas,r8a779a0-cpg-mssr # R-Car V3U
- renesas,r8a779f0-cpg-mssr # R-Car S4-8
+ - renesas,r8a779g0-cpg-mssr # R-Car V4H
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.yaml b/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.yaml
index 25dbb0fac065..95bf485c6cec 100644
--- a/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.yaml
@@ -39,6 +39,17 @@ properties:
'#power-domain-cells':
const: 0
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 1
+
+patternProperties:
+ "^dma-router@[a-f0-9]+$":
+ type: object
+ $ref: "../dma/renesas,rzn1-dmamux.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml b/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml
index bd3af8fc616b..8880b834f264 100644
--- a/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml
@@ -4,14 +4,15 @@
$id: "http://devicetree.org/schemas/clock/renesas,rzg2l-cpg.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
-title: Renesas RZ/{G2L,V2L} Clock Pulse Generator / Module Standby Mode
+title: Renesas RZ/{G2L,V2L,V2M} Clock Pulse Generator / Module Standby Mode
maintainers:
- Geert Uytterhoeven <geert+renesas@glider.be>
description: |
- On Renesas RZ/{G2L,V2L} SoC, the CPG (Clock Pulse Generator) and Module
- Standby Mode share the same register block.
+ On Renesas RZ/{G2L,V2L} and similar SoCs, the CPG (Clock Pulse Generator) and Module
+ Standby Mode share the same register block. On RZ/V2M, the functionality is
+ similar, but there are no Clock Monitor Registers.
They provide the following functionalities:
- The CPG block generates various core clocks,
@@ -23,8 +24,10 @@ description: |
properties:
compatible:
enum:
- - renesas,r9a07g044-cpg # RZ/G2{L,LC}
- - renesas,r9a07g054-cpg # RZ/V2L
+ - renesas,r9a07g043-cpg # RZ/G2UL{Type-1,Type-2}
+ - renesas,r9a07g044-cpg # RZ/G2{L,LC}
+ - renesas,r9a07g054-cpg # RZ/V2L
+ - renesas,r9a09g011-cpg # RZ/V2M
reg:
maxItems: 1
@@ -42,9 +45,10 @@ properties:
description: |
- For CPG core clocks, the two clock specifier cells must be "CPG_CORE"
and a core clock reference, as defined in
- <dt-bindings/clock/r9a07g*-cpg.h>
+ <dt-bindings/clock/r9a0*-cpg.h>
- For module clocks, the two clock specifier cells must be "CPG_MOD" and
- a module number, as defined in the <dt-bindings/clock/r9a07g0*-cpg.h>.
+ a module number, as defined in the <dt-bindings/clock/r9a07g0*-cpg.h> or
+ <dt-bindings/clock/r9a09g011-cpg.h>.
const: 2
'#power-domain-cells':
@@ -58,7 +62,7 @@ properties:
'#reset-cells':
description:
The single reset specifier cell must be the module number, as defined in
- the <dt-bindings/clock/r9a07g0*-cpg.h>.
+ the <dt-bindings/clock/r9a07g0*-cpg.h> or <dt-bindings/clock/r9a09g011-cpg.h>.
const: 1
required:
diff --git a/Documentation/devicetree/bindings/clock/rockchip,px30-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,px30-cru.txt
deleted file mode 100644
index 55e78cddec8c..000000000000
--- a/Documentation/devicetree/bindings/clock/rockchip,px30-cru.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-* Rockchip PX30 Clock and Reset Unit
-
-The PX30 clock controller generates and supplies clock to various
-controllers within the SoC and also implements a reset controller for SoC
-peripherals.
-
-Required Properties:
-
-- compatible: PMU for CRU should be "rockchip,px30-pmu-cru"
-- compatible: CRU should be "rockchip,px30-cru"
-- reg: physical base address of the controller and length of memory mapped
- region.
-- clocks: A list of phandle + clock-specifier pairs for the clocks listed
- in clock-names
-- clock-names: Should contain the following:
- - "xin24m" for both PMUCRU and CRU
- - "gpll" for CRU (sourced from PMUCRU)
-- #clock-cells: should be 1.
-- #reset-cells: should be 1.
-
-Optional Properties:
-
-- rockchip,grf: phandle to the syscon managing the "general register files"
- If missing, pll rates are not changeable, due to the missing pll lock status.
-
-Each clock is assigned an identifier and client nodes can use this identifier
-to specify the clock which they consume. All available clocks are defined as
-preprocessor macros in the dt-bindings/clock/px30-cru.h headers and can be
-used in device tree sources. Similar macros exist for the reset sources in
-these files.
-
-External clocks:
-
-There are several clocks that are generated outside the SoC. It is expected
-that they are defined using standard clock bindings with following
-clock-output-names:
- - "xin24m" - crystal input - required,
- - "xin32k" - rtc clock - optional,
- - "i2sx_clkin" - external I2S clock - optional,
- - "gmac_clkin" - external GMAC clock - optional
-
-Example: Clock controller node:
-
- pmucru: clock-controller@ff2bc000 {
- compatible = "rockchip,px30-pmucru";
- reg = <0x0 0xff2bc000 0x0 0x1000>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
- cru: clock-controller@ff2b0000 {
- compatible = "rockchip,px30-cru";
- reg = <0x0 0xff2b0000 0x0 0x1000>;
- rockchip,grf = <&grf>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
-Example: UART controller node that consumes the clock generated by the clock
- controller:
-
- uart0: serial@ff030000 {
- compatible = "rockchip,px30-uart", "snps,dw-apb-uart";
- reg = <0x0 0xff030000 0x0 0x100>;
- interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&pmucru SCLK_UART0_PMU>, <&pmucru PCLK_UART0_PMU>;
- clock-names = "baudclk", "apb_pclk";
- reg-shift = <2>;
- reg-io-width = <4>;
- };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,px30-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,px30-cru.yaml
new file mode 100644
index 000000000000..3eec381c7cf5
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,px30-cru.yaml
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/rockchip,px30-cru.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip PX30 Clock and Reset Unit (CRU)
+
+maintainers:
+ - Elaine Zhang <zhangqing@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+
+description: |
+ The PX30 clock controller generates and supplies clocks to various
+ controllers within the SoC and also implements a reset controller for SoC
+ peripherals.
+ Each clock is assigned an identifier and client nodes can use this identifier
+ to specify the clock which they consume. All available clocks are defined as
+ preprocessor macros in the dt-bindings/clock/px30-cru.h headers and can be
+ used in device tree sources. Similar macros exist for the reset sources in
+ these files.
+ There are several clocks that are generated outside the SoC. It is expected
+ that they are defined using standard clock bindings with following
+ clock-output-names:
+ - "xin24m" - crystal input - required
+ - "xin32k" - rtc clock - optional
+ - "i2sx_clkin" - external I2S clock - optional
+ - "gmac_clkin" - external GMAC clock - optional
+
+properties:
+ compatible:
+ enum:
+ - rockchip,px30-cru
+ - rockchip,px30-pmucru
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ "#reset-cells":
+ const: 1
+
+ clocks:
+ minItems: 1
+ items:
+ - description: Clock for both PMUCRU and CRU
+ - description: Clock for CRU (sourced from PMUCRU)
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: xin24m
+ - const: gpll
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon managing the "general register files" (GRF).
+ If missing, PLL rates are not changeable due to the missing PLL
+ lock status.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - "#clock-cells"
+ - "#reset-cells"
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,px30-cru
+
+ then:
+ properties:
+ clocks:
+ minItems: 2
+
+ clock-names:
+ minItems: 2
+
+ else:
+ properties:
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ maxItems: 1
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/px30-cru.h>
+
+ pmucru: clock-controller@ff2bc000 {
+ compatible = "rockchip,px30-pmucru";
+ reg = <0xff2bc000 0x1000>;
+ clocks = <&xin24m>;
+ clock-names = "xin24m";
+ rockchip,grf = <&grf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ cru: clock-controller@ff2b0000 {
+ compatible = "rockchip,px30-cru";
+ reg = <0xff2b0000 0x1000>;
+ clocks = <&xin24m>, <&pmucru PLL_GPLL>;
+ clock-names = "xin24m", "gpll";
+ rockchip,grf = <&grf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
deleted file mode 100644
index 20df350b9ef3..000000000000
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-* Rockchip RK3036 Clock and Reset Unit
-
-The RK3036 clock controller generates and supplies clock to various
-controllers within the SoC and also implements a reset controller for SoC
-peripherals.
-
-Required Properties:
-
-- compatible: should be "rockchip,rk3036-cru"
-- reg: physical base address of the controller and length of memory mapped
- region.
-- #clock-cells: should be 1.
-- #reset-cells: should be 1.
-
-Optional Properties:
-
-- rockchip,grf: phandle to the syscon managing the "general register files"
- If missing pll rates are not changeable, due to the missing pll lock status.
-
-Each clock is assigned an identifier and client nodes can use this identifier
-to specify the clock which they consume. All available clocks are defined as
-preprocessor macros in the dt-bindings/clock/rk3036-cru.h headers and can be
-used in device tree sources. Similar macros exist for the reset sources in
-these files.
-
-External clocks:
-
-There are several clocks that are generated outside the SoC. It is expected
-that they are defined using standard clock bindings with following
-clock-output-names:
- - "xin24m" - crystal input - required,
- - "ext_i2s" - external I2S clock - optional,
- - "rmii_clkin" - external EMAC clock - optional
-
-Example: Clock controller node:
-
- cru: cru@20000000 {
- compatible = "rockchip,rk3036-cru";
- reg = <0x20000000 0x1000>;
- rockchip,grf = <&grf>;
-
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
-Example: UART controller node that consumes the clock generated by the clock
- controller:
-
- uart0: serial@20060000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x20060000 0x100>;
- interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clocks = <&cru SCLK_UART0>;
- };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.yaml
new file mode 100644
index 000000000000..1376230fede6
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/rockchip,rk3036-cru.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip RK3036 Clock and Reset Unit (CRU)
+
+maintainers:
+ - Elaine Zhang <zhangqing@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+
+description: |
+ The RK3036 clock controller generates and supplies clocks to various
+ controllers within the SoC and also implements a reset controller for SoC
+ peripherals.
+ Each clock is assigned an identifier and client nodes can use this identifier
+ to specify the clock which they consume. All available clocks are defined as
+ preprocessor macros in the dt-bindings/clock/rk3036-cru.h headers and can be
+ used in device tree sources. Similar macros exist for the reset sources in
+ these files.
+ There are several clocks that are generated outside the SoC. It is expected
+ that they are defined using standard clock bindings with following
+ clock-output-names:
+ - "xin24m" - crystal input - required
+ - "ext_i2s" - external I2S clock - optional
+ - "rmii_clkin" - external EMAC clock - optional
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk3036-cru
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ "#reset-cells":
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: xin24m
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon managing the "general register files" (GRF).
+ If missing, PLL rates are not changeable due to the missing PLL
+ lock status.
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ cru: clock-controller@20000000 {
+ compatible = "rockchip,rk3036-cru";
+ reg = <0x20000000 0x1000>;
+ rockchip,grf = <&grf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3188-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3188-cru.txt
deleted file mode 100644
index 7f368530a2e4..000000000000
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3188-cru.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-* Rockchip RK3188/RK3066 Clock and Reset Unit
-
-The RK3188/RK3066 clock controller generates and supplies clock to various
-controllers within the SoC and also implements a reset controller for SoC
-peripherals.
-
-Required Properties:
-
-- compatible: should be "rockchip,rk3188-cru", "rockchip,rk3188a-cru" or
- "rockchip,rk3066a-cru"
-- reg: physical base address of the controller and length of memory mapped
- region.
-- #clock-cells: should be 1.
-- #reset-cells: should be 1.
-
-Optional Properties:
-
-- rockchip,grf: phandle to the syscon managing the "general register files"
- If missing pll rates are not changeable, due to the missing pll lock status.
-
-Each clock is assigned an identifier and client nodes can use this identifier
-to specify the clock which they consume. All available clocks are defined as
-preprocessor macros in the dt-bindings/clock/rk3188-cru.h and
-dt-bindings/clock/rk3066-cru.h headers and can be used in device tree sources.
-Similar macros exist for the reset sources in these files.
-
-External clocks:
-
-There are several clocks that are generated outside the SoC. It is expected
-that they are defined using standard clock bindings with following
-clock-output-names:
- - "xin24m" - crystal input - required,
- - "xin32k" - rtc clock - optional,
- - "xin27m" - 27mhz crystal input on rk3066 - optional,
- - "ext_hsadc" - external HSADC clock - optional,
- - "ext_cif0" - external camera clock - optional,
- - "ext_rmii" - external RMII clock - optional,
- - "ext_jtag" - externalJTAG clock - optional
-
-Example: Clock controller node:
-
- cru: cru@20000000 {
- compatible = "rockchip,rk3188-cru";
- reg = <0x20000000 0x1000>;
- rockchip,grf = <&grf>;
-
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
-Example: UART controller node that consumes the clock generated by the clock
- controller:
-
- uart0: serial@10124000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x10124000 0x400>;
- interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <1>;
- clocks = <&cru SCLK_UART0>;
- };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3188-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3188-cru.yaml
new file mode 100644
index 000000000000..ddd7e46af0f2
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3188-cru.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/rockchip,rk3188-cru.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip RK3188/RK3066 Clock and Reset Unit (CRU)
+
+maintainers:
+ - Elaine Zhang <zhangqing@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+
+description: |
+ The RK3188/RK3066 clock controller generates and supplies clocks to various
+ controllers within the SoC and also implements a reset controller for SoC
+ peripherals.
+ Each clock is assigned an identifier and client nodes can use this identifier
+ to specify the clock which they consume. All available clocks are defined as
+ preprocessor macros in the dt-bindings/clock/rk3188-cru.h and
+ dt-bindings/clock/rk3066-cru.h headers and can be used in device tree sources.
+ Similar macros exist for the reset sources in these files.
+ There are several clocks that are generated outside the SoC. It is expected
+ that they are defined using standard clock bindings with following
+ clock-output-names:
+ - "xin24m" - crystal input - required
+ - "xin32k" - RTC clock - optional
+ - "xin27m" - 27mhz crystal input on RK3066 - optional
+ - "ext_hsadc" - external HSADC clock - optional
+ - "ext_cif0" - external camera clock - optional
+ - "ext_rmii" - external RMII clock - optional
+ - "ext_jtag" - external JTAG clock - optional
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk3066a-cru
+ - rockchip,rk3188-cru
+ - rockchip,rk3188a-cru
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ "#reset-cells":
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: xin24m
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon managing the "general register files" (GRF).
+ If missing, PLL rates are not changeable because the PLL lock status
+ cannot be read.
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ cru: clock-controller@20000000 {
+ compatible = "rockchip,rk3188-cru";
+ reg = <0x20000000 0x1000>;
+ rockchip,grf = <&grf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
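For reference, a minimal consumer node using one of the clock identifiers from
dt-bindings/clock/rk3188-cru.h; the UART address, interrupt and other properties
are carried over from the removed text example and are illustrative only:

    #include <dt-bindings/clock/rk3188-cru.h>

    uart0: serial@10124000 {
        compatible = "snps,dw-apb-uart";
        reg = <0x10124000 0x400>;
        interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
        reg-shift = <2>;
        reg-io-width = <1>;
        clocks = <&cru SCLK_UART0>;
    };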
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3228-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3228-cru.txt
deleted file mode 100644
index f323048127eb..000000000000
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3228-cru.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-* Rockchip RK3228 Clock and Reset Unit
-
-The RK3228 clock controller generates and supplies clock to various
-controllers within the SoC and also implements a reset controller for SoC
-peripherals.
-
-Required Properties:
-
-- compatible: should be "rockchip,rk3228-cru"
-- reg: physical base address of the controller and length of memory mapped
- region.
-- #clock-cells: should be 1.
-- #reset-cells: should be 1.
-
-Optional Properties:
-
-- rockchip,grf: phandle to the syscon managing the "general register files"
- If missing pll rates are not changeable, due to the missing pll lock status.
-
-Each clock is assigned an identifier and client nodes can use this identifier
-to specify the clock which they consume. All available clocks are defined as
-preprocessor macros in the dt-bindings/clock/rk3228-cru.h headers and can be
-used in device tree sources. Similar macros exist for the reset sources in
-these files.
-
-External clocks:
-
-There are several clocks that are generated outside the SoC. It is expected
-that they are defined using standard clock bindings with following
-clock-output-names:
- - "xin24m" - crystal input - required,
- - "ext_i2s" - external I2S clock - optional,
- - "ext_gmac" - external GMAC clock - optional
- - "ext_hsadc" - external HSADC clock - optional
- - "phy_50m_out" - output clock of the pll in the mac phy
-
-Example: Clock controller node:
-
- cru: cru@20000000 {
- compatible = "rockchip,rk3228-cru";
- reg = <0x20000000 0x1000>;
- rockchip,grf = <&grf>;
-
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
-Example: UART controller node that consumes the clock generated by the clock
- controller:
-
- uart0: serial@10110000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x10110000 0x100>;
- interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clocks = <&cru SCLK_UART0>;
- };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3228-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3228-cru.yaml
new file mode 100644
index 000000000000..cf7dc01d9478
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3228-cru.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/rockchip,rk3228-cru.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip RK3228 Clock and Reset Unit (CRU)
+
+maintainers:
+ - Elaine Zhang <zhangqing@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+
+description: |
+ The RK3228 clock controller generates and supplies clocks to various
+ controllers within the SoC and also implements a reset controller for SoC
+ peripherals.
+ Each clock is assigned an identifier and client nodes can use this identifier
+ to specify the clock which they consume. All available clocks are defined as
+ preprocessor macros in the dt-bindings/clock/rk3228-cru.h headers and can be
+ used in device tree sources. Similar macros exist for the reset sources in
+ these files.
+ There are several clocks that are generated outside the SoC. It is expected
+ that they are defined using standard clock bindings with the following
+ clock-output-names:
+ - "xin24m" - crystal input - required
+ - "ext_i2s" - external I2S clock - optional
+ - "ext_gmac" - external GMAC clock - optional
+ - "ext_hsadc" - external HSADC clock - optional
+ - "phy_50m_out" - output clock of the pll in the mac phy
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk3228-cru
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ "#reset-cells":
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: xin24m
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon managing the "general register files" (GRF).
+ If missing, PLL rates are not changeable because the PLL lock status
+ cannot be read.
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ cru: clock-controller@20000000 {
+ compatible = "rockchip,rk3228-cru";
+ reg = <0x20000000 0x1000>;
+ rockchip,grf = <&grf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3288-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3288-cru.txt
deleted file mode 100644
index bf3a9ec19241..000000000000
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3288-cru.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-* Rockchip RK3288 Clock and Reset Unit
-
-The RK3288 clock controller generates and supplies clock to various
-controllers within the SoC and also implements a reset controller for SoC
-peripherals.
-
-A revision of this SoC is available: rk3288w. The clock tree is a bit
-different so another dt-compatible is available. Noticed that it is only
-setting the difference but there is no automatic revision detection. This
-should be performed by bootloaders.
-
-Required Properties:
-
-- compatible: should be "rockchip,rk3288-cru" or "rockchip,rk3288w-cru" in
- case of this revision of Rockchip rk3288.
-- reg: physical base address of the controller and length of memory mapped
- region.
-- #clock-cells: should be 1.
-- #reset-cells: should be 1.
-
-Optional Properties:
-
-- rockchip,grf: phandle to the syscon managing the "general register files"
- If missing pll rates are not changeable, due to the missing pll lock status.
-
-Each clock is assigned an identifier and client nodes can use this identifier
-to specify the clock which they consume. All available clocks are defined as
-preprocessor macros in the dt-bindings/clock/rk3288-cru.h headers and can be
-used in device tree sources. Similar macros exist for the reset sources in
-these files.
-
-External clocks:
-
-There are several clocks that are generated outside the SoC. It is expected
-that they are defined using standard clock bindings with following
-clock-output-names:
- - "xin24m" - crystal input - required,
- - "xin32k" - rtc clock - optional,
- - "ext_i2s" - external I2S clock - optional,
- - "ext_hsadc" - external HSADC clock - optional,
- - "ext_edp_24m" - external display port clock - optional,
- - "ext_vip" - external VIP clock - optional,
- - "ext_isp" - external ISP clock - optional,
- - "ext_jtag" - external JTAG clock - optional
-
-Example: Clock controller node:
-
- cru: cru@20000000 {
- compatible = "rockchip,rk3188-cru";
- reg = <0x20000000 0x1000>;
- rockchip,grf = <&grf>;
-
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
-Example: UART controller node that consumes the clock generated by the clock
- controller:
-
- uart0: serial@10124000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x10124000 0x400>;
- interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <1>;
- clocks = <&cru SCLK_UART0>;
- };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3288-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3288-cru.yaml
new file mode 100644
index 000000000000..96bc05749e1a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3288-cru.yaml
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/rockchip,rk3288-cru.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip RK3288 Clock and Reset Unit (CRU)
+
+maintainers:
+ - Elaine Zhang <zhangqing@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+
+description: |
+ The RK3288 clock controller generates and supplies clocks to various
+ controllers within the SoC and also implements a reset controller for SoC
+ peripherals.
+
+ A revision of this SoC is available: rk3288w. Its clock tree is slightly
+ different, so a separate compatible string is provided. Note that this
+ compatible only describes the differences; there is no automatic revision
+ detection, so the correct compatible must be set by the boot loader.
+
+ Each clock is assigned an identifier and client nodes can use this identifier
+ to specify the clock which they consume. All available clocks are defined as
+ preprocessor macros in the dt-bindings/clock/rk3288-cru.h headers and can be
+ used in device tree sources. Similar macros exist for the reset sources in
+ these files.
+
+ There are several clocks that are generated outside the SoC. It is expected
+ that they are defined using standard clock bindings with the following
+ clock-output-names:
+ - "xin24m" - crystal input - required,
+ - "xin32k" - rtc clock - optional,
+ - "ext_i2s" - external I2S clock - optional,
+ - "ext_hsadc" - external HSADC clock - optional,
+ - "ext_edp_24m" - external display port clock - optional,
+ - "ext_vip" - external VIP clock - optional,
+ - "ext_isp" - external ISP clock - optional,
+ - "ext_jtag" - external JTAG clock - optional
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk3288-cru
+ - rockchip,rk3288w-cru
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ "#reset-cells":
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: xin24m
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon managing the "general register files" (GRF).
+ If missing, PLL rates are not changeable because the PLL lock status
+ cannot be read.
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ cru: clock-controller@ff760000 {
+ compatible = "rockchip,rk3288-cru";
+ reg = <0xff760000 0x1000>;
+ rockchip,grf = <&grf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3308-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3308-cru.txt
deleted file mode 100644
index 9b151c5b0c90..000000000000
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3308-cru.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-* Rockchip RK3308 Clock and Reset Unit
-
-The RK3308 clock controller generates and supplies clock to various
-controllers within the SoC and also implements a reset controller for SoC
-peripherals.
-
-Required Properties:
-
-- compatible: CRU should be "rockchip,rk3308-cru"
-- reg: physical base address of the controller and length of memory mapped
- region.
-- #clock-cells: should be 1.
-- #reset-cells: should be 1.
-
-Optional Properties:
-
-- rockchip,grf: phandle to the syscon managing the "general register files"
- If missing, pll rates are not changeable, due to the missing pll lock status.
-
-Each clock is assigned an identifier and client nodes can use this identifier
-to specify the clock which they consume. All available clocks are defined as
-preprocessor macros in the dt-bindings/clock/rk3308-cru.h headers and can be
-used in device tree sources. Similar macros exist for the reset sources in
-these files.
-
-External clocks:
-
-There are several clocks that are generated outside the SoC. It is expected
-that they are defined using standard clock bindings with following
-clock-output-names:
- - "xin24m" - crystal input - required,
- - "xin32k" - rtc clock - optional,
- - "mclk_i2s0_8ch_in", "mclk_i2s1_8ch_in", "mclk_i2s2_8ch_in",
- "mclk_i2s3_8ch_in", "mclk_i2s0_2ch_in",
- "mclk_i2s1_2ch_in" - external I2S or SPDIF clock - optional,
- - "mac_clkin" - external MAC clock - optional
-
-Example: Clock controller node:
-
- cru: clock-controller@ff500000 {
- compatible = "rockchip,rk3308-cru";
- reg = <0x0 0xff500000 0x0 0x1000>;
- rockchip,grf = <&grf>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
-Example: UART controller node that consumes the clock generated by the clock
- controller:
-
- uart0: serial@ff0a0000 {
- compatible = "rockchip,rk3308-uart", "snps,dw-apb-uart";
- reg = <0x0 0xff0a0000 0x0 0x100>;
- interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
- clock-names = "baudclk", "apb_pclk";
- reg-shift = <2>;
- reg-io-width = <4>;
- status = "disabled";
- };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3308-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3308-cru.yaml
new file mode 100644
index 000000000000..523ee578a586
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3308-cru.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/rockchip,rk3308-cru.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip RK3308 Clock and Reset Unit (CRU)
+
+maintainers:
+ - Elaine Zhang <zhangqing@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+
+description: |
+ The RK3308 clock controller generates and supplies clocks to various
+ controllers within the SoC and also implements a reset controller for SoC
+ peripherals.
+ Each clock is assigned an identifier and client nodes can use this identifier
+ to specify the clock which they consume. All available clocks are defined as
+ preprocessor macros in the dt-bindings/clock/rk3308-cru.h headers and can be
+ used in device tree sources. Similar macros exist for the reset sources in
+ these files.
+ There are several clocks that are generated outside the SoC. It is expected
+ that they are defined using standard clock bindings with the following
+ clock-output-names:
+ - "xin24m" - crystal input - required
+ - "xin32k" - rtc clock - optional
+ - "mclk_i2s0_8ch_in", "mclk_i2s1_8ch_in",
+ "mclk_i2s2_8ch_in", "mclk_i2s3_8ch_in",
+ "mclk_i2s0_2ch_in", "mclk_i2s1_2ch_in" - external I2S or
+ SPDIF clock - optional
+ - "mac_clkin" - external MAC clock - optional
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk3308-cru
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ "#reset-cells":
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: xin24m
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon managing the "general register files" (GRF).
+ If missing, PLL rates are not changeable because the PLL lock status
+ cannot be read.
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ cru: clock-controller@ff500000 {
+ compatible = "rockchip,rk3308-cru";
+ reg = <0xff500000 0x1000>;
+ rockchip,grf = <&grf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3368-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3368-cru.txt
deleted file mode 100644
index 7c8bbcfed8d2..000000000000
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3368-cru.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-* Rockchip RK3368 Clock and Reset Unit
-
-The RK3368 clock controller generates and supplies clock to various
-controllers within the SoC and also implements a reset controller for SoC
-peripherals.
-
-Required Properties:
-
-- compatible: should be "rockchip,rk3368-cru"
-- reg: physical base address of the controller and length of memory mapped
- region.
-- #clock-cells: should be 1.
-- #reset-cells: should be 1.
-
-Optional Properties:
-
-- rockchip,grf: phandle to the syscon managing the "general register files"
- If missing, pll rates are not changeable, due to the missing pll lock status.
-
-Each clock is assigned an identifier and client nodes can use this identifier
-to specify the clock which they consume. All available clocks are defined as
-preprocessor macros in the dt-bindings/clock/rk3368-cru.h headers and can be
-used in device tree sources. Similar macros exist for the reset sources in
-these files.
-
-External clocks:
-
-There are several clocks that are generated outside the SoC. It is expected
-that they are defined using standard clock bindings with following
-clock-output-names:
- - "xin24m" - crystal input - required,
- - "xin32k" - rtc clock - optional,
- - "ext_i2s" - external I2S clock - optional,
- - "ext_gmac" - external GMAC clock - optional
- - "ext_hsadc" - external HSADC clock - optional,
- - "ext_isp" - external ISP clock - optional,
- - "ext_jtag" - external JTAG clock - optional
- - "ext_vip" - external VIP clock - optional,
- - "usbotg_out" - output clock of the pll in the otg phy
-
-Example: Clock controller node:
-
- cru: clock-controller@ff760000 {
- compatible = "rockchip,rk3368-cru";
- reg = <0x0 0xff760000 0x0 0x1000>;
- rockchip,grf = <&grf>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
-Example: UART controller node that consumes the clock generated by the clock
- controller:
-
- uart0: serial@10124000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x10124000 0x400>;
- interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <1>;
- clocks = <&cru SCLK_UART0>;
- };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3368-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3368-cru.yaml
new file mode 100644
index 000000000000..adb67877720d
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3368-cru.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/rockchip,rk3368-cru.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip RK3368 Clock and Reset Unit (CRU)
+
+maintainers:
+ - Elaine Zhang <zhangqing@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+
+description: |
+ The RK3368 clock controller generates and supplies clocks to various
+ controllers within the SoC and also implements a reset controller for SoC
+ peripherals.
+ Each clock is assigned an identifier and client nodes can use this identifier
+ to specify the clock which they consume. All available clocks are defined as
+ preprocessor macros in the dt-bindings/clock/rk3368-cru.h headers and can be
+ used in device tree sources. Similar macros exist for the reset sources in
+ these files.
+ There are several clocks that are generated outside the SoC. It is expected
+ that they are defined using standard clock bindings with the following
+ clock-output-names:
+ - "xin24m" - crystal input - required
+ - "xin32k" - rtc clock - optional
+ - "ext_i2s" - external I2S clock - optional
+ - "ext_gmac" - external GMAC clock - optional
+ - "ext_hsadc" - external HSADC clock - optional
+ - "ext_isp" - external ISP clock - optional
+ - "ext_jtag" - external JTAG clock - optional
+ - "ext_vip" - external VIP clock - optional
+ - "usbotg_out" - output clock of the pll in the otg phy
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk3368-cru
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ "#reset-cells":
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: xin24m
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon managing the "general register files" (GRF).
+ If missing, PLL rates are not changeable because the PLL lock status
+ cannot be read.
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ cru: clock-controller@ff760000 {
+ compatible = "rockchip,rk3368-cru";
+ reg = <0xff760000 0x1000>;
+ rockchip,grf = <&grf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3399-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3399-cru.yaml
index 72b286a1beba..54da1e31ea73 100644
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3399-cru.yaml
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3399-cru.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Rockchip RK3399 Clock and Reset Unit
maintainers:
- - Xing Zheng <zhengxing@rock-chips.com>
+ - Elaine Zhang <zhangqing@rock-chips.com>
- Heiko Stuebner <heiko@sntech.de>
description: |
@@ -22,11 +22,11 @@ description: |
There are several clocks that are generated outside the SoC. It is expected
that they are defined using standard clock bindings with following
clock-output-names:
- - "xin24m" - crystal input - required,
- - "xin32k" - rtc clock - optional,
- - "clkin_gmac" - external GMAC clock - optional,
- - "clkin_i2s" - external I2S clock - optional,
- - "pclkin_cif" - external ISP clock - optional,
+ - "xin24m" - crystal input - required,
+ - "xin32k" - rtc clock - optional,
+ - "clkin_gmac" - external GMAC clock - optional,
+ - "clkin_i2s" - external I2S clock - optional,
+ - "pclkin_cif" - external ISP clock - optional,
- "clk_usbphy0_480m" - output clock of the pll in the usbphy0
- "clk_usbphy1_480m" - output clock of the pll in the usbphy1
@@ -46,24 +46,15 @@ properties:
const: 1
clocks:
- minItems: 1
-
- assigned-clocks:
- minItems: 1
- maxItems: 64
-
- assigned-clock-parents:
- minItems: 1
- maxItems: 64
+ maxItems: 1
- assigned-clock-rates:
- minItems: 1
- maxItems: 64
+ clock-names:
+ const: xin24m
rockchip,grf:
$ref: /schemas/types.yaml#/definitions/phandle
- description: >
- phandle to the syscon managing the "general register files". It is used
+ description:
+ Phandle to the syscon managing the "general register files". It is used
for GRF muxes, if missing any muxes present in the GRF will not be
available.
@@ -77,7 +68,7 @@ additionalProperties: false
examples:
- |
- pmucru: pmu-clock-controller@ff750000 {
+ pmucru: clock-controller@ff750000 {
compatible = "rockchip,rk3399-pmucru";
reg = <0xff750000 0x1000>;
#clock-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3568-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3568-cru.yaml
index b2c26097827f..fc7546f521c5 100644
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3568-cru.yaml
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3568-cru.yaml
@@ -34,6 +34,19 @@ properties:
"#reset-cells":
const: 1
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: xin24m
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon managing the "general register files" (GRF).
+ If missing, PLL rates are not changeable because the PLL lock status
+ cannot be read.
+
required:
- compatible
- reg
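A sketch of an RK3568 CRU node carrying the newly documented clocks, clock-names
and rockchip,grf properties; the register address is assumed from the SoC DTS and
xin24m refers to a fixed-rate crystal node defined elsewhere:

    cru: clock-controller@fdd20000 {
        compatible = "rockchip,rk3568-cru";
        reg = <0xfdd20000 0x1000>;
        rockchip,grf = <&grf>;
        clocks = <&xin24m>;
        clock-names = "xin24m";
        #clock-cells = <1>;
        #reset-cells = <1>;
    };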
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rv1108-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rv1108-cru.txt
deleted file mode 100644
index 161326a4f9c1..000000000000
--- a/Documentation/devicetree/bindings/clock/rockchip,rv1108-cru.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-* Rockchip RV1108 Clock and Reset Unit
-
-The RV1108 clock controller generates and supplies clock to various
-controllers within the SoC and also implements a reset controller for SoC
-peripherals.
-
-Required Properties:
-
-- compatible: should be "rockchip,rv1108-cru"
-- reg: physical base address of the controller and length of memory mapped
- region.
-- #clock-cells: should be 1.
-- #reset-cells: should be 1.
-
-Optional Properties:
-
-- rockchip,grf: phandle to the syscon managing the "general register files"
- If missing pll rates are not changeable, due to the missing pll lock status.
-
-Each clock is assigned an identifier and client nodes can use this identifier
-to specify the clock which they consume. All available clocks are defined as
-preprocessor macros in the dt-bindings/clock/rv1108-cru.h headers and can be
-used in device tree sources. Similar macros exist for the reset sources in
-these files.
-
-External clocks:
-
-There are several clocks that are generated outside the SoC. It is expected
-that they are defined using standard clock bindings with following
-clock-output-names:
- - "xin24m" - crystal input - required,
- - "ext_vip" - external VIP clock - optional
- - "ext_i2s" - external I2S clock - optional
- - "ext_gmac" - external GMAC clock - optional
- - "hdmiphy" - external clock input derived from HDMI PHY - optional
- - "usbphy" - external clock input derived from USB PHY - optional
-
-Example: Clock controller node:
-
- cru: cru@20200000 {
- compatible = "rockchip,rv1108-cru";
- reg = <0x20200000 0x1000>;
- rockchip,grf = <&grf>;
-
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
-Example: UART controller node that consumes the clock generated by the clock
- controller:
-
- uart0: serial@10230000 {
- compatible = "rockchip,rv1108-uart", "snps,dw-apb-uart";
- reg = <0x10230000 0x100>;
- interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clocks = <&cru SCLK_UART0>;
- };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rv1108-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rv1108-cru.yaml
new file mode 100644
index 000000000000..20421c22f184
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,rv1108-cru.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/rockchip,rv1108-cru.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip RV1108 Clock and Reset Unit (CRU)
+
+maintainers:
+ - Elaine Zhang <zhangqing@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+
+description: |
+ The RV1108 clock controller generates and supplies clocks to various
+ controllers within the SoC and also implements a reset controller for SoC
+ peripherals.
+ Each clock is assigned an identifier and client nodes can use this identifier
+ to specify the clock which they consume. All available clocks are defined as
+ preprocessor macros in the dt-bindings/clock/rv1108-cru.h headers and can be
+ used in device tree sources. Similar macros exist for the reset sources in
+ these files.
+ There are several clocks that are generated outside the SoC. It is expected
+ that they are defined using standard clock bindings with the following
+ clock-output-names:
+ - "xin24m" - crystal input - required
+ - "ext_vip" - external VIP clock - optional
+ - "ext_i2s" - external I2S clock - optional
+ - "ext_gmac" - external GMAC clock - optional
+ - "hdmiphy" - external clock input derived from HDMI PHY - optional
+ - "usbphy" - external clock input derived from USB PHY - optional
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rv1108-cru
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ "#reset-cells":
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: xin24m
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon managing the "general register files" (GRF).
+ If missing, PLL rates are not changeable because the PLL lock status
+ cannot be read.
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ cru: clock-controller@20200000 {
+ compatible = "rockchip,rv1108-cru";
+ reg = <0x20200000 0x1000>;
+ rockchip,grf = <&grf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml b/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml
index 45b94124366c..f8c474227807 100644
--- a/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml
+++ b/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml
@@ -41,6 +41,7 @@ description: |
The list of valid indices for STM32MP1 is available in:
include/dt-bindings/reset-controller/stm32mp1-resets.h
+ include/dt-bindings/reset-controller/stm32mp13-resets.h
This file implements defines like:
#define LTDC_R 3072
@@ -57,6 +58,7 @@ properties:
- enum:
- st,stm32mp1-rcc-secure
- st,stm32mp1-rcc
+ - st,stm32mp13-rcc
- const: syscon
clocks: true
clock-names: true
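A consumer then requests a reset through those defines; a sketch assuming an LTDC
node (the node contents besides the resets property are illustrative):

    #include <dt-bindings/reset-controller/stm32mp1-resets.h>

    ltdc: display-controller@5a001000 {
        compatible = "st,stm32-ltdc";
        reg = <0x5a001000 0x400>;
        resets = <&rcc LTDC_R>;    /* LTDC_R = 3072, from stm32mp1-resets.h */
    };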
diff --git a/Documentation/devicetree/bindings/clock/stericsson,u8500-clks.yaml b/Documentation/devicetree/bindings/clock/stericsson,u8500-clks.yaml
index 9bc95a308477..2150307219a0 100644
--- a/Documentation/devicetree/bindings/clock/stericsson,u8500-clks.yaml
+++ b/Documentation/devicetree/bindings/clock/stericsson,u8500-clks.yaml
@@ -109,6 +109,25 @@ properties:
additionalProperties: false
+ clkout-clock:
+ description: A subnode with three clock cells for the externally routed
+ output clocks. These are two PRCMU-internal clocks that can be divided and
+ muxed out on the pads of the DB8500 SoC.
+ type: object
+
+ properties:
+ '#clock-cells':
+ description:
+ The first cell selects the output clock; possible values are
+ 0 (CLKOUT1) and 1 (CLKOUT2).
+ The second cell selects the source clock; possible values are
+ 0 through 7, see the defines for the different
+ source clocks.
+ The third cell is a divider; legal values are 1 through 63.
+ const: 3
+
+ additionalProperties: false
+
required:
- compatible
- reg
@@ -119,3 +138,41 @@ required:
- smp-twd-clock
additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/ste-db8500-clkout.h>
+ clocks@8012f000 {
+ compatible = "stericsson,u8500-clks";
+ reg = <0x8012f000 0x1000>, <0x8011f000 0x1000>,
+ <0x8000f000 0x1000>, <0xa03ff000 0x1000>,
+ <0xa03cf000 0x1000>;
+
+ prcmu_clk: prcmu-clock {
+ #clock-cells = <1>;
+ };
+
+ prcc_pclk: prcc-periph-clock {
+ #clock-cells = <2>;
+ };
+
+ prcc_kclk: prcc-kernel-clock {
+ #clock-cells = <2>;
+ };
+
+ prcc_reset: prcc-reset-controller {
+ #reset-cells = <2>;
+ };
+
+ rtc_clk: rtc32k-clock {
+ #clock-cells = <0>;
+ };
+
+ smp_twd_clk: smp-twd-clock {
+ #clock-cells = <0>;
+ };
+
+ clkout_clk: clkout-clock {
+ #clock-cells = <3>;
+ };
+ };
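For illustration, a consumer of the three-cell clkout specifier, selecting
CLKOUT1 (first cell 0), source clock 2 and a divide-by-8; the consumer node and
the chosen source index are assumptions, and symbolic defines for the selector
and source values live in dt-bindings/clock/ste-db8500-clkout.h:

    ext-audio-codec {
        clocks = <&clkout_clk 0 2 8>;
        clock-names = "mclk";
    };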
diff --git a/Documentation/devicetree/bindings/clock/ti,am654-ehrpwm-tbclk.yaml b/Documentation/devicetree/bindings/clock/ti,am654-ehrpwm-tbclk.yaml
index 9b537bc876b5..66765116aff5 100644
--- a/Documentation/devicetree/bindings/clock/ti,am654-ehrpwm-tbclk.yaml
+++ b/Documentation/devicetree/bindings/clock/ti,am654-ehrpwm-tbclk.yaml
@@ -15,6 +15,7 @@ properties:
- enum:
- ti,am654-ehrpwm-tbclk
- ti,am64-epwm-tbclk
+ - ti,am62-epwm-tbclk
- const: syscon
"#clock-cells":
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt
index b8233ec91d3d..e0a4ba599abc 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt
@@ -20,6 +20,13 @@ Optional properties:
Vsram to fit SoC specific needs. When absent, the voltage scaling
flow is handled by hardware, hence no software "voltage tracking" is
needed.
+- mediatek,cci:
+ Used to confirm the link status between cpufreq and the MediaTek CCI,
+ since cpufreq and the MediaTek CCI may share the same regulator on some
+ MediaTek SoCs. This property ensures the MediaTek CCI is ready before
+ scaling, to prevent a high-frequency/low-voltage condition.
+ For details of mediatek cci, please refer to
+ Documentation/devicetree/bindings/interconnect/mediatek,cci.yaml
- #cooling-cells:
For details, please refer to
Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml
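A sketch of a CPU node using the new mediatek,cci property; the regulator and
CCI labels are placeholders, and the property is assumed to be a phandle to the
MediaTek CCI node described in mediatek,cci.yaml:

    cpu0: cpu@0 {
        device_type = "cpu";
        compatible = "arm,cortex-a55";
        reg = <0x000>;
        proc-supply = <&vproc_reg>;
        mediatek,cci = <&cci>;
    };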
diff --git a/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml b/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml
index a410d2cedde6..02f47c2e7998 100644
--- a/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml
+++ b/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml
@@ -15,6 +15,7 @@ properties:
- ti,j721e-sa2ul
- ti,am654-sa2ul
- ti,am64-sa2ul
+ - ti,am62-sa3ul
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml b/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
index b6e1ebfaf366..ff0a5c58d78c 100644
--- a/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
@@ -20,9 +20,11 @@ properties:
compatible:
oneOf:
- - const: allwinner,sun50i-a64-dma
- - const: allwinner,sun50i-a100-dma
- - const: allwinner,sun50i-h6-dma
+ - enum:
+ - allwinner,sun20i-d1-dma
+ - allwinner,sun50i-a64-dma
+ - allwinner,sun50i-a100-dma
+ - allwinner,sun50i-h6-dma
- items:
- const: allwinner,sun8i-r40-dma
- const: allwinner,sun50i-a64-dma
@@ -58,6 +60,7 @@ if:
properties:
compatible:
enum:
+ - allwinner,sun20i-d1-dma
- allwinner,sun50i-a100-dma
- allwinner,sun50i-h6-dma
diff --git a/Documentation/devicetree/bindings/dma/altr,msgdma.yaml b/Documentation/devicetree/bindings/dma/altr,msgdma.yaml
index b193ee2db4a7..b53ac7631a76 100644
--- a/Documentation/devicetree/bindings/dma/altr,msgdma.yaml
+++ b/Documentation/devicetree/bindings/dma/altr,msgdma.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Altera mSGDMA IP core
maintainers:
- - Olivier Dautricourt <olivier.dautricourt@orolia.com>
+ - Olivier Dautricourt <olivierdautricourt@gmail.com>
description: |
Altera / Intel modular Scatter-Gather Direct Memory Access (mSGDMA)
diff --git a/Documentation/devicetree/bindings/dma/arm,pl330.yaml b/Documentation/devicetree/bindings/dma/arm,pl330.yaml
index decab185cf4d..2bec69b308f8 100644
--- a/Documentation/devicetree/bindings/dma/arm,pl330.yaml
+++ b/Documentation/devicetree/bindings/dma/arm,pl330.yaml
@@ -55,6 +55,9 @@ properties:
dma-coherent: true
+ power-domains:
+ maxItems: 1
+
resets:
minItems: 1
maxItems: 2
diff --git a/Documentation/devicetree/bindings/dma/mmp-dma.txt b/Documentation/devicetree/bindings/dma/mmp-dma.txt
index 8f7364a7b349..ec18bf0a802a 100644
--- a/Documentation/devicetree/bindings/dma/mmp-dma.txt
+++ b/Documentation/devicetree/bindings/dma/mmp-dma.txt
@@ -10,10 +10,12 @@ Required properties:
or one irq for pdma device
Optional properties:
-- #dma-channels: Number of DMA channels supported by the controller (defaults
+- dma-channels: Number of DMA channels supported by the controller (defaults
to 32 when not specified)
-- #dma-requests: Number of DMA requestor lines supported by the controller
+- #dma-channels: deprecated
+- dma-requests: Number of DMA requestor lines supported by the controller
(defaults to 32 when not specified)
+- #dma-requests: deprecated
"marvell,pdma-1.0"
Used platforms: pxa25x, pxa27x, pxa3xx, pxa93x, pxa168, pxa910, pxa688.
@@ -33,7 +35,7 @@ pdma: dma-controller@d4000000 {
reg = <0xd4000000 0x10000>;
interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15>;
interrupt-parent = <&intcmux32>;
- #dma-channels = <16>;
+ dma-channels = <16>;
};
/*
@@ -45,7 +47,7 @@ pdma: dma-controller@d4000000 {
compatible = "marvell,pdma-1.0";
reg = <0xd4000000 0x10000>;
interrupts = <47>;
- #dma-channels = <16>;
+ dma-channels = <16>;
};
diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml b/Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml
new file mode 100644
index 000000000000..9dd1476d1849
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/nvidia,tegra186-gpc-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra GPC DMA Controller Device Tree Bindings
+
+description: |
+ The Tegra General Purpose Central (GPC) DMA controller is used for faster
+ data transfers between memory to memory, memory to device and device to
+ memory.
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Rajesh Gumasta <rgumasta@nvidia.com>
+
+allOf:
+ - $ref: "dma-controller.yaml#"
+
+properties:
+ compatible:
+ oneOf:
+ - const: nvidia,tegra186-gpcdma
+ - items:
+ - const: nvidia,tegra194-gpcdma
+ - const: nvidia,tegra186-gpcdma
+
+ "#dma-cells":
+ const: 1
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description:
+ Should contain all of the per-channel DMA interrupts in
+ ascending order with respect to the DMA channel index.
+ minItems: 1
+ maxItems: 31
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: gpcdma
+
+ iommus:
+ maxItems: 1
+
+ dma-coherent: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - resets
+ - reset-names
+ - "#dma-cells"
+ - iommus
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/memory/tegra186-mc.h>
+ #include <dt-bindings/reset/tegra186-reset.h>
+
+ dma-controller@2600000 {
+ compatible = "nvidia,tegra186-gpcdma";
+ reg = <0x2600000 0x210000>;
+ resets = <&bpmp TEGRA186_RESET_GPCDMA>;
+ reset-names = "gpcdma";
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ iommus = <&smmu TEGRA186_SID_GPCDMA_0>;
+ dma-coherent;
+ };
+...
diff --git a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
index d09d79d7406a..7d2fc4eb5530 100644
--- a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
+++ b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
@@ -19,9 +19,12 @@ allOf:
properties:
compatible:
enum:
+ - qcom,sc7280-gpi-dma
- qcom,sdm845-gpi-dma
- qcom,sm8150-gpi-dma
- qcom,sm8250-gpi-dma
+ - qcom,sm8350-gpi-dma
+ - qcom,sm8450-gpi-dma
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
index 7c6badf39921..7202cd68e759 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
@@ -42,11 +42,10 @@ properties:
- const: renesas,rcar-dmac
- items:
- - const: renesas,dmac-r8a779a0 # R-Car V3U
-
- - items:
- - const: renesas,dmac-r8a779f0 # R-Car S4-8
- - const: renesas,rcar-gen4-dmac
+ - enum:
+ - renesas,dmac-r8a779a0 # R-Car V3U
+ - renesas,dmac-r8a779f0 # R-Car S4-8
+ - const: renesas,rcar-gen4-dmac # R-Car Gen4
reg: true
@@ -121,7 +120,6 @@ if:
compatible:
contains:
enum:
- - renesas,dmac-r8a779a0
- renesas,rcar-gen4-dmac
then:
properties:
diff --git a/Documentation/devicetree/bindings/dma/renesas,rzn1-dmamux.yaml b/Documentation/devicetree/bindings/dma/renesas,rzn1-dmamux.yaml
new file mode 100644
index 000000000000..d83013b0dd74
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/renesas,rzn1-dmamux.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/renesas,rzn1-dmamux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas RZ/N1 DMA mux
+
+maintainers:
+ - Miquel Raynal <miquel.raynal@bootlin.com>
+
+allOf:
+ - $ref: "dma-router.yaml#"
+
+properties:
+ compatible:
+ const: renesas,rzn1-dmamux
+
+ reg:
+ maxItems: 1
+ description: Offset of the first DMA mux register within the system control parent.
+
+ '#dma-cells':
+ const: 6
+ description:
+ The first four cells are dedicated to the master DMA controller. The fifth
+ cell gives the DMA mux bit index (counted from 0) that must be set. The
+ sixth cell gives the binary value that must be written there, i.e. 0 or 1.
+
+ dma-masters:
+ minItems: 1
+ maxItems: 2
+
+ dma-requests:
+ const: 32
+
+required:
+ - reg
+ - dma-requests
+
+additionalProperties: false
+
+examples:
+ - |
+ dma-router@a0 {
+ compatible = "renesas,rzn1-dmamux";
+ reg = <0xa0 4>;
+ #dma-cells = <6>;
+ dma-masters = <&dma0 &dma1>;
+ dma-requests = <32>;
+ };
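A sketch of a consumer referencing the mux: the first four cells are forwarded
unchanged to the master DMA controller (their exact meaning comes from that
controller's binding, so the values below are placeholders), the fifth cell
selects the mux bit and the sixth cell gives the value written to it:

    periph-client {
        dmas = <&dmamux 0 0 1 0 8 1>,   /* mux bit 8 set to 1 */
               <&dmamux 1 0 1 0 9 1>;   /* mux bit 9 set to 1 */
        dma-names = "tx", "rx";
    };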
diff --git a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
index 47c46af25536..3271755787b4 100644
--- a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
+++ b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
@@ -28,7 +28,15 @@ allOf:
properties:
compatible:
items:
- - const: sifive,fu540-c000-pdma
+ - enum:
+ - sifive,fu540-c000-pdma
+ - const: sifive,pdma0
+ description:
+ Should be "sifive,<chip>-pdma" and "sifive,pdma<version>".
+ Supported compatible strings are -
+ "sifive,fu540-c000-pdma" for the SiFive PDMA v0 as integrated onto the
+ SiFive FU540 chip resp and "sifive,pdma0" for the SiFive PDMA v0 IP block
+ with no chip integration tweaks.
reg:
maxItems: 1
@@ -37,6 +45,12 @@ properties:
minItems: 1
maxItems: 8
+ dma-channels:
+ description: For backwards-compatibility, the default value is 4
+ minimum: 1
+ maximum: 4
+ default: 4
+
'#dma-cells':
const: 1
@@ -50,8 +64,9 @@ unevaluatedProperties: false
examples:
- |
dma-controller@3000000 {
- compatible = "sifive,fu540-c000-pdma";
+ compatible = "sifive,fu540-c000-pdma", "sifive,pdma0";
reg = <0x3000000 0x8000>;
+ dma-channels = <4>;
interrupts = <23>, <24>, <25>, <26>, <27>, <28>, <29>, <30>;
#dma-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml b/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml
index 6b35089ac017..c13649bf7f19 100644
--- a/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml
+++ b/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml
@@ -15,7 +15,13 @@ allOf:
properties:
compatible:
- const: snps,dma-spear1340
+ oneOf:
+ - const: snps,dma-spear1340
+ - items:
+ - enum:
+ - renesas,r9a06g032-dma
+ - const: renesas,rzn1-dma
+
"#dma-cells":
minimum: 3
diff --git a/Documentation/devicetree/bindings/dma/sprd-dma.txt b/Documentation/devicetree/bindings/dma/sprd-dma.txt
index adccea9941f1..c7e9b5fd50e7 100644
--- a/Documentation/devicetree/bindings/dma/sprd-dma.txt
+++ b/Documentation/devicetree/bindings/dma/sprd-dma.txt
@@ -8,10 +8,13 @@ Required properties:
- interrupts: Should contain one interrupt shared by all channel.
- #dma-cells: must be <1>. Used to represent the number of integer
cells in the dmas property of client device.
-- #dma-channels : Number of DMA channels supported. Should be 32.
+- dma-channels : Number of DMA channels supported. Should be 32.
- clock-names: Should contain the clock of the DMA controller.
- clocks: Should contain a clock specifier for each entry in clock-names.
+Deprecated properties:
+- #dma-channels : Number of DMA channels supported. Should be 32.
+
Example:
Controller:
@@ -20,7 +23,7 @@ apdma: dma-controller@20100000 {
reg = <0x20100000 0x4000>;
interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
- #dma-channels = <32>;
+ dma-channels = <32>;
clock-names = "enable";
clocks = <&clk_ap_ahb_gates 5>;
};
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
index 325aca52cd43..d1700a5c36bf 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
@@ -110,7 +110,11 @@ axi_vdma_0: axivdma@40030000 {
Required properties:
- dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs,
where Channel ID is '0' for write/tx and '1' for read/rx
- channel.
+ channel. For the MCDMA, MM2S (write/tx) channel IDs start from
+ '0' and lie in the [0-15] range. S2MM (read/rx) channel IDs start
+ from '16' and lie in the [16-31] range. These channel IDs are
+ fixed irrespective of the IP configuration.
+
- dma-names: a list of DMA channel names, one per "dmas" entry
Example:
diff --git a/Documentation/devicetree/bindings/extcon/siliconmitus,sm5502-muic.yaml b/Documentation/devicetree/bindings/extcon/siliconmitus,sm5502-muic.yaml
index fd2e55088888..7a224b2f0977 100644
--- a/Documentation/devicetree/bindings/extcon/siliconmitus,sm5502-muic.yaml
+++ b/Documentation/devicetree/bindings/extcon/siliconmitus,sm5502-muic.yaml
@@ -20,11 +20,12 @@ properties:
enum:
- siliconmitus,sm5502-muic
- siliconmitus,sm5504-muic
+ - siliconmitus,sm5703-muic
reg:
maxItems: 1
- description: I2C slave address of the device. Usually 0x25 for SM5502,
- 0x14 for SM5504.
+ description: I2C slave address of the device. Usually 0x25 for SM5502
+ and SM5703, 0x14 for SM5504.
interrupts:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/gpio/gpio-altera.txt b/Documentation/devicetree/bindings/gpio/gpio-altera.txt
index 146e554b3c67..2a80e272cd66 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-altera.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-altera.txt
@@ -9,8 +9,9 @@ Required properties:
- The second cell is reserved and is currently unused.
- gpio-controller : Marks the device node as a GPIO controller.
- interrupt-controller: Mark the device node as an interrupt controller
-- #interrupt-cells : Should be 1. The interrupt type is fixed in the hardware.
+- #interrupt-cells : Should be 2. The interrupt type is fixed in the hardware.
- The first cell is the GPIO offset number within the GPIO controller.
+ - The second cell is the interrupt trigger type and level flags.
- interrupts: Specify the interrupt.
- altr,interrupt-type: Specifies the interrupt trigger type the GPIO
hardware is synthesized. This field is required if the Altera GPIO controller
@@ -38,6 +39,6 @@ gpio_altr: gpio@ff200000 {
altr,interrupt-type = <IRQ_TYPE_EDGE_RISING>;
#gpio-cells = <2>;
gpio-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
interrupt-controller;
};
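With two interrupt cells, a client device now passes both the GPIO offset and
the trigger flags; a minimal sketch (the client node itself is illustrative):

    temp-alarm {
        interrupt-parent = <&gpio_altr>;
        interrupts = <5 IRQ_TYPE_EDGE_RISING>;  /* GPIO offset 5, rising edge */
    };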
diff --git a/Documentation/devicetree/bindings/i2c/renesas,rcar-i2c.yaml b/Documentation/devicetree/bindings/i2c/renesas,rcar-i2c.yaml
index c30107833a51..f9929578c761 100644
--- a/Documentation/devicetree/bindings/i2c/renesas,rcar-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/renesas,rcar-i2c.yaml
@@ -46,11 +46,11 @@ properties:
- renesas,i2c-r8a77980 # R-Car V3H
- renesas,i2c-r8a77990 # R-Car E3
- renesas,i2c-r8a77995 # R-Car D3
- - renesas,i2c-r8a779a0 # R-Car V3U
- const: renesas,rcar-gen3-i2c # R-Car Gen3 and RZ/G2
- items:
- enum:
+ - renesas,i2c-r8a779a0 # R-Car V3U
- renesas,i2c-r8a779f0 # R-Car S4-8
- const: renesas,rcar-gen4-i2c # R-Car Gen4
diff --git a/Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt b/Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt
deleted file mode 100644
index 3716589d6999..000000000000
--- a/Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-Bindings for cadence I3C master block
-=====================================
-
-Required properties:
---------------------
-- compatible: shall be "cdns,i3c-master"
-- clocks: shall reference the pclk and sysclk
-- clock-names: shall contain "pclk" and "sysclk"
-- interrupts: the interrupt line connected to this I3C master
-- reg: I3C master registers
-
-Mandatory properties defined by the generic binding (see
-Documentation/devicetree/bindings/i3c/i3c.yaml for more details):
-
-- #address-cells: shall be set to 1
-- #size-cells: shall be set to 0
-
-Optional properties defined by the generic binding (see
-Documentation/devicetree/bindings/i3c/i3c.yaml for more details):
-
-- i2c-scl-hz
-- i3c-scl-hz
-
-I3C device connected on the bus follow the generic description (see
-Documentation/devicetree/bindings/i3c/i3c.yaml for more details).
-
-Example:
-
- i3c-master@0d040000 {
- compatible = "cdns,i3c-master";
- clocks = <&coreclock>, <&i3csysclock>;
- clock-names = "pclk", "sysclk";
- interrupts = <3 0>;
- reg = <0x0d040000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- i2c-scl-hz = <100000>;
-
- nunchuk: nunchuk@52 {
- compatible = "nintendo,nunchuk";
- reg = <0x52 0x0 0x10>;
- };
- };
diff --git a/Documentation/devicetree/bindings/i3c/cdns,i3c-master.yaml b/Documentation/devicetree/bindings/i3c/cdns,i3c-master.yaml
new file mode 100644
index 000000000000..cc40d25358ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/i3c/cdns,i3c-master.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i3c/cdns,i3c-master.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cadence I3C master block
+
+maintainers:
+ - Boris Brezillon <bbrezillon@kernel.org>
+
+allOf:
+ - $ref: i3c.yaml#
+
+properties:
+ compatible:
+ const: cdns,i3c-master
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: sysclk
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i3c-master@d040000 {
+ compatible = "cdns,i3c-master";
+ clocks = <&coreclock>, <&i3csysclock>;
+ clock-names = "pclk", "sysclk";
+ interrupts = <3 0>;
+ reg = <0x0d040000 0x1000>;
+ #address-cells = <3>;
+ #size-cells = <0>;
+ i2c-scl-hz = <100000>;
+
+ eeprom@57 {
+ compatible = "atmel,24c01";
+ reg = <0x57 0x0 0x10>;
+ pagesize = <0x8>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt b/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
deleted file mode 100644
index 07f35f36085d..000000000000
--- a/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-Bindings for Synopsys DesignWare I3C master block
-=================================================
-
-Required properties:
---------------------
-- compatible: shall be "snps,dw-i3c-master-1.00a"
-- clocks: shall reference the core_clk
-- interrupts: the interrupt line connected to this I3C master
-- reg: Offset and length of I3C master registers
-
-Mandatory properties defined by the generic binding (see
-Documentation/devicetree/bindings/i3c/i3c.yaml for more details):
-
-- #address-cells: shall be set to 3
-- #size-cells: shall be set to 0
-
-Optional properties defined by the generic binding (see
-Documentation/devicetree/bindings/i3c/i3c.yaml for more details):
-
-- i2c-scl-hz
-- i3c-scl-hz
-
-I3C device connected on the bus follow the generic description (see
-Documentation/devicetree/bindings/i3c/i3c.yaml for more details).
-
-Example:
-
- i3c-master@2000 {
- compatible = "snps,dw-i3c-master-1.00a";
- #address-cells = <3>;
- #size-cells = <0>;
- reg = <0x02000 0x1000>;
- interrupts = <0>;
- clocks = <&i3cclk>;
-
- eeprom@57{
- compatible = "atmel,24c01";
- reg = <0x57 0x0 0x10>;
- pagesize = <0x8>;
- };
- };
diff --git a/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml b/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml
new file mode 100644
index 000000000000..7a76fd32962a
--- /dev/null
+++ b/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i3c/snps,dw-i3c-master.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare I3C master block
+
+maintainers:
+ - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+allOf:
+ - $ref: i3c.yaml#
+
+properties:
+ compatible:
+ const: snps,dw-i3c-master-1.00a
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i3c-master@2000 {
+ compatible = "snps,dw-i3c-master-1.00a";
+ #address-cells = <3>;
+ #size-cells = <0>;
+ reg = <0x02000 0x1000>;
+ interrupts = <0>;
+ clocks = <&i3cclk>;
+
+ eeprom@57 {
+ compatible = "atmel,24c01";
+ reg = <0x57 0x0 0x10>;
+ pagesize = <0x8>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml b/Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml
index c80201d6a716..d66c24cae1e1 100644
--- a/Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml
@@ -19,7 +19,8 @@ properties:
compatible:
items:
- enum:
- - renesas,r9a07g044-adc # RZ/G2{L,LC}
+ - renesas,r9a07g044-adc # RZ/G2L
+ - renesas,r9a07g054-adc # RZ/V2L
- const: renesas,rzg2l-adc
reg:
diff --git a/Documentation/devicetree/bindings/iio/adc/sprd,sc2720-adc.yaml b/Documentation/devicetree/bindings/iio/adc/sprd,sc2720-adc.yaml
index caa3ee0b4b8c..44aa28b59197 100644
--- a/Documentation/devicetree/bindings/iio/adc/sprd,sc2720-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/sprd,sc2720-adc.yaml
@@ -20,6 +20,7 @@ properties:
- sprd,sc2723-adc
- sprd,sc2730-adc
- sprd,sc2731-adc
+ - sprd,ump9620-adc
reg:
maxItems: 1
@@ -33,13 +34,39 @@ properties:
hwlocks:
maxItems: 1
- nvmem-cells:
- maxItems: 2
+ nvmem-cells: true
- nvmem-cell-names:
- items:
- - const: big_scale_calib
- - const: small_scale_calib
+ nvmem-cell-names: true
+
+allOf:
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - sprd,ump9620-adc
+ then:
+ properties:
+ nvmem-cells:
+ maxItems: 2
+ nvmem-cell-names:
+ items:
+ - const: big_scale_calib
+ - const: small_scale_calib
+
+ else:
+ properties:
+ nvmem-cells:
+ maxItems: 6
+ nvmem-cell-names:
+ items:
+ - const: big_scale_calib1
+ - const: big_scale_calib2
+ - const: small_scale_calib1
+ - const: small_scale_calib2
+ - const: vbat_det_cal1
+ - const: vbat_det_cal2
required:
- compatible
@@ -69,4 +96,25 @@ examples:
nvmem-cell-names = "big_scale_calib", "small_scale_calib";
};
};
+
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ pmic {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ adc@504 {
+ compatible = "sprd,ump9620-adc";
+ reg = <0x504>;
+ interrupt-parent = <&ump9620_pmic>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ #io-channel-cells = <1>;
+ hwlocks = <&hwlock 4>;
+ nvmem-cells = <&adc_bcal1>, <&adc_bcal2>,
+ <&adc_scal1>, <&adc_scal2>,
+ <&vbat_det_cal1>, <&vbat_det_cal2>;
+ nvmem-cell-names = "big_scale_calib1", "big_scale_calib2",
+ "small_scale_calib1", "small_scale_calib2",
+ "vbat_det_cal1", "vbat_det_cal2";
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml b/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml
index 2c2d01bbc296..a3b79438a13a 100644
--- a/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/iio/adc/ti,ads1015.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: TI ADS1015 4 channel I2C analog to digital converter
+title: TI ADS1015/ADS1115 4 channel I2C analog to digital converter
maintainers:
- Daniel Baluta <daniel.baluta@nxp.com>
@@ -15,7 +15,10 @@ description: |
properties:
compatible:
- const: ti,ads1015
+ enum:
+ - ti,ads1015
+ - ti,ads1115
+ - ti,tla2024
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
index 501a463e5d88..9c48c76993fe 100644
--- a/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analog Devices AD2552R DAC device driver
maintainers:
- - Mihail Chindris <mihail.chindris@analog.com>
+ - Nuno Sá <nuno.sa@analog.com>
description: |
Bindings for the Analog Devices AD3552R DAC device and similar.
diff --git a/Documentation/devicetree/bindings/iio/dac/lltc,ltc2632.yaml b/Documentation/devicetree/bindings/iio/dac/lltc,ltc2632.yaml
index edf804d0aca2..b1eb77335d05 100644
--- a/Documentation/devicetree/bindings/iio/dac/lltc,ltc2632.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/lltc,ltc2632.yaml
@@ -68,7 +68,7 @@ examples:
#size-cells = <0>;
dac@0 {
- compatible = "lltc,ltc2632";
+ compatible = "lltc,ltc2632-l12";
reg = <0>; /* CS0 */
spi-max-frequency = <1000000>;
vref-supply = <&vref>;
diff --git a/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml b/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
index d69595a524c1..3ebc6526d82d 100644
--- a/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
@@ -14,21 +14,25 @@ description: |
properties:
compatible:
- enum:
- - invensense,iam20680
- - invensense,icm20608
- - invensense,icm20609
- - invensense,icm20689
- - invensense,icm20602
- - invensense,icm20690
- - invensense,mpu6000
- - invensense,mpu6050
- - invensense,mpu6500
- - invensense,mpu6515
- - invensense,mpu6880
- - invensense,mpu9150
- - invensense,mpu9250
- - invensense,mpu9255
+ oneOf:
+ - enum:
+ - invensense,iam20680
+ - invensense,icm20608
+ - invensense,icm20609
+ - invensense,icm20689
+ - invensense,icm20602
+ - invensense,icm20690
+ - invensense,mpu6000
+ - invensense,mpu6050
+ - invensense,mpu6500
+ - invensense,mpu6515
+ - invensense,mpu6880
+ - invensense,mpu9150
+ - invensense,mpu9250
+ - invensense,mpu9255
+ - items:
+ - const: invensense,icm20608d
+ - const: invensense,icm20608
reg:
maxItems: 1
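
A hedged sketch of how the new icm20608d entry is intended to be used, with the plain icm20608 compatible as fallback; the bus address and interrupt wiring are illustrative:

    #include <dt-bindings/interrupt-controller/irq.h>

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        imu@68 {
            /* ICM-20608-D lists the generic ICM-20608 as a fallback */
            compatible = "invensense,icm20608d", "invensense,icm20608";
            reg = <0x68>;
            interrupt-parent = <&gpio2>;           /* illustrative */
            interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;  /* illustrative */
        };
    };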
diff --git a/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml b/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml
index 0750f700a143..5d4839f00898 100644
--- a/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml
@@ -14,23 +14,27 @@ description:
properties:
compatible:
- enum:
- - st,lsm6ds3
- - st,lsm6ds3h
- - st,lsm6dsl
- - st,lsm6dsm
- - st,ism330dlc
- - st,lsm6dso
- - st,asm330lhh
- - st,lsm6dsox
- - st,lsm6dsr
- - st,lsm6ds3tr-c
- - st,ism330dhcx
- - st,lsm9ds1-imu
- - st,lsm6ds0
- - st,lsm6dsrx
- - st,lsm6dst
- - st,lsm6dsop
+ oneOf:
+ - enum:
+ - st,lsm6ds3
+ - st,lsm6ds3h
+ - st,lsm6dsl
+ - st,lsm6dsm
+ - st,ism330dlc
+ - st,lsm6dso
+ - st,asm330lhh
+ - st,lsm6dsox
+ - st,lsm6dsr
+ - st,lsm6ds3tr-c
+ - st,ism330dhcx
+ - st,lsm9ds1-imu
+ - st,lsm6ds0
+ - st,lsm6dsrx
+ - st,lsm6dst
+ - st,lsm6dsop
+ - items:
+ - const: st,asm330lhhx
+ - const: st,lsm6dsr
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/light/stk33xx.yaml b/Documentation/devicetree/bindings/iio/light/stk33xx.yaml
index f92bf7b2b7f0..f6e22dc9814a 100644
--- a/Documentation/devicetree/bindings/iio/light/stk33xx.yaml
+++ b/Documentation/devicetree/bindings/iio/light/stk33xx.yaml
@@ -13,6 +13,9 @@ maintainers:
description: |
Ambient light and proximity sensor over an i2c interface.
+allOf:
+ - $ref: ../common.yaml#
+
properties:
compatible:
enum:
@@ -26,6 +29,8 @@ properties:
interrupts:
maxItems: 1
+ proximity-near-level: true
+
required:
- compatible
- reg
@@ -44,6 +49,7 @@ examples:
stk3310@48 {
compatible = "sensortek,stk3310";
reg = <0x48>;
+ proximity-near-level = <25>;
interrupt-parent = <&gpio1>;
interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
};
diff --git a/Documentation/devicetree/bindings/iio/potentiometer/microchip,mcp4131.yaml b/Documentation/devicetree/bindings/iio/potentiometer/microchip,mcp4131.yaml
index 945a2d644ddc..32e92bced81f 100644
--- a/Documentation/devicetree/bindings/iio/potentiometer/microchip,mcp4131.yaml
+++ b/Documentation/devicetree/bindings/iio/potentiometer/microchip,mcp4131.yaml
@@ -95,7 +95,7 @@ examples:
#size-cells = <0>;
potentiometer@0 {
- compatible = "mcp4131-502";
+ compatible = "microchip,mcp4131-502";
reg = <0>;
spi-max-frequency = <500000>;
};
diff --git a/Documentation/devicetree/bindings/iio/st,st-sensors.yaml b/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
index 9735a2048255..fcb2902683c7 100644
--- a/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
+++ b/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
@@ -29,6 +29,7 @@ properties:
- st,lis2dw12
- st,lis2hh12
- st,lis2dh12-accel
+ - st,lis302dl
- st,lis331dl-accel
- st,lis331dlh-accel
- st,lis3de
diff --git a/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml b/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
index d74f2002409e..3399fc288afb 100644
--- a/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
+++ b/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
@@ -18,10 +18,20 @@ properties:
- items:
- const: allwinner,sun50i-a64-lradc
- const: allwinner,sun8i-a83t-r-lradc
+ - const: allwinner,sun50i-r329-lradc
+ - items:
+ - const: allwinner,sun20i-d1-lradc
+ - const: allwinner,sun50i-r329-lradc
reg:
maxItems: 1
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
interrupts:
maxItems: 1
@@ -68,6 +78,18 @@ required:
- interrupts
- vref-supply
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - allwinner,sun50i-r329-lradc
+
+then:
+ required:
+ - clocks
+ - resets
+
additionalProperties: false
examples:
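
A hedged sketch of a D1 LRADC node satisfying the new clocks/resets requirement; the register address, interrupt number and the clock/reset indices are placeholders rather than values taken from the binding:

    #include <dt-bindings/interrupt-controller/irq.h>

    lradc: lradc@2009800 {
        compatible = "allwinner,sun20i-d1-lradc",
                     "allwinner,sun50i-r329-lradc";
        reg = <0x2009800 0x400>;                /* illustrative */
        interrupts = <77 IRQ_TYPE_LEVEL_HIGH>;  /* illustrative */
        clocks = <&ccu 12>;                     /* bus clock index is a placeholder */
        resets = <&ccu 3>;                      /* reset index is a placeholder */
        vref-supply = <&reg_aldo>;              /* illustrative regulator */

        button-200 {
            label = "Volume Up";
            linux,code = <115>;                 /* KEY_VOLUMEUP */
            channel = <0>;
            voltage = <200000>;
        };
    };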
diff --git a/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml b/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
new file mode 100644
index 000000000000..a3a1e5a65306
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
@@ -0,0 +1,960 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/azoteq,iqs7222.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Azoteq IQS7222A/B/C Capacitive Touch Controller
+
+maintainers:
+ - Jeff LaBundy <jeff@labundy.com>
+
+description: |
+ The Azoteq IQS7222A, IQS7222B and IQS7222C are multichannel capacitive touch
+ controllers that feature additional sensing capabilities.
+
+ Link to datasheets: https://www.azoteq.com/
+
+properties:
+ compatible:
+ enum:
+ - azoteq,iqs7222a
+ - azoteq,iqs7222b
+ - azoteq,iqs7222c
+
+ reg:
+ maxItems: 1
+
+ irq-gpios:
+ maxItems: 1
+ description:
+ Specifies the GPIO connected to the device's active-low RDY output.
+
+ reset-gpios:
+ maxItems: 1
+ description:
+ Specifies the GPIO connected to the device's active-low MCLR input. The
+ device is temporarily held in hardware reset prior to initialization if
+ this property is present.
+
+ azoteq,rf-filt-enable:
+ type: boolean
+ description: Enables the device's internal RF filter.
+
+ azoteq,max-counts:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+ description: |
+ Specifies the maximum number of conversion periods (counts) that can be
+ reported as follows:
+ 0: 1023
+ 1: 2047
+ 2: 4095
+ 3: 16384
+
+ azoteq,auto-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+ description: |
+ Specifies the number of conversions to occur before an interrupt is
+ generated as follows:
+ 0: 4
+ 1: 8
+ 2: 16
+ 3: 32
+
+ azoteq,ati-frac-div-fine:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
+ description: Specifies the preloaded ATI fine fractional divider.
+
+ azoteq,ati-frac-div-coarse:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
+ description: Specifies the preloaded ATI coarse fractional divider.
+
+ azoteq,ati-comp-select:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 1023
+ description: Specifies the preloaded ATI compensation selection.
+
+ azoteq,lta-beta-lp:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description:
+ Specifies the long-term average filter damping factor to be applied during
+ low-power mode.
+
+ azoteq,lta-beta-np:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description:
+ Specifies the long-term average filter damping factor to be applied during
+ normal-power mode.
+
+ azoteq,counts-beta-lp:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description:
+ Specifies the counts filter damping factor to be applied during low-power
+ mode.
+
+ azoteq,counts-beta-np:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description:
+ Specifies the counts filter damping factor to be applied during normal-
+ power mode.
+
+ azoteq,lta-fast-beta-lp:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description:
+ Specifies the long-term average filter fast damping factor to be applied
+ during low-power mode.
+
+ azoteq,lta-fast-beta-np:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description:
+ Specifies the long-term average filter fast damping factor to be applied
+ during normal-power mode.
+
+ azoteq,timeout-ati-ms:
+ multipleOf: 500
+ minimum: 0
+ maximum: 32767500
+ description:
+ Specifies the delay (in ms) before ATI is retried following an ATI error.
+
+ azoteq,rate-ati-ms:
+ minimum: 0
+ maximum: 65535
+ description: Specifies the rate (in ms) at which ATI status is evaluated.
+
+ azoteq,timeout-np-ms:
+ minimum: 0
+ maximum: 65535
+ description:
+ Specifies the length of time (in ms) to wait for an event before moving
+ from normal-power mode to low-power mode.
+
+ azoteq,rate-np-ms:
+ minimum: 0
+ maximum: 3000
+ description: Specifies the report rate (in ms) during normal-power mode.
+
+ azoteq,timeout-lp-ms:
+ minimum: 0
+ maximum: 65535
+ description:
+ Specifies the length of time (in ms) to wait for an event before moving
+ from low-power mode to ultra-low-power mode.
+
+ azoteq,rate-lp-ms:
+ minimum: 0
+ maximum: 3000
+ description: Specifies the report rate (in ms) during low-power mode.
+
+ azoteq,timeout-ulp-ms:
+ minimum: 0
+ maximum: 65535
+ description:
+ Specifies the rate (in ms) at which channels not regularly sampled during
+ ultra-low-power mode are updated.
+
+ azoteq,rate-ulp-ms:
+ minimum: 0
+ maximum: 3000
+ description: Specifies the report rate (in ms) during ultra-low-power mode.
+
+patternProperties:
+ "^cycle-[0-9]$":
+ type: object
+ description: Represents a conversion cycle serving two sensing channels.
+
+ properties:
+ azoteq,conv-period:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the cycle's conversion period.
+
+ azoteq,conv-frac:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the cycle's conversion frequency fraction.
+
+ azoteq,tx-enable:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 9
+ items:
+ minimum: 0
+ maximum: 8
+ description: Specifies the CTx pin(s) associated with the cycle.
+
+ azoteq,rx-float-inactive:
+ type: boolean
+ description: Floats any inactive CRx pins instead of grounding them.
+
+ azoteq,dead-time-enable:
+ type: boolean
+ description:
+ Increases the denominator of the conversion frequency formula by one.
+
+ azoteq,tx-freq-fosc:
+ type: boolean
+ description:
+ Fixes the conversion frequency to that of the device's core clock.
+
+ azoteq,vbias-enable:
+ type: boolean
+ description: Enables the bias voltage for use during inductive sensing.
+
+ azoteq,sense-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+ description: |
+ Specifies the cycle's sensing mode as follows:
+ 0: None
+ 1: Self capacitive
+ 2: Mutual capacitive
+ 3: Inductive
+
+ Note that in the case of IQS7222A, cycles 5 and 6 are restricted to
+ Hall-effect sensing.
+
+ azoteq,iref-enable:
+ type: boolean
+ description:
+ Enables the current reference for use during various sensing modes.
+
+ azoteq,iref-level:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description: Specifies the cycle's current reference level.
+
+ azoteq,iref-trim:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description: Specifies the cycle's current reference trim.
+
+ dependencies:
+ azoteq,iref-level: ["azoteq,iref-enable"]
+ azoteq,iref-trim: ["azoteq,iref-enable"]
+
+ additionalProperties: false
+
+ "^channel-([0-9]|1[0-9])$":
+ type: object
+ description:
+ Represents a single sensing channel. A channel is active if defined and
+ inactive otherwise.
+
+ Note that in the case of IQS7222A, channels 10 and 11 are restricted to
+ Hall-effect sensing with events reported on channel 10 only.
+
+ properties:
+ azoteq,ulp-allow:
+ type: boolean
+ description:
+ Permits the device to enter ultra-low-power mode while the channel
+ lies in a state of touch or proximity.
+
+ azoteq,ref-select:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 9
+ description: Specifies a separate reference channel to be followed.
+
+ azoteq,ref-weight:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 65535
+ description: Specifies the relative weight of the reference channel.
+
+ azoteq,use-prox:
+ type: boolean
+ description:
+ Activates the reference channel in response to proximity events
+ instead of touch events.
+
+ azoteq,ati-band:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+ description: |
+ Specifies the channel's ATI band as a fraction of its ATI target as
+ follows:
+ 0: 1/16
+ 1: 1/8
+ 2: 1/4
+ 3: 1/2
+
+ azoteq,global-halt:
+ type: boolean
+ description:
+ Specifies that the channel's long-term average is to freeze if any
+ other participating channel lies in a proximity or touch state.
+
+ azoteq,invert-enable:
+ type: boolean
+ description:
+ Inverts the polarity of the states reported for proximity and touch
+ events relative to their respective thresholds.
+
+ azoteq,dual-direction:
+ type: boolean
+ description:
+ Specifies that the channel's long-term average is to freeze in the
+ presence of either increasing or decreasing counts, thereby
+ permitting events to be reported in either direction.
+
+ azoteq,rx-enable:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 4
+ items:
+ minimum: 0
+ maximum: 7
+ description: Specifies the CRx pin(s) associated with the channel.
+
+ azoteq,samp-cap-double:
+ type: boolean
+ description: Doubles the sampling capacitance from 40 pF to 80 pF.
+
+ azoteq,vref-half:
+ type: boolean
+ description: Halves the discharge threshold from 1.0 V to 0.5 V.
+
+ azoteq,proj-bias:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+ description: |
+ Specifies the bias current applied during mutual (projected)
+ capacitive sensing as follows:
+ 0: 2 uA
+ 1: 5 uA
+ 2: 7 uA
+ 3: 10 uA
+
+ azoteq,ati-target:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ multipleOf: 8
+ minimum: 0
+ maximum: 2040
+ description: Specifies the channel's ATI target.
+
+ azoteq,ati-base:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ multipleOf: 16
+ minimum: 0
+ maximum: 496
+ description: Specifies the channel's ATI base.
+
+ azoteq,ati-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3, 4, 5]
+ description: |
+ Specifies the channel's ATI mode as follows:
+ 0: Disabled
+ 1: Compensation
+ 2: Compensation divider
+ 3: Fine fractional divider
+ 4: Coarse fractional divider
+ 5: Full
+
+ azoteq,ati-frac-div-fine:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
+ description: Specifies the channel's ATI fine fractional divider.
+
+ azoteq,ati-frac-mult-coarse:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description: Specifies the channel's ATI coarse fractional multiplier.
+
+ azoteq,ati-frac-div-coarse:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
+ description: Specifies the channel's ATI coarse fractional divider.
+
+ azoteq,ati-comp-div:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
+ description: Specifies the channel's ATI compensation divider.
+
+ azoteq,ati-comp-select:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 1023
+ description: Specifies the channel's ATI compensation selection.
+
+ azoteq,debounce-enter:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description: Specifies the channel's debounce entrance factor.
+
+ azoteq,debounce-exit:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description: Specifies the channel's debounce exit factor.
+
+ patternProperties:
+ "^event-(prox|touch)$":
+ type: object
+ description:
+ Represents a proximity or touch event reported by the channel.
+
+ properties:
+ azoteq,gpio-select:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 3
+ items:
+ minimum: 0
+ maximum: 2
+ description: |
+ Specifies one or more GPIO mapped to the event as follows:
+ 0: GPIO0
+ 1: GPIO3 (IQS7222C only)
+ 2: GPIO4 (IQS7222C only)
+
+ Note that although multiple events can be mapped to a single
+ GPIO, they must all be of the same type (proximity, touch or
+ slider gesture).
+
+ azoteq,thresh:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Specifies the threshold for the event. Valid entries range from
+ 0-127 and 0-255 for proximity and touch events, respectively.
+
+ azoteq,hyst:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description:
+ Specifies the hysteresis for the event (touch events only).
+
+ azoteq,timeout-press-ms:
+ multipleOf: 500
+ minimum: 0
+ maximum: 127500
+ description:
+ Specifies the length of time (in ms) to wait before automatically
+ releasing a press event. Specify zero to allow the press state to
+ persist indefinitely.
+
+ The IQS7222B does not feature channel-specific timeouts; the
+ timeout specified for any one channel applies to all channels.
+
+ linux,code:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Numeric key or switch code associated with the event. Specify
+ KEY_RESERVED (0) to opt out of event reporting.
+
+ linux,input-type:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 5]
+ default: 1
+ description:
+ Specifies whether the event is to be interpreted as a key (1)
+ or a switch (5).
+
+ required:
+ - linux,code
+
+ additionalProperties: false
+
+ dependencies:
+ azoteq,ref-weight: ["azoteq,ref-select"]
+ azoteq,use-prox: ["azoteq,ref-select"]
+
+ additionalProperties: false
+
+ "^slider-[0-1]$":
+ type: object
+ description: Represents a slider comprising three or four channels.
+
+ properties:
+ azoteq,channel-select:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 3
+ maxItems: 4
+ items:
+ minimum: 0
+ maximum: 9
+ description:
+ Specifies the order of the channels that participate in the slider.
+
+ azoteq,slider-size:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 65535
+ description:
+ Specifies the slider's one-dimensional resolution, equal to the
+ maximum coordinate plus one.
+
+ azoteq,lower-cal:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the slider's lower starting point.
+
+ azoteq,upper-cal:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the slider's upper starting point.
+
+ azoteq,top-speed:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 65535
+ description:
+ Specifies the speed of movement after which coordinate filtering is
+ no longer applied.
+
+ azoteq,bottom-speed:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ multipleOf: 4
+ minimum: 0
+ maximum: 1020
+ description:
+ Specifies the speed of movement after which coordinate filtering is
+ linearly reduced.
+
+ azoteq,bottom-beta:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 7
+ description:
+ Specifies the coordinate filter damping factor to be applied
+ while the speed of movement is below that which is specified
+ by azoteq,bottom-speed.
+
+ azoteq,static-beta:
+ type: boolean
+ description:
+ Applies the coordinate filter damping factor specified by
+ azoteq,bottom-beta regardless of the speed of movement.
+
+ azoteq,use-prox:
+ type: boolean
+ description:
+ Directs the slider to respond to the proximity states of the selected
+ channels instead of their corresponding touch states. Note the slider
+ cannot report granular coordinates during a state of proximity.
+
+ linux,axis:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Specifies the absolute axis to which coordinates are mapped. Specify
+ ABS_WHEEL to operate the slider as a wheel (IQS7222C only).
+
+ patternProperties:
+ "^event-(press|tap|(swipe|flick)-(pos|neg))$":
+ type: object
+ description:
+ Represents a press or gesture (IQS7222A only) event reported by
+ the slider.
+
+ properties:
+ linux,code:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Numeric key code associated with the event.
+
+ azoteq,gesture-max-ms:
+ multipleOf: 4
+ minimum: 0
+ maximum: 1020
+ description:
+ Specifies the length of time (in ms) within which a tap, swipe
+ or flick gesture must be completed in order to be acknowledged
+ by the device. The number specified for any one swipe or flick
+ gesture applies to all remaining swipe or flick gestures.
+
+ azoteq,gesture-min-ms:
+ multipleOf: 4
+ minimum: 0
+ maximum: 124
+ description:
+ Specifies the length of time (in ms) for which a tap gesture must
+ be held in order to be acknowledged by the device.
+
+ azoteq,gesture-dist:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ multipleOf: 16
+ minimum: 0
+ maximum: 4080
+ description:
+ Specifies the distance across which a swipe or flick gesture must
+ travel in order to be acknowledged by the device. The number
+ specified for any one swipe or flick gesture applies to all
+ remaining swipe or flick gestures.
+
+ azoteq,gpio-select:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 1
+ items:
+ minimum: 0
+ maximum: 0
+ description: |
+ Specifies an individual GPIO mapped to a tap, swipe or flick
+ gesture as follows:
+ 0: GPIO0
+ 1: GPIO3 (reserved)
+ 2: GPIO4 (reserved)
+
+ Note that although multiple events can be mapped to a single
+ GPIO, they must all be of the same type (proximity, touch or
+ slider gesture).
+
+ required:
+ - linux,code
+
+ additionalProperties: false
+
+ required:
+ - azoteq,channel-select
+
+ additionalProperties: false
+
+ "^gpio-[0-2]$":
+ type: object
+ description: |
+ Represents a GPIO mapped to one or more events as follows:
+ gpio-0: GPIO0
+ gpio-1: GPIO3 (IQS7222C only)
+ gpio-2: GPIO4 (IQS7222C only)
+
+ allOf:
+ - $ref: ../pinctrl/pincfg-node.yaml#
+
+ properties:
+ drive-open-drain: true
+
+ additionalProperties: false
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: azoteq,iqs7222b
+
+ then:
+ patternProperties:
+ "^cycle-[0-9]$":
+ properties:
+ azoteq,iref-enable: false
+
+ "^channel-([0-9]|1[0-9])$":
+ properties:
+ azoteq,ref-select: false
+
+ patternProperties:
+ "^event-(prox|touch)$":
+ properties:
+ azoteq,gpio-select: false
+
+ "^slider-[0-1]$": false
+
+ "^gpio-[0-2]$": false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: azoteq,iqs7222a
+
+ then:
+ patternProperties:
+ "^channel-([0-9]|1[0-9])$":
+ patternProperties:
+ "^event-(prox|touch)$":
+ properties:
+ azoteq,gpio-select:
+ maxItems: 1
+ items:
+ maximum: 0
+
+ "^slider-[0-1]$":
+ properties:
+ azoteq,slider-size:
+ multipleOf: 16
+ maximum: 4080
+
+ azoteq,top-speed:
+ multipleOf: 4
+ maximum: 1020
+
+ else:
+ patternProperties:
+ "^channel-([0-9]|1[0-9])$":
+ properties:
+ azoteq,ulp-allow: false
+
+ "^slider-[0-1]$":
+ patternProperties:
+ "^event-(press|tap|(swipe|flick)-(pos|neg))$":
+ properties:
+ azoteq,gesture-max-ms: false
+
+ azoteq,gesture-min-ms: false
+
+ azoteq,gesture-dist: false
+
+ azoteq,gpio-select: false
+
+required:
+ - compatible
+ - reg
+ - irq-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/input/input.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ iqs7222a@44 {
+ compatible = "azoteq,iqs7222a";
+ reg = <0x44>;
+ irq-gpios = <&gpio 4 GPIO_ACTIVE_LOW>;
+ azoteq,lta-beta-lp = <7>;
+ azoteq,lta-beta-np = <8>;
+ azoteq,counts-beta-lp = <2>;
+ azoteq,counts-beta-np = <3>;
+ azoteq,lta-fast-beta-lp = <3>;
+ azoteq,lta-fast-beta-np = <4>;
+
+ cycle-0 {
+ azoteq,conv-period = <5>;
+ azoteq,conv-frac = <127>;
+ azoteq,tx-enable = <1>, <2>, <4>, <5>;
+ azoteq,dead-time-enable;
+ azoteq,sense-mode = <2>;
+ };
+
+ cycle-1 {
+ azoteq,conv-period = <5>;
+ azoteq,conv-frac = <127>;
+ azoteq,tx-enable = <5>;
+ azoteq,dead-time-enable;
+ azoteq,sense-mode = <2>;
+ };
+
+ cycle-2 {
+ azoteq,conv-period = <5>;
+ azoteq,conv-frac = <127>;
+ azoteq,tx-enable = <4>;
+ azoteq,dead-time-enable;
+ azoteq,sense-mode = <2>;
+ };
+
+ cycle-3 {
+ azoteq,conv-period = <5>;
+ azoteq,conv-frac = <127>;
+ azoteq,tx-enable = <2>;
+ azoteq,dead-time-enable;
+ azoteq,sense-mode = <2>;
+ };
+
+ cycle-4 {
+ azoteq,conv-period = <5>;
+ azoteq,conv-frac = <127>;
+ azoteq,tx-enable = <1>;
+ azoteq,dead-time-enable;
+ azoteq,sense-mode = <2>;
+ };
+
+ cycle-5 {
+ azoteq,conv-period = <2>;
+ azoteq,conv-frac = <0>;
+ };
+
+ cycle-6 {
+ azoteq,conv-period = <2>;
+ azoteq,conv-frac = <0>;
+ };
+
+ channel-0 {
+ azoteq,ulp-allow;
+ azoteq,global-halt;
+ azoteq,invert-enable;
+ azoteq,rx-enable = <3>;
+ azoteq,ati-target = <800>;
+ azoteq,ati-base = <208>;
+ azoteq,ati-mode = <5>;
+ };
+
+ channel-1 {
+ azoteq,global-halt;
+ azoteq,invert-enable;
+ azoteq,rx-enable = <3>;
+ azoteq,ati-target = <496>;
+ azoteq,ati-base = <208>;
+ azoteq,ati-mode = <5>;
+ };
+
+ channel-2 {
+ azoteq,global-halt;
+ azoteq,invert-enable;
+ azoteq,rx-enable = <3>;
+ azoteq,ati-target = <496>;
+ azoteq,ati-base = <208>;
+ azoteq,ati-mode = <5>;
+ };
+
+ channel-3 {
+ azoteq,global-halt;
+ azoteq,invert-enable;
+ azoteq,rx-enable = <3>;
+ azoteq,ati-target = <496>;
+ azoteq,ati-base = <208>;
+ azoteq,ati-mode = <5>;
+ };
+
+ channel-4 {
+ azoteq,global-halt;
+ azoteq,invert-enable;
+ azoteq,rx-enable = <3>;
+ azoteq,ati-target = <496>;
+ azoteq,ati-base = <208>;
+ azoteq,ati-mode = <5>;
+ };
+
+ channel-5 {
+ azoteq,ulp-allow;
+ azoteq,global-halt;
+ azoteq,invert-enable;
+ azoteq,rx-enable = <6>;
+ azoteq,ati-target = <800>;
+ azoteq,ati-base = <144>;
+ azoteq,ati-mode = <5>;
+ };
+
+ channel-6 {
+ azoteq,global-halt;
+ azoteq,invert-enable;
+ azoteq,rx-enable = <6>;
+ azoteq,ati-target = <496>;
+ azoteq,ati-base = <160>;
+ azoteq,ati-mode = <5>;
+
+ event-touch {
+ linux,code = <KEY_MUTE>;
+ };
+ };
+
+ channel-7 {
+ azoteq,global-halt;
+ azoteq,invert-enable;
+ azoteq,rx-enable = <6>;
+ azoteq,ati-target = <496>;
+ azoteq,ati-base = <160>;
+ azoteq,ati-mode = <5>;
+
+ event-touch {
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+ };
+
+ channel-8 {
+ azoteq,global-halt;
+ azoteq,invert-enable;
+ azoteq,rx-enable = <6>;
+ azoteq,ati-target = <496>;
+ azoteq,ati-base = <160>;
+ azoteq,ati-mode = <5>;
+
+ event-touch {
+ linux,code = <KEY_VOLUMEUP>;
+ };
+ };
+
+ channel-9 {
+ azoteq,global-halt;
+ azoteq,invert-enable;
+ azoteq,rx-enable = <6>;
+ azoteq,ati-target = <496>;
+ azoteq,ati-base = <160>;
+ azoteq,ati-mode = <5>;
+
+ event-touch {
+ linux,code = <KEY_POWER>;
+ };
+ };
+
+ channel-10 {
+ azoteq,ulp-allow;
+ azoteq,ati-target = <496>;
+ azoteq,ati-base = <112>;
+
+ event-touch {
+ linux,code = <SW_LID>;
+ linux,input-type = <EV_SW>;
+ };
+ };
+
+ channel-11 {
+ azoteq,ati-target = <496>;
+ azoteq,ati-base = <112>;
+ };
+
+ slider-0 {
+ azoteq,channel-select = <1>, <2>, <3>, <4>;
+ azoteq,slider-size = <4080>;
+ azoteq,upper-cal = <50>;
+ azoteq,lower-cal = <30>;
+ azoteq,top-speed = <200>;
+ azoteq,bottom-speed = <1>;
+ azoteq,bottom-beta = <3>;
+
+ event-tap {
+ linux,code = <KEY_PLAYPAUSE>;
+ azoteq,gesture-max-ms = <600>;
+ azoteq,gesture-min-ms = <24>;
+ };
+
+ event-flick-pos {
+ linux,code = <KEY_NEXTSONG>;
+ azoteq,gesture-max-ms = <600>;
+ azoteq,gesture-dist = <816>;
+ };
+
+ event-flick-neg {
+ linux,code = <KEY_PREVIOUSSONG>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/input/google,cros-ec-keyb.yaml b/Documentation/devicetree/bindings/input/google,cros-ec-keyb.yaml
index aa61fe64be63..e05690b3e963 100644
--- a/Documentation/devicetree/bindings/input/google,cros-ec-keyb.yaml
+++ b/Documentation/devicetree/bindings/input/google,cros-ec-keyb.yaml
@@ -15,14 +15,16 @@ description: |
Google's ChromeOS EC Keyboard is a simple matrix keyboard
implemented on a separate EC (Embedded Controller) device. It provides
a message for reading key scans from the EC. These are then converted
- into keycodes for processing by the kernel.
-
-allOf:
- - $ref: "/schemas/input/matrix-keymap.yaml#"
+ into keycodes for processing by the kernel. This device also supports
+ switches/buttons like power and volume buttons.
properties:
compatible:
- const: google,cros-ec-keyb
+ oneOf:
+ - description: ChromeOS EC with only buttons/switches
+ const: google,cros-ec-keyb-switches
+ - description: ChromeOS EC with keyboard and possibly buttons/switches
+ const: google,cros-ec-keyb
google,needs-ghost-filter:
description:
@@ -42,15 +44,31 @@ properties:
where the lower 16 bits are reserved. This property is specified only
when the keyboard has a custom design for the top row keys.
+dependencies:
+ function-row-physmap: [ 'linux,keymap' ]
+ google,needs-ghost-filter: [ 'linux,keymap' ]
+
required:
- compatible
+if:
+ properties:
+ compatible:
+ contains:
+ const: google,cros-ec-keyb
+then:
+ $ref: "/schemas/input/matrix-keymap.yaml#"
+ required:
+ - keypad,num-rows
+ - keypad,num-columns
+ - linux,keymap
+
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/input/input.h>
- cros-ec-keyb {
+ keyboard-controller {
compatible = "google,cros-ec-keyb";
keypad,num-rows = <8>;
keypad,num-columns = <13>;
@@ -114,3 +132,9 @@ examples:
/* UP LEFT */
0x070b0067 0x070c0069>;
};
+ - |
+ /* No matrix keyboard, just buttons/switches */
+ keyboard-controller {
+ compatible = "google,cros-ec-keyb-switches";
+ };
+...
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
index 116e434d0daa..bf538c0c5a81 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Operating State Manager (OSM) L3 Interconnect Provider
maintainers:
- - Sibi Sankar <sibis@codeaurora.org>
+ - Sibi Sankar <quic_sibis@quicinc.com>
description:
L3 cache bandwidth requirements on Qualcomm SoCs are serviced by the OSM.
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
index 5a911be0c2ea..28b3516aa089 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
@@ -31,7 +31,6 @@ properties:
- qcom,sc7180-config-noc
- qcom,sc7180-dc-noc
- qcom,sc7180-gem-noc
- - qcom,sc7180-ipa-virt
- qcom,sc7180-mc-virt
- qcom,sc7180-mmss-noc
- qcom,sc7180-npu-noc
@@ -59,7 +58,20 @@ properties:
- qcom,sc8180x-ipa-virt
- qcom,sc8180x-mc-virt
- qcom,sc8180x-mmss-noc
+ - qcom,sc8180x-qup-virt
- qcom,sc8180x-system-noc
+ - qcom,sc8280xp-aggre1-noc
+ - qcom,sc8280xp-aggre2-noc
+ - qcom,sc8280xp-clk-virt
+ - qcom,sc8280xp-config-noc
+ - qcom,sc8280xp-dc-noc
+ - qcom,sc8280xp-gem-noc
+ - qcom,sc8280xp-lpass-ag-noc
+ - qcom,sc8280xp-mc-virt
+ - qcom,sc8280xp-mmss-noc
+ - qcom,sc8280xp-nspa-noc
+ - qcom,sc8280xp-nspb-noc
+ - qcom,sc8280xp-system-noc
- qcom,sdm845-aggre1-noc
- qcom,sdm845-aggre2-noc
- qcom,sdm845-config-noc
@@ -68,10 +80,12 @@ properties:
- qcom,sdm845-mem-noc
- qcom,sdm845-mmss-noc
- qcom,sdm845-system-noc
- - qcom,sdx55-ipa-virt
- qcom,sdx55-mc-virt
- qcom,sdx55-mem-noc
- qcom,sdx55-system-noc
+ - qcom,sdx65-mc-virt
+ - qcom,sdx65-mem-noc
+ - qcom,sdx65-system-noc
- qcom,sm8150-aggre1-noc
- qcom,sm8150-aggre2-noc
- qcom,sm8150-camnoc-noc
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
index 3b7b1134dea9..159a423e5586 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
@@ -22,6 +22,7 @@ Properties:
- "qcom,sc7280-pdc": For SC7280
- "qcom,sdm845-pdc": For SDM845
- "qcom,sm6350-pdc": For SM6350
+ - "qcom,sm8150-pdc": For SM8150
- "qcom,sm8250-pdc": For SM8250
- "qcom,sm8350-pdc": For SM8350
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
index da5381c8ee11..76fc2c0f4d54 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -37,8 +37,10 @@ properties:
- qcom,sc7180-smmu-500
- qcom,sc7280-smmu-500
- qcom,sc8180x-smmu-500
+ - qcom,sc8280xp-smmu-500
- qcom,sdm845-smmu-500
- qcom,sdx55-smmu-500
+ - qcom,sdx65-smmu-500
- qcom,sm6350-smmu-500
- qcom,sm8150-smmu-500
- qcom,sm8250-smmu-500
@@ -62,8 +64,9 @@ properties:
for improved performance.
items:
- enum:
- - nvidia,tegra194-smmu
- nvidia,tegra186-smmu
+ - nvidia,tegra194-smmu
+ - nvidia,tegra234-smmu
- const: nvidia,smmu-500
- items:
- const: arm,mmu-500
@@ -157,6 +160,17 @@ properties:
power-domains:
maxItems: 1
+ nvidia,memory-controller:
+ description: |
+ A phandle to the memory controller on NVIDIA Tegra186 and later SoCs.
+ The memory controller needs to be programmed with a mapping of memory
+ client IDs to ARM SMMU stream IDs.
+
+ If this property is absent, the mapping programmed by early firmware
+ will be used and it is not guaranteed that IOMMU translations will be
+ enabled for any given device.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
required:
- compatible
- reg
@@ -172,13 +186,20 @@ allOf:
compatible:
contains:
enum:
- - nvidia,tegra194-smmu
- nvidia,tegra186-smmu
+ - nvidia,tegra194-smmu
+ - nvidia,tegra234-smmu
then:
properties:
reg:
minItems: 1
maxItems: 2
+
+ # The reference to the memory controller is required to ensure that the
+ # memory client to stream ID mapping can be done synchronously with the
+ # IOMMU attachment.
+ required:
+ - nvidia,memory-controller
else:
properties:
reg:
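
A hedged sketch of a Tegra SMMU node carrying the now-required nvidia,memory-controller phandle; the address and interrupt numbers are illustrative only:

    #include <dt-bindings/interrupt-controller/arm-gic.h>

    smmu: iommu@12000000 {
        compatible = "nvidia,tegra186-smmu", "nvidia,smmu-500";
        reg = <0x12000000 0x800000>;                     /* illustrative */
        interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;  /* illustrative */
        #global-interrupts = <1>;
        #iommu-cells = <1>;
        nvidia,memory-controller = <&mc>;  /* required by the new allOf clause */
    };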
diff --git a/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml b/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
index 97e8c471a5e8..2ae3bbad7f1a 100644
--- a/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
+++ b/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
@@ -76,7 +76,11 @@ properties:
- mediatek,mt8167-m4u # generation two
- mediatek,mt8173-m4u # generation two
- mediatek,mt8183-m4u # generation two
+ - mediatek,mt8186-iommu-mm # generation two
- mediatek,mt8192-m4u # generation two
+ - mediatek,mt8195-iommu-vdo # generation two
+ - mediatek,mt8195-iommu-vpp # generation two
+ - mediatek,mt8195-iommu-infra # generation two
- description: mt7623 generation one
items:
@@ -119,7 +123,9 @@ properties:
dt-binding/memory/mt8167-larb-port.h for mt8167,
dt-binding/memory/mt8173-larb-port.h for mt8173,
dt-binding/memory/mt8183-larb-port.h for mt8183,
+ dt-binding/memory/mt8186-memory-port.h for mt8186,
dt-binding/memory/mt8192-larb-port.h for mt8192.
+ dt-binding/memory/mt8195-memory-port.h for mt8195.
power-domains:
maxItems: 1
@@ -128,7 +134,6 @@ required:
- compatible
- reg
- interrupts
- - mediatek,larbs
- '#iommu-cells'
allOf:
@@ -140,7 +145,10 @@ allOf:
- mediatek,mt2701-m4u
- mediatek,mt2712-m4u
- mediatek,mt8173-m4u
+ - mediatek,mt8186-iommu-mm
- mediatek,mt8192-m4u
+ - mediatek,mt8195-iommu-vdo
+ - mediatek,mt8195-iommu-vpp
then:
required:
@@ -150,12 +158,26 @@ allOf:
properties:
compatible:
enum:
+ - mediatek,mt8186-iommu-mm
- mediatek,mt8192-m4u
+ - mediatek,mt8195-iommu-vdo
+ - mediatek,mt8195-iommu-vpp
then:
required:
- power-domains
+ - if: # All IOMMUs except the infra one use larbs.
+ not:
+ properties:
+ compatible:
+ contains:
+ const: mediatek,mt8195-iommu-infra
+
+ then:
+ required:
+ - mediatek,larbs
+
additionalProperties: false
examples:
@@ -173,13 +195,3 @@ examples:
<&larb3>, <&larb4>, <&larb5>;
#iommu-cells = <1>;
};
-
- - |
- #include <dt-bindings/memory/mt8173-larb-port.h>
-
- /* Example for a client device */
- display {
- compatible = "mediatek,mt8173-disp";
- iommus = <&iommu M4U_PORT_DISP_OVL0>,
- <&iommu M4U_PORT_DISP_RDMA0>;
- };
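
A hedged sketch of the one case exempted from the larbs requirement, an mt8195 infra IOMMU; the register address and interrupt specifier are illustrative:

    #include <dt-bindings/interrupt-controller/arm-gic.h>

    iommu_infra: iommu@10315000 {
        compatible = "mediatek,mt8195-iommu-infra";
        reg = <0x10315000 0x1000>;                          /* illustrative */
        interrupts = <GIC_SPI 795 IRQ_TYPE_LEVEL_HIGH 0>;   /* illustrative */
        #iommu-cells = <1>;
        /* no mediatek,larbs: the infra IOMMU is excluded by the new if/then */
    };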
diff --git a/Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml b/Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml
index 783c6b37c9f0..672a0beea600 100644
--- a/Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml
@@ -86,16 +86,6 @@ examples:
- |
#include <dt-bindings/clock/exynos5250.h>
- gsc_0: scaler@13e00000 {
- compatible = "samsung,exynos5-gsc";
- reg = <0x13e00000 0x1000>;
- interrupts = <0 85 0>;
- power-domains = <&pd_gsc>;
- clocks = <&clock CLK_GSCL0>;
- clock-names = "gscl";
- iommus = <&sysmmu_gsc0>;
- };
-
sysmmu_gsc0: iommu@13e80000 {
compatible = "samsung,exynos-sysmmu";
reg = <0x13E80000 0x1000>;
diff --git a/Documentation/devicetree/bindings/leds/kinetic,ktd2692.yaml b/Documentation/devicetree/bindings/leds/kinetic,ktd2692.yaml
new file mode 100644
index 000000000000..bac95a51afa1
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/kinetic,ktd2692.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/kinetic,ktd2692.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: KTD2692 Flash LED Driver from Kinetic Technologies
+
+maintainers:
+ - Markuss Broks <markuss.broks@gmail.com>
+
+description: |
+ KTD2692 is the ideal power solution for high-power flash LEDs.
+ It uses ExpressWire single-wire programming for maximum flexibility.
+
+ The ExpressWire interface through the CTRL pin can control LED on/off and
+ enable/disable the IC, movie (max 1/3 of flash current) / flash mode current,
+ flash timeout and LVP (low-voltage protection).
+
+ Also, when the AUX pin is pulled high while the CTRL pin is high,
+ LED current will be ramped up to the flash-mode current level.
+
+properties:
+ compatible:
+ const: kinetic,ktd2692
+
+ ctrl-gpios:
+ maxItems: 1
+ description: Specifier of the GPIO connected to CTRL pin.
+
+ aux-gpios:
+ maxItems: 1
+ description: Specifier of the GPIO connected to AUX pin.
+
+ vin-supply:
+ description: LED supply (2.7V to 5.5V).
+
+ led:
+ type: object
+ $ref: common.yaml#
+ description: Properties for the LED.
+ properties:
+ function: true
+ color: true
+ flash-max-timeout-us:
+ description: Flash LED maximum timeout.
+
+ led-max-microamp:
+ maximum: 300000
+ description: Minimum threshold for timer protection
+ is defined internally (maximum 300 mA).
+
+ flash-max-microamp:
+ maximum: 300000
+ description: Flash LED maximum current.
+ Formula: I(uA) = 15000000 / Rset.
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - ctrl-gpios
+ - led
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/leds/common.h>
+
+ ktd2692 {
+ compatible = "kinetic,ktd2692";
+ ctrl-gpios = <&gpc0 1 0>;
+ aux-gpios = <&gpc0 2 0>;
+ vin-supply = <&vbat>;
+
+ led {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ flash-max-timeout-us = <250000>;
+ flash-max-microamp = <150000>;
+ led-max-microamp = <25000>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml b/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
index 37445c68cdef..f41d021ed677 100644
--- a/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
@@ -20,7 +20,7 @@ description: |
within this documentation directory.
patternProperties:
- "^multi-led@([0-9a-f])$":
+ "^multi-led(@[0-9a-f])?$":
type: object
description: Represents the LEDs that are to be grouped.
properties:
diff --git a/Documentation/devicetree/bindings/leds/leds-ktd2692.txt b/Documentation/devicetree/bindings/leds/leds-ktd2692.txt
deleted file mode 100644
index 853737452580..000000000000
--- a/Documentation/devicetree/bindings/leds/leds-ktd2692.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-* Kinetic Technologies - KTD2692 Flash LED Driver
-
-KTD2692 is the ideal power solution for high-power flash LEDs.
-It uses ExpressWire single-wire programming for maximum flexibility.
-
-The ExpressWire interface through CTRL pin can control LED on/off and
-enable/disable the IC, Movie(max 1/3 of Flash current) / Flash mode current,
-Flash timeout, LVP(low voltage protection).
-
-Also, When the AUX pin is pulled high while CTRL pin is high,
-LED current will be ramped up to the flash-mode current level.
-
-Required properties:
-- compatible : Should be "kinetic,ktd2692".
-- ctrl-gpios : Specifier of the GPIO connected to CTRL pin.
-- aux-gpios : Specifier of the GPIO connected to AUX pin.
-
-Optional properties:
-- vin-supply : "vin" LED supply (2.7V to 5.5V).
- See Documentation/devicetree/bindings/regulator/regulator.txt
-
-A discrete LED element connected to the device must be represented by a child
-node - See Documentation/devicetree/bindings/leds/common.txt
-
-Required properties for flash LED child nodes:
- See Documentation/devicetree/bindings/leds/common.txt
-- led-max-microamp : Minimum Threshold for Timer protection
- is defined internally (Maximum 300mA).
-- flash-max-microamp : Flash LED maximum current
- Formula : I(mA) = 15000 / Rset.
-- flash-max-timeout-us : Flash LED maximum timeout.
-
-Optional properties for flash LED child nodes:
-- label : See Documentation/devicetree/bindings/leds/common.txt
-
-Example:
-
-ktd2692 {
- compatible = "kinetic,ktd2692";
- ctrl-gpios = <&gpc0 1 0>;
- aux-gpios = <&gpc0 2 0>;
- vin-supply = <&vbat>;
-
- flash-led {
- label = "ktd2692-flash";
- led-max-microamp = <300000>;
- flash-max-microamp = <1500000>;
- flash-max-timeout-us = <1835000>;
- };
-};
diff --git a/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml b/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml
new file mode 100644
index 000000000000..6625a528f727
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/leds-pwm-multicolor.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Multi-color LEDs connected to PWM
+
+maintainers:
+ - Sven Schwermer <sven.schwermer@disruptive-technologies.com>
+
+description: |
+ This driver combines several monochrome PWM LEDs into one multi-color
+ LED using the multicolor LED class.
+
+properties:
+ compatible:
+ const: pwm-leds-multicolor
+
+ multi-led:
+ type: object
+
+ patternProperties:
+ "^led-[0-9a-z]+$":
+ type: object
+ $ref: common.yaml#
+
+ additionalProperties: false
+
+ properties:
+ pwms:
+ maxItems: 1
+
+ pwm-names: true
+
+ color: true
+
+ required:
+ - pwms
+ - color
+
+required:
+ - compatible
+
+allOf:
+ - $ref: leds-class-multicolor.yaml#
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ led-controller {
+ compatible = "pwm-leds-multicolor";
+
+ multi-led {
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_INDICATOR;
+ max-brightness = <65535>;
+
+ led-red {
+ pwms = <&pwm1 0 1000000>;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led-green {
+ pwms = <&pwm2 0 1000000>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led-blue {
+ pwms = <&pwm3 0 1000000>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml b/Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml
new file mode 100644
index 000000000000..409a4c7298e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml
@@ -0,0 +1,174 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/leds-qcom-lpg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Light Pulse Generator
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: >
+ The Qualcomm Light Pulse Generator consists of three different hardware blocks:
+ a ramp generator with a lookup table, the light pulse generator and a
+ three-channel current sink. These blocks are found in a wide range of Qualcomm PMICs.
+
+properties:
+ compatible:
+ enum:
+ - qcom,pm8150b-lpg
+ - qcom,pm8150l-lpg
+ - qcom,pm8350c-pwm
+ - qcom,pm8916-pwm
+ - qcom,pm8941-lpg
+ - qcom,pm8994-lpg
+ - qcom,pmc8180c-lpg
+ - qcom,pmi8994-lpg
+ - qcom,pmi8998-lpg
+
+ "#pwm-cells":
+ const: 2
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ qcom,power-source:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ power-source used to drive the output, as defined in the datasheet.
+ Should be specified if the TRILED block is present
+ enum: [0, 1, 3]
+
+ qcom,dtest:
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ description: >
+ A list of integer pairs, where each pair represents the dtest line the
+ particular channel should be connected to and the flags denoting how the
+ value should be output, as defined in the datasheet. The number of
+ pairs should be the same as the number of channels.
+ items:
+ items:
+ - description: dtest line to attach
+ - description: flags for the attachment
+
+ multi-led:
+ type: object
+ $ref: leds-class-multicolor.yaml#
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ patternProperties:
+ "^led@[0-9a-f]$":
+ type: object
+ $ref: common.yaml#
+
+patternProperties:
+ "^led@[0-9a-f]$":
+ type: object
+ $ref: common.yaml#
+
+ properties:
+ reg: true
+
+ required:
+ - reg
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ led-controller {
+ compatible = "qcom,pmi8994-lpg";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,power-source = <1>;
+
+ qcom,dtest = <0 0>,
+ <0 0>,
+ <0 0>,
+ <4 1>;
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <1>;
+ };
+
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <0>;
+ default-state = "on";
+ };
+
+ led@3 {
+ reg = <3>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <2>;
+ };
+
+ led@4 {
+ reg = <4>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <3>;
+ };
+ };
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ led-controller {
+ compatible = "qcom,pmi8994-lpg";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,power-source = <1>;
+
+ multi-led {
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_STATUS;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@3 {
+ reg = <3>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+ };
+ };
+ - |
+ pwm-controller {
+ compatible = "qcom,pm8916-pwm";
+ #pwm-cells = <2>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/leds/regulator-led.yaml b/Documentation/devicetree/bindings/leds/regulator-led.yaml
new file mode 100644
index 000000000000..3e020d700c00
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/regulator-led.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/regulator-led.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Device Tree Bindings for Regulator LEDs
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+ Regulator LEDs are powered by a single regulator such that they can
+ be turned on or off by enabling or disabling the regulator. The available
+ brightness settings will be inferred from the available voltages on the
+ regulator, and any constraints on the voltage or current will need to be
+ specified on the regulator.
+
+allOf:
+ - $ref: common.yaml#
+
+properties:
+ $nodename:
+ pattern: '^led.*$'
+
+ compatible:
+ const: regulator-led
+
+ vled-supply:
+ description:
+ The regulator controlling the current to the LED.
+
+ function: true
+ color: true
+ linux,default-trigger: true
+ default-state: true
+
+required:
+ - compatible
+ - vled-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ led-heartbeat {
+ compatible = "regulator-led";
+ vled-supply = <&regulator>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_BLUE>;
+ linux,default-trigger = "heartbeat";
+ };
+...
diff --git a/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc-peripherals.yaml b/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc-peripherals.yaml
new file mode 100644
index 000000000000..b8ed52a44d57
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc-peripherals.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/ingenic,nemc-peripherals.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic SoCs NAND / External Memory Controller (NEMC) devicetree bindings
+
+maintainers:
+ - Paul Cercueil <paul@crapouillou.net>
+
+properties:
+ reg:
+ minItems: 1
+ maxItems: 255
+
+ ingenic,nemc-bus-width:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [8, 16]
+ description: Specifies the bus width in bits.
+
+ ingenic,nemc-tAS:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Address setup time in nanoseconds.
+
+ ingenic,nemc-tAH:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Address hold time in nanoseconds.
+
+ ingenic,nemc-tBP:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Burst pitch time in nanoseconds.
+
+ ingenic,nemc-tAW:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Address wait time in nanoseconds.
+
+ ingenic,nemc-tSTRV:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Static memory recovery time in nanoseconds.
+
+required:
+ - reg
+
+additionalProperties: true
+...
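
The new schema carries no example of its own; a hedged sketch of a child node under the NEMC follows, with a hypothetical 8-bit peripheral in bank 1 and illustrative timing values:

    memory-controller@13410000 {
        compatible = "ingenic,jz4780-nemc";
        #address-cells = <2>;
        #size-cells = <1>;
        ranges = <1 0 0x1b000000 0x1000000>;  /* bank 1 mapping, illustrative */

        ethernet@1 {
            compatible = "davicom,dm9000";    /* hypothetical peripheral */
            reg = <1 0 0x8000>;
            ingenic,nemc-bus-width = <8>;
            ingenic,nemc-tAS = <15>;          /* timings in ns, illustrative */
            ingenic,nemc-tAH = <10>;
            ingenic,nemc-tBP = <20>;
            ingenic,nemc-tAW = <20>;
            ingenic,nemc-tSTRV = <100>;
        };
    };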
diff --git a/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml b/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml
index 24f9e1982028..dd13a5106d6c 100644
--- a/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml
@@ -39,38 +39,6 @@ properties:
patternProperties:
".*@[0-9]+$":
type: object
- properties:
- reg:
- minItems: 1
- maxItems: 255
-
- ingenic,nemc-bus-width:
- $ref: /schemas/types.yaml#/definitions/uint32
- enum: [8, 16]
- description: Specifies the bus width in bits.
-
- ingenic,nemc-tAS:
- $ref: /schemas/types.yaml#/definitions/uint32
- description: Address setup time in nanoseconds.
-
- ingenic,nemc-tAH:
- $ref: /schemas/types.yaml#/definitions/uint32
- description: Address hold time in nanoseconds.
-
- ingenic,nemc-tBP:
- $ref: /schemas/types.yaml#/definitions/uint32
- description: Burst pitch time in nanoseconds.
-
- ingenic,nemc-tAW:
- $ref: /schemas/types.yaml#/definitions/uint32
- description: Address wait time in nanoseconds.
-
- ingenic,nemc-tSTRV:
- $ref: /schemas/types.yaml#/definitions/uint32
- description: Static memory recovery time in nanoseconds.
-
- required:
- - reg
required:
- compatible
diff --git a/Documentation/devicetree/bindings/mfd/da9063.txt b/Documentation/devicetree/bindings/mfd/da9063.txt
index 91b79a21d403..aa8b800cc4ad 100644
--- a/Documentation/devicetree/bindings/mfd/da9063.txt
+++ b/Documentation/devicetree/bindings/mfd/da9063.txt
@@ -64,10 +64,13 @@ Sub-nodes:
and KEY_SLEEP.
- watchdog : This node defines settings for the Watchdog timer associated
- with the DA9063 and DA9063L. There are currently no entries in this
- binding, however compatible = "dlg,da9063-watchdog" should be added
- if a node is created.
+ with the DA9063 and DA9063L. The node should contain the compatible property
+ with the value "dlg,da9063-watchdog".
+ Optional watchdog properties:
+ - dlg,use-sw-pm: Add this property to disable the watchdog during suspend.
+ Only use this option if the watchdog's automatic suspend function
+ (see register CONTROL_B) cannot be used.
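
A hedged sketch of the watchdog sub-node described above, shown on its own outside the full PMIC example that follows:

    watchdog {
        compatible = "dlg,da9063-watchdog";
        /* dlg,use-sw-pm disables the watchdog over suspend; only add it when
           the automatic suspend handling via CONTROL_B cannot be used */
        dlg,use-sw-pm;
    };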
Example:
diff --git a/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml b/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml
index afec0bd2f1de..e25caf8ef9f4 100644
--- a/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml
+++ b/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml
@@ -57,7 +57,7 @@ properties:
mediatek,rpmsg-name:
description:
Must be defined if the cros-ec is a rpmsg device for a Mediatek
- ARM Cortex M4 Co-processor. Contains the name pf the rpmsg
+ ARM Cortex M4 Co-processor. Contains the name of the rpmsg
device. Used to match the subnode to the rpmsg device announced by
the SCP.
$ref: "/schemas/types.yaml#/definitions/string"
diff --git a/Documentation/devicetree/bindings/mfd/rk808.txt b/Documentation/devicetree/bindings/mfd/rk808.txt
deleted file mode 100644
index 23a17a6663ec..000000000000
--- a/Documentation/devicetree/bindings/mfd/rk808.txt
+++ /dev/null
@@ -1,465 +0,0 @@
-RK8XX Power Management Integrated Circuit
-
-The rk8xx family current members:
-rk805
-rk808
-rk809
-rk817
-rk818
-
-Required properties:
-- compatible: "rockchip,rk805"
-- compatible: "rockchip,rk808"
-- compatible: "rockchip,rk809"
-- compatible: "rockchip,rk817"
-- compatible: "rockchip,rk818"
-- reg: I2C slave address
-- interrupts: the interrupt outputs of the controller.
-- #clock-cells: from common clock binding; shall be set to 1 (multiple clock
- outputs). See <dt-bindings/clock/rockchip,rk808.h> for clock IDs.
-
-Optional properties:
-- clock-output-names: From common clock binding to override the
- default output clock name
-- rockchip,system-power-controller: Telling whether or not this pmic is controlling
- the system power.
-- wakeup-source: Device can be used as a wakeup source.
-
-Optional RK805 properties:
-- vcc1-supply: The input supply for DCDC_REG1
-- vcc2-supply: The input supply for DCDC_REG2
-- vcc3-supply: The input supply for DCDC_REG3
-- vcc4-supply: The input supply for DCDC_REG4
-- vcc5-supply: The input supply for LDO_REG1 and LDO_REG2
-- vcc6-supply: The input supply for LDO_REG3
-
-Optional RK808 properties:
-- vcc1-supply: The input supply for DCDC_REG1
-- vcc2-supply: The input supply for DCDC_REG2
-- vcc3-supply: The input supply for DCDC_REG3
-- vcc4-supply: The input supply for DCDC_REG4
-- vcc6-supply: The input supply for LDO_REG1 and LDO_REG2
-- vcc7-supply: The input supply for LDO_REG3 and LDO_REG7
-- vcc8-supply: The input supply for SWITCH_REG1
-- vcc9-supply: The input supply for LDO_REG4 and LDO_REG5
-- vcc10-supply: The input supply for LDO_REG6
-- vcc11-supply: The input supply for LDO_REG8
-- vcc12-supply: The input supply for SWITCH_REG2
-- dvs-gpios: buck1/2 can be controlled by gpio dvs, this is GPIO specifiers
- for 2 host gpio's used for dvs. The format of the gpio specifier depends in
- the gpio controller. If DVS GPIOs aren't present, voltage changes will happen
- very quickly with no slow ramp time.
-
-Optional shared RK809 and RK817 properties:
-- vcc1-supply: The input supply for DCDC_REG1
-- vcc2-supply: The input supply for DCDC_REG2
-- vcc3-supply: The input supply for DCDC_REG3
-- vcc4-supply: The input supply for DCDC_REG4
-- vcc5-supply: The input supply for LDO_REG1, LDO_REG2, LDO_REG3
-- vcc6-supply: The input supply for LDO_REG4, LDO_REG5, LDO_REG6
-- vcc7-supply: The input supply for LDO_REG7, LDO_REG8, LDO_REG9
-
-Optional RK809 properties:
-- vcc8-supply: The input supply for SWITCH_REG1
-- vcc9-supply: The input supply for DCDC_REG5, SWITCH_REG2
-
-Optional RK817 properties:
-- clocks: The input clock for the audio codec
-- clock-names: The clock name for the codec clock. Should be "mclk".
-- #sound-dai-cells: Needed for the interpretation of sound dais. Should be 0.
-
-- vcc8-supply: The input supply for BOOST
-- vcc9-supply: The input supply for OTG_SWITCH
-- codec: The child node for the codec to hold additional properties.
- If no additional properties are required for the codec, this
- node can be omitted.
-
-- rockchip,mic-in-differential: Telling if the microphone uses differential
- mode. Should be under the codec child node.
-
-Optional RK818 properties:
-- vcc1-supply: The input supply for DCDC_REG1
-- vcc2-supply: The input supply for DCDC_REG2
-- vcc3-supply: The input supply for DCDC_REG3
-- vcc4-supply: The input supply for DCDC_REG4
-- boost-supply: The input supply for DCDC_BOOST
-- vcc6-supply: The input supply for LDO_REG1 and LDO_REG2
-- vcc7-supply: The input supply for LDO_REG3, LDO_REG5 and LDO_REG7
-- vcc8-supply: The input supply for LDO_REG4, LDO_REG6 and LDO_REG8
-- vcc9-supply: The input supply for LDO_REG9 and SWITCH_REG
-- h_5v-supply: The input supply for HDMI_SWITCH
-- usb-supply: The input supply for OTG_SWITCH
-
-Regulators: All the regulators of RK8XX to be instantiated shall be
-listed in a child node named 'regulators'. Each regulator is represented
-by a child node of the 'regulators' node.
-
- regulator-name {
- /* standard regulator bindings here */
- };
-
-Following regulators of the RK805 PMIC regulators are supported. Note that
-the 'n' in regulator name, as in DCDC_REGn or LDOn, represents the DCDC or LDO
-number as described in RK805 datasheet.
-
- - DCDC_REGn
- - valid values for n are 1 to 4.
- - LDO_REGn
- - valid values for n are 1 to 3
-
-Following regulators of the RK808 PMIC block are supported. Note that
-the 'n' in regulator name, as in DCDC_REGn or LDOn, represents the DCDC or LDO
-number as described in RK808 datasheet.
-
- - DCDC_REGn
- - valid values for n are 1 to 4.
- - LDO_REGn
- - valid values for n are 1 to 8.
- - SWITCH_REGn
- - valid values for n are 1 to 2
-
-Following regulators of the RK809 and RK817 PMIC blocks are supported. Note that
-the 'n' in regulator name, as in DCDC_REGn or LDOn, represents the DCDC or LDO
-number as described in RK809 and RK817 datasheets.
-
- - DCDC_REGn
- - valid values for n are 1 to 5 for RK809.
- - valid values for n are 1 to 4 for RK817.
- - LDO_REGn
- - valid values for n are 1 to 9 for RK809.
- - valid values for n are 1 to 9 for RK817.
- - SWITCH_REGn
- - valid values for n are 1 to 2 for RK809.
- - BOOST for RK817
- - OTG_SWITCH for RK817
-
-Following regulators of the RK818 PMIC block are supported. Note that
-the 'n' in regulator name, as in DCDC_REGn or LDOn, represents the DCDC or LDO
-number as described in RK818 datasheet.
-
- - DCDC_REGn
- - valid values for n are 1 to 4.
- - LDO_REGn
- - valid values for n are 1 to 9.
- - SWITCH_REG
- - HDMI_SWITCH
- - OTG_SWITCH
-
-It is necessary to configure three pins for both the RK809 and RK817, the three
-pins are "gpio_ts" "gpio_gt" "gpio_slp".
- The gpio_gt and gpio_ts pins support the gpio function.
- The gpio_slp pin is for controlling the pmic states, as below:
- - reset
- - power down
- - sleep
-
-Standard regulator bindings are used inside regulator subnodes. Check
- Documentation/devicetree/bindings/regulator/regulator.txt
-for more details
-
-Example:
- rk808: pmic@1b {
- compatible = "rockchip,rk808";
- clock-output-names = "xin32k", "rk808-clkout2";
- interrupt-parent = <&gpio0>;
- interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
- pinctrl-names = "default";
- pinctrl-0 = <&pmic_int &dvs_1 &dvs_2>;
- dvs-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>,
- <&gpio7 15 GPIO_ACTIVE_HIGH>;
- reg = <0x1b>;
- rockchip,system-power-controller;
- wakeup-source;
- #clock-cells = <1>;
-
- vcc8-supply = <&vcc_18>;
- vcc9-supply = <&vcc_io>;
- vcc10-supply = <&vcc_io>;
- vcc12-supply = <&vcc_io>;
- vddio-supply = <&vccio_pmu>;
-
- regulators {
- vdd_cpu: DCDC_REG1 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <750000>;
- regulator-max-microvolt = <1300000>;
- regulator-name = "vdd_arm";
- };
-
- vdd_gpu: DCDC_REG2 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <850000>;
- regulator-max-microvolt = <1250000>;
- regulator-name = "vdd_gpu";
- };
-
- vcc_ddr: DCDC_REG3 {
- regulator-always-on;
- regulator-boot-on;
- regulator-name = "vcc_ddr";
- };
-
- vcc_io: DCDC_REG4 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcc_io";
- };
-
- vccio_pmu: LDO_REG1 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vccio_pmu";
- };
-
- vcc_tp: LDO_REG2 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcc_tp";
- };
-
- vdd_10: LDO_REG3 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1000000>;
- regulator-name = "vdd_10";
- };
-
- vcc18_lcd: LDO_REG4 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-name = "vcc18_lcd";
- };
-
- vccio_sd: LDO_REG5 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vccio_sd";
- };
-
- vdd10_lcd: LDO_REG6 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1000000>;
- regulator-name = "vdd10_lcd";
- };
-
- vcc_18: LDO_REG7 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-name = "vcc_18";
- };
-
- vcca_codec: LDO_REG8 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcca_codec";
- };
-
- vcc_wl: SWITCH_REG1 {
- regulator-always-on;
- regulator-boot-on;
- regulator-name = "vcc_wl";
- };
-
- vcc_lcd: SWITCH_REG2 {
- regulator-always-on;
- regulator-boot-on;
- regulator-name = "vcc_lcd";
- };
- };
- };
-
- rk817: pmic@20 {
- compatible = "rockchip,rk817";
- reg = <0x20>;
- interrupt-parent = <&gpio0>;
- interrupts = <RK_PB2 IRQ_TYPE_LEVEL_LOW>;
- clock-output-names = "rk808-clkout1", "xin32k";
- clock-names = "mclk";
- clocks = <&cru SCLK_I2S1_OUT>;
- pinctrl-names = "default";
- pinctrl-0 = <&pmic_int>, <&i2s1_2ch_mclk>;
- wakeup-source;
- #clock-cells = <1>;
- #sound-dai-cells = <0>;
-
- vcc1-supply = <&vccsys>;
- vcc2-supply = <&vccsys>;
- vcc3-supply = <&vccsys>;
- vcc4-supply = <&vccsys>;
- vcc5-supply = <&vccsys>;
- vcc6-supply = <&vccsys>;
- vcc7-supply = <&vccsys>;
-
- regulators {
- vdd_logic: DCDC_REG1 {
- regulator-name = "vdd_logic";
- regulator-min-microvolt = <950000>;
- regulator-max-microvolt = <1150000>;
- regulator-ramp-delay = <6001>;
- regulator-always-on;
- regulator-boot-on;
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <950000>;
- };
- };
-
- vdd_arm: DCDC_REG2 {
- regulator-name = "vdd_arm";
- regulator-min-microvolt = <950000>;
- regulator-max-microvolt = <1350000>;
- regulator-ramp-delay = <6001>;
- regulator-always-on;
- regulator-boot-on;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <950000>;
- };
- };
-
- vcc_ddr: DCDC_REG3 {
- regulator-name = "vcc_ddr";
- regulator-always-on;
- regulator-boot-on;
-
- regulator-state-mem {
- regulator-on-in-suspend;
- };
- };
-
- vcc_3v3: DCDC_REG4 {
- regulator-name = "vcc_3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <3300000>;
- };
- };
-
- vcc_1v8: LDO_REG2 {
- regulator-name = "vcc_1v8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- regulator-boot-on;
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <1800000>;
- };
- };
-
- vdd_1v0: LDO_REG3 {
- regulator-name = "vdd_1v0";
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1000000>;
- regulator-always-on;
- regulator-boot-on;
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <1000000>;
- };
- };
-
- vcc3v3_pmu: LDO_REG4 {
- regulator-name = "vcc3v3_pmu";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <3300000>;
- };
- };
-
- vccio_sd: LDO_REG5 {
- regulator-name = "vccio_sd";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <3300000>;
- };
- };
-
- vcc_sd: LDO_REG6 {
- regulator-name = "vcc_sd";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-boot-on;
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <3300000>;
- };
- };
-
- vcc_bl: LDO_REG7 {
- regulator-name = "vcc_bl";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <3300000>;
- };
- };
-
- vcc_lcd: LDO_REG8 {
- regulator-name = "vcc_lcd";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <2800000>;
- };
- };
-
- vcc_cam: LDO_REG9 {
- regulator-name = "vcc_cam";
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <3000000>;
- };
- };
- };
-
- rk817_codec: codec {
- rockchip,mic-in-differential;
- };
- };
diff --git a/Documentation/devicetree/bindings/mfd/rockchip,rk805.yaml b/Documentation/devicetree/bindings/mfd/rockchip,rk805.yaml
new file mode 100644
index 000000000000..4992f71b6fc3
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/rockchip,rk805.yaml
@@ -0,0 +1,219 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/rockchip,rk805.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: RK805 Power Management Integrated Circuit
+
+maintainers:
+ - Chris Zhong <zyw@rock-chips.com>
+ - Zhang Qing <zhangqing@rock-chips.com>
+
+description: |
+ Rockchip RK805 series PMIC. This device consists of an i2c controlled MFD
+ that includes multiple switchable regulators.
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk805
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#clock-cells':
+ description:
+ See <dt-bindings/clock/rockchip,rk808.h> for clock IDs.
+ minimum: 0
+ maximum: 1
+
+ clock-output-names:
+ description:
+ From common clock binding to override the default output clock name.
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ rockchip,system-power-controller:
+ type: boolean
+ description:
+ Indicates whether this PMIC controls the system power.
+
+ wakeup-source:
+ type: boolean
+ description:
+ Device can be used as a wakeup source.
+
+ vcc1-supply:
+ description:
+ The input supply for DCDC_REG1.
+
+ vcc2-supply:
+ description:
+ The input supply for DCDC_REG2.
+
+ vcc3-supply:
+ description:
+ The input supply for DCDC_REG3.
+
+ vcc4-supply:
+ description:
+ The input supply for DCDC_REG4.
+
+ vcc5-supply:
+ description:
+ The input supply for LDO_REG1 and LDO_REG2.
+
+ vcc6-supply:
+ description:
+ The input supply for LDO_REG3.
+
+ regulators:
+ type: object
+ patternProperties:
+ "^(DCDC_REG[1-4]|LDO_REG[1-3])$":
+ type: object
+ $ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+allOf:
+ - if:
+ properties:
+ '#clock-cells':
+ const: 0
+
+ then:
+ properties:
+ clock-output-names:
+ maxItems: 1
+
+ else:
+ properties:
+ clock-output-names:
+ maxItems: 2
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#clock-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/pinctrl/rockchip.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@18 {
+ compatible = "rockchip,rk805";
+ reg = <0x18>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <RK_PA6 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l>;
+ rockchip,system-power-controller;
+ wakeup-source;
+ #clock-cells = <0>;
+
+ vcc1-supply = <&vcc_sys>;
+ vcc2-supply = <&vcc_sys>;
+ vcc3-supply = <&vcc_sys>;
+ vcc4-supply = <&vcc_sys>;
+ vcc5-supply = <&vcc_io>;
+ vcc6-supply = <&vcc_io>;
+
+ regulators {
+ vdd_logic: DCDC_REG1 {
+ regulator-name = "vdd_logic";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vdd_arm: DCDC_REG2 {
+ regulator-name = "vdd_arm";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_io: DCDC_REG4 {
+ regulator-name = "vcc_io";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vdd_18: LDO_REG1 {
+ regulator-name = "vdd_18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc18_emmc: LDO_REG2 {
+ regulator-name = "vcc_18emmc";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_11: LDO_REG3 {
+ regulator-name = "vdd_11";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1100000>;
+ };
+ };
+ };
+ };
+ };
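For reference, a consumer picks up the RK805's 32 kHz output through the clock specifier implied by '#clock-cells'. Below is a minimal, hypothetical sketch (not part of the binding), assuming the pmic node above is given a label such as 'rk805' and keeps '#clock-cells = <0>', so the phandle alone selects the clock:

    /* Hypothetical consumer of the RK805 32 kHz clock output */
    wifi-pwrseq {
        compatible = "mmc-pwrseq-simple";
        /* with #clock-cells = <0> no clock index cell is needed */
        clocks = <&rk805>;
        clock-names = "ext_clock";
    };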
diff --git a/Documentation/devicetree/bindings/mfd/rockchip,rk808.yaml b/Documentation/devicetree/bindings/mfd/rockchip,rk808.yaml
new file mode 100644
index 000000000000..f5908fa01a61
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/rockchip,rk808.yaml
@@ -0,0 +1,257 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/rockchip,rk808.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: RK808 Power Management Integrated Circuit
+
+maintainers:
+ - Chris Zhong <zyw@rock-chips.com>
+ - Zhang Qing <zhangqing@rock-chips.com>
+
+description: |
+ Rockchip RK808 series PMIC. This device consists of an i2c controlled MFD
+ that includes regulators, an RTC, and a power button.
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk808
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#clock-cells':
+ description:
+ See <dt-bindings/clock/rockchip,rk808.h> for clock IDs.
+ const: 1
+
+ clock-output-names:
+ description:
+ From common clock binding to override the default output clock name.
+ maxItems: 2
+
+ rockchip,system-power-controller:
+ type: boolean
+ description:
+ Indicates whether this PMIC controls the system power.
+
+ wakeup-source:
+ type: boolean
+ description:
+ Device can be used as a wakeup source.
+
+ vcc1-supply:
+ description:
+ The input supply for DCDC_REG1.
+
+ vcc2-supply:
+ description:
+ The input supply for DCDC_REG2.
+
+ vcc3-supply:
+ description:
+ The input supply for DCDC_REG3.
+
+ vcc4-supply:
+ description:
+ The input supply for DCDC_REG4.
+
+ vcc6-supply:
+ description:
+ The input supply for LDO_REG1 and LDO_REG2.
+
+ vcc7-supply:
+ description:
+ The input supply for LDO_REG3 and LDO_REG7.
+
+ vcc8-supply:
+ description:
+ The input supply for SWITCH_REG1.
+
+ vcc9-supply:
+ description:
+ The input supply for LDO_REG4 and LDO_REG5.
+
+ vcc10-supply:
+ description:
+ The input supply for LDO_REG6.
+
+ vcc11-supply:
+ description:
+ The input supply for LDO_REG8.
+
+ vcc12-supply:
+ description:
+ The input supply for SWITCH_REG2.
+
+ vddio-supply:
+ description:
+ The input supply for digital IO.
+
+ dvs-gpios:
+ description: |
+ Buck1/2 can be controlled by GPIO DVS; these are the GPIO specifiers
+ for the two host GPIOs used for DVS. The format of the GPIO specifier
+ depends on the GPIO controller. If DVS GPIOs aren't present, voltage
+ changes will happen very quickly with no slow ramp time.
+ maxItems: 2
+
+ regulators:
+ type: object
+ patternProperties:
+ "^(DCDC_REG[1-4]|LDO_REG[1-8]|SWITCH_REG[1-2])$":
+ type: object
+ $ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#clock-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/pinctrl/rockchip.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rk808: pmic@1b {
+ compatible = "rockchip,rk808";
+ clock-output-names = "xin32k", "rk808-clkout2";
+ interrupt-parent = <&gpio0>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int &dvs_1 &dvs_2>;
+ dvs-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>,
+ <&gpio7 15 GPIO_ACTIVE_HIGH>;
+ reg = <0x1b>;
+ rockchip,system-power-controller;
+ wakeup-source;
+ #clock-cells = <1>;
+
+ vcc8-supply = <&vcc_18>;
+ vcc9-supply = <&vcc_io>;
+ vcc10-supply = <&vcc_io>;
+ vcc12-supply = <&vcc_io>;
+ vddio-supply = <&vccio_pmu>;
+
+ regulators {
+ vdd_cpu: DCDC_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-name = "vdd_arm";
+ };
+
+ vdd_gpu: DCDC_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-name = "vdd_gpu";
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_ddr";
+ };
+
+ vcc_io: DCDC_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_io";
+ };
+
+ vccio_pmu: LDO_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vccio_pmu";
+ };
+
+ vcc_tp: LDO_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_tp";
+ };
+
+ vdd_10: LDO_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-name = "vdd_10";
+ };
+
+ vcc18_lcd: LDO_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc18_lcd";
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vccio_sd";
+ };
+
+ vdd10_lcd: LDO_REG6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-name = "vdd10_lcd";
+ };
+
+ vcc_18: LDO_REG7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_18";
+ };
+
+ vcca_codec: LDO_REG8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcca_codec";
+ };
+
+ vcc_wl: SWITCH_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_wl";
+ };
+
+ vcc_lcd: SWITCH_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_lcd";
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/rockchip,rk809.yaml b/Documentation/devicetree/bindings/mfd/rockchip,rk809.yaml
new file mode 100644
index 000000000000..7fb849ac74a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/rockchip,rk809.yaml
@@ -0,0 +1,284 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/rockchip,rk809.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: RK809 Power Management Integrated Circuit
+
+maintainers:
+ - Chris Zhong <zyw@rock-chips.com>
+ - Zhang Qing <zhangqing@rock-chips.com>
+
+description: |
+ Rockchip RK809 series PMIC. This device consists of an i2c controlled MFD
+ that includes regulators, an RTC, and a power button.
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk809
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#clock-cells':
+ description: |
+ See <dt-bindings/clock/rockchip,rk808.h> for clock IDs.
+ minimum: 0
+ maximum: 1
+
+ clock-output-names:
+ description:
+ From common clock binding to override the default output clock name.
+
+ rockchip,system-power-controller:
+ type: boolean
+ description:
+ Indicates whether this PMIC controls the system power.
+
+ wakeup-source:
+ type: boolean
+ description:
+ Device can be used as a wakeup source.
+
+ vcc1-supply:
+ description:
+ The input supply for DCDC_REG1.
+
+ vcc2-supply:
+ description:
+ The input supply for DCDC_REG2.
+
+ vcc3-supply:
+ description:
+ The input supply for DCDC_REG3.
+
+ vcc4-supply:
+ description:
+ The input supply for DCDC_REG4.
+
+ vcc5-supply:
+ description:
+ The input supply for LDO_REG1, LDO_REG2, and LDO_REG3.
+
+ vcc6-supply:
+ description:
+ The input supply for LDO_REG4, LDO_REG5, and LDO_REG6.
+
+ vcc7-supply:
+ description:
+ The input supply for LDO_REG7, LDO_REG8, and LDO_REG9.
+
+ vcc8-supply:
+ description:
+ The input supply for SWITCH_REG1.
+
+ vcc9-supply:
+ description:
+ The input supply for DCDC_REG5 and SWITCH_REG2.
+
+ regulators:
+ type: object
+ patternProperties:
+ "^(LDO_REG[1-9]|DCDC_REG[1-5]|SWITCH_REG[1-2])$":
+ type: object
+ $ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+allOf:
+ - if:
+ properties:
+ '#clock-cells':
+ const: 0
+
+ then:
+ properties:
+ clock-output-names:
+ maxItems: 1
+
+ else:
+ properties:
+ clock-output-names:
+ maxItems: 2
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#clock-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/pinctrl/rockchip.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rk808: pmic@1b {
+ compatible = "rockchip,rk808";
+ reg = <0x1b>;
+ #clock-cells = <1>;
+ clock-output-names = "xin32k", "rk808-clkout2";
+ interrupt-parent = <&gpio3>;
+ interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l_pin>;
+ rockchip,system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vcc_sysin>;
+ vcc2-supply = <&vcc_sysin>;
+ vcc3-supply = <&vcc_sysin>;
+ vcc4-supply = <&vcc_sysin>;
+ vcc6-supply = <&vcc_sysin>;
+ vcc7-supply = <&vcc_sysin>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc_sysin>;
+ vcc10-supply = <&vcc_sysin>;
+ vcc11-supply = <&vcc_sysin>;
+ vcc12-supply = <&vcc3v3_sys>;
+
+ regulators {
+ vdd_center: DCDC_REG1 {
+ regulator-name = "vdd_center";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_l: DCDC_REG2 {
+ regulator-name = "vdd_cpu_l";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_1v8: vcc_wl: DCDC_REG4 {
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc1v8_pmupll: LDO_REG3 {
+ regulator-name = "vcc1v8_pmupll";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc_sdio: LDO_REG4 {
+ regulator-name = "vcc_sdio";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcca3v0_codec: LDO_REG5 {
+ regulator-name = "vcca3v0_codec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v5: LDO_REG6 {
+ regulator-name = "vcc_1v5";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1500000>;
+ };
+ };
+
+ vcca1v8_codec: LDO_REG7 {
+ regulator-name = "vcca1v8_codec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v0: LDO_REG8 {
+ regulator-name = "vcc_3v0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcc3v3_s3: SWITCH_REG1 {
+ regulator-name = "vcc3v3_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_s0: SWITCH_REG2 {
+ regulator-name = "vcc3v3_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml b/Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml
new file mode 100644
index 000000000000..bfc1720adc43
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml
@@ -0,0 +1,330 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/rockchip,rk817.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: RK817 Power Management Integrated Circuit
+
+maintainers:
+ - Chris Zhong <zyw@rock-chips.com>
+ - Zhang Qing <zhangqing@rock-chips.com>
+
+description: |
+ Rockchip RK817 series PMIC. This device consists of an i2c controlled MFD
+ that includes regulators, an RTC, a power button, an audio codec, and a
+ battery charger manager.
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk817
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#clock-cells':
+ description:
+ See <dt-bindings/clock/rockchip,rk808.h> for clock IDs.
+ minimum: 0
+ maximum: 1
+
+ clock-output-names:
+ description:
+ From common clock binding to override the default output clock name.
+
+ rockchip,system-power-controller:
+ type: boolean
+ description:
+ Indicates whether this PMIC controls the system power.
+
+ wakeup-source:
+ type: boolean
+ description:
+ Device can be used as a wakeup source.
+
+ vcc1-supply:
+ description:
+ The input supply for DCDC_REG1.
+
+ vcc2-supply:
+ description:
+ The input supply for DCDC_REG2.
+
+ vcc3-supply:
+ description:
+ The input supply for DCDC_REG3.
+
+ vcc4-supply:
+ description:
+ The input supply for DCDC_REG4.
+
+ vcc5-supply:
+ description:
+ The input supply for LDO_REG1, LDO_REG2, and LDO_REG3.
+
+ vcc6-supply:
+ description:
+ The input supply for LDO_REG4, LDO_REG5, and LDO_REG6.
+
+ vcc7-supply:
+ description:
+ The input supply for LDO_REG7, LDO_REG8, and LDO_REG9.
+
+ vcc8-supply:
+ description:
+ The input supply for BOOST.
+
+ vcc9-supply:
+ description:
+ The input supply for OTG_SWITCH.
+
+ regulators:
+ type: object
+ patternProperties:
+ "^(LDO_REG[1-9]|DCDC_REG[1-4]|BOOST|OTG_SWITCH)$":
+ type: object
+ $ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+ clocks:
+ description:
+ The input clock for the audio codec.
+
+ clock-names:
+ description:
+ The clock name for the codec clock.
+ items:
+ - const: mclk
+
+ '#sound-dai-cells':
+ description:
+ Needed for the interpretation of sound dais.
+ const: 0
+
+ codec:
+ description: |
+ The child node for the codec to hold additional properties. If no
+ additional properties are required for the codec, this node can be
+ omitted.
+ type: object
+ properties:
+ rockchip,mic-in-differential:
+ type: boolean
+ description:
+ Describes if the microphone uses differential mode.
+
+allOf:
+ - if:
+ properties:
+ '#clock-cells':
+ const: 0
+
+ then:
+ properties:
+ clock-output-names:
+ maxItems: 1
+
+ else:
+ properties:
+ clock-output-names:
+ maxItems: 2
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#clock-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/px30-cru.h>
+ #include <dt-bindings/pinctrl/rockchip.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rk817: pmic@20 {
+ compatible = "rockchip,rk817";
+ reg = <0x20>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB2 IRQ_TYPE_LEVEL_LOW>;
+ clock-output-names = "rk808-clkout1", "xin32k";
+ clock-names = "mclk";
+ clocks = <&cru SCLK_I2S1_OUT>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int>, <&i2s1_2ch_mclk>;
+ wakeup-source;
+ #clock-cells = <1>;
+ #sound-dai-cells = <0>;
+
+ vcc1-supply = <&vccsys>;
+ vcc2-supply = <&vccsys>;
+ vcc3-supply = <&vccsys>;
+ vcc4-supply = <&vccsys>;
+ vcc5-supply = <&vccsys>;
+ vcc6-supply = <&vccsys>;
+ vcc7-supply = <&vccsys>;
+
+ regulators {
+ vdd_logic: DCDC_REG1 {
+ regulator-name = "vdd_logic";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-ramp-delay = <6001>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vdd_arm: DCDC_REG2 {
+ regulator-name = "vdd_arm";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_3v3: DCDC_REG4 {
+ regulator-name = "vcc_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_1v8: LDO_REG2 {
+ regulator-name = "vcc_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_1v0: LDO_REG3 {
+ regulator-name = "vdd_1v0";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc3v3_pmu: LDO_REG4 {
+ regulator-name = "vcc3v3_pmu";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_sd: LDO_REG6 {
+ regulator-name = "vcc_sd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_bl: LDO_REG7 {
+ regulator-name = "vcc_bl";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_lcd: LDO_REG8 {
+ regulator-name = "vcc_lcd";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <2800000>;
+ };
+ };
+
+ vcc_cam: LDO_REG9 {
+ regulator-name = "vcc_cam";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+ };
+
+ rk817_codec: codec {
+ rockchip,mic-in-differential;
+ };
+ };
+ };
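Since the RK817 codec is exposed as a DAI with '#sound-dai-cells = <0>', a machine driver can link to it directly. A minimal sketch using the generic simple-audio-card binding, assuming a hypothetical I2S controller labelled 'i2s1_2ch' on the CPU side:

    /* Hypothetical sound card using the RK817 codec as a DAI */
    sound {
        compatible = "simple-audio-card";
        simple-audio-card,name = "rk817-sound";
        simple-audio-card,format = "i2s";

        simple-audio-card,cpu {
            sound-dai = <&i2s1_2ch>;
        };

        simple-audio-card,codec {
            sound-dai = <&rk817>;
        };
    };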
diff --git a/Documentation/devicetree/bindings/mfd/rockchip,rk818.yaml b/Documentation/devicetree/bindings/mfd/rockchip,rk818.yaml
new file mode 100644
index 000000000000..b57c4b005cf4
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/rockchip,rk818.yaml
@@ -0,0 +1,282 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/rockchip,rk818.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: RK818 Power Management Integrated Circuit
+
+maintainers:
+ - Chris Zhong <zyw@rock-chips.com>
+ - Zhang Qing <zhangqing@rock-chips.com>
+
+description: |
+ Rockchip RK818 series PMIC. This device consists of an i2c controlled MFD
+ that includes regulators, an RTC, and a power button.
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk818
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#clock-cells':
+ description: |
+ See <dt-bindings/clock/rockchip,rk808.h> for clock IDs.
+ const: 1
+
+ clock-output-names:
+ description:
+ From common clock binding to override the default output clock name.
+ maxItems: 2
+
+ rockchip,system-power-controller:
+ type: boolean
+ description:
+ Indicates whether this PMIC controls the system power.
+
+ wakeup-source:
+ type: boolean
+ description:
+ Device can be used as a wakeup source.
+
+ vcc1-supply:
+ description:
+ The input supply for DCDC_REG1.
+
+ vcc2-supply:
+ description:
+ The input supply for DCDC_REG2.
+
+ vcc3-supply:
+ description:
+ The input supply for DCDC_REG3.
+
+ vcc4-supply:
+ description:
+ The input supply for DCDC_REG4.
+
+ boost-supply:
+ description:
+ The input supply for DCDC_BOOST.
+
+ vcc6-supply:
+ description:
+ The input supply for LDO_REG1 and LDO_REG2.
+
+ vcc7-supply:
+ description:
+ The input supply for LDO_REG3, LDO_REG5, and LDO_REG7.
+
+ vcc8-supply:
+ description:
+ The input supply for LDO_REG4, LDO_REG6, and LDO_REG8.
+
+ vcc9-supply:
+ description:
+ The input supply for LDO_REG9 and SWITCH_REG.
+
+ vddio-supply:
+ description:
+ The input supply for digital IO.
+
+ h_5v-supply:
+ description:
+ The input supply for HDMI_SWITCH.
+
+ usb-supply:
+ description:
+ The input supply for OTG_SWITCH.
+
+ regulators:
+ type: object
+ patternProperties:
+ "^(DCDC_REG[1-4]|DCDC_BOOST|LDO_REG[1-9]|SWITCH_REG|HDMI_SWITCH|OTG_SWITCH)$":
+ type: object
+ $ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#clock-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/px30-cru.h>
+ #include <dt-bindings/pinctrl/rockchip.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rk818: pmic@1c {
+ compatible = "rockchip,rk818";
+ reg = <0x1c>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int>;
+ rockchip,system-power-controller;
+ wakeup-source;
+ #clock-cells = <1>;
+
+ vcc1-supply = <&vdd_sys>;
+ vcc2-supply = <&vdd_sys>;
+ vcc3-supply = <&vdd_sys>;
+ vcc4-supply = <&vdd_sys>;
+ boost-supply = <&vdd_in_otg_out>;
+ vcc6-supply = <&vdd_sys>;
+ vcc7-supply = <&vdd_misc_1v8>;
+ vcc8-supply = <&vdd_misc_1v8>;
+ vcc9-supply = <&vdd_3v3_io>;
+ vddio-supply = <&vdd_3v3_io>;
+
+ regulators {
+ vdd_log: DCDC_REG1 {
+ regulator-name = "vdd_log";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: DCDC_REG2 {
+ regulator-name = "vdd_gpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vdd_3v3_io: DCDC_REG4 {
+ regulator-name = "vdd_3v3_io";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vdd_sys: DCDC_BOOST {
+ regulator-name = "vdd_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <5000000>;
+ };
+ };
+
+ vdd_sd: SWITCH_REG {
+ regulator-name = "vdd_sd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_eth_2v5: LDO_REG2 {
+ regulator-name = "vdd_eth_2v5";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2500000>;
+ };
+ };
+
+ vdd_1v0: LDO_REG3 {
+ regulator-name = "vdd_1v0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vdd_1v8_lcd_ldo: LDO_REG4 {
+ regulator-name = "vdd_1v8_lcd_ldo";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_1v0_lcd: LDO_REG6 {
+ regulator-name = "vdd_1v0_lcd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vdd_1v8_ldo: LDO_REG7 {
+ regulator-name = "vdd_1v8_ldo";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_io_sd: LDO_REG9 {
+ regulator-name = "vdd_io_sd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.yaml b/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.yaml
index f7bb67d10eff..b97b06848729 100644
--- a/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.yaml
+++ b/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.yaml
@@ -79,8 +79,8 @@ examples:
clocks = <&cmu_aud CLK_ACLK_DMAC>;
clock-names = "apb_pclk";
#dma-cells = <1>;
- #dma-channels = <8>;
- #dma-requests = <32>;
+ dma-channels = <8>;
+ dma-requests = <32>;
power-domains = <&pd_aud>;
};
diff --git a/Documentation/devicetree/bindings/mfd/syscon.yaml b/Documentation/devicetree/bindings/mfd/syscon.yaml
index 13baa452cc9d..fb784045013f 100644
--- a/Documentation/devicetree/bindings/mfd/syscon.yaml
+++ b/Documentation/devicetree/bindings/mfd/syscon.yaml
@@ -100,12 +100,4 @@ examples:
compatible = "allwinner,sun8i-h3-system-controller", "syscon";
reg = <0x01c00000 0x1000>;
};
-
- - |
- gpr: iomuxc-gpr@20e0000 {
- compatible = "fsl,imx6q-iomuxc-gpr", "syscon";
- reg = <0x020e0000 0x38>;
- hwlocks = <&hwlock1 1>;
- };
-
...
diff --git a/Documentation/devicetree/bindings/mfd/wlf,arizona.yaml b/Documentation/devicetree/bindings/mfd/wlf,arizona.yaml
index 9e762d474218..ea3337dafaf5 100644
--- a/Documentation/devicetree/bindings/mfd/wlf,arizona.yaml
+++ b/Documentation/devicetree/bindings/mfd/wlf,arizona.yaml
@@ -14,6 +14,7 @@ description: |
range of analogue I/O.
allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml
- $ref: /schemas/sound/wlf,arizona.yaml#
- $ref: /schemas/regulator/wlf,arizona.yaml#
- $ref: /schemas/extcon/wlf,arizona.yaml#
diff --git a/Documentation/devicetree/bindings/mtd/ingenic,nand.yaml b/Documentation/devicetree/bindings/mtd/ingenic,nand.yaml
index 9de8ef6e59ca..8c272c842bfd 100644
--- a/Documentation/devicetree/bindings/mtd/ingenic,nand.yaml
+++ b/Documentation/devicetree/bindings/mtd/ingenic,nand.yaml
@@ -11,6 +11,7 @@ maintainers:
allOf:
- $ref: nand-controller.yaml#
+ - $ref: /schemas/memory-controllers/ingenic,nemc-peripherals.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/mtd/spi-nand.yaml b/Documentation/devicetree/bindings/mtd/spi-nand.yaml
index 431faac518a4..dd3cd1d53009 100644
--- a/Documentation/devicetree/bindings/mtd/spi-nand.yaml
+++ b/Documentation/devicetree/bindings/mtd/spi-nand.yaml
@@ -11,6 +11,7 @@ maintainers:
allOf:
- $ref: "nand-chip.yaml#"
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/mux/reg-mux.yaml b/Documentation/devicetree/bindings/mux/reg-mux.yaml
index 3b030b8fb47c..dfd9ea582bb7 100644
--- a/Documentation/devicetree/bindings/mux/reg-mux.yaml
+++ b/Documentation/devicetree/bindings/mux/reg-mux.yaml
@@ -100,7 +100,6 @@ examples:
#include <dt-bindings/mux/mux.h>
syscon@1000 {
- compatible = "fsl,imx7d-iomuxc-gpr", "fsl,imx6q-iomuxc-gpr", "syscon", "simple-mfd";
reg = <0x1000 0x100>;
mux2: mux-controller {
diff --git a/Documentation/devicetree/bindings/net/adi,adin.yaml b/Documentation/devicetree/bindings/net/adi,adin.yaml
index 77750df0c2c4..929cf8c0b0fd 100644
--- a/Documentation/devicetree/bindings/net/adi,adin.yaml
+++ b/Documentation/devicetree/bindings/net/adi,adin.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analog Devices ADIN1200/ADIN1300 PHY
maintainers:
- - Alexandru Ardelean <alexandru.ardelean@analog.com>
+ - Alexandru Tachici <alexandru.tachici@analog.com>
description: |
Bindings for Analog Devices Industrial Ethernet PHYs
@@ -37,7 +37,8 @@ properties:
default: 8
adi,phy-output-clock:
- description: Select clock output on GP_CLK pin. Two clocks are available:
+ description: |
+ Select clock output on GP_CLK pin. Two clocks are available:
A 25MHz reference and a free-running 125MHz.
The phy can alternatively automatically switch between the reference and
the 125MHz clocks based on its internal state.
diff --git a/Documentation/devicetree/bindings/net/cdns,macb.yaml b/Documentation/devicetree/bindings/net/cdns,macb.yaml
index 337cec4d85ca..86fc31c2d91b 100644
--- a/Documentation/devicetree/bindings/net/cdns,macb.yaml
+++ b/Documentation/devicetree/bindings/net/cdns,macb.yaml
@@ -191,7 +191,6 @@ examples:
clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
#address-cells = <1>;
#size-cells = <0>;
- #stream-id-cells = <1>;
iommus = <&smmu 0x875>;
power-domains = <&zynqmp_firmware PD_ETH_1>;
resets = <&zynqmp_reset ZYNQMP_RESET_GEM1>;
diff --git a/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml b/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
index c3c938893ad9..23114d691d2a 100644
--- a/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
@@ -6,9 +6,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Broadcom BCM53xx Ethernet switches
-allOf:
- - $ref: dsa.yaml#
-
maintainers:
- Florian Fainelli <f.fainelli@gmail.com>
@@ -68,53 +65,71 @@ required:
- compatible
- reg
-# BCM585xx/586xx/88312 SoCs
-if:
- properties:
- compatible:
- contains:
- enum:
- - brcm,bcm58522-srab
- - brcm,bcm58523-srab
- - brcm,bcm58525-srab
- - brcm,bcm58622-srab
- - brcm,bcm58623-srab
- - brcm,bcm58625-srab
- - brcm,bcm88312-srab
-then:
- properties:
- reg:
- minItems: 3
- maxItems: 3
- reg-names:
- items:
- - const: srab
- - const: mux_config
- - const: sgmii_config
- interrupts:
- minItems: 13
- maxItems: 13
- interrupt-names:
- items:
- - const: link_state_p0
- - const: link_state_p1
- - const: link_state_p2
- - const: link_state_p3
- - const: link_state_p4
- - const: link_state_p5
- - const: link_state_p7
- - const: link_state_p8
- - const: phy
- - const: ts
- - const: imp_sleep_timer_p5
- - const: imp_sleep_timer_p7
- - const: imp_sleep_timer_p8
- required:
- - interrupts
-else:
- properties:
- reg:
- maxItems: 1
+allOf:
+ - $ref: dsa.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,bcm5325
+ - brcm,bcm53115
+ - brcm,bcm53125
+ - brcm,bcm53128
+ - brcm,bcm5365
+ - brcm,bcm5395
+ - brcm,bcm5397
+ - brcm,bcm5398
+ then:
+ $ref: /schemas/spi/spi-peripheral-props.yaml
+
+ # BCM585xx/586xx/88312 SoCs
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,bcm58522-srab
+ - brcm,bcm58523-srab
+ - brcm,bcm58525-srab
+ - brcm,bcm58622-srab
+ - brcm,bcm58623-srab
+ - brcm,bcm58625-srab
+ - brcm,bcm88312-srab
+ then:
+ properties:
+ reg:
+ minItems: 3
+ maxItems: 3
+ reg-names:
+ items:
+ - const: srab
+ - const: mux_config
+ - const: sgmii_config
+ interrupts:
+ minItems: 13
+ maxItems: 13
+ interrupt-names:
+ items:
+ - const: link_state_p0
+ - const: link_state_p1
+ - const: link_state_p2
+ - const: link_state_p3
+ - const: link_state_p4
+ - const: link_state_p5
+ - const: link_state_p7
+ - const: link_state_p8
+ - const: phy
+ - const: ts
+ - const: imp_sleep_timer_p5
+ - const: imp_sleep_timer_p7
+ - const: imp_sleep_timer_p8
+ required:
+ - interrupts
+ else:
+ properties:
+ reg:
+ maxItems: 1
unevaluatedProperties: false
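The new reference to spi-peripheral-props covers the SPI bus properties used when one of the SPI-managed variants sits on an SPI controller. A minimal, hypothetical sketch (ports and other required DSA properties omitted for brevity; the frequency value is illustrative only):

    spi {
        #address-cells = <1>;
        #size-cells = <0>;

        switch@0 {
            compatible = "brcm,bcm5325";
            reg = <0>;
            /* allowed via spi-peripheral-props.yaml */
            spi-max-frequency = <500000>;
            /* ethernet-ports { ... } omitted for brevity */
        };
    };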
diff --git a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
index 184152087b60..6bbd8145b6c1 100644
--- a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
@@ -12,6 +12,7 @@ maintainers:
allOf:
- $ref: dsa.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
# See Documentation/devicetree/bindings/net/dsa/dsa.yaml for a list of additional
diff --git a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
index 1ea0bd490473..1e26d876d146 100644
--- a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
@@ -14,6 +14,7 @@ description:
allOf:
- $ref: "dsa.yaml#"
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
maintainers:
- Vladimir Oltean <vladimir.oltean@nxp.com>
diff --git a/Documentation/devicetree/bindings/net/dsa/realtek.yaml b/Documentation/devicetree/bindings/net/dsa/realtek.yaml
index 99ee4b5b9346..4f99aff029dc 100644
--- a/Documentation/devicetree/bindings/net/dsa/realtek.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/realtek.yaml
@@ -108,6 +108,7 @@ if:
- reg
then:
+ $ref: /schemas/spi/spi-peripheral-props.yaml#
not:
required:
- mdc-gpios
diff --git a/Documentation/devicetree/bindings/net/mediatek,net.yaml b/Documentation/devicetree/bindings/net/mediatek,net.yaml
index 699164dd1295..f5564ecddb62 100644
--- a/Documentation/devicetree/bindings/net/mediatek,net.yaml
+++ b/Documentation/devicetree/bindings/net/mediatek,net.yaml
@@ -27,6 +27,9 @@ properties:
reg:
maxItems: 1
+ clocks: true
+ clock-names: true
+
interrupts:
minItems: 3
maxItems: 4
diff --git a/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml b/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml
index 901944683322..61b2fb9e141b 100644
--- a/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml
@@ -58,6 +58,9 @@ properties:
- const: rmii_internal
- const: mac_cg
+ power-domains:
+ maxItems: 1
+
mediatek,pericfg:
$ref: /schemas/types.yaml#/definitions/phandle
description:
diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
index 249967d8d750..5a12dc32288a 100644
--- a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
@@ -51,7 +51,7 @@ properties:
description:
Specify the consys reset for mt7986.
- reset-name:
+ reset-names:
const: consys
mediatek,infracfg:
diff --git a/Documentation/devicetree/bindings/nvmem/apple,efuses.yaml b/Documentation/devicetree/bindings/nvmem/apple,efuses.yaml
new file mode 100644
index 000000000000..5ec8f2bdb3a5
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/apple,efuses.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/apple,efuses.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple SoC eFuse-based NVMEM
+
+description: |
+ Apple SoCs such as the M1 contain factory-programmed eFuses used to e.g. store
+ calibration data for the PCIe and the Type-C PHY or unique chip identifiers
+ such as the ECID.
+
+maintainers:
+ - Sven Peter <sven@svenpeter.dev>
+
+allOf:
+ - $ref: "nvmem.yaml#"
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - apple,t8103-efuses
+ - apple,t6000-efuses
+ - const: apple,efuses
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ efuse@3d2bc000 {
+ compatible = "apple,t8103-efuses", "apple,efuses";
+ reg = <0x3d2bc000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ecid: efuse@500 {
+ reg = <0x500 0x8>;
+ };
+ };
+
+...
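Cells defined under the efuse node follow the generic NVMEM cell layout, so consumers reference them with the standard nvmem-cells/nvmem-cell-names properties. A minimal, hypothetical sketch (placeholder compatible) consuming the 'ecid' cell from the example above:

    /* Hypothetical consumer of the ECID eFuse cell */
    soc-id {
        compatible = "vendor,example-soc-id";   /* placeholder */
        nvmem-cells = <&ecid>;
        nvmem-cell-names = "ecid";
    };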
diff --git a/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml b/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml
index 80914b93638e..3b4e6e94cb81 100644
--- a/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml
+++ b/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml
@@ -10,7 +10,7 @@ maintainers:
- Michael Walle <michael@walle.cc>
description: |
- SFP is the security fuse processor which among other things provide a
+ SFP is the security fuse processor which among other things provides a
unique identifier per part.
allOf:
@@ -18,21 +18,45 @@ allOf:
properties:
compatible:
- enum:
- - fsl,ls1028a-sfp
+ oneOf:
+ - description: Trust architecture 2.1 SFP
+ items:
+ - const: fsl,ls1021a-sfp
+ - description: Trust architecture 3.0 SFP
+ items:
+ - const: fsl,ls1028a-sfp
reg:
maxItems: 1
+ clocks:
+ maxItems: 1
+ description:
+ The SFP clock. Typically, this is the platform clock divided by 4.
+
+ clock-names:
+ const: sfp
+
+ ta-prog-sfp-supply:
+ description:
+ The regulator for the TA_PROG_SFP pin. It will be enabled for programming
+ and disabled for reading.
+
required:
- compatible
- reg
+ - clock-names
+ - clocks
unevaluatedProperties: false
examples:
- |
+ #include <dt-bindings/clock/fsl,qoriq-clockgen.h>
efuse@1e80000 {
compatible = "fsl,ls1028a-sfp";
reg = <0x1e80000 0x8000>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
+ clock-names = "sfp";
};
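When fuse programming is required, the TA_PROG_SFP regulator can be wired up next to the clock. A minimal sketch, assuming a hypothetical fixed regulator labelled 'reg_ta_prog_sfp':

    efuse@1e80000 {
        compatible = "fsl,ls1028a-sfp";
        reg = <0x1e80000 0x8000>;
        clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
                            QORIQ_CLK_PLL_DIV(4)>;
        clock-names = "sfp";
        /* enabled only while programming fuses, off for reads */
        ta-prog-sfp-supply = <&reg_ta_prog_sfp>;
    };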
diff --git a/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml b/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
index 8c2e9ac5f68d..30f7b596d609 100644
--- a/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
+++ b/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
@@ -17,10 +17,10 @@ description: |
the CPU frequencies subset and voltage value of each OPP varies based on
the silicon variant in use.
Qualcomm Technologies, Inc. Process Voltage Scaling Tables
- defines the voltage and frequency value based on the msm-id in SMEM
- and speedbin blown in the efuse combination.
- The qcom-cpufreq-nvmem driver reads the msm-id and efuse value from the SoC
- to provide the OPP framework with required information (existing HW bitmap).
+ defines the voltage and frequency value based on the speedbin blown in
+ the efuse combination.
+ The qcom-cpufreq-nvmem driver reads the efuse value from the SoC to provide
+ the OPP framework with required information (existing HW bitmap).
This is used to determine the voltage and frequency value for each OPP of
operating-points-v2 table when it is parsed by the OPP framework.
@@ -50,15 +50,11 @@ patternProperties:
description: |
A single 32 bit bitmap value, representing compatible HW.
Bitmap:
- 0: MSM8996 V3, speedbin 0
- 1: MSM8996 V3, speedbin 1
- 2: MSM8996 V3, speedbin 2
- 3: unused
- 4: MSM8996 SG, speedbin 0
- 5: MSM8996 SG, speedbin 1
- 6: MSM8996 SG, speedbin 2
- 7-31: unused
- maximum: 0x77
+ 0: MSM8996, speedbin 0
+ 1: MSM8996, speedbin 1
+ 2: MSM8996, speedbin 2
+ 3-31: unused
+ maximum: 0x7
clock-latency-ns: true
@@ -184,19 +180,19 @@ examples:
opp-307200000 {
opp-hz = /bits/ 64 <307200000>;
opp-microvolt = <905000 905000 1140000>;
- opp-supported-hw = <0x77>;
+ opp-supported-hw = <0x7>;
clock-latency-ns = <200000>;
};
- opp-1593600000 {
- opp-hz = /bits/ 64 <1593600000>;
+ opp-1401600000 {
+ opp-hz = /bits/ 64 <1401600000>;
opp-microvolt = <1140000 905000 1140000>;
- opp-supported-hw = <0x71>;
+ opp-supported-hw = <0x5>;
clock-latency-ns = <200000>;
};
- opp-2188800000 {
- opp-hz = /bits/ 64 <2188800000>;
+ opp-1593600000 {
+ opp-hz = /bits/ 64 <1593600000>;
opp-microvolt = <1140000 905000 1140000>;
- opp-supported-hw = <0x10>;
+ opp-supported-hw = <0x1>;
clock-latency-ns = <200000>;
};
};
@@ -209,25 +205,25 @@ examples:
opp-307200000 {
opp-hz = /bits/ 64 <307200000>;
opp-microvolt = <905000 905000 1140000>;
- opp-supported-hw = <0x77>;
+ opp-supported-hw = <0x7>;
clock-latency-ns = <200000>;
};
- opp-1593600000 {
- opp-hz = /bits/ 64 <1593600000>;
+ opp-1804800000 {
+ opp-hz = /bits/ 64 <1804800000>;
opp-microvolt = <1140000 905000 1140000>;
- opp-supported-hw = <0x70>;
+ opp-supported-hw = <0x6>;
clock-latency-ns = <200000>;
};
- opp-2150400000 {
- opp-hz = /bits/ 64 <2150400000>;
+ opp-1900800000 {
+ opp-hz = /bits/ 64 <1900800000>;
opp-microvolt = <1140000 905000 1140000>;
- opp-supported-hw = <0x31>;
+ opp-supported-hw = <0x4>;
clock-latency-ns = <200000>;
};
- opp-2342400000 {
- opp-hz = /bits/ 64 <2342400000>;
+ opp-2150400000 {
+ opp-hz = /bits/ 64 <2150400000>;
opp-microvolt = <1140000 905000 1140000>;
- opp-supported-hw = <0x10>;
+ opp-supported-hw = <0x1>;
clock-latency-ns = <200000>;
};
};
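The opp-supported-hw value is matched bit-by-bit against the speedbin read from the efuse: bit 0 stands for speedbin 0, bit 1 for speedbin 1, and so on, so <0x7> covers all three bins while <0x1> is speedbin 0 only. A minimal, hypothetical sketch of an OPP restricted to speedbin 1 parts:

    opp-2150400000 {
        opp-hz = /bits/ 64 <2150400000>;
        opp-microvolt = <1140000 905000 1140000>;
        /* BIT(1) -> valid for speedbin 1 parts only */
        opp-supported-hw = <0x2>;
        clock-latency-ns = <200000>;
    };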
diff --git a/Documentation/devicetree/bindings/pci/apple,pcie.yaml b/Documentation/devicetree/bindings/pci/apple,pcie.yaml
index daf602ac0d0f..aa38680aaaca 100644
--- a/Documentation/devicetree/bindings/pci/apple,pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/apple,pcie.yaml
@@ -68,6 +68,9 @@ properties:
iommu-map: true
iommu-map-mask: true
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
@@ -134,7 +137,7 @@ examples:
ranges = <0x43000000 0x6 0xa0000000 0x6 0xa0000000 0x0 0x20000000>,
<0x02000000 0x0 0xc0000000 0x6 0xc0000000 0x0 0x40000000>;
- power-domains = <&ps_apcie>, <&ps_apcie_gp>, <&ps_pcie_ref>;
+ power-domains = <&ps_apcie_gp>;
pinctrl-0 = <&pcie_pins>;
pinctrl-names = "default";
diff --git a/Documentation/devicetree/bindings/pci/layerscape-pci.txt b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
index f36efa73a470..ee8a4791a78b 100644
--- a/Documentation/devicetree/bindings/pci/layerscape-pci.txt
+++ b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
@@ -23,6 +23,7 @@ Required properties:
"fsl,ls1012a-pcie"
"fsl,ls1028a-pcie"
EP mode:
+ "fsl,ls1028a-pcie-ep", "fsl,ls-pcie-ep"
"fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep"
"fsl,ls1088a-pcie-ep", "fsl,ls-pcie-ep"
"fsl,ls2088a-pcie-ep", "fsl,ls-pcie-ep"
@@ -30,39 +31,49 @@ Required properties:
- reg: base addresses and lengths of the PCIe controller register blocks.
- interrupts: A list of interrupt outputs of the controller. Must contain an
entry for each entry in the interrupt-names property.
-- interrupt-names: Must include the following entries:
- "intr": The interrupt that is asserted for controller interrupts
+- interrupt-names: May include the following entries:
+ "aer": Used for the interrupt line that reports AER events when
+ MSI/MSI-X/INTx mode is not used
+ "pme": Used for the interrupt line that reports PME events when
+ MSI/MSI-X/INTx mode is not used
+ "intr": Used for SoCs (like ls2080a, lx2160a, ls2088a, ls1088a) that have
+ a single interrupt line for miscellaneous controller events
+ (may include AER and PME events).
- fsl,pcie-scfg: Must include two entries.
The first entry must be a link to the SCFG device node
- The second entry must be '0' or '1' based on physical PCIe controller index.
+ The second entry is the physical PCIe controller index starting from '0'.
This is used to get SCFG PEXN registers
- dma-coherent: Indicates that the hardware IP block can ensure the coherency
of the data transferred from/to the IP block. This can avoid the software
cache flush/invalid actions, and improve the performance significantly.
+Optional properties:
+- big-endian: If the PEX_LUT and PF register blocks are in big-endian format, specify
+ this property.
+
Example:
- pcie@3400000 {
- compatible = "fsl,ls1021a-pcie";
- reg = <0x00 0x03400000 0x0 0x00010000 /* controller registers */
- 0x40 0x00000000 0x0 0x00002000>; /* configuration space */
- reg-names = "regs", "config";
- interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
- interrupt-names = "intr";
- fsl,pcie-scfg = <&scfg 0>;
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
- dma-coherent;
- num-lanes = <4>;
- bus-range = <0x0 0xff>;
- ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
- 0xc2000000 0x0 0x20000000 0x40 0x20000000 0x0 0x20000000 /* prefetchable memory */
- 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0000 0 0 1 &gic GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
- <0000 0 0 2 &gic GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
- <0000 0 0 3 &gic GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
- <0000 0 0 4 &gic GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
- };
+ pcie@3400000 {
+ compatible = "fsl,ls1088a-pcie";
+ reg = <0x00 0x03400000 0x0 0x00100000>, /* controller registers */
+ <0x20 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg-names = "regs", "config";
+ interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
+ interrupt-names = "aer";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ num-viewport = <256>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ msi-parent = <&its>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic 0 0 0 110 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 0 111 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 0 112 IRQ_TYPE_LEVEL_HIGH>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
+ };
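
A minimal sketch of how the newly documented optional pieces could sit in a
node like the example above; the "pme" interrupt number and the use of
big-endian are illustrative, not taken from a real board:

	interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>, <0 117 IRQ_TYPE_LEVEL_HIGH>;
	interrupt-names = "aer", "pme";
	big-endian;	/* PEX_LUT and PF register blocks accessed as big-endian */
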
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.txt b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
deleted file mode 100644
index 0adb56d5645e..000000000000
--- a/Documentation/devicetree/bindings/pci/qcom,pcie.txt
+++ /dev/null
@@ -1,397 +0,0 @@
-* Qualcomm PCI express root complex
-
-- compatible:
- Usage: required
- Value type: <stringlist>
- Definition: Value should contain
- - "qcom,pcie-ipq8064" for ipq8064
- - "qcom,pcie-ipq8064-v2" for ipq8064 rev 2 or ipq8065
- - "qcom,pcie-apq8064" for apq8064
- - "qcom,pcie-apq8084" for apq8084
- - "qcom,pcie-msm8996" for msm8996 or apq8096
- - "qcom,pcie-ipq4019" for ipq4019
- - "qcom,pcie-ipq8074" for ipq8074
- - "qcom,pcie-qcs404" for qcs404
- - "qcom,pcie-sc8180x" for sc8180x
- - "qcom,pcie-sdm845" for sdm845
- - "qcom,pcie-sm8250" for sm8250
- - "qcom,pcie-sm8450-pcie0" for PCIe0 on sm8450
- - "qcom,pcie-sm8450-pcie1" for PCIe1 on sm8450
- - "qcom,pcie-ipq6018" for ipq6018
-
-- reg:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: Register ranges as listed in the reg-names property
-
-- reg-names:
- Usage: required
- Value type: <stringlist>
- Definition: Must include the following entries
- - "parf" Qualcomm specific registers
- - "dbi" DesignWare PCIe registers
- - "elbi" External local bus interface registers
- - "config" PCIe configuration space
- - "atu" ATU address space (optional)
-
-- device_type:
- Usage: required
- Value type: <string>
- Definition: Should be "pci". As specified in snps,dw-pcie.yaml
-
-- #address-cells:
- Usage: required
- Value type: <u32>
- Definition: Should be 3. As specified in snps,dw-pcie.yaml
-
-- #size-cells:
- Usage: required
- Value type: <u32>
- Definition: Should be 2. As specified in snps,dw-pcie.yaml
-
-- ranges:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: As specified in snps,dw-pcie.yaml
-
-- interrupts:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: MSI interrupt
-
-- interrupt-names:
- Usage: required
- Value type: <stringlist>
- Definition: Should contain "msi"
-
-- #interrupt-cells:
- Usage: required
- Value type: <u32>
- Definition: Should be 1. As specified in snps,dw-pcie.yaml
-
-- interrupt-map-mask:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: As specified in snps,dw-pcie.yaml
-
-- interrupt-map:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: As specified in snps,dw-pcie.yaml
-
-- clocks:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: List of phandle and clock specifier pairs as listed
- in clock-names property
-
-- clock-names:
- Usage: required
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "iface" Configuration AHB clock
-
-- clock-names:
- Usage: required for ipq/apq8064
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "core" Clocks the pcie hw block
- - "phy" Clocks the pcie PHY block
- - "aux" Clocks the pcie AUX block
- - "ref" Clocks the pcie ref block
-- clock-names:
- Usage: required for apq8084/ipq4019
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "aux" Auxiliary (AUX) clock
- - "bus_master" Master AXI clock
- - "bus_slave" Slave AXI clock
-
-- clock-names:
- Usage: required for msm8996/apq8096
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "pipe" Pipe Clock driving internal logic
- - "aux" Auxiliary (AUX) clock
- - "cfg" Configuration clock
- - "bus_master" Master AXI clock
- - "bus_slave" Slave AXI clock
-
-- clock-names:
- Usage: required for ipq8074
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "iface" PCIe to SysNOC BIU clock
- - "axi_m" AXI Master clock
- - "axi_s" AXI Slave clock
- - "ahb" AHB clock
- - "aux" Auxiliary clock
-
-- clock-names:
- Usage: required for ipq6018
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "iface" PCIe to SysNOC BIU clock
- - "axi_m" AXI Master clock
- - "axi_s" AXI Slave clock
- - "axi_bridge" AXI bridge clock
- - "rchng"
-
-- clock-names:
- Usage: required for qcs404
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "iface" AHB clock
- - "aux" Auxiliary clock
- - "master_bus" AXI Master clock
- - "slave_bus" AXI Slave clock
-
-- clock-names:
- Usage: required for sdm845
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "aux" Auxiliary clock
- - "cfg" Configuration clock
- - "bus_master" Master AXI clock
- - "bus_slave" Slave AXI clock
- - "slave_q2a" Slave Q2A clock
- - "tbu" PCIe TBU clock
- - "pipe" PIPE clock
-
-- clock-names:
- Usage: required for sc8180x and sm8250
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "aux" Auxiliary clock
- - "cfg" Configuration clock
- - "bus_master" Master AXI clock
- - "bus_slave" Slave AXI clock
- - "slave_q2a" Slave Q2A clock
- - "tbu" PCIe TBU clock
- - "ddrss_sf_tbu" PCIe SF TBU clock
- - "pipe" PIPE clock
-
-- clock-names:
- Usage: required for sm8450-pcie0 and sm8450-pcie1
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "aux" Auxiliary clock
- - "cfg" Configuration clock
- - "bus_master" Master AXI clock
- - "bus_slave" Slave AXI clock
- - "slave_q2a" Slave Q2A clock
- - "tbu" PCIe TBU clock
- - "ddrss_sf_tbu" PCIe SF TBU clock
- - "pipe" PIPE clock
- - "pipe_mux" PIPE MUX
- - "phy_pipe" PIPE output clock
- - "ref" REFERENCE clock
- - "aggre0" Aggre NoC PCIe0 AXI clock, only for sm8450-pcie0
- - "aggre1" Aggre NoC PCIe1 AXI clock
-
-- resets:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: List of phandle and reset specifier pairs as listed
- in reset-names property
-
-- reset-names:
- Usage: required for ipq/apq8064
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "axi" AXI reset
- - "ahb" AHB reset
- - "por" POR reset
- - "pci" PCI reset
- - "phy" PHY reset
-
-- reset-names:
- Usage: required for apq8084
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "core" Core reset
-
-- reset-names:
- Usage: required for ipq/apq8064
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "axi_m" AXI master reset
- - "axi_s" AXI slave reset
- - "pipe" PIPE reset
- - "axi_m_vmid" VMID reset
- - "axi_s_xpu" XPU reset
- - "parf" PARF reset
- - "phy" PHY reset
- - "axi_m_sticky" AXI sticky reset
- - "pipe_sticky" PIPE sticky reset
- - "pwr" PWR reset
- - "ahb" AHB reset
- - "phy_ahb" PHY AHB reset
- - "ext" EXT reset
-
-- reset-names:
- Usage: required for ipq8074
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "pipe" PIPE reset
- - "sleep" Sleep reset
- - "sticky" Core Sticky reset
- - "axi_m" AXI Master reset
- - "axi_s" AXI Slave reset
- - "ahb" AHB Reset
- - "axi_m_sticky" AXI Master Sticky reset
-
-- reset-names:
- Usage: required for ipq6018
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "pipe" PIPE reset
- - "sleep" Sleep reset
- - "sticky" Core Sticky reset
- - "axi_m" AXI Master reset
- - "axi_s" AXI Slave reset
- - "ahb" AHB Reset
- - "axi_m_sticky" AXI Master Sticky reset
- - "axi_s_sticky" AXI Slave Sticky reset
-
-- reset-names:
- Usage: required for qcs404
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "axi_m" AXI Master reset
- - "axi_s" AXI Slave reset
- - "axi_m_sticky" AXI Master Sticky reset
- - "pipe_sticky" PIPE sticky reset
- - "pwr" PWR reset
- - "ahb" AHB reset
-
-- reset-names:
- Usage: required for sc8180x, sdm845, sm8250 and sm8450
- Value type: <stringlist>
- Definition: Should contain the following entries
- - "pci" PCIe core reset
-
-- power-domains:
- Usage: required for apq8084 and msm8996/apq8096
- Value type: <prop-encoded-array>
- Definition: A phandle and power domain specifier pair to the
- power domain which is responsible for collapsing
- and restoring power to the peripheral
-
-- vdda-supply:
- Usage: required
- Value type: <phandle>
- Definition: A phandle to the core analog power supply
-
-- vdda_phy-supply:
- Usage: required for ipq/apq8064
- Value type: <phandle>
- Definition: A phandle to the analog power supply for PHY
-
-- vdda_refclk-supply:
- Usage: required for ipq/apq8064
- Value type: <phandle>
- Definition: A phandle to the analog power supply for IC which generates
- reference clock
-- vddpe-3v3-supply:
- Usage: optional
- Value type: <phandle>
- Definition: A phandle to the PCIe endpoint power supply
-
-- phys:
- Usage: required for apq8084 and qcs404
- Value type: <phandle>
- Definition: List of phandle(s) as listed in phy-names property
-
-- phy-names:
- Usage: required for apq8084 and qcs404
- Value type: <stringlist>
- Definition: Should contain "pciephy"
-
-- <name>-gpios:
- Usage: optional
- Value type: <prop-encoded-array>
- Definition: List of phandle and GPIO specifier pairs. Should contain
- - "perst-gpios" PCIe endpoint reset signal line
- - "wake-gpios" PCIe endpoint wake signal line
-
-* Example for ipq/apq8064
- pcie@1b500000 {
- compatible = "qcom,pcie-apq8064", "qcom,pcie-ipq8064", "snps,dw-pcie";
- reg = <0x1b500000 0x1000
- 0x1b502000 0x80
- 0x1b600000 0x100
- 0x0ff00000 0x100000>;
- reg-names = "dbi", "elbi", "parf", "config";
- device_type = "pci";
- linux,pci-domain = <0>;
- bus-range = <0x00 0xff>;
- num-lanes = <1>;
- #address-cells = <3>;
- #size-cells = <2>;
- ranges = <0x81000000 0 0 0x0fe00000 0 0x00100000 /* I/O */
- 0x82000000 0 0 0x08000000 0 0x07e00000>; /* memory */
- interrupts = <GIC_SPI 238 IRQ_TYPE_NONE>;
- interrupt-names = "msi";
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc 0 36 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
- <0 0 0 2 &intc 0 37 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
- <0 0 0 3 &intc 0 38 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
- <0 0 0 4 &intc 0 39 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
- clocks = <&gcc PCIE_A_CLK>,
- <&gcc PCIE_H_CLK>,
- <&gcc PCIE_PHY_CLK>,
- <&gcc PCIE_AUX_CLK>,
- <&gcc PCIE_ALT_REF_CLK>;
- clock-names = "core", "iface", "phy", "aux", "ref";
- resets = <&gcc PCIE_ACLK_RESET>,
- <&gcc PCIE_HCLK_RESET>,
- <&gcc PCIE_POR_RESET>,
- <&gcc PCIE_PCI_RESET>,
- <&gcc PCIE_PHY_RESET>,
- <&gcc PCIE_EXT_RESET>;
- reset-names = "axi", "ahb", "por", "pci", "phy", "ext";
- pinctrl-0 = <&pcie_pins_default>;
- pinctrl-names = "default";
- };
-
-* Example for apq8084
- pcie0@fc520000 {
- compatible = "qcom,pcie-apq8084", "snps,dw-pcie";
- reg = <0xfc520000 0x2000>,
- <0xff000000 0x1000>,
- <0xff001000 0x1000>,
- <0xff002000 0x2000>;
- reg-names = "parf", "dbi", "elbi", "config";
- device_type = "pci";
- linux,pci-domain = <0>;
- bus-range = <0x00 0xff>;
- num-lanes = <1>;
- #address-cells = <3>;
- #size-cells = <2>;
- ranges = <0x81000000 0 0 0xff200000 0 0x00100000 /* I/O */
- 0x82000000 0 0x00300000 0xff300000 0 0x00d00000>; /* memory */
- interrupts = <GIC_SPI 243 IRQ_TYPE_NONE>;
- interrupt-names = "msi";
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc 0 244 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
- <0 0 0 2 &intc 0 245 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
- <0 0 0 3 &intc 0 247 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
- <0 0 0 4 &intc 0 248 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
- clocks = <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
- <&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
- <&gcc GCC_PCIE_0_SLV_AXI_CLK>,
- <&gcc GCC_PCIE_0_AUX_CLK>;
- clock-names = "iface", "master_bus", "slave_bus", "aux";
- resets = <&gcc GCC_PCIE_0_BCR>;
- reset-names = "core";
- power-domains = <&gcc PCIE0_GDSC>;
- vdda-supply = <&pma8084_l3>;
- phys = <&pciephy0>;
- phy-names = "pciephy";
- perst-gpio = <&tlmm 70 GPIO_ACTIVE_LOW>;
- pinctrl-0 = <&pcie0_pins_default>;
- pinctrl-names = "default";
- };
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
new file mode 100644
index 000000000000..0b69b12b849e
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
@@ -0,0 +1,714 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/qcom,pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm PCI express root complex
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+ - Stanimir Varbanov <svarbanov@mm-sol.com>
+
+description: |
+ The Qualcomm PCIe root complex controller is based on the Synopsys DesignWare
+ PCIe IP.
+
+properties:
+ compatible:
+ enum:
+ - qcom,pcie-ipq8064
+ - qcom,pcie-ipq8064-v2
+ - qcom,pcie-apq8064
+ - qcom,pcie-apq8084
+ - qcom,pcie-msm8996
+ - qcom,pcie-ipq4019
+ - qcom,pcie-ipq8074
+ - qcom,pcie-qcs404
+ - qcom,pcie-sc7280
+ - qcom,pcie-sc8180x
+ - qcom,pcie-sdm845
+ - qcom,pcie-sm8150
+ - qcom,pcie-sm8250
+ - qcom,pcie-sm8450-pcie0
+ - qcom,pcie-sm8450-pcie1
+ - qcom,pcie-ipq6018
+
+ reg:
+ minItems: 4
+ maxItems: 5
+
+ reg-names:
+ minItems: 4
+ maxItems: 5
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ items:
+ - const: msi
+
+ # Common definitions for clocks, clock-names and reset.
+ # Platform constraints are described later.
+ clocks:
+ minItems: 3
+ maxItems: 12
+
+ clock-names:
+ minItems: 3
+ maxItems: 12
+
+ resets:
+ minItems: 1
+ maxItems: 12
+
+ reset-names:
+ minItems: 1
+ maxItems: 12
+
+ vdda-supply:
+ description: A phandle to the core analog power supply
+
+ vdda_phy-supply:
+ description: A phandle to the core analog power supply for PHY
+
+ vdda_refclk-supply:
+ description: A phandle to the core analog power supply for the IC which generates the reference clock
+
+ vddpe-3v3-supply:
+ description: A phandle to the PCIe endpoint power supply
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ items:
+ - const: pciephy
+
+ power-domains:
+ maxItems: 1
+
+ perst-gpios:
+ description: GPIO controlled connection to PERST# signal
+ maxItems: 1
+
+ wake-gpios:
+ description: GPIO controlled connection to WAKE# signal
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - interrupt-names
+ - "#interrupt-cells"
+ - interrupt-map-mask
+ - interrupt-map
+ - clocks
+ - clock-names
+
+allOf:
+ - $ref: /schemas/pci/pci-bus.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-apq8064
+ - qcom,pcie-ipq4019
+ - qcom,pcie-ipq8064
+ - qcom,pcie-ipq8064v2
+ - qcom,pcie-ipq8074
+ - qcom,pcie-qcs404
+ then:
+ properties:
+ reg:
+ minItems: 4
+ maxItems: 4
+ reg-names:
+ items:
+ - const: dbi # DesignWare PCIe registers
+ - const: elbi # External local bus interface registers
+ - const: parf # Qualcomm specific registers
+ - const: config # PCIe configuration space
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-ipq6018
+ then:
+ properties:
+ reg:
+ minItems: 5
+ maxItems: 5
+ reg-names:
+ items:
+ - const: dbi # DesignWare PCIe registers
+ - const: elbi # External local bus interface registers
+ - const: atu # ATU address space
+ - const: parf # Qualcomm specific registers
+ - const: config # PCIe configuration space
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-apq8084
+ - qcom,pcie-msm8996
+ - qcom,pcie-sdm845
+ then:
+ properties:
+ reg:
+ minItems: 4
+ maxItems: 4
+ reg-names:
+ items:
+ - const: parf # Qualcomm specific registers
+ - const: dbi # DesignWare PCIe registers
+ - const: elbi # External local bus interface registers
+ - const: config # PCIe configuration space
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-sc7280
+ - qcom,pcie-sc8180x
+ - qcom,pcie-sm8250
+ - qcom,pcie-sm8450-pcie0
+ - qcom,pcie-sm8450-pcie1
+ then:
+ properties:
+ reg:
+ minItems: 5
+ maxItems: 5
+ reg-names:
+ items:
+ - const: parf # Qualcomm specific registers
+ - const: dbi # DesignWare PCIe registers
+ - const: elbi # External local bus interface registers
+ - const: atu # ATU address space
+ - const: config # PCIe configuration space
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-apq8064
+ - qcom,pcie-ipq8064
+ - qcom,pcie-ipq8064v2
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 5
+ clock-names:
+ minItems: 3
+ items:
+ - const: core # Clocks the pcie hw block
+ - const: iface # Configuration AHB clock
+ - const: phy # Clocks the pcie PHY block
+ - const: aux # Clocks the pcie AUX block, not on apq8064
+ - const: ref # Clocks the pcie ref block, not on apq8064
+ resets:
+ minItems: 5
+ maxItems: 6
+ reset-names:
+ minItems: 5
+ items:
+ - const: axi # AXI reset
+ - const: ahb # AHB reset
+ - const: por # POR reset
+ - const: pci # PCI reset
+ - const: phy # PHY reset
+ - const: ext # EXT reset, not on apq8064
+ required:
+ - vdda-supply
+ - vdda_phy-supply
+ - vdda_refclk-supply
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-apq8084
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ maxItems: 4
+ clock-names:
+ items:
+ - const: iface # Configuration AHB clock
+ - const: master_bus # Master AXI clock
+ - const: slave_bus # Slave AXI clock
+ - const: aux # Auxiliary (AUX) clock
+ resets:
+ maxItems: 1
+ reset-names:
+ items:
+ - const: core # Core reset
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-ipq4019
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 3
+ clock-names:
+ items:
+ - const: aux # Auxiliary (AUX) clock
+ - const: master_bus # Master AXI clock
+ - const: slave_bus # Slave AXI clock
+ resets:
+ minItems: 12
+ maxItems: 12
+ reset-names:
+ items:
+ - const: axi_m # AXI master reset
+ - const: axi_s # AXI slave reset
+ - const: pipe # PIPE reset
+ - const: axi_m_vmid # VMID reset
+ - const: axi_s_xpu # XPU reset
+ - const: parf # PARF reset
+ - const: phy # PHY reset
+ - const: axi_m_sticky # AXI sticky reset
+ - const: pipe_sticky # PIPE sticky reset
+ - const: pwr # PWR reset
+ - const: ahb # AHB reset
+ - const: phy_ahb # PHY AHB reset
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-msm8996
+ then:
+ oneOf:
+ - properties:
+ clock-names:
+ items:
+ - const: pipe # Pipe Clock driving internal logic
+ - const: aux # Auxiliary (AUX) clock
+ - const: cfg # Configuration clock
+ - const: bus_master # Master AXI clock
+ - const: bus_slave # Slave AXI clock
+ - properties:
+ clock-names:
+ items:
+ - const: pipe # Pipe Clock driving internal logic
+ - const: bus_master # Master AXI clock
+ - const: bus_slave # Slave AXI clock
+ - const: cfg # Configuration clock
+ - const: aux # Auxiliary (AUX) clock
+ properties:
+ clocks:
+ minItems: 5
+ maxItems: 5
+ resets: false
+ reset-names: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-ipq8074
+ then:
+ properties:
+ clocks:
+ minItems: 5
+ maxItems: 5
+ clock-names:
+ items:
+ - const: iface # PCIe to SysNOC BIU clock
+ - const: axi_m # AXI Master clock
+ - const: axi_s # AXI Slave clock
+ - const: ahb # AHB clock
+ - const: aux # Auxiliary clock
+ resets:
+ minItems: 7
+ maxItems: 7
+ reset-names:
+ items:
+ - const: pipe # PIPE reset
+ - const: sleep # Sleep reset
+ - const: sticky # Core Sticky reset
+ - const: axi_m # AXI Master reset
+ - const: axi_s # AXI Slave reset
+ - const: ahb # AHB Reset
+ - const: axi_m_sticky # AXI Master Sticky reset
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-ipq6018
+ then:
+ properties:
+ clocks:
+ minItems: 5
+ maxItems: 5
+ clock-names:
+ items:
+ - const: iface # PCIe to SysNOC BIU clock
+ - const: axi_m # AXI Master clock
+ - const: axi_s # AXI Slave clock
+ - const: axi_bridge # AXI bridge clock
+ - const: rchng
+ resets:
+ minItems: 8
+ maxItems: 8
+ reset-names:
+ items:
+ - const: pipe # PIPE reset
+ - const: sleep # Sleep reset
+ - const: sticky # Core Sticky reset
+ - const: axi_m # AXI Master reset
+ - const: axi_s # AXI Slave reset
+ - const: ahb # AHB Reset
+ - const: axi_m_sticky # AXI Master Sticky reset
+ - const: axi_s_sticky # AXI Slave Sticky reset
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-qcs404
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ maxItems: 4
+ clock-names:
+ items:
+ - const: iface # AHB clock
+ - const: aux # Auxiliary clock
+ - const: master_bus # AXI Master clock
+ - const: slave_bus # AXI Slave clock
+ resets:
+ minItems: 6
+ maxItems: 6
+ reset-names:
+ items:
+ - const: axi_m # AXI Master reset
+ - const: axi_s # AXI Slave reset
+ - const: axi_m_sticky # AXI Master Sticky reset
+ - const: pipe_sticky # PIPE sticky reset
+ - const: pwr # PWR reset
+ - const: ahb # AHB reset
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-sc7280
+ then:
+ properties:
+ clocks:
+ minItems: 11
+ maxItems: 11
+ clock-names:
+ items:
+ - const: pipe # PIPE clock
+ - const: pipe_mux # PIPE MUX
+ - const: phy_pipe # PIPE output clock
+ - const: ref # REFERENCE clock
+ - const: aux # Auxiliary clock
+ - const: cfg # Configuration clock
+ - const: bus_master # Master AXI clock
+ - const: bus_slave # Slave AXI clock
+ - const: slave_q2a # Slave Q2A clock
+ - const: tbu # PCIe TBU clock
+ - const: ddrss_sf_tbu # PCIe SF TBU clock
+ resets:
+ maxItems: 1
+ reset-names:
+ items:
+ - const: pci # PCIe core reset
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-sdm845
+ then:
+ oneOf:
+ # Unfortunately the "optional" ref clock is used in the middle of the list
+ - properties:
+ clocks:
+ minItems: 8
+ maxItems: 8
+ clock-names:
+ items:
+ - const: pipe # PIPE clock
+ - const: aux # Auxiliary clock
+ - const: cfg # Configuration clock
+ - const: bus_master # Master AXI clock
+ - const: bus_slave # Slave AXI clock
+ - const: slave_q2a # Slave Q2A clock
+ - const: ref # REFERENCE clock
+ - const: tbu # PCIe TBU clock
+ - properties:
+ clocks:
+ minItems: 7
+ maxItems: 7
+ clock-names:
+ items:
+ - const: pipe # PIPE clock
+ - const: aux # Auxiliary clock
+ - const: cfg # Configuration clock
+ - const: bus_master # Master AXI clock
+ - const: bus_slave # Slave AXI clock
+ - const: slave_q2a # Slave Q2A clock
+ - const: tbu # PCIe TBU clock
+ properties:
+ resets:
+ maxItems: 1
+ reset-names:
+ items:
+ - const: pci # PCIe core reset
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-sc8180x
+ - qcom,pcie-sm8150
+ - qcom,pcie-sm8250
+ then:
+ oneOf:
+ # Unfortunately the "optional" ref clock is used in the middle of the list
+ - properties:
+ clocks:
+ minItems: 9
+ maxItems: 9
+ clock-names:
+ items:
+ - const: pipe # PIPE clock
+ - const: aux # Auxiliary clock
+ - const: cfg # Configuration clock
+ - const: bus_master # Master AXI clock
+ - const: bus_slave # Slave AXI clock
+ - const: slave_q2a # Slave Q2A clock
+ - const: ref # REFERENCE clock
+ - const: tbu # PCIe TBU clock
+ - const: ddrss_sf_tbu # PCIe SF TBU clock
+ - properties:
+ clocks:
+ minItems: 8
+ maxItems: 8
+ clock-names:
+ items:
+ - const: pipe # PIPE clock
+ - const: aux # Auxiliary clock
+ - const: cfg # Configuration clock
+ - const: bus_master # Master AXI clock
+ - const: bus_slave # Slave AXI clock
+ - const: slave_q2a # Slave Q2A clock
+ - const: tbu # PCIe TBU clock
+ - const: ddrss_sf_tbu # PCIe SF TBU clock
+ properties:
+ resets:
+ maxItems: 1
+ reset-names:
+ items:
+ - const: pci # PCIe core reset
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-sm8450-pcie0
+ then:
+ properties:
+ clocks:
+ minItems: 12
+ maxItems: 12
+ clock-names:
+ items:
+ - const: pipe # PIPE clock
+ - const: pipe_mux # PIPE MUX
+ - const: phy_pipe # PIPE output clock
+ - const: ref # REFERENCE clock
+ - const: aux # Auxiliary clock
+ - const: cfg # Configuration clock
+ - const: bus_master # Master AXI clock
+ - const: bus_slave # Slave AXI clock
+ - const: slave_q2a # Slave Q2A clock
+ - const: ddrss_sf_tbu # PCIe SF TBU clock
+ - const: aggre0 # Aggre NoC PCIe0 AXI clock
+ - const: aggre1 # Aggre NoC PCIe1 AXI clock
+ resets:
+ maxItems: 1
+ reset-names:
+ items:
+ - const: pci # PCIe core reset
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-sm8450-pcie1
+ then:
+ properties:
+ clocks:
+ minItems: 11
+ maxItems: 11
+ clock-names:
+ items:
+ - const: pipe # PIPE clock
+ - const: pipe_mux # PIPE MUX
+ - const: phy_pipe # PIPE output clock
+ - const: ref # REFERENCE clock
+ - const: aux # Auxiliary clock
+ - const: cfg # Configuration clock
+ - const: bus_master # Master AXI clock
+ - const: bus_slave # Slave AXI clock
+ - const: slave_q2a # Slave Q2A clock
+ - const: ddrss_sf_tbu # PCIe SF TBU clock
+ - const: aggre1 # Aggre NoC PCIe1 AXI clock
+ resets:
+ maxItems: 1
+ reset-names:
+ items:
+ - const: pci # PCIe core reset
+
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-apq8064
+ - qcom,pcie-ipq4019
+ - qcom,pcie-ipq8064
+ - qcom,pcie-ipq8064v2
+ - qcom,pcie-ipq8074
+ - qcom,pcie-qcs404
+ then:
+ required:
+ - power-domains
+
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-msm8996
+ then:
+ required:
+ - resets
+ - reset-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ pcie@1b500000 {
+ compatible = "qcom,pcie-ipq8064";
+ reg = <0x1b500000 0x1000>,
+ <0x1b502000 0x80>,
+ <0x1b600000 0x100>,
+ <0x0ff00000 0x100000>;
+ reg-names = "dbi", "elbi", "parf", "config";
+ device_type = "pci";
+ linux,pci-domain = <0>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x81000000 0 0 0x0fe00000 0 0x00100000>,
+ <0x82000000 0 0 0x08000000 0 0x07e00000>;
+ interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 36 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 37 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 38 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc 41>,
+ <&gcc 43>,
+ <&gcc 44>,
+ <&gcc 42>,
+ <&gcc 248>;
+ clock-names = "core", "iface", "phy", "aux", "ref";
+ resets = <&gcc 27>,
+ <&gcc 26>,
+ <&gcc 25>,
+ <&gcc 24>,
+ <&gcc 23>,
+ <&gcc 22>;
+ reset-names = "axi", "ahb", "por", "pci", "phy", "ext";
+ pinctrl-0 = <&pcie_pins_default>;
+ pinctrl-names = "default";
+ vdda-supply = <&pm8921_s3>;
+ vdda_phy-supply = <&pm8921_lvs6>;
+ vdda_refclk-supply = <&ext_3p3v>;
+ };
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/gpio/gpio.h>
+ pcie@fc520000 {
+ compatible = "qcom,pcie-apq8084";
+ reg = <0xfc520000 0x2000>,
+ <0xff000000 0x1000>,
+ <0xff001000 0x1000>,
+ <0xff002000 0x2000>;
+ reg-names = "parf", "dbi", "elbi", "config";
+ device_type = "pci";
+ linux,pci-domain = <0>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x81000000 0 0 0xff200000 0 0x00100000>,
+ <0x82000000 0 0x00300000 0xff300000 0 0x00d00000>;
+ interrupts = <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 244 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 245 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 247 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 248 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc 324>,
+ <&gcc 325>,
+ <&gcc 327>,
+ <&gcc 323>;
+ clock-names = "iface", "master_bus", "slave_bus", "aux";
+ resets = <&gcc 81>;
+ reset-names = "core";
+ power-domains = <&gcc 1>;
+ vdda-supply = <&pma8084_l3>;
+ phys = <&pciephy0>;
+ phy-names = "pciephy";
+ perst-gpios = <&tlmm 70 GPIO_ACTIVE_LOW>;
+ pinctrl-0 = <&pcie0_pins_default>;
+ pinctrl-names = "default";
+ };
+...
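
The perst-gpios and wake-gpios properties defined in the schema are only
partly exercised by the examples; a minimal sketch, assuming hypothetical
TLMM GPIO lines:

	perst-gpios = <&tlmm 35 GPIO_ACTIVE_LOW>;
	wake-gpios = <&tlmm 37 GPIO_ACTIVE_LOW>;
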
diff --git a/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml b/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml
index 142bbe577763..bc0a9d1db750 100644
--- a/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml
@@ -19,20 +19,10 @@ description: |+
allOf:
- $ref: /schemas/pci/pci-bus.yaml#
-# We need a select here so we don't match all nodes with 'snps,dw-pcie'
-select:
- properties:
- compatible:
- contains:
- const: rockchip,rk3568-pcie
- required:
- - compatible
-
properties:
compatible:
items:
- const: rockchip,rk3568-pcie
- - const: snps,dw-pcie
reg:
items:
@@ -110,7 +100,7 @@ examples:
#size-cells = <2>;
pcie3x2: pcie@fe280000 {
- compatible = "rockchip,rk3568-pcie", "snps,dw-pcie";
+ compatible = "rockchip,rk3568-pcie";
reg = <0x3 0xc0800000 0x0 0x390000>,
<0x0 0xfe280000 0x0 0x10000>,
<0x3 0x80000000 0x0 0x100000>;
diff --git a/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie.yaml b/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie.yaml
index f5926d0fb085..638b99db0433 100644
--- a/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie.yaml
@@ -51,6 +51,19 @@ properties:
phy-names:
const: pcie-phy
+ interrupt-controller:
+ type: object
+ additionalProperties: false
+
+ properties:
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+ interrupts:
+ maxItems: 1
+
required:
- compatible
- reg
@@ -62,6 +75,13 @@ unevaluatedProperties: false
examples:
- |
+ bus {
+ gic: interrupt-controller {
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ };
+ };
+
pcie: pcie@66000000 {
compatible = "socionext,uniphier-pcie";
reg-names = "dbi", "link", "config";
@@ -80,6 +100,7 @@ examples:
phys = <&pcie_phy>;
#interrupt-cells = <1>;
interrupt-names = "dma", "msi";
+ interrupt-parent = <&gic>;
interrupts = <0 224 4>, <0 225 4>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc 0>,
@@ -87,7 +108,7 @@ examples:
<0 0 0 3 &pcie_intc 2>,
<0 0 0 4 &pcie_intc 3>;
- pcie_intc: legacy-interrupt-controller {
+ pcie_intc: interrupt-controller {
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&gic>;
diff --git a/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml b/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
index 32f4641085bc..cca395317a4c 100644
--- a/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
+++ b/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
@@ -18,13 +18,13 @@ properties:
reg:
items:
- - description: Configuration space region and bridge registers.
- description: CPM system level control and status registers.
+ - description: Configuration space region and bridge registers.
reg-names:
items:
- - const: cfg
- const: cpm_slcr
+ - const: cfg
interrupts:
maxItems: 1
@@ -86,9 +86,9 @@ examples:
ranges = <0x02000000 0x0 0xe0000000 0x0 0xe0000000 0x0 0x10000000>,
<0x43000000 0x80 0x00000000 0x80 0x00000000 0x0 0x80000000>;
msi-map = <0x0 &its_gic 0x0 0x10000>;
- reg = <0x6 0x00000000 0x0 0x10000000>,
- <0x0 0xfca10000 0x0 0x1000>;
- reg-names = "cfg", "cpm_slcr";
+ reg = <0x0 0xfca10000 0x0 0x1000>,
+ <0x6 0x00000000 0x0 0x10000000>;
+ reg-names = "cpm_slcr", "cfg";
pcie_intc_0: interrupt-controller {
#address-cells = <0>;
#interrupt-cells = <1>;
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
index d0b541a461f3..22636c9fdab8 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
@@ -37,6 +37,18 @@ properties:
resets:
maxItems: 1
+ allwinner,direction:
+ $ref: '/schemas/types.yaml#/definitions/string'
+ description: |
+ Direction of the D-PHY:
+ - "rx" for receiving (e.g. when used with MIPI CSI-2);
+ - "tx" for transmitting (e.g. when used with MIPI DSI).
+
+ enum:
+ - tx
+ - rx
+ default: tx
+
required:
- "#phy-cells"
- compatible
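
A minimal sketch of the new allwinner,direction property on a D-PHY used in
receive mode for MIPI CSI-2; the clock/reset phandles and indices are
illustrative:

	dphy0: d-phy@1ca1000 {
		compatible = "allwinner,sun6i-a31-mipi-dphy";
		reg = <0x01ca1000 0x1000>;
		clocks = <&ccu 23>, <&ccu 24>;
		clock-names = "bus", "mod";
		resets = <&ccu 4>;
		#phy-cells = <0>;
		allwinner,direction = "rx";
	};
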
diff --git a/Documentation/devicetree/bindings/phy/marvell,armada-3700-utmi-phy.yaml b/Documentation/devicetree/bindings/phy/marvell,armada-3700-utmi-phy.yaml
index 2437c3683326..632d61c07f40 100644
--- a/Documentation/devicetree/bindings/phy/marvell,armada-3700-utmi-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/marvell,armada-3700-utmi-phy.yaml
@@ -45,7 +45,7 @@ additionalProperties: false
examples:
- |
usb2_utmi_host_phy: phy@5f000 {
- compatible = "marvell,armada-3700-utmi-host-phy";
+ compatible = "marvell,a3700-utmi-host-phy";
reg = <0x5f000 0x800>;
marvell,usb-misc-reg = <&usb2_syscon>;
#phy-cells = <0>;
diff --git a/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.txt b/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.txt
deleted file mode 100644
index 9b23407233c0..000000000000
--- a/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Mixel DSI PHY for i.MX8
-
-The Mixel MIPI-DSI PHY IP block is e.g. found on i.MX8 platforms (along the
-MIPI-DSI IP from Northwest Logic). It represents the physical layer for the
-electrical signals for DSI.
-
-Required properties:
-- compatible: Must be:
- - "fsl,imx8mq-mipi-dphy"
-- clocks: Must contain an entry for each entry in clock-names.
-- clock-names: Must contain the following entries:
- - "phy_ref": phandle and specifier referring to the DPHY ref clock
-- reg: the register range of the PHY controller
-- #phy-cells: number of cells in PHY, as defined in
- Documentation/devicetree/bindings/phy/phy-bindings.txt
- this must be <0>
-
-Optional properties:
-- power-domains: phandle to power domain
-
-Example:
- dphy: dphy@30a0030 {
- compatible = "fsl,imx8mq-mipi-dphy";
- clocks = <&clk IMX8MQ_CLK_DSI_PHY_REF>;
- clock-names = "phy_ref";
- reg = <0x30a00300 0x100>;
- power-domains = <&pd_mipi0>;
- #phy-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml b/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml
new file mode 100644
index 000000000000..786cfd71cb7e
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/mixel,mipi-dsi-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mixel DSI PHY for i.MX8
+
+maintainers:
+ - Guido Günther <agx@sigxcpu.org>
+
+description: |
+ The Mixel MIPI-DSI PHY IP block is e.g. found on i.MX8 platforms (along the
+ MIPI-DSI IP from Northwest Logic). It represents the physical layer for the
+ electrical signals for DSI.
+
+ The Mixel PHY IP block found on i.MX8qxp is a combo PHY that can work
+ in either MIPI-DSI PHY mode or LVDS PHY mode.
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx8mq-mipi-dphy
+ - fsl,imx8qxp-mipi-dphy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: phy_ref
+
+ assigned-clocks:
+ maxItems: 1
+
+ assigned-clock-parents:
+ maxItems: 1
+
+ assigned-clock-rates:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+ fsl,syscon:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: |
+ A phandle which points to the Control and Status Registers (CSR) module.
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - "#phy-cells"
+ - power-domains
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,imx8mq-mipi-dphy
+ then:
+ properties:
+ fsl,syscon: false
+
+ required:
+ - assigned-clocks
+ - assigned-clock-parents
+ - assigned-clock-rates
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,imx8qxp-mipi-dphy
+ then:
+ properties:
+ assigned-clocks: false
+ assigned-clock-parents: false
+ assigned-clock-rates: false
+
+ required:
+ - fsl,syscon
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8mq-clock.h>
+ dphy: dphy@30a0030 {
+ compatible = "fsl,imx8mq-mipi-dphy";
+ reg = <0x30a00300 0x100>;
+ clocks = <&clk IMX8MQ_CLK_DSI_PHY_REF>;
+ clock-names = "phy_ref";
+ assigned-clocks = <&clk IMX8MQ_CLK_DSI_PHY_REF>;
+ assigned-clock-parents = <&clk IMX8MQ_VIDEO_PLL1_OUT>;
+ assigned-clock-rates = <24000000>;
+ #phy-cells = <0>;
+ power-domains = <&pgc_mipi>;
+ };
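
A minimal sketch of the i.MX8qxp combo-PHY variant, which takes fsl,syscon
instead of the assigned-clock properties; the register address, clock and
syscon phandles, and power-domain index are illustrative:

	mipi_dphy1: phy@56228300 {
		compatible = "fsl,imx8qxp-mipi-dphy";
		reg = <0x56228300 0x100>;
		clocks = <&dsi_lpcg 0>;
		clock-names = "phy_ref";
		#phy-cells = <0>;
		fsl,syscon = <&mipi_csr>;
		power-domains = <&pd 178>;
	};
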
diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
index e20d9b087bb8..8b850c5ab116 100644
--- a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
@@ -39,6 +39,7 @@ properties:
- qcom,sdm845-qmp-usb3-phy
- qcom,sdm845-qmp-usb3-uni-phy
- qcom,sm6115-qmp-ufs-phy
+ - qcom,sm6350-qmp-ufs-phy
- qcom,sm8150-qmp-ufs-phy
- qcom,sm8150-qmp-usb3-phy
- qcom,sm8150-qmp-usb3-uni-phy
@@ -57,6 +58,7 @@ properties:
- qcom,sm8450-qmp-usb3-phy
- qcom,sdx55-qmp-pcie-phy
- qcom,sdx55-qmp-usb3-uni-phy
+ - qcom,sdx65-qmp-usb3-uni-phy
reg:
minItems: 1
@@ -163,6 +165,7 @@ allOf:
contains:
enum:
- qcom,sdx55-qmp-usb3-uni-phy
+ - qcom,sdx65-qmp-usb3-uni-phy
then:
properties:
clocks:
@@ -279,6 +282,7 @@ allOf:
enum:
- qcom,msm8998-qmp-ufs-phy
- qcom,sdm845-qmp-ufs-phy
+ - qcom,sm6350-qmp-ufs-phy
- qcom,sm8150-qmp-ufs-phy
- qcom,sm8250-qmp-ufs-phy
- qcom,sc8180x-qmp-ufs-phy
diff --git a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
index 16807bbbdcb1..f82649a55e91 100644
--- a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
@@ -32,6 +32,7 @@ properties:
- items:
- enum:
+ - renesas,usb2-phy-r9a07g043 # RZ/G2UL
- renesas,usb2-phy-r9a07g044 # RZ/G2{L,LC}
- renesas,usb2-phy-r9a07g054 # RZ/V2L
- const: renesas,rzg2l-usb2-phy
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
index 3b400a85b44a..a3cd45acea28 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
@@ -30,32 +30,79 @@ properties:
minItems: 1
maxItems: 2
- clock-names:
- oneOf:
- - items: # for PXs2
- - const: link
- - items: # for Pro4
- - const: link
- - const: gio
- - items: # for others
- - const: link
- - const: phy
+ clock-names: true
resets:
minItems: 2
- maxItems: 5
+ maxItems: 6
- reset-names:
- oneOf:
- - items: # for Pro4
- - const: link
- - const: gio
- - const: pm
- - const: tx
- - const: rx
- - items: # for others
- - const: link
- - const: phy
+ reset-names: true
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: socionext,uniphier-pro4-ahci-phy
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: link
+ - const: gio
+ resets:
+ minItems: 6
+ maxItems: 6
+ reset-names:
+ items:
+ - const: link
+ - const: gio
+ - const: phy
+ - const: pm
+ - const: tx
+ - const: rx
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: socionext,uniphier-pxs2-ahci-phy
+ then:
+ properties:
+ clocks:
+ maxItems: 1
+ clock-names:
+ const: link
+ resets:
+ minItems: 2
+ maxItems: 2
+ reset-names:
+ items:
+ - const: link
+ - const: phy
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: socionext,uniphier-pxs3-ahci-phy
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: link
+ - const: phy
+ resets:
+ minItems: 2
+ maxItems: 2
+ reset-names:
+ items:
+ - const: link
+ - const: phy
required:
- compatible
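
A minimal sketch of the Pro4 AHCI PHY variant as constrained above, with its
two clocks and six resets; the node layout, phandle names and indices are
illustrative:

	ahci_phy: phy@10 {
		compatible = "socionext,uniphier-pro4-ahci-phy";
		reg = <0x10 0x40>;
		#phy-cells = <0>;
		clock-names = "link", "gio";
		clocks = <&sys_clk 28>, <&sys_clk 12>;
		reset-names = "link", "gio", "phy", "pm", "tx", "rx";
		resets = <&sys_rst 28>, <&sys_rst 12>, <&sys_rst 30>,
			 <&ahci_rst 0>, <&ahci_rst 1>, <&ahci_rst 2>;
	};
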
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
index fbb71d6dd531..b3ed2f74a414 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
@@ -31,28 +31,51 @@ properties:
minItems: 1
maxItems: 2
- clock-names:
- oneOf:
- - items: # for Pro5
- - const: gio
- - const: link
- - const: link # for others
+ clock-names: true
resets:
minItems: 1
maxItems: 2
- reset-names:
- oneOf:
- - items: # for Pro5
- - const: gio
- - const: link
- - const: link # for others
+ reset-names: true
socionext,syscon:
$ref: /schemas/types.yaml#/definitions/phandle
description: A phandle to system control to set configurations for phy
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: socionext,uniphier-pro5-pcie-phy
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: gio
+ - const: link
+ resets:
+ minItems: 2
+ maxItems: 2
+ reset-names:
+ items:
+ - const: gio
+ - const: link
+ else:
+ properties:
+ clocks:
+ maxItems: 1
+ clock-names:
+ const: link
+ resets:
+ maxItems: 1
+ reset-names:
+ const: link
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb2-phy.yaml
index 479b203f7aa6..63dab914a48d 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb2-phy.yaml
@@ -43,6 +43,9 @@ patternProperties:
"#phy-cells":
const: 0
+ vbus-supply:
+ description: A phandle to the regulator for USB VBUS, only for USB host
+
required:
- reg
- "#phy-cells"
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
index 33946efcac5e..21e4414eea60 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
@@ -31,27 +31,15 @@ properties:
const: 0
clocks:
- minItems: 1
+ minItems: 2
maxItems: 3
- clock-names:
- oneOf:
- - const: link # for PXs2
- - items: # for PXs3 with phy-ext
- - const: link
- - const: phy
- - const: phy-ext
- - items: # for others
- - const: link
- - const: phy
+ clock-names: true
resets:
maxItems: 2
- reset-names:
- items:
- - const: link
- - const: phy
+ reset-names: true
vbus-supply:
description: A phandle to the regulator for USB VBUS
@@ -74,6 +62,77 @@ properties:
required for each port, if any one is omitted, the trimming data
of the port will not be set at all.
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: socionext,uniphier-pro5-usb3-hsphy
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: gio
+ - const: link
+ resets:
+ minItems: 2
+ maxItems: 2
+ reset-names:
+ items:
+ - const: gio
+ - const: link
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - socionext,uniphier-pxs2-usb3-hsphy
+ - socionext,uniphier-ld20-usb3-hsphy
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: link
+ - const: phy
+ resets:
+ minItems: 2
+ maxItems: 2
+ reset-names:
+ items:
+ - const: link
+ - const: phy
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - socionext,uniphier-pxs3-usb3-hsphy
+ - socionext,uniphier-nx1-usb3-hsphy
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 3
+ clock-names:
+ minItems: 2
+ items:
+ - const: link
+ - const: phy
+ - const: phy-ext
+ resets:
+ minItems: 2
+ maxItems: 2
+ reset-names:
+ items:
+ - const: link
+ - const: phy
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
index 92d46eb913a3..4c26d2d2303d 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
@@ -35,33 +35,88 @@ properties:
minItems: 2
maxItems: 3
- clock-names:
- oneOf:
- - items: # for Pro4, Pro5
- - const: gio
- - const: link
- - items: # for PXs3 with phy-ext
- - const: link
- - const: phy
- - const: phy-ext
- - items: # for others
- - const: link
- - const: phy
+ clock-names: true
resets:
maxItems: 2
- reset-names:
- oneOf:
- - items: # for Pro4,Pro5
- - const: gio
- - const: link
- - items: # for others
- - const: link
- - const: phy
+ reset-names: true
vbus-supply:
- description: A phandle to the regulator for USB VBUS
+ description: A phandle to the regulator for USB VBUS, only for USB host
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - socionext,uniphier-pro4-usb3-ssphy
+ - socionext,uniphier-pro5-usb3-ssphy
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: gio
+ - const: link
+ resets:
+ minItems: 2
+ maxItems: 2
+ reset-names:
+ items:
+ - const: gio
+ - const: link
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - socionext,uniphier-pxs2-usb3-ssphy
+ - socionext,uniphier-ld20-usb3-ssphy
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: link
+ - const: phy
+ resets:
+ minItems: 2
+ maxItems: 2
+ reset-names:
+ items:
+ - const: link
+ - const: phy
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - socionext,uniphier-pxs3-usb3-ssphy
+ - socionext,uniphier-nx1-usb3-ssphy
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 3
+ clock-names:
+ minItems: 2
+ items:
+ - const: link
+ - const: phy
+ - const: phy-ext
+ resets:
+ minItems: 2
+ maxItems: 2
+ reset-names:
+ items:
+ - const: link
+ - const: phy
required:
- compatible
@@ -71,7 +126,6 @@ required:
- clock-names
- resets
- reset-names
- - vbus-supply
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
index 7c25c8d51116..9db904a528ee 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
@@ -76,73 +76,24 @@ additionalProperties: false
examples:
- |
#include <dt-bindings/clock/aspeed-clock.h>
- apb {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- syscon: scu@1e6e2000 {
- compatible = "aspeed,ast2500-scu", "syscon", "simple-mfd";
- reg = <0x1e6e2000 0x1a8>;
- #clock-cells = <1>;
- #reset-cells = <1>;
-
- pinctrl: pinctrl {
- compatible = "aspeed,ast2500-pinctrl";
- aspeed,external-nodes = <&gfx>, <&lhc>;
-
- pinctrl_i2c3_default: i2c3_default {
- function = "I2C3";
- groups = "I2C3";
- };
-
- pinctrl_gpioh0_unbiased_default: gpioh0 {
- pins = "A18";
- bias-disable;
- };
+ scu@1e6e2000 {
+ compatible = "aspeed,ast2500-scu", "syscon", "simple-mfd";
+ reg = <0x1e6e2000 0x1a8>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+
+ pinctrl: pinctrl {
+ compatible = "aspeed,ast2500-pinctrl";
+ aspeed,external-nodes = <&gfx>, <&lhc>;
+
+ pinctrl_i2c3_default: i2c3_default {
+ function = "I2C3";
+ groups = "I2C3";
};
- };
-
- gfx: display@1e6e6000 {
- compatible = "aspeed,ast2500-gfx", "syscon";
- reg = <0x1e6e6000 0x1000>;
- reg-io-width = <4>;
- clocks = <&syscon ASPEED_CLK_GATE_D1CLK>;
- resets = <&syscon ASPEED_RESET_CRT1>;
- interrupts = <0x19>;
- syscon = <&syscon>;
- memory-region = <&gfx_memory>;
- };
- };
-
- lpc: lpc@1e789000 {
- compatible = "aspeed,ast2500-lpc", "simple-mfd";
- reg = <0x1e789000 0x1000>;
-
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x1e789000 0x1000>;
-
- lpc_host: lpc-host@80 {
- compatible = "aspeed,ast2500-lpc-host", "simple-mfd", "syscon";
- reg = <0x80 0x1e0>;
- reg-io-width = <4>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x80 0x1e0>;
-
- lhc: lhc@20 {
- compatible = "aspeed,ast2500-lhc";
- reg = <0x20 0x24>, <0x48 0x8>;
+ pinctrl_gpioh0_unbiased_default: gpioh0 {
+ pins = "A18";
+ bias-disable;
};
};
};
-
- gfx_memory: framebuffer {
- size = <0x01000000>;
- alignment = <0x01000000>;
- compatible = "shared-dma-pool";
- reusable;
- };
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt
deleted file mode 100644
index bfab5ca49fd1..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-* Freescale i.MX7 Dual IOMUX Controller
-
-iMX7D supports two iomuxc controllers, fsl,imx7d-iomuxc controller is similar
-as previous iMX SoC generation and fsl,imx7d-iomuxc-lpsr which provides low
-power state retention capabilities on gpios that are part of iomuxc-lpsr
-(GPIO1_IO7..GPIO1_IO0). While iomuxc-lpsr provides its own set of registers for
-mux and pad control settings, it shares the input select register from main
-iomuxc controller for daisy chain settings, the fsl,input-sel property extends
-fsl,imx-pinctrl driver to support iomuxc-lpsr controller.
-
-iomuxc_lpsr: iomuxc-lpsr@302c0000 {
- compatible = "fsl,imx7d-iomuxc-lpsr";
- reg = <0x302c0000 0x10000>;
- fsl,input-sel = <&iomuxc>;
-};
-
-iomuxc: iomuxc@30330000 {
- compatible = "fsl,imx7d-iomuxc";
- reg = <0x30330000 0x10000>;
-};
-
-Peripherals using pads from iomuxc-lpsr support low state retention power
-state, under LPSR mode GPIO's state of pads are retain.
-
-Please refer to fsl,imx-pinctrl.txt in this directory for common binding part
-and usage.
-
-Required properties:
-- compatible: "fsl,imx7d-iomuxc" for main IOMUXC controller, or
- "fsl,imx7d-iomuxc-lpsr" for Low Power State Retention IOMUXC controller.
-- fsl,pins: each entry consists of 6 integers and represents the mux and config
- setting for one pin. The first 5 integers <mux_reg conf_reg input_reg mux_val
- input_val> are specified using a PIN_FUNC_ID macro, which can be found in
- imx7d-pinfunc.h under device tree source folder. The last integer CONFIG is
- the pad setting value like pull-up on this pin. Please refer to i.MX7 Dual
- Reference Manual for detailed CONFIG settings.
-- fsl,input-sel: required property for iomuxc-lpsr controller, this property is
- a phandle for main iomuxc controller which shares the input select register for
- daisy chain settings.
-
-CONFIG bits definition:
-PAD_CTL_PUS_100K_DOWN (0 << 5)
-PAD_CTL_PUS_5K_UP (1 << 5)
-PAD_CTL_PUS_47K_UP (2 << 5)
-PAD_CTL_PUS_100K_UP (3 << 5)
-PAD_CTL_PUE (1 << 4)
-PAD_CTL_HYS (1 << 3)
-PAD_CTL_SRE_SLOW (1 << 2)
-PAD_CTL_SRE_FAST (0 << 2)
-PAD_CTL_DSE_X1 (0 << 0)
-PAD_CTL_DSE_X4 (1 << 0)
-PAD_CTL_DSE_X2 (2 << 0)
-PAD_CTL_DSE_X6 (3 << 0)
-
-Examples:
-While iomuxc-lpsr is intended to be used by dedicated peripherals to take
-advantages of LPSR power mode, is also possible that an IP to use pads from
-any of the iomux controllers. For example the I2C1 IP can use SCL pad from
-iomuxc-lpsr controller and SDA pad from iomuxc controller as:
-
-i2c1: i2c@30a20000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2c1_1>, <&pinctrl_i2c1_2>;
-};
-
-iomuxc-lpsr@302c0000 {
- compatible = "fsl,imx7d-iomuxc-lpsr";
- reg = <0x302c0000 0x10000>;
- fsl,input-sel = <&iomuxc>;
-
- pinctrl_i2c1_1: i2c1grp-1 {
- fsl,pins = <
- MX7D_PAD_GPIO1_IO04__I2C1_SCL 0x4000007f
- >;
- };
-};
-
-iomuxc@30330000 {
- compatible = "fsl,imx7d-iomuxc";
- reg = <0x30330000 0x10000>;
-
- pinctrl_i2c1_2: i2c1grp-2 {
- fsl,pins = <
- MX7D_PAD_I2C1_SDA__I2C1_SDA 0x4000007f
- >;
- };
-};
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.yaml
new file mode 100644
index 000000000000..621038662188
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.yaml
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/fsl,imx7d-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale IMX7D IOMUX Controller
+
+maintainers:
+ - Dong Aisheng <aisheng.dong@nxp.com>
+
+description:
+ Please refer to fsl,imx-pinctrl.txt and pinctrl-bindings.txt in this directory
+ for common binding part and usage.
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - fsl,imx7d-iomuxc
+ - fsl,imx7d-iomuxc-lpsr
+
+ reg:
+ maxItems: 1
+
+ fsl,input-sel:
+ description:
+ phandle for main iomuxc controller which shares the input select
+ register for daisy chain settings.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+# Client device subnode's properties
+patternProperties:
+ 'grp$':
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use the standard properties below.
+
+ properties:
+ fsl,pins:
+ description:
+ each entry consists of 6 integers and represents the mux and config
+ setting for one pin. The first 5 integers <mux_reg conf_reg input_reg
+ mux_val input_val> are specified using a PIN_FUNC_ID macro, which can
+ be found in <arch/arm/boot/dts/imx7d-pinfunc.h>. The last integer
+ CONFIG is the pad setting value like pull-up on this pin. Please
+ refer to i.MX7D Reference Manual for detailed CONFIG settings.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: |
+ "mux_reg" indicates the offset of mux register.
+ - description: |
+ "conf_reg" indicates the offset of pad configuration register.
+ - description: |
+ "input_reg" indicates the offset of select input register.
+ - description: |
+ "mux_val" indicates the mux value to be applied.
+ - description: |
+ "input_val" indicates the select input value to be applied.
+ - description: |
+ "pad_setting" indicates the pad configuration value to be applied.
+
+ required:
+ - fsl,pins
+
+ additionalProperties: false
+
+allOf:
+ - $ref: "pinctrl.yaml#"
+
+required:
+ - compatible
+ - reg
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx7d-iomuxc-lpsr
+
+then:
+ required:
+ - fsl,input-sel
+
+additionalProperties: false
+
+examples:
+ - |
+ iomuxc: pinctrl@30330000 {
+ compatible = "fsl,imx7d-iomuxc";
+ reg = <0x30330000 0x10000>;
+
+ pinctrl_uart5: uart5grp {
+ fsl,pins =
+ <0x0160 0x03D0 0x0714 0x1 0x0 0x7e>,
+ <0x0164 0x03D4 0x0000 0x1 0x0 0x76>;
+ };
+ };
+ - |
+ iomuxc_lpsr: pinctrl@302c0000 {
+ compatible = "fsl,imx7d-iomuxc-lpsr";
+ reg = <0x302c0000 0x10000>;
+ fsl,input-sel = <&iomuxc>;
+
+ pinctrl_gpio_lpsr: gpio1-grp {
+ fsl,pins =
+ <0x0008 0x0038 0x0000 0x0 0x0 0x59>,
+ <0x000C 0x003C 0x0000 0x0 0x0 0x59>;
+ };
+ };
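Note: the converted schema keeps the daisy-chain link (fsl,input-sel pointing at the main iomuxc) but no longer carries the client-side illustration from the removed text binding. As a sketch only, reusing the i2c1 unit address and group labels from the dropped example, a consumer that picks one group from each IOMUX controller would look like:

    i2c1: i2c@30a20000 {
        pinctrl-names = "default";
        /* one group from the LPSR controller, one from the main iomuxc */
        pinctrl-0 = <&pinctrl_i2c1_1>, <&pinctrl_i2c1_2>;
    };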
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imxrt1170.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imxrt1170.yaml
new file mode 100644
index 000000000000..2e880b3e537c
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imxrt1170.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/fsl,imxrt1170.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MXRT1170 IOMUX Controller
+
+maintainers:
+ - Giulio Benetti <giulio.benetti@benettiengineering.com>
+ - Jesse Taube <Mr.Bossman075@gmail.com>
+
+description:
+ Please refer to fsl,imx-pinctrl.txt and pinctrl-bindings.txt in this directory
+ for the common binding parts and usage.
+
+properties:
+ compatible:
+ const: fsl,imxrt1170-iomuxc
+
+ reg:
+ maxItems: 1
+
+# Client device subnode's properties
+patternProperties:
+ 'grp$':
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use the standard properties below.
+
+ properties:
+ fsl,pins:
+ description:
+ each entry consists of 6 integers and represents the mux and config
+ setting for one pin. The first 5 integers <mux_reg conf_reg input_reg
+ mux_val input_val> are specified using a PIN_FUNC_ID macro, which can
+ be found in <arch/arm/boot/dts/imxrt1170-pinfunc.h>. The last
+ integer CONFIG is the pad setting value like pull-up on this pin. Please
+ refer to i.MXRT1170 Reference Manual for detailed CONFIG settings.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: |
+ "mux_reg" indicates the offset of mux register.
+ - description: |
+ "conf_reg" indicates the offset of pad configuration register.
+ - description: |
+ "input_reg" indicates the offset of select input register.
+ - description: |
+ "mux_val" indicates the mux value to be applied.
+ - description: |
+ "input_val" indicates the select input value to be applied.
+ - description: |
+ "pad_setting" indicates the pad configuration value to be applied.
+ required:
+ - fsl,pins
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ iomuxc: iomuxc@400e8000 {
+ compatible = "fsl,imxrt1170-iomuxc";
+ reg = <0x400e8000 0x4000>;
+ pinctrl_lpuart1: lpuart1grp {
+ fsl,pins =
+ <0x16C 0x3B0 0x620 0x0 0x0 0xf1>,
+ <0x170 0x3B4 0x61C 0x0 0x0 0xf1>;
+ };
+ };
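Note: as a hedged sketch of the consumer side, a serial node could pick up the lpuart1grp defined above; the unit address below is illustrative and not defined by this binding:

    serial@4007c000 { /* unit address illustrative */
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_lpuart1>;
    };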
diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,ac5-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/marvell,ac5-pinctrl.yaml
new file mode 100644
index 000000000000..a651b2744caf
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/marvell,ac5-pinctrl.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/marvell,ac5-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell AC5 pin controller
+
+maintainers:
+ - Chris Packham <chris.packham@alliedtelesis.co.nz>
+
+description:
+ Bindings for Marvell's AC5 memory-mapped pin controller.
+
+properties:
+ compatible:
+ items:
+ - const: marvell,ac5-pinctrl
+
+ reg:
+ maxItems: 1
+
+patternProperties:
+ '-pins$':
+ type: object
+ $ref: pinmux-node.yaml#
+
+ properties:
+ marvell,function:
+ $ref: "/schemas/types.yaml#/definitions/string"
+ description:
+ Indicates the function to select.
+ enum: [ dev_init_done, ge, gpio, i2c0, i2c1, int_out, led, nand, pcie, ptp, sdio,
+ spi0, spi1, synce, tsen_int, uart0, uart1, uart2, uart3, uartsd, wd_int, xg ]
+
+ marvell,pins:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ Array of MPP pins to be used for the given function.
+ minItems: 1
+ items:
+ enum: [ mpp0, mpp1, mpp2, mpp3, mpp4, mpp5, mpp6, mpp7, mpp8, mpp9,
+ mpp10, mpp11, mpp12, mpp13, mpp14, mpp15, mpp16, mpp17, mpp18, mpp19,
+ mpp20, mpp21, mpp22, mpp23, mpp24, mpp25, mpp26, mpp27, mpp28, mpp29,
+ mpp30, mpp31, mpp32, mpp33, mpp34, mpp35, mpp36, mpp37, mpp38, mpp39,
+ mpp40, mpp41, mpp42, mpp43, mpp44, mpp45 ]
+
+allOf:
+ - $ref: "pinctrl.yaml#"
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ pinctrl@80020100 {
+ compatible = "marvell,ac5-pinctrl";
+ reg = <0x80020100 0x20>;
+
+ i2c0_pins: i2c0-pins {
+ marvell,pins = "mpp26", "mpp27";
+ marvell,function = "i2c0";
+ };
+
+ i2c0_gpio: i2c0-gpio-pins {
+ marvell,pins = "mpp26", "mpp27";
+ marvell,function = "gpio";
+ };
+ };
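Note: the two groups above mux the same MPP pins either to i2c0 or to gpio. A consumer selecting the hardware function might look like the following sketch; the I2C controller's unit address is hypothetical:

    i2c@80011000 { /* unit address hypothetical */
        pinctrl-names = "default";
        pinctrl-0 = <&i2c0_pins>;
    };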
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,pinctrl-mt6795.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,pinctrl-mt6795.yaml
new file mode 100644
index 000000000000..73ae6e11410b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,pinctrl-mt6795.yaml
@@ -0,0 +1,224 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/mediatek,pinctrl-mt6795.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek MT6795 Pin Controller
+
+maintainers:
+ - AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ - Sean Wang <sean.wang@kernel.org>
+
+description: |
+ The MediaTek pin controller is used to control SoC pins.
+
+properties:
+ compatible:
+ const: mediatek,mt6795-pinctrl
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ description: |
+ Number of cells in the GPIO specifier. Since the generic GPIO binding is
+ used, the number of cells must be specified as 2. See the standard GPIO
+ binding for a description of the individual cells.
+ const: 2
+
+ gpio-ranges:
+ description: GPIO valid number range.
+ maxItems: 1
+
+ reg:
+ description:
+ Physical address base for gpio base and eint registers.
+ minItems: 2
+
+ reg-names:
+ items:
+ - const: base
+ - const: eint
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+ interrupts:
+ description: The interrupt outputs to sysirq.
+ maxItems: 1
+
+# PIN CONFIGURATION NODES
+patternProperties:
+ '-pins$':
+ type: object
+ additionalProperties: false
+ patternProperties:
+ '^pins':
+ type: object
+ additionalProperties: false
+ description: |
+ A pinctrl node should contain at least one subnode representing the
+ pinctrl groups available on the machine. Each subnode will list the
+ pins it needs, and how they should be configured, with regard to muxer
+ configuration, pullups, drive strength, input enable/disable and
+ input schmitt.
+ An example of using macro:
+ pincontroller {
+ /* GPIO0 set as multifunction GPIO0 */
+ gpio-pins {
+ pins {
+ pinmux = <PINMUX_GPIO0__FUNC_GPIO0>;
+ }
+ };
+ /* GPIO45 set as multifunction SDA0 */
+ i2c0-pins {
+ pins {
+ pinmux = <PINMUX_GPIO45__FUNC_SDA0>;
+ }
+ };
+ };
+ $ref: "pinmux-node.yaml"
+
+ properties:
+ pinmux:
+ description: |
+ Integer array, represents gpio pin number and mux setting.
+ Supported pin number and mux varies for different SoCs, and are
+ defined as macros in dt-bindings/pinctrl/<soc>-pinfunc.h
+ directly.
+
+ drive-strength:
+ enum: [2, 4, 6, 8, 10, 12, 14, 16]
+
+ bias-pull-down:
+ oneOf:
+ - type: boolean
+ - enum: [100, 101, 102, 103]
+ description: mt6795 pull down PUPD/R0/R1 type define value.
+ description: |
+ For normal pull down type, it is not necessary to specify R1R0
+ values; when the pull down type is PUPD/R0/R1, adding R1R0 defines
+ will set different resistance values.
+
+ bias-pull-up:
+ oneOf:
+ - type: boolean
+ - enum: [100, 101, 102, 103]
+ description: mt6795 pull up PUPD/R0/R1 type define value.
+ description: |
+ For normal pull up type, it is not necessary to specify R1R0
+ values; when the pull up type is PUPD/R0/R1, adding R1R0 defines
+ will set different resistance values.
+
+ bias-disable: true
+
+ output-high: true
+
+ output-low: true
+
+ input-enable: true
+
+ input-disable: true
+
+ input-schmitt-enable: true
+
+ input-schmitt-disable: true
+
+ mediatek,pull-up-adv:
+ description: |
+ Pull up settings for 2 pull resistors, R0 and R1. User can
+ configure those special pins. Valid arguments are described as below:
+ 0: (R1, R0) = (0, 0) which means R1 disabled and R0 disabled.
+ 1: (R1, R0) = (0, 1) which means R1 disabled and R0 enabled.
+ 2: (R1, R0) = (1, 0) which means R1 enabled and R0 disabled.
+ 3: (R1, R0) = (1, 1) which means R1 enabled and R0 enabled.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+
+ mediatek,pull-down-adv:
+ description: |
+ Pull down settings for 2 pull resistors, R0 and R1. User can
+ configure those special pins. Valid arguments are described as below:
+ 0: (R1, R0) = (0, 0) which means R1 disabled and R0 disabled.
+ 1: (R1, R0) = (0, 1) which means R1 disabled and R0 enabled.
+ 2: (R1, R0) = (1, 0) which means R1 enabled and R0 disabled.
+ 3: (R1, R0) = (1, 1) which means R1 enabled and R0 enabled.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+
+ required:
+ - pinmux
+
+allOf:
+ - $ref: "pinctrl.yaml#"
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - interrupt-controller
+ - '#interrupt-cells'
+ - gpio-controller
+ - '#gpio-cells'
+ - gpio-ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/pinctrl/mt6795-pinfunc.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pio: pinctrl@10005000 {
+ compatible = "mediatek,mt6795-pinctrl";
+ reg = <0 0x10005000 0 0x1000>, <0 0x1000b000 0 0x1000>;
+ reg-names = "base", "eint";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pio 0 0 196>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <2>;
+
+ i2c0-pins {
+ pins-sda-scl {
+ pinmux = <PINMUX_GPIO45__FUNC_SDA0>,
+ <PINMUX_GPIO46__FUNC_SCL0>;
+ };
+ };
+
+ mmc0-pins {
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO154__FUNC_MSDC0_DAT0>,
+ <PINMUX_GPIO155__FUNC_MSDC0_DAT1>,
+ <PINMUX_GPIO156__FUNC_MSDC0_DAT2>,
+ <PINMUX_GPIO157__FUNC_MSDC0_DAT3>,
+ <PINMUX_GPIO158__FUNC_MSDC0_DAT4>,
+ <PINMUX_GPIO159__FUNC_MSDC0_DAT5>,
+ <PINMUX_GPIO160__FUNC_MSDC0_DAT6>,
+ <PINMUX_GPIO161__FUNC_MSDC0_DAT7>,
+ <PINMUX_GPIO162__FUNC_MSDC0_CMD>;
+ input-enable;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins-clk {
+ pinmux = <PINMUX_GPIO163__FUNC_MSDC0_CLK>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-rst {
+ pinmux = <PINMUX_GPIO165__FUNC_MSDC0_RSTB>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+ };
+ };
+ };
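Note: besides the MTK_PUPD_SET_R1R0_* macros used in the example, bias-pull-up and bias-pull-down also accept the raw values 100-103 on PUPD/R0/R1 type pads. A minimal sketch, reusing the MSDC0 reset pad from the example above; the macro equivalence in the comment is an assumption based on mt65xx.h:

    mmc0-rst-pins {
        pins-rst {
            pinmux = <PINMUX_GPIO165__FUNC_MSDC0_RSTB>;
            /* assumed: 101 = MTK_PUPD_SET_R1R0_01, i.e. (R1, R0) = (0, 1) */
            bias-pull-up = <101>;
        };
    };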
diff --git a/Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.txt
deleted file mode 100644
index 5d84fd299ccf..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-Microsemi Ocelot pin controller Device Tree Bindings
-----------------------------------------------------
-
-Required properties:
- - compatible : Should be "mscc,ocelot-pinctrl",
- "mscc,jaguar2-pinctrl", "microchip,sparx5-pinctrl",
- "mscc,luton-pinctrl", "mscc,serval-pinctrl",
- "microchip,lan966x-pinctrl" or "mscc,servalt-pinctrl"
- - reg : Address and length of the register set for the device
- - gpio-controller : Indicates this device is a GPIO controller
- - #gpio-cells : Must be 2.
- The first cell is the pin number and the
- second cell specifies GPIO flags, as defined in
- <dt-bindings/gpio/gpio.h>.
- - gpio-ranges : Range of pins managed by the GPIO controller.
-
-
-The ocelot-pinctrl driver uses the generic pin multiplexing and generic pin
-configuration documented in pinctrl-bindings.txt.
-
-The following generic properties are supported:
- - function
- - pins
-
-Example:
- gpio: pinctrl@71070034 {
- compatible = "mscc,ocelot-pinctrl";
- reg = <0x71070034 0x28>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&gpio 0 0 22>;
-
- uart_pins: uart-pins {
- pins = "GPIO_6", "GPIO_7";
- function = "uart";
- };
-
- uart2_pins: uart2-pins {
- pins = "GPIO_12", "GPIO_13";
- function = "uart2";
- };
- };
diff --git a/Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.yaml
new file mode 100644
index 000000000000..98d547c34ef3
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.yaml
@@ -0,0 +1,116 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/mscc,ocelot-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microsemi Ocelot pin controller
+
+maintainers:
+ - Alexandre Belloni <alexandre.belloni@bootlin.com>
+ - Lars Povlsen <lars.povlsen@microchip.com>
+
+properties:
+ compatible:
+ enum:
+ - microchip,lan966x-pinctrl
+ - microchip,sparx5-pinctrl
+ - mscc,jaguar2-pinctrl
+ - mscc,luton-pinctrl
+ - mscc,ocelot-pinctrl
+ - mscc,serval-pinctrl
+ - mscc,servalt-pinctrl
+
+ reg:
+ items:
+ - description: Base address
+ - description: Extended pin configuration registers
+ minItems: 1
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ gpio-ranges: true
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ description: Optional shared switch reset.
+ items:
+ - const: switch
+
+patternProperties:
+ '-pins$':
+ type: object
+ allOf:
+ - $ref: "pinmux-node.yaml"
+ - $ref: "pincfg-node.yaml"
+
+ properties:
+ function: true
+ pins: true
+ output-high: true
+ output-low: true
+ drive-strength: true
+
+ required:
+ - function
+ - pins
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - '#gpio-cells'
+ - gpio-ranges
+
+allOf:
+ - $ref: "pinctrl.yaml#"
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - microchip,lan966x-pinctrl
+ - microchip,sparx5-pinctrl
+ then:
+ properties:
+ reg:
+ minItems: 2
+
+additionalProperties: false
+
+examples:
+ - |
+ gpio: pinctrl@71070034 {
+ compatible = "mscc,ocelot-pinctrl";
+ reg = <0x71070034 0x28>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&gpio 0 0 22>;
+
+ uart_pins: uart-pins {
+ pins = "GPIO_6", "GPIO_7";
+ function = "uart";
+ };
+
+ uart2_pins: uart2-pins {
+ pins = "GPIO_12", "GPIO_13";
+ function = "uart2";
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8192.yaml b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8192.yaml
index 3c84676a167d..c90a132fbc79 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8192.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8192.yaml
@@ -29,6 +29,8 @@ properties:
description: gpio valid number range.
maxItems: 1
+ gpio-line-names: true
+
reg:
description: |
Physical address base for gpio base registers. There are 11 GPIO
@@ -51,62 +53,92 @@ properties:
#PIN CONFIGURATION NODES
patternProperties:
- '^pins':
+ '-pins$':
type: object
- description: |
- A pinctrl node should contain at least one subnodes representing the
- pinctrl groups available on the machine. Each subnode will list the
- pins it needs, and how they should be configured, with regard to muxer
- configuration, pullups, drive strength, input enable/disable and
- input schmitt.
- An example of using macro:
- pincontroller {
- /* GPIO0 set as multifunction GPIO0 */
- state_0_node_a {
- pinmux = <PINMUX_GPIO0__FUNC_GPIO0>;
- };
- /* GPIO1 set as multifunction PWM */
- state_0_node_b {
- pinmux = <PINMUX_GPIO1__FUNC_PWM_1>;
- };
- };
- $ref: "pinmux-node.yaml"
-
- properties:
- pinmux:
- description: |
- Integer array, represents gpio pin number and mux setting.
- Supported pin number and mux varies for different SoCs, and are defined
- as macros in dt-bindings/pinctrl/<soc>-pinfunc.h directly.
-
- drive-strength:
- description: |
- It can support some arguments, such as MTK_DRIVE_4mA, MTK_DRIVE_6mA, etc. See
- dt-bindings/pinctrl/mt65xx.h. It can only support 2/4/6/8/10/12/14/16mA in mt8192.
- enum: [2, 4, 6, 8, 10, 12, 14, 16]
-
- bias-pull-down: true
-
- bias-pull-up: true
-
- bias-disable: true
-
- output-high: true
-
- output-low: true
-
- input-enable: true
-
- input-disable: true
-
- input-schmitt-enable: true
-
- input-schmitt-disable: true
-
- required:
- - pinmux
-
additionalProperties: false
+ patternProperties:
+ '^pins':
+ type: object
+ description: |
+ A pinctrl node should contain at least one subnode representing the
+ pinctrl groups available on the machine. Each subnode will list the
+ pins it needs, and how they should be configured, with regard to muxer
+ configuration, pullups, drive strength, input enable/disable and
+ input schmitt.
+ $ref: "pinmux-node.yaml"
+
+ properties:
+ pinmux:
+ description: |
+ Integer array, represents gpio pin number and mux setting.
+ Supported pin number and mux varies for different SoCs, and are defined
+ as macros in dt-bindings/pinctrl/<soc>-pinfunc.h directly.
+
+ drive-strength:
+ description: |
+ It can support some arguments, such as MTK_DRIVE_4mA, MTK_DRIVE_6mA, etc. See
+ dt-bindings/pinctrl/mt65xx.h. It can only support 2/4/6/8/10/12/14/16mA in mt8192.
+ enum: [2, 4, 6, 8, 10, 12, 14, 16]
+
+ mediatek,drive-strength-adv:
+ description: |
+ Describe the specific driving setup property.
+ For I2C pins, the existing generic driving setup can only support
+ 2/4/6/8/10/12/14/16mA driving. But in specific driving setup, they
+ can support 0.125/0.25/0.5/1mA adjustment. If we enable specific
+ driving setup, the existing generic setup will be disabled.
+ The specific driving setup is controlled by E1E0EN.
+ When E1=0/E0=0, the strength is 0.125mA.
+ When E1=0/E0=1, the strength is 0.25mA.
+ When E1=1/E0=0, the strength is 0.5mA.
+ When E1=1/E0=1, the strength is 1mA.
+ EN is used to enable or disable the specific driving setup.
+ Valid arguments are described as below:
+ 0: (E1, E0, EN) = (0, 0, 0)
+ 1: (E1, E0, EN) = (0, 0, 1)
+ 2: (E1, E0, EN) = (0, 1, 0)
+ 3: (E1, E0, EN) = (0, 1, 1)
+ 4: (E1, E0, EN) = (1, 0, 0)
+ 5: (E1, E0, EN) = (1, 0, 1)
+ 6: (E1, E0, EN) = (1, 1, 0)
+ 7: (E1, E0, EN) = (1, 1, 1)
+ So the valid arguments are from 0 to 7.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3, 4, 5, 6, 7]
+
+ mediatek,pull-up-adv:
+ description: |
+ Pull up settings for 2 pull resistors, R0 and R1. User can
+ configure those special pins. Valid arguments are described as below:
+ 0: (R1, R0) = (0, 0) which means R1 disabled and R0 disabled.
+ 1: (R1, R0) = (0, 1) which means R1 disabled and R0 enabled.
+ 2: (R1, R0) = (1, 0) which means R1 enabled and R0 disabled.
+ 3: (R1, R0) = (1, 1) which means R1 enabled and R0 enabled.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+
+ bias-pull-down: true
+
+ bias-pull-up: true
+
+ bias-disable: true
+
+ output-high: true
+
+ output-low: true
+
+ input-enable: true
+
+ input-disable: true
+
+ input-schmitt-enable: true
+
+ input-schmitt-disable: true
+
+ required:
+ - pinmux
+
+ additionalProperties: false
allOf:
- $ref: "pinctrl.yaml#"
@@ -151,8 +183,17 @@ examples:
interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH 0>;
#interrupt-cells = <2>;
- pins {
- pinmux = <PINMUX_GPIO0__FUNC_GPIO0>;
- output-low;
+ spi1-default-pins {
+ pins-cs-mosi-clk {
+ pinmux = <PINMUX_GPIO157__FUNC_SPI1_A_CSB>,
+ <PINMUX_GPIO159__FUNC_SPI1_A_MO>,
+ <PINMUX_GPIO156__FUNC_SPI1_A_CLK>;
+ bias-disable;
+ };
+
+ pins-miso {
+ pinmux = <PINMUX_GPIO158__FUNC_SPI1_A_MI>;
+ bias-pull-down;
+ };
};
};
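Note: a hedged sketch of the newly documented mediatek,drive-strength-adv property in the nested node layout; the pinmux macro is only a placeholder taken from the old example text, not a recommendation for which pad to use:

    i2c-adv-pins {
        pins {
            pinmux = <PINMUX_GPIO0__FUNC_GPIO0>; /* placeholder pad */
            /* 7: (E1, E0, EN) = (1, 1, 1), i.e. 1 mA fine-grained drive */
            mediatek,drive-strength-adv = <7>;
        };
    };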
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
index fe2bcf0694d9..6f2efc3772cb 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
@@ -20,6 +20,7 @@ properties:
- qcom,pm2250-gpio
- qcom,pm660-gpio
- qcom,pm660l-gpio
+ - qcom,pm6125-gpio
- qcom,pm6150-gpio
- qcom,pm6150l-gpio
- qcom,pm6350-gpio
@@ -32,6 +33,7 @@ properties:
- qcom,pm8058-gpio
- qcom,pm8150-gpio
- qcom,pm8150b-gpio
+ - qcom,pm8150l-gpio
- qcom,pm8226-gpio
- qcom,pm8350-gpio
- qcom,pm8350b-gpio
@@ -49,10 +51,12 @@ properties:
- qcom,pmi8994-gpio
- qcom,pmi8998-gpio
- qcom,pmk8350-gpio
+ - qcom,pmm8155au-gpio
- qcom,pmr735a-gpio
- qcom,pmr735b-gpio
- qcom,pms405-gpio
- qcom,pmx55-gpio
+ - qcom,pmx65-gpio
- enum:
- qcom,spmi-gpio
@@ -71,6 +75,16 @@ properties:
gpio-ranges:
maxItems: 1
+ gpio-line-names:
+ minItems: 2
+ maxItems: 44
+
+ gpio-reserved-ranges:
+ minItems: 1
+ # maxItems as half of total number of GPIOs, as there has to be at
+ # least one usable GPIO between each reserved range.
+ maxItems: 22
+
'#gpio-cells':
const: 2
description:
@@ -87,13 +101,278 @@ required:
- gpio-ranges
- interrupt-controller
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pm8008-gpio
+ - qcom,pmi8950-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 2
+ maxItems: 2
+ gpio-reserved-ranges:
+ maxItems: 1
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pm8005-gpio
+ - qcom,pm8450-gpio
+ - qcom,pm8916-gpio
+ - qcom,pmk8350-gpio
+ - qcom,pmr735a-gpio
+ - qcom,pmr735b-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 4
+ maxItems: 4
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 2
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pm8018-gpio
+ - qcom,pm8019-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 6
+ maxItems: 6
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 3
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pm8350b-gpio
+ - qcom,pm8950-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 8
+ maxItems: 8
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 4
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pm6350-gpio
+ - qcom,pm8350c-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 9
+ maxItems: 9
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 5
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pm2250-gpio
+ - qcom,pm6150-gpio
+ - qcom,pm7325-gpio
+ - qcom,pm8150-gpio
+ - qcom,pm8350-gpio
+ - qcom,pmc8180-gpio
+ - qcom,pmi8994-gpio
+ - qcom,pmm8155au-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 10
+ maxItems: 10
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 5
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pmx55-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 11
+ maxItems: 11
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 6
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pm660l-gpio
+ - qcom,pm6150l-gpio
+ - qcom,pm8038-gpio
+ - qcom,pm8150b-gpio
+ - qcom,pm8150l-gpio
+ - qcom,pmc8180c-gpio
+ - qcom,pms405-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 12
+ maxItems: 12
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 6
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pm660-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 13
+ maxItems: 13
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 7
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pmi8998-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 14
+ maxItems: 14
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 7
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pmx65-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 16
+ maxItems: 16
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 8
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pm8994-gpio
+ - qcom,pma8084-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 22
+ maxItems: 22
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 11
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pm8998-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 26
+ maxItems: 26
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 13
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pm8941-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 36
+ maxItems: 36
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 18
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pm8917-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 38
+ maxItems: 38
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 19
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pm8058-gpio
+ - qcom,pm8921-gpio
+ then:
+ properties:
+ gpio-line-names:
+ minItems: 44
+ maxItems: 44
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 22
+
patternProperties:
'-state$':
oneOf:
- $ref: "#/$defs/qcom-pmic-gpio-state"
- patternProperties:
- ".*":
+ "(pinconf|-pins)$":
$ref: "#/$defs/qcom-pmic-gpio-state"
+ additionalProperties: false
$defs:
qcom-pmic-gpio-state:
@@ -106,6 +385,7 @@ $defs:
description:
List of gpio pins affected by the properties specified in
this subnode. Valid pins are
+ - gpio1-gpio9 for pm6125
- gpio1-gpio10 for pm6150
- gpio1-gpio12 for pm6150l
- gpio1-gpio9 for pm6350
@@ -134,12 +414,14 @@ $defs:
- gpio1-gpio2 for pmi8950
- gpio1-gpio10 for pmi8994
- gpio1-gpio4 for pmk8350
+ - gpio1-gpio10 for pmm8155au
- gpio1-gpio4 for pmr735a
- gpio1-gpio4 for pmr735b
- gpio1-gpio12 for pms405 (holes on gpio1, gpio9
and gpio10)
- gpio1-gpio11 for pmx55 (holes on gpio3, gpio7, gpio10
and gpio11)
+ - gpio1-gpio16 for pmx65
items:
pattern: "^gpio([0-9]+)$"
@@ -174,6 +456,7 @@ $defs:
bias-high-impedance: true
input-enable: true
+ input-disable: true
output-high: true
output-low: true
output-enable: true
@@ -232,7 +515,7 @@ examples:
#gpio-cells = <2>;
pm8921_gpio_keys: gpio-keys-state {
- volume-keys {
+ volume-keys-pins {
pins = "gpio20", "gpio21";
function = "normal";
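Note: the new per-variant limits on gpio-line-names and gpio-reserved-ranges can be exercised as in this sketch for a 4-GPIO PMIC (pm8916); the register offset, line names and reserved range are illustrative:

    pm8916_gpios: gpio@c000 {
        compatible = "qcom,pm8916-gpio", "qcom,spmi-gpio";
        reg = <0xc000>; /* offset illustrative */
        gpio-controller;
        #gpio-cells = <2>;
        gpio-ranges = <&pm8916_gpios 0 0 4>;
        gpio-line-names = "VOL_UP", "NC", "NC", "NC"; /* names illustrative */
        gpio-reserved-ranges = <1 3>; /* GPIOs 2-4 unused on this board */
        interrupt-controller;
        #interrupt-cells = <2>;
    };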
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,qcm2290-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,qcm2290-pinctrl.yaml
index 206f4f238736..3f4f1c0360b5 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,qcm2290-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,qcm2290-pinctrl.yaml
@@ -42,8 +42,7 @@ properties:
gpio-ranges:
maxItems: 1
- wakeup-parent:
- maxItems: 1
+ wakeup-parent: true
#PIN CONFIGURATION NODES
patternProperties:
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml
new file mode 100644
index 000000000000..d32ee32776e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml
@@ -0,0 +1,115 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. Low Power Audio SubSystem (LPASS)
+ Low Power Island (LPI) TLMM block
+
+maintainers:
+ - Srinivasa Rao Mandadapu <srivasam@codeaurora.org>
+ - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+description: |
+ This binding describes the Top Level Mode Multiplexer block found in the
+ LPASS LPI IP on most Qualcomm SoCs.
+
+properties:
+ compatible:
+ const: qcom,sc7280-lpass-lpi-pinctrl
+
+ reg:
+ minItems: 2
+ maxItems: 2
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ description: Specifying the pin number and flags, as defined in
+ include/dt-bindings/gpio/gpio.h
+ const: 2
+
+ gpio-ranges:
+ maxItems: 1
+
+#PIN CONFIGURATION NODES
+patternProperties:
+ '-pins$':
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use the standard properties below.
+ $ref: "/schemas/pinctrl/pincfg-node.yaml"
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-9][0-9])$"
+ minItems: 1
+ maxItems: 15
+
+ function:
+ enum: [ gpio, swr_tx_clk, qua_mi2s_sclk, swr_tx_data, qua_mi2s_ws,
+ qua_mi2s_data, swr_rx_clk, swr_rx_data, dmic1_clk, i2s1_clk,
+ dmic1_data, i2s1_ws, dmic2_clk, dmic2_data, i2s1_data,
+ i2s2_clk, wsa_swr_clk, i2s2_ws, wsa_swr_data, dmic3_clk,
+ dmic3_data, i2s2_data ]
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+
+ drive-strength:
+ enum: [2, 4, 6, 8, 10, 12, 14, 16]
+ default: 2
+ description:
+ Selects the drive strength for the specified pins, in mA.
+
+ slew-rate:
+ enum: [0, 1, 2, 3]
+ default: 0
+ description: |
+ 0: No adjustments
+ 1: Higher Slew rate (faster edges)
+ 2: Lower Slew rate (slower edges)
+ 3: Reserved (No adjustments)
+
+ bias-pull-down: true
+
+ bias-pull-up: true
+
+ bias-disable: true
+
+ output-high: true
+
+ output-low: true
+
+ required:
+ - pins
+ - function
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - '#gpio-cells'
+ - gpio-ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ lpass_tlmm: pinctrl@33c0000 {
+ compatible = "qcom,sc7280-lpass-lpi-pinctrl";
+ reg = <0x33c0000 0x20000>,
+ <0x3550000 0x10000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&lpass_tlmm 0 0 15>;
+ };
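Note: the example above only instantiates the controller. A pin configuration subnode matching the '-pins$' pattern, placed inside the lpass_tlmm node, would look like the following sketch (the GPIO number is illustrative):

    dmic1-clk-pins {
        pins = "gpio6"; /* pin number illustrative */
        function = "dmic1_clk";
        drive-strength = <8>;
        bias-disable;
    };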
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
index 6c7c3f6a140e..2d228164357c 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
@@ -42,8 +42,7 @@ properties:
gpio-ranges:
maxItems: 1
- wakeup-parent:
- maxItems: 1
+ wakeup-parent: true
#PIN CONFIGURATION NODES
patternProperties:
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm6115-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm6115-pinctrl.yaml
index cfcde405d30a..a7a2bb8bff46 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sm6115-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sm6115-pinctrl.yaml
@@ -49,8 +49,7 @@ properties:
gpio-ranges:
maxItems: 1
- wakeup-parent:
- maxItems: 1
+ wakeup-parent: true
#PIN CONFIGURATION NODES
patternProperties:
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,lpass-lpi-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm8250-lpass-lpi-pinctrl.yaml
index 5c5542f1627c..06efb1382876 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,lpass-lpi-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sm8250-lpass-lpi-pinctrl.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/pinctrl/qcom,lpass-lpi-pinctrl.yaml#
+$id: http://devicetree.org/schemas/pinctrl/qcom,sm8250-lpass-lpi-pinctrl.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Technologies, Inc. Low Power Audio SubSystem (LPASS)
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml
index cfa2c50fdb93..15bb1018cf21 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml
@@ -49,8 +49,7 @@ properties:
gpio-ranges:
maxItems: 1
- wakeup-parent:
- maxItems: 1
+ wakeup-parent: true
#PIN CONFIGURATION NODES
patternProperties:
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,tlmm-common.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,tlmm-common.yaml
index 780f15bb5e40..c88c8dcb69d9 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,tlmm-common.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,tlmm-common.yaml
@@ -42,7 +42,6 @@ properties:
description:
Specifying the interrupt-controller used to wake up the system when the
TLMM block has been powered down.
- maxItems: 1
gpio-reserved-ranges:
description:
diff --git a/Documentation/devicetree/bindings/pinctrl/ralink,mt7620-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/ralink,mt7620-pinctrl.yaml
new file mode 100644
index 000000000000..4d820df24b89
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/ralink,mt7620-pinctrl.yaml
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/ralink,mt7620-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ralink MT7620 Pin Controller
+
+maintainers:
+ - Arınç ÜNAL <arinc.unal@arinc9.com>
+ - Sergio Paracuellos <sergio.paracuellos@gmail.com>
+
+description:
+ Ralink MT7620 pin controller for MT7620, MT7628 and MT7688 SoCs.
+ The pin controller can only set the muxing of pin groups. Muxing individual
+ pins is not supported. There is no pinconf support.
+
+properties:
+ compatible:
+ const: ralink,mt7620-pinctrl
+
+patternProperties:
+ '-pins$':
+ type: object
+ patternProperties:
+ '^(.*-)?pinmux$':
+ type: object
+ description: node for pinctrl.
+ $ref: pinmux-node.yaml#
+
+ properties:
+ groups:
+ description: The pin group to select.
+ enum: [
+ # For MT7620 SoC
+ ephy, i2c, mdio, nd_sd, pa, pcie, rgmii1, rgmii2, spi, spi refclk,
+ uartf, uartlite, wdt, wled,
+
+ # For MT7628 and MT7688 SoCs
+ gpio, i2c, i2s, p0led_an, p0led_kn, p1led_an, p1led_kn, p2led_an,
+ p2led_kn, p3led_an, p3led_kn, p4led_an, p4led_kn, perst, pwm0,
+ pwm1, refclk, sdmode, spi, spi cs1, spis, uart0, uart1, uart2,
+ wdt, wled_an, wled_kn,
+ ]
+
+ function:
+ description: The mux function to select.
+ enum: [
+ # For MT7620 SoC
+ ephy, gpio, gpio i2s, gpio uartf, i2c, i2s uartf, mdio, nand, pa,
+ pcie refclk, pcie rst, pcm gpio, pcm i2s, pcm uartf, refclk,
+ rgmii1, rgmii2, sd, spi, spi refclk, uartf, uartlite, wdt refclk,
+ wdt rst, wled,
+
+ # For MT7628 and MT7688 SoCs
+ antenna, debug, gpio, i2c, i2s, jtag, p0led_an, p0led_kn,
+ p1led_an, p1led_kn, p2led_an, p2led_kn, p3led_an, p3led_kn,
+ p4led_an, p4led_kn, pcie, pcm, perst, pwm, pwm0, pwm1, pwm_uart2,
+ refclk, rsvd, sdxc, sdxc d5 d4, sdxc d6, sdxc d7, spi, spi cs1,
+ spis, sw_r, uart0, uart1, uart2, utif, wdt, wled_an, wled_kn, -,
+ ]
+
+ required:
+ - groups
+ - function
+
+ additionalProperties: false
+
+ additionalProperties: false
+
+allOf:
+ - $ref: "pinctrl.yaml#"
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ # Pinmux controller node
+ - |
+ pinctrl {
+ compatible = "ralink,mt7620-pinctrl";
+
+ i2c_pins: i2c0-pins {
+ pinmux {
+ groups = "i2c";
+ function = "i2c";
+ };
+ };
+ };
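Note: the same compatible covers MT7620 and MT7628/MT7688 with different group and function name sets. As a second sketch, an MT7628 board muxing the uart1 group to its uart1 function:

    uart1_pins: uart1-pins {
        pinmux {
            groups = "uart1";
            function = "uart1";
        };
    };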
diff --git a/Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinmux.yaml b/Documentation/devicetree/bindings/pinctrl/ralink,mt7621-pinctrl.yaml
index 9de8b0c075e2..61e5c847e8c8 100644
--- a/Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinmux.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/ralink,mt7621-pinctrl.yaml
@@ -1,21 +1,23 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
-$id: http://devicetree.org/schemas/pinctrl/ralink,rt2880-pinmux.yaml#
+$id: http://devicetree.org/schemas/pinctrl/ralink,mt7621-pinctrl.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Ralink rt2880 pinmux controller
+title: Ralink MT7621 Pin Controller
maintainers:
+ - Arınç ÜNAL <arinc.unal@arinc9.com>
- Sergio Paracuellos <sergio.paracuellos@gmail.com>
description:
- The rt2880 pinmux can only set the muxing of pin groups. Muxing indiviual pins
- is not supported. There is no pinconf support.
+ Ralink MT7621 pin controller for MT7621 SoC.
+ The pin controller can only set the muxing of pin groups. Muxing individual
+ pins is not supported. There is no pinconf support.
properties:
compatible:
- const: ralink,rt2880-pinmux
+ const: ralink,mt7621-pinctrl
patternProperties:
'-pins$':
@@ -28,14 +30,15 @@ patternProperties:
properties:
groups:
- description: Name of the pin group to use for the functions.
- enum: [i2c, jtag, mdio, pcie, rgmii1, rgmii2, sdhci, spi,
- uart1, uart2, uart3, wdt]
+ description: The pin group to select.
+ enum: [i2c, jtag, mdio, pcie, rgmii1, rgmii2, sdhci, spi, uart1,
+ uart2, uart3, wdt]
+
function:
- description: The mux function to select
+ description: The mux function to select.
enum: [gpio, i2c, i2s, jtag, mdio, nand1, nand2, pcie refclk,
- pcie rst, pcm, rgmii1, rgmii2, sdhci, spdif2, spdif3,
- spi, uart1, uart2, uart3, wdt refclk, wdt rst]
+ pcie rst, pcm, rgmii1, rgmii2, sdhci, spdif2, spdif3, spi,
+ uart1, uart2, uart3, wdt refclk, wdt rst]
required:
- groups
@@ -57,7 +60,7 @@ examples:
# Pinmux controller node
- |
pinctrl {
- compatible = "ralink,rt2880-pinmux";
+ compatible = "ralink,mt7621-pinctrl";
i2c_pins: i2c0-pins {
pinmux {
diff --git a/Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinctrl.yaml
new file mode 100644
index 000000000000..56e5becabcfd
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinctrl.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/ralink,rt2880-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ralink RT2880 Pin Controller
+
+maintainers:
+ - Arınç ÜNAL <arinc.unal@arinc9.com>
+ - Sergio Paracuellos <sergio.paracuellos@gmail.com>
+
+description:
+ Ralink RT2880 pin controller for RT2880 SoC.
+ The pin controller can only set the muxing of pin groups. Muxing individual
+ pins is not supported. There is no pinconf support.
+
+properties:
+ compatible:
+ const: ralink,rt2880-pinctrl
+
+patternProperties:
+ '-pins$':
+ type: object
+ patternProperties:
+ '^(.*-)?pinmux$':
+ type: object
+ description: node for pinctrl.
+ $ref: pinmux-node.yaml#
+
+ properties:
+ groups:
+ description: The pin group to select.
+ enum: [i2c, spi, uartlite, jtag, mdio, sdram, pci]
+
+ function:
+ description: The mux function to select.
+ enum: [gpio, i2c, spi, uartlite, jtag, mdio, sdram, pci]
+
+ required:
+ - groups
+ - function
+
+ additionalProperties: false
+
+ additionalProperties: false
+
+allOf:
+ - $ref: "pinctrl.yaml#"
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ # Pinmux controller node
+ - |
+ pinctrl {
+ compatible = "ralink,rt2880-pinctrl";
+
+ i2c_pins: i2c0-pins {
+ pinmux {
+ groups = "i2c";
+ function = "i2c";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/ralink,rt305x-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/ralink,rt305x-pinctrl.yaml
new file mode 100644
index 000000000000..425401c54269
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/ralink,rt305x-pinctrl.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/ralink,rt305x-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ralink RT305X Pin Controller
+
+maintainers:
+ - Arınç ÜNAL <arinc.unal@arinc9.com>
+ - Sergio Paracuellos <sergio.paracuellos@gmail.com>
+
+description:
+ Ralink RT305X pin controller for RT3050, RT3052, RT3350, RT3352 and RT5350
+ SoCs.
+ The pin controller can only set the muxing of pin groups. Muxing individual
+ pins is not supported. There is no pinconf support.
+
+properties:
+ compatible:
+ const: ralink,rt305x-pinctrl
+
+patternProperties:
+ '-pins$':
+ type: object
+ patternProperties:
+ '^(.*-)?pinmux$':
+ type: object
+ description: node for pinctrl.
+ $ref: pinmux-node.yaml#
+
+ properties:
+ groups:
+ description: The pin group to select.
+ enum: [
+ # For RT3050, RT3052 and RT3350 SoCs
+ i2c, jtag, mdio, rgmii, sdram, spi, uartf, uartlite,
+
+ # For RT3352 SoC
+ i2c, jtag, led, lna, mdio, pa, rgmii, spi, spi_cs1, uartf,
+ uartlite,
+
+ # For RT5350 SoC
+ i2c, jtag, led, spi, spi_cs1, uartf, uartlite,
+ ]
+
+ function:
+ description: The mux function to select.
+ enum: [
+ # For RT3050, RT3052 and RT3350 SoCs
+ gpio, gpio i2s, gpio uartf, i2c, i2s uartf, jtag, mdio, pcm gpio,
+ pcm i2s, pcm uartf, rgmii, sdram, spi, uartf, uartlite,
+
+ # For RT3352 SoC
+ gpio, gpio i2s, gpio uartf, i2c, i2s uartf, jtag, led, lna, mdio,
+ pa, pcm gpio, pcm i2s, pcm uartf, rgmii, spi, spi_cs1, uartf,
+ uartlite, wdg_cs1,
+
+ # For RT5350 SoC
+ gpio, gpio i2s, gpio uartf, i2c, i2s uartf, jtag, led, pcm gpio,
+ pcm i2s, pcm uartf, spi, spi_cs1, uartf, uartlite, wdg_cs1,
+ ]
+
+ required:
+ - groups
+ - function
+
+ additionalProperties: false
+
+ additionalProperties: false
+
+allOf:
+ - $ref: "pinctrl.yaml#"
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ # Pinmux controller node
+ - |
+ pinctrl {
+ compatible = "ralink,rt305x-pinctrl";
+
+ i2c_pins: i2c0-pins {
+ pinmux {
+ groups = "i2c";
+ function = "i2c";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/ralink,rt3883-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/ralink,rt3883-pinctrl.yaml
new file mode 100644
index 000000000000..feb6e66dcb61
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/ralink,rt3883-pinctrl.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/ralink,rt3883-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ralink RT3883 Pin Controller
+
+maintainers:
+ - Arınç ÜNAL <arinc.unal@arinc9.com>
+ - Sergio Paracuellos <sergio.paracuellos@gmail.com>
+
+description:
+ Ralink RT3883 pin controller for RT3883 SoC.
+ The pin controller can only set the muxing of pin groups. Muxing individual
+ pins is not supported. There is no pinconf support.
+
+properties:
+ compatible:
+ const: ralink,rt3883-pinctrl
+
+patternProperties:
+ '-pins$':
+ type: object
+ patternProperties:
+ '^(.*-)?pinmux$':
+ type: object
+ description: node for pinctrl.
+ $ref: pinmux-node.yaml#
+
+ properties:
+ groups:
+ description: The pin group to select.
+ enum: [ge1, ge2, i2c, jtag, lna a, lna g, mdio, pci, spi, uartf,
+ uartlite]
+
+ function:
+ description: The mux function to select.
+ enum: [ge1, ge2, gpio, gpio i2s, gpio uartf, i2c, i2s uartf, jtag,
+ lna a, lna g, mdio, pci-dev, pci-fnc, pci-host1, pci-host2,
+ pcm gpio, pcm i2s, pcm uartf, spi, uartf, uartlite]
+
+ required:
+ - groups
+ - function
+
+ additionalProperties: false
+
+ additionalProperties: false
+
+allOf:
+ - $ref: "pinctrl.yaml#"
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ # Pinmux controller node
+ - |
+ pinctrl {
+ compatible = "ralink,rt3883-pinctrl";
+
+ i2c_pins: i2c0-pins {
+ pinmux {
+ groups = "i2c";
+ function = "i2c";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
index 9ccf54870aa4..52df1b146174 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
@@ -11,8 +11,8 @@ maintainers:
- Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
description:
- The Renesas SoCs of the RZ/{G2L,V2L} series feature a combined Pin and GPIO
- controller.
+ The Renesas RZ/{G2L,V2L} series and similar SoCs feature a combined Pin and
+ GPIO controller.
Pin multiplexing and GPIO configuration is performed on a per-pin basis.
Each port features up to 8 pins, each of them configurable for GPIO function
(port mode) or in alternate function mode.
@@ -23,6 +23,7 @@ properties:
oneOf:
- items:
- enum:
+ - renesas,r9a07g043-pinctrl # RZ/G2UL{Type-1,Type-2}
- renesas,r9a07g044-pinctrl # RZ/G2{L,LC}
- items:
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
index b0eae3a67ab1..677a285ca416 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
@@ -33,6 +33,7 @@ properties:
enum:
- rockchip,px30-pinctrl
- rockchip,rk2928-pinctrl
+ - rockchip,rk3036-pinctrl
- rockchip,rk3066a-pinctrl
- rockchip,rk3066b-pinctrl
- rockchip,rk3128-pinctrl
@@ -44,6 +45,7 @@ properties:
- rockchip,rk3368-pinctrl
- rockchip,rk3399-pinctrl
- rockchip,rk3568-pinctrl
+ - rockchip,rk3588-pinctrl
- rockchip,rv1108-pinctrl
rockchip,grf:
@@ -129,7 +131,7 @@ additionalProperties:
description:
Pin bank index.
- minimum: 0
- maximum: 6
+ maximum: 10
description:
Mux 0 means GPIO and mux 1 to N means
the specific device function.
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cache_sram.txt b/Documentation/devicetree/bindings/powerpc/fsl/cache_sram.txt
deleted file mode 100644
index 781955f5217d..000000000000
--- a/Documentation/devicetree/bindings/powerpc/fsl/cache_sram.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-* Freescale PQ3 and QorIQ based Cache SRAM
-
-Freescale's mpc85xx and some QorIQ platforms provide an
-option of configuring a part of (or full) cache memory
-as SRAM. This cache SRAM representation in the device
-tree should be done as under:-
-
-Required properties:
-
-- compatible : should be "fsl,p2020-cache-sram"
-- fsl,cache-sram-ctlr-handle : points to the L2 controller
-- reg : offset and length of the cache-sram.
-
-Example:
-
-cache-sram@fff00000 {
- fsl,cache-sram-ctlr-handle = <&L2>;
- reg = <0 0xfff00000 0 0x10000>;
- compatible = "fsl,p2020-cache-sram";
-};
diff --git a/Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml b/Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml
new file mode 100644
index 000000000000..ab45df80345d
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2022 Microchip Technology, Inc. and its subsidiaries
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/atmel,at91sam-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel/Microchip PWM controller
+
+maintainers:
+ - Claudiu Beznea <claudiu.beznea@microchip.com>
+
+allOf:
+ - $ref: "pwm.yaml#"
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - atmel,at91sam9rl-pwm
+ - atmel,sama5d3-pwm
+ - atmel,sama5d2-pwm
+ - microchip,sam9x60-pwm
+ - items:
+ - const: microchip,sama7g5-pwm
+ - const: atmel,sama5d2-pwm
+
+ reg:
+ maxItems: 1
+
+ "#pwm-cells":
+ const: 3
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ pwm0: pwm@f8034000 {
+ compatible = "atmel,at91sam9rl-pwm";
+ reg = <0xf8034000 0x400>;
+ #pwm-cells = <3>;
+ };
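Note: the removed text binding also illustrated a PWM consumer, which the YAML schema no longer carries. Reproduced here as a sketch, with the missing semicolons on the pwms lines fixed:

    pwmleds {
        compatible = "pwm-leds";

        d1 {
            label = "d1";
            pwms = <&pwm0 3 5000 0>;
            max-brightness = <255>;
        };

        d2 {
            label = "d2";
            pwms = <&pwm0 1 5000 1>;
            max-brightness = <255>;
        };
    };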
diff --git a/Documentation/devicetree/bindings/pwm/atmel-pwm.txt b/Documentation/devicetree/bindings/pwm/atmel-pwm.txt
deleted file mode 100644
index fbb5325be1f0..000000000000
--- a/Documentation/devicetree/bindings/pwm/atmel-pwm.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-Atmel PWM controller
-
-Required properties:
- - compatible: should be one of:
- - "atmel,at91sam9rl-pwm"
- - "atmel,sama5d3-pwm"
- - "atmel,sama5d2-pwm"
- - "microchip,sam9x60-pwm"
- - reg: physical base address and length of the controller's registers
- - #pwm-cells: Should be 3. See pwm.yaml in this directory for a
- description of the cells format.
-
-Example:
-
- pwm0: pwm@f8034000 {
- compatible = "atmel,at91sam9rl-pwm";
- reg = <0xf8034000 0x400>;
- #pwm-cells = <3>;
- };
-
- pwmleds {
- compatible = "pwm-leds";
-
- d1 {
- label = "d1";
- pwms = <&pwm0 3 5000 0>
- max-brightness = <255>;
- };
-
- d2 {
- label = "d2";
- pwms = <&pwm0 1 5000 1>
- max-brightness = <255>;
- };
- };
diff --git a/Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.yaml b/Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.yaml
index 7ab6912a845f..c8577bdf6c94 100644
--- a/Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.yaml
@@ -21,7 +21,14 @@ allOf:
properties:
compatible:
- const: google,cros-ec-pwm
+ oneOf:
+ - description: PWM controlled using EC_PWM_TYPE_GENERIC channels.
+ items:
+ - const: google,cros-ec-pwm
+ - description: PWM controlled using CROS_EC_PWM_DT_<...> types.
+ items:
+ - const: google,cros-ec-pwm-type
+
"#pwm-cells":
description: The cell specifies the PWM index.
const: 1
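Note: a minimal sketch of the new compatible as a sub-node of the ChromeOS EC; the parent EC node is assumed and not shown, and with this variant the single PWM cell carries a CROS_EC_PWM_DT_<...> type rather than a channel index:

    cros_ec_pwm: pwm {
        compatible = "google,cros-ec-pwm-type";
        #pwm-cells = <1>;
    };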
diff --git a/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml b/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml
new file mode 100644
index 000000000000..e4fe2d1bfef5
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/mediatek,pwm-disp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek DISP_PWM Controller Device Tree Bindings
+
+maintainers:
+ - Jitao Shi <jitao.shi@mediatek.com>
+ - Xinlei Lee <xinlei.lee@mediatek.com>
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - mediatek,mt2701-disp-pwm
+ - mediatek,mt6595-disp-pwm
+ - mediatek,mt8173-disp-pwm
+ - mediatek,mt8183-disp-pwm
+ - items:
+ - const: mediatek,mt8167-disp-pwm
+ - const: mediatek,mt8173-disp-pwm
+ - items:
+ - enum:
+ - mediatek,mt8186-disp-pwm
+ - mediatek,mt8192-disp-pwm
+ - mediatek,mt8195-disp-pwm
+ - const: mediatek,mt8183-disp-pwm
+
+ reg:
+ maxItems: 1
+
+ "#pwm-cells":
+ const: 2
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Main Clock
+ - description: Mm Clock
+
+ clock-names:
+ items:
+ - const: main
+ - const: mm
+
+required:
+ - compatible
+ - reg
+ - "#pwm-cells"
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/mt8173-clk.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ pwm0: pwm@1401e000 {
+ compatible = "mediatek,mt8173-disp-pwm";
+ reg = <0x1401e000 0x1000>;
+ #pwm-cells = <2>;
+ clocks = <&mmsys CLK_MM_DISP_PWM026M>,
+ <&mmsys CLK_MM_DISP_PWM0MM>;
+ clock-names = "main", "mm";
+ };
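Note: the old text binding also showed the typical backlight consumer, which the YAML example no longer carries. As a sketch, with the regulator and GPIO phandles taken from the removed, board-specific example:

    backlight_lcd: backlight {
        compatible = "pwm-backlight";
        pwms = <&pwm0 0 1000000>;
        brightness-levels = <0 16 32 48 64 80 96 112
                             128 144 160 176 192 208 224 240 255>;
        default-brightness-level = <9>;
        power-supply = <&mt6397_vio18_reg>;
        enable-gpios = <&pio 95 GPIO_ACTIVE_HIGH>;
    };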
diff --git a/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt b/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
index 25ed214473d7..033d1fc0f405 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
@@ -3,6 +3,7 @@ MediaTek PWM controller
Required properties:
- compatible: should be "mediatek,<name>-pwm":
- "mediatek,mt2712-pwm": found on mt2712 SoC.
+ - "mediatek,mt6795-pwm": found on mt6795 SoC.
- "mediatek,mt7622-pwm": found on mt7622 SoC.
- "mediatek,mt7623-pwm": found on mt7623 SoC.
- "mediatek,mt7628-pwm": found on mt7628 SoC.
diff --git a/Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt b/Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt
deleted file mode 100644
index 691e58b6c223..000000000000
--- a/Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-MediaTek display PWM controller
-
-Required properties:
- - compatible: should be "mediatek,<name>-disp-pwm":
- - "mediatek,mt2701-disp-pwm": found on mt2701 SoC.
- - "mediatek,mt6595-disp-pwm": found on mt6595 SoC.
- - "mediatek,mt8167-disp-pwm", "mediatek,mt8173-disp-pwm": found on mt8167 SoC.
- - "mediatek,mt8173-disp-pwm": found on mt8173 SoC.
- - "mediatek,mt8183-disp-pwm": found on mt8183 SoC.$
- - reg: physical base address and length of the controller's registers.
- - #pwm-cells: must be 2. See pwm.yaml in this directory for a description of
- the cell format.
- - clocks: phandle and clock specifier of the PWM reference clock.
- - clock-names: must contain the following:
- - "main": clock used to generate PWM signals.
- - "mm": sync signals from the modules of mmsys.
- - pinctrl-names: Must contain a "default" entry.
- - pinctrl-0: One property must exist for each entry in pinctrl-names.
- See pinctrl/pinctrl-bindings.txt for details of the property values.
-
-Example:
- pwm0: pwm@1401e000 {
- compatible = "mediatek,mt8173-disp-pwm",
- "mediatek,mt6595-disp-pwm";
- reg = <0 0x1401e000 0 0x1000>;
- #pwm-cells = <2>;
- clocks = <&mmsys CLK_MM_DISP_PWM026M>,
- <&mmsys CLK_MM_DISP_PWM0MM>;
- clock-names = "main", "mm";
- pinctrl-names = "default";
- pinctrl-0 = <&disp_pwm0_pins>;
- };
-
- backlight_lcd: backlight_lcd {
- compatible = "pwm-backlight";
- pwms = <&pwm0 0 1000000>;
- brightness-levels = <
- 0 16 32 48 64 80 96 112
- 128 144 160 176 192 208 224 240
- 255
- >;
- default-brightness-level = <9>;
- power-supply = <&mt6397_vio18_reg>;
- enable-gpios = <&pio 95 GPIO_ACTIVE_HIGH>;
- };
diff --git a/Documentation/devicetree/bindings/pwm/sunplus,sp7021-pwm.yaml b/Documentation/devicetree/bindings/pwm/sunplus,sp7021-pwm.yaml
new file mode 100644
index 000000000000..d4fc9e8db1d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/sunplus,sp7021-pwm.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) Sunplus Co., Ltd. 2021
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/sunplus,sp7021-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sunplus SoC SP7021 PWM Controller
+
+maintainers:
+ - Hammer Hsieh <hammerh0314@gmail.com>
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ const: sunplus,sp7021-pwm
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#pwm-cells':
+ const: 2
+
+unevaluatedProperties: false
+
+required:
+ - reg
+ - clocks
+
+examples:
+ - |
+ pwm: pwm@9c007a00 {
+ compatible = "sunplus,sp7021-pwm";
+ reg = <0x9c007a00 0x80>;
+ clocks = <&clkc 0xa2>;
+ #pwm-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml b/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml
index 5d2d989de893..37402c370fbb 100644
--- a/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml
@@ -55,7 +55,7 @@ examples:
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
- regulator-allowed-modes = <0 1 2 4>;
+ regulator-allowed-modes = <0 1 2>;
};
vbuck3 {
@@ -63,7 +63,7 @@ examples:
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
- regulator-allowed-modes = <0 1 2 4>;
+ regulator-allowed-modes = <0 1 2>;
};
};
};
diff --git a/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml
index fc16d903353e..3a1f59ad79e2 100644
--- a/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml
@@ -15,14 +15,15 @@ maintainers:
properties:
compatible:
enum:
- - fsl,imx8mq-cm4
+ - fsl,imx6sx-cm4
+ - fsl,imx7d-cm4
+ - fsl,imx7ulp-cm4
- fsl,imx8mm-cm4
- fsl,imx8mn-cm7
- fsl,imx8mp-cm7
+ - fsl,imx8mq-cm4
- fsl,imx8ulp-cm33
- - fsl,imx7d-cm4
- - fsl,imx7ulp-cm4
- - fsl,imx6sx-cm4
+ - fsl,imx93-cm33
clocks:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml b/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
index 5b693a2d049c..eec3b9c4c713 100644
--- a/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
@@ -23,11 +23,13 @@ properties:
reg:
description:
- Should contain the address ranges for memory regions SRAM, CFG, and
- L1TCM.
+ Should contain the address ranges for memory regions SRAM, CFG, and,
+ on some platforms, L1TCM.
+ minItems: 2
maxItems: 3
reg-names:
+ minItems: 2
items:
- const: sram
- const: cfg
@@ -42,21 +44,48 @@ properties:
clock-names:
const: main
+ interrupts:
+ maxItems: 1
+
+ firmware-name:
+ $ref: /schemas/types.yaml#/definitions/string
+ description:
+ If present, name (or relative path) of the file within the
+ firmware search path containing the firmware image used when
+ initializing SCP.
+
+ memory-region:
+ maxItems: 1
+
required:
- compatible
- reg
- reg-names
-if:
- properties:
- compatible:
- enum:
- - mediatek,mt8183-scp
- - mediatek,mt8192-scp
-then:
- required:
- - clocks
- - clock-names
+allOf:
+ - if:
+ properties:
+ compatible:
+ enum:
+ - mediatek,mt8183-scp
+ - mediatek,mt8192-scp
+ then:
+ required:
+ - clocks
+ - clock-names
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - mediatek,mt8183-scp
+ - mediatek,mt8186-scp
+ then:
+ properties:
+ reg:
+ maxItems: 2
+ reg-names:
+ maxItems: 2
additionalProperties:
type: object
@@ -76,10 +105,10 @@ additionalProperties:
examples:
- |
- #include <dt-bindings/clock/mt8183-clk.h>
+ #include <dt-bindings/clock/mt8192-clk.h>
scp@10500000 {
- compatible = "mediatek,mt8183-scp";
+ compatible = "mediatek,mt8192-scp";
reg = <0x10500000 0x80000>,
<0x10700000 0x8000>,
<0x10720000 0xe0000>;
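Note: a hedged sketch of the newly documented optional properties on a two-region platform; the MT8186 register ranges, firmware path and reserved-memory phandle below are illustrative, not taken from this schema:

    scp@10500000 {
        compatible = "mediatek,mt8186-scp";
        reg = <0x10500000 0x40000>, <0x105c0000 0x19080>; /* ranges illustrative */
        reg-names = "sram", "cfg";
        firmware-name = "mediatek/mt8186/scp.img"; /* path illustrative */
        memory-region = <&scp_mem_reserved>;       /* phandle illustrative */
    };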
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
index a4409c398193..947f94548d0e 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
@@ -16,6 +16,7 @@ description:
properties:
compatible:
enum:
+ - qcom,msm8226-adsp-pil
- qcom,msm8974-adsp-pil
- qcom,msm8996-adsp-pil
- qcom,msm8996-slpi-pil
@@ -29,6 +30,9 @@ properties:
- qcom,sc8180x-adsp-pas
- qcom,sc8180x-cdsp-pas
- qcom,sc8180x-mpss-pas
+ - qcom,sc8280xp-adsp-pas
+ - qcom,sc8280xp-nsp0-pas
+ - qcom,sc8280xp-nsp1-pas
- qcom,sdm660-adsp-pas
- qcom,sdm845-adsp-pas
- qcom,sdm845-cdsp-pas
@@ -159,6 +163,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,msm8226-adsp-pil
- qcom,msm8974-adsp-pil
- qcom,msm8996-adsp-pil
- qcom,msm8996-slpi-pil
@@ -169,6 +174,9 @@ allOf:
- qcom,sc8180x-adsp-pas
- qcom,sc8180x-cdsp-pas
- qcom,sc8180x-mpss-pas
+ - qcom,sc8280xp-adsp-pas
+ - qcom,sc8280xp-nsp0-pas
+ - qcom,sc8280xp-nsp1-pas
- qcom,sdm845-adsp-pas
- qcom,sdm845-cdsp-pas
- qcom,sm6350-adsp-pas
@@ -274,6 +282,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,msm8226-adsp-pil
- qcom,msm8974-adsp-pil
- qcom,msm8996-adsp-pil
- qcom,msm8996-slpi-pil
@@ -284,6 +293,9 @@ allOf:
- qcom,qcs404-wcss-pas
- qcom,sc8180x-adsp-pas
- qcom,sc8180x-cdsp-pas
+ - qcom,sc8280xp-adsp-pas
+ - qcom,sc8280xp-nsp0-pas
+ - qcom,sc8280xp-nsp1-pas
- qcom,sdm845-adsp-pas
- qcom,sdm845-cdsp-pas
- qcom,sm6350-adsp-pas
@@ -364,6 +376,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,msm8226-adsp-pil
- qcom,msm8996-adsp-pil
- qcom,msm8998-adsp-pas
then:
@@ -471,6 +484,7 @@ allOf:
enum:
- qcom,sc8180x-adsp-pas
- qcom,sc8180x-cdsp-pas
+ - qcom,sc8280xp-adsp-pas
- qcom,sm6350-adsp-pas
- qcom,sm8150-slpi-pas
- qcom,sm8250-adsp-pas
@@ -513,6 +527,22 @@ allOf:
compatible:
contains:
enum:
+ - qcom,sc8280xp-nsp0-pas
+ - qcom,sc8280xp-nsp1-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: NSP power domain
+ power-domain-names:
+ items:
+ - const: nsp
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,qcs404-cdsp-pas
then:
properties:
@@ -546,6 +576,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,msm8226-adsp-pil
- qcom,msm8974-adsp-pil
- qcom,msm8996-adsp-pil
- qcom,msm8996-slpi-pil
diff --git a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
index be3d9b0e876b..da50f0e99fe2 100644
--- a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
@@ -43,8 +43,8 @@ properties:
items:
- items:
- description: Phandle of syscon block
- - description: FIXME
- - description: FIXME
+ - description: The offset of the trust zone setting register
+ - description: The field mask of the trust zone state
interrupts:
description: Should contain the WWDG1 watchdog reset interrupt
@@ -101,8 +101,8 @@ properties:
items:
- items:
- description: Phandle of syscon block
- - description: FIXME
- - description: FIXME
+ - description: The offset of the power setting register
+ - description: The field mask of the PDDS selection
st,syscfg-m4-state:
$ref: "/schemas/types.yaml#/definitions/phandle-array"
@@ -111,8 +111,8 @@ properties:
items:
- items:
- description: Phandle of syscon block with the tamp register
- - description: FIXME
- - description: FIXME
+ - description: The offset of the tamp register
+ - description: The field mask of the Cortex-M4 state
st,syscfg-rsc-tbl:
$ref: "/schemas/types.yaml#/definitions/phandle-array"
@@ -122,8 +122,8 @@ properties:
items:
- items:
- description: Phandle of syscon block with the tamp register
- - description: FIXME
- - description: FIXME
+ - description: The offset of the tamp register
+ - description: The field mask of the Cortex-M4 resource table address
st,auto-boot:
$ref: /schemas/types.yaml#/definitions/flag
diff --git a/Documentation/devicetree/bindings/reset/qcom,aoss-reset.yaml b/Documentation/devicetree/bindings/reset/qcom,aoss-reset.yaml
index a054757f4d9f..d92e2b3cc83f 100644
--- a/Documentation/devicetree/bindings/reset/qcom,aoss-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/qcom,aoss-reset.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm AOSS Reset Controller
maintainers:
- - Sibi Sankar <sibis@codeaurora.org>
+ - Sibi Sankar <quic_sibis@quicinc.com>
description:
The bindings describe the reset-controller found on AOSS-CC (always on
diff --git a/Documentation/devicetree/bindings/reset/qcom,pdc-global.yaml b/Documentation/devicetree/bindings/reset/qcom,pdc-global.yaml
index 831ea8d5d83f..ca5d79332189 100644
--- a/Documentation/devicetree/bindings/reset/qcom,pdc-global.yaml
+++ b/Documentation/devicetree/bindings/reset/qcom,pdc-global.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm PDC Global
maintainers:
- - Sibi Sankar <sibis@codeaurora.org>
+ - Sibi Sankar <quic_sibis@quicinc.com>
description:
The bindings describes the reset-controller found on PDC-Global (Power Domain
diff --git a/Documentation/devicetree/bindings/riscv/microchip.yaml b/Documentation/devicetree/bindings/riscv/microchip.yaml
index 3f981e897126..1aa7336a9672 100644
--- a/Documentation/devicetree/bindings/riscv/microchip.yaml
+++ b/Documentation/devicetree/bindings/riscv/microchip.yaml
@@ -20,6 +20,8 @@ properties:
items:
- enum:
- microchip,mpfs-icicle-kit
+ - microchip,mpfs-icicle-reference-rtlv2203
+ - sundance,polarberry
- const: microchip,mpfs
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt b/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt
index 6439682c9319..217b7cd06c11 100644
--- a/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt
+++ b/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt
@@ -2,6 +2,7 @@
Required properties:
- compatible: Should contain one of:
+ "nxp,pca85073a",
"nxp,pcf85063",
"nxp,pcf85063a",
"nxp,pcf85063tp",
diff --git a/Documentation/devicetree/bindings/rtc/renesas,rzn1-rtc.yaml b/Documentation/devicetree/bindings/rtc/renesas,rzn1-rtc.yaml
new file mode 100644
index 000000000000..2d4741f51663
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/renesas,rzn1-rtc.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/renesas,rzn1-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas RZ/N1 SoCs Real-Time Clock DT bindings
+
+maintainers:
+ - Miquel Raynal <miquel.raynal@bootlin.com>
+
+allOf:
+ - $ref: rtc.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,r9a06g032-rtc
+ - const: renesas,rzn1-rtc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 3
+ maxItems: 3
+
+ interrupt-names:
+ items:
+ - const: alarm
+ - const: timer
+ - const: pps
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: hclk
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clocks
+ - clock-names
+ - power-domains
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/r9a06g032-sysctrl.h>
+ rtc@40006000 {
+ compatible = "renesas,r9a06g032-rtc", "renesas,rzn1-rtc";
+ reg = <0x40006000 0x1000>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 67 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 68 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "alarm", "timer", "pps";
+ clocks = <&sysctrl R9A06G032_HCLK_RTC>;
+ clock-names = "hclk";
+ power-domains = <&sysctrl>;
+ start-year = <2000>;
+ };
diff --git a/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml b/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
index ff364bd0fbac..30eaa62e1aed 100644
--- a/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
+++ b/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
@@ -23,7 +23,9 @@ properties:
- fsl,imx8qxp-lpuart
- fsl,imxrt1050-lpuart
- items:
- - const: fsl,imx8ulp-lpuart
+ - enum:
+ - fsl,imx93-lpuart
+ - fsl,imx8ulp-lpuart
- const: fsl,imx7ulp-lpuart
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/serial/qcom,serial-geni-qcom.yaml b/Documentation/devicetree/bindings/serial/qcom,serial-geni-qcom.yaml
new file mode 100644
index 000000000000..05a6999808d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/qcom,serial-geni-qcom.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/serial/qcom,serial-geni-qcom.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm Geni based QUP UART interface
+
+maintainers:
+ - Andy Gross <agross@kernel.org>
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+allOf:
+ - $ref: /schemas/serial/serial.yaml#
+
+properties:
+ compatible:
+ enum:
+ - qcom,geni-uart
+ - qcom,geni-debug-uart
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: se
+
+ interconnects:
+ maxItems: 2
+
+ interconnect-names:
+ items:
+ - const: qup-core
+ - const: qup-config
+
+ interrupts:
+ minItems: 1
+ items:
+ - description: UART core irq
+ - description: Wakeup irq (RX GPIO)
+
+ operating-points-v2: true
+
+ pinctrl-0: true
+ pinctrl-1: true
+
+ pinctrl-names:
+ minItems: 1
+ items:
+ - const: default
+ - const: sleep
+
+ power-domains:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - interrupts
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/qcom,gcc-sc7180.h>
+ #include <dt-bindings/interconnect/qcom,sc7180.h>
+
+ serial@a88000 {
+ compatible = "qcom,geni-uart";
+ reg = <0xa88000 0x7000>;
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ pinctrl-0 = <&qup_uart0_default>;
+ pinctrl-names = "default";
+ interconnects = <&qup_virt MASTER_QUP_CORE_0 0 &qup_virt SLAVE_QUP_CORE_0 0>,
+ <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_0 0>;
+ interconnect-names = "qup-core", "qup-config";
+ };
+...
diff --git a/Documentation/devicetree/bindings/serial/renesas,em-uart.yaml b/Documentation/devicetree/bindings/serial/renesas,em-uart.yaml
index e98ec48fee46..b25aca733b72 100644
--- a/Documentation/devicetree/bindings/serial/renesas,em-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,em-uart.yaml
@@ -9,12 +9,16 @@ title: Renesas EMMA Mobile UART Interface
maintainers:
- Magnus Damm <magnus.damm@gmail.com>
-allOf:
- - $ref: serial.yaml#
-
properties:
compatible:
- const: renesas,em-uart
+ oneOf:
+ - items:
+ - enum:
+ - renesas,r9a09g011-uart # RZ/V2M
+ - const: renesas,em-uart # generic EMMA Mobile compatible UART
+
+ - items:
+ - const: renesas,em-uart # generic EMMA Mobile compatible UART
reg:
maxItems: 1
@@ -23,10 +27,31 @@ properties:
maxItems: 1
clocks:
- maxItems: 1
+ minItems: 1
+ items:
+ - description: UART functional clock
+ - description: Internal clock to access the registers
clock-names:
- const: sclk
+ minItems: 1
+ items:
+ - const: sclk
+ - const: pclk
+
+allOf:
+ - $ref: serial.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,r9a09g011-uart
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ clock-names:
+ minItems: 2
required:
- compatible
diff --git a/Documentation/devicetree/bindings/serial/renesas,hscif.yaml b/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
index ee9804cd49bb..87180d95cd4c 100644
--- a/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
@@ -51,10 +51,16 @@ properties:
- renesas,hscif-r8a77980 # R-Car V3H
- renesas,hscif-r8a77990 # R-Car E3
- renesas,hscif-r8a77995 # R-Car D3
- - renesas,hscif-r8a779a0 # R-Car V3U
- const: renesas,rcar-gen3-hscif # R-Car Gen3 and RZ/G2
- const: renesas,hscif # generic HSCIF compatible UART
+ - items:
+ - enum:
+ - renesas,hscif-r8a779a0 # R-Car V3U
+ - renesas,hscif-r8a779g0 # R-Car V4H
+ - const: renesas,rcar-gen4-hscif # R-Car Gen4
+ - const: renesas,hscif # generic HSCIF compatible UART
+
reg:
maxItems: 1
@@ -113,6 +119,7 @@ if:
enum:
- renesas,rcar-gen2-hscif
- renesas,rcar-gen3-hscif
+ - renesas,rcar-gen4-hscif
then:
required:
- resets
diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
index 5d37f8f189fb..90fe45265fbc 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
@@ -60,12 +60,12 @@ properties:
- renesas,scif-r8a77980 # R-Car V3H
- renesas,scif-r8a77990 # R-Car E3
- renesas,scif-r8a77995 # R-Car D3
- - renesas,scif-r8a779a0 # R-Car V3U
- const: renesas,rcar-gen3-scif # R-Car Gen3 and RZ/G2
- const: renesas,scif # generic SCIF compatible UART
- items:
- enum:
+ - renesas,scif-r8a779a0 # R-Car V3U
- renesas,scif-r8a779f0 # R-Car S4-8
- const: renesas,rcar-gen4-scif # R-Car Gen4
- const: renesas,scif # generic SCIF compatible UART
diff --git a/Documentation/devicetree/bindings/serial/rs485.yaml b/Documentation/devicetree/bindings/serial/rs485.yaml
index 0c9fa694f85c..f2c9c9fe6aa7 100644
--- a/Documentation/devicetree/bindings/serial/rs485.yaml
+++ b/Documentation/devicetree/bindings/serial/rs485.yaml
@@ -33,6 +33,11 @@ properties:
description: drive RTS low when sending (default is high).
$ref: /schemas/types.yaml#/definitions/flag
+ rs485-rx-active-high:
+ description: Polarity of receiver enable signal (when separate from RTS).
+ True indicates active high (default is low).
+ $ref: /schemas/types.yaml#/definitions/flag
+
linux,rs485-enabled-at-boot-time:
description: enables the rs485 feature at boot time. It can be disabled
later with proper ioctl.
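For context, rs485-rx-active-high is a boolean device property like the other rs485
flags above; a minimal sketch of how a UART driver might read it, assuming the generic
property API (the helper name is illustrative only)::

    #include <linux/property.h>

    /* Sketch: receiver-enable polarity; an absent property means active
     * low, matching the default stated in the binding. */
    static bool rs485_rx_en_active_high(struct device *dev)
    {
            return device_property_read_bool(dev, "rs485-rx-active-high");
    }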
diff --git a/Documentation/devicetree/bindings/serial/socionext,uniphier-uart.yaml b/Documentation/devicetree/bindings/serial/socionext,uniphier-uart.yaml
index d490c7c4b967..3d01cc355778 100644
--- a/Documentation/devicetree/bindings/serial/socionext,uniphier-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/socionext,uniphier-uart.yaml
@@ -20,7 +20,10 @@ properties:
maxItems: 1
clocks:
- minItems: 1
+ maxItems: 1
+
+ resets:
+ maxItems: 1
auto-flow-control:
description: enable automatic flow control support.
diff --git a/Documentation/devicetree/bindings/soc/imx/fsl,imx8mp-media-blk-ctrl.yaml b/Documentation/devicetree/bindings/soc/imx/fsl,imx8mp-media-blk-ctrl.yaml
index 21d3ee486295..b246d8386ba4 100644
--- a/Documentation/devicetree/bindings/soc/imx/fsl,imx8mp-media-blk-ctrl.yaml
+++ b/Documentation/devicetree/bindings/soc/imx/fsl,imx8mp-media-blk-ctrl.yaml
@@ -88,7 +88,7 @@ examples:
<&mediamix_pd>, <&ispdwp_pd>, <&ispdwp_pd>,
<&mipi_phy2_pd>;
power-domain-names = "bus", "mipi-dsi1", "mipi-csi1", "lcdif1", "isi",
- "mipi-csi2", "lcdif2", "isp1", "dwe", "mipi-dsi2";
+ "mipi-csi2", "lcdif2", "isp", "dwe", "mipi-dsi2";
clocks = <&clk IMX8MP_CLK_MEDIA_APB_ROOT>,
<&clk IMX8MP_CLK_MEDIA_AXI_ROOT>,
<&clk IMX8MP_CLK_MEDIA_CAM1_PIX_ROOT>,
diff --git a/Documentation/devicetree/bindings/soc/intel/intel,hps-copy-engine.yaml b/Documentation/devicetree/bindings/soc/intel/intel,hps-copy-engine.yaml
new file mode 100644
index 000000000000..8634865015cd
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/intel/intel,hps-copy-engine.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (C) 2022, Intel Corporation
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/soc/intel/intel,hps-copy-engine.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Intel HPS Copy Engine
+
+maintainers:
+ - Matthew Gerlach <matthew.gerlach@linux.intel.com>
+
+description: |
+ The Intel Hard Processor System (HPS) Copy Engine is an IP block used to copy
+ a bootable image from host memory to HPS DDR. Additionally, there is a
+ register the HPS can use to indicate the state of booting the copied image as
+ well as a keep-alive indication to the host.
+
+properties:
+ compatible:
+ const: intel,hps-copy-engine
+
+ '#dma-cells':
+ const: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ bus@80000000 {
+ compatible = "simple-bus";
+ reg = <0x80000000 0x60000000>,
+ <0xf9000000 0x00100000>;
+ reg-names = "axi_h2f", "axi_h2f_lw";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x00000000 0xf9000000 0x00001000>;
+
+ dma-controller@0 {
+ compatible = "intel,hps-copy-engine";
+ reg = <0x00000000 0x00000000 0x00001000>;
+ #dma-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
index 2a4b6c06e7c9..f0f1bf06aea6 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
@@ -47,6 +47,10 @@ properties:
- qcom,rpm-qcm2290
- qcom,rpm-qcs404
+ clock-controller:
+ $ref: /schemas/clock/qcom,rpmcc.yaml#
+ unevaluatedProperties: false
+
qcom,smd-channels:
$ref: /schemas/types.yaml#/definitions/string-array
description: Channel name used for the RPM communication
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml
index e6f9ffa1c0ea..bca07bb13ebf 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml
@@ -66,9 +66,7 @@ patternProperties:
The identifier for the remote processor as known by the rest of the
system.
- # Binding for edge subnodes is not complete
- patternProperties:
- "^rpm-requests$":
+ rpm-requests:
type: object
description:
In turn, subnodes of the "edges" represent devices tied to SMD
diff --git a/Documentation/devicetree/bindings/soundwire/qcom,sdw.txt b/Documentation/devicetree/bindings/soundwire/qcom,sdw.txt
index 51ddbc509382..c85c25779e3f 100644
--- a/Documentation/devicetree/bindings/soundwire/qcom,sdw.txt
+++ b/Documentation/devicetree/bindings/soundwire/qcom,sdw.txt
@@ -162,6 +162,18 @@ board specific bus parameters.
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
+- reset:
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: Should specify the SoundWire audio CSR reset controller interface,
+ which is required for SoundWire version 1.6.0 and above.
+
+- reset-names:
+ Usage: optional
+ Value type: <stringlist>
+ Definition: should be "swr_audio_cgcr" for SoundWire audio CSR reset
+ controller interface.
+
Note:
More Information on detail of encoding of these fields can be
found in MIPI Alliance SoundWire 1.0 Specifications.
@@ -180,6 +192,8 @@ soundwire: soundwire@c85 {
interrupts = <20 IRQ_TYPE_EDGE_RISING>;
clocks = <&wcc>;
clock-names = "iface";
+ resets = <&lpass_audiocc LPASS_AUDIO_SWR_TX_CGCR>;
+ reset-names = "swr_audio_cgcr";
#sound-dai-cells = <1>;
qcom,dports-type = <0>;
qcom,dout-ports = <6>;
diff --git a/Documentation/devicetree/bindings/timer/hpe,gxp-timer.yaml b/Documentation/devicetree/bindings/timer/hpe,gxp-timer.yaml
new file mode 100644
index 000000000000..d33d90f44d28
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/hpe,gxp-timer.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/hpe,gxp-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HPE GXP Timer
+
+maintainers:
+ - Nick Hawkins <nick.hawkins@hpe.com>
+ - Jean-Marie Verdun <verdun@hpe.com>
+
+properties:
+ compatible:
+ const: hpe,gxp-timer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: iop
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@c0000000 {
+ compatible = "hpe,gxp-timer";
+ reg = <0x80 0x16>;
+ interrupts = <0>;
+ interrupt-parent = <&vic0>;
+ clocks = <&iopclk>;
+ clock-names = "iop";
+ };
diff --git a/Documentation/devicetree/bindings/timer/xlnx,xps-timer.yaml b/Documentation/devicetree/bindings/timer/xlnx,xps-timer.yaml
new file mode 100644
index 000000000000..dd168d41d2e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/xlnx,xps-timer.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/xlnx,xps-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx LogiCORE IP AXI Timer Device Tree Binding
+
+maintainers:
+ - Sean Anderson <sean.anderson@seco.com>
+
+properties:
+ compatible:
+ contains:
+ const: xlnx,xps-timer-1.00.a
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: s_axi_aclk
+
+ interrupts:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+ '#pwm-cells': true
+
+ xlnx,count-width:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [8, 16, 32]
+ default: 32
+ description:
+ The width of the counter(s), in bits.
+
+ xlnx,one-timer-only:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ description:
+ Whether only one timer is present in this block.
+
+required:
+ - compatible
+ - reg
+ - xlnx,one-timer-only
+
+allOf:
+ - if:
+ required:
+ - '#pwm-cells'
+ then:
+ allOf:
+ - required:
+ - clocks
+ - properties:
+ xlnx,one-timer-only:
+ const: 0
+ else:
+ required:
+ - interrupts
+ - if:
+ required:
+ - clocks
+ then:
+ required:
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@800e0000 {
+ clock-names = "s_axi_aclk";
+ clocks = <&zynqmp_clk 71>;
+ compatible = "xlnx,xps-timer-1.00.a";
+ reg = <0x800e0000 0x10000>;
+ interrupts = <0 39 2>;
+ xlnx,count-width = <16>;
+ xlnx,one-timer-only = <0x0>;
+ };
+
+ timer@800f0000 {
+ #pwm-cells = <0>;
+ clock-names = "s_axi_aclk";
+ clocks = <&zynqmp_clk 71>;
+ compatible = "xlnx,xps-timer-1.00.a";
+ reg = <0x800e0000 0x10000>;
+ xlnx,count-width = <32>;
+ xlnx,one-timer-only = <0x0>;
+ };
diff --git a/Documentation/devicetree/bindings/timestamp/hardware-timestamps-common.yaml b/Documentation/devicetree/bindings/timestamp/hardware-timestamps-common.yaml
new file mode 100644
index 000000000000..fd6a7b51f571
--- /dev/null
+++ b/Documentation/devicetree/bindings/timestamp/hardware-timestamps-common.yaml
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timestamp/hardware-timestamps-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Hardware timestamp providers
+
+maintainers:
+ - Dipen Patel <dipenp@nvidia.com>
+
+description:
+ Some devices/SoCs have hardware timestamp engines (HTE) which can timestamp
+ entities such as GPIOs, IRQs and buses in real time using hardware means.
+ The hardware timestamp engine presents itself as a provider with the
+ bindings described in this document.
+
+properties:
+ $nodename:
+ pattern: "^timestamp(@.*|-[0-9a-f])?$"
+
+ "#timestamp-cells":
+ description:
+ Number of cells in a HTE specifier.
+
+required:
+ - "#timestamp-cells"
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/timestamp/hte-consumer.yaml b/Documentation/devicetree/bindings/timestamp/hte-consumer.yaml
new file mode 100644
index 000000000000..6456515c3d26
--- /dev/null
+++ b/Documentation/devicetree/bindings/timestamp/hte-consumer.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timestamp/hte-consumer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HTE Consumer Device Tree Bindings
+
+maintainers:
+ - Dipen Patel <dipenp@nvidia.com>
+
+select: true
+
+properties:
+ timestamps:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ The list of HTE provider phandles. The first cell must represent the
+ provider phandle followed by the line identifiers. The meaning of the
+ line identifier and exact number of arguments must be specified in the
+ HTE provider device tree binding document.
+
+ timestamp-names:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ An optional string property to label each line specifier present in the
+ timestamp property.
+
+dependencies:
+ timestamp-names: [ timestamps ]
+
+additionalProperties: true
+
+examples:
+ - |
+ hte_tegra_consumer {
+ timestamps = <&tegra_hte_aon 0x9>, <&tegra_hte_lic 0x19>;
+ timestamp-names = "hte-gpio", "hte-i2c";
+ };
diff --git a/Documentation/devicetree/bindings/timestamp/nvidia,tegra194-hte.yaml b/Documentation/devicetree/bindings/timestamp/nvidia,tegra194-hte.yaml
new file mode 100644
index 000000000000..c31e207d1652
--- /dev/null
+++ b/Documentation/devicetree/bindings/timestamp/nvidia,tegra194-hte.yaml
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timestamp/nvidia,tegra194-hte.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Tegra194 on chip generic hardware timestamping engine (HTE)
+
+maintainers:
+ - Dipen Patel <dipenp@nvidia.com>
+
+description:
+ The Tegra SoC has two instances of the generic hardware timestamping engine
+ (GTE), known as GTE GPIO and GTE IRQ, which monitor a subset of GPIO lines
+ and on-chip IRQ lines respectively for state changes; upon detection, the
+ engine records a timestamp (taken from the system counter) in its internal
+ hardware FIFO. It has a bitmap array arranged in 32-bit slices where each
+ bit represents a signal/line to enable or disable for hardware timestamping.
+ The GTE GPIO monitors GPIO lines from the AON (always on) GPIO controller.
+
+properties:
+ compatible:
+ enum:
+ - nvidia,tegra194-gte-aon
+ - nvidia,tegra194-gte-lic
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ nvidia,int-threshold:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ HTE device generates its interrupt based on this u32 FIFO threshold
+ value. The recommended value is 1.
+ minimum: 1
+ maximum: 256
+
+ nvidia,slices:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ HTE lines are arranged in 32-bit slices where each bit represents a
+ different line/signal that can be enabled/configured for timestamping.
+ This is a u32 property whose value depends on the HTE instance in the
+ chip. The value 3 is used for the GPIO GTE and 11 for the IRQ GTE.
+ enum: [3, 11]
+
+ '#timestamp-cells':
+ description:
+ This represents the number of line id arguments as specified by the
+ consumers. For the GTE IRQ, this is the IRQ number as mentioned in the
+ SoC technical reference manual. For the GTE GPIO, its value is the same
+ as mentioned in the nvidia GPIO device tree binding document.
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - nvidia,slices
+ - "#timestamp-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ tegra_hte_aon: timestamp@c1e0000 {
+ compatible = "nvidia,tegra194-gte-aon";
+ reg = <0xc1e0000 0x10000>;
+ interrupts = <0 13 0x4>;
+ nvidia,int-threshold = <1>;
+ nvidia,slices = <3>;
+ #timestamp-cells = <1>;
+ };
+
+ - |
+ tegra_hte_lic: timestamp@3aa0000 {
+ compatible = "nvidia,tegra194-gte-lic";
+ reg = <0x3aa0000 0x10000>;
+ interrupts = <0 11 0x4>;
+ nvidia,int-threshold = <1>;
+ nvidia,slices = <11>;
+ #timestamp-cells = <1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 05f01f4acca8..6aafa71806a3 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -47,7 +47,9 @@ properties:
- at,24c08
# i2c trusted platform module (TPM)
- atmel,at97sc3204t
- # i2c h/w symmetric crypto module
+ # ATSHA204 - i2c h/w symmetric crypto module
+ - atmel,atsha204
+ # ATSHA204A - i2c h/w symmetric crypto module
- atmel,atsha204a
# i2c h/w elliptic curve crypto module
- atmel,atecc508a
diff --git a/Documentation/devicetree/bindings/usb/am33xx-usb.txt b/Documentation/devicetree/bindings/usb/am33xx-usb.txt
index 7a198a30408a..654ffc62d013 100644
--- a/Documentation/devicetree/bindings/usb/am33xx-usb.txt
+++ b/Documentation/devicetree/bindings/usb/am33xx-usb.txt
@@ -61,8 +61,9 @@ DMA
endpoint number (0 … 14 for endpoints 1 … 15 on instance 0 and 15 … 29
for endpoints 1 … 15 on instance 1). The second number is 0 for RX and
1 for TX transfers.
-- #dma-channels: should be set to 30 representing the 15 endpoints for
+- dma-channels: should be set to 30 representing the 15 endpoints for
each USB instance.
+- #dma-channels: deprecated
Example:
~~~~~~~~
@@ -193,7 +194,7 @@ usb: usb@47400000 {
interrupts = <17>;
interrupt-names = "glue";
#dma-cells = <2>;
- #dma-channels = <30>;
- #dma-requests = <256>;
+ dma-channels = <30>;
+ dma-requests = <256>;
};
};
diff --git a/Documentation/devicetree/bindings/usb/da8xx-usb.txt b/Documentation/devicetree/bindings/usb/da8xx-usb.txt
index 9ce22551b2b3..fb2027a7d80d 100644
--- a/Documentation/devicetree/bindings/usb/da8xx-usb.txt
+++ b/Documentation/devicetree/bindings/usb/da8xx-usb.txt
@@ -36,7 +36,8 @@ DMA
- #dma-cells: should be set to 2. The first number represents the
channel number (0 … 3 for endpoints 1 … 4).
The second number is 0 for RX and 1 for TX transfers.
-- #dma-channels: should be set to 4 representing the 4 endpoints.
+- dma-channels: should be set to 4 representing the 4 endpoints.
+- #dma-channels: deprecated
Example:
usb_phy: usb-phy {
@@ -74,7 +75,7 @@ Example:
reg-names = "controller", "scheduler", "queuemgr";
interrupts = <58>;
#dma-cells = <2>;
- #dma-channels = <4>;
+ dma-channels = <4>;
};
};
diff --git a/Documentation/devicetree/bindings/usb/dwc2.yaml b/Documentation/devicetree/bindings/usb/dwc2.yaml
index 17fc471a12bd..8d22a9843ba5 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.yaml
+++ b/Documentation/devicetree/bindings/usb/dwc2.yaml
@@ -17,6 +17,13 @@ properties:
oneOf:
- const: brcm,bcm2835-usb
- const: hisilicon,hi6220-usb
+ - const: ingenic,jz4775-otg
+ - const: ingenic,jz4780-otg
+ - const: ingenic,x1000-otg
+ - const: ingenic,x1600-otg
+ - const: ingenic,x1700-otg
+ - const: ingenic,x1830-otg
+ - const: ingenic,x2000-otg
- items:
- const: rockchip,rk3066-usb
- const: snps,dwc2
diff --git a/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml b/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
index f77c16e203d5..098b73134a1b 100644
--- a/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
+++ b/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
@@ -71,6 +71,10 @@ properties:
- usb2-phy
- usb3-phy
+ reset-gpios:
+ description: GPIO used for the reset ulpi-phy
+ maxItems: 1
+
# Required child node:
patternProperties:
diff --git a/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml b/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
new file mode 100644
index 000000000000..9473f26b0621
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/usb/fcs,fsa4480.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: ON Semiconductor Analog Audio Switch
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+properties:
+ compatible:
+ enum:
+ - fcs,fsa4480
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vcc-supply:
+ description: power supply (2.7V-5.5V)
+
+ mode-switch:
+ description: Flag the port as possible handler of altmode switching
+ type: boolean
+
+ orientation-switch:
+ description: Flag the port as possible handler of orientation switching
+ type: boolean
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ A port node to link the FSA4480 to a TypeC controller for the purpose of
+ handling altmode muxing and orientation switching.
+
+required:
+ - compatible
+ - reg
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c13 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsa4480@42 {
+ compatible = "fcs,fsa4480";
+ reg = <0x42>;
+
+ interrupts-extended = <&tlmm 2 IRQ_TYPE_LEVEL_LOW>;
+
+ vcc-supply = <&vreg_bob>;
+
+ mode-switch;
+ orientation-switch;
+
+ port {
+ fsa4480_ept: endpoint {
+ remote-endpoint = <&typec_controller>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/usb/generic-ehci.yaml b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
index 8913497624de..0b4524b6409e 100644
--- a/Documentation/devicetree/bindings/usb/generic-ehci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
@@ -55,6 +55,7 @@ properties:
- brcm,bcm7420-ehci
- brcm,bcm7425-ehci
- brcm,bcm7435-ehci
+ - hpe,gxp-ehci
- ibm,476gtr-ehci
- nxp,lpc1850-ehci
- qca,ar7100-ehci
diff --git a/Documentation/devicetree/bindings/usb/generic-ohci.yaml b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
index acbf94fa5f74..e2ac84665316 100644
--- a/Documentation/devicetree/bindings/usb/generic-ohci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
@@ -42,6 +42,7 @@ properties:
- brcm,bcm7420-ohci
- brcm,bcm7425-ohci
- brcm,bcm7435-ohci
+ - hpe,gxp-ohci
- ibm,476gtr-ohci
- ingenic,jz4740-ohci
- snps,hsdk-v1.0-ohci
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml b/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml
index df766f8de872..37b02a841dc4 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml
@@ -25,6 +25,7 @@ properties:
- mediatek,mt8173-mtu3
- mediatek,mt8183-mtu3
- mediatek,mt8192-mtu3
+ - mediatek,mt8195-mtu3
- const: mediatek,mtu3
reg:
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index ce252db2aab3..e336fe2e03cc 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -16,16 +16,21 @@ properties:
- qcom,ipq4019-dwc3
- qcom,ipq6018-dwc3
- qcom,ipq8064-dwc3
+ - qcom,ipq8074-dwc3
- qcom,msm8953-dwc3
+ - qcom,msm8994-dwc3
- qcom,msm8996-dwc3
- qcom,msm8998-dwc3
+ - qcom,qcs404-dwc3
- qcom,sc7180-dwc3
- qcom,sc7280-dwc3
- qcom,sdm660-dwc3
- qcom,sdm845-dwc3
- qcom,sdx55-dwc3
+ - qcom,sdx65-dwc3
- qcom,sm4250-dwc3
- qcom,sm6115-dwc3
+ - qcom,sm6125-dwc3
- qcom,sm6350-dwc3
- qcom,sm8150-dwc3
- qcom,sm8250-dwc3
@@ -50,26 +55,22 @@ properties:
maxItems: 1
clocks:
- description:
- A list of phandle and clock-specifier pairs for the clocks
- listed in clock-names.
- items:
- - description: System Config NOC clock.
- - description: Master/Core clock, has to be >= 125 MHz
- for SS operation and >= 60MHz for HS operation.
- - description: System bus AXI clock.
- - description: Mock utmi clock needed for ITP/SOF generation
- in host mode. Its frequency should be 19.2MHz.
- - description: Sleep clock, used for wakeup when
- USB3 core goes into low power mode (U3).
+ description: |
+ Several clocks are used, depending on the variant. Typical ones are::
+ - cfg_noc:: System Config NOC clock.
+ - core:: Master/Core clock, has to be >= 125 MHz for SS operation and >=
+ 60MHz for HS operation.
+ - iface:: System bus AXI clock.
+ - sleep:: Sleep clock, used for wakeup when USB3 core goes into low
+ power mode (U3).
+ - mock_utmi:: Mock utmi clock needed for ITP/SOF generation in host
+ mode. Its frequency should be 19.2MHz.
+ minItems: 1
+ maxItems: 6
clock-names:
- items:
- - const: cfg_noc
- - const: core
- - const: iface
- - const: mock_utmi
- - const: sleep
+ minItems: 1
+ maxItems: 6
assigned-clocks:
items:
@@ -132,6 +133,185 @@ required:
- interrupts
- interrupt-names
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,ipq4019-dwc3
+ then:
+ properties:
+ clocks:
+ maxItems: 3
+ clock-names:
+ items:
+ - const: core
+ - const: sleep
+ - const: mock_utmi
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,ipq8064-dwc3
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Master/Core clock, has to be >= 125 MHz
+ for SS operation and >= 60MHz for HS operation.
+ clock-names:
+ items:
+ - const: core
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8953-dwc3
+ - qcom,msm8996-dwc3
+ - qcom,msm8998-dwc3
+ - qcom,sc7180-dwc3
+ - qcom,sc7280-dwc3
+ - qcom,sdm845-dwc3
+ - qcom,sdx55-dwc3
+ - qcom,sm6350-dwc3
+ then:
+ properties:
+ clocks:
+ maxItems: 5
+ clock-names:
+ items:
+ - const: cfg_noc
+ - const: core
+ - const: iface
+ - const: sleep
+ - const: mock_utmi
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,ipq6018-dwc3
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 4
+ clock-names:
+ oneOf:
+ - items:
+ - const: core
+ - const: sleep
+ - const: mock_utmi
+ - items:
+ - const: cfg_noc
+ - const: core
+ - const: sleep
+ - const: mock_utmi
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,ipq8074-dwc3
+ then:
+ properties:
+ clocks:
+ maxItems: 4
+ clock-names:
+ items:
+ - const: cfg_noc
+ - const: core
+ - const: sleep
+ - const: mock_utmi
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8994-dwc3
+ - qcom,qcs404-dwc3
+ then:
+ properties:
+ clocks:
+ maxItems: 4
+ clock-names:
+ items:
+ - const: core
+ - const: iface
+ - const: sleep
+ - const: mock_utmi
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sdm660-dwc3
+ then:
+ properties:
+ clocks:
+ minItems: 6
+ clock-names:
+ items:
+ - const: cfg_noc
+ - const: core
+ - const: iface
+ - const: sleep
+ - const: mock_utmi
+ - const: bus
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm6125-dwc3
+ - qcom,sm8150-dwc3
+ - qcom,sm8250-dwc3
+ - qcom,sm8450-dwc3
+ then:
+ properties:
+ clocks:
+ minItems: 6
+ clock-names:
+ items:
+ - const: cfg_noc
+ - const: core
+ - const: iface
+ - const: sleep
+ - const: mock_utmi
+ - const: xo
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8350-dwc3
+ then:
+ properties:
+ clocks:
+ minItems: 5
+ maxItems: 6
+ clock-names:
+ minItems: 5
+ items:
+ - const: cfg_noc
+ - const: core
+ - const: iface
+ - const: sleep
+ - const: mock_utmi
+ - const: xo
+
+
additionalProperties: false
examples:
@@ -153,10 +333,13 @@ examples:
clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>,
<&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
- <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
- clock-names = "cfg_noc", "core", "iface", "mock_utmi",
- "sleep";
+ <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>;
diff --git a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
index 0bb841b28003..bad55dfb2fa0 100644
--- a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
+++ b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
@@ -19,6 +19,7 @@ properties:
- items:
- enum:
- renesas,usbhs-r7s9210 # RZ/A2
+ - renesas,usbhs-r9a07g043 # RZ/G2UL
- renesas,usbhs-r9a07g044 # RZ/G2{L,LC}
- renesas,usbhs-r9a07g054 # RZ/V2L
- const: renesas,rza2-usbhs
@@ -118,6 +119,7 @@ allOf:
compatible:
contains:
enum:
+ - renesas,usbhs-r9a07g043
- renesas,usbhs-r9a07g044
- renesas,usbhs-r9a07g054
then:
@@ -128,6 +130,8 @@ allOf:
- description: U2P_INT_DMA[0]
- description: U2P_INT_DMA[1]
- description: U2P_INT_DMAERR
+ required:
+ - resets
else:
properties:
interrupts:
diff --git a/Documentation/devicetree/bindings/usb/samsung,exynos-usb2.yaml b/Documentation/devicetree/bindings/usb/samsung,exynos-usb2.yaml
index 9c92defbba01..caa572dcee02 100644
--- a/Documentation/devicetree/bindings/usb/samsung,exynos-usb2.yaml
+++ b/Documentation/devicetree/bindings/usb/samsung,exynos-usb2.yaml
@@ -15,9 +15,6 @@ properties:
- samsung,exynos4210-ehci
- samsung,exynos4210-ohci
- '#address-cells':
- const: 1
-
clocks:
maxItems: 1
@@ -46,15 +43,6 @@ properties:
Only for controller in EHCI mode, if present, specifies the GPIO that
needs to be pulled up for the bus to be powered.
- '#size-cells':
- const: 0
-
-patternProperties:
- "^.*@[0-9a-f]{1,2}$":
- description: The hard wired USB devices
- type: object
- $ref: /usb/usb-device.yaml
-
required:
- compatible
- clocks
@@ -65,6 +53,7 @@ required:
- reg
allOf:
+ - $ref: usb-hcd.yaml#
- if:
properties:
compatible:
@@ -74,7 +63,7 @@ allOf:
properties:
samsung,vbus-gpio: false
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
index f4471f8bdbef..d41265ba8ce2 100644
--- a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
@@ -68,6 +68,8 @@ properties:
- enum: [bus_early, ref, suspend]
- true
+ dma-coherent: true
+
iommus:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/usb/ti,am62-usb.yaml b/Documentation/devicetree/bindings/usb/ti,am62-usb.yaml
new file mode 100644
index 000000000000..d25fc708e32c
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ti,am62-usb.yaml
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/ti,am62-usb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI's AM62 wrapper module for the Synopsys USBSS-DRD controller
+
+maintainers:
+ - Aswath Govindraju <a-govindraju@ti.com>
+
+properties:
+ compatible:
+ const: ti,am62-usb
+
+ reg:
+ maxItems: 1
+
+ ranges: true
+
+ power-domains:
+ description:
+ PM domain provider node and an args specifier containing
+ the USB ISO device id value. See,
+ Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
+ maxItems: 1
+
+ clocks:
+ description: Clock phandle to usb2_refclk
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: ref
+
+ ti,vbus-divider:
+ description:
+ Should be present if USB VBUS line is connected to the
+ VBUS pin of the SoC via a 1/3 voltage divider.
+ type: boolean
+
+ ti,syscon-phy-pll-refclk:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: Phandle to the SYSCON entry
+ - description: USB phy control register offset within SYSCON
+ description:
+ Specifier for conveying frequency of ref clock input, for the
+ operation of USB2PHY.
+
+ '#address-cells':
+ const: 2
+
+ '#size-cells':
+ const: 2
+
+patternProperties:
+ "^usb@[0-9a-f]+$":
+ $ref: snps,dwc3.yaml#
+ description: Required child node
+
+required:
+ - compatible
+ - reg
+ - power-domains
+ - clocks
+ - clock-names
+ - ti,syscon-phy-pll-refclk
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/soc/ti,sci_pm_domain.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ usbss1: usb@f910000 {
+ compatible = "ti,am62-usb";
+ reg = <0x00 0x0f910000 0x00 0x800>;
+ clocks = <&k3_clks 162 3>;
+ clock-names = "ref";
+ ti,syscon-phy-pll-refclk = <&wkup_conf 0x4018>;
+ power-domains = <&k3_pds 179 TI_SCI_PD_EXCLUSIVE>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ usb@31100000 {
+ compatible = "snps,dwc3";
+ reg =<0x00 0x31100000 0x00 0x50000>;
+ interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>, /* irq.0 */
+ <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>; /* irq.0 */
+ interrupt-names = "host", "peripheral";
+ maximum-speed = "high-speed";
+ dr_mode = "otg";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 495a01ced97e..6bb20b4554d7 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -1207,6 +1207,8 @@ patternProperties:
description: Summit microelectronics
"^sunchip,.*":
description: Shenzhen Sunchip Technology Co., Ltd
+ "^sundance,.*":
+ description: Sundance DSP Inc.
"^sunplus,.*":
description: Sunplus Technology Co., Ltd.
"^SUNW,.*":
diff --git a/Documentation/devicetree/bindings/watchdog/da9062-wdt.txt b/Documentation/devicetree/bindings/watchdog/da9062-wdt.txt
index 950e4fba8dbc..354314d854ef 100644
--- a/Documentation/devicetree/bindings/watchdog/da9062-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/da9062-wdt.txt
@@ -10,6 +10,12 @@ Optional properties:
- dlg,use-sw-pm: Add this property to disable the watchdog during suspend.
Only use this option if you can't use the watchdog automatic suspend
function during a suspend (see register CONTROL_B).
+- dlg,wdt-sd: Set what happens on watchdog timeout. If this bit is set, the
+  watchdog timeout triggers SHUTDOWN; if cleared, the watchdog triggers
+  POWERDOWN. Can be 0 or 1. Only use this option if you want to change the
+  chip's default OTP setting for the WATCHDOG_SD bit. If this property is NOT
+  set, the WATCHDOG_SD bit and the watchdog timeout behavior will match the
+  chip's OTP settings.
Example: DA9062
diff --git a/Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.txt b/Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.txt
deleted file mode 100644
index 9ecdb502e605..000000000000
--- a/Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Faraday Technology FTWDT010 watchdog
-
-This is an IP part from Faraday Technology found in the Gemini
-SoCs and others.
-
-Required properties:
-- compatible : must be one of
- "faraday,ftwdt010"
- "cortina,gemini-watchdog", "faraday,ftwdt010"
-- reg : shall contain base register location and length
-- interrupts : shall contain the interrupt for the watchdog
-
-Optional properties:
-- timeout-sec : the default watchdog timeout in seconds.
-
-Example:
-
-watchdog@41000000 {
- compatible = "faraday,ftwdt010";
- reg = <0x41000000 0x1000>;
- interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
-};
diff --git a/Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.yaml b/Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.yaml
new file mode 100644
index 000000000000..ca9e1beff76b
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/faraday,ftwdt010.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Faraday Technology FTWDT010 watchdog
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+ - Corentin Labbe <clabbe@baylibre.com>
+
+description: |
+ This is an IP part from Faraday Technology found in the Gemini
+ SoCs and others.
+
+allOf:
+ - $ref: "watchdog.yaml#"
+
+properties:
+ compatible:
+ oneOf:
+ - const: faraday,ftwdt010
+ - items:
+ - enum:
+ - cortina,gemini-watchdog
+ - moxa,moxart-watchdog
+ - const: faraday,ftwdt010
+
+ reg:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: PCLK
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ watchdog@41000000 {
+ compatible = "faraday,ftwdt010";
+ reg = <0x41000000 0x1000>;
+ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+ timeout-sec = <5>;
+ };
+ - |
+ watchdog: watchdog@98500000 {
+ compatible = "moxa,moxart-watchdog", "faraday,ftwdt010";
+ reg = <0x98500000 0x10>;
+ clocks = <&clk_apb>;
+ clock-names = "PCLK";
+ };
+...
diff --git a/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml b/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
index 4ca8a31359a5..8562978aa0c8 100644
--- a/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
@@ -19,6 +19,7 @@ properties:
- items:
- const: fsl,imx8ulp-wdt
- const: fsl,imx7ulp-wdt
+ - const: fsl,imx93-wdt
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
index a97418c74f6b..762c62e428ef 100644
--- a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
@@ -16,6 +16,7 @@ Required properties:
"mediatek,mt7629-wdt", "mediatek,mt6589-wdt": for MT7629
"mediatek,mt7986-wdt", "mediatek,mt6589-wdt": for MT7986
"mediatek,mt8183-wdt": for MT8183
+ "mediatek,mt8186-wdt", "mediatek,mt6589-wdt": for MT8186
"mediatek,mt8516-wdt", "mediatek,mt6589-wdt": for MT8516
"mediatek,mt8192-wdt": for MT8192
"mediatek,mt8195-wdt", "mediatek,mt6589-wdt": for MT8195
diff --git a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
index 16c6f82a13ca..2bd6b4a52637 100644
--- a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
@@ -14,22 +14,29 @@ allOf:
properties:
compatible:
- enum:
- - qcom,apss-wdt-qcs404
- - qcom,apss-wdt-sc7180
- - qcom,apss-wdt-sc7280
- - qcom,apss-wdt-sdm845
- - qcom,apss-wdt-sdx55
- - qcom,apss-wdt-sm6350
- - qcom,apss-wdt-sm8150
- - qcom,apss-wdt-sm8250
- - qcom,kpss-timer
- - qcom,kpss-wdt
- - qcom,kpss-wdt-apq8064
- - qcom,kpss-wdt-ipq4019
- - qcom,kpss-wdt-ipq8064
- - qcom,kpss-wdt-msm8960
- - qcom,scss-timer
+ oneOf:
+ - items:
+ - enum:
+ - qcom,apss-wdt-qcs404
+ - qcom,apss-wdt-sc7180
+ - qcom,apss-wdt-sc7280
+ - qcom,apss-wdt-sc8180x
+ - qcom,apss-wdt-sc8280xp
+ - qcom,apss-wdt-sdm845
+ - qcom,apss-wdt-sdx55
+ - qcom,apss-wdt-sm6350
+ - qcom,apss-wdt-sm8150
+ - qcom,apss-wdt-sm8250
+ - const: qcom,kpss-wdt
+ - items:
+ - enum:
+ - qcom,kpss-wdt
+ - qcom,kpss-timer
+ - qcom,kpss-wdt-apq8064
+ - qcom,kpss-wdt-ipq4019
+ - qcom,kpss-wdt-ipq8064
+ - qcom,kpss-wdt-msm8960
+ - qcom,scss-timer
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
index d060438e1402..a8d7dde5271b 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
@@ -21,8 +21,15 @@ properties:
- items:
- enum:
+ - renesas,r9a06g032-wdt # RZ/N1D
+ - const: renesas,rzn1-wdt # RZ/N1
+
+ - items:
+ - enum:
+ - renesas,r9a07g043-wdt # RZ/G2UL
- renesas,r9a07g044-wdt # RZ/G2{L,LC}
- - const: renesas,rzg2l-wdt # RZ/G2L
+ - renesas,r9a07g054-wdt # RZ/V2L
+ - const: renesas,rzg2l-wdt
- items:
- enum:
@@ -52,11 +59,11 @@ properties:
- renesas,r8a77980-wdt # R-Car V3H
- renesas,r8a77990-wdt # R-Car E3
- renesas,r8a77995-wdt # R-Car D3
- - renesas,r8a779a0-wdt # R-Car V3U
- const: renesas,rcar-gen3-wdt # R-Car Gen3 and RZ/G2
- items:
- enum:
+ - renesas,r8a779a0-wdt # R-Car V3U
- renesas,r8a779f0-wdt # R-Car S4-8
- const: renesas,rcar-gen4-wdt # R-Car Gen4
@@ -94,6 +101,7 @@ allOf:
contains:
enum:
- renesas,rza-wdt
+ - renesas,rzn1-wdt
then:
required:
- power-domains
diff --git a/Documentation/devicetree/bindings/watchdog/socionext,uniphier-wdt.yaml b/Documentation/devicetree/bindings/watchdog/socionext,uniphier-wdt.yaml
index a059d16cb4f2..90698cfa8f94 100644
--- a/Documentation/devicetree/bindings/watchdog/socionext,uniphier-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/socionext,uniphier-wdt.yaml
@@ -19,7 +19,7 @@ properties:
required:
- compatible
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/watchdog/sunplus,sp7021-wdt.yaml b/Documentation/devicetree/bindings/watchdog/sunplus,sp7021-wdt.yaml
new file mode 100644
index 000000000000..d90271013191
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/sunplus,sp7021-wdt.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) Sunplus Co., Ltd. 2021
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/sunplus,sp7021-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sunplus SoCs Watchdog
+
+maintainers:
+ - XianTao Hu <xt.hu@cqplus1.com>
+
+allOf:
+ - $ref: watchdog.yaml#
+
+properties:
+ compatible:
+ const: sunplus,sp7021-wdt
+
+ reg:
+ items:
+ - description: watchdog register region
+ - description: miscellaneous control register region
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ watchdog: watchdog@9c000630 {
+ compatible = "sunplus,sp7021-wdt";
+ reg = <0x9c000630 0x08>, <0x9c000274 0x04>;
+ clocks = <&clkc 0x24>;
+ resets = <&rstc 0x14>;
+ };
+...
diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst
index 0072c9c7efd3..1e0f1f85d10e 100644
--- a/Documentation/driver-api/dmaengine/provider.rst
+++ b/Documentation/driver-api/dmaengine/provider.rst
@@ -206,6 +206,12 @@ Currently, the types available are:
- The device is able to perform parity check using RAID6 P+Q
algorithm against a memory buffer.
+- DMA_MEMSET
+
+ - The device is able to fill memory with the provided pattern
+
+ - The pattern is treated as a single byte signed value.
+
- DMA_INTERRUPT
- The device is able to trigger a dummy transfer that will
@@ -457,7 +463,7 @@ supported.
- Should use dma_set_residue to report it
- In the case of a cyclic transfer, it should only take into
- account the current period.
+ account the total size of the cyclic buffer.
- Should return DMA_OUT_OF_ORDER if the device does not support in order
completion and is completing the operation out of order.
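For context, a DMA_MEMSET-capable channel is consumed through the existing dmaengine
client API; a minimal client sketch, assuming a platform that wires up such a channel
(the channel name, fill byte and completion strategy below are placeholders, and error
handling is trimmed)::

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>

    /* Sketch: fill a DMA buffer with a single-byte pattern via DMA_MEMSET. */
    static int fill_buffer(struct device *dev, dma_addr_t dst, size_t len)
    {
            struct dma_async_tx_descriptor *tx;
            struct dma_chan *chan;
            dma_cookie_t cookie;

            chan = dma_request_chan(dev, "memset");     /* name is a placeholder */
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            /* The pattern is a single signed byte, here 0x5a. */
            tx = dmaengine_prep_dma_memset(chan, dst, 0x5a, len, DMA_PREP_INTERRUPT);
            if (!tx) {
                    dma_release_channel(chan);
                    return -EIO;
            }

            cookie = dmaengine_submit(tx);
            dma_async_issue_pending(chan);
            dma_sync_wait(chan, cookie);        /* simplest possible completion wait */

            dma_release_channel(chan);
            return 0;
    }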
diff --git a/Documentation/driver-api/firmware/fw_upload.rst b/Documentation/driver-api/firmware/fw_upload.rst
new file mode 100644
index 000000000000..76922591e446
--- /dev/null
+++ b/Documentation/driver-api/firmware/fw_upload.rst
@@ -0,0 +1,126 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
+Firmware Upload API
+===================
+
+A device driver that registers with the firmware loader will expose
+persistent sysfs nodes to enable users to initiate firmware updates for
+that device. It is the responsibility of the device driver and/or the
+device itself to perform any validation on the data received. Firmware
+upload uses the same *loading* and *data* sysfs files described in the
+documentation for firmware fallback. It also adds additional sysfs files
+to provide status on the transfer of the firmware image to the device.
+
+Register for firmware upload
+============================
+
+A device driver registers for firmware upload by calling
+firmware_upload_register(). Among the parameter list is a name to
+identify the device under /sys/class/firmware. A user may initiate a
+firmware upload by echoing a 1 to the *loading* sysfs file for the target
+device. Next, the user writes the firmware image to the *data* sysfs
+file. After writing the firmware data, the user echoes 0 to the *loading*
+sysfs file to signal completion. Echoing 0 to *loading* also triggers the
+transfer of the firmware to the lower-level device driver in the context
+of a kernel worker thread.
+
+To use the firmware upload API, write a driver that implements a set of
+ops. The probe function calls firmware_upload_register() and the remove
+function calls firmware_upload_unregister() such as::
+
+ static const struct fw_upload_ops m10bmc_ops = {
+ .prepare = m10bmc_sec_prepare,
+ .write = m10bmc_sec_write,
+ .poll_complete = m10bmc_sec_poll_complete,
+ .cancel = m10bmc_sec_cancel,
+ .cleanup = m10bmc_sec_cleanup,
+ };
+
+ static int m10bmc_sec_probe(struct platform_device *pdev)
+ {
+ const char *fw_name, *truncate;
+ struct m10bmc_sec *sec;
+ struct fw_upload *fwl;
+ unsigned int len;
+
+ sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
+ if (!sec)
+ return -ENOMEM;
+
+ sec->dev = &pdev->dev;
+ sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
+ dev_set_drvdata(&pdev->dev, sec);
+
+ fw_name = dev_name(sec->dev);
+ truncate = strstr(fw_name, ".auto");
+ len = (truncate) ? truncate - fw_name : strlen(fw_name);
+ sec->fw_name = kmemdup_nul(fw_name, len, GFP_KERNEL);
+
+ fwl = firmware_upload_register(sec->dev, sec->fw_name, &m10bmc_ops, sec);
+ if (IS_ERR(fwl)) {
+ dev_err(sec->dev, "Firmware Upload driver failed to start\n");
+ kfree(sec->fw_name);
+ return PTR_ERR(fwl);
+ }
+
+ sec->fwl = fwl;
+ return 0;
+ }
+
+ static int m10bmc_sec_remove(struct platform_device *pdev)
+ {
+ struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev);
+
+ firmware_upload_unregister(sec->fwl);
+ kfree(sec->fw_name);
+ return 0;
+ }
+
+firmware_upload_register
+------------------------
+.. kernel-doc:: drivers/base/firmware_loader/sysfs_upload.c
+ :identifiers: firmware_upload_register
+
+firmware_upload_unregister
+--------------------------
+.. kernel-doc:: drivers/base/firmware_loader/sysfs_upload.c
+ :identifiers: firmware_upload_unregister
+
+Firmware Upload Ops
+-------------------
+.. kernel-doc:: include/linux/firmware.h
+ :identifiers: fw_upload_ops
+
+Firmware Upload Progress Codes
+------------------------------
+The following progress codes are used internally by the firmware loader.
+Corresponding strings are reported through the status sysfs node that
+is described below and are documented in the ABI documentation.
+
+.. kernel-doc:: drivers/base/firmware_loader/sysfs_upload.h
+ :identifiers: fw_upload_prog
+
+Firmware Upload Error Codes
+---------------------------
+The following error codes may be returned by the driver ops in case of
+failure:
+
+.. kernel-doc:: include/linux/firmware.h
+ :identifiers: fw_upload_err
+
+Sysfs Attributes
+================
+
+In addition to the *loading* and *data* sysfs files, there are additional
+sysfs files to monitor the status of the data transfer to the target
+device and to determine the final pass/fail status of the transfer.
+Depending on the device and the size of the firmware image, a firmware
+update could take milliseconds or minutes.
+
+The additional sysfs files are (a usage sketch follows the list):
+
+* status - provides an indication of the progress of a firmware update
+* error - provides error information for a failed firmware update
+* remaining_size - tracks the data transfer portion of an update
+* cancel - echo 1 to this file to cancel the update
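As a hedged sketch of how a user-space tool might watch an in-flight update
(the sysfs path and the one-second poll interval are illustrative; the valid
*status* strings are listed in the ABI documentation), it can simply re-read
*status* until the loader reports it is idle again and then inspect *error*::

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* hypothetical device directory, matching the name passed at registration */
    #define FW_DIR "/sys/class/firmware/example-sec-update.0/"

    static void wait_until_idle(void)
    {
            char buf[64];

            for (;;) {
                    FILE *f = fopen(FW_DIR "status", "r");

                    if (!f || !fgets(buf, sizeof(buf), f)) {
                            if (f)
                                    fclose(f);
                            return;
                    }
                    fclose(f);

                    if (!strncmp(buf, "idle", 4))
                            return;    /* finished; read "error" for the outcome */

                    sleep(1);          /* poll once per second */
            }
    }
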
diff --git a/Documentation/driver-api/firmware/index.rst b/Documentation/driver-api/firmware/index.rst
index 57415d657173..9d2c19dc8e36 100644
--- a/Documentation/driver-api/firmware/index.rst
+++ b/Documentation/driver-api/firmware/index.rst
@@ -8,6 +8,7 @@ Linux Firmware API
core
efi/index
request_firmware
+ fw_upload
other_interfaces
.. only:: subproject and html
diff --git a/Documentation/driver-api/gpio/driver.rst b/Documentation/driver-api/gpio/driver.rst
index a1ddefa1f55f..70ff43ac4fcc 100644
--- a/Documentation/driver-api/gpio/driver.rst
+++ b/Documentation/driver-api/gpio/driver.rst
@@ -429,7 +429,8 @@ call into the core gpiolib code:
static void my_gpio_mask_irq(struct irq_data *d)
{
- struct gpio_chip *gc = irq_desc_get_handler_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
/*
* Perform any necessary action to mask the interrupt,
@@ -437,14 +438,15 @@ call into the core gpiolib code:
* state.
*/
- gpiochip_disable_irq(gc, d->hwirq);
+ gpiochip_disable_irq(gc, hwirq);
}
static void my_gpio_unmask_irq(struct irq_data *d)
{
- struct gpio_chip *gc = irq_desc_get_handler_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- gpiochip_enable_irq(gc, d->hwirq);
+ gpiochip_enable_irq(gc, hwirq);
/*
* Perform any necessary action to unmask the interrupt,
@@ -501,7 +503,8 @@ the interrupt separately and go with it:
static void my_gpio_mask_irq(struct irq_data *d)
{
- struct gpio_chip *gc = irq_desc_get_handler_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
/*
* Perform any necessary action to mask the interrupt,
@@ -509,14 +512,15 @@ the interrupt separately and go with it:
* state.
*/
- gpiochip_disable_irq(gc, d->hwirq);
+ gpiochip_disable_irq(gc, hwirq);
}
static void my_gpio_unmask_irq(struct irq_data *d)
{
- struct gpio_chip *gc = irq_desc_get_handler_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- gpiochip_enable_irq(gc, d->hwirq);
+ gpiochip_enable_irq(gc, hwirq);
/*
* Perform any necessary action to unmask the interrupt,
@@ -576,7 +580,8 @@ In this case the typical set-up will look like this:
static void my_gpio_mask_irq(struct irq_data *d)
{
- struct gpio_chip *gc = irq_desc_get_handler_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
/*
* Perform any necessary action to mask the interrupt,
@@ -584,15 +589,16 @@ In this case the typical set-up will look like this:
* state.
*/
- gpiochip_disable_irq(gc, d->hwirq);
+ gpiochip_disable_irq(gc, hwirq);
irq_mask_mask_parent(d);
}
static void my_gpio_unmask_irq(struct irq_data *d)
{
- struct gpio_chip *gc = irq_desc_get_handler_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- gpiochip_enable_irq(gc, d->hwirq);
+ gpiochip_enable_irq(gc, hwirq);
/*
* Perform any necessary action to unmask the interrupt,
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index a7b0223e2886..d76a60d95b58 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -101,6 +101,7 @@ available subsections can be seen below.
surface_aggregator/index
switchtec
sync_file
+ tty/index
vfio-mediated-device
vfio
vfio-pci-device-specific-driver-acceptance
diff --git a/Documentation/driver-api/pwm.rst b/Documentation/driver-api/pwm.rst
index ccb06e485756..fd26c3d895b6 100644
--- a/Documentation/driver-api/pwm.rst
+++ b/Documentation/driver-api/pwm.rst
@@ -49,6 +49,12 @@ After being requested, a PWM has to be configured using::
This API controls both the PWM period/duty_cycle config and the
enable/disable state.
+
+As a consumer, don't rely on the output's state for a disabled PWM. If it's
+easily possible, drivers are supposed to emit the inactive state, but some
+drivers cannot. If you rely on getting the inactive state, use .duty_cycle=0,
+.enabled=true.
+
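A minimal consumer-side sketch of that recommendation (assuming a PWM device
that has already been requested into ``pwm``) could look like::

    struct pwm_state state;

    pwm_get_state(pwm, &state);
    state.duty_cycle = 0;      /* request the inactive level ... */
    state.enabled = true;      /* ... while keeping the PWM enabled */
    pwm_apply_state(pwm, &state);
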
There is also a usage_power setting: If set, the PWM driver is only required to
maintain the power output but has more freedom regarding signal form.
If supported by the driver, the signal can be optimized, for example to improve
diff --git a/Documentation/driver-api/serial/driver.rst b/Documentation/driver-api/serial/driver.rst
index 06ec04ba086f..7ef83fd3917b 100644
--- a/Documentation/driver-api/serial/driver.rst
+++ b/Documentation/driver-api/serial/driver.rst
@@ -311,7 +311,7 @@ hardware.
This call must not sleep
set_ldisc(port,termios)
- Notifier for discipline change. See Documentation/tty/tty_ldisc.rst.
+ Notifier for discipline change. See ../tty/tty_ldisc.rst.
Locking: caller holds tty_port->mutex
diff --git a/Documentation/driver-api/serial/index.rst b/Documentation/driver-api/serial/index.rst
index 7eb21a695fc3..03a55b987a1d 100644
--- a/Documentation/driver-api/serial/index.rst
+++ b/Documentation/driver-api/serial/index.rst
@@ -16,8 +16,6 @@ Serial drivers
.. toctree::
:maxdepth: 1
- moxa-smartio
- n_gsm
serial-iso7816
serial-rs485
diff --git a/Documentation/driver-api/serial/n_gsm.rst b/Documentation/driver-api/serial/n_gsm.rst
deleted file mode 100644
index 49956509ad73..000000000000
--- a/Documentation/driver-api/serial/n_gsm.rst
+++ /dev/null
@@ -1,159 +0,0 @@
-==============================
-GSM 0710 tty multiplexor HOWTO
-==============================
-
-This line discipline implements the GSM 07.10 multiplexing protocol
-detailed in the following 3GPP document:
-
- https://www.3gpp.org/ftp/Specs/archive/07_series/07.10/0710-720.zip
-
-This document give some hints on how to use this driver with GPRS and 3G
-modems connected to a physical serial port.
-
-How to use it
--------------
-1. config initiator
-^^^^^^^^^^^^^^^^^^^^^
-
-1.1 initialize the modem in 0710 mux mode (usually AT+CMUX= command) through
- its serial port. Depending on the modem used, you can pass more or less
- parameters to this command.
-
-1.2 switch the serial line to using the n_gsm line discipline by using
- TIOCSETD ioctl.
-
-1.3 configure the mux using GSMIOC_GETCONF / GSMIOC_SETCONF ioctl.
-
-1.4 obtain base gsmtty number for the used serial port.
-
-Major parts of the initialization program :
-(a good starting point is util-linux-ng/sys-utils/ldattach.c)::
-
- #include <stdio.h>
- #include <stdint.h>
- #include <linux/gsmmux.h>
- #include <linux/tty.h>
- #define DEFAULT_SPEED B115200
- #define SERIAL_PORT /dev/ttyS0
-
- int ldisc = N_GSM0710;
- struct gsm_config c;
- struct termios configuration;
- uint32_t first;
-
- /* open the serial port connected to the modem */
- fd = open(SERIAL_PORT, O_RDWR | O_NOCTTY | O_NDELAY);
-
- /* configure the serial port : speed, flow control ... */
-
- /* send the AT commands to switch the modem to CMUX mode
- and check that it's successful (should return OK) */
- write(fd, "AT+CMUX=0\r", 10);
-
- /* experience showed that some modems need some time before
- being able to answer to the first MUX packet so a delay
- may be needed here in some case */
- sleep(3);
-
- /* use n_gsm line discipline */
- ioctl(fd, TIOCSETD, &ldisc);
-
- /* get n_gsm configuration */
- ioctl(fd, GSMIOC_GETCONF, &c);
- /* we are initiator and need encoding 0 (basic) */
- c.initiator = 1;
- c.encapsulation = 0;
- /* our modem defaults to a maximum size of 127 bytes */
- c.mru = 127;
- c.mtu = 127;
- /* set the new configuration */
- ioctl(fd, GSMIOC_SETCONF, &c);
- /* get first gsmtty device node */
- ioctl(fd, GSMIOC_GETFIRST, &first);
- printf("first muxed line: /dev/gsmtty%i\n", first);
-
- /* and wait for ever to keep the line discipline enabled */
- daemon(0,0);
- pause();
-
-1.5 use these devices as plain serial ports.
-
- for example, it's possible:
-
- - and to use gnokii to send / receive SMS on ttygsm1
- - to use ppp to establish a datalink on ttygsm2
-
-1.6 first close all virtual ports before closing the physical port.
-
- Note that after closing the physical port the modem is still in multiplexing
- mode. This may prevent a successful re-opening of the port later. To avoid
- this situation either reset the modem if your hardware allows that or send
- a disconnect command frame manually before initializing the multiplexing mode
- for the second time. The byte sequence for the disconnect command frame is::
-
- 0xf9, 0x03, 0xef, 0x03, 0xc3, 0x16, 0xf9.
-
-2. config requester
-^^^^^^^^^^^^^^^^^^^^^
-
-2.1 receive string "AT+CMUX= command" through its serial port,initialize
- mux mode config
-
-2.2 switch the serial line to using the n_gsm line discipline by using
- TIOCSETD ioctl.
-
-2.3 configure the mux using GSMIOC_GETCONF / GSMIOC_SETCONF ioctl.
-
-2.4 obtain base gsmtty number for the used serial port::
-
- #include <stdio.h>
- #include <stdint.h>
- #include <linux/gsmmux.h>
- #include <linux/tty.h>
- #define DEFAULT_SPEED B115200
- #define SERIAL_PORT /dev/ttyS0
-
- int ldisc = N_GSM0710;
- struct gsm_config c;
- struct termios configuration;
- uint32_t first;
-
- /* open the serial port */
- fd = open(SERIAL_PORT, O_RDWR | O_NOCTTY | O_NDELAY);
-
- /* configure the serial port : speed, flow control ... */
-
- /* get serial data and check "AT+CMUX=command" parameter ... */
-
- /* use n_gsm line discipline */
- ioctl(fd, TIOCSETD, &ldisc);
-
- /* get n_gsm configuration */
- ioctl(fd, GSMIOC_GETCONF, &c);
- /* we are requester and need encoding 0 (basic) */
- c.initiator = 0;
- c.encapsulation = 0;
- /* our modem defaults to a maximum size of 127 bytes */
- c.mru = 127;
- c.mtu = 127;
- /* set the new configuration */
- ioctl(fd, GSMIOC_SETCONF, &c);
- /* get first gsmtty device node */
- ioctl(fd, GSMIOC_GETFIRST, &first);
- printf("first muxed line: /dev/gsmtty%i\n", first);
-
- /* and wait for ever to keep the line discipline enabled */
- daemon(0,0);
- pause();
-
-Additional Documentation
-------------------------
-More practical details on the protocol and how it's supported by industrial
-modems can be found in the following documents :
-
-- http://www.telit.com/module/infopool/download.php?id=616
-- http://www.u-blox.com/images/downloads/Product_Docs/LEON-G100-G200-MuxImplementation_ApplicationNote_%28GSM%20G1-CS-10002%29.pdf
-- http://www.sierrawireless.com/Support/Downloads/AirPrime/WMP_Series/~/media/Support_Downloads/AirPrime/Application_notes/CMUX_Feature_Application_Note-Rev004.ashx
-- http://wm.sim.com/sim/News/photo/2010721161442.pdf
-
-11-03-08 - Eric Bénard - <eric@eukrea.com>
diff --git a/Documentation/tty/index.rst b/Documentation/driver-api/tty/index.rst
index 21ea0cb21e55..2d32606a4278 100644
--- a/Documentation/tty/index.rst
+++ b/Documentation/driver-api/tty/index.rst
@@ -36,18 +36,16 @@ In-detail description of the named TTY structures is in separate documents:
tty_struct
tty_ldisc
tty_buffer
- n_tty
tty_internals
Writing TTY Driver
==================
Before one starts writing a TTY driver, they must consider
-:doc:`Serial <../driver-api/serial/driver>` and :doc:`USB Serial
-<../usb/usb-serial>` layers
-first. Drivers for serial devices can often use one of these specific layers to
-implement a serial driver. Only special devices should be handled directly by
-the TTY Layer. If you are about to write such a driver, read on.
+:doc:`Serial <../serial/driver>` and :doc:`USB Serial <../../usb/usb-serial>`
+layers first. Drivers for serial devices can often use one of these specific
+layers to implement a serial driver. Only special devices should be handled
+directly by the TTY Layer. If you are about to write such a driver, read on.
A *typical* sequence a TTY driver performs is as follows:
@@ -61,3 +59,15 @@ A *typical* sequence a TTY driver performs is as follows:
Steps regarding driver, i.e. 1., 3., and 5. are described in detail in
:doc:`tty_driver`. For the other two (devices handling), look into
:doc:`tty_port`.
+
+Other Documentation
+===================
+
+Miscellaneous documentation can be further found in these documents:
+
+.. toctree::
+ :maxdepth: 2
+
+ moxa-smartio
+ n_gsm
+ n_tty
diff --git a/Documentation/driver-api/serial/moxa-smartio.rst b/Documentation/driver-api/tty/moxa-smartio.rst
index af25bc5cc3e6..af25bc5cc3e6 100644
--- a/Documentation/driver-api/serial/moxa-smartio.rst
+++ b/Documentation/driver-api/tty/moxa-smartio.rst
diff --git a/Documentation/driver-api/tty/n_gsm.rst b/Documentation/driver-api/tty/n_gsm.rst
new file mode 100644
index 000000000000..35d7381515b0
--- /dev/null
+++ b/Documentation/driver-api/tty/n_gsm.rst
@@ -0,0 +1,153 @@
+==============================
+GSM 0710 tty multiplexor HOWTO
+==============================
+
+.. contents:: :local:
+
+This line discipline implements the GSM 07.10 multiplexing protocol
+detailed in the following 3GPP document:
+
+ https://www.3gpp.org/ftp/Specs/archive/07_series/07.10/0710-720.zip
+
+This document gives some hints on how to use this driver with GPRS and 3G
+modems connected to a physical serial port.
+
+How to use it
+=============
+
+Config Initiator
+----------------
+
+#. Initialize the modem in 0710 mux mode (usually ``AT+CMUX=`` command) through
+ its serial port. Depending on the modem used, you can pass more or less
+ parameters to this command.
+
+#. Switch the serial line to using the n_gsm line discipline by using
+ ``TIOCSETD`` ioctl.
+
+#. Configure the mux using ``GSMIOC_GETCONF``/``GSMIOC_SETCONF`` ioctl.
+
+#. Obtain base gsmtty number for the used serial port.
+
+ Major parts of the initialization program
+ (a good starting point is util-linux-ng/sys-utils/ldattach.c)::
+
+ #include <stdio.h>
+ #include <stdint.h>
+ #include <linux/gsmmux.h>
+ #include <linux/tty.h>
+
+ #define DEFAULT_SPEED B115200
+ #define SERIAL_PORT /dev/ttyS0
+
+ int ldisc = N_GSM0710;
+ struct gsm_config c;
+ struct termios configuration;
+ uint32_t first;
+
+ /* open the serial port connected to the modem */
+ fd = open(SERIAL_PORT, O_RDWR | O_NOCTTY | O_NDELAY);
+
+ /* configure the serial port : speed, flow control ... */
+
+ /* send the AT commands to switch the modem to CMUX mode
+ and check that it's successful (should return OK) */
+ write(fd, "AT+CMUX=0\r", 10);
+
+ /* experience showed that some modems need some time before
+ being able to answer to the first MUX packet so a delay
+ may be needed here in some case */
+ sleep(3);
+
+ /* use n_gsm line discipline */
+ ioctl(fd, TIOCSETD, &ldisc);
+
+ /* get n_gsm configuration */
+ ioctl(fd, GSMIOC_GETCONF, &c);
+ /* we are initiator and need encoding 0 (basic) */
+ c.initiator = 1;
+ c.encapsulation = 0;
+ /* our modem defaults to a maximum size of 127 bytes */
+ c.mru = 127;
+ c.mtu = 127;
+ /* set the new configuration */
+ ioctl(fd, GSMIOC_SETCONF, &c);
+ /* get first gsmtty device node */
+ ioctl(fd, GSMIOC_GETFIRST, &first);
+ printf("first muxed line: /dev/gsmtty%i\n", first);
+
+ /* and wait for ever to keep the line discipline enabled */
+ daemon(0,0);
+ pause();
+
+#. Use these devices as plain serial ports.
+
+ For example, it's possible:
+
+ - to use *gnokii* to send / receive SMS on ``ttygsm1``
+ - to use *ppp* to establish a datalink on ``ttygsm2``
+
+#. First close all virtual ports before closing the physical port.
+
+ Note that after closing the physical port the modem is still in multiplexing
+ mode. This may prevent a successful re-opening of the port later. To avoid
+ this situation either reset the modem if your hardware allows that or send
+ a disconnect command frame manually before initializing the multiplexing mode
+ for the second time. The byte sequence for the disconnect command frame is::
+
+ 0xf9, 0x03, 0xef, 0x03, 0xc3, 0x16, 0xf9
+
+Config Requester
+----------------
+
+#. Receive the ``AT+CMUX=`` command through the serial port and initialize the
+ mux mode config.
+
+#. Switch the serial line to using the *n_gsm* line discipline by using
+ ``TIOCSETD`` ioctl.
+
+#. Configure the mux using ``GSMIOC_GETCONF``/``GSMIOC_SETCONF`` ioctl.
+
+#. Obtain base gsmtty number for the used serial port::
+
+ #include <stdio.h>
+ #include <stdint.h>
+ #include <linux/gsmmux.h>
+ #include <linux/tty.h>
+ #define DEFAULT_SPEED B115200
+ #define SERIAL_PORT /dev/ttyS0
+
+ int ldisc = N_GSM0710;
+ struct gsm_config c;
+ struct termios configuration;
+ uint32_t first;
+
+ /* open the serial port */
+ fd = open(SERIAL_PORT, O_RDWR | O_NOCTTY | O_NDELAY);
+
+ /* configure the serial port : speed, flow control ... */
+
+ /* get serial data and check "AT+CMUX=command" parameter ... */
+
+ /* use n_gsm line discipline */
+ ioctl(fd, TIOCSETD, &ldisc);
+
+ /* get n_gsm configuration */
+ ioctl(fd, GSMIOC_GETCONF, &c);
+ /* we are requester and need encoding 0 (basic) */
+ c.initiator = 0;
+ c.encapsulation = 0;
+ /* our modem defaults to a maximum size of 127 bytes */
+ c.mru = 127;
+ c.mtu = 127;
+ /* set the new configuration */
+ ioctl(fd, GSMIOC_SETCONF, &c);
+ /* get first gsmtty device node */
+ ioctl(fd, GSMIOC_GETFIRST, &first);
+ printf("first muxed line: /dev/gsmtty%i\n", first);
+
+ /* and wait for ever to keep the line discipline enabled */
+ daemon(0,0);
+ pause();
+
+11-03-08 - Eric Bénard - <eric@eukrea.com>
diff --git a/Documentation/tty/n_tty.rst b/Documentation/driver-api/tty/n_tty.rst
index 15b70faee72d..15b70faee72d 100644
--- a/Documentation/tty/n_tty.rst
+++ b/Documentation/driver-api/tty/n_tty.rst
diff --git a/Documentation/tty/tty_buffer.rst b/Documentation/driver-api/tty/tty_buffer.rst
index a39d4781e0d2..a39d4781e0d2 100644
--- a/Documentation/tty/tty_buffer.rst
+++ b/Documentation/driver-api/tty/tty_buffer.rst
diff --git a/Documentation/tty/tty_driver.rst b/Documentation/driver-api/tty/tty_driver.rst
index cc529f863406..cc529f863406 100644
--- a/Documentation/tty/tty_driver.rst
+++ b/Documentation/driver-api/tty/tty_driver.rst
diff --git a/Documentation/tty/tty_internals.rst b/Documentation/driver-api/tty/tty_internals.rst
index d0d415820300..d0d415820300 100644
--- a/Documentation/tty/tty_internals.rst
+++ b/Documentation/driver-api/tty/tty_internals.rst
diff --git a/Documentation/tty/tty_ldisc.rst b/Documentation/driver-api/tty/tty_ldisc.rst
index 5144751be804..5144751be804 100644
--- a/Documentation/tty/tty_ldisc.rst
+++ b/Documentation/driver-api/tty/tty_ldisc.rst
diff --git a/Documentation/tty/tty_port.rst b/Documentation/driver-api/tty/tty_port.rst
index 5cb90e954fcf..5cb90e954fcf 100644
--- a/Documentation/tty/tty_port.rst
+++ b/Documentation/driver-api/tty/tty_port.rst
diff --git a/Documentation/tty/tty_struct.rst b/Documentation/driver-api/tty/tty_struct.rst
index c72f5a4293b2..c72f5a4293b2 100644
--- a/Documentation/tty/tty_struct.rst
+++ b/Documentation/driver-api/tty/tty_struct.rst
diff --git a/Documentation/driver-api/vfio-mediated-device.rst b/Documentation/driver-api/vfio-mediated-device.rst
index 784bbeb22adc..eded8719180f 100644
--- a/Documentation/driver-api/vfio-mediated-device.rst
+++ b/Documentation/driver-api/vfio-mediated-device.rst
@@ -262,10 +262,10 @@ Translation APIs for Mediated Devices
The following APIs are provided for translating user pfn to host pfn in a VFIO
driver::
- extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
+ int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
int npage, int prot, unsigned long *phys_pfn);
- extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
+ int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
int npage);
These functions call back into the back-end IOMMU module by using the pin_pages
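As a hedged illustration (``mdev_state``, its embedded ``vdev`` and the guest
frame number ``gfn`` are hypothetical), a vendor driver might pin one guest
page for DMA and later release it like this::

    unsigned long user_pfn = gfn;
    unsigned long phys_pfn;
    int ret;

    ret = vfio_pin_pages(&mdev_state->vdev, &user_pfn, 1,
                         IOMMU_READ | IOMMU_WRITE, &phys_pfn);
    if (ret != 1)
            return ret < 0 ? ret : -EFAULT;

    /* ... access the guest page through phys_pfn ... */

    vfio_unpin_pages(&mdev_state->vdev, &user_pfn, 1);
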
diff --git a/Documentation/filesystems/erofs.rst b/Documentation/filesystems/erofs.rst
index bef6d3040ce4..05e03d54af1a 100644
--- a/Documentation/filesystems/erofs.rst
+++ b/Documentation/filesystems/erofs.rst
@@ -1,63 +1,82 @@
.. SPDX-License-Identifier: GPL-2.0
======================================
-Enhanced Read-Only File System - EROFS
+EROFS - Enhanced Read-Only File System
======================================
Overview
========
-EROFS file-system stands for Enhanced Read-Only File System. Different
-from other read-only file systems, it aims to be designed for flexibility,
-scalability, but be kept simple and high performance.
+EROFS filesystem stands for Enhanced Read-Only File System. It aims to form a
+generic read-only filesystem solution for various read-only use cases instead
+of just focusing on storage space saving without considering any side effects
+of runtime performance.
-It is designed as a better filesystem solution for the following scenarios:
+It is designed to meet the needs of flexibility, feature extendability and
+user-payload friendliness. Apart from those, it is still kept as a simple,
+random-access friendly, high-performance filesystem to avoid unneeded I/O
+amplification and memory-resident overhead compared to similar approaches.
+
+It is implemented to be a better choice for the following scenarios:
- read-only storage media or
- part of a fully trusted read-only solution, which means it needs to be
immutable and bit-for-bit identical to the official golden image for
- their releases due to security and other considerations and
+ their releases due to security or other considerations and
- hope to minimize extra storage space with guaranteed end-to-end performance
by using compact layout, transparent file compression and direct access,
especially for those embedded devices with limited memory and high-density
- hosts with numerous containers;
+ hosts with numerous containers.
Here is the main features of EROFS:
- Little endian on-disk design;
- - Currently 4KB block size (nobh) and therefore maximum 16TB address space;
-
- - Metadata & data could be mixed by design;
+ - 4KiB block size and 32-bit block addresses, therefore 16TiB address space
+ at most for now;
- - 2 inode versions for different requirements:
+ - Two inode layouts for different requirements:
- ===================== ============ =====================================
+ ===================== ============ ======================================
compact (v1) extended (v2)
- ===================== ============ =====================================
+ ===================== ============ ======================================
Inode metadata size 32 bytes 64 bytes
- Max file size 4 GB 16 EB (also limited by max. vol size)
+ Max file size 4 GiB 16 EiB (also limited by max. vol size)
Max uids/gids 65536 4294967296
Per-inode timestamp no yes (64 + 32-bit timestamp)
Max hardlinks 65536 4294967296
- Metadata reserved 4 bytes 14 bytes
- ===================== ============ =====================================
+ Metadata reserved 8 bytes 18 bytes
+ ===================== ============ ======================================
+
+ - Metadata and data could be mixed as an option;
- Support extended attributes (xattrs) as an option;
- - Support xattr inline and tail-end data inline for all files;
+ - Support inline tail-packed data and inline xattrs, compared to byte-addressed
+ unaligned metadata or smaller block size alternatives;
- Support POSIX.1e ACLs by using xattrs;
- Support transparent data compression as an option:
- LZ4 algorithm with the fixed-sized output compression for high performance;
+ LZ4 and MicroLZMA algorithms can be used on a per-file basis; in addition,
+ in-place decompression is also supported to avoid bounce buffers for
+ compressed data and page cache thrashing.
+
+ - Support direct I/O on uncompressed files to avoid double caching for loop
+ devices;
- - Multiple device support for multi-layer container images.
+ - Support FSDAX on uncompressed images for secure containers and ramdisks in
+ order to get rid of unnecessary page cache.
+
+ - Support multiple devices for multi blob container images;
+
+ - Support file-based on-demand loading with the Fscache infrastructure.
The following git tree provides the file system user-space tools under
-development (ex, formatting tool mkfs.erofs):
+development, such as a formatting tool (mkfs.erofs), an on-disk consistency &
+compatibility checking tool (fsck.erofs), and a debugging tool (dump.erofs):
- git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git
@@ -91,6 +110,7 @@ dax={always,never} Use direct access (no page cache). See
Documentation/filesystems/dax.rst.
dax A legacy option which is an alias for ``dax=always``.
device=%s Specify a path to an extra device to be used together.
+fsid=%s Specify a filesystem image ID for Fscache back-end.
=================== =========================================================
Sysfs Entries
@@ -226,8 +246,8 @@ Note that apart from the offset of the first filename, nameoff0 also indicates
the total number of directory entries in this block since it is no need to
introduce another on-disk field at all.
-Chunk-based file
-----------------
+Chunk-based files
+-----------------
In order to support chunk-based data deduplication, a new inode data layout has
been supported since Linux v5.15: Files are split in equal-sized data chunks
with ``extents`` area of the inode metadata indicating how to get the chunk
diff --git a/Documentation/filesystems/nfs/client-identifier.rst b/Documentation/filesystems/nfs/client-identifier.rst
new file mode 100644
index 000000000000..5147e15815a1
--- /dev/null
+++ b/Documentation/filesystems/nfs/client-identifier.rst
@@ -0,0 +1,216 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+NFSv4 client identifier
+=======================
+
+This document explains how the NFSv4 protocol identifies client
+instances in order to maintain file open and lock state during
+system restarts. A special identifier and principal are maintained
+on each client. These can be set by administrators, scripts
+provided by site administrators, or tools provided by Linux
+distributors.
+
+There are risks if a client's NFSv4 identifier and its principal
+are not chosen carefully.
+
+
+Introduction
+------------
+
+The NFSv4 protocol uses "lease-based file locking". Leases help
+NFSv4 servers provide file lock guarantees and manage their
+resources.
+
+Simply put, an NFSv4 server creates a lease for each NFSv4 client.
+The server collects each client's file open and lock state under
+the lease for that client.
+
+The client is responsible for periodically renewing its leases.
+While a lease remains valid, the server holding that lease
+guarantees the file locks the client has created remain in place.
+
+If a client stops renewing its lease (for example, if it crashes),
+the NFSv4 protocol allows the server to remove the client's open
+and lock state after a certain period of time. When a client
+restarts, it indicates to servers that open and lock state
+associated with its previous leases is no longer valid and can be
+destroyed immediately.
+
+In addition, each NFSv4 server manages a persistent list of client
+leases. When the server restarts and clients attempt to recover
+their state, the server uses this list to distinguish amongst
+clients that held state before the server restarted and clients
+sending fresh OPEN and LOCK requests. This enables file locks to
+persist safely across server restarts.
+
+NFSv4 client identifiers
+------------------------
+
+Each NFSv4 client presents an identifier to NFSv4 servers so that
+they can associate the client with its lease. Each client's
+identifier consists of two elements:
+
+ - co_ownerid: An arbitrary but fixed string.
+
+ - boot verifier: A 64-bit incarnation verifier that enables a
+ server to distinguish successive boot epochs of the same client.
+
+The NFSv4.0 specification refers to these two items as an
+"nfs_client_id4". The NFSv4.1 specification refers to these two
+items as a "client_owner4".
+
+NFSv4 servers tie this identifier to the principal and security
+flavor that the client used when presenting it. Servers use this
+principal to authorize subsequent lease modification operations
+sent by the client. Effectively this principal is a third element of
+the identifier.
+
+As part of the identity presented to servers, a good
+"co_ownerid" string has several important properties:
+
+ - The "co_ownerid" string identifies the client during reboot
+ recovery, therefore the string is persistent across client
+ reboots.
+ - The "co_ownerid" string helps servers distinguish the client
+ from others, therefore the string is globally unique. Note
+ that there is no central authority that assigns "co_ownerid"
+ strings.
+ - Because it often appears on the network in the clear, the
+ "co_ownerid" string does not reveal private information about
+ the client itself.
+ - The content of the "co_ownerid" string is set and unchanging
+ before the client attempts NFSv4 mounts after a restart.
+ - The NFSv4 protocol places a 1024-byte limit on the size of the
+ "co_ownerid" string.
+
+Protecting NFSv4 lease state
+----------------------------
+
+NFSv4 servers utilize the "client_owner4" as described above to
+assign a unique lease to each client. Under this scheme, there are
+circumstances where clients can interfere with each other. This is
+referred to as "lease stealing".
+
+If distinct clients present the same "co_ownerid" string and use
+the same principal (for example, AUTH_SYS and UID 0), a server is
+unable to tell that the clients are not the same. Each distinct
+client presents a different boot verifier, so it appears to the
+server as if there is one client that is rebooting frequently.
+Neither client can maintain open or lock state in this scenario.
+
+If distinct clients present the same "co_ownerid" string and use
+distinct principals, the server is likely to allow the first client
+to operate normally but reject subsequent clients with the same
+"co_ownerid" string.
+
+If a client's "co_ownerid" string or principal are not stable,
+state recovery after a server or client reboot is not guaranteed.
+If a client unexpectedly restarts but presents a different
+"co_ownerid" string or principal to the server, the server orphans
+the client's previous open and lock state. This blocks access to
+locked files until the server removes the orphaned state.
+
+If the server restarts and a client presents a changed "co_ownerid"
+string or principal to the server, the server will not allow the
+client to reclaim its open and lock state, and may give those locks
+to other clients in the meantime. This is referred to as "lock
+stealing".
+
+Lease stealing and lock stealing increase the potential for denial
+of service and in rare cases even data corruption.
+
+Selecting an appropriate client identifier
+------------------------------------------
+
+By default, the Linux NFSv4 client implementation constructs its
+"co_ownerid" string starting with the words "Linux NFS" followed by
+the client's UTS node name (the same node name, incidentally, that
+is used as the "machine name" in an AUTH_SYS credential). In small
+deployments, this construction is usually adequate. Often, however,
+the node name by itself is not adequately unique, and can change
+unexpectedly. Problematic situations include:
+
+ - NFS-root (diskless) clients, where the local DHCP server (or
+ equivalent) does not provide a unique host name.
+
+ - "Containers" within a single Linux host. If each container has
+ a separate network namespace, but does not use the UTS namespace
+ to provide a unique host name, then there can be multiple NFS
+ client instances with the same host name.
+
+ - Clients across multiple administrative domains that access a
+ common NFS server. If hostnames are not assigned centrally
+ then uniqueness cannot be guaranteed unless a domain name is
+ included in the hostname.
+
+Linux provides two mechanisms to add uniqueness to its "co_ownerid"
+string:
+
+ nfs.nfs4_unique_id
+ This module parameter can set an arbitrary uniquifier string
+ via the kernel command line, or when the "nfs" module is
+ loaded.
+
+ /sys/fs/nfs/client/net/identifier
+ This virtual file, available since Linux 5.3, is local to the
+ network namespace in which it is accessed and so can provide
+ distinction between network namespaces (containers) when the
+ hostname remains uniform.
+
+Note that this file is empty on namespace creation. If the
+container system has access to some sort of per-container identity
+then that uniquifier can be used. For example, a uniquifier might
+be formed at boot using the container's internal identifier::
+
+ sha256sum /etc/machine-id | awk '{print $1}' \\
+ > /sys/fs/nfs/client/net/identifier
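The module parameter mechanism can likewise be fed from the kernel command
line or from a modprobe configuration file; a hedged example (the uniquifier
value below is purely illustrative)::

    # on the kernel command line
    nfs.nfs4_unique_id=0f9b8c1e-3d2a-4e5f-9a7b-6c5d4e3f2a1b

    # or in /etc/modprobe.d/nfsclient.conf
    options nfs nfs4_unique_id=0f9b8c1e-3d2a-4e5f-9a7b-6c5d4e3f2a1b
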
+
+Security considerations
+-----------------------
+
+The use of cryptographic security for lease management operations
+is strongly encouraged.
+
+If NFS with Kerberos is not configured, a Linux NFSv4 client uses
+AUTH_SYS and UID 0 as the principal part of its client identity.
+This configuration is not only insecure, it increases the risk of
+lease and lock stealing. However, it might be the only choice for
+client configurations that have no local persistent storage.
+"co_ownerid" string uniqueness and persistence is critical in this
+case.
+
+When a Kerberos keytab is present on a Linux NFS client, the client
+attempts to use one of the principals in that keytab when
+identifying itself to servers. The "sec=" mount option does not
+control this behavior. Alternately, a single-user client with a
+Kerberos principal can use that principal in place of the client's
+host principal.
+
+Using Kerberos for this purpose enables the client and server to
+use the same lease for operations covered by all "sec=" settings.
+Additionally, the Linux NFS client uses the RPCSEC_GSS security
+flavor with Kerberos and the integrity QOS to prevent in-transit
+modification of lease modification requests.
+
+Additional notes
+----------------
+The Linux NFSv4 client establishes a single lease on each NFSv4
+server it accesses. NFSv4 mounts from a Linux NFSv4 client of a
+particular server then share that lease.
+
+Once a client establishes open and lock state, the NFSv4 protocol
+enables lease state to transition to other servers, following data
+that has been migrated. This hides data migration completely from
+running applications. The Linux NFSv4 client facilitates state
+migration by presenting the same "client_owner4" to all servers it
+encounters.
+
+========
+See Also
+========
+
+ - nfs(5)
+ - kerberos(7)
+ - RFC 7530 for the NFSv4.0 specification
+ - RFC 8881 for the NFSv4.1 specification.
diff --git a/Documentation/filesystems/nfs/index.rst b/Documentation/filesystems/nfs/index.rst
index 288d8ddb2bc6..8536134f31fd 100644
--- a/Documentation/filesystems/nfs/index.rst
+++ b/Documentation/filesystems/nfs/index.rst
@@ -6,6 +6,8 @@ NFS
.. toctree::
:maxdepth: 1
+ client-identifier
+ exporting
pnfs
rpc-cache
rpc-server-gss
diff --git a/Documentation/firmware-guide/acpi/enumeration.rst b/Documentation/firmware-guide/acpi/enumeration.rst
index 6b62425ef9cd..dbb03022b127 100644
--- a/Documentation/firmware-guide/acpi/enumeration.rst
+++ b/Documentation/firmware-guide/acpi/enumeration.rst
@@ -389,6 +389,31 @@ descriptors once the device is released.
See Documentation/firmware-guide/acpi/gpio-properties.rst for more information
about the _DSD binding related to GPIOs.
+RS-485 support
+==============
+
+ACPI _DSD (Device Specific Data) can be used to describe the RS-485 capability
+of a UART.
+
+For example::
+
+ Device (DEV)
+ {
+ ...
+
+ // ACPI 5.1 _DSD used for RS-485 capabilities
+ Name (_DSD, Package ()
+ {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package ()
+ {
+ Package () {"rs485-rts-active-low", Zero},
+ Package () {"rs485-rx-active-high", Zero},
+ Package () {"rs485-rx-during-tx", Zero},
+ }
+ })
+ ...
+
MFD devices
===========
diff --git a/Documentation/fpga/dfl.rst b/Documentation/fpga/dfl.rst
index ef9eec71f6f3..15b670926084 100644
--- a/Documentation/fpga/dfl.rst
+++ b/Documentation/fpga/dfl.rst
@@ -502,6 +502,11 @@ Developer only needs to provide a sub feature driver with matched feature id.
FME Partial Reconfiguration Sub Feature driver (see drivers/fpga/dfl-fme-pr.c)
could be a reference.
+Please refer to the link below for the existing feature id table and the guide
+for applying for new feature ids:
+https://github.com/OPAE/dfl-feature-id
+
+
Location of DFLs on a PCI Device
================================
The original method for finding a DFL on a PCI device assumed the start of the
diff --git a/Documentation/hte/hte.rst b/Documentation/hte/hte.rst
new file mode 100644
index 000000000000..153f3233c100
--- /dev/null
+++ b/Documentation/hte/hte.rst
@@ -0,0 +1,79 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+============================================
+The Linux Hardware Timestamping Engine (HTE)
+============================================
+
+:Author: Dipen Patel
+
+Introduction
+------------
+
+Certain devices have built-in hardware timestamping engines which can
+monitor sets of system signals, lines, buses, etc. in real time for state
+changes; upon detecting a change they can automatically store the timestamp at
+the moment of occurrence. Such functionality may help achieve better accuracy
+in obtaining timestamps than using software counterparts, i.e. ktime and
+friends.
+
+This document describes the API that can be used by hardware timestamping
+engine provider and consumer drivers that want to use the hardware timestamping
+engine (HTE) framework. Both consumers and providers must include
+``#include <linux/hte.h>``.
+
+The HTE framework APIs for the providers
+----------------------------------------
+
+.. kernel-doc:: drivers/hte/hte.c
+ :functions: devm_hte_register_chip hte_push_ts_ns
+
+The HTE framework APIs for the consumers
+----------------------------------------
+
+.. kernel-doc:: drivers/hte/hte.c
+ :functions: hte_init_line_attr hte_ts_get hte_ts_put devm_hte_request_ts_ns hte_request_ts_ns hte_enable_ts hte_disable_ts of_hte_req_count hte_get_clk_src_info
+
+The HTE framework public structures
+-----------------------------------
+.. kernel-doc:: include/linux/hte.h
+
+More on the HTE timestamp data
+------------------------------
+The ``struct hte_ts_data`` is used to pass timestamp details between the
+consumers and the providers. It expresses timestamp data in nanoseconds in
+u64. An example of the typical timestamp data life cycle for a GPIO line is
+as follows::
+
+ - Monitors GPIO line change.
+ - Detects the state change on GPIO line.
+ - Converts timestamps in nanoseconds.
+ - Stores GPIO raw level in raw_level variable if the provider has that
+ hardware capability.
+ - Pushes this hte_ts_data object to HTE subsystem.
+ - HTE subsystem increments seq counter and invokes consumer provided callback.
+ Based on callback return value, the HTE core invokes secondary callback in
+ the thread context.
+
+HTE subsystem debugfs attributes
+--------------------------------
+HTE subsystem creates debugfs attributes at ``/sys/kernel/debug/hte/``.
+It also creates line/signal-related debugfs attributes at
+``/sys/kernel/debug/hte/<provider>/<label or line id>/``. Note that these
+attributes are read-only.
+
+`ts_requested`
+ The total number of entities requested from the given provider,
+ where entity is specified by the provider and could represent
+ lines, GPIO, chip signals, buses etc...
+ The attribute will be available at
+ ``/sys/kernel/debug/hte/<provider>/``.
+
+`total_ts`
+ The total number of entities supported by the provider.
+ The attribute will be available at
+ ``/sys/kernel/debug/hte/<provider>/``.
+
+`dropped_timestamps`
+ The dropped timestamps for a given line.
+ The attribute will be available at
+ ``/sys/kernel/debug/hte/<provider>/<label or line id>/``.
diff --git a/Documentation/hte/index.rst b/Documentation/hte/index.rst
new file mode 100644
index 000000000000..9f43301c05dc
--- /dev/null
+++ b/Documentation/hte/index.rst
@@ -0,0 +1,22 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================================
+The Linux Hardware Timestamping Engine (HTE)
+============================================
+
+The HTE Subsystem
+=================
+
+.. toctree::
+ :maxdepth: 1
+
+ hte
+
+HTE Tegra Provider
+==================
+
+.. toctree::
+ :maxdepth: 1
+
+ tegra194-hte
+
diff --git a/Documentation/hte/tegra194-hte.rst b/Documentation/hte/tegra194-hte.rst
new file mode 100644
index 000000000000..41983e04d2a0
--- /dev/null
+++ b/Documentation/hte/tegra194-hte.rst
@@ -0,0 +1,49 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+HTE Kernel provider driver
+==========================
+
+Description
+-----------
+The Nvidia tegra194 HTE provider driver implements two GTE
+(Generic Timestamping Engine) instances: 1) GPIO GTE and 2) LIC
+(Legacy Interrupt Controller) IRQ GTE. Both GTE instances take their
+timestamps from the system counter TSC, which runs at 31.25 MHz, and the
+driver converts the clock ticks to nanoseconds before storing them as the
+timestamp value.
+
+GPIO GTE
+--------
+
+This GTE instance timestamps GPIOs in real time. For that to happen the GPIO
+needs to be configured as an input. The always-on (AON) GPIO controller
+instance supports timestamping GPIOs in real time and has 39 GPIO lines. The
+GPIO GTE and the AON GPIO controller are tightly coupled: very specific bits
+must be set in the GPIO config register before the GPIO GTE can be used, and
+for that GPIOLIB adds the two optional APIs below. The GPIO GTE code supports
+both kernel and userspace consumers. Kernel space consumers can talk to the
+HTE subsystem directly, while userspace timestamp requests go through the
+GPIOLIB CDEV framework to the HTE subsystem.
+
+.. kernel-doc:: drivers/gpio/gpiolib.c
+ :functions: gpiod_enable_hw_timestamp_ns gpiod_disable_hw_timestamp_ns
+
+For userspace consumers, GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE flag must be
+specified during IOCTL calls. Refer to ``tools/gpio/gpio-event-mon.c``, which
+returns the timestamp in nanoseconds.
+
+LIC (Legacy Interrupt Controller) IRQ GTE
+-----------------------------------------
+
+This GTE instance timestamps LIC IRQ lines in real time. There are 352 IRQ
+lines which this instance can add timestamps to in real time. The hte
+devicetree binding described at ``Documentation/devicetree/bindings/hte/``
+provides an example of how a consumer can request an IRQ line. Since it is a
+one-to-one mapping with IRQ GTE provider, consumers can simply specify the IRQ
+number that they are interested in. There is no userspace consumer support for
+this GTE instance in the HTE framework.
+
+The provider source code of both IRQ and GPIO GTE instances is located at
+``drivers/hte/hte-tegra194.c``. The test driver
+``drivers/hte/hte-tegra194-test.c`` demonstrates HTE API usage for both IRQ
+and GPIO GTE.
diff --git a/Documentation/i2c/writing-clients.rst b/Documentation/i2c/writing-clients.rst
index 978cc8210bf3..e3b126cf4a3b 100644
--- a/Documentation/i2c/writing-clients.rst
+++ b/Documentation/i2c/writing-clients.rst
@@ -46,7 +46,7 @@ driver model device node, and its I2C address.
},
.id_table = foo_idtable,
- .probe = foo_probe,
+ .probe_new = foo_probe,
.remove = foo_remove,
/* if device autodetection is needed: */
.class = I2C_CLASS_SOMETHING,
@@ -155,8 +155,7 @@ those devices, and a remove() method to unbind.
::
- static int foo_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
+ static int foo_probe(struct i2c_client *client);
static int foo_remove(struct i2c_client *client);
Remember that the i2c_driver does not create those client handles. The
@@ -165,8 +164,12 @@ handle may be used during foo_probe(). If foo_probe() reports success
foo_remove() returns. That binding model is used by most Linux drivers.
The probe function is called when an entry in the id_table name field
-matches the device's name. It is passed the entry that was matched so
-the driver knows which one in the table matched.
+matches the device's name. If the probe function needs that entry, it
+can retrieve it using
+
+::
+
+ const struct i2c_device_id *id = i2c_match_id(foo_idtable, client);
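A hedged sketch of a probe_new-style probe that still consults the id_table
(the ``foo_*`` names and the use of ``driver_data`` are illustrative)::

    static int foo_probe(struct i2c_client *client)
    {
            const struct i2c_device_id *id = i2c_match_id(foo_idtable, client);
            struct foo_data *data;

            data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
            if (!data)
                    return -ENOMEM;

            /* id may be NULL if the device was matched by other means */
            data->variant = id ? id->driver_data : 0;
            i2c_set_clientdata(client, data);

            return 0;
    }
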
Device Creation
diff --git a/Documentation/COPYING-logo b/Documentation/images/COPYING-logo
index b21c7cf7d9f6..6a441d453cb5 100644
--- a/Documentation/COPYING-logo
+++ b/Documentation/images/COPYING-logo
@@ -11,3 +11,11 @@ Larry's web-page:
https://www.isc.tamu.edu/~lewing/linux/
+The SVG version was re-illustrated in vector by Garrett LeSage and
+refined and cleaned up by IFo Hancroft. It is also freely usable
+as long as you acknowledge Larry, Garrett and IFo as above.
+
+There are also black-and-white and inverted vector versions at
+Garrett's repository:
+
+ https://github.com/garrett/Tux
diff --git a/Documentation/logo.gif b/Documentation/images/logo.gif
index 2eae75fecfb9..2eae75fecfb9 100644
--- a/Documentation/logo.gif
+++ b/Documentation/images/logo.gif
Binary files differ
diff --git a/Documentation/images/logo.svg b/Documentation/images/logo.svg
new file mode 100644
index 000000000000..58a6881c74b6
--- /dev/null
+++ b/Documentation/images/logo.svg
@@ -0,0 +1,2040 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ inkscape:export-ydpi="300"
+ inkscape:export-xdpi="300"
+ inkscape:export-filename="./tux-large.png"
+ sodipodi:docname="tux.svg"
+ inkscape:version="1.1-dev (79c9cd5f54, 2020-05-04)"
+ version="1.1"
+ id="svg27450"
+ height="304.18802"
+ width="249.14903">
+ <title
+ id="title20046">Tux</title>
+ <sodipodi:namedview
+ showborder="true"
+ inkscape:showpageshadow="false"
+ inkscape:guide-bbox="true"
+ showguides="true"
+ fit-margin-bottom="0"
+ fit-margin-right="0"
+ fit-margin-left="0"
+ fit-margin-top="0"
+ inkscape:window-maximized="0"
+ inkscape:window-y="0"
+ inkscape:window-x="0"
+ inkscape:window-height="1080"
+ inkscape:window-width="1920"
+ showgrid="false"
+ inkscape:current-layer="layer2"
+ inkscape:document-units="px"
+ inkscape:cy="145.53575"
+ inkscape:cx="157.20051"
+ inkscape:zoom="2.8284271"
+ inkscape:pageshadow="2"
+ inkscape:pageopacity="0.0"
+ borderopacity="0.41176471"
+ bordercolor="#666666"
+ pagecolor="#ffffff"
+ id="base" />
+ <defs
+ id="defs27452">
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect826"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect408"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect396"
+ effect="spiro" />
+ <linearGradient
+ id="linearGradient28799-4">
+ <stop
+ style="stop-color:#fefefc;stop-opacity:1;"
+ offset="0"
+ id="stop28801-13" />
+ <stop
+ id="stop28807-8"
+ offset="0.75733864"
+ style="stop-color:#fefefc;stop-opacity:1" />
+ <stop
+ style="stop-color:#d4d4d4;stop-opacity:1"
+ offset="1"
+ id="stop28803-8" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient28799-5-4">
+ <stop
+ style="stop-color:#fefefc;stop-opacity:1;"
+ offset="0"
+ id="stop28801-1-2" />
+ <stop
+ id="stop28807-2-5"
+ offset="0.75733864"
+ style="stop-color:#fefefc;stop-opacity:1" />
+ <stop
+ style="stop-color:#d4d4d4;stop-opacity:1"
+ offset="1"
+ id="stop28803-6-6" />
+ </linearGradient>
+ <clipPath
+ id="clipPath14186-9"
+ clipPathUnits="userSpaceOnUse">
+ <path
+ style="fill:#402c07;fill-opacity:1;stroke:none"
+ d="m 137.57703,281.0191 c 1.59929,-0.66295 3.3982,-0.78361 5.10074,-0.46963 1.70253,0.31398 3.31141,1.04948 4.74342,2.02239 2.86402,1.94583 4.98821,4.77774 7.02263,7.57952 4.67189,6.43406 9.16868,13.00227 13.24488,19.8293 3.30635,5.53766 6.34352,11.25685 10.16415,16.45304 2.49398,3.3919 5.3066,6.53947 7.813,9.92221 2.50639,3.38273 4.72794,7.05586 5.83931,11.11662 1.44411,5.27653 0.88463,11.09291 -1.62666,15.95302 -1.76663,3.41896 -4.47646,6.35228 -7.77242,8.33898 -3.29595,1.9867 -7.17064,3.01444 -11.01635,2.87021 -6.11413,-0.2293 -11.69944,-3.28515 -17.38362,-5.54906 -11.58097,-4.6125 -24.15978,-6.0594 -36.09666,-9.65174 -3.66859,-1.10404 -7.27582,-2.4107 -10.96988,-3.42629 -1.64125,-0.45122 -3.30866,-0.8482 -4.85875,-1.55144 -1.55008,-0.70325 -2.999548,-1.7491 -3.86171,-3.21675 -0.666391,-1.13439 -0.948386,-2.47002 -0.930187,-3.78554 0.0182,-1.31552 0.325889,-2.61453 0.773815,-3.85158 0.895851,-2.47409 2.343262,-4.71374 3.320162,-7.15696 1.59511,-3.98935 1.88169,-8.38839 1.66657,-12.67942 -0.21511,-4.29103 -0.91078,-8.54478 -1.20454,-12.83115 -0.13118,-1.91406 -0.18066,-3.85256 0.18479,-5.73598 0.36545,-1.88343 1.17577,-3.72459 2.55771,-5.05541 1.27406,-1.22693 2.96492,-1.95531 4.69643,-2.31651 1.73151,-0.3612 3.51533,-0.37747 5.28367,-0.33762 1.76833,0.0399 3.54067,0.13425 5.30351,-0.0106 1.76284,-0.14488 3.53347,-0.54055 5.06911,-1.41828 1.45996,-0.83447 2.65433,-2.0745 3.64374,-3.43424 0.9894,-1.35974 1.78909,-2.84573 2.60891,-4.31396 0.81983,-1.46823 1.66834,-2.93151 2.74157,-4.22611 1.07324,-1.2946 2.38923,-2.42304 3.94266,-3.06698"
+ id="path14188-1"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ssssssssssssssss" />
+ </clipPath>
+ <linearGradient
+ id="linearGradient14132-6"
+ inkscape:collect="always">
+ <stop
+ id="stop14134-2"
+ offset="0"
+ style="stop-color:#b98309;stop-opacity:1" />
+ <stop
+ id="stop14136-2"
+ offset="1"
+ style="stop-color:#382605;stop-opacity:1" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4669-1"
+ is_visible="true" />
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.2169911"
+ y="-0.10849553"
+ width="1.215018"
+ x="-0.10750898"
+ id="filter14148-8"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur14150-8"
+ stdDeviation="3.9237191"
+ inkscape:collect="always" />
+ </filter>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4669-4-3"
+ is_visible="true" />
+ <filter
+ style="color-interpolation-filters:sRGB"
+ id="filter14140-3"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur14142-5"
+ stdDeviation="2.4365744"
+ inkscape:collect="always" />
+ </filter>
+ <linearGradient
+ id="linearGradient14168-5"
+ inkscape:collect="always">
+ <stop
+ id="stop14170-8"
+ offset="0"
+ style="stop-color:#ebc40c;stop-opacity:1;" />
+ <stop
+ id="stop14172-0"
+ offset="1"
+ style="stop-color:#ebc40c;stop-opacity:0;" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ id="filter14176-5"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur14178-0"
+ stdDeviation="0.3702557"
+ inkscape:collect="always" />
+ </filter>
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect14972-2-2"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect15017-9"
+ effect="spiro" />
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.0853395"
+ y="-0.042669769"
+ width="1.2020705"
+ x="-0.10103524"
+ id="filter15053-7"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur15055-3"
+ stdDeviation="1.1322032"
+ inkscape:collect="always" />
+ </filter>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect29716-6-1"
+ is_visible="true" />
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient14830-4">
+ <stop
+ style="stop-color:#7c7c7c;stop-opacity:1;"
+ offset="0"
+ id="stop14832-6" />
+ <stop
+ style="stop-color:#7c7c7c;stop-opacity:0.32941177"
+ offset="1"
+ id="stop14834-5" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.3012044"
+ y="-0.15060219"
+ width="1.1409113"
+ x="-0.070455663"
+ id="filter14812-5"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur14814-2"
+ stdDeviation="1.0252435"
+ inkscape:collect="always" />
+ </filter>
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient14830-8-1">
+ <stop
+ style="stop-color:#7c7c7c;stop-opacity:1;"
+ offset="0"
+ id="stop14832-2-5" />
+ <stop
+ style="stop-color:#7c7c7c;stop-opacity:0.32941177"
+ offset="1"
+ id="stop14834-8-0" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.3012044"
+ y="-0.15060219"
+ width="1.1409113"
+ x="-0.070455663"
+ id="filter14812-0-9"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur14814-6-7"
+ stdDeviation="1.0252435"
+ inkscape:collect="always" />
+ </filter>
+ <linearGradient
+ id="linearGradient14518-6">
+ <stop
+ style="stop-color:#110800;stop-opacity:1;"
+ offset="0"
+ id="stop14540-0" />
+ <stop
+ id="stop14542-5"
+ offset="0.59066743"
+ style="stop-color:#a65a00;stop-opacity:0.80000001;" />
+ <stop
+ id="stop14522-4"
+ offset="1"
+ style="stop-color:#ff921e;stop-opacity:0;" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect29716-0-0"
+ is_visible="true" />
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.4681805"
+ y="-0.23409025"
+ width="1.4010103"
+ x="-0.20050517"
+ id="filter14897-2"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur14899-5"
+ stdDeviation="2.8444356"
+ inkscape:collect="always" />
+ </filter>
+ <linearGradient
+ id="linearGradient14518-2-2">
+ <stop
+ style="stop-color:#110800;stop-opacity:1;"
+ offset="0"
+ id="stop14540-5-0" />
+ <stop
+ id="stop14542-2-7"
+ offset="0.59066743"
+ style="stop-color:#a65a00;stop-opacity:0.80000001;" />
+ <stop
+ id="stop14522-2-1"
+ offset="1"
+ style="stop-color:#ff921e;stop-opacity:0;" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect29716-0-8-3"
+ is_visible="true" />
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.2134444"
+ y="-0.10672215"
+ width="1.1828213"
+ x="-0.091410659"
+ id="filter14951-8"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur14953-7"
+ stdDeviation="1.2967831"
+ inkscape:collect="always" />
+ </filter>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.5474488"
+ y="-0.27372435"
+ width="1.9386586"
+ x="-0.46932927"
+ id="filter15211-9"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur15213-7"
+ stdDeviation="0.85991809"
+ inkscape:collect="always" />
+ </filter>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.9645272"
+ y="-0.48226368"
+ width="2.2077935"
+ x="-0.60389674"
+ id="filter14706-3"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur14708-8"
+ stdDeviation="2.6400036"
+ inkscape:collect="always" />
+ </filter>
+ <linearGradient
+ id="linearGradient15103-0"
+ inkscape:collect="always">
+ <stop
+ id="stop15105-1"
+ offset="0"
+ style="stop-color:#000000;stop-opacity:1;" />
+ <stop
+ id="stop15107-6"
+ offset="1"
+ style="stop-color:#000000;stop-opacity:0;" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ id="filter15115-3"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur15117-8"
+ stdDeviation="2.2091576"
+ inkscape:collect="always" />
+ </filter>
+ <linearGradient
+ id="linearGradient14392-8"
+ inkscape:collect="always">
+ <stop
+ id="stop14394-3"
+ offset="0"
+ style="stop-color:#3e2a06;stop-opacity:1" />
+ <stop
+ id="stop14396-0"
+ offset="1"
+ style="stop-color:#ad780a;stop-opacity:1" />
+ </linearGradient>
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect14300-7"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect14300-2-7"
+ effect="spiro" />
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.2859976"
+ y="-0.14299878"
+ width="1.2900307"
+ x="-0.14501533"
+ id="filter14416-8"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur14418-5"
+ stdDeviation="4.7787162"
+ inkscape:collect="always" />
+ </filter>
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect14300-2-2-0"
+ effect="spiro" />
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.2498901"
+ y="-0.12494507"
+ width="1.2305853"
+ x="-0.11529266"
+ id="filter14432-2"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur14434-2"
+ stdDeviation="3.1725155"
+ inkscape:collect="always" />
+ </filter>
+ <linearGradient
+ id="linearGradient17009"
+ inkscape:collect="always">
+ <stop
+ id="stop17011"
+ offset="0"
+ style="stop-color:#f3cd0c;stop-opacity:1;" />
+ <stop
+ id="stop17013"
+ offset="1"
+ style="stop-color:#f3cd0c;stop-opacity:0;" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.2585374"
+ y="-0.12926871"
+ width="1.0418237"
+ x="-0.020911871"
+ id="filter17044"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur17046"
+ stdDeviation="0.47946431"
+ inkscape:collect="always" />
+ </filter>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.3188565"
+ y="-0.15942827"
+ width="1.3550917"
+ x="-0.17754583"
+ id="filter15185-2"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur15187-7"
+ stdDeviation="0.90083196"
+ inkscape:collect="always" />
+ </filter>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4582-3"
+ is_visible="true" />
+ <filter
+ style="color-interpolation-filters:sRGB"
+ inkscape:collect="always"
+ id="filter4592-2"
+ x="-0.81659585"
+ width="2.6331918"
+ y="-0.057823781"
+ height="1.1156476">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.4463082"
+ id="feGaussianBlur4594-6" />
+ </filter>
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient4417-8-5">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop4419-4-4" />
+ <stop
+ style="stop-color:#000000;stop-opacity:0.24886878"
+ offset="1"
+ id="stop4421-2-0" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4500-7"
+ is_visible="true" />
+ <filter
+ style="color-interpolation-filters:sRGB"
+ inkscape:collect="always"
+ id="filter4538-7">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.7854602"
+ id="feGaussianBlur4540-9" />
+ </filter>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ inkscape:collect="always"
+ id="filter4427-5-7">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1383167"
+ id="feGaussianBlur4429-4-8" />
+ </filter>
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient4417-0">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop4419-8" />
+ <stop
+ style="stop-color:#000000;stop-opacity:0.24886878"
+ offset="1"
+ id="stop4421-8" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ inkscape:collect="always"
+ id="filter4487-6">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.6434074"
+ id="feGaussianBlur4489-4" />
+ </filter>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.0800927"
+ y="-0.040046327"
+ width="1.2350631"
+ x="-0.11753157"
+ id="filter14666-9"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur14668-7"
+ stdDeviation="1.352085"
+ inkscape:collect="always" />
+ </filter>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect29707-7-6"
+ is_visible="true" />
+ <linearGradient
+ id="linearGradient29652-2"
+ inkscape:collect="always">
+ <stop
+ id="stop29654-6"
+ offset="0"
+ style="stop-color:#c8c8c8;stop-opacity:1;" />
+ <stop
+ id="stop29656-1"
+ offset="1"
+ style="stop-color:#797978;stop-opacity:1" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect29553-7"
+ is_visible="true" />
+ <linearGradient
+ id="linearGradient28469-0">
+ <stop
+ id="stop28479-4"
+ offset="0"
+ style="stop-color:#d2940a;stop-opacity:1" />
+ <stop
+ id="stop28477-2"
+ offset="0.75143719"
+ style="stop-color:#d89c08;stop-opacity:1" />
+ <stop
+ style="stop-color:#b67e07;stop-opacity:1;"
+ offset="0.86579126"
+ id="stop28485-7" />
+ <stop
+ style="stop-color:#946106;stop-opacity:1"
+ offset="1"
+ id="stop28473-1" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect28463-4"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect28489-4"
+ is_visible="true" />
+ <filter
+ style="color-interpolation-filters:sRGB"
+ inkscape:collect="always"
+ id="filter28502-8"
+ x="-0.1548306"
+ width="1.3096611"
+ y="-0.21494099"
+ height="1.429882">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.89858666"
+ id="feGaussianBlur28504-3" />
+ </filter>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect28273-54-7"
+ is_visible="true" />
+ <filter
+ style="color-interpolation-filters:sRGB"
+ id="filter15145-6"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur15147-1"
+ stdDeviation="0.75821369"
+ inkscape:collect="always" />
+ </filter>
+ <linearGradient
+ id="linearGradient28275-8">
+ <stop
+ style="stop-color:#ad780a;stop-opacity:1"
+ offset="0"
+ id="stop28277-1" />
+ <stop
+ id="stop28291-9"
+ offset="0.11972899"
+ style="stop-color:#d89e08;stop-opacity:1" />
+ <stop
+ id="stop28289-6"
+ offset="0.25514477"
+ style="stop-color:#edb80b;stop-opacity:1" />
+ <stop
+ id="stop28287-6"
+ offset="0.39194193"
+ style="stop-color:#ebc80d;stop-opacity:1" />
+ <stop
+ id="stop28285-7"
+ offset="0.52741116"
+ style="stop-color:#f5d838;stop-opacity:1" />
+ <stop
+ id="stop28283-7"
+ offset="0.76906693"
+ style="stop-color:#f6d811;stop-opacity:1" />
+ <stop
+ style="stop-color:#f5cd31;stop-opacity:1"
+ offset="1"
+ id="stop28279-5" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect28273-44"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect28384-7"
+ is_visible="true" />
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.260552"
+ y="-0.13027605"
+ width="1.3219118"
+ x="-0.16095592"
+ id="filter14963-7"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur14965-0"
+ stdDeviation="0.84878819"
+ inkscape:collect="always" />
+ </filter>
+ <linearGradient
+ id="linearGradient29529-6">
+ <stop
+ style="stop-color:#3a2903;stop-opacity:1;"
+ offset="0"
+ id="stop29531-0" />
+ <stop
+ id="stop29539-7"
+ offset="0.55472803"
+ style="stop-color:#735208;stop-opacity:1" />
+ <stop
+ style="stop-color:#ac8c04;stop-opacity:1"
+ offset="1"
+ id="stop29533-8" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ id="filter15177-1"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur15179-3"
+ stdDeviation="0.11039302"
+ inkscape:collect="always" />
+ </filter>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.2114592"
+ y="-0.10572958"
+ width="1.2328929"
+ x="-0.11644644"
+ id="filter15173-0"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur15175-4"
+ stdDeviation="0.11039302"
+ inkscape:collect="always" />
+ </filter>
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient28572-7">
+ <stop
+ style="stop-color:#f5ce2d;stop-opacity:1;"
+ offset="0"
+ id="stop28574-5" />
+ <stop
+ style="stop-color:#d79b08;stop-opacity:1"
+ offset="1"
+ id="stop28576-0" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ inkscape:collect="always"
+ id="filter28584-4"
+ x="-0.10730159"
+ width="1.2146032"
+ y="-0.13610739"
+ height="1.2722148">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.2640625"
+ id="feGaussianBlur28586-3" />
+ </filter>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect29455-3"
+ is_visible="true" />
+ <linearGradient
+ id="linearGradient29477-3">
+ <stop
+ style="stop-color:#757574;stop-opacity:0"
+ offset="0"
+ id="stop29479-5" />
+ <stop
+ id="stop29487-5"
+ offset="0.26291031"
+ style="stop-color:#757574;stop-opacity:1;" />
+ <stop
+ id="stop29485-0"
+ offset="0.5"
+ style="stop-color:#757574;stop-opacity:1;" />
+ <stop
+ style="stop-color:#757574;stop-opacity:0"
+ offset="1"
+ id="stop29481-3" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ inkscape:collect="always"
+ id="filter29493-2"
+ x="-0.23446631"
+ width="1.4689326"
+ y="-0.14606842"
+ height="1.2921369">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.51262416"
+ id="feGaussianBlur29495-7" />
+ </filter>
+ <linearGradient
+ id="linearGradient29336-6">
+ <stop
+ style="stop-color:#646464;stop-opacity:0"
+ offset="0"
+ id="stop29338-1" />
+ <stop
+ id="stop29344-7"
+ offset="0.30628255"
+ style="stop-color:#646464;stop-opacity:0.5825243" />
+ <stop
+ style="stop-color:#646464;stop-opacity:1"
+ offset="0.47000006"
+ id="stop29354-5" />
+ <stop
+ id="stop29356-3"
+ offset="0.72834015"
+ style="stop-color:#646464;stop-opacity:0.25728154" />
+ <stop
+ style="stop-color:#646464;stop-opacity:0;"
+ offset="1"
+ id="stop29340-6" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ inkscape:collect="always"
+ id="filter29447-1">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.13475369"
+ id="feGaussianBlur29449-4" />
+ </filter>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ inkscape:collect="always"
+ id="filter29350-1">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.1475"
+ id="feGaussianBlur29352-7" />
+ </filter>
+ <linearGradient
+ id="linearGradient28976-3">
+ <stop
+ style="stop-color:#747474;stop-opacity:1"
+ offset="0"
+ id="stop28978-7" />
+ <stop
+ id="stop29259-5"
+ offset="0.125"
+ style="stop-color:#8c8c8c;stop-opacity:1;" />
+ <stop
+ id="stop29257-0"
+ offset="0.25"
+ style="stop-color:#a4a4a4;stop-opacity:1;" />
+ <stop
+ id="stop28984-4"
+ offset="0.5"
+ style="stop-color:#d4d4d4;stop-opacity:1" />
+ <stop
+ style="stop-color:#d4d4d4;stop-opacity:1"
+ offset="0.61919296"
+ id="stop28986-0" />
+ <stop
+ style="stop-color:#7c7c7c;stop-opacity:1"
+ offset="1"
+ id="stop28980-2" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect28974-8"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect28881-6"
+ is_visible="true" />
+ <filter
+ style="color-interpolation-filters:sRGB"
+ inkscape:collect="always"
+ id="filter28927-8"
+ x="-0.15795375"
+ width="1.3159075"
+ y="-0.2091987"
+ height="1.4183974">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.24891089"
+ id="feGaussianBlur28929-2" />
+ </filter>
+ <linearGradient
+ id="linearGradient28935-8">
+ <stop
+ style="stop-color:#949494;stop-opacity:0.39215687;"
+ offset="0"
+ id="stop28937-7" />
+ <stop
+ id="stop28943-3"
+ offset="0.5"
+ style="stop-color:#949494;stop-opacity:1;" />
+ <stop
+ style="stop-color:#949494;stop-opacity:0.39215687;"
+ offset="1"
+ id="stop28939-8" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ inkscape:collect="always"
+ id="filter28949-8"
+ x="-0.17867894"
+ width="1.3573579"
+ y="-0.18134074"
+ height="1.3626815">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.51947927"
+ id="feGaussianBlur28951-1" />
+ </filter>
+ <filter
+ style="color-interpolation-filters:sRGB"
+ height="1.6165009"
+ y="-0.30825046"
+ width="1.5843594"
+ x="-0.2921797"
+ id="filter15133-1"
+ inkscape:collect="always">
+ <feGaussianBlur
+ id="feGaussianBlur15135-5"
+ stdDeviation="1.7403319"
+ inkscape:collect="always" />
+ </filter>
+ <linearGradient
+ id="linearGradient28853-5">
+ <stop
+ style="stop-color:#020204;stop-opacity:1"
+ offset="0"
+ id="stop28855-3" />
+ <stop
+ id="stop28865-0"
+ offset="0.73448181"
+ style="stop-color:#020204;stop-opacity:1" />
+ <stop
+ style="stop-color:#5c5c5c;stop-opacity:1;"
+ offset="1"
+ id="stop28857-9" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect28851-0"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect28273-4-2"
+ is_visible="true" />
+ <linearGradient
+ id="linearGradient28799-3">
+ <stop
+ style="stop-color:#fefefc;stop-opacity:1;"
+ offset="0"
+ id="stop28801-6" />
+ <stop
+ id="stop28807-7"
+ offset="0.75733864"
+ style="stop-color:#fefefc;stop-opacity:1" />
+ <stop
+ style="stop-color:#d4d4d4;stop-opacity:1"
+ offset="1"
+ id="stop28803-0" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect28797-81"
+ is_visible="true" />
+ <linearGradient
+ id="linearGradient28799-5-5">
+ <stop
+ style="stop-color:#fefefc;stop-opacity:1;"
+ offset="0"
+ id="stop28801-1-9" />
+ <stop
+ id="stop28807-2-6"
+ offset="0.75733864"
+ style="stop-color:#fefefc;stop-opacity:1" />
+ <stop
+ style="stop-color:#d4d4d4;stop-opacity:1"
+ offset="1"
+ id="stop28803-6-3" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect28797-8-1"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect28463-0-7"
+ is_visible="true" />
+ <filter
+ style="color-interpolation-filters:sRGB"
+ inkscape:collect="always"
+ id="filter30475-4">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.93152507"
+ id="feGaussianBlur30477-9" />
+ </filter>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect28463-8-0"
+ is_visible="true" />
+ <filter
+ style="color-interpolation-filters:sRGB"
+ inkscape:collect="always"
+ id="filter30479-2"
+ x="-0.07637769"
+ width="1.1527554"
+ y="-0.12919053"
+ height="1.2583811">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="2.0355046"
+ id="feGaussianBlur30481-9" />
+ </filter>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect29721-2"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect28714-5"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect29678-4-6"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect29678-1"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect29672-0"
+ is_visible="true" />
+ <radialGradient
+ r="14.572236"
+ fy="137.66095"
+ fx="223.19559"
+ cy="137.66095"
+ cx="223.19559"
+ gradientTransform="matrix(0.81524244,-0.03431182,0.02961133,1.2479887,-208.43744,-35.542647)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient18806"
+ xlink:href="#linearGradient28799-5-5"
+ inkscape:collect="always" />
+ <radialGradient
+ r="14.572236"
+ fy="137.66095"
+ fx="223.19559"
+ cy="137.66095"
+ cx="223.19559"
+ gradientTransform="matrix(1.0857794,-0.03431182,0.03943781,1.2479887,-233.54194,-35.542647)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient18808"
+ xlink:href="#linearGradient28799-3"
+ inkscape:collect="always" />
+ <radialGradient
+ r="15.382211"
+ fy="150.65126"
+ fx="275.53763"
+ cy="150.65126"
+ cx="275.53763"
+ gradientTransform="matrix(0.69784558,-0.50717348,0.46034105,0.63340638,-271.10191,183.03011)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient18810"
+ xlink:href="#linearGradient28853-5"
+ inkscape:collect="always" />
+ <linearGradient
+ y2="140.72476"
+ x2="219.73343"
+ y1="132.76981"
+ x1="213.01591"
+ gradientTransform="translate(-60.00015,-58.362183)"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient18812"
+ xlink:href="#linearGradient28935-8"
+ inkscape:collect="always" />
+ <linearGradient
+ y2="132.48718"
+ x2="358.625"
+ y1="119.98718"
+ x1="337.25"
+ gradientTransform="translate(-250)"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient18814"
+ xlink:href="#linearGradient28976-3"
+ inkscape:collect="always" />
+ <linearGradient
+ y2="127.99684"
+ x2="308.74051"
+ y1="114.56181"
+ x1="294.50998"
+ gradientTransform="translate(-300.00015,-0.9999998)"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient18816"
+ xlink:href="#linearGradient29336-6"
+ inkscape:collect="always" />
+ <linearGradient
+ y2="128.57106"
+ x2="266.62701"
+ y1="115.66637"
+ x1="253.22745"
+ gradientTransform="translate(-300.00015,-0.9999998)"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient18818"
+ xlink:href="#linearGradient29336-6"
+ inkscape:collect="always" />
+ <linearGradient
+ gradientTransform="translate(-50.00015,-58.362183)"
+ y2="142.49252"
+ x2="169.8824"
+ y1="132.06271"
+ x1="164.04878"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient18820"
+ xlink:href="#linearGradient29477-3"
+ inkscape:collect="always" />
+ <radialGradient
+ r="31.111488"
+ fy="193.09949"
+ fx="294.48483"
+ cy="193.09949"
+ cx="294.48483"
+ gradientTransform="matrix(0.93618683,-0.38640412,0.27133164,0.65738721,-244.47527,146.7229)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient18822"
+ xlink:href="#linearGradient28469-0"
+ inkscape:collect="always" />
+ <linearGradient
+ y2="157.8721"
+ x2="313.3367"
+ y1="158.31404"
+ x1="256.85657"
+ gradientTransform="translate(-210)"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient18824"
+ xlink:href="#linearGradient28275-8"
+ inkscape:collect="always" />
+ <radialGradient
+ r="3.2300935"
+ fy="147.09335"
+ fx="77.67215"
+ cy="147.09335"
+ cx="77.67215"
+ gradientTransform="matrix(1.0000004,0,0,0.5833264,59.999805,3.054009)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient18826"
+ xlink:href="#linearGradient29529-6"
+ inkscape:collect="always" />
+ <radialGradient
+ r="1.5350333"
+ fy="147.44128"
+ fx="63.125401"
+ cy="147.44128"
+ cx="63.125401"
+ gradientTransform="matrix(1,0,0,1.0751189,59.99984,-69.456344)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient18828"
+ xlink:href="#linearGradient29529-6"
+ inkscape:collect="always" />
+ <linearGradient
+ y2="159.76843"
+ x2="243.46875"
+ y1="157.01843"
+ x1="243.03125"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient18830"
+ xlink:href="#linearGradient28572-7"
+ inkscape:collect="always" />
+ <radialGradient
+ r="35.51144"
+ fy="126.53491"
+ fx="268.06998"
+ cy="126.53491"
+ cx="268.06998"
+ gradientTransform="matrix(0.20141143,-0.03316079,0.03065006,0.18616184,-3.1263574,114.03586)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient18832"
+ xlink:href="#linearGradient29652-2"
+ inkscape:collect="always" />
+ <radialGradient
+ r="27.391165"
+ fy="220.53755"
+ fx="336.22372"
+ cy="220.53755"
+ cx="336.22372"
+ gradientTransform="matrix(-0.69844216,0,0,0.76335815,166.3057,50.219935)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient18834"
+ xlink:href="#linearGradient4417-0"
+ inkscape:collect="always" />
+ <radialGradient
+ r="27.391165"
+ fy="236.36569"
+ fx="312.14502"
+ cy="236.36569"
+ cx="312.14502"
+ gradientTransform="matrix(1,0,0,0.76335815,-150.00015,-8.142243)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient18836"
+ xlink:href="#linearGradient4417-8-5"
+ inkscape:collect="always" />
+ <radialGradient
+ r="10.84542"
+ fy="225.13487"
+ fx="275.55389"
+ cy="225.13487"
+ cx="275.55389"
+ gradientTransform="matrix(1,0,0,1.0692348,-150.00016,-73.222483)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient18838"
+ xlink:href="#linearGradient4417-8-5"
+ inkscape:collect="always" />
+ <linearGradient
+ y2="351.48654"
+ x2="341.98224"
+ y1="323.90076"
+ x1="338.28552"
+ gradientTransform="translate(-310)"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient18840"
+ xlink:href="#linearGradient15103-0"
+ inkscape:collect="always" />
+ <linearGradient
+ gradientTransform="translate(-250.00016,-58.362183)"
+ y2="293.58548"
+ x2="490.12241"
+ y1="371.54401"
+ x1="442.03912"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient18842"
+ xlink:href="#linearGradient14392-8"
+ inkscape:collect="always" />
+ <linearGradient
+ y2="302.31699"
+ x2="353.74951"
+ y1="289.58905"
+ x1="355.16373"
+ gradientTransform="translate(-150.00016,-60.362183)"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient18844"
+ xlink:href="#linearGradient17009"
+ inkscape:collect="always" />
+ <radialGradient
+ r="16.845654"
+ fy="303.41541"
+ fx="363.33957"
+ cy="303.41541"
+ cx="363.33957"
+ gradientTransform="matrix(1.3082075,0.35053296,-0.36795399,1.3732236,-150.50951,-298.71133)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient18846"
+ xlink:href="#linearGradient14518-6"
+ inkscape:collect="always" />
+ <radialGradient
+ r="16.845654"
+ fy="303.41541"
+ fx="363.33957"
+ cy="303.41541"
+ cx="363.33957"
+ gradientTransform="matrix(1.3082075,0.35053296,-0.36795399,1.3732236,-310.50935,-240.34915)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient18848"
+ xlink:href="#linearGradient14518-2-2"
+ inkscape:collect="always" />
+ <radialGradient
+ r="20.537666"
+ fy="246.85757"
+ fx="382.23483"
+ cy="246.85757"
+ cx="382.23483"
+ gradientTransform="matrix(0.36025223,0.15680447,-0.07246786,0.16649214,260.61683,181.93825)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient18850"
+ xlink:href="#linearGradient14830-4"
+ inkscape:collect="always" />
+ <linearGradient
+ y2="279.23718"
+ x2="361.5"
+ y1="279.36218"
+ x1="358.5"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient18852"
+ xlink:href="#linearGradient14830-8-1"
+ inkscape:collect="always" />
+ <linearGradient
+ gradientTransform="translate(-80.00015,-58.362183)"
+ y2="381.62027"
+ x2="170.86368"
+ y1="301.54044"
+ x1="123.13397"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient18854"
+ xlink:href="#linearGradient14132-6"
+ inkscape:collect="always" />
+ <linearGradient
+ y2="352.27536"
+ x2="186.5968"
+ y1="323.99109"
+ x1="171.57079"
+ gradientTransform="translate(-80.53048,-60.12995)"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient18856"
+ xlink:href="#linearGradient14168-5"
+ inkscape:collect="always" />
+ <clipPath
+ id="clipPath391"
+ clipPathUnits="userSpaceOnUse">
+ <path
+ inkscape:label="Clip - Left Foot Brighter Highlights"
+ transform="matrix(1,0,0,1.0182804,0,-4.0313444)"
+ style="fill:url(#linearGradient395);fill-opacity:1;stroke:none"
+ d="m 137.57703,281.0191 c 1.59929,-0.66295 3.3982,-0.78361 5.10074,-0.46963 1.70253,0.31398 3.31141,1.04948 4.74342,2.02239 2.86402,1.94583 4.98821,4.77774 7.02263,7.57952 4.67189,6.43406 9.16868,13.00227 13.24488,19.8293 3.30635,5.53766 6.34352,11.25685 10.16415,16.45304 2.49398,3.3919 5.3066,6.53947 7.813,9.92221 2.50639,3.38273 4.72794,7.05586 5.83931,11.11662 1.44411,5.27653 0.88463,11.09291 -1.62666,15.95302 -1.76663,3.41896 -4.47646,6.35228 -7.77242,8.33898 -3.29595,1.9867 -7.17064,3.01444 -11.01635,2.87021 -6.11413,-0.2293 -11.69944,-3.28515 -17.38362,-5.54906 -11.58097,-4.6125 -24.15978,-6.0594 -36.09666,-9.65174 -3.66859,-1.10404 -7.27582,-2.4107 -10.96988,-3.42629 -1.64125,-0.45122 -3.30866,-0.8482 -4.85875,-1.55144 -1.55008,-0.70325 -2.999548,-1.7491 -3.86171,-3.21675 -0.666391,-1.13439 -0.948386,-2.47002 -0.930187,-3.78554 0.0182,-1.31552 0.325889,-2.61453 0.773815,-3.85158 0.895851,-2.47409 2.343262,-4.71374 3.320162,-7.15696 1.59511,-3.98935 1.88169,-8.38839 1.66657,-12.67942 -0.21511,-4.29103 -0.91078,-8.54478 -1.20454,-12.83115 -0.13118,-1.91406 -0.18066,-3.85256 0.18479,-5.73598 0.36545,-1.88343 1.17577,-3.72459 2.55771,-5.05541 1.27406,-1.22693 2.96492,-1.95531 4.69643,-2.31651 1.73151,-0.3612 3.51533,-0.37747 5.28367,-0.33762 1.76833,0.0399 3.54067,0.13425 5.30351,-0.0106 1.76284,-0.14488 3.53347,-0.54055 5.06911,-1.41828 1.45996,-0.83447 2.65433,-2.0745 3.64374,-3.43424 0.9894,-1.35974 1.78909,-2.84573 2.60891,-4.31396 0.81983,-1.46823 1.66834,-2.93151 2.74157,-4.22611 1.07324,-1.2946 2.38923,-2.42304 3.94266,-3.06698"
+ id="path393"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ssssssssssssssss" />
+ </clipPath>
+ <linearGradient
+ y2="381.62027"
+ x2="170.86368"
+ y1="301.54044"
+ x1="123.13397"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient395"
+ xlink:href="#linearGradient14132-6"
+ inkscape:collect="always" />
+ <clipPath
+ id="clipPath401"
+ clipPathUnits="userSpaceOnUse">
+ <path
+ sodipodi:nodetypes="ssssssssssssssss"
+ inkscape:connector-curvature="0"
+ id="path403"
+ d="m 137.57703,281.0191 c 1.59929,-0.66295 3.3982,-0.78361 5.10074,-0.46963 1.70253,0.31398 3.31141,1.04948 4.74342,2.02239 2.86402,1.94583 4.98821,4.77774 7.02263,7.57952 4.67189,6.43406 9.16868,13.00227 13.24488,19.8293 3.30635,5.53766 6.34352,11.25685 10.16415,16.45304 2.49398,3.3919 5.3066,6.53947 7.813,9.92221 2.50639,3.38273 4.72794,7.05586 5.83931,11.11662 1.44411,5.27653 0.88463,11.09291 -1.62666,15.95302 -1.76663,3.41896 -4.47646,6.35228 -7.77242,8.33898 -3.29595,1.9867 -7.17064,3.01444 -11.01635,2.87021 -6.11413,-0.2293 -11.69944,-3.28515 -17.38362,-5.54906 -11.58097,-4.6125 -24.15978,-6.0594 -36.09666,-9.65174 -3.66859,-1.10404 -7.27582,-2.4107 -10.96988,-3.42629 -1.64125,-0.45122 -3.30866,-0.8482 -4.85875,-1.55144 -1.55008,-0.70325 -2.999548,-1.7491 -3.86171,-3.21675 -0.666391,-1.13439 -0.948386,-2.47002 -0.930187,-3.78554 0.0182,-1.31552 0.325889,-2.61453 0.773815,-3.85158 0.895851,-2.47409 2.343262,-4.71374 3.320162,-7.15696 1.59511,-3.98935 1.88169,-8.38839 1.66657,-12.67942 -0.21511,-4.29103 -0.91078,-8.54478 -1.20454,-12.83115 -0.13118,-1.91406 -0.18066,-3.85256 0.18479,-5.73598 0.36545,-1.88343 1.17577,-3.72459 2.55771,-5.05541 1.27406,-1.22693 2.96492,-1.95531 4.69643,-2.31651 1.73151,-0.3612 3.51533,-0.37747 5.28367,-0.33762 1.76833,0.0399 3.54067,0.13425 5.30351,-0.0106 1.76284,-0.14488 3.53347,-0.54055 5.06911,-1.41828 1.45996,-0.83447 2.65433,-2.0745 3.64374,-3.43424 0.9894,-1.35974 1.78909,-2.84573 2.60891,-4.31396 0.81983,-1.46823 1.66834,-2.93151 2.74157,-4.22611 1.07324,-1.2946 2.38923,-2.42304 3.94266,-3.06698"
+ style="fill:url(#linearGradient405);fill-opacity:1;stroke:none"
+ transform="translate(-240.00015,-1)"
+ inkscape:label="Clip - Left Foot Highlights" />
+ </clipPath>
+ <linearGradient
+ y2="381.62027"
+ x2="170.86368"
+ y1="301.54044"
+ x1="123.13397"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient405"
+ xlink:href="#linearGradient14132-6"
+ inkscape:collect="always" />
+ <clipPath
+ id="clipPath419"
+ clipPathUnits="userSpaceOnUse">
+ <path
+ inkscape:label="Clip - Right Foot Highlights"
+ clip-path="none"
+ sodipodi:nodetypes="aaaaaaaaaaaaaaaaaaaaaaacscaascca"
+ inkscape:connector-curvature="0"
+ id="path421"
+ d="m 513.18983,336.61385 c -2.6238,3.11482 -6.268,5.17039 -9.89648,7.01985 -6.1886,3.15437 -12.60169,5.92177 -18.41964,9.71654 -3.89802,2.54249 -7.4959,5.52671 -10.86016,8.74238 -2.87719,2.75012 -5.60582,5.68745 -8.83247,8.01771 -3.25567,2.35122 -7.01915,4.05426 -10.99061,4.6502 -4.83026,0.72481 -9.82134,-0.21289 -14.29898,-2.16416 -3.13754,-1.36728 -6.15569,-3.3229 -7.96301,-6.22931 -1.81425,-2.91754 -2.22807,-6.48813 -2.23266,-9.92375 -0.008,-6.07666 1.11824,-12.09004 2.17848,-18.07349 0.88097,-4.97177 1.71949,-9.95483 2.26013,-14.97502 0.98337,-9.13118 0.9763,-18.35278 0.3199,-27.51327 -0.10993,-1.53416 -0.23754,-3.0832 -0.008,-4.60412 0.22922,-1.52092 0.85475,-3.0367 2.02069,-4.03986 1.07696,-0.9266 2.52093,-1.33598 3.93947,-1.4145 1.41854,-0.0785 2.83404,0.14655 4.23982,0.35197 3.31254,0.48405 6.65159,0.8649 9.88917,1.71656 2.04284,0.53738 4.03315,1.25925 6.0722,1.81081 3.40258,0.92039 6.96639,1.36144 10.46739,0.95192 3.76917,-0.44089 7.42987,-1.85678 11.22363,-1.76474 1.55658,0.0378 3.1015,0.33171 4.58649,0.79985 1.51539,0.47772 3.00914,1.16182 4.12281,2.29512 0.84639,0.8613 1.43579,1.94539 1.87872,3.06879 0.65982,1.67352 1.01492,3.457 1.16703,5.24945 0.13475,1.58788 0.11343,3.19441 0.41433,4.75933 0.49503,2.57458 1.84746,4.92305 3.52848,6.93494 1.68102,2.01189 3.68982,3.72048 5.69641,5.40783 1.99908,1.68103 4.0106,3.35469 6.16708,4.82839 1.0121,0.69165 2.05642,1.33949 3.01736,2.10062 0.96094,0.76113 1.84466,1.6468 2.44543,2.71535 0.81492,1.44944 1.06377,3.2077 0.53758,4.87655 -0.5262,1.66885 -1.48162,3.27659 -2.67059,4.68806 z"
+ style="display:inline;fill:url(#linearGradient423);fill-opacity:1;stroke:none" />
+ </clipPath>
+ <linearGradient
+ y2="293.58548"
+ x2="490.12241"
+ y1="371.54401"
+ x1="442.03912"
+ gradientTransform="translate(-250.00016,-58.362187)"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient423"
+ xlink:href="#linearGradient14392-8"
+ inkscape:collect="always" />
+ <clipPath
+ id="clipPath426"
+ clipPathUnits="userSpaceOnUse">
+ <path
+ inkscape:label="Clip - Right Foot Brighter Highlights"
+ clip-path="none"
+ sodipodi:nodetypes="aaaaaaaaaaaaaaaaaaaaaaacscaascca"
+ inkscape:connector-curvature="0"
+ id="path428"
+ d="m 509.36312,335.7449 c -2.29559,2.52764 -5.48394,4.19571 -8.65854,5.69652 -5.41448,2.55973 -11.02537,4.80544 -16.11557,7.88485 -3.41042,2.0632 -6.55825,4.48486 -9.50168,7.09433 -2.51729,2.23169 -4.9046,4.6153 -7.72764,6.50627 -2.84842,1.90799 -6.14114,3.28999 -9.61581,3.77358 -4.22606,0.58818 -8.59281,-0.17275 -12.51035,-1.75618 -2.74507,-1.10954 -5.38569,-2.6965 -6.96694,-5.05501 -1.5873,-2.36755 -1.94936,-5.26504 -1.95338,-8.053 -0.007,-4.93114 0.97837,-9.81092 1.90598,-14.66641 0.77077,-4.03453 1.5044,-8.07822 1.97742,-12.15205 0.86036,-7.40983 0.85417,-14.89305 0.27988,-22.32667 -0.0962,-1.24495 -0.20783,-2.50198 -0.007,-3.73619 0.20055,-1.23421 0.74783,-2.46424 1.76793,-3.27829 0.94224,-0.75193 2.20559,-1.08414 3.44669,-1.14785 1.24109,-0.0637 2.47953,0.11892 3.70947,0.28562 2.89818,0.3928 5.81955,0.70185 8.65215,1.39296 1.78731,0.43608 3.52865,1.02187 5.31264,1.46945 2.97696,0.74689 6.09498,1.10479 9.15805,0.77247 3.29769,-0.35777 6.50048,-1.50675 9.81968,-1.43206 1.36187,0.0307 2.71354,0.26918 4.01278,0.64907 1.32583,0.38766 2.63273,0.9428 3.6071,1.86246 0.74051,0.69893 1.25619,1.57866 1.64371,2.49028 0.57728,1.35804 0.88797,2.80532 1.02105,4.25987 0.11789,1.28854 0.0992,2.59222 0.3625,3.86213 0.43311,2.08924 1.61637,3.995 3.08711,5.62762 1.47074,1.63263 3.22827,3.01913 4.98386,4.38839 1.74902,1.36413 3.50892,2.72229 5.39565,3.91818 0.8855,0.56126 1.79919,1.08698 2.63992,1.70462 0.84074,0.61765 1.61392,1.33636 2.13954,2.20348 0.71298,1.1762 0.93071,2.60301 0.47034,3.95726 -0.46038,1.35425 -1.29629,2.65891 -2.33654,3.8043 z"
+ style="display:inline;fill:url(#linearGradient430);fill-opacity:1;stroke:none;stroke-width:0.84260321" />
+ </clipPath>
+ <linearGradient
+ y2="293.58548"
+ x2="490.12241"
+ y1="371.54401"
+ x1="442.03912"
+ gradientTransform="matrix(0.87491199,0,0,0.81148755,-158.36095,15.22676)"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient430"
+ xlink:href="#linearGradient14392-8"
+ inkscape:collect="always" />
+ <clipPath
+ id="clipPath433"
+ clipPathUnits="userSpaceOnUse">
+ <path
+ inkscape:label="Clip - Right Foot Brightest Highlights"
+ clip-path="none"
+ sodipodi:nodetypes="aaaaaaaaaaaaaaaaaaaaaaacscaascca"
+ inkscape:connector-curvature="0"
+ id="path435"
+ d="m 263.18967,278.25167 c -2.6238,3.11482 -6.268,5.17039 -9.89648,7.01985 -6.1886,3.15437 -12.60169,5.92177 -18.41964,9.71654 -3.89802,2.54249 -7.4959,5.52671 -10.86016,8.74238 -2.87719,2.75012 -5.60582,5.68745 -8.83247,8.01771 -3.25567,2.35122 -7.01915,4.05426 -10.99061,4.6502 -4.83026,0.72481 -9.82134,-0.21289 -14.29898,-2.16416 -3.13754,-1.36728 -6.15569,-3.3229 -7.96301,-6.22931 -1.81425,-2.91754 -2.22807,-6.48813 -2.23266,-9.92375 -0.008,-6.07666 1.11824,-12.09004 2.17848,-18.07349 0.88097,-4.97177 1.71949,-9.95483 2.26013,-14.97502 0.98337,-9.13118 0.9763,-18.35278 0.3199,-27.51327 -0.10993,-1.53416 -0.23754,-3.0832 -0.008,-4.60412 0.22922,-1.52092 0.85475,-3.0367 2.02069,-4.03986 1.07696,-0.9266 2.52093,-1.33598 3.93947,-1.4145 1.41854,-0.0785 2.83404,0.14655 4.23982,0.35197 3.31254,0.48405 6.65159,0.8649 9.88917,1.71656 2.04284,0.53738 4.03315,1.25925 6.0722,1.81081 3.40258,0.92039 6.96639,1.36144 10.46739,0.95192 3.76917,-0.44089 7.42987,-1.85678 11.22363,-1.76474 1.55658,0.0378 3.1015,0.33171 4.58649,0.79985 1.51539,0.47772 3.00914,1.16182 4.12281,2.29512 0.84639,0.8613 1.43579,1.94539 1.87872,3.06879 0.65982,1.67352 1.01492,3.457 1.16703,5.24945 0.13475,1.58788 0.11343,3.19441 0.41433,4.75933 0.49503,2.57458 1.84746,4.92305 3.52848,6.93494 1.68102,2.01189 3.68982,3.72048 5.69641,5.40783 1.99908,1.68103 4.0106,3.35469 6.16708,4.82839 1.0121,0.69165 2.05642,1.33949 3.01736,2.10062 0.96094,0.76113 1.84466,1.6468 2.44543,2.71535 0.81492,1.44944 1.06377,3.2077 0.53758,4.87655 -0.5262,1.66885 -1.48162,3.27659 -2.67059,4.68806 z"
+ style="display:inline;fill:url(#linearGradient437);fill-opacity:1;stroke:none" />
+ </clipPath>
+ <linearGradient
+ y2="293.58548"
+ x2="490.12241"
+ y1="371.54401"
+ x1="442.03912"
+ gradientTransform="translate(-500.00032,-116.72437)"
+ gradientUnits="userSpaceOnUse"
+ id="linearGradient437"
+ xlink:href="#linearGradient14392-8"
+ inkscape:collect="always" />
+ <clipPath
+ id="clipPath504"
+ clipPathUnits="userSpaceOnUse">
+ <path
+ sodipodi:nodetypes="aaaaassssaaaasssscssssc"
+ inkscape:connector-curvature="0"
+ d="m 304.84727,225.44951 c 5.97679,4.89463 9.76903,12.28597 10.94319,20.00305 0.91574,6.01859 0.32054,12.19496 -1.0124,18.13223 -1.33294,5.93726 -3.39093,11.67615 -5.43351,17.40051 -0.81452,2.2827 -1.63269,4.5871 -1.95634,6.9933 -0.32365,2.40621 -0.1187,4.95426 1.02109,7.08777 1.3066,2.44578 3.74526,4.13021 6.36677,4.92292 2.58816,0.78263 5.38374,0.76618 8.00354,0.10153 2.61979,-0.66466 5.06582,-1.96341 7.19828,-3.64929 5.41763,-4.28306 8.68657,-10.94871 9.95201,-17.81211 1.26545,-6.86339 0.68401,-13.95038 -0.49258,-20.83014 -1.60443,-9.38136 -4.30394,-18.55105 -7.74003,-27.40773 -2.52746,-6.51466 -5.7653,-12.74244 -9.61753,-18.52016 -3.77934,-5.66839 -9.14163,-10.09303 -13.10336,-15.63502 -1.37643,-1.92547 -3.03189,-3.93159 -4.38419,-5.87845 -2.91575,-4.19771 -2.25544,-3.41451 -4.06424,-6.13155 -1.31235,-1.9713 -3.38449,-2.6487 -5.56491,-3.51096 -2.18041,-0.86226 -4.629,-1.11623 -6.88065,-0.47108 -2.96781,0.85034 -5.39233,3.23113 -6.68215,6.08208 -1.28982,2.85095 -1.51545,6.12313 -1.01363,9.2201 0.64739,3.99536 2.44215,7.70258 4.46569,11.18873 2.28537,3.93724 4.93283,7.72707 8.38442,10.65407 3.60205,3.05459 7.95771,5.06875 11.61053,8.0602"
+ id="path506"
+ style="display:inline;fill:#020204;fill-opacity:1;stroke:none;stroke-width:0.9910841"
+ inkscape:label="Clip - Right Arm Shadow" />
+ </clipPath>
+ <clipPath
+ id="clipPath508"
+ clipPathUnits="userSpaceOnUse">
+ <path
+ style="display:inline;fill:#020204;fill-opacity:1;stroke:none"
+ d="m 240.47307,195.03592 c -7.07309,8.03686 -14.35222,15.81627 -18.34577,24.50506 -1.97625,4.41329 -2.91077,9.20725 -4.26498,13.84932 -1.5379,5.27176 -3.62609,10.3703 -5.97071,15.33612 -2.16496,4.58531 -4.54982,9.06291 -6.93891,13.53553 -1.7382,3.25409 -3.50514,6.58104 -4.10782,10.22071 -0.47628,2.87632 -0.1985,5.84423 0.53375,8.66626 0.73225,2.82202 1.90965,5.5106 3.23776,8.10601 5.66725,11.07504 14.17003,20.62168 24.24176,27.92472 4.57063,3.31418 9.46669,6.18109 14.60245,8.52595 2.78247,1.27041 5.71355,2.40436 8.77186,2.45744 1.52915,0.0265 3.0741,-0.22544 4.47434,-0.84055 1.40023,-0.6151 2.65068,-1.60373 3.48254,-2.88709 1.02278,-1.5779 1.36992,-3.53829 1.16461,-5.40743 -0.2053,-1.86914 -0.93484,-3.65294 -1.91324,-5.25873 -2.38997,-3.92251 -6.1652,-6.76055 -9.79642,-9.57343 -7.84055,-6.07358 -15.42465,-12.48039 -22.68212,-19.23996 -2.04912,-1.90854 -4.09841,-3.87759 -5.53019,-6.28412 -1.3943,-2.34352 -2.1476,-5.01376 -2.65783,-7.69253 -1.39972,-7.34873 -1.04092,-15.08286 1.45958,-22.13343 0.97822,-2.75826 2.27118,-5.39201 3.51815,-8.03965 2.16133,-4.58906 4.20725,-9.26564 7.04933,-13.46723 3.53798,-5.23037 8.26749,-9.66049 11.15147,-15.27803 2.43423,-4.74149 3.41994,-10.07236 4.36185,-15.31831 0.73693,-4.10434 2.15042,-8.12437 2.86923,-12.23193 -1.40611,2.66567 -5.93796,7.04283 -8.71069,10.5253 z"
+ id="path510"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cczzzzzzzzzzzzszzzzzcssscc"
+ inkscape:label="Clip - Left Arm Shadow" />
+ </clipPath>
+ <clipPath
+ id="clipPath533"
+ clipPathUnits="userSpaceOnUse">
+ <path
+ sodipodi:nodetypes="aaaasssaccaa"
+ inkscape:connector-curvature="0"
+ d="m 386.1875,285.32775 c -0.40516,-1.10369 -1.11845,-2.08156 -1.9907,-2.86987 -0.87226,-0.78832 -1.90049,-1.39229 -2.98278,-1.85155 -2.16459,-0.91852 -4.52053,-1.26149 -6.83152,-1.69556 -2.17919,-0.40931 -4.34179,-0.90631 -6.52782,-1.27734 -2.27136,-0.38551 -4.6179,-0.63213 -6.8653,-0.1253 -1.96583,0.44333 -3.7845,1.45879 -5.27172,2.81864 -1.48723,1.35984 -2.64911,3.0564 -3.48499,4.89007 -1.47218,3.22952 -1.93451,6.86503 -1.65394,10.40316 0.20881,2.63325 0.87532,5.34594 2.60877,7.33912 1.40065,1.61052 3.38733,2.61526 5.43398,3.22092 3.52502,1.04316 7.36663,0.98822 10.86038,-0.1553 5.76689,-1.93113 10.87568,-5.77387 14.33034,-10.77903 1.13861,-1.64963 2.11217,-3.44809 2.5532,-5.4034 0.33597,-1.48955 0.34831,-3.08112 -0.1779,-4.51456"
+ id="path535"
+ style="display:inline;fill:#020204;fill-opacity:1;stroke:none"
+ inkscape:label="Clip - Hand Lower Hightlight" />
+ </clipPath>
+ <clipPath
+ id="clipPath538"
+ clipPathUnits="userSpaceOnUse">
+ <path
+ inkscape:label="Clip - Hand Upper Highlight"
+ style="display:inline;fill:#020204;fill-opacity:1;stroke:none"
+ id="path540"
+ d="m 386.1875,285.32775 c -0.40516,-1.10369 -1.11845,-2.08156 -1.9907,-2.86987 -0.87226,-0.78832 -1.90049,-1.39229 -2.98278,-1.85155 -2.16459,-0.91852 -4.52053,-1.26149 -6.83152,-1.69556 -2.17919,-0.40931 -4.34179,-0.90631 -6.52782,-1.27734 -2.27136,-0.38551 -4.6179,-0.63213 -6.8653,-0.1253 -1.96583,0.44333 -3.7845,1.45879 -5.27172,2.81864 -1.48723,1.35984 -2.64911,3.0564 -3.48499,4.89007 -1.47218,3.22952 -1.93451,6.86503 -1.65394,10.40316 0.20881,2.63325 0.87532,5.34594 2.60877,7.33912 1.40065,1.61052 3.38733,2.61526 5.43398,3.22092 3.52502,1.04316 7.36663,0.98822 10.86038,-0.1553 5.76689,-1.93113 10.87568,-5.77387 14.33034,-10.77903 1.13861,-1.64963 2.11217,-3.44809 2.5532,-5.4034 0.33597,-1.48955 0.34831,-3.08112 -0.1779,-4.51456"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="aaaasssaccaa" />
+ </clipPath>
+ <clipPath
+ id="clipPath622"
+ clipPathUnits="userSpaceOnUse">
+ <path
+ sodipodi:nodetypes="aaaaaaaa"
+ inkscape:connector-curvature="0"
+ id="path624"
+ d="m 85.75,122.36218 c -2.78042,1.91023 -5.11057,4.57487 -6.25,7.75 -1.43603,4.00163 -0.88584,8.48071 0.5,12.5 1.41949,4.11688 3.79379,8.04098 7.37932,10.51234 1.79277,1.23567 3.86809,2.08301 6.0304,2.33859 2.16231,0.25558 4.40928,-0.0949 6.34028,-1.10093 2.35312,-1.22596 4.14782,-3.37278 5.26217,-5.78076 1.11436,-2.40798 1.5888,-5.0701 1.73783,-7.71924 0.18989,-3.37546 -0.14047,-6.80646 -1.25,-10 -1.20527,-3.46909 -3.39005,-6.67055 -6.47275,-8.6666 -1.54136,-0.99803 -3.29195,-1.68356 -5.11089,-1.93515 -1.81893,-0.25158 -3.70476,-0.0633 -5.41636,0.60175 -0.97547,0.37901 -1.88744,0.9074 -2.75,1.5"
+ style="display:inline;fill:url(#radialGradient626);fill-opacity:1;stroke:none"
+ inkscape:label="Clip - Right Eyelid" />
+ </clipPath>
+ <radialGradient
+ r="14.572236"
+ fy="137.66095"
+ fx="223.19559"
+ cy="137.66095"
+ cx="223.19559"
+ gradientTransform="matrix(1.0857794,-0.03431182,0.03943781,1.2479887,-15.5421,-75.904827)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient626"
+ xlink:href="#linearGradient28799-3"
+ inkscape:collect="always" />
+ <clipPath
+ id="clipPath631"
+ clipPathUnits="userSpaceOnUse">
+ <path
+ sodipodi:nodetypes="zzzzzzz"
+ inkscape:connector-curvature="0"
+ id="path633"
+ d="m 54.23244,122.36218 c -1.78096,0.097 -3.48461,0.91899 -4.78785,2.1367 -1.30323,1.21771 -2.22137,2.81176 -2.78618,4.50357 -1.12962,3.38363 -0.87548,7.05177 -0.6187,10.60973 0.23251,3.22162 0.47041,6.50533 1.67679,9.50158 0.60319,1.49813 1.45024,2.91021 2.58034,4.06395 1.13009,1.15374 2.55173,2.04189 4.11829,2.43447 1.46884,0.36809 3.03816,0.29183 4.48279,-0.16209 1.44462,-0.45392 2.76391,-1.27887 3.84623,-2.33791 1.57904,-1.54507 2.64326,-3.5662 3.25345,-5.68947 0.61019,-2.12328 0.78416,-4.35155 0.7524,-6.56053 -0.0397,-2.76435 -0.40091,-5.53851 -1.26575,-8.16439 -0.86485,-2.62588 -2.24575,-5.10327 -4.1728,-7.08561 -0.93331,-0.96009 -1.99776,-1.80513 -3.19858,-2.39747 -1.20082,-0.59233 -2.54344,-0.92535 -3.88043,-0.85253"
+ style="display:inline;fill:url(#radialGradient635);fill-opacity:1;stroke:none"
+ inkscape:label="Clip - Left Eyelid" />
+ </clipPath>
+ <radialGradient
+ r="14.572236"
+ fy="137.66095"
+ fx="223.19559"
+ cy="137.66095"
+ cx="223.19559"
+ gradientTransform="matrix(0.81524244,-0.03431182,0.02961133,1.2479887,9.5624,-75.904827)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient635"
+ xlink:href="#linearGradient28799-5-5"
+ inkscape:collect="always" />
+ <clipPath
+ id="clipPath697"
+ clipPathUnits="userSpaceOnUse">
+ <path
+ inkscape:label="Clip - Beak Side Highlight"
+ inkscape:connector-curvature="0"
+ id="path699"
+ d="m 214.63605,148.03815 c -2.51029,-0.0409 -4.99135,0.28921 -7.27146,0.88384 -4.05254,1.05688 -7.57367,2.93934 -10.08468,5.39315 -1.62814,0.85539 -3.05003,1.89919 -4.20722,3.08639 -0.66186,0.67901 -1.24391,1.41694 -1.50131,2.24757 -0.20244,0.65333 -0.19857,1.34469 -0.28524,2.01986 -0.0324,0.25293 -0.0778,0.5073 -0.0362,0.76548 0.0208,0.12909 0.0631,0.25809 0.13756,0.38081 0.0221,0.0364 0.0528,0.0707 0.0806,0.1055 0.0825,0.15031 0.18297,0.29681 0.31473,0.43099 0.28806,0.29334 0.68023,0.53107 1.09417,0.73203 0.41394,0.20097 0.85255,0.36757 1.2815,0.54936 2.28006,0.96628 4.22773,2.32456 5.9925,3.75924 2.3677,1.92485 4.52941,4.06099 7.5099,5.46004 2.10465,0.98794 4.52773,1.552 6.92602,1.72396 2.81637,0.20193 5.58521,-0.12293 8.18167,-0.69344 2.40631,-0.52873 4.69673,-1.27132 6.75202,-2.25401 3.90702,-1.86802 6.98699,-4.60634 11.42445,-5.83442 0.96876,-0.2681 1.99041,-0.45921 2.91317,-0.78993 0.92276,-0.33072 1.76305,-0.8265 2.11948,-1.52711 0.34261,-0.67347 0.2049,-1.45031 0.23569,-2.18968 0.0329,-0.791 0.26357,-1.5559 0.33312,-2.34278 0.0695,-0.78687 -0.0382,-1.6289 -0.62199,-2.35178 -0.12955,-0.16043 -0.28324,-0.31001 -0.45163,-0.45157 -0.0509,-0.29235 -0.22134,-0.58029 -0.46622,-0.83239 -0.50487,-0.51975 -1.29334,-0.87172 -2.09515,-1.11671 -1.09824,-0.33555 -2.25599,-0.50211 -3.39891,-0.69601 -3.51093,-0.59565 -6.96955,-1.47539 -10.29467,-2.60394 -1.65302,-0.56104 -3.27073,-1.18327 -4.90416,-1.77928 -1.67927,-0.61273 -3.38672,-1.20103 -5.16515,-1.57729 -1.49109,-0.31546 -3.00643,-0.47332 -4.51259,-0.49788 z"
+ style="display:inline;fill:url(#radialGradient701);fill-opacity:1;stroke:none;stroke-width:0.77538049" />
+ </clipPath>
+ <radialGradient
+ r="31.111488"
+ fy="193.09949"
+ fx="294.48483"
+ cy="193.09949"
+ cx="294.48483"
+ gradientTransform="matrix(0.81494503,-0.25452614,0.31491054,0.43302392,-75.371375,150.73818)"
+ gradientUnits="userSpaceOnUse"
+ id="radialGradient701"
+ xlink:href="#linearGradient28469-0"
+ inkscape:collect="always" />
+ <clipPath
+ id="clipPath816"
+ clipPathUnits="userSpaceOnUse">
+ <path
+ sodipodi:nodetypes="zssacaaaaaaaaaacsszzz"
+ inkscape:connector-curvature="0"
+ id="path818"
+ d="m -26.29567,154.81433 c -1.0464,1.31136 -1.72773,2.88726 -2.13927,4.51369 -0.41153,1.62642 -0.56228,3.30801 -0.62653,4.98446 -0.12849,3.35291 0.0765,6.77015 -0.81096,10.00604 -0.94874,3.4595 -3.07595,6.45649 -5.15761,9.37795 -3.60485,5.05916 -7.248548,10.25011 -9.027058,16.20217 -1.077103,3.60469 -1.435613,7.42255 -1.04841,11.16474 -4.035298,5.9262 -7.528852,12.22112 -10.4229,18.78069 -4.386197,9.94163 -7.396115,20.5265 -8.454552,31.34105 -1.296051,13.24236 0.397579,26.86184 5.627472,39.09655 3.781309,8.84592 9.417708,16.94379 16.68566,23.2466 3.695408,3.20468 7.799668,5.93944 12.189498,8.09709 15.21252,7.47713 34.01348,7.49101 48.97296,-0.48031 7.81838,-4.16611 14.41789,-10.2582 20.78084,-16.42232 3.83183,-3.71209 7.64353,-7.51249 10.56653,-11.97551 5.62746,-8.59236 7.58747,-19.03566 8.80544,-29.23436 2.12971,-17.83321 2.1984,-36.66998 -5.62137,-52.83816 -2.69219,-5.56638 -6.27896,-10.69891 -10.58065,-15.14052 -1.14547,-7.78087 -3.40638,-15.39666 -6.69212,-22.54215 -2.37045,-5.15502 -5.2683,-10.06187 -7.47079,-15.29085 -0.90422,-2.14672 -1.68995,-4.34486 -2.69346,-6.44699 -1.00352,-2.10213 -2.24145,-4.12498 -3.92446,-5.73541 -1.72343,-1.6491 -3.87096,-2.81824 -6.13593,-3.56631 -2.26498,-0.74806 -4.64917,-1.08697 -7.03147,-1.2068 -4.7646,-0.23966 -9.53872,0.38348 -14.30559,0.19423 -3.79476,-0.15066 -7.57776,-0.81566 -11.36892,-0.59186 -1.89557,0.1119 -3.79087,0.45058 -5.55026,1.1649 -1.7594,0.71432 -3.38173,1.81713 -4.56609,3.30139"
+ style="display:inline;fill:#fdfdfb;fill-opacity:1;stroke:none"
+ inkscape:label="Clip - Lower Beak Shadow" />
+ </clipPath>
+ <clipPath
+ id="clipPath820"
+ clipPathUnits="userSpaceOnUse">
+ <path
+ inkscape:label="Clip - Upper Beak Shadow"
+ style="display:inline;fill:#fdfdfb;fill-opacity:1;stroke:none;stroke-width:1.04194593"
+ d="m -47.29567,161.44377 c -1.0464,1.42368 -1.72773,3.13456 -2.13927,4.9003 -0.41153,1.76572 -0.56228,3.59134 -0.62653,5.41138 -0.12849,3.64009 0.0765,7.35002 -0.81096,10.86307 -0.94874,3.75581 -3.07595,7.0095 -5.15761,10.18119 -3.60485,5.49248 -7.248548,11.12804 -9.027058,17.58991 -1.077103,3.91343 -1.435613,8.0583 -1.04841,12.12101 -4.035298,6.43379 -7.528852,13.26788 -10.4229,20.38928 -4.386197,10.79315 -7.396115,22.28463 -8.454552,34.02546 -1.296051,14.37658 0.397579,29.16259 5.627472,42.44522 3.781309,9.60359 9.417708,18.39505 16.68566,25.23771 3.695408,3.47916 7.799668,6.44816 12.189498,8.79061 15.21252,8.11756 34.01348,8.13263 48.97296,-0.52145 7.81838,-4.52294 14.41789,-11.13683 20.78084,-17.82891 3.83183,-4.03004 7.64353,-8.15595 10.56653,-13.00123 5.62746,-9.32831 7.58747,-20.66609 8.80544,-31.73832 2.12971,-19.36065 2.1984,-39.81082 -5.62137,-57.36383 -2.69219,-6.04314 -6.27896,-11.61528 -10.58065,-16.43732 -1.14547,-8.44732 -3.40638,-16.71541 -6.69212,-24.47292 -2.37045,-5.59655 -5.2683,-10.92368 -7.47079,-16.60053 -0.90422,-2.33059 -1.68995,-4.71701 -2.69346,-6.99919 -1.00352,-2.28218 -2.24145,-4.47829 -3.92446,-6.22665 -1.72343,-1.79035 -3.87096,-3.05963 -6.13593,-3.87177 -2.26498,-0.81213 -4.64917,-1.18007 -7.03147,-1.31016 -4.7646,-0.26019 -9.53872,0.41632 -14.30559,0.21086 -3.79476,-0.16356 -7.57776,-0.88552 -11.36892,-0.64255 -1.89557,0.12148 -3.79087,0.48917 -5.55026,1.26467 -1.7594,0.77551 -3.38173,1.97277 -4.56609,3.58416"
+ id="path822"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="zssacaaaaaaaaaacsszzz" />
+ </clipPath>
+ </defs>
+ <metadata
+ id="metadata27455">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title>Tux</dc:title>
+ <dc:date>20 June 2012</dc:date>
+ <dc:creator>
+ <cc:Agent>
+ <dc:title>Garrett LeSage</dc:title>
+ </cc:Agent>
+ </dc:creator>
+ <cc:license
+ rdf:resource="http://creativecommons.org/publicdomain/zero/1.0/" />
+ <dc:contributor>
+ <cc:Agent>
+ <dc:title>Larry Ewing, the creator of the original Tux graphic</dc:title>
+ </cc:Agent>
+ </dc:contributor>
+ <dc:subject>
+ <rdf:Bag>
+ <rdf:li>tux</rdf:li>
+ <rdf:li>Linux</rdf:li>
+ <rdf:li>penguin</rdf:li>
+ <rdf:li>logo</rdf:li>
+ </rdf:Bag>
+ </dc:subject>
+ <dc:rights>
+ <cc:Agent>
+ <dc:title>Larry Ewing, Garrett LeSage</dc:title>
+ </cc:Agent>
+ </dc:rights>
+ <dc:source>https://github.com/garrett/Tux</dc:source>
+ </cc:Work>
+ <cc:License
+ rdf:about="http://creativecommons.org/publicdomain/zero/1.0/">
+ <cc:permits
+ rdf:resource="http://creativecommons.org/ns#Reproduction" />
+ <cc:permits
+ rdf:resource="http://creativecommons.org/ns#Distribution" />
+ <cc:permits
+ rdf:resource="http://creativecommons.org/ns#DerivativeWorks" />
+ </cc:License>
+ </rdf:RDF>
+ </metadata>
+ <g
+ transform="translate(-16.987948,-19.575154)"
+ style="display:inline"
+ inkscape:label="Tux"
+ id="layer2"
+ inkscape:groupmode="layer">
+ <g
+ inkscape:label="Tux"
+ id="g1212">
+ <g
+ id="g463"
+ inkscape:label="Body">
+ <path
+ inkscape:label="Body Without Tummy"
+ sodipodi:nodetypes="csssccssssccsscccsccscccccscsscscsssc"
+ inkscape:connector-curvature="0"
+ id="path28712-2"
+ d="m 140.8125,19.578125 c -7.16795,-0.07795 -14.42402,1.374646 -20.73438,4.775391 -6.70663,3.614308 -12.20088,9.395485 -15.58593,16.220703 -3.38347,6.822028 -4.712926,14.108148 -4.914065,22.132808 -0.382163,15.24684 0.34393,31.23872 1.494145,45.730473 0.3054,4.41258 0.85369,6.99499 0.29297,11.52344 -1.88652,9.62986 -10.313201,16.11178 -14.80468,24.57031 -4.954704,9.33089 -7.043403,19.88101 -10.783203,29.76172 -3.422488,9.04236 -8.227578,17.52067 -11.470703,26.62891 -4.534864,12.73604 -5.890504,26.73088 -2.894532,39.91406 2.283855,10.04965 7.054597,19.47291 13.484375,27.5332 -0.930503,1.67688 -1.832233,3.3716 -2.792968,5.03125 -2.979452,5.14693 -6.619557,10.02667 -8.316407,15.72656 -0.848425,2.84995 -1.182417,5.88981 -0.634765,8.8125 0.547652,2.92268 2.02651,5.71858 4.351562,7.57227 1.522028,1.21346 3.357446,1.99485 5.253906,2.43359 1.896461,0.43879 3.856625,0.54531 5.802735,0.50391 7.394587,-0.15718 14.559024,-2.40522 21.71289,-4.2832 4.23946,-1.11291 8.51036,-2.10105 12.80273,-2.98829 15.24055,-3.12209 32.25031,-1.87591 46.39844,0.17579 4.79197,0.72368 9.54981,1.67102 14.25977,2.8125 7.37714,1.78788 14.72878,4.06701 22.3164,4.2832 1.99729,0.0569 4.0106,-0.0306 5.96094,-0.46484 1.95034,-0.43429 3.84211,-1.22688 5.4043,-2.47266 2.32922,-1.85746 3.80834,-4.65745 4.35546,-7.58594 0.54713,-2.9285 0.20917,-5.97702 -0.64843,-8.83008 -1.71521,-5.70613 -5.38873,-10.5749 -8.43555,-15.69531 -1.20215,-2.0203 -2.32023,-4.0926 -3.51367,-6.11719 9.16873,-10.29563 16.54824,-22.20278 20.8164,-35.28125 4.65874,-14.27524 5.51426,-29.64566 3.55274,-44.5332 -1.96148,-14.88754 -6.6821,-29.32114 -12.8984,-42.99023 -7.79769,-17.13839 -14.35278,-23.331 -19.10351,-38.38086 -5.13471,-16.266273 -0.8948,-35.514213 -4.71094,-50.267583 -1.3618,-5.0173 -3.53277,-9.80681 -6.32617,-14.191405 -3.27306,-5.137474 -7.42457,-9.742407 -12.35743,-13.316406 -7.87066,-5.702527 -17.61519,-8.638455 -27.33398,-8.744141 z"
+ style="display:inline;fill:#020204;fill-opacity:1;stroke:none" />
+ <path
+ inkscape:label="Tummy"
+ style="display:inline;fill:#fdfdfb;fill-opacity:1;stroke:none"
+ d="m 112.70417,105.45215 c -1.0464,1.31136 -1.72773,2.88726 -2.13927,4.51369 -0.41153,1.62642 -0.56228,3.30801 -0.62653,4.98446 -0.12849,3.35291 0.0765,6.77015 -0.81096,10.00604 -0.94874,3.4595 -3.07595,6.45649 -5.15761,9.37795 -3.60485,5.05916 -7.248548,10.25011 -9.027058,16.20217 -1.077103,3.60469 -1.435613,7.42255 -1.04841,11.16474 -4.035298,5.9262 -7.528852,12.22112 -10.4229,18.78069 -4.386197,9.94163 -7.396115,20.5265 -8.454552,31.34105 -1.296051,13.24236 0.397579,26.86184 5.627472,39.09655 3.781309,8.84592 9.417708,16.94379 16.68566,23.2466 3.695408,3.20468 7.799668,5.93944 12.189498,8.09709 15.21252,7.47713 34.01348,7.49101 48.97296,-0.48031 7.81838,-4.16611 14.41789,-10.2582 20.78084,-16.42232 3.83183,-3.71209 7.64353,-7.51249 10.56653,-11.97551 5.62746,-8.59236 7.58747,-19.03566 8.80544,-29.23436 2.12971,-17.83321 2.1984,-36.66998 -5.62137,-52.83816 -2.69219,-5.56638 -6.27896,-10.69891 -10.58065,-15.14052 -1.14547,-7.78087 -3.40638,-15.39666 -6.69212,-22.54215 -2.37045,-5.15502 -5.2683,-10.06187 -7.47079,-15.29085 -0.90422,-2.14672 -1.68995,-4.34486 -2.69346,-6.44699 -1.00352,-2.10213 -2.24145,-4.12498 -3.92446,-5.73541 -1.72343,-1.6491 -3.87096,-2.81824 -6.13593,-3.56631 -2.26498,-0.74806 -4.64917,-1.08697 -7.03147,-1.2068 -4.7646,-0.23966 -9.53872,0.38348 -14.30559,0.19423 -3.79476,-0.15066 -7.57776,-0.81566 -11.36892,-0.59186 -1.89557,0.1119 -3.79087,0.45058 -5.55026,1.1649 -1.7594,0.71432 -3.38173,1.81713 -4.56609,3.30139"
+ id="path29719-5"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="zssacaaaaaaaaaacsszzz" />
+ </g>
+ <g
+ style="display:inline"
+ id="g562"
+ inkscape:label="Body Shadows">
+ <path
+ inkscape:label="Left Pec Shadow"
+ style="display:inline;opacity:0.25;fill:url(#radialGradient18834);fill-opacity:1;stroke:none;filter:url(#filter4487-6)"
+ d="m -61.00266,211.59308 c 0.88005,1.52387 -0.54737,6.77829 19.96381,3.4153 0,0 -3.60202,0.4573 -7.15281,1.40419 -5.52127,2.1334 -10.33021,4.51706 -14.04019,7.67524 -3.67553,3.12167 -6.36707,7.19694 -9.73973,10.69705 0,0 5.46173,-11.5187 6.82331,-14.98742 1.36157,-3.46872 -0.22795,-3.30999 0.84893,-8.4136 1.07688,-5.1036 3.71346,-10.00699 3.71346,-10.00699 0,0 -2.15241,7.21088 -0.41678,10.21623 z"
+ id="path4400-1"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="scccczzcs"
+ transform="matrix(1.1543044,0,0,1,166.33231,-58.362183)" />
+ <path
+ inkscape:label="Right Pec Shadow"
+ style="display:inline;opacity:0.42000002;fill:url(#radialGradient18836);fill-opacity:1;stroke:none;filter:url(#filter4427-5-7)"
+ d="m 172.04993,151.8559 c -4.82509,3.36138 -7.65241,2.96341 -13.50685,3.62087 -5.85444,0.65746 -21.69838,0.41943 -21.69838,0.41943 0,0 2.29371,-0.0427 7.37759,0.90419 5.08388,0.94693 15.45307,1.85232 21.29176,4.07468 5.83869,2.22236 7.96846,2.8566 11.51723,5.10056 5.05107,3.19388 8.75817,8.19694 13.587,11.69705 0,0 0.23377,-4.6437 -1.71568,-8.11242 -1.94945,-3.46872 -7.19037,-8.93499 -8.7322,-14.0386 -1.54183,-5.1036 -2.27429,-15.13199 -2.27429,-15.13199 0,0 -1.02108,8.10485 -5.84618,11.46623 z"
+ id="path4400-2-8"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="zzczzaczzcz" />
+ <path
+ inkscape:label="Middle Pec Shadow"
+ style="display:inline;opacity:0.2;fill:url(#radialGradient18838);fill-opacity:1;stroke:none;filter:url(#filter4538-7)"
+ d="m 126.66974,144.67794 c -0.17937,1.45594 -0.41189,2.90533 -0.69695,4.34431 -0.14052,0.70936 -0.2949,1.41989 -0.55905,2.09306 -0.26414,0.67317 -0.64419,1.31214 -1.18125,1.79639 -0.47071,0.42443 -1.04439,0.71595 -1.62069,0.97975 -2.24827,1.02916 -4.6544,1.71261 -7.10798,2.01899 0.97993,0.0719 1.95856,0.16127 2.93528,0.2682 0.61534,0.0674 1.23207,0.14208 1.83169,0.29586 0.59961,0.15377 1.18472,0.38955 1.68422,0.75518 0.54781,0.40099 0.97799,0.94833 1.29931,1.54636 0.64023,1.19159 0.85435,2.56281 0.97272,3.91031 0.15139,1.72336 0.16244,3.45904 0.033,5.18419 0.11585,-1.15429 0.35775,-2.29589 0.72,-3.39797 0.65284,-1.98614 1.70416,-3.84789 3.11974,-5.38642 0.56171,-0.6105 1.18038,-1.17036 1.85876,-1.6479 2.07821,-1.46294 4.71804,-2.1055 7.23612,-1.76133 -2.55897,0.11302 -5.14896,-0.69089 -7.19419,-2.23302 -1.04161,-0.78539 -1.94875,-1.76287 -2.57976,-2.90463 -0.97579,-1.76561 -1.25012,-3.90675 -0.75097,-5.86133"
+ id="path4491-9"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cssacaaaacaaacssc" />
+ <path
+ inkscape:label="Tummy Shadow"
+ style="color:#000000;display:inline;overflow:visible;visibility:visible;opacity:0.11000001;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:2;marker:none;filter:url(#filter4592-2);enable-background:accumulate"
+ d="m 120.49984,178.71875 c -1.22954,4.67934 -2.07519,9.45949 -2.52566,14.27665 -0.63702,6.81216 -0.48368,13.6725 -0.84934,20.5046 -0.31029,5.79753 -0.99107,11.65587 0.0159,17.3737 0.48017,2.72655 1.34273,5.38547 2.55456,7.87467 0.19249,-0.95006 0.33356,-1.91054 0.42239,-2.87583 0.42661,-4.63604 -0.3541,-9.28689 -0.61781,-13.93504 -0.46225,-8.14744 0.66569,-16.2899 1.125,-24.4375 0.3526,-6.25476 0.31082,-12.53173 -0.125,-18.78125 h -4e-5"
+ id="path4542-7"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cssscasacc" />
+ <path
+ inkscape:label="Left Pec Upper Shadow"
+ transform="matrix(-0.06991927,0.95700905,-0.56450744,-0.11853409,236.56758,-180.37928)"
+ style="display:inline;opacity:0.25;fill:#7c7c7c;fill-opacity:1;stroke:none;filter:url(#filter15211-9)"
+ d="m 351.9604,200.85653 c -1.45162,0.38883 -1.23008,3.99417 -0.29604,5.49789 0.78886,1.26999 3.07235,2.27109 3.75853,1.00504 1.11412,-2.05562 -1.47192,-7.03613 -3.46249,-6.50293 z"
+ id="path28767-9-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="aaaa" />
+ <path
+ inkscape:label="Right Pec Side Shadow"
+ sodipodi:nodetypes="zzzzsz"
+ inkscape:connector-curvature="0"
+ id="path15189-4"
+ d="m 361.75,209.34296 c 0.002,-1.53313 -7.56474,-10.0564 -9.7896,-8.48643 -2.22486,1.56998 -0.49172,3.7842 -0.29604,5.49789 0.19568,1.71368 -0.94537,6.60933 0.23849,7.25934 1.18386,0.65001 3.36607,-2.5198 5.30111,-4.27697 1.55818,-1.41494 4.54398,1.53929 4.54604,0.006 z"
+ style="display:inline;opacity:0.75;fill:#7c7c7c;fill-opacity:1;stroke:none;filter:url(#filter14706-3)"
+ transform="matrix(-0.09596121,-0.95700905,-0.77476232,0.11853409,398.90188,493.24449)" />
+ <path
+ inkscape:label="Head Shadow"
+ transform="matrix(1.1522137,0,0,1.1522137,-163.02721,-72.199565)"
+ style="display:inline;fill:#7c7c7c;fill-opacity:1;stroke:none;filter:url(#filter15133-1)"
+ d="m 277.9604,90.856536 c -2.22486,1.569973 -1.25289,3.530477 -0.29604,5.497884 0.95685,1.967407 -2.10429,7.63969 -2.13651,7.88434 -0.0322,0.24465 6.02534,-2.8754 7.67611,-4.901967 1.94956,-2.393373 6.87703,3.237917 6.60851,2.381167 0.002,-1.53312 -9.62721,-12.431397 -11.85207,-10.861424 z"
+ id="path28767-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="zzzscz" />
+ <path
+ transform="translate(160,-57.362183)"
+ sodipodi:nodetypes="asassa"
+ inkscape:connector-curvature="0"
+ id="path4596-3"
+ d="m 16.687345,165.86218 c -2.16217,1.96937 1.01359,4.92767 2.51966,8.40429 0.93626,2.16126 3.52677,5.20509 6.03244,4.7175 1.8848,-0.36677 3.05427,-3.07936 2.87588,-4.99121 -0.34416,-3.68852 -3.45669,-4.55256 -5.7172,-5.81949 -1.79139,-1.00401 -4.19258,-3.69391 -5.71078,-2.31109 z"
+ style="display:inline;fill:#838384;fill-opacity:1;stroke:none;filter:url(#filter15185-2)"
+ inkscape:label="Neck Shadow" />
+ <path
+ sodipodi:nodetypes="aaaasssaaaaaaaaaaa"
+ inkscape:connector-curvature="0"
+ d="m -28.632498,172.60136 c 1.702936,4.93775 5.13035,9.15199 9.185848,12.44354 1.348656,1.0946 2.782167,2.10442 4.366233,2.817 1.584067,0.71257 3.331648,1.11945 5.062377,0.97245 1.6949733,-0.14396 3.3074706,-0.80936 4.788721,-1.64575 1.4812505,-0.8364 2.8560173,-1.84688 4.29298914,-2.75725 2.46262056,-1.56015 5.09983966,-2.82139 7.65715996,-4.22092 3.0824622,-1.68692 6.0695999,-3.59014 8.6646899,-5.95927 1.187948,-1.08451 2.295748,-2.26795 3.607519,-3.19888 1.31177,-0.93094 2.882987,-1.60572 4.487811,-1.49651 1.203853,0.0819 2.332908,0.59386 3.51249,0.84794 0.589792,0.12704 1.200784,0.18932 1.797215,0.0984 0.596431,-0.0909 1.179727,-0.34439 1.597895,-0.77928 0.512367,-0.53286 0.736406,-1.29981 0.709607,-2.03855 -0.0268,-0.73874 -0.284448,-1.45303 -0.628853,-2.10712 -0.68881,-1.30819 -1.734547,-2.43513 -2.200224,-3.83833 -0.414395,-1.24867 -0.330451,-2.59887 -0.293929,-3.91401 0.03652,-1.31513 0.0075,-2.68902 -0.598601,-3.85671 -0.461591,-0.88922 -1.236126,-1.59525 -2.12164,-2.06391 -0.885513,-0.46867 -1.878578,-0.71001 -2.876081,-0.80365 -1.995007,-0.18727 -3.993929,0.19997 -5.994489,0.31349 -2.655817,0.1507 -5.321957,-0.18176 -7.9772499,-0.0221 -3.3112912,0.1991 -6.5570138,1.16053 -9.87428,1.16645 -3.7859765,0.007 -7.5681223,-1.23192 -11.3075401,-0.63996 -1.60458,0.25401 -3.134778,0.8376 -4.675685,1.35219 -1.540906,0.5146 -3.132742,0.96724 -4.757133,0.94371 -1.844198,-0.0267 -3.629272,-0.66537 -5.468985,-0.79666 -0.919856,-0.0656 -1.86247,-1.8e-4 -2.726086,0.32326 -0.863615,0.32344 -1.644513,0.92357 -2.068349,1.7426 -0.242869,0.46932 -0.363194,0.99683 -0.385859,1.52479 -0.02266,0.52795 0.05026,1.05702 0.177828,1.56983 0.255132,1.02563 0.724233,1.98285 1.109821,2.96688 1.392508,3.55373 1.692361,7.44806 2.93678,11.05632"
+ id="path28461-8-7"
+ style="display:inline;fill:#000000;fill-opacity:0.25882353;stroke:none;filter:url(#filter30479-2)"
+ transform="translate(138.99984,-49.362181)"
+ inkscape:label="Lower Beak Shadow"
+ clip-path="url(#clipPath816)" />
+ <path
+ transform="matrix(1,0,0,0.92110599,159.99984,-43.254675)"
+ sodipodi:nodetypes="caaaaaaaasssaaaaaac"
+ inkscape:connector-curvature="0"
+ d="m -54.3809,165.4735 c 3.308481,2.21892 6.276719,4.94413 8.76949,8.0515 2.313244,2.88358 4.281072,6.1543 7.29931,8.28886 2.132561,1.50819 4.694875,2.3578 7.29406,2.61606 3.051509,0.3032 6.139761,-0.18685 9.08171,-1.05205 2.72664,-0.80188 5.363225,-1.92931 7.78216,-3.4214 4.5982326,-2.83636 8.4392136,-6.99279 13.51002,-8.85709 1.1070251,-0.407 2.2592345,-0.69817 3.3265087,-1.20024 1.0672741,-0.50208 2.071356,-1.25404 2.5810913,-2.31768 0.489979,-1.02241 0.4709637,-2.20249 0.63053,-3.32496 0.1707072,-1.20085 0.5537633,-2.36184 0.7638732,-3.55642 0.2101099,-1.19458 0.2351735,-2.47234 -0.2814032,-3.56975 -0.4277722,-0.90876 -1.2053869,-1.6278 -2.0998754,-2.08466 -0.8944886,-0.45686 -1.9010816,-0.6644 -2.9042801,-0.71362 -2.00639693,-0.0985 -3.9875479,0.41519 -5.9880545,0.59766 -2.649555,0.24167 -5.3179008,-0.0991 -7.97725,-0.019 -3.308278,0.0996 -6.568191,0.84884 -9.87428,1.00503 -3.771652,0.17818 -7.534056,-0.41751 -11.30754,-0.55139 -1.632251,-0.0579 -3.2754,-0.0286 -4.884302,0.25254 -1.608902,0.28112 -3.188197,0.82168 -4.548518,1.72563 -1.319979,0.87714 -2.396737,2.06728 -3.606567,3.09101 -0.604916,0.51187 -1.247757,0.98508 -1.953748,1.34495 -0.705991,0.35987 -1.478799,0.60451 -2.270305,0.64257 -0.40728,0.0196 -0.818345,-0.0152 -1.2213,0.0472 -0.676172,0.10463 -1.303709,0.49355 -1.698284,1.05254 -0.394576,0.55899 -0.550896,1.28053 -0.423046,1.9527 v 1e-5"
+ id="path28461-84-3"
+ style="display:inline;opacity:0.3;fill:#000000;fill-opacity:1;stroke:none;filter:url(#filter30475-4)"
+ inkscape:label="Upper Beak Shadow"
+ clip-path="url(#clipPath820)" />
+ </g>
+ <g
+ id="g481"
+ inkscape:label="Arms"
+ style="display:inline">
+ <path
+ inkscape:label="Right Arm"
+ style="display:inline;fill:#020204;fill-opacity:1;stroke:none"
+ id="path29705-5-0"
+ d="m 45.10134,224.44951 c 6.084796,4.89463 9.945575,12.28597 11.14096,20.00305 0.932288,6.01859 0.326343,12.19496 -1.030694,18.13223 -1.357038,5.93726 -3.452212,11.67615 -5.531716,17.40051 -0.829244,2.2827 -1.662186,4.5871 -1.991686,6.9933 -0.3295,2.40621 -0.120849,4.95426 1.039536,7.08777 1.330223,2.44578 3.812954,4.13021 6.48184,4.92292 2.634941,0.78263 5.481042,0.76618 8.148186,0.10153 2.667145,-0.66466 7.157372,-1.52591 9.328374,-3.21179 5.515551,-4.28306 6.82474,-11.71935 8.13188,-18.24961 1.363195,-6.8103 0.69637,-13.95038 -0.50149,-20.83014 -1.63342,-9.38136 -4.38172,-18.55105 -7.87991,-27.40773 -2.573144,-6.51466 -5.8695,-12.74244 -9.79135,-18.52016 -3.847635,-5.66839 -9.306853,-10.09303 -13.34018,-15.63502 -1.401311,-1.92547 -3.086675,-3.93159 -4.463417,-5.87845 -2.968456,-4.19771 -2.296208,-3.41451 -4.137707,-6.13155 -1.336068,-1.9713 -3.445653,-2.6487 -5.665474,-3.51096 -2.219821,-0.86226 -4.71266,-1.11623 -7.005012,-0.47108 -3.021449,0.85034 -5.489775,3.23113 -6.802912,6.08208 -1.313136,2.85095 -1.54284,6.12313 -1.031948,9.2201 0.659095,3.99536 2.486278,7.70258 4.54639,11.18873 2.326679,3.93724 5.021993,7.72707 8.53596,10.65407 3.667149,3.05459 8.101532,5.06875 11.82037,8.0602"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="aaaaassssaaaasssscssssc"
+ transform="translate(160,-57.362183)" />
+ <path
+ inkscape:label="Left Arm"
+ sodipodi:nodetypes="cczzzzzzzzzzzzszzzzzcssscc"
+ inkscape:connector-curvature="0"
+ id="path14967-1-0"
+ d="m -69.527091,194.03592 c -7.073089,8.03686 -14.352222,15.81627 -18.345769,24.50506 -1.976249,4.41329 -2.910774,9.20725 -4.26498,13.84932 -1.537901,5.27176 -3.626089,10.3703 -5.97071,15.33612 -2.16496,4.58531 -4.54982,9.06291 -6.93891,13.53553 -1.7382,3.25409 -3.50514,6.58104 -4.10782,10.22071 -0.47628,2.87632 -0.1985,5.84423 0.53375,8.66626 0.73225,2.82202 1.90965,5.5106 3.23776,8.10601 5.667249,11.07504 14.170032,20.62168 24.24176,27.92472 4.570626,3.31418 9.466691,6.18109 14.60245,8.52595 2.782468,1.27041 5.713552,2.40436 8.771859,2.45744 1.529154,0.0265 3.074104,-0.22544 4.47434,-0.84055 1.400236,-0.6151 2.650677,-1.60373 3.482541,-2.88709 1.022778,-1.5779 1.369917,-3.53829 1.164614,-5.40743 -0.205303,-1.86914 -0.934843,-3.65294 -1.913244,-5.25873 -2.389971,-3.92251 -6.165196,-6.76055 -9.79642,-9.57343 -7.840549,-6.07358 -15.424654,-12.48039 -22.68212,-19.23996 -2.049117,-1.90854 -4.098407,-3.87759 -5.53019,-6.28412 -1.394295,-2.34352 -2.147602,-5.01376 -2.65783,-7.69253 -1.399719,-7.34873 -1.040921,-15.08286 1.45958,-22.13343 0.978222,-2.75826 2.271183,-5.39201 3.51815,-8.03965 2.161326,-4.58906 4.207248,-9.26564 7.04933,-13.46723 3.537978,-5.23037 8.267489,-9.66049 11.15147,-15.27803 2.434229,-4.74149 3.419942,-10.07236 4.36185,-15.31831 0.736933,-4.10434 2.150416,-8.12437 2.869234,-12.23193 -1.406111,2.66567 -5.937961,7.04283 -8.710695,10.5253 z"
+ style="display:inline;fill:#020204;fill-opacity:1;stroke:none"
+ transform="translate(160,-57.362183)" />
+ </g>
+ <g
+ style="display:inline"
+ id="g514"
+ inkscape:label="Arm Shadows">
+ <path
+ inkscape:label="Right Arm Shadow"
+ sodipodi:nodetypes="cssssssccsssscc"
+ transform="matrix(1.0180731,0,0,1,-105.25547,-58.362183)"
+ inkscape:connector-curvature="0"
+ clip-path="url(#clipPath504)"
+ id="path29705-9"
+ d="m 290.78125,216.01843 c 0.48482,0.46774 0.98091,0.94261 1.5,1.375 3.66715,3.05459 5.61879,6.48526 9.33763,9.47671 6.0848,4.89463 12.25895,13.34358 13.45434,21.06066 0.93229,6.01859 -0.30093,9.28947 -1.80468,16.3878 -1.50374,7.09832 -5.76944,17.14832 -8.07376,23.99211 -0.9189,2.7291 1.86121,1.60306 1.49609,4.4798 -0.17944,1.41384 -0.19766,2.84238 -0.0346,4.25917 0.0227,-0.27104 0.0388,-0.5525 0.0693,-0.82194 0.44281,-3.92274 1.62331,-7.69479 2.90878,-11.39514 2.47416,-7.12214 5.31434,-14.10109 7.27196,-21.40792 1.95763,-7.30683 1.74028,-12.56443 0.71875,-18.84375 -1.28459,-7.89637 -5.79703,-15.18702 -12.1875,-20 -4.51852,-3.40313 -9.84688,-5.58465 -14.65625,-8.5625 z"
+ style="display:inline;fill:#838384;fill-opacity:1;stroke:none;filter:url(#filter14666-9)" />
+ <path
+ inkscape:label="Left Arm Shadow"
+ transform="translate(-150.00016,-58.362183)"
+ clip-path="url(#clipPath508)"
+ sodipodi:nodetypes="csczzczzsccczc"
+ inkscape:connector-curvature="0"
+ id="path15007-0"
+ d="m 232.33049,224.26954 c -2.32126,2.13749 -4.33307,4.61051 -5.95338,7.31823 -2.66801,4.45854 -4.23905,9.46835 -6.17809,14.28882 -1.44362,3.58886 -3.12519,7.19314 -3.32662,11.05622 -0.10346,1.98418 0.19056,3.96588 0.25671,5.95165 0.0662,1.98578 -0.11756,4.05108 -1.08967,5.78391 -0.81338,1.44988 -2.1659,2.58902 -3.73298,3.14402 2.11547,0.70686 4.00453,2.07546 5.33532,3.86539 1.11451,1.49902 1.82759,3.25366 2.79609,4.85091 0.78716,1.29818 1.75335,2.50124 2.94285,3.44463 1.1895,0.94339 2.61141,1.61974 4.11726,1.81293 2.06623,0.26508 4.23574,-0.42815 5.76541,-1.84225 -1.92538,-18.0357 -0.16195,-36.4572 5.15013,-53.80008 0.33544,-1.09515 0.68725,-2.19828 0.77034,-3.34063 0.0831,-1.14235 -0.12896,-2.34792 -0.82414,-3.2582 -0.37014,-0.48467 -0.86838,-0.87059 -1.4302,-1.1078 -0.56182,-0.2372 -1.18588,-0.32512 -1.79136,-0.25236 -0.60549,0.0727 -1.19096,0.306 -1.68059,0.66954 -0.48964,0.36355 -0.88227,0.85651 -1.12706,1.41507 h -2e-5"
+ style="display:inline;opacity:0.95;fill:#7c7c7c;fill-opacity:1;stroke:none;filter:url(#filter15053-7)" />
+ </g>
+ <g
+ style="display:inline"
+ id="g457"
+ inkscape:label="Feet">
+ <g
+ inkscape:label="Right Foot"
+ id="g444">
+ <path
+ sodipodi:nodetypes="aaaaaaaaaaaaaaaaaaaaaaaacscaa"
+ inkscape:connector-curvature="0"
+ id="path14483-7-4"
+ d="m 86.05618,328.13191 c -0.45671,1.54919 -1.15216,3.04585 -2.04962,4.41089 -1.9805,3.01237 -4.85449,5.2794 -7.7268,7.37015 -4.89889,3.56589 -10.00272,6.83785 -14.56318,10.89029 -3.05551,2.71513 -5.84112,5.75937 -8.42278,8.96491 -2.20789,2.74146 -4.2839,5.61929 -6.80089,8.05729 -2.53964,2.45993 -5.53049,4.44676 -8.75187,5.52933 -3.91796,1.31667 -8.05795,1.2533 -11.83233,0.25938 -2.64475,-0.69647 -5.22365,-1.90703 -6.86216,-4.0969 -1.6448,-2.19828 -2.1777,-5.15218 -2.36802,-8.05186 -0.33651,-5.12875 0.25967,-10.36956 0.8034,-15.57549 0.45167,-4.3257 0.86825,-8.65475 1.03855,-12.97167 0.30984,-7.85202 -0.19668,-15.63586 -1.23186,-23.27336 -0.17336,-1.27909 -0.36202,-2.56818 -0.25656,-3.88562 0.10519,-1.31741 0.5354,-2.6883 1.43615,-3.70529 0.83202,-0.93937 1.9928,-1.49566 3.15071,-1.76892 1.15792,-0.27325 2.32983,-0.2898 3.4927,-0.3215 2.74018,-0.0747 5.49647,-0.24039 8.19521,0.006 1.70282,0.15559 3.37264,0.47459 5.07313,0.6427 2.83764,0.28052 5.78134,0.13286 8.62739,-0.72369 3.06405,-0.92215 5.98631,-2.65158 9.09944,-3.12742 1.27732,-0.19521 2.559,-0.17251 3.80104,0.006 1.26746,0.18218 2.5284,0.54175 3.50235,1.33598 0.74019,0.60362 1.28194,1.43281 1.70583,2.31655 0.63144,1.31651 1.01921,2.77031 1.24115,4.2613 0.19663,1.32082 0.26639,2.68017 0.59789,3.95737 0.54536,2.10123 1.78089,3.88647 3.26736,5.33963 1.48646,1.45316 3.22499,2.60245 4.96058,3.73413 1.72907,1.12743 3.46794,2.24684 5.31472,3.17629 0.86675,0.43621 1.75751,0.83074 2.58612,1.33307 0.8286,0.50234 1.60071,1.12107 2.15093,1.93549 0.74634,1.10471 1.04569,2.55273 0.82168,3.97474 l 2e-5,-1e-5"
+ style="display:inline;opacity:0.2;fill:url(#linearGradient18840);fill-opacity:1;stroke:none;filter:url(#filter15115-3)"
+ inkscape:transform-center-x="-21.512644"
+ inkscape:transform-center-y="-26.916075"
+ transform="matrix(1.0268828,0,0,1,157.6864,-58.362183)"
+ inkscape:label="Right Foot Shadow" />
+ <path
+ style="display:inline;fill:url(#linearGradient18842);fill-opacity:1;stroke:none"
+ d="m 263.18967,278.25167 c -2.6238,3.11482 -6.268,5.17039 -9.89648,7.01985 -6.1886,3.15437 -12.60169,5.92177 -18.41964,9.71654 -3.89802,2.54249 -7.4959,5.52671 -10.86016,8.74238 -2.87719,2.75012 -5.60582,5.68745 -8.83247,8.01771 -3.25567,2.35122 -7.01915,4.05426 -10.99061,4.6502 -4.83026,0.72481 -9.82134,-0.21289 -14.29898,-2.16416 -3.13754,-1.36728 -6.15569,-3.3229 -7.96301,-6.22931 -1.81425,-2.91754 -2.22807,-6.48813 -2.23266,-9.92375 -0.008,-6.07666 1.11824,-12.09004 2.17848,-18.07349 0.88097,-4.97177 1.71949,-9.95483 2.26013,-14.97502 0.98337,-9.13118 0.9763,-18.35278 0.3199,-27.51327 -0.10993,-1.53416 -0.23754,-3.0832 -0.008,-4.60412 0.22922,-1.52092 0.85475,-3.0367 2.02069,-4.03986 1.07696,-0.9266 2.52093,-1.33598 3.93947,-1.4145 1.41854,-0.0785 2.83404,0.14655 4.23982,0.35197 3.31254,0.48405 6.65159,0.8649 9.88917,1.71656 2.04284,0.53738 4.03315,1.25925 6.0722,1.81081 3.40258,0.92039 6.96639,1.36144 10.46739,0.95192 3.76917,-0.44089 7.42987,-1.85678 11.22363,-1.76474 1.55658,0.0378 3.1015,0.33171 4.58649,0.79985 1.51539,0.47772 3.00914,1.16182 4.12281,2.29512 0.84639,0.8613 1.43579,1.94539 1.87872,3.06879 0.65982,1.67352 1.01492,3.457 1.16703,5.24945 0.13475,1.58788 0.11343,3.19441 0.41433,4.75933 0.49503,2.57458 1.84746,4.92305 3.52848,6.93494 1.68102,2.01189 3.68982,3.72048 5.69641,5.40783 1.99908,1.68103 4.0106,3.35469 6.16708,4.82839 1.0121,0.69165 2.05642,1.33949 3.01736,2.10062 0.96094,0.76113 1.84466,1.6468 2.44543,2.71535 0.81492,1.44944 1.06377,3.2077 0.53758,4.87655 -0.5262,1.66885 -1.48162,3.27659 -2.67059,4.68806 z"
+ id="path14296-0"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="aaaaaaaaaaaaaaaaaaaaaaacscaascca"
+ clip-path="none"
+ inkscape:label="Right Foot" />
+ <path
+ style="display:inline;fill:#cd8907;fill-opacity:1;stroke:none;filter:url(#filter14416-8)"
+ d="m 512.89128,328.72435 c -0.61724,1.54745 -1.48971,2.99275 -2.57146,4.2598 -2.40248,2.814 -5.72921,4.65444 -9.03774,6.31099 -5.65305,2.83043 -11.50277,5.31761 -16.82133,8.73539 -3.55362,2.28361 -6.84076,4.96564 -9.9178,7.85959 -2.62917,2.47273 -5.12496,5.116 -8.06607,7.20809 -2.98093,2.12042 -6.41793,3.6468 -10.03693,4.18063 -4.40931,0.65041 -8.96019,-0.19314 -13.05822,-1.94562 -2.85719,-1.22185 -5.61733,-2.97002 -7.27205,-5.60029 -1.64629,-2.61688 -2.0313,-5.83002 -2.03893,-8.92166 -0.0135,-5.46467 1.01827,-10.87076 1.98945,-16.24846 0.80703,-4.46875 1.57531,-8.9482 2.06402,-13.46287 0.88853,-8.20825 0.8481,-16.49756 0.29214,-24.73502 -0.0931,-1.38017 -0.20023,-2.77381 0.0118,-4.14077 0.21204,-1.36695 0.77803,-2.72737 1.82595,-3.63036 0.9828,-0.84687 2.30304,-1.21795 3.5986,-1.28594 1.29556,-0.068 2.58744,0.14181 3.87096,0.3307 3.02315,0.4449 6.07241,0.77918 9.03106,1.54323 1.86541,0.48173 3.68372,1.13165 5.54531,1.62795 3.10947,0.82898 6.36227,1.22486 9.55911,0.8558 3.44127,-0.39728 6.78665,-1.67148 10.24974,-1.58654 1.42063,0.0348 2.83052,0.30037 4.1885,0.71908 1.38179,0.42605 2.74909,1.03446 3.76507,2.06337 0.76566,0.7754 1.29538,1.75352 1.7157,2.75891 0.62574,1.49674 1.03256,3.09742 1.06577,4.71936 0.0347,1.69374 -0.33552,3.39491 -0.10594,5.07338 0.18638,1.36264 0.7635,2.64802 1.50064,3.80912 0.73713,1.1611 1.634,2.2109 2.52251,3.26069 1.71726,2.02897 3.4393,4.09674 5.5931,5.65457 2.45218,1.77364 5.36188,2.81145 7.89508,4.46732 0.75511,0.49359 1.48596,1.05215 2.01814,1.78058 0.8972,1.22806 1.1387,2.90791 0.62379,4.33898 h 2e-5"
+ id="path14296-3-7"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="caaaaaaaaaaaaaaaaaaaaaasssaaac"
+ clip-path="url(#clipPath419)"
+ transform="translate(-250.00016,-58.362183)"
+ inkscape:label="Right Foot Hightlights" />
+ <path
+ style="display:inline;fill:#f5c021;fill-opacity:1;stroke:none;filter:url(#filter14432-2)"
+ d="m 508.79285,327.92545 c -0.60151,1.26455 -1.38215,2.44372 -2.31134,3.49133 -2.15335,2.42776 -5.06099,4.09917 -8.12349,5.1725 -5.04166,1.76698 -10.54565,2.00437 -15.49471,4.01618 -3.01615,1.22607 -5.73063,3.07339 -8.47914,4.81871 -2.22174,1.41082 -4.49246,2.76887 -6.93206,3.75622 -2.75548,1.1152 -5.68568,1.74047 -8.62582,2.17857 -1.87082,0.27876 -3.76259,0.48423 -5.65156,0.38704 -1.88898,-0.0972 -3.78418,-0.50735 -5.45127,-1.40092 -1.26399,-0.6775 -2.40126,-1.6529 -3.07596,-2.91839 -0.74956,-1.4059 -0.87959,-3.05603 -0.86243,-4.64917 0.0457,-4.24592 1.02557,-8.4458 0.99617,-12.69186 -0.0256,-3.69614 -0.81525,-7.34495 -1.04231,-11.03419 -0.43665,-7.09457 1.2047,-14.31322 -0.23989,-21.27287 -0.23125,-1.11413 -0.54212,-2.22686 -0.52701,-3.36463 0.008,-0.56889 0.0988,-1.14101 0.31541,-1.66709 0.21661,-0.52609 0.56289,-1.00508 1.02461,-1.33751 0.38878,-0.27992 0.85044,-0.45024 1.32336,-0.52677 0.47292,-0.0765 0.95748,-0.0616 1.43166,0.007 0.94836,0.13656 1.85188,0.48215 2.77546,0.73718 2.64193,0.72952 5.43254,0.71432 8.11748,1.26484 1.68527,0.34555 3.31679,0.91149 4.98436,1.33427 2.80028,0.70996 5.72013,1.0133 8.59212,0.70142 3.0885,-0.33539 6.10714,-1.37534 9.21289,-1.30034 1.27305,0.0307 2.53741,0.25005 3.76479,0.58936 1.22771,0.3394 2.45538,0.81951 3.38421,1.69114 0.6693,0.62809 1.15135,1.4307 1.54214,2.26121 0.5703,1.21202 0.96726,2.52854 0.95796,3.868 -0.005,0.6968 -0.11899,1.38758 -0.18672,2.0811 -0.0677,0.69352 -0.0878,1.40368 0.0914,2.07705 0.18009,0.67656 0.55415,1.2867 0.98269,1.84033 0.42854,0.55364 0.91471,1.06002 1.35819,1.60176 1.24195,1.51713 2.12961,3.28544 3.09724,4.99067 0.96764,1.70523 2.05232,3.39266 3.58036,4.62117 2.0797,1.67204 4.77798,2.34016 7.09642,3.66141 0.67877,0.38682 1.33676,0.84082 1.81399,1.45937 0.38231,0.49552 0.63762,1.0882 0.73509,1.70642 0.0975,0.61822 0.0369,1.26071 -0.1744,1.84982 h 9e-5"
+ id="path14296-3-1-9"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="caaaaaaaaaaaaaaaaaaaaaaaaaaaac"
+ transform="matrix(1.1429721,0,0,1.2323048,-318.99817,-135.48838)"
+ clip-path="url(#clipPath426)"
+ inkscape:label="Right Foot Brighter Highlights" />
+ <path
+ style="display:inline;fill:url(#linearGradient18844);fill-opacity:1;stroke:none;filter:url(#filter17044)"
+ d="m 187.30911,230.28754 c 3.27611,-0.88704 6.0662,1.5972 8.44228,3.47233 1.53527,1.30928 3.75348,0.97992 5.63665,1.04213 3.12069,-0.11321 6.22535,0.52281 9.34708,0.13577 6.14462,-0.51932 12.16847,-2.02966 18.34236,-2.28984 2.94948,-0.18579 6.25992,-0.35725 8.80813,1.36517 1.03299,0.7155 2.54702,3.74139 3.56647,2.60489 -0.42031,-3.17821 -2.77748,-6.25589 -5.93906,-7.10224 -2.47492,-0.38942 -4.98985,0.29134 -7.48947,0.0711 -7.42294,-0.17706 -14.79344,-1.5554 -22.23396,-1.16015 -5.17644,0.0448 -10.34657,-0.19501 -15.51546,-0.39662 -2.03057,-0.41489 -2.74674,1.38901 -3.8489,2.08085"
+ id="path16493"
+ inkscape:connector-curvature="0"
+ inkscape:label="Right Foot Brightest Highlights"
+ clip-path="url(#clipPath433)" />
+ </g>
+ <g
+ style="display:inline"
+ inkscape:label="Left Foot"
+ id="g411">
+ <path
+ sodipodi:nodetypes="ssssssssssssssss"
+ inkscape:connector-curvature="0"
+ id="path4635-1"
+ d="m 57.57688,222.65692 c 1.59929,-0.66295 3.3982,-0.78361 5.10074,-0.46963 1.70253,0.31398 3.31141,1.04948 4.74342,2.02239 2.86402,1.94583 4.98821,4.77774 7.02263,7.57952 4.67189,6.43406 9.16868,13.00227 13.24488,19.8293 3.30635,5.53766 6.34352,11.25685 10.16415,16.45304 2.49398,3.3919 5.3066,6.53947 7.813,9.92221 2.50639,3.38273 4.72794,7.05586 5.83931,11.11662 1.44411,5.27653 0.88463,11.09291 -1.62666,15.95302 -1.76663,3.41896 -4.47646,6.35228 -7.77242,8.33898 -3.29595,1.9867 -7.17064,3.01444 -11.01635,2.87021 -6.11413,-0.2293 -11.69944,-3.28515 -17.38362,-5.54906 -11.58097,-4.6125 -24.15978,-6.0594 -36.09666,-9.65174 -3.66859,-1.10404 -7.27582,-2.4107 -10.96988,-3.42629 -1.64125,-0.45122 -3.30866,-0.8482 -4.85875,-1.55144 -1.55008,-0.70325 -2.999548,-1.7491 -3.86171,-3.21675 -0.666391,-1.13439 -0.948386,-2.47002 -0.930187,-3.78554 0.0182,-1.31552 0.325889,-2.61453 0.773815,-3.85158 0.895851,-2.47409 2.343262,-4.71374 3.320162,-7.15696 1.59511,-3.98935 1.88169,-8.38839 1.66657,-12.67942 -0.21511,-4.29103 -0.91078,-8.54478 -1.20454,-12.83115 -0.13118,-1.91406 -0.18066,-3.85256 0.18479,-5.73598 0.36545,-1.88343 1.17577,-3.72459 2.55771,-5.05541 1.27406,-1.22693 2.96492,-1.95531 4.69643,-2.31651 1.73151,-0.3612 3.51533,-0.37747 5.28367,-0.33762 1.76833,0.0399 3.54067,0.13425 5.30351,-0.0106 1.76284,-0.14488 3.53347,-0.54055 5.06911,-1.41828 1.45996,-0.83447 2.65433,-2.0745 3.64374,-3.43424 0.9894,-1.35974 1.78909,-2.84573 2.60891,-4.31396 0.81983,-1.46823 1.66834,-2.93151 2.74157,-4.22611 1.07324,-1.2946 2.38923,-2.42304 3.94266,-3.06698"
+ style="display:inline;fill:url(#linearGradient18854);fill-opacity:1;stroke:none"
+ inkscape:label="Left Foot" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;fill:#d99a03;fill-opacity:1;stroke:none;filter:url(#filter14148-8)"
+ d="m -99.89049,282.77885 c 1.45515,-0.58619 3.09423,-0.65064 4.62272,-0.30406 1.52849,0.34657 2.94957,1.09015 4.18836,2.047 2.47758,1.91371 4.19983,4.61379 5.85419,7.26861 3.97009,6.43306 7.8514,12.93381 11.5161,19.56716 2.7769,4.99324 5.4247,10.09253 8.83749,14.67892 2.26379,3.04154 4.84735,5.83139 7.15787,8.83675 2.31051,3.00536 4.37126,6.28214 5.3928,9.93347 1.31626,4.70582 0.78265,9.91001 -1.49541,14.23282 -1.63755,3.10576 -4.15203,5.74644 -7.18609,7.5126 -3.03406,1.76617 -6.57924,2.64923 -10.08655,2.48791 -5.59831,-0.25772 -10.71129,-3.05353 -15.98089,-4.95071 -10.10307,-3.66572 -21.05344,-4.15754 -31.41615,-7.02001 -3.71479,-1.00833 -7.33661,-2.35276 -11.06955,-3.29396 -1.65162,-0.41658 -3.33303,-0.75712 -4.90217,-1.4193 -1.56914,-0.66219 -3.04681,-1.68866 -3.89752,-3.16474 -0.63282,-1.09717 -0.88561,-2.38838 -0.84651,-3.65421 0.0391,-1.26584 0.35915,-2.51035 0.80992,-3.69386 0.90155,-2.36701 2.32025,-4.51029 3.22912,-6.87464 1.3787,-3.57425 1.54994,-7.50412 1.29397,-11.32617 -0.25597,-3.82205 -0.9211,-7.60949 -1.15326,-11.4336 -0.10374,-1.70896 -0.11933,-3.43899 0.22634,-5.11576 0.34564,-1.67677 1.07606,-3.30971 2.29486,-4.512 1.32089,-1.30904 3.14116,-2.02413 4.97727,-2.30427 1.83611,-0.28013 3.70601,-0.15808 5.55479,0.007 1.84877,0.16495 3.70503,0.37271 5.56113,0.26163 1.85609,-0.11109 3.7357,-0.56331 5.26886,-1.60694 1.39737,-0.94461 2.44584,-2.32407 3.24439,-3.79842 0.79856,-1.47435 1.3676,-3.05544 1.97644,-4.61656 0.60885,-1.56113 1.26672,-3.12189 2.2218,-4.50973 0.95509,-1.38785 2.23612,-2.60467 3.80568,-3.23473"
+ id="path13596-6"
+ clip-path="url(#clipPath401)"
+ transform="translate(160,-57.362183)"
+ inkscape:label="Left Foot Hightlights" />
+ <path
+ sodipodi:nodetypes="aaassssssssssssa"
+ inkscape:connector-curvature="0"
+ id="path4635-2-4"
+ d="m 138.7532,281.23531 c 1.40907,-0.7122 3.07062,-0.85812 4.61642,-0.53681 1.54579,0.3213 2.97823,1.09063 4.19572,2.09584 2.43498,2.0104 3.98026,4.8747 5.41939,7.68535 3.30494,6.45466 6.3834,13.04983 10.33791,19.12824 2.86875,4.40952 6.17965,8.51701 9.08155,12.90479 3.93557,5.95071 7.13582,12.4957 8.45639,19.50682 0.88822,4.71571 0.85899,9.80955 -1.37244,14.05779 -1.46869,2.79611 -3.85002,5.08988 -6.66339,6.52522 -2.81337,1.43533 -6.0432,2.01701 -9.18889,1.73441 -4.95423,-0.44507 -9.4537,-2.92512 -14.11748,-4.65475 -8.27469,-3.06879 -17.21809,-3.80325 -25.73435,-6.1187 -3.59196,-0.9766 -7.10999,-2.23521 -10.7509,-3.00963 -1.60616,-0.34163 -3.24361,-0.59125 -4.77675,-1.17943 -1.53313,-0.58818 -2.98623,-1.56578 -3.76965,-3.00894 -0.55139,-1.01573 -0.73656,-2.20459 -0.65433,-3.3574 0.0822,-1.15282 0.42084,-2.27486 0.86462,-3.34201 0.88755,-2.13429 2.20087,-4.08935 2.89035,-6.29561 1.01321,-3.24214 0.59672,-6.75718 -0.1636,-10.06777 -0.76031,-3.31059 -1.85667,-6.56127 -2.19448,-9.94121 -0.15046,-1.50543 -0.14681,-3.03993 0.19136,-4.51458 0.33818,-1.47465 1.02687,-2.89176 2.10855,-3.94955 1.3932,-1.36244 3.34372,-2.03997 5.28315,-2.22925 1.93944,-0.18927 3.89217,0.0689 5.82027,0.3512 1.9281,0.28227 3.86824,0.59003 5.8148,0.49986 1.94656,-0.0902 3.92849,-0.61081 5.45316,-1.82432 1.50782,-1.20011 2.45577,-2.98713 2.99939,-4.83599 0.54362,-1.84885 0.71997,-3.78191 0.94267,-5.69612 0.2227,-1.91421 0.50044,-3.8462 1.22971,-5.63 0.72928,-1.7838 1.96094,-3.42814 3.68085,-4.29745"
+ style="display:inline;fill:#f5bd0c;fill-opacity:1;stroke:none;filter:url(#filter14140-3)"
+ transform="matrix(1,0,0,0.98204782,-80.00015,-54.40321)"
+ clip-path="url(#clipPath391)"
+ inkscape:label="Left Foot Brighter Highlights" />
+ <path
+ id="path4792-8"
+ d="m 76.40702,237.60723 c 2.60622,4.71337 4.1958,10.12156 6.78125,14.875 2.3781,4.37223 5.08446,8.87379 7.5,12.90625 1.07545,1.79534 3.58329,4.5546 6.11895,8.83731 2.29771,3.88081 4.61826,9.29715 5.91658,11.1158 -0.74552,-2.12877 -2.27926,-7.84655 -4.10875,-11.92255 -1.70955,-3.80877 -3.69976,-5.98219 -4.92678,-8.03056 -2.41553,-4.03246 -5.01691,-7.65647 -7.5,-11.5 -3.42521,-5.30181 -6.03558,-11.23523 -9.78125,-16.28125 z"
+ style="display:inline;fill:url(#linearGradient18856);fill-opacity:1;stroke:none;filter:url(#filter14176-5)"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cssscsssc"
+ inkscape:label="Left Foot Brightest Highlights" />
+ </g>
+ </g>
+ <g
+ style="display:inline"
+ id="g526"
+ inkscape:label="Hand Shadows">
+ <path
+ inkscape:label="Hand Lower Shadow"
+ style="opacity:0.35;fill:url(#radialGradient18846);fill-opacity:1;stroke:none;filter:url(#filter14897-2)"
+ id="path29714-8-2"
+ d="m 231.4835,237.27796 c -0.56258,-1.10201 -1.58692,-1.92585 -2.72873,-2.40251 -1.1418,-0.47667 -2.39692,-0.6289 -3.63419,-0.61936 -2.47454,0.0191 -4.93459,0.66357 -7.39999,0.45028 -2.0826,-0.18018 -4.05875,-0.96301 -6.08982,-1.45739 -2.09726,-0.51049 -4.32188,-0.70969 -6.40465,-0.14297 -2.22595,0.60568 -4.18942,2.09362 -5.41915,4.04541 -1.08426,1.72091 -1.59909,3.75274 -1.76111,5.78028 -0.16202,2.02754 0.013,4.06578 0.21815,6.08941 0.1484,1.46363 0.31354,2.93079 0.66764,4.35866 0.35411,1.42788 0.90422,2.82282 1.76608,4.01506 1.24071,1.71632 3.08337,2.9395 5.06938,3.67497 3.24183,1.20053 6.9338,1.13597 10.13167,-0.17718 5.65885,-2.45702 10.44922,-6.8639 13.36879,-12.29857 1.04539,-1.94596 1.8574,-4.01932 2.38189,-6.16513 0.20845,-0.85283 0.37215,-1.72236 0.37977,-2.60027 0.008,-0.8779 -0.14655,-1.76875 -0.54573,-2.55069"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="aaaasssaccaa" />
+ <path
+ inkscape:label="Hand Upper Shadow"
+ transform="matrix(1,0,0,0.72292525,159.99984,20.396294)"
+ style="opacity:0.35;fill:url(#radialGradient18848);fill-opacity:1;stroke:none;filter:url(#filter14951-8)"
+ id="path29714-8-3-0"
+ d="m 71.483661,295.64014 c -0.562575,-1.10201 -1.586921,-1.92585 -2.728725,-2.40251 -1.141803,-0.47667 -2.396928,-0.6289 -3.634197,-0.61936 -2.474537,0.0191 -4.934587,0.66357 -7.399988,0.45028 -2.082597,-0.18018 -4.058745,-0.96301 -6.08982,-1.45739 -2.097262,-0.51049 -4.321879,-0.70969 -6.40465,-0.14297 -2.225952,0.60568 -4.189424,2.09362 -5.41915,4.04541 -1.084262,1.72091 -1.599093,3.75274 -1.76111,5.78028 -0.162016,2.02754 0.01297,4.06578 0.21815,6.08941 0.148398,1.46363 0.31354,2.93079 0.667645,4.35866 0.354105,1.42788 0.904219,2.82282 1.766075,4.01506 1.240713,1.71632 3.083374,2.9395 5.06938,3.67497 3.241832,1.20053 6.933796,1.13597 10.13167,-0.17718 5.658851,-2.45702 10.449216,-6.8639 13.36879,-12.29857 1.045394,-1.94596 1.857401,-4.01932 2.38189,-6.16513 0.208453,-0.85283 0.372151,-1.72236 0.379775,-2.60027 0.0076,-0.8779 -0.146556,-1.76875 -0.545735,-2.55069"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="aaaasssaccaa" />
+ </g>
+ <g
+ style="display:inline"
+ id="g545"
+ inkscape:label="Hand">
+ <path
+ inkscape:label="Hand"
+ transform="translate(159.99984,-58.362183)"
+ style="display:inline;fill:#020204;fill-opacity:1;stroke:none"
+ id="path29714-5-4"
+ d="m 76.1875,285.32775 c -0.405158,-1.10369 -1.118445,-2.08156 -1.990705,-2.86987 -0.872259,-0.78832 -1.900482,-1.39229 -2.982775,-1.85155 -2.164587,-0.91852 -4.520525,-1.26149 -6.83152,-1.69556 -2.179187,-0.40931 -4.34179,-0.90631 -6.52782,-1.27734 -2.27136,-0.38551 -4.617897,-0.63213 -6.8653,-0.1253 -1.965827,0.44333 -3.784499,1.45879 -5.271724,2.81864 -1.487225,1.35984 -2.649109,3.0564 -3.484986,4.89007 -1.472176,3.22952 -1.934512,6.86503 -1.65394,10.40316 0.208815,2.63325 0.875323,5.34594 2.60877,7.33912 1.400654,1.61052 3.387329,2.61526 5.43398,3.22092 3.525017,1.04316 7.366632,0.98822 10.86038,-0.1553 5.766894,-1.93113 10.875681,-5.77387 14.33034,-10.77903 1.138609,-1.64963 2.112174,-3.44809 2.5532,-5.4034 0.335973,-1.48955 0.348308,-3.08112 -0.1779,-4.51456"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="aaaasssaccaa" />
+ <path
+ inkscape:label="Hand Lower Hightlight"
+ transform="translate(-150.00016,-58.362183)"
+ clip-path="url(#clipPath533)"
+ inkscape:connector-curvature="0"
+ id="path29714-3-9-9"
+ d="m 362.21875,276.45593 c -0.54933,0.0306 -1.08144,0.0909 -1.625,0.1875 -3.46951,0.61686 -6.64705,2.80857 -8.4375,5.84375 -1.26396,2.14267 -1.83985,4.67634 -1.65625,7.15625 0.0732,-1.74163 0.52946,-3.44685 1.375,-4.96875 1.43442,-2.58185 4.03238,-4.52979 6.9375,-5.0625 1.78976,-0.32819 3.63182,-0.13095 5.4375,0.0937 1.73256,0.2156 3.48115,0.44287 5.1875,0.8125 2.64101,0.57209 5.25428,1.45135 7.46875,3 0.51646,0.36118 0.99955,0.76857 1.40625,1.25 0.40669,0.48143 0.72188,1.03792 0.84375,1.65625 0.17824,0.90428 -0.0794,1.85295 -0.53125,2.65625 -0.45189,0.8033 -1.06491,1.50665 -1.71875,2.15625 -0.52923,0.5258 -1.09482,1.03417 -1.65625,1.53125 2.559,-0.49571 5.15199,-1.19766 7.28125,-2.6875 0.89975,-0.62955 1.71523,-1.38464 2.25,-2.34375 0.53477,-0.95912 0.76245,-2.1212 0.5,-3.1875 -0.17714,-0.71971 -0.57137,-1.3824 -1.0625,-1.9375 -0.49114,-0.55511 -1.0805,-1.01217 -1.6875,-1.4375 -2.67877,-1.87701 -5.81493,-3.07854 -9.0625,-3.46875 -2.08149,-0.38286 -4.18122,-0.70597 -6.28125,-0.96875 -1.64344,-0.20564 -3.32077,-0.37313 -4.96875,-0.28125 z"
+ style="display:inline;fill:url(#radialGradient18850);fill-opacity:1;stroke:none;filter:url(#filter14812-5)" />
+ <path
+ inkscape:label="Hand Upper Highlight"
+ transform="translate(-150.00016,-58.36218)"
+ clip-path="url(#clipPath538)"
+ inkscape:connector-curvature="0"
+ id="path29714-3-9-3-8"
+ d="m 362.21875,276.45593 c -0.54933,0.0306 -1.08144,0.0909 -1.625,0.1875 -3.46951,0.61686 -6.64705,2.80857 -8.4375,5.84375 -1.26396,2.14267 -1.83985,4.67634 -1.65625,7.15625 0.0732,-1.74163 0.52946,-3.44685 1.375,-4.96875 1.43442,-2.58185 4.03238,-4.52979 6.9375,-5.0625 1.78976,-0.32819 3.63182,-0.13095 5.4375,0.0937 1.73256,0.2156 3.48115,0.44287 5.1875,0.8125 2.64101,0.57209 5.25428,1.45135 7.46875,3 0.51646,0.36118 0.99955,0.76857 1.40625,1.25 0.40669,0.48143 0.72188,1.03792 0.84375,1.65625 0.17824,0.90428 -0.0794,1.85295 -0.53125,2.65625 -0.45189,0.8033 -1.06491,1.50665 -1.71875,2.15625 -0.52923,0.5258 -1.09482,1.03417 -1.65625,1.53125 2.559,-0.49571 5.15199,-1.19766 7.28125,-2.6875 0.89975,-0.62955 1.71523,-1.38464 2.25,-2.34375 0.53477,-0.95912 0.76245,-2.1212 0.5,-3.1875 -0.17714,-0.71971 -0.57137,-1.3824 -1.0625,-1.9375 -0.49114,-0.55511 -1.0805,-1.01217 -1.6875,-1.4375 -2.67877,-1.87701 -5.81493,-3.07854 -9.0625,-3.46875 -2.08149,-0.38286 -4.18122,-0.70597 -6.28125,-0.96875 -1.64344,-0.20564 -3.32077,-0.37313 -4.96875,-0.28125 z"
+ style="display:inline;fill:url(#linearGradient18852);fill-opacity:1;stroke:none;filter:url(#filter14812-0-9)" />
+ </g>
+ <g
+ style="display:inline"
+ id="g777"
+ inkscape:label="Face">
+ <g
+ inkscape:label="Eyes"
+ id="g669">
+ <g
+ id="g643"
+ inkscape:label="Left Eye"
+ style="display:inline">
+ <path
+ inkscape:label="Left Eyeball"
+ transform="translate(138.99984,-49.362181)"
+ style="display:inline;fill:url(#radialGradient18806);fill-opacity:1;stroke:none"
+ d="m -24.767558,113.36218 c -1.780966,0.097 -3.484616,0.91899 -4.787852,2.1367 -1.303235,1.21771 -2.221372,2.81176 -2.786181,4.50357 -1.129618,3.38363 -0.87548,7.05177 -0.618697,10.60973 0.23251,3.22162 0.470404,6.50533 1.676785,9.50158 0.60319,1.49813 1.450246,2.91021 2.580338,4.06395 1.130092,1.15374 2.551736,2.04189 4.118297,2.43447 1.468838,0.36809 3.03816,0.29183 4.482783,-0.16209 1.444622,-0.45392 2.763916,-1.27887 3.846235,-2.33791 1.57904,-1.54507 2.643262,-3.5662 3.253449,-5.68947 0.610186,-2.12328 0.784157,-4.35155 0.752401,-6.56053 -0.03974,-2.76435 -0.400909,-5.53851 -1.265755,-8.16439 -0.864846,-2.62588 -2.245743,-5.10327 -4.172795,-7.08561 -0.933308,-0.96009 -1.997765,-1.80513 -3.198585,-2.39747 -1.200819,-0.59233 -2.543439,-0.92535 -3.880423,-0.85253"
+ id="path28795-9-5"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="zzzzzzz" />
+ <g
+ style="display:inline"
+ inkscape:label="Left Eye Pupil"
+ id="g605">
+ <path
+ sodipodi:nodetypes="aaaaaaaaa"
+ inkscape:connector-curvature="0"
+ d="m 159.93889,137.11161 c -0.37211,2.24574 -0.38563,4.60199 0.3864,6.74344 0.50979,1.41404 1.35041,2.69692 2.37218,3.79935 0.66903,0.72184 1.42824,1.37779 2.31576,1.80318 0.88752,0.42539 1.91578,0.60638 2.87035,0.36671 0.88113,-0.22123 1.65156,-0.78859 2.22013,-1.49715 0.56856,-0.70857 0.9476,-1.55295 1.2177,-2.42034 0.7974,-2.56075 0.66926,-5.36165 -0.12241,-7.92418 -0.5768,-1.86701 -1.53208,-3.66794 -3.02664,-4.9268 -0.71307,-0.60061 -1.54773,-1.07115 -2.45479,-1.28664 -0.90707,-0.2155 -1.88874,-0.16505 -2.73754,0.22063 -0.9423,0.42817 -1.67159,1.24304 -2.14907,2.16134 -0.47749,0.91829 -0.72288,1.93936 -0.89207,2.96046"
+ id="path29453-9"
+ style="fill:#020204;fill-opacity:1;stroke:none"
+ transform="translate(-50.00015,-58.362183)"
+ inkscape:label="Left Eye Pupil" />
+ <path
+ sodipodi:nodetypes="aaaaaaaaa"
+ inkscape:connector-curvature="0"
+ id="path29465-8"
+ d="m 114.68735,77.124997 c 0.24185,0.6337 1.05418,0.86381 1.5,1.375 0.43302,0.49651 0.88735,1.01055 1.125,1.625 0.4549,1.17616 -0.4488,2.91931 0.5,3.75 0.29782,0.26075 0.89472,0.26639 1.1875,0 1.14539,-1.04215 0.89094,-3.14433 0.4375,-4.625 -0.4115,-1.34371 -1.42747,-2.61637 -2.67923,-3.25512 -0.57882,-0.29536 -1.45077,-0.54089 -1.94577,-0.11988 -0.31898,0.2713 -0.27431,0.85878 -0.125,1.25 z"
+ style="fill:url(#linearGradient18820);fill-opacity:1;stroke:none;filter:url(#filter29493-2)"
+ inkscape:label="Left Eye Pupil Highlight" />
+ </g>
+ <path
+ inkscape:label="Left Eyelid"
+ sodipodi:nodetypes="aaaaaaaaaaaaa"
+ inkscape:connector-curvature="0"
+ id="path29551-9"
+ d="m 50.39208,129.52717 c 2.68537,-1.59933 5.95507,-1.97034 9.066699,-1.67565 3.111629,0.29468 6.125434,1.20847 9.141301,2.02921 2.211625,0.60188 4.451579,1.16149 6.525325,2.13777 2.073747,0.97627 3.99989,2.41568 5.141935,4.40296 0.183191,0.31877 0.345257,0.6497 0.539254,0.96201 0.193996,0.31232 0.42311,0.60867 0.716456,0.83031 0.293346,0.22164 0.656994,0.3643 1.024107,0.34424 0.183557,-0.01 0.365612,-0.0609 0.524176,-0.15388 0.158563,-0.093 0.292945,-0.22871 0.377987,-0.39169 0.09778,-0.18739 0.128079,-0.40446 0.117139,-0.61554 -0.01094,-0.21108 -0.06122,-0.41805 -0.117139,-0.62189 -0.755202,-2.75296 -2.53499,-5.08832 -3.88909,-7.6014 -0.8126,-1.5081 -1.476963,-3.09273 -2.2981,-4.5962 -2.81829,-5.16019 -7.443597,-9.21564 -12.701405,-11.84733 -5.257808,-2.6317 -11.127445,-3.89613 -16.997075,-4.23934 -6.801182,-0.39768 -13.619761,0.40945 -20.32932,1.59099 -2.908599,0.5122 -5.86079,1.11511 -8.435686,2.56156 -1.287447,0.72322 -2.467452,1.65662 -3.388474,2.81087 -0.921022,1.15425 -1.576477,2.53523 -1.78765,3.99673 -0.203522,1.40855 0.0088,2.86057 0.501301,4.19582 0.492484,1.33524 1.258246,2.5585 2.156537,3.66236 1.796584,2.20771 4.100665,3.93361 6.222432,5.83092 2.121308,1.8969 4.09001,3.99204 6.462948,5.56282 1.186469,0.78539 2.472664,1.43499 3.843385,1.81666 1.37072,0.38166 2.829918,0.48917 4.223827,0.20358 1.444987,-0.29606 2.782689,-1.005 3.953624,-1.90197 1.170934,-0.89697 2.186129,-1.98006 3.148417,-3.09793 1.924576,-2.23575 3.722539,-4.68648 6.257089,-6.19599"
+ style="display:inline;fill:url(#radialGradient18832);fill-opacity:1;stroke:none"
+ clip-path="url(#clipPath631)"
+ transform="translate(59.99984,-58.362183)" />
+ <path
+ sodipodi:nodetypes="ccccccccc"
+ inkscape:label="Left Eyebrow"
+ style="display:inline;fill:url(#linearGradient18818);fill-opacity:1;stroke:none;filter:url(#filter29447-1)"
+ d="m -38.437655,119.37798 c 2.5037,2.34533 4.36502,5.2397 5.625,8.30939 -0.550665,-3.38469 -1.423402,-6.10373 -3.625,-8.30939 -1.35129,-1.26581 -2.88639,-2.37775 -4.625,-3.1587 -1.52128,-0.68334 -3.213598,-1.10788 -4.180828,-1.12552 -0.96723,-0.0176 -1.2022,0.004 -1.40094,0.0134 -0.19874,0.009 -0.35739,0.0162 0.27185,0.0877 0.62924,0.0715 2.03368,0.45118 3.541104,1.12827 1.507424,0.6771 3.042524,1.78904 4.393814,3.05485 z"
+ id="path28795-9-2-2"
+ inkscape:connector-curvature="0"
+ transform="translate(160,-57.362183)" />
+ </g>
+ <g
+ id="g652"
+ inkscape:label="Right Eye">
+ <path
+ inkscape:label="Right Eyeball"
+ transform="translate(138.99984,-49.362181)"
+ style="display:inline;fill:url(#radialGradient18808);fill-opacity:1;stroke:none"
+ d="m 6.7500001,113.36218 c -2.780425,1.91023 -5.110569,4.57487 -6.24999996,7.75 -1.4360294,4.00163 -0.88583807,8.48071 0.49999996,12.5 1.4194877,4.11688 3.793788,8.04098 7.37932,10.51234 1.7927659,1.23567 3.8680909,2.08301 6.0304019,2.33859 2.162311,0.25558 4.409274,-0.0949 6.340278,-1.10093 2.353116,-1.22596 4.147816,-3.37278 5.262172,-5.78076 1.114356,-2.40798 1.588797,-5.0701 1.737828,-7.71924 0.189892,-3.37546 -0.140469,-6.80646 -1.25,-10 -1.205266,-3.46909 -3.390051,-6.67055 -6.472754,-8.6666 -1.541351,-0.99803 -3.291947,-1.68356 -5.110883,-1.93515 -1.818936,-0.25158 -3.704766,-0.0633 -5.4163629,0.60175 -0.9754713,0.37901 -1.8874384,0.9074 -2.75,1.5"
+ id="path28795-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="aaaaaaaa" />
+ <g
+ style="display:inline"
+ inkscape:label="Right Eye Pupil"
+ id="g613">
+ <path
+ sodipodi:nodetypes="zzzzzzz"
+ inkscape:connector-curvature="0"
+ d="m 302.16152,130.75695 c -1.04548,0.0749 -2.06437,0.4318 -2.95135,0.99028 -0.88699,0.55848 -1.64327,1.31521 -2.23701,2.17899 -1.18748,1.72757 -1.70894,3.84675 -1.793,5.94139 -0.0631,1.5723 0.11098,3.16512 0.63245,4.64977 0.52147,1.48465 1.40089,2.85877 2.61276,3.86251 1.24011,1.02713 2.81647,1.64364 4.42485,1.72094 1.60838,0.0773 3.23948,-0.38665 4.56105,-1.3066 1.05288,-0.73292 1.9021,-1.74168 2.50666,-2.87315 0.60455,-1.13148 0.96879,-2.38348 1.1353,-3.65549 0.29411,-2.24678 -0.0385,-4.59295 -1.07692,-6.60695 -1.03841,-2.01401 -2.80051,-3.67269 -4.92674,-4.45606 -0.92093,-0.3393 -1.90911,-0.51576 -2.88805,-0.44563"
+ id="path28879-6"
+ style="fill:#020204;fill-opacity:1;stroke:none"
+ transform="translate(-150.00015,-58.362183)"
+ inkscape:label="Right Eye Pupil" />
+ <path
+ sodipodi:nodetypes="aaaaaa"
+ inkscape:connector-curvature="0"
+ id="path28891-5"
+ d="m 154.6561,79.249997 c -0.86591,0.34162 -2.23657,0.12677 -2.61622,0.9767 -0.22493,0.50357 0.0927,1.33252 0.60343,1.54061 1.03244,0.42063 2.63193,-0.34111 3.04876,-1.3751 0.18104,-0.4491 -0.0934,-1.16101 -0.53974,-1.34865 -0.16515,-0.0694 -0.32958,0.14069 -0.49623,0.20644 z"
+ style="fill:#141413;fill-opacity:1;stroke:none;filter:url(#filter28927-8)"
+ inkscape:label="Right Eye Pupil Lower Hightlight" />
+ <path
+ sodipodi:nodetypes="sazas"
+ inkscape:connector-curvature="0"
+ id="path28887-6"
+ d="m 158.62485,81.499997 c 1.16113,-1.16113 -0.82613,-4.23951 -2.375,-5.5 -1.12184,-0.91296 -4.39063,-1.86851 -4.25,-0.875 0.14063,0.99351 1.60988,2.26647 2.59467,3.23744 1.21236,1.19533 3.47886,3.68903 4.03033,3.13756 z"
+ style="fill:url(#linearGradient18812);fill-opacity:1;stroke:none;filter:url(#filter28949-8)"
+ inkscape:label="Right Eye Pupil Upper Highlight" />
+ </g>
+ <path
+ inkscape:label="Right Eyelid"
+ style="display:inline;fill:url(#linearGradient18814);fill-opacity:1;stroke:none"
+ d="m 75.25,132.48718 c 2.383746,-1.98014 5.160908,-3.48474 8.12154,-4.40008 6.085564,-1.88147 12.999677,-1.13706 18.37846,2.27508 1.85708,1.17808 3.51244,2.64192 5.23935,4.00367 1.72691,1.36176 3.56115,2.64122 5.63565,3.37133 1.12086,0.39448 2.31818,0.62345 3.5,0.5 1.06768,-0.11153 2.09928,-0.5118 2.98444,-1.11915 0.88515,-0.60736 1.62476,-1.4185 2.18064,-2.33686 1.11176,-1.8367 1.47001,-4.06457 1.27839,-6.20298 -0.38324,-4.27682 -2.79556,-8.05341 -4.81847,-11.84101 -0.63342,-1.18598 -1.23642,-2.39333 -2,-3.5 -2.34327,-3.39616 -6.07312,-5.63562 -9.98498,-6.94794 -3.91185,-1.31233 -8.046257,-1.78639 -12.14002,-2.30206 -1.825736,-0.22998 -3.673032,-0.46998 -5.5,-0.25 -2.099797,0.25283 -4.075978,1.101 -6.125,1.625 -0.972648,0.24874 -1.963662,0.42478 -2.928029,0.70391 -0.964366,0.27912 -1.912957,0.669 -2.696971,1.29609 -1.144817,0.91567 -1.865056,2.29088 -2.176504,3.72338 -0.311449,1.4325 -0.240517,2.92444 -0.01161,4.37242 0.457809,2.89597 1.540886,5.72407 1.438116,8.6542 -0.07058,2.01227 -0.702287,3.98797 -0.625,6 0.02266,0.58987 0.106588,1.17738 0.25,1.75"
+ id="path28972-5"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cssssssssssssc"
+ clip-path="url(#clipPath622)"
+ transform="translate(59.99984,-58.362183)" />
+ <path
+ inkscape:label="Right Eyebrow"
+ style="display:inline;fill:url(#linearGradient18816);fill-opacity:1;stroke:none;filter:url(#filter29350-1)"
+ d="m -4.593905,113.125 c -0.47695,0.59985 -0.90798,1.25231 -1.25,1.96875 2.14641,0.46247 4.19906,1.34575 6.03125,2.5625 3.54507,2.35427 6.237,5.7965 8.125,9.625 0.44076,-0.48807 0.84202,-1.01184 1.1875,-1.59375 -1.89751,-3.9878 -4.64382,-7.5949 -8.3125,-10.03125 -1.76231,-1.17035 -3.72465,-2.05369 -5.78125,-2.53125 z"
+ id="path28795-92-2"
+ inkscape:connector-curvature="0"
+ transform="translate(160,-57.362183)" />
+ </g>
+ </g>
+ <g
+ inkscape:label="Beak"
+ id="g735"
+ style="display:inline">
+ <path
+ sodipodi:nodetypes="zzzzzzzz"
+ inkscape:connector-curvature="0"
+ id="path28849-2"
+ d="m -16.39938,136.86218 c 1.767366,-1.98662 2.976192,-4.41053 4.674142,-6.45679 0.848975,-1.02314 1.8284211,-1.95533 2.9816817,-2.61681 1.1532606,-0.66147 2.4919769,-1.0411 3.8165164,-0.9264 1.4744902,0.12769 2.8545436,0.86228 3.93407466,1.87472 1.07953103,1.01244 1.8797683,2.29027 2.51864534,3.62528 0.6117397,1.27831 1.0977635,2.64027 1.97912,3.75 0.940326,1.18398 2.2595274,1.99218 3.4510909,2.92288 0.5957818,0.46535 1.167477,0.96911 1.6383978,1.5605 0.4709209,0.59139 0.8396117,1.27595 0.9909913,2.01662 0.1537234,0.75214 0.077153,1.54506 -0.1851792,2.26653 -0.2623326,0.72148 -0.7066964,1.37174 -1.2596263,1.90429 -1.1058598,1.0651 -2.6135811,1.63957 -4.1338116,1.85466 -3.04046123,0.43016 -6.1146629,-0.47583 -9.1842429,-0.39142 -3.1068902,0.0854 -6.1415551,1.18366 -9.2475441,1.07007 -1.552994,-0.0568 -3.128063,-0.43624 -4.404252,-1.32301 -0.638094,-0.44339 -1.194008,-1.01055 -1.595831,-1.6756 -0.401824,-0.66505 -0.646688,-1.42894 -0.672863,-2.20552 -0.02497,-0.74092 0.148043,-1.48088 0.444075,-2.16055 0.296033,-0.67967 0.712681,-1.30175 1.182123,-1.87552 0.938883,-1.14753 2.086993,-2.10617 3.072492,-3.21393"
+ style="display:inline;fill:url(#radialGradient18810);fill-opacity:1;stroke:none"
+ transform="translate(138.99984,-49.362181)"
+ inkscape:label="Under Beak" />
+ <g
+ style="display:inline"
+ id="g712"
+ inkscape:label="Beak">
+ <path
+ inkscape:label="Lower Beak"
+ transform="translate(59.99984,-58.362183)"
+ style="display:inline;fill:url(#radialGradient18822);fill-opacity:1;stroke:none"
+ id="path28461-2"
+ d="m 45.751683,165.03156 c 0.06146,0.29539 0.172509,0.58039 0.32709,0.8395 0.265683,0.44533 0.653935,0.80631 1.073256,1.1114 0.419321,0.30509 0.872799,0.55947 1.311827,0.83545 2.333646,1.46695 4.235362,3.52905 5.924734,5.70709 2.266543,2.92217 4.271913,6.16491 7.29931,8.28886 2.137781,1.49982 4.695713,2.35501 7.29406,2.61606 3.051317,0.30656 6.139876,-0.18595 9.08171,-1.05205 2.726384,-0.80267 5.363099,-1.92956 7.78216,-3.4214 4.598507,-2.83591 8.439249,-6.99271 13.51002,-8.85709 1.10702,-0.40702 2.25922,-0.69819 3.3265,-1.20026 1.06727,-0.50207 2.07136,-1.25403 2.5811,-2.31766 0.48998,-1.02241 0.47097,-2.20249 0.63053,-3.32496 0.1707,-1.20084 0.55374,-2.36184 0.76385,-3.55642 0.2101,-1.19458 0.23517,-2.47233 -0.28138,-3.56975 -0.42775,-0.90878 -1.20535,-1.62786 -2.09983,-2.08475 -0.89448,-0.4569 -1.90108,-0.66447 -2.90429,-0.71372 -2.006415,-0.0985 -3.987581,0.41519 -5.98809,0.59785 -2.649534,0.24193 -5.317874,-0.0982 -7.97725,-0.019 -3.308296,0.0986 -6.568402,0.84468 -9.87428,1.00503 -3.771518,0.18294 -7.534685,-0.39851 -11.30754,-0.55139 -1.634066,-0.0662 -3.279962,-0.0512 -4.891819,0.22531 -1.611857,0.27654 -3.195234,0.82363 -4.541001,1.75286 -1.311442,0.90553 -2.355916,2.14022 -3.560189,3.18405 -0.602137,0.52192 -1.249488,0.99929 -1.966273,1.3474 -0.716785,0.34812 -1.50749,0.564 -2.304158,0.54708 -0.409601,-0.009 -0.830861,-0.0769 -1.2213,0.0472 -0.243915,0.0775 -0.460478,0.22705 -0.643532,0.40593 -0.183054,0.17888 -0.334787,0.38705 -0.477798,0.59931 -0.332537,0.49356 -0.623066,1.01541 -0.867417,1.55807"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="csssaaaaaaasssaaaaaac" />
+ <path
+ inkscape:label="Lower Beak Highlight"
+ transform="translate(59.99984,-58.362183)"
+ style="display:inline;fill:#d9b30d;fill-opacity:1;stroke:none;filter:url(#filter28502-8)"
+ id="path28487-2"
+ d="m 60.55673,169.09742 c -0.386462,1.59605 -0.151992,3.33408 0.64359,4.77067 0.795582,1.43659 2.144391,2.5575 3.70231,3.07676 1.977755,0.65919 4.206575,0.33635 6.05477,-0.62813 1.071362,-0.55909 2.051171,-1.34588 2.669379,-2.38425 0.309105,-0.51918 0.523981,-1.09707 0.604518,-1.69591 0.08054,-0.59884 0.02471,-1.2185 -0.184887,-1.78522 -0.229715,-0.62112 -0.640261,-1.16849 -1.146053,-1.59596 -0.505791,-0.42748 -1.104668,-0.7378 -1.733436,-0.94568 -1.257537,-0.41575 -2.610936,-0.42405 -3.933891,-0.36051 -2.005209,0.0963 -4.002918,0.34837 -5.9692,0.75318"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="caaaac" />
+ <path
+ inkscape:label="Upper Beak Undershadow"
+ transform="translate(59.99984,-58.362183)"
+ style="display:inline;fill:#604405;fill-opacity:1;stroke:none;filter:url(#filter15145-6)"
+ d="m 54.0663,156.67992 c -1.338955,0.79147 -2.628584,1.66369 -3.8975,2.56317 -0.656705,0.46551 -1.334168,0.96895 -1.68056,1.69557 -0.245501,0.51498 -0.301768,1.09903 -0.309586,1.66948 -0.0078,0.57045 0.02884,1.14399 -0.04618,1.70954 -0.05124,0.38625 -0.154326,0.76619 -0.171537,1.15544 -0.0086,0.19463 0.0047,0.39145 0.05602,0.57938 0.05134,0.18793 0.141902,0.36704 0.275482,0.50885 0.172556,0.18318 0.407931,0.29591 0.64865,0.36931 0.240719,0.0734 0.490638,0.1112 0.73562,0.16878 1.174662,0.27611 2.196917,0.99676 3.094125,1.80366 0.897208,0.8069 1.702883,1.71487 2.638865,2.47645 2.537255,2.06449 5.890478,2.91872 9.161088,2.97254 3.27061,0.0538 6.504204,-0.63066 9.695302,-1.34946 2.506322,-0.56456 5.014978,-1.15472 7.42544,-2.04356 3.702752,-1.36537 7.140748,-3.43167 10.11819,-6.02193 1.349968,-1.17442 2.617219,-2.46364 4.13251,-3.41525 1.340926,-0.84211 2.842622,-1.39796 4.206331,-2.20265 0.12193,-0.072 0.24321,-0.14621 0.35213,-0.23665 0.10893,-0.0905 0.20574,-0.1981 0.26892,-0.3248 0.10917,-0.21894 0.10937,-0.48123 0.0389,-0.71552 -0.0704,-0.23429 -0.20633,-0.44389 -0.36,-0.63425 -0.16999,-0.21058 -0.36336,-0.40158 -0.568951,-0.57756 -1.424379,-1.21921 -3.356756,-1.66245 -5.22581,-1.81067 -1.869053,-0.14822 -3.760672,-0.0434 -5.60996,-0.35238 -1.738647,-0.29048 -3.393268,-0.93881 -5.07175,-1.4773 -1.761942,-0.56527 -3.562776,-1.01251 -5.38903,-1.31044 -4.294756,-0.70063 -8.71732,-0.56641 -12.97748,0.32063 -4.057685,0.84488 -7.971287,2.37056 -11.53927,4.47962"
+ id="path27476-7-8"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ssssssssssssssssaaas" />
+ <path
+ inkscape:label="Upper Beak"
+ transform="translate(59.99984,-58.362183)"
+ style="display:inline;fill:url(#linearGradient18824);fill-opacity:1;stroke:none"
+ d="m 53.63941,152.15408 c -1.929391,1.2986 -3.666135,2.88291 -5.13602,4.68523 -0.840698,1.03083 -1.603727,2.15084 -2.02709,3.41185 -0.332996,0.99185 -0.446478,2.04153 -0.65633,3.06652 -0.07861,0.38398 -0.171386,0.76923 -0.169741,1.16118 8.22e-4,0.19597 0.02568,0.39281 0.08646,0.57912 0.06079,0.18631 0.15831,0.36204 0.294069,0.50337 0.224679,0.23391 0.540409,0.36101 0.858102,0.42632 0.317692,0.0653 0.643798,0.0751 0.966058,0.11177 1.454637,0.16535 2.794463,0.87199 4.000333,1.70216 1.205869,0.83017 2.317112,1.79543 3.554437,2.57795 2.733893,1.72899 5.994554,2.49829 9.226902,2.62285 3.232347,0.12456 6.457354,-0.36641 9.629488,-0.99977 2.520903,-0.50334 5.033924,-1.10072 7.42544,-2.04356 3.662411,-1.44389 6.963507,-3.66693 10.11819,-6.02193 1.43301,-1.06976 2.84598,-2.17318 4.13251,-3.41525 0.43668,-0.42159 0.859162,-0.85947 1.327567,-1.24551 0.468404,-0.38603 0.988159,-0.72177 1.565973,-0.90766 0.880766,-0.28336 1.835622,-0.20203 2.748192,-0.0495 0.68732,0.11488 1.376,0.26902 2.07229,0.24128 0.34815,-0.0139 0.69661,-0.0742 1.02006,-0.2037 0.32345,-0.12954 0.62155,-0.33028 0.8433,-0.59903 0.29139,-0.35317 0.43996,-0.81445 0.4416,-1.2723 0.002,-0.45786 -0.1387,-0.91095 -0.37105,-1.30548 -0.4647,-0.78905 -1.26825,-1.32311 -2.10504,-1.69503 -1.14614,-0.50941 -2.3863,-0.76136 -3.605512,-1.05573 -3.745289,-0.90427 -7.384752,-2.24056 -10.83577,-3.95385 -1.715597,-0.85173 -3.383551,-1.79555 -5.07175,-2.70037 -1.735567,-0.93021 -3.504569,-1.82415 -5.38903,-2.39536 -4.21332,-1.27713 -8.818528,-0.85829 -12.97748,0.58609 -4.619909,1.60447 -8.797447,4.46312 -11.96616,8.18832 v 2e-5"
+ id="path27476-4"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cssssssssssssssssaaasc" />
+ <path
+ inkscape:label="Upper Beak Highlight"
+ transform="translate(59.99984,-58.362183)"
+ style="display:inline;fill:#f6da4a;fill-opacity:1;stroke:none;filter:url(#filter14963-7)"
+ d="m 83.23853,153.07989 c -0.226496,-0.28623 -0.551139,-0.48799 -0.901294,-0.59103 -0.350155,-0.10304 -0.724669,-0.1104 -1.084432,-0.0488 -0.719527,0.12322 -1.364496,0.51049 -1.965744,0.9245 -1.708552,1.17648 -3.218864,2.62822 -4.53731,4.22977 -1.745223,2.11996 -3.18499,4.57171 -3.66755,7.27489 -0.08131,0.45547 -0.135106,0.92132 -0.07821,1.38049 0.0569,0.45916 0.232792,0.91479 0.558708,1.24319 0.286171,0.28835 0.675727,0.46425 1.077847,0.52203 0.40212,0.0578 0.815901,0.002 1.200757,-0.12836 0.769713,-0.26019 1.408987,-0.79942 2.014436,-1.34126 3.335973,-2.98548 6.352522,-6.56776 7.55957,-10.87877 0.121128,-0.43261 0.224012,-0.87566 0.221233,-1.3249 -0.0028,-0.44924 -0.119237,-0.90947 -0.398013,-1.26176"
+ id="path28357-7"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="zzzzzzzz" />
+ <g
+ style="display:inline"
+ inkscape:label="Nostrils"
+ id="g681">
+ <path
+ sodipodi:nodetypes="aaaaaaa"
+ inkscape:connector-curvature="0"
+ id="path28396-7"
+ d="m 135.25114,88.527667 c 0.23129,0.7424 1.42778,0.61935 2.11906,0.97542 0.60659,0.31244 1.09447,0.99723 1.77651,1.01692 0.65093,0.0188 1.66398,-0.22542 1.74866,-0.87109 0.11187,-0.85303 -1.13379,-1.39511 -1.93536,-1.70762 -1.03148,-0.40216 -2.35301,-0.6062 -3.3206,-0.0682 -0.22173,0.12328 -0.46373,0.41238 -0.38827,0.65458 z"
+ style="display:inline;opacity:0.8;fill:url(#radialGradient18826);fill-opacity:1;stroke:none;filter:url(#filter15177-1)"
+ inkscape:label="Right Nostril" />
+ <path
+ sodipodi:nodetypes="asssa"
+ inkscape:connector-curvature="0"
+ id="path28398-7"
+ d="m 123.82694,88.107827 c -0.88816,-0.28854 -2.35748,1.27746 -1.87806,2.07886 0.13167,0.22009 0.53491,0.49916 0.80641,0.34992 0.40925,-0.22497 0.74404,-1.02958 1.18746,-1.34496 0.29608,-0.21058 0.22974,-0.97156 -0.11581,-1.08382 z"
+ style="display:inline;opacity:0.8;fill:url(#radialGradient18828);fill-opacity:1;stroke:none;filter:url(#filter15173-0)"
+ inkscape:label="Left Nostril" />
+ </g>
+ <path
+ clip-path="url(#clipPath697)"
+ inkscape:label="Beak Side Highlight"
+ inkscape:connector-curvature="0"
+ id="path28570-8"
+ d="m 245.90496,158.28406 a 2.608083,2.328125 0 0 1 -2.60809,2.32812 2.608083,2.328125 0 0 1 -2.60808,-2.32812 2.608083,2.328125 0 0 1 2.60808,-2.32813 2.608083,2.328125 0 0 1 2.60809,2.32813 z"
+ style="color:#000000;display:inline;overflow:visible;visibility:visible;fill:url(#linearGradient18830);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:0.25;marker:none;filter:url(#filter28584-4);enable-background:accumulate"
+ transform="matrix(1.0956223,0,-0.17017853,1.5181314,-76.24447,-140.47964)" />
+ </g>
+ </g>
+ </g>
+ </g>
+ </g>
+</svg>
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 062cf88c2216..8f9be0e658b4 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -136,8 +136,8 @@ needed).
misc-devices/index
scheduler/index
mhi/index
- tty/index
peci/index
+ hte/index
Architecture-agnostic documentation
-----------------------------------
diff --git a/Documentation/input/input-programming.rst b/Documentation/input/input-programming.rst
index 2638dce69764..c9264814c7aa 100644
--- a/Documentation/input/input-programming.rst
+++ b/Documentation/input/input-programming.rst
@@ -85,15 +85,15 @@ accepted by this input device. Our example device can only generate EV_KEY
type events, and from those only BTN_0 event code. Thus we only set these
two bits. We could have used::
- set_bit(EV_KEY, button_dev.evbit);
- set_bit(BTN_0, button_dev.keybit);
+ set_bit(EV_KEY, button_dev->evbit);
+ set_bit(BTN_0, button_dev->keybit);
as well, but with more than single bits the first approach tends to be
shorter.
Then the example driver registers the input device structure by calling::
- input_register_device(&button_dev);
+ input_register_device(button_dev);
This adds the button_dev structure to linked lists of the input driver and
calls device handler modules _connect functions to tell them a new input
diff --git a/Documentation/leds/leds-qcom-lpg.rst b/Documentation/leds/leds-qcom-lpg.rst
new file mode 100644
index 000000000000..de7ceead9337
--- /dev/null
+++ b/Documentation/leds/leds-qcom-lpg.rst
@@ -0,0 +1,78 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================
+Kernel driver for Qualcomm LPG
+==============================
+
+Description
+-----------
+
+The Qualcomm LPG can be found in a variety of Qualcomm PMICs and consists of a
+number of PWM channels, a programmable pattern lookup table and an RGB LED
+current sink.
+
+To facilitate the various use cases, the LPG channels can be exposed as
+individual LEDs, grouped together as RGB LEDs, or accessed directly as PWM
+channels. The output of each PWM channel is routed to other hardware blocks,
+such as the RGB current sink, GPIO pins, etc.
+
+Each PWM channel can operate with a period between 27us and 384 seconds and
+has a 9-bit duty-cycle resolution (512 steps).
+
+In order to keep providing status notifications while the CPU subsystem is in
+deeper idle states, the LPG provides pattern support. This consists of a shared
+lookup table of brightness values and per-channel properties that select the
+range within the table to use, the rate, and whether the pattern should repeat.
+
+The pattern for a channel can be programmed by selecting the "pattern" trigger
+and writing to its hw_pattern attribute.
+
+/sys/class/leds/<led>/hw_pattern
+--------------------------------
+
+Specify a hardware pattern for a Qualcomm LPG LED.
+
+The pattern is a series of brightness and hold-time pairs, with the hold-time
+expressed in milliseconds. The hold time is a property of the pattern and must
+therefore be identical for each element in the pattern (except for the pauses
+described below). As the LPG hardware is not able to perform the linear
+transitions expected by the leds-trigger-pattern format, each entry in the
+pattern must be followed by a zero-length entry of the same brightness.
+
+Simple pattern::
+
+ "255 500 255 0 0 500 0 0"
+
+ ^
+ |
+ 255 +----+ +----+
+ | | | | ...
+ 0 | +----+ +----
+ +---------------------->
+ 0 5 10 15 time (100ms)
+
+The LPG supports specifying a longer hold-time for the first and last element
+in the pattern, the so-called "low pause" and "high pause".
+
+Low-pause pattern::
+
+ "255 1000 255 0 0 500 0 0 255 500 255 0 0 500 0 0"
+
+ ^
+ |
+ 255 +--------+ +----+ +----+ +--------+
+ | | | | | | | | ...
+ 0 | +----+ +----+ +----+ +----
+ +----------------------------->
+ 0 5 10 15 20 25 time (100ms)
+
+Similarly, the pattern can be stretched at the end by using a longer hold-time
+on the last entry (the "high pause").
+
+In order to save space in the shared lookup table, the LPG supports "ping-pong"
+mode, in which each run through the pattern is performed by first running the
+pattern forward, then backwards. This mode is automatically used by the
+driver when the given pattern is a palindrome. In this case the "high pause"
+denotes the wait time before the pattern is run in reverse, and as such the
+middle item in the pattern is allowed to have a different hold-time.
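+
+As a rough illustration, the simple pattern above could be programmed from user
+space by first selecting the "pattern" trigger and then writing the pattern
+string to the hw_pattern attribute. The following minimal C sketch assumes a
+hypothetical LED named "red:status" and keeps error handling to a minimum::
+
+  /* Sketch: select the "pattern" trigger, then program an LPG hw_pattern. */
+  #include <fcntl.h>
+  #include <string.h>
+  #include <unistd.h>
+
+  static int write_attr(const char *path, const char *val)
+  {
+          int fd = open(path, O_WRONLY);
+
+          if (fd < 0)
+                  return -1;
+          if (write(fd, val, strlen(val)) < 0) {
+                  close(fd);
+                  return -1;
+          }
+          return close(fd);
+  }
+
+  int main(void)
+  {
+          if (write_attr("/sys/class/leds/red:status/trigger", "pattern"))
+                  return 1;
+          return write_attr("/sys/class/leds/red:status/hw_pattern",
+                            "255 500 255 0 0 500 0 0") ? 1 : 0;
+  }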
diff --git a/Documentation/loongarch/features.rst b/Documentation/loongarch/features.rst
new file mode 100644
index 000000000000..ebacade3ea45
--- /dev/null
+++ b/Documentation/loongarch/features.rst
@@ -0,0 +1,3 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. kernel-feat:: $srctree/Documentation/features loongarch
diff --git a/Documentation/loongarch/index.rst b/Documentation/loongarch/index.rst
new file mode 100644
index 000000000000..aaba648db907
--- /dev/null
+++ b/Documentation/loongarch/index.rst
@@ -0,0 +1,21 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
+LoongArch Architecture
+======================
+
+.. toctree::
+ :maxdepth: 2
+ :numbered:
+
+ introduction
+ irq-chip-model
+
+ features
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/loongarch/introduction.rst b/Documentation/loongarch/introduction.rst
new file mode 100644
index 000000000000..2bf40ad370df
--- /dev/null
+++ b/Documentation/loongarch/introduction.rst
@@ -0,0 +1,387 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================
+Introduction to LoongArch
+=========================
+
+LoongArch is a new RISC ISA, which is a bit like MIPS or RISC-V. There are
+currently 3 variants: a reduced 32-bit version (LA32R), a standard 32-bit
+version (LA32S) and a 64-bit version (LA64). There are 4 privilege levels
+(PLVs) defined in LoongArch: PLV0~PLV3, from high to low. The kernel runs at
+PLV0 while applications run at PLV3. This document introduces the registers, basic
+instruction set, virtual memory and some other topics of LoongArch.
+
+Registers
+=========
+
+LoongArch registers include general purpose registers (GPRs), floating point
+registers (FPRs), vector registers (VRs) and control status registers (CSRs)
+used in privileged mode (PLV0).
+
+GPRs
+----
+
+LoongArch has 32 GPRs ( ``$r0`` ~ ``$r31`` ); each one is 32-bit wide in LA32
+and 64-bit wide in LA64. ``$r0`` is hard-wired to zero, and the other registers
+are not architecturally special. (Except ``$r1``, which is hard-wired as the
+link register of the BL instruction.)
+
+The kernel uses a variant of the LoongArch register convention, as described in
+the LoongArch ELF psABI spec, in :ref:`References <loongarch-references>`:
+
+================= =============== =================== ============
+Name Alias Usage Preserved
+ across calls
+================= =============== =================== ============
+``$r0`` ``$zero`` Constant zero Unused
+``$r1`` ``$ra`` Return address No
+``$r2`` ``$tp`` TLS/Thread pointer Unused
+``$r3`` ``$sp`` Stack pointer Yes
+``$r4``-``$r11`` ``$a0``-``$a7`` Argument registers No
+``$r4``-``$r5`` ``$v0``-``$v1`` Return value No
+``$r12``-``$r20`` ``$t0``-``$t8`` Temp registers No
+``$r21`` ``$u0`` Percpu base address Unused
+``$r22`` ``$fp`` Frame pointer Yes
+``$r23``-``$r31`` ``$s0``-``$s8`` Static registers Yes
+================= =============== =================== ============
+
+Note: The register ``$r21`` is reserved in the ELF psABI, but used by the Linux
+kernel for storing the percpu base address. It normally has no ABI name, but is
+called ``$u0`` in the kernel. You may also see ``$v0`` or ``$v1`` in some old code,
+however they are deprecated aliases of ``$a0`` and ``$a1`` respectively.
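+
+As an aside, a minimal sketch (using a GCC-style global register variable; the
+variable and helper names below are made up for illustration, not the kernel's)
+of how C code can refer to this register directly::
+
+   /* Bind a C identifier to $r21 so the percpu base can be read directly. */
+   register unsigned long __my_percpu_base __asm__("$r21");
+
+   static inline unsigned long percpu_base(void)
+   {
+           return __my_percpu_base;    /* reads $r21, no memory access */
+   }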
+
+FPRs
+----
+
+LoongArch has 32 FPRs ( ``$f0`` ~ ``$f31`` ) when the FPU is present. Each one is
+64-bit wide on the LA64 cores.
+
+The floating-point register convention is the same as described in the
+LoongArch ELF psABI spec:
+
+================= ================== =================== ============
+Name Alias Usage Preserved
+ across calls
+================= ================== =================== ============
+``$f0``-``$f7`` ``$fa0``-``$fa7`` Argument registers No
+``$f0``-``$f1`` ``$fv0``-``$fv1`` Return value No
+``$f8``-``$f23`` ``$ft0``-``$ft15`` Temp registers No
+``$f24``-``$f31`` ``$fs0``-``$fs7`` Static registers Yes
+================= ================== =================== ============
+
+Note: You may see ``$fv0`` or ``$fv1`` in some old code, however they are deprecated
+aliases of ``$fa0`` and ``$fa1`` respectively.
+
+VRs
+----
+
+There are currently 2 vector extensions to LoongArch:
+
+- LSX (Loongson SIMD eXtension) with 128-bit vectors,
+- LASX (Loongson Advanced SIMD eXtension) with 256-bit vectors.
+
+LSX brings ``$v0`` ~ ``$v31`` while LASX brings ``$x0`` ~ ``$x31`` as the vector
+registers.
+
+The VRs overlap with FPRs: for example, on a core implementing LSX and LASX,
+the lower 128 bits of ``$x0`` are shared with ``$v0``, and the lower 64 bits of
+``$v0`` are shared with ``$f0``; the same applies to all other VRs.
+
+CSRs
+----
+
+CSRs can only be accessed from privileged mode (PLV0):
+
+================= ===================================== ==============
+Address Full Name Abbrev Name
+================= ===================================== ==============
+0x0 Current Mode Information CRMD
+0x1 Pre-exception Mode Information PRMD
+0x2 Extension Unit Enable EUEN
+0x3 Miscellaneous Control MISC
+0x4 Exception Configuration ECFG
+0x5 Exception Status ESTAT
+0x6 Exception Return Address ERA
+0x7 Bad (Faulting) Virtual Address BADV
+0x8 Bad (Faulting) Instruction Word BADI
+0xC Exception Entrypoint Address EENTRY
+0x10 TLB Index TLBIDX
+0x11 TLB Entry High-order Bits TLBEHI
+0x12 TLB Entry Low-order Bits 0 TLBELO0
+0x13 TLB Entry Low-order Bits 1 TLBELO1
+0x18 Address Space Identifier ASID
+0x19 Page Global Directory Address for PGDL
+ Lower-half Address Space
+0x1A Page Global Directory Address for PGDH
+ Higher-half Address Space
+0x1B Page Global Directory Address PGD
+0x1C Page Walk Control for Lower- PWCL
+ half Address Space
+0x1D Page Walk Control for Higher- PWCH
+ half Address Space
+0x1E STLB Page Size STLBPS
+0x1F Reduced Virtual Address Configuration RVACFG
+0x20 CPU Identifier CPUID
+0x21 Privileged Resource Configuration 1 PRCFG1
+0x22 Privileged Resource Configuration 2 PRCFG2
+0x23 Privileged Resource Configuration 3 PRCFG3
+0x30+n (0≤n≤15) Saved Data register SAVEn
+0x40 Timer Identifier TID
+0x41 Timer Configuration TCFG
+0x42 Timer Value TVAL
+0x43 Compensation of Timer Count CNTC
+0x44 Timer Interrupt Clearing TICLR
+0x60 LLBit Control LLBCTL
+0x80 Implementation-specific Control 1 IMPCTL1
+0x81 Implementation-specific Control 2 IMPCTL2
+0x88 TLB Refill Exception Entrypoint TLBRENTRY
+ Address
+0x89 TLB Refill Exception BAD (Faulting) TLBRBADV
+ Virtual Address
+0x8A TLB Refill Exception Return Address TLBRERA
+0x8B TLB Refill Exception Saved Data TLBRSAVE
+ Register
+0x8C TLB Refill Exception Entry Low-order TLBRELO0
+ Bits 0
+0x8D TLB Refill Exception Entry Low-order TLBRELO1
+ Bits 1
+0x8E TLB Refill Exception Entry High-order TLBREHI
+ Bits
+0x8F TLB Refill Exception Pre-exception TLBRPRMD
+ Mode Information
+0x90 Machine Error Control MERRCTL
+0x91 Machine Error Information 1 MERRINFO1
+0x92 Machine Error Information 2 MERRINFO2
+0x93 Machine Error Exception Entrypoint MERRENTRY
+ Address
+0x94 Machine Error Exception Return MERRERA
+ Address
+0x95 Machine Error Exception Saved Data MERRSAVE
+ Register
+0x98 Cache TAGs CTAG
+0x180+n (0≤n≤3) Direct Mapping Configuration Window n DMWn
+0x200+2n (0≤n≤31) Performance Monitor Configuration n PMCFGn
+0x201+2n (0≤n≤31) Performance Monitor Overall Counter n PMCNTn
+0x300 Memory Load/Store WatchPoint MWPC
+ Overall Control
+0x301 Memory Load/Store WatchPoint MWPS
+ Overall Status
+0x310+8n (0≤n≤7) Memory Load/Store WatchPoint n MWPnCFG1
+ Configuration 1
+0x311+8n (0≤n≤7) Memory Load/Store WatchPoint n MWPnCFG2
+ Configuration 2
+0x312+8n (0≤n≤7) Memory Load/Store WatchPoint n MWPnCFG3
+ Configuration 3
+0x313+8n (0≤n≤7) Memory Load/Store WatchPoint n MWPnCFG4
+ Configuration 4
+0x380 Instruction Fetch WatchPoint FWPC
+ Overall Control
+0x381 Instruction Fetch WatchPoint FWPS
+ Overall Status
+0x390+8n (0≤n≤7) Instruction Fetch WatchPoint n FWPnCFG1
+ Configuration 1
+0x391+8n (0≤n≤7) Instruction Fetch WatchPoint n FWPnCFG2
+ Configuration 2
+0x392+8n (0≤n≤7) Instruction Fetch WatchPoint n FWPnCFG3
+ Configuration 3
+0x393+8n (0≤n≤7) Instruction Fetch WatchPoint n FWPnCFG4
+ Configuration 4
+0x500 Debug Register DBG
+0x501 Debug Exception Return Address DERA
+0x502 Debug Exception Saved Data Register DSAVE
+================= ===================================== ==============
+
+ERA, TLBRERA, MERRERA and DERA are sometimes also known as EPC, TLBREPC, MERREPC
+and DEPC respectively.
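+
+As an illustrative sketch (assuming a GCC-style toolchain; the helper name is
+made up and this is not kernel code), a CSR such as CRMD (0x0) can be read at
+PLV0 with the CSRRD instruction via inline assembly::
+
+   static inline unsigned long read_csr_crmd(void)
+   {
+           unsigned long val;
+
+           /* csrrd rd, csr_num: read CSR 0x0 (CRMD) into val */
+           __asm__ __volatile__("csrrd %0, 0x0" : "=r" (val));
+           return val;
+   }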
+
+Basic Instruction Set
+=====================
+
+Instruction formats
+-------------------
+
+LoongArch instructions are 32 bits wide, belonging to 9 basic instruction
+formats (and variants of them):
+
+=========== ==========================
+Format name Composition
+=========== ==========================
+2R Opcode + Rj + Rd
+3R Opcode + Rk + Rj + Rd
+4R Opcode + Ra + Rk + Rj + Rd
+2RI8 Opcode + I8 + Rj + Rd
+2RI12 Opcode + I12 + Rj + Rd
+2RI14 Opcode + I14 + Rj + Rd
+2RI16 Opcode + I16 + Rj + Rd
+1RI21 Opcode + I21L + Rj + I21H
+I26 Opcode + I26L + I26H
+=========== ==========================
+
+Rd is the destination register operand, while Rj, Rk and Ra ("a" stands for
+"additional") are the source register operands. I8/I12/I16/I21/I26 are
+immediate operands of respective width. The longer I21 and I26 are stored
+in separate higher and lower parts in the instruction word, denoted by the "L"
+and "H" suffixes.
+
+List of Instructions
+--------------------
+
+For brevity, only instruction names (mnemonics) are listed here; please see the
+:ref:`References <loongarch-references>` for details.
+
+
+1. Arithmetic Instructions::
+
+ ADD.W SUB.W ADDI.W ADD.D SUB.D ADDI.D
+ SLT SLTU SLTI SLTUI
+ AND OR NOR XOR ANDN ORN ANDI ORI XORI
+ MUL.W MULH.W MULH.WU DIV.W DIV.WU MOD.W MOD.WU
+ MUL.D MULH.D MULH.DU DIV.D DIV.DU MOD.D MOD.DU
+ PCADDI PCADDU12I PCADDU18I
+ LU12I.W LU32I.D LU52I.D ADDU16I.D
+
+2. Bit-shift Instructions::
+
+ SLL.W SRL.W SRA.W ROTR.W SLLI.W SRLI.W SRAI.W ROTRI.W
+ SLL.D SRL.D SRA.D ROTR.D SLLI.D SRLI.D SRAI.D ROTRI.D
+
+3. Bit-manipulation Instructions::
+
+ EXT.W.B EXT.W.H CLO.W CLO.D CLZ.W CLZ.D CTO.W CTO.D CTZ.W CTZ.D
+ BYTEPICK.W BYTEPICK.D BSTRINS.W BSTRINS.D BSTRPICK.W BSTRPICK.D
+ REVB.2H REVB.4H REVB.2W REVB.D REVH.2W REVH.D BITREV.4B BITREV.8B BITREV.W BITREV.D
+ MASKEQZ MASKNEZ
+
+4. Branch Instructions::
+
+ BEQ BNE BLT BGE BLTU BGEU BEQZ BNEZ B BL JIRL
+
+5. Load/Store Instructions::
+
+ LD.B LD.BU LD.H LD.HU LD.W LD.WU LD.D ST.B ST.H ST.W ST.D
+ LDX.B LDX.BU LDX.H LDX.HU LDX.W LDX.WU LDX.D STX.B STX.H STX.W STX.D
+ LDPTR.W LDPTR.D STPTR.W STPTR.D
+ PRELD PRELDX
+
+6. Atomic Operation Instructions::
+
+ LL.W SC.W LL.D SC.D
+ AMSWAP.W AMSWAP.D AMADD.W AMADD.D AMAND.W AMAND.D AMOR.W AMOR.D AMXOR.W AMXOR.D
+ AMMAX.W AMMAX.D AMMIN.W AMMIN.D
+
+7. Barrier Instructions::
+
+ IBAR DBAR
+
+8. Special Instructions::
+
+ SYSCALL BREAK CPUCFG NOP IDLE ERTN(ERET) DBCL(DBGCALL) RDTIMEL.W RDTIMEH.W RDTIME.D
+ ASRTLE.D ASRTGT.D
+
+9. Privileged Instructions::
+
+ CSRRD CSRWR CSRXCHG
+ IOCSRRD.B IOCSRRD.H IOCSRRD.W IOCSRRD.D IOCSRWR.B IOCSRWR.H IOCSRWR.W IOCSRWR.D
+ CACOP TLBP(TLBSRCH) TLBRD TLBWR TLBFILL TLBCLR TLBFLUSH INVTLB LDDIR LDPTE
+
+Virtual Memory
+==============
+
+LoongArch supports direct-mapped virtual memory and page-mapped virtual memory.
+
+Direct-mapped virtual memory is configured by CSR.DMWn (n=0~3); it has a simple
+relationship between virtual address (VA) and physical address (PA)::
+
+ VA = PA + FixedOffset
+
+Page-mapped virtual memory has an arbitrary relationship between VA and PA, which
+is recorded in TLB and page tables. LoongArch's TLB includes a fully-associative
+MTLB (Multiple Page Size TLB) and set-associative STLB (Single Page Size TLB).
+
+By default, the whole virtual address space of LA32 is configured like this:
+
+============ =========================== =============================
+Name Address Range Attributes
+============ =========================== =============================
+``UVRANGE`` ``0x00000000 - 0x7FFFFFFF`` Page-mapped, Cached, PLV0~3
+``KPRANGE0`` ``0x80000000 - 0x9FFFFFFF`` Direct-mapped, Uncached, PLV0
+``KPRANGE1`` ``0xA0000000 - 0xBFFFFFFF`` Direct-mapped, Cached, PLV0
+``KVRANGE`` ``0xC0000000 - 0xFFFFFFFF`` Page-mapped, Cached, PLV0
+============ =========================== =============================
+
+User mode (PLV3) can only access UVRANGE. For direct-mapped KPRANGE0 and
+KPRANGE1, PA is equal to VA with bits 30~31 cleared. For example, the uncached
+direct-mapped VA of 0x00001000 is 0x80001000, and the cached direct-mapped
+VA of 0x00001000 is 0xA0001000.
+
+By default, the whole virtual address space of LA64 is configured like this:
+
+============ ====================== ======================================
+Name Address Range Attributes
+============ ====================== ======================================
+``XUVRANGE`` ``0x0000000000000000 - Page-mapped, Cached, PLV0~3
+ 0x3FFFFFFFFFFFFFFF``
+``XSPRANGE`` ``0x4000000000000000 - Direct-mapped, Cached / Uncached, PLV0
+ 0x7FFFFFFFFFFFFFFF``
+``XKPRANGE`` ``0x8000000000000000 - Direct-mapped, Cached / Uncached, PLV0
+ 0xBFFFFFFFFFFFFFFF``
+``XKVRANGE`` ``0xC000000000000000 - Page-mapped, Cached, PLV0
+ 0xFFFFFFFFFFFFFFFF``
+============ ====================== ======================================
+
+User mode (PLV3) can only access XUVRANGE. For direct-mapped XSPRANGE and
+XKPRANGE, PA is equal to VA with bits 60~63 cleared, and the cache attribute
+is configured by bits 60~61 in VA: 0 is for strongly-ordered uncached, 1 is
+for coherent cached, and 2 is for weakly-ordered uncached.
+
+Currently we only use XKPRANGE for direct mapping and XSPRANGE is reserved.
+
+To put this in action: the strongly-ordered uncached direct-mapped VA (in
+XKPRANGE) of 0x00000000_00001000 is 0x80000000_00001000, the coherent cached
+direct-mapped VA (in XKPRANGE) of 0x00000000_00001000 is 0x90000000_00001000,
+and the weakly-ordered uncached direct-mapped VA (in XKPRANGE) of
+0x00000000_00001000 is 0xA0000000_00001000.
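+
+A minimal sketch of that arithmetic (illustrative only; the names below are
+not kernel macros): an XKPRANGE virtual address is the physical address with
+bit 63 set to select the window and the cache attribute placed in bits 60~61::
+
+   #define CA_SUC 0UL  /* strongly-ordered uncached */
+   #define CA_CC  1UL  /* coherent cached           */
+   #define CA_WUC 2UL  /* weakly-ordered uncached   */
+
+   static inline unsigned long pa_to_xkprange(unsigned long pa, unsigned long ca)
+   {
+           return pa | (1UL << 63) | (ca << 60);
+   }
+
+For instance, ``pa_to_xkprange(0x1000, CA_CC)`` yields 0x9000000000001000,
+matching the coherent cached address above.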
+
+Relationship of Loongson and LoongArch
+======================================
+
+LoongArch is a RISC ISA which is different from any other existing ones, while
+Loongson is a family of processors. Loongson includes 3 series: Loongson-1 is
+the 32-bit processor series, Loongson-2 is the low-end 64-bit processor series,
+and Loongson-3 is the high-end 64-bit processor series. Older Loongson processors
+are based on MIPS, while newer Loongson processors are based on LoongArch. Take
+Loongson-3 as an example: Loongson-3A1000/3B1500/3A2000/3A3000/3A4000 are
+MIPS-compatible, while Loongson-3A5000 (and future revisions) are all based on
+LoongArch.
+
+.. _loongarch-references:
+
+References
+==========
+
+Official web site of Loongson Technology Corp. Ltd.:
+
+ http://www.loongson.cn/
+
+Developer web site of Loongson and LoongArch (Software and Documentation):
+
+ http://www.loongnix.cn/
+
+ https://github.com/loongson/
+
+ https://loongson.github.io/LoongArch-Documentation/
+
+Documentation of LoongArch ISA:
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.00-CN.pdf (in Chinese)
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.00-EN.pdf (in English)
+
+Documentation of LoongArch ELF psABI:
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v1.00-CN.pdf (in Chinese)
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v1.00-EN.pdf (in English)
+
+Linux kernel repository of Loongson and LoongArch:
+
+ https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson.git
diff --git a/Documentation/loongarch/irq-chip-model.rst b/Documentation/loongarch/irq-chip-model.rst
new file mode 100644
index 000000000000..8d88f7ab2e5e
--- /dev/null
+++ b/Documentation/loongarch/irq-chip-model.rst
@@ -0,0 +1,156 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================================
+IRQ chip model (hierarchy) of LoongArch
+=======================================
+
+Currently, LoongArch-based processors (e.g. Loongson-3A5000) can only work together
+with LS7A chipsets. The irq chips in LoongArch computers include CPUINTC (CPU Core
+Interrupt Controller), LIOINTC (Legacy I/O Interrupt Controller), EIOINTC (Extended
+I/O Interrupt Controller), HTVECINTC (Hyper-Transport Vector Interrupt Controller),
+PCH-PIC (Main Interrupt Controller in LS7A chipset), PCH-LPC (LPC Interrupt Controller
+in LS7A chipset) and PCH-MSI (MSI Interrupt Controller).
+
+CPUINTC is a per-core controller (in the CPU), LIOINTC/EIOINTC/HTVECINTC are
+per-package controllers (in the CPU), while PCH-PIC/PCH-LPC/PCH-MSI are controllers
+outside the CPU (i.e., in the chipsets). These controllers (in other words, irqchips)
+are linked in a hierarchy, and there are two models of hierarchy (legacy model and
+extended model).
+
+Legacy IRQ model
+================
+
+In this model, IPI (Inter-Processor Interrupt) and CPU Local Timer interrupts go
+to CPUINTC directly, CPU UART interrupts go to LIOINTC, while all other device
+interrupts go to PCH-PIC/PCH-LPC/PCH-MSI, are gathered by HTVECINTC, then go to
+LIOINTC, and finally to CPUINTC::
+
+ +-----+ +---------+ +-------+
+ | IPI | --> | CPUINTC | <-- | Timer |
+ +-----+ +---------+ +-------+
+ ^
+ |
+ +---------+ +-------+
+ | LIOINTC | <-- | UARTs |
+ +---------+ +-------+
+ ^
+ |
+ +-----------+
+ | HTVECINTC |
+ +-----------+
+ ^ ^
+ | |
+ +---------+ +---------+
+ | PCH-PIC | | PCH-MSI |
+ +---------+ +---------+
+ ^ ^ ^
+ | | |
+ +---------+ +---------+ +---------+
+ | PCH-LPC | | Devices | | Devices |
+ +---------+ +---------+ +---------+
+ ^
+ |
+ +---------+
+ | Devices |
+ +---------+
+
+Extended IRQ model
+==================
+
+In this model, IPI (Inter-Processor Interrupt) and CPU Local Timer interrupts go
+to CPUINTC directly, CPU UART interrupts go to LIOINTC, while all other device
+interrupts go to PCH-PIC/PCH-LPC/PCH-MSI, are gathered by EIOINTC, and then go
+directly to CPUINTC::
+
+ +-----+ +---------+ +-------+
+ | IPI | --> | CPUINTC | <-- | Timer |
+ +-----+ +---------+ +-------+
+ ^ ^
+ | |
+ +---------+ +---------+ +-------+
+ | EIOINTC | | LIOINTC | <-- | UARTs |
+ +---------+ +---------+ +-------+
+ ^ ^
+ | |
+ +---------+ +---------+
+ | PCH-PIC | | PCH-MSI |
+ +---------+ +---------+
+ ^ ^ ^
+ | | |
+ +---------+ +---------+ +---------+
+ | PCH-LPC | | Devices | | Devices |
+ +---------+ +---------+ +---------+
+ ^
+ |
+ +---------+
+ | Devices |
+ +---------+
+
+ACPI-related definitions
+========================
+
+CPUINTC::
+
+ ACPI_MADT_TYPE_CORE_PIC;
+ struct acpi_madt_core_pic;
+ enum acpi_madt_core_pic_version;
+
+LIOINTC::
+
+ ACPI_MADT_TYPE_LIO_PIC;
+ struct acpi_madt_lio_pic;
+ enum acpi_madt_lio_pic_version;
+
+EIOINTC::
+
+ ACPI_MADT_TYPE_EIO_PIC;
+ struct acpi_madt_eio_pic;
+ enum acpi_madt_eio_pic_version;
+
+HTVECINTC::
+
+ ACPI_MADT_TYPE_HT_PIC;
+ struct acpi_madt_ht_pic;
+ enum acpi_madt_ht_pic_version;
+
+PCH-PIC::
+
+ ACPI_MADT_TYPE_BIO_PIC;
+ struct acpi_madt_bio_pic;
+ enum acpi_madt_bio_pic_version;
+
+PCH-MSI::
+
+ ACPI_MADT_TYPE_MSI_PIC;
+ struct acpi_madt_msi_pic;
+ enum acpi_madt_msi_pic_version;
+
+PCH-LPC::
+
+ ACPI_MADT_TYPE_LPC_PIC;
+ struct acpi_madt_lpc_pic;
+ enum acpi_madt_lpc_pic_version;
+
+References
+==========
+
+Documentation of Loongson-3A5000:
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-3A5000-usermanual-1.02-CN.pdf (in Chinese)
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-3A5000-usermanual-1.02-EN.pdf (in English)
+
+Documentation of Loongson's LS7A chipset:
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-7A1000-usermanual-2.00-CN.pdf (in Chinese)
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-7A1000-usermanual-2.00-EN.pdf (in English)
+
+Note: CPUINTC is CSR.ECFG/CSR.ESTAT and its interrupt controller described
+in Section 7.4 of "LoongArch Reference Manual, Vol 1"; LIOINTC is "Legacy I/O
+Interrupts" described in Section 11.1 of "Loongson 3A5000 Processor Reference
+Manual"; EIOINTC is "Extended I/O Interrupts" described in Section 11.2 of
+"Loongson 3A5000 Processor Reference Manual"; HTVECINTC is "HyperTransport
+Interrupts" described in Section 14.3 of "Loongson 3A5000 Processor Reference
+Manual"; PCH-PIC/PCH-MSI is "Interrupt Controller" described in Section 5 of
+"Loongson 7A1000 Bridge User Manual"; PCH-LPC is "LPC Interrupts" described in
+Section 24.3 of "Loongson 7A1000 Bridge User Manual".
diff --git a/Documentation/misc-devices/index.rst b/Documentation/misc-devices/index.rst
index 30ac58f81901..756be15a49a4 100644
--- a/Documentation/misc-devices/index.rst
+++ b/Documentation/misc-devices/index.rst
@@ -25,6 +25,7 @@ fit into other categories.
isl29003
lis3lv02d
max6875
+ oxsemi-tornado
pci-endpoint-test
spear-pcie-gadget
uacce
diff --git a/Documentation/misc-devices/oxsemi-tornado.rst b/Documentation/misc-devices/oxsemi-tornado.rst
new file mode 100644
index 000000000000..b33351bef6cf
--- /dev/null
+++ b/Documentation/misc-devices/oxsemi-tornado.rst
@@ -0,0 +1,131 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================================================================
+Notes on Oxford Semiconductor PCIe (Tornado) 950 serial port devices
+====================================================================
+
+Oxford Semiconductor PCIe (Tornado) 950 serial port devices are driven
+by a fixed 62.5MHz clock input derived from the 100MHz PCI Express clock.
+
+The baud rate produced by the baud generator is obtained from this input
+frequency by dividing it by the clock prescaler, which can be set to any
+value from 1 to 63.875 in increments of 0.125, and then the usual 16-bit
+divisor is used, as with the original 8250, to divide the frequency by a
+value from 1 to 65535. Finally, a programmable oversampling rate is used
+that can take any value from 4 to 16 to divide the frequency further and
+determine the actual baud rate used. Baud rates from 15625000bps down
+to 0.933bps can be obtained this way.
+
+By default the oversampling rate is set to 16 and the clock prescaler is
+set to 33.875, meaning that the reference frequency for the usual 16-bit
+divisor is 115313.653Hz. This is close enough to the 115200Hz used by the
+original 8250 that software unaware of the extra clock controls can use
+the same divisor values to obtain the requested baud rates.
+
+The oversampling rate is programmed with the TCR register and the clock
+prescaler is programmed with the CPR/CPR2 register pair [OX200]_ [OX952]_
+[OX954]_ [OX958]_. To switch away from the default value of 33.875 for
+the prescaler, though, the enhanced mode has to be explicitly enabled by
+setting bit 4 of the EFR. In that mode, setting bit 7 in the MCR enables
+the prescaler; otherwise it is bypassed as if a value of 1 was used.
+Additionally, writing any value to CPR clears CPR2 for compatibility with
+old software written for older conventional PCI Oxford Semiconductor
+devices that do not have the extra prescaler's 9th bit in CPR2, so the
+CPR/CPR2 register pair has to be programmed in the right order.
+
+By using these parameters, rates from 15625000bps down to 1bps can be
+obtained, with either exact or highly-accurate actual bit rates for
+standard and many non-standard rates.
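+
+As a back-of-the-envelope sketch (not driver code; the helper name is made
+up), the actual rate in the table below follows directly from the three
+dividers, with the prescaler expressed in 1/8 steps (e.g. 33.875 -> 271):
+
+::
+
+   static double oxsemi_baud(unsigned int tcr, unsigned int cpr_8,
+                             unsigned int div)
+   {
+           return 62500000.0 * 8 / cpr_8 / div / tcr;
+   }
+
+With the defaults (tcr = 16, cpr_8 = 271, div = 1) this yields the
+115313.653Hz reference frequency quoted above.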
+
+Here are the figures for the standard and some non-standard baud rates
+(including those quoted in Oxford Semiconductor documentation), giving
+the requested rate (r), the actual rate yielded (a) and its deviation
+from the requested rate (d), and the values of the oversampling rate
+(tcr), the clock prescaler (cpr) and the divisor (div) produced by the
+new ``get_divisor`` handler:
+
+::
+
+ r: 15625000, a: 15625000.00, d: 0.0000%, tcr: 4, cpr: 1.000, div: 1
+ r: 12500000, a: 12500000.00, d: 0.0000%, tcr: 5, cpr: 1.000, div: 1
+ r: 10416666, a: 10416666.67, d: 0.0000%, tcr: 6, cpr: 1.000, div: 1
+ r: 8928571, a: 8928571.43, d: 0.0000%, tcr: 7, cpr: 1.000, div: 1
+ r: 7812500, a: 7812500.00, d: 0.0000%, tcr: 8, cpr: 1.000, div: 1
+ r: 4000000, a: 4000000.00, d: 0.0000%, tcr: 5, cpr: 3.125, div: 1
+ r: 3686400, a: 3676470.59, d: -0.2694%, tcr: 8, cpr: 2.125, div: 1
+ r: 3500000, a: 3496503.50, d: -0.0999%, tcr: 13, cpr: 1.375, div: 1
+ r: 3000000, a: 2976190.48, d: -0.7937%, tcr: 14, cpr: 1.500, div: 1
+ r: 2500000, a: 2500000.00, d: 0.0000%, tcr: 10, cpr: 2.500, div: 1
+ r: 2000000, a: 2000000.00, d: 0.0000%, tcr: 10, cpr: 3.125, div: 1
+ r: 1843200, a: 1838235.29, d: -0.2694%, tcr: 16, cpr: 2.125, div: 1
+ r: 1500000, a: 1492537.31, d: -0.4975%, tcr: 5, cpr: 8.375, div: 1
+ r: 1152000, a: 1152073.73, d: 0.0064%, tcr: 14, cpr: 3.875, div: 1
+ r: 921600, a: 919117.65, d: -0.2694%, tcr: 16, cpr: 2.125, div: 2
+ r: 576000, a: 576036.87, d: 0.0064%, tcr: 14, cpr: 3.875, div: 2
+ r: 460800, a: 460829.49, d: 0.0064%, tcr: 7, cpr: 3.875, div: 5
+ r: 230400, a: 230414.75, d: 0.0064%, tcr: 14, cpr: 3.875, div: 5
+ r: 115200, a: 115207.37, d: 0.0064%, tcr: 14, cpr: 1.250, div: 31
+ r: 57600, a: 57603.69, d: 0.0064%, tcr: 8, cpr: 3.875, div: 35
+ r: 38400, a: 38402.46, d: 0.0064%, tcr: 14, cpr: 3.875, div: 30
+ r: 19200, a: 19201.23, d: 0.0064%, tcr: 8, cpr: 3.875, div: 105
+ r: 9600, a: 9600.06, d: 0.0006%, tcr: 9, cpr: 1.125, div: 643
+ r: 4800, a: 4799.98, d: -0.0004%, tcr: 7, cpr: 2.875, div: 647
+ r: 2400, a: 2400.02, d: 0.0008%, tcr: 9, cpr: 2.250, div: 1286
+ r: 1200, a: 1200.00, d: 0.0000%, tcr: 14, cpr: 2.875, div: 1294
+ r: 300, a: 300.00, d: 0.0000%, tcr: 11, cpr: 2.625, div: 7215
+ r: 200, a: 200.00, d: 0.0000%, tcr: 16, cpr: 1.250, div: 15625
+ r: 150, a: 150.00, d: 0.0000%, tcr: 13, cpr: 2.250, div: 14245
+ r: 134, a: 134.00, d: 0.0000%, tcr: 11, cpr: 2.625, div: 16153
+ r: 110, a: 110.00, d: 0.0000%, tcr: 12, cpr: 1.000, div: 47348
+ r: 75, a: 75.00, d: 0.0000%, tcr: 4, cpr: 5.875, div: 35461
+ r: 50, a: 50.00, d: 0.0000%, tcr: 16, cpr: 1.250, div: 62500
+ r: 25, a: 25.00, d: 0.0000%, tcr: 16, cpr: 2.500, div: 62500
+ r: 4, a: 4.00, d: 0.0000%, tcr: 16, cpr: 20.000, div: 48828
+ r: 2, a: 2.00, d: 0.0000%, tcr: 16, cpr: 40.000, div: 48828
+ r: 1, a: 1.00, d: 0.0000%, tcr: 16, cpr: 63.875, div: 61154
+
+With the baud base set to 15625000 and the unsigned 16-bit UART_DIV_MAX
+limitation imposed by ``serial8250_get_baud_rate``, standard baud rates
+below 300bps become unavailable in the regular way; e.g. the rate of
+200bps requires the baud base to be divided by 78125, which is beyond
+the unsigned 16-bit range. The historic spd_cust feature can still be
+used to obtain such rates if so required, by encoding the values for
+the prescaler, the oversampling rate and the clock divisor (DLM/DLL)
+as follows:
+
+::
+
+ 31 29 28 20 19 16 15 0
+ +-----+-----------------+-------+-------------------------------+
+ |0 0 0| CPR2:CPR | TCR | DLM:DLL |
+ +-----+-----------------+-------+-------------------------------+
+
+Use a value encoded this way for the ``custom_divisor`` field along with the
+ASYNC_SPD_CUST flag set in the ``flags`` field in ``struct serial_struct``
+passed with the TIOCSSERIAL ioctl(2), such as with the setserial(8)
+utility and its ``divisor`` and ``spd_cust`` parameters, and then select
+the baud rate of 38400bps. Note that the value of 0 in TCR sets the
+oversampling rate to 16 and prescaler values below 1 in CPR2/CPR are
+clamped by the driver to 1.
+
+For example, the value of 0x1f4004e2 will set CPR2/CPR, TCR and DLM/DLL
+respectively to 0x1f4, 0x0 and 0x04e2, choosing the prescaler value,
+the oversampling rate and the clock divisor of 62.500, 16 and 1250
+respectively. These parameters will set the baud rate for the serial
+port to 62500000 / 62.500 / 1250 / 16 = 50bps.
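+
+A small sketch of how such a value could be composed (field layout as in the
+diagram above; the helper name is made up and this is plain illustration, not
+part of the driver):
+
+::
+
+   static unsigned int oxsemi_custom_divisor(unsigned int cpr_8, /* prescaler * 8 */
+                                             unsigned int tcr,   /* 0 selects 16  */
+                                             unsigned int div)   /* DLM:DLL       */
+   {
+           return (cpr_8 & 0x1ff) << 20 | (tcr & 0xf) << 16 | (div & 0xffff);
+   }
+
+Calling it with cpr_8 = 0x1f4 (62.500), tcr = 0 (16) and div = 0x04e2 (1250)
+reproduces the 0x1f4004e2 value discussed above, i.e. 50bps.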
+
+Maciej W. Rozycki <macro@orcam.me.uk>
+
+.. [OX200] "OXPCIe200 PCI Express Multi-Port Bridge", Oxford Semiconductor,
+ Inc., DS-0045, 10 Nov 2008, Section "950 Mode", pp. 64-65
+
+.. [OX952] "OXPCIe952 PCI Express Bridge to Dual Serial & Parallel Port",
+ Oxford Semiconductor, Inc., DS-0046, Mar 06 08, Section "950 Mode",
+ p. 20
+
+.. [OX954] "OXPCIe954 PCI Express Bridge to Quad Serial Port", Oxford
+ Semiconductor, Inc., DS-0047, Feb 08, Section "950 Mode", p. 20
+
+.. [OX958] "OXPCIe958 PCI Express Bridge to Octal Serial Port", Oxford
+ Semiconductor, Inc., DS-0048, Feb 08, Section "950 Mode", p. 20
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index b882d4238581..04216564a03c 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -2474,21 +2474,16 @@ drop_unsolicited_na - BOOLEAN
By default this is turned off.
-accept_unsolicited_na - BOOLEAN
- Add a new neighbour cache entry in STALE state for routers on receiving an
- unsolicited neighbour advertisement with target link-layer address option
- specified. This is as per router-side behavior documented in RFC9131.
- This has lower precedence than drop_unsolicited_na.
+accept_untracked_na - BOOLEAN
+ Add a new neighbour cache entry in STALE state for routers on receiving a
+ neighbour advertisement (either solicited or unsolicited) with target
+ link-layer address option specified if no neighbour entry is already
+ present for the advertised IPv6 address. Without this knob, NAs received
+ for untracked addresses (absent in neighbour cache) are silently ignored.
+
+ This is as per router-side behaviour documented in RFC9131.
- ==== ====== ====== ==============================================
- drop accept fwding behaviour
- ---- ------ ------ ----------------------------------------------
- 1 X X Drop NA packet and don't pass up the stack
- 0 0 X Pass NA packet up the stack, don't update NC
- 0 1 0 Pass NA packet up the stack, don't update NC
- 0 1 1 Pass NA packet up the stack, and add a STALE
- NC entry
- ==== ====== ====== ==============================================
+ This has lower precedence than drop_unsolicited_na.
This will optimize the return path for the initial off-link communication
that is initiated by a directly connected host, by ensuring that
diff --git a/Documentation/powerpc/dawr-power9.rst b/Documentation/powerpc/dawr-power9.rst
index e55ac6a24b97..310f2e0cea81 100644
--- a/Documentation/powerpc/dawr-power9.rst
+++ b/Documentation/powerpc/dawr-power9.rst
@@ -2,15 +2,23 @@
DAWR issues on POWER9
=====================
-On POWER9 the Data Address Watchpoint Register (DAWR) can cause a checkstop
-if it points to cache inhibited (CI) memory. Currently Linux has no way to
-distinguish CI memory when configuring the DAWR, so (for now) the DAWR is
-disabled by this commit::
-
- commit 9654153158d3e0684a1bdb76dbababdb7111d5a0
- Author: Michael Neuling <mikey@neuling.org>
- Date: Tue Mar 27 15:37:24 2018 +1100
- powerpc: Disable DAWR in the base POWER9 CPU features
+On older POWER9 processors, the Data Address Watchpoint Register (DAWR) can
+cause a checkstop if it points to cache inhibited (CI) memory. Currently Linux
+has no way to distinguish CI memory when configuring the DAWR, so on affected
+systems, the DAWR is disabled.
+
+Affected processor revisions
+============================
+
+This issue is only present on processors prior to v2.3. The revision can be
+found in /proc/cpuinfo::
+
+ processor : 0
+ cpu : POWER9, altivec supported
+ clock : 3800.000000MHz
+ revision : 2.3 (pvr 004e 1203)
+
+On a system with the issue, the DAWR is disabled as detailed below.
Technical Details:
==================
diff --git a/Documentation/powerpc/kasan.txt b/Documentation/powerpc/kasan.txt
new file mode 100644
index 000000000000..f032b4eaf205
--- /dev/null
+++ b/Documentation/powerpc/kasan.txt
@@ -0,0 +1,58 @@
+On powerpc, KASAN is supported only on 32-bit and on 64-bit with the Radix MMU.
+
+32 bit support
+==============
+
+KASAN is supported on both hash and nohash MMUs on 32-bit.
+
+The shadow area sits at the top of the kernel virtual memory space above the
+fixmap area and occupies one eighth of the total kernel virtual memory space.
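+
+The one-eighth ratio follows from the generic KASAN scheme, where each shadow
+byte covers eight bytes of kernel memory. As a sketch (the helper name is
+illustrative; KASAN_SHADOW_OFFSET stands for the platform's chosen constant):
+
+   static inline unsigned long shadow_of(unsigned long addr)
+   {
+           return (addr >> 3) + KASAN_SHADOW_OFFSET;
+   }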
+
+Instrumentation of the vmalloc area is optional, unless built with modules,
+in which case it is required.
+
+64 bit support
+==============
+
+Currently, only the radix MMU is supported. There have been versions for hash
+and Book3E processors floating around on the mailing list, but nothing has been
+merged.
+
+KASAN support on Book3S is a bit tricky to get right:
+
+ - It would be good to support inline instrumentation so as to be able to catch
+ stack issues that cannot be caught with outline mode.
+
+ - Inline instrumentation requires a fixed offset.
+
+ - Book3S runs code with translations off ("real mode") during boot, including a
+ lot of generic device-tree parsing code which is used to determine MMU
+ features.
+
+ - Some code - most notably a lot of KVM code - also runs with translations off
+ after boot.
+
+ - Therefore any offset has to point to memory that is valid with
+ translations on or off.
+
+One approach is just to give up on inline instrumentation. This way boot-time
+checks can be delayed until after the MMU is set up, and we can just not
+instrument any code that runs with translations off after booting. This is the
+current approach.
+
+To avoid this limitation, the KASAN shadow would have to be placed inside the
+linear mapping, using the same high-bits trick we use for the rest of the linear
+mapping. This is tricky:
+
+ - We'd like to place it near the start of physical memory. In theory we can do
+ this at run-time based on how much physical memory we have, but this requires
+ being able to arbitrarily relocate the kernel, which is basically the tricky
+ part of KASLR. We are not game to implement both tricky things at once, so this
+ is hopefully something we can revisit once we get KASLR for Book3S.
+
+ - Alternatively, we can place the shadow at the _end_ of memory, but this
+ requires knowing how much contiguous physical memory a system has _at compile
+ time_. This is a big hammer, and has some unfortunate consequences: inability
+ to handle discontiguous physical memory, total failure to boot on machines
+ with less memory than specified, and the inability of machines with more
+ memory than specified to use the excess. This was deemed unacceptable.
diff --git a/Documentation/riscv/vm-layout.rst b/Documentation/riscv/vm-layout.rst
index 1bd687b97104..5b36e45fef60 100644
--- a/Documentation/riscv/vm-layout.rst
+++ b/Documentation/riscv/vm-layout.rst
@@ -61,3 +61,39 @@ RISC-V Linux Kernel SV39
ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | modules, BPF
ffffffff80000000 | -2 GB | ffffffffffffffff | 2 GB | kernel
__________________|____________|__________________|_________|____________________________________________________________
+
+
+RISC-V Linux Kernel SV48
+------------------------
+
+::
+
+ ========================================================================================================================
+ Start addr | Offset | End addr | Size | VM area description
+ ========================================================================================================================
+ | | | |
+ 0000000000000000 | 0 | 00007fffffffffff | 128 TB | user-space virtual memory, different per mm
+ __________________|____________|__________________|_________|___________________________________________________________
+ | | | |
+ 0000800000000000 | +128 TB | ffff7fffffffffff | ~16M TB | ... huge, almost 64 bits wide hole of non-canonical
+ | | | | virtual memory addresses up to the -128 TB
+ | | | | starting offset of kernel mappings.
+ __________________|____________|__________________|_________|___________________________________________________________
+ |
+ | Kernel-space virtual memory, shared between all processes:
+ ____________________________________________________________|___________________________________________________________
+ | | | |
+ ffff8d7ffee00000 | -114.5 TB | ffff8d7ffeffffff | 2 MB | fixmap
+ ffff8d7fff000000 | -114.5 TB | ffff8d7fffffffff | 16 MB | PCI io
+ ffff8d8000000000 | -114.5 TB | ffff8f7fffffffff | 2 TB | vmemmap
+ ffff8f8000000000 | -112.5 TB | ffffaf7fffffffff | 32 TB | vmalloc/ioremap space
+ ffffaf8000000000 | -80.5 TB | ffffef7fffffffff | 64 TB | direct mapping of all physical memory
+ ffffef8000000000 | -16.5 TB | fffffffeffffffff | 16.5 TB | kasan
+ __________________|____________|__________________|_________|____________________________________________________________
+ |
+ | Identical layout to the 39-bit one from here on:
+ ____________________________________________________________|____________________________________________________________
+ | | | |
+ ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | modules, BPF
+ ffffffff80000000 | -2 GB | ffffffffffffffff | 2 GB | kernel
+ __________________|____________|__________________|_________|____________________________________________________________
diff --git a/Documentation/tools/rtla/Makefile b/Documentation/tools/rtla/Makefile
index 9f2b84af1a6c..093af6d7a0e9 100644
--- a/Documentation/tools/rtla/Makefile
+++ b/Documentation/tools/rtla/Makefile
@@ -17,9 +17,21 @@ DOC_MAN1 = $(addprefix $(OUTPUT),$(_DOC_MAN1))
RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
RST2MAN_OPTS += --verbose
+TEST_RST2MAN = $(shell sh -c "rst2man --version > /dev/null 2>&1 || echo n")
+
$(OUTPUT)%.1: %.rst
ifndef RST2MAN_DEP
- $(error "rst2man not found, but required to generate man pages")
+ $(info ********************************************)
+ $(info ** NOTICE: rst2man not found)
+ $(info **)
+ $(info ** Consider installing the latest rst2man from your)
+ $(info ** distribution, e.g., 'dnf install python3-docutils' on Fedora,)
+ $(info ** or from source:)
+ $(info **)
+ $(info ** https://docutils.sourceforge.io/docs/dev/repository.html )
+ $(info **)
+ $(info ********************************************)
+ $(error NOTICE: rst2man required to generate man pages)
endif
rst2man $(RST2MAN_OPTS) $< > $@
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index 45b8c56af67a..b37dc19e4d40 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -517,6 +517,18 @@ of ftrace. Here is a list of some of the key files:
processing should be able to handle them. See comments in the
ktime_get_boot_fast_ns() function for more information.
+ tai:
+ This is the tai clock (CLOCK_TAI) and is derived from the wall-
+ clock time. However, this clock does not experience
+ discontinuities and backwards jumps caused by NTP inserting leap
+ seconds. Since the clock access is designed for use in tracing,
+ side effects are possible. The clock access may yield wrong
+ readouts in case the internal TAI offset is updated, e.g. when the
+ system time is set or adjtimex() is used with an offset.
+ These effects are rare and post processing should be able to
+ handle them. See comments in the ktime_get_tai_fast_ns()
+ function for more information.
+
To set a clock, simply echo the clock name into this file::
# echo global > trace_clock
diff --git a/Documentation/trace/timerlat-tracer.rst b/Documentation/trace/timerlat-tracer.rst
index 64d1fe6e9b93..d643c95c01eb 100644
--- a/Documentation/trace/timerlat-tracer.rst
+++ b/Documentation/trace/timerlat-tracer.rst
@@ -74,8 +74,9 @@ directory. The timerlat configs are:
- stop_tracing_total_us: stop the system tracing if a
timer latency at the *thread* context is higher than the configured
value happens. Writing 0 disables this option.
- - print_stack: save the stack of the IRQ occurrence, and print
- it after the *thread context* event".
+ - print_stack: save the stack of the IRQ occurrence. The stack is printed
+ after the *thread context* event, or at the IRQ handler if *stop_tracing_us*
+ is hit.
timerlat and osnoise
----------------------------
diff --git a/Documentation/translations/zh_CN/index.rst b/Documentation/translations/zh_CN/index.rst
index ac32d8e306ac..ad7bb8c17562 100644
--- a/Documentation/translations/zh_CN/index.rst
+++ b/Documentation/translations/zh_CN/index.rst
@@ -171,6 +171,7 @@ TODOList:
riscv/index
openrisc/index
parisc/index
+ loongarch/index
TODOList:
diff --git a/Documentation/translations/zh_CN/loongarch/features.rst b/Documentation/translations/zh_CN/loongarch/features.rst
new file mode 100644
index 000000000000..3886e635ec06
--- /dev/null
+++ b/Documentation/translations/zh_CN/loongarch/features.rst
@@ -0,0 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/loongarch/features.rst
+:Translator: Huacai Chen <chenhuacai@loongson.cn>
+
+.. kernel-feat:: $srctree/Documentation/features loongarch
diff --git a/Documentation/translations/zh_CN/loongarch/index.rst b/Documentation/translations/zh_CN/loongarch/index.rst
new file mode 100644
index 000000000000..7d23eb78379d
--- /dev/null
+++ b/Documentation/translations/zh_CN/loongarch/index.rst
@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/loongarch/index.rst
+:Translator: Huacai Chen <chenhuacai@loongson.cn>
+
+=================
+LoongArch体系结构
+=================
+
+.. toctree::
+ :maxdepth: 2
+ :numbered:
+
+ introduction
+ irq-chip-model
+
+ features
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/translations/zh_CN/loongarch/introduction.rst b/Documentation/translations/zh_CN/loongarch/introduction.rst
new file mode 100644
index 000000000000..e31a1a928c48
--- /dev/null
+++ b/Documentation/translations/zh_CN/loongarch/introduction.rst
@@ -0,0 +1,351 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/loongarch/introduction.rst
+:Translator: Huacai Chen <chenhuacai@loongson.cn>
+
+=============
+LoongArch介绍
+=============
+
+LoongArch是一种新的RISC ISA,在一定程度上类似于MIPS和RISC-V。LoongArch指令集
+包括一个精简32位版(LA32R)、一个标准32位版(LA32S)、一个64位版(LA64)。
+LoongArch定义了四个特权级(PLV0~PLV3),其中PLV0是最高特权级,用于内核;而PLV3
+是最低特权级,用于应用程序。本文档介绍了LoongArch的寄存器、基础指令集、虚拟内
+存以及其他一些主题。
+
+寄存器
+======
+
+LoongArch的寄存器包括通用寄存器(GPRs)、浮点寄存器(FPRs)、向量寄存器(VRs)
+和用于特权模式(PLV0)的控制状态寄存器(CSRs)。
+
+通用寄存器
+----------
+
+LoongArch包括32个通用寄存器( ``$r0`` ~ ``$r31`` ),LA32中每个寄存器为32位宽,
+LA64中每个寄存器为64位宽。 ``$r0`` 的内容总是固定为0,而其他寄存器在体系结构层面
+没有特殊功能。( ``$r1`` 算是一个例外,在BL指令中固定用作链接返回寄存器。)
+
+内核使用了一套LoongArch寄存器约定,定义在LoongArch ELF psABI规范中,详细描述参见
+:ref:`参考文献 <loongarch-references-zh_CN>`:
+
+================= =============== =================== ==========
+寄存器名 别名 用途 跨调用保持
+================= =============== =================== ==========
+``$r0`` ``$zero`` 常量0 不使用
+``$r1`` ``$ra`` 返回地址 否
+``$r2`` ``$tp`` TLS/线程信息指针 不使用
+``$r3`` ``$sp`` 栈指针 是
+``$r4``-``$r11`` ``$a0``-``$a7`` 参数寄存器 否
+``$r4``-``$r5`` ``$v0``-``$v1`` 返回值 否
+``$r12``-``$r20`` ``$t0``-``$t8`` 临时寄存器 否
+``$r21`` ``$u0`` 每CPU变量基地址 不使用
+``$r22`` ``$fp`` 帧指针 是
+``$r23``-``$r31`` ``$s0``-``$s8`` 静态寄存器 是
+================= =============== =================== ==========
+
+注意:``$r21``寄存器在ELF psABI中保留未使用,但是在Linux内核用于保存每CPU
+变量基地址。该寄存器没有ABI命名,不过在内核中称为``$u0``。在一些遗留代码
+中有时可能见到``$v0``和``$v1``,它们是``$a0``和``$a1``的别名,属于已经废弃
+的用法。
+
+浮点寄存器
+----------
+
+当系统中存在FPU时,LoongArch有32个浮点寄存器( ``$f0`` ~ ``$f31`` )。在LA64
+的CPU核上,每个寄存器均为64位宽。
+
+浮点寄存器的使用约定与LoongArch ELF psABI规范的描述相同:
+
+================= ================== =================== ==========
+寄存器名 别名 用途 跨调用保持
+================= ================== =================== ==========
+``$f0``-``$f7`` ``$fa0``-``$fa7`` 参数寄存器 否
+``$f0``-``$f1`` ``$fv0``-``$fv1`` 返回值 否
+``$f8``-``$f23`` ``$ft0``-``$ft15`` 临时寄存器 否
+``$f24``-``$f31`` ``$fs0``-``$fs7`` 静态寄存器 是
+================= ================== =================== ==========
+
+注意:在一些遗留代码中有时可能见到 ``$v0`` 和 ``$v1`` ,它们是 ``$a0``
+和 ``$a1`` 的别名,属于已经废弃的用法。
+
+
+向量寄存器
+----------
+
+LoongArch现有两种向量扩展:
+
+- 128位向量扩展LSX(全称Loongson SIMD eXtension),
+- 256位向量扩展LASX(全称Loongson Advanced SIMD eXtension)。
+
+LSX使用 ``$v0`` ~ ``$v31`` 向量寄存器,而LASX则使用 ``$x0`` ~ ``$x31`` 。
+
+浮点寄存器和向量寄存器是复用的,比如:在一个实现了LSX和LASX的核上, ``$x0`` 的
+低128位与 ``$v0`` 共用, ``$v0`` 的低64位与 ``$f0`` 共用,其他寄存器依此类推。
+
+控制状态寄存器
+--------------
+
+控制状态寄存器只能在特权模式(PLV0)下访问:
+
+================= ==================================== ==========
+地址 全称描述 简称
+================= ==================================== ==========
+0x0 当前模式信息 CRMD
+0x1 异常前模式信息 PRMD
+0x2 扩展部件使能 EUEN
+0x3 杂项控制 MISC
+0x4 异常配置 ECFG
+0x5 异常状态 ESTAT
+0x6 异常返回地址 ERA
+0x7 出错(Faulting)虚拟地址 BADV
+0x8 出错(Faulting)指令字 BADI
+0xC 异常入口地址 EENTRY
+0x10 TLB索引 TLBIDX
+0x11 TLB表项高位 TLBEHI
+0x12 TLB表项低位0 TLBELO0
+0x13 TLB表项低位1 TLBELO1
+0x18 地址空间标识符 ASID
+0x19 低半地址空间页全局目录基址 PGDL
+0x1A 高半地址空间页全局目录基址 PGDH
+0x1B 页全局目录基址 PGD
+0x1C 页表遍历控制低半部分 PWCL
+0x1D 页表遍历控制高半部分 PWCH
+0x1E STLB页大小 STLBPS
+0x1F 缩减虚地址配置 RVACFG
+0x20 CPU编号 CPUID
+0x21 特权资源配置信息1 PRCFG1
+0x22 特权资源配置信息2 PRCFG2
+0x23 特权资源配置信息3 PRCFG3
+0x30+n (0≤n≤15) 数据保存寄存器 SAVEn
+0x40 定时器编号 TID
+0x41 定时器配置 TCFG
+0x42 定时器值 TVAL
+0x43 计时器补偿 CNTC
+0x44 定时器中断清除 TICLR
+0x60 LLBit相关控制 LLBCTL
+0x80 实现相关控制1 IMPCTL1
+0x81 实现相关控制2 IMPCTL2
+0x88 TLB重填异常入口地址 TLBRENTRY
+0x89 TLB重填异常出错(Faulting)虚地址 TLBRBADV
+0x8A TLB重填异常返回地址 TLBRERA
+0x8B TLB重填异常数据保存 TLBRSAVE
+0x8C TLB重填异常表项低位0 TLBRELO0
+0x8D TLB重填异常表项低位1 TLBRELO1
+0x8E TLB重填异常表项高位 TLBREHI
+0x8F TLB重填异常前模式信息 TLBRPRMD
+0x90 机器错误控制 MERRCTL
+0x91 机器错误信息1 MERRINFO1
+0x92 机器错误信息2 MERRINFO2
+0x93 机器错误异常入口地址 MERRENTRY
+0x94 机器错误异常返回地址 MERRERA
+0x95 机器错误异常数据保存 MERRSAVE
+0x98 高速缓存标签 CTAG
+0x180+n (0≤n≤3) 直接映射配置窗口n DMWn
+0x200+2n (0≤n≤31) 性能监测配置n PMCFGn
+0x201+2n (0≤n≤31) 性能监测计数器n PMCNTn
+0x300 内存读写监视点整体控制 MWPC
+0x301 内存读写监视点整体状态 MWPS
+0x310+8n (0≤n≤7) 内存读写监视点n配置1 MWPnCFG1
+0x311+8n (0≤n≤7) 内存读写监视点n配置2 MWPnCFG2
+0x312+8n (0≤n≤7) 内存读写监视点n配置3 MWPnCFG3
+0x313+8n (0≤n≤7) 内存读写监视点n配置4 MWPnCFG4
+0x380 取指监视点整体控制 FWPC
+0x381 取指监视点整体状态 FWPS
+0x390+8n (0≤n≤7) 取指监视点n配置1 FWPnCFG1
+0x391+8n (0≤n≤7) 取指监视点n配置2 FWPnCFG2
+0x392+8n (0≤n≤7) 取指监视点n配置3 FWPnCFG3
+0x393+8n (0≤n≤7) 取指监视点n配置4 FWPnCFG4
+0x500 调试寄存器 DBG
+0x501 调试异常返回地址 DERA
+0x502 调试数据保存 DSAVE
+================= ==================================== ==========
+
+ERA,TLBRERA,MERRERA和DERA有时也分别称为EPC,TLBREPC,MERREPC和DEPC。
+
+基础指令集
+==========
+
+指令格式
+--------
+
+LoongArch的指令字长为32位,一共有9种基本指令格式(以及一些变体):
+
+=========== ==========================
+格式名称 指令构成
+=========== ==========================
+2R Opcode + Rj + Rd
+3R Opcode + Rk + Rj + Rd
+4R Opcode + Ra + Rk + Rj + Rd
+2RI8 Opcode + I8 + Rj + Rd
+2RI12 Opcode + I12 + Rj + Rd
+2RI14 Opcode + I14 + Rj + Rd
+2RI16 Opcode + I16 + Rj + Rd
+1RI21 Opcode + I21L + Rj + I21H
+I26 Opcode + I26L + I26H
+=========== ==========================
+
+Opcode是指令操作码,Rj和Rk是源操作数(寄存器),Rd是目标操作数(寄存器),Ra是
+4R-type格式特有的附加操作数(寄存器)。I8/I12/I16/I21/I26分别是8位/12位/16位/
+21位/26位的立即数。其中较长的21位和26位立即数在指令字中被分割为高位部分与低位
+部分,所以你们在这里的格式描述中能够看到I21L/I21H和I26L/I26H这样带后缀的表述。
+
+指令列表
+--------
+
+为了简便起见,我们在此只罗列一下指令名称(助记符),需要详细信息请阅读
+:ref:`参考文献 <loongarch-references-zh_CN>` 中的文档。
+
+1. 算术运算指令::
+
+ ADD.W SUB.W ADDI.W ADD.D SUB.D ADDI.D
+ SLT SLTU SLTI SLTUI
+ AND OR NOR XOR ANDN ORN ANDI ORI XORI
+ MUL.W MULH.W MULH.WU DIV.W DIV.WU MOD.W MOD.WU
+ MUL.D MULH.D MULH.DU DIV.D DIV.DU MOD.D MOD.DU
+ PCADDI PCADDU12I PCADDU18I
+ LU12I.W LU32I.D LU52I.D ADDU16I.D
+
+2. 移位运算指令::
+
+ SLL.W SRL.W SRA.W ROTR.W SLLI.W SRLI.W SRAI.W ROTRI.W
+ SLL.D SRL.D SRA.D ROTR.D SLLI.D SRLI.D SRAI.D ROTRI.D
+
+3. 位域操作指令::
+
+ EXT.W.B EXT.W.H CLO.W CLO.D CLZ.W CLZ.D CTO.W CTO.D CTZ.W CTZ.D
+ BYTEPICK.W BYTEPICK.D BSTRINS.W BSTRINS.D BSTRPICK.W BSTRPICK.D
+ REVB.2H REVB.4H REVB.2W REVB.D REVH.2W REVH.D BITREV.4B BITREV.8B BITREV.W BITREV.D
+ MASKEQZ MASKNEZ
+
+4. 分支转移指令::
+
+ BEQ BNE BLT BGE BLTU BGEU BEQZ BNEZ B BL JIRL
+
+5. 访存读写指令::
+
+ LD.B LD.BU LD.H LD.HU LD.W LD.WU LD.D ST.B ST.H ST.W ST.D
+ LDX.B LDX.BU LDX.H LDX.HU LDX.W LDX.WU LDX.D STX.B STX.H STX.W STX.D
+ LDPTR.W LDPTR.D STPTR.W STPTR.D
+ PRELD PRELDX
+
+6. 原子操作指令::
+
+ LL.W SC.W LL.D SC.D
+ AMSWAP.W AMSWAP.D AMADD.W AMADD.D AMAND.W AMAND.D AMOR.W AMOR.D AMXOR.W AMXOR.D
+ AMMAX.W AMMAX.D AMMIN.W AMMIN.D
+
+7. 栅障指令::
+
+ IBAR DBAR
+
+8. 特殊指令::
+
+ SYSCALL BREAK CPUCFG NOP IDLE ERTN(ERET) DBCL(DBGCALL) RDTIMEL.W RDTIMEH.W RDTIME.D
+ ASRTLE.D ASRTGT.D
+
+9. 特权指令::
+
+ CSRRD CSRWR CSRXCHG
+ IOCSRRD.B IOCSRRD.H IOCSRRD.W IOCSRRD.D IOCSRWR.B IOCSRWR.H IOCSRWR.W IOCSRWR.D
+ CACOP TLBP(TLBSRCH) TLBRD TLBWR TLBFILL TLBCLR TLBFLUSH INVTLB LDDIR LDPTE
+
+虚拟内存
+========
+
+LoongArch可以使用直接映射虚拟内存和分页映射虚拟内存。
+
+直接映射虚拟内存通过CSR.DMWn(n=0~3)来进行配置,虚拟地址(VA)和物理地址(PA)
+之间有简单的映射关系::
+
+ VA = PA + 固定偏移
+
+分页映射的虚拟地址(VA)和物理地址(PA)有任意的映射关系,这种关系记录在TLB和页
+表中。LoongArch的TLB包括一个全相联的MTLB(Multiple Page Size TLB,多样页大小TLB)
+和一个组相联的STLB(Single Page Size TLB,单一页大小TLB)。
+
+缺省状态下,LA32的整个虚拟地址空间配置如下:
+
+============ =========================== ===========================
+区段名 地址范围 属性
+============ =========================== ===========================
+``UVRANGE`` ``0x00000000 - 0x7FFFFFFF`` 分页映射, 可缓存, PLV0~3
+``KPRANGE0`` ``0x80000000 - 0x9FFFFFFF`` 直接映射, 非缓存, PLV0
+``KPRANGE1`` ``0xA0000000 - 0xBFFFFFFF`` 直接映射, 可缓存, PLV0
+``KVRANGE`` ``0xC0000000 - 0xFFFFFFFF`` 分页映射, 可缓存, PLV0
+============ =========================== ===========================
+
+用户态(PLV3)只能访问UVRANGE,对于直接映射的KPRANGE0和KPRANGE1,将虚拟地址的第
+30~31位清零就等于物理地址。例如:物理地址0x00001000对应的非缓存直接映射虚拟地址
+是0x80001000,而其可缓存直接映射虚拟地址是0xA0001000。
+
+缺省状态下,LA64的整个虚拟地址空间配置如下:
+
+============ ====================== ==================================
+区段名 地址范围 属性
+============ ====================== ==================================
+``XUVRANGE`` ``0x0000000000000000 - 分页映射, 可缓存, PLV0~3
+ 0x3FFFFFFFFFFFFFFF``
+``XSPRANGE`` ``0x4000000000000000 - 直接映射, 可缓存 / 非缓存, PLV0
+ 0x7FFFFFFFFFFFFFFF``
+``XKPRANGE`` ``0x8000000000000000 - 直接映射, 可缓存 / 非缓存, PLV0
+ 0xBFFFFFFFFFFFFFFF``
+``XKVRANGE`` ``0xC000000000000000 - 分页映射, 可缓存, PLV0
+ 0xFFFFFFFFFFFFFFFF``
+============ ====================== ==================================
+
+用户态(PLV3)只能访问XUVRANGE,对于直接映射的XSPRANGE和XKPRANGE,将虚拟地址的第
+60~63位清零就等于物理地址,而其缓存属性是通过虚拟地址的第60~61位配置的(0表示强序
+非缓存,1表示一致可缓存,2表示弱序非缓存)。
+
+目前,我们仅用XKPRANGE来进行直接映射,XSPRANGE保留给以后用。
+
+此处给出一个直接映射的例子:物理地址0x00000000_00001000的强序非缓存直接映射虚拟地址
+(在XKPRANGE中)是0x80000000_00001000,其一致可缓存直接映射虚拟地址(在XKPRANGE中)
+是0x90000000_00001000,而其弱序非缓存直接映射虚拟地址(在XKPRANGE中)是0xA0000000_
+00001000。
+
+Loongson与LoongArch的关系
+=========================
+
+LoongArch是一种RISC指令集架构(ISA),不同于现存的任何一种ISA,而Loongson(即龙
+芯)是一个处理器家族。龙芯包括三个系列:Loongson-1(龙芯1号)是32位处理器系列,
+Loongson-2(龙芯2号)是低端64位处理器系列,而Loongson-3(龙芯3号)是高端64位处理
+器系列。旧的龙芯处理器基于MIPS架构,而新的龙芯处理器基于LoongArch架构。以龙芯3号
+为例:龙芯3A1000/3B1500/3A2000/3A3000/3A4000都是兼容MIPS的,而龙芯3A5000(以及将
+来的型号)都是基于LoongArch的。
+
+.. _loongarch-references-zh_CN:
+
+参考文献
+========
+
+Loongson官方网站(龙芯中科技术股份有限公司):
+
+ http://www.loongson.cn/
+
+Loongson与LoongArch的开发者网站(软件与文档资源):
+
+ http://www.loongnix.cn/
+
+ https://github.com/loongson/
+
+ https://loongson.github.io/LoongArch-Documentation/
+
+LoongArch指令集架构的文档:
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.00-CN.pdf (中文版)
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.00-EN.pdf (英文版)
+
+LoongArch的ELF psABI文档:
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v1.00-CN.pdf (中文版)
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v1.00-EN.pdf (英文版)
+
+Loongson与LoongArch的Linux内核源码仓库:
+
+ https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson.git
diff --git a/Documentation/translations/zh_CN/loongarch/irq-chip-model.rst b/Documentation/translations/zh_CN/loongarch/irq-chip-model.rst
new file mode 100644
index 000000000000..2a4c3ad38be4
--- /dev/null
+++ b/Documentation/translations/zh_CN/loongarch/irq-chip-model.rst
@@ -0,0 +1,155 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/loongarch/irq-chip-model.rst
+:Translator: Huacai Chen <chenhuacai@loongson.cn>
+
+==================================
+LoongArch的IRQ芯片模型(层级关系)
+==================================
+
+目前,基于LoongArch的处理器(如龙芯3A5000)只能与LS7A芯片组配合工作。LoongArch计算机
+中的中断控制器(即IRQ芯片)包括CPUINTC(CPU Core Interrupt Controller)、LIOINTC(
+Legacy I/O Interrupt Controller)、EIOINTC(Extended I/O Interrupt Controller)、
+HTVECINTC(Hyper-Transport Vector Interrupt Controller)、PCH-PIC(LS7A芯片组的主中
+断控制器)、PCH-LPC(LS7A芯片组的LPC中断控制器)和PCH-MSI(MSI中断控制器)。
+
+CPUINTC是一种CPU内部的每个核本地的中断控制器,LIOINTC/EIOINTC/HTVECINTC是CPU内部的
+全局中断控制器(每个芯片一个,所有核共享),而PCH-PIC/PCH-LPC/PCH-MSI是CPU外部的中
+断控制器(在配套芯片组里面)。这些中断控制器(或者说IRQ芯片)以一种层次树的组织形式
+级联在一起,一共有两种层级关系模型(传统IRQ模型和扩展IRQ模型)。
+
+传统IRQ模型
+===========
+
+在这种模型里面,IPI(Inter-Processor Interrupt)和CPU本地时钟中断直接发送到CPUINTC,
+CPU串口(UARTs)中断发送到LIOINTC,而其他所有设备的中断则分别发送到所连接的PCH-PIC/
+PCH-LPC/PCH-MSI,然后被HTVECINTC统一收集,再发送到LIOINTC,最后到达CPUINTC::
+
+ +-----+ +---------+ +-------+
+ | IPI | --> | CPUINTC | <-- | Timer |
+ +-----+ +---------+ +-------+
+ ^
+ |
+ +---------+ +-------+
+ | LIOINTC | <-- | UARTs |
+ +---------+ +-------+
+ ^
+ |
+ +-----------+
+ | HTVECINTC |
+ +-----------+
+ ^ ^
+ | |
+ +---------+ +---------+
+ | PCH-PIC | | PCH-MSI |
+ +---------+ +---------+
+ ^ ^ ^
+ | | |
+ +---------+ +---------+ +---------+
+ | PCH-LPC | | Devices | | Devices |
+ +---------+ +---------+ +---------+
+ ^
+ |
+ +---------+
+ | Devices |
+ +---------+
+
+扩展IRQ模型
+===========
+
+在这种模型里面,IPI(Inter-Processor Interrupt)和CPU本地时钟中断直接发送到CPUINTC,
+CPU串口(UARTs)中断发送到LIOINTC,而其他所有设备的中断则分别发送到所连接的PCH-PIC/
+PCH-LPC/PCH-MSI,然后被EIOINTC统一收集,再直接到达CPUINTC::
+
+ +-----+ +---------+ +-------+
+ | IPI | --> | CPUINTC | <-- | Timer |
+ +-----+ +---------+ +-------+
+ ^ ^
+ | |
+ +---------+ +---------+ +-------+
+ | EIOINTC | | LIOINTC | <-- | UARTs |
+ +---------+ +---------+ +-------+
+ ^ ^
+ | |
+ +---------+ +---------+
+ | PCH-PIC | | PCH-MSI |
+ +---------+ +---------+
+ ^ ^ ^
+ | | |
+ +---------+ +---------+ +---------+
+ | PCH-LPC | | Devices | | Devices |
+ +---------+ +---------+ +---------+
+ ^
+ |
+ +---------+
+ | Devices |
+ +---------+
+
+ACPI相关的定义
+==============
+
+CPUINTC::
+
+ ACPI_MADT_TYPE_CORE_PIC;
+ struct acpi_madt_core_pic;
+ enum acpi_madt_core_pic_version;
+
+LIOINTC::
+
+ ACPI_MADT_TYPE_LIO_PIC;
+ struct acpi_madt_lio_pic;
+ enum acpi_madt_lio_pic_version;
+
+EIOINTC::
+
+ ACPI_MADT_TYPE_EIO_PIC;
+ struct acpi_madt_eio_pic;
+ enum acpi_madt_eio_pic_version;
+
+HTVECINTC::
+
+ ACPI_MADT_TYPE_HT_PIC;
+ struct acpi_madt_ht_pic;
+ enum acpi_madt_ht_pic_version;
+
+PCH-PIC::
+
+ ACPI_MADT_TYPE_BIO_PIC;
+ struct acpi_madt_bio_pic;
+ enum acpi_madt_bio_pic_version;
+
+PCH-MSI::
+
+ ACPI_MADT_TYPE_MSI_PIC;
+ struct acpi_madt_msi_pic;
+ enum acpi_madt_msi_pic_version;
+
+PCH-LPC::
+
+ ACPI_MADT_TYPE_LPC_PIC;
+ struct acpi_madt_lpc_pic;
+ enum acpi_madt_lpc_pic_version;
+
+参考文献
+========
+
+龙芯3A5000的文档:
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-3A5000-usermanual-1.02-CN.pdf (中文版)
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-3A5000-usermanual-1.02-EN.pdf (英文版)
+
+龙芯LS7A芯片组的文档:
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-7A1000-usermanual-2.00-CN.pdf (中文版)
+
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-7A1000-usermanual-2.00-EN.pdf (英文版)
+
+注:CPUINTC即《龙芯架构参考手册卷一》第7.4节所描述的CSR.ECFG/CSR.ESTAT寄存器及其中断
+控制逻辑;LIOINTC即《龙芯3A5000处理器使用手册》第11.1节所描述的“传统I/O中断”;EIOINTC
+即《龙芯3A5000处理器使用手册》第11.2节所描述的“扩展I/O中断”;HTVECINTC即《龙芯3A5000
+处理器使用手册》第14.3节所描述的“HyperTransport中断”;PCH-PIC/PCH-MSI即《龙芯7A1000桥
+片用户手册》第5章所描述的“中断控制器”;PCH-LPC即《龙芯7A1000桥片用户手册》第24.3节所
+描述的“LPC中断”。
diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst
index c6d034abce3a..1c37159fa171 100644
--- a/Documentation/usb/gadget-testing.rst
+++ b/Documentation/usb/gadget-testing.rst
@@ -787,6 +787,7 @@ The uvc function provides these attributes in its function directory:
streaming_maxpacket maximum packet size this endpoint is capable of
sending or receiving when this configuration is
selected
+ function_name name of the interface
=================== ================================================
There are also "control" and "streaming" subdirectories, each of which contain
diff --git a/MAINTAINERS b/MAINTAINERS
index 41c9c8f2b96d..b54288db77aa 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -382,7 +382,7 @@ F: include/acpi/
F: tools/power/acpi/
ACPI FOR ARM64 (ACPI/arm64)
-M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M: Lorenzo Pieralisi <lpieralisi@kernel.org>
M: Hanjun Guo <guohanjun@huawei.com>
M: Sudeep Holla <sudeep.holla@arm.com>
L: linux-acpi@vger.kernel.org
@@ -820,7 +820,7 @@ S: Maintained
F: drivers/mailbox/mailbox-altera.c
ALTERA MSGDMA IP CORE DRIVER
-M: Olivier Dautricourt <olivier.dautricourt@orolia.com>
+M: Olivier Dautricourt <olivierdautricourt@gmail.com>
R: Stefan Roese <sr@denx.de>
L: dmaengine@vger.kernel.org
S: Odd Fixes
@@ -1090,6 +1090,14 @@ W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml
F: drivers/iio/adc/ad7292.c
+ANALOG DEVICES INC AD3552R DRIVER
+M: Nuno Sá <nuno.sa@analog.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
+F: drivers/iio/dac/ad3552r.c
+
ANALOG DEVICES INC AD7293 DRIVER
M: Antoniu Miclaus <antoniu.miclaus@analog.com>
L: linux-iio@vger.kernel.org
@@ -1375,14 +1383,6 @@ L: linux-input@vger.kernel.org
S: Odd fixes
F: drivers/input/mouse/bcm5974.c
-APPLE DART IOMMU DRIVER
-M: Sven Peter <sven@svenpeter.dev>
-R: Alyssa Rosenzweig <alyssa@rosenzweig.io>
-L: iommu@lists.linux-foundation.org
-S: Maintained
-F: Documentation/devicetree/bindings/iommu/apple,dart.yaml
-F: drivers/iommu/apple-dart.c
-
APPLE PCIE CONTROLLER DRIVER
M: Alyssa Rosenzweig <alyssa@rosenzweig.io>
M: Marc Zyngier <maz@kernel.org>
@@ -1834,9 +1834,11 @@ F: Documentation/devicetree/bindings/arm/apple/*
F: Documentation/devicetree/bindings/clock/apple,nco.yaml
F: Documentation/devicetree/bindings/i2c/apple,i2c.yaml
F: Documentation/devicetree/bindings/interrupt-controller/apple,*
+F: Documentation/devicetree/bindings/iommu/apple,dart.yaml
F: Documentation/devicetree/bindings/iommu/apple,sart.yaml
F: Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml
F: Documentation/devicetree/bindings/nvme/apple,nvme-ans.yaml
+F: Documentation/devicetree/bindings/nvmem/apple,efuses.yaml
F: Documentation/devicetree/bindings/pci/apple,pcie.yaml
F: Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml
F: Documentation/devicetree/bindings/power/apple*
@@ -1845,9 +1847,11 @@ F: arch/arm64/boot/dts/apple/
F: drivers/clk/clk-apple-nco.c
F: drivers/i2c/busses/i2c-pasemi-core.c
F: drivers/i2c/busses/i2c-pasemi-platform.c
+F: drivers/iommu/apple-dart.c
F: drivers/irqchip/irq-apple-aic.c
F: drivers/mailbox/apple-mailbox.c
F: drivers/nvme/host/apple.c
+F: drivers/nvmem/apple-efuses.c
F: drivers/pinctrl/pinctrl-apple-gpio.c
F: drivers/soc/apple/*
F: drivers/watchdog/apple_wdt.c
@@ -2131,6 +2135,18 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
F: arch/arm/mach-sa1100/include/mach/jornada720.h
F: arch/arm/mach-sa1100/jornada720.c
+ARM/HPE GXP ARCHITECTURE
+M: Jean-Marie Verdun <verdun@hpe.com>
+M: Nick Hawkins <nick.hawkins@hpe.com>
+S: Maintained
+F: Documentation/devicetree/bindings/arm/hpe,gxp.yaml
+F: Documentation/devicetree/bindings/timer/hpe,gxp-timer.yaml
+F: arch/arm/boot/dts/hpe-bmc*
+F: arch/arm/boot/dts/hpe-gxp*
+F: arch/arm/mach-hpe/
+F: drivers/clocksource/timer-gxp.c
+F: drivers/watchdog/gxp-wdt.c
+
ARM/IGEP MACHINE SUPPORT
M: Enric Balletbo i Serra <eballetbo@gmail.com>
M: Javier Martinez Canillas <javier@dowhile0.org>
@@ -2549,7 +2565,7 @@ F: drivers/pci/controller/dwc/pcie-qcom.c
F: drivers/phy/qualcomm/
F: drivers/power/*/msm*
F: drivers/reset/reset-qcom-*
-F: drivers/scsi/ufs/ufs-qcom*
+F: drivers/ufs/host/ufs-qcom*
F: drivers/spi/spi-geni-qcom.c
F: drivers/spi/spi-qcom-qspi.c
F: drivers/spi/spi-qup.c
@@ -2946,7 +2962,7 @@ N: uniphier
ARM/VERSATILE EXPRESS PLATFORM
M: Liviu Dudau <liviu.dudau@arm.com>
M: Sudeep Holla <sudeep.holla@arm.com>
-M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M: Lorenzo Pieralisi <lpieralisi@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: */*/*/vexpress*
@@ -3517,10 +3533,14 @@ R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
R: Rasmus Villemoes <linux@rasmusvillemoes.dk>
S: Maintained
F: include/linux/bitmap.h
+F: include/linux/cpumask.h
F: include/linux/find.h
+F: include/linux/nodemask.h
F: lib/bitmap.c
+F: lib/cpumask.c
F: lib/find_bit.c
F: lib/find_bit_benchmark.c
+F: lib/nodemask.c
F: lib/test_bitmap.c
F: tools/include/linux/bitmap.h
F: tools/include/linux/find.h
@@ -4572,8 +4592,8 @@ F: drivers/power/supply/cw2015_battery.c
CEPH COMMON CODE (LIBCEPH)
M: Ilya Dryomov <idryomov@gmail.com>
-M: Jeff Layton <jlayton@kernel.org>
M: Xiubo Li <xiubli@redhat.com>
+R: Jeff Layton <jlayton@kernel.org>
L: ceph-devel@vger.kernel.org
S: Supported
W: http://ceph.com/
@@ -4583,9 +4603,9 @@ F: include/linux/crush/
F: net/ceph/
CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
-M: Jeff Layton <jlayton@kernel.org>
M: Xiubo Li <xiubli@redhat.com>
M: Ilya Dryomov <idryomov@gmail.com>
+R: Jeff Layton <jlayton@kernel.org>
L: ceph-devel@vger.kernel.org
S: Supported
W: http://ceph.com/
@@ -5056,11 +5076,13 @@ M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
M: Roman Gushchin <roman.gushchin@linux.dev>
M: Shakeel Butt <shakeelb@google.com>
+R: Muchun Song <songmuchun@bytedance.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
S: Maintained
F: mm/memcontrol.c
F: mm/swap_cgroup.c
+F: tools/testing/selftests/cgroup/memcg_protection.m
F: tools/testing/selftests/cgroup/test_kmem.c
F: tools/testing/selftests/cgroup/test_memcontrol.c
@@ -5160,7 +5182,7 @@ F: arch/x86/kernel/cpuid.c
F: arch/x86/kernel/msr.c
CPUIDLE DRIVER - ARM BIG LITTLE
-M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M: Lorenzo Pieralisi <lpieralisi@kernel.org>
M: Daniel Lezcano <daniel.lezcano@linaro.org>
L: linux-pm@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -5180,7 +5202,7 @@ F: drivers/cpuidle/cpuidle-exynos.c
F: include/linux/platform_data/cpuidle-exynos.h
CPUIDLE DRIVER - ARM PSCI
-M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M: Lorenzo Pieralisi <lpieralisi@kernel.org>
M: Sudeep Holla <sudeep.holla@arm.com>
L: linux-pm@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -7515,6 +7537,7 @@ S: Maintained
F: Documentation/admin-guide/bootconfig.rst
F: fs/proc/bootconfig.c
F: include/linux/bootconfig.h
+F: lib/bootconfig-data.S
F: lib/bootconfig.c
F: tools/bootconfig/*
F: tools/bootconfig/scripts/*
@@ -7708,6 +7731,7 @@ F: include/linux/arm_ffa.h
FIRMWARE LOADER (request_firmware)
M: Luis Chamberlain <mcgrof@kernel.org>
+M: Russ Weight <russell.h.weight@intel.com>
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/firmware_class/
@@ -7776,7 +7800,7 @@ R: Tom Rix <trix@redhat.com>
L: linux-fpga@vger.kernel.org
S: Maintained
Q: http://patchwork.kernel.org/project/linux-fpga/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mdf/linux-fpga.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/fpga/linux-fpga.git
F: Documentation/devicetree/bindings/fpga/
F: Documentation/driver-api/fpga/
F: Documentation/fpga/
@@ -8402,7 +8426,7 @@ M: Mika Westerberg <mika.westerberg@linux.intel.com>
M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
L: linux-gpio@vger.kernel.org
L: linux-acpi@vger.kernel.org
-S: Maintained
+S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
F: Documentation/firmware-guide/acpi/gpio-properties.rst
F: drivers/gpio/gpiolib-acpi.c
@@ -9053,8 +9077,16 @@ L: linux-input@vger.kernel.org
S: Maintained
F: drivers/input/touchscreen/htcpen.c
+HTE SUBSYSTEM
+M: Dipen Patel <dipenp@nvidia.com>
+S: Maintained
+F: Documentation/devicetree/bindings/timestamp/
+F: Documentation/hte/
+F: drivers/hte/
+F: include/linux/hte.h
+
HTS221 TEMPERATURE-HUMIDITY IIO DRIVER
-M: Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+M: Lorenzo Bianconi <lorenzo@kernel.org>
L: linux-iio@vger.kernel.org
S: Maintained
W: http://www.st.com/
@@ -9322,13 +9354,13 @@ F: drivers/i2c/i2c-stub.c
I3C DRIVER FOR CADENCE I3C MASTER IP
M: Przemysław Gaj <pgaj@cadence.com>
S: Maintained
-F: Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt
+F: Documentation/devicetree/bindings/i3c/cdns,i3c-master.yaml
F: drivers/i3c/master/i3c-master-cdns.c
I3C DRIVER FOR SYNOPSYS DESIGNWARE
M: Vitor Soares <vitor.soares@synopsys.com>
S: Maintained
-F: Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
+F: Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml
F: drivers/i3c/master/dw*
I3C SUBSYSTEM
@@ -9867,7 +9899,7 @@ F: drivers/video/fbdev/intelfb/
INTEL GPIO DRIVERS
M: Andy Shevchenko <andy@kernel.org>
L: linux-gpio@vger.kernel.org
-S: Maintained
+S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
F: drivers/gpio/gpio-ich.c
F: drivers/gpio/gpio-merrifield.c
@@ -10088,7 +10120,7 @@ F: drivers/platform/x86/intel/pmc/
INTEL PMIC GPIO DRIVERS
M: Andy Shevchenko <andy@kernel.org>
-S: Maintained
+S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
F: drivers/gpio/gpio-*cove.c
@@ -11437,8 +11469,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching.g
F: Documentation/ABI/testing/sysfs-kernel-livepatch
F: Documentation/livepatch/
F: arch/powerpc/include/asm/livepatch.h
-F: arch/s390/include/asm/livepatch.h
-F: arch/x86/include/asm/livepatch.h
F: include/linux/livepatch.h
F: kernel/livepatch/
F: kernel/module/livepatch.c
@@ -11547,6 +11577,16 @@ S: Maintained
F: Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml
F: drivers/gpu/drm/bridge/lontium-lt8912b.c
+LOONGARCH
+M: Huacai Chen <chenhuacai@kernel.org>
+R: WANG Xuerui <kernel@xen0n.name>
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson.git
+F: arch/loongarch/
+F: drivers/*/*loongarch*
+F: Documentation/loongarch/
+F: Documentation/translations/zh_CN/loongarch/
+
LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
M: Sathya Prakash <sathya.prakash@broadcom.com>
M: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
@@ -12895,7 +12935,7 @@ F: arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
MHI BUS
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-R: Hemant Kumar <hemantk@codeaurora.org>
+R: Hemant Kumar <quic_hemantk@quicinc.com>
L: mhi@lists.linux.dev
L: linux-arm-msm@vger.kernel.org
S: Maintained
@@ -13065,7 +13105,7 @@ M: Claudiu Beznea <claudiu.beznea@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-pwm@vger.kernel.org
S: Supported
-F: Documentation/devicetree/bindings/pwm/atmel-pwm.txt
+F: Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml
F: drivers/pwm/pwm-atmel.c
MICROCHIP SAMA5D2-COMPATIBLE ADC DRIVER
@@ -13404,7 +13444,7 @@ F: drivers/net/phy/motorcomm.c
MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
M: Jiri Slaby <jirislaby@kernel.org>
S: Maintained
-F: Documentation/driver-api/serial/moxa-smartio.rst
+F: Documentation/driver-api/tty/moxa-smartio.rst
F: drivers/tty/mxser.*
MR800 AVERMEDIA USB FM RADIO DRIVER
@@ -15287,7 +15327,7 @@ F: drivers/pci/controller/pci-v3-semi.c
PCI ENDPOINT SUBSYSTEM
M: Kishon Vijay Abraham I <kishon@ti.com>
-M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M: Lorenzo Pieralisi <lpieralisi@kernel.org>
R: Krzysztof Wilczyński <kw@linux.com>
L: linux-pci@vger.kernel.org
S: Supported
@@ -15350,7 +15390,7 @@ F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
F: drivers/pci/controller/pci-xgene-msi.c
PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
-M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M: Lorenzo Pieralisi <lpieralisi@kernel.org>
R: Rob Herring <robh@kernel.org>
R: Krzysztof Wilczyński <kw@linux.com>
L: linux-pci@vger.kernel.org
@@ -15903,7 +15943,7 @@ F: include/linux/dtpm.h
POWER STATE COORDINATION INTERFACE (PSCI)
M: Mark Rutland <mark.rutland@arm.com>
-M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M: Lorenzo Pieralisi <lpieralisi@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/firmware/psci/
@@ -16097,7 +16137,6 @@ F: include/asm-generic/syscall.h
F: include/linux/ptrace.h
F: include/linux/regset.h
F: include/uapi/linux/ptrace.h
-F: include/uapi/linux/ptrace.h
F: kernel/ptrace.c
PULSE8-CEC DRIVER
@@ -16646,6 +16685,13 @@ L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/boot/dts/ralink/mt7621*
+RALINK PINCTRL DRIVER
+M: Arınç ÜNAL <arinc.unal@arinc9.com>
+M: Sergio Paracuellos <sergio.paracuellos@gmail.com>
+L: linux-mips@vger.kernel.org
+S: Maintained
+F: drivers/pinctrl/ralink/
+
RALINK RT2X00 WIRELESS LAN DRIVER
M: Stanislaw Gruszka <stf_xl@wp.pl>
M: Helmut Schaa <helmut.schaa@googlemail.com>
@@ -16992,6 +17038,14 @@ S: Supported
F: Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml
F: drivers/iio/adc/rzg2l_adc.c
+RENESAS RZ/N1 RTC CONTROLLER DRIVER
+M: Miquel Raynal <miquel.raynal@bootlin.com>
+L: linux-rtc@vger.kernel.org
+L: linux-renesas-soc@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/rtc/renesas,rzn1-rtc.yaml
+F: drivers/rtc/rtc-rzn1.c
+
RENESAS R-CAR GEN3 & RZ/N1 NAND CONTROLLER DRIVER
M: Miquel Raynal <miquel.raynal@bootlin.com>
L: linux-mtd@lists.infradead.org
@@ -17701,6 +17755,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
F: Documentation/devicetree/bindings/scsi/
F: drivers/scsi/
+F: drivers/ufs/
F: include/scsi/
SCSI TAPE DRIVER
@@ -18276,7 +18331,7 @@ F: drivers/net/ethernet/smsc/smc91x.*
SECURE MONITOR CALL(SMC) CALLING CONVENTION (SMCCC)
M: Mark Rutland <mark.rutland@arm.com>
-M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M: Lorenzo Pieralisi <lpieralisi@kernel.org>
M: Sudeep Holla <sudeep.holla@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -18773,7 +18828,7 @@ S: Maintained
F: arch/alpha/kernel/srm_env.c
ST LSM6DSx IMU IIO DRIVER
-M: Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+M: Lorenzo Bianconi <lorenzo@kernel.org>
L: linux-iio@vger.kernel.org
S: Maintained
W: http://www.st.com/
@@ -19028,6 +19083,12 @@ S: Maintained
F: Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml
F: drivers/nvmem/sunplus-ocotp.c
+SUNPLUS PWM DRIVER
+M: Hammer Hsieh <hammerh0314@gmail.com>
+S: Maintained
+F: Documentation/devicetree/bindings/pwm/sunplus,sp7021-pwm.yaml
+F: drivers/pwm/pwm-sunplus.c
+
SUNPLUS RTC DRIVER
M: Vincent Shih <vincent.sunplus@gmail.com>
L: linux-rtc@vger.kernel.org
@@ -19048,6 +19109,13 @@ S: Maintained
F: Documentation/devicetree/bindings/serial/sunplus,sp7021-uart.yaml
F: drivers/tty/serial/sunplus-uart.c
+SUNPLUS WATCHDOG DRIVER
+M: Xiantao Hu <xt.hu@cqplus1.com>
+L: linux-watchdog@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/watchdog/sunplus,sp7021-wdt.yaml
+F: drivers/watchdog/sunplus_wdt.c
+
SUPERH
M: Yoshinori Sato <ysato@users.sourceforge.jp>
M: Rich Felker <dalias@libc.org>
@@ -19193,6 +19261,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER
M: Viresh Kumar <vireshk@kernel.org>
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
S: Maintained
+F: Documentation/devicetree/bindings/dma/renesas,rzn1-dmamux.yaml
F: Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml
F: drivers/dma/dw/
F: include/dt-bindings/dma/dw-dmac.h
@@ -20111,8 +20180,8 @@ M: Ingo Molnar <mingo@redhat.com>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace.git
F: Documentation/trace/ftrace.rst
-F: arch/*/*/*/ftrace.h
-F: arch/*/kernel/ftrace.c
+F: arch/*/*/*/*ftrace*
+F: arch/*/*/*ftrace*
F: fs/tracefs/
F: include/*/ftrace.h
F: include/linux/trace*.h
@@ -20343,35 +20412,28 @@ F: drivers/cdrom/cdrom.c
F: include/linux/cdrom.h
F: include/uapi/linux/cdrom.h
-UNISYS S-PAR DRIVERS
-M: David Kershner <david.kershner@unisys.com>
-L: sparmaintainer@unisys.com (Unisys internal)
-S: Supported
-F: drivers/staging/unisys/
-F: drivers/visorbus/
-F: include/linux/visorbus.h
-
UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER
R: Alim Akhtar <alim.akhtar@samsung.com>
R: Avri Altman <avri.altman@wdc.com>
+R: Bart Van Assche <bvanassche@acm.org>
L: linux-scsi@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/ufs/
F: Documentation/scsi/ufs.rst
-F: drivers/scsi/ufs/
+F: drivers/ufs/core/
UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
M: Pedro Sousa <pedrom.sousa@synopsys.com>
L: linux-scsi@vger.kernel.org
S: Supported
-F: drivers/scsi/ufs/*dwc*
+F: drivers/ufs/host/*dwc*
UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER MEDIATEK HOOKS
M: Stanley Chu <stanley.chu@mediatek.com>
L: linux-scsi@vger.kernel.org
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: drivers/scsi/ufs/ufs-mediatek*
+F: drivers/ufs/host/ufs-mediatek*
UNSORTED BLOCK IMAGES (UBI)
M: Richard Weinberger <richard@nod.at>
@@ -21019,6 +21081,7 @@ F: include/uapi/linux/virtio_crypto.h
VIRTIO DRIVERS FOR S390
M: Cornelia Huck <cohuck@redhat.com>
M: Halil Pasic <pasic@linux.ibm.com>
+M: Eric Farman <farman@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: virtualization@lists.linux-foundation.org
L: kvm@vger.kernel.org
@@ -21169,7 +21232,7 @@ L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
F: Documentation/driver-api/vme.rst
-F: drivers/staging/vme/
+F: drivers/staging/vme_user/
F: drivers/vme/
F: include/linux/vme*
@@ -21695,23 +21758,29 @@ F: arch/arm64/include/asm/xen/
F: arch/arm64/xen/
XEN HYPERVISOR INTERFACE
-M: Boris Ostrovsky <boris.ostrovsky@oracle.com>
M: Juergen Gross <jgross@suse.com>
-R: Stefano Stabellini <sstabellini@kernel.org>
+M: Stefano Stabellini <sstabellini@kernel.org>
+R: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
F: Documentation/ABI/stable/sysfs-hypervisor-xen
F: Documentation/ABI/testing/sysfs-hypervisor-xen
-F: arch/x86/include/asm/pvclock-abi.h
-F: arch/x86/include/asm/xen/
-F: arch/x86/platform/pvh/
-F: arch/x86/xen/
F: drivers/*/xen-*front.c
F: drivers/xen/
F: include/uapi/xen/
F: include/xen/
+XEN HYPERVISOR X86
+M: Juergen Gross <jgross@suse.com>
+R: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
+S: Supported
+F: arch/x86/include/asm/pvclock-abi.h
+F: arch/x86/include/asm/xen/
+F: arch/x86/platform/pvh/
+F: arch/x86/xen/
+
XEN NETWORK BACKEND DRIVER
M: Wei Liu <wei.liu@kernel.org>
M: Paul Durrant <paul@xen.org>
@@ -21816,6 +21885,12 @@ F: drivers/misc/Makefile
F: drivers/misc/xilinx_sdfec.c
F: include/uapi/misc/xilinx_sdfec.h
+XILINX PWM DRIVER
+M: Sean Anderson <sean.anderson@seco.com>
+S: Maintained
+F: drivers/pwm/pwm-xilinx.c
+F: include/clocksource/timer-xilinx.h
+
XILINX UARTLITE SERIAL DRIVER
M: Peter Korsgaard <jacmet@sunsite.dk>
L: linux-serial@vger.kernel.org
diff --git a/Makefile b/Makefile
index edc3f44cd96c..7011d43dff35 100644
--- a/Makefile
+++ b/Makefile
@@ -1490,7 +1490,7 @@ CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
# Directories & files removed with 'make mrproper'
MRPROPER_FILES += include/config include/generated \
- arch/$(SRCARCH)/include/generated .tmp_objdiff \
+ arch/$(SRCARCH)/include/generated .objdiff \
debian snap tar-install \
.config .config.old .version \
Module.symvers \
@@ -1857,7 +1857,7 @@ clean: $(clean-dirs)
-o -name '*.lex.c' -o -name '*.tab.[ch]' \
-o -name '*.asn1.[ch]' \
-o -name '*.symtypes' -o -name 'modules.order' \
- -o -name '.tmp_*.o.*' \
+ -o -name '.tmp_*' \
-o -name '*.c.[012]*.*' \
-o -name '*.ll' \
-o -name '*.gcno' \
diff --git a/arch/Kconfig b/arch/Kconfig
index 904ed51736d4..fcf9a41a4ef5 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1019,12 +1019,10 @@ config PAGE_SIZE_LESS_THAN_64KB
depends on !IA64_PAGE_SIZE_64KB
depends on !PAGE_SIZE_64KB
depends on !PARISC_PAGE_SIZE_64KB
- depends on !PPC_64K_PAGES
depends on PAGE_SIZE_LESS_THAN_256KB
config PAGE_SIZE_LESS_THAN_256KB
def_bool y
- depends on !PPC_256K_PAGES
depends on !PAGE_SIZE_256KB
# This allows to use a set of generic functions to determine mmap base
@@ -1050,6 +1048,10 @@ config HAVE_NOINSTR_HACK
config HAVE_NOINSTR_VALIDATION
bool
+config HAVE_UACCESS_VALIDATION
+ bool
+ select OBJTOOL
+
config HAVE_STACK_VALIDATION
bool
help
diff --git a/arch/alpha/include/uapi/asm/termbits.h b/arch/alpha/include/uapi/asm/termbits.h
index 4575ba34a0ea..f1290b22072b 100644
--- a/arch/alpha/include/uapi/asm/termbits.h
+++ b/arch/alpha/include/uapi/asm/termbits.h
@@ -2,10 +2,8 @@
#ifndef _ALPHA_TERMBITS_H
#define _ALPHA_TERMBITS_H
-#include <linux/posix_types.h>
+#include <asm-generic/termbits-common.h>
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
typedef unsigned int tcflag_t;
/*
@@ -53,76 +51,58 @@ struct ktermios {
};
/* c_cc characters */
-#define VEOF 0
-#define VEOL 1
-#define VEOL2 2
-#define VERASE 3
-#define VWERASE 4
-#define VKILL 5
-#define VREPRINT 6
-#define VSWTC 7
-#define VINTR 8
-#define VQUIT 9
-#define VSUSP 10
-#define VSTART 12
-#define VSTOP 13
-#define VLNEXT 14
-#define VDISCARD 15
-#define VMIN 16
-#define VTIME 17
+#define VEOF 0
+#define VEOL 1
+#define VEOL2 2
+#define VERASE 3
+#define VWERASE 4
+#define VKILL 5
+#define VREPRINT 6
+#define VSWTC 7
+#define VINTR 8
+#define VQUIT 9
+#define VSUSP 10
+#define VSTART 12
+#define VSTOP 13
+#define VLNEXT 14
+#define VDISCARD 15
+#define VMIN 16
+#define VTIME 17
/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IXON 0001000
-#define IXOFF 0002000
-#define IXANY 0004000
-#define IUCLC 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
+#define IXON 0x0200
+#define IXOFF 0x0400
+#define IUCLC 0x1000
+#define IMAXBEL 0x2000
+#define IUTF8 0x4000
/* c_oflag bits */
-#define OPOST 0000001
-#define ONLCR 0000002
-#define OLCUC 0000004
-
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-
-#define OFILL 00000100
-#define OFDEL 00000200
-#define NLDLY 00001400
-#define NL0 00000000
-#define NL1 00000400
-#define NL2 00001000
-#define NL3 00001400
-#define TABDLY 00006000
-#define TAB0 00000000
-#define TAB1 00002000
-#define TAB2 00004000
-#define TAB3 00006000
-#define CRDLY 00030000
-#define CR0 00000000
-#define CR1 00010000
-#define CR2 00020000
-#define CR3 00030000
-#define FFDLY 00040000
-#define FF0 00000000
-#define FF1 00040000
-#define BSDLY 00100000
-#define BS0 00000000
-#define BS1 00100000
-#define VTDLY 00200000
-#define VT0 00000000
-#define VT1 00200000
+#define ONLCR 0x00002
+#define OLCUC 0x00004
+#define NLDLY 0x00300
+#define NL0 0x00000
+#define NL1 0x00100
+#define NL2 0x00200
+#define NL3 0x00300
+#define TABDLY 0x00c00
+#define TAB0 0x00000
+#define TAB1 0x00400
+#define TAB2 0x00800
+#define TAB3 0x00c00
+#define CRDLY 0x03000
+#define CR0 0x00000
+#define CR1 0x01000
+#define CR2 0x02000
+#define CR3 0x03000
+#define FFDLY 0x04000
+#define FF0 0x00000
+#define FF1 0x04000
+#define BSDLY 0x08000
+#define BS0 0x00000
+#define BS1 0x08000
+#define VTDLY 0x10000
+#define VT0 0x00000
+#define VT1 0x10000
/*
* Should be equivalent to TAB3, see description of TAB3 in
* POSIX.1-2008, Ch. 11.2.3 "Output Modes"
@@ -130,61 +110,36 @@ struct ktermios {
#define XTABS TAB3
/* c_cflag bit meaning */
-#define CBAUD 0000037
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CBAUDEX 0000000
-#define B57600 00020
-#define B115200 00021
-#define B230400 00022
-#define B460800 00023
-#define B500000 00024
-#define B576000 00025
-#define B921600 00026
-#define B1000000 00027
-#define B1152000 00030
-#define B1500000 00031
-#define B2000000 00032
-#define B2500000 00033
-#define B3000000 00034
-#define B3500000 00035
-#define B4000000 00036
-#define BOTHER 00037
-
-#define CSIZE 00001400
-#define CS5 00000000
-#define CS6 00000400
-#define CS7 00001000
-#define CS8 00001400
-
-#define CSTOPB 00002000
-#define CREAD 00004000
-#define PARENB 00010000
-#define PARODD 00020000
-#define HUPCL 00040000
-
-#define CLOCAL 00100000
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-#define CIBAUD 07600000
-#define IBSHIFT 16
+#define CBAUD 0x0000001f
+#define CBAUDEX 0x00000000
+#define BOTHER 0x0000001f
+#define B57600 0x00000010
+#define B115200 0x00000011
+#define B230400 0x00000012
+#define B460800 0x00000013
+#define B500000 0x00000014
+#define B576000 0x00000015
+#define B921600 0x00000016
+#define B1000000 0x00000017
+#define B1152000 0x00000018
+#define B1500000 0x00000019
+#define B2000000 0x0000001a
+#define B2500000 0x0000001b
+#define B3000000 0x0000001c
+#define B3500000 0x0000001d
+#define B4000000 0x0000001e
+#define CSIZE 0x00000300
+#define CS5 0x00000000
+#define CS6 0x00000100
+#define CS7 0x00000200
+#define CS8 0x00000300
+#define CSTOPB 0x00000400
+#define CREAD 0x00000800
+#define PARENB 0x00001000
+#define PARODD 0x00002000
+#define HUPCL 0x00004000
+#define CLOCAL 0x00008000
+#define CIBAUD 0x001f0000
/* c_lflag bits */
#define ISIG 0x00000080
@@ -204,17 +159,6 @@ struct ktermios {
#define IEXTEN 0x00000400
#define EXTPROC 0x10000000
-/* Values for the ACTION argument to `tcflow'. */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* Values for the QUEUE_SELECTOR argument to `tcflush'. */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
-
/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. */
#define TCSANOW 0
#define TCSADRAIN 1
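
The hunk above respells the remaining Alpha-specific termbits constants from octal to hexadecimal and drops the definitions that the newly added <asm-generic/termbits-common.h> include now supplies. As a quick illustrative cross-check (ordinary user-space C, not part of the patch), the old octal and new hex encodings agree for a few representative bits taken straight from the hunk:

/* octal_vs_hex_check.c - illustrative only, values copied from the hunk above */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	static_assert(0001000  == 0x0200,     "IXON (c_iflag)");
	static_assert(00006000 == 0x00c00,    "TABDLY (c_oflag)");
	static_assert(00001400 == 0x00000300, "CSIZE (c_cflag)");
	static_assert(00021    == 0x00000011, "B115200 (c_cflag)");
	puts("old octal and new hex encodings match");
	return 0;
}
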
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 8bbeebb73cf0..d257293401e2 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -36,6 +36,7 @@
#include <linux/types.h>
#include <linux/ipc.h>
#include <linux/namei.h>
+#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/vfs.h>
#include <linux/rcupdate.h>
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 5f8527081da9..e2e25f8b5e76 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -125,7 +125,7 @@ common_shutdown_1(void *generic_ptr)
/* Wait for the secondaries to halt. */
set_cpu_present(boot_cpuid, false);
set_cpu_possible(boot_cpuid, false);
- while (cpumask_weight(cpu_present_mask))
+ while (!cpumask_empty(cpu_present_mask))
barrier();
#endif
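
The new `while (!cpumask_empty(cpu_present_mask))` spin is behaviourally the same as the old `cpumask_weight(...) != 0` test, but an emptiness check can stop at the first set bit instead of popcounting the whole mask. A small stand-alone sketch of that difference (invented helper names; the real kernel APIs are cpumask_empty()/cpumask_weight() from <linux/cpumask.h>):

/* mask_empty_vs_weight.c - stand-alone illustration with assumed names */
#include <stdbool.h>
#include <stdio.h>

#define NBITS  256
#define NWORDS (NBITS / 64)

/* "is any bit set?" can bail out at the first non-zero word */
static bool mask_empty(const unsigned long long m[NWORDS])
{
	for (int i = 0; i < NWORDS; i++)
		if (m[i])
			return false;
	return true;
}

/* a weight (popcount) always walks every word */
static unsigned int mask_weight(const unsigned long long m[NWORDS])
{
	unsigned int w = 0;

	for (int i = 0; i < NWORDS; i++)
		w += __builtin_popcountll(m[i]);   /* GCC/Clang builtin */
	return w;
}

int main(void)
{
	unsigned long long present[NWORDS] = { 1ULL };	/* boot CPU only */

	/* both spellings agree; the empty test just states the intent */
	printf("weight!=0: %d  !empty: %d\n",
	       mask_weight(present) != 0, !mask_empty(present));
	return 0;
}
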
@@ -233,10 +233,11 @@ release_thread(struct task_struct *dead_task)
/*
* Copy architecture-specific thread state
*/
-int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long kthread_arg, struct task_struct *p,
- unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);
@@ -249,13 +250,13 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childti->pcb.ksp = (unsigned long) childstack;
childti->pcb.flags = 1; /* set FEN, clear everything else */
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
/* kernel thread */
memset(childstack, 0,
sizeof(struct switch_stack) + sizeof(struct pt_regs));
childstack->r26 = (unsigned long) ret_from_kernel_thread;
- childstack->r9 = usp; /* function */
- childstack->r10 = kthread_arg;
+ childstack->r9 = (unsigned long) args->fn;
+ childstack->r10 = (unsigned long) args->fn_arg;
childregs->hae = alpha_mv.hae_cache;
childti->pcb.usp = 0;
return 0;
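
The alpha hunks above (and the arc hunks below) convert copy_thread() to the consolidated calling convention: the clone parameters arrive packed in a single struct, and a kernel thread is now recognised by args->fn being non-NULL rather than by PF_KTHREAD/PF_IO_WORKER flags on the child. A minimal user-space sketch of that pattern, with assumed struct and field names mirroring the diff rather than the kernel's real definitions:

/* copy_thread_args_sketch.c - assumed names, mirrors the diff's pattern */
#include <stdio.h>

struct kernel_clone_args_sketch {
	unsigned long flags;
	unsigned long stack;	/* user stack pointer, if any */
	unsigned long tls;
	int (*fn)(void *);	/* non-NULL => kernel thread */
	void *fn_arg;
};

static int copy_thread_sketch(const struct kernel_clone_args_sketch *args)
{
	if (args->fn) {
		/* kernel thread: stash fn/fn_arg for the ret_from_kernel_thread path */
		printf("kernel thread, fn_arg=%p\n", args->fn_arg);
		return 0;
	}
	/* user thread: consume stack and tls as before */
	printf("user thread, usp=%#lx tls=%#lx\n", args->stack, args->tls);
	return 0;
}

static int worker(void *arg)
{
	(void)arg;
	return 0;
}

int main(void)
{
	struct kernel_clone_args_sketch kthread = { .fn = worker, .fn_arg = NULL };
	struct kernel_clone_args_sketch uthread = { .stack = 0x7ffc0000UL, .tls = 0x1000UL };

	copy_thread_sketch(&kthread);
	copy_thread_sketch(&uthread);
	return 0;
}
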
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index 1931a04af85a..4d180d96f09e 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -353,7 +353,6 @@ csum_and_copy_from_user(const void __user *src, void *dst, int len)
return 0;
return __csum_and_copy(src, dst, len);
}
-EXPORT_SYMBOL(csum_and_copy_from_user);
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 5f7f5aab361f..3369f0700702 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -162,10 +162,11 @@ asmlinkage void ret_from_fork(void);
* | user_r25 |
* ------------------ <===== END of PAGE
*/
-int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long kthread_arg, struct task_struct *p,
- unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct pt_regs *c_regs; /* child's pt_regs */
unsigned long *childksp; /* to unwind out of __switch_to() */
struct callee_regs *c_callee; /* child's callee regs */
@@ -191,11 +192,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childksp[0] = 0; /* fp */
childksp[1] = (unsigned long)ret_from_fork; /* blink */
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
memset(c_regs, 0, sizeof(struct pt_regs));
- c_callee->r13 = kthread_arg;
- c_callee->r14 = usp; /* function */
+ c_callee->r13 = (unsigned long)args->fn_arg;
+ c_callee->r14 = (unsigned long)args->fn;
return 0;
}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 903165a400a7..7630ba9cb6cc 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -357,25 +357,6 @@ config ARCH_FOOTBRIDGE
Support for systems based on the DC21285 companion chip
("FootBridge"), such as the Simtec CATS and the Rebel NetWinder.
-config ARCH_PXA
- bool "PXA2xx/PXA3xx-based"
- depends on CPU_LITTLE_ENDIAN
- select ARCH_MTD_XIP
- select ARM_CPU_SUSPEND if PM
- select AUTO_ZRELADDR
- select COMMON_CLK
- select CLKSRC_PXA
- select CLKSRC_MMIO
- select TIMER_OF
- select CPU_XSCALE if !CPU_XSC3
- select GPIO_PXA
- select GPIOLIB
- select IRQ_DOMAIN
- select PLAT_PXA
- select SPARSE_IRQ
- help
- Support for Intel/Marvell's PXA2xx/PXA3xx processor line.
-
config ARCH_RPC
bool "RiscPC"
depends on !CC_IS_CLANG && GCC_VERSION < 90100 && GCC_VERSION >= 60000
@@ -415,19 +396,6 @@ config ARCH_SA1100
help
Support for StrongARM 11x0 based boards.
-config ARCH_OMAP1
- bool "TI OMAP1"
- depends on CPU_LITTLE_ENDIAN
- select CLKSRC_MMIO
- select FORCE_PCI if PCCARD
- select GENERIC_IRQ_CHIP
- select GPIOLIB
- select HAVE_LEGACY_CLK
- select IRQ_DOMAIN
- select SPARSE_IRQ
- help
- Support for older TI OMAP1 (omap7xx, omap15xx or omap16xx)
-
endchoice
menu "Multiple platform selection"
@@ -550,6 +518,8 @@ source "arch/arm/mach-highbank/Kconfig"
source "arch/arm/mach-hisi/Kconfig"
+source "arch/arm/mach-hpe/Kconfig"
+
source "arch/arm/mach-imx/Kconfig"
source "arch/arm/mach-iop32x/Kconfig"
@@ -593,7 +563,6 @@ source "arch/arm/mach-orion5x/Kconfig"
source "arch/arm/mach-oxnas/Kconfig"
source "arch/arm/mach-pxa/Kconfig"
-source "arch/arm/plat-pxa/Kconfig"
source "arch/arm/mach-qcom/Kconfig"
@@ -672,9 +641,6 @@ config PLAT_ORION_LEGACY
bool
select PLAT_ORION
-config PLAT_PXA
- bool
-
config PLAT_VERSATILE
bool
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 954ec707d182..c8e3633f5434 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -179,6 +179,7 @@ machine-$(CONFIG_ARCH_FOOTBRIDGE) += footbridge
machine-$(CONFIG_ARCH_GEMINI) += gemini
machine-$(CONFIG_ARCH_HIGHBANK) += highbank
machine-$(CONFIG_ARCH_HISI) += hisi
+machine-$(CONFIG_ARCH_HPE) += hpe
machine-$(CONFIG_ARCH_IOP32X) += iop32x
machine-$(CONFIG_ARCH_IXP4XX) += ixp4xx
machine-$(CONFIG_ARCH_KEYSTONE) += keystone
@@ -225,7 +226,6 @@ machine-$(CONFIG_PLAT_SPEAR) += spear
# Platform directory name. This list is sorted alphanumerically
# by CONFIG_* macro name.
plat-$(CONFIG_PLAT_ORION) += orion
-plat-$(CONFIG_PLAT_PXA) += pxa
# The byte offset of the kernel image in RAM from the start of RAM.
TEXT_OFFSET := $(textofs-y)
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index edfbedaa6168..184899808ee7 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -259,6 +259,8 @@ dtb-$(CONFIG_ARCH_HISI) += \
hi3519-demb.dtb
dtb-$(CONFIG_ARCH_HIX5HD2) += \
hisi-x5hd2-dkb.dtb
+dtb-$(CONFIG_ARCH_HPE_GXP) += \
+ hpe-bmc-dl360gen10.dtb
dtb-$(CONFIG_ARCH_INTEGRATOR) += \
integratorap.dtb \
integratorap-im-pd1.dtb \
@@ -1584,6 +1586,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-bmc-lenovo-hr630.dtb \
aspeed-bmc-lenovo-hr855xg2.dtb \
aspeed-bmc-microsoft-olympus.dtb \
+ aspeed-bmc-nuvia-dc-scm.dtb \
aspeed-bmc-opp-lanyang.dtb \
aspeed-bmc-opp-mihawk.dtb \
aspeed-bmc-opp-mowgli.dtb \
diff --git a/arch/arm/boot/dts/aspeed-ast2600-evb.dts b/arch/arm/boot/dts/aspeed-ast2600-evb.dts
index b7eb552640cb..5a6063bd4508 100644
--- a/arch/arm/boot/dts/aspeed-ast2600-evb.dts
+++ b/arch/arm/boot/dts/aspeed-ast2600-evb.dts
@@ -23,6 +23,26 @@
reg = <0x80000000 0x80000000>;
};
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ video_engine_memory: video {
+ size = <0x04000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+
+ gfx_memory: framebuffer {
+ size = <0x01000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+ };
+
vcc_sdhci0: regulator-vcc-sdhci0 {
compatible = "regulator-fixed";
regulator-name = "SDHCI0 Vcc";
@@ -103,7 +123,7 @@
&mac0 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-rxid";
phy-handle = <&ethphy0>;
pinctrl-names = "default";
@@ -114,7 +134,7 @@
&mac1 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-rxid";
phy-handle = <&ethphy1>;
pinctrl-names = "default";
@@ -300,3 +320,18 @@
vqmmc-supply = <&vccq_sdhci1>;
clk-phase-sd-hs = <7>, <200>;
};
+
+&vhub {
+ status = "okay";
+ pinctrl-names = "default";
+};
+
+&video {
+ status = "okay";
+ memory-region = <&video_engine_memory>;
+};
+
+&gfx {
+ status = "okay";
+ memory-region = <&gfx_memory>;
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-bletchley.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-bletchley.dts
index eaf1bc261ee3..41d2b1535d9a 100644
--- a/arch/arm/boot/dts/aspeed-bmc-facebook-bletchley.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-facebook-bletchley.dts
@@ -51,26 +51,6 @@
};
};
- spi2_gpio: spi2-gpio {
- compatible = "spi-gpio";
- #address-cells = <1>;
- #size-cells = <0>;
-
- gpio-sck = <&gpio0 ASPEED_GPIO(X, 3) GPIO_ACTIVE_HIGH>;
- gpio-mosi = <&gpio0 ASPEED_GPIO(X, 4) GPIO_ACTIVE_HIGH>;
- gpio-miso = <&gpio0 ASPEED_GPIO(X, 5) GPIO_ACTIVE_HIGH>;
- num-chipselects = <1>;
- cs-gpios = <&gpio0 ASPEED_GPIO(X, 0) GPIO_ACTIVE_LOW>;
-
- flash@0 {
- reg = <0>;
- compatible = "jedec,spi-nor";
- m25p,fast-read;
- label = "pnor";
- spi-max-frequency = <100000000>;
- };
- };
-
switchphy: ethernet-phy@0 {
// Fixed link
};
@@ -242,6 +222,19 @@
};
};
+&spi2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi2_default>;
+
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "pnor";
+ spi-max-frequency = <50000000>;
+ };
+};
+
&i2c0 {
status = "okay";
ina230@45 {
@@ -260,6 +253,17 @@
reg = <0x4f>;
};
+ sled1_ioexp41: pca9536@41 {
+ compatible = "nxp,pca9536";
+ reg = <0x41>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "SLED1_SWD_MUX", "SLED1_XRES_SWD_N",
+ "SLED1_CLKREQ_N", "SLED1_PCIE_PWR_EN";
+ };
+
sled1_ioexp: pca9539@76 {
compatible = "nxp,pca9539";
reg = <0x76>;
@@ -310,6 +314,11 @@
op-sink-microwatt = <10000000>;
};
};
+
+ eeprom@54 {
+ compatible = "atmel,24c64";
+ reg = <0x54>;
+ };
};
&i2c1 {
@@ -330,6 +339,17 @@
reg = <0x4f>;
};
+ sled2_ioexp41: pca9536@41 {
+ compatible = "nxp,pca9536";
+ reg = <0x41>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "SLED2_SWD_MUX", "SLED2_XRES_SWD_N",
+ "SLED2_CLKREQ_N", "SLED2_PCIE_PWR_EN";
+ };
+
sled2_ioexp: pca9539@76 {
compatible = "nxp,pca9539";
reg = <0x76>;
@@ -380,6 +400,11 @@
op-sink-microwatt = <10000000>;
};
};
+
+ eeprom@54 {
+ compatible = "atmel,24c64";
+ reg = <0x54>;
+ };
};
&i2c2 {
@@ -400,6 +425,17 @@
reg = <0x4f>;
};
+ sled3_ioexp41: pca9536@41 {
+ compatible = "nxp,pca9536";
+ reg = <0x41>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "SLED3_SWD_MUX", "SLED3_XRES_SWD_N",
+ "SLED3_CLKREQ_N", "SLED3_PCIE_PWR_EN";
+ };
+
sled3_ioexp: pca9539@76 {
compatible = "nxp,pca9539";
reg = <0x76>;
@@ -450,6 +486,11 @@
op-sink-microwatt = <10000000>;
};
};
+
+ eeprom@54 {
+ compatible = "atmel,24c64";
+ reg = <0x54>;
+ };
};
&i2c3 {
@@ -470,6 +511,17 @@
reg = <0x4f>;
};
+ sled4_ioexp41: pca9536@41 {
+ compatible = "nxp,pca9536";
+ reg = <0x41>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "SLED4_SWD_MUX", "SLED4_XRES_SWD_N",
+ "SLED4_CLKREQ_N", "SLED4_PCIE_PWR_EN";
+ };
+
sled4_ioexp: pca9539@76 {
compatible = "nxp,pca9539";
reg = <0x76>;
@@ -520,6 +572,11 @@
op-sink-microwatt = <10000000>;
};
};
+
+ eeprom@54 {
+ compatible = "atmel,24c64";
+ reg = <0x54>;
+ };
};
&i2c4 {
@@ -540,6 +597,17 @@
reg = <0x4f>;
};
+ sled5_ioexp41: pca9536@41 {
+ compatible = "nxp,pca9536";
+ reg = <0x41>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "SLED5_SWD_MUX", "SLED5_XRES_SWD_N",
+ "SLED5_CLKREQ_N", "SLED5_PCIE_PWR_EN";
+ };
+
sled5_ioexp: pca9539@76 {
compatible = "nxp,pca9539";
reg = <0x76>;
@@ -590,6 +658,11 @@
op-sink-microwatt = <10000000>;
};
};
+
+ eeprom@54 {
+ compatible = "atmel,24c64";
+ reg = <0x54>;
+ };
};
&i2c5 {
@@ -610,6 +683,17 @@
reg = <0x4f>;
};
+ sled6_ioexp41: pca9536@41 {
+ compatible = "nxp,pca9536";
+ reg = <0x41>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "SLED6_SWD_MUX", "SLED6_XRES_SWD_N",
+ "SLED6_CLKREQ_N", "SLED6_PCIE_PWR_EN";
+ };
+
sled6_ioexp: pca9539@76 {
compatible = "nxp,pca9539";
reg = <0x76>;
@@ -660,6 +744,11 @@
op-sink-microwatt = <10000000>;
};
};
+
+ eeprom@54 {
+ compatible = "atmel,24c64";
+ reg = <0x54>;
+ };
};
&i2c6 {
@@ -732,6 +821,8 @@
compatible = "adi,adm1278";
reg = <0x11>;
shunt-resistor-micro-ohms = <300>;
+ adi,volt-curr-sample-average = <128>;
+ adi,power-sample-average = <128>;
};
tmp421@4c {
@@ -771,43 +862,55 @@
&gpio0 {
gpio-line-names =
/*A0-A7*/ "","","","","","","","",
- /*B0-B7*/ "","","SEL_SPI2_MUX","SPI2_MUX1",
- "SPI2_MUX2","SPI2_MUX3","","",
+ /*B0-B7*/ "FUSB302_SLED1_INT_N","FUSB302_SLED2_INT_N",
+ "SEL_SPI2_MUX","SPI2_MUX1",
+ "SPI2_MUX2","SPI2_MUX3",
+ "","FUSB302_SLED3_INT_N",
/*C0-C7*/ "","","","","","","","",
/*D0-D7*/ "","","","","","","","",
/*E0-E7*/ "","","","","","","","",
- /*F0-F7*/ "","","","","","","","",
- /*G0-G7*/ "BSM_FRU_WP","SWITCH_FRU_MUX","","",
+ /*F0-F7*/ "BMC_SLED1_STCK","BMC_SLED2_STCK",
+ "BMC_SLED3_STCK","BMC_SLED4_STCK",
+ "BMC_SLED5_STCK","BMC_SLED6_STCK",
+ "","",
+ /*G0-G7*/ "BSM_FRU_WP","SWITCH_FRU_MUX","","FM_SOL_UART_CH_SEL",
"PWRGD_P1V05_VDDCORE","PWRGD_P1V5_VDD","","",
/*H0-H7*/ "presence-riser1","presence-riser2",
"presence-sled1","presence-sled2",
"presence-sled3","presence-sled4",
"presence-sled5","presence-sled6",
- /*I0-I7*/ "REV_ID0","","REV_ID1","REV_ID2",
- "","BSM_FLASH_WP_STATUS","BMC_TPM_PRES","",
+ /*I0-I7*/ "REV_ID0","",
+ "REV_ID1","REV_ID2",
+ "","BSM_FLASH_WP_STATUS",
+ "BMC_TPM_PRES_N","FUSB302_SLED6_INT_N",
/*J0-J7*/ "","","","","","","","",
/*K0-K7*/ "","","","","","","","",
/*L0-L7*/ "","","","","","BMC_RTC_INT","","",
- /*M0-M7*/ "ALERT_SLED1","ALERT_SLED2",
- "ALERT_SLED3","ALERT_SLED4",
- "ALERT_SLED5","ALERT_SLED6",
- "P12V_AUX_ALERT1","",
- /*N0-N7*/ "","","","","","","","",
+ /*M0-M7*/ "ALERT_SLED1_N","ALERT_SLED2_N",
+ "ALERT_SLED3_N","ALERT_SLED4_N",
+ "ALERT_SLED5_N","ALERT_SLED6_N",
+ "","",
+ /*N0-N7*/ "LED_POSTCODE_0","LED_POSTCODE_1",
+ "LED_POSTCODE_2","LED_POSTCODE_3",
+ "LED_POSTCODE_4","LED_POSTCODE_5",
+			"LED_POSTCODE_6","LED_POSTCODE_7",
/*O0-O7*/ "","","","",
"","BOARD_ID0","BOARD_ID1","BOARD_ID2",
/*P0-P7*/ "","","","","","","","BMC_HEARTBEAT",
/*Q0-Q7*/ "","","","","","","","",
/*R0-R7*/ "","","","","","","","",
/*S0-S7*/ "","","","BAT_DETECT",
- "BMC_BT_WP0","BMC_BT_WP1","","",
+ "BMC_BT_WP0_N","BMC_BT_WP1_N","","FUSB302_SLED4_INT_N",
/*T0-T7*/ "","","","","","","","",
/*U0-U7*/ "","","","","","","","",
- /*V0-V7*/ "PWRGD_CNS_PSU","RST_BMC_MVL","","PSU_PRSNT",
+ /*V0-V7*/ "PWRGD_CNS_PSU","RST_BMC_MVL_N",
+ "P12V_AUX_ALERT1_N","PSU_PRSNT",
"USB2_SEL0_A","USB2_SEL1_A",
"USB2_SEL0_B","USB2_SEL1_B",
- /*W0-W7*/ "RST_FRONT_IOEXP","","","","","","","",
+ /*W0-W7*/ "RST_FRONT_IOEXP_N","","","","","","","",
/*X0-X7*/ "","","","","","","","",
- /*Y0-Y7*/ "BMC_SELF_HW_RST","BSM_PRSNT","BSM_FLASH_LATCH","",
+ /*Y0-Y7*/ "BMC_SELF_HW_RST","BSM_PRSNT_N",
+ "BSM_FLASH_LATCH_N","FUSB302_SLED5_INT_N",
"","","","",
/*Z0-Z7*/ "","","","","","","","";
};
@@ -834,7 +937,16 @@
&pinctrl_adc14_default &pinctrl_adc15_default>;
};
+&mdio0 {
+ status = "okay";
+ /* TODO: Add Marvell 88E6191X */
+};
+
&mdio3 {
status = "okay";
/* TODO: Add Marvell 88X3310 */
};
+
+&ehci0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
index 578f9e2fc7ed..382da7934eaa 100644
--- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
@@ -283,7 +283,7 @@
/*P0-P7*/ "","","","","led-pcieslot-power","","","",
/*Q0-Q7*/ "","","regulator-standby-faulted","","","","","",
/*R0-R7*/ "bmc-tpm-reset","power-chassis-control","power-chassis-good","","","I2C_FLASH_MICRO_N","","",
- /*S0-S7*/ "","","","","","","","",
+ /*S0-S7*/ "","","","","power-ffs-sync-history","","","",
/*T0-T7*/ "","","","","","","","",
/*U0-U7*/ "","","","","","","","",
/*V0-V7*/ "","BMC_3RESTART_ATTEMPT_P","","","","","","",
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
index 528b49e2c0f8..7213434695bf 100644
--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
@@ -265,7 +265,7 @@
/*Q0-Q7*/ "cfam-reset","","regulator-standby-faulted","","","","","",
/*R0-R7*/ "bmc-tpm-reset","power-chassis-control","power-chassis-good","","","","","",
/*S0-S7*/ "presence-ps0","presence-ps1","presence-ps2","presence-ps3",
- "","","","",
+ "power-ffs-sync-history","","","",
/*T0-T7*/ "","","","","","","","",
/*U0-U7*/ "","","","","","","","",
/*V0-V7*/ "","","","","","","","",
diff --git a/arch/arm/boot/dts/aspeed-bmc-nuvia-dc-scm.dts b/arch/arm/boot/dts/aspeed-bmc-nuvia-dc-scm.dts
new file mode 100644
index 000000000000..f4a97cfb0f23
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-bmc-nuvia-dc-scm.dts
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+
+/dts-v1/;
+
+#include "aspeed-g6.dtsi"
+
+/ {
+ model = "Nuvia DC-SCM BMC";
+ compatible = "nuvia,dc-scm-bmc", "aspeed,ast2600";
+
+ aliases {
+ serial4 = &uart5;
+ };
+
+ chosen {
+ stdout-path = &uart5;
+ bootargs = "console=ttyS4,115200n8";
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>;
+ };
+};
+
+&mdio3 {
+ status = "okay";
+
+ ethphy3: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+};
+
+&mac2 {
+ status = "okay";
+
+ /* Bootloader sets up the MAC to insert delay */
+ phy-mode = "rgmii";
+ phy-handle = <&ethphy3>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii3_default>;
+};
+
+&mac3 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii4_default>;
+
+ use-ncsi;
+};
+
+&rtc {
+ status = "okay";
+};
+
+&fmc {
+ status = "okay";
+
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+ spi-max-frequency = <133000000>;
+#include "openbmc-flash-layout-64.dtsi"
+ };
+
+ flash@1 {
+ status = "okay";
+ m25p,fast-read;
+ label = "alt-bmc";
+ spi-max-frequency = <133000000>;
+#include "openbmc-flash-layout-64-alt.dtsi"
+ };
+};
+
+&spi1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1_default>;
+
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bios";
+ spi-max-frequency = <133000000>;
+ };
+};
+
+&gpio0 {
+ gpio-line-names =
+ /*A0-A7*/ "","","","","","","","",
+ /*B0-B7*/ "BMC_FLASH_MUX_SEL","","","","","","","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "","","","","","","","",
+ /*E0-E7*/ "","","","","","","","",
+ /*F0-F7*/ "","","","","","","","",
+ /*G0-G7*/ "","","","","","","","",
+ /*H0-H7*/ "","","","","","","","",
+ /*I0-I7*/ "","","","","","","","",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "BMC_FWSPI_RST_N","","GPIO_1_BMC_3V3","","","","","",
+ /*O0-O7*/ "JTAG_MUX_A","JTAG_MUX_B","","","","","","",
+ /*P0-P7*/ "","","","","","","","",
+ /*Q0-Q7*/ "","","","","","","","",
+ /*R0-R7*/ "","","","","","","","",
+ /*S0-S7*/ "","","","","","","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","","","SCMFPGA_SPARE_GPIO1_3V3",
+ "SCMFPGA_SPARE_GPIO2_3V3","SCMFPGA_SPARE_GPIO3_3V3",
+ "SCMFPGA_SPARE_GPIO4_3V3","SCMFPGA_SPARE_GPIO5_3V3",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "","","","","","","","",
+ /*AA0-AA7*/ "","","","","","","","",
+ /*AB0-AB7*/ "","","","","","","","",
+ /*AC0-AC7*/ "","","","","","","","";
+};
+
+&gpio1 {
+ gpio-line-names =
+ /*A0-A7*/ "GPI_1_BMC_1V8","","","","","",
+ "SCMFPGA_SPARE_GPIO1_1V8","SCMFPGA_SPARE_GPIO2_1V8",
+ /*B0-B7*/ "SCMFPGA_SPARE_GPIO3_1V8","SCMFPGA_SPARE_GPIO4_1V8",
+ "SCMFPGA_SPARE_GPIO5_1V8","","","","","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "","BMC_SPI1_RST_N","BIOS_FLASH_MUX_SEL","",
+ "","TPM2_PIRQ_N","TPM2_RST_N","",
+ /*E0-E7*/ "","","","","","","","";
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&i2c4 {
+ status = "okay";
+};
+
+&i2c5 {
+ status = "okay";
+};
+
+&i2c6 {
+ status = "okay";
+};
+
+&i2c7 {
+ status = "okay";
+};
+
+&i2c8 {
+ status = "okay";
+};
+
+&i2c9 {
+ status = "okay";
+};
+
+&i2c10 {
+ status = "okay";
+};
+
+&i2c12 {
+ status = "okay";
+};
+
+&i2c13 {
+ status = "okay";
+};
+
+&i2c14 {
+ status = "okay";
+};
+
+&i2c15 {
+ status = "okay";
+};
+
+&vhub {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index fa8b581c3d6c..530491ae5eb2 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -54,8 +54,7 @@
ranges;
fmc: spi@1e620000 {
- reg = < 0x1e620000 0x94
- 0x20000000 0x10000000 >;
+ reg = <0x1e620000 0x94>, <0x20000000 0x10000000>;
#address-cells = <1>;
#size-cells = <0>;
compatible = "aspeed,ast2400-fmc";
@@ -65,34 +64,42 @@
flash@0 {
reg = < 0 >;
compatible = "jedec,spi-nor";
+ spi-rx-bus-width = <2>;
spi-max-frequency = <50000000>;
status = "disabled";
};
flash@1 {
reg = < 1 >;
compatible = "jedec,spi-nor";
+ spi-rx-bus-width = <2>;
+ spi-max-frequency = <50000000>;
status = "disabled";
};
flash@2 {
reg = < 2 >;
compatible = "jedec,spi-nor";
+ spi-rx-bus-width = <2>;
+ spi-max-frequency = <50000000>;
status = "disabled";
};
flash@3 {
reg = < 3 >;
compatible = "jedec,spi-nor";
+ spi-rx-bus-width = <2>;
+ spi-max-frequency = <50000000>;
status = "disabled";
};
flash@4 {
reg = < 4 >;
compatible = "jedec,spi-nor";
+ spi-rx-bus-width = <2>;
+ spi-max-frequency = <50000000>;
status = "disabled";
};
};
spi: spi@1e630000 {
- reg = < 0x1e630000 0x18
- 0x30000000 0x10000000 >;
+ reg = <0x1e630000 0x18>, <0x30000000 0x10000000>;
#address-cells = <1>;
#size-cells = <0>;
compatible = "aspeed,ast2400-spi";
@@ -102,6 +109,7 @@
reg = < 0 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index 4147b397c883..c89092c3905b 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -55,8 +55,7 @@
ranges;
fmc: spi@1e620000 {
- reg = < 0x1e620000 0xc4
- 0x20000000 0x10000000 >;
+ reg = <0x1e620000 0xc4>, <0x20000000 0x10000000>;
#address-cells = <1>;
#size-cells = <0>;
compatible = "aspeed,ast2500-fmc";
@@ -67,25 +66,27 @@
reg = < 0 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
flash@1 {
reg = < 1 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
flash@2 {
reg = < 2 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
};
spi1: spi@1e630000 {
- reg = < 0x1e630000 0xc4
- 0x30000000 0x08000000 >;
+ reg = <0x1e630000 0xc4>, <0x30000000 0x08000000>;
#address-cells = <1>;
#size-cells = <0>;
compatible = "aspeed,ast2500-spi";
@@ -95,19 +96,20 @@
reg = < 0 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
flash@1 {
reg = < 1 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
};
spi2: spi@1e631000 {
- reg = < 0x1e631000 0xc4
- 0x38000000 0x08000000 >;
+ reg = <0x1e631000 0xc4>, <0x38000000 0x08000000>;
#address-cells = <1>;
#size-cells = <0>;
compatible = "aspeed,ast2500-spi";
@@ -117,12 +119,14 @@
reg = < 0 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
flash@1 {
reg = < 1 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi
index 3c1011678ce6..6660564855ff 100644
--- a/arch/arm/boot/dts/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed-g6.dtsi
@@ -95,8 +95,7 @@
};
fmc: spi@1e620000 {
- reg = < 0x1e620000 0xc4
- 0x20000000 0x10000000 >;
+ reg = <0x1e620000 0xc4>, <0x20000000 0x10000000>;
#address-cells = <1>;
#size-cells = <0>;
compatible = "aspeed,ast2600-fmc";
@@ -107,25 +106,27 @@
reg = < 0 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
flash@1 {
reg = < 1 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
flash@2 {
reg = < 2 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
};
spi1: spi@1e630000 {
- reg = < 0x1e630000 0xc4
- 0x30000000 0x10000000 >;
+ reg = <0x1e630000 0xc4>, <0x30000000 0x10000000>;
#address-cells = <1>;
#size-cells = <0>;
compatible = "aspeed,ast2600-spi";
@@ -135,19 +136,20 @@
reg = < 0 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
flash@1 {
reg = < 1 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
};
spi2: spi@1e631000 {
- reg = < 0x1e631000 0xc4
- 0x50000000 0x10000000 >;
+ reg = <0x1e631000 0xc4>, <0x50000000 0x10000000>;
#address-cells = <1>;
#size-cells = <0>;
compatible = "aspeed,ast2600-spi";
@@ -157,18 +159,21 @@
reg = < 0 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
flash@1 {
reg = < 1 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
flash@2 {
reg = < 2 >;
compatible = "jedec,spi-nor";
spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
status = "disabled";
};
};
@@ -302,6 +307,16 @@
status = "disabled";
};
+ udc: usb@1e6a2000 {
+ compatible = "aspeed,ast2600-udc";
+ reg = <0x1e6a2000 0x300>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&syscon ASPEED_CLK_GATE_USBPORT2CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb2bd_default>;
+ status = "disabled";
+ };
+
apb {
compatible = "simple-bus";
#address-cells = <1>;
@@ -355,6 +370,17 @@
quality = <100>;
};
+ gfx: display@1e6e6000 {
+ compatible = "aspeed,ast2600-gfx", "syscon";
+ reg = <0x1e6e6000 0x1000>;
+ reg-io-width = <4>;
+ clocks = <&syscon ASPEED_CLK_GATE_D1CLK>;
+ resets = <&syscon ASPEED_RESET_GRAPHICS>;
+ syscon = <&syscon>;
+ status = "disabled";
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
xdma: xdma@1e6e7000 {
compatible = "aspeed,ast2600-xdma";
reg = <0x1e6e7000 0x100>;
diff --git a/arch/arm/boot/dts/at91-sama7g5ek.dts b/arch/arm/boot/dts/at91-sama7g5ek.dts
index d83f76a6cd6a..103544620fd7 100644
--- a/arch/arm/boot/dts/at91-sama7g5ek.dts
+++ b/arch/arm/boot/dts/at91-sama7g5ek.dts
@@ -14,6 +14,7 @@
#include <dt-bindings/mfd/atmel-flexcom.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/pinctrl/at91.h>
+#include <dt-bindings/sound/microchip,pdmc.h>
/ {
model = "Microchip SAMA7G5-EK";
@@ -456,7 +457,7 @@
&pinctrl_gmac1_mdio_default
&pinctrl_gmac1_phy_irq>;
phy-mode = "rmii";
- status = "okay";
+ status = "okay"; /* Conflict with pdmc0. */
ethernet-phy@0 {
reg = <0x0>;
@@ -470,6 +471,17 @@
pinctrl-0 = <&pinctrl_i2s0_default>;
};
+&pdmc0 {
+ #sound-dai-cells = <0>;
+ microchip,mic-pos = <MCHP_PDMC_DS0 MCHP_PDMC_CLK_NEGATIVE>, /* MIC 1 */
+ <MCHP_PDMC_DS1 MCHP_PDMC_CLK_NEGATIVE>, /* MIC 2 */
+ <MCHP_PDMC_DS0 MCHP_PDMC_CLK_POSITIVE>, /* MIC 3 */
+ <MCHP_PDMC_DS1 MCHP_PDMC_CLK_POSITIVE>; /* MIC 4 */
+ status = "disabled"; /* Conflict with gmac1. */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pdmc0_default>;
+};
+
&pioA {
pinctrl_can0_default: can0_default {
@@ -639,6 +651,13 @@
bias-disable;
};
+ pinctrl_pdmc0_default: pdmc0_default {
+ pinmux = <PIN_PD23__PDMC0_DS0>,
+ <PIN_PD24__PDMC0_DS1>,
+ <PIN_PD22__PDMC0_CLK>;
+			bias-disable;
+ };
+
pinctrl_qspi: qspi {
pinmux = <PIN_PB12__QSPI0_IO0>,
<PIN_PB11__QSPI0_IO1>,
diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts
index 8f11c0b7d76d..6fb4fe49cf1c 100644
--- a/arch/arm/boot/dts/at91sam9261ek.dts
+++ b/arch/arm/boot/dts/at91sam9261ek.dts
@@ -178,6 +178,10 @@
status = "okay";
};
+ rtc@fffffd20 {
+ atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+ };
+
watchdog@fffffd40 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/at91sam9263ek.dts b/arch/arm/boot/dts/at91sam9263ek.dts
index 42e734020235..e732565913a4 100644
--- a/arch/arm/boot/dts/at91sam9263ek.dts
+++ b/arch/arm/boot/dts/at91sam9263ek.dts
@@ -102,6 +102,10 @@
};
};
+ rtc@fffffd20 {
+ atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+ };
+
watchdog@fffffd40 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/at91sam9rlek.dts b/arch/arm/boot/dts/at91sam9rlek.dts
index d74b8d9d84aa..ddaadfec6751 100644
--- a/arch/arm/boot/dts/at91sam9rlek.dts
+++ b/arch/arm/boot/dts/at91sam9rlek.dts
@@ -212,6 +212,10 @@
status = "okay";
};
+ rtc@fffffd20 {
+ atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+ };
+
rtc@fffffe00 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
index c3942b4e82ad..0386376fa486 100644
--- a/arch/arm/boot/dts/da850.dtsi
+++ b/arch/arm/boot/dts/da850.dtsi
@@ -679,7 +679,9 @@
"scheduler", "queuemgr";
interrupts = <58>;
#dma-cells = <2>;
+ /* For backwards compatibility: */
#dma-channels = <4>;
+ dma-channels = <4>;
power-domains = <&psc1 1>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/hpe-bmc-dl360gen10.dts b/arch/arm/boot/dts/hpe-bmc-dl360gen10.dts
new file mode 100644
index 000000000000..3a7382ce40ef
--- /dev/null
+++ b/arch/arm/boot/dts/hpe-bmc-dl360gen10.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree file for HPE DL360Gen10
+ */
+
+/include/ "hpe-gxp.dtsi"
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "hpe,gxp-dl360gen10", "hpe,gxp";
+ model = "Hewlett Packard Enterprise ProLiant dl360 Gen10";
+
+ aliases {
+ serial0 = &uartc;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x40000000 0x20000000>;
+ };
+};
diff --git a/arch/arm/boot/dts/hpe-gxp.dtsi b/arch/arm/boot/dts/hpe-gxp.dtsi
new file mode 100644
index 000000000000..cf735b3c4f35
--- /dev/null
+++ b/arch/arm/boot/dts/hpe-gxp.dtsi
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree file for HPE GXP
+ */
+
+/dts-v1/;
+/ {
+ model = "Hewlett Packard Enterprise GXP BMC";
+ compatible = "hpe,gxp";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a9";
+ reg = <0>;
+ device_type = "cpu";
+ next-level-cache = <&L2>;
+ };
+ };
+
+ clocks {
+ pll: clock-0 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1600000000>;
+ };
+
+ iopclk: clock-1 {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <0>;
+ clock-div = <4>;
+ clock-mult = <1>;
+ clocks = <&pll>;
+ };
+ };
+
+ axi {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ dma-ranges;
+
+ L2: cache-controller@b0040000 {
+ compatible = "arm,pl310-cache";
+ reg = <0xb0040000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+ };
+
+ ahb@c0000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xc0000000 0x30000000>;
+ dma-ranges;
+
+ vic0: interrupt-controller@eff0000 {
+ compatible = "arm,pl192-vic";
+ reg = <0xeff0000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ vic1: interrupt-controller@80f00000 {
+ compatible = "arm,pl192-vic";
+ reg = <0x80f00000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ uarta: serial@e0 {
+ compatible = "ns16550a";
+ reg = <0xe0 0x8>;
+ interrupts = <17>;
+ interrupt-parent = <&vic0>;
+ clock-frequency = <1846153>;
+ reg-shift = <0>;
+ };
+
+ uartb: serial@e8 {
+ compatible = "ns16550a";
+ reg = <0xe8 0x8>;
+ interrupts = <18>;
+ interrupt-parent = <&vic0>;
+ clock-frequency = <1846153>;
+ reg-shift = <0>;
+ };
+
+ uartc: serial@f0 {
+ compatible = "ns16550a";
+ reg = <0xf0 0x8>;
+ interrupts = <19>;
+ interrupt-parent = <&vic0>;
+ clock-frequency = <1846153>;
+ reg-shift = <0>;
+ };
+
+ usb0: usb@efe0000 {
+ compatible = "hpe,gxp-ehci", "generic-ehci";
+ reg = <0xefe0000 0x100>;
+ interrupts = <7>;
+ interrupt-parent = <&vic0>;
+ };
+
+ st: timer@80 {
+ compatible = "hpe,gxp-timer";
+ reg = <0x80 0x16>;
+ interrupts = <0>;
+ interrupt-parent = <&vic0>;
+ clocks = <&iopclk>;
+ clock-names = "iop";
+ };
+
+ usb1: usb@efe0100 {
+ compatible = "hpe,gxp-ohci", "generic-ohci";
+ reg = <0xefe0100 0x110>;
+ interrupts = <6>;
+ interrupt-parent = <&vic0>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi
index 46984d4c5224..987d792f67ea 100644
--- a/arch/arm/boot/dts/mmp2.dtsi
+++ b/arch/arm/boot/dts/mmp2.dtsi
@@ -275,7 +275,9 @@
compatible = "marvell,pdma-1.0";
reg = <0xd4000000 0x10000>;
interrupts = <48>;
+ /* For backwards compatibility: */
#dma-channels = <16>;
+ dma-channels = <16>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/pxa25x.dtsi b/arch/arm/boot/dts/pxa25x.dtsi
index a248bf038033..5f8300e356ad 100644
--- a/arch/arm/boot/dts/pxa25x.dtsi
+++ b/arch/arm/boot/dts/pxa25x.dtsi
@@ -38,9 +38,12 @@
compatible = "marvell,pdma-1.0";
reg = <0x40000000 0x10000>;
interrupts = <25>;
- #dma-channels = <16>;
#dma-cells = <2>;
+ /* For backwards compatibility: */
+ #dma-channels = <16>;
+ dma-channels = <16>;
#dma-requests = <40>;
+ dma-requests = <40>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/pxa27x.dtsi b/arch/arm/boot/dts/pxa27x.dtsi
index ccbecad9c5c7..a2cbfb3be609 100644
--- a/arch/arm/boot/dts/pxa27x.dtsi
+++ b/arch/arm/boot/dts/pxa27x.dtsi
@@ -12,9 +12,12 @@
compatible = "marvell,pdma-1.0";
reg = <0x40000000 0x10000>;
interrupts = <25>;
- #dma-channels = <32>;
#dma-cells = <2>;
+ /* For backwards compatibility: */
+ #dma-channels = <32>;
+ dma-channels = <32>;
#dma-requests = <75>;
+ dma-requests = <75>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
index d19674812cd2..f9c216f91865 100644
--- a/arch/arm/boot/dts/pxa3xx.dtsi
+++ b/arch/arm/boot/dts/pxa3xx.dtsi
@@ -122,9 +122,12 @@
compatible = "marvell,pdma-1.0";
reg = <0x40000000 0x10000>;
interrupts = <25>;
- #dma-channels = <32>;
#dma-cells = <2>;
+ /* For backwards compatibility: */
+ #dma-channels = <32>;
+ dma-channels = <32>;
#dma-requests = <100>;
+ dma-requests = <100>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index 9d5e934f2272..c5da723f7674 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -646,7 +646,7 @@
clocks = <&gcc GCC_USB3_MASTER_CLK>,
<&gcc GCC_USB3_SLEEP_CLK>,
<&gcc GCC_USB3_MOCK_UTMI_CLK>;
- clock-names = "master", "sleep", "mock_utmi";
+ clock-names = "core", "sleep", "mock_utmi";
ranges;
status = "disabled";
diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi
index 123390721b7f..1c2b208a5670 100644
--- a/arch/arm/boot/dts/qcom-sdx55.dtsi
+++ b/arch/arm/boot/dts/qcom-sdx55.dtsi
@@ -483,10 +483,13 @@
clocks = <&gcc GCC_USB30_SLV_AHB_CLK>,
<&gcc GCC_USB30_MASTER_CLK>,
<&gcc GCC_USB30_MSTR_AXI_CLK>,
- <&gcc GCC_USB30_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_SLEEP_CLK>;
- clock-names = "cfg_noc", "core", "iface", "mock_utmi",
- "sleep";
+ <&gcc GCC_USB30_SLEEP_CLK>,
+ <&gcc GCC_USB30_MOCK_UTMI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_MASTER_CLK>;
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index 242ce42fbd87..9b0f0497567d 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -330,6 +330,8 @@
cru: clock-controller@20000000 {
compatible = "rockchip,rk3036-cru";
reg = <0x20000000 0x1000>;
+ clocks = <&xin24m>;
+ clock-names = "xin24m";
rockchip,grf = <&grf>;
#clock-cells = <1>;
#reset-cells = <1>;
diff --git a/arch/arm/boot/dts/rk3066a.dtsi b/arch/arm/boot/dts/rk3066a.dtsi
index c25b9695db4b..de9915d946f7 100644
--- a/arch/arm/boot/dts/rk3066a.dtsi
+++ b/arch/arm/boot/dts/rk3066a.dtsi
@@ -202,8 +202,9 @@
cru: clock-controller@20000000 {
compatible = "rockchip,rk3066a-cru";
reg = <0x20000000 0x1000>;
+ clocks = <&xin24m>;
+ clock-names = "xin24m";
rockchip,grf = <&grf>;
-
#clock-cells = <1>;
#reset-cells = <1>;
assigned-clocks = <&cru PLL_CPLL>, <&cru PLL_GPLL>,
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
index a94321e90014..cdd4a0bd5133 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -195,8 +195,9 @@
cru: clock-controller@20000000 {
compatible = "rockchip,rk3188-cru";
reg = <0x20000000 0x1000>;
+ clocks = <&xin24m>;
+ clock-names = "xin24m";
rockchip,grf = <&grf>;
-
#clock-cells = <1>;
#reset-cells = <1>;
};
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index 6513ffcaac92..ffc16d6b97e1 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -484,6 +484,8 @@
cru: clock-controller@110e0000 {
compatible = "rockchip,rk3228-cru";
reg = <0x110e0000 0x1000>;
+ clocks = <&xin24m>;
+ clock-names = "xin24m";
rockchip,grf = <&grf>;
#clock-cells = <1>;
#reset-cells = <1>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 26b9bbe310af..487b0e03d4b4 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -862,6 +862,8 @@
cru: clock-controller@ff760000 {
compatible = "rockchip,rk3288-cru";
reg = <0x0 0xff760000 0x0 0x1000>;
+ clocks = <&xin24m>;
+ clock-names = "xin24m";
rockchip,grf = <&grf>;
#clock-cells = <1>;
#reset-cells = <1>;
diff --git a/arch/arm/boot/dts/rv1108.dtsi b/arch/arm/boot/dts/rv1108.dtsi
index 448254906452..c158a7ea86ec 100644
--- a/arch/arm/boot/dts/rv1108.dtsi
+++ b/arch/arm/boot/dts/rv1108.dtsi
@@ -85,24 +85,6 @@
#clock-cells = <0>;
};
- amba: bus {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- pdma: pdma@102a0000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x102a0000 0x4000>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
- #dma-cells = <1>;
- arm,pl330-broken-no-flushp;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC>;
- clock-names = "apb_pclk";
- };
- };
-
bus_intmem: sram@10080000 {
compatible = "mmio-sram";
reg = <0x10080000 0x2000>;
@@ -259,6 +241,17 @@
status = "disabled";
};
+ pdma: dma-controller@102a0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x102a0000 0x4000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ arm,pl330-broken-no-flushp;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC>;
+ clock-names = "apb_pclk";
+ };
+
grf: syscon@10300000 {
compatible = "rockchip,rv1108-grf", "syscon", "simple-mfd";
reg = <0x10300000 0x1000>;
@@ -456,6 +449,8 @@
cru: clock-controller@20200000 {
compatible = "rockchip,rv1108-cru";
reg = <0x20200000 0x1000>;
+ clocks = <&xin24m>;
+ clock-names = "xin24m";
rockchip,grf = <&grf>;
#clock-cells = <1>;
#reset-cells = <1>;
diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi
index 998629a3c34f..c328b67bea0c 100644
--- a/arch/arm/boot/dts/sam9x60.dtsi
+++ b/arch/arm/boot/dts/sam9x60.dtsi
@@ -684,7 +684,7 @@
status = "disabled";
};
- rtt: rtt@fffffe20 {
+ rtt: rtc@fffffe20 {
compatible = "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
reg = <0xfffffe20 0x20>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
diff --git a/arch/arm/boot/dts/sama7g5.dtsi b/arch/arm/boot/dts/sama7g5.dtsi
index b63263129692..a37e3a80392d 100644
--- a/arch/arm/boot/dts/sama7g5.dtsi
+++ b/arch/arm/boot/dts/sama7g5.dtsi
@@ -209,7 +209,7 @@
status = "disabled";
};
- rtt: rtt@e001d020 {
+ rtt: rtc@e001d020 {
compatible = "microchip,sama7g5-rtt", "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
reg = <0xe001d020 0x30>;
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
@@ -463,6 +463,30 @@
status = "disabled";
};
+ pdmc0: sound@e1608000 {
+ compatible = "microchip,sama7g5-pdmc";
+ reg = <0xe1608000 0x1000>;
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ #sound-dai-cells = <0>;
+ dmas = <&dma0 AT91_XDMAC_DT_PERID(37)>;
+ dma-names = "rx";
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 68>, <&pmc PMC_TYPE_GCK 68>;
+ clock-names = "pclk", "gclk";
+ status = "disabled";
+ };
+
+ pdmc1: sound@e160c000 {
+ compatible = "microchip,sama7g5-pdmc";
+ reg = <0xe160c000 0x1000>;
+ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ #sound-dai-cells = <0>;
+ dmas = <&dma0 AT91_XDMAC_DT_PERID(38)>;
+ dma-names = "rx";
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 69>, <&pmc PMC_TYPE_GCK 69>;
+ clock-names = "pclk", "gclk";
+ status = "disabled";
+ };
+
spdifrx: spdifrx@e1614000 {
#sound-dai-cells = <0>;
compatible = "microchip,sama7g5-spdifrx";
diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts b/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
index 2a745522404d..11ccdc6c2dc6 100644
--- a/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
+++ b/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
@@ -9,7 +9,7 @@
&qspi {
status = "okay";
- flash0: n25q00@0 {
+ flash0: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "micron,mt25qu02g", "jedec,spi-nor";
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
index 253ef139181d..b2241205c7a9 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
@@ -121,7 +121,7 @@
&qspi {
status = "okay";
- flash0: n25q00@0 {
+ flash0: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "micron,mt25qu02g", "jedec,spi-nor";
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
index b0003f350e65..2564671fc1c6 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
@@ -113,7 +113,7 @@
&qspi {
status = "okay";
- flash0: n25q512a@0 {
+ flash0: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "micron,n25q512a", "jedec,spi-nor";
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
index 25874e1b9c82..f24f17c2f5ee 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
@@ -221,7 +221,7 @@
&qspi {
status = "okay";
- n25q128@0 {
+ flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "micron,n25q128", "jedec,spi-nor";
@@ -238,7 +238,7 @@
cdns,tslch-ns = <4>;
};
- n25q00@1 {
+ flash@1 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "micron,mt25qu02g", "jedec,spi-nor";
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 24d21ba63030..da30a4d4f35c 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -23,7 +23,6 @@
#include <linux/spinlock.h>
#include <linux/io.h>
-#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 5367f03beb46..2343e2b6214d 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -26,13 +26,16 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <mach/hardware.h>
#include <asm/mach/irq.h>
#include <asm/mach-types.h>
#include <linux/sizes.h>
#include <asm/hardware/sa1111.h>
+#ifdef CONFIG_ARCH_SA1100
+#include <mach/hardware.h>
+#endif
+
/* SA1111 IRQs */
#define IRQ_GPAIN0 (0)
#define IRQ_GPAIN1 (1)
diff --git a/arch/arm/configs/am200epdkit_defconfig b/arch/arm/configs/am200epdkit_defconfig
index 4e49d6cb2f62..9252ce0e722b 100644
--- a/arch/arm/configs/am200epdkit_defconfig
+++ b/arch/arm/configs/am200epdkit_defconfig
@@ -10,6 +10,7 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_ARCH_GUMSTIX=y
CONFIG_PCCARD=y
diff --git a/arch/arm/configs/cm_x300_defconfig b/arch/arm/configs/cm_x300_defconfig
index 45769d0ddd4e..bb0fcd82d2a7 100644
--- a/arch/arm/configs/cm_x300_defconfig
+++ b/arch/arm/configs/cm_x300_defconfig
@@ -10,6 +10,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_GPIO_PCA953X=y
CONFIG_MACH_CM_X300=y
diff --git a/arch/arm/configs/colibri_pxa270_defconfig b/arch/arm/configs/colibri_pxa270_defconfig
index 52bad9a544a0..b29898fd6a12 100644
--- a/arch/arm/configs/colibri_pxa270_defconfig
+++ b/arch/arm/configs/colibri_pxa270_defconfig
@@ -16,6 +16,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_COLIBRI=y
CONFIG_PREEMPT=y
diff --git a/arch/arm/configs/colibri_pxa300_defconfig b/arch/arm/configs/colibri_pxa300_defconfig
index 26e5a67f8e2d..f9d110294644 100644
--- a/arch/arm/configs/colibri_pxa300_defconfig
+++ b/arch/arm/configs/colibri_pxa300_defconfig
@@ -1,6 +1,7 @@
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_COLIBRI300=y
CONFIG_AEABI=y
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig
index 15b749f6996d..96c677c98bc7 100644
--- a/arch/arm/configs/corgi_defconfig
+++ b/arch/arm/configs/corgi_defconfig
@@ -9,6 +9,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_PXA_SHARPSL=y
CONFIG_MACH_POODLE=y
diff --git a/arch/arm/configs/eseries_pxa_defconfig b/arch/arm/configs/eseries_pxa_defconfig
index 046f4dc2e18e..2146adc1825e 100644
--- a/arch/arm/configs/eseries_pxa_defconfig
+++ b/arch/arm/configs/eseries_pxa_defconfig
@@ -9,6 +9,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_ARCH_PXA_ESERIES=y
# CONFIG_ARM_THUMB is not set
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index 0788a892e160..5d000c8be44e 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -14,6 +14,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_PXA_EZX=y
CONFIG_NO_HZ=y
diff --git a/arch/arm/configs/h5000_defconfig b/arch/arm/configs/h5000_defconfig
index f5a338fefda8..a67d6020aee5 100644
--- a/arch/arm/configs/h5000_defconfig
+++ b/arch/arm/configs/h5000_defconfig
@@ -10,6 +10,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_H5000=y
CONFIG_AEABI=y
diff --git a/arch/arm/configs/lpd270_defconfig b/arch/arm/configs/lpd270_defconfig
index 3a4d0e64cd6e..5c0a671ed294 100644
--- a/arch/arm/configs/lpd270_defconfig
+++ b/arch/arm/configs/lpd270_defconfig
@@ -2,6 +2,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SLAB=y
CONFIG_MODULES=y
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_LOGICPD_PXA270=y
# CONFIG_ARM_THUMB is not set
diff --git a/arch/arm/configs/lubbock_defconfig b/arch/arm/configs/lubbock_defconfig
index 4ce2da2e76fa..cf49dc1629a7 100644
--- a/arch/arm/configs/lubbock_defconfig
+++ b/arch/arm/configs/lubbock_defconfig
@@ -1,6 +1,7 @@
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_MODULES=y
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_ARCH_LUBBOCK=y
# CONFIG_ARM_THUMB is not set
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
index abde1fb23b20..13da808ffa13 100644
--- a/arch/arm/configs/magician_defconfig
+++ b/arch/arm/configs/magician_defconfig
@@ -9,6 +9,7 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_H4700=y
CONFIG_MACH_MAGICIAN=y
diff --git a/arch/arm/configs/mainstone_defconfig b/arch/arm/configs/mainstone_defconfig
index 26499b697f9f..03b4c61bdadd 100644
--- a/arch/arm/configs/mainstone_defconfig
+++ b/arch/arm/configs/mainstone_defconfig
@@ -1,6 +1,7 @@
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_MODULES=y
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_MAINSTONE=y
# CONFIG_ARM_THUMB is not set
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 547918170f1f..ce9826bce29b 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -17,6 +17,7 @@ CONFIG_SOC_SAMA5D2=y
CONFIG_SOC_SAMA5D3=y
CONFIG_SOC_SAMA5D4=y
CONFIG_SOC_SAMA7G5=y
+CONFIG_SOC_LAN966=y
CONFIG_ARCH_BCM=y
CONFIG_ARCH_BCM_CYGNUS=y
CONFIG_ARCH_BCM_HR2=y
@@ -43,6 +44,8 @@ CONFIG_ARCH_HI3xxx=y
CONFIG_ARCH_HIP01=y
CONFIG_ARCH_HIP04=y
CONFIG_ARCH_HIX5HD2=y
+CONFIG_ARCH_HPE=y
+CONFIG_ARCH_HPE_GXP=y
CONFIG_ARCH_MXC=y
CONFIG_SOC_IMX50=y
CONFIG_SOC_IMX51=y
@@ -277,6 +280,7 @@ CONFIG_MV643XX_ETH=y
CONFIG_MVNETA=y
CONFIG_PXA168_ETH=m
CONFIG_KS8851=y
+CONFIG_LAN966X_SWITCH=m
CONFIG_R8169=y
CONFIG_SH_ETH=y
CONFIG_SMSC911X=y
@@ -287,6 +291,7 @@ CONFIG_TI_CPSW=y
CONFIG_TI_CPSW_SWITCHDEV=y
CONFIG_TI_CPTS=y
CONFIG_XILINX_EMACLITE=y
+CONFIG_SFP=m
CONFIG_BROADCOM_PHY=y
CONFIG_ICPLUS_PHY=y
CONFIG_MARVELL_PHY=y
@@ -294,6 +299,7 @@ CONFIG_AT803X_PHY=y
CONFIG_ROCKCHIP_PHY=y
CONFIG_DP83867_PHY=y
CONFIG_USB_BRCMSTB=m
+CONFIG_MDIO_MSCC_MIIM=m
CONFIG_USB_PEGASUS=y
CONFIG_USB_RTL8152=m
CONFIG_USB_LAN78XX=m
@@ -430,6 +436,7 @@ CONFIG_I2C_CROS_EC_TUNNEL=m
CONFIG_I2C_SLAVE_EEPROM=y
CONFIG_SPI=y
CONFIG_SPI_ATMEL=m
+CONFIG_SPI_ATMEL_QUADSPI=m
CONFIG_SPI_BCM2835=y
CONFIG_SPI_BCM2835AUX=y
CONFIG_SPI_CADENCE=y
@@ -459,6 +466,8 @@ CONFIG_SPMI=y
CONFIG_PTP_1588_CLOCK=y
CONFIG_PINCTRL_AS3722=y
CONFIG_PINCTRL_STMFX=y
+CONFIG_PINCTRL_MICROCHIP_SGPIO=y
+CONFIG_PINCTRL_OCELOT=y
CONFIG_PINCTRL_PALMAS=y
CONFIG_PINCTRL_OWL=y
CONFIG_PINCTRL_S500=y
@@ -517,6 +526,7 @@ CONFIG_CHARGER_TPS65090=y
CONFIG_SENSORS_ARM_SCMI=y
CONFIG_SENSORS_ASPEED=m
CONFIG_SENSORS_IIO_HWMON=y
+CONFIG_SENSORS_LAN966X=m
CONFIG_SENSORS_LM90=y
CONFIG_SENSORS_LM95245=y
CONFIG_SENSORS_NTC_THERMISTOR=m
@@ -563,6 +573,7 @@ CONFIG_BCM47XX_WDT=y
CONFIG_BCM2835_WDT=y
CONFIG_BCM_KONA_WDT=y
CONFIG_BCM7038_WDT=m
+CONFIG_GXP_WATCHDOG=y
CONFIG_BCMA_HOST_SOC=y
CONFIG_BCMA_DRIVER_GMAC_CMN=y
CONFIG_BCMA_DRIVER_GPIO=y
@@ -767,6 +778,8 @@ CONFIG_SND_ATMEL_SOC_WM8904=m
CONFIG_SND_ATMEL_SOC_PDMIC=m
CONFIG_SND_ATMEL_SOC_I2S=m
CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_IMX_SOC=m
+CONFIG_SND_SOC_FSL_ASOC_CARD=m
CONFIG_SND_SOC_FSL_SAI=m
CONFIG_SND_PXA_SOC_SSP=m
CONFIG_SND_MMP_SOC_SSPA=m
@@ -1018,6 +1031,7 @@ CONFIG_CROS_EC_SPI=m
CONFIG_COMMON_CLK_MAX77686=y
CONFIG_COMMON_CLK_RK808=m
CONFIG_COMMON_CLK_SCMI=y
+CONFIG_COMMON_CLK_LAN966X=y
CONFIG_COMMON_CLK_S2MPS11=m
CONFIG_CLK_RASPBERRYPI=y
CONFIG_COMMON_CLK_QCOM=y
@@ -1145,6 +1159,7 @@ CONFIG_PWM_SUN4I=y
CONFIG_PWM_TEGRA=y
CONFIG_PWM_VT8500=y
CONFIG_KEYSTONE_IRQ=y
+CONFIG_RESET_MCHP_SPARX5=y
CONFIG_PHY_SUN4I_USB=y
CONFIG_PHY_SUN9I_USB=y
CONFIG_PHY_HIX5HD2_SATA=y
@@ -1152,6 +1167,7 @@ CONFIG_PHY_BERLIN_SATA=y
CONFIG_PHY_BERLIN_USB=y
CONFIG_PHY_BRCM_USB=m
CONFIG_PHY_MMP3_USB=m
+CONFIG_PHY_LAN966X_SERDES=m
CONFIG_PHY_CPCAP_USB=m
CONFIG_PHY_QCOM_APQ8064_SATA=m
CONFIG_PHY_QCOM_USB_HS=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 3148567b66b6..14c17a218ec5 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -17,6 +17,9 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_ARCH_MULTI_V4T=y
+CONFIG_ARCH_MULTI_V5=y
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_OMAP=y
CONFIG_ARCH_OMAP1=y
CONFIG_OMAP_RESET_CLOCKS=y
diff --git a/arch/arm/configs/palmz72_defconfig b/arch/arm/configs/palmz72_defconfig
index b47c8abe85bc..e6acb1d588e2 100644
--- a/arch/arm/configs/palmz72_defconfig
+++ b/arch/arm/configs/palmz72_defconfig
@@ -7,6 +7,7 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_ARCH_PXA_PALM=y
# CONFIG_MACH_PALMTX is not set
diff --git a/arch/arm/configs/pcm027_defconfig b/arch/arm/configs/pcm027_defconfig
index e97a158081fc..106d5bef48e2 100644
--- a/arch/arm/configs/pcm027_defconfig
+++ b/arch/arm/configs/pcm027_defconfig
@@ -13,6 +13,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_PCM027=y
CONFIG_MACH_PCM990_BASEBOARD=y
diff --git a/arch/arm/configs/pxa255-idp_defconfig b/arch/arm/configs/pxa255-idp_defconfig
index 4a383afa5e87..5663245e9534 100644
--- a/arch/arm/configs/pxa255-idp_defconfig
+++ b/arch/arm/configs/pxa255-idp_defconfig
@@ -1,6 +1,7 @@
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_MODULES=y
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_ARCH_PXA_IDP=y
# CONFIG_ARM_THUMB is not set
diff --git a/arch/arm/configs/pxa3xx_defconfig b/arch/arm/configs/pxa3xx_defconfig
index f0c34017f2aa..228d4271748b 100644
--- a/arch/arm/configs/pxa3xx_defconfig
+++ b/arch/arm/configs/pxa3xx_defconfig
@@ -6,6 +6,7 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_LITTLETON=y
CONFIG_MACH_TAVOREVB=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 29b1f192afbb..1db70dfd32d2 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -23,6 +23,7 @@ CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_LDM_PARTITION=y
CONFIG_CMDLINE_PARTITION=y
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_ARCH_LUBBOCK=y
CONFIG_MACH_MAINSTONE=y
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index f42c7a502b6e..43d079ee342a 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -9,6 +9,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_PXA_SHARPSL=y
CONFIG_MACH_AKITA=y
diff --git a/arch/arm/configs/trizeps4_defconfig b/arch/arm/configs/trizeps4_defconfig
index d66f0c287d41..baeba4667e9b 100644
--- a/arch/arm/configs/trizeps4_defconfig
+++ b/arch/arm/configs/trizeps4_defconfig
@@ -14,6 +14,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_TRIZEPS_PXA=y
CONFIG_MACH_TRIZEPS4=y
diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig
index c28539bfd128..7c1029716ea5 100644
--- a/arch/arm/configs/viper_defconfig
+++ b/arch/arm/configs/viper_defconfig
@@ -9,6 +9,7 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_ARCH_VIPER=y
CONFIG_IWMMXT=y
diff --git a/arch/arm/configs/xcep_defconfig b/arch/arm/configs/xcep_defconfig
index 4d8e7f2eaef7..3752672f980e 100644
--- a/arch/arm/configs/xcep_defconfig
+++ b/arch/arm/configs/xcep_defconfig
@@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLOCK is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_XCEP=y
CONFIG_IWMMXT=y
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
index 25bb6995f105..03a12fb51259 100644
--- a/arch/arm/configs/zeus_defconfig
+++ b/arch/arm/configs/zeus_defconfig
@@ -4,6 +4,7 @@ CONFIG_LOG_BUF_SHIFT=13
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_ARCOM_ZEUS=y
CONFIG_PCCARD=m
diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h
index 2e70db6f22ea..d8c6f8a99dfa 100644
--- a/arch/arm/include/asm/hardware/sa1111.h
+++ b/arch/arm/include/asm/hardware/sa1111.h
@@ -13,8 +13,6 @@
#ifndef _ASM_ARCH_SA1111
#define _ASM_ARCH_SA1111
-#include <mach/bitfield.h>
-
/*
* Don't ask the (SAC) DMA engines to move less than this amount.
*/
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 2a0739a2350b..eba7cbc93b86 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -174,7 +174,7 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
#define PCI_IO_VIRT_BASE 0xfee00000
#define PCI_IOBASE ((void __iomem *)PCI_IO_VIRT_BASE)
-#if defined(CONFIG_PCI)
+#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
void pci_ioremap_set_mem_type(int mem_type);
#else
static inline void pci_ioremap_set_mem_type(int mem_type) {}
@@ -200,32 +200,13 @@ void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size);
*/
#ifdef CONFIG_NEED_MACH_IO_H
#include <mach/io.h>
-#elif defined(CONFIG_PCI)
-#define IO_SPACE_LIMIT ((resource_size_t)0xfffff)
-#define __io(a) __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
#else
-#define __io(a) __typesafe_io((a) & IO_SPACE_LIMIT)
-#endif
-
-/*
- * This is the limit of PC card/PCI/ISA IO space, which is by default
- * 64K if we have PC card, PCI or ISA support. Otherwise, default to
- * zero to prevent ISA/PCI drivers claiming IO space (and potentially
- * oopsing.)
- *
- * Only set this larger if you really need inb() et.al. to operate over
- * a larger address space. Note that SOC_COMMON ioremaps each sockets
- * IO space area, and so inb() et.al. must be defined to operate as per
- * readb() et.al. on such platforms.
- */
-#ifndef IO_SPACE_LIMIT
-#if defined(CONFIG_PCMCIA_SOC_COMMON) || defined(CONFIG_PCMCIA_SOC_COMMON_MODULE)
-#define IO_SPACE_LIMIT ((resource_size_t)0xffffffff)
-#elif defined(CONFIG_PCI) || defined(CONFIG_ISA) || defined(CONFIG_PCCARD)
-#define IO_SPACE_LIMIT ((resource_size_t)0xffff)
+#if IS_ENABLED(CONFIG_PCMCIA) || defined(CONFIG_PCI)
+#define IO_SPACE_LIMIT ((resource_size_t)0xfffff)
#else
#define IO_SPACE_LIMIT ((resource_size_t)0)
#endif
+#define __io(a) __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
#endif
/*
diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c
index 53cb92435392..938bd932df9a 100644
--- a/arch/arm/kernel/crash_dump.c
+++ b/arch/arm/kernel/crash_dump.c
@@ -14,22 +14,10 @@
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/uio.h>
-/**
- * copy_oldmem_page() - copy one page from old kernel memory
- * @pfn: page frame number to be copied
- * @buf: buffer where the copied page is placed
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page
- * @userbuf: if set, @buf is int he user address space
- *
- * This function copies one page from old kernel memory into buffer pointed by
- * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
- * copied or negative error in case of failure.
- */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset,
- int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
{
void *vaddr;
@@ -40,14 +28,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!vaddr)
return -ENOMEM;
- if (userbuf) {
- if (copy_to_user(buf, vaddr + offset, csize)) {
- iounmap(vaddr);
- return -EFAULT;
- }
- } else {
- memcpy(buf, vaddr + offset, csize);
- }
+ csize = copy_to_iter(vaddr + offset, csize, iter);
iounmap(vaddr);
return csize;
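
The crash_dump.c hunks above replace the open-coded copy_to_user()/memcpy() branches with a single copy_to_iter() call from <linux/uio.h>, letting the caller's iov_iter decide whether the destination is user or kernel memory. A minimal sketch of the resulting helper follows; it is illustrative only, and the page-mapping step (not shown in the quoted hunks) is assumed to be the usual ioremap() of the old kernel's page:

#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/uio.h>

/* Illustrative sketch only -- not a verbatim copy of the ARM file. */
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
			 size_t csize, unsigned long offset)
{
	void *vaddr;

	if (!csize)
		return 0;

	/* assumption: map the old kernel's page as the original code did */
	vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
	if (!vaddr)
		return -ENOMEM;

	/* copy_to_iter() handles both user and kernel destinations and
	 * returns the number of bytes actually copied (may be short).
	 */
	csize = copy_to_iter(vaddr + offset, csize, iter);

	iounmap(vaddr);
	return csize;
}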
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 83cc068586bc..a0b6d1e3812f 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -79,16 +79,14 @@ static unsigned long __ref adjust_address(struct dyn_ftrace *rec,
return (unsigned long)&ftrace_regs_caller_from_init;
}
-int ftrace_arch_code_modify_prepare(void)
+void ftrace_arch_code_modify_prepare(void)
{
- return 0;
}
-int ftrace_arch_code_modify_post_process(void)
+void ftrace_arch_code_modify_post_process(void)
{
/* Make sure any TLB misses during machine stop are cleared. */
flush_tlb_all();
- return 0;
}
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr,
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 0617af11377f..3d9cace63884 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -238,9 +238,11 @@ void release_thread(struct task_struct *dead_task)
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
-int copy_thread(unsigned long clone_flags, unsigned long stack_start,
- unsigned long stk_sz, struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long stack_start = args->stack;
+ unsigned long tls = args->tls;
struct thread_info *thread = task_thread_info(p);
struct pt_regs *childregs = task_pt_regs(p);
@@ -256,15 +258,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
thread->cpu_domain = get_domain();
#endif
- if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
+ if (likely(!args->fn)) {
*childregs = *current_pt_regs();
childregs->ARM_r0 = 0;
if (stack_start)
childregs->ARM_sp = stack_start;
} else {
memset(childregs, 0, sizeof(struct pt_regs));
- thread->cpu_context.r4 = stk_sz;
- thread->cpu_context.r5 = stack_start;
+ thread->cpu_context.r4 = (unsigned long)args->fn_arg;
+ thread->cpu_context.r5 = (unsigned long)args->fn;
childregs->ARM_cpsr = SVC_MODE;
}
thread->cpu_context.pc = (unsigned long)ret_from_fork;
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 3044fcb8d073..2cb943422554 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -116,9 +116,7 @@ void machine_power_off(void)
{
local_irq_disable();
smp_send_stop();
-
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
}
/*
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 3d7e66976206..3dd9e718661b 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -219,7 +219,7 @@ config SOC_SAMA5
select SRAM if PM
config ATMEL_PM
- bool "Atmel PM support"
+ bool
config ATMEL_SECURE_PM
bool "Atmel Secure PM support"
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index 4fa6ea5461b7..85a496ddc619 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -345,9 +345,10 @@ static struct clk_hw *clk_hw_register_ddiv(const char *name,
psc->hw.init = &init;
clk = clk_register(NULL, &psc->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
kfree(psc);
-
+ return ERR_CAST(clk);
+ }
return &psc->hw;
}
@@ -452,9 +453,10 @@ static struct clk_hw *clk_hw_register_div(const char *name,
psc->hw.init = &init;
clk = clk_register(NULL, &psc->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
kfree(psc);
-
+ return ERR_CAST(clk);
+ }
return &psc->hw;
}
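
Both ep93xx hunks above fix the same error path: when clk_register() fails, the private wrapper must be freed and the error propagated with ERR_CAST() instead of returning &psc->hw as if registration had succeeded. A self-contained sketch of that pattern (struct my_psc and register_example() are hypothetical names, not taken from the patch):

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/slab.h>

struct my_psc {
	struct clk_hw hw;
};

static struct clk_hw *register_example(const char *name,
				       const struct clk_ops *ops)
{
	struct clk_init_data init = { .name = name, .ops = ops };
	struct my_psc *psc;
	struct clk *clk;

	psc = kzalloc(sizeof(*psc), GFP_KERNEL);
	if (!psc)
		return ERR_PTR(-ENOMEM);

	psc->hw.init = &init;

	clk = clk_register(NULL, &psc->hw);
	if (IS_ERR(clk)) {
		kfree(psc);		/* do not leak the wrapper */
		return ERR_CAST(clk);	/* propagate the real error code */
	}
	return &psc->hw;
}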
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index e70bac011407..d3de7283ecb3 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -150,7 +150,7 @@ static struct platform_device ts72xx_nand_flash = {
.num_resources = ARRAY_SIZE(ts72xx_nand_resource),
};
-void __init ts72xx_register_flash(struct mtd_partition *parts, int n,
+static void __init ts72xx_register_flash(struct mtd_partition *parts, int n,
resource_size_t start)
{
/*
diff --git a/arch/arm/mach-hpe/Kconfig b/arch/arm/mach-hpe/Kconfig
new file mode 100644
index 000000000000..3372bbf38d38
--- /dev/null
+++ b/arch/arm/mach-hpe/Kconfig
@@ -0,0 +1,23 @@
+menuconfig ARCH_HPE
+ bool "HPE SoC support"
+ depends on ARCH_MULTI_V7
+ help
+ This enables support for HPE ARM-based BMC chips.
+if ARCH_HPE
+
+config ARCH_HPE_GXP
+ bool "HPE GXP SoC"
+ depends on ARCH_MULTI_V7
+ select ARM_VIC
+ select GENERIC_IRQ_CHIP
+ select CLKSRC_MMIO
+ help
+ HPE GXP is the name of the HPE SoC. This SoC is used to implement many
+ BMC features at HPE. It supports the ARMv7 architecture, based on the
+ Cortex-A9 core. It is capable of using an AXI bus to which a memory
+ controller is attached. It has multiple SPI interfaces to connect boot
+ flash and BIOS flash. It uses a 10/100/1000 MAC for network
+ connectivity. It has multiple I2C engines to drive connectivity with a
+ host infrastructure.
+
+endif
diff --git a/arch/arm/mach-hpe/Makefile b/arch/arm/mach-hpe/Makefile
new file mode 100644
index 000000000000..8b0a91234df4
--- /dev/null
+++ b/arch/arm/mach-hpe/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ARCH_HPE_GXP) += gxp.o
diff --git a/arch/arm/mach-hpe/gxp.c b/arch/arm/mach-hpe/gxp.c
new file mode 100644
index 000000000000..ef3341373006
--- /dev/null
+++ b/arch/arm/mach-hpe/gxp.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */
+
+#include <linux/of_platform.h>
+#include <asm/mach/arch.h>
+
+static const char * const gxp_board_dt_compat[] = {
+ "hpe,gxp",
+ NULL,
+};
+
+DT_MACHINE_START(GXP_DT, "HPE GXP")
+ .dt_compat = gxp_board_dt_compat,
+ .l2c_aux_val = 0,
+ .l2c_aux_mask = ~0,
+MACHINE_END
diff --git a/arch/arm/mach-mmp/Kconfig b/arch/arm/mach-mmp/Kconfig
index 9642e6663a52..333229c65b28 100644
--- a/arch/arm/mach-mmp/Kconfig
+++ b/arch/arm/mach-mmp/Kconfig
@@ -39,16 +39,8 @@ config MACH_AVENGERS_LITE
Say 'Y' here if you want to support the Marvell PXA168-based
Avengers Lite Development Board.
-config MACH_TAVOREVB
- bool "Marvell's PXA910 TavorEVB Development Board"
- depends on ARCH_MULTI_V5
- select CPU_PXA910
- help
- Say 'Y' here if you want to support the Marvell PXA910-based
- TavorEVB Development Board.
-
config MACH_TTC_DKB
- bool "Marvell's PXA910 TavorEVB Development Board"
+ bool "Marvell's PXA910 TavorEVB/TTC_DKB Development Board"
depends on ARCH_MULTI_V5
select CPU_PXA910
help
diff --git a/arch/arm/mach-mmp/Makefile b/arch/arm/mach-mmp/Makefile
index e3758f7e1fe7..539d750aaf10 100644
--- a/arch/arm/mach-mmp/Makefile
+++ b/arch/arm/mach-mmp/Makefile
@@ -2,8 +2,6 @@
#
# Makefile for Marvell's PXA168 processors line
#
-ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-pxa/include
-
obj-y += common.o devices.o time.o
# SoC support
@@ -24,7 +22,6 @@ endif
obj-$(CONFIG_MACH_ASPENITE) += aspenite.o
obj-$(CONFIG_MACH_ZYLONITE2) += aspenite.o
obj-$(CONFIG_MACH_AVENGERS_LITE)+= avengers_lite.o
-obj-$(CONFIG_MACH_TAVOREVB) += tavorevb.o
obj-$(CONFIG_MACH_TTC_DKB) += ttc_dkb.o
obj-$(CONFIG_MACH_BROWNSTONE) += brownstone.o
obj-$(CONFIG_MACH_FLINT) += flint.o
diff --git a/arch/arm/mach-mmp/devices.c b/arch/arm/mach-mmp/devices.c
index 18bee66a671f..79f4a2aa5475 100644
--- a/arch/arm/mach-mmp/devices.c
+++ b/arch/arm/mach-mmp/devices.c
@@ -14,7 +14,7 @@
#include <linux/soc/mmp/cputype.h>
#include "regs-usb.h"
-int __init pxa_register_device(struct pxa_device_desc *desc,
+int __init mmp_register_device(struct mmp_device_desc *desc,
void *data, size_t size)
{
struct platform_device *pdev;
diff --git a/arch/arm/mach-mmp/devices.h b/arch/arm/mach-mmp/devices.h
index 4df596c5c201..d4920ebfebc5 100644
--- a/arch/arm/mach-mmp/devices.h
+++ b/arch/arm/mach-mmp/devices.h
@@ -7,7 +7,7 @@
#define MAX_RESOURCE_DMA 2
/* structure for describing the on-chip devices */
-struct pxa_device_desc {
+struct mmp_device_desc {
const char *dev_name;
const char *drv_name;
int id;
@@ -18,7 +18,7 @@ struct pxa_device_desc {
};
#define PXA168_DEVICE(_name, _drv, _id, _irq, _start, _size, _dma...) \
-struct pxa_device_desc pxa168_device_##_name __initdata = { \
+struct mmp_device_desc pxa168_device_##_name __initdata = { \
.dev_name = "pxa168-" #_name, \
.drv_name = _drv, \
.id = _id, \
@@ -29,7 +29,7 @@ struct pxa_device_desc pxa168_device_##_name __initdata = { \
};
#define PXA910_DEVICE(_name, _drv, _id, _irq, _start, _size, _dma...) \
-struct pxa_device_desc pxa910_device_##_name __initdata = { \
+struct mmp_device_desc pxa910_device_##_name __initdata = { \
.dev_name = "pxa910-" #_name, \
.drv_name = _drv, \
.id = _id, \
@@ -40,7 +40,7 @@ struct pxa_device_desc pxa910_device_##_name __initdata = { \
};
#define MMP2_DEVICE(_name, _drv, _id, _irq, _start, _size, _dma...) \
-struct pxa_device_desc mmp2_device_##_name __initdata = { \
+struct mmp_device_desc mmp2_device_##_name __initdata = { \
.dev_name = "mmp2-" #_name, \
.drv_name = _drv, \
.id = _id, \
@@ -50,7 +50,7 @@ struct pxa_device_desc mmp2_device_##_name __initdata = { \
.dma = { _dma }, \
}
-extern int pxa_register_device(struct pxa_device_desc *, void *, size_t);
+extern int mmp_register_device(struct mmp_device_desc *, void *, size_t);
extern int pxa_usb_phy_init(void __iomem *phy_reg);
extern void pxa_usb_phy_deinit(void __iomem *phy_reg);
diff --git a/arch/arm/mach-mmp/mfp.h b/arch/arm/mach-mmp/mfp.h
index 75a4acb33b1b..6f3057987756 100644
--- a/arch/arm/mach-mmp/mfp.h
+++ b/arch/arm/mach-mmp/mfp.h
@@ -2,7 +2,7 @@
#ifndef __ASM_MACH_MFP_H
#define __ASM_MACH_MFP_H
-#include <plat/mfp.h>
+#include <linux/soc/pxa/mfp.h>
/*
* NOTE: the MFPR register bit definitions on PXA168 processor lines are a
diff --git a/arch/arm/mach-mmp/mmp2.h b/arch/arm/mach-mmp/mmp2.h
index adafc4fba8f4..3ebc1bb13f71 100644
--- a/arch/arm/mach-mmp/mmp2.h
+++ b/arch/arm/mach-mmp/mmp2.h
@@ -15,28 +15,28 @@ extern void mmp2_clear_pmic_int(void);
#include "devices.h"
-extern struct pxa_device_desc mmp2_device_uart1;
-extern struct pxa_device_desc mmp2_device_uart2;
-extern struct pxa_device_desc mmp2_device_uart3;
-extern struct pxa_device_desc mmp2_device_uart4;
-extern struct pxa_device_desc mmp2_device_twsi1;
-extern struct pxa_device_desc mmp2_device_twsi2;
-extern struct pxa_device_desc mmp2_device_twsi3;
-extern struct pxa_device_desc mmp2_device_twsi4;
-extern struct pxa_device_desc mmp2_device_twsi5;
-extern struct pxa_device_desc mmp2_device_twsi6;
-extern struct pxa_device_desc mmp2_device_sdh0;
-extern struct pxa_device_desc mmp2_device_sdh1;
-extern struct pxa_device_desc mmp2_device_sdh2;
-extern struct pxa_device_desc mmp2_device_sdh3;
-extern struct pxa_device_desc mmp2_device_asram;
-extern struct pxa_device_desc mmp2_device_isram;
+extern struct mmp_device_desc mmp2_device_uart1;
+extern struct mmp_device_desc mmp2_device_uart2;
+extern struct mmp_device_desc mmp2_device_uart3;
+extern struct mmp_device_desc mmp2_device_uart4;
+extern struct mmp_device_desc mmp2_device_twsi1;
+extern struct mmp_device_desc mmp2_device_twsi2;
+extern struct mmp_device_desc mmp2_device_twsi3;
+extern struct mmp_device_desc mmp2_device_twsi4;
+extern struct mmp_device_desc mmp2_device_twsi5;
+extern struct mmp_device_desc mmp2_device_twsi6;
+extern struct mmp_device_desc mmp2_device_sdh0;
+extern struct mmp_device_desc mmp2_device_sdh1;
+extern struct mmp_device_desc mmp2_device_sdh2;
+extern struct mmp_device_desc mmp2_device_sdh3;
+extern struct mmp_device_desc mmp2_device_asram;
+extern struct mmp_device_desc mmp2_device_isram;
extern struct platform_device mmp2_device_gpio;
static inline int mmp2_add_uart(int id)
{
- struct pxa_device_desc *d = NULL;
+ struct mmp_device_desc *d = NULL;
switch (id) {
case 1: d = &mmp2_device_uart1; break;
@@ -47,13 +47,13 @@ static inline int mmp2_add_uart(int id)
return -EINVAL;
}
- return pxa_register_device(d, NULL, 0);
+ return mmp_register_device(d, NULL, 0);
}
static inline int mmp2_add_twsi(int id, struct i2c_pxa_platform_data *data,
struct i2c_board_info *info, unsigned size)
{
- struct pxa_device_desc *d = NULL;
+ struct mmp_device_desc *d = NULL;
int ret;
switch (id) {
@@ -71,12 +71,12 @@ static inline int mmp2_add_twsi(int id, struct i2c_pxa_platform_data *data,
if (ret)
return ret;
- return pxa_register_device(d, data, sizeof(*data));
+ return mmp_register_device(d, data, sizeof(*data));
}
static inline int mmp2_add_sdhost(int id, struct sdhci_pxa_platdata *data)
{
- struct pxa_device_desc *d = NULL;
+ struct mmp_device_desc *d = NULL;
switch (id) {
case 0: d = &mmp2_device_sdh0; break;
@@ -87,17 +87,17 @@ static inline int mmp2_add_sdhost(int id, struct sdhci_pxa_platdata *data)
return -EINVAL;
}
- return pxa_register_device(d, data, sizeof(*data));
+ return mmp_register_device(d, data, sizeof(*data));
}
static inline int mmp2_add_asram(struct sram_platdata *data)
{
- return pxa_register_device(&mmp2_device_asram, data, sizeof(*data));
+ return mmp_register_device(&mmp2_device_asram, data, sizeof(*data));
}
static inline int mmp2_add_isram(struct sram_platdata *data)
{
- return pxa_register_device(&mmp2_device_isram, data, sizeof(*data));
+ return mmp_register_device(&mmp2_device_isram, data, sizeof(*data));
}
#endif /* __ASM_MACH_MMP2_H */
diff --git a/arch/arm/mach-mmp/pxa168.h b/arch/arm/mach-mmp/pxa168.h
index dff651b9f252..34f907cd165a 100644
--- a/arch/arm/mach-mmp/pxa168.h
+++ b/arch/arm/mach-mmp/pxa168.h
@@ -21,24 +21,24 @@ extern void pxa168_clear_keypad_wakeup(void);
#include "devices.h"
-extern struct pxa_device_desc pxa168_device_uart1;
-extern struct pxa_device_desc pxa168_device_uart2;
-extern struct pxa_device_desc pxa168_device_uart3;
-extern struct pxa_device_desc pxa168_device_twsi0;
-extern struct pxa_device_desc pxa168_device_twsi1;
-extern struct pxa_device_desc pxa168_device_pwm1;
-extern struct pxa_device_desc pxa168_device_pwm2;
-extern struct pxa_device_desc pxa168_device_pwm3;
-extern struct pxa_device_desc pxa168_device_pwm4;
-extern struct pxa_device_desc pxa168_device_ssp1;
-extern struct pxa_device_desc pxa168_device_ssp2;
-extern struct pxa_device_desc pxa168_device_ssp3;
-extern struct pxa_device_desc pxa168_device_ssp4;
-extern struct pxa_device_desc pxa168_device_ssp5;
-extern struct pxa_device_desc pxa168_device_nand;
-extern struct pxa_device_desc pxa168_device_fb;
-extern struct pxa_device_desc pxa168_device_keypad;
-extern struct pxa_device_desc pxa168_device_eth;
+extern struct mmp_device_desc pxa168_device_uart1;
+extern struct mmp_device_desc pxa168_device_uart2;
+extern struct mmp_device_desc pxa168_device_uart3;
+extern struct mmp_device_desc pxa168_device_twsi0;
+extern struct mmp_device_desc pxa168_device_twsi1;
+extern struct mmp_device_desc pxa168_device_pwm1;
+extern struct mmp_device_desc pxa168_device_pwm2;
+extern struct mmp_device_desc pxa168_device_pwm3;
+extern struct mmp_device_desc pxa168_device_pwm4;
+extern struct mmp_device_desc pxa168_device_ssp1;
+extern struct mmp_device_desc pxa168_device_ssp2;
+extern struct mmp_device_desc pxa168_device_ssp3;
+extern struct mmp_device_desc pxa168_device_ssp4;
+extern struct mmp_device_desc pxa168_device_ssp5;
+extern struct mmp_device_desc pxa168_device_nand;
+extern struct mmp_device_desc pxa168_device_fb;
+extern struct mmp_device_desc pxa168_device_keypad;
+extern struct mmp_device_desc pxa168_device_eth;
/* pdata can be NULL */
extern int __init pxa168_add_usb_host(struct mv_usb_platform_data *pdata);
@@ -48,7 +48,7 @@ extern struct platform_device pxa168_device_gpio;
static inline int pxa168_add_uart(int id)
{
- struct pxa_device_desc *d = NULL;
+ struct mmp_device_desc *d = NULL;
switch (id) {
case 1: d = &pxa168_device_uart1; break;
@@ -59,13 +59,13 @@ static inline int pxa168_add_uart(int id)
if (d == NULL)
return -EINVAL;
- return pxa_register_device(d, NULL, 0);
+ return mmp_register_device(d, NULL, 0);
}
static inline int pxa168_add_twsi(int id, struct i2c_pxa_platform_data *data,
struct i2c_board_info *info, unsigned size)
{
- struct pxa_device_desc *d = NULL;
+ struct mmp_device_desc *d = NULL;
int ret;
switch (id) {
@@ -79,12 +79,12 @@ static inline int pxa168_add_twsi(int id, struct i2c_pxa_platform_data *data,
if (ret)
return ret;
- return pxa_register_device(d, data, sizeof(*data));
+ return mmp_register_device(d, data, sizeof(*data));
}
static inline int pxa168_add_pwm(int id)
{
- struct pxa_device_desc *d = NULL;
+ struct mmp_device_desc *d = NULL;
switch (id) {
case 1: d = &pxa168_device_pwm1; break;
@@ -95,12 +95,12 @@ static inline int pxa168_add_pwm(int id)
return -EINVAL;
}
- return pxa_register_device(d, NULL, 0);
+ return mmp_register_device(d, NULL, 0);
}
static inline int pxa168_add_ssp(int id)
{
- struct pxa_device_desc *d = NULL;
+ struct mmp_device_desc *d = NULL;
switch (id) {
case 1: d = &pxa168_device_ssp1; break;
@@ -111,17 +111,17 @@ static inline int pxa168_add_ssp(int id)
default:
return -EINVAL;
}
- return pxa_register_device(d, NULL, 0);
+ return mmp_register_device(d, NULL, 0);
}
static inline int pxa168_add_nand(struct pxa3xx_nand_platform_data *info)
{
- return pxa_register_device(&pxa168_device_nand, info, sizeof(*info));
+ return mmp_register_device(&pxa168_device_nand, info, sizeof(*info));
}
static inline int pxa168_add_fb(struct pxa168fb_mach_info *mi)
{
- return pxa_register_device(&pxa168_device_fb, mi, sizeof(*mi));
+ return mmp_register_device(&pxa168_device_fb, mi, sizeof(*mi));
}
static inline int pxa168_add_keypad(struct pxa27x_keypad_platform_data *data)
@@ -129,11 +129,11 @@ static inline int pxa168_add_keypad(struct pxa27x_keypad_platform_data *data)
if (cpu_is_pxa168())
data->clear_wakeup_event = pxa168_clear_keypad_wakeup;
- return pxa_register_device(&pxa168_device_keypad, data, sizeof(*data));
+ return mmp_register_device(&pxa168_device_keypad, data, sizeof(*data));
}
static inline int pxa168_add_eth(struct pxa168_eth_platform_data *data)
{
- return pxa_register_device(&pxa168_device_eth, data, sizeof(*data));
+ return mmp_register_device(&pxa168_device_eth, data, sizeof(*data));
}
#endif /* __ASM_MACH_PXA168_H */
diff --git a/arch/arm/mach-mmp/pxa910.h b/arch/arm/mach-mmp/pxa910.h
index 2dfe38e4acc1..6ace5a8aa15b 100644
--- a/arch/arm/mach-mmp/pxa910.h
+++ b/arch/arm/mach-mmp/pxa910.h
@@ -13,28 +13,28 @@ extern void __init pxa910_init_irq(void);
#include "devices.h"
-extern struct pxa_device_desc pxa910_device_uart1;
-extern struct pxa_device_desc pxa910_device_uart2;
-extern struct pxa_device_desc pxa910_device_twsi0;
-extern struct pxa_device_desc pxa910_device_twsi1;
-extern struct pxa_device_desc pxa910_device_pwm1;
-extern struct pxa_device_desc pxa910_device_pwm2;
-extern struct pxa_device_desc pxa910_device_pwm3;
-extern struct pxa_device_desc pxa910_device_pwm4;
-extern struct pxa_device_desc pxa910_device_nand;
+extern struct mmp_device_desc pxa910_device_uart1;
+extern struct mmp_device_desc pxa910_device_uart2;
+extern struct mmp_device_desc pxa910_device_twsi0;
+extern struct mmp_device_desc pxa910_device_twsi1;
+extern struct mmp_device_desc pxa910_device_pwm1;
+extern struct mmp_device_desc pxa910_device_pwm2;
+extern struct mmp_device_desc pxa910_device_pwm3;
+extern struct mmp_device_desc pxa910_device_pwm4;
+extern struct mmp_device_desc pxa910_device_nand;
extern struct platform_device pxa168_device_usb_phy;
extern struct platform_device pxa168_device_u2o;
extern struct platform_device pxa168_device_u2ootg;
extern struct platform_device pxa168_device_u2oehci;
-extern struct pxa_device_desc pxa910_device_disp;
-extern struct pxa_device_desc pxa910_device_fb;
-extern struct pxa_device_desc pxa910_device_panel;
+extern struct mmp_device_desc pxa910_device_disp;
+extern struct mmp_device_desc pxa910_device_fb;
+extern struct mmp_device_desc pxa910_device_panel;
extern struct platform_device pxa910_device_gpio;
extern struct platform_device pxa910_device_rtc;
static inline int pxa910_add_uart(int id)
{
- struct pxa_device_desc *d = NULL;
+ struct mmp_device_desc *d = NULL;
switch (id) {
case 1: d = &pxa910_device_uart1; break;
@@ -44,13 +44,13 @@ static inline int pxa910_add_uart(int id)
if (d == NULL)
return -EINVAL;
- return pxa_register_device(d, NULL, 0);
+ return mmp_register_device(d, NULL, 0);
}
static inline int pxa910_add_twsi(int id, struct i2c_pxa_platform_data *data,
struct i2c_board_info *info, unsigned size)
{
- struct pxa_device_desc *d = NULL;
+ struct mmp_device_desc *d = NULL;
int ret;
switch (id) {
@@ -64,12 +64,12 @@ static inline int pxa910_add_twsi(int id, struct i2c_pxa_platform_data *data,
if (ret)
return ret;
- return pxa_register_device(d, data, sizeof(*data));
+ return mmp_register_device(d, data, sizeof(*data));
}
static inline int pxa910_add_pwm(int id)
{
- struct pxa_device_desc *d = NULL;
+ struct mmp_device_desc *d = NULL;
switch (id) {
case 1: d = &pxa910_device_pwm1; break;
@@ -80,11 +80,11 @@ static inline int pxa910_add_pwm(int id)
return -EINVAL;
}
- return pxa_register_device(d, NULL, 0);
+ return mmp_register_device(d, NULL, 0);
}
static inline int pxa910_add_nand(struct pxa3xx_nand_platform_data *info)
{
- return pxa_register_device(&pxa910_device_nand, info, sizeof(*info));
+ return mmp_register_device(&pxa910_device_nand, info, sizeof(*info));
}
#endif /* __ASM_MACH_PXA910_H */
diff --git a/arch/arm/mach-mmp/tavorevb.c b/arch/arm/mach-mmp/tavorevb.c
deleted file mode 100644
index 3261d2322198..000000000000
--- a/arch/arm/mach-mmp/tavorevb.c
+++ /dev/null
@@ -1,113 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/arm/mach-mmp/tavorevb.c
- *
- * Support for the Marvell PXA910-based TavorEVB Development Platform.
- */
-#include <linux/gpio.h>
-#include <linux/gpio-pxa.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/smc91x.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include "addr-map.h"
-#include "mfp-pxa910.h"
-#include "pxa910.h"
-#include "irqs.h"
-
-#include "common.h"
-
-static unsigned long tavorevb_pin_config[] __initdata = {
- /* UART2 */
- GPIO47_UART2_RXD,
- GPIO48_UART2_TXD,
-
- /* SMC */
- SM_nCS0_nCS0,
- SM_ADV_SM_ADV,
- SM_SCLK_SM_SCLK,
- SM_SCLK_SM_SCLK,
- SM_BE0_SM_BE0,
- SM_BE1_SM_BE1,
-
- /* DFI */
- DF_IO0_ND_IO0,
- DF_IO1_ND_IO1,
- DF_IO2_ND_IO2,
- DF_IO3_ND_IO3,
- DF_IO4_ND_IO4,
- DF_IO5_ND_IO5,
- DF_IO6_ND_IO6,
- DF_IO7_ND_IO7,
- DF_IO8_ND_IO8,
- DF_IO9_ND_IO9,
- DF_IO10_ND_IO10,
- DF_IO11_ND_IO11,
- DF_IO12_ND_IO12,
- DF_IO13_ND_IO13,
- DF_IO14_ND_IO14,
- DF_IO15_ND_IO15,
- DF_nCS0_SM_nCS2_nCS0,
- DF_ALE_SM_WEn_ND_ALE,
- DF_CLE_SM_OEn_ND_CLE,
- DF_WEn_DF_WEn,
- DF_REn_DF_REn,
- DF_RDY0_DF_RDY0,
-};
-
-static struct pxa_gpio_platform_data pxa910_gpio_pdata = {
- .irq_base = MMP_GPIO_TO_IRQ(0),
-};
-
-static struct smc91x_platdata tavorevb_smc91x_info = {
- .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
-};
-
-static struct resource smc91x_resources[] = {
- [0] = {
- .start = SMC_CS1_PHYS_BASE + 0x300,
- .end = SMC_CS1_PHYS_BASE + 0xfffff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = MMP_GPIO_TO_IRQ(80),
- .end = MMP_GPIO_TO_IRQ(80),
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
- }
-};
-
-static struct platform_device smc91x_device = {
- .name = "smc91x",
- .id = 0,
- .dev = {
- .platform_data = &tavorevb_smc91x_info,
- },
- .num_resources = ARRAY_SIZE(smc91x_resources),
- .resource = smc91x_resources,
-};
-
-static void __init tavorevb_init(void)
-{
- mfp_config(ARRAY_AND_SIZE(tavorevb_pin_config));
-
- /* on-chip devices */
- pxa910_add_uart(1);
- platform_device_add_data(&pxa910_device_gpio, &pxa910_gpio_pdata,
- sizeof(struct pxa_gpio_platform_data));
- platform_device_register(&pxa910_device_gpio);
-
- /* off-chip devices */
- platform_device_register(&smc91x_device);
-}
-
-MACHINE_START(TAVOREVB, "PXA910 Evaluation Board (aka TavorEVB)")
- .map_io = mmp_map_io,
- .nr_irqs = MMP_NR_IRQS,
- .init_irq = pxa910_init_irq,
- .init_time = pxa910_timer_init,
- .init_machine = tavorevb_init,
- .restart = mmp_restart,
-MACHINE_END
diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c
index 4f240760d4aa..345b2e6d5c7e 100644
--- a/arch/arm/mach-mmp/ttc_dkb.c
+++ b/arch/arm/mach-mmp/ttc_dkb.c
@@ -253,12 +253,12 @@ static struct spi_board_info spi_board_info[] __initdata = {
static void __init add_disp(void)
{
- pxa_register_device(&pxa910_device_disp,
+ mmp_register_device(&pxa910_device_disp,
&dkb_disp_info, sizeof(dkb_disp_info));
spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
- pxa_register_device(&pxa910_device_fb,
+ mmp_register_device(&pxa910_device_fb,
&dkb_fb_info, sizeof(dkb_fb_info));
- pxa_register_device(&pxa910_device_panel,
+ mmp_register_device(&pxa910_device_panel,
&dkb_tpo_panel_info, sizeof(dkb_tpo_panel_info));
}
#endif
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index d4b0cd91a4f9..0ac0567f721d 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -1,4 +1,16 @@
# SPDX-License-Identifier: GPL-2.0-only
+menuconfig ARCH_OMAP1
+ bool "TI OMAP1"
+ depends on ARCH_MULTI_V4T || ARCH_MULTI_V5
+ depends on CPU_LITTLE_ENDIAN
+ select ARCH_HAS_HOLES_MEMORYMODEL
+ select ARCH_OMAP
+ select CLKSRC_MMIO
+ select FORCE_PCI if PCCARD
+ select GPIOLIB
+ help
+ Support for older TI OMAP1 (omap7xx, omap15xx or omap16xx)
+
if ARCH_OMAP1
menu "TI OMAP1 specific features"
@@ -6,23 +18,27 @@ menu "TI OMAP1 specific features"
comment "OMAP Core Type"
config ARCH_OMAP730
+ depends on ARCH_MULTI_V5
bool "OMAP730 Based System"
select ARCH_OMAP_OTG
select CPU_ARM926T
select OMAP_MPU_TIMER
config ARCH_OMAP850
+ depends on ARCH_MULTI_V5
bool "OMAP850 Based System"
select ARCH_OMAP_OTG
select CPU_ARM926T
config ARCH_OMAP15XX
+ depends on ARCH_MULTI_V4T
default y
bool "OMAP15xx Based System"
select CPU_ARM925T
select OMAP_MPU_TIMER
config ARCH_OMAP16XX
+ depends on ARCH_MULTI_V5
bool "OMAP16xx Based System"
select ARCH_OMAP_OTG
select CPU_ARM926T
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
index e5bd4d3b742d..83381e23fab9 100644
--- a/arch/arm/mach-omap1/clock.c
+++ b/arch/arm/mach-omap1/clock.c
@@ -16,7 +16,9 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
#include <linux/soc/ti/omap1-io.h>
+#include <linux/spinlock.h>
#include <asm/mach-types.h>
@@ -28,33 +30,37 @@
#include "sram.h"
__u32 arm_idlect1_mask;
-struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;
+/* provide direct internal access (not via clk API) to some clocks */
+struct omap1_clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;
-static LIST_HEAD(clocks);
-static DEFINE_MUTEX(clocks_mutex);
-static DEFINE_SPINLOCK(clockfw_lock);
+/* protect registers shared among clk_enable/disable() and clk_set_rate() operations */
+static DEFINE_SPINLOCK(arm_ckctl_lock);
+static DEFINE_SPINLOCK(arm_idlect2_lock);
+static DEFINE_SPINLOCK(mod_conf_ctrl_0_lock);
+static DEFINE_SPINLOCK(mod_conf_ctrl_1_lock);
+static DEFINE_SPINLOCK(swd_clk_div_ctrl_sel_lock);
/*
* Omap1 specific clock functions
*/
-unsigned long omap1_uart_recalc(struct clk *clk)
+unsigned long omap1_uart_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
unsigned int val = __raw_readl(clk->enable_reg);
return val & 1 << clk->enable_bit ? 48000000 : 12000000;
}
-unsigned long omap1_sossi_recalc(struct clk *clk)
+unsigned long omap1_sossi_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
u32 div = omap_readl(MOD_CONF_CTRL_1);
div = (div >> 17) & 0x7;
div++;
- return clk->parent->rate / div;
+ return p_rate / div;
}
-static void omap1_clk_allow_idle(struct clk *clk)
+static void omap1_clk_allow_idle(struct omap1_clk *clk)
{
struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;
@@ -65,7 +71,7 @@ static void omap1_clk_allow_idle(struct clk *clk)
arm_idlect1_mask |= 1 << iclk->idlect_shift;
}
-static void omap1_clk_deny_idle(struct clk *clk)
+static void omap1_clk_deny_idle(struct omap1_clk *clk)
{
struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;
@@ -129,7 +135,7 @@ static __u16 verify_ckctl_value(__u16 newval)
return newval;
}
-static int calc_dsor_exp(struct clk *clk, unsigned long rate)
+static int calc_dsor_exp(unsigned long rate, unsigned long realrate)
{
/* Note: If target frequency is too low, this function will return 4,
* which is invalid value. Caller must check for this value and act
@@ -142,15 +148,11 @@ static int calc_dsor_exp(struct clk *clk, unsigned long rate)
* DSP_CK >= TC_CK
* DSPMMU_CK >= TC_CK
*/
- unsigned long realrate;
- struct clk * parent;
unsigned dsor_exp;
- parent = clk->parent;
- if (unlikely(parent == NULL))
+ if (unlikely(realrate == 0))
return -EIO;
- realrate = parent->rate;
for (dsor_exp=0; dsor_exp<4; dsor_exp++) {
if (realrate <= rate)
break;
@@ -161,16 +163,50 @@ static int calc_dsor_exp(struct clk *clk, unsigned long rate)
return dsor_exp;
}
-unsigned long omap1_ckctl_recalc(struct clk *clk)
+unsigned long omap1_ckctl_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
/* Calculate divisor encoded as 2-bit exponent */
int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));
- return clk->parent->rate / dsor;
+ /* update locally maintained rate, required by arm_ck for omap1_show_rates() */
+ clk->rate = p_rate / dsor;
+ return clk->rate;
}
-unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
+static int omap1_clk_is_enabled(struct clk_hw *hw)
{
+ struct omap1_clk *clk = to_omap1_clk(hw);
+ bool api_ck_was_enabled = true;
+ __u32 regval32;
+ int ret;
+
+ if (!clk->ops) /* no gate -- always enabled */
+ return 1;
+
+ if (clk->ops == &clkops_dspck) {
+ api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
+ if (!api_ck_was_enabled)
+ if (api_ck_p->ops->enable(api_ck_p) < 0)
+ return 0;
+ }
+
+ if (clk->flags & ENABLE_REG_32BIT)
+ regval32 = __raw_readl(clk->enable_reg);
+ else
+ regval32 = __raw_readw(clk->enable_reg);
+
+ ret = regval32 & (1 << clk->enable_bit);
+
+ if (!api_ck_was_enabled)
+ api_ck_p->ops->disable(api_ck_p);
+
+ return ret;
+}
+
+
+unsigned long omap1_ckctl_recalc_dsp_domain(struct omap1_clk *clk, unsigned long p_rate)
+{
+ bool api_ck_was_enabled;
int dsor;
/* Calculate divisor encoded as 2-bit exponent
@@ -180,15 +216,18 @@ unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
* Note that DSP_CKCTL virt addr = phys addr, so
* we must use __raw_readw() instead of omap_readw().
*/
- omap1_clk_enable(api_ck_p);
+ api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
+ if (!api_ck_was_enabled)
+ api_ck_p->ops->enable(api_ck_p);
dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
- omap1_clk_disable(api_ck_p);
+ if (!api_ck_was_enabled)
+ api_ck_p->ops->disable(api_ck_p);
- return clk->parent->rate / dsor;
+ return p_rate / dsor;
}
/* MPU virtual clock functions */
-int omap1_select_table_rate(struct clk *clk, unsigned long rate)
+int omap1_select_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
/* Find the highest supported frequency <= rate and switch to it */
struct mpu_rate * ptr;
@@ -223,12 +262,12 @@ int omap1_select_table_rate(struct clk *clk, unsigned long rate)
return 0;
}
-int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
+int omap1_clk_set_rate_dsp_domain(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
int dsor_exp;
u16 regval;
- dsor_exp = calc_dsor_exp(clk, rate);
+ dsor_exp = calc_dsor_exp(rate, p_rate);
if (dsor_exp > 3)
dsor_exp = -EINVAL;
if (dsor_exp < 0)
@@ -238,42 +277,51 @@ int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
regval &= ~(3 << clk->rate_offset);
regval |= dsor_exp << clk->rate_offset;
__raw_writew(regval, DSP_CKCTL);
- clk->rate = clk->parent->rate / (1 << dsor_exp);
+ clk->rate = p_rate / (1 << dsor_exp);
return 0;
}
-long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
+long omap1_clk_round_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate,
+ unsigned long *p_rate)
{
- int dsor_exp = calc_dsor_exp(clk, rate);
+ int dsor_exp = calc_dsor_exp(rate, *p_rate);
+
if (dsor_exp < 0)
return dsor_exp;
if (dsor_exp > 3)
dsor_exp = 3;
- return clk->parent->rate / (1 << dsor_exp);
+ return *p_rate / (1 << dsor_exp);
}
-int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
+int omap1_clk_set_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
+ unsigned long flags;
int dsor_exp;
u16 regval;
- dsor_exp = calc_dsor_exp(clk, rate);
+ dsor_exp = calc_dsor_exp(rate, p_rate);
if (dsor_exp > 3)
dsor_exp = -EINVAL;
if (dsor_exp < 0)
return dsor_exp;
+ /* protect ARM_CKCTL register from concurrent access via clk_enable/disable() */
+ spin_lock_irqsave(&arm_ckctl_lock, flags);
+
regval = omap_readw(ARM_CKCTL);
regval &= ~(3 << clk->rate_offset);
regval |= dsor_exp << clk->rate_offset;
regval = verify_ckctl_value(regval);
omap_writew(regval, ARM_CKCTL);
- clk->rate = clk->parent->rate / (1 << dsor_exp);
+ clk->rate = p_rate / (1 << dsor_exp);
+
+ spin_unlock_irqrestore(&arm_ckctl_lock, flags);
+
return 0;
}
-long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
+long omap1_round_to_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
{
/* Find the highest supported frequency <= rate */
struct mpu_rate * ptr;
@@ -324,26 +372,40 @@ static unsigned calc_ext_dsor(unsigned long rate)
}
/* XXX Only needed on 1510 */
-int omap1_set_uart_rate(struct clk *clk, unsigned long rate)
+long omap1_round_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
{
+ return rate > 24000000 ? 48000000 : 12000000;
+}
+
+int omap1_set_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
+{
+ unsigned long flags;
unsigned int val;
- val = __raw_readl(clk->enable_reg);
if (rate == 12000000)
- val &= ~(1 << clk->enable_bit);
+ val = 0;
else if (rate == 48000000)
- val |= (1 << clk->enable_bit);
+ val = 1 << clk->enable_bit;
else
return -EINVAL;
+
+ /* protect MOD_CONF_CTRL_0 register from concurrent access via clk_enable/disable() */
+ spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
+
+ val |= __raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit);
__raw_writel(val, clk->enable_reg);
+
+ spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
+
clk->rate = rate;
return 0;
}
/* External clock (MCLK & BCLK) functions */
-int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
+int omap1_set_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
+ unsigned long flags;
unsigned dsor;
__u16 ratio_bits;
@@ -354,25 +416,53 @@ int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
else
ratio_bits = (dsor - 2) << 2;
+ /* protect SWD_CLK_DIV_CTRL_SEL register from concurrent access via clk_enable/disable() */
+ spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);
+
ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
__raw_writew(ratio_bits, clk->enable_reg);
+ spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);
+
return 0;
}
-int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
+static int calc_div_sossi(unsigned long rate, unsigned long p_rate)
{
- u32 l;
int div;
- unsigned long p_rate;
- p_rate = clk->parent->rate;
/* Round towards slower frequency */
div = (p_rate + rate - 1) / rate;
- div--;
+
+ return --div;
+}
+
+long omap1_round_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
+{
+ int div;
+
+ div = calc_div_sossi(rate, *p_rate);
+ if (div < 0)
+ div = 0;
+ else if (div > 7)
+ div = 7;
+
+ return *p_rate / (div + 1);
+}
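omap1_round_sossi_rate() rounds towards the slower frequency: the divider is a ceiling division of the parent rate by the requested rate, then clamped to the 3-bit field. A small user-space sketch, assuming a 96 MHz parent purely as an example.

#include <stdio.h>

static long round_sossi_demo(unsigned long rate, unsigned long p_rate)
{
	/* ceiling division so the result never exceeds the requested rate */
	int div = (p_rate + rate - 1) / rate - 1;

	if (div < 0)
		div = 0;
	else if (div > 7)
		div = 7;	/* divider field is 3 bits wide */

	return p_rate / (div + 1);
}

int main(void)
{
	printf("%ld\n", round_sossi_demo(40000000, 96000000));	/* -> 32000000 */
	return 0;
}
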
+
+int omap1_set_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
+{
+ unsigned long flags;
+ u32 l;
+ int div;
+
+ div = calc_div_sossi(rate, p_rate);
if (div < 0 || div > 7)
return -EINVAL;
+ /* protect MOD_CONF_CTRL_1 register from concurrent access via clk_enable/disable() */
+ spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
+
l = omap_readl(MOD_CONF_CTRL_1);
l &= ~(7 << 17);
l |= div << 17;
@@ -380,15 +470,17 @@ int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
clk->rate = p_rate / (div + 1);
+ spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
+
return 0;
}
-long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate)
+long omap1_round_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
{
return 96000000 / calc_ext_dsor(rate);
}
-void omap1_init_ext_clk(struct clk *clk)
+int omap1_init_ext_clk(struct omap1_clk *clk)
{
unsigned dsor;
__u16 ratio_bits;
@@ -404,59 +496,59 @@ void omap1_init_ext_clk(struct clk *clk)
dsor = ratio_bits + 2;
clk->rate = 96000000 / dsor;
+
+ return 0;
}
-int omap1_clk_enable(struct clk *clk)
+static int omap1_clk_enable(struct clk_hw *hw)
{
+ struct omap1_clk *clk = to_omap1_clk(hw), *parent = to_omap1_clk(clk_hw_get_parent(hw));
int ret = 0;
- if (clk->usecount++ == 0) {
- if (clk->parent) {
- ret = omap1_clk_enable(clk->parent);
- if (ret)
- goto err;
-
- if (clk->flags & CLOCK_NO_IDLE_PARENT)
- omap1_clk_deny_idle(clk->parent);
- }
+ if (parent && clk->flags & CLOCK_NO_IDLE_PARENT)
+ omap1_clk_deny_idle(parent);
+ if (clk->ops && !(WARN_ON(!clk->ops->enable)))
ret = clk->ops->enable(clk);
- if (ret) {
- if (clk->parent)
- omap1_clk_disable(clk->parent);
- goto err;
- }
- }
- return ret;
-err:
- clk->usecount--;
return ret;
}
-void omap1_clk_disable(struct clk *clk)
+static void omap1_clk_disable(struct clk_hw *hw)
{
- if (clk->usecount > 0 && !(--clk->usecount)) {
+ struct omap1_clk *clk = to_omap1_clk(hw), *parent = to_omap1_clk(clk_hw_get_parent(hw));
+
+ if (clk->ops && !(WARN_ON(!clk->ops->disable)))
clk->ops->disable(clk);
- if (likely(clk->parent)) {
- omap1_clk_disable(clk->parent);
- if (clk->flags & CLOCK_NO_IDLE_PARENT)
- omap1_clk_allow_idle(clk->parent);
- }
- }
+
+ if (likely(parent) && clk->flags & CLOCK_NO_IDLE_PARENT)
+ omap1_clk_allow_idle(parent);
}
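With the usecount bookkeeping removed, these two ops only toggle the hardware gate and the parent's idle permission; reference counting is now the common clock framework's job. Consumers are unaffected and keep using the generic clk API, which ends up in the ops above. An illustrative consumer fragment, where "dev" stands for the consumer's struct device and the "ick" connection id is only an example:

struct clk *ick = clk_get(dev, "ick");

if (!IS_ERR(ick)) {
	clk_prepare_enable(ick);	/* reaches omap1_clk_enable() */
	/* ... use the peripheral ... */
	clk_disable_unprepare(ick);
	clk_put(ick);
}
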
-static int omap1_clk_enable_generic(struct clk *clk)
+static int omap1_clk_enable_generic(struct omap1_clk *clk)
{
+ unsigned long flags;
__u16 regval16;
__u32 regval32;
if (unlikely(clk->enable_reg == NULL)) {
printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
- clk->name);
+ clk_hw_get_name(&clk->hw));
return -EINVAL;
}
+ /* protect clk->enable_reg from concurrent access via clk_set_rate() */
+ if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
+ spin_lock_irqsave(&arm_ckctl_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
+ spin_lock_irqsave(&arm_idlect2_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
+ spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
+ spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
+ spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);
+
if (clk->flags & ENABLE_REG_32BIT) {
regval32 = __raw_readl(clk->enable_reg);
regval32 |= (1 << clk->enable_bit);
@@ -467,17 +559,41 @@ static int omap1_clk_enable_generic(struct clk *clk)
__raw_writew(regval16, clk->enable_reg);
}
+ if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
+ spin_unlock_irqrestore(&arm_ckctl_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
+ spin_unlock_irqrestore(&arm_idlect2_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
+ spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
+ spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
+ spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);
+
return 0;
}
-static void omap1_clk_disable_generic(struct clk *clk)
+static void omap1_clk_disable_generic(struct omap1_clk *clk)
{
+ unsigned long flags;
__u16 regval16;
__u32 regval32;
if (clk->enable_reg == NULL)
return;
+ /* protect clk->enable_reg from concurrent access via clk_set_rate() */
+ if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
+ spin_lock_irqsave(&arm_ckctl_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
+ spin_lock_irqsave(&arm_idlect2_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
+ spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
+ spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
+ spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);
+
if (clk->flags & ENABLE_REG_32BIT) {
regval32 = __raw_readl(clk->enable_reg);
regval32 &= ~(1 << clk->enable_bit);
@@ -487,6 +603,17 @@ static void omap1_clk_disable_generic(struct clk *clk)
regval16 &= ~(1 << clk->enable_bit);
__raw_writew(regval16, clk->enable_reg);
}
+
+ if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
+ spin_unlock_irqrestore(&arm_ckctl_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
+ spin_unlock_irqrestore(&arm_idlect2_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
+ spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
+ spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
+ else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
+ spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);
}
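The same register-to-lock mapping is repeated for lock and unlock in both the enable and disable paths. Purely to make that mapping explicit, here is a hypothetical helper that is not part of the patch; names match the locks introduced above.

static spinlock_t *omap1_reg_lock(void __iomem *reg)
{
	if (reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
		return &arm_ckctl_lock;
	if (reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
		return &arm_idlect2_lock;
	if (reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
		return &mod_conf_ctrl_0_lock;
	if (reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
		return &mod_conf_ctrl_1_lock;
	if (reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
		return &swd_clk_div_ctrl_sel_lock;
	return NULL;	/* register not shared with a rate path */
}
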
const struct clkops clkops_generic = {
@@ -494,25 +621,38 @@ const struct clkops clkops_generic = {
.disable = omap1_clk_disable_generic,
};
-static int omap1_clk_enable_dsp_domain(struct clk *clk)
+static int omap1_clk_enable_dsp_domain(struct omap1_clk *clk)
{
- int retval;
+ bool api_ck_was_enabled;
+ int retval = 0;
+
+ api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
+ if (!api_ck_was_enabled)
+ retval = api_ck_p->ops->enable(api_ck_p);
- retval = omap1_clk_enable(api_ck_p);
if (!retval) {
retval = omap1_clk_enable_generic(clk);
- omap1_clk_disable(api_ck_p);
+
+ if (!api_ck_was_enabled)
+ api_ck_p->ops->disable(api_ck_p);
}
return retval;
}
-static void omap1_clk_disable_dsp_domain(struct clk *clk)
+static void omap1_clk_disable_dsp_domain(struct omap1_clk *clk)
{
- if (omap1_clk_enable(api_ck_p) == 0) {
- omap1_clk_disable_generic(clk);
- omap1_clk_disable(api_ck_p);
- }
+ bool api_ck_was_enabled;
+
+ api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
+ if (!api_ck_was_enabled)
+ if (api_ck_p->ops->enable(api_ck_p) < 0)
+ return;
+
+ omap1_clk_disable_generic(clk);
+
+ if (!api_ck_was_enabled)
+ api_ck_p->ops->disable(api_ck_p);
}
const struct clkops clkops_dspck = {
@@ -521,7 +661,7 @@ const struct clkops clkops_dspck = {
};
/* XXX SYSC register handling does not belong in the clock framework */
-static int omap1_clk_enable_uart_functional_16xx(struct clk *clk)
+static int omap1_clk_enable_uart_functional_16xx(struct omap1_clk *clk)
{
int ret;
struct uart_clk *uclk;
@@ -538,7 +678,7 @@ static int omap1_clk_enable_uart_functional_16xx(struct clk *clk)
}
/* XXX SYSC register handling does not belong in the clock framework */
-static void omap1_clk_disable_uart_functional_16xx(struct clk *clk)
+static void omap1_clk_disable_uart_functional_16xx(struct omap1_clk *clk)
{
struct uart_clk *uclk;
@@ -555,20 +695,33 @@ const struct clkops clkops_uart_16xx = {
.disable = omap1_clk_disable_uart_functional_16xx,
};
-long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
+static unsigned long omap1_clk_recalc_rate(struct clk_hw *hw, unsigned long p_rate)
{
- if (clk->round_rate != NULL)
- return clk->round_rate(clk, rate);
+ struct omap1_clk *clk = to_omap1_clk(hw);
+
+ if (clk->recalc)
+ return clk->recalc(clk, p_rate);
return clk->rate;
}
-int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
+static long omap1_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *p_rate)
+{
+ struct omap1_clk *clk = to_omap1_clk(hw);
+
+ if (clk->round_rate != NULL)
+ return clk->round_rate(clk, rate, p_rate);
+
+ return omap1_clk_recalc_rate(hw, *p_rate);
+}
+
+static int omap1_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long p_rate)
{
+ struct omap1_clk *clk = to_omap1_clk(hw);
int ret = -EINVAL;
if (clk->set_rate)
- ret = clk->set_rate(clk, rate);
+ ret = clk->set_rate(clk, rate, p_rate);
return ret;
}
@@ -576,243 +729,105 @@ int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
* Omap1 clock reset and init functions
*/
+static int omap1_clk_init_op(struct clk_hw *hw)
+{
+ struct omap1_clk *clk = to_omap1_clk(hw);
+
+ if (clk->init)
+ return clk->init(clk);
+
+ return 0;
+}
+
#ifdef CONFIG_OMAP_RESET_CLOCKS
-void omap1_clk_disable_unused(struct clk *clk)
+static void omap1_clk_disable_unused(struct clk_hw *hw)
{
- __u32 regval32;
+ struct omap1_clk *clk = to_omap1_clk(hw);
+ const char *name = clk_hw_get_name(hw);
/* Clocks in the DSP domain need api_ck. Just assume bootloader
* has not enabled any DSP clocks */
if (clk->enable_reg == DSP_IDLECT2) {
- pr_info("Skipping reset check for DSP domain clock \"%s\"\n",
- clk->name);
+ pr_info("Skipping reset check for DSP domain clock \"%s\"\n", name);
return;
}
- /* Is the clock already disabled? */
- if (clk->flags & ENABLE_REG_32BIT)
- regval32 = __raw_readl(clk->enable_reg);
- else
- regval32 = __raw_readw(clk->enable_reg);
-
- if ((regval32 & (1 << clk->enable_bit)) == 0)
- return;
-
- printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name);
- clk->ops->disable(clk);
+ pr_info("Disabling unused clock \"%s\"... ", name);
+ omap1_clk_disable(hw);
printk(" done\n");
}
#endif
+const struct clk_ops omap1_clk_gate_ops = {
+ .enable = omap1_clk_enable,
+ .disable = omap1_clk_disable,
+ .is_enabled = omap1_clk_is_enabled,
+#ifdef CONFIG_OMAP_RESET_CLOCKS
+ .disable_unused = omap1_clk_disable_unused,
+#endif
+};
-int clk_enable(struct clk *clk)
-{
- unsigned long flags;
- int ret;
-
- if (IS_ERR_OR_NULL(clk))
- return -EINVAL;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- ret = omap1_clk_enable(clk);
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
- unsigned long flags;
-
- if (IS_ERR_OR_NULL(clk))
- return;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- if (clk->usecount == 0) {
- pr_err("Trying disable clock %s with 0 usecount\n",
- clk->name);
- WARN_ON(1);
- goto out;
- }
-
- omap1_clk_disable(clk);
-
-out:
- spin_unlock_irqrestore(&clockfw_lock, flags);
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- unsigned long flags;
- unsigned long ret;
-
- if (IS_ERR_OR_NULL(clk))
- return 0;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- ret = clk->rate;
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-/*
- * Optional clock functions defined in include/linux/clk.h
- */
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long flags;
- long ret;
-
- if (IS_ERR_OR_NULL(clk))
- return 0;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- ret = omap1_clk_round_rate(clk, rate);
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long flags;
- int ret = -EINVAL;
-
- if (IS_ERR_OR_NULL(clk))
- return ret;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- ret = omap1_clk_set_rate(clk, rate);
- if (ret == 0)
- propagate_rate(clk);
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
- WARN_ONCE(1, "clk_set_parent() not implemented for OMAP1\n");
-
- return -EINVAL;
-}
-EXPORT_SYMBOL(clk_set_parent);
+const struct clk_ops omap1_clk_rate_ops = {
+ .recalc_rate = omap1_clk_recalc_rate,
+ .round_rate = omap1_clk_round_rate,
+ .set_rate = omap1_clk_set_rate,
+ .init = omap1_clk_init_op,
+};
-struct clk *clk_get_parent(struct clk *clk)
-{
- return clk->parent;
-}
-EXPORT_SYMBOL(clk_get_parent);
+const struct clk_ops omap1_clk_full_ops = {
+ .enable = omap1_clk_enable,
+ .disable = omap1_clk_disable,
+ .is_enabled = omap1_clk_is_enabled,
+#ifdef CONFIG_OMAP_RESET_CLOCKS
+ .disable_unused = omap1_clk_disable_unused,
+#endif
+ .recalc_rate = omap1_clk_recalc_rate,
+ .round_rate = omap1_clk_round_rate,
+ .set_rate = omap1_clk_set_rate,
+ .init = omap1_clk_init_op,
+};
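Three clk_ops variants are exported: gate-only, rate-only, and the combined set. A sketch of how one of these omap1_clk instances might be handed to the common clock framework follows; omap1_register_one() is an invented name and the actual registration loop in omap1_clk_init() may differ in detail.

static int __init omap1_register_one(struct omap1_clk *clk)
{
	int ret;

	ret = clk_hw_register(NULL, &clk->hw);
	if (ret)
		return ret;

	/* make the clock reachable through clkdev (con_id chosen as its name) */
	return clk_hw_register_clkdev(&clk->hw, clk_hw_get_name(&clk->hw), NULL);
}
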
/*
* OMAP specific clock functions shared between omap1 and omap2
*/
/* Used for clocks that always have same value as the parent clock */
-unsigned long followparent_recalc(struct clk *clk)
+unsigned long followparent_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
- return clk->parent->rate;
+ return p_rate;
}
/*
* Used for clocks that have the same value as the parent clock,
* divided by some factor
*/
-unsigned long omap_fixed_divisor_recalc(struct clk *clk)
+unsigned long omap_fixed_divisor_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
WARN_ON(!clk->fixed_div);
- return clk->parent->rate / clk->fixed_div;
+ return p_rate / clk->fixed_div;
}
/* Propagate rate to children */
-void propagate_rate(struct clk *tclk)
+void propagate_rate(struct omap1_clk *tclk)
{
struct clk *clkp;
- list_for_each_entry(clkp, &tclk->children, sibling) {
- if (clkp->recalc)
- clkp->rate = clkp->recalc(clkp);
- propagate_rate(clkp);
- }
-}
-
-static LIST_HEAD(root_clks);
-
-/**
- * clk_preinit - initialize any fields in the struct clk before clk init
- * @clk: struct clk * to initialize
- *
- * Initialize any struct clk fields needed before normal clk initialization
- * can run. No return value.
- */
-void clk_preinit(struct clk *clk)
-{
- INIT_LIST_HEAD(&clk->children);
-}
-
-int clk_register(struct clk *clk)
-{
- if (IS_ERR_OR_NULL(clk))
- return -EINVAL;
-
- /*
- * trap out already registered clocks
- */
- if (clk->node.next || clk->node.prev)
- return 0;
-
- mutex_lock(&clocks_mutex);
- if (clk->parent)
- list_add(&clk->sibling, &clk->parent->children);
- else
- list_add(&clk->sibling, &root_clks);
-
- list_add(&clk->node, &clocks);
- if (clk->init)
- clk->init(clk);
- mutex_unlock(&clocks_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL(clk_register);
-
-void clk_unregister(struct clk *clk)
-{
- if (IS_ERR_OR_NULL(clk))
+ /* depend on CCF ability to recalculate new rates across whole clock subtree */
+ if (WARN_ON(!(clk_hw_get_flags(&tclk->hw) & CLK_GET_RATE_NOCACHE)))
return;
- mutex_lock(&clocks_mutex);
- list_del(&clk->sibling);
- list_del(&clk->node);
- mutex_unlock(&clocks_mutex);
-}
-EXPORT_SYMBOL(clk_unregister);
-
-/*
- * Low level helpers
- */
-static int clkll_enable_null(struct clk *clk)
-{
- return 0;
-}
+ clkp = clk_get_sys(NULL, clk_hw_get_name(&tclk->hw));
+ if (WARN_ON(!clkp))
+ return;
-static void clkll_disable_null(struct clk *clk)
-{
+ clk_get_rate(clkp);
+ clk_put(clkp);
}
-const struct clkops clkops_null = {
- .enable = clkll_enable_null,
- .disable = clkll_disable_null,
+const struct clk_ops omap1_clk_null_ops = {
};
/*
@@ -820,114 +835,6 @@ const struct clkops clkops_null = {
*
* Used for clock aliases that are needed on some OMAPs, but not others
*/
-struct clk dummy_ck = {
- .name = "dummy",
- .ops = &clkops_null,
+struct omap1_clk dummy_ck __refdata = {
+ .hw.init = CLK_HW_INIT_NO_PARENT("dummy", &omap1_clk_null_ops, 0),
};
-
-/*
- *
- */
-
-#ifdef CONFIG_OMAP_RESET_CLOCKS
-/*
- * Disable any unused clocks left on by the bootloader
- */
-static int __init clk_disable_unused(void)
-{
- struct clk *ck;
- unsigned long flags;
-
- pr_info("clock: disabling unused clocks to save power\n");
-
- spin_lock_irqsave(&clockfw_lock, flags);
- list_for_each_entry(ck, &clocks, node) {
- if (ck->ops == &clkops_null)
- continue;
-
- if (ck->usecount > 0 || !ck->enable_reg)
- continue;
-
- omap1_clk_disable_unused(ck);
- }
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return 0;
-}
-late_initcall(clk_disable_unused);
-#endif
-
-#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
-/*
- * debugfs support to trace clock tree hierarchy and attributes
- */
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-static struct dentry *clk_debugfs_root;
-
-static int debug_clock_show(struct seq_file *s, void *unused)
-{
- struct clk *c;
- struct clk *pa;
-
- mutex_lock(&clocks_mutex);
- seq_printf(s, "%-30s %-30s %-10s %s\n",
- "clock-name", "parent-name", "rate", "use-count");
-
- list_for_each_entry(c, &clocks, node) {
- pa = c->parent;
- seq_printf(s, "%-30s %-30s %-10lu %d\n",
- c->name, pa ? pa->name : "none", c->rate,
- c->usecount);
- }
- mutex_unlock(&clocks_mutex);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(debug_clock);
-
-static void clk_debugfs_register_one(struct clk *c)
-{
- struct dentry *d;
- struct clk *pa = c->parent;
-
- d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
- c->dent = d;
-
- debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount);
- debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate);
- debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags);
-}
-
-static void clk_debugfs_register(struct clk *c)
-{
- struct clk *pa = c->parent;
-
- if (pa && !pa->dent)
- clk_debugfs_register(pa);
-
- if (!c->dent)
- clk_debugfs_register_one(c);
-}
-
-static int __init clk_debugfs_init(void)
-{
- struct clk *c;
- struct dentry *d;
-
- d = debugfs_create_dir("clock", NULL);
- clk_debugfs_root = d;
-
- list_for_each_entry(c, &clocks, node)
- clk_debugfs_register(c);
-
- debugfs_create_file("summary", S_IRUGO, d, NULL, &debug_clock_fops);
-
- return 0;
-}
-late_initcall(clk_debugfs_init);
-
-#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
diff --git a/arch/arm/mach-omap1/clock.h b/arch/arm/mach-omap1/clock.h
index 8025e4a22469..16cfb2e86ee4 100644
--- a/arch/arm/mach-omap1/clock.h
+++ b/arch/arm/mach-omap1/clock.h
@@ -11,12 +11,11 @@
#define __ARCH_ARM_MACH_OMAP1_CLOCK_H
#include <linux/clk.h>
-#include <linux/list.h>
-
#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
struct module;
-struct clk;
+struct omap1_clk;
struct omap_clk {
u16 cpu;
@@ -29,7 +28,7 @@ struct omap_clk {
.lk = { \
.dev_id = dev, \
.con_id = con, \
- .clk = ck, \
+ .clk_hw = ck, \
}, \
}
@@ -40,10 +39,6 @@ struct omap_clk {
#define CK_16XX (1 << 3) /* 16xx, 17xx, 5912 */
#define CK_1710 (1 << 4) /* 1710 extra for rate selection */
-
-/* Temporary, needed during the common clock framework conversion */
-#define __clk_get_name(clk) (clk->name)
-
/**
* struct clkops - some clock function pointers
* @enable: fn ptr that enables the current clock in hardware
@@ -51,8 +46,8 @@ struct omap_clk {
* @allow_idle: fn ptr that enables autoidle for the current clock in hardware
*/
struct clkops {
- int (*enable)(struct clk *);
- void (*disable)(struct clk *);
+ int (*enable)(struct omap1_clk *clk);
+ void (*disable)(struct omap1_clk *clk);
};
/*
@@ -65,13 +60,9 @@ struct clkops {
#define CLOCK_NO_IDLE_PARENT (1 << 2)
/**
- * struct clk - OMAP struct clk
- * @node: list_head connecting this clock into the full clock list
+ * struct omap1_clk - OMAP1 struct clk
+ * @hw: struct clk_hw for common clock framework integration
* @ops: struct clkops * for this clock
- * @name: the name of the clock in the hardware (used in hwmod data and debug)
- * @parent: pointer to this clock's parent struct clk
- * @children: list_head connecting to the child clks' @sibling list_heads
- * @sibling: list_head connecting this clk to its parent clk's @children
* @rate: current clock rate
* @enable_reg: register to write to enable the clock (see @enable_bit)
* @recalc: fn ptr that returns the clock's current rate
@@ -79,102 +70,65 @@ struct clkops {
* @round_rate: fn ptr that can round the clock's current rate
* @init: fn ptr to do clock-specific initialization
* @enable_bit: bitshift to write to enable/disable the clock (see @enable_reg)
- * @usecount: number of users that have requested this clock to be enabled
* @fixed_div: when > 0, this clock's rate is its parent's rate / @fixed_div
* @flags: see "struct clk.flags possibilities" above
* @rate_offset: bitshift for rate selection bitfield (OMAP1 only)
- *
- * XXX @rate_offset should probably be removed and OMAP1
- * clock code converted to use clksel.
- *
- * XXX @usecount is poorly named. It should be "enable_count" or
- * something similar. "users" in the description refers to kernel
- * code (core code or drivers) that have called clk_enable() and not
- * yet called clk_disable(); the usecount of parent clocks is also
- * incremented by the clock code when clk_enable() is called on child
- * clocks and decremented by the clock code when clk_disable() is
- * called on child clocks.
- *
- * XXX @usecount, @children, @sibling should be marked for
- * internal use only.
- *
- * @children and @sibling are used to optimize parent-to-child clock
- * tree traversals. (child-to-parent traversals use @parent.)
- *
- * XXX The notion of the clock's current rate probably needs to be
- * separated from the clock's target rate.
*/
-struct clk {
- struct list_head node;
+struct omap1_clk {
+ struct clk_hw hw;
const struct clkops *ops;
- const char *name;
- struct clk *parent;
- struct list_head children;
- struct list_head sibling; /* node for children */
unsigned long rate;
void __iomem *enable_reg;
- unsigned long (*recalc)(struct clk *);
- int (*set_rate)(struct clk *, unsigned long);
- long (*round_rate)(struct clk *, unsigned long);
- void (*init)(struct clk *);
+ unsigned long (*recalc)(struct omap1_clk *clk, unsigned long rate);
+ int (*set_rate)(struct omap1_clk *clk, unsigned long rate,
+ unsigned long p_rate);
+ long (*round_rate)(struct omap1_clk *clk, unsigned long rate,
+ unsigned long *p_rate);
+ int (*init)(struct omap1_clk *clk);
u8 enable_bit;
- s8 usecount;
u8 fixed_div;
u8 flags;
u8 rate_offset;
-#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
- struct dentry *dent; /* For visible tree hierarchy */
-#endif
};
+#define to_omap1_clk(_hw) container_of(_hw, struct omap1_clk, hw)
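to_omap1_clk() is the usual container_of() accessor: the framework hands back the embedded clk_hw, and the wrapper structure is recovered by subtracting the member offset. A stand-alone illustration with simplified stand-in types; the macro below mimics the kernel's container_of().

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hw { int dummy; };
struct wrapper { unsigned long rate; struct hw hw; };

int main(void)
{
	struct wrapper w = { .rate = 12000000 };
	struct hw *h = &w.hw;			/* what the framework passes back */
	struct wrapper *back = container_of(h, struct wrapper, hw);

	printf("recovered rate %lu\n", back->rate);
	return 0;
}
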
-extern void clk_preinit(struct clk *clk);
-extern int clk_register(struct clk *clk);
-extern void clk_unregister(struct clk *clk);
-extern void propagate_rate(struct clk *clk);
-extern unsigned long followparent_recalc(struct clk *clk);
-unsigned long omap_fixed_divisor_recalc(struct clk *clk);
+void propagate_rate(struct omap1_clk *clk);
+unsigned long followparent_recalc(struct omap1_clk *clk, unsigned long p_rate);
+unsigned long omap_fixed_divisor_recalc(struct omap1_clk *clk, unsigned long p_rate);
-extern const struct clkops clkops_null;
-
-extern struct clk dummy_ck;
+extern struct omap1_clk dummy_ck;
int omap1_clk_init(void);
void omap1_clk_late_init(void);
-extern int omap1_clk_enable(struct clk *clk);
-extern void omap1_clk_disable(struct clk *clk);
-extern long omap1_clk_round_rate(struct clk *clk, unsigned long rate);
-extern int omap1_clk_set_rate(struct clk *clk, unsigned long rate);
-extern unsigned long omap1_ckctl_recalc(struct clk *clk);
-extern int omap1_set_sossi_rate(struct clk *clk, unsigned long rate);
-extern unsigned long omap1_sossi_recalc(struct clk *clk);
-extern unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk);
-extern int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate);
-extern int omap1_set_uart_rate(struct clk *clk, unsigned long rate);
-extern unsigned long omap1_uart_recalc(struct clk *clk);
-extern int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate);
-extern long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate);
-extern void omap1_init_ext_clk(struct clk *clk);
-extern int omap1_select_table_rate(struct clk *clk, unsigned long rate);
-extern long omap1_round_to_table_rate(struct clk *clk, unsigned long rate);
-extern int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate);
-extern long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate);
-
-#ifdef CONFIG_OMAP_RESET_CLOCKS
-extern void omap1_clk_disable_unused(struct clk *clk);
-#else
-#define omap1_clk_disable_unused NULL
-#endif
+unsigned long omap1_ckctl_recalc(struct omap1_clk *clk, unsigned long p_rate);
+long omap1_round_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate);
+int omap1_set_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate);
+unsigned long omap1_sossi_recalc(struct omap1_clk *clk, unsigned long p_rate);
+unsigned long omap1_ckctl_recalc_dsp_domain(struct omap1_clk *clk, unsigned long p_rate);
+int omap1_clk_set_rate_dsp_domain(struct omap1_clk *clk, unsigned long rate,
+ unsigned long p_rate);
+long omap1_round_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate);
+int omap1_set_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate);
+unsigned long omap1_uart_recalc(struct omap1_clk *clk, unsigned long p_rate);
+int omap1_set_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate);
+long omap1_round_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate);
+int omap1_init_ext_clk(struct omap1_clk *clk);
+int omap1_select_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate);
+long omap1_round_to_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate);
+int omap1_clk_set_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate);
+long omap1_clk_round_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate,
+ unsigned long *p_rate);
struct uart_clk {
- struct clk clk;
- unsigned long sysc_addr;
+ struct omap1_clk clk;
+	unsigned long			sysc_addr;
};
/* Provide a method for preventing idling some ARM IDLECT clocks */
struct arm_idlect1_clk {
- struct clk clk;
- unsigned long no_idle_count;
- __u8 idlect_shift;
+ struct omap1_clk clk;
+ unsigned long no_idle_count;
+ __u8 idlect_shift;
};
/* ARM_CKCTL bit shifts */
@@ -224,7 +178,7 @@ struct arm_idlect1_clk {
#define SOFT_REQ_REG2 0xfffe0880
extern __u32 arm_idlect1_mask;
-extern struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;
+extern struct omap1_clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;
extern const struct clkops clkops_dspck;
extern const struct clkops clkops_uart_16xx;
@@ -233,4 +187,9 @@ extern const struct clkops clkops_generic;
/* used for passing SoC type to omap1_{select,round_to}_table_rate() */
extern u32 cpu_mask;
+extern const struct clk_ops omap1_clk_null_ops;
+extern const struct clk_ops omap1_clk_gate_ops;
+extern const struct clk_ops omap1_clk_rate_ops;
+extern const struct clk_ops omap1_clk_full_ops;
+
#endif
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c
index 165b6a75a59b..96d846c37c43 100644
--- a/arch/arm/mach-omap1/clock_data.c
+++ b/arch/arm/mach-omap1/clock_data.c
@@ -14,6 +14,8 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/soc/ti/omap1-io.h>
@@ -71,16 +73,18 @@
* Omap1 clocks
*/
-static struct clk ck_ref = {
- .name = "ck_ref",
- .ops = &clkops_null,
+static struct omap1_clk ck_ref = {
+ .hw.init = CLK_HW_INIT_NO_PARENT("ck_ref", &omap1_clk_rate_ops, 0),
.rate = 12000000,
};
-static struct clk ck_dpll1 = {
- .name = "ck_dpll1",
- .ops = &clkops_null,
- .parent = &ck_ref,
+static struct omap1_clk ck_dpll1 = {
+ .hw.init = CLK_HW_INIT("ck_dpll1", "ck_ref", &omap1_clk_rate_ops,
+ /*
+ * force recursive refresh of rates of the clock
+ * and its children when clk_get_rate() is called
+ */
+ CLK_GET_RATE_NOCACHE),
};
/*
@@ -89,32 +93,28 @@ static struct clk ck_dpll1 = {
*/
static struct arm_idlect1_clk ck_dpll1out = {
.clk = {
- .name = "ck_dpll1out",
+ .hw.init = CLK_HW_INIT("ck_dpll1out", "ck_dpll1", &omap1_clk_gate_ops, 0),
.ops = &clkops_generic,
- .parent = &ck_dpll1,
.flags = CLOCK_IDLE_CONTROL | ENABLE_REG_32BIT,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_CKOUT_ARM,
- .recalc = &followparent_recalc,
},
.idlect_shift = IDL_CLKOUT_ARM_SHIFT,
};
-static struct clk sossi_ck = {
- .name = "ck_sossi",
+static struct omap1_clk sossi_ck = {
+ .hw.init = CLK_HW_INIT("ck_sossi", "ck_dpll1out", &omap1_clk_full_ops, 0),
.ops = &clkops_generic,
- .parent = &ck_dpll1out.clk,
.flags = CLOCK_NO_IDLE_PARENT | ENABLE_REG_32BIT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1),
.enable_bit = CONF_MOD_SOSSI_CLK_EN_R,
.recalc = &omap1_sossi_recalc,
+ .round_rate = &omap1_round_sossi_rate,
.set_rate = &omap1_set_sossi_rate,
};
-static struct clk arm_ck = {
- .name = "arm_ck",
- .ops = &clkops_null,
- .parent = &ck_dpll1,
+static struct omap1_clk arm_ck = {
+ .hw.init = CLK_HW_INIT("arm_ck", "ck_dpll1", &omap1_clk_rate_ops, 0),
.rate_offset = CKCTL_ARMDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
@@ -123,9 +123,9 @@ static struct clk arm_ck = {
static struct arm_idlect1_clk armper_ck = {
.clk = {
- .name = "armper_ck",
+ .hw.init = CLK_HW_INIT("armper_ck", "ck_dpll1", &omap1_clk_full_ops,
+ CLK_IS_CRITICAL),
.ops = &clkops_generic,
- .parent = &ck_dpll1,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_PERCK,
@@ -141,46 +141,41 @@ static struct arm_idlect1_clk armper_ck = {
* FIXME: This clock seems to be necessary but no-one has asked for its
* activation. [ GPIO code for 1510 ]
*/
-static struct clk arm_gpio_ck = {
- .name = "ick",
+static struct omap1_clk arm_gpio_ck = {
+ .hw.init = CLK_HW_INIT("ick", "ck_dpll1", &omap1_clk_gate_ops, CLK_IS_CRITICAL),
.ops = &clkops_generic,
- .parent = &ck_dpll1,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_GPIOCK,
- .recalc = &followparent_recalc,
};
static struct arm_idlect1_clk armxor_ck = {
.clk = {
- .name = "armxor_ck",
+ .hw.init = CLK_HW_INIT("armxor_ck", "ck_ref", &omap1_clk_gate_ops,
+ CLK_IS_CRITICAL),
.ops = &clkops_generic,
- .parent = &ck_ref,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_XORPCK,
- .recalc = &followparent_recalc,
},
.idlect_shift = IDLXORP_ARM_SHIFT,
};
static struct arm_idlect1_clk armtim_ck = {
.clk = {
- .name = "armtim_ck",
+ .hw.init = CLK_HW_INIT("armtim_ck", "ck_ref", &omap1_clk_gate_ops,
+ CLK_IS_CRITICAL),
.ops = &clkops_generic,
- .parent = &ck_ref,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_TIMCK,
- .recalc = &followparent_recalc,
},
.idlect_shift = IDLTIM_ARM_SHIFT,
};
static struct arm_idlect1_clk armwdt_ck = {
.clk = {
- .name = "armwdt_ck",
+ .hw.init = CLK_HW_INIT("armwdt_ck", "ck_ref", &omap1_clk_full_ops, 0),
.ops = &clkops_generic,
- .parent = &ck_ref,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_WDTCK,
@@ -190,11 +185,8 @@ static struct arm_idlect1_clk armwdt_ck = {
.idlect_shift = IDLWDT_ARM_SHIFT,
};
-static struct clk arminth_ck16xx = {
- .name = "arminth_ck",
- .ops = &clkops_null,
- .parent = &arm_ck,
- .recalc = &followparent_recalc,
+static struct omap1_clk arminth_ck16xx = {
+ .hw.init = CLK_HW_INIT("arminth_ck", "arm_ck", &omap1_clk_null_ops, 0),
/* Note: On 16xx the frequency can be divided by 2 by programming
* ARM_CKCTL:ARM_INTHCK_SEL(14) to 1
*
@@ -202,10 +194,9 @@ static struct clk arminth_ck16xx = {
*/
};
-static struct clk dsp_ck = {
- .name = "dsp_ck",
+static struct omap1_clk dsp_ck = {
+ .hw.init = CLK_HW_INIT("dsp_ck", "ck_dpll1", &omap1_clk_full_ops, 0),
.ops = &clkops_generic,
- .parent = &ck_dpll1,
.enable_reg = OMAP1_IO_ADDRESS(ARM_CKCTL),
.enable_bit = EN_DSPCK,
.rate_offset = CKCTL_DSPDIV_OFFSET,
@@ -214,20 +205,17 @@ static struct clk dsp_ck = {
.set_rate = omap1_clk_set_rate_ckctl_arm,
};
-static struct clk dspmmu_ck = {
- .name = "dspmmu_ck",
- .ops = &clkops_null,
- .parent = &ck_dpll1,
+static struct omap1_clk dspmmu_ck = {
+ .hw.init = CLK_HW_INIT("dspmmu_ck", "ck_dpll1", &omap1_clk_rate_ops, 0),
.rate_offset = CKCTL_DSPMMUDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = omap1_clk_set_rate_ckctl_arm,
};
-static struct clk dspper_ck = {
- .name = "dspper_ck",
+static struct omap1_clk dspper_ck = {
+ .hw.init = CLK_HW_INIT("dspper_ck", "ck_dpll1", &omap1_clk_full_ops, 0),
.ops = &clkops_dspck,
- .parent = &ck_dpll1,
.enable_reg = DSP_IDLECT2,
.enable_bit = EN_PERCK,
.rate_offset = CKCTL_PERDIV_OFFSET,
@@ -236,29 +224,23 @@ static struct clk dspper_ck = {
.set_rate = &omap1_clk_set_rate_dsp_domain,
};
-static struct clk dspxor_ck = {
- .name = "dspxor_ck",
+static struct omap1_clk dspxor_ck = {
+ .hw.init = CLK_HW_INIT("dspxor_ck", "ck_ref", &omap1_clk_gate_ops, 0),
.ops = &clkops_dspck,
- .parent = &ck_ref,
.enable_reg = DSP_IDLECT2,
.enable_bit = EN_XORPCK,
- .recalc = &followparent_recalc,
};
-static struct clk dsptim_ck = {
- .name = "dsptim_ck",
+static struct omap1_clk dsptim_ck = {
+ .hw.init = CLK_HW_INIT("dsptim_ck", "ck_ref", &omap1_clk_gate_ops, 0),
.ops = &clkops_dspck,
- .parent = &ck_ref,
.enable_reg = DSP_IDLECT2,
.enable_bit = EN_DSPTIMCK,
- .recalc = &followparent_recalc,
};
static struct arm_idlect1_clk tc_ck = {
.clk = {
- .name = "tc_ck",
- .ops = &clkops_null,
- .parent = &ck_dpll1,
+ .hw.init = CLK_HW_INIT("tc_ck", "ck_dpll1", &omap1_clk_rate_ops, 0),
.flags = CLOCK_IDLE_CONTROL,
.rate_offset = CKCTL_TCDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
@@ -268,116 +250,88 @@ static struct arm_idlect1_clk tc_ck = {
.idlect_shift = IDLIF_ARM_SHIFT,
};
-static struct clk arminth_ck1510 = {
- .name = "arminth_ck",
- .ops = &clkops_null,
- .parent = &tc_ck.clk,
- .recalc = &followparent_recalc,
+static struct omap1_clk arminth_ck1510 = {
+ .hw.init = CLK_HW_INIT("arminth_ck", "tc_ck", &omap1_clk_null_ops, 0),
/* Note: On 1510 the frequency follows TC_CK
*
* 16xx version is in MPU clocks.
*/
};
-static struct clk tipb_ck = {
+static struct omap1_clk tipb_ck = {
/* No-idle controlled by "tc_ck" */
- .name = "tipb_ck",
- .ops = &clkops_null,
- .parent = &tc_ck.clk,
- .recalc = &followparent_recalc,
+ .hw.init = CLK_HW_INIT("tipb_ck", "tc_ck", &omap1_clk_null_ops, 0),
};
-static struct clk l3_ocpi_ck = {
+static struct omap1_clk l3_ocpi_ck = {
/* No-idle controlled by "tc_ck" */
- .name = "l3_ocpi_ck",
+ .hw.init = CLK_HW_INIT("l3_ocpi_ck", "tc_ck", &omap1_clk_gate_ops, 0),
.ops = &clkops_generic,
- .parent = &tc_ck.clk,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT3),
.enable_bit = EN_OCPI_CK,
- .recalc = &followparent_recalc,
};
-static struct clk tc1_ck = {
- .name = "tc1_ck",
+static struct omap1_clk tc1_ck = {
+ .hw.init = CLK_HW_INIT("tc1_ck", "tc_ck", &omap1_clk_gate_ops, 0),
.ops = &clkops_generic,
- .parent = &tc_ck.clk,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT3),
.enable_bit = EN_TC1_CK,
- .recalc = &followparent_recalc,
};
/*
* FIXME: This clock seems to be necessary but no-one has asked for its
* activation. [ pm.c (SRAM), CCP, Camera ]
*/
-static struct clk tc2_ck = {
- .name = "tc2_ck",
+
+static struct omap1_clk tc2_ck = {
+ .hw.init = CLK_HW_INIT("tc2_ck", "tc_ck", &omap1_clk_gate_ops, CLK_IS_CRITICAL),
.ops = &clkops_generic,
- .parent = &tc_ck.clk,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT3),
.enable_bit = EN_TC2_CK,
- .recalc = &followparent_recalc,
};
-static struct clk dma_ck = {
+static struct omap1_clk dma_ck = {
/* No-idle controlled by "tc_ck" */
- .name = "dma_ck",
- .ops = &clkops_null,
- .parent = &tc_ck.clk,
- .recalc = &followparent_recalc,
+ .hw.init = CLK_HW_INIT("dma_ck", "tc_ck", &omap1_clk_null_ops, 0),
};
-static struct clk dma_lcdfree_ck = {
- .name = "dma_lcdfree_ck",
- .ops = &clkops_null,
- .parent = &tc_ck.clk,
- .recalc = &followparent_recalc,
+static struct omap1_clk dma_lcdfree_ck = {
+ .hw.init = CLK_HW_INIT("dma_lcdfree_ck", "tc_ck", &omap1_clk_null_ops, 0),
};
static struct arm_idlect1_clk api_ck = {
.clk = {
- .name = "api_ck",
+ .hw.init = CLK_HW_INIT("api_ck", "tc_ck", &omap1_clk_gate_ops, 0),
.ops = &clkops_generic,
- .parent = &tc_ck.clk,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_APICK,
- .recalc = &followparent_recalc,
},
.idlect_shift = IDLAPI_ARM_SHIFT,
};
static struct arm_idlect1_clk lb_ck = {
.clk = {
- .name = "lb_ck",
+ .hw.init = CLK_HW_INIT("lb_ck", "tc_ck", &omap1_clk_gate_ops, 0),
.ops = &clkops_generic,
- .parent = &tc_ck.clk,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_LBCK,
- .recalc = &followparent_recalc,
},
.idlect_shift = IDLLB_ARM_SHIFT,
};
-static struct clk rhea1_ck = {
- .name = "rhea1_ck",
- .ops = &clkops_null,
- .parent = &tc_ck.clk,
- .recalc = &followparent_recalc,
+static struct omap1_clk rhea1_ck = {
+ .hw.init = CLK_HW_INIT("rhea1_ck", "tc_ck", &omap1_clk_null_ops, 0),
};
-static struct clk rhea2_ck = {
- .name = "rhea2_ck",
- .ops = &clkops_null,
- .parent = &tc_ck.clk,
- .recalc = &followparent_recalc,
+static struct omap1_clk rhea2_ck = {
+ .hw.init = CLK_HW_INIT("rhea2_ck", "tc_ck", &omap1_clk_null_ops, 0),
};
-static struct clk lcd_ck_16xx = {
- .name = "lcd_ck",
+static struct omap1_clk lcd_ck_16xx = {
+ .hw.init = CLK_HW_INIT("lcd_ck", "ck_dpll1", &omap1_clk_full_ops, 0),
.ops = &clkops_generic,
- .parent = &ck_dpll1,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_LCDCK,
.rate_offset = CKCTL_LCDDIV_OFFSET,
@@ -388,9 +342,8 @@ static struct clk lcd_ck_16xx = {
static struct arm_idlect1_clk lcd_ck_1510 = {
.clk = {
- .name = "lcd_ck",
+ .hw.init = CLK_HW_INIT("lcd_ck", "ck_dpll1", &omap1_clk_full_ops, 0),
.ops = &clkops_generic,
- .parent = &ck_dpll1,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_LCDCK,
@@ -402,37 +355,35 @@ static struct arm_idlect1_clk lcd_ck_1510 = {
.idlect_shift = OMAP1510_IDLLCD_ARM_SHIFT,
};
+
/*
* XXX The enable_bit here is misused - it simply switches between 12MHz
- * and 48MHz. Reimplement with clksel.
+ * and 48MHz. Reimplement with clk_mux.
*
* XXX does this need SYSC register handling?
*/
-static struct clk uart1_1510 = {
- .name = "uart1_ck",
- .ops = &clkops_null,
+static struct omap1_clk uart1_1510 = {
/* Direct from ULPD, no real parent */
- .parent = &armper_ck.clk,
- .rate = 12000000,
+ .hw.init = CLK_HW_INIT("uart1_ck", "armper_ck", &omap1_clk_full_ops, 0),
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = CONF_MOD_UART1_CLK_MODE_R,
+ .round_rate = &omap1_round_uart_rate,
.set_rate = &omap1_set_uart_rate,
.recalc = &omap1_uart_recalc,
};
/*
* XXX The enable_bit here is misused - it simply switches between 12MHz
- * and 48MHz. Reimplement with clksel.
+ * and 48MHz. Reimplement with clk_mux.
*
* XXX SYSC register handling does not belong in the clock framework
*/
static struct uart_clk uart1_16xx = {
.clk = {
- .name = "uart1_ck",
.ops = &clkops_uart_16xx,
/* Direct from ULPD, no real parent */
- .parent = &armper_ck.clk,
+ .hw.init = CLK_HW_INIT("uart1_ck", "armper_ck", &omap1_clk_full_ops, 0),
.rate = 48000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
@@ -443,54 +394,49 @@ static struct uart_clk uart1_16xx = {
/*
* XXX The enable_bit here is misused - it simply switches between 12MHz
- * and 48MHz. Reimplement with clksel.
+ * and 48MHz. Reimplement with clk_mux.
*
* XXX does this need SYSC register handling?
*/
-static struct clk uart2_ck = {
- .name = "uart2_ck",
- .ops = &clkops_null,
+static struct omap1_clk uart2_ck = {
/* Direct from ULPD, no real parent */
- .parent = &armper_ck.clk,
- .rate = 12000000,
+ .hw.init = CLK_HW_INIT("uart2_ck", "armper_ck", &omap1_clk_full_ops, 0),
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = CONF_MOD_UART2_CLK_MODE_R,
+ .round_rate = &omap1_round_uart_rate,
.set_rate = &omap1_set_uart_rate,
.recalc = &omap1_uart_recalc,
};
/*
* XXX The enable_bit here is misused - it simply switches between 12MHz
- * and 48MHz. Reimplement with clksel.
+ * and 48MHz. Reimplement with clk_mux.
*
* XXX does this need SYSC register handling?
*/
-static struct clk uart3_1510 = {
- .name = "uart3_ck",
- .ops = &clkops_null,
+static struct omap1_clk uart3_1510 = {
/* Direct from ULPD, no real parent */
- .parent = &armper_ck.clk,
- .rate = 12000000,
+ .hw.init = CLK_HW_INIT("uart3_ck", "armper_ck", &omap1_clk_full_ops, 0),
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = CONF_MOD_UART3_CLK_MODE_R,
+ .round_rate = &omap1_round_uart_rate,
.set_rate = &omap1_set_uart_rate,
.recalc = &omap1_uart_recalc,
};
/*
* XXX The enable_bit here is misused - it simply switches between 12MHz
- * and 48MHz. Reimplement with clksel.
+ * and 48MHz. Reimplement with clk_mux.
*
* XXX SYSC register handling does not belong in the clock framework
*/
static struct uart_clk uart3_16xx = {
.clk = {
- .name = "uart3_ck",
.ops = &clkops_uart_16xx,
/* Direct from ULPD, no real parent */
- .parent = &armper_ck.clk,
+ .hw.init = CLK_HW_INIT("uart3_ck", "armper_ck", &omap1_clk_full_ops, 0),
.rate = 48000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
@@ -499,30 +445,30 @@ static struct uart_clk uart3_16xx = {
.sysc_addr = 0xfffb9854,
};
-static struct clk usb_clko = { /* 6 MHz output on W4_USB_CLKO */
- .name = "usb_clko",
+static struct omap1_clk usb_clko = { /* 6 MHz output on W4_USB_CLKO */
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
+ .hw.init = CLK_HW_INIT_NO_PARENT("usb_clko", &omap1_clk_full_ops, 0),
.rate = 6000000,
.flags = ENABLE_REG_32BIT,
.enable_reg = OMAP1_IO_ADDRESS(ULPD_CLOCK_CTRL),
.enable_bit = USB_MCLK_EN_BIT,
};
-static struct clk usb_hhc_ck1510 = {
- .name = "usb_hhc_ck",
+static struct omap1_clk usb_hhc_ck1510 = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
+ .hw.init = CLK_HW_INIT_NO_PARENT("usb_hhc_ck", &omap1_clk_full_ops, 0),
.rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */
.flags = ENABLE_REG_32BIT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = USB_HOST_HHC_UHOST_EN,
};
-static struct clk usb_hhc_ck16xx = {
- .name = "usb_hhc_ck",
+static struct omap1_clk usb_hhc_ck16xx = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
+ .hw.init = CLK_HW_INIT_NO_PARENT("usb_hhc_ck", &omap1_clk_full_ops, 0),
.rate = 48000000,
/* OTG_SYSCON_2.OTG_PADEN == 0 (not 1510-compatible) */
.flags = ENABLE_REG_32BIT,
@@ -530,46 +476,46 @@ static struct clk usb_hhc_ck16xx = {
.enable_bit = OTG_SYSCON_2_UHOST_EN_SHIFT
};
-static struct clk usb_dc_ck = {
- .name = "usb_dc_ck",
+static struct omap1_clk usb_dc_ck = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
+ .hw.init = CLK_HW_INIT_NO_PARENT("usb_dc_ck", &omap1_clk_full_ops, 0),
.rate = 48000000,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT,
};
-static struct clk uart1_7xx = {
- .name = "uart1_ck",
+static struct omap1_clk uart1_7xx = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
+ .hw.init = CLK_HW_INIT_NO_PARENT("uart1_ck", &omap1_clk_full_ops, 0),
.rate = 12000000,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = 9,
};
-static struct clk uart2_7xx = {
- .name = "uart2_ck",
+static struct omap1_clk uart2_7xx = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
+ .hw.init = CLK_HW_INIT_NO_PARENT("uart2_ck", &omap1_clk_full_ops, 0),
.rate = 12000000,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = 11,
};
-static struct clk mclk_1510 = {
- .name = "mclk",
+static struct omap1_clk mclk_1510 = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .hw.init = CLK_HW_INIT_NO_PARENT("mclk", &omap1_clk_full_ops, 0),
.rate = 12000000,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = SOFT_COM_MCKO_REQ_SHIFT,
};
-static struct clk mclk_16xx = {
- .name = "mclk",
+static struct omap1_clk mclk_16xx = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .hw.init = CLK_HW_INIT_NO_PARENT("mclk", &omap1_clk_full_ops, 0),
.enable_reg = OMAP1_IO_ADDRESS(COM_CLK_DIV_CTRL_SEL),
.enable_bit = COM_ULPD_PLL_CLK_REQ,
.set_rate = &omap1_set_ext_clk_rate,
@@ -577,17 +523,16 @@ static struct clk mclk_16xx = {
.init = &omap1_init_ext_clk,
};
-static struct clk bclk_1510 = {
- .name = "bclk",
- .ops = &clkops_generic,
+static struct omap1_clk bclk_1510 = {
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .hw.init = CLK_HW_INIT_NO_PARENT("bclk", &omap1_clk_rate_ops, 0),
.rate = 12000000,
};
-static struct clk bclk_16xx = {
- .name = "bclk",
+static struct omap1_clk bclk_16xx = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .hw.init = CLK_HW_INIT_NO_PARENT("bclk", &omap1_clk_full_ops, 0),
.enable_reg = OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL),
.enable_bit = SWD_ULPD_PLL_CLK_REQ,
.set_rate = &omap1_set_ext_clk_rate,
@@ -595,11 +540,10 @@ static struct clk bclk_16xx = {
.init = &omap1_init_ext_clk,
};
-static struct clk mmc1_ck = {
- .name = "mmc1_ck",
+static struct omap1_clk mmc1_ck = {
.ops = &clkops_generic,
/* Functional clock is direct from ULPD, interface clock is ARMPER */
- .parent = &armper_ck.clk,
+ .hw.init = CLK_HW_INIT("mmc1_ck", "armper_ck", &omap1_clk_full_ops, 0),
.rate = 48000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
@@ -610,32 +554,29 @@ static struct clk mmc1_ck = {
* XXX MOD_CONF_CTRL_0 bit 20 is defined in the 1510 TRM as
* CONF_MOD_MCBSP3_AUXON ??
*/
-static struct clk mmc2_ck = {
- .name = "mmc2_ck",
+static struct omap1_clk mmc2_ck = {
.ops = &clkops_generic,
/* Functional clock is direct from ULPD, interface clock is ARMPER */
- .parent = &armper_ck.clk,
+ .hw.init = CLK_HW_INIT("mmc2_ck", "armper_ck", &omap1_clk_full_ops, 0),
.rate = 48000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = 20,
};
-static struct clk mmc3_ck = {
- .name = "mmc3_ck",
+static struct omap1_clk mmc3_ck = {
.ops = &clkops_generic,
/* Functional clock is direct from ULPD, interface clock is ARMPER */
- .parent = &armper_ck.clk,
+ .hw.init = CLK_HW_INIT("mmc3_ck", "armper_ck", &omap1_clk_full_ops, 0),
.rate = 48000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = SOFT_MMC_DPLL_REQ_SHIFT,
};
-static struct clk virtual_ck_mpu = {
- .name = "mpu",
- .ops = &clkops_null,
- .parent = &arm_ck, /* Is smarter alias for */
+static struct omap1_clk virtual_ck_mpu = {
+ /* Is smarter alias for arm_ck */
+ .hw.init = CLK_HW_INIT("mpu", "arm_ck", &omap1_clk_rate_ops, 0),
.recalc = &followparent_recalc,
.set_rate = &omap1_select_table_rate,
.round_rate = &omap1_round_to_table_rate,
@@ -643,20 +584,14 @@ static struct clk virtual_ck_mpu = {
/* virtual functional clock domain for I2C. Just for making sure that ARMXOR_CK
remains active during MPU idle whenever this is enabled */
-static struct clk i2c_fck = {
- .name = "i2c_fck",
- .ops = &clkops_null,
+static struct omap1_clk i2c_fck = {
+ .hw.init = CLK_HW_INIT("i2c_fck", "armxor_ck", &omap1_clk_gate_ops, 0),
.flags = CLOCK_NO_IDLE_PARENT,
- .parent = &armxor_ck.clk,
- .recalc = &followparent_recalc,
};
-static struct clk i2c_ick = {
- .name = "i2c_ick",
- .ops = &clkops_null,
+static struct omap1_clk i2c_ick = {
+ .hw.init = CLK_HW_INIT("i2c_ick", "armper_ck", &omap1_clk_gate_ops, 0),
.flags = CLOCK_NO_IDLE_PARENT,
- .parent = &armper_ck.clk,
- .recalc = &followparent_recalc,
};
/*
@@ -665,81 +600,81 @@ static struct clk i2c_ick = {
static struct omap_clk omap_clks[] = {
/* non-ULPD clocks */
- CLK(NULL, "ck_ref", &ck_ref, CK_16XX | CK_1510 | CK_310 | CK_7XX),
- CLK(NULL, "ck_dpll1", &ck_dpll1, CK_16XX | CK_1510 | CK_310 | CK_7XX),
+ CLK(NULL, "ck_ref", &ck_ref.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
+ CLK(NULL, "ck_dpll1", &ck_dpll1.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
/* CK_GEN1 clocks */
- CLK(NULL, "ck_dpll1out", &ck_dpll1out.clk, CK_16XX),
- CLK(NULL, "ck_sossi", &sossi_ck, CK_16XX),
- CLK(NULL, "arm_ck", &arm_ck, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "armper_ck", &armper_ck.clk, CK_16XX | CK_1510 | CK_310),
- CLK("omap_gpio.0", "ick", &arm_gpio_ck, CK_1510 | CK_310),
- CLK(NULL, "armxor_ck", &armxor_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
- CLK(NULL, "armtim_ck", &armtim_ck.clk, CK_16XX | CK_1510 | CK_310),
- CLK("omap_wdt", "fck", &armwdt_ck.clk, CK_16XX | CK_1510 | CK_310),
- CLK("omap_wdt", "ick", &armper_ck.clk, CK_16XX),
- CLK("omap_wdt", "ick", &dummy_ck, CK_1510 | CK_310),
- CLK(NULL, "arminth_ck", &arminth_ck1510, CK_1510 | CK_310),
- CLK(NULL, "arminth_ck", &arminth_ck16xx, CK_16XX),
+ CLK(NULL, "ck_dpll1out", &ck_dpll1out.clk.hw, CK_16XX),
+ CLK(NULL, "ck_sossi", &sossi_ck.hw, CK_16XX),
+ CLK(NULL, "arm_ck", &arm_ck.hw, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "armper_ck", &armper_ck.clk.hw, CK_16XX | CK_1510 | CK_310),
+ CLK("omap_gpio.0", "ick", &arm_gpio_ck.hw, CK_1510 | CK_310),
+ CLK(NULL, "armxor_ck", &armxor_ck.clk.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
+ CLK(NULL, "armtim_ck", &armtim_ck.clk.hw, CK_16XX | CK_1510 | CK_310),
+ CLK("omap_wdt", "fck", &armwdt_ck.clk.hw, CK_16XX | CK_1510 | CK_310),
+ CLK("omap_wdt", "ick", &armper_ck.clk.hw, CK_16XX),
+ CLK("omap_wdt", "ick", &dummy_ck.hw, CK_1510 | CK_310),
+ CLK(NULL, "arminth_ck", &arminth_ck1510.hw, CK_1510 | CK_310),
+ CLK(NULL, "arminth_ck", &arminth_ck16xx.hw, CK_16XX),
/* CK_GEN2 clocks */
- CLK(NULL, "dsp_ck", &dsp_ck, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "dspmmu_ck", &dspmmu_ck, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "dspper_ck", &dspper_ck, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "dspxor_ck", &dspxor_ck, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "dsptim_ck", &dsptim_ck, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "dsp_ck", &dsp_ck.hw, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "dspmmu_ck", &dspmmu_ck.hw, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "dspper_ck", &dspper_ck.hw, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "dspxor_ck", &dspxor_ck.hw, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "dsptim_ck", &dsptim_ck.hw, CK_16XX | CK_1510 | CK_310),
/* CK_GEN3 clocks */
- CLK(NULL, "tc_ck", &tc_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
- CLK(NULL, "tipb_ck", &tipb_ck, CK_1510 | CK_310),
- CLK(NULL, "l3_ocpi_ck", &l3_ocpi_ck, CK_16XX | CK_7XX),
- CLK(NULL, "tc1_ck", &tc1_ck, CK_16XX),
- CLK(NULL, "tc2_ck", &tc2_ck, CK_16XX),
- CLK(NULL, "dma_ck", &dma_ck, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "dma_lcdfree_ck", &dma_lcdfree_ck, CK_16XX),
- CLK(NULL, "api_ck", &api_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
- CLK(NULL, "lb_ck", &lb_ck.clk, CK_1510 | CK_310),
- CLK(NULL, "rhea1_ck", &rhea1_ck, CK_16XX),
- CLK(NULL, "rhea2_ck", &rhea2_ck, CK_16XX),
- CLK(NULL, "lcd_ck", &lcd_ck_16xx, CK_16XX | CK_7XX),
- CLK(NULL, "lcd_ck", &lcd_ck_1510.clk, CK_1510 | CK_310),
+ CLK(NULL, "tc_ck", &tc_ck.clk.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
+ CLK(NULL, "tipb_ck", &tipb_ck.hw, CK_1510 | CK_310),
+ CLK(NULL, "l3_ocpi_ck", &l3_ocpi_ck.hw, CK_16XX | CK_7XX),
+ CLK(NULL, "tc1_ck", &tc1_ck.hw, CK_16XX),
+ CLK(NULL, "tc2_ck", &tc2_ck.hw, CK_16XX),
+ CLK(NULL, "dma_ck", &dma_ck.hw, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "dma_lcdfree_ck", &dma_lcdfree_ck.hw, CK_16XX),
+ CLK(NULL, "api_ck", &api_ck.clk.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
+ CLK(NULL, "lb_ck", &lb_ck.clk.hw, CK_1510 | CK_310),
+ CLK(NULL, "rhea1_ck", &rhea1_ck.hw, CK_16XX),
+ CLK(NULL, "rhea2_ck", &rhea2_ck.hw, CK_16XX),
+ CLK(NULL, "lcd_ck", &lcd_ck_16xx.hw, CK_16XX | CK_7XX),
+ CLK(NULL, "lcd_ck", &lcd_ck_1510.clk.hw, CK_1510 | CK_310),
/* ULPD clocks */
- CLK(NULL, "uart1_ck", &uart1_1510, CK_1510 | CK_310),
- CLK(NULL, "uart1_ck", &uart1_16xx.clk, CK_16XX),
- CLK(NULL, "uart1_ck", &uart1_7xx, CK_7XX),
- CLK(NULL, "uart2_ck", &uart2_ck, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "uart2_ck", &uart2_7xx, CK_7XX),
- CLK(NULL, "uart3_ck", &uart3_1510, CK_1510 | CK_310),
- CLK(NULL, "uart3_ck", &uart3_16xx.clk, CK_16XX),
- CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310),
- CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX),
- CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX | CK_7XX),
- CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310),
- CLK(NULL, "mclk", &mclk_16xx, CK_16XX),
- CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310),
- CLK(NULL, "bclk", &bclk_16xx, CK_16XX),
- CLK("mmci-omap.0", "fck", &mmc1_ck, CK_16XX | CK_1510 | CK_310),
- CLK("mmci-omap.0", "fck", &mmc3_ck, CK_7XX),
- CLK("mmci-omap.0", "ick", &armper_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
- CLK("mmci-omap.1", "fck", &mmc2_ck, CK_16XX),
- CLK("mmci-omap.1", "ick", &armper_ck.clk, CK_16XX),
+ CLK(NULL, "uart1_ck", &uart1_1510.hw, CK_1510 | CK_310),
+ CLK(NULL, "uart1_ck", &uart1_16xx.clk.hw, CK_16XX),
+ CLK(NULL, "uart1_ck", &uart1_7xx.hw, CK_7XX),
+ CLK(NULL, "uart2_ck", &uart2_ck.hw, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "uart2_ck", &uart2_7xx.hw, CK_7XX),
+ CLK(NULL, "uart3_ck", &uart3_1510.hw, CK_1510 | CK_310),
+ CLK(NULL, "uart3_ck", &uart3_16xx.clk.hw, CK_16XX),
+ CLK(NULL, "usb_clko", &usb_clko.hw, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510.hw, CK_1510 | CK_310),
+ CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx.hw, CK_16XX),
+ CLK(NULL, "usb_dc_ck", &usb_dc_ck.hw, CK_16XX | CK_7XX),
+ CLK(NULL, "mclk", &mclk_1510.hw, CK_1510 | CK_310),
+ CLK(NULL, "mclk", &mclk_16xx.hw, CK_16XX),
+ CLK(NULL, "bclk", &bclk_1510.hw, CK_1510 | CK_310),
+ CLK(NULL, "bclk", &bclk_16xx.hw, CK_16XX),
+ CLK("mmci-omap.0", "fck", &mmc1_ck.hw, CK_16XX | CK_1510 | CK_310),
+ CLK("mmci-omap.0", "fck", &mmc3_ck.hw, CK_7XX),
+ CLK("mmci-omap.0", "ick", &armper_ck.clk.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
+ CLK("mmci-omap.1", "fck", &mmc2_ck.hw, CK_16XX),
+ CLK("mmci-omap.1", "ick", &armper_ck.clk.hw, CK_16XX),
/* Virtual clocks */
- CLK(NULL, "mpu", &virtual_ck_mpu, CK_16XX | CK_1510 | CK_310),
- CLK("omap_i2c.1", "fck", &i2c_fck, CK_16XX | CK_1510 | CK_310 | CK_7XX),
- CLK("omap_i2c.1", "ick", &i2c_ick, CK_16XX),
- CLK("omap_i2c.1", "ick", &dummy_ck, CK_1510 | CK_310 | CK_7XX),
- CLK("omap1_spi100k.1", "fck", &dummy_ck, CK_7XX),
- CLK("omap1_spi100k.1", "ick", &dummy_ck, CK_7XX),
- CLK("omap1_spi100k.2", "fck", &dummy_ck, CK_7XX),
- CLK("omap1_spi100k.2", "ick", &dummy_ck, CK_7XX),
- CLK("omap_uwire", "fck", &armxor_ck.clk, CK_16XX | CK_1510 | CK_310),
- CLK("omap-mcbsp.1", "ick", &dspper_ck, CK_16XX),
- CLK("omap-mcbsp.1", "ick", &dummy_ck, CK_1510 | CK_310),
- CLK("omap-mcbsp.2", "ick", &armper_ck.clk, CK_16XX),
- CLK("omap-mcbsp.2", "ick", &dummy_ck, CK_1510 | CK_310),
- CLK("omap-mcbsp.3", "ick", &dspper_ck, CK_16XX),
- CLK("omap-mcbsp.3", "ick", &dummy_ck, CK_1510 | CK_310),
- CLK("omap-mcbsp.1", "fck", &dspxor_ck, CK_16XX | CK_1510 | CK_310),
- CLK("omap-mcbsp.2", "fck", &armper_ck.clk, CK_16XX | CK_1510 | CK_310),
- CLK("omap-mcbsp.3", "fck", &dspxor_ck, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "mpu", &virtual_ck_mpu.hw, CK_16XX | CK_1510 | CK_310),
+ CLK("omap_i2c.1", "fck", &i2c_fck.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
+ CLK("omap_i2c.1", "ick", &i2c_ick.hw, CK_16XX),
+ CLK("omap_i2c.1", "ick", &dummy_ck.hw, CK_1510 | CK_310 | CK_7XX),
+ CLK("omap1_spi100k.1", "fck", &dummy_ck.hw, CK_7XX),
+ CLK("omap1_spi100k.1", "ick", &dummy_ck.hw, CK_7XX),
+ CLK("omap1_spi100k.2", "fck", &dummy_ck.hw, CK_7XX),
+ CLK("omap1_spi100k.2", "ick", &dummy_ck.hw, CK_7XX),
+ CLK("omap_uwire", "fck", &armxor_ck.clk.hw, CK_16XX | CK_1510 | CK_310),
+ CLK("omap-mcbsp.1", "ick", &dspper_ck.hw, CK_16XX),
+ CLK("omap-mcbsp.1", "ick", &dummy_ck.hw, CK_1510 | CK_310),
+ CLK("omap-mcbsp.2", "ick", &armper_ck.clk.hw, CK_16XX),
+ CLK("omap-mcbsp.2", "ick", &dummy_ck.hw, CK_1510 | CK_310),
+ CLK("omap-mcbsp.3", "ick", &dspper_ck.hw, CK_16XX),
+ CLK("omap-mcbsp.3", "ick", &dummy_ck.hw, CK_1510 | CK_310),
+ CLK("omap-mcbsp.1", "fck", &dspxor_ck.hw, CK_16XX | CK_1510 | CK_310),
+ CLK("omap-mcbsp.2", "fck", &armper_ck.clk.hw, CK_16XX | CK_1510 | CK_310),
+ CLK("omap-mcbsp.3", "fck", &dspxor_ck.hw, CK_16XX | CK_1510 | CK_310),
};
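
A note on the conversion above: every CLK() entry now carries a struct clk_hw pointer (the .hw or .clk.hw member of the clock) instead of a legacy struct clk pointer, so the table can feed the common clock framework directly. The actual struct omap_clk/CLK() definitions are not part of this hunk; the sketch below is an assumed reconstruction, inferred only from how the entries are consumed later in omap1_clk_init() (c->cpu, c->lk.clk_hw).

    #include <linux/types.h>
    #include <linux/clkdev.h>
    #include <linux/clk-provider.h>

    /*
     * Hypothetical shape of one table entry: a clkdev lookup carrying a
     * clk_hw pointer plus a CPU mask, matching the c->lk.clk_hw and
     * c->cpu accesses in the registration loop further down.
     */
    struct omap_clk {
            u32                     cpu;    /* CK_16XX | CK_1510 | ... */
            struct clk_lookup       lk;     /* dev_id/con_id -> clk_hw */
    };

    #define CLK(dev, con, ck, cp)           \
            {                               \
                    .cpu = (cp),            \
                    .lk = {                 \
                            .dev_id = (dev),\
                            .con_id = (con),\
                            .clk_hw = (ck), \
                    },                      \
            }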
/*
@@ -778,9 +713,6 @@ int __init omap1_clk_init(void)
/* By default all idlect1 clocks are allowed to idle */
arm_idlect1_mask = ~0;
- for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++)
- clk_preinit(c->lk.clk);
-
cpu_mask = 0;
if (cpu_is_omap1710())
cpu_mask |= CK_1710;
@@ -793,16 +725,10 @@ int __init omap1_clk_init(void)
if (cpu_is_omap310())
cpu_mask |= CK_310;
- for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++)
- if (c->cpu & cpu_mask) {
- clkdev_add(&c->lk);
- clk_register(c->lk.clk);
- }
-
/* Pointers to these clocks are needed by code in clock.c */
- api_ck_p = clk_get(NULL, "api_ck");
- ck_dpll1_p = clk_get(NULL, "ck_dpll1");
- ck_ref_p = clk_get(NULL, "ck_ref");
+ api_ck_p = &api_ck.clk;
+ ck_dpll1_p = &ck_dpll1;
+ ck_ref_p = &ck_ref;
if (cpu_is_omap7xx())
ck_ref.rate = 13000000;
@@ -844,10 +770,7 @@ int __init omap1_clk_init(void)
}
}
}
- propagate_rate(&ck_dpll1);
- /* Cache rates for clocks connected to ck_ref (not dpll1) */
- propagate_rate(&ck_ref);
- omap1_show_rates();
+
if (machine_is_omap_perseus2() || machine_is_omap_fsample()) {
/* Select slicer output as OMAP input clock */
omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1,
@@ -879,16 +802,28 @@ int __init omap1_clk_init(void)
*/
omap_writew(0x0000, ARM_IDLECT2); /* Turn LCD clock off also */
- /*
- * Only enable those clocks we will need, let the drivers
- * enable other clocks as necessary
- */
- clk_enable(&armper_ck.clk);
- clk_enable(&armxor_ck.clk);
- clk_enable(&armtim_ck.clk); /* This should be done by timer code */
+ for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++) {
+ if (!(c->cpu & cpu_mask))
+ continue;
+
+ if (c->lk.clk_hw->init) { /* NULL if provider already registered */
+ const struct clk_init_data *init = c->lk.clk_hw->init;
+ const char *name = c->lk.clk_hw->init->name;
+ int err;
+
+ err = clk_hw_register(NULL, c->lk.clk_hw);
+ if (err < 0) {
+ pr_err("failed to register clock \"%s\"! (%d)\n", name, err);
+ /* may be tried again, restore init data */
+ c->lk.clk_hw->init = init;
+ continue;
+ }
+ }
+
+ clk_hw_register_clkdev(c->lk.clk_hw, c->lk.con_id, c->lk.dev_id);
+ }
- if (cpu_is_omap15xx())
- clk_enable(&arm_gpio_ck);
+ omap1_show_rates();
return 0;
}
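
With the loop above registering each selected entry through clk_hw_register() and clk_hw_register_clkdev(), the removed clk_preinit()/clk_register() path is no longer needed and consumers resolve clocks through the ordinary clkdev lookup. A minimal consumer-side sketch, assuming a driver bound to the "omap_wdt" device from the table (the function itself is illustrative, not code from this patch):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    /*
     * Illustrative consumer path: an entry such as
     * CLK("omap_wdt", "fck", ...) is matched by dev_name(dev) plus the
     * connection id once clk_hw_register_clkdev() has run.
     */
    static int example_wdt_get_clock(struct device *dev)
    {
            struct clk *fck;
            int err;

            fck = clk_get(dev, "fck");
            if (IS_ERR(fck))
                    return PTR_ERR(fck);

            err = clk_prepare_enable(fck);
            if (err)
                    clk_put(fck);

            return err;
    }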
@@ -900,7 +835,7 @@ void __init omap1_clk_late_init(void)
unsigned long rate = ck_dpll1.rate;
/* Find the highest supported frequency and enable it */
- if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
+ if (omap1_select_table_rate(&virtual_ck_mpu, ~0, arm_ck.rate)) {
pr_err("System frequencies not set, using default. Check your config.\n");
/*
* Reprogramming the DPLL is tricky, it must be done from SRAM.
diff --git a/arch/arm/mach-omap1/hardware.h b/arch/arm/mach-omap1/hardware.h
index 738becbd2972..c228234a1ed4 100644
--- a/arch/arm/mach-omap1/hardware.h
+++ b/arch/arm/mach-omap1/hardware.h
@@ -64,7 +64,7 @@ static inline u32 omap_cs3_phys(void)
#define OMAP1_IO_OFFSET 0x00f00000 /* Virtual IO = 0xff0b0000 */
#define OMAP1_IO_ADDRESS(pa) IOMEM((pa) - OMAP1_IO_OFFSET)
-#include <mach/serial.h>
+#include "serial.h"
/*
* ---------------------------------------------------------------------------
diff --git a/arch/arm/mach-omap1/include/mach/uncompress.h b/arch/arm/mach-omap1/include/mach/uncompress.h
deleted file mode 100644
index 9cca6a56788f..000000000000
--- a/arch/arm/mach-omap1/include/mach/uncompress.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * arch/arm/plat-omap/include/mach/uncompress.h
- *
- * Serial port stubs for kernel decompress status messages
- *
- * Initially based on:
- * linux-2.4.15-rmk1-dsplinux1.6/arch/arm/plat-omap/include/mach1510/uncompress.h
- * Copyright (C) 2000 RidgeRun, Inc.
- * Author: Greg Lonnon <glonnon@ridgerun.com>
- *
- * Rewritten by:
- * Author: <source@mvista.com>
- * 2004 (c) MontaVista Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/types.h>
-#include <linux/serial_reg.h>
-
-#include <asm/memory.h>
-#include <asm/mach-types.h>
-
-#include "serial.h"
-
-#define MDR1_MODE_MASK 0x07
-
-volatile u8 *uart_base;
-int uart_shift;
-
-/*
- * Store the DEBUG_LL uart number into memory.
- * See also debug-macro.S, and serial.c for related code.
- */
-static void set_omap_uart_info(unsigned char port)
-{
- /*
- * Get address of some.bss variable and round it down
- * a la CONFIG_AUTO_ZRELADDR.
- */
- u32 ram_start = (u32)&uart_shift & 0xf8000000;
- u32 *uart_info = (u32 *)(ram_start + OMAP_UART_INFO_OFS);
- *uart_info = port;
-}
-
-static inline void putc(int c)
-{
- if (!uart_base)
- return;
-
- /* Check for UART 16x mode */
- if ((uart_base[UART_OMAP_MDR1 << uart_shift] & MDR1_MODE_MASK) != 0)
- return;
-
- while (!(uart_base[UART_LSR << uart_shift] & UART_LSR_THRE))
- barrier();
- uart_base[UART_TX << uart_shift] = c;
-}
-
-static inline void flush(void)
-{
-}
-
-/*
- * Macros to configure UART1 and debug UART
- */
-#define _DEBUG_LL_ENTRY(mach, dbg_uart, dbg_shft, dbg_id) \
- if (machine_is_##mach()) { \
- uart_base = (volatile u8 *)(dbg_uart); \
- uart_shift = (dbg_shft); \
- port = (dbg_id); \
- set_omap_uart_info(port); \
- break; \
- }
-
-#define DEBUG_LL_OMAP7XX(p, mach) \
- _DEBUG_LL_ENTRY(mach, OMAP1_UART##p##_BASE, OMAP7XX_PORT_SHIFT, \
- OMAP1UART##p)
-
-#define DEBUG_LL_OMAP1(p, mach) \
- _DEBUG_LL_ENTRY(mach, OMAP1_UART##p##_BASE, OMAP_PORT_SHIFT, \
- OMAP1UART##p)
-
-static inline void arch_decomp_setup(void)
-{
- int port = 0;
-
- /*
- * Initialize the port based on the machine ID from the bootloader.
- * Note that we're using macros here instead of switch statement
- * as machine_is functions are optimized out for the boards that
- * are not selected.
- */
- do {
- /* omap7xx/8xx based boards using UART1 with shift 0 */
- DEBUG_LL_OMAP7XX(1, herald);
- DEBUG_LL_OMAP7XX(1, omap_perseus2);
-
- /* omap15xx/16xx based boards using UART1 */
- DEBUG_LL_OMAP1(1, ams_delta);
- DEBUG_LL_OMAP1(1, nokia770);
- DEBUG_LL_OMAP1(1, omap_h2);
- DEBUG_LL_OMAP1(1, omap_h3);
- DEBUG_LL_OMAP1(1, omap_innovator);
- DEBUG_LL_OMAP1(1, omap_osk);
- DEBUG_LL_OMAP1(1, omap_palmte);
- DEBUG_LL_OMAP1(1, omap_palmz71);
-
- /* omap15xx/16xx based boards using UART2 */
- DEBUG_LL_OMAP1(2, omap_palmtt);
-
- /* omap15xx/16xx based boards using UART3 */
- DEBUG_LL_OMAP1(3, sx1);
- } while (0);
-}
diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
index 05ee260918fa..d2db9b8aed3f 100644
--- a/arch/arm/mach-omap1/io.c
+++ b/arch/arm/mach-omap1/io.c
@@ -15,10 +15,8 @@
#include <asm/mach/map.h>
#include "tc.h"
-#include "mux.h"
#include "iomap.h"
#include "common.h"
-#include "clock.h"
/*
* The machine specific code may provide the extra mapping besides the
@@ -125,11 +123,6 @@ void __init omap1_init_early(void)
*/
omap_writew(0x0, MPU_PUBLIC_TIPB_CNTL);
omap_writew(0x0, MPU_PRIVATE_TIPB_CNTL);
-
- /* Must init clocks early to assure that timer interrupt works
- */
- omap1_clk_init();
- omap1_mux_init();
}
void __init omap1_init_late(void)
diff --git a/arch/arm/mach-omap1/serial.c b/arch/arm/mach-omap1/serial.c
index 299ae1106187..88928fc33b2e 100644
--- a/arch/arm/mach-omap1/serial.c
+++ b/arch/arm/mach-omap1/serial.c
@@ -19,8 +19,7 @@
#include <asm/mach-types.h>
-#include <mach/serial.h>
-
+#include "serial.h"
#include "mux.h"
#include "pm.h"
#include "soc.h"
diff --git a/arch/arm/mach-omap1/include/mach/serial.h b/arch/arm/mach-omap1/serial.h
index 4700e384c3d9..4700e384c3d9 100644
--- a/arch/arm/mach-omap1/include/mach/serial.h
+++ b/arch/arm/mach-omap1/serial.h
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index c34b9af00566..d5e127851dc7 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -51,8 +51,10 @@
#include <asm/mach/time.h>
#include "hardware.h"
+#include "mux.h"
#include "iomap.h"
#include "common.h"
+#include "clock.h"
#ifdef CONFIG_OMAP_MPU_TIMER
@@ -224,6 +226,9 @@ static inline void omap_mpu_timer_init(void)
*/
void __init omap1_timer_init(void)
{
+ omap1_clk_init();
+ omap1_mux_init();
+
if (omap_32k_timer_init() != 0)
omap_mpu_timer_init();
}
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 57f0be4065c1..a5df1d9f3360 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -1,4 +1,19 @@
# SPDX-License-Identifier: GPL-2.0-only
+menuconfig ARCH_PXA
+ bool "PXA2xx/PXA3xx-based"
+ depends on ARCH_MULTI_V5
+ depends on CPU_LITTLE_ENDIAN
+ select ARM_CPU_SUSPEND if PM
+ select CLKSRC_PXA
+ select CLKSRC_MMIO
+ select TIMER_OF
+ select CPU_XSCALE if !CPU_XSC3
+ select GPIO_PXA
+ select GPIOLIB
+ select PLAT_PXA
+ help
+ Support for Intel/Marvell's PXA2xx/PXA3xx processor line.
+
if ARCH_PXA
menu "Intel PXA2xx/PXA3xx Implementations"
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index 68730ceb8b7c..0aec36e67dc1 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -37,7 +37,8 @@ obj-$(CONFIG_MACH_SAAR) += saar.o
obj-$(CONFIG_ARCH_PXA_IDP) += idp.o
obj-$(CONFIG_ARCH_VIPER) += viper.o
obj-$(CONFIG_MACH_ARCOM_ZEUS) += zeus.o
-obj-$(CONFIG_MACH_BALLOON3) += balloon3.o
+obj-$(CONFIG_ARCOM_PCMCIA) += viper-pcmcia.o
+obj-$(CONFIG_MACH_BALLOON3) += balloon3.o balloon3-pcmcia.o
obj-$(CONFIG_MACH_CSB726) += csb726.o
obj-$(CONFIG_CSB726_CSB701) += csb701.o
obj-$(CONFIG_MACH_CM_X300) += cm-x300.o
@@ -47,18 +48,20 @@ obj-$(CONFIG_GUMSTIX_AM200EPD) += am200epd.o
obj-$(CONFIG_GUMSTIX_AM300EPD) += am300epd.o
obj-$(CONFIG_MACH_XCEP) += xcep.o
obj-$(CONFIG_MACH_TRIZEPS4) += trizeps4.o
+obj-$(CONFIG_TRIZEPS_PCMCIA) += trizeps4-pcmcia.o
obj-$(CONFIG_MACH_LOGICPD_PXA270) += lpd270.o
obj-$(CONFIG_MACH_PCM027) += pcm027.o
obj-$(CONFIG_MACH_PCM990_BASEBOARD) += pcm990-baseboard.o
-obj-$(CONFIG_MACH_COLIBRI) += colibri-pxa270.o
+obj-$(CONFIG_MACH_COLIBRI) += colibri-pxa270.o colibri-pcmcia.o
obj-$(CONFIG_MACH_COLIBRI_EVALBOARD) += colibri-evalboard.o
obj-$(CONFIG_MACH_COLIBRI_PXA270_INCOME) += colibri-pxa270-income.o
obj-$(CONFIG_MACH_COLIBRI300) += colibri-pxa3xx.o colibri-pxa300.o
-obj-$(CONFIG_MACH_COLIBRI320) += colibri-pxa3xx.o colibri-pxa320.o
-obj-$(CONFIG_MACH_VPAC270) += vpac270.o
+obj-$(CONFIG_MACH_COLIBRI320) += colibri-pxa3xx.o colibri-pxa320.o colibri-pcmcia.o
+obj-$(CONFIG_MACH_VPAC270) += vpac270.o vpac270-pcmcia.o
# End-user Products
obj-$(CONFIG_MACH_H4700) += hx4700.o
+obj-$(CONFIG_MACH_H4700) += hx4700-pcmcia.o
obj-$(CONFIG_MACH_H5000) += h5000.o
obj-$(CONFIG_MACH_HIMALAYA) += himalaya.o
obj-$(CONFIG_MACH_MAGICIAN) += magician.o
@@ -66,12 +69,12 @@ obj-$(CONFIG_MACH_MIOA701) += mioa701.o mioa701_bootresume.o
obj-$(CONFIG_PXA_EZX) += ezx.o
obj-$(CONFIG_MACH_MP900C) += mp900.o
obj-$(CONFIG_MACH_PALMTE2) += palmte2.o
-obj-$(CONFIG_MACH_PALMTC) += palmtc.o
+obj-$(CONFIG_MACH_PALMTC) += palmtc.o palmtc-pcmcia.o
obj-$(CONFIG_MACH_PALM27X) += palm27x.o
obj-$(CONFIG_MACH_PALMT5) += palmt5.o
-obj-$(CONFIG_MACH_PALMTX) += palmtx.o
+obj-$(CONFIG_MACH_PALMTX) += palmtx.o palmtx-pcmcia.o
obj-$(CONFIG_MACH_PALMZ72) += palmz72.o
-obj-$(CONFIG_MACH_PALMLD) += palmld.o
+obj-$(CONFIG_MACH_PALMLD) += palmld.o palmld-pcmcia.o
obj-$(CONFIG_PALM_TREO) += palmtreo.o
obj-$(CONFIG_PXA_SHARP_C7xx) += corgi.o sharpsl_pm.o corgi_pm.o
obj-$(CONFIG_PXA_SHARP_Cxx00) += spitz.o sharpsl_pm.o spitz_pm.o
@@ -79,6 +82,7 @@ obj-$(CONFIG_MACH_POODLE) += poodle.o
obj-$(CONFIG_MACH_TOSA) += tosa.o
obj-$(CONFIG_MACH_ICONTROL) += icontrol.o mxm8x10.o
obj-$(CONFIG_ARCH_PXA_ESERIES) += eseries.o
+obj-$(CONFIG_MACH_E740) += e740-pcmcia.o
obj-$(CONFIG_MACH_ZIPIT2) += z2.o
obj-$(CONFIG_PXA_SYSTEMS_CPLDS) += pxa_cplds_irqs.o
diff --git a/arch/arm/mach-pxa/include/mach/addr-map.h b/arch/arm/mach-pxa/addr-map.h
index 93cfe7dbfec6..93cfe7dbfec6 100644
--- a/arch/arm/mach-pxa/include/mach/addr-map.h
+++ b/arch/arm/mach-pxa/addr-map.h
diff --git a/arch/arm/mach-pxa/am300epd.c b/arch/arm/mach-pxa/am300epd.c
index 17d08abeeb17..4b55bc89db8f 100644
--- a/arch/arm/mach-pxa/am300epd.c
+++ b/arch/arm/mach-pxa/am300epd.c
@@ -30,7 +30,7 @@
#include "gumstix.h"
#include "mfp-pxa25x.h"
-#include <mach/irqs.h>
+#include "irqs.h"
#include <linux/platform_data/video-pxafb.h>
#include "generic.h"
diff --git a/drivers/pcmcia/pxa2xx_balloon3.c b/arch/arm/mach-pxa/balloon3-pcmcia.c
index 5fe1da7a50e4..6a27b76cc603 100644
--- a/drivers/pcmcia/pxa2xx_balloon3.c
+++ b/arch/arm/mach-pxa/balloon3-pcmcia.c
@@ -20,11 +20,11 @@
#include <linux/irq.h>
#include <linux/io.h>
-#include <mach/balloon3.h>
+#include "balloon3.h"
#include <asm/mach-types.h>
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
static int balloon3_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index 26140249c784..896d47d9a8dc 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -40,8 +40,8 @@
#include <asm/mach/flash.h>
#include "pxa27x.h"
-#include <mach/balloon3.h>
-#include <mach/audio.h>
+#include "balloon3.h"
+#include <linux/platform_data/asoc-pxa.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/mmc-pxamci.h>
#include "udc.h"
diff --git a/arch/arm/mach-pxa/include/mach/balloon3.h b/arch/arm/mach-pxa/balloon3.h
index 04f3639c4082..04f3639c4082 100644
--- a/arch/arm/mach-pxa/include/mach/balloon3.h
+++ b/arch/arm/mach-pxa/balloon3.h
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index 2e35354b61f5..01f364a66446 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -40,6 +40,8 @@
#include <linux/spi/spi_gpio.h>
#include <linux/spi/tdo24m.h>
+#include <linux/soc/pxa/cpu.h>
+
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/setup.h>
@@ -51,7 +53,7 @@
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include <linux/platform_data/usb-pxa3xx-ulpi.h>
#include <asm/mach/map.h>
@@ -354,13 +356,13 @@ static struct platform_device cm_x300_spi_gpio = {
static struct gpiod_lookup_table cm_x300_spi_gpiod_table = {
.dev_id = "spi_gpio",
.table = {
- GPIO_LOOKUP("gpio-pxa", GPIO_LCD_SCL,
+ GPIO_LOOKUP("pca9555.1", GPIO_LCD_SCL - GPIO_LCD_BASE,
"sck", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DIN,
+ GPIO_LOOKUP("pca9555.1", GPIO_LCD_DIN - GPIO_LCD_BASE,
"mosi", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DOUT,
+ GPIO_LOOKUP("pca9555.1", GPIO_LCD_DOUT - GPIO_LCD_BASE,
"miso", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("gpio-pxa", GPIO_LCD_CS,
+ GPIO_LOOKUP("pca9555.1", GPIO_LCD_CS - GPIO_LCD_BASE,
"cs", GPIO_ACTIVE_HIGH),
{ },
},
diff --git a/arch/arm/mach-pxa/colibri-evalboard.c b/arch/arm/mach-pxa/colibri-evalboard.c
index b9c173ede891..b62af07b8f96 100644
--- a/arch/arm/mach-pxa/colibri-evalboard.c
+++ b/arch/arm/mach-pxa/colibri-evalboard.c
@@ -13,7 +13,6 @@
#include <linux/interrupt.h>
#include <linux/gpio/machine.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
#include <asm/mach/arch.h>
#include <linux/i2c.h>
#include <linux/platform_data/i2c-pxa.h>
diff --git a/drivers/pcmcia/pxa2xx_colibri.c b/arch/arm/mach-pxa/colibri-pcmcia.c
index f0f725e99604..9da7b478e5eb 100644
--- a/drivers/pcmcia/pxa2xx_colibri.c
+++ b/arch/arm/mach-pxa/colibri-pcmcia.c
@@ -14,7 +14,7 @@
#include <asm/mach-types.h>
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
#define COLIBRI270_RESET_GPIO 53
#define COLIBRI270_PPEN_GPIO 107
diff --git a/arch/arm/mach-pxa/colibri-pxa270-income.c b/arch/arm/mach-pxa/colibri-pxa270-income.c
index e5879e8b0682..f6eaf464ca83 100644
--- a/arch/arm/mach-pxa/colibri-pxa270-income.c
+++ b/arch/arm/mach-pxa/colibri-pxa270-income.c
@@ -25,7 +25,6 @@
#include <asm/irq.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include "pxa27x.h"
diff --git a/arch/arm/mach-pxa/colibri-pxa270.c b/arch/arm/mach-pxa/colibri-pxa270.c
index 2f2cd2ae4187..5dc669752836 100644
--- a/arch/arm/mach-pxa/colibri-pxa270.c
+++ b/arch/arm/mach-pxa/colibri-pxa270.c
@@ -23,7 +23,7 @@
#include <asm/mach-types.h>
#include <linux/sizes.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include "colibri.h"
#include "pxa27x.h"
diff --git a/arch/arm/mach-pxa/colibri-pxa300.c b/arch/arm/mach-pxa/colibri-pxa300.c
index 82052dfd96b6..11ca6c4795e7 100644
--- a/arch/arm/mach-pxa/colibri-pxa300.c
+++ b/arch/arm/mach-pxa/colibri-pxa300.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
+#include <linux/soc/pxa/cpu.h>
#include <asm/mach-types.h>
#include <linux/sizes.h>
@@ -23,7 +24,7 @@
#include "colibri.h"
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/video-pxafb.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include "generic.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index 35dd3adb7712..1a59056e181e 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -24,7 +24,7 @@
#include "colibri.h"
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include "pxa27x-udc.h"
#include "udc.h"
diff --git a/arch/arm/mach-pxa/colibri-pxa3xx.c b/arch/arm/mach-pxa/colibri-pxa3xx.c
index 3cead80a2b37..77d6ef5fa42d 100644
--- a/arch/arm/mach-pxa/colibri-pxa3xx.c
+++ b/arch/arm/mach-pxa/colibri-pxa3xx.c
@@ -13,12 +13,11 @@
#include <linux/gpio.h>
#include <linux/etherdevice.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
#include <linux/sizes.h>
#include <asm/system_info.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
-#include <mach/pxa3xx-regs.h>
+#include "pxa3xx-regs.h"
#include "mfp-pxa300.h"
#include "colibri.h"
#include <linux/platform_data/mmc-pxamci.h>
diff --git a/arch/arm/mach-pxa/colibri.h b/arch/arm/mach-pxa/colibri.h
index 85525d49e321..01a46f36cc1f 100644
--- a/arch/arm/mach-pxa/colibri.h
+++ b/arch/arm/mach-pxa/colibri.h
@@ -3,7 +3,7 @@
#define _COLIBRI_H_
#include <net/ax88796.h>
-#include <mach/mfp.h>
+#include "mfp.h"
/*
* base board glue for PXA270 module
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 44659fbc37ba..c546356d0f02 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -39,7 +39,6 @@
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
@@ -50,7 +49,7 @@
#include <linux/platform_data/irda-pxaficp.h>
#include <linux/platform_data/mmc-pxamci.h>
#include "udc.h"
-#include <mach/corgi.h>
+#include "corgi.h"
#include "sharpsl_pm.h"
#include <asm/mach/sharpsl_param.h>
@@ -473,6 +472,25 @@ static struct platform_device corgiled_device = {
},
};
+static struct gpiod_lookup_table corgi_audio_gpio_table = {
+ .dev_id = "corgi-audio",
+ .table = {
+ GPIO_LOOKUP("sharp-scoop",
+ CORGI_GPIO_MUTE_L - CORGI_SCOOP_GPIO_BASE,
+ "mute-l", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("sharp-scoop",
+ CORGI_GPIO_MUTE_R - CORGI_SCOOP_GPIO_BASE,
+ "mute-r", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("sharp-scoop",
+ CORGI_GPIO_APM_ON - CORGI_SCOOP_GPIO_BASE,
+ "apm-on", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("sharp-scoop",
+ CORGI_GPIO_MIC_BIAS - CORGI_SCOOP_GPIO_BASE,
+ "mic-bias", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
/*
* Corgi Audio
*/
@@ -745,6 +763,7 @@ static void __init corgi_init(void)
pxa_set_udc_info(&udc_info);
gpiod_add_lookup_table(&corgi_mci_gpio_table);
+ gpiod_add_lookup_table(&corgi_audio_gpio_table);
pxa_set_mci_info(&corgi_mci_platform_data);
pxa_set_ficp_info(&corgi_ficp_platform_data);
pxa_set_i2c_info(NULL);
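
The new corgi_audio_gpio_table maps the SCOOP lines to named functions ("mute-l", "mute-r", "apm-on", "mic-bias") for the "corgi-audio" device, so the audio driver can request them by connection id instead of hard-coded GPIO numbers. A minimal sketch of the consumer side, assuming a platform driver bound to that device (the probe shown is illustrative, not the in-tree sound code):

    #include <linux/err.h>
    #include <linux/gpio/consumer.h>
    #include <linux/platform_device.h>

    /*
     * Illustrative: request two of the lines added to
     * corgi_audio_gpio_table by their connection IDs; the lookup table
     * resolves them to sharp-scoop GPIOs behind the scenes.
     */
    static int example_corgi_audio_probe(struct platform_device *pdev)
    {
            struct gpio_desc *mute_l, *mute_r;

            mute_l = devm_gpiod_get(&pdev->dev, "mute-l", GPIOD_OUT_HIGH);
            if (IS_ERR(mute_l))
                    return PTR_ERR(mute_l);

            mute_r = devm_gpiod_get(&pdev->dev, "mute-r", GPIOD_OUT_HIGH);
            if (IS_ERR(mute_r))
                    return PTR_ERR(mute_r);

            /* ... hand the descriptors to the codec/machine driver ... */
            return 0;
    }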
diff --git a/arch/arm/mach-pxa/include/mach/corgi.h b/arch/arm/mach-pxa/corgi.h
index b565ca7b8cda..b565ca7b8cda 100644
--- a/arch/arm/mach-pxa/include/mach/corgi.h
+++ b/arch/arm/mach-pxa/corgi.h
diff --git a/arch/arm/mach-pxa/corgi_pm.c b/arch/arm/mach-pxa/corgi_pm.c
index 092dcb9fced5..555a5c1afd96 100644
--- a/arch/arm/mach-pxa/corgi_pm.c
+++ b/arch/arm/mach-pxa/corgi_pm.c
@@ -19,10 +19,9 @@
#include <asm/irq.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
-#include <mach/corgi.h>
-#include <mach/pxa2xx-regs.h>
+#include "corgi.h"
+#include "pxa2xx-regs.h"
#include "sharpsl_pm.h"
#include "generic.h"
diff --git a/arch/arm/mach-pxa/csb726.c b/arch/arm/mach-pxa/csb726.c
index 98fcdc6e2944..410b1af87d55 100644
--- a/arch/arm/mach-pxa/csb726.c
+++ b/arch/arm/mach-pxa/csb726.c
@@ -17,12 +17,13 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+
#include "csb726.h"
#include "pxa27x.h"
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <mach/audio.h>
-#include <mach/smemc.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "smemc.h"
#include "generic.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/csb726.h b/arch/arm/mach-pxa/csb726.h
index 30d7cf926b84..628928743bd5 100644
--- a/arch/arm/mach-pxa/csb726.h
+++ b/arch/arm/mach-pxa/csb726.h
@@ -7,7 +7,7 @@
#ifndef CSB726_H
#define CSB726_H
-#include <mach/irqs.h> /* PXA_GPIO_TO_IRQ */
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
#define CSB726_GPIO_IRQ_LAN 52
#define CSB726_GPIO_IRQ_SM501 53
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 09b8495f3fd9..a7b92dd1ca9e 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -9,21 +9,23 @@
#include <linux/dmaengine.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/platform_data/i2c-pxa.h>
+#include <linux/soc/pxa/cpu.h>
#include "udc.h"
#include <linux/platform_data/usb-pxa3xx-ulpi.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/irda-pxaficp.h>
-#include <mach/irqs.h>
+#include "irqs.h"
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/keypad-pxa27x.h>
#include <linux/platform_data/media/camera-pxa.h>
-#include <mach/audio.h>
-#include <mach/hardware.h>
+#include <linux/platform_data/asoc-pxa.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>
+#include "regs-ost.h"
+#include "reset.h"
#include "devices.h"
#include "generic.h"
@@ -1118,3 +1120,12 @@ void __init pxa2xx_set_dmac_info(struct mmp_dma_platdata *dma_pdata)
{
pxa_register_device(&pxa2xx_pxa_dma, dma_pdata);
}
+
+void __init pxa_register_wdt(unsigned int reset_status)
+{
+ struct resource res = DEFINE_RES_MEM(OST_PHYS, OST_LEN);
+
+ reset_status &= RESET_STATUS_WATCHDOG;
+ platform_device_register_resndata(NULL, "sa1100_wdt", -1, &res, 1,
+ &reset_status, sizeof(reset_status));
+}
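
pxa_register_wdt() above registers the "sa1100_wdt" platform device with the OST register window as its MMIO resource and the masked reset_status word copied in as platform data. How the watchdog driver consumes that data is not part of this hunk; the sketch below only shows how a platform driver could read back what was registered, using generic platform-device APIs:

    #include <linux/errno.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    /*
     * Illustrative probe for the device registered by pxa_register_wdt():
     * fetch the OST window and the reset_status copy passed as platform
     * data (nonzero means the last reset was a watchdog reset, since the
     * caller masked it with RESET_STATUS_WATCHDOG).
     */
    static int example_sa1100_wdt_probe(struct platform_device *pdev)
    {
            struct resource *res;
            unsigned int *reset_status;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            reset_status = dev_get_platdata(&pdev->dev);
            if (!res || !reset_status)
                    return -ENODEV;

            return 0;
    }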
diff --git a/drivers/pcmcia/pxa2xx_e740.c b/arch/arm/mach-pxa/e740-pcmcia.c
index 72caa6d05ab9..11a2c5d42920 100644
--- a/drivers/pcmcia/pxa2xx_e740.c
+++ b/arch/arm/mach-pxa/e740-pcmcia.c
@@ -13,12 +13,12 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <mach/eseries-gpio.h>
+#include "eseries-gpio.h"
#include <asm/irq.h>
#include <asm/mach-types.h>
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
static int e740_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
diff --git a/arch/arm/mach-pxa/include/mach/eseries-gpio.h b/arch/arm/mach-pxa/eseries-gpio.h
index 5c645600d401..5c645600d401 100644
--- a/arch/arm/mach-pxa/include/mach/eseries-gpio.h
+++ b/arch/arm/mach-pxa/eseries-gpio.h
diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c
index f37c44b6139d..08f8737aa8fd 100644
--- a/arch/arm/mach-pxa/eseries.c
+++ b/arch/arm/mach-pxa/eseries.c
@@ -24,6 +24,7 @@
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/memblock.h>
+#include <linux/gpio/machine.h>
#include <video/w100fb.h>
@@ -32,9 +33,9 @@
#include <asm/mach-types.h>
#include "pxa25x.h"
-#include <mach/eseries-gpio.h>
+#include "eseries-gpio.h"
#include "eseries-irq.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include <linux/platform_data/video-pxafb.h>
#include "udc.h"
#include <linux/platform_data/irda-pxaficp.h>
@@ -520,6 +521,16 @@ static struct platform_device e740_audio_device = {
.id = -1,
};
+static struct gpiod_lookup_table e740_audio_gpio_table = {
+ .dev_id = "e740-audio",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", GPIO_E740_WM9705_nAVDD2, "Audio power", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", GPIO_E740_AMP_ON, "Output amp", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", GPIO_E740_MIC_ON, "Mic amp", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
/* ----------------------------------------------------------------------- */
static struct platform_device *e740_devices[] __initdata = {
@@ -540,6 +551,7 @@ static void __init e740_init(void)
"UDCCLK", &pxa25x_device_udc.dev),
eseries_get_tmio_gpios();
gpiod_add_lookup_table(&e7xx_gpio_vbus_gpiod_table);
+ gpiod_add_lookup_table(&e740_audio_gpio_table);
platform_add_devices(ARRAY_AND_SIZE(e740_devices));
pxa_set_ac97_info(NULL);
pxa_set_ficp_info(&e7xx_ficp_platform_data);
@@ -699,7 +711,6 @@ static struct tc6393xb_platform_data e750_tc6393xb_info = {
.irq_base = IRQ_BOARD_START,
.scr_pll2cr = 0x0cc1,
.scr_gper = 0,
- .gpio_base = -1,
.suspend = &eseries_tmio_suspend,
.resume = &eseries_tmio_resume,
.enable = &eseries_tmio_enable,
@@ -716,6 +727,15 @@ static struct platform_device e750_tc6393xb_device = {
.resource = eseries_tmio_resources,
};
+static struct gpiod_lookup_table e750_audio_gpio_table = {
+ .dev_id = "e750-audio",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", GPIO_E750_HP_AMP_OFF, "Output amp", GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP("gpio-pxa", GPIO_E750_SPK_AMP_OFF, "Mic amp", GPIO_ACTIVE_LOW),
+ { },
+ },
+};
+
static struct platform_device e750_audio_device = {
.name = "e750-audio",
.id = -1,
@@ -740,6 +760,7 @@ static void __init e750_init(void)
"GPIO11_CLK", NULL),
eseries_get_tmio_gpios();
gpiod_add_lookup_table(&e7xx_gpio_vbus_gpiod_table);
+ gpiod_add_lookup_table(&e750_audio_gpio_table);
platform_add_devices(ARRAY_AND_SIZE(e750_devices));
pxa_set_ac97_info(NULL);
pxa_set_ficp_info(&e7xx_ficp_platform_data);
@@ -918,7 +939,6 @@ static struct tc6393xb_platform_data e800_tc6393xb_info = {
.irq_base = IRQ_BOARD_START,
.scr_pll2cr = 0x0cc1,
.scr_gper = 0,
- .gpio_base = -1,
.suspend = &eseries_tmio_suspend,
.resume = &eseries_tmio_resume,
.enable = &eseries_tmio_enable,
@@ -935,6 +955,15 @@ static struct platform_device e800_tc6393xb_device = {
.resource = eseries_tmio_resources,
};
+static struct gpiod_lookup_table e800_audio_gpio_table = {
+ .dev_id = "e800-audio",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", GPIO_E800_HP_AMP_OFF, "Output amp", GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP("gpio-pxa", GPIO_E800_SPK_AMP_ON, "Mic amp", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static struct platform_device e800_audio_device = {
.name = "e800-audio",
.id = -1,
@@ -959,6 +988,7 @@ static void __init e800_init(void)
"GPIO11_CLK", NULL),
eseries_get_tmio_gpios();
gpiod_add_lookup_table(&e800_gpio_vbus_gpiod_table);
+ gpiod_add_lookup_table(&e800_audio_gpio_table);
platform_add_devices(ARRAY_AND_SIZE(e800_devices));
pxa_set_ac97_info(NULL);
}
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index eb85950e7c0e..69c2ec02a16c 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -29,7 +29,6 @@
#include "pxa27x.h"
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <mach/hardware.h>
#include <linux/platform_data/keypad-pxa27x.h>
#include <linux/platform_data/media/camera-pxa.h>
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index ab7cdffd7ea8..02fdde7e3e34 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -17,15 +17,18 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/soc/pxa/smemc.h>
+#include <linux/clk/pxa.h>
-#include <mach/hardware.h>
#include <asm/mach/map.h>
#include <asm/mach-types.h>
-#include <mach/irqs.h>
-#include <mach/reset.h>
-#include <mach/smemc.h>
-#include <mach/pxa3xx-regs.h>
+#include "addr-map.h"
+#include "irqs.h"
+#include "reset.h"
+#include "smemc.h"
+#include "pxa3xx-regs.h"
#include "generic.h"
#include <clocksource/pxa.h>
@@ -46,28 +49,47 @@ void clear_reset_status(unsigned int mask)
void __init pxa_timer_init(void)
{
if (cpu_is_pxa25x())
- pxa25x_clocks_init();
+ pxa25x_clocks_init(io_p2v(0x41300000));
if (cpu_is_pxa27x())
- pxa27x_clocks_init();
+ pxa27x_clocks_init(io_p2v(0x41300000));
if (cpu_is_pxa3xx())
- pxa3xx_clocks_init();
+ pxa3xx_clocks_init(io_p2v(0x41340000), io_p2v(0x41350000));
pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000));
}
-/*
- * Get the clock frequency as reflected by CCCR and the turbo flag.
- * We assume these values have been applied via a fcs.
- * If info is not 0 we also display the current settings.
- */
-unsigned int get_clk_frequency_khz(int info)
+void pxa_smemc_set_pcmcia_timing(int sock, u32 mcmem, u32 mcatt, u32 mcio)
{
- if (cpu_is_pxa25x())
- return pxa25x_get_clk_frequency_khz(info);
- else if (cpu_is_pxa27x())
- return pxa27x_get_clk_frequency_khz(info);
- return 0;
+ __raw_writel(mcmem, MCMEM(sock));
+ __raw_writel(mcatt, MCATT(sock));
+ __raw_writel(mcio, MCIO(sock));
+}
+EXPORT_SYMBOL_GPL(pxa_smemc_set_pcmcia_timing);
+
+void pxa_smemc_set_pcmcia_socket(int nr)
+{
+ switch (nr) {
+ case 0:
+ __raw_writel(0, MECR);
+ break;
+ case 1:
+ /*
+ * We have at least one socket, so set MECR:CIT
+ * (Card Is There)
+ */
+ __raw_writel(MECR_CIT, MECR);
+ break;
+ case 2:
+ /* Set CIT and MECR:NOS (Number Of Sockets) */
+ __raw_writel(MECR_CIT | MECR_NOS, MECR);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(pxa_smemc_set_pcmcia_socket);
+
+void __iomem *pxa_smemc_get_mdrefr(void)
+{
+ return MDREFR;
}
-EXPORT_SYMBOL(get_clk_frequency_khz);
/*
* Intel PXA2xx internal register mapping.
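
The PCMCIA socket code that moves under arch/arm/mach-pxa in this series (balloon3-pcmcia.c, colibri-pcmcia.c, e740-pcmcia.c, ...) now goes through the exported helpers added above instead of writing MECR/MCMEM/MCATT/MCIO directly. A short usage sketch; the declarations presumably live in <linux/soc/pxa/smemc.h>, which generic.c now includes, and the timing values below are placeholders, not real board numbers:

    #include <linux/types.h>

    /* Prototypes as introduced in the hunk above. */
    void pxa_smemc_set_pcmcia_timing(int sock, u32 mcmem, u32 mcatt, u32 mcio);
    void pxa_smemc_set_pcmcia_socket(int nr);

    /*
     * Illustrative socket bring-up for a board PCMCIA driver: declare one
     * populated socket (sets MECR:CIT) and program placeholder cycle
     * timings for socket 0.
     */
    static void example_pcmcia_socket_setup(void)
    {
            pxa_smemc_set_pcmcia_socket(1);
            pxa_smemc_set_pcmcia_timing(0, 0x00010001, 0x00010001, 0x00010001);
    }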
diff --git a/arch/arm/mach-pxa/generic.h b/arch/arm/mach-pxa/generic.h
index 3b7873f8e1f8..7bb1499de4c5 100644
--- a/arch/arm/mach-pxa/generic.h
+++ b/arch/arm/mach-pxa/generic.h
@@ -10,7 +10,6 @@
struct irq_data;
-extern unsigned int get_clk_frequency_khz(int info);
extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *,
unsigned int));
extern void __init pxa_map_io(void);
@@ -23,19 +22,16 @@ extern void pxa_timer_init(void);
#define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x)
#define pxa25x_handle_irq icip_handle_irq
-extern int __init pxa25x_clocks_init(void);
extern void __init pxa25x_init_irq(void);
extern void __init pxa25x_map_io(void);
extern void __init pxa26x_init_irq(void);
#define pxa27x_handle_irq ichp_handle_irq
-extern int __init pxa27x_clocks_init(void);
extern unsigned pxa27x_get_clk_frequency_khz(int);
extern void __init pxa27x_init_irq(void);
extern void __init pxa27x_map_io(void);
#define pxa3xx_handle_irq ichp_handle_irq
-extern int __init pxa3xx_clocks_init(void);
extern void __init pxa3xx_init_irq(void);
extern void __init pxa3xx_map_io(void);
@@ -71,8 +67,3 @@ extern unsigned pxa25x_get_clk_frequency_khz(int);
#define pxa27x_get_clk_frequency_khz(x) (0)
#endif
-#ifdef CONFIG_PXA3xx
-extern unsigned pxa3xx_get_clk_frequency_khz(int);
-#else
-#define pxa3xx_get_clk_frequency_khz(x) (0)
-#endif
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
index 49dd618b10f7..72b08a9bf0fd 100644
--- a/arch/arm/mach-pxa/gumstix.c
+++ b/arch/arm/mach-pxa/gumstix.c
@@ -28,7 +28,6 @@
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
#include <asm/irq.h>
#include <linux/sizes.h>
diff --git a/arch/arm/mach-pxa/gumstix.h b/arch/arm/mach-pxa/gumstix.h
index 470250cdee16..9005b3c0aabd 100644
--- a/arch/arm/mach-pxa/gumstix.h
+++ b/arch/arm/mach-pxa/gumstix.h
@@ -3,7 +3,7 @@
* arch/arm/mach-pxa/include/mach/gumstix.h
*/
-#include <mach/irqs.h> /* PXA_GPIO_TO_IRQ */
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
/* BTRESET - Reset line to Bluetooth module, active low signal. */
#define GPIO_GUMSTIX_BTRESET 7
diff --git a/arch/arm/mach-pxa/h5000.c b/arch/arm/mach-pxa/h5000.c
index ece1e71c90a9..212efe24aedb 100644
--- a/arch/arm/mach-pxa/h5000.c
+++ b/arch/arm/mach-pxa/h5000.c
@@ -29,7 +29,7 @@
#include "pxa25x.h"
#include "h5000.h"
#include "udc.h"
-#include <mach/smemc.h>
+#include "smemc.h"
#include "generic.h"
diff --git a/drivers/pcmcia/pxa2xx_hx4700.c b/arch/arm/mach-pxa/hx4700-pcmcia.c
index 87b6a1639d94..e2331dfe427d 100644
--- a/drivers/pcmcia/pxa2xx_hx4700.c
+++ b/arch/arm/mach-pxa/hx4700-pcmcia.c
@@ -10,9 +10,9 @@
#include <linux/irq.h>
#include <asm/mach-types.h>
-#include <mach/hx4700.h>
+#include "hx4700.h"
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
static struct gpio gpios[] = {
{ GPIO114_HX4700_CF_RESET, GPIOF_OUT_INIT_LOW, "CF reset" },
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index e1870fbb19e7..2ae06edf413c 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -36,12 +36,12 @@
#include <linux/spi/pxa2xx_spi.h>
#include <linux/platform_data/i2c-pxa.h>
-#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include "pxa27x.h"
-#include <mach/hx4700.h>
+#include "addr-map.h"
+#include "hx4700.h"
#include <linux/platform_data/irda-pxaficp.h>
#include <sound/ak4641.h>
@@ -834,6 +834,19 @@ static struct i2c_board_info i2c_board_info[] __initdata = {
},
};
+static struct gpiod_lookup_table hx4700_audio_gpio_table = {
+ .dev_id = "hx4700-audio",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", GPIO75_HX4700_EARPHONE_nDET,
+ "earphone-det", GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP("gpio-pxa", GPIO92_HX4700_HP_DRIVER,
+ "hp-driver", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", GPIO107_HX4700_SPK_nSD,
+ "spk-sd", GPIO_ACTIVE_LOW),
+ { },
+ },
+};
+
static struct platform_device audio = {
.name = "hx4700-audio",
.id = -1,
@@ -895,6 +908,7 @@ static void __init hx4700_init(void)
gpiod_add_lookup_table(&bq24022_gpiod_table);
gpiod_add_lookup_table(&gpio_vbus_gpiod_table);
+ gpiod_add_lookup_table(&hx4700_audio_gpio_table);
platform_add_devices(devices, ARRAY_SIZE(devices));
pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup));
diff --git a/arch/arm/mach-pxa/include/mach/hx4700.h b/arch/arm/mach-pxa/hx4700.h
index 0c30e6d9c660..0c30e6d9c660 100644
--- a/arch/arm/mach-pxa/include/mach/hx4700.h
+++ b/arch/arm/mach-pxa/hx4700.h
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
index fb0850af8496..525d01ddfbbb 100644
--- a/arch/arm/mach-pxa/idp.c
+++ b/arch/arm/mach-pxa/idp.c
@@ -22,7 +22,6 @@
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
@@ -31,7 +30,6 @@
#include "pxa25x.h"
#include "idp.h"
#include <linux/platform_data/video-pxafb.h>
-#include <mach/bitfield.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/smc91x.h>
diff --git a/arch/arm/mach-pxa/idp.h b/arch/arm/mach-pxa/idp.h
index a89e6723b1a1..81b9bd9ba754 100644
--- a/arch/arm/mach-pxa/idp.h
+++ b/arch/arm/mach-pxa/idp.h
@@ -20,7 +20,7 @@
* IDP hardware.
*/
-#include <mach/irqs.h> /* PXA_GPIO_TO_IRQ */
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
#define IDP_FLASH_PHYS (PXA_CS0_PHYS)
#define IDP_ALT_FLASH_PHYS (PXA_CS1_PHYS)
diff --git a/arch/arm/mach-pxa/include/mach/bitfield.h b/arch/arm/mach-pxa/include/mach/bitfield.h
deleted file mode 100644
index fe2ca441bc0a..000000000000
--- a/arch/arm/mach-pxa/include/mach/bitfield.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * FILE bitfield.h
- *
- * Version 1.1
- * Author Copyright (c) Marc A. Viredaz, 1998
- * DEC Western Research Laboratory, Palo Alto, CA
- * Date April 1998 (April 1997)
- * System Advanced RISC Machine (ARM)
- * Language C or ARM Assembly
- * Purpose Definition of macros to operate on bit fields.
- */
-
-
-
-#ifndef __BITFIELD_H
-#define __BITFIELD_H
-
-#ifndef __ASSEMBLY__
-#define UData(Data) ((unsigned long) (Data))
-#else
-#define UData(Data) (Data)
-#endif
-
-
-/*
- * MACRO: Fld
- *
- * Purpose
- * The macro "Fld" encodes a bit field, given its size and its shift value
- * with respect to bit 0.
- *
- * Note
- * A more intuitive way to encode bit fields would have been to use their
- * mask. However, extracting size and shift value information from a bit
- * field's mask is cumbersome and might break the assembler (255-character
- * line-size limit).
- *
- * Input
- * Size Size of the bit field, in number of bits.
- * Shft Shift value of the bit field with respect to bit 0.
- *
- * Output
- * Fld Encoded bit field.
- */
-
-#define Fld(Size, Shft) (((Size) << 16) + (Shft))
-
-
-/*
- * MACROS: FSize, FShft, FMsk, FAlnMsk, F1stBit
- *
- * Purpose
- * The macros "FSize", "FShft", "FMsk", "FAlnMsk", and "F1stBit" return
- * the size, shift value, mask, aligned mask, and first bit of a
- * bit field.
- *
- * Input
- * Field Encoded bit field (using the macro "Fld").
- *
- * Output
- * FSize Size of the bit field, in number of bits.
- * FShft Shift value of the bit field with respect to bit 0.
- * FMsk Mask for the bit field.
- * FAlnMsk Mask for the bit field, aligned on bit 0.
- * F1stBit First bit of the bit field.
- */
-
-#define FSize(Field) ((Field) >> 16)
-#define FShft(Field) ((Field) & 0x0000FFFF)
-#define FMsk(Field) (((UData (1) << FSize (Field)) - 1) << FShft (Field))
-#define FAlnMsk(Field) ((UData (1) << FSize (Field)) - 1)
-#define F1stBit(Field) (UData (1) << FShft (Field))
-
-
-/*
- * MACRO: FInsrt
- *
- * Purpose
- * The macro "FInsrt" inserts a value into a bit field by shifting the
- * former appropriately.
- *
- * Input
- * Value Bit-field value.
- * Field Encoded bit field (using the macro "Fld").
- *
- * Output
- * FInsrt Bit-field value positioned appropriately.
- */
-
-#define FInsrt(Value, Field) \
- (UData (Value) << FShft (Field))
-
-
-/*
- * MACRO: FExtr
- *
- * Purpose
- * The macro "FExtr" extracts the value of a bit field by masking and
- * shifting it appropriately.
- *
- * Input
- * Data Data containing the bit-field to be extracted.
- * Field Encoded bit field (using the macro "Fld").
- *
- * Output
- * FExtr Bit-field value.
- */
-
-#define FExtr(Data, Field) \
- ((UData (Data) >> FShft (Field)) & FAlnMsk (Field))
-
-
-#endif /* __BITFIELD_H */
diff --git a/arch/arm/mach-pxa/include/mach/dma.h b/arch/arm/mach-pxa/include/mach/dma.h
deleted file mode 100644
index 79f9842a7e1c..000000000000
--- a/arch/arm/mach-pxa/include/mach/dma.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-pxa/include/mach/dma.h
- *
- * Author: Nicolas Pitre
- * Created: Jun 15, 2001
- * Copyright: MontaVista Software, Inc.
- */
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#include <mach/hardware.h>
-
-/* DMA Controller Registers Definitions */
-#define DMAC_REGS_VIRT io_p2v(0x40000000)
-
-#endif /* _ASM_ARCH_DMA_H */
diff --git a/arch/arm/mach-pxa/include/mach/generic.h b/arch/arm/mach-pxa/include/mach/generic.h
deleted file mode 100644
index 665542e0c9e2..000000000000
--- a/arch/arm/mach-pxa/include/mach/generic.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../generic.h"
diff --git a/arch/arm/mach-pxa/include/mach/mtd-xip.h b/arch/arm/mach-pxa/include/mach/mtd-xip.h
deleted file mode 100644
index 4b31bef9e50a..000000000000
--- a/arch/arm/mach-pxa/include/mach/mtd-xip.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * MTD primitives for XIP support. Architecture specific functions
- *
- * Do not include this file directly. It's included from linux/mtd/xip.h
- *
- * Author: Nicolas Pitre
- * Created: Nov 2, 2004
- * Copyright: (C) 2004 MontaVista Software, Inc.
- */
-
-#ifndef __ARCH_PXA_MTD_XIP_H__
-#define __ARCH_PXA_MTD_XIP_H__
-
-#include <mach/regs-ost.h>
-
-/* restored July 2017, this did not build since 2011! */
-
-#define ICIP io_p2v(0x40d00000)
-#define ICMR io_p2v(0x40d00004)
-#define xip_irqpending() (readl(ICIP) & readl(ICMR))
-
-/* we sample OSCR and convert desired delta to usec (1/4 ~= 1000000/3686400) */
-#define xip_currtime() readl(OSCR)
-#define xip_elapsed_since(x) (signed)((readl(OSCR) - (x)) / 4)
-
-/*
- * xip_cpu_idle() is used when waiting for a delay equal or larger than
- * the system timer tick period. This should put the CPU into idle mode
- * to save power and to be woken up only when some interrupts are pending.
- * As above, this should not rely upon standard kernel code.
- */
-
-#define xip_cpu_idle() asm volatile ("mcr p14, 0, %0, c7, c0, 0" :: "r" (1))
-
-#endif /* __ARCH_PXA_MTD_XIP_H__ */
diff --git a/arch/arm/mach-pxa/include/mach/uncompress.h b/arch/arm/mach-pxa/include/mach/uncompress.h
deleted file mode 100644
index 1ed629e38ce6..000000000000
--- a/arch/arm/mach-pxa/include/mach/uncompress.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-pxa/include/mach/uncompress.h
- *
- * Author: Nicolas Pitre
- * Copyright: (C) 2001 MontaVista Software Inc.
- */
-
-#include <linux/serial_reg.h>
-#include <asm/mach-types.h>
-
-#define FFUART_BASE (0x40100000)
-#define BTUART_BASE (0x40200000)
-#define STUART_BASE (0x40700000)
-
-unsigned long uart_base;
-unsigned int uart_shift;
-unsigned int uart_is_pxa;
-
-static inline unsigned char uart_read(int offset)
-{
- return *(volatile unsigned char *)(uart_base + (offset << uart_shift));
-}
-
-static inline void uart_write(unsigned char val, int offset)
-{
- *(volatile unsigned char *)(uart_base + (offset << uart_shift)) = val;
-}
-
-static inline int uart_is_enabled(void)
-{
- /* assume enabled by default for non-PXA uarts */
- return uart_is_pxa ? uart_read(UART_IER) & UART_IER_UUE : 1;
-}
-
-static inline void putc(char c)
-{
- if (!uart_is_enabled())
- return;
-
- while (!(uart_read(UART_LSR) & UART_LSR_THRE))
- barrier();
-
- uart_write(c, UART_TX);
-}
-
-/*
- * This does not append a newline
- */
-static inline void flush(void)
-{
-}
-
-static inline void arch_decomp_setup(void)
-{
- /* initialize to default */
- uart_base = FFUART_BASE;
- uart_shift = 2;
- uart_is_pxa = 1;
-
- if (machine_is_littleton() || machine_is_csb726() ||
- machine_is_cm_x300() || machine_is_balloon3())
- uart_base = STUART_BASE;
-
- if (machine_is_arcom_zeus()) {
- uart_base = 0x10000000; /* nCS4 */
- uart_shift = 1;
- uart_is_pxa = 0;
- }
-}
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 74efc3ab595f..96f33ef1d9ea 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -17,13 +17,14 @@
#include <linux/irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/soc/pxa/cpu.h>
#include <asm/exception.h>
-#include <mach/hardware.h>
-#include <mach/irqs.h>
+#include "irqs.h"
#include "generic.h"
+#include "pxa-regs.h"
#define ICIP (0x000)
#define ICMR (0x004)
diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/irqs.h
index 22bf536a462d..22bf536a462d 100644
--- a/arch/arm/mach-pxa/include/mach/irqs.h
+++ b/arch/arm/mach-pxa/irqs.h
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index 73f5953b3bb6..f98dc61e87af 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -31,7 +31,6 @@
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index 6fc40bc06910..0e4123c5fd42 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -28,7 +28,6 @@
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
#include <asm/irq.h>
#include <linux/sizes.h>
@@ -39,12 +38,13 @@
#include "pxa27x.h"
#include "lpd270.h"
-#include <mach/audio.h>
+#include "addr-map.h"
+#include <linux/platform_data/asoc-pxa.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/irda-pxaficp.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <mach/smemc.h>
+#include "smemc.h"
#include "generic.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index e2411971422d..4f0944f3b262 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -34,7 +34,6 @@
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
#include <asm/irq.h>
#include <linux/sizes.h>
@@ -46,14 +45,14 @@
#include <asm/hardware/sa1111.h>
#include "pxa25x.h"
-#include <mach/audio.h>
-#include <mach/lubbock.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "lubbock.h"
#include "udc.h"
#include <linux/platform_data/irda-pxaficp.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/mmc-pxamci.h>
#include "pm.h"
-#include <mach/smemc.h>
+#include "smemc.h"
#include "generic.h"
#include "devices.h"
@@ -132,6 +131,13 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
// no D+ pullup; lubbock can't connect/disconnect in software
};
+static struct resource lubbock_udc_resources[] = {
+ DEFINE_RES_MEM(0x40600000, 0x10000),
+ DEFINE_RES_IRQ(IRQ_USB),
+ DEFINE_RES_IRQ(LUBBOCK_USB_IRQ),
+ DEFINE_RES_IRQ(LUBBOCK_USB_DISC_IRQ),
+};
+
/* GPIOs for SA1111 PCMCIA */
static struct gpiod_lookup_table sa1111_pcmcia_gpio_table = {
.dev_id = "1800",
@@ -497,6 +503,9 @@ static void __init lubbock_init(void)
lubbock_init_pcmcia();
clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL);
+ /* lubbock has two extra IRQs */
+ pxa25x_device_udc.resource = lubbock_udc_resources;
+ pxa25x_device_udc.num_resources = ARRAY_SIZE(lubbock_udc_resources);
pxa_set_udc_info(&udc_info);
pxa_set_fb_info(NULL, &sharp_lm8v31);
pxa_set_mci_info(&lubbock_mci_platform_data);
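
The lubbock_udc_resources array added above attaches the MMIO window plus three interrupt lines to the PXA25x UDC device before pxa_set_udc_info() runs. A hedged sketch of how a probe routine could pick those up by index; platform_get_irq() counts only IRQ resources, so index 0 is IRQ_USB and 1/2 are the two Lubbock-specific lines:

    #include <linux/errno.h>
    #include <linux/platform_device.h>

    /* Illustrative only: fetch the controller IRQ and the two extra
     * board lines (LUBBOCK_USB_IRQ, LUBBOCK_USB_DISC_IRQ) by index. */
    static int example_lubbock_udc_irqs(struct platform_device *pdev)
    {
            int irq_usb  = platform_get_irq(pdev, 0);
            int irq_lub1 = platform_get_irq(pdev, 1);
            int irq_lub2 = platform_get_irq(pdev, 2);

            if (irq_usb < 0 || irq_lub1 < 0 || irq_lub2 < 0)
                    return -ENXIO;

            return 0;
    }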
diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/lubbock.h
index a3af4a2f9446..55cf91e22ae2 100644
--- a/arch/arm/mach-pxa/include/mach/lubbock.h
+++ b/arch/arm/mach-pxa/lubbock.h
@@ -1,13 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * arch/arm/mach-pxa/include/mach/lubbock.h
- *
* Author: Nicolas Pitre
* Created: Jun 15, 2001
* Copyright: MontaVista Software Inc.
*/
-#include <mach/irqs.h>
+#include "irqs.h"
#define LUBBOCK_ETH_PHYS PXA_CS3_PHYS
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index 200fd35168e0..20456a55c4c5 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -29,13 +29,13 @@
#include <linux/regulator/machine.h>
#include <linux/platform_data/i2c-pxa.h>
-#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/system_info.h>
#include "pxa27x.h"
-#include <mach/magician.h>
+#include "addr-map.h"
+#include "magician.h"
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/irda-pxaficp.h>
@@ -53,6 +53,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/ads7846.h>
+#include <sound/uda1380.h>
static unsigned long magician_pin_config[] __initdata = {
@@ -681,7 +682,7 @@ static struct platform_device bq24022 = {
static struct gpiod_lookup_table bq24022_gpiod_table = {
.dev_id = "gpio-regulator",
.table = {
- GPIO_LOOKUP("gpio-pxa", EGPIO_MAGICIAN_BQ24022_ISET2,
+ GPIO_LOOKUP("htc-egpio-0", EGPIO_MAGICIAN_BQ24022_ISET2 - MAGICIAN_EGPIO_BASE,
NULL, GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-pxa", GPIO30_MAGICIAN_BQ24022_nCHARGE_EN,
"enable", GPIO_ACTIVE_LOW),
@@ -899,6 +900,53 @@ static struct platform_device strataflash = {
};
/*
+ * audio support
+ */
+static struct uda1380_platform_data uda1380_info = {
+ .gpio_power = EGPIO_MAGICIAN_CODEC_POWER,
+ .gpio_reset = EGPIO_MAGICIAN_CODEC_RESET,
+ .dac_clk = UDA1380_DAC_CLK_WSPLL,
+};
+
+static struct i2c_board_info magician_audio_i2c_board_info[] = {
+ {
+ I2C_BOARD_INFO("uda1380", 0x18),
+ .platform_data = &uda1380_info,
+ },
+};
+
+static struct gpiod_lookup_table magician_audio_gpio_table = {
+ .dev_id = "magician-audio",
+ .table = {
+ GPIO_LOOKUP("htc-egpio-0",
+ EGPIO_MAGICIAN_SPK_POWER - MAGICIAN_EGPIO_BASE,
+ "SPK_POWER", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("htc-egpio-0",
+ EGPIO_MAGICIAN_EP_POWER - MAGICIAN_EGPIO_BASE,
+ "EP_POWER", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("htc-egpio-0",
+ EGPIO_MAGICIAN_MIC_POWER - MAGICIAN_EGPIO_BASE,
+ "MIC_POWER", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("htc-egpio-0",
+ EGPIO_MAGICIAN_IN_SEL0 - MAGICIAN_EGPIO_BASE,
+ "IN_SEL0", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("htc-egpio-0",
+ EGPIO_MAGICIAN_IN_SEL1 - MAGICIAN_EGPIO_BASE,
+ "IN_SEL1", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
+static void magician_audio_init(void)
+{
+ i2c_register_board_info(0,
+ ARRAY_AND_SIZE(magician_audio_i2c_board_info));
+
+ gpiod_add_lookup_table(&magician_audio_gpio_table);
+ platform_device_register_simple("magician-audio", -1, NULL, 0);
+}
+
+/*
* PXA I2C main controller
*/
@@ -1048,6 +1096,8 @@ static void __init magician_init(void)
gpiod_add_lookup_table(&bq24022_gpiod_table);
gpiod_add_lookup_table(&gpio_vbus_gpiod_table);
platform_add_devices(ARRAY_AND_SIZE(devices));
+
+ magician_audio_init();
}
MACHINE_START(MAGICIAN, "HTC Magician")
diff --git a/arch/arm/mach-pxa/include/mach/magician.h b/arch/arm/mach-pxa/magician.h
index 7d3af561af6f..e1e4f9f6b22b 100644
--- a/arch/arm/mach-pxa/include/mach/magician.h
+++ b/arch/arm/mach-pxa/magician.h
@@ -9,7 +9,7 @@
#define _MAGICIAN_H_
#include <linux/gpio.h>
-#include <mach/irqs.h>
+#include "irqs.h"
/*
* PXA GPIOs
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index d237bd030238..fd386f1c414c 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -35,7 +35,6 @@
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
#include <asm/irq.h>
#include <linux/sizes.h>
@@ -45,14 +44,15 @@
#include <asm/mach/flash.h>
#include "pxa27x.h"
-#include <mach/mainstone.h>
-#include <mach/audio.h>
+#include "mainstone.h"
+#include <linux/platform_data/asoc-pxa.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/irda-pxaficp.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/keypad-pxa27x.h>
-#include <mach/smemc.h>
+#include "addr-map.h"
+#include "smemc.h"
#include "generic.h"
#include "devices.h"
@@ -548,6 +548,14 @@ static struct gpiod_lookup_table mainstone_pcmcia_gpio_table = {
},
};
+static struct gpiod_lookup_table mainstone_wm97xx_gpio_table = {
+ .dev_id = "wm97xx-touch",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", 4, "touch", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static void __init mainstone_init(void)
{
int SW7 = 0; /* FIXME: get from SCR (Mst doc section 3.2.1.1) */
@@ -562,6 +570,7 @@ static void __init mainstone_init(void)
"mst-pcmcia1", MST_PCMCIA_INPUTS, 0, NULL,
NULL, mst_pcmcia1_irqs);
gpiod_add_lookup_table(&mainstone_pcmcia_gpio_table);
+ gpiod_add_lookup_table(&mainstone_wm97xx_gpio_table);
pxa_set_ffuart_info(NULL);
pxa_set_btuart_info(NULL);
diff --git a/arch/arm/mach-pxa/include/mach/mainstone.h b/arch/arm/mach-pxa/mainstone.h
index 1698f2ffd7c7..f116c56cf5d9 100644
--- a/arch/arm/mach-pxa/include/mach/mainstone.h
+++ b/arch/arm/mach-pxa/mainstone.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * arch/arm/mach-pxa/include/mach/mainstone.h
- *
* Author: Nicolas Pitre
* Created: Nov 14, 2002
* Copyright: MontaVista Software Inc.
@@ -10,7 +8,7 @@
#ifndef ASM_ARCH_MAINSTONE_H
#define ASM_ARCH_MAINSTONE_H
-#include <mach/irqs.h>
+#include "irqs.h"
#define MST_ETH_PHYS PXA_CS4_PHYS
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c
index 6a5451b186c2..57b0782880de 100644
--- a/arch/arm/mach-pxa/mfp-pxa2xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa2xx.c
@@ -16,8 +16,9 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
+#include <linux/soc/pxa/cpu.h>
-#include <mach/pxa2xx-regs.h>
+#include "pxa2xx-regs.h"
#include "mfp-pxa2xx.h"
#include "generic.h"
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.h b/arch/arm/mach-pxa/mfp-pxa2xx.h
index 980145e7ee99..683a3ea5f154 100644
--- a/arch/arm/mach-pxa/mfp-pxa2xx.h
+++ b/arch/arm/mach-pxa/mfp-pxa2xx.h
@@ -2,7 +2,7 @@
#ifndef __ASM_ARCH_MFP_PXA2XX_H
#define __ASM_ARCH_MFP_PXA2XX_H
-#include <plat/mfp.h>
+#include <linux/soc/pxa/mfp.h>
/*
* the following MFP_xxx bit definitions in mfp.h are re-used for pxa2xx:
diff --git a/arch/arm/mach-pxa/mfp-pxa3xx.c b/arch/arm/mach-pxa/mfp-pxa3xx.c
index 56114df9700d..d16ab7451efe 100644
--- a/arch/arm/mach-pxa/mfp-pxa3xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa3xx.c
@@ -16,9 +16,8 @@
#include <linux/io.h>
#include <linux/syscore_ops.h>
-#include <mach/hardware.h>
#include "mfp-pxa3xx.h"
-#include <mach/pxa3xx-regs.h>
+#include "pxa3xx-regs.h"
#ifdef CONFIG_PM
/*
diff --git a/arch/arm/mach-pxa/mfp-pxa3xx.h b/arch/arm/mach-pxa/mfp-pxa3xx.h
index cdd830926d1c..81fec4fa5a0f 100644
--- a/arch/arm/mach-pxa/mfp-pxa3xx.h
+++ b/arch/arm/mach-pxa/mfp-pxa3xx.h
@@ -2,7 +2,7 @@
#ifndef __ASM_ARCH_MFP_PXA3XX_H
#define __ASM_ARCH_MFP_PXA3XX_H
-#include <plat/mfp.h>
+#include <linux/soc/pxa/mfp.h>
#define MFPR_BASE (0x40e10000)
diff --git a/arch/arm/mach-pxa/include/mach/mfp.h b/arch/arm/mach-pxa/mfp.h
index dbb961fb570e..7e0879bd4102 100644
--- a/arch/arm/mach-pxa/include/mach/mfp.h
+++ b/arch/arm/mach-pxa/mfp.h
@@ -13,6 +13,6 @@
#ifndef __ASM_ARCH_MFP_H
#define __ASM_ARCH_MFP_H
-#include <plat/mfp.h>
+#include <linux/soc/pxa/mfp.h>
#endif /* __ASM_ARCH_MFP_H */
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index a79f296e81e0..d08f962ffb04 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -41,8 +41,8 @@
#include "udc.h"
#include "pxa27x-udc.h"
#include <linux/platform_data/media/camera-pxa.h>
-#include <mach/audio.h>
-#include <mach/smemc.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "smemc.h"
#include "mioa701.h"
diff --git a/arch/arm/mach-pxa/mxm8x10.c b/arch/arm/mach-pxa/mxm8x10.c
index fde386f6cffe..35546b59c88e 100644
--- a/arch/arm/mach-pxa/mxm8x10.c
+++ b/arch/arm/mach-pxa/mxm8x10.c
@@ -26,6 +26,7 @@
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
+#include <linux/platform_data/asoc-pxa.h>
#include "pxa320.h"
#include "mxm8x10.h"
@@ -356,14 +357,9 @@ void __init mxm_8x10_usb_host_init(void)
pxa_set_ohci_info(&mxm_8x10_ohci_platform_data);
}
-/* AC97 Sound Support */
-static struct platform_device mxm_8x10_ac97_device = {
- .name = "pxa2xx-ac97"
-};
-
void __init mxm_8x10_ac97_init(void)
{
- platform_device_register(&mxm_8x10_ac97_device);
+ pxa_set_ac97_info(NULL);
}
/* NAND flash Support */
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 6230381a7ca0..1a8d25eecac3 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -25,7 +25,7 @@
#include <asm/mach/map.h>
#include "pxa27x.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/irda-pxaficp.h>
diff --git a/drivers/pcmcia/pxa2xx_palmld.c b/arch/arm/mach-pxa/palmld-pcmcia.c
index cfff41ac9ca2..720294a50864 100644
--- a/drivers/pcmcia/pxa2xx_palmld.c
+++ b/arch/arm/mach-pxa/palmld-pcmcia.c
@@ -13,8 +13,9 @@
#include <linux/gpio.h>
#include <asm/mach-types.h>
-#include <mach/palmld.h>
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
+
+#include "palmld.h"
static struct gpio palmld_pcmcia_gpios[] = {
{ GPIO_NR_PALMLD_PCMCIA_POWER, GPIOF_INIT_LOW, "PCMCIA Power" },
diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c
index 5f73716a77f0..32308c63884e 100644
--- a/arch/arm/mach-pxa/palmld.c
+++ b/arch/arm/mach-pxa/palmld.c
@@ -29,8 +29,8 @@
#include <asm/mach/map.h>
#include "pxa27x.h"
-#include <mach/audio.h>
-#include <mach/palmld.h>
+#include "palmld.h"
+#include <linux/platform_data/asoc-pxa.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/irda-pxaficp.h>
@@ -279,9 +279,15 @@ static inline void palmld_leds_init(void) {}
* HDD
******************************************************************************/
#if defined(CONFIG_PATA_PALMLD) || defined(CONFIG_PATA_PALMLD_MODULE)
+static struct resource palmld_ide_resources[] = {
+ DEFINE_RES_MEM(PALMLD_IDE_PHYS, 0x1000),
+};
+
static struct platform_device palmld_ide_device = {
- .name = "pata_palmld",
- .id = -1,
+ .name = "pata_palmld",
+ .id = -1,
+ .resource = palmld_ide_resources,
+ .num_resources = ARRAY_SIZE(palmld_ide_resources),
};
static struct gpiod_lookup_table palmld_ide_gpio_table = {
@@ -341,6 +347,14 @@ static struct gpiod_lookup_table palmld_mci_gpio_table = {
},
};
+static struct gpiod_lookup_table palmld_wm97xx_touch_gpio_table = {
+ .dev_id = "wm97xx-touch",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", 27, "touch", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static void __init palmld_init(void)
{
pxa2xx_mfp_config(ARRAY_AND_SIZE(palmld_pin_config));
@@ -349,6 +363,7 @@ static void __init palmld_init(void)
pxa_set_stuart_info(NULL);
palm27x_mmc_init(&palmld_mci_gpio_table);
+ gpiod_add_lookup_table(&palmld_wm97xx_touch_gpio_table);
palm27x_pm_init(PALMLD_STR_BASE);
palm27x_lcd_init(-1, &palm_320x480_lcd_mode);
palm27x_irda_init(GPIO_NR_PALMLD_IR_DISABLE);
diff --git a/arch/arm/mach-pxa/include/mach/palmld.h b/arch/arm/mach-pxa/palmld.h
index 99a6d8b3a1e3..99a6d8b3a1e3 100644
--- a/arch/arm/mach-pxa/include/mach/palmld.h
+++ b/arch/arm/mach-pxa/palmld.h
diff --git a/arch/arm/mach-pxa/palmt5.c b/arch/arm/mach-pxa/palmt5.c
index 7c7cbb4e677e..463b62ec1b01 100644
--- a/arch/arm/mach-pxa/palmt5.c
+++ b/arch/arm/mach-pxa/palmt5.c
@@ -29,7 +29,7 @@
#include <asm/mach/map.h>
#include "pxa27x.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include "palmt5.h"
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/video-pxafb.h>
@@ -190,6 +190,14 @@ static struct gpiod_lookup_table palmt5_mci_gpio_table = {
},
};
+static struct gpiod_lookup_table palmt5_wm97xx_touch_gpio_table = {
+ .dev_id = "wm97xx-touch",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", 27, "touch", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static void __init palmt5_init(void)
{
pxa2xx_mfp_config(ARRAY_AND_SIZE(palmt5_pin_config));
@@ -198,6 +206,7 @@ static void __init palmt5_init(void)
pxa_set_stuart_info(NULL);
palm27x_mmc_init(&palmt5_mci_gpio_table);
+ gpiod_add_lookup_table(&palmt5_wm97xx_touch_gpio_table);
palm27x_pm_init(PALMT5_STR_BASE);
palm27x_lcd_init(-1, &palm_320x480_lcd_mode);
palm27x_udc_init(GPIO_NR_PALMT5_USB_DETECT_N,
diff --git a/arch/arm/mach-pxa/palmt5.h b/arch/arm/mach-pxa/palmt5.h
index 1fb1da7c8da3..cf84aedca717 100644
--- a/arch/arm/mach-pxa/palmt5.h
+++ b/arch/arm/mach-pxa/palmt5.h
@@ -11,7 +11,7 @@
#ifndef _INCLUDE_PALMT5_H_
#define _INCLUDE_PALMT5_H_
-#include <mach/irqs.h> /* PXA_GPIO_TO_IRQ */
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
/** HERE ARE GPIOs **/
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/arch/arm/mach-pxa/palmtc-pcmcia.c
index 8fe05613ed04..8e3f382343fe 100644
--- a/drivers/pcmcia/pxa2xx_palmtc.c
+++ b/arch/arm/mach-pxa/palmtc-pcmcia.c
@@ -14,8 +14,8 @@
#include <linux/delay.h>
#include <asm/mach-types.h>
-#include <mach/palmtc.h>
-#include "soc_common.h"
+#include "palmtc.h"
+#include <pcmcia/soc_common.h>
static struct gpio palmtc_pcmcia_gpios[] = {
{ GPIO_NR_PALMTC_PCMCIA_POWER1, GPIOF_INIT_LOW, "PCMCIA Power 1" },
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index 455cb8ccaf26..3054ffa397ad 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -29,8 +29,8 @@
#include <asm/mach/map.h>
#include "pxa25x.h"
-#include <mach/audio.h>
-#include <mach/palmtc.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "palmtc.h"
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/irda-pxaficp.h>
diff --git a/arch/arm/mach-pxa/include/mach/palmtc.h b/arch/arm/mach-pxa/palmtc.h
index 9257a02c46e5..9257a02c46e5 100644
--- a/arch/arm/mach-pxa/include/mach/palmtc.h
+++ b/arch/arm/mach-pxa/palmtc.h
diff --git a/arch/arm/mach-pxa/palmte2.c b/arch/arm/mach-pxa/palmte2.c
index a2b10db4aacc..fedac670a8af 100644
--- a/arch/arm/mach-pxa/palmte2.c
+++ b/arch/arm/mach-pxa/palmte2.c
@@ -29,7 +29,7 @@
#include <asm/mach/map.h>
#include "pxa25x.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include "palmte2.h"
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/video-pxafb.h>
diff --git a/arch/arm/mach-pxa/palmtreo.c b/arch/arm/mach-pxa/palmtreo.c
index 2bf0f7f3ea24..238a31f32cba 100644
--- a/arch/arm/mach-pxa/palmtreo.c
+++ b/arch/arm/mach-pxa/palmtreo.c
@@ -29,7 +29,7 @@
#include "pxa27x.h"
#include "pxa27x-udc.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include "palmtreo.h"
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/video-pxafb.h>
@@ -37,7 +37,7 @@
#include <linux/platform_data/keypad-pxa27x.h>
#include "udc.h"
#include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <mach/pxa2xx-regs.h>
+#include "pxa2xx-regs.h"
#include <linux/platform_data/asoc-palm27x.h>
#include <linux/platform_data/media/camera-pxa.h>
#include "palm27x.h"
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/arch/arm/mach-pxa/palmtx-pcmcia.c
index c449ca72cb87..8c2aaad93043 100644
--- a/drivers/pcmcia/pxa2xx_palmtx.c
+++ b/arch/arm/mach-pxa/palmtx-pcmcia.c
@@ -12,8 +12,8 @@
#include <linux/gpio.h>
#include <asm/mach-types.h>
-#include <mach/palmtx.h>
-#include "soc_common.h"
+#include "palmtx.h"
+#include <pcmcia/soc_common.h>
static struct gpio palmtx_pcmcia_gpios[] = {
{ GPIO_NR_PALMTX_PCMCIA_POWER1, GPIOF_INIT_LOW, "PCMCIA Power 1" },
diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c
index 07332c92c9f7..c0d0762540ab 100644
--- a/arch/arm/mach-pxa/palmtx.c
+++ b/arch/arm/mach-pxa/palmtx.c
@@ -32,8 +32,8 @@
#include <asm/mach/map.h>
#include "pxa27x.h"
-#include <mach/audio.h>
-#include <mach/palmtx.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "palmtx.h"
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/irda-pxaficp.h>
@@ -345,6 +345,14 @@ static struct gpiod_lookup_table palmtx_mci_gpio_table = {
},
};
+static struct gpiod_lookup_table palmtx_wm97xx_touch_gpio_table = {
+ .dev_id = "wm97xx-touch",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", 27, "touch", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static void __init palmtx_init(void)
{
pxa2xx_mfp_config(ARRAY_AND_SIZE(palmtx_pin_config));
@@ -353,6 +361,7 @@ static void __init palmtx_init(void)
pxa_set_stuart_info(NULL);
palm27x_mmc_init(&palmtx_mci_gpio_table);
+ gpiod_add_lookup_table(&palmtx_wm97xx_touch_gpio_table);
palm27x_pm_init(PALMTX_STR_BASE);
palm27x_lcd_init(-1, &palm_320x480_lcd_mode);
palm27x_udc_init(GPIO_NR_PALMTX_USB_DETECT_N,
diff --git a/arch/arm/mach-pxa/include/mach/palmtx.h b/arch/arm/mach-pxa/palmtx.h
index ec88abf0fc6c..ec88abf0fc6c 100644
--- a/arch/arm/mach-pxa/include/mach/palmtx.h
+++ b/arch/arm/mach-pxa/palmtx.h
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index b4a5fe02a0af..66e8fe6f1661 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -34,7 +34,7 @@
#include <asm/mach/map.h>
#include "pxa27x.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include "palmz72.h"
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/video-pxafb.h>
diff --git a/arch/arm/mach-pxa/pcm027.h b/arch/arm/mach-pxa/pcm027.h
index 0c4ab636ce4e..58ade4ad6ba3 100644
--- a/arch/arm/mach-pxa/pcm027.h
+++ b/arch/arm/mach-pxa/pcm027.h
@@ -10,7 +10,7 @@
* Definitions of CPU card resources only
*/
-#include <mach/irqs.h> /* PXA_GPIO_TO_IRQ */
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
/* phyCORE-PXA270 (PCM027) Interrupts */
#define PCM027_IRQ(x) (IRQ_BOARD_START + (x))
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index 8dfcc366d0fe..33a9d2eeca1c 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -26,7 +26,7 @@
#include <asm/mach/map.h>
#include "pxa27x.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include "pcm990_baseboard.h"
diff --git a/arch/arm/mach-pxa/pcm990_baseboard.h b/arch/arm/mach-pxa/pcm990_baseboard.h
index 5be11d1b7019..18cf71decb03 100644
--- a/arch/arm/mach-pxa/pcm990_baseboard.h
+++ b/arch/arm/mach-pxa/pcm990_baseboard.h
@@ -7,7 +7,7 @@
*/
#include "pcm027.h"
-#include <mach/irqs.h> /* PXA_GPIO_TO_IRQ */
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
/*
* definitions relevant only when the PCM-990
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index 58cfa434afde..7772a39430ed 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -30,7 +30,6 @@
#include <linux/mtd/sharpsl.h>
#include <linux/memblock.h>
-#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/setup.h>
@@ -40,11 +39,13 @@
#include <asm/mach/irq.h>
#include "pxa25x.h"
-#include <linux/platform_data/mmc-pxamci.h>
#include "udc.h"
+#include "poodle.h"
+
+#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/irda-pxaficp.h>
-#include <mach/poodle.h>
#include <linux/platform_data/video-pxafb.h>
+#include <linux/platform_data/asoc-poodle.h>
#include <asm/hardware/scoop.h>
#include <asm/hardware/locomo.h>
@@ -156,12 +157,6 @@ static struct scoop_pcmcia_config poodle_pcmcia_config = {
EXPORT_SYMBOL(poodle_scoop_device);
-
-static struct platform_device poodle_audio_device = {
- .name = "poodle-audio",
- .id = -1,
-};
-
/* LoCoMo device */
static struct resource locomo_resources[] = {
[0] = {
@@ -180,7 +175,7 @@ static struct locomo_platform_data locomo_info = {
.irq_base = IRQ_BOARD_START,
};
-struct platform_device poodle_locomo_device = {
+static struct platform_device poodle_locomo_device = {
.name = "locomo",
.id = 0,
.num_resources = ARRAY_SIZE(locomo_resources),
@@ -190,7 +185,21 @@ struct platform_device poodle_locomo_device = {
},
};
-EXPORT_SYMBOL(poodle_locomo_device);
+static struct poodle_audio_platform_data poodle_audio_pdata = {
+ .locomo_dev = &poodle_locomo_device.dev,
+
+ .gpio_amp_on = POODLE_LOCOMO_GPIO_AMP_ON,
+ .gpio_mute_l = POODLE_LOCOMO_GPIO_MUTE_L,
+ .gpio_mute_r = POODLE_LOCOMO_GPIO_MUTE_R,
+ .gpio_232vcc_on = POODLE_LOCOMO_GPIO_232VCC_ON,
+ .gpio_jk_b = POODLE_LOCOMO_GPIO_JK_B,
+};
+
+static struct platform_device poodle_audio_device = {
+ .name = "poodle-audio",
+ .id = -1,
+ .dev.platform_data = &poodle_audio_pdata,
+};
#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
static struct pxa2xx_spi_controller poodle_spi_info = {
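
With this change the poodle-audio device carries its LoCoMo GPIO numbers as platform data instead of relying on an exported locomo device. A hedged consumer-side sketch, assuming the asoc-poodle.h platform-data header introduced elsewhere in this series; the probe function itself is illustrative:

	#include <linux/errno.h>
	#include <linux/platform_device.h>
	#include <linux/platform_data/asoc-poodle.h>	/* added by this series */

	static int poodle_audio_probe_sketch(struct platform_device *pdev)
	{
		struct poodle_audio_platform_data *pdata =
			dev_get_platdata(&pdev->dev);

		if (!pdata)
			return -ENODEV;

		/* pdata->locomo_dev plus the gpio_* numbers are now available
		 * to the ASoC machine driver without any exported board
		 * symbols. */
		return 0;
	}
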
diff --git a/arch/arm/mach-pxa/include/mach/poodle.h b/arch/arm/mach-pxa/poodle.h
index b56b19351a03..00798b44f204 100644
--- a/arch/arm/mach-pxa/include/mach/poodle.h
+++ b/arch/arm/mach-pxa/poodle.h
@@ -89,6 +89,4 @@
#define POODLE_NR_IRQS (IRQ_BOARD_START + 4) /* 4 for LoCoMo */
-extern struct platform_device poodle_locomo_device;
-
#endif /* __ASM_ARCH_POODLE_H */
diff --git a/arch/arm/mach-pxa/pxa-dt.c b/arch/arm/mach-pxa/pxa-dt.c
index d32d5c8e966f..5e5d543fdf46 100644
--- a/arch/arm/mach-pxa/pxa-dt.c
+++ b/arch/arm/mach-pxa/pxa-dt.c
@@ -11,7 +11,7 @@
#include <linux/of_platform.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
-#include <mach/irqs.h>
+#include "irqs.h"
#include "generic.h"
diff --git a/arch/arm/mach-pxa/pxa-regs.h b/arch/arm/mach-pxa/pxa-regs.h
new file mode 100644
index 000000000000..ba5120c06b8a
--- /dev/null
+++ b/arch/arm/mach-pxa/pxa-regs.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Nicolas Pitre
+ * Created: Jun 15, 2001
+ * Copyright: MontaVista Software Inc.
+ */
+#ifndef __ASM_MACH_PXA_REGS_H
+#define __ASM_MACH_PXA_REGS_H
+
+/*
+ * Workarounds for at least 2 errata so far require this.
+ * The mapping is set in mach-pxa/generic.c.
+ */
+#define UNCACHED_PHYS_0 0xfe000000
+#define UNCACHED_PHYS_0_SIZE 0x00100000
+
+/*
+ * Intel PXA2xx internal register mapping:
+ *
+ * 0x40000000 - 0x41ffffff <--> 0xf2000000 - 0xf3ffffff
+ * 0x44000000 - 0x45ffffff <--> 0xf4000000 - 0xf5ffffff
+ * 0x48000000 - 0x49ffffff <--> 0xf6000000 - 0xf7ffffff
+ * 0x4c000000 - 0x4dffffff <--> 0xf8000000 - 0xf9ffffff
+ * 0x50000000 - 0x51ffffff <--> 0xfa000000 - 0xfbffffff
+ * 0x54000000 - 0x55ffffff <--> 0xfc000000 - 0xfdffffff
+ * 0x58000000 - 0x59ffffff <--> 0xfe000000 - 0xffffffff
+ *
+ * Note that not all PXA2xx chips implement all those addresses, and the
+ * kernel only maps the minimum needed range of this mapping.
+ */
+#define io_v2p(x) (0x3c000000 + ((x) & 0x01ffffff) + (((x) & 0x0e000000) << 1))
+#define io_p2v(x) IOMEM(0xf2000000 + ((x) & 0x01ffffff) + (((x) & 0x1c000000) >> 1))
+
+#ifndef __ASSEMBLY__
+# define __REG(x) (*((volatile u32 __iomem *)io_p2v(x)))
+
+/* With indexed regs we don't want to feed the index through io_p2v()
+ especially if it is a variable, otherwise horrible code will result. */
+# define __REG2(x,y) \
+ (*(volatile u32 __iomem*)((u32)&__REG(x) + (y)))
+
+# define __PREG(x) (io_v2p((u32)&(x)))
+
+#else
+
+# define __REG(x) io_p2v(x)
+# define __PREG(x) io_v2p(x)
+
+#endif
+
+
+#endif
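
The io_p2v()/io_v2p() macros above implement the fixed mapping listed in the comment block. A quick host-side sanity check of that arithmetic, written as a plain C sketch with the IOMEM()/volatile kernel decoration dropped:

	#include <assert.h>

	#define IO_V2P(x) (0x3c000000u + ((x) & 0x01ffffffu) + (((x) & 0x0e000000u) << 1))
	#define IO_P2V(x) (0xf2000000u + ((x) & 0x01ffffffu) + (((x) & 0x1c000000u) >> 1))

	int main(void)
	{
		/* 0x40e00000 lands in the 0xf2000000 window... */
		assert(IO_P2V(0x40e00000u) == 0xf2e00000u);
		/* ...0x48000000 in the 0xf6000000 window... */
		assert(IO_P2V(0x48000000u) == 0xf6000000u);
		/* ...and the translation is invertible. */
		assert(IO_V2P(0xf6000000u) == 0x48000000u);
		return 0;
	}
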
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 678641ab46e5..6b34d7c169ea 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -26,16 +26,16 @@
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/platform_data/mmp_dma.h>
+#include <linux/soc/pxa/cpu.h>
#include <asm/mach/map.h>
#include <asm/suspend.h>
-#include <mach/hardware.h>
-#include <mach/irqs.h>
+#include "irqs.h"
#include "pxa25x.h"
-#include <mach/reset.h>
+#include "reset.h"
#include "pm.h"
-#include <mach/dma.h>
-#include <mach/smemc.h>
+#include "addr-map.h"
+#include "smemc.h"
#include "generic.h"
#include "devices.h"
@@ -240,7 +240,7 @@ static int __init pxa25x_init(void)
if (cpu_is_pxa25x()) {
- reset_status = RCSR;
+ pxa_register_wdt(RCSR);
pxa25x_init_pm();
diff --git a/arch/arm/mach-pxa/pxa25x.h b/arch/arm/mach-pxa/pxa25x.h
index b58d0fbdb4db..eaaa87666324 100644
--- a/arch/arm/mach-pxa/pxa25x.h
+++ b/arch/arm/mach-pxa/pxa25x.h
@@ -2,9 +2,9 @@
#ifndef __MACH_PXA25x_H
#define __MACH_PXA25x_H
-#include <mach/hardware.h>
-#include <mach/pxa2xx-regs.h>
+#include "addr-map.h"
+#include "pxa2xx-regs.h"
#include "mfp-pxa25x.h"
-#include <mach/irqs.h>
+#include "irqs.h"
#endif /* __MACH_PXA25x_H */
diff --git a/arch/arm/mach-pxa/pxa27x-udc.h b/arch/arm/mach-pxa/pxa27x-udc.h
index faf73804697f..2d3df3b1cb68 100644
--- a/arch/arm/mach-pxa/pxa27x-udc.h
+++ b/arch/arm/mach-pxa/pxa27x-udc.h
@@ -2,6 +2,8 @@
#ifndef _ASM_ARCH_PXA27X_UDC_H
#define _ASM_ARCH_PXA27X_UDC_H
+#include "pxa-regs.h"
+
#ifdef _ASM_ARCH_PXA25X_UDC_H
#error You cannot include both PXA25x and PXA27x UDC support
#endif
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index f0ba7ed24cb6..afbf6ace954f 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -23,18 +23,18 @@
#include <linux/irq.h>
#include <linux/platform_data/i2c-pxa.h>
#include <linux/platform_data/mmp_dma.h>
+#include <linux/soc/pxa/cpu.h>
#include <asm/mach/map.h>
-#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/suspend.h>
-#include <mach/irqs.h>
+#include "irqs.h"
#include "pxa27x.h"
-#include <mach/reset.h>
+#include "reset.h"
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include "pm.h"
-#include <mach/dma.h>
-#include <mach/smemc.h>
+#include "addr-map.h"
+#include "smemc.h"
#include "generic.h"
#include "devices.h"
@@ -337,7 +337,7 @@ static int __init pxa27x_init(void)
if (cpu_is_pxa27x()) {
- reset_status = RCSR;
+ pxa_register_wdt(RCSR);
pxa27x_init_pm();
diff --git a/arch/arm/mach-pxa/pxa27x.h b/arch/arm/mach-pxa/pxa27x.h
index abdc02fb4f03..ede96f3f7214 100644
--- a/arch/arm/mach-pxa/pxa27x.h
+++ b/arch/arm/mach-pxa/pxa27x.h
@@ -3,10 +3,10 @@
#define __MACH_PXA27x_H
#include <linux/suspend.h>
-#include <mach/hardware.h>
-#include <mach/pxa2xx-regs.h>
+#include "addr-map.h"
+#include "pxa2xx-regs.h"
#include "mfp-pxa27x.h"
-#include <mach/irqs.h>
+#include "irqs.h"
#define ARB_CNTRL __REG(0x48000048) /* Arbiter Control Register */
diff --git a/arch/arm/mach-pxa/include/mach/pxa2xx-regs.h b/arch/arm/mach-pxa/pxa2xx-regs.h
index fa121e135915..0b7eaf6b5813 100644
--- a/arch/arm/mach-pxa/include/mach/pxa2xx-regs.h
+++ b/arch/arm/mach-pxa/pxa2xx-regs.h
@@ -11,7 +11,7 @@
#ifndef __PXA2XX_REGS_H
#define __PXA2XX_REGS_H
-#include <mach/hardware.h>
+#include "pxa-regs.h"
/*
* Power Manager
@@ -136,51 +136,6 @@
#define CKEN io_p2v(0x41300004) /* Clock Enable Register */
#define OSCC io_p2v(0x41300008) /* Oscillator Configuration Register */
-#define CCCR_N_MASK 0x0380 /* Run Mode Frequency to Turbo Mode Frequency Multiplier */
-#define CCCR_M_MASK 0x0060 /* Memory Frequency to Run Mode Frequency Multiplier */
-#define CCCR_L_MASK 0x001f /* Crystal Frequency to Memory Frequency Multiplier */
-
-#define CCCR_CPDIS_BIT (31)
-#define CCCR_PPDIS_BIT (30)
-#define CCCR_LCD_26_BIT (27)
-#define CCCR_A_BIT (25)
-
-#define CCSR_N2_MASK CCCR_N_MASK
-#define CCSR_M_MASK CCCR_M_MASK
-#define CCSR_L_MASK CCCR_L_MASK
-#define CCSR_N2_SHIFT 7
-
-#define CKEN_AC97CONF (31) /* AC97 Controller Configuration */
-#define CKEN_CAMERA (24) /* Camera Interface Clock Enable */
-#define CKEN_SSP1 (23) /* SSP1 Unit Clock Enable */
-#define CKEN_MEMC (22) /* Memory Controller Clock Enable */
-#define CKEN_MEMSTK (21) /* Memory Stick Host Controller */
-#define CKEN_IM (20) /* Internal Memory Clock Enable */
-#define CKEN_KEYPAD (19) /* Keypad Interface Clock Enable */
-#define CKEN_USIM (18) /* USIM Unit Clock Enable */
-#define CKEN_MSL (17) /* MSL Unit Clock Enable */
-#define CKEN_LCD (16) /* LCD Unit Clock Enable */
-#define CKEN_PWRI2C (15) /* PWR I2C Unit Clock Enable */
-#define CKEN_I2C (14) /* I2C Unit Clock Enable */
-#define CKEN_FICP (13) /* FICP Unit Clock Enable */
-#define CKEN_MMC (12) /* MMC Unit Clock Enable */
-#define CKEN_USB (11) /* USB Unit Clock Enable */
-#define CKEN_ASSP (10) /* ASSP (SSP3) Clock Enable */
-#define CKEN_USBHOST (10) /* USB Host Unit Clock Enable */
-#define CKEN_OSTIMER (9) /* OS Timer Unit Clock Enable */
-#define CKEN_NSSP (9) /* NSSP (SSP2) Clock Enable */
-#define CKEN_I2S (8) /* I2S Unit Clock Enable */
-#define CKEN_BTUART (7) /* BTUART Unit Clock Enable */
-#define CKEN_FFUART (6) /* FFUART Unit Clock Enable */
-#define CKEN_STUART (5) /* STUART Unit Clock Enable */
-#define CKEN_HWUART (4) /* HWUART Unit Clock Enable */
-#define CKEN_SSP3 (4) /* SSP3 Unit Clock Enable */
-#define CKEN_SSP (3) /* SSP Unit Clock Enable */
-#define CKEN_SSP2 (3) /* SSP2 Unit Clock Enable */
-#define CKEN_AC97 (2) /* AC97 Unit Clock Enable */
-#define CKEN_PWM1 (1) /* PWM1 Clock Enable */
-#define CKEN_PWM0 (0) /* PWM0 Clock Enable */
-
#define OSCC_OON (1 << 1) /* 32.768kHz OON (write-once only bit) */
#define OSCC_OOK (1 << 0) /* 32.768kHz OOK (read-only bit) */
diff --git a/arch/arm/mach-pxa/pxa2xx.c b/arch/arm/mach-pxa/pxa2xx.c
index 2d26cd2afbf3..4aafd692c1e8 100644
--- a/arch/arm/mach-pxa/pxa2xx.c
+++ b/arch/arm/mach-pxa/pxa2xx.c
@@ -12,10 +12,12 @@
#include <linux/device.h>
#include <linux/io.h>
-#include <mach/hardware.h>
-#include <mach/pxa2xx-regs.h>
+#include "pxa2xx-regs.h"
#include "mfp-pxa25x.h"
-#include <mach/reset.h>
+#include "generic.h"
+#include "reset.h"
+#include "smemc.h"
+#include <linux/soc/pxa/smemc.h>
#include <linux/platform_data/irda-pxaficp.h>
void pxa2xx_clear_reset_status(unsigned int mask)
@@ -51,3 +53,27 @@ void pxa2xx_transceiver_mode(struct device *dev, int mode)
BUG();
}
EXPORT_SYMBOL_GPL(pxa2xx_transceiver_mode);
+
+#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
+#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
+
+int pxa2xx_smemc_get_sdram_rows(void)
+{
+ static int sdram_rows;
+ unsigned int drac2 = 0, drac0 = 0;
+ u32 mdcnfg;
+
+ if (sdram_rows)
+ return sdram_rows;
+
+ mdcnfg = readl_relaxed(MDCNFG);
+
+ if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
+ drac2 = MDCNFG_DRAC2(mdcnfg);
+
+ if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
+ drac0 = MDCNFG_DRAC0(mdcnfg);
+
+ sdram_rows = 1 << (11 + max(drac0, drac2));
+ return sdram_rows;
+}
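
pxa2xx_smemc_get_sdram_rows() converts the two-bit DRAC fields of MDCNFG into a row count of 1 << (11 + drac), i.e. 2048 to 16384 rows. A worked host-side example, assuming SDRAM partition 0 enabled with DRAC0 = 2; the MDCNFG_DE0 bit position used here is illustrative only:

	#include <assert.h>

	#define MDCNFG_DE0		(1u << 0)	/* assumed for illustration */
	#define MDCNFG_DRAC0(mdcnfg)	(((mdcnfg) >> 5) & 0x3)

	int main(void)
	{
		unsigned int mdcnfg = MDCNFG_DE0 | (2u << 5);
		unsigned int drac0 = 0;

		if (mdcnfg & MDCNFG_DE0)
			drac0 = MDCNFG_DRAC0(mdcnfg);

		/* DRAC0 = 2 means 13 row address bits, so 8192 rows. */
		assert((1u << (11 + drac0)) == 8192);
		return 0;
	}
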
diff --git a/arch/arm/mach-pxa/pxa300.c b/arch/arm/mach-pxa/pxa300.c
index 7f2f5a6a2263..f77ec118d5b9 100644
--- a/arch/arm/mach-pxa/pxa300.c
+++ b/arch/arm/mach-pxa/pxa300.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/soc/pxa/cpu.h>
#include "pxa300.h"
diff --git a/arch/arm/mach-pxa/pxa320.c b/arch/arm/mach-pxa/pxa320.c
index 78abcc741df7..e372e6c118de 100644
--- a/arch/arm/mach-pxa/pxa320.c
+++ b/arch/arm/mach-pxa/pxa320.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/soc/pxa/cpu.h>
#include "pxa320.h"
diff --git a/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h b/arch/arm/mach-pxa/pxa3xx-regs.h
index 070f6c74196e..4b11cf81a9e6 100644
--- a/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h
+++ b/arch/arm/mach-pxa/pxa3xx-regs.h
@@ -10,7 +10,7 @@
#ifndef __ASM_ARCH_PXA3XX_REGS_H
#define __ASM_ARCH_PXA3XX_REGS_H
-#include <mach/hardware.h>
+#include "pxa-regs.h"
/*
* Oscillator Configuration Register (OSCC)
@@ -131,73 +131,4 @@
#define CKENC __REG(0x41340024) /* C Clock Enable Register */
#define AC97_DIV __REG(0x41340014) /* AC97 clock divisor value register */
-#define ACCR_XPDIS (1 << 31) /* Core PLL Output Disable */
-#define ACCR_SPDIS (1 << 30) /* System PLL Output Disable */
-#define ACCR_D0CS (1 << 26) /* D0 Mode Clock Select */
-#define ACCR_PCCE (1 << 11) /* Power Mode Change Clock Enable */
-#define ACCR_DDR_D0CS (1 << 7) /* DDR SDRAM clock frequency in D0CS (PXA31x only) */
-
-#define ACCR_SMCFS_MASK (0x7 << 23) /* Static Memory Controller Frequency Select */
-#define ACCR_SFLFS_MASK (0x3 << 18) /* Frequency Select for Internal Memory Controller */
-#define ACCR_XSPCLK_MASK (0x3 << 16) /* Core Frequency during Frequency Change */
-#define ACCR_HSS_MASK (0x3 << 14) /* System Bus-Clock Frequency Select */
-#define ACCR_DMCFS_MASK (0x3 << 12) /* Dynamic Memory Controller Clock Frequency Select */
-#define ACCR_XN_MASK (0x7 << 8) /* Core PLL Turbo-Mode-to-Run-Mode Ratio */
-#define ACCR_XL_MASK (0x1f) /* Core PLL Run-Mode-to-Oscillator Ratio */
-
-#define ACCR_SMCFS(x) (((x) & 0x7) << 23)
-#define ACCR_SFLFS(x) (((x) & 0x3) << 18)
-#define ACCR_XSPCLK(x) (((x) & 0x3) << 16)
-#define ACCR_HSS(x) (((x) & 0x3) << 14)
-#define ACCR_DMCFS(x) (((x) & 0x3) << 12)
-#define ACCR_XN(x) (((x) & 0x7) << 8)
-#define ACCR_XL(x) ((x) & 0x1f)
-
-/*
- * Clock Enable Bit
- */
-#define CKEN_LCD 1 /* < LCD Clock Enable */
-#define CKEN_USBH 2 /* < USB host clock enable */
-#define CKEN_CAMERA 3 /* < Camera interface clock enable */
-#define CKEN_NAND 4 /* < NAND Flash Controller Clock Enable */
-#define CKEN_USB2 6 /* < USB 2.0 client clock enable. */
-#define CKEN_DMC 8 /* < Dynamic Memory Controller clock enable */
-#define CKEN_SMC 9 /* < Static Memory Controller clock enable */
-#define CKEN_ISC 10 /* < Internal SRAM Controller clock enable */
-#define CKEN_BOOT 11 /* < Boot rom clock enable */
-#define CKEN_MMC1 12 /* < MMC1 Clock enable */
-#define CKEN_MMC2 13 /* < MMC2 clock enable */
-#define CKEN_KEYPAD 14 /* < Keypand Controller Clock Enable */
-#define CKEN_CIR 15 /* < Consumer IR Clock Enable */
-#define CKEN_USIM0 17 /* < USIM[0] Clock Enable */
-#define CKEN_USIM1 18 /* < USIM[1] Clock Enable */
-#define CKEN_TPM 19 /* < TPM clock enable */
-#define CKEN_UDC 20 /* < UDC clock enable */
-#define CKEN_BTUART 21 /* < BTUART clock enable */
-#define CKEN_FFUART 22 /* < FFUART clock enable */
-#define CKEN_STUART 23 /* < STUART clock enable */
-#define CKEN_AC97 24 /* < AC97 clock enable */
-#define CKEN_TOUCH 25 /* < Touch screen Interface Clock Enable */
-#define CKEN_SSP1 26 /* < SSP1 clock enable */
-#define CKEN_SSP2 27 /* < SSP2 clock enable */
-#define CKEN_SSP3 28 /* < SSP3 clock enable */
-#define CKEN_SSP4 29 /* < SSP4 clock enable */
-#define CKEN_MSL0 30 /* < MSL0 clock enable */
-#define CKEN_PWM0 32 /* < PWM[0] clock enable */
-#define CKEN_PWM1 33 /* < PWM[1] clock enable */
-#define CKEN_I2C 36 /* < I2C clock enable */
-#define CKEN_INTC 38 /* < Interrupt controller clock enable */
-#define CKEN_GPIO 39 /* < GPIO clock enable */
-#define CKEN_1WIRE 40 /* < 1-wire clock enable */
-#define CKEN_HSIO2 41 /* < HSIO2 clock enable */
-#define CKEN_MINI_IM 48 /* < Mini-IM */
-#define CKEN_MINI_LCD 49 /* < Mini LCD */
-
-#define CKEN_MMC3 5 /* < MMC3 Clock Enable */
-#define CKEN_MVED 43 /* < MVED clock enable */
-
-/* Note: GCU clock enable bit differs on PXA300/PXA310 and PXA320 */
-#define CKEN_PXA300_GCU 42 /* Graphics controller clock enable */
-#define CKEN_PXA320_GCU 7 /* Graphics controller clock enable */
-
#endif /* __ASM_ARCH_PXA3XX_REGS_H */
diff --git a/arch/arm/mach-pxa/pxa3xx-ulpi.c b/arch/arm/mach-pxa/pxa3xx-ulpi.c
index 4bd7da1f8657..c29a7f0fa1b0 100644
--- a/arch/arm/mach-pxa/pxa3xx-ulpi.c
+++ b/arch/arm/mach-pxa/pxa3xx-ulpi.c
@@ -21,8 +21,8 @@
#include <linux/clk.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
+#include <linux/soc/pxa/cpu.h>
-#include <mach/hardware.h>
#include "regs-u2d.h"
#include <linux/platform_data/usb-pxa3xx-ulpi.h>
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 560160682df6..979642aa7ffe 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -24,17 +24,18 @@
#include <linux/syscore_ops.h>
#include <linux/platform_data/i2c-pxa.h>
#include <linux/platform_data/mmp_dma.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/clk/pxa.h>
#include <asm/mach/map.h>
#include <asm/suspend.h>
-#include <mach/hardware.h>
-#include <mach/pxa3xx-regs.h>
-#include <mach/reset.h>
+#include "pxa3xx-regs.h"
+#include "reset.h"
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include "pm.h"
-#include <mach/dma.h>
-#include <mach/smemc.h>
-#include <mach/irqs.h>
+#include "addr-map.h"
+#include "smemc.h"
+#include "irqs.h"
#include "generic.h"
#include "devices.h"
@@ -51,6 +52,10 @@ extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));
#define NDCR_ND_ARB_EN (1 << 12)
#define NDCR_ND_ARB_CNTL (1 << 19)
+#define CKEN_BOOT 11 /* < Boot rom clock enable */
+#define CKEN_TPM 19 /* < TPM clock enable */
+#define CKEN_HSIO2 41 /* < HSIO2 clock enable */
+
#ifdef CONFIG_PM
#define ISRAM_START 0x5c000000
@@ -463,7 +468,7 @@ static int __init pxa3xx_init(void)
if (cpu_is_pxa3xx()) {
- reset_status = ARSR;
+ pxa_register_wdt(ARSR);
/*
* clear RDH bit every time after reset
diff --git a/arch/arm/mach-pxa/pxa3xx.h b/arch/arm/mach-pxa/pxa3xx.h
index 6d4502aa9d06..81825f7ad258 100644
--- a/arch/arm/mach-pxa/pxa3xx.h
+++ b/arch/arm/mach-pxa/pxa3xx.h
@@ -2,8 +2,8 @@
#ifndef __MACH_PXA3XX_H
#define __MACH_PXA3XX_H
-#include <mach/hardware.h>
-#include <mach/pxa3xx-regs.h>
-#include <mach/irqs.h>
+#include "addr-map.h"
+#include "pxa3xx-regs.h"
+#include "irqs.h"
#endif /* __MACH_PXA3XX_H */
diff --git a/arch/arm/mach-pxa/pxa930.c b/arch/arm/mach-pxa/pxa930.c
index bf91de4267e5..b9021a40cbd1 100644
--- a/arch/arm/mach-pxa/pxa930.c
+++ b/arch/arm/mach-pxa/pxa930.c
@@ -13,6 +13,7 @@
#include <linux/irq.h>
#include <linux/gpio-pxa.h>
#include <linux/platform_device.h>
+#include <linux/soc/pxa/cpu.h>
#include "pxa930.h"
diff --git a/arch/arm/mach-pxa/include/mach/regs-ost.h b/arch/arm/mach-pxa/regs-ost.h
index deb564ed8ee7..c8001cfc8d6b 100644
--- a/arch/arm/mach-pxa/include/mach/regs-ost.h
+++ b/arch/arm/mach-pxa/regs-ost.h
@@ -2,11 +2,13 @@
#ifndef __ASM_MACH_REGS_OST_H
#define __ASM_MACH_REGS_OST_H
-#include <mach/hardware.h>
+#include "pxa-regs.h"
/*
* OS Timer & Match Registers
*/
+#define OST_PHYS 0x40A00000
+#define OST_LEN 0x00000020
#define OSMR0 io_p2v(0x40A00000) /* */
#define OSMR1 io_p2v(0x40A00004) /* */
diff --git a/arch/arm/mach-pxa/regs-rtc.h b/arch/arm/mach-pxa/regs-rtc.h
index b1f9ff14e335..96255a0f595e 100644
--- a/arch/arm/mach-pxa/regs-rtc.h
+++ b/arch/arm/mach-pxa/regs-rtc.h
@@ -2,7 +2,7 @@
#ifndef __ASM_MACH_REGS_RTC_H
#define __ASM_MACH_REGS_RTC_H
-#include <mach/hardware.h>
+#include "pxa-regs.h"
/*
* Real Time Clock
diff --git a/arch/arm/mach-pxa/regs-u2d.h b/arch/arm/mach-pxa/regs-u2d.h
index fe4c80ad87ec..ab517ba62c9a 100644
--- a/arch/arm/mach-pxa/regs-u2d.h
+++ b/arch/arm/mach-pxa/regs-u2d.h
@@ -2,8 +2,6 @@
#ifndef __ASM_ARCH_PXA3xx_U2D_H
#define __ASM_ARCH_PXA3xx_U2D_H
-#include <mach/bitfield.h>
-
/*
* USB2 device controller registers and bits definitions
*/
diff --git a/arch/arm/mach-pxa/include/mach/regs-uart.h b/arch/arm/mach-pxa/regs-uart.h
index 9a168f83afeb..490e9ca16297 100644
--- a/arch/arm/mach-pxa/include/mach/regs-uart.h
+++ b/arch/arm/mach-pxa/regs-uart.h
@@ -2,6 +2,8 @@
#ifndef __ASM_ARCH_REGS_UART_H
#define __ASM_ARCH_REGS_UART_H
+#include "pxa-regs.h"
+
/*
* UARTs
*/
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
index af78405aa4e9..f0be90573ad3 100644
--- a/arch/arm/mach-pxa/reset.c
+++ b/arch/arm/mach-pxa/reset.c
@@ -7,12 +7,9 @@
#include <asm/proc-fns.h>
#include <asm/system_misc.h>
-#include <mach/regs-ost.h>
-#include <mach/reset.h>
-#include <mach/smemc.h>
-
-unsigned int reset_status;
-EXPORT_SYMBOL(reset_status);
+#include "regs-ost.h"
+#include "reset.h"
+#include "smemc.h"
static void do_hw_reset(void);
diff --git a/arch/arm/mach-pxa/include/mach/reset.h b/arch/arm/mach-pxa/reset.h
index e1c4d100fd45..963dd190bc13 100644
--- a/arch/arm/mach-pxa/include/mach/reset.h
+++ b/arch/arm/mach-pxa/reset.h
@@ -8,8 +8,8 @@
#define RESET_STATUS_GPIO (1 << 3) /* GPIO Reset */
#define RESET_STATUS_ALL (0xf)
-extern unsigned int reset_status;
extern void clear_reset_status(unsigned int mask);
+extern void pxa_register_wdt(unsigned int reset_status);
/**
* init_gpio_reset() - register GPIO as reset generator
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index 83cfbb882a2d..a829baf8d922 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -24,7 +24,7 @@
#include <asm/mach-types.h>
#include "pm.h"
-#include <mach/pxa2xx-regs.h>
+#include "pxa2xx-regs.h"
#include "regs-rtc.h"
#include "sharpsl_pm.h"
diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S
index 6c5b3ffd2cd3..d58cf52e3848 100644
--- a/arch/arm/mach-pxa/sleep.S
+++ b/arch/arm/mach-pxa/sleep.S
@@ -13,13 +13,14 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <mach/hardware.h>
-#include <mach/smemc.h>
-#include <mach/pxa2xx-regs.h>
+#include "smemc.h"
+#include "pxa2xx-regs.h"
#define MDREFR_KDIV 0x200a4000 // all banks
#define CCCR_SLEEP 0x00000107 // L=7 2N=2 A=0 PPDIS=0 CPDIS=0
-
+#define CCCR_N_MASK 0x00000380
+#define CCCR_M_MASK 0x00000060
+#define CCCR_L_MASK 0x0000001f
.text
#ifdef CONFIG_PXA3xx
diff --git a/arch/arm/mach-pxa/smemc.c b/arch/arm/mach-pxa/smemc.c
index 32e82cc92ea5..2d2a321d82f8 100644
--- a/arch/arm/mach-pxa/smemc.c
+++ b/arch/arm/mach-pxa/smemc.c
@@ -8,9 +8,10 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
+#include <linux/soc/pxa/cpu.h>
-#include <mach/hardware.h>
-#include <mach/smemc.h>
+#include "smemc.h"
+#include <linux/soc/pxa/smemc.h>
#ifdef CONFIG_PM
static unsigned long msc[2];
@@ -70,3 +71,11 @@ static int __init smemc_init(void)
}
subsys_initcall(smemc_init);
#endif
+
+static const unsigned int df_clkdiv[4] = { 1, 2, 4, 1 };
+unsigned int pxa3xx_smemc_get_memclkdiv(void)
+{
+ unsigned long memclkcfg = __raw_readl(MEMCLKCFG);
+
+ return df_clkdiv[(memclkcfg >> 16) & 0x3];
+}
diff --git a/arch/arm/mach-pxa/include/mach/smemc.h b/arch/arm/mach-pxa/smemc.h
index 9b2453a7ab23..9b2453a7ab23 100644
--- a/arch/arm/mach-pxa/include/mach/smemc.h
+++ b/arch/arm/mach-pxa/smemc.h
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index a648e7094e84..dd88953adc9d 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -39,14 +39,14 @@
#include "pxa27x.h"
#include "pxa27x-udc.h"
-#include <mach/reset.h>
+#include "reset.h"
#include <linux/platform_data/irda-pxaficp.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/video-pxafb.h>
-#include <mach/spitz.h>
+#include "spitz.h"
#include "sharpsl_pm.h"
-#include <mach/smemc.h>
+#include "smemc.h"
#include "generic.h"
#include "devices.h"
@@ -962,11 +962,42 @@ static void __init spitz_i2c_init(void)
static inline void spitz_i2c_init(void) {}
#endif
+static struct gpiod_lookup_table spitz_audio_gpio_table = {
+ .dev_id = "spitz-audio",
+ .table = {
+ GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_L - SPITZ_SCP_GPIO_BASE,
+ "mute-l", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_R - SPITZ_SCP_GPIO_BASE,
+ "mute-r", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("sharp-scoop.1", SPITZ_GPIO_MIC_BIAS - SPITZ_SCP2_GPIO_BASE,
+ "mic", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
+static struct gpiod_lookup_table akita_audio_gpio_table = {
+ .dev_id = "spitz-audio",
+ .table = {
+ GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_L - SPITZ_SCP_GPIO_BASE,
+ "mute-l", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_R - SPITZ_SCP_GPIO_BASE,
+ "mute-r", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("i2c-max7310", AKITA_GPIO_MIC_BIAS - AKITA_IOEXP_GPIO_BASE,
+ "mic", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
/******************************************************************************
* Audio devices
******************************************************************************/
static inline void spitz_audio_init(void)
{
+ if (machine_is_akita())
+ gpiod_add_lookup_table(&akita_audio_gpio_table);
+ else
+ gpiod_add_lookup_table(&spitz_audio_gpio_table);
+
platform_device_register_simple("spitz-audio", -1, NULL, 0);
}
diff --git a/arch/arm/mach-pxa/include/mach/spitz.h b/arch/arm/mach-pxa/spitz.h
index 04828d8918aa..04828d8918aa 100644
--- a/arch/arm/mach-pxa/include/mach/spitz.h
+++ b/arch/arm/mach-pxa/spitz.h
diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c
index 25a1f8c5a738..6689b67f9ce5 100644
--- a/arch/arm/mach-pxa/spitz_pm.c
+++ b/arch/arm/mach-pxa/spitz_pm.c
@@ -18,9 +18,8 @@
#include <asm/irq.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
-#include <mach/spitz.h>
+#include "spitz.h"
#include "pxa27x.h"
#include "sharpsl_pm.h"
diff --git a/arch/arm/mach-pxa/standby.S b/arch/arm/mach-pxa/standby.S
index eab1645bb4ad..938310b708a0 100644
--- a/arch/arm/mach-pxa/standby.S
+++ b/arch/arm/mach-pxa/standby.S
@@ -11,9 +11,8 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <mach/hardware.h>
-#include <mach/pxa2xx-regs.h>
+#include "pxa2xx-regs.h"
.text
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 431709725d02..6af8bc404825 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -40,16 +40,16 @@
#include <asm/mach-types.h>
#include "pxa25x.h"
-#include <mach/reset.h>
+#include "reset.h"
#include <linux/platform_data/irda-pxaficp.h>
#include <linux/platform_data/mmc-pxamci.h>
#include "udc.h"
#include "tosa_bt.h"
-#include <mach/audio.h>
-#include <mach/smemc.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "smemc.h"
#include <asm/mach/arch.h>
-#include <mach/tosa.h>
+#include "tosa.h"
#include <asm/hardware/scoop.h>
#include <asm/mach/sharpsl_param.h>
@@ -296,9 +296,9 @@ static struct gpiod_lookup_table tosa_mci_gpio_table = {
.table = {
GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_nSD_DETECT,
"cd", GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_SD_WP,
+ GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_SD_WP - TOSA_SCOOP_GPIO_BASE,
"wp", GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_PWR_ON,
+ GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_PWR_ON - TOSA_SCOOP_GPIO_BASE,
"power", GPIO_ACTIVE_HIGH),
{ },
},
@@ -616,6 +616,22 @@ static struct resource tc6393xb_resources[] = {
},
};
+static struct gpiod_lookup_table tosa_battery_gpio_table = {
+ .dev_id = "wm97xx-battery",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_BAT0_CRG,
+ "main battery full", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_BAT1_CRG,
+ "jacket battery full", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_BAT0_LOW,
+ "main battery low", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_BAT1_LOW,
+ "jacket battery low", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_JACKET_DETECT,
+ "jacket detect", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
static int tosa_tc6393xb_enable(struct platform_device *dev)
{
@@ -709,31 +725,6 @@ static struct tmio_nand_data tosa_tc6393xb_nand_config = {
.part_parsers = probes,
};
-static int tosa_tc6393xb_setup(struct platform_device *dev)
-{
- int rc;
-
- rc = gpio_request(TOSA_GPIO_CARD_VCC_ON, "CARD_VCC_ON");
- if (rc)
- goto err_req;
-
- rc = gpio_direction_output(TOSA_GPIO_CARD_VCC_ON, 1);
- if (rc)
- goto err_dir;
-
- return rc;
-
-err_dir:
- gpio_free(TOSA_GPIO_CARD_VCC_ON);
-err_req:
- return rc;
-}
-
-static void tosa_tc6393xb_teardown(struct platform_device *dev)
-{
- gpio_free(TOSA_GPIO_CARD_VCC_ON);
-}
-
#ifdef CONFIG_MFD_TC6393XB
static struct fb_videomode tosa_tc6393xb_lcd_mode[] = {
{
@@ -778,9 +769,6 @@ static struct tc6393xb_platform_data tosa_tc6393xb_data = {
.scr_gper = 0x3300,
.irq_base = IRQ_BOARD_START,
- .gpio_base = TOSA_TC6393XB_GPIO_BASE,
- .setup = tosa_tc6393xb_setup,
- .teardown = tosa_tc6393xb_teardown,
.enable = tosa_tc6393xb_enable,
.disable = tosa_tc6393xb_disable,
@@ -821,26 +809,6 @@ static struct pxa2xx_spi_controller pxa_ssp_master_info = {
.num_chipselect = 1,
};
-static struct gpiod_lookup_table tosa_lcd_gpio_table = {
- .dev_id = "spi2.0",
- .table = {
- GPIO_LOOKUP("tc6393xb",
- TOSA_GPIO_TG_ON - TOSA_TC6393XB_GPIO_BASE,
- "tg #pwr", GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
-static struct gpiod_lookup_table tosa_lcd_bl_gpio_table = {
- .dev_id = "i2c-tosa-bl",
- .table = {
- GPIO_LOOKUP("tc6393xb",
- TOSA_GPIO_BL_C20MA - TOSA_TC6393XB_GPIO_BASE,
- "backlight", GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
static struct spi_board_info spi_board_info[] __initdata = {
{
.modalias = "tosa-lcd",
@@ -943,6 +911,8 @@ static void __init tosa_init(void)
/* enable batt_fault */
PMCR = 0x01;
+ gpiod_add_lookup_table(&tosa_battery_gpio_table);
+
gpiod_add_lookup_table(&tosa_mci_gpio_table);
pxa_set_mci_info(&tosa_mci_platform_data);
pxa_set_ficp_info(&tosa_ficp_platform_data);
@@ -951,8 +921,6 @@ static void __init tosa_init(void)
platform_scoop_config = &tosa_pcmcia_config;
pxa2xx_set_spi_info(2, &pxa_ssp_master_info);
- gpiod_add_lookup_table(&tosa_lcd_gpio_table);
- gpiod_add_lookup_table(&tosa_lcd_bl_gpio_table);
spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
clk_add_alias("CLK_CK3P6MI", tc6393xb_device.name, "GPIO11_CLK", NULL);
diff --git a/arch/arm/mach-pxa/include/mach/tosa.h b/arch/arm/mach-pxa/tosa.h
index 8bfaca3a8b64..3b3efa0a0e22 100644
--- a/arch/arm/mach-pxa/include/mach/tosa.h
+++ b/arch/arm/mach-pxa/tosa.h
@@ -55,24 +55,6 @@
#define TOSA_SCOOP_JC_IO_DIR (TOSA_SCOOP_JC_CARD_LIMIT_SEL)
/*
- * TC6393XB GPIOs
- */
-#define TOSA_TC6393XB_GPIO_BASE (PXA_NR_BUILTIN_GPIO + 2 * 12)
-
-#define TOSA_GPIO_TG_ON (TOSA_TC6393XB_GPIO_BASE + 0)
-#define TOSA_GPIO_L_MUTE (TOSA_TC6393XB_GPIO_BASE + 1)
-#define TOSA_GPIO_BL_C20MA (TOSA_TC6393XB_GPIO_BASE + 3)
-#define TOSA_GPIO_CARD_VCC_ON (TOSA_TC6393XB_GPIO_BASE + 4)
-#define TOSA_GPIO_CHARGE_OFF (TOSA_TC6393XB_GPIO_BASE + 6)
-#define TOSA_GPIO_CHARGE_OFF_JC (TOSA_TC6393XB_GPIO_BASE + 7)
-#define TOSA_GPIO_BAT0_V_ON (TOSA_TC6393XB_GPIO_BASE + 9)
-#define TOSA_GPIO_BAT1_V_ON (TOSA_TC6393XB_GPIO_BASE + 10)
-#define TOSA_GPIO_BU_CHRG_ON (TOSA_TC6393XB_GPIO_BASE + 11)
-#define TOSA_GPIO_BAT_SW_ON (TOSA_TC6393XB_GPIO_BASE + 12)
-#define TOSA_GPIO_BAT0_TH_ON (TOSA_TC6393XB_GPIO_BASE + 14)
-#define TOSA_GPIO_BAT1_TH_ON (TOSA_TC6393XB_GPIO_BASE + 15)
-
-/*
* PXA GPIOs
*/
#define TOSA_GPIO_POWERON (0)
diff --git a/drivers/pcmcia/pxa2xx_trizeps4.c b/arch/arm/mach-pxa/trizeps4-pcmcia.c
index 6db8fe880ed4..25e363770565 100644
--- a/drivers/pcmcia/pxa2xx_trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4-pcmcia.c
@@ -19,10 +19,10 @@
#include <asm/mach-types.h>
#include <asm/irq.h>
-#include <mach/pxa2xx-regs.h>
-#include <mach/trizeps4.h>
+#include "pxa2xx-regs.h"
+#include "trizeps4.h"
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
extern void board_pcmcia_power(int power);
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index f76f8be09554..716cce885379 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -40,13 +40,13 @@
#include <asm/mach/flash.h>
#include "pxa27x.h"
-#include <mach/trizeps4.h>
-#include <mach/audio.h>
+#include "trizeps4.h"
+#include <linux/platform_data/asoc-pxa.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/irda-pxaficp.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <mach/smemc.h>
+#include "smemc.h"
#include "generic.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/include/mach/trizeps4.h b/arch/arm/mach-pxa/trizeps4.h
index 3cddb1428c5e..b6c19d155ef9 100644
--- a/arch/arm/mach-pxa/include/mach/trizeps4.h
+++ b/arch/arm/mach-pxa/trizeps4.h
@@ -11,6 +11,7 @@
#ifndef _TRIPEPS4_H_
#define _TRIPEPS4_H_
+#include "addr-map.h"
#include "irqs.h" /* PXA_GPIO_TO_IRQ */
/* physical memory regions */
diff --git a/drivers/pcmcia/pxa2xx_viper.c b/arch/arm/mach-pxa/viper-pcmcia.c
index 7ac6647d286e..26599dcc49b3 100644
--- a/drivers/pcmcia/pxa2xx_viper.c
+++ b/arch/arm/mach-pxa/viper-pcmcia.c
@@ -22,13 +22,11 @@
#include <linux/gpio.h>
#include <pcmcia/ss.h>
+#include <pcmcia/soc_common.h>
#include <asm/irq.h>
-#include <linux/platform_data/pcmcia-pxa2xx_viper.h>
-
-#include "soc_common.h"
-#include "pxa2xx_base.h"
+#include "viper-pcmcia.h"
static struct platform_device *arcom_pcmcia_dev;
diff --git a/include/linux/platform_data/pcmcia-pxa2xx_viper.h b/arch/arm/mach-pxa/viper-pcmcia.h
index a23b58aff9e1..a23b58aff9e1 100644
--- a/include/linux/platform_data/pcmcia-pxa2xx_viper.h
+++ b/arch/arm/mach-pxa/viper-pcmcia.h
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 3aa34e9a15d3..5b43351ee840 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -46,10 +46,10 @@
#include <linux/syscore_ops.h>
#include "pxa25x.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include <linux/platform_data/video-pxafb.h>
-#include <mach/regs-uart.h>
-#include <linux/platform_data/pcmcia-pxa2xx_viper.h>
+#include "regs-uart.h"
+#include "viper-pcmcia.h"
#include "viper.h"
#include <asm/setup.h>
@@ -851,7 +851,7 @@ static void __init viper_init_vcore_gpios(void)
goto err_dir;
/* c/should assume redboot set the correct level ??? */
- viper_set_core_cpu_voltage(get_clk_frequency_khz(0), 1);
+ viper_set_core_cpu_voltage(pxa25x_get_clk_frequency_khz(0), 1);
return;
@@ -998,6 +998,18 @@ static struct map_desc viper_io_desc[] __initdata = {
.length = 0x00800000,
.type = MT_DEVICE,
},
+ {
+ /*
+ * ISA I/O space mapping:
+ * - ports 0x0000-0x0fff are PC/104
+ * - ports 0x10000-0x10fff are PCMCIA slot 1
+ * - ports 0x11000-0x11fff are PC/104
+ */
+ .virtual = PCI_IO_VIRT_BASE,
+ .pfn = __phys_to_pfn(0x30000000),
+ .length = 0x1000,
+ .type = MT_DEVICE,
+ },
};
static void __init viper_map_io(void)
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/arch/arm/mach-pxa/vpac270-pcmcia.c
index 3565add03a5e..9fd990c8a5fb 100644
--- a/drivers/pcmcia/pxa2xx_vpac270.c
+++ b/arch/arm/mach-pxa/vpac270-pcmcia.c
@@ -13,9 +13,9 @@
#include <asm/mach-types.h>
-#include <mach/vpac270.h>
+#include "vpac270.h"
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
static struct gpio vpac270_pcmcia_gpios[] = {
{ GPIO107_VPAC270_PCMCIA_PPEN, GPIOF_INIT_LOW, "PCMCIA PPEN" },
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index 14505e83479e..8f74bafcf1f9 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -29,8 +29,8 @@
#include <asm/mach/arch.h>
#include "pxa27x.h"
-#include <mach/audio.h>
-#include <mach/vpac270.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "vpac270.h"
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
diff --git a/arch/arm/mach-pxa/include/mach/vpac270.h b/arch/arm/mach-pxa/vpac270.h
index 0cd094d8c553..0cd094d8c553 100644
--- a/arch/arm/mach-pxa/include/mach/vpac270.h
+++ b/arch/arm/mach-pxa/vpac270.h
diff --git a/arch/arm/mach-pxa/xcep.c b/arch/arm/mach-pxa/xcep.c
index f485146b899f..6bb02b65fb82 100644
--- a/arch/arm/mach-pxa/xcep.c
+++ b/arch/arm/mach-pxa/xcep.c
@@ -24,9 +24,9 @@
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
-#include <mach/hardware.h>
#include "pxa25x.h"
-#include <mach/smemc.h>
+#include "addr-map.h"
+#include "smemc.h"
#include "generic.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c
index 7eaeda269927..d03520555497 100644
--- a/arch/arm/mach-pxa/z2.c
+++ b/arch/arm/mach-pxa/z2.c
@@ -34,7 +34,7 @@
#include "pxa27x.h"
#include "mfp-pxa27x.h"
-#include <mach/z2.h>
+#include "z2.h"
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/keypad-pxa27x.h>
@@ -651,6 +651,15 @@ static void __init z2_spi_init(void)
static inline void z2_spi_init(void) {}
#endif
+static struct gpiod_lookup_table z2_audio_gpio_table = {
+ .dev_id = "soc-audio",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", GPIO37_ZIPITZ2_HEADSET_DETECT,
+ "hsdet-gpio", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
/******************************************************************************
* Core power regulator
******************************************************************************/
@@ -755,6 +764,8 @@ static void __init z2_init(void)
z2_keys_init();
z2_pmic_init();
+ gpiod_add_lookup_table(&z2_audio_gpio_table);
+
pm_power_off = z2_power_off;
}
diff --git a/arch/arm/mach-pxa/include/mach/z2.h b/arch/arm/mach-pxa/z2.h
index a78b2e28b1db..a78b2e28b1db 100644
--- a/arch/arm/mach-pxa/include/mach/z2.h
+++ b/arch/arm/mach-pxa/z2.h
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 97700429633e..ff0d8bb9f557 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -39,17 +39,17 @@
#include "pxa27x.h"
#include "devices.h"
-#include <mach/regs-uart.h>
+#include "regs-uart.h"
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/mmc-pxamci.h>
#include "pxa27x-udc.h"
#include "udc.h"
#include <linux/platform_data/video-pxafb.h>
#include "pm.h"
-#include <mach/audio.h>
-#include <linux/platform_data/pcmcia-pxa2xx_viper.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "viper-pcmcia.h"
#include "zeus.h"
-#include <mach/smemc.h>
+#include "smemc.h"
#include "generic.h"
@@ -929,6 +929,18 @@ static struct map_desc zeus_io_desc[] __initdata = {
.length = 0x00800000,
.type = MT_DEVICE,
},
+ {
+ /*
+ * ISA I/O space mapping:
+ * - ports 0x0000-0x0fff are PC/104
+ * - ports 0x10000-0x10fff are PCMCIA slot 1
+ * - ports 0x11000-0x11fff are PC/104
+ */
+ .virtual = PCI_IO_VIRT_BASE,
+ .pfn = __phys_to_pfn(ZEUS_PC104IO_PHYS),
+ .length = 0x1000,
+ .type = MT_DEVICE,
+ },
};
static void __init zeus_map_io(void)
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index 79f0025fa17a..8ed75ac29b1a 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -20,17 +20,19 @@
#include <linux/pwm.h>
#include <linux/pwm_backlight.h>
#include <linux/smc91x.h>
+#include <linux/soc/pxa/cpu.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include "pxa3xx.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include <linux/platform_data/video-pxafb.h>
#include "zylonite.h"
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/keypad-pxa27x.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>
+#include "mfp.h"
#include "devices.h"
#include "generic.h"
@@ -424,6 +426,35 @@ static void __init zylonite_init_ohci(void)
static inline void zylonite_init_ohci(void) {}
#endif /* CONFIG_USB_OHCI_HCD || CONFIG_USB_OHCI_HCD_MODULE */
+static struct gpiod_lookup_table zylonite_wm97xx_touch_gpio15_table = {
+ .dev_id = "wm97xx-touch.0",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", mfp_to_gpio(MFP_PIN_GPIO15),
+ "touch", GPIO_ACTIVE_LOW),
+ { },
+ },
+};
+
+static struct gpiod_lookup_table zylonite_wm97xx_touch_gpio26_table = {
+ .dev_id = "wm97xx-touch.0",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", mfp_to_gpio(MFP_PIN_GPIO26),
+ "touch", GPIO_ACTIVE_LOW),
+ { },
+ },
+};
+
+static void __init zylonite_init_wm97xx_touch(void)
+{
+ if (!IS_ENABLED(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE))
+ return;
+
+ if (cpu_is_pxa320())
+ gpiod_add_lookup_table(&zylonite_wm97xx_touch_gpio15_table);
+ else
+ gpiod_add_lookup_table(&zylonite_wm97xx_touch_gpio26_table);
+}
+
static void __init zylonite_init(void)
{
pxa_set_ffuart_info(NULL);
@@ -449,6 +480,7 @@ static void __init zylonite_init(void)
zylonite_init_nand();
zylonite_init_leds();
zylonite_init_ohci();
+ zylonite_init_wm97xx_touch();
}
MACHINE_START(ZYLONITE, "PXA3xx Platform Development Kit (aka Zylonite)")
diff --git a/arch/arm/mach-pxa/zylonite.h b/arch/arm/mach-pxa/zylonite.h
index 7300ec2aac0d..afe3efcb8e04 100644
--- a/arch/arm/mach-pxa/zylonite.h
+++ b/arch/arm/mach-pxa/zylonite.h
@@ -2,6 +2,8 @@
#ifndef __ASM_ARCH_ZYLONITE_H
#define __ASM_ARCH_ZYLONITE_H
+#include <linux/soc/pxa/cpu.h>
+
#define ZYLONITE_ETH_PHYS 0x14000000
#define EXT_GPIO(x) (128 + (x))
diff --git a/arch/arm/mach-pxa/zylonite_pxa300.c b/arch/arm/mach-pxa/zylonite_pxa300.c
index 956fec1c4940..50a8a3547dbc 100644
--- a/arch/arm/mach-pxa/zylonite_pxa300.c
+++ b/arch/arm/mach-pxa/zylonite_pxa300.c
@@ -17,6 +17,7 @@
#include <linux/platform_data/i2c-pxa.h>
#include <linux/platform_data/pca953x.h>
#include <linux/gpio.h>
+#include <linux/soc/pxa/cpu.h>
#include "pxa300.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/zylonite_pxa320.c b/arch/arm/mach-pxa/zylonite_pxa320.c
index 94cb834f36cd..67cab4f1194b 100644
--- a/arch/arm/mach-pxa/zylonite_pxa320.c
+++ b/arch/arm/mach-pxa/zylonite_pxa320.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>
+#include <linux/soc/pxa/cpu.h>
#include "pxa320.h"
#include "zylonite.h"
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 4dfb7554649d..6c21f214cd60 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -39,9 +39,6 @@
#include "generic.h"
#include <clocksource/pxa.h>
-unsigned int reset_status;
-EXPORT_SYMBOL(reset_status);
-
#define NR_FREQS 16
/*
@@ -319,10 +316,13 @@ static struct platform_device *sa11x0_devices[] __initdata = {
static int __init sa1100_init(void)
{
+ struct resource wdt_res = DEFINE_RES_MEM(0x90000000, 0x20);
pm_power_off = sa1100_power_off;
regulator_has_full_constraints();
+ platform_device_register_simple("sa1100_wdt", -1, &wdt_res, 1);
+
return platform_add_devices(sa11x0_devices, ARRAY_SIZE(sa11x0_devices));
}
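
sa1100_init() now registers a plain "sa1100_wdt" platform device with its register window as a MEM resource. A hedged sketch of how the watchdog driver could map that resource using the generic platform helpers; the probe function name is illustrative and not taken from this patch:

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int sa1100_wdt_probe_sketch(struct platform_device *pdev)
	{
		void __iomem *base;

		/* Maps the DEFINE_RES_MEM(0x90000000, 0x20) region passed in
		 * via platform_device_register_simple() above. */
		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		return 0;
	}
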
diff --git a/arch/arm/mach-sa1100/include/mach/reset.h b/arch/arm/mach-sa1100/include/mach/reset.h
index 27695650a567..a6723d45ae2a 100644
--- a/arch/arm/mach-sa1100/include/mach/reset.h
+++ b/arch/arm/mach-sa1100/include/mach/reset.h
@@ -10,7 +10,6 @@
#define RESET_STATUS_GPIO (1 << 3) /* GPIO Reset */
#define RESET_STATUS_ALL (0xf)
-extern unsigned int reset_status;
static inline void clear_reset_status(unsigned int mask)
{
RCSR = mask;
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 6f0909dda2f9..c86e79677ff9 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -29,6 +29,7 @@ static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
int tmp;
asm volatile ("\
+.arch xscale \n\
pld [%1, #0] \n\
pld [%1, #32] \n\
1: pld [%1, #64] \n\
@@ -80,6 +81,7 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *ptr, *kaddr = kmap_atomic(page);
asm volatile ("\
+.arch xscale \n\
mov r1, %2 \n\
mov r2, #0 \n\
mov r3, #0 \n\
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 290702328a33..576c0e6c92fc 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -455,7 +455,7 @@ void iounmap(volatile void __iomem *cookie)
}
EXPORT_SYMBOL(iounmap);
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
static int pci_ioremap_mem_type = MT_DEVICE;
void pci_ioremap_set_mem_type(int mem_type)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a4968845e67f..1652a9800ebe 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2157,10 +2157,6 @@ config DMI
endmenu # "Boot options"
-config SYSVIPC_COMPAT
- def_bool y
- depends on COMPAT && SYSVIPC
-
menu "Power management options"
source "kernel/power/Kconfig"
diff --git a/arch/arm64/boot/dts/intel/Makefile b/arch/arm64/boot/dts/intel/Makefile
index 0b5477442263..c2a723838344 100644
--- a/arch/arm64/boot/dts/intel/Makefile
+++ b/arch/arm64/boot/dts/intel/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-dtb-$(CONFIG_ARCH_INTEL_SOCFPGA) += socfpga_agilex_socdk.dtb \
+dtb-$(CONFIG_ARCH_INTEL_SOCFPGA) += socfpga_agilex_n6000.dtb \
+ socfpga_agilex_socdk.dtb \
socfpga_agilex_socdk_nand.dtb \
socfpga_n5x_socdk.dtb
dtb-$(CONFIG_ARCH_KEEMBAY) += keembay-evm.dtb
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_n6000.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_n6000.dts
new file mode 100644
index 000000000000..6231a69204b1
--- /dev/null
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex_n6000.dts
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021-2022, Intel Corporation
+ */
+#include "socfpga_agilex.dtsi"
+
+/ {
+ model = "SoCFPGA Agilex n6000";
+ compatible = "intel,socfpga-agilex-n6000", "intel,socfpga-agilex";
+
+ aliases {
+ serial0 = &uart1;
+ serial1 = &uart0;
+ ethernet0 = &gmac0;
+ ethernet1 = &gmac1;
+ ethernet2 = &gmac2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the reg */
+ reg = <0 0 0 0>;
+ };
+
+ soc {
+ bus@80000000 {
+ compatible = "simple-bus";
+ reg = <0x80000000 0x60000000>,
+ <0xf9000000 0x00100000>;
+ reg-names = "axi_h2f", "axi_h2f_lw";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x00000000 0xf9000000 0x00001000>;
+
+ dma-controller@0 {
+ compatible = "intel,hps-copy-engine";
+ reg = <0x00000000 0x00000000 0x00001000>;
+ #dma-cells = <1>;
+ };
+ };
+ };
+};
+
+&osc1 {
+ clock-frequency = <25000000>;
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&watchdog0 {
+ status = "okay";
+};
+
+&fpga_mgr {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
index 56e54ce4d10e..49afbb1a066a 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
@@ -1052,22 +1052,22 @@
&usb2 {
status = "okay";
extcon = <&usb2_id>;
+};
- dwc3@7600000 {
- extcon = <&usb2_id>;
- dr_mode = "otg";
- maximum-speed = "high-speed";
- };
+&usb2_dwc3 {
+ extcon = <&usb2_id>;
+ dr_mode = "otg";
+ maximum-speed = "high-speed";
};
&usb3 {
status = "okay";
extcon = <&usb3_id>;
+};
- dwc3@6a00000 {
- extcon = <&usb3_id>;
- dr_mode = "otg";
- };
+&usb3_dwc3 {
+ extcon = <&usb3_id>;
+ dr_mode = "otg";
};
&usb3phy {
diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
index a4d363c187fc..c89499e366d3 100644
--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
@@ -653,7 +653,7 @@
status = "disabled";
};
- usb2: usb2@7000000 {
+ usb2: usb@70f8800 {
compatible = "qcom,ipq6018-dwc3", "qcom,dwc3";
reg = <0x0 0x070F8800 0x0 0x400>;
#address-cells = <2>;
@@ -662,7 +662,7 @@
clocks = <&gcc GCC_USB1_MASTER_CLK>,
<&gcc GCC_USB1_SLEEP_CLK>,
<&gcc GCC_USB1_MOCK_UTMI_CLK>;
- clock-names = "master",
+ clock-names = "core",
"sleep",
"mock_utmi";
@@ -730,7 +730,7 @@
status = "disabled";
};
- usb3: usb3@8A00000 {
+ usb3: usb@8af8800 {
compatible = "qcom,ipq6018-dwc3", "qcom,dwc3";
reg = <0x0 0x8AF8800 0x0 0x400>;
#address-cells = <2>;
@@ -741,8 +741,8 @@
<&gcc GCC_USB0_MASTER_CLK>,
<&gcc GCC_USB0_SLEEP_CLK>,
<&gcc GCC_USB0_MOCK_UTMI_CLK>;
- clock-names = "sys_noc_axi",
- "master",
+ clock-names = "cfg_noc",
+ "core",
"sleep",
"mock_utmi";
@@ -756,7 +756,7 @@
resets = <&gcc GCC_USB0_BCR>;
status = "disabled";
- dwc_0: usb@8A00000 {
+ dwc_0: usb@8a00000 {
compatible = "snps,dwc3";
reg = <0x0 0x8A00000 0x0 0xcd00>;
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
index 943243d5515b..4c38b15c6fd4 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
@@ -553,7 +553,7 @@
};
usb_0: usb@8af8800 {
- compatible = "qcom,dwc3";
+ compatible = "qcom,ipq8074-dwc3", "qcom,dwc3";
reg = <0x08af8800 0x400>;
#address-cells = <1>;
#size-cells = <1>;
@@ -563,8 +563,8 @@
<&gcc GCC_USB0_MASTER_CLK>,
<&gcc GCC_USB0_SLEEP_CLK>,
<&gcc GCC_USB0_MOCK_UTMI_CLK>;
- clock-names = "sys_noc_axi",
- "master",
+ clock-names = "cfg_noc",
+ "core",
"sleep",
"mock_utmi";
@@ -578,7 +578,7 @@
resets = <&gcc GCC_USB0_BCR>;
status = "disabled";
- dwc_0: dwc3@8a00000 {
+ dwc_0: usb@8a00000 {
compatible = "snps,dwc3";
reg = <0x8a00000 0xcd00>;
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
@@ -593,7 +593,7 @@
};
usb_1: usb@8cf8800 {
- compatible = "qcom,dwc3";
+ compatible = "qcom,ipq8074-dwc3", "qcom,dwc3";
reg = <0x08cf8800 0x400>;
#address-cells = <1>;
#size-cells = <1>;
@@ -603,8 +603,8 @@
<&gcc GCC_USB1_MASTER_CLK>,
<&gcc GCC_USB1_SLEEP_CLK>,
<&gcc GCC_USB1_MOCK_UTMI_CLK>;
- clock-names = "sys_noc_axi",
- "master",
+ clock-names = "cfg_noc",
+ "core",
"sleep",
"mock_utmi";
@@ -618,7 +618,7 @@
resets = <&gcc GCC_USB1_BCR>;
status = "disabled";
- dwc_1: dwc3@8c00000 {
+ dwc_1: usb@8c00000 {
compatible = "snps,dwc3";
reg = <0x8c00000 0xcd00>;
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index 49903a6e9dfd..ffc3ec2cd3bc 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -759,10 +759,13 @@
clocks = <&gcc GCC_USB_PHY_CFG_AHB_CLK>,
<&gcc GCC_USB30_MASTER_CLK>,
<&gcc GCC_PCNOC_USB3_AXI_CLK>,
- <&gcc GCC_USB30_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_SLEEP_CLK>;
- clock-names = "cfg_noc", "core", "iface",
- "mock_utmi", "sleep";
+ <&gcc GCC_USB30_SLEEP_CLK>,
+ <&gcc GCC_USB30_MOCK_UTMI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_MASTER_CLK>;
diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
index 367ed913902c..0318d42c5736 100644
--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
@@ -428,7 +428,7 @@
};
usb3: usb@f92f8800 {
- compatible = "qcom,msm8996-dwc3", "qcom,dwc3";
+ compatible = "qcom,msm8994-dwc3", "qcom,dwc3";
reg = <0xf92f8800 0x400>;
#address-cells = <1>;
#size-cells = <1>;
@@ -438,7 +438,10 @@
<&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
<&gcc GCC_USB30_SLEEP_CLK>,
<&gcc GCC_USB30_MOCK_UTMI_CLK>;
- clock-names = "core", "iface", "sleep", "mock_utmi", "ref", "xo";
+ clock-names = "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_MASTER_CLK>;
diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
index be4f643b1fd1..a7090befc16f 100644
--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
@@ -308,19 +308,19 @@
extcon = <&typec>;
qcom,select-utmi-as-pipe-clk;
+};
- dwc3@6a00000 {
- extcon = <&typec>;
+&usb3_dwc3 {
+ extcon = <&typec>;
- /* usb3-phy is not used on this device */
- phys = <&hsusb_phy1>;
- phy-names = "usb2-phy";
+ /* usb3-phy is not used on this device */
+ phys = <&hsusb_phy1>;
+ phy-names = "usb2-phy";
- maximum-speed = "high-speed";
- snps,is-utmi-l1-suspend;
- snps,usb2-gadget-lpm-disable;
- snps,hird-threshold = /bits/ 8 <0>;
- };
+ maximum-speed = "high-speed";
+ snps,is-utmi-l1-suspend;
+ snps,usb2-gadget-lpm-disable;
+ snps,hird-threshold = /bits/ 8 <0>;
};
&hsusb_phy1 {
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 205af7b479a8..9932186f7ceb 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -2718,11 +2718,15 @@
interrupt-names = "hs_phy_irq", "ss_phy_irq";
clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
- <&gcc GCC_USB30_MASTER_CLK>,
- <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
- <&gcc GCC_USB30_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_SLEEP_CLK>,
- <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+ <&gcc GCC_USB30_MASTER_CLK>,
+ <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_SLEEP_CLK>,
+ <&gcc GCC_USB30_MOCK_UTMI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_MASTER_CLK>;
@@ -2731,7 +2735,7 @@
power-domains = <&gcc USB30_GDSC>;
status = "disabled";
- usb3_dwc3: dwc3@6a00000 {
+ usb3_dwc3: usb@6a00000 {
compatible = "snps,dwc3";
reg = <0x06a00000 0xcc00>;
interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
@@ -3050,6 +3054,11 @@
<&gcc GCC_USB20_MOCK_UTMI_CLK>,
<&gcc GCC_USB20_SLEEP_CLK>,
<&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
assigned-clocks = <&gcc GCC_USB20_MOCK_UTMI_CLK>,
<&gcc GCC_USB20_MASTER_CLK>;
@@ -3059,7 +3068,7 @@
qcom,select-utmi-as-pipe-clk;
status = "disabled";
- dwc3@7600000 {
+ usb2_dwc3: usb@7600000 {
compatible = "snps,dwc3";
reg = <0x07600000 0xcc00>;
interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index 4a84de6cee1e..758c45bbbe78 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -2023,10 +2023,13 @@
clocks = <&gcc GCC_CFG_NOC_USB3_AXI_CLK>,
<&gcc GCC_USB30_MASTER_CLK>,
<&gcc GCC_AGGRE1_USB3_AXI_CLK>,
- <&gcc GCC_USB30_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_SLEEP_CLK>;
- clock-names = "cfg_noc", "core", "iface", "mock_utmi",
- "sleep";
+ <&gcc GCC_USB30_SLEEP_CLK>,
+ <&gcc GCC_USB30_MOCK_UTMI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_MASTER_CLK>;
@@ -2040,7 +2043,7 @@
resets = <&gcc GCC_USB_30_BCR>;
- usb3_dwc3: dwc3@a800000 {
+ usb3_dwc3: usb@a800000 {
compatible = "snps,dwc3";
reg = <0x0a800000 0xcd00>;
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
index a80c578484ba..2f3104a84417 100644
--- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
@@ -337,9 +337,10 @@
&usb3 {
status = "okay";
- dwc3@7580000 {
- dr_mode = "host";
- };
+};
+
+&usb3_dwc3 {
+ dr_mode = "host";
};
&usb2_phy_prim {
diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
index bc446c6002d0..d912166b7552 100644
--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
@@ -529,7 +529,7 @@
};
usb3: usb@7678800 {
- compatible = "qcom,dwc3";
+ compatible = "qcom,qcs404-dwc3", "qcom,dwc3";
reg = <0x07678800 0x400>;
#address-cells = <1>;
#size-cells = <1>;
@@ -544,7 +544,7 @@
assigned-clock-rates = <19200000>, <200000000>;
status = "disabled";
- dwc3@7580000 {
+ usb3_dwc3: usb@7580000 {
compatible = "snps,dwc3";
reg = <0x07580000 0xcd00>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
@@ -558,7 +558,7 @@
};
usb2: usb@79b8800 {
- compatible = "qcom,dwc3";
+ compatible = "qcom,qcs404-dwc3", "qcom,dwc3";
reg = <0x079b8800 0x400>;
#address-cells = <1>;
#size-cells = <1>;
@@ -573,7 +573,7 @@
assigned-clock-rates = <19200000>, <133333333>;
status = "disabled";
- dwc3@78c0000 {
+ usb@78c0000 {
compatible = "snps,dwc3";
reg = <0x078c0000 0xcc00>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
index 82fa009e540f..5dcaac23a138 100644
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -2755,10 +2755,13 @@
clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>,
<&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
- <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
- clock-names = "cfg_noc", "core", "iface", "mock_utmi",
- "sleep";
+ <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>;
@@ -2779,7 +2782,7 @@
<&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3 0>;
interconnect-names = "usb-ddr", "apps-usb";
- usb_1_dwc3: dwc3@a600000 {
+ usb_1_dwc3: usb@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xe000>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index f72451f7f539..e66fc67de206 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -3069,10 +3069,13 @@
clocks = <&gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
<&gcc GCC_USB30_SEC_MASTER_CLK>,
<&gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
- <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_SEC_SLEEP_CLK>;
- clock-names = "cfg_noc", "core", "iface","mock_utmi",
- "sleep";
+ <&gcc GCC_USB30_SEC_SLEEP_CLK>,
+ <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
assigned-clocks = <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_SEC_MASTER_CLK>;
@@ -3102,6 +3105,12 @@
phys = <&usb_2_hsphy>;
phy-names = "usb2-phy";
maximum-speed = "high-speed";
+ usb-role-switch;
+ port {
+ usb2_role_switch: endpoint {
+ remote-endpoint = <&eud_ep>;
+ };
+ };
};
};
@@ -3194,6 +3203,36 @@
interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
};
+ eud: eud@88e0000 {
+ compatible = "qcom,sc7280-eud","qcom,eud";
+ reg = <0 0x88e0000 0 0x2000>,
+ <0 0x88e2000 0 0x1000>;
+ interrupts-extended = <&pdc 11 IRQ_TYPE_LEVEL_HIGH>;
+ ports {
+ port@0 {
+ eud_ep: endpoint {
+ remote-endpoint = <&usb2_role_switch>;
+ };
+ };
+ port@1 {
+ eud_con: endpoint {
+ remote-endpoint = <&con_eud>;
+ };
+ };
+ };
+ };
+
+ eud_typec: connector {
+ compatible = "usb-c-connector";
+ ports {
+ port@0 {
+ con_eud: endpoint {
+ remote-endpoint = <&eud_con>;
+ };
+ };
+ };
+ };
+
nsp_noc: interconnect@a0c0000 {
reg = <0 0x0a0c0000 0 0x10000>;
compatible = "qcom,sc7280-nsp-noc";
@@ -3213,21 +3252,26 @@
clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>,
<&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
- <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
- clock-names = "cfg_noc", "core", "iface", "mock_utmi",
- "sleep";
+ <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>;
assigned-clock-rates = <19200000>, <200000000>;
interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
- <&pdc 14 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 17 IRQ_TYPE_EDGE_BOTH>,
<&pdc 15 IRQ_TYPE_EDGE_BOTH>,
- <&pdc 17 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "hs_phy_irq", "dp_hs_phy_irq",
- "dm_hs_phy_irq", "ss_phy_irq";
+ <&pdc 14 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hs_phy_irq",
+ "ss_phy_irq",
+ "dm_hs_phy_irq",
+ "dp_hs_phy_irq";
power-domains = <&gcc GCC_USB30_PRIM_GDSC>;
diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
index 7f875bf9390a..b72e8e6c52f3 100644
--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
@@ -1215,11 +1215,15 @@
clocks = <&gcc GCC_CFG_NOC_USB3_AXI_CLK>,
<&gcc GCC_USB30_MASTER_CLK>,
<&gcc GCC_AGGRE2_USB3_AXI_CLK>,
- <&rpmcc RPM_SMD_AGGR2_NOC_CLK>,
+ <&gcc GCC_USB30_SLEEP_CLK>,
<&gcc GCC_USB30_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_SLEEP_CLK>;
- clock-names = "cfg_noc", "core", "iface", "bus",
- "mock_utmi", "sleep";
+ <&rpmcc RPM_SMD_AGGR2_NOC_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi",
+ "bus";
assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_MASTER_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 692cf4be4eef..0692ae0e60a4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -3844,10 +3844,13 @@
clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>,
<&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
- <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
- clock-names = "cfg_noc", "core", "iface", "mock_utmi",
- "sleep";
+ <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>;
@@ -3868,7 +3871,7 @@
<&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_0 0>;
interconnect-names = "usb-ddr", "apps-usb";
- usb_1_dwc3: dwc3@a600000 {
+ usb_1_dwc3: usb@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xcd00>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
@@ -3892,10 +3895,13 @@
clocks = <&gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
<&gcc GCC_USB30_SEC_MASTER_CLK>,
<&gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
- <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_SEC_SLEEP_CLK>;
- clock-names = "cfg_noc", "core", "iface", "mock_utmi",
- "sleep";
+ <&gcc GCC_USB30_SEC_SLEEP_CLK>,
+ <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
assigned-clocks = <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_SEC_MASTER_CLK>;
@@ -3916,7 +3922,7 @@
<&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_1 0>;
interconnect-names = "usb-ddr", "apps-usb";
- usb_2_dwc3: dwc3@a800000 {
+ usb_2_dwc3: usb@a800000 {
compatible = "snps,dwc3";
reg = <0 0x0a800000 0 0xcd00>;
interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
index e81b2a7794fb..135e6e0da27a 100644
--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
@@ -481,18 +481,24 @@
};
usb3: usb@4ef8800 {
- compatible = "qcom,msm8996-dwc3", "qcom,dwc3";
+ compatible = "qcom,sm6125-dwc3", "qcom,dwc3";
reg = <0x04ef8800 0x400>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
- clocks = <&gcc GCC_USB30_PRIM_MASTER_CLK>,
+ clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>,
<&gcc GCC_SYS_NOC_USB3_PRIM_AXI_CLK>,
- <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
- <&gcc GCC_USB3_PRIM_CLKREF_CLK>,
<&gcc GCC_USB30_PRIM_SLEEP_CLK>,
- <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB3_PRIM_CLKREF_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi",
+ "xo";
assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>;
diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
index fb1a0f662575..d4f8f33f3f0c 100644
--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
@@ -1034,10 +1034,13 @@
clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>,
<&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
- <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
- clock-names = "cfg_noc", "core", "iface", "mock_utmi",
- "sleep";
+ <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 17 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index f70ae4c56762..8ea44c4b56b4 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -3614,11 +3614,15 @@
clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>,
<&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
- <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
<&gcc GCC_USB3_SEC_CLKREF_CLK>;
- clock-names = "cfg_noc", "core", "iface", "mock_utmi",
- "sleep", "xo";
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi",
+ "xo";
assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>;
@@ -3635,7 +3639,7 @@
resets = <&gcc GCC_USB30_PRIM_BCR>;
- usb_1_dwc3: dwc3@a600000 {
+ usb_1_dwc3: usb@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xcd00>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
@@ -3659,11 +3663,15 @@
clocks = <&gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
<&gcc GCC_USB30_SEC_MASTER_CLK>,
<&gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
- <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_SEC_SLEEP_CLK>,
+ <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
<&gcc GCC_USB3_SEC_CLKREF_CLK>;
- clock-names = "cfg_noc", "core", "iface", "mock_utmi",
- "sleep", "xo";
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi",
+ "xo";
assigned-clocks = <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_SEC_MASTER_CLK>;
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index dc2562070336..cf0c97bd5ad3 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -2995,11 +2995,15 @@
clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>,
<&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
- <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
<&gcc GCC_USB3_SEC_CLKREF_EN>;
- clock-names = "cfg_noc", "core", "iface", "mock_utmi",
- "sleep", "xo";
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi",
+ "xo";
assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>;
@@ -3046,11 +3050,15 @@
clocks = <&gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
<&gcc GCC_USB30_SEC_MASTER_CLK>,
<&gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
- <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_SEC_SLEEP_CLK>,
+ <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
<&gcc GCC_USB3_SEC_CLKREF_EN>;
- clock-names = "cfg_noc", "core", "iface", "mock_utmi",
- "sleep", "xo";
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi",
+ "xo";
assigned-clocks = <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_SEC_MASTER_CLK>;
diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
index c0137bdcf94b..743cba9b683c 100644
--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
@@ -2449,10 +2449,13 @@
clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>,
<&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
- <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
- clock-names = "cfg_noc", "core", "iface", "mock_utmi",
- "sleep";
+ <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>;
@@ -2492,11 +2495,15 @@
clocks = <&gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
<&gcc GCC_USB30_SEC_MASTER_CLK>,
<&gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
- <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_SEC_SLEEP_CLK>,
+ <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
<&gcc GCC_USB3_SEC_CLKREF_EN>;
- clock-names = "cfg_noc", "core", "iface", "mock_utmi",
- "sleep", "xo";
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi",
+ "xo";
assigned-clocks = <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_SEC_MASTER_CLK>;
diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
index 7f52c3cfdfb7..7d08fad76371 100644
--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
@@ -3107,22 +3107,28 @@
clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>,
<&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
- <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
<&gcc GCC_USB3_0_CLKREF_EN>;
- clock-names = "cfg_noc", "core", "iface", "mock_utmi",
- "sleep", "xo";
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi",
+ "xo";
assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>;
assigned-clock-rates = <19200000>, <200000000>;
interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
- <&pdc 14 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 17 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 15 IRQ_TYPE_EDGE_BOTH>,
- <&pdc 17 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "hs_phy_irq", "dp_hs_phy_irq",
- "dm_hs_phy_irq", "ss_phy_irq";
+ <&pdc 14 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq",
+ "ss_phy_irq",
+ "dm_hs_phy_irq",
+ "dp_hs_phy_irq";
power-domains = <&gcc USB30_PRIM_GDSC>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3308.dtsi b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
index 1cbe2126186e..2dfa67f1cd67 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
@@ -745,10 +745,11 @@
cru: clock-controller@ff500000 {
compatible = "rockchip,rk3308-cru";
reg = <0x0 0xff500000 0x0 0x1000>;
+ clocks = <&xin24m>;
+ clock-names = "xin24m";
+ rockchip,grf = <&grf>;
#clock-cells = <1>;
#reset-cells = <1>;
- rockchip,grf = <&grf>;
-
assigned-clocks = <&cru SCLK_RTC32K>;
assigned-clock-rates = <32768>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index c99da90328e9..4f0b5feaa5e6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -747,6 +747,8 @@
cru: clock-controller@ff760000 {
compatible = "rockchip,rk3368-cru";
reg = <0x0 0xff760000 0x0 0x1000>;
+ clocks = <&xin24m>;
+ clock-names = "xin24m";
rockchip,grf = <&grf>;
#clock-cells = <1>;
#reset-cells = <1>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
index 141a433429b5..1534e11a9ad1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
@@ -652,8 +652,8 @@
compatible = "brcm,bcm43438-bt";
clocks = <&rk817 1>;
clock-names = "lpo";
- device-wake-gpios = <&gpio2 RK_PC1 GPIO_ACTIVE_HIGH>;
- host-wake-gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>;
+ device-wakeup-gpios = <&gpio2 RK_PC1 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>;
shutdown-gpios = <&gpio2 RK_PB7 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&bt_host_wake_l &bt_wake_l &bt_enable_h>;
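
Editor's note: the rename to the -wakeup- spellings matters because GPIO descriptor lookup derives the devicetree property name from the consumer ID the driver requests. An illustrative sketch of that lookup (hypothetical function, not the actual hci_bcm probe code):

#include <linux/gpio/consumer.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * A request for the "device-wakeup" consumer ID is satisfied by a
 * "device-wakeup-gpios" property, so the node above must use the
 * -wakeup- spelling for the Broadcom Bluetooth driver to find it.
 */
static int bcm_bt_example_get_wake(struct device *dev)
{
	struct gpio_desc *device_wake;

	device_wake = devm_gpiod_get_optional(dev, "device-wakeup",
					      GPIOD_OUT_LOW);
	if (IS_ERR(device_wake))
		return PTR_ERR(device_wake);

	return 0;
}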
diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
index 1042e68602de..914f13c0d399 100644
--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
@@ -397,6 +397,8 @@
cru: clock-controller@fdd20000 {
compatible = "rockchip,rk3568-cru";
reg = <0x0 0xfdd20000 0x0 0x1000>;
+ clocks = <&xin24m>;
+ clock-names = "xin24m";
#clock-cells = <1>;
#reset-cells = <1>;
assigned-clocks = <&cru PLL_GPLL>, <&pmucru PLL_PPLL>;
diff --git a/arch/arm64/boot/dts/sprd/whale2.dtsi b/arch/arm64/boot/dts/sprd/whale2.dtsi
index 79b9591c37aa..89d91abbd5d1 100644
--- a/arch/arm64/boot/dts/sprd/whale2.dtsi
+++ b/arch/arm64/boot/dts/sprd/whale2.dtsi
@@ -126,7 +126,9 @@
reg = <0 0x20100000 0 0x4000>;
interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
+ /* For backwards compatibility: */
#dma-channels = <32>;
+ dma-channels = <32>;
clock-names = "enable";
clocks = <&apahb_gate CLK_DMA_EB>;
};
@@ -272,7 +274,9 @@
compatible = "sprd,sc9860-dma";
reg = <0 0x41580000 0 0x4000>;
#dma-cells = <1>;
+ /* For backwards compatibility: */
#dma-channels = <32>;
+ dma-channels = <32>;
clock-names = "enable", "ashb_eb";
clocks = <&agcp_gate CLK_AGCP_DMAAP_EB>,
<&agcp_gate CLK_AGCP_AP_ASHB_EB>;
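
Editor's note: whale2.dtsi keeps the deprecated #dma-channels property next to the new dma-channels name so that both old and new parsers keep working. A sketch of the fallback a driver could use while the binding migrates (illustrative only, not the actual sprd-dma probe code):

#include <linux/of.h>

static u32 example_read_dma_channels(struct device_node *np)
{
	u32 nr;

	/* Prefer the current binding, fall back to the deprecated name. */
	if (of_property_read_u32(np, "dma-channels", &nr) &&
	    of_property_read_u32(np, "#dma-channels", &nr))
		nr = 32;	/* both absent: assume the SoC default */

	return nr;
}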
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 2a965aa0188d..ac85682c013c 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -45,13 +45,25 @@ config CRYPTO_SM3_ARM64_CE
tristate "SM3 digest algorithm (ARMv8.2 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_HASH
- select CRYPTO_LIB_SM3
+ select CRYPTO_SM3
config CRYPTO_SM4_ARM64_CE
tristate "SM4 symmetric cipher (ARMv8.2 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_ALGAPI
- select CRYPTO_LIB_SM4
+ select CRYPTO_SM4
+
+config CRYPTO_SM4_ARM64_CE_BLK
+ tristate "SM4 in ECB/CBC/CFB/CTR modes using ARMv8 Crypto Extensions"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_SKCIPHER
+ select CRYPTO_SM4
+
+config CRYPTO_SM4_ARM64_NEON_BLK
+ tristate "SM4 in ECB/CBC/CFB/CTR modes using NEON instructions"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_SKCIPHER
+ select CRYPTO_SM4
config CRYPTO_GHASH_ARM64_CE
tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions"
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index 09a805cc32d7..bea8995133b1 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -20,9 +20,15 @@ sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o
obj-$(CONFIG_CRYPTO_SM3_ARM64_CE) += sm3-ce.o
sm3-ce-y := sm3-ce-glue.o sm3-ce-core.o
-obj-$(CONFIG_CRYPTO_SM4_ARM64_CE) += sm4-ce.o
+obj-$(CONFIG_CRYPTO_SM4_ARM64_CE) += sm4-ce-cipher.o
+sm4-ce-cipher-y := sm4-ce-cipher-glue.o sm4-ce-cipher-core.o
+
+obj-$(CONFIG_CRYPTO_SM4_ARM64_CE_BLK) += sm4-ce.o
sm4-ce-y := sm4-ce-glue.o sm4-ce-core.o
+obj-$(CONFIG_CRYPTO_SM4_ARM64_NEON_BLK) += sm4-neon.o
+sm4-neon-y := sm4-neon-glue.o sm4-neon-core.o
+
obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o
ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
diff --git a/arch/arm64/crypto/sm4-ce-cipher-core.S b/arch/arm64/crypto/sm4-ce-cipher-core.S
new file mode 100644
index 000000000000..4ac6cfbc5797
--- /dev/null
+++ b/arch/arm64/crypto/sm4-ce-cipher-core.S
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8
+ .set .Lv\b\().4s, \b
+ .endr
+
+ .macro sm4e, rd, rn
+ .inst 0xcec08400 | .L\rd | (.L\rn << 5)
+ .endm
+
+ /*
+ * void sm4_ce_do_crypt(const u32 *rk, u32 *out, const u32 *in);
+ */
+ .text
+SYM_FUNC_START(sm4_ce_do_crypt)
+ ld1 {v8.4s}, [x2]
+ ld1 {v0.4s-v3.4s}, [x0], #64
+CPU_LE( rev32 v8.16b, v8.16b )
+ ld1 {v4.4s-v7.4s}, [x0]
+ sm4e v8.4s, v0.4s
+ sm4e v8.4s, v1.4s
+ sm4e v8.4s, v2.4s
+ sm4e v8.4s, v3.4s
+ sm4e v8.4s, v4.4s
+ sm4e v8.4s, v5.4s
+ sm4e v8.4s, v6.4s
+ sm4e v8.4s, v7.4s
+ rev64 v8.4s, v8.4s
+ ext v8.16b, v8.16b, v8.16b, #8
+CPU_LE( rev32 v8.16b, v8.16b )
+ st1 {v8.4s}, [x1]
+ ret
+SYM_FUNC_END(sm4_ce_do_crypt)
diff --git a/arch/arm64/crypto/sm4-ce-cipher-glue.c b/arch/arm64/crypto/sm4-ce-cipher-glue.c
new file mode 100644
index 000000000000..76a34ef4abbb
--- /dev/null
+++ b/arch/arm64/crypto/sm4-ce-cipher-glue.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <crypto/sm4.h>
+#include <crypto/internal/simd.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/types.h>
+
+MODULE_ALIAS_CRYPTO("sm4");
+MODULE_ALIAS_CRYPTO("sm4-ce");
+MODULE_DESCRIPTION("SM4 symmetric cipher using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage void sm4_ce_do_crypt(const u32 *rk, void *out, const void *in);
+
+static int sm4_ce_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return sm4_expandkey(ctx, key, key_len);
+}
+
+static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!crypto_simd_usable()) {
+ sm4_crypt_block(ctx->rkey_enc, out, in);
+ } else {
+ kernel_neon_begin();
+ sm4_ce_do_crypt(ctx->rkey_enc, out, in);
+ kernel_neon_end();
+ }
+}
+
+static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!crypto_simd_usable()) {
+ sm4_crypt_block(ctx->rkey_dec, out, in);
+ } else {
+ kernel_neon_begin();
+ sm4_ce_do_crypt(ctx->rkey_dec, out, in);
+ kernel_neon_end();
+ }
+}
+
+static struct crypto_alg sm4_ce_alg = {
+ .cra_name = "sm4",
+ .cra_driver_name = "sm4-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sm4_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_u.cipher = {
+ .cia_min_keysize = SM4_KEY_SIZE,
+ .cia_max_keysize = SM4_KEY_SIZE,
+ .cia_setkey = sm4_ce_setkey,
+ .cia_encrypt = sm4_ce_encrypt,
+ .cia_decrypt = sm4_ce_decrypt
+ }
+};
+
+static int __init sm4_ce_mod_init(void)
+{
+ return crypto_register_alg(&sm4_ce_alg);
+}
+
+static void __exit sm4_ce_mod_fini(void)
+{
+ crypto_unregister_alg(&sm4_ce_alg);
+}
+
+module_cpu_feature_match(SM4, sm4_ce_mod_init);
+module_exit(sm4_ce_mod_fini);
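
Editor's note: the crypto_simd_usable() branches above exist because the NEON/CE registers cannot be claimed from every context (for instance from hard interrupt handlers), in which case the code falls back to the generic C implementation. The same guard pattern, reduced to a hypothetical helper with placeholder function pointers:

#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/simd.h>

/*
 * Use the NEON/CE fast path only when the current context allows the
 * FPSIMD state to be saved and restored; otherwise run the plain C
 * fallback. "fast" and "slow" are illustrative placeholders.
 */
static void example_accel_or_fallback(void (*fast)(void), void (*slow)(void))
{
	if (!crypto_simd_usable()) {
		slow();
	} else {
		kernel_neon_begin();
		fast();
		kernel_neon_end();
	}
}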
diff --git a/arch/arm64/crypto/sm4-ce-core.S b/arch/arm64/crypto/sm4-ce-core.S
index 4ac6cfbc5797..934e0f093279 100644
--- a/arch/arm64/crypto/sm4-ce-core.S
+++ b/arch/arm64/crypto/sm4-ce-core.S
@@ -1,36 +1,660 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4 Cipher Algorithm for ARMv8 with Crypto Extensions
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (C) 2022, Alibaba Group.
+ * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
#include <linux/linkage.h>
#include <asm/assembler.h>
- .irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8
- .set .Lv\b\().4s, \b
- .endr
-
- .macro sm4e, rd, rn
- .inst 0xcec08400 | .L\rd | (.L\rn << 5)
- .endm
-
- /*
- * void sm4_ce_do_crypt(const u32 *rk, u32 *out, const u32 *in);
- */
- .text
-SYM_FUNC_START(sm4_ce_do_crypt)
- ld1 {v8.4s}, [x2]
- ld1 {v0.4s-v3.4s}, [x0], #64
-CPU_LE( rev32 v8.16b, v8.16b )
- ld1 {v4.4s-v7.4s}, [x0]
- sm4e v8.4s, v0.4s
- sm4e v8.4s, v1.4s
- sm4e v8.4s, v2.4s
- sm4e v8.4s, v3.4s
- sm4e v8.4s, v4.4s
- sm4e v8.4s, v5.4s
- sm4e v8.4s, v6.4s
- sm4e v8.4s, v7.4s
- rev64 v8.4s, v8.4s
- ext v8.16b, v8.16b, v8.16b, #8
-CPU_LE( rev32 v8.16b, v8.16b )
- st1 {v8.4s}, [x1]
- ret
-SYM_FUNC_END(sm4_ce_do_crypt)
+.arch armv8-a+crypto
+
+.irp b, 0, 1, 2, 3, 4, 5, 6, 7, 16, 20, 24, 25, 26, 27, 28, 29, 30, 31
+ .set .Lv\b\().4s, \b
+.endr
+
+.macro sm4e, vd, vn
+ .inst 0xcec08400 | (.L\vn << 5) | .L\vd
+.endm
+
+.macro sm4ekey, vd, vn, vm
+ .inst 0xce60c800 | (.L\vm << 16) | (.L\vn << 5) | .L\vd
+.endm
+
+/* Register macros */
+
+#define RTMP0 v16
+#define RTMP1 v17
+#define RTMP2 v18
+#define RTMP3 v19
+
+#define RIV v20
+
+/* Helper macros. */
+
+#define PREPARE \
+ ld1 {v24.16b-v27.16b}, [x0], #64; \
+ ld1 {v28.16b-v31.16b}, [x0];
+
+#define SM4_CRYPT_BLK(b0) \
+ rev32 b0.16b, b0.16b; \
+ sm4e b0.4s, v24.4s; \
+ sm4e b0.4s, v25.4s; \
+ sm4e b0.4s, v26.4s; \
+ sm4e b0.4s, v27.4s; \
+ sm4e b0.4s, v28.4s; \
+ sm4e b0.4s, v29.4s; \
+ sm4e b0.4s, v30.4s; \
+ sm4e b0.4s, v31.4s; \
+ rev64 b0.4s, b0.4s; \
+ ext b0.16b, b0.16b, b0.16b, #8; \
+ rev32 b0.16b, b0.16b;
+
+#define SM4_CRYPT_BLK4(b0, b1, b2, b3) \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ rev32 b2.16b, b2.16b; \
+ rev32 b3.16b, b3.16b; \
+ sm4e b0.4s, v24.4s; \
+ sm4e b1.4s, v24.4s; \
+ sm4e b2.4s, v24.4s; \
+ sm4e b3.4s, v24.4s; \
+ sm4e b0.4s, v25.4s; \
+ sm4e b1.4s, v25.4s; \
+ sm4e b2.4s, v25.4s; \
+ sm4e b3.4s, v25.4s; \
+ sm4e b0.4s, v26.4s; \
+ sm4e b1.4s, v26.4s; \
+ sm4e b2.4s, v26.4s; \
+ sm4e b3.4s, v26.4s; \
+ sm4e b0.4s, v27.4s; \
+ sm4e b1.4s, v27.4s; \
+ sm4e b2.4s, v27.4s; \
+ sm4e b3.4s, v27.4s; \
+ sm4e b0.4s, v28.4s; \
+ sm4e b1.4s, v28.4s; \
+ sm4e b2.4s, v28.4s; \
+ sm4e b3.4s, v28.4s; \
+ sm4e b0.4s, v29.4s; \
+ sm4e b1.4s, v29.4s; \
+ sm4e b2.4s, v29.4s; \
+ sm4e b3.4s, v29.4s; \
+ sm4e b0.4s, v30.4s; \
+ sm4e b1.4s, v30.4s; \
+ sm4e b2.4s, v30.4s; \
+ sm4e b3.4s, v30.4s; \
+ sm4e b0.4s, v31.4s; \
+ sm4e b1.4s, v31.4s; \
+ sm4e b2.4s, v31.4s; \
+ sm4e b3.4s, v31.4s; \
+ rev64 b0.4s, b0.4s; \
+ rev64 b1.4s, b1.4s; \
+ rev64 b2.4s, b2.4s; \
+ rev64 b3.4s, b3.4s; \
+ ext b0.16b, b0.16b, b0.16b, #8; \
+ ext b1.16b, b1.16b, b1.16b, #8; \
+ ext b2.16b, b2.16b, b2.16b, #8; \
+ ext b3.16b, b3.16b, b3.16b, #8; \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ rev32 b2.16b, b2.16b; \
+ rev32 b3.16b, b3.16b;
+
+#define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ rev32 b2.16b, b2.16b; \
+ rev32 b3.16b, b3.16b; \
+ rev32 b4.16b, b4.16b; \
+ rev32 b5.16b, b5.16b; \
+ rev32 b6.16b, b6.16b; \
+ rev32 b7.16b, b7.16b; \
+ sm4e b0.4s, v24.4s; \
+ sm4e b1.4s, v24.4s; \
+ sm4e b2.4s, v24.4s; \
+ sm4e b3.4s, v24.4s; \
+ sm4e b4.4s, v24.4s; \
+ sm4e b5.4s, v24.4s; \
+ sm4e b6.4s, v24.4s; \
+ sm4e b7.4s, v24.4s; \
+ sm4e b0.4s, v25.4s; \
+ sm4e b1.4s, v25.4s; \
+ sm4e b2.4s, v25.4s; \
+ sm4e b3.4s, v25.4s; \
+ sm4e b4.4s, v25.4s; \
+ sm4e b5.4s, v25.4s; \
+ sm4e b6.4s, v25.4s; \
+ sm4e b7.4s, v25.4s; \
+ sm4e b0.4s, v26.4s; \
+ sm4e b1.4s, v26.4s; \
+ sm4e b2.4s, v26.4s; \
+ sm4e b3.4s, v26.4s; \
+ sm4e b4.4s, v26.4s; \
+ sm4e b5.4s, v26.4s; \
+ sm4e b6.4s, v26.4s; \
+ sm4e b7.4s, v26.4s; \
+ sm4e b0.4s, v27.4s; \
+ sm4e b1.4s, v27.4s; \
+ sm4e b2.4s, v27.4s; \
+ sm4e b3.4s, v27.4s; \
+ sm4e b4.4s, v27.4s; \
+ sm4e b5.4s, v27.4s; \
+ sm4e b6.4s, v27.4s; \
+ sm4e b7.4s, v27.4s; \
+ sm4e b0.4s, v28.4s; \
+ sm4e b1.4s, v28.4s; \
+ sm4e b2.4s, v28.4s; \
+ sm4e b3.4s, v28.4s; \
+ sm4e b4.4s, v28.4s; \
+ sm4e b5.4s, v28.4s; \
+ sm4e b6.4s, v28.4s; \
+ sm4e b7.4s, v28.4s; \
+ sm4e b0.4s, v29.4s; \
+ sm4e b1.4s, v29.4s; \
+ sm4e b2.4s, v29.4s; \
+ sm4e b3.4s, v29.4s; \
+ sm4e b4.4s, v29.4s; \
+ sm4e b5.4s, v29.4s; \
+ sm4e b6.4s, v29.4s; \
+ sm4e b7.4s, v29.4s; \
+ sm4e b0.4s, v30.4s; \
+ sm4e b1.4s, v30.4s; \
+ sm4e b2.4s, v30.4s; \
+ sm4e b3.4s, v30.4s; \
+ sm4e b4.4s, v30.4s; \
+ sm4e b5.4s, v30.4s; \
+ sm4e b6.4s, v30.4s; \
+ sm4e b7.4s, v30.4s; \
+ sm4e b0.4s, v31.4s; \
+ sm4e b1.4s, v31.4s; \
+ sm4e b2.4s, v31.4s; \
+ sm4e b3.4s, v31.4s; \
+ sm4e b4.4s, v31.4s; \
+ sm4e b5.4s, v31.4s; \
+ sm4e b6.4s, v31.4s; \
+ sm4e b7.4s, v31.4s; \
+ rev64 b0.4s, b0.4s; \
+ rev64 b1.4s, b1.4s; \
+ rev64 b2.4s, b2.4s; \
+ rev64 b3.4s, b3.4s; \
+ rev64 b4.4s, b4.4s; \
+ rev64 b5.4s, b5.4s; \
+ rev64 b6.4s, b6.4s; \
+ rev64 b7.4s, b7.4s; \
+ ext b0.16b, b0.16b, b0.16b, #8; \
+ ext b1.16b, b1.16b, b1.16b, #8; \
+ ext b2.16b, b2.16b, b2.16b, #8; \
+ ext b3.16b, b3.16b, b3.16b, #8; \
+ ext b4.16b, b4.16b, b4.16b, #8; \
+ ext b5.16b, b5.16b, b5.16b, #8; \
+ ext b6.16b, b6.16b, b6.16b, #8; \
+ ext b7.16b, b7.16b, b7.16b, #8; \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ rev32 b2.16b, b2.16b; \
+ rev32 b3.16b, b3.16b; \
+ rev32 b4.16b, b4.16b; \
+ rev32 b5.16b, b5.16b; \
+ rev32 b6.16b, b6.16b; \
+ rev32 b7.16b, b7.16b;
+
+
+.align 3
+SYM_FUNC_START(sm4_ce_expand_key)
+ /* input:
+ * x0: 128-bit key
+ * x1: rkey_enc
+ * x2: rkey_dec
+ * x3: fk array
+ * x4: ck array
+ */
+ ld1 {v0.16b}, [x0];
+ rev32 v0.16b, v0.16b;
+ ld1 {v1.16b}, [x3];
+ /* load ck */
+ ld1 {v24.16b-v27.16b}, [x4], #64;
+ ld1 {v28.16b-v31.16b}, [x4];
+
+ /* input ^ fk */
+ eor v0.16b, v0.16b, v1.16b;
+
+ sm4ekey v0.4s, v0.4s, v24.4s;
+ sm4ekey v1.4s, v0.4s, v25.4s;
+ sm4ekey v2.4s, v1.4s, v26.4s;
+ sm4ekey v3.4s, v2.4s, v27.4s;
+ sm4ekey v4.4s, v3.4s, v28.4s;
+ sm4ekey v5.4s, v4.4s, v29.4s;
+ sm4ekey v6.4s, v5.4s, v30.4s;
+ sm4ekey v7.4s, v6.4s, v31.4s;
+
+ st1 {v0.16b-v3.16b}, [x1], #64;
+ st1 {v4.16b-v7.16b}, [x1];
+ rev64 v7.4s, v7.4s;
+ rev64 v6.4s, v6.4s;
+ rev64 v5.4s, v5.4s;
+ rev64 v4.4s, v4.4s;
+ rev64 v3.4s, v3.4s;
+ rev64 v2.4s, v2.4s;
+ rev64 v1.4s, v1.4s;
+ rev64 v0.4s, v0.4s;
+ ext v7.16b, v7.16b, v7.16b, #8;
+ ext v6.16b, v6.16b, v6.16b, #8;
+ ext v5.16b, v5.16b, v5.16b, #8;
+ ext v4.16b, v4.16b, v4.16b, #8;
+ ext v3.16b, v3.16b, v3.16b, #8;
+ ext v2.16b, v2.16b, v2.16b, #8;
+ ext v1.16b, v1.16b, v1.16b, #8;
+ ext v0.16b, v0.16b, v0.16b, #8;
+ st1 {v7.16b}, [x2], #16;
+ st1 {v6.16b}, [x2], #16;
+ st1 {v5.16b}, [x2], #16;
+ st1 {v4.16b}, [x2], #16;
+ st1 {v3.16b}, [x2], #16;
+ st1 {v2.16b}, [x2], #16;
+ st1 {v1.16b}, [x2], #16;
+ st1 {v0.16b}, [x2];
+
+ ret;
+SYM_FUNC_END(sm4_ce_expand_key)
+
+.align 3
+SYM_FUNC_START(sm4_ce_crypt_block)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ */
+ PREPARE;
+
+ ld1 {v0.16b}, [x2];
+ SM4_CRYPT_BLK(v0);
+ st1 {v0.16b}, [x1];
+
+ ret;
+SYM_FUNC_END(sm4_ce_crypt_block)
+
+.align 3
+SYM_FUNC_START(sm4_ce_crypt)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * w3: nblocks
+ */
+ PREPARE;
+
+.Lcrypt_loop_blk:
+ sub w3, w3, #8;
+ tbnz w3, #31, .Lcrypt_tail8;
+
+ ld1 {v0.16b-v3.16b}, [x2], #64;
+ ld1 {v4.16b-v7.16b}, [x2], #64;
+
+ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
+
+ st1 {v0.16b-v3.16b}, [x1], #64;
+ st1 {v4.16b-v7.16b}, [x1], #64;
+
+ cbz w3, .Lcrypt_end;
+ b .Lcrypt_loop_blk;
+
+.Lcrypt_tail8:
+ add w3, w3, #8;
+ cmp w3, #4;
+ blt .Lcrypt_tail4;
+
+ sub w3, w3, #4;
+
+ ld1 {v0.16b-v3.16b}, [x2], #64;
+ SM4_CRYPT_BLK4(v0, v1, v2, v3);
+ st1 {v0.16b-v3.16b}, [x1], #64;
+
+ cbz w3, .Lcrypt_end;
+
+.Lcrypt_tail4:
+ sub w3, w3, #1;
+
+ ld1 {v0.16b}, [x2], #16;
+ SM4_CRYPT_BLK(v0);
+ st1 {v0.16b}, [x1], #16;
+
+ cbnz w3, .Lcrypt_tail4;
+
+.Lcrypt_end:
+ ret;
+SYM_FUNC_END(sm4_ce_crypt)
+
+.align 3
+SYM_FUNC_START(sm4_ce_cbc_enc)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: iv (big endian, 128 bit)
+ * w4: nblocks
+ */
+ PREPARE;
+
+ ld1 {RIV.16b}, [x3];
+
+.Lcbc_enc_loop:
+ sub w4, w4, #1;
+
+ ld1 {RTMP0.16b}, [x2], #16;
+ eor RIV.16b, RIV.16b, RTMP0.16b;
+
+ SM4_CRYPT_BLK(RIV);
+
+ st1 {RIV.16b}, [x1], #16;
+
+ cbnz w4, .Lcbc_enc_loop;
+
+ /* store new IV */
+ st1 {RIV.16b}, [x3];
+
+ ret;
+SYM_FUNC_END(sm4_ce_cbc_enc)
+
+.align 3
+SYM_FUNC_START(sm4_ce_cbc_dec)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: iv (big endian, 128 bit)
+ * w4: nblocks
+ */
+ PREPARE;
+
+ ld1 {RIV.16b}, [x3];
+
+.Lcbc_loop_blk:
+ sub w4, w4, #8;
+ tbnz w4, #31, .Lcbc_tail8;
+
+ ld1 {v0.16b-v3.16b}, [x2], #64;
+ ld1 {v4.16b-v7.16b}, [x2];
+
+ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
+
+ sub x2, x2, #64;
+ eor v0.16b, v0.16b, RIV.16b;
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
+ eor v1.16b, v1.16b, RTMP0.16b;
+ eor v2.16b, v2.16b, RTMP1.16b;
+ eor v3.16b, v3.16b, RTMP2.16b;
+ st1 {v0.16b-v3.16b}, [x1], #64;
+
+ eor v4.16b, v4.16b, RTMP3.16b;
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
+ eor v5.16b, v5.16b, RTMP0.16b;
+ eor v6.16b, v6.16b, RTMP1.16b;
+ eor v7.16b, v7.16b, RTMP2.16b;
+
+ mov RIV.16b, RTMP3.16b;
+ st1 {v4.16b-v7.16b}, [x1], #64;
+
+ cbz w4, .Lcbc_end;
+ b .Lcbc_loop_blk;
+
+.Lcbc_tail8:
+ add w4, w4, #8;
+ cmp w4, #4;
+ blt .Lcbc_tail4;
+
+ sub w4, w4, #4;
+
+ ld1 {v0.16b-v3.16b}, [x2];
+
+ SM4_CRYPT_BLK4(v0, v1, v2, v3);
+
+ eor v0.16b, v0.16b, RIV.16b;
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
+ eor v1.16b, v1.16b, RTMP0.16b;
+ eor v2.16b, v2.16b, RTMP1.16b;
+ eor v3.16b, v3.16b, RTMP2.16b;
+
+ mov RIV.16b, RTMP3.16b;
+ st1 {v0.16b-v3.16b}, [x1], #64;
+
+ cbz w4, .Lcbc_end;
+
+.Lcbc_tail4:
+ sub w4, w4, #1;
+
+ ld1 {v0.16b}, [x2];
+
+ SM4_CRYPT_BLK(v0);
+
+ eor v0.16b, v0.16b, RIV.16b;
+ ld1 {RIV.16b}, [x2], #16;
+ st1 {v0.16b}, [x1], #16;
+
+ cbnz w4, .Lcbc_tail4;
+
+.Lcbc_end:
+ /* store new IV */
+ st1 {RIV.16b}, [x3];
+
+ ret;
+SYM_FUNC_END(sm4_ce_cbc_dec)
+
+.align 3
+SYM_FUNC_START(sm4_ce_cfb_enc)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: iv (big endian, 128 bit)
+ * w4: nblocks
+ */
+ PREPARE;
+
+ ld1 {RIV.16b}, [x3];
+
+.Lcfb_enc_loop:
+ sub w4, w4, #1;
+
+ SM4_CRYPT_BLK(RIV);
+
+ ld1 {RTMP0.16b}, [x2], #16;
+ eor RIV.16b, RIV.16b, RTMP0.16b;
+ st1 {RIV.16b}, [x1], #16;
+
+ cbnz w4, .Lcfb_enc_loop;
+
+ /* store new IV */
+ st1 {RIV.16b}, [x3];
+
+ ret;
+SYM_FUNC_END(sm4_ce_cfb_enc)
+
+.align 3
+SYM_FUNC_START(sm4_ce_cfb_dec)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: iv (big endian, 128 bit)
+ * w4: nblocks
+ */
+ PREPARE;
+
+ ld1 {v0.16b}, [x3];
+
+.Lcfb_loop_blk:
+ sub w4, w4, #8;
+ tbnz w4, #31, .Lcfb_tail8;
+
+ ld1 {v1.16b, v2.16b, v3.16b}, [x2], #48;
+ ld1 {v4.16b-v7.16b}, [x2];
+
+ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
+
+ sub x2, x2, #48;
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
+ eor v0.16b, v0.16b, RTMP0.16b;
+ eor v1.16b, v1.16b, RTMP1.16b;
+ eor v2.16b, v2.16b, RTMP2.16b;
+ eor v3.16b, v3.16b, RTMP3.16b;
+ st1 {v0.16b-v3.16b}, [x1], #64;
+
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
+ eor v4.16b, v4.16b, RTMP0.16b;
+ eor v5.16b, v5.16b, RTMP1.16b;
+ eor v6.16b, v6.16b, RTMP2.16b;
+ eor v7.16b, v7.16b, RTMP3.16b;
+ st1 {v4.16b-v7.16b}, [x1], #64;
+
+ mov v0.16b, RTMP3.16b;
+
+ cbz w4, .Lcfb_end;
+ b .Lcfb_loop_blk;
+
+.Lcfb_tail8:
+ add w4, w4, #8;
+ cmp w4, #4;
+ blt .Lcfb_tail4;
+
+ sub w4, w4, #4;
+
+ ld1 {v1.16b, v2.16b, v3.16b}, [x2];
+
+ SM4_CRYPT_BLK4(v0, v1, v2, v3);
+
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
+ eor v0.16b, v0.16b, RTMP0.16b;
+ eor v1.16b, v1.16b, RTMP1.16b;
+ eor v2.16b, v2.16b, RTMP2.16b;
+ eor v3.16b, v3.16b, RTMP3.16b;
+ st1 {v0.16b-v3.16b}, [x1], #64;
+
+ mov v0.16b, RTMP3.16b;
+
+ cbz w4, .Lcfb_end;
+
+.Lcfb_tail4:
+ sub w4, w4, #1;
+
+ SM4_CRYPT_BLK(v0);
+
+ ld1 {RTMP0.16b}, [x2], #16;
+ eor v0.16b, v0.16b, RTMP0.16b;
+ st1 {v0.16b}, [x1], #16;
+
+ mov v0.16b, RTMP0.16b;
+
+ cbnz w4, .Lcfb_tail4;
+
+.Lcfb_end:
+ /* store new IV */
+ st1 {v0.16b}, [x3];
+
+ ret;
+SYM_FUNC_END(sm4_ce_cfb_dec)
+
+.align 3
+SYM_FUNC_START(sm4_ce_ctr_enc)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: ctr (big endian, 128 bit)
+ * w4: nblocks
+ */
+ PREPARE;
+
+ ldp x7, x8, [x3];
+ rev x7, x7;
+ rev x8, x8;
+
+.Lctr_loop_blk:
+ sub w4, w4, #8;
+ tbnz w4, #31, .Lctr_tail8;
+
+#define inc_le128(vctr) \
+ mov vctr.d[1], x8; \
+ mov vctr.d[0], x7; \
+ adds x8, x8, #1; \
+ adc x7, x7, xzr; \
+ rev64 vctr.16b, vctr.16b;
+
+ /* construct CTRs */
+ inc_le128(v0); /* +0 */
+ inc_le128(v1); /* +1 */
+ inc_le128(v2); /* +2 */
+ inc_le128(v3); /* +3 */
+ inc_le128(v4); /* +4 */
+ inc_le128(v5); /* +5 */
+ inc_le128(v6); /* +6 */
+ inc_le128(v7); /* +7 */
+
+ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
+
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
+ eor v0.16b, v0.16b, RTMP0.16b;
+ eor v1.16b, v1.16b, RTMP1.16b;
+ eor v2.16b, v2.16b, RTMP2.16b;
+ eor v3.16b, v3.16b, RTMP3.16b;
+ st1 {v0.16b-v3.16b}, [x1], #64;
+
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
+ eor v4.16b, v4.16b, RTMP0.16b;
+ eor v5.16b, v5.16b, RTMP1.16b;
+ eor v6.16b, v6.16b, RTMP2.16b;
+ eor v7.16b, v7.16b, RTMP3.16b;
+ st1 {v4.16b-v7.16b}, [x1], #64;
+
+ cbz w4, .Lctr_end;
+ b .Lctr_loop_blk;
+
+.Lctr_tail8:
+ add w4, w4, #8;
+ cmp w4, #4;
+ blt .Lctr_tail4;
+
+ sub w4, w4, #4;
+
+ /* construct CTRs */
+ inc_le128(v0); /* +0 */
+ inc_le128(v1); /* +1 */
+ inc_le128(v2); /* +2 */
+ inc_le128(v3); /* +3 */
+
+ SM4_CRYPT_BLK4(v0, v1, v2, v3);
+
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
+ eor v0.16b, v0.16b, RTMP0.16b;
+ eor v1.16b, v1.16b, RTMP1.16b;
+ eor v2.16b, v2.16b, RTMP2.16b;
+ eor v3.16b, v3.16b, RTMP3.16b;
+ st1 {v0.16b-v3.16b}, [x1], #64;
+
+ cbz w4, .Lctr_end;
+
+.Lctr_tail4:
+ sub w4, w4, #1;
+
+ /* construct CTRs */
+ inc_le128(v0);
+
+ SM4_CRYPT_BLK(v0);
+
+ ld1 {RTMP0.16b}, [x2], #16;
+ eor v0.16b, v0.16b, RTMP0.16b;
+ st1 {v0.16b}, [x1], #16;
+
+ cbnz w4, .Lctr_tail4;
+
+.Lctr_end:
+ /* store new CTR */
+ rev x7, x7;
+ rev x8, x8;
+ stp x7, x8, [x3];
+
+ ret;
+SYM_FUNC_END(sm4_ce_ctr_enc)
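
Editor's note: the CTR path keeps the 128-bit big-endian counter byte-swapped in a pair of general registers (x7 high half, x8 low half) and propagates the carry with adds/adc. The same update expressed as a C sketch using the kernel's unaligned byte-order helpers:

#include <asm/unaligned.h>
#include <linux/types.h>

/*
 * C equivalent of the inc_le128 carry logic above: treat the 16-byte
 * counter as a big-endian 128-bit value and add one.
 */
static void example_ctr128_inc(u8 ctr[16])
{
	u64 hi = get_unaligned_be64(ctr);
	u64 lo = get_unaligned_be64(ctr + 8);

	if (++lo == 0)		/* carry out of the low 64 bits */
		hi++;

	put_unaligned_be64(hi, ctr);
	put_unaligned_be64(lo, ctr + 8);
}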
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index 9c93cfc4841b..496d55c0d01a 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -1,82 +1,372 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4 Cipher Algorithm, using ARMv8 Crypto Extensions
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (C) 2022, Alibaba Group.
+ * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/cpufeature.h>
#include <asm/neon.h>
#include <asm/simd.h>
-#include <crypto/sm4.h>
#include <crypto/internal/simd.h>
-#include <linux/module.h>
-#include <linux/cpufeature.h>
-#include <linux/crypto.h>
-#include <linux/types.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/sm4.h>
-MODULE_ALIAS_CRYPTO("sm4");
-MODULE_ALIAS_CRYPTO("sm4-ce");
-MODULE_DESCRIPTION("SM4 symmetric cipher using ARMv8 Crypto Extensions");
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
+#define BYTES2BLKS(nbytes) ((nbytes) >> 4)
+
+asmlinkage void sm4_ce_expand_key(const u8 *key, u32 *rkey_enc, u32 *rkey_dec,
+ const u32 *fk, const u32 *ck);
+asmlinkage void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src);
+asmlinkage void sm4_ce_crypt(const u32 *rkey, u8 *dst, const u8 *src,
+ unsigned int nblks);
+asmlinkage void sm4_ce_cbc_enc(const u32 *rkey, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nblks);
+asmlinkage void sm4_ce_cbc_dec(const u32 *rkey, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nblks);
+asmlinkage void sm4_ce_cfb_enc(const u32 *rkey, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nblks);
+asmlinkage void sm4_ce_cfb_dec(const u32 *rkey, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nblks);
+asmlinkage void sm4_ce_ctr_enc(const u32 *rkey, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nblks);
+
+static int sm4_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (key_len != SM4_KEY_SIZE)
+ return -EINVAL;
+
+ sm4_ce_expand_key(key, ctx->rkey_enc, ctx->rkey_dec,
+ crypto_sm4_fk, crypto_sm4_ck);
+ return 0;
+}
+
+static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey)
+{
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = walk.nbytes) > 0) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ unsigned int nblks;
+
+ kernel_neon_begin();
+
+ nblks = BYTES2BLKS(nbytes);
+ if (nblks) {
+ sm4_ce_crypt(rkey, dst, src, nblks);
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
+
+ kernel_neon_end();
+
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ return err;
+}
+
+static int sm4_ecb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return sm4_ecb_do_crypt(req, ctx->rkey_enc);
+}
+
+static int sm4_ecb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return sm4_ecb_do_crypt(req, ctx->rkey_dec);
+}
+
+static int sm4_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = walk.nbytes) > 0) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ unsigned int nblks;
+
+ kernel_neon_begin();
+
+ nblks = BYTES2BLKS(nbytes);
+ if (nblks) {
+ sm4_ce_cbc_enc(ctx->rkey_enc, dst, src, walk.iv, nblks);
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
+
+ kernel_neon_end();
-asmlinkage void sm4_ce_do_crypt(const u32 *rk, void *out, const void *in);
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ return err;
+}
-static int sm4_ce_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int key_len)
+static int sm4_cbc_decrypt(struct skcipher_request *req)
{
- struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = walk.nbytes) > 0) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ unsigned int nblks;
+
+ kernel_neon_begin();
+
+ nblks = BYTES2BLKS(nbytes);
+ if (nblks) {
+ sm4_ce_cbc_dec(ctx->rkey_dec, dst, src, walk.iv, nblks);
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
+
+ kernel_neon_end();
+
+ err = skcipher_walk_done(&walk, nbytes);
+ }
- return sm4_expandkey(ctx, key, key_len);
+ return err;
}
-static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+static int sm4_cfb_encrypt(struct skcipher_request *req)
{
- const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = walk.nbytes) > 0) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ unsigned int nblks;
- if (!crypto_simd_usable()) {
- sm4_crypt_block(ctx->rkey_enc, out, in);
- } else {
kernel_neon_begin();
- sm4_ce_do_crypt(ctx->rkey_enc, out, in);
+
+ nblks = BYTES2BLKS(nbytes);
+ if (nblks) {
+ sm4_ce_cfb_enc(ctx->rkey_enc, dst, src, walk.iv, nblks);
+ dst += nblks * SM4_BLOCK_SIZE;
+ src += nblks * SM4_BLOCK_SIZE;
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
+
+ /* tail */
+ if (walk.nbytes == walk.total && nbytes > 0) {
+ u8 keystream[SM4_BLOCK_SIZE];
+
+ sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv);
+ crypto_xor_cpy(dst, src, keystream, nbytes);
+ nbytes = 0;
+ }
+
kernel_neon_end();
+
+ err = skcipher_walk_done(&walk, nbytes);
}
+
+ return err;
}
-static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+static int sm4_cfb_decrypt(struct skcipher_request *req)
{
- const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = walk.nbytes) > 0) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ unsigned int nblks;
- if (!crypto_simd_usable()) {
- sm4_crypt_block(ctx->rkey_dec, out, in);
- } else {
kernel_neon_begin();
- sm4_ce_do_crypt(ctx->rkey_dec, out, in);
+
+ nblks = BYTES2BLKS(nbytes);
+ if (nblks) {
+ sm4_ce_cfb_dec(ctx->rkey_enc, dst, src, walk.iv, nblks);
+ dst += nblks * SM4_BLOCK_SIZE;
+ src += nblks * SM4_BLOCK_SIZE;
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
+
+ /* tail */
+ if (walk.nbytes == walk.total && nbytes > 0) {
+ u8 keystream[SM4_BLOCK_SIZE];
+
+ sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv);
+ crypto_xor_cpy(dst, src, keystream, nbytes);
+ nbytes = 0;
+ }
+
kernel_neon_end();
+
+ err = skcipher_walk_done(&walk, nbytes);
}
+
+ return err;
}
-static struct crypto_alg sm4_ce_alg = {
- .cra_name = "sm4",
- .cra_driver_name = "sm4-ce",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = SM4_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct sm4_ctx),
- .cra_module = THIS_MODULE,
- .cra_u.cipher = {
- .cia_min_keysize = SM4_KEY_SIZE,
- .cia_max_keysize = SM4_KEY_SIZE,
- .cia_setkey = sm4_ce_setkey,
- .cia_encrypt = sm4_ce_encrypt,
- .cia_decrypt = sm4_ce_decrypt
+static int sm4_ctr_crypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = walk.nbytes) > 0) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ unsigned int nblks;
+
+ kernel_neon_begin();
+
+ nblks = BYTES2BLKS(nbytes);
+ if (nblks) {
+ sm4_ce_ctr_enc(ctx->rkey_enc, dst, src, walk.iv, nblks);
+ dst += nblks * SM4_BLOCK_SIZE;
+ src += nblks * SM4_BLOCK_SIZE;
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
+
+ /* tail */
+ if (walk.nbytes == walk.total && nbytes > 0) {
+ u8 keystream[SM4_BLOCK_SIZE];
+
+ sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv);
+ crypto_inc(walk.iv, SM4_BLOCK_SIZE);
+ crypto_xor_cpy(dst, src, keystream, nbytes);
+ nbytes = 0;
+ }
+
+ kernel_neon_end();
+
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ return err;
+}
+
+static struct skcipher_alg sm4_algs[] = {
+ {
+ .base = {
+ .cra_name = "ecb(sm4)",
+ .cra_driver_name = "ecb-sm4-ce",
+ .cra_priority = 400,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sm4_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .setkey = sm4_setkey,
+ .encrypt = sm4_ecb_encrypt,
+ .decrypt = sm4_ecb_decrypt,
+ }, {
+ .base = {
+ .cra_name = "cbc(sm4)",
+ .cra_driver_name = "cbc-sm4-ce",
+ .cra_priority = 400,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sm4_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .setkey = sm4_setkey,
+ .encrypt = sm4_cbc_encrypt,
+ .decrypt = sm4_cbc_decrypt,
+ }, {
+ .base = {
+ .cra_name = "cfb(sm4)",
+ .cra_driver_name = "cfb-sm4-ce",
+ .cra_priority = 400,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct sm4_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .chunksize = SM4_BLOCK_SIZE,
+ .setkey = sm4_setkey,
+ .encrypt = sm4_cfb_encrypt,
+ .decrypt = sm4_cfb_decrypt,
+ }, {
+ .base = {
+ .cra_name = "ctr(sm4)",
+ .cra_driver_name = "ctr-sm4-ce",
+ .cra_priority = 400,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct sm4_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .chunksize = SM4_BLOCK_SIZE,
+ .setkey = sm4_setkey,
+ .encrypt = sm4_ctr_crypt,
+ .decrypt = sm4_ctr_crypt,
}
};
-static int __init sm4_ce_mod_init(void)
+static int __init sm4_init(void)
{
- return crypto_register_alg(&sm4_ce_alg);
+ return crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
}
-static void __exit sm4_ce_mod_fini(void)
+static void __exit sm4_exit(void)
{
- crypto_unregister_alg(&sm4_ce_alg);
+ crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
}
-module_cpu_feature_match(SM4, sm4_ce_mod_init);
-module_exit(sm4_ce_mod_fini);
+module_cpu_feature_match(SM4, sm4_init);
+module_exit(sm4_exit);
+
+MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR using ARMv8 Crypto Extensions");
+MODULE_ALIAS_CRYPTO("sm4-ce");
+MODULE_ALIAS_CRYPTO("sm4");
+MODULE_ALIAS_CRYPTO("ecb(sm4)");
+MODULE_ALIAS_CRYPTO("cbc(sm4)");
+MODULE_ALIAS_CRYPTO("cfb(sm4)");
+MODULE_ALIAS_CRYPTO("ctr(sm4)");
+MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm64/crypto/sm4-neon-core.S b/arch/arm64/crypto/sm4-neon-core.S
new file mode 100644
index 000000000000..3d5256b354d2
--- /dev/null
+++ b/arch/arm64/crypto/sm4-neon-core.S
@@ -0,0 +1,487 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4 Cipher Algorithm for ARMv8 NEON
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (C) 2022, Alibaba Group.
+ * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/* Register macros */
+
+#define RTMP0 v8
+#define RTMP1 v9
+#define RTMP2 v10
+#define RTMP3 v11
+
+#define RX0 v12
+#define RX1 v13
+#define RKEY v14
+#define RIV v15
+
+/* Helper macros. */
+
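+/*
+ * PREPARE loads the 256-byte crypto_sm4_sbox table into v16-v31 so the
+ * rounds below can perform the S-box lookup with tbl/tbx.
+ */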
+#define PREPARE \
+ adr_l x5, crypto_sm4_sbox; \
+ ld1 {v16.16b-v19.16b}, [x5], #64; \
+ ld1 {v20.16b-v23.16b}, [x5], #64; \
+ ld1 {v24.16b-v27.16b}, [x5], #64; \
+ ld1 {v28.16b-v31.16b}, [x5];
+
+#define transpose_4x4(s0, s1, s2, s3) \
+ zip1 RTMP0.4s, s0.4s, s1.4s; \
+ zip1 RTMP1.4s, s2.4s, s3.4s; \
+ zip2 RTMP2.4s, s0.4s, s1.4s; \
+ zip2 RTMP3.4s, s2.4s, s3.4s; \
+ zip1 s0.2d, RTMP0.2d, RTMP1.2d; \
+ zip2 s1.2d, RTMP0.2d, RTMP1.2d; \
+ zip1 s2.2d, RTMP2.2d, RTMP3.2d; \
+ zip2 s3.2d, RTMP2.2d, RTMP3.2d;
+
+#define rotate_clockwise_90(s0, s1, s2, s3) \
+ zip1 RTMP0.4s, s1.4s, s0.4s; \
+ zip2 RTMP1.4s, s1.4s, s0.4s; \
+ zip1 RTMP2.4s, s3.4s, s2.4s; \
+ zip2 RTMP3.4s, s3.4s, s2.4s; \
+ zip1 s0.2d, RTMP2.2d, RTMP0.2d; \
+ zip2 s1.2d, RTMP2.2d, RTMP0.2d; \
+ zip1 s2.2d, RTMP3.2d, RTMP1.2d; \
+ zip2 s3.2d, RTMP3.2d, RTMP1.2d;
+
+#define ROUND4(round, s0, s1, s2, s3) \
+ dup RX0.4s, RKEY.s[round]; \
+ /* rk ^ s1 ^ s2 ^ s3 */ \
+ eor RTMP1.16b, s2.16b, s3.16b; \
+ eor RX0.16b, RX0.16b, s1.16b; \
+ eor RX0.16b, RX0.16b, RTMP1.16b; \
+ \
+ /* sbox, non-linear part */ \
+ movi RTMP3.16b, #64; /* sizeof(sbox) / 4 */ \
+ tbl RTMP0.16b, {v16.16b-v19.16b}, RX0.16b; \
+ sub RX0.16b, RX0.16b, RTMP3.16b; \
+ tbx RTMP0.16b, {v20.16b-v23.16b}, RX0.16b; \
+ sub RX0.16b, RX0.16b, RTMP3.16b; \
+ tbx RTMP0.16b, {v24.16b-v27.16b}, RX0.16b; \
+ sub RX0.16b, RX0.16b, RTMP3.16b; \
+ tbx RTMP0.16b, {v28.16b-v31.16b}, RX0.16b; \
+ \
+ /* linear part */ \
+ shl RTMP1.4s, RTMP0.4s, #8; \
+ shl RTMP2.4s, RTMP0.4s, #16; \
+ shl RTMP3.4s, RTMP0.4s, #24; \
+ sri RTMP1.4s, RTMP0.4s, #(32-8); \
+ sri RTMP2.4s, RTMP0.4s, #(32-16); \
+ sri RTMP3.4s, RTMP0.4s, #(32-24); \
+ /* RTMP1 = x ^ rol32(x, 8) ^ rol32(x, 16) */ \
+ eor RTMP1.16b, RTMP1.16b, RTMP0.16b; \
+ eor RTMP1.16b, RTMP1.16b, RTMP2.16b; \
+ /* RTMP3 = x ^ rol32(x, 24) ^ rol32(RTMP1, 2) */ \
+ eor RTMP3.16b, RTMP3.16b, RTMP0.16b; \
+ shl RTMP2.4s, RTMP1.4s, #2; \
+ sri RTMP2.4s, RTMP1.4s, #(32-2); \
+ eor RTMP3.16b, RTMP3.16b, RTMP2.16b; \
+ /* s0 ^= RTMP3 */ \
+ eor s0.16b, s0.16b, RTMP3.16b;
+
+#define SM4_CRYPT_BLK4(b0, b1, b2, b3) \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ rev32 b2.16b, b2.16b; \
+ rev32 b3.16b, b3.16b; \
+ \
+ transpose_4x4(b0, b1, b2, b3); \
+ \
+ mov x6, 8; \
+4: \
+ ld1 {RKEY.4s}, [x0], #16; \
+ subs x6, x6, #1; \
+ \
+ ROUND4(0, b0, b1, b2, b3); \
+ ROUND4(1, b1, b2, b3, b0); \
+ ROUND4(2, b2, b3, b0, b1); \
+ ROUND4(3, b3, b0, b1, b2); \
+ \
+ bne 4b; \
+ \
+ rotate_clockwise_90(b0, b1, b2, b3); \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ rev32 b2.16b, b2.16b; \
+ rev32 b3.16b, b3.16b; \
+ \
+ /* repoint to rkey */ \
+ sub x0, x0, #128;
+
+#define ROUND8(round, s0, s1, s2, s3, t0, t1, t2, t3) \
+ /* rk ^ s1 ^ s2 ^ s3 */ \
+ dup RX0.4s, RKEY.s[round]; \
+ eor RTMP0.16b, s2.16b, s3.16b; \
+ mov RX1.16b, RX0.16b; \
+ eor RTMP1.16b, t2.16b, t3.16b; \
+ eor RX0.16b, RX0.16b, s1.16b; \
+ eor RX1.16b, RX1.16b, t1.16b; \
+ eor RX0.16b, RX0.16b, RTMP0.16b; \
+ eor RX1.16b, RX1.16b, RTMP1.16b; \
+ \
+ /* sbox, non-linear part */ \
+ movi RTMP3.16b, #64; /* sizeof(sbox) / 4 */ \
+ tbl RTMP0.16b, {v16.16b-v19.16b}, RX0.16b; \
+ tbl RTMP1.16b, {v16.16b-v19.16b}, RX1.16b; \
+ sub RX0.16b, RX0.16b, RTMP3.16b; \
+ sub RX1.16b, RX1.16b, RTMP3.16b; \
+ tbx RTMP0.16b, {v20.16b-v23.16b}, RX0.16b; \
+ tbx RTMP1.16b, {v20.16b-v23.16b}, RX1.16b; \
+ sub RX0.16b, RX0.16b, RTMP3.16b; \
+ sub RX1.16b, RX1.16b, RTMP3.16b; \
+ tbx RTMP0.16b, {v24.16b-v27.16b}, RX0.16b; \
+ tbx RTMP1.16b, {v24.16b-v27.16b}, RX1.16b; \
+ sub RX0.16b, RX0.16b, RTMP3.16b; \
+ sub RX1.16b, RX1.16b, RTMP3.16b; \
+ tbx RTMP0.16b, {v28.16b-v31.16b}, RX0.16b; \
+ tbx RTMP1.16b, {v28.16b-v31.16b}, RX1.16b; \
+ \
+ /* linear part */ \
+ shl RX0.4s, RTMP0.4s, #8; \
+ shl RX1.4s, RTMP1.4s, #8; \
+ shl RTMP2.4s, RTMP0.4s, #16; \
+ shl RTMP3.4s, RTMP1.4s, #16; \
+ sri RX0.4s, RTMP0.4s, #(32 - 8); \
+ sri RX1.4s, RTMP1.4s, #(32 - 8); \
+ sri RTMP2.4s, RTMP0.4s, #(32 - 16); \
+ sri RTMP3.4s, RTMP1.4s, #(32 - 16); \
+ /* RX = x ^ rol32(x, 8) ^ rol32(x, 16) */ \
+ eor RX0.16b, RX0.16b, RTMP0.16b; \
+ eor RX1.16b, RX1.16b, RTMP1.16b; \
+ eor RX0.16b, RX0.16b, RTMP2.16b; \
+ eor RX1.16b, RX1.16b, RTMP3.16b; \
+ /* RTMP0/1 ^= x ^ rol32(x, 24) ^ rol32(RX, 2) */ \
+ shl RTMP2.4s, RTMP0.4s, #24; \
+ shl RTMP3.4s, RTMP1.4s, #24; \
+ sri RTMP2.4s, RTMP0.4s, #(32 - 24); \
+ sri RTMP3.4s, RTMP1.4s, #(32 - 24); \
+ eor RTMP0.16b, RTMP0.16b, RTMP2.16b; \
+ eor RTMP1.16b, RTMP1.16b, RTMP3.16b; \
+ shl RTMP2.4s, RX0.4s, #2; \
+ shl RTMP3.4s, RX1.4s, #2; \
+ sri RTMP2.4s, RX0.4s, #(32 - 2); \
+ sri RTMP3.4s, RX1.4s, #(32 - 2); \
+ eor RTMP0.16b, RTMP0.16b, RTMP2.16b; \
+ eor RTMP1.16b, RTMP1.16b, RTMP3.16b; \
+ /* s0/t0 ^= RTMP0/1 */ \
+ eor s0.16b, s0.16b, RTMP0.16b; \
+ eor t0.16b, t0.16b, RTMP1.16b;
+
+#define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ rev32 b2.16b, b2.16b; \
+ rev32 b3.16b, b3.16b; \
+ rev32 b4.16b, b4.16b; \
+ rev32 b5.16b, b5.16b; \
+ rev32 b6.16b, b6.16b; \
+ rev32 b7.16b, b7.16b; \
+ \
+ transpose_4x4(b0, b1, b2, b3); \
+ transpose_4x4(b4, b5, b6, b7); \
+ \
+ mov x6, 8; \
+8: \
+ ld1 {RKEY.4s}, [x0], #16; \
+ subs x6, x6, #1; \
+ \
+ ROUND8(0, b0, b1, b2, b3, b4, b5, b6, b7); \
+ ROUND8(1, b1, b2, b3, b0, b5, b6, b7, b4); \
+ ROUND8(2, b2, b3, b0, b1, b6, b7, b4, b5); \
+ ROUND8(3, b3, b0, b1, b2, b7, b4, b5, b6); \
+ \
+ bne 8b; \
+ \
+ rotate_clockwise_90(b0, b1, b2, b3); \
+ rotate_clockwise_90(b4, b5, b6, b7); \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ rev32 b2.16b, b2.16b; \
+ rev32 b3.16b, b3.16b; \
+ rev32 b4.16b, b4.16b; \
+ rev32 b5.16b, b5.16b; \
+ rev32 b6.16b, b6.16b; \
+ rev32 b7.16b, b7.16b; \
+ \
+ /* repoint to rkey */ \
+ sub x0, x0, #128;
+
+
+.align 3
+SYM_FUNC_START_LOCAL(__sm4_neon_crypt_blk1_4)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * w3: num blocks (1..4)
+ */
+ PREPARE;
+
+ ld1 {v0.16b}, [x2], #16;
+ mov v1.16b, v0.16b;
+ mov v2.16b, v0.16b;
+ mov v3.16b, v0.16b;
+ cmp w3, #2;
+ blt .Lblk4_load_input_done;
+ ld1 {v1.16b}, [x2], #16;
+ beq .Lblk4_load_input_done;
+ ld1 {v2.16b}, [x2], #16;
+ cmp w3, #3;
+ beq .Lblk4_load_input_done;
+ ld1 {v3.16b}, [x2];
+
+.Lblk4_load_input_done:
+ SM4_CRYPT_BLK4(v0, v1, v2, v3);
+
+ st1 {v0.16b}, [x1], #16;
+ cmp w3, #2;
+ blt .Lblk4_store_output_done;
+ st1 {v1.16b}, [x1], #16;
+ beq .Lblk4_store_output_done;
+ st1 {v2.16b}, [x1], #16;
+ cmp w3, #3;
+ beq .Lblk4_store_output_done;
+ st1 {v3.16b}, [x1];
+
+.Lblk4_store_output_done:
+ ret;
+SYM_FUNC_END(__sm4_neon_crypt_blk1_4)
+
+.align 3
+SYM_FUNC_START(sm4_neon_crypt_blk1_8)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * w3: num blocks (1..8)
+ */
+ cmp w3, #5;
+ blt __sm4_neon_crypt_blk1_4;
+
+ PREPARE;
+
+ ld1 {v0.16b-v3.16b}, [x2], #64;
+ ld1 {v4.16b}, [x2], #16;
+ mov v5.16b, v4.16b;
+ mov v6.16b, v4.16b;
+ mov v7.16b, v4.16b;
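+ /* flags still hold "cmp w3, #5" from above: equal means exactly 5 blocks */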
+ beq .Lblk8_load_input_done;
+ ld1 {v5.16b}, [x2], #16;
+ cmp w3, #7;
+ blt .Lblk8_load_input_done;
+ ld1 {v6.16b}, [x2], #16;
+ beq .Lblk8_load_input_done;
+ ld1 {v7.16b}, [x2];
+
+.Lblk8_load_input_done:
+ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
+
+ cmp w3, #6;
+ st1 {v0.16b-v3.16b}, [x1], #64;
+ st1 {v4.16b}, [x1], #16;
+ blt .Lblk8_store_output_done;
+ st1 {v5.16b}, [x1], #16;
+ beq .Lblk8_store_output_done;
+ st1 {v6.16b}, [x1], #16;
+ cmp w3, #7;
+ beq .Lblk8_store_output_done;
+ st1 {v7.16b}, [x1];
+
+.Lblk8_store_output_done:
+ ret;
+SYM_FUNC_END(sm4_neon_crypt_blk1_8)
+
+.align 3
+SYM_FUNC_START(sm4_neon_crypt_blk8)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * w3: nblocks (multiples of 8)
+ */
+ PREPARE;
+
+.Lcrypt_loop_blk:
+ subs w3, w3, #8;
+ bmi .Lcrypt_end;
+
+ ld1 {v0.16b-v3.16b}, [x2], #64;
+ ld1 {v4.16b-v7.16b}, [x2], #64;
+
+ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
+
+ st1 {v0.16b-v3.16b}, [x1], #64;
+ st1 {v4.16b-v7.16b}, [x1], #64;
+
+ b .Lcrypt_loop_blk;
+
+.Lcrypt_end:
+ ret;
+SYM_FUNC_END(sm4_neon_crypt_blk8)
+
+.align 3
+SYM_FUNC_START(sm4_neon_cbc_dec_blk8)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: iv (big endian, 128 bit)
+ * w4: nblocks (multiples of 8)
+ */
+ PREPARE;
+
+ ld1 {RIV.16b}, [x3];
+
+.Lcbc_loop_blk:
+ subs w4, w4, #8;
+ bmi .Lcbc_end;
+
+ ld1 {v0.16b-v3.16b}, [x2], #64;
+ ld1 {v4.16b-v7.16b}, [x2];
+
+ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
+
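+ /*
+ * Undo the chaining: XOR block 0 with the IV and each following
+ * block with the preceding ciphertext block, reloaded from the
+ * source buffer; the last ciphertext block becomes the next IV.
+ */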
+ sub x2, x2, #64;
+ eor v0.16b, v0.16b, RIV.16b;
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
+ eor v1.16b, v1.16b, RTMP0.16b;
+ eor v2.16b, v2.16b, RTMP1.16b;
+ eor v3.16b, v3.16b, RTMP2.16b;
+ st1 {v0.16b-v3.16b}, [x1], #64;
+
+ eor v4.16b, v4.16b, RTMP3.16b;
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
+ eor v5.16b, v5.16b, RTMP0.16b;
+ eor v6.16b, v6.16b, RTMP1.16b;
+ eor v7.16b, v7.16b, RTMP2.16b;
+
+ mov RIV.16b, RTMP3.16b;
+ st1 {v4.16b-v7.16b}, [x1], #64;
+
+ b .Lcbc_loop_blk;
+
+.Lcbc_end:
+ /* store new IV */
+ st1 {RIV.16b}, [x3];
+
+ ret;
+SYM_FUNC_END(sm4_neon_cbc_dec_blk8)
+
+.align 3
+SYM_FUNC_START(sm4_neon_cfb_dec_blk8)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: iv (big endian, 128 bit)
+ * w4: nblocks (multiples of 8)
+ */
+ PREPARE;
+
+ ld1 {v0.16b}, [x3];
+
+.Lcfb_loop_blk:
+ subs w4, w4, #8;
+ bmi .Lcfb_end;
+
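+ /*
+ * v0 already holds the IV; load the next seven ciphertext blocks
+ * into v1-v7 and encrypt all eight to produce the keystream.
+ */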
+ ld1 {v1.16b, v2.16b, v3.16b}, [x2], #48;
+ ld1 {v4.16b-v7.16b}, [x2];
+
+ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
+
+ sub x2, x2, #48;
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
+ eor v0.16b, v0.16b, RTMP0.16b;
+ eor v1.16b, v1.16b, RTMP1.16b;
+ eor v2.16b, v2.16b, RTMP2.16b;
+ eor v3.16b, v3.16b, RTMP3.16b;
+ st1 {v0.16b-v3.16b}, [x1], #64;
+
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
+ eor v4.16b, v4.16b, RTMP0.16b;
+ eor v5.16b, v5.16b, RTMP1.16b;
+ eor v6.16b, v6.16b, RTMP2.16b;
+ eor v7.16b, v7.16b, RTMP3.16b;
+ st1 {v4.16b-v7.16b}, [x1], #64;
+
+ mov v0.16b, RTMP3.16b;
+
+ b .Lcfb_loop_blk;
+
+.Lcfb_end:
+ /* store new IV */
+ st1 {v0.16b}, [x3];
+
+ ret;
+SYM_FUNC_END(sm4_neon_cfb_dec_blk8)
+
+.align 3
+SYM_FUNC_START(sm4_neon_ctr_enc_blk8)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: ctr (big endian, 128 bit)
+ * w4: nblocks (multiples of 8)
+ */
+ PREPARE;
+
+ ldp x7, x8, [x3];
+ rev x7, x7;
+ rev x8, x8;
+
+.Lctr_loop_blk:
+ subs w4, w4, #8;
+ bmi .Lctr_end;
+
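+/* vctr := current 128-bit counter in big-endian form; the native-endian copy in x7:x8 is then advanced by one */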
+#define inc_le128(vctr) \
+ mov vctr.d[1], x8; \
+ mov vctr.d[0], x7; \
+ adds x8, x8, #1; \
+ adc x7, x7, xzr; \
+ rev64 vctr.16b, vctr.16b;
+
+ /* construct CTRs */
+ inc_le128(v0); /* +0 */
+ inc_le128(v1); /* +1 */
+ inc_le128(v2); /* +2 */
+ inc_le128(v3); /* +3 */
+ inc_le128(v4); /* +4 */
+ inc_le128(v5); /* +5 */
+ inc_le128(v6); /* +6 */
+ inc_le128(v7); /* +7 */
+
+ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
+
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
+ eor v0.16b, v0.16b, RTMP0.16b;
+ eor v1.16b, v1.16b, RTMP1.16b;
+ eor v2.16b, v2.16b, RTMP2.16b;
+ eor v3.16b, v3.16b, RTMP3.16b;
+ st1 {v0.16b-v3.16b}, [x1], #64;
+
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
+ eor v4.16b, v4.16b, RTMP0.16b;
+ eor v5.16b, v5.16b, RTMP1.16b;
+ eor v6.16b, v6.16b, RTMP2.16b;
+ eor v7.16b, v7.16b, RTMP3.16b;
+ st1 {v4.16b-v7.16b}, [x1], #64;
+
+ b .Lctr_loop_blk;
+
+.Lctr_end:
+ /* store new CTR */
+ rev x7, x7;
+ rev x8, x8;
+ stp x7, x8, [x3];
+
+ ret;
+SYM_FUNC_END(sm4_neon_ctr_enc_blk8)
diff --git a/arch/arm64/crypto/sm4-neon-glue.c b/arch/arm64/crypto/sm4-neon-glue.c
new file mode 100644
index 000000000000..03a6a6866a31
--- /dev/null
+++ b/arch/arm64/crypto/sm4-neon-glue.c
@@ -0,0 +1,442 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4 Cipher Algorithm, using ARMv8 NEON
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (C) 2022, Alibaba Group.
+ * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/cpufeature.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/sm4.h>
+
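+/* Whole 16-byte blocks in a byte count; BYTES2BLK8 rounds down to a multiple of 8 blocks for the 8-block NEON routines. */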
+#define BYTES2BLKS(nbytes) ((nbytes) >> 4)
+#define BYTES2BLK8(nbytes) (((nbytes) >> 4) & ~(8 - 1))
+
+asmlinkage void sm4_neon_crypt_blk1_8(const u32 *rkey, u8 *dst, const u8 *src,
+ unsigned int nblks);
+asmlinkage void sm4_neon_crypt_blk8(const u32 *rkey, u8 *dst, const u8 *src,
+ unsigned int nblks);
+asmlinkage void sm4_neon_cbc_dec_blk8(const u32 *rkey, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nblks);
+asmlinkage void sm4_neon_cfb_dec_blk8(const u32 *rkey, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nblks);
+asmlinkage void sm4_neon_ctr_enc_blk8(const u32 *rkey, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nblks);
+
+static int sm4_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return sm4_expandkey(ctx, key, key_len);
+}
+
+static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey)
+{
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = walk.nbytes) > 0) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ unsigned int nblks;
+
+ kernel_neon_begin();
+
+ nblks = BYTES2BLK8(nbytes);
+ if (nblks) {
+ sm4_neon_crypt_blk8(rkey, dst, src, nblks);
+ dst += nblks * SM4_BLOCK_SIZE;
+ src += nblks * SM4_BLOCK_SIZE;
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
+
+ nblks = BYTES2BLKS(nbytes);
+ if (nblks) {
+ sm4_neon_crypt_blk1_8(rkey, dst, src, nblks);
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
+
+ kernel_neon_end();
+
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ return err;
+}
+
+static int sm4_ecb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return sm4_ecb_do_crypt(req, ctx->rkey_enc);
+}
+
+static int sm4_ecb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return sm4_ecb_do_crypt(req, ctx->rkey_dec);
+}
+
+static int sm4_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = walk.nbytes) > 0) {
+ const u8 *iv = walk.iv;
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+
+ while (nbytes >= SM4_BLOCK_SIZE) {
+ crypto_xor_cpy(dst, src, iv, SM4_BLOCK_SIZE);
+ sm4_crypt_block(ctx->rkey_enc, dst, dst);
+ iv = dst;
+ src += SM4_BLOCK_SIZE;
+ dst += SM4_BLOCK_SIZE;
+ nbytes -= SM4_BLOCK_SIZE;
+ }
+ if (iv != walk.iv)
+ memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
+
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ return err;
+}
+
+static int sm4_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = walk.nbytes) > 0) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ unsigned int nblks;
+
+ kernel_neon_begin();
+
+ nblks = BYTES2BLK8(nbytes);
+ if (nblks) {
+ sm4_neon_cbc_dec_blk8(ctx->rkey_dec, dst, src,
+ walk.iv, nblks);
+ dst += nblks * SM4_BLOCK_SIZE;
+ src += nblks * SM4_BLOCK_SIZE;
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
+
+ nblks = BYTES2BLKS(nbytes);
+ if (nblks) {
+ u8 keystream[SM4_BLOCK_SIZE * 8];
+ u8 iv[SM4_BLOCK_SIZE];
+ int i;
+
+ sm4_neon_crypt_blk1_8(ctx->rkey_dec, keystream,
+ src, nblks);
+
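+ /*
+ * Apply the chaining in place, walking backwards so that the
+ * previous ciphertext block is still intact when it is XORed
+ * in; the last ciphertext block is saved as the new IV.
+ */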
+ src += ((int)nblks - 2) * SM4_BLOCK_SIZE;
+ dst += (nblks - 1) * SM4_BLOCK_SIZE;
+ memcpy(iv, src + SM4_BLOCK_SIZE, SM4_BLOCK_SIZE);
+
+ for (i = nblks - 1; i > 0; i--) {
+ crypto_xor_cpy(dst, src,
+ &keystream[i * SM4_BLOCK_SIZE],
+ SM4_BLOCK_SIZE);
+ src -= SM4_BLOCK_SIZE;
+ dst -= SM4_BLOCK_SIZE;
+ }
+ crypto_xor_cpy(dst, walk.iv,
+ keystream, SM4_BLOCK_SIZE);
+ memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
+
+ kernel_neon_end();
+
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ return err;
+}
+
+static int sm4_cfb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = walk.nbytes) > 0) {
+ u8 keystream[SM4_BLOCK_SIZE];
+ const u8 *iv = walk.iv;
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+
+ while (nbytes >= SM4_BLOCK_SIZE) {
+ sm4_crypt_block(ctx->rkey_enc, keystream, iv);
+ crypto_xor_cpy(dst, src, keystream, SM4_BLOCK_SIZE);
+ iv = dst;
+ src += SM4_BLOCK_SIZE;
+ dst += SM4_BLOCK_SIZE;
+ nbytes -= SM4_BLOCK_SIZE;
+ }
+ if (iv != walk.iv)
+ memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
+
+ /* tail */
+ if (walk.nbytes == walk.total && nbytes > 0) {
+ sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
+ crypto_xor_cpy(dst, src, keystream, nbytes);
+ nbytes = 0;
+ }
+
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ return err;
+}
+
+static int sm4_cfb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = walk.nbytes) > 0) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ unsigned int nblks;
+
+ kernel_neon_begin();
+
+ nblks = BYTES2BLK8(nbytes);
+ if (nblks) {
+ sm4_neon_cfb_dec_blk8(ctx->rkey_enc, dst, src,
+ walk.iv, nblks);
+ dst += nblks * SM4_BLOCK_SIZE;
+ src += nblks * SM4_BLOCK_SIZE;
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
+
+ nblks = BYTES2BLKS(nbytes);
+ if (nblks) {
+ u8 keystream[SM4_BLOCK_SIZE * 8];
+
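+ /*
+ * The keystream input is IV || C[0..nblks-2]; save the last
+ * ciphertext block as the new IV before any in-place overwrite,
+ * then encrypt the keystream and XOR it with the ciphertext.
+ */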
+ memcpy(keystream, walk.iv, SM4_BLOCK_SIZE);
+ if (nblks > 1)
+ memcpy(&keystream[SM4_BLOCK_SIZE], src,
+ (nblks - 1) * SM4_BLOCK_SIZE);
+ memcpy(walk.iv, src + (nblks - 1) * SM4_BLOCK_SIZE,
+ SM4_BLOCK_SIZE);
+
+ sm4_neon_crypt_blk1_8(ctx->rkey_enc, keystream,
+ keystream, nblks);
+
+ crypto_xor_cpy(dst, src, keystream,
+ nblks * SM4_BLOCK_SIZE);
+ dst += nblks * SM4_BLOCK_SIZE;
+ src += nblks * SM4_BLOCK_SIZE;
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
+
+ kernel_neon_end();
+
+ /* tail */
+ if (walk.nbytes == walk.total && nbytes > 0) {
+ u8 keystream[SM4_BLOCK_SIZE];
+
+ sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
+ crypto_xor_cpy(dst, src, keystream, nbytes);
+ nbytes = 0;
+ }
+
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ return err;
+}
+
+static int sm4_ctr_crypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = walk.nbytes) > 0) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ unsigned int nblks;
+
+ kernel_neon_begin();
+
+ nblks = BYTES2BLK8(nbytes);
+ if (nblks) {
+ sm4_neon_ctr_enc_blk8(ctx->rkey_enc, dst, src,
+ walk.iv, nblks);
+ dst += nblks * SM4_BLOCK_SIZE;
+ src += nblks * SM4_BLOCK_SIZE;
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
+
+ nblks = BYTES2BLKS(nbytes);
+ if (nblks) {
+ u8 keystream[SM4_BLOCK_SIZE * 8];
+ int i;
+
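+ /*
+ * Expand the counter into one block per remaining full block,
+ * encrypt the resulting keystream, then XOR with the source.
+ */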
+ for (i = 0; i < nblks; i++) {
+ memcpy(&keystream[i * SM4_BLOCK_SIZE],
+ walk.iv, SM4_BLOCK_SIZE);
+ crypto_inc(walk.iv, SM4_BLOCK_SIZE);
+ }
+ sm4_neon_crypt_blk1_8(ctx->rkey_enc, keystream,
+ keystream, nblks);
+
+ crypto_xor_cpy(dst, src, keystream,
+ nblks * SM4_BLOCK_SIZE);
+ dst += nblks * SM4_BLOCK_SIZE;
+ src += nblks * SM4_BLOCK_SIZE;
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
+
+ kernel_neon_end();
+
+ /* tail */
+ if (walk.nbytes == walk.total && nbytes > 0) {
+ u8 keystream[SM4_BLOCK_SIZE];
+
+ sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
+ crypto_inc(walk.iv, SM4_BLOCK_SIZE);
+ crypto_xor_cpy(dst, src, keystream, nbytes);
+ nbytes = 0;
+ }
+
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ return err;
+}
+
+static struct skcipher_alg sm4_algs[] = {
+ {
+ .base = {
+ .cra_name = "ecb(sm4)",
+ .cra_driver_name = "ecb-sm4-neon",
+ .cra_priority = 200,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sm4_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .setkey = sm4_setkey,
+ .encrypt = sm4_ecb_encrypt,
+ .decrypt = sm4_ecb_decrypt,
+ }, {
+ .base = {
+ .cra_name = "cbc(sm4)",
+ .cra_driver_name = "cbc-sm4-neon",
+ .cra_priority = 200,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sm4_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .setkey = sm4_setkey,
+ .encrypt = sm4_cbc_encrypt,
+ .decrypt = sm4_cbc_decrypt,
+ }, {
+ .base = {
+ .cra_name = "cfb(sm4)",
+ .cra_driver_name = "cfb-sm4-neon",
+ .cra_priority = 200,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct sm4_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .chunksize = SM4_BLOCK_SIZE,
+ .setkey = sm4_setkey,
+ .encrypt = sm4_cfb_encrypt,
+ .decrypt = sm4_cfb_decrypt,
+ }, {
+ .base = {
+ .cra_name = "ctr(sm4)",
+ .cra_driver_name = "ctr-sm4-neon",
+ .cra_priority = 200,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct sm4_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .chunksize = SM4_BLOCK_SIZE,
+ .setkey = sm4_setkey,
+ .encrypt = sm4_ctr_crypt,
+ .decrypt = sm4_ctr_crypt,
+ }
+};
+
+static int __init sm4_init(void)
+{
+ return crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
+}
+
+static void __exit sm4_exit(void)
+{
+ crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
+}
+
+module_init(sm4_init);
+module_exit(sm4_exit);
+
+MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR using ARMv8 NEON");
+MODULE_ALIAS_CRYPTO("sm4-neon");
+MODULE_ALIAS_CRYPTO("sm4");
+MODULE_ALIAS_CRYPTO("ecb(sm4)");
+MODULE_ALIAS_CRYPTO("cbc(sm4)");
+MODULE_ALIAS_CRYPTO("cfb(sm4)");
+MODULE_ALIAS_CRYPTO("ctr(sm4)");
+MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index eaa6ca062d89..9f362274a4f7 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -8,6 +8,15 @@
#define compat_mode_t compat_mode_t
typedef u16 compat_mode_t;
+#define __compat_uid_t __compat_uid_t
+typedef u16 __compat_uid_t;
+typedef u16 __compat_gid_t;
+
+#define compat_ipc_pid_t compat_ipc_pid_t
+typedef u16 compat_ipc_pid_t;
+
+#define compat_statfs compat_statfs
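+/* defined before <asm-generic/compat.h> below so its generic fallbacks are skipped */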
+
#include <asm-generic/compat.h>
#ifdef CONFIG_COMPAT
@@ -19,21 +28,15 @@ typedef u16 compat_mode_t;
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
-#define COMPAT_USER_HZ 100
#ifdef __AARCH64EB__
#define COMPAT_UTS_MACHINE "armv8b\0\0"
#else
#define COMPAT_UTS_MACHINE "armv8l\0\0"
#endif
-typedef u16 __compat_uid_t;
-typedef u16 __compat_gid_t;
typedef u16 __compat_uid16_t;
typedef u16 __compat_gid16_t;
-typedef u32 compat_dev_t;
typedef s32 compat_nlink_t;
-typedef u16 compat_ipc_pid_t;
-typedef __kernel_fsid_t compat_fsid_t;
struct compat_stat {
#ifdef __AARCH64EB__
@@ -65,26 +68,6 @@ struct compat_stat {
compat_ulong_t __unused4[2];
};
-struct compat_flock {
- short l_type;
- short l_whence;
- compat_off_t l_start;
- compat_off_t l_len;
- compat_pid_t l_pid;
-};
-
-#define F_GETLK64 12 /* using 'struct flock64' */
-#define F_SETLK64 13
-#define F_SETLKW64 14
-
-struct compat_flock64 {
- short l_type;
- short l_whence;
- compat_loff_t l_start;
- compat_loff_t l_len;
- compat_pid_t l_pid;
-};
-
struct compat_statfs {
int f_type;
int f_bsize;
@@ -107,64 +90,6 @@ struct compat_statfs {
#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
#define COMPAT_MINSIGSTKSZ 2048
-struct compat_ipc64_perm {
- compat_key_t key;
- __compat_uid32_t uid;
- __compat_gid32_t gid;
- __compat_uid32_t cuid;
- __compat_gid32_t cgid;
- unsigned short mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- compat_ulong_t unused1;
- compat_ulong_t unused2;
-};
-
-struct compat_semid64_ds {
- struct compat_ipc64_perm sem_perm;
- compat_ulong_t sem_otime;
- compat_ulong_t sem_otime_high;
- compat_ulong_t sem_ctime;
- compat_ulong_t sem_ctime_high;
- compat_ulong_t sem_nsems;
- compat_ulong_t __unused3;
- compat_ulong_t __unused4;
-};
-
-struct compat_msqid64_ds {
- struct compat_ipc64_perm msg_perm;
- compat_ulong_t msg_stime;
- compat_ulong_t msg_stime_high;
- compat_ulong_t msg_rtime;
- compat_ulong_t msg_rtime_high;
- compat_ulong_t msg_ctime;
- compat_ulong_t msg_ctime_high;
- compat_ulong_t msg_cbytes;
- compat_ulong_t msg_qnum;
- compat_ulong_t msg_qbytes;
- compat_pid_t msg_lspid;
- compat_pid_t msg_lrpid;
- compat_ulong_t __unused4;
- compat_ulong_t __unused5;
-};
-
-struct compat_shmid64_ds {
- struct compat_ipc64_perm shm_perm;
- compat_size_t shm_segsz;
- compat_ulong_t shm_atime;
- compat_ulong_t shm_atime_high;
- compat_ulong_t shm_dtime;
- compat_ulong_t shm_dtime_high;
- compat_ulong_t shm_ctime;
- compat_ulong_t shm_ctime_high;
- compat_pid_t shm_cpid;
- compat_pid_t shm_lpid;
- compat_ulong_t shm_nattch;
- compat_ulong_t __unused4;
- compat_ulong_t __unused5;
-};
-
static inline int is_compat_task(void)
{
return test_thread_flag(TIF_32BIT);
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index bf8aafee1eac..9e58749db21d 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -92,8 +92,8 @@
#endif /* CONFIG_COMPAT */
#ifndef CONFIG_ARM64_FORCE_52BIT
-#define arch_get_mmap_end(addr) ((addr > DEFAULT_MAP_WINDOW) ? TASK_SIZE :\
- DEFAULT_MAP_WINDOW)
+#define arch_get_mmap_end(addr, len, flags) \
+ (((addr) > DEFAULT_MAP_WINDOW) ? TASK_SIZE : DEFAULT_MAP_WINDOW)
#define arch_get_mmap_base(addr, base) ((addr > DEFAULT_MAP_WINDOW) ? \
base + TASK_SIZE - DEFAULT_MAP_WINDOW :\
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 4e65da3445c7..037feba03a51 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -3,6 +3,7 @@
* Copyright (C) 2012 ARM Ltd.
*/
#ifdef CONFIG_COMPAT
+#define __ARCH_WANT_COMPAT_STAT
#define __ARCH_WANT_COMPAT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
index 58303a9ec32c..670e4ce81822 100644
--- a/arch/arm64/kernel/crash_dump.c
+++ b/arch/arm64/kernel/crash_dump.c
@@ -9,25 +9,11 @@
#include <linux/crash_dump.h>
#include <linux/errno.h>
#include <linux/io.h>
-#include <linux/memblock.h>
-#include <linux/uaccess.h>
+#include <linux/uio.h>
#include <asm/memory.h>
-/**
- * copy_oldmem_page() - copy one page from old kernel memory
- * @pfn: page frame number to be copied
- * @buf: buffer where the copied page is placed
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page
- * @userbuf: if set, @buf is in a user address space
- *
- * This function copies one page from old kernel memory into buffer pointed by
- * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
- * copied or negative error in case of failure.
- */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset,
- int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
{
void *vaddr;
@@ -38,14 +24,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!vaddr)
return -ENOMEM;
- if (userbuf) {
- if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
- memunmap(vaddr);
- return -EFAULT;
- }
- } else {
- memcpy(buf, vaddr + offset, csize);
- }
+ csize = copy_to_iter(vaddr + offset, csize, iter);
memunmap(vaddr);
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 6328308be272..2e248342476e 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -427,7 +427,7 @@ int swsusp_arch_resume(void)
return rc;
/*
- * We need a zero page that is zero before & after resume in order to
+ * We need a zero page that is zero before & after resume in order
* to break before make on the ttbr1 page tables.
*/
zero_page = (void *)get_safe_page(GFP_ATOMIC);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 9734c9fb1a32..92bcc1768f0b 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -111,8 +111,7 @@ void machine_power_off(void)
{
local_irq_disable();
smp_send_stop();
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
}
/*
@@ -344,9 +343,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
asmlinkage void ret_from_fork(void) asm("ret_from_fork");
-int copy_thread(unsigned long clone_flags, unsigned long stack_start,
- unsigned long stk_sz, struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long stack_start = args->stack;
+ unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
@@ -362,7 +363,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
ptrauth_thread_init_kernel(p);
- if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
+ if (likely(!args->fn)) {
*childregs = *current_pt_regs();
childregs->regs[0] = 0;
@@ -400,8 +401,8 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
- p->thread.cpu_context.x19 = stack_start;
- p->thread.cpu_context.x20 = stk_sz;
+ p->thread.cpu_context.x19 = (unsigned long)args->fn;
+ p->thread.cpu_context.x20 = (unsigned long)args->fn_arg;
}
p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
p->thread.cpu_context.sp = (unsigned long)childregs;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index fea3223704b6..cf3a759f10d4 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -303,13 +303,14 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
early_fixmap_init();
early_ioremap_init();
- setup_machine_fdt(__fdt_pointer);
-
/*
* Initialise the static keys early as they may be enabled by the
- * cpufeature code and early parameters.
+ * cpufeature code, early parameters, and DT setup.
*/
jump_label_init();
+
+ setup_machine_fdt(__fdt_pointer);
+
parse_early_param();
/*
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index edb2d9206a78..b0980fbb6bc7 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -385,7 +385,7 @@ static int preserve_za_context(struct za_context __user *ctx)
return err ? -EFAULT : 0;
}
-static int restore_za_context(struct user_ctxs __user *user)
+static int restore_za_context(struct user_ctxs *user)
{
int err;
unsigned int vq;
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 0f0c17dfeb9c..e2a5ec9fdc0d 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -507,12 +507,15 @@ pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
{
size_t pgsize;
int ncontig;
+ pte_t orig_pte;
if (!pte_cont(READ_ONCE(*ptep)))
return ptep_clear_flush(vma, addr, ptep);
ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
- return get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
+ orig_pte = get_clear_contig(vma->vm_mm, addr, ptep, pgsize, ncontig);
+ flush_tlb_range(vma, addr, addr + pgsize * ncontig);
+ return orig_pte;
}
static int __init hugetlbpage_init(void)
diff --git a/arch/csky/kernel/power.c b/arch/csky/kernel/power.c
index 923ee4e381b8..86ee202906f8 100644
--- a/arch/csky/kernel/power.c
+++ b/arch/csky/kernel/power.c
@@ -9,16 +9,14 @@ EXPORT_SYMBOL(pm_power_off);
void machine_power_off(void)
{
local_irq_disable();
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
asm volatile ("bkpt");
}
void machine_halt(void)
{
local_irq_disable();
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
asm volatile ("bkpt");
}
diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
index 5de04707aa07..eedddb155669 100644
--- a/arch/csky/kernel/process.c
+++ b/arch/csky/kernel/process.c
@@ -29,12 +29,11 @@ asmlinkage void ret_from_kernel_thread(void);
*/
void flush_thread(void){}
-int copy_thread(unsigned long clone_flags,
- unsigned long usp,
- unsigned long kthread_arg,
- struct task_struct *p,
- unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct switch_stack *childstack;
struct pt_regs *childregs = task_pt_regs(p);
@@ -48,11 +47,11 @@ int copy_thread(unsigned long clone_flags,
/* setup thread.sp for switch_to !!! */
p->thread.sp = (unsigned long)childstack;
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
memset(childregs, 0, sizeof(struct pt_regs));
childstack->r15 = (unsigned long) ret_from_kernel_thread;
- childstack->r10 = kthread_arg;
- childstack->r9 = usp;
+ childstack->r10 = (unsigned long) args->fn_arg;
+ childstack->r9 = (unsigned long) args->fn;
childregs->sr = mfcr("psr");
} else {
*childregs = *(current_pt_regs());
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index eab03c691f53..f0552f98a7ba 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -50,9 +50,11 @@ void arch_cpu_idle(void)
/*
* Copy architecture-specific thread state
*/
-int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
- struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct thread_info *ti = task_thread_info(p);
struct hexagon_switch_stack *ss;
struct pt_regs *childregs;
@@ -73,11 +75,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
sizeof(*ss));
ss->lr = (unsigned long)ret_from_fork;
p->thread.switch_sp = ss;
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
memset(childregs, 0, sizeof(struct pt_regs));
/* r24 <- fn, r25 <- arg */
- ss->r24 = usp;
- ss->r25 = arg;
+ ss->r24 = (unsigned long)args->fn;
+ ss->r25 = (unsigned long)args->fn_arg;
pt_set_kmode(childregs);
return 0;
}
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
index a10a498eede1..402874489890 100644
--- a/arch/ia64/include/asm/ptrace.h
+++ b/arch/ia64/include/asm/ptrace.h
@@ -139,10 +139,6 @@ static inline long regs_return_value(struct pt_regs *regs)
#define arch_ptrace_stop_needed() \
(!test_thread_flag(TIF_RESTORE_RSE))
- extern void ptrace_attach_sync_user_rbs (struct task_struct *);
- #define arch_ptrace_attach(child) \
- ptrace_attach_sync_user_rbs(child)
-
#define arch_has_single_step() (1)
#define arch_has_block_step() (1)
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index 0ed3c3dee4cd..4ef68e2aa757 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -10,42 +10,18 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/crash_dump.h>
-
+#include <linux/uio.h>
#include <asm/page.h>
-#include <linux/uaccess.h>
-/**
- * copy_oldmem_page - copy one page from "oldmem"
- * @pfn: page frame number to be copied
- * @buf: target memory address for the copy; this can be in kernel address
- * space or user address space (see @userbuf)
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page (based on pfn) to begin the copy
- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
- * otherwise @buf is in kernel address space, use memcpy().
- *
- * Copy a page from "oldmem". For this page, there is no pte mapped
- * in the current kernel. We stitch up a pte, similar to kmap_atomic.
- *
- * Calling copy_to_user() in atomic context is not desirable. Hence first
- * copying the data to a pre-allocated kernel page and then copying to user
- * space in non-atomic context.
- */
-ssize_t
-copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
{
void *vaddr;
if (!csize)
return 0;
vaddr = __va(pfn<<PAGE_SHIFT);
- if (userbuf) {
- if (copy_to_user(buf, (vaddr + offset), csize)) {
- return -EFAULT;
- }
- } else
- memcpy(buf, (vaddr + offset), csize);
+ csize = copy_to_iter(vaddr + offset, csize, iter);
return csize;
}
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 1a7bab1c5d7c..ca34e51e84b4 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -29,38 +29,38 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
enum instruction_type {A, I, M, F, B, L, X, u};
static enum instruction_type bundle_encoding[32][3] = {
- { M, I, I }, /* 00 */
- { M, I, I }, /* 01 */
- { M, I, I }, /* 02 */
- { M, I, I }, /* 03 */
- { M, L, X }, /* 04 */
- { M, L, X }, /* 05 */
- { u, u, u }, /* 06 */
- { u, u, u }, /* 07 */
- { M, M, I }, /* 08 */
- { M, M, I }, /* 09 */
- { M, M, I }, /* 0A */
- { M, M, I }, /* 0B */
- { M, F, I }, /* 0C */
- { M, F, I }, /* 0D */
- { M, M, F }, /* 0E */
- { M, M, F }, /* 0F */
- { M, I, B }, /* 10 */
- { M, I, B }, /* 11 */
- { M, B, B }, /* 12 */
- { M, B, B }, /* 13 */
- { u, u, u }, /* 14 */
- { u, u, u }, /* 15 */
- { B, B, B }, /* 16 */
- { B, B, B }, /* 17 */
- { M, M, B }, /* 18 */
- { M, M, B }, /* 19 */
- { u, u, u }, /* 1A */
- { u, u, u }, /* 1B */
- { M, F, B }, /* 1C */
- { M, F, B }, /* 1D */
- { u, u, u }, /* 1E */
- { u, u, u }, /* 1F */
+ [0x00] = { M, I, I },
+ [0x01] = { M, I, I },
+ [0x02] = { M, I, I },
+ [0x03] = { M, I, I },
+ [0x04] = { M, L, X },
+ [0x05] = { M, L, X },
+ [0x06] = { u, u, u },
+ [0x07] = { u, u, u },
+ [0x08] = { M, M, I },
+ [0x09] = { M, M, I },
+ [0x0A] = { M, M, I },
+ [0x0B] = { M, M, I },
+ [0x0C] = { M, F, I },
+ [0x0D] = { M, F, I },
+ [0x0E] = { M, M, F },
+ [0x0F] = { M, M, F },
+ [0x10] = { M, I, B },
+ [0x11] = { M, I, B },
+ [0x12] = { M, B, B },
+ [0x13] = { M, B, B },
+ [0x14] = { u, u, u },
+ [0x15] = { u, u, u },
+ [0x16] = { B, B, B },
+ [0x17] = { B, B, B },
+ [0x18] = { M, M, B },
+ [0x19] = { M, M, B },
+ [0x1A] = { u, u, u },
+ [0x1B] = { u, u, u },
+ [0x1C] = { M, F, B },
+ [0x1D] = { M, F, B },
+ [0x1E] = { u, u, u },
+ [0x1F] = { u, u, u },
};
/* Insert a long branch code */
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index e628a88607bb..c62a66710ad6 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -290,7 +290,6 @@ static void ia64_mlogbuf_finish(int wait)
{
BREAK_LOGLEVEL(console_loglevel);
- spin_lock_init(&mlogbuf_rlock);
ia64_mlogbuf_dump();
printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, "
"MCA/INIT might be dodgy or fail.\n");
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 64189f04c1a4..b9ae093bfe37 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -120,7 +120,7 @@ static const char *mem_attrib[]={
* Input:
* - a pointer to a buffer to hold the string
* - a 64-bit vector
- * Ouput:
+ * Output:
* - a pointer to the end of the buffer
*
*/
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index d7a256bd9d6b..416305e550e2 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/personality.h>
+#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
@@ -295,9 +296,12 @@ ia64_load_extra (struct task_struct *task)
* so there is nothing to worry about.
*/
int
-copy_thread(unsigned long clone_flags, unsigned long user_stack_base,
- unsigned long user_stack_size, struct task_struct *p, unsigned long tls)
+copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long user_stack_base = args->stack;
+ unsigned long user_stack_size = args->stack_size;
+ unsigned long tls = args->tls;
extern char ia64_ret_from_clone;
struct switch_stack *child_stack, *stack;
unsigned long rbs, child_rbs, rbs_size;
@@ -338,14 +342,14 @@ copy_thread(unsigned long clone_flags, unsigned long user_stack_base,
ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
- if (unlikely(!user_stack_base)) {
+ if (unlikely(args->fn)) {
+ if (unlikely(args->idle)) {
/* fork_idle() called us */
return 0;
}
memset(child_stack, 0, sizeof(*child_ptregs) + sizeof(*child_stack));
- child_stack->r4 = user_stack_base; /* payload */
- child_stack->r5 = user_stack_size; /* argument */
+ child_stack->r4 = (unsigned long) args->fn;
+ child_stack->r5 = (unsigned long) args->fn_arg;
/*
* Preserve PSR bits, except for bits 32-34 and 37-45,
* which we can't read.
@@ -599,8 +603,7 @@ machine_halt (void)
void
machine_power_off (void)
{
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
machine_halt();
}
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index a19acd9f5e1f..ab8aeb34d1d9 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -618,63 +618,6 @@ void ia64_sync_krbs(void)
}
/*
- * After PTRACE_ATTACH, a thread's register backing store area in user
- * space is assumed to contain correct data whenever the thread is
- * stopped. arch_ptrace_stop takes care of this on tracing stops.
- * But if the child was already stopped for job control when we attach
- * to it, then it might not ever get into ptrace_stop by the time we
- * want to examine the user memory containing the RBS.
- */
-void
-ptrace_attach_sync_user_rbs (struct task_struct *child)
-{
- int stopped = 0;
- struct unw_frame_info info;
-
- /*
- * If the child is in TASK_STOPPED, we need to change that to
- * TASK_TRACED momentarily while we operate on it. This ensures
- * that the child won't be woken up and return to user mode while
- * we are doing the sync. (It can only be woken up for SIGKILL.)
- */
-
- read_lock(&tasklist_lock);
- if (child->sighand) {
- spin_lock_irq(&child->sighand->siglock);
- if (READ_ONCE(child->__state) == TASK_STOPPED &&
- !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
- set_notify_resume(child);
-
- WRITE_ONCE(child->__state, TASK_TRACED);
- stopped = 1;
- }
- spin_unlock_irq(&child->sighand->siglock);
- }
- read_unlock(&tasklist_lock);
-
- if (!stopped)
- return;
-
- unw_init_from_blocked_task(&info, child);
- do_sync_rbs(&info, ia64_sync_user_rbs);
-
- /*
- * Now move the child back into TASK_STOPPED if it should be in a
- * job control stop, so that SIGCONT can be used to wake it up.
- */
- read_lock(&tasklist_lock);
- if (child->sighand) {
- spin_lock_irq(&child->sighand->siglock);
- if (READ_ONCE(child->__state) == TASK_TRACED &&
- (child->signal->flags & SIGNAL_STOP_STOPPED)) {
- WRITE_ONCE(child->__state, TASK_STOPPED);
- }
- spin_unlock_irq(&child->sighand->siglock);
- }
- read_unlock(&tasklist_lock);
-}
-
-/*
* Write f32-f127 back to task->thread.fph if it has been modified.
*/
inline void
@@ -2025,7 +1968,7 @@ static void syscall_get_args_cb(struct unw_frame_info *info, void *data)
* - epsinstruction: cfm is set by br.call
* locals don't exist.
*
- * For both cases argguments are reachable in cfm.sof - cfm.sol.
+ * For both cases arguments are reachable in cfm.sof - cfm.sol.
* CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ]
*/
cfm = pt->cr_ifs;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 5010348fa21b..fd6301eafa9d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -572,7 +572,7 @@ setup_arch (char **cmdline_p)
#ifdef CONFIG_ACPI_HOTPLUG_CPU
prefill_possible_map();
#endif
- per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
+ per_cpu_scan_finalize((cpumask_empty(&early_cpu_possible_map) ?
32 : cpumask_weight(&early_cpu_possible_map)),
additional_cpus > 0 ? additional_cpus : 0);
#endif /* CONFIG_ACPI_NUMA */
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index d10f780c13b9..d0e935cf2093 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -576,8 +576,6 @@ clear_cpu_sibling_map(int cpu)
static void
remove_siblinginfo(int cpu)
{
- int last = 0;
-
if (cpu_data(cpu)->threads_per_core == 1 &&
cpu_data(cpu)->cores_per_socket == 1) {
cpumask_clear_cpu(cpu, &cpu_core_map[cpu]);
@@ -585,8 +583,6 @@ remove_siblinginfo(int cpu)
return;
}
- last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0);
-
/* remove it from all sibling map's */
clear_cpu_sibling_map(cpu);
}
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 753642366e12..53735b1d1be3 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -309,7 +309,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
/*
* Lower 4 bits are used as a count. Upper bits are a sequence
* number that is updated when count is reset. The cmpxchg will
- * fail is seqno has changed. This minimizes mutiple cpus
+ * fail if seqno has changed. This minimizes multiple cpus
* resetting the count.
*/
if (current_jiffies > last.time)
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 3c3e15b22608..855d949d81df 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -449,7 +449,7 @@ mem_init (void)
memblock_free_all();
/*
- * For fsyscall entrpoints with no light-weight handler, use the ordinary
+ * For fsyscall entrypoints with no light-weight handler, use the ordinary
* (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
* code can tell them apart.
*/
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 135b5135cace..ca060e7a2a46 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -174,7 +174,7 @@ __setup("nptcg=", set_nptcg);
* override table (in which case we should ignore the value from
* PAL_VM_SUMMARY).
*
- * Kernel parameter "nptcg=" overrides maximum number of simultanesous ptc.g
+ * Kernel parameter "nptcg=" overrides maximum number of simultaneous ptc.g
* purges defined in either PAL_VM_SUMMARY or PAL override table. In this case,
* we should ignore the value from either PAL_VM_SUMMARY or PAL override table.
*
@@ -516,7 +516,7 @@ found:
if (i >= per_cpu(ia64_tr_num, cpu))
return -EBUSY;
- /*Record tr info for mca hander use!*/
+ /*Record tr info for mca handler use!*/
if (i > per_cpu(ia64_tr_used, cpu))
per_cpu(ia64_tr_used, cpu) = i;
diff --git a/arch/loongarch/Kbuild b/arch/loongarch/Kbuild
new file mode 100644
index 000000000000..ab5373d0a24f
--- /dev/null
+++ b/arch/loongarch/Kbuild
@@ -0,0 +1,6 @@
+obj-y += kernel/
+obj-y += mm/
+obj-y += vdso/
+
+# for cleaning
+subdir- += boot
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
new file mode 100644
index 000000000000..80657bf83b05
--- /dev/null
+++ b/arch/loongarch/Kconfig
@@ -0,0 +1,438 @@
+# SPDX-License-Identifier: GPL-2.0
+config LOONGARCH
+ bool
+ default y
+ select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
+ select ARCH_BINFMT_ELF_STATE
+ select ARCH_ENABLE_MEMORY_HOTPLUG
+ select ARCH_ENABLE_MEMORY_HOTREMOVE
+ select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+ select ARCH_HAS_PHYS_TO_DMA
+ select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_INLINE_READ_LOCK if !PREEMPTION
+ select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
+ select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
+ select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
+ select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select ARCH_SPARSEMEM_ENABLE
+ select ARCH_SUPPORTS_ACPI
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_HUGETLBFS
+ select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+ select ARCH_WANTS_NO_INSTR
+ select BUILDTIME_TABLE_SORT
+ select COMMON_CLK
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CMOS_UPDATE
+ select GENERIC_CPU_AUTOPROBE
+ select GENERIC_ENTRY
+ select GENERIC_FIND_FIRST_BIT
+ select GENERIC_GETTIMEOFDAY
+ select GENERIC_IRQ_MULTI_HANDLER
+ select GENERIC_IRQ_PROBE
+ select GENERIC_IRQ_SHOW
+ select GENERIC_LIB_ASHLDI3
+ select GENERIC_LIB_ASHRDI3
+ select GENERIC_LIB_CMPDI2
+ select GENERIC_LIB_LSHRDI3
+ select GENERIC_LIB_UCMPDI2
+ select GENERIC_PCI_IOMAP
+ select GENERIC_SCHED_CLOCK
+ select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_TIME_VSYSCALL
+ select GPIOLIB
+ select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_COMPILER_H
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_ASM_MODVERSIONS
+ select HAVE_CONTEXT_TRACKING
+ select HAVE_COPY_THREAD_TLS
+ select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_EXIT_THREAD
+ select HAVE_FAST_GUP
+ select HAVE_GENERIC_VDSO
+ select HAVE_IOREMAP_PROT
+ select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ select HAVE_IRQ_TIME_ACCOUNTING
+ select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
+ select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_NMI
+ select HAVE_PERF_EVENTS
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RSEQ
+ select HAVE_SETUP_PER_CPU_AREA if NUMA
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_TIF_NOHZ
+ select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
+ select IRQ_FORCED_THREADING
+ select IRQ_LOONGARCH_CPU
+ select MODULES_USE_ELF_RELA if MODULES
+ select NEED_PER_CPU_EMBED_FIRST_CHUNK
+ select NEED_PER_CPU_PAGE_FIRST_CHUNK
+ select OF
+ select OF_EARLY_FLATTREE
+ select PERF_USE_VMALLOC
+ select RTC_LIB
+ select SPARSE_IRQ
+ select SYSCTL_EXCEPTION_TRACE
+ select SWIOTLB
+ select TRACE_IRQFLAGS_SUPPORT
+ select USE_PERCPU_NUMA_NODE_ID
+ select ZONE_DMA32
+
+config 32BIT
+ bool
+
+config 64BIT
+ def_bool y
+
+config CPU_HAS_FPU
+ bool
+ default y
+
+config CPU_HAS_PREFETCH
+ bool
+ default y
+
+config GENERIC_CALIBRATE_DELAY
+ def_bool y
+
+config GENERIC_CSUM
+ def_bool y
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config L1_CACHE_SHIFT
+ int
+ default "6"
+
+config LOCKDEP_SUPPORT
+ bool
+ default y
+
+# MACH_LOONGSON32 and MACH_LOONGSON64 are deliberately carried over from the
+# MIPS Loongson code, to preserve Loongson-specific code paths in drivers that
+# are shared between architectures and specifically expect these symbols.
+config MACH_LOONGSON32
+ def_bool 32BIT
+
+config MACH_LOONGSON64
+ def_bool 64BIT
+
+config PAGE_SIZE_4KB
+ bool
+
+config PAGE_SIZE_16KB
+ bool
+
+config PAGE_SIZE_64KB
+ bool
+
+config PGTABLE_2LEVEL
+ bool
+
+config PGTABLE_3LEVEL
+ bool
+
+config PGTABLE_4LEVEL
+ bool
+
+config PGTABLE_LEVELS
+ int
+ default 2 if PGTABLE_2LEVEL
+ default 3 if PGTABLE_3LEVEL
+ default 4 if PGTABLE_4LEVEL
+
+config SCHED_OMIT_FRAME_POINTER
+ bool
+ default y
+
+menu "Kernel type and options"
+
+source "kernel/Kconfig.hz"
+
+choice
+ prompt "Page Table Layout"
+ default 16KB_2LEVEL if 32BIT
+ default 16KB_3LEVEL if 64BIT
+ help
+ Allows choosing the page table layout, which is a combination
+ of page size and page table levels. The size of virtual memory
+ address space is determined by the page table layout.
+
+config 4KB_3LEVEL
+ bool "4KB with 3 levels"
+ select PAGE_SIZE_4KB
+ select PGTABLE_3LEVEL
+ help
+ This option selects 4KB page size with 3 level page tables, which
+ support a maximum of 39 bits of application virtual memory.
+
+config 4KB_4LEVEL
+ bool "4KB with 4 levels"
+ select PAGE_SIZE_4KB
+ select PGTABLE_4LEVEL
+ help
+ This option selects 4KB page size with 4 level page tables, which
+ support a maximum of 48 bits of application virtual memory.
+
+config 16KB_2LEVEL
+ bool "16KB with 2 levels"
+ select PAGE_SIZE_16KB
+ select PGTABLE_2LEVEL
+ help
+ This option selects 16KB page size with 2 level page tables, which
+ support a maximum of 36 bits of application virtual memory.
+
+config 16KB_3LEVEL
+ bool "16KB with 3 levels"
+ select PAGE_SIZE_16KB
+ select PGTABLE_3LEVEL
+ help
+ This option selects 16KB page size with 3 level page tables, which
+ support a maximum of 47 bits of application virtual memory.
+
+config 64KB_2LEVEL
+ bool "64KB with 2 levels"
+ select PAGE_SIZE_64KB
+ select PGTABLE_2LEVEL
+ help
+ This option selects 64KB page size with 2 level page tables, which
+ support a maximum of 42 bits of application virtual memory.
+
+config 64KB_3LEVEL
+ bool "64KB with 3 levels"
+ select PAGE_SIZE_64KB
+ select PGTABLE_3LEVEL
+ help
+ This option selects 64KB page size with 3 level page tables, which
+ support a maximum of 55 bits of application virtual memory.
+
+endchoice
+
+config CMDLINE
+ string "Built-in kernel command line"
+ help
+ For most platforms, the arguments for the kernel's command line
+ are provided at run-time, during boot. However, there are cases
+ where either no arguments are being provided or the provided
+ arguments are insufficient or even invalid.
+
+ When that occurs, it is possible to define a built-in command
+ line here and choose how the kernel should use it later on.
+
+choice
+ prompt "Kernel command line type"
+ default CMDLINE_BOOTLOADER
+ help
+ Choose how the kernel will handle the provided built-in command
+ line.
+
+config CMDLINE_BOOTLOADER
+ bool "Use bootloader kernel arguments if available"
+ help
+ Prefer the command-line passed by the boot loader if available.
+ Use the built-in command line as fallback in case we get nothing
+ during boot. This is the default behaviour.
+
+config CMDLINE_EXTEND
+ bool "Use built-in to extend bootloader kernel arguments"
+ help
+ The command-line arguments provided during boot will be
+ appended to the built-in command line. This is useful in
+ cases where the provided arguments are insufficient and
+ you don't want to or cannot modify them.
+
+config CMDLINE_FORCE
+ bool "Always use the built-in kernel command string"
+ help
+ Always use the built-in command line, even if we get one during
+ boot. This is useful in case you need to override the provided
+ command line on systems where you don't have or want control
+ over it.
+
+endchoice
+
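+# For instance (hypothetical values, not taken from this patch): with
+# CONFIG_CMDLINE="console=ttyS0,115200" built in and a boot loader passing
+# "root=/dev/sda1", CMDLINE_BOOTLOADER would use only "root=/dev/sda1",
+# CMDLINE_EXTEND would result in something like
+# "console=ttyS0,115200 root=/dev/sda1", and CMDLINE_FORCE would keep just
+# the built-in string.
+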
+config DMI
+ bool "Enable DMI scanning"
+ select DMI_SCAN_MACHINE_NON_EFI_FALLBACK
+ default y
+ help
+ This enables the SMBIOS/DMI feature for systems that provide it,
+ and scanning of DMI to identify machine quirks.
+
+config EFI
+ bool "EFI runtime service support"
+ select UCS2_STRING
+ select EFI_PARAMS_FROM_FDT
+ select EFI_RUNTIME_WRAPPERS
+ help
+ This enables the kernel to use EFI runtime services that are
+ available (such as the EFI variable services).
+
+config SMP
+ bool "Multi-Processing support"
+ help
+ This enables support for systems with more than one CPU. If you have
+ a system with only one CPU, say N. If you have a system with more
+ than one CPU, say Y.
+
+ If you say N here, the kernel will run on uni- and multiprocessor
+ machines, but will use only one CPU of a multiprocessor machine. If
+ you say Y here, the kernel will run on many, but not all,
+ uniprocessor machines. On a uniprocessor machine, the kernel
+ will run faster if you say N here.
+
+ See also the SMP-HOWTO available at <http://www.tldp.org/docs.html#howto>.
+
+ If you don't know what to do here, say N.
+
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP
+ select GENERIC_IRQ_MIGRATION
+ help
+ Say Y here to allow turning CPUs off and on. CPUs can be
+ controlled through /sys/devices/system/cpu.
+ (Note: power management support will enable this option
+ automatically on SMP systems.)
+ Say N if you want to disable CPU hotplug.
+
+config NR_CPUS
+ int "Maximum number of CPUs (2-256)"
+ range 2 256
+ depends on SMP
+ default "64"
+ help
+ This allows you to specify the maximum number of CPUs which this
+ kernel will support.
+
+config NUMA
+ bool "NUMA Support"
+ select ACPI_NUMA if ACPI
+ help
+ Say Y to compile the kernel with NUMA (Non-Uniform Memory Access)
+ support. This option improves performance on systems with more
+ than one NUMA node; on single node systems it is generally better
+ to leave it disabled.
+
+config NODES_SHIFT
+ int
+ default "6"
+ depends on NUMA
+
+config FORCE_MAX_ZONEORDER
+ int "Maximum zone order"
+ range 14 64 if PAGE_SIZE_64KB
+ default "14" if PAGE_SIZE_64KB
+ range 12 64 if PAGE_SIZE_16KB
+ default "12" if PAGE_SIZE_16KB
+ range 11 64
+ default "11"
+ help
+ The kernel memory allocator divides physically contiguous memory
+ blocks into "zones", where each zone is a power of two number of
+ pages. This option selects the largest power of two that the kernel
+ keeps in the memory allocator. If you need to allocate very large
+ blocks of physically contiguous memory, then you may need to
+ increase this value.
+
+ This config option is actually maximum order plus one. For example,
+ a value of 11 means that the largest free memory block is 2^10 pages.
+
+ The page size is not necessarily 4KB. Keep this in mind
+ when choosing a value for this option.
+
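+# As a rough worked example: with the default of 11 and 4KB pages the
+# largest block is 2^10 pages = 4MB, while the 64KB default of 14 allows
+# 2^13 pages = 512MB of physically contiguous allocation.
+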
+config SECCOMP
+ bool "Enable seccomp to safely compute untrusted bytecode"
+ depends on PROC_FS
+ default y
+ help
+ This kernel feature is useful for number crunching applications
+ that may need to compute untrusted bytecode during their
+ execution. By using pipes or other transports made available to
+ the process as file descriptors supporting the read/write
+ syscalls, it's possible to isolate those applications in
+ their own address space using seccomp. Once seccomp is
+ enabled via /proc/<pid>/seccomp, it cannot be disabled
+ and the task is only allowed to execute a few safe syscalls
+ defined by each seccomp mode.
+
+ If unsure, say Y. Only embedded systems should say N here.
+
+endmenu
+
+config ARCH_SELECT_MEMORY_MODEL
+ def_bool y
+
+config ARCH_FLATMEM_ENABLE
+ def_bool y
+ depends on !NUMA
+
+config ARCH_SPARSEMEM_ENABLE
+ def_bool y
+ help
+ Say Y to support efficient handling of sparse physical memory,
+ for architectures which are either NUMA (Non-Uniform Memory Access)
+ or have huge holes in the physical address space for other reasons.
+ See <file:Documentation/vm/numa.rst> for more.
+
+config ARCH_ENABLE_THP_MIGRATION
+ def_bool y
+ depends on TRANSPARENT_HUGEPAGE
+
+config ARCH_MEMORY_PROBE
+ def_bool y
+ depends on MEMORY_HOTPLUG
+
+config MMU
+ bool
+ default y
+
+config ARCH_MMAP_RND_BITS_MIN
+ default 12
+
+config ARCH_MMAP_RND_BITS_MAX
+ default 18
+
+menu "Power management options"
+
+source "drivers/acpi/Kconfig"
+
+endmenu
+
+source "drivers/firmware/Kconfig"
diff --git a/arch/loongarch/Kconfig.debug b/arch/loongarch/Kconfig.debug
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/arch/loongarch/Kconfig.debug
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
new file mode 100644
index 000000000000..fbe4277e6404
--- /dev/null
+++ b/arch/loongarch/Makefile
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Author: Huacai Chen <chenhuacai@loongson.cn>
+# Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+
+boot := arch/loongarch/boot
+
+KBUILD_DEFCONFIG := loongson3_defconfig
+
+KBUILD_IMAGE = $(boot)/vmlinux
+
+#
+# Select the object file format to substitute into the linker script.
+#
+64bit-tool-archpref = loongarch64
+32bit-bfd = elf32-loongarch
+64bit-bfd = elf64-loongarch
+32bit-emul = elf32loongarch
+64bit-emul = elf64loongarch
+
+ifdef CONFIG_64BIT
+tool-archpref = $(64bit-tool-archpref)
+UTS_MACHINE := loongarch64
+endif
+
+ifneq ($(SUBARCH),$(ARCH))
+ ifeq ($(CROSS_COMPILE),)
+ CROSS_COMPILE := $(call cc-cross-prefix, $(tool-archpref)-linux- $(tool-archpref)-linux-gnu- $(tool-archpref)-unknown-linux-gnu-)
+ endif
+endif
+
+ifdef CONFIG_64BIT
+ld-emul = $(64bit-emul)
+cflags-y += -mabi=lp64s
+endif
+
+cflags-y += -G0 -pipe -msoft-float
+LDFLAGS_vmlinux += -G0 -static -n -nostdlib
+KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel
+KBUILD_CFLAGS_KERNEL += -Wa,-mla-global-with-pcrel
+KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs
+KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs
+
+cflags-y += -ffreestanding
+cflags-y += $(call cc-option, -mno-check-zero-division)
+
+load-y = 0x9000000000200000
+bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y)
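+# 0x9000000000200000 presumably corresponds to a 2MB physical load offset
+# mapped through the kernel's cached direct-mapping window (whose base is
+# expected to be 0x9000000000000000, see asm/addrspace.h).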
+
+KBUILD_AFLAGS += $(cflags-y)
+KBUILD_CFLAGS += $(cflags-y)
+KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
+
+# This is required to get dwarf unwinding tables into .debug_frame
+# instead of .eh_frame so we don't discard them.
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+
+# Don't emit unaligned accesses.
+# Not all LoongArch cores support unaligned access, and as the kernel we
+# can't rely on others to provide emulation for these accesses.
+KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
+
+KBUILD_CFLAGS += -isystem $(shell $(CC) -print-file-name=include)
+
+KBUILD_LDFLAGS += -m $(ld-emul)
+
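+# The block below appears to pass the compiler's predefined macros on to
+# sparse: dump them with -dM -E, drop the __GNUC_* version macros, and
+# rewrite each "#define FOO bar" into a -D'FOO'='bar' option.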
+ifdef CONFIG_LOONGARCH
+CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
+ egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
+ sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
+endif
+
+head-y := arch/loongarch/kernel/head.o
+
+libs-y += arch/loongarch/lib/
+
+ifeq ($(KBUILD_EXTMOD),)
+prepare: vdso_prepare
+vdso_prepare: prepare0
+ $(Q)$(MAKE) $(build)=arch/loongarch/vdso include/generated/vdso-offsets.h
+endif
+
+PHONY += vdso_install
+vdso_install:
+ $(Q)$(MAKE) $(build)=arch/loongarch/vdso $@
+
+all: $(KBUILD_IMAGE)
+
+$(KBUILD_IMAGE): vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(bootvars-y) $@
+
+install:
+ $(Q)install -D -m 755 $(KBUILD_IMAGE) $(INSTALL_PATH)/vmlinux-$(KERNELRELEASE)
+ $(Q)install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE)
+ $(Q)install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE)
+
+define archhelp
+ echo ' install - install kernel into $(INSTALL_PATH)'
+ echo
+endef
diff --git a/arch/arm/mach-pxa/Makefile.boot b/arch/loongarch/boot/.gitignore
index bb6e353ecf06..49423ee96ef3 100644
--- a/arch/arm/mach-pxa/Makefile.boot
+++ b/arch/loongarch/boot/.gitignore
@@ -1,3 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
- zreladdr-y += 0xa0008000
-
+vmlinux*
diff --git a/arch/loongarch/boot/Makefile b/arch/loongarch/boot/Makefile
new file mode 100644
index 000000000000..0125b17edc98
--- /dev/null
+++ b/arch/loongarch/boot/Makefile
@@ -0,0 +1,16 @@
+#
+# arch/loongarch/boot/Makefile
+#
+# Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+#
+
+drop-sections := .comment .note .options .note.gnu.build-id
+strip-flags := $(addprefix --remove-section=,$(drop-sections)) -S
+OBJCOPYFLAGS_vmlinux.efi := -O binary $(strip-flags)
+
+targets := vmlinux
+quiet_cmd_strip = STRIP $@
+ cmd_strip = $(STRIP) -s -o $@ $<
+
+$(obj)/vmlinux: vmlinux FORCE
+ $(call if_changed,strip)
diff --git a/arch/loongarch/boot/dts/Makefile b/arch/loongarch/boot/dts/Makefile
new file mode 100644
index 000000000000..5f1f55e911ad
--- /dev/null
+++ b/arch/loongarch/boot/dts/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+dtstree := $(srctree)/$(src)
+
+dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
new file mode 100644
index 000000000000..eb9149786b6b
--- /dev/null
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -0,0 +1,771 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_PREEMPT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_USERFAULTFD=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_LOONGARCH=y
+CONFIG_64BIT=y
+CONFIG_MACH_LOONGSON64=y
+CONFIG_DMI=y
+CONFIG_EFI=y
+CONFIG_SMP=y
+CONFIG_HOTPLUG_CPU=y
+CONFIG_NR_CPUS=64
+CONFIG_NUMA=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_250=y
+CONFIG_ACPI=y
+CONFIG_ACPI_SPCR_TABLE=y
+CONFIG_ACPI_HOTPLUG_CPU=y
+CONFIG_ACPI_TAD=y
+CONFIG_ACPI_DOCK=y
+CONFIG_ACPI_IPMI=m
+CONFIG_ACPI_PCI_SLOT=y
+CONFIG_ACPI_HOTPLUG_MEMORY=y
+CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y
+CONFIG_EFI_CAPSULE_LOADER=m
+CONFIG_EFI_TEST=m
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_BINFMT_MISC=m
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_ZSWAP=y
+CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
+CONFIG_ZPOOL=y
+CONFIG_ZBUD=y
+CONFIG_Z3FOLD=y
+CONFIG_ZSMALLOC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_INET_ESP=m
+CONFIG_INET_UDP_DIAG=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BBR=m
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_NETWORK_PHY_TIMESTAMPING=y
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_OBJREF=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
+CONFIG_NFT_REJECT=m
+CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_NFCT=y
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
+CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_LOG_ARP=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_TABLES_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_SRH=m
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BPFILTER=y
+CONFIG_IP_SCTP=m
+CONFIG_RDS=y
+CONFIG_L2TP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_CGROUP=m
+CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_BPF=m
+CONFIG_OPENVSWITCH=m
+CONFIG_NETLINK_DIAG=y
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_BT=m
+CONFIG_BT_HCIBTUSB=m
+# CONFIG_BT_HCIBTUSB_BCM is not set
+CONFIG_CFG80211=m
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=m
+CONFIG_RFKILL=m
+CONFIG_RFKILL_INPUT=y
+CONFIG_NET_9P=y
+CONFIG_CEPH_LIB=m
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCIEAER=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_SHPC=y
+CONFIG_PCCARD=m
+CONFIG_YENTA=m
+CONFIG_RAPIDIO=y
+CONFIG_RAPIDIO_TSI721=y
+CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y
+CONFIG_RAPIDIO_ENUM_BASIC=m
+CONFIG_RAPIDIO_CHMAN=m
+CONFIG_RAPIDIO_MPORT_CDEV=m
+CONFIG_UEVENT_HELPER=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_CFI=m
+CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_CFI_INTELEXT=m
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_RAM=m
+CONFIG_MTD_ROM=m
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+CONFIG_PARPORT_SERIAL=y
+CONFIG_PARPORT_PC_FIFO=y
+CONFIG_ZRAM=m
+CONFIG_ZRAM_DEF_COMP_ZSTD=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_RBD=m
+CONFIG_BLK_DEV_NVME=y
+CONFIG_EEPROM_AT24=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_ISCSI_TCP=m
+CONFIG_SCSI_MVSAS=y
+# CONFIG_SCSI_MVSAS_DEBUG is not set
+CONFIG_SCSI_MVSAS_TASKLET=y
+CONFIG_SCSI_MVUMI=y
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=y
+CONFIG_MEGARAID_MAILBOX=y
+CONFIG_MEGARAID_LEGACY=y
+CONFIG_MEGARAID_SAS=y
+CONFIG_SCSI_MPT2SAS=y
+CONFIG_LIBFC=m
+CONFIG_LIBFCOE=m
+CONFIG_FCOE=m
+CONFIG_SCSI_QLOGIC_1280=m
+CONFIG_SCSI_QLA_FC=m
+CONFIG_TCM_QLA2XXX=m
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_SCSI_LPFC=m
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_PATA_ATIIXP=y
+CONFIG_PATA_PCMCIA=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_BCACHE=m
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
+CONFIG_DM_WRITECACHE=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_USER2=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_ISCSI_TARGET=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=y
+CONFIG_WIREGUARD=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
+CONFIG_VXLAN=y
+CONFIG_RIONET=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+CONFIG_BNX2=y
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+CONFIG_CHELSIO_T1=m
+CONFIG_CHELSIO_T1_1G=y
+CONFIG_CHELSIO_T3=m
+CONFIG_CHELSIO_T4=m
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_I825XX is not set
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IXGB=y
+CONFIG_IXGBE=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+CONFIG_R8169=y
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+# CONFIG_USB_NET_AX8817X is not set
+# CONFIG_USB_NET_AX88179_178A is not set
+CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_HUAWEI_CDC_NCM=m
+CONFIG_USB_NET_CDC_MBIM=m
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_ARMLINUX is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_ATH9K=m
+CONFIG_ATH9K_HTC=m
+CONFIG_IWLWIFI=m
+CONFIG_IWLDVM=m
+CONFIG_IWLMVM=m
+CONFIG_IWLWIFI_BCAST_FILTERING=y
+CONFIG_HOSTAP=m
+CONFIG_MT7601U=m
+CONFIG_RT2X00=m
+CONFIG_RT2800USB=m
+CONFIG_RTL8192CE=m
+CONFIG_RTL8192SE=m
+CONFIG_RTL8192DE=m
+CONFIG_RTL8723AE=m
+CONFIG_RTL8723BE=m
+CONFIG_RTL8188EE=m
+CONFIG_RTL8192EE=m
+CONFIG_RTL8821AE=m
+CONFIG_RTL8192CU=m
+# CONFIG_RTLWIFI_DEBUG is not set
+CONFIG_RTL8XXXU=m
+CONFIG_ZD1211RW=m
+CONFIG_USB_NET_RNDIS_WLAN=m
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_XTKBD=m
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_MOUSE_PS2_SENTELIC=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_RAW=m
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=16
+CONFIG_SERIAL_8250_RUNTIME_UARTS=16
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_PRINTER=m
+CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_HW_RANDOM=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_PIIX4=y
+CONFIG_I2C_GPIO=y
+CONFIG_SPI=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_LOONGSON=y
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM93=m
+CONFIG_SENSORS_W83795=m
+CONFIG_SENSORS_W83627HF=m
+CONFIG_RC_CORE=m
+CONFIG_LIRC=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_NEC_DECODER=m
+CONFIG_IR_RC5_DECODER=m
+CONFIG_IR_RC6_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_SONY_DECODER=m
+CONFIG_IR_SANYO_DECODER=m
+CONFIG_IR_SHARP_DECODER=m
+CONFIG_IR_MCE_KBD_DECODER=m
+CONFIG_IR_XMP_DECODER=m
+CONFIG_IR_IMON_DECODER=m
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_MEDIA_PCI_SUPPORT=y
+CONFIG_VIDEO_BT848=m
+CONFIG_DVB_BT8XX=m
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_RADEON_USERPTR=y
+CONFIG_DRM_AMDGPU=m
+CONFIG_DRM_AMDGPU_SI=y
+CONFIG_DRM_AMDGPU_CIK=y
+CONFIG_DRM_AMDGPU_USERPTR=y
+CONFIG_DRM_AST=y
+CONFIG_FB=y
+CONFIG_FB_EFI=y
+CONFIG_FB_RADEON=y
+CONFIG_LCD_PLATFORM=m
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+# CONFIG_SND_ISA is not set
+CONFIG_SND_BT87X=m
+CONFIG_SND_BT87X_OVERCLOCK=y
+CONFIG_SND_HDA_INTEL=y
+CONFIG_SND_HDA_HWDEP=y
+CONFIG_SND_HDA_INPUT_BEEP=y
+CONFIG_SND_HDA_PATCH_LOADER=y
+CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_SIGMATEL=y
+CONFIG_SND_HDA_CODEC_HDMI=y
+CONFIG_SND_HDA_CODEC_CONEXANT=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_HIDRAW=y
+CONFIG_UHID=m
+CONFIG_HID_A4TECH=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MULTITOUCH=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_STORAGE=m
+CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_USB_UAS=m
+CONFIG_USB_DWC2=y
+CONFIG_USB_DWC2_HOST=y
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_GADGET=y
+CONFIG_INFINIBAND=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_DMADEVICES=y
+CONFIG_UIO=m
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_DMEM_GENIRQ=m
+CONFIG_UIO_PCI_GENERIC=m
+# CONFIG_VIRTIO_MENU is not set
+CONFIG_COMEDI=m
+CONFIG_COMEDI_PCI_DRIVERS=m
+CONFIG_COMEDI_8255_PCI=m
+CONFIG_COMEDI_ADL_PCI6208=m
+CONFIG_COMEDI_ADL_PCI7X3X=m
+CONFIG_COMEDI_ADL_PCI8164=m
+CONFIG_COMEDI_ADL_PCI9111=m
+CONFIG_COMEDI_ADL_PCI9118=m
+CONFIG_COMEDI_ADV_PCI1710=m
+CONFIG_COMEDI_ADV_PCI1720=m
+CONFIG_COMEDI_ADV_PCI1723=m
+CONFIG_COMEDI_ADV_PCI1724=m
+CONFIG_COMEDI_ADV_PCI1760=m
+CONFIG_COMEDI_ADV_PCI_DIO=m
+CONFIG_COMEDI_NI_LABPC_PCI=m
+CONFIG_COMEDI_NI_PCIDIO=m
+CONFIG_COMEDI_NI_PCIMIO=m
+CONFIG_STAGING=y
+CONFIG_R8188EU=m
+# CONFIG_88EU_AP_MODE is not set
+CONFIG_PM_DEVFREQ=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_PWM=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_BTRFS_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_OVERLAY_FS=y
+CONFIG_OVERLAY_FS_INDEX=y
+CONFIG_OVERLAY_FS_XINO_AUTO=y
+CONFIG_OVERLAY_FS_METACOPY=y
+CONFIG_FSCACHE=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=936
+CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_BLOCKLAYOUT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_UTF8=y
+CONFIG_KEY_DH_OPERATIONS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_APPARMOR=y
+CONFIG_SECURITY_YAMA=y
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_PRINTK_TIME=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
new file mode 100644
index 000000000000..83bc0681e72b
--- /dev/null
+++ b/arch/loongarch/include/asm/Kbuild
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += dma-contiguous.h
+generic-y += export.h
+generic-y += parport.h
+generic-y += early_ioremap.h
+generic-y += qrwlock.h
+generic-y += qrwlock_types.h
+generic-y += spinlock.h
+generic-y += spinlock_types.h
+generic-y += rwsem.h
+generic-y += segment.h
+generic-y += user.h
+generic-y += stat.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += statfs.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += termios.h
+generic-y += termbits.h
+generic-y += poll.h
+generic-y += param.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += kvm_para.h
diff --git a/arch/loongarch/include/asm/acenv.h b/arch/loongarch/include/asm/acenv.h
new file mode 100644
index 000000000000..52f298f7293b
--- /dev/null
+++ b/arch/loongarch/include/asm/acenv.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * LoongArch specific ACPICA environments and implementation
+ *
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_ACENV_H
+#define _ASM_LOONGARCH_ACENV_H
+
+/*
+ * This header is required by ACPI core, but we have nothing to fill in
+ * right now. Will be updated later when needed.
+ */
+
+#endif /* _ASM_LOONGARCH_ACENV_H */
diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
new file mode 100644
index 000000000000..62044cd5b7bc
--- /dev/null
+++ b/arch/loongarch/include/asm/acpi.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_ACPI_H
+#define _ASM_LOONGARCH_ACPI_H
+
+#ifdef CONFIG_ACPI
+extern int acpi_strict;
+extern int acpi_disabled;
+extern int acpi_pci_disabled;
+extern int acpi_noirq;
+
+#define acpi_os_ioremap acpi_os_ioremap
+void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+
+static inline void disable_acpi(void)
+{
+ acpi_disabled = 1;
+ acpi_pci_disabled = 1;
+ acpi_noirq = 1;
+}
+
+static inline bool acpi_has_cpu_in_madt(void)
+{
+ return true;
+}
+
+extern struct list_head acpi_wakeup_device_list;
+
+#endif /* !CONFIG_ACPI */
+
+#define ACPI_TABLE_UPGRADE_MAX_PHYS ARCH_LOW_ADDRESS_LIMIT
+
+#endif /* _ASM_LOONGARCH_ACPI_H */
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
new file mode 100644
index 000000000000..b91e0733b2e5
--- /dev/null
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1996, 99 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999 by Silicon Graphics, Inc.
+ */
+#ifndef _ASM_ADDRSPACE_H
+#define _ASM_ADDRSPACE_H
+
+#include <linux/const.h>
+
+#include <asm/loongarch.h>
+
+/*
+ * This gives the physical RAM offset.
+ */
+#ifndef __ASSEMBLY__
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET _AC(0, UL)
+#endif
+extern unsigned long vm_map_base;
+#endif /* __ASSEMBLY__ */
+
+#ifndef IO_BASE
+#define IO_BASE CSR_DMW0_BASE
+#endif
+
+#ifndef CACHE_BASE
+#define CACHE_BASE CSR_DMW1_BASE
+#endif
+
+#ifndef UNCACHE_BASE
+#define UNCACHE_BASE CSR_DMW0_BASE
+#endif
+
+#define DMW_PABITS 48
+#define TO_PHYS_MASK ((1ULL << DMW_PABITS) - 1)
+
+/*
+ * Memory above this physical address will be considered highmem.
+ */
+#ifndef HIGHMEM_START
+#define HIGHMEM_START (_AC(1, UL) << _AC(DMW_PABITS, UL))
+#endif
+
+#define TO_PHYS(x) ( ((x) & TO_PHYS_MASK))
+#define TO_CACHE(x) (CACHE_BASE | ((x) & TO_PHYS_MASK))
+#define TO_UNCACHE(x) (UNCACHE_BASE | ((x) & TO_PHYS_MASK))
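+/*
+ * For example, with the usual window configuration (CACHE_BASE =
+ * 0x9000000000000000, UNCACHE_BASE = 0x8000000000000000), a physical
+ * address such as 0x200000 would map to TO_CACHE(0x200000) =
+ * 0x9000000000200000 and TO_UNCACHE(0x200000) = 0x8000000000200000.
+ */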
+
+/*
+ * This handles the memory map.
+ */
+#ifndef PAGE_OFFSET
+#define PAGE_OFFSET (CACHE_BASE + PHYS_OFFSET)
+#endif
+
+#ifndef FIXADDR_TOP
+#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
+#endif
+
+#ifdef __ASSEMBLY__
+#define _ATYPE_
+#define _ATYPE32_
+#define _ATYPE64_
+#define _CONST64_(x) x
+#else
+#define _ATYPE_ __PTRDIFF_TYPE__
+#define _ATYPE32_ int
+#define _ATYPE64_ __s64
+#ifdef CONFIG_64BIT
+#define _CONST64_(x) x ## L
+#else
+#define _CONST64_(x) x ## LL
+#endif
+#endif
+
+/*
+ * 32/64-bit LoongArch address spaces
+ */
+#ifdef __ASSEMBLY__
+#define _ACAST32_
+#define _ACAST64_
+#else
+#define _ACAST32_ (_ATYPE_)(_ATYPE32_) /* widen if necessary */
+#define _ACAST64_ (_ATYPE64_) /* do _not_ narrow */
+#endif
+
+#ifdef CONFIG_32BIT
+
+#define UVRANGE 0x00000000
+#define KPRANGE0 0x80000000
+#define KPRANGE1 0xa0000000
+#define KVRANGE 0xc0000000
+
+#else
+
+#define XUVRANGE _CONST64_(0x0000000000000000)
+#define XSPRANGE _CONST64_(0x4000000000000000)
+#define XKPRANGE _CONST64_(0x8000000000000000)
+#define XKVRANGE _CONST64_(0xc000000000000000)
+
+#endif
+
+/*
+ * Returns the physical address of a KPRANGEx / XKPRANGE address
+ */
+#define PHYSADDR(a) ((_ACAST64_(a)) & TO_PHYS_MASK)
+
+#endif /* _ASM_ADDRSPACE_H */
diff --git a/arch/loongarch/include/asm/asm-offsets.h b/arch/loongarch/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d9ad88d293e7
--- /dev/null
+++ b/arch/loongarch/include/asm/asm-offsets.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <generated/asm-offsets.h>
diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..ed06d3997420
--- /dev/null
+++ b/arch/loongarch/include/asm/asm-prototypes.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/uaccess.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
diff --git a/arch/loongarch/include/asm/asm.h b/arch/loongarch/include/asm/asm.h
new file mode 100644
index 000000000000..40eea6aa469e
--- /dev/null
+++ b/arch/loongarch/include/asm/asm.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Some useful macros for LoongArch assembler code
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1995, 1996, 1997, 1999, 2001 by Ralf Baechle
+ * Copyright (C) 1999 by Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+#ifndef __ASM_ASM_H
+#define __ASM_ASM_H
+
+/* LoongArch pref instruction. */
+#ifdef CONFIG_CPU_HAS_PREFETCH
+
+#define PREF(hint, addr, offs) \
+ preld hint, addr, offs; \
+
+#define PREFX(hint, addr, index) \
+ preldx hint, addr, index; \
+
+#else /* !CONFIG_CPU_HAS_PREFETCH */
+
+#define PREF(hint, addr, offs)
+#define PREFX(hint, addr, index)
+
+#endif /* !CONFIG_CPU_HAS_PREFETCH */
+
+/*
+ * Stack alignment
+ */
+#define STACK_ALIGN ~(0xf)
+
+/*
+ * Macros to handle different pointer/register sizes for 32/64-bit code
+ */
+
+/*
+ * Size of a register
+ */
+#ifndef __loongarch64
+#define SZREG 4
+#else
+#define SZREG 8
+#endif
+
+/*
+ * Use the following macros in assembler code to load/store registers,
+ * pointers etc.
+ */
+#if (SZREG == 4)
+#define REG_L ld.w
+#define REG_S st.w
+#define REG_ADD add.w
+#define REG_SUB sub.w
+#else /* SZREG == 8 */
+#define REG_L ld.d
+#define REG_S st.d
+#define REG_ADD add.d
+#define REG_SUB sub.d
+#endif
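+/*
+ * For instance, on a 64-bit kernel "REG_L t0, sp, 8" assembles as
+ * "ld.d t0, sp, 8", while a 32-bit build would get "ld.w t0, sp, 8".
+ */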
+
+/*
+ * How to add/sub/load/store/shift C int variables.
+ */
+#if (__SIZEOF_INT__ == 4)
+#define INT_ADD add.w
+#define INT_ADDI addi.w
+#define INT_SUB sub.w
+#define INT_L ld.w
+#define INT_S st.w
+#define INT_SLL slli.w
+#define INT_SLLV sll.w
+#define INT_SRL srli.w
+#define INT_SRLV srl.w
+#define INT_SRA srai.w
+#define INT_SRAV sra.w
+#endif
+
+#if (__SIZEOF_INT__ == 8)
+#define INT_ADD add.d
+#define INT_ADDI addi.d
+#define INT_SUB sub.d
+#define INT_L ld.d
+#define INT_S st.d
+#define INT_SLL slli.d
+#define INT_SLLV sll.d
+#define INT_SRL srli.d
+#define INT_SRLV srl.d
+#define INT_SRA srai.d
+#define INT_SRAV sra.d
+#endif
+
+/*
+ * How to add/sub/load/store/shift C long variables.
+ */
+#if (__SIZEOF_LONG__ == 4)
+#define LONG_ADD add.w
+#define LONG_ADDI addi.w
+#define LONG_SUB sub.w
+#define LONG_L ld.w
+#define LONG_S st.w
+#define LONG_SLL slli.w
+#define LONG_SLLV sll.w
+#define LONG_SRL srli.w
+#define LONG_SRLV srl.w
+#define LONG_SRA srai.w
+#define LONG_SRAV sra.w
+
+#ifdef __ASSEMBLY__
+#define LONG .word
+#endif
+#define LONGSIZE 4
+#define LONGMASK 3
+#define LONGLOG 2
+#endif
+
+#if (__SIZEOF_LONG__ == 8)
+#define LONG_ADD add.d
+#define LONG_ADDI addi.d
+#define LONG_SUB sub.d
+#define LONG_L ld.d
+#define LONG_S st.d
+#define LONG_SLL slli.d
+#define LONG_SLLV sll.d
+#define LONG_SRL srli.d
+#define LONG_SRLV srl.d
+#define LONG_SRA srai.d
+#define LONG_SRAV sra.d
+
+#ifdef __ASSEMBLY__
+#define LONG .dword
+#endif
+#define LONGSIZE 8
+#define LONGMASK 7
+#define LONGLOG 3
+#endif
+
+/*
+ * How to add/sub/load/store/shift pointers.
+ */
+#if (__SIZEOF_POINTER__ == 4)
+#define PTR_ADD add.w
+#define PTR_ADDI addi.w
+#define PTR_SUB sub.w
+#define PTR_L ld.w
+#define PTR_S st.w
+#define PTR_LI li.w
+#define PTR_SLL slli.w
+#define PTR_SLLV sll.w
+#define PTR_SRL srli.w
+#define PTR_SRLV srl.w
+#define PTR_SRA srai.w
+#define PTR_SRAV sra.w
+
+#define PTR_SCALESHIFT 2
+
+#ifdef __ASSEMBLY__
+#define PTR .word
+#endif
+#define PTRSIZE 4
+#define PTRLOG 2
+#endif
+
+#if (__SIZEOF_POINTER__ == 8)
+#define PTR_ADD add.d
+#define PTR_ADDI addi.d
+#define PTR_SUB sub.d
+#define PTR_L ld.d
+#define PTR_S st.d
+#define PTR_LI li.d
+#define PTR_SLL slli.d
+#define PTR_SLLV sll.d
+#define PTR_SRL srli.d
+#define PTR_SRLV srl.d
+#define PTR_SRA srai.d
+#define PTR_SRAV sra.d
+
+#define PTR_SCALESHIFT 3
+
+#ifdef __ASSEMBLY__
+#define PTR .dword
+#endif
+#define PTRSIZE 8
+#define PTRLOG 3
+#endif
+
+#endif /* __ASM_ASM_H */
diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h
new file mode 100644
index 000000000000..a1a04083bd67
--- /dev/null
+++ b/arch/loongarch/include/asm/asmmacro.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_ASMMACRO_H
+#define _ASM_ASMMACRO_H
+
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+#include <asm/loongarch.h>
+
+ .macro parse_v var val
+ \var = \val
+ .endm
+
+ .macro parse_r var r
+ \var = -1
+ .ifc \r, $r0
+ \var = 0
+ .endif
+ .ifc \r, $r1
+ \var = 1
+ .endif
+ .ifc \r, $r2
+ \var = 2
+ .endif
+ .ifc \r, $r3
+ \var = 3
+ .endif
+ .ifc \r, $r4
+ \var = 4
+ .endif
+ .ifc \r, $r5
+ \var = 5
+ .endif
+ .ifc \r, $r6
+ \var = 6
+ .endif
+ .ifc \r, $r7
+ \var = 7
+ .endif
+ .ifc \r, $r8
+ \var = 8
+ .endif
+ .ifc \r, $r9
+ \var = 9
+ .endif
+ .ifc \r, $r10
+ \var = 10
+ .endif
+ .ifc \r, $r11
+ \var = 11
+ .endif
+ .ifc \r, $r12
+ \var = 12
+ .endif
+ .ifc \r, $r13
+ \var = 13
+ .endif
+ .ifc \r, $r14
+ \var = 14
+ .endif
+ .ifc \r, $r15
+ \var = 15
+ .endif
+ .ifc \r, $r16
+ \var = 16
+ .endif
+ .ifc \r, $r17
+ \var = 17
+ .endif
+ .ifc \r, $r18
+ \var = 18
+ .endif
+ .ifc \r, $r19
+ \var = 19
+ .endif
+ .ifc \r, $r20
+ \var = 20
+ .endif
+ .ifc \r, $r21
+ \var = 21
+ .endif
+ .ifc \r, $r22
+ \var = 22
+ .endif
+ .ifc \r, $r23
+ \var = 23
+ .endif
+ .ifc \r, $r24
+ \var = 24
+ .endif
+ .ifc \r, $r25
+ \var = 25
+ .endif
+ .ifc \r, $r26
+ \var = 26
+ .endif
+ .ifc \r, $r27
+ \var = 27
+ .endif
+ .ifc \r, $r28
+ \var = 28
+ .endif
+ .ifc \r, $r29
+ \var = 29
+ .endif
+ .ifc \r, $r30
+ \var = 30
+ .endif
+ .ifc \r, $r31
+ \var = 31
+ .endif
+ .iflt \var
+ .error "Unable to parse register name \r"
+ .endif
+ .endm
+
+ .macro cpu_save_nonscratch thread
+ stptr.d s0, \thread, THREAD_REG23
+ stptr.d s1, \thread, THREAD_REG24
+ stptr.d s2, \thread, THREAD_REG25
+ stptr.d s3, \thread, THREAD_REG26
+ stptr.d s4, \thread, THREAD_REG27
+ stptr.d s5, \thread, THREAD_REG28
+ stptr.d s6, \thread, THREAD_REG29
+ stptr.d s7, \thread, THREAD_REG30
+ stptr.d s8, \thread, THREAD_REG31
+ stptr.d sp, \thread, THREAD_REG03
+ stptr.d fp, \thread, THREAD_REG22
+ .endm
+
+ .macro cpu_restore_nonscratch thread
+ ldptr.d s0, \thread, THREAD_REG23
+ ldptr.d s1, \thread, THREAD_REG24
+ ldptr.d s2, \thread, THREAD_REG25
+ ldptr.d s3, \thread, THREAD_REG26
+ ldptr.d s4, \thread, THREAD_REG27
+ ldptr.d s5, \thread, THREAD_REG28
+ ldptr.d s6, \thread, THREAD_REG29
+ ldptr.d s7, \thread, THREAD_REG30
+ ldptr.d s8, \thread, THREAD_REG31
+ ldptr.d ra, \thread, THREAD_REG01
+ ldptr.d sp, \thread, THREAD_REG03
+ ldptr.d fp, \thread, THREAD_REG22
+ .endm
+
+ .macro fpu_save_csr thread tmp
+ movfcsr2gr \tmp, fcsr0
+ stptr.w \tmp, \thread, THREAD_FCSR
+ .endm
+
+ .macro fpu_restore_csr thread tmp
+ ldptr.w \tmp, \thread, THREAD_FCSR
+ movgr2fcsr fcsr0, \tmp
+ .endm
+
+ .macro fpu_save_cc thread tmp0 tmp1
+ movcf2gr \tmp0, $fcc0
+ move \tmp1, \tmp0
+ movcf2gr \tmp0, $fcc1
+ bstrins.d \tmp1, \tmp0, 15, 8
+ movcf2gr \tmp0, $fcc2
+ bstrins.d \tmp1, \tmp0, 23, 16
+ movcf2gr \tmp0, $fcc3
+ bstrins.d \tmp1, \tmp0, 31, 24
+ movcf2gr \tmp0, $fcc4
+ bstrins.d \tmp1, \tmp0, 39, 32
+ movcf2gr \tmp0, $fcc5
+ bstrins.d \tmp1, \tmp0, 47, 40
+ movcf2gr \tmp0, $fcc6
+ bstrins.d \tmp1, \tmp0, 55, 48
+ movcf2gr \tmp0, $fcc7
+ bstrins.d \tmp1, \tmp0, 63, 56
+ stptr.d \tmp1, \thread, THREAD_FCC
+ .endm
+
+ .macro fpu_restore_cc thread tmp0 tmp1
+ ldptr.d \tmp0, \thread, THREAD_FCC
+ bstrpick.d \tmp1, \tmp0, 7, 0
+ movgr2cf $fcc0, \tmp1
+ bstrpick.d \tmp1, \tmp0, 15, 8
+ movgr2cf $fcc1, \tmp1
+ bstrpick.d \tmp1, \tmp0, 23, 16
+ movgr2cf $fcc2, \tmp1
+ bstrpick.d \tmp1, \tmp0, 31, 24
+ movgr2cf $fcc3, \tmp1
+ bstrpick.d \tmp1, \tmp0, 39, 32
+ movgr2cf $fcc4, \tmp1
+ bstrpick.d \tmp1, \tmp0, 47, 40
+ movgr2cf $fcc5, \tmp1
+ bstrpick.d \tmp1, \tmp0, 55, 48
+ movgr2cf $fcc6, \tmp1
+ bstrpick.d \tmp1, \tmp0, 63, 56
+ movgr2cf $fcc7, \tmp1
+ .endm
+
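+/*
+ * The FPR save/restore macros below first move the base register up to
+ * THREAD_FPR0 and then use small "THREAD_FPRn - THREAD_FPR0" offsets,
+ * presumably because the fst.d/fld.d immediate field is too narrow to
+ * reach the FPR area directly from the start of the thread structure.
+ */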
+ .macro fpu_save_double thread tmp
+ li.w \tmp, THREAD_FPR0
+ PTR_ADD \tmp, \tmp, \thread
+ fst.d $f0, \tmp, THREAD_FPR0 - THREAD_FPR0
+ fst.d $f1, \tmp, THREAD_FPR1 - THREAD_FPR0
+ fst.d $f2, \tmp, THREAD_FPR2 - THREAD_FPR0
+ fst.d $f3, \tmp, THREAD_FPR3 - THREAD_FPR0
+ fst.d $f4, \tmp, THREAD_FPR4 - THREAD_FPR0
+ fst.d $f5, \tmp, THREAD_FPR5 - THREAD_FPR0
+ fst.d $f6, \tmp, THREAD_FPR6 - THREAD_FPR0
+ fst.d $f7, \tmp, THREAD_FPR7 - THREAD_FPR0
+ fst.d $f8, \tmp, THREAD_FPR8 - THREAD_FPR0
+ fst.d $f9, \tmp, THREAD_FPR9 - THREAD_FPR0
+ fst.d $f10, \tmp, THREAD_FPR10 - THREAD_FPR0
+ fst.d $f11, \tmp, THREAD_FPR11 - THREAD_FPR0
+ fst.d $f12, \tmp, THREAD_FPR12 - THREAD_FPR0
+ fst.d $f13, \tmp, THREAD_FPR13 - THREAD_FPR0
+ fst.d $f14, \tmp, THREAD_FPR14 - THREAD_FPR0
+ fst.d $f15, \tmp, THREAD_FPR15 - THREAD_FPR0
+ fst.d $f16, \tmp, THREAD_FPR16 - THREAD_FPR0
+ fst.d $f17, \tmp, THREAD_FPR17 - THREAD_FPR0
+ fst.d $f18, \tmp, THREAD_FPR18 - THREAD_FPR0
+ fst.d $f19, \tmp, THREAD_FPR19 - THREAD_FPR0
+ fst.d $f20, \tmp, THREAD_FPR20 - THREAD_FPR0
+ fst.d $f21, \tmp, THREAD_FPR21 - THREAD_FPR0
+ fst.d $f22, \tmp, THREAD_FPR22 - THREAD_FPR0
+ fst.d $f23, \tmp, THREAD_FPR23 - THREAD_FPR0
+ fst.d $f24, \tmp, THREAD_FPR24 - THREAD_FPR0
+ fst.d $f25, \tmp, THREAD_FPR25 - THREAD_FPR0
+ fst.d $f26, \tmp, THREAD_FPR26 - THREAD_FPR0
+ fst.d $f27, \tmp, THREAD_FPR27 - THREAD_FPR0
+ fst.d $f28, \tmp, THREAD_FPR28 - THREAD_FPR0
+ fst.d $f29, \tmp, THREAD_FPR29 - THREAD_FPR0
+ fst.d $f30, \tmp, THREAD_FPR30 - THREAD_FPR0
+ fst.d $f31, \tmp, THREAD_FPR31 - THREAD_FPR0
+ .endm
+
+ .macro fpu_restore_double thread tmp
+ li.w \tmp, THREAD_FPR0
+ PTR_ADD \tmp, \tmp, \thread
+ fld.d $f0, \tmp, THREAD_FPR0 - THREAD_FPR0
+ fld.d $f1, \tmp, THREAD_FPR1 - THREAD_FPR0
+ fld.d $f2, \tmp, THREAD_FPR2 - THREAD_FPR0
+ fld.d $f3, \tmp, THREAD_FPR3 - THREAD_FPR0
+ fld.d $f4, \tmp, THREAD_FPR4 - THREAD_FPR0
+ fld.d $f5, \tmp, THREAD_FPR5 - THREAD_FPR0
+ fld.d $f6, \tmp, THREAD_FPR6 - THREAD_FPR0
+ fld.d $f7, \tmp, THREAD_FPR7 - THREAD_FPR0
+ fld.d $f8, \tmp, THREAD_FPR8 - THREAD_FPR0
+ fld.d $f9, \tmp, THREAD_FPR9 - THREAD_FPR0
+ fld.d $f10, \tmp, THREAD_FPR10 - THREAD_FPR0
+ fld.d $f11, \tmp, THREAD_FPR11 - THREAD_FPR0
+ fld.d $f12, \tmp, THREAD_FPR12 - THREAD_FPR0
+ fld.d $f13, \tmp, THREAD_FPR13 - THREAD_FPR0
+ fld.d $f14, \tmp, THREAD_FPR14 - THREAD_FPR0
+ fld.d $f15, \tmp, THREAD_FPR15 - THREAD_FPR0
+ fld.d $f16, \tmp, THREAD_FPR16 - THREAD_FPR0
+ fld.d $f17, \tmp, THREAD_FPR17 - THREAD_FPR0
+ fld.d $f18, \tmp, THREAD_FPR18 - THREAD_FPR0
+ fld.d $f19, \tmp, THREAD_FPR19 - THREAD_FPR0
+ fld.d $f20, \tmp, THREAD_FPR20 - THREAD_FPR0
+ fld.d $f21, \tmp, THREAD_FPR21 - THREAD_FPR0
+ fld.d $f22, \tmp, THREAD_FPR22 - THREAD_FPR0
+ fld.d $f23, \tmp, THREAD_FPR23 - THREAD_FPR0
+ fld.d $f24, \tmp, THREAD_FPR24 - THREAD_FPR0
+ fld.d $f25, \tmp, THREAD_FPR25 - THREAD_FPR0
+ fld.d $f26, \tmp, THREAD_FPR26 - THREAD_FPR0
+ fld.d $f27, \tmp, THREAD_FPR27 - THREAD_FPR0
+ fld.d $f28, \tmp, THREAD_FPR28 - THREAD_FPR0
+ fld.d $f29, \tmp, THREAD_FPR29 - THREAD_FPR0
+ fld.d $f30, \tmp, THREAD_FPR30 - THREAD_FPR0
+ fld.d $f31, \tmp, THREAD_FPR31 - THREAD_FPR0
+ .endm
+
+.macro not dst src
+ nor \dst, \src, zero
+.endm
+
+.macro bgt r0 r1 label
+ blt \r1, \r0, \label
+.endm
+
+.macro bltz r0 label
+ blt \r0, zero, \label
+.endm
+
+.macro bgez r0 label
+ bge \r0, zero, \label
+.endm
+
+#endif /* _ASM_ASMMACRO_H */
diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h
new file mode 100644
index 000000000000..979367ad4e2c
--- /dev/null
+++ b/arch/loongarch/include/asm/atomic.h
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Atomic operations.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <linux/types.h>
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+#include <asm/compiler.h>
+
+#if __SIZEOF_LONG__ == 4
+#define __LL "ll.w "
+#define __SC "sc.w "
+#define __AMADD "amadd.w "
+#define __AMAND_DB "amand_db.w "
+#define __AMOR_DB "amor_db.w "
+#define __AMXOR_DB "amxor_db.w "
+#elif __SIZEOF_LONG__ == 8
+#define __LL "ll.d "
+#define __SC "sc.d "
+#define __AMADD "amadd.d "
+#define __AMAND_DB "amand_db.d "
+#define __AMOR_DB "amor_db.d "
+#define __AMXOR_DB "amxor_db.d "
+#endif
+
+#define ATOMIC_INIT(i) { (i) }
+
+/*
+ * arch_atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+
+/*
+ * arch_atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#define arch_atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
+
+#define ATOMIC_OP(op, I, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ __asm__ __volatile__( \
+ "am"#asm_op"_db.w" " $zero, %1, %0 \n" \
+ : "+ZB" (v->counter) \
+ : "r" (I) \
+ : "memory"); \
+}
+
+#define ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
+{ \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "am"#asm_op"_db.w" " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result c_op I; \
+}
+
+#define ATOMIC_FETCH_OP(op, I, asm_op) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+{ \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "am"#asm_op"_db.w" " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result; \
+}
+
+#define ATOMIC_OPS(op, I, asm_op, c_op) \
+ ATOMIC_OP(op, I, asm_op) \
+ ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
+ ATOMIC_FETCH_OP(op, I, asm_op)
+
+ATOMIC_OPS(add, i, add, +)
+ATOMIC_OPS(sub, -i, add, +)
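+/*
+ * Each ATOMIC_OPS() line above expands to three helpers; for example
+ * ATOMIC_OPS(add, i, add, +) generates arch_atomic_add(),
+ * arch_atomic_add_return_relaxed() and arch_atomic_fetch_add_relaxed(),
+ * all built on the AMO instructions (here amadd_db.w).
+ */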
+
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+
+#define ATOMIC_OPS(op, I, asm_op) \
+ ATOMIC_OP(op, I, asm_op) \
+ ATOMIC_FETCH_OP(op, I, asm_op)
+
+ATOMIC_OPS(and, i, and)
+ATOMIC_OPS(or, i, or)
+ATOMIC_OPS(xor, i, xor)
+
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
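+/*
+ * LL/SC loop: load the counter, branch to the exit if it already equals
+ * @u, otherwise attempt to store the incremented value and retry if the
+ * sc.w fails. The __WEAK_LLSC_MB at the exit label apparently supplies
+ * memory ordering on the path where no store-conditional is executed.
+ */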
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0: ll.w %[p], %[c]\n"
+ " beq %[p], %[u], 1f\n"
+ " add.w %[rc], %[p], %[a]\n"
+ " sc.w %[rc], %[c]\n"
+ " beqz %[rc], 0b\n"
+ " b 2f\n"
+ "1:\n"
+ __WEAK_LLSC_MB
+ "2:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc),
+ [c]"=ZB" (v->counter)
+ : [a]"r" (a), [u]"r" (u)
+ : "memory");
+
+ return prev;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
+
+/*
+ * arch_atomic_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically test @v and subtract @i if @v is greater than or equal to @i.
+ * The function returns the old value of @v minus @i.
+ */
+static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
+{
+ int result;
+ int temp;
+
+ if (__builtin_constant_p(i)) {
+ __asm__ __volatile__(
+ "1: ll.w %1, %2 # atomic_sub_if_positive\n"
+ " addi.w %0, %1, %3 \n"
+ " or %1, %0, $zero \n"
+ " blt %0, $zero, 2f \n"
+ " sc.w %1, %2 \n"
+ " beq $zero, %1, 1b \n"
+ "2: \n"
+ __WEAK_LLSC_MB
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF_SMALL_ASM() (v->counter)
+ : "I" (-i));
+ } else {
+ __asm__ __volatile__(
+ "1: ll.w %1, %2 # atomic_sub_if_positive\n"
+ " sub.w %0, %1, %3 \n"
+ " or %1, %0, $zero \n"
+ " blt %0, $zero, 2f \n"
+ " sc.w %1, %2 \n"
+ " beq $zero, %1, 1b \n"
+ "2: \n"
+ __WEAK_LLSC_MB
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF_SMALL_ASM() (v->counter)
+ : "r" (i));
+ }
+
+ return result;
+}
+
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
+
+/*
+ * arch_atomic_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ */
+#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(1, v)
+
+#ifdef CONFIG_64BIT
+
+#define ATOMIC64_INIT(i) { (i) }
+
+/*
+ * arch_atomic64_read - read atomic variable
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)
+
+/*
+ * arch_atomic64_set - set atomic variable
+ * @v: pointer of type atomic64_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
+
+#define ATOMIC64_OP(op, I, asm_op) \
+static inline void arch_atomic64_##op(long i, atomic64_t *v) \
+{ \
+ __asm__ __volatile__( \
+ "am"#asm_op"_db.d " " $zero, %1, %0 \n" \
+ : "+ZB" (v->counter) \
+ : "r" (I) \
+ : "memory"); \
+}
+
+#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
+static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \
+{ \
+ long result; \
+ __asm__ __volatile__( \
+ "am"#asm_op"_db.d " " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result c_op I; \
+}
+
+#define ATOMIC64_FETCH_OP(op, I, asm_op) \
+static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \
+{ \
+ long result; \
+ \
+ __asm__ __volatile__( \
+ "am"#asm_op"_db.d " " %1, %2, %0 \n" \
+ : "+ZB" (v->counter), "=&r" (result) \
+ : "r" (I) \
+ : "memory"); \
+ \
+ return result; \
+}
+
+#define ATOMIC64_OPS(op, I, asm_op, c_op) \
+ ATOMIC64_OP(op, I, asm_op) \
+ ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
+ ATOMIC64_FETCH_OP(op, I, asm_op)
+
+ATOMIC64_OPS(add, i, add, +)
+ATOMIC64_OPS(sub, -i, add, +)
+
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+
+#define ATOMIC64_OPS(op, I, asm_op) \
+ ATOMIC64_OP(op, I, asm_op) \
+ ATOMIC64_FETCH_OP(op, I, asm_op)
+
+ATOMIC64_OPS(and, i, and)
+ATOMIC64_OPS(or, i, or)
+ATOMIC64_OPS(xor, i, xor)
+
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+{
+ long prev, rc;
+
+ __asm__ __volatile__ (
+ "0: ll.d %[p], %[c]\n"
+ " beq %[p], %[u], 1f\n"
+ " add.d %[rc], %[p], %[a]\n"
+ " sc.d %[rc], %[c]\n"
+ " beqz %[rc], 0b\n"
+ " b 2f\n"
+ "1:\n"
+ __WEAK_LLSC_MB
+ "2:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc),
+ [c] "=ZB" (v->counter)
+ : [a]"r" (a), [u]"r" (u)
+ : "memory");
+
+ return prev;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
+
+/*
+ * arch_atomic64_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically test @v and subtract @i if @v is greater than or equal to @i.
+ * The function returns the old value of @v minus @i.
+ */
+static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
+{
+ long result;
+ long temp;
+
+ if (__builtin_constant_p(i)) {
+ __asm__ __volatile__(
+ "1: ll.d %1, %2 # atomic64_sub_if_positive \n"
+ " addi.d %0, %1, %3 \n"
+ " or %1, %0, $zero \n"
+ " blt %0, $zero, 2f \n"
+ " sc.d %1, %2 \n"
+ " beq %1, $zero, 1b \n"
+ "2: \n"
+ __WEAK_LLSC_MB
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF_SMALL_ASM() (v->counter)
+ : "I" (-i));
+ } else {
+ __asm__ __volatile__(
+ "1: ll.d %1, %2 # atomic64_sub_if_positive \n"
+ " sub.d %0, %1, %3 \n"
+ " or %1, %0, $zero \n"
+ " blt %0, $zero, 2f \n"
+ " sc.d %1, %2 \n"
+ " beq %1, $zero, 1b \n"
+ "2: \n"
+ __WEAK_LLSC_MB
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF_SMALL_ASM() (v->counter)
+ : "r" (i));
+ }
+
+ return result;
+}
+
+#define arch_atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
+
+/*
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic64_t
+ */
+#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(1, v)
+
+#endif /* CONFIG_64BIT */
+
+#endif /* _ASM_ATOMIC_H */
diff --git a/arch/loongarch/include/asm/barrier.h b/arch/loongarch/include/asm/barrier.h
new file mode 100644
index 000000000000..b6517eeeb141
--- /dev/null
+++ b/arch/loongarch/include/asm/barrier.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#define __sync() __asm__ __volatile__("dbar 0" : : : "memory")
+
+#define fast_wmb() __sync()
+#define fast_rmb() __sync()
+#define fast_mb() __sync()
+#define fast_iob() __sync()
+#define wbflush() __sync()
+
+#define wmb() fast_wmb()
+#define rmb() fast_rmb()
+#define mb() fast_mb()
+#define iob() fast_iob()
+
+#define __smp_mb() __asm__ __volatile__("dbar 0" : : : "memory")
+#define __smp_rmb() __asm__ __volatile__("dbar 0" : : : "memory")
+#define __smp_wmb() __asm__ __volatile__("dbar 0" : : : "memory")
+
+#ifdef CONFIG_SMP
+#define __WEAK_LLSC_MB " dbar 0 \n"
+#else
+#define __WEAK_LLSC_MB " \n"
+#endif
+
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
+
+/**
+ * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ *
+ * Returns:
+ * 0 - (@index < @size)
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+ unsigned long size)
+{
+ unsigned long mask;
+
+ __asm__ __volatile__(
+ "sltu %0, %1, %2\n\t"
+#if (__SIZEOF_LONG__ == 4)
+ "sub.w %0, $r0, %0\n\t"
+#elif (__SIZEOF_LONG__ == 8)
+ "sub.d %0, $r0, %0\n\t"
+#endif
+ : "=r" (mask)
+ : "r" (index), "r" (size)
+ :);
+
+ return mask;
+}
+
+#define __smp_load_acquire(p) \
+({ \
+ union { typeof(*p) __val; char __c[1]; } __u; \
+ unsigned long __tmp = 0; \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 1: \
+ *(__u8 *)__u.__c = *(volatile __u8 *)p; \
+ __smp_mb(); \
+ break; \
+ case 2: \
+ *(__u16 *)__u.__c = *(volatile __u16 *)p; \
+ __smp_mb(); \
+ break; \
+ case 4: \
+ __asm__ __volatile__( \
+ "amor_db.w %[val], %[tmp], %[mem] \n" \
+ : [val] "=&r" (*(__u32 *)__u.__c) \
+ : [mem] "ZB" (*(u32 *) p), [tmp] "r" (__tmp) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__( \
+ "amor_db.d %[val], %[tmp], %[mem] \n" \
+ : [val] "=&r" (*(__u64 *)__u.__c) \
+ : [mem] "ZB" (*(u64 *) p), [tmp] "r" (__tmp) \
+ : "memory"); \
+ break; \
+ } \
+ (typeof(*p))__u.__val; \
+})
+
+#define __smp_store_release(p, v) \
+do { \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(*p)) (v) }; \
+ unsigned long __tmp; \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 1: \
+ __smp_mb(); \
+ *(volatile __u8 *)p = *(__u8 *)__u.__c; \
+ break; \
+ case 2: \
+ __smp_mb(); \
+ *(volatile __u16 *)p = *(__u16 *)__u.__c; \
+ break; \
+ case 4: \
+ __asm__ __volatile__( \
+ "amswap_db.w %[tmp], %[val], %[mem] \n" \
+ : [mem] "+ZB" (*(u32 *)p), [tmp] "=&r" (__tmp) \
+ : [val] "r" (*(__u32 *)__u.__c) \
+ : ); \
+ break; \
+ case 8: \
+ __asm__ __volatile__( \
+ "amswap_db.d %[tmp], %[val], %[mem] \n" \
+ : [mem] "+ZB" (*(u64 *)p), [tmp] "=&r" (__tmp) \
+ : [val] "r" (*(__u64 *)__u.__c) \
+ : ); \
+ break; \
+ } \
+} while (0)
+
+#define __smp_store_mb(p, v) \
+do { \
+ union { typeof(p) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(p)) (v) }; \
+ unsigned long __tmp; \
+ switch (sizeof(p)) { \
+ case 1: \
+ *(volatile __u8 *)&p = *(__u8 *)__u.__c; \
+ __smp_mb(); \
+ break; \
+ case 2: \
+ *(volatile __u16 *)&p = *(__u16 *)__u.__c; \
+ __smp_mb(); \
+ break; \
+ case 4: \
+ __asm__ __volatile__( \
+ "amswap_db.w %[tmp], %[val], %[mem] \n" \
+ : [mem] "+ZB" (*(u32 *)&p), [tmp] "=&r" (__tmp) \
+ : [val] "r" (*(__u32 *)__u.__c) \
+ : ); \
+ break; \
+ case 8: \
+ __asm__ __volatile__( \
+ "amswap_db.d %[tmp], %[val], %[mem] \n" \
+ : [mem] "+ZB" (*(u64 *)&p), [tmp] "=&r" (__tmp) \
+ : [val] "r" (*(__u64 *)__u.__c) \
+ : ); \
+ break; \
+ } \
+} while (0)
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASM_BARRIER_H */
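As the kernel-doc above says, array_index_mask_nospec() yields ~0UL when index < size and 0 otherwise. The usual consumer is the generic array_index_nospec() pattern, which ANDs the mask into the index so that an out-of-bounds index (including one reached under branch misprediction) collapses to 0. A hedged sketch of that use:

/* Illustrative only; <linux/nospec.h> provides the real array_index_nospec(). */
static inline unsigned long clamp_index_nospec(unsigned long index,
					       unsigned long size)
{
	unsigned long mask = array_index_mask_nospec(index, size);

	return index & mask;	/* in-bounds index unchanged, OOB index forced to 0 */
}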
diff --git a/arch/loongarch/include/asm/bitops.h b/arch/loongarch/include/asm/bitops.h
new file mode 100644
index 000000000000..69e00f8d8034
--- /dev/null
+++ b/arch/loongarch/include/asm/bitops.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_BITOPS_H
+#define _ASM_BITOPS_H
+
+#include <linux/compiler.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm/barrier.h>
+
+#include <asm-generic/bitops/builtin-ffs.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-__fls.h>
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls64.h>
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* _ASM_BITOPS_H */
diff --git a/arch/loongarch/include/asm/bitrev.h b/arch/loongarch/include/asm/bitrev.h
new file mode 100644
index 000000000000..46f275b9cdf7
--- /dev/null
+++ b/arch/loongarch/include/asm/bitrev.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __LOONGARCH_ASM_BITREV_H__
+#define __LOONGARCH_ASM_BITREV_H__
+
+#include <linux/swab.h>
+
+static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
+{
+ u32 ret;
+
+ asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(__swab32(x)));
+ return ret;
+}
+
+static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
+{
+ u16 ret;
+
+ asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(__swab16(x)));
+ return ret;
+}
+
+static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
+{
+ u8 ret;
+
+ asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(x));
+ return ret;
+}
+
+#endif /* __LOONGARCH_ASM_BITREV_H__ */
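The bitrev.4b instruction reverses the bit order within each byte, so __arch_bitrev32() byte-swaps the word first and lets the instruction finish the job; the composition reverses all 32 bits. A small host-side C check of that identity (plain C, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static uint32_t rev8(uint8_t b)			/* reverse bits within one byte */
{
	uint32_t r = 0;

	for (int i = 0; i < 8; i++)
		r |= ((b >> i) & 1u) << (7 - i);
	return r;
}

static uint32_t rev32_ref(uint32_t x)		/* reference: reverse all 32 bits */
{
	uint32_t r = 0;

	for (int i = 0; i < 32; i++)
		r |= ((x >> i) & 1u) << (31 - i);
	return r;
}

int main(void)
{
	uint32_t x = 0x12345678;
	uint32_t swapped = __builtin_bswap32(x);	/* what __swab32() does */
	uint32_t composed = 0;

	for (int i = 0; i < 4; i++)			/* what bitrev.4b does */
		composed |= rev8((swapped >> (8 * i)) & 0xff) << (8 * i);

	printf("%08x %08x\n", composed, rev32_ref(x));	/* prints the same value twice */
	return 0;
}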
diff --git a/arch/loongarch/include/asm/bootinfo.h b/arch/loongarch/include/asm/bootinfo.h
new file mode 100644
index 000000000000..9b8d49d9e61b
--- /dev/null
+++ b/arch/loongarch/include/asm/bootinfo.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_BOOTINFO_H
+#define _ASM_BOOTINFO_H
+
+#include <linux/types.h>
+#include <asm/setup.h>
+
+const char *get_system_type(void);
+
+extern void init_environ(void);
+extern void memblock_init(void);
+extern void platform_init(void);
+extern void plat_swiotlb_setup(void);
+extern int __init init_numa_memory(void);
+
+struct loongson_board_info {
+ int bios_size;
+ const char *bios_vendor;
+ const char *bios_version;
+ const char *bios_release_date;
+ const char *board_name;
+ const char *board_vendor;
+};
+
+struct loongson_system_configuration {
+ int nr_cpus;
+ int nr_nodes;
+ int nr_io_pics;
+ int boot_cpu_id;
+ int cores_per_node;
+ int cores_per_package;
+ const char *cpuname;
+};
+
+extern u64 efi_system_table;
+extern unsigned long fw_arg0, fw_arg1;
+extern struct loongson_board_info b_info;
+extern struct loongson_system_configuration loongson_sysconf;
+
+#endif /* _ASM_BOOTINFO_H */
diff --git a/arch/loongarch/include/asm/branch.h b/arch/loongarch/include/asm/branch.h
new file mode 100644
index 000000000000..3f33c89f35b4
--- /dev/null
+++ b/arch/loongarch/include/asm/branch.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_BRANCH_H
+#define _ASM_BRANCH_H
+
+#include <asm/ptrace.h>
+
+static inline unsigned long exception_era(struct pt_regs *regs)
+{
+ return regs->csr_era;
+}
+
+static inline int compute_return_era(struct pt_regs *regs)
+{
+ regs->csr_era += 4;
+ return 0;
+}
+
+#endif /* _ASM_BRANCH_H */
diff --git a/arch/loongarch/include/asm/bug.h b/arch/loongarch/include/asm/bug.h
new file mode 100644
index 000000000000..bda49108a76d
--- /dev/null
+++ b/arch/loongarch/include/asm/bug.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BUG_H
+#define __ASM_BUG_H
+
+#include <linux/compiler.h>
+
+#ifdef CONFIG_BUG
+
+#include <asm/break.h>
+
+static inline void __noreturn BUG(void)
+{
+ __asm__ __volatile__("break %0" : : "i" (BRK_BUG));
+ unreachable();
+}
+
+#define HAVE_ARCH_BUG
+
+#endif
+
+#include <asm-generic/bug.h>
+
+#endif /* __ASM_BUG_H */
diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h
new file mode 100644
index 000000000000..1b6d09617199
--- /dev/null
+++ b/arch/loongarch/include/asm/cache.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_CACHE_H
+#define _ASM_CACHE_H
+
+#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define __read_mostly __section(".data..read_mostly")
+
+#endif /* _ASM_CACHE_H */
diff --git a/arch/loongarch/include/asm/cacheflush.h b/arch/loongarch/include/asm/cacheflush.h
new file mode 100644
index 000000000000..670900141b7c
--- /dev/null
+++ b/arch/loongarch/include/asm/cacheflush.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/cpu-features.h>
+#include <asm/cacheops.h>
+
+extern void local_flush_icache_range(unsigned long start, unsigned long end);
+
+#define flush_icache_range local_flush_icache_range
+#define flush_icache_user_range local_flush_icache_range
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+#define flush_icache_page(vma, page) do { } while (0)
+#define flush_icache_user_page(vma, page, addr, len) do { } while (0)
+#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+#define cache_op(op, addr) \
+ __asm__ __volatile__( \
+ " cacop %0, %1 \n" \
+ : \
+ : "i" (op), "ZC" (*(unsigned char *)(addr)))
+
+static inline void flush_icache_line_indexed(unsigned long addr)
+{
+ cache_op(Index_Invalidate_I, addr);
+}
+
+static inline void flush_dcache_line_indexed(unsigned long addr)
+{
+ cache_op(Index_Writeback_Inv_D, addr);
+}
+
+static inline void flush_vcache_line_indexed(unsigned long addr)
+{
+ cache_op(Index_Writeback_Inv_V, addr);
+}
+
+static inline void flush_scache_line_indexed(unsigned long addr)
+{
+ cache_op(Index_Writeback_Inv_S, addr);
+}
+
+static inline void flush_icache_line(unsigned long addr)
+{
+ cache_op(Hit_Invalidate_I, addr);
+}
+
+static inline void flush_dcache_line(unsigned long addr)
+{
+ cache_op(Hit_Writeback_Inv_D, addr);
+}
+
+static inline void flush_vcache_line(unsigned long addr)
+{
+ cache_op(Hit_Writeback_Inv_V, addr);
+}
+
+static inline void flush_scache_line(unsigned long addr)
+{
+ cache_op(Hit_Writeback_Inv_S, addr);
+}
+
+#include <asm-generic/cacheflush.h>
+
+#endif /* _ASM_CACHEFLUSH_H */
diff --git a/arch/loongarch/include/asm/cacheops.h b/arch/loongarch/include/asm/cacheops.h
new file mode 100644
index 000000000000..dc280efecebd
--- /dev/null
+++ b/arch/loongarch/include/asm/cacheops.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cache operations for the cache instruction.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_CACHEOPS_H
+#define __ASM_CACHEOPS_H
+
+/*
+ * Most cache ops are split into a 2-bit field identifying the cache, and a
+ * 3-bit field identifying the cache operation.
+ */
+#define CacheOp_Cache 0x03
+#define CacheOp_Op 0x1c
+
+#define Cache_I 0x00
+#define Cache_D 0x01
+#define Cache_V 0x02
+#define Cache_S 0x03
+
+#define Index_Invalidate 0x08
+#define Index_Writeback_Inv 0x08
+#define Hit_Invalidate 0x10
+#define Hit_Writeback_Inv 0x10
+#define CacheOp_User_Defined 0x18
+
+#define Index_Invalidate_I (Cache_I | Index_Invalidate)
+#define Index_Writeback_Inv_D (Cache_D | Index_Writeback_Inv)
+#define Index_Writeback_Inv_V (Cache_V | Index_Writeback_Inv)
+#define Index_Writeback_Inv_S (Cache_S | Index_Writeback_Inv)
+#define Hit_Invalidate_I (Cache_I | Hit_Invalidate)
+#define Hit_Writeback_Inv_D (Cache_D | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_V (Cache_V | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_S (Cache_S | Hit_Writeback_Inv)
+
+#endif /* __ASM_CACHEOPS_H */
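As the comment at the top of this file notes, each encoding is a 2-bit cache selector plus an operation field, e.g. Hit_Writeback_Inv_D is simply Cache_D | Hit_Writeback_Inv (0x11). A tiny illustrative helper showing how CacheOp_Cache and CacheOp_Op split an encoding back into those fields:

/* Illustrative decode only, using the masks defined above. */
static inline void cacheop_decode(unsigned int op,
				  unsigned int *which_cache, unsigned int *which_op)
{
	*which_cache = op & CacheOp_Cache;	/* Cache_I/D/V/S selector    */
	*which_op = op & CacheOp_Op;		/* Index/Hit (writeback-)inv */
}
/* e.g. cacheop_decode(Hit_Writeback_Inv_D, ...) yields Cache_D and Hit_Writeback_Inv. */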
diff --git a/arch/loongarch/include/asm/clocksource.h b/arch/loongarch/include/asm/clocksource.h
new file mode 100644
index 000000000000..58e64aa05d26
--- /dev/null
+++ b/arch/loongarch/include/asm/clocksource.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_CLOCKSOURCE_H
+#define __ASM_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif /* __ASM_CLOCKSOURCE_H */
diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..75b3a4478652
--- /dev/null
+++ b/arch/loongarch/include/asm/cmpxchg.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <asm/barrier.h>
+#include <linux/build_bug.h>
+
+#define __xchg_asm(amswap_db, m, val) \
+({ \
+ __typeof(val) __ret; \
+ \
+ __asm__ __volatile__ ( \
+ " "amswap_db" %1, %z2, %0 \n" \
+ : "+ZB" (*m), "=&r" (__ret) \
+ : "Jr" (val) \
+ : "memory"); \
+ \
+ __ret; \
+})
+
+static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
+ int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
+
+ case 8:
+ return __xchg_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x);
+
+ default:
+ BUILD_BUG();
+ }
+
+ return 0;
+}
+
+#define arch_xchg(ptr, x) \
+({ \
+ __typeof__(*(ptr)) __res; \
+ \
+ __res = (__typeof__(*(ptr))) \
+ __xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \
+ \
+ __res; \
+})
+
+#define __cmpxchg_asm(ld, st, m, old, new) \
+({ \
+ __typeof(old) __ret; \
+ \
+ __asm__ __volatile__( \
+ "1: " ld " %0, %2 # __cmpxchg_asm \n" \
+ " bne %0, %z3, 2f \n" \
+ " or $t0, %z4, $zero \n" \
+ " " st " $t0, %1 \n" \
+ " beq $zero, $t0, 1b \n" \
+ "2: \n" \
+ __WEAK_LLSC_MB \
+ : "=&r" (__ret), "=ZB"(*m) \
+ : "ZB"(*m), "Jr" (old), "Jr" (new) \
+ : "t0", "memory"); \
+ \
+ __ret; \
+})
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr,
+ (u32)old, new);
+
+ case 8:
+ return __cmpxchg_asm("ll.d", "sc.d", (volatile u64 *)ptr,
+ (u64)old, new);
+
+ default:
+ BUILD_BUG();
+ }
+
+ return 0;
+}
+
+#define arch_cmpxchg_local(ptr, old, new) \
+ ((__typeof__(*(ptr))) \
+ __cmpxchg((ptr), \
+ (unsigned long)(__typeof__(*(ptr)))(old), \
+ (unsigned long)(__typeof__(*(ptr)))(new), \
+ sizeof(*(ptr))))
+
+#define arch_cmpxchg(ptr, old, new) \
+({ \
+ __typeof__(*(ptr)) __res; \
+ \
+ __res = arch_cmpxchg_local((ptr), (old), (new)); \
+ \
+ __res; \
+})
+
+#ifdef CONFIG_64BIT
+#define arch_cmpxchg64_local(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
+ })
+
+#define arch_cmpxchg64(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_cmpxchg((ptr), (o), (n)); \
+ })
+#else
+#include <asm-generic/cmpxchg-local.h>
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
+#endif
+
+#endif /* __ASM_CMPXCHG_H */
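The ll.w/sc.w (and ll.d/sc.d) loop in __cmpxchg_asm implements the standard compare-and-swap contract: the new value is stored only if the location still holds the expected old value, and the previous contents are returned either way. The same contract in portable terms (a sketch with a GCC builtin, not how the kernel builds it):

static inline unsigned int cmpxchg32_sketch(unsigned int *ptr,
					    unsigned int old, unsigned int new)
{
	unsigned int expected = old;

	/* Strong CAS; on failure "expected" is updated to the value observed. */
	__atomic_compare_exchange_n(ptr, &expected, new, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return expected;	/* previous contents of *ptr, like __ret above */
}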
diff --git a/arch/loongarch/include/asm/compiler.h b/arch/loongarch/include/asm/compiler.h
new file mode 100644
index 000000000000..657cebe70ace
--- /dev/null
+++ b/arch/loongarch/include/asm/compiler.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_COMPILER_H
+#define _ASM_COMPILER_H
+
+#define GCC_OFF_SMALL_ASM() "ZC"
+
+#define LOONGARCH_ISA_LEVEL "loongarch"
+#define LOONGARCH_ISA_ARCH_LEVEL "arch=loongarch"
+#define LOONGARCH_ISA_LEVEL_RAW loongarch
+#define LOONGARCH_ISA_ARCH_LEVEL_RAW LOONGARCH_ISA_LEVEL_RAW
+
+#endif /* _ASM_COMPILER_H */
diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h
new file mode 100644
index 000000000000..a8d87c40a0eb
--- /dev/null
+++ b/arch/loongarch/include/asm/cpu-features.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#ifndef __ASM_CPU_FEATURES_H
+#define __ASM_CPU_FEATURES_H
+
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+
+#define cpu_opt(opt) (cpu_data[0].options & (opt))
+#define cpu_has(feat) (cpu_data[0].options & BIT_ULL(feat))
+
+#define cpu_has_loongarch (cpu_has_loongarch32 | cpu_has_loongarch64)
+#define cpu_has_loongarch32 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_32BIT)
+#define cpu_has_loongarch64 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
+
+#define cpu_icache_line_size() cpu_data[0].icache.linesz
+#define cpu_dcache_line_size() cpu_data[0].dcache.linesz
+#define cpu_vcache_line_size() cpu_data[0].vcache.linesz
+#define cpu_scache_line_size() cpu_data[0].scache.linesz
+
+#ifdef CONFIG_32BIT
+# define cpu_has_64bits (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
+# define cpu_vabits 31
+# define cpu_pabits 31
+#endif
+
+#ifdef CONFIG_64BIT
+# define cpu_has_64bits 1
+# define cpu_vabits cpu_data[0].vabits
+# define cpu_pabits cpu_data[0].pabits
+# define __NEED_ADDRBITS_PROBE
+#endif
+
+/*
+ * SMP assumption: the options of CPU 0 are a superset of those of all processors.
+ * This is true for all known LoongArch systems.
+ */
+#define cpu_has_cpucfg cpu_opt(LOONGARCH_CPU_CPUCFG)
+#define cpu_has_lam cpu_opt(LOONGARCH_CPU_LAM)
+#define cpu_has_ual cpu_opt(LOONGARCH_CPU_UAL)
+#define cpu_has_fpu cpu_opt(LOONGARCH_CPU_FPU)
+#define cpu_has_lsx cpu_opt(LOONGARCH_CPU_LSX)
+#define cpu_has_lasx cpu_opt(LOONGARCH_CPU_LASX)
+#define cpu_has_complex cpu_opt(LOONGARCH_CPU_COMPLEX)
+#define cpu_has_crypto cpu_opt(LOONGARCH_CPU_CRYPTO)
+#define cpu_has_lvz cpu_opt(LOONGARCH_CPU_LVZ)
+#define cpu_has_lbt_x86 cpu_opt(LOONGARCH_CPU_LBT_X86)
+#define cpu_has_lbt_arm cpu_opt(LOONGARCH_CPU_LBT_ARM)
+#define cpu_has_lbt_mips cpu_opt(LOONGARCH_CPU_LBT_MIPS)
+#define cpu_has_lbt (cpu_has_lbt_x86|cpu_has_lbt_arm|cpu_has_lbt_mips)
+#define cpu_has_csr cpu_opt(LOONGARCH_CPU_CSR)
+#define cpu_has_tlb cpu_opt(LOONGARCH_CPU_TLB)
+#define cpu_has_watch cpu_opt(LOONGARCH_CPU_WATCH)
+#define cpu_has_vint cpu_opt(LOONGARCH_CPU_VINT)
+#define cpu_has_csripi cpu_opt(LOONGARCH_CPU_CSRIPI)
+#define cpu_has_extioi cpu_opt(LOONGARCH_CPU_EXTIOI)
+#define cpu_has_prefetch cpu_opt(LOONGARCH_CPU_PREFETCH)
+#define cpu_has_pmp cpu_opt(LOONGARCH_CPU_PMP)
+#define cpu_has_perf cpu_opt(LOONGARCH_CPU_PMP)
+#define cpu_has_scalefreq cpu_opt(LOONGARCH_CPU_SCALEFREQ)
+#define cpu_has_flatmode cpu_opt(LOONGARCH_CPU_FLATMODE)
+#define cpu_has_eiodecode cpu_opt(LOONGARCH_CPU_EIODECODE)
+#define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID)
+#define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR)
+
+
+#endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/loongarch/include/asm/cpu-info.h b/arch/loongarch/include/asm/cpu-info.h
new file mode 100644
index 000000000000..b6c4f96079df
--- /dev/null
+++ b/arch/loongarch/include/asm/cpu-info.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_CPU_INFO_H
+#define __ASM_CPU_INFO_H
+
+#include <linux/cache.h>
+#include <linux/types.h>
+
+#include <asm/loongarch.h>
+
+/*
+ * Descriptor for a cache
+ */
+struct cache_desc {
+ unsigned int waysize; /* Bytes per way */
+ unsigned short sets; /* Number of lines per set */
+ unsigned char ways; /* Number of ways */
+ unsigned char linesz; /* Size of line in bytes */
+ unsigned char waybit; /* Bits to select in a cache set */
+ unsigned char flags; /* Flags describing cache properties */
+};
+
+struct cpuinfo_loongarch {
+ u64 asid_cache;
+ unsigned long asid_mask;
+
+ /*
+ * Capability and feature descriptor structure for LoongArch CPU
+ */
+ unsigned long long options;
+ unsigned int processor_id;
+ unsigned int fpu_vers;
+ unsigned int fpu_csr0;
+ unsigned int fpu_mask;
+ unsigned int cputype;
+ int isa_level;
+ int tlbsize;
+ int tlbsizemtlb;
+ int tlbsizestlbsets;
+ int tlbsizestlbways;
+ struct cache_desc icache; /* Primary I-cache */
+ struct cache_desc dcache; /* Primary D or combined I/D cache */
+ struct cache_desc vcache; /* Victim cache, between pcache and scache */
+ struct cache_desc scache; /* Secondary cache */
+ struct cache_desc tcache; /* Tertiary/split secondary cache */
+ int core; /* physical core number in package */
+ int package;/* physical package number */
+ int vabits; /* Virtual Address size in bits */
+ int pabits; /* Physical Address size in bits */
+ unsigned int ksave_mask; /* Usable KSave mask. */
+ unsigned int watch_dreg_count; /* Number of data breakpoints */
+ unsigned int watch_ireg_count; /* Number of instruction breakpoints */
+ unsigned int watch_reg_use_cnt; /* min(NUM_WATCH_REGS, watch_dreg_count + watch_ireg_count), Usable by ptrace */
+} __aligned(SMP_CACHE_BYTES);
+
+extern struct cpuinfo_loongarch cpu_data[];
+#define boot_cpu_data cpu_data[0]
+#define current_cpu_data cpu_data[smp_processor_id()]
+#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
+
+extern void cpu_probe(void);
+
+extern const char *__cpu_family[];
+extern const char *__cpu_full_name[];
+#define cpu_family_string() __cpu_family[raw_smp_processor_id()]
+#define cpu_full_name_string() __cpu_full_name[raw_smp_processor_id()]
+
+struct seq_file;
+struct notifier_block;
+
+extern int register_proc_cpuinfo_notifier(struct notifier_block *nb);
+extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v);
+
+#define proc_cpuinfo_notifier(fn, pri) \
+({ \
+ static struct notifier_block fn##_nb = { \
+ .notifier_call = fn, \
+ .priority = pri \
+ }; \
+ \
+ register_proc_cpuinfo_notifier(&fn##_nb); \
+})
+
+struct proc_cpuinfo_notifier_args {
+ struct seq_file *m;
+ unsigned long n;
+};
+
+static inline bool cpus_are_siblings(int cpua, int cpub)
+{
+ struct cpuinfo_loongarch *infoa = &cpu_data[cpua];
+ struct cpuinfo_loongarch *infob = &cpu_data[cpub];
+
+ if (infoa->package != infob->package)
+ return false;
+
+ if (infoa->core != infob->core)
+ return false;
+
+ return true;
+}
+
+static inline unsigned long cpu_asid_mask(struct cpuinfo_loongarch *cpuinfo)
+{
+ return cpuinfo->asid_mask;
+}
+
+static inline void set_cpu_asid_mask(struct cpuinfo_loongarch *cpuinfo,
+ unsigned long asid_mask)
+{
+ cpuinfo->asid_mask = asid_mask;
+}
+
+#endif /* __ASM_CPU_INFO_H */
diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h
new file mode 100644
index 000000000000..754f28506791
--- /dev/null
+++ b/arch/loongarch/include/asm/cpu.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cpu.h: Values of the PRID register used to match up
+ * various LoongArch CPU types.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_CPU_H
+#define _ASM_CPU_H
+
+/*
+ * As described in LoongArch specs from Loongson Technology, the PRID register
+ * (CPUCFG.00) has the following layout:
+ *
+ * +---------------+----------------+------------+--------------------+
+ * | Reserved | Company ID | Series ID | Product ID |
+ * +---------------+----------------+------------+--------------------+
+ * 31 24 23 16 15 12 11 0
+ */
+
+/*
+ * Assigned Company values for bits 23:16 of the PRID register.
+ */
+
+#define PRID_COMP_MASK 0xff0000
+
+#define PRID_COMP_LOONGSON 0x140000
+
+/*
+ * Assigned Series ID values for bits 15:12 of the PRID register. In order
+ * to detect a certain CPU type exactly eventually additional registers may
+ * need to be examined.
+ */
+
+#define PRID_SERIES_MASK 0xf000
+
+#define PRID_SERIES_LA132 0x8000 /* Loongson 32bit */
+#define PRID_SERIES_LA264 0xa000 /* Loongson 64bit, 2-issue */
+#define PRID_SERIES_LA364 0xb000 /* Loongson 64bit, 3-issue */
+#define PRID_SERIES_LA464 0xc000 /* Loongson 64bit, 4-issue */
+#define PRID_SERIES_LA664 0xd000 /* Loongson 64bit, 6-issue */
+
+/*
+ * Particular Product ID values for bits 11:0 of the PRID register.
+ */
+
+#define PRID_PRODUCT_MASK 0x0fff
+
+#if !defined(__ASSEMBLY__)
+
+enum cpu_type_enum {
+ CPU_UNKNOWN,
+ CPU_LOONGSON32,
+ CPU_LOONGSON64,
+ CPU_LAST
+};
+
+#endif /* !__ASSEMBLY */
+
+/*
+ * ISA Level encodings
+ *
+ */
+
+#define LOONGARCH_CPU_ISA_LA32R 0x00000001
+#define LOONGARCH_CPU_ISA_LA32S 0x00000002
+#define LOONGARCH_CPU_ISA_LA64 0x00000004
+
+#define LOONGARCH_CPU_ISA_32BIT (LOONGARCH_CPU_ISA_LA32R | LOONGARCH_CPU_ISA_LA32S)
+#define LOONGARCH_CPU_ISA_64BIT LOONGARCH_CPU_ISA_LA64
+
+/*
+ * CPU Option encodings
+ */
+#define CPU_FEATURE_CPUCFG 0 /* CPU has CPUCFG */
+#define CPU_FEATURE_LAM 1 /* CPU has Atomic instructions */
+#define CPU_FEATURE_UAL 2 /* CPU supports unaligned access */
+#define CPU_FEATURE_FPU 3 /* CPU has FPU */
+#define CPU_FEATURE_LSX 4 /* CPU has LSX (128-bit SIMD) */
+#define CPU_FEATURE_LASX 5 /* CPU has LASX (256-bit SIMD) */
+#define CPU_FEATURE_COMPLEX 6 /* CPU has Complex instructions */
+#define CPU_FEATURE_CRYPTO 7 /* CPU has Crypto instructions */
+#define CPU_FEATURE_LVZ 8 /* CPU has Virtualization extension */
+#define CPU_FEATURE_LBT_X86 9 /* CPU has X86 Binary Translation */
+#define CPU_FEATURE_LBT_ARM 10 /* CPU has ARM Binary Translation */
+#define CPU_FEATURE_LBT_MIPS 11 /* CPU has MIPS Binary Translation */
+#define CPU_FEATURE_TLB 12 /* CPU has TLB */
+#define CPU_FEATURE_CSR 13 /* CPU has CSR */
+#define CPU_FEATURE_WATCH 14 /* CPU has watchpoint registers */
+#define CPU_FEATURE_VINT 15 /* CPU has vectored interrupts */
+#define CPU_FEATURE_CSRIPI 16 /* CPU has CSR-IPI */
+#define CPU_FEATURE_EXTIOI 17 /* CPU has EXT-IOI */
+#define CPU_FEATURE_PREFETCH 18 /* CPU has prefetch instructions */
+#define CPU_FEATURE_PMP 19 /* CPU has performance counter */
+#define CPU_FEATURE_SCALEFREQ 20 /* CPU supports cpufreq scaling */
+#define CPU_FEATURE_FLATMODE 21 /* CPU has flat mode */
+#define CPU_FEATURE_EIODECODE 22 /* CPU has EXTIOI interrupt pin decode mode */
+#define CPU_FEATURE_GUESTID 23 /* CPU has GuestID feature */
+#define CPU_FEATURE_HYPERVISOR 24 /* CPU has hypervisor (running in VM) */
+
+#define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG)
+#define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM)
+#define LOONGARCH_CPU_UAL BIT_ULL(CPU_FEATURE_UAL)
+#define LOONGARCH_CPU_FPU BIT_ULL(CPU_FEATURE_FPU)
+#define LOONGARCH_CPU_LSX BIT_ULL(CPU_FEATURE_LSX)
+#define LOONGARCH_CPU_LASX BIT_ULL(CPU_FEATURE_LASX)
+#define LOONGARCH_CPU_COMPLEX BIT_ULL(CPU_FEATURE_COMPLEX)
+#define LOONGARCH_CPU_CRYPTO BIT_ULL(CPU_FEATURE_CRYPTO)
+#define LOONGARCH_CPU_LVZ BIT_ULL(CPU_FEATURE_LVZ)
+#define LOONGARCH_CPU_LBT_X86 BIT_ULL(CPU_FEATURE_LBT_X86)
+#define LOONGARCH_CPU_LBT_ARM BIT_ULL(CPU_FEATURE_LBT_ARM)
+#define LOONGARCH_CPU_LBT_MIPS BIT_ULL(CPU_FEATURE_LBT_MIPS)
+#define LOONGARCH_CPU_TLB BIT_ULL(CPU_FEATURE_TLB)
+#define LOONGARCH_CPU_CSR BIT_ULL(CPU_FEATURE_CSR)
+#define LOONGARCH_CPU_WATCH BIT_ULL(CPU_FEATURE_WATCH)
+#define LOONGARCH_CPU_VINT BIT_ULL(CPU_FEATURE_VINT)
+#define LOONGARCH_CPU_CSRIPI BIT_ULL(CPU_FEATURE_CSRIPI)
+#define LOONGARCH_CPU_EXTIOI BIT_ULL(CPU_FEATURE_EXTIOI)
+#define LOONGARCH_CPU_PREFETCH BIT_ULL(CPU_FEATURE_PREFETCH)
+#define LOONGARCH_CPU_PMP BIT_ULL(CPU_FEATURE_PMP)
+#define LOONGARCH_CPU_SCALEFREQ BIT_ULL(CPU_FEATURE_SCALEFREQ)
+#define LOONGARCH_CPU_FLATMODE BIT_ULL(CPU_FEATURE_FLATMODE)
+#define LOONGARCH_CPU_EIODECODE BIT_ULL(CPU_FEATURE_EIODECODE)
+#define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID)
+#define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR)
+
+#endif /* _ASM_CPU_H */
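The PRID layout documented at the top of this file (company ID in bits 23:16, series ID in bits 15:12, product ID in bits 11:0) decodes directly with the masks defined above; a minimal illustrative sketch:

/* Illustrative only: split a PRID (CPUCFG.00) value into its documented fields. */
static inline void prid_decode(unsigned int prid, unsigned int *company,
			       unsigned int *series, unsigned int *product)
{
	*company = prid & PRID_COMP_MASK;	/* e.g. PRID_COMP_LOONGSON (0x140000) */
	*series = prid & PRID_SERIES_MASK;	/* e.g. PRID_SERIES_LA464 (0xc000)    */
	*product = prid & PRID_PRODUCT_MASK;	/* low 12 bits                        */
}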
diff --git a/arch/loongarch/include/asm/cpufeature.h b/arch/loongarch/include/asm/cpufeature.h
new file mode 100644
index 000000000000..4da22a8e63de
--- /dev/null
+++ b/arch/loongarch/include/asm/cpufeature.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CPU feature definitions for module loading, used by
+ * module_cpu_feature_match(); see uapi/asm/hwcap.h for the LoongArch CPU features.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_CPUFEATURE_H
+#define __ASM_CPUFEATURE_H
+
+#include <uapi/asm/hwcap.h>
+#include <asm/elf.h>
+
+#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
+
+#define cpu_feature(x) ilog2(HWCAP_ ## x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+ return elf_hwcap & (1UL << num);
+}
+
+#endif /* __ASM_CPUFEATURE_H */
diff --git a/arch/loongarch/include/asm/delay.h b/arch/loongarch/include/asm/delay.h
new file mode 100644
index 000000000000..36d775191310
--- /dev/null
+++ b/arch/loongarch/include/asm/delay.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_DELAY_H
+#define _ASM_DELAY_H
+
+#include <linux/param.h>
+
+extern void __delay(unsigned long cycles);
+extern void __ndelay(unsigned long ns);
+extern void __udelay(unsigned long us);
+
+#define ndelay(ns) __ndelay(ns)
+#define udelay(us) __udelay(us)
+
+/* make sure "usecs *= ..." in udelay do not overflow. */
+#if HZ >= 1000
+#define MAX_UDELAY_MS 1
+#elif HZ <= 200
+#define MAX_UDELAY_MS 5
+#else
+#define MAX_UDELAY_MS (1000 / HZ)
+#endif
+
+#endif /* _ASM_DELAY_H */
diff --git a/arch/loongarch/include/asm/dma-direct.h b/arch/loongarch/include/asm/dma-direct.h
new file mode 100644
index 000000000000..75ccd808a2af
--- /dev/null
+++ b/arch/loongarch/include/asm/dma-direct.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _LOONGARCH_DMA_DIRECT_H
+#define _LOONGARCH_DMA_DIRECT_H
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+
+#endif /* _LOONGARCH_DMA_DIRECT_H */
diff --git a/arch/loongarch/include/asm/dmi.h b/arch/loongarch/include/asm/dmi.h
new file mode 100644
index 000000000000..605493417753
--- /dev/null
+++ b/arch/loongarch/include/asm/dmi.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_DMI_H
+#define _ASM_DMI_H
+
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#define dmi_early_remap(x, l) dmi_remap(x, l)
+#define dmi_early_unmap(x, l) dmi_unmap(x)
+#define dmi_alloc(l) memblock_alloc(l, PAGE_SIZE)
+
+static inline void *dmi_remap(u64 phys_addr, unsigned long size)
+{
+ return ((void *)TO_CACHE(phys_addr));
+}
+
+static inline void dmi_unmap(void *addr)
+{
+}
+
+#endif /* _ASM_DMI_H */
diff --git a/arch/loongarch/include/asm/efi.h b/arch/loongarch/include/asm/efi.h
new file mode 100644
index 000000000000..0127d84d5e1d
--- /dev/null
+++ b/arch/loongarch/include/asm/efi.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_LOONGARCH_EFI_H
+#define _ASM_LOONGARCH_EFI_H
+
+#include <linux/efi.h>
+
+void __init efi_init(void);
+void __init efi_runtime_init(void);
+void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+
+#define ARCH_EFI_IRQ_FLAGS_MASK 0x00000004 /* Bit 2: CSR.CRMD.IE */
+
+#define arch_efi_call_virt_setup() \
+({ \
+})
+
+#define arch_efi_call_virt(p, f, args...) \
+({ \
+ efi_##f##_t * __f; \
+ __f = p->f; \
+ __f(args); \
+})
+
+#define arch_efi_call_virt_teardown() \
+({ \
+})
+
+#define EFI_ALLOC_ALIGN SZ_64K
+
+struct screen_info *alloc_screen_info(void);
+void free_screen_info(struct screen_info *si);
+
+static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
+{
+ return ULONG_MAX;
+}
+
+#endif /* _ASM_LOONGARCH_EFI_H */
diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
new file mode 100644
index 000000000000..f3960b18a90e
--- /dev/null
+++ b/arch/loongarch/include/asm/elf.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_ELF_H
+#define _ASM_ELF_H
+
+#include <linux/auxvec.h>
+#include <linux/fs.h>
+#include <uapi/linux/elf.h>
+
+#include <asm/current.h>
+#include <asm/vdso.h>
+
+/* The ABI of a file. */
+#define EF_LOONGARCH_ABI_LP64_SOFT_FLOAT 0x1
+#define EF_LOONGARCH_ABI_LP64_SINGLE_FLOAT 0x2
+#define EF_LOONGARCH_ABI_LP64_DOUBLE_FLOAT 0x3
+
+#define EF_LOONGARCH_ABI_ILP32_SOFT_FLOAT 0x5
+#define EF_LOONGARCH_ABI_ILP32_SINGLE_FLOAT 0x6
+#define EF_LOONGARCH_ABI_ILP32_DOUBLE_FLOAT 0x7
+
+/* LoongArch relocation types used by the dynamic linker */
+#define R_LARCH_NONE 0
+#define R_LARCH_32 1
+#define R_LARCH_64 2
+#define R_LARCH_RELATIVE 3
+#define R_LARCH_COPY 4
+#define R_LARCH_JUMP_SLOT 5
+#define R_LARCH_TLS_DTPMOD32 6
+#define R_LARCH_TLS_DTPMOD64 7
+#define R_LARCH_TLS_DTPREL32 8
+#define R_LARCH_TLS_DTPREL64 9
+#define R_LARCH_TLS_TPREL32 10
+#define R_LARCH_TLS_TPREL64 11
+#define R_LARCH_IRELATIVE 12
+#define R_LARCH_MARK_LA 20
+#define R_LARCH_MARK_PCREL 21
+#define R_LARCH_SOP_PUSH_PCREL 22
+#define R_LARCH_SOP_PUSH_ABSOLUTE 23
+#define R_LARCH_SOP_PUSH_DUP 24
+#define R_LARCH_SOP_PUSH_GPREL 25
+#define R_LARCH_SOP_PUSH_TLS_TPREL 26
+#define R_LARCH_SOP_PUSH_TLS_GOT 27
+#define R_LARCH_SOP_PUSH_TLS_GD 28
+#define R_LARCH_SOP_PUSH_PLT_PCREL 29
+#define R_LARCH_SOP_ASSERT 30
+#define R_LARCH_SOP_NOT 31
+#define R_LARCH_SOP_SUB 32
+#define R_LARCH_SOP_SL 33
+#define R_LARCH_SOP_SR 34
+#define R_LARCH_SOP_ADD 35
+#define R_LARCH_SOP_AND 36
+#define R_LARCH_SOP_IF_ELSE 37
+#define R_LARCH_SOP_POP_32_S_10_5 38
+#define R_LARCH_SOP_POP_32_U_10_12 39
+#define R_LARCH_SOP_POP_32_S_10_12 40
+#define R_LARCH_SOP_POP_32_S_10_16 41
+#define R_LARCH_SOP_POP_32_S_10_16_S2 42
+#define R_LARCH_SOP_POP_32_S_5_20 43
+#define R_LARCH_SOP_POP_32_S_0_5_10_16_S2 44
+#define R_LARCH_SOP_POP_32_S_0_10_10_16_S2 45
+#define R_LARCH_SOP_POP_32_U 46
+#define R_LARCH_ADD8 47
+#define R_LARCH_ADD16 48
+#define R_LARCH_ADD24 49
+#define R_LARCH_ADD32 50
+#define R_LARCH_ADD64 51
+#define R_LARCH_SUB8 52
+#define R_LARCH_SUB16 53
+#define R_LARCH_SUB24 54
+#define R_LARCH_SUB32 55
+#define R_LARCH_SUB64 56
+#define R_LARCH_GNU_VTINHERIT 57
+#define R_LARCH_GNU_VTENTRY 58
+
+#ifndef ELF_ARCH
+
+/* ELF register definitions */
+
+/*
+ * The general purpose register set comprises the following registers:
+ * Register Number
+ * GPRs 32
+ * ORIG_A0 1
+ * ERA 1
+ * BADVADDR 1
+ * CRMD 1
+ * PRMD 1
+ * EUEN 1
+ * ECFG 1
+ * ESTAT 1
+ * Reserved 5
+ */
+#define ELF_NGREG 45
+
+/*
+ * The floating point register set comprises the following registers:
+ * Register Number
+ * FPR 32
+ * FCC 1
+ * FCSR 1
+ */
+#define ELF_NFPREG 34
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs);
+
+#ifdef CONFIG_32BIT
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch elf32_check_arch
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+
+#define ELF_CORE_COPY_REGS(dest, regs) \
+ loongarch_dump_regs32((u32 *)&(dest), (regs));
+
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_64BIT
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch elf64_check_arch
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS64
+
+#define ELF_CORE_COPY_REGS(dest, regs) \
+ loongarch_dump_regs64((u64 *)&(dest), (regs));
+
+#endif /* CONFIG_64BIT */
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_LOONGARCH
+
+#endif /* !defined(ELF_ARCH) */
+
+#define loongarch_elf_check_machine(x) ((x)->e_machine == EM_LOONGARCH)
+
+#define vmcore_elf32_check_arch loongarch_elf_check_machine
+#define vmcore_elf64_check_arch loongarch_elf_check_machine
+
+/*
+ * Return non-zero if HDR identifies a 32-bit ELF binary.
+ */
+#define elf32_check_arch(hdr) \
+({ \
+ int __res = 1; \
+ struct elfhdr *__h = (hdr); \
+ \
+ if (!loongarch_elf_check_machine(__h)) \
+ __res = 0; \
+ if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
+ __res = 0; \
+ \
+ __res; \
+})
+
+/*
+ * Return non-zero if HDR identifies a 64-bit ELF binary.
+ */
+#define elf64_check_arch(hdr) \
+({ \
+ int __res = 1; \
+ struct elfhdr *__h = (hdr); \
+ \
+ if (!loongarch_elf_check_machine(__h)) \
+ __res = 0; \
+ if (__h->e_ident[EI_CLASS] != ELFCLASS64) \
+ __res = 0; \
+ \
+ __res; \
+})
+
+#ifdef CONFIG_32BIT
+
+#define SET_PERSONALITY2(ex, state) \
+do { \
+ current->thread.vdso = &vdso_info; \
+ \
+ loongarch_set_personality_fcsr(state); \
+ \
+ if (personality(current->personality) != PER_LINUX) \
+ set_personality(PER_LINUX); \
+} while (0)
+
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_64BIT
+
+#define SET_PERSONALITY2(ex, state) \
+do { \
+ unsigned int p; \
+ \
+ clear_thread_flag(TIF_32BIT_REGS); \
+ clear_thread_flag(TIF_32BIT_ADDR); \
+ \
+ current->thread.vdso = &vdso_info; \
+ loongarch_set_personality_fcsr(state); \
+ \
+ p = personality(current->personality); \
+ if (p != PER_LINUX32 && p != PER_LINUX) \
+ set_personality(PER_LINUX); \
+} while (0)
+
+#endif /* CONFIG_64BIT */
+
+#define CORE_DUMP_USE_REGSET
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports. This could be done in userspace,
+ * but it's not easy, and we've already done it here.
+ */
+
+#define ELF_HWCAP (elf_hwcap)
+extern unsigned int elf_hwcap;
+#include <asm/hwcap.h>
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+
+#define ELF_PLATFORM __elf_platform
+extern const char *__elf_platform;
+
+#define ELF_PLAT_INIT(_r, load_addr) do { \
+ _r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \
+ _r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \
+ _r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0; \
+ _r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \
+ _r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \
+ _r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \
+ _r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0; \
+ _r->regs[29] = _r->regs[30] = _r->regs[31] = 0; \
+} while (0)
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+#define ARCH_DLINFO \
+do { \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (unsigned long)current->mm->context.vdso); \
+} while (0)
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+
+struct arch_elf_state {
+ int fp_abi;
+ int interp_fp_abi;
+};
+
+#define LOONGARCH_ABI_FP_ANY (0)
+
+#define INIT_ARCH_ELF_STATE { \
+ .fp_abi = LOONGARCH_ABI_FP_ANY, \
+ .interp_fp_abi = LOONGARCH_ABI_FP_ANY, \
+}
+
+#define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT)
+
+extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
+ bool is_interp, struct arch_elf_state *state);
+
+extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
+ struct arch_elf_state *state);
+
+extern void loongarch_set_personality_fcsr(struct arch_elf_state *state);
+
+#endif /* _ASM_ELF_H */
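The ELF_HWCAP comment above notes that the hwcap word is exported so user programs can discover the supported instruction set; userspace receives that word through the auxiliary vector and would typically read it with getauxval(AT_HWCAP). A hedged sketch (the real HWCAP_* bit names live in uapi/asm/hwcap.h, which is not part of this hunk, so the bit tested below is a placeholder):

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);	/* the ELF_HWCAP word */

	/* (1UL << 3) is an illustrative bit position, not a real HWCAP_* value. */
	printf("feature bit 3 %s\n", (hwcap & (1UL << 3)) ? "present" : "absent");
	return 0;
}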
diff --git a/arch/loongarch/include/asm/entry-common.h b/arch/loongarch/include/asm/entry-common.h
new file mode 100644
index 000000000000..0fe2a098ded9
--- /dev/null
+++ b/arch/loongarch/include/asm/entry-common.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_LOONGARCH_ENTRY_COMMON_H
+#define ARCH_LOONGARCH_ENTRY_COMMON_H
+
+#include <linux/sched.h>
+#include <linux/processor.h>
+
+static inline bool on_thread_stack(void)
+{
+ return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
+}
+
+#endif
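on_thread_stack() relies on the task stack being THREAD_SIZE-aligned: XOR-ing the stack base with the current stack pointer and masking off the low THREAD_SIZE-1 bits leaves zero exactly when both addresses fall inside the same aligned THREAD_SIZE block. The same test in isolation, with an arbitrary illustrative block size:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_THREAD_SIZE (16UL * 1024)	/* illustrative; must be a power of two */

static bool same_stack(unsigned long stack_base, unsigned long sp)
{
	/* High bits equal  <=>  both addresses lie in the same aligned block. */
	return !((stack_base ^ sp) & ~(DEMO_THREAD_SIZE - 1));
}

int main(void)
{
	printf("%d\n", same_stack(0x40000, 0x40000 + 0x1f00));	/* 1: same block */
	printf("%d\n", same_stack(0x40000, 0x40000 + 0x5000));	/* 0: next block */
	return 0;
}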
diff --git a/arch/loongarch/include/asm/exec.h b/arch/loongarch/include/asm/exec.h
new file mode 100644
index 000000000000..ba0220812ebb
--- /dev/null
+++ b/arch/loongarch/include/asm/exec.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_EXEC_H
+#define _ASM_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* _ASM_EXEC_H */
diff --git a/arch/loongarch/include/asm/fb.h b/arch/loongarch/include/asm/fb.h
new file mode 100644
index 000000000000..3116bde8772d
--- /dev/null
+++ b/arch/loongarch/include/asm/fb.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/loongarch/include/asm/fixmap.h b/arch/loongarch/include/asm/fixmap.h
new file mode 100644
index 000000000000..b3541dfa2013
--- /dev/null
+++ b/arch/loongarch/include/asm/fixmap.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#define NR_FIX_BTMAPS 64
+
+#endif
diff --git a/arch/loongarch/include/asm/fpregdef.h b/arch/loongarch/include/asm/fpregdef.h
new file mode 100644
index 000000000000..adb16e4b43b0
--- /dev/null
+++ b/arch/loongarch/include/asm/fpregdef.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for the FPU register names
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_FPREGDEF_H
+#define _ASM_FPREGDEF_H
+
+#define fa0 $f0 /* argument registers, fa0/fa1 reused as fv0/fv1 for return value */
+#define fa1 $f1
+#define fa2 $f2
+#define fa3 $f3
+#define fa4 $f4
+#define fa5 $f5
+#define fa6 $f6
+#define fa7 $f7
+#define ft0 $f8 /* caller saved */
+#define ft1 $f9
+#define ft2 $f10
+#define ft3 $f11
+#define ft4 $f12
+#define ft5 $f13
+#define ft6 $f14
+#define ft7 $f15
+#define ft8 $f16
+#define ft9 $f17
+#define ft10 $f18
+#define ft11 $f19
+#define ft12 $f20
+#define ft13 $f21
+#define ft14 $f22
+#define ft15 $f23
+#define fs0 $f24 /* callee saved */
+#define fs1 $f25
+#define fs2 $f26
+#define fs3 $f27
+#define fs4 $f28
+#define fs5 $f29
+#define fs6 $f30
+#define fs7 $f31
+
+/*
+ * Current binutils expects *GPRs* at FCSR position for the FCSR
+ * operation instructions, so define aliases for those used.
+ */
+#define fcsr0 $r0
+#define fcsr1 $r1
+#define fcsr2 $r2
+#define fcsr3 $r3
+#define vcsr16 $r16
+
+#endif /* _ASM_FPREGDEF_H */
diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h
new file mode 100644
index 000000000000..358b254d9c1d
--- /dev/null
+++ b/arch/loongarch/include/asm/fpu.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_FPU_H
+#define _ASM_FPU_H
+
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/ptrace.h>
+#include <linux/thread_info.h>
+#include <linux/bitops.h>
+
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/current.h>
+#include <asm/loongarch.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+
+struct sigcontext;
+
+extern void _init_fpu(unsigned int);
+extern void _save_fp(struct loongarch_fpu *);
+extern void _restore_fp(struct loongarch_fpu *);
+
+/*
+ * Mask the FCSR Cause bits according to the Enable bits, observing
+ * that Unimplemented is always enabled.
+ */
+static inline unsigned long mask_fcsr_x(unsigned long fcsr)
+{
+ return fcsr & ((fcsr & FPU_CSR_ALL_E) <<
+ (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)));
+}
+
+static inline int is_fp_enabled(void)
+{
+ return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_FPEN) ?
+ 1 : 0;
+}
+
+#define enable_fpu() set_csr_euen(CSR_EUEN_FPEN)
+
+#define disable_fpu() clear_csr_euen(CSR_EUEN_FPEN)
+
+#define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU)
+
+static inline int is_fpu_owner(void)
+{
+ return test_thread_flag(TIF_USEDFPU);
+}
+
+static inline void __own_fpu(void)
+{
+ enable_fpu();
+ set_thread_flag(TIF_USEDFPU);
+ KSTK_EUEN(current) |= CSR_EUEN_FPEN;
+}
+
+static inline void own_fpu_inatomic(int restore)
+{
+ if (cpu_has_fpu && !is_fpu_owner()) {
+ __own_fpu();
+ if (restore)
+ _restore_fp(&current->thread.fpu);
+ }
+}
+
+static inline void own_fpu(int restore)
+{
+ preempt_disable();
+ own_fpu_inatomic(restore);
+ preempt_enable();
+}
+
+static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
+{
+ if (is_fpu_owner()) {
+ if (save)
+ _save_fp(&tsk->thread.fpu);
+ disable_fpu();
+ clear_tsk_thread_flag(tsk, TIF_USEDFPU);
+ }
+ KSTK_EUEN(tsk) &= ~(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
+}
+
+static inline void lose_fpu(int save)
+{
+ preempt_disable();
+ lose_fpu_inatomic(save, current);
+ preempt_enable();
+}
+
+static inline void init_fpu(void)
+{
+ unsigned int fcsr = current->thread.fpu.fcsr;
+
+ __own_fpu();
+ _init_fpu(fcsr);
+ set_used_math();
+}
+
+static inline void save_fp(struct task_struct *tsk)
+{
+ if (cpu_has_fpu)
+ _save_fp(&tsk->thread.fpu);
+}
+
+static inline void restore_fp(struct task_struct *tsk)
+{
+ if (cpu_has_fpu)
+ _restore_fp(&tsk->thread.fpu);
+}
+
+static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
+{
+ if (tsk == current) {
+ preempt_disable();
+ if (is_fpu_owner())
+ _save_fp(&current->thread.fpu);
+ preempt_enable();
+ }
+
+ return tsk->thread.fpu.fpr;
+}
+
+#endif /* _ASM_FPU_H */
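mask_fcsr_x() keeps only those Cause bits whose corresponding Enable bit is set, by shifting the enabled bits up by the distance between the lowest Cause bit and the lowest Enable bit. A worked illustration under assumed bit layouts (the real FPU_CSR_ALL_E/FPU_CSR_ALL_X values come from asm/loongarch.h, not this hunk):

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Assumed layout, for illustration only: Enable bits 0..4, Cause bits 24..28. */
#define DEMO_ALL_E 0x0000001fUL
#define DEMO_ALL_X 0x1f000000UL

static unsigned long demo_mask_fcsr_x(unsigned long fcsr)
{
	return fcsr & ((fcsr & DEMO_ALL_E) << (ffs(DEMO_ALL_X) - ffs(DEMO_ALL_E)));
}

int main(void)
{
	/* Enable bit 0 set; Cause bits 24 and 25 raised: only the enabled one survives. */
	printf("%#lx\n", demo_mask_fcsr_x(0x03000001UL));	/* prints 0x1000000 */
	return 0;
}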
diff --git a/arch/loongarch/include/asm/futex.h b/arch/loongarch/include/asm/futex.h
new file mode 100644
index 000000000000..9de8231694ec
--- /dev/null
+++ b/arch/loongarch/include/asm/futex.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/barrier.h>
+#include <asm/compiler.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+{ \
+ __asm__ __volatile__( \
+ "1: ll.w %1, %4 # __futex_atomic_op\n" \
+ " " insn " \n" \
+ "2: sc.w $t0, %2 \n" \
+ " beq $t0, $zero, 1b \n" \
+ "3: \n" \
+ " .section .fixup,\"ax\" \n" \
+ "4: li.w %0, %6 \n" \
+ " b 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "__UA_ADDR "\t1b, 4b \n" \
+ " "__UA_ADDR "\t2b, 4b \n" \
+ " .previous \n" \
+ : "=r" (ret), "=&r" (oldval), \
+ "=ZC" (*uaddr) \
+ : "0" (0), "ZC" (*uaddr), "Jr" (oparg), \
+ "i" (-EFAULT) \
+ : "memory", "t0"); \
+}
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval = 0, ret = 0;
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("move $t0, %z5", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add.w $t0, %1, %z5", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or $t0, %1, %z5", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and $t0, %1, %z5", ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor $t0, %1, %z5", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 val = 0;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ __asm__ __volatile__(
+ "# futex_atomic_cmpxchg_inatomic \n"
+ "1: ll.w %1, %3 \n"
+ " bne %1, %z4, 3f \n"
+ " or $t0, %z5, $zero \n"
+ "2: sc.w $t0, %2 \n"
+ " beq $zero, $t0, 1b \n"
+ "3: \n"
+ __WEAK_LLSC_MB
+ " .section .fixup,\"ax\" \n"
+ "4: li.d %0, %6 \n"
+ " b 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "__UA_ADDR "\t1b, 4b \n"
+ " "__UA_ADDR "\t2b, 4b \n"
+ " .previous \n"
+ : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+ : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+ "i" (-EFAULT)
+ : "memory", "t0");
+
+ *uval = val;
+
+ return ret;
+}
+
+#endif /* _ASM_FUTEX_H */
diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h
new file mode 100644
index 000000000000..befe8184aa08
--- /dev/null
+++ b/arch/loongarch/include/asm/hardirq.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_HARDIRQ_H
+#define _ASM_HARDIRQ_H
+
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+extern void ack_bad_irq(unsigned int irq);
+#define ack_bad_irq ack_bad_irq
+
+#define NR_IPI 2
+
+typedef struct {
+ unsigned int ipi_irqs[NR_IPI];
+ unsigned int __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
+
+#define __ARCH_IRQ_STAT
+
+#endif /* _ASM_HARDIRQ_H */
diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h
new file mode 100644
index 000000000000..aa44b3fe43dd
--- /dev/null
+++ b/arch/loongarch/include/asm/hugetlb.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_HUGETLB_H
+#define __ASM_HUGETLB_H
+
+#include <asm/page.h>
+
+uint64_t pmd_to_entrylo(unsigned long pmd_val);
+
+#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr,
+ unsigned long len)
+{
+ unsigned long task_size = STACK_TOP;
+ struct hstate *h = hstate_file(file);
+
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (addr & ~huge_page_mask(h))
+ return -EINVAL;
+ if (len > task_size)
+ return -ENOMEM;
+ if (task_size - len < addr)
+ return -EINVAL;
+ return 0;
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t clear;
+ pte_t pte = *ptep;
+
+ pte_val(clear) = (unsigned long)invalid_pte_table;
+ set_pte_at(mm, addr, ptep, clear);
+ return pte;
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t pte;
+
+ pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ flush_tlb_page(vma, addr);
+ return pte;
+}
+
+#define __HAVE_ARCH_HUGE_PTE_NONE
+static inline int huge_pte_none(pte_t pte)
+{
+ unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL;
+ return !val || (val == (unsigned long)invalid_pte_table);
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep, pte_t pte,
+ int dirty)
+{
+ int changed = !pte_same(*ptep, pte);
+
+ if (changed) {
+ set_pte_at(vma->vm_mm, addr, ptep, pte);
+ /*
+ * There could be some standard-sized pages in there;
+ * get them all.
+ */
+ flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
+ }
+ return changed;
+}
+
+#include <asm-generic/hugetlb.h>
+
+#endif /* __ASM_HUGETLB_H */
diff --git a/arch/loongarch/include/asm/hw_irq.h b/arch/loongarch/include/asm/hw_irq.h
new file mode 100644
index 000000000000..af4f4e8fbd85
--- /dev/null
+++ b/arch/loongarch/include/asm/hw_irq.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_HW_IRQ_H
+#define __ASM_HW_IRQ_H
+
+#include <linux/atomic.h>
+
+extern atomic_t irq_err_count;
+
+/*
+ * interrupt-retrigger: NOP for now. This may not be appropriate for all
+ * machines; we'll see ...
+ */
+
+#endif /* __ASM_HW_IRQ_H */
diff --git a/arch/loongarch/include/asm/idle.h b/arch/loongarch/include/asm/idle.h
new file mode 100644
index 000000000000..f7f2b7dbf958
--- /dev/null
+++ b/arch/loongarch/include/asm/idle.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_IDLE_H
+#define __ASM_IDLE_H
+
+#include <linux/linkage.h>
+
+extern asmlinkage void __arch_cpu_idle(void);
+
+#endif /* __ASM_IDLE_H */
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
new file mode 100644
index 000000000000..575d1bb66ffb
--- /dev/null
+++ b/arch/loongarch/include/asm/inst.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_INST_H
+#define _ASM_INST_H
+
+#include <linux/types.h>
+#include <asm/asm.h>
+
+#define ADDR_IMMMASK_LU52ID 0xFFF0000000000000
+#define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000
+#define ADDR_IMMMASK_ADDU16ID 0x00000000FFFF0000
+
+#define ADDR_IMMSHIFT_LU52ID 52
+#define ADDR_IMMSHIFT_LU32ID 32
+#define ADDR_IMMSHIFT_ADDU16ID 16
+
+#define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN)
+
+enum reg1i20_op {
+ lu12iw_op = 0x0a,
+ lu32id_op = 0x0b,
+};
+
+enum reg2i12_op {
+ lu52id_op = 0x0c,
+};
+
+enum reg2i16_op {
+ jirl_op = 0x13,
+};
+
+struct reg0i26_format {
+ unsigned int immediate_h : 10;
+ unsigned int immediate_l : 16;
+ unsigned int opcode : 6;
+};
+
+struct reg1i20_format {
+ unsigned int rd : 5;
+ unsigned int immediate : 20;
+ unsigned int opcode : 7;
+};
+
+struct reg1i21_format {
+ unsigned int immediate_h : 5;
+ unsigned int rj : 5;
+ unsigned int immediate_l : 16;
+ unsigned int opcode : 6;
+};
+
+struct reg2i12_format {
+ unsigned int rd : 5;
+ unsigned int rj : 5;
+ unsigned int immediate : 12;
+ unsigned int opcode : 10;
+};
+
+struct reg2i16_format {
+ unsigned int rd : 5;
+ unsigned int rj : 5;
+ unsigned int immediate : 16;
+ unsigned int opcode : 6;
+};
+
+union loongarch_instruction {
+ unsigned int word;
+ struct reg0i26_format reg0i26_format;
+ struct reg1i20_format reg1i20_format;
+ struct reg1i21_format reg1i21_format;
+ struct reg2i12_format reg2i12_format;
+ struct reg2i16_format reg2i16_format;
+};
+
+#define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction)
+
+enum loongarch_gpr {
+ LOONGARCH_GPR_ZERO = 0,
+ LOONGARCH_GPR_RA = 1,
+ LOONGARCH_GPR_TP = 2,
+ LOONGARCH_GPR_SP = 3,
+ LOONGARCH_GPR_A0 = 4, /* Reused as V0 for return value */
+ LOONGARCH_GPR_A1, /* Reused as V1 for return value */
+ LOONGARCH_GPR_A2,
+ LOONGARCH_GPR_A3,
+ LOONGARCH_GPR_A4,
+ LOONGARCH_GPR_A5,
+ LOONGARCH_GPR_A6,
+ LOONGARCH_GPR_A7,
+ LOONGARCH_GPR_T0 = 12,
+ LOONGARCH_GPR_T1,
+ LOONGARCH_GPR_T2,
+ LOONGARCH_GPR_T3,
+ LOONGARCH_GPR_T4,
+ LOONGARCH_GPR_T5,
+ LOONGARCH_GPR_T6,
+ LOONGARCH_GPR_T7,
+ LOONGARCH_GPR_T8,
+ LOONGARCH_GPR_FP = 22,
+ LOONGARCH_GPR_S0 = 23,
+ LOONGARCH_GPR_S1,
+ LOONGARCH_GPR_S2,
+ LOONGARCH_GPR_S3,
+ LOONGARCH_GPR_S4,
+ LOONGARCH_GPR_S5,
+ LOONGARCH_GPR_S6,
+ LOONGARCH_GPR_S7,
+ LOONGARCH_GPR_S8,
+ LOONGARCH_GPR_MAX
+};
+
+u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm);
+u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
+u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest);
+
+#endif /* _ASM_INST_H */
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
new file mode 100644
index 000000000000..884599739b36
--- /dev/null
+++ b/arch/loongarch/include/asm/io.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#define ARCH_HAS_IOREMAP_WC
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <asm/addrspace.h>
+#include <asm/bug.h>
+#include <asm/byteorder.h>
+#include <asm/cpu.h>
+#include <asm/page.h>
+#include <asm/pgtable-bits.h>
+#include <asm/string.h>
+
+/*
+ * On LoongArch, the I/O port mapping is as follows:
+ *
+ * | .... |
+ * |-----------------------|
+ * | pci io ports(64K~32M) |
+ * |-----------------------|
+ * | isa io ports(0 ~16K) |
+ * PCI_IOBASE ->|-----------------------|
+ * | .... |
+ */
+#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
+#define PCI_IOSIZE SZ_32M
+#define ISA_IOSIZE SZ_16K
+#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
+extern void __init early_iounmap(void __iomem *addr, unsigned long size);
+
+#define early_memremap early_ioremap
+#define early_memunmap early_iounmap
+
+static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+ unsigned long prot_val)
+{
+ if (prot_val == _CACHE_CC)
+ return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
+ else
+ return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset);
+}
+
+/*
+ * ioremap - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ */
+#define ioremap(offset, size) \
+ ioremap_prot((offset), (size), _CACHE_SUC)
+
+/*
+ * ioremap_wc - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_wc performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncacheable
+ * but accelerated by means of the write-combining feature. It is
+ * specifically useful for PCIe prefetchable windows, which may vastly
+ * improve communication performance. If it is determined at boot time
+ * that the CPU CCA doesn't support WUC, the method falls back to the
+ * _CACHE_SUC option (see the cpu_probe() method).
+ */
+#define ioremap_wc(offset, size) \
+ ioremap_prot((offset), (size), _CACHE_WUC)
+
+/*
+ * ioremap_cache - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_cache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked cacheable by
+ * the CPU. It also enables full write-combining. Useful for some
+ * memory-like regions on I/O buses.
+ */
+#define ioremap_cache(offset, size) \
+ ioremap_prot((offset), (size), _CACHE_CC)
+
+static inline void iounmap(const volatile void __iomem *addr)
+{
+}
+
+#define mmiowb() asm volatile ("dbar 0" ::: "memory")
+
+/*
+ * String version of I/O memory access operations.
+ */
+extern void __memset_io(volatile void __iomem *dst, int c, size_t count);
+extern void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count);
+extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count);
+#define memset_io(c, v, l) __memset_io((c), (v), (l))
+#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
+#define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l))
+
+#include <asm-generic/io.h>
+
+#endif /* _ASM_IO_H */
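
[Editor's note, not part of the patch] The ioremap_prot() above relies on LoongArch's direct-mapped windows: no page tables are set up, the physical offset is simply added to either a cached or an uncached window base depending on the requested attribute. A minimal user-space sketch of that selection follows; the DEMO_* bases and attribute values are placeholders chosen for illustration, not the architecture's real DMW constants.

#include <stdio.h>
#include <stdint.h>

#define DEMO_CACHE_BASE		0x9000000000000000ULL	/* placeholder cached window base   */
#define DEMO_UNCACHE_BASE	0x8000000000000000ULL	/* placeholder uncached window base */
#define DEMO_CACHE_CC		1			/* stands in for _CACHE_CC  */
#define DEMO_CACHE_SUC		0			/* stands in for _CACHE_SUC */

/* Mirrors the shape of ioremap_prot(): pick a window base, add the offset. */
static uint64_t demo_ioremap_prot(uint64_t offset, int prot_val)
{
	if (prot_val == DEMO_CACHE_CC)
		return DEMO_CACHE_BASE + offset;
	return DEMO_UNCACHE_BASE + offset;
}

int main(void)
{
	uint64_t phys = 0x1fe00000;	/* arbitrary device address for the demo */

	printf("cached:   %#llx\n", (unsigned long long)demo_ioremap_prot(phys, DEMO_CACHE_CC));
	printf("uncached: %#llx\n", (unsigned long long)demo_ioremap_prot(phys, DEMO_CACHE_SUC));
	return 0;
}
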
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
new file mode 100644
index 000000000000..ace3ea6da72e
--- /dev/null
+++ b/arch/loongarch/include/asm/irq.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+
+#include <linux/irqdomain.h>
+#include <linux/irqreturn.h>
+
+#define IRQ_STACK_SIZE THREAD_SIZE
+#define IRQ_STACK_START (IRQ_STACK_SIZE - 16)
+
+DECLARE_PER_CPU(unsigned long, irq_stack);
+
+/*
+ * The highest address on the IRQ stack contains a dummy frame which is
+ * structured as follows:
+ *
+ * top ------------
+ * | task sp | <- irq_stack[cpu] + IRQ_STACK_START
+ * ------------
+ * | | <- First frame of IRQ context
+ * ------------
+ *
+ * task sp holds a copy of the task stack pointer where the struct pt_regs
+ * from exception entry can be found.
+ */
+
+static inline bool on_irq_stack(int cpu, unsigned long sp)
+{
+ unsigned long low = per_cpu(irq_stack, cpu);
+ unsigned long high = low + IRQ_STACK_SIZE;
+
+ return (low <= sp && sp <= high);
+}
+
+int get_ipi_irq(void);
+int get_pmc_irq(void);
+int get_timer_irq(void);
+void spurious_interrupt(void);
+
+#define NR_IRQS_LEGACY 16
+
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask, bool exclude_self);
+
+#define MAX_IO_PICS 2
+#define NR_IRQS (64 + (256 * MAX_IO_PICS))
+
+#define CORES_PER_EIO_NODE 4
+
+#define LOONGSON_CPU_UART0_VEC 10 /* CPU UART0 */
+#define LOONGSON_CPU_THSENS_VEC 14 /* CPU Thsens */
+#define LOONGSON_CPU_HT0_VEC 16 /* CPU HT0 irq vector base number */
+#define LOONGSON_CPU_HT1_VEC 24 /* CPU HT1 irq vector base number */
+
+/* IRQ number definitions */
+#define LOONGSON_LPC_IRQ_BASE 0
+#define LOONGSON_LPC_LAST_IRQ (LOONGSON_LPC_IRQ_BASE + 15)
+
+#define LOONGSON_CPU_IRQ_BASE 16
+#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 14)
+
+#define LOONGSON_PCH_IRQ_BASE 64
+#define LOONGSON_PCH_ACPI_IRQ (LOONGSON_PCH_IRQ_BASE + 47)
+#define LOONGSON_PCH_LAST_IRQ (LOONGSON_PCH_IRQ_BASE + 64 - 1)
+
+#define LOONGSON_MSI_IRQ_BASE (LOONGSON_PCH_IRQ_BASE + 64)
+#define LOONGSON_MSI_LAST_IRQ (LOONGSON_PCH_IRQ_BASE + 256 - 1)
+
+#define GSI_MIN_LPC_IRQ LOONGSON_LPC_IRQ_BASE
+#define GSI_MAX_LPC_IRQ (LOONGSON_LPC_IRQ_BASE + 16 - 1)
+#define GSI_MIN_CPU_IRQ LOONGSON_CPU_IRQ_BASE
+#define GSI_MAX_CPU_IRQ (LOONGSON_CPU_IRQ_BASE + 48 - 1)
+#define GSI_MIN_PCH_IRQ LOONGSON_PCH_IRQ_BASE
+#define GSI_MAX_PCH_IRQ (LOONGSON_PCH_IRQ_BASE + 256 - 1)
+
+extern int find_pch_pic(u32 gsi);
+extern int eiointc_get_node(int id);
+
+static inline void eiointc_enable(void)
+{
+ uint64_t misc;
+
+ misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
+ misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
+ iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
+}
+
+struct acpi_madt_lio_pic;
+struct acpi_madt_eio_pic;
+struct acpi_madt_ht_pic;
+struct acpi_madt_bio_pic;
+struct acpi_madt_msi_pic;
+struct acpi_madt_lpc_pic;
+
+struct irq_domain *loongarch_cpu_irq_init(void);
+
+struct irq_domain *liointc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_lio_pic *acpi_liointc);
+struct irq_domain *eiointc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_eio_pic *acpi_eiointc);
+
+struct irq_domain *htvec_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_ht_pic *acpi_htvec);
+struct irq_domain *pch_lpc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_lpc_pic *acpi_pchlpc);
+struct irq_domain *pch_msi_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_msi_pic *acpi_pchmsi);
+struct irq_domain *pch_pic_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_bio_pic *acpi_pchpic);
+
+extern struct acpi_madt_lio_pic *acpi_liointc;
+extern struct acpi_madt_eio_pic *acpi_eiointc[MAX_IO_PICS];
+
+extern struct acpi_madt_ht_pic *acpi_htintc;
+extern struct acpi_madt_lpc_pic *acpi_pchlpc;
+extern struct acpi_madt_msi_pic *acpi_pchmsi[MAX_IO_PICS];
+extern struct acpi_madt_bio_pic *acpi_pchpic[MAX_IO_PICS];
+
+extern struct irq_domain *cpu_domain;
+extern struct irq_domain *liointc_domain;
+extern struct irq_domain *pch_lpc_domain;
+extern struct irq_domain *pch_msi_domain[MAX_IO_PICS];
+extern struct irq_domain *pch_pic_domain[MAX_IO_PICS];
+
+extern irqreturn_t loongson3_ipi_interrupt(int irq, void *dev);
+
+#include <asm-generic/irq.h>
+
+#endif /* _ASM_IRQ_H */
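
[Editor's note, not part of the patch] The GSI_MIN/GSI_MAX constants above partition the GSI space into LPC (0-15), CPU (16-63) and PCH (64-319) blocks. The stand-alone sketch below only illustrates that partitioning; the DEMO_* names are local to the example and the ranges are copied from the macros above.

#include <stdio.h>

/* Ranges copied from the GSI range macros above. */
#define DEMO_LPC_MIN	0
#define DEMO_LPC_MAX	15
#define DEMO_CPU_MIN	16
#define DEMO_CPU_MAX	63
#define DEMO_PCH_MIN	64
#define DEMO_PCH_MAX	(64 + 256 - 1)

static const char *demo_gsi_block(unsigned int gsi)
{
	if (gsi <= DEMO_LPC_MAX)
		return "LPC";
	if (gsi >= DEMO_CPU_MIN && gsi <= DEMO_CPU_MAX)
		return "CPU";
	if (gsi >= DEMO_PCH_MIN && gsi <= DEMO_PCH_MAX)
		return "PCH";
	return "out of range";
}

int main(void)
{
	unsigned int samples[] = { 4, 27, 111, 500 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("GSI %3u -> %s\n", samples[i], demo_gsi_block(samples[i]));
	return 0;
}
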
diff --git a/arch/loongarch/include/asm/irq_regs.h b/arch/loongarch/include/asm/irq_regs.h
new file mode 100644
index 000000000000..3d62d815bf6b
--- /dev/null
+++ b/arch/loongarch/include/asm/irq_regs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_IRQ_REGS_H
+#define __ASM_IRQ_REGS_H
+
+#define ARCH_HAS_OWN_IRQ_REGS
+
+#include <linux/thread_info.h>
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+ return current_thread_info()->regs;
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+ struct pt_regs *old_regs;
+
+ old_regs = get_irq_regs();
+ current_thread_info()->regs = new_regs;
+
+ return old_regs;
+}
+
+#endif /* __ASM_IRQ_REGS_H */
diff --git a/arch/loongarch/include/asm/irqflags.h b/arch/loongarch/include/asm/irqflags.h
new file mode 100644
index 000000000000..52121cd791fe
--- /dev/null
+++ b/arch/loongarch/include/asm/irqflags.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/stringify.h>
+#include <asm/compiler.h>
+#include <asm/loongarch.h>
+
+static inline void arch_local_irq_enable(void)
+{
+ u32 flags = CSR_CRMD_IE;
+ __asm__ __volatile__(
+ "csrxchg %[val], %[mask], %[reg]\n\t"
+ : [val] "+r" (flags)
+ : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : "memory");
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ u32 flags = 0;
+ __asm__ __volatile__(
+ "csrxchg %[val], %[mask], %[reg]\n\t"
+ : [val] "+r" (flags)
+ : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : "memory");
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ u32 flags = 0;
+ __asm__ __volatile__(
+ "csrxchg %[val], %[mask], %[reg]\n\t"
+ : [val] "+r" (flags)
+ : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : "memory");
+ return flags;
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ __asm__ __volatile__(
+ "csrxchg %[val], %[mask], %[reg]\n\t"
+ : [val] "+r" (flags)
+ : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : "memory");
+}
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ u32 flags;
+ __asm__ __volatile__(
+ "csrrd %[val], %[reg]\n\t"
+ : [val] "=r" (flags)
+ : [reg] "i" (LOONGARCH_CSR_CRMD)
+ : "memory");
+ return flags;
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & CSR_CRMD_IE);
+}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#endif /* #ifndef __ASSEMBLY__ */
+
+#endif /* _ASM_IRQFLAGS_H */
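
[Editor's note, not part of the patch] All of the helpers above are built on one csrxchg property: the masked bits of the source operand are written into the CSR and the old CSR value comes back, so a single instruction both flips CRMD.IE and preserves the previous state for restore. The user-space model below imitates that semantic; demo_crmd and the DEMO_CRMD_IE value are stand-ins for the real CSR, not the actual instruction.

#include <stdio.h>

#define DEMO_CRMD_IE (1u << 2)	/* mirrors CSR_CRMD_IE above */

static unsigned int demo_crmd = DEMO_CRMD_IE;	/* pretend IRQs start enabled */

/* Model of csrxchg: write 'val' under 'mask', return the old value. */
static unsigned int demo_csrxchg(unsigned int val, unsigned int mask)
{
	unsigned int old = demo_crmd;

	demo_crmd = (demo_crmd & ~mask) | (val & mask);
	return old;
}

int main(void)
{
	/* arch_local_irq_save(): clear IE, keep the old flags */
	unsigned int flags = demo_csrxchg(0, DEMO_CRMD_IE);

	printf("saved flags: %#x, IE now %u\n", flags, !!(demo_crmd & DEMO_CRMD_IE));

	/* arch_local_irq_restore(flags): write the saved IE bit back */
	demo_csrxchg(flags, DEMO_CRMD_IE);
	printf("restored, IE now %u\n", !!(demo_crmd & DEMO_CRMD_IE));
	return 0;
}
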
diff --git a/arch/loongarch/include/asm/kdebug.h b/arch/loongarch/include/asm/kdebug.h
new file mode 100644
index 000000000000..d721b4b82fae
--- /dev/null
+++ b/arch/loongarch/include/asm/kdebug.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_LOONGARCH_KDEBUG_H
+#define _ASM_LOONGARCH_KDEBUG_H
+
+#include <linux/notifier.h>
+
+enum die_val {
+ DIE_OOPS = 1,
+ DIE_RI,
+ DIE_FP,
+ DIE_SIMD,
+ DIE_TRAP,
+ DIE_PAGE_FAULT,
+ DIE_BREAK,
+ DIE_SSTEPBP,
+ DIE_UPROBE,
+ DIE_UPROBE_XOL,
+};
+
+#endif /* _ASM_LOONGARCH_KDEBUG_H */
diff --git a/arch/loongarch/include/asm/linkage.h b/arch/loongarch/include/asm/linkage.h
new file mode 100644
index 000000000000..81b0c4cfbf4f
--- /dev/null
+++ b/arch/loongarch/include/asm/linkage.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .align 2
+#define __ALIGN_STR __stringify(__ALIGN)
+
+#define SYM_FUNC_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \
+ .cfi_startproc;
+
+#define SYM_FUNC_START_NOALIGN(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) \
+ .cfi_startproc;
+
+#define SYM_FUNC_START_LOCAL(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) \
+ .cfi_startproc;
+
+#define SYM_FUNC_START_LOCAL_NOALIGN(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) \
+ .cfi_startproc;
+
+#define SYM_FUNC_START_WEAK(name) \
+ SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN) \
+ .cfi_startproc;
+
+#define SYM_FUNC_START_WEAK_NOALIGN(name) \
+ SYM_START(name, SYM_L_WEAK, SYM_A_NONE) \
+ .cfi_startproc;
+
+#define SYM_FUNC_END(name) \
+ .cfi_endproc; \
+ SYM_END(name, SYM_T_FUNC)
+
+#endif
diff --git a/arch/loongarch/include/asm/local.h b/arch/loongarch/include/asm/local.h
new file mode 100644
index 000000000000..2052a2267337
--- /dev/null
+++ b/arch/loongarch/include/asm/local.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ARCH_LOONGARCH_LOCAL_H
+#define _ARCH_LOONGARCH_LOCAL_H
+
+#include <linux/percpu.h>
+#include <linux/bitops.h>
+#include <linux/atomic.h>
+#include <asm/cmpxchg.h>
+#include <asm/compiler.h>
+
+typedef struct {
+ atomic_long_t a;
+} local_t;
+
+#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+#define local_read(l) atomic_long_read(&(l)->a)
+#define local_set(l, i) atomic_long_set(&(l)->a, (i))
+
+#define local_add(i, l) atomic_long_add((i), (&(l)->a))
+#define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
+#define local_inc(l) atomic_long_inc(&(l)->a)
+#define local_dec(l) atomic_long_dec(&(l)->a)
+
+/*
+ * Same as above, but return the result value
+ */
+static inline long local_add_return(long i, local_t *l)
+{
+ unsigned long result;
+
+ __asm__ __volatile__(
+ " " __AMADD " %1, %2, %0 \n"
+ : "+ZB" (l->a.counter), "=&r" (result)
+ : "r" (i)
+ : "memory");
+ result = result + i;
+
+ return result;
+}
+
+static inline long local_sub_return(long i, local_t *l)
+{
+ unsigned long result;
+
+ __asm__ __volatile__(
+ " " __AMADD "%1, %2, %0 \n"
+ : "+ZB" (l->a.counter), "=&r" (result)
+ : "r" (-i)
+ : "memory");
+
+ result = result - i;
+
+ return result;
+}
+
+#define local_cmpxchg(l, o, n) \
+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to l...
+ * @u: ...unless l is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+#define local_add_unless(l, a, u) \
+({ \
+ long c, old; \
+ c = local_read(l); \
+ while (c != (u) && (old = local_cmpxchg((l), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
+#define local_dec_return(l) local_sub_return(1, (l))
+#define local_inc_return(l) local_add_return(1, (l))
+
+/*
+ * local_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @l: pointer of type local_t
+ *
+ * Atomically subtracts @i from @l and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_sub_and_test(i, l) (local_sub_return((i), (l)) == 0)
+
+/*
+ * local_inc_and_test - increment and test
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_inc_and_test(l) (local_inc_return(l) == 0)
+
+/*
+ * local_dec_and_test - decrement by 1 and test
+ * @l: pointer of type local_t
+ *
+ * Atomically decrements @l by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0)
+
+/*
+ * local_add_negative - add and test if negative
+ * @l: pointer of type local_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @l and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+#define local_add_negative(i, l) (local_add_return(i, (l)) < 0)
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations. Note they take
+ * a variable, not an address.
+ */
+
+#define __local_inc(l) ((l)->a.counter++)
+#define __local_dec(l) ((l)->a.counter--)
+#define __local_add(i, l) ((l)->a.counter += (i))
+#define __local_sub(i, l) ((l)->a.counter -= (i))
+
+#endif /* _ARCH_LOONGARCH_LOCAL_H */
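
[Editor's note, not part of the patch] local_add_unless() above is the classic compare-and-swap retry loop: re-read the value, give up if it equals the forbidden value, otherwise try to install value + a until the cmpxchg succeeds. A self-contained user-space sketch of the same loop, using C11 atomics in place of the kernel's cmpxchg_local(), is shown below; demo_add_unless() is a hypothetical helper for illustration only.

#include <stdatomic.h>
#include <stdio.h>

/* Add 'a' to *v unless *v equals 'u'; returns non-zero if the add happened. */
static long demo_add_unless(atomic_long *v, long a, long u)
{
	long c = atomic_load(v);

	/* Retry until either the value is 'u' or our CAS wins. */
	while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
		;
	return c != u;
}

int main(void)
{
	atomic_long v = 5;
	atomic_long z = 0;

	long ok = demo_add_unless(&v, 1, 0);
	printf("add_unless(5, +1, 0) -> %ld, value %ld\n", ok, atomic_load(&v));

	ok = demo_add_unless(&z, 1, 0);
	printf("add_unless(0, +1, 0) -> %ld, value %ld\n", ok, atomic_load(&z));
	return 0;
}
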
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
new file mode 100644
index 000000000000..3ba4f7e87cd2
--- /dev/null
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -0,0 +1,1516 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_LOONGARCH_H
+#define _ASM_LOONGARCH_H
+
+#include <linux/bits.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+#ifndef __ASSEMBLY__
+#include <larchintrin.h>
+
+/*
+ * parse_r var, r - Helper assembler macro for parsing register names.
+ *
+ * This converts the register name in $n form provided in \r to the
+ * corresponding register number, which is assigned to the variable \var. It is
+ * needed to allow explicit encoding of instructions in inline assembly where
+ * registers are chosen by the compiler in $n form, allowing us to avoid using
+ * fixed register numbers.
+ *
+ * It also allows newer instructions (not implemented by the assembler) to be
+ * transparently implemented using assembler macros, instead of needing separate
+ * cases depending on toolchain support.
+ *
+ * Simple usage example:
+ * __asm__ __volatile__("parse_r addr, %0\n\t"
+ * "#invtlb op, 0, %0\n\t"
+ * ".word ((0x6498000) | (addr << 10) | (0 << 5) | op)"
+ * : "=r" (status);
+ */
+
+/* Match an individual register number and assign to \var */
+#define _IFC_REG(n) \
+ ".ifc \\r, $r" #n "\n\t" \
+ "\\var = " #n "\n\t" \
+ ".endif\n\t"
+
+__asm__(".macro parse_r var r\n\t"
+ "\\var = -1\n\t"
+ _IFC_REG(0) _IFC_REG(1) _IFC_REG(2) _IFC_REG(3)
+ _IFC_REG(4) _IFC_REG(5) _IFC_REG(6) _IFC_REG(7)
+ _IFC_REG(8) _IFC_REG(9) _IFC_REG(10) _IFC_REG(11)
+ _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15)
+ _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19)
+ _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23)
+ _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27)
+ _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31)
+ ".iflt \\var\n\t"
+ ".error \"Unable to parse register name \\r\"\n\t"
+ ".endif\n\t"
+ ".endm");
+
+#undef _IFC_REG
+
+/* CPUCFG */
+static inline u32 read_cpucfg(u32 reg)
+{
+ return __cpucfg(reg);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef __ASSEMBLY__
+
+/* LoongArch Registers */
+#define REG_ZERO 0x0
+#define REG_RA 0x1
+#define REG_TP 0x2
+#define REG_SP 0x3
+#define REG_A0 0x4 /* Reused as V0 for return value */
+#define REG_A1 0x5 /* Reused as V1 for return value */
+#define REG_A2 0x6
+#define REG_A3 0x7
+#define REG_A4 0x8
+#define REG_A5 0x9
+#define REG_A6 0xa
+#define REG_A7 0xb
+#define REG_T0 0xc
+#define REG_T1 0xd
+#define REG_T2 0xe
+#define REG_T3 0xf
+#define REG_T4 0x10
+#define REG_T5 0x11
+#define REG_T6 0x12
+#define REG_T7 0x13
+#define REG_T8 0x14
+#define REG_U0 0x15 /* Kernel uses it as percpu base */
+#define REG_FP 0x16
+#define REG_S0 0x17
+#define REG_S1 0x18
+#define REG_S2 0x19
+#define REG_S3 0x1a
+#define REG_S4 0x1b
+#define REG_S5 0x1c
+#define REG_S6 0x1d
+#define REG_S7 0x1e
+#define REG_S8 0x1f
+
+#endif /* __ASSEMBLY__ */
+
+/* Bit fields for CPUCFG registers */
+#define LOONGARCH_CPUCFG0 0x0
+#define CPUCFG0_PRID GENMASK(31, 0)
+
+#define LOONGARCH_CPUCFG1 0x1
+#define CPUCFG1_ISGR32 BIT(0)
+#define CPUCFG1_ISGR64 BIT(1)
+#define CPUCFG1_PAGING BIT(2)
+#define CPUCFG1_IOCSR BIT(3)
+#define CPUCFG1_PABITS GENMASK(11, 4)
+#define CPUCFG1_VABITS GENMASK(19, 12)
+#define CPUCFG1_UAL BIT(20)
+#define CPUCFG1_RI BIT(21)
+#define CPUCFG1_EP BIT(22)
+#define CPUCFG1_RPLV BIT(23)
+#define CPUCFG1_HUGEPG BIT(24)
+#define CPUCFG1_IOCSRBRD BIT(25)
+#define CPUCFG1_MSGINT BIT(26)
+
+#define LOONGARCH_CPUCFG2 0x2
+#define CPUCFG2_FP BIT(0)
+#define CPUCFG2_FPSP BIT(1)
+#define CPUCFG2_FPDP BIT(2)
+#define CPUCFG2_FPVERS GENMASK(5, 3)
+#define CPUCFG2_LSX BIT(6)
+#define CPUCFG2_LASX BIT(7)
+#define CPUCFG2_COMPLEX BIT(8)
+#define CPUCFG2_CRYPTO BIT(9)
+#define CPUCFG2_LVZP BIT(10)
+#define CPUCFG2_LVZVER GENMASK(13, 11)
+#define CPUCFG2_LLFTP BIT(14)
+#define CPUCFG2_LLFTPREV GENMASK(17, 15)
+#define CPUCFG2_X86BT BIT(18)
+#define CPUCFG2_ARMBT BIT(19)
+#define CPUCFG2_MIPSBT BIT(20)
+#define CPUCFG2_LSPW BIT(21)
+#define CPUCFG2_LAM BIT(22)
+
+#define LOONGARCH_CPUCFG3 0x3
+#define CPUCFG3_CCDMA BIT(0)
+#define CPUCFG3_SFB BIT(1)
+#define CPUCFG3_UCACC BIT(2)
+#define CPUCFG3_LLEXC BIT(3)
+#define CPUCFG3_SCDLY BIT(4)
+#define CPUCFG3_LLDBAR BIT(5)
+#define CPUCFG3_ITLBT BIT(6)
+#define CPUCFG3_ICACHET BIT(7)
+#define CPUCFG3_SPW_LVL GENMASK(10, 8)
+#define CPUCFG3_SPW_HG_HF BIT(11)
+#define CPUCFG3_RVA BIT(12)
+#define CPUCFG3_RVAMAX GENMASK(16, 13)
+
+#define LOONGARCH_CPUCFG4 0x4
+#define CPUCFG4_CCFREQ GENMASK(31, 0)
+
+#define LOONGARCH_CPUCFG5 0x5
+#define CPUCFG5_CCMUL GENMASK(15, 0)
+#define CPUCFG5_CCDIV GENMASK(31, 16)
+
+#define LOONGARCH_CPUCFG6 0x6
+#define CPUCFG6_PMP BIT(0)
+#define CPUCFG6_PAMVER GENMASK(3, 1)
+#define CPUCFG6_PMNUM GENMASK(7, 4)
+#define CPUCFG6_PMBITS GENMASK(13, 8)
+#define CPUCFG6_UPM BIT(14)
+
+#define LOONGARCH_CPUCFG16 0x10
+#define CPUCFG16_L1_IUPRE BIT(0)
+#define CPUCFG16_L1_IUUNIFY BIT(1)
+#define CPUCFG16_L1_DPRE BIT(2)
+#define CPUCFG16_L2_IUPRE BIT(3)
+#define CPUCFG16_L2_IUUNIFY BIT(4)
+#define CPUCFG16_L2_IUPRIV BIT(5)
+#define CPUCFG16_L2_IUINCL BIT(6)
+#define CPUCFG16_L2_DPRE BIT(7)
+#define CPUCFG16_L2_DPRIV BIT(8)
+#define CPUCFG16_L2_DINCL BIT(9)
+#define CPUCFG16_L3_IUPRE BIT(10)
+#define CPUCFG16_L3_IUUNIFY BIT(11)
+#define CPUCFG16_L3_IUPRIV BIT(12)
+#define CPUCFG16_L3_IUINCL BIT(13)
+#define CPUCFG16_L3_DPRE BIT(14)
+#define CPUCFG16_L3_DPRIV BIT(15)
+#define CPUCFG16_L3_DINCL BIT(16)
+
+#define LOONGARCH_CPUCFG17 0x11
+#define CPUCFG17_L1I_WAYS_M GENMASK(15, 0)
+#define CPUCFG17_L1I_SETS_M GENMASK(23, 16)
+#define CPUCFG17_L1I_SIZE_M GENMASK(30, 24)
+#define CPUCFG17_L1I_WAYS 0
+#define CPUCFG17_L1I_SETS 16
+#define CPUCFG17_L1I_SIZE 24
+
+#define LOONGARCH_CPUCFG18 0x12
+#define CPUCFG18_L1D_WAYS_M GENMASK(15, 0)
+#define CPUCFG18_L1D_SETS_M GENMASK(23, 16)
+#define CPUCFG18_L1D_SIZE_M GENMASK(30, 24)
+#define CPUCFG18_L1D_WAYS 0
+#define CPUCFG18_L1D_SETS 16
+#define CPUCFG18_L1D_SIZE 24
+
+#define LOONGARCH_CPUCFG19 0x13
+#define CPUCFG19_L2_WAYS_M GENMASK(15, 0)
+#define CPUCFG19_L2_SETS_M GENMASK(23, 16)
+#define CPUCFG19_L2_SIZE_M GENMASK(30, 24)
+#define CPUCFG19_L2_WAYS 0
+#define CPUCFG19_L2_SETS 16
+#define CPUCFG19_L2_SIZE 24
+
+#define LOONGARCH_CPUCFG20 0x14
+#define CPUCFG20_L3_WAYS_M GENMASK(15, 0)
+#define CPUCFG20_L3_SETS_M GENMASK(23, 16)
+#define CPUCFG20_L3_SIZE_M GENMASK(30, 24)
+#define CPUCFG20_L3_WAYS 0
+#define CPUCFG20_L3_SETS 16
+#define CPUCFG20_L3_SIZE 24
+
+#define LOONGARCH_CPUCFG48 0x30
+#define CPUCFG48_MCSR_LCK BIT(0)
+#define CPUCFG48_NAP_EN BIT(1)
+#define CPUCFG48_VFPU_CG BIT(2)
+#define CPUCFG48_RAM_CG BIT(3)
+
+#ifndef __ASSEMBLY__
+
+/* CSR */
+static __always_inline u32 csr_read32(u32 reg)
+{
+ return __csrrd_w(reg);
+}
+
+static __always_inline u64 csr_read64(u32 reg)
+{
+ return __csrrd_d(reg);
+}
+
+static __always_inline void csr_write32(u32 val, u32 reg)
+{
+ __csrwr_w(val, reg);
+}
+
+static __always_inline void csr_write64(u64 val, u32 reg)
+{
+ __csrwr_d(val, reg);
+}
+
+static __always_inline u32 csr_xchg32(u32 val, u32 mask, u32 reg)
+{
+ return __csrxchg_w(val, mask, reg);
+}
+
+static __always_inline u64 csr_xchg64(u64 val, u64 mask, u32 reg)
+{
+ return __csrxchg_d(val, mask, reg);
+}
+
+/* IOCSR */
+static __always_inline u32 iocsr_read32(u32 reg)
+{
+ return __iocsrrd_w(reg);
+}
+
+static __always_inline u64 iocsr_read64(u32 reg)
+{
+ return __iocsrrd_d(reg);
+}
+
+static __always_inline void iocsr_write32(u32 val, u32 reg)
+{
+ __iocsrwr_w(val, reg);
+}
+
+static __always_inline void iocsr_write64(u64 val, u32 reg)
+{
+ __iocsrwr_d(val, reg);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+/* CSR register number */
+
+/* Basic CSR registers */
+#define LOONGARCH_CSR_CRMD 0x0 /* Current mode info */
+#define CSR_CRMD_WE_SHIFT 9
+#define CSR_CRMD_WE (_ULCAST_(0x1) << CSR_CRMD_WE_SHIFT)
+#define CSR_CRMD_DACM_SHIFT 7
+#define CSR_CRMD_DACM_WIDTH 2
+#define CSR_CRMD_DACM (_ULCAST_(0x3) << CSR_CRMD_DACM_SHIFT)
+#define CSR_CRMD_DACF_SHIFT 5
+#define CSR_CRMD_DACF_WIDTH 2
+#define CSR_CRMD_DACF (_ULCAST_(0x3) << CSR_CRMD_DACF_SHIFT)
+#define CSR_CRMD_PG_SHIFT 4
+#define CSR_CRMD_PG (_ULCAST_(0x1) << CSR_CRMD_PG_SHIFT)
+#define CSR_CRMD_DA_SHIFT 3
+#define CSR_CRMD_DA (_ULCAST_(0x1) << CSR_CRMD_DA_SHIFT)
+#define CSR_CRMD_IE_SHIFT 2
+#define CSR_CRMD_IE (_ULCAST_(0x1) << CSR_CRMD_IE_SHIFT)
+#define CSR_CRMD_PLV_SHIFT 0
+#define CSR_CRMD_PLV_WIDTH 2
+#define CSR_CRMD_PLV (_ULCAST_(0x3) << CSR_CRMD_PLV_SHIFT)
+
+#define PLV_KERN 0
+#define PLV_USER 3
+#define PLV_MASK 0x3
+
+#define LOONGARCH_CSR_PRMD 0x1 /* Prev-exception mode info */
+#define CSR_PRMD_PWE_SHIFT 3
+#define CSR_PRMD_PWE (_ULCAST_(0x1) << CSR_PRMD_PWE_SHIFT)
+#define CSR_PRMD_PIE_SHIFT 2
+#define CSR_PRMD_PIE (_ULCAST_(0x1) << CSR_PRMD_PIE_SHIFT)
+#define CSR_PRMD_PPLV_SHIFT 0
+#define CSR_PRMD_PPLV_WIDTH 2
+#define CSR_PRMD_PPLV (_ULCAST_(0x3) << CSR_PRMD_PPLV_SHIFT)
+
+#define LOONGARCH_CSR_EUEN 0x2 /* Extended unit enable */
+#define CSR_EUEN_LBTEN_SHIFT 3
+#define CSR_EUEN_LBTEN (_ULCAST_(0x1) << CSR_EUEN_LBTEN_SHIFT)
+#define CSR_EUEN_LASXEN_SHIFT 2
+#define CSR_EUEN_LASXEN (_ULCAST_(0x1) << CSR_EUEN_LASXEN_SHIFT)
+#define CSR_EUEN_LSXEN_SHIFT 1
+#define CSR_EUEN_LSXEN (_ULCAST_(0x1) << CSR_EUEN_LSXEN_SHIFT)
+#define CSR_EUEN_FPEN_SHIFT 0
+#define CSR_EUEN_FPEN (_ULCAST_(0x1) << CSR_EUEN_FPEN_SHIFT)
+
+#define LOONGARCH_CSR_MISC 0x3 /* Misc config */
+
+#define LOONGARCH_CSR_ECFG 0x4 /* Exception config */
+#define CSR_ECFG_VS_SHIFT 16
+#define CSR_ECFG_VS_WIDTH 3
+#define CSR_ECFG_VS (_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT)
+#define CSR_ECFG_IM_SHIFT 0
+#define CSR_ECFG_IM_WIDTH 13
+#define CSR_ECFG_IM (_ULCAST_(0x1fff) << CSR_ECFG_IM_SHIFT)
+
+#define LOONGARCH_CSR_ESTAT 0x5 /* Exception status */
+#define CSR_ESTAT_ESUBCODE_SHIFT 22
+#define CSR_ESTAT_ESUBCODE_WIDTH 9
+#define CSR_ESTAT_ESUBCODE (_ULCAST_(0x1ff) << CSR_ESTAT_ESUBCODE_SHIFT)
+#define CSR_ESTAT_EXC_SHIFT 16
+#define CSR_ESTAT_EXC_WIDTH 6
+#define CSR_ESTAT_EXC (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT)
+#define CSR_ESTAT_IS_SHIFT 0
+#define CSR_ESTAT_IS_WIDTH 15
+#define CSR_ESTAT_IS (_ULCAST_(0x7fff) << CSR_ESTAT_IS_SHIFT)
+
+#define LOONGARCH_CSR_ERA 0x6 /* ERA */
+
+#define LOONGARCH_CSR_BADV 0x7 /* Bad virtual address */
+
+#define LOONGARCH_CSR_BADI 0x8 /* Bad instruction */
+
+#define LOONGARCH_CSR_EENTRY 0xc /* Exception entry */
+
+/* TLB related CSR registers */
+#define LOONGARCH_CSR_TLBIDX 0x10 /* TLB Index, EHINV, PageSize, NP */
+#define CSR_TLBIDX_EHINV_SHIFT 31
+#define CSR_TLBIDX_EHINV (_ULCAST_(1) << CSR_TLBIDX_EHINV_SHIFT)
+#define CSR_TLBIDX_PS_SHIFT 24
+#define CSR_TLBIDX_PS_WIDTH 6
+#define CSR_TLBIDX_PS (_ULCAST_(0x3f) << CSR_TLBIDX_PS_SHIFT)
+#define CSR_TLBIDX_IDX_SHIFT 0
+#define CSR_TLBIDX_IDX_WIDTH 12
+#define CSR_TLBIDX_IDX (_ULCAST_(0xfff) << CSR_TLBIDX_IDX_SHIFT)
+#define CSR_TLBIDX_SIZEM 0x3f000000
+#define CSR_TLBIDX_SIZE CSR_TLBIDX_PS_SHIFT
+#define CSR_TLBIDX_IDXM 0xfff
+#define CSR_INVALID_ENTRY(e) (CSR_TLBIDX_EHINV | e)
+
+#define LOONGARCH_CSR_TLBEHI 0x11 /* TLB EntryHi */
+
+#define LOONGARCH_CSR_TLBELO0 0x12 /* TLB EntryLo0 */
+#define CSR_TLBLO0_RPLV_SHIFT 63
+#define CSR_TLBLO0_RPLV (_ULCAST_(0x1) << CSR_TLBLO0_RPLV_SHIFT)
+#define CSR_TLBLO0_NX_SHIFT 62
+#define CSR_TLBLO0_NX (_ULCAST_(0x1) << CSR_TLBLO0_NX_SHIFT)
+#define CSR_TLBLO0_NR_SHIFT 61
+#define CSR_TLBLO0_NR (_ULCAST_(0x1) << CSR_TLBLO0_NR_SHIFT)
+#define CSR_TLBLO0_PFN_SHIFT 12
+#define CSR_TLBLO0_PFN_WIDTH 36
+#define CSR_TLBLO0_PFN (_ULCAST_(0xfffffffff) << CSR_TLBLO0_PFN_SHIFT)
+#define CSR_TLBLO0_GLOBAL_SHIFT 6
+#define CSR_TLBLO0_GLOBAL (_ULCAST_(0x1) << CSR_TLBLO0_GLOBAL_SHIFT)
+#define CSR_TLBLO0_CCA_SHIFT 4
+#define CSR_TLBLO0_CCA_WIDTH 2
+#define CSR_TLBLO0_CCA (_ULCAST_(0x3) << CSR_TLBLO0_CCA_SHIFT)
+#define CSR_TLBLO0_PLV_SHIFT 2
+#define CSR_TLBLO0_PLV_WIDTH 2
+#define CSR_TLBLO0_PLV (_ULCAST_(0x3) << CSR_TLBLO0_PLV_SHIFT)
+#define CSR_TLBLO0_WE_SHIFT 1
+#define CSR_TLBLO0_WE (_ULCAST_(0x1) << CSR_TLBLO0_WE_SHIFT)
+#define CSR_TLBLO0_V_SHIFT 0
+#define CSR_TLBLO0_V (_ULCAST_(0x1) << CSR_TLBLO0_V_SHIFT)
+
+#define LOONGARCH_CSR_TLBELO1 0x13 /* TLB EntryLo1 */
+#define CSR_TLBLO1_RPLV_SHIFT 63
+#define CSR_TLBLO1_RPLV (_ULCAST_(0x1) << CSR_TLBLO1_RPLV_SHIFT)
+#define CSR_TLBLO1_NX_SHIFT 62
+#define CSR_TLBLO1_NX (_ULCAST_(0x1) << CSR_TLBLO1_NX_SHIFT)
+#define CSR_TLBLO1_NR_SHIFT 61
+#define CSR_TLBLO1_NR (_ULCAST_(0x1) << CSR_TLBLO1_NR_SHIFT)
+#define CSR_TLBLO1_PFN_SHIFT 12
+#define CSR_TLBLO1_PFN_WIDTH 36
+#define CSR_TLBLO1_PFN (_ULCAST_(0xfffffffff) << CSR_TLBLO1_PFN_SHIFT)
+#define CSR_TLBLO1_GLOBAL_SHIFT 6
+#define CSR_TLBLO1_GLOBAL (_ULCAST_(0x1) << CSR_TLBLO1_GLOBAL_SHIFT)
+#define CSR_TLBLO1_CCA_SHIFT 4
+#define CSR_TLBLO1_CCA_WIDTH 2
+#define CSR_TLBLO1_CCA (_ULCAST_(0x3) << CSR_TLBLO1_CCA_SHIFT)
+#define CSR_TLBLO1_PLV_SHIFT 2
+#define CSR_TLBLO1_PLV_WIDTH 2
+#define CSR_TLBLO1_PLV (_ULCAST_(0x3) << CSR_TLBLO1_PLV_SHIFT)
+#define CSR_TLBLO1_WE_SHIFT 1
+#define CSR_TLBLO1_WE (_ULCAST_(0x1) << CSR_TLBLO1_WE_SHIFT)
+#define CSR_TLBLO1_V_SHIFT 0
+#define CSR_TLBLO1_V (_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT)
+
+#define LOONGARCH_CSR_GTLBC 0x15 /* Guest TLB control */
+#define CSR_GTLBC_RID_SHIFT 16
+#define CSR_GTLBC_RID_WIDTH 8
+#define CSR_GTLBC_RID (_ULCAST_(0xff) << CSR_GTLBC_RID_SHIFT)
+#define CSR_GTLBC_TOTI_SHIFT 13
+#define CSR_GTLBC_TOTI (_ULCAST_(0x1) << CSR_GTLBC_TOTI_SHIFT)
+#define CSR_GTLBC_USERID_SHIFT 12
+#define CSR_GTLBC_USERID (_ULCAST_(0x1) << CSR_GTLBC_USERID_SHIFT)
+#define CSR_GTLBC_GMTLBSZ_SHIFT 0
+#define CSR_GTLBC_GMTLBSZ_WIDTH 6
+#define CSR_GTLBC_GMTLBSZ (_ULCAST_(0x3f) << CSR_GTLBC_GMTLBSZ_SHIFT)
+
+#define LOONGARCH_CSR_TRGP 0x16 /* TLBR read guest info */
+#define CSR_TRGP_RID_SHIFT 16
+#define CSR_TRGP_RID_WIDTH 8
+#define CSR_TRGP_RID (_ULCAST_(0xff) << CSR_TRGP_RID_SHIFT)
+#define CSR_TRGP_GTLB_SHIFT 0
+#define CSR_TRGP_GTLB (1 << CSR_TRGP_GTLB_SHIFT)
+
+#define LOONGARCH_CSR_ASID 0x18 /* ASID */
+#define CSR_ASID_BIT_SHIFT 16 /* ASIDBits */
+#define CSR_ASID_BIT_WIDTH 8
+#define CSR_ASID_BIT (_ULCAST_(0xff) << CSR_ASID_BIT_SHIFT)
+#define CSR_ASID_ASID_SHIFT 0
+#define CSR_ASID_ASID_WIDTH 10
+#define CSR_ASID_ASID (_ULCAST_(0x3ff) << CSR_ASID_ASID_SHIFT)
+
+#define LOONGARCH_CSR_PGDL 0x19 /* Page table base address when VA[47] = 0 */
+
+#define LOONGARCH_CSR_PGDH 0x1a /* Page table base address when VA[47] = 1 */
+
+#define LOONGARCH_CSR_PGD 0x1b /* Page table base */
+
+#define LOONGARCH_CSR_PWCTL0 0x1c /* PWCtl0 */
+#define CSR_PWCTL0_PTEW_SHIFT 30
+#define CSR_PWCTL0_PTEW_WIDTH 2
+#define CSR_PWCTL0_PTEW (_ULCAST_(0x3) << CSR_PWCTL0_PTEW_SHIFT)
+#define CSR_PWCTL0_DIR1WIDTH_SHIFT 25
+#define CSR_PWCTL0_DIR1WIDTH_WIDTH 5
+#define CSR_PWCTL0_DIR1WIDTH (_ULCAST_(0x1f) << CSR_PWCTL0_DIR1WIDTH_SHIFT)
+#define CSR_PWCTL0_DIR1BASE_SHIFT 20
+#define CSR_PWCTL0_DIR1BASE_WIDTH 5
+#define CSR_PWCTL0_DIR1BASE (_ULCAST_(0x1f) << CSR_PWCTL0_DIR1BASE_SHIFT)
+#define CSR_PWCTL0_DIR0WIDTH_SHIFT 15
+#define CSR_PWCTL0_DIR0WIDTH_WIDTH 5
+#define CSR_PWCTL0_DIR0WIDTH (_ULCAST_(0x1f) << CSR_PWCTL0_DIR0WIDTH_SHIFT)
+#define CSR_PWCTL0_DIR0BASE_SHIFT 10
+#define CSR_PWCTL0_DIR0BASE_WIDTH 5
+#define CSR_PWCTL0_DIR0BASE (_ULCAST_(0x1f) << CSR_PWCTL0_DIR0BASE_SHIFT)
+#define CSR_PWCTL0_PTWIDTH_SHIFT 5
+#define CSR_PWCTL0_PTWIDTH_WIDTH 5
+#define CSR_PWCTL0_PTWIDTH (_ULCAST_(0x1f) << CSR_PWCTL0_PTWIDTH_SHIFT)
+#define CSR_PWCTL0_PTBASE_SHIFT 0
+#define CSR_PWCTL0_PTBASE_WIDTH 5
+#define CSR_PWCTL0_PTBASE (_ULCAST_(0x1f) << CSR_PWCTL0_PTBASE_SHIFT)
+
+#define LOONGARCH_CSR_PWCTL1 0x1d /* PWCtl1 */
+#define CSR_PWCTL1_DIR3WIDTH_SHIFT 18
+#define CSR_PWCTL1_DIR3WIDTH_WIDTH 5
+#define CSR_PWCTL1_DIR3WIDTH (_ULCAST_(0x1f) << CSR_PWCTL1_DIR3WIDTH_SHIFT)
+#define CSR_PWCTL1_DIR3BASE_SHIFT 12
+#define CSR_PWCTL1_DIR3BASE_WIDTH 5
+#define CSR_PWCTL1_DIR3BASE (_ULCAST_(0x1f) << CSR_PWCTL1_DIR3BASE_SHIFT)
+#define CSR_PWCTL1_DIR2WIDTH_SHIFT 6
+#define CSR_PWCTL1_DIR2WIDTH_WIDTH 5
+#define CSR_PWCTL1_DIR2WIDTH (_ULCAST_(0x1f) << CSR_PWCTL1_DIR2WIDTH_SHIFT)
+#define CSR_PWCTL1_DIR2BASE_SHIFT 0
+#define CSR_PWCTL1_DIR2BASE_WIDTH 5
+#define CSR_PWCTL1_DIR2BASE (_ULCAST_(0x1f) << CSR_PWCTL1_DIR2BASE_SHIFT)
+
+#define LOONGARCH_CSR_STLBPGSIZE 0x1e
+#define CSR_STLBPGSIZE_PS_WIDTH 6
+#define CSR_STLBPGSIZE_PS (_ULCAST_(0x3f))
+
+#define LOONGARCH_CSR_RVACFG 0x1f
+#define CSR_RVACFG_RDVA_WIDTH 4
+#define CSR_RVACFG_RDVA (_ULCAST_(0xf))
+
+/* Config CSR registers */
+#define LOONGARCH_CSR_CPUID 0x20 /* CPU core id */
+#define CSR_CPUID_COREID_WIDTH 9
+#define CSR_CPUID_COREID _ULCAST_(0x1ff)
+
+#define LOONGARCH_CSR_PRCFG1 0x21 /* Config1 */
+#define CSR_CONF1_VSMAX_SHIFT 12
+#define CSR_CONF1_VSMAX_WIDTH 3
+#define CSR_CONF1_VSMAX (_ULCAST_(7) << CSR_CONF1_VSMAX_SHIFT)
+#define CSR_CONF1_TMRBITS_SHIFT 4
+#define CSR_CONF1_TMRBITS_WIDTH 8
+#define CSR_CONF1_TMRBITS (_ULCAST_(0xff) << CSR_CONF1_TMRBITS_SHIFT)
+#define CSR_CONF1_KSNUM_WIDTH 4
+#define CSR_CONF1_KSNUM _ULCAST_(0xf)
+
+#define LOONGARCH_CSR_PRCFG2 0x22 /* Config2 */
+#define CSR_CONF2_PGMASK_SUPP 0x3ffff000
+
+#define LOONGARCH_CSR_PRCFG3 0x23 /* Config3 */
+#define CSR_CONF3_STLBIDX_SHIFT 20
+#define CSR_CONF3_STLBIDX_WIDTH 6
+#define CSR_CONF3_STLBIDX (_ULCAST_(0x3f) << CSR_CONF3_STLBIDX_SHIFT)
+#define CSR_CONF3_STLBWAYS_SHIFT 12
+#define CSR_CONF3_STLBWAYS_WIDTH 8
+#define CSR_CONF3_STLBWAYS (_ULCAST_(0xff) << CSR_CONF3_STLBWAYS_SHIFT)
+#define CSR_CONF3_MTLBSIZE_SHIFT 4
+#define CSR_CONF3_MTLBSIZE_WIDTH 8
+#define CSR_CONF3_MTLBSIZE (_ULCAST_(0xff) << CSR_CONF3_MTLBSIZE_SHIFT)
+#define CSR_CONF3_TLBTYPE_SHIFT 0
+#define CSR_CONF3_TLBTYPE_WIDTH 4
+#define CSR_CONF3_TLBTYPE (_ULCAST_(0xf) << CSR_CONF3_TLBTYPE_SHIFT)
+
+/* KSave registers */
+#define LOONGARCH_CSR_KS0 0x30
+#define LOONGARCH_CSR_KS1 0x31
+#define LOONGARCH_CSR_KS2 0x32
+#define LOONGARCH_CSR_KS3 0x33
+#define LOONGARCH_CSR_KS4 0x34
+#define LOONGARCH_CSR_KS5 0x35
+#define LOONGARCH_CSR_KS6 0x36
+#define LOONGARCH_CSR_KS7 0x37
+#define LOONGARCH_CSR_KS8 0x38
+
+/* Exception allocated KS0, KS1 and KS2 statically */
+#define EXCEPTION_KS0 LOONGARCH_CSR_KS0
+#define EXCEPTION_KS1 LOONGARCH_CSR_KS1
+#define EXCEPTION_KS2 LOONGARCH_CSR_KS2
+#define EXC_KSAVE_MASK (1 << 0 | 1 << 1 | 1 << 2)
+
+/* Percpu-data base allocated KS3 statically */
+#define PERCPU_BASE_KS LOONGARCH_CSR_KS3
+#define PERCPU_KSAVE_MASK (1 << 3)
+
+/* KVM allocated KS4 and KS5 statically */
+#define KVM_VCPU_KS LOONGARCH_CSR_KS4
+#define KVM_TEMP_KS LOONGARCH_CSR_KS5
+#define KVM_KSAVE_MASK (1 << 4 | 1 << 5)
+
+/* Timer registers */
+#define LOONGARCH_CSR_TMID 0x40 /* Timer ID */
+
+#define LOONGARCH_CSR_TCFG 0x41 /* Timer config */
+#define CSR_TCFG_VAL_SHIFT 2
+#define CSR_TCFG_VAL_WIDTH 48
+#define CSR_TCFG_VAL (_ULCAST_(0x3fffffffffff) << CSR_TCFG_VAL_SHIFT)
+#define CSR_TCFG_PERIOD_SHIFT 1
+#define CSR_TCFG_PERIOD (_ULCAST_(0x1) << CSR_TCFG_PERIOD_SHIFT)
+#define CSR_TCFG_EN (_ULCAST_(0x1))
+
+#define LOONGARCH_CSR_TVAL 0x42 /* Timer value */
+
+#define LOONGARCH_CSR_CNTC 0x43 /* Timer offset */
+
+#define LOONGARCH_CSR_TINTCLR 0x44 /* Timer interrupt clear */
+#define CSR_TINTCLR_TI_SHIFT 0
+#define CSR_TINTCLR_TI (1 << CSR_TINTCLR_TI_SHIFT)
+
+/* Guest registers */
+#define LOONGARCH_CSR_GSTAT 0x50 /* Guest status */
+#define CSR_GSTAT_GID_SHIFT 16
+#define CSR_GSTAT_GID_WIDTH 8
+#define CSR_GSTAT_GID (_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT)
+#define CSR_GSTAT_GIDBIT_SHIFT 4
+#define CSR_GSTAT_GIDBIT_WIDTH 6
+#define CSR_GSTAT_GIDBIT (_ULCAST_(0x3f) << CSR_GSTAT_GIDBIT_SHIFT)
+#define CSR_GSTAT_PVM_SHIFT 1
+#define CSR_GSTAT_PVM (_ULCAST_(0x1) << CSR_GSTAT_PVM_SHIFT)
+#define CSR_GSTAT_VM_SHIFT 0
+#define CSR_GSTAT_VM (_ULCAST_(0x1) << CSR_GSTAT_VM_SHIFT)
+
+#define LOONGARCH_CSR_GCFG 0x51 /* Guest config */
+#define CSR_GCFG_GPERF_SHIFT 24
+#define CSR_GCFG_GPERF_WIDTH 3
+#define CSR_GCFG_GPERF (_ULCAST_(0x7) << CSR_GCFG_GPERF_SHIFT)
+#define CSR_GCFG_GCI_SHIFT 20
+#define CSR_GCFG_GCI_WIDTH 2
+#define CSR_GCFG_GCI (_ULCAST_(0x3) << CSR_GCFG_GCI_SHIFT)
+#define CSR_GCFG_GCI_ALL (_ULCAST_(0x0) << CSR_GCFG_GCI_SHIFT)
+#define CSR_GCFG_GCI_HIT (_ULCAST_(0x1) << CSR_GCFG_GCI_SHIFT)
+#define CSR_GCFG_GCI_SECURE (_ULCAST_(0x2) << CSR_GCFG_GCI_SHIFT)
+#define CSR_GCFG_GCIP_SHIFT 16
+#define CSR_GCFG_GCIP (_ULCAST_(0xf) << CSR_GCFG_GCIP_SHIFT)
+#define CSR_GCFG_GCIP_ALL (_ULCAST_(0x1) << CSR_GCFG_GCIP_SHIFT)
+#define CSR_GCFG_GCIP_HIT (_ULCAST_(0x1) << (CSR_GCFG_GCIP_SHIFT + 1))
+#define CSR_GCFG_GCIP_SECURE (_ULCAST_(0x1) << (CSR_GCFG_GCIP_SHIFT + 2))
+#define CSR_GCFG_TORU_SHIFT 15
+#define CSR_GCFG_TORU (_ULCAST_(0x1) << CSR_GCFG_TORU_SHIFT)
+#define CSR_GCFG_TORUP_SHIFT 14
+#define CSR_GCFG_TORUP (_ULCAST_(0x1) << CSR_GCFG_TORUP_SHIFT)
+#define CSR_GCFG_TOP_SHIFT 13
+#define CSR_GCFG_TOP (_ULCAST_(0x1) << CSR_GCFG_TOP_SHIFT)
+#define CSR_GCFG_TOPP_SHIFT 12
+#define CSR_GCFG_TOPP (_ULCAST_(0x1) << CSR_GCFG_TOPP_SHIFT)
+#define CSR_GCFG_TOE_SHIFT 11
+#define CSR_GCFG_TOE (_ULCAST_(0x1) << CSR_GCFG_TOE_SHIFT)
+#define CSR_GCFG_TOEP_SHIFT 10
+#define CSR_GCFG_TOEP (_ULCAST_(0x1) << CSR_GCFG_TOEP_SHIFT)
+#define CSR_GCFG_TIT_SHIFT 9
+#define CSR_GCFG_TIT (_ULCAST_(0x1) << CSR_GCFG_TIT_SHIFT)
+#define CSR_GCFG_TITP_SHIFT 8
+#define CSR_GCFG_TITP (_ULCAST_(0x1) << CSR_GCFG_TITP_SHIFT)
+#define CSR_GCFG_SIT_SHIFT 7
+#define CSR_GCFG_SIT (_ULCAST_(0x1) << CSR_GCFG_SIT_SHIFT)
+#define CSR_GCFG_SITP_SHIFT 6
+#define CSR_GCFG_SITP (_ULCAST_(0x1) << CSR_GCFG_SITP_SHIFT)
+#define CSR_GCFG_MATC_SHITF 4
+#define CSR_GCFG_MATC_WIDTH 2
+#define CSR_GCFG_MATC_MASK (_ULCAST_(0x3) << CSR_GCFG_MATC_SHITF)
+#define CSR_GCFG_MATC_GUEST (_ULCAST_(0x0) << CSR_GCFG_MATC_SHITF)
+#define CSR_GCFG_MATC_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATC_SHITF)
+#define CSR_GCFG_MATC_NEST (_ULCAST_(0x2) << CSR_GCFG_MATC_SHITF)
+
+#define LOONGARCH_CSR_GINTC 0x52 /* Guest interrupt control */
+#define CSR_GINTC_HC_SHIFT 16
+#define CSR_GINTC_HC_WIDTH 8
+#define CSR_GINTC_HC (_ULCAST_(0xff) << CSR_GINTC_HC_SHIFT)
+#define CSR_GINTC_PIP_SHIFT 8
+#define CSR_GINTC_PIP_WIDTH 8
+#define CSR_GINTC_PIP (_ULCAST_(0xff) << CSR_GINTC_PIP_SHIFT)
+#define CSR_GINTC_VIP_SHIFT 0
+#define CSR_GINTC_VIP_WIDTH 8
+#define CSR_GINTC_VIP (_ULCAST_(0xff))
+
+#define LOONGARCH_CSR_GCNTC 0x53 /* Guest timer offset */
+
+/* LLBCTL register */
+#define LOONGARCH_CSR_LLBCTL 0x60 /* LLBit control */
+#define CSR_LLBCTL_ROLLB_SHIFT 0
+#define CSR_LLBCTL_ROLLB (_ULCAST_(1) << CSR_LLBCTL_ROLLB_SHIFT)
+#define CSR_LLBCTL_WCLLB_SHIFT 1
+#define CSR_LLBCTL_WCLLB (_ULCAST_(1) << CSR_LLBCTL_WCLLB_SHIFT)
+#define CSR_LLBCTL_KLO_SHIFT 2
+#define CSR_LLBCTL_KLO (_ULCAST_(1) << CSR_LLBCTL_KLO_SHIFT)
+
+/* Implementation dependent */
+#define LOONGARCH_CSR_IMPCTL1 0x80 /* Loongson config1 */
+#define CSR_MISPEC_SHIFT 20
+#define CSR_MISPEC_WIDTH 8
+#define CSR_MISPEC (_ULCAST_(0xff) << CSR_MISPEC_SHIFT)
+#define CSR_SSEN_SHIFT 18
+#define CSR_SSEN (_ULCAST_(1) << CSR_SSEN_SHIFT)
+#define CSR_SCRAND_SHIFT 17
+#define CSR_SCRAND (_ULCAST_(1) << CSR_SCRAND_SHIFT)
+#define CSR_LLEXCL_SHIFT 16
+#define CSR_LLEXCL (_ULCAST_(1) << CSR_LLEXCL_SHIFT)
+#define CSR_DISVC_SHIFT 15
+#define CSR_DISVC (_ULCAST_(1) << CSR_DISVC_SHIFT)
+#define CSR_VCLRU_SHIFT 14
+#define CSR_VCLRU (_ULCAST_(1) << CSR_VCLRU_SHIFT)
+#define CSR_DCLRU_SHIFT 13
+#define CSR_DCLRU (_ULCAST_(1) << CSR_DCLRU_SHIFT)
+#define CSR_FASTLDQ_SHIFT 12
+#define CSR_FASTLDQ (_ULCAST_(1) << CSR_FASTLDQ_SHIFT)
+#define CSR_USERCAC_SHIFT 11
+#define CSR_USERCAC (_ULCAST_(1) << CSR_USERCAC_SHIFT)
+#define CSR_ANTI_MISPEC_SHIFT 10
+#define CSR_ANTI_MISPEC (_ULCAST_(1) << CSR_ANTI_MISPEC_SHIFT)
+#define CSR_AUTO_FLUSHSFB_SHIFT 9
+#define CSR_AUTO_FLUSHSFB (_ULCAST_(1) << CSR_AUTO_FLUSHSFB_SHIFT)
+#define CSR_STFILL_SHIFT 8
+#define CSR_STFILL (_ULCAST_(1) << CSR_STFILL_SHIFT)
+#define CSR_LIFEP_SHIFT 7
+#define CSR_LIFEP (_ULCAST_(1) << CSR_LIFEP_SHIFT)
+#define CSR_LLSYNC_SHIFT 6
+#define CSR_LLSYNC (_ULCAST_(1) << CSR_LLSYNC_SHIFT)
+#define CSR_BRBTDIS_SHIFT 5
+#define CSR_BRBTDIS (_ULCAST_(1) << CSR_BRBTDIS_SHIFT)
+#define CSR_RASDIS_SHIFT 4
+#define CSR_RASDIS (_ULCAST_(1) << CSR_RASDIS_SHIFT)
+#define CSR_STPRE_SHIFT 2
+#define CSR_STPRE_WIDTH 2
+#define CSR_STPRE (_ULCAST_(3) << CSR_STPRE_SHIFT)
+#define CSR_INSTPRE_SHIFT 1
+#define CSR_INSTPRE (_ULCAST_(1) << CSR_INSTPRE_SHIFT)
+#define CSR_DATAPRE_SHIFT 0
+#define CSR_DATAPRE (_ULCAST_(1) << CSR_DATAPRE_SHIFT)
+
+#define LOONGARCH_CSR_IMPCTL2 0x81 /* Loongson config2 */
+#define CSR_FLUSH_MTLB_SHIFT 0
+#define CSR_FLUSH_MTLB (_ULCAST_(1) << CSR_FLUSH_MTLB_SHIFT)
+#define CSR_FLUSH_STLB_SHIFT 1
+#define CSR_FLUSH_STLB (_ULCAST_(1) << CSR_FLUSH_STLB_SHIFT)
+#define CSR_FLUSH_DTLB_SHIFT 2
+#define CSR_FLUSH_DTLB (_ULCAST_(1) << CSR_FLUSH_DTLB_SHIFT)
+#define CSR_FLUSH_ITLB_SHIFT 3
+#define CSR_FLUSH_ITLB (_ULCAST_(1) << CSR_FLUSH_ITLB_SHIFT)
+#define CSR_FLUSH_BTAC_SHIFT 4
+#define CSR_FLUSH_BTAC (_ULCAST_(1) << CSR_FLUSH_BTAC_SHIFT)
+
+#define LOONGARCH_CSR_GNMI 0x82
+
+/* TLB Refill registers */
+#define LOONGARCH_CSR_TLBRENTRY 0x88 /* TLB refill exception entry */
+#define LOONGARCH_CSR_TLBRBADV 0x89 /* TLB refill badvaddr */
+#define LOONGARCH_CSR_TLBRERA 0x8a /* TLB refill ERA */
+#define LOONGARCH_CSR_TLBRSAVE 0x8b /* KSave for TLB refill exception */
+#define LOONGARCH_CSR_TLBRELO0 0x8c /* TLB refill entrylo0 */
+#define LOONGARCH_CSR_TLBRELO1 0x8d /* TLB refill entrylo1 */
+#define LOONGARCH_CSR_TLBREHI 0x8e /* TLB refill entryhi */
+#define CSR_TLBREHI_PS_SHIFT 0
+#define CSR_TLBREHI_PS (_ULCAST_(0x3f) << CSR_TLBREHI_PS_SHIFT)
+#define LOONGARCH_CSR_TLBRPRMD 0x8f /* TLB refill mode info */
+
+/* Machine Error registers */
+#define LOONGARCH_CSR_MERRCTL 0x90 /* MERRCTL */
+#define LOONGARCH_CSR_MERRINFO1 0x91 /* MError info1 */
+#define LOONGARCH_CSR_MERRINFO2 0x92 /* MError info2 */
+#define LOONGARCH_CSR_MERRENTRY 0x93 /* MError exception entry */
+#define LOONGARCH_CSR_MERRERA 0x94 /* MError exception ERA */
+#define LOONGARCH_CSR_MERRSAVE 0x95 /* KSave for machine error exception */
+
+#define LOONGARCH_CSR_CTAG 0x98 /* TagLo + TagHi */
+
+#define LOONGARCH_CSR_PRID 0xc0
+
+/* Shadow MCSR : 0xc0 ~ 0xff */
+#define LOONGARCH_CSR_MCSR0 0xc0 /* CPUCFG0 and CPUCFG1 */
+#define MCSR0_INT_IMPL_SHIFT 58
+#define MCSR0_INT_IMPL 0
+#define MCSR0_IOCSR_BRD_SHIFT 57
+#define MCSR0_IOCSR_BRD (_ULCAST_(1) << MCSR0_IOCSR_BRD_SHIFT)
+#define MCSR0_HUGEPG_SHIFT 56
+#define MCSR0_HUGEPG (_ULCAST_(1) << MCSR0_HUGEPG_SHIFT)
+#define MCSR0_RPLMTLB_SHIFT 55
+#define MCSR0_RPLMTLB (_ULCAST_(1) << MCSR0_RPLMTLB_SHIFT)
+#define MCSR0_EP_SHIFT 54
+#define MCSR0_EP (_ULCAST_(1) << MCSR0_EP_SHIFT)
+#define MCSR0_RI_SHIFT 53
+#define MCSR0_RI (_ULCAST_(1) << MCSR0_RI_SHIFT)
+#define MCSR0_UAL_SHIFT 52
+#define MCSR0_UAL (_ULCAST_(1) << MCSR0_UAL_SHIFT)
+#define MCSR0_VABIT_SHIFT 44
+#define MCSR0_VABIT_WIDTH 8
+#define MCSR0_VABIT (_ULCAST_(0xff) << MCSR0_VABIT_SHIFT)
+#define VABIT_DEFAULT 0x2f
+#define MCSR0_PABIT_SHIFT 36
+#define MCSR0_PABIT_WIDTH 8
+#define MCSR0_PABIT (_ULCAST_(0xff) << MCSR0_PABIT_SHIFT)
+#define PABIT_DEFAULT 0x2f
+#define MCSR0_IOCSR_SHIFT 35
+#define MCSR0_IOCSR (_ULCAST_(1) << MCSR0_IOCSR_SHIFT)
+#define MCSR0_PAGING_SHIFT 34
+#define MCSR0_PAGING (_ULCAST_(1) << MCSR0_PAGING_SHIFT)
+#define MCSR0_GR64_SHIFT 33
+#define MCSR0_GR64 (_ULCAST_(1) << MCSR0_GR64_SHIFT)
+#define GR64_DEFAULT 1
+#define MCSR0_GR32_SHIFT 32
+#define MCSR0_GR32 (_ULCAST_(1) << MCSR0_GR32_SHIFT)
+#define GR32_DEFAULT 0
+#define MCSR0_PRID_WIDTH 32
+#define MCSR0_PRID 0x14C010
+
+#define LOONGARCH_CSR_MCSR1 0xc1 /* CPUCFG2 and CPUCFG3 */
+#define MCSR1_HPFOLD_SHIFT 43
+#define MCSR1_HPFOLD (_ULCAST_(1) << MCSR1_HPFOLD_SHIFT)
+#define MCSR1_SPW_LVL_SHIFT 40
+#define MCSR1_SPW_LVL_WIDTH 3
+#define MCSR1_SPW_LVL (_ULCAST_(7) << MCSR1_SPW_LVL_SHIFT)
+#define MCSR1_ICACHET_SHIFT 39
+#define MCSR1_ICACHET (_ULCAST_(1) << MCSR1_ICACHET_SHIFT)
+#define MCSR1_ITLBT_SHIFT 38
+#define MCSR1_ITLBT (_ULCAST_(1) << MCSR1_ITLBT_SHIFT)
+#define MCSR1_LLDBAR_SHIFT 37
+#define MCSR1_LLDBAR (_ULCAST_(1) << MCSR1_LLDBAR_SHIFT)
+#define MCSR1_SCDLY_SHIFT 36
+#define MCSR1_SCDLY (_ULCAST_(1) << MCSR1_SCDLY_SHIFT)
+#define MCSR1_LLEXC_SHIFT 35
+#define MCSR1_LLEXC (_ULCAST_(1) << MCSR1_LLEXC_SHIFT)
+#define MCSR1_UCACC_SHIFT 34
+#define MCSR1_UCACC (_ULCAST_(1) << MCSR1_UCACC_SHIFT)
+#define MCSR1_SFB_SHIFT 33
+#define MCSR1_SFB (_ULCAST_(1) << MCSR1_SFB_SHIFT)
+#define MCSR1_CCDMA_SHIFT 32
+#define MCSR1_CCDMA (_ULCAST_(1) << MCSR1_CCDMA_SHIFT)
+#define MCSR1_LAMO_SHIFT 22
+#define MCSR1_LAMO (_ULCAST_(1) << MCSR1_LAMO_SHIFT)
+#define MCSR1_LSPW_SHIFT 21
+#define MCSR1_LSPW (_ULCAST_(1) << MCSR1_LSPW_SHIFT)
+#define MCSR1_MIPSBT_SHIFT 20
+#define MCSR1_MIPSBT (_ULCAST_(1) << MCSR1_MIPSBT_SHIFT)
+#define MCSR1_ARMBT_SHIFT 19
+#define MCSR1_ARMBT (_ULCAST_(1) << MCSR1_ARMBT_SHIFT)
+#define MCSR1_X86BT_SHIFT 18
+#define MCSR1_X86BT (_ULCAST_(1) << MCSR1_X86BT_SHIFT)
+#define MCSR1_LLFTPVERS_SHIFT 15
+#define MCSR1_LLFTPVERS_WIDTH 3
+#define MCSR1_LLFTPVERS (_ULCAST_(7) << MCSR1_LLFTPVERS_SHIFT)
+#define MCSR1_LLFTP_SHIFT 14
+#define MCSR1_LLFTP (_ULCAST_(1) << MCSR1_LLFTP_SHIFT)
+#define MCSR1_VZVERS_SHIFT 11
+#define MCSR1_VZVERS_WIDTH 3
+#define MCSR1_VZVERS (_ULCAST_(7) << MCSR1_VZVERS_SHIFT)
+#define MCSR1_VZ_SHIFT 10
+#define MCSR1_VZ (_ULCAST_(1) << MCSR1_VZ_SHIFT)
+#define MCSR1_CRYPTO_SHIFT 9
+#define MCSR1_CRYPTO (_ULCAST_(1) << MCSR1_CRYPTO_SHIFT)
+#define MCSR1_COMPLEX_SHIFT 8
+#define MCSR1_COMPLEX (_ULCAST_(1) << MCSR1_COMPLEX_SHIFT)
+#define MCSR1_LASX_SHIFT 7
+#define MCSR1_LASX (_ULCAST_(1) << MCSR1_LASX_SHIFT)
+#define MCSR1_LSX_SHIFT 6
+#define MCSR1_LSX (_ULCAST_(1) << MCSR1_LSX_SHIFT)
+#define MCSR1_FPVERS_SHIFT 3
+#define MCSR1_FPVERS_WIDTH 3
+#define MCSR1_FPVERS (_ULCAST_(7) << MCSR1_FPVERS_SHIFT)
+#define MCSR1_FPDP_SHIFT 2
+#define MCSR1_FPDP (_ULCAST_(1) << MCSR1_FPDP_SHIFT)
+#define MCSR1_FPSP_SHIFT 1
+#define MCSR1_FPSP (_ULCAST_(1) << MCSR1_FPSP_SHIFT)
+#define MCSR1_FP_SHIFT 0
+#define MCSR1_FP (_ULCAST_(1) << MCSR1_FP_SHIFT)
+
+#define LOONGARCH_CSR_MCSR2 0xc2 /* CPUCFG4 and CPUCFG5 */
+#define MCSR2_CCDIV_SHIFT 48
+#define MCSR2_CCDIV_WIDTH 16
+#define MCSR2_CCDIV (_ULCAST_(0xffff) << MCSR2_CCDIV_SHIFT)
+#define MCSR2_CCMUL_SHIFT 32
+#define MCSR2_CCMUL_WIDTH 16
+#define MCSR2_CCMUL (_ULCAST_(0xffff) << MCSR2_CCMUL_SHIFT)
+#define MCSR2_CCFREQ_WIDTH 32
+#define MCSR2_CCFREQ (_ULCAST_(0xffffffff))
+#define CCFREQ_DEFAULT 0x5f5e100 /* 100MHz */
+
+#define LOONGARCH_CSR_MCSR3 0xc3 /* CPUCFG6 */
+#define MCSR3_UPM_SHIFT 14
+#define MCSR3_UPM (_ULCAST_(1) << MCSR3_UPM_SHIFT)
+#define MCSR3_PMBITS_SHIFT 8
+#define MCSR3_PMBITS_WIDTH 6
+#define MCSR3_PMBITS (_ULCAST_(0x3f) << MCSR3_PMBITS_SHIFT)
+#define PMBITS_DEFAULT 0x40
+#define MCSR3_PMNUM_SHIFT 4
+#define MCSR3_PMNUM_WIDTH 4
+#define MCSR3_PMNUM (_ULCAST_(0xf) << MCSR3_PMNUM_SHIFT)
+#define MCSR3_PAMVER_SHIFT 1
+#define MCSR3_PAMVER_WIDTH 3
+#define MCSR3_PAMVER (_ULCAST_(0x7) << MCSR3_PAMVER_SHIFT)
+#define MCSR3_PMP_SHIFT 0
+#define MCSR3_PMP (_ULCAST_(1) << MCSR3_PMP_SHIFT)
+
+#define LOONGARCH_CSR_MCSR8 0xc8 /* CPUCFG16 and CPUCFG17 */
+#define MCSR8_L1I_SIZE_SHIFT 56
+#define MCSR8_L1I_SIZE_WIDTH 7
+#define MCSR8_L1I_SIZE (_ULCAST_(0x7f) << MCSR8_L1I_SIZE_SHIFT)
+#define MCSR8_L1I_IDX_SHIFT 48
+#define MCSR8_L1I_IDX_WIDTH 8
+#define MCSR8_L1I_IDX (_ULCAST_(0xff) << MCSR8_L1I_IDX_SHIFT)
+#define MCSR8_L1I_WAY_SHIFT 32
+#define MCSR8_L1I_WAY_WIDTH 16
+#define MCSR8_L1I_WAY (_ULCAST_(0xffff) << MCSR8_L1I_WAY_SHIFT)
+#define MCSR8_L3DINCL_SHIFT 16
+#define MCSR8_L3DINCL (_ULCAST_(1) << MCSR8_L3DINCL_SHIFT)
+#define MCSR8_L3DPRIV_SHIFT 15
+#define MCSR8_L3DPRIV (_ULCAST_(1) << MCSR8_L3DPRIV_SHIFT)
+#define MCSR8_L3DPRE_SHIFT 14
+#define MCSR8_L3DPRE (_ULCAST_(1) << MCSR8_L3DPRE_SHIFT)
+#define MCSR8_L3IUINCL_SHIFT 13
+#define MCSR8_L3IUINCL (_ULCAST_(1) << MCSR8_L3IUINCL_SHIFT)
+#define MCSR8_L3IUPRIV_SHIFT 12
+#define MCSR8_L3IUPRIV (_ULCAST_(1) << MCSR8_L3IUPRIV_SHIFT)
+#define MCSR8_L3IUUNIFY_SHIFT 11
+#define MCSR8_L3IUUNIFY (_ULCAST_(1) << MCSR8_L3IUUNIFY_SHIFT)
+#define MCSR8_L3IUPRE_SHIFT 10
+#define MCSR8_L3IUPRE (_ULCAST_(1) << MCSR8_L3IUPRE_SHIFT)
+#define MCSR8_L2DINCL_SHIFT 9
+#define MCSR8_L2DINCL (_ULCAST_(1) << MCSR8_L2DINCL_SHIFT)
+#define MCSR8_L2DPRIV_SHIFT 8
+#define MCSR8_L2DPRIV (_ULCAST_(1) << MCSR8_L2DPRIV_SHIFT)
+#define MCSR8_L2DPRE_SHIFT 7
+#define MCSR8_L2DPRE (_ULCAST_(1) << MCSR8_L2DPRE_SHIFT)
+#define MCSR8_L2IUINCL_SHIFT 6
+#define MCSR8_L2IUINCL (_ULCAST_(1) << MCSR8_L2IUINCL_SHIFT)
+#define MCSR8_L2IUPRIV_SHIFT 5
+#define MCSR8_L2IUPRIV (_ULCAST_(1) << MCSR8_L2IUPRIV_SHIFT)
+#define MCSR8_L2IUUNIFY_SHIFT 4
+#define MCSR8_L2IUUNIFY (_ULCAST_(1) << MCSR8_L2IUUNIFY_SHIFT)
+#define MCSR8_L2IUPRE_SHIFT 3
+#define MCSR8_L2IUPRE (_ULCAST_(1) << MCSR8_L2IUPRE_SHIFT)
+#define MCSR8_L1DPRE_SHIFT 2
+#define MCSR8_L1DPRE (_ULCAST_(1) << MCSR8_L1DPRE_SHIFT)
+#define MCSR8_L1IUUNIFY_SHIFT 1
+#define MCSR8_L1IUUNIFY (_ULCAST_(1) << MCSR8_L1IUUNIFY_SHIFT)
+#define MCSR8_L1IUPRE_SHIFT 0
+#define MCSR8_L1IUPRE (_ULCAST_(1) << MCSR8_L1IUPRE_SHIFT)
+
+#define LOONGARCH_CSR_MCSR9 0xc9 /* CPUCFG18 and CPUCFG19 */
+#define MCSR9_L2U_SIZE_SHIFT 56
+#define MCSR9_L2U_SIZE_WIDTH 7
+#define MCSR9_L2U_SIZE (_ULCAST_(0x7f) << MCSR9_L2U_SIZE_SHIFT)
+#define MCSR9_L2U_IDX_SHIFT 48
+#define MCSR9_L2U_IDX_WIDTH 8
+#define MCSR9_L2U_IDX (_ULCAST_(0xff) << MCSR9_L2U_IDX_SHIFT)
+#define MCSR9_L2U_WAY_SHIFT 32
+#define MCSR9_L2U_WAY_WIDTH 16
+#define MCSR9_L2U_WAY (_ULCAST_(0xffff) << MCSR9_L2U_WAY_SHIFT)
+#define MCSR9_L1D_SIZE_SHIFT 24
+#define MCSR9_L1D_SIZE_WIDTH 7
+#define MCSR9_L1D_SIZE (_ULCAST_(0x7f) << MCSR9_L1D_SIZE_SHIFT)
+#define MCSR9_L1D_IDX_SHIFT 16
+#define MCSR9_L1D_IDX_WIDTH 8
+#define MCSR9_L1D_IDX (_ULCAST_(0xff) << MCSR9_L1D_IDX_SHIFT)
+#define MCSR9_L1D_WAY_SHIFT 0
+#define MCSR9_L1D_WAY_WIDTH 16
+#define MCSR9_L1D_WAY (_ULCAST_(0xffff) << MCSR9_L1D_WAY_SHIFT)
+
+#define LOONGARCH_CSR_MCSR10 0xca /* CPUCFG20 */
+#define MCSR10_L3U_SIZE_SHIFT 24
+#define MCSR10_L3U_SIZE_WIDTH 7
+#define MCSR10_L3U_SIZE (_ULCAST_(0x7f) << MCSR10_L3U_SIZE_SHIFT)
+#define MCSR10_L3U_IDX_SHIFT 16
+#define MCSR10_L3U_IDX_WIDTH 8
+#define MCSR10_L3U_IDX (_ULCAST_(0xff) << MCSR10_L3U_IDX_SHIFT)
+#define MCSR10_L3U_WAY_SHIFT 0
+#define MCSR10_L3U_WAY_WIDTH 16
+#define MCSR10_L3U_WAY (_ULCAST_(0xffff) << MCSR10_L3U_WAY_SHIFT)
+
+#define LOONGARCH_CSR_MCSR24 0xf0 /* cpucfg48 */
+#define MCSR24_RAMCG_SHIFT 3
+#define MCSR24_RAMCG (_ULCAST_(1) << MCSR24_RAMCG_SHIFT)
+#define MCSR24_VFPUCG_SHIFT 2
+#define MCSR24_VFPUCG (_ULCAST_(1) << MCSR24_VFPUCG_SHIFT)
+#define MCSR24_NAPEN_SHIFT 1
+#define MCSR24_NAPEN (_ULCAST_(1) << MCSR24_NAPEN_SHIFT)
+#define MCSR24_MCSRLOCK_SHIFT 0
+#define MCSR24_MCSRLOCK (_ULCAST_(1) << MCSR24_MCSRLOCK_SHIFT)
+
+/* Uncached accelerate windows registers */
+#define LOONGARCH_CSR_UCAWIN 0x100
+#define LOONGARCH_CSR_UCAWIN0_LO 0x102
+#define LOONGARCH_CSR_UCAWIN0_HI 0x103
+#define LOONGARCH_CSR_UCAWIN1_LO 0x104
+#define LOONGARCH_CSR_UCAWIN1_HI 0x105
+#define LOONGARCH_CSR_UCAWIN2_LO 0x106
+#define LOONGARCH_CSR_UCAWIN2_HI 0x107
+#define LOONGARCH_CSR_UCAWIN3_LO 0x108
+#define LOONGARCH_CSR_UCAWIN3_HI 0x109
+
+/* Direct Map windows registers */
+#define LOONGARCH_CSR_DMWIN0 0x180 /* 64 direct map win0: MEM & IF */
+#define LOONGARCH_CSR_DMWIN1 0x181 /* 64 direct map win1: MEM & IF */
+#define LOONGARCH_CSR_DMWIN2 0x182 /* 64 direct map win2: MEM */
+#define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */
+
+/* Direct Map window 0/1 */
+#define CSR_DMW0_PLV0 _CONST64_(1 << 0)
+#define CSR_DMW0_VSEG _CONST64_(0x8000)
+#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS)
+#define CSR_DMW0_INIT (CSR_DMW0_BASE | CSR_DMW0_PLV0)
+
+#define CSR_DMW1_PLV0 _CONST64_(1 << 0)
+#define CSR_DMW1_MAT _CONST64_(1 << 4)
+#define CSR_DMW1_VSEG _CONST64_(0x9000)
+#define CSR_DMW1_BASE (CSR_DMW1_VSEG << DMW_PABITS)
+#define CSR_DMW1_INIT (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0)
+
+/* Performance Counter registers */
+#define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */
+#define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */
+#define LOONGARCH_CSR_PERFCTRL1 0x202 /* 32 perf event 1 config */
+#define LOONGARCH_CSR_PERFCNTR1 0x203 /* 64 perf event 1 count value */
+#define LOONGARCH_CSR_PERFCTRL2 0x204 /* 32 perf event 2 config */
+#define LOONGARCH_CSR_PERFCNTR2 0x205 /* 64 perf event 2 count value */
+#define LOONGARCH_CSR_PERFCTRL3 0x206 /* 32 perf event 3 config */
+#define LOONGARCH_CSR_PERFCNTR3 0x207 /* 64 perf event 3 count value */
+#define CSR_PERFCTRL_PLV0 (_ULCAST_(1) << 16)
+#define CSR_PERFCTRL_PLV1 (_ULCAST_(1) << 17)
+#define CSR_PERFCTRL_PLV2 (_ULCAST_(1) << 18)
+#define CSR_PERFCTRL_PLV3 (_ULCAST_(1) << 19)
+#define CSR_PERFCTRL_IE (_ULCAST_(1) << 20)
+#define CSR_PERFCTRL_EVENT 0x3ff
+
+/* Debug registers */
+#define LOONGARCH_CSR_MWPC 0x300 /* data breakpoint config */
+#define LOONGARCH_CSR_MWPS 0x301 /* data breakpoint status */
+
+#define LOONGARCH_CSR_DB0ADDR 0x310 /* data breakpoint 0 address */
+#define LOONGARCH_CSR_DB0MASK 0x311 /* data breakpoint 0 mask */
+#define LOONGARCH_CSR_DB0CTL 0x312 /* data breakpoint 0 control */
+#define LOONGARCH_CSR_DB0ASID 0x313 /* data breakpoint 0 asid */
+
+#define LOONGARCH_CSR_DB1ADDR 0x318 /* data breakpoint 1 address */
+#define LOONGARCH_CSR_DB1MASK 0x319 /* data breakpoint 1 mask */
+#define LOONGARCH_CSR_DB1CTL 0x31a /* data breakpoint 1 control */
+#define LOONGARCH_CSR_DB1ASID 0x31b /* data breakpoint 1 asid */
+
+#define LOONGARCH_CSR_DB2ADDR 0x320 /* data breakpoint 2 address */
+#define LOONGARCH_CSR_DB2MASK 0x321 /* data breakpoint 2 mask */
+#define LOONGARCH_CSR_DB2CTL 0x322 /* data breakpoint 2 control */
+#define LOONGARCH_CSR_DB2ASID 0x323 /* data breakpoint 2 asid */
+
+#define LOONGARCH_CSR_DB3ADDR 0x328 /* data breakpoint 3 address */
+#define LOONGARCH_CSR_DB3MASK 0x329 /* data breakpoint 3 mask */
+#define LOONGARCH_CSR_DB3CTL 0x32a /* data breakpoint 3 control */
+#define LOONGARCH_CSR_DB3ASID 0x32b /* data breakpoint 3 asid */
+
+#define LOONGARCH_CSR_DB4ADDR 0x330 /* data breakpoint 4 address */
+#define LOONGARCH_CSR_DB4MASK 0x331 /* data breakpoint 4 mask */
+#define LOONGARCH_CSR_DB4CTL 0x332 /* data breakpoint 4 control */
+#define LOONGARCH_CSR_DB4ASID 0x333 /* data breakpoint 4 asid */
+
+#define LOONGARCH_CSR_DB5ADDR 0x338 /* data breakpoint 5 address */
+#define LOONGARCH_CSR_DB5MASK 0x339 /* data breakpoint 5 mask */
+#define LOONGARCH_CSR_DB5CTL 0x33a /* data breakpoint 5 control */
+#define LOONGARCH_CSR_DB5ASID 0x33b /* data breakpoint 5 asid */
+
+#define LOONGARCH_CSR_DB6ADDR 0x340 /* data breakpoint 6 address */
+#define LOONGARCH_CSR_DB6MASK 0x341 /* data breakpoint 6 mask */
+#define LOONGARCH_CSR_DB6CTL 0x342 /* data breakpoint 6 control */
+#define LOONGARCH_CSR_DB6ASID 0x343 /* data breakpoint 6 asid */
+
+#define LOONGARCH_CSR_DB7ADDR 0x348 /* data breakpoint 7 address */
+#define LOONGARCH_CSR_DB7MASK 0x349 /* data breakpoint 7 mask */
+#define LOONGARCH_CSR_DB7CTL 0x34a /* data breakpoint 7 control */
+#define LOONGARCH_CSR_DB7ASID 0x34b /* data breakpoint 7 asid */
+
+#define LOONGARCH_CSR_FWPC 0x380 /* instruction breakpoint config */
+#define LOONGARCH_CSR_FWPS 0x381 /* instruction breakpoint status */
+
+#define LOONGARCH_CSR_IB0ADDR 0x390 /* inst breakpoint 0 address */
+#define LOONGARCH_CSR_IB0MASK 0x391 /* inst breakpoint 0 mask */
+#define LOONGARCH_CSR_IB0CTL 0x392 /* inst breakpoint 0 control */
+#define LOONGARCH_CSR_IB0ASID 0x393 /* inst breakpoint 0 asid */
+
+#define LOONGARCH_CSR_IB1ADDR 0x398 /* inst breakpoint 1 address */
+#define LOONGARCH_CSR_IB1MASK 0x399 /* inst breakpoint 1 mask */
+#define LOONGARCH_CSR_IB1CTL 0x39a /* inst breakpoint 1 control */
+#define LOONGARCH_CSR_IB1ASID 0x39b /* inst breakpoint 1 asid */
+
+#define LOONGARCH_CSR_IB2ADDR 0x3a0 /* inst breakpoint 2 address */
+#define LOONGARCH_CSR_IB2MASK 0x3a1 /* inst breakpoint 2 mask */
+#define LOONGARCH_CSR_IB2CTL 0x3a2 /* inst breakpoint 2 control */
+#define LOONGARCH_CSR_IB2ASID 0x3a3 /* inst breakpoint 2 asid */
+
+#define LOONGARCH_CSR_IB3ADDR 0x3a8 /* inst breakpoint 3 address */
+#define LOONGARCH_CSR_IB3MASK 0x3a9 /* inst breakpoint 3 mask */
+#define LOONGARCH_CSR_IB3CTL 0x3aa /* inst breakpoint 3 control */
+#define LOONGARCH_CSR_IB3ASID 0x3ab /* inst breakpoint 3 asid */
+
+#define LOONGARCH_CSR_IB4ADDR 0x3b0 /* inst breakpoint 4 address */
+#define LOONGARCH_CSR_IB4MASK 0x3b1 /* inst breakpoint 4 mask */
+#define LOONGARCH_CSR_IB4CTL 0x3b2 /* inst breakpoint 4 control */
+#define LOONGARCH_CSR_IB4ASID 0x3b3 /* inst breakpoint 4 asid */
+
+#define LOONGARCH_CSR_IB5ADDR 0x3b8 /* inst breakpoint 5 address */
+#define LOONGARCH_CSR_IB5MASK 0x3b9 /* inst breakpoint 5 mask */
+#define LOONGARCH_CSR_IB5CTL 0x3ba /* inst breakpoint 5 control */
+#define LOONGARCH_CSR_IB5ASID 0x3bb /* inst breakpoint 5 asid */
+
+#define LOONGARCH_CSR_IB6ADDR 0x3c0 /* inst breakpoint 6 address */
+#define LOONGARCH_CSR_IB6MASK 0x3c1 /* inst breakpoint 6 mask */
+#define LOONGARCH_CSR_IB6CTL 0x3c2 /* inst breakpoint 6 control */
+#define LOONGARCH_CSR_IB6ASID 0x3c3 /* inst breakpoint 6 asid */
+
+#define LOONGARCH_CSR_IB7ADDR 0x3c8 /* inst breakpoint 7 address */
+#define LOONGARCH_CSR_IB7MASK 0x3c9 /* inst breakpoint 7 mask */
+#define LOONGARCH_CSR_IB7CTL 0x3ca /* inst breakpoint 7 control */
+#define LOONGARCH_CSR_IB7ASID 0x3cb /* inst breakpoint 7 asid */
+
+#define LOONGARCH_CSR_DEBUG 0x500 /* debug config */
+#define LOONGARCH_CSR_DERA 0x501 /* debug era */
+#define LOONGARCH_CSR_DESAVE 0x502 /* debug save */
+
+/*
+ * CSR_ECFG IM (local interrupt mask) bits
+ */
+#define ECFG0_IM 0x00001fff
+#define ECFGB_SIP0 0
+#define ECFGF_SIP0 (_ULCAST_(1) << ECFGB_SIP0)
+#define ECFGB_SIP1 1
+#define ECFGF_SIP1 (_ULCAST_(1) << ECFGB_SIP1)
+#define ECFGB_IP0 2
+#define ECFGF_IP0 (_ULCAST_(1) << ECFGB_IP0)
+#define ECFGB_IP1 3
+#define ECFGF_IP1 (_ULCAST_(1) << ECFGB_IP1)
+#define ECFGB_IP2 4
+#define ECFGF_IP2 (_ULCAST_(1) << ECFGB_IP2)
+#define ECFGB_IP3 5
+#define ECFGF_IP3 (_ULCAST_(1) << ECFGB_IP3)
+#define ECFGB_IP4 6
+#define ECFGF_IP4 (_ULCAST_(1) << ECFGB_IP4)
+#define ECFGB_IP5 7
+#define ECFGF_IP5 (_ULCAST_(1) << ECFGB_IP5)
+#define ECFGB_IP6 8
+#define ECFGF_IP6 (_ULCAST_(1) << ECFGB_IP6)
+#define ECFGB_IP7 9
+#define ECFGF_IP7 (_ULCAST_(1) << ECFGB_IP7)
+#define ECFGB_PMC 10
+#define ECFGF_PMC (_ULCAST_(1) << ECFGB_PMC)
+#define ECFGB_TIMER 11
+#define ECFGF_TIMER (_ULCAST_(1) << ECFGB_TIMER)
+#define ECFGB_IPI 12
+#define ECFGF_IPI (_ULCAST_(1) << ECFGB_IPI)
+#define ECFGF(hwirq) (_ULCAST_(1) << hwirq)
+
+#define ESTATF_IP 0x00001fff
+
+#define LOONGARCH_IOCSR_FEATURES 0x8
+#define IOCSRF_TEMP BIT_ULL(0)
+#define IOCSRF_NODECNT BIT_ULL(1)
+#define IOCSRF_MSI BIT_ULL(2)
+#define IOCSRF_EXTIOI BIT_ULL(3)
+#define IOCSRF_CSRIPI BIT_ULL(4)
+#define IOCSRF_FREQCSR BIT_ULL(5)
+#define IOCSRF_FREQSCALE BIT_ULL(6)
+#define IOCSRF_DVFSV1 BIT_ULL(7)
+#define IOCSRF_EIODECODE BIT_ULL(9)
+#define IOCSRF_FLATMODE BIT_ULL(10)
+#define IOCSRF_VM BIT_ULL(11)
+
+#define LOONGARCH_IOCSR_VENDOR 0x10
+
+#define LOONGARCH_IOCSR_CPUNAME 0x20
+
+#define LOONGARCH_IOCSR_NODECNT 0x408
+
+#define LOONGARCH_IOCSR_MISC_FUNC 0x420
+#define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21)
+#define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48)
+
+#define LOONGARCH_IOCSR_CPUTEMP 0x428
+
+/* PerCore CSR, only accessible by local cores */
+#define LOONGARCH_IOCSR_IPI_STATUS 0x1000
+#define LOONGARCH_IOCSR_IPI_EN 0x1004
+#define LOONGARCH_IOCSR_IPI_SET 0x1008
+#define LOONGARCH_IOCSR_IPI_CLEAR 0x100c
+#define LOONGARCH_IOCSR_MBUF0 0x1020
+#define LOONGARCH_IOCSR_MBUF1 0x1028
+#define LOONGARCH_IOCSR_MBUF2 0x1030
+#define LOONGARCH_IOCSR_MBUF3 0x1038
+
+#define LOONGARCH_IOCSR_IPI_SEND 0x1040
+#define IOCSR_IPI_SEND_IP_SHIFT 0
+#define IOCSR_IPI_SEND_CPU_SHIFT 16
+#define IOCSR_IPI_SEND_BLOCKING BIT(31)
+
+#define LOONGARCH_IOCSR_MBUF_SEND 0x1048
+#define IOCSR_MBUF_SEND_BLOCKING BIT_ULL(31)
+#define IOCSR_MBUF_SEND_BOX_SHIFT 2
+#define IOCSR_MBUF_SEND_BOX_LO(box) ((box) << 1)
+#define IOCSR_MBUF_SEND_BOX_HI(box) (((box) << 1) + 1)
+#define IOCSR_MBUF_SEND_CPU_SHIFT 16
+#define IOCSR_MBUF_SEND_BUF_SHIFT 32
+#define IOCSR_MBUF_SEND_H32_MASK 0xFFFFFFFF00000000ULL
+
+#define LOONGARCH_IOCSR_ANY_SEND 0x1158
+#define IOCSR_ANY_SEND_BLOCKING BIT_ULL(31)
+#define IOCSR_ANY_SEND_CPU_SHIFT 16
+#define IOCSR_ANY_SEND_MASK_SHIFT 27
+#define IOCSR_ANY_SEND_BUF_SHIFT 32
+#define IOCSR_ANY_SEND_H32_MASK 0xFFFFFFFF00000000ULL
+
+/* Register offset and bit definition for CSR access */
+#define LOONGARCH_IOCSR_TIMER_CFG 0x1060
+#define LOONGARCH_IOCSR_TIMER_TICK 0x1070
+#define IOCSR_TIMER_CFG_RESERVED (_ULCAST_(1) << 63)
+#define IOCSR_TIMER_CFG_PERIODIC (_ULCAST_(1) << 62)
+#define IOCSR_TIMER_CFG_EN (_ULCAST_(1) << 61)
+#define IOCSR_TIMER_MASK 0x0ffffffffffffULL
+#define IOCSR_TIMER_INITVAL_RST (_ULCAST_(0xffff) << 48)
+
+#define LOONGARCH_IOCSR_EXTIOI_NODEMAP_BASE 0x14a0
+#define LOONGARCH_IOCSR_EXTIOI_IPMAP_BASE 0x14c0
+#define LOONGARCH_IOCSR_EXTIOI_EN_BASE 0x1600
+#define LOONGARCH_IOCSR_EXTIOI_BOUNCE_BASE 0x1680
+#define LOONGARCH_IOCSR_EXTIOI_ISR_BASE 0x1800
+#define LOONGARCH_IOCSR_EXTIOI_ROUTE_BASE 0x1c00
+#define IOCSR_EXTIOI_VECTOR_NUM 256
+
+#ifndef __ASSEMBLY__
+
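+/*
+ * rdtime.d reads the 64-bit constant-frequency stable counter; the
+ * second register operand receives the counter ID.
+ */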
+static inline u64 drdtime(void)
+{
+ int rID = 0;
+ u64 val = 0;
+
+ __asm__ __volatile__(
+ "rdtime.d %0, %1 \n\t"
+ : "=r"(val), "=r"(rID)
+ :
+ );
+ return val;
+}
+
+static inline unsigned int get_csr_cpuid(void)
+{
+ return csr_read32(LOONGARCH_CSR_CPUID);
+}
+
+static inline void csr_any_send(unsigned int addr, unsigned int data,
+ unsigned int data_mask, unsigned int cpu)
+{
+ uint64_t val = 0;
+
+ val = IOCSR_ANY_SEND_BLOCKING | addr;
+ val |= (cpu << IOCSR_ANY_SEND_CPU_SHIFT);
+ val |= (data_mask << IOCSR_ANY_SEND_MASK_SHIFT);
+ val |= ((uint64_t)data << IOCSR_ANY_SEND_BUF_SHIFT);
+ iocsr_write64(val, LOONGARCH_IOCSR_ANY_SEND);
+}
+
+static inline unsigned int read_csr_excode(void)
+{
+ return (csr_read32(LOONGARCH_CSR_ESTAT) & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
+}
+
+static inline void write_csr_index(unsigned int idx)
+{
+ csr_xchg32(idx, CSR_TLBIDX_IDXM, LOONGARCH_CSR_TLBIDX);
+}
+
+static inline unsigned int read_csr_pagesize(void)
+{
+ return (csr_read32(LOONGARCH_CSR_TLBIDX) & CSR_TLBIDX_SIZEM) >> CSR_TLBIDX_SIZE;
+}
+
+static inline void write_csr_pagesize(unsigned int size)
+{
+ csr_xchg32(size << CSR_TLBIDX_SIZE, CSR_TLBIDX_SIZEM, LOONGARCH_CSR_TLBIDX);
+}
+
+static inline unsigned int read_csr_tlbrefill_pagesize(void)
+{
+ return (csr_read64(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREHI_PS_SHIFT;
+}
+
+static inline void write_csr_tlbrefill_pagesize(unsigned int size)
+{
+ csr_xchg64(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TLBREHI);
+}
+
+#define read_csr_asid() csr_read32(LOONGARCH_CSR_ASID)
+#define write_csr_asid(val) csr_write32(val, LOONGARCH_CSR_ASID)
+#define read_csr_entryhi() csr_read64(LOONGARCH_CSR_TLBEHI)
+#define write_csr_entryhi(val) csr_write64(val, LOONGARCH_CSR_TLBEHI)
+#define read_csr_entrylo0() csr_read64(LOONGARCH_CSR_TLBELO0)
+#define write_csr_entrylo0(val) csr_write64(val, LOONGARCH_CSR_TLBELO0)
+#define read_csr_entrylo1() csr_read64(LOONGARCH_CSR_TLBELO1)
+#define write_csr_entrylo1(val) csr_write64(val, LOONGARCH_CSR_TLBELO1)
+#define read_csr_ecfg() csr_read32(LOONGARCH_CSR_ECFG)
+#define write_csr_ecfg(val) csr_write32(val, LOONGARCH_CSR_ECFG)
+#define read_csr_estat() csr_read32(LOONGARCH_CSR_ESTAT)
+#define write_csr_estat(val) csr_write32(val, LOONGARCH_CSR_ESTAT)
+#define read_csr_tlbidx() csr_read32(LOONGARCH_CSR_TLBIDX)
+#define write_csr_tlbidx(val) csr_write32(val, LOONGARCH_CSR_TLBIDX)
+#define read_csr_euen() csr_read32(LOONGARCH_CSR_EUEN)
+#define write_csr_euen(val) csr_write32(val, LOONGARCH_CSR_EUEN)
+#define read_csr_cpuid() csr_read32(LOONGARCH_CSR_CPUID)
+#define read_csr_prcfg1() csr_read64(LOONGARCH_CSR_PRCFG1)
+#define write_csr_prcfg1(val) csr_write64(val, LOONGARCH_CSR_PRCFG1)
+#define read_csr_prcfg2() csr_read64(LOONGARCH_CSR_PRCFG2)
+#define write_csr_prcfg2(val) csr_write64(val, LOONGARCH_CSR_PRCFG2)
+#define read_csr_prcfg3() csr_read64(LOONGARCH_CSR_PRCFG3)
+#define write_csr_prcfg3(val) csr_write64(val, LOONGARCH_CSR_PRCFG3)
+#define read_csr_stlbpgsize() csr_read32(LOONGARCH_CSR_STLBPGSIZE)
+#define write_csr_stlbpgsize(val) csr_write32(val, LOONGARCH_CSR_STLBPGSIZE)
+#define read_csr_rvacfg() csr_read32(LOONGARCH_CSR_RVACFG)
+#define write_csr_rvacfg(val) csr_write32(val, LOONGARCH_CSR_RVACFG)
+#define write_csr_tintclear(val) csr_write32(val, LOONGARCH_CSR_TINTCLR)
+#define read_csr_impctl1() csr_read64(LOONGARCH_CSR_IMPCTL1)
+#define write_csr_impctl1(val) csr_write64(val, LOONGARCH_CSR_IMPCTL1)
+#define write_csr_impctl2(val) csr_write64(val, LOONGARCH_CSR_IMPCTL2)
+
+#define read_csr_perfctrl0() csr_read64(LOONGARCH_CSR_PERFCTRL0)
+#define read_csr_perfcntr0() csr_read64(LOONGARCH_CSR_PERFCNTR0)
+#define read_csr_perfctrl1() csr_read64(LOONGARCH_CSR_PERFCTRL1)
+#define read_csr_perfcntr1() csr_read64(LOONGARCH_CSR_PERFCNTR1)
+#define read_csr_perfctrl2() csr_read64(LOONGARCH_CSR_PERFCTRL2)
+#define read_csr_perfcntr2() csr_read64(LOONGARCH_CSR_PERFCNTR2)
+#define read_csr_perfctrl3() csr_read64(LOONGARCH_CSR_PERFCTRL3)
+#define read_csr_perfcntr3() csr_read64(LOONGARCH_CSR_PERFCNTR3)
+#define write_csr_perfctrl0(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL0)
+#define write_csr_perfcntr0(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR0)
+#define write_csr_perfctrl1(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL1)
+#define write_csr_perfcntr1(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR1)
+#define write_csr_perfctrl2(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL2)
+#define write_csr_perfcntr2(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR2)
+#define write_csr_perfctrl3(val) csr_write64(val, LOONGARCH_CSR_PERFCTRL3)
+#define write_csr_perfcntr3(val) csr_write64(val, LOONGARCH_CSR_PERFCNTR3)
+
+/*
+ * Manipulate bits in a register.
+ */
+#define __BUILD_CSR_COMMON(name) \
+static inline unsigned long \
+set_##name(unsigned long set) \
+{ \
+ unsigned long res, new; \
+ \
+ res = read_##name(); \
+ new = res | set; \
+ write_##name(new); \
+ \
+ return res; \
+} \
+ \
+static inline unsigned long \
+clear_##name(unsigned long clear) \
+{ \
+ unsigned long res, new; \
+ \
+ res = read_##name(); \
+ new = res & ~clear; \
+ write_##name(new); \
+ \
+ return res; \
+} \
+ \
+static inline unsigned long \
+change_##name(unsigned long change, unsigned long val) \
+{ \
+ unsigned long res, new; \
+ \
+ res = read_##name(); \
+ new = res & ~change; \
+ new |= (val & change); \
+ write_##name(new); \
+ \
+ return res; \
+}
+
+#define __BUILD_CSR_OP(name) __BUILD_CSR_COMMON(csr_##name)
+
+__BUILD_CSR_OP(euen)
+__BUILD_CSR_OP(ecfg)
+__BUILD_CSR_OP(tlbidx)
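+
+/*
+ * The builders above generate set_csr_<name>(), clear_csr_<name>() and
+ * change_csr_<name>() helpers that return the previous register value,
+ * e.g. set_csr_ecfg(ECFGF_TIMER) unmasks the local timer interrupt and
+ * clear_csr_ecfg(ECFGF_TIMER) masks it again.
+ */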
+
+#define set_csr_estat(val) \
+ csr_xchg32(val, val, LOONGARCH_CSR_ESTAT)
+#define clear_csr_estat(val) \
+ csr_xchg32(~(val), val, LOONGARCH_CSR_ESTAT)
+
+#endif /* __ASSEMBLY__ */
+
+/* Generic EntryLo bit definitions */
+#define ENTRYLO_V (_ULCAST_(1) << 0)
+#define ENTRYLO_D (_ULCAST_(1) << 1)
+#define ENTRYLO_PLV_SHIFT 2
+#define ENTRYLO_PLV (_ULCAST_(3) << ENTRYLO_PLV_SHIFT)
+#define ENTRYLO_C_SHIFT 4
+#define ENTRYLO_C (_ULCAST_(3) << ENTRYLO_C_SHIFT)
+#define ENTRYLO_G (_ULCAST_(1) << 6)
+#define ENTRYLO_NR (_ULCAST_(1) << 61)
+#define ENTRYLO_NX (_ULCAST_(1) << 62)
+
+/* Values for PageSize register */
+#define PS_4K 0x0000000c
+#define PS_8K 0x0000000d
+#define PS_16K 0x0000000e
+#define PS_32K 0x0000000f
+#define PS_64K 0x00000010
+#define PS_128K 0x00000011
+#define PS_256K 0x00000012
+#define PS_512K 0x00000013
+#define PS_1M 0x00000014
+#define PS_2M 0x00000015
+#define PS_4M 0x00000016
+#define PS_8M 0x00000017
+#define PS_16M 0x00000018
+#define PS_32M 0x00000019
+#define PS_64M 0x0000001a
+#define PS_128M 0x0000001b
+#define PS_256M 0x0000001c
+#define PS_512M 0x0000001d
+#define PS_1G 0x0000001e
+
+/* Default page size for a given kernel configuration */
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PS_DEFAULT_SIZE PS_4K
+#elif defined(CONFIG_PAGE_SIZE_16KB)
+#define PS_DEFAULT_SIZE PS_16K
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PS_DEFAULT_SIZE PS_64K
+#else
+#error Bad page size configuration!
+#endif
+
+/* Default huge tlb size for a given kernel configuration */
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PS_HUGE_SIZE PS_1M
+#elif defined(CONFIG_PAGE_SIZE_16KB)
+#define PS_HUGE_SIZE PS_16M
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PS_HUGE_SIZE PS_256M
+#else
+#error Bad page size configuration for hugetlbfs!
+#endif
+
+/* ExStatus.ExcCode */
+#define EXCCODE_RSV 0 /* Reserved */
+#define EXCCODE_TLBL 1 /* TLB miss on a load */
+#define EXCCODE_TLBS 2 /* TLB miss on a store */
+#define EXCCODE_TLBI 3 /* TLB miss on an ifetch */
+#define EXCCODE_TLBM 4 /* TLB modified fault */
+#define EXCCODE_TLBNR 5 /* TLB Read-Inhibit exception */
+#define EXCCODE_TLBNX 6 /* TLB Execution-Inhibit exception */
+#define EXCCODE_TLBPE 7 /* TLB Privilege Error */
+#define EXCCODE_ADE 8 /* Address Error */
+ #define EXSUBCODE_ADEF 0 /* Fetch Instruction */
+ #define EXSUBCODE_ADEM 1 /* Access Memory */
+#define EXCCODE_ALE 9 /* Unaligned Access */
+#define EXCCODE_OOB 10 /* Out of bounds */
+#define EXCCODE_SYS 11 /* System call */
+#define EXCCODE_BP 12 /* Breakpoint */
+#define EXCCODE_INE 13 /* Inst. Not Exist */
+#define EXCCODE_IPE 14 /* Inst. Privileged Error */
+#define EXCCODE_FPDIS 15 /* FPU Disabled */
+#define EXCCODE_LSXDIS 16 /* LSX Disabled */
+#define EXCCODE_LASXDIS 17 /* LASX Disabled */
+#define EXCCODE_FPE 18 /* Floating Point Exception */
+ #define EXCSUBCODE_FPE 0 /* Floating Point Exception */
+ #define EXCSUBCODE_VFPE 1 /* Vector Exception */
+#define EXCCODE_WATCH 19 /* Watch address reference */
+#define EXCCODE_BTDIS 20 /* Binary Trans. Disabled */
+#define EXCCODE_BTE 21 /* Binary Trans. Exception */
+#define EXCCODE_PSI 22 /* Guest Privileged Error */
+#define EXCCODE_HYP 23 /* Hypercall */
+#define EXCCODE_GCM 24 /* Guest CSR modified */
+ #define EXCSUBCODE_GCSC 0 /* Software caused */
+ #define EXCSUBCODE_GCHC 1 /* Hardware caused */
+#define EXCCODE_SE 25 /* Security */
+
+#define EXCCODE_INT_START 64
+#define EXCCODE_SIP0 64
+#define EXCCODE_SIP1 65
+#define EXCCODE_IP0 66
+#define EXCCODE_IP1 67
+#define EXCCODE_IP2 68
+#define EXCCODE_IP3 69
+#define EXCCODE_IP4 70
+#define EXCCODE_IP5 71
+#define EXCCODE_IP6 72
+#define EXCCODE_IP7 73
+#define EXCCODE_PMC 74 /* Performance Counter */
+#define EXCCODE_TIMER 75
+#define EXCCODE_IPI 76
+#define EXCCODE_NMI 77
+#define EXCCODE_INT_END 78
+#define EXCCODE_INT_NUM (EXCCODE_INT_END - EXCCODE_INT_START)
+
+/* FPU register names */
+#define LOONGARCH_FCSR0 $r0
+#define LOONGARCH_FCSR1 $r1
+#define LOONGARCH_FCSR2 $r2
+#define LOONGARCH_FCSR3 $r3
+
+/* FPU Status Register Values */
+#define FPU_CSR_RSVD 0xe0e0fce0
+
+/*
+ * X: the exception cause indicator bits
+ * E: the exception enable bits
+ * S: the sticky/flag bits
+ */
+#define FPU_CSR_ALL_X 0x1f000000
+#define FPU_CSR_INV_X 0x10000000
+#define FPU_CSR_DIV_X 0x08000000
+#define FPU_CSR_OVF_X 0x04000000
+#define FPU_CSR_UDF_X 0x02000000
+#define FPU_CSR_INE_X 0x01000000
+
+#define FPU_CSR_ALL_S 0x001f0000
+#define FPU_CSR_INV_S 0x00100000
+#define FPU_CSR_DIV_S 0x00080000
+#define FPU_CSR_OVF_S 0x00040000
+#define FPU_CSR_UDF_S 0x00020000
+#define FPU_CSR_INE_S 0x00010000
+
+#define FPU_CSR_ALL_E 0x0000001f
+#define FPU_CSR_INV_E 0x00000010
+#define FPU_CSR_DIV_E 0x00000008
+#define FPU_CSR_OVF_E 0x00000004
+#define FPU_CSR_UDF_E 0x00000002
+#define FPU_CSR_INE_E 0x00000001
+
+/* Bits 8 and 9 of FPU Status Register specify the rounding mode */
+#define FPU_CSR_RM 0x300
+#define FPU_CSR_RN 0x000 /* nearest */
+#define FPU_CSR_RZ 0x100 /* towards zero */
+#define FPU_CSR_RU 0x200 /* towards +Infinity */
+#define FPU_CSR_RD 0x300 /* towards -Infinity */
+
+#define read_fcsr(source) \
+({ \
+ unsigned int __res; \
+\
+ __asm__ __volatile__( \
+ " movfcsr2gr %0, "__stringify(source)" \n" \
+ : "=r" (__res)); \
+ __res; \
+})
+
+#define write_fcsr(dest, val) \
+do { \
+ __asm__ __volatile__( \
+ " movgr2fcsr %0, "__stringify(dest)" \n" \
+ : : "r" (val)); \
+} while (0)
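+
+/*
+ * The FCSR name is pasted straight into the instruction, so callers use
+ * e.g. read_fcsr(LOONGARCH_FCSR0) and write_fcsr(LOONGARCH_FCSR0, val)
+ * to access the main floating-point control and status register.
+ */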
+
+#endif /* _ASM_LOONGARCH_H */
diff --git a/arch/loongarch/include/asm/loongson.h b/arch/loongarch/include/asm/loongson.h
new file mode 100644
index 000000000000..6a8038725ba7
--- /dev/null
+++ b/arch/loongarch/include/asm/loongson.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGSON_H
+#define __ASM_LOONGSON_H
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+
+extern const struct plat_smp_ops loongson3_smp_ops;
+
+#define LOONGSON_REG(x) \
+ (*(volatile u32 *)((char *)TO_UNCACHE(LOONGSON_REG_BASE) + (x)))
+
+#define LOONGSON_LIO_BASE 0x18000000
+#define LOONGSON_LIO_SIZE 0x00100000 /* 1M */
+#define LOONGSON_LIO_TOP (LOONGSON_LIO_BASE+LOONGSON_LIO_SIZE-1)
+
+#define LOONGSON_BOOT_BASE 0x1c000000
+#define LOONGSON_BOOT_SIZE 0x02000000 /* 32M */
+#define LOONGSON_BOOT_TOP (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1)
+
+#define LOONGSON_REG_BASE 0x1fe00000
+#define LOONGSON_REG_SIZE 0x00100000 /* 1M */
+#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1)
+
+/* GPIO Regs - r/w */
+
+#define LOONGSON_GPIODATA LOONGSON_REG(0x11c)
+#define LOONGSON_GPIOIE LOONGSON_REG(0x120)
+#define LOONGSON_REG_GPIO_BASE (LOONGSON_REG_BASE + 0x11c)
+
+#define MAX_PACKAGES 16
+
+/* Chip Config register of each physical cpu package */
+extern u64 loongson_chipcfg[MAX_PACKAGES];
+#define LOONGSON_CHIPCFG(id) (*(volatile u32 *)(loongson_chipcfg[id]))
+
+/* Chip Temperature register of each physical cpu package */
+extern u64 loongson_chiptemp[MAX_PACKAGES];
+#define LOONGSON_CHIPTEMP(id) (*(volatile u32 *)(loongson_chiptemp[id]))
+
+/* Freq Control register of each physical cpu package */
+extern u64 loongson_freqctrl[MAX_PACKAGES];
+#define LOONGSON_FREQCTRL(id) (*(volatile u32 *)(loongson_freqctrl[id]))
+
+#define xconf_readl(addr) readl(addr)
+#define xconf_readq(addr) readq(addr)
+
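+/*
+ * The dummy "ld.b $r0" after each store reads the register back,
+ * presumably so the posted write reaches the device before returning.
+ */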
+static inline void xconf_writel(u32 val, volatile void __iomem *addr)
+{
+ asm volatile (
+ " st.w %[v], %[hw], 0 \n"
+ " ld.b $r0, %[hw], 0 \n"
+ :
+ : [hw] "r" (addr), [v] "r" (val)
+ );
+}
+
+static inline void xconf_writeq(u64 val64, volatile void __iomem *addr)
+{
+ asm volatile (
+ " st.d %[v], %[hw], 0 \n"
+ " ld.b $r0, %[hw], 0 \n"
+ :
+ : [hw] "r" (addr), [v] "r" (val64)
+ );
+}
+
+/* ============== LS7A registers =============== */
+#define LS7A_PCH_REG_BASE 0x10000000UL
+/* LPC regs */
+#define LS7A_LPC_REG_BASE (LS7A_PCH_REG_BASE + 0x00002000)
+/* CHIPCFG regs */
+#define LS7A_CHIPCFG_REG_BASE (LS7A_PCH_REG_BASE + 0x00010000)
+/* MISC reg base */
+#define LS7A_MISC_REG_BASE (LS7A_PCH_REG_BASE + 0x00080000)
+/* ACPI regs */
+#define LS7A_ACPI_REG_BASE (LS7A_MISC_REG_BASE + 0x00050000)
+/* RTC regs */
+#define LS7A_RTC_REG_BASE (LS7A_MISC_REG_BASE + 0x00050100)
+
+#define LS7A_DMA_CFG (volatile void *)TO_UNCACHE(LS7A_CHIPCFG_REG_BASE + 0x041c)
+#define LS7A_DMA_NODE_SHF 8
+#define LS7A_DMA_NODE_MASK 0x1F00
+
+#define LS7A_INT_MASK_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x020)
+#define LS7A_INT_EDGE_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x060)
+#define LS7A_INT_CLEAR_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x080)
+#define LS7A_INT_HTMSI_EN_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x040)
+#define LS7A_INT_ROUTE_ENTRY_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x100)
+#define LS7A_INT_HTMSI_VEC_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x200)
+#define LS7A_INT_STATUS_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x3a0)
+#define LS7A_INT_POL_REG (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x3e0)
+#define LS7A_LPC_INT_CTL (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2000)
+#define LS7A_LPC_INT_ENA (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2004)
+#define LS7A_LPC_INT_STS (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2008)
+#define LS7A_LPC_INT_CLR (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x200c)
+#define LS7A_LPC_INT_POL (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2010)
+
+#define LS7A_PMCON_SOC_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x000)
+#define LS7A_PMCON_RESUME_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x004)
+#define LS7A_PMCON_RTC_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x008)
+#define LS7A_PM1_EVT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x00c)
+#define LS7A_PM1_ENA_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x010)
+#define LS7A_PM1_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x014)
+#define LS7A_PM1_TMR_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x018)
+#define LS7A_P_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x01c)
+#define LS7A_GPE0_STS_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x028)
+#define LS7A_GPE0_ENA_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x02c)
+#define LS7A_RST_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x030)
+#define LS7A_WD_SET_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x034)
+#define LS7A_WD_TIMER_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x038)
+#define LS7A_THSENS_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x04c)
+#define LS7A_GEN_RTC_1_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x050)
+#define LS7A_GEN_RTC_2_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x054)
+#define LS7A_DPM_CFG_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x400)
+#define LS7A_DPM_STS_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x404)
+#define LS7A_DPM_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x408)
+
+typedef enum {
+ ACPI_PCI_HOTPLUG_STATUS = 1 << 1,
+ ACPI_CPU_HOTPLUG_STATUS = 1 << 2,
+ ACPI_MEM_HOTPLUG_STATUS = 1 << 3,
+ ACPI_POWERBUTTON_STATUS = 1 << 8,
+ ACPI_RTC_WAKE_STATUS = 1 << 10,
+ ACPI_PCI_WAKE_STATUS = 1 << 14,
+ ACPI_ANY_WAKE_STATUS = 1 << 15,
+} AcpiEventStatusBits;
+
+#define HT1LO_OFFSET 0xe0000000000UL
+
+/* PCI Configuration Space Base */
+#define MCFG_EXT_PCICFG_BASE 0xefe00000000UL
+
+/* REG ACCESS */
+#define ls7a_readb(addr) (*(volatile unsigned char *)TO_UNCACHE(addr))
+#define ls7a_readw(addr) (*(volatile unsigned short *)TO_UNCACHE(addr))
+#define ls7a_readl(addr) (*(volatile unsigned int *)TO_UNCACHE(addr))
+#define ls7a_readq(addr) (*(volatile unsigned long *)TO_UNCACHE(addr))
+#define ls7a_writeb(val, addr) *(volatile unsigned char *)TO_UNCACHE(addr) = (val)
+#define ls7a_writew(val, addr) *(volatile unsigned short *)TO_UNCACHE(addr) = (val)
+#define ls7a_writel(val, addr) *(volatile unsigned int *)TO_UNCACHE(addr) = (val)
+#define ls7a_writeq(val, addr) *(volatile unsigned long *)TO_UNCACHE(addr) = (val)
+
+#endif /* __ASM_LOONGSON_H */
diff --git a/arch/loongarch/include/asm/mmu.h b/arch/loongarch/include/asm/mmu.h
new file mode 100644
index 000000000000..0cc2d0803537
--- /dev/null
+++ b/arch/loongarch/include/asm/mmu.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_MMU_H
+#define __ASM_MMU_H
+
+#include <linux/atomic.h>
+#include <linux/spinlock.h>
+
+typedef struct {
+ u64 asid[NR_CPUS];
+ void *vdso;
+} mm_context_t;
+
+#endif /* __ASM_MMU_H */
diff --git a/arch/loongarch/include/asm/mmu_context.h b/arch/loongarch/include/asm/mmu_context.h
new file mode 100644
index 000000000000..9f97c3453b9c
--- /dev/null
+++ b/arch/loongarch/include/asm/mmu_context.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Switch a MMU context.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_MMU_CONTEXT_H
+#define _ASM_MMU_CONTEXT_H
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm_types.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm-generic/mm_hooks.h>
+
+/*
+ * All upper bits that the hardware does not use are treated
+ * as a software ASID version extension.
+ */
+static inline u64 asid_version_mask(unsigned int cpu)
+{
+ return ~(u64)(cpu_asid_mask(&cpu_data[cpu]));
+}
+
+static inline u64 asid_first_version(unsigned int cpu)
+{
+ return cpu_asid_mask(&cpu_data[cpu]) + 1;
+}
+
+#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
+#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
+#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
+
+static inline int asid_valid(struct mm_struct *mm, unsigned int cpu)
+{
+ if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))
+ return 0;
+
+ return 1;
+}
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+/* Normal, classic get_new_mmu_context */
+static inline void
+get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
+{
+ u64 asid = asid_cache(cpu);
+
+ if (!((++asid) & cpu_asid_mask(&cpu_data[cpu])))
+ local_flush_tlb_user(); /* start new asid cycle */
+
+ cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
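+
+/*
+ * When the hardware ASID bits wrap around to zero, the local TLB is
+ * flushed and a new software generation begins; any mm whose cached
+ * context still carries the old generation fails asid_valid() and is
+ * given a fresh ASID on its next switch_mm().
+ */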
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ cpu_context(i, mm) = 0;
+
+ return 0;
+}
+
+static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /* Check if our ASID is of an older version and thus invalid */
+ if (!asid_valid(next, cpu))
+ get_new_mmu_context(next, cpu);
+
+ write_csr_asid(cpu_asid(cpu, next));
+
+ if (next != &init_mm)
+ csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL);
+ else
+ csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
+
+ /*
+	 * Mark this CPU as using "next" so that remote (IPI) TLB flush
+	 * routines will not miss it.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+}
+
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch_mm_irqs_off(prev, next, tsk);
+ local_irq_restore(flags);
+}
+
+/*
+ * Destroy context related info for an mm_struct that is about
+ * to be put to rest.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
+
+#define activate_mm(prev, next) switch_mm(prev, next, current)
+#define deactivate_mm(task, mm) do { } while (0)
+
+/*
+ * If mm is currently active, we can't really drop it.
+ * Instead, we allocate a new ASID for it.
+ */
+static inline void
+drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
+{
+ int asid;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data);
+
+ if (asid == cpu_asid(cpu, mm)) {
+ if (!current->mm || (current->mm == mm)) {
+ get_new_mmu_context(mm, cpu);
+ write_csr_asid(cpu_asid(cpu, mm));
+ goto out;
+ }
+ }
+
+ /* Will get a new context next time */
+ cpu_context(cpu, mm) = 0;
+ cpumask_clear_cpu(cpu, mm_cpumask(mm));
+out:
+ local_irq_restore(flags);
+}
+
+#endif /* _ASM_MMU_CONTEXT_H */
diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h
new file mode 100644
index 000000000000..fe67d0b4b33d
--- /dev/null
+++ b/arch/loongarch/include/asm/mmzone.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen (chenhuacai@loongson.cn)
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_MMZONE_H_
+#define _ASM_MMZONE_H_
+
+#include <asm/page.h>
+#include <asm/numa.h>
+
+extern struct pglist_data *node_data[];
+
+#define NODE_DATA(nid) (node_data[(nid)])
+
+extern void setup_zero_pages(void);
+
+#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/loongarch/include/asm/module.h b/arch/loongarch/include/asm/module.h
new file mode 100644
index 000000000000..9f6718df1854
--- /dev/null
+++ b/arch/loongarch/include/asm/module.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_MODULE_H
+#define _ASM_MODULE_H
+
+#include <asm/inst.h>
+#include <asm-generic/module.h>
+
+#define RELA_STACK_DEPTH 16
+
+struct mod_section {
+ Elf_Shdr *shdr;
+ int num_entries;
+ int max_entries;
+};
+
+struct mod_arch_specific {
+ struct mod_section plt;
+ struct mod_section plt_idx;
+};
+
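+/*
+ * A PLT stub is four instructions: lu12i.w/lu32i.d/lu52i.d materialize
+ * the target address in $t1 and jirl jumps there, so module calls can
+ * reach targets that are out of range for a direct branch.
+ */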
+struct plt_entry {
+ u32 inst_lu12iw;
+ u32 inst_lu32id;
+ u32 inst_lu52id;
+ u32 inst_jirl;
+};
+
+struct plt_idx_entry {
+ unsigned long symbol_addr;
+};
+
+Elf_Addr module_emit_plt_entry(struct module *mod, unsigned long val);
+
+static inline struct plt_entry emit_plt_entry(unsigned long val)
+{
+ u32 lu12iw, lu32id, lu52id, jirl;
+
+ lu12iw = (lu12iw_op << 25 | (((val >> 12) & 0xfffff) << 5) | LOONGARCH_GPR_T1);
+ lu32id = larch_insn_gen_lu32id(LOONGARCH_GPR_T1, ADDR_IMM(val, LU32ID));
+ lu52id = larch_insn_gen_lu52id(LOONGARCH_GPR_T1, LOONGARCH_GPR_T1, ADDR_IMM(val, LU52ID));
+ jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, 0, (val & 0xfff));
+
+ return (struct plt_entry) { lu12iw, lu32id, lu52id, jirl };
+}
+
+static inline struct plt_idx_entry emit_plt_idx_entry(unsigned long val)
+{
+ return (struct plt_idx_entry) { val };
+}
+
+static inline int get_plt_idx(unsigned long val, const struct mod_section *sec)
+{
+ int i;
+ struct plt_idx_entry *plt_idx = (struct plt_idx_entry *)sec->shdr->sh_addr;
+
+ for (i = 0; i < sec->num_entries; i++) {
+ if (plt_idx[i].symbol_addr == val)
+ return i;
+ }
+
+ return -1;
+}
+
+static inline struct plt_entry *get_plt_entry(unsigned long val,
+ const struct mod_section *sec_plt,
+ const struct mod_section *sec_plt_idx)
+{
+ int plt_idx = get_plt_idx(val, sec_plt_idx);
+ struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr;
+
+ if (plt_idx < 0)
+ return NULL;
+
+ return plt + plt_idx;
+}
+
+#endif /* _ASM_MODULE_H */
diff --git a/arch/loongarch/include/asm/module.lds.h b/arch/loongarch/include/asm/module.lds.h
new file mode 100644
index 000000000000..31c1c0db11a3
--- /dev/null
+++ b/arch/loongarch/include/asm/module.lds.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */
+SECTIONS {
+ . = ALIGN(4);
+ .plt : { BYTE(0) }
+ .plt.idx : { BYTE(0) }
+}
diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h
new file mode 100644
index 000000000000..27f319b49862
--- /dev/null
+++ b/arch/loongarch/include/asm/numa.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_NUMA_H
+#define _ASM_LOONGARCH_NUMA_H
+
+#include <linux/nodemask.h>
+
+#define NODE_ADDRSPACE_SHIFT 44
+
+#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
+#define nid_to_addrbase(nid) (_ULCAST_(nid) << NODE_ADDRSPACE_SHIFT)
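+/* e.g. nid_to_addrbase(1) == 0x100000000000 and pa_to_nid(0x100000000000) == 1 */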
+
+#ifdef CONFIG_NUMA
+
+extern int numa_off;
+extern s16 __cpuid_to_node[CONFIG_NR_CPUS];
+extern nodemask_t numa_nodes_parsed __initdata;
+
+struct numa_memblk {
+ u64 start;
+ u64 end;
+ int nid;
+};
+
+#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
+struct numa_meminfo {
+ int nr_blks;
+ struct numa_memblk blk[NR_NODE_MEMBLKS];
+};
+
+extern int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+
+extern void __init early_numa_add_cpu(int cpuid, s16 node);
+extern void numa_add_cpu(unsigned int cpu);
+extern void numa_remove_cpu(unsigned int cpu);
+
+static inline void numa_clear_node(int cpu)
+{
+}
+
+static inline void set_cpuid_to_node(int cpuid, s16 node)
+{
+ __cpuid_to_node[cpuid] = node;
+}
+
+extern int early_cpu_to_node(int cpu);
+
+#else
+
+static inline void early_numa_add_cpu(int cpuid, s16 node) { }
+static inline void numa_add_cpu(unsigned int cpu) { }
+static inline void numa_remove_cpu(unsigned int cpu) { }
+
+static inline int early_cpu_to_node(int cpu)
+{
+ return 0;
+}
+
+#endif /* CONFIG_NUMA */
+
+#endif /* _ASM_LOONGARCH_NUMA_H */
diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
new file mode 100644
index 000000000000..3dba4986f6c9
--- /dev/null
+++ b/arch/loongarch/include/asm/page.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PAGE_H
+#define _ASM_PAGE_H
+
+#include <linux/const.h>
+
+/*
+ * PAGE_SHIFT determines the page size
+ */
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PAGE_SHIFT 12
+#endif
+#ifdef CONFIG_PAGE_SIZE_16KB
+#define PAGE_SHIFT 14
+#endif
+#ifdef CONFIG_PAGE_SIZE_64KB
+#define PAGE_SHIFT 16
+#endif
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
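+
+/*
+ * A huge page covers the range mapped by one full PTE table, e.g. with
+ * 16KB base pages (PAGE_SHIFT == 14) HPAGE_SHIFT is 25, i.e. 32MB.
+ */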
+
+#ifndef __ASSEMBLY__
+
+#include <linux/kernel.h>
+#include <linux/pfn.h>
+
+#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
+
+/*
+ * It's normally defined only for FLATMEM config but it's
+ * used in our early mem init code for all memory models.
+ * So always define it.
+ */
+#define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
+
+extern void clear_page(void *page);
+extern void copy_page(void *to, void *from);
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+extern unsigned long shm_align_mask;
+
+struct page;
+struct vm_area_struct;
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+typedef struct { unsigned long pte; } pte_t;
+#define pte_val(x) ((x).pte)
+#define __pte(x) ((pte_t) { (x) })
+typedef struct page *pgtable_t;
+
+typedef struct { unsigned long pgd; } pgd_t;
+#define pgd_val(x) ((x).pgd)
+#define __pgd(x) ((pgd_t) { (x) })
+
+/*
+ * Manipulate page protection bits
+ */
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define pgprot_val(x) ((x).pgprot)
+#define __pgprot(x) ((pgprot_t) { (x) })
+#define pte_pgprot(x) __pgprot(pte_val(x) & ~_PFN_MASK)
+
+#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
+
+/*
+ * __pa()/__va() should be used only during mem init.
+ */
+#define __pa(x) PHYSADDR(x)
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+#ifdef CONFIG_FLATMEM
+
+static inline int pfn_valid(unsigned long pfn)
+{
+ /* avoid <linux/mm.h> include hell */
+ extern unsigned long max_mapnr;
+ unsigned long pfn_offset = ARCH_PFN_OFFSET;
+
+ return pfn >= pfn_offset && pfn < max_mapnr;
+}
+
+#endif
+
+#define virt_to_pfn(kaddr) PFN_DOWN(virt_to_phys((void *)(kaddr)))
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
+
+extern int __virt_addr_valid(volatile void *kaddr);
+#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (VM_READ | VM_WRITE | \
+ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_PAGE_H */
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
new file mode 100644
index 000000000000..34f15a6fb1e7
--- /dev/null
+++ b/arch/loongarch/include/asm/percpu.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_PERCPU_H
+#define __ASM_PERCPU_H
+
+#include <asm/cmpxchg.h>
+
+/* Use r21 for fast access */
+register unsigned long __my_cpu_offset __asm__("$r21");
+
+static inline void set_my_cpu_offset(unsigned long off)
+{
+ __my_cpu_offset = off;
+ csr_write64(off, PERCPU_BASE_KS);
+}
+#define __my_cpu_offset __my_cpu_offset
+
+#define PERCPU_OP(op, asm_op, c_op) \
+static inline unsigned long __percpu_##op(void *ptr, \
+ unsigned long val, int size) \
+{ \
+ unsigned long ret; \
+ \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__( \
+ "am"#asm_op".w" " %[ret], %[val], %[ptr] \n" \
+ : [ret] "=&r" (ret), [ptr] "+ZB"(*(u32 *)ptr) \
+ : [val] "r" (val)); \
+ break; \
+ case 8: \
+ __asm__ __volatile__( \
+ "am"#asm_op".d" " %[ret], %[val], %[ptr] \n" \
+ : [ret] "=&r" (ret), [ptr] "+ZB"(*(u64 *)ptr) \
+ : [val] "r" (val)); \
+ break; \
+ default: \
+ ret = 0; \
+ BUILD_BUG(); \
+ } \
+ \
+ return ret c_op val; \
+}
+
+PERCPU_OP(add, add, +)
+PERCPU_OP(and, and, &)
+PERCPU_OP(or, or, |)
+#undef PERCPU_OP
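+
+/*
+ * PERCPU_OP() generates __percpu_add(), __percpu_and() and __percpu_or().
+ * The AM* atomic returns the previous memory value in %[ret]; applying
+ * c_op once more yields the updated value that the helper returns.
+ */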
+
+static inline unsigned long __percpu_read(void *ptr, int size)
+{
+ unsigned long ret;
+
+ switch (size) {
+ case 1:
+ __asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr] \n"
+ : [ret] "=&r"(ret)
+ : [ptr] "r"(ptr)
+ : "memory");
+ break;
+ case 2:
+ __asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr] \n"
+ : [ret] "=&r"(ret)
+ : [ptr] "r"(ptr)
+ : "memory");
+ break;
+ case 4:
+ __asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr] \n"
+ : [ret] "=&r"(ret)
+ : [ptr] "r"(ptr)
+ : "memory");
+ break;
+ case 8:
+ __asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr] \n"
+ : [ret] "=&r"(ret)
+ : [ptr] "r"(ptr)
+ : "memory");
+ break;
+ default:
+ ret = 0;
+ BUILD_BUG();
+ }
+
+ return ret;
+}
+
+static inline void __percpu_write(void *ptr, unsigned long val, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("stx.b %[val], $r21, %[ptr] \n"
+ :
+ : [val] "r" (val), [ptr] "r" (ptr)
+ : "memory");
+ break;
+ case 2:
+ __asm__ __volatile__("stx.h %[val], $r21, %[ptr] \n"
+ :
+ : [val] "r" (val), [ptr] "r" (ptr)
+ : "memory");
+ break;
+ case 4:
+ __asm__ __volatile__("stx.w %[val], $r21, %[ptr] \n"
+ :
+ : [val] "r" (val), [ptr] "r" (ptr)
+ : "memory");
+ break;
+ case 8:
+ __asm__ __volatile__("stx.d %[val], $r21, %[ptr] \n"
+ :
+ : [val] "r" (val), [ptr] "r" (ptr)
+ : "memory");
+ break;
+ default:
+ BUILD_BUG();
+ }
+}
+
+static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+ int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
+
+ case 8:
+ return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val);
+
+ default:
+ BUILD_BUG();
+ }
+
+ return 0;
+}
+
+/* this_cpu_cmpxchg */
+#define _protect_cmpxchg_local(pcp, o, n) \
+({ \
+ typeof(*raw_cpu_ptr(&(pcp))) __ret; \
+ preempt_disable_notrace(); \
+ __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
+ preempt_enable_notrace(); \
+ __ret; \
+})
+
+#define _percpu_read(pcp) \
+({ \
+ typeof(pcp) __retval; \
+ __retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp)); \
+ __retval; \
+})
+
+#define _percpu_write(pcp, val) \
+do { \
+ __percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp)); \
+} while (0)
+
+#define _pcp_protect(operation, pcp, val) \
+({ \
+ typeof(pcp) __retval; \
+ preempt_disable_notrace(); \
+ __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \
+ (val), sizeof(pcp)); \
+ preempt_enable_notrace(); \
+ __retval; \
+})
+
+#define _percpu_add(pcp, val) \
+ _pcp_protect(__percpu_add, pcp, val)
+
+#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
+
+#define _percpu_and(pcp, val) \
+ _pcp_protect(__percpu_and, pcp, val)
+
+#define _percpu_or(pcp, val) \
+ _pcp_protect(__percpu_or, pcp, val)
+
+#define _percpu_xchg(pcp, val) ((typeof(pcp)) \
+ _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)))
+
+#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
+#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)
+
+#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
+#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)
+
+#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
+#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)
+
+#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
+#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
+
+#define this_cpu_read_1(pcp) _percpu_read(pcp)
+#define this_cpu_read_2(pcp) _percpu_read(pcp)
+#define this_cpu_read_4(pcp) _percpu_read(pcp)
+#define this_cpu_read_8(pcp) _percpu_read(pcp)
+
+#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
+
+#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
+#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
+
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ASM_PERCPU_H */
diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h
new file mode 100644
index 000000000000..dcb3b17053a8
--- /dev/null
+++ b/arch/loongarch/include/asm/perf_event.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LOONGARCH_PERF_EVENT_H__
+#define __LOONGARCH_PERF_EVENT_H__
+/* Nothing to show here; the file is required by linux/perf_event.h. */
+#endif /* __LOONGARCH_PERF_EVENT_H__ */
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
new file mode 100644
index 000000000000..b0a57b25c131
--- /dev/null
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PGALLOC_H
+#define _ASM_PGALLOC_H
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#define __HAVE_ARCH_PMD_ALLOC_ONE
+#define __HAVE_ARCH_PUD_ALLOC_ONE
+#include <asm-generic/pgalloc.h>
+
+static inline void pmd_populate_kernel(struct mm_struct *mm,
+ pmd_t *pmd, pte_t *pte)
+{
+ set_pmd(pmd, __pmd((unsigned long)pte));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
+{
+ set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
+}
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ set_pud(pud, __pud((unsigned long)pmd));
+}
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
+{
+ set_p4d(p4d, __p4d((unsigned long)pud));
+}
+
+#endif /* __PAGETABLE_PUD_FOLDED */
+
+extern void pagetable_init(void);
+
+/*
+ * Initialize a new pmd table with invalid pointers.
+ */
+extern void pmd_init(unsigned long page, unsigned long pagetable);
+
+/*
+ * Initialize a new pgd / pmd table with invalid pointers.
+ */
+extern void pgd_init(unsigned long page);
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+
+#define __pte_free_tlb(tlb, pte, address) \
+do { \
+ pgtable_pte_page_dtor(pte); \
+ tlb_remove_page((tlb), pte); \
+} while (0)
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pmd_t *pmd;
+ struct page *pg;
+
+ pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER);
+ if (!pg)
+ return NULL;
+
+ if (!pgtable_pmd_page_ctor(pg)) {
+ __free_pages(pg, PMD_ORDER);
+ return NULL;
+ }
+
+ pmd = (pmd_t *)page_address(pg);
+ pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
+ return pmd;
+}
+
+#define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x)
+
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pud_t *pud;
+
+ pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER);
+ if (pud)
+ pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
+ return pud;
+}
+
+#define __pud_free_tlb(tlb, x, addr) pud_free((tlb)->mm, x)
+
+#endif /* __PAGETABLE_PUD_FOLDED */
+
+#endif /* _ASM_PGALLOC_H */
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
new file mode 100644
index 000000000000..3badd112d9ab
--- /dev/null
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PGTABLE_BITS_H
+#define _ASM_PGTABLE_BITS_H
+
+/* Page table bits */
+#define _PAGE_VALID_SHIFT 0
+#define _PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */
+#define _PAGE_DIRTY_SHIFT 1
+#define _PAGE_PLV_SHIFT 2 /* 2~3, two bits */
+#define _CACHE_SHIFT 4 /* 4~5, two bits */
+#define _PAGE_GLOBAL_SHIFT 6
+#define _PAGE_HUGE_SHIFT 6 /* HUGE is a PMD bit */
+#define _PAGE_PRESENT_SHIFT 7
+#define _PAGE_WRITE_SHIFT 8
+#define _PAGE_MODIFIED_SHIFT 9
+#define _PAGE_PROTNONE_SHIFT 10
+#define _PAGE_SPECIAL_SHIFT 11
+#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */
+#define _PAGE_PFN_SHIFT 12
+#define _PAGE_PFN_END_SHIFT 48
+#define _PAGE_NO_READ_SHIFT 61
+#define _PAGE_NO_EXEC_SHIFT 62
+#define _PAGE_RPLV_SHIFT 63
+
+/* Used by software */
+#define _PAGE_PRESENT (_ULCAST_(1) << _PAGE_PRESENT_SHIFT)
+#define _PAGE_WRITE (_ULCAST_(1) << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED (_ULCAST_(1) << _PAGE_ACCESSED_SHIFT)
+#define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
+#define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
+#define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)
+
+/* Used by TLB hardware (placed in EntryLo*) */
+#define _PAGE_VALID (_ULCAST_(1) << _PAGE_VALID_SHIFT)
+#define _PAGE_DIRTY (_ULCAST_(1) << _PAGE_DIRTY_SHIFT)
+#define _PAGE_PLV (_ULCAST_(3) << _PAGE_PLV_SHIFT)
+#define _PAGE_GLOBAL (_ULCAST_(1) << _PAGE_GLOBAL_SHIFT)
+#define _PAGE_HUGE (_ULCAST_(1) << _PAGE_HUGE_SHIFT)
+#define _PAGE_HGLOBAL (_ULCAST_(1) << _PAGE_HGLOBAL_SHIFT)
+#define _PAGE_NO_READ (_ULCAST_(1) << _PAGE_NO_READ_SHIFT)
+#define _PAGE_NO_EXEC (_ULCAST_(1) << _PAGE_NO_EXEC_SHIFT)
+#define _PAGE_RPLV (_ULCAST_(1) << _PAGE_RPLV_SHIFT)
+#define _CACHE_MASK (_ULCAST_(3) << _CACHE_SHIFT)
+#define _PFN_SHIFT (PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT)
+
+#define _PAGE_USER (PLV_USER << _PAGE_PLV_SHIFT)
+#define _PAGE_KERN (PLV_KERN << _PAGE_PLV_SHIFT)
+
+#define _PFN_MASK (~((_ULCAST_(1) << (_PFN_SHIFT)) - 1) & \
+ ((_ULCAST_(1) << (_PAGE_PFN_END_SHIFT)) - 1))
+
+/*
+ * Cache attributes
+ */
+#ifndef _CACHE_SUC
+#define _CACHE_SUC (0<<_CACHE_SHIFT) /* Strong-ordered UnCached */
+#endif
+#ifndef _CACHE_CC
+#define _CACHE_CC (1<<_CACHE_SHIFT) /* Coherent Cached */
+#endif
+#ifndef _CACHE_WUC
+#define _CACHE_WUC (2<<_CACHE_SHIFT) /* Weak-ordered UnCached */
+#endif
+
+#define __READABLE (_PAGE_VALID)
+#define __WRITEABLE (_PAGE_DIRTY | _PAGE_WRITE)
+
+#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
+#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
+
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \
+ _PAGE_USER | _CACHE_CC)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
+ _PAGE_USER | _CACHE_CC)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _CACHE_CC)
+
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+ _PAGE_GLOBAL | _PAGE_KERN | _CACHE_CC)
+#define PAGE_KERNEL_SUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+ _PAGE_GLOBAL | _PAGE_KERN | _CACHE_SUC)
+#define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+ _PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC)
+
+#define __P000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ)
+#define __P001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
+#define __P010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
+#define __P011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
+#define __P100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+#define __P101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+#define __P110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+#define __P111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+
+#define __S000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ)
+#define __S001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
+#define __S010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE)
+#define __S011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE)
+#define __S100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+#define __S101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+#define __S110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE)
+#define __S111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE)
+
+#ifndef __ASSEMBLY__
+
+#define pgprot_noncached pgprot_noncached
+
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot = (prot & ~_CACHE_MASK) | _CACHE_SUC;
+
+ return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot = (prot & ~_CACHE_MASK) | _CACHE_WUC;
+
+ return __pgprot(prot);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_PGTABLE_BITS_H */
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
new file mode 100644
index 000000000000..5dc84d8f18d6
--- /dev/null
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -0,0 +1,565 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_PGTABLE_H
+#define _ASM_PGTABLE_H
+
+#include <linux/compiler.h>
+#include <asm/addrspace.h>
+#include <asm/pgtable-bits.h>
+
+#if CONFIG_PGTABLE_LEVELS == 2
+#include <asm-generic/pgtable-nopmd.h>
+#elif CONFIG_PGTABLE_LEVELS == 3
+#include <asm-generic/pgtable-nopud.h>
+#else
+#include <asm-generic/pgtable-nop4d.h>
+#endif
+
+#define PGD_ORDER 0
+#define PUD_ORDER 0
+#define PMD_ORDER 0
+#define PTE_ORDER 0
+
+#if CONFIG_PGTABLE_LEVELS == 2
+#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#elif CONFIG_PGTABLE_LEVELS == 3
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#elif CONFIG_PGTABLE_LEVELS == 4
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
+#endif
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT + PGD_ORDER - 3))
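+
+/*
+ * For instance, with 16KB pages and three page table levels this gives
+ * PMD_SHIFT = 25, PGDIR_SHIFT = 36 and VA_BITS = 47, i.e. 2048 entries
+ * per table level.
+ */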
+
+#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) >> 3)
+#if CONFIG_PGTABLE_LEVELS > 3
+#define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) >> 3)
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) >> 3)
+#endif
+#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) >> 3)
+
+#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/mm_types.h>
+#include <linux/mmzone.h>
+#include <asm/fixmap.h>
+#include <asm/io.h>
+
+struct mm_struct;
+struct vm_area_struct;
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero; used
+ * for zero-mapped memory areas etc..
+ */
+
+extern unsigned long empty_zero_page;
+extern unsigned long zero_page_mask;
+
+#define ZERO_PAGE(vaddr) \
+ (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
+#define __HAVE_COLOR_ZERO_PAGE
+
+/*
+ * TLB refill handlers may also map the vmalloc area into xkvrange.
+ * Avoid the first couple of pages so NULL pointer dereferences will
+ * still reliably trap.
+ */
+#define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
+#define MODULES_END (MODULES_VADDR + SZ_256M)
+
+#define VMALLOC_START MODULES_END
+#define VMALLOC_END \
+ (vm_map_base + \
+ min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE)
+
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pmd_ERROR(e) \
+ pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#endif
+#ifndef __PAGETABLE_PUD_FOLDED
+#define pud_ERROR(e) \
+ pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+#endif
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+extern pte_t invalid_pte_table[PTRS_PER_PTE];
+
+#ifndef __PAGETABLE_PUD_FOLDED
+
+typedef struct { unsigned long pud; } pud_t;
+#define pud_val(x) ((x).pud)
+#define __pud(x) ((pud_t) { (x) })
+
+extern pud_t invalid_pud_table[PTRS_PER_PUD];
+
+/*
+ * Empty pgd/p4d entries point to the invalid_pud_table.
+ */
+static inline int p4d_none(p4d_t p4d)
+{
+ return p4d_val(p4d) == (unsigned long)invalid_pud_table;
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+ return p4d_val(p4d) & ~PAGE_MASK;
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+ return p4d_val(p4d) != (unsigned long)invalid_pud_table;
+}
+
+static inline void p4d_clear(p4d_t *p4dp)
+{
+ p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
+}
+
+static inline pud_t *p4d_pgtable(p4d_t p4d)
+{
+ return (pud_t *)p4d_val(p4d);
+}
+
+static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
+{
+ *p4d = p4dval;
+}
+
+#define p4d_phys(p4d) virt_to_phys((void *)p4d_val(p4d))
+#define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
+
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) })
+
+extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
+
+/*
+ * Empty pud entries point to the invalid_pmd_table.
+ */
+static inline int pud_none(pud_t pud)
+{
+ return pud_val(pud) == (unsigned long)invalid_pmd_table;
+}
+
+static inline int pud_bad(pud_t pud)
+{
+ return pud_val(pud) & ~PAGE_MASK;
+}
+
+static inline int pud_present(pud_t pud)
+{
+ return pud_val(pud) != (unsigned long)invalid_pmd_table;
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+ pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
+}
+
+static inline pmd_t *pud_pgtable(pud_t pud)
+{
+ return (pmd_t *)pud_val(pud);
+}
+
+#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
+
+#define pud_phys(pud) virt_to_phys((void *)pud_val(pud))
+#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
+
+#endif
+
+/*
+ * Empty pmd entries point to the invalid_pte_table.
+ */
+static inline int pmd_none(pmd_t pmd)
+{
+ return pmd_val(pmd) == (unsigned long)invalid_pte_table;
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+ return (pmd_val(pmd) & ~PAGE_MASK);
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+ if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+ return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE));
+
+ return pmd_val(pmd) != (unsigned long)invalid_pte_table;
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
+}
+
+#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
+
+#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
+
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#define pmd_page_vaddr(pmd) pmd_val(pmd)
+
+extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
+extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define pte_pfn(x) ((unsigned long)(((x).pte & _PFN_MASK) >> _PFN_SHIFT))
+#define pfn_pte(pfn, prot) __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+
+/*
+ * Initialize a new pgd / pmd table with invalid pointers.
+ */
+extern void pgd_init(unsigned long page);
+extern void pud_init(unsigned long page, unsigned long pagetable);
+extern void pmd_init(unsigned long page, unsigned long pagetable);
+
+/*
+ * Non-present pages: high 40 bits are offset, next 8 bits type,
+ * low 16 bits zero.
+ */
+static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
+{ pte_t pte; pte_val(pte) = (type << 16) | (offset << 24); return pte; }
+
+#define __swp_type(x) (((x).val >> 16) & 0xff)
+#define __swp_offset(x) ((x).val >> 24)
+#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
+#define __swp_entry_to_pmd(x) ((pmd_t) { (x).val | _PAGE_HUGE })
+
+extern void paging_init(void);
+
+#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
+#define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_no_exec(pte) (pte_val(pte) & _PAGE_NO_EXEC)
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+ if (pte_val(pteval) & _PAGE_GLOBAL) {
+ pte_t *buddy = ptep_buddy(ptep);
+ /*
+ * Make sure the buddy is global too (if it's !none,
+ * it better already be global)
+ */
+#ifdef CONFIG_SMP
+ /*
+ * For SMP, multiple CPUs can race, so we need to do
+ * this atomically.
+ */
+ unsigned long page_global = _PAGE_GLOBAL;
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ "1:" __LL "%[tmp], %[buddy] \n"
+ " bnez %[tmp], 2f \n"
+ " or %[tmp], %[tmp], %[global] \n"
+ __SC "%[tmp], %[buddy] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ "2: \n"
+ __WEAK_LLSC_MB
+ : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
+ : [global] "r" (page_global));
+#else /* !CONFIG_SMP */
+ if (pte_none(*buddy))
+ pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
+ }
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+{
+ set_pte(ptep, pteval);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ /* Preserve global status for the pair */
+ if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
+ set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
+ else
+ set_pte_at(mm, addr, ptep, __pte(0));
+}
+
+#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
+#define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1)
+#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)
+
+extern pgd_t swapper_pg_dir[];
+extern pgd_t invalid_pg_dir[];
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_ACCESSED;
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_ACCESSED;
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= (_PAGE_DIRTY | _PAGE_MODIFIED);
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) |= (_PAGE_WRITE | _PAGE_DIRTY);
+ return pte;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
+ return pte;
+}
+
+static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; }
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_HUGE;
+ return pte;
+}
+
+#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
+static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
+static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; }
+#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
+
+#define pte_accessible pte_accessible
+static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
+{
+ if (pte_val(a) & _PAGE_PRESENT)
+ return true;
+
+ if ((pte_val(a) & _PAGE_PROTNONE) &&
+ atomic_read(&mm->tlb_flush_pending))
+ return true;
+
+ return false;
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+ (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
+}
+
+extern void __update_tlb(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep);
+
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ __update_tlb(vma, address, ptep);
+}
+
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ __update_tlb(vma, address, (pte_t *)pmdp);
+}
+
+#define kern_addr_valid(addr) (1)
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
+#define pmdp_establish generic_pmdp_establish
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+ pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
+ ((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
+ pmd_val(pmd) |= _PAGE_HUGE;
+
+ return pmd;
+}
+
+#define pmd_write pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_WRITE);
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+ pmd_val(pmd) |= (_PAGE_WRITE | _PAGE_DIRTY);
+ return pmd;
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
+ return pmd;
+}
+
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_MODIFIED);
+}
+
+static inline pmd_t pmd_mkclean(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
+ return pmd;
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+ pmd_val(pmd) |= (_PAGE_DIRTY | _PAGE_MODIFIED);
+ return pmd;
+}
+
+static inline int pmd_young(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~_PAGE_ACCESSED;
+ return pmd;
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+ pmd_val(pmd) |= _PAGE_ACCESSED;
+ return pmd;
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
+}
+
+static inline struct page *pmd_page(pmd_t pmd)
+{
+ if (pmd_trans_huge(pmd))
+ return pfn_to_page(pmd_pfn(pmd));
+
+ return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
+}
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
+ (pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
+ return pmd;
+}
+
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);
+
+ return pmd;
+}
+
+/*
+ * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
+ * different prototype.
+ */
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t old = *pmdp;
+
+ pmd_clear(pmdp);
+
+ return old;
+}
+
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifdef CONFIG_NUMA_BALANCING
+static inline long pte_protnone(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_PROTNONE);
+}
+
+static inline long pmd_protnone(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _PAGE_PROTNONE);
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+/*
+ * We provide our own get_unmapped area to cope with the virtual aliasing
+ * constraints placed on us by the cache architecture.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_PGTABLE_H */
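The swap-entry packing used by mk_swap_pte(), __swp_type() and __swp_offset() above is easy to sanity-check outside the kernel. A minimal sketch, assuming a 64-bit unsigned long as on LoongArch64; this is illustrative user-space code, not part of the patch:

/* Mirrors the layout of mk_swap_pte(): bits 0-15 zero, 16-23 type, 24+ offset. */
#include <assert.h>
#include <stdio.h>

static unsigned long swp_pack(unsigned long type, unsigned long offset)
{
	return (type << 16) | (offset << 24);
}

int main(void)
{
	unsigned long e = swp_pack(0x2a, 0x12345);

	assert(((e >> 16) & 0xff) == 0x2a);	/* what __swp_type() extracts */
	assert((e >> 24) == 0x12345);		/* what __swp_offset() extracts */
	printf("entry=%#lx\n", e);
	return 0;
}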
diff --git a/arch/loongarch/include/asm/prefetch.h b/arch/loongarch/include/asm/prefetch.h
new file mode 100644
index 000000000000..1672262a5e2e
--- /dev/null
+++ b/arch/loongarch/include/asm/prefetch.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_PREFETCH_H
+#define __ASM_PREFETCH_H
+
+#define Pref_Load 0
+#define Pref_Store 8
+
+#ifdef __ASSEMBLY__
+
+ .macro __pref hint addr
+#ifdef CONFIG_CPU_HAS_PREFETCH
+ preld \hint, \addr, 0
+#endif
+ .endm
+
+ .macro pref_load addr
+ __pref Pref_Load, \addr
+ .endm
+
+ .macro pref_store addr
+ __pref Pref_Store, \addr
+ .endm
+
+#endif
+
+#endif /* __ASM_PREFETCH_H */
diff --git a/arch/loongarch/include/asm/processor.h b/arch/loongarch/include/asm/processor.h
new file mode 100644
index 000000000000..1d63c934b289
--- /dev/null
+++ b/arch/loongarch/include/asm/processor.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PROCESSOR_H
+#define _ASM_PROCESSOR_H
+
+#include <linux/atomic.h>
+#include <linux/cpumask.h>
+#include <linux/sizes.h>
+
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/loongarch.h>
+#include <asm/vdso/processor.h>
+#include <uapi/asm/ptrace.h>
+#include <uapi/asm/sigcontext.h>
+
+#ifdef CONFIG_32BIT
+
+#define TASK_SIZE 0x80000000UL
+#define TASK_SIZE_MIN TASK_SIZE
+#define STACK_TOP_MAX TASK_SIZE
+
+#define TASK_IS_32BIT_ADDR 1
+
+#endif
+
+#ifdef CONFIG_64BIT
+
+#define TASK_SIZE32 0x100000000UL
+#define TASK_SIZE64 (0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits))
+
+#define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
+#define TASK_SIZE_MIN TASK_SIZE32
+#define STACK_TOP_MAX TASK_SIZE64
+
+#define TASK_SIZE_OF(tsk) \
+ (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
+
+#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)
+
+#endif
+
+#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)
+
+unsigned long stack_top(void);
+#define STACK_TOP stack_top()
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
+
+#define FPU_REG_WIDTH 256
+#define FPU_ALIGN __attribute__((aligned(32)))
+
+union fpureg {
+ __u32 val32[FPU_REG_WIDTH / 32];
+ __u64 val64[FPU_REG_WIDTH / 64];
+};
+
+#define FPR_IDX(width, idx) (idx)
+
+#define BUILD_FPR_ACCESS(width) \
+static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx) \
+{ \
+ return fpr->val##width[FPR_IDX(width, idx)]; \
+} \
+ \
+static inline void set_fpr##width(union fpureg *fpr, unsigned int idx, \
+ u##width val) \
+{ \
+ fpr->val##width[FPR_IDX(width, idx)] = val; \
+}
+
+BUILD_FPR_ACCESS(32)
+BUILD_FPR_ACCESS(64)
+
+struct loongarch_fpu {
+ unsigned int fcsr;
+ unsigned int vcsr;
+ uint64_t fcc; /* 8x8 */
+ union fpureg fpr[NUM_FPU_REGS];
+};
+
+#define INIT_CPUMASK { \
+ {0,} \
+}
+
+#define ARCH_MIN_TASKALIGN 32
+
+struct loongarch_vdso_info;
+
+/*
+ * If you change thread_struct remember to change the #defines below too!
+ */
+struct thread_struct {
+ /* Main processor registers. */
+ unsigned long reg01, reg03, reg22; /* ra sp fp */
+ unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */
+ unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */
+
+ /* CSR registers */
+ unsigned long csr_prmd;
+ unsigned long csr_crmd;
+ unsigned long csr_euen;
+ unsigned long csr_ecfg;
+ unsigned long csr_badvaddr; /* Last user fault */
+
+ /* Scratch registers */
+ unsigned long scr0;
+ unsigned long scr1;
+ unsigned long scr2;
+ unsigned long scr3;
+
+ /* Eflags register */
+ unsigned long eflags;
+
+ /* Other stuff associated with the thread. */
+ unsigned long trap_nr;
+ unsigned long error_code;
+ struct loongarch_vdso_info *vdso;
+
+ /*
+ * FPU & vector registers, must be at last because
+ * they are conditionally copied at fork().
+ */
+ struct loongarch_fpu fpu FPU_ALIGN;
+};
+
+#define INIT_THREAD { \
+ /* \
+ * Main processor registers \
+ */ \
+ .reg01 = 0, \
+ .reg03 = 0, \
+ .reg22 = 0, \
+ .reg23 = 0, \
+ .reg24 = 0, \
+ .reg25 = 0, \
+ .reg26 = 0, \
+ .reg27 = 0, \
+ .reg28 = 0, \
+ .reg29 = 0, \
+ .reg30 = 0, \
+ .reg31 = 0, \
+ .csr_crmd = 0, \
+ .csr_prmd = 0, \
+ .csr_euen = 0, \
+ .csr_ecfg = 0, \
+ .csr_badvaddr = 0, \
+ /* \
+ * Other stuff associated with the process \
+ */ \
+ .trap_nr = 0, \
+ .error_code = 0, \
+ /* \
+ * FPU & vector registers \
+ */ \
+ .fpu = { \
+ .fcsr = 0, \
+ .vcsr = 0, \
+ .fcc = 0, \
+ .fpr = {{{0,},},}, \
+ }, \
+}
+
+struct task_struct;
+
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while (0)
+
+enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT, IDLE_POLL};
+
+extern unsigned long boot_option_idle_override;
+/*
+ * Do necessary setup to start up a newly executed thread.
+ */
+extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);
+
+static inline void flush_thread(void)
+{
+}
+
+unsigned long __get_wchan(struct task_struct *p);
+
+#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
+ THREAD_SIZE - 32 - sizeof(struct pt_regs))
+#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->csr_era)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[3])
+#define KSTK_EUEN(tsk) (task_pt_regs(tsk)->csr_euen)
+#define KSTK_ECFG(tsk) (task_pt_regs(tsk)->csr_ecfg)
+
+#define return_address() ({__asm__ __volatile__("":::"$1"); __builtin_return_address(0);})
+
+#ifdef CONFIG_CPU_HAS_PREFETCH
+
+#define ARCH_HAS_PREFETCH
+#define prefetch(x) __builtin_prefetch((x), 0, 1)
+
+#define ARCH_HAS_PREFETCHW
+#define prefetchw(x) __builtin_prefetch((x), 1, 1)
+
+#endif
+
+#endif /* _ASM_PROCESSOR_H */
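For reference, BUILD_FPR_ACCESS(64) above expands, after FPR_IDX() substitution, to plain accessors like the following; the trailing usage line is an illustrative call, not taken from this patch:

static inline u64 get_fpr64(union fpureg *fpr, unsigned idx)
{
	return fpr->val64[idx];			/* FPR_IDX(64, idx) == idx */
}

static inline void set_fpr64(union fpureg *fpr, unsigned int idx, u64 val)
{
	fpr->val64[idx] = val;
}

/* e.g. low 64 bits of FP register n: get_fpr64(&fpu->fpr[n], 0) */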
diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
new file mode 100644
index 000000000000..17838c6b7ccd
--- /dev/null
+++ b/arch/loongarch/include/asm/ptrace.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PTRACE_H
+#define _ASM_PTRACE_H
+
+#include <asm/page.h>
+#include <asm/thread_info.h>
+#include <uapi/asm/ptrace.h>
+
+/*
+ * This struct defines the way the registers are stored on the stack during
+ * a system call/exception. If you add a register here, please also add it to
+ * regoffset_table[] in arch/loongarch/kernel/ptrace.c.
+ */
+struct pt_regs {
+ /* Main processor registers. */
+ unsigned long regs[32];
+
+ /* Original syscall arg0. */
+ unsigned long orig_a0;
+
+ /* Special CSR registers. */
+ unsigned long csr_era;
+ unsigned long csr_badvaddr;
+ unsigned long csr_crmd;
+ unsigned long csr_prmd;
+ unsigned long csr_euen;
+ unsigned long csr_ecfg;
+ unsigned long csr_estat;
+ unsigned long __last[0];
+} __aligned(8);
+
+static inline int regs_irqs_disabled(struct pt_regs *regs)
+{
+ return arch_irqs_disabled_flags(regs->csr_prmd);
+}
+
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->regs[3];
+}
+
+/*
+ * Don't use asm-generic/ptrace.h; it defines FP accessors that don't make
+ * sense on LoongArch. We would rather get an error if they are invoked.
+ */
+
+static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long val)
+{
+ regs->csr_era = val;
+}
+
+/* Query offset/name of register from its name/offset */
+extern int regs_query_register_offset(const char *name);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which register value is gotten.
+ * @offset: offset number of the register.
+ *
+ * regs_get_register() returns the value of a register. The @offset is the
+ * offset of the register within the struct pt_regs pointed to by @regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+static inline int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+ return ((addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
+ * specified by @regs. If the @n-th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
+
+struct task_struct;
+
+/*
+ * Does the process account for user or for system time?
+ */
+#define user_mode(regs) (((regs)->csr_prmd & PLV_MASK) == PLV_USER)
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ return regs->regs[4];
+}
+
+#define instruction_pointer(regs) ((regs)->csr_era)
+#define profile_pc(regs) instruction_pointer(regs)
+
+extern void die(const char *, struct pt_regs *) __noreturn;
+
+static inline void die_if_kernel(const char *str, struct pt_regs *regs)
+{
+ if (unlikely(!user_mode(regs)))
+ die(str, regs);
+}
+
+#define current_pt_regs() \
+({ \
+ unsigned long sp = (unsigned long)__builtin_frame_address(0); \
+ (struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1 - 32) - 1; \
+})
+
+/* Helpers for working with the user stack pointer */
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return regs->regs[3];
+}
+
+static inline void user_stack_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->regs[3] = val;
+}
+
+#endif /* _ASM_PTRACE_H */
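A minimal sketch of how regs_query_register_offset(), regs_get_register() and regs_get_kernel_stack_nth() are meant to be combined, e.g. from a probe handler. The handler, the register name and the assumption that an unknown name yields a negative value are illustrative, not taken from this patch:

static int example_probe_handler(struct pt_regs *regs)
{
	int off = regs_query_register_offset("r4");	/* assumed: < 0 if unknown */
	unsigned long val;

	if (off < 0)
		return off;

	val = regs_get_register(regs, off);
	pr_info("r4 = %#lx, stack slot 1 = %#lx\n",
		val, regs_get_kernel_stack_nth(regs, 1));
	return 0;
}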
diff --git a/arch/loongarch/include/asm/reboot.h b/arch/loongarch/include/asm/reboot.h
new file mode 100644
index 000000000000..51151749d8f0
--- /dev/null
+++ b/arch/loongarch/include/asm/reboot.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_REBOOT_H
+#define _ASM_REBOOT_H
+
+extern void (*pm_restart)(void);
+
+#endif /* _ASM_REBOOT_H */
diff --git a/arch/loongarch/include/asm/regdef.h b/arch/loongarch/include/asm/regdef.h
new file mode 100644
index 000000000000..49a374c2612c
--- /dev/null
+++ b/arch/loongarch/include/asm/regdef.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_REGDEF_H
+#define _ASM_REGDEF_H
+
+#define zero $r0 /* wired zero */
+#define ra $r1 /* return address */
+#define tp $r2
+#define sp $r3 /* stack pointer */
+#define a0 $r4 /* argument registers, a0/a1 reused as v0/v1 for return value */
+#define a1 $r5
+#define a2 $r6
+#define a3 $r7
+#define a4 $r8
+#define a5 $r9
+#define a6 $r10
+#define a7 $r11
+#define t0 $r12 /* caller saved */
+#define t1 $r13
+#define t2 $r14
+#define t3 $r15
+#define t4 $r16
+#define t5 $r17
+#define t6 $r18
+#define t7 $r19
+#define t8 $r20
+#define u0 $r21
+#define fp $r22 /* frame pointer */
+#define s0 $r23 /* callee saved */
+#define s1 $r24
+#define s2 $r25
+#define s3 $r26
+#define s4 $r27
+#define s5 $r28
+#define s6 $r29
+#define s7 $r30
+#define s8 $r31
+
+#endif /* _ASM_REGDEF_H */
diff --git a/arch/loongarch/include/asm/seccomp.h b/arch/loongarch/include/asm/seccomp.h
new file mode 100644
index 000000000000..31d6ab42e43e
--- /dev/null
+++ b/arch/loongarch/include/asm/seccomp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm/unistd.h>
+
+#include <asm-generic/seccomp.h>
+
+#ifdef CONFIG_32BIT
+# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_LOONGARCH32
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "loongarch32"
+#else
+# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_LOONGARCH64
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "loongarch64"
+#endif
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/loongarch/include/asm/serial.h b/arch/loongarch/include/asm/serial.h
new file mode 100644
index 000000000000..3fb550eb9115
--- /dev/null
+++ b/arch/loongarch/include/asm/serial.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM__SERIAL_H
+#define __ASM__SERIAL_H
+
+#define BASE_BAUD 0
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
+
+#endif /* __ASM__SERIAL_H */
diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h
new file mode 100644
index 000000000000..6d7d2a3e23dd
--- /dev/null
+++ b/arch/loongarch/include/asm/setup.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _LOONGARCH_SETUP_H
+#define _LOONGARCH_SETUP_H
+
+#include <linux/types.h>
+#include <uapi/asm/setup.h>
+
+#define VECSIZE 0x200
+
+extern unsigned long eentry;
+extern unsigned long tlbrentry;
+extern void cpu_cache_init(void);
+extern void per_cpu_trap_init(int cpu);
+extern void set_handler(unsigned long offset, void *addr, unsigned long len);
+extern void set_merr_handler(unsigned long offset, void *addr, unsigned long len);
+
+#endif /* _LOONGARCH_SETUP_H */
diff --git a/arch/loongarch/include/asm/shmparam.h b/arch/loongarch/include/asm/shmparam.h
new file mode 100644
index 000000000000..c9554f48d2df
--- /dev/null
+++ b/arch/loongarch/include/asm/shmparam.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_SHMPARAM_H
+#define _ASM_SHMPARAM_H
+
+#define __ARCH_FORCE_SHMLBA 1
+
+#define SHMLBA SZ_64K /* attach addr a multiple of this */
+
+#endif /* _ASM_SHMPARAM_H */
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
new file mode 100644
index 000000000000..551e1f37c705
--- /dev/null
+++ b/arch/loongarch/include/asm/smp.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/linkage.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+
+void loongson3_smp_setup(void);
+void loongson3_prepare_cpus(unsigned int max_cpus);
+void loongson3_boot_secondary(int cpu, struct task_struct *idle);
+void loongson3_init_secondary(void);
+void loongson3_smp_finish(void);
+void loongson3_send_ipi_single(int cpu, unsigned int action);
+void loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action);
+#ifdef CONFIG_HOTPLUG_CPU
+int loongson3_cpu_disable(void);
+void loongson3_cpu_die(unsigned int cpu);
+#endif
+
+#ifdef CONFIG_SMP
+
+static inline void plat_smp_setup(void)
+{
+ loongson3_smp_setup();
+}
+
+#else /* !CONFIG_SMP */
+
+static inline void plat_smp_setup(void) { }
+
+#endif /* !CONFIG_SMP */
+
+extern int smp_num_siblings;
+extern int num_processors;
+extern int disabled_cpus;
+extern cpumask_t cpu_sibling_map[];
+extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map[];
+
+static inline int raw_smp_processor_id(void)
+{
+#if defined(__VDSO__)
+ extern int vdso_smp_processor_id(void)
+ __compiletime_error("VDSO should not call smp_processor_id()");
+ return vdso_smp_processor_id();
+#else
+ return current_thread_info()->cpu;
+#endif
+}
+#define raw_smp_processor_id raw_smp_processor_id
+
+/* Map from cpu id to sequential logical cpu number. This map is the
+ * identity unless some cpus failed to come online. */
+extern int __cpu_number_map[NR_CPUS];
+#define cpu_number_map(cpu) __cpu_number_map[cpu]
+
+/* The reverse map from sequential logical cpu number to cpu id. */
+extern int __cpu_logical_map[NR_CPUS];
+#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+
+#define cpu_physical_id(cpu) cpu_logical_map(cpu)
+
+#define SMP_BOOT_CPU 0x1
+#define SMP_RESCHEDULE 0x2
+#define SMP_CALL_FUNCTION 0x4
+
+struct secondary_data {
+ unsigned long stack;
+ unsigned long thread_info;
+};
+extern struct secondary_data cpuboot_data;
+
+extern asmlinkage void smpboot_entry(void);
+
+extern void calculate_cpu_foreign_map(void);
+
+/*
+ * Generate IPI list text
+ */
+extern void show_ipi_list(struct seq_file *p, int prec);
+
+/*
+ * This function sends a 'reschedule' IPI to another CPU.
+ * It goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+static inline void smp_send_reschedule(int cpu)
+{
+ loongson3_send_ipi_single(cpu, SMP_RESCHEDULE);
+}
+
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+ loongson3_send_ipi_single(cpu, SMP_CALL_FUNCTION);
+}
+
+static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ loongson3_send_ipi_mask(mask, SMP_CALL_FUNCTION);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static inline int __cpu_disable(void)
+{
+ return loongson3_cpu_disable();
+}
+
+static inline void __cpu_die(unsigned int cpu)
+{
+ loongson3_cpu_die(cpu);
+}
+
+extern void play_dead(void);
+#endif
+
+#endif /* __ASM_SMP_H */
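The two maps above are intended to be inverses of each other for every CPU that came online; a small self-check sketch (the function name is illustrative):

static void example_check_cpu_maps(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int physid = cpu_logical_map(cpu);	/* logical -> physical id */

		/* physical id -> logical number should round-trip */
		WARN_ON(cpu_number_map(physid) != cpu);
	}
}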
diff --git a/arch/loongarch/include/asm/sparsemem.h b/arch/loongarch/include/asm/sparsemem.h
new file mode 100644
index 000000000000..3d18cdf1b069
--- /dev/null
+++ b/arch/loongarch/include/asm/sparsemem.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LOONGARCH_SPARSEMEM_H
+#define _LOONGARCH_SPARSEMEM_H
+
+#ifdef CONFIG_SPARSEMEM
+
+/*
+ * SECTION_SIZE_BITS 2^N: how big each section will be
+ * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
+ */
+#define SECTION_SIZE_BITS 29 /* 2^29 = Largest Huge Page Size */
+#define MAX_PHYSMEM_BITS 48
+
+#endif /* CONFIG_SPARSEMEM */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int memory_add_physaddr_to_nid(u64 addr);
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+#endif
+
+#define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS)
+
+#endif /* _LOONGARCH_SPARSEMEM_H */
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
new file mode 100644
index 000000000000..4ca953062b5b
--- /dev/null
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_STACKFRAME_H
+#define _ASM_STACKFRAME_H
+
+#include <linux/threads.h>
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/asm-offsets.h>
+#include <asm/loongarch.h>
+#include <asm/thread_info.h>
+
+/* Make the addition of cfi info a little easier. */
+ .macro cfi_rel_offset reg offset=0 docfi=0
+ .if \docfi
+ .cfi_rel_offset \reg, \offset
+ .endif
+ .endm
+
+ .macro cfi_st reg offset=0 docfi=0
+ cfi_rel_offset \reg, \offset, \docfi
+ LONG_S \reg, sp, \offset
+ .endm
+
+ .macro cfi_restore reg offset=0 docfi=0
+ .if \docfi
+ .cfi_restore \reg
+ .endif
+ .endm
+
+ .macro cfi_ld reg offset=0 docfi=0
+ LONG_L \reg, sp, \offset
+ cfi_restore \reg \offset \docfi
+ .endm
+
+ .macro BACKUP_T0T1
+ csrwr t0, EXCEPTION_KS0
+ csrwr t1, EXCEPTION_KS1
+ .endm
+
+ .macro RELOAD_T0T1
+ csrrd t0, EXCEPTION_KS0
+ csrrd t1, EXCEPTION_KS1
+ .endm
+
+ .macro SAVE_TEMP docfi=0
+ RELOAD_T0T1
+ cfi_st t0, PT_R12, \docfi
+ cfi_st t1, PT_R13, \docfi
+ cfi_st t2, PT_R14, \docfi
+ cfi_st t3, PT_R15, \docfi
+ cfi_st t4, PT_R16, \docfi
+ cfi_st t5, PT_R17, \docfi
+ cfi_st t6, PT_R18, \docfi
+ cfi_st t7, PT_R19, \docfi
+ cfi_st t8, PT_R20, \docfi
+ .endm
+
+ .macro SAVE_STATIC docfi=0
+ cfi_st s0, PT_R23, \docfi
+ cfi_st s1, PT_R24, \docfi
+ cfi_st s2, PT_R25, \docfi
+ cfi_st s3, PT_R26, \docfi
+ cfi_st s4, PT_R27, \docfi
+ cfi_st s5, PT_R28, \docfi
+ cfi_st s6, PT_R29, \docfi
+ cfi_st s7, PT_R30, \docfi
+ cfi_st s8, PT_R31, \docfi
+ .endm
+
+/*
+ * get_saved_sp returns the SP for the current CPU by looking in the
+ * kernelsp array for it. It stores the current sp in t0 and loads the
+ * new value in sp.
+ */
+ .macro get_saved_sp docfi=0
+ la.abs t1, kernelsp
+#ifdef CONFIG_SMP
+ csrrd t0, PERCPU_BASE_KS
+ LONG_ADD t1, t1, t0
+#endif
+ move t0, sp
+ .if \docfi
+ .cfi_register sp, t0
+ .endif
+ LONG_L sp, t1, 0
+ .endm
+
+ .macro set_saved_sp stackp temp temp2
+ la.abs \temp, kernelsp
+#ifdef CONFIG_SMP
+ LONG_ADD \temp, \temp, u0
+#endif
+ LONG_S \stackp, \temp, 0
+ .endm
+
+ .macro SAVE_SOME docfi=0
+ csrrd t1, LOONGARCH_CSR_PRMD
+ andi t1, t1, 0x3 /* extract pplv bit */
+ move t0, sp
+ beqz t1, 8f
+ /* Called from user mode, new stack. */
+ get_saved_sp docfi=\docfi
+8:
+ PTR_ADDI sp, sp, -PT_SIZE
+ .if \docfi
+ .cfi_def_cfa sp, 0
+ .endif
+ cfi_st t0, PT_R3, \docfi
+ cfi_rel_offset sp, PT_R3, \docfi
+ LONG_S zero, sp, PT_R0
+ csrrd t0, LOONGARCH_CSR_PRMD
+ LONG_S t0, sp, PT_PRMD
+ csrrd t0, LOONGARCH_CSR_CRMD
+ LONG_S t0, sp, PT_CRMD
+ csrrd t0, LOONGARCH_CSR_EUEN
+ LONG_S t0, sp, PT_EUEN
+ csrrd t0, LOONGARCH_CSR_ECFG
+ LONG_S t0, sp, PT_ECFG
+ csrrd t0, LOONGARCH_CSR_ESTAT
+ PTR_S t0, sp, PT_ESTAT
+ cfi_st ra, PT_R1, \docfi
+ cfi_st a0, PT_R4, \docfi
+ cfi_st a1, PT_R5, \docfi
+ cfi_st a2, PT_R6, \docfi
+ cfi_st a3, PT_R7, \docfi
+ cfi_st a4, PT_R8, \docfi
+ cfi_st a5, PT_R9, \docfi
+ cfi_st a6, PT_R10, \docfi
+ cfi_st a7, PT_R11, \docfi
+ csrrd ra, LOONGARCH_CSR_ERA
+ LONG_S ra, sp, PT_ERA
+ .if \docfi
+ .cfi_rel_offset ra, PT_ERA
+ .endif
+ cfi_st tp, PT_R2, \docfi
+ cfi_st fp, PT_R22, \docfi
+
+ /* Set thread_info if we're coming from user mode */
+ csrrd t0, LOONGARCH_CSR_PRMD
+ andi t0, t0, 0x3 /* extract pplv bit */
+ beqz t0, 9f
+
+ li.d tp, ~_THREAD_MASK
+ and tp, tp, sp
+ cfi_st u0, PT_R21, \docfi
+ csrrd u0, PERCPU_BASE_KS
+9:
+ .endm
+
+ .macro SAVE_ALL docfi=0
+ SAVE_SOME \docfi
+ SAVE_TEMP \docfi
+ SAVE_STATIC \docfi
+ .endm
+
+ .macro RESTORE_TEMP docfi=0
+ cfi_ld t0, PT_R12, \docfi
+ cfi_ld t1, PT_R13, \docfi
+ cfi_ld t2, PT_R14, \docfi
+ cfi_ld t3, PT_R15, \docfi
+ cfi_ld t4, PT_R16, \docfi
+ cfi_ld t5, PT_R17, \docfi
+ cfi_ld t6, PT_R18, \docfi
+ cfi_ld t7, PT_R19, \docfi
+ cfi_ld t8, PT_R20, \docfi
+ .endm
+
+ .macro RESTORE_STATIC docfi=0
+ cfi_ld s0, PT_R23, \docfi
+ cfi_ld s1, PT_R24, \docfi
+ cfi_ld s2, PT_R25, \docfi
+ cfi_ld s3, PT_R26, \docfi
+ cfi_ld s4, PT_R27, \docfi
+ cfi_ld s5, PT_R28, \docfi
+ cfi_ld s6, PT_R29, \docfi
+ cfi_ld s7, PT_R30, \docfi
+ cfi_ld s8, PT_R31, \docfi
+ .endm
+
+ .macro RESTORE_SOME docfi=0
+ LONG_L a0, sp, PT_PRMD
+ andi a0, a0, 0x3 /* extract pplv bit */
+ beqz a0, 8f
+ cfi_ld u0, PT_R21, \docfi
+8:
+ LONG_L a0, sp, PT_ERA
+ csrwr a0, LOONGARCH_CSR_ERA
+ LONG_L a0, sp, PT_PRMD
+ csrwr a0, LOONGARCH_CSR_PRMD
+ cfi_ld ra, PT_R1, \docfi
+ cfi_ld a0, PT_R4, \docfi
+ cfi_ld a1, PT_R5, \docfi
+ cfi_ld a2, PT_R6, \docfi
+ cfi_ld a3, PT_R7, \docfi
+ cfi_ld a4, PT_R8, \docfi
+ cfi_ld a5, PT_R9, \docfi
+ cfi_ld a6, PT_R10, \docfi
+ cfi_ld a7, PT_R11, \docfi
+ cfi_ld tp, PT_R2, \docfi
+ cfi_ld fp, PT_R22, \docfi
+ .endm
+
+ .macro RESTORE_SP_AND_RET docfi=0
+ cfi_ld sp, PT_R3, \docfi
+ ertn
+ .endm
+
+ .macro RESTORE_ALL_AND_RET docfi=0
+ RESTORE_STATIC \docfi
+ RESTORE_TEMP \docfi
+ RESTORE_SOME \docfi
+ RESTORE_SP_AND_RET \docfi
+ .endm
+
+#endif /* _ASM_STACKFRAME_H */
diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h
new file mode 100644
index 000000000000..26483e396ad1
--- /dev/null
+++ b/arch/loongarch/include/asm/stacktrace.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_STACKTRACE_H
+#define _ASM_STACKTRACE_H
+
+#include <asm/asm.h>
+#include <asm/ptrace.h>
+#include <asm/loongarch.h>
+#include <linux/stringify.h>
+
+#define STR_LONG_L __stringify(LONG_L)
+#define STR_LONG_S __stringify(LONG_S)
+#define STR_LONGSIZE __stringify(LONGSIZE)
+
+#define STORE_ONE_REG(r) \
+ STR_LONG_S " $r" __stringify(r)", %1, "STR_LONGSIZE"*"__stringify(r)"\n\t"
+
+#define CSRRD_ONE_REG(reg) \
+ __stringify(csrrd) " %0, "__stringify(reg)"\n\t"
+
+static __always_inline void prepare_frametrace(struct pt_regs *regs)
+{
+ __asm__ __volatile__(
+ /* Save $r1 */
+ STORE_ONE_REG(1)
+ /* Use $r1 to save PC */
+ "pcaddi $r1, 0\n\t"
+ STR_LONG_S " $r1, %0\n\t"
+ /* Restore $r1 */
+ STR_LONG_L " $r1, %1, "STR_LONGSIZE"\n\t"
+ STORE_ONE_REG(2)
+ STORE_ONE_REG(3)
+ STORE_ONE_REG(4)
+ STORE_ONE_REG(5)
+ STORE_ONE_REG(6)
+ STORE_ONE_REG(7)
+ STORE_ONE_REG(8)
+ STORE_ONE_REG(9)
+ STORE_ONE_REG(10)
+ STORE_ONE_REG(11)
+ STORE_ONE_REG(12)
+ STORE_ONE_REG(13)
+ STORE_ONE_REG(14)
+ STORE_ONE_REG(15)
+ STORE_ONE_REG(16)
+ STORE_ONE_REG(17)
+ STORE_ONE_REG(18)
+ STORE_ONE_REG(19)
+ STORE_ONE_REG(20)
+ STORE_ONE_REG(21)
+ STORE_ONE_REG(22)
+ STORE_ONE_REG(23)
+ STORE_ONE_REG(24)
+ STORE_ONE_REG(25)
+ STORE_ONE_REG(26)
+ STORE_ONE_REG(27)
+ STORE_ONE_REG(28)
+ STORE_ONE_REG(29)
+ STORE_ONE_REG(30)
+ STORE_ONE_REG(31)
+ : "=m" (regs->csr_era)
+ : "r" (regs->regs)
+ : "memory");
+ __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_BADV) : "=r" (regs->csr_badvaddr));
+ __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_CRMD) : "=r" (regs->csr_crmd));
+ __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_PRMD) : "=r" (regs->csr_prmd));
+ __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_EUEN) : "=r" (regs->csr_euen));
+ __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_ECFG) : "=r" (regs->csr_ecfg));
+ __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_ESTAT) : "=r" (regs->csr_estat));
+}
+
+#endif /* _ASM_STACKTRACE_H */
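A minimal sketch of using prepare_frametrace() to snapshot the caller's register state; show_regs() here merely stands in for whatever consumer walks or prints the result:

static void example_dump_current_context(void)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));		/* $r0 and the CSR fields start out zero */
	prepare_frametrace(&regs);		/* fills $r1-$r31, csr_era and the CSRs */
	show_regs(&regs);			/* illustrative consumer */
}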
diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/asm/string.h
new file mode 100644
index 000000000000..b07e60ded957
--- /dev/null
+++ b/arch/loongarch/include/asm/string.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_STRING_H
+#define _ASM_STRING_H
+
+extern void *memset(void *__s, int __c, size_t __count);
+extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+
+#endif /* _ASM_STRING_H */
diff --git a/arch/loongarch/include/asm/switch_to.h b/arch/loongarch/include/asm/switch_to.h
new file mode 100644
index 000000000000..2a8d04375574
--- /dev/null
+++ b/arch/loongarch/include/asm/switch_to.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_SWITCH_TO_H
+#define _ASM_SWITCH_TO_H
+
+#include <asm/cpu-features.h>
+#include <asm/fpu.h>
+
+struct task_struct;
+
+/**
+ * __switch_to - switch execution of a task
+ * @prev: The task previously executed.
+ * @next: The task to begin executing.
+ * @next_ti: task_thread_info(next).
+ *
+ * This function is used while scheduling to save the context of @prev and
+ * load the context of @next. Returns @prev.
+ */
+extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev,
+ struct task_struct *next, struct thread_info *next_ti);
+
+/*
+ * For newly created kernel threads switch_to() will return to
+ * ret_from_kernel_thread, newly created user threads to ret_from_fork.
+ * That is, everything following __switch_to() will be skipped for new threads.
+ * So everything that matters to new threads should be placed before __switch_to().
+ */
+#define switch_to(prev, next, last) \
+do { \
+ lose_fpu_inatomic(1, prev); \
+ (last) = __switch_to(prev, next, task_thread_info(next)); \
+} while (0)
+
+#endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/loongarch/include/asm/syscall.h b/arch/loongarch/include/asm/syscall.h
new file mode 100644
index 000000000000..e286dc58476e
--- /dev/null
+++ b/arch/loongarch/include/asm/syscall.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_SYSCALL_H
+#define __ASM_LOONGARCH_SYSCALL_H
+
+#include <linux/compiler.h>
+#include <uapi/linux/audit.h>
+#include <linux/elf-em.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/unistd.h>
+
+extern void *sys_call_table[];
+
+static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->regs[11];
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->regs[4] = regs->orig_a0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long error = regs->regs[4];
+
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->regs[4];
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->regs[4] = (long) error ? error : val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ args[0] = regs->orig_a0;
+ memcpy(&args[1], &regs->regs[5], 5 * sizeof(long));
+}
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+ return AUDIT_ARCH_LOONGARCH64;
+}
+
+static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
+{
+ return false;
+}
+
+#endif /* __ASM_LOONGARCH_SYSCALL_H */
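A hedged sketch of a tracer-style consumer of the helpers above: on LoongArch the syscall number lives in a7 (regs[11]) and the return value in a0 (regs[4]), which is exactly what the helpers encode. The reporting function itself is illustrative:

static void example_report_syscall(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];

	syscall_get_arguments(task, regs, args);	/* orig a0, then a1-a5 */
	pr_info("sys %ld(%#lx, %#lx, ...) = %ld\n",
		syscall_get_nr(task, regs), args[0], args[1],
		syscall_get_return_value(task, regs));
}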
diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h
new file mode 100644
index 000000000000..99beb11c2fa8
--- /dev/null
+++ b/arch/loongarch/include/asm/thread_info.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * thread_info.h: LoongArch low-level thread information
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <asm/processor.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ unsigned long flags; /* low level flags */
+ unsigned long tp_value; /* thread pointer */
+ __u32 cpu; /* current CPU */
+ int preempt_count; /* 0 => preemptible, <0 => BUG */
+ struct pt_regs *regs;
+ unsigned long syscall; /* syscall number */
+ unsigned long syscall_work; /* SYSCALL_WORK_ flags */
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+}
+
+/* How to get the thread information struct from C. */
+register struct thread_info *__current_thread_info __asm__("$r2");
+
+static inline struct thread_info *current_thread_info(void)
+{
+ return __current_thread_info;
+}
+
+register unsigned long current_stack_pointer __asm__("$r3");
+
+#endif /* !__ASSEMBLY__ */
+
+/* thread information allocation */
+#define THREAD_SIZE SZ_16K
+#define THREAD_MASK (THREAD_SIZE - 1UL)
+#define THREAD_SIZE_ORDER ilog2(THREAD_SIZE / PAGE_SIZE)
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_NOTIFY_RESUME 3 /* callback before returning to user */
+#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */
+#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
+#define TIF_NOHZ 6 /* in adaptive nohz mode */
+#define TIF_UPROBE 7 /* breakpointed or singlestepping */
+#define TIF_USEDFPU 8 /* FPU was used by this task this quantum (SMP) */
+#define TIF_USEDSIMD 9 /* SIMD has been used this quantum */
+#define TIF_MEMDIE 10 /* is terminating due to OOM killer */
+#define TIF_FIXADE 11 /* Fix address errors in software */
+#define TIF_LOGADE 12 /* Log address errors to syslog */
+#define TIF_32BIT_REGS 13 /* 32-bit general purpose registers */
+#define TIF_32BIT_ADDR 14 /* 32-bit address space */
+#define TIF_LOAD_WATCH 15 /* If set, load watch registers */
+#define TIF_SINGLESTEP 16 /* Single Step */
+#define TIF_LSX_CTX_LIVE 17 /* LSX context must be preserved */
+#define TIF_LASX_CTX_LIVE 18 /* LASX context must be preserved */
+
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL)
+#define _TIF_NOHZ (1<<TIF_NOHZ)
+#define _TIF_UPROBE (1<<TIF_UPROBE)
+#define _TIF_USEDFPU (1<<TIF_USEDFPU)
+#define _TIF_USEDSIMD (1<<TIF_USEDSIMD)
+#define _TIF_FIXADE (1<<TIF_FIXADE)
+#define _TIF_LOGADE (1<<TIF_LOGADE)
+#define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS)
+#define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
+#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
+#define _TIF_LSX_CTX_LIVE (1<<TIF_LSX_CTX_LIVE)
+#define _TIF_LASX_CTX_LIVE (1<<TIF_LASX_CTX_LIVE)
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_THREAD_INFO_H */
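Because THREAD_SIZE is a power of two and thread_info sits at the base of the kernel stack, the entry code (SAVE_SOME in stackframe.h above) can recover it by masking the stack pointer; the helper name below is illustrative:

static inline struct thread_info *example_ti_from_sp(unsigned long sp)
{
	/* same operation as "li.d tp, ~_THREAD_MASK; and tp, tp, sp" */
	return (struct thread_info *)(sp & ~THREAD_MASK);
}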
diff --git a/arch/loongarch/include/asm/time.h b/arch/loongarch/include/asm/time.h
new file mode 100644
index 000000000000..2eae219301d0
--- /dev/null
+++ b/arch/loongarch/include/asm/time.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_TIME_H
+#define _ASM_TIME_H
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <asm/loongarch.h>
+
+extern u64 cpu_clock_freq;
+extern u64 const_clock_freq;
+
+extern void sync_counter(void);
+
+static inline unsigned int calc_const_freq(void)
+{
+ unsigned int res;
+ unsigned int base_freq;
+ unsigned int cfm, cfd;
+
+ res = read_cpucfg(LOONGARCH_CPUCFG2);
+ if (!(res & CPUCFG2_LLFTP))
+ return 0;
+
+ base_freq = read_cpucfg(LOONGARCH_CPUCFG4);
+ res = read_cpucfg(LOONGARCH_CPUCFG5);
+ cfm = res & 0xffff;
+ cfd = (res >> 16) & 0xffff;
+
+ if (!base_freq || !cfm || !cfd)
+ return 0;
+
+ return (base_freq * cfm / cfd);
+}
+
+/*
+ * Initialize the calling CPU's timer interrupt as clockevent device
+ */
+extern int constant_clockevent_init(void);
+extern int constant_clocksource_init(void);
+
+static inline void clockevent_set_clock(struct clock_event_device *cd,
+ unsigned int clock)
+{
+ clockevents_calc_mult_shift(cd, clock, 4);
+}
+
+#endif /* _ASM_TIME_H */
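As a worked example of the formula in calc_const_freq() above, with made-up CPUCFG values: base_freq = 25000000 (25 MHz), cfm = 8 and cfd = 2 give 25000000 * 8 / 2 = 100000000, i.e. a 100 MHz constant counter frequency. If CPUCFG2_LLFTP is clear, or any of base_freq, cfm or cfd is zero, the function returns 0 instead.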
diff --git a/arch/loongarch/include/asm/timex.h b/arch/loongarch/include/asm/timex.h
new file mode 100644
index 000000000000..d3ed99a4fdbd
--- /dev/null
+++ b/arch/loongarch/include/asm/timex.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_TIMEX_H
+#define _ASM_TIMEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+
+/*
+ * Standard way to access the cycle counter.
+ * Currently only used on SMP for scheduling.
+ *
+ * We know that all SMP capable CPUs have cycle counters.
+ */
+
+typedef unsigned long cycles_t;
+
+#define get_cycles get_cycles
+
+static inline cycles_t get_cycles(void)
+{
+ return drdtime();
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_TIMEX_H */
diff --git a/arch/loongarch/include/asm/tlb.h b/arch/loongarch/include/asm/tlb.h
new file mode 100644
index 000000000000..4f629ae9d5a9
--- /dev/null
+++ b/arch/loongarch/include/asm/tlb.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_TLB_H
+#define __ASM_TLB_H
+
+#include <linux/mm_types.h>
+#include <asm/cpu-features.h>
+#include <asm/loongarch.h>
+
+/*
+ * TLB Invalidate Flush
+ */
+static inline void tlbclr(void)
+{
+ __asm__ __volatile__("tlbclr");
+}
+
+static inline void tlbflush(void)
+{
+ __asm__ __volatile__("tlbflush");
+}
+
+/*
+ * TLB R/W operations.
+ */
+static inline void tlb_probe(void)
+{
+ __asm__ __volatile__("tlbsrch");
+}
+
+static inline void tlb_read(void)
+{
+ __asm__ __volatile__("tlbrd");
+}
+
+static inline void tlb_write_indexed(void)
+{
+ __asm__ __volatile__("tlbwr");
+}
+
+static inline void tlb_write_random(void)
+{
+ __asm__ __volatile__("tlbfill");
+}
+
+enum invtlb_ops {
+ /* Invalidate all tlb */
+ INVTLB_ALL = 0x0,
+ /* Invalidate current tlb */
+ INVTLB_CURRENT_ALL = 0x1,
+ /* Invalidate all global=1 lines in current tlb */
+ INVTLB_CURRENT_GTRUE = 0x2,
+ /* Invalidate all global=0 lines in current tlb */
+ INVTLB_CURRENT_GFALSE = 0x3,
+ /* Invalidate global=0 and matched asid lines in current tlb */
+ INVTLB_GFALSE_AND_ASID = 0x4,
+ /* Invalidate addr with global=0 and matched asid in current tlb */
+ INVTLB_ADDR_GFALSE_AND_ASID = 0x5,
+ /* Invalidate addr with global=1 or matched asid in current tlb */
+ INVTLB_ADDR_GTRUE_OR_ASID = 0x6,
+ /* Invalidate matched gid in guest tlb */
+ INVGTLB_GID = 0x9,
+ /* Invalidate global=1, matched gid in guest tlb */
+ INVGTLB_GID_GTRUE = 0xa,
+ /* Invalidate global=0, matched gid in guest tlb */
+ INVGTLB_GID_GFALSE = 0xb,
+ /* Invalidate global=0, matched gid and asid in guest tlb */
+ INVGTLB_GID_GFALSE_ASID = 0xc,
+ /* Invalidate global=0, matched gid, asid and addr in guest tlb */
+ INVGTLB_GID_GFALSE_ASID_ADDR = 0xd,
+ /* Invalidate global=1, matched gid, asid and addr in guest tlb */
+ INVGTLB_GID_GTRUE_ASID_ADDR = 0xe,
+ /* Invalidate all gid gva-->gpa guest tlb */
+ INVGTLB_ALLGID_GVA_TO_GPA = 0x10,
+ /* Invalidate all gid gpa-->hpa tlb */
+ INVTLB_ALLGID_GPA_TO_HPA = 0x11,
+ /* Invalidate all gid tlb, including gva-->gpa and gpa-->hpa */
+ INVTLB_ALLGID = 0x12,
+ /* Invalidate matched gid gva-->gpa guest tlb */
+ INVGTLB_GID_GVA_TO_GPA = 0x13,
+ /* Invalidate matched gid gpa-->hpa tlb */
+ INVTLB_GID_GPA_TO_HPA = 0x14,
+ /* Invalidate matched gid tlb, including gva-->gpa and gpa-->hpa */
+ INVTLB_GID_ALL = 0x15,
+ /* Invalidate matched gid and addr gpa-->hpa tlb */
+ INVTLB_GID_ADDR = 0x16,
+};
+
+/*
+ * invtlb op info addr
+ * (0x1 << 26) | (0x24 << 20) | (0x13 << 15) |
+ * (addr << 10) | (info << 5) | op
+ */
+static inline void invtlb(u32 op, u32 info, u64 addr)
+{
+ __asm__ __volatile__(
+ "parse_r addr,%0\n\t"
+ "parse_r info,%1\n\t"
+ ".word ((0x6498000) | (addr << 10) | (info << 5) | %2)\n\t"
+ :
+ : "r"(addr), "r"(info), "i"(op)
+ :
+ );
+}
+
+static inline void invtlb_addr(u32 op, u32 info, u64 addr)
+{
+ __asm__ __volatile__(
+ "parse_r addr,%0\n\t"
+ ".word ((0x6498000) | (addr << 10) | (0 << 5) | %1)\n\t"
+ :
+ : "r"(addr), "i"(op)
+ :
+ );
+}
+
+static inline void invtlb_info(u32 op, u32 info, u64 addr)
+{
+ __asm__ __volatile__(
+ "parse_r info,%0\n\t"
+ ".word ((0x6498000) | (0 << 10) | (info << 5) | %1)\n\t"
+ :
+ : "r"(info), "i"(op)
+ :
+ );
+}
+
+static inline void invtlb_all(u32 op, u32 info, u64 addr)
+{
+ __asm__ __volatile__(
+ ".word ((0x6498000) | (0 << 10) | (0 << 5) | %0)\n\t"
+ :
+ : "i"(op)
+ :
+ );
+}
+
+/*
+ * LoongArch doesn't need any special per-pte or per-vma handling, except
+ * that we need to flush the cache for the area being unmapped.
+ */
+#define tlb_start_vma(tlb, vma) \
+ do { \
+ if (!(tlb)->fullmm) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+ } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+
+static void tlb_flush(struct mmu_gather *tlb);
+
+#define tlb_flush tlb_flush
+#include <asm-generic/tlb.h>
+
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ struct vm_area_struct vma;
+
+ vma.vm_mm = tlb->mm;
+ vma.vm_flags = 0;
+ if (tlb->fullmm) {
+ flush_tlb_mm(tlb->mm);
+ return;
+ }
+
+ flush_tlb_range(&vma, tlb->start, tlb->end);
+}
+
+extern void handle_tlb_load(void);
+extern void handle_tlb_store(void);
+extern void handle_tlb_modify(void);
+extern void handle_tlb_refill(void);
+extern void handle_tlb_protect(void);
+
+extern void dump_tlb_all(void);
+extern void dump_tlb_regs(void);
+
+#endif /* __ASM_TLB_H */
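The 0x6498000 literal hard-coded in the invtlb helpers above is simply the fixed part of the encoding described in their comment, pre-folded:

  (0x1  << 26) = 0x4000000
  (0x24 << 20) = 0x2400000
  (0x13 << 15) = 0x0098000
  OR'ed together 0x6498000

so only the addr, info and op fields vary per call. As an illustrative use (not taken from this patch), invtlb(INVTLB_ADDR_GFALSE_AND_ASID, asid, addr) would drop the single non-global entry matching that ASID and address.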
diff --git a/arch/loongarch/include/asm/tlbflush.h b/arch/loongarch/include/asm/tlbflush.h
new file mode 100644
index 000000000000..a0785e590681
--- /dev/null
+++ b/arch/loongarch/include/asm/tlbflush.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_TLBFLUSH_H
+#define __ASM_TLBFLUSH_H
+
+#include <linux/mm.h>
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb_all() flushes all processes TLB entries
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ */
+extern void local_flush_tlb_all(void);
+extern void local_flush_tlb_user(void);
+extern void local_flush_tlb_kernel(void);
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void local_flush_tlb_one(unsigned long vaddr);
+
+#ifdef CONFIG_SMP
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long, unsigned long);
+extern void flush_tlb_kernel_range(unsigned long, unsigned long);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_one(unsigned long vaddr);
+
+#else /* CONFIG_SMP */
+
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)
+#define flush_tlb_kernel_range(vmaddr, end) local_flush_tlb_kernel_range(vmaddr, end)
+#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
+#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr)
+
+#endif /* CONFIG_SMP */
+
+#endif /* __ASM_TLBFLUSH_H */
diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h
new file mode 100644
index 000000000000..66128dec0bf6
--- /dev/null
+++ b/arch/loongarch/include/asm/topology.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_TOPOLOGY_H
+#define __ASM_TOPOLOGY_H
+
+#include <linux/smp.h>
+
+#ifdef CONFIG_NUMA
+
+extern cpumask_t cpus_on_node[];
+
+#define cpumask_of_node(node) (&cpus_on_node[node])
+
+struct pci_bus;
+extern int pcibus_to_node(struct pci_bus *);
+
+#define cpumask_of_pcibus(bus) (cpu_online_mask)
+
+extern unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];
+
+void numa_set_distance(int from, int to, int distance);
+
+#define node_distance(from, to) (node_distances[(from)][(to)])
+
+#else
+#define pcibus_to_node(bus) 0
+#endif
+
+#ifdef CONFIG_SMP
+#define topology_physical_package_id(cpu) (cpu_data[cpu].package)
+#define topology_core_id(cpu) (cpu_data[cpu].core)
+#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
+#define topology_sibling_cpumask(cpu) (&cpu_sibling_map[cpu])
+#endif
+
+#include <asm-generic/topology.h>
+
+static inline void arch_fix_phys_package_id(int num, u32 slot) { }
+#endif /* __ASM_TOPOLOGY_H */
diff --git a/arch/loongarch/include/asm/types.h b/arch/loongarch/include/asm/types.h
new file mode 100644
index 000000000000..baf15a0dcf8b
--- /dev/null
+++ b/arch/loongarch/include/asm/types.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_TYPES_H
+#define _ASM_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
+
+#ifdef __ASSEMBLY__
+#define _ULCAST_
+#define _U64CAST_
+#else
+#define _ULCAST_ (unsigned long)
+#define _U64CAST_ (u64)
+#endif
+
+#endif /* _ASM_TYPES_H */
diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h
new file mode 100644
index 000000000000..217c6a3727b1
--- /dev/null
+++ b/arch/loongarch/include/asm/uaccess.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2007 Maciej W. Rozycki
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ */
+#ifndef _ASM_UACCESS_H
+#define _ASM_UACCESS_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/extable.h>
+#include <asm/pgtable.h>
+#include <asm-generic/extable.h>
+#include <asm-generic/access_ok.h>
+
+extern u64 __ua_limit;
+
+#define __UA_ADDR ".dword"
+#define __UA_LA "la.abs"
+#define __UA_LIMIT __ua_limit
+
+/*
+ * get_user: - Get a simple variable from user space.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_user(x, ptr) \
+({ \
+ const __typeof__(*(ptr)) __user *__p = (ptr); \
+ \
+ might_fault(); \
+ access_ok(__p, sizeof(*__p)) ? __get_user((x), __p) : \
+ ((x) = 0, -EFAULT); \
+})
+
+/*
+ * put_user: - Write a simple value into user space.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ \
+ might_fault(); \
+ access_ok(__p, sizeof(*__p)) ? __put_user((x), __p) : -EFAULT; \
+})
+
+/*
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define __get_user(x, ptr) \
+({ \
+ int __gu_err = 0; \
+ \
+ __chk_user_ptr(ptr); \
+ __get_user_common((x), sizeof(*(ptr)), ptr); \
+ __gu_err; \
+})
+
+/*
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define __put_user(x, ptr) \
+({ \
+ int __pu_err = 0; \
+ __typeof__(*(ptr)) __pu_val; \
+ \
+ __pu_val = (x); \
+ __chk_user_ptr(ptr); \
+ __put_user_common(ptr, sizeof(*(ptr))); \
+ __pu_err; \
+})
+
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct __user *)(x))
+
+#define __get_user_common(val, size, ptr) \
+do { \
+ switch (size) { \
+ case 1: __get_data_asm(val, "ld.b", ptr); break; \
+ case 2: __get_data_asm(val, "ld.h", ptr); break; \
+ case 4: __get_data_asm(val, "ld.w", ptr); break; \
+ case 8: __get_data_asm(val, "ld.d", ptr); break; \
+ default: BUILD_BUG(); break; \
+ } \
+} while (0)
+
+#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
+
+#define __get_data_asm(val, insn, ptr) \
+{ \
+ long __gu_tmp; \
+ \
+ __asm__ __volatile__( \
+ "1: " insn " %1, %2 \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ "3: li.w %0, %3 \n" \
+ " or %1, $r0, $r0 \n" \
+ " b 2b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "__UA_ADDR "\t1b, 3b \n" \
+ " .previous \n" \
+ : "+r" (__gu_err), "=r" (__gu_tmp) \
+ : "m" (__m(ptr)), "i" (-EFAULT)); \
+ \
+ (val) = (__typeof__(*(ptr))) __gu_tmp; \
+}
+
+#define __put_user_common(ptr, size) \
+do { \
+ switch (size) { \
+ case 1: __put_data_asm("st.b", ptr); break; \
+ case 2: __put_data_asm("st.h", ptr); break; \
+ case 4: __put_data_asm("st.w", ptr); break; \
+ case 8: __put_data_asm("st.d", ptr); break; \
+ default: BUILD_BUG(); break; \
+ } \
+} while (0)
+
+#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
+
+#define __put_data_asm(insn, ptr) \
+{ \
+ __asm__ __volatile__( \
+ "1: " insn " %z2, %1 # __put_user_asm\n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ "3: li.w %0, %3 \n" \
+ " b 2b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " " __UA_ADDR " 1b, 3b \n" \
+ " .previous \n" \
+ : "+r" (__pu_err), "=m" (__m(ptr)) \
+ : "Jr" (__pu_val), "i" (-EFAULT)); \
+}
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ int __gu_err = 0; \
+ \
+ __get_kernel_common(*((type *)(dst)), sizeof(type), \
+ (__force type *)(src)); \
+ if (unlikely(__gu_err)) \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ type __pu_val; \
+ int __pu_err = 0; \
+ \
+ __pu_val = *(__force type *)(src); \
+ __put_kernel_common(((type *)(dst)), sizeof(type)); \
+ if (unlikely(__pu_err)) \
+ goto err_label; \
+} while (0)
+
+extern unsigned long __copy_user(void *to, const void *from, __kernel_size_t n);
+
+static inline unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ return __copy_user(to, from, n);
+}
+
+static inline unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ return __copy_user(to, from, n);
+}
+
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+
+/*
+ * __clear_user: - Zero a block of memory in user space, with less checking.
+ * @addr: Destination address, in user space.
+ * @size: Number of bytes to zero.
+ *
+ * Zero a block of memory in user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be cleared.
+ * On success, this will be zero.
+ */
+extern unsigned long __clear_user(void __user *addr, __kernel_size_t size);
+
+#define clear_user(addr, n) \
+({ \
+ void __user *__cl_addr = (addr); \
+ unsigned long __cl_size = (n); \
+ if (__cl_size && access_ok(__cl_addr, __cl_size)) \
+ __cl_size = __clear_user(__cl_addr, __cl_size); \
+ __cl_size; \
+})
+
+extern long strncpy_from_user(char *to, const char __user *from, long n);
+extern long strnlen_user(const char __user *str, long n);
+
+#endif /* _ASM_UACCESS_H */
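For illustration only (not part of this patch): a minimal sketch of how a
hypothetical driver might use these primitives — get_user() for a checked
single-value copy, and __clear_user() once the caller has already performed
the access_ok() check itself. The function names are hypothetical.

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    /* Copy one int from user space; get_user() does the access_ok() check. */
    static int read_user_int(int __user *uptr, int *out)
    {
            int val;

            if (get_user(val, uptr))
                    return -EFAULT;

            *out = val;
            return 0;
    }

    /* Zero a user buffer the caller has already verified with access_ok(). */
    static unsigned long zero_verified(void __user *ubuf, unsigned long len)
    {
            return __clear_user(ubuf, len); /* returns bytes left uncleared */
    }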
diff --git a/arch/loongarch/include/asm/unistd.h b/arch/loongarch/include/asm/unistd.h
new file mode 100644
index 000000000000..cfddb0116a8c
--- /dev/null
+++ b/arch/loongarch/include/asm/unistd.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <uapi/asm/unistd.h>
+
+#define NR_syscalls (__NR_syscalls)
diff --git a/arch/loongarch/include/asm/vdso.h b/arch/loongarch/include/asm/vdso.h
new file mode 100644
index 000000000000..8f8a0f9a4953
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_VDSO_H
+#define __ASM_VDSO_H
+
+#include <linux/mm_types.h>
+#include <vdso/datapage.h>
+
+#include <asm/barrier.h>
+
+/*
+ * struct loongarch_vdso_info - Details of a VDSO image.
+ * @vdso: Pointer to VDSO image (page-aligned).
+ * @size: Size of the VDSO image (page-aligned).
+ * @offset_sigreturn: Offset of the rt_sigreturn() trampoline.
+ * @code_mapping: Special mapping structure for vdso code.
+ * @data_mapping: Special mapping structure for vdso data.
+ *
+ * This structure contains details of a VDSO image, including the image data
+ * and offsets of certain symbols required by the kernel. It is generated as
+ * part of the VDSO build process, aside from the mapping page array, which is
+ * populated at runtime.
+ */
+struct loongarch_vdso_info {
+ void *vdso;
+ unsigned long size;
+ unsigned long offset_sigreturn;
+ struct vm_special_mapping code_mapping;
+ struct vm_special_mapping data_mapping;
+};
+
+extern struct loongarch_vdso_info vdso_info;
+
+#endif /* __ASM_VDSO_H */
diff --git a/arch/loongarch/include/asm/vdso/clocksource.h b/arch/loongarch/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..13cd580d406d
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_CPU
+
+#endif /* __ASM_VDSOCLOCKSOURCE_H */
diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000000..7b2cd37641e2
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/unistd.h>
+#include <asm/vdso/vdso.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+static __always_inline long gettimeofday_fallback(
+ struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct __kernel_old_timeval *tv asm("a0") = _tv;
+ register struct timezone *tz asm("a1") = _tz;
+ register long nr asm("a7") = __NR_gettimeofday;
+ register long ret asm("a0");
+
+ asm volatile(
+ " syscall 0\n"
+ : "+r" (ret)
+ : "r" (nr), "r" (tv), "r" (tz)
+ : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
+ "$t8", "memory");
+
+ return ret;
+}
+
+static __always_inline long clock_gettime_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long nr asm("a7") = __NR_clock_gettime;
+ register long ret asm("a0");
+
+ asm volatile(
+ " syscall 0\n"
+ : "+r" (ret)
+ : "r" (nr), "r" (clkid), "r" (ts)
+ : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
+ "$t8", "memory");
+
+ return ret;
+}
+
+static __always_inline int clock_getres_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long nr asm("a7") = __NR_clock_getres;
+ register long ret asm("a0");
+
+ asm volatile(
+ " syscall 0\n"
+ : "+r" (ret)
+ : "r" (nr), "r" (clkid), "r" (ts)
+ : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
+ "$t8", "memory");
+
+ return ret;
+}
+
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+ const struct vdso_data *vd)
+{
+ uint64_t count;
+
+ __asm__ __volatile__(
+ " rdtime.d %0, $zero\n"
+ : "=r" (count));
+
+ return count;
+}
+
+static inline bool loongarch_vdso_hres_capable(void)
+{
+ return true;
+}
+#define __arch_vdso_hres_capable loongarch_vdso_hres_capable
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return get_vdso_data();
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
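For illustration only (not part of this patch): the same stable counter the
vDSO samples with rdtime.d can be read directly from a hypothetical userspace
program, e.g. for cheap cycle-level timing.

    #include <stdint.h>

    /* rdtime.d writes the 64-bit stable counter to %0; $zero discards the counter ID. */
    static inline uint64_t read_stable_counter(void)
    {
            uint64_t count;

            __asm__ __volatile__("rdtime.d %0, $zero" : "=r" (count));
            return count;
    }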
diff --git a/arch/loongarch/include/asm/vdso/processor.h b/arch/loongarch/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..ef5770b343a0
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/processor.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#define cpu_relax() barrier()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h
new file mode 100644
index 000000000000..5a01643a65b3
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/vdso.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <asm/asm.h>
+#include <asm/page.h>
+
+static inline unsigned long get_vdso_base(void)
+{
+ unsigned long addr;
+
+ __asm__(
+ " la.pcrel %0, _start\n"
+ : "=r" (addr)
+ :
+ :);
+
+ return addr;
+}
+
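+/* The vDSO data page is mapped immediately below the vDSO code pages. */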
+static inline const struct vdso_data *get_vdso_data(void)
+{
+ return (const struct vdso_data *)(get_vdso_base() - PAGE_SIZE);
+}
+
+#endif /* __ASSEMBLY__ */
diff --git a/arch/loongarch/include/asm/vdso/vsyscall.h b/arch/loongarch/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000000..5de615383a22
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/vsyscall.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+
+extern struct vdso_data *vdso_data;
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline
+struct vdso_data *__loongarch_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+#define __arch_get_k_vdso_data __loongarch_get_k_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/loongarch/include/asm/vermagic.h b/arch/loongarch/include/asm/vermagic.h
new file mode 100644
index 000000000000..8b47ccfe3aad
--- /dev/null
+++ b/arch/loongarch/include/asm/vermagic.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#define MODULE_PROC_FAMILY "LOONGARCH "
+
+#ifdef CONFIG_32BIT
+#define MODULE_KERNEL_TYPE "32BIT "
+#elif defined CONFIG_64BIT
+#define MODULE_KERNEL_TYPE "64BIT "
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+ MODULE_PROC_FAMILY MODULE_KERNEL_TYPE
+
+#endif /* _ASM_VERMAGIC_H */
diff --git a/arch/loongarch/include/asm/vmalloc.h b/arch/loongarch/include/asm/vmalloc.h
new file mode 100644
index 000000000000..965a0d41ac2d
--- /dev/null
+++ b/arch/loongarch/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_LOONGARCH_VMALLOC_H
+#define _ASM_LOONGARCH_VMALLOC_H
+
+#endif /* _ASM_LOONGARCH_VMALLOC_H */
diff --git a/drivers/staging/vme/Makefile b/arch/loongarch/include/uapi/asm/Kbuild
index cf2f686ccffe..4aa680ca2e5f 100644
--- a/drivers/staging/vme/Makefile
+++ b/arch/loongarch/include/uapi/asm/Kbuild
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += devices/
+generic-y += kvm_para.h
diff --git a/arch/loongarch/include/uapi/asm/auxvec.h b/arch/loongarch/include/uapi/asm/auxvec.h
new file mode 100644
index 000000000000..922d9e6b5058
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/auxvec.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_AUXVEC_H
+#define __ASM_AUXVEC_H
+
+/* Location of VDSO image. */
+#define AT_SYSINFO_EHDR 33
+
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
+#endif /* __ASM_AUXVEC_H */
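For illustration only (not part of this patch): a hypothetical userspace
program can locate the vDSO image through this auxiliary vector entry.

    #include <elf.h>
    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
            /* AT_SYSINFO_EHDR (33) holds the address of the vDSO ELF header. */
            unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

            printf("vDSO mapped at %#lx\n", vdso);
            return 0;
    }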
diff --git a/arch/loongarch/include/uapi/asm/bitsperlong.h b/arch/loongarch/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000000..00b4ba1e5cdf
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_LOONGARCH_BITSPERLONG_H
+#define __ASM_LOONGARCH_BITSPERLONG_H
+
+#define __BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_LOONGARCH_BITSPERLONG_H */
diff --git a/arch/loongarch/include/uapi/asm/break.h b/arch/loongarch/include/uapi/asm/break.h
new file mode 100644
index 000000000000..bb9b82ba59f2
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/break.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __UAPI_ASM_BREAK_H
+#define __UAPI_ASM_BREAK_H
+
+#define BRK_DEFAULT 0 /* Used as default */
+#define BRK_BUG 1 /* Used by BUG() */
+#define BRK_KDB 2 /* Used in KDB_ENTER() */
+#define BRK_MATHEMU 3 /* Used by FPU emulator */
+#define BRK_USERBP 4 /* User bp (used by debuggers) */
+#define BRK_SSTEPBP 5 /* User bp (used by debuggers) */
+#define BRK_OVERFLOW 6 /* Overflow check */
+#define BRK_DIVZERO 7 /* Divide by zero check */
+#define BRK_RANGE 8 /* Range error check */
+#define BRK_MULOVFL 9 /* Multiply overflow */
+#define BRK_KPROBE_BP 10 /* Kprobe break */
+#define BRK_KPROBE_SSTEPBP 11 /* Kprobe single step break */
+#define BRK_UPROBE_BP 12 /* See <asm/uprobes.h> */
+#define BRK_UPROBE_XOLBP 13 /* See <asm/uprobes.h> */
+
+#endif /* __UAPI_ASM_BREAK_H */
diff --git a/arch/loongarch/include/uapi/asm/byteorder.h b/arch/loongarch/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000000..b1722d890deb
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/byteorder.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_BYTEORDER_H
+#define _ASM_BYTEORDER_H
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _ASM_BYTEORDER_H */
diff --git a/arch/loongarch/include/uapi/asm/hwcap.h b/arch/loongarch/include/uapi/asm/hwcap.h
new file mode 100644
index 000000000000..8840b72fa8e8
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/hwcap.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_HWCAP_H
+#define _UAPI_ASM_HWCAP_H
+
+/* HWCAP flags */
+#define HWCAP_LOONGARCH_CPUCFG (1 << 0)
+#define HWCAP_LOONGARCH_LAM (1 << 1)
+#define HWCAP_LOONGARCH_UAL (1 << 2)
+#define HWCAP_LOONGARCH_FPU (1 << 3)
+#define HWCAP_LOONGARCH_LSX (1 << 4)
+#define HWCAP_LOONGARCH_LASX (1 << 5)
+#define HWCAP_LOONGARCH_CRC32 (1 << 6)
+#define HWCAP_LOONGARCH_COMPLEX (1 << 7)
+#define HWCAP_LOONGARCH_CRYPTO (1 << 8)
+#define HWCAP_LOONGARCH_LVZ (1 << 9)
+#define HWCAP_LOONGARCH_LBT_X86 (1 << 10)
+#define HWCAP_LOONGARCH_LBT_ARM (1 << 11)
+#define HWCAP_LOONGARCH_LBT_MIPS (1 << 12)
+
+#endif /* _UAPI_ASM_HWCAP_H */
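For illustration only (not part of this patch): userspace would normally test
these bits via the AT_HWCAP auxiliary vector entry, as in this hypothetical
feature probe.

    #include <elf.h>
    #include <stdio.h>
    #include <sys/auxv.h>
    #include <asm/hwcap.h>      /* the HWCAP_LOONGARCH_* bits defined above */

    int main(void)
    {
            unsigned long hwcap = getauxval(AT_HWCAP);

            if (hwcap & HWCAP_LOONGARCH_FPU)
                    printf("hardware FPU present\n");
            if (hwcap & HWCAP_LOONGARCH_LSX)
                    printf("128-bit LSX SIMD present\n");
            return 0;
    }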
diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000000..083193f4a5d5
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/ptrace.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _UAPI_ASM_PTRACE_H
+#define _UAPI_ASM_PTRACE_H
+
+#include <linux/types.h>
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#endif
+
+/*
+ * For PTRACE_{POKE,PEEK}USR. 0 - 31 are GPRs,
+ * 32 is syscall's original ARG0, 33 is PC, 34 is BADVADDR.
+ */
+#define GPR_BASE 0
+#define GPR_NUM 32
+#define GPR_END (GPR_BASE + GPR_NUM - 1)
+#define ARG0 (GPR_END + 1)
+#define PC (GPR_END + 2)
+#define BADVADDR (GPR_END + 3)
+
+#define NUM_FPU_REGS 32
+
+struct user_pt_regs {
+ /* Main processor registers. */
+ unsigned long regs[32];
+
+ /* Original syscall arg0. */
+ unsigned long orig_a0;
+
+ /* Special CSR registers. */
+ unsigned long csr_era;
+ unsigned long csr_badv;
+ unsigned long reserved[10];
+} __attribute__((aligned(8)));
+
+struct user_fp_state {
+ uint64_t fpr[32];
+ uint64_t fcc;
+ uint32_t fcsr;
+};
+
+#define PTRACE_SYSEMU 0x1f
+#define PTRACE_SYSEMU_SINGLESTEP 0x20
+
+#endif /* _UAPI_ASM_PTRACE_H */
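For illustration only (not part of this patch): a hypothetical debugger can
fetch this register block with the generic PTRACE_GETREGSET interface; the
program counter is reported via csr_era. The helper name is hypothetical.

    #include <elf.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <asm/ptrace.h>     /* struct user_pt_regs defined above */

    static int read_pc(pid_t pid, unsigned long *pc)
    {
            struct user_pt_regs regs;
            struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

            if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) < 0)
                    return -1;

            *pc = regs.csr_era; /* exception return address == user PC */
            return 0;
    }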
diff --git a/arch/loongarch/include/uapi/asm/reg.h b/arch/loongarch/include/uapi/asm/reg.h
new file mode 100644
index 000000000000..90ad910c60eb
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/reg.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Various register offset definitions for debuggers, core file
+ * examiners and whatnot.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __UAPI_ASM_LOONGARCH_REG_H
+#define __UAPI_ASM_LOONGARCH_REG_H
+
+#define LOONGARCH_EF_R0 0
+#define LOONGARCH_EF_R1 1
+#define LOONGARCH_EF_R2 2
+#define LOONGARCH_EF_R3 3
+#define LOONGARCH_EF_R4 4
+#define LOONGARCH_EF_R5 5
+#define LOONGARCH_EF_R6 6
+#define LOONGARCH_EF_R7 7
+#define LOONGARCH_EF_R8 8
+#define LOONGARCH_EF_R9 9
+#define LOONGARCH_EF_R10 10
+#define LOONGARCH_EF_R11 11
+#define LOONGARCH_EF_R12 12
+#define LOONGARCH_EF_R13 13
+#define LOONGARCH_EF_R14 14
+#define LOONGARCH_EF_R15 15
+#define LOONGARCH_EF_R16 16
+#define LOONGARCH_EF_R17 17
+#define LOONGARCH_EF_R18 18
+#define LOONGARCH_EF_R19 19
+#define LOONGARCH_EF_R20 20
+#define LOONGARCH_EF_R21 21
+#define LOONGARCH_EF_R22 22
+#define LOONGARCH_EF_R23 23
+#define LOONGARCH_EF_R24 24
+#define LOONGARCH_EF_R25 25
+#define LOONGARCH_EF_R26 26
+#define LOONGARCH_EF_R27 27
+#define LOONGARCH_EF_R28 28
+#define LOONGARCH_EF_R29 29
+#define LOONGARCH_EF_R30 30
+#define LOONGARCH_EF_R31 31
+
+/*
+ * Saved special registers
+ */
+#define LOONGARCH_EF_ORIG_A0 32
+#define LOONGARCH_EF_CSR_ERA 33
+#define LOONGARCH_EF_CSR_BADV 34
+#define LOONGARCH_EF_CSR_CRMD 35
+#define LOONGARCH_EF_CSR_PRMD 36
+#define LOONGARCH_EF_CSR_EUEN 37
+#define LOONGARCH_EF_CSR_ECFG 38
+#define LOONGARCH_EF_CSR_ESTAT 39
+
+#define LOONGARCH_EF_SIZE 320 /* size in bytes */
+
+#endif /* __UAPI_ASM_LOONGARCH_REG_H */
diff --git a/arch/loongarch/include/uapi/asm/sigcontext.h b/arch/loongarch/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000000..52e49b8bf4be
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/sigcontext.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _UAPI_ASM_SIGCONTEXT_H
+#define _UAPI_ASM_SIGCONTEXT_H
+
+#include <linux/types.h>
+#include <linux/posix_types.h>
+
+/* FP context was used */
+#define SC_USED_FP (1 << 0)
+/* Address error was due to memory load */
+#define SC_ADDRERR_RD (1 << 30)
+/* Address error was due to memory store */
+#define SC_ADDRERR_WR (1 << 31)
+
+struct sigcontext {
+ __u64 sc_pc;
+ __u64 sc_regs[32];
+ __u32 sc_flags;
+ __u64 sc_extcontext[0] __attribute__((__aligned__(16)));
+};
+
+#define CONTEXT_INFO_ALIGN 16
+struct sctx_info {
+ __u32 magic;
+ __u32 size;
+ __u64 padding; /* padding to 16 bytes */
+};
+
+/* FPU context */
+#define FPU_CTX_MAGIC 0x46505501
+#define FPU_CTX_ALIGN 8
+struct fpu_context {
+ __u64 regs[32];
+ __u64 fcc;
+ __u32 fcsr;
+};
+
+#endif /* _UAPI_ASM_SIGCONTEXT_H */
diff --git a/arch/loongarch/include/uapi/asm/signal.h b/arch/loongarch/include/uapi/asm/signal.h
new file mode 100644
index 000000000000..992d965aa13f
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/signal.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _UAPI_ASM_SIGNAL_H
+#define _UAPI_ASM_SIGNAL_H
+
+#define MINSIGSTKSZ 4096
+#define SIGSTKSZ 16384
+
+#include <asm-generic/signal.h>
+
+#endif
diff --git a/arch/loongarch/include/uapi/asm/ucontext.h b/arch/loongarch/include/uapi/asm/ucontext.h
new file mode 100644
index 000000000000..12577e22b1c7
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/ucontext.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __LOONGARCH_UAPI_ASM_UCONTEXT_H
+#define __LOONGARCH_UAPI_ASM_UCONTEXT_H
+
+/**
+ * struct ucontext - user context structure
+ * @uc_flags:    context flags
+ * @uc_link:     pointer to the context to resume when this one returns
+ * @uc_stack:    stack used by this context
+ * @uc_mcontext: holds basic processor state; extended processor state
+ *               records follow in its sc_extcontext area
+ * @uc_sigmask:  signals blocked while this context is active
+ */
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ sigset_t uc_sigmask;
+ /* There's some padding here to allow sigset_t to be expanded in the
+ * future. Though this is unlikely, other architectures put uc_sigmask
+ * at the end of this structure and explicitly state it can be
+ * expanded, so we didn't want to box ourselves in here. */
+ __u8 __unused[1024 / 8 - sizeof(sigset_t)];
+ /* We can't put uc_sigmask at the end of this structure because we need
+ * to be able to expand sigcontext in the future. For example, the
+ * vector ISA extension will almost certainly add ISA state. We want
+ * to ensure all user-visible ISA state can be saved and restored via a
+ * ucontext, so we're putting this at the end in order to allow for
+ * infinite extensibility. Since we know this will be extended and we
+ * assume sigset_t won't be extended an extreme amount, we're
+ * prioritizing this. */
+ struct sigcontext uc_mcontext;
+};
+
+#endif /* __LOONGARCH_UAPI_ASM_UCONTEXT_H */
diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..fcb668984f03
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/unistd.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
+
+#include <asm-generic/unistd.h>
diff --git a/arch/loongarch/kernel/.gitignore b/arch/loongarch/kernel/.gitignore
new file mode 100644
index 000000000000..bbb90f92d051
--- /dev/null
+++ b/arch/loongarch/kernel/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vmlinux.lds
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
new file mode 100644
index 000000000000..940de9173542
--- /dev/null
+++ b/arch/loongarch/kernel/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux/LoongArch kernel.
+#
+
+extra-y := head.o vmlinux.lds
+
+obj-y += cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
+ traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
+ elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o
+
+obj-$(CONFIG_ACPI) += acpi.o
+obj-$(CONFIG_EFI) += efi.o
+
+obj-$(CONFIG_CPU_HAS_FPU) += fpu.o
+
+obj-$(CONFIG_MODULES) += module.o module-sections.o
+
+obj-$(CONFIG_PROC_FS) += proc.o
+
+obj-$(CONFIG_SMP) += smp.o
+
+obj-$(CONFIG_NUMA) += numa.o
+
+CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/loongarch/kernel/access-helper.h b/arch/loongarch/kernel/access-helper.h
new file mode 100644
index 000000000000..4a35ca81bd08
--- /dev/null
+++ b/arch/loongarch/kernel/access-helper.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/uaccess.h>
+
+static inline int __get_inst(u32 *i, u32 *p, bool user)
+{
+ return user ? get_user(*i, (u32 __user *)p) : get_kernel_nofault(*i, p);
+}
+
+static inline int __get_addr(unsigned long *a, unsigned long *p, bool user)
+{
+ return user ? get_user(*a, (unsigned long __user *)p) : get_kernel_nofault(*a, p);
+}
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
new file mode 100644
index 000000000000..b16c3dea5eeb
--- /dev/null
+++ b/arch/loongarch/kernel/acpi.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
+ *
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/memblock.h>
+#include <linux/serial_core.h>
+#include <asm/io.h>
+#include <asm/numa.h>
+#include <asm/loongson.h>
+
+int acpi_disabled;
+EXPORT_SYMBOL(acpi_disabled);
+int acpi_noirq;
+int acpi_pci_disabled;
+EXPORT_SYMBOL(acpi_pci_disabled);
+int acpi_strict = 1; /* We have no workarounds on LoongArch */
+int num_processors;
+int disabled_cpus;
+enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;
+
+u64 acpi_saved_sp;
+
+#define MAX_CORE_PIC 256
+
+#define PREFIX "ACPI: "
+
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
+{
+	int id;
+
+	id = acpi_register_gsi(NULL, gsi, -1, -1);
+	if (id < 0)
+		return -EINVAL;
+
+	if (irqp != NULL)
+		*irqp = id;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
+
+int acpi_isa_irq_to_gsi(unsigned int isa_irq, u32 *gsi)
+{
+ if (gsi)
+ *gsi = isa_irq;
+ return 0;
+}
+
+/*
+ * success: return IRQ number (>=0)
+ * failure: return < 0
+ */
+int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
+{
+ struct irq_fwspec fwspec;
+
+ switch (gsi) {
+ case GSI_MIN_CPU_IRQ ... GSI_MAX_CPU_IRQ:
+ fwspec.fwnode = liointc_domain->fwnode;
+ fwspec.param[0] = gsi - GSI_MIN_CPU_IRQ;
+ fwspec.param_count = 1;
+
+ return irq_create_fwspec_mapping(&fwspec);
+
+ case GSI_MIN_LPC_IRQ ... GSI_MAX_LPC_IRQ:
+ if (!pch_lpc_domain)
+ return -EINVAL;
+
+ fwspec.fwnode = pch_lpc_domain->fwnode;
+ fwspec.param[0] = gsi - GSI_MIN_LPC_IRQ;
+ fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
+ fwspec.param_count = 2;
+
+ return irq_create_fwspec_mapping(&fwspec);
+
+ case GSI_MIN_PCH_IRQ ... GSI_MAX_PCH_IRQ:
+ if (!pch_pic_domain[0])
+ return -EINVAL;
+
+ fwspec.fwnode = pch_pic_domain[0]->fwnode;
+ fwspec.param[0] = gsi - GSI_MIN_PCH_IRQ;
+ fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
+ fwspec.param_count = 2;
+
+ return irq_create_fwspec_mapping(&fwspec);
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(acpi_register_gsi);
+
+void acpi_unregister_gsi(u32 gsi)
+{
+
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
+
+void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
+{
+ if (!phys || !size)
+ return NULL;
+
+ return early_memremap(phys, size);
+}
+
+void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
+{
+ if (!map || !size)
+ return;
+
+ early_memunmap(map, size);
+}
+
+void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+{
+ if (!memblock_is_memory(phys))
+ return ioremap(phys, size);
+ else
+ return ioremap_cache(phys, size);
+}
+
+void __init acpi_boot_table_init(void)
+{
+ /*
+ * If acpi_disabled, bail out
+ */
+ if (acpi_disabled)
+ return;
+
+ /*
+ * Initialize the ACPI boot-time table parser.
+ */
+ if (acpi_table_init()) {
+ disable_acpi();
+ return;
+ }
+}
+
+static int set_processor_mask(u32 id, u32 flags)
+{
+	int cpu, cpuid = id;
+
+	if (num_processors >= nr_cpu_ids) {
+		pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached. processor 0x%x ignored.\n",
+			nr_cpu_ids, cpuid);
+		return -ENODEV;
+	}
+
+ if (cpuid == loongson_sysconf.boot_cpu_id)
+ cpu = 0;
+ else
+ cpu = cpumask_next_zero(-1, cpu_present_mask);
+
+ if (flags & ACPI_MADT_ENABLED) {
+ num_processors++;
+ set_cpu_possible(cpu, true);
+ set_cpu_present(cpu, true);
+ __cpu_number_map[cpuid] = cpu;
+ __cpu_logical_map[cpu] = cpuid;
+ } else
+ disabled_cpus++;
+
+ return cpu;
+}
+
+static void __init acpi_process_madt(void)
+{
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ __cpu_number_map[i] = -1;
+ __cpu_logical_map[i] = -1;
+ }
+
+ loongson_sysconf.nr_cpus = num_processors;
+}
+
+int __init acpi_boot_init(void)
+{
+ /*
+ * If acpi_disabled, bail out
+ */
+ if (acpi_disabled)
+ return -1;
+
+ loongson_sysconf.boot_cpu_id = read_csr_cpuid();
+
+ /*
+ * Process the Multiple APIC Description Table (MADT), if present
+ */
+ acpi_process_madt();
+
+ /* Do not enable ACPI SPCR console by default */
+ acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI_NUMA
+
+static __init int setup_node(int pxm)
+{
+ return acpi_map_pxm_to_node(pxm);
+}
+
+/*
+ * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
+ * I/O localities since SRAT does not list them. I/O localities are
+ * not supported at this point.
+ */
+unsigned int numa_distance_cnt;
+
+static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
+{
+ return slit->locality_count;
+}
+
+void __init numa_set_distance(int from, int to, int distance)
+{
+ if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
+ pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
+ from, to, distance);
+ return;
+ }
+
+ node_distances[from][to] = distance;
+}
+
+/* Callback for Proximity Domain -> CPUID mapping */
+void __init
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
+{
+ int pxm, node;
+
+ if (srat_disabled())
+ return;
+ if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
+ bad_srat();
+ return;
+ }
+ if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
+ return;
+ pxm = pa->proximity_domain_lo;
+ if (acpi_srat_revision >= 2) {
+ pxm |= (pa->proximity_domain_hi[0] << 8);
+ pxm |= (pa->proximity_domain_hi[1] << 16);
+ pxm |= (pa->proximity_domain_hi[2] << 24);
+ }
+ node = setup_node(pxm);
+ if (node < 0) {
+ pr_err("SRAT: Too many proximity domains %x\n", pxm);
+ bad_srat();
+ return;
+ }
+
+ if (pa->apic_id >= CONFIG_NR_CPUS) {
+ pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
+ pxm, pa->apic_id, node);
+ return;
+ }
+
+ early_numa_add_cpu(pa->apic_id, node);
+
+ set_cpuid_to_node(pa->apic_id, node);
+ node_set(node, numa_nodes_parsed);
+ pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
+}
+
+void __init acpi_numa_arch_fixup(void) {}
+#endif
+
+void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
+{
+ memblock_reserve(addr, size);
+}
+
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+
+#include <acpi/processor.h>
+
+static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+{
+#ifdef CONFIG_ACPI_NUMA
+ int nid;
+
+ nid = acpi_get_node(handle);
+ if (nid != NUMA_NO_NODE) {
+ set_cpuid_to_node(physid, nid);
+ node_set(nid, numa_nodes_parsed);
+ set_cpu_numa_node(cpu, nid);
+ cpumask_set_cpu(cpu, cpumask_of_node(nid));
+ }
+#endif
+ return 0;
+}
+
+int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
+{
+ int cpu;
+
+ cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
+ if (cpu < 0) {
+ pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+ return cpu;
+ }
+
+ acpi_map_cpu2node(handle, cpu, physid);
+
+ *pcpu = cpu;
+
+ return 0;
+}
+EXPORT_SYMBOL(acpi_map_cpu);
+
+int acpi_unmap_cpu(int cpu)
+{
+#ifdef CONFIG_ACPI_NUMA
+ set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
+#endif
+ set_cpu_present(cpu, false);
+ num_processors--;
+
+ pr_info("cpu%d hot remove!\n", cpu);
+
+ return 0;
+}
+EXPORT_SYMBOL(acpi_unmap_cpu);
+
+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c
new file mode 100644
index 000000000000..bfb65eb2844f
--- /dev/null
+++ b/arch/loongarch/kernel/asm-offsets.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * asm-offsets.c: Calculate pt_regs and task_struct offsets.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kbuild.h>
+#include <linux/suspend.h>
+#include <asm/cpu-info.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+
+void output_ptreg_defines(void)
+{
+ COMMENT("LoongArch pt_regs offsets.");
+ OFFSET(PT_R0, pt_regs, regs[0]);
+ OFFSET(PT_R1, pt_regs, regs[1]);
+ OFFSET(PT_R2, pt_regs, regs[2]);
+ OFFSET(PT_R3, pt_regs, regs[3]);
+ OFFSET(PT_R4, pt_regs, regs[4]);
+ OFFSET(PT_R5, pt_regs, regs[5]);
+ OFFSET(PT_R6, pt_regs, regs[6]);
+ OFFSET(PT_R7, pt_regs, regs[7]);
+ OFFSET(PT_R8, pt_regs, regs[8]);
+ OFFSET(PT_R9, pt_regs, regs[9]);
+ OFFSET(PT_R10, pt_regs, regs[10]);
+ OFFSET(PT_R11, pt_regs, regs[11]);
+ OFFSET(PT_R12, pt_regs, regs[12]);
+ OFFSET(PT_R13, pt_regs, regs[13]);
+ OFFSET(PT_R14, pt_regs, regs[14]);
+ OFFSET(PT_R15, pt_regs, regs[15]);
+ OFFSET(PT_R16, pt_regs, regs[16]);
+ OFFSET(PT_R17, pt_regs, regs[17]);
+ OFFSET(PT_R18, pt_regs, regs[18]);
+ OFFSET(PT_R19, pt_regs, regs[19]);
+ OFFSET(PT_R20, pt_regs, regs[20]);
+ OFFSET(PT_R21, pt_regs, regs[21]);
+ OFFSET(PT_R22, pt_regs, regs[22]);
+ OFFSET(PT_R23, pt_regs, regs[23]);
+ OFFSET(PT_R24, pt_regs, regs[24]);
+ OFFSET(PT_R25, pt_regs, regs[25]);
+ OFFSET(PT_R26, pt_regs, regs[26]);
+ OFFSET(PT_R27, pt_regs, regs[27]);
+ OFFSET(PT_R28, pt_regs, regs[28]);
+ OFFSET(PT_R29, pt_regs, regs[29]);
+ OFFSET(PT_R30, pt_regs, regs[30]);
+ OFFSET(PT_R31, pt_regs, regs[31]);
+ OFFSET(PT_CRMD, pt_regs, csr_crmd);
+ OFFSET(PT_PRMD, pt_regs, csr_prmd);
+ OFFSET(PT_EUEN, pt_regs, csr_euen);
+ OFFSET(PT_ECFG, pt_regs, csr_ecfg);
+ OFFSET(PT_ESTAT, pt_regs, csr_estat);
+ OFFSET(PT_ERA, pt_regs, csr_era);
+ OFFSET(PT_BVADDR, pt_regs, csr_badvaddr);
+ OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ BLANK();
+}
+
+void output_task_defines(void)
+{
+ COMMENT("LoongArch task_struct offsets.");
+ OFFSET(TASK_STATE, task_struct, __state);
+ OFFSET(TASK_THREAD_INFO, task_struct, stack);
+ OFFSET(TASK_FLAGS, task_struct, flags);
+ OFFSET(TASK_MM, task_struct, mm);
+ OFFSET(TASK_PID, task_struct, pid);
+ DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
+ BLANK();
+}
+
+void output_thread_info_defines(void)
+{
+ COMMENT("LoongArch thread_info offsets.");
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_TP_VALUE, thread_info, tp_value);
+ OFFSET(TI_CPU, thread_info, cpu);
+ OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+ OFFSET(TI_REGS, thread_info, regs);
+ DEFINE(_THREAD_SIZE, THREAD_SIZE);
+ DEFINE(_THREAD_MASK, THREAD_MASK);
+ DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+ DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
+ BLANK();
+}
+
+void output_thread_defines(void)
+{
+ COMMENT("LoongArch specific thread_struct offsets.");
+ OFFSET(THREAD_REG01, task_struct, thread.reg01);
+ OFFSET(THREAD_REG03, task_struct, thread.reg03);
+ OFFSET(THREAD_REG22, task_struct, thread.reg22);
+ OFFSET(THREAD_REG23, task_struct, thread.reg23);
+ OFFSET(THREAD_REG24, task_struct, thread.reg24);
+ OFFSET(THREAD_REG25, task_struct, thread.reg25);
+ OFFSET(THREAD_REG26, task_struct, thread.reg26);
+ OFFSET(THREAD_REG27, task_struct, thread.reg27);
+ OFFSET(THREAD_REG28, task_struct, thread.reg28);
+ OFFSET(THREAD_REG29, task_struct, thread.reg29);
+ OFFSET(THREAD_REG30, task_struct, thread.reg30);
+ OFFSET(THREAD_REG31, task_struct, thread.reg31);
+ OFFSET(THREAD_CSRCRMD, task_struct,
+ thread.csr_crmd);
+ OFFSET(THREAD_CSRPRMD, task_struct,
+ thread.csr_prmd);
+ OFFSET(THREAD_CSREUEN, task_struct,
+ thread.csr_euen);
+ OFFSET(THREAD_CSRECFG, task_struct,
+ thread.csr_ecfg);
+
+ OFFSET(THREAD_SCR0, task_struct, thread.scr0);
+ OFFSET(THREAD_SCR1, task_struct, thread.scr1);
+ OFFSET(THREAD_SCR2, task_struct, thread.scr2);
+ OFFSET(THREAD_SCR3, task_struct, thread.scr3);
+
+ OFFSET(THREAD_EFLAGS, task_struct, thread.eflags);
+
+ OFFSET(THREAD_FPU, task_struct, thread.fpu);
+
+ OFFSET(THREAD_BVADDR, task_struct, \
+ thread.csr_badvaddr);
+ OFFSET(THREAD_ECODE, task_struct, \
+ thread.error_code);
+ OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr);
+ BLANK();
+}
+
+void output_thread_fpu_defines(void)
+{
+ OFFSET(THREAD_FPR0, loongarch_fpu, fpr[0]);
+ OFFSET(THREAD_FPR1, loongarch_fpu, fpr[1]);
+ OFFSET(THREAD_FPR2, loongarch_fpu, fpr[2]);
+ OFFSET(THREAD_FPR3, loongarch_fpu, fpr[3]);
+ OFFSET(THREAD_FPR4, loongarch_fpu, fpr[4]);
+ OFFSET(THREAD_FPR5, loongarch_fpu, fpr[5]);
+ OFFSET(THREAD_FPR6, loongarch_fpu, fpr[6]);
+ OFFSET(THREAD_FPR7, loongarch_fpu, fpr[7]);
+ OFFSET(THREAD_FPR8, loongarch_fpu, fpr[8]);
+ OFFSET(THREAD_FPR9, loongarch_fpu, fpr[9]);
+ OFFSET(THREAD_FPR10, loongarch_fpu, fpr[10]);
+ OFFSET(THREAD_FPR11, loongarch_fpu, fpr[11]);
+ OFFSET(THREAD_FPR12, loongarch_fpu, fpr[12]);
+ OFFSET(THREAD_FPR13, loongarch_fpu, fpr[13]);
+ OFFSET(THREAD_FPR14, loongarch_fpu, fpr[14]);
+ OFFSET(THREAD_FPR15, loongarch_fpu, fpr[15]);
+ OFFSET(THREAD_FPR16, loongarch_fpu, fpr[16]);
+ OFFSET(THREAD_FPR17, loongarch_fpu, fpr[17]);
+ OFFSET(THREAD_FPR18, loongarch_fpu, fpr[18]);
+ OFFSET(THREAD_FPR19, loongarch_fpu, fpr[19]);
+ OFFSET(THREAD_FPR20, loongarch_fpu, fpr[20]);
+ OFFSET(THREAD_FPR21, loongarch_fpu, fpr[21]);
+ OFFSET(THREAD_FPR22, loongarch_fpu, fpr[22]);
+ OFFSET(THREAD_FPR23, loongarch_fpu, fpr[23]);
+ OFFSET(THREAD_FPR24, loongarch_fpu, fpr[24]);
+ OFFSET(THREAD_FPR25, loongarch_fpu, fpr[25]);
+ OFFSET(THREAD_FPR26, loongarch_fpu, fpr[26]);
+ OFFSET(THREAD_FPR27, loongarch_fpu, fpr[27]);
+ OFFSET(THREAD_FPR28, loongarch_fpu, fpr[28]);
+ OFFSET(THREAD_FPR29, loongarch_fpu, fpr[29]);
+ OFFSET(THREAD_FPR30, loongarch_fpu, fpr[30]);
+ OFFSET(THREAD_FPR31, loongarch_fpu, fpr[31]);
+
+ OFFSET(THREAD_FCSR, loongarch_fpu, fcsr);
+ OFFSET(THREAD_FCC, loongarch_fpu, fcc);
+ OFFSET(THREAD_VCSR, loongarch_fpu, vcsr);
+ BLANK();
+}
+
+void output_mm_defines(void)
+{
+ COMMENT("Size of struct page");
+ DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
+ BLANK();
+ COMMENT("Linux mm_struct offsets.");
+ OFFSET(MM_USERS, mm_struct, mm_users);
+ OFFSET(MM_PGD, mm_struct, pgd);
+ OFFSET(MM_CONTEXT, mm_struct, context);
+ BLANK();
+ DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
+ DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
+ DEFINE(_PTE_T_SIZE, sizeof(pte_t));
+ BLANK();
+ DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
+#ifndef __PAGETABLE_PMD_FOLDED
+ DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
+#endif
+ DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
+ BLANK();
+ DEFINE(_PGD_ORDER, PGD_ORDER);
+#ifndef __PAGETABLE_PMD_FOLDED
+ DEFINE(_PMD_ORDER, PMD_ORDER);
+#endif
+ DEFINE(_PTE_ORDER, PTE_ORDER);
+ BLANK();
+ DEFINE(_PMD_SHIFT, PMD_SHIFT);
+ DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
+ BLANK();
+ DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
+ DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
+ DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
+ BLANK();
+ DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
+ DEFINE(_PAGE_SIZE, PAGE_SIZE);
+ BLANK();
+}
+
+void output_sc_defines(void)
+{
+ COMMENT("Linux sigcontext offsets.");
+ OFFSET(SC_REGS, sigcontext, sc_regs);
+ OFFSET(SC_PC, sigcontext, sc_pc);
+ BLANK();
+}
+
+void output_signal_defines(void)
+{
+ COMMENT("Linux signal numbers.");
+ DEFINE(_SIGHUP, SIGHUP);
+ DEFINE(_SIGINT, SIGINT);
+ DEFINE(_SIGQUIT, SIGQUIT);
+ DEFINE(_SIGILL, SIGILL);
+ DEFINE(_SIGTRAP, SIGTRAP);
+ DEFINE(_SIGIOT, SIGIOT);
+ DEFINE(_SIGABRT, SIGABRT);
+ DEFINE(_SIGFPE, SIGFPE);
+ DEFINE(_SIGKILL, SIGKILL);
+ DEFINE(_SIGBUS, SIGBUS);
+ DEFINE(_SIGSEGV, SIGSEGV);
+ DEFINE(_SIGSYS, SIGSYS);
+ DEFINE(_SIGPIPE, SIGPIPE);
+ DEFINE(_SIGALRM, SIGALRM);
+ DEFINE(_SIGTERM, SIGTERM);
+ DEFINE(_SIGUSR1, SIGUSR1);
+ DEFINE(_SIGUSR2, SIGUSR2);
+ DEFINE(_SIGCHLD, SIGCHLD);
+ DEFINE(_SIGPWR, SIGPWR);
+ DEFINE(_SIGWINCH, SIGWINCH);
+ DEFINE(_SIGURG, SIGURG);
+ DEFINE(_SIGIO, SIGIO);
+ DEFINE(_SIGSTOP, SIGSTOP);
+ DEFINE(_SIGTSTP, SIGTSTP);
+ DEFINE(_SIGCONT, SIGCONT);
+ DEFINE(_SIGTTIN, SIGTTIN);
+ DEFINE(_SIGTTOU, SIGTTOU);
+ DEFINE(_SIGVTALRM, SIGVTALRM);
+ DEFINE(_SIGPROF, SIGPROF);
+ DEFINE(_SIGXCPU, SIGXCPU);
+ DEFINE(_SIGXFSZ, SIGXFSZ);
+ BLANK();
+}
+
+#ifdef CONFIG_SMP
+void output_smpboot_defines(void)
+{
+ COMMENT("Linux smp cpu boot offsets.");
+ OFFSET(CPU_BOOT_STACK, secondary_data, stack);
+ OFFSET(CPU_BOOT_TINFO, secondary_data, thread_info);
+ BLANK();
+}
+#endif
diff --git a/arch/loongarch/kernel/cacheinfo.c b/arch/loongarch/kernel/cacheinfo.c
new file mode 100644
index 000000000000..8c9fe29e98f0
--- /dev/null
+++ b/arch/loongarch/kernel/cacheinfo.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LoongArch cacheinfo support
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/cacheinfo.h>
+
+/* Populates leaf and increments to next leaf */
+#define populate_cache(cache, leaf, c_level, c_type) \
+do { \
+ leaf->type = c_type; \
+ leaf->level = c_level; \
+ leaf->coherency_line_size = c->cache.linesz; \
+ leaf->number_of_sets = c->cache.sets; \
+ leaf->ways_of_associativity = c->cache.ways; \
+ leaf->size = c->cache.linesz * c->cache.sets * \
+ c->cache.ways; \
+ leaf++; \
+} while (0)
+
+int init_cache_level(unsigned int cpu)
+{
+ struct cpuinfo_loongarch *c = &current_cpu_data;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ int levels = 0, leaves = 0;
+
+ /*
+ * If Dcache is not set, we assume the cache structures
+ * are not properly initialized.
+ */
+ if (c->dcache.waysize)
+ levels += 1;
+ else
+ return -ENOENT;
+
+ leaves += (c->icache.waysize) ? 2 : 1;
+
+ if (c->vcache.waysize) {
+ levels++;
+ leaves++;
+ }
+
+ if (c->scache.waysize) {
+ levels++;
+ leaves++;
+ }
+
+ if (c->tcache.waysize) {
+ levels++;
+ leaves++;
+ }
+
+ this_cpu_ci->num_levels = levels;
+ this_cpu_ci->num_leaves = leaves;
+ return 0;
+}
+
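+/* L1 (I/D) and L2 caches are private to a core; higher levels are shared. */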
+static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
+ struct cacheinfo *sib_leaf)
+{
+ return !((this_leaf->level == 1) || (this_leaf->level == 2));
+}
+
+static void cache_cpumap_setup(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf, *sib_leaf;
+ unsigned int index;
+
+ for (index = 0; index < this_cpu_ci->num_leaves; index++) {
+ unsigned int i;
+
+ this_leaf = this_cpu_ci->info_list + index;
+ /* skip if shared_cpu_map is already populated */
+ if (!cpumask_empty(&this_leaf->shared_cpu_map))
+ continue;
+
+ cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+ for_each_online_cpu(i) {
+ struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
+
+ if (i == cpu || !sib_cpu_ci->info_list)
+ continue;/* skip if itself or no cacheinfo */
+ sib_leaf = sib_cpu_ci->info_list + index;
+ if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+ cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ }
+ }
+ }
+}
+
+int populate_cache_leaves(unsigned int cpu)
+{
+ int level = 1;
+ struct cpuinfo_loongarch *c = &current_cpu_data;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+
+ if (c->icache.waysize) {
+ populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
+ populate_cache(icache, this_leaf, level++, CACHE_TYPE_INST);
+ } else {
+ populate_cache(dcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
+ }
+
+ if (c->vcache.waysize)
+ populate_cache(vcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
+
+ if (c->scache.waysize)
+ populate_cache(scache, this_leaf, level++, CACHE_TYPE_UNIFIED);
+
+ if (c->tcache.waysize)
+ populate_cache(tcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
+
+ cache_cpumap_setup(cpu);
+ this_cpu_ci->cpu_map_populated = true;
+
+ return 0;
+}
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
new file mode 100644
index 000000000000..6c87ea36b257
--- /dev/null
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Processor capabilities determination functions.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/export.h>
+#include <linux/printk.h>
+#include <linux/uaccess.h>
+
+#include <asm/cpu-features.h>
+#include <asm/elf.h>
+#include <asm/fpu.h>
+#include <asm/loongarch.h>
+#include <asm/pgtable-bits.h>
+#include <asm/setup.h>
+
+/* Hardware capabilities */
+unsigned int elf_hwcap __read_mostly;
+EXPORT_SYMBOL_GPL(elf_hwcap);
+
+/*
+ * Determine the FCSR mask for FPU hardware.
+ */
+static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_loongarch *c)
+{
+ unsigned long sr, mask, fcsr, fcsr0, fcsr1;
+
+ fcsr = c->fpu_csr0;
+ mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM;
+
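+	/*
+	 * Probe which FCSR bits are actually writable: keep the architected
+	 * exception/flag/rounding fields at their current value, write the
+	 * remaining bits first as all-zeroes and then as all-ones, and record
+	 * the bits that read back unchanged either way as reserved.
+	 */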
+ sr = read_csr_euen();
+ enable_fpu();
+
+ fcsr0 = fcsr & mask;
+ write_fcsr(LOONGARCH_FCSR0, fcsr0);
+ fcsr0 = read_fcsr(LOONGARCH_FCSR0);
+
+ fcsr1 = fcsr | ~mask;
+ write_fcsr(LOONGARCH_FCSR0, fcsr1);
+ fcsr1 = read_fcsr(LOONGARCH_FCSR0);
+
+ write_fcsr(LOONGARCH_FCSR0, fcsr);
+
+ write_csr_euen(sr);
+
+ c->fpu_mask = ~(fcsr0 ^ fcsr1) & ~mask;
+}
+
+static inline void set_elf_platform(int cpu, const char *plat)
+{
+ if (cpu == 0)
+ __elf_platform = plat;
+}
+
+/* MAP BASE */
+unsigned long vm_map_base;
+EXPORT_SYMBOL_GPL(vm_map_base);
+
+static void cpu_probe_addrbits(struct cpuinfo_loongarch *c)
+{
+#ifdef __NEED_ADDRBITS_PROBE
+ c->pabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_PABITS) >> 4;
+ c->vabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_VABITS) >> 12;
+ vm_map_base = 0UL - (1UL << c->vabits);
+#endif
+}
+
+static void set_isa(struct cpuinfo_loongarch *c, unsigned int isa)
+{
+ switch (isa) {
+ case LOONGARCH_CPU_ISA_LA64:
+ c->isa_level |= LOONGARCH_CPU_ISA_LA64;
+ fallthrough;
+ case LOONGARCH_CPU_ISA_LA32S:
+ c->isa_level |= LOONGARCH_CPU_ISA_LA32S;
+ fallthrough;
+ case LOONGARCH_CPU_ISA_LA32R:
+ c->isa_level |= LOONGARCH_CPU_ISA_LA32R;
+ break;
+ }
+}
+
+static void cpu_probe_common(struct cpuinfo_loongarch *c)
+{
+ unsigned int config;
+ unsigned long asid_mask;
+
+ c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
+ LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
+
+ elf_hwcap |= HWCAP_LOONGARCH_CRC32;
+
+ config = read_cpucfg(LOONGARCH_CPUCFG1);
+ if (config & CPUCFG1_UAL) {
+ c->options |= LOONGARCH_CPU_UAL;
+ elf_hwcap |= HWCAP_LOONGARCH_UAL;
+ }
+
+ config = read_cpucfg(LOONGARCH_CPUCFG2);
+ if (config & CPUCFG2_LAM) {
+ c->options |= LOONGARCH_CPU_LAM;
+ elf_hwcap |= HWCAP_LOONGARCH_LAM;
+ }
+ if (config & CPUCFG2_FP) {
+ c->options |= LOONGARCH_CPU_FPU;
+ elf_hwcap |= HWCAP_LOONGARCH_FPU;
+ }
+ if (config & CPUCFG2_COMPLEX) {
+ c->options |= LOONGARCH_CPU_COMPLEX;
+ elf_hwcap |= HWCAP_LOONGARCH_COMPLEX;
+ }
+ if (config & CPUCFG2_CRYPTO) {
+ c->options |= LOONGARCH_CPU_CRYPTO;
+ elf_hwcap |= HWCAP_LOONGARCH_CRYPTO;
+ }
+ if (config & CPUCFG2_LVZP) {
+ c->options |= LOONGARCH_CPU_LVZ;
+ elf_hwcap |= HWCAP_LOONGARCH_LVZ;
+ }
+
+ config = read_cpucfg(LOONGARCH_CPUCFG6);
+ if (config & CPUCFG6_PMP)
+ c->options |= LOONGARCH_CPU_PMP;
+
+ config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
+ if (config & IOCSRF_CSRIPI)
+ c->options |= LOONGARCH_CPU_CSRIPI;
+ if (config & IOCSRF_EXTIOI)
+ c->options |= LOONGARCH_CPU_EXTIOI;
+ if (config & IOCSRF_FREQSCALE)
+ c->options |= LOONGARCH_CPU_SCALEFREQ;
+ if (config & IOCSRF_FLATMODE)
+ c->options |= LOONGARCH_CPU_FLATMODE;
+ if (config & IOCSRF_EIODECODE)
+ c->options |= LOONGARCH_CPU_EIODECODE;
+ if (config & IOCSRF_VM)
+ c->options |= LOONGARCH_CPU_HYPERVISOR;
+
+ config = csr_read32(LOONGARCH_CSR_ASID);
+ config = (config & CSR_ASID_BIT) >> CSR_ASID_BIT_SHIFT;
+ asid_mask = GENMASK(config - 1, 0);
+ set_cpu_asid_mask(c, asid_mask);
+
+ config = read_csr_prcfg1();
+ c->ksave_mask = GENMASK((config & CSR_CONF1_KSNUM) - 1, 0);
+ c->ksave_mask &= ~(EXC_KSAVE_MASK | PERCPU_KSAVE_MASK | KVM_KSAVE_MASK);
+
+ config = read_csr_prcfg3();
+ switch (config & CSR_CONF3_TLBTYPE) {
+ case 0:
+ c->tlbsizemtlb = 0;
+ c->tlbsizestlbsets = 0;
+ c->tlbsizestlbways = 0;
+ c->tlbsize = 0;
+ break;
+ case 1:
+ c->tlbsizemtlb = ((config & CSR_CONF3_MTLBSIZE) >> CSR_CONF3_MTLBSIZE_SHIFT) + 1;
+ c->tlbsizestlbsets = 0;
+ c->tlbsizestlbways = 0;
+ c->tlbsize = c->tlbsizemtlb + c->tlbsizestlbsets * c->tlbsizestlbways;
+ break;
+ case 2:
+ c->tlbsizemtlb = ((config & CSR_CONF3_MTLBSIZE) >> CSR_CONF3_MTLBSIZE_SHIFT) + 1;
+ c->tlbsizestlbsets = 1 << ((config & CSR_CONF3_STLBIDX) >> CSR_CONF3_STLBIDX_SHIFT);
+ c->tlbsizestlbways = ((config & CSR_CONF3_STLBWAYS) >> CSR_CONF3_STLBWAYS_SHIFT) + 1;
+ c->tlbsize = c->tlbsizemtlb + c->tlbsizestlbsets * c->tlbsizestlbways;
+ break;
+ default:
+ pr_warn("Warning: unknown TLB type\n");
+ }
+}
+
+#define MAX_NAME_LEN 32
+#define VENDOR_OFFSET 0
+#define CPUNAME_OFFSET 9
+
+static char cpu_full_name[MAX_NAME_LEN] = " - ";
+
+static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int cpu)
+{
+ uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]);
+ uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]);
+
+ __cpu_full_name[cpu] = cpu_full_name;
+ *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
+ *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
+
+ switch (c->processor_id & PRID_SERIES_MASK) {
+ case PRID_SERIES_LA132:
+ c->cputype = CPU_LOONGSON32;
+ set_isa(c, LOONGARCH_CPU_ISA_LA32S);
+ __cpu_family[cpu] = "Loongson-32bit";
+ pr_info("32-bit Loongson Processor probed (LA132 Core)\n");
+ break;
+ case PRID_SERIES_LA264:
+ c->cputype = CPU_LOONGSON64;
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (LA264 Core)\n");
+ break;
+ case PRID_SERIES_LA364:
+ c->cputype = CPU_LOONGSON64;
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (LA364 Core)\n");
+ break;
+ case PRID_SERIES_LA464:
+ c->cputype = CPU_LOONGSON64;
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (LA464 Core)\n");
+ break;
+ case PRID_SERIES_LA664:
+ c->cputype = CPU_LOONGSON64;
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (LA664 Core)\n");
+ break;
+ default: /* Default to 64 bit */
+ c->cputype = CPU_LOONGSON64;
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (Unknown Core)\n");
+ }
+}
+
+#ifdef CONFIG_64BIT
+/* For use by uaccess.h */
+u64 __ua_limit;
+EXPORT_SYMBOL(__ua_limit);
+#endif
+
+const char *__cpu_family[NR_CPUS];
+const char *__cpu_full_name[NR_CPUS];
+const char *__elf_platform;
+
+static void cpu_report(void)
+{
+ struct cpuinfo_loongarch *c = &current_cpu_data;
+
+ pr_info("CPU%d revision is: %08x (%s)\n",
+ smp_processor_id(), c->processor_id, cpu_family_string());
+ if (c->options & LOONGARCH_CPU_FPU)
+ pr_info("FPU%d revision is: %08x\n", smp_processor_id(), c->fpu_vers);
+}
+
+void cpu_probe(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo_loongarch *c = &current_cpu_data;
+
+	/*
+	 * Set a default ELF platform; the CPU probe may later
+	 * overwrite it with a more precise value.
+	 */
+ set_elf_platform(cpu, "loongarch");
+
+ c->cputype = CPU_UNKNOWN;
+ c->processor_id = read_cpucfg(LOONGARCH_CPUCFG0);
+ c->fpu_vers = (read_cpucfg(LOONGARCH_CPUCFG2) >> 3) & 0x3;
+
+ c->fpu_csr0 = FPU_CSR_RN;
+ c->fpu_mask = FPU_CSR_RSVD;
+
+ cpu_probe_common(c);
+
+ per_cpu_trap_init(cpu);
+
+ switch (c->processor_id & PRID_COMP_MASK) {
+ case PRID_COMP_LOONGSON:
+ cpu_probe_loongson(c, cpu);
+ break;
+ }
+
+ BUG_ON(!__cpu_family[cpu]);
+ BUG_ON(c->cputype == CPU_UNKNOWN);
+
+ cpu_probe_addrbits(c);
+
+#ifdef CONFIG_64BIT
+ if (cpu == 0)
+ __ua_limit = ~((1ull << cpu_vabits) - 1);
+#endif
+
+ cpu_report();
+}
diff --git a/arch/loongarch/kernel/dma.c b/arch/loongarch/kernel/dma.c
new file mode 100644
index 000000000000..8c9b5314a13e
--- /dev/null
+++ b/arch/loongarch/kernel/dma.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
+#include <linux/swiotlb.h>
+
+#include <asm/bootinfo.h>
+#include <asm/dma.h>
+#include <asm/loongson.h>
+
+/*
+ * We extract the 4-bit node id (bits 44-47) from Loongson-3's
+ * 48-bit physical address space and embed it into the 40-bit
+ * DMA address at node_id_offset (dma_to_phys() undoes this).
+ */
+
+static int node_id_offset;
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ long nid = (paddr >> 44) & 0xf;
+
+ return ((nid << 44) ^ paddr) | (nid << node_id_offset);
+}
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ long nid = (daddr >> node_id_offset) & 0xf;
+
+ return ((nid << node_id_offset) ^ daddr) | (nid << 44);
+}
+
+void __init plat_swiotlb_setup(void)
+{
+ swiotlb_init(true, SWIOTLB_VERBOSE);
+ node_id_offset = ((readl(LS7A_DMA_CFG) & LS7A_DMA_NODE_MASK) >> LS7A_DMA_NODE_SHF) + 36;
+}
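For illustration only (not part of this patch): a worked example of the
address folding above, assuming node_id_offset probes to 36.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            const int node_id_offset = 36;
            uint64_t paddr = 0x0000100020000000ULL;     /* node 1: bits 44-47 */
            uint64_t nid = (paddr >> 44) & 0xf;
            uint64_t daddr = ((nid << 44) ^ paddr) | (nid << node_id_offset);

            assert(daddr == 0x0000001020000000ULL);     /* node id folded down to bit 36 */

            /* dma_to_phys() reverses the transform. */
            nid = (daddr >> node_id_offset) & 0xf;
            assert((((nid << node_id_offset) ^ daddr) | (nid << 44)) == paddr);
            return 0;
    }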
diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c
new file mode 100644
index 000000000000..a50b60c587fa
--- /dev/null
+++ b/arch/loongarch/kernel/efi.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * EFI initialization
+ *
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/efi-bgrt.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kobject.h>
+#include <linux/memblock.h>
+#include <linux/reboot.h>
+#include <linux/uaccess.h>
+
+#include <asm/early_ioremap.h>
+#include <asm/efi.h>
+#include <asm/loongson.h>
+
+static unsigned long efi_nr_tables;
+static unsigned long efi_config_table;
+
+static efi_system_table_t *efi_systab;
+static efi_config_table_type_t arch_tables[] __initdata = {{},};
+
+void __init efi_runtime_init(void)
+{
+ if (!efi_enabled(EFI_BOOT))
+ return;
+
+ if (efi_runtime_disabled()) {
+ pr_info("EFI runtime services will be disabled.\n");
+ return;
+ }
+
+ efi.runtime = (efi_runtime_services_t *)efi_systab->runtime;
+ efi.runtime_version = (unsigned int)efi.runtime->hdr.revision;
+
+ efi_native_runtime_setup();
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+}
+
+void __init efi_init(void)
+{
+ int size;
+ void *config_tables;
+
+ if (!efi_system_table)
+ return;
+
+ efi_systab = (efi_system_table_t *)early_memremap_ro(efi_system_table, sizeof(*efi_systab));
+ if (!efi_systab) {
+ pr_err("Can't find EFI system table.\n");
+ return;
+ }
+
+ set_bit(EFI_64BIT, &efi.flags);
+ efi_nr_tables = efi_systab->nr_tables;
+ efi_config_table = (unsigned long)efi_systab->tables;
+
+ size = sizeof(efi_config_table_t);
+ config_tables = early_memremap(efi_config_table, efi_nr_tables * size);
+ efi_config_parse_tables(config_tables, efi_systab->nr_tables, arch_tables);
+ early_memunmap(config_tables, efi_nr_tables * size);
+}
diff --git a/arch/loongarch/kernel/elf.c b/arch/loongarch/kernel/elf.c
new file mode 100644
index 000000000000..183e94fc9c69
--- /dev/null
+++ b/arch/loongarch/kernel/elf.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+
+#include <asm/cpu-features.h>
+#include <asm/cpu-info.h>
+
+int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
+ bool is_interp, struct arch_elf_state *state)
+{
+ return 0;
+}
+
+int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
+ struct arch_elf_state *state)
+{
+ return 0;
+}
+
+void loongarch_set_personality_fcsr(struct arch_elf_state *state)
+{
+ current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
+}
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
new file mode 100644
index 000000000000..d5b3dbcf5425
--- /dev/null
+++ b/arch/loongarch/kernel/entry.S
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+ .text
+ .cfi_sections .debug_frame
+ .align 5
+SYM_FUNC_START(handle_syscall)
+ csrrd t0, PERCPU_BASE_KS
+ la.abs t1, kernelsp
+ add.d t1, t1, t0
+ move t2, sp
+ ld.d sp, t1, 0
+
+ addi.d sp, sp, -PT_SIZE
+ cfi_st t2, PT_R3
+ cfi_rel_offset sp, PT_R3
+ st.d zero, sp, PT_R0
+ csrrd t2, LOONGARCH_CSR_PRMD
+ st.d t2, sp, PT_PRMD
+ csrrd t2, LOONGARCH_CSR_CRMD
+ st.d t2, sp, PT_CRMD
+ csrrd t2, LOONGARCH_CSR_EUEN
+ st.d t2, sp, PT_EUEN
+ csrrd t2, LOONGARCH_CSR_ECFG
+ st.d t2, sp, PT_ECFG
+ csrrd t2, LOONGARCH_CSR_ESTAT
+ st.d t2, sp, PT_ESTAT
+ cfi_st ra, PT_R1
+ cfi_st a0, PT_R4
+ cfi_st a1, PT_R5
+ cfi_st a2, PT_R6
+ cfi_st a3, PT_R7
+ cfi_st a4, PT_R8
+ cfi_st a5, PT_R9
+ cfi_st a6, PT_R10
+ cfi_st a7, PT_R11
+ csrrd ra, LOONGARCH_CSR_ERA
+ st.d ra, sp, PT_ERA
+ cfi_rel_offset ra, PT_ERA
+
+ cfi_st tp, PT_R2
+ cfi_st u0, PT_R21
+ cfi_st fp, PT_R22
+
+ SAVE_STATIC
+
+ move u0, t0
+ li.d tp, ~_THREAD_MASK
+ and tp, tp, sp
+
+ move a0, sp
+ bl do_syscall
+
+ RESTORE_ALL_AND_RET
+SYM_FUNC_END(handle_syscall)
+
+SYM_CODE_START(ret_from_fork)
+ bl schedule_tail # a0 = struct task_struct *prev
+ move a0, sp
+ bl syscall_exit_to_user_mode
+ RESTORE_STATIC
+ RESTORE_SOME
+ RESTORE_SP_AND_RET
+SYM_CODE_END(ret_from_fork)
+
+SYM_CODE_START(ret_from_kernel_thread)
+ bl schedule_tail # a0 = struct task_struct *prev
+ move a0, s1
+ jirl ra, s0, 0
+ move a0, sp
+ bl syscall_exit_to_user_mode
+ RESTORE_STATIC
+ RESTORE_SOME
+ RESTORE_SP_AND_RET
+SYM_CODE_END(ret_from_kernel_thread)
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
new file mode 100644
index 000000000000..467946ecf451
--- /dev/null
+++ b/arch/loongarch/kernel/env.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+#include <asm/early_ioremap.h>
+#include <asm/bootinfo.h>
+#include <asm/loongson.h>
+
+u64 efi_system_table;
+struct loongson_system_configuration loongson_sysconf;
+EXPORT_SYMBOL(loongson_sysconf);
+
+u64 loongson_chipcfg[MAX_PACKAGES];
+u64 loongson_chiptemp[MAX_PACKAGES];
+u64 loongson_freqctrl[MAX_PACKAGES];
+unsigned long long smp_group[MAX_PACKAGES];
+
+static void __init register_addrs_set(u64 *registers, const u64 addr, int num)
+{
+ u64 i;
+
+ for (i = 0; i < num; i++) {
+ *registers = (i << 44) | addr;
+ registers++;
+ }
+}
+
+void __init init_environ(void)
+{
+ int efi_boot = fw_arg0;
+ struct efi_memory_map_data data;
+ void *fdt_ptr = early_memremap_ro(fw_arg1, SZ_64K);
+
+ if (efi_boot)
+ set_bit(EFI_BOOT, &efi.flags);
+ else
+ clear_bit(EFI_BOOT, &efi.flags);
+
+ early_init_dt_scan(fdt_ptr);
+ early_init_fdt_reserve_self();
+ efi_system_table = efi_get_fdt_params(&data);
+
+ efi_memmap_init_early(&data);
+ memblock_reserve(data.phys_map & PAGE_MASK,
+ PAGE_ALIGN(data.size + (data.phys_map & ~PAGE_MASK)));
+
+ register_addrs_set(smp_group, TO_UNCACHE(0x1fe01000), 16);
+ register_addrs_set(loongson_chipcfg, TO_UNCACHE(0x1fe00180), 16);
+ register_addrs_set(loongson_chiptemp, TO_UNCACHE(0x1fe0019c), 16);
+ register_addrs_set(loongson_freqctrl, TO_UNCACHE(0x1fe001d0), 16);
+}
+
+static int __init init_cpu_fullname(void)
+{
+ int cpu;
+
+ if (loongson_sysconf.cpuname && !strncmp(loongson_sysconf.cpuname, "Loongson", 8)) {
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ __cpu_full_name[cpu] = loongson_sysconf.cpuname;
+ }
+ return 0;
+}
+arch_initcall(init_cpu_fullname);
+
+static ssize_t boardinfo_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf,
+ "BIOS Information\n"
+ "Vendor\t\t\t: %s\n"
+ "Version\t\t\t: %s\n"
+ "ROM Size\t\t: %d KB\n"
+ "Release Date\t\t: %s\n\n"
+ "Board Information\n"
+ "Manufacturer\t\t: %s\n"
+ "Board Name\t\t: %s\n"
+ "Family\t\t\t: LOONGSON64\n\n",
+ b_info.bios_vendor, b_info.bios_version,
+ b_info.bios_size, b_info.bios_release_date,
+ b_info.board_vendor, b_info.board_name);
+}
+
+static struct kobj_attribute boardinfo_attr = __ATTR(boardinfo, 0444,
+ boardinfo_show, NULL);
+
+static int __init boardinfo_init(void)
+{
+ struct kobject *loongson_kobj;
+
+ loongson_kobj = kobject_create_and_add("loongson", firmware_kobj);
+
+ return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr);
+}
+late_initcall(boardinfo_init);
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
new file mode 100644
index 000000000000..75c6ce0682a2
--- /dev/null
+++ b/arch/loongarch/kernel/fpu.S
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Lu Zeng <zenglu@loongson.cn>
+ * Pei Huang <huangpei@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/asm-offsets.h>
+#include <asm/errno.h>
+#include <asm/export.h>
+#include <asm/fpregdef.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+
+#define FPU_REG_WIDTH 8
+#define LSX_REG_WIDTH 16
+#define LASX_REG_WIDTH 32
+
+ .macro EX insn, reg, src, offs
+.ex\@: \insn \reg, \src, \offs
+ .section __ex_table,"a"
+ PTR .ex\@, fault
+ .previous
+ .endm
+
+ .macro sc_save_fp base
+ EX fst.d $f0, \base, (0 * FPU_REG_WIDTH)
+ EX fst.d $f1, \base, (1 * FPU_REG_WIDTH)
+ EX fst.d $f2, \base, (2 * FPU_REG_WIDTH)
+ EX fst.d $f3, \base, (3 * FPU_REG_WIDTH)
+ EX fst.d $f4, \base, (4 * FPU_REG_WIDTH)
+ EX fst.d $f5, \base, (5 * FPU_REG_WIDTH)
+ EX fst.d $f6, \base, (6 * FPU_REG_WIDTH)
+ EX fst.d $f7, \base, (7 * FPU_REG_WIDTH)
+ EX fst.d $f8, \base, (8 * FPU_REG_WIDTH)
+ EX fst.d $f9, \base, (9 * FPU_REG_WIDTH)
+ EX fst.d $f10, \base, (10 * FPU_REG_WIDTH)
+ EX fst.d $f11, \base, (11 * FPU_REG_WIDTH)
+ EX fst.d $f12, \base, (12 * FPU_REG_WIDTH)
+ EX fst.d $f13, \base, (13 * FPU_REG_WIDTH)
+ EX fst.d $f14, \base, (14 * FPU_REG_WIDTH)
+ EX fst.d $f15, \base, (15 * FPU_REG_WIDTH)
+ EX fst.d $f16, \base, (16 * FPU_REG_WIDTH)
+ EX fst.d $f17, \base, (17 * FPU_REG_WIDTH)
+ EX fst.d $f18, \base, (18 * FPU_REG_WIDTH)
+ EX fst.d $f19, \base, (19 * FPU_REG_WIDTH)
+ EX fst.d $f20, \base, (20 * FPU_REG_WIDTH)
+ EX fst.d $f21, \base, (21 * FPU_REG_WIDTH)
+ EX fst.d $f22, \base, (22 * FPU_REG_WIDTH)
+ EX fst.d $f23, \base, (23 * FPU_REG_WIDTH)
+ EX fst.d $f24, \base, (24 * FPU_REG_WIDTH)
+ EX fst.d $f25, \base, (25 * FPU_REG_WIDTH)
+ EX fst.d $f26, \base, (26 * FPU_REG_WIDTH)
+ EX fst.d $f27, \base, (27 * FPU_REG_WIDTH)
+ EX fst.d $f28, \base, (28 * FPU_REG_WIDTH)
+ EX fst.d $f29, \base, (29 * FPU_REG_WIDTH)
+ EX fst.d $f30, \base, (30 * FPU_REG_WIDTH)
+ EX fst.d $f31, \base, (31 * FPU_REG_WIDTH)
+ .endm
+
+ .macro sc_restore_fp base
+ EX fld.d $f0, \base, (0 * FPU_REG_WIDTH)
+ EX fld.d $f1, \base, (1 * FPU_REG_WIDTH)
+ EX fld.d $f2, \base, (2 * FPU_REG_WIDTH)
+ EX fld.d $f3, \base, (3 * FPU_REG_WIDTH)
+ EX fld.d $f4, \base, (4 * FPU_REG_WIDTH)
+ EX fld.d $f5, \base, (5 * FPU_REG_WIDTH)
+ EX fld.d $f6, \base, (6 * FPU_REG_WIDTH)
+ EX fld.d $f7, \base, (7 * FPU_REG_WIDTH)
+ EX fld.d $f8, \base, (8 * FPU_REG_WIDTH)
+ EX fld.d $f9, \base, (9 * FPU_REG_WIDTH)
+ EX fld.d $f10, \base, (10 * FPU_REG_WIDTH)
+ EX fld.d $f11, \base, (11 * FPU_REG_WIDTH)
+ EX fld.d $f12, \base, (12 * FPU_REG_WIDTH)
+ EX fld.d $f13, \base, (13 * FPU_REG_WIDTH)
+ EX fld.d $f14, \base, (14 * FPU_REG_WIDTH)
+ EX fld.d $f15, \base, (15 * FPU_REG_WIDTH)
+ EX fld.d $f16, \base, (16 * FPU_REG_WIDTH)
+ EX fld.d $f17, \base, (17 * FPU_REG_WIDTH)
+ EX fld.d $f18, \base, (18 * FPU_REG_WIDTH)
+ EX fld.d $f19, \base, (19 * FPU_REG_WIDTH)
+ EX fld.d $f20, \base, (20 * FPU_REG_WIDTH)
+ EX fld.d $f21, \base, (21 * FPU_REG_WIDTH)
+ EX fld.d $f22, \base, (22 * FPU_REG_WIDTH)
+ EX fld.d $f23, \base, (23 * FPU_REG_WIDTH)
+ EX fld.d $f24, \base, (24 * FPU_REG_WIDTH)
+ EX fld.d $f25, \base, (25 * FPU_REG_WIDTH)
+ EX fld.d $f26, \base, (26 * FPU_REG_WIDTH)
+ EX fld.d $f27, \base, (27 * FPU_REG_WIDTH)
+ EX fld.d $f28, \base, (28 * FPU_REG_WIDTH)
+ EX fld.d $f29, \base, (29 * FPU_REG_WIDTH)
+ EX fld.d $f30, \base, (30 * FPU_REG_WIDTH)
+ EX fld.d $f31, \base, (31 * FPU_REG_WIDTH)
+ .endm
+
+ .macro sc_save_fcc base, tmp0, tmp1
+ movcf2gr \tmp0, $fcc0
+ move \tmp1, \tmp0
+ movcf2gr \tmp0, $fcc1
+ bstrins.d \tmp1, \tmp0, 15, 8
+ movcf2gr \tmp0, $fcc2
+ bstrins.d \tmp1, \tmp0, 23, 16
+ movcf2gr \tmp0, $fcc3
+ bstrins.d \tmp1, \tmp0, 31, 24
+ movcf2gr \tmp0, $fcc4
+ bstrins.d \tmp1, \tmp0, 39, 32
+ movcf2gr \tmp0, $fcc5
+ bstrins.d \tmp1, \tmp0, 47, 40
+ movcf2gr \tmp0, $fcc6
+ bstrins.d \tmp1, \tmp0, 55, 48
+ movcf2gr \tmp0, $fcc7
+ bstrins.d \tmp1, \tmp0, 63, 56
+ EX st.d \tmp1, \base, 0
+ .endm
+
+ .macro sc_restore_fcc base, tmp0, tmp1
+ EX ld.d \tmp0, \base, 0
+ bstrpick.d \tmp1, \tmp0, 7, 0
+ movgr2cf $fcc0, \tmp1
+ bstrpick.d \tmp1, \tmp0, 15, 8
+ movgr2cf $fcc1, \tmp1
+ bstrpick.d \tmp1, \tmp0, 23, 16
+ movgr2cf $fcc2, \tmp1
+ bstrpick.d \tmp1, \tmp0, 31, 24
+ movgr2cf $fcc3, \tmp1
+ bstrpick.d \tmp1, \tmp0, 39, 32
+ movgr2cf $fcc4, \tmp1
+ bstrpick.d \tmp1, \tmp0, 47, 40
+ movgr2cf $fcc5, \tmp1
+ bstrpick.d \tmp1, \tmp0, 55, 48
+ movgr2cf $fcc6, \tmp1
+ bstrpick.d \tmp1, \tmp0, 63, 56
+ movgr2cf $fcc7, \tmp1
+ .endm
+
+ .macro sc_save_fcsr base, tmp0
+ movfcsr2gr \tmp0, fcsr0
+ EX st.w \tmp0, \base, 0
+ .endm
+
+ .macro sc_restore_fcsr base, tmp0
+ EX ld.w \tmp0, \base, 0
+ movgr2fcsr fcsr0, \tmp0
+ .endm
+
+ .macro sc_save_vcsr base, tmp0
+ movfcsr2gr \tmp0, vcsr16
+ EX st.w \tmp0, \base, 0
+ .endm
+
+ .macro sc_restore_vcsr base, tmp0
+ EX ld.w \tmp0, \base, 0
+ movgr2fcsr vcsr16, \tmp0
+ .endm
+
+/*
+ * Save a thread's fp context.
+ */
+SYM_FUNC_START(_save_fp)
+ fpu_save_csr a0 t1
+ fpu_save_double a0 t1 # clobbers t1
+ fpu_save_cc a0 t1 t2 # clobbers t1, t2
+ jirl zero, ra, 0
+SYM_FUNC_END(_save_fp)
+EXPORT_SYMBOL(_save_fp)
+
+/*
+ * Restore a thread's fp context.
+ */
+SYM_FUNC_START(_restore_fp)
+ fpu_restore_double a0 t1 # clobbers t1
+ fpu_restore_csr a0 t1
+ fpu_restore_cc a0 t1 t2 # clobbers t1, t2
+ jirl zero, ra, 0
+SYM_FUNC_END(_restore_fp)
+
+/*
+ * Load the FPU with signalling NaNs. The bit pattern we use represents
+ * a signaling NaN regardless of whether it is interpreted as single or
+ * as double precision.
+ *
+ * The value to initialize fcsr0 to comes in $a0.
+ */
+
+SYM_FUNC_START(_init_fpu)
+ li.w t1, CSR_EUEN_FPEN
+ csrxchg t1, t1, LOONGARCH_CSR_EUEN
+
+ movgr2fcsr fcsr0, a0
+
+ li.w t1, -1 # SNaN
+
+ movgr2fr.d $f0, t1
+ movgr2fr.d $f1, t1
+ movgr2fr.d $f2, t1
+ movgr2fr.d $f3, t1
+ movgr2fr.d $f4, t1
+ movgr2fr.d $f5, t1
+ movgr2fr.d $f6, t1
+ movgr2fr.d $f7, t1
+ movgr2fr.d $f8, t1
+ movgr2fr.d $f9, t1
+ movgr2fr.d $f10, t1
+ movgr2fr.d $f11, t1
+ movgr2fr.d $f12, t1
+ movgr2fr.d $f13, t1
+ movgr2fr.d $f14, t1
+ movgr2fr.d $f15, t1
+ movgr2fr.d $f16, t1
+ movgr2fr.d $f17, t1
+ movgr2fr.d $f18, t1
+ movgr2fr.d $f19, t1
+ movgr2fr.d $f20, t1
+ movgr2fr.d $f21, t1
+ movgr2fr.d $f22, t1
+ movgr2fr.d $f23, t1
+ movgr2fr.d $f24, t1
+ movgr2fr.d $f25, t1
+ movgr2fr.d $f26, t1
+ movgr2fr.d $f27, t1
+ movgr2fr.d $f28, t1
+ movgr2fr.d $f29, t1
+ movgr2fr.d $f30, t1
+ movgr2fr.d $f31, t1
+
+ jirl zero, ra, 0
+SYM_FUNC_END(_init_fpu)
+
+/*
+ * a0: fpregs
+ * a1: fcc
+ * a2: fcsr
+ */
+SYM_FUNC_START(_save_fp_context)
+ sc_save_fcc a1 t1 t2
+ sc_save_fcsr a2 t1
+ sc_save_fp a0
+ li.w a0, 0 # success
+ jirl zero, ra, 0
+SYM_FUNC_END(_save_fp_context)
+
+/*
+ * a0: fpregs
+ * a1: fcc
+ * a2: fcsr
+ */
+SYM_FUNC_START(_restore_fp_context)
+ sc_restore_fp a0
+ sc_restore_fcc a1 t1 t2
+ sc_restore_fcsr a2 t1
+ li.w a0, 0 # success
+ jirl zero, ra, 0
+SYM_FUNC_END(_restore_fp_context)
+
+SYM_FUNC_START(fault)
+ li.w a0, -EFAULT # failure
+ jirl zero, ra, 0
+SYM_FUNC_END(fault)
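The sc_save_fcc/sc_restore_fcc macros above pack the eight condition-flag
registers into one 64-bit word, one flag per byte, using movcf2gr plus
bstrins.d on the save path and bstrpick.d plus movgr2cf on the restore path.
A rough C equivalent of that packing, with fcc[] standing in for $fcc0..$fcc7,
purely for illustration:

	#include <stdio.h>
	#include <stdint.h>

	static uint64_t pack_fcc(const uint8_t fcc[8])
	{
		uint64_t packed = 0;

		for (int i = 0; i < 8; i++)	/* flag i lands in bits [8*i+7 : 8*i] */
			packed |= (uint64_t)(fcc[i] & 1) << (8 * i);
		return packed;
	}

	static void unpack_fcc(uint64_t packed, uint8_t fcc[8])
	{
		for (int i = 0; i < 8; i++)	/* the bstrpick.d direction */
			fcc[i] = (packed >> (8 * i)) & 0xff;
	}

	int main(void)
	{
		uint8_t in[8] = { 1, 0, 1, 1, 0, 0, 1, 0 }, out[8];
		uint64_t word = pack_fcc(in);

		unpack_fcc(word, out);
		printf("packed = %#llx, fcc3 restored = %u\n",
		       (unsigned long long)word, out[3]);
		return 0;
	}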
diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
new file mode 100644
index 000000000000..93496852b3cc
--- /dev/null
+++ b/arch/loongarch/kernel/genex.S
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2002, 2007 Maciej W. Rozycki
+ * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+ .align 5
+SYM_FUNC_START(__arch_cpu_idle)
+ /* start of rollback region */
+ LONG_L t0, tp, TI_FLAGS
+ nop
+ andi t0, t0, _TIF_NEED_RESCHED
+ bnez t0, 1f
+ nop
+ nop
+ nop
+ idle 0
+ /* end of rollback region */
+1: jirl zero, ra, 0
+SYM_FUNC_END(__arch_cpu_idle)
+
+SYM_FUNC_START(handle_vint)
+ BACKUP_T0T1
+ SAVE_ALL
+ la.abs t1, __arch_cpu_idle
+ LONG_L t0, sp, PT_ERA
+ /* 32 byte rollback region */
+ ori t0, t0, 0x1f
+ xori t0, t0, 0x1f
+ bne t0, t1, 1f
+ LONG_S t0, sp, PT_ERA
+1: move a0, sp
+ move a1, sp
+ la.abs t0, do_vint
+ jirl ra, t0, 0
+ RESTORE_ALL_AND_RET
+SYM_FUNC_END(handle_vint)
+
+SYM_FUNC_START(except_vec_cex)
+ b cache_parity_error
+SYM_FUNC_END(except_vec_cex)
+
+ .macro build_prep_badv
+ csrrd t0, LOONGARCH_CSR_BADV
+ PTR_S t0, sp, PT_BVADDR
+ .endm
+
+ .macro build_prep_fcsr
+ movfcsr2gr a1, fcsr0
+ .endm
+
+ .macro build_prep_none
+ .endm
+
+ .macro BUILD_HANDLER exception handler prep
+ .align 5
+ SYM_FUNC_START(handle_\exception)
+ BACKUP_T0T1
+ SAVE_ALL
+ build_prep_\prep
+ move a0, sp
+ la.abs t0, do_\handler
+ jirl ra, t0, 0
+ RESTORE_ALL_AND_RET
+ SYM_FUNC_END(handle_\exception)
+ .endm
+
+ BUILD_HANDLER ade ade badv
+ BUILD_HANDLER ale ale badv
+ BUILD_HANDLER bp bp none
+ BUILD_HANDLER fpe fpe fcsr
+ BUILD_HANDLER fpu fpu none
+ BUILD_HANDLER lsx lsx none
+ BUILD_HANDLER lasx lasx none
+ BUILD_HANDLER lbt lbt none
+ BUILD_HANDLER ri ri none
+ BUILD_HANDLER watch watch none
+ BUILD_HANDLER reserved reserved none /* others */
+
+SYM_FUNC_START(handle_sys)
+ la.abs t0, handle_syscall
+ jirl zero, t0, 0
+SYM_FUNC_END(handle_sys)
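The ori/xori pair in handle_vint above rounds the saved ERA down to a 32-byte
boundary and compares it against __arch_cpu_idle, so an interrupt taken
anywhere inside the 32-byte rollback region restarts the idle routine from its
first instruction. A minimal sketch of that check, with both addresses chosen
purely as assumed examples:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t idle_start = 0x90000000002020c0ULL;	/* assumed: 32-byte aligned __arch_cpu_idle */
		uint64_t era        = idle_start + 0x14;	/* assumed: interrupt inside the region */

		/* "ori 0x1f" then "xori 0x1f" is a round-down to a 32-byte boundary */
		uint64_t rounded = (era | 0x1f) ^ 0x1f;

		if (rounded == idle_start)
			era = rounded;	/* roll back so the NEED_RESCHED test runs again */

		printf("resume at %#llx\n", (unsigned long long)era);
		return 0;
	}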
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
new file mode 100644
index 000000000000..e596dfcd924b
--- /dev/null
+++ b/arch/loongarch/kernel/head.S
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/loongarch.h>
+#include <asm/stackframe.h>
+
+ __REF
+
+SYM_ENTRY(_stext, SYM_L_GLOBAL, SYM_A_NONE)
+
+SYM_CODE_START(kernel_entry) # kernel entry point
+
+ /* Config direct window and set PG */
+ li.d t0, CSR_DMW0_INIT # UC, PLV0, 0x8000 xxxx xxxx xxxx
+ csrwr t0, LOONGARCH_CSR_DMWIN0
+ li.d t0, CSR_DMW1_INIT # CA, PLV0, 0x9000 xxxx xxxx xxxx
+ csrwr t0, LOONGARCH_CSR_DMWIN1
+ /* Enable PG */
+ li.w t0, 0xb0 # PLV=0, IE=0, PG=1
+ csrwr t0, LOONGARCH_CSR_CRMD
+ li.w t0, 0x04 # PLV=0, PIE=1, PWE=0
+ csrwr t0, LOONGARCH_CSR_PRMD
+ li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
+ csrwr t0, LOONGARCH_CSR_EUEN
+
+ /* We might not get launched at the address the kernel is linked to,
+ so we jump there. */
+ la.abs t0, 0f
+ jirl zero, t0, 0
+0:
+ la t0, __bss_start # clear .bss
+ st.d zero, t0, 0
+ la t1, __bss_stop - LONGSIZE
+1:
+ addi.d t0, t0, LONGSIZE
+ st.d zero, t0, 0
+ bne t0, t1, 1b
+
+ la t0, fw_arg0
+ st.d a0, t0, 0 # firmware arguments
+ la t0, fw_arg1
+ st.d a1, t0, 0
+
+ /* KSave3 used for percpu base, initialized as 0 */
+ csrwr zero, PERCPU_BASE_KS
+ /* GPR21 used for percpu base (runtime), initialized as 0 */
+ or u0, zero, zero
+
+ la tp, init_thread_union
+ /* Set the SP after an empty pt_regs. */
+ PTR_LI sp, (_THREAD_SIZE - 32 - PT_SIZE)
+ PTR_ADD sp, sp, tp
+ set_saved_sp sp, t0, t1
+ PTR_ADDI sp, sp, -4 * SZREG # init stack pointer
+
+ bl start_kernel
+
+SYM_CODE_END(kernel_entry)
+
+#ifdef CONFIG_SMP
+
+/*
+ * Entry point for secondary (slave) CPUs. Board-specific bootstrap code
+ * calls this function after setting up the stack and tp registers.
+ */
+SYM_CODE_START(smpboot_entry)
+ li.d t0, CSR_DMW0_INIT # UC, PLV0
+ csrwr t0, LOONGARCH_CSR_DMWIN0
+ li.d t0, CSR_DMW1_INIT # CA, PLV0
+ csrwr t0, LOONGARCH_CSR_DMWIN1
+ li.w t0, 0xb0 # PLV=0, IE=0, PG=1
+ csrwr t0, LOONGARCH_CSR_CRMD
+ li.w t0, 0x04 # PLV=0, PIE=1, PWE=0
+ csrwr t0, LOONGARCH_CSR_PRMD
+ li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
+ csrwr t0, LOONGARCH_CSR_EUEN
+
+ la.abs t0, cpuboot_data
+ ld.d sp, t0, CPU_BOOT_STACK
+ ld.d tp, t0, CPU_BOOT_TINFO
+
+ la.abs t0, 0f
+ jirl zero, t0, 0
+0:
+ bl start_secondary
+SYM_CODE_END(smpboot_entry)
+
+#endif /* CONFIG_SMP */
+
+SYM_ENTRY(kernel_entry_end, SYM_L_GLOBAL, SYM_A_NONE)
diff --git a/arch/loongarch/kernel/idle.c b/arch/loongarch/kernel/idle.c
new file mode 100644
index 000000000000..1a65d0527d25
--- /dev/null
+++ b/arch/loongarch/kernel/idle.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LoongArch idle loop support.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/cpu.h>
+#include <linux/irqflags.h>
+#include <asm/cpu.h>
+#include <asm/idle.h>
+
+void __cpuidle arch_cpu_idle(void)
+{
+ raw_local_irq_enable();
+ __arch_cpu_idle(); /* idle instruction needs irq enabled */
+}
diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
new file mode 100644
index 000000000000..b1df0ec34bd1
--- /dev/null
+++ b/arch/loongarch/kernel/inst.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <asm/inst.h>
+
+u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm)
+{
+ union loongarch_instruction insn;
+
+ insn.reg1i20_format.opcode = lu32id_op;
+ insn.reg1i20_format.rd = rd;
+ insn.reg1i20_format.immediate = imm;
+
+ return insn.word;
+}
+
+u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
+{
+ union loongarch_instruction insn;
+
+ insn.reg2i12_format.opcode = lu52id_op;
+ insn.reg2i12_format.rd = rd;
+ insn.reg2i12_format.rj = rj;
+ insn.reg2i12_format.immediate = imm;
+
+ return insn.word;
+}
+
+u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest)
+{
+ union loongarch_instruction insn;
+
+ insn.reg2i16_format.opcode = jirl_op;
+ insn.reg2i16_format.rd = rd;
+ insn.reg2i16_format.rj = rj;
+ insn.reg2i16_format.immediate = (dest - pc) >> 2;
+
+ return insn.word;
+}
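larch_insn_gen_jirl() above stores the word offset (dest - pc) >> 2 in the
16-bit immediate field of the reg2i16 format. The following sketch shows the
encode/decode round trip with a hand-rolled 16-bit field instead of the
kernel's union loongarch_instruction; the addresses are assumed examples only:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t pc   = 0x120004000ULL;		/* assumed relocation site */
		uint64_t dest = 0x120003f80ULL;		/* assumed backward branch target */

		/* encode: word offset, truncated to 16 bits */
		int64_t  off   = (int64_t)(dest - pc) >> 2;
		uint16_t imm16 = (uint16_t)off;

		/* decode: sign-extend the 16-bit immediate and scale by 4 */
		int64_t  decoded = (int16_t)imm16 * 4;

		printf("offset %lld bytes, decoded %lld bytes\n",
		       (long long)(int64_t)(dest - pc), (long long)decoded);
		return 0;
	}

Both values print as -128, i.e. the offset survives as long as it fits the
roughly 128 KiB reach of a 16-bit, 4-byte-scaled immediate.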
diff --git a/arch/loongarch/kernel/io.c b/arch/loongarch/kernel/io.c
new file mode 100644
index 000000000000..cb85bda5a6ad
--- /dev/null
+++ b/arch/loongarch/kernel/io.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ */
+void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
+{
+ while (count && !IS_ALIGNED((unsigned long)from, 8)) {
+ *(u8 *)to = __raw_readb(from);
+ from++;
+ to++;
+ count--;
+ }
+
+ while (count >= 8) {
+ *(u64 *)to = __raw_readq(from);
+ from += 8;
+ to += 8;
+ count -= 8;
+ }
+
+ while (count) {
+ *(u8 *)to = __raw_readb(from);
+ from++;
+ to++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memcpy_fromio);
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ */
+void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
+{
+ while (count && !IS_ALIGNED((unsigned long)to, 8)) {
+ __raw_writeb(*(u8 *)from, to);
+ from++;
+ to++;
+ count--;
+ }
+
+ while (count >= 8) {
+ __raw_writeq(*(u64 *)from, to);
+ from += 8;
+ to += 8;
+ count -= 8;
+ }
+
+ while (count) {
+ __raw_writeb(*(u8 *)from, to);
+ from++;
+ to++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memcpy_toio);
+
+/*
+ * "memset" on IO memory space.
+ */
+void __memset_io(volatile void __iomem *dst, int c, size_t count)
+{
+ u64 qc = (u8)c;
+
+ qc |= qc << 8;
+ qc |= qc << 16;
+ qc |= qc << 32;
+
+ while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
+ __raw_writeb(c, dst);
+ dst++;
+ count--;
+ }
+
+ while (count >= 8) {
+ __raw_writeq(qc, dst);
+ dst += 8;
+ count -= 8;
+ }
+
+ while (count) {
+ __raw_writeb(c, dst);
+ dst++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memset_io);
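The qc |= qc << 8/16/32 sequence in __memset_io() above replicates the fill
byte into all eight byte lanes, so the 8-byte inner loop writes the same
pattern that the byte-at-a-time head and tail loops do. A quick standalone
check of that replication:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int c = 0xab;			/* assumed fill value */
		uint64_t qc = (uint8_t)c;

		qc |= qc << 8;
		qc |= qc << 16;
		qc |= qc << 32;

		/* prints 0xabababababababab */
		printf("qc = %#llx\n", (unsigned long long)qc);
		return 0;
	}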
diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c
new file mode 100644
index 000000000000..4b671d305ede
--- /dev/null
+++ b/arch/loongarch/kernel/irq.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/kernel_stat.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+
+#include <asm/irq.h>
+#include <asm/loongson.h>
+#include <asm/setup.h>
+
+DEFINE_PER_CPU(unsigned long, irq_stack);
+
+struct irq_domain *cpu_domain;
+struct irq_domain *liointc_domain;
+struct irq_domain *pch_lpc_domain;
+struct irq_domain *pch_msi_domain[MAX_IO_PICS];
+struct irq_domain *pch_pic_domain[MAX_IO_PICS];
+
+/*
+ * 'What should we do if we get a hw irq event on an illegal vector?'
+ * Each architecture has to answer this itself.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+ pr_warn("Unexpected IRQ # %d\n", irq);
+}
+
+atomic_t irq_err_count;
+
+asmlinkage void spurious_interrupt(void)
+{
+ atomic_inc(&irq_err_count);
+}
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+#ifdef CONFIG_SMP
+ show_ipi_list(p, prec);
+#endif
+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+ return 0;
+}
+
+void __init init_IRQ(void)
+{
+ int i, r, ipi_irq;
+ static int ipi_dummy_dev;
+ unsigned int order = get_order(IRQ_STACK_SIZE);
+ struct page *page;
+
+ clear_csr_ecfg(ECFG0_IM);
+ clear_csr_estat(ESTATF_IP);
+
+ irqchip_init();
+#ifdef CONFIG_SMP
+ ipi_irq = EXCCODE_IPI - EXCCODE_INT_START;
+ irq_set_percpu_devid(ipi_irq);
+ r = request_percpu_irq(ipi_irq, loongson3_ipi_interrupt, "IPI", &ipi_dummy_dev);
+ if (r < 0)
+ panic("IPI IRQ request failed\n");
+#endif
+
+ for (i = 0; i < NR_IRQS; i++)
+ irq_set_noprobe(i);
+
+ for_each_possible_cpu(i) {
+ page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);
+
+ per_cpu(irq_stack, i) = (unsigned long)page_address(page);
+ pr_debug("CPU%d IRQ stack at 0x%lx - 0x%lx\n", i,
+ per_cpu(irq_stack, i), per_cpu(irq_stack, i) + IRQ_STACK_SIZE);
+ }
+
+ set_csr_ecfg(ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC);
+}
diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c
new file mode 100644
index 000000000000..7423361b0ebc
--- /dev/null
+++ b/arch/loongarch/kernel/mem.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/efi.h>
+#include <linux/initrd.h>
+#include <linux/memblock.h>
+
+#include <asm/bootinfo.h>
+#include <asm/loongson.h>
+#include <asm/sections.h>
+
+void __init memblock_init(void)
+{
+ u32 mem_type;
+ u64 mem_start, mem_end, mem_size;
+ efi_memory_desc_t *md;
+
+ /* Parse memory information */
+ for_each_efi_memory_desc(md) {
+ mem_type = md->type;
+ mem_start = md->phys_addr;
+ mem_size = md->num_pages << EFI_PAGE_SHIFT;
+ mem_end = mem_start + mem_size;
+
+ switch (mem_type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_PERSISTENT_MEMORY:
+ case EFI_CONVENTIONAL_MEMORY:
+ memblock_add(mem_start, mem_size);
+ if (max_low_pfn < (mem_end >> PAGE_SHIFT))
+ max_low_pfn = mem_end >> PAGE_SHIFT;
+ break;
+ case EFI_PAL_CODE:
+ case EFI_UNUSABLE_MEMORY:
+ case EFI_ACPI_RECLAIM_MEMORY:
+ memblock_add(mem_start, mem_size);
+ fallthrough;
+ case EFI_RESERVED_TYPE:
+ case EFI_RUNTIME_SERVICES_CODE:
+ case EFI_RUNTIME_SERVICES_DATA:
+ case EFI_MEMORY_MAPPED_IO:
+ case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+ memblock_reserve(mem_start, mem_size);
+ break;
+ }
+ }
+
+ memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+ memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
+
+ /* Reserve the first 2MB */
+ memblock_reserve(PHYS_OFFSET, 0x200000);
+
+ /* Reserve the kernel text/data/bss */
+ memblock_reserve(__pa_symbol(&_text),
+ __pa_symbol(&_end) - __pa_symbol(&_text));
+
+ /* Reserve the initrd */
+ reserve_initrd_mem();
+}
diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c
new file mode 100644
index 000000000000..6d498288977d
--- /dev/null
+++ b/arch/loongarch/kernel/module-sections.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+Elf_Addr module_emit_plt_entry(struct module *mod, unsigned long val)
+{
+ int nr;
+ struct mod_section *plt_sec = &mod->arch.plt;
+ struct mod_section *plt_idx_sec = &mod->arch.plt_idx;
+ struct plt_entry *plt = get_plt_entry(val, plt_sec, plt_idx_sec);
+ struct plt_idx_entry *plt_idx;
+
+ if (plt)
+ return (Elf_Addr)plt;
+
+ nr = plt_sec->num_entries;
+
+ /* There is no duplicate entry, create a new one */
+ plt = (struct plt_entry *)plt_sec->shdr->sh_addr;
+ plt[nr] = emit_plt_entry(val);
+ plt_idx = (struct plt_idx_entry *)plt_idx_sec->shdr->sh_addr;
+ plt_idx[nr] = emit_plt_idx_entry(val);
+
+ plt_sec->num_entries++;
+ plt_idx_sec->num_entries++;
+ BUG_ON(plt_sec->num_entries > plt_sec->max_entries);
+
+ return (Elf_Addr)&plt[nr];
+}
+
+static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
+{
+ return x->r_info == y->r_info && x->r_addend == y->r_addend;
+}
+
+static bool duplicate_rela(const Elf_Rela *rela, int idx)
+{
+ int i;
+
+ for (i = 0; i < idx; i++) {
+ if (is_rela_equal(&rela[i], &rela[idx]))
+ return true;
+ }
+
+ return false;
+}
+
+static void count_max_entries(Elf_Rela *relas, int num, unsigned int *plts)
+{
+ unsigned int i, type;
+
+ for (i = 0; i < num; i++) {
+ type = ELF_R_TYPE(relas[i].r_info);
+ if (type == R_LARCH_SOP_PUSH_PLT_PCREL) {
+ if (!duplicate_rela(relas, i))
+ (*plts)++;
+ }
+ }
+}
+
+int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+{
+ unsigned int i, num_plts = 0;
+
+ /*
+	 * Find the empty .plt and .plt.idx sections.
+ */
+ for (i = 0; i < ehdr->e_shnum; i++) {
+ if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
+ mod->arch.plt.shdr = sechdrs + i;
+ else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt.idx"))
+ mod->arch.plt_idx.shdr = sechdrs + i;
+ }
+
+ if (!mod->arch.plt.shdr) {
+ pr_err("%s: module PLT section(s) missing\n", mod->name);
+ return -ENOEXEC;
+ }
+ if (!mod->arch.plt_idx.shdr) {
+ pr_err("%s: module PLT.IDX section(s) missing\n", mod->name);
+ return -ENOEXEC;
+ }
+
+	/* Calculate the maximum number of entries */
+ for (i = 0; i < ehdr->e_shnum; i++) {
+ int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela);
+ Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
+ Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;
+
+ if (sechdrs[i].sh_type != SHT_RELA)
+ continue;
+
+ /* ignore relocations that operate on non-exec sections */
+ if (!(dst_sec->sh_flags & SHF_EXECINSTR))
+ continue;
+
+ count_max_entries(relas, num_rela, &num_plts);
+ }
+
+ mod->arch.plt.shdr->sh_type = SHT_NOBITS;
+ mod->arch.plt.shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ mod->arch.plt.shdr->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.plt.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_entry);
+ mod->arch.plt.num_entries = 0;
+ mod->arch.plt.max_entries = num_plts;
+
+ mod->arch.plt_idx.shdr->sh_type = SHT_NOBITS;
+ mod->arch.plt_idx.shdr->sh_flags = SHF_ALLOC;
+ mod->arch.plt_idx.shdr->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.plt_idx.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_idx_entry);
+ mod->arch.plt_idx.num_entries = 0;
+ mod->arch.plt_idx.max_entries = num_plts;
+
+ return 0;
+}
diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
new file mode 100644
index 000000000000..638427ff0d51
--- /dev/null
+++ b/arch/loongarch/kernel/module.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#define pr_fmt(fmt) "kmod: " fmt
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/numa.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+static inline bool signed_imm_check(long val, unsigned int bit)
+{
+ return -(1L << (bit - 1)) <= val && val < (1L << (bit - 1));
+}
+
+static inline bool unsigned_imm_check(unsigned long val, unsigned int bit)
+{
+ return val < (1UL << bit);
+}
+
+static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
+{
+ if (*rela_stack_top >= RELA_STACK_DEPTH)
+ return -ENOEXEC;
+
+ rela_stack[(*rela_stack_top)++] = stack_value;
+ pr_debug("%s stack_value = 0x%llx\n", __func__, stack_value);
+
+ return 0;
+}
+
+static int rela_stack_pop(s64 *stack_value, s64 *rela_stack, size_t *rela_stack_top)
+{
+ if (*rela_stack_top == 0)
+ return -ENOEXEC;
+
+ *stack_value = rela_stack[--(*rela_stack_top)];
+ pr_debug("%s stack_value = 0x%llx\n", __func__, *stack_value);
+
+ return 0;
+}
+
+static int apply_r_larch_none(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ return 0;
+}
+
+static int apply_r_larch_error(struct module *me, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+	pr_err("%s: Unsupported relocation type %u, please add support for it.\n", me->name, type);
+ return -EINVAL;
+}
+
+static int apply_r_larch_32(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ *location = v;
+ return 0;
+}
+
+static int apply_r_larch_64(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ *(Elf_Addr *)location = v;
+ return 0;
+}
+
+static int apply_r_larch_sop_push_pcrel(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ return rela_stack_push(v - (u64)location, rela_stack, rela_stack_top);
+}
+
+static int apply_r_larch_sop_push_absolute(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ return rela_stack_push(v, rela_stack, rela_stack_top);
+}
+
+static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ int err = 0;
+ s64 opr1;
+
+ err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+ err = rela_stack_push(opr1, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+ err = rela_stack_push(opr1, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int apply_r_larch_sop_push_plt_pcrel(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+
+ if (offset >= SZ_128M)
+ v = module_emit_plt_entry(mod, v);
+
+ if (offset < -SZ_128M)
+ v = module_emit_plt_entry(mod, v);
+
+ return apply_r_larch_sop_push_pcrel(mod, location, v, rela_stack, rela_stack_top, type);
+}
+
+static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ int err = 0;
+ s64 opr1, opr2, opr3;
+
+ if (type == R_LARCH_SOP_IF_ELSE) {
+ err = rela_stack_pop(&opr3, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+ }
+
+ err = rela_stack_pop(&opr2, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+ err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+
+ switch (type) {
+ case R_LARCH_SOP_AND:
+ err = rela_stack_push(opr1 & opr2, rela_stack, rela_stack_top);
+ break;
+ case R_LARCH_SOP_ADD:
+ err = rela_stack_push(opr1 + opr2, rela_stack, rela_stack_top);
+ break;
+ case R_LARCH_SOP_SUB:
+ err = rela_stack_push(opr1 - opr2, rela_stack, rela_stack_top);
+ break;
+ case R_LARCH_SOP_SL:
+ err = rela_stack_push(opr1 << opr2, rela_stack, rela_stack_top);
+ break;
+ case R_LARCH_SOP_SR:
+ err = rela_stack_push(opr1 >> opr2, rela_stack, rela_stack_top);
+ break;
+ case R_LARCH_SOP_IF_ELSE:
+ err = rela_stack_push(opr1 ? opr2 : opr3, rela_stack, rela_stack_top);
+ break;
+ default:
+		pr_err("%s: Unsupported relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ int err = 0;
+ s64 opr1;
+ union loongarch_instruction *insn = (union loongarch_instruction *)location;
+
+ err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+
+ switch (type) {
+ case R_LARCH_SOP_POP_32_U_10_12:
+ if (!unsigned_imm_check(opr1, 12))
+ goto overflow;
+
+ /* (*(uint32_t *) PC) [21 ... 10] = opr [11 ... 0] */
+ insn->reg2i12_format.immediate = opr1 & 0xfff;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_10_12:
+ if (!signed_imm_check(opr1, 12))
+ goto overflow;
+
+ insn->reg2i12_format.immediate = opr1 & 0xfff;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_10_16:
+ if (!signed_imm_check(opr1, 16))
+ goto overflow;
+
+ insn->reg2i16_format.immediate = opr1 & 0xffff;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_10_16_S2:
+ if (opr1 % 4)
+ goto unaligned;
+
+ if (!signed_imm_check(opr1, 18))
+ goto overflow;
+
+ insn->reg2i16_format.immediate = (opr1 >> 2) & 0xffff;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_5_20:
+ if (!signed_imm_check(opr1, 20))
+ goto overflow;
+
+ insn->reg1i20_format.immediate = (opr1) & 0xfffff;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_0_5_10_16_S2:
+ if (opr1 % 4)
+ goto unaligned;
+
+ if (!signed_imm_check(opr1, 23))
+ goto overflow;
+
+ opr1 >>= 2;
+ insn->reg1i21_format.immediate_l = opr1 & 0xffff;
+ insn->reg1i21_format.immediate_h = (opr1 >> 16) & 0x1f;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_0_10_10_16_S2:
+ if (opr1 % 4)
+ goto unaligned;
+
+ if (!signed_imm_check(opr1, 28))
+ goto overflow;
+
+ opr1 >>= 2;
+ insn->reg0i26_format.immediate_l = opr1 & 0xffff;
+ insn->reg0i26_format.immediate_h = (opr1 >> 16) & 0x3ff;
+ return 0;
+ case R_LARCH_SOP_POP_32_U:
+ if (!unsigned_imm_check(opr1, 32))
+ goto overflow;
+
+ /* (*(uint32_t *) PC) = opr */
+ *location = (u32)opr1;
+ return 0;
+ default:
+		pr_err("%s: Unsupported relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+
+overflow:
+ pr_err("module %s: opr1 = 0x%llx overflow! dangerous %s (%u) relocation\n",
+ mod->name, opr1, __func__, type);
+ return -ENOEXEC;
+
+unaligned:
+ pr_err("module %s: opr1 = 0x%llx unaligned! dangerous %s (%u) relocation\n",
+ mod->name, opr1, __func__, type);
+ return -ENOEXEC;
+}
+
+static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ switch (type) {
+ case R_LARCH_ADD32:
+ *(s32 *)location += v;
+ return 0;
+ case R_LARCH_ADD64:
+ *(s64 *)location += v;
+ return 0;
+ case R_LARCH_SUB32:
+ *(s32 *)location -= v;
+ return 0;
+ case R_LARCH_SUB64:
+ *(s64 *)location -= v;
+ return 0;
+ default:
+		pr_err("%s: Unsupported relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+}
+
+/*
+ * reloc_handlers_rela() - Apply a particular relocation to a module
+ * @mod: the module to apply the reloc to
+ * @location: the address at which the reloc is to be applied
+ * @v: the value of the reloc, with addend for RELA-style
+ * @rela_stack: the stack used to store relocation info, LOCAL to THIS module
+ * @rela_stack_top: index of the stack top that push/pop operations act on
+ *
+ * Return: 0 upon success, else -ERRNO
+ */
+typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type);
+
+/* The handlers for known reloc types */
+static reloc_rela_handler reloc_rela_handlers[] = {
+ [R_LARCH_NONE ... R_LARCH_SUB64] = apply_r_larch_error,
+
+ [R_LARCH_NONE] = apply_r_larch_none,
+ [R_LARCH_32] = apply_r_larch_32,
+ [R_LARCH_64] = apply_r_larch_64,
+ [R_LARCH_MARK_LA] = apply_r_larch_none,
+ [R_LARCH_MARK_PCREL] = apply_r_larch_none,
+ [R_LARCH_SOP_PUSH_PCREL] = apply_r_larch_sop_push_pcrel,
+ [R_LARCH_SOP_PUSH_ABSOLUTE] = apply_r_larch_sop_push_absolute,
+ [R_LARCH_SOP_PUSH_DUP] = apply_r_larch_sop_push_dup,
+ [R_LARCH_SOP_PUSH_PLT_PCREL] = apply_r_larch_sop_push_plt_pcrel,
+ [R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE] = apply_r_larch_sop,
+ [R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field,
+ [R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub,
+};
+
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *mod)
+{
+ int i, err;
+ unsigned int type;
+ s64 rela_stack[RELA_STACK_DEPTH];
+ size_t rela_stack_top = 0;
+ reloc_rela_handler handler;
+ void *location;
+ Elf_Addr v;
+ Elf_Sym *sym;
+ Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+
+ pr_debug("%s: Applying relocate section %u to %u\n", __func__, relsec,
+ sechdrs[relsec].sh_info);
+
+ rela_stack_top = 0;
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_R_SYM(rel[i].r_info);
+ if (IS_ERR_VALUE(sym->st_value)) {
+ /* Ignore unresolved weak symbol */
+ if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
+ continue;
+ pr_warn("%s: Unknown symbol %s\n", mod->name, strtab + sym->st_name);
+ return -ENOENT;
+ }
+
+ type = ELF_R_TYPE(rel[i].r_info);
+
+ if (type < ARRAY_SIZE(reloc_rela_handlers))
+ handler = reloc_rela_handlers[type];
+ else
+ handler = NULL;
+
+ if (!handler) {
+ pr_err("%s: Unknown relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+
+ pr_debug("type %d st_value %llx r_addend %llx loc %llx\n",
+ (int)ELF_R_TYPE(rel[i].r_info),
+ sym->st_value, rel[i].r_addend, (u64)location);
+
+ v = sym->st_value + rel[i].r_addend;
+ err = handler(mod, location, v, rela_stack, &rela_stack_top, type);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void *module_alloc(unsigned long size)
+{
+ return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+ GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0));
+}
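The R_LARCH_SOP_* relocations handled above form a small stack machine: the
PUSH operators push values derived from the symbol and the relocation site,
the arithmetic operators combine the top stack entries, and the POP_32_*
operators write the result into an instruction's immediate field after a
range check. The toy trace below is not taken from any real object file; the
symbol and site addresses are assumed, and it only sketches the idea of a
PUSH_PCREL / PUSH_ABSOLUTE / SR / POP_32_S_10_16 sequence:

	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	#define RELA_STACK_DEPTH 16

	static int64_t stack[RELA_STACK_DEPTH];
	static size_t  top;

	static void    push(int64_t v) { stack[top++] = v; }
	static int64_t pop(void)       { return stack[--top]; }

	/* same range test as the kernel helper: does val fit a signed bit-wide field? */
	static bool signed_imm_check(long val, unsigned int bit)
	{
		return -(1L << (bit - 1)) <= val && val < (1L << (bit - 1));
	}

	int main(void)
	{
		int64_t sym = 0x1200080a0LL;	/* assumed symbol address */
		int64_t pc  = 0x120004020LL;	/* assumed relocation site */
		int64_t shift, value, imm;

		push(sym - pc);			/* R_LARCH_SOP_PUSH_PCREL */
		push(2);			/* R_LARCH_SOP_PUSH_ABSOLUTE */

		shift = pop();			/* R_LARCH_SOP_SR: opr1 >> opr2 */
		value = pop();
		push(value >> shift);

		imm = pop();			/* R_LARCH_SOP_POP_32_S_10_16 (sketch) */
		if (!signed_imm_check(imm, 16))
			printf("overflow, the relocation would be rejected\n");
		else
			printf("immediate field gets %#llx\n", (long long)(imm & 0xffff));
		return 0;
	}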
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
new file mode 100644
index 000000000000..a76f547a5aa3
--- /dev/null
+++ b/arch/loongarch/kernel/numa.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Xiang Gao <gaoxiang@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/export.h>
+#include <linux/nodemask.h>
+#include <linux/swap.h>
+#include <linux/memblock.h>
+#include <linux/pfn.h>
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <asm/bootinfo.h>
+#include <asm/loongson.h>
+#include <asm/numa.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+#include <asm/time.h>
+
+int numa_off;
+struct pglist_data *node_data[MAX_NUMNODES];
+unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];
+
+EXPORT_SYMBOL(node_data);
+EXPORT_SYMBOL(node_distances);
+
+static struct numa_meminfo numa_meminfo;
+cpumask_t cpus_on_node[MAX_NUMNODES];
+cpumask_t phys_cpus_on_node[MAX_NUMNODES];
+EXPORT_SYMBOL(cpus_on_node);
+
+/*
+ * apicid, cpu, node mappings
+ */
+s16 __cpuid_to_node[CONFIG_NR_CPUS] = {
+ [0 ... CONFIG_NR_CPUS - 1] = NUMA_NO_NODE
+};
+EXPORT_SYMBOL(__cpuid_to_node);
+
+nodemask_t numa_nodes_parsed __initdata;
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init pcpu_cpu_to_node(int cpu)
+{
+ return early_cpu_to_node(cpu);
+}
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+ if (early_cpu_to_node(from) == early_cpu_to_node(to))
+ return LOCAL_DISTANCE;
+ else
+ return REMOTE_DISTANCE;
+}
+
+void __init pcpu_populate_pte(unsigned long addr)
+{
+ pgd_t *pgd = pgd_offset_k(addr);
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ pud_t *pud;
+ pmd_t *pmd;
+
+ if (p4d_none(*p4d)) {
+ pud_t *new;
+
+ new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ pgd_populate(&init_mm, pgd, new);
+#ifndef __PAGETABLE_PUD_FOLDED
+ pud_init((unsigned long)new, (unsigned long)invalid_pmd_table);
+#endif
+ }
+
+ pud = pud_offset(p4d, addr);
+ if (pud_none(*pud)) {
+ pmd_t *new;
+
+ new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ pud_populate(&init_mm, pud, new);
+#ifndef __PAGETABLE_PMD_FOLDED
+ pmd_init((unsigned long)new, (unsigned long)invalid_pte_table);
+#endif
+ }
+
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd)) {
+ pte_t *new;
+
+ new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ pmd_populate_kernel(&init_mm, pmd, new);
+ }
+}
+
+void __init setup_per_cpu_areas(void)
+{
+ unsigned long delta;
+ unsigned int cpu;
+ int rc = -EINVAL;
+
+ if (pcpu_chosen_fc == PCPU_FC_AUTO) {
+ if (nr_node_ids >= 8)
+ pcpu_chosen_fc = PCPU_FC_PAGE;
+ else
+ pcpu_chosen_fc = PCPU_FC_EMBED;
+ }
+
+ /*
+ * Always reserve area for module percpu variables. That's
+ * what the legacy allocator did.
+ */
+ if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+ rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+ PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
+ pcpu_cpu_distance, pcpu_cpu_to_node);
+ if (rc < 0)
+ pr_warn("%s allocator failed (%d), falling back to page size\n",
+ pcpu_fc_names[pcpu_chosen_fc], rc);
+ }
+ if (rc < 0)
+ rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_cpu_to_node);
+ if (rc < 0)
+ panic("cannot initialize percpu area (err=%d)", rc);
+
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu)
+ __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
+/*
+ * Get the node id from a logical cpu number.
+ * __cpuid_to_node maps physical cpu id to node, so we
+ * should use cpu_logical_map(cpu) to index it.
+ *
+ * This routine is only used in the early boot phase;
+ * after setup_per_cpu_areas() has been called and numa_node
+ * is initialized, cpu_to_node() is used instead.
+ */
+int early_cpu_to_node(int cpu)
+{
+ int physid = cpu_logical_map(cpu);
+
+ if (physid < 0)
+ return NUMA_NO_NODE;
+
+ return __cpuid_to_node[physid];
+}
+
+void __init early_numa_add_cpu(int cpuid, s16 node)
+{
+ int cpu = __cpu_number_map[cpuid];
+
+ if (cpu < 0)
+ return;
+
+ cpumask_set_cpu(cpu, &cpus_on_node[node]);
+ cpumask_set_cpu(cpuid, &phys_cpus_on_node[node]);
+}
+
+void numa_add_cpu(unsigned int cpu)
+{
+ int nid = cpu_to_node(cpu);
+ cpumask_set_cpu(cpu, &cpus_on_node[nid]);
+}
+
+void numa_remove_cpu(unsigned int cpu)
+{
+ int nid = cpu_to_node(cpu);
+ cpumask_clear_cpu(cpu, &cpus_on_node[nid]);
+}
+
+static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
+ struct numa_meminfo *mi)
+{
+ /* ignore zero length blks */
+ if (start == end)
+ return 0;
+
+ /* whine about and ignore invalid blks */
+ if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
+ pr_warn("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
+ nid, start, end - 1);
+ return 0;
+ }
+
+ if (mi->nr_blks >= NR_NODE_MEMBLKS) {
+ pr_err("NUMA: too many memblk ranges\n");
+ return -EINVAL;
+ }
+
+ mi->blk[mi->nr_blks].start = PFN_ALIGN(start);
+ mi->blk[mi->nr_blks].end = PFN_ALIGN(end - PAGE_SIZE + 1);
+ mi->blk[mi->nr_blks].nid = nid;
+ mi->nr_blks++;
+ return 0;
+}
+
+/**
+ * numa_add_memblk - Add one numa_memblk to numa_meminfo
+ * @nid: NUMA node ID of the new memblk
+ * @start: Start address of the new memblk
+ * @end: End address of the new memblk
+ *
+ * Add a new memblk to the default numa_meminfo.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init numa_add_memblk(int nid, u64 start, u64 end)
+{
+ return numa_add_memblk_to(nid, start, end, &numa_meminfo);
+}
+
+static void __init alloc_node_data(int nid)
+{
+ void *nd;
+ unsigned long nd_pa;
+ size_t nd_sz = roundup(sizeof(pg_data_t), PAGE_SIZE);
+
+ nd_pa = memblock_phys_alloc_try_nid(nd_sz, SMP_CACHE_BYTES, nid);
+ if (!nd_pa) {
+ pr_err("Cannot find %zu Byte for node_data (initial node: %d)\n", nd_sz, nid);
+ return;
+ }
+
+ nd = __va(nd_pa);
+
+ node_data[nid] = nd;
+ memset(nd, 0, sizeof(pg_data_t));
+}
+
+static void __init node_mem_init(unsigned int node)
+{
+ unsigned long start_pfn, end_pfn;
+ unsigned long node_addrspace_offset;
+
+ node_addrspace_offset = nid_to_addrbase(node);
+ pr_info("Node%d's addrspace_offset is 0x%lx\n",
+ node, node_addrspace_offset);
+
+ get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
+ pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n",
+ node, start_pfn, end_pfn);
+
+ alloc_node_data(node);
+}
+
+#ifdef CONFIG_ACPI_NUMA
+
+/*
+ * Sanity check to catch more bad NUMA configurations (they are amazingly
+ * common). Make sure the nodes cover all memory.
+ */
+static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
+{
+ int i;
+ u64 numaram, biosram;
+
+ numaram = 0;
+ for (i = 0; i < mi->nr_blks; i++) {
+ u64 s = mi->blk[i].start >> PAGE_SHIFT;
+ u64 e = mi->blk[i].end >> PAGE_SHIFT;
+
+ numaram += e - s;
+ numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
+ if ((s64)numaram < 0)
+ numaram = 0;
+ }
+ max_pfn = max_low_pfn;
+ biosram = max_pfn - absent_pages_in_range(0, max_pfn);
+
+ BUG_ON((s64)(biosram - numaram) >= (1 << (20 - PAGE_SHIFT)));
+ return true;
+}
+
+static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type)
+{
+ static unsigned long num_physpages;
+
+ num_physpages += (size >> PAGE_SHIFT);
+ pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
+ node, type, start, size);
+ pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
+ start >> PAGE_SHIFT, (start + size) >> PAGE_SHIFT, num_physpages);
+ memblock_set_node(start, size, &memblock.memory, node);
+}
+
+/*
+ * add_numamem_region
+ *
+ * Add a usable memory region described by the BIOS. The
+ * routine computes the intersection of the BIOS region
+ * with each node's region and adds it to that node's
+ * memblock pool.
+ *
+ */
+static void __init add_numamem_region(u64 start, u64 end, u32 type)
+{
+ u32 i;
+ u64 ofs = start;
+
+ if (start >= end) {
+ pr_debug("Invalid region: %016llx-%016llx\n", start, end);
+ return;
+ }
+
+ for (i = 0; i < numa_meminfo.nr_blks; i++) {
+ struct numa_memblk *mb = &numa_meminfo.blk[i];
+
+ if (ofs > mb->end)
+ continue;
+
+ if (end > mb->end) {
+ add_node_intersection(mb->nid, ofs, mb->end - ofs, type);
+ ofs = mb->end;
+ } else {
+ add_node_intersection(mb->nid, ofs, end - ofs, type);
+ break;
+ }
+ }
+}
+
+static void __init init_node_memblock(void)
+{
+ u32 mem_type;
+ u64 mem_end, mem_start, mem_size;
+ efi_memory_desc_t *md;
+
+ /* Parse memory information and activate */
+ for_each_efi_memory_desc(md) {
+ mem_type = md->type;
+ mem_start = md->phys_addr;
+ mem_size = md->num_pages << EFI_PAGE_SHIFT;
+ mem_end = mem_start + mem_size;
+
+ switch (mem_type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_PERSISTENT_MEMORY:
+ case EFI_CONVENTIONAL_MEMORY:
+ add_numamem_region(mem_start, mem_end, mem_type);
+ break;
+ case EFI_PAL_CODE:
+ case EFI_UNUSABLE_MEMORY:
+ case EFI_ACPI_RECLAIM_MEMORY:
+ add_numamem_region(mem_start, mem_end, mem_type);
+ fallthrough;
+ case EFI_RESERVED_TYPE:
+ case EFI_RUNTIME_SERVICES_CODE:
+ case EFI_RUNTIME_SERVICES_DATA:
+ case EFI_MEMORY_MAPPED_IO:
+ case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+ pr_info("Resvd: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
+ mem_type, mem_start, mem_size);
+ break;
+ }
+ }
+}
+
+static void __init numa_default_distance(void)
+{
+ int row, col;
+
+ for (row = 0; row < MAX_NUMNODES; row++)
+ for (col = 0; col < MAX_NUMNODES; col++) {
+ if (col == row)
+ node_distances[row][col] = LOCAL_DISTANCE;
+ else
+			/* We assume one node per package here!
+			 *
+			 * A SLIT should be provided to override this
+			 * default for multiple nodes per package.
+ */
+ node_distances[row][col] = REMOTE_DISTANCE;
+ }
+}
+
+int __init init_numa_memory(void)
+{
+ int i;
+ int ret;
+ int node;
+
+ for (i = 0; i < NR_CPUS; i++)
+ set_cpuid_to_node(i, NUMA_NO_NODE);
+
+ numa_default_distance();
+ nodes_clear(numa_nodes_parsed);
+ nodes_clear(node_possible_map);
+ nodes_clear(node_online_map);
+ memset(&numa_meminfo, 0, sizeof(numa_meminfo));
+
+ /* Parse SRAT and SLIT if provided by firmware. */
+ ret = acpi_numa_init();
+ if (ret < 0)
+ return ret;
+
+ node_possible_map = numa_nodes_parsed;
+ if (WARN_ON(nodes_empty(node_possible_map)))
+ return -EINVAL;
+
+ init_node_memblock();
+ if (numa_meminfo_cover_memory(&numa_meminfo) == false)
+ return -EINVAL;
+
+ for_each_node_mask(node, node_possible_map) {
+ node_mem_init(node);
+ node_set_online(node);
+ }
+ max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+
+ setup_nr_node_ids();
+ loongson_sysconf.nr_nodes = nr_node_ids;
+ loongson_sysconf.cores_per_node = cpumask_weight(&phys_cpus_on_node[0]);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(init_numa_memory);
+#endif
+
+void __init paging_init(void)
+{
+ unsigned int node;
+ unsigned long zones_size[MAX_NR_ZONES] = {0, };
+
+ for_each_online_node(node) {
+ unsigned long start_pfn, end_pfn;
+
+ get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
+
+ if (end_pfn > max_low_pfn)
+ max_low_pfn = end_pfn;
+ }
+#ifdef CONFIG_ZONE_DMA32
+ zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+ zones_size[ZONE_NORMAL] = max_low_pfn;
+ free_area_init(zones_size);
+}
+
+void __init mem_init(void)
+{
+ high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
+ memblock_free_all();
+ setup_zero_pages(); /* This comes from node 0 */
+}
+
+int pcibus_to_node(struct pci_bus *bus)
+{
+ return dev_to_node(&bus->dev);
+}
+EXPORT_SYMBOL(pcibus_to_node);
diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c
new file mode 100644
index 000000000000..1effc73850fe
--- /dev/null
+++ b/arch/loongarch/kernel/proc.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/idle.h>
+#include <asm/processor.h>
+#include <asm/time.h>
+
+/*
+ * No lock; only written during early bootup by CPU 0.
+ */
+static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
+
+int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
+}
+
+int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
+{
+ return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
+}
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ unsigned long n = (unsigned long) v - 1;
+ unsigned int version = cpu_data[n].processor_id & 0xff;
+ unsigned int fp_version = cpu_data[n].fpu_vers;
+ struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
+
+#ifdef CONFIG_SMP
+ if (!cpu_online(n))
+ return 0;
+#endif
+
+ /*
+ * For the first processor also print the system type
+ */
+ if (n == 0)
+ seq_printf(m, "system type\t\t: %s\n\n", get_system_type());
+
+ seq_printf(m, "processor\t\t: %ld\n", n);
+ seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
+ seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
+ seq_printf(m, "CPU Family\t\t: %s\n", __cpu_family[n]);
+ seq_printf(m, "Model Name\t\t: %s\n", __cpu_full_name[n]);
+ seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version);
+ seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version);
+ seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n",
+ cpu_clock_freq / 1000000, (cpu_clock_freq / 10000) % 100);
+ seq_printf(m, "BogoMIPS\t\t: %llu.%02llu\n",
+ (lpj_fine * cpu_clock_freq / const_clock_freq) / (500000/HZ),
+ ((lpj_fine * cpu_clock_freq / const_clock_freq) / (5000/HZ)) % 100);
+ seq_printf(m, "TLB Entries\t\t: %d\n", cpu_data[n].tlbsize);
+ seq_printf(m, "Address Sizes\t\t: %d bits physical, %d bits virtual\n",
+ cpu_pabits + 1, cpu_vabits + 1);
+
+ seq_printf(m, "ISA\t\t\t:");
+ if (cpu_has_loongarch32)
+ seq_printf(m, " loongarch32");
+ if (cpu_has_loongarch64)
+ seq_printf(m, " loongarch64");
+ seq_printf(m, "\n");
+
+ seq_printf(m, "Features\t\t:");
+ if (cpu_has_cpucfg) seq_printf(m, " cpucfg");
+ if (cpu_has_lam) seq_printf(m, " lam");
+ if (cpu_has_ual) seq_printf(m, " ual");
+ if (cpu_has_fpu) seq_printf(m, " fpu");
+ if (cpu_has_lsx) seq_printf(m, " lsx");
+ if (cpu_has_lasx) seq_printf(m, " lasx");
+ if (cpu_has_complex) seq_printf(m, " complex");
+ if (cpu_has_crypto) seq_printf(m, " crypto");
+ if (cpu_has_lvz) seq_printf(m, " lvz");
+ if (cpu_has_lbt_x86) seq_printf(m, " lbt_x86");
+ if (cpu_has_lbt_arm) seq_printf(m, " lbt_arm");
+ if (cpu_has_lbt_mips) seq_printf(m, " lbt_mips");
+ seq_printf(m, "\n");
+
+ seq_printf(m, "Hardware Watchpoint\t: %s",
+ cpu_has_watch ? "yes, " : "no\n");
+ if (cpu_has_watch) {
+ seq_printf(m, "iwatch count: %d, dwatch count: %d\n",
+ cpu_data[n].watch_ireg_count, cpu_data[n].watch_dreg_count);
+ }
+
+ proc_cpuinfo_notifier_args.m = m;
+ proc_cpuinfo_notifier_args.n = n;
+
+ raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
+ &proc_cpuinfo_notifier_args);
+
+ seq_printf(m, "\n");
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ unsigned long i = *pos;
+
+ return i < NR_CPUS ? (void *)(i + 1) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
new file mode 100644
index 000000000000..6d944d65f600
--- /dev/null
+++ b/arch/loongarch/kernel/process.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
+ * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/export.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/personality.h>
+#include <linux/sys.h>
+#include <linux/completion.h>
+#include <linux/kallsyms.h>
+#include <linux/random.h>
+#include <linux/prctl.h>
+#include <linux/nmi.h>
+
+#include <asm/asm.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/elf.h>
+#include <asm/fpu.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/loongarch.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/reg.h>
+#include <asm/vdso.h>
+
+/*
+ * Idle related variables and functions
+ */
+
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
+EXPORT_SYMBOL(boot_option_idle_override);
+
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+ play_dead();
+}
+#endif
+
+asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
+
+void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
+{
+ unsigned long crmd;
+ unsigned long prmd;
+ unsigned long euen;
+
+ /* New thread loses kernel privileges. */
+ crmd = regs->csr_crmd & ~(PLV_MASK);
+ crmd |= PLV_USER;
+ regs->csr_crmd = crmd;
+
+ prmd = regs->csr_prmd & ~(PLV_MASK);
+ prmd |= PLV_USER;
+ regs->csr_prmd = prmd;
+
+ euen = regs->csr_euen & ~(CSR_EUEN_FPEN);
+ regs->csr_euen = euen;
+ lose_fpu(0);
+
+ clear_thread_flag(TIF_LSX_CTX_LIVE);
+ clear_thread_flag(TIF_LASX_CTX_LIVE);
+ clear_used_math();
+ regs->csr_era = pc;
+ regs->regs[3] = sp;
+}
+
+void exit_thread(struct task_struct *tsk)
+{
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+ /*
+ * Save any process state which is live in hardware registers to the
+ * parent context prior to duplication. This prevents the new child
+ * state becoming stale if the parent is preempted before copy_thread()
+ * gets a chance to save the parent's live hardware registers to the
+ * child context.
+ */
+ preempt_disable();
+
+ if (is_fpu_owner())
+ save_fp(current);
+
+ preempt_enable();
+
+ if (used_math())
+ memcpy(dst, src, sizeof(struct task_struct));
+ else
+ memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));
+
+ return 0;
+}
+
+/*
+ * Copy architecture-specific thread state
+ */
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+ unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
+{
+ unsigned long childksp;
+ struct pt_regs *childregs, *regs = current_pt_regs();
+
+ childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
+
+ /* set up new TSS. */
+ childregs = (struct pt_regs *) childksp - 1;
+ /* Put the stack after the struct pt_regs. */
+ childksp = (unsigned long) childregs;
+ p->thread.csr_euen = 0;
+ p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
+ p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
+ p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ /* kernel thread */
+ p->thread.reg23 = usp; /* fn */
+ p->thread.reg24 = kthread_arg;
+ p->thread.reg03 = childksp;
+ p->thread.reg01 = (unsigned long) ret_from_kernel_thread;
+ memset(childregs, 0, sizeof(struct pt_regs));
+ childregs->csr_euen = p->thread.csr_euen;
+ childregs->csr_crmd = p->thread.csr_crmd;
+ childregs->csr_prmd = p->thread.csr_prmd;
+ childregs->csr_ecfg = p->thread.csr_ecfg;
+ return 0;
+ }
+
+ /* user thread */
+ *childregs = *regs;
+ childregs->regs[4] = 0; /* Child gets zero as return value */
+ if (usp)
+ childregs->regs[3] = usp;
+
+ p->thread.reg03 = (unsigned long) childregs;
+ p->thread.reg01 = (unsigned long) ret_from_fork;
+
+ /*
+ * New tasks lose permission to use the fpu. This accelerates context
+ * switching for most programs since they don't use the fpu.
+ */
+ childregs->csr_euen = 0;
+
+ clear_tsk_thread_flag(p, TIF_USEDFPU);
+ clear_tsk_thread_flag(p, TIF_USEDSIMD);
+ clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
+ clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);
+
+ if (clone_flags & CLONE_SETTLS)
+ childregs->regs[2] = tls;
+
+ return 0;
+}
+
+unsigned long __get_wchan(struct task_struct *task)
+{
+ return 0;
+}
+
+unsigned long stack_top(void)
+{
+ unsigned long top = TASK_SIZE & PAGE_MASK;
+
+ /* Space for the VDSO & data page */
+ top -= PAGE_ALIGN(current->thread.vdso->size);
+ top -= PAGE_SIZE;
+
+ /* Space to randomize the VDSO base */
+ if (current->flags & PF_RANDOMIZE)
+ top -= VDSO_RANDOMIZE_SIZE;
+
+ return top;
+}
+
+/*
+ * Don't forget that the stack pointer must be aligned on an 8-byte
+ * boundary for the 32-bit ABI and a 16-byte boundary for the 64-bit ABI.
+ */
+unsigned long arch_align_stack(unsigned long sp)
+{
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ sp -= get_random_int() & ~PAGE_MASK;
+
+ return sp & STACK_ALIGN;
+}
+
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
+{
+ nmi_cpu_backtrace(get_irq_regs());
+ cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
+
+static void raise_backtrace(cpumask_t *mask)
+{
+ call_single_data_t *csd;
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ /*
+ * If we previously sent an IPI to the target CPU & it hasn't
+ * cleared its bit in the busy cpumask then it didn't handle
+ * our previous IPI & it's not safe for us to reuse the
+ * call_single_data_t.
+ */
+ if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+ pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+ cpu);
+ continue;
+ }
+
+ csd = &per_cpu(backtrace_csd, cpu);
+ csd->func = handle_backtrace;
+ smp_call_function_single_async(cpu, csd);
+ }
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+{
+ nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
+}
+
+#ifdef CONFIG_64BIT
+void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
+{
+ unsigned int i;
+
+ for (i = LOONGARCH_EF_R1; i <= LOONGARCH_EF_R31; i++) {
+ uregs[i] = regs->regs[i - LOONGARCH_EF_R0];
+ }
+
+ uregs[LOONGARCH_EF_ORIG_A0] = regs->orig_a0;
+ uregs[LOONGARCH_EF_CSR_ERA] = regs->csr_era;
+ uregs[LOONGARCH_EF_CSR_BADV] = regs->csr_badvaddr;
+ uregs[LOONGARCH_EF_CSR_CRMD] = regs->csr_crmd;
+ uregs[LOONGARCH_EF_CSR_PRMD] = regs->csr_prmd;
+ uregs[LOONGARCH_EF_CSR_EUEN] = regs->csr_euen;
+ uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg;
+ uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat;
+}
+#endif /* CONFIG_64BIT */
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
new file mode 100644
index 000000000000..e6ab87948e1d
--- /dev/null
+++ b/arch/loongarch/kernel/ptrace.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1992 Ross Biro
+ * Copyright (C) Linus Torvalds
+ * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999 MIPS Technologies, Inc.
+ * Copyright (C) 2000 Ulf Carlsson
+ */
+#include <linux/kernel.h>
+#include <linux/audit.h>
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/ptrace.h>
+#include <linux/regset.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/security.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/seccomp.h>
+#include <linux/uaccess.h>
+
+#include <asm/byteorder.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/fpu.h>
+#include <asm/loongarch.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/reg.h>
+#include <asm/syscall.h>
+
+static void init_fp_ctx(struct task_struct *target)
+{
+ /* The target already has context */
+ if (tsk_used_math(target))
+ return;
+
+ /* Begin with data registers set to all 1s... */
+ memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
+ set_stopped_child_used_math(target);
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching.
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ /* Don't load the watchpoint registers for the ex-child. */
+ clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+/* regset get/set implementations */
+
+static int gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ int r;
+ struct pt_regs *regs = task_pt_regs(target);
+
+ r = membuf_write(&to, &regs->regs, sizeof(u64) * GPR_NUM);
+ r = membuf_write(&to, &regs->orig_a0, sizeof(u64));
+ r = membuf_write(&to, &regs->csr_era, sizeof(u64));
+ r = membuf_write(&to, &regs->csr_badvaddr, sizeof(u64));
+
+ return r;
+}
+
+static int gpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int err;
+ int a0_start = sizeof(u64) * GPR_NUM;
+ int era_start = a0_start + sizeof(u64);
+ int badvaddr_start = era_start + sizeof(u64);
+ struct pt_regs *regs = task_pt_regs(target);
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->regs,
+ 0, a0_start);
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->orig_a0,
+ a0_start, a0_start + sizeof(u64));
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->csr_era,
+ era_start, era_start + sizeof(u64));
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->csr_badvaddr,
+ badvaddr_start, badvaddr_start + sizeof(u64));
+
+ return err;
+}
+
+
+/*
+ * Get the general floating-point registers.
+ */
+static int gfpr_get(struct task_struct *target, struct membuf *to)
+{
+ return membuf_write(to, &target->thread.fpu.fpr,
+ sizeof(elf_fpreg_t) * NUM_FPU_REGS);
+}
+
+static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
+{
+ int i, r;
+ u64 fpr_val;
+
+ BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
+ r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
+ }
+
+ return r;
+}
+
+/*
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCC and FCSR registers separately.
+ */
+static int fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ int r;
+
+ if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+ r = gfpr_get(target, &to);
+ else
+ r = gfpr_get_simd(target, &to);
+
+ r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
+ r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));
+
+ return r;
+}
+
+static int gfpr_set(struct task_struct *target,
+ unsigned int *pos, unsigned int *count,
+ const void **kbuf, const void __user **ubuf)
+{
+ return user_regset_copyin(pos, count, kbuf, ubuf,
+ &target->thread.fpu.fpr,
+ 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+static int gfpr_set_simd(struct task_struct *target,
+ unsigned int *pos, unsigned int *count,
+ const void **kbuf, const void __user **ubuf)
+{
+ int i, err;
+ u64 fpr_val;
+
+ BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+ for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
+ err = user_regset_copyin(pos, count, kbuf, ubuf,
+ &fpr_val, i * sizeof(elf_fpreg_t),
+ (i + 1) * sizeof(elf_fpreg_t));
+ if (err)
+ return err;
+ set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
+ }
+
+ return 0;
+}
+
+/*
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCC register separately.
+ */
+static int fpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+ const int fcc_end = fcc_start + sizeof(u64);
+ int err;
+
+ BUG_ON(count % sizeof(elf_fpreg_t));
+ if (pos + count > sizeof(elf_fpregset_t))
+ return -EIO;
+
+ init_fp_ctx(target);
+
+ if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+ err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
+ else
+ err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
+ if (err)
+ return err;
+
+ if (count > 0)
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fcc,
+ fcc_start, fcc_end);
+
+ return err;
+}
+
+static int cfg_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ int i, r;
+ u32 cfg_val;
+
+ i = 0;
+ while (to.left > 0) {
+ cfg_val = read_cpucfg(i++);
+ r = membuf_write(&to, &cfg_val, sizeof(u32));
+ }
+
+ return r;
+}
+
+/*
+ * CFG registers are read-only.
+ */
+static int cfg_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return 0;
+}
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(r0, regs[0]),
+ REG_OFFSET_NAME(r1, regs[1]),
+ REG_OFFSET_NAME(r2, regs[2]),
+ REG_OFFSET_NAME(r3, regs[3]),
+ REG_OFFSET_NAME(r4, regs[4]),
+ REG_OFFSET_NAME(r5, regs[5]),
+ REG_OFFSET_NAME(r6, regs[6]),
+ REG_OFFSET_NAME(r7, regs[7]),
+ REG_OFFSET_NAME(r8, regs[8]),
+ REG_OFFSET_NAME(r9, regs[9]),
+ REG_OFFSET_NAME(r10, regs[10]),
+ REG_OFFSET_NAME(r11, regs[11]),
+ REG_OFFSET_NAME(r12, regs[12]),
+ REG_OFFSET_NAME(r13, regs[13]),
+ REG_OFFSET_NAME(r14, regs[14]),
+ REG_OFFSET_NAME(r15, regs[15]),
+ REG_OFFSET_NAME(r16, regs[16]),
+ REG_OFFSET_NAME(r17, regs[17]),
+ REG_OFFSET_NAME(r18, regs[18]),
+ REG_OFFSET_NAME(r19, regs[19]),
+ REG_OFFSET_NAME(r20, regs[20]),
+ REG_OFFSET_NAME(r21, regs[21]),
+ REG_OFFSET_NAME(r22, regs[22]),
+ REG_OFFSET_NAME(r23, regs[23]),
+ REG_OFFSET_NAME(r24, regs[24]),
+ REG_OFFSET_NAME(r25, regs[25]),
+ REG_OFFSET_NAME(r26, regs[26]),
+ REG_OFFSET_NAME(r27, regs[27]),
+ REG_OFFSET_NAME(r28, regs[28]),
+ REG_OFFSET_NAME(r29, regs[29]),
+ REG_OFFSET_NAME(r30, regs[30]),
+ REG_OFFSET_NAME(r31, regs[31]),
+ REG_OFFSET_NAME(orig_a0, orig_a0),
+ REG_OFFSET_NAME(csr_era, csr_era),
+ REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
+ REG_OFFSET_NAME(csr_crmd, csr_crmd),
+ REG_OFFSET_NAME(csr_prmd, csr_prmd),
+ REG_OFFSET_NAME(csr_euen, csr_euen),
+ REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
+ REG_OFFSET_NAME(csr_estat, csr_estat),
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
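regs_query_register_offset() is a plain linear scan of regoffset_table, so a caller can map a register name to a byte offset and then read that slot out of a struct pt_regs. A minimal in-kernel sketch of such a caller, assuming it already holds a valid pt_regs pointer (the helper name is hypothetical):

    /* Hypothetical helper: look up a register by name and read it from regs. */
    static unsigned long read_reg_by_name(struct pt_regs *regs, const char *name)
    {
        int offset = regs_query_register_offset(name);

        if (offset < 0)
            return 0;    /* unknown register name */
        return *(unsigned long *)((char *)regs + offset);
    }

For example, read_reg_by_name(task_pt_regs(task), "csr_era") would return the saved exception return address.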
+
+enum loongarch_regset {
+ REGSET_GPR,
+ REGSET_FPR,
+ REGSET_CPUCFG,
+};
+
+static const struct user_regset loongarch64_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(elf_greg_t),
+ .align = sizeof(elf_greg_t),
+ .regset_get = gpr_get,
+ .set = gpr_set,
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .regset_get = fpr_get,
+ .set = fpr_set,
+ },
+ [REGSET_CPUCFG] = {
+ .core_note_type = NT_LOONGARCH_CPUCFG,
+ .n = 64,
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = cfg_get,
+ .set = cfg_set,
+ },
+};
+
+static const struct user_regset_view user_loongarch64_view = {
+ .name = "loongarch64",
+ .e_machine = ELF_ARCH,
+ .regsets = loongarch64_regsets,
+ .n = ARRAY_SIZE(loongarch64_regsets),
+};
+
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_loongarch64_view;
+}
+
+static inline int read_user(struct task_struct *target, unsigned long addr,
+ unsigned long __user *data)
+{
+ unsigned long tmp = 0;
+
+ switch (addr) {
+ case 0 ... 31:
+ tmp = task_pt_regs(target)->regs[addr];
+ break;
+ case ARG0:
+ tmp = task_pt_regs(target)->orig_a0;
+ break;
+ case PC:
+ tmp = task_pt_regs(target)->csr_era;
+ break;
+ case BADVADDR:
+ tmp = task_pt_regs(target)->csr_badvaddr;
+ break;
+ default:
+ return -EIO;
+ }
+
+ return put_user(tmp, data);
+}
+
+static inline int write_user(struct task_struct *target, unsigned long addr,
+ unsigned long data)
+{
+ switch (addr) {
+ case 0 ... 31:
+ task_pt_regs(target)->regs[addr] = data;
+ break;
+ case ARG0:
+ task_pt_regs(target)->orig_a0 = data;
+ break;
+ case PC:
+ task_pt_regs(target)->csr_era = data;
+ break;
+ case BADVADDR:
+ task_pt_regs(target)->csr_badvaddr = data;
+ break;
+ default:
+ return -EIO;
+ }
+
+ return 0;
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret;
+ unsigned long __user *datap = (void __user *) data;
+
+ switch (request) {
+ case PTRACE_PEEKUSR:
+ ret = read_user(child, addr, datap);
+ break;
+
+ case PTRACE_POKEUSR:
+ ret = write_user(child, addr, data);
+ break;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
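read_user()/write_user() take the register index directly as the ptrace addr argument: 0-31 select the GPRs, and ARG0, PC and BADVADDR select the extra slots. A userspace sketch of a tracer peeking one GPR this way; the wrapper name is hypothetical, and glibc spells the request PTRACE_PEEKUSER:

    /* Sketch: peek GPR 'index' (0..31) of a stopped tracee. */
    #include <errno.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    long peek_gpr(pid_t pid, int index)
    {
        long val;

        errno = 0;
        val = ptrace(PTRACE_PEEKUSER, pid, (void *)(long)index, NULL);
        if (val == -1 && errno)
            perror("PTRACE_PEEKUSER");
        return val;
    }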
diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c
new file mode 100644
index 000000000000..2b86469e4718
--- /dev/null
+++ b/arch/loongarch/kernel/reset.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+
+#include <acpi/reboot.h>
+#include <asm/compiler.h>
+#include <asm/idle.h>
+#include <asm/loongarch.h>
+#include <asm/reboot.h>
+
+static void default_halt(void)
+{
+ local_irq_disable();
+ clear_csr_ecfg(ECFG0_IM);
+
+ pr_notice("\n\n** You can safely turn off the power now **\n\n");
+ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
+
+ while (true) {
+ __arch_cpu_idle();
+ }
+}
+
+static void default_poweroff(void)
+{
+#ifdef CONFIG_EFI
+ efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
+#endif
+ while (true) {
+ __arch_cpu_idle();
+ }
+}
+
+static void default_restart(void)
+{
+#ifdef CONFIG_EFI
+ if (efi_capsule_pending(NULL))
+ efi_reboot(REBOOT_WARM, NULL);
+ else
+ efi_reboot(REBOOT_COLD, NULL);
+#endif
+ if (!acpi_disabled)
+ acpi_reboot();
+
+ while (true) {
+ __arch_cpu_idle();
+ }
+}
+
+void (*pm_restart)(void);
+EXPORT_SYMBOL(pm_restart);
+
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+void machine_halt(void)
+{
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ default_halt();
+}
+
+void machine_power_off(void)
+{
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ pm_power_off();
+}
+
+void machine_restart(char *command)
+{
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ do_kernel_restart(command);
+ pm_restart();
+}
+
+static int __init loongarch_reboot_setup(void)
+{
+ pm_restart = default_restart;
+ pm_power_off = default_poweroff;
+
+ return 0;
+}
+
+arch_initcall(loongarch_reboot_setup);
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
new file mode 100644
index 000000000000..185e4035811a
--- /dev/null
+++ b/arch/loongarch/kernel/setup.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 1995 Waldorf Electronics
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
+ * Copyright (C) 1996 Stoned Elipot
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/screen_info.h>
+#include <linux/memblock.h>
+#include <linux/initrd.h>
+#include <linux/ioport.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+#include <linux/pfn.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/device.h>
+#include <linux/dma-map-ops.h>
+#include <linux/swiotlb.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/cache.h>
+#include <asm/cpu.h>
+#include <asm/dma.h>
+#include <asm/efi.h>
+#include <asm/loongson.h>
+#include <asm/numa.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/smp.h>
+#include <asm/time.h>
+
+#define SMBIOS_BIOSSIZE_OFFSET 0x09
+#define SMBIOS_BIOSEXTERN_OFFSET 0x13
+#define SMBIOS_FREQLOW_OFFSET 0x16
+#define SMBIOS_FREQHIGH_OFFSET 0x17
+#define SMBIOS_FREQLOW_MASK 0xFF
+#define SMBIOS_CORE_PACKAGE_OFFSET 0x23
+#define LOONGSON_EFI_ENABLE (1 << 3)
+
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
+
+unsigned long fw_arg0, fw_arg1;
+DEFINE_PER_CPU(unsigned long, kernelsp);
+struct cpuinfo_loongarch cpu_data[NR_CPUS] __read_mostly;
+
+EXPORT_SYMBOL(cpu_data);
+
+struct loongson_board_info b_info;
+static const char dmi_empty_string[] = " ";
+
+/*
+ * Setup information
+ *
+ * These are initialized so they are in the .data section
+ */
+
+static int num_standard_resources;
+static struct resource *standard_resources;
+
+static struct resource code_resource = { .name = "Kernel code", };
+static struct resource data_resource = { .name = "Kernel data", };
+static struct resource bss_resource = { .name = "Kernel bss", };
+
+const char *get_system_type(void)
+{
+ return "generic-loongson-machine";
+}
+
+static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
+{
+ const u8 *bp = ((u8 *) dm) + dm->length;
+
+ if (s) {
+ s--;
+ while (s > 0 && *bp) {
+ bp += strlen(bp) + 1;
+ s--;
+ }
+
+ if (*bp != 0) {
+ size_t len = strlen(bp)+1;
+ size_t cmp_len = len > 8 ? 8 : len;
+
+ if (!memcmp(bp, dmi_empty_string, cmp_len))
+ return dmi_empty_string;
+
+ return bp;
+ }
+ }
+
+ return "";
+}
+
+static void __init parse_cpu_table(const struct dmi_header *dm)
+{
+ long freq_temp = 0;
+ char *dmi_data = (char *)dm;
+
+ freq_temp = ((*(dmi_data + SMBIOS_FREQHIGH_OFFSET) << 8) +
+ ((*(dmi_data + SMBIOS_FREQLOW_OFFSET)) & SMBIOS_FREQLOW_MASK));
+ cpu_clock_freq = freq_temp * 1000000;
+
+ loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
+ loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_CORE_PACKAGE_OFFSET);
+
+ pr_info("CpuClock = %llu\n", cpu_clock_freq);
+}
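parse_cpu_table() reassembles the clock in MHz from two SMBIOS bytes: the byte at SMBIOS_FREQHIGH_OFFSET gives the high 8 bits, the masked byte at SMBIOS_FREQLOW_OFFSET the low 8 bits, and the sum is then scaled to Hz. A worked sketch with made-up byte values:

    /* Illustrative only: 0x09/0xC4 are hypothetical SMBIOS byte values. */
    #include <stdio.h>

    int main(void)
    {
        unsigned char freq_high = 0x09, freq_low = 0xC4;
        unsigned long long mhz = (freq_high << 8) + (freq_low & 0xFF);  /* 2500 */

        printf("CpuClock = %llu\n", mhz * 1000000ULL);  /* 2500000000 */
        return 0;
    }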
+
+static void __init parse_bios_table(const struct dmi_header *dm)
+{
+ int bios_extern;
+ char *dmi_data = (char *)dm;
+
+ bios_extern = *(dmi_data + SMBIOS_BIOSEXTERN_OFFSET);
+ b_info.bios_size = *(dmi_data + SMBIOS_BIOSSIZE_OFFSET);
+
+ if (bios_extern & LOONGSON_EFI_ENABLE)
+ set_bit(EFI_BOOT, &efi.flags);
+ else
+ clear_bit(EFI_BOOT, &efi.flags);
+}
+
+static void __init find_tokens(const struct dmi_header *dm, void *dummy)
+{
+ switch (dm->type) {
+ case 0x0: /* Extern BIOS */
+ parse_bios_table(dm);
+ break;
+ case 0x4: /* Calling interface */
+ parse_cpu_table(dm);
+ break;
+ }
+}
+static void __init smbios_parse(void)
+{
+ b_info.bios_vendor = (void *)dmi_get_system_info(DMI_BIOS_VENDOR);
+ b_info.bios_version = (void *)dmi_get_system_info(DMI_BIOS_VERSION);
+ b_info.bios_release_date = (void *)dmi_get_system_info(DMI_BIOS_DATE);
+ b_info.board_vendor = (void *)dmi_get_system_info(DMI_BOARD_VENDOR);
+ b_info.board_name = (void *)dmi_get_system_info(DMI_BOARD_NAME);
+ dmi_walk(find_tokens, NULL);
+}
+
+static int usermem __initdata;
+
+static int __init early_parse_mem(char *p)
+{
+ phys_addr_t start, size;
+
+ if (!p) {
+ pr_err("mem parameter is empty, do nothing\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If a user specifies memory size, we
+ * blow away any automatically generated
+ * size.
+ */
+ if (usermem == 0) {
+ usermem = 1;
+ memblock_remove(memblock_start_of_DRAM(),
+ memblock_end_of_DRAM() - memblock_start_of_DRAM());
+ }
+ start = 0;
+ size = memparse(p, &p);
+ if (*p == '@')
+ start = memparse(p + 1, &p);
+ else {
+ pr_err("Invalid format!\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ENABLED(CONFIG_NUMA))
+ memblock_add(start, size);
+ else
+ memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
+
+ return 0;
+}
+early_param("mem", early_parse_mem);
+
+void __init platform_init(void)
+{
+ efi_init();
+#ifdef CONFIG_ACPI_TABLE_UPGRADE
+ acpi_table_upgrade();
+#endif
+#ifdef CONFIG_ACPI
+ acpi_gbl_use_default_register_widths = false;
+ acpi_boot_table_init();
+ acpi_boot_init();
+#endif
+
+#ifdef CONFIG_NUMA
+ init_numa_memory();
+#endif
+ dmi_setup();
+ smbios_parse();
+ pr_info("The BIOS Version: %s\n", b_info.bios_version);
+
+ efi_runtime_init();
+}
+
+static void __init check_kernel_sections_mem(void)
+{
+ phys_addr_t start = __pa_symbol(&_text);
+ phys_addr_t size = __pa_symbol(&_end) - start;
+
+ if (!memblock_is_region_memory(start, size)) {
+ pr_info("Kernel sections are not in the memory maps\n");
+ memblock_add(start, size);
+ }
+}
+
+/*
+ * arch_mem_init - initialize memory management subsystem
+ */
+static void __init arch_mem_init(char **cmdline_p)
+{
+ if (usermem)
+ pr_info("User-defined physical RAM map overwrite\n");
+
+ check_kernel_sections_mem();
+
+ /*
+	 * In order to reduce the possibility of a kernel panic when we fail to
+	 * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
+	 * as little low memory as possible before plat_swiotlb_setup(), so
+	 * make sparse_init() use top-down allocation.
+ */
+ memblock_set_bottom_up(false);
+ sparse_init();
+ memblock_set_bottom_up(true);
+
+ plat_swiotlb_setup();
+
+ dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
+
+ memblock_dump_all();
+
+ early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
+}
+
+static void __init resource_init(void)
+{
+ long i = 0;
+ size_t res_size;
+ struct resource *res;
+ struct memblock_region *region;
+
+ code_resource.start = __pa_symbol(&_text);
+ code_resource.end = __pa_symbol(&_etext) - 1;
+ data_resource.start = __pa_symbol(&_etext);
+ data_resource.end = __pa_symbol(&_edata) - 1;
+ bss_resource.start = __pa_symbol(&__bss_start);
+ bss_resource.end = __pa_symbol(&__bss_stop) - 1;
+
+ num_standard_resources = memblock.memory.cnt;
+ res_size = num_standard_resources * sizeof(*standard_resources);
+ standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
+
+ for_each_mem_region(region) {
+ res = &standard_resources[i++];
+ if (!memblock_is_nomap(region)) {
+ res->name = "System RAM";
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
+ res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+ } else {
+ res->name = "Reserved";
+ res->flags = IORESOURCE_MEM;
+ res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
+ res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
+ }
+
+ request_resource(&iomem_resource, res);
+
+ /*
+ * We don't know which RAM region contains kernel data,
+ * so we try it repeatedly and let the resource manager
+ * test it.
+ */
+ request_resource(res, &code_resource);
+ request_resource(res, &data_resource);
+ request_resource(res, &bss_resource);
+ }
+}
+
+static int __init reserve_memblock_reserved_regions(void)
+{
+ u64 i, j;
+
+ for (i = 0; i < num_standard_resources; ++i) {
+ struct resource *mem = &standard_resources[i];
+ phys_addr_t r_start, r_end, mem_size = resource_size(mem);
+
+ if (!memblock_is_region_reserved(mem->start, mem_size))
+ continue;
+
+ for_each_reserved_mem_range(j, &r_start, &r_end) {
+ resource_size_t start, end;
+
+ start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
+ end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
+
+ if (start > mem->end || end < mem->start)
+ continue;
+
+ reserve_region_with_split(mem, start, end, "Reserved");
+ }
+ }
+
+ return 0;
+}
+arch_initcall(reserve_memblock_reserved_regions);
+
+#ifdef CONFIG_SMP
+static void __init prefill_possible_map(void)
+{
+ int i, possible;
+
+ possible = num_processors + disabled_cpus;
+ if (possible > nr_cpu_ids)
+ possible = nr_cpu_ids;
+
+ pr_info("SMP: Allowing %d CPUs, %d hotplug CPUs\n",
+ possible, max((possible - num_processors), 0));
+
+ for (i = 0; i < possible; i++)
+ set_cpu_possible(i, true);
+ for (; i < NR_CPUS; i++)
+ set_cpu_possible(i, false);
+
+ nr_cpu_ids = possible;
+}
+#else
+static inline void prefill_possible_map(void) {}
+#endif
+
+void __init setup_arch(char **cmdline_p)
+{
+ cpu_probe();
+ *cmdline_p = boot_command_line;
+
+ init_environ();
+ memblock_init();
+ parse_early_param();
+
+ platform_init();
+ pagetable_init();
+ arch_mem_init(cmdline_p);
+
+ resource_init();
+ plat_smp_setup();
+ prefill_possible_map();
+
+ paging_init();
+}
diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
new file mode 100644
index 000000000000..7f4889df4a17
--- /dev/null
+++ b/arch/loongarch/kernel/signal.c
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ */
+#include <linux/audit.h>
+#include <linux/cache.h>
+#include <linux/context_tracking.h>
+#include <linux/irqflags.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/personality.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/compiler.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+#include <asm/asm.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-features.h>
+#include <asm/fpu.h>
+#include <asm/ucontext.h>
+#include <asm/vdso.h>
+
+#ifdef DEBUG_SIG
+# define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
+#else
+# define DEBUGP(fmt, args...)
+#endif
+
+/* Make sure we will not lose FPU ownership */
+#define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); })
+#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
+
+/* Assembly functions to move context to/from the FPU */
+extern asmlinkage int
+_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+extern asmlinkage int
+_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+
+struct rt_sigframe {
+ struct siginfo rs_info;
+ struct ucontext rs_uctx;
+};
+
+struct _ctx_layout {
+ struct sctx_info *addr;
+ unsigned int size;
+};
+
+struct extctx_layout {
+ unsigned long size;
+ unsigned int flags;
+ struct _ctx_layout fpu;
+ struct _ctx_layout end;
+};
+
+static void __user *get_ctx_through_ctxinfo(struct sctx_info *info)
+{
+ return (void __user *)((char *)info + sizeof(struct sctx_info));
+}
+
+/*
+ * Copy thread-saved context to/from a signal context presumed to be on the
+ * user stack, and therefore accessed with appropriate macros from uaccess.h.
+ */
+static int copy_fpu_to_sigcontext(struct fpu_context __user *ctx)
+{
+ int i;
+ int err = 0;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |=
+ __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
+ &regs[i]);
+ }
+ err |= __put_user(current->thread.fpu.fcc, fcc);
+ err |= __put_user(current->thread.fpu.fcsr, fcsr);
+
+ return err;
+}
+
+static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx)
+{
+ int i;
+ int err = 0;
+ u64 fpr_val;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __get_user(fpr_val, &regs[i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
+ }
+ err |= __get_user(current->thread.fpu.fcc, fcc);
+ err |= __get_user(current->thread.fpu.fcsr, fcsr);
+
+ return err;
+}
+
+/*
+ * Wrappers for the assembly _{save,restore}_fp_context functions.
+ */
+static int save_hw_fpu_context(struct fpu_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ return _save_fp_context(regs, fcc, fcsr);
+}
+
+static int restore_hw_fpu_context(struct fpu_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ return _restore_fp_context(regs, fcc, fcsr);
+}
+
+static int fcsr_pending(unsigned int __user *fcsr)
+{
+ int err, sig = 0;
+ unsigned int csr, enabled;
+
+ err = __get_user(csr, fcsr);
+ enabled = ((csr & FPU_CSR_ALL_E) << 24);
+ /*
+	 * If the signal handler set some FPU exceptions, clear them and
+ * send SIGFPE.
+ */
+ if (csr & enabled) {
+ csr &= ~enabled;
+ err |= __put_user(csr, fcsr);
+ sig = SIGFPE;
+ }
+ return err ?: sig;
+}
+
+/*
+ * Helper routines
+ */
+static int protected_save_fpu_context(struct extctx_layout *extctx)
+{
+ int err = 0;
+ struct sctx_info __user *info = extctx->fpu.addr;
+ struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
+ uint64_t __user *fcc = &fpu_ctx->fcc;
+ uint32_t __user *fcsr = &fpu_ctx->fcsr;
+
+ while (1) {
+ lock_fpu_owner();
+ if (is_fpu_owner())
+ err = save_hw_fpu_context(fpu_ctx);
+ else
+ err = copy_fpu_to_sigcontext(fpu_ctx);
+ unlock_fpu_owner();
+
+ err |= __put_user(FPU_CTX_MAGIC, &info->magic);
+ err |= __put_user(extctx->fpu.size, &info->size);
+
+ if (likely(!err))
+ break;
+ /* Touch the FPU context and try again */
+ err = __put_user(0, &regs[0]) |
+ __put_user(0, &regs[31]) |
+ __put_user(0, fcc) |
+ __put_user(0, fcsr);
+ if (err)
+ return err; /* really bad sigcontext */
+ }
+
+ return err;
+}
+
+static int protected_restore_fpu_context(struct extctx_layout *extctx)
+{
+ int err = 0, sig = 0, tmp __maybe_unused;
+ struct sctx_info __user *info = extctx->fpu.addr;
+ struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
+ uint64_t __user *fcc = &fpu_ctx->fcc;
+ uint32_t __user *fcsr = &fpu_ctx->fcsr;
+
+ err = sig = fcsr_pending(fcsr);
+ if (err < 0)
+ return err;
+
+ while (1) {
+ lock_fpu_owner();
+ if (is_fpu_owner())
+ err = restore_hw_fpu_context(fpu_ctx);
+ else
+ err = copy_fpu_from_sigcontext(fpu_ctx);
+ unlock_fpu_owner();
+
+ if (likely(!err))
+ break;
+ /* Touch the FPU context and try again */
+ err = __get_user(tmp, &regs[0]) |
+ __get_user(tmp, &regs[31]) |
+ __get_user(tmp, fcc) |
+ __get_user(tmp, fcsr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+
+ return err ?: sig;
+}
+
+static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ struct extctx_layout *extctx)
+{
+ int i, err = 0;
+ struct sctx_info __user *info;
+
+ err |= __put_user(regs->csr_era, &sc->sc_pc);
+ err |= __put_user(extctx->flags, &sc->sc_flags);
+
+ err |= __put_user(0, &sc->sc_regs[0]);
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
+ if (extctx->fpu.addr)
+ err |= protected_save_fpu_context(extctx);
+
+ /* Set the "end" magic */
+ info = (struct sctx_info *)extctx->end.addr;
+ err |= __put_user(0, &info->magic);
+ err |= __put_user(0, &info->size);
+
+ return err;
+}
+
+static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *extctx)
+{
+ int err = 0;
+ unsigned int magic, size;
+ struct sctx_info __user *info = (struct sctx_info __user *)&sc->sc_extcontext;
+
+ while(1) {
+ err |= __get_user(magic, &info->magic);
+ err |= __get_user(size, &info->size);
+ if (err)
+ return err;
+
+ switch (magic) {
+ case 0: /* END */
+ goto done;
+
+ case FPU_CTX_MAGIC:
+ if (size < (sizeof(struct sctx_info) +
+ sizeof(struct fpu_context)))
+ goto invalid;
+ extctx->fpu.addr = info;
+ break;
+
+ default:
+ goto invalid;
+ }
+
+ info = (struct sctx_info *)((char *)info + size);
+ }
+
+done:
+ return 0;
+
+invalid:
+ return -EINVAL;
+}
+
+static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ int i, err = 0;
+ struct extctx_layout extctx;
+
+ memset(&extctx, 0, sizeof(struct extctx_layout));
+
+ err = __get_user(extctx.flags, &sc->sc_flags);
+ if (err)
+ goto bad;
+
+ err = parse_extcontext(sc, &extctx);
+ if (err)
+ goto bad;
+
+ conditional_used_math(extctx.flags & SC_USED_FP);
+
+ /*
+ * The signal handler may have used FPU; give it up if the program
+ * doesn't want it following sigreturn.
+ */
+ if (!(extctx.flags & SC_USED_FP))
+ lose_fpu(0);
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ err |= __get_user(regs->csr_era, &sc->sc_pc);
+ for (i = 1; i < 32; i++)
+ err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
+
+ if (extctx.fpu.addr)
+ err |= protected_restore_fpu_context(&extctx);
+
+bad:
+ return err;
+}
+
+static unsigned int handle_flags(void)
+{
+ unsigned int flags = 0;
+
+ flags = used_math() ? SC_USED_FP : 0;
+
+ switch (current->thread.error_code) {
+ case 1:
+ flags |= SC_ADDRERR_RD;
+ break;
+ case 2:
+ flags |= SC_ADDRERR_WR;
+ break;
+ }
+
+ return flags;
+}
+
+static unsigned long extframe_alloc(struct extctx_layout *extctx,
+ struct _ctx_layout *layout,
+ size_t size, unsigned int align, unsigned long base)
+{
+ unsigned long new_base = base - size;
+
+ new_base = round_down(new_base, (align < 16 ? 16 : align));
+ new_base -= sizeof(struct sctx_info);
+
+ layout->addr = (void *)new_base;
+ layout->size = (unsigned int)(base - new_base);
+ extctx->size += layout->size;
+
+ return new_base;
+}
+
+static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned long sp)
+{
+ unsigned long new_sp = sp;
+
+ memset(extctx, 0, sizeof(struct extctx_layout));
+
+ extctx->flags = handle_flags();
+
+ /* Grow down, alloc "end" context info first. */
+ new_sp -= sizeof(struct sctx_info);
+ extctx->end.addr = (void *)new_sp;
+ extctx->end.size = (unsigned int)sizeof(struct sctx_info);
+ extctx->size += extctx->end.size;
+
+ if (extctx->flags & SC_USED_FP) {
+ if (cpu_has_fpu)
+ new_sp = extframe_alloc(extctx, &extctx->fpu,
+ sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
+ }
+
+ return new_sp;
+}
+
+void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+ struct extctx_layout *extctx)
+{
+ unsigned long sp;
+
+ /* Default to using normal stack */
+ sp = regs->regs[3];
+
+ /*
+ * If we are on the alternate signal stack and would overflow it, don't.
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (on_sig_stack(sp) &&
+ !likely(on_sig_stack(sp - sizeof(struct rt_sigframe))))
+ return (void __user __force *)(-1UL);
+
+ sp = sigsp(sp, ksig);
+ sp = round_down(sp, 16);
+ sp = setup_extcontext(extctx, sp);
+ sp -= sizeof(struct rt_sigframe);
+
+ if (!IS_ALIGNED(sp, 16))
+ BUG();
+
+ return (void __user *)sp;
+}
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+
+asmlinkage long sys_rt_sigreturn(void)
+{
+ int sig;
+ sigset_t set;
+ struct pt_regs *regs;
+ struct rt_sigframe __user *frame;
+
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe __user *)regs->regs[3];
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->rs_uctx.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ sig = restore_sigcontext(regs, &frame->rs_uctx.uc_mcontext);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig);
+
+ regs->regs[0] = 0; /* No syscall restarting */
+ if (restore_altstack(&frame->rs_uctx.uc_stack))
+ goto badframe;
+
+ return regs->regs[4];
+
+badframe:
+ force_sig(SIGSEGV);
+ return 0;
+}
+
+static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ int err = 0;
+ struct extctx_layout extctx;
+ struct rt_sigframe __user *frame;
+
+ frame = get_sigframe(ksig, regs, &extctx);
+ if (!access_ok(frame, sizeof(*frame) + extctx.size))
+ return -EFAULT;
+
+ /* Create siginfo. */
+ err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->rs_uctx.uc_flags);
+ err |= __put_user(NULL, &frame->rs_uctx.uc_link);
+ err |= __save_altstack(&frame->rs_uctx.uc_stack, regs->regs[3]);
+ err |= setup_sigcontext(regs, &frame->rs_uctx.uc_mcontext, &extctx);
+ err |= __copy_to_user(&frame->rs_uctx.uc_sigmask, set, sizeof(*set));
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = pointer to siginfo
+ * a2 = pointer to ucontext
+ *
+ * c0_era point to the signal handler, $r3 (sp) points to
+ * the struct rt_sigframe.
+ */
+ regs->regs[4] = ksig->sig;
+ regs->regs[5] = (unsigned long) &frame->rs_info;
+ regs->regs[6] = (unsigned long) &frame->rs_uctx;
+ regs->regs[3] = (unsigned long) frame;
+ regs->regs[1] = (unsigned long) sig_return;
+ regs->csr_era = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->csr_era, regs->regs[1]);
+
+ return 0;
+}
+
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ int ret;
+ sigset_t *oldset = sigmask_to_save();
+ void *vdso = current->mm->context.vdso;
+
+ /* Are we from a system call? */
+ if (regs->regs[0]) {
+ switch (regs->regs[4]) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->regs[4] = -EINTR;
+ break;
+ case -ERESTARTSYS:
+ if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+ regs->regs[4] = -EINTR;
+ break;
+ }
+ fallthrough;
+ case -ERESTARTNOINTR:
+ regs->regs[4] = regs->orig_a0;
+ regs->csr_era -= 4;
+ }
+
+ regs->regs[0] = 0; /* Don't deal with this again. */
+ }
+
+ rseq_signal_deliver(ksig, regs);
+
+ ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset);
+
+ signal_setup_done(ret, ksig, 0);
+}
+
+void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
+{
+ struct ksignal ksig;
+
+ if (has_signal && get_signal(&ksig)) {
+ /* Whee! Actually deliver the signal. */
+ handle_signal(&ksig, regs);
+ return;
+ }
+
+ /* Are we from a system call? */
+ if (regs->regs[0]) {
+ switch (regs->regs[4]) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ regs->regs[4] = regs->orig_a0;
+ regs->csr_era -= 4;
+ break;
+
+ case -ERESTART_RESTARTBLOCK:
+ regs->regs[4] = regs->orig_a0;
+ regs->regs[11] = __NR_restart_syscall;
+ regs->csr_era -= 4;
+ break;
+ }
+ regs->regs[0] = 0; /* Don't deal with this again. */
+ }
+
+ /*
+ * If there's no signal to deliver, we just put the saved sigmask
+ * back
+ */
+ restore_saved_sigmask();
+}
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
new file mode 100644
index 000000000000..b8c53b755a25
--- /dev/null
+++ b/arch/loongarch/kernel/smp.c
@@ -0,0 +1,751 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 2000, 2001 Kanoj Sarcar
+ * Copyright (C) 2000, 2001 Ralf Baechle
+ * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
+ * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
+ */
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/export.h>
+#include <linux/time.h>
+#include <linux/tracepoint.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/cpu.h>
+#include <asm/idle.h>
+#include <asm/loongson.h>
+#include <asm/mmu_context.h>
+#include <asm/numa.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/time.h>
+
+int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
+EXPORT_SYMBOL(__cpu_number_map);
+
+int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
+EXPORT_SYMBOL(__cpu_logical_map);
+
+/* Number of threads (siblings) per CPU core */
+int smp_num_siblings = 1;
+EXPORT_SYMBOL(smp_num_siblings);
+
+/* Representing the threads (siblings) of each logical CPU */
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_sibling_map);
+
+/* Representing the core map of multi-core chips of each logical CPU */
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_core_map);
+
+static DECLARE_COMPLETION(cpu_starting);
+static DECLARE_COMPLETION(cpu_running);
+
+/*
+ * A logical cpu mask containing only one VPE per core to
+ * reduce the number of IPIs on large MT systems.
+ */
+cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_foreign_map);
+
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
+/* representing cpus for which core maps can be computed */
+static cpumask_t cpu_core_setup_map;
+
+struct secondary_data cpuboot_data;
+static DEFINE_PER_CPU(int, cpu_state);
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
+
+enum ipi_msg_type {
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNCTION,
+};
+
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
+ [IPI_RESCHEDULE] = "Rescheduling interrupts",
+ [IPI_CALL_FUNCTION] = "Function call interrupts",
+};
+
+void show_ipi_list(struct seq_file *p, int prec)
+{
+ unsigned int cpu, i;
+
+ for (i = 0; i < NR_IPI; i++) {
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
+ seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
+ }
+}
+
+/* Send mailbox buffer via Mail_Send */
+static void csr_mail_send(uint64_t data, int cpu, int mailbox)
+{
+ uint64_t val;
+
+ /* Send high 32 bits */
+ val = IOCSR_MBUF_SEND_BLOCKING;
+ val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
+ val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
+ val |= (data & IOCSR_MBUF_SEND_H32_MASK);
+ iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
+
+ /* Send low 32 bits */
+ val = IOCSR_MBUF_SEND_BLOCKING;
+ val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
+ val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
+ val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
+ iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
+};
+
+static u32 ipi_read_clear(int cpu)
+{
+ u32 action;
+
+ /* Load the ipi register to figure out what we're supposed to do */
+ action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
+ /* Clear the ipi register to clear the interrupt */
+ iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);
+ smp_mb();
+
+ return action;
+}
+
+static void ipi_write_action(int cpu, u32 action)
+{
+ unsigned int irq = 0;
+
+ while ((irq = ffs(action))) {
+ uint32_t val = IOCSR_IPI_SEND_BLOCKING;
+
+ val |= (irq - 1);
+ val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
+ iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
+ action &= ~BIT(irq - 1);
+ }
+}
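ipi_write_action() splits a multi-bit action mask into one LOONGARCH_IOCSR_IPI_SEND write per set bit, using ffs() to find the lowest set bit (1-based) on each pass. The same loop shape as a standalone sketch, with a made-up mask:

    /* Illustrative: walk the set bits of 'action' exactly as the loop above does. */
    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    int main(void)
    {
        unsigned int action = 0x5;    /* hypothetical: bits 0 and 2 set */
        unsigned int irq;

        while ((irq = ffs(action))) {
            printf("send IPI vector %u\n", irq - 1);    /* 0, then 2 */
            action &= ~(1U << (irq - 1));
        }
        return 0;
    }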
+
+void loongson3_send_ipi_single(int cpu, unsigned int action)
+{
+ ipi_write_action(cpu_logical_map(cpu), (u32)action);
+}
+
+void loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ ipi_write_action(cpu_logical_map(i), (u32)action);
+}
+
+irqreturn_t loongson3_ipi_interrupt(int irq, void *dev)
+{
+ unsigned int action;
+ unsigned int cpu = smp_processor_id();
+
+ action = ipi_read_clear(cpu_logical_map(cpu));
+
+ if (action & SMP_RESCHEDULE) {
+ scheduler_ipi();
+ per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
+ }
+
+ if (action & SMP_CALL_FUNCTION) {
+ generic_smp_call_function_interrupt();
+ per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
+ }
+
+ return IRQ_HANDLED;
+}
+
+void __init loongson3_smp_setup(void)
+{
+ cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
+ cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
+
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
+ pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
+}
+
+void __init loongson3_prepare_cpus(unsigned int max_cpus)
+{
+ int i = 0;
+
+ for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
+ set_cpu_present(i, true);
+ csr_mail_send(0, __cpu_logical_map[i], 0);
+ }
+
+ per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+}
+
+/*
+ * Setup the PC, SP, and TP of a secondary processor and start it running!
+ */
+void loongson3_boot_secondary(int cpu, struct task_struct *idle)
+{
+ unsigned long entry;
+
+ pr_info("Booting CPU#%d...\n", cpu);
+
+ entry = __pa_symbol((unsigned long)&smpboot_entry);
+ cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
+ cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);
+
+ csr_mail_send(entry, cpu_logical_map(cpu), 0);
+
+ loongson3_send_ipi_single(cpu, SMP_BOOT_CPU);
+}
+
+/*
+ * SMP init and finish on secondary CPUs
+ */
+void loongson3_init_secondary(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
+ ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER;
+
+ change_csr_ecfg(ECFG0_IM, imask);
+
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
+
+#ifdef CONFIG_NUMA
+ numa_add_cpu(cpu);
+#endif
+ per_cpu(cpu_state, cpu) = CPU_ONLINE;
+ cpu_data[cpu].core =
+ cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
+ cpu_data[cpu].package =
+ cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
+}
+
+void loongson3_smp_finish(void)
+{
+ local_irq_enable();
+ iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
+ pr_info("CPU#%d finished\n", smp_processor_id());
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static bool io_master(int cpu)
+{
+ if (cpu == 0)
+ return true;
+
+ return false;
+}
+
+int loongson3_cpu_disable(void)
+{
+ unsigned long flags;
+ unsigned int cpu = smp_processor_id();
+
+ if (io_master(cpu))
+ return -EBUSY;
+
+#ifdef CONFIG_NUMA
+ numa_remove_cpu(cpu);
+#endif
+ set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
+ local_irq_save(flags);
+ irq_migrate_all_off_this_cpu();
+ clear_csr_ecfg(ECFG0_IM);
+ local_irq_restore(flags);
+ local_flush_tlb_all();
+
+ return 0;
+}
+
+void loongson3_cpu_die(unsigned int cpu)
+{
+ while (per_cpu(cpu_state, cpu) != CPU_DEAD)
+ cpu_relax();
+
+ mb();
+}
+
+/*
+ * The target CPU should go to XKPRANGE (uncached area) and flush
+ * ICache/DCache/VCache before the control CPU can safely disable its clock.
+ */
+static void loongson3_play_dead(int *state_addr)
+{
+ register int val;
+ register void *addr;
+ register void (*init_fn)(void);
+
+ __asm__ __volatile__(
+ " li.d %[addr], 0x8000000000000000\n"
+ "1: cacop 0x8, %[addr], 0 \n" /* flush ICache */
+ " cacop 0x8, %[addr], 1 \n"
+ " cacop 0x8, %[addr], 2 \n"
+ " cacop 0x8, %[addr], 3 \n"
+ " cacop 0x9, %[addr], 0 \n" /* flush DCache */
+ " cacop 0x9, %[addr], 1 \n"
+ " cacop 0x9, %[addr], 2 \n"
+ " cacop 0x9, %[addr], 3 \n"
+ " addi.w %[sets], %[sets], -1 \n"
+ " addi.d %[addr], %[addr], 0x40 \n"
+ " bnez %[sets], 1b \n"
+ " li.d %[addr], 0x8000000000000000\n"
+ "2: cacop 0xa, %[addr], 0 \n" /* flush VCache */
+ " cacop 0xa, %[addr], 1 \n"
+ " cacop 0xa, %[addr], 2 \n"
+ " cacop 0xa, %[addr], 3 \n"
+ " cacop 0xa, %[addr], 4 \n"
+ " cacop 0xa, %[addr], 5 \n"
+ " cacop 0xa, %[addr], 6 \n"
+ " cacop 0xa, %[addr], 7 \n"
+ " cacop 0xa, %[addr], 8 \n"
+ " cacop 0xa, %[addr], 9 \n"
+ " cacop 0xa, %[addr], 10 \n"
+ " cacop 0xa, %[addr], 11 \n"
+ " cacop 0xa, %[addr], 12 \n"
+ " cacop 0xa, %[addr], 13 \n"
+ " cacop 0xa, %[addr], 14 \n"
+ " cacop 0xa, %[addr], 15 \n"
+ " addi.w %[vsets], %[vsets], -1 \n"
+ " addi.d %[addr], %[addr], 0x40 \n"
+ " bnez %[vsets], 2b \n"
+ " li.w %[val], 0x7 \n" /* *state_addr = CPU_DEAD; */
+ " st.w %[val], %[state_addr], 0 \n"
+ " dbar 0 \n"
+ " cacop 0x11, %[state_addr], 0 \n" /* flush entry of *state_addr */
+ : [addr] "=&r" (addr), [val] "=&r" (val)
+ : [state_addr] "r" (state_addr),
+ [sets] "r" (cpu_data[smp_processor_id()].dcache.sets),
+ [vsets] "r" (cpu_data[smp_processor_id()].vcache.sets));
+
+ local_irq_enable();
+ change_csr_ecfg(ECFG0_IM, ECFGF_IPI);
+
+ __asm__ __volatile__(
+ " idle 0 \n"
+ " li.w $t0, 0x1020 \n"
+ " iocsrrd.d %[init_fn], $t0 \n" /* Get init PC */
+ : [init_fn] "=&r" (addr)
+ : /* No Input */
+ : "a0");
+ init_fn = __va(addr);
+
+ init_fn();
+ unreachable();
+}
+
+void play_dead(void)
+{
+ int *state_addr;
+ unsigned int cpu = smp_processor_id();
+ void (*play_dead_uncached)(int *s);
+
+ idle_task_exit();
+ play_dead_uncached = (void *)TO_UNCACHE(__pa((unsigned long)loongson3_play_dead));
+ state_addr = &per_cpu(cpu_state, cpu);
+ mb();
+ play_dead_uncached(state_addr);
+}
+
+static int loongson3_enable_clock(unsigned int cpu)
+{
+ uint64_t core_id = cpu_data[cpu].core;
+ uint64_t package_id = cpu_data[cpu].package;
+
+ LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3);
+
+ return 0;
+}
+
+static int loongson3_disable_clock(unsigned int cpu)
+{
+ uint64_t core_id = cpu_data[cpu].core;
+ uint64_t package_id = cpu_data[cpu].package;
+
+ LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3));
+
+ return 0;
+}
+
+static int register_loongson3_notifier(void)
+{
+ return cpuhp_setup_state_nocalls(CPUHP_LOONGARCH_SOC_PREPARE,
+ "loongarch/loongson:prepare",
+ loongson3_enable_clock,
+ loongson3_disable_clock);
+}
+early_initcall(register_loongson3_notifier);
+
+#endif
+
+/*
+ * Power management
+ */
+#ifdef CONFIG_PM
+
+static int loongson3_ipi_suspend(void)
+{
+ return 0;
+}
+
+static void loongson3_ipi_resume(void)
+{
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
+}
+
+static struct syscore_ops loongson3_ipi_syscore_ops = {
+ .resume = loongson3_ipi_resume,
+ .suspend = loongson3_ipi_suspend,
+};
+
+/*
+ * Enable boot cpu ipi before enabling nonboot cpus
+ * during syscore_resume.
+ */
+static int __init ipi_pm_init(void)
+{
+ register_syscore_ops(&loongson3_ipi_syscore_ops);
+ return 0;
+}
+
+core_initcall(ipi_pm_init);
+#endif
+
+static inline void set_cpu_sibling_map(int cpu)
+{
+ int i;
+
+ cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
+
+ if (smp_num_siblings <= 1)
+ cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
+ else {
+ for_each_cpu(i, &cpu_sibling_setup_map) {
+ if (cpus_are_siblings(cpu, i)) {
+ cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
+ }
+ }
+ }
+}
+
+static inline void set_cpu_core_map(int cpu)
+{
+ int i;
+
+ cpumask_set_cpu(cpu, &cpu_core_setup_map);
+
+ for_each_cpu(i, &cpu_core_setup_map) {
+ if (cpu_data[cpu].package == cpu_data[i].package) {
+ cpumask_set_cpu(i, &cpu_core_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_core_map[i]);
+ }
+ }
+}
+
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+void calculate_cpu_foreign_map(void)
+{
+ int i, k, core_present;
+ cpumask_t temp_foreign_map;
+
+ /* Re-calculate the mask */
+ cpumask_clear(&temp_foreign_map);
+ for_each_online_cpu(i) {
+ core_present = 0;
+ for_each_cpu(k, &temp_foreign_map)
+ if (cpus_are_siblings(i, k))
+ core_present = 1;
+ if (!core_present)
+ cpumask_set_cpu(i, &temp_foreign_map);
+ }
+
+ for_each_online_cpu(i)
+ cpumask_andnot(&cpu_foreign_map[i],
+ &temp_foreign_map, &cpu_sibling_map[i]);
+}
+
+/* Preload SMP state for boot cpu */
+void smp_prepare_boot_cpu(void)
+{
+ unsigned int cpu, node, rr_node;
+
+ set_cpu_possible(0, true);
+ set_cpu_online(0, true);
+ set_my_cpu_offset(per_cpu_offset(0));
+
+ rr_node = first_node(node_online_map);
+ for_each_possible_cpu(cpu) {
+ node = early_cpu_to_node(cpu);
+
+ /*
+ * The mapping between present cpus and nodes has been
+ * built during MADT and SRAT parsing.
+ *
+	 * If possible cpus = present cpus here, early_cpu_to_node
+	 * will return a valid node.
+	 *
+	 * If possible cpus > present cpus here (e.g. some possible
+	 * cpus will be added by cpu-hotplug later), then for possible
+	 * but not present cpus, early_cpu_to_node will return
+	 * NUMA_NO_NODE, and we just map them to online nodes in a
+	 * round-robin way. Once hotplugged, a correct mapping will be
+	 * built for them.
+ */
+ if (node != NUMA_NO_NODE)
+ set_cpu_numa_node(cpu, node);
+ else {
+ set_cpu_numa_node(cpu, rr_node);
+ rr_node = next_node_in(rr_node, node_online_map);
+ }
+ }
+}
+
+/* called from main before smp_init() */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ init_new_context(current, &init_mm);
+ current_thread_info()->cpu = 0;
+ loongson3_prepare_cpus(max_cpus);
+ set_cpu_sibling_map(0);
+ set_cpu_core_map(0);
+ calculate_cpu_foreign_map();
+#ifndef CONFIG_HOTPLUG_CPU
+ init_cpu_present(cpu_possible_mask);
+#endif
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+ loongson3_boot_secondary(cpu, tidle);
+
+ /* Wait for CPU to start and be ready to sync counters */
+ if (!wait_for_completion_timeout(&cpu_starting,
+ msecs_to_jiffies(5000))) {
+ pr_crit("CPU%u: failed to start\n", cpu);
+ return -EIO;
+ }
+
+ /* Wait for the CPU to finish startup and mark itself online before returning */
+ wait_for_completion(&cpu_running);
+
+ return 0;
+}
+
+/*
+ * First C code run on the secondary CPUs after being started up by
+ * the master.
+ */
+asmlinkage void start_secondary(void)
+{
+ unsigned int cpu;
+
+ sync_counter();
+ cpu = smp_processor_id();
+ set_my_cpu_offset(per_cpu_offset(cpu));
+
+ cpu_probe();
+ constant_clockevent_init();
+ loongson3_init_secondary();
+
+ set_cpu_sibling_map(cpu);
+ set_cpu_core_map(cpu);
+
+ notify_cpu_starting(cpu);
+
+ /* Notify boot CPU that we're starting */
+ complete(&cpu_starting);
+
+ /* The CPU is running, now mark it online */
+ set_cpu_online(cpu, true);
+
+ calculate_cpu_foreign_map();
+
+ /*
+ * Notify boot CPU that we're up & online and it can safely return
+ * from __cpu_up()
+ */
+ complete(&cpu_running);
+
+ /*
+ * Interrupts will be enabled in loongson3_smp_finish(); enabling them
+ * too early is dangerous.
+ */
+ WARN_ON_ONCE(!irqs_disabled());
+ loongson3_smp_finish();
+
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+static void stop_this_cpu(void *dummy)
+{
+ set_cpu_online(smp_processor_id(), false);
+ calculate_cpu_foreign_map();
+ local_irq_disable();
+ while (true);
+}
+
+void smp_send_stop(void)
+{
+ smp_call_function(stop_this_cpu, NULL, 0);
+}
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return 0;
+}
+
+static void flush_tlb_all_ipi(void *info)
+{
+ local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu(flush_tlb_all_ipi, NULL, 1);
+}
+
+static void flush_tlb_mm_ipi(void *mm)
+{
+ local_flush_tlb_mm((struct mm_struct *)mm);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (atomic_read(&mm->mm_users) == 0)
+ return; /* happens as a result of exit_mmap() */
+
+ preempt_disable();
+
+ if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1);
+ } else {
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
+ cpu_context(cpu, mm) = 0;
+ }
+ local_flush_tlb_mm(mm);
+ }
+
+ preempt_enable();
+}
+
+struct flush_tlb_data {
+ struct vm_area_struct *vma;
+ unsigned long addr1;
+ unsigned long addr2;
+};
+
+static void flush_tlb_range_ipi(void *info)
+{
+ struct flush_tlb_data *fd = info;
+
+ local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ preempt_disable();
+ if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
+
+ on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
+ } else {
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
+ cpu_context(cpu, mm) = 0;
+ }
+ local_flush_tlb_range(vma, start, end);
+ }
+ preempt_enable();
+}
+
+static void flush_tlb_kernel_range_ipi(void *info)
+{
+ struct flush_tlb_data *fd = info;
+
+ local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ struct flush_tlb_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
+
+ on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
+}
+
+static void flush_tlb_page_ipi(void *info)
+{
+ struct flush_tlb_data *fd = info;
+
+ local_flush_tlb_page(fd->vma, fd->addr1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ preempt_disable();
+ if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = page,
+ };
+
+ on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1);
+ } else {
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
+ cpu_context(cpu, vma->vm_mm) = 0;
+ }
+ local_flush_tlb_page(vma, page);
+ }
+ preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_page);
+
+static void flush_tlb_one_ipi(void *info)
+{
+ unsigned long vaddr = (unsigned long) info;
+
+ local_flush_tlb_one(vaddr);
+}
+
+void flush_tlb_one(unsigned long vaddr)
+{
+ on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
+}
+EXPORT_SYMBOL(flush_tlb_one);
diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S
new file mode 100644
index 000000000000..53e2fa8e580e
--- /dev/null
+++ b/arch/loongarch/kernel/switch.S
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/asm-offsets.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+/*
+ * task_struct *__switch_to(task_struct *prev, task_struct *next,
+ * struct thread_info *next_ti)
+ */
+ .align 5
+SYM_FUNC_START(__switch_to)
+ csrrd t1, LOONGARCH_CSR_PRMD
+ stptr.d t1, a0, THREAD_CSRPRMD
+
+ cpu_save_nonscratch a0
+ stptr.d ra, a0, THREAD_REG01
+ move tp, a2
+ cpu_restore_nonscratch a1
+
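+ /*
+ * tp now points at next's thread_info: compute the top of next's
+ * kernel stack and record it (via set_saved_sp) as the stack pointer
+ * used on the next exception entry.
+ */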
+ li.w t0, _THREAD_SIZE - 32
+ PTR_ADD t0, t0, tp
+ set_saved_sp t0, t1, t2
+
+ ldptr.d t1, a1, THREAD_CSRPRMD
+ csrwr t1, LOONGARCH_CSR_PRMD
+
+ jr ra
+SYM_FUNC_END(__switch_to)
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
new file mode 100644
index 000000000000..3fc4211db989
--- /dev/null
+++ b/arch/loongarch/kernel/syscall.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/capability.h>
+#include <linux/entry-common.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/syscalls.h>
+#include <linux/unistd.h>
+
+#include <asm/asm.h>
+#include <asm/signal.h>
+#include <asm/switch_to.h>
+#include <asm-generic/syscalls.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
+ prot, unsigned long, flags, unsigned long, fd, off_t, offset)
+{
+ if (offset & ~PAGE_MASK)
+ return -EINVAL;
+
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+}
+
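+/*
+ * Every entry defaults to sys_ni_syscall; the __SYSCALL() expansion
+ * pulled in from <asm/unistd.h> then fills in the implemented ones.
+ */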
+void *sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
+
+typedef long (*sys_call_fn)(unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long, unsigned long);
+
+void noinstr do_syscall(struct pt_regs *regs)
+{
+ unsigned long nr;
+ sys_call_fn syscall_fn;
+
+ nr = regs->regs[11];
+ /* Set for syscall restarting */
+ if (nr < NR_syscalls)
+ regs->regs[0] = nr + 1;
+
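+ /*
+ * Skip the (4-byte) syscall instruction and stash the original a0,
+ * so a restarted syscall sees its first argument again.
+ */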
+ regs->csr_era += 4;
+ regs->orig_a0 = regs->regs[4];
+ regs->regs[4] = -ENOSYS;
+
+ nr = syscall_enter_from_user_mode(regs, nr);
+
+ if (nr < NR_syscalls) {
+ syscall_fn = sys_call_table[nr];
+ regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
+ regs->regs[7], regs->regs[8], regs->regs[9]);
+ }
+
+ syscall_exit_to_user_mode(regs);
+}
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
new file mode 100644
index 000000000000..fe6823875895
--- /dev/null
+++ b/arch/loongarch/kernel/time.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common time service routines for LoongArch machines.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/sched_clock.h>
+#include <linux/spinlock.h>
+
+#include <asm/cpu-features.h>
+#include <asm/loongarch.h>
+#include <asm/time.h>
+
+u64 cpu_clock_freq;
+EXPORT_SYMBOL(cpu_clock_freq);
+u64 const_clock_freq;
+EXPORT_SYMBOL(const_clock_freq);
+
+static DEFINE_RAW_SPINLOCK(state_lock);
+static DEFINE_PER_CPU(struct clock_event_device, constant_clockevent_device);
+
+static void constant_event_handler(struct clock_event_device *dev)
+{
+}
+
+irqreturn_t constant_timer_interrupt(int irq, void *data)
+{
+ int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+
+ /* Clear Timer Interrupt */
+ write_csr_tintclear(CSR_TINTCLR_TI);
+ cd = &per_cpu(constant_clockevent_device, cpu);
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static int constant_set_state_oneshot(struct clock_event_device *evt)
+{
+ unsigned long timer_config;
+
+ raw_spin_lock(&state_lock);
+
+ timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+ timer_config |= CSR_TCFG_EN;
+ timer_config &= ~CSR_TCFG_PERIOD;
+ csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+ raw_spin_unlock(&state_lock);
+
+ return 0;
+}
+
+static int constant_set_state_oneshot_stopped(struct clock_event_device *evt)
+{
+ unsigned long timer_config;
+
+ raw_spin_lock(&state_lock);
+
+ timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+ timer_config &= ~CSR_TCFG_EN;
+ csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+ raw_spin_unlock(&state_lock);
+
+ return 0;
+}
+
+static int constant_set_state_periodic(struct clock_event_device *evt)
+{
+ unsigned long period;
+ unsigned long timer_config;
+
+ raw_spin_lock(&state_lock);
+
+ period = const_clock_freq / HZ;
+ timer_config = period & CSR_TCFG_VAL;
+ timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN);
+ csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+ raw_spin_unlock(&state_lock);
+
+ return 0;
+}
+
+static int constant_set_state_shutdown(struct clock_event_device *evt)
+{
+ return 0;
+}
+
+static int constant_timer_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ unsigned long timer_config;
+
+ delta &= CSR_TCFG_VAL;
+ timer_config = delta | CSR_TCFG_EN;
+ csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+ return 0;
+}
+
+static unsigned long __init get_loops_per_jiffy(void)
+{
+ unsigned long lpj = (unsigned long)const_clock_freq;
+
+ do_div(lpj, HZ);
+
+ return lpj;
+}
+
+static long init_timeval;
+
+void sync_counter(void)
+{
+ /* Ensure the counter begins at 0 */
+ csr_write64(-init_timeval, LOONGARCH_CSR_CNTC);
+}
+
+int constant_clockevent_init(void)
+{
+ unsigned int irq;
+ unsigned int cpu = smp_processor_id();
+ unsigned long min_delta = 0x600;
+ unsigned long max_delta = (1UL << 48) - 1;
+ struct clock_event_device *cd;
+ static int timer_irq_installed = 0;
+
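+ /* The timer irq is the offset of EXCCODE_TIMER from the first interrupt code */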
+ irq = EXCCODE_TIMER - EXCCODE_INT_START;
+
+ cd = &per_cpu(constant_clockevent_device, cpu);
+
+ cd->name = "Constant";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_PERCPU;
+
+ cd->irq = irq;
+ cd->rating = 320;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_state_oneshot = constant_set_state_oneshot;
+ cd->set_state_oneshot_stopped = constant_set_state_oneshot_stopped;
+ cd->set_state_periodic = constant_set_state_periodic;
+ cd->set_state_shutdown = constant_set_state_shutdown;
+ cd->set_next_event = constant_timer_next_event;
+ cd->event_handler = constant_event_handler;
+
+ clockevents_config_and_register(cd, const_clock_freq, min_delta, max_delta);
+
+ if (timer_irq_installed)
+ return 0;
+
+ timer_irq_installed = 1;
+
+ sync_counter();
+
+ if (request_irq(irq, constant_timer_interrupt, IRQF_PERCPU | IRQF_TIMER, "timer", NULL))
+ pr_err("Failed to request irq %d (timer)\n", irq);
+
+ lpj_fine = get_loops_per_jiffy();
+ pr_info("Constant clock event device register\n");
+
+ return 0;
+}
+
+static u64 read_const_counter(struct clocksource *clk)
+{
+ return drdtime();
+}
+
+static u64 native_sched_clock(void)
+{
+ return read_const_counter(NULL);
+}
+
+static struct clocksource clocksource_const = {
+ .name = "Constant",
+ .rating = 400,
+ .read = read_const_counter,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .vdso_clock_mode = VDSO_CLOCKMODE_CPU,
+};
+
+int __init constant_clocksource_init(void)
+{
+ int res;
+ unsigned long freq = const_clock_freq;
+
+ res = clocksource_register_hz(&clocksource_const, freq);
+
+ sched_clock_register(native_sched_clock, 64, freq);
+
+ pr_info("Constant clock source device register\n");
+
+ return res;
+}
+
+void __init time_init(void)
+{
+ if (!cpu_has_cpucfg)
+ const_clock_freq = cpu_clock_freq;
+ else
+ const_clock_freq = calc_const_freq();
+
+ init_timeval = drdtime() - csr_read64(LOONGARCH_CSR_CNTC);
+
+ constant_clockevent_init();
+ constant_clocksource_init();
+}
diff --git a/arch/loongarch/kernel/topology.c b/arch/loongarch/kernel/topology.c
new file mode 100644
index 000000000000..ab1a75c0b5a6
--- /dev/null
+++ b/arch/loongarch/kernel/topology.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/percpu.h>
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+#ifdef CONFIG_HOTPLUG_CPU
+int arch_register_cpu(int cpu)
+{
+ int ret;
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+ c->hotpluggable = 1;
+ ret = register_cpu(c, cpu);
+ if (ret < 0)
+ pr_warn("register_cpu %d failed (%d)\n", cpu, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(arch_register_cpu);
+
+void arch_unregister_cpu(int cpu)
+{
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+ c->hotpluggable = 0;
+ unregister_cpu(c);
+}
+EXPORT_SYMBOL(arch_unregister_cpu);
+#endif
+
+static int __init topology_init(void)
+{
+ int i, ret;
+
+ for_each_present_cpu(i) {
+ struct cpu *c = &per_cpu(cpu_devices, i);
+
+ c->hotpluggable = !!i;
+ ret = register_cpu(c, i);
+ if (ret < 0)
+ pr_warn("topology_init: register_cpu %d failed (%d)\n", i, ret);
+ }
+
+ return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
new file mode 100644
index 000000000000..e4060f84a221
--- /dev/null
+++ b/arch/loongarch/kernel/traps.c
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/entry-common.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/extable.h>
+#include <linux/mm.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/memblock.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/kprobes.h>
+#include <linux/notifier.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/cpu.h>
+#include <asm/fpu.h>
+#include <asm/loongarch.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/ptrace.h>
+#include <asm/sections.h>
+#include <asm/siginfo.h>
+#include <asm/stacktrace.h>
+#include <asm/tlb.h>
+#include <asm/types.h>
+
+#include "access-helper.h"
+
+extern asmlinkage void handle_ade(void);
+extern asmlinkage void handle_ale(void);
+extern asmlinkage void handle_sys(void);
+extern asmlinkage void handle_bp(void);
+extern asmlinkage void handle_ri(void);
+extern asmlinkage void handle_fpu(void);
+extern asmlinkage void handle_fpe(void);
+extern asmlinkage void handle_lbt(void);
+extern asmlinkage void handle_lsx(void);
+extern asmlinkage void handle_lasx(void);
+extern asmlinkage void handle_reserved(void);
+extern asmlinkage void handle_watch(void);
+extern asmlinkage void handle_vint(void);
+
+static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
+ const char *loglvl, bool user)
+{
+ unsigned long addr;
+ unsigned long *sp = (unsigned long *)(regs->regs[3] & ~3);
+
+ printk("%sCall Trace:", loglvl);
+#ifdef CONFIG_KALLSYMS
+ printk("%s\n", loglvl);
+#endif
+ while (!kstack_end(sp)) {
+ if (__get_addr(&addr, sp++, user)) {
+ printk("%s (Bad stack address)", loglvl);
+ break;
+ }
+ if (__kernel_text_address(addr))
+ print_ip_sym(loglvl, addr);
+ }
+ printk("%s\n", loglvl);
+}
+
+static void show_stacktrace(struct task_struct *task,
+ const struct pt_regs *regs, const char *loglvl, bool user)
+{
+ int i;
+ const int field = 2 * sizeof(unsigned long);
+ unsigned long stackdata;
+ unsigned long *sp = (unsigned long *)regs->regs[3];
+
+ printk("%sStack :", loglvl);
+ i = 0;
+ while ((unsigned long) sp & (PAGE_SIZE - 1)) {
+ if (i && ((i % (64 / field)) == 0)) {
+ pr_cont("\n");
+ printk("%s ", loglvl);
+ }
+ if (i > 39) {
+ pr_cont(" ...");
+ break;
+ }
+
+ if (__get_addr(&stackdata, sp++, user)) {
+ pr_cont(" (Bad stack address)");
+ break;
+ }
+
+ pr_cont(" %0*lx", field, stackdata);
+ i++;
+ }
+ pr_cont("\n");
+ show_backtrace(task, regs, loglvl, user);
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+{
+ struct pt_regs regs;
+
+ regs.csr_crmd = 0;
+ if (sp) {
+ regs.csr_era = 0;
+ regs.regs[1] = 0;
+ regs.regs[3] = (unsigned long)sp;
+ } else {
+ if (!task || task == current)
+ prepare_frametrace(&regs);
+ else {
+ regs.csr_era = task->thread.reg01;
+ regs.regs[1] = 0;
+ regs.regs[3] = task->thread.reg03;
+ regs.regs[22] = task->thread.reg22;
+ }
+ }
+
+ show_stacktrace(task, &regs, loglvl, false);
+}
+
+static void show_code(unsigned int *pc, bool user)
+{
+ long i;
+ unsigned int insn;
+
+ printk("Code:");
+
+ for (i = -3; i < 6; i++) {
+ if (__get_inst(&insn, pc + i, user)) {
+ pr_cont(" (Bad address in era)\n");
+ break;
+ }
+ pr_cont("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
+ }
+ pr_cont("\n");
+}
+
+static void __show_regs(const struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned int excsubcode;
+ unsigned int exccode;
+ int i;
+
+ show_regs_print_info(KERN_DEFAULT);
+
+ /*
+ * Saved main processor registers
+ */
+ for (i = 0; i < 32; ) {
+ if ((i % 4) == 0)
+ printk("$%2d :", i);
+ pr_cont(" %0*lx", field, regs->regs[i]);
+
+ i++;
+ if ((i % 4) == 0)
+ pr_cont("\n");
+ }
+
+ /*
+ * Saved csr registers
+ */
+ printk("era : %0*lx %pS\n", field, regs->csr_era,
+ (void *) regs->csr_era);
+ printk("ra : %0*lx %pS\n", field, regs->regs[1],
+ (void *) regs->regs[1]);
+
+ printk("CSR crmd: %08lx ", regs->csr_crmd);
+ printk("CSR prmd: %08lx ", regs->csr_prmd);
+ printk("CSR euen: %08lx ", regs->csr_euen);
+ printk("CSR ecfg: %08lx ", regs->csr_ecfg);
+ printk("CSR estat: %08lx ", regs->csr_estat);
+
+ pr_cont("\n");
+
+ exccode = ((regs->csr_estat) & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
+ excsubcode = ((regs->csr_estat) & CSR_ESTAT_ESUBCODE) >> CSR_ESTAT_ESUBCODE_SHIFT;
+ printk("ExcCode : %x (SubCode %x)\n", exccode, excsubcode);
+
+ if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
+ printk("BadVA : %0*lx\n", field, regs->csr_badvaddr);
+
+ printk("PrId : %08x (%s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
+ cpu_family_string());
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ __show_regs((struct pt_regs *)regs);
+ dump_stack();
+}
+
+void show_registers(struct pt_regs *regs)
+{
+ __show_regs(regs);
+ print_modules();
+ printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
+ current->comm, current->pid, current_thread_info(), current);
+
+ show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
+ show_code((void *)regs->csr_era, user_mode(regs));
+ printk("\n");
+}
+
+static DEFINE_RAW_SPINLOCK(die_lock);
+
+void __noreturn die(const char *str, struct pt_regs *regs)
+{
+ static int die_counter;
+ int sig = SIGSEGV;
+
+ oops_enter();
+
+ if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
+ SIGSEGV) == NOTIFY_STOP)
+ sig = 0;
+
+ console_verbose();
+ raw_spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+
+ printk("%s[#%d]:\n", str, ++die_counter);
+ show_registers(regs);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ raw_spin_unlock_irq(&die_lock);
+
+ oops_exit();
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops)
+ panic("Fatal exception");
+
+ make_task_dead(sig);
+}
+
+static inline void setup_vint_size(unsigned int size)
+{
+ unsigned int vs;
+
+ vs = ilog2(size/4);
+
+ if (vs == 0 || vs > 7)
+ panic("vint_size %d Not support yet", vs);
+
+ csr_xchg32(vs<<CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
+}
+
+/*
+ * Send SIGFPE according to FCSR Cause bits, which must have already
+ * been masked against Enable bits. This is important because Inexact can
+ * happen together with Overflow or Underflow, and `ptrace' can set
+ * any bits.
+ */
+void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
+ struct task_struct *tsk)
+{
+ int si_code = FPE_FLTUNK;
+
+ if (fcsr & FPU_CSR_INV_X)
+ si_code = FPE_FLTINV;
+ else if (fcsr & FPU_CSR_DIV_X)
+ si_code = FPE_FLTDIV;
+ else if (fcsr & FPU_CSR_OVF_X)
+ si_code = FPE_FLTOVF;
+ else if (fcsr & FPU_CSR_UDF_X)
+ si_code = FPE_FLTUND;
+ else if (fcsr & FPU_CSR_INE_X)
+ si_code = FPE_FLTRES;
+
+ force_sig_fault(SIGFPE, si_code, fault_addr);
+}
+
+int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
+{
+ int si_code;
+
+ switch (sig) {
+ case 0:
+ return 0;
+
+ case SIGFPE:
+ force_fcsr_sig(fcsr, fault_addr, current);
+ return 1;
+
+ case SIGBUS:
+ force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
+ return 1;
+
+ case SIGSEGV:
+ mmap_read_lock(current->mm);
+ if (vma_lookup(current->mm, (unsigned long)fault_addr))
+ si_code = SEGV_ACCERR;
+ else
+ si_code = SEGV_MAPERR;
+ mmap_read_unlock(current->mm);
+ force_sig_fault(SIGSEGV, si_code, fault_addr);
+ return 1;
+
+ default:
+ force_sig(sig);
+ return 1;
+ }
+}
+
+/*
+ * Delayed FP exceptions when doing a lazy context switch.
+ */
+asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
+{
+ int sig;
+ void __user *fault_addr;
+ irqentry_state_t state = irqentry_enter(regs);
+
+ if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
+ SIGFPE) == NOTIFY_STOP)
+ goto out;
+
+ /* Clear FCSR.Cause before enabling interrupts */
+ write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
+ local_irq_enable();
+
+ die_if_kernel("FP exception in kernel code", regs);
+
+ sig = SIGFPE;
+ fault_addr = (void __user *) regs->csr_era;
+
+ /* Send a signal if required. */
+ process_fpemu_return(sig, fault_addr, fcsr);
+
+out:
+ local_irq_disable();
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_ade(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ die_if_kernel("Kernel ade access", regs);
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);
+
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_ale(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ die_if_kernel("Kernel ale access", regs);
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_bp(struct pt_regs *regs)
+{
+ bool user = user_mode(regs);
+ unsigned int opcode, bcode;
+ unsigned long era = exception_era(regs);
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ current->thread.trap_nr = read_csr_excode();
+ if (__get_inst(&opcode, (u32 *)era, user))
+ goto out_sigsegv;
+
+ bcode = (opcode & 0x7fff);
+
+ /*
+ * Notify the kprobe handlers if the instruction is likely to
+ * pertain to them.
+ */
+ switch (bcode) {
+ case BRK_KPROBE_BP:
+ if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ case BRK_KPROBE_SSTEPBP:
+ if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ case BRK_UPROBE_BP:
+ if (notify_die(DIE_UPROBE, "Uprobe", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ case BRK_UPROBE_XOLBP:
+ if (notify_die(DIE_UPROBE_XOL, "Uprobe_XOL", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ default:
+ if (notify_die(DIE_TRAP, "Break", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ }
+
+ switch (bcode) {
+ case BRK_BUG:
+ die_if_kernel("Kernel bug detected", regs);
+ force_sig(SIGTRAP);
+ break;
+ case BRK_DIVZERO:
+ die_if_kernel("Break instruction in kernel code", regs);
+ force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
+ break;
+ case BRK_OVERFLOW:
+ die_if_kernel("Break instruction in kernel code", regs);
+ force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
+ break;
+ default:
+ die_if_kernel("Break instruction in kernel code", regs);
+ force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
+ break;
+ }
+
+out:
+ local_irq_disable();
+ irqentry_exit(regs, state);
+ return;
+
+out_sigsegv:
+ force_sig(SIGSEGV);
+ goto out;
+}
+
+asmlinkage void noinstr do_watch(struct pt_regs *regs)
+{
+ pr_warn("Hardware watch point handler not implemented!\n");
+}
+
+asmlinkage void noinstr do_ri(struct pt_regs *regs)
+{
+ int status = -1;
+ unsigned int opcode = 0;
+ unsigned int __user *era = (unsigned int __user *)exception_era(regs);
+ unsigned long old_era = regs->csr_era;
+ unsigned long old_ra = regs->regs[1];
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ current->thread.trap_nr = read_csr_excode();
+
+ if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
+ SIGILL) == NOTIFY_STOP)
+ goto out;
+
+ die_if_kernel("Reserved instruction in kernel code", regs);
+
+ if (unlikely(compute_return_era(regs) < 0))
+ goto out;
+
+ if (unlikely(get_user(opcode, era) < 0)) {
+ status = SIGSEGV;
+ current->thread.error_code = 1;
+ }
+
+ if (status < 0)
+ status = SIGILL;
+
+ if (unlikely(status > 0)) {
+ regs->csr_era = old_era; /* Undo skip-over. */
+ regs->regs[1] = old_ra;
+ force_sig(status);
+ }
+
+out:
+ local_irq_disable();
+ irqentry_exit(regs, state);
+}
+
+static void init_restore_fp(void)
+{
+ if (!used_math()) {
+ /* First time FP context user. */
+ init_fpu();
+ } else {
+ /* This task has formerly used the FP context */
+ if (!is_fpu_owner())
+ own_fpu_inatomic(1);
+ }
+
+ BUG_ON(!is_fp_enabled());
+}
+
+asmlinkage void noinstr do_fpu(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ die_if_kernel("do_fpu invoked from kernel context!", regs);
+
+ preempt_disable();
+ init_restore_fp();
+ preempt_enable();
+
+ local_irq_disable();
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_lsx(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ force_sig(SIGILL);
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_lasx(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ force_sig(SIGILL);
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_lbt(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ force_sig(SIGILL);
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_reserved(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ /*
+ * Game over - no way to handle this if it ever occurs. Most probably
+ * caused by a fatal error after another hardware/software error.
+ */
+ pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
+ read_csr_excode(), current->pid, current->comm);
+ die_if_kernel("do_reserved exception", regs);
+ force_sig(SIGUNUSED);
+
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void cache_parity_error(void)
+{
+ /* For the moment, report the problem and hang. */
+ pr_err("Cache error exception:\n");
+ pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
+ pr_err("csr_merrera == %016llx\n", csr_read64(LOONGARCH_CSR_MERRERA));
+ panic("Can't handle the cache error!");
+}
+
+asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs;
+
+ irq_enter_rcu();
+ old_regs = set_irq_regs(regs);
+ handle_arch_irq(regs);
+ set_irq_regs(old_regs);
+ irq_exit_rcu();
+}
+
+asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
+{
+ register int cpu;
+ register unsigned long stack;
+ irqentry_state_t state = irqentry_enter(regs);
+
+ cpu = smp_processor_id();
+
+ if (on_irq_stack(cpu, sp))
+ handle_loongarch_irq(regs);
+ else {
+ stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;
+
+ /* Save task's sp on IRQ stack for unwinding */
+ *(unsigned long *)stack = sp;
+
+ __asm__ __volatile__(
+ "move $s0, $sp \n" /* Preserve sp */
+ "move $sp, %[stk] \n" /* Switch stack */
+ "move $a0, %[regs] \n"
+ "bl handle_loongarch_irq \n"
+ "move $sp, $s0 \n" /* Restore sp */
+ : /* No outputs */
+ : [stk] "r" (stack), [regs] "r" (regs)
+ : "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
+ "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
+ "memory");
+ }
+
+ irqentry_exit(regs, state);
+}
+
+extern void tlb_init(int cpu);
+extern void cache_error_setup(void);
+
+unsigned long eentry;
+unsigned long tlbrentry;
+
+long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);
+
+static void configure_exception_vector(void)
+{
+ eentry = (unsigned long)exception_handlers;
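+ /* The TLB refill handler sits past the 64 regular vectors (slot 80) */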
+ tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;
+
+ csr_write64(eentry, LOONGARCH_CSR_EENTRY);
+ csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
+ csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
+}
+
+void per_cpu_trap_init(int cpu)
+{
+ unsigned int i;
+
+ setup_vint_size(VECSIZE);
+
+ configure_exception_vector();
+
+ if (!cpu_data[cpu].asid_cache)
+ cpu_data[cpu].asid_cache = asid_first_version(cpu);
+
+ mmgrab(&init_mm);
+ current->active_mm = &init_mm;
+ BUG_ON(current->mm);
+ enter_lazy_tlb(&init_mm, current);
+
+ /* Initialise exception handlers */
+ if (cpu == 0)
+ for (i = 0; i < 64; i++)
+ set_handler(i * VECSIZE, handle_reserved, VECSIZE);
+
+ tlb_init(cpu);
+ cpu_cache_init();
+}
+
+/* Install CPU exception handler */
+void set_handler(unsigned long offset, void *addr, unsigned long size)
+{
+ memcpy((void *)(eentry + offset), addr, size);
+ local_flush_icache_range(eentry + offset, eentry + offset + size);
+}
+
+static const char panic_null_cerr[] =
+ "Trying to set NULL cache error exception handler\n";
+
+/*
+ * Install uncached CPU exception handler.
+ * This is suitable only for the cache error exception, which is the
+ * only exception handler that runs uncached.
+ */
+void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
+{
+ unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));
+
+ if (!addr)
+ panic(panic_null_cerr);
+
+ memcpy((void *)(uncached_eentry + offset), addr, size);
+}
+
+void __init trap_init(void)
+{
+ long i;
+
+ /* Set interrupt vector handler */
+ for (i = EXCCODE_INT_START; i < EXCCODE_INT_END; i++)
+ set_handler(i * VECSIZE, handle_vint, VECSIZE);
+
+ set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
+ set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
+ set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
+ set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
+ set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
+ set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
+ set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
+ set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
+ set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
+ set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
+ set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
+ set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);
+
+ cache_error_setup();
+
+ local_flush_icache_range(eentry, eentry + 0x400);
+}
diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c
new file mode 100644
index 000000000000..e20c8ca87473
--- /dev/null
+++ b/arch/loongarch/kernel/vdso.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timekeeper_internal.h>
+
+#include <asm/page.h>
+#include <asm/vdso.h>
+#include <vdso/helpers.h>
+#include <vdso/vsyscall.h>
+#include <generated/vdso-offsets.h>
+
+extern char vdso_start[], vdso_end[];
+
+/* Kernel-provided data used by the VDSO. */
+static union loongarch_vdso_data {
+ u8 page[PAGE_SIZE];
+ struct vdso_data data[CS_BASES];
+} loongarch_vdso_data __page_aligned_data;
+struct vdso_data *vdso_data = loongarch_vdso_data.data;
+static struct page *vdso_pages[] = { NULL };
+
+static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
+{
+ current->mm->context.vdso = (void *)(new_vma->vm_start);
+
+ return 0;
+}
+
+struct loongarch_vdso_info vdso_info = {
+ .vdso = vdso_start,
+ .size = PAGE_SIZE,
+ .code_mapping = {
+ .name = "[vdso]",
+ .pages = vdso_pages,
+ .mremap = vdso_mremap,
+ },
+ .data_mapping = {
+ .name = "[vvar]",
+ },
+ .offset_sigreturn = vdso_offset_sigreturn,
+};
+
+static int __init init_vdso(void)
+{
+ unsigned long i, pfn;
+
+ BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
+ BUG_ON(!PAGE_ALIGNED(vdso_info.size));
+
+ pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
+ for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
+ vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);
+
+ return 0;
+}
+subsys_initcall(init_vdso);
+
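+/*
+ * Pick the VDSO base above STACK_TOP, randomized within
+ * VDSO_RANDOMIZE_SIZE when the task has PF_RANDOMIZE set.
+ */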
+static unsigned long vdso_base(void)
+{
+ unsigned long base = STACK_TOP;
+
+ if (current->flags & PF_RANDOMIZE) {
+ base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
+ base = PAGE_ALIGN(base);
+ }
+
+ return base;
+}
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ int ret;
+ unsigned long vvar_size, size, data_addr, vdso_addr;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ struct loongarch_vdso_info *info = current->thread.vdso;
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
+ /*
+ * Determine the total area size. This includes the VDSO code itself
+ * and the data (vvar) page.
+ */
+ vvar_size = PAGE_SIZE;
+ size = vvar_size + info->size;
+
+ data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
+ if (IS_ERR_VALUE(data_addr)) {
+ ret = data_addr;
+ goto out;
+ }
+ vdso_addr = data_addr + PAGE_SIZE;
+
+ vma = _install_special_mapping(mm, data_addr, vvar_size,
+ VM_READ | VM_MAYREAD,
+ &info->data_mapping);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ /* Map VDSO data page. */
+ ret = remap_pfn_range(vma, data_addr,
+ virt_to_phys(vdso_data) >> PAGE_SHIFT,
+ PAGE_SIZE, PAGE_READONLY);
+ if (ret)
+ goto out;
+
+ /* Map VDSO code page. */
+ vma = _install_special_mapping(mm, vdso_addr, info->size,
+ VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ &info->code_mapping);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ mm->context.vdso = (void *)vdso_addr;
+ ret = 0;
+
+out:
+ mmap_write_unlock(mm);
+ return ret;
+}
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..9d508158fe1a
--- /dev/null
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/sizes.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+#define PAGE_SIZE _PAGE_SIZE
+
+/*
+ * Put .bss..swapper_pg_dir as the first thing in .bss. This will
+ * ensure that it has .bss alignment (64K).
+ */
+#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir)
+
+#include <asm-generic/vmlinux.lds.h>
+
+/*
+ * The maximum available page size is 64K, so we set the
+ * SectionAlignment field of the EFI application to 64K.
+ */
+PECOFF_FILE_ALIGN = 0x200;
+PECOFF_SEGMENT_ALIGN = 0x10000;
+
+OUTPUT_ARCH(loongarch)
+ENTRY(kernel_entry)
+PHDRS {
+ text PT_LOAD FLAGS(7); /* RWX */
+ note PT_NOTE FLAGS(4); /* R__ */
+}
+
+jiffies = jiffies_64;
+
+SECTIONS
+{
+ . = VMLINUX_LOAD_ADDRESS;
+
+ _text = .;
+ HEAD_TEXT_SECTION
+
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+ .text : {
+ TEXT_TEXT
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ } :text = 0
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+ _etext = .;
+
+ EXCEPTION_TABLE(16)
+
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+ __init_begin = .;
+ __inittext_begin = .;
+
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ .exit.text : {
+ EXIT_TEXT
+ }
+
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+ __inittext_end = .;
+
+ __initdata_begin = .;
+
+ INIT_DATA_SECTION(16)
+ .exit.data : {
+ EXIT_DATA
+ }
+
+#ifdef CONFIG_SMP
+ PERCPU_SECTION(1 << CONFIG_L1_CACHE_SHIFT)
+#endif
+
+ .init.bss : {
+ *(.init.bss)
+ }
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+ __initdata_end = .;
+
+ __init_end = .;
+
+ _sdata = .;
+ RO_DATA(4096)
+ RW_DATA(1 << CONFIG_L1_CACHE_SHIFT, PAGE_SIZE, THREAD_SIZE)
+
+ .sdata : {
+ *(.sdata)
+ }
+ .edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGN); }
+ _edata = .;
+
+ BSS_SECTION(0, SZ_64K, 8)
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+
+ _end = .;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+
+ .gptab.sdata : {
+ *(.gptab.data)
+ *(.gptab.sdata)
+ }
+ .gptab.sbss : {
+ *(.gptab.bss)
+ *(.gptab.sbss)
+ }
+
+ DISCARDS
+ /DISCARD/ : {
+ *(.gnu.attributes)
+ *(.options)
+ *(.eh_frame)
+ }
+}
diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile
new file mode 100644
index 000000000000..e36635fccb69
--- /dev/null
+++ b/arch/loongarch/lib/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for LoongArch-specific library files.
+#
+
+lib-y += delay.o clear_user.o copy_user.o dump_tlb.o
diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_user.S
new file mode 100644
index 000000000000..25d9be5fbb19
--- /dev/null
+++ b/arch/loongarch/lib/clear_user.S
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
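+/*
+ * fixup_ex from, to, offset, fix:
+ * record an exception table entry so that a fault at label \from jumps
+ * to the fixup at label \to, which returns with a0 = a1 + \offset (the
+ * number of bytes left uncleared). \fix selects whether the fixup body
+ * itself is emitted here.
+ */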
+.macro fixup_ex from, to, offset, fix
+.if \fix
+ .section .fixup, "ax"
+\to: addi.d a0, a1, \offset
+ jr ra
+ .previous
+.endif
+ .section __ex_table, "a"
+ PTR \from\()b, \to\()b
+ .previous
+.endm
+
+/*
+ * unsigned long __clear_user(void *addr, size_t size)
+ *
+ * a0: addr
+ * a1: size
+ */
+SYM_FUNC_START(__clear_user)
+ beqz a1, 2f
+
+1: st.b zero, a0, 0
+ addi.d a0, a0, 1
+ addi.d a1, a1, -1
+ bgt a1, zero, 1b
+
+2: move a0, a1
+ jr ra
+
+ fixup_ex 1, 3, 0, 1
+SYM_FUNC_END(__clear_user)
+
+EXPORT_SYMBOL(__clear_user)
diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S
new file mode 100644
index 000000000000..9ae507f851b5
--- /dev/null
+++ b/arch/loongarch/lib/copy_user.S
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
+.macro fixup_ex from, to, offset, fix
+.if \fix
+ .section .fixup, "ax"
+\to: addi.d a0, a2, \offset
+ jr ra
+ .previous
+.endif
+ .section __ex_table, "a"
+ PTR \from\()b, \to\()b
+ .previous
+.endm
+
+/*
+ * unsigned long __copy_user(void *to, const void *from, size_t n)
+ *
+ * a0: to
+ * a1: from
+ * a2: n
+ */
+SYM_FUNC_START(__copy_user)
+ beqz a2, 3f
+
+1: ld.b t0, a1, 0
+2: st.b t0, a0, 0
+ addi.d a0, a0, 1
+ addi.d a1, a1, 1
+ addi.d a2, a2, -1
+ bgt a2, zero, 1b
+
+3: move a0, a2
+ jr ra
+
+ fixup_ex 1, 4, 0, 1
+ fixup_ex 2, 4, 0, 0
+SYM_FUNC_END(__copy_user)
+
+EXPORT_SYMBOL(__copy_user)
diff --git a/arch/loongarch/lib/delay.c b/arch/loongarch/lib/delay.c
new file mode 100644
index 000000000000..5d856694fcfe
--- /dev/null
+++ b/arch/loongarch/lib/delay.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/smp.h>
+#include <linux/timex.h>
+
+#include <asm/compiler.h>
+#include <asm/processor.h>
+
+void __delay(unsigned long cycles)
+{
+ u64 t0 = get_cycles();
+
+ while ((unsigned long)(get_cycles() - t0) < cycles)
+ cpu_relax();
+}
+EXPORT_SYMBOL(__delay);
+
+/*
+ * Division by multiplication: you don't have to worry about
+ * loss of precision.
+ *
+ * Use only for very small delays ( < 1 msec). Should probably use a
+ * lookup table, really, as the multiplications take much too long with
+ * short delays. This is a "reasonable" implementation, though (and the
+ * first constant multiplication gets optimized away if the delay is
+ * a constant).
+ */
+
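+/* 0x10c7 is 2**32 / 1000000 (rounded up); 0x5 is 2**32 / 1000000000 (rounded up) */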
+void __udelay(unsigned long us)
+{
+ __delay((us * 0x000010c7ull * HZ * lpj_fine) >> 32);
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long ns)
+{
+ __delay((ns * 0x00000005ull * HZ * lpj_fine) >> 32);
+}
+EXPORT_SYMBOL(__ndelay);
diff --git a/arch/loongarch/lib/dump_tlb.c b/arch/loongarch/lib/dump_tlb.c
new file mode 100644
index 000000000000..cda2c6bc7f09
--- /dev/null
+++ b/arch/loongarch/lib/dump_tlb.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
+ * Copyright (C) 1999 by Silicon Graphics, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+#include <asm/loongarch.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+
+void dump_tlb_regs(void)
+{
+ const int field = 2 * sizeof(unsigned long);
+
+ pr_info("Index : %0x\n", read_csr_tlbidx());
+ pr_info("PageSize : %0x\n", read_csr_pagesize());
+ pr_info("EntryHi : %0*llx\n", field, read_csr_entryhi());
+ pr_info("EntryLo0 : %0*llx\n", field, read_csr_entrylo0());
+ pr_info("EntryLo1 : %0*llx\n", field, read_csr_entrylo1());
+}
+
+static void dump_tlb(int first, int last)
+{
+ unsigned long s_entryhi, entryhi, asid;
+ unsigned long long entrylo0, entrylo1, pa;
+ unsigned int index;
+ unsigned int s_index, s_asid;
+ unsigned int pagesize, c0, c1, i;
+ unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
+ int pwidth = 11;
+ int vwidth = 11;
+ int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
+
+ s_entryhi = read_csr_entryhi();
+ s_index = read_csr_tlbidx();
+ s_asid = read_csr_asid();
+
+ for (i = first; i <= last; i++) {
+ write_csr_index(i);
+ tlb_read();
+ pagesize = read_csr_pagesize();
+ entryhi = read_csr_entryhi();
+ entrylo0 = read_csr_entrylo0();
+ entrylo1 = read_csr_entrylo1();
+ index = read_csr_tlbidx();
+ asid = read_csr_asid();
+
+ /* EHINV bit marks entire entry as invalid */
+ if (index & CSR_TLBIDX_EHINV)
+ continue;
+ /*
+ * The ASID takes effect only in the absence of the G (global) bit.
+ */
+ if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
+ asid != s_asid)
+ continue;
+
+ /*
+ * Only print entries in use
+ */
+ pr_info("Index: %2d pgsize=%x ", i, (1 << pagesize));
+
+ c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
+ c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
+
+ pr_cont("va=%0*lx asid=%0*lx",
+ vwidth, (entryhi & ~0x1fffUL), asidwidth, asid & asidmask);
+
+ /* NR/NX are in awkward places, so mask them off separately */
+ pa = entrylo0 & ~(ENTRYLO_NR | ENTRYLO_NX);
+ pa = pa & PAGE_MASK;
+ pr_cont("\n\t[");
+ pr_cont("ri=%d xi=%d ",
+ (entrylo0 & ENTRYLO_NR) ? 1 : 0,
+ (entrylo0 & ENTRYLO_NX) ? 1 : 0);
+ pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d plv=%lld] [",
+ pwidth, pa, c0,
+ (entrylo0 & ENTRYLO_D) ? 1 : 0,
+ (entrylo0 & ENTRYLO_V) ? 1 : 0,
+ (entrylo0 & ENTRYLO_G) ? 1 : 0,
+ (entrylo0 & ENTRYLO_PLV) >> ENTRYLO_PLV_SHIFT);
+ /* NR/NX are in awkward places, so mask them off separately */
+ pa = entrylo1 & ~(ENTRYLO_NR | ENTRYLO_NX);
+ pa = pa & PAGE_MASK;
+ pr_cont("ri=%d xi=%d ",
+ (entrylo1 & ENTRYLO_NR) ? 1 : 0,
+ (entrylo1 & ENTRYLO_NX) ? 1 : 0);
+ pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d plv=%lld]\n",
+ pwidth, pa, c1,
+ (entrylo1 & ENTRYLO_D) ? 1 : 0,
+ (entrylo1 & ENTRYLO_V) ? 1 : 0,
+ (entrylo1 & ENTRYLO_G) ? 1 : 0,
+ (entrylo1 & ENTRYLO_PLV) >> ENTRYLO_PLV_SHIFT);
+ }
+ pr_info("\n");
+
+ write_csr_entryhi(s_entryhi);
+ write_csr_tlbidx(s_index);
+ write_csr_asid(s_asid);
+}
+
+void dump_tlb_all(void)
+{
+ dump_tlb(0, current_cpu_data.tlbsize - 1);
+}
diff --git a/arch/loongarch/mm/Makefile b/arch/loongarch/mm/Makefile
new file mode 100644
index 000000000000..8ffc6383f836
--- /dev/null
+++ b/arch/loongarch/mm/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux/LoongArch-specific parts of the memory manager.
+#
+
+obj-y += init.o cache.o tlb.o tlbex.o extable.o \
+ fault.o ioremap.o maccess.o mmap.o pgtable.o page.o
+
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c
new file mode 100644
index 000000000000..9e5ce5aa73f7
--- /dev/null
+++ b/arch/loongarch/mm/cache.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ */
+#include <linux/export.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/dma.h>
+#include <asm/loongarch.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+
+/*
+ * LoongArch maintains ICache/DCache coherency in hardware;
+ * we only need an "ibar" here to avoid the instruction hazard.
+ */
+void local_flush_icache_range(unsigned long start, unsigned long end)
+{
+ asm volatile ("\tibar 0\n"::);
+}
+EXPORT_SYMBOL(local_flush_icache_range);
+
+void cache_error_setup(void)
+{
+ extern char __weak except_vec_cex;
+ set_merr_handler(0x0, &except_vec_cex, 0x80);
+}
+
+static unsigned long icache_size __read_mostly;
+static unsigned long dcache_size __read_mostly;
+static unsigned long vcache_size __read_mostly;
+static unsigned long scache_size __read_mostly;
+
+static char *way_string[] = { NULL, "direct mapped", "2-way",
+ "3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
+ "9-way", "10-way", "11-way", "12-way",
+ "13-way", "14-way", "15-way", "16-way",
+};
+
+static void probe_pcache(void)
+{
+ struct cpuinfo_loongarch *c = &current_cpu_data;
+ unsigned int lsize, sets, ways;
+ unsigned int config;
+
+ config = read_cpucfg(LOONGARCH_CPUCFG17);
+ lsize = 1 << ((config & CPUCFG17_L1I_SIZE_M) >> CPUCFG17_L1I_SIZE);
+ sets = 1 << ((config & CPUCFG17_L1I_SETS_M) >> CPUCFG17_L1I_SETS);
+ ways = ((config & CPUCFG17_L1I_WAYS_M) >> CPUCFG17_L1I_WAYS) + 1;
+
+ c->icache.linesz = lsize;
+ c->icache.sets = sets;
+ c->icache.ways = ways;
+ icache_size = sets * ways * lsize;
+ c->icache.waysize = icache_size / c->icache.ways;
+
+ config = read_cpucfg(LOONGARCH_CPUCFG18);
+ lsize = 1 << ((config & CPUCFG18_L1D_SIZE_M) >> CPUCFG18_L1D_SIZE);
+ sets = 1 << ((config & CPUCFG18_L1D_SETS_M) >> CPUCFG18_L1D_SETS);
+ ways = ((config & CPUCFG18_L1D_WAYS_M) >> CPUCFG18_L1D_WAYS) + 1;
+
+ c->dcache.linesz = lsize;
+ c->dcache.sets = sets;
+ c->dcache.ways = ways;
+ dcache_size = sets * ways * lsize;
+ c->dcache.waysize = dcache_size / c->dcache.ways;
+
+ c->options |= LOONGARCH_CPU_PREFETCH;
+
+ pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
+ icache_size >> 10, way_string[c->icache.ways], "VIPT", c->icache.linesz);
+
+ pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
+ dcache_size >> 10, way_string[c->dcache.ways], "VIPT", "no aliases", c->dcache.linesz);
+}
+
+static void probe_vcache(void)
+{
+ struct cpuinfo_loongarch *c = &current_cpu_data;
+ unsigned int lsize, sets, ways;
+ unsigned int config;
+
+ config = read_cpucfg(LOONGARCH_CPUCFG19);
+ lsize = 1 << ((config & CPUCFG19_L2_SIZE_M) >> CPUCFG19_L2_SIZE);
+ sets = 1 << ((config & CPUCFG19_L2_SETS_M) >> CPUCFG19_L2_SETS);
+ ways = ((config & CPUCFG19_L2_WAYS_M) >> CPUCFG19_L2_WAYS) + 1;
+
+ c->vcache.linesz = lsize;
+ c->vcache.sets = sets;
+ c->vcache.ways = ways;
+ vcache_size = lsize * sets * ways;
+ c->vcache.waysize = vcache_size / c->vcache.ways;
+
+ pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
+ vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
+}
+
+static void probe_scache(void)
+{
+ struct cpuinfo_loongarch *c = &current_cpu_data;
+ unsigned int lsize, sets, ways;
+ unsigned int config;
+
+ config = read_cpucfg(LOONGARCH_CPUCFG20);
+ lsize = 1 << ((config & CPUCFG20_L3_SIZE_M) >> CPUCFG20_L3_SIZE);
+ sets = 1 << ((config & CPUCFG20_L3_SETS_M) >> CPUCFG20_L3_SETS);
+ ways = ((config & CPUCFG20_L3_WAYS_M) >> CPUCFG20_L3_WAYS) + 1;
+
+ c->scache.linesz = lsize;
+ c->scache.sets = sets;
+ c->scache.ways = ways;
+ /* The scache is shared among 4 cores */
+ scache_size = lsize * sets * ways;
+ c->scache.waysize = scache_size / c->scache.ways;
+
+ pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
+ scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
+}
+
+void cpu_cache_init(void)
+{
+ probe_pcache();
+ probe_vcache();
+ probe_scache();
+
+ shm_align_mask = PAGE_SIZE - 1;
+}
diff --git a/arch/loongarch/mm/extable.c b/arch/loongarch/mm/extable.c
new file mode 100644
index 000000000000..bc20988f2b87
--- /dev/null
+++ b/arch/loongarch/mm/extable.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/extable.h>
+#include <linux/spinlock.h>
+#include <asm/branch.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(exception_era(regs));
+ if (fixup) {
+ regs->csr_era = fixup->fixup;
+
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
new file mode 100644
index 000000000000..605579b19a00
--- /dev/null
+++ b/arch/loongarch/mm/fault.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1995 - 2000 by Ralf Baechle
+ */
+#include <linux/context_tracking.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/entry-common.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/ratelimit.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kdebug.h>
+#include <linux/kprobes.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/branch.h>
+#include <asm/mmu_context.h>
+#include <asm/ptrace.h>
+
+int show_unhandled_signals = 1;
+
+static void __kprobes no_context(struct pt_regs *regs, unsigned long address)
+{
+ const int field = sizeof(unsigned long) * 2;
+
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs))
+ return;
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ bust_spinlocks(1);
+
+ pr_alert("CPU %d Unable to handle kernel paging request at "
+ "virtual address %0*lx, era == %0*lx, ra == %0*lx\n",
+ raw_smp_processor_id(), field, address, field, regs->csr_era,
+ field, regs->regs[1]);
+ die("Oops", regs);
+}
+
+static void __kprobes do_out_of_memory(struct pt_regs *regs, unsigned long address)
+{
+ /*
+ * We ran out of memory; call the OOM killer and return to userspace
+ * (which will retry the fault, or kill us if we got oom-killed).
+ */
+ if (!user_mode(regs)) {
+ no_context(regs, address);
+ return;
+ }
+ pagefault_out_of_memory();
+}
+
+static void __kprobes do_sigbus(struct pt_regs *regs,
+ unsigned long write, unsigned long address, int si_code)
+{
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs)) {
+ no_context(regs, address);
+ return;
+ }
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ current->thread.csr_badvaddr = address;
+ current->thread.trap_nr = read_csr_excode();
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
+}
+
+static void __kprobes do_sigsegv(struct pt_regs *regs,
+ unsigned long write, unsigned long address, int si_code)
+{
+ const int field = sizeof(unsigned long) * 2;
+ static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs)) {
+ no_context(regs, address);
+ return;
+ }
+
+ /* User mode accesses just cause a SIGSEGV */
+ current->thread.csr_badvaddr = address;
+ if (!write)
+ current->thread.error_code = 1;
+ else
+ current->thread.error_code = 2;
+ current->thread.trap_nr = read_csr_excode();
+
+ if (show_unhandled_signals &&
+ unhandled_signal(current, SIGSEGV) && __ratelimit(&ratelimit_state)) {
+ pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n",
+ current->comm,
+ write ? "write access to" : "read access from",
+ field, address);
+ pr_info("era = %0*lx in", field,
+ (unsigned long) regs->csr_era);
+ print_vma_addr(KERN_CONT " ", regs->csr_era);
+ pr_cont("\n");
+ pr_info("ra = %0*lx in", field,
+ (unsigned long) regs->regs[1]);
+ print_vma_addr(KERN_CONT " ", regs->regs[1]);
+ pr_cont("\n");
+ }
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+}
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ */
+static void __kprobes __do_page_fault(struct pt_regs *regs,
+ unsigned long write, unsigned long address)
+{
+ int si_code = SEGV_MAPERR;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ struct vm_area_struct *vma = NULL;
+ vm_fault_t fault;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
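+ /* Addresses with any __UA_LIMIT bits set lie outside the user address range */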
+ if (address & __UA_LIMIT) {
+ if (!user_mode(regs))
+ no_context(regs, address);
+ else
+ do_sigsegv(regs, write, address, si_code);
+ return;
+ }
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (faulthandler_disabled() || !mm) {
+ do_sigsegv(regs, write, address, si_code);
+ return;
+ }
+
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+retry:
+ mmap_read_lock(mm);
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (!expand_stack(vma, address))
+ goto good_area;
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ mmap_read_unlock(mm);
+ do_sigsegv(regs, write, address, si_code);
+ return;
+
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ si_code = SEGV_ACCERR;
+
+ if (write) {
+ flags |= FAULT_FLAG_WRITE;
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ if (!(vma->vm_flags & VM_READ) && address != exception_era(regs))
+ goto bad_area;
+ if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs))
+ goto bad_area;
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(vma, address, flags, regs);
+
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ no_context(regs, address);
+ return;
+ }
+
+ if (unlikely(fault & VM_FAULT_RETRY)) {
+ flags |= FAULT_FLAG_TRIED;
+
+ /*
+ * No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+ goto retry;
+ }
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ mmap_read_unlock(mm);
+ if (fault & VM_FAULT_OOM) {
+ do_out_of_memory(regs, address);
+ return;
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ do_sigsegv(regs, write, address, si_code);
+ return;
+ } else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
+ do_sigbus(regs, write, address, si_code);
+ return;
+ }
+ BUG();
+ }
+
+ mmap_read_unlock(mm);
+}
+
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+ unsigned long write, unsigned long address)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ /* Enable interrupt if enabled in parent context */
+ if (likely(regs->csr_prmd & CSR_PRMD_PIE))
+ local_irq_enable();
+
+ __do_page_fault(regs, write, address);
+
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+}
diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c
new file mode 100644
index 000000000000..ba138117b124
--- /dev/null
+++ b/arch/loongarch/mm/hugetlbpage.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/err.h>
+#include <linux/sysctl.h>
+#include <asm/mman.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long sz)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pte_t *pte = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ p4d = p4d_alloc(mm, pgd, addr);
+ pud = pud_alloc(mm, p4d, addr);
+ if (pud)
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+
+ return pte;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+ unsigned long sz)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ if (pgd_present(*pgd)) {
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_present(*p4d)) {
+ pud = pud_offset(p4d, addr);
+ if (pud_present(*pud))
+ pmd = pmd_offset(pud, addr);
+ }
+ }
+ return (pte_t *) pmd;
+}
+
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+ if (len & ~HPAGE_MASK)
+ return -EINVAL;
+ if (addr & ~HPAGE_MASK)
+ return -EINVAL;
+ return 0;
+}
+
+int pmd_huge(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _PAGE_HUGE) != 0;
+}
+
+int pud_huge(pud_t pud)
+{
+ return (pud_val(pud) & _PAGE_HUGE) != 0;
+}
+
+uint64_t pmd_to_entrylo(unsigned long pmd_val)
+{
+ uint64_t val;
+ /* PMD as PTE. Must be huge page */
+ if (!pmd_huge(__pmd(pmd_val)))
+ panic("%s", __func__);
+
+ val = pmd_val ^ _PAGE_HUGE;
+ val |= ((val & _PAGE_HGLOBAL) >>
+ (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
+
+ return val;
+}
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
new file mode 100644
index 000000000000..7094a68c9b83
--- /dev/null
+++ b/arch/loongarch/mm/init.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/pagemap.h>
+#include <linux/memblock.h>
+#include <linux/memremap.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
+#include <linux/proc_fs.h>
+#include <linux/pfn.h>
+#include <linux/hardirq.h>
+#include <linux/gfp.h>
+#include <linux/initrd.h>
+#include <linux/mmzone.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/dma.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+
+/*
+ * We have up to 8 empty zeroed pages so we can map one of the right colour
+ * when needed. Since the page is never written to after initialization, we
+ * don't have to care about aliases on other CPUs.
+ */
+unsigned long empty_zero_page, zero_page_mask;
+EXPORT_SYMBOL_GPL(empty_zero_page);
+EXPORT_SYMBOL(zero_page_mask);
+
+void setup_zero_pages(void)
+{
+ unsigned int order, i;
+ struct page *page;
+
+ order = 0;
+
+ empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!empty_zero_page)
+ panic("Oh boy, that early out of memory?");
+
+ page = virt_to_page((void *)empty_zero_page);
+ split_page(page, order);
+ for (i = 0; i < (1 << order); i++, page++)
+ mark_page_reserved(page);
+
+ zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ void *vfrom, *vto;
+
+ vto = kmap_atomic(to);
+ vfrom = kmap_atomic(from);
+ copy_page(vto, vfrom);
+ kunmap_atomic(vfrom);
+ kunmap_atomic(vto);
+ /* Make sure this page is cleared on other CPUs too before using it */
+ smp_wmb();
+}
+
+int __ref page_is_ram(unsigned long pfn)
+{
+ unsigned long addr = PFN_PHYS(pfn);
+
+ return memblock_is_memory(addr) && !memblock_is_reserved(addr);
+}
+
+#ifndef CONFIG_NUMA
+void __init paging_init(void)
+{
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
+
+#ifdef CONFIG_ZONE_DMA
+ max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+#endif
+#ifdef CONFIG_ZONE_DMA32
+ max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+
+ free_area_init(max_zone_pfns);
+}
+
+void __init mem_init(void)
+{
+ max_mapnr = max_low_pfn;
+ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+
+ memblock_free_all();
+ setup_zero_pages(); /* Setup zeroed pages. */
+}
+#endif /* !CONFIG_NUMA */
+
+void __ref free_initmem(void)
+{
+ free_initmem_default(POISON_FREE_INITMEM);
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ int ret;
+
+ ret = __add_pages(nid, start_pfn, nr_pages, params);
+
+ if (ret)
+ pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
+{
+ int nid;
+
+ nid = pa_to_nid(start);
+ return nid;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ struct page *page = pfn_to_page(start_pfn);
+
+ /* With altmap the first mapped page is offset from @start */
+ if (altmap)
+ page += vmem_altmap_offset(altmap);
+ __remove_pages(start_pfn, nr_pages, altmap);
+}
+#endif
+#endif
+
+/*
+ * Align swapper_pg_dir to 64K, which allows its address to be loaded
+ * with a single LUI instruction in the TLB handlers. If we used
+ * __aligned(64K), its size would get rounded up to the alignment
+ * size, and waste space. So we place it in its own section and align
+ * it in the linker script.
+ */
+pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
+
+pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
+#ifndef __PAGETABLE_PUD_FOLDED
+pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
+#endif
+#ifndef __PAGETABLE_PMD_FOLDED
+pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+EXPORT_SYMBOL_GPL(invalid_pmd_table);
+#endif
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
+EXPORT_SYMBOL(invalid_pte_table);
diff --git a/arch/loongarch/mm/ioremap.c b/arch/loongarch/mm/ioremap.c
new file mode 100644
index 000000000000..73b0980ab6f5
--- /dev/null
+++ b/arch/loongarch/mm/ioremap.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/io.h>
+
+void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size)
+{
+ return ((void __iomem *)TO_CACHE(phys_addr));
+}
+
+void __init early_iounmap(void __iomem *addr, unsigned long size)
+{
+
+}
+
+void *early_memremap_ro(resource_size_t phys_addr, unsigned long size)
+{
+ return early_memremap(phys_addr, size);
+}
+
+void *early_memremap_prot(resource_size_t phys_addr, unsigned long size,
+ unsigned long prot_val)
+{
+ return early_memremap(phys_addr, size);
+}
diff --git a/arch/loongarch/mm/maccess.c b/arch/loongarch/mm/maccess.c
new file mode 100644
index 000000000000..58173842c6be
--- /dev/null
+++ b/arch/loongarch/mm/maccess.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+{
+ /* highest bit set means kernel space */
+ return (unsigned long)unsafe_src >> (BITS_PER_LONG - 1);
+}
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
new file mode 100644
index 000000000000..52e40f0ba732
--- /dev/null
+++ b/arch/loongarch/mm/mmap.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/compiler.h>
+#include <linux/elf-randomize.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/export.h>
+#include <linux/personality.h>
+#include <linux/random.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
+
+unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
+EXPORT_SYMBOL(shm_align_mask);
+
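+/* Round addr up to the cache-colour alignment, then add the colour offset implied by pgoff */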
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + shm_align_mask) & ~shm_align_mask) + \
+ (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+
+enum mmap_allocation_direction {UP, DOWN};
+
+static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ unsigned long addr0, unsigned long len, unsigned long pgoff,
+ unsigned long flags, enum mmap_allocation_direction dir)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long addr = addr0;
+ int do_color_align;
+ struct vm_unmapped_area_info info;
+
+ if (unlikely(len > TASK_SIZE))
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED) {
+ /* Even MAP_FIXED mappings must reside within TASK_SIZE */
+ if (TASK_SIZE - len < addr)
+ return -EINVAL;
+
+ /*
+ * We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+ if ((flags & MAP_SHARED) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+ return -EINVAL;
+ return addr;
+ }
+
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
+ /* requesting a specific address */
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+ info.length = len;
+ info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+
+ if (dir == DOWN) {
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.low_limit = PAGE_SIZE;
+ info.high_limit = mm->mmap_base;
+ addr = vm_unmapped_area(&info);
+
+ if (!(addr & ~PAGE_MASK))
+ return addr;
+
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ }
+
+ info.flags = 0;
+ info.low_limit = mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ return vm_unmapped_area(&info);
+}
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ return arch_get_unmapped_area_common(filp,
+ addr0, len, pgoff, flags, UP);
+}
+
+/*
+ * There is no need to export this but sched.h declares the function as
+ * extern so making it static here results in an error.
+ */
+unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+ unsigned long addr0, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ return arch_get_unmapped_area_common(filp,
+ addr0, len, pgoff, flags, DOWN);
+}
+
+int __virt_addr_valid(volatile void *kaddr)
+{
+ unsigned long vaddr = (unsigned long)kaddr;
+
+ if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
+ return 0;
+
+ return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
+}
+EXPORT_SYMBOL_GPL(__virt_addr_valid);
diff --git a/arch/loongarch/mm/page.S b/arch/loongarch/mm/page.S
new file mode 100644
index 000000000000..ddc78ab33c7b
--- /dev/null
+++ b/arch/loongarch/mm/page.S
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/export.h>
+#include <asm/page.h>
+#include <asm/regdef.h>
+
+ .align 5
+SYM_FUNC_START(clear_page)
+ lu12i.w t0, 1 << (PAGE_SHIFT - 12)
+ add.d t0, t0, a0
+1:
+ st.d zero, a0, 0
+ st.d zero, a0, 8
+ st.d zero, a0, 16
+ st.d zero, a0, 24
+ st.d zero, a0, 32
+ st.d zero, a0, 40
+ st.d zero, a0, 48
+ st.d zero, a0, 56
+ addi.d a0, a0, 128
+ st.d zero, a0, -64
+ st.d zero, a0, -56
+ st.d zero, a0, -48
+ st.d zero, a0, -40
+ st.d zero, a0, -32
+ st.d zero, a0, -24
+ st.d zero, a0, -16
+ st.d zero, a0, -8
+ bne t0, a0, 1b
+
+ jirl $r0, ra, 0
+SYM_FUNC_END(clear_page)
+EXPORT_SYMBOL(clear_page)
+
+.align 5
+SYM_FUNC_START(copy_page)
+ lu12i.w t8, 1 << (PAGE_SHIFT - 12)
+ add.d t8, t8, a0
+1:
+ ld.d t0, a1, 0
+ ld.d t1, a1, 8
+ ld.d t2, a1, 16
+ ld.d t3, a1, 24
+ ld.d t4, a1, 32
+ ld.d t5, a1, 40
+ ld.d t6, a1, 48
+ ld.d t7, a1, 56
+
+ st.d t0, a0, 0
+ st.d t1, a0, 8
+ ld.d t0, a1, 64
+ ld.d t1, a1, 72
+ st.d t2, a0, 16
+ st.d t3, a0, 24
+ ld.d t2, a1, 80
+ ld.d t3, a1, 88
+ st.d t4, a0, 32
+ st.d t5, a0, 40
+ ld.d t4, a1, 96
+ ld.d t5, a1, 104
+ st.d t6, a0, 48
+ st.d t7, a0, 56
+ ld.d t6, a1, 112
+ ld.d t7, a1, 120
+ addi.d a0, a0, 128
+ addi.d a1, a1, 128
+
+ st.d t0, a0, -64
+ st.d t1, a0, -56
+ st.d t2, a0, -48
+ st.d t3, a0, -40
+ st.d t4, a0, -32
+ st.d t5, a0, -24
+ st.d t6, a0, -16
+ st.d t7, a0, -8
+
+ bne t8, a0, 1b
+ jirl $r0, ra, 0
+SYM_FUNC_END(copy_page)
+EXPORT_SYMBOL(copy_page)
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
new file mode 100644
index 000000000000..0569647152e9
--- /dev/null
+++ b/arch/loongarch/mm/pgtable.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *ret, *init;
+
+ ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ if (ret) {
+ init = pgd_offset(&init_mm, 0UL);
+ pgd_init((unsigned long)ret);
+ memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pgd_alloc);
+
+void pgd_init(unsigned long page)
+{
+ unsigned long *p, *end;
+ unsigned long entry;
+
+#if !defined(__PAGETABLE_PUD_FOLDED)
+ entry = (unsigned long)invalid_pud_table;
+#elif !defined(__PAGETABLE_PMD_FOLDED)
+ entry = (unsigned long)invalid_pmd_table;
+#else
+ entry = (unsigned long)invalid_pte_table;
+#endif
+
+ p = (unsigned long *) page;
+ end = p + PTRS_PER_PGD;
+
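+ /* Point every slot at the invalid next-level table, eight entries per loop iteration */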
+ do {
+ p[0] = entry;
+ p[1] = entry;
+ p[2] = entry;
+ p[3] = entry;
+ p[4] = entry;
+ p += 8;
+ p[-3] = entry;
+ p[-2] = entry;
+ p[-1] = entry;
+ } while (p != end);
+}
+EXPORT_SYMBOL_GPL(pgd_init);
+
+#ifndef __PAGETABLE_PMD_FOLDED
+void pmd_init(unsigned long addr, unsigned long pagetable)
+{
+ unsigned long *p, *end;
+
+ p = (unsigned long *) addr;
+ end = p + PTRS_PER_PMD;
+
+ do {
+ p[0] = pagetable;
+ p[1] = pagetable;
+ p[2] = pagetable;
+ p[3] = pagetable;
+ p[4] = pagetable;
+ p += 8;
+ p[-3] = pagetable;
+ p[-2] = pagetable;
+ p[-1] = pagetable;
+ } while (p != end);
+}
+EXPORT_SYMBOL_GPL(pmd_init);
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+void pud_init(unsigned long addr, unsigned long pagetable)
+{
+ unsigned long *p, *end;
+
+ p = (unsigned long *)addr;
+ end = p + PTRS_PER_PUD;
+
+ do {
+ p[0] = pagetable;
+ p[1] = pagetable;
+ p[2] = pagetable;
+ p[3] = pagetable;
+ p[4] = pagetable;
+ p += 8;
+ p[-3] = pagetable;
+ p[-2] = pagetable;
+ p[-1] = pagetable;
+ } while (p != end);
+}
+#endif
+
+pmd_t mk_pmd(struct page *page, pgprot_t prot)
+{
+ pmd_t pmd;
+
+ pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);
+
+ return pmd;
+}
+
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+ flush_tlb_all();
+}
+
+void __init pagetable_init(void)
+{
+ /* Initialize the entire pgd. */
+ pgd_init((unsigned long)swapper_pg_dir);
+ pgd_init((unsigned long)invalid_pg_dir);
+#ifndef __PAGETABLE_PUD_FOLDED
+ pud_init((unsigned long)invalid_pud_table, (unsigned long)invalid_pmd_table);
+#endif
+#ifndef __PAGETABLE_PMD_FOLDED
+ pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
+#endif
+}
diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
new file mode 100644
index 000000000000..e272f8ac57d1
--- /dev/null
+++ b/arch/loongarch/mm/tlb.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/export.h>
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+
+void local_flush_tlb_all(void)
+{
+ invtlb_all(INVTLB_CURRENT_ALL, 0, 0);
+}
+EXPORT_SYMBOL(local_flush_tlb_all);
+
+void local_flush_tlb_user(void)
+{
+ invtlb_all(INVTLB_CURRENT_GFALSE, 0, 0);
+}
+EXPORT_SYMBOL(local_flush_tlb_user);
+
+void local_flush_tlb_kernel(void)
+{
+ invtlb_all(INVTLB_CURRENT_GTRUE, 0, 0);
+}
+EXPORT_SYMBOL(local_flush_tlb_kernel);
+
+/*
+ * All entries common to a mm share an asid. To effectively flush
+ * these entries, we just bump the asid.
+ */
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ int cpu;
+
+ preempt_disable();
+
+ cpu = smp_processor_id();
+
+ if (asid_valid(mm, cpu))
+ drop_mmu_context(mm, cpu);
+ else
+ cpumask_clear_cpu(cpu, mm_cpumask(mm));
+
+ preempt_enable();
+}
+
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int cpu = smp_processor_id();
+
+ if (asid_valid(mm, cpu)) {
+ unsigned long size, flags;
+
+ local_irq_save(flags);
+ start = round_down(start, PAGE_SIZE << 1);
+ end = round_up(end, PAGE_SIZE << 1);
+ size = (end - start) >> (PAGE_SHIFT + 1);
+ if (size <= (current_cpu_data.tlbsizestlbsets ?
+ current_cpu_data.tlbsize / 8 :
+ current_cpu_data.tlbsize / 2)) {
+ int asid = cpu_asid(cpu, mm);
+
+ while (start < end) {
+ invtlb(INVTLB_ADDR_GFALSE_AND_ASID, asid, start);
+ start += (PAGE_SIZE << 1);
+ }
+ } else {
+ drop_mmu_context(mm, cpu);
+ }
+ local_irq_restore(flags);
+ } else {
+ cpumask_clear_cpu(cpu, mm_cpumask(mm));
+ }
+}
+
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long size, flags;
+
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ size = (size + 1) >> 1;
+ if (size <= (current_cpu_data.tlbsizestlbsets ?
+ current_cpu_data.tlbsize / 8 :
+ current_cpu_data.tlbsize / 2)) {
+
+ start &= (PAGE_MASK << 1);
+ end += ((PAGE_SIZE << 1) - 1);
+ end &= (PAGE_MASK << 1);
+
+ while (start < end) {
+ invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, start);
+ start += (PAGE_SIZE << 1);
+ }
+ } else {
+ local_flush_tlb_kernel();
+ }
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ int cpu = smp_processor_id();
+
+ if (asid_valid(vma->vm_mm, cpu)) {
+ int newpid;
+
+ newpid = cpu_asid(cpu, vma->vm_mm);
+ page &= (PAGE_MASK << 1);
+ invtlb(INVTLB_ADDR_GFALSE_AND_ASID, newpid, page);
+ } else {
+ cpumask_clear_cpu(cpu, mm_cpumask(vma->vm_mm));
+ }
+}
+
+/*
+ * This one is only used for pages with the global bit set so we don't care
+ * much about the ASID.
+ */
+void local_flush_tlb_one(unsigned long page)
+{
+ page &= (PAGE_MASK << 1);
+ invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, page);
+}
+
+static void __update_hugetlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+ int idx;
+ unsigned long lo;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ address &= (PAGE_MASK << 1);
+ write_csr_entryhi(address);
+ tlb_probe();
+ idx = read_csr_tlbidx();
+ write_csr_pagesize(PS_HUGE_SIZE);
+ lo = pmd_to_entrylo(pte_val(*ptep));
+ write_csr_entrylo0(lo);
+ write_csr_entrylo1(lo + (HPAGE_SIZE >> 1));
+
+ if (idx < 0)
+ tlb_write_random();
+ else
+ tlb_write_indexed();
+ write_csr_pagesize(PS_DEFAULT_SIZE);
+
+ local_irq_restore(flags);
+#endif
+}
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+ int idx;
+ unsigned long flags;
+
+ /*
+ * Handle a debugger faulting in pages for the debuggee.
+ */
+ if (current->active_mm != vma->vm_mm)
+ return;
+
+ if (pte_val(*ptep) & _PAGE_HUGE) {
+ __update_hugetlb(vma, address, ptep);
+ return;
+ }
+
+ local_irq_save(flags);
+
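+ /* Align ptep down to the even PTE of the pair; entrylo0 and entrylo1 take consecutive PTEs */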
+ if ((unsigned long)ptep & sizeof(pte_t))
+ ptep--;
+
+ address &= (PAGE_MASK << 1);
+ write_csr_entryhi(address);
+ tlb_probe();
+ idx = read_csr_tlbidx();
+ write_csr_pagesize(PS_DEFAULT_SIZE);
+ write_csr_entrylo0(pte_val(*ptep++));
+ write_csr_entrylo1(pte_val(*ptep));
+ if (idx < 0)
+ tlb_write_random();
+ else
+ tlb_write_indexed();
+
+ local_irq_restore(flags);
+}
+
+static void setup_ptwalker(void)
+{
+ unsigned long pwctl0, pwctl1;
+ unsigned long pgd_i = 0, pgd_w = 0;
+ unsigned long pud_i = 0, pud_w = 0;
+ unsigned long pmd_i = 0, pmd_w = 0;
+ unsigned long pte_i = 0, pte_w = 0;
+
+ pgd_i = PGDIR_SHIFT;
+ pgd_w = PAGE_SHIFT - 3;
+#if CONFIG_PGTABLE_LEVELS > 3
+ pud_i = PUD_SHIFT;
+ pud_w = PAGE_SHIFT - 3;
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+ pmd_i = PMD_SHIFT;
+ pmd_w = PAGE_SHIFT - 3;
+#endif
+ pte_i = PAGE_SHIFT;
+ pte_w = PAGE_SHIFT - 3;
+
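+ /* Encode each level's index base (shift) and index width into the page walker control CSRs */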
+ pwctl0 = pte_i | pte_w << 5 | pmd_i << 10 | pmd_w << 15 | pud_i << 20 | pud_w << 25;
+ pwctl1 = pgd_i | pgd_w << 6;
+
+ csr_write64(pwctl0, LOONGARCH_CSR_PWCTL0);
+ csr_write64(pwctl1, LOONGARCH_CSR_PWCTL1);
+ csr_write64((long)swapper_pg_dir, LOONGARCH_CSR_PGDH);
+ csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
+ csr_write64((long)smp_processor_id(), LOONGARCH_CSR_TMID);
+}
+
+static void output_pgtable_bits_defines(void)
+{
+#define pr_define(fmt, ...) \
+ pr_debug("#define " fmt, ##__VA_ARGS__)
+
+ pr_debug("#include <asm/asm.h>\n");
+ pr_debug("#include <asm/regdef.h>\n");
+ pr_debug("\n");
+
+ pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
+ pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
+ pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
+ pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
+ pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
+ pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
+ pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
+ pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
+ pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
+ pr_debug("\n");
+}
+
+#ifdef CONFIG_NUMA
+static unsigned long pcpu_handlers[NR_CPUS];
+#endif
+extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
+
+void setup_tlb_handler(int cpu)
+{
+ setup_ptwalker();
+ output_pgtable_bits_defines();
+
+ /* The tlb handlers are generated only once */
+ if (cpu == 0) {
+ memcpy((void *)tlbrentry, handle_tlb_refill, 0x80);
+ local_flush_icache_range(tlbrentry, tlbrentry + 0x80);
+ set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load, VECSIZE);
+ set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load, VECSIZE);
+ set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store, VECSIZE);
+ set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify, VECSIZE);
+ set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
+ set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
+ set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
+ }
+#ifdef CONFIG_NUMA
+ else {
+ void *addr;
+ struct page *page;
+ const int vec_sz = sizeof(exception_handlers);
+
+ if (pcpu_handlers[cpu])
+ return;
+
+ page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, get_order(vec_sz));
+ if (!page)
+ return;
+
+ addr = page_address(page);
+ pcpu_handlers[cpu] = virt_to_phys(addr);
+ memcpy((void *)addr, (void *)eentry, vec_sz);
+ local_flush_icache_range((unsigned long)addr, (unsigned long)addr + vec_sz);
+ csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_TLBRENTRY);
+ csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
+ }
+#endif
+}
+
+void tlb_init(int cpu)
+{
+ write_csr_pagesize(PS_DEFAULT_SIZE);
+ write_csr_stlbpgsize(PS_DEFAULT_SIZE);
+ write_csr_tlbrefill_pagesize(PS_DEFAULT_SIZE);
+ setup_tlb_handler(cpu);
+ local_flush_tlb_all();
+}
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
new file mode 100644
index 000000000000..7eee40271577
--- /dev/null
+++ b/arch/loongarch/mm/tlbex.S
@@ -0,0 +1,546 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <asm/asm.h>
+#include <asm/export.h>
+#include <asm/loongarch.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+ .macro tlb_do_page_fault, write
+ SYM_FUNC_START(tlb_do_page_fault_\write)
+ SAVE_ALL
+ csrrd a2, LOONGARCH_CSR_BADV
+ move a0, sp
+ REG_S a2, sp, PT_BVADDR
+ li.w a1, \write
+ la.abs t0, do_page_fault
+ jirl ra, t0, 0
+ RESTORE_ALL_AND_RET
+ SYM_FUNC_END(tlb_do_page_fault_\write)
+ .endm
+
+ tlb_do_page_fault 0
+ tlb_do_page_fault 1
+
+SYM_FUNC_START(handle_tlb_protect)
+ BACKUP_T0T1
+ SAVE_ALL
+ move a0, sp
+ move a1, zero
+ csrrd a2, LOONGARCH_CSR_BADV
+ REG_S a2, sp, PT_BVADDR
+ la.abs t0, do_page_fault
+ jirl ra, t0, 0
+ RESTORE_ALL_AND_RET
+SYM_FUNC_END(handle_tlb_protect)
+
+SYM_FUNC_START(handle_tlb_load)
+ csrwr t0, EXCEPTION_KS0
+ csrwr t1, EXCEPTION_KS1
+ csrwr ra, EXCEPTION_KS2
+
+ /*
+ * The vmalloc handling is not in the hotpath.
+ */
+ csrrd t0, LOONGARCH_CSR_BADV
+ blt t0, $r0, vmalloc_load
+ csrrd t1, LOONGARCH_CSR_PGDL
+
+vmalloc_done_load:
+ /* Get PGD offset in bytes */
+ srli.d t0, t0, PGDIR_SHIFT
+ andi t0, t0, (PTRS_PER_PGD - 1)
+ slli.d t0, t0, 3
+ add.d t1, t1, t0
+#if CONFIG_PGTABLE_LEVELS > 3
+ csrrd t0, LOONGARCH_CSR_BADV
+ ld.d t1, t1, 0
+ srli.d t0, t0, PUD_SHIFT
+ andi t0, t0, (PTRS_PER_PUD - 1)
+ slli.d t0, t0, 3
+ add.d t1, t1, t0
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+ csrrd t0, LOONGARCH_CSR_BADV
+ ld.d t1, t1, 0
+ srli.d t0, t0, PMD_SHIFT
+ andi t0, t0, (PTRS_PER_PMD - 1)
+ slli.d t0, t0, 3
+ add.d t1, t1, t0
+#endif
+ ld.d ra, t1, 0
+
+ /*
+ * For huge tlb entries, pmde doesn't contain an address but
+ * instead contains the tlb pte. Check the PAGE_HUGE bit and
+ * see if we need to jump to huge tlb processing.
+ */
+ andi t0, ra, _PAGE_HUGE
+ bne t0, $r0, tlb_huge_update_load
+
+ csrrd t0, LOONGARCH_CSR_BADV
+ srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER)
+ andi t0, t0, (PTRS_PER_PTE - 1)
+ slli.d t0, t0, _PTE_T_LOG2
+ add.d t1, ra, t0
+
+#ifdef CONFIG_SMP
+smp_pgtable_change_load:
+#endif
+#ifdef CONFIG_SMP
+ ll.d t0, t1, 0
+#else
+ ld.d t0, t1, 0
+#endif
+ tlbsrch
+
+ srli.d ra, t0, _PAGE_PRESENT_SHIFT
+ andi ra, ra, 1
+ beq ra, $r0, nopage_tlb_load
+
+ ori t0, t0, _PAGE_VALID
+#ifdef CONFIG_SMP
+ sc.d t0, t1, 0
+ beq t0, $r0, smp_pgtable_change_load
+#else
+ st.d t0, t1, 0
+#endif
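+ /* Clear bit 3 to align the PTE pointer down to the even entry of the pair */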
+ ori t1, t1, 8
+ xori t1, t1, 8
+ ld.d t0, t1, 0
+ ld.d t1, t1, 8
+ csrwr t0, LOONGARCH_CSR_TLBELO0
+ csrwr t1, LOONGARCH_CSR_TLBELO1
+ tlbwr
+leave_load:
+ csrrd t0, EXCEPTION_KS0
+ csrrd t1, EXCEPTION_KS1
+ csrrd ra, EXCEPTION_KS2
+ ertn
+#ifdef CONFIG_64BIT
+vmalloc_load:
+ la.abs t1, swapper_pg_dir
+ b vmalloc_done_load
+#endif
+
+ /*
+ * This is the entry point when build_tlbchange_handler_head
+ * spots a huge page.
+ */
+tlb_huge_update_load:
+#ifdef CONFIG_SMP
+ ll.d t0, t1, 0
+#else
+ ld.d t0, t1, 0
+#endif
+ srli.d ra, t0, _PAGE_PRESENT_SHIFT
+ andi ra, ra, 1
+ beq ra, $r0, nopage_tlb_load
+ tlbsrch
+
+ ori t0, t0, _PAGE_VALID
+#ifdef CONFIG_SMP
+ sc.d t0, t1, 0
+ beq t0, $r0, tlb_huge_update_load
+ ld.d t0, t1, 0
+#else
+ st.d t0, t1, 0
+#endif
+ addu16i.d t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
+ addi.d ra, t1, 0
+ csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
+ tlbwr
+
+ csrxchg $r0, t1, LOONGARCH_CSR_TLBIDX
+
+ /*
+ * A huge PTE describes an area the size of the
+ * configured huge page size. This is twice the
+ * size of the large TLB entry we intend to use.
+ * A TLB entry half the size of the configured
+ * huge page size is configured into entrylo0
+ * and entrylo1 to cover the contiguous huge PTE
+ * address space.
+ */
+ /* Huge page: Move Global bit */
+ xori t0, t0, _PAGE_HUGE
+ lu12i.w t1, _PAGE_HGLOBAL >> 12
+ and t1, t0, t1
+ srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+ or t0, t0, t1
+
+ addi.d ra, t0, 0
+ csrwr t0, LOONGARCH_CSR_TLBELO0
+ addi.d t0, ra, 0
+
+ /* Convert to entrylo1 */
+ addi.d t1, $r0, 1
+ slli.d t1, t1, (HPAGE_SHIFT - 1)
+ add.d t0, t0, t1
+ csrwr t0, LOONGARCH_CSR_TLBELO1
+
+ /* Set huge page tlb entry size */
+ addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+
+ tlbfill
+
+ addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+
+nopage_tlb_load:
+ dbar 0
+ csrrd ra, EXCEPTION_KS2
+ la.abs t0, tlb_do_page_fault_0
+ jirl $r0, t0, 0
+SYM_FUNC_END(handle_tlb_load)
+
+SYM_FUNC_START(handle_tlb_store)
+ csrwr t0, EXCEPTION_KS0
+ csrwr t1, EXCEPTION_KS1
+ csrwr ra, EXCEPTION_KS2
+
+ /*
+ * The vmalloc handling is not in the hotpath.
+ */
+ csrrd t0, LOONGARCH_CSR_BADV
+ blt t0, $r0, vmalloc_store
+ csrrd t1, LOONGARCH_CSR_PGDL
+
+vmalloc_done_store:
+ /* Get PGD offset in bytes */
+ srli.d t0, t0, PGDIR_SHIFT
+ andi t0, t0, (PTRS_PER_PGD - 1)
+ slli.d t0, t0, 3
+ add.d t1, t1, t0
+
+#if CONFIG_PGTABLE_LEVELS > 3
+ csrrd t0, LOONGARCH_CSR_BADV
+ ld.d t1, t1, 0
+ srli.d t0, t0, PUD_SHIFT
+ andi t0, t0, (PTRS_PER_PUD - 1)
+ slli.d t0, t0, 3
+ add.d t1, t1, t0
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+ csrrd t0, LOONGARCH_CSR_BADV
+ ld.d t1, t1, 0
+ srli.d t0, t0, PMD_SHIFT
+ andi t0, t0, (PTRS_PER_PMD - 1)
+ slli.d t0, t0, 3
+ add.d t1, t1, t0
+#endif
+ ld.d ra, t1, 0
+
+ /*
+ * For huge tlb entries, pmde doesn't contain an address but
+ * instead contains the tlb pte. Check the PAGE_HUGE bit and
+ * see if we need to jump to huge tlb processing.
+ */
+ andi t0, ra, _PAGE_HUGE
+ bne t0, $r0, tlb_huge_update_store
+
+ csrrd t0, LOONGARCH_CSR_BADV
+ srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER)
+ andi t0, t0, (PTRS_PER_PTE - 1)
+ slli.d t0, t0, _PTE_T_LOG2
+ add.d t1, ra, t0
+
+#ifdef CONFIG_SMP
+smp_pgtable_change_store:
+#endif
+#ifdef CONFIG_SMP
+ ll.d t0, t1, 0
+#else
+ ld.d t0, t1, 0
+#endif
+ tlbsrch
+
+ srli.d ra, t0, _PAGE_PRESENT_SHIFT
+ andi ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
+ xori ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
+ bne ra, $r0, nopage_tlb_store
+
+ ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+#ifdef CONFIG_SMP
+ sc.d t0, t1, 0
+ beq t0, $r0, smp_pgtable_change_store
+#else
+ st.d t0, t1, 0
+#endif
+
+ ori t1, t1, 8
+ xori t1, t1, 8
+ ld.d t0, t1, 0
+ ld.d t1, t1, 8
+ csrwr t0, LOONGARCH_CSR_TLBELO0
+ csrwr t1, LOONGARCH_CSR_TLBELO1
+ tlbwr
+leave_store:
+ csrrd t0, EXCEPTION_KS0
+ csrrd t1, EXCEPTION_KS1
+ csrrd ra, EXCEPTION_KS2
+ ertn
+#ifdef CONFIG_64BIT
+vmalloc_store:
+ la.abs t1, swapper_pg_dir
+ b vmalloc_done_store
+#endif
+
+ /*
+ * This is the entry point when build_tlbchange_handler_head
+ * spots a huge page.
+ */
+tlb_huge_update_store:
+#ifdef CONFIG_SMP
+ ll.d t0, t1, 0
+#else
+ ld.d t0, t1, 0
+#endif
+ srli.d ra, t0, _PAGE_PRESENT_SHIFT
+ andi ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
+ xori ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
+ bne ra, $r0, nopage_tlb_store
+
+ tlbsrch
+ ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+
+#ifdef CONFIG_SMP
+ sc.d t0, t1, 0
+ beq t0, $r0, tlb_huge_update_store
+ ld.d t0, t1, 0
+#else
+ st.d t0, t1, 0
+#endif
+ addu16i.d t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
+ addi.d ra, t1, 0
+ csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
+ tlbwr
+
+ csrxchg $r0, t1, LOONGARCH_CSR_TLBIDX
+ /*
+ * A huge PTE describes an area the size of the
+ * configured huge page size. This is twice the
+ * size of the large TLB entry we intend to use.
+ * A TLB entry half the size of the configured
+ * huge page size is configured into entrylo0
+ * and entrylo1 to cover the contiguous huge PTE
+ * address space.
+ */
+ /* Huge page: Move Global bit */
+ xori t0, t0, _PAGE_HUGE
+ lu12i.w t1, _PAGE_HGLOBAL >> 12
+ and t1, t0, t1
+ srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+ or t0, t0, t1
+
+ addi.d ra, t0, 0
+ csrwr t0, LOONGARCH_CSR_TLBELO0
+ addi.d t0, ra, 0
+
+ /* Convert to entrylo1 */
+ addi.d t1, $r0, 1
+ slli.d t1, t1, (HPAGE_SHIFT - 1)
+ add.d t0, t0, t1
+ csrwr t0, LOONGARCH_CSR_TLBELO1
+
+ /* Set huge page tlb entry size */
+ addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+
+ tlbfill
+
+ /* Reset default page size */
+ addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+
+nopage_tlb_store:
+ dbar 0
+ csrrd ra, EXCEPTION_KS2
+ la.abs t0, tlb_do_page_fault_1
+ jirl $r0, t0, 0
+SYM_FUNC_END(handle_tlb_store)
+
+SYM_FUNC_START(handle_tlb_modify)
+ csrwr t0, EXCEPTION_KS0
+ csrwr t1, EXCEPTION_KS1
+ csrwr ra, EXCEPTION_KS2
+
+ /*
+ * The vmalloc handling is not in the hotpath.
+ */
+ csrrd t0, LOONGARCH_CSR_BADV
+ blt t0, $r0, vmalloc_modify
+ csrrd t1, LOONGARCH_CSR_PGDL
+
+vmalloc_done_modify:
+ /* Get PGD offset in bytes */
+ srli.d t0, t0, PGDIR_SHIFT
+ andi t0, t0, (PTRS_PER_PGD - 1)
+ slli.d t0, t0, 3
+ add.d t1, t1, t0
+#if CONFIG_PGTABLE_LEVELS > 3
+ csrrd t0, LOONGARCH_CSR_BADV
+ ld.d t1, t1, 0
+ srli.d t0, t0, PUD_SHIFT
+ andi t0, t0, (PTRS_PER_PUD - 1)
+ slli.d t0, t0, 3
+ add.d t1, t1, t0
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+ csrrd t0, LOONGARCH_CSR_BADV
+ ld.d t1, t1, 0
+ srli.d t0, t0, PMD_SHIFT
+ andi t0, t0, (PTRS_PER_PMD - 1)
+ slli.d t0, t0, 3
+ add.d t1, t1, t0
+#endif
+ ld.d ra, t1, 0
+
+ /*
+ * For huge tlb entries, pmde doesn't contain an address but
+ * instead contains the tlb pte. Check the PAGE_HUGE bit and
+ * see if we need to jump to huge tlb processing.
+ */
+ andi t0, ra, _PAGE_HUGE
+ bne t0, $r0, tlb_huge_update_modify
+
+ csrrd t0, LOONGARCH_CSR_BADV
+ srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER)
+ andi t0, t0, (PTRS_PER_PTE - 1)
+ slli.d t0, t0, _PTE_T_LOG2
+ add.d t1, ra, t0
+
+#ifdef CONFIG_SMP
+smp_pgtable_change_modify:
+#endif
+#ifdef CONFIG_SMP
+ ll.d t0, t1, 0
+#else
+ ld.d t0, t1, 0
+#endif
+ tlbsrch
+
+ srli.d ra, t0, _PAGE_WRITE_SHIFT
+ andi ra, ra, 1
+ beq ra, $r0, nopage_tlb_modify
+
+ ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+#ifdef CONFIG_SMP
+ sc.d t0, t1, 0
+ beq t0, $r0, smp_pgtable_change_modify
+#else
+ st.d t0, t1, 0
+#endif
+ ori t1, t1, 8
+ xori t1, t1, 8
+ ld.d t0, t1, 0
+ ld.d t1, t1, 8
+ csrwr t0, LOONGARCH_CSR_TLBELO0
+ csrwr t1, LOONGARCH_CSR_TLBELO1
+ tlbwr
+leave_modify:
+ csrrd t0, EXCEPTION_KS0
+ csrrd t1, EXCEPTION_KS1
+ csrrd ra, EXCEPTION_KS2
+ ertn
+#ifdef CONFIG_64BIT
+vmalloc_modify:
+ la.abs t1, swapper_pg_dir
+ b vmalloc_done_modify
+#endif
+
+ /*
+ * This is the entry point when
+ * build_tlbchange_handler_head spots a huge page.
+ */
+tlb_huge_update_modify:
+#ifdef CONFIG_SMP
+ ll.d t0, t1, 0
+#else
+ ld.d t0, t1, 0
+#endif
+
+ srli.d ra, t0, _PAGE_WRITE_SHIFT
+ andi ra, ra, 1
+ beq ra, $r0, nopage_tlb_modify
+
+ tlbsrch
+ ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+
+#ifdef CONFIG_SMP
+ sc.d t0, t1, 0
+ beq t0, $r0, tlb_huge_update_modify
+ ld.d t0, t1, 0
+#else
+ st.d t0, t1, 0
+#endif
+ /*
+ * A huge PTE describes an area the size of the
+ * configured huge page size. This is twice the
+ * size of the large TLB entry we intend to use.
+ * A TLB entry half the size of the configured
+ * huge page size is configured into entrylo0
+ * and entrylo1 to cover the contiguous huge PTE
+ * address space.
+ */
+ /* Huge page: Move Global bit */
+ xori t0, t0, _PAGE_HUGE
+ lu12i.w t1, _PAGE_HGLOBAL >> 12
+ and t1, t0, t1
+ srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+ or t0, t0, t1
+
+ addi.d ra, t0, 0
+ csrwr t0, LOONGARCH_CSR_TLBELO0
+ addi.d t0, ra, 0
+
+ /* Convert to entrylo1 */
+ addi.d t1, $r0, 1
+ slli.d t1, t1, (HPAGE_SHIFT - 1)
+ add.d t0, t0, t1
+ csrwr t0, LOONGARCH_CSR_TLBELO1
+
+ /* Set huge page tlb entry size */
+ addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+
+ tlbwr
+
+ /* Reset default page size */
+ addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+
+nopage_tlb_modify:
+ dbar 0
+ csrrd ra, EXCEPTION_KS2
+ la.abs t0, tlb_do_page_fault_1
+ jirl $r0, t0, 0
+SYM_FUNC_END(handle_tlb_modify)
+
+SYM_FUNC_START(handle_tlb_refill)
+ csrwr t0, LOONGARCH_CSR_TLBRSAVE
+ csrrd t0, LOONGARCH_CSR_PGD
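+ /* Hardware-assisted walk: each lddir fetches one directory level, ldpte loads the even/odd PTEs */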
+ lddir t0, t0, 3
+#if CONFIG_PGTABLE_LEVELS > 3
+ lddir t0, t0, 2
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+ lddir t0, t0, 1
+#endif
+ ldpte t0, 0
+ ldpte t0, 1
+ tlbfill
+ csrrd t0, LOONGARCH_CSR_TLBRSAVE
+ ertn
+SYM_FUNC_END(handle_tlb_refill)
diff --git a/arch/loongarch/pci/Makefile b/arch/loongarch/pci/Makefile
new file mode 100644
index 000000000000..8101ef3df71c
--- /dev/null
+++ b/arch/loongarch/pci/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the PCI specific kernel interface routines under Linux.
+#
+
+obj-y += pci.o
+obj-$(CONFIG_ACPI) += acpi.o
diff --git a/arch/loongarch/vdso/.gitignore b/arch/loongarch/vdso/.gitignore
new file mode 100644
index 000000000000..652e31d82582
--- /dev/null
+++ b/arch/loongarch/vdso/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vdso.lds
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
new file mode 100644
index 000000000000..6b6e16732c60
--- /dev/null
+++ b/arch/loongarch/vdso/Makefile
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: GPL-2.0
+# Objects to go into the VDSO.
+
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of the generic Makefile.
+ARCH_REL_TYPE_ABS := R_LARCH_32|R_LARCH_64|R_LARCH_MARK_LA|R_LARCH_JUMP_SLOT
+include $(srctree)/lib/vdso/Makefile
+
+obj-vdso-y := elf.o vgettimeofday.o sigreturn.o
+
+# Common compiler flags between ABIs.
+ccflags-vdso := \
+ $(filter -I%,$(KBUILD_CFLAGS)) \
+ $(filter -E%,$(KBUILD_CFLAGS)) \
+ $(filter -march=%,$(KBUILD_CFLAGS)) \
+ $(filter -m%-float,$(KBUILD_CFLAGS)) \
+ -D__VDSO__
+
+ifeq ($(cc-name),clang)
+ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
+endif
+
+cflags-vdso := $(ccflags-vdso) \
+ $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
+ -O2 -g -fno-strict-aliasing -fno-common -fno-builtin -G0 \
+ -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
+ $(call cc-option, -fno-asynchronous-unwind-tables) \
+ $(call cc-option, -fno-stack-protector)
+aflags-vdso := $(ccflags-vdso) \
+ -D__ASSEMBLY__ -Wa,-gdwarf-2
+
+ifneq ($(c-gettimeofday-y),)
+ CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+endif
+
+# VDSO linker flags.
+ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
+ $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \
+ --hash-style=sysv --build-id -T
+
+GCOV_PROFILE := n
+
+#
+# Shared build commands.
+#
+
+quiet_cmd_vdsold_and_vdso_check = LD $@
+ cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)
+
+quiet_cmd_vdsoas_o_S = AS $@
+ cmd_vdsoas_o_S = $(CC) $(a_flags) -c -o $@ $<
+
+# Generate VDSO offsets using helper script
+gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+quiet_cmd_vdsosym = VDSOSYM $@
+ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
+
+include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,vdsosym)
+
+#
+# Build native VDSO.
+#
+
+native-abi := $(filter -mabi=%,$(KBUILD_CFLAGS))
+
+targets += $(obj-vdso-y)
+targets += vdso.lds vdso.so.dbg vdso.so
+
+obj-vdso := $(obj-vdso-y:%.o=$(obj)/%.o)
+
+$(obj-vdso): KBUILD_CFLAGS := $(cflags-vdso) $(native-abi)
+$(obj-vdso): KBUILD_AFLAGS := $(aflags-vdso) $(native-abi)
+
+$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) $(native-abi)
+
+$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold_and_vdso_check)
+
+$(obj)/vdso.so: OBJCOPYFLAGS := -S
+$(obj)/vdso.so: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+obj-y += vdso.o
+
+$(obj)/vdso.o : $(obj)/vdso.so
+
+# install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso.so: $(obj)/vdso.so.dbg
+ @mkdir -p $(MODLIB)/vdso
+ $(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/arch/loongarch/vdso/elf.S b/arch/loongarch/vdso/elf.S
new file mode 100644
index 000000000000..9bb21b9f9583
--- /dev/null
+++ b/arch/loongarch/vdso/elf.S
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/vdso/vdso.h>
+
+#include <linux/elfnote.h>
+#include <linux/version.h>
+
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/loongarch/vdso/gen_vdso_offsets.sh b/arch/loongarch/vdso/gen_vdso_offsets.sh
new file mode 100755
index 000000000000..1bb4e12642ff
--- /dev/null
+++ b/arch/loongarch/vdso/gen_vdso_offsets.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# Derived from RISC-V and ARM64:
+# Author: Will Deacon <will.deacon@arm.com>
+#
+# Match symbols in the DSO that look like VDSO_*; produce a header file
+# of constant offsets into the shared object.
+#
+
+LC_ALL=C sed -n -e 's/^00*/0/' -e \
+'s/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2\t0x\1/p'
diff --git a/arch/loongarch/vdso/sigreturn.S b/arch/loongarch/vdso/sigreturn.S
new file mode 100644
index 000000000000..9cb3c58fad03
--- /dev/null
+++ b/arch/loongarch/vdso/sigreturn.S
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/vdso/vdso.h>
+
+#include <linux/linkage.h>
+#include <uapi/asm/unistd.h>
+
+#include <asm/regdef.h>
+#include <asm/asm.h>
+
+ .section .text
+ .cfi_sections .debug_frame
+
+SYM_FUNC_START(__vdso_rt_sigreturn)
+
+ li.w a7, __NR_rt_sigreturn
+ syscall 0
+
+SYM_FUNC_END(__vdso_rt_sigreturn)
diff --git a/arch/loongarch/vdso/vdso.S b/arch/loongarch/vdso/vdso.S
new file mode 100644
index 000000000000..46789bade6ff
--- /dev/null
+++ b/arch/loongarch/vdso/vdso.S
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from RISC-V:
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+ __PAGE_ALIGNED_DATA
+
+ .globl vdso_start, vdso_end
+ .balign PAGE_SIZE
+vdso_start:
+ .incbin "arch/loongarch/vdso/vdso.so"
+ .balign PAGE_SIZE
+vdso_end:
+
+ .previous
diff --git a/arch/loongarch/vdso/vdso.lds.S b/arch/loongarch/vdso/vdso.lds.S
new file mode 100644
index 000000000000..955f02de4a2d
--- /dev/null
+++ b/arch/loongarch/vdso/vdso.lds.S
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+OUTPUT_FORMAT("elf64-loongarch", "elf64-loongarch", "elf64-loongarch")
+
+OUTPUT_ARCH(loongarch)
+
+SECTIONS
+{
+ PROVIDE(_start = .);
+ . = SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+
+ .text : { *(.text*) } :text
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .rodata : { *(.rodata*) } :text
+
+ _end = .;
+ PROVIDE(end = .);
+
+ /DISCARD/ : {
+ *(.gnu.attributes)
+ *(.note.GNU-stack)
+ *(.data .data.* .gnu.linkonce.d.* .sdata*)
+ *(.bss .sbss .dynbss .dynsbss)
+ }
+}
+
+PHDRS
+{
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+VERSION
+{
+ LINUX_5.10 {
+ global:
+ __vdso_clock_getres;
+ __vdso_clock_gettime;
+ __vdso_gettimeofday;
+ __vdso_rt_sigreturn;
+ local: *;
+ };
+}
+
+/*
+ * Make the sigreturn code visible to the kernel.
+ */
+VDSO_sigreturn = __vdso_rt_sigreturn;
diff --git a/arch/loongarch/vdso/vgettimeofday.c b/arch/loongarch/vdso/vgettimeofday.c
new file mode 100644
index 000000000000..b1f4548dae92
--- /dev/null
+++ b/arch/loongarch/vdso/vgettimeofday.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * LoongArch userspace implementations of gettimeofday() and similar.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/types.h>
+
+int __vdso_clock_gettime(clockid_t clock,
+ struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime(clock, ts);
+}
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return __cvdso_gettimeofday(tv, tz);
+}
+
+int __vdso_clock_getres(clockid_t clock_id,
+ struct __kernel_timespec *res)
+{
+ return __cvdso_clock_getres(clock_id, res);
+}
diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus
index d1e93a39cd3b..d5c0b2990ee1 100644
--- a/arch/m68k/Kconfig.bus
+++ b/arch/m68k/Kconfig.bus
@@ -56,16 +56,6 @@ config ATARI_ROM_ISA
The only driver currently using this adapter is the EtherNEC
driver for RTL8019AS based NE2000 compatible network cards.
-config GENERIC_ISA_DMA
- def_bool ISA
-
source "drivers/zorro/Kconfig"
endif
-
-if COLDFIRE
-
-config ISA_DMA_API
- def_bool !M5272
-
-endif
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 3d5da25c73b5..f3aa44156969 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -37,7 +37,7 @@ endchoice
if M68KCLASSIC
config M68000
- bool
+ def_bool y
depends on !MMU
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_CAS
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 188a8f8a0104..a1042568b9ad 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -352,6 +352,7 @@ comment "Machine Options"
config UBOOT
bool "Support for U-Boot command line parameters"
+ depends on COLDFIRE
help
If you say Y here kernel will try to collect command
line parameters from the initial u-boot stack.
diff --git a/arch/m68k/coldfire/Makefile b/arch/m68k/coldfire/Makefile
index a3e18d73d8b8..9419a6c1f036 100644
--- a/arch/m68k/coldfire/Makefile
+++ b/arch/m68k/coldfire/Makefile
@@ -15,7 +15,7 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-obj-$(CONFIG_COLDFIRE) += cache.o clk.o device.o dma.o entry.o vectors.o
+obj-$(CONFIG_COLDFIRE) += cache.o clk.o device.o entry.o vectors.o
obj-$(CONFIG_M5206) += m5206.o intc.o reset.o
obj-$(CONFIG_M5206e) += m5206.o intc.o reset.o
obj-$(CONFIG_M520x) += m520x.o intc-simr.o reset.o
diff --git a/arch/m68k/coldfire/dma.c b/arch/m68k/coldfire/dma.c
deleted file mode 100644
index c3279f7467d7..000000000000
--- a/arch/m68k/coldfire/dma.c
+++ /dev/null
@@ -1,43 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/***************************************************************************/
-
-/*
- * dma.c -- Freescale ColdFire DMA support
- *
- * Copyright (C) 2007, Greg Ungerer (gerg@snapgear.com)
- */
-
-/***************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/dma.h>
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfdma.h>
-
-/***************************************************************************/
-
-/*
- * DMA channel base address table.
- */
-unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS] = {
-#ifdef MCFDMA_BASE0
- MCFDMA_BASE0,
-#endif
-#ifdef MCFDMA_BASE1
- MCFDMA_BASE1,
-#endif
-#ifdef MCFDMA_BASE2
- MCFDMA_BASE2,
-#endif
-#ifdef MCFDMA_BASE3
- MCFDMA_BASE3,
-#endif
-};
-EXPORT_SYMBOL(dma_base_addr);
-
-unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
-EXPORT_SYMBOL(dma_device_address);
-
-/***************************************************************************/
diff --git a/arch/m68k/coldfire/intc.c b/arch/m68k/coldfire/intc.c
index cce257420388..20c084e932c8 100644
--- a/arch/m68k/coldfire/intc.c
+++ b/arch/m68k/coldfire/intc.c
@@ -28,7 +28,7 @@
unsigned char mcf_irq2imr[NR_IRQS];
/*
- * Define the miniumun and maximum external interrupt numbers.
+ * Define the minimum and maximum external interrupt numbers.
* This is also used as the "level" interrupt numbers.
*/
#define EIRQ1 25
diff --git a/arch/m68k/coldfire/m53xx.c b/arch/m68k/coldfire/m53xx.c
index bd033e1975bf..17af5f673796 100644
--- a/arch/m68k/coldfire/m53xx.c
+++ b/arch/m68k/coldfire/m53xx.c
@@ -532,7 +532,7 @@ int clock_pll(int fsys, int flags)
writel(readl(MCF_SDRAMC_SDCR) | MCF_SDRAMC_SDCR_CKE,
MCF_SDRAMC_SDCR);
- /* Errata - workaround for SDRAM opeartion after exiting LIMP mode */
+ /* Errata - workaround for SDRAM operation after exiting LIMP mode */
writel(MCF_SDRAMC_REFRESH, MCF_SDRAMC_LIMP_FIX);
/* wait for DQS logic to relock */
diff --git a/arch/m68k/coldfire/pci.c b/arch/m68k/coldfire/pci.c
index 84eab0f5e00a..ceb5775b8d23 100644
--- a/arch/m68k/coldfire/pci.c
+++ b/arch/m68k/coldfire/pci.c
@@ -31,7 +31,7 @@ static struct pci_bus *rootbus;
static unsigned long iospace;
/*
- * We need to be carefull probing on bus 0 (directly connected to host
+ * We need to be careful probing on bus 0 (directly connected to host
* bridge). We should only access the well defined possible devices in
* use, ignore aliases and the like.
*/
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c
index 71b78ecee75c..b19dc00026d9 100644
--- a/arch/m68k/emu/natfeat.c
+++ b/arch/m68k/emu/natfeat.c
@@ -15,6 +15,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/reboot.h>
#include <linux/io.h>
#include <asm/machdep.h>
#include <asm/natfeat.h>
@@ -90,5 +91,5 @@ void __init nf_init(void)
pr_info("NatFeats found (%s, %lu.%lu)\n", buf, version >> 16,
version & 0xffff);
- mach_power_off = nf_poweroff;
+ register_platform_power_off(nf_poweroff);
}
diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c
index 2c92843397c3..e4bd6913f50e 100644
--- a/arch/m68k/hp300/config.c
+++ b/arch/m68k/hp300/config.c
@@ -240,12 +240,6 @@ static int hp300_hwclk(int op, struct rtc_time *t)
return 0;
}
-static unsigned int hp300_get_ss(void)
-{
- return hp300_rtc_read(RTC_REG_SEC1) * 10 +
- hp300_rtc_read(RTC_REG_SEC2);
-}
-
static void __init hp300_init_IRQ(void)
{
}
@@ -256,7 +250,6 @@ void __init config_hp300(void)
mach_init_IRQ = hp300_init_IRQ;
mach_get_model = hp300_get_model;
mach_hwclk = hp300_hwclk;
- mach_get_ss = hp300_get_ss;
mach_reset = hp300_reset;
#ifdef CONFIG_HEARTBEAT
mach_heartbeat = hp300_pulse;
diff --git a/arch/m68k/include/asm/dma.h b/arch/m68k/include/asm/dma.h
index ae2021964e32..f6c5e0dfb4e5 100644
--- a/arch/m68k/include/asm/dma.h
+++ b/arch/m68k/include/asm/dma.h
@@ -2,493 +2,10 @@
#ifndef _M68K_DMA_H
#define _M68K_DMA_H 1
-#ifdef CONFIG_COLDFIRE
-/*
- * ColdFire DMA Model:
- * ColdFire DMA supports two forms of DMA: Single and Dual address. Single
- * address mode emits a source address, and expects that the device will either
- * pick up the data (DMA READ) or source data (DMA WRITE). This implies that
- * the device will place data on the correct byte(s) of the data bus, as the
- * memory transactions are always 32 bits. This implies that only 32 bit
- * devices will find single mode transfers useful. Dual address DMA mode
- * performs two cycles: source read and destination write. ColdFire will
- * align the data so that the device will always get the correct bytes, thus
- * is useful for 8 and 16 bit devices. This is the mode that is supported
- * below.
- *
- * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
- * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
- *
- * AUG/25/2000 : added support for 8, 16 and 32-bit Single-Address-Mode (K)2000
- * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
- *
- * APR/18/2002 : added proper support for MCF5272 DMA controller.
- * Arthur Shipkowski (art@videon-central.com)
- */
-
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfdma.h>
-
-/*
- * Set number of channels of DMA on ColdFire for different implementations.
- */
-#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
- defined(CONFIG_M523x) || defined(CONFIG_M527x) || \
- defined(CONFIG_M528x) || defined(CONFIG_M525x)
-
-#define MAX_M68K_DMA_CHANNELS 4
-#elif defined(CONFIG_M5272)
-#define MAX_M68K_DMA_CHANNELS 1
-#elif defined(CONFIG_M53xx)
-#define MAX_M68K_DMA_CHANNELS 0
-#else
-#define MAX_M68K_DMA_CHANNELS 2
-#endif
-
-extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
-extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
-
-#if !defined(CONFIG_M5272)
-#define DMA_MODE_WRITE_BIT 0x01 /* Memory/IO to IO/Memory select */
-#define DMA_MODE_WORD_BIT 0x02 /* 8 or 16 bit transfers */
-#define DMA_MODE_LONG_BIT 0x04 /* or 32 bit transfers */
-#define DMA_MODE_SINGLE_BIT 0x08 /* single-address-mode */
-
-/* I/O to memory, 8 bits, mode */
-#define DMA_MODE_READ 0
-/* memory to I/O, 8 bits, mode */
-#define DMA_MODE_WRITE 1
-/* I/O to memory, 16 bits, mode */
-#define DMA_MODE_READ_WORD 2
-/* memory to I/O, 16 bits, mode */
-#define DMA_MODE_WRITE_WORD 3
-/* I/O to memory, 32 bits, mode */
-#define DMA_MODE_READ_LONG 4
-/* memory to I/O, 32 bits, mode */
-#define DMA_MODE_WRITE_LONG 5
-/* I/O to memory, 8 bits, single-address-mode */
-#define DMA_MODE_READ_SINGLE 8
-/* memory to I/O, 8 bits, single-address-mode */
-#define DMA_MODE_WRITE_SINGLE 9
-/* I/O to memory, 16 bits, single-address-mode */
-#define DMA_MODE_READ_WORD_SINGLE 10
-/* memory to I/O, 16 bits, single-address-mode */
-#define DMA_MODE_WRITE_WORD_SINGLE 11
-/* I/O to memory, 32 bits, single-address-mode */
-#define DMA_MODE_READ_LONG_SINGLE 12
-/* memory to I/O, 32 bits, single-address-mode */
-#define DMA_MODE_WRITE_LONG_SINGLE 13
-
-#else /* CONFIG_M5272 is defined */
-
-/* Source static-address mode */
-#define DMA_MODE_SRC_SA_BIT 0x01
-/* Two bits to select between all four modes */
-#define DMA_MODE_SSIZE_MASK 0x06
-/* Offset to shift bits in */
-#define DMA_MODE_SSIZE_OFF 0x01
-/* Destination static-address mode */
-#define DMA_MODE_DES_SA_BIT 0x10
-/* Two bits to select between all four modes */
-#define DMA_MODE_DSIZE_MASK 0x60
-/* Offset to shift bits in */
-#define DMA_MODE_DSIZE_OFF 0x05
-/* Size modifiers */
-#define DMA_MODE_SIZE_LONG 0x00
-#define DMA_MODE_SIZE_BYTE 0x01
-#define DMA_MODE_SIZE_WORD 0x02
-#define DMA_MODE_SIZE_LINE 0x03
-
-/*
- * Aliases to help speed quick ports; these may be suboptimal, however. They
- * do not include the SINGLE mode modifiers since the MCF5272 does not have a
- * mode where the device is in control of its addressing.
- */
-
-/* I/O to memory, 8 bits, mode */
-#define DMA_MODE_READ ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
-/* memory to I/O, 8 bits, mode */
-#define DMA_MODE_WRITE ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
-/* I/O to memory, 16 bits, mode */
-#define DMA_MODE_READ_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
-/* memory to I/O, 16 bits, mode */
-#define DMA_MODE_WRITE_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
-/* I/O to memory, 32 bits, mode */
-#define DMA_MODE_READ_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
-/* memory to I/O, 32 bits, mode */
-#define DMA_MODE_WRITE_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
-
-#endif /* !defined(CONFIG_M5272) */
-
-#if !defined(CONFIG_M5272)
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
- volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
- printk("enable_dma(dmanr=%d)\n", dmanr);
-#endif
-
- dmawp = (unsigned short *) dma_base_addr[dmanr];
- dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
- volatile unsigned short *dmawp;
- volatile unsigned char *dmapb;
-
-#ifdef DMA_DEBUG
- printk("disable_dma(dmanr=%d)\n", dmanr);
-#endif
-
- dmawp = (unsigned short *) dma_base_addr[dmanr];
- dmapb = (unsigned char *) dma_base_addr[dmanr];
-
- /* Turn off external requests, and stop any DMA in progress */
- dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
- dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
-}
-
-/*
- * Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while interrupts are disabled! ---
- *
- * This is a NOP for ColdFire. Provide a stub for compatibility.
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
-
- volatile unsigned char *dmabp;
- volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
- printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
-#endif
-
- dmabp = (unsigned char *) dma_base_addr[dmanr];
- dmawp = (unsigned short *) dma_base_addr[dmanr];
-
- /* Clear config errors */
- dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;
-
- /* Set command register */
- dmawp[MCFDMA_DCR] =
- MCFDMA_DCR_INT | /* Enable completion irq */
- MCFDMA_DCR_CS | /* Force one xfer per request */
- MCFDMA_DCR_AA | /* Enable auto alignment */
- /* single-address-mode */
- ((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
- /* sets s_rw (-> r/w) high if Memory to I/0 */
- ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
- /* Memory to I/O or I/O to Memory */
- ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
- /* 32 bit, 16 bit or 8 bit transfers */
- ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_SSIZE_WORD :
- ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
- MCFDMA_DCR_SSIZE_BYTE)) |
- ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_DSIZE_WORD :
- ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
- MCFDMA_DCR_DSIZE_BYTE));
-
-#ifdef DEBUG_DMA
- printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
- dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
- (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
-#endif
-}
-
-/* Set transfer address for specific DMA channel */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
-{
- volatile unsigned short *dmawp;
- volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
- printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
- dmawp = (unsigned short *) dma_base_addr[dmanr];
- dmalp = (unsigned int *) dma_base_addr[dmanr];
-
- /* Determine which address registers are used for memory/device accesses */
- if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
- /* Source incrementing, must be memory */
- dmalp[MCFDMA_SAR] = a;
- /* Set dest address, must be device */
- dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
- } else {
- /* Destination incrementing, must be memory */
- dmalp[MCFDMA_DAR] = a;
- /* Set source address, must be device */
- dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
- }
-
-#ifdef DEBUG_DMA
- printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
- __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
- (int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
- (int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
-#endif
-}
-
-/*
- * Specific for Coldfire - sets device address.
- * Should be called after the mode set call, and before set DMA address.
- */
-static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
-{
-#ifdef DMA_DEBUG
- printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
- dma_device_address[dmanr] = a;
-}
-
-/*
- * NOTE 2: "count" represents _bytes_.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
- volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
- printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
-#endif
-
- dmawp = (unsigned short *) dma_base_addr[dmanr];
- dmawp[MCFDMA_BCR] = (unsigned short)count;
-}
-
-/*
- * Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
- volatile unsigned short *dmawp;
- unsigned short count;
-
-#ifdef DMA_DEBUG
- printk("get_dma_residue(dmanr=%d)\n", dmanr);
-#endif
-
- dmawp = (unsigned short *) dma_base_addr[dmanr];
- count = dmawp[MCFDMA_BCR];
- return((int) count);
-}
-#else /* CONFIG_M5272 is defined */
-
-/*
- * The MCF5272 DMA controller is very different from the controller defined above
- * in terms of register mapping. For instance, with the exception of the 16-bit
- * interrupt register (IRQ#85, for reference), all of the registers are 32-bit.
- *
- * The big difference, however, is the lack of device-requested DMA. All modes
- * are dual address transfer, and there is no 'device' setup or direction bit.
- * You can DMA between a device and memory, between memory and memory, or even between
- * two devices directly, with any combination of incrementing and non-incrementing
- * addresses you choose. This puts a crimp in distinguishing between the 'device
- * address' set up by set_dma_device_addr.
- *
- * Therefore, there are two options. One is to use set_dma_addr and set_dma_device_addr,
- * which will act exactly as above in -- it will look to see if the source is set to
- * autoincrement, and if so it will make the source use the set_dma_addr value and the
- * destination the set_dma_device_addr value. Otherwise the source will be set to the
- * set_dma_device_addr value and the destination will get the set_dma_addr value.
- *
- * The other is to use the provided set_dma_src_addr and set_dma_dest_addr functions
- * and make it explicit. Depending on what you're doing, one of these two should work
- * for you, but don't mix them in the same transfer setup.
- */
-
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
- volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
- printk("enable_dma(dmanr=%d)\n", dmanr);
-#endif
-
- dmalp = (unsigned int *) dma_base_addr[dmanr];
- dmalp[MCFDMA_DMR] |= MCFDMA_DMR_EN;
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
- volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
- printk("disable_dma(dmanr=%d)\n", dmanr);
-#endif
-
- dmalp = (unsigned int *) dma_base_addr[dmanr];
-
- /* Turn off external requests, and stop any DMA in progress */
- dmalp[MCFDMA_DMR] &= ~MCFDMA_DMR_EN;
- dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
-}
-
-/*
- * Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while interrupts are disabled! ---
- *
- * This is a NOP for ColdFire. Provide a stub for compatibility.
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
-
- volatile unsigned int *dmalp;
- volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
- printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
-#endif
- dmalp = (unsigned int *) dma_base_addr[dmanr];
- dmawp = (unsigned short *) dma_base_addr[dmanr];
-
- /* Clear config errors */
- dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
-
- /* Set command register */
- dmalp[MCFDMA_DMR] =
- MCFDMA_DMR_RQM_DUAL | /* Mandatory Request Mode setting */
- MCFDMA_DMR_DSTT_SD | /* Set up addressing types; set to supervisor-data. */
- MCFDMA_DMR_SRCT_SD | /* Set up addressing types; set to supervisor-data. */
- /* source static-address-mode */
- ((mode & DMA_MODE_SRC_SA_BIT) ? MCFDMA_DMR_SRCM_SA : MCFDMA_DMR_SRCM_IA) |
- /* dest static-address-mode */
- ((mode & DMA_MODE_DES_SA_BIT) ? MCFDMA_DMR_DSTM_SA : MCFDMA_DMR_DSTM_IA) |
- /* burst, 32 bit, 16 bit or 8 bit transfers are separately configurable on the MCF5272 */
- (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_DSTS_OFF) |
- (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_SRCS_OFF);
-
- dmawp[MCFDMA_DIR] |= MCFDMA_DIR_ASCEN; /* Enable completion interrupts */
-
-#ifdef DEBUG_DMA
- printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__,
- dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
- (int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
-#endif
-}
-
-/* Set transfer address for specific DMA channel */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
-{
- volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
- printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
- dmalp = (unsigned int *) dma_base_addr[dmanr];
-
- /* Determine which address registers are used for memory/device accesses */
- if (dmalp[MCFDMA_DMR] & MCFDMA_DMR_SRCM) {
- /* Source incrementing, must be memory */
- dmalp[MCFDMA_DSAR] = a;
- /* Set dest address, must be device */
- dmalp[MCFDMA_DDAR] = dma_device_address[dmanr];
- } else {
- /* Destination incrementing, must be memory */
- dmalp[MCFDMA_DDAR] = a;
- /* Set source address, must be device */
- dmalp[MCFDMA_DSAR] = dma_device_address[dmanr];
- }
-
-#ifdef DEBUG_DMA
- printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
- __FILE__, __LINE__, dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
- (int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
- (int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
-#endif
-}
-
-/*
- * Specific for Coldfire - sets device address.
- * Should be called after the mode set call, and before set DMA address.
- */
-static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
-{
-#ifdef DMA_DEBUG
- printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
- dma_device_address[dmanr] = a;
-}
-
-/*
- * NOTE 2: "count" represents _bytes_.
- *
- * NOTE 3: While a 32-bit register, "count" is only a maximum 24-bit value.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
- volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
- printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
-#endif
-
- dmalp = (unsigned int *) dma_base_addr[dmanr];
- dmalp[MCFDMA_DBCR] = count;
-}
-
-/*
- * Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
- volatile unsigned int *dmalp;
- unsigned int count;
-
-#ifdef DMA_DEBUG
- printk("get_dma_residue(dmanr=%d)\n", dmanr);
-#endif
-
- dmalp = (unsigned int *) dma_base_addr[dmanr];
- count = dmalp[MCFDMA_DBCR];
- return(count);
-}
-
-#endif /* !defined(CONFIG_M5272) */
-#endif /* CONFIG_COLDFIRE */
-
/* it's useless on the m68k, but unfortunately needed by the new
bootmem allocator (but this should do it for this) */
#define MAX_DMA_ADDRESS PAGE_OFFSET
-#define MAX_DMA_CHANNELS 8
-
-extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr); /* release it again */
-
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
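
Editor's note on the interface deleted above: the old <asm/dma.h> exposed the legacy ISA-style channel helpers (request_dma(), set_dma_mode(), set_dma_device_addr(), set_dma_addr(), set_dma_count(), enable_dma(), get_dma_residue()), and its own comments spell out the required call order -- mode first, then device address, then memory address and byte count, then enable. A minimal sketch of how a ColdFire driver would have used it; the channel number, MYDEV_FIFO_PHYS and the helper itself are hypothetical and error handling is elided:

        /* hypothetical driver helper: one 16-bit-wide transfer from a device
         * FIFO into memory, using the interface removed above */
        static int mydev_dma_read(unsigned int chan, void *buf, unsigned int len)
        {
                if (request_dma(chan, "mydev"))
                        return -EBUSY;

                set_dma_mode(chan, DMA_MODE_READ_WORD);         /* I/O -> memory, 16 bit */
                set_dma_device_addr(chan, MYDEV_FIFO_PHYS);     /* hypothetical device address */
                set_dma_addr(chan, (unsigned int)buf);           /* memory side */
                set_dma_count(chan, len);                        /* length in bytes */
                enable_dma(chan);

                while (get_dma_residue(chan) != 0)
                        cpu_relax();                             /* busy-wait until done */

                disable_dma(chan);
                free_dma(chan);
                return 0;
        }
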
diff --git a/arch/m68k/include/asm/elf.h b/arch/m68k/include/asm/elf.h
index 3d387ceaea3f..2def06a99b08 100644
--- a/arch/m68k/include/asm/elf.h
+++ b/arch/m68k/include/asm/elf.h
@@ -60,6 +60,13 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
is actually used on ASV. */
#define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0
+#define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, dynamic_addr) \
+ do { \
+ (_r)->d3 = _exec_map_addr; \
+ (_r)->d4 = _interp_map_addr; \
+ (_r)->d5 = dynamic_addr; \
+ } while(0)
+
#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
#define ELF_EXEC_PAGESIZE 8192
#else
@@ -114,4 +121,6 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
#define ELF_PLATFORM (NULL)
+#define ELF_FDPIC_CORE_EFLAGS 0
+
#endif
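
A short illustration of what the new ELF_FDPIC_PLAT_INIT() boils down to for this port, since the macro body is all that the hunk shows. The function and the exec_map/interp_map/dyn names are placeholders for the values the FDPIC loader computes while mapping the binary; only the register assignments are taken from the patch:

        #include <linux/ptrace.h>

        /* illustration only: the expansion of ELF_FDPIC_PLAT_INIT() on m68k */
        static void fdpic_seed_regs(struct pt_regs *regs, unsigned long exec_map,
                                    unsigned long interp_map, unsigned long dyn)
        {
                regs->d3 = exec_map;    /* executable's FDPIC loadmap address */
                regs->d4 = interp_map;  /* interpreter's loadmap address */
                regs->d5 = dyn;         /* dynamic section address */
        }
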
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 8fd80ef1b77e..48d27f1fecc7 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -19,12 +19,10 @@ extern void (*mach_get_model) (char *model);
extern void (*mach_get_hardware_list) (struct seq_file *m);
/* machine dependent timer functions */
extern int (*mach_hwclk)(int, struct rtc_time*);
-extern unsigned int (*mach_get_ss)(void);
extern int (*mach_get_rtc_pll)(struct rtc_pll_info *);
extern int (*mach_set_rtc_pll)(struct rtc_pll_info *);
extern void (*mach_reset)( void );
extern void (*mach_halt)( void );
-extern void (*mach_power_off)( void );
extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
extern void (*mach_hd_setup)(char *, int *);
extern void (*mach_heartbeat) (int);
diff --git a/arch/m68k/include/asm/mmu.h b/arch/m68k/include/asm/mmu.h
index 5c15aacb1370..e00672425b00 100644
--- a/arch/m68k/include/asm/mmu.h
+++ b/arch/m68k/include/asm/mmu.h
@@ -6,9 +6,7 @@
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
#else
-typedef struct {
- unsigned long end_brk;
-} mm_context_t;
+#include <asm-generic/mmu.h>
#endif
#endif
diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h
index 87151d67d91e..bce5ca56c388 100644
--- a/arch/m68k/include/asm/pgtable_no.h
+++ b/arch/m68k/include/asm/pgtable_no.h
@@ -42,7 +42,8 @@ extern void paging_init(void);
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-#define ZERO_PAGE(vaddr) (virt_to_page(0))
+extern void *empty_zero_page;
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
/*
* All 32bit addresses are effectively valid for vmalloc...
diff --git a/arch/m68k/include/uapi/asm/ptrace.h b/arch/m68k/include/uapi/asm/ptrace.h
index 19a1b9d0d858..5b50ea592e00 100644
--- a/arch/m68k/include/uapi/asm/ptrace.h
+++ b/arch/m68k/include/uapi/asm/ptrace.h
@@ -74,7 +74,12 @@ struct switch_stack {
#define PTRACE_GET_THREAD_AREA 25
+#define PTRACE_GETFDPIC 31
+
#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
+#define PTRACE_GETFDPIC_EXEC 0
+#define PTRACE_GETFDPIC_INTERP 1
+
#endif /* __ASSEMBLY__ */
#endif /* _UAPI_M68K_PTRACE_H */
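
The new PTRACE_GETFDPIC request mirrors what other FDPIC ports already expose: the addr argument selects executable vs. interpreter, and the result is written through the data pointer. A hedged user-space sketch -- the calling convention is assumed from the generic ptrace_request() handling on other architectures, not from this hunk, and pid is an already-attached, stopped tracee:

        #include <sys/ptrace.h>
        #include <sys/types.h>
        #include <stdio.h>

        #ifndef PTRACE_GETFDPIC
        #define PTRACE_GETFDPIC         31
        #define PTRACE_GETFDPIC_EXEC    0
        #define PTRACE_GETFDPIC_INTERP  1
        #endif

        static void dump_fdpic_loadmaps(pid_t pid)
        {
                unsigned long exec_map = 0, interp_map = 0;

                ptrace(PTRACE_GETFDPIC, pid, (void *)PTRACE_GETFDPIC_EXEC, &exec_map);
                ptrace(PTRACE_GETFDPIC, pid, (void *)PTRACE_GETFDPIC_INTERP, &interp_map);

                printf("exec loadmap @ %#lx, interp loadmap @ %#lx\n",
                       exec_map, interp_map);
        }
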
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index a6030dbaa089..2cb4a61bcfac 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -67,12 +67,11 @@ void machine_halt(void)
void machine_power_off(void)
{
- if (mach_power_off)
- mach_power_off();
+ do_kernel_power_off();
for (;;);
}
-void (*pm_power_off)(void) = machine_power_off;
+void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
void show_regs(struct pt_regs * regs)
@@ -138,9 +137,11 @@ asmlinkage int m68k_clone3(struct pt_regs *regs)
return sys_clone3((struct clone_args __user *)regs->d1, regs->d2);
}
-int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
- struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct fork_frame {
struct switch_stack sw;
struct pt_regs regs;
@@ -157,12 +158,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
*/
p->thread.fc = USER_DATA;
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
/* kernel thread */
memset(frame, 0, sizeof(struct fork_frame));
frame->regs.sr = PS_S;
- frame->sw.a3 = usp; /* function */
- frame->sw.d7 = arg;
+ frame->sw.a3 = (unsigned long)args->fn;
+ frame->sw.d7 = (unsigned long)args->fn_arg;
frame->sw.retpc = (unsigned long)ret_from_kernel_thread;
p->thread.usp = 0;
return 0;
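
A compact way to read the copy_thread() conversion above: the old positional parameters now arrive as named fields of struct kernel_clone_args, and the kernel-thread test keys off args->fn instead of PF_KTHREAD | PF_IO_WORKER. Schematic sketch only (the helper is not kernel code; field names are the ones used in the hunk):

        #include <linux/sched/task.h>   /* struct kernel_clone_args */

        static void show_clone_args_mapping(const struct kernel_clone_args *args)
        {
                unsigned long clone_flags = args->flags;   /* was: clone_flags */
                unsigned long usp         = args->stack;   /* was: usp */
                unsigned long tls         = args->tls;     /* was: tls */

                if (args->fn) {
                        /* kernel thread: replaces the PF_KTHREAD | PF_IO_WORKER
                         * test; args->fn / args->fn_arg were previously smuggled
                         * in through the usp / arg parameters */
                }

                (void)clone_flags; (void)usp; (void)tls;
        }
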
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index daebccdd2c09..0a4184a37461 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -19,6 +19,8 @@
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
+#include <linux/regset.h>
+#include <linux/elf.h>
#include <linux/uaccess.h>
#include <asm/page.h>
@@ -284,3 +286,59 @@ asmlinkage void syscall_trace_leave(void)
if (test_thread_flag(TIF_SYSCALL_TRACE))
ptrace_report_syscall_exit(task_pt_regs(current), 0);
}
+
+#if defined(CONFIG_BINFMT_ELF_FDPIC) && defined(CONFIG_ELF_CORE)
+/*
+ * Currently the only thing that needs to use regsets for m68k is the
+ * coredump support of the elf_fdpic loader. Implement the minimum
+ * definitions required for that.
+ */
+static int m68k_regset_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct pt_regs *ptregs = task_pt_regs(target);
+ u32 uregs[ELF_NGREG];
+
+ ELF_CORE_COPY_REGS(uregs, ptregs);
+ return membuf_write(&to, uregs, sizeof(uregs));
+}
+
+enum m68k_regset {
+ REGSET_GPR,
+#ifdef CONFIG_FPU
+ REGSET_FPU,
+#endif
+};
+
+static const struct user_regset m68k_user_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(u32),
+ .align = sizeof(u16),
+ .regset_get = m68k_regset_get,
+ },
+#ifdef CONFIG_FPU
+ [REGSET_FPU] = {
+ .core_note_type = NT_PRFPREG,
+ .n = sizeof(struct user_m68kfp_struct) / sizeof(u32),
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ }
+#endif /* CONFIG_FPU */
+};
+
+static const struct user_regset_view user_m68k_view = {
+ .name = "m68k",
+ .e_machine = EM_68K,
+ .ei_osabi = ELF_OSABI,
+ .regsets = m68k_user_regsets,
+ .n = ARRAY_SIZE(m68k_user_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_m68k_view;
+}
+#endif /* CONFIG_BINFMT_ELF_FDPIC && CONFIG_ELF_CORE */
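
The regset added above is only consumed by the elf_fdpic coredump path, but the calling convention is visible in the hunk: the core hands the ->regset_get() hook a struct membuf sized for n * size bytes. A hedged sketch of pulling the GPR block out through the new view; the helper name and buffer handling are illustrative, not kernel API:

        #include <linux/types.h>
        #include <linux/regset.h>

        static int dump_m68k_gprs(struct task_struct *tsk, u32 *buf)
        {
                const struct user_regset_view *view = task_user_regset_view(tsk);
                const struct user_regset *rs = &view->regsets[0];  /* REGSET_GPR */
                struct membuf to = {
                        .p    = buf,
                        .left = rs->n * rs->size,   /* ELF_NGREG * sizeof(u32) */
                };

                return rs->regset_get(tsk, rs, to);
        }
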
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 78ab562beb31..e62fa8f2149b 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -87,18 +87,8 @@ void (*mach_sched_init) (void) __initdata = NULL;
void (*mach_init_IRQ) (void) __initdata = NULL;
void (*mach_get_model) (char *model);
void (*mach_get_hardware_list) (struct seq_file *m);
-/* machine dependent timer functions */
-int (*mach_hwclk) (int, struct rtc_time*);
-EXPORT_SYMBOL(mach_hwclk);
-unsigned int (*mach_get_ss)(void);
-int (*mach_get_rtc_pll)(struct rtc_pll_info *);
-int (*mach_set_rtc_pll)(struct rtc_pll_info *);
-EXPORT_SYMBOL(mach_get_ss);
-EXPORT_SYMBOL(mach_get_rtc_pll);
-EXPORT_SYMBOL(mach_set_rtc_pll);
void (*mach_reset)( void );
void (*mach_halt)( void );
-void (*mach_power_off)( void );
#ifdef CONFIG_HEARTBEAT
void (*mach_heartbeat) (int);
EXPORT_SYMBOL(mach_heartbeat);
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 5e4104f07a44..cb6def585851 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -50,12 +50,10 @@ char __initdata command_line[COMMAND_LINE_SIZE];
/* machine dependent timer functions */
void (*mach_sched_init)(void) __initdata = NULL;
-int (*mach_hwclk) (int, struct rtc_time*);
/* machine dependent reboot functions */
void (*mach_reset)(void);
void (*mach_halt)(void);
-void (*mach_power_off)(void);
#ifdef CONFIG_M68000
#if defined(CONFIG_M68328)
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 340ffeea0a9d..a97600b2af50 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -63,6 +63,15 @@ void timer_heartbeat(void)
#endif /* CONFIG_HEARTBEAT */
#ifdef CONFIG_M68KCLASSIC
+/* machine dependent timer functions */
+int (*mach_hwclk) (int, struct rtc_time*);
+EXPORT_SYMBOL(mach_hwclk);
+
+int (*mach_get_rtc_pll)(struct rtc_pll_info *);
+int (*mach_set_rtc_pll)(struct rtc_pll_info *);
+EXPORT_SYMBOL(mach_get_rtc_pll);
+EXPORT_SYMBOL(mach_set_rtc_pll);
+
#if !IS_BUILTIN(CONFIG_RTC_DRV_GENERIC)
void read_persistent_clock64(struct timespec64 *ts)
{
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
index 7e6afeae6217..5acb821849d3 100644
--- a/arch/m68k/lib/checksum.c
+++ b/arch/m68k/lib/checksum.c
@@ -265,8 +265,6 @@ csum_and_copy_from_user(const void __user *src, void *dst, int len)
return sum;
}
-EXPORT_SYMBOL(csum_and_copy_from_user);
-
/*
* copy from kernel space while checksumming, otherwise like csum_partial
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 65d124ec80bb..382f656c29ea 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/module.h>
+#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/tty.h>
@@ -140,7 +141,6 @@ void __init config_mac(void)
mach_hwclk = mac_hwclk;
mach_reset = mac_reset;
mach_halt = mac_poweroff;
- mach_power_off = mac_poweroff;
#if IS_ENABLED(CONFIG_INPUT_M68K_BEEP)
mach_beep = mac_mksound;
#endif
@@ -160,6 +160,8 @@ void __init config_mac(void)
if (macintosh_config->ident == MAC_MODEL_IICI)
mach_l2_flush = via_l2_flush;
+
+ register_platform_power_off(mac_poweroff);
}
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index ecbe948f4c1a..df7f797c908a 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -27,7 +27,6 @@
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
-#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 9237243077ce..c78ee709b458 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -41,7 +41,6 @@ static void q40_get_model(char *model);
extern void q40_sched_init(void);
static int q40_hwclk(int, struct rtc_time *);
-static unsigned int q40_get_ss(void);
static int q40_get_rtc_pll(struct rtc_pll_info *pll);
static int q40_set_rtc_pll(struct rtc_pll_info *pll);
@@ -169,7 +168,6 @@ void __init config_q40(void)
mach_init_IRQ = q40_init_IRQ;
mach_hwclk = q40_hwclk;
- mach_get_ss = q40_get_ss;
mach_get_rtc_pll = q40_get_rtc_pll;
mach_set_rtc_pll = q40_set_rtc_pll;
@@ -246,11 +244,6 @@ static int q40_hwclk(int op, struct rtc_time *t)
return 0;
}
-static unsigned int q40_get_ss(void)
-{
- return bcd2bin(Q40_RTC_SECS);
-}
-
/* get and set PLL calibration of RTC clock */
#define Q40_RTC_PLL_MASK ((1<<5)-1)
#define Q40_RTC_PLL_SIGN (1<<5)
diff --git a/arch/m68k/virt/config.c b/arch/m68k/virt/config.c
index 68d29c8b87e1..632ba200ad42 100644
--- a/arch/m68k/virt/config.c
+++ b/arch/m68k/virt/config.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/reboot.h>
#include <linux/serial_core.h>
#include <clocksource/timer-goldfish.h>
@@ -126,5 +127,6 @@ void __init config_virt(void)
mach_get_model = virt_get_model;
mach_reset = virt_reset;
mach_halt = virt_halt;
- mach_power_off = virt_halt;
+
+ register_platform_power_off(virt_halt);
}
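
The mac and virt conversions above follow the same recipe, stated once here: the board stops assigning a function pointer to mach_power_off and instead registers its handler on the power-off chain, which machine_power_off() now reaches through do_kernel_power_off(). A minimal sketch for a hypothetical board, assuming only the API shown in the patch:

        #include <linux/init.h>
        #include <linux/reboot.h>

        static void myboard_power_off(void)          /* hypothetical handler */
        {
                /* poke the board's power controller here */
        }

        static void __init myboard_config(void)
        {
                /* was: mach_power_off = myboard_power_off; */
                register_platform_power_off(myboard_power_off);
        }
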
diff --git a/arch/microblaze/include/asm/string.h b/arch/microblaze/include/asm/string.h
index 34071a848b6a..8798ad2c132a 100644
--- a/arch/microblaze/include/asm/string.h
+++ b/arch/microblaze/include/asm/string.h
@@ -8,6 +8,7 @@
#ifdef __KERNEL__
+#ifdef CONFIG_OPT_LIB_FUNCTION
#define __HAVE_ARCH_MEMSET
#define __HAVE_ARCH_MEMCPY
#define __HAVE_ARCH_MEMMOVE
@@ -15,6 +16,7 @@
extern void *memset(void *, int, __kernel_size_t);
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);
+#endif
#endif /* __KERNEL__ */
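
Why this header change alone is safe: with CONFIG_OPT_LIB_FUNCTION disabled, __HAVE_ARCH_MEMSET/__HAVE_ARCH_MEMCPY/__HAVE_ARCH_MEMMOVE are no longer defined, so the byte-wise fallbacks deleted from arch/microblaze/lib below are provided by the generic lib/string.c instead. Each generic routine is guarded roughly like this (paraphrased from memory of the generic library, not part of this patch):

        #include <linux/string.h>
        #include <linux/export.h>

        /* lib/string.c, simplified: built only when the architecture does not
         * declare its own optimized memcpy() */
        #ifndef __HAVE_ARCH_MEMCPY
        void *memcpy(void *dest, const void *src, size_t count)
        {
                char *tmp = dest;
                const char *s = src;

                while (count--)
                        *tmp++ = *s++;
                return dest;
        }
        EXPORT_SYMBOL(memcpy);
        #endif
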
diff --git a/arch/microblaze/kernel/kgdb.c b/arch/microblaze/kernel/kgdb.c
index 130cd0f064ce..df4b9d0112e5 100644
--- a/arch/microblaze/kernel/kgdb.c
+++ b/arch/microblaze/kernel/kgdb.c
@@ -31,7 +31,7 @@
#define GDB_RTLBLO 55
#define GDB_RTLBHI 56
-/* keep pvr separately because it is unchangeble */
+/* keep pvr separately because it is unchangeable */
static struct pvr_s pvr;
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 1b944d319d73..3c6241bcaea8 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -52,20 +52,22 @@ void flush_thread(void)
{
}
-int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
- struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
struct thread_info *ti = task_thread_info(p);
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
/* if we're creating a new kernel thread then just zeroing all
* the registers. That's OK for a brand new thread.*/
memset(childregs, 0, sizeof(struct pt_regs));
memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
ti->cpu_context.r1 = (unsigned long)childregs;
- ti->cpu_context.r20 = (unsigned long)usp; /* fn */
- ti->cpu_context.r19 = (unsigned long)arg;
+ ti->cpu_context.r20 = (unsigned long)args->fn;
+ ti->cpu_context.r19 = (unsigned long)args->fn_arg;
childregs->pt_mode = 1;
local_save_flags(childregs->msr);
ti->cpu_context.msr = childregs->msr & ~MSR_IE;
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index f8832cf49384..26c385582c3b 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -251,6 +251,10 @@ static int __init xilinx_timer_init(struct device_node *timer)
u32 timer_num = 1;
int ret;
+ /* If this property is present, the device is a PWM and not a timer */
+ if (of_property_read_bool(timer, "#pwm-cells"))
+ return 0;
+
if (initialized)
return -EINVAL;
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c
index 63041fdf916d..9966dce55619 100644
--- a/arch/microblaze/lib/memcpy.c
+++ b/arch/microblaze/lib/memcpy.c
@@ -31,20 +31,7 @@
#include <linux/string.h>
-#ifdef __HAVE_ARCH_MEMCPY
-#ifndef CONFIG_OPT_LIB_FUNCTION
-void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
-{
- const char *src = v_src;
- char *dst = v_dst;
-
- /* Simple, byte oriented memcpy. */
- while (c--)
- *dst++ = *src++;
-
- return v_dst;
-}
-#else /* CONFIG_OPT_LIB_FUNCTION */
+#ifdef CONFIG_OPT_LIB_FUNCTION
void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
{
const char *src = v_src;
@@ -188,6 +175,5 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
return v_dst;
}
-#endif /* CONFIG_OPT_LIB_FUNCTION */
EXPORT_SYMBOL(memcpy);
-#endif /* __HAVE_ARCH_MEMCPY */
+#endif /* CONFIG_OPT_LIB_FUNCTION */
diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c
index 9862f6b1e59d..c1f08c484e20 100644
--- a/arch/microblaze/lib/memmove.c
+++ b/arch/microblaze/lib/memmove.c
@@ -30,31 +30,7 @@
#include <linux/compiler.h>
#include <linux/string.h>
-#ifdef __HAVE_ARCH_MEMMOVE
-#ifndef CONFIG_OPT_LIB_FUNCTION
-void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
-{
- const char *src = v_src;
- char *dst = v_dst;
-
- if (!c)
- return v_dst;
-
- /* Use memcpy when source is higher than dest */
- if (v_dst <= v_src)
- return memcpy(v_dst, v_src, c);
-
- /* copy backwards, from end to beginning */
- src += c;
- dst += c;
-
- /* Simple, byte oriented memmove. */
- while (c--)
- *--dst = *--src;
-
- return v_dst;
-}
-#else /* CONFIG_OPT_LIB_FUNCTION */
+#ifdef CONFIG_OPT_LIB_FUNCTION
void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
{
const char *src = v_src;
@@ -102,7 +78,7 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
i_dst = (void *)dst;
/* Choose a copy scheme based on the source */
- /* alignment relative to dstination. */
+ /* alignment relative to destination. */
switch ((unsigned long)src & 3) {
case 0x0: /* Both byte offsets are aligned */
@@ -215,6 +191,5 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
}
return v_dst;
}
-#endif /* CONFIG_OPT_LIB_FUNCTION */
EXPORT_SYMBOL(memmove);
-#endif /* __HAVE_ARCH_MEMMOVE */
+#endif /* CONFIG_OPT_LIB_FUNCTION */
diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c
index eb6c8988af02..7c2352d56bb0 100644
--- a/arch/microblaze/lib/memset.c
+++ b/arch/microblaze/lib/memset.c
@@ -30,22 +30,7 @@
#include <linux/compiler.h>
#include <linux/string.h>
-#ifdef __HAVE_ARCH_MEMSET
-#ifndef CONFIG_OPT_LIB_FUNCTION
-void *memset(void *v_src, int c, __kernel_size_t n)
-{
- char *src = v_src;
-
- /* Truncate c to 8 bits */
- c = (c & 0xFF);
-
- /* Simple, byte oriented memset or the rest of count. */
- while (n--)
- *src++ = c;
-
- return v_src;
-}
-#else /* CONFIG_OPT_LIB_FUNCTION */
+#ifdef CONFIG_OPT_LIB_FUNCTION
void *memset(void *v_src, int c, __kernel_size_t n)
{
char *src = v_src;
@@ -89,11 +74,21 @@ void *memset(void *v_src, int c, __kernel_size_t n)
}
/* Simple, byte oriented memset or the rest of count. */
- while (n--)
+ switch (n) {
+ case 3:
+ *src++ = c;
+ fallthrough;
+ case 2:
*src++ = c;
+ fallthrough;
+ case 1:
+ *src++ = c;
+ break;
+ default:
+ break;
+ }
return v_src;
}
-#endif /* CONFIG_OPT_LIB_FUNCTION */
EXPORT_SYMBOL(memset);
-#endif /* __HAVE_ARCH_MEMSET */
+#endif /* CONFIG_OPT_LIB_FUNCTION */
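
One non-mechanical change hides in the optimized memset(): after the aligned 32-bit store loop, at most three bytes can remain, so the old `while (n--)` tail becomes an unrolled fallthrough switch. A standalone sketch of the idea; the helper name is illustrative and `fallthrough` is the kernel's pseudo-keyword (normally pulled in via <linux/compiler_attributes.h>):

        #include <linux/compiler_attributes.h>

        /* store the last n (0..3) bytes without looping */
        static inline char *set_tail(char *dst, char c, unsigned int n)
        {
                switch (n) {
                case 3:
                        *dst++ = c;
                        fallthrough;
                case 2:
                        *dst++ = c;
                        fallthrough;
                case 1:
                        *dst++ = c;
                        break;
                default:
                        break;
                }
                return dst;
        }
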
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 952f35b335b2..f4e503461d24 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/mm.h> /* mem_init */
#include <linux/initrd.h>
+#include <linux/of_fdt.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
@@ -261,8 +262,12 @@ asmlinkage void __init mmu_init(void)
parse_early_param();
+ early_init_fdt_scan_reserved_mem();
+
/* CMA initialization */
dma_contiguous_reserve(memory_start + lowmem_size - 1);
+
+ memblock_dump_all();
}
void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index de3b32a507d2..db09d45d59ec 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1321,11 +1321,11 @@ config CPU_LOONGSON64
select SWIOTLB
select HAVE_KVM
help
- The Loongson GSx64(GS264/GS464/GS464E/GS464V) series of processor
- cores implements the MIPS64R2 instruction set with many extensions,
- including most 64-bit Loongson-2 (2H, 2K) and Loongson-3 (3A1000,
- 3B1000, 3B1500, 3A2000, 3A3000 and 3A4000) processors. However, old
- Loongson-2E/2F is not covered here and will be removed in future.
+ The Loongson GSx64(GS264/GS464/GS464E/GS464V) series of processor
+ cores implements the MIPS64R2 instruction set with many extensions,
+ including most 64-bit Loongson-2 (2H, 2K) and Loongson-3 (3A1000,
+ 3B1000, 3B1500, 3A2000, 3A3000 and 3A4000) processors. However, old
+ Loongson-2E/2F is not covered here and will be removed in future.
config LOONGSON3_ENHANCEMENT
bool "New Loongson-3 CPU Enhancements"
@@ -3198,16 +3198,12 @@ config MIPS32_COMPAT
config COMPAT
bool
-config SYSVIPC_COMPAT
- bool
-
config MIPS32_O32
bool "Kernel support for o32 binaries"
depends on 64BIT
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT
select MIPS32_COMPAT
- select SYSVIPC_COMPAT if SYSVIPC
help
Select this option if you want to run o32 binaries. These are pure
32-bit binaries as used by the 32-bit Linux/MIPS port. Most of
@@ -3221,7 +3217,6 @@ config MIPS32_N32
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select COMPAT
select MIPS32_COMPAT
- select SYSVIPC_COMPAT if SYSVIPC
help
Select this option if you want to run n32 binaries. These are
64-bit binaries using 32-bit quantities for addressing and certain
@@ -3255,7 +3250,7 @@ menu "CPU Power Management"
if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
source "drivers/cpufreq/Kconfig"
-endif
+endif # CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
source "drivers/cpuidle/Kconfig"
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index 4ca2c28878e0..5ab043000409 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -574,7 +574,7 @@ u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
dp++;
}
- /* Make last descrptor point to the first. */
+ /* Make last descriptor point to the first. */
dp--;
dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
index cd72eaa1168f..e70e529ddd91 100644
--- a/arch/mips/alchemy/devboards/db1300.c
+++ b/arch/mips/alchemy/devboards/db1300.c
@@ -732,16 +732,7 @@ static struct platform_device db1300_lcd_dev = {
/**********************************************************************/
#if IS_ENABLED(CONFIG_TOUCHSCREEN_WM97XX)
-static void db1300_wm97xx_irqen(struct wm97xx *wm, int enable)
-{
- if (enable)
- enable_irq(DB1300_AC97_PEN_INT);
- else
- disable_irq_nosync(DB1300_AC97_PEN_INT);
-}
-
static struct wm97xx_mach_ops db1300_wm97xx_ops = {
- .irq_enable = db1300_wm97xx_irqen,
.irq_gpio = WM97XX_GPIO_3,
};
diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c
index c535f9cb75ec..33788668cbdb 100644
--- a/arch/mips/bmips/dma.c
+++ b/arch/mips/bmips/dma.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/types.h>
+#include <linux/dma-map-ops.h>
#include <asm/bmips.h>
#include <asm/io.h>
diff --git a/arch/mips/boot/dts/brcm/bcm97358svmb.dts b/arch/mips/boot/dts/brcm/bcm97358svmb.dts
index 522f2c40d6e6..c17fc14d4899 100644
--- a/arch/mips/boot/dts/brcm/bcm97358svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97358svmb.dts
@@ -78,7 +78,7 @@
&qspi {
status = "okay";
- m25p80@0 {
+ flash@0 {
compatible = "m25p80";
reg = <0>;
spi-max-frequency = <40000000>;
diff --git a/arch/mips/boot/dts/brcm/bcm97360svmb.dts b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
index 01f215b08dba..c9b76f41e7a6 100644
--- a/arch/mips/boot/dts/brcm/bcm97360svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
@@ -81,7 +81,7 @@
&qspi {
status = "okay";
- m25p80@0 {
+ flash@0 {
compatible = "m25p80";
reg = <0>;
spi-max-frequency = <40000000>;
diff --git a/arch/mips/boot/dts/brcm/bcm97425svmb.dts b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
index f38934934349..289a57b912ef 100644
--- a/arch/mips/boot/dts/brcm/bcm97425svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
@@ -116,7 +116,7 @@
&qspi {
status = "okay";
- m25p80@0 {
+ flash@0 {
compatible = "m25p80";
reg = <0>;
spi-max-frequency = <40000000>;
diff --git a/arch/mips/boot/dts/ingenic/cu1000-neo.dts b/arch/mips/boot/dts/ingenic/cu1000-neo.dts
index f98cf029efc3..c89abf94e74f 100644
--- a/arch/mips/boot/dts/ingenic/cu1000-neo.dts
+++ b/arch/mips/boot/dts/ingenic/cu1000-neo.dts
@@ -31,42 +31,6 @@
};
};
- ssi: spi-gpio {
- compatible = "spi-gpio";
- #address-cells = <1>;
- #size-cells = <0>;
- num-chipselects = <1>;
-
- mosi-gpios = <&gpd 2 GPIO_ACTIVE_HIGH>;
- miso-gpios = <&gpd 3 GPIO_ACTIVE_HIGH>;
- sck-gpios = <&gpd 0 GPIO_ACTIVE_HIGH>;
- cs-gpios = <&gpd 1 GPIO_ACTIVE_HIGH>;
-
- status = "okay";
-
- spi-max-frequency = <50000000>;
-
- sc16is752: expander@0 {
- compatible = "nxp,sc16is752";
- reg = <0>; /* CE0 */
- spi-max-frequency = <4000000>;
-
- clocks = <&exclk_sc16is752>;
-
- interrupt-parent = <&gpc>;
- interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- exclk_sc16is752: sc16is752 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <48000000>;
- };
- };
- };
-
wlan_pwrseq: msc1-pwrseq {
compatible = "mmc-pwrseq-simple";
@@ -90,7 +54,7 @@
&ost {
/* 1500 kHz for the system timer and clocksource */
- assigned-clocks = <&ost OST_CLK_PERCPU_TIMER>, <&ost OST_CLK_GLOBAL_TIMER>;
+ assigned-clocks = <&ost OST_CLK_EVENT_TIMER>, <&ost OST_CLK_GLOBAL_TIMER>;
assigned-clock-rates = <1500000>, <1500000>;
};
@@ -101,6 +65,39 @@
pinctrl-0 = <&pins_uart2>;
};
+&ssi {
+ status = "okay";
+
+ num-cs = <2>;
+ cs-gpios = <0>, <&gpc 20 GPIO_ACTIVE_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_ssi>;
+
+ sc16is752: expander@0 {
+ compatible = "nxp,sc16is752";
+ reg = <0>; /* CE0 */
+
+ spi-rx-bus-width = <1>;
+ spi-tx-bus-width = <1>;
+ spi-max-frequency = <4000000>;
+
+ clocks = <&exclk_sc16is752>;
+
+ interrupt-parent = <&gpc>;
+ interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ exclk_sc16is752: sc16is752 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <48000000>;
+ };
+ };
+};
+
&i2c0 {
status = "okay";
@@ -192,6 +189,12 @@
bias-pull-up;
};
+ pins_ssi: ssi {
+ function = "ssi";
+ groups = "ssi-dt-d", "ssi-dr-d", "ssi-clk-d", "ssi-ce0-d";
+ bias-disable;
+ };
+
pins_i2c0: i2c0 {
function = "i2c0";
groups = "i2c0-data";
diff --git a/arch/mips/boot/dts/ingenic/cu1830-neo.dts b/arch/mips/boot/dts/ingenic/cu1830-neo.dts
index cfcb40edb7d9..3c7784983332 100644
--- a/arch/mips/boot/dts/ingenic/cu1830-neo.dts
+++ b/arch/mips/boot/dts/ingenic/cu1830-neo.dts
@@ -31,42 +31,6 @@
};
};
- ssi0: spi-gpio {
- compatible = "spi-gpio";
- #address-cells = <1>;
- #size-cells = <0>;
- num-chipselects = <1>;
-
- mosi-gpios = <&gpc 12 GPIO_ACTIVE_HIGH>;
- miso-gpios = <&gpc 11 GPIO_ACTIVE_HIGH>;
- sck-gpios = <&gpc 15 GPIO_ACTIVE_HIGH>;
- cs-gpios = <&gpc 16 GPIO_ACTIVE_HIGH>;
-
- status = "okay";
-
- spi-max-frequency = <50000000>;
-
- sc16is752: expander@0 {
- compatible = "nxp,sc16is752";
- reg = <0>; /* CE0 */
- spi-max-frequency = <4000000>;
-
- clocks = <&exclk_sc16is752>;
-
- interrupt-parent = <&gpb>;
- interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- exclk_sc16is752: sc16is752 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <48000000>;
- };
- };
- };
-
wlan_pwrseq: msc1-pwrseq {
compatible = "mmc-pwrseq-simple";
@@ -90,7 +54,7 @@
&ost {
/* 1500 kHz for the system timer and clocksource */
- assigned-clocks = <&ost OST_CLK_PERCPU_TIMER>, <&ost OST_CLK_GLOBAL_TIMER>;
+ assigned-clocks = <&ost OST_CLK_EVENT_TIMER>, <&ost OST_CLK_GLOBAL_TIMER>;
assigned-clock-rates = <1500000>, <1500000>;
};
@@ -101,6 +65,38 @@
pinctrl-0 = <&pins_uart1>;
};
+&ssi0 {
+ status = "okay";
+
+ num-cs = <2>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_ssi0>;
+
+ sc16is752: expander@0 {
+ compatible = "nxp,sc16is752";
+ reg = <0>; /* CE0 */
+
+ spi-rx-bus-width = <1>;
+ spi-tx-bus-width = <1>;
+ spi-max-frequency = <4000000>;
+
+ clocks = <&exclk_sc16is752>;
+
+ interrupt-parent = <&gpb>;
+ interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ exclk_sc16is752: sc16is752 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <48000000>;
+ };
+ };
+};
+
&i2c0 {
status = "okay";
@@ -196,6 +192,12 @@
bias-pull-up;
};
+ pins_ssi0: ssi0 {
+ function = "ssi0";
+ groups = "ssi0-dt", "ssi0-dr", "ssi0-clk", "ssi0-ce0", "ssi0-ce1";
+ bias-disable;
+ };
+
pins_i2c0: i2c0 {
function = "i2c0";
groups = "i2c0-data";
diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
index b998301f179c..c182a656d63b 100644
--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
@@ -577,7 +577,7 @@
};
otg: usb@13500000 {
- compatible = "ingenic,jz4780-otg", "snps,dwc2";
+ compatible = "ingenic,jz4780-otg";
reg = <0x13500000 0x40000>;
interrupt-parent = <&intc>;
diff --git a/arch/mips/boot/dts/ingenic/x1000.dtsi b/arch/mips/boot/dts/ingenic/x1000.dtsi
index 8bd27edef216..b0a034b468bb 100644
--- a/arch/mips/boot/dts/ingenic/x1000.dtsi
+++ b/arch/mips/boot/dts/ingenic/x1000.dtsi
@@ -127,6 +127,18 @@
clocks = <&tcu TCU_CLK_WDT>;
clock-names = "wdt";
};
+
+ pwm: pwm@40 {
+ compatible = "ingenic,x1000-pwm";
+ reg = <0x40 0x50>;
+
+ #pwm-cells = <3>;
+
+ clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>,
+ <&tcu TCU_CLK_TIMER2>, <&tcu TCU_CLK_TIMER3>,
+ <&tcu TCU_CLK_TIMER4>;
+ clock-names = "timer0", "timer1", "timer2", "timer3", "timer4";
+ };
};
rtc: rtc@10003000 {
@@ -246,6 +258,25 @@
status = "disabled";
};
+ ssi: spi@10043000 {
+ compatible = "ingenic,x1000-spi";
+ reg = <0x10043000 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <8>;
+
+ clocks = <&cgu X1000_CLK_SSI>;
+ clock-names = "spi";
+
+ dmas = <&pdma X1000_DMA_SSI0_RX 0xffffffff>,
+ <&pdma X1000_DMA_SSI0_TX 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ status = "disabled";
+ };
+
i2c0: i2c-controller@10050000 {
compatible = "ingenic,x1000-i2c";
reg = <0x10050000 0x1000>;
@@ -291,6 +322,7 @@
pdma: dma-controller@13420000 {
compatible = "ingenic,x1000-dma";
reg = <0x13420000 0x400>, <0x13421000 0x40>;
+
#dma-cells = <2>;
interrupt-parent = <&intc>;
@@ -366,7 +398,7 @@
};
otg: usb@13500000 {
- compatible = "ingenic,x1000-otg", "snps,dwc2";
+ compatible = "ingenic,x1000-otg";
reg = <0x13500000 0x40000>;
interrupt-parent = <&intc>;
diff --git a/arch/mips/boot/dts/ingenic/x1830.dtsi b/arch/mips/boot/dts/ingenic/x1830.dtsi
index 2595df8671c7..dbf21afaccb1 100644
--- a/arch/mips/boot/dts/ingenic/x1830.dtsi
+++ b/arch/mips/boot/dts/ingenic/x1830.dtsi
@@ -120,6 +120,20 @@
clocks = <&tcu TCU_CLK_WDT>;
clock-names = "wdt";
};
+
+ pwm: pwm@40 {
+ compatible = "ingenic,x1830-pwm", "ingenic,jz4740-pwm";
+ reg = <0x40 0x80>;
+
+ #pwm-cells = <3>;
+
+ clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>,
+ <&tcu TCU_CLK_TIMER2>, <&tcu TCU_CLK_TIMER3>,
+ <&tcu TCU_CLK_TIMER4>, <&tcu TCU_CLK_TIMER5>,
+ <&tcu TCU_CLK_TIMER6>, <&tcu TCU_CLK_TIMER7>;
+ clock-names = "timer0", "timer1", "timer2", "timer3",
+ "timer4", "timer5", "timer6", "timer7";
+ };
};
rtc: rtc@10003000 {
@@ -226,6 +240,44 @@
status = "disabled";
};
+ ssi0: spi@10043000 {
+ compatible = "ingenic,x1830-spi", "ingenic,x1000-spi";
+ reg = <0x10043000 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <9>;
+
+ clocks = <&cgu X1830_CLK_SSI0>;
+ clock-names = "spi";
+
+ dmas = <&pdma X1830_DMA_SSI0_RX 0xffffffff>,
+ <&pdma X1830_DMA_SSI0_TX 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ status = "disabled";
+ };
+
+ ssi1: spi@10044000 {
+ compatible = "ingenic,x1830-spi", "ingenic,x1000-spi";
+ reg = <0x10044000 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <8>;
+
+ clocks = <&cgu X1830_CLK_SSI1>;
+ clock-names = "spi";
+
+ dmas = <&pdma X1830_DMA_SSI1_RX 0xffffffff>,
+ <&pdma X1830_DMA_SSI1_TX 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ status = "disabled";
+ };
+
i2c0: i2c-controller@10050000 {
compatible = "ingenic,x1830-i2c", "ingenic,x1000-i2c";
reg = <0x10050000 0x1000>;
@@ -280,6 +332,7 @@
pdma: dma-controller@13420000 {
compatible = "ingenic,x1830-dma";
reg = <0x13420000 0x400>, <0x13421000 0x40>;
+
#dma-cells = <2>;
interrupt-parent = <&intc>;
@@ -355,7 +408,7 @@
};
otg: usb@13500000 {
- compatible = "ingenic,x1830-otg", "snps,dwc2";
+ compatible = "ingenic,x1830-otg";
reg = <0x13500000 0x40000>;
interrupt-parent = <&intc>;
diff --git a/arch/mips/boot/dts/mscc/jaguar2_pcb110.dts b/arch/mips/boot/dts/mscc/jaguar2_pcb110.dts
index d80cd6842b2a..0ea7bc5b5746 100644
--- a/arch/mips/boot/dts/mscc/jaguar2_pcb110.dts
+++ b/arch/mips/boot/dts/mscc/jaguar2_pcb110.dts
@@ -180,27 +180,27 @@
pins = "GPIO_49";
function = "si";
};
- i2cmux_pins_i: i2cmux-pins-i {
+ i2cmux_pins_i: i2cmux-pins {
pins = "GPIO_17", "GPIO_18", "GPIO_20", "GPIO_21";
function = "twi_scl_m";
output-low;
};
- i2cmux_0: i2cmux-0 {
+ i2cmux_0: i2cmux-0-pins {
pins = "GPIO_17";
function = "twi_scl_m";
output-high;
};
- i2cmux_1: i2cmux-1 {
+ i2cmux_1: i2cmux-1-pins {
pins = "GPIO_18";
function = "twi_scl_m";
output-high;
};
- i2cmux_2: i2cmux-2 {
+ i2cmux_2: i2cmux-2-pins {
pins = "GPIO_20";
function = "twi_scl_m";
output-high;
};
- i2cmux_3: i2cmux-3 {
+ i2cmux_3: i2cmux-3-pins {
pins = "GPIO_21";
function = "twi_scl_m";
output-high;
diff --git a/arch/mips/boot/dts/mscc/jaguar2_pcb111.dts b/arch/mips/boot/dts/mscc/jaguar2_pcb111.dts
index 813c5e16013c..05d8c6a96dc4 100644
--- a/arch/mips/boot/dts/mscc/jaguar2_pcb111.dts
+++ b/arch/mips/boot/dts/mscc/jaguar2_pcb111.dts
@@ -79,27 +79,27 @@
};
&gpio {
- i2cmux_pins_i: i2cmux-pins-i {
+ i2cmux_pins_i: i2cmux-pins {
pins = "GPIO_17", "GPIO_18";
function = "twi_scl_m";
output-low;
};
- i2cmux_0: i2cmux-0 {
+ i2cmux_0: i2cmux-0-pins {
pins = "GPIO_17";
function = "twi_scl_m";
output-high;
};
- i2cmux_1: i2cmux-1 {
+ i2cmux_1: i2cmux-1-pins {
pins = "GPIO_18";
function = "twi_scl_m";
output-high;
};
- i2cmux_2: i2cmux-2 {
+ i2cmux_2: i2cmux-2-pins {
pins = "GPIO_20";
function = "twi_scl_m";
output-high;
};
- i2cmux_3: i2cmux-3 {
+ i2cmux_3: i2cmux-3-pins {
pins = "GPIO_21";
function = "twi_scl_m";
output-high;
diff --git a/arch/mips/boot/dts/mscc/jaguar2_pcb118.dts b/arch/mips/boot/dts/mscc/jaguar2_pcb118.dts
index 27c644f2d17f..cf2cf591a211 100644
--- a/arch/mips/boot/dts/mscc/jaguar2_pcb118.dts
+++ b/arch/mips/boot/dts/mscc/jaguar2_pcb118.dts
@@ -39,17 +39,17 @@
};
&gpio {
- i2cmux_pins_i: i2cmux-pins-i {
+ i2cmux_pins_i: i2cmux-pins {
pins = "GPIO_17", "GPIO_16";
function = "twi_scl_m";
output-low;
};
- i2cmux_0: i2cmux-0 {
+ i2cmux_0: i2cmux-0-pins {
pins = "GPIO_17";
function = "twi_scl_m";
output-high;
};
- i2cmux_1: i2cmux-1 {
+ i2cmux_1: i2cmux-1-pins {
pins = "GPIO_16";
function = "twi_scl_m";
output-high;
diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi
index e51db651af13..cfc219a72bdd 100644
--- a/arch/mips/boot/dts/mscc/ocelot.dtsi
+++ b/arch/mips/boot/dts/mscc/ocelot.dtsi
@@ -225,7 +225,7 @@
function = "uart2";
};
- miim1: miim1 {
+ miim1_pins: miim1-pins {
pins = "GPIO_14", "GPIO_15";
function = "miim";
};
@@ -261,7 +261,7 @@
reg = <0x10700c0 0x24>;
interrupts = <15>;
pinctrl-names = "default";
- pinctrl-0 = <&miim1>;
+ pinctrl-0 = <&miim1_pins>;
status = "disabled";
};
diff --git a/arch/mips/boot/dts/mscc/ocelot_pcb120.dts b/arch/mips/boot/dts/mscc/ocelot_pcb120.dts
index bd240690cb37..d348742c233d 100644
--- a/arch/mips/boot/dts/mscc/ocelot_pcb120.dts
+++ b/arch/mips/boot/dts/mscc/ocelot_pcb120.dts
@@ -22,12 +22,12 @@
};
&gpio {
- phy_int_pins: phy_int_pins {
+ phy_int_pins: phy-int-pins {
pins = "GPIO_4";
function = "gpio";
};
- phy_load_save_pins: phy_load_save_pins {
+ phy_load_save_pins: phy-load-save-pins {
pins = "GPIO_10";
function = "ptp2";
};
@@ -40,7 +40,7 @@
&mdio1 {
status = "okay";
pinctrl-names = "default";
- pinctrl-0 = <&miim1>, <&phy_int_pins>, <&phy_load_save_pins>;
+ pinctrl-0 = <&miim1_pins>, <&phy_int_pins>, <&phy_load_save_pins>;
phy7: ethernet-phy@0 {
reg = <0>;
diff --git a/arch/mips/boot/dts/mscc/serval_common.dtsi b/arch/mips/boot/dts/mscc/serval_common.dtsi
index 5b404836db5e..0893de420e27 100644
--- a/arch/mips/boot/dts/mscc/serval_common.dtsi
+++ b/arch/mips/boot/dts/mscc/serval_common.dtsi
@@ -82,38 +82,38 @@
pins = "GPIO_7"; /* No "default" scl for i2c0 */
function = "twi";
};
- i2cmux_pins_i: i2cmux-pins-i {
+ i2cmux_pins_i: i2cmux-pins {
pins = "GPIO_11", "GPIO_12", "GPIO_18", "GPIO_19",
"GPIO_20", "GPIO_21";
function = "twi_scl_m";
output-low;
};
- i2cmux_0: i2cmux-0 {
+ i2cmux_0: i2cmux-0-pins {
pins = "GPIO_11";
function = "twi_scl_m";
output-high;
};
- i2cmux_1: i2cmux-1 {
+ i2cmux_1: i2cmux-1-pins {
pins = "GPIO_12";
function = "twi_scl_m";
output-high;
};
- i2cmux_2: i2cmux-2 {
+ i2cmux_2: i2cmux-2-pins {
pins = "GPIO_18";
function = "twi_scl_m";
output-high;
};
- i2cmux_3: i2cmux-3 {
+ i2cmux_3: i2cmux-3-pins {
pins = "GPIO_19";
function = "twi_scl_m";
output-high;
};
- i2cmux_4: i2cmux-4 {
+ i2cmux_4: i2cmux-4-pins {
pins = "GPIO_20";
function = "twi_scl_m";
output-high;
};
- i2cmux_5: i2cmux-5 {
+ i2cmux_5: i2cmux-5-pins {
pins = "GPIO_21";
function = "twi_scl_m";
output-high;
diff --git a/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
index 6069b33cf09f..826e91b840a3 100644
--- a/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
+++ b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
@@ -129,7 +129,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinmux_spi_spi>, <&pinmux_spi_cs1_cs>;
- m25p80@0 {
+ flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <40000000>;
diff --git a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts
index 5892bcf71595..37037e4f3c3b 100644
--- a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts
+++ b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts
@@ -60,7 +60,7 @@
&spi0 {
status = "okay";
- m25p80@0 {
+ flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "jedec,spi-nor";
@@ -97,20 +97,15 @@
status = "okay";
};
-&pinctrl {
- pinctrl-names = "default";
- pinctrl-0 = <&state_default>;
-
- state_default: state-default {
- gpio-pinmux {
- groups = "rgmii2", "uart3", "wdt";
- function = "gpio";
- };
- };
+&gmac1 {
+ status = "okay";
+ phy-handle = <&ethphy4>;
};
-&ethernet {
- pinctrl-0 = <&mdio_pins>, <&rgmii1_pins>;
+&mdio {
+ ethphy4: ethernet-phy@4 {
+ reg = <4>;
+ };
};
&switch0 {
@@ -119,10 +114,5 @@
status = "okay";
label = "ethblack";
};
-
- port@4 {
- status = "okay";
- label = "ethblue";
- };
};
};
diff --git a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts
index a7fce8de6147..a6201a119a1f 100644
--- a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts
+++ b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts
@@ -44,7 +44,7 @@
&spi0 {
status = "okay";
- m25p80@0 {
+ flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "jedec,spi-nor";
@@ -81,29 +81,15 @@
status = "okay";
};
-&pinctrl {
- pinctrl-names = "default";
- pinctrl-0 = <&state_default>;
-
- state_default: state-default {
- gpio-pinmux {
- groups = "wdt";
- function = "gpio";
- };
- };
+&gmac1 {
+ status = "okay";
+ phy-handle = <&ethphy7>;
};
-&ethernet {
- gmac1: mac@1 {
- status = "okay";
- phy-handle = <&ethphy7>;
- };
-
- mdio-bus {
- ethphy7: ethernet-phy@7 {
- reg = <7>;
- phy-mode = "rgmii-rxid";
- };
+&mdio {
+ ethphy7: ethernet-phy@7 {
+ reg = <7>;
+ phy-mode = "rgmii-rxid";
};
};
diff --git a/arch/mips/boot/dts/ralink/mt7621.dtsi b/arch/mips/boot/dts/ralink/mt7621.dtsi
index 3222684915ac..ee46ace0bcc1 100644
--- a/arch/mips/boot/dts/ralink/mt7621.dtsi
+++ b/arch/mips/boot/dts/ralink/mt7621.dtsi
@@ -151,7 +151,7 @@
};
pinctrl: pinctrl {
- compatible = "ralink,rt2880-pinmux";
+ compatible = "ralink,mt7621-pinctrl";
i2c_pins: i2c0-pins {
pinmux {
@@ -342,7 +342,7 @@
phy-mode = "rgmii-rxid";
};
- mdio-bus {
+ mdio: mdio-bus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/mips/boot/tools/relocs.c b/arch/mips/boot/tools/relocs.c
index 1bf53f3524b3..02fc85f3e8ff 100644
--- a/arch/mips/boot/tools/relocs.c
+++ b/arch/mips/boot/tools/relocs.c
@@ -351,7 +351,7 @@ static void read_symtabs(FILE *fp)
static void read_relocs(FILE *fp)
{
- static unsigned long base = 0;
+ static unsigned long base;
int i, j;
if (!base) {
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
index b63ad5d42cc7..306cee07ce3f 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -318,7 +318,7 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
}
/*
- * Determine if this is an entry that can satisify the
+ * Determine if this is an entry that can satisfy the
* request Check to make sure entry is large enough to
* satisfy request.
*/
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
index fea71a85bb29..a92632223497 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
@@ -156,8 +156,9 @@ int __cvmx_helper_xaui_enable(int interface)
xauiCtl.u64 = cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
xauiCtl.s.lo_pwr = 0;
- /* Issuing a reset here seems to hang some CN68XX chips. */
- if (!OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X) &&
+ /* Issuing a reset here seems to hang some CN66XX/CN68XX chips. */
+ if (!OCTEON_IS_MODEL(OCTEON_CN66XX) &&
+ !OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X) &&
!OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X))
xauiCtl.s.reset = 1;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index b22f664e2d29..6f49fd9be1f3 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -61,6 +61,12 @@ int cvmx_helper_get_number_of_interfaces(void)
{
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
return 9;
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0))
+ return 7;
+ else
+ return 8;
+ }
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
return 4;
if (OCTEON_IS_MODEL(OCTEON_CN7XXX))
diff --git a/arch/mips/cavium-octeon/executive/cvmx-pko.c b/arch/mips/cavium-octeon/executive/cvmx-pko.c
index ae8806e7bce2..15faca494c80 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-pko.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-pko.c
@@ -377,7 +377,7 @@ cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
/*
* Check to make sure all static priority
* queues are contiguous. Also catches some
- * cases of static priorites not starting at
+ * cases of static priorities not starting at
* queue 0.
*/
if (static_priority_end != -1
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 07d7ff5a981d..6cdcbf4de763 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1405,7 +1405,7 @@ static void octeon_irq_init_ciu2_percpu(void)
* completed.
*
* There are 9 registers and 3 IPX levels with strides 0x1000
- * and 0x200 respectivly. Use loops to clear them.
+ * and 0x200 respectively. Use loops to clear them.
*/
for (regx = 0; regx <= 0x8000; regx += 0x1000) {
for (ipx = 0; ipx <= 0x400; ipx += 0x200)
diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c
index 4df919d26b08..5cffe1ed2447 100644
--- a/arch/mips/cavium-octeon/octeon-usb.c
+++ b/arch/mips/cavium-octeon/octeon-usb.c
@@ -419,7 +419,7 @@ static int dwc3_octeon_clocks_start(struct device *dev, u64 base)
/* Step 5c: Enable SuperSpeed. */
uctl_ctl.s.ref_ssp_en = 1;
- /* Step 5d: Cofngiure PHYs. SKIP */
+ /* Step 5d: Configure PHYs. SKIP */
/* Step 6a & 6b: Power up PHYs. */
uctl_ctl.s.hs_power_en = 1;
diff --git a/arch/mips/configs/cu1000-neo_defconfig b/arch/mips/configs/cu1000-neo_defconfig
index 9d75f5b77d5d..5bd55eb32fe5 100644
--- a/arch/mips/configs/cu1000-neo_defconfig
+++ b/arch/mips/configs/cu1000-neo_defconfig
@@ -61,7 +61,7 @@ CONFIG_SERIAL_SC16IS7XX_SPI=y
CONFIG_I2C=y
CONFIG_I2C_JZ4780=y
CONFIG_SPI=y
-CONFIG_SPI_GPIO=y
+CONFIG_SPI_INGENIC=y
CONFIG_GPIO_SYSFS=y
CONFIG_SENSORS_ADS7828=m
CONFIG_WATCHDOG=y
diff --git a/arch/mips/configs/cu1830-neo_defconfig b/arch/mips/configs/cu1830-neo_defconfig
index 29decd0003c6..cc69688962e8 100644
--- a/arch/mips/configs/cu1830-neo_defconfig
+++ b/arch/mips/configs/cu1830-neo_defconfig
@@ -64,7 +64,7 @@ CONFIG_SERIAL_SC16IS7XX_SPI=y
CONFIG_I2C=y
CONFIG_I2C_JZ4780=y
CONFIG_SPI=y
-CONFIG_SPI_GPIO=y
+CONFIG_SPI_INGENIC=y
CONFIG_GPIO_SYSFS=y
CONFIG_SENSORS_ADS7828=m
CONFIG_WATCHDOG=y
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c
index 130eb67bd3c9..971f7b46759b 100644
--- a/arch/mips/dec/ioasic-irq.c
+++ b/arch/mips/dec/ioasic-irq.c
@@ -68,13 +68,13 @@ static struct irq_chip ioasic_dma_irq_type = {
* I/O ASIC implements two kinds of DMA interrupts, informational and
* error interrupts.
*
- * The formers do not stop DMA and should be cleared as soon as possible
+ * The former do not stop DMA and should be cleared as soon as possible
* so that if they retrigger before the handler has completed, usually as
* a side effect of actions taken by the handler, then they are reissued.
* These use the `handle_edge_irq' handler that clears the request right
* away.
*
- * The latters stop DMA and do not resume it until the interrupt has been
+ * The latter stop DMA and do not resume it until the interrupt has been
* cleared. This cannot be done until after a corrective action has been
* taken and this also means they will not retrigger. Therefore they use
* the `handle_fasteoi_irq' handler that only clears the request on the
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index 82b00e45ce50..6c3704f51d0d 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -71,7 +71,7 @@ volatile u32 *ioasic_base;
EXPORT_SYMBOL(ioasic_base);
/*
- * IRQ routing and priority tables. Priorites are set as follows:
+ * IRQ routing and priority tables. Priorities are set as follows:
*
* KN01 KN230 KN02 KN02-BA KN02-CA KN03
*
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index ef5fc1ca1b5d..66188739f54d 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -32,7 +32,7 @@ static phys_addr_t prom_mem_size[MAX_PROM_MEM] __initdata;
static unsigned int nr_prom_mem __initdata;
/*
- * For ARC firmware memory functions the unit of meassuring memory is always
+ * For ARC firmware memory functions the unit of measuring memory is always
* a 4k page of memory
*/
#define ARC_PAGE_SHIFT 12
diff --git a/arch/mips/generic/board-ingenic.c b/arch/mips/generic/board-ingenic.c
index 3f44f14bdb33..c422bbc890ed 100644
--- a/arch/mips/generic/board-ingenic.c
+++ b/arch/mips/generic/board-ingenic.c
@@ -131,36 +131,10 @@ static const struct platform_suspend_ops ingenic_pm_ops __maybe_unused = {
static int __init ingenic_pm_init(void)
{
- struct device_node *cpu_node;
- struct clk *cpu0_clk;
- int ret;
-
if (boot_cpu_type() == CPU_XBURST) {
if (IS_ENABLED(CONFIG_PM_SLEEP))
suspend_set_ops(&ingenic_pm_ops);
_machine_halt = ingenic_halt;
-
- /*
- * Unconditionally enable the clock for the first CPU.
- * This makes sure that the PLL that feeds the CPU won't be
- * stopped while the kernel is running.
- */
- cpu_node = of_get_cpu_node(0, NULL);
- if (!cpu_node) {
- pr_err("Unable to get CPU node\n");
- } else {
- cpu0_clk = of_clk_get(cpu_node, 0);
- if (IS_ERR(cpu0_clk)) {
- pr_err("Unable to get CPU0 clock\n");
- return PTR_ERR(cpu0_clk);
- }
-
- ret = clk_prepare_enable(cpu0_clk);
- if (ret) {
- pr_err("Unable to enable CPU0 clock\n");
- return ret;
- }
- }
}
return 0;
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index 1e6c1354f245..4044eaf989ac 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -128,48 +128,45 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
__u32 len, __u8 proto,
- __wsum sum)
+ __wsum isum)
{
- unsigned long tmp = (__force unsigned long)sum;
-
- __asm__(
- " .set push # csum_tcpudp_nofold\n"
- " .set noat \n"
-#ifdef CONFIG_32BIT
- " addu %0, %2 \n"
- " sltu $1, %0, %2 \n"
- " addu %0, $1 \n"
-
- " addu %0, %3 \n"
- " sltu $1, %0, %3 \n"
- " addu %0, $1 \n"
-
- " addu %0, %4 \n"
- " sltu $1, %0, %4 \n"
- " addu %0, $1 \n"
-#endif
-#ifdef CONFIG_64BIT
- " daddu %0, %2 \n"
- " daddu %0, %3 \n"
- " daddu %0, %4 \n"
- " dsll32 $1, %0, 0 \n"
- " daddu %0, $1 \n"
- " sltu $1, %0, $1 \n"
- " dsra32 %0, %0, 0 \n"
- " addu %0, $1 \n"
-#endif
- " .set pop"
- : "=r" (tmp)
- : "0" ((__force unsigned long)daddr),
- "r" ((__force unsigned long)saddr),
-#ifdef __MIPSEL__
- "r" ((proto + len) << 8),
-#else
- "r" (proto + len),
-#endif
- "r" ((__force unsigned long)sum));
-
- return (__force __wsum)tmp;
+ const unsigned int sh32 = IS_ENABLED(CONFIG_64BIT) ? 32 : 0;
+ unsigned long sum = (__force unsigned long)daddr;
+ unsigned long tmp;
+ __u32 osum;
+
+ tmp = (__force unsigned long)saddr;
+ sum += tmp;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ sum += sum < tmp;
+
+ /*
+ * We know PROTO + LEN has the sign bit clear, so cast to a signed
+ * type to avoid an extraneous zero-extension where TMP is 64-bit.
+ */
+ tmp = (__s32)(proto + len);
+ tmp <<= IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? 8 : 0;
+ sum += tmp;
+ if (IS_ENABLED(CONFIG_32BIT))
+ sum += sum < tmp;
+
+ tmp = (__force unsigned long)isum;
+ sum += tmp;
+
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ sum += sum < tmp;
+ osum = sum;
+ } else if (IS_ENABLED(CONFIG_64BIT)) {
+ tmp = sum << sh32;
+ sum += tmp;
+ osum = sum < tmp;
+ osum += sum >> sh32;
+ } else {
+ BUILD_BUG();
+ }
+
+ return (__force __wsum)osum;
}
#define csum_tcpudp_nofold csum_tcpudp_nofold
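The csum_tcpudp_nofold() change above replaces MIPS inline assembly with portable C: the pseudo-header words are accumulated in a wide register and, on 64-bit, folded back to 32 bits by adding the low half onto the high half and carrying any overflow around. A minimal userspace sketch of that fold (fold64() and the modular cross-check are illustrative only, not part of the patch):

/* fold_demo.c - illustrative only; mirrors the 64-bit fold in the new
 * csum_tcpudp_nofold(), not kernel code.  Build: cc -o fold_demo fold_demo.c */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* End-around fold of a 64-bit accumulator into 32 bits. */
static uint32_t fold64(uint64_t sum)
{
	uint64_t tmp = sum << 32;	/* low half, shifted up */
	uint32_t out;

	sum += tmp;			/* high half += low half */
	out = sum < tmp;		/* carry out of the 64-bit add */
	out += sum >> 32;		/* folded high half */
	return out;
}

int main(void)
{
	uint64_t samples[] = { 0, 0xffffffffULL, 0x1ffffffffULL,
			       0x123456789abcdef0ULL, ~0ULL };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint64_t s = samples[i];

		/* Both sides are congruent modulo 2^32 - 1, which is all
		 * that matters for a ones'-complement checksum. */
		assert(fold64(s) % 0xffffffffULL == s % 0xffffffffULL);
		printf("0x%016llx -> 0x%08x\n",
		       (unsigned long long)s, (unsigned)fold64(s));
	}
	return 0;
}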
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index bbb3bc5a42fd..ec01dc000a41 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -9,28 +9,28 @@
#include <asm/page.h>
#include <asm/ptrace.h>
+#define __compat_uid_t __compat_uid_t
typedef s32 __compat_uid_t;
typedef s32 __compat_gid_t;
+
typedef __compat_uid_t __compat_uid32_t;
typedef __compat_gid_t __compat_gid32_t;
#define __compat_uid32_t __compat_uid32_t
-#define __compat_gid32_t __compat_gid32_t
+
+#define compat_statfs compat_statfs
+#define compat_ipc64_perm compat_ipc64_perm
#define _COMPAT_NSIG 128 /* Don't ask !$@#% ... */
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
+#define COMPAT_RLIM_INFINITY 0x7fffffffUL
+
#include <asm-generic/compat.h>
-#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "mips\0\0\0"
-typedef u32 compat_dev_t;
typedef u32 compat_nlink_t;
-typedef s32 compat_ipc_pid_t;
-typedef struct {
- s32 val[2];
-} compat_fsid_t;
struct compat_stat {
compat_dev_t st_dev;
@@ -55,27 +55,8 @@ struct compat_stat {
s32 st_pad4[14];
};
-struct compat_flock {
- short l_type;
- short l_whence;
- compat_off_t l_start;
- compat_off_t l_len;
- s32 l_sysid;
- compat_pid_t l_pid;
- s32 pad[4];
-};
-
-#define F_GETLK64 33
-#define F_SETLK64 34
-#define F_SETLKW64 35
-
-struct compat_flock64 {
- short l_type;
- short l_whence;
- compat_loff_t l_start;
- compat_loff_t l_len;
- compat_pid_t l_pid;
-};
+#define __ARCH_COMPAT_FLOCK_EXTRA_SYSID s32 l_sysid;
+#define __ARCH_COMPAT_FLOCK_PAD s32 pad[4];
struct compat_statfs {
int f_type;
@@ -92,10 +73,6 @@ struct compat_statfs {
int f_spare[5];
};
-#define COMPAT_RLIM_INFINITY 0x7fffffffUL
-
-#define COMPAT_OFF_T_MAX 0x7fffffff
-
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index de8cb2ccb781..c0983130a44c 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -133,6 +133,9 @@
# define raw_cpu_has_fpu 0
# endif
#else
+# if cpu_has_fpu
+# error "Forcing `cpu_has_fpu' to non-zero is not supported"
+# endif
# define raw_cpu_has_fpu cpu_has_fpu
#endif
#ifndef cpu_has_32fpr
diff --git a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
index c8385c4e8664..568fe09332eb 100644
--- a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
@@ -25,7 +25,6 @@
#define cpu_has_4kex 1
#define cpu_has_3k_cache 0
#define cpu_has_4k_cache 1
-#define cpu_has_fpu 1
#define cpu_has_nofpuex 0
#define cpu_has_32fpr 1
#define cpu_has_counter 1
diff --git a/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h
index 8ad0c424a9af..ce4e4c6e09e2 100644
--- a/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h
@@ -28,7 +28,6 @@
#define cpu_has_4kex 1
#define cpu_has_3k_cache 0
#define cpu_has_4k_cache 1
-#define cpu_has_fpu 1
#define cpu_has_nofpuex 0
#define cpu_has_32fpr 1
#define cpu_has_counter 1
diff --git a/arch/mips/include/asm/mach-ralink/spaces.h b/arch/mips/include/asm/mach-ralink/spaces.h
index f7af11ea2d61..a9f0570d0f04 100644
--- a/arch/mips/include/asm/mach-ralink/spaces.h
+++ b/arch/mips/include/asm/mach-ralink/spaces.h
@@ -6,7 +6,9 @@
#define PCI_IOSIZE SZ_64K
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
+#ifdef CONFIG_PCI_DRIVERS_GENERIC
#define pci_remap_iospace pci_remap_iospace
+#endif
#include <asm/mach-generic/spaces.h>
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
index 6c61e0a63924..c1c0b3230e0a 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -253,6 +253,7 @@ enum cvmx_board_types_enum {
CVMX_BOARD_TYPE_REDWING = 43,
CVMX_BOARD_TYPE_NIC68_4 = 44,
CVMX_BOARD_TYPE_NIC10E_66 = 45,
+ CVMX_BOARD_TYPE_SNIC10E = 50,
CVMX_BOARD_TYPE_MAX,
/*
@@ -369,6 +370,7 @@ static inline const char *cvmx_board_type_to_string(enum
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_REDWING)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC68_4)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC10E_66)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_SNIC10E)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MAX)
/* Customer boards listed here */
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index c2196b1b6604..25a5253db7f4 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -50,6 +50,8 @@
# ifdef CONFIG_32BIT
# define __ARCH_WANT_STAT64
# define __ARCH_WANT_SYS_TIME32
+# else
+# define __ARCH_WANT_COMPAT_STAT
# endif
# ifdef CONFIG_MIPS32_O32
# define __ARCH_WANT_SYS_TIME32
diff --git a/arch/mips/include/uapi/asm/fcntl.h b/arch/mips/include/uapi/asm/fcntl.h
index 42e13dead543..0369a38e3d4f 100644
--- a/arch/mips/include/uapi/asm/fcntl.h
+++ b/arch/mips/include/uapi/asm/fcntl.h
@@ -44,36 +44,16 @@
#define F_SETOWN 24 /* for sockets. */
#define F_GETOWN 23 /* for sockets. */
-#ifndef __mips64
+#if __BITS_PER_LONG == 32 || defined(__KERNEL__)
#define F_GETLK64 33 /* using 'struct flock64' */
#define F_SETLK64 34
#define F_SETLKW64 35
-#endif
-
-/*
- * The flavours of struct flock. "struct flock" is the ABI compliant
- * variant. Finally struct flock64 is the LFS variant of struct flock. As
- * a historic accident and inconsistence with the ABI definition it doesn't
- * contain all the same fields as struct flock.
- */
+#endif /* __BITS_PER_LONG == 32 || defined(__KERNEL__) */
#if _MIPS_SIM != _MIPS_SIM_ABI64
-
-#include <linux/types.h>
-
-struct flock {
- short l_type;
- short l_whence;
- __kernel_off_t l_start;
- __kernel_off_t l_len;
- long l_sysid;
- __kernel_pid_t l_pid;
- long pad[4];
-};
-
-#define HAVE_ARCH_STRUCT_FLOCK
-
-#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
+#define __ARCH_FLOCK_EXTRA_SYSID long l_sysid;
+#define __ARCH_FLOCK_PAD long pad[4];
+#endif
#include <asm-generic/fcntl.h>
diff --git a/arch/mips/include/uapi/asm/stat.h b/arch/mips/include/uapi/asm/stat.h
index 8a8bb78883a4..aaccdc61be74 100644
--- a/arch/mips/include/uapi/asm/stat.h
+++ b/arch/mips/include/uapi/asm/stat.h
@@ -22,8 +22,8 @@ struct stat {
__kernel_ino_t st_ino;
__kernel_mode_t st_mode;
__u32 st_nlink;
- __kernel_uid_t st_uid;
- __kernel_gid_t st_gid;
+ __kernel_uid32_t st_uid;
+ __kernel_gid32_t st_gid;
unsigned st_rdev;
long st_pad2[2];
long st_size;
@@ -58,8 +58,8 @@ struct stat64 {
__kernel_mode_t st_mode;
__u32 st_nlink;
- __kernel_uid_t st_uid;
- __kernel_gid_t st_gid;
+ __kernel_uid32_t st_uid;
+ __kernel_gid32_t st_gid;
unsigned long st_rdev;
unsigned long st_pad1[3]; /* Reserved for st_rdev expansion */
@@ -99,8 +99,8 @@ struct stat {
__kernel_mode_t st_mode;
__u32 st_nlink;
- __kernel_uid_t st_uid;
- __kernel_gid_t st_gid;
+ __kernel_uid32_t st_uid;
+ __kernel_gid32_t st_gid;
unsigned int st_rdev;
unsigned int st_pad1[3]; /* Reserved for st_rdev expansion */
diff --git a/arch/mips/include/uapi/asm/termbits.h b/arch/mips/include/uapi/asm/termbits.h
index dfeffba729b7..1eb60903d6f0 100644
--- a/arch/mips/include/uapi/asm/termbits.h
+++ b/arch/mips/include/uapi/asm/termbits.h
@@ -11,11 +11,9 @@
#ifndef _ASM_TERMBITS_H
#define _ASM_TERMBITS_H
-#include <linux/posix_types.h>
+#include <asm-generic/termbits-common.h>
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
-typedef unsigned int tcflag_t;
+typedef unsigned int tcflag_t;
/*
* The ABI says nothing about NCC but seems to use NCCS as
@@ -54,175 +52,126 @@ struct ktermios {
};
/* c_cc characters */
-#define VINTR 0 /* Interrupt character [ISIG]. */
-#define VQUIT 1 /* Quit character [ISIG]. */
-#define VERASE 2 /* Erase character [ICANON]. */
-#define VKILL 3 /* Kill-line character [ICANON]. */
-#define VMIN 4 /* Minimum number of bytes read at once [!ICANON]. */
-#define VTIME 5 /* Time-out value (tenths of a second) [!ICANON]. */
-#define VEOL2 6 /* Second EOL character [ICANON]. */
+#define VINTR 0 /* Interrupt character [ISIG] */
+#define VQUIT 1 /* Quit character [ISIG] */
+#define VERASE 2 /* Erase character [ICANON] */
+#define VKILL 3 /* Kill-line character [ICANON] */
+#define VMIN 4 /* Minimum number of bytes read at once [!ICANON] */
+#define VTIME 5 /* Time-out value (tenths of a second) [!ICANON] */
+#define VEOL2 6 /* Second EOL character [ICANON] */
#define VSWTC 7 /* ??? */
#define VSWTCH VSWTC
-#define VSTART 8 /* Start (X-ON) character [IXON, IXOFF]. */
-#define VSTOP 9 /* Stop (X-OFF) character [IXON, IXOFF]. */
-#define VSUSP 10 /* Suspend character [ISIG]. */
+#define VSTART 8 /* Start (X-ON) character [IXON, IXOFF] */
+#define VSTOP 9 /* Stop (X-OFF) character [IXON, IXOFF] */
+#define VSUSP 10 /* Suspend character [ISIG] */
#if 0
/*
* VDSUSP is not supported
*/
-#define VDSUSP 11 /* Delayed suspend character [ISIG]. */
+#define VDSUSP 11 /* Delayed suspend character [ISIG] */
#endif
-#define VREPRINT 12 /* Reprint-line character [ICANON]. */
-#define VDISCARD 13 /* Discard character [IEXTEN]. */
-#define VWERASE 14 /* Word-erase character [ICANON]. */
-#define VLNEXT 15 /* Literal-next character [IEXTEN]. */
-#define VEOF 16 /* End-of-file character [ICANON]. */
-#define VEOL 17 /* End-of-line character [ICANON]. */
+#define VREPRINT 12 /* Reprint-line character [ICANON] */
+#define VDISCARD 13 /* Discard character [IEXTEN] */
+#define VWERASE 14 /* Word-erase character [ICANON] */
+#define VLNEXT 15 /* Literal-next character [IEXTEN] */
+#define VEOF 16 /* End-of-file character [ICANON] */
+#define VEOL 17 /* End-of-line character [ICANON] */
/* c_iflag bits */
-#define IGNBRK 0000001 /* Ignore break condition. */
-#define BRKINT 0000002 /* Signal interrupt on break. */
-#define IGNPAR 0000004 /* Ignore characters with parity errors. */
-#define PARMRK 0000010 /* Mark parity and framing errors. */
-#define INPCK 0000020 /* Enable input parity check. */
-#define ISTRIP 0000040 /* Strip 8th bit off characters. */
-#define INLCR 0000100 /* Map NL to CR on input. */
-#define IGNCR 0000200 /* Ignore CR. */
-#define ICRNL 0000400 /* Map CR to NL on input. */
-#define IUCLC 0001000 /* Map upper case to lower case on input. */
-#define IXON 0002000 /* Enable start/stop output control. */
-#define IXANY 0004000 /* Any character will restart after stop. */
-#define IXOFF 0010000 /* Enable start/stop input control. */
-#define IMAXBEL 0020000 /* Ring bell when input queue is full. */
-#define IUTF8 0040000 /* Input is UTF-8 */
+#define IUCLC 0x0200 /* Map upper case to lower case on input */
+#define IXON 0x0400 /* Enable start/stop output control */
+#define IXOFF 0x1000 /* Enable start/stop input control */
+#define IMAXBEL 0x2000 /* Ring bell when input queue is full */
+#define IUTF8 0x4000 /* Input is UTF-8 */
/* c_oflag bits */
-#define OPOST 0000001 /* Perform output processing. */
-#define OLCUC 0000002 /* Map lower case to upper case on output. */
-#define ONLCR 0000004 /* Map NL to CR-NL on output. */
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-#define OFILL 0000100
-#define OFDEL 0000200
-#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
-#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
-#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
-#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
-#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
-#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
+#define OLCUC 0x00002 /* Map lower case to upper case on output */
+#define ONLCR 0x00004 /* Map NL to CR-NL on output */
+#define NLDLY 0x00100
+#define NL0 0x00000
+#define NL1 0x00100
+#define CRDLY 0x00600
+#define CR0 0x00000
+#define CR1 0x00200
+#define CR2 0x00400
+#define CR3 0x00600
+#define TABDLY 0x01800
+#define TAB0 0x00000
+#define TAB1 0x00800
+#define TAB2 0x01000
+#define TAB3 0x01800
+#define XTABS 0x01800
+#define BSDLY 0x02000
+#define BS0 0x00000
+#define BS1 0x02000
+#define VTDLY 0x04000
+#define VT0 0x00000
+#define VT1 0x04000
+#define FFDLY 0x08000
+#define FF0 0x00000
+#define FF1 0x08000
/*
#define PAGEOUT ???
#define WRAP ???
*/
/* c_cflag bit meaning */
-#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0000060 /* Number of bits per byte (mask). */
-#define CS5 0000000 /* 5 bits per byte. */
-#define CS6 0000020 /* 6 bits per byte. */
-#define CS7 0000040 /* 7 bits per byte. */
-#define CS8 0000060 /* 8 bits per byte. */
-#define CSTOPB 0000100 /* Two stop bits instead of one. */
-#define CREAD 0000200 /* Enable receiver. */
-#define PARENB 0000400 /* Parity enable. */
-#define PARODD 0001000 /* Odd parity instead of even. */
-#define HUPCL 0002000 /* Hang up on last close. */
-#define CLOCAL 0004000 /* Ignore modem status lines. */
-#define CBAUDEX 0010000
-#define BOTHER 0010000
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+#define CBAUD 0x0000100f
+#define CSIZE 0x00000030 /* Number of bits per byte (mask) */
+#define CS5 0x00000000 /* 5 bits per byte */
+#define CS6 0x00000010 /* 6 bits per byte */
+#define CS7 0x00000020 /* 7 bits per byte */
+#define CS8 0x00000030 /* 8 bits per byte */
+#define CSTOPB 0x00000040 /* Two stop bits instead of one */
+#define CREAD 0x00000080 /* Enable receiver */
+#define PARENB 0x00000100 /* Parity enable */
+#define PARODD 0x00000200 /* Odd parity instead of even */
+#define HUPCL 0x00000400 /* Hang up on last close */
+#define CLOCAL 0x00000800 /* Ignore modem status lines */
+#define CBAUDEX 0x00001000
+#define BOTHER 0x00001000
+#define B57600 0x00001001
+#define B115200 0x00001002
+#define B230400 0x00001003
+#define B460800 0x00001004
+#define B500000 0x00001005
+#define B576000 0x00001006
+#define B921600 0x00001007
+#define B1000000 0x00001008
+#define B1152000 0x00001009
+#define B1500000 0x0000100a
+#define B2000000 0x0000100b
+#define B2500000 0x0000100c
+#define B3000000 0x0000100d
+#define B3500000 0x0000100e
+#define B4000000 0x0000100f
+#define CIBAUD 0x100f0000 /* input baud rate */
/* c_lflag bits */
-#define ISIG 0000001 /* Enable signals. */
-#define ICANON 0000002 /* Do erase and kill processing. */
-#define XCASE 0000004
-#define ECHO 0000010 /* Enable echo. */
-#define ECHOE 0000020 /* Visual erase for ERASE. */
-#define ECHOK 0000040 /* Echo NL after KILL. */
-#define ECHONL 0000100 /* Echo NL even if ECHO is off. */
-#define NOFLSH 0000200 /* Disable flush after interrupt. */
-#define IEXTEN 0000400 /* Enable DISCARD and LNEXT. */
-#define ECHOCTL 0001000 /* Echo control characters as ^X. */
-#define ECHOPRT 0002000 /* Hardcopy visual erase. */
-#define ECHOKE 0004000 /* Visual erase for KILL. */
-#define FLUSHO 0020000
-#define PENDIN 0040000 /* Retype pending input (state). */
-#define TOSTOP 0100000 /* Send SIGTTOU for background output. */
-#define ITOSTOP TOSTOP
-#define EXTPROC 0200000 /* External processing on pty */
+#define ISIG 0x00001 /* Enable signals */
+#define ICANON 0x00002 /* Do erase and kill processing */
+#define XCASE 0x00004
+#define ECHO 0x00008 /* Enable echo */
+#define ECHOE 0x00010 /* Visual erase for ERASE */
+#define ECHOK 0x00020 /* Echo NL after KILL */
+#define ECHONL 0x00040 /* Echo NL even if ECHO is off */
+#define NOFLSH 0x00080 /* Disable flush after interrupt */
+#define IEXTEN 0x00100 /* Enable DISCARD and LNEXT */
+#define ECHOCTL 0x00200 /* Echo control characters as ^X */
+#define ECHOPRT 0x00400 /* Hardcopy visual erase */
+#define ECHOKE 0x00800 /* Visual erase for KILL */
+#define FLUSHO 0x02000
+#define PENDIN 0x04000 /* Retype pending input (state) */
+#define TOSTOP 0x08000 /* Send SIGTTOU for background output */
+#define ITOSTOP TOSTOP
+#define EXTPROC 0x10000 /* External processing on pty */
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
-/* tcflow() and TCXONC use these */
-#define TCOOFF 0 /* Suspend output. */
-#define TCOON 1 /* Restart suspended output. */
-#define TCIOFF 2 /* Send a STOP character. */
-#define TCION 3 /* Send a START character. */
-
-/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0 /* Discard data received but not yet read. */
-#define TCOFLUSH 1 /* Discard data written but not yet sent. */
-#define TCIOFLUSH 2 /* Discard all pending data. */
-
/* tcsetattr uses these */
-#define TCSANOW TCSETS /* Change immediately. */
-#define TCSADRAIN TCSETSW /* Change when pending output is written. */
-#define TCSAFLUSH TCSETSF /* Flush pending input before changing. */
+#define TCSANOW TCSETS /* Change immediately */
+#define TCSADRAIN TCSETSW /* Change when pending output is written */
+#define TCSAFLUSH TCSETSF /* Flush pending input before changing */
#endif /* _ASM_TERMBITS_H */
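Both termbits.h conversions in this series (MIPS here, parisc further below) only change the radix of the constants from octal to hexadecimal, while dropping the values now supplied by asm-generic/termbits-common.h. A quick userspace check of a few representative values (the test program is illustrative; only the constant values come from the header):

/* radix_check.c - verify a few MIPS termbits values survived the
 * octal -> hex conversion unchanged.  Illustrative only. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	assert(0002000 == 0x0400);	/* IXON    */
	assert(0010017 == 0x100f);	/* CBAUD   */
	assert(0000400 == 0x0100);	/* PARENB  */
	assert(0100000 == 0x8000);	/* FFDLY   */
	assert(0200000 == 0x10000);	/* EXTPROC */
	puts("octal and hex encodings agree");
	return 0;
}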
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index 495ba7cc56ec..264d453876aa 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -141,7 +141,7 @@ void __init plat_time_init(void)
/*
* Set clock to 100Hz.
*
- * The R4030 timer receives an input clock of 1kHz which is divieded by
+ * The R4030 timer receives an input clock of 1kHz which is divided by
* a programmable 4-bit divider. This makes it fairly inflexible.
*/
r4030_write_reg32(JAZZ_TIMER_INTERVAL, 9);
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index ac9c8cfb2ba9..e974a4954df8 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -22,7 +22,7 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
/*
* Calculate a shift & mask that correspond to the value we wish to
- * exchange within the naturally aligned 4 byte integerthat includes
+ * exchange within the naturally aligned 4 byte integer that includes
* it.
*/
shift = (unsigned long)ptr & 0x3;
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index f0ea92937546..d510f628ee03 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -156,7 +156,7 @@ static inline void check_errata(void)
/*
* Erratum "RPS May Cause Incorrect Instruction Execution"
* This code only handles VPE0, any SMP/RTOS code
- * making use of VPE1 will be responsable for that VPE.
+ * making use of VPE1 will be responsible for that VPE.
*/
if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
write_c0_config7(read_c0_config7() | MIPS_CONF7_RPS);
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
index 2e50f55185a6..6e50f4902409 100644
--- a/arch/mips/kernel/crash_dump.c
+++ b/arch/mips/kernel/crash_dump.c
@@ -1,22 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/highmem.h>
#include <linux/crash_dump.h>
+#include <linux/uio.h>
-/**
- * copy_oldmem_page - copy one page from "oldmem"
- * @pfn: page frame number to be copied
- * @buf: target memory address for the copy; this can be in kernel address
- * space or user address space (see @userbuf)
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page (based on pfn) to begin the copy
- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
- * otherwise @buf is in kernel address space, use memcpy().
- *
- * Copy a page from "oldmem". For this page, there is no pte mapped
- * in the current kernel.
- */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
{
void *vaddr;
@@ -24,14 +12,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
return 0;
vaddr = kmap_local_pfn(pfn);
-
- if (!userbuf) {
- memcpy(buf, vaddr + offset, csize);
- } else {
- if (copy_to_user(buf, vaddr + offset, csize))
- csize = -EFAULT;
- }
-
+ csize = copy_to_iter(vaddr + offset, csize, iter);
kunmap_local(vaddr);
return csize;
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 146d9fa77f75..53adcc1b2ed5 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -228,7 +228,7 @@ void __init check_wait(void)
break;
/*
- * Another rev is incremeting c0_count at a reduced clock
+ * Another rev is incrementing c0_count at a reduced clock
* rate while in WAIT mode. So we basically have the choice
* between using the cp0 timer as clocksource or avoiding
* the WAIT instruction. Until more details are known,
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 6c7f3b143fdc..316b27d0d2fb 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -44,10 +44,11 @@ static const union mips_instruction breakpoint2_insn = {
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
-static int __kprobes insn_has_delayslot(union mips_instruction insn)
+static int insn_has_delayslot(union mips_instruction insn)
{
return __insn_has_delay_slot(insn);
}
+NOKPROBE_SYMBOL(insn_has_delayslot);
/*
* insn_has_ll_or_sc function checks whether instruction is ll or sc
@@ -56,7 +57,7 @@ static int __kprobes insn_has_delayslot(union mips_instruction insn)
* instructions; cannot do much about breakpoint in the middle of
* ll/sc pair; it is upto user to avoid those places
*/
-static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
+static int insn_has_ll_or_sc(union mips_instruction insn)
{
int ret = 0;
@@ -72,8 +73,9 @@ static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
}
return ret;
}
+NOKPROBE_SYMBOL(insn_has_ll_or_sc);
-int __kprobes arch_prepare_kprobe(struct kprobe *p)
+int arch_prepare_kprobe(struct kprobe *p)
{
union mips_instruction insn;
union mips_instruction prev_insn;
@@ -132,26 +134,30 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
out:
return ret;
}
+NOKPROBE_SYMBOL(arch_prepare_kprobe);
-void __kprobes arch_arm_kprobe(struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
{
*p->addr = breakpoint_insn;
flush_insn_slot(p);
}
+NOKPROBE_SYMBOL(arch_arm_kprobe);
-void __kprobes arch_disarm_kprobe(struct kprobe *p)
+void arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flush_insn_slot(p);
}
+NOKPROBE_SYMBOL(arch_disarm_kprobe);
-void __kprobes arch_remove_kprobe(struct kprobe *p)
+void arch_remove_kprobe(struct kprobe *p)
{
if (p->ainsn.insn) {
free_insn_slot(p->ainsn.insn, 0);
p->ainsn.insn = NULL;
}
}
+NOKPROBE_SYMBOL(arch_remove_kprobe);
static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
@@ -257,7 +263,7 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
* breakpoint trap. In case of branch instructions, the target
* epc to be restored.
*/
-static void __kprobes resume_execution(struct kprobe *p,
+static void resume_execution(struct kprobe *p,
struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
@@ -268,8 +274,9 @@ static void __kprobes resume_execution(struct kprobe *p,
regs->cp0_epc = orig_epc + 4;
}
}
+NOKPROBE_SYMBOL(resume_execution);
-static int __kprobes kprobe_handler(struct pt_regs *regs)
+static int kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
int ret = 0;
@@ -367,6 +374,7 @@ no_kprobe:
return ret;
}
+NOKPROBE_SYMBOL(kprobe_handler);
static inline int post_kprobe_handler(struct pt_regs *regs)
{
@@ -415,7 +423,7 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
/*
* Wrapper routine for handling exceptions.
*/
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
@@ -446,6 +454,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
}
return ret;
}
+NOKPROBE_SYMBOL(kprobe_exceptions_notify);
/*
* Function return probe trampoline:
@@ -469,7 +478,7 @@ static void __used kretprobe_trampoline_holder(void)
void __kretprobe_trampoline(void);
-void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+void arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
@@ -478,11 +487,12 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
/* Replace the return addr with trampoline addr */
regs->regs[31] = (unsigned long)__kretprobe_trampoline;
}
+NOKPROBE_SYMBOL(arch_prepare_kretprobe);
/*
* Called when the probe at kretprobe trampoline is hit
*/
-static int __kprobes trampoline_probe_handler(struct kprobe *p,
+static int trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
instruction_pointer(regs) = __kretprobe_trampoline_handler(regs, NULL);
@@ -493,14 +503,16 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
*/
return 1;
}
+NOKPROBE_SYMBOL(trampoline_probe_handler);
-int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+int arch_trampoline_kprobe(struct kprobe *p)
{
if (p->addr == (kprobe_opcode_t *)__kretprobe_trampoline)
return 1;
return 0;
}
+NOKPROBE_SYMBOL(arch_trampoline_kprobe);
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *)__kretprobe_trampoline,
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 17aff13cd7ce..3e386f7e1545 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -28,6 +28,7 @@ phys_addr_t __weak mips_cpc_default_phys_base(void)
cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
if (cpc_node) {
err = of_address_to_resource(cpc_node, 0, &res);
+ of_node_put(cpc_node);
if (!err)
return res.start;
}
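The one-line mips-cpc.c fix above drops the reference that of_find_compatible_node() takes on the node it returns. A hedged kernel-style sketch of the same get/use/put pattern (example_lookup_base() is a made-up name, not kernel code):

/* Sketch only: every node returned by an of_find_*() helper holds a
 * reference and must be released with of_node_put() when done. */
#include <linux/of.h>
#include <linux/of_address.h>

static phys_addr_t example_lookup_base(const char *compat)
{
	struct device_node *np;
	struct resource res;
	phys_addr_t base = 0;

	np = of_find_compatible_node(NULL, NULL, compat);
	if (!np)
		return 0;

	if (!of_address_to_resource(np, 0, &res))
		base = res.start;

	of_node_put(np);	/* balance the reference taken above */
	return base;
}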
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 1641d274fe37..c4d6b09136b1 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -329,7 +329,7 @@ static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
for (i = mipspmu.num_counters - 1; i >= 0; i--) {
/*
* Note that some MIPS perf events can be counted by both
- * even and odd counters, wheresas many other are only by
+ * even and odd counters, whereas many other are only by
* even _or_ odd counters. This introduces an issue that
* when the former kind of event takes the counter the
* latter kind of event wants to use, then the "counter
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c2d5f4bfe1f3..35b912bce429 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -105,10 +105,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
/*
* Copy architecture-specific thread state
*/
-int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long kthread_arg, struct task_struct *p,
- unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
unsigned long childksp;
@@ -120,12 +121,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
/* Put the stack after the struct pt_regs. */
childksp = (unsigned long) childregs;
p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
/* kernel thread */
unsigned long status = p->thread.cp0_status;
memset(childregs, 0, sizeof(struct pt_regs));
- p->thread.reg16 = usp; /* fn */
- p->thread.reg17 = kthread_arg;
+ p->thread.reg16 = (unsigned long)args->fn;
+ p->thread.reg17 = (unsigned long)args->fn_arg;
p->thread.reg29 = childksp;
p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000)
diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
index 6288780b779e..e7ce07b3e79b 100644
--- a/arch/mips/kernel/reset.c
+++ b/arch/mips/kernel/reset.c
@@ -114,8 +114,7 @@ void machine_halt(void)
void machine_power_off(void)
{
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
#ifdef CONFIG_SMP
preempt_disable();
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index ef73ba1e0ec1..2ca156a5b231 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -37,6 +37,7 @@
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
+#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
@@ -344,6 +345,11 @@ static int __init early_parse_mem(char *p)
{
phys_addr_t start, size;
+ if (!p) {
+ pr_err("mem parameter is empty, do nothing\n");
+ return -EINVAL;
+ }
+
/*
* If a user specifies memory size, we
* blow away any automatically generated
@@ -359,7 +365,10 @@ static int __init early_parse_mem(char *p)
if (*p == '@')
start = memparse(p + 1, &p);
- memblock_add(start, size);
+ if (IS_ENABLED(CONFIG_NUMA))
+ memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
+ else
+ memblock_add(start, size);
return 0;
}
@@ -554,7 +563,7 @@ static void __init bootcmdline_init(void)
* unmodified.
*/
if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
- strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+ strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
return;
}
@@ -566,7 +575,7 @@ static void __init bootcmdline_init(void)
* boot_command_line to undo anything early_init_dt_scan_chosen() did.
*/
if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
- strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+ strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
else
boot_command_line[0] = 0;
@@ -628,7 +637,7 @@ static void __init arch_mem_init(char **cmdline_p)
memblock_set_bottom_up(true);
bootcmdline_init();
- strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+ strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
parse_early_param();
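The setup.c hunks harden early_parse_mem(): an empty mem= argument is now rejected before memparse() dereferences it, and user-supplied ranges are registered on the owning NUMA node. A userspace sketch of the "size[@start]" parsing with the new NULL guard (parse_size() is a stand-in for the kernel's memparse(), not the real implementation):

/* memparse_demo.c - illustrative only. */
#include <stdio.h>
#include <stdlib.h>

static unsigned long long parse_size(const char *s, char **end)
{
	unsigned long long v = strtoull(s, end, 0);

	switch (**end) {
	case 'G': case 'g': v <<= 10;	/* fall through */
	case 'M': case 'm': v <<= 10;	/* fall through */
	case 'K': case 'k': v <<= 10; (*end)++; break;
	}
	return v;
}

static int early_parse_mem_demo(char *p)
{
	unsigned long long start = 0, size;

	if (!p) {		/* the case the patch now rejects */
		fprintf(stderr, "mem parameter is empty, do nothing\n");
		return -1;
	}

	size = parse_size(p, &p);
	if (*p == '@')
		start = parse_size(p + 1, &p);

	printf("add memory: start=%#llx size=%#llx\n", start, size);
	return 0;
}

int main(void)
{
	char arg[] = "512M@0x20000000";

	early_parse_mem_demo(arg);
	early_parse_mem_demo(NULL);
	return 0;
}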
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 1986d1309410..1d93b85271ba 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -518,6 +518,12 @@ static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
void flush_tlb_mm(struct mm_struct *mm)
{
+ if (!mm)
+ return;
+
+ if (atomic_read(&mm->mm_users) == 0)
+ return; /* happens as a result of exit_mmap() */
+
preempt_disable();
if (cpu_has_mmid) {
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index a3b50d5e3b25..4e91971daae1 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -153,7 +153,7 @@ EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);
* kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
* @vcpu: KVM VCPU pointer.
* @gpa: Guest virtual address in a TLB mapped guest segment.
- * @gpa: Ponter to output guest physical address it maps to.
+ * @gpa: Pointer to output guest physical address it maps to.
*
* Converts a guest virtual address in a guest TLB mapped segment to a guest
* physical address, by probing the guest TLB.
diff --git a/arch/mips/loongson32/Kconfig b/arch/mips/loongson32/Kconfig
index e27879b4813b..2ef9da0016df 100644
--- a/arch/mips/loongson32/Kconfig
+++ b/arch/mips/loongson32/Kconfig
@@ -46,7 +46,7 @@ menuconfig CEVT_CSRC_LS1X
If unsure, say N.
choice
- prompt "Select clockevent/clocksource"
+ prompt "Select clockevent/clocksource"
depends on CEVT_CSRC_LS1X
default TIMER_USE_PWM0
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 44f98100e84e..b08bc556d30d 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -35,7 +35,7 @@ int show_unhandled_signals = 1;
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
-static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
+static void __do_page_fault(struct pt_regs *regs, unsigned long write,
unsigned long address)
{
struct vm_area_struct * vma = NULL;
@@ -322,8 +322,9 @@ vmalloc_fault:
}
#endif
}
+NOKPROBE_SYMBOL(__do_page_fault);
-asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+asmlinkage void do_page_fault(struct pt_regs *regs,
unsigned long write, unsigned long address)
{
enum ctx_state prev_state;
@@ -332,3 +333,4 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
__do_page_fault(regs, write, address);
exception_exit(prev_state);
}
+NOKPROBE_SYMBOL(do_page_fault);
diff --git a/arch/mips/net/bpf_jit_comp32.c b/arch/mips/net/bpf_jit_comp32.c
index 044b11b65bca..83c975d5cca2 100644
--- a/arch/mips/net/bpf_jit_comp32.c
+++ b/arch/mips/net/bpf_jit_comp32.c
@@ -722,7 +722,7 @@ static void emit_atomic_r32(struct jit_context *ctx,
0, JIT_RESERVED_STACK);
/*
* Argument 1: dst+off if xchg, otherwise src, passed in register a0
- * Argument 2: src if xchg, othersize dst+off, passed in register a1
+ * Argument 2: src if xchg, otherwise dst+off, passed in register a1
*/
emit(ctx, move, MIPS_R_T9, dst);
if (code == BPF_XCHG) {
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index d919a0d813a1..c9edd3fb380d 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -895,7 +895,7 @@ retry:
mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */
mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. */
mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */
- mem_access_subid.s.ba = 0; /* PCIe Adddress Bits <63:34>. */
+ mem_access_subid.s.ba = 0; /* PCIe Address Bits <63:34>. */
/*
* Setup mem access 12-15 for port 0, 16-19 for port 1,
@@ -1345,7 +1345,7 @@ static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
- /* PCIe Adddress Bits <63:34>. */
+ /* PCIe Address Bits <63:34>. */
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
mem_access_subid.cn68xx.ba = 0;
else
diff --git a/arch/mips/pic32/pic32mzda/config.c b/arch/mips/pic32/pic32mzda/config.c
index 36afe1b5b9c7..f69532007717 100644
--- a/arch/mips/pic32/pic32mzda/config.c
+++ b/arch/mips/pic32/pic32mzda/config.c
@@ -111,7 +111,7 @@ void __init pic32_config_init(void)
pic32_reset_status = readl(pic32_conf_base + PIC32_RCON);
writel(-1, PIC32_CLR(pic32_conf_base + PIC32_RCON));
- /* Device Inforation */
+ /* Device Information */
pr_info("Device Id: 0x%08x, Device Ver: 0x%04x\n",
pic32_get_device_id(),
pic32_get_device_version());
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index 9028dbbb45dd..8f0861c58080 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -11,7 +11,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
-#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/timer.h>
@@ -41,7 +40,7 @@
static struct timer_list power_timer, blink_timer, debounce_timer;
static unsigned long blink_timer_timeout;
-#define MACHINE_PANICED 1
+#define MACHINE_PANICKED 1
#define MACHINE_SHUTTING_DOWN 2
static int machine_state;
@@ -112,7 +111,7 @@ static void debounce(struct timer_list *unused)
return;
}
- if (machine_state & MACHINE_PANICED)
+ if (machine_state & MACHINE_PANICKED)
sgimc->cpuctrl0 |= SGIMC_CCTRL0_SYSINIT;
enable_irq(SGI_PANEL_IRQ);
@@ -120,7 +119,7 @@ static void debounce(struct timer_list *unused)
static inline void power_button(void)
{
- if (machine_state & MACHINE_PANICED)
+ if (machine_state & MACHINE_PANICKED)
return;
if ((machine_state & MACHINE_SHUTTING_DOWN) ||
@@ -167,9 +166,9 @@ static irqreturn_t panel_int(int irq, void *dev_id)
static int panic_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
- if (machine_state & MACHINE_PANICED)
+ if (machine_state & MACHINE_PANICKED)
return NOTIFY_DONE;
- machine_state |= MACHINE_PANICED;
+ machine_state |= MACHINE_PANICKED;
blink_timer_timeout = PANIC_FREQ;
blink_timeout(&blink_timer);
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index 000ede156bdc..e762886d1dda 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -53,6 +53,8 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
}
platform_device_add_resources(pdev, &w1_res, 1);
platform_device_add_data(pdev, wd, sizeof(*wd));
+ /* platform_device_add_data() duplicates the data */
+ kfree(wd);
platform_device_add(pdev);
bd = kzalloc(sizeof(*bd), GFP_KERNEL);
@@ -83,6 +85,8 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
bd->io_offset = offset;
platform_device_add_data(pdev, bd, sizeof(*bd));
+ /* platform_device_add_data() duplicates the data */
+ kfree(bd);
platform_device_add(pdev);
pr_info("xtalk:n%d/%x bridge widget\n", nasid, widget);
return;
diff --git a/arch/mips/sgi-ip30/ip30-xtalk.c b/arch/mips/sgi-ip30/ip30-xtalk.c
index 8a2894645529..8129524421cb 100644
--- a/arch/mips/sgi-ip30/ip30-xtalk.c
+++ b/arch/mips/sgi-ip30/ip30-xtalk.c
@@ -63,6 +63,8 @@ static void bridge_platform_create(int widget, int masterwid)
}
platform_device_add_resources(pdev, &w1_res, 1);
platform_device_add_data(pdev, wd, sizeof(*wd));
+ /* platform_device_add_data() duplicates the data */
+ kfree(wd);
platform_device_add(pdev);
bd = kzalloc(sizeof(*bd), GFP_KERNEL);
@@ -92,6 +94,8 @@ static void bridge_platform_create(int widget, int masterwid)
bd->io_offset = IP30_SWIN_BASE(widget);
platform_device_add_data(pdev, bd, sizeof(*bd));
+ /* platform_device_add_data() duplicates the data */
+ kfree(bd);
platform_device_add(pdev);
pr_info("xtalk:%x bridge widget\n", widget);
return;
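Both xtalk hunks plug the same leak: platform_device_add_data() copies the supplied buffer into the platform device, so the locally allocated wd/bd structures can be freed right after the call. A hedged kernel-style sketch of the pattern (struct my_pdata and example_add_pdata() are hypothetical):

#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_pdata {
	int some_field;		/* hypothetical board data */
};

static int example_add_pdata(struct platform_device *pdev)
{
	struct my_pdata *pd;
	int ret;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;
	pd->some_field = 42;

	ret = platform_device_add_data(pdev, pd, sizeof(*pd));
	kfree(pd);	/* platform_device_add_data() duplicated it */
	return ret;
}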
diff --git a/arch/mips/sibyte/bcm1480/setup.c b/arch/mips/sibyte/bcm1480/setup.c
index 6f34b871b08e..e3e807046a9c 100644
--- a/arch/mips/sibyte/bcm1480/setup.c
+++ b/arch/mips/sibyte/bcm1480/setup.c
@@ -34,8 +34,6 @@ static char *pass_str;
static int __init setup_bcm1x80_bcm1x55(void)
{
- int ret = 0;
-
switch (soc_pass) {
case K_SYS_REVISION_BCM1480_S0:
periph_rev = 1;
@@ -64,7 +62,7 @@ static int __init setup_bcm1x80_bcm1x55(void)
break;
}
- return ret;
+ return 0;
}
/* Setup code likely to be common to all SiByte platforms */
diff --git a/arch/mips/tools/loongson3-llsc-check.c b/arch/mips/tools/loongson3-llsc-check.c
index bdbc7b4324ec..5f68a4fa8a7e 100644
--- a/arch/mips/tools/loongson3-llsc-check.c
+++ b/arch/mips/tools/loongson3-llsc-check.c
@@ -217,7 +217,7 @@ static int check_code(uint64_t pc, uint32_t *code, size_t sz)
)
/*
- * Skip the first instructionm allowing check_ll to look backwards
+ * Skip the first instruction, allowing check_ll to look backwards
* unconditionally.
*/
advance();
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index fb998726bd5d..e98845543b77 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -225,7 +225,7 @@ txx9_alloc_pci_controller(struct pci_controller *pcic,
static int __init
txx9_arch_pci_init(void)
{
- PCIBIOS_MIN_IO = 0x8000; /* reseve legacy I/O space */
+ PCIBIOS_MIN_IO = 0x8000; /* reserve legacy I/O space */
return 0;
}
arch_initcall(txx9_arch_pci_init);
diff --git a/arch/mips/vr41xx/common/cmu.c b/arch/mips/vr41xx/common/cmu.c
index b59ee5479313..e4cbe116b26d 100644
--- a/arch/mips/vr41xx/common/cmu.c
+++ b/arch/mips/vr41xx/common/cmu.c
@@ -236,8 +236,6 @@ static int __init vr41xx_cmu_init(void)
if (current_cpu_type() == CPU_VR4133)
cmuclkmsk2 = cmu_read(CMUCLKMSK2);
- spin_lock_init(&cmu_lock);
-
return 0;
}
diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
index f8ea522a1588..29593b98567d 100644
--- a/arch/nios2/kernel/process.c
+++ b/arch/nios2/kernel/process.c
@@ -100,21 +100,23 @@ void flush_thread(void)
{
}
-int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
- struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
struct pt_regs *regs;
struct switch_stack *stack;
struct switch_stack *childstack =
((struct switch_stack *)childregs) - 1;
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
memset(childstack, 0,
sizeof(struct switch_stack) + sizeof(struct pt_regs));
- childstack->r16 = usp; /* fn */
- childstack->r17 = arg;
+ childstack->r16 = (unsigned long) args->fn;
+ childstack->r17 = (unsigned long) args->fn_arg;
childstack->ra = (unsigned long) ret_from_kernel_thread;
childregs->estatus = STATUS_PIE;
childregs->sp = (unsigned long) childstack;
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 1d4c0921aafa..52dc983ddeba 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -167,9 +167,11 @@ extern asmlinkage void ret_from_fork(void);
*/
int
-copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
- struct task_struct *p, unsigned long tls)
+copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct pt_regs *userregs;
struct pt_regs *kregs;
unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
@@ -187,10 +189,10 @@ copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
sp -= sizeof(struct pt_regs);
kregs = (struct pt_regs *)sp;
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
memset(kregs, 0, sizeof(struct pt_regs));
- kregs->gpr[20] = usp; /* fn, kernel thread */
- kregs->gpr[22] = arg;
+ kregs->gpr[20] = (unsigned long)args->fn;
+ kregs->gpr[22] = (unsigned long)args->fn_arg;
} else {
*userregs = *current_pt_regs();
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index bd22578859d0..5f2448dc5a2b 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -332,10 +332,6 @@ config COMPAT
def_bool y
depends on 64BIT
-config SYSVIPC_COMPAT
- def_bool y
- depends on COMPAT && SYSVIPC
-
config AUDIT_ARCH
def_bool y
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index aca1710fd658..e38d993d87f2 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -18,7 +18,6 @@
boot := arch/parisc/boot
KBUILD_IMAGE := $(boot)/bzImage
-NM = sh $(srctree)/arch/parisc/nm
CHECKFLAGS += -D__hppa__=1
ifdef CONFIG_64BIT
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index ea0cb318b13d..0f0d4a496fef 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -143,7 +143,7 @@
depd,z \r, 63-(\sa), 64-(\sa), \t
.endm
- /* Shift Right - note the r and t can NOT be the same! */
+ /* Shift Right for 32-bit. Clobbers upper 32-bit on PA2.0. */
.macro shr r, sa, t
extru \r, 31-(\sa), 32-(\sa), \t
.endm
@@ -174,6 +174,16 @@
#endif
.endm
+ /* The depw instruction leaves the most significant 32 bits of the
+ * target register in an undefined state on PA 2.0 systems. */
+ .macro dep_safe i, p, len, t
+#ifdef CONFIG_64BIT
+ depd \i, 32+(\p), \len, \t
+#else
+ depw \i, \p, \len, \t
+#endif
+ .endm
+
/* load 32-bit 'value' into 'reg' compensating for the ldil
* sign-extension when running in wide mode.
* WARNING!! neither 'value' nor 'reg' can be expressions
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 5032e758594e..e23d06b51a20 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -54,6 +54,7 @@ void parisc_setup_cache_timing(void);
#define asm_io_sync() asm volatile("sync" \
ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :::"memory")
+#define asm_syncdma() asm volatile("syncdma" :::"memory")
#endif /* ! __ASSEMBLY__ */
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index c04f5a637c39..339d1b833fa7 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -11,16 +11,16 @@
#define compat_mode_t compat_mode_t
typedef u16 compat_mode_t;
+#define compat_ipc_pid_t compat_ipc_pid_t
+typedef u16 compat_ipc_pid_t;
+
+#define compat_ipc64_perm compat_ipc64_perm
+
#include <asm-generic/compat.h>
-#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "parisc\0\0"
-typedef u32 __compat_uid_t;
-typedef u32 __compat_gid_t;
-typedef u32 compat_dev_t;
typedef u16 compat_nlink_t;
-typedef u16 compat_ipc_pid_t;
struct compat_stat {
compat_dev_t st_dev; /* dev_t is 32 bits on parisc */
@@ -53,37 +53,6 @@ struct compat_stat {
u32 st_spare4[3];
};
-struct compat_flock {
- short l_type;
- short l_whence;
- compat_off_t l_start;
- compat_off_t l_len;
- compat_pid_t l_pid;
-};
-
-struct compat_flock64 {
- short l_type;
- short l_whence;
- compat_loff_t l_start;
- compat_loff_t l_len;
- compat_pid_t l_pid;
-};
-
-struct compat_statfs {
- s32 f_type;
- s32 f_bsize;
- s32 f_blocks;
- s32 f_bfree;
- s32 f_bavail;
- s32 f_files;
- s32 f_ffree;
- __kernel_fsid_t f_fsid;
- s32 f_namelen;
- s32 f_frsize;
- s32 f_flags;
- s32 f_spare[4];
-};
-
struct compat_sigcontext {
compat_int_t sc_flags;
compat_int_t sc_gr[32]; /* PSW in sc_gr[0] */
@@ -93,10 +62,6 @@ struct compat_sigcontext {
compat_int_t sc_sar; /* cr11 */
};
-#define COMPAT_RLIM_INFINITY 0xffffffff
-
-#define COMPAT_OFF_T_MAX 0x7fffffff
-
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid_t uid;
diff --git a/arch/parisc/include/asm/fb.h b/arch/parisc/include/asm/fb.h
index c4cd6360f996..d63a2acb91f2 100644
--- a/arch/parisc/include/asm/fb.h
+++ b/arch/parisc/include/asm/fb.h
@@ -12,9 +12,13 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
}
+#if defined(CONFIG_STI_CONSOLE) || defined(CONFIG_FB_STI)
+int fb_is_primary_device(struct fb_info *info);
+#else
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
+#endif
#endif /* _ASM_FB_H_ */
diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h
index e480b2c05407..5cd80ce1163a 100644
--- a/arch/parisc/include/asm/fixmap.h
+++ b/arch/parisc/include/asm/fixmap.h
@@ -9,12 +9,27 @@
*
* All of the values in this file must be <4GB (because of assembly
* loading restrictions). If you place this region anywhere above
- * __PAGE_OFFSET, you must adjust the memory map accordingly */
+ * __PAGE_OFFSET, you must adjust the memory map accordingly
+ */
-/* The alias region is used in kernel space to do copy/clear to or
- * from areas congruently mapped with user space. It is 8MB large
- * and must be 16MB aligned */
-#define TMPALIAS_MAP_START ((__PAGE_OFFSET) - 16*1024*1024)
+/*
+ * The tmpalias region is used in kernel space to copy/clear/flush data
+ * from pages congruently mapped with user space. It is comprised of
+ * a pair regions. The size of these regions is determined by the largest
+ * cache aliasing boundary for machines that support equivalent aliasing.
+ *
+ * The c3750 with PA8700 processor returns an alias value of 11. This
+ * indicates that it has an alias boundary of 4 MB. It also supports
+ * non-equivalent aliasing without a performance penalty.
+ *
+ * Machines with PA8800/PA8900 processors return an alias value of 0.
+ * This indicates the alias boundary is unknown and may be larger than
+ * 16 MB. Non-equivalent aliasing is not supported.
+ *
+ * Here we assume the maximum alias boundary is 4 MB.
+ */
+#define TMPALIAS_SIZE_BITS 22 /* 4 MB */
+#define TMPALIAS_MAP_START ((__PAGE_OFFSET) - (2 << TMPALIAS_SIZE_BITS))
#define FIXMAP_SIZE (FIX_BITMAP_COUNT << PAGE_SHIFT)
#define FIXMAP_START (TMPALIAS_MAP_START - FIXMAP_SIZE)
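The rewritten fixmap.h comment ties the tmpalias window to TMPALIAS_SIZE_BITS: two congruence regions of 1 << 22 bytes each sit directly below __PAGE_OFFSET, so the start address now drops by 2 << 22 = 8 MB instead of the old hard-coded 16 MB. A trivial arithmetic check (illustrative only):

#include <assert.h>

#define TMPALIAS_SIZE_BITS 22			/* 4 MB per region */

int main(void)
{
	static_assert((1UL << TMPALIAS_SIZE_BITS) == 4UL * 1024 * 1024,
		      "one region is 4 MB");
	static_assert((2UL << TMPALIAS_SIZE_BITS) == 8UL * 1024 * 1024,
		      "the pair spans 8 MB below __PAGE_OFFSET");
	return 0;
}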
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index 7708a5806f09..e38f9a90ac15 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -142,7 +142,6 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
}
#define __ARCH_WANT_NEW_STAT
-#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
@@ -156,7 +155,6 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_FORK
@@ -164,6 +162,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_CLONE3
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
+#define __ARCH_WANT_COMPAT_STAT
#ifdef CONFIG_64BIT
#define __ARCH_WANT_SYS_TIME
diff --git a/arch/parisc/include/uapi/asm/termbits.h b/arch/parisc/include/uapi/asm/termbits.h
index 40e920f8d683..3a8938d26fb4 100644
--- a/arch/parisc/include/uapi/asm/termbits.h
+++ b/arch/parisc/include/uapi/asm/termbits.h
@@ -2,10 +2,8 @@
#ifndef __ARCH_PARISC_TERMBITS_H__
#define __ARCH_PARISC_TERMBITS_H__
-#include <linux/posix_types.h>
+#include <asm-generic/termbits-common.h>
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
typedef unsigned int tcflag_t;
#define NCCS 19
@@ -41,158 +39,107 @@ struct ktermios {
};
/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
-
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IUCLC 0001000
-#define IXON 0002000
-#define IXANY 0004000
-#define IXOFF 0010000
-#define IMAXBEL 0040000
-#define IUTF8 0100000
+#define IUCLC 0x0200
+#define IXON 0x0400
+#define IXOFF 0x1000
+#define IMAXBEL 0x4000
+#define IUTF8 0x8000
/* c_oflag bits */
-#define OPOST 0000001
-#define OLCUC 0000002
-#define ONLCR 0000004
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-#define OFILL 0000100
-#define OFDEL 0000200
-#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
-#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
-#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
-#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
-#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
-#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
+#define OLCUC 0x00002
+#define ONLCR 0x00004
+#define NLDLY 0x00100
+#define NL0 0x00000
+#define NL1 0x00100
+#define CRDLY 0x00600
+#define CR0 0x00000
+#define CR1 0x00200
+#define CR2 0x00400
+#define CR3 0x00600
+#define TABDLY 0x01800
+#define TAB0 0x00000
+#define TAB1 0x00800
+#define TAB2 0x01000
+#define TAB3 0x01800
+#define XTABS 0x01800
+#define BSDLY 0x02000
+#define BS0 0x00000
+#define BS1 0x02000
+#define VTDLY 0x04000
+#define VT0 0x00000
+#define VT1 0x04000
+#define FFDLY 0x08000
+#define FF0 0x00000
+#define FF1 0x08000
/* c_cflag bit meaning */
-#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0000060
-#define CS5 0000000
-#define CS6 0000020
-#define CS7 0000040
-#define CS8 0000060
-#define CSTOPB 0000100
-#define CREAD 0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL 0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define BOTHER 0010000
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
-
+#define CBAUD 0x0000100f
+#define CSIZE 0x00000030
+#define CS5 0x00000000
+#define CS6 0x00000010
+#define CS7 0x00000020
+#define CS8 0x00000030
+#define CSTOPB 0x00000040
+#define CREAD 0x00000080
+#define PARENB 0x00000100
+#define PARODD 0x00000200
+#define HUPCL 0x00000400
+#define CLOCAL 0x00000800
+#define CBAUDEX 0x00001000
+#define BOTHER 0x00001000
+#define B57600 0x00001001
+#define B115200 0x00001002
+#define B230400 0x00001003
+#define B460800 0x00001004
+#define B500000 0x00001005
+#define B576000 0x00001006
+#define B921600 0x00001007
+#define B1000000 0x00001008
+#define B1152000 0x00001009
+#define B1500000 0x0000100a
+#define B2000000 0x0000100b
+#define B2500000 0x0000100c
+#define B3000000 0x0000100d
+#define B3500000 0x0000100e
+#define B4000000 0x0000100f
+#define CIBAUD 0x100f0000 /* input baud rate */
/* c_lflag bits */
-#define ISIG 0000001
-#define ICANON 0000002
-#define XCASE 0000004
-#define ECHO 0000010
-#define ECHOE 0000020
-#define ECHOK 0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL 0001000
-#define ECHOPRT 0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-#define EXTPROC 0200000
-
-/* tcflow() and TCXONC use these */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
+#define ISIG 0x00001
+#define ICANON 0x00002
+#define XCASE 0x00004
+#define ECHO 0x00008
+#define ECHOE 0x00010
+#define ECHOK 0x00020
+#define ECHONL 0x00040
+#define NOFLSH 0x00080
+#define TOSTOP 0x00100
+#define ECHOCTL 0x00200
+#define ECHOPRT 0x00400
+#define ECHOKE 0x00800
+#define FLUSHO 0x01000
+#define PENDIN 0x04000
+#define IEXTEN 0x08000
+#define EXTPROC 0x10000
/* tcsetattr uses these */
#define TCSANOW 0
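The hunk above converts the parisc termbits constants from octal to hexadecimal without changing their values. A quick stand-alone check of two representative constants (an illustrative sketch, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* CBAUD: octal 0010017 == 0x100f; IUTF8/IEXTEN: octal 0100000 == 0x8000 */
	printf("%d %d\n", 0010017 == 0x100f, 0100000 == 0x8000);	/* prints "1 1" */
	return 0;
}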
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 0fd04073d4b6..c8a11fcecf4c 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -754,6 +754,9 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;
+ /* Ensure DMA is complete */
+ asm_syncdma();
+
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
(unsigned long)size >= parisc_cache_flush_threshold) {
flush_tlb_kernel_range(start, end);
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index ecf50159359e..df8102fb435f 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -554,8 +554,9 @@
extrd,s \pte,63,25,\pte
.endm
- /* The alias region is an 8MB aligned 16MB to do clear and
- * copy user pages at addresses congruent with the user
+ /* The alias region is comprised of a pair of 4 MB regions
+ * aligned to 8 MB. It is used to clear/copy/flush user pages
+ * using kernel virtual addresses congruent with the user
* virtual address.
*
* To use the alias page, you set %r26 up with the to TLB
@@ -565,13 +566,8 @@
.macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
cmpib,COND(<>),n 0,\spc,\fault
ldil L%(TMPALIAS_MAP_START),\tmp
-#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
- /* on LP64, ldi will sign extend into the upper 32 bits,
- * which is behaviour we don't want */
- depdi 0,31,32,\tmp
-#endif
copy \va,\tmp1
- depi 0,31,23,\tmp1
+ depi_safe 0,31,TMPALIAS_SIZE_BITS+1,\tmp1
cmpb,COND(<>),n \tmp,\tmp1,\fault
mfctl %cr19,\tmp /* iir */
/* get the opcode (first six bits) into \tmp */
@@ -604,13 +600,13 @@
* OK, it is in the temp alias region, check whether "from" or "to".
* Check "subtle" note in pacache.S re: r23/r26.
*/
-#ifdef CONFIG_64BIT
- extrd,u,*= \va,41,1,%r0
-#else
- extrw,u,= \va,9,1,%r0
-#endif
+ extrw,u,= \va,31-TMPALIAS_SIZE_BITS,1,%r0
or,COND(tr) %r23,%r0,\pte
or %r26,%r0,\pte
+
+ /* convert phys addr in \pte (from r23 or r26) to tlb insert format */
+ SHRREG \pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte
+ depi_safe _PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte
.endm
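As the comment above describes, the "to" and "from" alias mappings live in adjacent windows and differ only in the bit selected by TMPALIAS_SIZE_BITS. A minimal user-space sketch of that address layout; the base address and size below are assumed purely for illustration and are not taken from the patch:

#include <stdio.h>

#define TMPALIAS_SIZE_BITS	22		/* assumed: each alias window is 4 MB */
#define TMPALIAS_MAP_START	0xfc000000UL	/* assumed 8 MB aligned base address */

int main(void)
{
	unsigned long offset = 0x3000;		/* page-aligned offset into the window */
	unsigned long to   = TMPALIAS_MAP_START | offset;
	unsigned long from = to | (1UL << TMPALIAS_SIZE_BITS);	/* second 4 MB window */

	printf("to=%#lx from=%#lx\n", to, from);
	return 0;
}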
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index b4c3f01e2399..9a0018f1f42c 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -300,7 +300,6 @@ fdoneloop2:
fdce,m %arg1(%sr1, %arg0) /* Fdce for one loop */
fdsync:
- syncdma
sync
mtsm %r22 /* restore I-bit */
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
@@ -488,6 +487,8 @@ ENDPROC_CFI(copy_page_asm)
* parisc chip designers that there will not ever be a parisc
* chip with a larger alias boundary (Never say never :-) ).
*
+ * Yah, what about the PA8800 and PA8900 processors?
+ *
* Subtle: the dtlb miss handlers support the temp alias region by
* "knowing" that if a dtlb miss happens within the temp alias
* region it must have occurred while in clear_user_page. Since
@@ -499,19 +500,10 @@ ENDPROC_CFI(copy_page_asm)
* miss on the translation, the dtlb miss handler inserts the
* translation into the tlb using these values:
*
- * %r26 physical page (shifted for tlb insert) of "to" translation
- * %r23 physical page (shifted for tlb insert) of "from" translation
+ * %r26 physical address of "to" translation
+ * %r23 physical address of "from" translation
*/
- /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
- #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
- .macro convert_phys_for_tlb_insert20 phys
- extrd,u \phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
-#if _PAGE_SIZE_ENCODING_DEFAULT
- depdi _PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
-#endif
- .endm
-
/*
* copy_user_page_asm() performs a page copy using mappings
* equivalent to the user page mappings. It can be used to
@@ -540,24 +532,10 @@ ENTRY_CFI(copy_user_page_asm)
sub %r25, %r1, %r23
ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-#if (TMPALIAS_MAP_START >= 0x80000000)
- depdi 0, 31,32, %r28 /* clear any sign extension */
-#endif
- convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
- convert_phys_for_tlb_insert20 %r23 /* convert phys addr to tlb insert format */
- depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
- depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
- copy %r28, %r29
- depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */
-#else
- extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
- extrw,u %r23, 24,25, %r23 /* convert phys addr to tlb insert format */
- depw %r24, 31,22, %r28 /* Form aliased virtual address 'to' */
- depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
+ dep_safe %r24, 31,TMPALIAS_SIZE_BITS, %r28 /* Form aliased virtual address 'to' */
+ depi_safe 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
copy %r28, %r29
- depwi 1, 9,1, %r29 /* Form aliased virtual address 'from' */
-#endif
+ depi_safe 1, 31-TMPALIAS_SIZE_BITS,1, %r29 /* Form aliased virtual address 'from' */
/* Purge any old translations */
@@ -687,18 +665,8 @@ ENTRY_CFI(clear_user_page_asm)
tophys_r1 %r26
ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-#if (TMPALIAS_MAP_START >= 0x80000000)
- depdi 0, 31,32, %r28 /* clear any sign extension */
-#endif
- convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
- depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
- depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
-#else
- extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
- depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
- depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
-#endif
+ dep_safe %r25, 31,TMPALIAS_SIZE_BITS, %r28 /* Form aliased virtual address 'to' */
+ depi_safe 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
/* Purge any old translation */
@@ -763,18 +731,8 @@ ENDPROC_CFI(clear_user_page_asm)
ENTRY_CFI(flush_dcache_page_asm)
ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-#if (TMPALIAS_MAP_START >= 0x80000000)
- depdi 0, 31,32, %r28 /* clear any sign extension */
-#endif
- convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
- depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
- depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
-#else
- extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
- depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
- depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
-#endif
+ dep_safe %r25, 31,TMPALIAS_SIZE_BITS, %r28 /* Form aliased virtual address 'to' */
+ depi_safe 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
/* Purge any old translation */
@@ -822,18 +780,8 @@ ENDPROC_CFI(flush_dcache_page_asm)
ENTRY_CFI(purge_dcache_page_asm)
ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-#if (TMPALIAS_MAP_START >= 0x80000000)
- depdi 0, 31,32, %r28 /* clear any sign extension */
-#endif
- convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
- depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
- depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
-#else
- extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
- depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
- depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
-#endif
+ dep_safe %r25, 31,TMPALIAS_SIZE_BITS, %r28 /* Form aliased virtual address 'to' */
+ depi_safe 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
/* Purge any old translation */
@@ -881,18 +829,8 @@ ENDPROC_CFI(purge_dcache_page_asm)
ENTRY_CFI(flush_icache_page_asm)
ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-#if (TMPALIAS_MAP_START >= 0x80000000)
- depdi 0, 31,32, %r28 /* clear any sign extension */
-#endif
- convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
- depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
- depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
-#else
- extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
- depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
- depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
-#endif
+ dep_safe %r25, 31,TMPALIAS_SIZE_BITS, %r28 /* Form aliased virtual address 'to' */
+ depi_safe 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
/* Purge any old translation. Note that the FIC instruction
* may use either the instruction or data TLB. Given that we
@@ -1098,7 +1036,6 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
sync
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
- syncdma
bv %r0(%r2)
nop
ENDPROC_CFI(flush_kernel_dcache_range_asm)
@@ -1140,7 +1077,6 @@ ENTRY_CFI(purge_kernel_dcache_range_asm)
sync
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
- syncdma
bv %r0(%r2)
nop
ENDPROC_CFI(purge_kernel_dcache_range_asm)
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 28b6a2a5574c..7c37e09c92da 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
+#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
@@ -116,8 +117,7 @@ void machine_power_off(void)
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
/* ipmi_poweroff may have been installed. */
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
/* It seems we have no way to power the system off via
* software. The user has to press the button himself. */
@@ -206,9 +206,11 @@ arch_initcall(parisc_idle_init);
* Copy architecture-specific thread state
*/
int
-copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
+copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct pt_regs *cregs = &(p->thread.regs);
void *stack = task_stack_page(p);
@@ -218,10 +220,10 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
extern void * const ret_from_kernel_thread;
extern void * const child_return;
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
/* kernel thread */
memset(cregs, 0, sizeof(struct pt_regs));
- if (!usp) /* idle thread */
+ if (args->idle) /* idle thread */
return 0;
/* Must exit via ret_from_kernel_thread in order
* to call schedule_tail()
@@ -233,12 +235,12 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
* ret_from_kernel_thread.
*/
#ifdef CONFIG_64BIT
- cregs->gr[27] = ((unsigned long *)usp)[3];
- cregs->gr[26] = ((unsigned long *)usp)[2];
+ cregs->gr[27] = ((unsigned long *)args->fn)[3];
+ cregs->gr[26] = ((unsigned long *)args->fn)[2];
#else
- cregs->gr[26] = usp;
+ cregs->gr[26] = (unsigned long) args->fn;
#endif
- cregs->gr[25] = kthread_arg;
+ cregs->gr[25] = (unsigned long) args->fn_arg;
} else {
/* user thread */
/* usp must be word aligned. This also prevents users from
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 26eb568f8b96..dddaaa6e7a82 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -327,8 +327,6 @@ int init_per_cpu(int cpunum)
set_firmware_width();
ret = pdc_coproc_cfg(&coproc_cfg);
- store_cpu_topology(cpunum);
-
if(ret >= 0 && coproc_cfg.ccr_functional) {
mtctl(coproc_cfg.ccr_functional, 10); /* 10 == Coprocessor Control Reg */
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c
index 9696e3cb6a2a..b9d845e314f8 100644
--- a/arch/parisc/kernel/topology.c
+++ b/arch/parisc/kernel/topology.c
@@ -20,8 +20,6 @@
static DEFINE_PER_CPU(struct cpu, cpu_devices);
-static int dualcores_found;
-
/*
* store_cpu_topology is called at boot when only one cpu is running
* and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
@@ -60,7 +58,6 @@ void store_cpu_topology(unsigned int cpuid)
if (p->cpu_loc) {
cpuid_topo->core_id++;
cpuid_topo->package_id = cpu_topology[cpu].package_id;
- dualcores_found = 1;
continue;
}
}
@@ -80,22 +77,11 @@ void store_cpu_topology(unsigned int cpuid)
cpu_topology[cpuid].package_id);
}
-static struct sched_domain_topology_level parisc_mc_topology[] = {
-#ifdef CONFIG_SCHED_MC
- { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
-#endif
-
- { cpu_cpu_mask, SD_INIT_NAME(DIE) },
- { NULL, },
-};
-
/*
* init_cpu_topology is called at boot when only one cpu is running
* which prevent simultaneous write access to cpu_topology array
*/
void __init init_cpu_topology(void)
{
- /* Set scheduler topology descriptor */
- if (dualcores_found)
- set_sched_topology(parisc_mc_topology);
+ reset_cpu_topology();
}
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 1dc2e88e7b04..0a81499dd35e 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -555,6 +555,12 @@ void __init mem_init(void)
BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD);
#endif
+#ifdef CONFIG_64BIT
+ /* prevent ldil_%L() asm statements from sign-extending into the upper 32 bits */
+ BUILD_BUG_ON(__PAGE_OFFSET >= 0x80000000);
+ BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000);
+#endif
+
high_memory = __va((max_pfn << PAGE_SHIFT));
set_max_mapnr(max_low_pfn);
memblock_free_all();
diff --git a/arch/parisc/nm b/arch/parisc/nm
deleted file mode 100644
index c788308de33f..000000000000
--- a/arch/parisc/nm
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-##
-# Hack to have an nm which removes the local symbols. We also rely
-# on this nm being hidden out of the ordinarily executable path
-##
-${CROSS_COMPILE}nm $* | grep -v '.LC*[0-9]*$'
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 22e2f1113c4c..be68c1f02b79 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -109,6 +109,7 @@ config PPC
# Please keep this list sorted alphabetically.
#
select ARCH_32BIT_OFF_T if PPC32
+ select ARCH_DISABLE_KASAN_INLINE if PPC_RADIX_MMU
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_HAS_COPY_MC if PPC64
@@ -118,7 +119,6 @@ config PPC
select ARCH_HAS_DEBUG_WX if STRICT_KERNEL_RWX
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_MAP_DIRECT if PPC_PSERIES
- select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_HUGEPD if HUGETLB_PAGE
@@ -155,10 +155,12 @@ config PPC
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS if PPC_QUEUED_SPINLOCKS
select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANTS_MODULES_DATA_IN_VMALLOC if PPC_BOOK3S_32 || PPC_8xx
+ select ARCH_WANTS_NO_INSTR
select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
select BUILDTIME_TABLE_SORT
@@ -190,7 +192,8 @@ config PPC
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
- select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14
+ select HAVE_ARCH_KASAN if PPC_RADIX_MMU
+ select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
select HAVE_ARCH_KFENCE if PPC_BOOK3S_32 || PPC_8xx || 40x
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
@@ -210,7 +213,7 @@ config PPC
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
- select HAVE_FUNCTION_DESCRIPTORS if PPC64 && !CPU_LITTLE_ENDIAN
+ select HAVE_FUNCTION_DESCRIPTORS if PPC64_ELF_ABI_V1
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
@@ -300,11 +303,6 @@ config COMPAT
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
-config SYSVIPC_COMPAT
- bool
- depends on COMPAT && SYSVIPC
- default y
-
config SCHED_OMIT_FRAME_POINTER
bool
default y
@@ -760,6 +758,22 @@ config PPC_256K_PAGES
endchoice
+config PAGE_SIZE_4KB
+ def_bool y
+ depends on PPC_4K_PAGES
+
+config PAGE_SIZE_16KB
+ def_bool y
+ depends on PPC_16K_PAGES
+
+config PAGE_SIZE_64KB
+ def_bool y
+ depends on PPC_64K_PAGES
+
+config PAGE_SIZE_256KB
+ def_bool y
+ depends on PPC_256K_PAGES
+
config PPC_PAGE_SHIFT
int
default 18 if PPC_256K_PAGES
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 192f0ed0097f..9f363c143d86 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -374,4 +374,5 @@ config PPC_FAST_ENDIAN_SWITCH
config KASAN_SHADOW_OFFSET
hex
depends on KASAN
- default 0xe0000000
+ default 0xe0000000 if PPC32
+ default 0xa80e000000000000 if PPC64
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 45a9caa37b4e..a0cd70712061 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -89,10 +89,10 @@ endif
ifdef CONFIG_PPC64
ifndef CONFIG_CC_IS_CLANG
-cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
-cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
-aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
-aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
+cflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mabi=elfv1)
+cflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mcall-aixdesc)
+aflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mabi=elfv1)
+aflags-$(CONFIG_PPC64_ELF_ABI_V2) += -mabi=elfv2
endif
endif
@@ -141,7 +141,7 @@ endif
CFLAGS-$(CONFIG_PPC64) := $(call cc-option,-mtraceback=no)
ifndef CONFIG_CC_IS_CLANG
-ifdef CONFIG_CPU_LITTLE_ENDIAN
+ifdef CONFIG_PPC64_ELF_ABI_V2
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
else
@@ -213,7 +213,7 @@ CHECKFLAGS += -m$(BITS) -D__powerpc__ -D__powerpc$(BITS)__
ifdef CONFIG_CPU_BIG_ENDIAN
CHECKFLAGS += -D__BIG_ENDIAN__
else
-CHECKFLAGS += -D__LITTLE_ENDIAN__ -D_CALL_ELF=2
+CHECKFLAGS += -D__LITTLE_ENDIAN__
endif
ifdef CONFIG_476FPE_ERR46
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 008bf0bff186..a9cd2ea4a861 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -38,9 +38,13 @@ BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
$(LINUXINCLUDE)
ifdef CONFIG_PPC64_BOOT_WRAPPER
-BOOTCFLAGS += -m64
+ifdef CONFIG_CPU_LITTLE_ENDIAN
+BOOTCFLAGS += -m64 -mcpu=powerpc64le
else
-BOOTCFLAGS += -m32
+BOOTCFLAGS += -m64 -mcpu=powerpc64
+endif
+else
+BOOTCFLAGS += -m32 -mcpu=powerpc
endif
BOOTCFLAGS += -isystem $(shell $(BOOTCC) -print-file-name=include)
@@ -49,6 +53,8 @@ ifdef CONFIG_CPU_BIG_ENDIAN
BOOTCFLAGS += -mbig-endian
else
BOOTCFLAGS += -mlittle-endian
+endif
+ifdef CONFIG_PPC64_ELF_ABI_V2
BOOTCFLAGS += $(call cc-option,-mabi=elfv2)
endif
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index feadee18e271..44544720daae 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -8,7 +8,8 @@
#include "ppc_asm.h"
RELA = 7
-RELACOUNT = 0x6ffffff9
+RELASZ = 8
+RELAENT = 9
.data
/* A procedure descriptor used when booting this as a COFF file.
@@ -75,34 +76,39 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
bne 11f
lwz r9,4(r12) /* get RELA pointer in r9 */
b 12f
-11: addis r8,r8,(-RELACOUNT)@ha
- cmpwi r8,RELACOUNT@l
+11: cmpwi r8,RELASZ
+ bne .Lcheck_for_relaent
+ lwz r0,4(r12) /* get RELASZ value in r0 */
+ b 12f
+.Lcheck_for_relaent:
+ cmpwi r8,RELAENT
bne 12f
- lwz r0,4(r12) /* get RELACOUNT value in r0 */
+ lwz r14,4(r12) /* get RELAENT value in r14 */
12: addi r12,r12,8
b 9b
/* The relocation section contains a list of relocations.
* We now do the R_PPC_RELATIVE ones, which point to words
- * which need to be initialized with addend + offset.
- * The R_PPC_RELATIVE ones come first and there are RELACOUNT
- * of them. */
+ * which need to be initialized with addend + offset */
10: /* skip relocation if we don't have both */
cmpwi r0,0
beq 3f
cmpwi r9,0
beq 3f
+ cmpwi r14,0
+ beq 3f
add r9,r9,r11 /* Relocate RELA pointer */
+ divwu r0,r0,r14 /* RELASZ / RELAENT */
mtctr r0
2: lbz r0,4+3(r9) /* ELF32_R_INFO(reloc->r_info) */
cmpwi r0,22 /* R_PPC_RELATIVE */
- bne 3f
+ bne .Lnext
lwz r12,0(r9) /* reloc->r_offset */
lwz r0,8(r9) /* reloc->r_addend */
add r0,r0,r11
stwx r0,r11,r12
- addi r9,r9,12
+.Lnext: add r9,r9,r14
bdnz 2b
/* Do a cache flush for our text, in case the loader didn't */
@@ -160,32 +166,39 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
bne 10f
ld r13,8(r11) /* get RELA pointer in r13 */
b 11f
-10: addis r12,r12,(-RELACOUNT)@ha
- cmpdi r12,RELACOUNT@l
- bne 11f
- ld r8,8(r11) /* get RELACOUNT value in r8 */
+10: cmpwi r12,RELASZ
+ bne .Lcheck_for_relaent
+ lwz r8,8(r11) /* get RELASZ pointer in r8 */
+ b 11f
+.Lcheck_for_relaent:
+ cmpwi r12,RELAENT
+ bne 11f
+ lwz r14,8(r11) /* get RELAENT pointer in r14 */
11: addi r11,r11,16
b 9b
12:
- cmpdi r13,0 /* check we have both RELA and RELACOUNT */
+ cmpdi r13,0 /* check we have RELA, RELASZ and RELAENT */
cmpdi cr1,r8,0
beq 3f
beq cr1,3f
+ cmpdi r14,0
+ beq 3f
 /* Calculate the runtime offset. */
subf r13,r13,r9
/* Run through the list of relocations and process the
* R_PPC64_RELATIVE ones. */
+ divdu r8,r8,r14 /* RELASZ / RELAENT */
mtctr r8
13: ld r0,8(r9) /* ELF64_R_TYPE(reloc->r_info) */
cmpdi r0,22 /* R_PPC64_RELATIVE */
- bne 3f
+ bne .Lnext
ld r12,0(r9) /* reloc->r_offset */
ld r0,16(r9) /* reloc->r_addend */
add r0,r0,r13
stdx r0,r13,r12
- addi r9,r9,24
+.Lnext: add r9,r9,r14
bdnz 13b
/* Do a cache flush for our text, in case the loader didn't */
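The rewritten loops above derive the entry count from RELASZ / RELAENT instead of RELACOUNT, and skip non-R_PPC_RELATIVE entries rather than stopping at the first one. Roughly, in C, the 32-bit case looks like this (a sketch only, with illustrative names; not code from the patch):

#include <stdint.h>
#include <stddef.h>

struct elf32_rela { uint32_t r_offset; uint32_t r_info; int32_t r_addend; };
#define R_PPC_RELATIVE 22

static void apply_relative(struct elf32_rela *rela, size_t relasz, size_t relaent,
			   uintptr_t base)
{
	for (size_t i = 0; i < relasz / relaent; i++) {
		if ((rela[i].r_info & 0xff) != R_PPC_RELATIVE)
			continue;		/* skip, don't stop */
		*(uint32_t *)(base + rela[i].r_offset) =
			(uint32_t)(base + rela[i].r_addend);
	}
}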
diff --git a/arch/powerpc/boot/cuboot-hotfoot.c b/arch/powerpc/boot/cuboot-hotfoot.c
index 888a6b9bfead..0e5532f855d6 100644
--- a/arch/powerpc/boot/cuboot-hotfoot.c
+++ b/arch/powerpc/boot/cuboot-hotfoot.c
@@ -70,7 +70,7 @@ static void hotfoot_fixups(void)
printf("Fixing devtree for 4M Flash\n");
- /* First fix up the base addresse */
+ /* First fix up the base address */
getprop(devp, "reg", regs, sizeof(regs));
regs[0] = 0;
regs[1] = 0xffc00000;
diff --git a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
index 884e01bcb243..7a590c92fe56 100644
--- a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
@@ -198,4 +198,9 @@
reg = <0xe0000 0x1000>;
fsl,has-rstcr;
};
+
+ pmc: power@e0070 {
+ compatible = "fsl,mpc8548-pmc";
+ reg = <0xe0070 0x20>;
+ };
};
diff --git a/arch/powerpc/boot/dts/microwatt.dts b/arch/powerpc/boot/dts/microwatt.dts
index 65b270a90f94..b69db1d275cd 100644
--- a/arch/powerpc/boot/dts/microwatt.dts
+++ b/arch/powerpc/boot/dts/microwatt.dts
@@ -90,6 +90,8 @@
64-bit;
d-cache-size = <0x1000>;
ibm,chip-id = <0>;
+ ibm,mmu-lpid-bits = <12>;
+ ibm,mmu-pid-bits = <20>;
};
};
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index 6455fc9a244f..8334bc3cbe49 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -200,12 +200,6 @@ void __dt_fixup_mac_addresses(u32 startindex, ...);
__dt_fixup_mac_addresses(0, __VA_ARGS__, NULL)
-static inline void *find_node_by_linuxphandle(const u32 linuxphandle)
-{
- return find_node_by_prop_value(NULL, "linux,phandle",
- (char *)&linuxphandle, sizeof(u32));
-}
-
static inline char *get_path(const void *phandle, char *buf, int len)
{
if (dt_ops.get_path)
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 9184eda780fd..55978f32fa77 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -162,7 +162,7 @@ while [ "$#" -gt 0 ]; do
fi
;;
--no-gzip)
- # a "feature" of the the wrapper script is that it can be used outside
+ # a "feature" of the wrapper script is that it can be used outside
# the kernel tree. So keeping this around for backwards compatibility.
compression=
uboot_comp=none
diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c
index c2b23b69d7b1..e8dfe9fb0266 100644
--- a/arch/powerpc/crypto/aes-spe-glue.c
+++ b/arch/powerpc/crypto/aes-spe-glue.c
@@ -404,7 +404,7 @@ static int ppc_xts_decrypt(struct skcipher_request *req)
/*
* Algorithm definitions. Disabling alignment (cra_alignmask=0) was chosen
- * because the e500 platform can handle unaligned reads/writes very efficently.
+ * because the e500 platform can handle unaligned reads/writes very efficiently.
 * This improves IPsec throughput by another few percent. Additionally we assume
* that AES context is always aligned to at least 8 bytes because it is created
* with kmalloc() in the crypto infrastructure
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index a7a0572f3846..17e7a778c856 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -18,6 +18,10 @@
#include <asm/book3s/64/hash-4k.h>
#endif
+#define H_PTRS_PER_PTE (1 << H_PTE_INDEX_SIZE)
+#define H_PTRS_PER_PMD (1 << H_PMD_INDEX_SIZE)
+#define H_PTRS_PER_PUD (1 << H_PUD_INDEX_SIZE)
+
/* Bits to set in a PMD/PUD/PGD entry valid bit*/
#define HASH_PMD_VAL_BITS (0x8000000000000000UL)
#define HASH_PUD_VAL_BITS (0x8000000000000000UL)
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index 12e150e615b7..b37a28f62cf6 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -8,10 +8,6 @@
*/
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern unsigned long
-radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags);
extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 21f780942911..1c4eebbc69c9 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -18,6 +18,7 @@
* complete pgtable.h but only a portion of it.
*/
#include <asm/book3s/64/pgtable.h>
+#include <asm/book3s/64/slice.h>
#include <asm/task_size_64.h>
#include <asm/cpu_has_feature.h>
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 006cbec70ffe..570a4960cf17 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -4,12 +4,6 @@
#include <asm/page.h>
-#ifdef CONFIG_HUGETLB_PAGE
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#endif
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-
#ifndef __ASSEMBLY__
/*
* Page size definition
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index eecff2036869..cb9d5fd39d7f 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -231,6 +231,9 @@ extern unsigned long __pmd_frag_size_shift;
#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+#define MAX_PTRS_PER_PTE ((H_PTRS_PER_PTE > R_PTRS_PER_PTE) ? H_PTRS_PER_PTE : R_PTRS_PER_PTE)
+#define MAX_PTRS_PER_PMD ((H_PTRS_PER_PMD > R_PTRS_PER_PMD) ? H_PTRS_PER_PMD : R_PTRS_PER_PMD)
+#define MAX_PTRS_PER_PUD ((H_PTRS_PER_PUD > R_PTRS_PER_PUD) ? H_PTRS_PER_PUD : R_PTRS_PER_PUD)
#define MAX_PTRS_PER_PGD (1 << (H_PGD_INDEX_SIZE > RADIX_PGD_INDEX_SIZE ? \
H_PGD_INDEX_SIZE : RADIX_PGD_INDEX_SIZE))
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index d090d9612348..686001eda936 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -35,6 +35,11 @@
#define RADIX_PMD_SHIFT (PAGE_SHIFT + RADIX_PTE_INDEX_SIZE)
#define RADIX_PUD_SHIFT (RADIX_PMD_SHIFT + RADIX_PMD_INDEX_SIZE)
#define RADIX_PGD_SHIFT (RADIX_PUD_SHIFT + RADIX_PUD_INDEX_SIZE)
+
+#define R_PTRS_PER_PTE (1 << RADIX_PTE_INDEX_SIZE)
+#define R_PTRS_PER_PMD (1 << RADIX_PMD_INDEX_SIZE)
+#define R_PTRS_PER_PUD (1 << RADIX_PUD_INDEX_SIZE)
+
/*
* Size of EA range mapped by our pagetables.
*/
@@ -68,11 +73,11 @@
*
*
* 3rd quadrant expanded:
- * +------------------------------+
- * | |
+ * +------------------------------+ Highest address (0xc010000000000000)
+ * +------------------------------+ KASAN shadow end (0xc00fc00000000000)
* | |
* | |
- * +------------------------------+ Kernel vmemmap end (0xc010000000000000)
+ * +------------------------------+ Kernel vmemmap end/shadow start (0xc00e000000000000)
* | |
* | 512TB |
* | |
@@ -91,6 +96,7 @@
* +------------------------------+ Kernel linear (0xc.....)
*/
+/* For the sizes of the shadow area, see kasan.h */
/*
* If we store section details in page->flags we can't increase the MAX_PHYSMEM_BITS
diff --git a/arch/powerpc/include/asm/book3s/64/slice.h b/arch/powerpc/include/asm/book3s/64/slice.h
index f0d3194ba41b..5fbe18544cbd 100644
--- a/arch/powerpc/include/asm/book3s/64/slice.h
+++ b/arch/powerpc/include/asm/book3s/64/slice.h
@@ -2,6 +2,16 @@
#ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H
#define _ASM_POWERPC_BOOK3S_64_SLICE_H
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+#ifdef CONFIG_HUGETLB_PAGE
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+#endif
+
#define SLICE_LOW_SHIFT 28
#define SLICE_LOW_TOP (0x100000000ul)
#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
@@ -13,4 +23,20 @@
#define SLB_ADDR_LIMIT_DEFAULT DEFAULT_MAP_WINDOW_USER64
+struct mm_struct;
+
+unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+ unsigned long flags, unsigned int psize,
+ int topdown);
+
+unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
+
+void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
+ unsigned long len, unsigned int psize);
+
+void slice_init_new_context_exec(struct mm_struct *mm);
+void slice_setup_new_exec(void);
+
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index ab3832b93f0a..4b573a3b7e17 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -38,14 +38,15 @@ extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
*/
static inline __sum16 csum_fold(__wsum sum)
{
- unsigned int tmp;
-
- /* swap the two 16-bit halves of sum */
- __asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
- /* if there is a carry from adding the two 16-bit halves,
- it will carry from the lower half into the upper half,
- giving us the correct sum in the upper half. */
- return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
+ u32 tmp = (__force u32)sum;
+
+ /*
+ * swap the two 16-bit halves of sum
+ * if there is a carry from adding the two 16-bit halves,
+ * it will carry from the lower half into the upper half,
+ * giving us the correct sum in the upper half.
+ */
+ return (__force __sum16)(~(tmp + rol32(tmp, 16)) >> 16);
}
static inline u32 from64to32(u64 x)
@@ -95,16 +96,15 @@ static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
u64 res = (__force u64)csum;
-#endif
+
+ res += (__force u64)addend;
+ return (__force __wsum)((u32)res + (res >> 32));
+#else
if (__builtin_constant_p(csum) && csum == 0)
return addend;
if (__builtin_constant_p(addend) && addend == 0)
return csum;
-#ifdef __powerpc64__
- res += (__force u64)addend;
- return (__force __wsum)((u32)res + (res >> 32));
-#else
asm("addc %0,%0,%1;"
"addze %0,%0;"
: "+r" (csum) : "r" (addend) : "xer");
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 409483b2d0ce..1c6316ec4b74 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -22,10 +22,55 @@
#define BRANCH_SET_LINK 0x1
#define BRANCH_ABSOLUTE 0x2
-bool is_offset_in_branch_range(long offset);
-bool is_offset_in_cond_branch_range(long offset);
-int create_branch(ppc_inst_t *instr, const u32 *addr,
- unsigned long target, int flags);
+DECLARE_STATIC_KEY_FALSE(init_mem_is_free);
+
+/*
+ * Powerpc branch instruction is :
+ *
+ * 0 6 30 31
+ * +---------+----------------+---+---+
+ * | opcode | LI |AA |LK |
+ * +---------+----------------+---+---+
+ * Where AA = 0 and LK = 0
+ *
+ * LI is a signed 24-bit integer. The real branch offset is computed
+ * by: imm32 = SignExtend(LI:'0b00', 32);
+ *
+ * So the maximum forward branch should be:
+ * (0x007fffff << 2) = 0x01fffffc = 0x1fffffc
+ * The maximum backward branch should be:
+ * (0xff800000 << 2) = 0xfe000000 = -0x2000000
+ */
+static inline bool is_offset_in_branch_range(long offset)
+{
+ return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
+}
+
+static inline bool is_offset_in_cond_branch_range(long offset)
+{
+ return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
+}
+
+static inline int create_branch(ppc_inst_t *instr, const u32 *addr,
+ unsigned long target, int flags)
+{
+ long offset;
+
+ *instr = ppc_inst(0);
+ offset = target;
+ if (! (flags & BRANCH_ABSOLUTE))
+ offset = offset - (unsigned long)addr;
+
+ /* Check we can represent the target in the instruction format */
+ if (!is_offset_in_branch_range(offset))
+ return 1;
+
+ /* Mask out the flags and target, so they don't step on each other. */
+ *instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC));
+
+ return 0;
+}
+
int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
unsigned long target, int flags);
int patch_branch(u32 *addr, unsigned long target, int flags);
@@ -87,7 +132,7 @@ bool is_conditional_branch(ppc_inst_t instr);
static inline unsigned long ppc_function_entry(void *func)
{
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
u32 *insn = func;
/*
@@ -112,7 +157,7 @@ static inline unsigned long ppc_function_entry(void *func)
return (unsigned long)(insn + 2);
else
return (unsigned long)func;
-#elif defined(PPC64_ELF_ABI_v1)
+#elif defined(CONFIG_PPC64_ELF_ABI_V1)
/*
* On PPC64 ABIv1 the function pointer actually points to the
* function's descriptor. The first entry in the descriptor is the
@@ -126,7 +171,7 @@ static inline unsigned long ppc_function_entry(void *func)
static inline unsigned long ppc_global_function_entry(void *func)
{
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
/* PPC64 ABIv2 the global entry point is at the address */
return (unsigned long)func;
#else
@@ -143,7 +188,7 @@ static inline unsigned long ppc_global_function_entry(void *func)
static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
{
unsigned long addr;
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
/* check for dot variant */
char dot_name[1 + KSYM_NAME_LEN];
bool dot_appended = false;
@@ -164,7 +209,7 @@ static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
if (!addr && dot_appended)
/* Let's try the original non-dot symbol lookup */
addr = kallsyms_lookup_name(name);
-#elif defined(PPC64_ELF_ABI_v2)
+#elif defined(CONFIG_PPC64_ELF_ABI_V2)
addr = kallsyms_lookup_name(name);
if (addr)
addr = ppc_function_entry((void *)addr);
@@ -174,14 +219,13 @@ static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
return addr;
}
-#ifdef CONFIG_PPC64
/*
* Some instruction encodings commonly used in dynamic ftracing
* and function live patching.
*/
/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#define R2_STACK_OFFSET 24
#else
#define R2_STACK_OFFSET 40
@@ -191,6 +235,5 @@ static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
/* usually preceded by a mflr r0 */
#define PPC_INST_STD_LR PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
-#endif /* CONFIG_PPC64 */
#endif /* _ASM_POWERPC_CODE_PATCHING_H */
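The new inline create_branch() above packs a signed 26-bit byte offset (LI shifted left by two) into the 0x03fffffc bits of the instruction. For reference, decoding that field back into an offset looks roughly like this (a stand-alone sketch, not kernel code):

#include <stdint.h>
#include <stdio.h>

static long branch_offset(uint32_t insn)
{
	int32_t imm = insn & 0x03fffffc;	/* LI:'0b00', a 26-bit byte offset */

	if (imm & 0x02000000)			/* sign bit of the 26-bit field */
		imm -= 0x04000000;		/* sign-extend */
	return imm;
}

int main(void)
{
	/* 0x48000000 | 0x100 encodes "b .+0x100" */
	printf("%ld\n", branch_offset(0x48000100));	/* prints 256 */
	return 0;
}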
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 7afc96fb6524..dda4091fd012 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -8,21 +8,20 @@
#include <linux/types.h>
#include <linux/sched.h>
+#define compat_ipc_pid_t compat_ipc_pid_t
+typedef u16 compat_ipc_pid_t;
+
+#define compat_ipc64_perm compat_ipc64_perm
+
#include <asm-generic/compat.h>
-#define COMPAT_USER_HZ 100
#ifdef __BIG_ENDIAN__
#define COMPAT_UTS_MACHINE "ppc\0\0"
#else
#define COMPAT_UTS_MACHINE "ppcle\0\0"
#endif
-typedef u32 __compat_uid_t;
-typedef u32 __compat_gid_t;
-typedef u32 compat_dev_t;
typedef s16 compat_nlink_t;
-typedef u16 compat_ipc_pid_t;
-typedef __kernel_fsid_t compat_fsid_t;
struct compat_stat {
compat_dev_t st_dev;
@@ -44,45 +43,6 @@ struct compat_stat {
u32 __unused4[2];
};
-struct compat_flock {
- short l_type;
- short l_whence;
- compat_off_t l_start;
- compat_off_t l_len;
- compat_pid_t l_pid;
-};
-
-#define F_GETLK64 12 /* using 'struct flock64' */
-#define F_SETLK64 13
-#define F_SETLKW64 14
-
-struct compat_flock64 {
- short l_type;
- short l_whence;
- compat_loff_t l_start;
- compat_loff_t l_len;
- compat_pid_t l_pid;
-};
-
-struct compat_statfs {
- int f_type;
- int f_bsize;
- int f_blocks;
- int f_bfree;
- int f_bavail;
- int f_files;
- int f_ffree;
- compat_fsid_t f_fsid;
- int f_namelen; /* SunOS ignores this field. */
- int f_frsize;
- int f_flags;
- int f_spare[4];
-};
-
-#define COMPAT_RLIM_INFINITY 0xffffffff
-
-#define COMPAT_OFF_T_MAX 0x7fffffff
-
/*
* ipc64_perm is actually 32/64bit clean but since the compat layer refers to
* it we may as well define it.
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index e85c849214a2..549eb6dd146f 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -440,6 +440,10 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
CPU_FTR_P9_TM_HV_ASSIST | \
CPU_FTR_P9_TM_XER_SO_BUG)
+#define CPU_FTRS_POWER9_DD2_3 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
+ CPU_FTR_P9_TM_HV_ASSIST | \
+ CPU_FTR_P9_TM_XER_SO_BUG | \
+ CPU_FTR_DAWR)
#define CPU_FTRS_POWER10 (CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -469,14 +473,16 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | CPU_FTRS_POWER9 | \
- CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | CPU_FTRS_POWER10)
+ CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | \
+ CPU_FTRS_POWER9_DD2_3 | CPU_FTRS_POWER10)
#else
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \
CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \
CPU_FTRS_POWER8 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | CPU_FTRS_POWER9 | \
- CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | CPU_FTRS_POWER10)
+ CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | \
+ CPU_FTRS_POWER9_DD2_3 | CPU_FTRS_POWER10)
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
#endif
#else
@@ -541,14 +547,16 @@ enum {
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_POSSIBLE & ~CPU_FTR_HVMODE & CPU_FTRS_POWER7 & \
CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER9 & \
- CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_DT_CPU_BASE)
+ CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_POWER9_DD2_2 & \
+ CPU_FTRS_POWER10 & CPU_FTRS_DT_CPU_BASE)
#else
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \
CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \
CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \
~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & CPU_FTRS_POWER9 & \
- CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_DT_CPU_BASE)
+ CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_POWER9_DD2_2 & \
+ CPU_FTRS_POWER10 & CPU_FTRS_DT_CPU_BASE)
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
#endif
#else
diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
index 4265d5e95c2c..13bf6dee8e2d 100644
--- a/arch/powerpc/include/asm/drmem.h
+++ b/arch/powerpc/include/asm/drmem.h
@@ -23,6 +23,9 @@ struct drmem_lmb_info {
u64 lmb_size;
};
+struct device_node;
+struct property;
+
extern struct drmem_lmb_info *drmem_info;
static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index bd513fd49be9..514dd056c2c8 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -333,8 +333,6 @@ static inline bool eeh_enabled(void)
static inline void eeh_show_enabled(void) { }
-static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { }
-
static inline int eeh_check_failure(const volatile void __iomem *token)
{
return 0;
@@ -354,11 +352,7 @@ static inline int eeh_phb_pe_create(struct pci_controller *phb) { return 0; }
#endif /* CONFIG_EEH */
#if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_EEH)
-void pseries_eeh_init_edev(struct pci_dn *pdn);
void pseries_eeh_init_edev_recursive(struct pci_dn *pdn);
-#else
-static inline void pseries_eeh_add_device_early(struct pci_dn *pdn) { }
-static inline void pseries_eeh_add_device_tree_early(struct pci_dn *pdn) { }
#endif
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 971589a21bc0..79f1c480b5eb 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -160,7 +160,7 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
* even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes
*/
-#define ARCH_DLINFO \
+#define COMMON_ARCH_DLINFO \
do { \
/* Handle glibc compatibility. */ \
NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
@@ -173,6 +173,18 @@ do { \
ARCH_DLINFO_CACHE_GEOMETRY; \
} while (0)
+#define ARCH_DLINFO \
+do { \
+ COMMON_ARCH_DLINFO; \
+ NEW_AUX_ENT(AT_MINSIGSTKSZ, get_min_sigframe_size()); \
+} while (0)
+
+#define COMPAT_ARCH_DLINFO \
+do { \
+ COMMON_ARCH_DLINFO; \
+ NEW_AUX_ENT(AT_MINSIGSTKSZ, get_min_sigframe_size_compat()); \
+} while (0)
+
/* Relocate the kernel image to @final_address */
void relocate(unsigned long final_address);
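The new ARCH_DLINFO/COMPAT_ARCH_DLINFO entries above export AT_MINSIGSTKSZ to userspace through the auxiliary vector. Userspace would typically consume it as below (illustrative only; the fallback define guards older libc headers and uses the generic UAPI value):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef AT_MINSIGSTKSZ
#define AT_MINSIGSTKSZ 51
#endif

int main(void)
{
	unsigned long sz = getauxval(AT_MINSIGSTKSZ);

	printf("minimum signal stack size: %lu\n", sz);
	return 0;
}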
diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h
index 81bcb9abb371..27f9e11eda28 100644
--- a/arch/powerpc/include/asm/fadump-internal.h
+++ b/arch/powerpc/include/asm/fadump-internal.h
@@ -50,7 +50,7 @@ struct fadump_crash_info_header {
u64 elfcorehdr_addr;
u32 crashing_cpu;
struct pt_regs regs;
- struct cpumask online_mask;
+ struct cpumask cpu_mask;
};
struct fadump_memory_range {
diff --git a/arch/powerpc/include/asm/fsl_85xx_cache_sram.h b/arch/powerpc/include/asm/fsl_85xx_cache_sram.h
deleted file mode 100644
index 0235a0447baa..000000000000
--- a/arch/powerpc/include/asm/fsl_85xx_cache_sram.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2009 Freescale Semiconductor, Inc.
- *
- * Cache SRAM handling for QorIQ platform
- *
- * Author: Vivek Mahajan <vivek.mahajan@freescale.com>
-
- * This file is derived from the original work done
- * by Sylvain Munaut for the Bestcomm SRAM allocator.
- */
-
-#ifndef __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__
-#define __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__
-
-#include <asm/rheap.h>
-#include <linux/spinlock.h>
-
-/*
- * Cache-SRAM
- */
-
-struct mpc85xx_cache_sram {
- phys_addr_t base_phys;
- void *base_virt;
- unsigned int size;
- rh_info_t *rh;
- spinlock_t lock;
-};
-
-extern void mpc85xx_cache_sram_free(void *ptr);
-extern void *mpc85xx_cache_sram_alloc(unsigned int size,
- phys_addr_t *phys, unsigned int align);
-
-#endif /* __AMS_POWERPC_FSL_85XX_CACHE_SRAM_H__ */
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index d83758acd1c7..3cee7115441b 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -64,7 +64,7 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
* those.
*/
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
/* We need to skip past the initial dot, and the __se_sys alias */
@@ -83,10 +83,10 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name
(!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
(!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
}
-#endif /* PPC64_ELF_ABI_v1 */
+#endif /* CONFIG_PPC64_ELF_ABI_V1 */
#endif /* CONFIG_FTRACE_SYSCALLS */
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64) && defined(CONFIG_FUNCTION_TRACER)
#include <asm/paca.h>
static inline void this_cpu_disable_ftrace(void)
@@ -110,11 +110,13 @@ static inline u8 this_cpu_get_ftrace_enabled(void)
return get_paca()->ftrace_enabled;
}
+void ftrace_free_init_tramp(void);
#else /* CONFIG_PPC64 */
static inline void this_cpu_disable_ftrace(void) { }
static inline void this_cpu_enable_ftrace(void) { }
static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { }
static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
+static inline void ftrace_free_init_tramp(void) { }
#endif /* CONFIG_PPC64 */
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 8a5674fd120d..32ce0fb7548f 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -24,7 +24,7 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
unsigned long len)
{
- if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled())
+ if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU) && !radix_enabled())
return slice_is_hugepage_only_range(mm, addr, len);
return 0;
}
diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h
index 80b6d74146c6..b49aae9f6f27 100644
--- a/arch/powerpc/include/asm/inst.h
+++ b/arch/powerpc/include/asm/inst.h
@@ -158,13 +158,10 @@ static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], ppc_inst_t x)
__str; \
})
-static inline int copy_inst_from_kernel_nofault(ppc_inst_t *inst, u32 *src)
+static inline int __copy_inst_from_kernel_nofault(ppc_inst_t *inst, u32 *src)
{
unsigned int val, suffix;
- if (unlikely(!is_kernel_addr((unsigned long)src)))
- return -ERANGE;
-
/* See https://github.com/ClangBuiltLinux/linux/issues/1521 */
#if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 140000
val = suffix = 0;
@@ -181,4 +178,12 @@ Efault:
return -EFAULT;
}
+static inline int copy_inst_from_kernel_nofault(ppc_inst_t *inst, u32 *src)
+{
+ if (unlikely(!is_kernel_addr((unsigned long)src)))
+ return -ERANGE;
+
+ return __copy_inst_from_kernel_nofault(inst, src);
+}
+
#endif /* _ASM_POWERPC_INST_H */
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index f964ef5c57d8..b14f54d789d2 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -324,22 +324,46 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
}
#endif
+ /* If data relocations are enabled, it's safe to use nmi_enter() */
+ if (mfmsr() & MSR_DR) {
+ nmi_enter();
+ return;
+ }
+
/*
- * Do not use nmi_enter() for pseries hash guest taking a real-mode
+ * But do not use nmi_enter() for pseries hash guest taking a real-mode
* NMI because not everything it touches is within the RMA limit.
*/
- if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
- !firmware_has_feature(FW_FEATURE_LPAR) ||
- radix_enabled() || (mfmsr() & MSR_DR))
- nmi_enter();
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+ firmware_has_feature(FW_FEATURE_LPAR) &&
+ !radix_enabled())
+ return;
+
+ /*
+ * Likewise, don't use it if we have some form of instrumentation (like
+ * KASAN shadow) that is not safe to access in real mode (even on radix)
+ */
+ if (IS_ENABLED(CONFIG_KASAN))
+ return;
+
+ /* Otherwise, it should be safe to call it */
+ nmi_enter();
}
static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
- if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
- !firmware_has_feature(FW_FEATURE_LPAR) ||
- radix_enabled() || (mfmsr() & MSR_DR))
+ if (mfmsr() & MSR_DR) {
+ // nmi_exit if relocations are on
nmi_exit();
+ } else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+ firmware_has_feature(FW_FEATURE_LPAR) &&
+ !radix_enabled()) {
+ // no nmi_exit for a pseries hash guest taking a real mode exception
+ } else if (IS_ENABLED(CONFIG_KASAN)) {
+ // no nmi_exit for KASAN in real mode
+ } else {
+ nmi_exit();
+ }
/*
* nmi does not call nap_adjust_return because nmi should not create
@@ -407,7 +431,8 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
* Specific handlers may have additional restrictions.
*/
#define DEFINE_INTERRUPT_HANDLER_RAW(func) \
-static __always_inline long ____##func(struct pt_regs *regs); \
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs); \
\
interrupt_handler long func(struct pt_regs *regs) \
{ \
@@ -421,7 +446,8 @@ interrupt_handler long func(struct pt_regs *regs) \
} \
NOKPROBE_SYMBOL(func); \
\
-static __always_inline long ____##func(struct pt_regs *regs)
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs)
/**
* DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
@@ -541,7 +567,8 @@ static __always_inline void ____##func(struct pt_regs *regs)
* body with a pair of curly brackets.
*/
#define DEFINE_INTERRUPT_HANDLER_NMI(func) \
-static __always_inline long ____##func(struct pt_regs *regs); \
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs); \
\
interrupt_handler long func(struct pt_regs *regs) \
{ \
@@ -558,7 +585,8 @@ interrupt_handler long func(struct pt_regs *regs) \
} \
NOKPROBE_SYMBOL(func); \
\
-static __always_inline long ____##func(struct pt_regs *regs)
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs)
/* Interrupt handlers */
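The restructured checks above all answer one question: is it safe to call nmi_enter()/nmi_exit() from this context? Condensed as a predicate, using the same kernel helpers the hunk relies on (a hypothetical helper for illustration, not code from the patch):

static bool nmi_accounting_is_safe(void)
{
	if (mfmsr() & MSR_DR)			/* data relocation on: always safe */
		return true;
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
	    firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
		return false;			/* pseries hash guest in real mode */
	if (IS_ENABLED(CONFIG_KASAN))
		return false;			/* KASAN shadow unreachable in real mode */
	return true;
}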
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index fee979d3a1aa..c5a5f7c9b231 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -38,8 +38,6 @@ extern struct pci_dev *isa_bridge_pcidev;
#define SIO_CONFIG_RA 0x398
#define SIO_CONFIG_RD 0x399
-#define SLOW_DOWN_IO
-
/* 32 bits uses slightly different variables for the various IO
* bases. Most of this file only uses _IO_BASE though which we
* define properly based on the platform
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index d7912b66c874..7e29c73e3dd4 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -51,13 +51,11 @@ struct iommu_table_ops {
int (*xchg_no_kill)(struct iommu_table *tbl,
long index,
unsigned long *hpa,
- enum dma_data_direction *direction,
- bool realmode);
+ enum dma_data_direction *direction);
void (*tce_kill)(struct iommu_table *tbl,
unsigned long index,
- unsigned long pages,
- bool realmode);
+ unsigned long pages);
__be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
#endif
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
index 3c478e5ef24c..a6be4025cba2 100644
--- a/arch/powerpc/include/asm/kasan.h
+++ b/arch/powerpc/include/asm/kasan.h
@@ -30,9 +30,31 @@
#define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
+#ifdef CONFIG_PPC32
#define KASAN_SHADOW_END (-(-KASAN_SHADOW_START >> KASAN_SHADOW_SCALE_SHIFT))
+#elif defined(CONFIG_PPC_BOOK3S_64)
+/*
+ * The shadow ends before the highest accessible address
+ * because we don't need a shadow for the shadow. Instead:
+ * c00e000000000000 >> 3 + a80e000000000000 = c00fc00000000000
+ */
+#define KASAN_SHADOW_END 0xc00fc00000000000UL
+#endif
#ifdef CONFIG_KASAN
+#ifdef CONFIG_PPC_BOOK3S_64
+DECLARE_STATIC_KEY_FALSE(powerpc_kasan_enabled_key);
+
+static __always_inline bool kasan_arch_is_ready(void)
+{
+ if (static_branch_likely(&powerpc_kasan_enabled_key))
+ return true;
+ return false;
+}
+
+#define kasan_arch_is_ready kasan_arch_is_ready
+#endif
+
void kasan_early_init(void);
void kasan_mmu_init(void);
void kasan_init(void);
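The KASAN_SHADOW_END value above follows from the usual outline-KASAN mapping shadow(addr) = (addr >> 3) + KASAN_SHADOW_OFFSET, with the PPC64 offset added to Kconfig.debug earlier in this diff. A quick arithmetic check (stand-alone sketch):

#include <stdio.h>

int main(void)
{
	unsigned long offset = 0xa80e000000000000UL;	/* KASAN_SHADOW_OFFSET (PPC64) */
	unsigned long top    = 0xc00e000000000000UL;	/* vmemmap end / shadow start */

	/* (top >> 3) + offset == 0xc00fc00000000000, i.e. KASAN_SHADOW_END */
	printf("%#lx\n", (top >> 3) + offset);
	return 0;
}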
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index fb2237809d63..d751ddd08110 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -52,7 +52,6 @@ __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
return false;
}
-static inline void __kuap_assert_locked(void) { }
static inline void __kuap_lock(void) { }
static inline void __kuap_save_and_lock(struct pt_regs *regs) { }
static inline void kuap_user_restore(struct pt_regs *regs) { }
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index b6d31bff5209..c8882d9b86c2 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -14,9 +14,6 @@
#define XICS_MFRR 0xc
#define XICS_IPI 2 /* interrupt source # for IPIs */
-/* LPIDs we support with this build -- runtime limit may be lower */
-#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
-
/* Maximum number of threads per physical core */
#define MAX_SMT_THREADS 8
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index faf301d0dec0..2909a88acd16 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -36,7 +36,12 @@
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#include <asm/kvm_book3s_asm.h> /* for MAX_SMT_THREADS */
#define KVM_MAX_VCPU_IDS (MAX_SMT_THREADS * KVM_MAX_VCORES)
-#define KVM_MAX_NESTED_GUESTS KVMPPC_NR_LPIDS
+
+/*
+ * Limit the nested partition table to 4096 entries (because that's what
+ * hardware supports). Both guest and host use this value.
+ */
+#define KVM_MAX_NESTED_GUESTS_SHIFT 12
#else
#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
@@ -327,8 +332,7 @@ struct kvm_arch {
struct list_head uvmem_pfns;
struct mutex mmu_setup_lock; /* nests inside vcpu mutexes */
u64 l1_ptcr;
- int max_nested_lpid;
- struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];
+ struct idr kvm_nested_guest_idr;
/* This array can grow quite large, keep it at the end */
struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
#endif
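Aside, not part of the patch: replacing the fixed nested_guests[] array with an IDR means LPID slots are allocated and looked up dynamically, bounded by the 4096-entry (1 << KVM_MAX_NESTED_GUESTS_SHIFT) limit. A rough sketch of the idiom; the helper names are hypothetical, only the idr calls and the new field come from the hunk above:

	#include <linux/idr.h>

	static int nested_guest_insert(struct kvm *kvm, int lpid,
				       struct kvm_nested_guest *gp)
	{
		/* reserve exactly slot 'lpid'; fails if taken or out of range */
		return idr_alloc(&kvm->arch.kvm_nested_guest_idr, gp,
				 lpid, lpid + 1, GFP_KERNEL);
	}

	static struct kvm_nested_guest *nested_guest_find(struct kvm *kvm, int lpid)
	{
		if (lpid >= (1 << KVM_MAX_NESTED_GUESTS_SHIFT))
			return NULL;
		return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid);
	}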
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 838d4cb460b7..9f625af3b65b 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -177,8 +177,6 @@ extern void kvmppc_setup_partition_table(struct kvm *kvm);
extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce_64 *args);
-extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
- struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages) \
(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
(stt)->size, (ioba), (npages)) ? \
@@ -685,7 +683,7 @@ extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
-extern void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
+extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
@@ -723,7 +721,7 @@ static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 ir
int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
-static inline void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { }
+static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { return true; }
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{ return 0; }
@@ -789,13 +787,6 @@ long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
unsigned long slb_v, unsigned int status, bool data);
-unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
-unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
-unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
-int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr);
-int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
-int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);
/*
@@ -877,7 +868,6 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
struct kvm_dirty_tlb *cfg);
long kvmppc_alloc_lpid(void);
-void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);
diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h
index 1f00d2891d69..b71b9582e754 100644
--- a/arch/powerpc/include/asm/linkage.h
+++ b/arch/powerpc/include/asm/linkage.h
@@ -4,7 +4,7 @@
#include <asm/types.h>
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
#define cond_syscall(x) \
asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \
"\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n")
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index 1c60094ea0cd..d044a1fd4f44 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -7,17 +7,9 @@
#ifndef _ASM_POWERPC_LIVEPATCH_H
#define _ASM_POWERPC_LIVEPATCH_H
-#include <linux/module.h>
-#include <linux/ftrace.h>
+#include <linux/sched.h>
#include <linux/sched/task_stack.h>
-#ifdef CONFIG_LIVEPATCH
-static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
-{
- ftrace_instruction_pointer_set(fregs, ip);
-}
-#endif /* CONFIG_LIVEPATCH */
-
#ifdef CONFIG_LIVEPATCH_64
static inline void klp_init_thread_info(struct task_struct *p)
{
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b8527a74bd4d..3f25bd3e14eb 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -34,15 +34,10 @@ extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
unsigned long ua, unsigned long size);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
- struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa);
-extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
- unsigned long ua, unsigned int pageshift, unsigned long *hpa);
-extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index 857d9ff24295..09e2ffd360bb 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -41,10 +41,8 @@ struct mod_arch_specific {
#ifdef CONFIG_DYNAMIC_FTRACE
unsigned long tramp;
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
unsigned long tramp_regs;
#endif
-#endif
/* List of BUG addresses, source line numbers and filenames */
struct list_head bug_list;
diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h
index c08d25e3e626..698935d4f72d 100644
--- a/arch/powerpc/include/asm/nohash/tlbflush.h
+++ b/arch/powerpc/include/asm/nohash/tlbflush.h
@@ -30,7 +30,6 @@ struct mm_struct;
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifdef CONFIG_PPC_8xx
static inline void local_flush_tlb_mm(struct mm_struct *mm)
@@ -45,7 +44,18 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned lon
{
asm volatile ("tlbie %0; sync" : : "r" (vmaddr) : "memory");
}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ start &= PAGE_MASK;
+
+ if (end - start <= PAGE_SIZE)
+ asm volatile ("tlbie %0; sync" : : "r" (start) : "memory");
+ else
+ asm volatile ("sync; tlbia; isync" : : : "memory");
+}
#else
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
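Aside, not part of the patch: the new 8xx inline simply picks between invalidating one page and flushing the whole TLB. Two example calls, assuming 4K pages and made-up addresses:

	static void flush_example(void)
	{
		flush_tlb_kernel_range(0xc8000000, 0xc8000400);	/* span <= PAGE_SIZE: one tlbie */
		flush_tlb_kernel_range(0xc8000000, 0xc8042000);	/* larger span: global tlbia */
	}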
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 8330968ca346..4d7aaab82702 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -12,6 +12,7 @@
#ifdef CONFIG_PPC64
+#include <linux/cache.h>
#include <linux/string.h>
#include <asm/types.h>
#include <asm/lppaca.h>
@@ -152,16 +153,9 @@ struct paca_struct {
struct tlb_core_data tcd;
#endif /* CONFIG_PPC_BOOK3E */
-#ifdef CONFIG_PPC_BOOK3S
#ifdef CONFIG_PPC_64S_HASH_MMU
-#ifdef CONFIG_PPC_MM_SLICES
unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
-#else
- u16 mm_ctx_user_psize;
- u16 mm_ctx_sllp;
-#endif
-#endif
#endif
/*
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index f2c5c26869f1..e5f75c70eda8 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -216,6 +216,9 @@ static inline bool pfn_valid(unsigned long pfn)
#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
+
+#define VIRTUAL_WARN_ON(x) WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x))
+
/*
* gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
* with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
@@ -223,13 +226,13 @@ static inline bool pfn_valid(unsigned long pfn)
*/
#define __va(x) \
({ \
- VIRTUAL_BUG_ON((unsigned long)(x) >= PAGE_OFFSET); \
+ VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET); \
(void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET); \
})
#define __pa(x) \
({ \
- VIRTUAL_BUG_ON((unsigned long)(x) < PAGE_OFFSET); \
+ VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET); \
(unsigned long)(x) & 0x0fffffffffffffffUL; \
})
@@ -333,6 +336,5 @@ static inline unsigned long kaslr_offset(void)
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
-#include <asm/slice.h>
#endif /* _ASM_POWERPC_PAGE_H */
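Aside, not part of the patch: a worked example of the OR/AND form that VIRTUAL_WARN_ON now guards, assuming the usual 64-bit PAGE_OFFSET of 0xc000000000000000:

	static void va_pa_example(void)
	{
		void *va = __va(0x0000000001234000UL);	/* 0x1234000 | PAGE_OFFSET -> 0xc000000001234000 */
		phys_addr_t pa = __pa(va);		/* & 0x0fffffffffffffff  -> 0x0000000001234000 */

		(void)pa;
		__pa((void *)0x1000UL);			/* below PAGE_OFFSET: with CONFIG_DEBUG_VIRTUAL
							 * this now WARNs instead of BUGging */
	}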
diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h
index 8abfb8f7c33d..42cc321ed754 100644
--- a/arch/powerpc/include/asm/parport.h
+++ b/arch/powerpc/include/asm/parport.h
@@ -11,7 +11,7 @@
#define _ASM_POWERPC_PARPORT_H
#ifdef __KERNEL__
-#include <asm/prom.h>
+#include <linux/of_irq.h>
static int parport_pc_find_nonpci_ports (int autoirq, int autodma)
{
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 90f488fa4c17..c85f901227c9 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -170,10 +170,10 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
return bus->sysdata;
}
-#ifndef CONFIG_PPC64
-
extern int pci_device_from_OF_node(struct device_node *node,
u8 *bus, u8 *devfn);
+#ifndef CONFIG_PPC64
+
extern void pci_create_OF_bus_map(void);
#else /* CONFIG_PPC64 */
@@ -235,16 +235,6 @@ struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev);
void remove_sriov_vf_pdns(struct pci_dev *pdev);
#endif
-static inline int pci_device_from_OF_node(struct device_node *np,
- u8 *bus, u8 *devfn)
-{
- if (!PCI_DN(np))
- return -ENODEV;
- *bus = PCI_DN(np)->busno;
- *devfn = PCI_DN(np)->devfn;
- return 0;
-}
-
#if defined(CONFIG_EEH)
static inline struct eeh_dev *pdn_to_eeh_dev(struct pci_dn *pdn)
{
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index b3f480799352..8afc92860dbb 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/irq.h>
+#include <linux/of.h>
#include <misc/cxl-base.h>
#include <asm/opal-api.h>
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 82f1f0041c6f..89beabf5325c 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -127,8 +127,53 @@
/* opcode and xopcode for instructions */
-#define OP_TRAP 3
-#define OP_TRAP_64 2
+#define OP_PREFIX 1
+#define OP_TRAP_64 2
+#define OP_TRAP 3
+#define OP_SC 17
+#define OP_19 19
+#define OP_31 31
+#define OP_LWZ 32
+#define OP_LWZU 33
+#define OP_LBZ 34
+#define OP_LBZU 35
+#define OP_STW 36
+#define OP_STWU 37
+#define OP_STB 38
+#define OP_STBU 39
+#define OP_LHZ 40
+#define OP_LHZU 41
+#define OP_LHA 42
+#define OP_LHAU 43
+#define OP_STH 44
+#define OP_STHU 45
+#define OP_LMW 46
+#define OP_STMW 47
+#define OP_LFS 48
+#define OP_LFSU 49
+#define OP_LFD 50
+#define OP_LFDU 51
+#define OP_STFS 52
+#define OP_STFSU 53
+#define OP_STFD 54
+#define OP_STFDU 55
+#define OP_LQ 56
+#define OP_LD 58
+#define OP_STD 62
+
+#define OP_19_XOP_RFID 18
+#define OP_19_XOP_RFMCI 38
+#define OP_19_XOP_RFDI 39
+#define OP_19_XOP_RFI 50
+#define OP_19_XOP_RFCI 51
+#define OP_19_XOP_RFSCV 82
+#define OP_19_XOP_HRFID 274
+#define OP_19_XOP_URFID 306
+#define OP_19_XOP_STOP 370
+#define OP_19_XOP_DOZE 402
+#define OP_19_XOP_NAP 434
+#define OP_19_XOP_SLEEP 466
+#define OP_19_XOP_RVWINKLE 498
#define OP_31_XOP_TRAP 4
#define OP_31_XOP_LDX 21
@@ -150,6 +195,8 @@
#define OP_31_XOP_LHZUX 311
#define OP_31_XOP_MSGSNDP 142
#define OP_31_XOP_MSGCLRP 174
+#define OP_31_XOP_MTMSR 146
+#define OP_31_XOP_MTMSRD 178
#define OP_31_XOP_TLBIE 306
#define OP_31_XOP_MFSPR 339
#define OP_31_XOP_LWAX 341
@@ -208,42 +255,6 @@
/* VMX Vector Store Instructions */
#define OP_31_XOP_STVX 231
-/* Prefixed Instructions */
-#define OP_PREFIX 1
-
-#define OP_31 31
-#define OP_LWZ 32
-#define OP_STFS 52
-#define OP_STFSU 53
-#define OP_STFD 54
-#define OP_STFDU 55
-#define OP_LD 58
-#define OP_LWZU 33
-#define OP_LBZ 34
-#define OP_LBZU 35
-#define OP_STW 36
-#define OP_STWU 37
-#define OP_STD 62
-#define OP_STB 38
-#define OP_STBU 39
-#define OP_LHZ 40
-#define OP_LHZU 41
-#define OP_LHA 42
-#define OP_LHAU 43
-#define OP_STH 44
-#define OP_STHU 45
-#define OP_LMW 46
-#define OP_STMW 47
-#define OP_LFS 48
-#define OP_LFSU 49
-#define OP_LFD 50
-#define OP_LFDU 51
-#define OP_STFS 52
-#define OP_STFSU 53
-#define OP_STFD 54
-#define OP_STFDU 55
-#define OP_LQ 56
-
/* sorted alphabetically */
#define PPC_INST_BCCTR_FLUSH 0x4c400420
#define PPC_INST_COPY 0x7c20060c
@@ -285,13 +296,6 @@
#define PPC_INST_TRECHKPT 0x7c0007dd
#define PPC_INST_TRECLAIM 0x7c00075d
#define PPC_INST_TSR 0x7c0005dd
-#define PPC_INST_LD 0xe8000000
-#define PPC_INST_STD 0xf8000000
-#define PPC_INST_ADDIS 0x3c000000
-#define PPC_INST_ADD 0x7c000214
-#define PPC_INST_DIVD 0x7c0003d2
-#define PPC_INST_BRANCH 0x48000000
-#define PPC_INST_BL 0x48000001
#define PPC_INST_BRANCH_COND 0x40800000
/* Prefixes */
@@ -352,6 +356,10 @@
#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
+/* LI Field */
+#define PPC_LI_MASK 0x03fffffc
+#define PPC_LI(v) ((v) & PPC_LI_MASK)
+
/*
* Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
* larx with EH set as an illegal instruction.
@@ -460,10 +468,10 @@
(0x100000c7 | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21)
#define PPC_RAW_VCMPEQUB_RC(vrt, vra, vrb) \
(0x10000006 | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21)
-#define PPC_RAW_LD(r, base, i) (PPC_INST_LD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_DS(i))
+#define PPC_RAW_LD(r, base, i) (0xe8000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_DS(i))
#define PPC_RAW_LWZ(r, base, i) (0x80000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LWZX(t, a, b) (0x7c00002e | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_RAW_STD(r, base, i) (PPC_INST_STD | ___PPC_RS(r) | ___PPC_RA(base) | IMM_DS(i))
+#define PPC_RAW_STD(r, base, i) (0xf8000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_DS(i))
#define PPC_RAW_STDCX(s, a, b) (0x7c0001ad | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_LFSX(t, a, b) (0x7c00042e | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_STFSX(s, a, b) (0x7c00052e | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
@@ -474,8 +482,8 @@
#define PPC_RAW_ADDE(t, a, b) (0x7c000114 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_ADDZE(t, a) (0x7c000194 | ___PPC_RT(t) | ___PPC_RA(a))
#define PPC_RAW_ADDME(t, a) (0x7c0001d4 | ___PPC_RT(t) | ___PPC_RA(a))
-#define PPC_RAW_ADD(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_RAW_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
+#define PPC_RAW_ADD(t, a, b) (0x7c000214 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_ADD_DOT(t, a, b) (0x7c000214 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
#define PPC_RAW_ADDC(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_ADDC_DOT(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
#define PPC_RAW_NOP() PPC_RAW_ORI(0, 0, 0)
@@ -571,7 +579,8 @@
#define PPC_RAW_MTSPR(spr, d) (0x7c0003a6 | ___PPC_RS(d) | __PPC_SPR(spr))
#define PPC_RAW_EIEIO() (0x7c0006ac)
-#define PPC_RAW_BRANCH(addr) (PPC_INST_BRANCH | ((addr) & 0x03fffffc))
+#define PPC_RAW_BRANCH(offset) (0x48000000 | PPC_LI(offset))
+#define PPC_RAW_BL(offset) (0x48000001 | PPC_LI(offset))
/* Deal with instructions that older assemblers aren't aware of */
#define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH)
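Aside, not part of the patch: PPC_LI() keeps the signed, word-aligned 26-bit LI field of an I-form branch, which is all PPC_RAW_BRANCH()/PPC_RAW_BL() need. Worked encodings:

	static const u32 bl_fwd = PPC_RAW_BL(0x100);	/* 0x48000001 | (0x100 & PPC_LI_MASK) = 0x48000101 */
	static const u32 b_back = PPC_RAW_BRANCH(-8);	/* (-8 & PPC_LI_MASK) = 0x03fffff8 -> 0x4bfffff8,
							 * i.e. "b .-8": negative offsets wrap into LI */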
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 4dea2d963738..83c02f5a7f2a 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -149,7 +149,7 @@
#define __STK_REG(i) (112 + ((i)-14)*8)
#define STK_REG(i) __STK_REG(__REG_##i)
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#define STK_GOT 24
#define __STK_PARAM(i) (32 + ((i)-3)*8)
#else
@@ -158,7 +158,7 @@
#endif
#define STK_PARAM(i) __STK_PARAM(__REG_##i)
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#define _GLOBAL(name) \
.align 2 ; \
diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h
index c5d984700d24..6f66e358aa37 100644
--- a/arch/powerpc/include/asm/probes.h
+++ b/arch/powerpc/include/asm/probes.h
@@ -8,6 +8,7 @@
* Copyright IBM Corporation, 2012
*/
#include <linux/types.h>
+#include <asm/disassemble.h>
typedef u32 ppc_opcode_t;
#define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */
@@ -31,6 +32,41 @@ typedef u32 ppc_opcode_t;
#define MSR_SINGLESTEP (MSR_SE)
#endif
+static inline bool can_single_step(u32 inst)
+{
+ switch (get_op(inst)) {
+ case OP_TRAP_64: return false;
+ case OP_TRAP: return false;
+ case OP_SC: return false;
+ case OP_19:
+ switch (get_xop(inst)) {
+ case OP_19_XOP_RFID: return false;
+ case OP_19_XOP_RFMCI: return false;
+ case OP_19_XOP_RFDI: return false;
+ case OP_19_XOP_RFI: return false;
+ case OP_19_XOP_RFCI: return false;
+ case OP_19_XOP_RFSCV: return false;
+ case OP_19_XOP_HRFID: return false;
+ case OP_19_XOP_URFID: return false;
+ case OP_19_XOP_STOP: return false;
+ case OP_19_XOP_DOZE: return false;
+ case OP_19_XOP_NAP: return false;
+ case OP_19_XOP_SLEEP: return false;
+ case OP_19_XOP_RVWINKLE: return false;
+ }
+ break;
+ case OP_31:
+ switch (get_xop(inst)) {
+ case OP_31_XOP_TRAP: return false;
+ case OP_31_XOP_TRAP_64: return false;
+ case OP_31_XOP_MTMSR: return false;
+ case OP_31_XOP_MTMSRD: return false;
+ }
+ break;
+ }
+ return true;
+}
+
/* Enable single stepping for the current task */
static inline void enable_single_step(struct pt_regs *regs)
{
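Aside, not part of the patch: can_single_step() is meant for probe code that must decide whether an instruction can be stepped (or has to be rejected or emulated) safely. A hypothetical caller of that shape:

	static int check_probe_target(u32 insn)
	{
		if (!can_single_step(insn))
			return -EINVAL;	/* sc/rfid/mtmsrd etc. cannot be single-stepped */
		return 0;
	}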
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 39c25021030f..fdfaae194ddd 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -392,8 +392,6 @@ static inline void prefetchw(const void *x)
#define spin_lock_prefetch(x) prefetchw(x)
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
/* asm stubs */
extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 42f89e2d8f04..a03403695cd4 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -120,7 +120,7 @@ struct pt_regs
STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
#define STACK_FRAME_MARKER 12
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#define STACK_FRAME_MIN_SIZE 32
#else
#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 2835f6363228..1e8b2e04e626 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -417,7 +417,6 @@
#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define FSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
-#define HFSCR_PREFIX __MASK(FSCR_PREFIX_LG)
#define HFSCR_MSGP __MASK(FSCR_MSGP_LG)
#define HFSCR_TAR __MASK(FSCR_TAR_LG)
#define HFSCR_EBB __MASK(FSCR_EBB_LG)
@@ -474,8 +473,6 @@
#ifndef SPRN_LPID
#define SPRN_LPID 0x13F /* Logical Partition Identifier */
#endif
-#define LPID_RSVD_POWER7 0x3ff /* Reserved LPID for partn switching */
-#define LPID_RSVD 0xfff /* Reserved LPID for partn switching */
#define SPRN_HMER 0x150 /* Hypervisor maintenance exception reg */
#define HMER_DEBUG_TRIG (1ul << (63 - 17)) /* Debug trigger */
#define SPRN_HMEER 0x151 /* Hyp maintenance exception enable reg */
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
index 99e1c6de27bc..922d43700fb4 100644
--- a/arch/powerpc/include/asm/signal.h
+++ b/arch/powerpc/include/asm/signal.h
@@ -9,4 +9,9 @@
struct pt_regs;
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
+unsigned long get_min_sigframe_size_32(void);
+unsigned long get_min_sigframe_size_64(void);
+unsigned long get_min_sigframe_size(void);
+unsigned long get_min_sigframe_size_compat(void);
+
#endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h
deleted file mode 100644
index 0bdd9c62eca0..000000000000
--- a/arch/powerpc/include/asm/slice.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_SLICE_H
-#define _ASM_POWERPC_SLICE_H
-
-#ifdef CONFIG_PPC_BOOK3S_64
-#include <asm/book3s/64/slice.h>
-#endif
-
-#ifndef __ASSEMBLY__
-
-struct mm_struct;
-
-#ifdef CONFIG_PPC_MM_SLICES
-
-#ifdef CONFIG_HUGETLB_PAGE
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#endif
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-
-unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
- unsigned long flags, unsigned int psize,
- int topdown);
-
-unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
-
-void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
- unsigned long len, unsigned int psize);
-
-void slice_init_new_context_exec(struct mm_struct *mm);
-void slice_setup_new_exec(void);
-
-#else /* CONFIG_PPC_MM_SLICES */
-
-static inline void slice_init_new_context_exec(struct mm_struct *mm) {}
-
-static inline unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
-{
- return 0;
-}
-
-#endif /* CONFIG_PPC_MM_SLICES */
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_POWERPC_SLICE_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 60ab739a5e3b..f63505d74932 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -189,8 +189,6 @@ extern void __cpu_die(unsigned int cpu);
#define smp_setup_cpu_maps()
#define thread_group_shares_l2 0
#define thread_group_shares_l3 0
-static inline void inhibit_secondary_onlining(void) {}
-static inline void uninhibit_secondary_onlining(void) {}
static inline const struct cpumask *cpu_sibling_mask(int cpu)
{
return cpumask_of(cpu);
diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h
index 85580b30aba4..a02bd54b8948 100644
--- a/arch/powerpc/include/asm/svm.h
+++ b/arch/powerpc/include/asm/svm.h
@@ -10,6 +10,8 @@
#ifdef CONFIG_PPC_SVM
+#include <asm/reg.h>
+
static inline bool is_secure_guest(void)
{
return mfmsr() & MSR_S;
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 1f43ef696033..aee25e3ebf96 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -62,6 +62,15 @@ static inline void disable_kernel_altivec(void)
#else
static inline void save_altivec(struct task_struct *t) { }
static inline void __giveup_altivec(struct task_struct *t) { }
+static inline void enable_kernel_altivec(void)
+{
+ BUILD_BUG();
+}
+
+static inline void disable_kernel_altivec(void)
+{
+ BUILD_BUG();
+}
#endif
#ifdef CONFIG_VSX
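Aside, not part of the patch: BUILD_BUG() stubs like these only link cleanly if every caller is eliminated when CONFIG_ALTIVEC=n, typically via an IS_ENABLED() guard. A hypothetical caller following that pattern:

	static void copy_with_vmx(void *dst, const void *src, size_t len)
	{
		if (!IS_ENABLED(CONFIG_ALTIVEC))
			return;		/* dead-code elimination drops the calls below,
					 * so the BUILD_BUG() stubs are never emitted */
		enable_kernel_altivec();
		/* ... VMX-accelerated copy ... */
		disable_kernel_altivec();
	}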
diff --git a/arch/powerpc/include/asm/task_size_64.h b/arch/powerpc/include/asm/task_size_64.h
index 38fdf8041d12..5a709951c901 100644
--- a/arch/powerpc/include/asm/task_size_64.h
+++ b/arch/powerpc/include/asm/task_size_64.h
@@ -72,4 +72,12 @@
#define STACK_TOP_MAX TASK_SIZE_USER64
#define STACK_TOP (is_32bit_task() ? STACK_TOP_USER32 : STACK_TOP_USER64)
+#define arch_get_mmap_base(addr, base) \
+ (((addr) > DEFAULT_MAP_WINDOW) ? (base) + TASK_SIZE - DEFAULT_MAP_WINDOW : (base))
+
+#define arch_get_mmap_end(addr, len, flags) \
+ (((addr) > DEFAULT_MAP_WINDOW) || \
+ (((flags) & MAP_FIXED) && ((addr) + (len) > DEFAULT_MAP_WINDOW)) ? TASK_SIZE : \
+ DEFAULT_MAP_WINDOW)
+
#endif /* _ASM_POWERPC_TASK_SIZE_64_H */
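Aside, not part of the patch: how the two macros behave for an address hint above the default window, assuming 64K pages (DEFAULT_MAP_WINDOW = 128TB, TASK_SIZE = 4PB):

	static void mmap_window_example(struct mm_struct *mm)
	{
		unsigned long hint = 0x0000900000000000UL;	/* above the 128TB window */

		arch_get_mmap_end(hint, SZ_64K, 0);		/* -> TASK_SIZE: full 4PB usable */
		arch_get_mmap_end(0, SZ_64K, 0);		/* -> DEFAULT_MAP_WINDOW (128TB) */
		arch_get_mmap_base(hint, mm->mmap_base);	/* -> mmap_base + TASK_SIZE - 128TB */
	}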
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 924b2157882f..1e5643a9b1f2 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -24,6 +24,7 @@ extern unsigned long tb_ticks_per_jiffy;
extern unsigned long tb_ticks_per_usec;
extern unsigned long tb_ticks_per_sec;
extern struct clock_event_device decrementer_clockevent;
+extern u64 decrementer_max;
extern void generic_calibrate_decr(void);
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 36fcafb1fd6d..8a4d4f4d9749 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -111,14 +111,10 @@ static inline void unmap_cpu_from_node(unsigned long cpu) {}
#endif /* CONFIG_NUMA */
#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
-extern int find_and_online_cpu_nid(int cpu);
+void find_and_update_cpu_nid(int cpu);
extern int cpu_to_coregroup_id(int cpu);
#else
-static inline int find_and_online_cpu_nid(int cpu)
-{
- return 0;
-}
-
+static inline void find_and_update_cpu_nid(int cpu) {}
static inline int cpu_to_coregroup_id(int cpu)
{
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h
index 84078c28c1a2..93157a661dcc 100644
--- a/arch/powerpc/include/asm/types.h
+++ b/arch/powerpc/include/asm/types.h
@@ -11,14 +11,6 @@
#include <uapi/asm/types.h>
-#ifdef __powerpc64__
-#if defined(_CALL_ELF) && _CALL_ELF == 2
-#define PPC64_ELF_ABI_v2 1
-#else
-#define PPC64_ELF_ABI_v1 1
-#endif
-#endif /* __powerpc64__ */
-
#ifndef __ASSEMBLY__
typedef __vector128 vector128;
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 5eb462af6766..b1129b4ef57d 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -44,6 +44,7 @@
#define __ARCH_WANT_SYS_TIME
#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_NEWFSTATAT
+#define __ARCH_WANT_COMPAT_STAT
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#endif
#define __ARCH_WANT_SYS_FORK
diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h
index 83afcb6c194b..c36f71e01c0f 100644
--- a/arch/powerpc/include/asm/vas.h
+++ b/arch/powerpc/include/asm/vas.h
@@ -126,7 +126,7 @@ static inline void vas_user_win_add_mm_context(struct vas_user_win_ref *ref)
* Receive window attributes specified by the (in-kernel) owner of window.
*/
struct vas_rx_win_attr {
- void *rx_fifo;
+ u64 rx_fifo;
int rx_fifo_size;
int wcreds_max;
diff --git a/arch/powerpc/include/uapi/asm/auxvec.h b/arch/powerpc/include/uapi/asm/auxvec.h
index 7af21dc0e320..aa7c16215453 100644
--- a/arch/powerpc/include/uapi/asm/auxvec.h
+++ b/arch/powerpc/include/uapi/asm/auxvec.h
@@ -48,6 +48,8 @@
#define AT_L3_CACHESIZE 46
#define AT_L3_CACHEGEOMETRY 47
-#define AT_VECTOR_SIZE_ARCH 14 /* entries in ARCH_DLINFO */
+#define AT_MINSIGSTKSZ 51 /* stack needed for signal delivery */
+
+#define AT_VECTOR_SIZE_ARCH 15 /* entries in ARCH_DLINFO */
#endif
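Aside, not part of the patch: userspace is expected to prefer the new aux-vector entry over the fixed MINSIGSTKSZ constant when sizing an alternate signal stack. A userspace sketch using glibc's getauxval(), not kernel code:

	#include <sys/auxv.h>
	#include <signal.h>
	#include <stdlib.h>

	static int setup_sigstack(void)
	{
		size_t min = getauxval(AT_MINSIGSTKSZ);
		stack_t ss;

		if (min == 0)
			min = MINSIGSTKSZ;	/* older kernel: fall back to the ABI constant */

		ss.ss_sp = malloc(min);
		ss.ss_size = min;
		ss.ss_flags = 0;
		return ss.ss_sp ? sigaltstack(&ss, NULL) : -1;
	}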
diff --git a/arch/powerpc/include/uapi/asm/signal.h b/arch/powerpc/include/uapi/asm/signal.h
index 37d41d87c45b..a5dfe84f50ab 100644
--- a/arch/powerpc/include/uapi/asm/signal.h
+++ b/arch/powerpc/include/uapi/asm/signal.h
@@ -62,8 +62,13 @@ typedef struct {
#define SA_RESTORER 0x04000000U
+#ifdef __powerpc64__
+#define MINSIGSTKSZ 8192
+#define SIGSTKSZ 32768
+#else
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
+#endif
#include <asm-generic/signal-defs.h>
diff --git a/arch/powerpc/include/uapi/asm/stat.h b/arch/powerpc/include/uapi/asm/stat.h
index a28c9a1201fa..d50901664239 100644
--- a/arch/powerpc/include/uapi/asm/stat.h
+++ b/arch/powerpc/include/uapi/asm/stat.h
@@ -37,8 +37,8 @@ struct stat {
__kernel_mode_t st_mode;
unsigned short st_nlink;
#endif
- __kernel_uid_t st_uid;
- __kernel_gid_t st_gid;
+ __kernel_uid32_t st_uid;
+ __kernel_gid32_t st_gid;
unsigned long st_rdev;
long st_size;
unsigned long st_blksize;
diff --git a/arch/powerpc/include/uapi/asm/termbits.h b/arch/powerpc/include/uapi/asm/termbits.h
index ed18bc61f63d..21dc86dcb2f1 100644
--- a/arch/powerpc/include/uapi/asm/termbits.h
+++ b/arch/powerpc/include/uapi/asm/termbits.h
@@ -9,8 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
+#include <asm-generic/termbits-common.h>
+
typedef unsigned int tcflag_t;
/*
@@ -64,115 +64,72 @@ struct ktermios {
#define VDISCARD 16
/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IXON 0001000
-#define IXOFF 0002000
-#define IXANY 0004000
-#define IUCLC 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
+#define IXON 0x0200
+#define IXOFF 0x0400
+#define IUCLC 0x1000
+#define IMAXBEL 0x2000
+#define IUTF8 0x4000
/* c_oflag bits */
-#define OPOST 0000001
-#define ONLCR 0000002
-#define OLCUC 0000004
-
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-
-#define OFILL 00000100
-#define OFDEL 00000200
-#define NLDLY 00001400
-#define NL0 00000000
-#define NL1 00000400
-#define NL2 00001000
-#define NL3 00001400
-#define TABDLY 00006000
-#define TAB0 00000000
-#define TAB1 00002000
-#define TAB2 00004000
-#define TAB3 00006000
-#define XTABS 00006000 /* required by POSIX to == TAB3 */
-#define CRDLY 00030000
-#define CR0 00000000
-#define CR1 00010000
-#define CR2 00020000
-#define CR3 00030000
-#define FFDLY 00040000
-#define FF0 00000000
-#define FF1 00040000
-#define BSDLY 00100000
-#define BS0 00000000
-#define BS1 00100000
-#define VTDLY 00200000
-#define VT0 00000000
-#define VT1 00200000
+#define ONLCR 0x00002
+#define OLCUC 0x00004
+#define NLDLY 0x00300
+#define NL0 0x00000
+#define NL1 0x00100
+#define NL2 0x00200
+#define NL3 0x00300
+#define TABDLY 0x00c00
+#define TAB0 0x00000
+#define TAB1 0x00400
+#define TAB2 0x00800
+#define TAB3 0x00c00
+#define XTABS 0x00c00 /* required by POSIX to == TAB3 */
+#define CRDLY 0x03000
+#define CR0 0x00000
+#define CR1 0x01000
+#define CR2 0x02000
+#define CR3 0x03000
+#define FFDLY 0x04000
+#define FF0 0x00000
+#define FF1 0x04000
+#define BSDLY 0x08000
+#define BS0 0x00000
+#define BS1 0x08000
+#define VTDLY 0x10000
+#define VT0 0x00000
+#define VT1 0x10000
/* c_cflag bit meaning */
-#define CBAUD 0000377
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CBAUDEX 0000000
-#define B57600 00020
-#define B115200 00021
-#define B230400 00022
-#define B460800 00023
-#define B500000 00024
-#define B576000 00025
-#define B921600 00026
-#define B1000000 00027
-#define B1152000 00030
-#define B1500000 00031
-#define B2000000 00032
-#define B2500000 00033
-#define B3000000 00034
-#define B3500000 00035
-#define B4000000 00036
-#define BOTHER 00037
-
-#define CIBAUD 077600000
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
-
-#define CSIZE 00001400
-#define CS5 00000000
-#define CS6 00000400
-#define CS7 00001000
-#define CS8 00001400
-
-#define CSTOPB 00002000
-#define CREAD 00004000
-#define PARENB 00010000
-#define PARODD 00020000
-#define HUPCL 00040000
-
-#define CLOCAL 00100000
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
+#define CBAUD 0x000000ff
+#define CBAUDEX 0x00000000
+#define BOTHER 0x0000001f
+#define B57600 0x00000010
+#define B115200 0x00000011
+#define B230400 0x00000012
+#define B460800 0x00000013
+#define B500000 0x00000014
+#define B576000 0x00000015
+#define B921600 0x00000016
+#define B1000000 0x00000017
+#define B1152000 0x00000018
+#define B1500000 0x00000019
+#define B2000000 0x0000001a
+#define B2500000 0x0000001b
+#define B3000000 0x0000001c
+#define B3500000 0x0000001d
+#define B4000000 0x0000001e
+#define CSIZE 0x00000300
+#define CS5 0x00000000
+#define CS6 0x00000100
+#define CS7 0x00000200
+#define CS8 0x00000300
+#define CSTOPB 0x00000400
+#define CREAD 0x00000800
+#define PARENB 0x00001000
+#define PARODD 0x00002000
+#define HUPCL 0x00004000
+#define CLOCAL 0x00008000
+#define CIBAUD 0x00ff0000
/* c_lflag bits */
#define ISIG 0x00000080
@@ -192,17 +149,6 @@ struct ktermios {
#define IEXTEN 0x00000400
#define EXTPROC 0x10000000
-/* Values for the ACTION argument to `tcflow'. */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* Values for the QUEUE_SELECTOR argument to `tcflush'. */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
-
/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. */
#define TCSANOW 0
#define TCSADRAIN 1
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 4ddd161aef32..2e2a2a9bcf43 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -33,6 +33,17 @@ KASAN_SANITIZE_early_32.o := n
KASAN_SANITIZE_cputable.o := n
KASAN_SANITIZE_prom_init.o := n
KASAN_SANITIZE_btext.o := n
+KASAN_SANITIZE_paca.o := n
+KASAN_SANITIZE_setup_64.o := n
+KASAN_SANITIZE_mce.o := n
+KASAN_SANITIZE_mce_power.o := n
+
+# we have to be particularly careful in ppc64 to exclude code that
+# runs with translations off, as we cannot access the shadow with
+# translations off. However, ppc32 can sanitize this.
+ifdef CONFIG_PPC64
+KASAN_SANITIZE_traps.o := n
+endif
ifdef CONFIG_KASAN
CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
@@ -68,7 +79,7 @@ obj-$(CONFIG_PPC_BOOK3S_IDLE) += idle_book3s.o
procfs-y := proc_powerpc.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o
-obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y)
+obj-$(CONFIG_PPC_RTAS) += rtas_entry.o rtas.o rtas-rtc.o $(rtaspci-y-y)
obj-$(CONFIG_PPC_RTAS_DAEMON) += rtasd.o
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 9d9d56b574cc..8f69bb07e500 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -10,9 +10,9 @@
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
+#include <linux/of.h>
#include <asm/sections.h>
-#include <asm/prom.h>
#include <asm/btext.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -45,8 +45,7 @@ unsigned long disp_BAT[2] __initdata = {0, 0};
static unsigned char vga_font[cmapsz];
-int boot_text_mapped __force_data = 0;
-int force_printk_to_btext = 0;
+static int boot_text_mapped __force_data;
extern void rmci_on(void);
extern void rmci_off(void);
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 00b0992be3e7..f502337dd37d 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -18,7 +18,6 @@
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
-#include <asm/prom.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index ae0fdef0ac11..a5dbfccd2047 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -12,9 +12,9 @@
#include <linux/init.h>
#include <linux/export.h>
#include <linux/jump_label.h>
+#include <linux/of.h>
#include <asm/cputable.h>
-#include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */
#include <asm/mce.h>
#include <asm/mmu.h>
#include <asm/setup.h>
@@ -487,11 +487,29 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p9,
.platform = "power9",
},
- { /* Power9 DD2.2 or later */
+ { /* Power9 DD2.2 */
+ .pvr_mask = 0xffffefff,
+ .pvr_value = 0x004e0202,
+ .cpu_name = "POWER9 (raw)",
+ .cpu_features = CPU_FTRS_POWER9_DD2_2,
+ .cpu_user_features = COMMON_USER_POWER9,
+ .cpu_user_features2 = COMMON_USER2_POWER9,
+ .mmu_features = MMU_FTRS_POWER9,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .oprofile_cpu_type = "ppc64/power9",
+ .cpu_setup = __setup_cpu_power9,
+ .cpu_restore = __restore_cpu_power9,
+ .machine_check_early = __machine_check_early_realmode_p9,
+ .platform = "power9",
+ },
+ { /* Power9 DD2.3 or later */
.pvr_mask = 0xffff0000,
.pvr_value = 0x004e0000,
.cpu_name = "POWER9 (raw)",
- .cpu_features = CPU_FTRS_POWER9_DD2_2,
+ .cpu_features = CPU_FTRS_POWER9_DD2_3,
.cpu_user_features = COMMON_USER_POWER9,
.cpu_user_features2 = COMMON_USER2_POWER9,
.mmu_features = MMU_FTRS_POWER9,
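Aside, not part of the patch: a cpu_specs[] entry matches when (PVR & pvr_mask) == pvr_value and the first match wins, which is why the narrow DD2.2 entry has to sit before the 0xffff0000 catch-all. Worked values:

	static bool pvr_matches(const struct cpu_spec *s, unsigned int pvr)
	{
		return (pvr & s->pvr_mask) == s->pvr_value;	/* identify_cpu()-style test */
	}
	/* 0x004e0202 & 0xffffefff == 0x004e0202 -> the dedicated DD2.2 entry above
	 * 0x004e0203 & 0xffffefff == 0x004e0203 -> skips it, hits the DD2.3-or-later entry
	 */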
@@ -2025,7 +2043,7 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
* oprofile_cpu_type already has a value, then we are
* possibly overriding a real PVR with a logical one,
* and, in that case, keep the current value for
- * oprofile_cpu_type. Futhermore, let's ensure that the
+ * oprofile_cpu_type. Furthermore, let's ensure that the
* fix for the PMAO bug is enabled on compatibility mode.
*/
if (old.oprofile_cpu_type != NULL) {
@@ -2119,7 +2137,7 @@ void __init cpu_feature_keys_init(void)
struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS] = {
[0 ... NUM_MMU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
};
-EXPORT_SYMBOL_GPL(mmu_feature_keys);
+EXPORT_SYMBOL(mmu_feature_keys);
void __init mmu_feature_keys_init(void)
{
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 5693e1c67c2b..9a3b85bfc83f 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -12,11 +12,11 @@
#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
-#include <asm/prom.h>
#include <asm/firmware.h>
-#include <linux/uaccess.h>
+#include <linux/uio.h>
#include <asm/rtas.h>
#include <asm/inst.h>
@@ -68,33 +68,8 @@ void __init setup_kdump_trampoline(void)
}
#endif /* CONFIG_NONSTATIC_KERNEL */
-static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
- unsigned long offset, int userbuf)
-{
- if (userbuf) {
- if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
- return -EFAULT;
- } else
- memcpy(buf, (vaddr + offset), csize);
-
- return csize;
-}
-
-/**
- * copy_oldmem_page - copy one page from "oldmem"
- * @pfn: page frame number to be copied
- * @buf: target memory address for the copy; this can be in kernel address
- * space or user address space (see @userbuf)
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page (based on pfn) to begin the copy
- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
- * otherwise @buf is in kernel address space, use memcpy().
- *
- * Copy a page from "oldmem". For this page, there is no pte mapped
- * in the current kernel. We stitch up a pte, similar to kmap_atomic.
- */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
{
void *vaddr;
phys_addr_t paddr;
@@ -107,10 +82,10 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (memblock_is_region_memory(paddr, csize)) {
vaddr = __va(paddr);
- csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
+ csize = copy_to_iter(vaddr + offset, csize, iter);
} else {
vaddr = ioremap_cache(paddr, PAGE_SIZE);
- csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
+ csize = copy_to_iter(vaddr + offset, csize, iter);
iounmap(vaddr);
}
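Aside, not part of the patch: with the iov_iter conversion the user-vs-kernel buffer decision moves to the caller, which builds the iterator before calling in (the real consumer is fs/proc/vmcore.c). A hypothetical caller of that shape; READ marks the iterator as the destination of the copy:

	static ssize_t read_one_oldmem_page(unsigned long pfn, void __user *ubuf, size_t len)
	{
		struct iovec iov = { .iov_base = ubuf, .iov_len = len };
		struct iov_iter iter;

		iov_iter_init(&iter, READ, &iov, 1, len);
		return copy_oldmem_page(&iter, pfn, len, 0);
	}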
diff --git a/arch/powerpc/kernel/dawr.c b/arch/powerpc/kernel/dawr.c
index 64e423d2fe0f..30d4eca88d17 100644
--- a/arch/powerpc/kernel/dawr.c
+++ b/arch/powerpc/kernel/dawr.c
@@ -27,7 +27,7 @@ int set_dawr(int nr, struct arch_hw_breakpoint *brk)
dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) >> 3;
/*
* DAWR length is stored in field MDR bits 48:53. Matches range in
- * doublewords (64 bits) baised by -1 eg. 0b000000=1DW and
+ * doublewords (64 bits) biased by -1 eg. 0b000000=1DW and
* 0b111111=64DW.
* brk->hw_len is in bytes.
* This aligns up to double word size, shifts and does the bias.
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 7d1b2c4a4891..2ad365c21afa 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -10,6 +10,7 @@
#include <linux/jump_label.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
+#include <linux/of_fdt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/string.h>
@@ -19,7 +20,6 @@
#include <asm/dt_cpu_ftrs.h>
#include <asm/mce.h>
#include <asm/mmu.h>
-#include <asm/prom.h>
#include <asm/setup.h>
@@ -774,20 +774,26 @@ static __init void cpufeatures_cpu_quirks(void)
if ((version & 0xffffefff) == 0x004e0200) {
/* DD2.0 has no feature flag */
cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
+ cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
} else if ((version & 0xffffefff) == 0x004e0201) {
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
+ cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
} else if ((version & 0xffffefff) == 0x004e0202) {
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
+ cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
+ } else if ((version & 0xffffefff) == 0x004e0203) {
+ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
+ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
+ cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
} else if ((version & 0xffff0000) == 0x004e0000) {
/* DD2.1 and up have DD2_1 */
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
}
if ((version & 0xffff0000) == 0x004e0000) {
- cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
}
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 28bb1e7263a6..ab316e155ea9 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1329,7 +1329,7 @@ int eeh_pe_set_option(struct eeh_pe *pe, int option)
/*
* EEH functionality could possibly be disabled, just
- * return error for the case. And the EEH functinality
+ * return error for the case. And the EEH functionality
* isn't expected to be disabled on one specific PE.
*/
switch (option) {
@@ -1804,7 +1804,7 @@ static int eeh_debugfs_break_device(struct pci_dev *pdev)
* PE freeze. Using the in_8() accessor skips the eeh detection hook
* so the freeze hook so the EEH Detection machinery won't be
* triggered here. This is to match the usual behaviour of EEH
- * where the HW will asyncronously freeze a PE and it's up to
+ * where the HW will asynchronously freeze a PE and it's up to
* the kernel to notice and deal with it.
*
* 3. Turn Memory space back on. This is more important for VFs
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 422f80b5b27b..260273e56431 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -16,7 +16,6 @@
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
struct eeh_rmv_data {
diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c
index a7a8dc182efb..c23a454af08a 100644
--- a/arch/powerpc/kernel/eeh_event.c
+++ b/arch/powerpc/kernel/eeh_event.c
@@ -143,7 +143,7 @@ int __eeh_send_failure_event(struct eeh_pe *pe)
int eeh_send_failure_event(struct eeh_pe *pe)
{
/*
- * If we've manually supressed recovery events via debugfs
+ * If we've manually suppressed recovery events via debugfs
* then just drop it on the floor.
*/
if (eeh_debugfs_no_recover) {
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 845e024321d4..d2873d17d2b1 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/string.h>
@@ -301,7 +302,7 @@ struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no)
* @new_pe_parent.
*
* If @new_pe_parent is NULL then the new PE will be inserted under
- * directly under the the PHB.
+ * directly under the PHB.
*/
int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent)
{
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c
index 429620da73ba..706e1eb95efe 100644
--- a/arch/powerpc/kernel/eeh_sysfs.c
+++ b/arch/powerpc/kernel/eeh_sysfs.c
@@ -6,6 +6,7 @@
*
* Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
*/
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <asm/ppc-pci.h>
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 7748c278d13c..1d599df6f169 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -555,52 +555,3 @@ ret_from_mcheck_exc:
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
-
-/*
- * PROM code for specific machines follows. Put it
- * here so it's easy to add arch-specific sections later.
- * -- Cort
- */
-#ifdef CONFIG_PPC_RTAS
-/*
- * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
- * called with the MMU off.
- */
-_GLOBAL(enter_rtas)
- stwu r1,-INT_FRAME_SIZE(r1)
- mflr r0
- stw r0,INT_FRAME_SIZE+4(r1)
- LOAD_REG_ADDR(r4, rtas)
- lis r6,1f@ha /* physical return address for rtas */
- addi r6,r6,1f@l
- tophys(r6,r6)
- lwz r8,RTASENTRY(r4)
- lwz r4,RTASBASE(r4)
- mfmsr r9
- stw r9,8(r1)
- LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
- mtmsr r0 /* disable interrupts so SRR0/1 don't get trashed */
- li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
- mtlr r6
- stw r1, THREAD + RTAS_SP(r2)
- mtspr SPRN_SRR0,r8
- mtspr SPRN_SRR1,r9
- rfi
-1:
- lis r8, 1f@h
- ori r8, r8, 1f@l
- LOAD_REG_IMMEDIATE(r9,MSR_KERNEL)
- mtspr SPRN_SRR0,r8
- mtspr SPRN_SRR1,r9
- rfi /* Reactivate MMU translation */
-1:
- lwz r8,INT_FRAME_SIZE+4(r1) /* get return address */
- lwz r9,8(r1) /* original msr value */
- addi r1,r1,INT_FRAME_SIZE
- li r0,0
- stw r0, THREAD + RTAS_SP(r2)
- mtlr r8
- mtmsr r9
- blr /* return to caller */
-_ASM_NOKPROBE_SYMBOL(enter_rtas)
-#endif /* CONFIG_PPC_RTAS */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 9581906b5ee9..01ace4c56104 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -264,156 +264,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
addi r1,r1,SWITCH_FRAME_SIZE
blr
-#ifdef CONFIG_PPC_RTAS
-/*
- * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
- * called with the MMU off.
- *
- * In addition, we need to be in 32b mode, at least for now.
- *
- * Note: r3 is an input parameter to rtas, so don't trash it...
- */
-_GLOBAL(enter_rtas)
- mflr r0
- std r0,16(r1)
- stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */
-
- /* Because RTAS is running in 32b mode, it clobbers the high order half
- * of all registers that it saves. We therefore save those registers
- * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
- */
- SAVE_GPR(2, r1) /* Save the TOC */
- SAVE_GPR(13, r1) /* Save paca */
- SAVE_NVGPRS(r1) /* Save the non-volatiles */
-
- mfcr r4
- std r4,_CCR(r1)
- mfctr r5
- std r5,_CTR(r1)
- mfspr r6,SPRN_XER
- std r6,_XER(r1)
- mfdar r7
- std r7,_DAR(r1)
- mfdsisr r8
- std r8,_DSISR(r1)
-
- /* Temporary workaround to clear CR until RTAS can be modified to
- * ignore all bits.
- */
- li r0,0
- mtcr r0
-
-#ifdef CONFIG_BUG
- /* There is no way it is acceptable to get here with interrupts enabled,
- * check it with the asm equivalent of WARN_ON
- */
- lbz r0,PACAIRQSOFTMASK(r13)
-1: tdeqi r0,IRQS_ENABLED
- EMIT_WARN_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
-#endif
-
- /* Hard-disable interrupts */
- mfmsr r6
- rldicl r7,r6,48,1
- rotldi r7,r7,16
- mtmsrd r7,1
-
- /* Unfortunately, the stack pointer and the MSR are also clobbered,
- * so they are saved in the PACA which allows us to restore
- * our original state after RTAS returns.
- */
- std r1,PACAR1(r13)
- std r6,PACASAVEDMSR(r13)
-
- /* Setup our real return addr */
- LOAD_REG_ADDR(r4,rtas_return_loc)
- clrldi r4,r4,2 /* convert to realmode address */
- mtlr r4
-
- li r0,0
- ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
- andc r0,r6,r0
-
- li r9,1
- rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
- ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
- andc r6,r0,r9
-
-__enter_rtas:
- sync /* disable interrupts so SRR0/1 */
- mtmsrd r0 /* don't get trashed */
-
- LOAD_REG_ADDR(r4, rtas)
- ld r5,RTASENTRY(r4) /* get the rtas->entry value */
- ld r4,RTASBASE(r4) /* get the rtas->base value */
-
- mtspr SPRN_SRR0,r5
- mtspr SPRN_SRR1,r6
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-
-rtas_return_loc:
- FIXUP_ENDIAN
-
- /*
- * Clear RI and set SF before anything.
- */
- mfmsr r6
- li r0,MSR_RI
- andc r6,r6,r0
- sldi r0,r0,(MSR_SF_LG - MSR_RI_LG)
- or r6,r6,r0
- sync
- mtmsrd r6
-
- /* relocation is off at this point */
- GET_PACA(r4)
- clrldi r4,r4,2 /* convert to realmode address */
-
- bcl 20,31,$+4
-0: mflr r3
- ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */
-
- ld r1,PACAR1(r4) /* Restore our SP */
- ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
-
- mtspr SPRN_SRR0,r3
- mtspr SPRN_SRR1,r4
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-_ASM_NOKPROBE_SYMBOL(__enter_rtas)
-_ASM_NOKPROBE_SYMBOL(rtas_return_loc)
-
- .align 3
-1: .8byte rtas_restore_regs
-
-rtas_restore_regs:
- /* relocation is on at this point */
- REST_GPR(2, r1) /* Restore the TOC */
- REST_GPR(13, r1) /* Restore paca */
- REST_NVGPRS(r1) /* Restore the non-volatiles */
-
- GET_PACA(r13)
-
- ld r4,_CCR(r1)
- mtcr r4
- ld r5,_CTR(r1)
- mtctr r5
- ld r6,_XER(r1)
- mtspr SPRN_XER,r6
- ld r7,_DAR(r1)
- mtdar r7
- ld r8,_DSISR(r1)
- mtdsisr r8
-
- addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */
- ld r0,16(r1) /* get return address */
-
- mtlr r0
- blr /* return to caller */
-
-#endif /* CONFIG_PPC_RTAS */
-
_GLOBAL(enter_prom)
mflr r0
std r0,16(r1)
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 4c09c6688ac6..ea0a073abd96 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -25,9 +25,10 @@
#include <linux/cma.h>
#include <linux/hugetlb.h>
#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <asm/page.h>
-#include <asm/prom.h>
#include <asm/fadump.h>
#include <asm/fadump-internal.h>
#include <asm/setup.h>
@@ -73,8 +74,8 @@ static struct cma *fadump_cma;
* The total size of fadump reserved memory covers for boot memory size
* + cpu data size + hpte size and metadata.
* Initialize only the area equivalent to boot memory size for CMA use.
- * The reamining portion of fadump reserved memory will be not given
- * to CMA and pages for thoes will stay reserved. boot memory size is
+ * The remaining portion of fadump reserved memory will be not given
+ * to CMA and pages for those will stay reserved. boot memory size is
* aligned per CMA requirement to satisfy cma_init_reserved_mem() call.
* But for some reason even if it fails we still have the memory reservation
* with us and we can still continue doing fadump.
@@ -365,6 +366,11 @@ static unsigned long __init get_fadump_area_size(void)
size += fw_dump.cpu_state_data_size;
size += fw_dump.hpte_region_size;
+ /*
+ * Account for pagesize alignment of boot memory area destination address.
+ * This facilitates mmap reading of the first kernel's memory.
+ */
+ size = PAGE_ALIGN(size);
size += fw_dump.boot_memory_size;
size += sizeof(struct fadump_crash_info_header);
size += sizeof(struct elfhdr); /* ELF core header.*/
@@ -728,7 +734,7 @@ void crash_fadump(struct pt_regs *regs, const char *str)
else
ppc_save_regs(&fdh->regs);
- fdh->online_mask = *cpu_online_mask;
+ fdh->cpu_mask = *cpu_online_mask;
/*
* If we came in via system reset, wait a while for the secondary
@@ -867,7 +873,6 @@ static int fadump_alloc_mem_ranges(struct fadump_mrange_info *mrange_info)
sizeof(struct fadump_memory_range));
return 0;
}
-
static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
u64 base, u64 end)
{
@@ -886,7 +891,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
start = mem_ranges[mrange_info->mem_range_cnt - 1].base;
size = mem_ranges[mrange_info->mem_range_cnt - 1].size;
- if ((start + size) == base)
+ /*
+ * Boot memory area needs separate PT_LOAD segment(s) as it
+ * is moved to a different location at the time of crash.
+ * So, fold only if the region is not boot memory area.
+ */
+ if ((start + size) == base && start >= fw_dump.boot_mem_top)
is_adjacent = true;
}
if (!is_adjacent) {
@@ -968,11 +978,14 @@ static int fadump_init_elfcore_header(char *bufp)
elf->e_entry = 0;
elf->e_phoff = sizeof(struct elfhdr);
elf->e_shoff = 0;
-#if defined(_CALL_ELF)
- elf->e_flags = _CALL_ELF;
-#else
- elf->e_flags = 0;
-#endif
+
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
+ elf->e_flags = 2;
+ else if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
+ elf->e_flags = 1;
+ else
+ elf->e_flags = 0;
+
elf->e_ehsize = sizeof(struct elfhdr);
elf->e_phentsize = sizeof(struct elf_phdr);
elf->e_phnum = 0;
@@ -1164,6 +1177,11 @@ static unsigned long init_fadump_header(unsigned long addr)
fdh->elfcorehdr_addr = addr;
/* We will set the crashing cpu id in crash_fadump() during crash. */
fdh->crashing_cpu = FADUMP_CPU_UNKNOWN;
+ /*
+ * When LPAR is terminated by PHYP, ensure all possible CPUs'
+ * register data is processed while exporting the vmcore.
+ */
+ fdh->cpu_mask = *cpu_possible_mask;
return addr;
}
@@ -1271,7 +1289,6 @@ static void fadump_release_reserved_area(u64 start, u64 end)
static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
{
struct fadump_memory_range *mem_ranges;
- struct fadump_memory_range tmp_range;
u64 base, size;
int i, j, idx;
@@ -1286,11 +1303,8 @@ static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
if (mem_ranges[idx].base > mem_ranges[j].base)
idx = j;
}
- if (idx != i) {
- tmp_range = mem_ranges[idx];
- mem_ranges[idx] = mem_ranges[i];
- mem_ranges[i] = tmp_range;
- }
+ if (idx != i)
+ swap(mem_ranges[idx], mem_ranges[i]);
}
/* Merge adjacent reserved ranges */
@@ -1661,8 +1675,8 @@ int __init setup_fadump(void)
}
/*
* Use subsys_initcall_sync() here because there is dependency with
- * crash_save_vmcoreinfo_init(), which mush run first to ensure vmcoreinfo initialization
- * is done before regisering with f/w.
+ * crash_save_vmcoreinfo_init(), which must run first to ensure vmcoreinfo initialization
+ * is done before registering with f/w.
*/
subsys_initcall_sync(setup_fadump);
#else /* !CONFIG_PRESERVE_FA_DUMP */
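Aside, not part of the patch: a worked illustration of the new fold condition in fadump_add_mem_range(). With boot_mem_top assumed at 1GB, adjacent ranges inside boot memory stay separate (they are relocated at crash time and need their own PT_LOAD segments), while adjacent ranges above it still merge; 'mri' is a hypothetical fadump_mrange_info pointer:

	static void fold_example(struct fadump_mrange_info *mri)
	{
		fadump_add_mem_range(mri, 0x00000000, 0x20000000);	/* boot memory: new range */
		fadump_add_mem_range(mri, 0x20000000, 0x40000000);	/* adjacent, but start < boot_mem_top:
									 * kept separate -> own PT_LOAD */
		fadump_add_mem_range(mri, 0x40000000, 0x50000000);	/* first range above boot memory */
		fadump_add_mem_range(mri, 0x50000000, 0x60000000);	/* adjacent and >= boot_mem_top:
									 * folded into the previous range */
	}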
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 5c5181e8d5f1..d3eea633d11a 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -111,7 +111,7 @@ __secondary_hold_acknowledge:
#ifdef CONFIG_RELOCATABLE
/* This flag is set to 1 by a loader if the kernel should run
* at the loaded address instead of the linked address. This
- * is used by kexec-tools to keep the the kdump kernel in the
+ * is used by kexec-tools to keep the kdump kernel in the
* crash_kernel region. The loader is responsible for
* observing the alignment requirement.
*/
@@ -435,7 +435,7 @@ generic_secondary_common_init:
ld r12,CPU_SPEC_RESTORE(r23)
cmpdi 0,r12,0
beq 3f
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
ld r12,0(r12)
#endif
mtctr r12
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 4ad79eb638c6..77cd4c5a2d63 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -37,7 +37,7 @@ static int __init powersave_off(char *arg)
{
ppc_md.power_save = NULL;
cpuidle_disable = IDLE_POWERSAVE_OFF;
- return 0;
+ return 1;
}
__setup("powersave=off", powersave_off);
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
index 7bab2d7de372..ce25b28cf418 100644
--- a/arch/powerpc/kernel/interrupt_64.S
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -219,16 +219,6 @@ system_call_vectored common 0x3000
*/
system_call_vectored sigill 0x7ff0
-
-/*
- * Entered via kernel return set up by kernel/sstep.c, must match entry regs
- */
- .globl system_call_vectored_emulate
-system_call_vectored_emulate:
-_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
- li r10,IRQS_ALL_DISABLED
- stb r10,PACAIRQSOFTMASK(r13)
- b system_call_vectored_common
#endif /* CONFIG_PPC_BOOK3S */
.balign IFETCH_ALIGN_BYTES
@@ -721,7 +711,7 @@ _GLOBAL(ret_from_kernel_thread)
REST_NVGPRS(r1)
mtctr r14
mr r3,r15
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
mr r12,r14
#endif
bctrl
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 07093b7cdcb9..7e56ddb3e0b9 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -27,7 +27,6 @@
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
@@ -1065,7 +1064,7 @@ extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
long ret;
unsigned long size = 0;
- ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
+ ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction);
if (!ret && ((*direction == DMA_FROM_DEVICE) ||
(*direction == DMA_BIDIRECTIONAL)) &&
!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
@@ -1080,7 +1079,7 @@ void iommu_tce_kill(struct iommu_table *tbl,
unsigned long entry, unsigned long pages)
{
if (tbl->it_ops->tce_kill)
- tbl->it_ops->tce_kill(tbl, entry, pages, false);
+ tbl->it_ops->tce_kill(tbl, entry, pages);
}
EXPORT_SYMBOL_GPL(iommu_tce_kill);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 752fb182eacb..dd09919c3c66 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -52,18 +52,17 @@
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
+#include <linux/static_call.h>
#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
-#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
-#include <asm/livepatch.h>
#include <asm/hw_irq.h>
#include <asm/softirq_stack.h>
@@ -217,7 +216,6 @@ static inline void replay_soft_interrupts_irqrestore(void)
#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
#endif
-#ifdef CONFIG_CC_HAS_ASM_GOTO
notrace void arch_local_irq_restore(unsigned long mask)
{
unsigned char irq_happened;
@@ -313,82 +311,6 @@ happened:
__hard_irq_enable();
preempt_enable();
}
-#else
-notrace void arch_local_irq_restore(unsigned long mask)
-{
- unsigned char irq_happened;
-
- /* Write the new soft-enabled value */
- irq_soft_mask_set(mask);
- if (mask)
- return;
-
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(in_nmi() || in_hardirq());
-
- /*
- * From this point onward, we can take interrupts, preempt,
- * etc... unless we got hard-disabled. We check if an event
- * happened. If none happened, we know we can just return.
- *
- * We may have preempted before the check below, in which case
- * we are checking the "new" CPU instead of the old one. This
- * is only a problem if an event happened on the "old" CPU.
- *
- * External interrupt events will have caused interrupts to
- * be hard-disabled, so there is no problem, we
- * cannot have preempted.
- */
- irq_happened = get_irq_happened();
- if (!irq_happened) {
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(!(mfmsr() & MSR_EE));
- return;
- }
-
- /* We need to hard disable to replay. */
- if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(!(mfmsr() & MSR_EE));
- __hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
- } else {
- /*
- * We should already be hard disabled here. We had bugs
- * where that wasn't the case so let's dbl check it and
- * warn if we are wrong. Only do that when IRQ tracing
- * is enabled as mfmsr() can be costly.
- */
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
- if (WARN_ON_ONCE(mfmsr() & MSR_EE))
- __hard_irq_disable();
- }
-
- if (irq_happened == PACA_IRQ_HARD_DIS) {
- local_paca->irq_happened = 0;
- __hard_irq_enable();
- return;
- }
- }
-
- /*
- * Disable preempt here, so that the below preempt_enable will
- * perform resched if required (a replayed interrupt may set
- * need_resched).
- */
- preempt_disable();
- irq_soft_mask_set(IRQS_ALL_DISABLED);
- trace_hardirqs_off();
-
- replay_soft_interrupts_irqrestore();
- local_paca->irq_happened = 0;
-
- trace_hardirqs_on();
- irq_soft_mask_set(IRQS_ENABLED);
- __hard_irq_enable();
- preempt_enable();
-}
-#endif
EXPORT_SYMBOL(arch_local_irq_restore);
/*
@@ -730,6 +652,8 @@ static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
);
}
+DEFINE_STATIC_CALL_RET0(ppc_get_irq, *ppc_md.get_irq);
+
void __do_irq(struct pt_regs *regs)
{
unsigned int irq;
@@ -741,7 +665,7 @@ void __do_irq(struct pt_regs *regs)
*
* This will typically lower the interrupt line to the CPU
*/
- irq = ppc_md.get_irq();
+ irq = static_call(ppc_get_irq)();
/* We can hard enable interrupts now to allow perf interrupts */
if (should_hard_irq_enable())
@@ -809,6 +733,9 @@ void __init init_IRQ(void)
if (ppc_md.init_IRQ)
ppc_md.init_IRQ();
+
+ if (!WARN_ON(!ppc_md.get_irq))
+ static_call_update(ppc_get_irq, ppc_md.get_irq);
}
#ifdef CONFIG_BOOKE_OR_40x
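The irq.c hunks above turn the ppc_md.get_irq() indirect call in the interrupt hot path into a static call that is bound once in init_IRQ(). A rough sketch of the same pattern with hypothetical names (my_md, my_get_irq), assuming only <linux/static_call.h>:

    #include <linux/static_call.h>

    struct my_machdep {                     /* hypothetical ppc_md-like struct */
            unsigned int (*get_irq)(void);
    };
    static struct my_machdep my_md;

    /* Until updated, the call returns 0 via the RET0 trampoline. */
    DEFINE_STATIC_CALL_RET0(my_get_irq, *my_md.get_irq);

    void my_init_IRQ(void)
    {
            if (my_md.get_irq)
                    static_call_update(my_get_irq, my_md.get_irq);
    }

    unsigned int my_do_irq(void)
    {
            /* Patched to a direct branch after the update; no pointer load. */
            return static_call(my_get_irq)();
    }

On architectures with HAVE_STATIC_CALL this removes the indirect-branch cost on every interrupt; elsewhere it degrades gracefully to an ordinary function pointer.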
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index 39c625737c09..dc746611ebc0 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -18,11 +18,11 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
+#include <linux/of_address.h>
#include <linux/vmalloc.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7dae0b01abfb..1c97c0f177ae 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -45,7 +45,7 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
kprobe_opcode_t *addr = NULL;
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
/* PPC64 ABIv2 needs local entry point */
addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
if (addr && !offset) {
@@ -63,7 +63,7 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
#endif
addr = (kprobe_opcode_t *)ppc_function_entry(addr);
}
-#elif defined(PPC64_ELF_ABI_v1)
+#elif defined(CONFIG_PPC64_ELF_ABI_V1)
/*
* 64bit powerpc ABIv1 uses function descriptors:
* - Check for the dot variant of the symbol first.
@@ -107,7 +107,7 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
static bool arch_kprobe_on_func_entry(unsigned long offset)
{
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#ifdef CONFIG_KPROBES_ON_FTRACE
return offset <= 16;
#else
@@ -150,8 +150,8 @@ int arch_prepare_kprobe(struct kprobe *p)
if ((unsigned long)p->addr & 0x03) {
printk("Attempt to register kprobe at an unaligned address\n");
ret = -EINVAL;
- } else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
- printk("Cannot register a kprobe on mtmsr[d]/rfi[d]\n");
+ } else if (!can_single_step(ppc_inst_val(insn))) {
+ printk("Cannot register a kprobe on instructions that can't be single stepped\n");
ret = -EINVAL;
} else if ((unsigned long)p->addr & ~PAGE_MASK &&
ppc_inst_prefixed(ppc_inst_read(p->addr - 1))) {
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index cfc03e016ff2..5c58460b269a 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -7,10 +7,10 @@
#include <linux/pci.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/serial_reg.h>
#include <asm/io.h>
#include <asm/mmu.h>
-#include <asm/prom.h>
#include <asm/serial.h>
#include <asm/udbg.h>
#include <asm/pci-bridge.h>
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index d38a019b38e1..fd6d8d3a548e 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -454,7 +454,7 @@ _GLOBAL(kexec_sequence)
beq 1f
/* clear out hardware hash page table and tlb */
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
ld r12,0(r27) /* deref function descriptor */
#else
mr r12,r27
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 97a76a8619fb..f6d6ae0a1692 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -64,13 +64,13 @@ int module_finalize(const Elf_Ehdr *hdr,
(void *)sect->sh_addr + sect->sh_size);
#endif /* CONFIG_PPC64 */
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
sect = find_section(hdr, sechdrs, ".opd");
if (sect != NULL) {
me->arch.start_opd = sect->sh_addr;
me->arch.end_opd = sect->sh_addr + sect->sh_size;
}
-#endif /* PPC64_ELF_ABI_v1 */
+#endif /* CONFIG_PPC64_ELF_ABI_V1 */
#ifdef CONFIG_PPC_BARRIER_NOSPEC
sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index a0432ef46967..ea6536171778 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -99,7 +99,7 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
/* Sort the relocation information based on a symbol and
* addend key. This is a stable O(n*log n) complexity
- * alogrithm but it will reduce the complexity of
+ * algorithm but it will reduce the complexity of
* count_relocs() to linear complexity O(n)
*/
sort((void *)hdr + sechdrs[i].sh_offset,
@@ -256,9 +256,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
value, (uint32_t)location);
pr_debug("Location before: %08X.\n",
*(uint32_t *)location);
- value = (*(uint32_t *)location & ~0x03fffffc)
- | ((value - (uint32_t)location)
- & 0x03fffffc);
+ value = (*(uint32_t *)location & ~PPC_LI_MASK) |
+ PPC_LI(value - (uint32_t)location);
if (patch_instruction(location, ppc_inst(value)))
return -EFAULT;
@@ -266,10 +265,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
pr_debug("Location after: %08X.\n",
*(uint32_t *)location);
pr_debug("ie. jump to %08X+%08X = %08X\n",
- *(uint32_t *)location & 0x03fffffc,
- (uint32_t)location,
- (*(uint32_t *)location & 0x03fffffc)
- + (uint32_t)location);
+ *(uint32_t *)PPC_LI((uint32_t)location), (uint32_t)location,
+ (*(uint32_t *)PPC_LI((uint32_t)location)) + (uint32_t)location);
break;
case R_PPC_REL32:
@@ -289,23 +286,32 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
#ifdef CONFIG_DYNAMIC_FTRACE
-int module_trampoline_target(struct module *mod, unsigned long addr,
- unsigned long *target)
+notrace int module_trampoline_target(struct module *mod, unsigned long addr,
+ unsigned long *target)
{
- unsigned int jmp[4];
+ ppc_inst_t jmp[4];
/* Find where the trampoline jumps to */
- if (copy_from_kernel_nofault(jmp, (void *)addr, sizeof(jmp)))
+ if (copy_inst_from_kernel_nofault(jmp, (void *)addr))
+ return -EFAULT;
+ if (__copy_inst_from_kernel_nofault(jmp + 1, (void *)addr + 4))
+ return -EFAULT;
+ if (__copy_inst_from_kernel_nofault(jmp + 2, (void *)addr + 8))
+ return -EFAULT;
+ if (__copy_inst_from_kernel_nofault(jmp + 3, (void *)addr + 12))
return -EFAULT;
/* verify that this is what we expect it to be */
- if ((jmp[0] & 0xffff0000) != PPC_RAW_LIS(_R12, 0) ||
- (jmp[1] & 0xffff0000) != PPC_RAW_ADDI(_R12, _R12, 0) ||
- jmp[2] != PPC_RAW_MTCTR(_R12) ||
- jmp[3] != PPC_RAW_BCTR())
+ if ((ppc_inst_val(jmp[0]) & 0xffff0000) != PPC_RAW_LIS(_R12, 0))
+ return -EINVAL;
+ if ((ppc_inst_val(jmp[1]) & 0xffff0000) != PPC_RAW_ADDI(_R12, _R12, 0))
+ return -EINVAL;
+ if (ppc_inst_val(jmp[2]) != PPC_RAW_MTCTR(_R12))
+ return -EINVAL;
+ if (ppc_inst_val(jmp[3]) != PPC_RAW_BCTR())
return -EINVAL;
- addr = (jmp[1] & 0xffff) | ((jmp[0] & 0xffff) << 16);
+ addr = (ppc_inst_val(jmp[1]) & 0xffff) | ((ppc_inst_val(jmp[0]) & 0xffff) << 16);
if (addr & 0x8000)
addr -= 0x10000;
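The rewritten module_trampoline_target() above reads the trampoline back as four ppc_inst_t values (lis r12,hi / addi r12,r12,lo / mtctr r12 / bctr) and reassembles the target address from the two 16-bit immediates; the final "addr -= 0x10000" compensates for addi sign-extending its immediate. A standalone sketch of just that decode step, with a hypothetical helper name:

    #include <stdint.h>

    /* Rebuild the 32-bit target from the lis/addi immediates, as the hunk does. */
    static uint32_t decode_lis_addi(uint32_t lis_insn, uint32_t addi_insn)
    {
            uint32_t hi = lis_insn & 0xffff;        /* lis r12,hi -> r12 = hi << 16 */
            uint32_t lo = addi_insn & 0xffff;       /* addi adds sign-extended lo   */
            uint32_t addr = (hi << 16) | lo;

            if (lo & 0x8000)                        /* undo the sign-extension carry */
                    addr -= 0x10000;
            return addr;
    }

The encoder side uses the matching PPC_HA()/PPC_LO() split, which is why the high half must be corrected whenever bit 15 of the low half is set.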
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 794720530442..7e45dc98df8a 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -31,7 +31,7 @@
this, and makes other things simpler. Anton?
--RR. */
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
static func_desc_t func_desc(unsigned long addr)
{
@@ -122,7 +122,7 @@ static u32 ppc64_stub_insns[] = {
/* Save current r2 value in magic place on the stack. */
PPC_RAW_STD(_R2, _R1, R2_STACK_OFFSET),
PPC_RAW_LD(_R12, _R11, 32),
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
/* Set up new r2 from function descriptor */
PPC_RAW_LD(_R2, _R11, 40),
#endif
@@ -194,7 +194,7 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
/* Sort the relocation information based on a symbol and
* addend key. This is a stable O(n*log n) complexity
- * alogrithm but it will reduce the complexity of
+ * algorithm but it will reduce the complexity of
* count_relocs() to linear complexity O(n)
*/
sort((void *)sechdrs[i].sh_addr,
@@ -361,7 +361,7 @@ static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
entry->jump[1] |= PPC_HA(reladdr);
entry->jump[2] |= PPC_LO(reladdr);
- /* Eventhough we don't use funcdata in the stub, it's needed elsewhere. */
+ /* Even though we don't use funcdata in the stub, it's needed elsewhere. */
entry->funcdata = func_desc(addr);
entry->magic = STUB_MAGIC;
@@ -653,8 +653,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
}
/* Only replace bits 2 through 26 */
- value = (*(uint32_t *)location & ~0x03fffffc)
- | (value & 0x03fffffc);
+ value = (*(uint32_t *)location & ~PPC_LI_MASK) | PPC_LI(value);
if (patch_instruction((u32 *)location, ppc_inst(value)))
return -EFAULT;
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 0d9f9cd41e13..e385d3164648 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -19,9 +19,9 @@
#include <linux/pstore.h>
#include <linux/zlib.h>
#include <linux/uaccess.h>
+#include <linux/of.h>
#include <asm/nvram.h>
#include <asm/rtas.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#undef DEBUG_NVRAM
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 39da688a9455..ba593fd60124 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -344,15 +344,10 @@ void copy_mm_to_paca(struct mm_struct *mm)
{
mm_context_t *context = &mm->context;
-#ifdef CONFIG_PPC_MM_SLICES
VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
LOW_SLICE_ARRAY_SZ);
memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
TASK_SLICE_ARRAY_SZ(context));
-#else /* CONFIG_PPC_MM_SLICES */
- get_paca()->mm_ctx_user_psize = context->user_psize;
- get_paca()->mm_ctx_sllp = context->sllp;
-#endif
}
#endif /* CONFIG_PPC_64S_HASH_MMU */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 8bc9cf62cd93..068410cd54a3 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -30,10 +30,10 @@
#include <linux/vgaarb.h>
#include <linux/numa.h>
#include <linux/msi.h>
+#include <linux/irqdomain.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
@@ -42,7 +42,7 @@
#include "../../../drivers/pci/pci.h"
-/* hose_spinlock protects accesses to the the phb_bitmap. */
+/* hose_spinlock protects accesses to the phb_bitmap. */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);
@@ -1688,7 +1688,7 @@ EXPORT_SYMBOL_GPL(pcibios_scan_phb);
static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
{
int i, class = dev->class >> 8;
- /* When configured as agent, programing interface = 1 */
+ /* When configured as agent, programming interface = 1 */
int prog_if = dev->class & 0xf;
if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index 2fc12198ec07..0fe251c6ac2c 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/export.h>
+#include <linux/of.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 48537964fba1..5a174936c9a0 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -21,7 +21,6 @@
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 3fb7e572abed..19b03ddf5631 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -19,10 +19,10 @@
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
+#include <linux/of.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
@@ -285,3 +285,12 @@ int pcibus_to_node(struct pci_bus *bus)
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
+
+int pci_device_from_OF_node(struct device_node *np, u8 *bus, u8 *devfn)
+{
+ if (!PCI_DN(np))
+ return -ENODEV;
+ *bus = PCI_DN(np)->busno;
+ *devfn = PCI_DN(np)->devfn;
+ return 0;
+}
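pci_device_from_OF_node() above is a small helper that maps a device-tree node to its PCI bus number and devfn through the attached pci_dn. A hedged sketch of how a caller might use it to reach the struct pci_dev; the function and variable names here are hypothetical:

    #include <linux/pci.h>
    #include <linux/of.h>

    static struct pci_dev *pdev_from_node(int domain, struct device_node *np)
    {
            u8 bus, devfn;

            if (pci_device_from_OF_node(np, &bus, &devfn))
                    return NULL;            /* node has no pci_dn attached */

            /* Takes a reference on the pci_dev; drop it with pci_dev_put(). */
            return pci_get_domain_bus_and_slot(domain, bus, devfn);
    }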
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 61571ae23953..938ab8838ab5 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -12,9 +12,9 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/gfp.h>
+#include <linux/of.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index c3024f104765..756043dd06e9 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -13,8 +13,8 @@
#include <linux/pci.h>
#include <linux/export.h>
+#include <linux/of.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
/**
* get_int_prop - Decode a u32 from a device tree property
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(of_create_pci_dev);
* @dev: pci_dev structure for the bridge
*
* of_scan_bus() calls this routine for each PCI bridge that it finds, and
- * this routine in turn call of_scan_bus() recusively to scan for more child
+ * this routine in turn calls of_scan_bus() recursively to scan for more child
* devices.
*/
void of_scan_pci_bridge(struct pci_dev *dev)
diff --git a/arch/powerpc/kernel/proc_powerpc.c b/arch/powerpc/kernel/proc_powerpc.c
index 6a029f2378e1..b109cd7b5d01 100644
--- a/arch/powerpc/kernel/proc_powerpc.c
+++ b/arch/powerpc/kernel/proc_powerpc.c
@@ -7,12 +7,12 @@
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/rtas.h>
#include <linux/uaccess.h>
-#include <asm/prom.h>
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 984813a4d5dc..b62046bf3bb8 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -34,10 +34,8 @@
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
-#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
-#include <linux/elf-randomize.h>
#include <linux/pkeys.h>
#include <linux/seq_buf.h>
@@ -45,7 +43,6 @@
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
@@ -307,7 +304,7 @@ static void __giveup_vsx(struct task_struct *tsk)
unsigned long msr = tsk->thread.regs->msr;
/*
- * We should never be ssetting MSR_VSX without also setting
+ * We should never be setting MSR_VSX without also setting
* MSR_FP and MSR_VEC
*/
WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
@@ -645,7 +642,7 @@ static void do_break_handler(struct pt_regs *regs)
return;
}
- /* Otherwise findout which DAWR caused exception and disable it. */
+ /* Otherwise find out which DAWR caused exception and disable it. */
wp_get_instr_detail(regs, &instr, &type, &size, &ea);
for (i = 0; i < nr_wp_slots(); i++) {
@@ -1716,10 +1713,11 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
/*
* Copy architecture-specific thread state
*/
-int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long kthread_arg, struct task_struct *p,
- unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct pt_regs *childregs, *kregs;
extern void ret_from_fork(void);
extern void ret_from_fork_scv(void);
@@ -1736,18 +1734,18 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
/* Copy registers */
sp -= sizeof(struct pt_regs);
childregs = (struct pt_regs *) sp;
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
/* kernel thread */
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gpr[1] = sp + sizeof(struct pt_regs);
/* function */
- if (usp)
- childregs->gpr[14] = ppc_function_entry((void *)usp);
+ if (args->fn)
+ childregs->gpr[14] = ppc_function_entry((void *)args->fn);
#ifdef CONFIG_PPC64
clear_tsk_thread_flag(p, TIF_32BIT);
childregs->softe = IRQS_ENABLED;
#endif
- childregs->gpr[15] = kthread_arg;
+ childregs->gpr[15] = (unsigned long)args->fn_arg;
p->thread.regs = NULL; /* no user register state */
ti->flags |= _TIF_RESTOREALL;
f = ret_from_kernel_thread;
@@ -2313,42 +2311,3 @@ unsigned long arch_align_stack(unsigned long sp)
sp -= get_random_int() & ~PAGE_MASK;
return sp & ~0xf;
}
-
-static inline unsigned long brk_rnd(void)
-{
- unsigned long rnd = 0;
-
- /* 8MB for 32bit, 1GB for 64bit */
- if (is_32bit_task())
- rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
- else
- rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
-
- return rnd << PAGE_SHIFT;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
- unsigned long base = mm->brk;
- unsigned long ret;
-
-#ifdef CONFIG_PPC_BOOK3S_64
- /*
- * If we are using 1TB segments and we are allowed to randomise
- * the heap, we can put it above 1TB so it is backed by a 1TB
- * segment. Otherwise the heap will be in the bottom 1TB
- * which always uses 256MB segments and this may result in a
- * performance penalty.
- */
- if (!radix_enabled() && !is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
-#endif
-
- ret = PAGE_ALIGN(base + brk_rnd());
-
- if (ret < mm->brk)
- return mm->brk;
-
- return ret;
-}
-
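The copy_thread() change above switches from the flags/usp calling convention to struct kernel_clone_args: a kernel thread is now recognised by args->fn being set, and args->fn/args->fn_arg land in gpr[14]/gpr[15] for ret_from_kernel_thread. A rough, hypothetical sketch of the caller side, using only field names that appear in the hunk:

    #include <linux/sched/task.h>

    static pid_t spawn_kernel_thread(int (*fn)(void *), void *arg)
    {
            struct kernel_clone_args args = {
                    .flags  = CLONE_VM | CLONE_UNTRACED,
                    .fn     = fn,           /* ends up in childregs->gpr[14] */
                    .fn_arg = arg,          /* ends up in childregs->gpr[15] */
            };

            return kernel_clone(&args);
    }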
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 86c4f009563d..feae8509b59c 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -31,7 +31,6 @@
#include <linux/cpu.h>
#include <linux/pgtable.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 0ac5faacc909..04694ec423f6 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -28,6 +28,8 @@
#include <linux/bitops.h>
#include <linux/pgtable.h>
#include <linux/printk.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
@@ -3416,7 +3418,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*
* PowerMacs use a different mechanism to spin CPUs
*
- * (This must be done after instanciating RTAS)
+ * (This must be done after instantiating RTAS)
*/
if (of_platform != PLATFORM_POWERMAC)
prom_hold_cpus();
diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c
index f15bc78caf71..076d867412c7 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-view.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-view.c
@@ -174,7 +174,7 @@ int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
/*
* softe copies paca->irq_soft_mask variable state. Since irq_soft_mask is
- * no more used as a flag, lets force usr to alway see the softe value as 1
+ * no more used as a flag, lets force usr to always see the softe value as 1
* which means interrupts are not soft disabled.
*/
if (IS_ENABLED(CONFIG_PPC64) && regno == PT_SOFTE) {
diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c
index 6d5026a9db4f..4d2dc22d4a2d 100644
--- a/arch/powerpc/kernel/ptrace/ptrace.c
+++ b/arch/powerpc/kernel/ptrace/ptrace.c
@@ -444,10 +444,4 @@ void __init pt_regs_check(void)
* real registers.
*/
BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));
-
-#ifdef PPC64_ELF_ABI_v1
- BUILD_BUG_ON(!IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS));
-#else
- BUILD_BUG_ON(IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS));
-#endif
}
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index 6857a5b0a1c3..081b2b741a8c 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -24,11 +24,11 @@
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/rtc.h>
+#include <linux/of.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/machdep.h> /* for ppc_md */
#include <asm/time.h>
@@ -259,7 +259,6 @@ __initcall(proc_rtas_init);
static int parse_number(const char __user *p, size_t count, u64 *val)
{
char buf[40];
- char *end;
if (count > 39)
return -EINVAL;
@@ -269,11 +268,7 @@ static int parse_number(const char __user *p, size_t count, u64 *val)
buf[count] = 0;
- *val = simple_strtoull(buf, &end, 10);
- if (*end && *end != '\n')
- return -EINVAL;
-
- return 0;
+ return kstrtoull(buf, 10, val);
}
/* ****************************************************************** */
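The parse_number() hunk above drops the simple_strtoull() plus end-pointer dance in favour of kstrtoull(), which already fails with -EINVAL on stray characters (a single trailing newline is accepted) and with -ERANGE on overflow. A minimal sketch of the resulting contract, with a hypothetical wrapper name:

    #include <linux/kstrtox.h>

    /* Returns 0 on success; no manual end-pointer checking needed. */
    static int parse_u64(const char *buf, u64 *val)
    {
            return kstrtoull(buf, 10, val);         /* base 10 */
    }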
diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c
index 33c07c8af6c8..5a31d1829bca 100644
--- a/arch/powerpc/kernel/rtas-rtc.c
+++ b/arch/powerpc/kernel/rtas-rtc.c
@@ -6,7 +6,6 @@
#include <linux/rtc.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/time.h>
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 1f42aabbbab3..9bb43aa53d43 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -24,9 +24,10 @@
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <asm/interrupt.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
@@ -49,6 +50,19 @@ void enter_rtas(unsigned long);
static inline void do_enter_rtas(unsigned long args)
{
+ unsigned long msr;
+
+ /*
+ * Make sure MSR[RI] is currently enabled as it will be forced later
+ * in enter_rtas.
+ */
+ msr = mfmsr();
+ BUG_ON(!(msr & MSR_RI));
+
+ BUG_ON(!irqs_disabled());
+
+ hard_irq_disable(); /* Ensure MSR[EE] is disabled on PPC64 */
+
enter_rtas(args);
srr_regs_clobbered(); /* rtas uses SRRs, invalidate */
@@ -462,6 +476,11 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
return -1;
+ if ((mfmsr() & (MSR_IR|MSR_DR)) != (MSR_IR|MSR_DR)) {
+ WARN_ON_ONCE(1);
+ return -1;
+ }
+
s = lock_rtas();
/* We use the global rtas args buffer */

diff --git a/arch/powerpc/kernel/rtas_entry.S b/arch/powerpc/kernel/rtas_entry.S
new file mode 100644
index 000000000000..9a434d42e660
--- /dev/null
+++ b/arch/powerpc/kernel/rtas_entry.S
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <asm/asm-offsets.h>
+#include <asm/bug.h>
+#include <asm/page.h>
+#include <asm/ppc_asm.h>
+
+/*
+ * RTAS is called with MSR IR, DR, EE disabled, and LR in the return address.
+ *
+ * Note: r3 is an input parameter to rtas, so don't trash it...
+ */
+
+#ifdef CONFIG_PPC32
+_GLOBAL(enter_rtas)
+ stwu r1,-INT_FRAME_SIZE(r1)
+ mflr r0
+ stw r0,INT_FRAME_SIZE+4(r1)
+ LOAD_REG_ADDR(r4, rtas)
+ lis r6,1f@ha /* physical return address for rtas */
+ addi r6,r6,1f@l
+ tophys(r6,r6)
+ lwz r8,RTASENTRY(r4)
+ lwz r4,RTASBASE(r4)
+ mfmsr r9
+ stw r9,8(r1)
+ li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
+ mtlr r6
+ stw r1, THREAD + RTAS_SP(r2)
+ mtspr SPRN_SRR0,r8
+ mtspr SPRN_SRR1,r9
+ rfi
+1:
+ lis r8, 1f@h
+ ori r8, r8, 1f@l
+ LOAD_REG_IMMEDIATE(r9,MSR_KERNEL)
+ mtspr SPRN_SRR0,r8
+ mtspr SPRN_SRR1,r9
+ rfi /* Reactivate MMU translation */
+1:
+ lwz r8,INT_FRAME_SIZE+4(r1) /* get return address */
+ lwz r9,8(r1) /* original msr value */
+ addi r1,r1,INT_FRAME_SIZE
+ li r0,0
+ stw r0, THREAD + RTAS_SP(r2)
+ mtlr r8
+ mtmsr r9
+ blr /* return to caller */
+_ASM_NOKPROBE_SYMBOL(enter_rtas)
+
+#else /* CONFIG_PPC32 */
+#include <asm/exception-64s.h>
+
+/*
+ * 32-bit rtas on 64-bit machines has the additional problem that RTAS may
+ * not preserve the upper parts of registers it uses.
+ */
+_GLOBAL(enter_rtas)
+ mflr r0
+ std r0,16(r1)
+ stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */
+
+ /* Because RTAS is running in 32b mode, it clobbers the high order half
+ * of all registers that it saves. We therefore save those registers
+ * RTAS might touch to the stack. (r0, r3-r12 are caller saved)
+ */
+ SAVE_GPR(2, r1) /* Save the TOC */
+ SAVE_NVGPRS(r1) /* Save the non-volatiles */
+
+ mfcr r4
+ std r4,_CCR(r1)
+ mfctr r5
+ std r5,_CTR(r1)
+ mfspr r6,SPRN_XER
+ std r6,_XER(r1)
+ mfdar r7
+ std r7,_DAR(r1)
+ mfdsisr r8
+ std r8,_DSISR(r1)
+
+ /* Temporary workaround to clear CR until RTAS can be modified to
+ * ignore all bits.
+ */
+ li r0,0
+ mtcr r0
+
+ mfmsr r6
+
+ /* Unfortunately, the stack pointer and the MSR are also clobbered,
+ * so they are saved in the PACA which allows us to restore
+ * our original state after RTAS returns.
+ */
+ std r1,PACAR1(r13)
+ std r6,PACASAVEDMSR(r13)
+
+ /* Setup our real return addr */
+ LOAD_REG_ADDR(r4,rtas_return_loc)
+ clrldi r4,r4,2 /* convert to realmode address */
+ mtlr r4
+
+__enter_rtas:
+ LOAD_REG_ADDR(r4, rtas)
+ ld r5,RTASENTRY(r4) /* get the rtas->entry value */
+ ld r4,RTASBASE(r4) /* get the rtas->base value */
+
+ /*
+ * RTAS runs in 32-bit big endian real mode, but leave MSR[RI] on as we
+ * may hit NMI (SRESET or MCE) while in RTAS. RTAS should disable RI in
+ * its critical regions (as specified in PAPR+ section 7.2.1). MSR[S]
+ * is not impacted by RFI_TO_KERNEL (only urfid can unset it). So if
+ * MSR[S] is set, it will remain when entering RTAS.
+ */
+ LOAD_REG_IMMEDIATE(r6, MSR_ME | MSR_RI)
+
+ li r0,0
+ mtmsrd r0,1 /* disable RI before using SRR0/1 */
+
+ mtspr SPRN_SRR0,r5
+ mtspr SPRN_SRR1,r6
+ RFI_TO_KERNEL
+ b . /* prevent speculative execution */
+rtas_return_loc:
+ FIXUP_ENDIAN
+
+ /* Set SF before anything. */
+ LOAD_REG_IMMEDIATE(r6, MSR_KERNEL & ~(MSR_IR|MSR_DR))
+ mtmsrd r6
+
+ /* relocation is off at this point */
+ GET_PACA(r13)
+
+ bcl 20,31,$+4
+0: mflr r3
+ ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */
+
+ ld r1,PACAR1(r13) /* Restore our SP */
+ ld r4,PACASAVEDMSR(r13) /* Restore our MSR */
+
+ mtspr SPRN_SRR0,r3
+ mtspr SPRN_SRR1,r4
+ RFI_TO_KERNEL
+ b . /* prevent speculative execution */
+_ASM_NOKPROBE_SYMBOL(enter_rtas)
+_ASM_NOKPROBE_SYMBOL(__enter_rtas)
+_ASM_NOKPROBE_SYMBOL(rtas_return_loc)
+
+ .align 3
+1: .8byte rtas_restore_regs
+
+rtas_restore_regs:
+ /* relocation is on at this point */
+ REST_GPR(2, r1) /* Restore the TOC */
+ REST_NVGPRS(r1) /* Restore the non-volatiles */
+
+ ld r4,_CCR(r1)
+ mtcr r4
+ ld r5,_CTR(r1)
+ mtctr r5
+ ld r6,_XER(r1)
+ mtspr SPRN_XER,r6
+ ld r7,_DAR(r1)
+ mtdar r7
+ ld r8,_DSISR(r1)
+ mtdsisr r8
+
+ addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */
+ ld r0,16(r1) /* get return address */
+
+ mtlr r0
+ blr /* return to caller */
+
+#endif /* CONFIG_PPC32 */
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index a99179d83538..bc817a5619d6 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -120,7 +120,7 @@ static struct kmem_cache *flash_block_cache = NULL;
/*
* Local copy of the flash block list.
*
- * The rtas_firmware_flash_list varable will be
+ * The rtas_firmware_flash_list variable will be
* set once the data is fully read.
*
* For convenience as we build the list we use virtual addrs,
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 781c1869902e..5a2f5ea3b054 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -14,10 +14,11 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pgtable.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index cf0f42909ddf..5270b450bbde 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -22,7 +22,6 @@
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/rtas.h>
-#include <asm/prom.h>
#include <asm/nvram.h>
#include <linux/atomic.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 518ae5aa9410..eb0077b302e2 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -23,19 +23,19 @@
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/root_dev.h>
-#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
+#include <linux/of_irq.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/paca.h>
-#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
#include <asm/smp.h>
@@ -161,9 +161,7 @@ void machine_restart(char *cmd)
void machine_power_off(void)
{
machine_shutdown();
- if (pm_power_off)
- pm_power_off();
-
+ do_kernel_power_off();
smp_send_stop();
machine_hang();
}
@@ -279,7 +277,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
proc_freq / 1000000, proc_freq % 1000000);
/* If we are a Freescale core do a simple check so
- * we dont have to keep adding cases in the future */
+ * we don't have to keep adding cases in the future */
if (PVR_VER(pvr) & 0x8000) {
switch (PVR_VER(pvr)) {
case 0x8000: /* 7441/7450/7451, Voyager */
@@ -680,8 +678,25 @@ int check_legacy_ioport(unsigned long base_port)
}
EXPORT_SYMBOL(check_legacy_ioport);
-static int ppc_panic_event(struct notifier_block *this,
- unsigned long event, void *ptr)
+/*
+ * Panic notifiers setup
+ *
+ * We have 3 notifiers for powerpc, each one from a different "nature":
+ *
+ * - ppc_panic_fadump_handler() is a hypervisor notifier, which hard-disables
+ * IRQs and deals with the Firmware-Assisted dump, when it is configured;
+ * should run early in the panic path.
+ *
+ * - dump_kernel_offset() is an informative notifier, just showing the KASLR
+ * offset if we have RANDOMIZE_BASE set.
+ *
+ * - ppc_panic_platform_handler() is a low-level handler that's registered
+ * only if the platform wishes to perform final actions in the panic path,
+ * hence it should run late and might not even return. Currently, only
+ * pseries and ps3 platforms register callbacks.
+ */
+static int ppc_panic_fadump_handler(struct notifier_block *this,
+ unsigned long event, void *ptr)
{
/*
* panic does a local_irq_disable, but we really
@@ -691,45 +706,63 @@ static int ppc_panic_event(struct notifier_block *this,
/*
* If firmware-assisted dump has been registered then trigger
- * firmware-assisted dump and let firmware handle everything else.
+ * its callback and let the firmware handle everything else.
*/
crash_fadump(NULL, ptr);
- if (ppc_md.panic)
- ppc_md.panic(ptr); /* May not return */
+
return NOTIFY_DONE;
}
-static struct notifier_block ppc_panic_block = {
- .notifier_call = ppc_panic_event,
- .priority = INT_MIN /* may not return; must be done last */
-};
-
-/*
- * Dump out kernel offset information on panic.
- */
static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
void *p)
{
pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
kaslr_offset(), KERNELBASE);
- return 0;
+ return NOTIFY_DONE;
}
+static int ppc_panic_platform_handler(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ /*
+ * This handler is only registered if we have a panic callback
+ * on ppc_md, hence NULL check is not needed.
+ * Also, it may not return, so it runs really late on panic path.
+ */
+ ppc_md.panic(ptr);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ppc_fadump_block = {
+ .notifier_call = ppc_panic_fadump_handler,
+ .priority = INT_MAX, /* run early, to notify the firmware ASAP */
+};
+
static struct notifier_block kernel_offset_notifier = {
- .notifier_call = dump_kernel_offset
+ .notifier_call = dump_kernel_offset,
+};
+
+static struct notifier_block ppc_panic_block = {
+ .notifier_call = ppc_panic_platform_handler,
+ .priority = INT_MIN, /* may not return; must be done last */
};
void __init setup_panic(void)
{
+ /* Hard-disables IRQs + deal with FW-assisted dump (fadump) */
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &ppc_fadump_block);
+
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
atomic_notifier_chain_register(&panic_notifier_list,
&kernel_offset_notifier);
- /* PPC64 always does a hard irq disable in its panic handler */
- if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
- return;
- atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
+ /* Low-level platform-specific routines that should run on panic */
+ if (ppc_md.panic)
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &ppc_panic_block);
}
#ifdef CONFIG_CHECK_CACHE_COHERENCY
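The setup-common.c rework above splits the old single panic hook into three notifier_blocks on panic_notifier_list, ordered purely by .priority: the fadump notifier at INT_MAX fires first, the KASLR offset dump uses the default priority (0), and the platform ppc_md.panic() hook at INT_MIN runs last and may not return. A small sketch of the registration pattern with hypothetical names:

    #include <linux/notifier.h>
    #include <linux/panic_notifier.h>
    #include <linux/kernel.h>
    #include <linux/init.h>

    static int my_early_panic_cb(struct notifier_block *nb,
                                 unsigned long event, void *ptr)
    {
            /* Runs before lower-priority panic notifiers. */
            return NOTIFY_DONE;
    }

    static struct notifier_block my_early_panic_nb = {
            .notifier_call  = my_early_panic_cb,
            .priority       = INT_MAX,      /* higher priority == called earlier */
    };

    static int __init my_panic_setup(void)
    {
            atomic_notifier_chain_register(&panic_notifier_list, &my_early_panic_nb);
            return 0;
    }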
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a6e9d36d7c01..813261789303 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -20,9 +20,10 @@
#include <linux/export.h>
#include <linux/nvram.h>
#include <linux/pgtable.h>
+#include <linux/of_fdt.h>
+#include <linux/irq.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a96f05063bc9..5761f08dae95 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -31,11 +31,12 @@
#include <linux/memory.h>
#include <linux/nmi.h>
#include <linux/pgtable.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <asm/kvm_guest.h>
#include <asm/io.h>
#include <asm/kdump.h>
-#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/elf.h>
@@ -59,7 +60,7 @@
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
-#include <asm/livepatch.h>
+#include <asm/ftrace.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index f7f8620663c7..68a91e553e14 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -141,6 +141,21 @@ unsigned long copy_ckvsx_from_user(struct task_struct *task,
int show_unhandled_signals = 1;
+unsigned long get_min_sigframe_size(void)
+{
+ if (IS_ENABLED(CONFIG_PPC64))
+ return get_min_sigframe_size_64();
+ else
+ return get_min_sigframe_size_32();
+}
+
+#ifdef CONFIG_COMPAT
+unsigned long get_min_sigframe_size_compat(void)
+{
+ return get_min_sigframe_size_32();
+}
+#endif
+
/*
* Allocate space for the signal frame
*/
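get_min_sigframe_size() and get_min_sigframe_size_compat() above report the architecture's real minimum signal frame size; this is the sort of value the kernel exposes to userspace through the AT_MINSIGSTKSZ auxiliary vector entry, so programs can size alternate signal stacks for the actual register state rather than the historical MINSIGSTKSZ constant. A hedged userspace sketch of consuming that value:

    #include <sys/auxv.h>
    #include <signal.h>
    #include <stdio.h>

    #ifndef AT_MINSIGSTKSZ
    #define AT_MINSIGSTKSZ 51               /* older libc headers may lack it */
    #endif

    int main(void)
    {
            unsigned long min = getauxval(AT_MINSIGSTKSZ);

            if (!min)                       /* entry absent on older kernels */
                    min = MINSIGSTKSZ;
            printf("minimum signal stack: %lu bytes\n", min);
            return 0;
    }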
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d84c434b2b78..157a7403e3eb 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -233,6 +233,12 @@ struct rt_sigframe {
int abigap[56];
};
+unsigned long get_min_sigframe_size_32(void)
+{
+ return max(sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE + 16,
+ sizeof(struct sigframe) + __SIGNAL_FRAMESIZE);
+}
+
/*
* Save the current user registers on the user stack.
* We only save the altivec/spe registers if the process has used
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 73d483b07ff3..472596a109e2 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -66,6 +66,11 @@ struct rt_sigframe {
char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));
+unsigned long get_min_sigframe_size_64(void)
+{
+ return sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE;
+}
+
/*
* This computes a quad word aligned pointer inside the vmx_reserve array
* element. For historical reasons sigcontext might not be quad word aligned,
@@ -123,7 +128,7 @@ static long notrace __unsafe_setup_sigcontext(struct sigcontext __user *sc,
#endif
struct pt_regs *regs = tsk->thread.regs;
unsigned long msr = regs->msr;
- /* Force usr to alway see softe as 1 (interrupts enabled) */
+ /* Force usr to always see softe as 1 (interrupts enabled) */
unsigned long softe = 0x1;
BUG_ON(tsk != current);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index de0f6f09a5dd..bcefab484ea6 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -43,7 +43,6 @@
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
@@ -412,32 +411,32 @@ static struct cpumask nmi_ipi_pending_mask;
static bool nmi_ipi_busy = false;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
-static void nmi_ipi_lock_start(unsigned long *flags)
+noinstr static void nmi_ipi_lock_start(unsigned long *flags)
{
raw_local_irq_save(*flags);
hard_irq_disable();
- while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
+ while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
raw_local_irq_restore(*flags);
- spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
+ spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
raw_local_irq_save(*flags);
hard_irq_disable();
}
}
-static void nmi_ipi_lock(void)
+noinstr static void nmi_ipi_lock(void)
{
- while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
- spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
+ while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
+ spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
}
-static void nmi_ipi_unlock(void)
+noinstr static void nmi_ipi_unlock(void)
{
smp_mb();
- WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
- atomic_set(&__nmi_ipi_lock, 0);
+ WARN_ON(arch_atomic_read(&__nmi_ipi_lock) != 1);
+ arch_atomic_set(&__nmi_ipi_lock, 0);
}
-static void nmi_ipi_unlock_end(unsigned long *flags)
+noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
{
nmi_ipi_unlock();
raw_local_irq_restore(*flags);
@@ -446,7 +445,7 @@ static void nmi_ipi_unlock_end(unsigned long *flags)
/*
* Platform NMI handler calls this to ack
*/
-int smp_handle_nmi_ipi(struct pt_regs *regs)
+noinstr int smp_handle_nmi_ipi(struct pt_regs *regs)
{
void (*fn)(struct pt_regs *) = NULL;
unsigned long flags;
@@ -875,7 +874,7 @@ out_free:
* @tg : The thread-group structure of the CPU node which @cpu belongs
* to.
*
- * Returns the index to tg->thread_list that points to the the start
+ * Returns the index to tg->thread_list that points to the start
* of the thread_group that @cpu belongs to.
*
* Returns -1 if cpu doesn't belong to any of the groups pointed to by
@@ -1102,7 +1101,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
DBG("smp_prepare_cpus\n");
/*
- * setup_cpu may need to be called on the boot cpu. We havent
+ * setup_cpu may need to be called on the boot cpu. We haven't
* spun any cpus up but lets be paranoid.
*/
BUG_ON(boot_cpuid != smp_processor_id());
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index c4f5b4ce926f..fc999140bc27 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -73,7 +73,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len,
int
ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct __kernel_old_timeval __user *tvp)
{
- if ( (unsigned long)n >= 4096 )
+ if ((unsigned long)n >= 4096)
return sys_old_select((void __user *)n);
return sys_select(n, inp, outp, exp, tvp);
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 2069bbb90a9a..3a10cda9c05e 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -9,12 +9,12 @@
#include <linux/nodemask.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>
+#include <linux/of.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/hvcall.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/pmc.h>
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index f80cce0e3899..587adcc12860 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -54,8 +54,10 @@
#include <linux/of_clk.h>
#include <linux/suspend.h>
#include <linux/processor.h>
-#include <asm/trace.h>
+#include <linux/mc146818rtc.h>
+#include <linux/platform_device.h>
+#include <asm/trace.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/nvram.h>
@@ -63,7 +65,6 @@
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/time.h>
-#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
@@ -156,10 +157,6 @@ bool tb_invalid;
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);
-#ifdef CONFIG_PPC_SPLPAR
-void (*dtl_consumer)(struct dtl_entry *, u64);
-#endif
-
static void calc_cputime_factors(void)
{
struct div_result res;
@@ -185,6 +182,8 @@ static inline unsigned long read_spurr(unsigned long tb)
#include <asm/dtl.h>
+void (*dtl_consumer)(struct dtl_entry *, u64);
+
/*
* Scan the dispatch trace log and count up the stolen time.
* Should be called with interrupts disabled.
@@ -829,7 +828,7 @@ static void __read_persistent_clock(struct timespec64 *ts)
static int first = 1;
ts->tv_nsec = 0;
- /* XXX this is a litle fragile but will work okay in the short term */
+ /* XXX this is a little fragile but will work okay in the short term */
if (first) {
first = 0;
if (ppc_md.time_init)
@@ -974,7 +973,7 @@ void secondary_cpu_time_init(void)
*/
start_cpu_decrementer();
- /* FIME: Should make unrelatred change to move snapshot_timebase
+ * FIXME: Should make unrelated change to move snapshot_timebase
* call here ! */
register_decrementer_clockevent(smp_processor_id());
}
diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile
index 542aa7a8b2b4..af8527538fe4 100644
--- a/arch/powerpc/kernel/trace/Makefile
+++ b/arch/powerpc/kernel/trace/Makefile
@@ -14,10 +14,7 @@ obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_mprofile.o
else
obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_64_pg.o
endif
-obj-$(CONFIG_FUNCTION_TRACER) += ftrace_low.o
-obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
-obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace_low.o ftrace.o
obj-$(CONFIG_TRACING) += trace_clock.o
obj-$(CONFIG_PPC64) += $(obj64-y)
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 4ee04aacf9f1..2a893e06e4f1 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -28,9 +28,6 @@
#include <asm/syscall.h>
#include <asm/inst.h>
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
/*
* We generally only have a single long_branch tramp and at most 2 or 3 plt
* tramps generated. But, we don't use the plt tramps currently. We also allot
@@ -48,12 +45,12 @@ ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
addr = ppc_function_entry((void *)addr);
/* if (link) set op to 'bl' else 'b' */
- create_branch(&op, (u32 *)ip, addr, link ? 1 : 0);
+ create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);
return op;
}
-static int
+static inline int
ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
{
ppc_inst_t replaced;
@@ -78,10 +75,7 @@ ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
}
/* replace the text with the new text */
- if (patch_instruction((u32 *)ip, new))
- return -EPERM;
-
- return 0;
+ return patch_instruction((u32 *)ip, new);
}
/*
@@ -89,28 +83,26 @@ ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
*/
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
- ppc_inst_t op;
addr = ppc_function_entry((void *)addr);
- /* use the create_branch to verify that this offset can be branched */
- return create_branch(&op, (u32 *)ip, addr, 0) == 0;
+ return is_offset_in_branch_range(addr - ip);
}
static int is_bl_op(ppc_inst_t op)
{
- return (ppc_inst_val(op) & 0xfc000003) == 0x48000001;
+ return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
}
static int is_b_op(ppc_inst_t op)
{
- return (ppc_inst_val(op) & 0xfc000003) == 0x48000000;
+ return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BRANCH(0);
}
static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
{
int offset;
- offset = (ppc_inst_val(op) & 0x03fffffc);
+ offset = PPC_LI(ppc_inst_val(op));
/* make it signed */
if (offset & 0x02000000)
offset |= 0xfe000000;
@@ -119,7 +111,6 @@ static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
}
#ifdef CONFIG_MODULES
-#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
@@ -159,25 +150,39 @@ __ftrace_make_nop(struct module *mod,
return -EINVAL;
}
-#ifdef CONFIG_MPROFILE_KERNEL
- /* When using -mkernel_profile there is no load to jump over */
- pop = ppc_inst(PPC_RAW_NOP());
+ if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
+ if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
+ pr_err("Fetching instruction at %lx failed.\n", ip - 4);
+ return -EFAULT;
+ }
- if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
- pr_err("Fetching instruction at %lx failed.\n", ip - 4);
- return -EFAULT;
- }
+ /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
+ if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
+ !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
+ pr_err("Unexpected instruction %s around bl _mcount\n",
+ ppc_inst_as_str(op));
+ return -EINVAL;
+ }
+ } else if (IS_ENABLED(CONFIG_PPC64)) {
+ /*
+ * Check what is in the next instruction. We can see ld r2,40(r1), but
+ * on first pass after boot we will see mflr r0.
+ */
+ if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
+ pr_err("Fetching op failed.\n");
+ return -EFAULT;
+ }
- /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
- if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
- !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
- pr_err("Unexpected instruction %s around bl _mcount\n",
- ppc_inst_as_str(op));
- return -EINVAL;
+ if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
+ pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
+ return -EINVAL;
+ }
}
-#else
+
/*
- * Our original call site looks like:
+ * When using -mprofile-kernel or PPC32 there is no load to jump over.
+ *
+ * Otherwise our original call site looks like:
*
* bl <tramp>
* ld r2,XX(r1)
@@ -189,23 +194,10 @@ __ftrace_make_nop(struct module *mod,
*
* Use a b +8 to jump over the load.
*/
-
- pop = ppc_inst(PPC_INST_BRANCH | 8); /* b +8 */
-
- /*
- * Check what is in the next instruction. We can see ld r2,40(r1), but
- * on first pass after boot we will see mflr r0.
- */
- if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
- pr_err("Fetching op failed.\n");
- return -EFAULT;
- }
-
- if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
- pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
- return -EINVAL;
- }
-#endif /* CONFIG_MPROFILE_KERNEL */
+ if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32))
+ pop = ppc_inst(PPC_RAW_NOP());
+ else
+ pop = ppc_inst(PPC_RAW_BRANCH(8)); /* b +8 */
if (patch_instruction((u32 *)ip, pop)) {
pr_err("Patching NOP failed.\n");
@@ -214,54 +206,16 @@ __ftrace_make_nop(struct module *mod,
return 0;
}
-
-#else /* !PPC64 */
-static int
-__ftrace_make_nop(struct module *mod,
- struct dyn_ftrace *rec, unsigned long addr)
+#else
+static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
- ppc_inst_t op;
- unsigned long ip = rec->ip;
- unsigned long tramp, ptr;
-
- if (copy_from_kernel_nofault(&op, (void *)ip, MCOUNT_INSN_SIZE))
- return -EFAULT;
-
- /* Make sure that that this is still a 24bit jump */
- if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
- return -EINVAL;
- }
-
- /* lets find where the pointer goes */
- tramp = find_bl_target(ip, op);
-
- /* Find where the trampoline jumps to */
- if (module_trampoline_target(mod, tramp, &ptr)) {
- pr_err("Failed to get trampoline target\n");
- return -EFAULT;
- }
-
- if (ptr != addr) {
- pr_err("Trampoline location %08lx does not match addr\n",
- tramp);
- return -EINVAL;
- }
-
- op = ppc_inst(PPC_RAW_NOP());
-
- if (patch_instruction((u32 *)ip, op))
- return -EPERM;
-
return 0;
}
-#endif /* PPC64 */
#endif /* CONFIG_MODULES */
static unsigned long find_ftrace_tramp(unsigned long ip)
{
int i;
- ppc_inst_t instr;
/*
* We have the compiler generated long_branch tramps at the end
@@ -270,8 +224,7 @@ static unsigned long find_ftrace_tramp(unsigned long ip)
for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
if (!ftrace_tramps[i])
continue;
- else if (create_branch(&instr, (void *)ip,
- ftrace_tramps[i], 0) == 0)
+ else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
return ftrace_tramps[i];
return 0;
@@ -301,23 +254,12 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
int i;
ppc_inst_t op;
unsigned long ptr;
- ppc_inst_t instr;
- static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS];
/* Is this a known long jump tramp? */
for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
- if (!ftrace_tramps[i])
- break;
- else if (ftrace_tramps[i] == tramp)
+ if (ftrace_tramps[i] == tramp)
return 0;
- /* Is this a known plt tramp? */
- for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
- if (!ftrace_plt_tramps[i])
- break;
- else if (ftrace_plt_tramps[i] == tramp)
- return -1;
-
/* New trampoline -- read where this goes */
if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
pr_debug("Fetching opcode failed.\n");
@@ -339,16 +281,10 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
}
/* Let's re-write the tramp to go to ftrace_[regs_]caller */
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
-#else
- ptr = ppc_global_function_entry((void *)ftrace_caller);
-#endif
- if (create_branch(&instr, (void *)tramp, ptr, 0)) {
- pr_debug("%ps is not reachable from existing mcount tramp\n",
- (void *)ptr);
- return -1;
- }
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
+ else
+ ptr = ppc_global_function_entry((void *)ftrace_caller);
if (patch_branch((u32 *)tramp, ptr, 0)) {
pr_debug("REL24 out of range!\n");
@@ -418,10 +354,12 @@ int ftrace_make_nop(struct module *mod,
old = ftrace_call_replace(ip, addr, 1);
new = ppc_inst(PPC_RAW_NOP());
return ftrace_modify_code(ip, old, new);
- } else if (core_kernel_text(ip))
+ } else if (core_kernel_text(ip)) {
return __ftrace_make_nop_kernel(rec, addr);
+ } else if (!IS_ENABLED(CONFIG_MODULES)) {
+ return -EINVAL;
+ }
-#ifdef CONFIG_MODULES
/*
* Out of range jumps are called from modules.
* We should either already have a pointer to the module
@@ -444,53 +382,27 @@ int ftrace_make_nop(struct module *mod,
mod = rec->arch.mod;
return __ftrace_make_nop(mod, rec, addr);
-#else
- /* We should not get here without modules */
- return -EINVAL;
-#endif /* CONFIG_MODULES */
}
#ifdef CONFIG_MODULES
-#ifdef CONFIG_PPC64
/*
* Examine the existing instructions for __ftrace_make_call.
* They should effectively be a NOP, and follow formal constraints,
* depending on the ABI. Return false if they don't.
*/
-#ifndef CONFIG_MPROFILE_KERNEL
-static int
-expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
+static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
{
- /*
- * We expect to see:
- *
- * b +8
- * ld r2,XX(r1)
- *
- * The load offset is different depending on the ABI. For simplicity
- * just mask it out when doing the compare.
- */
- if (!ppc_inst_equal(op0, ppc_inst(0x48000008)) ||
- (ppc_inst_val(op1) & 0xffff0000) != 0xe8410000)
- return 0;
- return 1;
-}
-#else
-static int
-expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
-{
- /* look for patched "NOP" on ppc64 with -mprofile-kernel */
- if (!ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP())))
- return 0;
- return 1;
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
+ return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) &&
+ ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC));
+ else
+ return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
}
-#endif
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
ppc_inst_t op[2];
- ppc_inst_t instr;
void *ip = (void *)rec->ip;
unsigned long entry, ptr, tramp;
struct module *mod = rec->arch.mod;
@@ -499,7 +411,8 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (copy_inst_from_kernel_nofault(op, ip))
return -EFAULT;
- if (copy_inst_from_kernel_nofault(op + 1, ip + 4))
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1) &&
+ copy_inst_from_kernel_nofault(op + 1, ip + 4))
return -EFAULT;
if (!expected_nop_sequence(ip, op[0], op[1])) {
@@ -509,20 +422,15 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
}
/* If we never set up ftrace trampoline(s), then bail */
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- if (!mod->arch.tramp || !mod->arch.tramp_regs) {
-#else
- if (!mod->arch.tramp) {
-#endif
+ if (!mod->arch.tramp ||
+ (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !mod->arch.tramp_regs)) {
pr_err("No ftrace trampoline\n");
return -EINVAL;
}
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- if (rec->flags & FTRACE_FL_REGS)
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && rec->flags & FTRACE_FL_REGS)
tramp = mod->arch.tramp_regs;
else
-#endif
tramp = mod->arch.tramp;
if (module_trampoline_target(mod, tramp, &ptr)) {
@@ -539,12 +447,6 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return -EINVAL;
}
- /* Ensure branch is within 24 bits */
- if (create_branch(&instr, ip, tramp, BRANCH_SET_LINK)) {
- pr_err("Branch out of range\n");
- return -EINVAL;
- }
-
if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
@@ -552,58 +454,11 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return 0;
}
-
-#else /* !CONFIG_PPC64: */
-static int
-__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
-{
- int err;
- ppc_inst_t op;
- u32 *ip = (u32 *)rec->ip;
- struct module *mod = rec->arch.mod;
- unsigned long tramp;
-
- /* read where this goes */
- if (copy_inst_from_kernel_nofault(&op, ip))
- return -EFAULT;
-
- /* It should be pointing to a nop */
- if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
- pr_err("Expected NOP but have %s\n", ppc_inst_as_str(op));
- return -EINVAL;
- }
-
- /* If we never set up a trampoline to ftrace_caller, then bail */
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- if (!mod->arch.tramp || !mod->arch.tramp_regs) {
#else
- if (!mod->arch.tramp) {
-#endif
- pr_err("No ftrace trampoline\n");
- return -EINVAL;
- }
-
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- if (rec->flags & FTRACE_FL_REGS)
- tramp = mod->arch.tramp_regs;
- else
-#endif
- tramp = mod->arch.tramp;
- /* create the branch to the trampoline */
- err = create_branch(&op, ip, tramp, BRANCH_SET_LINK);
- if (err) {
- pr_err("REL24 out of range!\n");
- return -EINVAL;
- }
-
- pr_devel("write to %lx\n", rec->ip);
-
- if (patch_instruction(ip, op))
- return -EPERM;
-
+static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
return 0;
}
-#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
@@ -616,16 +471,12 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
entry = ppc_global_function_entry((void *)ftrace_caller);
ptr = ppc_global_function_entry((void *)addr);
- if (ptr != entry) {
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ if (ptr != entry && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
entry = ppc_global_function_entry((void *)ftrace_regs_caller);
- if (ptr != entry) {
-#endif
- pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
- return -EINVAL;
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- }
-#endif
+
+ if (ptr != entry) {
+ pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
+ return -EINVAL;
}
/* Make sure we have a nop */
@@ -668,10 +519,13 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
old = ppc_inst(PPC_RAW_NOP());
new = ftrace_call_replace(ip, addr, 1);
return ftrace_modify_code(ip, old, new);
- } else if (core_kernel_text(ip))
+ } else if (core_kernel_text(ip)) {
return __ftrace_make_call_kernel(rec, addr);
+ } else if (!IS_ENABLED(CONFIG_MODULES)) {
+ /* We should not get here without modules */
+ return -EINVAL;
+ }
-#ifdef CONFIG_MODULES
/*
* Out of range jumps are called from modules.
* Being that we are converting from nop, it had better
@@ -683,10 +537,6 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
}
return __ftrace_make_call(rec, addr);
-#else
- /* We should not get here without modules */
- return -EINVAL;
-#endif /* CONFIG_MODULES */
}
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
@@ -770,12 +620,6 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return -EINVAL;
}
- /* Ensure branch is within 24 bits */
- if (create_branch(&op, (u32 *)ip, tramp, BRANCH_SET_LINK)) {
- pr_err("Branch out of range\n");
- return -EINVAL;
- }
-
if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
@@ -783,6 +627,11 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return 0;
}
+#else
+static int __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
+{
+ return 0;
+}
#endif
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
@@ -807,9 +656,11 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
* variant, so there is nothing to do here
*/
return 0;
+ } else if (!IS_ENABLED(CONFIG_MODULES)) {
+ /* We should not get here without modules */
+ return -EINVAL;
}
-#ifdef CONFIG_MODULES
/*
* Out of range jumps are called from modules.
*/
@@ -819,10 +670,6 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
}
return __ftrace_modify_call(rec, old_addr, addr);
-#else
- /* We should not get here without modules */
- return -EINVAL;
-#endif /* CONFIG_MODULES */
}
#endif
@@ -836,15 +683,13 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/* Also update the regs callback function */
- if (!ret) {
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
ip = (unsigned long)(&ftrace_regs_call);
old = ppc_inst_read((u32 *)&ftrace_regs_call);
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
}
-#endif
return ret;
}
@@ -863,25 +708,39 @@ void arch_ftrace_update_code(int command)
extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
+void ftrace_free_init_tramp(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
+ if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
+ ftrace_tramps[i] = 0;
+ return;
+ }
+}
+
int __init ftrace_dyn_arch_init(void)
{
int i;
unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
u32 stub_insns[] = {
- 0xe98d0000 | PACATOC, /* ld r12,PACATOC(r13) */
- 0x3d8c0000, /* addis r12,r12,<high> */
- 0x398c0000, /* addi r12,r12,<low> */
- 0x7d8903a6, /* mtctr r12 */
- 0x4e800420, /* bctr */
+ PPC_RAW_LD(_R12, _R13, PACATOC),
+ PPC_RAW_ADDIS(_R12, _R12, 0),
+ PPC_RAW_ADDI(_R12, _R12, 0),
+ PPC_RAW_MTCTR(_R12),
+ PPC_RAW_BCTR()
};
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- unsigned long addr = ppc_global_function_entry((void *)ftrace_regs_caller);
-#else
- unsigned long addr = ppc_global_function_entry((void *)ftrace_caller);
-#endif
- long reladdr = addr - kernel_toc_addr();
+ unsigned long addr;
+ long reladdr;
+
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ addr = ppc_global_function_entry((void *)ftrace_regs_caller);
+ else
+ addr = ppc_global_function_entry((void *)ftrace_caller);
- if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
+ reladdr = addr - kernel_toc_addr();
+
+ if (reladdr >= SZ_2G || reladdr < -(long)SZ_2G) {
pr_err("Address of %ps out of range of kernel_toc.\n",
(void *)addr);
return -1;
@@ -896,13 +755,7 @@ int __init ftrace_dyn_arch_init(void)
return 0;
}
-#else
-int __init ftrace_dyn_arch_init(void)
-{
- return 0;
-}
#endif
-#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -939,8 +792,8 @@ int ftrace_disable_ftrace_graph_caller(void)
* Hook the return address and push it in the stack of return addrs
* in current thread info. Return the address we want to divert to.
*/
-unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
- unsigned long sp)
+static unsigned long
+__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
{
unsigned long return_hooker;
int bit;
@@ -969,12 +822,18 @@ out:
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
- fregs->regs.link = prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
+ fregs->regs.link = __prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
+}
+#else
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
+ unsigned long sp)
+{
+ return __prepare_ftrace_return(parent, ip, sp);
}
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
char *arch_ftrace_match_adjust(char *str, const char *search)
{
if (str[0] == '.' && search[0] != '.')
@@ -982,4 +841,4 @@ char *arch_ftrace_match_adjust(char *str, const char *search)
else
return str;
}
-#endif /* PPC64_ELF_ABI_v1 */
+#endif /* CONFIG_PPC64_ELF_ABI_V1 */
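The hunks above repeatedly trade #ifdef CONFIG_* blocks for IS_ENABLED() checks, so both branches are always parsed and type-checked and the dead branch is discarded at build time. A minimal standalone sketch of that pattern, reusing the symbols already referenced in this file (the helper name pick_ftrace_caller_entry is illustrative only, not from the patch):

	#include <linux/kconfig.h>	/* IS_ENABLED() */

	/* Both arms are compiled regardless of the config; only one survives. */
	static unsigned long pick_ftrace_caller_entry(void)
	{
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
			return ppc_global_function_entry((void *)ftrace_regs_caller);

		return ppc_global_function_entry((void *)ftrace_caller);
	}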
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index a08bb7cefdc5..3aaa50e5c72f 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -393,7 +393,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
* Builds that do not support KVM could take this second option to increase
* the recoverability of NMIs.
*/
-void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
+noinstr void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_POWERNV
unsigned long kbase = (unsigned long)_stext;
@@ -433,7 +433,9 @@ void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
return;
nonrecoverable:
- regs_set_unrecoverable(regs);
+ regs->msr &= ~MSR_RI;
+ local_paca->hsrr_valid = 0;
+ local_paca->srr_valid = 0;
#endif
}
DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index c6975467d9ff..95a41ae9dfa7 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -48,6 +48,11 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
return -EINVAL;
}
+ if (!can_single_step(ppc_inst_val(ppc_inst_read(auprobe->insn)))) {
+ pr_info_ratelimited("Cannot register a uprobe on instructions that can't be single stepped\n");
+ return -ENOTSUPP;
+ }
+
return 0;
}
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 717f2c9a7573..0da287544054 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -25,7 +25,6 @@
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile
index 954974287ee7..096b0bf1335f 100644
--- a/arch/powerpc/kernel/vdso/Makefile
+++ b/arch/powerpc/kernel/vdso/Makefile
@@ -48,6 +48,7 @@ UBSAN_SANITIZE := n
KASAN_SANITIZE := n
ccflags-y := -shared -fno-common -fno-builtin -nostdlib -Wl,--hash-style=both
+ccflags-$(CONFIG_LD_IS_LLD) += $(call cc-option,--ld-path=$(LD),-fuse-ld=lld)
CC32FLAGS := -Wl,-soname=linux-vdso32.so.1 -m32
AS32FLAGS := -D__VDSO32__ -s
diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S
index 58e0099f70f4..e0d19d74455f 100644
--- a/arch/powerpc/kernel/vdso/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso32.lds.S
@@ -13,7 +13,6 @@ OUTPUT_FORMAT("elf32-powerpcle", "elf32-powerpcle", "elf32-powerpcle")
OUTPUT_FORMAT("elf32-powerpc", "elf32-powerpc", "elf32-powerpc")
#endif
OUTPUT_ARCH(powerpc:common)
-ENTRY(_start)
SECTIONS
{
diff --git a/arch/powerpc/kernel/vdso/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S
index 0288cad428b0..1a4a7bc4c815 100644
--- a/arch/powerpc/kernel/vdso/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso64.lds.S
@@ -13,7 +13,6 @@ OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle")
OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc", "elf64-powerpc")
#endif
OUTPUT_ARCH(powerpc:common64)
-ENTRY(_start)
SECTIONS
{
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index bfc27496fe7e..7d28b9553654 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -56,7 +56,7 @@
* solved by also having a SMP watchdog where all CPUs check all other
* CPUs heartbeat.
*
- * The SMP checker can detect lockups on other CPUs. A gobal "pending"
+ * The SMP checker can detect lockups on other CPUs. A global "pending"
* cpumask is kept, containing all CPUs which enable the watchdog. Each
* CPU clears their pending bit in their heartbeat timer. When the bitmask
* becomes empty, the last CPU to clear its pending bit updates a global
diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile
index b6c52608cb49..0c2abe7f9908 100644
--- a/arch/powerpc/kexec/Makefile
+++ b/arch/powerpc/kexec/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_KEXEC_FILE) += file_load.o ranges.o file_load_$(BITS).o elf_$(BITS)
GCOV_PROFILE_core_$(BITS).o := n
KCOV_INSTRUMENT_core_$(BITS).o := n
UBSAN_SANITIZE_core_$(BITS).o := n
+KASAN_SANITIZE_core.o := n
+KASAN_SANITIZE_core_$(BITS) := n
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index abf5897ae88c..7ab4980fe13a 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -18,7 +18,6 @@
#include <asm/kdump.h>
#include <asm/machdep.h>
#include <asm/pgalloc.h>
-#include <asm/prom.h>
#include <asm/sections.h>
void machine_kexec_mask_interrupts(void) {
diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
index 6cc7793b8420..c2bea9db1c1e 100644
--- a/arch/powerpc/kexec/core_64.c
+++ b/arch/powerpc/kexec/core_64.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/hardirq.h>
+#include <linux/of.h>
#include <asm/page.h>
#include <asm/current.h>
@@ -25,7 +26,6 @@
#include <asm/paca.h>
#include <asm/mmu.h>
#include <asm/sections.h> /* _end */
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/hw_breakpoint.h>
#include <asm/svm.h>
@@ -406,7 +406,7 @@ static int __init export_htab_values(void)
if (!node)
return -ENODEV;
- /* remove any stale propertys so ours can be found */
+ /* remove any stale properties so ours can be found */
of_remove_property(node, of_find_property(node, htab_base_prop.name, NULL));
of_remove_property(node, of_find_property(node, htab_size_prop.name, NULL));
diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c
index 22ceeeb705ab..d85fa9fc6f3c 100644
--- a/arch/powerpc/kexec/crash.c
+++ b/arch/powerpc/kexec/crash.c
@@ -20,7 +20,6 @@
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/kexec.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/setjmp.h>
#include <asm/debug.h>
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 9bdfc8b50899..0cd23ce07d68 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -37,9 +37,6 @@ kvm-e500mc-objs := \
e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
-kvm-book3s_64-builtin-objs-$(CONFIG_SPAPR_TCE_IOMMU) := \
- book3s_64_vio_hv.o
-
kvm-pr-y := \
fpu.o \
emulate.o \
@@ -76,7 +73,7 @@ kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
book3s_hv_tm.o
kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
- book3s_hv_rm_xics.o book3s_hv_rm_xive.o
+ book3s_hv_rm_xics.o
kvm-book3s_64-builtin-tm-objs-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
book3s_hv_tm_builtin.o
@@ -134,3 +131,8 @@ obj-$(CONFIG_KVM_BOOK3S_64_PR) += kvm-pr.o
obj-$(CONFIG_KVM_BOOK3S_64_HV) += kvm-hv.o
obj-y += $(kvm-book3s_64-builtin-objs-y)
+
+# KVM does a lot in real-mode, and 64-bit Book3S KASAN doesn't support that
+ifdef CONFIG_PPC_BOOK3S_64
+KASAN_SANITIZE := n
+endif
diff --git a/arch/powerpc/kvm/book3s_64_entry.S b/arch/powerpc/kvm/book3s_64_entry.S
index e42d1c609e47..e43704547a1e 100644
--- a/arch/powerpc/kvm/book3s_64_entry.S
+++ b/arch/powerpc/kvm/book3s_64_entry.S
@@ -124,7 +124,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/*
* "Skip" interrupts are part of a trick KVM uses a with hash guests to load
- * the faulting instruction in guest memory from the the hypervisor without
+ * the faulting instruction in guest memory from the hypervisor without
* walking page tables.
*
* When the guest takes a fault that requires the hypervisor to load the
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 0aeb51738ca9..514fd45c1994 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -58,7 +58,7 @@ struct kvm_resize_hpt {
/* Possible values and their usage:
* <0 an error occurred during allocation,
* -EBUSY allocation is in the progress,
- * 0 allocation made successfuly.
+ * 0 allocation made successfully.
*/
int error;
@@ -256,26 +256,34 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
int kvmppc_mmu_hv_init(void)
{
- unsigned long host_lpid, rsvd_lpid;
+ unsigned long nr_lpids;
if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE))
return -EINVAL;
- host_lpid = 0;
- if (cpu_has_feature(CPU_FTR_HVMODE))
- host_lpid = mfspr(SPRN_LPID);
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ if (WARN_ON(mfspr(SPRN_LPID) != 0))
+ return -EINVAL;
+ nr_lpids = 1UL << mmu_lpid_bits;
+ } else {
+ nr_lpids = 1UL << KVM_MAX_NESTED_GUESTS_SHIFT;
+ }
- /* POWER8 and above have 12-bit LPIDs (10-bit in POWER7) */
- if (cpu_has_feature(CPU_FTR_ARCH_207S))
- rsvd_lpid = LPID_RSVD;
- else
- rsvd_lpid = LPID_RSVD_POWER7;
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* POWER7 has 10-bit LPIDs, POWER8 has 12-bit LPIDs */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ WARN_ON(nr_lpids != 1UL << 12);
+ else
+ WARN_ON(nr_lpids != 1UL << 10);
- kvmppc_init_lpid(rsvd_lpid + 1);
+ /*
+ * Reserve the last implemented LPID use in partition
+ * switching for POWER7 and POWER8.
+ */
+ nr_lpids -= 1;
+ }
- kvmppc_claim_lpid(host_lpid);
- /* rsvd_lpid is reserved for use in partition switching */
- kvmppc_claim_lpid(rsvd_lpid);
+ kvmppc_init_lpid(nr_lpids);
return 0;
}
@@ -879,7 +887,7 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
struct revmap_entry *rev = kvm->arch.hpt.rev;
unsigned long head, i, j;
__be64 *hptep;
- int ret = 0;
+ bool ret = false;
unsigned long *rmapp;
rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
@@ -887,7 +895,7 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
lock_rmap(rmapp);
if (*rmapp & KVMPPC_RMAP_REFERENCED) {
*rmapp &= ~KVMPPC_RMAP_REFERENCED;
- ret = 1;
+ ret = true;
}
if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
unlock_rmap(rmapp);
@@ -919,7 +927,7 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
rev[i].guest_rpte |= HPTE_R_R;
note_hpte_modification(kvm, &rev[i]);
}
- ret = 1;
+ ret = true;
}
__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
} while ((i = j) != head);
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 85cfa6328222..d6589c4fe889 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -32,6 +32,18 @@
#include <asm/tce.h>
#include <asm/mmu_context.h>
+static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
+ unsigned long liobn)
+{
+ struct kvmppc_spapr_tce_table *stt;
+
+ list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
+ if (stt->liobn == liobn)
+ return stt;
+
+ return NULL;
+}
+
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
@@ -753,3 +765,34 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
+
+long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba)
+{
+ struct kvmppc_spapr_tce_table *stt;
+ long ret;
+ unsigned long idx;
+ struct page *page;
+ u64 *tbl;
+
+ stt = kvmppc_find_table(vcpu->kvm, liobn);
+ if (!stt)
+ return H_TOO_HARD;
+
+ ret = kvmppc_ioba_validate(stt, ioba, 1);
+ if (ret != H_SUCCESS)
+ return ret;
+
+ idx = (ioba >> stt->page_shift) - stt->offset;
+ page = stt->pages[idx / TCES_PER_PAGE];
+ if (!page) {
+ vcpu->arch.regs.gpr[4] = 0;
+ return H_SUCCESS;
+ }
+ tbl = (u64 *)page_address(page);
+
+ vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
+
+ return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);
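The new virtual-mode kvmppc_h_get_tce() above reports the looked-up TCE through GPR4 rather than its return value, so a caller checks the hcall status and then reads the register. A hedged caller sketch, assuming the usual kvmppc_get_gpr() accessor; the dispatcher name example_handle_h_get_tce is hypothetical:

	/* Hypothetical dispatcher fragment -- not from the patch. */
	static long example_handle_h_get_tce(struct kvm_vcpu *vcpu)
	{
		unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
		unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
		long rc = kvmppc_h_get_tce(vcpu, liobn, ioba);

		/* On H_SUCCESS the TCE value is already in GPR4 (vcpu->arch.regs.gpr[4]). */
		return rc;
	}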
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
deleted file mode 100644
index fdeda6a9cff4..000000000000
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ /dev/null
@@ -1,672 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *
- * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
- * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
- * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kvm.h>
-#include <linux/kvm_host.h>
-#include <linux/highmem.h>
-#include <linux/gfp.h>
-#include <linux/slab.h>
-#include <linux/hugetlb.h>
-#include <linux/list.h>
-#include <linux/stringify.h>
-
-#include <asm/kvm_ppc.h>
-#include <asm/kvm_book3s.h>
-#include <asm/book3s/64/mmu-hash.h>
-#include <asm/mmu_context.h>
-#include <asm/hvcall.h>
-#include <asm/synch.h>
-#include <asm/ppc-opcode.h>
-#include <asm/udbg.h>
-#include <asm/iommu.h>
-#include <asm/tce.h>
-#include <asm/pte-walk.h>
-
-#ifdef CONFIG_BUG
-
-#define WARN_ON_ONCE_RM(condition) ({ \
- static bool __section(".data.unlikely") __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n", \
- __stringify(condition), \
- __func__, __LINE__); \
- dump_stack(); \
- } \
- unlikely(__ret_warn_once); \
-})
-
-#else
-
-#define WARN_ON_ONCE_RM(condition) ({ \
- int __ret_warn_on = !!(condition); \
- unlikely(__ret_warn_on); \
-})
-
-#endif
-
-/*
- * Finds a TCE table descriptor by LIOBN.
- *
- * WARNING: This will be called in real or virtual mode on HV KVM and virtual
- * mode on PR KVM
- */
-struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
- unsigned long liobn)
-{
- struct kvmppc_spapr_tce_table *stt;
-
- list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
- if (stt->liobn == liobn)
- return stt;
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(kvmppc_find_table);
-
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-static long kvmppc_rm_tce_to_ua(struct kvm *kvm,
- unsigned long tce, unsigned long *ua)
-{
- unsigned long gfn = tce >> PAGE_SHIFT;
- struct kvm_memory_slot *memslot;
-
- memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
- if (!memslot)
- return -EINVAL;
-
- *ua = __gfn_to_hva_memslot(memslot, gfn) |
- (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
-
- return 0;
-}
-
-/*
- * Validates TCE address.
- * At the moment flags and page mask are validated.
- * As the host kernel does not access those addresses (just puts them
- * to the table and user space is supposed to process them), we can skip
- * checking other things (such as TCE is a guest RAM address or the page
- * was actually allocated).
- */
-static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
- unsigned long tce)
-{
- unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
- enum dma_data_direction dir = iommu_tce_direction(tce);
- struct kvmppc_spapr_tce_iommu_table *stit;
- unsigned long ua = 0;
-
- /* Allow userspace to poison TCE table */
- if (dir == DMA_NONE)
- return H_SUCCESS;
-
- if (iommu_tce_check_gpa(stt->page_shift, gpa))
- return H_PARAMETER;
-
- if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua))
- return H_TOO_HARD;
-
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
- unsigned long hpa = 0;
- struct mm_iommu_table_group_mem_t *mem;
- long shift = stit->tbl->it_page_shift;
-
- mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
- if (!mem)
- return H_TOO_HARD;
-
- if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
- return H_TOO_HARD;
- }
-
- return H_SUCCESS;
-}
-
-/* Note on the use of page_address() in real mode,
- *
- * It is safe to use page_address() in real mode on ppc64 because
- * page_address() is always defined as lowmem_page_address()
- * which returns __va(PFN_PHYS(page_to_pfn(page))) which is arithmetic
- * operation and does not access page struct.
- *
- * Theoretically page_address() could be defined different
- * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
- * would have to be enabled.
- * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
- * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only
- * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
- * is not expected to be enabled on ppc32, page_address()
- * is safe for ppc32 as well.
- *
- * WARNING: This will be called in real-mode on HV KVM and virtual
- * mode on PR KVM
- */
-static u64 *kvmppc_page_address(struct page *page)
-{
-#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
-#error TODO: fix to avoid page_address() here
-#endif
- return (u64 *) page_address(page);
-}
-
-/*
- * Handles TCE requests for emulated devices.
- * Puts guest TCE values to the table and expects user space to convert them.
- * Cannot fail so kvmppc_rm_tce_validate must be called before it.
- */
-static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
- unsigned long idx, unsigned long tce)
-{
- struct page *page;
- u64 *tbl;
-
- idx -= stt->offset;
- page = stt->pages[idx / TCES_PER_PAGE];
- /*
- * kvmppc_rm_ioba_validate() allows pages not be allocated if TCE is
- * being cleared, otherwise it returns H_TOO_HARD and we skip this.
- */
- if (!page) {
- WARN_ON_ONCE_RM(tce != 0);
- return;
- }
- tbl = kvmppc_page_address(page);
-
- tbl[idx % TCES_PER_PAGE] = tce;
-}
-
-/*
- * TCEs pages are allocated in kvmppc_rm_tce_put() which won't be able to do so
- * in real mode.
- * Check if kvmppc_rm_tce_put() can succeed in real mode, i.e. a TCEs page is
- * allocated or not required (when clearing a tce entry).
- */
-static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
- unsigned long ioba, unsigned long npages, bool clearing)
-{
- unsigned long i, idx, sttpage, sttpages;
- unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);
-
- if (ret)
- return ret;
- /*
- * clearing==true says kvmppc_rm_tce_put won't be allocating pages
- * for empty tces.
- */
- if (clearing)
- return H_SUCCESS;
-
- idx = (ioba >> stt->page_shift) - stt->offset;
- sttpage = idx / TCES_PER_PAGE;
- sttpages = ALIGN(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
- TCES_PER_PAGE;
- for (i = sttpage; i < sttpage + sttpages; ++i)
- if (!stt->pages[i])
- return H_TOO_HARD;
-
- return H_SUCCESS;
-}
-
-static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm,
- struct iommu_table *tbl,
- unsigned long entry, unsigned long *hpa,
- enum dma_data_direction *direction)
-{
- long ret;
-
- ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);
-
- if (!ret && ((*direction == DMA_FROM_DEVICE) ||
- (*direction == DMA_BIDIRECTIONAL))) {
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
- /*
- * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
- * calling this so we still get here a valid UA.
- */
- if (pua && *pua)
- mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
- }
-
- return ret;
-}
-
-static void iommu_tce_kill_rm(struct iommu_table *tbl,
- unsigned long entry, unsigned long pages)
-{
- if (tbl->it_ops->tce_kill)
- tbl->it_ops->tce_kill(tbl, entry, pages, true);
-}
-
-static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
- struct iommu_table *tbl, unsigned long entry)
-{
- unsigned long i;
- unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
- unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
-
- for (i = 0; i < subpages; ++i) {
- unsigned long hpa = 0;
- enum dma_data_direction dir = DMA_NONE;
-
- iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
- }
-}
-
-static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
- struct iommu_table *tbl, unsigned long entry)
-{
- struct mm_iommu_table_group_mem_t *mem = NULL;
- const unsigned long pgsize = 1ULL << tbl->it_page_shift;
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
-
- if (!pua)
- /* it_userspace allocation might be delayed */
- return H_TOO_HARD;
-
- mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
- if (!mem)
- return H_TOO_HARD;
-
- mm_iommu_mapped_dec(mem);
-
- *pua = cpu_to_be64(0);
-
- return H_SUCCESS;
-}
-
-static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
- struct iommu_table *tbl, unsigned long entry)
-{
- enum dma_data_direction dir = DMA_NONE;
- unsigned long hpa = 0;
- long ret;
-
- if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
- /*
- * real mode xchg can fail if struct page crosses
- * a page boundary
- */
- return H_TOO_HARD;
-
- if (dir == DMA_NONE)
- return H_SUCCESS;
-
- ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
- if (ret)
- iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
-
- return ret;
-}
-
-static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
- struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
- unsigned long entry)
-{
- unsigned long i, ret = H_SUCCESS;
- unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
- unsigned long io_entry = entry * subpages;
-
- for (i = 0; i < subpages; ++i) {
- ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
- if (ret != H_SUCCESS)
- break;
- }
-
- iommu_tce_kill_rm(tbl, io_entry, subpages);
-
- return ret;
-}
-
-static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
- unsigned long entry, unsigned long ua,
- enum dma_data_direction dir)
-{
- long ret;
- unsigned long hpa = 0;
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
- struct mm_iommu_table_group_mem_t *mem;
-
- if (!pua)
- /* it_userspace allocation might be delayed */
- return H_TOO_HARD;
-
- mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
- if (!mem)
- return H_TOO_HARD;
-
- if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
- &hpa)))
- return H_TOO_HARD;
-
- if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
- return H_TOO_HARD;
-
- ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
- if (ret) {
- mm_iommu_mapped_dec(mem);
- /*
- * real mode xchg can fail if struct page crosses
- * a page boundary
- */
- return H_TOO_HARD;
- }
-
- if (dir != DMA_NONE)
- kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
-
- *pua = cpu_to_be64(ua);
-
- return 0;
-}
-
-static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
- struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
- unsigned long entry, unsigned long ua,
- enum dma_data_direction dir)
-{
- unsigned long i, pgoff, ret = H_SUCCESS;
- unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
- unsigned long io_entry = entry * subpages;
-
- for (i = 0, pgoff = 0; i < subpages;
- ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
-
- ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
- io_entry + i, ua + pgoff, dir);
- if (ret != H_SUCCESS)
- break;
- }
-
- iommu_tce_kill_rm(tbl, io_entry, subpages);
-
- return ret;
-}
-
-long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
- unsigned long ioba, unsigned long tce)
-{
- struct kvmppc_spapr_tce_table *stt;
- long ret;
- struct kvmppc_spapr_tce_iommu_table *stit;
- unsigned long entry, ua = 0;
- enum dma_data_direction dir;
-
- /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
- /* liobn, ioba, tce); */
-
- stt = kvmppc_find_table(vcpu->kvm, liobn);
- if (!stt)
- return H_TOO_HARD;
-
- ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
- if (ret != H_SUCCESS)
- return ret;
-
- ret = kvmppc_rm_tce_validate(stt, tce);
- if (ret != H_SUCCESS)
- return ret;
-
- dir = iommu_tce_direction(tce);
- if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua))
- return H_PARAMETER;
-
- entry = ioba >> stt->page_shift;
-
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
- if (dir == DMA_NONE)
- ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
- stit->tbl, entry);
- else
- ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
- stit->tbl, entry, ua, dir);
-
- if (ret != H_SUCCESS) {
- kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
- return ret;
- }
- }
-
- kvmppc_rm_tce_put(stt, entry, tce);
-
- return H_SUCCESS;
-}
-
-static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
- unsigned long ua, unsigned long *phpa)
-{
- pte_t *ptep, pte;
- unsigned shift = 0;
-
- /*
- * Called in real mode with MSR_EE = 0. We are safe here.
- * It is ok to do the lookup with arch.pgdir here, because
- * we are doing this on secondary cpus and current task there
- * is not the hypervisor. Also this is safe against THP in the
- * host, because an IPI to primary thread will wait for the secondary
- * to exit which will agains result in the below page table walk
- * to finish.
- */
- /* an rmap lock won't make it safe. because that just ensure hash
- * page table entries are removed with rmap lock held. After that
- * mmu notifier returns and we go ahead and removing ptes from Qemu page table.
- */
- ptep = find_kvm_host_pte(vcpu->kvm, mmu_seq, ua, &shift);
- if (!ptep)
- return -ENXIO;
-
- pte = READ_ONCE(*ptep);
- if (!pte_present(pte))
- return -ENXIO;
-
- if (!shift)
- shift = PAGE_SHIFT;
-
- /* Avoid handling anything potentially complicated in realmode */
- if (shift > PAGE_SHIFT)
- return -EAGAIN;
-
- if (!pte_young(pte))
- return -EAGAIN;
-
- *phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
- (ua & ~PAGE_MASK);
-
- return 0;
-}
-
-long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
- unsigned long liobn, unsigned long ioba,
- unsigned long tce_list, unsigned long npages)
-{
- struct kvm *kvm = vcpu->kvm;
- struct kvmppc_spapr_tce_table *stt;
- long i, ret = H_SUCCESS;
- unsigned long tces, entry, ua = 0;
- unsigned long mmu_seq;
- bool prereg = false;
- struct kvmppc_spapr_tce_iommu_table *stit;
-
- /*
- * used to check for invalidations in progress
- */
- mmu_seq = kvm->mmu_notifier_seq;
- smp_rmb();
-
- stt = kvmppc_find_table(vcpu->kvm, liobn);
- if (!stt)
- return H_TOO_HARD;
-
- entry = ioba >> stt->page_shift;
- /*
- * The spec says that the maximum size of the list is 512 TCEs
- * so the whole table addressed resides in 4K page
- */
- if (npages > 512)
- return H_PARAMETER;
-
- if (tce_list & (SZ_4K - 1))
- return H_PARAMETER;
-
- ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
- if (ret != H_SUCCESS)
- return ret;
-
- if (mm_iommu_preregistered(vcpu->kvm->mm)) {
- /*
- * We get here if guest memory was pre-registered which
- * is normally VFIO case and gpa->hpa translation does not
- * depend on hpt.
- */
- struct mm_iommu_table_group_mem_t *mem;
-
- if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
- return H_TOO_HARD;
-
- mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
- if (mem)
- prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
- IOMMU_PAGE_SHIFT_4K, &tces) == 0;
- }
-
- if (!prereg) {
- /*
- * This is usually a case of a guest with emulated devices only
- * when TCE list is not in preregistered memory.
- * We do not require memory to be preregistered in this case
- * so lock rmap and do __find_linux_pte_or_hugepte().
- */
- if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
- return H_TOO_HARD;
-
- arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
- if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) {
- ret = H_TOO_HARD;
- goto unlock_exit;
- }
- }
-
- for (i = 0; i < npages; ++i) {
- unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
-
- ret = kvmppc_rm_tce_validate(stt, tce);
- if (ret != H_SUCCESS)
- goto unlock_exit;
- }
-
- for (i = 0; i < npages; ++i) {
- unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
-
- ua = 0;
- if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
- ret = H_PARAMETER;
- goto unlock_exit;
- }
-
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
- ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
- stit->tbl, entry + i, ua,
- iommu_tce_direction(tce));
-
- if (ret != H_SUCCESS) {
- kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
- entry + i);
- goto unlock_exit;
- }
- }
-
- kvmppc_rm_tce_put(stt, entry + i, tce);
- }
-
-unlock_exit:
- if (!prereg)
- arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
- return ret;
-}
-
-long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
- unsigned long liobn, unsigned long ioba,
- unsigned long tce_value, unsigned long npages)
-{
- struct kvmppc_spapr_tce_table *stt;
- long i, ret;
- struct kvmppc_spapr_tce_iommu_table *stit;
-
- stt = kvmppc_find_table(vcpu->kvm, liobn);
- if (!stt)
- return H_TOO_HARD;
-
- ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
- if (ret != H_SUCCESS)
- return ret;
-
- /* Check permission bits only to allow userspace poison TCE for debug */
- if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
- return H_PARAMETER;
-
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
- unsigned long entry = ioba >> stt->page_shift;
-
- for (i = 0; i < npages; ++i) {
- ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
- stit->tbl, entry + i);
-
- if (ret == H_SUCCESS)
- continue;
-
- if (ret == H_TOO_HARD)
- return ret;
-
- WARN_ON_ONCE_RM(1);
- kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
- }
- }
-
- for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
- kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
-
- return ret;
-}
-
-/* This can be called in either virtual mode or real mode */
-long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
- unsigned long ioba)
-{
- struct kvmppc_spapr_tce_table *stt;
- long ret;
- unsigned long idx;
- struct page *page;
- u64 *tbl;
-
- stt = kvmppc_find_table(vcpu->kvm, liobn);
- if (!stt)
- return H_TOO_HARD;
-
- ret = kvmppc_ioba_validate(stt, ioba, 1);
- if (ret != H_SUCCESS)
- return ret;
-
- idx = (ioba >> stt->page_shift) - stt->offset;
- page = stt->pages[idx / TCES_PER_PAGE];
- if (!page) {
- vcpu->arch.regs.gpr[4] = 0;
- return H_SUCCESS;
- }
- tbl = (u64 *)page_address(page);
-
- vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
-
- return H_SUCCESS;
-}
-EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);
-
-#endif /* KVM_BOOK3S_HV_POSSIBLE */
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index fdb57be71aa6..5bbfb2eed127 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -268,7 +268,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
/*
* add rules to fit in ISA specification regarding TM
- * state transistion in TM disable/Suspended state,
+ * state transition in TM disable/Suspended state,
* and target TM state is TM inactive(00) state. (the
* change should be suppressed).
*/
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 6fa518f6501d..e08fb3124dca 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -42,6 +42,7 @@
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/of.h>
+#include <linux/irqdomain.h>
#include <asm/ftrace.h>
#include <asm/reg.h>
@@ -1326,6 +1327,12 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
case H_CONFER:
case H_REGISTER_VPA:
case H_SET_MODE:
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ case H_GET_TCE:
+ case H_PUT_TCE:
+ case H_PUT_TCE_INDIRECT:
+ case H_STUFF_TCE:
+#endif
case H_LOGICAL_CI_LOAD:
case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS
@@ -2834,7 +2841,7 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
* to trap and then we emulate them.
*/
vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
- HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX;
+ HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
if (cpu_has_feature(CPU_FTR_HVMODE)) {
vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -3967,6 +3974,7 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
kvmhv_save_hv_regs(vcpu, &hvregs);
hvregs.lpcr = lpcr;
+ hvregs.amor = ~0;
vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
hvregs.version = HV_GUEST_STATE_VERSION;
if (vcpu->arch.nested) {
@@ -4029,6 +4037,8 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long lpcr, u64 *tb)
{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
u64 next_timer;
int trap;
@@ -4048,34 +4058,61 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
/* H_CEDE has to be handled now, not later */
- if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
+ if (trap == BOOK3S_INTERRUPT_SYSCALL && !nested &&
kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
kvmppc_cede(vcpu);
kvmppc_set_gpr(vcpu, 3, 0);
trap = 0;
}
- } else {
- struct kvm *kvm = vcpu->kvm;
+ } else if (nested) {
+ __this_cpu_write(cpu_in_guest, kvm);
+ trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb);
+ __this_cpu_write(cpu_in_guest, NULL);
+ } else {
kvmppc_xive_push_vcpu(vcpu);
__this_cpu_write(cpu_in_guest, kvm);
trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb);
__this_cpu_write(cpu_in_guest, NULL);
- if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
+ if (trap == BOOK3S_INTERRUPT_SYSCALL &&
!(vcpu->arch.shregs.msr & MSR_PR)) {
unsigned long req = kvmppc_get_gpr(vcpu, 3);
- /* H_CEDE has to be handled now, not later */
+ /*
+ * XIVE rearm and XICS hcalls must be handled
+ * before xive context is pulled (is this
+ * true?)
+ */
if (req == H_CEDE) {
+ /* H_CEDE has to be handled now */
kvmppc_cede(vcpu);
- kvmppc_xive_rearm_escalation(vcpu); /* may un-cede */
+ if (!kvmppc_xive_rearm_escalation(vcpu)) {
+ /*
+ * Pending escalation so abort
+ * the cede.
+ */
+ vcpu->arch.ceded = 0;
+ }
kvmppc_set_gpr(vcpu, 3, 0);
trap = 0;
- /* XICS hcalls must be handled before xive is pulled */
+ } else if (req == H_ENTER_NESTED) {
+ /*
+ * L2 should not run with the L1
+ * context so rearm and pull it.
+ */
+ if (!kvmppc_xive_rearm_escalation(vcpu)) {
+ /*
+ * Pending escalation so abort
+ * H_ENTER_NESTED.
+ */
+ kvmppc_set_gpr(vcpu, 3, 0);
+ trap = 0;
+ }
+
} else if (hcall_is_xics(req)) {
int ret;
@@ -4233,13 +4270,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
start_wait = ktime_get();
vc->vcore_state = VCORE_SLEEPING;
- trace_kvmppc_vcore_blocked(vc, 0);
+ trace_kvmppc_vcore_blocked(vc->runner, 0);
spin_unlock(&vc->lock);
schedule();
finish_rcuwait(&vc->wait);
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
- trace_kvmppc_vcore_blocked(vc, 1);
+ trace_kvmppc_vcore_blocked(vc->runner, 1);
++vc->runner->stat.halt_successful_wait;
cur = ktime_get();
@@ -4519,9 +4556,14 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
if (!nested) {
kvmppc_core_prepare_to_enter(vcpu);
- if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
- &vcpu->arch.pending_exceptions))
+ if (vcpu->arch.shregs.msr & MSR_EE) {
+ if (xive_interrupt_pending(vcpu))
+ kvmppc_inject_interrupt_hv(vcpu,
+ BOOK3S_INTERRUPT_EXTERNAL, 0);
+ } else if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
+ &vcpu->arch.pending_exceptions)) {
lpcr |= LPCR_MER;
+ }
} else if (vcpu->arch.pending_exceptions ||
vcpu->arch.doorbell_request ||
xive_interrupt_pending(vcpu)) {
@@ -4619,9 +4661,9 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
if (kvmppc_vcpu_check_block(vcpu))
break;
- trace_kvmppc_vcore_blocked(vc, 0);
+ trace_kvmppc_vcore_blocked(vcpu, 0);
schedule();
- trace_kvmppc_vcore_blocked(vc, 1);
+ trace_kvmppc_vcore_blocked(vcpu, 1);
}
finish_rcuwait(wait);
}
@@ -5283,6 +5325,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
lpcr &= LPCR_PECE | LPCR_LPES;
} else {
+ /*
+ * The L2 LPES mode will be set by the L0 according to whether
+ * or not it needs to take external interrupts in HV mode.
+ */
lpcr = 0;
}
lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 7e52d0beee77..88a8f6473c4e 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -489,70 +489,6 @@ static long kvmppc_read_one_intr(bool *again)
return kvmppc_check_passthru(xisr, xirr, again);
}
-#ifdef CONFIG_KVM_XICS
-unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
-{
- if (!kvmppc_xics_enabled(vcpu))
- return H_TOO_HARD;
- if (xics_on_xive())
- return xive_rm_h_xirr(vcpu);
- else
- return xics_rm_h_xirr(vcpu);
-}
-
-unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
-{
- if (!kvmppc_xics_enabled(vcpu))
- return H_TOO_HARD;
- vcpu->arch.regs.gpr[5] = get_tb();
- if (xics_on_xive())
- return xive_rm_h_xirr(vcpu);
- else
- return xics_rm_h_xirr(vcpu);
-}
-
-unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
-{
- if (!kvmppc_xics_enabled(vcpu))
- return H_TOO_HARD;
- if (xics_on_xive())
- return xive_rm_h_ipoll(vcpu, server);
- else
- return H_TOO_HARD;
-}
-
-int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr)
-{
- if (!kvmppc_xics_enabled(vcpu))
- return H_TOO_HARD;
- if (xics_on_xive())
- return xive_rm_h_ipi(vcpu, server, mfrr);
- else
- return xics_rm_h_ipi(vcpu, server, mfrr);
-}
-
-int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
-{
- if (!kvmppc_xics_enabled(vcpu))
- return H_TOO_HARD;
- if (xics_on_xive())
- return xive_rm_h_cppr(vcpu, cppr);
- else
- return xics_rm_h_cppr(vcpu, cppr);
-}
-
-int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
-{
- if (!kvmppc_xics_enabled(vcpu))
- return H_TOO_HARD;
- if (xics_on_xive())
- return xive_rm_h_eoi(vcpu, xirr);
- else
- return xics_rm_h_eoi(vcpu, xirr);
-}
-#endif /* CONFIG_KVM_XICS */
-
void kvmppc_bad_interrupt(struct pt_regs *regs)
{
/*
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index c943a051c6e7..0644732d1a25 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -261,8 +261,7 @@ static void load_l2_hv_regs(struct kvm_vcpu *vcpu,
/*
* Don't let L1 change LPCR bits for the L2 except these:
*/
- mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
- LPCR_LPES | LPCR_MER;
+ mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | LPCR_MER;
/*
* Additional filtering is required depending on hardware
@@ -439,10 +438,11 @@ long kvmhv_nested_init(void)
if (!radix_enabled())
return -ENODEV;
- /* find log base 2 of KVMPPC_NR_LPIDS, rounding up */
- ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1;
- if (ptb_order < 8)
- ptb_order = 8;
+ /* Partition table entry is 1<<4 bytes in size, hence the 4. */
+ ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4;
+ /* Minimum partition table size is 1<<12 bytes */
+ if (ptb_order < 12)
+ ptb_order = 12;
pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
GFP_KERNEL);
if (!pseries_partition_tb) {
@@ -450,7 +450,7 @@ long kvmhv_nested_init(void)
return -ENOMEM;
}
- ptcr = __pa(pseries_partition_tb) | (ptb_order - 8);
+ ptcr = __pa(pseries_partition_tb) | (ptb_order - 12);
rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
if (rc != H_SUCCESS) {
pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
@@ -521,11 +521,6 @@ static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
}
-void kvmhv_vm_nested_init(struct kvm *kvm)
-{
- kvm->arch.max_nested_lpid = -1;
-}
-
/*
* Handle the H_SET_PARTITION_TABLE hcall.
* r4 = guest real address of partition table + log_2(size) - 12
@@ -539,16 +534,14 @@ long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
long ret = H_SUCCESS;
srcu_idx = srcu_read_lock(&kvm->srcu);
- /*
- * Limit the partition table to 4096 entries (because that's what
- * hardware supports), and check the base address.
- */
- if ((ptcr & PRTS_MASK) > 12 - 8 ||
+ /* Check partition size and base address. */
+ if ((ptcr & PRTS_MASK) + 12 - 4 > KVM_MAX_NESTED_GUESTS_SHIFT ||
!kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
ret = H_PARAMETER;
srcu_read_unlock(&kvm->srcu, srcu_idx);
if (ret == H_SUCCESS)
kvm->arch.l1_ptcr = ptcr;
+
return ret;
}
@@ -644,7 +637,7 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
ret = -EFAULT;
ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
- if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) {
+ if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) {
int srcu_idx = srcu_read_lock(&kvm->srcu);
ret = kvm_read_guest(kvm, ptbl_addr,
&ptbl_entry, sizeof(ptbl_entry));
@@ -660,6 +653,35 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
kvmhv_set_nested_ptbl(gp);
}
+void kvmhv_vm_nested_init(struct kvm *kvm)
+{
+ idr_init(&kvm->arch.kvm_nested_guest_idr);
+}
+
+static struct kvm_nested_guest *__find_nested(struct kvm *kvm, int lpid)
+{
+ return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid);
+}
+
+static bool __prealloc_nested(struct kvm *kvm, int lpid)
+{
+ if (idr_alloc(&kvm->arch.kvm_nested_guest_idr,
+ NULL, lpid, lpid + 1, GFP_KERNEL) != lpid)
+ return false;
+ return true;
+}
+
+static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp)
+{
+ if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid))
+ WARN_ON(1);
+}
+
+static void __remove_nested(struct kvm *kvm, int lpid)
+{
+ idr_remove(&kvm->arch.kvm_nested_guest_idr, lpid);
+}
+
static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
{
struct kvm_nested_guest *gp;
@@ -720,13 +742,8 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
long ref;
spin_lock(&kvm->mmu_lock);
- if (gp == kvm->arch.nested_guests[lpid]) {
- kvm->arch.nested_guests[lpid] = NULL;
- if (lpid == kvm->arch.max_nested_lpid) {
- while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
- ;
- kvm->arch.max_nested_lpid = lpid;
- }
+ if (gp == __find_nested(kvm, lpid)) {
+ __remove_nested(kvm, lpid);
--gp->refcnt;
}
ref = gp->refcnt;
@@ -743,24 +760,22 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
*/
void kvmhv_release_all_nested(struct kvm *kvm)
{
- int i;
+ int lpid;
struct kvm_nested_guest *gp;
struct kvm_nested_guest *freelist = NULL;
struct kvm_memory_slot *memslot;
int srcu_idx, bkt;
spin_lock(&kvm->mmu_lock);
- for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
- gp = kvm->arch.nested_guests[i];
- if (!gp)
- continue;
- kvm->arch.nested_guests[i] = NULL;
+ idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
+ __remove_nested(kvm, lpid);
if (--gp->refcnt == 0) {
gp->next = freelist;
freelist = gp;
}
}
- kvm->arch.max_nested_lpid = -1;
+ idr_destroy(&kvm->arch.kvm_nested_guest_idr);
+ /* idr is empty and may be reused at this point */
spin_unlock(&kvm->mmu_lock);
while ((gp = freelist) != NULL) {
freelist = gp->next;
@@ -792,12 +807,11 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
{
struct kvm_nested_guest *gp, *newgp;
- if (l1_lpid >= KVM_MAX_NESTED_GUESTS ||
- l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
+ if (l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
return NULL;
spin_lock(&kvm->mmu_lock);
- gp = kvm->arch.nested_guests[l1_lpid];
+ gp = __find_nested(kvm, l1_lpid);
if (gp)
++gp->refcnt;
spin_unlock(&kvm->mmu_lock);
@@ -808,17 +822,19 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
newgp = kvmhv_alloc_nested(kvm, l1_lpid);
if (!newgp)
return NULL;
+
+ if (!__prealloc_nested(kvm, l1_lpid)) {
+ kvmhv_release_nested(newgp);
+ return NULL;
+ }
+
spin_lock(&kvm->mmu_lock);
- if (kvm->arch.nested_guests[l1_lpid]) {
- /* someone else beat us to it */
- gp = kvm->arch.nested_guests[l1_lpid];
- } else {
- kvm->arch.nested_guests[l1_lpid] = newgp;
+ gp = __find_nested(kvm, l1_lpid);
+ if (!gp) {
+ __add_nested(kvm, l1_lpid, newgp);
++newgp->refcnt;
gp = newgp;
newgp = NULL;
- if (l1_lpid > kvm->arch.max_nested_lpid)
- kvm->arch.max_nested_lpid = l1_lpid;
}
++gp->refcnt;
spin_unlock(&kvm->mmu_lock);
@@ -841,20 +857,13 @@ void kvmhv_put_nested(struct kvm_nested_guest *gp)
kvmhv_release_nested(gp);
}
-static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
-{
- if (lpid > kvm->arch.max_nested_lpid)
- return NULL;
- return kvm->arch.nested_guests[lpid];
-}
-
pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
unsigned long ea, unsigned *hshift)
{
struct kvm_nested_guest *gp;
pte_t *pte;
- gp = kvmhv_find_nested(kvm, lpid);
+ gp = __find_nested(kvm, lpid);
if (!gp)
return NULL;
@@ -960,7 +969,7 @@ static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
gpa = n_rmap & RMAP_NESTED_GPA_MASK;
lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
- gp = kvmhv_find_nested(kvm, lpid);
+ gp = __find_nested(kvm, lpid);
if (!gp)
return;
@@ -1152,16 +1161,13 @@ static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *gp;
- int i;
+ int lpid;
spin_lock(&kvm->mmu_lock);
- for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
- gp = kvm->arch.nested_guests[i];
- if (gp) {
- spin_unlock(&kvm->mmu_lock);
- kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
- spin_lock(&kvm->mmu_lock);
- }
+ idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
+ spin_unlock(&kvm->mmu_lock);
+ kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
+ spin_lock(&kvm->mmu_lock);
}
spin_unlock(&kvm->mmu_lock);
}
@@ -1313,7 +1319,7 @@ long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
* H_ENTER_NESTED call. Since we can't differentiate this case from
* the invalid case, we ignore such flush requests and return success.
*/
- if (!kvmhv_find_nested(vcpu->kvm, lpid))
+ if (!__find_nested(vcpu->kvm, lpid))
return H_SUCCESS;
/*
@@ -1657,15 +1663,12 @@ long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
{
- int ret = -1;
+ int ret = lpid + 1;
spin_lock(&kvm->mmu_lock);
- while (++lpid <= kvm->arch.max_nested_lpid) {
- if (kvm->arch.nested_guests[lpid]) {
- ret = lpid;
- break;
- }
- }
+ if (!idr_get_next(&kvm->arch.kvm_nested_guest_idr, &ret))
+ ret = -1;
spin_unlock(&kvm->mmu_lock);
+
return ret;
}
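The book3s_hv_nested.c changes above replace the fixed-size nested_guests[] array and its max_nested_lpid bookkeeping with an IDR keyed by L1 LPID: the slot is reserved with a NULL placeholder outside the lock, then the real pointer is published under kvm->mmu_lock. A minimal sketch of that pattern under stated assumptions; the example_* names are illustrative, not from the patch:

	#include <linux/idr.h>
	#include <linux/gfp.h>

	static DEFINE_IDR(example_nested_idr);	/* stands in for kvm->arch.kvm_nested_guest_idr */

	static int example_add(int lpid, void *gp)
	{
		/* Reserve the slot with a NULL placeholder; may sleep, so done outside the lock. */
		if (idr_alloc(&example_nested_idr, NULL, lpid, lpid + 1, GFP_KERNEL) != lpid)
			return -ENOMEM;

		/* Publish the real pointer later, typically while holding the lock. */
		idr_replace(&example_nested_idr, gp, lpid);
		return 0;
	}

	static void *example_find(int lpid)
	{
		return idr_find(&example_nested_idr, lpid);
	}

	static void example_remove(int lpid)
	{
		idr_remove(&example_nested_idr, lpid);
	}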
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index a28e5b3daabd..112a09b33328 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -379,7 +379,7 @@ void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
{
/*
* current->thread.xxx registers must all be restored to host
- * values before a potential context switch, othrewise the context
+ * values before a potential context switch, otherwise the context
* switch itself will overwrite current->thread.xxx with the values
* from the guest SPRs.
*/
@@ -539,8 +539,10 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6
{
struct kvm_nested_guest *nested = vcpu->arch.nested;
u32 lpid;
+ u32 pid;
lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
+ pid = vcpu->arch.pid;
/*
* Prior memory accesses to host PID Q3 must be completed before we
@@ -551,7 +553,7 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6
isync();
mtspr(SPRN_LPID, lpid);
mtspr(SPRN_LPCR, lpcr);
- mtspr(SPRN_PID, vcpu->arch.pid);
+ mtspr(SPRN_PID, pid);
/*
* isync not required here because we are HRFID'ing to guest before
* any guest context access, which is context synchronising.
@@ -561,9 +563,11 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6
static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
{
u32 lpid;
+ u32 pid;
int i;
lpid = kvm->arch.lpid;
+ pid = vcpu->arch.pid;
/*
* See switch_mmu_to_guest_radix. ptesync should not be required here
@@ -574,7 +578,7 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64
isync();
mtspr(SPRN_LPID, lpid);
mtspr(SPRN_LPCR, lpcr);
- mtspr(SPRN_PID, vcpu->arch.pid);
+ mtspr(SPRN_PID, pid);
for (i = 0; i < vcpu->arch.slb_max; i++)
mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
@@ -585,6 +589,9 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64
static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
{
+ u32 lpid = kvm->arch.host_lpid;
+ u64 lpcr = kvm->arch.host_lpcr;
+
/*
* The guest has exited, so guest MMU context is no longer being
* non-speculatively accessed, but a hwsync is needed before the
@@ -594,8 +601,8 @@ static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
asm volatile("hwsync" ::: "memory");
isync();
mtspr(SPRN_PID, pid);
- mtspr(SPRN_LPID, kvm->arch.host_lpid);
- mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
+ mtspr(SPRN_LPID, lpid);
+ mtspr(SPRN_LPCR, lpcr);
/*
* isync is not required after the switch, because mtmsrd with L=0
* is performed after this switch, which is context synchronising.
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 587c33fc4564..e165bfa842bf 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -479,6 +479,11 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
}
}
+unsigned long xics_rm_h_xirr_x(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.regs.gpr[5] = get_tb();
+ return xics_rm_h_xirr(vcpu);
+}
unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
{
@@ -883,7 +888,7 @@ long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
/* --- Non-real mode XICS-related built-in routines --- */
-/**
+/*
* Host Operations poked by RM KVM
*/
static void rm_host_ipi_action(int action, void *data)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xive.c b/arch/powerpc/kvm/book3s_hv_rm_xive.c
deleted file mode 100644
index dd9880731bd6..000000000000
--- a/arch/powerpc/kvm/book3s_hv_rm_xive.c
+++ /dev/null
@@ -1,46 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/kvm_host.h>
-#include <linux/err.h>
-#include <linux/kernel_stat.h>
-#include <linux/pgtable.h>
-
-#include <asm/kvm_book3s.h>
-#include <asm/kvm_ppc.h>
-#include <asm/hvcall.h>
-#include <asm/xics.h>
-#include <asm/debug.h>
-#include <asm/synch.h>
-#include <asm/cputhreads.h>
-#include <asm/ppc-opcode.h>
-#include <asm/pnv-pci.h>
-#include <asm/opal.h>
-#include <asm/smp.h>
-#include <asm/xive.h>
-#include <asm/xive-regs.h>
-
-#include "book3s_xive.h"
-
-/* XXX */
-#include <asm/udbg.h>
-//#define DBG(fmt...) udbg_printf(fmt)
-#define DBG(fmt...) do { } while(0)
-
-static inline void __iomem *get_tima_phys(void)
-{
- return local_paca->kvm_hstate.xive_tima_phys;
-}
-
-#undef XIVE_RUNTIME_CHECKS
-#define X_PFX xive_rm_
-#define X_STATIC
-#define X_STAT_PFX stat_rm_
-#define __x_tima get_tima_phys()
-#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_page))
-#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_page))
-#define __x_writeb __raw_rm_writeb
-#define __x_readw __raw_rm_readw
-#define __x_readq __raw_rm_readq
-#define __x_writeq __raw_rm_writeq
-
-#include "book3s_xive_template.c"
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index d185dee26026..0fc0e68d20d0 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -51,6 +51,14 @@
#define STACK_SLOT_FSCR (SFS-96)
/*
+ * Use the last LPID (all implemented LPID bits = 1) for partition switching.
+ * This is reserved in the LPID allocator. POWER7 only implements 0x3ff, but
+ * we write 0xfff into the LPID SPR anyway, which seems to work and just
+ * ignores the top bits.
+ */
+#define LPID_RSVD 0xfff
+
+/*
* Call kvmppc_hv_entry in real mode.
* Must be called with interrupts hard-disabled.
*
@@ -1784,13 +1792,8 @@ hcall_real_table:
.long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
.long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
.long DOTSYM(kvmppc_h_protect) - hcall_real_table
-#ifdef CONFIG_SPAPR_TCE_IOMMU
- .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
- .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
-#else
.long 0 /* 0x1c */
.long 0 /* 0x20 */
-#endif
.long 0 /* 0x24 - H_SET_SPRG0 */
.long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
@@ -1808,11 +1811,11 @@ hcall_real_table:
.long 0 /* 0x5c */
.long 0 /* 0x60 */
#ifdef CONFIG_KVM_XICS
- .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
- .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
- .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
- .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
- .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
+ .long DOTSYM(xics_rm_h_eoi) - hcall_real_table
+ .long DOTSYM(xics_rm_h_cppr) - hcall_real_table
+ .long DOTSYM(xics_rm_h_ipi) - hcall_real_table
+ .long 0 /* 0x70 - H_IPOLL */
+ .long DOTSYM(xics_rm_h_xirr) - hcall_real_table
#else
.long 0 /* 0x64 - H_EOI */
.long 0 /* 0x68 - H_CPPR */
@@ -1868,13 +1871,8 @@ hcall_real_table:
.long 0 /* 0x12c */
.long 0 /* 0x130 */
.long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
-#ifdef CONFIG_SPAPR_TCE_IOMMU
- .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
- .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
-#else
.long 0 /* 0x138 */
.long 0 /* 0x13c */
-#endif
.long 0 /* 0x140 */
.long 0 /* 0x144 */
.long 0 /* 0x148 */
@@ -1987,7 +1985,7 @@ hcall_real_table:
.long 0 /* 0x2f4 */
.long 0 /* 0x2f8 */
#ifdef CONFIG_KVM_XICS
- .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
+ .long DOTSYM(xics_rm_h_xirr_x) - hcall_real_table
#else
.long 0 /* 0x2fc - H_XIRR_X*/
#endif
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 45c993dd05f5..598006301620 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -120,7 +120,7 @@ static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);
* content is un-encrypted.
*
* (c) Normal - The GFN is a normal. The GFN is associated with
- * a normal VM. The contents of the GFN is accesible to
+ * a normal VM. The contents of the GFN are accessible to
* the Hypervisor. Its content is never encrypted.
*
* States of a VM.
@@ -361,13 +361,15 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
struct kvm *kvm, unsigned long *gfn)
{
- struct kvmppc_uvmem_slot *p;
+ struct kvmppc_uvmem_slot *p = NULL, *iter;
bool ret = false;
unsigned long i;
- list_for_each_entry(p, &kvm->arch.uvmem_pfns, list)
- if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns)
+ list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
+ if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
+ p = iter;
break;
+ }
if (!p)
return ret;
/*
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 25a3679fb590..f4bec2fc51aa 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -15,7 +15,7 @@
#include <asm/asm-compat.h>
#if defined(CONFIG_PPC_BOOK3S_64)
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#define FUNC(name) name
#else
#define FUNC(name) GLUE(.,name)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 7bf9e6ca5c2d..d6abed6e51e6 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1287,7 +1287,7 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
/* Get last sc for papr */
if (vcpu->arch.papr_enabled) {
- /* The sc instuction points SRR0 to the next inst */
+ /* The sc instruction points SRR0 to the next inst */
emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
if (emul != EMULATE_DONE) {
kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index dc4f51ac84bc..a1f2978b2a86 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -433,9 +433,12 @@ int kvmppc_hcall_impl_pr(unsigned long cmd)
case H_REMOVE:
case H_PROTECT:
case H_BULK_REMOVE:
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ case H_GET_TCE:
case H_PUT_TCE:
case H_PUT_TCE_INDIRECT:
case H_STUFF_TCE:
+#endif
case H_CEDE:
case H_LOGICAL_CI_LOAD:
case H_LOGICAL_CI_STORE:
@@ -464,7 +467,10 @@ static unsigned int default_hcall_list[] = {
H_REMOVE,
H_PROTECT,
H_BULK_REMOVE,
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ H_GET_TCE,
H_PUT_TCE,
+#endif
H_CEDE,
H_SET_MODE,
#ifdef CONFIG_KVM_XICS
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index b45b750fa77a..03886ca24498 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -26,7 +26,7 @@
#if defined(CONFIG_PPC_BOOK3S_64)
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#define FUNC(name) name
#else
#define FUNC(name) GLUE(.,name)
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index ab6d37d78c62..589a8f257120 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -462,7 +462,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
* new guy. We cannot assume that the rejected interrupt is less
* favored than the new one, and thus doesn't need to be delivered,
* because by the time we exit icp_try_to_deliver() the target
- * processor may well have alrady consumed & completed it, and thus
+ * processor may well have already consumed & completed it, and thus
* the rejected interrupt might actually be already acceptable.
*/
if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index c0ce5531d9bc..4ca23644f752 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -30,27 +30,629 @@
#include "book3s_xive.h"
-
-/*
- * Virtual mode variants of the hcalls for use on radix/radix
- * with AIL. They require the VCPU's VP to be "pushed"
- *
- * We still instantiate them here because we use some of the
- * generated utility functions as well in this file.
- */
-#define XIVE_RUNTIME_CHECKS
-#define X_PFX xive_vm_
-#define X_STATIC static
-#define X_STAT_PFX stat_vm_
-#define __x_tima xive_tima
#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
-#define __x_writeb __raw_writeb
-#define __x_readw __raw_readw
-#define __x_readq __raw_readq
-#define __x_writeq __raw_writeq
-#include "book3s_xive_template.c"
+/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */
+#define XICS_DUMMY 1
+
+static void xive_vm_ack_pending(struct kvmppc_xive_vcpu *xc)
+{
+ u8 cppr;
+ u16 ack;
+
+ /*
+ * Ensure any previous store to CPPR is ordered vs.
+ * the subsequent loads from PIPR or ACK.
+ */
+ eieio();
+
+ /* Perform the acknowledge OS to register cycle. */
+ ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));
+
+ /* Synchronize subsequent queue accesses */
+ mb();
+
+ /* XXX Check grouping level */
+
+ /* Anything ? */
+ if (!((ack >> 8) & TM_QW1_NSR_EO))
+ return;
+
+ /* Grab CPPR of the most favored pending interrupt */
+ cppr = ack & 0xff;
+ if (cppr < 8)
+ xc->pending |= 1 << cppr;
+
+ /* Check consistency */
+ if (cppr >= xc->hw_cppr)
+ pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
+ smp_processor_id(), cppr, xc->hw_cppr);
+
+ /*
+ * Update our image of the HW CPPR. We don't yet modify
+ * xc->cppr, this will be done as we scan for interrupts
+ * in the queues.
+ */
+ xc->hw_cppr = cppr;
+}
+
+static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)
+{
+ u64 val;
+
+ if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
+ offset |= XIVE_ESB_LD_ST_MO;
+
+ val = __raw_readq(__x_eoi_page(xd) + offset);
+#ifdef __LITTLE_ENDIAN__
+ val >>= 64-8;
+#endif
+ return (u8)val;
+}
+
+
+static void xive_vm_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
+{
+ /* If the XIVE supports the new "store EOI" facility, use it */
+ if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
+ __raw_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
+ else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
+ /*
+ * For LSIs the HW EOI cycle is used rather than PQ bits,
+ * as they are automatically re-triggred in HW when still
+ * pending.
+ */
+ __raw_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
+ } else {
+ uint64_t eoi_val;
+
+ /*
+ * Otherwise for EOI, we use the special MMIO that does
+ * a clear of both P and Q and returns the old Q,
+ * except for LSIs where we use the "EOI cycle" special
+ * load.
+ *
+ * This allows us to then do a re-trigger if Q was set
+ * rather than synthesizing an interrupt in software
+ */
+ eoi_val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_00);
+
+ /* Re-trigger if needed */
+ if ((eoi_val & 1) && __x_trig_page(xd))
+ __raw_writeq(0, __x_trig_page(xd));
+ }
+}
+
+enum {
+ scan_fetch,
+ scan_poll,
+ scan_eoi,
+};
+
+static u32 xive_vm_scan_interrupts(struct kvmppc_xive_vcpu *xc,
+ u8 pending, int scan_type)
+{
+ u32 hirq = 0;
+ u8 prio = 0xff;
+
+ /* Find highest pending priority */
+ while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
+ struct xive_q *q;
+ u32 idx, toggle;
+ __be32 *qpage;
+
+ /*
+ * If pending is 0 this will return 0xff which is what
+ * we want
+ */
+ prio = ffs(pending) - 1;
+
+ /* Don't scan past the guest cppr */
+ if (prio >= xc->cppr || prio > 7) {
+ if (xc->mfrr < xc->cppr) {
+ prio = xc->mfrr;
+ hirq = XICS_IPI;
+ }
+ break;
+ }
+
+ /* Grab queue and pointers */
+ q = &xc->queues[prio];
+ idx = q->idx;
+ toggle = q->toggle;
+
+ /*
+ * Snapshot the queue page. The test further down for EOI
+ * must use the same "copy" that was used by __xive_read_eq
+ * since qpage can be set concurrently and we don't want
+ * to miss an EOI.
+ */
+ qpage = READ_ONCE(q->qpage);
+
+skip_ipi:
+ /*
+ * Try to fetch from the queue. Will return 0 for a
+ * non-queueing priority (ie, qpage = 0).
+ */
+ hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);
+
+ /*
+ * If this was a signal for an MFRR change done by
+ * H_IPI, we skip it. Additionally, if we were fetching
+ * we EOI it now, thus re-enabling reception of a new
+ * such signal.
+ *
+ * We also need to do that if prio is 0 and we had no
+ * page for the queue. In this case, we have non-queued
+ * IPI that needs to be EOId.
+ *
+ * This is safe because if we have another pending MFRR
+ * change that wasn't observed above, the Q bit will have
+ * been set and another occurrence of the IPI will trigger.
+ */
+ if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
+ if (scan_type == scan_fetch) {
+ xive_vm_source_eoi(xc->vp_ipi,
+ &xc->vp_ipi_data);
+ q->idx = idx;
+ q->toggle = toggle;
+ }
+ /* Loop back on same queue with updated idx/toggle */
+ WARN_ON(hirq && hirq != XICS_IPI);
+ if (hirq)
+ goto skip_ipi;
+ }
+
+ /* If it's the dummy interrupt, continue searching */
+ if (hirq == XICS_DUMMY)
+ goto skip_ipi;
+
+ /* Clear the pending bit if the queue is now empty */
+ if (!hirq) {
+ pending &= ~(1 << prio);
+
+ /*
+ * Check if the queue count needs adjusting due to
+ * interrupts being moved away.
+ */
+ if (atomic_read(&q->pending_count)) {
+ int p = atomic_xchg(&q->pending_count, 0);
+
+ if (p) {
+ WARN_ON(p > atomic_read(&q->count));
+ atomic_sub(p, &q->count);
+ }
+ }
+ }
+
+ /*
+ * If the most favoured prio we found pending is equal to or
+ * less favoured than a pending IPI, we return
+ * the IPI instead.
+ */
+ if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
+ prio = xc->mfrr;
+ hirq = XICS_IPI;
+ break;
+ }
+
+ /* If fetching, update queue pointers */
+ if (scan_type == scan_fetch) {
+ q->idx = idx;
+ q->toggle = toggle;
+ }
+ }
+
+ /* If we are just taking a "peek", do nothing else */
+ if (scan_type == scan_poll)
+ return hirq;
+
+ /* Update the pending bits */
+ xc->pending = pending;
+
+ /*
+ * If this is an EOI, that's it: no CPPR adjustment is done here,
+ * all we needed was to clean up the stale pending bits and check
+ * if there's anything left.
+ */
+ if (scan_type == scan_eoi)
+ return hirq;
+
+ /*
+ * If we found an interrupt, adjust what the guest CPPR should
+ * be as if we had just fetched that interrupt from HW.
+ *
+ * Note: This can only make xc->cppr smaller as the previous
+ * loop will only exit with hirq != 0 if prio is lower than
+ * the current xc->cppr. Thus we don't need to re-check xc->mfrr
+ * for pending IPIs.
+ */
+ if (hirq)
+ xc->cppr = prio;
+ /*
+ * If it was an IPI the HW CPPR might have been lowered too much
+ * as the HW interrupt we use for IPIs is routed to priority 0.
+ *
+ * We re-sync it here.
+ */
+ if (xc->cppr != xc->hw_cppr) {
+ xc->hw_cppr = xc->cppr;
+ __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR);
+ }
+
+ return hirq;
+}
+
+static unsigned long xive_vm_h_xirr(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u8 old_cppr;
+ u32 hirq;
+
+ pr_devel("H_XIRR\n");
+
+ xc->stat_vm_h_xirr++;
+
+ /* First collect pending bits from HW */
+ xive_vm_ack_pending(xc);
+
+ pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
+ xc->pending, xc->hw_cppr, xc->cppr);
+
+ /* Grab previous CPPR and reverse map it */
+ old_cppr = xive_prio_to_guest(xc->cppr);
+
+ /* Scan for actual interrupts */
+ hirq = xive_vm_scan_interrupts(xc, xc->pending, scan_fetch);
+
+ pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
+ hirq, xc->hw_cppr, xc->cppr);
+
+ /* That should never hit */
+ if (hirq & 0xff000000)
+ pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
+
+ /*
+ * XXX We could check if the interrupt is masked here and
+ * filter it. If we chose to do so, we would need to do:
+ *
+ * if (masked) {
+ * lock();
+ * if (masked) {
+ * old_Q = true;
+ * hirq = 0;
+ * }
+ * unlock();
+ * }
+ */
+
+ /* Return interrupt and old CPPR in GPR4 */
+ vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);
+
+ return H_SUCCESS;
+}
+
+static unsigned long xive_vm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u8 pending = xc->pending;
+ u32 hirq;
+
+ pr_devel("H_IPOLL(server=%ld)\n", server);
+
+ xc->stat_vm_h_ipoll++;
+
+ /* Grab the target VCPU if not the current one */
+ if (xc->server_num != server) {
+ vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
+ if (!vcpu)
+ return H_PARAMETER;
+ xc = vcpu->arch.xive_vcpu;
+
+ /* Scan all priorities */
+ pending = 0xff;
+ } else {
+ /* Grab pending interrupt if any */
+ __be64 qw1 = __raw_readq(xive_tima + TM_QW1_OS);
+ u8 pipr = be64_to_cpu(qw1) & 0xff;
+
+ if (pipr < 8)
+ pending |= 1 << pipr;
+ }
+
+ hirq = xive_vm_scan_interrupts(xc, pending, scan_poll);
+
+ /* Return interrupt and old CPPR in GPR4 */
+ vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);
+
+ return H_SUCCESS;
+}
+
+static void xive_vm_push_pending_to_hw(struct kvmppc_xive_vcpu *xc)
+{
+ u8 pending, prio;
+
+ pending = xc->pending;
+ if (xc->mfrr != 0xff) {
+ if (xc->mfrr < 8)
+ pending |= 1 << xc->mfrr;
+ else
+ pending |= 0x80;
+ }
+ if (!pending)
+ return;
+ prio = ffs(pending) - 1;
+
+ __raw_writeb(prio, xive_tima + TM_SPC_SET_OS_PENDING);
+}
+
+static void xive_vm_scan_for_rerouted_irqs(struct kvmppc_xive *xive,
+ struct kvmppc_xive_vcpu *xc)
+{
+ unsigned int prio;
+
+ /* For each priority that is now masked */
+ for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
+ struct xive_q *q = &xc->queues[prio];
+ struct kvmppc_xive_irq_state *state;
+ struct kvmppc_xive_src_block *sb;
+ u32 idx, toggle, entry, irq, hw_num;
+ struct xive_irq_data *xd;
+ __be32 *qpage;
+ u16 src;
+
+ idx = q->idx;
+ toggle = q->toggle;
+ qpage = READ_ONCE(q->qpage);
+ if (!qpage)
+ continue;
+
+ /* For each interrupt in the queue */
+ for (;;) {
+ entry = be32_to_cpup(qpage + idx);
+
+ /* No more ? */
+ if ((entry >> 31) == toggle)
+ break;
+ irq = entry & 0x7fffffff;
+
+ /* Skip dummies and IPIs */
+ if (irq == XICS_DUMMY || irq == XICS_IPI)
+ goto next;
+ sb = kvmppc_xive_find_source(xive, irq, &src);
+ if (!sb)
+ goto next;
+ state = &sb->irq_state[src];
+
+ /* Has it been rerouted ? */
+ if (xc->server_num == state->act_server)
+ goto next;
+
+ /*
+ * Alright, it *has* been re-routed, kill it from
+ * the queue.
+ */
+ qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);
+
+ /* Find the HW interrupt */
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+ /* If it's not an LSI, set PQ to 11 so the EOI will force a resend */
+ if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
+ xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
+
+ /* EOI the source */
+ xive_vm_source_eoi(hw_num, xd);
+
+next:
+ idx = (idx + 1) & q->msk;
+ if (idx == 0)
+ toggle ^= 1;
+ }
+ }
+}
+
+static int xive_vm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
+ u8 old_cppr;
+
+ pr_devel("H_CPPR(cppr=%ld)\n", cppr);
+
+ xc->stat_vm_h_cppr++;
+
+ /* Map CPPR */
+ cppr = xive_prio_from_guest(cppr);
+
+ /* Remember old and update SW state */
+ old_cppr = xc->cppr;
+ xc->cppr = cppr;
+
+ /*
+ * Order the above update of xc->cppr with the subsequent
+ * read of xc->mfrr inside push_pending_to_hw()
+ */
+ smp_mb();
+
+ if (cppr > old_cppr) {
+ /*
+ * We are masking less, we need to look for pending things
+ * to deliver and set VP pending bits accordingly to trigger
+ * a new interrupt, otherwise we might miss MFRR changes for
+ * which we have optimized out sending an IPI signal.
+ */
+ xive_vm_push_pending_to_hw(xc);
+ } else {
+ /*
+ * We are masking more, we need to check the queue for any
+ * interrupt that has been routed to another CPU, take
+ * it out (replace it with the dummy) and retrigger it.
+ *
+ * This is necessary since those interrupts may otherwise
+ * never be processed, at least not until this CPU restores
+ * its CPPR.
+ *
+ * This is in theory racy vs. HW adding new interrupts to
+ * the queue. In practice this works because the interesting
+ * cases are when the guest has done a set_xive() to move the
+ * interrupt away, which flushes the xive, followed by the
+ * target CPU doing a H_CPPR. So any new interrupt coming into
+ * the queue must still be routed to us and isn't a source
+ * of concern.
+ */
+ xive_vm_scan_for_rerouted_irqs(xive, xc);
+ }
+
+ /* Apply new CPPR */
+ xc->hw_cppr = cppr;
+ __raw_writeb(cppr, xive_tima + TM_QW1_OS + TM_CPPR);
+
+ return H_SUCCESS;
+}
+
+static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+{
+ struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct xive_irq_data *xd;
+ u8 new_cppr = xirr >> 24;
+ u32 irq = xirr & 0x00ffffff, hw_num;
+ u16 src;
+ int rc = 0;
+
+ pr_devel("H_EOI(xirr=%08lx)\n", xirr);
+
+ xc->stat_vm_h_eoi++;
+
+ xc->cppr = xive_prio_from_guest(new_cppr);
+
+ /*
+ * IPIs are synthesized from MFRR and thus don't need
+ * any special EOI handling. The underlying interrupt
+ * used to signal MFRR changes is EOId when fetched from
+ * the queue.
+ */
+ if (irq == XICS_IPI || irq == 0) {
+ /*
+ * This barrier orders the setting of xc->cppr vs.
+ * subsequent test of xc->mfrr done inside
+ * scan_interrupts and push_pending_to_hw
+ */
+ smp_mb();
+ goto bail;
+ }
+
+ /* Find interrupt source */
+ sb = kvmppc_xive_find_source(xive, irq, &src);
+ if (!sb) {
+ pr_devel(" source not found !\n");
+ rc = H_PARAMETER;
+ /* Same as above */
+ smp_mb();
+ goto bail;
+ }
+ state = &sb->irq_state[src];
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+ state->in_eoi = true;
+
+ /*
+ * This barrier orders both setting of in_eoi above vs.
+ * subsequent test of guest_priority, and the setting
+ * of xc->cppr vs. subsequent test of xc->mfrr done inside
+ * scan_interrupts and push_pending_to_hw
+ */
+ smp_mb();
+
+again:
+ if (state->guest_priority == MASKED) {
+ arch_spin_lock(&sb->lock);
+ if (state->guest_priority != MASKED) {
+ arch_spin_unlock(&sb->lock);
+ goto again;
+ }
+ pr_devel(" EOI on saved P...\n");
+
+ /* Clear old_p, that will cause unmask to perform an EOI */
+ state->old_p = false;
+
+ arch_spin_unlock(&sb->lock);
+ } else {
+ pr_devel(" EOI on source...\n");
+
+ /* Perform EOI on the source */
+ xive_vm_source_eoi(hw_num, xd);
+
+ /* If it's an emulated LSI, check level and resend */
+ if (state->lsi && state->asserted)
+ __raw_writeq(0, __x_trig_page(xd));
+
+ }
+
+ /*
+ * This barrier orders the above guest_priority check
+ * and spin_lock/unlock with clearing in_eoi below.
+ *
+ * It also has to be a full mb() as it must ensure
+ * the MMIOs done in source_eoi() are completed before
+ * state->in_eoi is visible.
+ */
+ mb();
+ state->in_eoi = false;
+bail:
+
+ /* Re-evaluate pending IRQs and update HW */
+ xive_vm_scan_interrupts(xc, xc->pending, scan_eoi);
+ xive_vm_push_pending_to_hw(xc);
+ pr_devel(" after scan pending=%02x\n", xc->pending);
+
+ /* Apply new CPPR */
+ xc->hw_cppr = xc->cppr;
+ __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR);
+
+ return rc;
+}
+
+static int xive_vm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);
+
+ xc->stat_vm_h_ipi++;
+
+ /* Find target */
+ vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
+ if (!vcpu)
+ return H_PARAMETER;
+ xc = vcpu->arch.xive_vcpu;
+
+ /* Locklessly write over MFRR */
+ xc->mfrr = mfrr;
+
+ /*
+ * The load of xc->cppr below and the subsequent MMIO store
+ * to the IPI must happen after the above mfrr update is
+ * globally visible so that:
+ *
+ * - Synchronize with another CPU doing an H_EOI or a H_CPPR
+ * updating xc->cppr then reading xc->mfrr.
+ *
+ * - The target of the IPI sees the xc->mfrr update
+ */
+ mb();
+
+ /* Shoot the IPI if more favored than the target cppr */
+ if (mfrr < xc->cppr)
+ __raw_writeq(0, __x_trig_page(&xc->vp_ipi_data));
+
+ return H_SUCCESS;
+}
/*
* We leave a gap of a couple of interrupts in the queue to
@@ -124,7 +726,7 @@ void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
* interrupt might have fired and be on its way to the
* host queue while we mask it, and if we unmask it
* early enough (re-cede right away), there is a
- * theorical possibility that it fires again, thus
+ * theoretical possibility that it fires again, thus
* landing in the target queue more than once which is
* a big no-no.
*
@@ -179,12 +781,13 @@ void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
-void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
+bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
{
void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;
+ bool ret = true;
if (!esc_vaddr)
- return;
+ return ret;
/* we are using XIVE with single escalation */
@@ -197,7 +800,7 @@ void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
* we also don't want to set xive_esc_on to 1 here in
* case we race with xive_esc_irq().
*/
- vcpu->arch.ceded = 0;
+ ret = false;
/*
* The escalation interrupts are special as we don't EOI them.
* There is no need to use the load-after-store ordering offset
@@ -210,6 +813,8 @@ void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
__raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
}
mb();
+
+ return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);
@@ -238,7 +843,7 @@ static irqreturn_t xive_esc_irq(int irq, void *data)
vcpu->arch.irq_pending = 1;
smp_mb();
- if (vcpu->arch.ceded)
+ if (vcpu->arch.ceded || vcpu->arch.nested)
kvmppc_fast_vcpu_kick(vcpu);
/* Since we have the no-EOI flag, the interrupt is effectively
@@ -622,7 +1227,7 @@ static int xive_target_interrupt(struct kvm *kvm,
/*
* Targetting rules: In order to avoid losing track of
- * pending interrupts accross mask and unmask, which would
+ * pending interrupts across mask and unmask, which would
* allow queue overflows, we implement the following rules:
*
* - Unless it was never enabled (or we run out of capacity)
@@ -1073,7 +1678,7 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
/*
* If old_p is set, the interrupt is pending, we switch it to
* PQ=11. This will force a resend in the host so the interrupt
- * isn't lost to whatver host driver may pick it up
+ * isn't lost to whatever host driver may pick it up
*/
if (state->old_p)
xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index 09d0657596c3..1e48f72e8aa5 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -285,13 +285,6 @@ static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle)
return cur & 0x7fffffff;
}
-extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu);
-extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
-extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr);
-extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
-extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
-
/*
* Common Xive routines for XICS-over-XIVE and XIVE native
*/
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index f81ba6f84e72..5271c33fe79e 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -209,7 +209,7 @@ static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
/*
* Clear the ESB pages of the IRQ number being mapped (or
- * unmapped) into the guest and let the the VM fault handler
+ * unmapped) into the guest and let the VM fault handler
* repopulate with the appropriate ESB pages (device or IC)
*/
pr_debug("clearing esb pages for girq 0x%lx\n", irq);
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
deleted file mode 100644
index b0015e05d99a..000000000000
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ /dev/null
@@ -1,636 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
- */
-
-/* File to be included by other .c files */
-
-#define XGLUE(a,b) a##b
-#define GLUE(a,b) XGLUE(a,b)
-
-/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */
-#define XICS_DUMMY 1
-
-static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
-{
- u8 cppr;
- u16 ack;
-
- /*
- * Ensure any previous store to CPPR is ordered vs.
- * the subsequent loads from PIPR or ACK.
- */
- eieio();
-
- /* Perform the acknowledge OS to register cycle. */
- ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
-
- /* Synchronize subsequent queue accesses */
- mb();
-
- /* XXX Check grouping level */
-
- /* Anything ? */
- if (!((ack >> 8) & TM_QW1_NSR_EO))
- return;
-
- /* Grab CPPR of the most favored pending interrupt */
- cppr = ack & 0xff;
- if (cppr < 8)
- xc->pending |= 1 << cppr;
-
-#ifdef XIVE_RUNTIME_CHECKS
- /* Check consistency */
- if (cppr >= xc->hw_cppr)
- pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
- smp_processor_id(), cppr, xc->hw_cppr);
-#endif
-
- /*
- * Update our image of the HW CPPR. We don't yet modify
- * xc->cppr, this will be done as we scan for interrupts
- * in the queues.
- */
- xc->hw_cppr = cppr;
-}
-
-static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
-{
- u64 val;
-
- if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
- offset |= XIVE_ESB_LD_ST_MO;
-
- val =__x_readq(__x_eoi_page(xd) + offset);
-#ifdef __LITTLE_ENDIAN__
- val >>= 64-8;
-#endif
- return (u8)val;
-}
-
-
-static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
-{
- /* If the XIVE supports the new "store EOI facility, use it */
- if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
- __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
- else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
- /*
- * For LSIs the HW EOI cycle is used rather than PQ bits,
- * as they are automatically re-triggred in HW when still
- * pending.
- */
- __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
- } else {
- uint64_t eoi_val;
-
- /*
- * Otherwise for EOI, we use the special MMIO that does
- * a clear of both P and Q and returns the old Q,
- * except for LSIs where we use the "EOI cycle" special
- * load.
- *
- * This allows us to then do a re-trigger if Q was set
- * rather than synthetizing an interrupt in software
- */
- eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
-
- /* Re-trigger if needed */
- if ((eoi_val & 1) && __x_trig_page(xd))
- __x_writeq(0, __x_trig_page(xd));
- }
-}
-
-enum {
- scan_fetch,
- scan_poll,
- scan_eoi,
-};
-
-static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
- u8 pending, int scan_type)
-{
- u32 hirq = 0;
- u8 prio = 0xff;
-
- /* Find highest pending priority */
- while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
- struct xive_q *q;
- u32 idx, toggle;
- __be32 *qpage;
-
- /*
- * If pending is 0 this will return 0xff which is what
- * we want
- */
- prio = ffs(pending) - 1;
-
- /* Don't scan past the guest cppr */
- if (prio >= xc->cppr || prio > 7) {
- if (xc->mfrr < xc->cppr) {
- prio = xc->mfrr;
- hirq = XICS_IPI;
- }
- break;
- }
-
- /* Grab queue and pointers */
- q = &xc->queues[prio];
- idx = q->idx;
- toggle = q->toggle;
-
- /*
- * Snapshot the queue page. The test further down for EOI
- * must use the same "copy" that was used by __xive_read_eq
- * since qpage can be set concurrently and we don't want
- * to miss an EOI.
- */
- qpage = READ_ONCE(q->qpage);
-
-skip_ipi:
- /*
- * Try to fetch from the queue. Will return 0 for a
- * non-queueing priority (ie, qpage = 0).
- */
- hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);
-
- /*
- * If this was a signal for an MFFR change done by
- * H_IPI we skip it. Additionally, if we were fetching
- * we EOI it now, thus re-enabling reception of a new
- * such signal.
- *
- * We also need to do that if prio is 0 and we had no
- * page for the queue. In this case, we have non-queued
- * IPI that needs to be EOId.
- *
- * This is safe because if we have another pending MFRR
- * change that wasn't observed above, the Q bit will have
- * been set and another occurrence of the IPI will trigger.
- */
- if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
- if (scan_type == scan_fetch) {
- GLUE(X_PFX,source_eoi)(xc->vp_ipi,
- &xc->vp_ipi_data);
- q->idx = idx;
- q->toggle = toggle;
- }
- /* Loop back on same queue with updated idx/toggle */
-#ifdef XIVE_RUNTIME_CHECKS
- WARN_ON(hirq && hirq != XICS_IPI);
-#endif
- if (hirq)
- goto skip_ipi;
- }
-
- /* If it's the dummy interrupt, continue searching */
- if (hirq == XICS_DUMMY)
- goto skip_ipi;
-
- /* Clear the pending bit if the queue is now empty */
- if (!hirq) {
- pending &= ~(1 << prio);
-
- /*
- * Check if the queue count needs adjusting due to
- * interrupts being moved away.
- */
- if (atomic_read(&q->pending_count)) {
- int p = atomic_xchg(&q->pending_count, 0);
- if (p) {
-#ifdef XIVE_RUNTIME_CHECKS
- WARN_ON(p > atomic_read(&q->count));
-#endif
- atomic_sub(p, &q->count);
- }
- }
- }
-
- /*
- * If the most favoured prio we found pending is less
- * favored (or equal) than a pending IPI, we return
- * the IPI instead.
- */
- if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
- prio = xc->mfrr;
- hirq = XICS_IPI;
- break;
- }
-
- /* If fetching, update queue pointers */
- if (scan_type == scan_fetch) {
- q->idx = idx;
- q->toggle = toggle;
- }
- }
-
- /* If we are just taking a "peek", do nothing else */
- if (scan_type == scan_poll)
- return hirq;
-
- /* Update the pending bits */
- xc->pending = pending;
-
- /*
- * If this is an EOI that's it, no CPPR adjustment done here,
- * all we needed was cleanup the stale pending bits and check
- * if there's anything left.
- */
- if (scan_type == scan_eoi)
- return hirq;
-
- /*
- * If we found an interrupt, adjust what the guest CPPR should
- * be as if we had just fetched that interrupt from HW.
- *
- * Note: This can only make xc->cppr smaller as the previous
- * loop will only exit with hirq != 0 if prio is lower than
- * the current xc->cppr. Thus we don't need to re-check xc->mfrr
- * for pending IPIs.
- */
- if (hirq)
- xc->cppr = prio;
- /*
- * If it was an IPI the HW CPPR might have been lowered too much
- * as the HW interrupt we use for IPIs is routed to priority 0.
- *
- * We re-sync it here.
- */
- if (xc->cppr != xc->hw_cppr) {
- xc->hw_cppr = xc->cppr;
- __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
- }
-
- return hirq;
-}
-
-X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
- u8 old_cppr;
- u32 hirq;
-
- pr_devel("H_XIRR\n");
-
- xc->GLUE(X_STAT_PFX,h_xirr)++;
-
- /* First collect pending bits from HW */
- GLUE(X_PFX,ack_pending)(xc);
-
- pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
- xc->pending, xc->hw_cppr, xc->cppr);
-
- /* Grab previous CPPR and reverse map it */
- old_cppr = xive_prio_to_guest(xc->cppr);
-
- /* Scan for actual interrupts */
- hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);
-
- pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
- hirq, xc->hw_cppr, xc->cppr);
-
-#ifdef XIVE_RUNTIME_CHECKS
- /* That should never hit */
- if (hirq & 0xff000000)
- pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
-#endif
-
- /*
- * XXX We could check if the interrupt is masked here and
- * filter it. If we chose to do so, we would need to do:
- *
- * if (masked) {
- * lock();
- * if (masked) {
- * old_Q = true;
- * hirq = 0;
- * }
- * unlock();
- * }
- */
-
- /* Return interrupt and old CPPR in GPR4 */
- vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);
-
- return H_SUCCESS;
-}
-
-X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
-{
- struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
- u8 pending = xc->pending;
- u32 hirq;
-
- pr_devel("H_IPOLL(server=%ld)\n", server);
-
- xc->GLUE(X_STAT_PFX,h_ipoll)++;
-
- /* Grab the target VCPU if not the current one */
- if (xc->server_num != server) {
- vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
- if (!vcpu)
- return H_PARAMETER;
- xc = vcpu->arch.xive_vcpu;
-
- /* Scan all priorities */
- pending = 0xff;
- } else {
- /* Grab pending interrupt if any */
- __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
- u8 pipr = be64_to_cpu(qw1) & 0xff;
- if (pipr < 8)
- pending |= 1 << pipr;
- }
-
- hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);
-
- /* Return interrupt and old CPPR in GPR4 */
- vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);
-
- return H_SUCCESS;
-}
-
-static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
-{
- u8 pending, prio;
-
- pending = xc->pending;
- if (xc->mfrr != 0xff) {
- if (xc->mfrr < 8)
- pending |= 1 << xc->mfrr;
- else
- pending |= 0x80;
- }
- if (!pending)
- return;
- prio = ffs(pending) - 1;
-
- __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
-}
-
-static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive,
- struct kvmppc_xive_vcpu *xc)
-{
- unsigned int prio;
-
- /* For each priority that is now masked */
- for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
- struct xive_q *q = &xc->queues[prio];
- struct kvmppc_xive_irq_state *state;
- struct kvmppc_xive_src_block *sb;
- u32 idx, toggle, entry, irq, hw_num;
- struct xive_irq_data *xd;
- __be32 *qpage;
- u16 src;
-
- idx = q->idx;
- toggle = q->toggle;
- qpage = READ_ONCE(q->qpage);
- if (!qpage)
- continue;
-
- /* For each interrupt in the queue */
- for (;;) {
- entry = be32_to_cpup(qpage + idx);
-
- /* No more ? */
- if ((entry >> 31) == toggle)
- break;
- irq = entry & 0x7fffffff;
-
- /* Skip dummies and IPIs */
- if (irq == XICS_DUMMY || irq == XICS_IPI)
- goto next;
- sb = kvmppc_xive_find_source(xive, irq, &src);
- if (!sb)
- goto next;
- state = &sb->irq_state[src];
-
- /* Has it been rerouted ? */
- if (xc->server_num == state->act_server)
- goto next;
-
- /*
- * Allright, it *has* been re-routed, kill it from
- * the queue.
- */
- qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);
-
- /* Find the HW interrupt */
- kvmppc_xive_select_irq(state, &hw_num, &xd);
-
- /* If it's not an LSI, set PQ to 11 the EOI will force a resend */
- if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
- GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11);
-
- /* EOI the source */
- GLUE(X_PFX,source_eoi)(hw_num, xd);
-
- next:
- idx = (idx + 1) & q->msk;
- if (idx == 0)
- toggle ^= 1;
- }
- }
-}
-
-X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
-{
- struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
- struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
- u8 old_cppr;
-
- pr_devel("H_CPPR(cppr=%ld)\n", cppr);
-
- xc->GLUE(X_STAT_PFX,h_cppr)++;
-
- /* Map CPPR */
- cppr = xive_prio_from_guest(cppr);
-
- /* Remember old and update SW state */
- old_cppr = xc->cppr;
- xc->cppr = cppr;
-
- /*
- * Order the above update of xc->cppr with the subsequent
- * read of xc->mfrr inside push_pending_to_hw()
- */
- smp_mb();
-
- if (cppr > old_cppr) {
- /*
- * We are masking less, we need to look for pending things
- * to deliver and set VP pending bits accordingly to trigger
- * a new interrupt otherwise we might miss MFRR changes for
- * which we have optimized out sending an IPI signal.
- */
- GLUE(X_PFX,push_pending_to_hw)(xc);
- } else {
- /*
- * We are masking more, we need to check the queue for any
- * interrupt that has been routed to another CPU, take
- * it out (replace it with the dummy) and retrigger it.
- *
- * This is necessary since those interrupts may otherwise
- * never be processed, at least not until this CPU restores
- * its CPPR.
- *
- * This is in theory racy vs. HW adding new interrupts to
- * the queue. In practice this works because the interesting
- * cases are when the guest has done a set_xive() to move the
- * interrupt away, which flushes the xive, followed by the
- * target CPU doing a H_CPPR. So any new interrupt coming into
- * the queue must still be routed to us and isn't a source
- * of concern.
- */
- GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc);
- }
-
- /* Apply new CPPR */
- xc->hw_cppr = cppr;
- __x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);
-
- return H_SUCCESS;
-}
-
-X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
-{
- struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
- struct kvmppc_xive_src_block *sb;
- struct kvmppc_xive_irq_state *state;
- struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
- struct xive_irq_data *xd;
- u8 new_cppr = xirr >> 24;
- u32 irq = xirr & 0x00ffffff, hw_num;
- u16 src;
- int rc = 0;
-
- pr_devel("H_EOI(xirr=%08lx)\n", xirr);
-
- xc->GLUE(X_STAT_PFX,h_eoi)++;
-
- xc->cppr = xive_prio_from_guest(new_cppr);
-
- /*
- * IPIs are synthetized from MFRR and thus don't need
- * any special EOI handling. The underlying interrupt
- * used to signal MFRR changes is EOId when fetched from
- * the queue.
- */
- if (irq == XICS_IPI || irq == 0) {
- /*
- * This barrier orders the setting of xc->cppr vs.
- * subsquent test of xc->mfrr done inside
- * scan_interrupts and push_pending_to_hw
- */
- smp_mb();
- goto bail;
- }
-
- /* Find interrupt source */
- sb = kvmppc_xive_find_source(xive, irq, &src);
- if (!sb) {
- pr_devel(" source not found !\n");
- rc = H_PARAMETER;
- /* Same as above */
- smp_mb();
- goto bail;
- }
- state = &sb->irq_state[src];
- kvmppc_xive_select_irq(state, &hw_num, &xd);
-
- state->in_eoi = true;
-
- /*
- * This barrier orders both setting of in_eoi above vs,
- * subsequent test of guest_priority, and the setting
- * of xc->cppr vs. subsquent test of xc->mfrr done inside
- * scan_interrupts and push_pending_to_hw
- */
- smp_mb();
-
-again:
- if (state->guest_priority == MASKED) {
- arch_spin_lock(&sb->lock);
- if (state->guest_priority != MASKED) {
- arch_spin_unlock(&sb->lock);
- goto again;
- }
- pr_devel(" EOI on saved P...\n");
-
- /* Clear old_p, that will cause unmask to perform an EOI */
- state->old_p = false;
-
- arch_spin_unlock(&sb->lock);
- } else {
- pr_devel(" EOI on source...\n");
-
- /* Perform EOI on the source */
- GLUE(X_PFX,source_eoi)(hw_num, xd);
-
- /* If it's an emulated LSI, check level and resend */
- if (state->lsi && state->asserted)
- __x_writeq(0, __x_trig_page(xd));
-
- }
-
- /*
- * This barrier orders the above guest_priority check
- * and spin_lock/unlock with clearing in_eoi below.
- *
- * It also has to be a full mb() as it must ensure
- * the MMIOs done in source_eoi() are completed before
- * state->in_eoi is visible.
- */
- mb();
- state->in_eoi = false;
-bail:
-
- /* Re-evaluate pending IRQs and update HW */
- GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
- GLUE(X_PFX,push_pending_to_hw)(xc);
- pr_devel(" after scan pending=%02x\n", xc->pending);
-
- /* Apply new CPPR */
- xc->hw_cppr = xc->cppr;
- __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
-
- return rc;
-}
-
-X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr)
-{
- struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
-
- pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);
-
- xc->GLUE(X_STAT_PFX,h_ipi)++;
-
- /* Find target */
- vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
- if (!vcpu)
- return H_PARAMETER;
- xc = vcpu->arch.xive_vcpu;
-
- /* Locklessly write over MFRR */
- xc->mfrr = mfrr;
-
- /*
- * The load of xc->cppr below and the subsequent MMIO store
- * to the IPI must happen after the above mfrr update is
- * globally visible so that:
- *
- * - Synchronize with another CPU doing an H_EOI or a H_CPPR
- * updating xc->cppr then reading xc->mfrr.
- *
- * - The target of the IPI sees the xc->mfrr update
- */
- mb();
-
- /* Shoot the IPI if most favored than target cppr */
- if (mfrr < xc->cppr)
- __x_writeq(0, __x_trig_page(&xc->vp_ipi_data));
-
- return H_SUCCESS;
-}
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index fa0d8dbbe484..57e0ad6a2ca3 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -309,7 +309,7 @@ static int kvmppc_core_vcpu_create_e500mc(struct kvm_vcpu *vcpu)
BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0);
vcpu_e500 = to_e500(vcpu);
- /* Invalid PIR value -- this LPID dosn't have valid state on any cpu */
+ /* Invalid PIR value -- this LPID doesn't have valid state on any cpu */
vcpu->arch.oldpir = 0xffffffff;
err = kvmppc_e500_tlb_init(vcpu_e500);
@@ -399,7 +399,6 @@ static int __init kvmppc_e500mc_init(void)
* allocator.
*/
kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core);
- kvmppc_claim_lpid(0); /* host */
r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
if (r)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 533c4232e5ab..191992fcb2c2 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
+#include <linux/of.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
@@ -2496,41 +2497,37 @@ out:
return r;
}
-static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
+static DEFINE_IDA(lpid_inuse);
static unsigned long nr_lpids;
long kvmppc_alloc_lpid(void)
{
- long lpid;
+ int lpid;
- do {
- lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
- if (lpid >= nr_lpids) {
+ /* The host LPID must always be 0 (allocation starts at 1) */
+ lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
+ if (lpid < 0) {
+ if (lpid == -ENOMEM)
+ pr_err("%s: Out of memory\n", __func__);
+ else
pr_err("%s: No LPIDs free\n", __func__);
- return -ENOMEM;
- }
- } while (test_and_set_bit(lpid, lpid_inuse));
+ return -ENOMEM;
+ }
return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
-void kvmppc_claim_lpid(long lpid)
-{
- set_bit(lpid, lpid_inuse);
-}
-EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
-
void kvmppc_free_lpid(long lpid)
{
- clear_bit(lpid, lpid_inuse);
+ ida_free(&lpid_inuse, lpid);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
+/* nr_lpids_param includes the host LPID */
void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
- nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
- memset(lpid_inuse, 0, sizeof(lpid_inuse));
+ nr_lpids = nr_lpids_param;
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index 38cd0ed0a617..32e2cb5811cc 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -409,9 +409,9 @@ TRACE_EVENT(kvmppc_run_core,
);
TRACE_EVENT(kvmppc_vcore_blocked,
- TP_PROTO(struct kvmppc_vcore *vc, int where),
+ TP_PROTO(struct kvm_vcpu *vcpu, int where),
- TP_ARGS(vc, where),
+ TP_ARGS(vcpu, where),
TP_STRUCT__entry(
__field(int, n_runnable)
@@ -421,8 +421,8 @@ TRACE_EVENT(kvmppc_vcore_blocked,
),
TP_fast_assign(
- __entry->runner_vcpu = vc->runner->vcpu_id;
- __entry->n_runnable = vc->n_runnable;
+ __entry->runner_vcpu = vcpu->vcpu_id;
+ __entry->n_runnable = vcpu->arch.vcore->n_runnable;
__entry->where = where;
__entry->tgid = current->tgid;
),
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 5d1881d2e39a..8560c912186d 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -13,6 +13,9 @@ CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
KASAN_SANITIZE_code-patching.o := n
KASAN_SANITIZE_feature-fixups.o := n
+# restart_table.o contains functions called in the NMI interrupt path
+# which can be in real mode. Disable KASAN.
+KASAN_SANITIZE_restart_table.o := n
ifdef CONFIG_KASAN
CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c
index f3999cbb2fcc..1a14c8780278 100644
--- a/arch/powerpc/lib/checksum_wrappers.c
+++ b/arch/powerpc/lib/checksum_wrappers.c
@@ -24,7 +24,6 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
user_read_access_end();
return csum;
}
-EXPORT_SYMBOL(csum_and_copy_from_user);
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
@@ -38,4 +37,3 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
user_write_access_end();
return csum;
}
-EXPORT_SYMBOL(csum_and_copy_to_user);
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 00c68e7fb11e..6edf0697a526 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/cpuhotplug.h>
#include <linux/uaccess.h>
+#include <linux/jump_label.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
@@ -32,7 +33,7 @@ static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr
return 0;
failed:
- return -EFAULT;
+ return -EPERM;
}
int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
@@ -78,6 +79,8 @@ static int text_area_cpu_down(unsigned int cpu)
return 0;
}
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done);
+
/*
* Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and
* we judge it as being preferable to a kernel that will crash later when
@@ -88,6 +91,7 @@ void __init poking_init(void)
BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"powerpc/text_poke:online", text_area_cpu_up,
text_area_cpu_down));
+ static_branch_enable(&poking_init_done);
}
/*
@@ -97,7 +101,7 @@ static int map_patch_area(void *addr, unsigned long text_poke_addr)
{
unsigned long pfn;
- if (is_vmalloc_or_module_addr(addr))
+ if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
pfn = vmalloc_to_pfn(addr);
else
pfn = __pa_symbol(addr) >> PAGE_SHIFT;
@@ -170,7 +174,7 @@ static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
* when text_poke_area is not ready, but we still need
* to allow patching. We just do the plain old patching
*/
- if (!this_cpu_read(text_poke_area))
+ if (!static_branch_likely(&poking_init_done))
return raw_patch_instruction(addr, instr);
local_irq_save(flags);
@@ -188,10 +192,12 @@ static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
#endif /* CONFIG_STRICT_KERNEL_RWX */
+__ro_after_init DEFINE_STATIC_KEY_FALSE(init_mem_is_free);
+
int patch_instruction(u32 *addr, ppc_inst_t instr)
{
/* Make sure we aren't patching a freed init section */
- if (system_state >= SYSTEM_FREEING_INITMEM && init_section_contains(addr, 4))
+ if (static_branch_likely(&init_mem_is_free) && init_section_contains(addr, 4))
return 0;
return do_patch_instruction(addr, instr);
@@ -208,33 +214,6 @@ int patch_branch(u32 *addr, unsigned long target, int flags)
return patch_instruction(addr, instr);
}
-bool is_offset_in_branch_range(long offset)
-{
- /*
- * Powerpc branch instruction is :
- *
- * 0 6 30 31
- * +---------+----------------+---+---+
- * | opcode | LI |AA |LK |
- * +---------+----------------+---+---+
- * Where AA = 0 and LK = 0
- *
- * LI is a signed 24 bits integer. The real branch offset is computed
- * by: imm32 = SignExtend(LI:'0b00', 32);
- *
- * So the maximum forward branch should be:
- * (0x007fffff << 2) = 0x01fffffc = 0x1fffffc
- * The maximum backward branch should be:
- * (0xff800000 << 2) = 0xfe000000 = -0x2000000
- */
- return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
-}
-
-bool is_offset_in_cond_branch_range(long offset)
-{
- return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
-}
-
/*
* Helper to check if a given instruction is a conditional branch
* Derived from the conditional checks in analyse_instr()
@@ -257,26 +236,6 @@ bool is_conditional_branch(ppc_inst_t instr)
}
NOKPROBE_SYMBOL(is_conditional_branch);
-int create_branch(ppc_inst_t *instr, const u32 *addr,
- unsigned long target, int flags)
-{
- long offset;
-
- *instr = ppc_inst(0);
- offset = target;
- if (! (flags & BRANCH_ABSOLUTE))
- offset = offset - (unsigned long)addr;
-
- /* Check we can represent the target in the instruction format */
- if (!is_offset_in_branch_range(offset))
- return 1;
-
- /* Mask out the flags and target, so they don't step on each other. */
- *instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC));
-
- return 0;
-}
-
int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
unsigned long target, int flags)
{
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 343a78826035..993d3f31832a 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -451,7 +451,7 @@ static int __do_rfi_flush_fixups(void *data)
if (types & L1D_FLUSH_FALLBACK)
/* b .+16 to fallback flush */
- instrs[0] = PPC_INST_BRANCH | 16;
+ instrs[0] = PPC_RAW_BRANCH(16);
i = 0;
if (types & L1D_FLUSH_ORI) {
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 6f79bde6d6c2..398b5694aeb7 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -15,9 +15,6 @@
#include <asm/cputable.h>
#include <asm/disassemble.h>
-extern char system_call_common[];
-extern char system_call_vectored_emulate[];
-
#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK 0xffffffff87c0ffffUL
@@ -1166,7 +1163,7 @@ static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
if (carry_in)
++val;
- op->type = COMPUTE + SETREG + SETXER;
+ op->type = COMPUTE | SETREG | SETXER;
op->reg = rd;
op->val = val;
val = truncate_if_32bit(regs->msr, val);
@@ -1187,7 +1184,7 @@ static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
{
unsigned int crval, shift;
- op->type = COMPUTE + SETCC;
+ op->type = COMPUTE | SETCC;
crval = (regs->xer >> 31) & 1; /* get SO bit */
if (v1 < v2)
crval |= 8;
@@ -1206,7 +1203,7 @@ static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
{
unsigned int crval, shift;
- op->type = COMPUTE + SETCC;
+ op->type = COMPUTE | SETCC;
crval = (regs->xer >> 31) & 1; /* get SO bit */
if (v1 < v2)
crval |= 8;
@@ -1376,7 +1373,6 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
if (branch_taken(word, regs, op))
op->type |= BRTAKEN;
return 1;
-#ifdef CONFIG_PPC64
case 17: /* sc */
if ((word & 0xfe2) == 2)
op->type = SYSCALL;
@@ -1388,7 +1384,6 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
} else
op->type = UNKNOWN;
return 0;
-#endif
case 18: /* b */
op->type = BRANCH | BRTAKEN;
imm = word & 0x03fffffc;
@@ -3643,43 +3638,22 @@ int emulate_step(struct pt_regs *regs, ppc_inst_t instr)
regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
goto instr_done;
-#ifdef CONFIG_PPC64
case SYSCALL: /* sc */
/*
- * N.B. this uses knowledge about how the syscall
- * entry code works. If that is changed, this will
- * need to be changed also.
+ * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't
+ * single step a system call instruction:
+ *
+ * Successful completion for an instruction means that the
+ * instruction caused no other interrupt. Thus a Trace
+ * interrupt never occurs for a System Call or System Call
+ * Vectored instruction, or for a Trap instruction that
+ * traps.
*/
- if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
- cpu_has_feature(CPU_FTR_REAL_LE) &&
- regs->gpr[0] == 0x1ebe) {
- regs_set_return_msr(regs, regs->msr ^ MSR_LE);
- goto instr_done;
- }
- regs->gpr[9] = regs->gpr[13];
- regs->gpr[10] = MSR_KERNEL;
- regs->gpr[11] = regs->nip + 4;
- regs->gpr[12] = regs->msr & MSR_MASK;
- regs->gpr[13] = (unsigned long) get_paca();
- regs_set_return_ip(regs, (unsigned long) &system_call_common);
- regs_set_return_msr(regs, MSR_KERNEL);
- return 1;
-
-#ifdef CONFIG_PPC_BOOK3S_64
+ return -1;
case SYSCALL_VECTORED_0: /* scv 0 */
- regs->gpr[9] = regs->gpr[13];
- regs->gpr[10] = MSR_KERNEL;
- regs->gpr[11] = regs->nip + 4;
- regs->gpr[12] = regs->msr & MSR_MASK;
- regs->gpr[13] = (unsigned long) get_paca();
- regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate);
- regs_set_return_msr(regs, MSR_KERNEL);
- return 1;
-#endif
-
+ return -1;
case RFI:
return -1;
-#endif
}
return 0;
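The op->type hunks earlier in this file switch from '+' to '|' when combining the COMPUTE/SETREG/SETCC/SETXER type bits. These are single-bit flags, so OR is the idempotent, carry-free way to build the field; addition only happens to work when every flag is distinct and applied exactly once. A tiny sketch with hypothetical flag values:

#include <assert.h>

#define F_COMPUTE 0x0	/* made-up values, for illustration only */
#define F_SETREG  0x1
#define F_SETXER  0x4

int main(void)
{
	int type = F_COMPUTE | F_SETREG | F_SETXER;

	assert((type | F_SETREG) == type);	/* OR is idempotent */
	assert((type + F_SETREG) != type);	/* '+' carries into another bit */
	return 0;
}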
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index df8172da2301..503a6e249940 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -5,7 +5,7 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-obj-y := fault.o mem.o pgtable.o mmap.o maccess.o pageattr.o \
+obj-y := fault.o mem.o pgtable.o maccess.o pageattr.o \
init_$(BITS).o pgtable_$(BITS).o \
pgtable-frag.o ioremap.o ioremap_$(BITS).o \
init-common.o mmu_context.o drmem.o \
@@ -14,7 +14,6 @@ obj-$(CONFIG_PPC_MMU_NOHASH) += nohash/
obj-$(CONFIG_PPC_BOOK3S_32) += book3s32/
obj-$(CONFIG_PPC_BOOK3S_64) += book3s64/
obj-$(CONFIG_NUMA) += numa.o
-obj-$(CONFIG_PPC_MM_SLICES) += slice.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 203735caf691..49a737fbbd18 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -23,7 +23,6 @@
#include <linux/highmem.h>
#include <linux/memblock.h>
-#include <asm/prom.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/code-patching.h>
diff --git a/arch/powerpc/mm/book3s64/Makefile b/arch/powerpc/mm/book3s64/Makefile
index 2d50cac499c5..cad2abc1730f 100644
--- a/arch/powerpc/mm/book3s64/Makefile
+++ b/arch/powerpc/mm/book3s64/Makefile
@@ -5,7 +5,7 @@ ccflags-y := $(NO_MINIMAL_TOC)
obj-y += mmu_context.o pgtable.o trace.o
ifdef CONFIG_PPC_64S_HASH_MMU
CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
-obj-y += hash_pgtable.o hash_utils.o hash_tlb.o slb.o
+obj-y += hash_pgtable.o hash_utils.o hash_tlb.o slb.o slice.o
obj-$(CONFIG_PPC_HASH_MMU_NATIVE) += hash_native.o
obj-$(CONFIG_PPC_4K_PAGES) += hash_4k.o
obj-$(CONFIG_PPC_64K_PAGES) += hash_64k.o
@@ -24,3 +24,12 @@ obj-$(CONFIG_PPC_PKEY) += pkeys.o
# Instrumenting the SLB fault path can lead to duplicate SLB entries
KCOV_INSTRUMENT_slb.o := n
+
+# Parts of these can run in real mode and therefore are
+# not safe with the current outline KASAN implementation
+KASAN_SANITIZE_mmu_context.o := n
+KASAN_SANITIZE_pgtable.o := n
+KASAN_SANITIZE_radix_pgtable.o := n
+KASAN_SANITIZE_radix_tlb.o := n
+KASAN_SANITIZE_slb.o := n
+KASAN_SANITIZE_pkeys.o := n
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index 7ce8914992e3..2e0cad5817ba 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -377,7 +377,7 @@ int hash__has_transparent_hugepage(void)
if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
return 0;
/*
- * We need to make sure that we support 16MB hugepage in a segement
+ * We need to make sure that we support 16MB hugepage in a segment
* with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
* of 64K.
*/
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 985cabdd7f67..fc92613dc2bf 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -37,6 +37,9 @@
#include <linux/cpu.h>
#include <linux/pgtable.h>
#include <linux/debugfs.h>
+#include <linux/random.h>
+#include <linux/elf-randomize.h>
+#include <linux/of_fdt.h>
#include <asm/interrupt.h>
#include <asm/processor.h>
@@ -46,7 +49,6 @@
#include <asm/types.h>
#include <linux/uaccess.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
@@ -1264,7 +1266,6 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
return pp;
}
-#ifdef CONFIG_PPC_MM_SLICES
static unsigned int get_paca_psize(unsigned long addr)
{
unsigned char *psizes;
@@ -1281,12 +1282,6 @@ static unsigned int get_paca_psize(unsigned long addr)
return (psizes[index >> 1] >> (mask_index * 4)) & 0xF;
}
-#else
-unsigned int get_paca_psize(unsigned long addr)
-{
- return get_paca()->mm_ctx_user_psize;
-}
-#endif
/*
* Demote a segment to using 4k pages.
@@ -1343,7 +1338,7 @@ static int subpage_protection(struct mm_struct *mm, unsigned long ea)
spp >>= 30 - 2 * ((ea >> 12) & 0xf);
/*
- * 0 -> full premission
+ * 0 -> full permission
* 1 -> Read only
* 2 -> no access.
* We return the flag that need to be cleared.
@@ -1664,7 +1659,7 @@ DEFINE_INTERRUPT_HANDLER(do_hash_fault)
err = hash_page_mm(mm, ea, access, TRAP(regs), flags);
if (unlikely(err < 0)) {
- // failed to instert a hash PTE due to an hypervisor error
+ // failed to insert a hash PTE due to an hypervisor error
if (user_mode(regs)) {
if (IS_ENABLED(CONFIG_PPC_SUBPAGE_PROT) && err == -2)
_exception(SIGSEGV, regs, SEGV_ACCERR, ea);
@@ -1680,7 +1675,6 @@ DEFINE_INTERRUPT_HANDLER(do_hash_fault)
}
}
-#ifdef CONFIG_PPC_MM_SLICES
static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
{
int psize = get_slice_psize(mm, ea);
@@ -1697,12 +1691,6 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
return true;
}
-#else
-static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
-{
- return true;
-}
-#endif
static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
bool is_exec, unsigned long trap)
@@ -2147,3 +2135,20 @@ void __init print_system_hash_info(void)
if (htab_hash_mask)
pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
}
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+ /*
+ * If we are using 1TB segments and we are allowed to randomise
+ * the heap, we can put it above 1TB so it is backed by a 1TB
+ * segment. Otherwise the heap will be in the bottom 1TB
+ * which always uses 256MB segments and this may result in a
+ * performance penalty.
+ */
+ if (is_32bit_task())
+ return randomize_page(mm->brk, SZ_32M);
+ else if (!radix_enabled() && mmu_highuser_ssize == MMU_SEGSIZE_1T)
+ return randomize_page(max_t(unsigned long, mm->brk, SZ_1T), SZ_1G);
+ else
+ return randomize_page(mm->brk, SZ_1G);
+}
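The new arch_randomize_brk() above leans on randomize_page() to pick a page-aligned start inside a window, and on the max_t() clamp to push the heap base above 1 TiB when 1T segments are in use. A rough userspace model of that helper, assuming the simplified semantics "return start plus a random whole number of pages below range":

#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Rough model only; the real helper also aligns 'start' up to a page. */
static unsigned long randomize_page_model(unsigned long start, unsigned long range)
{
	unsigned long pages = range / PAGE_SIZE;

	return pages ? start + (random() % pages) * PAGE_SIZE : start;
}

/* e.g. mimicking the 1T-segment branch above: clamp the base to 1 TiB,
 * then randomise within 1 GiB of it:
 *	new_brk = randomize_page_model(brk > SZ_1T ? brk : SZ_1T, SZ_1G);
 */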
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index cd18e94d0843..7fcfba162e0d 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -305,24 +305,6 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);
-struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
- unsigned long ua, unsigned long size)
-{
- struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
-
- list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
- next) {
- if ((mem->ua <= ua) &&
- (ua + size <= mem->ua +
- (mem->entries << PAGE_SHIFT))) {
- ret = mem;
- break;
- }
- }
-
- return ret;
-}
-
struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
unsigned long ua, unsigned long entries)
{
@@ -369,56 +351,6 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
-long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
- unsigned long ua, unsigned int pageshift, unsigned long *hpa)
-{
- const long entry = (ua - mem->ua) >> PAGE_SHIFT;
- unsigned long *pa;
-
- if (entry >= mem->entries)
- return -EFAULT;
-
- if (pageshift > mem->pageshift)
- return -EFAULT;
-
- if (!mem->hpas) {
- *hpa = mem->dev_hpa + (ua - mem->ua);
- return 0;
- }
-
- pa = (void *) vmalloc_to_phys(&mem->hpas[entry]);
- if (!pa)
- return -EFAULT;
-
- *hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
-
- return 0;
-}
-
-extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
-{
- struct mm_iommu_table_group_mem_t *mem;
- long entry;
- void *va;
- unsigned long *pa;
-
- mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
- if (!mem)
- return;
-
- if (mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA)
- return;
-
- entry = (ua - mem->ua) >> PAGE_SHIFT;
- va = &mem->hpas[entry];
-
- pa = (void *) vmalloc_to_phys(va);
- if (!pa)
- return;
-
- *pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
-}
-
bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
unsigned int pageshift, unsigned long *size)
{
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 8b474ab32f67..7b9966402b25 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -332,7 +332,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
spin_lock(&mm->page_table_lock);
/*
* If we find pgtable_page set, we return
- * the allocated page with single fragement
+ * the allocated page with single fragment
* count.
*/
if (likely(!mm->context.pmd_frag)) {
diff --git a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
index 23d3e08911d3..d2fb776febb4 100644
--- a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
@@ -41,61 +41,6 @@ void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long st
radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
}
-/*
- * A vairant of hugetlb_get_unmapped_area doing topdown search
- * FIXME!! should we do as x86 does or non hugetlb area does ?
- * ie, use topdown or not based on mmap_is_legacy check ?
- */
-unsigned long
-radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- struct hstate *h = hstate_file(file);
- int fixed = (flags & MAP_FIXED);
- unsigned long high_limit;
- struct vm_unmapped_area_info info;
-
- high_limit = DEFAULT_MAP_WINDOW;
- if (addr >= high_limit || (fixed && (addr + len > high_limit)))
- high_limit = TASK_SIZE;
-
- if (len & ~huge_page_mask(h))
- return -EINVAL;
- if (len > high_limit)
- return -ENOMEM;
-
- if (fixed) {
- if (addr > high_limit - len)
- return -ENOMEM;
- if (prepare_hugepage_range(file, addr, len))
- return -EINVAL;
- return addr;
- }
-
- if (addr) {
- addr = ALIGN(addr, huge_page_size(h));
- vma = find_vma(mm, addr);
- if (high_limit - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
- return addr;
- }
- /*
- * We are always doing an topdown search here. Slice code
- * does that too.
- */
- info.flags = VM_UNMAPPED_AREA_TOPDOWN;
- info.length = len;
- info.low_limit = max(PAGE_SIZE, mmap_min_addr);
- info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
- info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
-
- return vm_unmapped_area(&info);
-}
-
void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t pte)
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index def04631a74d..db2f3d193448 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -359,7 +359,7 @@ static void __init radix_init_pgtable(void)
if (!cpu_has_feature(CPU_FTR_HVMODE) &&
cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
/*
- * Older versions of KVM on these machines perfer if the
+ * Older versions of KVM on these machines prefer if the
* guest only uses the low 19 PID bits.
*/
mmu_pid_bits = 19;
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 7724af19ed7e..dda51fef2d2e 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -397,7 +397,7 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
/*
* Workaround the fact that the "ric" argument to __tlbie_pid
- * must be a compile-time contraint to match the "i" constraint
+ * must be a compile-time constraint to match the "i" constraint
* in the asm statement.
*/
switch (ric) {
diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c
index 81091b9587f6..6956f637a38c 100644
--- a/arch/powerpc/mm/book3s64/slb.c
+++ b/arch/powerpc/mm/book3s64/slb.c
@@ -347,7 +347,7 @@ void slb_setup_new_exec(void)
/*
* We have no good place to clear the slb preload cache on exec,
* flush_thread is about the earliest arch hook but that happens
- * after we switch to the mm and have aleady preloaded the SLBEs.
+ * after we switch to the mm and have already preloaded the SLBEs.
*
* For the most part that's probably okay to use entries from the
* previous exec, they will age out if unused. It may turn out to
@@ -615,7 +615,7 @@ static void slb_cache_update(unsigned long esid_data)
} else {
/*
* Our cache is full and the current cache content strictly
- * doesn't indicate the active SLB conents. Bump the ptr
+ * doesn't indicate the active SLB contents. Bump the ptr
* so that switch_slb() will ignore the cache.
*/
local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/book3s64/slice.c
index f42711f865f3..c0b58afb9a47 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/book3s64/slice.c
@@ -276,20 +276,18 @@ static bool slice_scan_available(unsigned long addr,
}
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
- unsigned long len,
+ unsigned long addr, unsigned long len,
const struct slice_mask *available,
int psize, unsigned long high_limit)
{
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
- unsigned long addr, found, next_end;
+ unsigned long found, next_end;
struct vm_unmapped_area_info info;
info.flags = 0;
info.length = len;
info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
info.align_offset = 0;
-
- addr = TASK_UNMAPPED_BASE;
/*
* Check till the allow max value for this mmap request
*/
@@ -322,12 +320,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
}
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
- unsigned long len,
+ unsigned long addr, unsigned long len,
const struct slice_mask *available,
int psize, unsigned long high_limit)
{
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
- unsigned long addr, found, prev;
+ unsigned long found, prev;
struct vm_unmapped_area_info info;
unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
@@ -335,8 +333,6 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
info.length = len;
info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
info.align_offset = 0;
-
- addr = mm->mmap_base;
/*
* If we are trying to allocate above DEFAULT_MAP_WINDOW
* Add the different to the mmap_base.
@@ -377,7 +373,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
* can happen with large stack limits and large mmap()
* allocations.
*/
- return slice_find_area_bottomup(mm, len, available, psize, high_limit);
+ return slice_find_area_bottomup(mm, TASK_UNMAPPED_BASE, len, available, psize, high_limit);
}
@@ -386,9 +382,9 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
int topdown, unsigned long high_limit)
{
if (topdown)
- return slice_find_area_topdown(mm, len, mask, psize, high_limit);
+ return slice_find_area_topdown(mm, mm->mmap_base, len, mask, psize, high_limit);
else
- return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
+ return slice_find_area_bottomup(mm, mm->mmap_base, len, mask, psize, high_limit);
}
static inline void slice_copy_mask(struct slice_mask *dst,
@@ -639,6 +635,32 @@ return_addr:
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
+unsigned long arch_get_unmapped_area(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ if (radix_enabled())
+ return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
+
+ return slice_get_unmapped_area(addr, len, flags,
+ mm_ctx_user_psize(&current->mm->context), 0);
+}
+
+unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+ const unsigned long addr0,
+ const unsigned long len,
+ const unsigned long pgoff,
+ const unsigned long flags)
+{
+ if (radix_enabled())
+ return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);
+
+ return slice_get_unmapped_area(addr0, len, flags,
+ mm_ctx_user_psize(&current->mm->context), 1);
+}
+
unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
unsigned char *psizes;
@@ -692,7 +714,6 @@ void slice_init_new_context_exec(struct mm_struct *mm)
bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}
-#ifdef CONFIG_PPC_BOOK3S_64
void slice_setup_new_exec(void)
{
struct mm_struct *mm = current->mm;
@@ -704,7 +725,6 @@ void slice_setup_new_exec(void)
mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
}
-#endif
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long len, unsigned int psize)
@@ -759,4 +779,29 @@ int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
return !slice_check_range_fits(mm, maskp, addr, len);
}
+
+unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+ /* With radix we don't use slice, so derive it from vma*/
+ if (radix_enabled())
+ return vma_kernel_pagesize(vma);
+
+ return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start));
+}
+
+static int file_to_psize(struct file *file)
+{
+ struct hstate *hstate = hstate_file(file);
+ return shift_to_mmu_psize(huge_page_shift(hstate));
+}
+
+unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ if (radix_enabled())
+ return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
+
+ return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
+}
#endif
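Slice code such as get_slice_psize() above (and get_paca_psize() in hash_utils.c) keeps one 4-bit MMU page-size index per address slice, packed two per byte in a psizes array. A self-contained sketch of that packing with invented helper names; the getter is the same expression the hunks above use:

/* Two 4-bit page-size indices per byte; names are made up. */
static unsigned int nibble_get(const unsigned char *psizes, unsigned long index)
{
	unsigned int mask_index = index & 0x1;

	return (psizes[index >> 1] >> (mask_index * 4)) & 0xF;
}

static void nibble_set(unsigned char *psizes, unsigned long index, unsigned int psize)
{
	unsigned int mask_index = index & 0x1;

	psizes[index >> 1] &= ~(0xF << (mask_index * 4));
	psizes[index >> 1] |= (psize & 0xF) << (mask_index * 4);
}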
diff --git a/arch/powerpc/mm/cacheflush.c b/arch/powerpc/mm/cacheflush.c
index 63363787e000..0e9b4879c0f9 100644
--- a/arch/powerpc/mm/cacheflush.c
+++ b/arch/powerpc/mm/cacheflush.c
@@ -12,7 +12,7 @@ static inline bool flush_coherent_icache(void)
/*
* For a snooping icache, we still need a dummy icbi to purge all the
* prefetched instructions from the ifetch buffers. We also need a sync
- * before the icbi to order the the actual stores to memory that might
+ * before the icbi to order the actual stores to memory that might
* have modified instructions with the icbi.
*/
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
index 22197b18d85e..2369d1bf2411 100644
--- a/arch/powerpc/mm/drmem.c
+++ b/arch/powerpc/mm/drmem.c
@@ -11,7 +11,7 @@
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
-#include <asm/prom.h>
+#include <linux/slab.h>
#include <asm/drmem.h>
static int n_root_addr_cells, n_root_size_cells;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index b642a5a8668f..b282af39fcf6 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -542,40 +542,6 @@ retry:
return page;
}
-#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-static inline int file_to_psize(struct file *file)
-{
- struct hstate *hstate = hstate_file(file);
- return shift_to_mmu_psize(huge_page_shift(hstate));
-}
-
-unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
-#ifdef CONFIG_PPC_RADIX_MMU
- if (radix_enabled())
- return radix__hugetlb_get_unmapped_area(file, addr, len,
- pgoff, flags);
-#endif
-#ifdef CONFIG_PPC_MM_SLICES
- return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
-#endif
- BUG();
-}
-#endif
-
-unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
-{
- /* With radix we don't use slice, so derive it from vma*/
- if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
- unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
-
- return 1UL << mmu_psize_to_shift(psize);
- }
- return vma_kernel_pagesize(vma);
-}
-
bool __init arch_hugetlb_valid_size(unsigned long size)
{
int shift = __ffs(size);
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 3d690be48e84..693a3a7a9463 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/hugetlb.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 83c0ee9fbf05..05b0d584e50b 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -111,7 +111,7 @@ static int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_m
}
/*
- * vmemmap virtual address space management does not have a traditonal page
+ * vmemmap virtual address space management does not have a traditional page
* table to track which virtual struct pages are backed by physical mapping.
* The virtual to physical mappings are tracked in a simple linked list
* format. 'vmemmap_list' maintains the entire vmemmap physical mapping at
@@ -128,7 +128,7 @@ static struct vmemmap_backing *next;
/*
* The same pointer 'next' tracks individual chunks inside the allocated
- * full page during the boot time and again tracks the freeed nodes during
+ * full page during the boot time and again tracks the freed nodes during
* runtime. It is racy but it does not happen as they are separated by the
* boot process. Will create problem if some how we have memory hotplug
* operation during boot !!
@@ -372,6 +372,9 @@ void register_page_bootmem_memmap(unsigned long section_nr,
#ifdef CONFIG_PPC_BOOK3S_64
unsigned int mmu_lpid_bits;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+EXPORT_SYMBOL_GPL(mmu_lpid_bits);
+#endif
unsigned int mmu_pid_bits;
static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
index bb1a5408b86b..4999aadb1867 100644
--- a/arch/powerpc/mm/kasan/Makefile
+++ b/arch/powerpc/mm/kasan/Makefile
@@ -2,6 +2,7 @@
KASAN_SANITIZE := n
-obj-$(CONFIG_PPC32) += kasan_init_32.o
+obj-$(CONFIG_PPC32) += init_32.o
obj-$(CONFIG_PPC_8xx) += 8xx.o
obj-$(CONFIG_PPC_BOOK3S_32) += book3s_32.o
+obj-$(CONFIG_PPC_BOOK3S_64) += init_book3s_64.o
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/init_32.c
index f3e4d069e0ba..f3e4d069e0ba 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/init_32.c
diff --git a/arch/powerpc/mm/kasan/init_book3s_64.c b/arch/powerpc/mm/kasan/init_book3s_64.c
new file mode 100644
index 000000000000..0da5566d6b84
--- /dev/null
+++ b/arch/powerpc/mm/kasan/init_book3s_64.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KASAN for 64-bit Book3S powerpc
+ *
+ * Copyright 2019-2022, Daniel Axtens, IBM Corporation.
+ */
+
+/*
+ * ppc64 turns on virtual memory late in boot, after calling into generic code
+ * like the device-tree parser, so it uses this in conjunction with a hook in
+ * outline mode to avoid invalid access early in boot.
+ */
+
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/kasan.h>
+#include <linux/printk.h>
+#include <linux/sched/task.h>
+#include <linux/memblock.h>
+#include <asm/pgalloc.h>
+
+DEFINE_STATIC_KEY_FALSE(powerpc_kasan_enabled_key);
+
+static void __init kasan_init_phys_region(void *start, void *end)
+{
+ unsigned long k_start, k_end, k_cur;
+ void *va;
+
+ if (start >= end)
+ return;
+
+ k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE);
+ k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE);
+
+ va = memblock_alloc(k_end - k_start, PAGE_SIZE);
+ for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE)
+ map_kernel_page(k_cur, __pa(va), PAGE_KERNEL);
+}
+
+void __init kasan_init(void)
+{
+ /*
+ * We want to do the following things:
+ * 1) Map real memory into the shadow for all physical memblocks
+ * This takes us from c000... to c008...
+ * 2) Leave a hole over the shadow of vmalloc space. KASAN_VMALLOC
+ * will manage this for us.
+ * This takes us from c008... to c00a...
+ * 3) Map the 'early shadow'/zero page over iomap and vmemmap space.
+ * This takes us up to where we start at c00e...
+ */
+
+ void *k_start = kasan_mem_to_shadow((void *)RADIX_VMALLOC_END);
+ void *k_end = kasan_mem_to_shadow((void *)RADIX_VMEMMAP_END);
+ phys_addr_t start, end;
+ u64 i;
+ pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL);
+
+ if (!early_radix_enabled()) {
+ pr_warn("KASAN not enabled as it requires radix!");
+ return;
+ }
+
+ for_each_mem_range(i, &start, &end)
+ kasan_init_phys_region((void *)start, (void *)end);
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
+ &kasan_early_shadow_pte[i], zero_pte, 0);
+
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i],
+ kasan_early_shadow_pte);
+
+ for (i = 0; i < PTRS_PER_PUD; i++)
+ pud_populate(&init_mm, &kasan_early_shadow_pud[i],
+ kasan_early_shadow_pmd);
+
+ /* map the early shadow over the iomap and vmemmap space */
+ kasan_populate_early_shadow(k_start, k_end);
+
+ /* mark early shadow region as RO and wipe it */
+ zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO);
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
+ &kasan_early_shadow_pte[i], zero_pte, 0);
+
+ /*
+ * clear_page relies on some cache info that hasn't been set up yet.
+ * It ends up looping ~forever and blows up other data.
+ * Use memset instead.
+ */
+ memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+
+ static_branch_inc(&powerpc_kasan_enabled_key);
+
+ /* Enable error messages */
+ init_task.kasan_depth = 0;
+ pr_info("KASAN init done\n");
+}
+
+void __init kasan_late_init(void) { }
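kasan_init_phys_region() above sizes its backing allocation from kasan_mem_to_shadow() of the region's endpoints. With generic KASAN each shadow byte covers 2^3 = 8 bytes of address space, so the translation is essentially a shift plus a fixed offset. A hedged sketch of that arithmetic, using a placeholder offset rather than the real layout:

#include <stdint.h>

#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_OFFSET 0xa80000000000UL	/* placeholder value only */

static uintptr_t mem_to_shadow(uintptr_t addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

static uintptr_t shadow_backing_needed(uintptr_t start, uintptr_t end)
{
	/* roughly what the memblock_alloc() above rounds out to page boundaries */
	return mem_to_shadow(end) - mem_to_shadow(start);
}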
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 46fb78e3bb36..52b77684acda 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -23,6 +23,8 @@
#include <asm/kasan.h>
#include <asm/svm.h>
#include <asm/mmzone.h>
+#include <asm/ftrace.h>
+#include <asm/code-patching.h>
#include <mm/mmu_decl.h>
@@ -309,7 +311,9 @@ void free_initmem(void)
{
ppc_md.progress = ppc_printk_progress;
mark_initmem_nx();
+ static_branch_enable(&init_mem_is_free);
free_initmem_default(POISON_FREE_INITMEM);
+ ftrace_free_init_tramp();
}
/*
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
deleted file mode 100644
index c475cf810aa8..000000000000
--- a/arch/powerpc/mm/mmap.c
+++ /dev/null
@@ -1,256 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * flexible mmap layout support
- *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
- * All Rights Reserved.
- *
- * Started by Ingo Molnar <mingo@elte.hu>
- */
-
-#include <linux/personality.h>
-#include <linux/mm.h>
-#include <linux/random.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/mm.h>
-#include <linux/elf-randomize.h>
-#include <linux/security.h>
-#include <linux/mman.h>
-
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave at least a ~128 MB hole.
- */
-#define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
-
-static inline int mmap_is_legacy(struct rlimit *rlim_stack)
-{
- if (current->personality & ADDR_COMPAT_LAYOUT)
- return 1;
-
- if (rlim_stack->rlim_cur == RLIM_INFINITY)
- return 1;
-
- return sysctl_legacy_va_layout;
-}
-
-unsigned long arch_mmap_rnd(void)
-{
- unsigned long shift, rnd;
-
- shift = mmap_rnd_bits;
-#ifdef CONFIG_COMPAT
- if (is_32bit_task())
- shift = mmap_rnd_compat_bits;
-#endif
- rnd = get_random_long() % (1ul << shift);
-
- return rnd << PAGE_SHIFT;
-}
-
-static inline unsigned long stack_maxrandom_size(void)
-{
- if (!(current->flags & PF_RANDOMIZE))
- return 0;
-
- /* 8MB for 32bit, 1GB for 64bit */
- if (is_32bit_task())
- return (1<<23);
- else
- return (1<<30);
-}
-
-static inline unsigned long mmap_base(unsigned long rnd,
- struct rlimit *rlim_stack)
-{
- unsigned long gap = rlim_stack->rlim_cur;
- unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
-
- /* Values close to RLIM_INFINITY can overflow. */
- if (gap + pad > gap)
- gap += pad;
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
-}
-
-#ifdef HAVE_ARCH_UNMAPPED_AREA
-#ifdef CONFIG_PPC_RADIX_MMU
-/*
- * Same function as generic code used only for radix, because we don't need to overload
- * the generic one. But we will have to duplicate, because hash select
- * HAVE_ARCH_UNMAPPED_AREA
- */
-static unsigned long
-radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- int fixed = (flags & MAP_FIXED);
- unsigned long high_limit;
- struct vm_unmapped_area_info info;
-
- high_limit = DEFAULT_MAP_WINDOW;
- if (addr >= high_limit || (fixed && (addr + len > high_limit)))
- high_limit = TASK_SIZE;
-
- if (len > high_limit)
- return -ENOMEM;
-
- if (fixed) {
- if (addr > high_limit - len)
- return -ENOMEM;
- return addr;
- }
-
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (high_limit - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
- return addr;
- }
-
- info.flags = 0;
- info.length = len;
- info.low_limit = mm->mmap_base;
- info.high_limit = high_limit;
- info.align_mask = 0;
-
- return vm_unmapped_area(&info);
-}
-
-static unsigned long
-radix__arch_get_unmapped_area_topdown(struct file *filp,
- const unsigned long addr0,
- const unsigned long len,
- const unsigned long pgoff,
- const unsigned long flags)
-{
- struct vm_area_struct *vma;
- struct mm_struct *mm = current->mm;
- unsigned long addr = addr0;
- int fixed = (flags & MAP_FIXED);
- unsigned long high_limit;
- struct vm_unmapped_area_info info;
-
- high_limit = DEFAULT_MAP_WINDOW;
- if (addr >= high_limit || (fixed && (addr + len > high_limit)))
- high_limit = TASK_SIZE;
-
- if (len > high_limit)
- return -ENOMEM;
-
- if (fixed) {
- if (addr > high_limit - len)
- return -ENOMEM;
- return addr;
- }
-
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (high_limit - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
- return addr;
- }
-
- info.flags = VM_UNMAPPED_AREA_TOPDOWN;
- info.length = len;
- info.low_limit = max(PAGE_SIZE, mmap_min_addr);
- info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
- info.align_mask = 0;
-
- addr = vm_unmapped_area(&info);
- if (!(addr & ~PAGE_MASK))
- return addr;
- VM_BUG_ON(addr != -ENOMEM);
-
- /*
- * A failed mmap() very likely causes application failure,
- * so fall back to the bottom-up function here. This scenario
- * can happen with large stack limits and large mmap()
- * allocations.
- */
- return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-}
-#endif
-
-unsigned long arch_get_unmapped_area(struct file *filp,
- unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
- unsigned long flags)
-{
-#ifdef CONFIG_PPC_MM_SLICES
- return slice_get_unmapped_area(addr, len, flags,
- mm_ctx_user_psize(&current->mm->context), 0);
-#else
- BUG();
-#endif
-}
-
-unsigned long arch_get_unmapped_area_topdown(struct file *filp,
- const unsigned long addr0,
- const unsigned long len,
- const unsigned long pgoff,
- const unsigned long flags)
-{
-#ifdef CONFIG_PPC_MM_SLICES
- return slice_get_unmapped_area(addr0, len, flags,
- mm_ctx_user_psize(&current->mm->context), 1);
-#else
- BUG();
-#endif
-}
-#endif /* HAVE_ARCH_UNMAPPED_AREA */
-
-static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
- unsigned long random_factor,
- struct rlimit *rlim_stack)
-{
-#ifdef CONFIG_PPC_RADIX_MMU
- if (mmap_is_legacy(rlim_stack)) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = radix__arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base(random_factor, rlim_stack);
- mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown;
- }
-#endif
-}
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
- unsigned long random_factor = 0UL;
-
- if (current->flags & PF_RANDOMIZE)
- random_factor = arch_mmap_rnd();
-
- if (radix_enabled())
- return radix__arch_pick_mmap_layout(mm, random_factor,
- rlim_stack);
- /*
- * Fall back to the standard layout if the personality
- * bit is set, or if the expected stack growth is unlimited:
- */
- if (mmap_is_legacy(rlim_stack)) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base(random_factor, rlim_stack);
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
-}
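The deleted mmap.c above was the arch-private copy of the flexible-mmap layout code; the slice.c hunks earlier route radix through generic_get_unmapped_area(), and the layout selection presumably now comes from the generic mm code as well. The part worth keeping in mind is how mmap_base() clamped the stack gap before subtracting it from DEFAULT_MAP_WINDOW; a small sketch of that clamping, reusing the constants removed above:

#define MIN_GAP (128UL * 1024 * 1024)
#define MAX_GAP(task_size) ((task_size) / 6 * 5)

static unsigned long clamp_stack_gap(unsigned long rlim_cur, unsigned long pad,
				     unsigned long task_size)
{
	unsigned long gap = rlim_cur;

	/* Values close to RLIM_INFINITY can overflow, so only add the
	 * randomisation/guard pad when it does not wrap. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP(task_size))
		gap = MAX_GAP(task_size);

	return gap;
}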
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 0dd4c18f8363..63c4b1a4d435 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -155,6 +155,10 @@ struct tlbcam {
u32 MAS3;
u32 MAS7;
};
+
+#define NUM_TLBCAMS 64
+
+extern struct tlbcam TLBCAM[NUM_TLBCAMS];
#endif
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_FSL_BOOKE) || defined(CONFIG_PPC_8xx)
diff --git a/arch/powerpc/mm/nohash/40x.c b/arch/powerpc/mm/nohash/40x.c
index 95751c322f6c..b32e465a3d52 100644
--- a/arch/powerpc/mm/nohash/40x.c
+++ b/arch/powerpc/mm/nohash/40x.c
@@ -32,7 +32,6 @@
#include <linux/highmem.h>
#include <linux/memblock.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
diff --git a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
index 8b88be91b622..307ca919d393 100644
--- a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
+++ b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
@@ -142,7 +142,7 @@ book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
tsize = shift - 10;
/*
* We can't be interrupted while we're setting up the MAS
- * regusters or after we've confirmed that no tlb exists.
+ * registers or after we've confirmed that no tlb exists.
*/
local_irq_save(flags);
diff --git a/arch/powerpc/mm/nohash/fsl_book3e.c b/arch/powerpc/mm/nohash/fsl_book3e.c
index dfe715e0f70a..b8ae6c08c06f 100644
--- a/arch/powerpc/mm/nohash/fsl_book3e.c
+++ b/arch/powerpc/mm/nohash/fsl_book3e.c
@@ -36,8 +36,8 @@
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
+#include <linux/of_fdt.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
@@ -51,10 +51,9 @@
unsigned int tlbcam_index;
-#define NUM_TLBCAMS (64)
struct tlbcam TLBCAM[NUM_TLBCAMS];
-struct tlbcamrange {
+static struct {
unsigned long start;
unsigned long limit;
phys_addr_t phys;
@@ -274,7 +273,7 @@ void __init adjust_total_lowmem(void)
i = switch_to_as1();
__max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true);
- restore_to_as0(i, 0, 0, 1);
+ restore_to_as0(i, 0, NULL, 1);
pr_info("Memory CAM mapping: ");
for (i = 0; i < tlbcam_index - 1; i++)
@@ -288,21 +287,18 @@ void __init adjust_total_lowmem(void)
#ifdef CONFIG_STRICT_KERNEL_RWX
void mmu_mark_rodata_ro(void)
{
- /* Everything is done in mmu_mark_initmem_nx() */
-}
-#endif
-
-void mmu_mark_initmem_nx(void)
-{
unsigned long remapped;
- if (!strict_kernel_rwx_enabled())
- return;
-
remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);
WARN_ON(__max_low_memory != remapped);
}
+#endif
+
+void mmu_mark_initmem_nx(void)
+{
+ /* Everything is done in mmu_mark_rodata_ro() */
+}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index 96c38f971603..1f3f9fedf1bc 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -14,8 +14,9 @@
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/crash_core.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <asm/cacheflush.h>
-#include <asm/prom.h>
#include <asm/kdump.h>
#include <mm/mmu_decl.h>
#include <generated/compile.h>
@@ -315,7 +316,7 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true);
linear_sz = min_t(unsigned long, ram, SZ_512M);
- /* If the linear size is smaller than 64M, do not randmize */
+ /* If the linear size is smaller than 64M, do not randomize */
if (linear_sz < SZ_64M)
return 0;
diff --git a/arch/powerpc/mm/nohash/mmu_context.c b/arch/powerpc/mm/nohash/mmu_context.c
index 85b048f04c56..ccd5819b1bd9 100644
--- a/arch/powerpc/mm/nohash/mmu_context.c
+++ b/arch/powerpc/mm/nohash/mmu_context.c
@@ -317,15 +317,6 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
*/
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
- /*
- * We have MMU_NO_CONTEXT set to be ~0. Hence check
- * explicitly against context.id == 0. This ensures that we properly
- * initialize context slice details for newly allocated mm's (which will
- * have id == 0) and don't alter context slice inherited via fork (which
- * will have id != 0).
- */
- if (mm->context.id == 0)
- slice_init_new_context_exec(mm);
mm->context.id = MMU_NO_CONTEXT;
mm->context.active = 0;
pte_frag_set(&mm->context, NULL);
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
index fd2c77af5c55..5e7ccb48b79c 100644
--- a/arch/powerpc/mm/nohash/tlb.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -358,6 +358,7 @@ void __init early_init_mmu_47x(void)
/*
* Flush kernel TLB entries in the given range
*/
+#ifndef CONFIG_PPC_8xx
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
@@ -370,6 +371,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
+#endif
/*
* Currently, for range flushing, we just do a full mm flush. This should
@@ -773,9 +775,5 @@ void __init early_init_mmu(void)
#ifdef CONFIG_PPC_47x
early_init_mmu_47x();
#endif
-
-#ifdef CONFIG_PPC_MM_SLICES
- mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT);
-#endif
}
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 13022d734951..0801b2ce9b7d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -26,7 +26,6 @@
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
@@ -1423,43 +1422,26 @@ out:
return rc;
}
-int find_and_online_cpu_nid(int cpu)
+void find_and_update_cpu_nid(int cpu)
{
__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
int new_nid;
/* Use associativity from first thread for all siblings */
if (vphn_get_associativity(cpu, associativity))
- return cpu_to_node(cpu);
+ return;
+ /* Do not have previous associativity, so find it now. */
new_nid = associativity_to_nid(associativity);
- if (new_nid < 0 || !node_possible(new_nid))
- new_nid = first_online_node;
- if (!node_online(new_nid)) {
-#ifdef CONFIG_MEMORY_HOTPLUG
- /*
- * Need to ensure that NODE_DATA is initialized for a node from
- * available memory (see memblock_alloc_try_nid). If unable to
- * init the node, then default to nearest node that has memory
- * installed. Skip onlining a node if the subsystems are not
- * yet initialized.
- */
- if (!topology_inited || try_online_node(new_nid))
- new_nid = first_online_node;
-#else
- /*
- * Default to using the nearest node that has memory installed.
- * Otherwise, it would be necessary to patch the kernel MM code
- * to deal with more memoryless-node error conditions.
- */
+ if (new_nid < 0 || !node_possible(new_nid))
new_nid = first_online_node;
-#endif
- }
+ else
+ // Associate node <-> cpu, so cpu_up() calls
+ // try_online_node() on the right node.
+ set_cpu_numa_node(cpu, new_nid);
- pr_debug("%s:%d cpu %d nid %d\n", __FUNCTION__, __LINE__,
- cpu, new_nid);
- return new_nid;
+ pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__, cpu, new_nid);
}
int cpu_to_coregroup_id(int cpu)
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
index 85753e32a4de..6163e484bc6d 100644
--- a/arch/powerpc/mm/pageattr.c
+++ b/arch/powerpc/mm/pageattr.c
@@ -31,6 +31,7 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
{
long action = (long)data;
+ addr &= PAGE_MASK;
/* modify the PTE bits as desired */
switch (action) {
case SET_MEMORY_RO:
diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
index 97ae4935da79..20652daa1d7e 100644
--- a/arch/powerpc/mm/pgtable-frag.c
+++ b/arch/powerpc/mm/pgtable-frag.c
@@ -83,7 +83,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
spin_lock(&mm->page_table_lock);
/*
* If we find pgtable_page set, we return
- * the allocated page with single fragement
+ * the allocated page with single fragment
* count.
*/
if (likely(!pte_frag_get(&mm->context))) {
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 6ec5a7dd7913..e6166b71d36d 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -351,7 +351,7 @@ EXPORT_SYMBOL_GPL(vmalloc_to_phys);
* (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
*
* So long as we atomically load page table pointers we are safe against teardown,
- * we can follow the address down to the the page and take a ref on it.
+ * we can follow the address down to the page and take a ref on it.
* This function need to be called with interrupts disabled. We use this variant
* when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED
*/
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 175aabf101e8..5ac1fd30341b 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -32,7 +32,6 @@
#include <linux/hugetlb.h>
#include <asm/page.h>
-#include <asm/prom.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 8c846982766f..2313053fe679 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -21,6 +21,7 @@
#include <linux/seq_file.h>
#include <asm/fixmap.h>
#include <linux/const.h>
+#include <linux/kasan.h>
#include <asm/page.h>
#include <asm/hugetlb.h>
@@ -289,11 +290,11 @@ static void populate_markers(void)
#endif
address_markers[i++].start_address = FIXADDR_START;
address_markers[i++].start_address = FIXADDR_TOP;
+#endif /* CONFIG_PPC64 */
#ifdef CONFIG_KASAN
address_markers[i++].start_address = KASAN_SHADOW_START;
address_markers[i++].start_address = KASAN_SHADOW_END;
#endif
-#endif /* CONFIG_PPC64 */
}
static int ptdump_show(struct seq_file *m, void *v)
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 979701d360da..a4f7880f959d 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -13,7 +13,7 @@
#include <asm/types.h>
#include <asm/ppc-opcode.h>
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
#define FUNCTION_DESCR_SIZE 24
#else
#define FUNCTION_DESCR_SIZE 0
@@ -35,7 +35,7 @@
} while (0)
/* bl (unconditional 'branch' with link) */
-#define PPC_BL(dest) EMIT(PPC_INST_BL | (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
+#define PPC_BL(dest) EMIT(PPC_RAW_BL((dest) - (unsigned long)(image + ctx->idx)))
/* "cond" here covers BO:BI fields. */
#define PPC_BCC_SHORT(cond, dest) \
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 427185256216..43e634126514 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -276,7 +276,7 @@ skip_codegen_passes:
*/
bpf_jit_dump(flen, proglen, pass, code_base);
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
/* Function descriptor nastiness: Address + TOC */
((u64 *)image)[0] = (u64)code_base;
((u64 *)image)[1] = local_paca->kernel_toc;
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 585f257da045..594c54931e20 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -126,7 +126,7 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
int i;
- if (__is_defined(PPC64_ELF_ABI_v2))
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
/*
@@ -266,7 +266,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
int b2p_index = bpf_to_ppc(BPF_REG_3);
int bpf_tailcall_prologue_size = 8;
- if (__is_defined(PPC64_ELF_ABI_v2))
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
bpf_tailcall_prologue_size += 4; /* skip past the toc load */
/*
diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c
index 4738c4dbf567..308a2e40d7be 100644
--- a/arch/powerpc/perf/8xx-pmu.c
+++ b/arch/powerpc/perf/8xx-pmu.c
@@ -157,7 +157,7 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags)
mpc8xx_pmu_read(event);
- /* If it was the last user, stop counting to avoid useles overhead */
+ /* If it was the last user, stop counting to avoid useless overhead */
switch (event_type(event)) {
case PERF_8xx_ID_CPU_CYCLES:
break;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index b5b42cf0a703..140502a7fdf8 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1142,7 +1142,7 @@ static u64 check_and_compute_delta(u64 prev, u64 val)
/*
* POWER7 can roll back counter values, if the new value is smaller
* than the previous value it will cause the delta and the counter to
- * have bogus values unless we rolled a counter over. If a coutner is
+ * have bogus values unless we rolled a counter over. If a counter is
* rolled back, it will be smaller, but within 256, which is the maximum
* number of events to rollback at once. If we detect a rollback
* return 0. This can lead to a small lack of precision in the
@@ -2057,7 +2057,7 @@ static int power_pmu_event_init(struct perf_event *event)
/*
* PMU config registers have fields that are
* reserved and some specific values for bit fields are reserved.
- * For ex., MMCRA[61:62] is Randome Sampling Mode (SM)
+ * For ex., MMCRA[61:62] is Random Sampling Mode (SM)
* and value of 0b11 to this field is reserved.
* Check for invalid values in attr.config.
*/
@@ -2447,7 +2447,7 @@ static void __perf_event_interrupt(struct pt_regs *regs)
}
/*
- * During system wide profling or while specific CPU is monitored for an
+ * During system wide profiling or while specific CPU is monitored for an
* event, some corner cases could cause PMC to overflow in idle path. This
* will trigger a PMI after waking up from idle. Since counter values are _not_
* saved/restored in idle path, can lead to below "Can't find PMC" message.
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 12c1777187fc..cf5406b31e27 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -33,7 +33,7 @@ static bool aggregate_result_elements;
static cpumask_t hv_24x7_cpumask;
-static bool domain_is_valid(unsigned domain)
+static bool domain_is_valid(unsigned int domain)
{
switch (domain) {
#define DOMAIN(n, v, x, c) \
@@ -47,7 +47,7 @@ static bool domain_is_valid(unsigned domain)
}
}
-static bool is_physical_domain(unsigned domain)
+static bool is_physical_domain(unsigned int domain)
{
switch (domain) {
#define DOMAIN(n, v, x, c) \
@@ -128,7 +128,7 @@ static bool domain_needs_aggregation(unsigned int domain)
domain <= HV_PERF_DOMAIN_VCPU_REMOTE_NODE));
}
-static const char *domain_name(unsigned domain)
+static const char *domain_name(unsigned int domain)
{
if (!domain_is_valid(domain))
return NULL;
@@ -146,7 +146,7 @@ static const char *domain_name(unsigned domain)
return NULL;
}
-static bool catalog_entry_domain_is_valid(unsigned domain)
+static bool catalog_entry_domain_is_valid(unsigned int domain)
{
/* POWER8 doesn't support virtual domains. */
if (interface_version == 1)
@@ -258,7 +258,7 @@ static char *event_name(struct hv_24x7_event_data *ev, int *len)
static char *event_desc(struct hv_24x7_event_data *ev, int *len)
{
- unsigned nl = be16_to_cpu(ev->event_name_len);
+ unsigned int nl = be16_to_cpu(ev->event_name_len);
__be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);
*len = be16_to_cpu(*desc_len) - 2;
@@ -267,9 +267,9 @@ static char *event_desc(struct hv_24x7_event_data *ev, int *len)
static char *event_long_desc(struct hv_24x7_event_data *ev, int *len)
{
- unsigned nl = be16_to_cpu(ev->event_name_len);
+ unsigned int nl = be16_to_cpu(ev->event_name_len);
__be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
- unsigned desc_len = be16_to_cpu(*desc_len_);
+ unsigned int desc_len = be16_to_cpu(*desc_len_);
__be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);
*len = be16_to_cpu(*long_desc_len) - 2;
@@ -296,8 +296,8 @@ static void *event_end(struct hv_24x7_event_data *ev, void *end)
{
void *start = ev;
__be16 *dl_, *ldl_;
- unsigned dl, ldl;
- unsigned nl = be16_to_cpu(ev->event_name_len);
+ unsigned int dl, ldl;
+ unsigned int nl = be16_to_cpu(ev->event_name_len);
if (nl < 2) {
pr_debug("%s: name length too short: %d", __func__, nl);
@@ -398,7 +398,7 @@ static long h_get_24x7_catalog_page(char page[], u64 version, u32 index)
* - Specifying (i.e overriding) values for other parameters
* is undefined.
*/
-static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain)
+static char *event_fmt(struct hv_24x7_event_data *event, unsigned int domain)
{
const char *sindex;
const char *lpar;
@@ -529,9 +529,9 @@ out_s:
return NULL;
}
-static struct attribute *event_to_attr(unsigned ix,
+static struct attribute *event_to_attr(unsigned int ix,
struct hv_24x7_event_data *event,
- unsigned domain,
+ unsigned int domain,
int nonce)
{
int event_name_len;
@@ -599,8 +599,8 @@ event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce)
return device_str_attr_create(name, nl, nonce, desc, dl);
}
-static int event_data_to_attrs(unsigned ix, struct attribute **attrs,
- struct hv_24x7_event_data *event, int nonce)
+static int event_data_to_attrs(unsigned int ix, struct attribute **attrs,
+ struct hv_24x7_event_data *event, int nonce)
{
*attrs = event_to_attr(ix, event, event->domain, nonce);
if (!*attrs)
@@ -614,8 +614,8 @@ struct event_uniq {
struct rb_node node;
const char *name;
int nl;
- unsigned ct;
- unsigned domain;
+ unsigned int ct;
+ unsigned int domain;
};
static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
@@ -628,8 +628,8 @@ static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
return memcmp(d1, d2, s1);
}
-static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
- size_t s2, unsigned d2)
+static int ev_uniq_ord(const void *v1, size_t s1, unsigned int d1,
+ const void *v2, size_t s2, unsigned int d2)
{
int r = memord(v1, s1, v2, s2);
@@ -643,7 +643,7 @@ static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
}
static int event_uniq_add(struct rb_root *root, const char *name, int nl,
- unsigned domain)
+ unsigned int domain)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
struct event_uniq *data;
@@ -1398,7 +1398,7 @@ out:
static int h_24x7_event_init(struct perf_event *event)
{
struct hv_perf_caps caps;
- unsigned domain;
+ unsigned int domain;
unsigned long hret;
u64 ct;
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 526d4b767534..d7976ab40d38 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -6,6 +6,7 @@
* (C) 2017 Anju T Sudhakar, IBM Corporation.
* (C) 2017 Hemant K Shaw, IBM Corporation.
*/
+#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/opal.h>
@@ -521,7 +522,7 @@ static int nest_imc_event_init(struct perf_event *event)
/*
* Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
- * Get the base memory addresss for this cpu.
+ * Get the base memory address for this cpu.
*/
chip_id = cpu_to_chip_id(event->cpu);
@@ -674,7 +675,7 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
/*
* Check whether core_imc is registered. We could end up here
* if the cpuhotplug callback registration fails. i.e, callback
- * invokes the offline path for all sucessfully registered cpus.
+ * invokes the offline path for all successfully registered cpus.
* At this stage, core_imc pmu will not be registered and we
* should return here.
*
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index a74d382ecbb7..42abbcfc73da 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -82,11 +82,11 @@ static unsigned long sdar_mod_val(u64 event)
static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
{
/*
- * MMCRA[SDAR_MODE] specifices how the SDAR should be updated in
- * continous sampling mode.
+ * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in
+ * continuous sampling mode.
*
* Incase of Power8:
- * MMCRA[SDAR_MODE] will be programmed as "0b01" for continous sampling
+ * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous sampling
* mode and will be un-changed when setting MMCRA[63] (Marked events).
*
* Incase of Power9/power10:
@@ -108,7 +108,7 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
*mmcra |= MMCRA_SDAR_MODE_TLB;
}
-static u64 p10_thresh_cmp_val(u64 value)
+static int p10_thresh_cmp_val(u64 value)
{
int exp = 0;
u64 result = value;
@@ -139,7 +139,7 @@ static u64 p10_thresh_cmp_val(u64 value)
* exponent is also zero.
*/
if (!(value & 0xC0) && exp)
- result = 0;
+ result = -1;
else
result = (exp << 8) | value;
}
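p10_thresh_cmp_val() now reports an invalid mantissa/exponent combination as -1 rather than 0, and the is_thresh_cmp_valid() hunk below checks for a non-negative result, presumably because 0 is itself a legal encoded value and cannot double as the error sentinel. A minimal sketch of the rule visible in this hunk, with invented names:

/* Illustration only: reject a non-zero exponent whose mantissa has both
 * top bits clear, otherwise pack exponent and mantissa. */
static int encode_thresh_cmp(unsigned int mantissa, unsigned int exp)
{
	if (exp && !(mantissa & 0xC0))
		return -1;
	return (int)((exp << 8) | mantissa);
}

/* callers treat a negative return as "constraint not satisfiable" */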
@@ -187,7 +187,7 @@ static bool is_thresh_cmp_valid(u64 event)
unsigned int cmp, exp;
if (cpu_has_feature(CPU_FTR_ARCH_31))
- return p10_thresh_cmp_val(event) != 0;
+ return p10_thresh_cmp_val(event) >= 0;
/*
* Check the mantissa upper two bits are not zero, unless the
@@ -502,12 +502,14 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp,
value |= CNST_THRESH_CTL_SEL_VAL(event >> EVENT_THRESH_SHIFT);
mask |= p10_CNST_THRESH_CMP_MASK;
value |= p10_CNST_THRESH_CMP_VAL(p10_thresh_cmp_val(event_config1));
- }
+ } else if (event_is_threshold(event))
+ return -1;
} else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (event_is_threshold(event) && is_thresh_cmp_valid(event)) {
mask |= CNST_THRESH_MASK;
value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
- }
+ } else if (event_is_threshold(event))
+ return -1;
} else {
/*
* Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index c393e837648e..3ad40ffb9256 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -98,7 +98,7 @@ extern u64 PERF_REG_EXTENDED_MASK;
/* PowerISA v2.07 format attribute structure*/
extern const struct attribute_group isa207_pmu_format_group;
-int p9_dd21_bl_ev[] = {
+static int p9_dd21_bl_ev[] = {
PM_MRK_ST_DONE_L2,
PM_RADIX_PWC_L1_HIT,
PM_FLOP_CMPL,
@@ -112,7 +112,7 @@ int p9_dd21_bl_ev[] = {
PM_DISP_HELD_SYNC_HOLD,
};
-int p9_dd22_bl_ev[] = {
+static int p9_dd22_bl_ev[] = {
PM_DTLB_MISS_16G,
PM_DERAT_MISS_2M,
PM_DTLB_MISS_2M,
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
index e70b42729322..dce696c32679 100644
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -13,7 +13,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/ppc4xx.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include <asm/uic.h>
diff --git a/arch/powerpc/platforms/44x/canyonlands.c b/arch/powerpc/platforms/44x/canyonlands.c
index 807968a755ef..5b23aef8bdef 100644
--- a/arch/powerpc/platforms/44x/canyonlands.c
+++ b/arch/powerpc/platforms/44x/canyonlands.c
@@ -12,6 +12,7 @@
#include <asm/ppc4xx.h>
#include <asm/udbg.h>
#include <asm/uic.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/delay.h>
#include "44x.h"
diff --git a/arch/powerpc/platforms/44x/fsp2.c b/arch/powerpc/platforms/44x/fsp2.c
index af13a59d2f60..e2e4f6d8150d 100644
--- a/arch/powerpc/platforms/44x/fsp2.c
+++ b/arch/powerpc/platforms/44x/fsp2.c
@@ -14,11 +14,11 @@
*/
#include <linux/init.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/rtc.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c
index 3dbd8ddd734a..2a0dcdf04b21 100644
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -13,7 +13,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/ppc4xx.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include <asm/uic.h>
diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c
index fb7db5cedd4e..20cc8f80b086 100644
--- a/arch/powerpc/platforms/44x/ppc476.c
+++ b/arch/powerpc/platforms/44x/ppc476.c
@@ -19,11 +19,11 @@
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/rtc.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
diff --git a/arch/powerpc/platforms/44x/sam440ep.c b/arch/powerpc/platforms/44x/sam440ep.c
index 68ba4b009da0..ed854b53877e 100644
--- a/arch/powerpc/platforms/44x/sam440ep.c
+++ b/arch/powerpc/platforms/44x/sam440ep.c
@@ -17,7 +17,6 @@
#include <linux/of_platform.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 665f18e37efb..f03432ef010b 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -11,12 +11,13 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
diff --git a/arch/powerpc/platforms/4xx/cpm.c b/arch/powerpc/platforms/4xx/cpm.c
index 2571841625a2..1d3bc35ee1a7 100644
--- a/arch/powerpc/platforms/4xx/cpm.c
+++ b/arch/powerpc/platforms/4xx/cpm.c
@@ -327,6 +327,6 @@ late_initcall(cpm_init);
static int __init cpm_powersave_off(char *arg)
{
cpm.powersave_off = 1;
- return 0;
+ return 1;
}
__setup("powersave=off", cpm_powersave_off);
diff --git a/arch/powerpc/platforms/4xx/hsta_msi.c b/arch/powerpc/platforms/4xx/hsta_msi.c
index fee430eadcc6..d4f7fff1fc87 100644
--- a/arch/powerpc/platforms/4xx/hsta_msi.c
+++ b/arch/powerpc/platforms/4xx/hsta_msi.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c
index 24f41e178cbc..ca5dd7a5842a 100644
--- a/arch/powerpc/platforms/4xx/pci.c
+++ b/arch/powerpc/platforms/4xx/pci.c
@@ -22,6 +22,7 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/arch/powerpc/platforms/4xx/uic.c b/arch/powerpc/platforms/4xx/uic.c
index 89e2587b1a59..d667ad039bd3 100644
--- a/arch/powerpc/platforms/4xx/uic.c
+++ b/arch/powerpc/platforms/4xx/uic.c
@@ -19,9 +19,10 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/dcr.h>
#define NR_UIC_INTS 32
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index 0b03d812baae..0652c7e69225 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -663,7 +663,7 @@ static void __init mpc512x_clk_setup_mclk(struct mclk_setup_data *entry, size_t
* the PSC/MSCAN/SPDIF (serial drivers et al) need the MCLK
* for their bitrate
* - in the absence of "aliases" for clocks we need to create
- * individial 'struct clk' items for whatever might get
+ * individual 'struct clk' items for whatever might get
* referenced or looked up, even if several of those items are
* identical from the logical POV (their rate value)
* - for easier future maintenance and for better reflection of
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads.c b/arch/powerpc/platforms/512x/mpc5121_ads.c
index 9d030c2e0004..fc3fb999cd74 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads.c
@@ -14,7 +14,6 @@
#include <asm/machdep.h>
#include <asm/ipic.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <sysdev/fsl_pci.h>
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index ea46870e5d6e..6f08d07aee3b 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -14,7 +14,8 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
-#include <asm/prom.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
static struct device_node *cpld_pic_node;
static struct irq_domain *cpld_pic_host;
diff --git a/arch/powerpc/platforms/512x/mpc512x_generic.c b/arch/powerpc/platforms/512x/mpc512x_generic.c
index 303bc308b2e6..364564c995bd 100644
--- a/arch/powerpc/platforms/512x/mpc512x_generic.c
+++ b/arch/powerpc/platforms/512x/mpc512x_generic.c
@@ -13,7 +13,6 @@
#include <asm/machdep.h>
#include <asm/ipic.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include "mpc512x.h"
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index e3411663edad..5ac0ead2540f 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/fsl-diu-fb.h>
#include <linux/memblock.h>
@@ -20,7 +21,6 @@
#include <asm/cacheflush.h>
#include <asm/machdep.h>
#include <asm/ipic.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <asm/mpc5121.h>
#include <asm/mpc52xx_psc.h>
@@ -289,7 +289,7 @@ static void __init mpc512x_setup_diu(void)
/*
* We do not allocate and configure new area for bitmap buffer
- * because it would requere copying bitmap data (splash image)
+ * because it would require copying bitmap data (splash image)
* and so negatively affect boot time. Instead we reserve the
* already configured frame buffer area so that it won't be
* destroyed. The starting address of the area to reserve and
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 3b7d70d71692..e0647720ed5e 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -14,7 +14,6 @@
#include <linux/pci.h>
#include <linux/of.h>
#include <asm/dma.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
index 04cc97397095..7ea9b6ce0591 100644
--- a/arch/powerpc/platforms/52xx/lite5200.c
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -21,7 +21,6 @@
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/mpc52xx.h>
/* ************************************************************************
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
index e7da22d1df87..129313b1d021 100644
--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/suspend.h>
+#include <linux/of_address.h>
+
#include <asm/io.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 110c444f4bc7..ee367ff3ec8a 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -20,8 +20,9 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/time.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/mpc52xx.h>
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
index b9f5675b0a1d..cc349d579061 100644
--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
+++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
@@ -22,8 +22,8 @@
*/
#undef DEBUG
+#include <linux/of.h>
#include <asm/time.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/mpc52xx.h>
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 565e3a83dc9e..4348506d667d 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -15,11 +15,11 @@
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/export.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/mpc52xx.h>
/* MPC5200 device tree match tables */
@@ -308,7 +308,7 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number)
spin_lock_irqsave(&gpio_lock, flags);
- /* Reconfiure pin-muxing to gpio */
+ /* Reconfigure pin-muxing to gpio */
mux = in_be32(&simple_gpio->port_config);
out_be32(&simple_gpio->port_config, mux & (~gpio));
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index f862b48b4824..968f5b727273 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -5,7 +5,7 @@
* Copyright (c) 2009 Secret Lab Technologies Ltd.
* Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
*
- * This file is a driver for the the General Purpose Timer (gpt) devices
+ * This file is a driver for the General Purpose Timer (gpt) devices
* found on the MPC5200 SoC. Each timer has an IO pin which can be used
* for GPIO or can be used to raise interrupts. The timer function can
* be used independently from the IO pin, or it can be used to control
@@ -55,6 +55,8 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/kernel.h>
@@ -398,7 +400,7 @@ static int mpc52xx_gpt_do_start(struct mpc52xx_gpt_priv *gpt, u64 period,
set |= MPC52xx_GPT_MODE_CONTINUOUS;
/* Determine the number of clocks in the requested period. 64 bit
- * arithmatic is done here to preserve the precision until the value
+ * arithmetic is done here to preserve the precision until the value
* is scaled back down into the u32 range. Period is in 'ns', bus
* frequency is in Hz. */
clocks = period * (u64)gpt->ipb_freq;
@@ -502,7 +504,7 @@ u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt)
if (prescale == 0)
prescale = 0x10000;
period = period * prescale * 1000000000ULL;
- do_div(period, (u64)gpt->ipb_freq);
+ do_div(period, gpt->ipb_freq);
return period;
}
EXPORT_SYMBOL(mpc52xx_gpt_timer_period);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index b91ebebd9ff2..48038aaedbd3 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -11,11 +11,12 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/mpc52xx.h>
#include <asm/time.h>
@@ -104,7 +105,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
*
* Configure the watermarks so DMA will always complete correctly.
* It may be worth experimenting with the ALARM value to see if
- * there is a performance impacit. However, if it is wrong there
+ * there is a performance impact. However, if it is wrong there
* is a risk of DMA not transferring the last chunk of data
*/
if (write) {
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
index af0f79995214..859e2818c43d 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
@@ -13,6 +13,7 @@
#undef DEBUG
#include <linux/pci.h>
+#include <linux/of_address.h>
#include <asm/mpc52xx.h>
#include <asm/delay.h>
#include <asm/machdep.h>
@@ -242,7 +243,7 @@ mpc52xx_pci_setup(struct pci_controller *hose,
u32 tmp;
int iwcr0 = 0, iwcr1 = 0, iwcr2 = 0;
- pr_debug("mpc52xx_pci_setup(hose=%p, pci_regs=%p)\n", hose, pci_regs);
+ pr_debug("%s(hose=%p, pci_regs=%p)\n", __func__, hose, pci_regs);
/* pci_process_bridge_OF_ranges() found all our addresses for us;
* now store them in the right places */
@@ -257,11 +258,7 @@ mpc52xx_pci_setup(struct pci_controller *hose,
/* Memory windows */
res = &hose->mem_resources[0];
if (res->flags) {
- pr_debug("mem_resource[0] = "
- "{.start=%llx, .end=%llx, .flags=%llx}\n",
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- (unsigned long long)res->flags);
+ pr_debug("mem_resource[0] = %pr\n", res);
out_be32(&pci_regs->iw0btar,
MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start,
resource_size(res)));
@@ -274,8 +271,7 @@ mpc52xx_pci_setup(struct pci_controller *hose,
res = &hose->mem_resources[1];
if (res->flags) {
- pr_debug("mem_resource[1] = {.start=%x, .end=%x, .flags=%lx}\n",
- res->start, res->end, res->flags);
+ pr_debug("mem_resource[1] = %pr\n", res);
out_be32(&pci_regs->iw1btar,
MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start,
resource_size(res)));
@@ -292,11 +288,8 @@ mpc52xx_pci_setup(struct pci_controller *hose,
printk(KERN_ERR "%s: Didn't find IO resources\n", __FILE__);
return;
}
- pr_debug(".io_resource={.start=%llx,.end=%llx,.flags=%llx} "
- ".io_base_phys=0x%p\n",
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- (unsigned long long)res->flags, (void*)hose->io_base_phys);
+ pr_debug(".io_resource = %pr .io_base_phys=0x%pa\n",
+ res, &hose->io_base_phys);
out_be32(&pci_regs->iw2btar,
MPC52xx_PCI_IWBTAR_TRANSLATION(hose->io_base_phys,
res->start,
@@ -336,8 +329,7 @@ mpc52xx_pci_fixup_resources(struct pci_dev *dev)
{
int i;
- pr_debug("mpc52xx_pci_fixup_resources() %.4x:%.4x\n",
- dev->vendor, dev->device);
+ pr_debug("%s() %.4x:%.4x\n", __func__, dev->vendor, dev->device);
/* We don't rely on boot loader for PCI and resets all
devices */
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 76a8102bdb98..1e0a5e9644dc 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -101,8 +101,9 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/mpc52xx.h>
/* HW IRQ mapping */
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
index b1d208ded981..549b3629e39a 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
@@ -2,6 +2,8 @@
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/io.h>
+#include <linux/of_address.h>
+
#include <asm/time.h>
#include <asm/cacheflush.h>
#include <asm/mpc52xx.h>
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c
index 369ebb1b7af1..28e627f8a320 100644
--- a/arch/powerpc/platforms/82xx/ep8248e.c
+++ b/arch/powerpc/platforms/82xx/ep8248e.c
@@ -20,7 +20,6 @@
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/mpc8260.h>
-#include <asm/prom.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/cpm2_pic.h>
diff --git a/arch/powerpc/platforms/82xx/km82xx.c b/arch/powerpc/platforms/82xx/km82xx.c
index 745ed61df5d8..1c8bbf4251d9 100644
--- a/arch/powerpc/platforms/82xx/km82xx.c
+++ b/arch/powerpc/platforms/82xx/km82xx.c
@@ -20,7 +20,6 @@
#include <asm/machdep.h>
#include <linux/time.h>
#include <asm/mpc8260.h>
-#include <asm/prom.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/cpm2_pic.h>
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 285bfe19b798..cf3210042a2e 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -14,9 +14,9 @@
#include <linux/irq.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/of_irq.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/cpm2.h>
#include "pq2.h"
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index d9eed0decb28..907acdecc94a 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -29,7 +29,6 @@
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index a38372f9ac12..abb62fa630ef 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -8,17 +8,16 @@
*/
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/i2c.h>
#include <linux/gpio/driver.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/kthread.h>
+#include <linux/property.h>
#include <linux/reboot.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
/*
@@ -116,21 +115,17 @@ static int mcu_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mcu_gpiochip_add(struct mcu *mcu)
{
- struct device_node *np;
+ struct device *dev = &mcu->client->dev;
struct gpio_chip *gc = &mcu->gc;
- np = of_find_compatible_node(NULL, NULL, "fsl,mcu-mpc8349emitx");
- if (!np)
- return -ENODEV;
-
gc->owner = THIS_MODULE;
- gc->label = kasprintf(GFP_KERNEL, "%pOF", np);
+ gc->label = kasprintf(GFP_KERNEL, "%pfw", dev_fwnode(dev));
gc->can_sleep = 1;
gc->ngpio = MCU_NUM_GPIO;
gc->base = -1;
gc->set = mcu_gpio_set;
gc->direction_output = mcu_gpio_dir_out;
- gc->of_node = np;
+ gc->parent = dev;
return gpiochip_add_data(gc, mcu);
}
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index 850d566ef900..435344405d2c 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -28,7 +28,6 @@
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index b6133a237a70..bb8caa5071f8 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -15,6 +15,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>
#include <linux/mmc/host.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/fsl_devices.h>
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index 9630f3aa4d9c..6a110f275304 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -27,7 +27,6 @@
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index 0713deffb40c..7dde5a75332b 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/atomic.h>
@@ -27,7 +28,6 @@
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index da4cf52cb55b..b1e6665be5d3 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -35,7 +35,6 @@
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
index 3427ad0d9d38..731bc5ce726d 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
@@ -12,7 +12,6 @@
#include <linux/pci.h>
#include <linux/of_platform.h>
#include <linux/io.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <asm/ipic.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index fc88ab97f6e3..fa3538803af7 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -9,12 +9,12 @@
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/ipic.h>
#include <asm/udbg.h>
-#include <asm/prom.h>
#include <sysdev/fsl_pci.h>
#include "mpc83xx.h"
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index bb147d34d4a6..6d47a5b81485 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -322,18 +322,15 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = {
static const struct of_device_id pmc_match[];
static int pmc_probe(struct platform_device *ofdev)
{
- const struct of_device_id *match;
struct device_node *np = ofdev->dev.of_node;
struct resource res;
const struct pmc_type *type;
int ret = 0;
- match = of_match_device(pmc_match, &ofdev->dev);
- if (!match)
+ type = of_device_get_match_data(&ofdev->dev);
+ if (!type)
return -EINVAL;
- type = match->data;
-
if (!of_device_is_available(np))
return -ENODEV;
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
index b0bda20aaccf..e2a13a052f96 100644
--- a/arch/powerpc/platforms/83xx/usb.c
+++ b/arch/powerpc/platforms/83xx/usb.c
@@ -11,9 +11,9 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <sysdev/fsl_soc.h>
#include "mpc83xx.h"
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 4142ebf01382..2be17ffe8714 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -16,15 +16,6 @@ if FSL_SOC_BOOKE
if PPC32
-config FSL_85XX_CACHE_SRAM
- bool
- select PPC_LIB_RHEAP
- help
- When selected, this option enables cache-sram support
- for memory allocation on P1/P2 QorIQ platforms.
- cache-sram-size and cache-sram-offset kernel boot
- parameters should be passed when this option is enabled.
-
config BSC9131_RDB
bool "Freescale BSC9131RDB"
select DEFAULT_UIMAGE
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 17ae75d62518..28d6b36f1ccd 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -19,7 +19,6 @@
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/ehv_pic.h>
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
index 743c65e4d8e4..8e827376d97b 100644
--- a/arch/powerpc/platforms/85xx/ge_imp3a.c
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -17,13 +17,13 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c
index 6ef8580fdc0e..bdf9d42f8521 100644
--- a/arch/powerpc/platforms/85xx/ksi8560.c
+++ b/arch/powerpc/platforms/85xx/ksi8560.c
@@ -26,7 +26,6 @@
#include <asm/mpic.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
-#include <asm/prom.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index 53bccb8bbcbe..e5d7386ad612 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -18,7 +18,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 5bd487030256..48f3acfece0b 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -21,6 +21,8 @@
#include <linux/initrd.h>
#include <linux/interrupt.h>
#include <linux/fsl_devices.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/pgtable.h>
@@ -33,7 +35,6 @@
#include <asm/pci-bridge.h>
#include <asm/irq.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/i8259.h>
@@ -151,7 +152,7 @@ static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev)
*/
case PCI_DEVICE_ID_VIA_82C586_2:
/* There are two USB controllers.
- * Identify them by functon number
+ * Identify them by function number
*/
if (PCI_FUNC(dev->devfn) == 3)
dev->irq = 11;
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 2157a8017aa4..f8d2c97f39bd 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -15,13 +15,13 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/i8259.h>
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 7759eca7d535..3a2ac410af18 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -39,7 +39,6 @@
#include <asm/pci-bridge.h>
#include <asm/irq.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 80a80174768c..d99aba158235 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -19,7 +19,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <soc/fsl/qe/qe.h>
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index 24855284b14a..8ba9306a96b6 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -16,7 +16,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 1f1af0557470..537599906146 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -18,6 +18,7 @@
#include <linux/fsl/guts.h>
#include <linux/pci.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/div64.h>
#include <asm/mpic.h>
diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
index fd9e3e7ef234..bc58a99164c9 100644
--- a/arch/powerpc/platforms/85xx/p1022_rdk.c
+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
@@ -14,6 +14,7 @@
#include <linux/fsl/guts.h>
#include <linux/pci.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/div64.h>
#include <asm/mpic.h>
diff --git a/arch/powerpc/platforms/85xx/p1023_rdb.c b/arch/powerpc/platforms/85xx/p1023_rdb.c
index 3b9cc4979641..c04868eb2eb1 100644
--- a/arch/powerpc/platforms/85xx/p1023_rdb.c
+++ b/arch/powerpc/platforms/85xx/p1023_rdb.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/fsl_devices.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
@@ -22,7 +23,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include "smp.h"
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
index 4c4d577effd9..64109ad6736c 100644
--- a/arch/powerpc/platforms/85xx/qemu_e500.c
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -12,6 +12,7 @@
*/
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/pgtable.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index a1c6a7827c8f..9c43cf32f4c9 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -208,7 +208,7 @@ static int smp_85xx_start_cpu(int cpu)
* The bootpage and highmem can be accessed via ioremap(), but
* we need to directly access the spinloop if its in lowmem.
*/
- ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);
+ ioremappable = *cpu_rel_addr > virt_to_phys(high_memory - 1);
/* Map the spin table */
if (ioremappable)
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index 166b3515ba73..09f64470c765 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -29,7 +29,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index 69e917e3ba1c..6b1fe7bb3a8c 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -28,7 +28,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index 95a1a1118a31..d187f4b8bff6 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -26,7 +26,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index 397e158c1edb..5836e4ecb7a0 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -16,13 +16,13 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index 44bbbc535e1d..8e358fa0bc41 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -18,12 +18,12 @@
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
@@ -180,7 +180,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
*
* This function is called to determine whether the BSP is compatible with the
* supplied device-tree, which is assumed to be the correct one for the actual
- * board. It is expected thati, in the future, a kernel may support multiple
+ * board. It is expected that, in the future, a kernel may support multiple
* boards.
*/
static int __init gef_ppc9a_probe(void)
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index 46d6d3d4957a..b5b2733567cb 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -18,12 +18,12 @@
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
@@ -167,7 +167,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
*
* This function is called to determine whether the BSP is compatible with the
* supplied device-tree, which is assumed to be the correct one for the actual
- * board. It is expected thati, in the future, a kernel may support multiple
+ * board. It is expected that, in the future, a kernel may support multiple
* boards.
*/
static int __init gef_sbc310_probe(void)
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index acf2c6c3c1eb..bb4c8e6b44d0 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -18,12 +18,12 @@
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
@@ -157,7 +157,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
*
* This function is called to determine whether the BSP is compatible with the
* supplied device-tree, which is assumed to be the correct one for the actual
- * board. It is expected thati, in the future, a kernel may support multiple
+ * board. It is expected that, in the future, a kernel may support multiple
* boards.
*/
static int __init gef_sbc610_probe(void)
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 7733d0607da2..b593b9afd30a 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -20,12 +20,13 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/fsl/guts.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index a6b8ffcbf01a..5294394c9c07 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -19,7 +19,6 @@
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
#include <asm/swiotlb.h>
diff --git a/arch/powerpc/platforms/86xx/mvme7100.c b/arch/powerpc/platforms/86xx/mvme7100.c
index ee983613570c..b2cc32a32d0b 100644
--- a/arch/powerpc/platforms/86xx/mvme7100.c
+++ b/arch/powerpc/platforms/86xx/mvme7100.c
@@ -19,6 +19,7 @@
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/8xx/Makefile b/arch/powerpc/platforms/8xx/Makefile
index 27a7c6f828e0..5a098f7d5d31 100644
--- a/arch/powerpc/platforms/8xx/Makefile
+++ b/arch/powerpc/platforms/8xx/Makefile
@@ -3,7 +3,7 @@
# Makefile for the PowerPC 8xx linux kernel.
#
obj-y += m8xx_setup.o machine_check.o pic.o
-obj-$(CONFIG_CPM1) += cpm1.o
+obj-$(CONFIG_CPM1) += cpm1.o cpm1-ic.o
obj-$(CONFIG_UCODE_PATCH) += micropatch.o
obj-$(CONFIG_MPC885ADS) += mpc885ads_setup.o
obj-$(CONFIG_MPC86XADS) += mpc86xads_setup.o
diff --git a/arch/powerpc/platforms/8xx/adder875.c b/arch/powerpc/platforms/8xx/adder875.c
index 651486acb896..10e6e4fe77fc 100644
--- a/arch/powerpc/platforms/8xx/adder875.c
+++ b/arch/powerpc/platforms/8xx/adder875.c
@@ -15,9 +15,9 @@
#include <asm/cpm1.h>
#include <asm/fs_pd.h>
#include <asm/udbg.h>
-#include <asm/prom.h>
#include "mpc8xx.h"
+#include "pic.h"
struct cpm_pin {
int port, pin, flags;
@@ -104,7 +104,7 @@ define_machine(adder875) {
.name = "Adder MPC875",
.probe = adder875_probe,
.setup_arch = adder875_setup,
- .init_IRQ = mpc8xx_pics_init,
+ .init_IRQ = mpc8xx_pic_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.calibrate_decr = generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/8xx/cpm1-ic.c b/arch/powerpc/platforms/8xx/cpm1-ic.c
new file mode 100644
index 000000000000..a18fc7c99f83
--- /dev/null
+++ b/arch/powerpc/platforms/8xx/cpm1-ic.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interrupt controller for the
+ * Communication Processor Module.
+ * Copyright (c) 1997 Dan error_act (dmalek@jlc.net)
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/platform_device.h>
+#include <asm/cpm1.h>
+
+struct cpm_pic_data {
+ cpic8xx_t __iomem *reg;
+ struct irq_domain *host;
+};
+
+static void cpm_mask_irq(struct irq_data *d)
+{
+ struct cpm_pic_data *data = irq_data_get_irq_chip_data(d);
+ unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
+
+ clrbits32(&data->reg->cpic_cimr, (1 << cpm_vec));
+}
+
+static void cpm_unmask_irq(struct irq_data *d)
+{
+ struct cpm_pic_data *data = irq_data_get_irq_chip_data(d);
+ unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
+
+ setbits32(&data->reg->cpic_cimr, (1 << cpm_vec));
+}
+
+static void cpm_end_irq(struct irq_data *d)
+{
+ struct cpm_pic_data *data = irq_data_get_irq_chip_data(d);
+ unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
+
+ out_be32(&data->reg->cpic_cisr, (1 << cpm_vec));
+}
+
+static struct irq_chip cpm_pic = {
+ .name = "CPM PIC",
+ .irq_mask = cpm_mask_irq,
+ .irq_unmask = cpm_unmask_irq,
+ .irq_eoi = cpm_end_irq,
+};
+
+static int cpm_get_irq(struct irq_desc *desc)
+{
+ struct cpm_pic_data *data = irq_desc_get_handler_data(desc);
+ int cpm_vec;
+
+ /*
+ * Get the vector by setting the ACK bit and then reading
+ * the register.
+ */
+ out_be16(&data->reg->cpic_civr, 1);
+ cpm_vec = in_be16(&data->reg->cpic_civr);
+ cpm_vec >>= 11;
+
+ return irq_linear_revmap(data->host, cpm_vec);
+}
+
+static void cpm_cascade(struct irq_desc *desc)
+{
+ generic_handle_irq(cpm_get_irq(desc));
+}
+
+static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
+ return 0;
+}
+
+static const struct irq_domain_ops cpm_pic_host_ops = {
+ .map = cpm_pic_host_map,
+};
+
+static int cpm_pic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq;
+ struct cpm_pic_data *data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->reg = devm_ioremap(dev, res->start, resource_size(res));
+ if (!data->reg)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ /* Initialize the CPM interrupt controller. */
+ out_be32(&data->reg->cpic_cicr,
+ (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
+ ((virq_to_hw(irq) / 2) << 13) | CICR_HP_MASK);
+
+ out_be32(&data->reg->cpic_cimr, 0);
+
+ data->host = irq_domain_add_linear(dev->of_node, 64, &cpm_pic_host_ops, data);
+ if (!data->host)
+ return -ENODEV;
+
+ irq_set_handler_data(irq, data);
+ irq_set_chained_handler(irq, cpm_cascade);
+
+ setbits32(&data->reg->cpic_cicr, CICR_IEN);
+
+ return 0;
+}
+
+static const struct of_device_id cpm_pic_match[] = {
+ {
+ .compatible = "fsl,cpm1-pic",
+ }, {
+ .type = "cpm-pic",
+ .compatible = "CPM",
+ }, {},
+};
+
+static struct platform_driver cpm_pic_driver = {
+ .driver = {
+ .name = "cpm-pic",
+ .of_match_table = cpm_pic_match,
+ },
+ .probe = cpm_pic_probe,
+};
+
+static int __init cpm_pic_init(void)
+{
+ return platform_driver_register(&cpm_pic_driver);
+}
+arch_initcall(cpm_pic_init);
+
+/*
+ * The CPM can generate the error interrupt when there is a race condition
+ * between generating and masking interrupts. All we have to do is ACK it
+ * and return. This is a no-op function so we don't need any special
+ * tests in the interrupt handler.
+ */
+static irqreturn_t cpm_error_interrupt(int irq, void *dev)
+{
+ return IRQ_HANDLED;
+}
+
+static int cpm_error_probe(struct platform_device *pdev)
+{
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ return request_irq(irq, cpm_error_interrupt, IRQF_NO_THREAD, "error", NULL);
+}
+
+static const struct of_device_id cpm_error_ids[] = {
+ { .compatible = "fsl,cpm1" },
+ { .type = "cpm" },
+ {},
+};
+
+static struct platform_driver cpm_error_driver = {
+ .driver = {
+ .name = "cpm-error",
+ .of_match_table = cpm_error_ids,
+ },
+ .probe = cpm_error_probe,
+};
+
+static int __init cpm_error_init(void)
+{
+ return platform_driver_register(&cpm_error_driver);
+}
+subsys_initcall(cpm_error_init);
diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c
index c58b6f1c40e3..bb38c8d8f8de 100644
--- a/arch/powerpc/platforms/8xx/cpm1.c
+++ b/arch/powerpc/platforms/8xx/cpm1.c
@@ -33,12 +33,12 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/of_irq.h>
#include <asm/page.h>
#include <asm/8xx_immap.h>
#include <asm/cpm1.h>
#include <asm/io.h>
#include <asm/rheap.h>
-#include <asm/prom.h>
#include <asm/cpm.h>
#include <asm/fs_pd.h>
@@ -51,145 +51,6 @@
cpm8xx_t __iomem *cpmp; /* Pointer to comm processor space */
immap_t __iomem *mpc8xx_immr = (void __iomem *)VIRT_IMMR_BASE;
-static cpic8xx_t __iomem *cpic_reg;
-
-static struct irq_domain *cpm_pic_host;
-
-static void cpm_mask_irq(struct irq_data *d)
-{
- unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
-
- clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
-}
-
-static void cpm_unmask_irq(struct irq_data *d)
-{
- unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
-
- setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
-}
-
-static void cpm_end_irq(struct irq_data *d)
-{
- unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
-
- out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec));
-}
-
-static struct irq_chip cpm_pic = {
- .name = "CPM PIC",
- .irq_mask = cpm_mask_irq,
- .irq_unmask = cpm_unmask_irq,
- .irq_eoi = cpm_end_irq,
-};
-
-int cpm_get_irq(void)
-{
- int cpm_vec;
-
- /*
- * Get the vector by setting the ACK bit and then reading
- * the register.
- */
- out_be16(&cpic_reg->cpic_civr, 1);
- cpm_vec = in_be16(&cpic_reg->cpic_civr);
- cpm_vec >>= 11;
-
- return irq_linear_revmap(cpm_pic_host, cpm_vec);
-}
-
-static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);
-
- irq_set_status_flags(virq, IRQ_LEVEL);
- irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
- return 0;
-}
-
-/*
- * The CPM can generate the error interrupt when there is a race condition
- * between generating and masking interrupts. All we have to do is ACK it
- * and return. This is a no-op function so we don't need any special
- * tests in the interrupt handler.
- */
-static irqreturn_t cpm_error_interrupt(int irq, void *dev)
-{
- return IRQ_HANDLED;
-}
-
-static const struct irq_domain_ops cpm_pic_host_ops = {
- .map = cpm_pic_host_map,
-};
-
-unsigned int __init cpm_pic_init(void)
-{
- struct device_node *np = NULL;
- struct resource res;
- unsigned int sirq = 0, hwirq, eirq;
- int ret;
-
- pr_debug("cpm_pic_init\n");
-
- np = of_find_compatible_node(NULL, NULL, "fsl,cpm1-pic");
- if (np == NULL)
- np = of_find_compatible_node(NULL, "cpm-pic", "CPM");
- if (np == NULL) {
- printk(KERN_ERR "CPM PIC init: can not find cpm-pic node\n");
- return sirq;
- }
-
- ret = of_address_to_resource(np, 0, &res);
- if (ret)
- goto end;
-
- cpic_reg = ioremap(res.start, resource_size(&res));
- if (cpic_reg == NULL)
- goto end;
-
- sirq = irq_of_parse_and_map(np, 0);
- if (!sirq)
- goto end;
-
- /* Initialize the CPM interrupt controller. */
- hwirq = (unsigned int)virq_to_hw(sirq);
- out_be32(&cpic_reg->cpic_cicr,
- (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
- ((hwirq/2) << 13) | CICR_HP_MASK);
-
- out_be32(&cpic_reg->cpic_cimr, 0);
-
- cpm_pic_host = irq_domain_add_linear(np, 64, &cpm_pic_host_ops, NULL);
- if (cpm_pic_host == NULL) {
- printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
- sirq = 0;
- goto end;
- }
-
- /* Install our own error handler. */
- np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
- if (np == NULL)
- np = of_find_node_by_type(NULL, "cpm");
- if (np == NULL) {
- printk(KERN_ERR "CPM PIC init: can not find cpm node\n");
- goto end;
- }
-
- eirq = irq_of_parse_and_map(np, 0);
- if (!eirq)
- goto end;
-
- if (request_irq(eirq, cpm_error_interrupt, IRQF_NO_THREAD, "error",
- NULL))
- printk(KERN_ERR "Could not allocate CPM error IRQ!");
-
- setbits32(&cpic_reg->cpic_cicr, CICR_IEN);
-
-end:
- of_node_put(np);
- return sirq;
-}
void __init cpm_reset(void)
{
@@ -280,6 +141,7 @@ cpm_setbrg(uint brg, uint rate)
out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
CPM_BRG_EN | CPM_BRG_DIV16);
}
+EXPORT_SYMBOL(cpm_setbrg);
struct cpm_ioport16 {
__be16 dir, par, odr_sor, dat, intr;
diff --git a/arch/powerpc/platforms/8xx/ep88xc.c b/arch/powerpc/platforms/8xx/ep88xc.c
index ebcf34a14789..b3b22520b435 100644
--- a/arch/powerpc/platforms/8xx/ep88xc.c
+++ b/arch/powerpc/platforms/8xx/ep88xc.c
@@ -20,6 +20,7 @@
#include <asm/cpm1.h>
#include "mpc8xx.h"
+#include "pic.h"
struct cpm_pin {
int port, pin, flags;
@@ -166,7 +167,7 @@ define_machine(ep88xc) {
.name = "Embedded Planet EP88xC",
.probe = ep88xc_probe,
.setup_arch = ep88xc_setup_arch,
- .init_IRQ = mpc8xx_pics_init,
+ .init_IRQ = mpc8xx_pic_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.calibrate_decr = mpc8xx_calibrate_decr,
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index df4d57d07f9a..24f358f86d16 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -17,10 +17,11 @@
#include <linux/time.h>
#include <linux/rtc.h>
#include <linux/fsl_devices.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
#include <asm/io.h>
#include <asm/8xx_immap.h>
-#include <asm/prom.h>
#include <asm/fs_pd.h>
#include <mm/mmu_decl.h>
@@ -28,9 +29,6 @@
#include "mpc8xx.h"
-extern int cpm_pic_init(void);
-extern int cpm_get_irq(void);
-
/* A place holder for time base interrupts, if they are ever enabled. */
static irqreturn_t timebase_interrupt(int irq, void *dev)
{
@@ -207,28 +205,3 @@ void __noreturn mpc8xx_restart(char *cmd)
in_8(&clk_r->res[0]);
panic("Restart failed\n");
}
-
-static void cpm_cascade(struct irq_desc *desc)
-{
- generic_handle_irq(cpm_get_irq());
-}
-
-/* Initialize the internal interrupt controllers. The number of
- * interrupts supported can vary with the processor type, and the
- * 82xx family can have up to 64.
- * External interrupts can be either edge or level triggered, and
- * need to be initialized by the appropriate driver.
- */
-void __init mpc8xx_pics_init(void)
-{
- int irq;
-
- if (mpc8xx_pic_init()) {
- printk(KERN_ERR "Failed interrupt 8xx controller initialization\n");
- return;
- }
-
- irq = cpm_pic_init();
- if (irq)
- irq_set_chained_handler(irq, cpm_cascade);
-}
diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
index 8d02f5ff4481..03267e4a44a9 100644
--- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
@@ -29,6 +29,7 @@
#include "mpc86xads.h"
#include "mpc8xx.h"
+#include "pic.h"
struct cpm_pin {
int port, pin, flags;
@@ -140,7 +141,7 @@ define_machine(mpc86x_ads) {
.name = "MPC86x ADS",
.probe = mpc86xads_probe,
.setup_arch = mpc86xads_setup_arch,
- .init_IRQ = mpc8xx_pics_init,
+ .init_IRQ = mpc8xx_pic_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.calibrate_decr = mpc8xx_calibrate_decr,
diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
index a0c83c1905c6..b1e39f96de00 100644
--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
@@ -42,6 +42,7 @@
#include "mpc885ads.h"
#include "mpc8xx.h"
+#include "pic.h"
static u32 __iomem *bcsr, *bcsr5;
@@ -216,7 +217,7 @@ define_machine(mpc885_ads) {
.name = "Freescale MPC885 ADS",
.probe = mpc885ads_probe,
.setup_arch = mpc885ads_setup_arch,
- .init_IRQ = mpc8xx_pics_init,
+ .init_IRQ = mpc8xx_pic_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.calibrate_decr = mpc8xx_calibrate_decr,
diff --git a/arch/powerpc/platforms/8xx/mpc8xx.h b/arch/powerpc/platforms/8xx/mpc8xx.h
index 31cc2ecace42..79fae3324866 100644
--- a/arch/powerpc/platforms/8xx/mpc8xx.h
+++ b/arch/powerpc/platforms/8xx/mpc8xx.h
@@ -15,7 +15,6 @@ extern void __noreturn mpc8xx_restart(char *cmd);
extern void mpc8xx_calibrate_decr(void);
extern int mpc8xx_set_rtc_time(struct rtc_time *tm);
extern void mpc8xx_get_rtc_time(struct rtc_time *tm);
-extern void mpc8xx_pics_init(void);
extern unsigned int mpc8xx_get_irq(void);
#endif /* __MPC8xx_H */
diff --git a/arch/powerpc/platforms/8xx/pic.c b/arch/powerpc/platforms/8xx/pic.c
index 04a6abf14c29..ea6b0e523c60 100644
--- a/arch/powerpc/platforms/8xx/pic.c
+++ b/arch/powerpc/platforms/8xx/pic.c
@@ -4,7 +4,8 @@
#include <linux/signal.h>
#include <linux/irq.h>
#include <linux/dma-mapping.h>
-#include <asm/prom.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/8xx_immap.h>
@@ -14,8 +15,6 @@
#define PIC_VEC_SPURRIOUS 15
-extern int cpm_get_irq(struct pt_regs *regs);
-
static struct irq_domain *mpc8xx_pic_host;
static unsigned long mpc8xx_cached_irq_mask;
static sysconf8xx_t __iomem *siu_reg;
@@ -125,7 +124,7 @@ static const struct irq_domain_ops mpc8xx_pic_host_ops = {
.xlate = mpc8xx_pic_host_xlate,
};
-int __init mpc8xx_pic_init(void)
+void __init mpc8xx_pic_init(void)
{
struct resource res;
struct device_node *np;
@@ -136,7 +135,7 @@ int __init mpc8xx_pic_init(void)
np = of_find_node_by_type(NULL, "mpc8xx-pic");
if (np == NULL) {
printk(KERN_ERR "Could not find fsl,pq1-pic node\n");
- return -ENOMEM;
+ return;
}
ret = of_address_to_resource(np, 0, &res);
@@ -144,20 +143,13 @@ int __init mpc8xx_pic_init(void)
goto out;
siu_reg = ioremap(res.start, resource_size(&res));
- if (siu_reg == NULL) {
- ret = -EINVAL;
+ if (!siu_reg)
goto out;
- }
mpc8xx_pic_host = irq_domain_add_linear(np, 64, &mpc8xx_pic_host_ops, NULL);
- if (mpc8xx_pic_host == NULL) {
+ if (!mpc8xx_pic_host)
printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
- ret = -ENOMEM;
- goto out;
- }
- ret = 0;
out:
of_node_put(np);
- return ret;
}
diff --git a/arch/powerpc/platforms/8xx/pic.h b/arch/powerpc/platforms/8xx/pic.h
index 9fe00eebdc8b..c70f1b446f94 100644
--- a/arch/powerpc/platforms/8xx/pic.h
+++ b/arch/powerpc/platforms/8xx/pic.h
@@ -4,7 +4,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
-int mpc8xx_pic_init(void);
+void mpc8xx_pic_init(void);
unsigned int mpc8xx_get_irq(void);
/*
diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
index 4cea8b1afa44..3725d51248df 100644
--- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
@@ -43,6 +43,7 @@
#include <asm/udbg.h>
#include "mpc8xx.h"
+#include "pic.h"
struct cpm_pin {
int port, pin, flags;
@@ -142,7 +143,7 @@ define_machine(tqm8xx) {
.name = "TQM8xx",
.probe = tqm8xx_probe,
.setup_arch = tqm8xx_setup_arch,
- .init_IRQ = mpc8xx_pics_init,
+ .init_IRQ = mpc8xx_pic_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.calibrate_decr = mpc8xx_calibrate_decr,
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index e2e1fec91c6e..9e2df4b66478 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -104,6 +104,7 @@ config PPC_BOOK3S_64
select HAVE_MOVE_PUD
select IRQ_WORK
select PPC_64S_HASH_MMU if !PPC_RADIX_MMU
+ select KASAN_VMALLOC if KASAN
config PPC_BOOK3E_64
bool "Embedded processors"
@@ -377,7 +378,6 @@ config SPE
config PPC_64S_HASH_MMU
bool "Hash MMU Support"
depends on PPC_BOOK3S_64
- select PPC_MM_SLICES
default y
help
Enable support for the Power ISA Hash style MMU. This is implemented
@@ -451,9 +451,6 @@ config PPC_BOOK3E_MMU
def_bool y
depends on FSL_BOOKE || PPC_BOOK3E
-config PPC_MM_SLICES
- bool
-
config PPC_HAVE_PMU_SUPPORT
bool
@@ -556,6 +553,12 @@ config CPU_LITTLE_ENDIAN
endchoice
+config PPC64_ELF_ABI_V1
+ def_bool PPC64 && CPU_BIG_ENDIAN
+
+config PPC64_ELF_ABI_V2
+ def_bool PPC64 && CPU_LITTLE_ENDIAN
+
config PPC64_BOOT_WRAPPER
def_bool n
depends on CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c
index 9d252c554f7f..397ce6a40bd0 100644
--- a/arch/powerpc/platforms/amigaone/setup.c
+++ b/arch/powerpc/platforms/amigaone/setup.c
@@ -8,6 +8,7 @@
* Copyright 2003 by Hans-Joerg Frieden and Thomas Frieden
*/
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
index f9a1615b74da..c0799fb26b6d 100644
--- a/arch/powerpc/platforms/book3s/vas-api.c
+++ b/arch/powerpc/platforms/book3s/vas-api.c
@@ -30,7 +30,7 @@
*
* where "vas_copy" and "vas_paste" are defined in copy-paste.h.
* copy/paste returns to the user space directly. So refer NX hardware
- * documententation for exact copy/paste usage and completion / error
+ * documentation for exact copy/paste usage and completion / error
* conditions.
*/
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 354a58c1e6f2..f3291e957a19 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -13,10 +13,10 @@
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
+#include <linux/of_irq.h>
#include <asm/dcr.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include "cell.h"
diff --git a/arch/powerpc/platforms/cell/cbe_powerbutton.c b/arch/powerpc/platforms/cell/cbe_powerbutton.c
index bda589dfb051..a3ee397486f6 100644
--- a/arch/powerpc/platforms/cell/cbe_powerbutton.c
+++ b/arch/powerpc/platforms/cell/cbe_powerbutton.c
@@ -9,9 +9,9 @@
#include <linux/input.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/pmi.h>
-#include <asm/prom.h>
static struct input_dev *button_dev;
static struct platform_device *button_pdev;
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index 1c4c53bec66c..316e533afc00 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -10,12 +10,12 @@
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/export.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/pgtable.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/cell-regs.h>
@@ -23,7 +23,7 @@
* Current implementation uses "cpu" nodes. We build our own mapping
* array of cpu numbers to cpu nodes locally for now to allow interrupt
* time code to have a fast path rather than call of_get_cpu_node(). If
- * we implement cpu hotplug, we'll have to install an appropriate norifier
+ * we implement cpu hotplug, we'll have to install an appropriate notifier
* in order to release references to the cpu going away
*/
static struct cbe_regs_map
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
index abb5e527b4db..2f45428e32c8 100644
--- a/arch/powerpc/platforms/cell/cbe_thermal.c
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -39,7 +39,6 @@
#include <linux/stringify.h>
#include <asm/spu.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/cell-regs.h>
#include "spu_priv1_mmio.h"
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 0873a7a20271..03ee8152ee97 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -18,15 +18,16 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/pgtable.h>
+#include <linux/of_address.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/cell-regs.h>
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 25e726bf0172..0ca3efeef293 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -12,8 +12,10 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/notifier.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/memblock.h>
@@ -582,7 +584,7 @@ static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
{
struct device *dev = data;
- /* We are only intereted in device addition */
+ /* We are only interested in device addition */
if (action != BUS_NOTIFY_ADD_DEVICE)
return 0;
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index dff8d5e7ab82..58d967ee38b3 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -19,7 +19,6 @@
#include <asm/io.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/reg.h>
#include <asm/cell-regs.h>
#include <asm/cpu_has_feature.h>
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 4325c05bedd9..8d934ea6270c 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -12,11 +12,11 @@
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
+#include <linux/of.h>
#include <asm/kexec.h>
#include <asm/reg.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/cell-regs.h>
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index edefa785d2ef..52de014983c9 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -31,7 +31,6 @@
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index d7ab868aab54..31ce00b52a32 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -28,7 +28,6 @@
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c
index a1c293f42a1f..e36ebd84f55b 100644
--- a/arch/powerpc/platforms/cell/spider-pci.c
+++ b/arch/powerpc/platforms/cell/spider-pci.c
@@ -8,6 +8,7 @@
#undef DEBUG
#include <linux/kernel.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -81,7 +82,7 @@ static int __init spiderpci_pci_setup_chip(struct pci_controller *phb,
/*
* On CellBlade, we can't know that which XDR memory is used by
* kmalloc() to allocate dummy_page_va.
- * In order to imporve the performance, the XDR which is used to
+ * In order to improve the performance, the XDR which is used to
* allocate dummy_page_va is the nearest the spider-pci.
* We have to select the CBE which is the nearest the spider-pci
* to allocate memory from the best XDR, but I don't know that
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 8af75867cb42..11df737c8c6a 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -10,9 +10,10 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/pgtable.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include "interrupt.h"
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 2eecba3345c3..7bd0b563e163 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -24,7 +24,6 @@
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
-#include <asm/prom.h>
#include <asm/kexec.h>
const struct spu_management_ops *spu_management_ops;
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index ddf8742f09a3..ae09c5a91b40 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -16,11 +16,12 @@
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/firmware.h>
-#include <asm/prom.h>
#include "spufs/spufs.h"
#include "interrupt.h"
@@ -457,7 +458,7 @@ static void __init init_affinity_node(int cbe)
/*
* Walk through each phandle in vicinity property of the spu
- * (tipically two vicinity phandles per spe node)
+ * (typically two vicinity phandles per spe node)
*/
for (i = 0; i < (lenp / sizeof(phandle)); i++) {
if (vic_handles[i] == avoid_ph)
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 0c2e6bb6fe51..d150e3987304 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -19,7 +19,6 @@
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/firmware.h>
-#include <asm/prom.h>
#include "interrupt.h"
#include "spu_priv1_mmio.h"
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 4c702192412f..34334c32b7f5 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -21,10 +21,10 @@
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
+#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <linux/uaccess.h>
diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c
index e820332b59a0..dab78076fedb 100644
--- a/arch/powerpc/platforms/chrp/nvram.c
+++ b/arch/powerpc/platforms/chrp/nvram.c
@@ -10,7 +10,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
-#include <asm/prom.h>
+#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include "chrp.h"
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index 76e6256cb0a7..6f6598e771ff 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -9,11 +9,11 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pgtable.h>
+#include <linux/of_address.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/hydra.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/sections.h>
#include <asm/pci-bridge.h>
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 3cfc382841e5..ec63c0558db6 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -32,9 +32,11 @@
#include <linux/root_dev.h>
#include <linux/initrd.h>
#include <linux/timer.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/dma.h>
#include <asm/machdep.h>
@@ -251,7 +253,7 @@ static void __noreturn briq_restart(char *cmd)
* Per default, input/output-device points to the keyboard/screen
* If no card is installed, the built-in serial port is used as a fallback.
* But unfortunately, the firmware does not connect /chosen/{stdin,stdout}
- * the the built-in serial node. Instead, a /failsafe node is created.
+ * to the built-in serial node. Instead, a /failsafe node is created.
*/
static __init void chrp_init(void)
{
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index e30cd2915e54..ab95155647a4 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -24,7 +24,6 @@
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
index acde7bbe0716..d46417e3d8e0 100644
--- a/arch/powerpc/platforms/chrp/time.c
+++ b/arch/powerpc/platforms/chrp/time.c
@@ -21,17 +21,15 @@
#include <linux/init.h>
#include <linux/bcd.h>
#include <linux/ioport.h>
+#include <linux/of_address.h>
#include <asm/io.h>
#include <asm/nvram.h>
-#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/time.h>
#include <platforms/chrp/chrp.h>
-extern spinlock_t rtc_lock;
-
#define NVRAM_AS0 0x74
#define NVRAM_AS1 0x75
#define NVRAM_DATA 0x77
diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c
index ade928f7ea73..5c2575adcc7e 100644
--- a/arch/powerpc/platforms/embedded6xx/gamecube.c
+++ b/arch/powerpc/platforms/embedded6xx/gamecube.c
@@ -16,7 +16,6 @@
#include <asm/io.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index 07e71ba3e846..78f2378d9223 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -22,12 +22,13 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_core.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/extable.h>
#include <asm/time.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/tsi108.h>
#include <asm/pci-bridge.h>
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index eb8342e7f84e..1830e1ac1f8f 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -15,7 +15,6 @@
#include <linux/of_platform.h>
#include <asm/time.h>
-#include <asm/prom.h>
#include <asm/mpic.h>
#include <asm/pci-bridge.h>
diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c
index 9d891bd5df5a..0133e175a0fc 100644
--- a/arch/powerpc/platforms/embedded6xx/ls_uart.c
+++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c
@@ -14,8 +14,8 @@
#include <linux/delay.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
+#include <linux/of.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/termbits.h>
#include "mpc10x.h"
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index 9eb9abb5bce2..8b2b42210356 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -27,10 +27,10 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_core.h>
+#include <linux/of_irq.h>
#include <asm/time.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/tsi108.h>
#include <asm/pci-bridge.h>
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c
index c06a0490d157..4854cc592cec 100644
--- a/arch/powerpc/platforms/embedded6xx/mvme5100.c
+++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c
@@ -12,12 +12,12 @@
* Author: Stephen Chivers <schivers@csc.com>
*/
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/i8259.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c
index e188b90f7016..5f16e80b6ed6 100644
--- a/arch/powerpc/platforms/embedded6xx/storcenter.c
+++ b/arch/powerpc/platforms/embedded6xx/storcenter.c
@@ -17,7 +17,6 @@
#include <linux/of_platform.h>
#include <asm/time.h>
-#include <asm/prom.h>
#include <asm/mpic.h>
#include <asm/pci-bridge.h>
diff --git a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
index 5aea46566233..e02bdabf358c 100644
--- a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
+++ b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
@@ -7,10 +7,11 @@
* Copyright (C) 2008,2009 Albert Herranz
*/
+#include <linux/of_address.h>
+
#include <mm/mmu_decl.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/fixmap.h>
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index f60ade584bb2..9e03ff8f631c 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -13,13 +13,13 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/memblock.h>
#include <mm/mmu_decl.h>
#include <asm/io.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c
index 044a20c1fbde..84afae7a2561 100644
--- a/arch/powerpc/platforms/fsl_uli1575.c
+++ b/arch/powerpc/platforms/fsl_uli1575.c
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
+#include <linux/of_irq.h>
#include <asm/pci-bridge.h>
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 37875e478b3a..b911b31717cc 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -12,10 +12,10 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/of_irq.h>
#include <asm/sections.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/iommu.h>
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 4e9ad5bf3efb..c26c379e1cc8 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -36,12 +36,12 @@
#include <linux/serial.h>
#include <linux/smp.h>
#include <linux/bitops.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/memblock.h>
#include <asm/processor.h>
#include <asm/sections.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c
index 78209bb7629c..823e219ef8ee 100644
--- a/arch/powerpc/platforms/maple/time.c
+++ b/arch/powerpc/platforms/maple/time.c
@@ -19,9 +19,9 @@
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/bcd.h>
+#include <linux/of_address.h>
#include <asm/sections.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/time.h>
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index 26427311fc72..1be1f18f6f09 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -10,6 +10,8 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/sched.h>
#include <asm/pasemi_dma.h>
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 5be7242fbd86..0a38663d44ed 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
+#include <linux/of.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
diff --git a/arch/powerpc/platforms/pasemi/misc.c b/arch/powerpc/platforms/pasemi/misc.c
index 1bf65d02d3ba..f859ada29074 100644
--- a/arch/powerpc/platforms/pasemi/misc.c
+++ b/arch/powerpc/platforms/pasemi/misc.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/i2c.h>
#ifdef CONFIG_I2C_BOARDINFO
diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c
index ea1e41451408..dc1846660005 100644
--- a/arch/powerpc/platforms/pasemi/msi.c
+++ b/arch/powerpc/platforms/pasemi/msi.c
@@ -9,9 +9,9 @@
*/
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <asm/mpic.h>
-#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/msi_bitmap.h>
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index d4b922759d6e..55f0160910bf 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
+#include <linux/of_address.h>
#include <linux/pci.h>
#include <asm/pci-bridge.h>
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index f974bfe7fde1..2aef49e04dd4 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -18,8 +18,8 @@
#include <linux/pci.h>
#include <linux/of_platform.h>
#include <linux/gfp.h>
+#include <linux/irqdomain.h>
-#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/i8259.h>
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index 32224cb489d7..aeb79a8b3e10 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -15,7 +15,6 @@
#include <linux/pmu.h>
#include <linux/atomic.h>
#include <linux/export.h>
-#include <asm/prom.h>
#include <asm/backlight.h>
#define OLD_BACKLIGHT_MAX 15
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index d20ef35e6d9d..72eb99aba40f 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/of_fdt.h>
#include <generated/utsrelease.h>
#include <asm/sections.h>
#include <asm/prom.h>
@@ -243,7 +244,7 @@ static void __init bootx_scan_dt_build_strings(unsigned long base,
DBG(" detected display ! adding properties names !\n");
bootx_dt_add_string("linux,boot-display", mem_end);
bootx_dt_add_string("linux,opened", mem_end);
- strlcpy(bootx_disp_path, namep, sizeof(bootx_disp_path));
+ strscpy(bootx_disp_path, namep, sizeof(bootx_disp_path));
}
/* get and store all property names */
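The strlcpy() to strscpy() conversion above keeps the bounded, NUL-terminated copy but gains a return value that reports truncation. A minimal sketch of checking it, reusing the buffer name from the hunk; the wrapper function and its warning are illustrative only:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void copy_disp_path(const char *namep)
{
	ssize_t n = strscpy(bootx_disp_path, namep, sizeof(bootx_disp_path));

	if (n == -E2BIG)
		pr_warn("display path truncated\n");	/* still NUL-terminated */
}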
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index e67c624f35a2..5cc958adba13 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -31,7 +31,6 @@
#include <asm/keylargo.h>
#include <asm/uninorth.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/dbdma.h>
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index df89d916236d..c1c430c66dc9 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -40,10 +40,10 @@
#include <linux/mutex.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/of_irq.h>
#include <asm/keylargo.h>
#include <asm/uninorth.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smu.h>
#include <asm/pmac_pfunc.h>
@@ -1472,7 +1472,7 @@ int __init pmac_i2c_init(void)
smu_i2c_probe();
#endif
- /* Now add plaform functions for some known devices */
+ /* Now add platform functions for some known devices */
pmac_i2c_devscan(pmac_i2c_dev_create);
return 0;
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index de8fcb607290..fe2e0249cbc2 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -17,9 +17,9 @@
#include <linux/memblock.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
+#include <linux/of_address.h>
#include <asm/sections.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/nvram.h>
@@ -71,7 +71,7 @@ struct core99_header {
static int nvram_naddrs;
static volatile unsigned char __iomem *nvram_data;
static int is_core_99;
-static int core99_bank = 0;
+static int core99_bank;
static int nvram_partitions[3];
// XXX Turn that into a sem
static DEFINE_RAW_SPINLOCK(nv_lock);
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index e9abe0f2e7f0..d71359b5331c 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -12,11 +12,12 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <asm/sections.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index 94df0a91b46f..22741ddfd5b2 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -12,8 +12,8 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
-#include <asm/prom.h>
#include <asm/pmac_pfunc.h>
/* Debug */
@@ -685,7 +685,7 @@ static int pmf_add_functions(struct pmf_device *dev, void *driverdata)
const int plen = strlen(PP_PREFIX);
int count = 0;
- for (pp = dev->node->properties; pp != 0; pp = pp->next) {
+ for_each_property_of_node(dev->node, pp) {
const char *name;
if (strncmp(pp->name, PP_PREFIX, plen) != 0)
continue;
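for_each_property_of_node() used above is the stock <linux/of.h> iterator over a node's property list, replacing the hand-rolled pp->next walk. A minimal sketch of it in isolation; the node argument and debug print are illustrative:

#include <linux/of.h>
#include <linux/printk.h>

static void dump_property_names(struct device_node *node)
{
	struct property *pp;

	for_each_property_of_node(node, pp)
		pr_debug("property: %s\n", pp->name);
}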
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index bb0566633af5..8c8d8e0a7d13 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -20,11 +20,13 @@
#include <linux/adb.h>
#include <linux/minmax.h>
#include <linux/pmu.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/smp.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/time.h>
#include <asm/pmac_feature.h>
@@ -382,7 +384,7 @@ static void __init pmac_pic_probe_oldstyle(void)
#endif
}
-int of_irq_parse_oldworld(struct device_node *device, int index,
+int of_irq_parse_oldworld(const struct device_node *device, int index,
struct of_phandle_args *out_irq)
{
const u32 *ints = NULL;
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index ba8d4e97095b..1b696f352640 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -16,6 +16,8 @@ struct rtc_time;
extern int pmac_newworld;
+void g5_phy_disable_cpu1(void);
+
extern long pmac_time_init(void);
extern time64_t pmac_get_boot_time(void);
extern void pmac_get_rtc_time(struct rtc_time *);
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 974d4b49867b..f71735ec449f 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -50,7 +50,6 @@
#include <asm/reg.h>
#include <asm/sections.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/ohare.h>
@@ -81,10 +80,6 @@ static int current_root_goodness = -1;
#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
-#ifdef CONFIG_PPC64
-int sccdbg;
-#endif
-
sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
EXPORT_SYMBOL(sys_ctrler);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index da1efdc30d6c..d9df45741ece 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -22,6 +22,7 @@
#include <linux/sched/hotplug.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -39,7 +40,6 @@
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
@@ -875,8 +875,6 @@ static int smp_core99_cpu_online(unsigned int cpu)
static void __init smp_core99_bringup_done(void)
{
- extern void __init g5_phy_disable_cpu1(void);
-
/* Close i2c bus if it was used for tb sync */
if (pmac_tb_clock_chip_host)
pmac_i2c_close(pmac_tb_clock_chip_host);
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index 31d6213a6c8f..4c5790aff1b5 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -24,9 +24,9 @@
#include <linux/interrupt.h>
#include <linux/hardirq.h>
#include <linux/rtc.h>
+#include <linux/of_address.h>
#include <asm/sections.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/time.h>
diff --git a/arch/powerpc/platforms/powermac/udbg_adb.c b/arch/powerpc/platforms/powermac/udbg_adb.c
index 12158bb4fed7..b4756defd596 100644
--- a/arch/powerpc/platforms/powermac/udbg_adb.c
+++ b/arch/powerpc/platforms/powermac/udbg_adb.c
@@ -7,11 +7,11 @@
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/cuda.h>
+#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/xmon.h>
-#include <asm/prom.h>
#include <asm/bootx.h>
#include <asm/errno.h>
#include <asm/pmac_feature.h>
diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c
index 965827ac2e9c..734df5a32f99 100644
--- a/arch/powerpc/platforms/powermac/udbg_scc.c
+++ b/arch/powerpc/platforms/powermac/udbg_scc.c
@@ -5,10 +5,10 @@
* Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
*/
#include <linux/types.h>
+#include <linux/of.h>
#include <asm/udbg.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pmac_feature.h>
extern u8 real_readb(volatile u8 __iomem *addr);
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index dc7b37c23b60..6488b3842199 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -1,4 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
+
+# nothing that deals with real mode is safe to KASAN
+# in particular, idle code runs a bunch of things in real mode
+KASAN_SANITIZE_idle.o := n
+KASAN_SANITIZE_pci-ioda.o := n
+# pnv_machine_check_early
+KASAN_SANITIZE_setup.o := n
+
obj-y += setup.o opal-call.o opal-wrappers.o opal.o opal-async.o
obj-y += idle.o opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 89e22c460ebf..a83cb679dd59 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/of.h>
@@ -390,7 +391,7 @@ static struct eeh_dev *pnv_eeh_probe(struct pci_dev *pdev)
* should be blocked until PE reset. MMIO access is dropped
* by hardware certainly. In order to drop PCI config requests,
* one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
- * will be checked in the backend for PE state retrival. If
+ * will be checked in the backend for PE state retrieval. If
* the PE becomes frozen for the first time and the flag has
* been set for the PE, we will set EEH_PE_CFG_BLOCKED for
* that PE to block its config space.
@@ -981,7 +982,7 @@ static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option)
case EEH_RESET_FUNDAMENTAL:
/*
* Wait for Transaction Pending bit to clear. A word-aligned
- * test is used, so we use the conrol offset rather than status
+ * test is used, so we use the control offset rather than status
* and shift the test bit to match.
*/
pnv_eeh_wait_for_pending(pdn, "AF",
@@ -1048,7 +1049,7 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option)
* frozen state during PE reset. However, the good idea here from
* benh is to keep frozen state before we get PE reset done completely
* (until BAR restore). With the frozen state, HW drops illegal IO
- * or MMIO access, which can incur recrusive frozen PE during PE
+ * or MMIO access, which can incur recursive frozen PE during PE
* reset. The side effect is that EEH core has to clear the frozen
* state explicitly after BAR restore.
*/
@@ -1095,8 +1096,8 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option)
* bus is behind a hotplug slot and it will use the slot provided
* reset methods to prevent spurious hotplug events during the reset.
*
- * Fundemental resets need to be handled internally to EEH since the
- * PCI core doesn't really have a concept of a fundemental reset,
+ * Fundamental resets need to be handled internally to EEH since the
+ * PCI core doesn't really have a concept of a fundamental reset,
* mainly because there's no standard way to generate one. Only a
* few devices require an FRESET so it should be fine.
*/
@@ -1640,24 +1641,6 @@ static struct eeh_ops pnv_eeh_ops = {
.notify_resume = NULL
};
-#ifdef CONFIG_PCI_IOV
-static void pnv_pci_fixup_vf_mps(struct pci_dev *pdev)
-{
- struct pci_dn *pdn = pci_get_pdn(pdev);
- int parent_mps;
-
- if (!pdev->is_virtfn)
- return;
-
- /* Synchronize MPS for VF and PF */
- parent_mps = pcie_get_mps(pdev->physfn);
- if ((128 << pdev->pcie_mpss) >= parent_mps)
- pcie_set_mps(pdev, parent_mps);
- pdn->mps = pcie_get_mps(pdev);
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_pci_fixup_vf_mps);
-#endif /* CONFIG_PCI_IOV */
-
/**
* eeh_powernv_init - Register platform dependent EEH operations
*
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index a6677a111aca..6f94b808dd39 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -112,7 +112,7 @@ static int __init pnv_save_sprs_for_deep_states(void)
if (rc != 0)
return rc;
- /* Only p8 needs to set extra HID regiters */
+ /* Only p8 needs to set extra HID registers */
if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
uint64_t hid1_val = mfspr(SPRN_HID1);
uint64_t hid4_val = mfspr(SPRN_HID4);
@@ -1204,7 +1204,7 @@ static void __init pnv_arch300_idle_init(void)
* The idle code does not deal with TB loss occurring
* in a shallower state than SPR loss, so force it to
* behave like SPRs are lost if TB is lost. POWER9 would
- * never encouter this, but a POWER8 core would if it
+ * never encounter this, but a POWER8 core would if it
* implemented the stop instruction. So this is for forward
* compatibility.
*/
diff --git a/arch/powerpc/platforms/powernv/ocxl.c b/arch/powerpc/platforms/powernv/ocxl.c
index 28b009b46464..27c936075031 100644
--- a/arch/powerpc/platforms/powernv/ocxl.c
+++ b/arch/powerpc/platforms/powernv/ocxl.c
@@ -289,7 +289,7 @@ int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count)
* be used by a function depends on how many functions exist
* on the device. The NPU needs to be configured to know how
* many bits are available to PASIDs and how many are to be
- * used by the function BDF indentifier.
+ * used by the function BDF identifier.
*
* We only support one AFU-carrying function for now.
*/
diff --git a/arch/powerpc/platforms/powernv/opal-fadump.c b/arch/powerpc/platforms/powernv/opal-fadump.c
index c8ad057c7221..964f464b1b0e 100644
--- a/arch/powerpc/platforms/powernv/opal-fadump.c
+++ b/arch/powerpc/platforms/powernv/opal-fadump.c
@@ -60,7 +60,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
addr = be64_to_cpu(addr);
pr_debug("Kernel metadata addr: %llx\n", addr);
opal_fdm_active = (void *)addr;
- if (opal_fdm_active->registered_regions == 0)
+ if (be16_to_cpu(opal_fdm_active->registered_regions) == 0)
return;
ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_BOOT_MEM, &addr);
@@ -95,17 +95,17 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf);
static void opal_fadump_update_config(struct fw_dump *fadump_conf,
const struct opal_fadump_mem_struct *fdm)
{
- pr_debug("Boot memory regions count: %d\n", fdm->region_cnt);
+ pr_debug("Boot memory regions count: %d\n", be16_to_cpu(fdm->region_cnt));
/*
* The destination address of the first boot memory region is the
* destination address of boot memory regions.
*/
- fadump_conf->boot_mem_dest_addr = fdm->rgn[0].dest;
+ fadump_conf->boot_mem_dest_addr = be64_to_cpu(fdm->rgn[0].dest);
pr_debug("Destination address of boot memory regions: %#016llx\n",
fadump_conf->boot_mem_dest_addr);
- fadump_conf->fadumphdr_addr = fdm->fadumphdr_addr;
+ fadump_conf->fadumphdr_addr = be64_to_cpu(fdm->fadumphdr_addr);
}
/*
@@ -126,9 +126,9 @@ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf,
fadump_conf->boot_memory_size = 0;
pr_debug("Boot memory regions:\n");
- for (i = 0; i < fdm->region_cnt; i++) {
- base = fdm->rgn[i].src;
- size = fdm->rgn[i].size;
+ for (i = 0; i < be16_to_cpu(fdm->region_cnt); i++) {
+ base = be64_to_cpu(fdm->rgn[i].src);
+ size = be64_to_cpu(fdm->rgn[i].size);
pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size);
fadump_conf->boot_mem_addr[i] = base;
@@ -143,7 +143,7 @@ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf,
* Start address of reserve dump area (permanent reservation) for
* re-registering FADump after dump capture.
*/
- fadump_conf->reserve_dump_area_start = fdm->rgn[0].dest;
+ fadump_conf->reserve_dump_area_start = be64_to_cpu(fdm->rgn[0].dest);
/*
* Rarely, but it can so happen that system crashes before all
@@ -155,13 +155,14 @@ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf,
* Hope the memory that could not be preserved only has pages
* that are usually filtered out while saving the vmcore.
*/
- if (fdm->region_cnt > fdm->registered_regions) {
+ if (be16_to_cpu(fdm->region_cnt) > be16_to_cpu(fdm->registered_regions)) {
pr_warn("Not all memory regions were saved!!!\n");
pr_warn(" Unsaved memory regions:\n");
- i = fdm->registered_regions;
- while (i < fdm->region_cnt) {
+ i = be16_to_cpu(fdm->registered_regions);
+ while (i < be16_to_cpu(fdm->region_cnt)) {
pr_warn("\t[%03d] base: 0x%llx, size: 0x%llx\n",
- i, fdm->rgn[i].src, fdm->rgn[i].size);
+ i, be64_to_cpu(fdm->rgn[i].src),
+ be64_to_cpu(fdm->rgn[i].size));
i++;
}
@@ -170,7 +171,7 @@ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf,
}
fadump_conf->boot_mem_top = (fadump_conf->boot_memory_size + hole_size);
- fadump_conf->boot_mem_regs_cnt = fdm->region_cnt;
+ fadump_conf->boot_mem_regs_cnt = be16_to_cpu(fdm->region_cnt);
opal_fadump_update_config(fadump_conf, fdm);
}
@@ -178,35 +179,38 @@ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf,
static void opal_fadump_init_metadata(struct opal_fadump_mem_struct *fdm)
{
fdm->version = OPAL_FADUMP_VERSION;
- fdm->region_cnt = 0;
- fdm->registered_regions = 0;
- fdm->fadumphdr_addr = 0;
+ fdm->region_cnt = cpu_to_be16(0);
+ fdm->registered_regions = cpu_to_be16(0);
+ fdm->fadumphdr_addr = cpu_to_be64(0);
}
static u64 opal_fadump_init_mem_struct(struct fw_dump *fadump_conf)
{
u64 addr = fadump_conf->reserve_dump_area_start;
+ u16 reg_cnt;
int i;
opal_fdm = __va(fadump_conf->kernel_metadata);
opal_fadump_init_metadata(opal_fdm);
/* Boot memory regions */
+ reg_cnt = be16_to_cpu(opal_fdm->region_cnt);
for (i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) {
- opal_fdm->rgn[i].src = fadump_conf->boot_mem_addr[i];
- opal_fdm->rgn[i].dest = addr;
- opal_fdm->rgn[i].size = fadump_conf->boot_mem_sz[i];
+ opal_fdm->rgn[i].src = cpu_to_be64(fadump_conf->boot_mem_addr[i]);
+ opal_fdm->rgn[i].dest = cpu_to_be64(addr);
+ opal_fdm->rgn[i].size = cpu_to_be64(fadump_conf->boot_mem_sz[i]);
- opal_fdm->region_cnt++;
+ reg_cnt++;
addr += fadump_conf->boot_mem_sz[i];
}
+ opal_fdm->region_cnt = cpu_to_be16(reg_cnt);
/*
- * Kernel metadata is passed to f/w and retrieved in capture kerenl.
+ * Kernel metadata is passed to f/w and retrieved in capture kernel.
* So, use it to save fadump header address instead of calculating it.
*/
- opal_fdm->fadumphdr_addr = (opal_fdm->rgn[0].dest +
- fadump_conf->boot_memory_size);
+ opal_fdm->fadumphdr_addr = cpu_to_be64(be64_to_cpu(opal_fdm->rgn[0].dest) +
+ fadump_conf->boot_memory_size);
opal_fadump_update_config(fadump_conf, opal_fdm);
@@ -269,18 +273,21 @@ static u64 opal_fadump_get_bootmem_min(void)
static int opal_fadump_register(struct fw_dump *fadump_conf)
{
s64 rc = OPAL_PARAMETER;
+ u16 registered_regs;
int i, err = -EIO;
- for (i = 0; i < opal_fdm->region_cnt; i++) {
+ registered_regs = be16_to_cpu(opal_fdm->registered_regions);
+ for (i = 0; i < be16_to_cpu(opal_fdm->region_cnt); i++) {
rc = opal_mpipl_update(OPAL_MPIPL_ADD_RANGE,
- opal_fdm->rgn[i].src,
- opal_fdm->rgn[i].dest,
- opal_fdm->rgn[i].size);
+ be64_to_cpu(opal_fdm->rgn[i].src),
+ be64_to_cpu(opal_fdm->rgn[i].dest),
+ be64_to_cpu(opal_fdm->rgn[i].size));
if (rc != OPAL_SUCCESS)
break;
- opal_fdm->registered_regions++;
+ registered_regs++;
}
+ opal_fdm->registered_regions = cpu_to_be16(registered_regs);
switch (rc) {
case OPAL_SUCCESS:
@@ -291,7 +298,8 @@ static int opal_fadump_register(struct fw_dump *fadump_conf)
case OPAL_RESOURCE:
/* If MAX regions limit in f/w is hit, warn and proceed. */
pr_warn("%d regions could not be registered for MPIPL as MAX limit is reached!\n",
- (opal_fdm->region_cnt - opal_fdm->registered_regions));
+ (be16_to_cpu(opal_fdm->region_cnt) -
+ be16_to_cpu(opal_fdm->registered_regions)));
fadump_conf->dump_registered = 1;
err = 0;
break;
@@ -312,7 +320,7 @@ static int opal_fadump_register(struct fw_dump *fadump_conf)
* If some regions were registered before OPAL_MPIPL_ADD_RANGE
* OPAL call failed, unregister all regions.
*/
- if ((err < 0) && (opal_fdm->registered_regions > 0))
+ if ((err < 0) && (be16_to_cpu(opal_fdm->registered_regions) > 0))
opal_fadump_unregister(fadump_conf);
return err;
@@ -328,7 +336,7 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf)
return -EIO;
}
- opal_fdm->registered_regions = 0;
+ opal_fdm->registered_regions = cpu_to_be16(0);
fadump_conf->dump_registered = 0;
return 0;
}
@@ -563,25 +571,26 @@ static void opal_fadump_region_show(struct fw_dump *fadump_conf,
else
fdm_ptr = opal_fdm;
- for (i = 0; i < fdm_ptr->region_cnt; i++) {
+ for (i = 0; i < be16_to_cpu(fdm_ptr->region_cnt); i++) {
/*
* Only regions that are registered for MPIPL
* would have dump data.
*/
if ((fadump_conf->dump_active) &&
- (i < fdm_ptr->registered_regions))
- dumped_bytes = fdm_ptr->rgn[i].size;
+ (i < be16_to_cpu(fdm_ptr->registered_regions)))
+ dumped_bytes = be64_to_cpu(fdm_ptr->rgn[i].size);
seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ",
- fdm_ptr->rgn[i].src, fdm_ptr->rgn[i].dest);
+ be64_to_cpu(fdm_ptr->rgn[i].src),
+ be64_to_cpu(fdm_ptr->rgn[i].dest));
seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n",
- fdm_ptr->rgn[i].size, dumped_bytes);
+ be64_to_cpu(fdm_ptr->rgn[i].size), dumped_bytes);
}
- /* Dump is active. Show reserved area start address. */
+ /* Dump is active. Show preserved area start address. */
if (fadump_conf->dump_active) {
- seq_printf(m, "\nMemory above %#016lx is reserved for saving crash dump\n",
- fadump_conf->reserve_dump_area_start);
+ seq_printf(m, "\nMemory above %#016llx is reserved for saving crash dump\n",
+ fadump_conf->boot_mem_top);
}
}
@@ -624,6 +633,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
{
const __be32 *prop;
unsigned long dn;
+ __be64 be_addr;
u64 addr = 0;
int i, len;
s64 ret;
@@ -680,13 +690,13 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
if (!prop)
return;
- ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &addr);
- if ((ret != OPAL_SUCCESS) || !addr) {
+ ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &be_addr);
+ if ((ret != OPAL_SUCCESS) || !be_addr) {
pr_err("Failed to get Kernel metadata (%lld)\n", ret);
return;
}
- addr = be64_to_cpu(addr);
+ addr = be64_to_cpu(be_addr);
pr_debug("Kernel metadata addr: %llx\n", addr);
opal_fdm_active = __va(addr);
@@ -697,14 +707,14 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
}
/* Kernel regions not registered with f/w for MPIPL */
- if (opal_fdm_active->registered_regions == 0) {
+ if (be16_to_cpu(opal_fdm_active->registered_regions) == 0) {
opal_fdm_active = NULL;
return;
}
- ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &addr);
- if (addr) {
- addr = be64_to_cpu(addr);
+ ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &be_addr);
+ if (be_addr) {
+ addr = be64_to_cpu(be_addr);
pr_debug("CPU metadata addr: %llx\n", addr);
opal_cpu_metadata = __va(addr);
}
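The conversions above all follow one pattern: struct opal_fadump_mem_struct is shared with firmware in big-endian, so every field access goes through cpu_to_be*()/be*_to_cpu(), and counters are bumped via a native-endian temporary rather than incremented in place. A minimal sketch of that read-modify-write, assuming a __be16 field like registered_regions:

#include <asm/byteorder.h>
#include <linux/types.h>

static void bump_be16_counter(__be16 *counter)
{
	u16 val = be16_to_cpu(*counter);	/* work on it in CPU byte order */

	val++;
	*counter = cpu_to_be16(val);		/* store back in firmware byte order */
}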
diff --git a/arch/powerpc/platforms/powernv/opal-fadump.h b/arch/powerpc/platforms/powernv/opal-fadump.h
index f1e9ecf548c5..3f715efb0aa6 100644
--- a/arch/powerpc/platforms/powernv/opal-fadump.h
+++ b/arch/powerpc/platforms/powernv/opal-fadump.h
@@ -31,14 +31,14 @@
* OPAL FADump kernel metadata
*
* The address of this structure will be registered with f/w for retrieving
- * and processing during crash dump.
+ * in the capture kernel to process the crash dump.
*/
struct opal_fadump_mem_struct {
u8 version;
u8 reserved[3];
- u16 region_cnt; /* number of regions */
- u16 registered_regions; /* Regions registered for MPIPL */
- u64 fadumphdr_addr;
+ __be16 region_cnt; /* number of regions */
+ __be16 registered_regions; /* Regions registered for MPIPL */
+ __be64 fadumphdr_addr;
struct opal_mpipl_region rgn[FADUMP_MAX_MEM_REGS];
} __packed;
@@ -135,7 +135,7 @@ static inline void opal_fadump_read_regs(char *bufp, unsigned int regs_cnt,
for (i = 0; i < regs_cnt; i++, bufp += reg_entry_size) {
reg_entry = (struct hdat_fadump_reg_entry *)bufp;
val = (cpu_endian ? be64_to_cpu(reg_entry->reg_val) :
- reg_entry->reg_val);
+ (u64)(reg_entry->reg_val));
opal_fadump_set_regval_regnum(regs,
be32_to_cpu(reg_entry->reg_type),
be32_to_cpu(reg_entry->reg_num),
diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c
index 18481a8c52fa..d5ea04e8e4c5 100644
--- a/arch/powerpc/platforms/powernv/opal-flash.c
+++ b/arch/powerpc/platforms/powernv/opal-flash.c
@@ -520,6 +520,10 @@ void __init opal_flash_update_init(void)
{
int ret;
+ /* Firmware update is not supported by firmware */
+ if (!opal_check_token(OPAL_FLASH_VALIDATE))
+ return;
+
/* Allocate validate image buffer */
validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);
if (!validate_flash_data.buf) {
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 3fea5da6d1b3..348a8cdaecd6 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -211,7 +211,7 @@ static void disable_core_pmu_counters(void)
get_hard_smp_processor_id(cpu));
if (rc)
pr_err("%s: Failed to stop Core (cpu = %d)\n",
- __FUNCTION__, cpu);
+ __func__, cpu);
}
cpus_read_unlock();
}
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index 5390c888db16..d129d6d45a50 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -197,7 +197,7 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
/*
* Select access size based on count and alignment and
- * access type. IO and MEM only support byte acceses,
+ * access type. IO and MEM only support byte accesses,
* FW supports all 3.
*/
len = 1;
diff --git a/arch/powerpc/platforms/powernv/opal-memory-errors.c b/arch/powerpc/platforms/powernv/opal-memory-errors.c
index 1e8e17df9ce8..a1754a28265d 100644
--- a/arch/powerpc/platforms/powernv/opal-memory-errors.c
+++ b/arch/powerpc/platforms/powernv/opal-memory-errors.c
@@ -82,7 +82,7 @@ static DECLARE_WORK(mem_error_work, mem_error_handler);
/*
* opal_memory_err_event - notifier handler that queues up the opal message
- * to be preocessed later.
+ * to be processed later.
*/
static int opal_memory_err_event(struct notifier_block *nb,
unsigned long msg_type, void *msg)
diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c
index 53172862d23b..7e419de71db8 100644
--- a/arch/powerpc/platforms/powernv/pci-cxl.c
+++ b/arch/powerpc/platforms/powernv/pci-cxl.c
@@ -4,6 +4,7 @@
*/
#include <linux/module.h>
+#include <misc/cxl-base.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index 30551bbd7988..e96324502db0 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -145,8 +145,7 @@ int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
#ifdef CONFIG_IOMMU_API
int pnv_tce_xchg(struct iommu_table *tbl, long index,
- unsigned long *hpa, enum dma_data_direction *direction,
- bool alloc)
+ unsigned long *hpa, enum dma_data_direction *direction)
{
u64 proto_tce = iommu_direction_to_tce_perm(*direction);
unsigned long newtce = *hpa | proto_tce, oldtce;
@@ -164,7 +163,7 @@ int pnv_tce_xchg(struct iommu_table *tbl, long index,
}
if (!ptce) {
- ptce = pnv_tce(tbl, false, idx, alloc);
+ ptce = pnv_tce(tbl, false, idx, true);
if (!ptce)
return -ENOMEM;
}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index b722ac902269..c8cf2728031a 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -21,10 +21,11 @@
#include <linux/rculist.h>
#include <linux/sizes.h>
#include <linux/debugfs.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/sections.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
@@ -1267,22 +1268,20 @@ static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev,
return false;
}
-static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb,
- bool real_mode)
+static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb)
{
- return real_mode ? (__be64 __iomem *)(phb->regs_phys + 0x210) :
- (phb->regs + 0x210);
+ return phb->regs + 0x210;
}
static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
- unsigned long index, unsigned long npages, bool rm)
+ unsigned long index, unsigned long npages)
{
struct iommu_table_group_link *tgl = list_first_entry_or_null(
&tbl->it_group_list, struct iommu_table_group_link,
next);
struct pnv_ioda_pe *pe = container_of(tgl->table_group,
struct pnv_ioda_pe, table_group);
- __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
+ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb);
unsigned long start, end, inc;
start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
@@ -1297,11 +1296,7 @@ static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
mb(); /* Ensure above stores are visible */
while (start <= end) {
- if (rm)
- __raw_rm_writeq_be(start, invalidate);
- else
- __raw_writeq_be(start, invalidate);
-
+ __raw_writeq_be(start, invalidate);
start += inc;
}
@@ -1320,7 +1315,7 @@ static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
attrs);
if (!ret)
- pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
+ pnv_pci_p7ioc_tce_invalidate(tbl, index, npages);
return ret;
}
@@ -1328,10 +1323,9 @@ static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
#ifdef CONFIG_IOMMU_API
/* Common for IODA1 and IODA2 */
static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index,
- unsigned long *hpa, enum dma_data_direction *direction,
- bool realmode)
+ unsigned long *hpa, enum dma_data_direction *direction)
{
- return pnv_tce_xchg(tbl, index, hpa, direction, !realmode);
+ return pnv_tce_xchg(tbl, index, hpa, direction);
}
#endif
@@ -1340,7 +1334,7 @@ static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
{
pnv_tce_free(tbl, index, npages);
- pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
+ pnv_pci_p7ioc_tce_invalidate(tbl, index, npages);
}
static struct iommu_table_ops pnv_ioda1_iommu_ops = {
@@ -1361,18 +1355,18 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{
/* 01xb - invalidate TCEs that match the specified PE# */
- __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
+ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb);
unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);
mb(); /* Ensure above stores are visible */
__raw_writeq_be(val, invalidate);
}
-static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
+static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe,
unsigned shift, unsigned long index,
unsigned long npages)
{
- __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
+ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb);
unsigned long start, end, inc;
/* We'll invalidate DMA address in PE scope */
@@ -1387,10 +1381,7 @@ static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
mb();
while (start <= end) {
- if (rm)
- __raw_rm_writeq_be(start, invalidate);
- else
- __raw_writeq_be(start, invalidate);
+ __raw_writeq_be(start, invalidate);
start += inc;
}
}
@@ -1407,7 +1398,7 @@ static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
}
static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
- unsigned long index, unsigned long npages, bool rm)
+ unsigned long index, unsigned long npages)
{
struct iommu_table_group_link *tgl;
@@ -1418,7 +1409,7 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
unsigned int shift = tbl->it_page_shift;
if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
- pnv_pci_phb3_tce_invalidate(pe, rm, shift,
+ pnv_pci_phb3_tce_invalidate(pe, shift,
index, npages);
else
opal_pci_tce_kill(phb->opal_id,
@@ -1437,7 +1428,7 @@ static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
attrs);
if (!ret)
- pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
+ pnv_pci_ioda2_tce_invalidate(tbl, index, npages);
return ret;
}
@@ -1447,7 +1438,7 @@ static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
{
pnv_tce_free(tbl, index, npages);
- pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
+ pnv_pci_ioda2_tce_invalidate(tbl, index, npages);
}
static struct iommu_table_ops pnv_ioda2_iommu_ops = {
@@ -2383,7 +2374,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
/*
* This function is supposed to be called on basis of PE from top
- * to bottom style. So the the I/O or MMIO segment assigned to
+ * to bottom style. So the I/O or MMIO segment assigned to
* parent PE could be overridden by its child PEs if necessary.
*/
static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
@@ -2738,7 +2729,7 @@ static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
if (rc != OPAL_SUCCESS)
return;
- pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false);
+ pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size);
if (pe->table_group.group) {
iommu_group_put(pe->table_group.group);
WARN_ON(pe->table_group.group);
diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c
index 04155aaaadb1..7195133b26bb 100644
--- a/arch/powerpc/platforms/powernv/pci-sriov.c
+++ b/arch/powerpc/platforms/powernv/pci-sriov.c
@@ -22,7 +22,7 @@
* have the same requirement.
*
* For a SR-IOV BAR things are a little more awkward since size and alignment
- * are not coupled. The alignment is set based on the the per-VF BAR size, but
+ * are not coupled. The alignment is set based on the per-VF BAR size, but
* the total BAR area is: number-of-vfs * per-vf-size. The number of VFs
* isn't necessarily a power of two, so neither is the total size. To fix that
* we need to finesse (read: hack) the Linux BAR allocator so that it will
@@ -699,7 +699,7 @@ static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
return -ENOSPC;
}
- /* allocate a contigious block of PEs for our VFs */
+ /* allocate a contiguous block of PEs for our VFs */
base_pe = pnv_ioda_alloc_pe(phb, num_vfs);
if (!base_pe) {
pci_err(pdev, "Unable to allocate PEs for %d VFs\n", num_vfs);
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index f7054879ecd4..233a50e65fce 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -18,7 +18,6 @@
#include <asm/sections.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 966a9eb64339..f12643958b8d 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -311,8 +311,7 @@ extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
unsigned long attrs);
extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
- unsigned long *hpa, enum dma_data_direction *direction,
- bool alloc);
+ unsigned long *hpa, enum dma_data_direction *direction);
extern __be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index,
bool alloc);
extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 105d889abd51..824c3ad7a0fa 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -96,6 +96,15 @@ static void __init init_fw_feat_flags(struct device_node *np)
if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np))
security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
+
+ if (fw_feature_is("enabled", "no-need-l1d-flush-msr-pr-1-to-0", np))
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
+
+ if (fw_feature_is("enabled", "no-need-l1d-flush-kernel-on-user-access", np))
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
+
+ if (fw_feature_is("enabled", "no-need-store-drain-on-priv-state-switch", np))
+ security_ftr_clear(SEC_FTR_STF_BARRIER);
}
static void __init pnv_setup_security_mitigations(void)
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index cbb67813cd5d..9e1a25398f98 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -345,7 +345,7 @@ static void __init pnv_smp_probe(void)
}
}
-static int pnv_system_reset_exception(struct pt_regs *regs)
+noinstr static int pnv_system_reset_exception(struct pt_regs *regs)
{
if (smp_handle_nmi_ipi(regs))
return 1;
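The system-reset path runs as an NMI, so marking the handler noinstr keeps it out of instrumented text. A short sketch of what the annotation does; the handler below is illustrative only, not the powernv code.

/*
 * Sketch: "noinstr" (linux/compiler_types.h) places the function in the
 * .noinstr.text section and disables KASAN/KCSAN/tracing instrumentation
 * for it, which is required for code that may run before normal kernel
 * state is known to be sane.
 */
#include <linux/compiler_types.h>
#include <asm/ptrace.h>

noinstr static int example_system_reset_handler(struct pt_regs *regs)
{
	/* only other noinstr or inline helpers should be called from here */
	return 0;	/* 0: not handled, let the next handler run */
}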
diff --git a/arch/powerpc/platforms/powernv/ultravisor.c b/arch/powerpc/platforms/powernv/ultravisor.c
index e4a00ad06f9d..67c8c4b2d8b1 100644
--- a/arch/powerpc/platforms/powernv/ultravisor.c
+++ b/arch/powerpc/platforms/powernv/ultravisor.c
@@ -55,6 +55,7 @@ static int __init uv_init(void)
return -ENODEV;
uv_memcons = memcons_init(node, "memcons");
+ of_node_put(node);
if (!uv_memcons)
return -ENOENT;
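The added of_node_put() applies the usual device-tree refcount rule: of_find_*() returns a node with its reference count raised, and every successful lookup needs a matching put once the node is no longer used. A minimal sketch of the pattern; the compatible string and function name below are made up.

#include <linux/of.h>
#include <linux/errno.h>

static int example_lookup(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "example,device");
	if (!np)
		return -ENODEV;

	/* ... read properties, map resources, etc. ... */

	of_node_put(np);	/* balance the reference on every exit path */
	return 0;
}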
diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c
index a7aabc18039e..c1bfad56447d 100644
--- a/arch/powerpc/platforms/powernv/vas-fault.c
+++ b/arch/powerpc/platforms/powernv/vas-fault.c
@@ -216,7 +216,7 @@ int vas_setup_fault_window(struct vas_instance *vinst)
vas_init_rx_win_attr(&attr, VAS_COP_TYPE_FAULT);
attr.rx_fifo_size = vinst->fault_fifo_size;
- attr.rx_fifo = vinst->fault_fifo;
+ attr.rx_fifo = __pa(vinst->fault_fifo);
/*
* Max creds is based on number of CRBs can fit in the FIFO.
diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index 0f8d39fbf2b2..0072682531d8 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -404,7 +404,7 @@ static void init_winctx_regs(struct pnv_vas_window *window,
*
* See also: Design note in function header.
*/
- val = __pa(winctx->rx_fifo);
+ val = winctx->rx_fifo;
val = SET_FIELD(VAS_PAGE_MIGRATION_SELECT, val, 0);
write_hvwc_reg(window, VREG(LFIFO_BAR), val);
@@ -739,7 +739,7 @@ static void init_winctx_for_rxwin(struct pnv_vas_window *rxwin,
*/
winctx->fifo_disable = true;
winctx->intr_disable = true;
- winctx->rx_fifo = NULL;
+ winctx->rx_fifo = 0;
}
winctx->lnotify_lpid = rxattr->lnotify_lpid;
diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h
index 8bb08e395de0..08d9d3d5a22b 100644
--- a/arch/powerpc/platforms/powernv/vas.h
+++ b/arch/powerpc/platforms/powernv/vas.h
@@ -376,7 +376,7 @@ struct pnv_vas_window {
* is a container for the register fields in the window context.
*/
struct vas_winctx {
- void *rx_fifo;
+ u64 rx_fifo;
int rx_fifo_size;
int wcreds_max;
int rsvd_txbuf_count;
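The three VAS hunks above belong together: the fault FIFO address is converted with __pa() at the point it is stored, the window-context field becomes a plain u64, and init_winctx_regs() no longer converts it a second time, so the LFIFO BAR is written with a real address exactly once. A sketch of the idea, with illustrative struct and function names.

/*
 * A physical address obtained with __pa() is a number for the hardware,
 * not a dereferenceable pointer, so a u64 field is the natural type and
 * avoids an accidental second __pa() later.
 */
#include <asm/page.h>
#include <linux/types.h>

struct example_winctx {
	u64 rx_fifo;		/* real (physical) address of the receive FIFO */
};

static void example_set_rx_fifo(struct example_winctx *ctx, void *fifo_buf)
{
	ctx->rx_fifo = fifo_buf ? __pa(fifo_buf) : 0;	/* convert exactly once */
}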
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index a4048b8c8c50..610682caabc4 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -90,7 +90,7 @@ config PS3_VERBOSE_RESULT
bool "PS3 Verbose LV1 hypercall results" if PS3_ADVANCED
depends on PPC_PS3
help
- Enables more verbose log mesages for LV1 hypercall results.
+ Enables more verbose log messages for LV1 hypercall results.
If in doubt, say N here and reduce the size of the kernel by a
small amount.
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index ef710a715903..c27e6cf85272 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -10,7 +10,6 @@
#include <linux/memblock.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/ps3fb.h>
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 5ce924611b94..1326de55fda6 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -14,7 +14,6 @@
#include <asm/cell-regs.h>
#include <asm/firmware.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/setup.h>
@@ -364,7 +363,7 @@ static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
* @bus_addr: Starting ioc bus address of the area to map.
* @len: Length in bytes of the area to map.
* @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
- * list of all chuncks owned by the region.
+ * list of all chunks owned by the region.
*
* This implementation uses a very simple dma page manager
* based on the dma_chunk structure. This scheme assumes
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index cb844e0add2b..b384cd2d6b99 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -17,8 +17,6 @@
#include <linux/of.h>
#include <linux/slab.h>
-#include <asm/prom.h>
-
#include "platform.h"
enum {
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 3de9145c20bc..d7495785fe47 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -13,13 +13,13 @@
#include <linux/console.h>
#include <linux/export.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/time.h>
#include <asm/iommu.h>
#include <asm/udbg.h>
-#include <asm/prom.h>
#include <asm/lv1call.h>
#include <asm/ps3gpu.h>
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index b637bf292047..2502e9b17df4 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -601,7 +601,7 @@ static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
iopte_flag |= CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW;
break;
default:
- /* not happned */
+ /* not happened */
BUG();
}
result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 9764e1a2ed5c..7aaff5323544 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -33,3 +33,7 @@ obj-$(CONFIG_SUSPEND) += suspend.o
obj-$(CONFIG_PPC_VAS) += vas.o vas-sysfs.o
obj-$(CONFIG_ARCH_HAS_CC_PLATFORM) += cc_platform.o
+
+# nothing that operates in real mode is safe for KASAN
+KASAN_SANITIZE_ras.o := n
+KASAN_SANITIZE_kexec.o := n
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 45a3a3022a85..15ed8206c463 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -475,8 +475,6 @@ static struct notifier_block cmm_reboot_nb = {
static int cmm_memory_cb(struct notifier_block *self,
unsigned long action, void *arg)
{
- int ret = 0;
-
switch (action) {
case MEM_GOING_OFFLINE:
mutex_lock(&hotplug_mutex);
@@ -493,7 +491,7 @@ static int cmm_memory_cb(struct notifier_block *self,
break;
}
- return notifier_from_errno(ret);
+ return NOTIFY_OK;
}
static struct notifier_block cmm_mem_nb = {
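The callback above never produced a non-zero errno, so notifier_from_errno(0) was always NOTIFY_OK; returning NOTIFY_OK directly says the same thing more plainly. A sketch of the convention, with illustrative names.

/*
 * notifier_from_errno() (linux/notifier.h) maps 0 to NOTIFY_OK and a
 * negative errno to a stop value, so a callback whose actions cannot
 * fail may simply return NOTIFY_OK.
 */
#include <linux/notifier.h>
#include <linux/memory.h>

static int example_memory_cb(struct notifier_block *nb,
			     unsigned long action, void *arg)
{
	switch (action) {
	case MEM_GOING_OFFLINE:
		/* take locks / adjust bookkeeping; nothing here can fail */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}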
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index b1f01ac0c29e..498d6efcb5ae 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -19,7 +19,6 @@
#include "of_helpers.h"
#include "pseries.h"
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>
@@ -389,7 +388,7 @@ static void pseries_hp_work_fn(struct work_struct *work)
handle_dlpar_errorlog(hp_work->errlog);
kfree(hp_work->errlog);
- kfree((void *)work);
+ kfree(work);
}
void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 09fafcf2d3a0..1b0c901a6f3b 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -43,6 +43,8 @@ static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_pe;
+static void pseries_eeh_init_edev(struct pci_dn *pdn);
+
static void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
{
struct pci_dn *pdn = pci_get_pdn(pdev);
@@ -359,7 +361,7 @@ static struct eeh_pe *pseries_eeh_pe_get_parent(struct eeh_dev *edev)
* This function takes care of the initialisation and inserts the eeh_dev
* into the correct eeh_pe. If no eeh_pe exists we'll allocate one.
*/
-void pseries_eeh_init_edev(struct pci_dn *pdn)
+static void pseries_eeh_init_edev(struct pci_dn *pdn)
{
struct eeh_pe pe, *parent;
struct eeh_dev *edev;
@@ -510,7 +512,7 @@ static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
int ret = 0;
/*
- * When we're enabling or disabling EEH functioality on
+ * When we're enabling or disabling EEH functionality on
* the particular PE, the PE config address is possibly
* unavailable. Therefore, we have to figure it out from
* the FDT node.
@@ -845,8 +847,7 @@ static int __init eeh_pseries_init(void)
return -EINVAL;
}
- /* Initialize error log lock and size */
- spin_lock_init(&slot_errbuf_lock);
+ /* Initialize error log size */
eeh_error_buf_size = rtas_token("rtas-error-log-max");
if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
pr_info("%s: unknown EEH error log size\n",
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index b81fc846d99c..0f8cd8b06432 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -398,7 +398,7 @@ static int dlpar_online_cpu(struct device_node *dn)
if (get_hard_smp_processor_id(cpu) != thread)
continue;
cpu_maps_update_done();
- find_and_online_cpu_nid(cpu);
+ find_and_update_cpu_nid(cpu);
rc = device_online(get_cpu_device(cpu));
if (rc) {
dlpar_offline_cpu(dn);
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 91cf23495ccb..2e3a317722a8 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -16,7 +16,6 @@
#include <asm/firmware.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include <asm/drmem.h>
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 4d991cf840d9..fba64304e859 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -666,8 +666,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
#ifdef CONFIG_IOMMU_API
static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
- long *tce, enum dma_data_direction *direction,
- bool realmode)
+ long *tce, enum dma_data_direction *direction)
{
long rc;
unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
@@ -1430,7 +1429,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
pci->table_group->tables[1] = newtbl;
- /* Keep default DMA window stuct if removed */
+ /* Keep default DMA window struct if removed */
if (default_win_removed) {
tbl->it_size = 0;
vfree(tbl->it_map);
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 145fcfbc017f..ab6cdbebb35e 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -61,3 +61,11 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
} else
xics_kexec_teardown_cpu(secondary);
}
+
+void pseries_machine_kexec(struct kimage *image)
+{
+ if (firmware_has_feature(FW_FEATURE_SET_MODE))
+ pseries_disable_reloc_on_exc();
+
+ default_machine_kexec(image);
+}
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 760581c5752f..937f9c010b22 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -31,7 +31,6 @@
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlb.h>
-#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index 2119c003fcf9..507dc0b5987d 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -28,7 +28,6 @@
#include <asm/firmware.h>
#include <asm/rtas.h>
#include <asm/time.h>
-#include <asm/prom.h>
#include <asm/vdso_datapage.h>
#include <asm/vio.h>
#include <asm/mmu.h>
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index fb2919fd6bc0..a3a71d37cb9a 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -7,6 +7,7 @@
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <asm/rtas.h>
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 69db2eca367f..cbf1720eb4aa 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -13,9 +13,9 @@
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
+#include <linux/of.h>
#include <asm/nvram.h>
#include <asm/rtas.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
/* Max bytes to read/write in one go */
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index 39962c905542..181b855b3050 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -125,8 +125,8 @@ struct papr_scm_priv {
/* The bits which needs to be overridden */
u64 health_bitmap_inject_mask;
- /* array to have event_code and stat_id mappings */
- char **nvdimm_events_map;
+ /* array to have event_code and stat_id mappings */
+ u8 *nvdimm_events_map;
};
static int papr_scm_pmem_flush(struct nd_region *nd_region,
@@ -370,7 +370,7 @@ static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev,
stat = &stats->scm_statistic[0];
memcpy(&stat->stat_id,
- p->nvdimm_events_map[event->attr.config],
+ &p->nvdimm_events_map[event->attr.config * sizeof(stat->stat_id)],
sizeof(stat->stat_id));
stat->stat_val = 0;
@@ -462,14 +462,13 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu
{
struct papr_scm_perf_stat *stat;
struct papr_scm_perf_stats *stats;
- int index, rc, count;
u32 available_events;
-
- if (!p->stat_buffer_len)
- return -ENOENT;
+ int index, rc = 0;
available_events = (p->stat_buffer_len - sizeof(struct papr_scm_perf_stats))
/ sizeof(struct papr_scm_perf_stat);
+ if (available_events == 0)
+ return -EOPNOTSUPP;
/* Allocate the buffer for phyp where stats are written */
stats = kzalloc(p->stat_buffer_len, GFP_KERNEL);
@@ -478,35 +477,30 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu
return rc;
}
- /* Allocate memory to nvdimm_event_map */
- p->nvdimm_events_map = kcalloc(available_events, sizeof(char *), GFP_KERNEL);
- if (!p->nvdimm_events_map) {
- rc = -ENOMEM;
- goto out_stats;
- }
-
/* Called to get list of events supported */
rc = drc_pmem_query_stats(p, stats, 0);
if (rc)
- goto out_nvdimm_events_map;
-
- for (index = 0, stat = stats->scm_statistic, count = 0;
- index < available_events; index++, ++stat) {
- p->nvdimm_events_map[count] = kmemdup_nul(stat->stat_id, 8, GFP_KERNEL);
- if (!p->nvdimm_events_map[count]) {
- rc = -ENOMEM;
- goto out_nvdimm_events_map;
- }
+ goto out;
- count++;
+ /*
+ * Allocate memory and populate nvdimm_event_map.
+ * Allocate an extra element for NULL entry
+ */
+ p->nvdimm_events_map = kcalloc(available_events + 1,
+ sizeof(stat->stat_id),
+ GFP_KERNEL);
+ if (!p->nvdimm_events_map) {
+ rc = -ENOMEM;
+ goto out;
}
- p->nvdimm_events_map[count] = NULL;
- kfree(stats);
- return 0;
-out_nvdimm_events_map:
- kfree(p->nvdimm_events_map);
-out_stats:
+ /* Copy all stat_ids to event map */
+ for (index = 0, stat = stats->scm_statistic;
+ index < available_events; index++, ++stat) {
+ memcpy(&p->nvdimm_events_map[index * sizeof(stat->stat_id)],
+ &stat->stat_id, sizeof(stat->stat_id));
+ }
+out:
kfree(stats);
return rc;
}
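The nvdimm_events_map rework replaces the per-event kmemdup'd strings with one flat buffer of fixed-width 8-byte stat_ids, so a single kcalloc() covers the whole map and a lookup becomes plain index arithmetic (config * sizeof(stat->stat_id)), as seen in papr_scm_pmu_get_value() above. A minimal sketch of the layout; STAT_ID_LEN and the function name are illustrative.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#define STAT_ID_LEN 8			/* sizeof(stat->stat_id) in the driver */

static u8 *example_build_map(const char (*ids)[STAT_ID_LEN], int nr_events)
{
	u8 *map;
	int i;

	/* one extra, zeroed slot acts as the terminating entry */
	map = kcalloc(nr_events + 1, STAT_ID_LEN, GFP_KERNEL);
	if (!map)
		return NULL;

	for (i = 0; i < nr_events; i++)
		memcpy(&map[i * STAT_ID_LEN], ids[i], STAT_ID_LEN);

	return map;
}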
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 3b6800f774c2..6e671c3809ec 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -14,7 +14,6 @@
#include <asm/eeh.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
#include <asm/ppc-pci.h>
#include <asm/pci.h>
#include "pseries.h"
diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c
index 439ac72c2470..3c290b9ed01b 100644
--- a/arch/powerpc/platforms/pseries/pmem.c
+++ b/arch/powerpc/platforms/pseries/pmem.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index af162aeeae86..f5c916c839c9 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -38,6 +38,7 @@ static inline void smp_init_pseries(void) { }
#endif
extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary);
+void pseries_machine_kexec(struct kimage *image);
extern void pSeries_final_fixup(void);
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 7f7369fec46b..cad7a0c93117 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -13,7 +13,6 @@
#include <linux/slab.h>
#include <linux/of.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/mmu.h>
diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.c b/arch/powerpc/platforms/pseries/rtas-fadump.c
index 35f9cb602c30..b5853e9fcc3c 100644
--- a/arch/powerpc/platforms/pseries/rtas-fadump.c
+++ b/arch/powerpc/platforms/pseries/rtas-fadump.c
@@ -13,9 +13,10 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <asm/page.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/fadump.h>
#include <asm/fadump-internal.h>
@@ -108,6 +109,12 @@ static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf)
fdm.hpte_region.destination_address = cpu_to_be64(addr);
addr += fadump_conf->hpte_region_size;
+ /*
+ * Align boot memory area destination address to page boundary to
+ * be able to mmap read this area in the vmcore.
+ */
+ addr = PAGE_ALIGN(addr);
+
/* RMA region section */
fdm.rmr_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG);
fdm.rmr_region.source_data_type =
@@ -351,7 +358,7 @@ static int __init rtas_fadump_build_cpu_notes(struct fw_dump *fadump_conf)
/* Lower 4 bytes of reg_value contains logical cpu id */
cpu = (be64_to_cpu(reg_entry->reg_value) &
RTAS_FADUMP_CPU_ID_MASK);
- if (fdh && !cpumask_test_cpu(cpu, &fdh->online_mask)) {
+ if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_mask)) {
RTAS_FADUMP_SKIP_TO_NEXT_CPU(reg_entry);
continue;
}
@@ -462,10 +469,10 @@ static void rtas_fadump_region_show(struct fw_dump *fadump_conf,
be64_to_cpu(fdm_ptr->rmr_region.source_len),
be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped));
- /* Dump is active. Show reserved area start address. */
+ /* Dump is active. Show preserved area start address. */
if (fdm_active) {
- seq_printf(m, "\nMemory above %#016lx is reserved for saving crash dump\n",
- fadump_conf->reserve_dump_area_start);
+ seq_printf(m, "\nMemory above %#016llx is reserved for saving crash dump\n",
+ fadump_conf->boot_mem_top);
}
}
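Aligning the boot-memory destination to a page boundary is what lets the vmcore code mmap-read that region later. A tiny sketch of the alignment step; names are illustrative.

/*
 * PAGE_ALIGN() (linux/mm.h) rounds a value up to the next page boundary.
 */
#include <linux/mm.h>
#include <linux/types.h>

static u64 example_next_region_start(u64 addr, u64 prev_region_size)
{
	addr += prev_region_size;	/* end of the previous region */
	return PAGE_ALIGN(addr);	/* copy destination must be page aligned
					 * so the vmcore can be mmap-read */
}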
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 0f74b2284773..afb074269b42 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -36,6 +36,7 @@
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/memblock.h>
#include <linux/swiotlb.h>
@@ -43,7 +44,6 @@
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
@@ -421,16 +421,6 @@ void pseries_disable_reloc_on_exc(void)
}
EXPORT_SYMBOL(pseries_disable_reloc_on_exc);
-#ifdef CONFIG_KEXEC_CORE
-static void pSeries_machine_kexec(struct kimage *image)
-{
- if (firmware_has_feature(FW_FEATURE_SET_MODE))
- pseries_disable_reloc_on_exc();
-
- default_machine_kexec(image);
-}
-#endif
-
#ifdef __LITTLE_ENDIAN__
void pseries_big_endian_exceptions(void)
{
@@ -658,7 +648,7 @@ static resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno,
*/
num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
if (resno >= num_res)
- return 0; /* or an errror */
+ return 0; /* or an error */
i = START_OF_ENTRIES + NEXT_ENTRY * resno;
switch (value) {
@@ -762,7 +752,7 @@ static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
if (!pdev->is_physfn)
return;
- /*Firmware must support open sriov otherwise dont configure*/
+ /*Firmware must support open sriov otherwise don't configure*/
indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
if (indexes)
of_pci_parse_iov_addrs(pdev, indexes);
@@ -1096,7 +1086,7 @@ define_machine(pseries) {
.machine_check_exception = pSeries_machine_check_exception,
.machine_check_log_err = pSeries_machine_check_log_err,
#ifdef CONFIG_KEXEC_CORE
- .machine_kexec = pSeries_machine_kexec,
+ .machine_kexec = pseries_machine_kexec,
.kexec_cpu_down = pseries_kexec_cpu_down,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index f47429323eee..fd2174edfa1d 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -27,7 +27,6 @@
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/pseries/vas-sysfs.c b/arch/powerpc/platforms/pseries/vas-sysfs.c
index ec65586cbeb3..f9f682724e77 100644
--- a/arch/powerpc/platforms/pseries/vas-sysfs.c
+++ b/arch/powerpc/platforms/pseries/vas-sysfs.c
@@ -74,26 +74,26 @@ struct vas_sysfs_entry {
/*
* Create sysfs interface:
- * /sys/devices/vas/vas0/gzip/default_capabilities
+ * /sys/devices/virtual/misc/vas/vas0/gzip/default_capabilities
* This directory contains the following VAS GZIP capabilities
- * for the defaule credit type.
- * /sys/devices/vas/vas0/gzip/default_capabilities/nr_total_credits
+ * for the default credit type.
+ * /sys/devices/virtual/misc/vas/vas0/gzip/default_capabilities/nr_total_credits
* Total number of default credits assigned to the LPAR which
* can be changed with DLPAR operation.
- * /sys/devices/vas/vas0/gzip/default_capabilities/nr_used_credits
+ * /sys/devices/virtual/misc/vas/vas0/gzip/default_capabilities/nr_used_credits
* Number of credits used by the user space. One credit will
* be assigned for each window open.
*
- * /sys/devices/vas/vas0/gzip/qos_capabilities
+ * /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities
* This directory contains the following VAS GZIP capabilities
* for the Quality of Service (QoS) credit type.
- * /sys/devices/vas/vas0/gzip/qos_capabilities/nr_total_credits
+ * /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities/nr_total_credits
* Total number of QoS credits assigned to the LPAR. The user
* has to define this value using HMC interface. It can be
* changed dynamically by the user.
- * /sys/devices/vas/vas0/gzip/qos_capabilities/nr_used_credits
+ * /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities/nr_used_credits
* Number of credits used by the user space.
- * /sys/devices/vas/vas0/gzip/qos_capabilities/update_total_credits
+ * /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities/update_total_credits
* Update total QoS credits dynamically
*/
@@ -248,6 +248,7 @@ int __init sysfs_pseries_vas_init(struct vas_all_caps *vas_caps)
pseries_vas_kobj = kobject_create_and_add("vas0",
&vas_miscdev.this_device->kobj);
if (!pseries_vas_kobj) {
+ misc_deregister(&vas_miscdev);
pr_err("Failed to create VAS sysfs entry\n");
return -ENOMEM;
}
@@ -259,6 +260,7 @@ int __init sysfs_pseries_vas_init(struct vas_all_caps *vas_caps)
if (!gzip_caps_kobj) {
pr_err("Failed to create VAS GZIP capability entry\n");
kobject_put(pseries_vas_kobj);
+ misc_deregister(&vas_miscdev);
return -ENOMEM;
}
}
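The two added misc_deregister() calls complete the error unwinding: once misc_register() has succeeded, every later failure in the init path must undo it, in reverse order of setup. A sketch of the rule; the function name is illustrative, while misc_register()/misc_deregister()/kobject_create_and_add() are the real APIs used here.

#include <linux/miscdevice.h>
#include <linux/kobject.h>
#include <linux/errno.h>

static int example_sysfs_init(struct miscdevice *miscdev, struct kobject **kobj)
{
	int rc;

	rc = misc_register(miscdev);
	if (rc)
		return rc;

	*kobj = kobject_create_and_add("vas0", &miscdev->this_device->kobj);
	if (!*kobj) {
		misc_deregister(miscdev);	/* undo the step that succeeded */
		return -ENOMEM;
	}

	return 0;
}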
diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
index ec643bbdb67f..500a1fc4a1d7 100644
--- a/arch/powerpc/platforms/pseries/vas.c
+++ b/arch/powerpc/platforms/pseries/vas.c
@@ -801,7 +801,7 @@ int vas_reconfig_capabilties(u8 type, int new_nr_creds)
atomic_set(&caps->nr_total_credits, new_nr_creds);
/*
* The total number of available credits may be decreased or
- * inceased with DLPAR operation. Means some windows have to be
+ * increased with DLPAR operation. Means some windows have to be
* closed / reopened. Hold the vas_pseries_mutex so that the
* the user space can not open new windows.
*/
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index c9f9be4ea26a..00ecac2c205b 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -23,6 +23,7 @@
#include <linux/dma-map-ops.h>
#include <linux/kobject.h>
#include <linux/kexec.h>
+#include <linux/of_irq.h>
#include <asm/iommu.h>
#include <asm/dma.h>
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 026b3f01a991..9cb1d029511a 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_FSL_PMC) += fsl_pmc.o
obj-$(CONFIG_FSL_CORENET_RCPM) += fsl_rcpm.o
obj-$(CONFIG_FSL_LBC) += fsl_lbc.o
obj-$(CONFIG_FSL_GTM) += fsl_gtm.o
-obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o
obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o
obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index 9e86074719a9..cb9ba4ef557a 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -30,11 +30,11 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <asm/immap_cpm2.h>
#include <asm/mpc8260.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/fs_pd.h>
#include "cpm2_pic.h"
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index be6b99b1b352..98096bbfd62e 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -25,8 +25,8 @@
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/kmemleak.h>
+#include <linux/of_address.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
@@ -404,9 +404,10 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
}
/* Initialize the DART HW */
- if (dart_init(dn) != 0)
+ if (dart_init(dn) != 0) {
+ of_node_put(dn);
return;
-
+ }
/*
* U4 supports a DART bypass, we use it for 64-bit capable devices to
* improve performance. However, that only works for devices connected
@@ -419,6 +420,7 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
/* Setup pci_dma ops */
set_pci_dma_ops(&dma_iommu_ops);
+ of_node_put(dn);
}
#ifdef CONFIG_PM
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
index 22991e1128e3..3093f14111e6 100644
--- a/arch/powerpc/sysdev/dcr.c
+++ b/arch/powerpc/sysdev/dcr.c
@@ -8,7 +8,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
-#include <asm/prom.h>
+#include <linux/of_address.h>
#include <asm/dcr.h>
#ifdef CONFIG_PPC_DCR_MMIO
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h b/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h
deleted file mode 100644
index ce370749add9..000000000000
--- a/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2009-2010, 2012 Freescale Semiconductor, Inc
- *
- * QorIQ based Cache Controller Memory Mapped Registers
- *
- * Author: Vivek Mahajan <vivek.mahajan@freescale.com>
- */
-
-#ifndef __FSL_85XX_CACHE_CTLR_H__
-#define __FSL_85XX_CACHE_CTLR_H__
-
-#define L2CR_L2FI 0x40000000 /* L2 flash invalidate */
-#define L2CR_L2IO 0x00200000 /* L2 instruction only */
-#define L2CR_SRAM_ZERO 0x00000000 /* L2SRAM zero size */
-#define L2CR_SRAM_FULL 0x00010000 /* L2SRAM full size */
-#define L2CR_SRAM_HALF 0x00020000 /* L2SRAM half size */
-#define L2CR_SRAM_TWO_HALFS 0x00030000 /* L2SRAM two half sizes */
-#define L2CR_SRAM_QUART 0x00040000 /* L2SRAM one quarter size */
-#define L2CR_SRAM_TWO_QUARTS 0x00050000 /* L2SRAM two quarter size */
-#define L2CR_SRAM_EIGHTH 0x00060000 /* L2SRAM one eighth size */
-#define L2CR_SRAM_TWO_EIGHTH 0x00070000 /* L2SRAM two eighth size */
-
-#define L2SRAM_OPTIMAL_SZ_SHIFT 0x00000003 /* Optimum size for L2SRAM */
-
-#define L2SRAM_BAR_MSK_LO18 0xFFFFC000 /* Lower 18 bits */
-#define L2SRAM_BARE_MSK_HI4 0x0000000F /* Upper 4 bits */
-
-enum cache_sram_lock_ways {
- LOCK_WAYS_ZERO,
- LOCK_WAYS_EIGHTH,
- LOCK_WAYS_TWO_EIGHTH,
- LOCK_WAYS_HALF = 4,
- LOCK_WAYS_FULL = 8,
-};
-
-struct mpc85xx_l2ctlr {
- u32 ctl; /* 0x000 - L2 control */
- u8 res1[0xC];
- u32 ewar0; /* 0x010 - External write address 0 */
- u32 ewarea0; /* 0x014 - External write address extended 0 */
- u32 ewcr0; /* 0x018 - External write ctrl */
- u8 res2[4];
- u32 ewar1; /* 0x020 - External write address 1 */
- u32 ewarea1; /* 0x024 - External write address extended 1 */
- u32 ewcr1; /* 0x028 - External write ctrl 1 */
- u8 res3[4];
- u32 ewar2; /* 0x030 - External write address 2 */
- u32 ewarea2; /* 0x034 - External write address extended 2 */
- u32 ewcr2; /* 0x038 - External write ctrl 2 */
- u8 res4[4];
- u32 ewar3; /* 0x040 - External write address 3 */
- u32 ewarea3; /* 0x044 - External write address extended 3 */
- u32 ewcr3; /* 0x048 - External write ctrl 3 */
- u8 res5[0xB4];
- u32 srbar0; /* 0x100 - SRAM base address 0 */
- u32 srbarea0; /* 0x104 - SRAM base addr reg ext address 0 */
- u32 srbar1; /* 0x108 - SRAM base address 1 */
- u32 srbarea1; /* 0x10C - SRAM base addr reg ext address 1 */
- u8 res6[0xCF0];
- u32 errinjhi; /* 0xE00 - Error injection mask high */
- u32 errinjlo; /* 0xE04 - Error injection mask low */
- u32 errinjctl; /* 0xE08 - Error injection tag/ecc control */
- u8 res7[0x14];
- u32 captdatahi; /* 0xE20 - Error data high capture */
- u32 captdatalo; /* 0xE24 - Error data low capture */
- u32 captecc; /* 0xE28 - Error syndrome */
- u8 res8[0x14];
- u32 errdet; /* 0xE40 - Error detect */
- u32 errdis; /* 0xE44 - Error disable */
- u32 errinten; /* 0xE48 - Error interrupt enable */
- u32 errattr; /* 0xE4c - Error attribute capture */
- u32 erradrrl; /* 0xE50 - Error address capture low */
- u32 erradrrh; /* 0xE54 - Error address capture high */
- u32 errctl; /* 0xE58 - Error control */
- u8 res9[0x1A4];
-};
-
-struct sram_parameters {
- unsigned int sram_size;
- phys_addr_t sram_offset;
-};
-
-extern int instantiate_cache_sram(struct platform_device *dev,
- struct sram_parameters sram_params);
-extern void remove_cache_sram(struct platform_device *dev);
-
-#endif /* __FSL_85XX_CACHE_CTLR_H__ */
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
deleted file mode 100644
index a3aeaa5f0f1b..000000000000
--- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
+++ /dev/null
@@ -1,147 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2009-2010 Freescale Semiconductor, Inc.
- *
- * Simple memory allocator abstraction for QorIQ (P1/P2) based Cache-SRAM
- *
- * Author: Vivek Mahajan <vivek.mahajan@freescale.com>
- *
- * This file is derived from the original work done
- * by Sylvain Munaut for the Bestcomm SRAM allocator.
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/of_platform.h>
-#include <linux/pgtable.h>
-#include <asm/fsl_85xx_cache_sram.h>
-
-#include "fsl_85xx_cache_ctlr.h"
-
-struct mpc85xx_cache_sram *cache_sram;
-
-void *mpc85xx_cache_sram_alloc(unsigned int size,
- phys_addr_t *phys, unsigned int align)
-{
- unsigned long offset;
- unsigned long flags;
-
- if (unlikely(cache_sram == NULL))
- return NULL;
-
- if (!size || (size > cache_sram->size) || (align > cache_sram->size)) {
- pr_err("%s(): size(=%x) or align(=%x) zero or too big\n",
- __func__, size, align);
- return NULL;
- }
-
- if ((align & (align - 1)) || align <= 1) {
- pr_err("%s(): align(=%x) must be power of two and >1\n",
- __func__, align);
- return NULL;
- }
-
- spin_lock_irqsave(&cache_sram->lock, flags);
- offset = rh_alloc_align(cache_sram->rh, size, align, NULL);
- spin_unlock_irqrestore(&cache_sram->lock, flags);
-
- if (IS_ERR_VALUE(offset))
- return NULL;
-
- *phys = cache_sram->base_phys + offset;
-
- return (unsigned char *)cache_sram->base_virt + offset;
-}
-EXPORT_SYMBOL(mpc85xx_cache_sram_alloc);
-
-void mpc85xx_cache_sram_free(void *ptr)
-{
- unsigned long flags;
- BUG_ON(!ptr);
-
- spin_lock_irqsave(&cache_sram->lock, flags);
- rh_free(cache_sram->rh, ptr - cache_sram->base_virt);
- spin_unlock_irqrestore(&cache_sram->lock, flags);
-}
-EXPORT_SYMBOL(mpc85xx_cache_sram_free);
-
-int __init instantiate_cache_sram(struct platform_device *dev,
- struct sram_parameters sram_params)
-{
- int ret = 0;
-
- if (cache_sram) {
- dev_err(&dev->dev, "Already initialized cache-sram\n");
- return -EBUSY;
- }
-
- cache_sram = kzalloc(sizeof(struct mpc85xx_cache_sram), GFP_KERNEL);
- if (!cache_sram) {
- dev_err(&dev->dev, "Out of memory for cache_sram structure\n");
- return -ENOMEM;
- }
-
- cache_sram->base_phys = sram_params.sram_offset;
- cache_sram->size = sram_params.sram_size;
-
- if (!request_mem_region(cache_sram->base_phys, cache_sram->size,
- "fsl_85xx_cache_sram")) {
- dev_err(&dev->dev, "%pOF: request memory failed\n",
- dev->dev.of_node);
- ret = -ENXIO;
- goto out_free;
- }
-
- cache_sram->base_virt = ioremap_coherent(cache_sram->base_phys,
- cache_sram->size);
- if (!cache_sram->base_virt) {
- dev_err(&dev->dev, "%pOF: ioremap_coherent failed\n",
- dev->dev.of_node);
- ret = -ENOMEM;
- goto out_release;
- }
-
- cache_sram->rh = rh_create(sizeof(unsigned int));
- if (IS_ERR(cache_sram->rh)) {
- dev_err(&dev->dev, "%pOF: Unable to create remote heap\n",
- dev->dev.of_node);
- ret = PTR_ERR(cache_sram->rh);
- goto out_unmap;
- }
-
- rh_attach_region(cache_sram->rh, 0, cache_sram->size);
- spin_lock_init(&cache_sram->lock);
-
- dev_info(&dev->dev, "[base:0x%llx, size:0x%x] configured and loaded\n",
- (unsigned long long)cache_sram->base_phys, cache_sram->size);
-
- return 0;
-
-out_unmap:
- iounmap(cache_sram->base_virt);
-
-out_release:
- release_mem_region(cache_sram->base_phys, cache_sram->size);
-
-out_free:
- kfree(cache_sram);
- return ret;
-}
-
-void remove_cache_sram(struct platform_device *dev)
-{
- BUG_ON(!cache_sram);
-
- rh_detach_region(cache_sram->rh, 0, cache_sram->size);
- rh_destroy(cache_sram->rh);
-
- iounmap(cache_sram->base_virt);
- release_mem_region(cache_sram->base_phys, cache_sram->size);
-
- kfree(cache_sram);
- cache_sram = NULL;
-
- dev_info(&dev->dev, "MPC85xx Cache-SRAM driver unloaded\n");
-}
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
deleted file mode 100644
index 2d0af0c517bb..000000000000
--- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
+++ /dev/null
@@ -1,216 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2009-2010, 2012 Freescale Semiconductor, Inc.
- *
- * QorIQ (P1/P2) L2 controller init for Cache-SRAM instantiation
- *
- * Author: Vivek Mahajan <vivek.mahajan@freescale.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <asm/io.h>
-
-#include "fsl_85xx_cache_ctlr.h"
-
-static char *sram_size;
-static char *sram_offset;
-struct mpc85xx_l2ctlr __iomem *l2ctlr;
-
-static int get_cache_sram_params(struct sram_parameters *sram_params)
-{
- unsigned long long addr;
- unsigned int size;
-
- if (!sram_size || (kstrtouint(sram_size, 0, &size) < 0))
- return -EINVAL;
-
- if (!sram_offset || (kstrtoull(sram_offset, 0, &addr) < 0))
- return -EINVAL;
-
- sram_params->sram_offset = addr;
- sram_params->sram_size = size;
-
- return 0;
-}
-
-static int __init get_size_from_cmdline(char *str)
-{
- if (!str)
- return 0;
-
- sram_size = str;
- return 1;
-}
-
-static int __init get_offset_from_cmdline(char *str)
-{
- if (!str)
- return 0;
-
- sram_offset = str;
- return 1;
-}
-
-__setup("cache-sram-size=", get_size_from_cmdline);
-__setup("cache-sram-offset=", get_offset_from_cmdline);
-
-static int mpc85xx_l2ctlr_of_probe(struct platform_device *dev)
-{
- long rval;
- unsigned int rem;
- unsigned char ways;
- const unsigned int *prop;
- unsigned int l2cache_size;
- struct sram_parameters sram_params;
-
- if (!dev->dev.of_node) {
- dev_err(&dev->dev, "Device's OF-node is NULL\n");
- return -EINVAL;
- }
-
- prop = of_get_property(dev->dev.of_node, "cache-size", NULL);
- if (!prop) {
- dev_err(&dev->dev, "Missing L2 cache-size\n");
- return -EINVAL;
- }
- l2cache_size = *prop;
-
- if (get_cache_sram_params(&sram_params))
- return 0; /* fall back to L2 cache only */
-
- rem = l2cache_size % sram_params.sram_size;
- ways = LOCK_WAYS_FULL * sram_params.sram_size / l2cache_size;
- if (rem || (ways & (ways - 1))) {
- dev_err(&dev->dev, "Illegal cache-sram-size in command line\n");
- return -EINVAL;
- }
-
- l2ctlr = of_iomap(dev->dev.of_node, 0);
- if (!l2ctlr) {
- dev_err(&dev->dev, "Can't map L2 controller\n");
- return -EINVAL;
- }
-
- /*
- * Write bits[0-17] to srbar0
- */
- out_be32(&l2ctlr->srbar0,
- lower_32_bits(sram_params.sram_offset) & L2SRAM_BAR_MSK_LO18);
-
- /*
- * Write bits[18-21] to srbare0
- */
-#ifdef CONFIG_PHYS_64BIT
- out_be32(&l2ctlr->srbarea0,
- upper_32_bits(sram_params.sram_offset) & L2SRAM_BARE_MSK_HI4);
-#endif
-
- clrsetbits_be32(&l2ctlr->ctl, L2CR_L2E, L2CR_L2FI);
-
- switch (ways) {
- case LOCK_WAYS_EIGHTH:
- setbits32(&l2ctlr->ctl,
- L2CR_L2E | L2CR_L2FI | L2CR_SRAM_EIGHTH);
- break;
-
- case LOCK_WAYS_TWO_EIGHTH:
- setbits32(&l2ctlr->ctl,
- L2CR_L2E | L2CR_L2FI | L2CR_SRAM_QUART);
- break;
-
- case LOCK_WAYS_HALF:
- setbits32(&l2ctlr->ctl,
- L2CR_L2E | L2CR_L2FI | L2CR_SRAM_HALF);
- break;
-
- case LOCK_WAYS_FULL:
- default:
- setbits32(&l2ctlr->ctl,
- L2CR_L2E | L2CR_L2FI | L2CR_SRAM_FULL);
- break;
- }
- eieio();
-
- rval = instantiate_cache_sram(dev, sram_params);
- if (rval < 0) {
- dev_err(&dev->dev, "Can't instantiate Cache-SRAM\n");
- iounmap(l2ctlr);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int mpc85xx_l2ctlr_of_remove(struct platform_device *dev)
-{
- BUG_ON(!l2ctlr);
-
- iounmap(l2ctlr);
- remove_cache_sram(dev);
- dev_info(&dev->dev, "MPC85xx L2 controller unloaded\n");
-
- return 0;
-}
-
-static const struct of_device_id mpc85xx_l2ctlr_of_match[] = {
- {
- .compatible = "fsl,p2020-l2-cache-controller",
- },
- {
- .compatible = "fsl,p2010-l2-cache-controller",
- },
- {
- .compatible = "fsl,p1020-l2-cache-controller",
- },
- {
- .compatible = "fsl,p1011-l2-cache-controller",
- },
- {
- .compatible = "fsl,p1013-l2-cache-controller",
- },
- {
- .compatible = "fsl,p1022-l2-cache-controller",
- },
- {
- .compatible = "fsl,mpc8548-l2-cache-controller",
- },
- { .compatible = "fsl,mpc8544-l2-cache-controller",},
- { .compatible = "fsl,mpc8572-l2-cache-controller",},
- { .compatible = "fsl,mpc8536-l2-cache-controller",},
- { .compatible = "fsl,p1021-l2-cache-controller",},
- { .compatible = "fsl,p1012-l2-cache-controller",},
- { .compatible = "fsl,p1025-l2-cache-controller",},
- { .compatible = "fsl,p1016-l2-cache-controller",},
- { .compatible = "fsl,p1024-l2-cache-controller",},
- { .compatible = "fsl,p1015-l2-cache-controller",},
- { .compatible = "fsl,p1010-l2-cache-controller",},
- { .compatible = "fsl,bsc9131-l2-cache-controller",},
- {},
-};
-
-static struct platform_driver mpc85xx_l2ctlr_of_platform_driver = {
- .driver = {
- .name = "fsl-l2ctlr",
- .of_match_table = mpc85xx_l2ctlr_of_match,
- },
- .probe = mpc85xx_l2ctlr_of_probe,
- .remove = mpc85xx_l2ctlr_of_remove,
-};
-
-static __init int mpc85xx_l2ctlr_of_init(void)
-{
- return platform_driver_register(&mpc85xx_l2ctlr_of_platform_driver);
-}
-
-static void __exit mpc85xx_l2ctlr_of_exit(void)
-{
- platform_driver_unregister(&mpc85xx_l2ctlr_of_platform_driver);
-}
-
-subsys_initcall(mpc85xx_l2ctlr_of_init);
-module_exit(mpc85xx_l2ctlr_of_exit);
-
-MODULE_DESCRIPTION("Freescale MPC85xx L2 controller init");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index 1985e067e952..217cea150987 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -18,13 +18,14 @@
#include <linux/types.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/syscore_ops.h>
-#include <asm/prom.h>
#include <asm/fsl_lbc.h>
static DEFINE_SPINLOCK(fsl_lbc_lock);
@@ -37,7 +38,7 @@ EXPORT_SYMBOL(fsl_lbc_ctrl_dev);
*
* This function converts a base address of lbc into the right format for the
* BR register. If the SOC has eLBC then it returns 32bit physical address
- * else it convers a 34bit local bus physical address to correct format of
+ * else it converts a 34bit local bus physical address to correct format of
* 32bit address for BR register (Example: MPC8641).
*/
u32 fsl_lbc_addr(phys_addr_t addr_base)
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index b3475ae9f236..ef9a5999fa93 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -11,11 +11,13 @@
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/seq_file.h>
#include <sysdev/fsl_soc.h>
-#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index a97ce602394e..1011cfea2e32 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -22,6 +22,8 @@
#include <linux/interrupt.h>
#include <linux/memblock.h>
#include <linux/log2.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/suspend.h>
@@ -29,7 +31,6 @@
#include <linux/uaccess.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/machdep.h>
@@ -218,7 +219,7 @@ static void setup_pci_atmu(struct pci_controller *hose)
* windows have implemented the default target value as 0xf
* for CCSR space.In all Freescale legacy devices the target
* of 0xf is reserved for local memory space. 9132 Rev1.0
- * now has local mempry space mapped to target 0x0 instead of
+ * now has local memory space mapped to target 0x0 instead of
* 0xf. Hence adding a workaround to remove the target 0xf
* defined for memory space from Inbound window attributes.
*/
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index ff7906b48ca1..1bfc9afa8a1a 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -505,8 +505,10 @@ int fsl_rio_setup(struct platform_device *dev)
if (rc) {
dev_err(&dev->dev, "Can't get %pOF property 'reg'\n",
rmu_node);
+ of_node_put(rmu_node);
goto err_rmu;
}
+ of_node_put(rmu_node);
rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs));
if (!rmu_regs_win) {
dev_err(&dev->dev, "Unable to map rmu register window\n");
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 90ad16161604..78118c188993 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -31,7 +31,6 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/time.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <sysdev/fsl_soc.h>
#include <mm/mmu_decl.h>
diff --git a/arch/powerpc/sysdev/ge/ge_pic.c b/arch/powerpc/sysdev/ge/ge_pic.c
index 02553a8ce191..a6c424680c37 100644
--- a/arch/powerpc/sysdev/ge/ge_pic.c
+++ b/arch/powerpc/sysdev/ge/ge_pic.c
@@ -14,12 +14,14 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <asm/byteorder.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/irq.h>
#include "ge_pic.h"
@@ -150,7 +152,7 @@ static struct irq_chip gef_pic_chip = {
};
-/* When an interrupt is being configured, this call allows some flexibilty
+/* When an interrupt is being configured, this call allows some flexibility
* in deciding which irq_chip structure is used
*/
static int gef_pic_host_map(struct irq_domain *h, unsigned int virq,
diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c
index aaba0b809032..fd2f94a884f0 100644
--- a/arch/powerpc/sysdev/grackle.c
+++ b/arch/powerpc/sysdev/grackle.c
@@ -9,9 +9,9 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/of.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/grackle.h>
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index 3b1ae98e3ce9..06e391485da7 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -6,11 +6,11 @@
#include <linux/ioport.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/i8259.h>
-#include <asm/prom.h>
static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */
diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
index 09b36617425e..1aacb403a010 100644
--- a/arch/powerpc/sysdev/indirect_pci.c
+++ b/arch/powerpc/sysdev/indirect_pci.c
@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 3f10c9fc3b68..5f69e2d50f26 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -18,9 +18,10 @@
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/fsl_devices.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/ipic.h>
#include "ipic.h"
diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c
index 628f9b759c84..eb48210ef98e 100644
--- a/arch/powerpc/sysdev/mmio_nvram.c
+++ b/arch/powerpc/sysdev/mmio_nvram.c
@@ -10,12 +10,12 @@
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/of_address.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/machdep.h>
#include <asm/nvram.h>
-#include <asm/prom.h>
static void __iomem *mmio_nvram_start;
static long mmio_nvram_len;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index dbcbaa4c0663..9a9381f102d6 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -30,6 +30,8 @@
#include <linux/syscore_ops.h>
#include <linux/ratelimit.h>
#include <linux/pgtable.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index 36ec0bdd8b63..698fefaaa6dd 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -7,12 +7,13 @@
*/
#include <linux/list.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
-#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic_msgr.h>
@@ -99,7 +100,7 @@ void mpic_msgr_disable(struct mpic_msgr *msgr)
EXPORT_SYMBOL_GPL(mpic_msgr_disable);
/* The following three functions are used to compute the order and number of
- * the message register blocks. They are clearly very inefficent. However,
+ * the message register blocks. They are clearly very inefficient. However,
* they are called *only* a few times during device initialization.
*/
static unsigned int mpic_msgr_number_of_blocks(void)
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c
index f412d6ad0b66..34246c8e01c2 100644
--- a/arch/powerpc/sysdev/mpic_msi.c
+++ b/arch/powerpc/sysdev/mpic_msi.c
@@ -4,10 +4,11 @@
*/
#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
#include <linux/bitmap.h>
#include <linux/msi.h>
#include <asm/mpic.h>
-#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/msi_bitmap.h>
@@ -37,7 +38,7 @@ static int __init mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
/* Reserve source numbers we know are reserved in the HW.
*
* This is a bit of a mix of U3 and U4 reserves but that's going
- * to work fine, we have plenty enugh numbers left so let's just
+ * to work fine, we have plenty enough numbers left so let's just
* mark anything we don't like reserved.
*/
for (i = 0; i < 8; i++)
diff --git a/arch/powerpc/sysdev/mpic_timer.c b/arch/powerpc/sysdev/mpic_timer.c
index 444e9ce42d0a..b2f0a73e8f93 100644
--- a/arch/powerpc/sysdev/mpic_timer.c
+++ b/arch/powerpc/sysdev/mpic_timer.c
@@ -255,7 +255,7 @@ EXPORT_SYMBOL(mpic_start_timer);
/**
* mpic_stop_timer - stop hardware timer
- * @handle: the timer to be stoped
+ * @handle: the timer to be stopped
*
* The timer periodically generates an interrupt. Unless user stops the timer.
*/
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 3f4841dfefb5..1d8cfdfdf115 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -5,9 +5,9 @@
*/
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <asm/mpic.h>
-#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/msi_bitmap.h>
@@ -78,7 +78,7 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
/* U4 PCIe MSIs need to write to the special register in
* the bridge that generates interrupts. There should be
- * theorically a register at 0xf8005000 where you just write
+ * theoretically a register at 0xf8005000 where you just write
* the MSI number and that triggers the right interrupt, but
* unfortunately, this is busted in HW, the bridge endian swaps
* the value and hits the wrong nibble in the register.
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index fdd3e17150fc..0b6e37f3ffb8 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -8,6 +8,7 @@
#include <linux/kmemleak.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <asm/msi_bitmap.h>
#include <asm/setup.h>
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
index 9c8744e09a9c..9dabb50c36eb 100644
--- a/arch/powerpc/sysdev/pmi.c
+++ b/arch/powerpc/sysdev/pmi.c
@@ -17,12 +17,13 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/workqueue.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/io.h>
#include <asm/pmi.h>
-#include <asm/prom.h>
struct pmi_data {
struct list_head handler;
diff --git a/arch/powerpc/sysdev/rtc_cmos_setup.c b/arch/powerpc/sysdev/rtc_cmos_setup.c
index af0f9beddca9..47cc87bd6a33 100644
--- a/arch/powerpc/sysdev/rtc_cmos_setup.c
+++ b/arch/powerpc/sysdev/rtc_cmos_setup.c
@@ -14,8 +14,8 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mc146818rtc.h>
+#include <linux/of_address.h>
-#include <asm/prom.h>
static int __init add_rtc(void)
{
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index 9e13fb35ed5c..30051397292f 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -16,13 +16,14 @@
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <asm/tsi108.h>
#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#undef DEBUG
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 1070220f15d5..5af4c35ff584 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -12,7 +12,9 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/interrupt.h>
+#include <linux/of_address.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -23,7 +25,6 @@
#include <asm/tsi108.h>
#include <asm/tsi108_pci.h>
#include <asm/tsi108_irq.h>
-#include <asm/prom.h>
#undef DEBUG
#ifdef DEBUG
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 7d13d2ef5a90..edc17b6b1cc2 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -6,15 +6,16 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/spinlock.h>
#include <linux/module.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index bda4c32582d9..4dae624b9f2f 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -196,6 +196,7 @@ int __init icp_opal_init(void)
printk("XICS: Using OPAL ICP fallbacks\n");
+ of_node_put(np);
return 0;
}
diff --git a/arch/powerpc/sysdev/xics/ics-native.c b/arch/powerpc/sysdev/xics/ics-native.c
index dec7d93a8ba1..112c8a1e8159 100644
--- a/arch/powerpc/sysdev/xics/ics-native.c
+++ b/arch/powerpc/sysdev/xics/ics-native.c
@@ -15,11 +15,11 @@
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
#include <linux/list.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c
index c4d95d8beb6f..6cfbb4fac7fb 100644
--- a/arch/powerpc/sysdev/xics/ics-opal.c
+++ b/arch/powerpc/sysdev/xics/ics-opal.c
@@ -18,7 +18,6 @@
#include <linux/spinlock.h>
#include <linux/msi.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c
index b9da317b7a2d..9e7007f9aca5 100644
--- a/arch/powerpc/sysdev/xics/ics-rtas.c
+++ b/arch/powerpc/sysdev/xics/ics-rtas.c
@@ -10,7 +10,6 @@
#include <linux/spinlock.h>
#include <linux/msi.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index f3fb2a12124c..d3a4156e8788 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -6,6 +6,7 @@
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
@@ -17,7 +18,6 @@
#include <linux/spinlock.h>
#include <linux/delay.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
@@ -146,7 +146,7 @@ void __init xics_smp_probe(void)
#endif /* CONFIG_SMP */
-void xics_teardown_cpu(void)
+noinstr void xics_teardown_cpu(void)
{
struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
@@ -159,7 +159,7 @@ void xics_teardown_cpu(void)
icp_ops->teardown_cpu();
}
-void xics_kexec_teardown_cpu(int secondary)
+noinstr void xics_kexec_teardown_cpu(int secondary)
{
xics_teardown_cpu();
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index bb5bda6b2357..61b9f98dfd4a 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -9,6 +9,7 @@
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
@@ -21,7 +22,6 @@
#include <linux/msi.h>
#include <linux/vmalloc.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
@@ -1241,7 +1241,7 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
return 0;
}
-static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
+noinstr static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
@@ -1634,7 +1634,7 @@ void xive_flush_interrupt(void)
#endif /* CONFIG_SMP */
-void xive_teardown_cpu(void)
+noinstr void xive_teardown_cpu(void)
{
struct xive_cpu *xc = __this_cpu_read(xive_cpu);
unsigned int cpu = smp_processor_id();
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index f940428ad13f..d25d8c692909 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -13,6 +13,7 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
@@ -21,7 +22,6 @@
#include <linux/kmemleak.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
@@ -617,7 +617,7 @@ bool __init xive_native_init(void)
xive_tima_os = r.start;
- /* Grab size of provisionning pages */
+ /* Grab size of provisioning pages */
xive_parse_provisioning(np);
/* Switch the XIVE to exploitation mode */
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index 29456c255f9f..7d5128676e83 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -11,6 +11,8 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
@@ -830,12 +832,12 @@ bool __init xive_spapr_init(void)
/* Resource 1 is the OS ring TIMA */
if (of_address_to_resource(np, 1, &r)) {
pr_err("Failed to get thread mgmnt area resource\n");
- return false;
+ goto err_put;
}
tima = ioremap(r.start, resource_size(&r));
if (!tima) {
pr_err("Failed to map thread mgmnt area\n");
- return false;
+ goto err_put;
}
if (!xive_get_max_prio(&max_prio))
@@ -871,6 +873,7 @@ bool __init xive_spapr_init(void)
if (!xive_core_init(np, &xive_spapr_ops, tima, TM_QW1_OS, max_prio))
goto err_mem_free;
+ of_node_put(np);
pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
return true;
@@ -878,6 +881,8 @@ err_mem_free:
xive_irq_bitmap_remove_all();
err_unmap:
iounmap(tima);
+err_put:
+ of_node_put(np);
return false;
}
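
The xive_spapr_init() hunks above make every exit path, success included, drop the device_node reference taken during lookup. A minimal sketch of that pattern follows; the lookup, compatible string, property name and function name are purely illustrative assumptions, not taken from the patch:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

static bool __init example_ctrl_init(void)
{
	struct device_node *np;
	void __iomem *base;

	np = of_find_compatible_node(NULL, NULL, "vendor,example-ctrl");
	if (!np)
		return false;

	base = of_iomap(np, 0);
	if (!base)
		goto err_put;

	if (of_property_read_bool(np, "vendor,disabled"))
		goto err_unmap;

	of_node_put(np);	/* the success path drops the reference too */
	return true;

err_unmap:
	iounmap(base);
err_put:
	of_node_put(np);	/* failure paths funnel through the same put */
	return false;
}
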
diff --git a/arch/powerpc/xmon/ppc-opc.c b/arch/powerpc/xmon/ppc-opc.c
index dfb80810b16c..0774d711453e 100644
--- a/arch/powerpc/xmon/ppc-opc.c
+++ b/arch/powerpc/xmon/ppc-opc.c
@@ -408,7 +408,7 @@ const struct powerpc_operand powerpc_operands[] =
#define FXM4 FXM + 1
{ 0xff, 12, insert_fxm, extract_fxm,
PPC_OPERAND_OPTIONAL | PPC_OPERAND_OPTIONAL_VALUE},
- /* If the FXM4 operand is ommitted, use the sentinel value -1. */
+ /* If the FXM4 operand is omitted, use the sentinel value -1. */
{ -1, -1, NULL, NULL, 0},
/* The IMM20 field in an LI instruction. */
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index fd72753e8ad5..3d9782ea3fa7 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -31,7 +31,6 @@
#include <asm/ptrace.h>
#include <asm/smp.h>
#include <asm/string.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/xmon.h>
#include <asm/processor.h>
@@ -373,7 +372,7 @@ static void write_ciabr(unsigned long ciabr)
* set_ciabr() - set the CIABR
* @addr: The value to set.
*
- * This function sets the correct privilege value into the the HW
+ * This function sets the correct privilege value into the HW
* breakpoint address before writing it up in the CIABR register.
*/
static void set_ciabr(unsigned long addr)
@@ -921,9 +920,9 @@ static void insert_bpts(void)
bp->enabled = 0;
continue;
}
- if (IS_MTMSRD(instr) || IS_RFID(instr)) {
- printf("Breakpoint at %lx is on an mtmsrd or rfid "
- "instruction, disabling it\n", bp->address);
+ if (!can_single_step(ppc_inst_val(instr))) {
+ printf("Breakpoint at %lx is on an instruction that can't be single stepped, disabling it\n",
+ bp->address);
bp->enabled = 0;
continue;
}
@@ -1243,8 +1242,7 @@ static void bootcmds(void)
} else if (cmd == 'h') {
ppc_md.halt();
} else if (cmd == 'p') {
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
}
}
@@ -1470,9 +1468,8 @@ static long check_bp_loc(unsigned long addr)
printf("Can't read instruction at address %lx\n", addr);
return 0;
}
- if (IS_MTMSRD(instr) || IS_RFID(instr)) {
- printf("Breakpoints may not be placed on mtmsrd or rfid "
- "instructions\n");
+ if (!can_single_step(ppc_inst_val(instr))) {
+ printf("Breakpoints may not be placed on instructions that can't be single stepped\n");
return 0;
}
return 1;
@@ -2024,7 +2021,7 @@ static void dump_206_sprs(void)
if (!cpu_has_feature(CPU_FTR_ARCH_206))
return;
- /* Actually some of these pre-date 2.06, but whatevs */
+ /* Actually some of these pre-date 2.06, but whatever */
printf("srr0 = %.16lx srr1 = %.16lx dsisr = %.8lx\n",
mfspr(SPRN_SRR0), mfspr(SPRN_SRR1), mfspr(SPRN_DSISR));
diff --git a/arch/riscv/Kbuild b/arch/riscv/Kbuild
index fb3397223d52..afa83e307a2e 100644
--- a/arch/riscv/Kbuild
+++ b/arch/riscv/Kbuild
@@ -2,6 +2,10 @@
obj-y += kernel/ mm/ net/
obj-$(CONFIG_BUILTIN_DTB) += boot/dts/
+obj-y += errata/
+obj-$(CONFIG_KVM) += kvm/
+
+obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += purgatory/
# for cleaning
subdir- += boot
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index c0853f1474a7..c22f58155948 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -78,6 +78,7 @@ config RISCV
select HAVE_ARCH_KGDB if !XIP_KERNEL
select HAVE_ARCH_KGDB_QXFER_PKT
select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT && MMU
@@ -129,12 +130,18 @@ config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
default 8
+config ARCH_MMAP_RND_COMPAT_BITS_MIN
+ default 8
+
# max bits determined by the following formula:
# VA_BITS - PAGE_SHIFT - 3
config ARCH_MMAP_RND_BITS_MAX
default 24 if 64BIT # SV39 based
default 17
+config ARCH_MMAP_RND_COMPAT_BITS_MAX
+ default 17
+
# set if we run in machine mode, cleared if we run in supervisor mode
config RISCV_M_MODE
bool
@@ -326,6 +333,21 @@ config NODES_SHIFT
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
+config RISCV_ALTERNATIVE
+ bool
+ depends on !XIP_KERNEL
+ help
+ This Kconfig allows the kernel to automatically patch the
+ errata required by the execution platform at run time. The
+ code patching is performed once in the boot stages. It means
+ that the overhead from this mechanism is just taken once.
+
+config RISCV_ALTERNATIVE_EARLY
+ bool
+ depends on RISCV_ALTERNATIVE
+ help
+ Allows early patching of the kernel for special errata
+
config RISCV_ISA_C
bool "Emit compressed instructions when building Linux"
default y
@@ -336,6 +358,19 @@ config RISCV_ISA_C
If you don't know what to do here, say Y.
+config RISCV_ISA_SVPBMT
+ bool "SVPBMT extension support"
+ depends on 64BIT && MMU
+ select RISCV_ALTERNATIVE
+ default y
+ help
+ Adds support to dynamically detect the presence of the SVPBMT extension
+ (Supervisor-mode: page-based memory types) and enable its usage.
+
+ The SVPBMT extension is only available on 64Bit cpus.
+
+ If you don't know what to do here, say Y.
+
config FPU
bool "FPU support"
default y
@@ -361,7 +396,7 @@ config RISCV_SBI_V01
config RISCV_BOOT_SPINWAIT
bool "Spinwait booting method"
depends on SMP
- default y
+ default y if RISCV_SBI_V01 || RISCV_M_MODE
help
This enables support for booting Linux via spinwait method. In the
spinwait method, all cores randomly jump to Linux. One of the cores
@@ -372,6 +407,12 @@ config RISCV_BOOT_SPINWAIT
rely on ordered booting via SBI HSM extension which gets chosen
dynamically at runtime if the firmware supports it.
+ Since spinwait is incompatible with sparse hart IDs, it requires
+ NR_CPUS be large enough to contain the physical hart ID of the first
+ hart to enter Linux.
+
+ If unsure what to do here, say N.
+
config KEXEC
bool "Kexec system call"
select KEXEC_CORE
@@ -385,6 +426,26 @@ config KEXEC
The name comes from the similarity to the exec system call.
+config KEXEC_FILE
+ bool "kexec file based system call"
+ select KEXEC_CORE
+ select KEXEC_ELF
+ select HAVE_IMA_KEXEC if IMA
+ depends on 64BIT
+ help
+ This is the new version of the kexec system call. It is file based:
+ it takes file descriptors for the kernel and initramfs as arguments,
+ as opposed to the list of segments accepted by the previous system
+ call.
+
+ If you don't know what to do here, say Y.
+
+config ARCH_HAS_KEXEC_PURGATORY
+ def_bool KEXEC_FILE
+ select BUILD_BIN2C
+ depends on CRYPTO=y
+ depends on CRYPTO_SHA256=y
+
config CRASH_DUMP
bool "Build kdump crash kernel"
help
@@ -396,6 +457,18 @@ config CRASH_DUMP
For more details see Documentation/admin-guide/kdump/kdump.rst
+config COMPAT
+ bool "Kernel support for 32-bit U-mode"
+ default 64BIT
+ depends on 64BIT && MMU
+ help
+ This option enables support for a 32-bit U-mode running under a 64-bit
+ kernel at S-mode. riscv32-specific components such as system calls,
+ the user helper functions (vdso), signal rt_frame functions and the
+ ptrace interface are handled appropriately by the kernel.
+
+ If you want to execute 32-bit userspace applications, say Y.
+
endmenu
menu "Boot options"
diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas
index 0aacd7052585..ebfcd5cc6eaf 100644
--- a/arch/riscv/Kconfig.erratas
+++ b/arch/riscv/Kconfig.erratas
@@ -1,18 +1,9 @@
menu "CPU errata selection"
-config RISCV_ERRATA_ALTERNATIVE
- bool "RISC-V alternative scheme"
- depends on !XIP_KERNEL
- default y
- help
- This Kconfig allows the kernel to automatically patch the
- errata required by the execution platform at run time. The
- code patching is performed once in the boot stages. It means
- that the overhead from this mechanism is just taken once.
-
config ERRATA_SIFIVE
bool "SiFive errata"
- depends on RISCV_ERRATA_ALTERNATIVE
+ depends on !XIP_KERNEL
+ select RISCV_ALTERNATIVE
help
All SiFive errata Kconfig depend on this Kconfig. Disabling
this Kconfig will disable all SiFive errata. Please say "Y"
@@ -42,4 +33,25 @@ config ERRATA_SIFIVE_CIP_1200
If you don't know what to do here, say "Y".
+config ERRATA_THEAD
+ bool "T-HEAD errata"
+ select RISCV_ALTERNATIVE
+ help
+ All T-HEAD errata Kconfig depend on this Kconfig. Disabling
+ this Kconfig will disable all T-HEAD errata. Please say "Y"
+ here if your platform uses T-HEAD CPU cores.
+
+ Otherwise, please say "N" here to avoid unnecessary overhead.
+
+config ERRATA_THEAD_PBMT
+ bool "Apply T-Head memory type errata"
+ depends on ERRATA_THEAD && 64BIT
+ select RISCV_ALTERNATIVE_EARLY
+ default y
+ help
+ This will apply the memory type errata to handle the non-standard
+ memory type bits in page-table-entries on T-Head SoCs.
+
+ If you don't know what to do here, say "Y".
+
endmenu
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index f6ef358d8a2c..85670dc9fe95 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -14,7 +14,6 @@ config SOC_SIFIVE
select CLK_SIFIVE
select CLK_SIFIVE_PRCI
select SIFIVE_PLIC
- select RISCV_ERRATA_ALTERNATIVE if !XIP_KERNEL
select ERRATA_SIFIVE if !XIP_KERNEL
help
This enables support for SiFive SoC platform hardware.
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 2b93ca9f4fc3..34cf8a598617 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -103,21 +103,23 @@ endif
head-y := arch/riscv/kernel/head.o
-core-$(CONFIG_RISCV_ERRATA_ALTERNATIVE) += arch/riscv/errata/
-core-$(CONFIG_KVM) += arch/riscv/kvm/
-
libs-y += arch/riscv/lib/
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
+ $(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
+ $(build)=arch/riscv/kernel/compat_vdso $@)
ifeq ($(KBUILD_EXTMOD),)
ifeq ($(CONFIG_MMU),y)
prepare: vdso_prepare
vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h
+ $(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
+ $(build)=arch/riscv/kernel/compat_vdso include/generated/compat_vdso-offsets.h)
+
endif
endif
@@ -153,3 +155,7 @@ PHONY += rv64_randconfig
rv64_randconfig:
$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/riscv/configs/64-bit.config \
-f $(srctree)/Makefile randconfig
+
+PHONY += rv32_defconfig
+rv32_defconfig:
+ $(Q)$(MAKE) -f $(srctree)/Makefile defconfig 32-bit.config
diff --git a/arch/riscv/boot/.gitignore b/arch/riscv/boot/.gitignore
index 90e66adb7de5..0cea9f7fa9d5 100644
--- a/arch/riscv/boot/.gitignore
+++ b/arch/riscv/boot/.gitignore
@@ -4,3 +4,4 @@ Image.*
loader
loader.lds
loader.bin
+xipImage
diff --git a/arch/riscv/boot/dts/microchip/Makefile b/arch/riscv/boot/dts/microchip/Makefile
index 855c1502d912..39aae7b04f1c 100644
--- a/arch/riscv/boot/dts/microchip/Makefile
+++ b/arch/riscv/boot/dts/microchip/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += microchip-mpfs-icicle-kit.dtb
+dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += mpfs-icicle-kit.dtb
+dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += mpfs-polarberry.dtb
obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi
index ccaac3371cf9..0d28858b83f2 100644
--- a/arch/riscv/boot/dts/microchip/microchip-mpfs-fabric.dtsi
+++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi
@@ -2,6 +2,8 @@
/* Copyright (c) 2020-2021 Microchip Technology Inc */
/ {
+ compatible = "microchip,mpfs-icicle-reference-rtlv2203", "microchip,mpfs";
+
core_pwm0: pwm@41000000 {
compatible = "microchip,corepwm-rtl-v4";
reg = <0x0 0x41000000 0x0 0xF0>;
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
index 3392153dd0f1..044982a11df5 100644
--- a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
+++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
@@ -3,7 +3,8 @@
/dts-v1/;
-#include "microchip-mpfs.dtsi"
+#include "mpfs.dtsi"
+#include "mpfs-icicle-kit-fabric.dtsi"
/* Clock frequency (in Hz) of the rtcclk */
#define RTCCLK_FREQ 1000000
@@ -32,41 +33,71 @@
ddrc_cache_lo: memory@80000000 {
device_type = "memory";
reg = <0x0 0x80000000 0x0 0x2e000000>;
- clocks = <&clkcfg CLK_DDRC>;
status = "okay";
};
ddrc_cache_hi: memory@1000000000 {
device_type = "memory";
reg = <0x10 0x0 0x0 0x40000000>;
- clocks = <&clkcfg CLK_DDRC>;
status = "okay";
};
};
-&refclk {
- clock-frequency = <125000000>;
+&core_pwm0 {
+ status = "okay";
};
-&mmuart1 {
+&gpio2 {
+ interrupts = <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>;
status = "okay";
};
-&mmuart2 {
+&i2c0 {
status = "okay";
};
-&mmuart3 {
+&i2c1 {
status = "okay";
};
-&mmuart4 {
+&i2c2 {
status = "okay";
};
-&mmc {
+&mac0 {
+ phy-mode = "sgmii";
+ phy-handle = <&phy0>;
+ status = "okay";
+};
+
+&mac1 {
+ phy-mode = "sgmii";
+ phy-handle = <&phy1>;
status = "okay";
+ phy1: ethernet-phy@9 {
+ reg = <9>;
+ ti,fifo-depth = <0x1>;
+ };
+
+ phy0: ethernet-phy@8 {
+ reg = <8>;
+ ti,fifo-depth = <0x1>;
+ };
+};
+
+&mbox {
+ status = "okay";
+};
+
+&mmc {
bus-width = <4>;
disable-wp;
cap-sd-highspeed;
@@ -78,73 +109,46 @@
sd-uhs-sdr25;
sd-uhs-sdr50;
sd-uhs-sdr104;
-};
-
-&spi0 {
status = "okay";
};
-&spi1 {
+&mmuart1 {
status = "okay";
};
-&qspi {
+&mmuart2 {
status = "okay";
};
-&i2c0 {
+&mmuart3 {
status = "okay";
};
-&i2c1 {
+&mmuart4 {
status = "okay";
};
-&i2c2 {
+&pcie {
status = "okay";
};
-&mac0 {
- phy-mode = "sgmii";
- phy-handle = <&phy0>;
-};
-
-&mac1 {
+&qspi {
status = "okay";
- phy-mode = "sgmii";
- phy-handle = <&phy1>;
- phy1: ethernet-phy@9 {
- reg = <9>;
- ti,fifo-depth = <0x1>;
- };
- phy0: ethernet-phy@8 {
- reg = <8>;
- ti,fifo-depth = <0x1>;
- };
};
-&gpio2 {
- interrupts = <53>, <53>, <53>, <53>,
- <53>, <53>, <53>, <53>,
- <53>, <53>, <53>, <53>,
- <53>, <53>, <53>, <53>,
- <53>, <53>, <53>, <53>,
- <53>, <53>, <53>, <53>,
- <53>, <53>, <53>, <53>,
- <53>, <53>, <53>, <53>;
- status = "okay";
+&refclk {
+ clock-frequency = <125000000>;
};
&rtc {
status = "okay";
};
-&usb {
+&spi0 {
status = "okay";
- dr_mode = "host";
};
-&mbox {
+&spi1 {
status = "okay";
};
@@ -152,10 +156,7 @@
status = "okay";
};
-&pcie {
- status = "okay";
-};
-
-&core_pwm0 {
+&usb {
status = "okay";
+ dr_mode = "host";
};
diff --git a/arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi
new file mode 100644
index 000000000000..49380c428ec9
--- /dev/null
+++ b/arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2020-2022 Microchip Technology Inc */
+
+/ {
+ fabric_clk3: fabric-clk3 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <62500000>;
+ };
+
+ fabric_clk1: fabric-clk1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ };
+};
diff --git a/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts b/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts
new file mode 100644
index 000000000000..82c93c8f5c17
--- /dev/null
+++ b/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2020-2022 Microchip Technology Inc */
+
+/dts-v1/;
+
+#include "mpfs.dtsi"
+#include "mpfs-polarberry-fabric.dtsi"
+
+/* Clock frequency (in Hz) of the rtcclk */
+#define MTIMER_FREQ 1000000
+
+/ {
+ model = "Sundance PolarBerry";
+ compatible = "sundance,polarberry", "microchip,mpfs";
+
+ aliases {
+ ethernet0 = &mac1;
+ serial0 = &mmuart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ cpus {
+ timebase-frequency = <MTIMER_FREQ>;
+ };
+
+ ddrc_cache_lo: memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x0 0x2e000000>;
+ };
+
+ ddrc_cache_hi: memory@1000000000 {
+ device_type = "memory";
+ reg = <0x10 0x00000000 0x0 0xC0000000>;
+ };
+};
+
+/*
+ * phy0 is connected to mac0, but the port itself is on the (optional) carrier
+ * board.
+ */
+&mac0 {
+ phy-mode = "sgmii";
+ phy-handle = <&phy0>;
+ status = "disabled";
+};
+
+&mac1 {
+ phy-mode = "sgmii";
+ phy-handle = <&phy1>;
+ status = "okay";
+
+ phy1: ethernet-phy@5 {
+ reg = <5>;
+ ti,fifo-depth = <0x01>;
+ };
+
+ phy0: ethernet-phy@4 {
+ reg = <4>;
+ ti,fifo-depth = <0x01>;
+ };
+};
+
+&mbox {
+ status = "okay";
+};
+
+&mmc {
+ bus-width = <4>;
+ disable-wp;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ card-detect-delay = <200>;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ status = "okay";
+};
+
+&mmuart0 {
+ status = "okay";
+};
+
+&refclk {
+ clock-frequency = <125000000>;
+};
+
+&rtc {
+ status = "okay";
+};
+
+&syscontroller {
+ status = "okay";
+};
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi b/arch/riscv/boot/dts/microchip/mpfs.dtsi
index cf2f55e1dcb6..8c3259134194 100644
--- a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
+++ b/arch/riscv/boot/dts/microchip/mpfs.dtsi
@@ -3,7 +3,6 @@
/dts-v1/;
#include "dt-bindings/clock/microchip,mpfs-clock.h"
-#include "microchip-mpfs-fabric.dtsi"
/ {
#address-cells = <2>;
@@ -146,6 +145,11 @@
#clock-cells = <0>;
};
+ syscontroller: syscontroller {
+ compatible = "microchip,mpfs-sys-controller";
+ mboxes = <&mbox 0>;
+ };
+
soc {
#address-cells = <2>;
#size-cells = <2>;
@@ -446,10 +450,5 @@
#mbox-cells = <1>;
status = "disabled";
};
-
- syscontroller: syscontroller {
- compatible = "microchip,mpfs-sys-controller";
- mboxes = <&mbox 0>;
- };
};
};
diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
index 5c638fd5b35c..e3172d0ffac4 100644
--- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
@@ -168,11 +168,12 @@
status = "disabled";
};
dma: dma-controller@3000000 {
- compatible = "sifive,fu540-c000-pdma";
+ compatible = "sifive,fu540-c000-pdma", "sifive,pdma0";
reg = <0x0 0x3000000 0x0 0x8000>;
interrupt-parent = <&plic0>;
interrupts = <23>, <24>, <25>, <26>, <27>, <28>, <29>,
<30>;
+ dma-channels = <4>;
#dma-cells = <1>;
};
uart1: serial@10011000 {
diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile
index b8f8740a3e44..a1055965fbee 100644
--- a/arch/riscv/errata/Makefile
+++ b/arch/riscv/errata/Makefile
@@ -1,2 +1,2 @@
-obj-y += alternative.o
obj-$(CONFIG_ERRATA_SIFIVE) += sifive/
+obj-$(CONFIG_ERRATA_THEAD) += thead/
diff --git a/arch/riscv/errata/alternative.c b/arch/riscv/errata/alternative.c
deleted file mode 100644
index e8b4a0fe488c..000000000000
--- a/arch/riscv/errata/alternative.c
+++ /dev/null
@@ -1,75 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * alternative runtime patching
- * inspired by the ARM64 and x86 version
- *
- * Copyright (C) 2021 Sifive.
- */
-
-#include <linux/init.h>
-#include <linux/cpu.h>
-#include <linux/uaccess.h>
-#include <asm/alternative.h>
-#include <asm/sections.h>
-#include <asm/vendorid_list.h>
-#include <asm/sbi.h>
-#include <asm/csr.h>
-
-static struct cpu_manufacturer_info_t {
- unsigned long vendor_id;
- unsigned long arch_id;
- unsigned long imp_id;
-} cpu_mfr_info;
-
-static void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end,
- unsigned long archid,
- unsigned long impid) __initdata;
-
-static inline void __init riscv_fill_cpu_mfr_info(void)
-{
-#ifdef CONFIG_RISCV_M_MODE
- cpu_mfr_info.vendor_id = csr_read(CSR_MVENDORID);
- cpu_mfr_info.arch_id = csr_read(CSR_MARCHID);
- cpu_mfr_info.imp_id = csr_read(CSR_MIMPID);
-#else
- cpu_mfr_info.vendor_id = sbi_get_mvendorid();
- cpu_mfr_info.arch_id = sbi_get_marchid();
- cpu_mfr_info.imp_id = sbi_get_mimpid();
-#endif
-}
-
-static void __init init_alternative(void)
-{
- riscv_fill_cpu_mfr_info();
-
- switch (cpu_mfr_info.vendor_id) {
-#ifdef CONFIG_ERRATA_SIFIVE
- case SIFIVE_VENDOR_ID:
- vendor_patch_func = sifive_errata_patch_func;
- break;
-#endif
- default:
- vendor_patch_func = NULL;
- }
-}
-
-/*
- * This is called very early in the boot process (directly after we run
- * a feature detect on the boot CPU). No need to worry about other CPUs
- * here.
- */
-void __init apply_boot_alternatives(void)
-{
- /* If called on non-boot cpu things could go wrong */
- WARN_ON(smp_processor_id() != 0);
-
- init_alternative();
-
- if (!vendor_patch_func)
- return;
-
- vendor_patch_func((struct alt_entry *)__alt_start,
- (struct alt_entry *)__alt_end,
- cpu_mfr_info.arch_id, cpu_mfr_info.imp_id);
-}
-
diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c
index f5e5ae70e829..672f02b21ce0 100644
--- a/arch/riscv/errata/sifive/errata.c
+++ b/arch/riscv/errata/sifive/errata.c
@@ -4,6 +4,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/string.h>
#include <linux/bug.h>
#include <asm/patch.h>
@@ -54,7 +55,8 @@ static struct errata_info_t errata_list[ERRATA_SIFIVE_NUMBER] = {
},
};
-static u32 __init sifive_errata_probe(unsigned long archid, unsigned long impid)
+static u32 __init_or_module sifive_errata_probe(unsigned long archid,
+ unsigned long impid)
{
int idx;
u32 cpu_req_errata = 0;
@@ -66,7 +68,7 @@ static u32 __init sifive_errata_probe(unsigned long archid, unsigned long impid)
return cpu_req_errata;
}
-static void __init warn_miss_errata(u32 miss_errata)
+static void __init_or_module warn_miss_errata(u32 miss_errata)
{
int i;
@@ -79,14 +81,22 @@ static void __init warn_miss_errata(u32 miss_errata)
pr_warn("----------------------------------------------------------------\n");
}
-void __init sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
- unsigned long archid, unsigned long impid)
+void __init_or_module sifive_errata_patch_func(struct alt_entry *begin,
+ struct alt_entry *end,
+ unsigned long archid,
+ unsigned long impid,
+ unsigned int stage)
{
struct alt_entry *alt;
- u32 cpu_req_errata = sifive_errata_probe(archid, impid);
+ u32 cpu_req_errata;
u32 cpu_apply_errata = 0;
u32 tmp;
+ if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
+ return;
+
+ cpu_req_errata = sifive_errata_probe(archid, impid);
+
for (alt = begin; alt < end; alt++) {
if (alt->vendor_id != SIFIVE_VENDOR_ID)
continue;
diff --git a/arch/riscv/errata/thead/Makefile b/arch/riscv/errata/thead/Makefile
new file mode 100644
index 000000000000..137e700d9d3f
--- /dev/null
+++ b/arch/riscv/errata/thead/Makefile
@@ -0,0 +1,11 @@
+ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
+CFLAGS_errata.o := -mcmodel=medany
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_errata.o = $(CC_FLAGS_FTRACE)
+endif
+ifdef CONFIG_KASAN
+KASAN_SANITIZE_errata.o := n
+endif
+endif
+
+obj-y += errata.o
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
new file mode 100644
index 000000000000..e5d75270b99c
--- /dev/null
+++ b/arch/riscv/errata/thead/errata.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <asm/alternative.h>
+#include <asm/cacheflush.h>
+#include <asm/errata_list.h>
+#include <asm/patch.h>
+#include <asm/vendorid_list.h>
+
+struct errata_info {
+ char name[ERRATA_STRING_LENGTH_MAX];
+ bool (*check_func)(unsigned long arch_id, unsigned long impid);
+ unsigned int stage;
+};
+
+static bool errata_mt_check_func(unsigned long arch_id, unsigned long impid)
+{
+ if (arch_id != 0 || impid != 0)
+ return false;
+ return true;
+}
+
+static const struct errata_info errata_list[ERRATA_THEAD_NUMBER] = {
+ {
+ .name = "memory-types",
+ .stage = RISCV_ALTERNATIVES_EARLY_BOOT,
+ .check_func = errata_mt_check_func
+ },
+};
+
+static u32 thead_errata_probe(unsigned int stage, unsigned long archid, unsigned long impid)
+{
+ const struct errata_info *info;
+ u32 cpu_req_errata = 0;
+ int idx;
+
+ for (idx = 0; idx < ERRATA_THEAD_NUMBER; idx++) {
+ info = &errata_list[idx];
+
+ if ((stage == RISCV_ALTERNATIVES_MODULE ||
+ info->stage == stage) && info->check_func(archid, impid))
+ cpu_req_errata |= (1U << idx);
+ }
+
+ return cpu_req_errata;
+}
+
+void __init_or_module thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid,
+ unsigned int stage)
+{
+ struct alt_entry *alt;
+ u32 cpu_req_errata = thead_errata_probe(stage, archid, impid);
+ u32 tmp;
+
+ for (alt = begin; alt < end; alt++) {
+ if (alt->vendor_id != THEAD_VENDOR_ID)
+ continue;
+ if (alt->errata_id >= ERRATA_THEAD_NUMBER)
+ continue;
+
+ tmp = (1U << alt->errata_id);
+ if (cpu_req_errata & tmp) {
+ /* On vm-alternatives, the mmu isn't running yet */
+ if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
+ memcpy((void *)__pa_symbol(alt->old_ptr),
+ (void *)__pa_symbol(alt->alt_ptr), alt->alt_len);
+ else
+ patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
+ }
+ }
+
+ if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
+ local_flush_icache_all();
+}
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
index 67406c376389..ec2f3f1b836f 100644
--- a/arch/riscv/include/asm/alternative-macros.h
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -2,7 +2,7 @@
#ifndef __ASM_ALTERNATIVE_MACROS_H
#define __ASM_ALTERNATIVE_MACROS_H
-#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE
+#ifdef CONFIG_RISCV_ALTERNATIVE
#ifdef __ASSEMBLY__
@@ -21,17 +21,25 @@
.popsection
.subsection 1
888 :
+ .option push
+ .option norvc
+ .option norelax
\new_c
+ .option pop
889 :
- .previous
.org . - (889b - 888b) + (887b - 886b)
.org . - (887b - 886b) + (889b - 888b)
+ .previous
.endif
.endm
.macro __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable
886 :
+ .option push
+ .option norvc
+ .option norelax
\old_c
+ .option pop
887 :
ALT_NEW_CONTENT \vendor_id, \errata_id, \enable, \new_c
.endm
@@ -39,44 +47,97 @@
#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
__ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)
+.macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
+ new_c_2, vendor_id_2, errata_id_2, enable_2
+886 :
+ .option push
+ .option norvc
+ .option norelax
+ \old_c
+ .option pop
+887 :
+ ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1
+ ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2
+.endm
+
+#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
+ CONFIG_k_1, \
+ new_c_2, vendor_id_2, errata_id_2, \
+ CONFIG_k_2) \
+ __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, \
+ IS_ENABLED(CONFIG_k_1), \
+ new_c_2, vendor_id_2, errata_id_2, \
+ IS_ENABLED(CONFIG_k_2)
+
#else /* !__ASSEMBLY__ */
#include <asm/asm.h>
#include <linux/stringify.h>
-#define ALT_ENTRY(oldptr, newptr, vendor_id, errata_id, newlen) \
- RISCV_PTR " " oldptr "\n" \
- RISCV_PTR " " newptr "\n" \
- REG_ASM " " vendor_id "\n" \
- REG_ASM " " newlen "\n" \
+#define ALT_ENTRY(oldptr, newptr, vendor_id, errata_id, newlen) \
+ RISCV_PTR " " oldptr "\n" \
+ RISCV_PTR " " newptr "\n" \
+ REG_ASM " " vendor_id "\n" \
+ REG_ASM " " newlen "\n" \
".word " errata_id "\n"
-#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \
+#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \
".if " __stringify(enable) " == 1\n" \
".pushsection .alternative, \"a\"\n" \
ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \
".popsection\n" \
".subsection 1\n" \
"888 :\n" \
+ ".option push\n" \
+ ".option norvc\n" \
+ ".option norelax\n" \
new_c "\n" \
+ ".option pop\n" \
"889 :\n" \
- ".previous\n" \
".org . - (887b - 886b) + (889b - 888b)\n" \
".org . - (889b - 888b) + (887b - 886b)\n" \
+ ".previous\n" \
".endif\n"
-#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable) \
- "886 :\n" \
- old_c "\n" \
- "887 :\n" \
+#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable) \
+ "886 :\n" \
+ ".option push\n" \
+ ".option norvc\n" \
+ ".option norelax\n" \
+ old_c "\n" \
+ ".option pop\n" \
+ "887 :\n" \
ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)
#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
__ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))
+#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
+ enable_1, \
+ new_c_2, vendor_id_2, errata_id_2, \
+ enable_2) \
+ "886 :\n" \
+ ".option push\n" \
+ ".option norvc\n" \
+ ".option norelax\n" \
+ old_c "\n" \
+ ".option pop\n" \
+ "887 :\n" \
+ ALT_NEW_CONTENT(vendor_id_1, errata_id_1, enable_1, new_c_1) \
+ ALT_NEW_CONTENT(vendor_id_2, errata_id_2, enable_2, new_c_2)
+
+#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
+ CONFIG_k_1, \
+ new_c_2, vendor_id_2, errata_id_2, \
+ CONFIG_k_2) \
+ __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
+ IS_ENABLED(CONFIG_k_1), \
+ new_c_2, vendor_id_2, errata_id_2, \
+ IS_ENABLED(CONFIG_k_2))
+
#endif /* __ASSEMBLY__ */
-#else /* !CONFIG_RISCV_ERRATA_ALTERNATIVE*/
+#else /* CONFIG_RISCV_ALTERNATIVE */
#ifdef __ASSEMBLY__
.macro __ALTERNATIVE_CFG old_c
@@ -86,6 +147,12 @@
#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
__ALTERNATIVE_CFG old_c
+#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
+ CONFIG_k_1, \
+ new_c_2, vendor_id_2, errata_id_2, \
+ CONFIG_k_2) \
+ __ALTERNATIVE_CFG old_c
+
#else /* !__ASSEMBLY__ */
#define __ALTERNATIVE_CFG(old_c) \
@@ -94,8 +161,15 @@
#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
__ALTERNATIVE_CFG(old_c)
+#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
+ CONFIG_k_1, \
+ new_c_2, vendor_id_2, errata_id_2, \
+ CONFIG_k_2) \
+ __ALTERNATIVE_CFG(old_c)
+
#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_RISCV_ERRATA_ALTERNATIVE */
+#endif /* CONFIG_RISCV_ALTERNATIVE */
+
/*
* Usage:
* ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k)
@@ -118,25 +192,14 @@
* this case, this vendor can create a new macro ALTERNATIVE_2() based
* on the following sample code and then replace ALTERNATIVE() with
* ALTERNATIVE_2() to append its customized content.
- *
- * .macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
- * new_c_2, vendor_id_2, errata_id_2, enable_2
- * 886 :
- * \old_c
- * 887 :
- * ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1
- * ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2
- * .endm
- *
- * #define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
- * new_c_2, vendor_id_2, errata_id_2, CONFIG_k_2) \
- * __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, IS_ENABLED(CONFIG_k_1), \
- * new_c_2, vendor_id_2, errata_id_2, IS_ENABLED(CONFIG_k_2) \
- *
- * #define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
- * new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) \
- * _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
- * new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2)
- *
*/
+#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, \
+ errata_id_1, CONFIG_k_1, \
+ new_content_2, vendor_id_2, \
+ errata_id_2, CONFIG_k_2) \
+ _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, \
+ errata_id_1, CONFIG_k_1, \
+ new_content_2, vendor_id_2, \
+ errata_id_2, CONFIG_k_2)
+
#endif
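
The usage comment above no longer just sketches ALTERNATIVE_2(); the macro is now defined for real, taking one original sequence plus two candidate replacements, each keyed to its own vendor ID, errata/feature ID and Kconfig gate. The in-tree user is the ALT_SVPBMT() helper added to errata_list.h further down this diff; the fragment below only illustrates the calling convention, and every EXAMPLE_* symbol and CSR number is made up:

/* Illustrative only: the feature/erratum IDs, Kconfig symbols and CSR
 * numbers are placeholders, not definitions from this patch. */
static inline unsigned long read_example_reg(void)
{
	unsigned long val;

	asm(ALTERNATIVE_2("li %0, 0",
			  "csrr %0, 0x7c0", 0,
			  EXAMPLE_FEATURE_ID, CONFIG_EXAMPLE_FEATURE,
			  "csrr %0, 0x5c0", EXAMPLE_VENDOR_ID,
			  EXAMPLE_ERRATUM_ID, CONFIG_EXAMPLE_ERRATUM)
	    : "=r" (val));
	return val;
}

As with ALTERNATIVE(), each replacement must assemble to the same length as the original sequence, which is why the macros now force norvc/norelax around both sides.
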
diff --git a/arch/riscv/include/asm/alternative.h b/arch/riscv/include/asm/alternative.h
index e625d3cafbed..6511dd73e812 100644
--- a/arch/riscv/include/asm/alternative.h
+++ b/arch/riscv/include/asm/alternative.h
@@ -12,12 +12,20 @@
#ifndef __ASSEMBLY__
+#ifdef CONFIG_RISCV_ALTERNATIVE
+
#include <linux/init.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/hwcap.h>
+#define RISCV_ALTERNATIVES_BOOT 0 /* alternatives applied during regular boot */
+#define RISCV_ALTERNATIVES_MODULE 1 /* alternatives applied during module-init */
+#define RISCV_ALTERNATIVES_EARLY_BOOT 2 /* alternatives applied before mmu start */
+
void __init apply_boot_alternatives(void);
+void __init apply_early_boot_alternatives(void);
+void apply_module_alternatives(void *start, size_t length);
struct alt_entry {
void *old_ptr; /* address of original instruction or data */
@@ -33,7 +41,22 @@ struct errata_checkfunc_id {
};
void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
- unsigned long archid, unsigned long impid);
+ unsigned long archid, unsigned long impid,
+ unsigned int stage);
+void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid,
+ unsigned int stage);
+
+void riscv_cpufeature_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned int stage);
+
+#else /* CONFIG_RISCV_ALTERNATIVE */
+
+static inline void apply_boot_alternatives(void) { }
+static inline void apply_early_boot_alternatives(void) { }
+static inline void apply_module_alternatives(void *start, size_t length) { }
+
+#endif /* CONFIG_RISCV_ALTERNATIVE */
#endif
#endif
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 8c2549b16ac0..618d7c5af1a2 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -67,30 +67,4 @@
#error "Unexpected __SIZEOF_SHORT__"
#endif
-#ifdef __ASSEMBLY__
-
-/* Common assembly source macros */
-
-#ifdef CONFIG_XIP_KERNEL
-.macro XIP_FIXUP_OFFSET reg
- REG_L t0, _xip_fixup
- add \reg, \reg, t0
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
- la t1, __data_loc
- REG_L t1, _xip_phys_offset
- sub \reg, \reg, t1
- add \reg, \reg, t0
-.endm
-_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
-_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
-#else
-.macro XIP_FIXUP_OFFSET reg
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
-.endm
-#endif /* CONFIG_XIP_KERNEL */
-
-#endif /* __ASSEMBLY__ */
-
#endif /* _ASM_RISCV_ASM_H */
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index ac9bdf4fc404..0dfe9d857a76 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -310,47 +310,129 @@ ATOMIC_OPS()
#undef ATOMIC_OPS
#undef ATOMIC_OP
-static __always_inline int arch_atomic_sub_if_positive(atomic_t *v, int offset)
+static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0: lr.w %[p], %[c]\n"
+ " bltz %[p], 1f\n"
+ " addi %[rc], %[p], 1\n"
+ " sc.w.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return !(prev < 0);
+}
+
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+
+static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0: lr.w %[p], %[c]\n"
+ " bgtz %[p], 1f\n"
+ " addi %[rc], %[p], -1\n"
+ " sc.w.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return !(prev > 0);
+}
+
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+
+static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
int prev, rc;
__asm__ __volatile__ (
"0: lr.w %[p], %[c]\n"
- " sub %[rc], %[p], %[o]\n"
+ " addi %[rc], %[p], -1\n"
" bltz %[rc], 1f\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- : [o]"r" (offset)
+ :
: "memory");
- return prev - offset;
+ return prev - 1;
}
-#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(v, 1)
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline s64 arch_atomic64_sub_if_positive(atomic64_t *v, s64 offset)
+static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
+{
+ s64 prev;
+ long rc;
+
+ __asm__ __volatile__ (
+ "0: lr.d %[p], %[c]\n"
+ " bltz %[p], 1f\n"
+ " addi %[rc], %[p], 1\n"
+ " sc.d.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return !(prev < 0);
+}
+
+#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
+
+static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
+{
+ s64 prev;
+ long rc;
+
+ __asm__ __volatile__ (
+ "0: lr.d %[p], %[c]\n"
+ " bgtz %[p], 1f\n"
+ " addi %[rc], %[p], -1\n"
+ " sc.d.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return !(prev > 0);
+}
+
+#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
+
+static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 prev;
long rc;
__asm__ __volatile__ (
"0: lr.d %[p], %[c]\n"
- " sub %[rc], %[p], %[o]\n"
+ " addi %[rc], %[p], -1\n"
" bltz %[rc], 1f\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- : [o]"r" (offset)
+ :
: "memory");
- return prev - offset;
+ return prev - 1;
}
-#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(v, 1)
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif
#endif /* _ASM_RISCV_ATOMIC_H */
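
The open-coded sub_if_positive(v, offset) helpers above are replaced by dedicated dec_if_positive/inc_unless_negative/dec_unless_positive loops with the constant folded into the addi. Outside the lr/sc form, the semantics amount to a compare-and-swap loop; a rough portable sketch in plain C11 (not the kernel implementation, names invented here):

#include <stdatomic.h>

/* Sketch of atomic_dec_if_positive() semantics: decrement only if the
 * result stays non-negative, and return old - 1 either way so the
 * caller can tell from a negative return that nothing was written. */
static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);
	int new;

	do {
		new = old - 1;
		if (new < 0)
			return new;	/* would go negative: leave *v untouched */
	} while (!atomic_compare_exchange_weak_explicit(v, &old, new,
							memory_order_acq_rel,
							memory_order_relaxed));
	return new;
}
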
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 36dc962f6343..12debce235e5 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -348,18 +348,6 @@
#define arch_cmpxchg_local(ptr, o, n) \
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
-#define cmpxchg32(ptr, o, n) \
-({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- arch_cmpxchg((ptr), (o), (n)); \
-})
-
-#define cmpxchg32_local(ptr, o, n) \
-({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- arch_cmpxchg_relaxed((ptr), (o), (n)) \
-})
-
#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h
new file mode 100644
index 000000000000..2ac955b51148
--- /dev/null
+++ b/arch/riscv/include/asm/compat.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_COMPAT_H
+#define __ASM_COMPAT_H
+
+#define COMPAT_UTS_MACHINE "riscv\0\0"
+
+/*
+ * Architecture specific compatibility types
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <asm-generic/compat.h>
+
+static inline int is_compat_task(void)
+{
+ return test_thread_flag(TIF_32BIT);
+}
+
+struct compat_user_regs_struct {
+ compat_ulong_t pc;
+ compat_ulong_t ra;
+ compat_ulong_t sp;
+ compat_ulong_t gp;
+ compat_ulong_t tp;
+ compat_ulong_t t0;
+ compat_ulong_t t1;
+ compat_ulong_t t2;
+ compat_ulong_t s0;
+ compat_ulong_t s1;
+ compat_ulong_t a0;
+ compat_ulong_t a1;
+ compat_ulong_t a2;
+ compat_ulong_t a3;
+ compat_ulong_t a4;
+ compat_ulong_t a5;
+ compat_ulong_t a6;
+ compat_ulong_t a7;
+ compat_ulong_t s2;
+ compat_ulong_t s3;
+ compat_ulong_t s4;
+ compat_ulong_t s5;
+ compat_ulong_t s6;
+ compat_ulong_t s7;
+ compat_ulong_t s8;
+ compat_ulong_t s9;
+ compat_ulong_t s10;
+ compat_ulong_t s11;
+ compat_ulong_t t3;
+ compat_ulong_t t4;
+ compat_ulong_t t5;
+ compat_ulong_t t6;
+};
+
+static inline void regs_to_cregs(struct compat_user_regs_struct *cregs,
+ struct pt_regs *regs)
+{
+ cregs->pc = (compat_ulong_t) regs->epc;
+ cregs->ra = (compat_ulong_t) regs->ra;
+ cregs->sp = (compat_ulong_t) regs->sp;
+ cregs->gp = (compat_ulong_t) regs->gp;
+ cregs->tp = (compat_ulong_t) regs->tp;
+ cregs->t0 = (compat_ulong_t) regs->t0;
+ cregs->t1 = (compat_ulong_t) regs->t1;
+ cregs->t2 = (compat_ulong_t) regs->t2;
+ cregs->s0 = (compat_ulong_t) regs->s0;
+ cregs->s1 = (compat_ulong_t) regs->s1;
+ cregs->a0 = (compat_ulong_t) regs->a0;
+ cregs->a1 = (compat_ulong_t) regs->a1;
+ cregs->a2 = (compat_ulong_t) regs->a2;
+ cregs->a3 = (compat_ulong_t) regs->a3;
+ cregs->a4 = (compat_ulong_t) regs->a4;
+ cregs->a5 = (compat_ulong_t) regs->a5;
+ cregs->a6 = (compat_ulong_t) regs->a6;
+ cregs->a7 = (compat_ulong_t) regs->a7;
+ cregs->s2 = (compat_ulong_t) regs->s2;
+ cregs->s3 = (compat_ulong_t) regs->s3;
+ cregs->s4 = (compat_ulong_t) regs->s4;
+ cregs->s5 = (compat_ulong_t) regs->s5;
+ cregs->s6 = (compat_ulong_t) regs->s6;
+ cregs->s7 = (compat_ulong_t) regs->s7;
+ cregs->s8 = (compat_ulong_t) regs->s8;
+ cregs->s9 = (compat_ulong_t) regs->s9;
+ cregs->s10 = (compat_ulong_t) regs->s10;
+ cregs->s11 = (compat_ulong_t) regs->s11;
+ cregs->t3 = (compat_ulong_t) regs->t3;
+ cregs->t4 = (compat_ulong_t) regs->t4;
+ cregs->t5 = (compat_ulong_t) regs->t5;
+ cregs->t6 = (compat_ulong_t) regs->t6;
+};
+
+static inline void cregs_to_regs(struct compat_user_regs_struct *cregs,
+ struct pt_regs *regs)
+{
+ regs->epc = (unsigned long) cregs->pc;
+ regs->ra = (unsigned long) cregs->ra;
+ regs->sp = (unsigned long) cregs->sp;
+ regs->gp = (unsigned long) cregs->gp;
+ regs->tp = (unsigned long) cregs->tp;
+ regs->t0 = (unsigned long) cregs->t0;
+ regs->t1 = (unsigned long) cregs->t1;
+ regs->t2 = (unsigned long) cregs->t2;
+ regs->s0 = (unsigned long) cregs->s0;
+ regs->s1 = (unsigned long) cregs->s1;
+ regs->a0 = (unsigned long) cregs->a0;
+ regs->a1 = (unsigned long) cregs->a1;
+ regs->a2 = (unsigned long) cregs->a2;
+ regs->a3 = (unsigned long) cregs->a3;
+ regs->a4 = (unsigned long) cregs->a4;
+ regs->a5 = (unsigned long) cregs->a5;
+ regs->a6 = (unsigned long) cregs->a6;
+ regs->a7 = (unsigned long) cregs->a7;
+ regs->s2 = (unsigned long) cregs->s2;
+ regs->s3 = (unsigned long) cregs->s3;
+ regs->s4 = (unsigned long) cregs->s4;
+ regs->s5 = (unsigned long) cregs->s5;
+ regs->s6 = (unsigned long) cregs->s6;
+ regs->s7 = (unsigned long) cregs->s7;
+ regs->s8 = (unsigned long) cregs->s8;
+ regs->s9 = (unsigned long) cregs->s9;
+ regs->s10 = (unsigned long) cregs->s10;
+ regs->s11 = (unsigned long) cregs->s11;
+ regs->t3 = (unsigned long) cregs->t3;
+ regs->t4 = (unsigned long) cregs->t4;
+ regs->t5 = (unsigned long) cregs->t5;
+ regs->t6 = (unsigned long) cregs->t6;
+};
+
+#endif /* __ASM_COMPAT_H */
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index cc40521e438b..6d85655e7edf 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -36,6 +36,13 @@
#define SR_SD _AC(0x8000000000000000, UL) /* FS/XS dirty */
#endif
+#ifdef CONFIG_64BIT
+#define SR_UXL _AC(0x300000000, UL) /* XLEN mask for U-mode */
+#define SR_UXL_32 _AC(0x100000000, UL) /* XLEN = 32 for U-mode */
+#define SR_UXL_64 _AC(0x200000000, UL) /* XLEN = 64 for U-mode */
+#define SR_UXL_SHIFT 32
+#endif
+
/* SATP flags */
#ifndef CONFIG_64BIT
#define SATP_PPN _AC(0x003FFFFF, UL)
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index f53c40026c7a..14fc7342490b 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -8,6 +8,8 @@
#ifndef _ASM_RISCV_ELF_H
#define _ASM_RISCV_ELF_H
+#include <uapi/linux/elf.h>
+#include <linux/compat.h>
#include <uapi/asm/elf.h>
#include <asm/auxvec.h>
#include <asm/byteorder.h>
@@ -18,18 +20,24 @@
*/
#define ELF_ARCH EM_RISCV
+#ifndef ELF_CLASS
#ifdef CONFIG_64BIT
#define ELF_CLASS ELFCLASS64
#else
#define ELF_CLASS ELFCLASS32
#endif
+#endif
#define ELF_DATA ELFDATA2LSB
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
-#define elf_check_arch(x) ((x)->e_machine == EM_RISCV)
+#define elf_check_arch(x) (((x)->e_machine == EM_RISCV) && \
+ ((x)->e_ident[EI_CLASS] == ELF_CLASS))
+
+extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
+#define compat_elf_check_arch compat_elf_check_arch
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE (PAGE_SIZE)
@@ -43,8 +51,14 @@
#define ELF_ET_DYN_BASE ((TASK_SIZE / 3) * 2)
#ifdef CONFIG_64BIT
+#ifdef CONFIG_COMPAT
+#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
+ 0x7ff >> (PAGE_SHIFT - 12) : \
+ 0x3ffff >> (PAGE_SHIFT - 12))
+#else
#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
#endif
+#endif
/*
* This yields a mask that user programs can use to figure out what
* instruction set this CPU supports. This could be done in user space,
@@ -60,11 +74,19 @@ extern unsigned long elf_hwcap;
*/
#define ELF_PLATFORM (NULL)
+#define COMPAT_ELF_PLATFORM (NULL)
+
#ifdef CONFIG_MMU
#define ARCH_DLINFO \
do { \
+ /* \
+ * Note that we add ulong after elf_addr_t because \
+ * casting current->mm->context.vdso triggers a cast \
+ * warning of cast from pointer to integer for \
+ * COMPAT ELFCLASS32. \
+ */ \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
- (elf_addr_t)current->mm->context.vdso); \
+ (elf_addr_t)(ulong)current->mm->context.vdso); \
NEW_AUX_ENT(AT_L1I_CACHESIZE, \
get_cache_size(1, CACHE_TYPE_INST)); \
NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, \
@@ -90,4 +112,28 @@ do { \
*(struct user_regs_struct *)regs; \
} while (0);
+#ifdef CONFIG_COMPAT
+
+#define SET_PERSONALITY(ex) \
+do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ set_thread_flag(TIF_32BIT); \
+ else \
+ clear_thread_flag(TIF_32BIT); \
+ if (personality(current->personality) != PER_LINUX32) \
+ set_personality(PER_LINUX | \
+ (current->personality & (~PER_MASK))); \
+} while (0)
+
+#define COMPAT_ELF_ET_DYN_BASE ((TASK_SIZE_32 / 3) * 2)
+
+/* rv32 registers */
+typedef compat_ulong_t compat_elf_greg_t;
+typedef compat_elf_greg_t compat_elf_gregset_t[ELF_NGREG];
+
+extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+#define compat_arch_setup_additional_pages \
+ compat_arch_setup_additional_pages
+
+#endif /* CONFIG_COMPAT */
#endif /* _ASM_RISCV_ELF_H */
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 5f1046e82d9f..9e2888dbb5b1 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -14,6 +14,14 @@
#define ERRATA_SIFIVE_NUMBER 2
#endif
+#ifdef CONFIG_ERRATA_THEAD
+#define ERRATA_THEAD_PBMT 0
+#define ERRATA_THEAD_NUMBER 1
+#endif
+
+#define CPUFEATURE_SVPBMT 0
+#define CPUFEATURE_NUMBER 1
+
#ifdef __ASSEMBLY__
#define ALT_INSN_FAULT(x) \
@@ -34,6 +42,57 @@ asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \
ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \
: : "r" (addr) : "memory")
+/*
+ * _val is marked as "will be overwritten", so need to set it to 0
+ * in the default case.
+ */
+#define ALT_SVPBMT_SHIFT 61
+#define ALT_THEAD_PBMT_SHIFT 59
+#define ALT_SVPBMT(_val, prot) \
+asm(ALTERNATIVE_2("li %0, 0\t\nnop", \
+ "li %0, %1\t\nslli %0,%0,%3", 0, \
+ CPUFEATURE_SVPBMT, CONFIG_RISCV_ISA_SVPBMT, \
+ "li %0, %2\t\nslli %0,%0,%4", THEAD_VENDOR_ID, \
+ ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
+ : "=r"(_val) \
+ : "I"(prot##_SVPBMT >> ALT_SVPBMT_SHIFT), \
+ "I"(prot##_THEAD >> ALT_THEAD_PBMT_SHIFT), \
+ "I"(ALT_SVPBMT_SHIFT), \
+ "I"(ALT_THEAD_PBMT_SHIFT))
+
+#ifdef CONFIG_ERRATA_THEAD_PBMT
+/*
+ * IO/NOCACHE memory types are handled together with svpbmt,
+ * so on T-Head chips, check if no other memory type is set,
+ * and set the non-0 PMA type if applicable.
+ */
+#define ALT_THEAD_PMA(_val) \
+asm volatile(ALTERNATIVE( \
+ "nop\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ "nop", \
+ "li t3, %2\n\t" \
+ "slli t3, t3, %4\n\t" \
+ "and t3, %0, t3\n\t" \
+ "bne t3, zero, 2f\n\t" \
+ "li t3, %3\n\t" \
+ "slli t3, t3, %4\n\t" \
+ "or %0, %0, t3\n\t" \
+ "2:", THEAD_VENDOR_ID, \
+ ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
+ : "+r"(_val) \
+ : "0"(_val), \
+ "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT), \
+ "I"(_PAGE_PMA_THEAD >> ALT_THEAD_PBMT_SHIFT), \
+ "I"(ALT_THEAD_PBMT_SHIFT))
+#else
+#define ALT_THEAD_PMA(_val)
+#endif
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 3cfece8b6568..5c3e7b97fcc6 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -45,8 +45,6 @@ enum fixed_addresses {
__end_of_fixed_addresses
};
-#define FIXMAP_PAGE_IO PAGE_KERNEL
-
#define __early_set_fixmap __set_fixmap
#define __late_set_fixmap __set_fixmap
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 0734e42f74f2..4e2486881840 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -52,6 +52,7 @@ extern unsigned long elf_hwcap;
*/
enum riscv_isa_ext_id {
RISCV_ISA_EXT_SSCOFPMF = RISCV_ISA_EXT_BASE,
+ RISCV_ISA_EXT_SVPBMT,
RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX,
};
diff --git a/arch/riscv/include/asm/irq_work.h b/arch/riscv/include/asm/irq_work.h
index d6c277992f76..b53891964ae0 100644
--- a/arch/riscv/include/asm/irq_work.h
+++ b/arch/riscv/include/asm/irq_work.h
@@ -4,7 +4,7 @@
static inline bool arch_irq_work_has_interrupt(void)
{
- return true;
+ return IS_ENABLED(CONFIG_SMP);
}
extern void arch_irq_work_raise(void);
#endif /* _ASM_RISCV_IRQ_WORK_H */
diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h
index e4e291d40759..eee260e8ab30 100644
--- a/arch/riscv/include/asm/kexec.h
+++ b/arch/riscv/include/asm/kexec.h
@@ -53,4 +53,15 @@ typedef void (*riscv_kexec_method)(unsigned long first_ind_entry,
extern riscv_kexec_method riscv_kexec_norelocate;
+#ifdef CONFIG_KEXEC_FILE
+extern const struct kexec_file_ops elf_kexec_ops;
+
+struct purgatory_info;
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+ Elf_Shdr *section,
+ const Elf_Shdr *relsec,
+ const Elf_Shdr *symtab);
+#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+#endif
+
#endif
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index 0099dc116168..cedcf8ea3c76 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -16,6 +16,7 @@ typedef struct {
atomic_long_t id;
#endif
void *vdso;
+ void *vdso_info;
#ifdef CONFIG_SMP
/* A local icache flush is needed before user execution can resume. */
cpumask_t icache_stale_mask;
diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h
index 5b2e79e5bfa5..59ba1fbaf784 100644
--- a/arch/riscv/include/asm/pgtable-32.h
+++ b/arch/riscv/include/asm/pgtable-32.h
@@ -7,6 +7,7 @@
#define _ASM_RISCV_PGTABLE_32_H
#include <asm-generic/pgtable-nopmd.h>
+#include <linux/bits.h>
#include <linux/const.h>
/* Size of region mapped by a page global directory */
@@ -16,4 +17,20 @@
#define MAX_POSSIBLE_PHYSMEM_BITS 34
+/*
+ * rv32 PTE format:
+ * | XLEN-1 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * PFN reserved for SW D A G U X W R V
+ */
+#define _PAGE_PFN_MASK GENMASK(31, 10)
+
+#define _PAGE_NOCACHE 0
+#define _PAGE_IO 0
+#define _PAGE_MTMASK 0
+
+/* Set of bits to preserve across pte_modify() */
+#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_WRITE | _PAGE_EXEC | \
+ _PAGE_USER | _PAGE_GLOBAL))
+
#endif /* _ASM_RISCV_PGTABLE_32_H */
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index ba2494cbc24f..5c2aba5efbd0 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -6,7 +6,9 @@
#ifndef _ASM_RISCV_PGTABLE_64_H
#define _ASM_RISCV_PGTABLE_64_H
+#include <linux/bits.h>
#include <linux/const.h>
+#include <asm/errata_list.h>
extern bool pgtable_l4_enabled;
extern bool pgtable_l5_enabled;
@@ -65,6 +67,71 @@ typedef struct {
#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
+/*
+ * rv64 PTE format:
+ * | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * N MT RSV PFN reserved for SW D A G U X W R V
+ */
+#define _PAGE_PFN_MASK GENMASK(53, 10)
+
+/*
+ * [62:61] Svpbmt Memory Type definitions:
+ *
+ * 00 - PMA Normal Cacheable, No change to implied PMA memory type
+ * 01 - NC Non-cacheable, idempotent, weakly-ordered Main Memory
+ * 10 - IO Non-cacheable, non-idempotent, strongly-ordered I/O memory
+ * 11 - Rsvd Reserved for future standard use
+ */
+#define _PAGE_NOCACHE_SVPBMT (1UL << 61)
+#define _PAGE_IO_SVPBMT (1UL << 62)
+#define _PAGE_MTMASK_SVPBMT (_PAGE_NOCACHE_SVPBMT | _PAGE_IO_SVPBMT)
+
+/*
+ * [63:59] T-Head Memory Type definitions:
+ *
+ * 00000 - NC Weakly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
+ * 01110 - PMA Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable
+ * 10000 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
+ */
+#define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60))
+#define _PAGE_NOCACHE_THEAD 0UL
+#define _PAGE_IO_THEAD (1UL << 63)
+#define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))
+
+static inline u64 riscv_page_mtmask(void)
+{
+ u64 val;
+
+ ALT_SVPBMT(val, _PAGE_MTMASK);
+ return val;
+}
+
+static inline u64 riscv_page_nocache(void)
+{
+ u64 val;
+
+ ALT_SVPBMT(val, _PAGE_NOCACHE);
+ return val;
+}
+
+static inline u64 riscv_page_io(void)
+{
+ u64 val;
+
+ ALT_SVPBMT(val, _PAGE_IO);
+ return val;
+}
+
+#define _PAGE_NOCACHE riscv_page_nocache()
+#define _PAGE_IO riscv_page_io()
+#define _PAGE_MTMASK riscv_page_mtmask()
+
+/* Set of bits to preserve across pte_modify() */
+#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_WRITE | _PAGE_EXEC | \
+ _PAGE_USER | _PAGE_GLOBAL | \
+ _PAGE_MTMASK))
+
static inline int pud_present(pud_t pud)
{
return (pud_val(pud) & _PAGE_PRESENT);
@@ -113,12 +180,12 @@ static inline unsigned long _pud_pfn(pud_t pud)
static inline pmd_t *pud_pgtable(pud_t pud)
{
- return (pmd_t *)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
+ return (pmd_t *)pfn_to_virt(__page_val_to_pfn(pud_val(pud)));
}
static inline struct page *pud_page(pud_t pud)
{
- return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
+ return pfn_to_page(__page_val_to_pfn(pud_val(pud)));
}
#define mm_p4d_folded mm_p4d_folded
@@ -143,12 +210,16 @@ static inline bool mm_pud_folded(struct mm_struct *mm)
static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
{
- return __pmd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+ unsigned long prot_val = pgprot_val(prot);
+
+ ALT_THEAD_PMA(prot_val);
+
+ return __pmd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}
static inline unsigned long _pmd_pfn(pmd_t pmd)
{
- return pmd_val(pmd) >> _PAGE_PFN_SHIFT;
+ return __page_val_to_pfn(pmd_val(pmd));
}
#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot)
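A quick worked example of what the new _PAGE_PFN_MASK masking buys (illustrative only, not part of the patch; the constants below are arbitrary test values, and PFN_MASK/NOCACHE_BIT are local stand-ins for the macros added above):

#include <assert.h>

#define PFN_SHIFT	10
#define PFN_MASK	(((1UL << 44) - 1) << 10)	/* GENMASK(53, 10) */
#define NOCACHE_BIT	(1UL << 61)			/* _PAGE_NOCACHE_SVPBMT */

int main(void)
{
	unsigned long pfn = 0x80200UL;
	unsigned long pte = (pfn << PFN_SHIFT) | NOCACHE_BIT | 0xefUL;

	/* plain shift, as done before this patch: bit 61 leaks into the PFN */
	assert((pte >> PFN_SHIFT) != pfn);
	/* __page_val_to_pfn() style: memory-type bits are stripped first */
	assert(((pte & PFN_MASK) >> PFN_SHIFT) == pfn);
	return 0;
}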
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index a6b0c89824c2..b9e13a8fe2b7 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -6,12 +6,6 @@
#ifndef _ASM_RISCV_PGTABLE_BITS_H
#define _ASM_RISCV_PGTABLE_BITS_H
-/*
- * PTE format:
- * | XLEN-1  10 | 9             8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
- *       PFN      reserved for SW   D   A   G   U   X   W   R   V
- */
-
#define _PAGE_ACCESSED_OFFSET 6
#define _PAGE_PRESENT (1 << 0)
@@ -35,10 +29,6 @@
#define _PAGE_PFN_SHIFT 10
-/* Set of bits to preserve across pte_modify() */
-#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
- _PAGE_WRITE | _PAGE_EXEC | \
- _PAGE_USER | _PAGE_GLOBAL))
/*
* when all of R/W/X are zero, the PTE is a pointer to the next level
* of the page table; otherwise, it is a leaf PTE.
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 4200ddede880..1d1be9d9419c 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -108,6 +108,8 @@
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
+#define __page_val_to_pfn(_val) (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
+
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
@@ -179,11 +181,8 @@ extern struct pt_alloc_ops pt_ops __initdata;
#define PAGE_TABLE __pgprot(_PAGE_TABLE)
-/*
- * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
- * change the properties of memory regions.
- */
-#define _PAGE_IOREMAP _PAGE_KERNEL
+#define _PAGE_IOREMAP ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
+#define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP)
extern pgd_t swapper_pg_dir[];
@@ -253,7 +252,11 @@ static inline void pmd_clear(pmd_t *pmdp)
static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
- return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+ unsigned long prot_val = pgprot_val(prot);
+
+ ALT_THEAD_PMA(prot_val);
+
+ return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}
static inline unsigned long _pgd_pfn(pgd_t pgd)
@@ -263,12 +266,12 @@ static inline unsigned long _pgd_pfn(pgd_t pgd)
static inline struct page *pmd_page(pmd_t pmd)
{
- return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
+ return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
- return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
+ return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}
static inline pte_t pmd_pte(pmd_t pmd)
@@ -284,7 +287,7 @@ static inline pte_t pud_pte(pud_t pud)
/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
- return (pte_val(pte) >> _PAGE_PFN_SHIFT);
+ return __page_val_to_pfn(pte_val(pte));
}
#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -292,7 +295,11 @@ static inline unsigned long pte_pfn(pte_t pte)
/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
- return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+ unsigned long prot_val = pgprot_val(prot);
+
+ ALT_THEAD_PMA(prot_val);
+
+ return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
@@ -406,7 +413,11 @@ static inline int pmd_protnone(pmd_t pmd)
/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+ unsigned long newprot_val = pgprot_val(newprot);
+
+ ALT_THEAD_PMA(newprot_val);
+
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}
#define pgd_ERROR(e) \
@@ -539,6 +550,28 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
return ptep_test_and_clear_young(vma, address, ptep);
}
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot &= ~_PAGE_MTMASK;
+ prot |= _PAGE_IO;
+
+ return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot &= ~_PAGE_MTMASK;
+ prot |= _PAGE_NOCACHE;
+
+ return __pgprot(prot);
+}
+
/*
* THP functions
*/
@@ -761,8 +794,17 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
* 63–48 all equal to bit 47, or else a page-fault exception will occur."
*/
#ifdef CONFIG_64BIT
-#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
-#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
+#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2)
+#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
+
+#ifdef CONFIG_COMPAT
+#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
+#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
+#else
+#define TASK_SIZE TASK_SIZE_64
+#endif
+
#else
#define TASK_SIZE FIXADDR_START
#define TASK_SIZE_MIN TASK_SIZE
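The pgprot_noncached()/pgprot_writecombine() helpers added above slot into the usual driver mapping pattern. A minimal sketch, assuming a hypothetical driver mmap handler and a hypothetical device physical range (neither comes from this patch):

#include <linux/mm.h>

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = 0x80000000UL >> PAGE_SHIFT;	/* hypothetical device range */

	/* clear the Svpbmt type field and select the weakly-ordered "NC" type */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}

Plain ioremap() callers need no change: _PAGE_IOREMAP now carries the strongly-ordered IO memory type.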
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 0749924d9e55..21c8072dce17 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -19,7 +19,11 @@
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
#define STACK_TOP TASK_SIZE
-#define STACK_TOP_MAX STACK_TOP
+#ifdef CONFIG_64BIT
+#define STACK_TOP_MAX TASK_SIZE_64
+#else
+#define STACK_TOP_MAX TASK_SIZE
+#endif
#define STACK_ALIGN 16
#ifndef __ASSEMBLY__
diff --git a/arch/riscv/include/asm/signal32.h b/arch/riscv/include/asm/signal32.h
new file mode 100644
index 000000000000..96dc56932e76
--- /dev/null
+++ b/arch/riscv/include/asm/signal32.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SIGNAL32_H
+#define __ASM_SIGNAL32_H
+
+#if IS_ENABLED(CONFIG_COMPAT)
+int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs);
+#else
+static inline
+int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+{
+ return -1;
+}
+#endif
+
+#endif
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index 7ac6a0e275f2..384a63b86420 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -16,6 +16,7 @@
/* The array of function pointers for syscalls. */
extern void * const sys_call_table[];
+extern void * const compat_sys_call_table[];
/*
* Only the low 32 bits of orig_r0 are meaningful, so we return int.
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 74d888c8d631..78933ac04995 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -97,6 +97,7 @@ struct thread_info {
#define TIF_SECCOMP 8 /* syscall secure computing */
#define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */
#define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */
+#define TIF_32BIT 11 /* compat-mode 32bit process */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index 6c316093a1e5..221630bdbd07 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -9,7 +9,17 @@
*/
#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_MEMFD_SECRET
+
+#ifdef CONFIG_COMPAT
+#define __ARCH_WANT_COMPAT_TRUNCATE64
+#define __ARCH_WANT_COMPAT_FTRUNCATE64
+#define __ARCH_WANT_COMPAT_FALLOCATE
+#define __ARCH_WANT_COMPAT_PREAD64
+#define __ARCH_WANT_COMPAT_PWRITE64
+#define __ARCH_WANT_COMPAT_SYNC_FILE_RANGE
+#define __ARCH_WANT_COMPAT_READAHEAD
+#define __ARCH_WANT_COMPAT_FADVISE64_64
+#endif
#include <uapi/asm/unistd.h>
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index bc6f75f3a199..af981426fe0f 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -21,6 +21,15 @@
#define VDSO_SYMBOL(base, name) \
(void __user *)((unsigned long)(base) + __vdso_##name##_offset)
+
+#ifdef CONFIG_COMPAT
+#include <generated/compat_vdso-offsets.h>
+
+#define COMPAT_VDSO_SYMBOL(base, name) \
+ (void __user *)((unsigned long)(base) + compat__vdso_##name##_offset)
+
+#endif /* CONFIG_COMPAT */
+
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_MMU */
diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h
index 9d934215b3c8..cb89af3f0704 100644
--- a/arch/riscv/include/asm/vendorid_list.h
+++ b/arch/riscv/include/asm/vendorid_list.h
@@ -6,5 +6,6 @@
#define ASM_VENDOR_LIST_H
#define SIFIVE_VENDOR_ID 0x489
+#define THEAD_VENDOR_ID 0x5b7
#endif
diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h
new file mode 100644
index 000000000000..d4ffc3c37649
--- /dev/null
+++ b/arch/riscv/include/asm/xip_fixup.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * XIP fixup macros, only useful in assembly.
+ */
+#ifndef _ASM_RISCV_XIP_FIXUP_H
+#define _ASM_RISCV_XIP_FIXUP_H
+
+#include <linux/pgtable.h>
+
+#ifdef CONFIG_XIP_KERNEL
+.macro XIP_FIXUP_OFFSET reg
+ REG_L t0, _xip_fixup
+ add \reg, \reg, t0
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+ la t1, __data_loc
+ REG_L t1, _xip_phys_offset
+ sub \reg, \reg, t1
+ add \reg, \reg, t0
+.endm
+
+_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
+#else
+.macro XIP_FIXUP_OFFSET reg
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+.endm
+#endif /* CONFIG_XIP_KERNEL */
+
+#endif
diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h
index 8062996c2dfd..73d7cdd2ec49 100644
--- a/arch/riscv/include/uapi/asm/unistd.h
+++ b/arch/riscv/include/uapi/asm/unistd.h
@@ -15,12 +15,13 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
-#ifdef __LP64__
+#if defined(__LP64__) && !defined(__SYSCALL_COMPAT)
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SET_GET_RLIMIT
#endif /* __LP64__ */
#define __ARCH_WANT_SYS_CLONE3
+#define __ARCH_WANT_MEMFD_SECRET
#include <asm-generic/unistd.h>
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 87adbe47bc15..c71d6591d539 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -14,10 +14,25 @@ ifdef CONFIG_KEXEC
AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
endif
+# cmodel=medany and notrace when patching early
+ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
+CFLAGS_alternative.o := -mcmodel=medany
+CFLAGS_cpufeature.o := -mcmodel=medany
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_alternative.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_cpufeature.o = $(CC_FLAGS_FTRACE)
+endif
+ifdef CONFIG_KASAN
+KASAN_SANITIZE_alternative.o := n
+KASAN_SANITIZE_cpufeature.o := n
+endif
+endif
+
extra-y += head.o
extra-y += vmlinux.lds
obj-y += soc.o
+obj-$(CONFIG_RISCV_ALTERNATIVE) += alternative.o
obj-y += cpu.o
obj-y += cpufeature.o
obj-y += entry.o
@@ -64,8 +79,12 @@ endif
obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KEXEC) += kexec_relocate.o crash_save_regs.o machine_kexec.o
+obj-$(CONFIG_KEXEC_FILE) += elf_kexec.o machine_kexec_file.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_EFI) += efi.o
+obj-$(CONFIG_COMPAT) += compat_syscall_table.o
+obj-$(CONFIG_COMPAT) += compat_signal.o
+obj-$(CONFIG_COMPAT) += compat_vdso/
diff --git a/arch/riscv/kernel/alternative.c b/arch/riscv/kernel/alternative.c
new file mode 100644
index 000000000000..c9d0d3c53223
--- /dev/null
+++ b/arch/riscv/kernel/alternative.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * alternative runtime patching
+ * inspired by the ARM64 and x86 version
+ *
+ * Copyright (C) 2021 Sifive.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/uaccess.h>
+#include <asm/alternative.h>
+#include <asm/sections.h>
+#include <asm/vendorid_list.h>
+#include <asm/sbi.h>
+#include <asm/csr.h>
+
+struct cpu_manufacturer_info_t {
+ unsigned long vendor_id;
+ unsigned long arch_id;
+ unsigned long imp_id;
+ void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid,
+ unsigned int stage);
+};
+
+static void __init_or_module riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info)
+{
+#ifdef CONFIG_RISCV_M_MODE
+ cpu_mfr_info->vendor_id = csr_read(CSR_MVENDORID);
+ cpu_mfr_info->arch_id = csr_read(CSR_MARCHID);
+ cpu_mfr_info->imp_id = csr_read(CSR_MIMPID);
+#else
+ cpu_mfr_info->vendor_id = sbi_get_mvendorid();
+ cpu_mfr_info->arch_id = sbi_get_marchid();
+ cpu_mfr_info->imp_id = sbi_get_mimpid();
+#endif
+
+ switch (cpu_mfr_info->vendor_id) {
+#ifdef CONFIG_ERRATA_SIFIVE
+ case SIFIVE_VENDOR_ID:
+ cpu_mfr_info->vendor_patch_func = sifive_errata_patch_func;
+ break;
+#endif
+#ifdef CONFIG_ERRATA_THEAD
+ case THEAD_VENDOR_ID:
+ cpu_mfr_info->vendor_patch_func = thead_errata_patch_func;
+ break;
+#endif
+ default:
+ cpu_mfr_info->vendor_patch_func = NULL;
+ }
+}
+
+/*
+ * This is called very early in the boot process (directly after we run
+ * a feature detect on the boot CPU). No need to worry about other CPUs
+ * here.
+ */
+static void __init_or_module _apply_alternatives(struct alt_entry *begin,
+ struct alt_entry *end,
+ unsigned int stage)
+{
+ struct cpu_manufacturer_info_t cpu_mfr_info;
+
+ riscv_fill_cpu_mfr_info(&cpu_mfr_info);
+
+ riscv_cpufeature_patch_func(begin, end, stage);
+
+ if (!cpu_mfr_info.vendor_patch_func)
+ return;
+
+ cpu_mfr_info.vendor_patch_func(begin, end,
+ cpu_mfr_info.arch_id,
+ cpu_mfr_info.imp_id,
+ stage);
+}
+
+void __init apply_boot_alternatives(void)
+{
+ /* If called on non-boot cpu things could go wrong */
+ WARN_ON(smp_processor_id() != 0);
+
+ _apply_alternatives((struct alt_entry *)__alt_start,
+ (struct alt_entry *)__alt_end,
+ RISCV_ALTERNATIVES_BOOT);
+}
+
+/*
+ * apply_early_boot_alternatives() is called from setup_vm() with MMU-off.
+ *
+ * The following requirements must be honoured for it to work correctly:
+ * 1) It should use PC-relative addressing for accessing kernel symbols.
+ * To achieve this we always use GCC cmodel=medany.
+ * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
+ * so disable compiler instrumentation when FTRACE is enabled.
+ *
+ * Currently, the above requirements are honoured by using custom CFLAGS
+ * for alternative.o in kernel/Makefile.
+ */
+void __init apply_early_boot_alternatives(void)
+{
+#ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
+ _apply_alternatives((struct alt_entry *)__alt_start,
+ (struct alt_entry *)__alt_end,
+ RISCV_ALTERNATIVES_EARLY_BOOT);
+#endif
+}
+
+#ifdef CONFIG_MODULES
+void apply_module_alternatives(void *start, size_t length)
+{
+ _apply_alternatives((struct alt_entry *)start,
+ (struct alt_entry *)(start + length),
+ RISCV_ALTERNATIVES_MODULE);
+}
+#endif
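For orientation, a sketch of when each patching stage runs, pieced together from the hunks in this series (not new code; the call sites are setup_vm(), setup_arch() in setup.c, and module_finalize() in module.c):

/*
 * setup_vm()                          MMU off, very early
 *   apply_early_boot_alternatives()   -> RISCV_ALTERNATIVES_EARLY_BOOT
 * setup_arch()
 *   riscv_fill_hwcap()                parse ISA strings into riscv_isa
 *   apply_boot_alternatives()         -> RISCV_ALTERNATIVES_BOOT
 * module load
 *   module_finalize()
 *     apply_module_alternatives()     -> RISCV_ALTERNATIVES_MODULE
 */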
diff --git a/arch/riscv/kernel/compat_signal.c b/arch/riscv/kernel/compat_signal.c
new file mode 100644
index 000000000000..6ec4e34255a9
--- /dev/null
+++ b/arch/riscv/kernel/compat_signal.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/compat.h>
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/linkage.h>
+
+#include <asm/csr.h>
+#include <asm/signal32.h>
+#include <asm/switch_to.h>
+#include <asm/ucontext.h>
+#include <asm/vdso.h>
+
+#define COMPAT_DEBUG_SIG 0
+
+struct compat_sigcontext {
+ struct compat_user_regs_struct sc_regs;
+ union __riscv_fp_state sc_fpregs;
+};
+
+struct compat_ucontext {
+ compat_ulong_t uc_flags;
+ struct compat_ucontext *uc_link;
+ compat_stack_t uc_stack;
+ sigset_t uc_sigmask;
+ /* There's some padding here to allow sigset_t to be expanded in the
+ * future. Though this is unlikely, other architectures put uc_sigmask
+ * at the end of this structure and explicitly state it can be
+ * expanded, so we didn't want to box ourselves in here. */
+ __u8 __unused[1024 / 8 - sizeof(sigset_t)];
+ /* We can't put uc_sigmask at the end of this structure because we need
+ * to be able to expand sigcontext in the future. For example, the
+ * vector ISA extension will almost certainly add ISA state. We want
+ * to ensure all user-visible ISA state can be saved and restored via a
+ * ucontext, so we're putting this at the end in order to allow for
+ * infinite extensibility. Since we know this will be extended and we
+ * assume sigset_t won't be extended an extreme amount, we're
+ * prioritizing this. */
+ struct compat_sigcontext uc_mcontext;
+};
+
+struct compat_rt_sigframe {
+ struct compat_siginfo info;
+ struct compat_ucontext uc;
+};
+
+#ifdef CONFIG_FPU
+static long compat_restore_fp_state(struct pt_regs *regs,
+ union __riscv_fp_state __user *sc_fpregs)
+{
+ long err;
+ struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
+ size_t i;
+
+ err = __copy_from_user(&current->thread.fstate, state, sizeof(*state));
+ if (unlikely(err))
+ return err;
+
+ fstate_restore(current, regs);
+
+ /* We support no other extension state at this time. */
+ for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
+ u32 value;
+
+ err = __get_user(value, &sc_fpregs->q.reserved[i]);
+ if (unlikely(err))
+ break;
+ if (value != 0)
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static long compat_save_fp_state(struct pt_regs *regs,
+ union __riscv_fp_state __user *sc_fpregs)
+{
+ long err;
+ struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
+ size_t i;
+
+ fstate_save(current, regs);
+ err = __copy_to_user(state, &current->thread.fstate, sizeof(*state));
+ if (unlikely(err))
+ return err;
+
+ /* We support no other extension state at this time. */
+ for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
+ err = __put_user(0, &sc_fpregs->q.reserved[i]);
+ if (unlikely(err))
+ break;
+ }
+
+ return err;
+}
+#else
+#define compat_save_fp_state(task, regs) (0)
+#define compat_restore_fp_state(task, regs) (0)
+#endif
+
+static long compat_restore_sigcontext(struct pt_regs *regs,
+ struct compat_sigcontext __user *sc)
+{
+ long err;
+ struct compat_user_regs_struct cregs;
+
+ /* sc_regs is structured the same as the start of pt_regs */
+ err = __copy_from_user(&cregs, &sc->sc_regs, sizeof(sc->sc_regs));
+
+ cregs_to_regs(&cregs, regs);
+
+ /* Restore the floating-point state. */
+ if (has_fpu())
+ err |= compat_restore_fp_state(regs, &sc->sc_fpregs);
+ return err;
+}
+
+COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
+{
+ struct pt_regs *regs = current_pt_regs();
+ struct compat_rt_sigframe __user *frame;
+ struct task_struct *task;
+ sigset_t set;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ frame = (struct compat_rt_sigframe __user *)regs->sp;
+
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ if (compat_restore_sigcontext(regs, &frame->uc.uc_mcontext))
+ goto badframe;
+
+ if (compat_restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
+ return regs->a0;
+
+badframe:
+ task = current;
+ if (show_unhandled_signals) {
+ pr_info_ratelimited(
+ "%s[%d]: bad frame in %s: frame=%p pc=%p sp=%p\n",
+ task->comm, task_pid_nr(task), __func__,
+ frame, (void *)regs->epc, (void *)regs->sp);
+ }
+ force_sig(SIGSEGV);
+ return 0;
+}
+
+static long compat_setup_sigcontext(struct compat_rt_sigframe __user *frame,
+ struct pt_regs *regs)
+{
+ struct compat_sigcontext __user *sc = &frame->uc.uc_mcontext;
+ struct compat_user_regs_struct cregs;
+ long err;
+
+ regs_to_cregs(&cregs, regs);
+
+ /* sc_regs is structured the same as the start of pt_regs */
+ err = __copy_to_user(&sc->sc_regs, &cregs, sizeof(sc->sc_regs));
+ /* Save the floating-point state. */
+ if (has_fpu())
+ err |= compat_save_fp_state(regs, &sc->sc_fpregs);
+ return err;
+}
+
+static inline void __user *compat_get_sigframe(struct ksignal *ksig,
+ struct pt_regs *regs, size_t framesize)
+{
+ unsigned long sp;
+ /* Default to using normal stack */
+ sp = regs->sp;
+
+ /*
+ * If we are on the alternate signal stack and would overflow it, don't.
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
+ return (void __user __force *)(-1UL);
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ sp = sigsp(sp, ksig) - framesize;
+
+ /* Align the stack frame. */
+ sp &= ~0xfUL;
+
+ return (void __user *)sp;
+}
+
+int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+{
+ struct compat_rt_sigframe __user *frame;
+ long err = 0;
+
+ frame = compat_get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(frame, sizeof(*frame)))
+ return -EFAULT;
+
+ err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(NULL, &frame->uc.uc_link);
+ err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
+ err |= compat_setup_sigcontext(frame, regs);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ if (err)
+ return -EFAULT;
+
+ regs->ra = (unsigned long)COMPAT_VDSO_SYMBOL(
+ current->mm->context.vdso, rt_sigreturn);
+
+ /*
+ * Set up registers for signal handler.
+ * Registers that we don't modify keep the value they had from
+ * user-space at the time we took the signal.
+ * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+ * since some things rely on this (e.g. glibc's debug/segfault.c).
+ */
+ regs->epc = (unsigned long)ksig->ka.sa.sa_handler;
+ regs->sp = (unsigned long)frame;
+ regs->a0 = ksig->sig; /* a0: signal number */
+ regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */
+ regs->a2 = (unsigned long)(&frame->uc); /* a2: ucontext pointer */
+
+#if COMPAT_DEBUG_SIG
+ pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n",
+ current->comm, task_pid_nr(current), ksig->sig,
+ (void *)regs->epc, (void *)regs->ra, frame);
+#endif
+
+ return 0;
+}
diff --git a/arch/riscv/kernel/compat_syscall_table.c b/arch/riscv/kernel/compat_syscall_table.c
new file mode 100644
index 000000000000..651f2b009c28
--- /dev/null
+++ b/arch/riscv/kernel/compat_syscall_table.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define __SYSCALL_COMPAT
+
+#include <linux/compat.h>
+#include <linux/syscalls.h>
+#include <asm-generic/mman-common.h>
+#include <asm-generic/syscalls.h>
+#include <asm/syscall.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+asmlinkage long compat_sys_rt_sigreturn(void);
+
+void * const compat_sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
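The table builds on two tricks: GCC's range designator to default every slot to sys_ni_syscall, and redefining __SYSCALL so that including <asm/unistd.h> expands each syscall line into an array initializer that overrides the default. A standalone toy showing the same pattern (all names below are local stand-ins, not kernel symbols):

#include <stdio.h>

static long ni(void)  { return -38; }	/* plays the role of sys_ni_syscall */
static long fn1(void) { return 1; }
static long fn2(void) { return 2; }

/* stand-in for <asm/unistd.h>: just a list of __SYSCALL(nr, entry) lines */
#define SYSCALL_LIST \
	__SYSCALL(1, fn1) \
	__SYSCALL(2, fn2)

#define __SYSCALL(nr, call) [nr] = (call),
static long (*const table[4])(void) = {
	[0 ... 3] = ni,		/* default every slot to "not implemented" */
	SYSCALL_LIST		/* later designators override the default  */
};
#undef __SYSCALL

int main(void)
{
	printf("%ld %ld %ld\n", table[0](), table[1](), table[2]());	/* -38 1 2 */
	return 0;
}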
diff --git a/arch/riscv/kernel/compat_vdso/.gitignore b/arch/riscv/kernel/compat_vdso/.gitignore
new file mode 100644
index 000000000000..19d83d846c1e
--- /dev/null
+++ b/arch/riscv/kernel/compat_vdso/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+compat_vdso.lds
diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile
new file mode 100644
index 000000000000..260daf3236d3
--- /dev/null
+++ b/arch/riscv/kernel/compat_vdso/Makefile
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for compat_vdso
+#
+
+# Symbols present in the compat_vdso
+compat_vdso-syms = rt_sigreturn
+compat_vdso-syms += getcpu
+compat_vdso-syms += flush_icache
+
+COMPAT_CC := $(CC)
+COMPAT_LD := $(LD)
+
+COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
+COMPAT_LD_FLAGS := -melf32lriscv
+
+# Files to link into the compat_vdso
+obj-compat_vdso = $(patsubst %, %.o, $(compat_vdso-syms)) note.o
+
+# Build rules
+targets := $(obj-compat_vdso) compat_vdso.so compat_vdso.so.dbg compat_vdso.lds
+obj-compat_vdso := $(addprefix $(obj)/, $(obj-compat_vdso))
+
+obj-y += compat_vdso.o
+CPPFLAGS_compat_vdso.lds += -P -C -U$(ARCH)
+
+# Disable profiling and instrumentation for VDSO code
+GCOV_PROFILE := n
+KCOV_INSTRUMENT := n
+KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
+
+# Force dependency
+$(obj)/compat_vdso.o: $(obj)/compat_vdso.so
+
+# link rule for the .so file, .lds has to be first
+$(obj)/compat_vdso.so.dbg: $(obj)/compat_vdso.lds $(obj-compat_vdso) FORCE
+ $(call if_changed,compat_vdsold)
+LDFLAGS_compat_vdso.so.dbg = -shared -S -soname=linux-compat_vdso.so.1 \
+ --build-id=sha1 --hash-style=both --eh-frame-hdr
+
+$(obj-compat_vdso): %.o: %.S FORCE
+ $(call if_changed_dep,compat_vdsoas)
+
+# strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+# Generate VDSO offsets using helper script
+gen-compat_vdsosym := $(srctree)/$(src)/gen_compat_vdso_offsets.sh
+quiet_cmd_compat_vdsosym = VDSOSYM $@
+ cmd_compat_vdsosym = $(NM) $< | $(gen-compat_vdsosym) | LC_ALL=C sort > $@
+
+include/generated/compat_vdso-offsets.h: $(obj)/compat_vdso.so.dbg FORCE
+ $(call if_changed,compat_vdsosym)
+
+# actual build commands
+# The DSO images are built using a special linker script
+# Make sure only to export the intended __compat_vdso_xxx symbol offsets.
+quiet_cmd_compat_vdsold = VDSOLD $@
+ cmd_compat_vdsold = $(COMPAT_LD) $(ld_flags) $(COMPAT_LD_FLAGS) -T $(filter-out FORCE,$^) -o $@.tmp && \
+ $(OBJCOPY) $(patsubst %, -G __compat_vdso_%, $(compat_vdso-syms)) $@.tmp $@ && \
+ rm $@.tmp
+
+# actual build commands
+quiet_cmd_compat_vdsoas = VDSOAS $@
+ cmd_compat_vdsoas = $(COMPAT_CC) $(a_flags) $(COMPAT_CC_FLAGS) -c -o $@ $<
+
+# install commands for the unstripped file
+quiet_cmd_compat_vdso_install = INSTALL $@
+ cmd_compat_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/compat_vdso/$@
+
+compat_vdso.so: $(obj)/compat_vdso.so.dbg
+ @mkdir -p $(MODLIB)/compat_vdso
+ $(call cmd,compat_vdso_install)
+
+compat_vdso_install: compat_vdso.so
diff --git a/arch/riscv/kernel/compat_vdso/compat_vdso.S b/arch/riscv/kernel/compat_vdso/compat_vdso.S
new file mode 100644
index 000000000000..ffd66237e091
--- /dev/null
+++ b/arch/riscv/kernel/compat_vdso/compat_vdso.S
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#define vdso_start compat_vdso_start
+#define vdso_end compat_vdso_end
+
+#define __VDSO_PATH "arch/riscv/kernel/compat_vdso/compat_vdso.so"
+
+#include "../vdso/vdso.S"
diff --git a/arch/riscv/kernel/compat_vdso/compat_vdso.lds.S b/arch/riscv/kernel/compat_vdso/compat_vdso.lds.S
new file mode 100644
index 000000000000..c7c9355d311e
--- /dev/null
+++ b/arch/riscv/kernel/compat_vdso/compat_vdso.lds.S
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include "../vdso/vdso.lds.S"
diff --git a/arch/riscv/kernel/compat_vdso/flush_icache.S b/arch/riscv/kernel/compat_vdso/flush_icache.S
new file mode 100644
index 000000000000..523dd8b96045
--- /dev/null
+++ b/arch/riscv/kernel/compat_vdso/flush_icache.S
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include "../vdso/flush_icache.S"
diff --git a/arch/riscv/kernel/compat_vdso/gen_compat_vdso_offsets.sh b/arch/riscv/kernel/compat_vdso/gen_compat_vdso_offsets.sh
new file mode 100755
index 000000000000..8ac070c783b3
--- /dev/null
+++ b/arch/riscv/kernel/compat_vdso/gen_compat_vdso_offsets.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+LC_ALL=C
+sed -n -e 's/^[0]\+\(0[0-9a-fA-F]*\) . \(__vdso_[a-zA-Z0-9_]*\)$/\#define compat\2_offset\t0x\1/p'
diff --git a/arch/riscv/kernel/compat_vdso/getcpu.S b/arch/riscv/kernel/compat_vdso/getcpu.S
new file mode 100644
index 000000000000..10f463efe271
--- /dev/null
+++ b/arch/riscv/kernel/compat_vdso/getcpu.S
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include "../vdso/getcpu.S"
diff --git a/arch/riscv/kernel/compat_vdso/note.S b/arch/riscv/kernel/compat_vdso/note.S
new file mode 100644
index 000000000000..b10312907542
--- /dev/null
+++ b/arch/riscv/kernel/compat_vdso/note.S
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include "../vdso/note.S"
diff --git a/arch/riscv/kernel/compat_vdso/rt_sigreturn.S b/arch/riscv/kernel/compat_vdso/rt_sigreturn.S
new file mode 100644
index 000000000000..884aada4facc
--- /dev/null
+++ b/arch/riscv/kernel/compat_vdso/rt_sigreturn.S
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include "../vdso/rt_sigreturn.S"
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index ccb617791e56..fba9e9f46a8c 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -88,6 +88,7 @@ int riscv_of_parent_hartid(struct device_node *node)
*/
static struct riscv_isa_ext_data isa_ext_arr[] = {
__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
+ __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
__RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX),
};
@@ -138,6 +139,7 @@ static void print_mmu(struct seq_file *f)
{
char sv_type[16];
+#ifdef CONFIG_MMU
#if defined(CONFIG_32BIT)
strncpy(sv_type, "sv32", 5);
#elif defined(CONFIG_64BIT)
@@ -148,6 +150,9 @@ static void print_mmu(struct seq_file *f)
else
strncpy(sv_type, "sv39", 5);
#endif
+#else
+ strncpy(sv_type, "none", 5);
+#endif /* CONFIG_MMU */
seq_printf(f, "mmu\t\t: %s\n", sv_type);
}
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 1b2d42d7f589..a6f62a6d1edd 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -8,9 +8,15 @@
#include <linux/bitmap.h>
#include <linux/ctype.h>
+#include <linux/libfdt.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <asm/processor.h>
+#include <asm/alternative.h>
+#include <asm/errata_list.h>
#include <asm/hwcap.h>
+#include <asm/patch.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/switch_to.h>
@@ -192,6 +198,7 @@ void __init riscv_fill_hwcap(void)
set_bit(*ext - 'a', this_isa);
} else {
SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF);
+ SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT);
}
#undef SET_ISA_EXT_MAP
}
@@ -206,11 +213,10 @@ void __init riscv_fill_hwcap(void)
else
elf_hwcap = this_hwcap;
- if (bitmap_weight(riscv_isa, RISCV_ISA_EXT_MAX))
- bitmap_and(riscv_isa, riscv_isa, this_isa, RISCV_ISA_EXT_MAX);
- else
+ if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
bitmap_copy(riscv_isa, this_isa, RISCV_ISA_EXT_MAX);
-
+ else
+ bitmap_and(riscv_isa, riscv_isa, this_isa, RISCV_ISA_EXT_MAX);
}
/* We don't support systems with F but without D, so mask those out
@@ -237,3 +243,74 @@ void __init riscv_fill_hwcap(void)
static_branch_enable(&cpu_hwcap_fpu);
#endif
}
+
+#ifdef CONFIG_RISCV_ALTERNATIVE
+struct cpufeature_info {
+ char name[ERRATA_STRING_LENGTH_MAX];
+ bool (*check_func)(unsigned int stage);
+};
+
+static bool __init_or_module cpufeature_svpbmt_check_func(unsigned int stage)
+{
+#ifdef CONFIG_RISCV_ISA_SVPBMT
+ switch (stage) {
+ case RISCV_ALTERNATIVES_EARLY_BOOT:
+ return false;
+ default:
+ return riscv_isa_extension_available(NULL, SVPBMT);
+ }
+#endif
+
+ return false;
+}
+
+static const struct cpufeature_info __initdata_or_module
+cpufeature_list[CPUFEATURE_NUMBER] = {
+ {
+ .name = "svpbmt",
+ .check_func = cpufeature_svpbmt_check_func
+ },
+};
+
+static u32 __init_or_module cpufeature_probe(unsigned int stage)
+{
+ const struct cpufeature_info *info;
+ u32 cpu_req_feature = 0;
+ int idx;
+
+ for (idx = 0; idx < CPUFEATURE_NUMBER; idx++) {
+ info = &cpufeature_list[idx];
+
+ if (info->check_func(stage))
+ cpu_req_feature |= (1U << idx);
+ }
+
+ return cpu_req_feature;
+}
+
+void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
+ struct alt_entry *end,
+ unsigned int stage)
+{
+ u32 cpu_req_feature = cpufeature_probe(stage);
+ u32 cpu_apply_feature = 0;
+ struct alt_entry *alt;
+ u32 tmp;
+
+ for (alt = begin; alt < end; alt++) {
+ if (alt->vendor_id != 0)
+ continue;
+ if (alt->errata_id >= CPUFEATURE_NUMBER) {
+ WARN(1, "This feature id:%d is not in kernel cpufeature list",
+ alt->errata_id);
+ continue;
+ }
+
+ tmp = (1U << alt->errata_id);
+ if (cpu_req_feature & tmp) {
+ patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
+ cpu_apply_feature |= tmp;
+ }
+ }
+}
+#endif
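For reference, the fields the patch loop above relies on, roughly as laid out in asm/alternative.h (paraphrased from memory; check the header for the authoritative definition):

struct alt_entry {
	void *old_ptr;			/* default code emitted into .text            */
	void *alt_ptr;			/* replacement code kept in .alternative      */
	unsigned long vendor_id;	/* 0 for standard cpufeatures, else mvendorid */
	unsigned long alt_len;		/* number of bytes to patch                   */
	unsigned int errata_id;		/* errata number or CPUFEATURE_* index        */
};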
diff --git a/arch/riscv/kernel/crash_dump.c b/arch/riscv/kernel/crash_dump.c
index 86cc0ada5752..ea2158cee97b 100644
--- a/arch/riscv/kernel/crash_dump.c
+++ b/arch/riscv/kernel/crash_dump.c
@@ -7,22 +7,10 @@
#include <linux/crash_dump.h>
#include <linux/io.h>
+#include <linux/uio.h>
-/**
- * copy_oldmem_page() - copy one page from old kernel memory
- * @pfn: page frame number to be copied
- * @buf: buffer where the copied page is placed
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page
- * @userbuf: if set, @buf is in a user address space
- *
- * This function copies one page from old kernel memory into buffer pointed by
- * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
- * copied or negative error in case of failure.
- */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset,
- int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
{
void *vaddr;
@@ -33,13 +21,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!vaddr)
return -ENOMEM;
- if (userbuf) {
- if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
- memunmap(vaddr);
- return -EFAULT;
- }
- } else
- memcpy(buf, vaddr + offset, csize);
+ csize = copy_to_iter(vaddr + offset, csize, iter);
memunmap(vaddr);
return csize;
diff --git a/arch/riscv/kernel/efi.c b/arch/riscv/kernel/efi.c
index 024159298231..1aa540350abd 100644
--- a/arch/riscv/kernel/efi.c
+++ b/arch/riscv/kernel/efi.c
@@ -65,7 +65,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
if (md->attribute & EFI_MEMORY_RO) {
val = pte_val(pte) & ~_PAGE_WRITE;
- val = pte_val(pte) | _PAGE_READ;
+ val |= _PAGE_READ;
pte = __pte(val);
}
if (md->attribute & EFI_MEMORY_XP) {
diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
new file mode 100644
index 000000000000..9cb85095fd45
--- /dev/null
+++ b/arch/riscv/kernel/elf_kexec.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Load ELF vmlinux file for the kexec_file_load syscall.
+ *
+ * Copyright (C) 2021 Huawei Technologies Co, Ltd.
+ *
+ * Author: Liao Chang (liaochang1@huawei.com)
+ *
+ * Based on kexec-tools' kexec-elf-riscv.c, heavily modified
+ * for kernel.
+ */
+
+#define pr_fmt(fmt) "kexec_image: " fmt
+
+#include <linux/elf.h>
+#include <linux/kexec.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/libfdt.h>
+#include <linux/types.h>
+#include <linux/memblock.h>
+#include <asm/setup.h>
+
+static int riscv_kexec_elf_load(struct kimage *image, struct elfhdr *ehdr,
+ struct kexec_elf_info *elf_info, unsigned long old_pbase,
+ unsigned long new_pbase)
+{
+ int i;
+ int ret = 0;
+ size_t size;
+ struct kexec_buf kbuf;
+ const struct elf_phdr *phdr;
+
+ kbuf.image = image;
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &elf_info->proghdrs[i];
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ size = phdr->p_filesz;
+ if (size > phdr->p_memsz)
+ size = phdr->p_memsz;
+
+ kbuf.buffer = (void *) elf_info->buffer + phdr->p_offset;
+ kbuf.bufsz = size;
+ kbuf.buf_align = phdr->p_align;
+ kbuf.mem = phdr->p_paddr - old_pbase + new_pbase;
+ kbuf.memsz = phdr->p_memsz;
+ kbuf.top_down = false;
+ ret = kexec_add_buffer(&kbuf);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Go through the available physical memory regions and find one that can
+ * hold an image of the specified size.
+ */
+static int elf_find_pbase(struct kimage *image, unsigned long kernel_len,
+ struct elfhdr *ehdr, struct kexec_elf_info *elf_info,
+ unsigned long *old_pbase, unsigned long *new_pbase)
+{
+ int i;
+ int ret;
+ struct kexec_buf kbuf;
+ const struct elf_phdr *phdr;
+ unsigned long lowest_paddr = ULONG_MAX;
+ unsigned long lowest_vaddr = ULONG_MAX;
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &elf_info->proghdrs[i];
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ if (lowest_paddr > phdr->p_paddr)
+ lowest_paddr = phdr->p_paddr;
+
+ if (lowest_vaddr > phdr->p_vaddr)
+ lowest_vaddr = phdr->p_vaddr;
+ }
+
+ kbuf.image = image;
+ kbuf.buf_min = lowest_paddr;
+ kbuf.buf_max = ULONG_MAX;
+ kbuf.buf_align = PAGE_SIZE;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE);
+ kbuf.top_down = false;
+ ret = arch_kexec_locate_mem_hole(&kbuf);
+ if (!ret) {
+ *old_pbase = lowest_paddr;
+ *new_pbase = kbuf.mem;
+ image->start = ehdr->e_entry - lowest_vaddr + kbuf.mem;
+ }
+ return ret;
+}
+
+static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
+{
+ unsigned int *nr_ranges = arg;
+
+ (*nr_ranges)++;
+ return 0;
+}
+
+static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
+{
+ struct crash_mem *cmem = arg;
+
+ cmem->ranges[cmem->nr_ranges].start = res->start;
+ cmem->ranges[cmem->nr_ranges].end = res->end;
+ cmem->nr_ranges++;
+
+ return 0;
+}
+
+static int prepare_elf_headers(void **addr, unsigned long *sz)
+{
+ struct crash_mem *cmem;
+ unsigned int nr_ranges;
+ int ret;
+
+ nr_ranges = 1; /* For exclusion of crashkernel region */
+ walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);
+
+ cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
+ if (!cmem)
+ return -ENOMEM;
+
+ cmem->max_nr_ranges = nr_ranges;
+ cmem->nr_ranges = 0;
+ ret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback);
+ if (ret)
+ goto out;
+
+ /* Exclude crashkernel region */
+ ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
+ if (!ret)
+ ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
+
+out:
+ kfree(cmem);
+ return ret;
+}
+
+static char *setup_kdump_cmdline(struct kimage *image, char *cmdline,
+ unsigned long cmdline_len)
+{
+ int elfcorehdr_strlen;
+ char *cmdline_ptr;
+
+ cmdline_ptr = kzalloc(COMMAND_LINE_SIZE, GFP_KERNEL);
+ if (!cmdline_ptr)
+ return NULL;
+
+ elfcorehdr_strlen = sprintf(cmdline_ptr, "elfcorehdr=0x%lx ",
+ image->elf_load_addr);
+
+ if (elfcorehdr_strlen + cmdline_len > COMMAND_LINE_SIZE) {
+ pr_err("Appending elfcorehdr=<addr> exceeds cmdline size\n");
+ kfree(cmdline_ptr);
+ return NULL;
+ }
+
+ memcpy(cmdline_ptr + elfcorehdr_strlen, cmdline, cmdline_len);
+ /* Ensure it's nul terminated */
+ cmdline_ptr[COMMAND_LINE_SIZE - 1] = '\0';
+ return cmdline_ptr;
+}
+
+static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
+ unsigned long kernel_len, char *initrd,
+ unsigned long initrd_len, char *cmdline,
+ unsigned long cmdline_len)
+{
+ int ret;
+ unsigned long old_kernel_pbase = ULONG_MAX;
+ unsigned long new_kernel_pbase = 0UL;
+ unsigned long initrd_pbase = 0UL;
+ unsigned long headers_sz;
+ unsigned long kernel_start;
+ void *fdt, *headers;
+ struct elfhdr ehdr;
+ struct kexec_buf kbuf;
+ struct kexec_elf_info elf_info;
+ char *modified_cmdline = NULL;
+
+ ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = elf_find_pbase(image, kernel_len, &ehdr, &elf_info,
+ &old_kernel_pbase, &new_kernel_pbase);
+ if (ret)
+ goto out;
+ kernel_start = image->start;
+ pr_notice("The entry point of kernel at 0x%lx\n", image->start);
+
+ /* Add the kernel binary to the image */
+ ret = riscv_kexec_elf_load(image, &ehdr, &elf_info,
+ old_kernel_pbase, new_kernel_pbase);
+ if (ret)
+ goto out;
+
+ kbuf.image = image;
+ kbuf.buf_min = new_kernel_pbase + kernel_len;
+ kbuf.buf_max = ULONG_MAX;
+
+ /* Add elfcorehdr */
+ if (image->type == KEXEC_TYPE_CRASH) {
+ ret = prepare_elf_headers(&headers, &headers_sz);
+ if (ret) {
+ pr_err("Preparing elf core header failed\n");
+ goto out;
+ }
+
+ kbuf.buffer = headers;
+ kbuf.bufsz = headers_sz;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = headers_sz;
+ kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
+ kbuf.top_down = true;
+
+ ret = kexec_add_buffer(&kbuf);
+ if (ret) {
+ vfree(headers);
+ goto out;
+ }
+ image->elf_headers = headers;
+ image->elf_load_addr = kbuf.mem;
+ image->elf_headers_sz = headers_sz;
+
+ pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
+
+ /* Setup cmdline for kdump kernel case */
+ modified_cmdline = setup_kdump_cmdline(image, cmdline,
+ cmdline_len);
+ if (!modified_cmdline) {
+ pr_err("Setting up cmdline for kdump kernel failed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ cmdline = modified_cmdline;
+ }
+
+#ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY
+ /* Add purgatory to the image */
+ kbuf.top_down = true;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ ret = kexec_load_purgatory(image, &kbuf);
+ if (ret) {
+ pr_err("Error loading purgatory ret=%d\n", ret);
+ goto out;
+ }
+ ret = kexec_purgatory_get_set_symbol(image, "riscv_kernel_entry",
+ &kernel_start,
+ sizeof(kernel_start), 0);
+ if (ret)
+ pr_err("Error update purgatory ret=%d\n", ret);
+#endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */
+
+ /* Add the initrd to the image */
+ if (initrd != NULL) {
+ kbuf.buffer = initrd;
+ kbuf.bufsz = kbuf.memsz = initrd_len;
+ kbuf.buf_align = PAGE_SIZE;
+ kbuf.top_down = false;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ ret = kexec_add_buffer(&kbuf);
+ if (ret)
+ goto out;
+ initrd_pbase = kbuf.mem;
+ pr_notice("Loaded initrd at 0x%lx\n", initrd_pbase);
+ }
+
+ /* Add the DTB to the image */
+ fdt = of_kexec_alloc_and_setup_fdt(image, initrd_pbase,
+ initrd_len, cmdline, 0);
+ if (!fdt) {
+ pr_err("Error setting up the new device tree.\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fdt_pack(fdt);
+ kbuf.buffer = fdt;
+ kbuf.bufsz = kbuf.memsz = fdt_totalsize(fdt);
+ kbuf.buf_align = PAGE_SIZE;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.top_down = true;
+ ret = kexec_add_buffer(&kbuf);
+ if (ret) {
+ pr_err("Error add DTB kbuf ret=%d\n", ret);
+ goto out_free_fdt;
+ }
+ pr_notice("Loaded device tree at 0x%lx\n", kbuf.mem);
+ goto out;
+
+out_free_fdt:
+ kvfree(fdt);
+out:
+ kfree(modified_cmdline);
+ kexec_free_elf_info(&elf_info);
+ return ret ? ERR_PTR(ret) : NULL;
+}
+
+#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
+#define RISCV_IMM_BITS 12
+#define RISCV_IMM_REACH (1LL << RISCV_IMM_BITS)
+#define RISCV_CONST_HIGH_PART(x) \
+ (((x) + (RISCV_IMM_REACH >> 1)) & ~(RISCV_IMM_REACH - 1))
+#define RISCV_CONST_LOW_PART(x) ((x) - RISCV_CONST_HIGH_PART(x))
+
+#define ENCODE_ITYPE_IMM(x) \
+ (RV_X(x, 0, 12) << 20)
+#define ENCODE_BTYPE_IMM(x) \
+ ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | \
+ (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31))
+#define ENCODE_UTYPE_IMM(x) \
+ (RV_X(x, 12, 20) << 12)
+#define ENCODE_JTYPE_IMM(x) \
+ ((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | \
+ (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31))
+#define ENCODE_CBTYPE_IMM(x) \
+ ((RV_X(x, 1, 2) << 3) | (RV_X(x, 3, 2) << 10) | (RV_X(x, 5, 1) << 2) | \
+ (RV_X(x, 6, 2) << 5) | (RV_X(x, 8, 1) << 12))
+#define ENCODE_CJTYPE_IMM(x) \
+ ((RV_X(x, 1, 3) << 3) | (RV_X(x, 4, 1) << 11) | (RV_X(x, 5, 1) << 2) | \
+ (RV_X(x, 6, 1) << 7) | (RV_X(x, 7, 1) << 6) | (RV_X(x, 8, 2) << 9) | \
+ (RV_X(x, 10, 1) << 8) | (RV_X(x, 11, 1) << 12))
+#define ENCODE_UJTYPE_IMM(x) \
+ (ENCODE_UTYPE_IMM(RISCV_CONST_HIGH_PART(x)) | \
+ (ENCODE_ITYPE_IMM(RISCV_CONST_LOW_PART(x)) << 32))
+#define ENCODE_UITYPE_IMM(x) \
+ (ENCODE_UTYPE_IMM(x) | (ENCODE_ITYPE_IMM(x) << 32))
+
+#define CLEAN_IMM(type, x) \
+ ((~ENCODE_##type##_IMM((uint64_t)(-1))) & (x))
+
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+ Elf_Shdr *section,
+ const Elf_Shdr *relsec,
+ const Elf_Shdr *symtab)
+{
+ const char *strtab, *name, *shstrtab;
+ const Elf_Shdr *sechdrs;
+ Elf_Rela *relas;
+ int i, r_type;
+
+ /* String & section header string table */
+ sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
+ strtab = (char *)pi->ehdr + sechdrs[symtab->sh_link].sh_offset;
+ shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;
+
+ relas = (void *)pi->ehdr + relsec->sh_offset;
+
+ for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
+ const Elf_Sym *sym; /* symbol to relocate */
+ unsigned long addr; /* final location after relocation */
+ unsigned long val; /* relocated symbol value */
+ unsigned long sec_base; /* base address of the symbol's section */
+ void *loc; /* tmp location to modify */
+
+ sym = (void *)pi->ehdr + symtab->sh_offset;
+ sym += ELF64_R_SYM(relas[i].r_info);
+
+ if (sym->st_name)
+ name = strtab + sym->st_name;
+ else
+ name = shstrtab + sechdrs[sym->st_shndx].sh_name;
+
+ loc = pi->purgatory_buf;
+ loc += section->sh_offset;
+ loc += relas[i].r_offset;
+
+ if (sym->st_shndx == SHN_ABS)
+ sec_base = 0;
+ else if (sym->st_shndx >= pi->ehdr->e_shnum) {
+ pr_err("Invalid section %d for symbol %s\n",
+ sym->st_shndx, name);
+ return -ENOEXEC;
+ } else
+ sec_base = pi->sechdrs[sym->st_shndx].sh_addr;
+
+ val = sym->st_value;
+ val += sec_base;
+ val += relas[i].r_addend;
+
+ addr = section->sh_addr + relas[i].r_offset;
+
+ r_type = ELF64_R_TYPE(relas[i].r_info);
+
+ switch (r_type) {
+ case R_RISCV_BRANCH:
+ *(u32 *)loc = CLEAN_IMM(BTYPE, *(u32 *)loc) |
+ ENCODE_BTYPE_IMM(val - addr);
+ break;
+ case R_RISCV_JAL:
+ *(u32 *)loc = CLEAN_IMM(JTYPE, *(u32 *)loc) |
+ ENCODE_JTYPE_IMM(val - addr);
+ break;
+ /*
+ * With no R_RISCV_PCREL_LO12_S, R_RISCV_PCREL_LO12_I
+ * sym is expected to be next to R_RISCV_PCREL_HI20
+ * in purgatory relsec. Handle it like R_RISCV_CALL
+ * sym, instead of searching the whole relsec.
+ */
+ case R_RISCV_PCREL_HI20:
+ case R_RISCV_CALL:
+ *(u64 *)loc = CLEAN_IMM(UITYPE, *(u64 *)loc) |
+ ENCODE_UJTYPE_IMM(val - addr);
+ break;
+ case R_RISCV_RVC_BRANCH:
+ *(u32 *)loc = CLEAN_IMM(CBTYPE, *(u32 *)loc) |
+ ENCODE_CBTYPE_IMM(val - addr);
+ break;
+ case R_RISCV_RVC_JUMP:
+ *(u32 *)loc = CLEAN_IMM(CJTYPE, *(u32 *)loc) |
+ ENCODE_CJTYPE_IMM(val - addr);
+ break;
+ case R_RISCV_ADD32:
+ *(u32 *)loc += val;
+ break;
+ case R_RISCV_SUB32:
+ *(u32 *)loc -= val;
+ break;
+ /* It has been applied by R_RISCV_PCREL_HI20 sym */
+ case R_RISCV_PCREL_LO12_I:
+ case R_RISCV_ALIGN:
+ case R_RISCV_RELAX:
+ break;
+ default:
+ pr_err("Unknown rela relocation: %d\n", r_type);
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
+
+const struct kexec_file_ops elf_kexec_ops = {
+ .probe = kexec_elf_probe,
+ .load = elf_kexec_load,
+};
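A small self-check for the immediate encoders above (editor's illustration, not part of the patch; the inputs are arbitrary test values, and the two macros are copied from the definitions above):

#include <assert.h>

#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
#define ENCODE_ITYPE_IMM(x)	(RV_X(x, 0, 12) << 20)
#define ENCODE_UTYPE_IMM(x)	(RV_X(x, 12, 20) << 12)

int main(void)
{
	/* I-type: imm[11:0] lands in instruction bits 31:20 */
	assert(ENCODE_ITYPE_IMM(0x7ffUL) == 0x7ff00000UL);
	/* U-type: imm[31:12] lands in instruction bits 31:12 */
	assert(ENCODE_UTYPE_IMM(0x12345678UL) == 0x12345000UL);
	return 0;
}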
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index c8b9ce274b9a..2e5b88ca11ce 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -207,13 +207,27 @@ check_syscall_nr:
* Syscall number held in a7.
* If syscall number is above allowed value, redirect to ni_syscall.
*/
- bgeu a7, t0, 1f
+ bgeu a7, t0, 3f
+#ifdef CONFIG_COMPAT
+ REG_L s0, PT_STATUS(sp)
+ srli s0, s0, SR_UXL_SHIFT
+ andi s0, s0, (SR_UXL >> SR_UXL_SHIFT)
+ li t0, (SR_UXL_32 >> SR_UXL_SHIFT)
+ sub t0, s0, t0
+ bnez t0, 1f
+
+ /* Call compat_syscall */
+ la s0, compat_sys_call_table
+ j 2f
+1:
+#endif
/* Call syscall */
la s0, sys_call_table
+2:
slli t0, a7, RISCV_LGPTR
add s0, s0, t0
REG_L s0, 0(s0)
-1:
+3:
jalr s0
ret_from_syscall:
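The new branch in check_syscall_nr keys the table choice off SR_UXL in the saved status word. A toy C model of that control flow (editor's sketch; the names below are local stand-ins, and s0 already holds sys_ni_syscall at this point in the real entry code):

#include <stdio.h>
#include <stdbool.h>

#define NR_SYSCALLS 2
static long ni(void)        { return -38; }	/* ENOSYS */
static long native_fn(void) { return 64; }
static long compat_fn(void) { return 32; }

static long (*const native_table[NR_SYSCALLS])(void) = { ni, native_fn };
static long (*const compat_table[NR_SYSCALLS])(void) = { ni, compat_fn };

static long dispatch(unsigned long nr, bool uxl32)
{
	long (*fn)(void) = ni;			/* s0 preloaded with ni_syscall       */

	if (nr < NR_SYSCALLS)			/* bgeu a7, t0, 3f                    */
		fn = (uxl32 ? compat_table : native_table)[nr];	/* la/slli/add/REG_L  */
	return fn();				/* jalr s0                            */
}

int main(void)
{
	/* prints "64 32 -38": native task, UXL=32 task, out-of-range number */
	printf("%ld %ld %ld\n", dispatch(1, false), dispatch(1, true), dispatch(9, false));
	return 0;
}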
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 4716f4cdc038..2086f6585773 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -12,16 +12,14 @@
#include <asm/patch.h>
#ifdef CONFIG_DYNAMIC_FTRACE
-int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
+void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
mutex_lock(&text_mutex);
- return 0;
}
-int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
+void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
mutex_unlock(&text_mutex);
- return 0;
}
static int ftrace_check_current_call(unsigned long hook_pos,
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 893b8bb69391..b865046e4dbb 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -14,6 +14,7 @@
#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
+#include <asm/xip_fixup.h>
#include "efi-header.S"
__HEAD
@@ -297,6 +298,7 @@ clear_bss_done:
REG_S a0, (a2)
/* Initialize page tables and relocate to virtual addresses */
+ la tp, init_task
la sp, init_thread_union + THREAD_SIZE
XIP_FIXUP_OFFSET sp
#ifdef CONFIG_BUILTIN_DTB
diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
index cbef0fc73afa..df8e24559035 100644
--- a/arch/riscv/kernel/machine_kexec.c
+++ b/arch/riscv/kernel/machine_kexec.c
@@ -65,7 +65,9 @@ machine_kexec_prepare(struct kimage *image)
if (image->segment[i].memsz <= sizeof(fdt))
continue;
- if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt)))
+ if (image->file_mode)
+ memcpy(&fdt, image->segment[i].buf, sizeof(fdt));
+ else if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt)))
continue;
if (fdt_check_header(&fdt))
diff --git a/arch/riscv/kernel/machine_kexec_file.c b/arch/riscv/kernel/machine_kexec_file.c
new file mode 100644
index 000000000000..b0bf8c1722c0
--- /dev/null
+++ b/arch/riscv/kernel/machine_kexec_file.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * kexec_file for riscv, using vmlinux as the dump-capture kernel image.
+ *
+ * Copyright (C) 2021 Huawei Technologies Co, Ltd.
+ *
+ * Author: Liao Chang (liaochang1@huawei.com)
+ */
+#include <linux/kexec.h>
+
+const struct kexec_file_ops * const kexec_file_loaders[] = {
+ &elf_kexec_ops,
+ NULL
+};
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index c29cef90d1dd..91fe16bfaa07 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -11,6 +11,7 @@
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/pgtable.h>
+#include <asm/alternative.h>
#include <asm/sections.h>
/*
@@ -427,3 +428,31 @@ void *module_alloc(unsigned long size)
__builtin_return_address(0));
}
#endif
+
+static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
+{
+ const Elf_Shdr *s, *se;
+ const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+ if (strcmp(name, secstrs + s->sh_name) == 0)
+ return s;
+ }
+
+ return NULL;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ const Elf_Shdr *s;
+
+ s = find_section(hdr, sechdrs, ".alternative");
+ if (s)
+ apply_module_alternatives((void *)s->sh_addr, s->sh_size);
+
+ return 0;
+}
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 504b496787aa..ceb9ebab6558 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -84,6 +84,34 @@ void show_regs(struct pt_regs *regs)
dump_backtrace(regs, NULL, KERN_DEFAULT);
}
+#ifdef CONFIG_COMPAT
+static bool compat_mode_supported __read_mostly;
+
+bool compat_elf_check_arch(Elf32_Ehdr *hdr)
+{
+ return compat_mode_supported &&
+ hdr->e_machine == EM_RISCV &&
+ hdr->e_ident[EI_CLASS] == ELFCLASS32;
+}
+
+static int __init compat_mode_detect(void)
+{
+ unsigned long tmp = csr_read(CSR_STATUS);
+
+ csr_write(CSR_STATUS, (tmp & ~SR_UXL) | SR_UXL_32);
+ compat_mode_supported =
+ (csr_read(CSR_STATUS) & SR_UXL) == SR_UXL_32;
+
+ csr_write(CSR_STATUS, tmp);
+
+ pr_info("riscv: ELF compat mode %s",
+ compat_mode_supported ? "supported" : "failed");
+
+ return 0;
+}
+early_initcall(compat_mode_detect);
+#endif
+
void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
@@ -98,6 +126,15 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
}
regs->epc = pc;
regs->sp = sp;
+
+#ifdef CONFIG_64BIT
+ regs->status &= ~SR_UXL;
+
+ if (is_compat_task())
+ regs->status |= SR_UXL_32;
+ else
+ regs->status |= SR_UXL_64;
+#endif
}
void flush_thread(void)
@@ -120,13 +157,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
return 0;
}
-int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
- struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
/* p->thread holds context to be restored by __switch_to() */
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
/* Kernel thread */
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gp = gp_in_global;
@@ -134,8 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
childregs->status = SR_PP | SR_PIE;
p->thread.ra = (unsigned long)ret_from_kernel_thread;
- p->thread.s[0] = usp; /* fn */
- p->thread.s[1] = arg;
+ p->thread.s[0] = (unsigned long)args->fn;
+ p->thread.s[1] = (unsigned long)args->fn_arg;
} else {
*childregs = *(current_pt_regs());
if (usp) /* User fork */
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 793c7da0554b..2ae8280ae475 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -12,6 +12,7 @@
#include <asm/thread_info.h>
#include <asm/switch_to.h>
#include <linux/audit.h>
+#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/regset.h>
@@ -110,11 +111,6 @@ static const struct user_regset_view riscv_user_native_view = {
.n = ARRAY_SIZE(riscv_user_regset),
};
-const struct user_regset_view *task_user_regset_view(struct task_struct *task)
-{
- return &riscv_user_native_view;
-}
-
struct pt_regs_offset {
const char *name;
int offset;
@@ -272,3 +268,84 @@ __visible void do_syscall_trace_exit(struct pt_regs *regs)
trace_sys_exit(regs, regs_return_value(regs));
#endif
}
+
+#ifdef CONFIG_COMPAT
+static int compat_riscv_gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct compat_user_regs_struct cregs;
+
+ regs_to_cregs(&cregs, task_pt_regs(target));
+
+ return membuf_write(&to, &cregs,
+ sizeof(struct compat_user_regs_struct));
+}
+
+static int compat_riscv_gpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct compat_user_regs_struct cregs;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &cregs, 0, -1);
+
+ cregs_to_regs(&cregs, task_pt_regs(target));
+
+ return ret;
+}
+
+static const struct user_regset compat_riscv_user_regset[] = {
+ [REGSET_X] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(compat_elf_greg_t),
+ .align = sizeof(compat_elf_greg_t),
+ .regset_get = compat_riscv_gpr_get,
+ .set = compat_riscv_gpr_set,
+ },
+#ifdef CONFIG_FPU
+ [REGSET_F] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .regset_get = riscv_fpr_get,
+ .set = riscv_fpr_set,
+ },
+#endif
+};
+
+static const struct user_regset_view compat_riscv_user_native_view = {
+ .name = "riscv",
+ .e_machine = EM_RISCV,
+ .regsets = compat_riscv_user_regset,
+ .n = ARRAY_SIZE(compat_riscv_user_regset),
+};
+
+long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ compat_ulong_t caddr, compat_ulong_t cdata)
+{
+ long ret = -EIO;
+
+ switch (request) {
+ default:
+ ret = compat_ptrace_request(child, request, caddr, cdata);
+ break;
+ }
+
+ return ret;
+}
+#endif /* CONFIG_COMPAT */
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+#ifdef CONFIG_COMPAT
+ if (test_tsk_thread_flag(task, TIF_32BIT))
+ return &compat_riscv_user_native_view;
+ else
+#endif
+ return &riscv_user_native_view;
+}
diff --git a/arch/riscv/kernel/reset.c b/arch/riscv/kernel/reset.c
index 9c842c41684a..912288572226 100644
--- a/arch/riscv/kernel/reset.c
+++ b/arch/riscv/kernel/reset.c
@@ -23,16 +23,12 @@ void machine_restart(char *cmd)
void machine_halt(void)
{
- if (pm_power_off != NULL)
- pm_power_off();
- else
- default_power_off();
+ do_kernel_power_off();
+ default_power_off();
}
void machine_power_off(void)
{
- if (pm_power_off != NULL)
- pm_power_off();
- else
- default_power_off();
+ do_kernel_power_off();
+ default_power_off();
}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 834eb652a7b9..f0f36a4a0e9b 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -21,6 +21,7 @@
#include <linux/efi.h>
#include <linux/crash_dump.h>
+#include <asm/alternative.h>
#include <asm/cpu_ops.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable.h>
@@ -189,7 +190,7 @@ static void __init init_resources(void)
res = &mem_res[res_idx--];
res->name = "Reserved";
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE;
res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
@@ -214,7 +215,7 @@ static void __init init_resources(void)
if (unlikely(memblock_is_nomap(region))) {
res->name = "Reserved";
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE;
} else {
res->name = "System RAM";
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
@@ -295,6 +296,7 @@ void __init setup_arch(char **cmdline_p)
#endif
riscv_fill_hwcap();
+ apply_boot_alternatives();
}
static int __init topology_init(void)
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 9f4e59f80551..38b05ca6fe66 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -6,6 +6,7 @@
* Copyright (C) 2012 Regents of the University of California
*/
+#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
@@ -14,6 +15,7 @@
#include <asm/ucontext.h>
#include <asm/vdso.h>
+#include <asm/signal32.h>
#include <asm/switch_to.h>
#include <asm/csr.h>
@@ -261,7 +263,10 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
rseq_signal_deliver(ksig, regs);
/* Set up the stack frame */
- ret = setup_rt_frame(ksig, oldset, regs);
+ if (is_compat_task())
+ ret = compat_setup_rt_frame(ksig, oldset, regs);
+ else
+ ret = setup_rt_frame(ksig, oldset, regs);
signal_setup_done(ret, ksig, 0);
}
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 622f226454d5..f1e4948a4b52 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -32,7 +32,6 @@
#include <asm/sections.h>
#include <asm/sbi.h>
#include <asm/smp.h>
-#include <asm/alternative.h>
#include "head.h"
@@ -41,9 +40,6 @@ static DECLARE_COMPLETION(cpu_running);
void __init smp_prepare_boot_cpu(void)
{
init_cpu_topology();
-#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE
- apply_boot_alternatives();
-#endif
}
void __init smp_prepare_cpus(unsigned int max_cpus)
diff --git a/arch/riscv/kernel/suspend_entry.S b/arch/riscv/kernel/suspend_entry.S
index 4b07b809a2b8..aafcca58c19d 100644
--- a/arch/riscv/kernel/suspend_entry.S
+++ b/arch/riscv/kernel/suspend_entry.S
@@ -8,6 +8,7 @@
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/csr.h>
+#include <asm/xip_fixup.h>
.text
.altmacro
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index 12f8a7fce78b..9c0194f176fc 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -33,7 +33,9 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
{
return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
}
-#else
+#endif
+
+#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, off_t, offset)
@@ -44,7 +46,7 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
*/
return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
}
-#endif /* !CONFIG_64BIT */
+#endif
/*
* Allows the instruction cache to be flushed from userspace. Despite RISC-V
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index fe92e119e6a3..b40426509244 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -86,7 +86,7 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
}
}
-#if defined (CONFIG_XIP_KERNEL) && defined (CONFIG_RISCV_ERRATA_ALTERNATIVE)
+#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
#define __trap_section __section(".xip.traps")
#else
#define __trap_section
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index a9436a65161a..69b05b6c181b 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -23,6 +23,9 @@ struct vdso_data {
#endif
extern char vdso_start[], vdso_end[];
+#ifdef CONFIG_COMPAT
+extern char compat_vdso_start[], compat_vdso_end[];
+#endif
enum vvar_pages {
VVAR_DATA_PAGE_OFFSET,
@@ -30,6 +33,11 @@ enum vvar_pages {
VVAR_NR_PAGES,
};
+enum rv_vdso_map {
+ RV_VDSO_MAP_VVAR,
+ RV_VDSO_MAP_VDSO,
+};
+
#define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT)
/*
@@ -52,12 +60,6 @@ struct __vdso_info {
struct vm_special_mapping *cm;
};
-static struct __vdso_info vdso_info __ro_after_init = {
- .name = "vdso",
- .vdso_code_start = vdso_start,
- .vdso_code_end = vdso_end,
-};
-
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
@@ -66,37 +68,33 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
return 0;
}
-static int __init __vdso_init(void)
+static void __init __vdso_init(struct __vdso_info *vdso_info)
{
unsigned int i;
struct page **vdso_pagelist;
unsigned long pfn;
- if (memcmp(vdso_info.vdso_code_start, "\177ELF", 4)) {
- pr_err("vDSO is not a valid ELF object!\n");
- return -EINVAL;
- }
+ if (memcmp(vdso_info->vdso_code_start, "\177ELF", 4))
+ panic("vDSO is not a valid ELF object!\n");
- vdso_info.vdso_pages = (
- vdso_info.vdso_code_end -
- vdso_info.vdso_code_start) >>
+ vdso_info->vdso_pages = (
+ vdso_info->vdso_code_end -
+ vdso_info->vdso_code_start) >>
PAGE_SHIFT;
- vdso_pagelist = kcalloc(vdso_info.vdso_pages,
+ vdso_pagelist = kcalloc(vdso_info->vdso_pages,
sizeof(struct page *),
GFP_KERNEL);
if (vdso_pagelist == NULL)
- return -ENOMEM;
+ panic("vDSO kcalloc failed!\n");
/* Grab the vDSO code pages. */
- pfn = sym_to_pfn(vdso_info.vdso_code_start);
+ pfn = sym_to_pfn(vdso_info->vdso_code_start);
- for (i = 0; i < vdso_info.vdso_pages; i++)
+ for (i = 0; i < vdso_info->vdso_pages; i++)
vdso_pagelist[i] = pfn_to_page(pfn + i);
- vdso_info.cm->pages = vdso_pagelist;
-
- return 0;
+ vdso_info->cm->pages = vdso_pagelist;
}
#ifdef CONFIG_TIME_NS
@@ -116,13 +114,14 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma;
+ struct __vdso_info *vdso_info = mm->context.vdso_info;
mmap_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
unsigned long size = vma->vm_end - vma->vm_start;
- if (vma_is_special_mapping(vma, vdso_info.dm))
+ if (vma_is_special_mapping(vma, vdso_info->dm))
zap_page_range(vma, vma->vm_start, size);
}
@@ -187,12 +186,27 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
return vmf_insert_pfn(vma, vmf->address, pfn);
}
-enum rv_vdso_map {
- RV_VDSO_MAP_VVAR,
- RV_VDSO_MAP_VDSO,
+static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
+ [RV_VDSO_MAP_VVAR] = {
+ .name = "[vvar]",
+ .fault = vvar_fault,
+ },
+ [RV_VDSO_MAP_VDSO] = {
+ .name = "[vdso]",
+ .mremap = vdso_mremap,
+ },
};
-static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
+static struct __vdso_info vdso_info __ro_after_init = {
+ .name = "vdso",
+ .vdso_code_start = vdso_start,
+ .vdso_code_end = vdso_end,
+ .dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
+ .cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
+};
+
+#ifdef CONFIG_COMPAT
+static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = {
[RV_VDSO_MAP_VVAR] = {
.name = "[vvar]",
.fault = vvar_fault,
@@ -203,25 +217,37 @@ static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
},
};
+static struct __vdso_info compat_vdso_info __ro_after_init = {
+ .name = "compat_vdso",
+ .vdso_code_start = compat_vdso_start,
+ .vdso_code_end = compat_vdso_end,
+ .dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR],
+ .cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO],
+};
+#endif
+
static int __init vdso_init(void)
{
- vdso_info.dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR];
- vdso_info.cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO];
+ __vdso_init(&vdso_info);
+#ifdef CONFIG_COMPAT
+ __vdso_init(&compat_vdso_info);
+#endif
- return __vdso_init();
+ return 0;
}
arch_initcall(vdso_init);
static int __setup_additional_pages(struct mm_struct *mm,
struct linux_binprm *bprm,
- int uses_interp)
+ int uses_interp,
+ struct __vdso_info *vdso_info)
{
unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
void *ret;
BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
- vdso_text_len = vdso_info.vdso_pages << PAGE_SHIFT;
+ vdso_text_len = vdso_info->vdso_pages << PAGE_SHIFT;
/* Be sure to map the data page */
vdso_mapping_len = vdso_text_len + VVAR_SIZE;
@@ -232,16 +258,18 @@ static int __setup_additional_pages(struct mm_struct *mm,
}
ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
- (VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info.dm);
+ (VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info->dm);
if (IS_ERR(ret))
goto up_fail;
vdso_base += VVAR_SIZE;
mm->context.vdso = (void *)vdso_base;
+ mm->context.vdso_info = (void *)vdso_info;
+
ret =
_install_special_mapping(mm, vdso_base, vdso_text_len,
(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
- vdso_info.cm);
+ vdso_info->cm);
if (IS_ERR(ret))
goto up_fail;
@@ -253,6 +281,24 @@ up_fail:
return PTR_ERR(ret);
}
+#ifdef CONFIG_COMPAT
+int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
+ ret = __setup_additional_pages(mm, bprm, uses_interp,
+ &compat_vdso_info);
+ mmap_write_unlock(mm);
+
+ return ret;
+}
+#endif
+
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
@@ -261,7 +307,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (mmap_write_lock_killable(mm))
return -EINTR;
- ret = __setup_additional_pages(mm, bprm, uses_interp);
+ ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso_info);
mmap_write_unlock(mm);
return ret;
diff --git a/arch/riscv/kernel/vdso/vdso.S b/arch/riscv/kernel/vdso/vdso.S
index df222245be05..83f1c899e8d8 100644
--- a/arch/riscv/kernel/vdso/vdso.S
+++ b/arch/riscv/kernel/vdso/vdso.S
@@ -7,12 +7,16 @@
#include <linux/linkage.h>
#include <asm/page.h>
+#ifndef __VDSO_PATH
+#define __VDSO_PATH "arch/riscv/kernel/vdso/vdso.so"
+#endif
+
__PAGE_ALIGNED_DATA
.globl vdso_start, vdso_end
.balign PAGE_SIZE
vdso_start:
- .incbin "arch/riscv/kernel/vdso/vdso.so"
+ .incbin __VDSO_PATH
.balign PAGE_SIZE
vdso_end:
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 4e9efbe46d5f..40694f0cab9e 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -102,9 +102,9 @@ static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
pgd_t *pgd, *pgd_k;
- pud_t *pud, *pud_k;
- p4d_t *p4d, *p4d_k;
- pmd_t *pmd, *pmd_k;
+ pud_t *pud_k;
+ p4d_t *p4d_k;
+ pmd_t *pmd_k;
pte_t *pte_k;
int index;
unsigned long pfn;
@@ -132,14 +132,12 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
}
set_pgd(pgd, *pgd_k);
- p4d = p4d_offset(pgd, addr);
p4d_k = p4d_offset(pgd_k, addr);
if (!p4d_present(*p4d_k)) {
no_context(regs, addr);
return;
}
- pud = pud_offset(p4d, addr);
pud_k = pud_offset(p4d_k, addr);
if (!pud_present(*pud_k)) {
no_context(regs, addr);
@@ -150,13 +148,11 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
* Since the vmalloc area is global, it is unnecessary
* to copy individual PTEs
*/
- pmd = pmd_offset(pud, addr);
pmd_k = pmd_offset(pud_k, addr);
if (!pmd_present(*pmd_k)) {
no_context(regs, addr);
return;
}
- set_pmd(pmd, *pmd_k);
/*
* Make sure the actual PTE exists as well to
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 180d6a3e2a05..d466ec670e1f 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -76,38 +76,74 @@ static void __init zone_sizes_init(void)
}
#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
+
+#define LOG2_SZ_1K ilog2(SZ_1K)
+#define LOG2_SZ_1M ilog2(SZ_1M)
+#define LOG2_SZ_1G ilog2(SZ_1G)
+#define LOG2_SZ_1T ilog2(SZ_1T)
+
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld kB)\n", name, b, t,
- (((t) - (b)) >> 10));
+ (((t) - (b)) >> LOG2_SZ_1K));
}
static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld MB)\n", name, b, t,
- (((t) - (b)) >> 20));
+ (((t) - (b)) >> LOG2_SZ_1M));
+}
+
+static inline void print_mlg(char *name, unsigned long b, unsigned long t)
+{
+ pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld GB)\n", name, b, t,
+ (((t) - (b)) >> LOG2_SZ_1G));
+}
+
+#ifdef CONFIG_64BIT
+static inline void print_mlt(char *name, unsigned long b, unsigned long t)
+{
+ pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld TB)\n", name, b, t,
+ (((t) - (b)) >> LOG2_SZ_1T));
+}
+#else
+#define print_mlt(n, b, t) do {} while (0)
+#endif
+
+static inline void print_ml(char *name, unsigned long b, unsigned long t)
+{
+ unsigned long diff = t - b;
+
+ if (IS_ENABLED(CONFIG_64BIT) && (diff >> LOG2_SZ_1T) >= 10)
+ print_mlt(name, b, t);
+ else if ((diff >> LOG2_SZ_1G) >= 10)
+ print_mlg(name, b, t);
+ else if ((diff >> LOG2_SZ_1M) >= 10)
+ print_mlm(name, b, t);
+ else
+ print_mlk(name, b, t);
}
static void __init print_vm_layout(void)
{
pr_notice("Virtual kernel memory layout:\n");
- print_mlk("fixmap", (unsigned long)FIXADDR_START,
- (unsigned long)FIXADDR_TOP);
- print_mlm("pci io", (unsigned long)PCI_IO_START,
- (unsigned long)PCI_IO_END);
- print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
- (unsigned long)VMEMMAP_END);
- print_mlm("vmalloc", (unsigned long)VMALLOC_START,
- (unsigned long)VMALLOC_END);
- print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
- (unsigned long)high_memory);
+ print_ml("fixmap", (unsigned long)FIXADDR_START,
+ (unsigned long)FIXADDR_TOP);
+ print_ml("pci io", (unsigned long)PCI_IO_START,
+ (unsigned long)PCI_IO_END);
+ print_ml("vmemmap", (unsigned long)VMEMMAP_START,
+ (unsigned long)VMEMMAP_END);
+ print_ml("vmalloc", (unsigned long)VMALLOC_START,
+ (unsigned long)VMALLOC_END);
+ print_ml("lowmem", (unsigned long)PAGE_OFFSET,
+ (unsigned long)high_memory);
if (IS_ENABLED(CONFIG_64BIT)) {
#ifdef CONFIG_KASAN
- print_mlm("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END);
+ print_ml("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
- print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
- (unsigned long)ADDRESS_SPACE_END);
+ print_ml("kernel", (unsigned long)KERNEL_LINK_ADDR,
+ (unsigned long)ADDRESS_SPACE_END);
}
}
#else
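The print_ml() helper added above picks the largest unit in which the region spans at least ten units (TB on 64-bit, then GB, MB, and finally kB). A minimal stand-alone sketch of that selection logic, using local stand-in names rather than the kernel helpers:

#include <stdio.h>

/* Mirrors print_ml()'s unit choice: use the largest unit in which the
 * region covers at least 10 units; the TB case is 64-bit only in the
 * kernel. Illustrative user-space sketch, not kernel code. */
static const char *pick_unit(unsigned long long bytes)
{
	if ((bytes >> 40) >= 10) return "TB";
	if ((bytes >> 30) >= 10) return "GB";
	if ((bytes >> 20) >= 10) return "MB";
	return "kB";
}

int main(void)
{
	printf("%s\n", pick_unit(64ULL << 20)); /* 64 MB -> MB */
	printf("%s\n", pick_unit(8ULL << 30));  /* 8 GB  -> MB (less than 10 GB) */
	printf("%s\n", pick_unit(16ULL << 40)); /* 16 TB -> TB */
	return 0;
}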
@@ -578,9 +614,9 @@ static void __init create_p4d_mapping(p4d_t *p4dp,
create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next ((uintptr_t)fixmap_pte)
#define early_dtb_pgd_next ((uintptr_t)early_dtb_pmd)
-#define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot)
-#define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot)
-#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot)
+#define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
+#define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
+#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
#endif /* __PAGETABLE_PMD_FOLDED */
void __init create_pgd_mapping(pgd_t *pgdp,
@@ -671,7 +707,7 @@ static __init pgprot_t pgprot_from_va(uintptr_t va)
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
static void __init disable_pgtable_l5(void)
{
pgtable_l5_enabled = false;
@@ -843,7 +879,7 @@ static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
* MMU is not enabled, the page tables are allocated directly using
* early_pmd/pud/p4d and the address returned is the physical one.
*/
-void __init pt_ops_set_early(void)
+static void __init pt_ops_set_early(void)
{
pt_ops.alloc_pte = alloc_pte_early;
pt_ops.get_pte_virt = get_pte_virt_early;
@@ -865,7 +901,7 @@ void __init pt_ops_set_early(void)
* Note that this is called with MMU disabled, hence kernel_mapping_pa_to_va,
* but it will be used as described above.
*/
-void __init pt_ops_set_fixmap(void)
+static void __init pt_ops_set_fixmap(void)
{
pt_ops.alloc_pte = kernel_mapping_pa_to_va((uintptr_t)alloc_pte_fixmap);
pt_ops.get_pte_virt = kernel_mapping_pa_to_va((uintptr_t)get_pte_virt_fixmap);
@@ -883,7 +919,7 @@ void __init pt_ops_set_fixmap(void)
* MMU is enabled and page table setup is complete, so from now, we can use
* generic page allocation functions to setup page table.
*/
-void __init pt_ops_set_late(void)
+static void __init pt_ops_set_late(void)
{
pt_ops.alloc_pte = alloc_pte_late;
pt_ops.get_pte_virt = get_pte_virt_late;
@@ -947,6 +983,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
#endif
+ apply_early_boot_alternatives();
pt_ops_set_early();
/* Setup early PGD for fixmap */
diff --git a/arch/riscv/purgatory/.gitignore b/arch/riscv/purgatory/.gitignore
new file mode 100644
index 000000000000..38d7d1bda4d7
--- /dev/null
+++ b/arch/riscv/purgatory/.gitignore
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+purgatory.chk
+purgatory.ro
+kexec-purgatory.c
diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile
new file mode 100644
index 000000000000..d4df200f7edf
--- /dev/null
+++ b/arch/riscv/purgatory/Makefile
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: GPL-2.0
+OBJECT_FILES_NON_STANDARD := y
+
+purgatory-y := purgatory.o sha256.o entry.o string.o ctype.o memcpy.o memset.o
+
+targets += $(purgatory-y)
+PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
+
+$(obj)/string.o: $(srctree)/lib/string.c FORCE
+ $(call if_changed_rule,cc_o_c)
+
+$(obj)/ctype.o: $(srctree)/lib/ctype.c FORCE
+ $(call if_changed_rule,cc_o_c)
+
+$(obj)/memcpy.o: $(srctree)/arch/riscv/lib/memcpy.S FORCE
+ $(call if_changed_rule,as_o_S)
+
+$(obj)/memset.o: $(srctree)/arch/riscv/lib/memset.S FORCE
+ $(call if_changed_rule,as_o_S)
+
+$(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
+ $(call if_changed_rule,cc_o_c)
+
+CFLAGS_sha256.o := -D__DISABLE_EXPORTS
+CFLAGS_string.o := -D__DISABLE_EXPORTS
+CFLAGS_ctype.o := -D__DISABLE_EXPORTS
+
+# When linking purgatory.ro with -r, unresolved symbols are not checked;
+# also link a purgatory.chk binary without -r to check for unresolved symbols.
+PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib
+LDFLAGS_purgatory.ro := -r $(PURGATORY_LDFLAGS)
+LDFLAGS_purgatory.chk := $(PURGATORY_LDFLAGS)
+targets += purgatory.ro purgatory.chk
+
+# Sanitizer, etc. runtimes are unavailable and cannot be linked here.
+GCOV_PROFILE := n
+KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
+KCSAN_SANITIZE := n
+KCOV_INSTRUMENT := n
+
+# These are adjustments to the compiler flags used for objects that
+# make up the standalone purgatory.ro
+
+PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
+PURGATORY_CFLAGS := -mcmodel=medany -ffreestanding -fno-zero-initialized-in-bss
+PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
+PURGATORY_CFLAGS += -fno-stack-protector -g0
+
+# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
+# in turn leaves some undefined symbols like __fentry__ in purgatory, and it
+# is not clear how to relocate those.
+ifdef CONFIG_FUNCTION_TRACER
+PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_FTRACE)
+endif
+
+ifdef CONFIG_STACKPROTECTOR
+PURGATORY_CFLAGS_REMOVE += -fstack-protector
+endif
+
+ifdef CONFIG_STACKPROTECTOR_STRONG
+PURGATORY_CFLAGS_REMOVE += -fstack-protector-strong
+endif
+
+CFLAGS_REMOVE_purgatory.o += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_purgatory.o += $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_sha256.o += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_sha256.o += $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_string.o += $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_ctype.o += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_ctype.o += $(PURGATORY_CFLAGS)
+
+AFLAGS_REMOVE_entry.o += -Wa,-gdwarf-2
+AFLAGS_REMOVE_memcpy.o += -Wa,-gdwarf-2
+AFLAGS_REMOVE_memset.o += -Wa,-gdwarf-2
+
+$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+ $(call if_changed,ld)
+
+$(obj)/purgatory.chk: $(obj)/purgatory.ro FORCE
+ $(call if_changed,ld)
+
+targets += kexec-purgatory.c
+
+quiet_cmd_bin2c = BIN2C $@
+ cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
+
+$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro $(obj)/purgatory.chk FORCE
+ $(call if_changed,bin2c)
+
+obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += kexec-purgatory.o
diff --git a/arch/riscv/purgatory/entry.S b/arch/riscv/purgatory/entry.S
new file mode 100644
index 000000000000..0194f4554130
--- /dev/null
+++ b/arch/riscv/purgatory/entry.S
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * purgatory: Runs between two kernels
+ *
+ * Copyright (C) 2022 Huawei Technologies Co, Ltd.
+ *
+ * Author: Li Zhengyu (lizhengyu3@huawei.com)
+ *
+ */
+
+.macro size, sym:req
+ .size \sym, . - \sym
+.endm
+
+.text
+
+.globl purgatory_start
+purgatory_start:
+
+ lla sp, .Lstack
+ mv s0, a0 /* The hartid of the current hart */
+ mv s1, a1 /* Phys address of the FDT image */
+
+ jal purgatory
+
+ /* Start new image. */
+ mv a0, s0
+ mv a1, s1
+ ld a2, riscv_kernel_entry
+ jr a2
+
+size purgatory_start
+
+.align 4
+ .rept 256
+ .quad 0
+ .endr
+.Lstack:
+
+.data
+
+.globl riscv_kernel_entry
+riscv_kernel_entry:
+ .quad 0
+size riscv_kernel_entry
+
+.end
diff --git a/arch/riscv/purgatory/purgatory.c b/arch/riscv/purgatory/purgatory.c
new file mode 100644
index 000000000000..80596ab5fb62
--- /dev/null
+++ b/arch/riscv/purgatory/purgatory.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * purgatory: Runs between two kernels
+ *
+ * Copyright (C) 2022 Huawei Technologies Co, Ltd.
+ *
+ * Author: Li Zhengyu (lizhengyu3@huawei.com)
+ *
+ */
+
+#include <linux/purgatory.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/string.h>
+
+u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(".kexec-purgatory");
+
+struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(".kexec-purgatory");
+
+static int verify_sha256_digest(void)
+{
+ struct kexec_sha_region *ptr, *end;
+ struct sha256_state ss;
+ u8 digest[SHA256_DIGEST_SIZE];
+
+ sha256_init(&ss);
+ end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions);
+ for (ptr = purgatory_sha_regions; ptr < end; ptr++)
+ sha256_update(&ss, (uint8_t *)(ptr->start), ptr->len);
+ sha256_final(&ss, digest);
+ if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)) != 0)
+ return 1;
+ return 0;
+}
+
+/* workaround for a warning with -Wmissing-prototypes */
+void purgatory(void);
+
+void purgatory(void)
+{
+ if (verify_sha256_digest())
+ for (;;)
+ /* loop forever */
+ ;
+}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 5886e039e16d..b1a88f6cc349 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -418,9 +418,6 @@ config COMPAT
(and some other stuff like libraries and such) is needed for
executing 31 bit applications. It is safe to say "Y".
-config SYSVIPC_COMPAT
- def_bool y if COMPAT && SYSVIPC
-
config SMP
def_bool y
@@ -735,11 +732,11 @@ config VFIO_AP
depends on S390_AP_IOMMU && VFIO_MDEV && KVM
depends on ZCRYPT
help
- This driver grants access to Adjunct Processor (AP) devices
- via the VFIO mediated device interface.
+ This driver grants access to Adjunct Processor (AP) devices
+ via the VFIO mediated device interface.
- To compile this driver as a module, choose M here: the module
- will be called vfio_ap.
+ To compile this driver as a module, choose M here: the module
+ will be called vfio_ap.
endmenu
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index e94a2a7f6bf4..c4300ea4abf8 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -14,9 +14,9 @@ config DEBUG_ENTRY
If unsure, say N.
config CIO_INJECT
- bool "CIO Inject interfaces"
- depends on DEBUG_KERNEL && DEBUG_FS
- help
- This option provides a debugging facility to inject certain artificial events
- and instruction responses to the CIO layer of Linux kernel. The newly created
- debugfs user-interfaces will be at /sys/kernel/debug/s390/cio/*
+ bool "CIO Inject interfaces"
+ depends on DEBUG_KERNEL && DEBUG_FS
+ help
+ This option provides a debugging facility to inject certain artificial events
+ and instruction responses to the CIO layer of Linux kernel. The newly created
+ debugfs user-interfaces will be at /sys/kernel/debug/s390/cio/*
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 54c7536f2482..1023e9d43d44 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -701,7 +701,7 @@ static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
unsigned int nbytes)
{
gw->walk_bytes_remain -= nbytes;
- scatterwalk_unmap(&gw->walk);
+ scatterwalk_unmap(gw->walk_ptr);
scatterwalk_advance(&gw->walk, nbytes);
scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
gw->walk_ptr = NULL;
@@ -776,7 +776,7 @@ static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
goto out;
}
- scatterwalk_unmap(&gw->walk);
+ scatterwalk_unmap(gw->walk_ptr);
gw->walk_ptr = NULL;
gw->ptr = gw->buf;
diff --git a/arch/s390/crypto/chacha-glue.c b/arch/s390/crypto/chacha-glue.c
index ccfff73e2c93..2ec51f339cec 100644
--- a/arch/s390/crypto/chacha-glue.c
+++ b/arch/s390/crypto/chacha-glue.c
@@ -62,6 +62,34 @@ static int chacha20_s390(struct skcipher_request *req)
return rc;
}
+void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
+{
+ /* TODO: implement hchacha_block_arch() in assembly */
+ hchacha_block_generic(state, stream, nrounds);
+}
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
+{
+ chacha_init_generic(state, key, iv);
+}
+EXPORT_SYMBOL(chacha_init_arch);
+
+void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+	/* The s390 chacha20 implementation has 20 rounds hard-coded; it
+	 * cannot handle a block of data or less, but otherwise it can
+	 * handle data of arbitrary size.
+	 */
+ if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20)
+ chacha_crypt_generic(state, dst, src, bytes, nrounds);
+ else
+ chacha20_crypt_s390(state, dst, src, bytes,
+ &state[4], &state[12]);
+}
+EXPORT_SYMBOL(chacha_crypt_arch);
+
static struct skcipher_alg chacha_algs[] = {
{
.base.cra_name = "chacha20",
@@ -83,12 +111,14 @@ static struct skcipher_alg chacha_algs[] = {
static int __init chacha_mod_init(void)
{
- return crypto_register_skciphers(chacha_algs, ARRAY_SIZE(chacha_algs));
+ return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
+ crypto_register_skciphers(chacha_algs, ARRAY_SIZE(chacha_algs)) : 0;
}
static void __exit chacha_mod_fini(void)
{
- crypto_unregister_skciphers(chacha_algs, ARRAY_SIZE(chacha_algs));
+ if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER))
+ crypto_unregister_skciphers(chacha_algs, ARRAY_SIZE(chacha_algs));
}
module_cpu_feature_match(VXRS, chacha_mod_init);
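The chacha_crypt_arch() wrapper added above routes a request to the vectorized s390 code only when it is larger than one ChaCha block and uses exactly 20 rounds; everything else falls back to the generic implementation. A hedged, user-space sketch of that dispatch shape, with placeholder back ends (crypt_generic/crypt_s390 are stand-ins, not the kernel functions):

#include <stdio.h>

#define CHACHA_BLOCK_SIZE 64	/* ChaCha operates on 64-byte blocks */

/* Placeholders for chacha_crypt_generic() and chacha20_crypt_s390(). */
static void crypt_generic(unsigned int bytes) { printf("generic: %u bytes\n", bytes); }
static void crypt_s390(unsigned int bytes)    { printf("s390 vx: %u bytes\n", bytes); }

static void crypt_dispatch(unsigned int bytes, int nrounds)
{
	/* Accelerated path needs more than one block and exactly 20 rounds. */
	if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20)
		crypt_generic(bytes);
	else
		crypt_s390(bytes);
}

int main(void)
{
	crypt_dispatch(32, 20);   /* one block or less -> generic */
	crypt_dispatch(4096, 12); /* chacha12          -> generic */
	crypt_dispatch(4096, 20); /* large chacha20    -> s390 path */
	return 0;
}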
diff --git a/arch/s390/include/asm/asm-extable.h b/arch/s390/include/asm/asm-extable.h
index f24d9591aaed..b74f1070ddb2 100644
--- a/arch/s390/include/asm/asm-extable.h
+++ b/arch/s390/include/asm/asm-extable.h
@@ -3,12 +3,24 @@
#define __ASM_EXTABLE_H
#include <linux/stringify.h>
+#include <linux/bits.h>
#include <asm/asm-const.h>
-#define EX_TYPE_NONE 0
-#define EX_TYPE_FIXUP 1
-#define EX_TYPE_BPF 2
-#define EX_TYPE_UACCESS 3
+#define EX_TYPE_NONE 0
+#define EX_TYPE_FIXUP 1
+#define EX_TYPE_BPF 2
+#define EX_TYPE_UA_STORE 3
+#define EX_TYPE_UA_LOAD_MEM 4
+#define EX_TYPE_UA_LOAD_REG 5
+
+#define EX_DATA_REG_ERR_SHIFT 0
+#define EX_DATA_REG_ERR GENMASK(3, 0)
+
+#define EX_DATA_REG_ADDR_SHIFT 4
+#define EX_DATA_REG_ADDR GENMASK(7, 4)
+
+#define EX_DATA_LEN_SHIFT 8
+#define EX_DATA_LEN GENMASK(11, 8)
#define __EX_TABLE(_section, _fault, _target, _type) \
stringify_in_c(.section _section,"a";) \
@@ -19,35 +31,58 @@
stringify_in_c(.short 0;) \
stringify_in_c(.previous)
-#define __EX_TABLE_UA(_section, _fault, _target, _type, _reg) \
- stringify_in_c(.section _section,"a";) \
- stringify_in_c(.align 4;) \
- stringify_in_c(.long (_fault) - .;) \
- stringify_in_c(.long (_target) - .;) \
- stringify_in_c(.short (_type);) \
- stringify_in_c(.macro extable_reg reg;) \
- stringify_in_c(.set .Lfound, 0;) \
- stringify_in_c(.set .Lregnr, 0;) \
- stringify_in_c(.irp rs,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15;) \
- stringify_in_c(.ifc "\reg", "%%\rs";) \
- stringify_in_c(.set .Lfound, 1;) \
- stringify_in_c(.short .Lregnr;) \
- stringify_in_c(.endif;) \
- stringify_in_c(.set .Lregnr, .Lregnr+1;) \
- stringify_in_c(.endr;) \
- stringify_in_c(.ifne (.Lfound != 1);) \
- stringify_in_c(.error "extable_reg: bad register argument";) \
- stringify_in_c(.endif;) \
- stringify_in_c(.endm;) \
- stringify_in_c(extable_reg _reg;) \
- stringify_in_c(.purgem extable_reg;) \
+#define __EX_TABLE_UA(_section, _fault, _target, _type, _regerr, _regaddr, _len)\
+ stringify_in_c(.section _section,"a";) \
+ stringify_in_c(.align 4;) \
+ stringify_in_c(.long (_fault) - .;) \
+ stringify_in_c(.long (_target) - .;) \
+ stringify_in_c(.short (_type);) \
+ stringify_in_c(.macro extable_reg regerr, regaddr;) \
+ stringify_in_c(.set .Lfound, 0;) \
+ stringify_in_c(.set .Lcurr, 0;) \
+ stringify_in_c(.irp rs,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15;) \
+ stringify_in_c( .ifc "\regerr", "%%r\rs";) \
+ stringify_in_c( .set .Lfound, 1;) \
+ stringify_in_c( .set .Lregerr, .Lcurr;) \
+ stringify_in_c( .endif;) \
+ stringify_in_c( .set .Lcurr, .Lcurr+1;) \
+ stringify_in_c(.endr;) \
+ stringify_in_c(.ifne (.Lfound != 1);) \
+ stringify_in_c( .error "extable_reg: bad register argument1";) \
+ stringify_in_c(.endif;) \
+ stringify_in_c(.set .Lfound, 0;) \
+ stringify_in_c(.set .Lcurr, 0;) \
+ stringify_in_c(.irp rs,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15;) \
+ stringify_in_c( .ifc "\regaddr", "%%r\rs";) \
+ stringify_in_c( .set .Lfound, 1;) \
+ stringify_in_c( .set .Lregaddr, .Lcurr;) \
+ stringify_in_c( .endif;) \
+ stringify_in_c( .set .Lcurr, .Lcurr+1;) \
+ stringify_in_c(.endr;) \
+ stringify_in_c(.ifne (.Lfound != 1);) \
+ stringify_in_c( .error "extable_reg: bad register argument2";) \
+ stringify_in_c(.endif;) \
+ stringify_in_c(.short .Lregerr << EX_DATA_REG_ERR_SHIFT | \
+ .Lregaddr << EX_DATA_REG_ADDR_SHIFT | \
+ _len << EX_DATA_LEN_SHIFT;) \
+ stringify_in_c(.endm;) \
+ stringify_in_c(extable_reg _regerr,_regaddr;) \
+ stringify_in_c(.purgem extable_reg;) \
stringify_in_c(.previous)
#define EX_TABLE(_fault, _target) \
__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FIXUP)
+
#define EX_TABLE_AMODE31(_fault, _target) \
__EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP)
-#define EX_TABLE_UA(_fault, _target, _reg) \
- __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UACCESS, _reg)
+
+#define EX_TABLE_UA_STORE(_fault, _target, _regerr) \
+ __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0)
+
+#define EX_TABLE_UA_LOAD_MEM(_fault, _target, _regerr, _regmem, _len) \
+ __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len)
+
+#define EX_TABLE_UA_LOAD_REG(_fault, _target, _regerr, _regzero) \
+ __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0)
#endif /* __ASM_EXTABLE_H */
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 7d6fe813ac39..a386070f1d56 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -13,6 +13,18 @@
#define compat_mode_t compat_mode_t
typedef u16 compat_mode_t;
+#define __compat_uid_t __compat_uid_t
+typedef u16 __compat_uid_t;
+typedef u16 __compat_gid_t;
+
+#define compat_dev_t compat_dev_t
+typedef u16 compat_dev_t;
+
+#define compat_ipc_pid_t compat_ipc_pid_t
+typedef u16 compat_ipc_pid_t;
+
+#define compat_statfs compat_statfs
+
#include <asm-generic/compat.h>
#define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p( \
@@ -30,15 +42,9 @@ typedef u16 compat_mode_t;
PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \
PSW32_ASC_PRIMARY)
-#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "s390\0\0\0\0"
-typedef u16 __compat_uid_t;
-typedef u16 __compat_gid_t;
-typedef u16 compat_dev_t;
typedef u16 compat_nlink_t;
-typedef u16 compat_ipc_pid_t;
-typedef __kernel_fsid_t compat_fsid_t;
typedef struct {
u32 mask;
@@ -79,26 +85,6 @@ struct compat_stat {
u32 __unused5;
};
-struct compat_flock {
- short l_type;
- short l_whence;
- compat_off_t l_start;
- compat_off_t l_len;
- compat_pid_t l_pid;
-};
-
-#define F_GETLK64 12
-#define F_SETLK64 13
-#define F_SETLKW64 14
-
-struct compat_flock64 {
- short l_type;
- short l_whence;
- compat_loff_t l_start;
- compat_loff_t l_len;
- compat_pid_t l_pid;
-};
-
struct compat_statfs {
u32 f_type;
u32 f_bsize;
@@ -129,10 +115,6 @@ struct compat_statfs64 {
u32 f_spare[4];
};
-#define COMPAT_RLIM_INFINITY 0xffffffff
-
-#define COMPAT_OFF_T_MAX 0x7fffffff
-
/*
* A pointer passed in from user mode. This should not
* be used for syscall parameters, just declare them
@@ -155,61 +137,4 @@ static inline int is_compat_task(void)
#endif
-struct compat_ipc64_perm {
- compat_key_t key;
- __compat_uid32_t uid;
- __compat_gid32_t gid;
- __compat_uid32_t cuid;
- __compat_gid32_t cgid;
- compat_mode_t mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- unsigned int __unused1;
- unsigned int __unused2;
-};
-
-struct compat_semid64_ds {
- struct compat_ipc64_perm sem_perm;
- compat_ulong_t sem_otime;
- compat_ulong_t sem_otime_high;
- compat_ulong_t sem_ctime;
- compat_ulong_t sem_ctime_high;
- compat_ulong_t sem_nsems;
- compat_ulong_t __unused1;
- compat_ulong_t __unused2;
-};
-
-struct compat_msqid64_ds {
- struct compat_ipc64_perm msg_perm;
- compat_ulong_t msg_stime;
- compat_ulong_t msg_stime_high;
- compat_ulong_t msg_rtime;
- compat_ulong_t msg_rtime_high;
- compat_ulong_t msg_ctime;
- compat_ulong_t msg_ctime_high;
- compat_ulong_t msg_cbytes;
- compat_ulong_t msg_qnum;
- compat_ulong_t msg_qbytes;
- compat_pid_t msg_lspid;
- compat_pid_t msg_lrpid;
- compat_ulong_t __unused1;
- compat_ulong_t __unused2;
-};
-
-struct compat_shmid64_ds {
- struct compat_ipc64_perm shm_perm;
- compat_size_t shm_segsz;
- compat_ulong_t shm_atime;
- compat_ulong_t shm_atime_high;
- compat_ulong_t shm_dtime;
- compat_ulong_t shm_dtime_high;
- compat_ulong_t shm_ctime;
- compat_ulong_t shm_ctime_high;
- compat_pid_t shm_cpid;
- compat_pid_t shm_lpid;
- compat_ulong_t shm_nattch;
- compat_ulong_t __unused1;
- compat_ulong_t __unused2;
-};
#endif /* _ASM_S390X_COMPAT_H */
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index 7f3c9ac34bd8..649ecdcc8734 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -9,6 +9,8 @@
#ifndef _S390_KEXEC_H
#define _S390_KEXEC_H
+#include <linux/module.h>
+
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/setup.h>
@@ -29,7 +31,7 @@
#define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
/* Allocate control page with GFP_DMA */
-#define KEXEC_CONTROL_MEMORY_GFP GFP_DMA
+#define KEXEC_CONTROL_MEMORY_GFP (GFP_DMA | __GFP_NORETRY)
/* Maximum address we can use for the crash control pages */
#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)
@@ -83,4 +85,12 @@ struct kimage_arch {
extern const struct kexec_file_ops s390_kexec_image_ops;
extern const struct kexec_file_ops s390_kexec_elf_ops;
+#ifdef CONFIG_KEXEC_FILE
+struct purgatory_info;
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+ Elf_Shdr *section,
+ const Elf_Shdr *relsec,
+ const Elf_Shdr *symtab);
+#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+#endif
#endif /*_S390_KEXEC_H */
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
deleted file mode 100644
index 5209f223331a..000000000000
--- a/arch/s390/include/asm/livepatch.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * livepatch.h - s390-specific Kernel Live Patching Core
- *
- * Copyright (c) 2013-2015 SUSE
- * Authors: Jiri Kosina
- * Vojtech Pavlik
- * Jiri Slaby
- */
-
-#ifndef ASM_LIVEPATCH_H
-#define ASM_LIVEPATCH_H
-
-#include <linux/ftrace.h>
-#include <asm/ptrace.h>
-
-static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
-{
- ftrace_instruction_pointer_set(fregs, ip);
-}
-
-#endif
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index add764a2be8c..bd66f8e34949 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -304,12 +304,6 @@ static __always_inline void __noreturn disabled_wait(void)
while (1);
}
-/*
- * Basic Program Check Handler.
- */
-extern void s390_base_pgm_handler(void);
-extern void (*s390_base_pgm_handler_fn)(struct pt_regs *regs);
-
#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
extern int memcpy_real(void *, unsigned long, size_t);
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index f8500191993d..b23c658dce77 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -39,8 +39,15 @@ static inline bool on_stack(struct stack_info *info,
* Kernel uses the packed stack layout (-mpacked-stack).
*/
struct stack_frame {
- unsigned long empty1[5];
- unsigned int empty2[8];
+ union {
+ unsigned long empty[9];
+ struct {
+ unsigned long sie_control_block;
+ unsigned long sie_savearea;
+ unsigned long sie_reason;
+ unsigned long sie_flags;
+ };
+ };
unsigned long gprs[10];
unsigned long back_chain;
};
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 1f150a7cfb3d..f4511e21d646 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -3,7 +3,7 @@
* S390 version
* Copyright IBM Corp. 1999, 2000
* Author(s): Hartmut Penner (hp@de.ibm.com),
- * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/uaccess.h"
*/
@@ -55,9 +55,6 @@ copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned lo
return n;
}
-int __put_user_bad(void) __attribute__((noreturn));
-int __get_user_bad(void) __attribute__((noreturn));
-
union oac {
unsigned int val;
struct {
@@ -80,8 +77,14 @@ union oac {
};
};
-#define __put_get_user_asm(to, from, size, oac_spec) \
+int __noreturn __put_user_bad(void);
+
+#define __put_user_asm(to, from, size) \
({ \
+ union oac __oac_spec = { \
+ .oac1.as = PSW_BITS_AS_SECONDARY, \
+ .oac1.a = 1, \
+ }; \
int __rc; \
\
asm volatile( \
@@ -89,26 +92,15 @@ union oac {
"0: mvcos %[_to],%[_from],%[_size]\n" \
"1: xr %[rc],%[rc]\n" \
"2:\n" \
- EX_TABLE_UA(0b,2b,%[rc]) EX_TABLE_UA(1b,2b,%[rc]) \
+ EX_TABLE_UA_STORE(0b, 2b, %[rc]) \
+ EX_TABLE_UA_STORE(1b, 2b, %[rc]) \
: [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \
: [_size] "d" (size), [_from] "Q" (*(from)), \
- [spec] "d" (oac_spec.val) \
+ [spec] "d" (__oac_spec.val) \
: "cc", "0"); \
__rc; \
})
-#define __put_user_asm(to, from, size) \
- __put_get_user_asm(to, from, size, ((union oac) { \
- .oac1.as = PSW_BITS_AS_SECONDARY, \
- .oac1.a = 1 \
- }))
-
-#define __get_user_asm(to, from, size) \
- __put_get_user_asm(to, from, size, ((union oac) { \
- .oac2.as = PSW_BITS_AS_SECONDARY, \
- .oac2.a = 1 \
- })) \
-
static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
int rc;
@@ -141,6 +133,31 @@ static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned lon
return rc;
}
+int __noreturn __get_user_bad(void);
+
+#define __get_user_asm(to, from, size) \
+({ \
+ union oac __oac_spec = { \
+ .oac2.as = PSW_BITS_AS_SECONDARY, \
+ .oac2.a = 1, \
+ }; \
+ int __rc; \
+ \
+ asm volatile( \
+ " lr 0,%[spec]\n" \
+ "0: mvcos 0(%[_to]),%[_from],%[_size]\n" \
+ "1: xr %[rc],%[rc]\n" \
+ "2:\n" \
+ EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize]) \
+ EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize]) \
+ : [rc] "=&d" (__rc), "=Q" (*(to)) \
+ : [_size] "d" (size), [_from] "Q" (*(from)), \
+ [spec] "d" (__oac_spec.val), [_to] "a" (to), \
+ [_ksize] "K" (size) \
+ : "cc", "0"); \
+ __rc; \
+})
+
static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
int rc;
@@ -177,77 +194,77 @@ static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsign
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*/
-#define __put_user(x, ptr) \
-({ \
- __typeof__(*(ptr)) __x = (x); \
- int __pu_err = -EFAULT; \
- __chk_user_ptr(ptr); \
- switch (sizeof (*(ptr))) { \
- case 1: \
- case 2: \
- case 4: \
- case 8: \
- __pu_err = __put_user_fn(&__x, ptr, \
- sizeof(*(ptr))); \
- break; \
- default: \
- __put_user_bad(); \
- break; \
- } \
- __builtin_expect(__pu_err, 0); \
+#define __put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __x = (x); \
+ int __pu_err = -EFAULT; \
+ \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ case 8: \
+ __pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ break; \
+ default: \
+ __put_user_bad(); \
+ break; \
+ } \
+ __builtin_expect(__pu_err, 0); \
})
-#define put_user(x, ptr) \
-({ \
- might_fault(); \
- __put_user(x, ptr); \
+#define put_user(x, ptr) \
+({ \
+ might_fault(); \
+ __put_user(x, ptr); \
})
-
-#define __get_user(x, ptr) \
-({ \
- int __gu_err = -EFAULT; \
- __chk_user_ptr(ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: { \
- unsigned char __x = 0; \
- __gu_err = __get_user_fn(&__x, ptr, \
- sizeof(*(ptr))); \
- (x) = *(__force __typeof__(*(ptr)) *) &__x; \
- break; \
- }; \
- case 2: { \
- unsigned short __x = 0; \
- __gu_err = __get_user_fn(&__x, ptr, \
- sizeof(*(ptr))); \
- (x) = *(__force __typeof__(*(ptr)) *) &__x; \
- break; \
- }; \
- case 4: { \
- unsigned int __x = 0; \
- __gu_err = __get_user_fn(&__x, ptr, \
- sizeof(*(ptr))); \
- (x) = *(__force __typeof__(*(ptr)) *) &__x; \
- break; \
- }; \
- case 8: { \
- unsigned long long __x = 0; \
- __gu_err = __get_user_fn(&__x, ptr, \
- sizeof(*(ptr))); \
- (x) = *(__force __typeof__(*(ptr)) *) &__x; \
- break; \
- }; \
- default: \
- __get_user_bad(); \
- break; \
- } \
- __builtin_expect(__gu_err, 0); \
+#define __get_user(x, ptr) \
+({ \
+ int __gu_err = -EFAULT; \
+ \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: { \
+ unsigned char __x; \
+ \
+ __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
+ break; \
+ }; \
+ case 2: { \
+ unsigned short __x; \
+ \
+ __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
+ break; \
+ }; \
+ case 4: { \
+ unsigned int __x; \
+ \
+ __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
+ break; \
+ }; \
+ case 8: { \
+ unsigned long __x; \
+ \
+ __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
+ break; \
+ }; \
+ default: \
+ __get_user_bad(); \
+ break; \
+ } \
+ __builtin_expect(__gu_err, 0); \
})
-#define get_user(x, ptr) \
-({ \
- might_fault(); \
- __get_user(x, ptr); \
+#define get_user(x, ptr) \
+({ \
+ might_fault(); \
+ __get_user(x, ptr); \
})
/*
@@ -278,19 +295,20 @@ int __noreturn __put_kernel_bad(void);
int __rc; \
\
asm volatile( \
- "0: " insn " %2,%1\n" \
- "1: xr %0,%0\n" \
+ "0: " insn " %[_val],%[_to]\n" \
+ "1: xr %[rc],%[rc]\n" \
"2:\n" \
- EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0) \
- : "=d" (__rc), "+Q" (*(to)) \
- : "d" (val) \
+ EX_TABLE_UA_STORE(0b, 2b, %[rc]) \
+ EX_TABLE_UA_STORE(1b, 2b, %[rc]) \
+ : [rc] "=d" (__rc), [_to] "+Q" (*(to)) \
+ : [_val] "d" (val) \
: "cc"); \
__rc; \
})
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
- u64 __x = (u64)(*((type *)(src))); \
+ unsigned long __x = (unsigned long)(*((type *)(src))); \
int __pk_err; \
\
switch (sizeof(type)) { \
@@ -321,12 +339,13 @@ int __noreturn __get_kernel_bad(void);
int __rc; \
\
asm volatile( \
- "0: " insn " %1,%2\n" \
- "1: xr %0,%0\n" \
+ "0: " insn " %[_val],%[_from]\n" \
+ "1: xr %[rc],%[rc]\n" \
"2:\n" \
- EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0) \
- : "=d" (__rc), "+d" (val) \
- : "Q" (*(from)) \
+ EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val]) \
+ EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val]) \
+ : [rc] "=d" (__rc), [_val] "=d" (val) \
+ : [_from] "Q" (*(from)) \
: "cc"); \
__rc; \
})
@@ -337,28 +356,28 @@ do { \
\
switch (sizeof(type)) { \
case 1: { \
- u8 __x = 0; \
+ unsigned char __x; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "ic"); \
*((type *)(dst)) = (type)__x; \
break; \
}; \
case 2: { \
- u16 __x = 0; \
+ unsigned short __x; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "lh"); \
*((type *)(dst)) = (type)__x; \
break; \
}; \
case 4: { \
- u32 __x = 0; \
+ unsigned int __x; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "l"); \
*((type *)(dst)) = (type)__x; \
break; \
}; \
case 8: { \
- u64 __x = 0; \
+ unsigned long __x; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "lg"); \
*((type *)(dst)) = (type)__x; \
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 9e9f75ef046a..4260bc5ce7f8 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -28,6 +28,7 @@
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
# ifdef CONFIG_COMPAT
+# define __ARCH_WANT_COMPAT_STAT
# define __ARCH_WANT_SYS_TIME32
# define __ARCH_WANT_SYS_UTIME32
# endif
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 5851041bb214..27d6b3c7aa06 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -33,7 +33,7 @@ CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
-obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
+obj-y := traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o
obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o
obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 7c74f0e17e5a..d8ce965c0a97 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -32,6 +32,22 @@ int main(void)
/* pt_regs offsets */
OFFSET(__PT_PSW, pt_regs, psw);
OFFSET(__PT_GPRS, pt_regs, gprs);
+ OFFSET(__PT_R0, pt_regs, gprs[0]);
+ OFFSET(__PT_R1, pt_regs, gprs[1]);
+ OFFSET(__PT_R2, pt_regs, gprs[2]);
+ OFFSET(__PT_R3, pt_regs, gprs[3]);
+ OFFSET(__PT_R4, pt_regs, gprs[4]);
+ OFFSET(__PT_R5, pt_regs, gprs[5]);
+ OFFSET(__PT_R6, pt_regs, gprs[6]);
+ OFFSET(__PT_R7, pt_regs, gprs[7]);
+ OFFSET(__PT_R8, pt_regs, gprs[8]);
+ OFFSET(__PT_R9, pt_regs, gprs[9]);
+ OFFSET(__PT_R10, pt_regs, gprs[10]);
+ OFFSET(__PT_R11, pt_regs, gprs[11]);
+ OFFSET(__PT_R12, pt_regs, gprs[12]);
+ OFFSET(__PT_R13, pt_regs, gprs[13]);
+ OFFSET(__PT_R14, pt_regs, gprs[14]);
+ OFFSET(__PT_R15, pt_regs, gprs[15]);
OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2);
OFFSET(__PT_FLAGS, pt_regs, flags);
OFFSET(__PT_CR1, pt_regs, cr1);
@@ -41,11 +57,11 @@ int main(void)
/* stack_frame offsets */
OFFSET(__SF_BACKCHAIN, stack_frame, back_chain);
OFFSET(__SF_GPRS, stack_frame, gprs);
- OFFSET(__SF_EMPTY, stack_frame, empty1[0]);
- OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[1]);
- OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[2]);
- OFFSET(__SF_SIE_REASON, stack_frame, empty1[3]);
- OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[4]);
+ OFFSET(__SF_EMPTY, stack_frame, empty[0]);
+ OFFSET(__SF_SIE_CONTROL, stack_frame, sie_control_block);
+ OFFSET(__SF_SIE_SAVEAREA, stack_frame, sie_savearea);
+ OFFSET(__SF_SIE_REASON, stack_frame, sie_reason);
+ OFFSET(__SF_SIE_FLAGS, stack_frame, sie_flags);
DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
BLANK();
/* idle data offsets */
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 69819b765250..a2c1c55daec0 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/elf.h>
+#include <linux/uio.h>
#include <asm/asm-offsets.h>
#include <asm/os_info.h>
#include <asm/elf.h>
@@ -212,8 +213,8 @@ static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count)
/*
* Copy one page from "oldmem"
*/
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
- unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
+ unsigned long offset)
{
unsigned long src;
int rc;
@@ -221,10 +222,12 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
if (!csize)
return 0;
src = pfn_to_phys(pfn) + offset;
- if (userbuf)
- rc = copy_oldmem_user((void __force __user *) buf, src, csize);
+
+ /* XXX: pass the iov_iter down to a common function */
+ if (iter_is_iovec(iter))
+ rc = copy_oldmem_user(iter->iov->iov_base, src, csize);
else
- rc = copy_oldmem_kernel((void *) buf, src, csize);
+ rc = copy_oldmem_kernel(iter->kvec->iov_base, src, csize);
return rc;
}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 08cc86a0db90..432c8c987256 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -149,7 +149,7 @@ static __init void setup_topology(void)
topology_max_mnest = max_mnest;
}
-static void early_pgm_check_handler(struct pt_regs *regs)
+void __do_early_pgm_check(struct pt_regs *regs)
{
if (!fixup_exception(regs))
disabled_wait();
@@ -159,12 +159,11 @@ static noinline __init void setup_lowcore_early(void)
{
psw_t psw;
- psw.addr = (unsigned long)s390_base_pgm_handler;
+ psw.addr = (unsigned long)early_pgm_check_handler;
psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
if (IS_ENABLED(CONFIG_KASAN))
psw.mask |= PSW_MASK_DAT;
S390_lowcore.program_new_psw = psw;
- s390_base_pgm_handler_fn = early_pgm_check_handler;
S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/earlypgm.S
index 172c23c8ca00..f521c6da37b8 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/earlypgm.S
@@ -1,23 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * arch/s390/kernel/base.S
- *
* Copyright IBM Corp. 2006, 2007
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
-#include <asm/nospec-insn.h>
-#include <asm/ptrace.h>
- GEN_BR_THUNK %r9
- GEN_BR_THUNK %r14
-
-__PT_R0 = __PT_GPRS
-__PT_R8 = __PT_GPRS + 64
-
-ENTRY(s390_base_pgm_handler)
+ENTRY(early_pgm_check_handler)
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15)
@@ -26,25 +16,8 @@ ENTRY(s390_base_pgm_handler)
mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
lgr %r2,%r11
- larl %r1,s390_base_pgm_handler_fn
- lg %r9,0(%r1)
- ltgr %r9,%r9
- jz 1f
- BASR_EX %r14,%r9
+ brasl %r14,__do_early_pgm_check
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
lpswe __LC_RETURN_PSW
-1: larl %r13,disabled_wait_psw
- lpswe 0(%r13)
-ENDPROC(s390_base_pgm_handler)
-
- .align 8
-disabled_wait_psw:
- .quad 0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler
-
- .section .bss
- .align 8
- .globl s390_base_pgm_handler_fn
-s390_base_pgm_handler_fn:
- .quad 0
- .previous
+ENDPROC(early_pgm_check_handler)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index df41132ccd06..d2a1f2f4f5b8 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -29,23 +29,6 @@
#include <asm/export.h>
#include <asm/nospec-insn.h>
-__PT_R0 = __PT_GPRS
-__PT_R1 = __PT_GPRS + 8
-__PT_R2 = __PT_GPRS + 16
-__PT_R3 = __PT_GPRS + 24
-__PT_R4 = __PT_GPRS + 32
-__PT_R5 = __PT_GPRS + 40
-__PT_R6 = __PT_GPRS + 48
-__PT_R7 = __PT_GPRS + 56
-__PT_R8 = __PT_GPRS + 64
-__PT_R9 = __PT_GPRS + 72
-__PT_R10 = __PT_GPRS + 80
-__PT_R11 = __PT_GPRS + 88
-__PT_R12 = __PT_GPRS + 96
-__PT_R13 = __PT_GPRS + 104
-__PT_R14 = __PT_GPRS + 112
-__PT_R15 = __PT_GPRS + 120
-
STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
@@ -268,6 +251,10 @@ ENTRY(sie64a)
BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
sie 0(%r14)
+# Let the next instruction be NOP to avoid triggering a machine check
+# and handling it in a guest as a result of the instruction execution.
+ nopr 7
+.Lsie_leave:
BPOFF
BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
@@ -564,7 +551,7 @@ ENTRY(mcck_int_handler)
jno .Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
OUTSIDE %r9,.Lsie_gmap,.Lsie_done,6f
- OUTSIDE %r9,.Lsie_entry,.Lsie_skip,4f
+ OUTSIDE %r9,.Lsie_entry,.Lsie_leave,4f
oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
j 5f
4: CHKSTG .Lmcck_panic
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 56e5e3712fbb..995ec7449feb 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -17,10 +17,12 @@ void ext_int_handler(void);
void io_int_handler(void);
void mcck_int_handler(void);
void restart_int_handler(void);
+void early_pgm_check_handler(void);
void __ret_from_fork(struct task_struct *prev, struct pt_regs *regs);
void __do_pgm_check(struct pt_regs *regs);
void __do_syscall(struct pt_regs *regs, int per_trap);
+void __do_early_pgm_check(struct pt_regs *regs);
void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 1852d46babb1..416b5a94353d 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -225,14 +225,13 @@ void arch_ftrace_update_code(int command)
ftrace_modify_all_code(command);
}
-int ftrace_arch_code_modify_post_process(void)
+void ftrace_arch_code_modify_post_process(void)
{
/*
* Flush any pre-fetched instructions on all
* CPUs to make the new code visible.
*/
text_poke_sync_lock();
- return 0;
}
#ifdef CONFIG_MODULES
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index ea7729bebaa0..c27321cb0969 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -30,7 +30,7 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
if (!stack)
return NULL;
- return (struct kvm_s390_sie_block *) stack->empty1[0];
+ return (struct kvm_s390_sie_block *)stack->sie_control_block;
}
static bool is_in_guest(struct pt_regs *regs)
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 71d86f73b02c..89949b9f3cf8 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -94,9 +94,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
return 0;
}
-int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
- unsigned long arg, struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long new_stackp = args->stack;
+ unsigned long tls = args->tls;
struct fake_frame
{
struct stack_frame sf;
@@ -130,15 +132,15 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
frame->sf.gprs[9] = (unsigned long)frame;
/* Store access registers to kernel stack of new process. */
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
/* kernel thread */
memset(&frame->childregs, 0, sizeof(struct pt_regs));
frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
frame->childregs.psw.addr =
(unsigned long)__ret_from_fork;
- frame->childregs.gprs[9] = new_stackp; /* function */
- frame->childregs.gprs[10] = arg;
+ frame->childregs.gprs[9] = (unsigned long)args->fn;
+ frame->childregs.gprs[10] = (unsigned long)args->fn_arg;
frame->childregs.orig_gpr2 = -1;
frame->childregs.last_break = 1;
return 0;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 76ad6408cb2c..8fcb56141689 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1332,8 +1332,7 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm,
mutex_unlock(&kvm->lock);
return -EBUSY;
}
- bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
- KVM_S390_VM_CPU_FEAT_NR_BITS);
+ bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
data.feat[0],
@@ -1504,8 +1503,7 @@ static int kvm_s390_get_processor_feat(struct kvm *kvm,
{
struct kvm_s390_vm_cpu_feat data;
- bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
- KVM_S390_VM_CPU_FEAT_NR_BITS);
+ bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
return -EFAULT;
VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
@@ -1520,9 +1518,7 @@ static int kvm_s390_get_machine_feat(struct kvm *kvm,
{
struct kvm_s390_vm_cpu_feat data;
- bitmap_copy((unsigned long *) data.feat,
- kvm_s390_available_cpu_feat,
- KVM_S390_VM_CPU_FEAT_NR_BITS);
+ bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
return -EFAULT;
VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
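bitmap_from_arr64() and bitmap_to_arr64() convert between a kernel bitmap (an array of unsigned long) and an explicit u64 array; the open-coded (unsigned long *) casts they replace only work when unsigned long is 64 bits wide, and the helpers additionally clear any unused tail bits. A hedged usage sketch; the buffer sizing and all names other than the helpers themselves are made up:

#include <linux/bitmap.h>

#define MY_FEAT_NR_BITS 192

static void feat_roundtrip(void)
{
	DECLARE_BITMAP(feat, MY_FEAT_NR_BITS);
	u64 buf[3];	/* 192 bits = 3 * 64 */

	bitmap_zero(feat, MY_FEAT_NR_BITS);
	bitmap_to_arr64(buf, feat, MY_FEAT_NR_BITS);	/* bitmap -> u64[] */
	bitmap_from_arr64(feat, buf, MY_FEAT_NR_BITS);	/* u64[] -> bitmap */
}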
diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
index 8ac8ad2474a0..1e4d2187541a 100644
--- a/arch/s390/mm/extable.c
+++ b/arch/s390/mm/extable.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bitfield.h>
#include <linux/extable.h>
+#include <linux/string.h>
#include <linux/errno.h>
#include <linux/panic.h>
#include <asm/asm-extable.h>
@@ -24,9 +26,34 @@ static bool ex_handler_fixup(const struct exception_table_entry *ex, struct pt_r
return true;
}
-static bool ex_handler_uaccess(const struct exception_table_entry *ex, struct pt_regs *regs)
+static bool ex_handler_ua_store(const struct exception_table_entry *ex, struct pt_regs *regs)
{
- regs->gprs[ex->data] = -EFAULT;
+ unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+
+ regs->gprs[reg_err] = -EFAULT;
+ regs->psw.addr = extable_fixup(ex);
+ return true;
+}
+
+static bool ex_handler_ua_load_mem(const struct exception_table_entry *ex, struct pt_regs *regs)
+{
+ unsigned int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
+ unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+ size_t len = FIELD_GET(EX_DATA_LEN, ex->data);
+
+ regs->gprs[reg_err] = -EFAULT;
+ memset((void *)regs->gprs[reg_addr], 0, len);
+ regs->psw.addr = extable_fixup(ex);
+ return true;
+}
+
+static bool ex_handler_ua_load_reg(const struct exception_table_entry *ex, struct pt_regs *regs)
+{
+ unsigned int reg_zero = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
+ unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+
+ regs->gprs[reg_err] = -EFAULT;
+ regs->gprs[reg_zero] = 0;
regs->psw.addr = extable_fixup(ex);
return true;
}
@@ -43,8 +70,12 @@ bool fixup_exception(struct pt_regs *regs)
return ex_handler_fixup(ex, regs);
case EX_TYPE_BPF:
return ex_handler_bpf(ex, regs);
- case EX_TYPE_UACCESS:
- return ex_handler_uaccess(ex, regs);
+ case EX_TYPE_UA_STORE:
+ return ex_handler_ua_store(ex, regs);
+ case EX_TYPE_UA_LOAD_MEM:
+ return ex_handler_ua_load_mem(ex, regs);
+ case EX_TYPE_UA_LOAD_REG:
+ return ex_handler_ua_load_reg(ex, regs);
}
panic("invalid exception table entry");
}
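Each new handler decodes a packed ex->data word with FIELD_GET(), so one field can name the register that receives -EFAULT, another the register holding the faulting address, and a third the number of bytes to clear. A standalone sketch of the pattern; the bit positions below are illustrative, the real masks live in asm/asm-extable.h:

#include <linux/bitfield.h>
#include <linux/printk.h>

#define EX_DATA_REG_ERR		GENMASK(3, 0)	/* register that gets -EFAULT */
#define EX_DATA_REG_ADDR	GENMASK(7, 4)	/* register holding the address */
#define EX_DATA_LEN		GENMASK(19, 8)	/* bytes to zero on fault */

static void decode_ex_data(unsigned int data)
{
	unsigned int reg_err  = FIELD_GET(EX_DATA_REG_ERR, data);
	unsigned int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, data);
	size_t len            = FIELD_GET(EX_DATA_LEN, data);

	pr_debug("err reg %u, addr reg %u, len %zu\n", reg_err, reg_addr, len);
}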
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 1ac73917a8d3..b8ae4a4aa2ba 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2608,6 +2608,18 @@ static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
return 0;
}
+/*
+ * Give a chance to schedule after setting a key to 256 pages.
+ * We only hold the mm lock, which is a rwsem and the kvm srcu.
+ * Both can sleep.
+ */
+static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ cond_resched();
+ return 0;
+}
+
static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
unsigned long hmask, unsigned long next,
struct mm_walk *walk)
@@ -2630,12 +2642,14 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
end = start + HPAGE_SIZE - 1;
__storage_key_init_range(start, end);
set_bit(PG_arch_1, &page->flags);
+ cond_resched();
return 0;
}
static const struct mm_walk_ops enable_skey_walk_ops = {
.hugetlb_entry = __s390_enable_skey_hugetlb,
.pte_entry = __s390_enable_skey_pte,
+ .pmd_entry = __s390_enable_skey_pmd,
};
int s390_enable_skey(void)
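The added pmd_entry callback exists only to call cond_resched() once per PMD (after every 256 4K pages), which is safe because the walk holds just the mmap lock and the KVM SRCU, both of which may sleep. A generic sketch of hooking such ops into a page-table walk; the walk_page_range() usage here is illustrative and not taken from this patch:

#include <linux/pagewalk.h>
#include <linux/sched.h>

static int resched_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	cond_resched();		/* breathe between PMDs on long walks */
	return 0;
}

static const struct mm_walk_ops resched_walk_ops = {
	.pmd_entry = resched_pmd_entry,
};

/* Caller is assumed to hold mmap_read_lock(mm). */
static int walk_whole_mm(struct mm_struct *mm)
{
	return walk_page_range(mm, 0, TASK_SIZE, &resched_walk_ops, NULL);
}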
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 697df02362af..4909dcd762e8 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -748,7 +748,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
ptev = pte_val(*ptep);
if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
- page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
+ page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
pgste_set_unlock(ptep, pgste);
preempt_enable();
}
diff --git a/arch/sh/kernel/crash_dump.c b/arch/sh/kernel/crash_dump.c
index 5b41b59698c1..19ce6a950aac 100644
--- a/arch/sh/kernel/crash_dump.c
+++ b/arch/sh/kernel/crash_dump.c
@@ -8,23 +8,11 @@
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/io.h>
+#include <linux/uio.h>
#include <linux/uaccess.h>
-/**
- * copy_oldmem_page - copy one page from "oldmem"
- * @pfn: page frame number to be copied
- * @buf: target memory address for the copy; this can be in kernel address
- * space or user address space (see @userbuf)
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page (based on pfn) to begin the copy
- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
- * otherwise @buf is in kernel address space, use memcpy().
- *
- * Copy a page from "oldmem". For this page, there is no pte mapped
- * in the current kernel. We stitch up a pte, similar to kmap_atomic.
- */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
{
void __iomem *vaddr;
@@ -32,15 +20,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
return 0;
vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
-
- if (userbuf) {
- if (copy_to_user((void __user *)buf, (vaddr + offset), csize)) {
- iounmap(vaddr);
- return -EFAULT;
- }
- } else
- memcpy(buf, (vaddr + offset), csize);
-
+ csize = copy_to_iter(vaddr + offset, csize, iter);
iounmap(vaddr);
+
return csize;
}
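With the iov_iter conversion the caller decides whether the destination is user or kernel memory, so the per-arch copy_to_user()/memcpy() branching disappears and copy_to_iter() simply reports how many bytes it copied. A hedged sketch of how a caller might drive the new interface; the iterator setup below is illustrative and not part of this patch:

#include <linux/uio.h>

/* Read one page of the crashed kernel's memory into a kernel buffer. */
static ssize_t read_oldmem_to_kernel(unsigned long pfn, void *buf, size_t len)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = len };
	struct iov_iter iter;

	iov_iter_kvec(&iter, READ, &kvec, 1, len);
	return copy_oldmem_page(&iter, pfn, len, 0);
}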
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index ca01286a0610..a808843375e7 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -92,9 +92,11 @@ void release_thread(struct task_struct *dead_task)
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
-int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
- struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs;
@@ -114,11 +116,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
childregs = task_pt_regs(p);
p->thread.sp = (unsigned long) childregs;
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
memset(childregs, 0, sizeof(struct pt_regs));
p->thread.pc = (unsigned long) ret_from_kernel_thread;
- childregs->regs[4] = arg;
- childregs->regs[5] = usp;
+ childregs->regs[4] = (unsigned long) args->fn_arg;
+ childregs->regs[5] = (unsigned long) args->fn;
childregs->sr = SR_MD;
#if defined(CONFIG_SH_FPU)
childregs->sr |= SR_FD;
diff --git a/arch/sh/kernel/reboot.c b/arch/sh/kernel/reboot.c
index 5c33f036418b..e8eeedc9b182 100644
--- a/arch/sh/kernel/reboot.c
+++ b/arch/sh/kernel/reboot.c
@@ -46,8 +46,7 @@ static void native_machine_shutdown(void)
static void native_machine_power_off(void)
{
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
}
static void native_machine_halt(void)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 85b573643af6..ba449c47effd 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -489,9 +489,4 @@ config COMPAT
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
-config SYSVIPC_COMPAT
- bool
- depends on COMPAT && SYSVIPC
- default y
-
source "drivers/sbus/char/Kconfig"
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index bd949fcf9d63..e4382d2efa56 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -9,17 +9,25 @@
#define compat_mode_t compat_mode_t
typedef u16 compat_mode_t;
+#define __compat_uid_t __compat_uid_t
+typedef u16 __compat_uid_t;
+typedef u16 __compat_gid_t;
+
+#define compat_dev_t compat_dev_t
+typedef u16 compat_dev_t;
+
+#define compat_ipc_pid_t compat_ipc_pid_t
+typedef u16 compat_ipc_pid_t;
+
+#define compat_ipc64_perm compat_ipc64_perm
+
+#define COMPAT_RLIM_INFINITY 0x7fffffff
+
#include <asm-generic/compat.h>
-#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "sparc\0\0"
-typedef u16 __compat_uid_t;
-typedef u16 __compat_gid_t;
-typedef u16 compat_dev_t;
typedef s16 compat_nlink_t;
-typedef u16 compat_ipc_pid_t;
-typedef __kernel_fsid_t compat_fsid_t;
struct compat_stat {
compat_dev_t st_dev;
@@ -75,46 +83,7 @@ struct compat_stat64 {
unsigned int __unused5;
};
-struct compat_flock {
- short l_type;
- short l_whence;
- compat_off_t l_start;
- compat_off_t l_len;
- compat_pid_t l_pid;
- short __unused;
-};
-
-#define F_GETLK64 12
-#define F_SETLK64 13
-#define F_SETLKW64 14
-
-struct compat_flock64 {
- short l_type;
- short l_whence;
- compat_loff_t l_start;
- compat_loff_t l_len;
- compat_pid_t l_pid;
- short __unused;
-};
-
-struct compat_statfs {
- int f_type;
- int f_bsize;
- int f_blocks;
- int f_bfree;
- int f_bavail;
- int f_files;
- int f_ffree;
- compat_fsid_t f_fsid;
- int f_namelen; /* SunOS ignores this field. */
- int f_frsize;
- int f_flags;
- int f_spare[4];
-};
-
-#define COMPAT_RLIM_INFINITY 0x7fffffff
-
-#define COMPAT_OFF_T_MAX 0x7fffffff
+#define __ARCH_COMPAT_FLOCK_PAD short __unused;
struct compat_ipc64_perm {
compat_key_t key;
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 1e66278ba4a5..d6bc76706a7a 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -46,6 +46,7 @@
#define __ARCH_WANT_SYS_TIME
#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
+#define __ARCH_WANT_COMPAT_STAT
#endif
#ifdef __32bit_syscall_numbers__
diff --git a/arch/sparc/include/uapi/asm/stat.h b/arch/sparc/include/uapi/asm/stat.h
index e03d6f8ec301..47f54133a141 100644
--- a/arch/sparc/include/uapi/asm/stat.h
+++ b/arch/sparc/include/uapi/asm/stat.h
@@ -11,8 +11,8 @@ struct stat {
__kernel_ino_t st_ino;
__kernel_mode_t st_mode;
short st_nlink;
- __kernel_uid_t st_uid;
- __kernel_gid_t st_gid;
+ __kernel_uid32_t st_uid;
+ __kernel_gid32_t st_gid;
unsigned int st_rdev;
long st_size;
long st_atime;
diff --git a/arch/sparc/include/uapi/asm/termbits.h b/arch/sparc/include/uapi/asm/termbits.h
index ce5ad5d0f105..4321322701fc 100644
--- a/arch/sparc/include/uapi/asm/termbits.h
+++ b/arch/sparc/include/uapi/asm/termbits.h
@@ -2,15 +2,12 @@
#ifndef _UAPI_SPARC_TERMBITS_H
#define _UAPI_SPARC_TERMBITS_H
-#include <linux/posix_types.h>
-
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
+#include <asm-generic/termbits-common.h>
#if defined(__sparc__) && defined(__arch64__)
-typedef unsigned int tcflag_t;
+typedef unsigned int tcflag_t;
#else
-typedef unsigned long tcflag_t;
+typedef unsigned long tcflag_t;
#endif
#define NCC 8
@@ -61,21 +58,19 @@ struct ktermios {
};
/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VEOL 5
-#define VEOL2 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-
-
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VEOL 5
+#define VEOL2 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
#define VSUSP 10
-#define VDSUSP 11 /* SunOS POSIX nicety I do believe... */
+#define VDSUSP 11 /* SunOS POSIX nicety I do believe... */
#define VREPRINT 12
#define VDISCARD 13
#define VWERASE 14
@@ -90,121 +85,83 @@ struct ktermios {
#endif
/* c_iflag bits */
-#define IGNBRK 0x00000001
-#define BRKINT 0x00000002
-#define IGNPAR 0x00000004
-#define PARMRK 0x00000008
-#define INPCK 0x00000010
-#define ISTRIP 0x00000020
-#define INLCR 0x00000040
-#define IGNCR 0x00000080
-#define ICRNL 0x00000100
-#define IUCLC 0x00000200
-#define IXON 0x00000400
-#define IXANY 0x00000800
-#define IXOFF 0x00001000
-#define IMAXBEL 0x00002000
-#define IUTF8 0x00004000
+#define IUCLC 0x0200
+#define IXON 0x0400
+#define IXOFF 0x1000
+#define IMAXBEL 0x2000
+#define IUTF8 0x4000
/* c_oflag bits */
-#define OPOST 0x00000001
-#define OLCUC 0x00000002
-#define ONLCR 0x00000004
-#define OCRNL 0x00000008
-#define ONOCR 0x00000010
-#define ONLRET 0x00000020
-#define OFILL 0x00000040
-#define OFDEL 0x00000080
-#define NLDLY 0x00000100
-#define NL0 0x00000000
-#define NL1 0x00000100
-#define CRDLY 0x00000600
-#define CR0 0x00000000
-#define CR1 0x00000200
-#define CR2 0x00000400
-#define CR3 0x00000600
-#define TABDLY 0x00001800
-#define TAB0 0x00000000
-#define TAB1 0x00000800
-#define TAB2 0x00001000
-#define TAB3 0x00001800
-#define XTABS 0x00001800
-#define BSDLY 0x00002000
-#define BS0 0x00000000
-#define BS1 0x00002000
-#define VTDLY 0x00004000
-#define VT0 0x00000000
-#define VT1 0x00004000
-#define FFDLY 0x00008000
-#define FF0 0x00000000
-#define FF1 0x00008000
-#define PAGEOUT 0x00010000 /* SUNOS specific */
-#define WRAP 0x00020000 /* SUNOS specific */
+#define OLCUC 0x00002
+#define ONLCR 0x00004
+#define NLDLY 0x00100
+#define NL0 0x00000
+#define NL1 0x00100
+#define CRDLY 0x00600
+#define CR0 0x00000
+#define CR1 0x00200
+#define CR2 0x00400
+#define CR3 0x00600
+#define TABDLY 0x01800
+#define TAB0 0x00000
+#define TAB1 0x00800
+#define TAB2 0x01000
+#define TAB3 0x01800
+#define XTABS 0x01800
+#define BSDLY 0x02000
+#define BS0 0x00000
+#define BS1 0x02000
+#define VTDLY 0x04000
+#define VT0 0x00000
+#define VT1 0x04000
+#define FFDLY 0x08000
+#define FF0 0x00000
+#define FF1 0x08000
+#define PAGEOUT 0x10000 /* SUNOS specific */
+#define WRAP 0x20000 /* SUNOS specific */
/* c_cflag bit meaning */
-#define CBAUD 0x0000100f
-#define B0 0x00000000 /* hang up */
-#define B50 0x00000001
-#define B75 0x00000002
-#define B110 0x00000003
-#define B134 0x00000004
-#define B150 0x00000005
-#define B200 0x00000006
-#define B300 0x00000007
-#define B600 0x00000008
-#define B1200 0x00000009
-#define B1800 0x0000000a
-#define B2400 0x0000000b
-#define B4800 0x0000000c
-#define B9600 0x0000000d
-#define B19200 0x0000000e
-#define B38400 0x0000000f
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0x00000030
-#define CS5 0x00000000
-#define CS6 0x00000010
-#define CS7 0x00000020
-#define CS8 0x00000030
-#define CSTOPB 0x00000040
-#define CREAD 0x00000080
-#define PARENB 0x00000100
-#define PARODD 0x00000200
-#define HUPCL 0x00000400
-#define CLOCAL 0x00000800
-#define CBAUDEX 0x00001000
+#define CBAUD 0x0000100f
+#define CSIZE 0x00000030
+#define CS5 0x00000000
+#define CS6 0x00000010
+#define CS7 0x00000020
+#define CS8 0x00000030
+#define CSTOPB 0x00000040
+#define CREAD 0x00000080
+#define PARENB 0x00000100
+#define PARODD 0x00000200
+#define HUPCL 0x00000400
+#define CLOCAL 0x00000800
+#define CBAUDEX 0x00001000
/* We'll never see these speeds with the Zilogs, but for completeness... */
-#define BOTHER 0x00001000
-#define B57600 0x00001001
-#define B115200 0x00001002
-#define B230400 0x00001003
-#define B460800 0x00001004
+#define BOTHER 0x00001000
+#define B57600 0x00001001
+#define B115200 0x00001002
+#define B230400 0x00001003
+#define B460800 0x00001004
/* This is what we can do with the Zilogs. */
-#define B76800 0x00001005
+#define B76800 0x00001005
/* This is what we can do with the SAB82532. */
-#define B153600 0x00001006
-#define B307200 0x00001007
-#define B614400 0x00001008
-#define B921600 0x00001009
+#define B153600 0x00001006
+#define B307200 0x00001007
+#define B614400 0x00001008
+#define B921600 0x00001009
/* And these are the rest... */
-#define B500000 0x0000100a
-#define B576000 0x0000100b
-#define B1000000 0x0000100c
-#define B1152000 0x0000100d
-#define B1500000 0x0000100e
-#define B2000000 0x0000100f
+#define B500000 0x0000100a
+#define B576000 0x0000100b
+#define B1000000 0x0000100c
+#define B1152000 0x0000100d
+#define B1500000 0x0000100e
+#define B2000000 0x0000100f
/* These have totally bogus values and nobody uses them
so far. Later on we'd have to use say 0x10000x and
adjust CBAUD constant and drivers accordingly.
-#define B2500000 0x00001010
-#define B3000000 0x00001011
-#define B3500000 0x00001012
-#define B4000000 0x00001013 */
-#define CIBAUD 0x100f0000 /* input baud rate (not used) */
-#define CMSPAR 0x40000000 /* mark or space (stick) parity */
-#define CRTSCTS 0x80000000 /* flow control */
-
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+#define B2500000 0x00001010
+#define B3000000 0x00001011
+#define B3500000 0x00001012
+#define B4000000 0x00001013 */
+#define CIBAUD 0x100f0000 /* input baud rate (not used) */
/* c_lflag bits */
#define ISIG 0x00000001
@@ -219,7 +176,7 @@ struct ktermios {
#define ECHOCTL 0x00000200
#define ECHOPRT 0x00000400
#define ECHOKE 0x00000800
-#define DEFECHO 0x00001000 /* SUNOS thing, what is it? */
+#define DEFECHO 0x00001000 /* SUNOS thing, what is it? */
#define FLUSHO 0x00002000
#define PENDIN 0x00004000
#define IEXTEN 0x00008000
@@ -244,21 +201,9 @@ struct ktermios {
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
-
-/* tcflow() and TCXONC use these */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
-
/* tcsetattr uses these */
-#define TCSANOW 0
-#define TCSADRAIN 1
-#define TCSAFLUSH 2
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
#endif /* _UAPI_SPARC_TERMBITS_H */
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 88c0c14aaff0..33b0215a4182 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -259,9 +259,11 @@ clone_stackframe(struct sparc_stackf __user *dst,
extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);
-int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
- struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long sp = args->stack;
+ unsigned long tls = args->tls;
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
char *new_stack;
@@ -296,13 +298,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
ti->ksp = (unsigned long) new_stack;
p->thread.kregs = childregs;
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
extern int nwindows;
unsigned long psr;
memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8);
- childregs->u_regs[UREG_G1] = sp; /* function */
- childregs->u_regs[UREG_G2] = arg;
+ childregs->u_regs[UREG_G1] = (unsigned long) args->fn;
+ childregs->u_regs[UREG_G2] = (unsigned long) args->fn_arg;
psr = childregs->psr = get_psr();
ti->kpsr = psr | PSR_PIL;
ti->kwim = 1 << (((psr & PSR_CWP) + 1) % nwindows);
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 9a2ceb080ac9..6335b698a4b4 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -564,9 +564,11 @@ barf:
* Parent --> %o0 == childs pid, %o1 == 0
* Child --> %o0 == parents pid, %o1 == 1
*/
-int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
- struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long sp = args->stack;
+ unsigned long tls = args->tls;
struct thread_info *t = task_thread_info(p);
struct pt_regs *regs = current_pt_regs();
struct sparc_stackf *parent_sf;
@@ -584,12 +586,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
sizeof(struct sparc_stackf));
t->fpsaved[0] = 0;
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(args->fn)) {
memset(child_trap_frame, 0, child_stack_sz);
__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
(current_pt_regs()->tstate + 1) & TSTATE_CWP;
- t->kregs->u_regs[UREG_G1] = sp; /* function */
- t->kregs->u_regs[UREG_G2] = arg;
+ t->kregs->u_regs[UREG_G1] = (unsigned long) args->fn;
+ t->kregs->u_regs[UREG_G2] = (unsigned long) args->fn_arg;
return 0;
}
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index e8983d098e73..4ec22e156a2e 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -6,6 +6,7 @@ config UML
bool
default y
select ARCH_EPHEMERAL_INODES
+ select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV
select ARCH_HAS_STRNCPY_FROM_USER
select ARCH_HAS_STRNLEN_USER
diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
index f145842c40b9..914da774bd39 100644
--- a/arch/um/drivers/Kconfig
+++ b/arch/um/drivers/Kconfig
@@ -64,6 +64,13 @@ config XTERM_CHAN
its own xterm.
It is safe to say 'Y' here.
+config XTERM_CHAN_DEFAULT_EMULATOR
+ string "xterm channel default terminal emulator"
+ depends on XTERM_CHAN
+ default "xterm"
+ help
+ This option allows changing the default terminal emulator.
+
config NOCONFIG_CHAN
bool
default !(XTERM_CHAN && TTY_CHAN && PTY_CHAN && PORT_CHAN && NULL_CHAN)
@@ -231,6 +238,14 @@ config UML_NET_DAEMON
If unsure, say N.
+config UML_NET_DAEMON_DEFAULT_SOCK
+ string "Default socket for daemon transport"
+ default "/tmp/uml.ctl"
+ depends on UML_NET_DAEMON
+ help
+ This option allows setting the default socket for the daemon
+ transport, normally it defaults to /tmp/uml.ctl.
+
config UML_NET_VECTOR
bool "Vector I/O high performance network devices"
depends on UML_NET
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index 803666e85414..e1dc4292bd22 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -70,4 +70,6 @@ obj-$(CONFIG_UML_PCI_OVER_VIRTIO) += virt-pci.o
USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o pcap_user.o vde_user.o vector_user.o
CFLAGS_null.o = -DDEV_NULL=$(DEV_NULL_PATH)
+CFLAGS_xterm.o += '-DCONFIG_XTERM_CHAN_DEFAULT_EMULATOR="$(CONFIG_XTERM_CHAN_DEFAULT_EMULATOR)"'
+
include arch/um/scripts/Makefile.rules
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 62997055c454..26a702a06515 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -133,7 +133,7 @@ static void line_timer_cb(struct work_struct *work)
struct line *line = container_of(work, struct line, task.work);
if (!line->throttled)
- chan_interrupt(line, line->driver->read_irq);
+ chan_interrupt(line, line->read_irq);
}
int enable_chan(struct line *line)
@@ -195,9 +195,9 @@ void free_irqs(void)
chan = list_entry(ele, struct chan, free_list);
if (chan->input && chan->enabled)
- um_free_irq(chan->line->driver->read_irq, chan);
+ um_free_irq(chan->line->read_irq, chan);
if (chan->output && chan->enabled)
- um_free_irq(chan->line->driver->write_irq, chan);
+ um_free_irq(chan->line->write_irq, chan);
chan->enabled = 0;
}
}
@@ -215,9 +215,9 @@ static void close_one_chan(struct chan *chan, int delay_free_irq)
spin_unlock_irqrestore(&irqs_to_free_lock, flags);
} else {
if (chan->input && chan->enabled)
- um_free_irq(chan->line->driver->read_irq, chan);
+ um_free_irq(chan->line->read_irq, chan);
if (chan->output && chan->enabled)
- um_free_irq(chan->line->driver->write_irq, chan);
+ um_free_irq(chan->line->write_irq, chan);
chan->enabled = 0;
}
if (chan->ops->close != NULL)
diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
index 6040817c036f..25727ed648b7 100644
--- a/arch/um/drivers/chan_user.c
+++ b/arch/um/drivers/chan_user.c
@@ -220,7 +220,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
unsigned long *stack_out)
{
struct winch_data data;
- int fds[2], n, err;
+ int fds[2], n, err, pid;
char c;
err = os_pipe(fds, 1, 1);
@@ -238,8 +238,9 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
* problem with /dev/net/tun, which if held open by this
* thread, prevents the TUN/TAP device from being reused.
*/
- err = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
- if (err < 0) {
+ pid = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
+ if (pid < 0) {
+ err = pid;
printk(UM_KERN_ERR "fork of winch_thread failed - errno = %d\n",
-err);
goto out_close;
@@ -263,7 +264,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
goto out_close;
}
- return err;
+ return pid;
out_close:
close(fds[1]);
diff --git a/arch/um/drivers/daemon_kern.c b/arch/um/drivers/daemon_kern.c
index fd2402669c49..afde1e82c056 100644
--- a/arch/um/drivers/daemon_kern.c
+++ b/arch/um/drivers/daemon_kern.c
@@ -65,7 +65,7 @@ static int daemon_setup(char *str, char **mac_out, void *data)
*init = ((struct daemon_init)
{ .sock_type = "unix",
- .ctl_sock = "/tmp/uml.ctl" });
+ .ctl_sock = CONFIG_UML_NET_DAEMON_DEFAULT_SOCK });
remain = split_if_spec(str, mac_out, &init->sock_type, &init->ctl_sock,
NULL);
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 8febf95da96e..02b0befd6763 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -139,7 +139,7 @@ static int flush_buffer(struct line *line)
count = line->buffer + LINE_BUFSIZE - line->head;
n = write_chan(line->chan_out, line->head, count,
- line->driver->write_irq);
+ line->write_irq);
if (n < 0)
return n;
if (n == count) {
@@ -156,7 +156,7 @@ static int flush_buffer(struct line *line)
count = line->tail - line->head;
n = write_chan(line->chan_out, line->head, count,
- line->driver->write_irq);
+ line->write_irq);
if (n < 0)
return n;
@@ -195,7 +195,7 @@ int line_write(struct tty_struct *tty, const unsigned char *buf, int len)
ret = buffer_data(line, buf, len);
else {
n = write_chan(line->chan_out, buf, len,
- line->driver->write_irq);
+ line->write_irq);
if (n < 0) {
ret = n;
goto out_up;
@@ -215,7 +215,7 @@ void line_throttle(struct tty_struct *tty)
{
struct line *line = tty->driver_data;
- deactivate_chan(line->chan_in, line->driver->read_irq);
+ deactivate_chan(line->chan_in, line->read_irq);
line->throttled = 1;
}
@@ -224,7 +224,7 @@ void line_unthrottle(struct tty_struct *tty)
struct line *line = tty->driver_data;
line->throttled = 0;
- chan_interrupt(line, line->driver->read_irq);
+ chan_interrupt(line, line->read_irq);
}
static irqreturn_t line_write_interrupt(int irq, void *data)
@@ -260,19 +260,23 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
int err;
if (input) {
- err = um_request_irq(driver->read_irq, fd, IRQ_READ,
- line_interrupt, IRQF_SHARED,
+ err = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ,
+ line_interrupt, 0,
driver->read_irq_name, data);
if (err < 0)
return err;
+
+ line->read_irq = err;
}
if (output) {
- err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
- line_write_interrupt, IRQF_SHARED,
+ err = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_WRITE,
+ line_write_interrupt, 0,
driver->write_irq_name, data);
if (err < 0)
return err;
+
+ line->write_irq = err;
}
return 0;
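Rather than every console and serial line sharing the fixed IRQ numbers baked into the line_driver template, each line now passes UM_IRQ_ALLOC and stores the number um_request_irq() returns (the allocated IRQ on success, a negative errno on failure); teardown then uses the per-line value. Condensed from the hunks above and in chan_kern.c, for illustration:

/* Request: UM_IRQ_ALLOC lets um_request_irq() pick a free IRQ number. */
line->read_irq = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ,
				line_interrupt, 0,
				driver->read_irq_name, data);
if (line->read_irq < 0)
	return line->read_irq;

/* Release: free using the number remembered on the line, not the driver. */
um_free_irq(line->read_irq, chan);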
diff --git a/arch/um/drivers/line.h b/arch/um/drivers/line.h
index bdb16b96e76f..f15be75a3bf3 100644
--- a/arch/um/drivers/line.h
+++ b/arch/um/drivers/line.h
@@ -23,9 +23,7 @@ struct line_driver {
const short minor_start;
const short type;
const short subtype;
- const int read_irq;
const char *read_irq_name;
- const int write_irq;
const char *write_irq_name;
struct mc_device mc;
struct tty_driver *driver;
@@ -35,6 +33,8 @@ struct line {
struct tty_port port;
int valid;
+ int read_irq, write_irq;
+
char *init_str;
struct list_head chan_list;
struct chan *chan_in, *chan_out;
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index 41eae2e8fb65..8514966778d5 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -47,9 +47,7 @@ static struct line_driver driver = {
.minor_start = 64,
.type = TTY_DRIVER_TYPE_SERIAL,
.subtype = 0,
- .read_irq = SSL_IRQ,
.read_irq_name = "ssl",
- .write_irq = SSL_WRITE_IRQ,
.write_irq_name = "ssl-write",
.mc = {
.list = LIST_HEAD_INIT(driver.mc.list),
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index e8b762f4d8c2..489d5a746ed3 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -53,9 +53,7 @@ static struct line_driver driver = {
.minor_start = 0,
.type = TTY_DRIVER_TYPE_CONSOLE,
.subtype = SYSTEM_TYPE_CONSOLE,
- .read_irq = CONSOLE_IRQ,
.read_irq_name = "console",
- .write_irq = CONSOLE_WRITE_IRQ,
.write_irq_name = "console-write",
.mc = {
.list = LIST_HEAD_INIT(driver.mc.list),
diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
index ba562d68dc04..82ff3785bf69 100644
--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -63,6 +63,7 @@ struct virtio_uml_device {
u8 config_changed_irq:1;
uint64_t vq_irq_vq_map;
+ int recv_rc;
};
struct virtio_uml_vq_info {
@@ -148,14 +149,6 @@ static int vhost_user_recv(struct virtio_uml_device *vu_dev,
rc = vhost_user_recv_header(fd, msg);
- if (rc == -ECONNRESET && vu_dev->registered) {
- struct virtio_uml_platform_data *pdata;
-
- pdata = vu_dev->pdata;
-
- virtio_break_device(&vu_dev->vdev);
- schedule_work(&pdata->conn_broken_wk);
- }
if (rc)
return rc;
size = msg->header.size;
@@ -164,6 +157,21 @@ static int vhost_user_recv(struct virtio_uml_device *vu_dev,
return full_read(fd, &msg->payload, size, false);
}
+static void vhost_user_check_reset(struct virtio_uml_device *vu_dev,
+ int rc)
+{
+ struct virtio_uml_platform_data *pdata = vu_dev->pdata;
+
+ if (rc != -ECONNRESET)
+ return;
+
+ if (!vu_dev->registered)
+ return;
+
+ virtio_break_device(&vu_dev->vdev);
+ schedule_work(&pdata->conn_broken_wk);
+}
+
static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
struct vhost_user_msg *msg,
size_t max_payload_size)
@@ -171,8 +179,10 @@ static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
max_payload_size, true);
- if (rc)
+ if (rc) {
+ vhost_user_check_reset(vu_dev, rc);
return rc;
+ }
if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
return -EPROTO;
@@ -369,6 +379,7 @@ static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
sizeof(msg.msg.payload) +
sizeof(msg.extra_payload));
+ vu_dev->recv_rc = rc;
if (rc)
return IRQ_NONE;
@@ -412,7 +423,9 @@ static irqreturn_t vu_req_interrupt(int irq, void *data)
if (!um_irq_timetravel_handler_used())
ret = vu_req_read_message(vu_dev, NULL);
- if (vu_dev->vq_irq_vq_map) {
+ if (vu_dev->recv_rc) {
+ vhost_user_check_reset(vu_dev, vu_dev->recv_rc);
+ } else if (vu_dev->vq_irq_vq_map) {
struct virtqueue *vq;
virtio_device_for_each_vq((&vu_dev->vdev), vq) {
diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c
index 87ca4a47cd66..6918de5e2956 100644
--- a/arch/um/drivers/xterm.c
+++ b/arch/um/drivers/xterm.c
@@ -42,7 +42,7 @@ static void *xterm_init(char *str, int device, const struct chan_opts *opts)
}
/* Only changed by xterm_setup, which is a setup */
-static char *terminal_emulator = "xterm";
+static char *terminal_emulator = CONFIG_XTERM_CHAN_DEFAULT_EMULATOR;
static char *title_switch = "-T";
static char *exec_switch = "-e";
@@ -79,8 +79,9 @@ __uml_setup("xterm=", xterm_setup,
" respectively. The title switch must have the form '<switch> title',\n"
" not '<switch>=title'. Similarly, the exec switch must have the form\n"
" '<switch> command arg1 arg2 ...'.\n"
-" The default values are 'xterm=xterm,-T,-e'. Values for gnome-terminal\n"
-" are 'xterm=gnome-terminal,-t,-x'.\n\n"
+" The default values are 'xterm=" CONFIG_XTERM_CHAN_DEFAULT_EMULATOR
+ ",-T,-e'.\n"
+" Values for gnome-terminal are 'xterm=gnome-terminal,-t,-x'.\n\n"
);
static int xterm_open(int input, int output, int primary, void *d,
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index f1f3f52f1e9c..b2d834a29f3a 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += bug.h
generic-y += compat.h
generic-y += current.h
generic-y += device.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
diff --git a/arch/um/include/asm/irq.h b/arch/um/include/asm/irq.h
index e187c789369d..749dfe8512e8 100644
--- a/arch/um/include/asm/irq.h
+++ b/arch/um/include/asm/irq.h
@@ -4,19 +4,15 @@
#define TIMER_IRQ 0
#define UMN_IRQ 1
-#define CONSOLE_IRQ 2
-#define CONSOLE_WRITE_IRQ 3
-#define UBD_IRQ 4
-#define UM_ETH_IRQ 5
-#define SSL_IRQ 6
-#define SSL_WRITE_IRQ 7
-#define ACCEPT_IRQ 8
-#define MCONSOLE_IRQ 9
-#define WINCH_IRQ 10
-#define SIGIO_WRITE_IRQ 11
-#define TELNETD_IRQ 12
-#define XTERM_IRQ 13
-#define RANDOM_IRQ 14
+#define UBD_IRQ 2
+#define UM_ETH_IRQ 3
+#define ACCEPT_IRQ 4
+#define MCONSOLE_IRQ 5
+#define WINCH_IRQ 6
+#define SIGIO_WRITE_IRQ 7
+#define TELNETD_IRQ 8
+#define XTERM_IRQ 9
+#define RANDOM_IRQ 10
#ifdef CONFIG_UML_NET_VECTOR
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index 1395cbd7e340..c7b4b49826a2 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -60,6 +60,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_RESTORE_SIGMASK 7
#define TIF_NOTIFY_RESUME 8
#define TIF_SECCOMP 9 /* secure computing */
+#define TIF_SINGLESTEP 10 /* single stepping userspace */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -68,5 +69,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_MEMDIE (1 << TIF_MEMDIE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#endif
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index c85e40c72779..58938d75871a 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -43,7 +43,7 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
{
PT_REGS_IP(regs) = eip;
PT_REGS_SP(regs) = esp;
- current->ptrace &= ~PT_DTRACE;
+ clear_thread_flag(TIF_SINGLESTEP);
#ifdef SUBARCH_EXECVE1
SUBARCH_EXECVE1(regs->regs);
#endif
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 80504680be08..80b90b1276a1 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -154,16 +154,17 @@ void fork_handler(void)
userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}
-int copy_thread(unsigned long clone_flags, unsigned long sp,
- unsigned long arg, struct task_struct * p, unsigned long tls)
+int copy_thread(struct task_struct * p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long sp = args->stack;
+ unsigned long tls = args->tls;
void (*handler)(void);
- int kthread = current->flags & (PF_KTHREAD | PF_IO_WORKER);
int ret = 0;
p->thread = (struct thread_struct) INIT_THREAD;
- if (!kthread) {
+ if (!args->fn) {
memcpy(&p->thread.regs.regs, current_pt_regs(),
sizeof(p->thread.regs.regs));
PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
@@ -175,14 +176,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
arch_copy_thread(&current->thread.arch, &p->thread.arch);
} else {
get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
- p->thread.request.u.thread.proc = (int (*)(void *))sp;
- p->thread.request.u.thread.arg = (void *)arg;
+ p->thread.request.u.thread.proc = args->fn;
+ p->thread.request.u.thread.arg = args->fn_arg;
handler = new_thread_handler;
}
new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
- if (!kthread) {
+ if (!args->fn) {
clear_flushed_tls(p);
/*
@@ -335,7 +336,7 @@ int singlestepping(void * t)
{
struct task_struct *task = t ? t : current;
- if (!(task->ptrace & PT_DTRACE))
+ if (!test_thread_flag(TIF_SINGLESTEP))
return 0;
if (task->thread.singlestep_syscall)
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index bfaf6ab1ac03..5154b27de580 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -11,7 +11,7 @@
void user_enable_single_step(struct task_struct *child)
{
- child->ptrace |= PT_DTRACE;
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
child->thread.singlestep_syscall = 0;
#ifdef SUBARCH_SET_SINGLESTEPPING
@@ -21,7 +21,7 @@ void user_enable_single_step(struct task_struct *child)
void user_disable_single_step(struct task_struct *child)
{
- child->ptrace &= ~PT_DTRACE;
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
child->thread.singlestep_syscall = 0;
#ifdef SUBARCH_SET_SINGLESTEPPING
@@ -120,7 +120,7 @@ static void send_sigtrap(struct uml_pt_regs *regs, int error_code)
}
/*
- * XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and
+ * XXX Check TIF_SINGLESTEP for singlestepping check and
* PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check
*/
int syscall_trace_enter(struct pt_regs *regs)
@@ -144,7 +144,7 @@ void syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(regs);
/* Fake a debug trap */
- if (ptraced & PT_DTRACE)
+ if (test_thread_flag(TIF_SINGLESTEP))
send_sigtrap(&regs->regs, 0);
if (!test_thread_flag(TIF_SYSCALL_TRACE))
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 88cd9b5c1b74..ae4658f576ab 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -53,7 +53,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
unsigned long sp;
int err;
- if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
+ if (test_thread_flag(TIF_SINGLESTEP) && (current->ptrace & PT_PTRACED))
singlestep = 1;
/* Did we come from a system call? */
@@ -128,7 +128,7 @@ void do_signal(struct pt_regs *regs)
* on the host. The tracing thread will check this flag and
* PTRACE_SYSCALL if necessary.
*/
- if (current->ptrace & PT_DTRACE)
+ if (test_thread_flag(TIF_SINGLESTEP))
current->thread.singlestep_syscall =
is_syscall(PT_REGS_IP(&current->thread.regs));
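Taken together, the UML hunks above retire the per-task ptrace bit PT_DTRACE in favour of a TIF_SINGLESTEP thread_info flag handled with the standard helpers. The three kinds of call site reduce to the following snippet (condensed for illustration, not a single real function):

/* Enable or disable single stepping for a traced child. */
set_tsk_thread_flag(child, TIF_SINGLESTEP);
clear_tsk_thread_flag(child, TIF_SINGLESTEP);

/* Query it for the current task, e.g. before faking a debug trap. */
if (test_thread_flag(TIF_SINGLESTEP))
	send_sigtrap(&regs->regs, 0);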
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cf531fbcd229..e0498b2e9b79 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -258,6 +258,7 @@ config X86
select HAVE_PREEMPT_DYNAMIC_CALL
select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_UACCESS_VALIDATION if HAVE_OBJTOOL
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_USER_RETURN_NOTIFIER
select HAVE_GENERIC_VDSO
@@ -2870,10 +2871,6 @@ config COMPAT
if COMPAT
config COMPAT_FOR_U64_ALIGNMENT
def_bool y
-
-config SYSVIPC_COMPAT
- def_bool y
- depends on SYSVIPC
endif
endmenu
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 0352e4589efa..f912d7770130 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -163,7 +163,7 @@ extra_header_fields:
.long 0x200 # SizeOfHeaders
.long 0 # CheckSum
.word IMAGE_SUBSYSTEM_EFI_APPLICATION # Subsystem (EFI application)
-#ifdef CONFIG_DXE_MEM_ATTRIBUTES
+#ifdef CONFIG_EFI_DXE_MEM_ATTRIBUTES
.word IMAGE_DLL_CHARACTERISTICS_NX_COMPAT # DllCharacteristics
#else
.word 0 # DllCharacteristics
diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
index fda6066437aa..ba06322c1e39 100644
--- a/arch/x86/crypto/blowfish_glue.c
+++ b/arch/x86/crypto/blowfish_glue.c
@@ -303,7 +303,7 @@ static int force;
module_param(force, int, 0);
MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
-static int __init init(void)
+static int __init blowfish_init(void)
{
int err;
@@ -327,15 +327,15 @@ static int __init init(void)
return err;
}
-static void __exit fini(void)
+static void __exit blowfish_fini(void)
{
crypto_unregister_alg(&bf_cipher_alg);
crypto_unregister_skciphers(bf_skcipher_algs,
ARRAY_SIZE(bf_skcipher_algs));
}
-module_init(init);
-module_exit(fini);
+module_init(blowfish_init);
+module_exit(blowfish_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized");
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index 66c435ba9d3d..d45e9c0c42ac 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -1377,7 +1377,7 @@ static int force;
module_param(force, int, 0);
MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
-static int __init init(void)
+static int __init camellia_init(void)
{
int err;
@@ -1401,15 +1401,15 @@ static int __init init(void)
return err;
}
-static void __exit fini(void)
+static void __exit camellia_fini(void)
{
crypto_unregister_alg(&camellia_cipher_alg);
crypto_unregister_skciphers(camellia_skcipher_algs,
ARRAY_SIZE(camellia_skcipher_algs));
}
-module_init(init);
-module_exit(fini);
+module_init(camellia_init);
+module_exit(camellia_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized");
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index ccf0b5fa4933..347e97f4b713 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -96,7 +96,7 @@ static struct skcipher_alg serpent_algs[] = {
static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
-static int __init init(void)
+static int __init serpent_avx2_init(void)
{
const char *feature_name;
@@ -115,14 +115,14 @@ static int __init init(void)
serpent_simd_algs);
}
-static void __exit fini(void)
+static void __exit serpent_avx2_fini(void)
{
simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
serpent_simd_algs);
}
-module_init(init);
-module_exit(fini);
+module_init(serpent_avx2_init);
+module_exit(serpent_avx2_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX2 optimized");
diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
index 77e06c2da83d..f9c4adc27404 100644
--- a/arch/x86/crypto/twofish_glue.c
+++ b/arch/x86/crypto/twofish_glue.c
@@ -81,18 +81,18 @@ static struct crypto_alg alg = {
}
};
-static int __init init(void)
+static int __init twofish_glue_init(void)
{
return crypto_register_alg(&alg);
}
-static void __exit fini(void)
+static void __exit twofish_glue_fini(void)
{
crypto_unregister_alg(&alg);
}
-module_init(init);
-module_exit(fini);
+module_init(twofish_glue_init);
+module_exit(twofish_glue_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized");
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index 3507cf2064f1..90454cf18e0d 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -140,7 +140,7 @@ static int force;
module_param(force, int, 0);
MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
-static int __init init(void)
+static int __init twofish_3way_init(void)
{
if (!force && is_blacklisted_cpu()) {
printk(KERN_INFO
@@ -154,13 +154,13 @@ static int __init init(void)
ARRAY_SIZE(tf_skciphers));
}
-static void __exit fini(void)
+static void __exit twofish_3way_fini(void)
{
crypto_unregister_skciphers(tf_skciphers, ARRAY_SIZE(tf_skciphers));
}
-module_init(init);
-module_exit(fini);
+module_init(twofish_3way_init);
+module_exit(twofish_3way_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
diff --git a/arch/x86/events/Kconfig b/arch/x86/events/Kconfig
index 09c56965750a..dabdf3d7bf84 100644
--- a/arch/x86/events/Kconfig
+++ b/arch/x86/events/Kconfig
@@ -6,24 +6,24 @@ config PERF_EVENTS_INTEL_UNCORE
depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
default y
help
- Include support for Intel uncore performance events. These are
- available on NehalemEX and more modern processors.
+ Include support for Intel uncore performance events. These are
+ available on NehalemEX and more modern processors.
config PERF_EVENTS_INTEL_RAPL
tristate "Intel/AMD rapl performance events"
depends on PERF_EVENTS && (CPU_SUP_INTEL || CPU_SUP_AMD) && PCI
default y
help
- Include support for Intel and AMD rapl performance events for power
- monitoring on modern processors.
+ Include support for Intel and AMD rapl performance events for power
+ monitoring on modern processors.
config PERF_EVENTS_INTEL_CSTATE
tristate "Intel cstate performance events"
depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
default y
help
- Include support for Intel cstate performance events for power
- monitoring on modern processors.
+ Include support for Intel cstate performance events for power
+ monitoring on modern processors.
config PERF_EVENTS_AMD_POWER
depends on PERF_EVENTS && CPU_SUP_AMD
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 955ae91c56dc..45024abd929f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -276,7 +276,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
- INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 20fd0acd7d80..b1221da477b7 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -15,17 +15,23 @@
#define compat_mode_t compat_mode_t
typedef u16 compat_mode_t;
+#define __compat_uid_t __compat_uid_t
+typedef u16 __compat_uid_t;
+typedef u16 __compat_gid_t;
+
+#define compat_dev_t compat_dev_t
+typedef u16 compat_dev_t;
+
+#define compat_ipc_pid_t compat_ipc_pid_t
+typedef u16 compat_ipc_pid_t;
+
+#define compat_statfs compat_statfs
+
#include <asm-generic/compat.h>
-#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "i686\0\0"
-typedef u16 __compat_uid_t;
-typedef u16 __compat_gid_t;
-typedef u16 compat_dev_t;
typedef u16 compat_nlink_t;
-typedef u16 compat_ipc_pid_t;
-typedef __kernel_fsid_t compat_fsid_t;
struct compat_stat {
u32 st_dev;
@@ -48,29 +54,11 @@ struct compat_stat {
u32 __unused5;
};
-struct compat_flock {
- short l_type;
- short l_whence;
- compat_off_t l_start;
- compat_off_t l_len;
- compat_pid_t l_pid;
-};
-
-#define F_GETLK64 12 /* using 'struct flock64' */
-#define F_SETLK64 13
-#define F_SETLKW64 14
-
/*
- * IA32 uses 4 byte alignment for 64 bit quantities,
- * so we need to pack this structure.
+ * IA32 uses 4 byte alignment for 64 bit quantities, so we need to pack the
+ * compat flock64 structure.
*/
-struct compat_flock64 {
- short l_type;
- short l_whence;
- compat_loff_t l_start;
- compat_loff_t l_len;
- compat_pid_t l_pid;
-} __attribute__((packed));
+#define __ARCH_NEED_COMPAT_FLOCK64_PACKED
struct compat_statfs {
int f_type;
@@ -87,68 +75,6 @@ struct compat_statfs {
int f_spare[4];
};
-#define COMPAT_RLIM_INFINITY 0xffffffff
-
-#define COMPAT_OFF_T_MAX 0x7fffffff
-
-struct compat_ipc64_perm {
- compat_key_t key;
- __compat_uid32_t uid;
- __compat_gid32_t gid;
- __compat_uid32_t cuid;
- __compat_gid32_t cgid;
- unsigned short mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- compat_ulong_t unused1;
- compat_ulong_t unused2;
-};
-
-struct compat_semid64_ds {
- struct compat_ipc64_perm sem_perm;
- compat_ulong_t sem_otime;
- compat_ulong_t sem_otime_high;
- compat_ulong_t sem_ctime;
- compat_ulong_t sem_ctime_high;
- compat_ulong_t sem_nsems;
- compat_ulong_t __unused3;
- compat_ulong_t __unused4;
-};
-
-struct compat_msqid64_ds {
- struct compat_ipc64_perm msg_perm;
- compat_ulong_t msg_stime;
- compat_ulong_t msg_stime_high;
- compat_ulong_t msg_rtime;
- compat_ulong_t msg_rtime_high;
- compat_ulong_t msg_ctime;
- compat_ulong_t msg_ctime_high;
- compat_ulong_t msg_cbytes;
- compat_ulong_t msg_qnum;
- compat_ulong_t msg_qbytes;
- compat_pid_t msg_lspid;
- compat_pid_t msg_lrpid;
- compat_ulong_t __unused4;
- compat_ulong_t __unused5;
-};
-
-struct compat_shmid64_ds {
- struct compat_ipc64_perm shm_perm;
- compat_size_t shm_segsz;
- compat_ulong_t shm_atime;
- compat_ulong_t shm_atime_high;
- compat_ulong_t shm_dtime;
- compat_ulong_t shm_dtime_high;
- compat_ulong_t shm_ctime;
- compat_ulong_t shm_ctime_high;
- compat_pid_t shm_cpid;
- compat_pid_t shm_lpid;
- compat_ulong_t shm_nattch;
- compat_ulong_t __unused4;
- compat_ulong_t __unused5;
-};
-
#ifdef CONFIG_X86_X32_ABI
#define COMPAT_USE_64BIT_TIME \
(!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
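As with the sparc header earlier in this diff, the x86 compat header now defines only what it overrides, each type preceded by a "#define name name" guard, and then includes asm-generic/compat.h for the rest; shared layouts such as compat_flock and the SysV IPC structures move to the generic header. A stripped-down sketch of the override pattern (the u16 width and the rlimit value are just the ones visible in the hunks above):

/* Override before including the generic header; the generic code guards each
 * definition with an #ifndef of the same name and skips overridden ones. */
#define compat_dev_t compat_dev_t
typedef u16 compat_dev_t;

#define COMPAT_RLIM_INFINITY 0x7fffffff		/* arch-specific value */

#include <asm-generic/compat.h>			/* supplies everything else */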
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 66d3e3b1d24d..ea34cc31b047 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -54,7 +54,7 @@ extern const char * const x86_power_flags[32];
extern const char * const x86_bug_flags[NBUGINTS*32];
#define test_cpu_cap(c, bit) \
- test_bit(bit, (unsigned long *)((c)->x86_capability))
+ arch_test_bit(bit, (unsigned long *)((c)->x86_capability))
/*
* There are 32 bits/features in each mask word. The high bits
diff --git a/arch/x86/include/asm/e820/api.h b/arch/x86/include/asm/e820/api.h
index e8f58ddd06d9..5a39ed59b6db 100644
--- a/arch/x86/include/asm/e820/api.h
+++ b/arch/x86/include/asm/e820/api.h
@@ -4,6 +4,9 @@
#include <asm/e820/types.h>
+struct device;
+struct resource;
+
extern struct e820_table *e820_table;
extern struct e820_table *e820_table_kexec;
extern struct e820_table *e820_table_firmware;
@@ -43,6 +46,8 @@ extern void e820__register_nosave_regions(unsigned long limit_pfn);
extern int e820__get_entry_type(u64 start, u64 end);
+extern void remove_e820_regions(struct device *dev, struct resource *avail);
+
/*
* Returns true iff the specified range [start,end) is completely contained inside
* the ISA region.
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index bed74a0f2932..71943dce691e 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -270,6 +270,8 @@ static inline u32 efi64_convert_status(efi_status_t status)
return (u32)(status | (u64)status >> 32);
}
+#define __efi64_split(val) (val) & U32_MAX, (u64)(val) >> 32
+
#define __efi64_argmap_free_pages(addr, size) \
((addr), 0, (size))
@@ -317,6 +319,13 @@ static inline u32 efi64_convert_status(efi_status_t status)
#define __efi64_argmap_hash_log_extend_event(prot, fl, addr, size, ev) \
((prot), (fl), 0ULL, (u64)(addr), 0ULL, (u64)(size), 0ULL, ev)
+/* DXE services */
+#define __efi64_argmap_get_memory_space_descriptor(phys, desc) \
+ (__efi64_split(phys), (desc))
+
+#define __efi64_argmap_set_memory_space_descriptor(phys, size, flags) \
+ (__efi64_split(phys), __efi64_split(size), __efi64_split(flags))
+
/*
* The macros below handle the plumbing for the argument mapping. To add a
* mapping for a specific EFI method, simply define a macro
diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h
index 155c991ba95e..eeed395c3177 100644
--- a/arch/x86/include/asm/extable.h
+++ b/arch/x86/include/asm/extable.h
@@ -42,9 +42,13 @@ extern int ex_get_fixup_type(unsigned long ip);
extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
#ifdef CONFIG_X86_MCE
-extern void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr);
+extern void __noreturn ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr);
#else
-static inline void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr) { }
+static inline void __noreturn ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr)
+{
+ for (;;)
+ cpu_relax();
+}
#endif
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_X86_64)
diff --git a/arch/x86/include/asm/fpu/sched.h b/arch/x86/include/asm/fpu/sched.h
index 99a8820e8cc4..b2486b2cbc6e 100644
--- a/arch/x86/include/asm/fpu/sched.h
+++ b/arch/x86/include/asm/fpu/sched.h
@@ -11,7 +11,7 @@
extern void save_fpregs_to_fpstate(struct fpu *fpu);
extern void fpu__drop(struct fpu *fpu);
-extern int fpu_clone(struct task_struct *dst, unsigned long clone_flags);
+extern int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal);
extern void fpu_flush_thread(void);
/*
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 024d9797646e..b5ef474be858 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -9,6 +9,13 @@
# define MCOUNT_ADDR ((unsigned long)(__fentry__))
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
+/* Ignore unused weak functions which will have non zero offsets */
+#ifdef CONFIG_HAVE_FENTRY
+# include <asm/ibt.h>
+/* Add offset for endbr64 if IBT enabled */
+# define FTRACE_MCOUNT_MAX_OFFSET ENDBR_INSN_SIZE
+#endif
+
#ifdef CONFIG_DYNAMIC_FTRACE
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 11b7c06e2828..6ad8d946cd3e 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -186,6 +186,14 @@ extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages
+#ifdef CONFIG_KEXEC_FILE
+struct purgatory_info;
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+ Elf_Shdr *section,
+ const Elf_Shdr *relsec,
+ const Elf_Shdr *symtab);
+#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+#endif
#endif
typedef void crash_vmclear_fn(void);
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
deleted file mode 100644
index 7c5cc6660e4b..000000000000
--- a/arch/x86/include/asm/livepatch.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * livepatch.h - x86-specific Kernel Live Patching Core
- *
- * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
- * Copyright (C) 2014 SUSE
- */
-
-#ifndef _ASM_X86_LIVEPATCH_H
-#define _ASM_X86_LIVEPATCH_H
-
-#include <asm/setup.h>
-#include <linux/ftrace.h>
-
-static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
-{
- ftrace_instruction_pointer_set(fregs, ip);
-}
-
-#endif /* _ASM_X86_LIVEPATCH_H */
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 1307cd689d2a..f52a886d35cf 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -42,6 +42,8 @@ do { \
#define PCI_ROOT_NO_CRS 0x100000
#define PCI_NOASSIGN_BARS 0x200000
#define PCI_BIG_ROOT_WINDOW 0x400000
+#define PCI_USE_E820 0x800000
+#define PCI_NO_E820 0x1000000
extern unsigned int pci_probe;
extern unsigned long pirq_table_addr;
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 91d0f93a00c7..356308c73951 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -559,7 +559,7 @@ static __always_inline void native_swapgs(void)
#endif
}
-static inline unsigned long current_top_of_stack(void)
+static __always_inline unsigned long current_top_of_stack(void)
{
/*
* We can't read directly from tss.sp0: sp0 on x86_32 is special in
@@ -569,7 +569,7 @@ static inline unsigned long current_top_of_stack(void)
return this_cpu_read_stable(cpu_current_top_of_stack);
}
-static inline bool on_thread_stack(void)
+static __always_inline bool on_thread_stack(void)
{
return (unsigned long)(current_top_of_stack() -
current_stack_pointer) < THREAD_SIZE;
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 78ca53512486..b45c4d27fd46 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -86,56 +86,4 @@ bool kernel_page_present(struct page *page);
extern int kernel_set_to_readonly;
-#ifdef CONFIG_X86_64
-/*
- * Prevent speculative access to the page by either unmapping
- * it (if we do not require access to any part of the page) or
- * marking it uncacheable (if we want to try to retrieve data
- * from non-poisoned lines in the page).
- */
-static inline int set_mce_nospec(unsigned long pfn, bool unmap)
-{
- unsigned long decoy_addr;
- int rc;
-
- /* SGX pages are not in the 1:1 map */
- if (arch_is_platform_page(pfn << PAGE_SHIFT))
- return 0;
- /*
- * We would like to just call:
- * set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
- * but doing that would radically increase the odds of a
- * speculative access to the poison page because we'd have
- * the virtual address of the kernel 1:1 mapping sitting
- * around in registers.
- * Instead we get tricky. We create a non-canonical address
- * that looks just like the one we want, but has bit 63 flipped.
- * This relies on set_memory_XX() properly sanitizing any __pa()
- * results with __PHYSICAL_MASK or PTE_PFN_MASK.
- */
- decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
-
- if (unmap)
- rc = set_memory_np(decoy_addr, 1);
- else
- rc = set_memory_uc(decoy_addr, 1);
- if (rc)
- pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
- return rc;
-}
-#define set_mce_nospec set_mce_nospec
-
-/* Restore full speculative operation to the pfn. */
-static inline int clear_mce_nospec(unsigned long pfn)
-{
- return set_memory_wb((unsigned long) pfn_to_kaddr(pfn), 1);
-}
-#define clear_mce_nospec clear_mce_nospec
-#else
-/*
- * Few people would run a 32-bit kernel on a machine that supports
- * recoverable errors because they have too much memory to boot 32-bit.
- */
-#endif
-
#endif /* _ASM_X86_SET_MEMORY_H */
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index b5f0d2ff47e4..c08eb0fdd11f 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -78,13 +78,13 @@ static inline void update_task_stack(struct task_struct *task)
}
static inline void kthread_frame_init(struct inactive_task_frame *frame,
- unsigned long fun, unsigned long arg)
+ int (*fun)(void *), void *arg)
{
- frame->bx = fun;
+ frame->bx = (unsigned long)fun;
#ifdef CONFIG_X86_32
- frame->di = arg;
+ frame->di = (unsigned long)arg;
#else
- frame->r12 = arg;
+ frame->r12 = (unsigned long)arg;
#endif
}
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 80e9d5206a71..761173ccc33c 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -22,6 +22,7 @@
# include <asm/unistd_32_ia32.h>
# define __ARCH_WANT_SYS_TIME
# define __ARCH_WANT_SYS_UTIME
+# define __ARCH_WANT_COMPAT_STAT
# define __ARCH_WANT_COMPAT_SYS_PREADV64
# define __ARCH_WANT_COMPAT_SYS_PWRITEV64
# define __ARCH_WANT_COMPAT_SYS_PREADV64V2
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 1fc67df50014..fa9ec20783fa 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -347,9 +347,6 @@ unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);
-#define xen_remap(cookie, size) ioremap((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
static inline bool xen_arch_need_swiotlb(struct device *dev,
phys_addr_t phys,
dma_addr_t dev_addr)
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index d775fcd74e98..2c8ec5c71712 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -581,7 +581,7 @@ static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
pfn = mce->addr >> PAGE_SHIFT;
if (!memory_failure(pfn, 0)) {
- set_mce_nospec(pfn, whole_page(mce));
+ set_mce_nospec(pfn);
mce->kflags |= MCE_HANDLED_UC;
}
@@ -1318,7 +1318,7 @@ static void kill_me_maybe(struct callback_head *cb)
ret = memory_failure(p->mce_addr >> PAGE_SHIFT, flags);
if (!ret) {
- set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
+ set_mce_nospec(p->mce_addr >> PAGE_SHIFT);
sync_core();
return;
}
@@ -1344,7 +1344,7 @@ static void kill_me_never(struct callback_head *cb)
p->mce_count = 0;
pr_err("Kernel accessed poison in user space at %llx\n", p->mce_addr);
if (!memory_failure(p->mce_addr >> PAGE_SHIFT, 0))
- set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
+ set_mce_nospec(p->mce_addr >> PAGE_SHIFT);
}
static void queue_task_work(struct mce *m, char *msg, void (*func)(struct callback_head *))
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 5b8f2c357160..831613959a92 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -457,6 +457,8 @@ static void __init ms_hyperv_init_platform(void)
*/
if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT))
mark_tsc_unstable("running on Hyper-V");
+
+ hardlockup_detector_disable();
}
static bool __init ms_hyperv_x2apic_available(void)
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 5fcac46aaf6b..5f4ae5476e19 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -10,8 +10,7 @@
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
-
-#include <linux/uaccess.h>
+#include <linux/uio.h>
static inline bool is_crashed_pfn_valid(unsigned long pfn)
{
@@ -29,21 +28,8 @@ static inline bool is_crashed_pfn_valid(unsigned long pfn)
#endif
}
-/**
- * copy_oldmem_page - copy one page from "oldmem"
- * @pfn: page frame number to be copied
- * @buf: target memory address for the copy; this can be in kernel address
- * space or user address space (see @userbuf)
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page (based on pfn) to begin the copy
- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
- * otherwise @buf is in kernel address space, use memcpy().
- *
- * Copy a page from "oldmem". For this page, there might be no pte mapped
- * in the current kernel.
- */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
- unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
+ unsigned long offset)
{
void *vaddr;
@@ -54,14 +40,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
return -EFAULT;
vaddr = kmap_local_pfn(pfn);
-
- if (!userbuf) {
- memcpy(buf, vaddr + offset, csize);
- } else {
- if (copy_to_user(buf, vaddr + offset, csize))
- csize = -EFAULT;
- }
-
+ csize = copy_to_iter(vaddr + offset, csize, iter);
kunmap_local(vaddr);
return csize;
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index 97529552dd24..e75bc2f217ff 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -8,12 +8,12 @@
#include <linux/errno.h>
#include <linux/crash_dump.h>
-#include <linux/uaccess.h>
+#include <linux/uio.h>
#include <linux/io.h>
#include <linux/cc_platform.h>
-static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
- unsigned long offset, int userbuf,
+static ssize_t __copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset,
bool encrypted)
{
void *vaddr;
@@ -29,50 +29,36 @@ static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
if (!vaddr)
return -ENOMEM;
- if (userbuf) {
- if (copy_to_user((void __user *)buf, vaddr + offset, csize)) {
- iounmap((void __iomem *)vaddr);
- return -EFAULT;
- }
- } else
- memcpy(buf, vaddr + offset, csize);
+ csize = copy_to_iter(vaddr + offset, csize, iter);
iounmap((void __iomem *)vaddr);
return csize;
}
-/**
- * copy_oldmem_page - copy one page of memory
- * @pfn: page frame number to be copied
- * @buf: target memory address for the copy; this can be in kernel address
- * space or user address space (see @userbuf)
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page (based on pfn) to begin the copy
- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
- * otherwise @buf is in kernel address space, use memcpy().
- *
- * Copy a page from the old kernel's memory. For this page, there is no pte
- * mapped in the current kernel. We stitch up a pte, similar to kmap_atomic.
- */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
- unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
+ unsigned long offset)
{
- return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, false);
+ return __copy_oldmem_page(iter, pfn, csize, offset, false);
}
-/**
+/*
* copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the
* memory with the encryption mask set to accommodate kdump on SME-enabled
* machines.
*/
-ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
- unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
{
- return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
+ return __copy_oldmem_page(iter, pfn, csize, offset, true);
}
ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
- return read_from_oldmem(buf, count, ppos, 0,
+ struct kvec kvec = { .iov_base = buf, .iov_len = count };
+ struct iov_iter iter;
+
+ iov_iter_kvec(&iter, READ, &kvec, 1, count);
+
+ return read_from_oldmem(&iter, count, ppos,
cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT));
}
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 0fdc807ae13f..0531d6a06df5 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -556,7 +556,7 @@ static inline void fpu_inherit_perms(struct fpu *dst_fpu)
}
/* Clone current's FPU state on fork */
-int fpu_clone(struct task_struct *dst, unsigned long clone_flags)
+int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal)
{
struct fpu *src_fpu = &current->thread.fpu;
struct fpu *dst_fpu = &dst->thread.fpu;
@@ -579,7 +579,7 @@ int fpu_clone(struct task_struct *dst, unsigned long clone_flags)
* No FPU state inheritance for kernel threads and IO
* worker threads.
*/
- if (dst->flags & (PF_KTHREAD | PF_IO_WORKER)) {
+ if (minimal) {
/* Clear out the minimal state */
memcpy(&dst_fpu->fpstate->regs, &init_fpstate.regs,
init_fpstate_copy_size());
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index b09d73c2ba89..5b4efc927d80 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -37,7 +37,7 @@
static int ftrace_poke_late = 0;
-int ftrace_arch_code_modify_prepare(void)
+void ftrace_arch_code_modify_prepare(void)
__acquires(&text_mutex)
{
/*
@@ -47,10 +47,9 @@ int ftrace_arch_code_modify_prepare(void)
*/
mutex_lock(&text_mutex);
ftrace_poke_late = 1;
- return 0;
}
-int ftrace_arch_code_modify_post_process(void)
+void ftrace_arch_code_modify_post_process(void)
__releases(&text_mutex)
{
/*
@@ -61,7 +60,6 @@ int ftrace_arch_code_modify_post_process(void)
text_poke_finish();
ftrace_poke_late = 0;
mutex_unlock(&text_mutex);
- return 0;
}
static const char *ftrace_nop_replace(void)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 58fb48d3004f..9b2772b7e1f3 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -131,9 +131,11 @@ static int set_new_tls(struct task_struct *p, unsigned long tls)
return do_set_thread_area_64(p, ARCH_SET_FS, tls);
}
-int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
- struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long sp = args->stack;
+ unsigned long tls = args->tls;
struct inactive_task_frame *frame;
struct fork_frame *fork_frame;
struct pt_regs *childregs;
@@ -171,13 +173,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
frame->flags = X86_EFLAGS_FIXED;
#endif
- fpu_clone(p, clone_flags);
+ fpu_clone(p, clone_flags, args->fn);
/* Kernel thread ? */
if (unlikely(p->flags & PF_KTHREAD)) {
p->thread.pkru = pkru_get_init_value();
memset(childregs, 0, sizeof(struct pt_regs));
- kthread_frame_init(frame, sp, arg);
+ kthread_frame_init(frame, args->fn, args->fn_arg);
return 0;
}
@@ -193,10 +195,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
if (sp)
childregs->sp = sp;
- if (unlikely(p->flags & PF_IO_WORKER)) {
+ if (unlikely(args->fn)) {
/*
- * An IO thread is a user space thread, but it doesn't
- * return to ret_after_fork().
+ * A user space thread, but it doesn't return to
+ * ret_after_fork().
*
* In order to indicate that to tools like gdb,
* we reset the stack and instruction pointers.
@@ -206,7 +208,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
*/
childregs->sp = 0;
childregs->ip = 0;
- kthread_frame_init(frame, sp, arg);
+ kthread_frame_init(frame, args->fn, args->fn_arg);
return 0;
}
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index fa700b46588e..c3636ea4aa71 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -739,10 +739,10 @@ static void native_machine_halt(void)
static void native_machine_power_off(void)
{
- if (pm_power_off) {
+ if (kernel_can_power_off()) {
if (!reboot_force)
machine_shutdown();
- pm_power_off();
+ do_kernel_power_off();
}
/* A fallback in case there is no PM info available */
tboot_shutdown(TB_SHUTDOWN_HALT);
diff --git a/arch/x86/kernel/resource.c b/arch/x86/kernel/resource.c
index 9b9fb7882c20..db2b350a37b7 100644
--- a/arch/x86/kernel/resource.c
+++ b/arch/x86/kernel/resource.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/dev_printk.h>
#include <linux/ioport.h>
#include <asm/e820/api.h>
@@ -23,16 +24,27 @@ static void resource_clip(struct resource *res, resource_size_t start,
res->start = end + 1;
}
-static void remove_e820_regions(struct resource *avail)
+void remove_e820_regions(struct device *dev, struct resource *avail)
{
int i;
struct e820_entry *entry;
+ u64 e820_start, e820_end;
+ struct resource orig = *avail;
+
+ if (!(avail->flags & IORESOURCE_MEM))
+ return;
for (i = 0; i < e820_table->nr_entries; i++) {
entry = &e820_table->entries[i];
-
- resource_clip(avail, entry->addr,
- entry->addr + entry->size - 1);
+ e820_start = entry->addr;
+ e820_end = entry->addr + entry->size - 1;
+
+ resource_clip(avail, e820_start, e820_end);
+ if (orig.start != avail->start || orig.end != avail->end) {
+ dev_info(dev, "clipped %pR to %pR for e820 entry [mem %#010Lx-%#010Lx]\n",
+ &orig, avail, e820_start, e820_end);
+ orig = *avail;
+ }
}
}
@@ -43,9 +55,6 @@ void arch_remove_reservations(struct resource *avail)
* the low 1MB unconditionally, as this area is needed for some ISA
* cards requiring a memory range, e.g. the i82365 PCMCIA controller.
*/
- if (avail->flags & IORESOURCE_MEM) {
+ if (avail->flags & IORESOURCE_MEM)
resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END);
-
- remove_e820_regions(avail);
- }
}
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 0f3c307b37b3..8e2b2552b5ee 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -180,8 +180,7 @@ void set_task_blockstep(struct task_struct *task, bool on)
*
* NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
* task is current or it can't be running, otherwise we can race
- * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
- * PTRACE_KILL is not safe.
+ * with __switch_to_xtra(). We rely on ptrace_freeze_traced().
*/
local_irq_disable();
debugctl = get_debugctlmsr();
diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
index fcfc077afe2d..03ae1caaa878 100644
--- a/arch/x86/kernel/tracepoint.c
+++ b/arch/x86/kernel/tracepoint.c
@@ -1,17 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Code for supporting irq vector tracepoints.
- *
* Copyright (C) 2013 Seiji Aguchi <seiji.aguchi@hds.com>
- *
*/
#include <linux/jump_label.h>
#include <linux/atomic.h>
-#include <asm/hw_irq.h>
-#include <asm/desc.h>
#include <asm/trace/exceptions.h>
-#include <asm/trace/irq_vectors.h>
DEFINE_STATIC_KEY_FALSE(trace_pagefault_key);
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index a0702b6be3e8..e2e95a6fccfd 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -90,7 +90,7 @@ static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
{
struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
- int auto_eoi_old, auto_eoi_new;
+ bool auto_eoi_old, auto_eoi_new;
if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
return;
@@ -100,16 +100,16 @@ static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
else
__clear_bit(vector, synic->vec_bitmap);
- auto_eoi_old = bitmap_weight(synic->auto_eoi_bitmap, 256);
+ auto_eoi_old = !bitmap_empty(synic->auto_eoi_bitmap, 256);
if (synic_has_vector_auto_eoi(synic, vector))
__set_bit(vector, synic->auto_eoi_bitmap);
else
__clear_bit(vector, synic->auto_eoi_bitmap);
- auto_eoi_new = bitmap_weight(synic->auto_eoi_bitmap, 256);
+ auto_eoi_new = !bitmap_empty(synic->auto_eoi_bitmap, 256);
- if (!!auto_eoi_old == !!auto_eoi_new)
+ if (auto_eoi_old == auto_eoi_new)
return;
if (!enable_apicv)
@@ -1855,7 +1855,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
all_cpus = flush_ex.hv_vp_set.format !=
HV_GENERIC_SET_SPARSE_4K;
- if (hc->var_cnt != bitmap_weight((unsigned long *)&valid_bank_mask, 64))
+ if (hc->var_cnt != hweight64(valid_bank_mask))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
if (all_cpus)
@@ -1956,7 +1956,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
- if (hc->var_cnt != bitmap_weight((unsigned long *)&valid_bank_mask, 64))
+ if (hc->var_cnt != hweight64(valid_bank_mask))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
if (all_cpus)
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 189344924a2b..145f9a0bde29 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -32,7 +32,6 @@ csum_and_copy_from_user(const void __user *src, void *dst, int len)
user_access_end();
return sum;
}
-EXPORT_SYMBOL(csum_and_copy_from_user);
/**
* csum_and_copy_to_user - Copy and checksum to user space.
@@ -57,7 +56,6 @@ csum_and_copy_to_user(const void *src, void __user *dst, int len)
user_access_end();
return sum;
}
-EXPORT_SYMBOL(csum_and_copy_to_user);
/**
* csum_partial_copy_nocheck - Copy and checksum.
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 0656db33574d..1abd5438f126 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -19,6 +19,7 @@
#include <linux/vmstat.h>
#include <linux/kernel.h>
#include <linux/cc_platform.h>
+#include <linux/set_memory.h>
#include <asm/e820/api.h>
#include <asm/processor.h>
@@ -29,7 +30,6 @@
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/memtype.h>
-#include <asm/set_memory.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
@@ -1805,7 +1805,7 @@ static inline int cpa_clear_pages_array(struct page **pages, int numpages,
}
/*
- * _set_memory_prot is an internal helper for callers that have been passed
+ * __set_memory_prot is an internal helper for callers that have been passed
* a pgprot_t value from upper layers and a reservation has already been taken.
* If you want to set the pgprot to a specific page protocol, use the
* set_memory_xx() functions.
@@ -1914,6 +1914,51 @@ int set_memory_wb(unsigned long addr, int numpages)
}
EXPORT_SYMBOL(set_memory_wb);
+/* Prevent speculative access to a page by marking it not-present */
+#ifdef CONFIG_X86_64
+int set_mce_nospec(unsigned long pfn)
+{
+ unsigned long decoy_addr;
+ int rc;
+
+ /* SGX pages are not in the 1:1 map */
+ if (arch_is_platform_page(pfn << PAGE_SHIFT))
+ return 0;
+ /*
+ * We would like to just call:
+ * set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
+ * but doing that would radically increase the odds of a
+ * speculative access to the poison page because we'd have
+ * the virtual address of the kernel 1:1 mapping sitting
+ * around in registers.
+ * Instead we get tricky. We create a non-canonical address
+ * that looks just like the one we want, but has bit 63 flipped.
+ * This relies on set_memory_XX() properly sanitizing any __pa()
+ * results with __PHYSICAL_MASK or PTE_PFN_MASK.
+ */
+ decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
+
+ rc = set_memory_np(decoy_addr, 1);
+ if (rc)
+ pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
+ return rc;
+}
+
+static int set_memory_present(unsigned long *addr, int numpages)
+{
+ return change_page_attr_set(addr, numpages, __pgprot(_PAGE_PRESENT), 0);
+}
+
+/* Restore full speculative operation to the pfn. */
+int clear_mce_nospec(unsigned long pfn)
+{
+ unsigned long addr = (unsigned long) pfn_to_kaddr(pfn);
+
+ return set_memory_present(&addr, 1);
+}
+EXPORT_SYMBOL_GPL(clear_mce_nospec);
+#endif /* CONFIG_X86_64 */
+
int set_memory_x(unsigned long addr, int numpages)
{
if (!(__supported_pte_mask & _PAGE_NX))
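[Editor's illustration of the decoy-address trick that set_mce_nospec() relies on above. This is ordinary userspace C, not kernel code; the direct-map base, page shift, and 52-bit physical mask are assumed example values chosen to mimic x86-64. It shows that flipping bit 63 yields a non-canonical pointer while masking with a physical-address mask (as set_memory_*() effectively does via __pa() and __PHYSICAL_MASK) still selects the same pfn.]

/*
 * Sketch: the decoy address differs from the real 1:1-map address only in
 * bit 63, so no canonical pointer to the poisoned page ends up in registers,
 * yet the physical-address bits survive masking unchanged.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT   12
#define EX_PAGE_OFFSET  0xffff888000000000ULL   /* assumed direct-map base */
#define EX_BIT(n)       (1ULL << (n))
#define EX_PHYS_MASK    ((1ULL << 52) - 1)      /* assumed physical mask */

int main(void)
{
	uint64_t pfn   = 0x12345;
	uint64_t real  = (pfn << EX_PAGE_SHIFT) + EX_PAGE_OFFSET;
	uint64_t decoy = (pfn << EX_PAGE_SHIFT) + (EX_PAGE_OFFSET ^ EX_BIT(63));

	printf("real  kaddr: 0x%016llx\n", (unsigned long long)real);
	printf("decoy kaddr: 0x%016llx (bit 63 flipped, non-canonical)\n",
	       (unsigned long long)decoy);
	printf("same bits after masking: %s\n",
	       (real & EX_PHYS_MASK) == (decoy & EX_PHYS_MASK) ? "yes" : "no");
	return 0;
}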
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 052f1d78a562..a4f43054bc79 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -8,6 +8,7 @@
#include <linux/pci-acpi.h>
#include <asm/numa.h>
#include <asm/pci_x86.h>
+#include <asm/e820/api.h>
struct pci_root_info {
struct acpi_pci_root_info common;
@@ -19,6 +20,7 @@ struct pci_root_info {
#endif
};
+static bool pci_use_e820 = true;
static bool pci_use_crs = true;
static bool pci_ignore_seg;
@@ -41,6 +43,14 @@ static int __init set_ignore_seg(const struct dmi_system_id *id)
return 0;
}
+static int __init set_no_e820(const struct dmi_system_id *id)
+{
+ printk(KERN_INFO "PCI: %s detected: not clipping E820 regions from _CRS\n",
+ id->ident);
+ pci_use_e820 = false;
+ return 0;
+}
+
static const struct dmi_system_id pci_crs_quirks[] __initconst = {
/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
{
@@ -135,6 +145,51 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP xw9300 Workstation"),
},
},
+
+ /*
+ * Many Lenovo models with "IIL" in their DMI_PRODUCT_VERSION have
+ * an E820 reserved region that covers the entire 32-bit host
+ * bridge memory window from _CRS. Using the E820 region to clip
+ * _CRS means no space is available for hot-added or uninitialized
+ * PCI devices. This typically breaks I2C controllers for touchpads
+ * and hot-added Thunderbolt devices. See the commit log for
+ * models known to require this quirk and related bug reports.
+ */
+ {
+ .callback = set_no_e820,
+ .ident = "Lenovo *IIL* product version",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "IIL"),
+ },
+ },
+
+ /*
+ * The Acer Spin 5 (SP513-54N) has the same issue as the Lenovo *IIL*
+ * models: an E820 reservation covers the entire 32-bit _CRS window.
+ * See https://bugs.launchpad.net/bugs/1884232
+ */
+ {
+ .callback = set_no_e820,
+ .ident = "Acer Spin 5 (SP513-54N)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Spin SP513-54N"),
+ },
+ },
+
+ /*
+ * Clevo X170KM-G barebones have the same issue as the Lenovo *IIL*
+ * models: an E820 reservation covers the entire 32-bit _CRS window.
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=214259
+ */
+ {
+ .callback = set_no_e820,
+ .ident = "Clevo X170KM-G Barebone",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"),
+ },
+ },
{}
};
@@ -145,6 +200,27 @@ void __init pci_acpi_crs_quirks(void)
if (year >= 0 && year < 2008 && iomem_resource.end <= 0xffffffff)
pci_use_crs = false;
+ /*
+ * Some firmware includes unusable space (host bridge registers,
+ * hidden PCI device BARs, etc) in PCI host bridge _CRS. This is a
+ * firmware defect, and 4dc2287c1805 ("x86: avoid E820 regions when
+ * allocating address space") has clipped out the unusable space in
+ * the past.
+ *
+ * But other firmware supplies E820 reserved regions that cover
+ * entire _CRS windows, so clipping throws away the entire window,
+ * leaving none for hot-added or uninitialized devices. These E820
+ * entries are probably *not* a firmware defect, so disable the
+ * clipping by default for post-2022 machines.
+ *
+ * We already have quirks to disable clipping for pre-2023
+ * machines, and we'll likely need quirks to *enable* clipping for
+ * post-2022 machines that incorrectly include unusable space in
+ * _CRS.
+ */
+ if (year >= 2023)
+ pci_use_e820 = false;
+
dmi_check_system(pci_crs_quirks);
/*
@@ -160,6 +236,17 @@ void __init pci_acpi_crs_quirks(void)
"if necessary, use \"pci=%s\" and report a bug\n",
pci_use_crs ? "Using" : "Ignoring",
pci_use_crs ? "nocrs" : "use_crs");
+
+ /* "pci=use_e820"/"pci=no_e820" on the kernel cmdline takes precedence */
+ if (pci_probe & PCI_NO_E820)
+ pci_use_e820 = false;
+ else if (pci_probe & PCI_USE_E820)
+ pci_use_e820 = true;
+
+ printk(KERN_INFO "PCI: %s E820 reservations for host bridge windows\n",
+ pci_use_e820 ? "Using" : "Ignoring");
+ if (pci_probe & (PCI_NO_E820 | PCI_USE_E820))
+ printk(KERN_INFO "PCI: Please notify linux-pci@vger.kernel.org so future kernels can this automatically\n");
}
#ifdef CONFIG_PCI_MMCONFIG
@@ -299,6 +386,12 @@ static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
int status;
status = acpi_pci_probe_root_resources(ci);
+
+ if (pci_use_e820) {
+ resource_list_for_each_entry(entry, &ci->resources)
+ remove_e820_regions(&device->dev, entry->res);
+ }
+
if (pci_use_crs) {
resource_list_for_each_entry_safe(entry, tmp, &ci->resources)
if (resource_is_pcicfg_ioport(entry->res))
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 9e1e6b8d8876..ddb798603201 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -595,6 +595,14 @@ char *__init pcibios_setup(char *str)
} else if (!strcmp(str, "nocrs")) {
pci_probe |= PCI_ROOT_NO_CRS;
return NULL;
+ } else if (!strcmp(str, "use_e820")) {
+ pci_probe |= PCI_USE_E820;
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ return NULL;
+ } else if (!strcmp(str, "no_e820")) {
+ pci_probe |= PCI_NO_E820;
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ return NULL;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
} else if (!strcmp(str, "big_root_window")) {
pci_probe |= PCI_BIG_ROOT_WINDOW;
diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c
index 3ee234b6234d..255a44dd415a 100644
--- a/arch/x86/um/ldt.c
+++ b/arch/x86/um/ldt.c
@@ -23,9 +23,11 @@ static long write_ldt_entry(struct mm_id *mm_idp, int func,
{
long res;
void *stub_addr;
+
+ BUILD_BUG_ON(sizeof(*desc) % sizeof(long));
+
res = syscall_stub_data(mm_idp, (unsigned long *)desc,
- (sizeof(*desc) + sizeof(long) - 1) &
- ~(sizeof(long) - 1),
+ sizeof(*desc) / sizeof(long),
addr, &stub_addr);
if (!res) {
unsigned long args[] = { func,
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index ca85d1409917..f33a4421e7cd 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -30,6 +30,7 @@
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/edd.h>
+#include <linux/reboot.h>
#include <xen/xen.h>
#include <xen/events.h>
@@ -1069,8 +1070,7 @@ static void xen_machine_halt(void)
static void xen_machine_power_off(void)
{
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
xen_reboot(SHUTDOWN_poweroff);
}
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 7e38292dd07a..68e0e2f06d66 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -263,10 +263,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
* involved. Much simpler to just not copy those live frames across.
*/
-int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
- unsigned long thread_fn_arg, struct task_struct *p,
- unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp_thread_fn = args->stack;
+ unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
@@ -286,7 +287,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
#error Unsupported Xtensa ABI
#endif
- if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (!args->fn) {
struct pt_regs *regs = current_pt_regs();
unsigned long usp = usp_thread_fn ?
usp_thread_fn : regs->areg[1];
@@ -338,15 +339,15 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
* Window underflow will load registers from the
* spill slots on the stack on return from _switch_to.
*/
- SPILL_SLOT(childregs, 2) = usp_thread_fn;
- SPILL_SLOT(childregs, 3) = thread_fn_arg;
+ SPILL_SLOT(childregs, 2) = (unsigned long)args->fn;
+ SPILL_SLOT(childregs, 3) = (unsigned long)args->fn_arg;
#elif defined(__XTENSA_CALL0_ABI__)
/*
* a12 = thread_fn, a13 = thread_fn arg.
* _switch_to epilogue will load registers from the stack.
*/
- ((unsigned long *)p->thread.sp)[0] = usp_thread_fn;
- ((unsigned long *)p->thread.sp)[1] = thread_fn_arg;
+ ((unsigned long *)p->thread.sp)[0] = (unsigned long)args->fn;
+ ((unsigned long *)p->thread.sp)[1] = (unsigned long)args->fn_arg;
#else
#error Unsupported Xtensa ABI
#endif
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 22cdaa6729d3..f29477162ede 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -224,12 +224,12 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
void user_enable_single_step(struct task_struct *child)
{
- child->ptrace |= PT_SINGLESTEP;
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *child)
{
- child->ptrace &= ~PT_SINGLESTEP;
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/*
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index c9ffd42db873..876d5df157ed 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -472,7 +472,7 @@ static void do_signal(struct pt_regs *regs)
/* Set up the stack frame */
ret = setup_frame(&ksig, sigmask_to_save(), regs);
signal_setup_done(ret, &ksig, 0);
- if (current->ptrace & PT_SINGLESTEP)
+ if (test_thread_flag(TIF_SINGLESTEP))
task_pt_regs(current)->icountlevel = 1;
return;
@@ -498,7 +498,7 @@ static void do_signal(struct pt_regs *regs)
/* If there's no signal to deliver, we just restore the saved mask. */
restore_saved_sigmask();
- if (current->ptrace & PT_SINGLESTEP)
+ if (test_thread_flag(TIF_SINGLESTEP))
task_pt_regs(current)->icountlevel = 1;
return;
}
diff --git a/block/bio.c b/block/bio.c
index a3893d80dccc..f92d0223247b 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -722,6 +722,7 @@ static void bio_alloc_cache_destroy(struct bio_set *bs)
bio_alloc_cache_prune(cache, -1U);
}
free_percpu(bs->cache);
+ bs->cache = NULL;
}
/**
@@ -1366,10 +1367,12 @@ void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
- void *src_buf;
+ void *src_buf = bvec_kmap_local(&src_bv);
+ void *dst_buf = bvec_kmap_local(&dst_bv);
- src_buf = bvec_kmap_local(&src_bv);
- memcpy_to_bvec(&dst_bv, src_buf);
+ memcpy(dst_buf, src_buf, bytes);
+
+ kunmap_local(dst_buf);
kunmap_local(src_buf);
bio_advance_iter_single(src, src_iter, bytes);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 40161a3f68d0..764e740b0c0f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1974,12 +1974,8 @@ EXPORT_SYMBOL_GPL(bio_associate_blkg);
*/
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
- if (src->bi_blkg) {
- if (dst->bi_blkg)
- blkg_put(dst->bi_blkg);
- blkg_get(src->bi_blkg);
- dst->bi_blkg = src->bi_blkg;
- }
+ if (src->bi_blkg)
+ bio_associate_blkg_from_css(dst, bio_blkcg_css(src));
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
diff --git a/block/blk-core.c b/block/blk-core.c
index 80fa73c419a9..06ff5bbfe8f6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -939,7 +939,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
blk_flush_plug(current->plug, false);
- if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
+ if (bio_queue_enter(bio))
return 0;
if (queue_is_mq(q)) {
ret = blk_mq_poll(q, cookie, iob, flags);
diff --git a/block/blk-ia-ranges.c b/block/blk-ia-ranges.c
index 18c68d8b9138..56ed48d2954e 100644
--- a/block/blk-ia-ranges.c
+++ b/block/blk-ia-ranges.c
@@ -54,13 +54,8 @@ static ssize_t blk_ia_range_sysfs_show(struct kobject *kobj,
container_of(attr, struct blk_ia_range_sysfs_entry, attr);
struct blk_independent_access_range *iar =
container_of(kobj, struct blk_independent_access_range, kobj);
- ssize_t ret;
- mutex_lock(&iar->queue->sysfs_lock);
- ret = entry->show(iar, buf);
- mutex_unlock(&iar->queue->sysfs_lock);
-
- return ret;
+ return entry->show(iar, buf);
}
static const struct sysfs_ops blk_ia_range_sysfs_ops = {
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 5b676c7cf2b6..9568bf8dfe82 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -87,7 +87,17 @@ struct iolatency_grp;
struct blk_iolatency {
struct rq_qos rqos;
struct timer_list timer;
- atomic_t enabled;
+
+ /*
+ * ->enabled is the master enable switch gating the throttling logic and
+ * inflight tracking. The number of cgroups which have iolat enabled is
+ * tracked in ->enable_cnt, and ->enabled is flipped on/off accordingly
+ * from ->enable_work with the request_queue frozen. For details, see
+ * blkiolatency_enable_work_fn().
+ */
+ bool enabled;
+ atomic_t enable_cnt;
+ struct work_struct enable_work;
};
static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
@@ -95,11 +105,6 @@ static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
return container_of(rqos, struct blk_iolatency, rqos);
}
-static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
-{
- return atomic_read(&blkiolat->enabled) > 0;
-}
-
struct child_latency_info {
spinlock_t lock;
@@ -464,7 +469,7 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
struct blkcg_gq *blkg = bio->bi_blkg;
bool issue_as_root = bio_issue_as_root_blkg(bio);
- if (!blk_iolatency_enabled(blkiolat))
+ if (!blkiolat->enabled)
return;
while (blkg && blkg->parent) {
@@ -594,7 +599,6 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
u64 window_start;
u64 now;
bool issue_as_root = bio_issue_as_root_blkg(bio);
- bool enabled = false;
int inflight = 0;
blkg = bio->bi_blkg;
@@ -605,8 +609,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
if (!iolat)
return;
- enabled = blk_iolatency_enabled(iolat->blkiolat);
- if (!enabled)
+ if (!iolat->blkiolat->enabled)
return;
now = ktime_to_ns(ktime_get());
@@ -645,6 +648,7 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
del_timer_sync(&blkiolat->timer);
+ flush_work(&blkiolat->enable_work);
blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
kfree(blkiolat);
}
@@ -716,6 +720,44 @@ next:
rcu_read_unlock();
}
+/**
+ * blkiolatency_enable_work_fn - Enable or disable iolatency on the device
+ * @work: enable_work of the blk_iolatency of interest
+ *
+ * iolatency needs to keep track of the number of in-flight IOs per cgroup. This
+ * is relatively expensive as it involves walking up the hierarchy twice for
+ * every IO. Thus, if iolatency is not enabled in any cgroup for the device, we
+ * want to disable the in-flight tracking.
+ *
+ * We have to make sure that the counting is balanced - we don't want to leak
+ * the in-flight counts by disabling accounting in the completion path while IOs
+ * are in flight. This is achieved by ensuring that no IO is in flight by
+ * freezing the queue while flipping ->enabled. As this requires a sleepable
+ * context, ->enabled flipping is punted to this work function.
+ */
+static void blkiolatency_enable_work_fn(struct work_struct *work)
+{
+ struct blk_iolatency *blkiolat = container_of(work, struct blk_iolatency,
+ enable_work);
+ bool enabled;
+
+ /*
+ * There can only be one instance of this function running for @blkiolat
+ * and it's guaranteed to be executed at least once after the latest
+ * ->enable_cnt modification. Acting on the latest ->enable_cnt is
+ * sufficient.
+ *
+ * Also, we know @blkiolat is safe to access as ->enable_work is flushed
+ * in blkcg_iolatency_exit().
+ */
+ enabled = atomic_read(&blkiolat->enable_cnt);
+ if (enabled != blkiolat->enabled) {
+ blk_mq_freeze_queue(blkiolat->rqos.q);
+ blkiolat->enabled = enabled;
+ blk_mq_unfreeze_queue(blkiolat->rqos.q);
+ }
+}
+
int blk_iolatency_init(struct request_queue *q)
{
struct blk_iolatency *blkiolat;
@@ -741,17 +783,15 @@ int blk_iolatency_init(struct request_queue *q)
}
timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
+ INIT_WORK(&blkiolat->enable_work, blkiolatency_enable_work_fn);
return 0;
}
-/*
- * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
- * return 0.
- */
-static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
struct iolatency_grp *iolat = blkg_to_lat(blkg);
+ struct blk_iolatency *blkiolat = iolat->blkiolat;
u64 oldval = iolat->min_lat_nsec;
iolat->min_lat_nsec = val;
@@ -759,13 +799,15 @@ static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
BLKIOLATENCY_MAX_WIN_SIZE);
- if (!oldval && val)
- return 1;
+ if (!oldval && val) {
+ if (atomic_inc_return(&blkiolat->enable_cnt) == 1)
+ schedule_work(&blkiolat->enable_work);
+ }
if (oldval && !val) {
blkcg_clear_delay(blkg);
- return -1;
+ if (atomic_dec_return(&blkiolat->enable_cnt) == 0)
+ schedule_work(&blkiolat->enable_work);
}
- return 0;
}
static void iolatency_clear_scaling(struct blkcg_gq *blkg)
@@ -797,7 +839,6 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
u64 lat_val = 0;
u64 oldval;
int ret;
- int enable = 0;
ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
if (ret)
@@ -832,41 +873,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
blkg = ctx.blkg;
oldval = iolat->min_lat_nsec;
- enable = iolatency_set_min_lat_nsec(blkg, lat_val);
- if (enable) {
- if (!blk_get_queue(blkg->q)) {
- ret = -ENODEV;
- goto out;
- }
-
- blkg_get(blkg);
- }
-
- if (oldval != iolat->min_lat_nsec) {
+ iolatency_set_min_lat_nsec(blkg, lat_val);
+ if (oldval != iolat->min_lat_nsec)
iolatency_clear_scaling(blkg);
- }
-
ret = 0;
out:
blkg_conf_finish(&ctx);
- if (ret == 0 && enable) {
- struct iolatency_grp *tmp = blkg_to_lat(blkg);
- struct blk_iolatency *blkiolat = tmp->blkiolat;
-
- blk_mq_freeze_queue(blkg->q);
-
- if (enable == 1)
- atomic_inc(&blkiolat->enabled);
- else if (enable == -1)
- atomic_dec(&blkiolat->enabled);
- else
- WARN_ON_ONCE(1);
-
- blk_mq_unfreeze_queue(blkg->q);
-
- blkg_put(blkg);
- blk_put_queue(blkg->q);
- }
return ret ?: nbytes;
}
@@ -1005,14 +1017,8 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
struct blkcg_gq *blkg = lat_to_blkg(iolat);
- struct blk_iolatency *blkiolat = iolat->blkiolat;
- int ret;
- ret = iolatency_set_min_lat_nsec(blkg, 0);
- if (ret == 1)
- atomic_inc(&blkiolat->enabled);
- if (ret == -1)
- atomic_dec(&blkiolat->enabled);
+ iolatency_set_min_lat_nsec(blkg, 0);
iolatency_clear_scaling(blkg);
}
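[Editor's illustration of the pattern blk-iolatency adopts above: hot paths read a plain bool, configuration paths bump an atomic count and kick a worker, and only the worker flips the bool while the device is quiesced. This is a userspace sketch, not kernel code; a mutex stands in for blk_mq_freeze_queue(), and all names are illustrative.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

static atomic_int enable_cnt;          /* like blkiolat->enable_cnt */
static bool enabled;                   /* like blkiolat->enabled */
static pthread_mutex_t freeze_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for blkiolatency_enable_work_fn(): act on the latest count. */
static void enable_work_fn(void)
{
	bool want = atomic_load(&enable_cnt) > 0;

	if (want != enabled) {
		pthread_mutex_lock(&freeze_lock);   /* "freeze the queue" */
		enabled = want;
		pthread_mutex_unlock(&freeze_lock); /* "unfreeze" */
	}
}

/* Stand-in for iolatency_set_min_lat_nsec() enabling/disabling one group. */
static void set_group_latency(bool on)
{
	if (on && atomic_fetch_add(&enable_cnt, 1) == 0)
		enable_work_fn();   /* the real code schedules enable_work */
	else if (!on && atomic_fetch_sub(&enable_cnt, 1) == 1)
		enable_work_fn();
}

int main(void)
{
	set_group_latency(true);
	printf("enabled after first group: %d\n", enabled);
	set_group_latency(false);
	printf("enabled after last group gone: %d\n", enabled);
	return 0;
}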
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 68ac23d0b640..2dcd738c6952 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -228,7 +228,6 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
BUG_ON(real_tag >= tags->nr_tags);
sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
} else {
- BUG_ON(tag >= tags->nr_reserved_tags);
sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
}
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ae116b755648..e9bf950983c7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -133,7 +133,8 @@ static bool blk_mq_check_inflight(struct request *rq, void *priv,
{
struct mq_inflight *mi = priv;
- if ((!mi->part->bd_partno || rq->part == mi->part) &&
+ if (rq->part && blk_do_io_stat(rq) &&
+ (!mi->part->bd_partno || rq->part == mi->part) &&
blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
mi->inflight[rq_data_dir(rq)]++;
@@ -1151,24 +1152,6 @@ void blk_mq_start_request(struct request *rq)
}
EXPORT_SYMBOL(blk_mq_start_request);
-/**
- * blk_end_sync_rq - executes a completion event on a request
- * @rq: request to complete
- * @error: end I/O status of the request
- */
-static void blk_end_sync_rq(struct request *rq, blk_status_t error)
-{
- struct completion *waiting = rq->end_io_data;
-
- rq->end_io_data = (void *)(uintptr_t)error;
-
- /*
- * complete last, if this is a stack request the process (and thus
- * the rq pointer) could be invalid right after this complete()
- */
- complete(waiting);
-}
-
/*
* Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
* queues. This is important for md arrays to benefit from merging
@@ -1203,33 +1186,10 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
plug->rq_count++;
}
-static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
- rq_end_io_fn *done, bool use_plug)
-{
- WARN_ON(irqs_disabled());
- WARN_ON(!blk_rq_is_passthrough(rq));
-
- rq->end_io = done;
-
- blk_account_io_start(rq);
-
- if (use_plug && current->plug) {
- blk_add_rq_to_plug(current->plug, rq);
- return;
- }
- /*
- * don't check dying flag for MQ because the request won't
- * be reused after dying flag is set
- */
- blk_mq_sched_insert_request(rq, at_head, true, false);
-}
-
-
/**
* blk_execute_rq_nowait - insert a request to I/O scheduler for execution
* @rq: request to insert
* @at_head: insert request at head or tail of queue
- * @done: I/O completion handler
*
* Description:
* Insert a fully prepared request at the back of the I/O scheduler queue
@@ -1238,13 +1198,32 @@ static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
* Note:
* This function will invoke @done directly if the queue is dead.
*/
-void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
+void blk_execute_rq_nowait(struct request *rq, bool at_head)
{
- __blk_execute_rq_nowait(rq, at_head, done, true);
+ WARN_ON(irqs_disabled());
+ WARN_ON(!blk_rq_is_passthrough(rq));
+ blk_account_io_start(rq);
+ if (current->plug)
+ blk_add_rq_to_plug(current->plug, rq);
+ else
+ blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
+struct blk_rq_wait {
+ struct completion done;
+ blk_status_t ret;
+};
+
+static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+{
+ struct blk_rq_wait *wait = rq->end_io_data;
+
+ wait->ret = ret;
+ complete(&wait->done);
+}
+
static bool blk_rq_is_poll(struct request *rq)
{
if (!rq->mq_hctx)
@@ -1276,30 +1255,37 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
*/
blk_status_t blk_execute_rq(struct request *rq, bool at_head)
{
- DECLARE_COMPLETION_ONSTACK(wait);
- unsigned long hang_check;
+ struct blk_rq_wait wait = {
+ .done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
+ };
+
+ WARN_ON(irqs_disabled());
+ WARN_ON(!blk_rq_is_passthrough(rq));
- /*
- * iopoll requires request to be submitted to driver, so can't
- * use plug
- */
rq->end_io_data = &wait;
- __blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq,
- !blk_rq_is_poll(rq));
-
- /* Prevent hang_check timer from firing at us during very long I/O */
- hang_check = sysctl_hung_task_timeout_secs;
-
- if (blk_rq_is_poll(rq))
- blk_rq_poll_completion(rq, &wait);
- else if (hang_check)
- while (!wait_for_completion_io_timeout(&wait,
- hang_check * (HZ/2)))
- ;
- else
- wait_for_completion_io(&wait);
+ rq->end_io = blk_end_sync_rq;
+
+ blk_account_io_start(rq);
+ blk_mq_sched_insert_request(rq, at_head, true, false);
+
+ if (blk_rq_is_poll(rq)) {
+ blk_rq_poll_completion(rq, &wait.done);
+ } else {
+ /*
+ * Prevent hang_check timer from firing at us during very long
+ * I/O
+ */
+ unsigned long hang_check = sysctl_hung_task_timeout_secs;
+
+ if (hang_check)
+ while (!wait_for_completion_io_timeout(&wait.done,
+ hang_check * (HZ/2)))
+ ;
+ else
+ wait_for_completion_io(&wait.done);
+ }
- return (blk_status_t)(uintptr_t)rq->end_io_data;
+ return wait.ret;
}
EXPORT_SYMBOL(blk_execute_rq);
@@ -2174,8 +2160,7 @@ static bool blk_mq_has_sqsched(struct request_queue *q)
*/
static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
{
- struct blk_mq_hw_ctx *hctx;
-
+ struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
/*
* If the IO scheduler does not respect hardware queues when
* dispatching, we just don't bother with multiple HW queues and
@@ -2183,8 +2168,8 @@ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
* just causes lock contention inside the scheduler and pointless cache
* bouncing.
*/
- hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
- raw_smp_processor_id());
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx);
+
if (!blk_mq_hctx_stopped(hctx))
return hctx;
return NULL;
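[Editor's illustration of the on-stack wait pattern blk_execute_rq() switches to above: instead of smuggling the status through a cast of end_io_data, the waiter keeps a completion and a status field in one stack struct and the completion callback fills both. Userspace sketch with pthreads; the struct and function names are illustrative, not kernel API.]

#include <pthread.h>
#include <stdio.h>

struct rq_wait {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
	int             ret;            /* plays the role of blk_status_t */
};

struct request {
	void (*end_io)(struct request *rq, int ret);
	void *end_io_data;
};

/* Analogue of blk_end_sync_rq(): record the status, then wake the waiter. */
static void end_sync_rq(struct request *rq, int ret)
{
	struct rq_wait *wait = rq->end_io_data;

	pthread_mutex_lock(&wait->lock);
	wait->ret = ret;
	wait->done = 1;
	pthread_cond_signal(&wait->cond);
	pthread_mutex_unlock(&wait->lock);
}

static void *driver_thread(void *arg)
{
	struct request *rq = arg;

	rq->end_io(rq, 0);              /* pretend the hardware finished OK */
	return NULL;
}

int main(void)
{
	struct rq_wait wait = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	struct request rq = { .end_io = end_sync_rq, .end_io_data = &wait };
	pthread_t thr;

	pthread_create(&thr, NULL, driver_thread, &rq);

	pthread_mutex_lock(&wait.lock);
	while (!wait.done)
		pthread_cond_wait(&wait.cond, &wait.lock);
	pthread_mutex_unlock(&wait.lock);
	pthread_join(thr, NULL);

	printf("request completed with status %d\n", wait.ret);
	return 0;
}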
diff --git a/block/genhd.c b/block/genhd.c
index 36532b931841..27205ae47d59 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -385,6 +385,8 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN))
return -EINVAL;
+ if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
+ return -EINVAL;
if (disk->open_partitions)
return -EBUSY;
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 41068811fd0e..19197469cfab 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -274,7 +274,7 @@ config CRYPTO_ECRDSA
config CRYPTO_SM2
tristate "SM2 algorithm"
- select CRYPTO_LIB_SM3
+ select CRYPTO_SM3
select CRYPTO_AKCIPHER
select CRYPTO_MANAGER
select MPILIB
@@ -1010,9 +1010,12 @@ config CRYPTO_SHA3
http://keccak.noekeon.org/
config CRYPTO_SM3
+ tristate
+
+config CRYPTO_SM3_GENERIC
tristate "SM3 digest algorithm"
select CRYPTO_HASH
- select CRYPTO_LIB_SM3
+ select CRYPTO_SM3
help
SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3).
It is part of the Chinese Commercial Cryptography suite.
@@ -1025,7 +1028,7 @@ config CRYPTO_SM3_AVX_X86_64
tristate "SM3 digest algorithm (x86_64/AVX)"
depends on X86 && 64BIT
select CRYPTO_HASH
- select CRYPTO_LIB_SM3
+ select CRYPTO_SM3
help
SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3).
It is part of the Chinese Commercial Cryptography suite. This is
@@ -1572,9 +1575,12 @@ config CRYPTO_SERPENT_AVX2_X86_64
<https://www.cl.cam.ac.uk/~rja14/serpent.html>
config CRYPTO_SM4
+ tristate
+
+config CRYPTO_SM4_GENERIC
tristate "SM4 cipher algorithm"
select CRYPTO_ALGAPI
- select CRYPTO_LIB_SM4
+ select CRYPTO_SM4
help
SM4 cipher algorithms (OSCCA GB/T 32907-2016).
@@ -1603,7 +1609,7 @@ config CRYPTO_SM4_AESNI_AVX_X86_64
select CRYPTO_SKCIPHER
select CRYPTO_SIMD
select CRYPTO_ALGAPI
- select CRYPTO_LIB_SM4
+ select CRYPTO_SM4
help
SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX).
@@ -1624,7 +1630,7 @@ config CRYPTO_SM4_AESNI_AVX2_X86_64
select CRYPTO_SKCIPHER
select CRYPTO_SIMD
select CRYPTO_ALGAPI
- select CRYPTO_LIB_SM4
+ select CRYPTO_SM4
select CRYPTO_SM4_AESNI_AVX_X86_64
help
SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX2).
diff --git a/crypto/Makefile b/crypto/Makefile
index f754c4d17d6b..43bc33e247d1 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -78,7 +78,8 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
-obj-$(CONFIG_CRYPTO_SM3) += sm3_generic.o
+obj-$(CONFIG_CRYPTO_SM3) += sm3.o
+obj-$(CONFIG_CRYPTO_SM3_GENERIC) += sm3_generic.o
obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
@@ -134,7 +135,8 @@ obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
-obj-$(CONFIG_CRYPTO_SM4) += sm4_generic.o
+obj-$(CONFIG_CRYPTO_SM4) += sm4.o
+obj-$(CONFIG_CRYPTO_SM4_GENERIC) += sm4_generic.o
obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index a1bea0f4baa8..668095eca0fa 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -39,6 +39,10 @@ struct cryptd_cpu_queue {
};
struct cryptd_queue {
+ /*
+ * Protected by disabling BH to allow enqueueing from softinterrupt and
+ * dequeuing from kworker (cryptd_queue_worker()).
+ */
struct cryptd_cpu_queue __percpu *cpu_queue;
};
@@ -125,28 +129,28 @@ static void cryptd_fini_queue(struct cryptd_queue *queue)
static int cryptd_enqueue_request(struct cryptd_queue *queue,
struct crypto_async_request *request)
{
- int cpu, err;
+ int err;
struct cryptd_cpu_queue *cpu_queue;
refcount_t *refcnt;
- cpu = get_cpu();
+ local_bh_disable();
cpu_queue = this_cpu_ptr(queue->cpu_queue);
err = crypto_enqueue_request(&cpu_queue->queue, request);
refcnt = crypto_tfm_ctx(request->tfm);
if (err == -ENOSPC)
- goto out_put_cpu;
+ goto out;
- queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
+ queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
if (!refcount_read(refcnt))
- goto out_put_cpu;
+ goto out;
refcount_inc(refcnt);
-out_put_cpu:
- put_cpu();
+out:
+ local_bh_enable();
return err;
}
@@ -162,15 +166,10 @@ static void cryptd_queue_worker(struct work_struct *work)
cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
/*
* Only handle one request at a time to avoid hogging crypto workqueue.
- * preempt_disable/enable is used to prevent being preempted by
- * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
- * cryptd_enqueue_request() being accessed from software interrupts.
*/
local_bh_disable();
- preempt_disable();
backlog = crypto_get_backlog(&cpu_queue->queue);
req = crypto_dequeue_request(&cpu_queue->queue);
- preempt_enable();
local_bh_enable();
if (!req)
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 6056a990c9f2..bb8e77077f02 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -253,6 +253,7 @@ static void crypto_pump_work(struct kthread_work *work)
* crypto_transfer_request - transfer the new request into the engine queue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
+ * @need_pump: indicates whether to queue the request pump to kthread_work
*/
static int crypto_transfer_request(struct crypto_engine *engine,
struct crypto_async_request *req,
diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
index b32ffcaad9ad..f3c6b5e15e75 100644
--- a/crypto/ecrdsa.c
+++ b/crypto/ecrdsa.c
@@ -113,15 +113,15 @@ static int ecrdsa_verify(struct akcipher_request *req)
/* Step 1: verify that 0 < r < q, 0 < s < q */
if (vli_is_zero(r, ndigits) ||
- vli_cmp(r, ctx->curve->n, ndigits) == 1 ||
+ vli_cmp(r, ctx->curve->n, ndigits) >= 0 ||
vli_is_zero(s, ndigits) ||
- vli_cmp(s, ctx->curve->n, ndigits) == 1)
+ vli_cmp(s, ctx->curve->n, ndigits) >= 0)
return -EKEYREJECTED;
/* Step 2: calculate hash (h) of the message (passed as input) */
/* Step 3: calculate e = h \mod q */
vli_from_le64(e, digest, ndigits);
- if (vli_cmp(e, ctx->curve->n, ndigits) == 1)
+ if (vli_cmp(e, ctx->curve->n, ndigits) >= 0)
vli_sub(e, e, ctx->curve->n, ndigits);
if (vli_is_zero(e, ndigits))
e[0] = 1;
@@ -137,7 +137,7 @@ static int ecrdsa_verify(struct akcipher_request *req)
/* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */
ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key,
ctx->curve);
- if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1)
+ if (vli_cmp(cc.x, ctx->curve->n, ndigits) >= 0)
vli_sub(cc.x, cc.x, ctx->curve->n, ndigits);
/* Step 7: if R == r signature is valid */
diff --git a/lib/crypto/sm3.c b/crypto/sm3.c
index d473e358a873..d473e358a873 100644
--- a/lib/crypto/sm3.c
+++ b/crypto/sm3.c
diff --git a/lib/crypto/sm4.c b/crypto/sm4.c
index 284e62576d0c..2c44193bc27e 100644
--- a/lib/crypto/sm4.c
+++ b/crypto/sm4.c
@@ -11,7 +11,7 @@
#include <asm/unaligned.h>
#include <crypto/sm4.h>
-static const u32 fk[4] = {
+static const u32 ____cacheline_aligned fk[4] = {
0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
};
@@ -61,6 +61,14 @@ static const u8 ____cacheline_aligned sbox[256] = {
0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48
};
+extern const u32 crypto_sm4_fk[4] __alias(fk);
+extern const u32 crypto_sm4_ck[32] __alias(ck);
+extern const u8 crypto_sm4_sbox[256] __alias(sbox);
+
+EXPORT_SYMBOL(crypto_sm4_fk);
+EXPORT_SYMBOL(crypto_sm4_ck);
+EXPORT_SYMBOL(crypto_sm4_sbox);
+
static inline u32 sm4_t_non_lin_sub(u32 x)
{
u32 out;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 4948201065cc..5801a8f9f713 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -232,6 +232,20 @@ enum finalization_type {
FINALIZATION_TYPE_DIGEST, /* use digest() */
};
+/*
+ * Whether the crypto operation will occur in-place, and if so whether the
+ * source and destination scatterlist pointers will coincide (req->src ==
+ * req->dst), or whether they'll merely point to two separate scatterlists
+ * (req->src != req->dst) that reference the same underlying memory.
+ *
+ * This is only relevant for algorithm types that support in-place operation.
+ */
+enum inplace_mode {
+ OUT_OF_PLACE,
+ INPLACE_ONE_SGLIST,
+ INPLACE_TWO_SGLISTS,
+};
+
#define TEST_SG_TOTAL 10000
/**
@@ -265,7 +279,7 @@ struct test_sg_division {
* crypto test vector can be tested.
*
* @name: name of this config, logged for debugging purposes if a test fails
- * @inplace: operate on the data in-place, if applicable for the algorithm type?
+ * @inplace_mode: whether and how to operate on the data in-place, if applicable
* @req_flags: extra request_flags, e.g. CRYPTO_TFM_REQ_MAY_SLEEP
* @src_divs: description of how to arrange the source scatterlist
* @dst_divs: description of how to arrange the dst scatterlist, if applicable
@@ -282,7 +296,7 @@ struct test_sg_division {
*/
struct testvec_config {
const char *name;
- bool inplace;
+ enum inplace_mode inplace_mode;
u32 req_flags;
struct test_sg_division src_divs[XBUFSIZE];
struct test_sg_division dst_divs[XBUFSIZE];
@@ -307,11 +321,16 @@ struct testvec_config {
/* Configs for skciphers and aeads */
static const struct testvec_config default_cipher_testvec_configs[] = {
{
- .name = "in-place",
- .inplace = true,
+ .name = "in-place (one sglist)",
+ .inplace_mode = INPLACE_ONE_SGLIST,
+ .src_divs = { { .proportion_of_total = 10000 } },
+ }, {
+ .name = "in-place (two sglists)",
+ .inplace_mode = INPLACE_TWO_SGLISTS,
.src_divs = { { .proportion_of_total = 10000 } },
}, {
.name = "out-of-place",
+ .inplace_mode = OUT_OF_PLACE,
.src_divs = { { .proportion_of_total = 10000 } },
}, {
.name = "unaligned buffer, offset=1",
@@ -349,7 +368,7 @@ static const struct testvec_config default_cipher_testvec_configs[] = {
.key_offset = 3,
}, {
.name = "misaligned splits crossing pages, inplace",
- .inplace = true,
+ .inplace_mode = INPLACE_ONE_SGLIST,
.src_divs = {
{
.proportion_of_total = 7500,
@@ -749,18 +768,39 @@ static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls,
iov_iter_kvec(&input, WRITE, inputs, nr_inputs, src_total_len);
err = build_test_sglist(&tsgls->src, cfg->src_divs, alignmask,
- cfg->inplace ?
+ cfg->inplace_mode != OUT_OF_PLACE ?
max(dst_total_len, src_total_len) :
src_total_len,
&input, NULL);
if (err)
return err;
- if (cfg->inplace) {
+ /*
+ * In-place crypto operations can use the same scatterlist for both the
+ * source and destination (req->src == req->dst), or can use separate
+ * scatterlists (req->src != req->dst) which point to the same
+ * underlying memory. Make sure to test both cases.
+ */
+ if (cfg->inplace_mode == INPLACE_ONE_SGLIST) {
tsgls->dst.sgl_ptr = tsgls->src.sgl;
tsgls->dst.nents = tsgls->src.nents;
return 0;
}
+ if (cfg->inplace_mode == INPLACE_TWO_SGLISTS) {
+ /*
+ * For now we keep it simple and only test the case where the
+ * two scatterlists have identical entries, rather than
+ * different entries that split up the same memory differently.
+ */
+ memcpy(tsgls->dst.sgl, tsgls->src.sgl,
+ tsgls->src.nents * sizeof(tsgls->src.sgl[0]));
+ memcpy(tsgls->dst.sgl_saved, tsgls->src.sgl,
+ tsgls->src.nents * sizeof(tsgls->src.sgl[0]));
+ tsgls->dst.sgl_ptr = tsgls->dst.sgl;
+ tsgls->dst.nents = tsgls->src.nents;
+ return 0;
+ }
+ /* Out of place */
return build_test_sglist(&tsgls->dst,
cfg->dst_divs[0].proportion_of_total ?
cfg->dst_divs : cfg->src_divs,
@@ -995,9 +1035,19 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
p += scnprintf(p, end - p, "random:");
- if (prandom_u32() % 2 == 0) {
- cfg->inplace = true;
- p += scnprintf(p, end - p, " inplace");
+ switch (prandom_u32() % 4) {
+ case 0:
+ case 1:
+ cfg->inplace_mode = OUT_OF_PLACE;
+ break;
+ case 2:
+ cfg->inplace_mode = INPLACE_ONE_SGLIST;
+ p += scnprintf(p, end - p, " inplace_one_sglist");
+ break;
+ default:
+ cfg->inplace_mode = INPLACE_TWO_SGLISTS;
+ p += scnprintf(p, end - p, " inplace_two_sglists");
+ break;
}
if (prandom_u32() % 2 == 0) {
@@ -1034,7 +1084,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
cfg->req_flags);
p += scnprintf(p, end - p, "]");
- if (!cfg->inplace && prandom_u32() % 2 == 0) {
+ if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32() % 2 == 0) {
p += scnprintf(p, end - p, " dst_divs=[");
p = generate_random_sgl_divisions(cfg->dst_divs,
ARRAY_SIZE(cfg->dst_divs),
@@ -2085,7 +2135,8 @@ static int test_aead_vec_cfg(int enc, const struct aead_testvec *vec,
/* Check for the correct output (ciphertext or plaintext) */
err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext,
enc ? vec->clen : vec->plen,
- vec->alen, enc || !cfg->inplace);
+ vec->alen,
+ enc || cfg->inplace_mode == OUT_OF_PLACE);
if (err == -EOVERFLOW) {
pr_err("alg: aead: %s %s overran dst buffer on test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
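For reference, a minimal sketch (not part of the patch) of what the two in-place modes exercised above correspond to at the skcipher API level; req, iv and buf are assumed to come from the surrounding test context:

	/* Hypothetical in-place buffer shared by source and destination. */
	struct scatterlist sg_one, sg_src, sg_dst;
	u8 buf[256];

	/* INPLACE_ONE_SGLIST: one scatterlist, so req->src == req->dst. */
	sg_init_one(&sg_one, buf, sizeof(buf));
	skcipher_request_set_crypt(req, &sg_one, &sg_one, sizeof(buf), iv);

	/* INPLACE_TWO_SGLISTS: two scatterlists describing the same memory. */
	sg_init_one(&sg_src, buf, sizeof(buf));
	sg_init_one(&sg_dst, buf, sizeof(buf));
	skcipher_request_set_crypt(req, &sg_src, &sg_dst, sizeof(buf), iv);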
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 8d6cd5d08722..b6a172d32a7d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -107,6 +107,8 @@ source "drivers/usb/Kconfig"
source "drivers/mmc/Kconfig"
+source "drivers/ufs/Kconfig"
+
source "drivers/memstick/Kconfig"
source "drivers/leds/Kconfig"
@@ -225,8 +227,6 @@ source "drivers/mux/Kconfig"
source "drivers/opp/Kconfig"
-source "drivers/visorbus/Kconfig"
-
source "drivers/siox/Kconfig"
source "drivers/slimbus/Kconfig"
@@ -239,4 +239,6 @@ source "drivers/most/Kconfig"
source "drivers/peci/Kconfig"
+source "drivers/hte/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 020780b6b4d2..9a30842b22c5 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -72,9 +72,9 @@ obj-$(CONFIG_PARPORT) += parport/
obj-y += base/ block/ misc/ mfd/ nfc/
obj-$(CONFIG_LIBNVDIMM) += nvdimm/
obj-$(CONFIG_DAX) += dax/
-obj-$(CONFIG_CXL_BUS) += cxl/
obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
obj-$(CONFIG_NUBUS) += nubus/
+obj-y += cxl/
obj-y += macintosh/
obj-y += scsi/
obj-y += nvme/
@@ -128,6 +128,7 @@ obj-$(CONFIG_PM_OPP) += opp/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
+obj-y += ufs/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-$(CONFIG_NEW_LEDS) += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
@@ -181,10 +182,10 @@ obj-$(CONFIG_FPGA) += fpga/
obj-$(CONFIG_FSI) += fsi/
obj-$(CONFIG_TEE) += tee/
obj-$(CONFIG_MULTIPLEXER) += mux/
-obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
obj-$(CONFIG_SIOX) += siox/
obj-$(CONFIG_GNSS) += gnss/
obj-$(CONFIG_INTERCONNECT) += interconnect/
obj-$(CONFIG_COUNTER) += counter/
obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_PECI) += peci/
+obj-$(CONFIG_HTE) += hte/
diff --git a/drivers/accessibility/speakup/fakekey.c b/drivers/accessibility/speakup/fakekey.c
index cd029968462f..868c47b2a59b 100644
--- a/drivers/accessibility/speakup/fakekey.c
+++ b/drivers/accessibility/speakup/fakekey.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/* fakekey.c
- * Functions for simulating keypresses.
+ * Functions for simulating key presses.
*
* Copyright (C) 2010 the Speakup Team
*/
@@ -78,7 +78,7 @@ void speakup_fake_down_arrow(void)
}
/*
- * Are we handling a simulated keypress on the current CPU?
+ * Are we handling a simulated key press on the current CPU?
* Returns a boolean.
*/
bool speakup_fake_key_pressed(void)
diff --git a/drivers/accessibility/speakup/serialio.c b/drivers/accessibility/speakup/serialio.c
index 53580bdc5baa..3418ea31d28f 100644
--- a/drivers/accessibility/speakup/serialio.c
+++ b/drivers/accessibility/speakup/serialio.c
@@ -59,7 +59,7 @@ const struct old_serial_port *spk_serial_init(int index)
}
ser = rs_table + index;
- /* Divisor, bytesize and parity */
+ /* Divisor, byte size and parity */
quot = ser->baud_base / baud;
cval = cflag & (CSIZE | CSTOPB);
#if defined(__powerpc__) || defined(__alpha__)
diff --git a/drivers/accessibility/speakup/speakup_acntpc.c b/drivers/accessibility/speakup/speakup_acntpc.c
index 023172ca22ef..a55b60754eb1 100644
--- a/drivers/accessibility/speakup/speakup_acntpc.c
+++ b/drivers/accessibility/speakup/speakup_acntpc.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * this code is specificly written as a driver for the speakup screenreview
+ * this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
* This driver is for the Aicom Acent PC internal synthesizer.
*/
diff --git a/drivers/accessibility/speakup/speakup_acntsa.c b/drivers/accessibility/speakup/speakup_acntsa.c
index 3a863dc61286..2697c51ed6b5 100644
--- a/drivers/accessibility/speakup/speakup_acntsa.c
+++ b/drivers/accessibility/speakup/speakup_acntsa.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * this code is specificly written as a driver for the speakup screenreview
+ * this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
*/
diff --git a/drivers/accessibility/speakup/speakup_apollo.c b/drivers/accessibility/speakup/speakup_apollo.c
index cd63581b2e99..c84a7e0864b7 100644
--- a/drivers/accessibility/speakup/speakup_apollo.c
+++ b/drivers/accessibility/speakup/speakup_apollo.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * this code is specificly written as a driver for the speakup screenreview
+ * this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
*/
#include <linux/jiffies.h>
diff --git a/drivers/accessibility/speakup/speakup_audptr.c b/drivers/accessibility/speakup/speakup_audptr.c
index a0c3b8ae17a1..4d16d60db9b2 100644
--- a/drivers/accessibility/speakup/speakup_audptr.c
+++ b/drivers/accessibility/speakup/speakup_audptr.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* s not a general device driver.
*/
#include "spk_priv.h"
diff --git a/drivers/accessibility/speakup/speakup_bns.c b/drivers/accessibility/speakup/speakup_bns.c
index 76dfa3f7c058..b8103eb117b8 100644
--- a/drivers/accessibility/speakup/speakup_bns.c
+++ b/drivers/accessibility/speakup/speakup_bns.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * this code is specificly written as a driver for the speakup screenreview
+ * this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
*/
#include "spk_priv.h"
diff --git a/drivers/accessibility/speakup/speakup_decext.c b/drivers/accessibility/speakup/speakup_decext.c
index 092cfd08a9e1..eaebf62300a4 100644
--- a/drivers/accessibility/speakup/speakup_decext.c
+++ b/drivers/accessibility/speakup/speakup_decext.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* s not a general device driver.
*/
#include <linux/jiffies.h>
diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c
index 78ca4987e619..2a7e8d727904 100644
--- a/drivers/accessibility/speakup/speakup_dectlk.c
+++ b/drivers/accessibility/speakup/speakup_dectlk.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* s not a general device driver.
*/
#include <linux/unistd.h>
diff --git a/drivers/accessibility/speakup/speakup_dtlk.c b/drivers/accessibility/speakup/speakup_dtlk.c
index a9dd5c45d237..6f01e010aaf4 100644
--- a/drivers/accessibility/speakup/speakup_dtlk.c
+++ b/drivers/accessibility/speakup/speakup_dtlk.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* package it's not a general device driver.
* This driver is for the RC Systems DoubleTalk PC internal synthesizer.
*/
diff --git a/drivers/accessibility/speakup/speakup_dummy.c b/drivers/accessibility/speakup/speakup_dummy.c
index 63c2f2943282..34f11cd47073 100644
--- a/drivers/accessibility/speakup/speakup_dummy.c
+++ b/drivers/accessibility/speakup/speakup_dummy.c
@@ -8,7 +8,7 @@
* Copyright (C) 2003 David Borowski.
* Copyright (C) 2007 Samuel Thibault.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* s not a general device driver.
*/
#include "spk_priv.h"
diff --git a/drivers/accessibility/speakup/speakup_keypc.c b/drivers/accessibility/speakup/speakup_keypc.c
index 1618be87bff1..f61b62f1ea4d 100644
--- a/drivers/accessibility/speakup/speakup_keypc.c
+++ b/drivers/accessibility/speakup/speakup_keypc.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* package it's not a general device driver.
* This driver is for the Keynote Gold internal synthesizer.
*/
diff --git a/drivers/accessibility/speakup/speakup_ltlk.c b/drivers/accessibility/speakup/speakup_ltlk.c
index 3e59b387d0c4..f885cfaa27c8 100644
--- a/drivers/accessibility/speakup/speakup_ltlk.c
+++ b/drivers/accessibility/speakup/speakup_ltlk.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* s not a general device driver.
*/
#include "speakup.h"
diff --git a/drivers/accessibility/speakup/speakup_soft.c b/drivers/accessibility/speakup/speakup_soft.c
index 19824e7006fe..99f1d4ac426a 100644
--- a/drivers/accessibility/speakup/speakup_soft.c
+++ b/drivers/accessibility/speakup/speakup_soft.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2003 Kirk Reiser.
*
- * this code is specificly written as a driver for the speakup screenreview
+ * this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
*/
@@ -397,6 +397,7 @@ static int softsynth_probe(struct spk_synth *synth)
synthu_device.name = "softsynthu";
synthu_device.fops = &softsynthu_fops;
if (misc_register(&synthu_device)) {
+ misc_deregister(&synth_device);
pr_warn("Couldn't initialize miscdevice /dev/softsynthu.\n");
return -ENODEV;
}
diff --git a/drivers/accessibility/speakup/speakup_spkout.c b/drivers/accessibility/speakup/speakup_spkout.c
index bd3d8dc300ff..5e3bb3aa98b6 100644
--- a/drivers/accessibility/speakup/speakup_spkout.c
+++ b/drivers/accessibility/speakup/speakup_spkout.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* s not a general device driver.
*/
#include "spk_priv.h"
diff --git a/drivers/accessibility/speakup/speakup_txprt.c b/drivers/accessibility/speakup/speakup_txprt.c
index a7326f226a5e..9e781347f7eb 100644
--- a/drivers/accessibility/speakup/speakup_txprt.c
+++ b/drivers/accessibility/speakup/speakup_txprt.c
@@ -6,7 +6,7 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
* s not a general device driver.
*/
#include "spk_priv.h"
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index db487ff9dd1b..c29e41bfcf35 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -32,7 +32,6 @@ MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI AC Adapter Driver");
MODULE_LICENSE("GPL");
-
static int acpi_ac_add(struct acpi_device *device);
static int acpi_ac_remove(struct acpi_device *device);
static void acpi_ac_notify(struct acpi_device *device, u32 event);
@@ -125,6 +124,7 @@ static int get_ac_property(struct power_supply *psy,
default:
return -EINVAL;
}
+
return 0;
}
@@ -286,6 +286,7 @@ static int acpi_ac_resume(struct device *dev)
return 0;
if (old_state != ac->state)
kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
+
return 0;
}
#else
@@ -296,7 +297,6 @@ static int acpi_ac_remove(struct acpi_device *device)
{
struct acpi_ac *ac = NULL;
-
if (!device || !acpi_driver_data(device))
return -EINVAL;
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 990ff5b0aeb8..e07782b1fbb6 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1707,24 +1707,23 @@ static int acpi_video_resume(struct notifier_block *nb,
int i;
switch (val) {
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- case PM_RESTORE_PREPARE:
- return NOTIFY_DONE;
- }
-
- video = container_of(nb, struct acpi_video_bus, pm_nb);
-
- dev_info(&video->device->dev, "Restoring backlight state\n");
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ case PM_POST_RESTORE:
+ video = container_of(nb, struct acpi_video_bus, pm_nb);
+
+ dev_info(&video->device->dev, "Restoring backlight state\n");
+
+ for (i = 0; i < video->attached_count; i++) {
+ video_device = video->attached_array[i].bind_info;
+ if (video_device && video_device->brightness)
+ acpi_video_device_lcd_set_level(video_device,
+ video_device->brightness->curr);
+ }
- for (i = 0; i < video->attached_count; i++) {
- video_device = video->attached_array[i].bind_info;
- if (video_device && video_device->brightness)
- acpi_video_device_lcd_set_level(video_device,
- video_device->brightness->curr);
+ return NOTIFY_OK;
}
-
- return NOTIFY_OK;
+ return NOTIFY_DONE;
}
static acpi_status
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index dc208f5f5a1f..306513fec1e1 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -52,7 +52,6 @@ static bool battery_driver_registered;
static int battery_bix_broken_package;
static int battery_notification_delay_ms;
static int battery_ac_is_broken;
-static int battery_quirk_notcharging;
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -216,10 +215,8 @@ static int acpi_battery_get_property(struct power_supply *psy,
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (acpi_battery_is_charged(battery))
val->intval = POWER_SUPPLY_STATUS_FULL;
- else if (battery_quirk_notcharging)
- val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
else
- val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
break;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = acpi_battery_present(battery);
@@ -1105,12 +1102,6 @@ battery_ac_is_broken_quirk(const struct dmi_system_id *d)
return 0;
}
-static int __init battery_quirk_not_charging(const struct dmi_system_id *d)
-{
- battery_quirk_notcharging = 1;
- return 0;
-}
-
static const struct dmi_system_id bat_dmi_table[] __initconst = {
{
/* NEC LZ750/LS */
@@ -1140,19 +1131,6 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
},
},
{
- /*
- * On Lenovo ThinkPads the BIOS specification defines
- * a state when the bits for charging and discharging
- * are both set to 0. That state is "Not Charging".
- */
- .callback = battery_quirk_not_charging,
- .ident = "Lenovo ThinkPad",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad"),
- },
- },
- {
/* Microsoft Surface Go 3 */
.callback = battery_notification_delay_quirk,
.matches = {
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index b67d2ee77cd1..86fa61a21826 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -443,7 +443,7 @@ static void acpi_bus_osc_negotiate_usb_control(void)
}
osc_sb_native_usb4_control =
- control & ((u32 *)context.ret.pointer)[OSC_CONTROL_DWORD];
+ control & acpi_osc_ctx_get_pci_control(&context);
acpi_bus_decode_usb_osc("USB4 _OSC: OS supports", control);
acpi_bus_decode_usb_osc("USB4 _OSC: OS controls",
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 3b299b28a8af..903528f7e187 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -315,7 +315,7 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
goto end;
}
- /* wait for completion and check for PCC errro bit */
+ /* wait for completion and check for PCC error bit */
ret = check_pcc_chan(pcc_ss_id, true);
if (pcc_ss_data->pcc_mrtt)
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index c0da24c9f8c3..4919e7abe93f 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -151,6 +151,7 @@ static int pch_fivr_remove(struct platform_device *pdev)
static const struct acpi_device_id pch_fivr_device_ids[] = {
{"INTC1045", 0},
{"INTC1049", 0},
+ {"INTC1064", 0},
{"INTC10A3", 0},
{"", 0},
};
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index 407b89d8a2ce..86561eda939f 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -232,6 +232,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INTC1050", 0},
{"INTC1060", 0},
{"INTC1061", 0},
+ {"INTC1065", 0},
+ {"INTC1066", 0},
{"INTC10A4", 0},
{"INTC10A5", 0},
{"", 0},
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 42a556346548..b7113fa92fa6 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -27,6 +27,7 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INT3532"},
{"INTC1040"},
{"INTC1041"},
+ {"INTC1042"},
{"INTC1043"},
{"INTC1044"},
{"INTC1045"},
@@ -37,6 +38,11 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INTC1050"},
{"INTC1060"},
{"INTC1061"},
+ {"INTC1062"},
+ {"INTC1063"},
+ {"INTC1064"},
+ {"INTC1065"},
+ {"INTC1066"},
{"INTC10A0"},
{"INTC10A1"},
{"INTC10A2"},
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index 44728529a5b6..e7b4b4e4a55e 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -14,6 +14,7 @@
{"INT3404", }, /* Fan */ \
{"INTC1044", }, /* Fan for Tiger Lake generation */ \
{"INTC1048", }, /* Fan for Alder Lake generation */ \
+ {"INTC1063", }, /* Fan for Meteor Lake generation */ \
{"INTC10A2", }, /* Fan for Raptor Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index ef104809f27b..8d769114a048 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -79,17 +79,17 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
static int find_child_checks(struct acpi_device *adev, bool check_children)
{
- bool sta_present = true;
unsigned long long sta;
acpi_status status;
+ if (check_children && list_empty(&adev->children))
+ return -ENODEV;
+
status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
if (status == AE_NOT_FOUND)
- sta_present = false;
- else if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
- return -ENODEV;
+ return FIND_CHILD_MIN_SCORE;
- if (check_children && list_empty(&adev->children))
+ if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
return -ENODEV;
/*
@@ -99,8 +99,10 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
* matched going forward. [This means a second spec violation in a row,
* so whatever we do here is best effort anyway.]
*/
- return sta_present && !adev->pnp.type.platform_id ?
- FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
+ if (adev->pnp.type.platform_id)
+ return FIND_CHILD_MIN_SCORE;
+
+ return FIND_CHILD_MAX_SCORE;
}
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index fe61f617a943..ae5f4acf2675 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1230,7 +1230,7 @@ static ssize_t hw_error_scrub_store(struct device *dev,
if (rc)
return rc;
- nfit_device_lock(dev);
+ device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (nd_desc) {
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
@@ -1247,7 +1247,7 @@ static ssize_t hw_error_scrub_store(struct device *dev,
break;
}
}
- nfit_device_unlock(dev);
+ device_unlock(dev);
if (rc)
return rc;
return size;
@@ -1267,10 +1267,10 @@ static ssize_t scrub_show(struct device *dev,
ssize_t rc = -ENXIO;
bool busy;
- nfit_device_lock(dev);
+ device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (!nd_desc) {
- nfit_device_unlock(dev);
+ device_unlock(dev);
return rc;
}
acpi_desc = to_acpi_desc(nd_desc);
@@ -1287,7 +1287,7 @@ static ssize_t scrub_show(struct device *dev,
}
mutex_unlock(&acpi_desc->init_mutex);
- nfit_device_unlock(dev);
+ device_unlock(dev);
return rc;
}
@@ -1304,14 +1304,14 @@ static ssize_t scrub_store(struct device *dev,
if (val != 1)
return -EINVAL;
- nfit_device_lock(dev);
+ device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (nd_desc) {
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
}
- nfit_device_unlock(dev);
+ device_unlock(dev);
if (rc)
return rc;
return size;
@@ -1697,9 +1697,9 @@ static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
struct acpi_device *adev = data;
struct device *dev = &adev->dev;
- nfit_device_lock(dev->parent);
+ device_lock(dev->parent);
__acpi_nvdimm_notify(dev, event);
- nfit_device_unlock(dev->parent);
+ device_unlock(dev->parent);
}
static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
@@ -3152,8 +3152,8 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
struct device *dev = acpi_desc->dev;
/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
- nfit_device_lock(dev);
- nfit_device_unlock(dev);
+ device_lock(dev);
+ device_unlock(dev);
/* Bounce the init_mutex to complete initial registration */
mutex_lock(&acpi_desc->init_mutex);
@@ -3305,8 +3305,8 @@ void acpi_nfit_shutdown(void *data)
* acpi_nfit_ars_rescan() submissions have had a chance to
* either submit or see ->cancel set.
*/
- nfit_device_lock(bus_dev);
- nfit_device_unlock(bus_dev);
+ device_lock(bus_dev);
+ device_unlock(bus_dev);
flush_workqueue(nfit_wq);
}
@@ -3449,9 +3449,9 @@ EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
- nfit_device_lock(&adev->dev);
+ device_lock(&adev->dev);
__acpi_nfit_notify(&adev->dev, adev->handle, event);
- nfit_device_unlock(&adev->dev);
+ device_unlock(&adev->dev);
}
static const struct acpi_device_id acpi_nfit_ids[] = {
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index ee8d9973f60b..d48a388b796e 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -32,6 +32,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
*/
mutex_lock(&acpi_desc_lock);
list_for_each_entry(acpi_desc, &acpi_descs, list) {
+ unsigned int align = 1UL << MCI_MISC_ADDR_LSB(mce->misc);
struct device *dev = acpi_desc->dev;
int found_match = 0;
@@ -63,8 +64,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
/* If this fails due to an -ENOMEM, there is little we can do */
nvdimm_bus_add_badrange(acpi_desc->nvdimm_bus,
- ALIGN(mce->addr, L1_CACHE_BYTES),
- L1_CACHE_BYTES);
+ ALIGN_DOWN(mce->addr, align), align);
nvdimm_region_notify(nfit_spa->nd_region,
NVDIMM_REVALIDATE_POISON);
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 50882bdbeb96..6023ad61831a 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -337,30 +337,6 @@ static inline struct acpi_nfit_desc *to_acpi_desc(
return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}
-#ifdef CONFIG_PROVE_LOCKING
-static inline void nfit_device_lock(struct device *dev)
-{
- device_lock(dev);
- mutex_lock(&dev->lockdep_mutex);
-}
-
-static inline void nfit_device_unlock(struct device *dev)
-{
- mutex_unlock(&dev->lockdep_mutex);
- device_unlock(dev);
-}
-#else
-static inline void nfit_device_lock(struct device *dev)
-{
- device_lock(dev);
-}
-
-static inline void nfit_device_unlock(struct device *dev)
-{
- device_unlock(dev);
-}
-#endif
-
const guid_t *to_nfit_uuid(enum nfit_uuids id);
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
void acpi_nfit_shutdown(void *data);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 7a70c4bfc23c..3269a888fb7a 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -36,7 +36,6 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include "acpica/accommon.h"
-#include "acpica/acnamesp.h"
#include "internal.h"
/* Definitions for ACPI_DEBUG_PRINT() */
@@ -1496,91 +1495,6 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
}
EXPORT_SYMBOL(acpi_check_region);
-static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
- void *_res, void **return_value)
-{
- struct acpi_mem_space_context **mem_ctx;
- union acpi_operand_object *handler_obj;
- union acpi_operand_object *region_obj2;
- union acpi_operand_object *region_obj;
- struct resource *res = _res;
- acpi_status status;
-
- region_obj = acpi_ns_get_attached_object(handle);
- if (!region_obj)
- return AE_OK;
-
- handler_obj = region_obj->region.handler;
- if (!handler_obj)
- return AE_OK;
-
- if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
- return AE_OK;
-
- if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
- return AE_OK;
-
- region_obj2 = acpi_ns_get_secondary_object(region_obj);
- if (!region_obj2)
- return AE_OK;
-
- mem_ctx = (void *)&region_obj2->extra.region_context;
-
- if (!(mem_ctx[0]->address >= res->start &&
- mem_ctx[0]->address < res->end))
- return AE_OK;
-
- status = handler_obj->address_space.setup(region_obj,
- ACPI_REGION_DEACTIVATE,
- NULL, (void **)mem_ctx);
- if (ACPI_SUCCESS(status))
- region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
-
- return status;
-}
-
-/**
- * acpi_release_memory - Release any mappings done to a memory region
- * @handle: Handle to namespace node
- * @res: Memory resource
- * @level: A level that terminates the search
- *
- * Walks through @handle and unmaps all SystemMemory Operation Regions that
- * overlap with @res and that have already been activated (mapped).
- *
- * This is a helper that allows drivers to place special requirements on memory
- * region that may overlap with operation regions, primarily allowing them to
- * safely map the region as non-cached memory.
- *
- * The unmapped Operation Regions will be automatically remapped next time they
- * are called, so the drivers do not need to do anything else.
- */
-acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
- u32 level)
-{
- acpi_status status;
-
- if (!(res->flags & IORESOURCE_MEM))
- return AE_TYPE;
-
- status = acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
- acpi_deactivate_mem_region, NULL,
- res, NULL);
- if (ACPI_FAILURE(status))
- return status;
-
- /*
- * Wait for all of the mappings queued up for removal by
- * acpi_deactivate_mem_region() to actually go away.
- */
- synchronize_rcu();
- rcu_barrier();
- flush_scheduled_work();
-
- return AE_OK;
-}
-EXPORT_SYMBOL_GPL(acpi_release_memory);
-
/*
* Let drivers know whether the resource checks are effective
*/
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index b3b507f20e87..d57cf8454b93 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -140,6 +140,17 @@ static struct pci_osc_bit_struct pci_osc_control_bit[] = {
{ OSC_PCI_EXPRESS_DPC_CONTROL, "DPC" },
};
+static struct pci_osc_bit_struct cxl_osc_support_bit[] = {
+ { OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT, "CXL11PortRegAccess" },
+ { OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT, "CXL20PortDevRegAccess" },
+ { OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT, "CXLProtocolErrorReporting" },
+ { OSC_CXL_NATIVE_HP_SUPPORT, "CXLNativeHotPlug" },
+};
+
+static struct pci_osc_bit_struct cxl_osc_control_bit[] = {
+ { OSC_CXL_ERROR_REPORTING_CONTROL, "CXLMemErrorReporting" },
+};
+
static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word,
struct pci_osc_bit_struct *table, int size)
{
@@ -168,33 +179,73 @@ static void decode_osc_control(struct acpi_pci_root *root, char *msg, u32 word)
ARRAY_SIZE(pci_osc_control_bit));
}
+static void decode_cxl_osc_support(struct acpi_pci_root *root, char *msg, u32 word)
+{
+ decode_osc_bits(root, msg, word, cxl_osc_support_bit,
+ ARRAY_SIZE(cxl_osc_support_bit));
+}
+
+static void decode_cxl_osc_control(struct acpi_pci_root *root, char *msg, u32 word)
+{
+ decode_osc_bits(root, msg, word, cxl_osc_control_bit,
+ ARRAY_SIZE(cxl_osc_control_bit));
+}
+
+static inline bool is_pcie(struct acpi_pci_root *root)
+{
+ return root->bridge_type == ACPI_BRIDGE_TYPE_PCIE;
+}
+
+static inline bool is_cxl(struct acpi_pci_root *root)
+{
+ return root->bridge_type == ACPI_BRIDGE_TYPE_CXL;
+}
+
static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766";
+static u8 cxl_osc_uuid_str[] = "68F2D50B-C469-4d8A-BD3D-941A103FD3FC";
-static acpi_status acpi_pci_run_osc(acpi_handle handle,
- const u32 *capbuf, u32 *retval)
+static char *to_uuid(struct acpi_pci_root *root)
+{
+ if (is_cxl(root))
+ return cxl_osc_uuid_str;
+ return pci_osc_uuid_str;
+}
+
+static int cap_length(struct acpi_pci_root *root)
+{
+ if (is_cxl(root))
+ return sizeof(u32) * OSC_CXL_CAPABILITY_DWORDS;
+ return sizeof(u32) * OSC_PCI_CAPABILITY_DWORDS;
+}
+
+static acpi_status acpi_pci_run_osc(struct acpi_pci_root *root,
+ const u32 *capbuf, u32 *pci_control,
+ u32 *cxl_control)
{
struct acpi_osc_context context = {
- .uuid_str = pci_osc_uuid_str,
+ .uuid_str = to_uuid(root),
.rev = 1,
- .cap.length = 12,
+ .cap.length = cap_length(root),
.cap.pointer = (void *)capbuf,
};
acpi_status status;
- status = acpi_run_osc(handle, &context);
+ status = acpi_run_osc(root->device->handle, &context);
if (ACPI_SUCCESS(status)) {
- *retval = *((u32 *)(context.ret.pointer + 8));
+ *pci_control = acpi_osc_ctx_get_pci_control(&context);
+ if (is_cxl(root))
+ *cxl_control = acpi_osc_ctx_get_cxl_control(&context);
kfree(context.ret.pointer);
}
return status;
}
-static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
- u32 support,
- u32 *control)
+static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 support,
+ u32 *control, u32 cxl_support,
+ u32 *cxl_control)
{
acpi_status status;
- u32 result, capbuf[3];
+ u32 pci_result, cxl_result, capbuf[OSC_CXL_CAPABILITY_DWORDS];
support |= root->osc_support_set;
@@ -202,10 +253,28 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
capbuf[OSC_SUPPORT_DWORD] = support;
capbuf[OSC_CONTROL_DWORD] = *control | root->osc_control_set;
- status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
+ if (is_cxl(root)) {
+ cxl_support |= root->osc_ext_support_set;
+ capbuf[OSC_EXT_SUPPORT_DWORD] = cxl_support;
+ capbuf[OSC_EXT_CONTROL_DWORD] = *cxl_control | root->osc_ext_control_set;
+ }
+
+retry:
+ status = acpi_pci_run_osc(root, capbuf, &pci_result, &cxl_result);
if (ACPI_SUCCESS(status)) {
root->osc_support_set = support;
- *control = result;
+ *control = pci_result;
+ if (is_cxl(root)) {
+ root->osc_ext_support_set = cxl_support;
+ *cxl_control = cxl_result;
+ }
+ } else if (is_cxl(root)) {
+ /*
+ * CXL _OSC is optional on CXL 1.1 hosts. Fall back to PCIe _OSC
+ * upon any failure using CXL _OSC.
+ */
+ root->bridge_type = ACPI_BRIDGE_TYPE_PCIE;
+ goto retry;
}
return status;
}
@@ -321,6 +390,8 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
* @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
* @mask: Mask of _OSC bits to request control of, place to store control mask.
* @support: _OSC supported capability.
+ * @cxl_mask: Mask of CXL _OSC control bits, place to store control mask.
+ * @cxl_support: CXL _OSC supported capability.
*
* Run _OSC query for @mask and if that is successful, compare the returned
* mask of control bits with @req. If all of the @req bits are set in the
@@ -331,12 +402,14 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
* _OSC bits the BIOS has granted control of, but its contents are meaningless
* on failure.
**/
-static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 support)
+static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask,
+ u32 support, u32 *cxl_mask,
+ u32 cxl_support)
{
u32 req = OSC_PCI_EXPRESS_CAPABILITY_CONTROL;
struct acpi_pci_root *root;
acpi_status status;
- u32 ctrl, capbuf[3];
+ u32 ctrl, cxl_ctrl = 0, capbuf[OSC_CXL_CAPABILITY_DWORDS];
if (!mask)
return AE_BAD_PARAMETER;
@@ -348,20 +421,42 @@ static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 s
ctrl = *mask;
*mask |= root->osc_control_set;
+ if (is_cxl(root)) {
+ cxl_ctrl = *cxl_mask;
+ *cxl_mask |= root->osc_ext_control_set;
+ }
+
/* Need to check the available controls bits before requesting them. */
do {
- status = acpi_pci_query_osc(root, support, mask);
+ u32 pci_missing = 0, cxl_missing = 0;
+
+ status = acpi_pci_query_osc(root, support, mask, cxl_support,
+ cxl_mask);
if (ACPI_FAILURE(status))
return status;
- if (ctrl == *mask)
- break;
- decode_osc_control(root, "platform does not support",
- ctrl & ~(*mask));
+ if (is_cxl(root)) {
+ if (ctrl == *mask && cxl_ctrl == *cxl_mask)
+ break;
+ pci_missing = ctrl & ~(*mask);
+ cxl_missing = cxl_ctrl & ~(*cxl_mask);
+ } else {
+ if (ctrl == *mask)
+ break;
+ pci_missing = ctrl & ~(*mask);
+ }
+ if (pci_missing)
+ decode_osc_control(root, "platform does not support",
+ pci_missing);
+ if (cxl_missing)
+ decode_cxl_osc_control(root, "CXL platform does not support",
+ cxl_missing);
ctrl = *mask;
- } while (*mask);
+ cxl_ctrl = *cxl_mask;
+ } while (*mask || *cxl_mask);
/* No need to request _OSC if the control was already granted. */
- if ((root->osc_control_set & ctrl) == ctrl)
+ if ((root->osc_control_set & ctrl) == ctrl &&
+ (root->osc_ext_control_set & cxl_ctrl) == cxl_ctrl)
return AE_OK;
if ((ctrl & req) != req) {
@@ -373,11 +468,17 @@ static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 s
capbuf[OSC_QUERY_DWORD] = 0;
capbuf[OSC_SUPPORT_DWORD] = root->osc_support_set;
capbuf[OSC_CONTROL_DWORD] = ctrl;
- status = acpi_pci_run_osc(handle, capbuf, mask);
+ if (is_cxl(root)) {
+ capbuf[OSC_EXT_SUPPORT_DWORD] = root->osc_ext_support_set;
+ capbuf[OSC_EXT_CONTROL_DWORD] = cxl_ctrl;
+ }
+
+ status = acpi_pci_run_osc(root, capbuf, mask, cxl_mask);
if (ACPI_FAILURE(status))
return status;
root->osc_control_set = *mask;
+ root->osc_ext_control_set = *cxl_mask;
return AE_OK;
}
@@ -403,6 +504,53 @@ static u32 calculate_support(void)
return support;
}
+/*
+ * Background on hotplug support, and making it depend on only
+ * CONFIG_HOTPLUG_PCI_PCIE vs. also considering CONFIG_MEMORY_HOTPLUG:
+ *
+ * CONFIG_ACPI_HOTPLUG_MEMORY does depend on CONFIG_MEMORY_HOTPLUG, but
+ * there is no existing _OSC for memory hotplug support. The reason is that
+ * ACPI memory hotplug requires the OS to acknowledge / coordinate with
+ * memory plug events via a scan handler. On the CXL side the equivalent
+ * would be if Linux supported the Mechanical Retention Lock [1], or
+ * otherwise had some coordination for the driver of a PCI device
+ * undergoing hotplug to be consulted on whether the hotplug should
+ * proceed or not.
+ *
+ * The concern is that if Linux says no to supporting CXL hotplug then
+ * the BIOS may say no to giving the OS hotplug control of any other PCIe
+ * device. So the question here is not whether hotplug is enabled, it's
+ * whether it is handled natively by the OS at all, and if
+ * CONFIG_HOTPLUG_PCI_PCIE is enabled then the answer is "yes".
+ *
+ * Otherwise, the plan for CXL coordinated remove, since the kernel does
+ * not support blocking hotplug, is to require the memory device to be
+ * disabled before hotplug is attempted. When CONFIG_MEMORY_HOTPLUG is
+ * disabled that step will fail and the remove attempt cancelled by the
+ * user. If that is not honored and the card is removed anyway then it
+ * does not matter if CONFIG_MEMORY_HOTPLUG is enabled or not, it will
+ * cause a crash and other badness.
+ *
+ * Therefore, just say yes to CXL hotplug and require removal to
+ * be coordinated by userspace unless and until the kernel grows better
+ * mechanisms for doing "managed" removal of devices in consultation with
+ * the driver.
+ *
+ * [1]: https://lore.kernel.org/all/20201122014203.4706-1-ashok.raj@intel.com/
+ */
+static u32 calculate_cxl_support(void)
+{
+ u32 support;
+
+ support = OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT;
+ if (pci_aer_available())
+ support |= OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT;
+ if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+ support |= OSC_CXL_NATIVE_HP_SUPPORT;
+
+ return support;
+}
+
static u32 calculate_control(void)
{
u32 control;
@@ -434,6 +582,16 @@ static u32 calculate_control(void)
return control;
}
+static u32 calculate_cxl_control(void)
+{
+ u32 control = 0;
+
+ if (IS_ENABLED(CONFIG_MEMORY_FAILURE))
+ control |= OSC_CXL_ERROR_REPORTING_CONTROL;
+
+ return control;
+}
+
static bool os_control_query_checks(struct acpi_pci_root *root, u32 support)
{
struct acpi_device *device = root->device;
@@ -452,10 +610,10 @@ static bool os_control_query_checks(struct acpi_pci_root *root, u32 support)
return true;
}
-static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
- bool is_pcie)
+static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
{
u32 support, control = 0, requested = 0;
+ u32 cxl_support = 0, cxl_control = 0, cxl_requested = 0;
acpi_status status;
struct acpi_device *device = root->device;
acpi_handle handle = device->handle;
@@ -479,10 +637,20 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
if (os_control_query_checks(root, support))
requested = control = calculate_control();
- status = acpi_pci_osc_control_set(handle, &control, support);
+ if (is_cxl(root)) {
+ cxl_support = calculate_cxl_support();
+ decode_cxl_osc_support(root, "OS supports", cxl_support);
+ cxl_requested = cxl_control = calculate_cxl_control();
+ }
+
+ status = acpi_pci_osc_control_set(handle, &control, support,
+ &cxl_control, cxl_support);
if (ACPI_SUCCESS(status)) {
if (control)
decode_osc_control(root, "OS now controls", control);
+ if (cxl_control)
+ decode_cxl_osc_control(root, "OS now controls",
+ cxl_control);
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
/*
@@ -504,13 +672,18 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
*no_aspm = 1;
/* _OSC is optional for PCI host bridges */
- if ((status == AE_NOT_FOUND) && !is_pcie)
+ if (status == AE_NOT_FOUND && !is_pcie(root))
return;
if (control) {
decode_osc_control(root, "OS requested", requested);
decode_osc_control(root, "platform willing to grant", control);
}
+ if (cxl_control) {
+ decode_cxl_osc_control(root, "OS requested", cxl_requested);
+ decode_cxl_osc_control(root, "platform willing to grant",
+ cxl_control);
+ }
dev_info(&device->dev, "_OSC: platform retains control of PCIe features (%s)\n",
acpi_format_exception(status));
@@ -527,7 +700,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_handle handle = device->handle;
int no_aspm = 0;
bool hotadd = system_state == SYSTEM_RUNNING;
- bool is_pcie;
+ const char *acpi_hid;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -585,8 +758,15 @@ static int acpi_pci_root_add(struct acpi_device *device,
root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle);
- is_pcie = strcmp(acpi_device_hid(device), "PNP0A08") == 0;
- negotiate_os_control(root, &no_aspm, is_pcie);
+ acpi_hid = acpi_device_hid(root->device);
+ if (strcmp(acpi_hid, "PNP0A08") == 0)
+ root->bridge_type = ACPI_BRIDGE_TYPE_PCIE;
+ else if (strcmp(acpi_hid, "ACPI0016") == 0)
+ root->bridge_type = ACPI_BRIDGE_TYPE_CXL;
+ else
+ dev_dbg(&device->dev, "Assuming non-PCIe host bridge\n");
+
+ negotiate_os_control(root, &no_aspm);
/*
* TBD: Need PCI interface for enumeration/configuration of roots.
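As a rough sketch (an assumption for illustration, not taken from the patch), the CXL-aware query path above fills the _OSC capability buffer along these lines before handing it to acpi_pci_run_osc(); the dword index macros and the support/control variables are the ones used in the hunks above:

	u32 capbuf[OSC_CXL_CAPABILITY_DWORDS];

	capbuf[OSC_QUERY_DWORD]       = OSC_QUERY_ENABLE; /* query, do not commit */
	capbuf[OSC_SUPPORT_DWORD]     = support;          /* PCIe features the OS supports */
	capbuf[OSC_CONTROL_DWORD]     = control;          /* PCIe controls the OS requests */
	capbuf[OSC_EXT_SUPPORT_DWORD] = cxl_support;      /* CXL features the OS supports */
	capbuf[OSC_EXT_CONTROL_DWORD] = cxl_control;      /* CXL controls the OS requests */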
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index e9c84d0ac55b..6a5572a1a80c 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -38,11 +38,11 @@
#define ACPI_IDLE_STATE_START (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
-module_param(max_cstate, uint, 0000);
-static unsigned int nocst __read_mostly;
-module_param(nocst, uint, 0000);
-static int bm_check_disable __read_mostly;
-module_param(bm_check_disable, uint, 0000);
+module_param(max_cstate, uint, 0400);
+static bool nocst __read_mostly;
+module_param(nocst, bool, 0400);
+static bool bm_check_disable __read_mostly;
+module_param(bm_check_disable, bool, 0400);
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 3147702710af..04ea1569df78 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -1035,20 +1035,22 @@ static void acpi_sleep_hibernate_setup(void)
static inline void acpi_sleep_hibernate_setup(void) {}
#endif /* !CONFIG_HIBERNATION */
-static void acpi_power_off_prepare(void)
+static int acpi_power_off_prepare(struct sys_off_data *data)
{
/* Prepare to power off the system */
acpi_sleep_prepare(ACPI_STATE_S5);
acpi_disable_all_gpes();
acpi_os_wait_events_complete();
+ return NOTIFY_DONE;
}
-static void acpi_power_off(void)
+static int acpi_power_off(struct sys_off_data *data)
{
/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
pr_debug("%s called\n", __func__);
local_irq_disable();
acpi_enter_sleep_state(ACPI_STATE_S5);
+ return NOTIFY_DONE;
}
int __init acpi_sleep_init(void)
@@ -1067,8 +1069,14 @@ int __init acpi_sleep_init(void)
if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
sleep_states[ACPI_STATE_S5] = 1;
- pm_power_off_prepare = acpi_power_off_prepare;
- pm_power_off = acpi_power_off;
+
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF_PREPARE,
+ SYS_OFF_PRIO_FIRMWARE,
+ acpi_power_off_prepare, NULL);
+
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE,
+ acpi_power_off, NULL);
} else {
acpi_no_s5 = true;
}
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 7e775ba6fdd9..0e3ed5eb367b 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -20,6 +20,10 @@
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/acpi.h>
+#include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
@@ -94,31 +98,11 @@ static ssize_t driver_override_store(struct device *_dev,
const char *buf, size_t count)
{
struct amba_device *dev = to_amba_device(_dev);
- char *driver_override, *old, *cp;
-
- /* We need to keep extra room for a newline */
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- device_lock(_dev);
- old = dev->driver_override;
- if (strlen(driver_override)) {
- dev->driver_override = driver_override;
- } else {
- kfree(driver_override);
- dev->driver_override = NULL;
- }
- device_unlock(_dev);
+ int ret;
- kfree(old);
+ ret = driver_set_override(_dev, &dev->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
@@ -273,6 +257,36 @@ static void amba_shutdown(struct device *dev)
drv->shutdown(to_amba_device(dev));
}
+static int amba_dma_configure(struct device *dev)
+{
+ struct amba_driver *drv = to_amba_driver(dev->driver);
+ enum dev_dma_attr attr;
+ int ret = 0;
+
+ if (dev->of_node) {
+ ret = of_dma_configure(dev, dev->of_node, true);
+ } else if (has_acpi_companion(dev)) {
+ attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
+ ret = acpi_dma_configure(dev, attr);
+ }
+
+ if (!ret && !drv->driver_managed_dma) {
+ ret = iommu_device_use_default_domain(dev);
+ if (ret)
+ arch_teardown_dma_ops(dev);
+ }
+
+ return ret;
+}
+
+static void amba_dma_cleanup(struct device *dev)
+{
+ struct amba_driver *drv = to_amba_driver(dev->driver);
+
+ if (!drv->driver_managed_dma)
+ iommu_device_unuse_default_domain(dev);
+}
+
#ifdef CONFIG_PM
/*
* Hooks to provide runtime PM of the pclk (bus clock). It is safe to
@@ -341,7 +355,8 @@ struct bus_type amba_bustype = {
.probe = amba_probe,
.remove = amba_remove,
.shutdown = amba_shutdown,
- .dma_configure = platform_dma_configure,
+ .dma_configure = amba_dma_configure,
+ .dma_cleanup = amba_dma_cleanup,
.pm = &amba_pm,
};
EXPORT_SYMBOL_GPL(amba_bustype);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f3b639e89dd8..9e0982289dde 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -133,18 +133,45 @@ static int binder_set_stop_on_user_error(const char *val,
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
param_get_int, &binder_stop_on_user_error, 0644);
-#define binder_debug(mask, x...) \
- do { \
- if (binder_debug_mask & mask) \
- pr_info_ratelimited(x); \
- } while (0)
+static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (binder_debug_mask & mask) {
+ va_start(args, format);
+ vaf.va = &args;
+ vaf.fmt = format;
+ pr_info_ratelimited("%pV", &vaf);
+ va_end(args);
+ }
+}
+
+#define binder_txn_error(x...) \
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
+
+static __printf(1, 2) void binder_user_error(const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
+ va_start(args, format);
+ vaf.va = &args;
+ vaf.fmt = format;
+ pr_info_ratelimited("%pV", &vaf);
+ va_end(args);
+ }
+
+ if (binder_stop_on_user_error)
+ binder_stop_on_user_error = 2;
+}
-#define binder_user_error(x...) \
+#define binder_set_extended_error(ee, _id, _command, _param) \
do { \
- if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
- pr_info_ratelimited(x); \
- if (binder_stop_on_user_error) \
- binder_stop_on_user_error = 2; \
+ (ee)->id = _id; \
+ (ee)->command = _command; \
+ (ee)->param = _param; \
} while (0)
#define to_flat_binder_object(hdr) \
@@ -1481,6 +1508,8 @@ static void binder_free_txn_fixups(struct binder_transaction *t)
list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
fput(fixup->file);
+ if (fixup->target_fd >= 0)
+ put_unused_fd(fixup->target_fd);
list_del(&fixup->fixup_entry);
kfree(fixup);
}
@@ -1855,7 +1884,7 @@ static void binder_deferred_fd_close(int fd)
if (!twcb)
return;
init_task_work(&twcb->twork, binder_do_fd_close);
- close_fd_get_file(fd, &twcb->file);
+ twcb->file = close_fd_get_file(fd);
if (twcb->file) {
filp_close(twcb->file, current->files);
task_work_add(current, &twcb->twork, TWA_RESUME);
@@ -2220,6 +2249,7 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
}
fixup->file = file;
fixup->offset = fd_offset;
+ fixup->target_fd = -1;
trace_binder_transaction_fd_send(t, fd, fixup->offset);
list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
@@ -2705,6 +2735,24 @@ static struct binder_node *binder_get_node_refs_for_txn(
return target_node;
}
+static void binder_set_txn_from_error(struct binder_transaction *t, int id,
+ uint32_t command, int32_t param)
+{
+ struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
+
+ if (!from) {
+ /* annotation for sparse */
+ __release(&from->proc->inner_lock);
+ return;
+ }
+
+ /* don't override existing errors */
+ if (from->ee.command == BR_OK)
+ binder_set_extended_error(&from->ee, id, command, param);
+ binder_inner_proc_unlock(from->proc);
+ binder_thread_dec_tmpref(from);
+}
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -2750,6 +2798,10 @@ static void binder_transaction(struct binder_proc *proc,
e->offsets_size = tr->offsets_size;
strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
+ binder_inner_proc_lock(proc);
+ binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
+ binder_inner_proc_unlock(proc);
+
if (reply) {
binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
@@ -2785,6 +2837,8 @@ static void binder_transaction(struct binder_proc *proc,
if (target_thread == NULL) {
/* annotation for sparse */
__release(&target_thread->proc->inner_lock);
+ binder_txn_error("%d:%d reply target not found\n",
+ thread->pid, proc->pid);
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
goto err_dead_binder;
@@ -2850,6 +2904,8 @@ static void binder_transaction(struct binder_proc *proc,
}
}
if (!target_node) {
+ binder_txn_error("%d:%d cannot find target node\n",
+ thread->pid, proc->pid);
/*
* return_error is set above
*/
@@ -2859,6 +2915,8 @@ static void binder_transaction(struct binder_proc *proc,
}
e->to_node = target_node->debug_id;
if (WARN_ON(proc == target_proc)) {
+ binder_txn_error("%d:%d self transactions not allowed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
@@ -2866,6 +2924,8 @@ static void binder_transaction(struct binder_proc *proc,
}
if (security_binder_transaction(proc->cred,
target_proc->cred) < 0) {
+ binder_txn_error("%d:%d transaction credentials failed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EPERM;
return_error_line = __LINE__;
@@ -2937,6 +2997,8 @@ static void binder_transaction(struct binder_proc *proc,
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
+ binder_txn_error("%d:%d cannot allocate transaction\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -ENOMEM;
return_error_line = __LINE__;
@@ -2948,6 +3010,8 @@ static void binder_transaction(struct binder_proc *proc,
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
+ binder_txn_error("%d:%d cannot allocate work for transaction\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -ENOMEM;
return_error_line = __LINE__;
@@ -2994,6 +3058,8 @@ static void binder_transaction(struct binder_proc *proc,
security_cred_getsecid(proc->cred, &secid);
ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
if (ret) {
+ binder_txn_error("%d:%d failed to get security context\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
@@ -3002,7 +3068,8 @@ static void binder_transaction(struct binder_proc *proc,
added_size = ALIGN(secctx_sz, sizeof(u64));
extra_buffers_size += added_size;
if (extra_buffers_size < added_size) {
- /* integer overflow of extra_buffers_size */
+ binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
@@ -3016,9 +3083,15 @@ static void binder_transaction(struct binder_proc *proc,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY), current->tgid);
if (IS_ERR(t->buffer)) {
- /*
- * -ESRCH indicates VMA cleared. The target is dying.
- */
+ char *s;
+
+ ret = PTR_ERR(t->buffer);
+ s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
+ : (ret == -ENOSPC) ? ": no space left"
+ : (ret == -ENOMEM) ? ": memory allocation failed"
+ : "";
+ binder_txn_error("cannot allocate buffer%s", s);
+
return_error_param = PTR_ERR(t->buffer);
return_error = return_error_param == -ESRCH ?
BR_DEAD_REPLY : BR_FAILED_REPLY;
@@ -3101,6 +3174,8 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer,
buffer_offset,
sizeof(object_offset))) {
+ binder_txn_error("%d:%d copy offset from buffer failed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
@@ -3159,6 +3234,8 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer,
object_offset,
fp, sizeof(*fp))) {
+ binder_txn_error("%d:%d translate binder failed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
@@ -3176,6 +3253,8 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer,
object_offset,
fp, sizeof(*fp))) {
+ binder_txn_error("%d:%d translate handle failed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
@@ -3196,6 +3275,8 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer,
object_offset,
fp, sizeof(*fp))) {
+ binder_txn_error("%d:%d translate fd failed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
@@ -3265,6 +3346,8 @@ static void binder_transaction(struct binder_proc *proc,
object_offset,
fda, sizeof(*fda));
if (ret) {
+ binder_txn_error("%d:%d translate fd array failed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = ret > 0 ? -EINVAL : ret;
return_error_line = __LINE__;
@@ -3292,6 +3375,8 @@ static void binder_transaction(struct binder_proc *proc,
(const void __user *)(uintptr_t)bp->buffer,
bp->length);
if (ret) {
+ binder_txn_error("%d:%d deferred copy failed\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
@@ -3315,6 +3400,8 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer,
object_offset,
bp, sizeof(*bp))) {
+ binder_txn_error("%d:%d failed to fixup parent\n",
+ thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
@@ -3422,6 +3509,8 @@ static void binder_transaction(struct binder_proc *proc,
return;
err_dead_proc_or_thread:
+ binder_txn_error("%d:%d dead process or thread\n",
+ thread->pid, proc->pid);
return_error_line = __LINE__;
binder_dequeue_work(proc, tcomplete);
err_translate_failed:
@@ -3457,21 +3546,26 @@ err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
- if (target_thread)
- binder_thread_dec_tmpref(target_thread);
- if (target_proc)
- binder_proc_dec_tmpref(target_proc);
if (target_node) {
binder_dec_node(target_node, 1, 0);
binder_dec_node_tmpref(target_node);
}
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
- proc->pid, thread->pid, return_error, return_error_param,
+ "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
+ proc->pid, thread->pid, reply ? "reply" :
+ (tr->flags & TF_ONE_WAY ? "async" : "call"),
+ target_proc ? target_proc->pid : 0,
+ target_thread ? target_thread->pid : 0,
+ t_debug_id, return_error, return_error_param,
(u64)tr->data_size, (u64)tr->offsets_size,
return_error_line);
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ if (target_proc)
+ binder_proc_dec_tmpref(target_proc);
+
{
struct binder_transaction_log_entry *fe;
@@ -3491,10 +3585,16 @@ err_invalid_target_handle:
BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
+ binder_set_txn_from_error(in_reply_to, t_debug_id,
+ return_error, return_error_param);
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
binder_enqueue_thread_work(thread, &thread->return_error.work);
binder_send_failed_reply(in_reply_to, return_error);
} else {
+ binder_inner_proc_lock(proc);
+ binder_set_extended_error(&thread->ee, t_debug_id,
+ return_error, return_error_param);
+ binder_inner_proc_unlock(proc);
thread->return_error.cmd = return_error;
binder_enqueue_thread_work(thread, &thread->return_error.work);
}
@@ -3984,7 +4084,7 @@ static int binder_thread_write(struct binder_proc *proc,
} break;
default:
- pr_err("%d:%d unknown command %d\n",
+ pr_err("%d:%d unknown command %u\n",
proc->pid, thread->pid, cmd);
return -EINVAL;
}
@@ -4075,10 +4175,9 @@ static int binder_wait_for_work(struct binder_thread *thread,
* Now that we are in the context of the transaction target
* process, we can allocate and install fds. Process the
* list of fds to translate and fixup the buffer with the
- * new fds.
+ * new fds first and only then install the files.
*
- * If we fail to allocate an fd, then free the resources by
- * fput'ing files that have not been processed and ksys_close'ing
+ * If we fail to allocate an fd, skip the install and release
* any fds that have already been allocated.
*/
static int binder_apply_fd_fixups(struct binder_proc *proc,
@@ -4095,41 +4194,31 @@ static int binder_apply_fd_fixups(struct binder_proc *proc,
"failed fd fixup txn %d fd %d\n",
t->debug_id, fd);
ret = -ENOMEM;
- break;
+ goto err;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
"fd fixup txn %d fd %d\n",
t->debug_id, fd);
trace_binder_transaction_fd_recv(t, fd, fixup->offset);
- fd_install(fd, fixup->file);
- fixup->file = NULL;
+ fixup->target_fd = fd;
if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
fixup->offset, &fd,
sizeof(u32))) {
ret = -EINVAL;
- break;
+ goto err;
}
}
list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
- if (fixup->file) {
- fput(fixup->file);
- } else if (ret) {
- u32 fd;
- int err;
-
- err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
- t->buffer,
- fixup->offset,
- sizeof(fd));
- WARN_ON(err);
- if (!err)
- binder_deferred_fd_close(fd);
- }
+ fd_install(fixup->target_fd, fixup->file);
list_del(&fixup->fixup_entry);
kfree(fixup);
}
return ret;
+
+err:
+ binder_free_txn_fixups(t);
+ return ret;
}
static int binder_thread_read(struct binder_proc *proc,
@@ -4490,7 +4579,7 @@ retry:
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
+ "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
(cmd == BR_TRANSACTION_SEC_CTX) ?
@@ -4632,6 +4721,7 @@ static struct binder_thread *binder_get_thread_ilocked(
thread->return_error.cmd = BR_OK;
thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
thread->reply_error.cmd = BR_OK;
+ thread->ee.command = BR_OK;
INIT_LIST_HEAD(&new_thread->waiting_thread_node);
return thread;
}
@@ -5070,6 +5160,22 @@ static int binder_ioctl_get_freezer_info(
return 0;
}
+static int binder_ioctl_get_extended_error(struct binder_thread *thread,
+ void __user *ubuf)
+{
+ struct binder_extended_error ee;
+
+ binder_inner_proc_lock(thread->proc);
+ ee = thread->ee;
+ binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
+ binder_inner_proc_unlock(thread->proc);
+
+ if (copy_to_user(ubuf, &ee, sizeof(ee)))
+ return -EFAULT;
+
+ return 0;
+}
+
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
@@ -5278,6 +5384,11 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
binder_inner_proc_unlock(proc);
break;
}
+ case BINDER_GET_EXTENDED_ERROR:
+ ret = binder_ioctl_get_extended_error(thread, ubuf);
+ if (ret < 0)
+ goto err;
+ break;
default:
ret = -EINVAL;
goto err;
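
For reference, a minimal user-space sketch of how the new BINDER_GET_EXTENDED_ERROR ioctl is consumed. It assumes the uapi header update elsewhere in this series defines struct binder_extended_error with id, command and param fields; those names are not visible in this hunk and are shown only for illustration.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Ask the kernel why the last transaction on this thread failed.
 * The driver resets the record to BR_OK once it has been read. */
static void report_last_binder_error(int binder_fd)
{
        struct binder_extended_error ee;

        if (ioctl(binder_fd, BINDER_GET_EXTENDED_ERROR, &ee) < 0) {
                perror("BINDER_GET_EXTENDED_ERROR");
                return;
        }
        printf("txn %u: command %u, param %d\n", ee.id, ee.command, ee.param);
}
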
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 2ac1008a5f39..5649a0371a1f 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1175,14 +1175,11 @@ static void binder_alloc_clear_buf(struct binder_alloc *alloc,
unsigned long size;
struct page *page;
pgoff_t pgoff;
- void *kptr;
page = binder_alloc_get_page(alloc, buffer,
buffer_offset, &pgoff);
size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
- kptr = kmap(page) + pgoff;
- memset(kptr, 0, size);
- kunmap(page);
+ memset_page(page, pgoff, 0, size);
bytes -= size;
buffer_offset += size;
}
@@ -1220,9 +1217,9 @@ binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
page = binder_alloc_get_page(alloc, buffer,
buffer_offset, &pgoff);
size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
- kptr = kmap(page) + pgoff;
+ kptr = kmap_local_page(page) + pgoff;
ret = copy_from_user(kptr, from, size);
- kunmap(page);
+ kunmap_local(kptr);
if (ret)
return bytes - size + ret;
bytes -= size;
@@ -1247,23 +1244,14 @@ static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
unsigned long size;
struct page *page;
pgoff_t pgoff;
- void *tmpptr;
- void *base_ptr;
page = binder_alloc_get_page(alloc, buffer,
buffer_offset, &pgoff);
size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
- base_ptr = kmap_atomic(page);
- tmpptr = base_ptr + pgoff;
if (to_buffer)
- memcpy(tmpptr, ptr, size);
+ memcpy_to_page(page, pgoff, ptr, size);
else
- memcpy(ptr, tmpptr, size);
- /*
- * kunmap_atomic() takes care of flushing the cache
- * if this device has VIVT cache arch
- */
- kunmap_atomic(base_ptr);
+ memcpy_from_page(ptr, page, pgoff, size);
bytes -= size;
pgoff = 0;
ptr = ptr + size;
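
The binder_alloc.c conversion above replaces open-coded kmap()/kmap_atomic() with the generic highmem helpers. As a sketch of what memcpy_to_page() amounts to (based on the generic implementation in linux/highmem.h), the pattern is a short-lived local mapping plus a cache flush:

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy @len bytes into @page at @offset, mirroring what memcpy_to_page()
 * does: map the page locally, copy, flush, unmap. */
static void copy_to_page_sketch(struct page *page, size_t offset,
                                const void *src, size_t len)
{
        char *to = kmap_local_page(page);

        memcpy(to + offset, src, len);
        flush_dcache_page(page);
        kunmap_local(to);
}

Unlike kmap_atomic(), the local mapping does not disable pagefaults or preemption, which is why it is the preferred interface for new code.
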
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index d6b6b8cb7346..8dc0bccf8513 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -480,6 +480,8 @@ struct binder_proc {
* (only accessed by this thread)
* @reply_error: transaction errors reported by target thread
* (protected by @proc->inner_lock)
+ * @ee: extended error information from this thread
+ * (protected by @proc->inner_lock)
* @wait: wait queue for thread work
* @stats: per-thread statistics
* (atomics, no lock needed)
@@ -504,6 +506,7 @@ struct binder_thread {
bool process_todo;
struct binder_error return_error;
struct binder_error reply_error;
+ struct binder_extended_error ee;
wait_queue_head_t wait;
struct binder_stats stats;
atomic_t tmp_ref;
@@ -515,6 +518,7 @@ struct binder_thread {
* @fixup_entry: list entry
* @file: struct file to be associated with new fd
* @offset: offset in buffer data to this fixup
+ * @target_fd: fd used by the target to install @file
*
* List element for fd fixups in a transaction. Since file
* descriptors need to be allocated in the context of the
@@ -525,6 +529,7 @@ struct binder_txn_fd_fixup {
struct list_head fixup_entry;
struct file *file;
size_t offset;
+ int target_fd;
};
struct binder_transaction {
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index e3605cdd4335..6c5e94f6cb3a 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -60,6 +60,7 @@ enum binderfs_stats_mode {
struct binder_features {
bool oneway_spam_detection;
+ bool extended_error;
};
static const struct constant_table binderfs_param_stats[] = {
@@ -75,6 +76,7 @@ static const struct fs_parameter_spec binderfs_fs_parameters[] = {
static struct binder_features binder_features = {
.oneway_spam_detection = true,
+ .extended_error = true,
};
static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
@@ -615,6 +617,12 @@ static int init_binder_features(struct super_block *sb)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
+ dentry = binderfs_create_file(dir, "extended_error",
+ &binder_features_fops,
+ &binder_features.extended_error);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
return 0;
}
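
Before issuing the new ioctl, user space can check the binderfs feature flag exposed above. A hedged sketch, assuming binderfs is mounted at the conventional /dev/binderfs path and that the feature file prints 1 when enabled, as the existing oneway_spam_detection file does:

#include <stdio.h>

/* Returns 1 if the running kernel advertises extended error reporting. */
static int binder_has_extended_error(void)
{
        FILE *f = fopen("/dev/binderfs/features/extended_error", "r");
        int val = 0;

        if (!f)
                return 0;
        if (fscanf(f, "%d", &val) != 1)
                val = 0;
        fclose(f);
        return val == 1;
}
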
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index 2448441571ed..400e65190904 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -25,7 +25,6 @@
#include <linux/gpio/consumer.h>
#include <scsi/scsi_host.h>
-#include <mach/palmld.h>
#define DRV_NAME "pata_palmld"
@@ -63,7 +62,7 @@ static int palmld_pata_probe(struct platform_device *pdev)
return -ENOMEM;
/* remap drive's physical memory address */
- mem = devm_ioremap(dev, PALMLD_IDE_PHYS, 0x1000);
+ mem = devm_platform_ioremap_resource(pdev, 0);
if (!mem)
return -ENOMEM;
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 02f7f1358e86..83217d243c25 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += arch_topology.o
obj-$(CONFIG_GENERIC_ARCH_NUMA) += arch_numa.o
+obj-$(CONFIG_ACPI) += physical_location.o
obj-y += test/
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index f73b836047cf..579c851a2bd7 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -19,6 +19,9 @@
#include <linux/rcupdate.h>
#include <linux/sched.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/thermal_pressure.h>
+
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
@@ -195,6 +198,8 @@ void topology_update_thermal_pressure(const struct cpumask *cpus,
th_pressure = max_capacity - capacity;
+ trace_thermal_pressure_update(cpu, th_pressure);
+
for_each_cpu(cpu, cpus)
WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 2882af26392a..ab71403d102f 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -159,6 +159,7 @@ extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
extern void device_block_probing(void);
extern void device_unblock_probing(void);
+extern void deferred_probe_extend_timeout(void);
/* /sys/devices directory */
extern struct kset *devices_kset;
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 97936ec49bde..7ca47e5b3c1f 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -617,7 +617,7 @@ int bus_add_driver(struct device_driver *drv)
if (drv->bus->p->drivers_autoprobe) {
error = driver_attach(drv);
if (error)
- goto out_unregister;
+ goto out_del_list;
}
module_add_driver(drv->owner, drv);
@@ -644,6 +644,8 @@ int bus_add_driver(struct device_driver *drv)
return 0;
+out_del_list:
+ klist_del(&priv->knode_bus);
out_unregister:
kobject_put(&priv->kobj);
/* drv->p is freed in driver_release() */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 3d6430eb0c6a..7cd789c4985d 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -32,6 +32,7 @@
#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include "base.h"
+#include "physical_location.h"
#include "power/power.h"
#ifdef CONFIG_SYSFS_DEPRECATED
@@ -2649,8 +2650,17 @@ static int device_add_attrs(struct device *dev)
goto err_remove_dev_waiting_for_supplier;
}
+ if (dev_add_physical_location(dev)) {
+ error = device_add_group(dev,
+ &dev_attr_physical_location_group);
+ if (error)
+ goto err_remove_dev_removable;
+ }
+
return 0;
+ err_remove_dev_removable:
+ device_remove_file(dev, &dev_attr_removable);
err_remove_dev_waiting_for_supplier:
device_remove_file(dev, &dev_attr_waiting_for_supplier);
err_remove_dev_online:
@@ -2672,6 +2682,11 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
+ if (dev->physical_location) {
+ device_remove_group(dev, &dev_attr_physical_location_group);
+ kfree(dev->physical_location);
+ }
+
device_remove_file(dev, &dev_attr_removable);
device_remove_file(dev, &dev_attr_waiting_for_supplier);
device_remove_file(dev, &dev_attr_online);
@@ -2864,9 +2879,6 @@ void device_initialize(struct device *dev)
kobject_init(&dev->kobj, &device_ktype);
INIT_LIST_HEAD(&dev->dma_pools);
mutex_init(&dev->mutex);
-#ifdef CONFIG_PROVE_LOCKING
- mutex_init(&dev->lockdep_mutex);
-#endif
lockdep_set_novalidate_class(&dev->mutex);
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 3fc3b5940bb3..11b0fb6414d3 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -60,6 +60,7 @@ static bool initcalls_done;
/* Save the async probe drivers' name from kernel cmdline */
#define ASYNC_DRV_NAMES_MAX_LEN 256
static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN];
+static bool async_probe_default;
/*
* In some cases, like suspend to RAM or hibernation, It might be reasonable
@@ -257,7 +258,6 @@ DEFINE_SHOW_ATTRIBUTE(deferred_devs);
int driver_deferred_probe_timeout;
EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
-static DECLARE_WAIT_QUEUE_HEAD(probe_timeout_waitqueue);
static int __init deferred_probe_timeout_setup(char *str)
{
@@ -274,10 +274,10 @@ __setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
* @dev: device to check
*
* Return:
- * -ENODEV if initcalls have completed and modules are disabled.
- * -ETIMEDOUT if the deferred probe timeout was set and has expired
- * and modules are enabled.
- * -EPROBE_DEFER in other cases.
+ * * -ENODEV if initcalls have completed and modules are disabled.
+ * * -ETIMEDOUT if the deferred probe timeout was set and has expired
+ * and modules are enabled.
+ * * -EPROBE_DEFER in other cases.
*
* Drivers or subsystems can opt-in to calling this function instead of directly
* returning -EPROBE_DEFER.
@@ -312,10 +312,23 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
dev_info(p->device, "deferred probe pending\n");
mutex_unlock(&deferred_probe_mutex);
- wake_up_all(&probe_timeout_waitqueue);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
+void deferred_probe_extend_timeout(void)
+{
+ /*
+ * If the work hasn't been queued yet or if the work expired, don't
+ * start a new one.
+ */
+ if (cancel_delayed_work(&deferred_probe_timeout_work)) {
+ schedule_delayed_work(&deferred_probe_timeout_work,
+ driver_deferred_probe_timeout * HZ);
+ pr_debug("Extended deferred probe timeout by %d secs\n",
+ driver_deferred_probe_timeout);
+ }
+}
+
/**
* deferred_probe_initcall() - Enable probing of deferred devices
*
@@ -671,6 +684,8 @@ sysfs_failed:
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+ if (dev->bus && dev->bus->dma_cleanup)
+ dev->bus->dma_cleanup(dev);
pinctrl_bind_failed:
device_links_no_driver(dev);
device_unbind_cleanup(dev);
@@ -716,9 +731,6 @@ int driver_probe_done(void)
*/
void wait_for_device_probe(void)
{
- /* wait for probe timeout */
- wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout);
-
/* wait for the deferred probe workqueue to finish */
flush_work(&deferred_probe_work);
@@ -797,7 +809,11 @@ static int driver_probe_device(struct device_driver *drv, struct device *dev)
static inline bool cmdline_requested_async_probing(const char *drv_name)
{
- return parse_option_str(async_probe_drv_names, drv_name);
+ bool async_drv;
+
+ async_drv = parse_option_str(async_probe_drv_names, drv_name);
+
+ return (async_probe_default != async_drv);
}
/* The option format is "driver_async_probe=drv_name1,drv_name2,..." */
@@ -807,6 +823,8 @@ static int __init save_async_options(char *buf)
pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
+ async_probe_default = parse_option_str(async_probe_drv_names, "*");
+
return 1;
}
__setup("driver_async_probe=", save_async_options);
@@ -941,6 +959,7 @@ out_unlock:
static int __device_attach(struct device *dev, bool allow_async)
{
int ret = 0;
+ bool async = false;
device_lock(dev);
if (dev->p->dead) {
@@ -979,7 +998,7 @@ static int __device_attach(struct device *dev, bool allow_async)
*/
dev_dbg(dev, "scheduling asynchronous probe\n");
get_device(dev);
- async_schedule_dev(__device_attach_async_helper, dev);
+ async = true;
} else {
pm_request_idle(dev);
}
@@ -989,6 +1008,8 @@ static int __device_attach(struct device *dev, bool allow_async)
}
out_unlock:
device_unlock(dev);
+ if (async)
+ async_schedule_dev(__device_attach_async_helper, dev);
return ret;
}
@@ -1082,6 +1103,7 @@ static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
__device_driver_lock(dev, dev->parent);
drv = dev->p->async_driver;
+ dev->p->async_driver = NULL;
ret = driver_probe_device(drv, dev);
__device_driver_unlock(dev, dev->parent);
@@ -1128,7 +1150,7 @@ static int __driver_attach(struct device *dev, void *data)
*/
dev_dbg(dev, "probing driver %s asynchronously\n", drv->name);
device_lock(dev);
- if (!dev->driver) {
+ if (!dev->driver && !dev->p->async_driver) {
get_device(dev);
dev->p->async_driver = drv;
async_schedule_dev(__driver_attach_async_helper, dev);
@@ -1199,6 +1221,9 @@ static void __device_release_driver(struct device *dev, struct device *parent)
device_remove(dev);
+ if (dev->bus && dev->bus->dma_cleanup)
+ dev->bus->dma_cleanup(dev);
+
device_links_driver_cleanup(dev);
device_unbind_cleanup(dev);
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 8c0d33e182fd..15a75afe6b84 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -31,6 +31,75 @@ static struct device *next_device(struct klist_iter *i)
}
/**
+ * driver_set_override() - Helper to set or clear driver override.
+ * @dev: Device to change
+ * @override: Address of string to change (e.g. &device->driver_override);
+ * The contents will be freed and hold newly allocated override.
+ * @s: NUL-terminated string, new driver name to force a match, pass empty
+ * string to clear it ("" or "\n", where the latter is only for sysfs
+ * interface).
+ * @len: length of @s
+ *
+ * Helper to set or clear driver override in a device, intended for the cases
+ * when the driver_override field is allocated by driver/bus code.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int driver_set_override(struct device *dev, const char **override,
+ const char *s, size_t len)
+{
+ const char *new, *old;
+ char *cp;
+
+ if (!override || !s)
+ return -EINVAL;
+
+ /*
+ * The stored value will be used in sysfs show callback (sysfs_emit()),
+ * which has a length limit of PAGE_SIZE and adds a trailing newline.
+ * Thus we can store one character less to avoid truncation during sysfs
+ * show.
+ */
+ if (len >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ if (!len) {
+ /* Empty string passed - clear override */
+ device_lock(dev);
+ old = *override;
+ *override = NULL;
+ device_unlock(dev);
+ kfree(old);
+
+ return 0;
+ }
+
+ cp = strnchr(s, len, '\n');
+ if (cp)
+ len = cp - s;
+
+ new = kstrndup(s, len, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ device_lock(dev);
+ old = *override;
+ if (cp != s) {
+ *override = new;
+ } else {
+ /* "\n" passed - clear override */
+ kfree(new);
+ *override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(driver_set_override);
+
+/**
* driver_for_each_device - Iterator for devices bound to a driver.
* @drv: Driver we're iterating.
* @start: Device to begin with
@@ -177,6 +246,7 @@ int driver_register(struct device_driver *drv)
return ret;
}
kobject_uevent(&drv->p->kobj, KOBJ_ADD);
+ deferred_probe_extend_timeout();
return ret;
}
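
driver_set_override() is intended to back the per-bus driver_override sysfs attributes converted later in this series. A minimal sketch of a store callback using it; the foo_device structure and attribute are illustrative, not taken from any real bus:

struct foo_device {
        struct device dev;
        const char *driver_override;
};

static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct foo_device *fdev = container_of(dev, struct foo_device, dev);
        int ret;

        /* Handles locking, newline stripping and clearing on empty input. */
        ret = driver_set_override(dev, &fdev->driver_override, buf, count);
        if (ret)
                return ret;

        return count;
}

The matching show callback only needs to print the stored string with sysfs_emit() under device_lock(), which is why the helper caps the length at PAGE_SIZE - 1.
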
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index 38f3b66bf52b..5166b323a0f8 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -29,6 +29,9 @@ if FW_LOADER
config FW_LOADER_PAGED_BUF
bool
+config FW_LOADER_SYSFS
+ bool
+
config EXTRA_FIRMWARE
string "Build named firmware blobs into the kernel binary"
help
@@ -72,6 +75,7 @@ config EXTRA_FIRMWARE_DIR
config FW_LOADER_USER_HELPER
bool "Enable the firmware sysfs fallback mechanism"
+ select FW_LOADER_SYSFS
select FW_LOADER_PAGED_BUF
help
This option enables a sysfs loading facility to enable firmware
@@ -159,21 +163,34 @@ config FW_LOADER_USER_HELPER_FALLBACK
config FW_LOADER_COMPRESS
bool "Enable compressed firmware support"
- select FW_LOADER_PAGED_BUF
- select XZ_DEC
help
This option enables support for loading compressed firmware
files. The caller of the firmware API receives the decompressed file
content. The compressed file is loaded as a fallback, only after
loading the raw file fails.
- Currently only XZ-compressed files are supported, and they have to
- be compressed with either none or crc32 integrity check type (pass
- "-C crc32" option to xz command).
-
Compressed firmware support does not apply to firmware images
that are built into the kernel image (CONFIG_EXTRA_FIRMWARE).
+if FW_LOADER_COMPRESS
+config FW_LOADER_COMPRESS_XZ
+ bool "Enable XZ-compressed firmware support"
+ select FW_LOADER_PAGED_BUF
+ select XZ_DEC
+ default y
+ help
+ This option adds support for XZ-compressed files.
+ The files have to be compressed with either none or crc32
+ integrity check type (pass "-C crc32" option to xz command).
+
+config FW_LOADER_COMPRESS_ZSTD
+ bool "Enable ZSTD-compressed firmware support"
+ select ZSTD_DECOMPRESS
+ help
+ This option adds support for ZSTD-compressed files.
+
+endif # FW_LOADER_COMPRESS
+
config FW_CACHE
bool "Enable firmware caching during suspend"
depends on PM_SLEEP
@@ -186,5 +203,19 @@ config FW_CACHE
If unsure, say Y.
+config FW_UPLOAD
+ bool "Enable users to initiate firmware updates using sysfs"
+ select FW_LOADER_SYSFS
+ select FW_LOADER_PAGED_BUF
+ help
+ Enabling this option will allow device drivers to expose a persistent
+ sysfs interface that allows firmware updates to be initiated from
+ userspace. For example, FPGA based PCIe cards load firmware and FPGA
+ images from local FLASH when the card boots. The images in FLASH may
+ be updated with new images provided by the user. Enable this option
+ to support cards that rely on user-initiated updates for firmware files.
+
+ If unsure, say N.
+
endif # FW_LOADER
endmenu
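
From the driver side the new compression options are transparent: request_firmware() hands back the decompressed image whether the on-disk file is raw, .xz or .zst. A sketch, with the firmware name being illustrative:

#include <linux/firmware.h>

static int foo_load_firmware(struct device *dev)
{
        const struct firmware *fw;
        int ret;

        /* The loader tries "vendor/foo.bin" first and, with the options
         * above enabled, falls back to the .zst and .xz variants;
         * fw->data is always the decompressed content. */
        ret = request_firmware(&fw, "vendor/foo.bin", dev);
        if (ret)
                return ret;

        /* ... program fw->data / fw->size into the device ... */

        release_firmware(fw);
        return 0;
}
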
diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile
index e87843408fe6..60d19f9e0ddc 100644
--- a/drivers/base/firmware_loader/Makefile
+++ b/drivers/base/firmware_loader/Makefile
@@ -6,5 +6,7 @@ obj-$(CONFIG_FW_LOADER) += firmware_class.o
firmware_class-objs := main.o
firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o
firmware_class-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += fallback_platform.o
+firmware_class-$(CONFIG_FW_LOADER_SYSFS) += sysfs.o
+firmware_class-$(CONFIG_FW_UPLOAD) += sysfs_upload.o
obj-y += builtin/
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 4afb0e9312c0..bf68e3947814 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -3,12 +3,9 @@
#include <linux/types.h>
#include <linux/kconfig.h>
#include <linux/list.h>
-#include <linux/slab.h>
#include <linux/security.h>
-#include <linux/highmem.h>
#include <linux/umh.h>
#include <linux/sysctl.h>
-#include <linux/vmalloc.h>
#include <linux/module.h>
#include "fallback.h"
@@ -18,22 +15,6 @@
* firmware fallback mechanism
*/
-MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
-
-extern struct firmware_fallback_config fw_fallback_config;
-
-/* These getters are vetted to use int properly */
-static inline int __firmware_loading_timeout(void)
-{
- return fw_fallback_config.loading_timeout;
-}
-
-/* These setters are vetted to use int properly */
-static void __fw_fallback_set_timeout(int timeout)
-{
- fw_fallback_config.loading_timeout = timeout;
-}
-
/*
* use small loading timeout for caching devices' firmware because all these
* firmware images have been loaded successfully at least once, also system is
@@ -58,52 +39,11 @@ static long firmware_loading_timeout(void)
__firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
}
-static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
-{
- return __fw_state_check(fw_priv, FW_STATUS_DONE);
-}
-
-static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
-{
- return __fw_state_check(fw_priv, FW_STATUS_LOADING);
-}
-
static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
{
return __fw_state_wait_common(fw_priv, timeout);
}
-struct fw_sysfs {
- bool nowait;
- struct device dev;
- struct fw_priv *fw_priv;
- struct firmware *fw;
-};
-
-static struct fw_sysfs *to_fw_sysfs(struct device *dev)
-{
- return container_of(dev, struct fw_sysfs, dev);
-}
-
-static void __fw_load_abort(struct fw_priv *fw_priv)
-{
- /*
- * There is a small window in which user can write to 'loading'
- * between loading done/aborted and disappearance of 'loading'
- */
- if (fw_state_is_aborted(fw_priv) || fw_sysfs_done(fw_priv))
- return;
-
- fw_state_aborted(fw_priv);
-}
-
-static void fw_load_abort(struct fw_sysfs *fw_sysfs)
-{
- struct fw_priv *fw_priv = fw_sysfs->fw_priv;
-
- __fw_load_abort(fw_priv);
-}
-
static LIST_HEAD(pending_fw_head);
void kill_pending_fw_fallback_reqs(bool only_kill_custom)
@@ -120,376 +60,6 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom)
mutex_unlock(&fw_lock);
}
-static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
- char *buf)
-{
- return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
-}
-
-/**
- * timeout_store() - set number of seconds to wait for firmware
- * @class: device class pointer
- * @attr: device attribute pointer
- * @buf: buffer to scan for timeout value
- * @count: number of bytes in @buf
- *
- * Sets the number of seconds to wait for the firmware. Once
- * this expires an error will be returned to the driver and no
- * firmware will be provided.
- *
- * Note: zero means 'wait forever'.
- **/
-static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
- const char *buf, size_t count)
-{
- int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
-
- if (tmp_loading_timeout < 0)
- tmp_loading_timeout = 0;
-
- __fw_fallback_set_timeout(tmp_loading_timeout);
-
- return count;
-}
-static CLASS_ATTR_RW(timeout);
-
-static struct attribute *firmware_class_attrs[] = {
- &class_attr_timeout.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(firmware_class);
-
-static void fw_dev_release(struct device *dev)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
-
- kfree(fw_sysfs);
-}
-
-static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
-{
- if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
- return -ENOMEM;
- if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
- return -ENOMEM;
- if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
- return -ENOMEM;
-
- return 0;
-}
-
-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- int err = 0;
-
- mutex_lock(&fw_lock);
- if (fw_sysfs->fw_priv)
- err = do_firmware_uevent(fw_sysfs, env);
- mutex_unlock(&fw_lock);
- return err;
-}
-
-static struct class firmware_class = {
- .name = "firmware",
- .class_groups = firmware_class_groups,
- .dev_uevent = firmware_uevent,
- .dev_release = fw_dev_release,
-};
-
-int register_sysfs_loader(void)
-{
- int ret = class_register(&firmware_class);
-
- if (ret != 0)
- return ret;
- return register_firmware_config_sysctl();
-}
-
-void unregister_sysfs_loader(void)
-{
- unregister_firmware_config_sysctl();
- class_unregister(&firmware_class);
-}
-
-static ssize_t firmware_loading_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- int loading = 0;
-
- mutex_lock(&fw_lock);
- if (fw_sysfs->fw_priv)
- loading = fw_sysfs_loading(fw_sysfs->fw_priv);
- mutex_unlock(&fw_lock);
-
- return sysfs_emit(buf, "%d\n", loading);
-}
-
-/**
- * firmware_loading_store() - set value in the 'loading' control file
- * @dev: device pointer
- * @attr: device attribute pointer
- * @buf: buffer to scan for loading control value
- * @count: number of bytes in @buf
- *
- * The relevant values are:
- *
- * 1: Start a load, discarding any previous partial load.
- * 0: Conclude the load and hand the data to the driver code.
- * -1: Conclude the load with an error and discard any written data.
- **/
-static ssize_t firmware_loading_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- struct fw_priv *fw_priv;
- ssize_t written = count;
- int loading = simple_strtol(buf, NULL, 10);
-
- mutex_lock(&fw_lock);
- fw_priv = fw_sysfs->fw_priv;
- if (fw_state_is_aborted(fw_priv))
- goto out;
-
- switch (loading) {
- case 1:
- /* discarding any previous partial load */
- if (!fw_sysfs_done(fw_priv)) {
- fw_free_paged_buf(fw_priv);
- fw_state_start(fw_priv);
- }
- break;
- case 0:
- if (fw_sysfs_loading(fw_priv)) {
- int rc;
-
- /*
- * Several loading requests may be pending on
- * one same firmware buf, so let all requests
- * see the mapped 'buf->data' once the loading
- * is completed.
- * */
- rc = fw_map_paged_buf(fw_priv);
- if (rc)
- dev_err(dev, "%s: map pages failed\n",
- __func__);
- else
- rc = security_kernel_post_load_data(fw_priv->data,
- fw_priv->size,
- LOADING_FIRMWARE, "blob");
-
- /*
- * Same logic as fw_load_abort, only the DONE bit
- * is ignored and we set ABORT only on failure.
- */
- if (rc) {
- fw_state_aborted(fw_priv);
- written = rc;
- } else {
- fw_state_done(fw_priv);
- }
- break;
- }
- fallthrough;
- default:
- dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
- fallthrough;
- case -1:
- fw_load_abort(fw_sysfs);
- break;
- }
-out:
- mutex_unlock(&fw_lock);
- return written;
-}
-
-static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
-
-static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
- loff_t offset, size_t count, bool read)
-{
- if (read)
- memcpy(buffer, fw_priv->data + offset, count);
- else
- memcpy(fw_priv->data + offset, buffer, count);
-}
-
-static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
- loff_t offset, size_t count, bool read)
-{
- while (count) {
- void *page_data;
- int page_nr = offset >> PAGE_SHIFT;
- int page_ofs = offset & (PAGE_SIZE-1);
- int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
-
- page_data = kmap(fw_priv->pages[page_nr]);
-
- if (read)
- memcpy(buffer, page_data + page_ofs, page_cnt);
- else
- memcpy(page_data + page_ofs, buffer, page_cnt);
-
- kunmap(fw_priv->pages[page_nr]);
- buffer += page_cnt;
- offset += page_cnt;
- count -= page_cnt;
- }
-}
-
-static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- struct fw_priv *fw_priv;
- ssize_t ret_count;
-
- mutex_lock(&fw_lock);
- fw_priv = fw_sysfs->fw_priv;
- if (!fw_priv || fw_sysfs_done(fw_priv)) {
- ret_count = -ENODEV;
- goto out;
- }
- if (offset > fw_priv->size) {
- ret_count = 0;
- goto out;
- }
- if (count > fw_priv->size - offset)
- count = fw_priv->size - offset;
-
- ret_count = count;
-
- if (fw_priv->data)
- firmware_rw_data(fw_priv, buffer, offset, count, true);
- else
- firmware_rw(fw_priv, buffer, offset, count, true);
-
-out:
- mutex_unlock(&fw_lock);
- return ret_count;
-}
-
-static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
-{
- int err;
-
- err = fw_grow_paged_buf(fw_sysfs->fw_priv,
- PAGE_ALIGN(min_size) >> PAGE_SHIFT);
- if (err)
- fw_load_abort(fw_sysfs);
- return err;
-}
-
-/**
- * firmware_data_write() - write method for firmware
- * @filp: open sysfs file
- * @kobj: kobject for the device
- * @bin_attr: bin_attr structure
- * @buffer: buffer being written
- * @offset: buffer offset for write in total data store area
- * @count: buffer size
- *
- * Data written to the 'data' attribute will be later handed to
- * the driver as a firmware image.
- **/
-static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- struct fw_priv *fw_priv;
- ssize_t retval;
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- mutex_lock(&fw_lock);
- fw_priv = fw_sysfs->fw_priv;
- if (!fw_priv || fw_sysfs_done(fw_priv)) {
- retval = -ENODEV;
- goto out;
- }
-
- if (fw_priv->data) {
- if (offset + count > fw_priv->allocated_size) {
- retval = -ENOMEM;
- goto out;
- }
- firmware_rw_data(fw_priv, buffer, offset, count, false);
- retval = count;
- } else {
- retval = fw_realloc_pages(fw_sysfs, offset + count);
- if (retval)
- goto out;
-
- retval = count;
- firmware_rw(fw_priv, buffer, offset, count, false);
- }
-
- fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
-out:
- mutex_unlock(&fw_lock);
- return retval;
-}
-
-static struct bin_attribute firmware_attr_data = {
- .attr = { .name = "data", .mode = 0644 },
- .size = 0,
- .read = firmware_data_read,
- .write = firmware_data_write,
-};
-
-static struct attribute *fw_dev_attrs[] = {
- &dev_attr_loading.attr,
- NULL
-};
-
-static struct bin_attribute *fw_dev_bin_attrs[] = {
- &firmware_attr_data,
- NULL
-};
-
-static const struct attribute_group fw_dev_attr_group = {
- .attrs = fw_dev_attrs,
- .bin_attrs = fw_dev_bin_attrs,
-};
-
-static const struct attribute_group *fw_dev_attr_groups[] = {
- &fw_dev_attr_group,
- NULL
-};
-
-static struct fw_sysfs *
-fw_create_instance(struct firmware *firmware, const char *fw_name,
- struct device *device, u32 opt_flags)
-{
- struct fw_sysfs *fw_sysfs;
- struct device *f_dev;
-
- fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
- if (!fw_sysfs) {
- fw_sysfs = ERR_PTR(-ENOMEM);
- goto exit;
- }
-
- fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
- fw_sysfs->fw = firmware;
- f_dev = &fw_sysfs->dev;
-
- device_initialize(f_dev);
- dev_set_name(f_dev, "%s", fw_name);
- f_dev->parent = device;
- f_dev->class = &firmware_class;
- f_dev->groups = fw_dev_attr_groups;
-exit:
- return fw_sysfs;
-}
-
/**
* fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism
* @fw_sysfs: firmware sysfs information for the firmware to load
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index 9f3055d3b4ca..144148595660 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -6,29 +6,7 @@
#include <linux/device.h>
#include "firmware.h"
-
-/**
- * struct firmware_fallback_config - firmware fallback configuration settings
- *
- * Helps describe and fine tune the fallback mechanism.
- *
- * @force_sysfs_fallback: force the sysfs fallback mechanism to be used
- * as if one had enabled CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y.
- * Useful to help debug a CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
- * functionality on a kernel where that config entry has been disabled.
- * @ignore_sysfs_fallback: force to disable the sysfs fallback mechanism.
- * This emulates the behaviour as if we had set the kernel
- * config CONFIG_FW_LOADER_USER_HELPER=n.
- * @old_timeout: for internal use
- * @loading_timeout: the timeout to wait for the fallback mechanism before
- * giving up, in seconds.
- */
-struct firmware_fallback_config {
- unsigned int force_sysfs_fallback;
- unsigned int ignore_sysfs_fallback;
- int old_timeout;
- int loading_timeout;
-};
+#include "sysfs.h"
#ifdef CONFIG_FW_LOADER_USER_HELPER
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
@@ -40,19 +18,6 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom);
void fw_fallback_set_cache_timeout(void);
void fw_fallback_set_default_timeout(void);
-int register_sysfs_loader(void);
-void unregister_sysfs_loader(void);
-#ifdef CONFIG_SYSCTL
-extern int register_firmware_config_sysctl(void);
-extern void unregister_firmware_config_sysctl(void);
-#else
-static inline int register_firmware_config_sysctl(void)
-{
- return 0;
-}
-static inline void unregister_firmware_config_sysctl(void) { }
-#endif /* CONFIG_SYSCTL */
-
#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
@@ -66,15 +31,6 @@ static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }
static inline void fw_fallback_set_cache_timeout(void) { }
static inline void fw_fallback_set_default_timeout(void) { }
-
-static inline int register_sysfs_loader(void)
-{
- return 0;
-}
-
-static inline void unregister_sysfs_loader(void)
-{
-}
#endif /* CONFIG_FW_LOADER_USER_HELPER */
#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 2889f446ad41..fe77e91c38a2 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -87,6 +87,7 @@ struct fw_priv {
};
extern struct mutex fw_lock;
+extern struct firmware_cache fw_cache;
static inline bool __fw_state_check(struct fw_priv *fw_priv,
enum fw_status status)
@@ -149,7 +150,22 @@ static inline void fw_state_done(struct fw_priv *fw_priv)
__fw_state_set(fw_priv, FW_STATUS_DONE);
}
+static inline bool fw_state_is_done(struct fw_priv *fw_priv)
+{
+ return __fw_state_check(fw_priv, FW_STATUS_DONE);
+}
+
+static inline bool fw_state_is_loading(struct fw_priv *fw_priv)
+{
+ return __fw_state_check(fw_priv, FW_STATUS_LOADING);
+}
+
+int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc,
+ struct fw_priv **fw_priv, void *dbuf, size_t size,
+ size_t offset, u32 opt_flags);
int assign_fw(struct firmware *fw, struct device *device);
+void free_fw_priv(struct fw_priv *fw_priv);
+void fw_state_init(struct fw_priv *fw_priv);
#ifdef CONFIG_FW_LOADER
bool firmware_is_builtin(const struct firmware *fw);
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 406a907a4cae..ac3f34e80194 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -35,6 +35,7 @@
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
+#include <linux/zstd.h>
#include <linux/xz.h>
#include <generated/utsrelease.h>
@@ -91,9 +92,9 @@ static inline struct fw_priv *to_fw_priv(struct kref *ref)
* guarding for corner cases a global lock should be OK */
DEFINE_MUTEX(fw_lock);
-static struct firmware_cache fw_cache;
+struct firmware_cache fw_cache;
-static void fw_state_init(struct fw_priv *fw_priv)
+void fw_state_init(struct fw_priv *fw_priv)
{
struct fw_state *fw_st = &fw_priv->fw_st;
@@ -163,13 +164,9 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
}
/* Returns 1 for batching firmware requests with the same name */
-static int alloc_lookup_fw_priv(const char *fw_name,
- struct firmware_cache *fwc,
- struct fw_priv **fw_priv,
- void *dbuf,
- size_t size,
- size_t offset,
- u32 opt_flags)
+int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc,
+ struct fw_priv **fw_priv, void *dbuf, size_t size,
+ size_t offset, u32 opt_flags)
{
struct fw_priv *tmp;
@@ -224,7 +221,7 @@ static void __free_fw_priv(struct kref *ref)
kfree(fw_priv);
}
-static void free_fw_priv(struct fw_priv *fw_priv)
+void free_fw_priv(struct fw_priv *fw_priv)
{
struct firmware_cache *fwc = fw_priv->fwc;
spin_lock(&fwc->lock);
@@ -253,6 +250,8 @@ void fw_free_paged_buf(struct fw_priv *fw_priv)
fw_priv->pages = NULL;
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
+ fw_priv->data = NULL;
+ fw_priv->size = 0;
}
int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed)
@@ -305,9 +304,73 @@ int fw_map_paged_buf(struct fw_priv *fw_priv)
#endif
/*
+ * ZSTD-compressed firmware support
+ */
+#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
+static int fw_decompress_zstd(struct device *dev, struct fw_priv *fw_priv,
+ size_t in_size, const void *in_buffer)
+{
+ size_t len, out_size, workspace_size;
+ void *workspace, *out_buf;
+ zstd_dctx *ctx;
+ int err;
+
+ if (fw_priv->allocated_size) {
+ out_size = fw_priv->allocated_size;
+ out_buf = fw_priv->data;
+ } else {
+ zstd_frame_header params;
+
+ if (zstd_get_frame_header(&params, in_buffer, in_size) ||
+ params.frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN) {
+ dev_dbg(dev, "%s: invalid zstd header\n", __func__);
+ return -EINVAL;
+ }
+ out_size = params.frameContentSize;
+ out_buf = vzalloc(out_size);
+ if (!out_buf)
+ return -ENOMEM;
+ }
+
+ workspace_size = zstd_dctx_workspace_bound();
+ workspace = kvzalloc(workspace_size, GFP_KERNEL);
+ if (!workspace) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ctx = zstd_init_dctx(workspace, workspace_size);
+ if (!ctx) {
+ dev_dbg(dev, "%s: failed to initialize context\n", __func__);
+ err = -EINVAL;
+ goto error;
+ }
+
+ len = zstd_decompress_dctx(ctx, out_buf, out_size, in_buffer, in_size);
+ if (zstd_is_error(len)) {
+ dev_dbg(dev, "%s: failed to decompress: %d\n", __func__,
+ zstd_get_error_code(len));
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (!fw_priv->allocated_size)
+ fw_priv->data = out_buf;
+ fw_priv->size = len;
+ err = 0;
+
+ error:
+ kvfree(workspace);
+ if (err && !fw_priv->allocated_size)
+ vfree(out_buf);
+ return err;
+}
+#endif /* CONFIG_FW_LOADER_COMPRESS_ZSTD */
+
+/*
* XZ-compressed firmware support
*/
-#ifdef CONFIG_FW_LOADER_COMPRESS
+#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
/* show an error and return the standard error code */
static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret)
{
@@ -401,7 +464,7 @@ static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
else
return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer);
}
-#endif /* CONFIG_FW_LOADER_COMPRESS */
+#endif /* CONFIG_FW_LOADER_COMPRESS_XZ */
/* direct firmware loading support */
static char fw_path_para[256];
@@ -771,7 +834,12 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (!(opt_flags & FW_OPT_PARTIAL))
nondirect = true;
-#ifdef CONFIG_FW_LOADER_COMPRESS
+#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
+ if (ret == -ENOENT && nondirect)
+ ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
+ fw_decompress_zstd);
+#endif
+#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
if (ret == -ENOENT && nondirect)
ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
fw_decompress_xz);
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
new file mode 100644
index 000000000000..5b0b85b70b6f
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/security.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "sysfs.h"
+
+/*
+ * sysfs support for firmware loader
+ */
+
+void __fw_load_abort(struct fw_priv *fw_priv)
+{
+ /*
+ * There is a small window in which user can write to 'loading'
+ * between loading done/aborted and disappearance of 'loading'
+ */
+ if (fw_state_is_aborted(fw_priv) || fw_state_is_done(fw_priv))
+ return;
+
+ fw_state_aborted(fw_priv);
+}
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
+}
+
+/**
+ * timeout_store() - set number of seconds to wait for firmware
+ * @class: device class pointer
+ * @attr: device attribute pointer
+ * @buf: buffer to scan for timeout value
+ * @count: number of bytes in @buf
+ *
+ * Sets the number of seconds to wait for the firmware. Once
+ * this expires an error will be returned to the driver and no
+ * firmware will be provided.
+ *
+ * Note: zero means 'wait forever'.
+ **/
+static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
+
+ if (tmp_loading_timeout < 0)
+ tmp_loading_timeout = 0;
+
+ __fw_fallback_set_timeout(tmp_loading_timeout);
+
+ return count;
+}
+static CLASS_ATTR_RW(timeout);
+
+static struct attribute *firmware_class_attrs[] = {
+ &class_attr_timeout.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(firmware_class);
+
+static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
+{
+ if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
+ return -ENOMEM;
+ if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
+ return -ENOMEM;
+ if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ int err = 0;
+
+ mutex_lock(&fw_lock);
+ if (fw_sysfs->fw_priv)
+ err = do_firmware_uevent(fw_sysfs, env);
+ mutex_unlock(&fw_lock);
+ return err;
+}
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
+
+static void fw_dev_release(struct device *dev)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+
+ if (fw_sysfs->fw_upload_priv) {
+ free_fw_priv(fw_sysfs->fw_priv);
+ kfree(fw_sysfs->fw_upload_priv);
+ }
+ kfree(fw_sysfs);
+}
+
+static struct class firmware_class = {
+ .name = "firmware",
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ .class_groups = firmware_class_groups,
+ .dev_uevent = firmware_uevent,
+#endif
+ .dev_release = fw_dev_release,
+};
+
+int register_sysfs_loader(void)
+{
+ int ret = class_register(&firmware_class);
+
+ if (ret != 0)
+ return ret;
+ return register_firmware_config_sysctl();
+}
+
+void unregister_sysfs_loader(void)
+{
+ unregister_firmware_config_sysctl();
+ class_unregister(&firmware_class);
+}
+
+static ssize_t firmware_loading_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ int loading = 0;
+
+ mutex_lock(&fw_lock);
+ if (fw_sysfs->fw_priv)
+ loading = fw_state_is_loading(fw_sysfs->fw_priv);
+ mutex_unlock(&fw_lock);
+
+ return sysfs_emit(buf, "%d\n", loading);
+}
+
+/**
+ * firmware_loading_store() - set value in the 'loading' control file
+ * @dev: device pointer
+ * @attr: device attribute pointer
+ * @buf: buffer to scan for loading control value
+ * @count: number of bytes in @buf
+ *
+ * The relevant values are:
+ *
+ * 1: Start a load, discarding any previous partial load.
+ * 0: Conclude the load and hand the data to the driver code.
+ * -1: Conclude the load with an error and discard any written data.
+ **/
+static ssize_t firmware_loading_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
+ ssize_t written = count;
+ int loading = simple_strtol(buf, NULL, 10);
+
+ mutex_lock(&fw_lock);
+ fw_priv = fw_sysfs->fw_priv;
+ if (fw_state_is_aborted(fw_priv) || fw_state_is_done(fw_priv))
+ goto out;
+
+ switch (loading) {
+ case 1:
+ /* discarding any previous partial load */
+ fw_free_paged_buf(fw_priv);
+ fw_state_start(fw_priv);
+ break;
+ case 0:
+ if (fw_state_is_loading(fw_priv)) {
+ int rc;
+
+ /*
+ * Several loading requests may be pending on
+ * the same firmware buffer, so let all requests
+ * see the mapped 'buf->data' once the loading
+ * is completed.
+ */
+ rc = fw_map_paged_buf(fw_priv);
+ if (rc)
+ dev_err(dev, "%s: map pages failed\n",
+ __func__);
+ else
+ rc = security_kernel_post_load_data(fw_priv->data,
+ fw_priv->size,
+ LOADING_FIRMWARE,
+ "blob");
+
+ /*
+ * Same logic as fw_load_abort, only the DONE bit
+ * is ignored and we set ABORT only on failure.
+ */
+ if (rc) {
+ fw_state_aborted(fw_priv);
+ written = rc;
+ } else {
+ fw_state_done(fw_priv);
+
+ /*
+ * If this is a user-initiated firmware upload
+ * then start the upload in a worker thread now.
+ */
+ rc = fw_upload_start(fw_sysfs);
+ if (rc)
+ written = rc;
+ }
+ break;
+ }
+ fallthrough;
+ default:
+ dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
+ fallthrough;
+ case -1:
+ fw_load_abort(fw_sysfs);
+ if (fw_sysfs->fw_upload_priv)
+ fw_state_init(fw_sysfs->fw_priv);
+
+ break;
+ }
+out:
+ mutex_unlock(&fw_lock);
+ return written;
+}
+
+DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
+
+static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
+ loff_t offset, size_t count, bool read)
+{
+ if (read)
+ memcpy(buffer, fw_priv->data + offset, count);
+ else
+ memcpy(fw_priv->data + offset, buffer, count);
+}
+
+static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
+ loff_t offset, size_t count, bool read)
+{
+ while (count) {
+ void *page_data;
+ int page_nr = offset >> PAGE_SHIFT;
+ int page_ofs = offset & (PAGE_SIZE - 1);
+ int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
+
+ page_data = kmap(fw_priv->pages[page_nr]);
+
+ if (read)
+ memcpy(buffer, page_data + page_ofs, page_cnt);
+ else
+ memcpy(page_data + page_ofs, buffer, page_cnt);
+
+ kunmap(fw_priv->pages[page_nr]);
+ buffer += page_cnt;
+ offset += page_cnt;
+ count -= page_cnt;
+ }
+}
+
+static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
+ ssize_t ret_count;
+
+ mutex_lock(&fw_lock);
+ fw_priv = fw_sysfs->fw_priv;
+ if (!fw_priv || fw_state_is_done(fw_priv)) {
+ ret_count = -ENODEV;
+ goto out;
+ }
+ if (offset > fw_priv->size) {
+ ret_count = 0;
+ goto out;
+ }
+ if (count > fw_priv->size - offset)
+ count = fw_priv->size - offset;
+
+ ret_count = count;
+
+ if (fw_priv->data)
+ firmware_rw_data(fw_priv, buffer, offset, count, true);
+ else
+ firmware_rw(fw_priv, buffer, offset, count, true);
+
+out:
+ mutex_unlock(&fw_lock);
+ return ret_count;
+}
+
+static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
+{
+ int err;
+
+ err = fw_grow_paged_buf(fw_sysfs->fw_priv,
+ PAGE_ALIGN(min_size) >> PAGE_SHIFT);
+ if (err)
+ fw_load_abort(fw_sysfs);
+ return err;
+}
+
+/**
+ * firmware_data_write() - write method for firmware
+ * @filp: open sysfs file
+ * @kobj: kobject for the device
+ * @bin_attr: bin_attr structure
+ * @buffer: buffer being written
+ * @offset: buffer offset for write in total data store area
+ * @count: buffer size
+ *
+ * Data written to the 'data' attribute will be later handed to
+ * the driver as a firmware image.
+ **/
+static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
+ ssize_t retval;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ mutex_lock(&fw_lock);
+ fw_priv = fw_sysfs->fw_priv;
+ if (!fw_priv || fw_state_is_done(fw_priv)) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ if (fw_priv->data) {
+ if (offset + count > fw_priv->allocated_size) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ firmware_rw_data(fw_priv, buffer, offset, count, false);
+ retval = count;
+ } else {
+ retval = fw_realloc_pages(fw_sysfs, offset + count);
+ if (retval)
+ goto out;
+
+ retval = count;
+ firmware_rw(fw_priv, buffer, offset, count, false);
+ }
+
+ fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
+out:
+ mutex_unlock(&fw_lock);
+ return retval;
+}
+
+static struct bin_attribute firmware_attr_data = {
+ .attr = { .name = "data", .mode = 0644 },
+ .size = 0,
+ .read = firmware_data_read,
+ .write = firmware_data_write,
+};
+
+static struct attribute *fw_dev_attrs[] = {
+ &dev_attr_loading.attr,
+#ifdef CONFIG_FW_UPLOAD
+ &dev_attr_cancel.attr,
+ &dev_attr_status.attr,
+ &dev_attr_error.attr,
+ &dev_attr_remaining_size.attr,
+#endif
+ NULL
+};
+
+static struct bin_attribute *fw_dev_bin_attrs[] = {
+ &firmware_attr_data,
+ NULL
+};
+
+static const struct attribute_group fw_dev_attr_group = {
+ .attrs = fw_dev_attrs,
+ .bin_attrs = fw_dev_bin_attrs,
+#ifdef CONFIG_FW_UPLOAD
+ .is_visible = fw_upload_is_visible,
+#endif
+};
+
+static const struct attribute_group *fw_dev_attr_groups[] = {
+ &fw_dev_attr_group,
+ NULL
+};
+
+struct fw_sysfs *
+fw_create_instance(struct firmware *firmware, const char *fw_name,
+ struct device *device, u32 opt_flags)
+{
+ struct fw_sysfs *fw_sysfs;
+ struct device *f_dev;
+
+ fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
+ if (!fw_sysfs) {
+ fw_sysfs = ERR_PTR(-ENOMEM);
+ goto exit;
+ }
+
+ fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
+ fw_sysfs->fw = firmware;
+ f_dev = &fw_sysfs->dev;
+
+ device_initialize(f_dev);
+ dev_set_name(f_dev, "%s", fw_name);
+ f_dev->parent = device;
+ f_dev->class = &firmware_class;
+ f_dev->groups = fw_dev_attr_groups;
+exit:
+ return fw_sysfs;
+}
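
The loading/data protocol documented above is driven from user space. A hedged sketch of a fallback helper, with error handling trimmed for brevity; dir is the /sys/class/firmware/<device>/ directory created for a pending request:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int feed_firmware(const char *dir, const char *image)
{
        char path[256], buf[4096];
        int loading, data, fw;
        ssize_t n;

        snprintf(path, sizeof(path), "%s/loading", dir);
        loading = open(path, O_WRONLY);
        snprintf(path, sizeof(path), "%s/data", dir);
        data = open(path, O_WRONLY);
        fw = open(image, O_RDONLY);
        if (loading < 0 || data < 0 || fw < 0)
                return -1;       /* cleanup omitted in this sketch */

        write(loading, "1", 1);  /* start, discard any partial load */
        while ((n = read(fw, buf, sizeof(buf))) > 0)
                write(data, buf, n);
        if (n < 0)
                write(loading, "-1", 2); /* abort and discard the data */
        else
                write(loading, "0", 1);  /* hand the data to the driver */

        close(fw);
        close(data);
        close(loading);
        return n < 0 ? -1 : 0;
}
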
diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h
new file mode 100644
index 000000000000..5d8ff1675c79
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __FIRMWARE_SYSFS_H
+#define __FIRMWARE_SYSFS_H
+
+#include <linux/device.h>
+
+#include "firmware.h"
+
+MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
+
+extern struct firmware_fallback_config fw_fallback_config;
+extern struct device_attribute dev_attr_loading;
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+/**
+ * struct firmware_fallback_config - firmware fallback configuration settings
+ *
+ * Helps describe and fine tune the fallback mechanism.
+ *
+ * @force_sysfs_fallback: force the sysfs fallback mechanism to be used
+ * as if one had enabled CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y.
+ * Useful to help debug a CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+ * functionality on a kernel where that config entry has been disabled.
+ * @ignore_sysfs_fallback: force to disable the sysfs fallback mechanism.
+ * This emulates the behaviour as if we had set the kernel
+ * config CONFIG_FW_LOADER_USER_HELPER=n.
+ * @old_timeout: for internal use
+ * @loading_timeout: the timeout to wait for the fallback mechanism before
+ * giving up, in seconds.
+ */
+struct firmware_fallback_config {
+ unsigned int force_sysfs_fallback;
+ unsigned int ignore_sysfs_fallback;
+ int old_timeout;
+ int loading_timeout;
+};
+
+/* These getters are vetted to use int properly */
+static inline int __firmware_loading_timeout(void)
+{
+ return fw_fallback_config.loading_timeout;
+}
+
+/* These setters are vetted to use int properly */
+static inline void __fw_fallback_set_timeout(int timeout)
+{
+ fw_fallback_config.loading_timeout = timeout;
+}
+#endif
+
+#ifdef CONFIG_FW_LOADER_SYSFS
+int register_sysfs_loader(void);
+void unregister_sysfs_loader(void);
+#if defined(CONFIG_FW_LOADER_USER_HELPER) && defined(CONFIG_SYSCTL)
+int register_firmware_config_sysctl(void);
+void unregister_firmware_config_sysctl(void);
+#else
+static inline int register_firmware_config_sysctl(void)
+{
+ return 0;
+}
+
+static inline void unregister_firmware_config_sysctl(void) { }
+#endif /* CONFIG_FW_LOADER_USER_HELPER && CONFIG_SYSCTL */
+#else /* CONFIG_FW_LOADER_SYSFS */
+static inline int register_sysfs_loader(void)
+{
+ return 0;
+}
+
+static inline void unregister_sysfs_loader(void)
+{
+}
+#endif /* CONFIG_FW_LOADER_SYSFS */
+
+struct fw_sysfs {
+ bool nowait;
+ struct device dev;
+ struct fw_priv *fw_priv;
+ struct firmware *fw;
+ void *fw_upload_priv;
+};
+
+static inline struct fw_sysfs *to_fw_sysfs(struct device *dev)
+{
+ return container_of(dev, struct fw_sysfs, dev);
+}
+
+void __fw_load_abort(struct fw_priv *fw_priv);
+
+static inline void fw_load_abort(struct fw_sysfs *fw_sysfs)
+{
+ struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+
+ __fw_load_abort(fw_priv);
+}
+
+struct fw_sysfs *
+fw_create_instance(struct firmware *firmware, const char *fw_name,
+ struct device *device, u32 opt_flags);
+
+#ifdef CONFIG_FW_UPLOAD
+extern struct device_attribute dev_attr_status;
+extern struct device_attribute dev_attr_error;
+extern struct device_attribute dev_attr_cancel;
+extern struct device_attribute dev_attr_remaining_size;
+
+int fw_upload_start(struct fw_sysfs *fw_sysfs);
+umode_t fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n);
+#else
+static inline int fw_upload_start(struct fw_sysfs *fw_sysfs)
+{
+ return 0;
+}
+#endif
+
+#endif /* __FIRMWARE_SYSFS_H */
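
The new fw_upload worker drives a set of driver callbacks: prepare, write, poll_complete, cancel and an optional cleanup. A sketch of what such an ops table looks like on the driver side; the prototypes follow the calls made in fw_upload_main() below, while the registration helper that consumes the table is added later in the series, so the surrounding details are assumptions:

#include <linux/firmware.h>

static enum fw_upload_err foo_prepare(struct fw_upload *fwl,
                                      const u8 *data, u32 size)
{
        /* Validate the image and get the device ready for an update. */
        return size ? FW_UPLOAD_ERR_NONE : FW_UPLOAD_ERR_INVALID_SIZE;
}

static enum fw_upload_err foo_write(struct fw_upload *fwl, const u8 *data,
                                    u32 offset, u32 size, u32 *written)
{
        /* Push data[offset..offset+size) to the device; report progress
         * through *written so the worker can account remaining_size. */
        *written = size;
        return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err foo_poll_complete(struct fw_upload *fwl)
{
        /* Wait for the device to finish programming the new image. */
        return FW_UPLOAD_ERR_NONE;
}

static void foo_cancel(struct fw_upload *fwl)
{
        /* Ask the hardware to abort the in-flight update. */
}

static const struct fw_upload_ops foo_fw_ops = {
        .prepare       = foo_prepare,
        .write         = foo_write,
        .poll_complete = foo_poll_complete,
        .cancel        = foo_cancel,
};
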
diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
new file mode 100644
index 000000000000..87044d52322a
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs_upload.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "sysfs_upload.h"
+
+/*
+ * Support for user-space to initiate a firmware upload to a device.
+ */
+
+static const char * const fw_upload_prog_str[] = {
+ [FW_UPLOAD_PROG_IDLE] = "idle",
+ [FW_UPLOAD_PROG_RECEIVING] = "receiving",
+ [FW_UPLOAD_PROG_PREPARING] = "preparing",
+ [FW_UPLOAD_PROG_TRANSFERRING] = "transferring",
+ [FW_UPLOAD_PROG_PROGRAMMING] = "programming"
+};
+
+static const char * const fw_upload_err_str[] = {
+ [FW_UPLOAD_ERR_NONE] = "none",
+ [FW_UPLOAD_ERR_HW_ERROR] = "hw-error",
+ [FW_UPLOAD_ERR_TIMEOUT] = "timeout",
+ [FW_UPLOAD_ERR_CANCELED] = "user-abort",
+ [FW_UPLOAD_ERR_BUSY] = "device-busy",
+ [FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size",
+ [FW_UPLOAD_ERR_RW_ERROR] = "read-write-error",
+ [FW_UPLOAD_ERR_WEAROUT] = "flash-wearout",
+};
+
+static const char *fw_upload_progress(struct device *dev,
+ enum fw_upload_prog prog)
+{
+ const char *status = "unknown-status";
+
+ if (prog < FW_UPLOAD_PROG_MAX)
+ status = fw_upload_prog_str[prog];
+ else
+ dev_err(dev, "Invalid status during secure update: %d\n", prog);
+
+ return status;
+}
+
+static const char *fw_upload_error(struct device *dev,
+ enum fw_upload_err err_code)
+{
+ const char *error = "unknown-error";
+
+ if (err_code < FW_UPLOAD_ERR_MAX)
+ error = fw_upload_err_str[err_code];
+ else
+ dev_err(dev, "Invalid error code during secure update: %d\n",
+ err_code);
+
+ return error;
+}
+
+static ssize_t
+status_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+
+ return sysfs_emit(buf, "%s\n", fw_upload_progress(dev, fwlp->progress));
+}
+DEVICE_ATTR_RO(status);
+
+static ssize_t
+error_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+ int ret;
+
+ mutex_lock(&fwlp->lock);
+
+ if (fwlp->progress != FW_UPLOAD_PROG_IDLE)
+ ret = -EBUSY;
+ else if (!fwlp->err_code)
+ ret = 0;
+ else
+ ret = sysfs_emit(buf, "%s:%s\n",
+ fw_upload_progress(dev, fwlp->err_progress),
+ fw_upload_error(dev, fwlp->err_code));
+
+ mutex_unlock(&fwlp->lock);
+
+ return ret;
+}
+DEVICE_ATTR_RO(error);
+
+static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+ int ret = count;
+ bool cancel;
+
+ if (kstrtobool(buf, &cancel) || !cancel)
+ return -EINVAL;
+
+ mutex_lock(&fwlp->lock);
+ if (fwlp->progress == FW_UPLOAD_PROG_IDLE)
+ ret = -ENODEV;
+
+ fwlp->ops->cancel(fwlp->fw_upload);
+ mutex_unlock(&fwlp->lock);
+
+ return ret;
+}
+DEVICE_ATTR_WO(cancel);
+
+static ssize_t remaining_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+
+ return sysfs_emit(buf, "%u\n", fwlp->remaining_size);
+}
+DEVICE_ATTR_RO(remaining_size);
+
+umode_t
+fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ struct fw_sysfs *fw_sysfs;
+
+ fw_sysfs = to_fw_sysfs(kobj_to_dev(kobj));
+
+ if (fw_sysfs->fw_upload_priv || attr == &dev_attr_loading.attr)
+ return attr->mode;
+
+ return 0;
+}
+
+static void fw_upload_update_progress(struct fw_upload_priv *fwlp,
+ enum fw_upload_prog new_progress)
+{
+ mutex_lock(&fwlp->lock);
+ fwlp->progress = new_progress;
+ mutex_unlock(&fwlp->lock);
+}
+
+static void fw_upload_set_error(struct fw_upload_priv *fwlp,
+ enum fw_upload_err err_code)
+{
+ mutex_lock(&fwlp->lock);
+ fwlp->err_progress = fwlp->progress;
+ fwlp->err_code = err_code;
+ mutex_unlock(&fwlp->lock);
+}
+
+static void fw_upload_prog_complete(struct fw_upload_priv *fwlp)
+{
+ mutex_lock(&fwlp->lock);
+ fwlp->progress = FW_UPLOAD_PROG_IDLE;
+ mutex_unlock(&fwlp->lock);
+}
+
+static void fw_upload_main(struct work_struct *work)
+{
+ struct fw_upload_priv *fwlp;
+ struct fw_sysfs *fw_sysfs;
+ u32 written = 0, offset = 0;
+ enum fw_upload_err ret;
+ struct device *fw_dev;
+ struct fw_upload *fwl;
+
+ fwlp = container_of(work, struct fw_upload_priv, work);
+ fwl = fwlp->fw_upload;
+ fw_sysfs = (struct fw_sysfs *)fwl->priv;
+ fw_dev = &fw_sysfs->dev;
+
+ fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_PREPARING);
+ ret = fwlp->ops->prepare(fwl, fwlp->data, fwlp->remaining_size);
+ if (ret != FW_UPLOAD_ERR_NONE) {
+ fw_upload_set_error(fwlp, ret);
+ goto putdev_exit;
+ }
+
+ fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_TRANSFERRING);
+ while (fwlp->remaining_size) {
+ ret = fwlp->ops->write(fwl, fwlp->data, offset,
+ fwlp->remaining_size, &written);
+ if (ret != FW_UPLOAD_ERR_NONE || !written) {
+ if (ret == FW_UPLOAD_ERR_NONE) {
+ dev_warn(fw_dev, "write-op wrote zero data\n");
+ ret = FW_UPLOAD_ERR_RW_ERROR;
+ }
+ fw_upload_set_error(fwlp, ret);
+ goto done;
+ }
+
+ fwlp->remaining_size -= written;
+ offset += written;
+ }
+
+ fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_PROGRAMMING);
+ ret = fwlp->ops->poll_complete(fwl);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ fw_upload_set_error(fwlp, ret);
+
+done:
+ if (fwlp->ops->cleanup)
+ fwlp->ops->cleanup(fwl);
+
+putdev_exit:
+ put_device(fw_dev->parent);
+
+ /*
+ * Note: fwlp->remaining_size is left unmodified here to provide
+ * additional information on errors. It will be reinitialized when
+ * the next firmware upload begins.
+ */
+ mutex_lock(&fw_lock);
+ fw_free_paged_buf(fw_sysfs->fw_priv);
+ fw_state_init(fw_sysfs->fw_priv);
+ mutex_unlock(&fw_lock);
+ fwlp->data = NULL;
+ fw_upload_prog_complete(fwlp);
+}
+
+/*
+ * Start a worker thread to upload data to the parent driver.
+ * Must be called with fw_lock held.
+ */
+int fw_upload_start(struct fw_sysfs *fw_sysfs)
+{
+ struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+ struct device *fw_dev = &fw_sysfs->dev;
+ struct fw_upload_priv *fwlp;
+
+ if (!fw_sysfs->fw_upload_priv)
+ return 0;
+
+ if (!fw_priv->size) {
+ fw_free_paged_buf(fw_priv);
+ fw_state_init(fw_sysfs->fw_priv);
+ return 0;
+ }
+
+ fwlp = fw_sysfs->fw_upload_priv;
+ mutex_lock(&fwlp->lock);
+
+ /* Do not interfere with an on-going fw_upload */
+ if (fwlp->progress != FW_UPLOAD_PROG_IDLE) {
+ mutex_unlock(&fwlp->lock);
+ return -EBUSY;
+ }
+
+ get_device(fw_dev->parent); /* released in fw_upload_main */
+
+ fwlp->progress = FW_UPLOAD_PROG_RECEIVING;
+ fwlp->err_code = 0;
+ fwlp->remaining_size = fw_priv->size;
+ fwlp->data = fw_priv->data;
+
+ pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
+ __func__, fw_priv->fw_name,
+ fw_priv, fw_priv->data,
+ (unsigned int)fw_priv->size);
+
+ queue_work(system_long_wq, &fwlp->work);
+ mutex_unlock(&fwlp->lock);
+
+ return 0;
+}
+
+/**
+ * firmware_upload_register() - register for the firmware upload sysfs API
+ * @module: kernel module of this device
+ * @parent: parent device instantiating firmware upload
+ * @name: firmware name to be associated with this device
+ * @ops: pointer to structure of firmware upload ops
+ * @dd_handle: pointer to parent driver private data
+ *
+ * @name must be unique among all users of firmware upload. The firmware
+ * sysfs files for this device will be found at /sys/class/firmware/@name.
+ *
+ * Return: struct fw_upload pointer or ERR_PTR()
+ *
+ **/
+struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle)
+{
+ u32 opt_flags = FW_OPT_NOCACHE;
+ struct fw_upload *fw_upload;
+ struct fw_upload_priv *fw_upload_priv;
+ struct fw_sysfs *fw_sysfs;
+ struct fw_priv *fw_priv;
+ struct device *fw_dev;
+ int ret;
+
+ if (!name || name[0] == '\0')
+ return ERR_PTR(-EINVAL);
+
+ if (!ops || !ops->cancel || !ops->prepare ||
+ !ops->write || !ops->poll_complete) {
+ dev_err(parent, "Attempt to register without all required ops\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!try_module_get(module))
+ return ERR_PTR(-EFAULT);
+
+ fw_upload = kzalloc(sizeof(*fw_upload), GFP_KERNEL);
+ if (!fw_upload) {
+ ret = -ENOMEM;
+ goto exit_module_put;
+ }
+
+ fw_upload_priv = kzalloc(sizeof(*fw_upload_priv), GFP_KERNEL);
+ if (!fw_upload_priv) {
+ ret = -ENOMEM;
+ goto free_fw_upload;
+ }
+
+ fw_upload_priv->fw_upload = fw_upload;
+ fw_upload_priv->ops = ops;
+ mutex_init(&fw_upload_priv->lock);
+ fw_upload_priv->module = module;
+ fw_upload_priv->name = name;
+ fw_upload_priv->err_code = 0;
+ fw_upload_priv->progress = FW_UPLOAD_PROG_IDLE;
+ INIT_WORK(&fw_upload_priv->work, fw_upload_main);
+ fw_upload->dd_handle = dd_handle;
+
+ fw_sysfs = fw_create_instance(NULL, name, parent, opt_flags);
+ if (IS_ERR(fw_sysfs)) {
+ ret = PTR_ERR(fw_sysfs);
+ goto free_fw_upload_priv;
+ }
+ fw_upload->priv = fw_sysfs;
+ fw_sysfs->fw_upload_priv = fw_upload_priv;
+ fw_dev = &fw_sysfs->dev;
+
+ ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, NULL, 0, 0,
+ FW_OPT_NOCACHE);
+ if (ret != 0) {
+ if (ret > 0)
+ ret = -EINVAL;
+ goto free_fw_sysfs;
+ }
+ fw_priv->is_paged_buf = true;
+ fw_sysfs->fw_priv = fw_priv;
+
+ ret = device_add(fw_dev);
+ if (ret) {
+ dev_err(fw_dev, "%s: device_register failed\n", __func__);
+ put_device(fw_dev);
+ goto exit_module_put;
+ }
+
+ return fw_upload;
+
+free_fw_sysfs:
+ kfree(fw_sysfs);
+
+free_fw_upload_priv:
+ kfree(fw_upload_priv);
+
+free_fw_upload:
+ kfree(fw_upload);
+
+exit_module_put:
+ module_put(module);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(firmware_upload_register);
+
+/**
+ * firmware_upload_unregister() - Unregister firmware upload interface
+ * @fw_upload: pointer to struct fw_upload
+ **/
+void firmware_upload_unregister(struct fw_upload *fw_upload)
+{
+ struct fw_sysfs *fw_sysfs = fw_upload->priv;
+ struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
+
+ mutex_lock(&fw_upload_priv->lock);
+ if (fw_upload_priv->progress == FW_UPLOAD_PROG_IDLE) {
+ mutex_unlock(&fw_upload_priv->lock);
+ goto unregister;
+ }
+
+ fw_upload_priv->ops->cancel(fw_upload);
+ mutex_unlock(&fw_upload_priv->lock);
+
+ /* Ensure lower-level device-driver is finished */
+ flush_work(&fw_upload_priv->work);
+
+unregister:
+ device_unregister(&fw_sysfs->dev);
+ module_put(fw_upload_priv->module);
+}
+EXPORT_SYMBOL_GPL(firmware_upload_unregister);
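For illustration, a minimal, hedged sketch of how a device driver might consume the firmware_upload_register()/fw_upload_ops API added above. All driver-side identifiers (my_fw_*, my_priv) are hypothetical and not part of this patch; only the callback shapes and registration calls visible in the diff are assumed.

/* Hypothetical consumer of the firmware upload API (sketch only). */
#include <linux/firmware.h>
#include <linux/module.h>

static enum fw_upload_err my_fw_prepare(struct fw_upload *fwl,
					const u8 *data, u32 size)
{
	/* Validate the image and put the device into update mode. */
	return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err my_fw_write(struct fw_upload *fwl, const u8 *data,
				      u32 offset, u32 size, u32 *written)
{
	/* Push the next chunk to the device and report how much was taken. */
	*written = size;
	return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err my_fw_poll_complete(struct fw_upload *fwl)
{
	/* Wait for the device to finish programming the image. */
	return FW_UPLOAD_ERR_NONE;
}

static void my_fw_cancel(struct fw_upload *fwl)
{
	/* Ask the device to abort an in-progress update. */
}

static const struct fw_upload_ops my_fw_ops = {
	.prepare	= my_fw_prepare,
	.write		= my_fw_write,
	.poll_complete	= my_fw_poll_complete,
	.cancel		= my_fw_cancel,
};

/*
 * In probe():  fwl = firmware_upload_register(THIS_MODULE, dev, "my-fw",
 *                                             &my_fw_ops, my_priv);
 * In remove(): firmware_upload_unregister(fwl);
 * Userspace then drives the upload through /sys/class/firmware/my-fw/.
 */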
diff --git a/drivers/base/firmware_loader/sysfs_upload.h b/drivers/base/firmware_loader/sysfs_upload.h
new file mode 100644
index 000000000000..31931ff7808a
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs_upload.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SYSFS_UPLOAD_H
+#define __SYSFS_UPLOAD_H
+
+#include <linux/device.h>
+
+#include "sysfs.h"
+
+/**
+ * enum fw_upload_prog - firmware upload progress codes
+ * @FW_UPLOAD_PROG_IDLE: there is no firmware upload in progress
+ * @FW_UPLOAD_PROG_RECEIVING: worker thread is receiving firmware data
+ * @FW_UPLOAD_PROG_PREPARING: target device is preparing for firmware upload
+ * @FW_UPLOAD_PROG_TRANSFERRING: data is being copied to the device
+ * @FW_UPLOAD_PROG_PROGRAMMING: device is performing the firmware update
+ * @FW_UPLOAD_PROG_MAX: Maximum progress code marker
+ */
+enum fw_upload_prog {
+ FW_UPLOAD_PROG_IDLE,
+ FW_UPLOAD_PROG_RECEIVING,
+ FW_UPLOAD_PROG_PREPARING,
+ FW_UPLOAD_PROG_TRANSFERRING,
+ FW_UPLOAD_PROG_PROGRAMMING,
+ FW_UPLOAD_PROG_MAX
+};
+
+struct fw_upload_priv {
+ struct fw_upload *fw_upload;
+ struct module *module;
+ const char *name;
+ const struct fw_upload_ops *ops;
+ struct mutex lock; /* protect data structure contents */
+ struct work_struct work;
+ const u8 *data; /* pointer to update data */
+ u32 remaining_size; /* size remaining to transfer */
+ enum fw_upload_prog progress;
+ enum fw_upload_prog err_progress; /* progress at time of failure */
+ enum fw_upload_err err_code; /* security manager error code */
+};
+
+#endif /* __SYSFS_UPLOAD_H */
diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
new file mode 100644
index 000000000000..87af641cfe1a
--- /dev/null
+++ b/drivers/base/physical_location.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device physical location support
+ *
+ * Author: Won Chung <wonchung@google.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/sysfs.h>
+
+#include "physical_location.h"
+
+bool dev_add_physical_location(struct device *dev)
+{
+ struct acpi_pld_info *pld;
+ acpi_status status;
+
+ if (!has_acpi_companion(dev))
+ return false;
+
+ status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ dev->physical_location =
+ kzalloc(sizeof(*dev->physical_location), GFP_KERNEL);
+ if (!dev->physical_location)
+ return false;
+ dev->physical_location->panel = pld->panel;
+ dev->physical_location->vertical_position = pld->vertical_position;
+ dev->physical_location->horizontal_position = pld->horizontal_position;
+ dev->physical_location->dock = pld->dock;
+ dev->physical_location->lid = pld->lid;
+
+ ACPI_FREE(pld);
+ return true;
+}
+
+static ssize_t panel_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const char *panel;
+
+ switch (dev->physical_location->panel) {
+ case DEVICE_PANEL_TOP:
+ panel = "top";
+ break;
+ case DEVICE_PANEL_BOTTOM:
+ panel = "bottom";
+ break;
+ case DEVICE_PANEL_LEFT:
+ panel = "left";
+ break;
+ case DEVICE_PANEL_RIGHT:
+ panel = "right";
+ break;
+ case DEVICE_PANEL_FRONT:
+ panel = "front";
+ break;
+ case DEVICE_PANEL_BACK:
+ panel = "back";
+ break;
+ default:
+ panel = "unknown";
+ }
+ return sysfs_emit(buf, "%s\n", panel);
+}
+static DEVICE_ATTR_RO(panel);
+
+static ssize_t vertical_position_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *vertical_position;
+
+ switch (dev->physical_location->vertical_position) {
+ case DEVICE_VERT_POS_UPPER:
+ vertical_position = "upper";
+ break;
+ case DEVICE_VERT_POS_CENTER:
+ vertical_position = "center";
+ break;
+ case DEVICE_VERT_POS_LOWER:
+ vertical_position = "lower";
+ break;
+ default:
+ vertical_position = "unknown";
+ }
+ return sysfs_emit(buf, "%s\n", vertical_position);
+}
+static DEVICE_ATTR_RO(vertical_position);
+
+static ssize_t horizontal_position_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *horizontal_position;
+
+ switch (dev->physical_location->horizontal_position) {
+ case DEVICE_HORI_POS_LEFT:
+ horizontal_position = "left";
+ break;
+ case DEVICE_HORI_POS_CENTER:
+ horizontal_position = "center";
+ break;
+ case DEVICE_HORI_POS_RIGHT:
+ horizontal_position = "right";
+ break;
+ default:
+ horizontal_position = "unknown";
+ }
+ return sysfs_emit(buf, "%s\n", horizontal_position);
+}
+static DEVICE_ATTR_RO(horizontal_position);
+
+static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ dev->physical_location->dock ? "yes" : "no");
+}
+static DEVICE_ATTR_RO(dock);
+
+static ssize_t lid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ dev->physical_location->lid ? "yes" : "no");
+}
+static DEVICE_ATTR_RO(lid);
+
+static struct attribute *dev_attr_physical_location[] = {
+ &dev_attr_panel.attr,
+ &dev_attr_vertical_position.attr,
+ &dev_attr_horizontal_position.attr,
+ &dev_attr_dock.attr,
+ &dev_attr_lid.attr,
+ NULL,
+};
+
+const struct attribute_group dev_attr_physical_location_group = {
+ .name = "physical_location",
+ .attrs = dev_attr_physical_location,
+};
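A rough, hypothetical sketch of how these helpers could be consumed when a device is added. The wiring and the function name example_expose_physical_location() are illustrative only and are not part of this hunk; only dev_add_physical_location() and the attribute group defined above are taken from the patch.

/* Sketch: populate and expose ACPI _PLD data for a device, if available. */
#include <linux/sysfs.h>
#include "physical_location.h"

static int example_expose_physical_location(struct device *dev)
{
	/* Parse ACPI _PLD into dev->physical_location, when present. */
	if (!dev_add_physical_location(dev))
		return 0;	/* no ACPI companion or no _PLD: nothing to do */

	/* Creates .../physical_location/{panel,vertical_position,...}. */
	return sysfs_create_group(&dev->kobj, &dev_attr_physical_location_group);
}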
diff --git a/drivers/base/physical_location.h b/drivers/base/physical_location.h
new file mode 100644
index 000000000000..82cde9f1b161
--- /dev/null
+++ b/drivers/base/physical_location.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device physical location support
+ *
+ * Author: Won Chung <wonchung@google.com>
+ */
+
+#include <linux/device.h>
+
+#ifdef CONFIG_ACPI
+extern bool dev_add_physical_location(struct device *dev);
+extern const struct attribute_group dev_attr_physical_location_group;
+#else
+static inline bool dev_add_physical_location(struct device *dev) { return false; }
+static const struct attribute_group dev_attr_physical_location_group = {};
+#endif
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 8cc272fd5c99..51bb2289865c 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -30,6 +30,8 @@
#include <linux/property.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
+#include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
#include "base.h"
#include "power/power.h"
@@ -231,7 +233,8 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
out_not_found:
ret = -ENXIO;
out:
- WARN(ret == 0, "0 is an invalid IRQ number\n");
+ if (WARN(!ret, "0 is an invalid IRQ number\n"))
+ return -EINVAL;
return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
@@ -446,7 +449,8 @@ static int __platform_get_irq_byname(struct platform_device *dev,
r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
if (r) {
- WARN(r->start == 0, "0 is an invalid IRQ number\n");
+ if (WARN(!r->start, "0 is an invalid IRQ number\n"))
+ return -EINVAL;
return r->start;
}
@@ -1275,31 +1279,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct platform_device *pdev = to_platform_device(dev);
- char *driver_override, *old, *cp;
-
- /* We need to keep extra room for a newline */
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- device_lock(dev);
- old = pdev->driver_override;
- if (strlen(driver_override)) {
- pdev->driver_override = driver_override;
- } else {
- kfree(driver_override);
- pdev->driver_override = NULL;
- }
- device_unlock(dev);
+ int ret;
- kfree(old);
+ ret = driver_set_override(dev, &pdev->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
@@ -1454,9 +1438,9 @@ static void platform_shutdown(struct device *_dev)
drv->shutdown(dev);
}
-
-int platform_dma_configure(struct device *dev)
+static int platform_dma_configure(struct device *dev)
{
+ struct platform_driver *drv = to_platform_driver(dev->driver);
enum dev_dma_attr attr;
int ret = 0;
@@ -1467,9 +1451,23 @@ int platform_dma_configure(struct device *dev)
ret = acpi_dma_configure(dev, attr);
}
+ if (!ret && !drv->driver_managed_dma) {
+ ret = iommu_device_use_default_domain(dev);
+ if (ret)
+ arch_teardown_dma_ops(dev);
+ }
+
return ret;
}
+static void platform_dma_cleanup(struct device *dev)
+{
+ struct platform_driver *drv = to_platform_driver(dev->driver);
+
+ if (!drv->driver_managed_dma)
+ iommu_device_unuse_default_domain(dev);
+}
+
static const struct dev_pm_ops platform_dev_pm_ops = {
SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
USE_PLATFORM_PM_SLEEP_OPS
@@ -1484,6 +1482,7 @@ struct bus_type platform_bus_type = {
.remove = platform_remove,
.shutdown = platform_shutdown,
.dma_configure = platform_dma_configure,
+ .dma_cleanup = platform_dma_cleanup,
.pm = &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
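For context, a hedged sketch of a platform driver that opts out of the default-domain handling added in platform_dma_configure() above by setting driver_managed_dma. The driver name and callback are hypothetical; only the driver_managed_dma field is taken from this series.

/* Hypothetical VFIO-style driver that manages its own IOMMU domain. */
#include <linux/module.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	/* The driver attaches the device to its own IOMMU domain here. */
	return 0;
}

static struct platform_driver my_driver = {
	.probe = my_probe,
	.driver = {
		.name = "my-managed-dma-device",	/* hypothetical */
	},
	/* Skip the iommu_device_use_default_domain() call in the bus code. */
	.driver_managed_dma = true,
};
module_platform_driver(my_driver);

MODULE_LICENSE("GPL");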
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 3adcac2c78fa..ed6f449f8e5c 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -1206,15 +1206,23 @@ const void *device_get_match_data(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_get_match_data);
-static void *
-fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
- void *data, devcon_match_fn_t match)
+static unsigned int fwnode_graph_devcon_matches(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches,
+ unsigned int matches_len)
{
struct fwnode_handle *node;
struct fwnode_handle *ep;
+ unsigned int count = 0;
void *ret;
fwnode_graph_for_each_endpoint(fwnode, ep) {
+ if (matches && count >= matches_len) {
+ fwnode_handle_put(ep);
+ break;
+ }
+
node = fwnode_graph_get_remote_port_parent(ep);
if (!fwnode_device_is_available(node)) {
fwnode_handle_put(node);
@@ -1224,33 +1232,43 @@ fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
ret = match(node, con_id, data);
fwnode_handle_put(node);
if (ret) {
- fwnode_handle_put(ep);
- return ret;
+ if (matches)
+ matches[count] = ret;
+ count++;
}
}
- return NULL;
+ return count;
}
-static void *
-fwnode_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
- void *data, devcon_match_fn_t match)
+static unsigned int fwnode_devcon_matches(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches,
+ unsigned int matches_len)
{
struct fwnode_handle *node;
+ unsigned int count = 0;
+ unsigned int i;
void *ret;
- int i;
for (i = 0; ; i++) {
+ if (matches && count >= matches_len)
+ break;
+
node = fwnode_find_reference(fwnode, con_id, i);
if (IS_ERR(node))
break;
ret = match(node, NULL, data);
fwnode_handle_put(node);
- if (ret)
- return ret;
+ if (ret) {
+ if (matches)
+ matches[count] = ret;
+ count++;
+ }
}
- return NULL;
+ return count;
}
/**
@@ -1268,15 +1286,61 @@ void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
const char *con_id, void *data,
devcon_match_fn_t match)
{
+ unsigned int count;
void *ret;
if (!fwnode || !match)
return NULL;
- ret = fwnode_graph_devcon_match(fwnode, con_id, data, match);
- if (ret)
+ count = fwnode_graph_devcon_matches(fwnode, con_id, data, match, &ret, 1);
+ if (count)
return ret;
- return fwnode_devcon_match(fwnode, con_id, data, match);
+ count = fwnode_devcon_matches(fwnode, con_id, data, match, &ret, 1);
+ return count ? ret : NULL;
}
EXPORT_SYMBOL_GPL(fwnode_connection_find_match);
+
+/**
+ * fwnode_connection_find_matches - Find connections from a device node
+ * @fwnode: Device node with the connection
+ * @con_id: Identifier for the connection
+ * @data: Data for the match function
+ * @match: Function to check and convert the connection description
+ * @matches: (Optional) array of pointers to fill with matches
+ * @matches_len: Length of @matches
+ *
+ * Find up to @matches_len connections with unique identifier @con_id between
+ * @fwnode and other device nodes. @match will be used to convert the
+ * connection description to data the caller is expecting to be returned
+ * through the @matches array.
+ * If @matches is NULL, @matches_len is ignored and the total number of resolved
+ * matches is returned.
+ *
+ * Return: Number of matches resolved, or negative errno.
+ */
+int fwnode_connection_find_matches(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches, unsigned int matches_len)
+{
+ unsigned int count_graph;
+ unsigned int count_ref;
+
+ if (!fwnode || !match)
+ return -EINVAL;
+
+ count_graph = fwnode_graph_devcon_matches(fwnode, con_id, data, match,
+ matches, matches_len);
+
+ if (matches) {
+ matches += count_graph;
+ matches_len -= count_graph;
+ }
+
+ count_ref = fwnode_devcon_matches(fwnode, con_id, data, match,
+ matches, matches_len);
+
+ return count_graph + count_ref;
+}
+EXPORT_SYMBOL_GPL(fwnode_connection_find_matches);
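A minimal, hedged usage sketch for the new fwnode_connection_find_matches() helper. The con_id "example-con" and the example_* functions are hypothetical; the helper's signature and the devcon_match_fn_t callback shape are taken from the code above.

/* Sketch: collect up to four connections described for a firmware node. */
#include <linux/property.h>

static void *example_match(struct fwnode_handle *fwnode, const char *id,
			   void *data)
{
	/*
	 * Return the object the caller wants, or NULL if it does not match.
	 * Take a reference because the core drops its own after this call.
	 */
	return fwnode_handle_get(fwnode);
}

static int example_collect(struct fwnode_handle *fwnode)
{
	void *matches[4];
	int count;

	count = fwnode_connection_find_matches(fwnode, "example-con", NULL,
					       example_match, matches,
					       ARRAY_SIZE(matches));
	/*
	 * Passing matches = NULL instead would just return the total count.
	 * The caller must eventually fwnode_handle_put() each entry here.
	 */
	return count;
}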
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f1dda4ef22cc..084f9b8a0ba3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1102,7 +1102,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
lo->lo_flags |= LO_FLAGS_PARTSCAN;
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
if (partscan)
- lo->lo_disk->flags &= ~GENHD_FL_NO_PART;
+ clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
loop_global_unlock(lo, is_loop);
if (partscan)
@@ -1198,7 +1198,7 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
*/
lo->lo_flags = 0;
if (!part_shift)
- lo->lo_disk->flags |= GENHD_FL_NO_PART;
+ set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
mutex_lock(&lo->lo_mutex);
lo->lo_state = Lo_unbound;
mutex_unlock(&lo->lo_mutex);
@@ -1308,7 +1308,7 @@ out_unfreeze:
if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
!(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
- lo->lo_disk->flags &= ~GENHD_FL_NO_PART;
+ clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
partscan = true;
}
out_unlock:
@@ -2011,7 +2011,7 @@ static int loop_add(int i)
* userspace tools. Parameters like this in general should be avoided.
*/
if (!part_shift)
- disk->flags |= GENHD_FL_NO_PART;
+ set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
mutex_init(&lo->lo_mutex);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ac8b045c777c..07f3c139a3d7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -403,13 +403,14 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
if (!mutex_trylock(&cmd->lock))
return BLK_EH_RESET_TIMER;
- if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+ if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
mutex_unlock(&cmd->lock);
return BLK_EH_DONE;
}
if (!refcount_inc_not_zero(&nbd->config_refs)) {
cmd->status = BLK_STS_TIMEOUT;
+ __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
mutex_unlock(&cmd->lock);
goto done;
}
@@ -478,6 +479,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
cmd->status = BLK_STS_IOERR;
+ __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
mutex_unlock(&cmd->lock);
sock_shutdown(nbd);
nbd_config_put(nbd);
@@ -745,7 +747,7 @@ static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
cmd = blk_mq_rq_to_pdu(req);
mutex_lock(&cmd->lock);
- if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+ if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
tag, cmd->status, cmd->flags);
ret = -ENOENT;
@@ -854,8 +856,16 @@ static void recv_work(struct work_struct *work)
}
rq = blk_mq_rq_from_pdu(cmd);
- if (likely(!blk_should_fake_timeout(rq->q)))
- blk_mq_complete_request(rq);
+ if (likely(!blk_should_fake_timeout(rq->q))) {
+ bool complete;
+
+ mutex_lock(&cmd->lock);
+ complete = __test_and_clear_bit(NBD_CMD_INFLIGHT,
+ &cmd->flags);
+ mutex_unlock(&cmd->lock);
+ if (complete)
+ blk_mq_complete_request(rq);
+ }
percpu_ref_put(&q->q_usage_counter);
}
@@ -1419,7 +1429,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd)
static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
struct block_device *bdev)
{
- sock_shutdown(nbd);
+ nbd_clear_sock(nbd);
__invalidate_device(bdev, true);
nbd_bdev_reset(nbd);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
@@ -1518,15 +1528,20 @@ static struct nbd_config *nbd_alloc_config(void)
{
struct nbd_config *config;
+ if (!try_module_get(THIS_MODULE))
+ return ERR_PTR(-ENODEV);
+
config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
- if (!config)
- return NULL;
+ if (!config) {
+ module_put(THIS_MODULE);
+ return ERR_PTR(-ENOMEM);
+ }
+
atomic_set(&config->recv_threads, 0);
init_waitqueue_head(&config->recv_wq);
init_waitqueue_head(&config->conn_wait);
config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
atomic_set(&config->live_connections, 0);
- try_module_get(THIS_MODULE);
return config;
}
@@ -1553,12 +1568,13 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
mutex_unlock(&nbd->config_lock);
goto out;
}
- config = nbd->config = nbd_alloc_config();
- if (!config) {
- ret = -ENOMEM;
+ config = nbd_alloc_config();
+ if (IS_ERR(config)) {
+ ret = PTR_ERR(config);
mutex_unlock(&nbd->config_lock);
goto out;
}
+ nbd->config = config;
refcount_set(&nbd->config_refs, 1);
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);
@@ -1798,17 +1814,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
refcount_set(&nbd->refs, 0);
INIT_LIST_HEAD(&nbd->list);
disk->major = NBD_MAJOR;
-
- /* Too big first_minor can cause duplicate creation of
- * sysfs files/links, since index << part_shift might overflow, or
- * MKDEV() expect that the max bits of first_minor is 20.
- */
disk->first_minor = index << part_shift;
- if (disk->first_minor < index || disk->first_minor > MINORMASK) {
- err = -EINVAL;
- goto out_free_work;
- }
-
disk->minors = 1 << part_shift;
disk->fops = &nbd_fops;
disk->private_data = nbd;
@@ -1913,14 +1919,25 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
if (!netlink_capable(skb, CAP_SYS_ADMIN))
return -EPERM;
- if (info->attrs[NBD_ATTR_INDEX])
+ if (info->attrs[NBD_ATTR_INDEX]) {
index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
+
+ /*
+ * A too-large first_minor can cause duplicate creation of
+ * sysfs files/links, since index << part_shift might overflow, or
+ * MKDEV() expects the maximum number of bits in first_minor to be 20.
+ */
+ if (index < 0 || index > MINORMASK >> part_shift) {
+ pr_err("illegal input index %d\n", index);
+ return -EINVAL;
+ }
+ }
if (!info->attrs[NBD_ATTR_SOCKETS]) {
- printk(KERN_ERR "nbd: must specify at least one socket\n");
+ pr_err("must specify at least one socket\n");
return -EINVAL;
}
if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
- printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
+ pr_err("must specify a size in bytes for the device\n");
return -EINVAL;
}
again:
@@ -1956,7 +1973,7 @@ again:
nbd_put(nbd);
if (index == -1)
goto again;
- printk(KERN_ERR "nbd: nbd%d already in use\n", index);
+ pr_err("nbd%d already in use\n", index);
return -EBUSY;
}
if (WARN_ON(nbd->config)) {
@@ -1964,13 +1981,14 @@ again:
nbd_put(nbd);
return -EINVAL;
}
- config = nbd->config = nbd_alloc_config();
- if (!nbd->config) {
+ config = nbd_alloc_config();
+ if (IS_ERR(config)) {
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
- printk(KERN_ERR "nbd: couldn't allocate config\n");
- return -ENOMEM;
+ pr_err("couldn't allocate config\n");
+ return PTR_ERR(config);
}
+ nbd->config = config;
refcount_set(&nbd->config_refs, 1);
set_bit(NBD_RT_BOUND, &config->runtime_flags);
@@ -2023,7 +2041,7 @@ again:
struct nlattr *socks[NBD_SOCK_MAX+1];
if (nla_type(attr) != NBD_SOCK_ITEM) {
- printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
+ pr_err("socks must be embedded in a SOCK_ITEM attr\n");
ret = -EINVAL;
goto out;
}
@@ -2032,7 +2050,7 @@ again:
nbd_sock_policy,
info->extack);
if (ret != 0) {
- printk(KERN_ERR "nbd: error processing sock list\n");
+ pr_err("error processing sock list\n");
ret = -EINVAL;
goto out;
}
@@ -2104,7 +2122,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
return -EPERM;
if (!info->attrs[NBD_ATTR_INDEX]) {
- printk(KERN_ERR "nbd: must specify an index to disconnect\n");
+ pr_err("must specify an index to disconnect\n");
return -EINVAL;
}
index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
@@ -2112,14 +2130,12 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
nbd = idr_find(&nbd_index_idr, index);
if (!nbd) {
mutex_unlock(&nbd_index_mutex);
- printk(KERN_ERR "nbd: couldn't find device at index %d\n",
- index);
+ pr_err("couldn't find device at index %d\n", index);
return -EINVAL;
}
if (!refcount_inc_not_zero(&nbd->refs)) {
mutex_unlock(&nbd_index_mutex);
- printk(KERN_ERR "nbd: device at index %d is going down\n",
- index);
+ pr_err("device at index %d is going down\n", index);
return -EINVAL;
}
mutex_unlock(&nbd_index_mutex);
@@ -2144,7 +2160,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
return -EPERM;
if (!info->attrs[NBD_ATTR_INDEX]) {
- printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
+ pr_err("must specify a device to reconfigure\n");
return -EINVAL;
}
index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
@@ -2152,8 +2168,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
nbd = idr_find(&nbd_index_idr, index);
if (!nbd) {
mutex_unlock(&nbd_index_mutex);
- printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
- index);
+ pr_err("couldn't find a device at index %d\n", index);
return -EINVAL;
}
if (nbd->backend) {
@@ -2174,8 +2189,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
}
if (!refcount_inc_not_zero(&nbd->refs)) {
mutex_unlock(&nbd_index_mutex);
- printk(KERN_ERR "nbd: device at index %d is going down\n",
- index);
+ pr_err("device at index %d is going down\n", index);
return -EINVAL;
}
mutex_unlock(&nbd_index_mutex);
@@ -2239,7 +2253,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
struct nlattr *socks[NBD_SOCK_MAX+1];
if (nla_type(attr) != NBD_SOCK_ITEM) {
- printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
+ pr_err("socks must be embedded in a SOCK_ITEM attr\n");
ret = -EINVAL;
goto out;
}
@@ -2248,7 +2262,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
nbd_sock_policy,
info->extack);
if (ret != 0) {
- printk(KERN_ERR "nbd: error processing sock list\n");
+ pr_err("error processing sock list\n");
ret = -EINVAL;
goto out;
}
@@ -2465,7 +2479,7 @@ static int __init nbd_init(void)
BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
if (max_part < 0) {
- printk(KERN_ERR "nbd: max_part must be >= 0\n");
+ pr_err("max_part must be >= 0\n");
return -EINVAL;
}
@@ -2528,6 +2542,12 @@ static void __exit nbd_cleanup(void)
struct nbd_device *nbd;
LIST_HEAD(del_list);
+ /*
+ * Unregister netlink interface prior to waiting
+ * for the completion of netlink commands.
+ */
+ genl_unregister_family(&nbd_genl_family);
+
nbd_dbg_close();
mutex_lock(&nbd_index_mutex);
@@ -2537,8 +2557,11 @@ static void __exit nbd_cleanup(void)
while (!list_empty(&del_list)) {
nbd = list_first_entry(&del_list, struct nbd_device, list);
list_del_init(&nbd->list);
+ if (refcount_read(&nbd->config_refs))
+ pr_err("possibly leaking nbd_config (ref %d)\n",
+ refcount_read(&nbd->config_refs));
if (refcount_read(&nbd->refs) != 1)
- printk(KERN_ERR "nbd: possibly leaking a device\n");
+ pr_err("possibly leaking a device\n");
nbd_put(nbd);
}
@@ -2546,7 +2569,6 @@ static void __exit nbd_cleanup(void)
destroy_workqueue(nbd_del_wq);
idr_destroy(&nbd_index_idr);
- genl_unregister_family(&nbd_genl_family);
unregister_blkdev(NBD_MAJOR, "nbd");
}
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 539cfeac263d..6b67088f4ea7 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -77,12 +77,6 @@ enum {
NULL_IRQ_TIMER = 2,
};
-enum {
- NULL_Q_BIO = 0,
- NULL_Q_RQ = 1,
- NULL_Q_MQ = 2,
-};
-
static bool g_virt_boundary = false;
module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 4525a65e1b23..8359b43842f2 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -60,6 +60,13 @@ struct nullb_zone {
unsigned int capacity;
};
+/* Queue modes */
+enum {
+ NULL_Q_BIO = 0,
+ NULL_Q_RQ = 1,
+ NULL_Q_MQ = 2,
+};
+
struct nullb_device {
struct nullb *nullb;
struct config_item item;
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index ed158ea4fdd1..2fdd7b20c224 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -398,10 +398,10 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
*/
if (append) {
sector = zone->wp;
- if (cmd->bio)
- cmd->bio->bi_iter.bi_sector = sector;
- else
+ if (dev->queue_mode == NULL_Q_MQ)
cmd->rq->__sector = sector;
+ else
+ cmd->bio->bi_iter.bi_sector = sector;
} else if (sector != zone->wp) {
ret = BLK_STS_IOERR;
goto unlock;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 2b21f717cce1..ef9bc62e9afd 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -756,24 +756,23 @@ static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
*/
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
- struct rbd_client *client_node;
- bool found = false;
+ struct rbd_client *rbdc = NULL, *iter;
if (ceph_opts->flags & CEPH_OPT_NOSHARE)
return NULL;
spin_lock(&rbd_client_list_lock);
- list_for_each_entry(client_node, &rbd_client_list, node) {
- if (!ceph_compare_options(ceph_opts, client_node->client)) {
- __rbd_get_client(client_node);
+ list_for_each_entry(iter, &rbd_client_list, node) {
+ if (!ceph_compare_options(ceph_opts, iter->client)) {
+ __rbd_get_client(iter);
- found = true;
+ rbdc = iter;
break;
}
}
spin_unlock(&rbd_client_list_lock);
- return found ? client_node : NULL;
+ return rbdc;
}
/*
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index b361583944b9..63b4f6431d2e 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -540,7 +540,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
spin_unlock_irq(&host->lock);
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(rq, true, NULL);
+ blk_execute_rq_nowait(rq, true);
return 0;
@@ -579,7 +579,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
crq->msg_bucket = (u32) rc;
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(rq, true, NULL);
+ blk_execute_rq_nowait(rq, true);
return 0;
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index d624cc8eddc3..6fc7850c2b0a 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -37,6 +37,10 @@ MODULE_PARM_DESC(num_request_queues,
"0 for no limit. "
"Values > nr_cpu_ids truncated to nr_cpu_ids.");
+static unsigned int poll_queues;
+module_param(poll_queues, uint, 0644);
+MODULE_PARM_DESC(poll_queues, "The number of dedicated virtqueues for polling I/O");
+
static int major;
static DEFINE_IDA(vd_index_ida);
@@ -74,6 +78,7 @@ struct virtio_blk {
/* num of vqs */
int num_vqs;
+ int io_queues[HCTX_MAX_TYPES];
struct virtio_blk_vq *vqs;
};
@@ -96,8 +101,7 @@ static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
}
}
-static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
- struct scatterlist *data_sg, bool have_data)
+static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
{
struct scatterlist hdr, status, *sgs[3];
unsigned int num_out = 0, num_in = 0;
@@ -105,11 +109,11 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
sgs[num_out++] = &hdr;
- if (have_data) {
+ if (vbr->sg_table.nents) {
if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
- sgs[num_out++] = data_sg;
+ sgs[num_out++] = vbr->sg_table.sgl;
else
- sgs[num_out + num_in++] = data_sg;
+ sgs[num_out + num_in++] = vbr->sg_table.sgl;
}
sg_init_one(&status, &vbr->status, sizeof(vbr->status));
@@ -299,6 +303,28 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
virtqueue_notify(vq->vq);
}
+static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
+ struct virtio_blk *vblk,
+ struct request *req,
+ struct virtblk_req *vbr)
+{
+ blk_status_t status;
+
+ status = virtblk_setup_cmd(vblk->vdev, req, vbr);
+ if (unlikely(status))
+ return status;
+
+ blk_mq_start_request(req);
+
+ vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
+ if (unlikely(vbr->sg_table.nents < 0)) {
+ virtblk_cleanup_cmd(req);
+ return BLK_STS_RESOURCE;
+ }
+
+ return BLK_STS_OK;
+}
+
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -306,26 +332,17 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *req = bd->rq;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
unsigned long flags;
- int num;
int qid = hctx->queue_num;
bool notify = false;
blk_status_t status;
int err;
- status = virtblk_setup_cmd(vblk->vdev, req, vbr);
+ status = virtblk_prep_rq(hctx, vblk, req, vbr);
if (unlikely(status))
return status;
- blk_mq_start_request(req);
-
- num = virtblk_map_data(hctx, req, vbr);
- if (unlikely(num < 0)) {
- virtblk_cleanup_cmd(req);
- return BLK_STS_RESOURCE;
- }
-
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
- err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
+ err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
/* Don't stop the queue if -ENOMEM: we may have failed to
@@ -355,6 +372,75 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
+static bool virtblk_prep_rq_batch(struct request *req)
+{
+ struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+ req->mq_hctx->tags->rqs[req->tag] = req;
+
+ return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
+}
+
+static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
+ struct request **rqlist,
+ struct request **requeue_list)
+{
+ unsigned long flags;
+ int err;
+ bool kick;
+
+ spin_lock_irqsave(&vq->lock, flags);
+
+ while (!rq_list_empty(*rqlist)) {
+ struct request *req = rq_list_pop(rqlist);
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+ err = virtblk_add_req(vq->vq, vbr);
+ if (err) {
+ virtblk_unmap_data(req, vbr);
+ virtblk_cleanup_cmd(req);
+ rq_list_add(requeue_list, req);
+ }
+ }
+
+ kick = virtqueue_kick_prepare(vq->vq);
+ spin_unlock_irqrestore(&vq->lock, flags);
+
+ return kick;
+}
+
+static void virtio_queue_rqs(struct request **rqlist)
+{
+ struct request *req, *next, *prev = NULL;
+ struct request *requeue_list = NULL;
+
+ rq_list_for_each_safe(rqlist, req, next) {
+ struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
+ bool kick;
+
+ if (!virtblk_prep_rq_batch(req)) {
+ rq_list_move(rqlist, &requeue_list, req, prev);
+ req = prev;
+ if (!req)
+ continue;
+ }
+
+ if (!next || req->mq_hctx != next->mq_hctx) {
+ req->rq_next = NULL;
+ kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
+ if (kick)
+ virtqueue_notify(vq->vq);
+
+ *rqlist = next;
+ prev = NULL;
+ } else
+ prev = req;
+ }
+
+ *rqlist = requeue_list;
+}
+
/* return id (s/n) string for *disk to *id_str
*/
static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -512,6 +598,7 @@ static int init_vq(struct virtio_blk *vblk)
const char **names;
struct virtqueue **vqs;
unsigned short num_vqs;
+ unsigned int num_poll_vqs;
struct virtio_device *vdev = vblk->vdev;
struct irq_affinity desc = { 0, };
@@ -520,6 +607,7 @@ static int init_vq(struct virtio_blk *vblk)
&num_vqs);
if (err)
num_vqs = 1;
+
if (!err && !num_vqs) {
dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
return -EINVAL;
@@ -529,6 +617,17 @@ static int init_vq(struct virtio_blk *vblk)
min_not_zero(num_request_queues, nr_cpu_ids),
num_vqs);
+ num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);
+
+ vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
+ vblk->io_queues[HCTX_TYPE_READ] = 0;
+ vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;
+
+ dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
+ vblk->io_queues[HCTX_TYPE_DEFAULT],
+ vblk->io_queues[HCTX_TYPE_READ],
+ vblk->io_queues[HCTX_TYPE_POLL]);
+
vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
return -ENOMEM;
@@ -541,12 +640,18 @@ static int init_vq(struct virtio_blk *vblk)
goto out;
}
- for (i = 0; i < num_vqs; i++) {
+ for (i = 0; i < num_vqs - num_poll_vqs; i++) {
callbacks[i] = virtblk_done;
snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
names[i] = vblk->vqs[i].name;
}
+ for (; i < num_vqs; i++) {
+ callbacks[i] = NULL;
+ snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i);
+ names[i] = vblk->vqs[i].name;
+ }
+
/* Discover virtqueues and write information to configuration. */
err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
if (err)
@@ -692,16 +797,90 @@ static const struct attribute_group *virtblk_attr_groups[] = {
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
struct virtio_blk *vblk = set->driver_data;
+ int i, qoff;
+
+ for (i = 0, qoff = 0; i < set->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &set->map[i];
+
+ map->nr_queues = vblk->io_queues[i];
+ map->queue_offset = qoff;
+ qoff += map->nr_queues;
+
+ if (map->nr_queues == 0)
+ continue;
+
+ /*
+ * Regular queues have interrupts and hence CPU affinity is
+ * defined by the core virtio code, but polling queues have
+ * no interrupts so we let the block layer assign CPU affinity.
+ */
+ if (i == HCTX_TYPE_POLL)
+ blk_mq_map_queues(&set->map[i]);
+ else
+ blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
+ }
+
+ return 0;
+}
+
+static void virtblk_complete_batch(struct io_comp_batch *iob)
+{
+ struct request *req;
+
+ rq_list_for_each(&iob->req_list, req) {
+ virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
+ virtblk_cleanup_cmd(req);
+ }
+ blk_mq_end_request_batch(iob);
+}
+
+static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+{
+ struct virtio_blk *vblk = hctx->queue->queuedata;
+ struct virtio_blk_vq *vq = hctx->driver_data;
+ struct virtblk_req *vbr;
+ unsigned long flags;
+ unsigned int len;
+ int found = 0;
- return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
- vblk->vdev, 0);
+ spin_lock_irqsave(&vq->lock, flags);
+
+ while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
+ struct request *req = blk_mq_rq_from_pdu(vbr);
+
+ found++;
+ if (!blk_mq_add_to_batch(req, iob, vbr->status,
+ virtblk_complete_batch))
+ blk_mq_complete_request(req);
+ }
+
+ if (found)
+ blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
+
+ spin_unlock_irqrestore(&vq->lock, flags);
+
+ return found;
+}
+
+static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct virtio_blk *vblk = data;
+ struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
+
+ WARN_ON(vblk->tag_set.tags[hctx_idx] != hctx->tags);
+ hctx->driver_data = vq;
+ return 0;
}
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
+ .queue_rqs = virtio_queue_rqs,
.commit_rqs = virtio_commit_rqs,
+ .init_hctx = virtblk_init_hctx,
.complete = virtblk_request_done,
.map_queues = virtblk_map_queues,
+ .poll = virtblk_poll,
};
static unsigned int virtblk_queue_depth;
@@ -778,6 +957,9 @@ static int virtblk_probe(struct virtio_device *vdev)
sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
vblk->tag_set.driver_data = vblk;
vblk->tag_set.nr_hw_queues = vblk->num_vqs;
+ vblk->tag_set.nr_maps = 1;
+ if (vblk->io_queues[HCTX_TYPE_POLL])
+ vblk->tag_set.nr_maps = 3;
err = blk_mq_alloc_tag_set(&vblk->tag_set);
if (err)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 55e004d03ced..a88ce4426400 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1221,7 +1221,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
list_del(&persistent_gnt->node);
if (persistent_gnt->gref != INVALID_GRANT_REF) {
gnttab_end_foreign_access(persistent_gnt->gref,
- 0UL);
+ NULL);
rinfo->persistent_gnts_c--;
}
if (info->feature_persistent)
@@ -1244,7 +1244,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
rinfo->shadow[i].req.u.rw.nr_segments;
for (j = 0; j < segs; j++) {
persistent_gnt = rinfo->shadow[i].grants_used[j];
- gnttab_end_foreign_access(persistent_gnt->gref, 0UL);
+ gnttab_end_foreign_access(persistent_gnt->gref, NULL);
if (info->feature_persistent)
__free_page(persistent_gnt->page);
kfree(persistent_gnt);
@@ -1259,7 +1259,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
for (j = 0; j < INDIRECT_GREFS(segs); j++) {
persistent_gnt = rinfo->shadow[i].indirect_grants[j];
- gnttab_end_foreign_access(persistent_gnt->gref, 0UL);
+ gnttab_end_foreign_access(persistent_gnt->gref, NULL);
__free_page(persistent_gnt->page);
kfree(persistent_gnt);
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 8fd4a356a86e..e81a9700cfd0 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -21,6 +21,7 @@
#include <linux/dma-mapping.h>
#include <linux/acpi.h>
#include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
#include "fsl-mc-private.h"
@@ -140,15 +141,33 @@ static int fsl_mc_dma_configure(struct device *dev)
{
struct device *dma_dev = dev;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
u32 input_id = mc_dev->icid;
+ int ret;
while (dev_is_fsl_mc(dma_dev))
dma_dev = dma_dev->parent;
if (dev_of_node(dma_dev))
- return of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
+ ret = of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
+ else
+ ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
+
+ if (!ret && !mc_drv->driver_managed_dma) {
+ ret = iommu_device_use_default_domain(dev);
+ if (ret)
+ arch_teardown_dma_ops(dev);
+ }
+
+ return ret;
+}
+
+static void fsl_mc_dma_cleanup(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
- return acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
+ if (!mc_drv->driver_managed_dma)
+ iommu_device_unuse_default_domain(dev);
}
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
@@ -166,31 +185,14 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- char *driver_override, *old = mc_dev->driver_override;
- char *cp;
+ int ret;
if (WARN_ON(dev->bus != &fsl_mc_bus_type))
return -EINVAL;
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- if (strlen(driver_override)) {
- mc_dev->driver_override = driver_override;
- } else {
- kfree(driver_override);
- mc_dev->driver_override = NULL;
- }
-
- kfree(old);
+ ret = driver_set_override(dev, &mc_dev->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
@@ -312,6 +314,7 @@ struct bus_type fsl_mc_bus_type = {
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
.dma_configure = fsl_mc_dma_configure,
+ .dma_cleanup = fsl_mc_dma_cleanup,
.dev_groups = fsl_mc_dev_groups,
.bus_groups = fsl_mc_bus_groups,
};
diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig
index 4748df7f9cd5..b39a11e6c624 100644
--- a/drivers/bus/mhi/Kconfig
+++ b/drivers/bus/mhi/Kconfig
@@ -6,3 +6,4 @@
#
source "drivers/bus/mhi/host/Kconfig"
+source "drivers/bus/mhi/ep/Kconfig"
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
index 5f5708a249f5..46981331b38f 100644
--- a/drivers/bus/mhi/Makefile
+++ b/drivers/bus/mhi/Makefile
@@ -1,2 +1,5 @@
# Host MHI stack
obj-y += host/
+
+# Endpoint MHI stack
+obj-y += ep/
diff --git a/drivers/bus/mhi/common.h b/drivers/bus/mhi/common.h
index b4ef9acd3ce7..f794b9c8049e 100644
--- a/drivers/bus/mhi/common.h
+++ b/drivers/bus/mhi/common.h
@@ -165,6 +165,22 @@
#define MHI_TRE_GET_EV_LINKSPEED(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 1)))
#define MHI_TRE_GET_EV_LINKWIDTH(tre) FIELD_GET(GENMASK(7, 0), (MHI_TRE_GET_DWORD(tre, 0)))
+/* State change event */
+#define MHI_SC_EV_PTR 0
+#define MHI_SC_EV_DWORD0(state) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), state))
+#define MHI_SC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
+
+/* EE event */
+#define MHI_EE_EV_PTR 0
+#define MHI_EE_EV_DWORD0(ee) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), ee))
+#define MHI_EE_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
+
+/* Command Completion event */
+#define MHI_CC_EV_PTR(ptr) cpu_to_le64(ptr)
+#define MHI_CC_EV_DWORD0(code) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code))
+#define MHI_CC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
+
/* Transfer descriptor macros */
#define MHI_TRE_DATA_PTR(ptr) cpu_to_le64(ptr)
#define MHI_TRE_DATA_DWORD0(len) cpu_to_le32(FIELD_PREP(GENMASK(15, 0), len))
@@ -175,6 +191,12 @@
FIELD_PREP(BIT(9), ieot) | \
FIELD_PREP(BIT(8), ieob) | \
FIELD_PREP(BIT(0), chain))
+#define MHI_TRE_DATA_GET_PTR(tre) le64_to_cpu((tre)->ptr)
+#define MHI_TRE_DATA_GET_LEN(tre) FIELD_GET(GENMASK(15, 0), MHI_TRE_GET_DWORD(tre, 0))
+#define MHI_TRE_DATA_GET_CHAIN(tre) (!!(FIELD_GET(BIT(0), MHI_TRE_GET_DWORD(tre, 1))))
+#define MHI_TRE_DATA_GET_IEOB(tre) (!!(FIELD_GET(BIT(8), MHI_TRE_GET_DWORD(tre, 1))))
+#define MHI_TRE_DATA_GET_IEOT(tre) (!!(FIELD_GET(BIT(9), MHI_TRE_GET_DWORD(tre, 1))))
+#define MHI_TRE_DATA_GET_BEI(tre) (!!(FIELD_GET(BIT(10), MHI_TRE_GET_DWORD(tre, 1))))
/* RSC transfer descriptor macros */
#define MHI_RSCTRE_DATA_PTR(ptr, len) cpu_to_le64(FIELD_PREP(GENMASK(64, 48), len) | ptr)
diff --git a/drivers/bus/mhi/ep/Kconfig b/drivers/bus/mhi/ep/Kconfig
new file mode 100644
index 000000000000..90ab3b040672
--- /dev/null
+++ b/drivers/bus/mhi/ep/Kconfig
@@ -0,0 +1,10 @@
+config MHI_BUS_EP
+ tristate "Modem Host Interface (MHI) bus Endpoint implementation"
+ help
+ Bus driver for the MHI protocol. Modem Host Interface (MHI) is a
+ communication protocol used by a host processor to control
+ and communicate with a modem device over a high-speed peripheral
+ bus or shared memory.
+
+ MHI_BUS_EP implements the MHI protocol for endpoint devices,
+ such as an SDX55 modem connected to the host machine over PCIe.
diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile
new file mode 100644
index 000000000000..aad85f180b70
--- /dev/null
+++ b/drivers/bus/mhi/ep/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o
+mhi_ep-y := main.o mmio.o ring.o sm.o
diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h
new file mode 100644
index 000000000000..a2125fa5fe2f
--- /dev/null
+++ b/drivers/bus/mhi/ep/internal.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ *
+ */
+
+#ifndef _MHI_EP_INTERNAL_
+#define _MHI_EP_INTERNAL_
+
+#include <linux/bitfield.h>
+
+#include "../common.h"
+
+extern struct bus_type mhi_ep_bus_type;
+
+#define MHI_REG_OFFSET 0x100
+#define BHI_REG_OFFSET 0x200
+
+/* MHI registers */
+#define EP_MHIREGLEN (MHI_REG_OFFSET + MHIREGLEN)
+#define EP_MHIVER (MHI_REG_OFFSET + MHIVER)
+#define EP_MHICFG (MHI_REG_OFFSET + MHICFG)
+#define EP_CHDBOFF (MHI_REG_OFFSET + CHDBOFF)
+#define EP_ERDBOFF (MHI_REG_OFFSET + ERDBOFF)
+#define EP_BHIOFF (MHI_REG_OFFSET + BHIOFF)
+#define EP_BHIEOFF (MHI_REG_OFFSET + BHIEOFF)
+#define EP_DEBUGOFF (MHI_REG_OFFSET + DEBUGOFF)
+#define EP_MHICTRL (MHI_REG_OFFSET + MHICTRL)
+#define EP_MHISTATUS (MHI_REG_OFFSET + MHISTATUS)
+#define EP_CCABAP_LOWER (MHI_REG_OFFSET + CCABAP_LOWER)
+#define EP_CCABAP_HIGHER (MHI_REG_OFFSET + CCABAP_HIGHER)
+#define EP_ECABAP_LOWER (MHI_REG_OFFSET + ECABAP_LOWER)
+#define EP_ECABAP_HIGHER (MHI_REG_OFFSET + ECABAP_HIGHER)
+#define EP_CRCBAP_LOWER (MHI_REG_OFFSET + CRCBAP_LOWER)
+#define EP_CRCBAP_HIGHER (MHI_REG_OFFSET + CRCBAP_HIGHER)
+#define EP_CRDB_LOWER (MHI_REG_OFFSET + CRDB_LOWER)
+#define EP_CRDB_HIGHER (MHI_REG_OFFSET + CRDB_HIGHER)
+#define EP_MHICTRLBASE_LOWER (MHI_REG_OFFSET + MHICTRLBASE_LOWER)
+#define EP_MHICTRLBASE_HIGHER (MHI_REG_OFFSET + MHICTRLBASE_HIGHER)
+#define EP_MHICTRLLIMIT_LOWER (MHI_REG_OFFSET + MHICTRLLIMIT_LOWER)
+#define EP_MHICTRLLIMIT_HIGHER (MHI_REG_OFFSET + MHICTRLLIMIT_HIGHER)
+#define EP_MHIDATABASE_LOWER (MHI_REG_OFFSET + MHIDATABASE_LOWER)
+#define EP_MHIDATABASE_HIGHER (MHI_REG_OFFSET + MHIDATABASE_HIGHER)
+#define EP_MHIDATALIMIT_LOWER (MHI_REG_OFFSET + MHIDATALIMIT_LOWER)
+#define EP_MHIDATALIMIT_HIGHER (MHI_REG_OFFSET + MHIDATALIMIT_HIGHER)
+
+/* MHI BHI registers */
+#define EP_BHI_INTVEC (BHI_REG_OFFSET + BHI_INTVEC)
+#define EP_BHI_EXECENV (BHI_REG_OFFSET + BHI_EXECENV)
+
+/* MHI Doorbell registers */
+#define CHDB_LOWER_n(n) (0x400 + 0x8 * (n))
+#define CHDB_HIGHER_n(n) (0x404 + 0x8 * (n))
+#define ERDB_LOWER_n(n) (0x800 + 0x8 * (n))
+#define ERDB_HIGHER_n(n) (0x804 + 0x8 * (n))
+
+#define MHI_CTRL_INT_STATUS 0x4
+#define MHI_CTRL_INT_STATUS_MSK BIT(0)
+#define MHI_CTRL_INT_STATUS_CRDB_MSK BIT(1)
+#define MHI_CHDB_INT_STATUS_n(n) (0x28 + 0x4 * (n))
+#define MHI_ERDB_INT_STATUS_n(n) (0x38 + 0x4 * (n))
+
+#define MHI_CTRL_INT_CLEAR 0x4c
+#define MHI_CTRL_INT_MMIO_WR_CLEAR BIT(2)
+#define MHI_CTRL_INT_CRDB_CLEAR BIT(1)
+#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0)
+
+#define MHI_CHDB_INT_CLEAR_n(n) (0x70 + 0x4 * (n))
+#define MHI_CHDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0)
+#define MHI_ERDB_INT_CLEAR_n(n) (0x80 + 0x4 * (n))
+#define MHI_ERDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0)
+
+/*
+ * Unlike the usual "masking" convention, writing "1" to a bit in this register
+ * enables the interrupt and writing "0" will disable it.
+ */
+#define MHI_CTRL_INT_MASK 0x94
+#define MHI_CTRL_INT_MASK_MASK GENMASK(1, 0)
+#define MHI_CTRL_MHICTRL_MASK BIT(0)
+#define MHI_CTRL_CRDB_MASK BIT(1)
+
+#define MHI_CHDB_INT_MASK_n(n) (0xb8 + 0x4 * (n))
+#define MHI_CHDB_INT_MASK_n_EN_ALL GENMASK(31, 0)
+#define MHI_ERDB_INT_MASK_n(n) (0xc8 + 0x4 * (n))
+#define MHI_ERDB_INT_MASK_n_EN_ALL GENMASK(31, 0)
+
+#define NR_OF_CMD_RINGS 1
+#define MHI_MASK_ROWS_CH_DB 4
+#define MHI_MASK_ROWS_EV_DB 4
+#define MHI_MASK_CH_LEN 32
+#define MHI_MASK_EV_LEN 32
+
+/* Generic context */
+struct mhi_generic_ctx {
+ __le32 reserved0;
+ __le32 reserved1;
+ __le32 reserved2;
+
+ __le64 rbase __packed __aligned(4);
+ __le64 rlen __packed __aligned(4);
+ __le64 rp __packed __aligned(4);
+ __le64 wp __packed __aligned(4);
+};
+
+enum mhi_ep_ring_type {
+ RING_TYPE_CMD,
+ RING_TYPE_ER,
+ RING_TYPE_CH,
+};
+
+/* Ring element */
+union mhi_ep_ring_ctx {
+ struct mhi_cmd_ctxt cmd;
+ struct mhi_event_ctxt ev;
+ struct mhi_chan_ctxt ch;
+ struct mhi_generic_ctx generic;
+};
+
+struct mhi_ep_ring_item {
+ struct list_head node;
+ struct mhi_ep_ring *ring;
+};
+
+struct mhi_ep_ring {
+ struct mhi_ep_cntrl *mhi_cntrl;
+ union mhi_ep_ring_ctx *ring_ctx;
+ struct mhi_ring_element *ring_cache;
+ enum mhi_ep_ring_type type;
+ u64 rbase;
+ size_t rd_offset;
+ size_t wr_offset;
+ size_t ring_size;
+ u32 db_offset_h;
+ u32 db_offset_l;
+ u32 ch_id;
+ u32 er_index;
+ u32 irq_vector;
+ bool started;
+};
+
+struct mhi_ep_cmd {
+ struct mhi_ep_ring ring;
+};
+
+struct mhi_ep_event {
+ struct mhi_ep_ring ring;
+};
+
+struct mhi_ep_state_transition {
+ struct list_head node;
+ enum mhi_state state;
+};
+
+struct mhi_ep_chan {
+ char *name;
+ struct mhi_ep_device *mhi_dev;
+ struct mhi_ep_ring ring;
+ struct mutex lock;
+ void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result);
+ enum mhi_ch_state state;
+ enum dma_data_direction dir;
+ u64 tre_loc;
+ u32 tre_size;
+ u32 tre_bytes_left;
+ u32 chan;
+ bool skip_td;
+};
+
+/* MHI Ring related functions */
+void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id);
+void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring);
+int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ union mhi_ep_ring_ctx *ctx);
+size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr);
+int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *element);
+void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring);
+int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring);
+
+/* MMIO related functions */
+u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset);
+void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val);
+void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val);
+u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask);
+void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl);
+u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring);
+void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value);
+void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state,
+ bool *mhi_reset);
+void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl);
+
+/* MHI EP core functions */
+int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state);
+int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env);
+bool mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state cur_mhi_state,
+ enum mhi_state mhi_state);
+int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state);
+int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl);
+
+#endif
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
new file mode 100644
index 000000000000..40109a79017a
--- /dev/null
+++ b/drivers/bus/mhi/ep/main.c
@@ -0,0 +1,1591 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MHI Endpoint bus stack
+ *
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mhi_ep.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include "internal.h"
+
+#define M0_WAIT_DELAY_MS 100
+#define M0_WAIT_COUNT 100
+
+static DEFINE_IDA(mhi_ep_cntrl_ida);
+
+static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+static int mhi_ep_destroy_device(struct device *dev, void *data);
+
+static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
+ struct mhi_ring_element *el, bool bei)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ union mhi_ep_ring_ctx *ctx;
+ struct mhi_ep_ring *ring;
+ int ret;
+
+ mutex_lock(&mhi_cntrl->event_lock);
+ ring = &mhi_cntrl->mhi_event[ring_idx].ring;
+ ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
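+ /* Start the event ring on first use, using the event context cached from the host */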
+ if (!ring->started) {
+ ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
+ if (ret) {
+ dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
+ goto err_unlock;
+ }
+ }
+
+ /* Add element to the event ring */
+ ret = mhi_ep_ring_add_element(ring, el);
+ if (ret) {
+ dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_cntrl->event_lock);
+
+ /*
+ * Raise IRQ to host only if the BEI flag is not set in TRE. Host might
+ * set this flag for interrupt moderation as per MHI protocol.
+ */
+ if (!bei)
+ mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&mhi_cntrl->event_lock);
+
+ return ret;
+}
+
+static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
+{
+ struct mhi_ring_element event = {};
+
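+ /* Point the event to the host address of the TRE being completed in the transfer ring */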
+ event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
+ event.dword[0] = MHI_TRE_EV_DWORD0(code, len);
+ event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
+
+ return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre));
+}
+
+int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
+{
+ struct mhi_ring_element event = {};
+
+ event.dword[0] = MHI_SC_EV_DWORD0(state);
+ event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
+
+ return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+}
+
+int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
+{
+ struct mhi_ring_element event = {};
+
+ event.dword[0] = MHI_EE_EV_DWORD0(exec_env);
+ event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
+
+ return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+}
+
+static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
+{
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
+ struct mhi_ring_element event = {};
+
+ event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
+ event.dword[0] = MHI_CC_EV_DWORD0(code);
+ event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
+
+ return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+}
+
+static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_result result = {};
+ struct mhi_ep_chan *mhi_chan;
+ struct mhi_ep_ring *ch_ring;
+ u32 tmp, ch_id;
+ int ret;
+
+ ch_id = MHI_TRE_GET_CMD_CHID(el);
+ mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
+ ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;
+
+ switch (MHI_TRE_GET_CMD_TYPE(el)) {
+ case MHI_PKT_TYPE_START_CHAN_CMD:
+ dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);
+
+ mutex_lock(&mhi_chan->lock);
+ /* Initialize and configure the corresponding channel ring */
+ if (!ch_ring->started) {
+ ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
+ (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
+ if (ret) {
+ dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id);
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
+ MHI_EV_CC_UNDEFINED_ERR);
+ if (ret)
+ dev_err(dev, "Error sending completion event: %d\n", ret);
+
+ goto err_unlock;
+ }
+ }
+
+ /* Set channel state to RUNNING */
+ mhi_chan->state = MHI_CH_STATE_RUNNING;
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
+ mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+ if (ret) {
+ dev_err(dev, "Error sending command completion event (%u)\n",
+ MHI_EV_CC_SUCCESS);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_chan->lock);
+
+ /*
+ * Create MHI device only during UL channel start. Since the MHI
+ * channels operate in pairs, we'll associate both UL and DL
+ * channels with the same device.
+ *
+ * We also need to check for mhi_dev != NULL because the host
+ * will issue the START_CHAN command during resume and we don't
+ * destroy the device during suspend.
+ */
+ if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
+ ret = mhi_ep_create_device(mhi_cntrl, ch_id);
+ if (ret) {
+ dev_err(dev, "Error creating device for channel (%u)\n", ch_id);
+ mhi_ep_handle_syserr(mhi_cntrl);
+ return ret;
+ }
+ }
+
+ /* Finally, enable DB for the channel */
+ mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);
+
+ break;
+ case MHI_PKT_TYPE_STOP_CHAN_CMD:
+ dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
+ if (!ch_ring->started) {
+ dev_err(dev, "Channel (%u) not opened\n", ch_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&mhi_chan->lock);
+ /* Disable DB for the channel */
+ mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);
+
+ /* Send channel disconnect status to client drivers */
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ /* Set channel state to STOP */
+ mhi_chan->state = MHI_CH_STATE_STOP;
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
+ mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+ if (ret) {
+ dev_err(dev, "Error sending command completion event (%u)\n",
+ MHI_EV_CC_SUCCESS);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_chan->lock);
+ break;
+ case MHI_PKT_TYPE_RESET_CHAN_CMD:
+ dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
+ if (!ch_ring->started) {
+ dev_err(dev, "Channel (%u) not opened\n", ch_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&mhi_chan->lock);
+ /* Stop and reset the transfer ring */
+ mhi_ep_ring_reset(mhi_cntrl, ch_ring);
+
+ /* Send channel disconnect status to client driver */
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ /* Set channel state to DISABLED */
+ mhi_chan->state = MHI_CH_STATE_DISABLED;
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
+ mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+ if (ret) {
+ dev_err(dev, "Error sending command completion event (%u)\n",
+ MHI_EV_CC_SUCCESS);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_chan->lock);
+ break;
+ default:
+ dev_err(dev, "Invalid command received: %lu for channel (%u)\n",
+ MHI_TRE_GET_CMD_TYPE(el), ch_id);
+ return -EINVAL;
+ }
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&mhi_chan->lock);
+
+ return ret;
+}
+
+bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
+{
+ struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
+ mhi_dev->ul_chan;
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
+ return ring->rd_offset == ring->wr_offset;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
+
+static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_ring *ring,
+ struct mhi_result *result,
+ u32 len)
+{
+ struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t tr_len, read_offset, write_offset;
+ struct mhi_ring_element *el;
+ bool tr_done = false;
+ void *write_addr;
+ u64 read_addr;
+ u32 buf_left;
+ int ret;
+
+ buf_left = len;
+
+ do {
+ /* Don't process the transfer ring if the channel is not in RUNNING state */
+ if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
+ dev_err(dev, "Channel not available\n");
+ return -ENODEV;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+
+ /* Check if there is data pending to be read from previous read operation */
+ if (mhi_chan->tre_bytes_left) {
+ dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
+ tr_len = min(buf_left, mhi_chan->tre_bytes_left);
+ } else {
+ mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
+ mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
+ mhi_chan->tre_bytes_left = mhi_chan->tre_size;
+
+ tr_len = min(buf_left, mhi_chan->tre_size);
+ }
+
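+ /* Resume from where the previous read stopped, both within the TRE and the local buffer */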
+ read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
+ write_offset = len - buf_left;
+ read_addr = mhi_chan->tre_loc + read_offset;
+ write_addr = result->buf_addr + write_offset;
+
+ dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
+ return ret;
+ }
+
+ buf_left -= tr_len;
+ mhi_chan->tre_bytes_left -= tr_len;
+
+ /*
+ * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
+ * read completely:
+ *
+ * 1. Send completion event to the host based on the flags set in TRE.
+ * 2. Increment the local read offset of the transfer ring.
+ */
+ if (!mhi_chan->tre_bytes_left) {
+ /*
+ * The host will split the data packet into multiple TREs if it can't fit
+ * the packet in a single TRE. In that case, CHAIN flag will be set by the
+ * host for all TREs except the last one.
+ */
+ if (MHI_TRE_DATA_GET_CHAIN(el)) {
+ /*
+ * IEOB (Interrupt on End of Block) flag will be set by the host if
+ * it expects the completion event for all TREs of a TD.
+ */
+ if (MHI_TRE_DATA_GET_IEOB(el)) {
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+ MHI_TRE_DATA_GET_LEN(el),
+ MHI_EV_CC_EOB);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev,
+ "Error sending transfer compl. event\n");
+ return ret;
+ }
+ }
+ } else {
+ /*
+ * The IEOT (Interrupt on End of Transfer) flag will be set by the host
+ * for the last TRE of the TD, and the host expects a completion event
+ * for it.
+ */
+ if (MHI_TRE_DATA_GET_IEOT(el)) {
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+ MHI_TRE_DATA_GET_LEN(el),
+ MHI_EV_CC_EOT);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev,
+ "Error sending transfer compl. event\n");
+ return ret;
+ }
+ }
+
+ tr_done = true;
+ }
+
+ mhi_ep_ring_inc_index(ring);
+ }
+
+ result->bytes_xferd += tr_len;
+ } while (buf_left && !tr_done);
+
+ return 0;
+}
+
+static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct mhi_result result = {};
+ u32 len = MHI_EP_DEFAULT_MTU;
+ struct mhi_ep_chan *mhi_chan;
+ int ret;
+
+ mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+
+ /*
+ * Bail out if the transfer callback is not registered for the channel.
+ * This is most likely because the client driver is not loaded at this point.
+ */
+ if (!mhi_chan->xfer_cb) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
+ return -ENODEV;
+ }
+
+ if (ring->ch_id % 2) {
+ /* DL channel */
+ result.dir = mhi_chan->dir;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ } else {
+ /* UL channel */
+ result.buf_addr = kzalloc(len, GFP_KERNEL);
+ if (!result.buf_addr)
+ return -ENOMEM;
+
+ do {
+ ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
+ kfree(result.buf_addr);
+ return ret;
+ }
+
+ result.dir = mhi_chan->dir;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ result.bytes_xferd = 0;
+ memset(result.buf_addr, 0, len);
+
+ /* Read until the ring becomes empty */
+ } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
+
+ kfree(result.buf_addr);
+ }
+
+ return 0;
+}
+
+/* TODO: Handle partially formed TDs */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
+ struct device *dev = &mhi_chan->mhi_dev->dev;
+ struct mhi_ring_element *el;
+ u32 buf_left, read_offset;
+ struct mhi_ep_ring *ring;
+ enum mhi_ev_ccs code;
+ void *read_addr;
+ u64 write_addr;
+ size_t tr_len;
+ u32 tre_len;
+ int ret;
+
+ buf_left = skb->len;
+ ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
+ mutex_lock(&mhi_chan->lock);
+
+ do {
+ /* Don't process the transfer ring if the channel is not in RUNNING state */
+ if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
+ dev_err(dev, "Channel not available\n");
+ ret = -ENODEV;
+ goto err_exit;
+ }
+
+ if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
+ dev_err(dev, "TRE not available!\n");
+ ret = -ENOSPC;
+ goto err_exit;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+ tre_len = MHI_TRE_DATA_GET_LEN(el);
+
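+ /* Copy as much of the skb as fits into the host buffer described by this TRE */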
+ tr_len = min(buf_left, tre_len);
+ read_offset = skb->len - buf_left;
+ read_addr = skb->data + read_offset;
+ write_addr = MHI_TRE_DATA_GET_PTR(el);
+
+ dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
+ ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
+ if (ret < 0) {
+ dev_err(dev, "Error writing to the channel\n");
+ goto err_exit;
+ }
+
+ buf_left -= tr_len;
+ /*
+ * For all TREs queued by the host for the DL channel, only the EOT flag will be
+ * set. If the packet doesn't fit into a single TRE, send the OVERFLOW event to
+ * the host so that the host can adjust the packet boundary to the next TREs.
+ * Otherwise, send the EOT event to the host indicating the packet boundary.
+ */
+ if (buf_left)
+ code = MHI_EV_CC_OVERFLOW;
+ else
+ code = MHI_EV_CC_EOT;
+
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
+ if (ret) {
+ dev_err(dev, "Error sending transfer completion event\n");
+ goto err_exit;
+ }
+
+ mhi_ep_ring_inc_index(ring);
+ } while (buf_left);
+
+ mutex_unlock(&mhi_chan->lock);
+
+ return 0;
+
+err_exit:
+ mutex_unlock(&mhi_chan->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);
+
+static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ /* Update the number of event rings (NER) programmed by the host */
+ mhi_ep_mmio_update_ner(mhi_cntrl);
+
+ dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n",
+ mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);
+
+ ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
+ ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
+ cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
+
+ /* Get the channel context base pointer from host */
+ mhi_ep_mmio_get_chc_base(mhi_cntrl);
+
+ /* Allocate and map memory for caching host channel context */
+ ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
+ &mhi_cntrl->ch_ctx_cache_phys,
+ (void __iomem **) &mhi_cntrl->ch_ctx_cache,
+ ch_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to allocate and map ch_ctx_cache\n");
+ return ret;
+ }
+
+ /* Get the event context base pointer from host */
+ mhi_ep_mmio_get_erc_base(mhi_cntrl);
+
+ /* Allocate and map memory for caching host event context */
+ ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
+ &mhi_cntrl->ev_ctx_cache_phys,
+ (void __iomem **) &mhi_cntrl->ev_ctx_cache,
+ ev_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to allocate and map ev_ctx_cache\n");
+ goto err_ch_ctx;
+ }
+
+ /* Get the command context base pointer from host */
+ mhi_ep_mmio_get_crc_base(mhi_cntrl);
+
+ /* Allocate and map memory for caching host command context */
+ ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
+ &mhi_cntrl->cmd_ctx_cache_phys,
+ (void __iomem **) &mhi_cntrl->cmd_ctx_cache,
+ cmd_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n");
+ goto err_ev_ctx;
+ }
+
+ /* Initialize command ring */
+ ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
+ (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
+ if (ret) {
+ dev_err(dev, "Failed to start the command ring\n");
+ goto err_cmd_ctx;
+ }
+
+ return ret;
+
+err_cmd_ctx:
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
+
+err_ev_ctx:
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
+
+err_ch_ctx:
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
+
+ return ret;
+}
+
+static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
+
+ ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
+ ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
+ cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
+
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
+
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
+
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
+}
+
+static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ /*
+ * Doorbell interrupts are enabled when the corresponding channel gets started.
+ * Enabling all interrupts here triggers spurious IRQs as some of the interrupts
+ * associated with HW channels always get triggered.
+ */
+ mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
+ mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
+}
+
+static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ bool mhi_reset;
+ u32 count = 0;
+ int ret;
+
+ /* Wait for Host to set the M0 state */
+ do {
+ msleep(M0_WAIT_DELAY_MS);
+ mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
+ if (mhi_reset) {
+ /* Clear the MHI reset if host is in reset state */
+ mhi_ep_mmio_clear_reset(mhi_cntrl);
+ dev_info(dev, "Detected Host reset while waiting for M0\n");
+ }
+ count++;
+ } while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT);
+
+ if (state != MHI_STATE_M0) {
+ dev_err(dev, "Host failed to enter M0\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = mhi_ep_cache_host_cfg(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to cache host config\n");
+ return ret;
+ }
+
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+ /* Enable all interrupts now */
+ mhi_ep_enable_int(mhi_cntrl);
+
+ return 0;
+}
+
+static void mhi_ep_cmd_ring_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ring_element *el;
+ int ret;
+
+ /* Update the write offset for the ring */
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write offset for ring\n");
+ return;
+ }
+
+ /* Sanity check to make sure there are elements in the ring */
+ if (ring->rd_offset == ring->wr_offset)
+ return;
+
+ /*
+ * Process command ring elements up to the write offset. In case of an error,
+ * just try to process the next element.
+ */
+ while (ring->rd_offset != ring->wr_offset) {
+ el = &ring->ring_cache[ring->rd_offset];
+
+ ret = mhi_ep_process_cmd_ring(ring, el);
+ if (ret)
+ dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);
+
+ mhi_ep_ring_inc_index(ring);
+ }
+}
+
+static void mhi_ep_ch_ring_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_ring_item *itr, *tmp;
+ struct mhi_ring_element *el;
+ struct mhi_ep_ring *ring;
+ struct mhi_ep_chan *chan;
+ unsigned long flags;
+ LIST_HEAD(head);
+ int ret;
+
+ spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
+ list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
+ spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
+
+ /* Process each queued channel ring. In case of an error, just process the next element. */
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ ring = itr->ring;
+
+ /* Update the write offset for the ring */
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write offset for ring\n");
+ kfree(itr);
+ continue;
+ }
+
+ /* Sanity check to make sure there are elements in the ring */
+ if (ring->rd_offset == ring->wr_offset) {
+ kfree(itr);
+ continue;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+ chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+
+ mutex_lock(&chan->lock);
+ dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
+ ret = mhi_ep_process_ch_ring(ring, el);
+ if (ret) {
+ dev_err(dev, "Error processing ring for channel (%u): %d\n",
+ ring->ch_id, ret);
+ mutex_unlock(&chan->lock);
+ kfree(itr);
+ continue;
+ }
+
+ mutex_unlock(&chan->lock);
+ kfree(itr);
+ }
+}
+
+static void mhi_ep_state_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_state_transition *itr, *tmp;
+ unsigned long flags;
+ LIST_HEAD(head);
+ int ret;
+
+ spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
+ list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
+ spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
+
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ dev_dbg(dev, "Handling MHI state transition to %s\n",
+ mhi_state_str(itr->state));
+
+ switch (itr->state) {
+ case MHI_STATE_M0:
+ ret = mhi_ep_set_m0_state(mhi_cntrl);
+ if (ret)
+ dev_err(dev, "Failed to transition to M0 state\n");
+ break;
+ case MHI_STATE_M3:
+ ret = mhi_ep_set_m3_state(mhi_cntrl);
+ if (ret)
+ dev_err(dev, "Failed to transition to M3 state\n");
+ break;
+ default:
+ dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
+ break;
+ }
+ kfree(itr);
+ }
+}
+
+static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
+ u32 ch_idx)
+{
+ struct mhi_ep_ring_item *item;
+ struct mhi_ep_ring *ring;
+ bool work = !!ch_int;
+ LIST_HEAD(head);
+ u32 i;
+
+ /* First add the ring items to a local list */
+ for_each_set_bit(i, &ch_int, 32) {
+ /* Channel index varies for each register: 0, 32, 64, 96 */
+ u32 ch_id = ch_idx + i;
+
+ ring = &mhi_cntrl->mhi_chan[ch_id].ring;
+ item = kzalloc(sizeof(*item), GFP_ATOMIC);
+ if (!item)
+ return;
+
+ item->ring = ring;
+ list_add_tail(&item->node, &head);
+ }
+
+ /* Now, splice the local list into ch_db_list and queue the work item */
+ if (work) {
+ spin_lock(&mhi_cntrl->list_lock);
+ list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
+ spin_unlock(&mhi_cntrl->list_lock);
+
+ queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
+ }
+}
+
+/*
+ * Channel interrupt statuses are contained in 4 registers, each 32 bits wide.
+ * To check all interrupts, we need to loop through each register and then
+ * check for the bits that are set.
+ */
+static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 ch_int, ch_idx, i;
+
+ /* Bail out if there is no channel doorbell interrupt */
+ if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
+ return;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+ ch_idx = i * MHI_MASK_CH_LEN;
+
+ /* Only process channel interrupt if the mask is enabled */
+ ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
+ if (ch_int) {
+ mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
+ mhi_cntrl->chdb[i].status);
+ }
+ }
+}
+
+static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_state state)
+{
+ struct mhi_ep_state_transition *item;
+
+ item = kzalloc(sizeof(*item), GFP_ATOMIC);
+ if (!item)
+ return;
+
+ item->state = state;
+ spin_lock(&mhi_cntrl->list_lock);
+ list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
+ spin_unlock(&mhi_cntrl->list_lock);
+
+ queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
+}
+
+/*
+ * Interrupt handler that services interrupts raised by the host writing to
+ * MHICTRL and Command ring doorbell (CRDB) registers for state change and
+ * channel interrupts.
+ */
+static irqreturn_t mhi_ep_irq(int irq, void *data)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = data;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ u32 int_value;
+ bool mhi_reset;
+
+ /* Acknowledge the ctrl interrupt */
+ int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);
+
+ /* Check for ctrl interrupt */
+ if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
+ dev_dbg(dev, "Processing ctrl interrupt\n");
+ mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
+ if (mhi_reset) {
+ dev_info(dev, "Host triggered MHI reset!\n");
+ disable_irq_nosync(mhi_cntrl->irq);
+ schedule_work(&mhi_cntrl->reset_work);
+ return IRQ_HANDLED;
+ }
+
+ mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
+ }
+
+ /* Check for command doorbell interrupt */
+ if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
+ dev_dbg(dev, "Processing command doorbell interrupt\n");
+ queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
+ }
+
+ /* Check for channel interrupts */
+ mhi_ep_check_channel_interrupt(mhi_cntrl);
+
+ return IRQ_HANDLED;
+}
+
+static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_ring *ch_ring, *ev_ring;
+ struct mhi_result result = {};
+ struct mhi_ep_chan *mhi_chan;
+ int i;
+
+ /* Stop all the channels */
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+ if (!mhi_chan->ring.started)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Send channel disconnect status to client drivers */
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+
+ mhi_chan->state = MHI_CH_STATE_DISABLED;
+ mutex_unlock(&mhi_chan->lock);
+ }
+
+ flush_workqueue(mhi_cntrl->wq);
+
+ /* Destroy devices associated with all channels */
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);
+
+ /* Stop and reset the transfer rings */
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+ if (!mhi_chan->ring.started)
+ continue;
+
+ ch_ring = &mhi_cntrl->mhi_chan[i].ring;
+ mutex_lock(&mhi_chan->lock);
+ mhi_ep_ring_reset(mhi_cntrl, ch_ring);
+ mutex_unlock(&mhi_chan->lock);
+ }
+
+ /* Stop and reset the event rings */
+ for (i = 0; i < mhi_cntrl->event_rings; i++) {
+ ev_ring = &mhi_cntrl->mhi_event[i].ring;
+ if (!ev_ring->started)
+ continue;
+
+ mutex_lock(&mhi_cntrl->event_lock);
+ mhi_ep_ring_reset(mhi_cntrl, ev_ring);
+ mutex_unlock(&mhi_cntrl->event_lock);
+ }
+
+ /* Stop and reset the command ring */
+ mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);
+
+ mhi_ep_free_host_cfg(mhi_cntrl);
+ mhi_ep_mmio_mask_interrupts(mhi_cntrl);
+
+ mhi_cntrl->enabled = false;
+}
+
+static void mhi_ep_reset_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state cur_state;
+ int ret;
+
+ mhi_ep_abort_transfer(mhi_cntrl);
+
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
+ mhi_ep_mmio_reset(mhi_cntrl);
+ cur_state = mhi_cntrl->mhi_state;
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ /*
+ * Only proceed further if the reset is due to SYS_ERR. The host will also
+ * issue a reset during shutdown and we don't need to re-init in that case.
+ */
+ if (cur_state == MHI_STATE_SYS_ERR) {
+ mhi_ep_mmio_init(mhi_cntrl);
+
+ /* Set AMSS EE before signaling ready state */
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+ /* All set, notify the host that we are ready */
+ ret = mhi_ep_set_ready_state(mhi_cntrl);
+ if (ret)
+ return;
+
+ dev_dbg(dev, "READY state notification sent to the host\n");
+
+ ret = mhi_ep_enable(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret);
+ return;
+ }
+
+ enable_irq(mhi_cntrl->irq);
+ }
+}
+
+/*
+ * We don't need to do anything special other than setting the MHI SYS_ERR
+ * state. The host will reset all contexts and issue MHI RESET so that we
+ * can also recover from the error state.
+ */
+void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+ if (ret)
+ return;
+
+ /* Signal host that the device went to SYS_ERR state */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
+ if (ret)
+ dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
+}
+
+int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret, i;
+
+ /*
+ * Mask all interrupts until the state machine is ready. Interrupts will
+ * be enabled later with mhi_ep_enable().
+ */
+ mhi_ep_mmio_mask_interrupts(mhi_cntrl);
+ mhi_ep_mmio_init(mhi_cntrl);
+
+ mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_event)
+ return -ENOMEM;
+
+ /* Initialize command, channel and event rings */
+ mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
+ for (i = 0; i < mhi_cntrl->max_chan; i++)
+ mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
+ for (i = 0; i < mhi_cntrl->event_rings; i++)
+ mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);
+
+ mhi_cntrl->mhi_state = MHI_STATE_RESET;
+
+ /* Set AMSS EE before signaling ready state */
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+ /* All set, notify the host that we are ready */
+ ret = mhi_ep_set_ready_state(mhi_cntrl);
+ if (ret)
+ goto err_free_event;
+
+ dev_dbg(dev, "READY state notification sent to the host\n");
+
+ ret = mhi_ep_enable(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to enable MHI endpoint\n");
+ goto err_free_event;
+ }
+
+ enable_irq(mhi_cntrl->irq);
+ mhi_cntrl->enabled = true;
+
+ return 0;
+
+err_free_event:
+ kfree(mhi_cntrl->mhi_event);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_power_up);
+
+void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ if (mhi_cntrl->enabled)
+ mhi_ep_abort_transfer(mhi_cntrl);
+
+ kfree(mhi_cntrl->mhi_event);
+ disable_irq(mhi_cntrl->irq);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_power_down);
+
+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_chan *mhi_chan;
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Skip if the channel is not currently running */
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
+ if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
+ mutex_unlock(&mhi_chan->lock);
+ continue;
+ }
+
+ dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
+ /* Set channel state to SUSPENDED */
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
+ mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+ mutex_unlock(&mhi_chan->lock);
+ }
+}
+
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_chan *mhi_chan;
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Skip if the channel is not currently suspended */
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
+ if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
+ mutex_unlock(&mhi_chan->lock);
+ continue;
+ }
+
+ dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
+ /* Set channel state to RUNNING */
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
+ mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+ mutex_unlock(&mhi_chan->lock);
+ }
+}
+
+static void mhi_ep_release_device(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ mhi_dev->mhi_cntrl->mhi_dev = NULL;
+
+ /*
+ * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
+ * devices for the channels will only get created in mhi_ep_create_device()
+ * if the mhi_dev associated with them is NULL.
+ */
+ if (mhi_dev->ul_chan)
+ mhi_dev->ul_chan->mhi_dev = NULL;
+
+ if (mhi_dev->dl_chan)
+ mhi_dev->dl_chan->mhi_dev = NULL;
+
+ kfree(mhi_dev);
+}
+
+static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_device_type dev_type)
+{
+ struct mhi_ep_device *mhi_dev;
+ struct device *dev;
+
+ mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
+ if (!mhi_dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &mhi_dev->dev;
+ device_initialize(dev);
+ dev->bus = &mhi_ep_bus_type;
+ dev->release = mhi_ep_release_device;
+
+ /* Controller device is always allocated first */
+ if (dev_type == MHI_DEVICE_CONTROLLER)
+ /* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
+ dev->parent = mhi_cntrl->cntrl_dev;
+ else
+ /* for MHI client devices, parent is the MHI controller device */
+ dev->parent = &mhi_cntrl->mhi_dev->dev;
+
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+ mhi_dev->dev_type = dev_type;
+
+ return mhi_dev;
+}
+
+/*
+ * MHI channels are always defined in pairs with UL as the even numbered
+ * channel and DL as the odd numbered one. This function takes the UL channel
+ * (primary) ch_id and always looks at the next entry in the channel list for
+ * the corresponding DL channel (secondary).
+ */
+static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+ struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ struct mhi_ep_device *mhi_dev;
+ int ret;
+
+ /* Check if the channel name is the same for both UL and DL */
+ if (strcmp(mhi_chan->name, mhi_chan[1].name)) {
+ dev_err(dev, "UL and DL channel names are not the same: (%s) != (%s)\n",
+ mhi_chan->name, mhi_chan[1].name);
+ return -EINVAL;
+ }
+
+ mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
+ if (IS_ERR(mhi_dev))
+ return PTR_ERR(mhi_dev);
+
+ /* Configure primary channel */
+ mhi_dev->ul_chan = mhi_chan;
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+ /* Configure secondary channel as well */
+ mhi_chan++;
+ mhi_dev->dl_chan = mhi_chan;
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+ /* Channel name is the same for both UL and DL */
+ mhi_dev->name = mhi_chan->name;
+ dev_set_name(&mhi_dev->dev, "%s_%s",
+ dev_name(&mhi_cntrl->mhi_dev->dev),
+ mhi_dev->name);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ put_device(&mhi_dev->dev);
+
+ return ret;
+}
+
+static int mhi_ep_destroy_device(struct device *dev, void *data)
+{
+ struct mhi_ep_device *mhi_dev;
+ struct mhi_ep_cntrl *mhi_cntrl;
+ struct mhi_ep_chan *ul_chan, *dl_chan;
+
+ if (dev->bus != &mhi_ep_bus_type)
+ return 0;
+
+ mhi_dev = to_mhi_ep_device(dev);
+ mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ /* Only destroy devices created for channels */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ ul_chan = mhi_dev->ul_chan;
+ dl_chan = mhi_dev->dl_chan;
+
+ if (ul_chan)
+ put_device(&ul_chan->mhi_dev->dev);
+
+ if (dl_chan)
+ put_device(&dl_chan->mhi_dev->dev);
+
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
+ mhi_dev->name);
+
+ /* Notify the client and remove the device from MHI bus */
+ device_del(dev);
+ put_device(dev);
+
+ return 0;
+}
+
+static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config)
+{
+ const struct mhi_ep_channel_config *ch_cfg;
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ u32 chan, i;
+ int ret = -EINVAL;
+
+ mhi_cntrl->max_chan = config->max_channels;
+
+ /*
+ * Allocate max_channels supported by the MHI endpoint and populate
+ * only the defined channels
+ */
+ mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_chan)
+ return -ENOMEM;
+
+ for (i = 0; i < config->num_channels; i++) {
+ struct mhi_ep_chan *mhi_chan;
+
+ ch_cfg = &config->ch_cfg[i];
+
+ chan = ch_cfg->num;
+ if (chan >= mhi_cntrl->max_chan) {
+ dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n",
+ chan, mhi_cntrl->max_chan);
+ goto error_chan_cfg;
+ }
+
+ /* Bi-directional and directionless channels are not supported */
+ if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) {
+ dev_err(dev, "Invalid direction (%u) for channel (%u)\n",
+ ch_cfg->dir, chan);
+ goto error_chan_cfg;
+ }
+
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ mhi_chan->name = ch_cfg->name;
+ mhi_chan->chan = chan;
+ mhi_chan->dir = ch_cfg->dir;
+ mutex_init(&mhi_chan->lock);
+ }
+
+ return 0;
+
+error_chan_cfg:
+ kfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+
+/*
+ * Allocate channel and command rings here. Event rings will be allocated
+ * in mhi_ep_power_up() as the config comes from the host.
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config)
+{
+ struct mhi_ep_device *mhi_dev;
+ int ret;
+
+ if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
+ return -EINVAL;
+
+ ret = mhi_ep_chan_init(mhi_cntrl, config);
+ if (ret)
+ return ret;
+
+ mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
+ if (!mhi_cntrl->mhi_cmd) {
+ ret = -ENOMEM;
+ goto err_free_ch;
+ }
+
+ INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
+ INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
+ INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
+ INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);
+
+ mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
+ if (!mhi_cntrl->wq) {
+ ret = -ENOMEM;
+ goto err_free_cmd;
+ }
+
+ INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
+ INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
+ spin_lock_init(&mhi_cntrl->state_lock);
+ spin_lock_init(&mhi_cntrl->list_lock);
+ mutex_init(&mhi_cntrl->event_lock);
+
+ /* Set MHI version and AMSS EE before enumeration */
+ mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+ /* Set controller index */
+ ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto err_destroy_wq;
+
+ mhi_cntrl->index = ret;
+
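+ /* Keep the IRQ disabled until the stack is powered up via mhi_ep_power_up() */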
+ irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
+ ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
+ "doorbell_irq", mhi_cntrl);
+ if (ret) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
+ goto err_ida_free;
+ }
+
+ /* Allocate the controller device */
+ mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
+ if (IS_ERR(mhi_dev)) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
+ ret = PTR_ERR(mhi_dev);
+ goto err_free_irq;
+ }
+
+ dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
+ mhi_dev->name = dev_name(&mhi_dev->dev);
+ mhi_cntrl->mhi_dev = mhi_dev;
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ goto err_put_dev;
+
+ dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");
+
+ return 0;
+
+err_put_dev:
+ put_device(&mhi_dev->dev);
+err_free_irq:
+ free_irq(mhi_cntrl->irq, mhi_cntrl);
+err_ida_free:
+ ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
+err_destroy_wq:
+ destroy_workqueue(mhi_cntrl->wq);
+err_free_cmd:
+ kfree(mhi_cntrl->mhi_cmd);
+err_free_ch:
+ kfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_register_controller);
+
+/*
+ * It is expected that the controller drivers will power down the MHI EP stack
+ * using "mhi_ep_power_down()" before calling this function to unregister themselves.
+ */
+void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+ destroy_workqueue(mhi_cntrl->wq);
+
+ free_irq(mhi_cntrl->irq, mhi_cntrl);
+
+ kfree(mhi_cntrl->mhi_cmd);
+ kfree(mhi_cntrl->mhi_chan);
+
+ device_del(&mhi_dev->dev);
+ put_device(&mhi_dev->dev);
+
+ ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);
+
+static int mhi_ep_driver_probe(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
+ struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
+ struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;
+
+ ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+ dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
+
+ return mhi_drv->probe(mhi_dev, mhi_dev->id);
+}
+
+static int mhi_ep_driver_remove(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
+ struct mhi_result result = {};
+ struct mhi_ep_chan *mhi_chan;
+ int dir;
+
+ /* Skip if it is a controller device */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ /* Disconnect the channels associated with the driver */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Send channel disconnect status to the client driver */
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+
+ mhi_chan->state = MHI_CH_STATE_DISABLED;
+ mhi_chan->xfer_cb = NULL;
+ mutex_unlock(&mhi_chan->lock);
+ }
+
+ /* Remove the client driver now */
+ mhi_drv->remove(mhi_dev);
+
+ return 0;
+}
+
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
+{
+ struct device_driver *driver = &mhi_drv->driver;
+
+ if (!mhi_drv->probe || !mhi_drv->remove)
+ return -EINVAL;
+
+ /* Client drivers should have callbacks defined for both channels */
+ if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb)
+ return -EINVAL;
+
+ driver->bus = &mhi_ep_bus_type;
+ driver->owner = owner;
+ driver->probe = mhi_ep_driver_probe;
+ driver->remove = mhi_ep_driver_remove;
+
+ return driver_register(driver);
+}
+EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);
+
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
+{
+ driver_unregister(&mhi_drv->driver);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);
+
+static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+
+ return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
+ mhi_dev->name);
+}
+
+static int mhi_ep_match(struct device *dev, struct device_driver *drv)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
+ const struct mhi_device_id *id;
+
+ /*
+ * If the device is a controller type then there is no client driver
+ * associated with it
+ */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ for (id = mhi_drv->id_table; id->chan[0]; id++)
+ if (!strcmp(mhi_dev->name, id->chan)) {
+ mhi_dev->id = id;
+ return 1;
+ }
+
+ return 0;
+};
+
+struct bus_type mhi_ep_bus_type = {
+ .name = "mhi_ep",
+ .dev_name = "mhi_ep",
+ .match = mhi_ep_match,
+ .uevent = mhi_ep_uevent,
+};
+
+static int __init mhi_ep_init(void)
+{
+ return bus_register(&mhi_ep_bus_type);
+}
+
+static void __exit mhi_ep_exit(void)
+{
+ bus_unregister(&mhi_ep_bus_type);
+}
+
+postcore_initcall(mhi_ep_init);
+module_exit(mhi_ep_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI Bus Endpoint stack");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
diff --git a/drivers/bus/mhi/ep/mmio.c b/drivers/bus/mhi/ep/mmio.c
new file mode 100644
index 000000000000..b5bfd22f2c8e
--- /dev/null
+++ b/drivers/bus/mhi/ep/mmio.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/mhi_ep.h>
+
+#include "internal.h"
+
+u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset)
+{
+ return readl(mhi_cntrl->mmio + offset);
+}
+
+void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val)
+{
+ writel(val, mhi_cntrl->mmio + offset);
+}
+
+void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, offset);
+ regval &= ~mask;
+ regval |= (val << __ffs(mask)) & mask;
+ mhi_ep_mmio_write(mhi_cntrl, offset, regval);
+}
+
+u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(dev, offset);
+ regval &= mask;
+ regval >>= __ffs(mask);
+
+ return regval;
+}
+
+void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state,
+ bool *mhi_reset)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICTRL);
+ *state = FIELD_GET(MHICTRL_MHISTATE_MASK, regval);
+ *mhi_reset = !!FIELD_GET(MHICTRL_RESET_MASK, regval);
+}
+
+static void mhi_ep_mmio_set_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id, bool enable)
+{
+ u32 chid_mask, chid_shift, chdb_idx, val;
+
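+ /* Each channel doorbell mask register covers 32 channels */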
+ chid_shift = ch_id % 32;
+ chid_mask = BIT(chid_shift);
+ chdb_idx = ch_id / 32;
+
+ val = enable ? 1 : 0;
+
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(chdb_idx), chid_mask, val);
+
+ /* Update the local copy of the channel mask */
+ mhi_cntrl->chdb[chdb_idx].mask &= ~chid_mask;
+ mhi_cntrl->chdb[chdb_idx].mask |= val << chid_shift;
+}
+
+void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+ mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, true);
+}
+
+void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+ mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, false);
+}
+
+static void mhi_ep_mmio_set_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
+{
+ u32 val, i;
+
+ val = enable ? MHI_CHDB_INT_MASK_n_EN_ALL : 0;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(i), val);
+ mhi_cntrl->chdb[i].mask = val;
+ }
+}
+
+void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, true);
+}
+
+static void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, false);
+}
+
+bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ bool chdb = false;
+ u32 i;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+ mhi_cntrl->chdb[i].status = mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_STATUS_n(i));
+ if (mhi_cntrl->chdb[i].status)
+ chdb = true;
+ }
+
+ /* Return whether a channel doorbell interrupt occurred or not */
+ return chdb;
+}
+
+static void mhi_ep_mmio_set_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
+{
+ u32 val, i;
+
+ val = enable ? MHI_ERDB_INT_MASK_n_EN_ALL : 0;
+
+ for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_MASK_n(i), val);
+}
+
+static void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, false);
+}
+
+void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_MHICTRL_MASK, 1);
+}
+
+void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_MHICTRL_MASK, 0);
+}
+
+void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_CRDB_MASK, 1);
+}
+
+void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_CRDB_MASK, 0);
+}
+
+void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_disable_ctrl_interrupt(mhi_cntrl);
+ mhi_ep_mmio_disable_cmdb_interrupt(mhi_cntrl);
+ mhi_ep_mmio_mask_chdb_interrupts(mhi_cntrl);
+ mhi_ep_mmio_mask_erdb_interrupts(mhi_cntrl);
+}
+
+static void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 i;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
+ MHI_CHDB_INT_CLEAR_n_CLEAR_ALL);
+
+ for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_CLEAR_n(i),
+ MHI_ERDB_INT_CLEAR_n_CLEAR_ALL);
+
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR,
+ MHI_CTRL_INT_MMIO_WR_CLEAR |
+ MHI_CTRL_INT_CRDB_CLEAR |
+ MHI_CTRL_INT_CRDB_MHICTRL_CLEAR);
+}
+
+void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
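+ /* The 64-bit host address is split across the HIGHER and LOWER 32-bit registers */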
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_HIGHER);
+ mhi_cntrl->ch_ctx_host_pa = regval;
+ mhi_cntrl->ch_ctx_host_pa <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_LOWER);
+ mhi_cntrl->ch_ctx_host_pa |= regval;
+}
+
+void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_HIGHER);
+ mhi_cntrl->ev_ctx_host_pa = regval;
+ mhi_cntrl->ev_ctx_host_pa <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_LOWER);
+ mhi_cntrl->ev_ctx_host_pa |= regval;
+}
+
+void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_HIGHER);
+ mhi_cntrl->cmd_ctx_host_pa = regval;
+ mhi_cntrl->cmd_ctx_host_pa <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_LOWER);
+ mhi_cntrl->cmd_ctx_host_pa |= regval;
+}
+
+u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ u64 db_offset;
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h);
+ db_offset = regval;
+ db_offset <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l);
+ db_offset |= regval;
+
+ return db_offset;
+}
+
+void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value)
+{
+ mhi_ep_mmio_write(mhi_cntrl, EP_BHI_EXECENV, value);
+}
+
+void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHICTRL, MHICTRL_RESET_MASK, 0);
+}
+
+void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_write(mhi_cntrl, EP_MHICTRL, 0);
+ mhi_ep_mmio_write(mhi_cntrl, EP_MHISTATUS, 0);
+ mhi_ep_mmio_clear_interrupts(mhi_cntrl);
+}
+
+void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ mhi_cntrl->chdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_CHDBOFF);
+ mhi_cntrl->erdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_ERDBOFF);
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG);
+ mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval);
+ mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval);
+
+ mhi_ep_mmio_reset(mhi_cntrl);
+}
+
+void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG);
+ mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval);
+ mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval);
+}
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
new file mode 100644
index 000000000000..115518ec76a4
--- /dev/null
+++ b/drivers/bus/mhi/ep/ring.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/mhi_ep.h>
+#include "internal.h"
+
+size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr)
+{
+ return (ptr - ring->rbase) / sizeof(struct mhi_ring_element);
+}
+
+static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring)
+{
+ __le64 rlen;
+
+ memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64));
+
+ return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element);
+}
+
+void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring)
+{
+ ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size;
+}
+
+static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t start, copy_size;
+ int ret;
+
+ /* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). */
+ if (ring->type == RING_TYPE_ER)
+ return 0;
+
+ /* No need to cache the ring if write pointer is unmodified */
+ if (ring->wr_offset == end)
+ return 0;
+
+ start = ring->wr_offset;
+ if (start < end) {
+ copy_size = (end - start) * sizeof(struct mhi_ring_element);
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
+ (start * sizeof(struct mhi_ring_element)),
+ &ring->ring_cache[start], copy_size);
+ if (ret < 0)
+ return ret;
+ } else {
+ copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
+ (start * sizeof(struct mhi_ring_element)),
+ &ring->ring_cache[start], copy_size);
+ if (ret < 0)
+ return ret;
+
+ if (end) {
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase,
+ &ring->ring_cache[0],
+ end * sizeof(struct mhi_ring_element));
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size);
+
+ return 0;
+}
+
+static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr)
+{
+ size_t wr_offset;
+ int ret;
+
+ wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr);
+
+ /* Cache the host ring till write offset */
+ ret = __mhi_ep_cache_ring(ring, wr_offset);
+ if (ret)
+ return ret;
+
+ ring->wr_offset = wr_offset;
+
+ return 0;
+}
+
+int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring)
+{
+ u64 wr_ptr;
+
+ wr_ptr = mhi_ep_mmio_get_db(ring);
+
+ return mhi_ep_cache_ring(ring, wr_ptr);
+}
+
+/* TODO: Support for adding multiple ring elements to the ring */
+int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t old_offset = 0;
+ u32 num_free_elem;
+ __le64 rp;
+ int ret;
+
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write pointer\n");
+ return ret;
+ }
+
+ if (ring->rd_offset < ring->wr_offset)
+ num_free_elem = (ring->wr_offset - ring->rd_offset) - 1;
+ else
+ num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1;
+
+ /* Check if there is space in ring for adding at least an element */
+ if (!num_free_elem) {
+ dev_err(dev, "No space left in the ring\n");
+ return -ENOSPC;
+ }
+
+ old_offset = ring->rd_offset;
+ mhi_ep_ring_inc_index(ring);
+
+ dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
+
+ /* Update rp in ring context */
+ rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
+ memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
+
+ ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)),
+ sizeof(*el));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
+{
+ ring->type = type;
+ if (ring->type == RING_TYPE_CMD) {
+ ring->db_offset_h = EP_CRDB_HIGHER;
+ ring->db_offset_l = EP_CRDB_LOWER;
+ } else if (ring->type == RING_TYPE_CH) {
+ ring->db_offset_h = CHDB_HIGHER_n(id);
+ ring->db_offset_l = CHDB_LOWER_n(id);
+ ring->ch_id = id;
+ } else {
+ ring->db_offset_h = ERDB_HIGHER_n(id);
+ ring->db_offset_l = ERDB_LOWER_n(id);
+ }
+}
+
+int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ union mhi_ep_ring_ctx *ctx)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ __le64 val;
+ int ret;
+
+ ring->mhi_cntrl = mhi_cntrl;
+ ring->ring_ctx = ctx;
+ ring->ring_size = mhi_ep_ring_num_elems(ring);
+ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rbase, sizeof(u64));
+ ring->rbase = le64_to_cpu(val);
+
+ if (ring->type == RING_TYPE_CH)
+ ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);
+
+ if (ring->type == RING_TYPE_ER)
+ ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);
+
+ /* During ring init, both rp and wp are equal */
+ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
+ ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
+ ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
+
+ /* Allocate ring cache memory for holding the copy of host ring */
+ ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL);
+ if (!ring->ring_cache)
+ return -ENOMEM;
+
+ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64));
+ ret = mhi_ep_cache_ring(ring, le64_to_cpu(val));
+ if (ret) {
+ dev_err(dev, "Failed to cache ring\n");
+ kfree(ring->ring_cache);
+ return ret;
+ }
+
+ ring->started = true;
+
+ return 0;
+}
+
+void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
+{
+ ring->started = false;
+ kfree(ring->ring_cache);
+ ring->ring_cache = NULL;
+}
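mhi_ep_ring_add_element() treats the host ring as a circular buffer of fixed-size elements and always keeps one empty slot between the read and write offsets. A standalone sketch of the free-element arithmetic used above:

#include <stddef.h>

/* Free slots in a circular ring of ring_size elements, keeping a one-slot gap. */
static size_t ring_free_elems(size_t rd_offset, size_t wr_offset, size_t ring_size)
{
	if (rd_offset < wr_offset)
		return (wr_offset - rd_offset) - 1;
	return ((ring_size - rd_offset) + wr_offset) - 1;
}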
diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c
new file mode 100644
index 000000000000..3655c19e23c7
--- /dev/null
+++ b/drivers/bus/mhi/ep/sm.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/mhi_ep.h>
+#include "internal.h"
+
+bool __must_check mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_state cur_mhi_state,
+ enum mhi_state mhi_state)
+{
+ if (mhi_state == MHI_STATE_SYS_ERR)
+ return true; /* Allowed in any state */
+
+ if (mhi_state == MHI_STATE_READY)
+ return cur_mhi_state == MHI_STATE_RESET;
+
+ if (mhi_state == MHI_STATE_M0)
+ return cur_mhi_state == MHI_STATE_M3 || cur_mhi_state == MHI_STATE_READY;
+
+ if (mhi_state == MHI_STATE_M3)
+ return cur_mhi_state == MHI_STATE_M0;
+
+ return false;
+}
+
+int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ if (!mhi_ep_check_mhi_state(mhi_cntrl, mhi_cntrl->mhi_state, mhi_state)) {
+ dev_err(dev, "MHI state change to %s from %s is not allowed!\n",
+ mhi_state_str(mhi_state),
+ mhi_state_str(mhi_cntrl->mhi_state));
+ return -EACCES;
+ }
+
+ /* TODO: Add support for M1 and M2 states */
+ if (mhi_state == MHI_STATE_M1 || mhi_state == MHI_STATE_M2) {
+ dev_err(dev, "MHI state (%s) not supported\n", mhi_state_str(mhi_state));
+ return -EOPNOTSUPP;
+ }
+
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK, mhi_state);
+ mhi_cntrl->mhi_state = mhi_state;
+
+ if (mhi_state == MHI_STATE_READY)
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK, 1);
+
+ if (mhi_state == MHI_STATE_SYS_ERR)
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_SYSERR_MASK, 1);
+
+ return 0;
+}
+
+int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state old_state;
+ int ret;
+
+ /* If MHI is in M3, resume suspended channels */
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ old_state = mhi_cntrl->mhi_state;
+ if (old_state == MHI_STATE_M3)
+ mhi_ep_resume_channels(mhi_cntrl);
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ if (ret) {
+ mhi_ep_handle_syserr(mhi_cntrl);
+ return ret;
+ }
+
+ /* Signal host that the device moved to M0 */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
+ if (ret) {
+ dev_err(dev, "Failed sending M0 state change event\n");
+ return ret;
+ }
+
+ if (old_state == MHI_STATE_READY) {
+ /* Send AMSS EE event to host */
+ ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS);
+ if (ret) {
+ dev_err(dev, "Failed sending AMSS EE event\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ if (ret) {
+ mhi_ep_handle_syserr(mhi_cntrl);
+ return ret;
+ }
+
+ mhi_ep_suspend_channels(mhi_cntrl);
+
+ /* Signal host that the device moved to M3 */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
+ if (ret) {
+ dev_err(dev, "Failed sending M3 state change event\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state mhi_state;
+ int ret, is_ready;
+
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ /* Ensure that the MHISTATUS is set to RESET by host */
+ mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK);
+ is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK);
+
+ if (mhi_state != MHI_STATE_RESET || is_ready) {
+ dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+ return -EIO;
+ }
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ if (ret)
+ mhi_ep_handle_syserr(mhi_cntrl);
+
+ return ret;
+}
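mhi_ep_check_mhi_state() encodes a small transition table: SYS_ERR is reachable from any state, READY only from RESET, M0 from READY or M3, and M3 only from M0. A standalone sketch of the same check, with illustrative enum names:

/* Allowed endpoint state transitions, mirroring the checks in sm.c. */
enum ep_state { ST_RESET, ST_READY, ST_M0, ST_M3, ST_SYS_ERR };

static int transition_allowed(enum ep_state cur, enum ep_state next)
{
	switch (next) {
	case ST_SYS_ERR:
		return 1;	/* allowed from any state */
	case ST_READY:
		return cur == ST_RESET;
	case ST_M0:
		return cur == ST_READY || cur == ST_M3;
	case ST_M3:
		return cur == ST_M0;
	default:
		return 0;
	}
}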
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index b0da7ca4519c..26d0eddb1477 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -19,8 +19,8 @@
#include "internal.h"
/* Setup RDDM vector table for RDDM transfer and program RXVEC */
-void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
- struct image_info *img_info)
+int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info)
{
struct mhi_buf *mhi_buf = img_info->mhi_buf;
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
@@ -28,6 +28,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 sequence_id;
unsigned int i;
+ int ret;
for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
bhi_vec->dma_addr = mhi_buf->dma_addr;
@@ -45,11 +46,17 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);
- mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
- BHIE_RXVECDB_SEQNUM_BMSK, sequence_id);
+ ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
+ BHIE_RXVECDB_SEQNUM_BMSK, sequence_id);
+ if (ret) {
+ dev_err(dev, "Failed to write sequence ID for BHIE_RXVECDB\n");
+ return ret;
+ }
dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n",
&mhi_buf->dma_addr, mhi_buf->len, sequence_id);
+
+ return 0;
}
/* Collect RDDM buffer during kernel panic */
@@ -198,10 +205,13 @@ static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
- mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
- BHIE_TXVECDB_SEQNUM_BMSK, sequence_id);
+ ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
+ BHIE_TXVECDB_SEQNUM_BMSK, sequence_id);
read_unlock_bh(pm_lock);
+ if (ret)
+ return ret;
+
/* Wait for the image download to complete */
ret = wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index a665b8e92408..c137d55ccfa0 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -86,7 +86,7 @@ static ssize_t serial_number_show(struct device *dev,
struct mhi_device *mhi_dev = to_mhi_device(dev);
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n",
+ return sysfs_emit(buf, "Serial Number: %u\n",
mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);
@@ -100,17 +100,30 @@ static ssize_t oem_pk_hash_show(struct device *dev,
int i, cnt = 0;
for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
- cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
- "OEMPKHASH[%d]: 0x%x\n", i,
- mhi_cntrl->oem_pk_hash[i]);
+ cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
+ i, mhi_cntrl->oem_pk_hash[i]);
return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);
+static ssize_t soc_reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_soc_reset(mhi_cntrl);
+ return count;
+}
+static DEVICE_ATTR_WO(soc_reset);
+
static struct attribute *mhi_dev_attrs[] = {
&dev_attr_serial_number.attr,
&dev_attr_oem_pk_hash.attr,
+ &dev_attr_soc_reset.attr,
NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);
@@ -425,74 +438,65 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct {
u32 offset;
- u32 mask;
u32 val;
} reg_info[] = {
{
- CCABAP_HIGHER, U32_MAX,
+ CCABAP_HIGHER,
upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
},
{
- CCABAP_LOWER, U32_MAX,
+ CCABAP_LOWER,
lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
},
{
- ECABAP_HIGHER, U32_MAX,
+ ECABAP_HIGHER,
upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
},
{
- ECABAP_LOWER, U32_MAX,
+ ECABAP_LOWER,
lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
},
{
- CRCBAP_HIGHER, U32_MAX,
+ CRCBAP_HIGHER,
upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
},
{
- CRCBAP_LOWER, U32_MAX,
+ CRCBAP_LOWER,
lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
},
{
- MHICFG, MHICFG_NER_MASK,
- mhi_cntrl->total_ev_rings,
- },
- {
- MHICFG, MHICFG_NHWER_MASK,
- mhi_cntrl->hw_ev_rings,
- },
- {
- MHICTRLBASE_HIGHER, U32_MAX,
+ MHICTRLBASE_HIGHER,
upper_32_bits(mhi_cntrl->iova_start),
},
{
- MHICTRLBASE_LOWER, U32_MAX,
+ MHICTRLBASE_LOWER,
lower_32_bits(mhi_cntrl->iova_start),
},
{
- MHIDATABASE_HIGHER, U32_MAX,
+ MHIDATABASE_HIGHER,
upper_32_bits(mhi_cntrl->iova_start),
},
{
- MHIDATABASE_LOWER, U32_MAX,
+ MHIDATABASE_LOWER,
lower_32_bits(mhi_cntrl->iova_start),
},
{
- MHICTRLLIMIT_HIGHER, U32_MAX,
+ MHICTRLLIMIT_HIGHER,
upper_32_bits(mhi_cntrl->iova_stop),
},
{
- MHICTRLLIMIT_LOWER, U32_MAX,
+ MHICTRLLIMIT_LOWER,
lower_32_bits(mhi_cntrl->iova_stop),
},
{
- MHIDATALIMIT_HIGHER, U32_MAX,
+ MHIDATALIMIT_HIGHER,
upper_32_bits(mhi_cntrl->iova_stop),
},
{
- MHIDATALIMIT_LOWER, U32_MAX,
+ MHIDATALIMIT_LOWER,
lower_32_bits(mhi_cntrl->iova_stop),
},
- { 0, 0, 0 }
+ {0, 0}
};
dev_dbg(dev, "Initializing MHI registers\n");
@@ -534,8 +538,22 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
/* Write to MMIO registers */
for (i = 0; reg_info[i].offset; i++)
- mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
- reg_info[i].mask, reg_info[i].val);
+ mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
+ reg_info[i].val);
+
+ ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
+ mhi_cntrl->total_ev_rings);
+ if (ret) {
+ dev_err(dev, "Unable to write MHICFG register\n");
+ return ret;
+ }
+
+ ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
+ mhi_cntrl->hw_ev_rings);
+ if (ret) {
+ dev_err(dev, "Unable to write MHICFG register\n");
+ return ret;
+ }
return 0;
}
@@ -1103,8 +1121,15 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
*/
mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
mhi_cntrl->rddm_size);
- if (mhi_cntrl->rddm_image)
- mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
+ if (mhi_cntrl->rddm_image) {
+ ret = mhi_rddm_prepare(mhi_cntrl,
+ mhi_cntrl->rddm_image);
+ if (ret) {
+ mhi_free_bhie_table(mhi_cntrl,
+ mhi_cntrl->rddm_image);
+ goto error_reg_offset;
+ }
+ }
}
mutex_unlock(&mhi_cntrl->pm_mutex);
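The sysfs show() callbacks in init.c above move from snprintf() to sysfs_emit()/sysfs_emit_at(), which bound the output to PAGE_SIZE and warn on misuse. A hedged sketch of the multi-line pattern, with illustrative names only:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Sketch: emit one line per entry into a sysfs attribute buffer. */
static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	int i, cnt = 0;

	for (i = 0; i < 4; i++)
		cnt += sysfs_emit_at(buf, cnt, "entry[%d]: 0x%x\n", i, i * 2);

	return cnt;
}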
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index b47d8ef2624a..01fd10a399b6 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -324,8 +324,9 @@ int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
u32 val, u32 delayus);
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
u32 offset, u32 val);
-void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
- u32 offset, u32 mask, u32 val);
+int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 val);
void mhi_ring_er_db(struct mhi_event *mhi_event);
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
dma_addr_t db_val);
@@ -339,7 +340,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
-void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index 9021be7f2359..f3aef77a6a4a 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -65,19 +65,22 @@ void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
}
-void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
- u32 offset, u32 mask, u32 val)
+int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 val)
{
int ret;
u32 tmp;
ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
if (ret)
- return;
+ return ret;
tmp &= ~mask;
tmp |= (val << __ffs(mask));
mhi_write_reg(mhi_cntrl, base, offset, tmp);
+
+ return 0;
}
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
@@ -531,18 +534,13 @@ irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring)
{
- dma_addr_t ctxt_wp;
-
/* Update the WP */
ring->wp += ring->el_size;
- ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size;
- if (ring->wp >= (ring->base + ring->len)) {
+ if (ring->wp >= (ring->base + ring->len))
ring->wp = ring->base;
- ctxt_wp = ring->iommu_base;
- }
- *ring->ctxt_wp = cpu_to_le64(ctxt_wp);
+ *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base));
/* Update the RP */
ring->rp += ring->el_size;
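mhi_recycle_ev_ring_element() now derives the context write pointer directly from the ring's IOMMU base plus the write pointer's byte offset into the ring buffer, rather than tracking it separately and special-casing the wrap. A standalone sketch of that address arithmetic, with plain integers standing in for the DMA types:

#include <stdint.h>

/* Host-visible wp = DMA base + byte offset of wp within the ring buffer. */
static uint64_t ctxt_wp_from_ring(uint64_t iommu_base, uintptr_t wp, uintptr_t base)
{
	return iommu_base + (wp - base);
}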
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 541ced27d941..841626727f6b 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -371,7 +371,16 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.sideband_wake = false,
};
-static const struct mhi_channel_config mhi_mv31_channels[] = {
+static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
+ .name = "foxconn-sdx65",
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_channel_config mhi_mv3x_channels[] = {
MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
/* MBIM Control Channel */
@@ -382,25 +391,33 @@ static const struct mhi_channel_config mhi_mv31_channels[] = {
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
};
-static struct mhi_event_config mhi_mv31_events[] = {
+static struct mhi_event_config mhi_mv3x_events[] = {
MHI_EVENT_CONFIG_CTRL(0, 256),
MHI_EVENT_CONFIG_DATA(1, 256),
MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
};
-static const struct mhi_controller_config modem_mv31_config = {
+static const struct mhi_controller_config modem_mv3x_config = {
.max_channels = 128,
.timeout_ms = 20000,
- .num_channels = ARRAY_SIZE(mhi_mv31_channels),
- .ch_cfg = mhi_mv31_channels,
- .num_events = ARRAY_SIZE(mhi_mv31_events),
- .event_cfg = mhi_mv31_events,
+ .num_channels = ARRAY_SIZE(mhi_mv3x_channels),
+ .ch_cfg = mhi_mv3x_channels,
+ .num_events = ARRAY_SIZE(mhi_mv3x_events),
+ .event_cfg = mhi_mv3x_events,
};
static const struct mhi_pci_dev_info mhi_mv31_info = {
.name = "cinterion-mv31",
- .config = &modem_mv31_config,
+ .config = &modem_mv3x_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+};
+
+static const struct mhi_pci_dev_info mhi_mv32_info = {
+ .name = "cinterion-mv32",
+ .config = &modem_mv3x_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.mru_default = 32768,
@@ -446,20 +463,100 @@ static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
.sideband_wake = false,
};
+static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
+ MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
+};
+
+static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 128),
+ MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
+};
+
+static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
+ .ch_cfg = mhi_telit_fn980_hw_v1_channels,
+ .num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
+ .event_cfg = mhi_telit_fn980_hw_v1_events,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
+ .name = "telit-fn980-hwv1",
+ .fw = "qcom/sdx55m/sbl1.mbn",
+ .edl = "qcom/sdx55m/edl.mbn",
+ .config = &modem_telit_fn980_hw_v1_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
+static struct mhi_event_config mhi_telit_fn990_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 128),
+ MHI_EVENT_CONFIG_DATA(1, 128),
+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
+};
+
+static const struct mhi_controller_config modem_telit_fn990_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
+ .ch_cfg = mhi_telit_fn990_channels,
+ .num_events = ARRAY_SIZE(mhi_telit_fn990_events),
+ .event_cfg = mhi_telit_fn990_events,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
+ .name = "telit-fn990",
+ .config = &modem_telit_fn990_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+};
+
+/* Keep the list sorted based on the PID. New VID should be added as the last entry */
static const struct pci_device_id mhi_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
/* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
+ /* Telit FN980 hardware revision v1 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
- { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
- .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
+ /* Telit FN990 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
{ PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
{ PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
- { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
- .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
/* T99W175 (sdx55), Both for eSIM and Non-eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
@@ -472,9 +569,21 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* T99W175 (sdx55), Based on Qualcomm new baseline */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* T99W368 (sdx65) */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ /* T99W373 (sdx62) */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
/* MV31-W (Cinterion) */
{ PCI_DEVICE(0x1269, 0x00b3),
.driver_data = (kernel_ulong_t) &mhi_mv31_info },
+ /* MV32-WA (Cinterion) */
+ { PCI_DEVICE(0x1269, 0x00ba),
+ .driver_data = (kernel_ulong_t) &mhi_mv32_info },
+ /* MV32-WB (Cinterion) */
+ { PCI_DEVICE(0x1269, 0x00bb),
+ .driver_data = (kernel_ulong_t) &mhi_mv32_info },
{ }
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index 3d90b8ecd3d9..dc2e8ff3bff2 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -129,13 +129,20 @@ enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cn
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
if (state == MHI_STATE_RESET) {
- mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
- MHICTRL_RESET_MASK, 1);
+ ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, 1);
} else {
- mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
- MHICTRL_MHISTATE_MASK, state);
+ ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_MHISTATE_MASK, state);
}
+
+ if (ret)
+ dev_err(dev, "Failed to set MHI state to: %s\n",
+ mhi_state_str(state));
}
/* NOP for backward compatibility, host allowed to ring DB in M2 state */
@@ -476,6 +483,15 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
* hence re-program it
*/
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+
+ if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
+ /* wait for ready to be set */
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
+ MHISTATUS,
+ MHISTATUS_READY_MASK, 1, 25000);
+ if (ret)
+ dev_err(dev, "Device failed to enter READY state\n");
+ }
}
dev_dbg(dev,
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 18363aa2a49d..9a7d12332fad 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -3395,7 +3395,9 @@ static int sysc_remove(struct platform_device *pdev)
struct sysc *ddata = platform_get_drvdata(pdev);
int error;
- cancel_delayed_work_sync(&ddata->idle_work);
+ /* Device can still be enabled, see deferred idle quirk in probe */
+ if (cancel_delayed_work_sync(&ddata->idle_work))
+ ti_sysc_idle(&ddata->idle_work.work);
error = pm_runtime_resume_and_get(ddata->dev);
if (error < 0) {
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 55f48375e3fe..69fd31ffb847 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -18,7 +18,8 @@ config TTY_PRINTK
The feature is useful to inline user messages with kernel
messages.
In order to use this feature, you should output user messages
- to /dev/ttyprintk or redirect console to this TTY.
+ to /dev/ttyprintk or redirect console to this TTY, or boot
+ the kernel with console=ttyprintk.
If unsure, say N.
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index a087156a5818..b3f2d55dc551 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -385,6 +385,19 @@ config HW_RANDOM_PIC32
If unsure, say Y.
+config HW_RANDOM_POLARFIRE_SOC
+ tristate "Microchip PolarFire SoC Random Number Generator support"
+ depends on HW_RANDOM && POLARFIRE_SOC_SYS_CTRL
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on PolarFire SoC (MPFS).
+
+ To compile this driver as a module, choose M here. The
+ module will be called mpfs_rng.
+
+ If unsure, say N.
+
+
config HW_RANDOM_MESON
tristate "Amlogic Meson Random Number Generator support"
depends on HW_RANDOM
@@ -527,7 +540,7 @@ config HW_RANDOM_ARM_SMCCC_TRNG
config HW_RANDOM_CN10K
tristate "Marvell CN10K Random Number Generator support"
- depends on HW_RANDOM && PCI && ARM64
+ depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST))
default HW_RANDOM
help
This driver provides support for the True Random Number
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 584d47ba32f7..3e948cf04476 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -46,3 +46,4 @@ obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o
obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o
obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o
obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o
+obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o
diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c
index 35001c63648b..a01e9307737c 100644
--- a/drivers/char/hw_random/cn10k-rng.c
+++ b/drivers/char/hw_random/cn10k-rng.c
@@ -31,26 +31,23 @@ struct cn10k_rng {
#define PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE 0xc2000b0f
-static int reset_rng_health_state(struct cn10k_rng *rng)
+static unsigned long reset_rng_health_state(struct cn10k_rng *rng)
{
struct arm_smccc_res res;
/* Send SMC service call to reset EBG health state */
arm_smccc_smc(PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE, 0, 0, 0, 0, 0, 0, 0, &res);
- if (res.a0 != 0UL)
- return -EIO;
-
- return 0;
+ return res.a0;
}
static int check_rng_health(struct cn10k_rng *rng)
{
u64 status;
- int err;
+ unsigned long err;
/* Skip checking health */
if (!rng->reg_base)
- return 0;
+ return -ENODEV;
status = readq(rng->reg_base + RNM_PF_EBG_HEALTH);
if (status & BIT_ULL(20)) {
@@ -58,7 +55,9 @@ static int check_rng_health(struct cn10k_rng *rng)
if (err) {
dev_err(&rng->pdev->dev, "HWRNG: Health test failed (status=%llx)\n",
status);
- dev_err(&rng->pdev->dev, "HWRNG: error during reset\n");
+ dev_err(&rng->pdev->dev, "HWRNG: error during reset (error=%lx)\n",
+ err);
+ return -EIO;
}
}
return 0;
@@ -90,6 +89,7 @@ static int cn10k_rng_read(struct hwrng *hwrng, void *data,
{
struct cn10k_rng *rng = (struct cn10k_rng *)hwrng->priv;
unsigned int size;
+ u8 *pos = data;
int err = 0;
u64 value;
@@ -102,17 +102,20 @@ static int cn10k_rng_read(struct hwrng *hwrng, void *data,
while (size >= 8) {
cn10k_read_trng(rng, &value);
- *((u64 *)data) = (u64)value;
+ *((u64 *)pos) = value;
size -= 8;
- data += 8;
+ pos += 8;
}
- while (size > 0) {
+ if (size > 0) {
cn10k_read_trng(rng, &value);
- *((u8 *)data) = (u8)value;
- size--;
- data++;
+ while (size > 0) {
+ *pos = (u8)value;
+ value >>= 8;
+ size--;
+ pos++;
+ }
}
return max - size;
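For a request that is not a multiple of eight bytes, the reworked cn10k_rng_read() tail reads the TRNG once and peels individual bytes off the 64-bit value, instead of issuing one hardware read per remaining byte. A standalone sketch of the byte extraction:

#include <stddef.h>
#include <stdint.h>

/* Copy the low 'size' bytes (size < 8) of one 64-bit TRNG word into pos. */
static void copy_tail_bytes(uint8_t *pos, uint64_t value, size_t size)
{
	while (size > 0) {
		*pos++ = (uint8_t)value;
		value >>= 8;
		size--;
	}
}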
diff --git a/drivers/char/hw_random/mpfs-rng.c b/drivers/char/hw_random/mpfs-rng.c
new file mode 100644
index 000000000000..5813da617a48
--- /dev/null
+++ b/drivers/char/hw_random/mpfs-rng.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip PolarFire SoC (MPFS) hardware random driver
+ *
+ * Copyright (c) 2020-2022 Microchip Corporation. All rights reserved.
+ *
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ */
+
+#include <linux/module.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+#include <soc/microchip/mpfs.h>
+
+#define CMD_OPCODE 0x21
+#define CMD_DATA_SIZE 0U
+#define CMD_DATA NULL
+#define MBOX_OFFSET 0U
+#define RESP_OFFSET 0U
+#define RNG_RESP_BYTES 32U
+
+struct mpfs_rng {
+ struct mpfs_sys_controller *sys_controller;
+ struct hwrng rng;
+};
+
+static int mpfs_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct mpfs_rng *rng_priv = container_of(rng, struct mpfs_rng, rng);
+ u32 response_msg[RNG_RESP_BYTES / sizeof(u32)];
+ unsigned int count = 0, copy_size_bytes;
+ int ret;
+
+ struct mpfs_mss_response response = {
+ .resp_status = 0U,
+ .resp_msg = (u32 *)response_msg,
+ .resp_size = RNG_RESP_BYTES
+ };
+ struct mpfs_mss_msg msg = {
+ .cmd_opcode = CMD_OPCODE,
+ .cmd_data_size = CMD_DATA_SIZE,
+ .response = &response,
+ .cmd_data = CMD_DATA,
+ .mbox_offset = MBOX_OFFSET,
+ .resp_offset = RESP_OFFSET
+ };
+
+ while (count < max) {
+ ret = mpfs_blocking_transaction(rng_priv->sys_controller, &msg);
+ if (ret)
+ return ret;
+
+ copy_size_bytes = max - count > RNG_RESP_BYTES ? RNG_RESP_BYTES : max - count;
+ memcpy(buf + count, response_msg, copy_size_bytes);
+
+ count += copy_size_bytes;
+ if (!wait)
+ break;
+ }
+
+ return count;
+}
+
+static int mpfs_rng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mpfs_rng *rng_priv;
+ int ret;
+
+ rng_priv = devm_kzalloc(dev, sizeof(*rng_priv), GFP_KERNEL);
+ if (!rng_priv)
+ return -ENOMEM;
+
+ rng_priv->sys_controller = mpfs_sys_controller_get(&pdev->dev);
+ if (IS_ERR(rng_priv->sys_controller))
+ return dev_err_probe(dev, PTR_ERR(rng_priv->sys_controller),
+ "Failed to register system controller hwrng sub device\n");
+
+ rng_priv->rng.read = mpfs_rng_read;
+ rng_priv->rng.name = pdev->name;
+ rng_priv->rng.quality = 1024;
+
+ platform_set_drvdata(pdev, rng_priv);
+
+ ret = devm_hwrng_register(&pdev->dev, &rng_priv->rng);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register MPFS hwrng\n");
+
+ dev_info(&pdev->dev, "Registered MPFS hwrng\n");
+
+ return 0;
+}
+
+static struct platform_driver mpfs_rng_driver = {
+ .driver = {
+ .name = "mpfs-rng",
+ },
+ .probe = mpfs_rng_probe,
+};
+module_platform_driver(mpfs_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_DESCRIPTION("PolarFire SoC (MPFS) hardware random driver");
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index e0d77fa048fb..f06e4f95114f 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -92,7 +92,7 @@ static int __maybe_unused omap_rom_rng_runtime_resume(struct device *dev)
r = ddata->rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
if (r != 0) {
- clk_disable(ddata->clk);
+ clk_disable_unprepare(ddata->clk);
dev_err(dev, "HW init failed: %d\n", r);
return -EIO;
diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c
index a948c0727b2b..96b5d546d136 100644
--- a/drivers/char/hw_random/optee-rng.c
+++ b/drivers/char/hw_random/optee-rng.c
@@ -115,7 +115,7 @@ static size_t get_optee_rng_data(struct optee_rng_private *pvt_data,
static int optee_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
- size_t read = 0, rng_size = 0;
+ size_t read = 0, rng_size;
int timeout = 1;
u8 *data = buf;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index cc296f0823bd..84ca98ed1dad 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -101,7 +101,7 @@ static inline bool should_stop_iteration(void)
{
if (need_resched())
cond_resched();
- return fatal_signal_pending(current);
+ return signal_pending(current);
}
/*
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index ca5141ed5ef3..cba19bfdc44d 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -100,17 +100,18 @@ static const struct seq_operations misc_seq_ops = {
static int misc_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
- struct miscdevice *c;
+ struct miscdevice *c = NULL, *iter;
int err = -ENODEV;
const struct file_operations *new_fops = NULL;
mutex_lock(&misc_mtx);
- list_for_each_entry(c, &misc_list, list) {
- if (c->minor == minor) {
- new_fops = fops_get(c->fops);
- break;
- }
+ list_for_each_entry(iter, &misc_list, list) {
+ if (iter->minor != minor)
+ continue;
+ c = iter;
+ new_fops = fops_get(iter->fops);
+ break;
}
if (!new_fops) {
@@ -118,11 +119,12 @@ static int misc_open(struct inode *inode, struct file *file)
request_module("char-major-%d-%d", MISC_MAJOR, minor);
mutex_lock(&misc_mtx);
- list_for_each_entry(c, &misc_list, list) {
- if (c->minor == minor) {
- new_fops = fops_get(c->fops);
- break;
- }
+ list_for_each_entry(iter, &misc_list, list) {
+ if (iter->minor != minor)
+ continue;
+ c = iter;
+ new_fops = fops_get(iter->fops);
+ break;
}
if (!new_fops)
goto fail;
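misc_open() now walks misc_list with a dedicated iterator and only assigns the result pointer on a match, so nothing after the loop can dereference the type-confused cursor value that list_for_each_entry() leaves behind when no entry matches. A hedged sketch of the pattern, with made-up names:

#include <linux/list.h>

struct node {
	int key;
	struct list_head list;
};

/* Find a node by key without reusing the loop cursor after the loop. */
static struct node *find_node(struct list_head *head, int key)
{
	struct node *found = NULL, *iter;

	list_for_each_entry(iter, head, list) {
		if (iter->key != key)
			continue;
		found = iter;
		break;
	}

	return found;	/* NULL when nothing matched */
}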
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 78baba55a8b5..8fc49b038372 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -922,7 +922,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd)
// BIT7:parity error
// BIT6:framing error
- if (status & (BIT7 + BIT6)) {
+ if (status & (BIT7 | BIT6)) {
if (status & BIT7)
icount->parity++;
else
@@ -1418,7 +1418,11 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
/* byte size and parity */
-
+ if ((cflag & CSIZE) != CS8) {
+ cflag &= ~CSIZE;
+ cflag |= CS7;
+ tty->termios.c_cflag = cflag;
+ }
info->params.data_bits = tty_get_char_size(cflag);
if (cflag & CSTOPB)
@@ -1432,10 +1436,8 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
info->params.parity = ASYNC_PARITY_ODD;
else
info->params.parity = ASYNC_PARITY_EVEN;
-#ifdef CMSPAR
if (cflag & CMSPAR)
info->params.parity = ASYNC_PARITY_SPACE;
-#endif
}
/* calculate number of jiffies to transmit a full
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index adf941c47506..ed45d04905c2 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -11,6 +11,7 @@
* of the boot process, for example.
*/
+#include <linux/console.h>
#include <linux/device.h>
#include <linux/serial.h>
#include <linux/tty.h>
@@ -163,6 +164,18 @@ static const struct tty_port_operations tpk_port_ops = {
static struct tty_driver *ttyprintk_driver;
+static struct tty_driver *ttyprintk_console_device(struct console *c,
+ int *index)
+{
+ *index = 0;
+ return ttyprintk_driver;
+}
+
+static struct console ttyprintk_console = {
+ .name = "ttyprintk",
+ .device = ttyprintk_console_device,
+};
+
static int __init ttyprintk_init(void)
{
int ret;
@@ -195,6 +208,8 @@ static int __init ttyprintk_init(void)
goto error;
}
+ register_console(&ttyprintk_console);
+
return 0;
error:
@@ -205,6 +220,7 @@ error:
static void __exit ttyprintk_exit(void)
{
+ unregister_console(&ttyprintk_console);
tty_unregister_driver(ttyprintk_driver);
tty_driver_kref_put(ttyprintk_driver);
tty_port_destroy(&tpk_port.port);
diff --git a/drivers/char/xillybus/xillybus_class.c b/drivers/char/xillybus/xillybus_class.c
index 5046486011c8..0f238648dcfe 100644
--- a/drivers/char/xillybus/xillybus_class.c
+++ b/drivers/char/xillybus/xillybus_class.c
@@ -174,18 +174,17 @@ void xillybus_cleanup_chrdev(void *private_data,
struct device *dev)
{
int minor;
- struct xilly_unit *unit;
- bool found = false;
+ struct xilly_unit *unit = NULL, *iter;
mutex_lock(&unit_mutex);
- list_for_each_entry(unit, &unit_list, list_entry)
- if (unit->private_data == private_data) {
- found = true;
+ list_for_each_entry(iter, &unit_list, list_entry)
+ if (iter->private_data == private_data) {
+ unit = iter;
break;
}
- if (!found) {
+ if (!unit) {
dev_err(dev, "Weird bug: Failed to find unit\n");
mutex_unlock(&unit_mutex);
return;
@@ -216,22 +215,21 @@ int xillybus_find_inode(struct inode *inode,
{
int minor = iminor(inode);
int major = imajor(inode);
- struct xilly_unit *unit;
- bool found = false;
+ struct xilly_unit *unit = NULL, *iter;
mutex_lock(&unit_mutex);
- list_for_each_entry(unit, &unit_list, list_entry)
- if (unit->major == major &&
- minor >= unit->lowest_minor &&
- minor < (unit->lowest_minor + unit->num_nodes)) {
- found = true;
+ list_for_each_entry(iter, &unit_list, list_entry)
+ if (iter->major == major &&
+ minor >= iter->lowest_minor &&
+ minor < (iter->lowest_minor + iter->num_nodes)) {
+ unit = iter;
break;
}
mutex_unlock(&unit_mutex);
- if (!found)
+ if (!unit)
return -ENODEV;
*private_data = unit->private_data;
diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c
index dc3551796e5e..39bcbfd908b4 100644
--- a/drivers/char/xillybus/xillyusb.c
+++ b/drivers/char/xillybus/xillyusb.c
@@ -549,6 +549,7 @@ static void cleanup_dev(struct kref *kref)
if (xdev->workq)
destroy_workqueue(xdev->workq);
+ usb_put_dev(xdev->udev);
kfree(xdev->channels); /* Argument may be NULL, and that's fine */
kfree(xdev);
}
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 5d596e778ff4..48f8f4221e21 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -210,6 +210,15 @@ config COMMON_CLK_CS2000_CP
help
If you say yes here you get support for the CS2000 clock multiplier.
+config COMMON_CLK_EN7523
+ bool "Clock driver for Airoha EN7523 SoC system clocks"
+ depends on OF
+ depends on ARCH_AIROHA || COMPILE_TEST
+ default ARCH_AIROHA
+ help
+ This driver provides the fixed clocks and gates present on Airoha
+ ARM silicon.
+
config COMMON_CLK_FSL_FLEXSPI
tristate "Clock driver for FlexSPI on Layerscape SoCs"
depends on ARCH_LAYERSCAPE || COMPILE_TEST
@@ -368,6 +377,11 @@ config COMMON_CLK_VC5
This driver supports the IDT VersaClock 5 and VersaClock 6
programmable clock generators.
+config COMMON_CLK_STM32MP135
+ def_bool COMMON_CLK && MACH_STM32MP13
+ help
+ Support for stm32mp135 SoC family clocks
+
config COMMON_CLK_STM32MP157
def_bool COMMON_CLK && MACH_STM32MP157
help
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 21cb96b022e6..d5db170d38d2 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
obj-$(CONFIG_ARCH_SPARX5) += clk-sparx5.o
+obj-$(CONFIG_COMMON_CLK_EN7523) += clk-en7523.o
obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
obj-$(CONFIG_COMMON_CLK_FSL_FLEXSPI) += clk-fsl-flexspi.o
obj-$(CONFIG_COMMON_CLK_FSL_SAI) += clk-fsl-sai.o
@@ -114,6 +115,7 @@ obj-y += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_ARCH_STI) += st/
+obj-$(CONFIG_ARCH_STM32) += stm32/
obj-$(CONFIG_SOC_STARFIVE) += starfive/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-y += sunxi-ng/
diff --git a/drivers/clk/actions/owl-pll.c b/drivers/clk/actions/owl-pll.c
index 02437bdedf4d..155f313986b4 100644
--- a/drivers/clk/actions/owl-pll.c
+++ b/drivers/clk/actions/owl-pll.c
@@ -25,7 +25,7 @@ static u32 owl_pll_calculate_mul(struct owl_pll_hw *pll_hw, unsigned long rate)
else if (mul > pll_hw->max_mul)
mul = pll_hw->max_mul;
- return mul &= mul_mask(pll_hw);
+ return mul & mul_mask(pll_hw);
}
static unsigned long _get_table_rate(const struct clk_pll_table *table,
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 9d09621549b9..73518009a0f2 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -345,7 +345,7 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
int ret;
clks = devm_kcalloc(rpi->dev,
- sizeof(*clks), RPI_FIRMWARE_NUM_CLK_ID,
+ RPI_FIRMWARE_NUM_CLK_ID, sizeof(*clks),
GFP_KERNEL);
if (!clks)
return -ENOMEM;
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
index c91e9096b070..5467d941ddfd 100644
--- a/drivers/clk/clk-cdce706.c
+++ b/drivers/clk/clk-cdce706.c
@@ -627,8 +627,7 @@ of_clk_cdce_get(struct of_phandle_args *clkspec, void *data)
return &cdce->clkout[idx].hw;
}
-static int cdce706_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int cdce706_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct cdce706_dev_data *cdce;
@@ -692,7 +691,7 @@ static struct i2c_driver cdce706_i2c_driver = {
.name = "cdce706",
.of_match_table = of_match_ptr(cdce706_dt_match),
},
- .probe = cdce706_probe,
+ .probe_new = cdce706_probe,
.remove = cdce706_remove,
.id_table = cdce706_id,
};
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index 308b353815e1..ef9a2d44e40c 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -634,11 +634,20 @@ static struct regmap_bus regmap_cdce925_bus = {
.read = cdce925_regmap_i2c_read,
};
-static int cdce925_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id cdce925_id[] = {
+ { "cdce913", CDCE913 },
+ { "cdce925", CDCE925 },
+ { "cdce937", CDCE937 },
+ { "cdce949", CDCE949 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, cdce925_id);
+
+static int cdce925_probe(struct i2c_client *client)
{
struct clk_cdce925_chip *data;
struct device_node *node = client->dev.of_node;
+ const struct i2c_device_id *id = i2c_match_id(cdce925_id, client);
const char *parent_name;
const char *pll_clk_name[MAX_NUMBER_OF_PLLS] = {NULL,};
struct clk_init_data init;
@@ -814,15 +823,6 @@ error:
return err;
}
-static const struct i2c_device_id cdce925_id[] = {
- { "cdce913", CDCE913 },
- { "cdce925", CDCE925 },
- { "cdce937", CDCE937 },
- { "cdce949", CDCE949 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, cdce925_id);
-
static const struct of_device_id clk_cdce925_of_match[] = {
{ .compatible = "ti,cdce913" },
{ .compatible = "ti,cdce925" },
@@ -837,7 +837,7 @@ static struct i2c_driver cdce925_driver = {
.name = "cdce925",
.of_match_table = of_match_ptr(clk_cdce925_of_match),
},
- .probe = cdce925_probe,
+ .probe_new = cdce925_probe,
.id_table = cdce925_id,
};
module_i2c_driver(cdce925_driver);
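cdce706 and cdce925 above (and si5351, si544 and others below) are converted from the two-argument i2c probe() to probe_new(); drivers that still need the matched id entry recover it with i2c_match_id(). A hedged sketch of the conversion, with illustrative names only:

#include <linux/i2c.h>

static const struct i2c_device_id example_ids[] = {
	{ "example-a", 0 },
	{ }
};

/* Sketch: probe_new() callback that still looks up its i2c_device_id. */
static int example_probe(struct i2c_client *client)
{
	const struct i2c_device_id *id = i2c_match_id(example_ids, client);

	if (!id)
		return -ENODEV;

	return 0;
}

static struct i2c_driver example_driver = {
	.driver		= { .name = "example" },
	.probe_new	= example_probe,
	.id_table	= example_ids,
};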
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index dc5040a84dcc..aa5c72bab83e 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -570,8 +570,7 @@ static int cs2000_remove(struct i2c_client *client)
return 0;
}
-static int cs2000_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int cs2000_probe(struct i2c_client *client)
{
struct cs2000_priv *priv;
struct device *dev = &client->dev;
@@ -625,7 +624,7 @@ static struct i2c_driver cs2000_driver = {
.pm = &cs2000_pm_ops,
.of_match_table = cs2000_of_match,
},
- .probe = cs2000_probe,
+ .probe_new = cs2000_probe,
.remove = cs2000_remove,
.id_table = cs2000_id,
};
diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
new file mode 100644
index 000000000000..29f0126cbd05
--- /dev/null
+++ b/drivers/clk/clk-en7523.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/delay.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/en7523-clk.h>
+
+#define REG_PCI_CONTROL 0x88
+#define REG_PCI_CONTROL_PERSTOUT BIT(29)
+#define REG_PCI_CONTROL_PERSTOUT1 BIT(26)
+#define REG_PCI_CONTROL_REFCLK_EN1 BIT(22)
+#define REG_GSW_CLK_DIV_SEL 0x1b4
+#define REG_EMI_CLK_DIV_SEL 0x1b8
+#define REG_BUS_CLK_DIV_SEL 0x1bc
+#define REG_SPI_CLK_DIV_SEL 0x1c4
+#define REG_SPI_CLK_FREQ_SEL 0x1c8
+#define REG_NPU_CLK_DIV_SEL 0x1fc
+#define REG_CRYPTO_CLKSRC 0x200
+#define REG_RESET_CONTROL 0x834
+#define REG_RESET_CONTROL_PCIEHB BIT(29)
+#define REG_RESET_CONTROL_PCIE1 BIT(27)
+#define REG_RESET_CONTROL_PCIE2 BIT(26)
+
+struct en_clk_desc {
+ int id;
+ const char *name;
+ u32 base_reg;
+ u8 base_bits;
+ u8 base_shift;
+ union {
+ const unsigned int *base_values;
+ unsigned int base_value;
+ };
+ size_t n_base_values;
+
+ u16 div_reg;
+ u8 div_bits;
+ u8 div_shift;
+ u16 div_val0;
+ u8 div_step;
+};
+
+struct en_clk_gate {
+ void __iomem *base;
+ struct clk_hw hw;
+};
+
+static const u32 gsw_base[] = { 400000000, 500000000 };
+static const u32 emi_base[] = { 333000000, 400000000 };
+static const u32 bus_base[] = { 500000000, 540000000 };
+static const u32 slic_base[] = { 100000000, 3125000 };
+static const u32 npu_base[] = { 333000000, 400000000, 500000000 };
+
+static const struct en_clk_desc en7523_base_clks[] = {
+ {
+ .id = EN7523_CLK_GSW,
+ .name = "gsw",
+
+ .base_reg = REG_GSW_CLK_DIV_SEL,
+ .base_bits = 1,
+ .base_shift = 8,
+ .base_values = gsw_base,
+ .n_base_values = ARRAY_SIZE(gsw_base),
+
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ }, {
+ .id = EN7523_CLK_EMI,
+ .name = "emi",
+
+ .base_reg = REG_EMI_CLK_DIV_SEL,
+ .base_bits = 1,
+ .base_shift = 8,
+ .base_values = emi_base,
+ .n_base_values = ARRAY_SIZE(emi_base),
+
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ }, {
+ .id = EN7523_CLK_BUS,
+ .name = "bus",
+
+ .base_reg = REG_BUS_CLK_DIV_SEL,
+ .base_bits = 1,
+ .base_shift = 8,
+ .base_values = bus_base,
+ .n_base_values = ARRAY_SIZE(bus_base),
+
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ }, {
+ .id = EN7523_CLK_SLIC,
+ .name = "slic",
+
+ .base_reg = REG_SPI_CLK_FREQ_SEL,
+ .base_bits = 1,
+ .base_shift = 0,
+ .base_values = slic_base,
+ .n_base_values = ARRAY_SIZE(slic_base),
+
+ .div_reg = REG_SPI_CLK_DIV_SEL,
+ .div_bits = 5,
+ .div_shift = 24,
+ .div_val0 = 20,
+ .div_step = 2,
+ }, {
+ .id = EN7523_CLK_SPI,
+ .name = "spi",
+
+ .base_reg = REG_SPI_CLK_DIV_SEL,
+
+ .base_value = 400000000,
+
+ .div_bits = 5,
+ .div_shift = 8,
+ .div_val0 = 40,
+ .div_step = 2,
+ }, {
+ .id = EN7523_CLK_NPU,
+ .name = "npu",
+
+ .base_reg = REG_NPU_CLK_DIV_SEL,
+ .base_bits = 2,
+ .base_shift = 8,
+ .base_values = npu_base,
+ .n_base_values = ARRAY_SIZE(npu_base),
+
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ }, {
+ .id = EN7523_CLK_CRYPTO,
+ .name = "crypto",
+
+ .base_reg = REG_CRYPTO_CLKSRC,
+ .base_bits = 1,
+ .base_shift = 8,
+ .base_values = emi_base,
+ .n_base_values = ARRAY_SIZE(emi_base),
+ }
+};
+
+static const struct of_device_id of_match_clk_en7523[] = {
+ { .compatible = "airoha,en7523-scu", },
+ { /* sentinel */ }
+};
+
+static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
+{
+ const struct en_clk_desc *desc = &en7523_base_clks[i];
+ u32 val;
+
+ if (!desc->base_bits)
+ return desc->base_value;
+
+ val = readl(base + desc->base_reg);
+ val >>= desc->base_shift;
+ val &= (1 << desc->base_bits) - 1;
+
+ if (val >= desc->n_base_values)
+ return 0;
+
+ return desc->base_values[val];
+}
+
+static u32 en7523_get_div(void __iomem *base, int i)
+{
+ const struct en_clk_desc *desc = &en7523_base_clks[i];
+ u32 reg, val;
+
+ if (!desc->div_bits)
+ return 1;
+
+ reg = desc->div_reg ? desc->div_reg : desc->base_reg;
+ val = readl(base + reg);
+ val >>= desc->div_shift;
+ val &= (1 << desc->div_bits) - 1;
+
+ if (!val && desc->div_val0)
+ return desc->div_val0;
+
+ return (val + 1) * desc->div_step;
+}
+
+static int en7523_pci_is_enabled(struct clk_hw *hw)
+{
+ struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
+
+ return !!(readl(cg->base + REG_PCI_CONTROL) & REG_PCI_CONTROL_REFCLK_EN1);
+}
+
+static int en7523_pci_prepare(struct clk_hw *hw)
+{
+ struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
+ void __iomem *np_base = cg->base;
+ u32 val, mask;
+
+ /* Need to pull device low before reset */
+ val = readl(np_base + REG_PCI_CONTROL);
+ val &= ~(REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT);
+ writel(val, np_base + REG_PCI_CONTROL);
+ usleep_range(1000, 2000);
+
+ /* Enable PCIe port 1 */
+ val |= REG_PCI_CONTROL_REFCLK_EN1;
+ writel(val, np_base + REG_PCI_CONTROL);
+ usleep_range(1000, 2000);
+
+ /* Reset to default */
+ val = readl(np_base + REG_RESET_CONTROL);
+ mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
+ REG_RESET_CONTROL_PCIEHB;
+ writel(val & ~mask, np_base + REG_RESET_CONTROL);
+ usleep_range(1000, 2000);
+ writel(val | mask, np_base + REG_RESET_CONTROL);
+ msleep(100);
+ writel(val & ~mask, np_base + REG_RESET_CONTROL);
+ usleep_range(5000, 10000);
+
+ /* Release device */
+ mask = REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT;
+ val = readl(np_base + REG_PCI_CONTROL);
+ writel(val & ~mask, np_base + REG_PCI_CONTROL);
+ usleep_range(1000, 2000);
+ writel(val | mask, np_base + REG_PCI_CONTROL);
+ msleep(250);
+
+ return 0;
+}
+
+static void en7523_pci_unprepare(struct clk_hw *hw)
+{
+ struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
+ void __iomem *np_base = cg->base;
+ u32 val;
+
+ val = readl(np_base + REG_PCI_CONTROL);
+ val &= ~REG_PCI_CONTROL_REFCLK_EN1;
+ writel(val, np_base + REG_PCI_CONTROL);
+}
+
+static struct clk_hw *en7523_register_pcie_clk(struct device *dev,
+ void __iomem *np_base)
+{
+ static const struct clk_ops pcie_gate_ops = {
+ .is_enabled = en7523_pci_is_enabled,
+ .prepare = en7523_pci_prepare,
+ .unprepare = en7523_pci_unprepare,
+ };
+ struct clk_init_data init = {
+ .name = "pcie",
+ .ops = &pcie_gate_ops,
+ };
+ struct en_clk_gate *cg;
+
+ cg = devm_kzalloc(dev, sizeof(*cg), GFP_KERNEL);
+ if (!cg)
+ return NULL;
+
+ cg->base = np_base;
+ cg->hw.init = &init;
+ en7523_pci_unprepare(&cg->hw);
+
+ if (clk_hw_register(dev, &cg->hw))
+ return NULL;
+
+ return &cg->hw;
+}
+
+static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, void __iomem *np_base)
+{
+ struct clk_hw *hw;
+ u32 rate;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
+ const struct en_clk_desc *desc = &en7523_base_clks[i];
+
+ rate = en7523_get_base_rate(base, i);
+ rate /= en7523_get_div(base, i);
+
+ hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate);
+ if (IS_ERR(hw)) {
+ pr_err("Failed to register clk %s: %ld\n",
+ desc->name, PTR_ERR(hw));
+ continue;
+ }
+
+ clk_data->hws[desc->id] = hw;
+ }
+
+ hw = en7523_register_pcie_clk(dev, np_base);
+ clk_data->hws[EN7523_CLK_PCIE] = hw;
+
+ clk_data->num = EN7523_NUM_CLOCKS;
+}
+
+static int en7523_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data;
+ void __iomem *base, *np_base;
+ int r;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ np_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(np_base))
+ return PTR_ERR(np_base);
+
+ clk_data = devm_kzalloc(&pdev->dev,
+ struct_size(clk_data, hws, EN7523_NUM_CLOCKS),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_en7523_drv = {
+ .probe = en7523_clk_probe,
+ .driver = {
+ .name = "clk-en7523",
+ .of_match_table = of_match_clk_en7523,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init clk_en7523_init(void)
+{
+ return platform_driver_register(&clk_en7523_drv);
+}
+
+arch_initcall(clk_en7523_init);
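en7523_register_clocks() computes each fixed rate as the selected base frequency divided by the decoded divider, where a raw field value of zero maps to div_val0 and non-zero values map to (val + 1) * div_step. A standalone sketch of the decode; for example, the spi clock with a zero field yields 400000000 / 40 = 10 MHz:

#include <stdint.h>

/* Decode an en7523-style divider field: 0 -> div_val0 (if set), else (val + 1) * step. */
static uint32_t decode_div(uint32_t field, uint32_t div_val0, uint32_t step)
{
	if (!field && div_val0)
		return div_val0;
	return (field + 1) * step;
}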
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 45501637705c..ac68a6b40f0e 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -87,7 +87,7 @@ struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
hw = &fixed->hw;
if (dev || !np)
ret = clk_hw_register(dev, hw);
- else if (np)
+ else
ret = of_clk_hw_register(np, hw);
if (ret) {
kfree(fixed);
diff --git a/drivers/clk/clk-max9485.c b/drivers/clk/clk-max9485.c
index 5e80f3d090f3..5f85b0a32872 100644
--- a/drivers/clk/clk-max9485.c
+++ b/drivers/clk/clk-max9485.c
@@ -254,8 +254,7 @@ max9485_of_clk_get(struct of_phandle_args *clkspec, void *data)
return &drvdata->hw[idx].hw;
}
-static int max9485_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max9485_i2c_probe(struct i2c_client *client)
{
struct max9485_driver_data *drvdata;
struct device *dev = &client->dev;
@@ -377,7 +376,7 @@ static struct i2c_driver max9485_driver = {
.pm = &max9485_pm_ops,
.of_match_table = max9485_dt_ids,
},
- .probe = max9485_i2c_probe,
+ .probe_new = max9485_i2c_probe,
.id_table = max9485_i2c_ids,
};
module_i2c_driver(max9485_driver);
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 214045f6e989..fa817c317c2a 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -157,11 +157,11 @@ struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
struct clk_mux *mux;
struct clk_hw *hw;
struct clk_init_data init = {};
- u8 width = 0;
int ret = -EINVAL;
if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
- width = fls(mask) - ffs(mask) + 1;
+ u8 width = fls(mask) - ffs(mask) + 1;
+
if (width + shift > 16) {
pr_err("mux value exceeds LOWORD field\n");
return ERR_PTR(-EINVAL);
diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c
index 59d9cf0053eb..4f5df1fc74b4 100644
--- a/drivers/clk/clk-renesas-pcie.c
+++ b/drivers/clk/clk-renesas-pcie.c
@@ -213,7 +213,7 @@ rs9_of_clk_get(struct of_phandle_args *clkspec, void *data)
return rs9->clk_dif[idx];
}
-static int rs9_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int rs9_probe(struct i2c_client *client)
{
unsigned char name[5] = "DIF0";
struct rs9_driver_data *rs9;
@@ -312,7 +312,7 @@ static struct i2c_driver rs9_driver = {
.pm = &rs9_pm_ops,
.of_match_table = clk_rs9_of_match,
},
- .probe = rs9_probe,
+ .probe_new = rs9_probe,
.id_table = rs9_id,
};
module_i2c_driver(rs9_driver);
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index 364b62b9928d..4481c4303534 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -327,8 +327,7 @@ static const struct regmap_config si514_regmap_config = {
.volatile_reg = si514_regmap_is_volatile,
};
-static int si514_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int si514_probe(struct i2c_client *client)
{
struct clk_si514 *data;
struct clk_init_data init;
@@ -394,7 +393,7 @@ static struct i2c_driver si514_driver = {
.name = "si514",
.of_match_table = clk_si514_of_match,
},
- .probe = si514_probe,
+ .probe_new = si514_probe,
.remove = si514_remove,
.id_table = si514_id,
};
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 41851f41b682..4bca73212662 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -1547,8 +1547,7 @@ static const struct attribute *si5341_attributes[] = {
NULL
};
-static int si5341_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int si5341_probe(struct i2c_client *client)
{
struct clk_si5341 *data;
struct clk_init_data init;
@@ -1837,7 +1836,7 @@ static struct i2c_driver si5341_driver = {
.name = "si5341",
.of_match_table = clk_si5341_of_match,
},
- .probe = si5341_probe,
+ .probe_new = si5341_probe,
.remove = si5341_remove,
.id_table = si5341_id,
};
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index 93fa8c9e11be..b9f088c4ba2f 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -1367,9 +1367,18 @@ si53351_of_clk_get(struct of_phandle_args *clkspec, void *data)
}
#endif /* CONFIG_OF */
-static int si5351_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id si5351_i2c_ids[] = {
+ { "si5351a", SI5351_VARIANT_A },
+ { "si5351a-msop", SI5351_VARIANT_A3 },
+ { "si5351b", SI5351_VARIANT_B },
+ { "si5351c", SI5351_VARIANT_C },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si5351_i2c_ids);
+
+static int si5351_i2c_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_match_id(si5351_i2c_ids, client);
enum si5351_variant variant = (enum si5351_variant)id->driver_data;
struct si5351_platform_data *pdata;
struct si5351_driver_data *drvdata;
@@ -1649,21 +1658,12 @@ static int si5351_i2c_remove(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id si5351_i2c_ids[] = {
- { "si5351a", SI5351_VARIANT_A },
- { "si5351a-msop", SI5351_VARIANT_A3 },
- { "si5351b", SI5351_VARIANT_B },
- { "si5351c", SI5351_VARIANT_C },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, si5351_i2c_ids);
-
static struct i2c_driver si5351_driver = {
.driver = {
.name = "si5351",
.of_match_table = of_match_ptr(si5351_dt_ids),
},
- .probe = si5351_i2c_probe,
+ .probe_new = si5351_i2c_probe,
.remove = si5351_i2c_remove,
.id_table = si5351_i2c_ids,
};
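
The probe conversions in this series all follow the same shape: probe_new() receives only the client, and drivers that still need the matched i2c_device_id (as si5351 above and si544/si570 below do) look it up themselves with i2c_match_id() against their own table. A minimal sketch of the pattern, with placeholder names (mychip/my_*):

#include <linux/i2c.h>
#include <linux/module.h>

static const struct i2c_device_id my_ids[] = {
	{ "mychip-a", 0 },
	{ "mychip-b", 1 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, my_ids);

static int my_probe(struct i2c_client *client)
{
	/* probe_new drops the id argument; recover the matched entry */
	const struct i2c_device_id *id = i2c_match_id(my_ids, client);
	unsigned long variant = id ? id->driver_data : 0;

	dev_info(&client->dev, "probed variant %lu\n", variant);
	return 0;
}

static struct i2c_driver my_driver = {
	.driver = {
		.name = "mychip",
	},
	.probe_new = my_probe,
	.id_table = my_ids,
};
module_i2c_driver(my_driver);

MODULE_LICENSE("GPL");
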
diff --git a/drivers/clk/clk-si544.c b/drivers/clk/clk-si544.c
index d9ec9086184d..089786907641 100644
--- a/drivers/clk/clk-si544.c
+++ b/drivers/clk/clk-si544.c
@@ -451,11 +451,19 @@ static const struct regmap_config si544_regmap_config = {
.volatile_reg = si544_regmap_is_volatile,
};
-static int si544_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id si544_id[] = {
+ { "si544a", si544a },
+ { "si544b", si544b },
+ { "si544c", si544c },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si544_id);
+
+static int si544_probe(struct i2c_client *client)
{
struct clk_si544 *data;
struct clk_init_data init;
+ const struct i2c_device_id *id = i2c_match_id(si544_id, client);
int err;
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
@@ -499,14 +507,6 @@ static int si544_probe(struct i2c_client *client,
return 0;
}
-static const struct i2c_device_id si544_id[] = {
- { "si544a", si544a },
- { "si544b", si544b },
- { "si544c", si544c },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, si544_id);
-
static const struct of_device_id clk_si544_of_match[] = {
{ .compatible = "silabs,si544a" },
{ .compatible = "silabs,si544b" },
@@ -520,7 +520,7 @@ static struct i2c_driver si544_driver = {
.name = "si544",
.of_match_table = clk_si544_of_match,
},
- .probe = si544_probe,
+ .probe_new = si544_probe,
.id_table = si544_id,
};
module_i2c_driver(si544_driver);
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index eea50121718a..1ff8f32f734d 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -398,11 +398,20 @@ static const struct regmap_config si570_regmap_config = {
.volatile_reg = si570_regmap_is_volatile,
};
-static int si570_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id si570_id[] = {
+ { "si570", si57x },
+ { "si571", si57x },
+ { "si598", si59x },
+ { "si599", si59x },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si570_id);
+
+static int si570_probe(struct i2c_client *client)
{
struct clk_si570 *data;
struct clk_init_data init;
+ const struct i2c_device_id *id = i2c_match_id(si570_id, client);
u32 initial_fout, factory_fout, stability;
bool skip_recall;
int err;
@@ -495,15 +504,6 @@ static int si570_remove(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id si570_id[] = {
- { "si570", si57x },
- { "si571", si57x },
- { "si598", si59x },
- { "si599", si59x },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, si570_id);
-
static const struct of_device_id clk_si570_of_match[] = {
{ .compatible = "silabs,si570" },
{ .compatible = "silabs,si571" },
@@ -518,7 +518,7 @@ static struct i2c_driver si570_driver = {
.name = "si570",
.of_match_table = clk_si570_of_match,
},
- .probe = si570_probe,
+ .probe_new = si570_probe,
.remove = si570_remove,
.id_table = si570_id,
};
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index ed119182aa1b..f00d4c1158d7 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -108,17 +108,10 @@ struct clk {
/*** runtime pm ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
- int ret;
-
if (!core->rpm_enabled)
return 0;
- ret = pm_runtime_get_sync(core->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(core->dev);
- return ret;
- }
- return 0;
+ return pm_runtime_resume_and_get(core->dev);
}
static void clk_pm_runtime_put(struct clk_core *core)
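
pm_runtime_resume_and_get() wraps exactly the pattern being removed here: it calls pm_runtime_get_sync() and, on failure, drops the usage count again with pm_runtime_put_noidle() before returning the error, so the caller always ends up with a balanced reference. Roughly (a sketch of the equivalence, not quoted from pm_runtime.h):

static inline int resume_and_get_equivalent(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* keep the usage count balanced */
		return ret;
	}

	return 0;
}
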
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 2dfd6149e528..cbf0d7955a00 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -178,7 +178,7 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
unsigned long flags)
{
struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
- struct clk_hw *div_hw, *gate_hw;
+ struct clk_hw *div_hw, *gate_hw = NULL;
struct clk_divider *div = NULL;
struct clk_gate *gate = NULL;
struct clk_mux *mux = NULL;
@@ -223,14 +223,17 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
div->lock = &imx_ccm_lock;
div->flags = CLK_DIVIDER_ROUND_CLOSEST;
- gate = kzalloc(sizeof(*gate), GFP_KERNEL);
- if (!gate)
- goto fail;
+ /* skip registering the gate ops if M4 is enabled */
+ if (!mcore_booted) {
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto fail;
- gate_hw = &gate->hw;
- gate->reg = reg;
- gate->bit_idx = PCG_CGC_SHIFT;
- gate->lock = &imx_ccm_lock;
+ gate_hw = &gate->hw;
+ gate->reg = reg;
+ gate->bit_idx = PCG_CGC_SHIFT;
+ gate->lock = &imx_ccm_lock;
+ }
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, mux_ops, div_hw,
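
Leaving gate_hw as NULL when the M core has booted relies on the composite clock code (clk-composite.c) only wiring in the gate half when both a gate hw and gate ops are supplied; the registered clock then consists of mux and divider only, and the CCM gate bit owned by the Cortex-M side is never touched by Linux.
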
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 3f6fd7ef2a68..cbf8131c63f7 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -782,7 +782,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
hws[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_hw_gate2_flags("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE);
hws[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_hw_gate2_flags("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE);
hws[IMX7D_OCOTP_CLK] = imx_clk_hw_gate4("ocotp_clk", "ipg_root_clk", base + 0x4230, 0);
- hws[IMX7D_SNVS_CLK] = imx_clk_hw_gate4("snvs_clk", "ipg_root_clk", base + 0x4250, 0);
hws[IMX7D_MU_ROOT_CLK] = imx_clk_hw_gate4("mu_root_clk", "ipg_root_clk", base + 0x4270, 0);
hws[IMX7D_CAAM_CLK] = imx_clk_hw_gate4("caam_clk", "ipg_root_clk", base + 0x4240, 0);
hws[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_hw_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4690, 0);
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index e8cbe181ec06..b6d275855b36 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -560,7 +560,6 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
hws[IMX8MM_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
hws[IMX8MM_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
- hws[IMX8MM_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
hws[IMX8MM_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
hws[IMX8MM_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
hws[IMX8MM_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
@@ -639,6 +638,8 @@ static struct platform_driver imx8mm_clk_driver = {
},
};
module_platform_driver(imx8mm_clk_driver);
+module_param(mcore_booted, bool, S_IRUGO);
+MODULE_PARM_DESC(mcore_booted, "Indicate whether the Cortex-M core is booted");
MODULE_AUTHOR("Bai Ping <ping.bai@nxp.com>");
MODULE_DESCRIPTION("NXP i.MX8MM clock driver");
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 92fcbab4f5be..d37c45b676ab 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -227,6 +227,30 @@ static const char * const imx8mn_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys
"sys_pll1_40m", "sys_pll3_out", "clk_ext2",
"sys_pll1_80m", "video_pll1_out", };
+static const char * const imx8mn_gpt1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+ "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext1", };
+
+static const char * const imx8mn_gpt2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+ "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext1", };
+
+static const char * const imx8mn_gpt3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+ "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext1", };
+
+static const char * const imx8mn_gpt4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+ "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext1", };
+
+static const char * const imx8mn_gpt5_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+ "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext1", };
+
+static const char * const imx8mn_gpt6_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+ "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext1", };
+
static const char * const imx8mn_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m",
"vpu_pll_out", "sys_pll2_125m", "sys_pll3_out",
"sys_pll1_80m", "sys_pll2_166m", };
@@ -476,6 +500,12 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_CLK_PWM2] = imx8m_clk_hw_composite("pwm2", imx8mn_pwm2_sels, base + 0xb400);
hws[IMX8MN_CLK_PWM3] = imx8m_clk_hw_composite("pwm3", imx8mn_pwm3_sels, base + 0xb480);
hws[IMX8MN_CLK_PWM4] = imx8m_clk_hw_composite("pwm4", imx8mn_pwm4_sels, base + 0xb500);
+ hws[IMX8MN_CLK_GPT1] = imx8m_clk_hw_composite("gpt1", imx8mn_gpt1_sels, base + 0xb580);
+ hws[IMX8MN_CLK_GPT2] = imx8m_clk_hw_composite("gpt2", imx8mn_gpt2_sels, base + 0xb600);
+ hws[IMX8MN_CLK_GPT3] = imx8m_clk_hw_composite("gpt3", imx8mn_gpt3_sels, base + 0xb680);
+ hws[IMX8MN_CLK_GPT4] = imx8m_clk_hw_composite("gpt4", imx8mn_gpt4_sels, base + 0xb700);
+ hws[IMX8MN_CLK_GPT5] = imx8m_clk_hw_composite("gpt5", imx8mn_gpt5_sels, base + 0xb780);
+ hws[IMX8MN_CLK_GPT6] = imx8m_clk_hw_composite("gpt6", imx8mn_gpt6_sels, base + 0xb800);
hws[IMX8MN_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mn_wdog_sels, base + 0xb900);
hws[IMX8MN_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mn_wrclk_sels, base + 0xb980);
hws[IMX8MN_CLK_CLKO1] = imx8m_clk_hw_composite("clko1", imx8mn_clko1_sels, base + 0xba00);
@@ -501,6 +531,12 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_CLK_GPIO3_ROOT] = imx_clk_hw_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
hws[IMX8MN_CLK_GPIO4_ROOT] = imx_clk_hw_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
hws[IMX8MN_CLK_GPIO5_ROOT] = imx_clk_hw_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
+ hws[IMX8MN_CLK_GPT1_ROOT] = imx_clk_hw_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
+ hws[IMX8MN_CLK_GPT2_ROOT] = imx_clk_hw_gate4("gpt2_root_clk", "gpt2", base + 0x4110, 0);
+ hws[IMX8MN_CLK_GPT3_ROOT] = imx_clk_hw_gate4("gpt3_root_clk", "gpt3", base + 0x4120, 0);
+ hws[IMX8MN_CLK_GPT4_ROOT] = imx_clk_hw_gate4("gpt4_root_clk", "gpt4", base + 0x4130, 0);
+ hws[IMX8MN_CLK_GPT5_ROOT] = imx_clk_hw_gate4("gpt5_root_clk", "gpt5", base + 0x4140, 0);
+ hws[IMX8MN_CLK_GPT6_ROOT] = imx_clk_hw_gate4("gpt6_root_clk", "gpt6", base + 0x4150, 0);
hws[IMX8MN_CLK_I2C1_ROOT] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
hws[IMX8MN_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
hws[IMX8MN_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
@@ -522,7 +558,6 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
hws[IMX8MN_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
hws[IMX8MN_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
- hws[IMX8MN_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
hws[IMX8MN_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
hws[IMX8MN_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
hws[IMX8MN_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
@@ -549,6 +584,8 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_CLK_SDMA3_ROOT] = imx_clk_hw_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
hws[IMX8MN_CLK_SAI7_ROOT] = imx_clk_hw_gate2_shared2("sai7_root_clk", "sai7", base + 0x4650, 0, &share_count_sai7);
+ hws[IMX8MN_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc_24m", 1, 8);
+
hws[IMX8MN_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
hws[IMX8MN_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core",
@@ -594,6 +631,8 @@ static struct platform_driver imx8mn_clk_driver = {
},
};
module_platform_driver(imx8mn_clk_driver);
+module_param(mcore_booted, bool, S_IRUGO);
+MODULE_PARM_DESC(mcore_booted, "Indicate whether the Cortex-M core is booted");
MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>");
MODULE_DESCRIPTION("NXP i.MX8MN clock driver");
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index 18f5b7c3ca9d..e89db568f5a8 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -358,7 +358,7 @@ static const char * const imx8mp_media_mipi_phy1_ref_sels[] = {"osc_24m", "sys_p
"clk_ext2", "audio_pll2_out",
"video_pll1_out", };
-static const char * const imx8mp_media_disp1_pix_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out",
+static const char * const imx8mp_media_disp_pix_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out",
"audio_pll1_out", "sys_pll1_800m",
"sys_pll2_1000m", "sys_pll3_out", "clk_ext4", };
@@ -399,6 +399,11 @@ static const char * const imx8mp_sai7_sels[] = {"osc_24m", "audio_pll1_out", "au
static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
+static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "dummy", "dummy", "gpu_pll_out", "vpu_pll_out",
+ "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3",
+ "dummy", "dummy", "osc_24m", "dummy", "osc_32k"};
+
static struct clk_hw **hws;
static struct clk_hw_onecell_data *clk_hw_data;
@@ -504,6 +509,15 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
hws[IMX8MP_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+ hws[IMX8MP_CLK_CLKOUT1_SEL] = imx_clk_hw_mux2("clkout1_sel", anatop_base + 0x128, 4, 4,
+ imx8mp_clkout_sels, ARRAY_SIZE(imx8mp_clkout_sels));
+ hws[IMX8MP_CLK_CLKOUT1_DIV] = imx_clk_hw_divider("clkout1_div", "clkout1_sel", anatop_base + 0x128, 0, 4);
+ hws[IMX8MP_CLK_CLKOUT1] = imx_clk_hw_gate("clkout1", "clkout1_div", anatop_base + 0x128, 8);
+ hws[IMX8MP_CLK_CLKOUT2_SEL] = imx_clk_hw_mux2("clkout2_sel", anatop_base + 0x128, 20, 4,
+ imx8mp_clkout_sels, ARRAY_SIZE(imx8mp_clkout_sels));
+ hws[IMX8MP_CLK_CLKOUT2_DIV] = imx_clk_hw_divider("clkout2_div", "clkout2_sel", anatop_base + 0x128, 16, 4);
+ hws[IMX8MP_CLK_CLKOUT2] = imx_clk_hw_gate("clkout2", "clkout2_div", anatop_base + 0x128, 24);
+
hws[IMX8MP_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mp_a53_sels, ccm_base + 0x8000);
hws[IMX8MP_CLK_A53_SRC] = hws[IMX8MP_CLK_A53_DIV];
hws[IMX8MP_CLK_A53_CG] = hws[IMX8MP_CLK_A53_DIV];
@@ -538,6 +552,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000);
hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100);
hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite_bus("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200);
+ hws[IMX8MP_CLK_MEDIA_DISP2_PIX] = imx8m_clk_hw_composite("media_disp2_pix", imx8mp_media_disp_pix_sels, ccm_base + 0x9300);
hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
@@ -600,7 +615,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mp_usdhc3_sels, ccm_base + 0xbc80);
hws[IMX8MP_CLK_MEDIA_CAM1_PIX] = imx8m_clk_hw_composite("media_cam1_pix", imx8mp_media_cam1_pix_sels, ccm_base + 0xbd00);
hws[IMX8MP_CLK_MEDIA_MIPI_PHY1_REF] = imx8m_clk_hw_composite("media_mipi_phy1_ref", imx8mp_media_mipi_phy1_ref_sels, ccm_base + 0xbd80);
- hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp1_pix_sels, ccm_base + 0xbe00);
+ hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp_pix_sels, ccm_base + 0xbe00);
hws[IMX8MP_CLK_MEDIA_CAM2_PIX] = imx8m_clk_hw_composite("media_cam2_pix", imx8mp_media_cam2_pix_sels, ccm_base + 0xbe80);
hws[IMX8MP_CLK_MEDIA_LDB] = imx8m_clk_hw_composite("media_ldb", imx8mp_media_ldb_sels, ccm_base + 0xbf00);
hws[IMX8MP_CLK_MEMREPAIR] = imx8m_clk_hw_composite_critical("mem_repair", imx8mp_memrepair_sels, ccm_base + 0xbf80);
@@ -654,12 +669,11 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_SIM_ENET_ROOT] = imx_clk_hw_gate4("sim_enet_root_clk", "enet_axi", ccm_base + 0x4400, 0);
hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_core", ccm_base + 0x4450, 0);
hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core", ccm_base + 0x4460, 0);
- hws[IMX8MP_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", ccm_base + 0x4470, 0);
hws[IMX8MP_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", ccm_base + 0x4490, 0);
hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0);
hws[IMX8MP_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", ccm_base + 0x44b0, 0);
hws[IMX8MP_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", ccm_base + 0x44c0, 0);
- hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "osc_32k", ccm_base + 0x44d0, 0);
+ hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0);
hws[IMX8MP_CLK_USB_PHY_ROOT] = imx_clk_hw_gate4("usb_phy_root_clk", "usb_phy_ref", ccm_base + 0x44f0, 0);
hws[IMX8MP_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", ccm_base + 0x4510, 0);
hws[IMX8MP_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", ccm_base + 0x4520, 0);
@@ -721,6 +735,8 @@ static struct platform_driver imx8mp_clk_driver = {
},
};
module_platform_driver(imx8mp_clk_driver);
+module_param(mcore_booted, bool, S_IRUGO);
+MODULE_PARM_DESC(mcore_booted, "Indicate whether the Cortex-M core is booted");
MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>");
MODULE_DESCRIPTION("NXP i.MX8MP clock driver");
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index 83cc2b1c3294..882dcad4817d 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -25,7 +25,7 @@ static u32 share_count_sai6;
static u32 share_count_dcss;
static u32 share_count_nand;
-static const char * const pll_ref_sels[] = { "osc_25m", "osc_27m", "dummy", "dummy", };
+static const char * const pll_ref_sels[] = { "osc_25m", "osc_27m", "hdmi_phy_27m", "dummy", };
static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
@@ -557,7 +557,6 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
hws[IMX8MQ_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
hws[IMX8MQ_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
hws[IMX8MQ_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
- hws[IMX8MQ_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
hws[IMX8MQ_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
hws[IMX8MQ_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
hws[IMX8MQ_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
@@ -632,6 +631,8 @@ static struct platform_driver imx8mq_clk_driver = {
},
};
module_platform_driver(imx8mq_clk_driver);
+module_param(mcore_booted, bool, S_IRUGO);
+MODULE_PARM_DESC(mcore_booted, "Indicate whether the Cortex-M core is booted");
MODULE_AUTHOR("Abel Vesa <abel.vesa@nxp.com>");
MODULE_DESCRIPTION("NXP i.MX8MQ clock driver");
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
index 083da31dc3ea..c56e406138db 100644
--- a/drivers/clk/imx/clk-scu.c
+++ b/drivers/clk/imx/clk-scu.c
@@ -528,7 +528,7 @@ static int imx_clk_scu_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret) {
pm_genpd_remove_device(dev);
pm_runtime_disable(dev);
@@ -683,7 +683,12 @@ struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
return ERR_PTR(ret);
}
- pdev->driver_override = "imx-scu-clk";
+ ret = driver_set_override(&pdev->dev, &pdev->driver_override,
+ "imx-scu-clk", strlen("imx-scu-clk"));
+ if (ret) {
+ platform_device_put(pdev);
+ return ERR_PTR(ret);
+ }
ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
if (ret)
@@ -837,8 +842,10 @@ struct clk_hw *__imx_clk_gpr_scu(const char *name, const char * const *parent_na
if (!clk_node)
return ERR_PTR(-ENOMEM);
- if (!imx_scu_clk_is_valid(rsrc_id))
+ if (!imx_scu_clk_is_valid(rsrc_id)) {
+ kfree(clk_node);
return ERR_PTR(-EINVAL);
+ }
clk = kzalloc(sizeof(*clk), GFP_KERNEL);
if (!clk) {
diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
index 7cc669934253..5582f18dd632 100644
--- a/drivers/clk/imx/clk.c
+++ b/drivers/clk/imx/clk.c
@@ -17,6 +17,9 @@
DEFINE_SPINLOCK(imx_ccm_lock);
EXPORT_SYMBOL_GPL(imx_ccm_lock);
+bool mcore_booted;
+EXPORT_SYMBOL_GPL(mcore_booted);
+
void imx_unregister_clocks(struct clk *clks[], unsigned int count)
{
unsigned int i;
@@ -173,6 +176,8 @@ void imx_register_uart_clocks(unsigned int clk_count)
int i;
imx_uart_clocks = kcalloc(clk_count, sizeof(struct clk *), GFP_KERNEL);
+ if (!imx_uart_clocks)
+ return;
if (!of_stdout)
return;
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index a7cbbcd1a3f4..5061a06468df 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
extern spinlock_t imx_ccm_lock;
+extern bool mcore_booted;
void imx_check_clocks(struct clk *clks[], unsigned int count);
void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count);
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index af31633a8862..861c50d6cb24 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -660,7 +660,7 @@ static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
ingenic_clk->idx = idx;
clk_init.name = clk_info->name;
- clk_init.flags = 0;
+ clk_init.flags = clk_info->flags;
clk_init.parent_names = parent_names;
caps = clk_info->type;
diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
index bfc2b9c38a41..147b7df0d657 100644
--- a/drivers/clk/ingenic/cgu.h
+++ b/drivers/clk/ingenic/cgu.h
@@ -136,6 +136,7 @@ struct ingenic_cgu_custom_info {
* struct ingenic_cgu_clk_info - information about a clock
* @name: name of the clock
* @type: a bitmask formed from CGU_CLK_* values
+ * @flags: common clock flags to set on this clock
* @parents: an array of the indices of potential parents of this clock
* within the clock_info array of the CGU, or -1 in entries
* which correspond to no valid parent
@@ -161,6 +162,8 @@ struct ingenic_cgu_clk_info {
CGU_CLK_CUSTOM = BIT(7),
} type;
+ unsigned long flags;
+
int parents[4];
union {
diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c
index 15d61793f53b..590e9c85cb25 100644
--- a/drivers/clk/ingenic/jz4725b-cgu.c
+++ b/drivers/clk/ingenic/jz4725b-cgu.c
@@ -87,6 +87,11 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = {
[JZ4725B_CLK_CCLK] = {
"cclk", CGU_CLK_DIV,
+ /*
+ * Disabling the CPU clock or any parent clocks will hang the
+ * system; mark it critical.
+ */
+ .flags = CLK_IS_CRITICAL,
.parents = { JZ4725B_CLK_PLL, -1, -1, -1 },
.div = {
CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1, 0,
@@ -114,6 +119,11 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = {
[JZ4725B_CLK_MCLK] = {
"mclk", CGU_CLK_DIV,
+ /*
+ * Disabling MCLK or its parents will render DRAM
+ * inaccessible; mark it critical.
+ */
+ .flags = CLK_IS_CRITICAL,
.parents = { JZ4725B_CLK_PLL, -1, -1, -1 },
.div = {
CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1, 0,
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index 43ffb62c42bb..3e0a30574ebb 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -102,6 +102,11 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
[JZ4740_CLK_CCLK] = {
"cclk", CGU_CLK_DIV,
+ /*
+ * Disabling the CPU clock or any parent clocks will hang the
+ * system; mark it critical.
+ */
+ .flags = CLK_IS_CRITICAL,
.parents = { JZ4740_CLK_PLL, -1, -1, -1 },
.div = {
CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1, 0,
@@ -129,6 +134,11 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
[JZ4740_CLK_MCLK] = {
"mclk", CGU_CLK_DIV,
+ /*
+ * Disabling MCLK or its parents will render DRAM
+ * inaccessible; mark it critical.
+ */
+ .flags = CLK_IS_CRITICAL,
.parents = { JZ4740_CLK_PLL, -1, -1, -1 },
.div = {
CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1, 0,
diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c
index 8fdd383560fb..ecd395ac8a28 100644
--- a/drivers/clk/ingenic/jz4760-cgu.c
+++ b/drivers/clk/ingenic/jz4760-cgu.c
@@ -143,6 +143,11 @@ static const struct ingenic_cgu_clk_info jz4760_cgu_clocks[] = {
[JZ4760_CLK_CCLK] = {
"cclk", CGU_CLK_DIV,
+ /*
+ * Disabling the CPU clock or any parent clocks will hang the
+ * system; mark it critical.
+ */
+ .flags = CLK_IS_CRITICAL,
.parents = { JZ4760_CLK_PLL0, },
.div = {
CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1, 0,
@@ -175,6 +180,11 @@ static const struct ingenic_cgu_clk_info jz4760_cgu_clocks[] = {
},
[JZ4760_CLK_MCLK] = {
"mclk", CGU_CLK_DIV,
+ /*
+ * Disabling MCLK or its parents will render DRAM
+ * inaccessible; mark it critical.
+ */
+ .flags = CLK_IS_CRITICAL,
.parents = { JZ4760_CLK_PLL0, },
.div = {
CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1, 0,
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index 7ef91257630e..6ae1740367f9 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -149,6 +149,11 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
[JZ4770_CLK_CCLK] = {
"cclk", CGU_CLK_DIV,
+ /*
+ * Disabling the CPU clock or any parent clocks will hang the
+ * system; mark it critical.
+ */
+ .flags = CLK_IS_CRITICAL,
.parents = { JZ4770_CLK_PLL0, },
.div = {
CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1, 0,
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index e357c228e0f1..b1dadc0a5e75 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -341,12 +341,22 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
[JZ4780_CLK_CPU] = {
"cpu", CGU_CLK_DIV,
+ /*
+ * Disabling the CPU clock or any parent clocks will hang the
+ * system; mark it critical.
+ */
+ .flags = CLK_IS_CRITICAL,
.parents = { JZ4780_CLK_CPUMUX, -1, -1, -1 },
.div = { CGU_REG_CLOCKCONTROL, 0, 1, 4, 22, -1, -1 },
},
[JZ4780_CLK_L2CACHE] = {
"l2cache", CGU_CLK_DIV,
+ /*
+ * The L2 cache clock is critical if caches are enabled and
+ * disabling it or any parent clocks will hang the system.
+ */
+ .flags = CLK_IS_CRITICAL,
.parents = { JZ4780_CLK_CPUMUX, -1, -1, -1 },
.div = { CGU_REG_CLOCKCONTROL, 4, 1, 4, -1, -1, -1 },
},
@@ -380,6 +390,11 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
[JZ4780_CLK_DDR] = {
"ddr", CGU_CLK_MUX | CGU_CLK_DIV,
+ /*
+ * Disabling DDR clock or its parents will render DRAM
+ * inaccessible; mark it critical.
+ */
+ .flags = CLK_IS_CRITICAL,
.parents = { -1, JZ4780_CLK_SCLKA, JZ4780_CLK_MPLL, -1 },
.mux = { CGU_REG_DDRCDR, 30, 2 },
.div = { CGU_REG_DDRCDR, 0, 1, 4, 29, 28, 27 },
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index 77acfbeb4830..201bf6e6b6e0 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -31,6 +31,7 @@ struct ingenic_soc_info {
unsigned int num_channels;
bool has_ost;
bool has_tcu_clk;
+ bool allow_missing_tcu_clk;
};
struct ingenic_tcu_clk_info {
@@ -320,7 +321,8 @@ static const struct ingenic_soc_info jz4770_soc_info = {
static const struct ingenic_soc_info x1000_soc_info = {
.num_channels = 8,
.has_ost = false, /* X1000 has an OST, but it does not belong to the TCU */
- .has_tcu_clk = false,
+ .has_tcu_clk = true,
+ .allow_missing_tcu_clk = true,
};
static const struct of_device_id __maybe_unused ingenic_tcu_of_match[] __initconst = {
@@ -355,14 +357,27 @@ static int __init ingenic_tcu_probe(struct device_node *np)
tcu->clk = of_clk_get_by_name(np, "tcu");
if (IS_ERR(tcu->clk)) {
ret = PTR_ERR(tcu->clk);
- pr_crit("Cannot get TCU clock\n");
- goto err_free_tcu;
- }
- ret = clk_prepare_enable(tcu->clk);
- if (ret) {
- pr_crit("Unable to enable TCU clock\n");
- goto err_put_clk;
+ /*
+ * Old device trees for some SoCs did not include the
+ * TCU clock because this driver (incorrectly) didn't
+ * use it. In this case we complain loudly and attempt
+ * to continue without the clock, which might work if
+ * booting with workarounds like "clk_ignore_unused".
+ */
+ if (tcu->soc_info->allow_missing_tcu_clk && ret == -EINVAL) {
+ pr_warn("TCU clock missing from device tree, please update your device tree\n");
+ tcu->clk = NULL;
+ } else {
+ pr_crit("Cannot get TCU clock from device tree\n");
+ goto err_free_tcu;
+ }
+ } else {
+ ret = clk_prepare_enable(tcu->clk);
+ if (ret) {
+ pr_crit("Unable to enable TCU clock\n");
+ goto err_put_clk;
+ }
}
}
@@ -432,10 +447,10 @@ err_unregister_timer_clocks:
clk_hw_unregister(tcu->clocks->hws[i]);
kfree(tcu->clocks);
err_clk_disable:
- if (tcu->soc_info->has_tcu_clk)
+ if (tcu->clk)
clk_disable_unprepare(tcu->clk);
err_put_clk:
- if (tcu->soc_info->has_tcu_clk)
+ if (tcu->clk)
clk_put(tcu->clk);
err_free_tcu:
kfree(tcu);
diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c
index 3c4d5a77ccbd..b2ce3fb83f54 100644
--- a/drivers/clk/ingenic/x1000-cgu.c
+++ b/drivers/clk/ingenic/x1000-cgu.c
@@ -251,6 +251,11 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
[X1000_CLK_CPU] = {
"cpu", CGU_CLK_DIV | CGU_CLK_GATE,
+ /*
+ * Disabling the CPU clock or any parent clocks will hang the
+ * system; mark it critical.
+ */
+ .flags = CLK_IS_CRITICAL,
.parents = { X1000_CLK_CPUMUX, -1, -1, -1 },
.div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 },
.gate = { CGU_REG_CLKGR, 30 },
@@ -258,6 +263,11 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
[X1000_CLK_L2CACHE] = {
"l2cache", CGU_CLK_DIV,
+ /*
+ * The L2 cache clock is critical if caches are enabled and
+ * disabling it or any parent clocks will hang the system.
+ */
+ .flags = CLK_IS_CRITICAL,
.parents = { X1000_CLK_CPUMUX, -1, -1, -1 },
.div = { CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1 },
},
@@ -290,6 +300,11 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
[X1000_CLK_DDR] = {
"ddr", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ /*
+ * Disabling DDR clock or its parents will render DRAM
+ * inaccessible; mark it critical.
+ */
+ .flags = CLK_IS_CRITICAL,
.parents = { -1, X1000_CLK_SCLKA, X1000_CLK_MPLL, -1 },
.mux = { CGU_REG_DDRCDR, 30, 2 },
.div = { CGU_REG_DDRCDR, 0, 1, 4, 29, 28, 27 },
diff --git a/drivers/clk/ingenic/x1830-cgu.c b/drivers/clk/ingenic/x1830-cgu.c
index e01ec2dc7a1a..0fd46e50a513 100644
--- a/drivers/clk/ingenic/x1830-cgu.c
+++ b/drivers/clk/ingenic/x1830-cgu.c
@@ -225,6 +225,7 @@ static const struct ingenic_cgu_clk_info x1830_cgu_clocks[] = {
[X1830_CLK_CPU] = {
"cpu", CGU_CLK_DIV | CGU_CLK_GATE,
+ .flags = CLK_IS_CRITICAL,
.parents = { X1830_CLK_CPUMUX, -1, -1, -1 },
.div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 },
.gate = { CGU_REG_CLKGR1, 15 },
@@ -232,6 +233,11 @@ static const struct ingenic_cgu_clk_info x1830_cgu_clocks[] = {
[X1830_CLK_L2CACHE] = {
"l2cache", CGU_CLK_DIV,
+ /*
+ * The L2 cache clock is critical if caches are enabled and
+ * disabling it or any parent clocks will hang the system.
+ */
+ .flags = CLK_IS_CRITICAL,
.parents = { X1830_CLK_CPUMUX, -1, -1, -1 },
.div = { CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1 },
},
@@ -264,6 +270,11 @@ static const struct ingenic_cgu_clk_info x1830_cgu_clocks[] = {
[X1830_CLK_DDR] = {
"ddr", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ /*
+ * Disabling DDR clock or its parents will render DRAM
+ * inaccessible; mark it critical.
+ */
+ .flags = CLK_IS_CRITICAL,
.parents = { -1, X1830_CLK_SCLKA, X1830_CLK_MPLL, -1 },
.mux = { CGU_REG_DDRCDR, 30, 2 },
.div = { CGU_REG_DDRCDR, 0, 1, 4, 29, 28, 27 },
diff --git a/drivers/clk/keystone/syscon-clk.c b/drivers/clk/keystone/syscon-clk.c
index aae1a4076281..19198325b909 100644
--- a/drivers/clk/keystone/syscon-clk.c
+++ b/drivers/clk/keystone/syscon-clk.c
@@ -162,6 +162,13 @@ static const struct ti_syscon_gate_clk_data am64_clk_data[] = {
{ /* Sentinel */ },
};
+static const struct ti_syscon_gate_clk_data am62_clk_data[] = {
+ TI_SYSCON_CLK_GATE("epwm_tbclk0", 0x0, 0),
+ TI_SYSCON_CLK_GATE("epwm_tbclk1", 0x0, 1),
+ TI_SYSCON_CLK_GATE("epwm_tbclk2", 0x0, 2),
+ { /* Sentinel */ },
+};
+
static const struct of_device_id ti_syscon_gate_clk_ids[] = {
{
.compatible = "ti,am654-ehrpwm-tbclk",
@@ -171,6 +178,10 @@ static const struct of_device_id ti_syscon_gate_clk_ids[] = {
.compatible = "ti,am64-epwm-tbclk",
.data = &am64_clk_data,
},
+ {
+ .compatible = "ti,am62-epwm-tbclk",
+ .data = &am62_clk_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, ti_syscon_gate_clk_ids);
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 01ef02c54725..d5936cfb3bee 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -512,6 +512,14 @@ config COMMON_CLK_MT8183_VENCSYS
help
This driver supports MediaTek MT8183 vencsys clocks.
+config COMMON_CLK_MT8186
+ bool "Clock driver for MediaTek MT8186"
+ depends on ARM64 || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8186 clocks.
+
config COMMON_CLK_MT8192
bool "Clock driver for MediaTek MT8192"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 7b0c2646ce4a..caf2ce93d666 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -71,6 +71,11 @@ obj-$(CONFIG_COMMON_CLK_MT8183_MFGCFG) += clk-mt8183-mfgcfg.o
obj-$(CONFIG_COMMON_CLK_MT8183_MMSYS) += clk-mt8183-mm.o
obj-$(CONFIG_COMMON_CLK_MT8183_VDECSYS) += clk-mt8183-vdec.o
obj-$(CONFIG_COMMON_CLK_MT8183_VENCSYS) += clk-mt8183-venc.o
+obj-$(CONFIG_COMMON_CLK_MT8186) += clk-mt8186-mcu.o clk-mt8186-topckgen.o clk-mt8186-infra_ao.o \
+ clk-mt8186-apmixedsys.o clk-mt8186-imp_iic_wrap.o \
+ clk-mt8186-mfg.o clk-mt8186-mm.o clk-mt8186-wpe.o \
+ clk-mt8186-img.o clk-mt8186-vdec.o clk-mt8186-venc.o \
+ clk-mt8186-cam.o clk-mt8186-mdp.o clk-mt8186-ipe.o
obj-$(CONFIG_COMMON_CLK_MT8192) += clk-mt8192.o
obj-$(CONFIG_COMMON_CLK_MT8192_AUDSYS) += clk-mt8192-aud.o
obj-$(CONFIG_COMMON_CLK_MT8192_CAMSYS) += clk-mt8192-cam.o
diff --git a/drivers/clk/mediatek/clk-apmixed.c b/drivers/clk/mediatek/clk-apmixed.c
index a29339cc26c4..fc3d4146f482 100644
--- a/drivers/clk/mediatek/clk-apmixed.c
+++ b/drivers/clk/mediatek/clk-apmixed.c
@@ -70,12 +70,12 @@ static const struct clk_ops mtk_ref2usb_tx_ops = {
.unprepare = mtk_ref2usb_tx_unprepare,
};
-struct clk * __init mtk_clk_register_ref2usb_tx(const char *name,
+struct clk_hw * __init mtk_clk_register_ref2usb_tx(const char *name,
const char *parent_name, void __iomem *reg)
{
struct mtk_ref2usb_tx *tx;
struct clk_init_data init = {};
- struct clk *clk;
+ int ret;
tx = kzalloc(sizeof(*tx), GFP_KERNEL);
if (!tx)
@@ -89,14 +89,14 @@ struct clk * __init mtk_clk_register_ref2usb_tx(const char *name,
init.parent_names = &parent_name;
init.num_parents = 1;
- clk = clk_register(NULL, &tx->hw);
+ ret = clk_hw_register(NULL, &tx->hw);
- if (IS_ERR(clk)) {
- pr_err("Failed to register clk %s: %pe\n", name, clk);
+ if (ret) {
kfree(tx);
+ return ERR_PTR(ret);
}
- return clk;
+ return &tx->hw;
}
MODULE_LICENSE("GPL");
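
The clk-apmixed change above, and the cpumux/gate conversions that follow, all move from the clk-based registration helpers to the clk_hw ones: clk_hw_register() returns an int rather than a struct clk pointer, the provider hands out &foo->hw, and teardown pairs with clk_hw_unregister(). A condensed sketch of the pattern, with placeholder names (struct my_clk, my_register/my_unregister):

#include <linux/clk-provider.h>
#include <linux/slab.h>

struct my_clk {
	struct clk_hw hw;
};

static struct clk_hw *my_register(struct device *dev, const char *name,
				  const struct clk_ops *ops)
{
	struct my_clk *mc;
	struct clk_init_data init = { .name = name, .ops = ops };
	int ret;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return ERR_PTR(-ENOMEM);

	mc->hw.init = &init;

	ret = clk_hw_register(dev, &mc->hw);	/* int, not struct clk * */
	if (ret) {
		kfree(mc);
		return ERR_PTR(ret);
	}

	return &mc->hw;
}

static void my_unregister(struct clk_hw *hw)
{
	struct my_clk *mc = container_of(hw, struct my_clk, hw);

	clk_hw_unregister(hw);
	kfree(mc);
}
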
diff --git a/drivers/clk/mediatek/clk-cpumux.c b/drivers/clk/mediatek/clk-cpumux.c
index c11b3fae622e..2b5d48591738 100644
--- a/drivers/clk/mediatek/clk-cpumux.c
+++ b/drivers/clk/mediatek/clk-cpumux.c
@@ -57,12 +57,12 @@ static const struct clk_ops clk_cpumux_ops = {
.set_parent = clk_cpumux_set_parent,
};
-static struct clk *
+static struct clk_hw *
mtk_clk_register_cpumux(const struct mtk_composite *mux,
struct regmap *regmap)
{
struct mtk_clk_cpumux *cpumux;
- struct clk *clk;
+ int ret;
struct clk_init_data init;
cpumux = kzalloc(sizeof(*cpumux), GFP_KERNEL);
@@ -81,34 +81,33 @@ mtk_clk_register_cpumux(const struct mtk_composite *mux,
cpumux->regmap = regmap;
cpumux->hw.init = &init;
- clk = clk_register(NULL, &cpumux->hw);
- if (IS_ERR(clk))
+ ret = clk_hw_register(NULL, &cpumux->hw);
+ if (ret) {
kfree(cpumux);
+ return ERR_PTR(ret);
+ }
- return clk;
+ return &cpumux->hw;
}
-static void mtk_clk_unregister_cpumux(struct clk *clk)
+static void mtk_clk_unregister_cpumux(struct clk_hw *hw)
{
struct mtk_clk_cpumux *cpumux;
- struct clk_hw *hw;
-
- hw = __clk_get_hw(clk);
if (!hw)
return;
cpumux = to_mtk_clk_cpumux(hw);
- clk_unregister(clk);
+ clk_hw_unregister(hw);
kfree(cpumux);
}
int mtk_clk_register_cpumuxes(struct device_node *node,
const struct mtk_composite *clks, int num,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
int i;
- struct clk *clk;
+ struct clk_hw *hw;
struct regmap *regmap;
regmap = device_node_to_regmap(node);
@@ -120,19 +119,20 @@ int mtk_clk_register_cpumuxes(struct device_node *node,
for (i = 0; i < num; i++) {
const struct mtk_composite *mux = &clks[i];
- if (!IS_ERR_OR_NULL(clk_data->clks[mux->id])) {
+ if (!IS_ERR_OR_NULL(clk_data->hws[mux->id])) {
pr_warn("%pOF: Trying to register duplicate clock ID: %d\n",
node, mux->id);
continue;
}
- clk = mtk_clk_register_cpumux(mux, regmap);
- if (IS_ERR(clk)) {
- pr_err("Failed to register clk %s: %pe\n", mux->name, clk);
+ hw = mtk_clk_register_cpumux(mux, regmap);
+ if (IS_ERR(hw)) {
+ pr_err("Failed to register clk %s: %pe\n", mux->name,
+ hw);
goto err;
}
- clk_data->clks[mux->id] = clk;
+ clk_data->hws[mux->id] = hw;
}
return 0;
@@ -141,29 +141,29 @@ err:
while (--i >= 0) {
const struct mtk_composite *mux = &clks[i];
- if (IS_ERR_OR_NULL(clk_data->clks[mux->id]))
+ if (IS_ERR_OR_NULL(clk_data->hws[mux->id]))
continue;
- mtk_clk_unregister_cpumux(clk_data->clks[mux->id]);
- clk_data->clks[mux->id] = ERR_PTR(-ENOENT);
+ mtk_clk_unregister_cpumux(clk_data->hws[mux->id]);
+ clk_data->hws[mux->id] = ERR_PTR(-ENOENT);
}
- return PTR_ERR(clk);
+ return PTR_ERR(hw);
}
void mtk_clk_unregister_cpumuxes(const struct mtk_composite *clks, int num,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
int i;
for (i = num; i > 0; i--) {
const struct mtk_composite *mux = &clks[i - 1];
- if (IS_ERR_OR_NULL(clk_data->clks[mux->id]))
+ if (IS_ERR_OR_NULL(clk_data->hws[mux->id]))
continue;
- mtk_clk_unregister_cpumux(clk_data->clks[mux->id]);
- clk_data->clks[mux->id] = ERR_PTR(-ENOENT);
+ mtk_clk_unregister_cpumux(clk_data->hws[mux->id]);
+ clk_data->hws[mux->id] = ERR_PTR(-ENOENT);
}
}
diff --git a/drivers/clk/mediatek/clk-cpumux.h b/drivers/clk/mediatek/clk-cpumux.h
index b07e89f7c283..325adbef25d1 100644
--- a/drivers/clk/mediatek/clk-cpumux.h
+++ b/drivers/clk/mediatek/clk-cpumux.h
@@ -7,15 +7,15 @@
#ifndef __DRV_CLK_CPUMUX_H
#define __DRV_CLK_CPUMUX_H
-struct clk_onecell_data;
+struct clk_hw_onecell_data;
struct device_node;
struct mtk_composite;
int mtk_clk_register_cpumuxes(struct device_node *node,
const struct mtk_composite *clks, int num,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
void mtk_clk_unregister_cpumuxes(const struct mtk_composite *clks, int num,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
#endif /* __DRV_CLK_CPUMUX_H */
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index da52023f8455..421806236228 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -152,7 +152,7 @@ const struct clk_ops mtk_clk_gate_ops_no_setclr_inv = {
};
EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_no_setclr_inv);
-static struct clk *mtk_clk_register_gate(const char *name,
+static struct clk_hw *mtk_clk_register_gate(const char *name,
const char *parent_name,
struct regmap *regmap, int set_ofs,
int clr_ofs, int sta_ofs, u8 bit,
@@ -160,7 +160,7 @@ static struct clk *mtk_clk_register_gate(const char *name,
unsigned long flags, struct device *dev)
{
struct mtk_clk_gate *cg;
- struct clk *clk;
+ int ret;
struct clk_init_data init = {};
cg = kzalloc(sizeof(*cg), GFP_KERNEL);
@@ -181,35 +181,34 @@ static struct clk *mtk_clk_register_gate(const char *name,
cg->hw.init = &init;
- clk = clk_register(dev, &cg->hw);
- if (IS_ERR(clk))
+ ret = clk_hw_register(dev, &cg->hw);
+ if (ret) {
kfree(cg);
+ return ERR_PTR(ret);
+ }
- return clk;
+ return &cg->hw;
}
-static void mtk_clk_unregister_gate(struct clk *clk)
+static void mtk_clk_unregister_gate(struct clk_hw *hw)
{
struct mtk_clk_gate *cg;
- struct clk_hw *hw;
-
- hw = __clk_get_hw(clk);
if (!hw)
return;
cg = to_mtk_clk_gate(hw);
- clk_unregister(clk);
+ clk_hw_unregister(hw);
kfree(cg);
}
int mtk_clk_register_gates_with_dev(struct device_node *node,
const struct mtk_gate *clks, int num,
- struct clk_onecell_data *clk_data,
+ struct clk_hw_onecell_data *clk_data,
struct device *dev)
{
int i;
- struct clk *clk;
+ struct clk_hw *hw;
struct regmap *regmap;
if (!clk_data)
@@ -224,13 +223,13 @@ int mtk_clk_register_gates_with_dev(struct device_node *node,
for (i = 0; i < num; i++) {
const struct mtk_gate *gate = &clks[i];
- if (!IS_ERR_OR_NULL(clk_data->clks[gate->id])) {
+ if (!IS_ERR_OR_NULL(clk_data->hws[gate->id])) {
pr_warn("%pOF: Trying to register duplicate clock ID: %d\n",
node, gate->id);
continue;
}
- clk = mtk_clk_register_gate(gate->name, gate->parent_name,
+ hw = mtk_clk_register_gate(gate->name, gate->parent_name,
regmap,
gate->regs->set_ofs,
gate->regs->clr_ofs,
@@ -238,12 +237,13 @@ int mtk_clk_register_gates_with_dev(struct device_node *node,
gate->shift, gate->ops,
gate->flags, dev);
- if (IS_ERR(clk)) {
- pr_err("Failed to register clk %s: %pe\n", gate->name, clk);
+ if (IS_ERR(hw)) {
+ pr_err("Failed to register clk %s: %pe\n", gate->name,
+ hw);
goto err;
}
- clk_data->clks[gate->id] = clk;
+ clk_data->hws[gate->id] = hw;
}
return 0;
@@ -252,26 +252,26 @@ err:
while (--i >= 0) {
const struct mtk_gate *gate = &clks[i];
- if (IS_ERR_OR_NULL(clk_data->clks[gate->id]))
+ if (IS_ERR_OR_NULL(clk_data->hws[gate->id]))
continue;
- mtk_clk_unregister_gate(clk_data->clks[gate->id]);
- clk_data->clks[gate->id] = ERR_PTR(-ENOENT);
+ mtk_clk_unregister_gate(clk_data->hws[gate->id]);
+ clk_data->hws[gate->id] = ERR_PTR(-ENOENT);
}
- return PTR_ERR(clk);
+ return PTR_ERR(hw);
}
int mtk_clk_register_gates(struct device_node *node,
const struct mtk_gate *clks, int num,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
return mtk_clk_register_gates_with_dev(node, clks, num, clk_data, NULL);
}
EXPORT_SYMBOL_GPL(mtk_clk_register_gates);
void mtk_clk_unregister_gates(const struct mtk_gate *clks, int num,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
int i;
@@ -281,11 +281,11 @@ void mtk_clk_unregister_gates(const struct mtk_gate *clks, int num,
for (i = num; i > 0; i--) {
const struct mtk_gate *gate = &clks[i - 1];
- if (IS_ERR_OR_NULL(clk_data->clks[gate->id]))
+ if (IS_ERR_OR_NULL(clk_data->hws[gate->id]))
continue;
- mtk_clk_unregister_gate(clk_data->clks[gate->id]);
- clk_data->clks[gate->id] = ERR_PTR(-ENOENT);
+ mtk_clk_unregister_gate(clk_data->hws[gate->id]);
+ clk_data->hws[gate->id] = ERR_PTR(-ENOENT);
}
}
EXPORT_SYMBOL_GPL(mtk_clk_unregister_gates);
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index 6b5738826a22..d9897ef53528 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -10,7 +10,7 @@
#include <linux/types.h>
struct clk;
-struct clk_onecell_data;
+struct clk_hw_onecell_data;
struct clk_ops;
struct device;
struct device_node;
@@ -52,14 +52,14 @@ struct mtk_gate {
int mtk_clk_register_gates(struct device_node *node,
const struct mtk_gate *clks, int num,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
int mtk_clk_register_gates_with_dev(struct device_node *node,
const struct mtk_gate *clks, int num,
- struct clk_onecell_data *clk_data,
+ struct clk_hw_onecell_data *clk_data,
struct device *dev);
void mtk_clk_unregister_gates(const struct mtk_gate *clks, int num,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
#endif /* __DRV_CLK_GATE_H */
diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
index e66896a44fad..6ba398eb7df9 100644
--- a/drivers/clk/mediatek/clk-mt2701-aud.c
+++ b/drivers/clk/mediatek/clk-mt2701-aud.c
@@ -145,7 +145,7 @@ static const struct of_device_id of_match_clk_mt2701_aud[] = {
static int clk_mt2701_aud_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -154,7 +154,7 @@ static int clk_mt2701_aud_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r) {
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
index ffa09cfbfd51..662a8ab3fbb1 100644
--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
@@ -101,7 +101,7 @@ static const struct of_device_id of_match_clk_mt2701_bdp[] = {
static int clk_mt2701_bdp_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -110,7 +110,7 @@ static int clk_mt2701_bdp_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, bdp_clks, ARRAY_SIZE(bdp_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c
index 100ff6ca609e..47c2289f3d1d 100644
--- a/drivers/clk/mediatek/clk-mt2701-eth.c
+++ b/drivers/clk/mediatek/clk-mt2701-eth.c
@@ -43,7 +43,7 @@ static const struct of_device_id of_match_clk_mt2701_eth[] = {
static int clk_mt2701_eth_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -52,7 +52,7 @@ static int clk_mt2701_eth_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c
index 1328c112a38f..79929ed37f83 100644
--- a/drivers/clk/mediatek/clk-mt2701-g3d.c
+++ b/drivers/clk/mediatek/clk-mt2701-g3d.c
@@ -37,7 +37,7 @@ static const struct mtk_gate g3d_clks[] = {
static int clk_mt2701_g3dsys_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -46,7 +46,7 @@ static int clk_mt2701_g3dsys_init(struct platform_device *pdev)
mtk_clk_register_gates(node, g3d_clks, ARRAY_SIZE(g3d_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c
index 61444881c539..1aa36cb93ad0 100644
--- a/drivers/clk/mediatek/clk-mt2701-hif.c
+++ b/drivers/clk/mediatek/clk-mt2701-hif.c
@@ -40,7 +40,7 @@ static const struct of_device_id of_match_clk_mt2701_hif[] = {
static int clk_mt2701_hif_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -49,7 +49,7 @@ static int clk_mt2701_hif_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, hif_clks, ARRAY_SIZE(hif_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r) {
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
index 631e80f0fc7d..c4f3cd26df60 100644
--- a/drivers/clk/mediatek/clk-mt2701-img.c
+++ b/drivers/clk/mediatek/clk-mt2701-img.c
@@ -43,7 +43,7 @@ static const struct of_device_id of_match_clk_mt2701_img[] = {
static int clk_mt2701_img_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -52,7 +52,7 @@ static int clk_mt2701_img_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
index cb18e1849492..9ea7abad99d2 100644
--- a/drivers/clk/mediatek/clk-mt2701-mm.c
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -83,7 +83,7 @@ static int clk_mt2701_mm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->parent->of_node;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_MM_NR);
@@ -91,7 +91,7 @@ static int clk_mt2701_mm_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
index c9def728ad1e..a2f18117f27a 100644
--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
@@ -54,7 +54,7 @@ static const struct of_device_id of_match_clk_mt2701_vdec[] = {
static int clk_mt2701_vdec_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -63,7 +63,7 @@ static int clk_mt2701_vdec_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index 1eb3e4563c3f..04ba356db2d7 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -666,7 +666,7 @@ static const struct mtk_gate top_clks[] = {
static int mtk_topckgen_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -692,7 +692,7 @@ static int mtk_topckgen_init(struct platform_device *pdev)
mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct mtk_gate_regs infra_cg_regs = {
@@ -735,7 +735,7 @@ static const struct mtk_fixed_factor infra_fixed_divs[] = {
FACTOR(CLK_INFRA_CLK_13M, "clk13m", "clk26m", 1, 2),
};
-static struct clk_onecell_data *infra_clk_data;
+static struct clk_hw_onecell_data *infra_clk_data;
static void __init mtk_infrasys_init_early(struct device_node *node)
{
@@ -745,7 +745,7 @@ static void __init mtk_infrasys_init_early(struct device_node *node)
infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
for (i = 0; i < CLK_INFRA_NR; i++)
- infra_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+ infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
}
mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
@@ -754,7 +754,8 @@ static void __init mtk_infrasys_init_early(struct device_node *node)
mtk_clk_register_cpumuxes(node, cpu_muxes, ARRAY_SIZE(cpu_muxes),
infra_clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+ infra_clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -771,8 +772,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
} else {
for (i = 0; i < CLK_INFRA_NR; i++) {
- if (infra_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER))
- infra_clk_data->clks[i] = ERR_PTR(-ENOENT);
+ if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
+ infra_clk_data->hws[i] = ERR_PTR(-ENOENT);
}
}
@@ -781,7 +782,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
infra_clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+ infra_clk_data);
if (r)
return r;
@@ -886,7 +888,7 @@ static const struct mtk_composite peri_muxs[] = {
static int mtk_pericfg_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
void __iomem *base;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -904,7 +906,7 @@ static int mtk_pericfg_init(struct platform_device *pdev)
mtk_clk_register_composites(peri_muxs, ARRAY_SIZE(peri_muxs), base,
&mt2701_clk_lock, clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
return r;
@@ -935,13 +937,13 @@ static int mtk_pericfg_init(struct platform_device *pdev)
}
static const struct mtk_pll_data apmixed_plls[] = {
- PLL(CLK_APMIXED_ARMPLL, "armpll", 0x200, 0x20c, 0x80000001,
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x200, 0x20c, 0x80000000,
PLL_AO, 21, 0x204, 24, 0x0, 0x204, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x210, 0x21c, 0xf0000001,
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x210, 0x21c, 0xf0000000,
HAVE_RST_BAR, 21, 0x210, 4, 0x0, 0x214, 0),
- PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x220, 0x22c, 0xf3000001,
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x220, 0x22c, 0xf3000000,
HAVE_RST_BAR, 7, 0x220, 4, 0x0, 0x224, 14),
- PLL(CLK_APMIXED_MMPLL, "mmpll", 0x230, 0x23c, 0x00000001, 0,
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x230, 0x23c, 0, 0,
21, 0x230, 4, 0x0, 0x234, 0),
PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x240, 0x24c, 0x00000001, 0,
21, 0x240, 4, 0x0, 0x244, 0),
@@ -969,7 +971,7 @@ static const struct mtk_fixed_factor apmixed_fixed_divs[] = {
static int mtk_apmixedsys_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR);
@@ -981,7 +983,7 @@ static int mtk_apmixedsys_init(struct platform_device *pdev)
mtk_clk_register_factors(apmixed_fixed_divs, ARRAY_SIZE(apmixed_fixed_divs),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct of_device_id of_match_clk_mt2701[] = {
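[Editor's note] Alongside the provider conversion, the PLL tables drop bit 0 from en_mask (0x...1 becomes 0x...0, BIT(0) becomes 0). The most plausible reading is that the shared MediaTek PLL code now sets the base enable bit itself, leaving en_mask to carry only the extra divider-enable bits. The sketch below illustrates that assumed split; example_pll_enable and the CON0_BASE_EN handling are illustrative only, not the actual clk-pll.c implementation.

#include <linux/bits.h>
#include <linux/io.h>

#define CON0_BASE_EN	BIT(0)

/* Hypothetical illustration of the assumed en_mask split. */
static void example_pll_enable(void __iomem *en_reg, u32 en_mask)
{
	u32 div_en_mask = en_mask & ~CON0_BASE_EN;
	u32 r;

	/* The base enable bit is assumed to be set unconditionally ... */
	r = readl(en_reg) | CON0_BASE_EN;
	writel(r, en_reg);

	/* ... so each PLL's en_mask only needs its divider enable bits. */
	if (div_en_mask) {
		r = readl(en_reg) | div_en_mask;
		writel(r, en_reg);
	}
}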
diff --git a/drivers/clk/mediatek/clk-mt2712-bdp.c b/drivers/clk/mediatek/clk-mt2712-bdp.c
index a200714001d8..9acab4357133 100644
--- a/drivers/clk/mediatek/clk-mt2712-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2712-bdp.c
@@ -60,7 +60,7 @@ static const struct mtk_gate bdp_clks[] = {
static int clk_mt2712_bdp_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -69,7 +69,7 @@ static int clk_mt2712_bdp_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, bdp_clks, ARRAY_SIZE(bdp_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r != 0)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt2712-img.c b/drivers/clk/mediatek/clk-mt2712-img.c
index 89b2a7197b02..5cc143e65e42 100644
--- a/drivers/clk/mediatek/clk-mt2712-img.c
+++ b/drivers/clk/mediatek/clk-mt2712-img.c
@@ -38,7 +38,7 @@ static const struct mtk_gate img_clks[] = {
static int clk_mt2712_img_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -47,7 +47,7 @@ static int clk_mt2712_img_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r != 0)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt2712-jpgdec.c b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
index 58813c38ab4d..31fc30370d98 100644
--- a/drivers/clk/mediatek/clk-mt2712-jpgdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
@@ -34,7 +34,7 @@ static const struct mtk_gate jpgdec_clks[] = {
static int clk_mt2712_jpgdec_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -43,7 +43,7 @@ static int clk_mt2712_jpgdec_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, jpgdec_clks, ARRAY_SIZE(jpgdec_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r != 0)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt2712-mfg.c b/drivers/clk/mediatek/clk-mt2712-mfg.c
index a6b827db17bc..a4d09675bf18 100644
--- a/drivers/clk/mediatek/clk-mt2712-mfg.c
+++ b/drivers/clk/mediatek/clk-mt2712-mfg.c
@@ -33,7 +33,7 @@ static const struct mtk_gate mfg_clks[] = {
static int clk_mt2712_mfg_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -42,7 +42,7 @@ static int clk_mt2712_mfg_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r != 0)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
index 5519c3d68c1f..7d44b09b8a0a 100644
--- a/drivers/clk/mediatek/clk-mt2712-mm.c
+++ b/drivers/clk/mediatek/clk-mt2712-mm.c
@@ -130,7 +130,7 @@ static int clk_mt2712_mm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->parent->of_node;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
@@ -138,7 +138,7 @@ static int clk_mt2712_mm_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r != 0)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt2712-vdec.c b/drivers/clk/mediatek/clk-mt2712-vdec.c
index 4987ad9d3b11..af13f43dd831 100644
--- a/drivers/clk/mediatek/clk-mt2712-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-vdec.c
@@ -52,7 +52,7 @@ static const struct mtk_gate vdec_clks[] = {
static int clk_mt2712_vdec_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -61,7 +61,7 @@ static int clk_mt2712_vdec_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r != 0)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt2712-venc.c b/drivers/clk/mediatek/clk-mt2712-venc.c
index 07c29daa1ad6..abc08a029753 100644
--- a/drivers/clk/mediatek/clk-mt2712-venc.c
+++ b/drivers/clk/mediatek/clk-mt2712-venc.c
@@ -35,7 +35,7 @@ static const struct mtk_gate venc_clks[] = {
static int clk_mt2712_venc_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -44,7 +44,7 @@ static int clk_mt2712_venc_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r != 0)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index ff72b9ab945b..410b059727ea 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -1223,44 +1223,44 @@ static const struct mtk_pll_div_table mmpll_div_table[] = {
};
static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0230, 0x023C, 0xf0000101,
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0230, 0x023C, 0xf0000100,
HAVE_RST_BAR, 31, 0x0230, 4, 0, 0, 0, 0x0234, 0),
- PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0240, 0x024C, 0xfe000101,
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0240, 0x024C, 0xfe000100,
HAVE_RST_BAR, 31, 0x0240, 4, 0, 0, 0, 0x0244, 0),
- PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x0320, 0x032C, 0xc0000101,
+ PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x0320, 0x032C, 0xc0000100,
0, 31, 0x0320, 4, 0, 0, 0, 0x0324, 0),
- PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x0280, 0x028C, 0x00000101,
+ PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x0280, 0x028C, 0x00000100,
0, 31, 0x0280, 4, 0, 0, 0, 0x0284, 0),
- PLL(CLK_APMIXED_APLL1, "apll1", 0x0330, 0x0340, 0x00000101,
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x0330, 0x0340, 0x00000100,
0, 31, 0x0330, 4, 0x0338, 0x0014, 0, 0x0334, 0),
- PLL(CLK_APMIXED_APLL2, "apll2", 0x0350, 0x0360, 0x00000101,
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0350, 0x0360, 0x00000100,
0, 31, 0x0350, 4, 0x0358, 0x0014, 1, 0x0354, 0),
- PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x0370, 0x037c, 0x00000101,
+ PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x0370, 0x037c, 0x00000100,
0, 31, 0x0370, 4, 0, 0, 0, 0x0374, 0),
- PLL(CLK_APMIXED_LVDSPLL2, "lvdspll2", 0x0390, 0x039C, 0x00000101,
+ PLL(CLK_APMIXED_LVDSPLL2, "lvdspll2", 0x0390, 0x039C, 0x00000100,
0, 31, 0x0390, 4, 0, 0, 0, 0x0394, 0),
- PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0270, 0x027C, 0x00000101,
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0270, 0x027C, 0x00000100,
0, 31, 0x0270, 4, 0, 0, 0, 0x0274, 0),
- PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x0410, 0x041C, 0x00000101,
+ PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x0410, 0x041C, 0x00000100,
0, 31, 0x0410, 4, 0, 0, 0, 0x0414, 0),
- PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0290, 0x029C, 0xc0000101,
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0290, 0x029C, 0xc0000100,
0, 31, 0x0290, 4, 0, 0, 0, 0x0294, 0),
- PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0250, 0x0260, 0x00000101,
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0250, 0x0260, 0x00000100,
0, 31, 0x0250, 4, 0, 0, 0, 0x0254, 0,
mmpll_div_table),
- PLL_B(CLK_APMIXED_ARMCA35PLL, "armca35pll", 0x0100, 0x0110, 0xf0000101,
+ PLL_B(CLK_APMIXED_ARMCA35PLL, "armca35pll", 0x0100, 0x0110, 0xf0000100,
HAVE_RST_BAR, 31, 0x0100, 4, 0, 0, 0, 0x0104, 0,
armca35pll_div_table),
- PLL_B(CLK_APMIXED_ARMCA72PLL, "armca72pll", 0x0210, 0x0220, 0x00000101,
+ PLL_B(CLK_APMIXED_ARMCA72PLL, "armca72pll", 0x0210, 0x0220, 0x00000100,
0, 31, 0x0210, 4, 0, 0, 0, 0x0214, 0,
armca72pll_div_table),
- PLL(CLK_APMIXED_ETHERPLL, "etherpll", 0x0300, 0x030C, 0xc0000101,
+ PLL(CLK_APMIXED_ETHERPLL, "etherpll", 0x0300, 0x030C, 0xc0000100,
0, 31, 0x0300, 4, 0, 0, 0, 0x0304, 0),
};
static int clk_mt2712_apmixed_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -1268,7 +1268,7 @@ static int clk_mt2712_apmixed_probe(struct platform_device *pdev)
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r != 0)
pr_err("%s(): could not register clock provider: %d\n",
@@ -1277,7 +1277,7 @@ static int clk_mt2712_apmixed_probe(struct platform_device *pdev)
return r;
}
-static struct clk_onecell_data *top_clk_data;
+static struct clk_hw_onecell_data *top_clk_data;
static void clk_mt2712_top_init_early(struct device_node *node)
{
@@ -1287,13 +1287,13 @@ static void clk_mt2712_top_init_early(struct device_node *node)
top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
for (i = 0; i < CLK_TOP_NR_CLK; i++)
- top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+ top_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
}
mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
top_clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -1318,8 +1318,8 @@ static int clk_mt2712_top_probe(struct platform_device *pdev)
top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
} else {
for (i = 0; i < CLK_TOP_NR_CLK; i++) {
- if (top_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER))
- top_clk_data->clks[i] = ERR_PTR(-ENOENT);
+ if (top_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
+ top_clk_data->hws[i] = ERR_PTR(-ENOENT);
}
}
@@ -1335,7 +1335,7 @@ static int clk_mt2712_top_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
top_clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
if (r != 0)
pr_err("%s(): could not register clock provider: %d\n",
@@ -1346,7 +1346,7 @@ static int clk_mt2712_top_probe(struct platform_device *pdev)
static int clk_mt2712_infra_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -1355,7 +1355,7 @@ static int clk_mt2712_infra_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r != 0)
pr_err("%s(): could not register clock provider: %d\n",
@@ -1368,7 +1368,7 @@ static int clk_mt2712_infra_probe(struct platform_device *pdev)
static int clk_mt2712_peri_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -1377,7 +1377,7 @@ static int clk_mt2712_peri_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r != 0)
pr_err("%s(): could not register clock provider: %d\n",
@@ -1390,7 +1390,7 @@ static int clk_mt2712_peri_probe(struct platform_device *pdev)
static int clk_mt2712_mcu_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
void __iomem *base;
@@ -1406,7 +1406,7 @@ static int clk_mt2712_mcu_probe(struct platform_device *pdev)
mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
&mt2712_clk_lock, clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r != 0)
pr_err("%s(): could not register clock provider: %d\n",
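[Editor's note] Both mt2701 and mt2712 keep their early-init trick after the conversion: the early hook pre-fills the onecell table with ERR_PTR(-EPROBE_DEFER) so early consumers retry, and the later platform probe turns any slot it does not populate into -ENOENT. This still works with the hw-based provider because of_clk_hw_onecell_get() simply returns whatever sits in hws[idx]. A simplified model of that lookup (not the verbatim kernel helper):

/* Simplified model of of_clk_hw_onecell_get(): an ERR_PTR stored in hws[]
 * (e.g. -EPROBE_DEFER from early init) propagates straight to the consumer. */
static struct clk_hw *example_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_hw_onecell_data *hw_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= hw_data->num)
		return ERR_PTR(-EINVAL);

	return hw_data->hws[idx];
}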
diff --git a/drivers/clk/mediatek/clk-mt6765-audio.c b/drivers/clk/mediatek/clk-mt6765-audio.c
index 4c989165d795..9c6e9caad597 100644
--- a/drivers/clk/mediatek/clk-mt6765-audio.c
+++ b/drivers/clk/mediatek/clk-mt6765-audio.c
@@ -66,7 +66,7 @@ static const struct mtk_gate audio_clks[] = {
static int clk_mt6765_audio_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -75,7 +75,7 @@ static int clk_mt6765_audio_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, audio_clks,
ARRAY_SIZE(audio_clks), clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt6765-cam.c b/drivers/clk/mediatek/clk-mt6765-cam.c
index c96394893bcf..2586d3ac4cd4 100644
--- a/drivers/clk/mediatek/clk-mt6765-cam.c
+++ b/drivers/clk/mediatek/clk-mt6765-cam.c
@@ -41,7 +41,7 @@ static const struct mtk_gate cam_clks[] = {
static int clk_mt6765_cam_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -49,7 +49,7 @@ static int clk_mt6765_cam_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks), clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt6765-img.c b/drivers/clk/mediatek/clk-mt6765-img.c
index 6fd8bf8030fc..8cc95b98921e 100644
--- a/drivers/clk/mediatek/clk-mt6765-img.c
+++ b/drivers/clk/mediatek/clk-mt6765-img.c
@@ -37,7 +37,7 @@ static const struct mtk_gate img_clks[] = {
static int clk_mt6765_img_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -45,7 +45,7 @@ static int clk_mt6765_img_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt6765-mipi0a.c b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
index 81744d0f95a0..c816e26a95f9 100644
--- a/drivers/clk/mediatek/clk-mt6765-mipi0a.c
+++ b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
@@ -34,7 +34,7 @@ static const struct mtk_gate mipi0a_clks[] = {
static int clk_mt6765_mipi0a_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -43,7 +43,7 @@ static int clk_mt6765_mipi0a_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, mipi0a_clks,
ARRAY_SIZE(mipi0a_clks), clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt6765-mm.c b/drivers/clk/mediatek/clk-mt6765-mm.c
index 6d8214c51684..ee6d3b859a6c 100644
--- a/drivers/clk/mediatek/clk-mt6765-mm.c
+++ b/drivers/clk/mediatek/clk-mt6765-mm.c
@@ -63,7 +63,7 @@ static const struct mtk_gate mm_clks[] = {
static int clk_mt6765_mm_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -71,7 +71,7 @@ static int clk_mt6765_mm_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt6765-vcodec.c b/drivers/clk/mediatek/clk-mt6765-vcodec.c
index baae665fab31..d8045979d48a 100644
--- a/drivers/clk/mediatek/clk-mt6765-vcodec.c
+++ b/drivers/clk/mediatek/clk-mt6765-vcodec.c
@@ -36,7 +36,7 @@ static const struct mtk_gate venc_clks[] = {
static int clk_mt6765_vcodec_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -45,7 +45,7 @@ static int clk_mt6765_vcodec_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, venc_clks,
ARRAY_SIZE(venc_clks), clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
index 24829ca3bd1f..e9b9e6729733 100644
--- a/drivers/clk/mediatek/clk-mt6765.c
+++ b/drivers/clk/mediatek/clk-mt6765.c
@@ -748,32 +748,32 @@ static const struct mtk_gate apmixed_clks[] = {
_pcw_reg, _pcw_shift, NULL) \
static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x021C, 0x0228, BIT(0),
+ PLL(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x021C, 0x0228, 0,
PLL_AO, 22, 8, 0x0220, 24, 0, 0, 0, 0x0220, 0),
- PLL(CLK_APMIXED_ARMPLL, "armpll", 0x020C, 0x0218, BIT(0),
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x020C, 0x0218, 0,
PLL_AO, 22, 8, 0x0210, 24, 0, 0, 0, 0x0210, 0),
- PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x022C, 0x0238, BIT(0),
+ PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x022C, 0x0238, 0,
PLL_AO, 22, 8, 0x0230, 24, 0, 0, 0, 0x0230, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x023C, 0x0248, BIT(0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x023C, 0x0248, 0,
(HAVE_RST_BAR | PLL_AO), 22, 8, 0x0240, 24, 0, 0, 0, 0x0240,
0),
- PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x024C, 0x0258, BIT(0),
+ PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x024C, 0x0258, 0,
0, 22, 8, 0x0250, 24, 0, 0, 0, 0x0250, 0),
- PLL(CLK_APMIXED_MMPLL, "mmpll", 0x025C, 0x0268, BIT(0),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x025C, 0x0268, 0,
0, 22, 8, 0x0260, 24, 0, 0, 0, 0x0260, 0),
- PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x026C, 0x0278, BIT(0),
+ PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x026C, 0x0278, 0,
HAVE_RST_BAR, 22, 8, 0x0270, 24, 0, 0, 0, 0x0270, 0),
- PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x027C, 0x0288, BIT(0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x027C, 0x0288, 0,
0, 22, 8, 0x0280, 24, 0, 0, 0, 0x0280, 0),
- PLL(CLK_APMIXED_APLL1, "apll1", 0x028C, 0x029C, BIT(0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x028C, 0x029C, 0,
0, 32, 8, 0x0290, 24, 0x0040, 0x000C, 0, 0x0294, 0),
- PLL(CLK_APMIXED_MPLL, "mpll", 0x02A0, 0x02AC, BIT(0),
+ PLL(CLK_APMIXED_MPLL, "mpll", 0x02A0, 0x02AC, 0,
PLL_AO, 22, 8, 0x02A4, 24, 0, 0, 0, 0x02A4, 0),
};
static int clk_mt6765_apmixed_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
void __iomem *base;
@@ -791,7 +791,7 @@ static int clk_mt6765_apmixed_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, apmixed_clks,
ARRAY_SIZE(apmixed_clks), clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
@@ -811,7 +811,7 @@ static int clk_mt6765_top_probe(struct platform_device *pdev)
int r;
struct device_node *node = pdev->dev.of_node;
void __iomem *base;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
@@ -831,7 +831,7 @@ static int clk_mt6765_top_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
@@ -848,7 +848,7 @@ static int clk_mt6765_top_probe(struct platform_device *pdev)
static int clk_mt6765_ifr_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
void __iomem *base;
@@ -864,7 +864,7 @@ static int clk_mt6765_ifr_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, ifr_clks, ARRAY_SIZE(ifr_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt6779-aud.c b/drivers/clk/mediatek/clk-mt6779-aud.c
index 9e889e4c361a..97e44abb7e87 100644
--- a/drivers/clk/mediatek/clk-mt6779-aud.c
+++ b/drivers/clk/mediatek/clk-mt6779-aud.c
@@ -96,7 +96,7 @@ static const struct of_device_id of_match_clk_mt6779_aud[] = {
static int clk_mt6779_aud_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
@@ -104,7 +104,7 @@ static int clk_mt6779_aud_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static struct platform_driver clk_mt6779_aud_drv = {
diff --git a/drivers/clk/mediatek/clk-mt6779-cam.c b/drivers/clk/mediatek/clk-mt6779-cam.c
index 7f07a2a139ac..9c5117aae146 100644
--- a/drivers/clk/mediatek/clk-mt6779-cam.c
+++ b/drivers/clk/mediatek/clk-mt6779-cam.c
@@ -45,7 +45,7 @@ static const struct of_device_id of_match_clk_mt6779_cam[] = {
static int clk_mt6779_cam_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK);
@@ -53,7 +53,7 @@ static int clk_mt6779_cam_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static struct platform_driver clk_mt6779_cam_drv = {
diff --git a/drivers/clk/mediatek/clk-mt6779-img.c b/drivers/clk/mediatek/clk-mt6779-img.c
index f0961fa1a286..801271477d46 100644
--- a/drivers/clk/mediatek/clk-mt6779-img.c
+++ b/drivers/clk/mediatek/clk-mt6779-img.c
@@ -37,7 +37,7 @@ static const struct of_device_id of_match_clk_mt6779_img[] = {
static int clk_mt6779_img_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
@@ -45,7 +45,7 @@ static int clk_mt6779_img_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static struct platform_driver clk_mt6779_img_drv = {
diff --git a/drivers/clk/mediatek/clk-mt6779-ipe.c b/drivers/clk/mediatek/clk-mt6779-ipe.c
index 8c6f3e154bf3..f67814ca7dfb 100644
--- a/drivers/clk/mediatek/clk-mt6779-ipe.c
+++ b/drivers/clk/mediatek/clk-mt6779-ipe.c
@@ -39,7 +39,7 @@ static const struct of_device_id of_match_clk_mt6779_ipe[] = {
static int clk_mt6779_ipe_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_IPE_NR_CLK);
@@ -47,7 +47,7 @@ static int clk_mt6779_ipe_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, ipe_clks, ARRAY_SIZE(ipe_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static struct platform_driver clk_mt6779_ipe_drv = {
diff --git a/drivers/clk/mediatek/clk-mt6779-mfg.c b/drivers/clk/mediatek/clk-mt6779-mfg.c
index 9f3372886e6b..fc7387b59758 100644
--- a/drivers/clk/mediatek/clk-mt6779-mfg.c
+++ b/drivers/clk/mediatek/clk-mt6779-mfg.c
@@ -29,7 +29,7 @@ static const struct mtk_gate mfg_clks[] = {
static int clk_mt6779_mfg_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MFGCFG_NR_CLK);
@@ -37,7 +37,7 @@ static int clk_mt6779_mfg_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct of_device_id of_match_clk_mt6779_mfg[] = {
diff --git a/drivers/clk/mediatek/clk-mt6779-mm.c b/drivers/clk/mediatek/clk-mt6779-mm.c
index 33946e647122..eda8cbee3d23 100644
--- a/drivers/clk/mediatek/clk-mt6779-mm.c
+++ b/drivers/clk/mediatek/clk-mt6779-mm.c
@@ -89,14 +89,14 @@ static int clk_mt6779_mm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->parent->of_node;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static struct platform_driver clk_mt6779_mm_drv = {
diff --git a/drivers/clk/mediatek/clk-mt6779-vdec.c b/drivers/clk/mediatek/clk-mt6779-vdec.c
index f4358844c2e0..7e195b082e86 100644
--- a/drivers/clk/mediatek/clk-mt6779-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6779-vdec.c
@@ -46,7 +46,7 @@ static const struct of_device_id of_match_clk_mt6779_vdec[] = {
static int clk_mt6779_vdec_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_VDEC_GCON_NR_CLK);
@@ -54,7 +54,7 @@ static int clk_mt6779_vdec_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static struct platform_driver clk_mt6779_vdec_drv = {
diff --git a/drivers/clk/mediatek/clk-mt6779-venc.c b/drivers/clk/mediatek/clk-mt6779-venc.c
index ff67084af5aa..573efa87c9bd 100644
--- a/drivers/clk/mediatek/clk-mt6779-venc.c
+++ b/drivers/clk/mediatek/clk-mt6779-venc.c
@@ -37,7 +37,7 @@ static const struct of_device_id of_match_clk_mt6779_venc[] = {
static int clk_mt6779_venc_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_VENC_GCON_NR_CLK);
@@ -45,7 +45,7 @@ static int clk_mt6779_venc_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static struct platform_driver clk_mt6779_venc_drv = {
diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
index 7b61664da18f..0d0a90ee5eb2 100644
--- a/drivers/clk/mediatek/clk-mt6779.c
+++ b/drivers/clk/mediatek/clk-mt6779.c
@@ -1182,39 +1182,39 @@ static const struct mtk_gate apmixed_clks[] = {
_pcw_chg_reg, NULL)
static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0200, 0x020C, BIT(0),
+ PLL(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0200, 0x020C, 0,
PLL_AO, 0, 22, 8, 0x0204, 24, 0, 0, 0, 0x0204, 0, 0),
- PLL(CLK_APMIXED_ARMPLL_BL, "armpll_bl", 0x0210, 0x021C, BIT(0),
+ PLL(CLK_APMIXED_ARMPLL_BL, "armpll_bl", 0x0210, 0x021C, 0,
PLL_AO, 0, 22, 8, 0x0214, 24, 0, 0, 0, 0x0214, 0, 0),
- PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x02A0, 0x02AC, BIT(0),
+ PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x02A0, 0x02AC, 0,
PLL_AO, 0, 22, 8, 0x02A4, 24, 0, 0, 0, 0x02A4, 0, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0230, 0x023C, BIT(0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0230, 0x023C, 0,
(HAVE_RST_BAR), BIT(24), 22, 8, 0x0234, 24, 0, 0, 0,
0x0234, 0, 0),
- PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0240, 0x024C, BIT(0),
+ PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0240, 0x024C, 0,
(HAVE_RST_BAR), BIT(24), 22, 8, 0x0244, 24,
0, 0, 0, 0x0244, 0, 0),
- PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x0250, 0x025C, BIT(0),
+ PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x0250, 0x025C, 0,
0, 0, 22, 8, 0x0254, 24, 0, 0, 0, 0x0254, 0, 0),
- PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0260, 0x026C, BIT(0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0260, 0x026C, 0,
0, 0, 22, 8, 0x0264, 24, 0, 0, 0, 0x0264, 0, 0),
- PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0270, 0x027C, BIT(0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0270, 0x027C, 0,
0, 0, 22, 8, 0x0274, 24, 0, 0, 0, 0x0274, 0, 0),
- PLL(CLK_APMIXED_ADSPPLL, "adsppll", 0x02b0, 0x02bC, BIT(0),
+ PLL(CLK_APMIXED_ADSPPLL, "adsppll", 0x02b0, 0x02bC, 0,
(HAVE_RST_BAR), BIT(23), 22, 8, 0x02b4, 24,
0, 0, 0, 0x02b4, 0, 0),
- PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0280, 0x028C, BIT(0),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0280, 0x028C, 0,
(HAVE_RST_BAR), BIT(23), 22, 8, 0x0284, 24,
0, 0, 0, 0x0284, 0, 0),
- PLL(CLK_APMIXED_APLL1, "apll1", 0x02C0, 0x02D0, BIT(0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x02C0, 0x02D0, 0,
0, 0, 32, 8, 0x02C0, 1, 0, 0x14, 0, 0x02C4, 0, 0x2C0),
- PLL(CLK_APMIXED_APLL2, "apll2", 0x02D4, 0x02E4, BIT(0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x02D4, 0x02E4, 0,
0, 0, 32, 8, 0x02D4, 1, 0, 0x14, 1, 0x02D8, 0, 0x02D4),
};
static int clk_mt6779_apmixed_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
@@ -1224,13 +1224,13 @@ static int clk_mt6779_apmixed_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, apmixed_clks,
ARRAY_SIZE(apmixed_clks), clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static int clk_mt6779_top_probe(struct platform_device *pdev)
{
void __iomem *base;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
base = devm_platform_ioremap_resource(pdev, 0);
@@ -1253,12 +1253,12 @@ static int clk_mt6779_top_probe(struct platform_device *pdev)
mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
base, &mt6779_clk_lock, clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static int clk_mt6779_infra_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
@@ -1266,7 +1266,7 @@ static int clk_mt6779_infra_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct of_device_id of_match_clk_mt6779[] = {
diff --git a/drivers/clk/mediatek/clk-mt6797-img.c b/drivers/clk/mediatek/clk-mt6797-img.c
index 908bf9784f03..25d17db13bac 100644
--- a/drivers/clk/mediatek/clk-mt6797-img.c
+++ b/drivers/clk/mediatek/clk-mt6797-img.c
@@ -39,7 +39,7 @@ static const struct of_device_id of_match_clk_mt6797_img[] = {
static int clk_mt6797_img_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -48,7 +48,7 @@ static int clk_mt6797_img_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c
index 01fdce287247..0846011fc894 100644
--- a/drivers/clk/mediatek/clk-mt6797-mm.c
+++ b/drivers/clk/mediatek/clk-mt6797-mm.c
@@ -96,7 +96,7 @@ static int clk_mt6797_mm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->parent->of_node;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_MM_NR);
@@ -104,7 +104,7 @@ static int clk_mt6797_mm_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt6797-vdec.c b/drivers/clk/mediatek/clk-mt6797-vdec.c
index bbbc8119c3af..de857894e033 100644
--- a/drivers/clk/mediatek/clk-mt6797-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6797-vdec.c
@@ -56,7 +56,7 @@ static const struct of_device_id of_match_clk_mt6797_vdec[] = {
static int clk_mt6797_vdec_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -65,7 +65,7 @@ static int clk_mt6797_vdec_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt6797-venc.c b/drivers/clk/mediatek/clk-mt6797-venc.c
index 2c75f0cbfb51..78b7ed55f979 100644
--- a/drivers/clk/mediatek/clk-mt6797-venc.c
+++ b/drivers/clk/mediatek/clk-mt6797-venc.c
@@ -41,7 +41,7 @@ static const struct of_device_id of_match_clk_mt6797_venc[] = {
static int clk_mt6797_venc_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -50,7 +50,7 @@ static int clk_mt6797_venc_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
index 02259e81625a..b89f325a4b9b 100644
--- a/drivers/clk/mediatek/clk-mt6797.c
+++ b/drivers/clk/mediatek/clk-mt6797.c
@@ -383,7 +383,7 @@ static const struct mtk_composite top_muxes[] = {
static int mtk_topckgen_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
@@ -399,7 +399,7 @@ static int mtk_topckgen_init(struct platform_device *pdev)
mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
&mt6797_clk_lock, clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct mtk_gate_regs infra0_cg_regs = {
@@ -556,7 +556,7 @@ static const struct mtk_fixed_factor infra_fixed_divs[] = {
FACTOR(CLK_INFRA_13M, "clk13m", "clk26m", 1, 2),
};
-static struct clk_onecell_data *infra_clk_data;
+static struct clk_hw_onecell_data *infra_clk_data;
static void mtk_infrasys_init_early(struct device_node *node)
{
@@ -566,13 +566,14 @@ static void mtk_infrasys_init_early(struct device_node *node)
infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
for (i = 0; i < CLK_INFRA_NR; i++)
- infra_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+ infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
}
mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
infra_clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+ infra_clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -590,8 +591,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
} else {
for (i = 0; i < CLK_INFRA_NR; i++) {
- if (infra_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER))
- infra_clk_data->clks[i] = ERR_PTR(-ENOENT);
+ if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
+ infra_clk_data->hws[i] = ERR_PTR(-ENOENT);
}
}
@@ -600,7 +601,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
infra_clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+ infra_clk_data);
}
#define MT6797_PLL_FMAX (3000UL * MHZ)
@@ -635,31 +637,31 @@ static int mtk_infrasys_init(struct platform_device *pdev)
NULL)
static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0220, 0x022C, 0xF0000101, PLL_AO,
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0220, 0x022C, 0xF0000100, PLL_AO,
21, 0x220, 4, 0x0, 0x224, 0),
- PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0230, 0x023C, 0xFE000011, 0, 7,
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0230, 0x023C, 0xFE000010, 0, 7,
0x230, 4, 0x0, 0x234, 14),
- PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x0240, 0x024C, 0x00000101, 0, 21,
+ PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x0240, 0x024C, 0x00000100, 0, 21,
0x244, 24, 0x0, 0x244, 0),
- PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0250, 0x025C, 0x00000121, 0, 21,
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0250, 0x025C, 0x00000120, 0, 21,
0x250, 4, 0x0, 0x254, 0),
- PLL(CLK_APMIXED_IMGPLL, "imgpll", 0x0260, 0x026C, 0x00000121, 0, 21,
+ PLL(CLK_APMIXED_IMGPLL, "imgpll", 0x0260, 0x026C, 0x00000120, 0, 21,
0x260, 4, 0x0, 0x264, 0),
- PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0270, 0x027C, 0xC0000121, 0, 21,
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0270, 0x027C, 0xC0000120, 0, 21,
0x270, 4, 0x0, 0x274, 0),
- PLL(CLK_APMIXED_CODECPLL, "codecpll", 0x0290, 0x029C, 0x00000121, 0, 21,
+ PLL(CLK_APMIXED_CODECPLL, "codecpll", 0x0290, 0x029C, 0x00000120, 0, 21,
0x290, 4, 0x0, 0x294, 0),
- PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x02E4, 0x02F0, 0x00000121, 0, 21,
+ PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x02E4, 0x02F0, 0x00000120, 0, 21,
0x2E4, 4, 0x0, 0x2E8, 0),
- PLL(CLK_APMIXED_APLL1, "apll1", 0x02A0, 0x02B0, 0x00000131, 0, 31,
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x02A0, 0x02B0, 0x00000130, 0, 31,
0x2A0, 4, 0x2A8, 0x2A4, 0),
- PLL(CLK_APMIXED_APLL2, "apll2", 0x02B4, 0x02C4, 0x00000131, 0, 31,
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x02B4, 0x02C4, 0x00000130, 0, 31,
0x2B4, 4, 0x2BC, 0x2B8, 0),
};
static int mtk_apmixedsys_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR);
@@ -668,7 +670,7 @@ static int mtk_apmixedsys_init(struct platform_device *pdev)
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct of_device_id of_match_clk_mt6797[] = {
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
index 2bd4295bc36b..9f2e5aa7b5d9 100644
--- a/drivers/clk/mediatek/clk-mt7622-aud.c
+++ b/drivers/clk/mediatek/clk-mt7622-aud.c
@@ -132,7 +132,7 @@ static const struct mtk_gate audio_clks[] = {
static int clk_mt7622_audiosys_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -141,7 +141,7 @@ static int clk_mt7622_audiosys_init(struct platform_device *pdev)
mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r) {
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
index c9947dc7ba5a..b12d48705496 100644
--- a/drivers/clk/mediatek/clk-mt7622-eth.c
+++ b/drivers/clk/mediatek/clk-mt7622-eth.c
@@ -67,7 +67,7 @@ static const struct mtk_gate sgmii_clks[] = {
static int clk_mt7622_ethsys_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -76,7 +76,7 @@ static int clk_mt7622_ethsys_init(struct platform_device *pdev)
mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
@@ -89,7 +89,7 @@ static int clk_mt7622_ethsys_init(struct platform_device *pdev)
static int clk_mt7622_sgmiisys_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -98,7 +98,7 @@ static int clk_mt7622_sgmiisys_init(struct platform_device *pdev)
mtk_clk_register_gates(node, sgmii_clks, ARRAY_SIZE(sgmii_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
index 628be0c9f888..58728e35e80a 100644
--- a/drivers/clk/mediatek/clk-mt7622-hif.c
+++ b/drivers/clk/mediatek/clk-mt7622-hif.c
@@ -78,7 +78,7 @@ static const struct mtk_gate pcie_clks[] = {
static int clk_mt7622_ssusbsys_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -87,7 +87,7 @@ static int clk_mt7622_ssusbsys_init(struct platform_device *pdev)
mtk_clk_register_gates(node, ssusb_clks, ARRAY_SIZE(ssusb_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
@@ -100,7 +100,7 @@ static int clk_mt7622_ssusbsys_init(struct platform_device *pdev)
static int clk_mt7622_pciesys_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -109,7 +109,7 @@ static int clk_mt7622_pciesys_init(struct platform_device *pdev)
mtk_clk_register_gates(node, pcie_clks, ARRAY_SIZE(pcie_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
index 0e1fb30a1e98..e4a5e5230861 100644
--- a/drivers/clk/mediatek/clk-mt7622.c
+++ b/drivers/clk/mediatek/clk-mt7622.c
@@ -329,23 +329,23 @@ static const struct mtk_gate_regs peri1_cg_regs = {
};
static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x00000001,
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0,
PLL_AO, 21, 0x0204, 24, 0, 0x0204, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0210, 0x021C, 0x00000001,
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0210, 0x021C, 0,
HAVE_RST_BAR, 21, 0x0214, 24, 0, 0x0214, 0),
- PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0220, 0x022C, 0x00000001,
+ PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0220, 0x022C, 0,
HAVE_RST_BAR, 7, 0x0224, 24, 0, 0x0224, 14),
- PLL(CLK_APMIXED_ETH1PLL, "eth1pll", 0x0300, 0x0310, 0x00000001,
+ PLL(CLK_APMIXED_ETH1PLL, "eth1pll", 0x0300, 0x0310, 0,
0, 21, 0x0300, 1, 0, 0x0304, 0),
- PLL(CLK_APMIXED_ETH2PLL, "eth2pll", 0x0314, 0x0320, 0x00000001,
+ PLL(CLK_APMIXED_ETH2PLL, "eth2pll", 0x0314, 0x0320, 0,
0, 21, 0x0314, 1, 0, 0x0318, 0),
- PLL(CLK_APMIXED_AUD1PLL, "aud1pll", 0x0324, 0x0330, 0x00000001,
+ PLL(CLK_APMIXED_AUD1PLL, "aud1pll", 0x0324, 0x0330, 0,
0, 31, 0x0324, 1, 0, 0x0328, 0),
- PLL(CLK_APMIXED_AUD2PLL, "aud2pll", 0x0334, 0x0340, 0x00000001,
+ PLL(CLK_APMIXED_AUD2PLL, "aud2pll", 0x0334, 0x0340, 0,
0, 31, 0x0334, 1, 0, 0x0338, 0),
- PLL(CLK_APMIXED_TRGPLL, "trgpll", 0x0344, 0x0354, 0x00000001,
+ PLL(CLK_APMIXED_TRGPLL, "trgpll", 0x0344, 0x0354, 0,
0, 21, 0x0344, 1, 0, 0x0348, 0),
- PLL(CLK_APMIXED_SGMIPLL, "sgmipll", 0x0358, 0x0368, 0x00000001,
+ PLL(CLK_APMIXED_SGMIPLL, "sgmipll", 0x0358, 0x0368, 0,
0, 21, 0x0358, 1, 0, 0x035C, 0),
};
@@ -612,7 +612,7 @@ static struct mtk_composite peri_muxes[] = {
static int mtk_topckgen_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
@@ -637,17 +637,17 @@ static int mtk_topckgen_init(struct platform_device *pdev)
mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
clk_data);
- clk_prepare_enable(clk_data->clks[CLK_TOP_AXI_SEL]);
- clk_prepare_enable(clk_data->clks[CLK_TOP_MEM_SEL]);
- clk_prepare_enable(clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]);
+ clk_prepare_enable(clk_data->hws[CLK_TOP_AXI_SEL]->clk);
+ clk_prepare_enable(clk_data->hws[CLK_TOP_MEM_SEL]->clk);
+ clk_prepare_enable(clk_data->hws[CLK_TOP_DDRPHYCFG_SEL]->clk);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static int mtk_infrasys_init(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
@@ -658,8 +658,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
mtk_clk_register_cpumuxes(node, infra_muxes, ARRAY_SIZE(infra_muxes),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get,
- clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+ clk_data);
if (r)
return r;
@@ -670,7 +670,7 @@ static int mtk_infrasys_init(struct platform_device *pdev)
static int mtk_apmixedsys_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
@@ -683,15 +683,15 @@ static int mtk_apmixedsys_init(struct platform_device *pdev)
mtk_clk_register_gates(node, apmixed_clks,
ARRAY_SIZE(apmixed_clks), clk_data);
- clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMPLL]);
- clk_prepare_enable(clk_data->clks[CLK_APMIXED_MAIN_CORE_EN]);
+ clk_prepare_enable(clk_data->hws[CLK_APMIXED_ARMPLL]->clk);
+ clk_prepare_enable(clk_data->hws[CLK_APMIXED_MAIN_CORE_EN]->clk);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static int mtk_pericfg_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
void __iomem *base;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -708,11 +708,11 @@ static int mtk_pericfg_init(struct platform_device *pdev)
mtk_clk_register_composites(peri_muxes, ARRAY_SIZE(peri_muxes), base,
&mt7622_clk_lock, clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
return r;
- clk_prepare_enable(clk_data->clks[CLK_PERI_UART0_PD]);
+ clk_prepare_enable(clk_data->hws[CLK_PERI_UART0_PD]->clk);
mtk_register_reset_controller(node, 2, 0x0);
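[Editor's note] mt7622 (and mt7629 below) also enable a few always-on clocks at probe time. Since clk_data->hws[] now holds struct clk_hw pointers rather than struct clk pointers, the consumer-side clk_prepare_enable() call reaches the struct clk through the hw's ->clk backpointer, as the diff shows. A minimal sketch; the helper name and idx are placeholders:

/* clk_data->hws[i] is a struct clk_hw *; clk_prepare_enable() wants a struct clk *. */
static void example_enable_critical(struct clk_hw_onecell_data *clk_data, unsigned int idx)
{
	struct clk_hw *hw = clk_data->hws[idx];

	if (!IS_ERR_OR_NULL(hw))
		clk_prepare_enable(hw->clk);	/* ->clk is the framework's per-hw struct clk */
}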
diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
index 88279d0ea1a7..c49fd732c9b2 100644
--- a/drivers/clk/mediatek/clk-mt7629-eth.c
+++ b/drivers/clk/mediatek/clk-mt7629-eth.c
@@ -78,7 +78,7 @@ static const struct mtk_gate sgmii_clks[2][4] = {
static int clk_mt7629_ethsys_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -86,7 +86,7 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev)
mtk_clk_register_gates(node, eth_clks, CLK_ETH_NR_CLK, clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
@@ -99,7 +99,7 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev)
static int clk_mt7629_sgmiisys_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
static int id;
int r;
@@ -109,7 +109,7 @@ static int clk_mt7629_sgmiisys_init(struct platform_device *pdev)
mtk_clk_register_gates(node, sgmii_clks[id++], CLK_SGMII_NR_CLK,
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c
index 5c5b37207afb..acaa97fda331 100644
--- a/drivers/clk/mediatek/clk-mt7629-hif.c
+++ b/drivers/clk/mediatek/clk-mt7629-hif.c
@@ -73,7 +73,7 @@ static const struct mtk_gate pcie_clks[] = {
static int clk_mt7629_ssusbsys_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -82,7 +82,7 @@ static int clk_mt7629_ssusbsys_init(struct platform_device *pdev)
mtk_clk_register_gates(node, ssusb_clks, ARRAY_SIZE(ssusb_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
@@ -95,7 +95,7 @@ static int clk_mt7629_ssusbsys_init(struct platform_device *pdev)
static int clk_mt7629_pciesys_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -104,7 +104,7 @@ static int clk_mt7629_pciesys_init(struct platform_device *pdev)
mtk_clk_register_gates(node, pcie_clks, ARRAY_SIZE(pcie_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
index c0e023bf31eb..e4a08c811adc 100644
--- a/drivers/clk/mediatek/clk-mt7629.c
+++ b/drivers/clk/mediatek/clk-mt7629.c
@@ -336,17 +336,17 @@ static const struct mtk_gate_regs peri1_cg_regs = {
};
static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x00000001,
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0,
0, 21, 0x0204, 24, 0, 0x0204, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0210, 0x021C, 0x00000001,
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0210, 0x021C, 0,
HAVE_RST_BAR, 21, 0x0214, 24, 0, 0x0214, 0),
- PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0220, 0x022C, 0x00000001,
+ PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0220, 0x022C, 0,
HAVE_RST_BAR, 7, 0x0224, 24, 0, 0x0224, 14),
- PLL(CLK_APMIXED_ETH1PLL, "eth1pll", 0x0300, 0x0310, 0x00000001,
+ PLL(CLK_APMIXED_ETH1PLL, "eth1pll", 0x0300, 0x0310, 0,
0, 21, 0x0300, 1, 0, 0x0304, 0),
- PLL(CLK_APMIXED_ETH2PLL, "eth2pll", 0x0314, 0x0320, 0x00000001,
+ PLL(CLK_APMIXED_ETH2PLL, "eth2pll", 0x0314, 0x0320, 0,
0, 21, 0x0314, 1, 0, 0x0318, 0),
- PLL(CLK_APMIXED_SGMIPLL, "sgmipll", 0x0358, 0x0368, 0x00000001,
+ PLL(CLK_APMIXED_SGMIPLL, "sgmipll", 0x0358, 0x0368, 0,
0, 21, 0x0358, 1, 0, 0x035C, 0),
};
@@ -572,7 +572,7 @@ static struct mtk_composite peri_muxes[] = {
static int mtk_topckgen_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
@@ -591,17 +591,17 @@ static int mtk_topckgen_init(struct platform_device *pdev)
mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
base, &mt7629_clk_lock, clk_data);
- clk_prepare_enable(clk_data->clks[CLK_TOP_AXI_SEL]);
- clk_prepare_enable(clk_data->clks[CLK_TOP_MEM_SEL]);
- clk_prepare_enable(clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]);
+ clk_prepare_enable(clk_data->hws[CLK_TOP_AXI_SEL]->clk);
+ clk_prepare_enable(clk_data->hws[CLK_TOP_MEM_SEL]->clk);
+ clk_prepare_enable(clk_data->hws[CLK_TOP_DDRPHYCFG_SEL]->clk);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static int mtk_infrasys_init(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
@@ -611,13 +611,13 @@ static int mtk_infrasys_init(struct platform_device *pdev)
mtk_clk_register_cpumuxes(node, infra_muxes, ARRAY_SIZE(infra_muxes),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get,
- clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+ clk_data);
}
static int mtk_pericfg_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
void __iomem *base;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -634,18 +634,18 @@ static int mtk_pericfg_init(struct platform_device *pdev)
mtk_clk_register_composites(peri_muxes, ARRAY_SIZE(peri_muxes), base,
&mt7629_clk_lock, clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
return r;
- clk_prepare_enable(clk_data->clks[CLK_PERI_UART0_PD]);
+ clk_prepare_enable(clk_data->hws[CLK_PERI_UART0_PD]->clk);
return 0;
}
static int mtk_apmixedsys_init(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
@@ -658,10 +658,10 @@ static int mtk_apmixedsys_init(struct platform_device *pdev)
mtk_clk_register_gates(node, apmixed_clks,
ARRAY_SIZE(apmixed_clks), clk_data);
- clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMPLL]);
- clk_prepare_enable(clk_data->clks[CLK_APMIXED_MAIN_CORE_EN]);
+ clk_prepare_enable(clk_data->hws[CLK_APMIXED_ARMPLL]->clk);
+ clk_prepare_enable(clk_data->hws[CLK_APMIXED_MAIN_CORE_EN]->clk);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
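
The mt7629 conversions above all follow the same shape: the init routine fills a struct clk_hw_onecell_data, enables any critical clocks through the hws[] array, and registers the node with of_clk_add_hw_provider()/of_clk_hw_onecell_get(). A minimal sketch of that shape, not part of this diff, with NUM_CLKS and CLK_CRITICAL_SEL as made-up indices and a bare kzalloc() standing in for mtk_alloc_clk_data():

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/overflow.h>
#include <linux/slab.h>

/* NUM_CLKS and CLK_CRITICAL_SEL are hypothetical binding indices. */
static int example_clk_provider_init(struct device_node *node)
{
	struct clk_hw_onecell_data *clk_data;

	/* One slot per binding index; hws[] is the flexible array member. */
	clk_data = kzalloc(struct_size(clk_data, hws, NUM_CLKS), GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;
	clk_data->num = NUM_CLKS;

	/* ... register gates/muxes/PLLs into clk_data->hws[] here ... */

	/* Critical clocks are reached through the hw, not a struct clk array. */
	clk_prepare_enable(clk_data->hws[CLK_CRITICAL_SEL]->clk);

	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
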
diff --git a/drivers/clk/mediatek/clk-mt7986-apmixed.c b/drivers/clk/mediatek/clk-mt7986-apmixed.c
index 21d4c82e782a..62080ee4dbe3 100644
--- a/drivers/clk/mediatek/clk-mt7986-apmixed.c
+++ b/drivers/clk/mediatek/clk-mt7986-apmixed.c
@@ -42,21 +42,21 @@
"clkxtal")
static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x00000001, 0, 32,
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x0, 0, 32,
0x0200, 4, 0, 0x0204, 0),
- PLL(CLK_APMIXED_NET2PLL, "net2pll", 0x0210, 0x021C, 0x00000001, 0, 32,
+ PLL(CLK_APMIXED_NET2PLL, "net2pll", 0x0210, 0x021C, 0x0, 0, 32,
0x0210, 4, 0, 0x0214, 0),
- PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0220, 0x022C, 0x00000001, 0, 32,
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0220, 0x022C, 0x0, 0, 32,
0x0220, 4, 0, 0x0224, 0),
- PLL(CLK_APMIXED_SGMPLL, "sgmpll", 0x0230, 0x023c, 0x00000001, 0, 32,
+ PLL(CLK_APMIXED_SGMPLL, "sgmpll", 0x0230, 0x023c, 0x0, 0, 32,
0x0230, 4, 0, 0x0234, 0),
- PLL(CLK_APMIXED_WEDMCUPLL, "wedmcupll", 0x0240, 0x024c, 0x00000001, 0,
+ PLL(CLK_APMIXED_WEDMCUPLL, "wedmcupll", 0x0240, 0x024c, 0x0, 0,
32, 0x0240, 4, 0, 0x0244, 0),
- PLL(CLK_APMIXED_NET1PLL, "net1pll", 0x0250, 0x025c, 0x00000001, 0, 32,
+ PLL(CLK_APMIXED_NET1PLL, "net1pll", 0x0250, 0x025c, 0x0, 0, 32,
0x0250, 4, 0, 0x0254, 0),
- PLL(CLK_APMIXED_MPLL, "mpll", 0x0260, 0x0270, 0x00000001, 0, 32, 0x0260,
+ PLL(CLK_APMIXED_MPLL, "mpll", 0x0260, 0x0270, 0x0, 0, 32, 0x0260,
4, 0, 0x0264, 0),
- PLL(CLK_APMIXED_APLL2, "apll2", 0x0278, 0x0288, 0x00000001, 0, 32,
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0278, 0x0288, 0x0, 0, 32,
0x0278, 4, 0, 0x027c, 0),
};
@@ -67,7 +67,7 @@ static const struct of_device_id of_match_clk_mt7986_apmixed[] = {
static int clk_mt7986_apmixed_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -77,9 +77,9 @@ static int clk_mt7986_apmixed_probe(struct platform_device *pdev)
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMPLL]);
+ clk_prepare_enable(clk_data->hws[CLK_APMIXED_ARMPLL]->clk);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r) {
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
diff --git a/drivers/clk/mediatek/clk-mt7986-eth.c b/drivers/clk/mediatek/clk-mt7986-eth.c
index 495d023ccad7..7868c0728e96 100644
--- a/drivers/clk/mediatek/clk-mt7986-eth.c
+++ b/drivers/clk/mediatek/clk-mt7986-eth.c
@@ -79,7 +79,7 @@ static const struct mtk_gate eth_clks[] __initconst = {
static void __init mtk_sgmiisys_0_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(ARRAY_SIZE(sgmii0_clks));
@@ -87,7 +87,7 @@ static void __init mtk_sgmiisys_0_init(struct device_node *node)
mtk_clk_register_gates(node, sgmii0_clks, ARRAY_SIZE(sgmii0_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -97,7 +97,7 @@ CLK_OF_DECLARE(mtk_sgmiisys_0, "mediatek,mt7986-sgmiisys_0",
static void __init mtk_sgmiisys_1_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(ARRAY_SIZE(sgmii1_clks));
@@ -105,7 +105,7 @@ static void __init mtk_sgmiisys_1_init(struct device_node *node)
mtk_clk_register_gates(node, sgmii1_clks, ARRAY_SIZE(sgmii1_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
@@ -116,17 +116,17 @@ CLK_OF_DECLARE(mtk_sgmiisys_1, "mediatek,mt7986-sgmiisys_1",
static void __init mtk_ethsys_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(ARRAY_SIZE(eth_clks));
mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks), clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
}
-CLK_OF_DECLARE(mtk_ethsys, "mediatek,mt7986-ethsys_ck", mtk_ethsys_init);
+CLK_OF_DECLARE(mtk_ethsys, "mediatek,mt7986-ethsys", mtk_ethsys_init);
diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c
index f209c559fbc3..d90727a53283 100644
--- a/drivers/clk/mediatek/clk-mt7986-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c
@@ -171,7 +171,7 @@ static const struct mtk_gate infra_clks[] = {
static int clk_mt7986_infracfg_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
void __iomem *base;
@@ -195,7 +195,7 @@ static int clk_mt7986_infracfg_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r) {
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
diff --git a/drivers/clk/mediatek/clk-mt7986-topckgen.c b/drivers/clk/mediatek/clk-mt7986-topckgen.c
index 8f6f79b6e31e..de5121cf2877 100644
--- a/drivers/clk/mediatek/clk-mt7986-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt7986-topckgen.c
@@ -283,7 +283,7 @@ static const struct mtk_mux top_muxes[] = {
static int clk_mt7986_topckgen_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
void __iomem *base;
@@ -306,14 +306,14 @@ static int clk_mt7986_topckgen_probe(struct platform_device *pdev)
mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
&mt7986_clk_lock, clk_data);
- clk_prepare_enable(clk_data->clks[CLK_TOP_SYSAXI_SEL]);
- clk_prepare_enable(clk_data->clks[CLK_TOP_SYSAPB_SEL]);
- clk_prepare_enable(clk_data->clks[CLK_TOP_DRAMC_SEL]);
- clk_prepare_enable(clk_data->clks[CLK_TOP_DRAMC_MD32_SEL]);
- clk_prepare_enable(clk_data->clks[CLK_TOP_F26M_SEL]);
- clk_prepare_enable(clk_data->clks[CLK_TOP_SGM_REG_SEL]);
+ clk_prepare_enable(clk_data->hws[CLK_TOP_SYSAXI_SEL]->clk);
+ clk_prepare_enable(clk_data->hws[CLK_TOP_SYSAPB_SEL]->clk);
+ clk_prepare_enable(clk_data->hws[CLK_TOP_DRAMC_SEL]->clk);
+ clk_prepare_enable(clk_data->hws[CLK_TOP_DRAMC_MD32_SEL]->clk);
+ clk_prepare_enable(clk_data->hws[CLK_TOP_F26M_SEL]->clk);
+ clk_prepare_enable(clk_data->hws[CLK_TOP_SGM_REG_SEL]->clk);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r) {
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
index 09ad272d51f1..9ef524b44862 100644
--- a/drivers/clk/mediatek/clk-mt8135.c
+++ b/drivers/clk/mediatek/clk-mt8135.c
@@ -516,7 +516,7 @@ static const struct mtk_composite peri_clks[] __initconst = {
static void __init mtk_topckgen_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
void __iomem *base;
int r;
@@ -533,9 +533,9 @@ static void __init mtk_topckgen_init(struct device_node *node)
mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
&mt8135_clk_lock, clk_data);
- clk_prepare_enable(clk_data->clks[CLK_TOP_CCI_SEL]);
+ clk_prepare_enable(clk_data->hws[CLK_TOP_CCI_SEL]->clk);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -544,7 +544,7 @@ CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8135-topckgen", mtk_topckgen_init);
static void __init mtk_infrasys_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
@@ -552,9 +552,9 @@ static void __init mtk_infrasys_init(struct device_node *node)
mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
clk_data);
- clk_prepare_enable(clk_data->clks[CLK_INFRA_M4U]);
+ clk_prepare_enable(clk_data->hws[CLK_INFRA_M4U]->clk);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -565,7 +565,7 @@ CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8135-infracfg", mtk_infrasys_init);
static void __init mtk_pericfg_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
void __iomem *base;
@@ -582,7 +582,7 @@ static void __init mtk_pericfg_init(struct device_node *node)
mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base,
&mt8135_clk_lock, clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -612,21 +612,21 @@ CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8135-pericfg", mtk_pericfg_init);
}
static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_ARMPLL1, "armpll1", 0x200, 0x218, 0x80000001, 0, 21, 0x204, 24, 0x0, 0x204, 0),
- PLL(CLK_APMIXED_ARMPLL2, "armpll2", 0x2cc, 0x2e4, 0x80000001, 0, 21, 0x2d0, 24, 0x0, 0x2d0, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x21c, 0x234, 0xf0000001, HAVE_RST_BAR, 21, 0x21c, 6, 0x0, 0x220, 0),
- PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x238, 0x250, 0xf3000001, HAVE_RST_BAR, 7, 0x238, 6, 0x0, 0x238, 9),
- PLL(CLK_APMIXED_MMPLL, "mmpll", 0x254, 0x26c, 0xf0000001, HAVE_RST_BAR, 21, 0x254, 6, 0x0, 0x258, 0),
- PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x278, 0x290, 0x80000001, 0, 21, 0x278, 6, 0x0, 0x27c, 0),
- PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x294, 0x2ac, 0x80000001, 0, 31, 0x294, 6, 0x0, 0x298, 0),
- PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2b0, 0x2c8, 0x80000001, 0, 21, 0x2b0, 6, 0x0, 0x2b4, 0),
- PLL(CLK_APMIXED_AUDPLL, "audpll", 0x2e8, 0x300, 0x80000001, 0, 31, 0x2e8, 6, 0x2f8, 0x2ec, 0),
- PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x304, 0x31c, 0x80000001, 0, 21, 0x2b0, 6, 0x0, 0x308, 0),
+ PLL(CLK_APMIXED_ARMPLL1, "armpll1", 0x200, 0x218, 0x80000000, 0, 21, 0x204, 24, 0x0, 0x204, 0),
+ PLL(CLK_APMIXED_ARMPLL2, "armpll2", 0x2cc, 0x2e4, 0x80000000, 0, 21, 0x2d0, 24, 0x0, 0x2d0, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x21c, 0x234, 0xf0000000, HAVE_RST_BAR, 21, 0x21c, 6, 0x0, 0x220, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x238, 0x250, 0xf3000000, HAVE_RST_BAR, 7, 0x238, 6, 0x0, 0x238, 9),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x254, 0x26c, 0xf0000000, HAVE_RST_BAR, 21, 0x254, 6, 0x0, 0x258, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x278, 0x290, 0x80000000, 0, 21, 0x278, 6, 0x0, 0x27c, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x294, 0x2ac, 0x80000000, 0, 31, 0x294, 6, 0x0, 0x298, 0),
+ PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2b0, 0x2c8, 0x80000000, 0, 21, 0x2b0, 6, 0x0, 0x2b4, 0),
+ PLL(CLK_APMIXED_AUDPLL, "audpll", 0x2e8, 0x300, 0x80000000, 0, 31, 0x2e8, 6, 0x2f8, 0x2ec, 0),
+ PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x304, 0x31c, 0x80000000, 0, 21, 0x2b0, 6, 0x0, 0x308, 0),
};
static void __init mtk_apmixedsys_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
if (!clk_data)
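
The recurring en_mask edits above (for example 0x80000001 -> 0x80000000, or 0x00000001 -> 0) all drop the low bit. This appears to track a MediaTek PLL change where the base enable bit is driven through a separate pll_en_bit field instead of living in en_mask, so en_mask keeps only the extra enable bits. A rough sketch of that enable sequence, with register pointers and field names assumed rather than taken from this diff:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical sketch: base enable via pll_en_bit, extras via en_mask. */
static void pll_enable_sketch(void __iomem *en_addr, void __iomem *con0,
			      unsigned int pll_en_bit, u32 en_mask)
{
	/* The base enable bit (bit 0 by default) is set on its own. */
	writel(readl(en_addr) | BIT(pll_en_bit), en_addr);

	/* en_mask now carries only the additional enable bits, if any. */
	if (en_mask)
		writel(readl(con0) | en_mask, con0);
}
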
diff --git a/drivers/clk/mediatek/clk-mt8167-aud.c b/drivers/clk/mediatek/clk-mt8167-aud.c
index 3f7bf6485792..ce1ae8d243c3 100644
--- a/drivers/clk/mediatek/clk-mt8167-aud.c
+++ b/drivers/clk/mediatek/clk-mt8167-aud.c
@@ -50,14 +50,14 @@ static const struct mtk_gate aud_clks[] __initconst = {
static void __init mtk_audsys_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
diff --git a/drivers/clk/mediatek/clk-mt8167-img.c b/drivers/clk/mediatek/clk-mt8167-img.c
index 3b4ec9eae432..e359e563d2b7 100644
--- a/drivers/clk/mediatek/clk-mt8167-img.c
+++ b/drivers/clk/mediatek/clk-mt8167-img.c
@@ -43,14 +43,14 @@ static const struct mtk_gate img_clks[] __initconst = {
static void __init mtk_imgsys_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
index 90b871730f2d..4fd82fe87d6e 100644
--- a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
@@ -41,14 +41,14 @@ static const struct mtk_gate mfg_clks[] __initconst = {
static void __init mtk_mfgcfg_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks), clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt8167-mm.c b/drivers/clk/mediatek/clk-mt8167-mm.c
index 963b129aade1..73910060577f 100644
--- a/drivers/clk/mediatek/clk-mt8167-mm.c
+++ b/drivers/clk/mediatek/clk-mt8167-mm.c
@@ -101,7 +101,7 @@ static int clk_mt8167_mm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = dev->parent->of_node;
const struct clk_mt8167_mm_driver_data *data;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int ret;
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
@@ -115,7 +115,7 @@ static int clk_mt8167_mm_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (ret)
return ret;
diff --git a/drivers/clk/mediatek/clk-mt8167-vdec.c b/drivers/clk/mediatek/clk-mt8167-vdec.c
index 910b28355ec0..ee4fffb6859d 100644
--- a/drivers/clk/mediatek/clk-mt8167-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8167-vdec.c
@@ -56,14 +56,14 @@ static const struct mtk_gate vdec_clks[] __initconst = {
static void __init mtk_vdecsys_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt8167.c b/drivers/clk/mediatek/clk-mt8167.c
index 812b33a57530..f900ac4bf7b8 100644
--- a/drivers/clk/mediatek/clk-mt8167.c
+++ b/drivers/clk/mediatek/clk-mt8167.c
@@ -923,7 +923,7 @@ static const struct mtk_gate top_clks[] __initconst = {
static void __init mtk_topckgen_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
void __iomem *base;
@@ -945,7 +945,7 @@ static void __init mtk_topckgen_init(struct device_node *node)
mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
base, &mt8167_clk_lock, clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -954,7 +954,7 @@ CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8167-topckgen", mtk_topckgen_init);
static void __init mtk_infracfg_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
void __iomem *base;
@@ -969,7 +969,7 @@ static void __init mtk_infracfg_init(struct device_node *node)
mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base,
&mt8167_clk_lock, clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -1017,27 +1017,27 @@ static const struct mtk_pll_div_table mmpll_div_table[] = {
};
static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0x00000001, 0,
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0, 0,
21, 0x0104, 24, 0, 0x0104, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0x00000001,
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0,
HAVE_RST_BAR, 21, 0x0124, 24, 0, 0x0124, 0),
- PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000001,
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000000,
HAVE_RST_BAR, 7, 0x0144, 24, 0, 0x0144, 0),
- PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0x00000001, 0,
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0, 0,
21, 0x0164, 24, 0, 0x0164, 0, mmpll_div_table),
- PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0x00000001, 0,
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0, 0,
31, 0x0180, 1, 0x0194, 0x0184, 0),
- PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0x00000001, 0,
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0, 0,
31, 0x01A0, 1, 0x01B4, 0x01A4, 0),
- PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x01C0, 0x01D0, 0x00000001, 0,
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x01C0, 0x01D0, 0, 0,
21, 0x01C4, 24, 0, 0x01C4, 0),
- PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x01E0, 0x01F0, 0x00000001, 0,
+ PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x01E0, 0x01F0, 0, 0,
21, 0x01E4, 24, 0, 0x01E4, 0),
};
static void __init mtk_apmixedsys_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
void __iomem *base;
int r;
@@ -1053,7 +1053,7 @@ static void __init mtk_apmixedsys_init(struct device_node *node)
mtk_clk_register_dividers(apmixed_adj_divs, ARRAY_SIZE(apmixed_adj_divs),
base, &mt8167_clk_lock, clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
diff --git a/drivers/clk/mediatek/clk-mt8173-mm.c b/drivers/clk/mediatek/clk-mt8173-mm.c
index 36fa20be77b6..8abf42c2030c 100644
--- a/drivers/clk/mediatek/clk-mt8173-mm.c
+++ b/drivers/clk/mediatek/clk-mt8173-mm.c
@@ -115,7 +115,7 @@ static int clk_mt8173_mm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = dev->parent->of_node;
const struct clk_mt8173_mm_driver_data *data;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int ret;
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
@@ -129,7 +129,7 @@ static int clk_mt8173_mm_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (ret)
return ret;
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 46b7655feeaa..0929db330852 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -819,25 +819,25 @@ static const struct mtk_gate venclt_clks[] __initconst = {
GATE_VENCLT(CLK_VENCLT_CKE1, "venclt_cke1", "venclt_sel", 4),
};
-static struct clk_onecell_data *mt8173_top_clk_data __initdata;
-static struct clk_onecell_data *mt8173_pll_clk_data __initdata;
+static struct clk_hw_onecell_data *mt8173_top_clk_data __initdata;
+static struct clk_hw_onecell_data *mt8173_pll_clk_data __initdata;
static void __init mtk_clk_enable_critical(void)
{
if (!mt8173_top_clk_data || !mt8173_pll_clk_data)
return;
- clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA15PLL]);
- clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA7PLL]);
- clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_MEM_SEL]);
- clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]);
- clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_CCI400_SEL]);
- clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_RTC_SEL]);
+ clk_prepare_enable(mt8173_pll_clk_data->hws[CLK_APMIXED_ARMCA15PLL]->clk);
+ clk_prepare_enable(mt8173_pll_clk_data->hws[CLK_APMIXED_ARMCA7PLL]->clk);
+ clk_prepare_enable(mt8173_top_clk_data->hws[CLK_TOP_MEM_SEL]->clk);
+ clk_prepare_enable(mt8173_top_clk_data->hws[CLK_TOP_DDRPHYCFG_SEL]->clk);
+ clk_prepare_enable(mt8173_top_clk_data->hws[CLK_TOP_CCI400_SEL]->clk);
+ clk_prepare_enable(mt8173_top_clk_data->hws[CLK_TOP_RTC_SEL]->clk);
}
static void __init mtk_topckgen_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
void __iomem *base;
int r;
@@ -854,7 +854,7 @@ static void __init mtk_topckgen_init(struct device_node *node)
mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
&mt8173_clk_lock, clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -865,7 +865,7 @@ CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8173-topckgen", mtk_topckgen_init);
static void __init mtk_infrasys_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
@@ -877,7 +877,7 @@ static void __init mtk_infrasys_init(struct device_node *node)
mtk_clk_register_cpumuxes(node, cpu_muxes, ARRAY_SIZE(cpu_muxes),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -888,7 +888,7 @@ CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8173-infracfg", mtk_infrasys_init);
static void __init mtk_pericfg_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
void __iomem *base;
@@ -905,7 +905,7 @@ static void __init mtk_pericfg_init(struct device_node *node)
mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base,
&mt8173_clk_lock, clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -973,27 +973,27 @@ static const struct mtk_pll_div_table mmpll_div_table[] = {
};
static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_ARMCA15PLL, "armca15pll", 0x200, 0x20c, 0x00000001, 0, 21, 0x204, 24, 0x0, 0x204, 0),
- PLL(CLK_APMIXED_ARMCA7PLL, "armca7pll", 0x210, 0x21c, 0x00000001, 0, 21, 0x214, 24, 0x0, 0x214, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x220, 0x22c, 0xf0000101, HAVE_RST_BAR, 21, 0x220, 4, 0x0, 0x224, 0),
- PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x230, 0x23c, 0xfe000001, HAVE_RST_BAR, 7, 0x230, 4, 0x0, 0x234, 14),
- PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0x00000001, 0, 21, 0x244, 24, 0x0, 0x244, 0, mmpll_div_table),
- PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x250, 0x25c, 0x00000001, 0, 21, 0x250, 4, 0x0, 0x254, 0),
- PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x260, 0x26c, 0x00000001, 0, 21, 0x260, 4, 0x0, 0x264, 0),
- PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x270, 0x27c, 0x00000001, 0, 21, 0x270, 4, 0x0, 0x274, 0),
- PLL(CLK_APMIXED_MPLL, "mpll", 0x280, 0x28c, 0x00000001, 0, 21, 0x280, 4, 0x0, 0x284, 0),
- PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x290, 0x29c, 0x00000001, 0, 21, 0x290, 4, 0x0, 0x294, 0),
- PLL(CLK_APMIXED_APLL1, "apll1", 0x2a0, 0x2b0, 0x00000001, 0, 31, 0x2a0, 4, 0x2a4, 0x2a4, 0),
- PLL(CLK_APMIXED_APLL2, "apll2", 0x2b4, 0x2c4, 0x00000001, 0, 31, 0x2b4, 4, 0x2b8, 0x2b8, 0),
- PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2d0, 0x2dc, 0x00000001, 0, 21, 0x2d0, 4, 0x0, 0x2d4, 0),
- PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x2f0, 0x2fc, 0x00000001, 0, 21, 0x2f0, 4, 0x0, 0x2f4, 0),
+ PLL(CLK_APMIXED_ARMCA15PLL, "armca15pll", 0x200, 0x20c, 0, 0, 21, 0x204, 24, 0x0, 0x204, 0),
+ PLL(CLK_APMIXED_ARMCA7PLL, "armca7pll", 0x210, 0x21c, 0, 0, 21, 0x214, 24, 0x0, 0x214, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x220, 0x22c, 0xf0000100, HAVE_RST_BAR, 21, 0x220, 4, 0x0, 0x224, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x230, 0x23c, 0xfe000000, HAVE_RST_BAR, 7, 0x230, 4, 0x0, 0x234, 14),
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0, 0, 21, 0x244, 24, 0x0, 0x244, 0, mmpll_div_table),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x250, 0x25c, 0, 0, 21, 0x250, 4, 0x0, 0x254, 0),
+ PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x260, 0x26c, 0, 0, 21, 0x260, 4, 0x0, 0x264, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x270, 0x27c, 0, 0, 21, 0x270, 4, 0x0, 0x274, 0),
+ PLL(CLK_APMIXED_MPLL, "mpll", 0x280, 0x28c, 0, 0, 21, 0x280, 4, 0x0, 0x284, 0),
+ PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x290, 0x29c, 0, 0, 21, 0x290, 4, 0x0, 0x294, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x2a0, 0x2b0, 0, 0, 31, 0x2a0, 4, 0x2a4, 0x2a4, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x2b4, 0x2c4, 0, 0, 31, 0x2b4, 4, 0x2b8, 0x2b8, 0),
+ PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2d0, 0x2dc, 0, 0, 21, 0x2d0, 4, 0x0, 0x2d4, 0),
+ PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x2f0, 0x2fc, 0, 0, 21, 0x2f0, 4, 0x0, 0x2f4, 0),
};
static void __init mtk_apmixedsys_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
void __iomem *base;
- struct clk *clk;
+ struct clk_hw *hw;
int r, i;
base = of_iomap(node, 0);
@@ -1013,24 +1013,21 @@ static void __init mtk_apmixedsys_init(struct device_node *node)
for (i = 0; i < ARRAY_SIZE(apmixed_usb); i++) {
const struct mtk_clk_usb *cku = &apmixed_usb[i];
- clk = mtk_clk_register_ref2usb_tx(cku->name, cku->parent,
- base + cku->reg_ofs);
-
- if (IS_ERR(clk)) {
- pr_err("Failed to register clk %s: %ld\n", cku->name,
- PTR_ERR(clk));
+ hw = mtk_clk_register_ref2usb_tx(cku->name, cku->parent, base + cku->reg_ofs);
+ if (IS_ERR(hw)) {
+ pr_err("Failed to register clk %s: %ld\n", cku->name, PTR_ERR(hw));
continue;
}
- clk_data->clks[cku->id] = clk;
+ clk_data->hws[cku->id] = hw;
}
- clk = clk_register_divider(NULL, "hdmi_ref", "tvdpll_594m", 0,
- base + 0x40, 16, 3, CLK_DIVIDER_POWER_OF_TWO,
- NULL);
- clk_data->clks[CLK_APMIXED_HDMI_REF] = clk;
+ hw = clk_hw_register_divider(NULL, "hdmi_ref", "tvdpll_594m", 0,
+ base + 0x40, 16, 3, CLK_DIVIDER_POWER_OF_TWO,
+ NULL);
+ clk_data->hws[CLK_APMIXED_HDMI_REF] = hw;
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
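
The hunk above swaps clk_register_divider() for clk_hw_register_divider() and stores the result in the hws[] slot. A short sketch of that registration, not from this diff, with an IS_ERR() check added for illustration; the register layout (shift 16, width 3, power-of-two divider) and the CLK_APMIXED_HDMI_REF index mirror the code above:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <dt-bindings/clock/mt8173-clk.h>

static void register_hdmi_ref_sketch(struct clk_hw_onecell_data *clk_data,
				     void __iomem *base)
{
	struct clk_hw *hw;

	hw = clk_hw_register_divider(NULL, "hdmi_ref", "tvdpll_594m", 0,
				     base + 0x40, 16, 3,
				     CLK_DIVIDER_POWER_OF_TWO, NULL);
	if (IS_ERR(hw)) {
		pr_err("Failed to register hdmi_ref: %ld\n", PTR_ERR(hw));
		return;
	}

	clk_data->hws[CLK_APMIXED_HDMI_REF] = hw;
}
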
@@ -1042,7 +1039,7 @@ CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8173-apmixedsys",
static void __init mtk_imgsys_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
@@ -1050,7 +1047,7 @@ static void __init mtk_imgsys_init(struct device_node *node)
mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
@@ -1060,7 +1057,7 @@ CLK_OF_DECLARE(mtk_imgsys, "mediatek,mt8173-imgsys", mtk_imgsys_init);
static void __init mtk_vdecsys_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
@@ -1068,7 +1065,7 @@ static void __init mtk_vdecsys_init(struct device_node *node)
mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -1077,7 +1074,7 @@ CLK_OF_DECLARE(mtk_vdecsys, "mediatek,mt8173-vdecsys", mtk_vdecsys_init);
static void __init mtk_vencsys_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
@@ -1085,7 +1082,7 @@ static void __init mtk_vencsys_init(struct device_node *node)
mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -1094,7 +1091,7 @@ CLK_OF_DECLARE(mtk_vencsys, "mediatek,mt8173-vencsys", mtk_vencsys_init);
static void __init mtk_vencltsys_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_VENCLT_NR_CLK);
@@ -1102,7 +1099,7 @@ static void __init mtk_vencltsys_init(struct device_node *node)
mtk_clk_register_gates(node, venclt_clks, ARRAY_SIZE(venclt_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
diff --git a/drivers/clk/mediatek/clk-mt8183-audio.c b/drivers/clk/mediatek/clk-mt8183-audio.c
index c87450180b7b..b2d7746eddbe 100644
--- a/drivers/clk/mediatek/clk-mt8183-audio.c
+++ b/drivers/clk/mediatek/clk-mt8183-audio.c
@@ -69,7 +69,7 @@ static const struct mtk_gate audio_clks[] = {
static int clk_mt8183_audio_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
struct device_node *node = pdev->dev.of_node;
@@ -78,7 +78,7 @@ static int clk_mt8183_audio_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
return r;
diff --git a/drivers/clk/mediatek/clk-mt8183-cam.c b/drivers/clk/mediatek/clk-mt8183-cam.c
index 8643802c4471..fcc598a45165 100644
--- a/drivers/clk/mediatek/clk-mt8183-cam.c
+++ b/drivers/clk/mediatek/clk-mt8183-cam.c
@@ -36,7 +36,7 @@ static const struct mtk_gate cam_clks[] = {
static int clk_mt8183_cam_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK);
@@ -44,7 +44,7 @@ static int clk_mt8183_cam_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct of_device_id of_match_clk_mt8183_cam[] = {
diff --git a/drivers/clk/mediatek/clk-mt8183-img.c b/drivers/clk/mediatek/clk-mt8183-img.c
index 470d676a4a10..eb2def2cf0ae 100644
--- a/drivers/clk/mediatek/clk-mt8183-img.c
+++ b/drivers/clk/mediatek/clk-mt8183-img.c
@@ -36,7 +36,7 @@ static const struct mtk_gate img_clks[] = {
static int clk_mt8183_img_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
@@ -44,7 +44,7 @@ static int clk_mt8183_img_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct of_device_id of_match_clk_mt8183_img[] = {
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu0.c b/drivers/clk/mediatek/clk-mt8183-ipu0.c
index c5cb76fc9e5e..b30fc9f47518 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu0.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu0.c
@@ -29,7 +29,7 @@ static const struct mtk_gate ipu_core0_clks[] = {
static int clk_mt8183_ipu_core0_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_IPU_CORE0_NR_CLK);
@@ -37,7 +37,7 @@ static int clk_mt8183_ipu_core0_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, ipu_core0_clks, ARRAY_SIZE(ipu_core0_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct of_device_id of_match_clk_mt8183_ipu_core0[] = {
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu1.c b/drivers/clk/mediatek/clk-mt8183-ipu1.c
index 8fd5fe002890..b378957e11d0 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu1.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu1.c
@@ -29,7 +29,7 @@ static const struct mtk_gate ipu_core1_clks[] = {
static int clk_mt8183_ipu_core1_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_IPU_CORE1_NR_CLK);
@@ -37,7 +37,7 @@ static int clk_mt8183_ipu_core1_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, ipu_core1_clks, ARRAY_SIZE(ipu_core1_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct of_device_id of_match_clk_mt8183_ipu_core1[] = {
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
index 3f37d0ef1df1..941b43ac8bec 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
@@ -27,7 +27,7 @@ static const struct mtk_gate ipu_adl_clks[] = {
static int clk_mt8183_ipu_adl_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_IPU_ADL_NR_CLK);
@@ -35,7 +35,7 @@ static int clk_mt8183_ipu_adl_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, ipu_adl_clks, ARRAY_SIZE(ipu_adl_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct of_device_id of_match_clk_mt8183_ipu_adl[] = {
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
index 7e0eef79c461..ae82c2e17110 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
@@ -96,7 +96,7 @@ static const struct mtk_gate ipu_conn_clks[] = {
static int clk_mt8183_ipu_conn_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_IPU_CONN_NR_CLK);
@@ -104,7 +104,7 @@ static int clk_mt8183_ipu_conn_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, ipu_conn_clks, ARRAY_SIZE(ipu_conn_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct of_device_id of_match_clk_mt8183_ipu_conn[] = {
diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
index 37b4162c5882..d774edaf760b 100644
--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
@@ -28,7 +28,7 @@ static const struct mtk_gate mfg_clks[] = {
static int clk_mt8183_mfg_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
pm_runtime_enable(&pdev->dev);
@@ -38,7 +38,7 @@ static int clk_mt8183_mfg_probe(struct platform_device *pdev)
mtk_clk_register_gates_with_dev(node, mfg_clks, ARRAY_SIZE(mfg_clks),
clk_data, &pdev->dev);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct of_device_id of_match_clk_mt8183_mfg[] = {
diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c
index 9d60e09619c1..11ecc6fb0065 100644
--- a/drivers/clk/mediatek/clk-mt8183-mm.c
+++ b/drivers/clk/mediatek/clk-mt8183-mm.c
@@ -86,14 +86,14 @@ static int clk_mt8183_mm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->parent->of_node;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static struct platform_driver clk_mt8183_mm_drv = {
diff --git a/drivers/clk/mediatek/clk-mt8183-vdec.c b/drivers/clk/mediatek/clk-mt8183-vdec.c
index 6250fd1e0edc..0548cde159d0 100644
--- a/drivers/clk/mediatek/clk-mt8183-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8183-vdec.c
@@ -40,7 +40,7 @@ static const struct mtk_gate vdec_clks[] = {
static int clk_mt8183_vdec_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
@@ -48,7 +48,7 @@ static int clk_mt8183_vdec_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct of_device_id of_match_clk_mt8183_vdec[] = {
diff --git a/drivers/clk/mediatek/clk-mt8183-venc.c b/drivers/clk/mediatek/clk-mt8183-venc.c
index 6678ef03fab2..f86ec607d87a 100644
--- a/drivers/clk/mediatek/clk-mt8183-venc.c
+++ b/drivers/clk/mediatek/clk-mt8183-venc.c
@@ -32,7 +32,7 @@ static const struct mtk_gate venc_clks[] = {
static int clk_mt8183_venc_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
@@ -40,7 +40,7 @@ static int clk_mt8183_venc_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct of_device_id of_match_clk_mt8183_venc[] = {
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 68496554dd3d..b5c17988c337 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -1122,40 +1122,40 @@ static const struct mtk_pll_div_table mfgpll_div_table[] = {
};
static const struct mtk_pll_data plls[] = {
- PLL_B(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0200, 0x020C, 0x00000001,
+ PLL_B(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0200, 0x020C, 0,
HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0204, 24, 0x0, 0x0, 0,
0x0204, 0, 0, armpll_div_table),
- PLL_B(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x0210, 0x021C, 0x00000001,
+ PLL_B(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x0210, 0x021C, 0,
HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0214, 24, 0x0, 0x0, 0,
0x0214, 0, 0, armpll_div_table),
- PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x0290, 0x029C, 0x00000001,
+ PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x0290, 0x029C, 0,
HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0294, 24, 0x0, 0x0, 0,
0x0294, 0, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0220, 0x022C, 0x00000001,
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0220, 0x022C, 0,
HAVE_RST_BAR, BIT(24), 22, 8, 0x0224, 24, 0x0, 0x0, 0,
0x0224, 0, 0),
- PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0230, 0x023C, 0x00000001,
+ PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0230, 0x023C, 0,
HAVE_RST_BAR, BIT(24), 22, 8, 0x0234, 24, 0x0, 0x0, 0,
0x0234, 0, 0),
- PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0240, 0x024C, 0x00000001,
+ PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0240, 0x024C, 0,
0, 0, 22, 8, 0x0244, 24, 0x0, 0x0, 0, 0x0244, 0, 0,
mfgpll_div_table),
- PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0250, 0x025C, 0x00000001,
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0250, 0x025C, 0,
0, 0, 22, 8, 0x0254, 24, 0x0, 0x0, 0, 0x0254, 0, 0),
- PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0260, 0x026C, 0x00000001,
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0260, 0x026C, 0,
0, 0, 22, 8, 0x0264, 24, 0x0, 0x0, 0, 0x0264, 0, 0),
- PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0270, 0x027C, 0x00000001,
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0270, 0x027C, 0,
HAVE_RST_BAR, BIT(23), 22, 8, 0x0274, 24, 0x0, 0x0, 0,
0x0274, 0, 0),
- PLL(CLK_APMIXED_APLL1, "apll1", 0x02A0, 0x02B0, 0x00000001,
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x02A0, 0x02B0, 0,
0, 0, 32, 8, 0x02A0, 1, 0x02A8, 0x0014, 0, 0x02A4, 0, 0x02A0),
- PLL(CLK_APMIXED_APLL2, "apll2", 0x02b4, 0x02c4, 0x00000001,
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x02b4, 0x02c4, 0,
0, 0, 32, 8, 0x02B4, 1, 0x02BC, 0x0014, 1, 0x02B8, 0, 0x02B4),
};
static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
@@ -1165,10 +1165,10 @@ static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
-static struct clk_onecell_data *top_clk_data;
+static struct clk_hw_onecell_data *top_clk_data;
static void clk_mt8183_top_init_early(struct device_node *node)
{
@@ -1177,12 +1177,12 @@ static void clk_mt8183_top_init_early(struct device_node *node)
top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
for (i = 0; i < CLK_TOP_NR_CLK; i++)
- top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+ top_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
top_clk_data);
- of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+ of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
}
CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
@@ -1217,12 +1217,13 @@ static int clk_mt8183_top_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
top_clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+ top_clk_data);
}
static int clk_mt8183_infra_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -1231,7 +1232,7 @@ static int clk_mt8183_infra_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r) {
dev_err(&pdev->dev,
"%s(): could not register clock provider: %d\n",
@@ -1246,7 +1247,7 @@ static int clk_mt8183_infra_probe(struct platform_device *pdev)
static int clk_mt8183_peri_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
@@ -1254,12 +1255,12 @@ static int clk_mt8183_peri_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static int clk_mt8183_mcu_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
void __iomem *base;
@@ -1272,7 +1273,7 @@ static int clk_mt8183_mcu_probe(struct platform_device *pdev)
mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
&mt8183_clk_lock, clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct of_device_id of_match_clk_mt8183[] = {
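
The mt8183 topckgen change above keeps the early-init trick: the CLK_OF_DECLARE_DRIVER hook registers the provider with every slot set to ERR_PTR(-EPROBE_DEFER), so early consumers defer until the platform driver probe fills in the real clk_hw pointers. A minimal sketch of that pre-population step, not part of this diff; alloc_hw_onecell_data() and NUM_CLKS are assumed helpers standing in for mtk_alloc_clk_data() and the SoC clock count:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/of.h>

static struct clk_hw_onecell_data *early_clk_data;

static void __init example_top_init_early(struct device_node *node)
{
	int i;

	/* alloc_hw_onecell_data() is hypothetical; it sizes hws[] for NUM_CLKS. */
	early_clk_data = alloc_hw_onecell_data(NUM_CLKS);
	if (!early_clk_data)
		return;

	/* Consumers that look up a not-yet-registered clock see -EPROBE_DEFER. */
	for (i = 0; i < NUM_CLKS; i++)
		early_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);

	/* The later platform probe overwrites hws[] with real clk_hw pointers. */
	of_clk_add_hw_provider(node, of_clk_hw_onecell_get, early_clk_data);
}
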
diff --git a/drivers/clk/mediatek/clk-mt8186-apmixedsys.c b/drivers/clk/mediatek/clk-mt8186-apmixedsys.c
new file mode 100644
index 000000000000..e692a2a67ce1
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8186-apmixedsys.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/mt8186-clk.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define MT8186_PLL_FMAX (3800UL * MHZ)
+#define MT8186_PLL_FMIN (1500UL * MHZ)
+#define MT8186_INTEGER_BITS (8)
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pd_reg, _pd_shift, \
+ _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8186_PLL_FMAX, \
+ .fmin = MT8186_PLL_FMIN, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8186_INTEGER_BITS, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = 0, \
+ .pcw_chg_reg = 0, \
+ .en_reg = 0, \
+ .pll_en_bit = 0, \
+ }
+
+static const struct mtk_pll_data plls[] = {
+ /*
+ * armpll_ll/armpll_bl/ccipll are the main clock sources of the AP MCU
+ * and must not be turned off by Linux.
+ */
+ PLL(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0204, 0x0210, 0,
+ PLL_AO, 0, 22, 0x0208, 24, 0, 0, 0, 0x0208),
+ PLL(CLK_APMIXED_ARMPLL_BL, "armpll_bl", 0x0214, 0x0220, 0,
+ PLL_AO, 0, 22, 0x0218, 24, 0, 0, 0, 0x0218),
+ PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x0224, 0x0230, 0,
+ PLL_AO, 0, 22, 0x0228, 24, 0, 0, 0, 0x0228),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0244, 0x0250, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x0248, 24, 0, 0, 0, 0x0248),
+ PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0324, 0x0330, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x0328, 24, 0, 0, 0, 0x0328),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x038C, 0x0398, 0,
+ 0, 0, 22, 0x0390, 24, 0, 0, 0, 0x0390),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0254, 0x0260, 0,
+ 0, 0, 22, 0x0258, 24, 0, 0, 0, 0x0258),
+ PLL(CLK_APMIXED_NNAPLL, "nnapll", 0x035C, 0x0368, 0,
+ 0, 0, 22, 0x0360, 24, 0, 0, 0, 0x0360),
+ PLL(CLK_APMIXED_NNA2PLL, "nna2pll", 0x036C, 0x0378, 0,
+ 0, 0, 22, 0x0370, 24, 0, 0, 0, 0x0370),
+ PLL(CLK_APMIXED_ADSPPLL, "adsppll", 0x0304, 0x0310, 0,
+ 0, 0, 22, 0x0308, 24, 0, 0, 0, 0x0308),
+ PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x0314, 0x0320, 0,
+ 0, 0, 22, 0x0318, 24, 0, 0, 0, 0x0318),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0264, 0x0270, 0,
+ 0, 0, 22, 0x0268, 24, 0, 0, 0, 0x0268),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x0334, 0x0344, 0,
+ 0, 0, 32, 0x0338, 24, 0x0040, 0x000C, 0, 0x033C),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0348, 0x0358, 0,
+ 0, 0, 32, 0x034C, 24, 0x0044, 0x000C, 5, 0x0350),
+};
+
+static const struct of_device_id of_match_clk_mt8186_apmixed[] = {
+ { .compatible = "mediatek,mt8186-apmixedsys", },
+ {}
+};
+
+static int clk_mt8186_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (r)
+ goto free_apmixed_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+free_apmixed_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static int clk_mt8186_apmixed_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt8186_apmixed_drv = {
+ .probe = clk_mt8186_apmixed_probe,
+ .remove = clk_mt8186_apmixed_remove,
+ .driver = {
+ .name = "clk-mt8186-apmixed",
+ .of_match_table = of_match_clk_mt8186_apmixed,
+ },
+};
+builtin_platform_driver(clk_mt8186_apmixed_drv);
diff --git a/drivers/clk/mediatek/clk-mt8186-cam.c b/drivers/clk/mediatek/clk-mt8186-cam.c
new file mode 100644
index 000000000000..9ec345a2ce66
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8186-cam.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/mt8186-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs cam_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_CAM(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate cam_clks[] = {
+ GATE_CAM(CLK_CAM_LARB13, "cam_larb13", "top_cam", 0),
+ GATE_CAM(CLK_CAM_DFP_VAD, "cam_dfp_vad", "top_cam", 1),
+ GATE_CAM(CLK_CAM_LARB14, "cam_larb14", "top_cam", 2),
+ GATE_CAM(CLK_CAM, "cam", "top_cam", 6),
+ GATE_CAM(CLK_CAMTG, "camtg", "top_cam", 7),
+ GATE_CAM(CLK_CAM_SENINF, "cam_seninf", "top_cam", 8),
+ GATE_CAM(CLK_CAMSV1, "camsv1", "top_cam", 10),
+ GATE_CAM(CLK_CAMSV2, "camsv2", "top_cam", 11),
+ GATE_CAM(CLK_CAMSV3, "camsv3", "top_cam", 12),
+ GATE_CAM(CLK_CAM_CCU0, "cam_ccu0", "top_cam", 13),
+ GATE_CAM(CLK_CAM_CCU1, "cam_ccu1", "top_cam", 14),
+ GATE_CAM(CLK_CAM_MRAW0, "cam_mraw0", "top_cam", 15),
+ GATE_CAM(CLK_CAM_FAKE_ENG, "cam_fake_eng", "top_cam", 17),
+ GATE_CAM(CLK_CAM_CCU_GALS, "cam_ccu_gals", "top_cam", 18),
+ GATE_CAM(CLK_CAM2MM_GALS, "cam2mm_gals", "top_cam", 19),
+};
+
+static const struct mtk_gate cam_rawa_clks[] = {
+ GATE_CAM(CLK_CAM_RAWA_LARBX_RAWA, "cam_rawa_larbx_rawa", "top_cam", 0),
+ GATE_CAM(CLK_CAM_RAWA, "cam_rawa", "top_cam", 1),
+ GATE_CAM(CLK_CAM_RAWA_CAMTG_RAWA, "cam_rawa_camtg_rawa", "top_cam", 2),
+};
+
+static const struct mtk_gate cam_rawb_clks[] = {
+ GATE_CAM(CLK_CAM_RAWB_LARBX_RAWB, "cam_rawb_larbx_rawb", "top_cam", 0),
+ GATE_CAM(CLK_CAM_RAWB, "cam_rawb", "top_cam", 1),
+ GATE_CAM(CLK_CAM_RAWB_CAMTG_RAWB, "cam_rawb_camtg_rawb", "top_cam", 2),
+};
+
+static const struct mtk_clk_desc cam_desc = {
+ .clks = cam_clks,
+ .num_clks = ARRAY_SIZE(cam_clks),
+};
+
+static const struct mtk_clk_desc cam_rawa_desc = {
+ .clks = cam_rawa_clks,
+ .num_clks = ARRAY_SIZE(cam_rawa_clks),
+};
+
+static const struct mtk_clk_desc cam_rawb_desc = {
+ .clks = cam_rawb_clks,
+ .num_clks = ARRAY_SIZE(cam_rawb_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8186_cam[] = {
+ {
+ .compatible = "mediatek,mt8186-camsys",
+ .data = &cam_desc,
+ }, {
+ .compatible = "mediatek,mt8186-camsys_rawa",
+ .data = &cam_rawa_desc,
+ }, {
+ .compatible = "mediatek,mt8186-camsys_rawb",
+ .data = &cam_rawb_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8186_cam_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8186-cam",
+ .of_match_table = of_match_clk_mt8186_cam,
+ },
+};
+builtin_platform_driver(clk_mt8186_cam_drv);
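
The new mt8186 gate-clock drivers above carry no probe code of their own: the of_device_id .data points at a struct mtk_clk_desc, and mtk_clk_simple_probe()/mtk_clk_simple_remove() handle registration. A rough sketch of what that shared probe presumably does, with error unwinding elided; the helper internals are an assumption, not taken from this diff:

#include <linux/clk-provider.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include "clk-mtk.h"

/* Sketch only: the real helper lives in drivers/clk/mediatek/clk-mtk.c. */
static int simple_probe_sketch(struct platform_device *pdev)
{
	const struct mtk_clk_desc *desc = of_device_get_match_data(&pdev->dev);
	struct device_node *node = pdev->dev.of_node;
	struct clk_hw_onecell_data *clk_data;
	int r;

	if (!desc)
		return -EINVAL;

	clk_data = mtk_alloc_clk_data(desc->num_clks);
	if (!clk_data)
		return -ENOMEM;

	r = mtk_clk_register_gates(node, desc->clks, desc->num_clks, clk_data);
	if (r)
		return r;

	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
	if (r)
		return r;

	platform_set_drvdata(pdev, clk_data);
	return 0;
}
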
diff --git a/drivers/clk/mediatek/clk-mt8186-img.c b/drivers/clk/mediatek/clk-mt8186-img.c
new file mode 100644
index 000000000000..08a625475aee
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8186-img.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/mt8186-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs img_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate img1_clks[] = {
+ GATE_IMG(CLK_IMG1_LARB9_IMG1, "img1_larb9_img1", "top_img1", 0),
+ GATE_IMG(CLK_IMG1_LARB10_IMG1, "img1_larb10_img1", "top_img1", 1),
+ GATE_IMG(CLK_IMG1_DIP, "img1_dip", "top_img1", 2),
+ GATE_IMG(CLK_IMG1_GALS_IMG1, "img1_gals_img1", "top_img1", 12),
+};
+
+static const struct mtk_gate img2_clks[] = {
+ GATE_IMG(CLK_IMG2_LARB9_IMG2, "img2_larb9_img2", "top_img1", 0),
+ GATE_IMG(CLK_IMG2_LARB10_IMG2, "img2_larb10_img2", "top_img1", 1),
+ GATE_IMG(CLK_IMG2_MFB, "img2_mfb", "top_img1", 6),
+ GATE_IMG(CLK_IMG2_WPE, "img2_wpe", "top_img1", 7),
+ GATE_IMG(CLK_IMG2_MSS, "img2_mss", "top_img1", 8),
+ GATE_IMG(CLK_IMG2_GALS_IMG2, "img2_gals_img2", "top_img1", 12),
+};
+
+static const struct mtk_clk_desc img1_desc = {
+ .clks = img1_clks,
+ .num_clks = ARRAY_SIZE(img1_clks),
+};
+
+static const struct mtk_clk_desc img2_desc = {
+ .clks = img2_clks,
+ .num_clks = ARRAY_SIZE(img2_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8186_img[] = {
+ {
+ .compatible = "mediatek,mt8186-imgsys1",
+ .data = &img1_desc,
+ }, {
+ .compatible = "mediatek,mt8186-imgsys2",
+ .data = &img2_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8186_img_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8186-img",
+ .of_match_table = of_match_clk_mt8186_img,
+ },
+};
+builtin_platform_driver(clk_mt8186_img_drv);
diff --git a/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c
new file mode 100644
index 000000000000..47f2e480a05e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/mt8186-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs imp_iic_wrap_cg_regs = {
+ .set_ofs = 0xe08,
+ .clr_ofs = 0xe04,
+ .sta_ofs = 0xe00,
+};
+
+#define GATE_IMP_IIC_WRAP(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &imp_iic_wrap_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate imp_iic_wrap_clks[] = {
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C0,
+ "imp_iic_wrap_ap_clock_i2c0", "infra_ao_i2c_ap", 0),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C1,
+ "imp_iic_wrap_ap_clock_i2c1", "infra_ao_i2c_ap", 1),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C2,
+ "imp_iic_wrap_ap_clock_i2c2", "infra_ao_i2c_ap", 2),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C3,
+ "imp_iic_wrap_ap_clock_i2c3", "infra_ao_i2c_ap", 3),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C4,
+ "imp_iic_wrap_ap_clock_i2c4", "infra_ao_i2c_ap", 4),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C5,
+ "imp_iic_wrap_ap_clock_i2c5", "infra_ao_i2c_ap", 5),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C6,
+ "imp_iic_wrap_ap_clock_i2c6", "infra_ao_i2c_ap", 6),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C7,
+ "imp_iic_wrap_ap_clock_i2c7", "infra_ao_i2c_ap", 7),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C8,
+ "imp_iic_wrap_ap_clock_i2c8", "infra_ao_i2c_ap", 8),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C9,
+ "imp_iic_wrap_ap_clock_i2c9", "infra_ao_i2c_ap", 9),
+};
+
+static const struct mtk_clk_desc imp_iic_wrap_desc = {
+ .clks = imp_iic_wrap_clks,
+ .num_clks = ARRAY_SIZE(imp_iic_wrap_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8186_imp_iic_wrap[] = {
+ {
+ .compatible = "mediatek,mt8186-imp_iic_wrap",
+ .data = &imp_iic_wrap_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8186_imp_iic_wrap_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8186-imp_iic_wrap",
+ .of_match_table = of_match_clk_mt8186_imp_iic_wrap,
+ },
+};
+builtin_platform_driver(clk_mt8186_imp_iic_wrap_drv);
diff --git a/drivers/clk/mediatek/clk-mt8186-infra_ao.c b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
new file mode 100644
index 000000000000..2a7adc25abaa
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/mt8186-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs infra_ao0_cg_regs = {
+ .set_ofs = 0x80,
+ .clr_ofs = 0x84,
+ .sta_ofs = 0x90,
+};
+
+static const struct mtk_gate_regs infra_ao1_cg_regs = {
+ .set_ofs = 0x88,
+ .clr_ofs = 0x8c,
+ .sta_ofs = 0x94,
+};
+
+static const struct mtk_gate_regs infra_ao2_cg_regs = {
+ .set_ofs = 0xa4,
+ .clr_ofs = 0xa8,
+ .sta_ofs = 0xac,
+};
+
+static const struct mtk_gate_regs infra_ao3_cg_regs = {
+ .set_ofs = 0xc0,
+ .clr_ofs = 0xc4,
+ .sta_ofs = 0xc8,
+};
+
+#define GATE_INFRA_AO0_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO0(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO0_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA_AO1_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO1(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO1_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA_AO2_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao2_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO2(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO2_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA_AO3_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao3_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO3(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO3_FLAGS(_id, _name, _parent, _shift, 0)
+
+static const struct mtk_gate infra_ao_clks[] = {
+ /* INFRA_AO0 */
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_TMR, "infra_ao_pmic_tmr", "top_pwrap_ulposc", 0),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_AP, "infra_ao_pmic_ap", "top_pwrap_ulposc", 1),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_MD, "infra_ao_pmic_md", "top_pwrap_ulposc", 2),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_CONN, "infra_ao_pmic_conn", "top_pwrap_ulposc", 3),
+ /* infra_ao_scp_core is the main clock of the always-on co-processor. */
+ GATE_INFRA_AO0_FLAGS(CLK_INFRA_AO_SCP_CORE,
+ "infra_ao_scp_core", "top_scp", 4, CLK_IS_CRITICAL),
+ /* infra_ao_sej is the main clock for the secure engine with JTAG support */
+ GATE_INFRA_AO0_FLAGS(CLK_INFRA_AO_SEJ,
+ "infra_ao_sej", "top_axi", 5, CLK_IS_CRITICAL),
+ GATE_INFRA_AO0(CLK_INFRA_AO_APXGPT, "infra_ao_apxgpt", "top_axi", 6),
+ GATE_INFRA_AO0(CLK_INFRA_AO_ICUSB, "infra_ao_icusb", "top_axi", 8),
+ GATE_INFRA_AO0(CLK_INFRA_AO_GCE, "infra_ao_gce", "top_axi", 9),
+ GATE_INFRA_AO0(CLK_INFRA_AO_THERM, "infra_ao_therm", "top_axi", 10),
+ GATE_INFRA_AO0(CLK_INFRA_AO_I2C_AP, "infra_ao_i2c_ap", "top_i2c", 11),
+ GATE_INFRA_AO0(CLK_INFRA_AO_I2C_CCU, "infra_ao_i2c_ccu", "top_i2c", 12),
+ GATE_INFRA_AO0(CLK_INFRA_AO_I2C_SSPM, "infra_ao_i2c_sspm", "top_i2c", 13),
+ GATE_INFRA_AO0(CLK_INFRA_AO_I2C_RSV, "infra_ao_i2c_rsv", "top_i2c", 14),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM_HCLK, "infra_ao_pwm_hclk", "top_axi", 15),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM1, "infra_ao_pwm1", "top_pwm", 16),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM2, "infra_ao_pwm2", "top_pwm", 17),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM3, "infra_ao_pwm3", "top_pwm", 18),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM4, "infra_ao_pwm4", "top_pwm", 19),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM5, "infra_ao_pwm5", "top_pwm", 20),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM, "infra_ao_pwm", "top_pwm", 21),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART0, "infra_ao_uart0", "top_uart", 22),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART1, "infra_ao_uart1", "top_uart", 23),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART2, "infra_ao_uart2", "top_uart", 24),
+ GATE_INFRA_AO0(CLK_INFRA_AO_GCE_26M, "infra_ao_gce_26m", "clk26m", 27),
+ GATE_INFRA_AO0(CLK_INFRA_AO_CQ_DMA_FPC, "infra_ao_dma", "top_axi", 28),
+ GATE_INFRA_AO0(CLK_INFRA_AO_BTIF, "infra_ao_btif", "top_axi", 31),
+ /* INFRA_AO1 */
+ GATE_INFRA_AO1(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "top_spi", 1),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0, "infra_ao_msdc0", "top_msdc5hclk", 2),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDCFDE, "infra_ao_msdcfde", "top_aes_msdcfde", 3),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1, "infra_ao_msdc1", "top_axi", 4),
+ /* infra_ao_dvfsrc is for internal DVFS usage and should not be handled by Linux */
+ GATE_INFRA_AO1_FLAGS(CLK_INFRA_AO_DVFSRC,
+ "infra_ao_dvfsrc", "top_dvfsrc", 7, CLK_IS_CRITICAL),
+ GATE_INFRA_AO1(CLK_INFRA_AO_GCPU, "infra_ao_gcpu", "top_axi", 8),
+ GATE_INFRA_AO1(CLK_INFRA_AO_TRNG, "infra_ao_trng", "top_axi", 9),
+ GATE_INFRA_AO1(CLK_INFRA_AO_AUXADC, "infra_ao_auxadc", "clk26m", 10),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CPUM, "infra_ao_cpum", "top_axi", 11),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CCIF1_AP, "infra_ao_ccif1_ap", "top_axi", 12),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CCIF1_MD, "infra_ao_ccif1_md", "top_axi", 13),
+ GATE_INFRA_AO1(CLK_INFRA_AO_AUXADC_MD, "infra_ao_auxadc_md", "clk26m", 14),
+ GATE_INFRA_AO1(CLK_INFRA_AO_AP_DMA, "infra_ao_ap_dma", "top_axi", 18),
+ GATE_INFRA_AO1(CLK_INFRA_AO_XIU, "infra_ao_xiu", "top_axi", 19),
+ /* infra_ao_device_apc is for the device access permission control module */
+ GATE_INFRA_AO1_FLAGS(CLK_INFRA_AO_DEVICE_APC,
+ "infra_ao_dapc", "top_axi", 20, CLK_IS_CRITICAL),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CCIF_AP, "infra_ao_ccif_ap", "top_axi", 23),
+ GATE_INFRA_AO1(CLK_INFRA_AO_DEBUGTOP, "infra_ao_debugtop", "top_axi", 24),
+ GATE_INFRA_AO1(CLK_INFRA_AO_AUDIO, "infra_ao_audio", "top_axi", 25),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CCIF_MD, "infra_ao_ccif_md", "top_axi", 26),
+ GATE_INFRA_AO1(CLK_INFRA_AO_DXCC_SEC_CORE, "infra_ao_secore", "top_dxcc", 27),
+ GATE_INFRA_AO1(CLK_INFRA_AO_DXCC_AO, "infra_ao_dxcc_ao", "top_dxcc", 28),
+ GATE_INFRA_AO1(CLK_INFRA_AO_IMP_IIC, "infra_ao_imp_iic", "top_axi", 29),
+ GATE_INFRA_AO1(CLK_INFRA_AO_DRAMC_F26M, "infra_ao_dramc26", "clk26m", 31),
+ /* INFRA_AO2 */
+ GATE_INFRA_AO2(CLK_INFRA_AO_RG_PWM_FBCLK6, "infra_ao_pwm_fbclk6", "clk26m", 0),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_TOP_HCLK, "infra_ao_ssusb_hclk", "top_axi", 1),
+ GATE_INFRA_AO2(CLK_INFRA_AO_DISP_PWM, "infra_ao_disp_pwm", "top_disp_pwm", 2),
+ GATE_INFRA_AO2(CLK_INFRA_AO_CLDMA_BCLK, "infra_ao_cldmabclk", "top_axi", 3),
+ GATE_INFRA_AO2(CLK_INFRA_AO_AUDIO_26M_BCLK, "infra_ao_audio26m", "clk26m", 4),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_TOP_P1_HCLK, "infra_ao_ssusb_p1_hclk", "top_axi", 5),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI1, "infra_ao_spi1", "top_spi", 6),
+ GATE_INFRA_AO2(CLK_INFRA_AO_I2C4, "infra_ao_i2c4", "top_i2c", 7),
+ GATE_INFRA_AO2(CLK_INFRA_AO_MODEM_TEMP_SHARE, "infra_ao_mdtemp", "clk26m", 8),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI2, "infra_ao_spi2", "top_spi", 9),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI3, "infra_ao_spi3", "top_spi", 10),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_TOP_REF, "infra_ao_ssusb_ref", "clk26m", 11),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_TOP_XHCI, "infra_ao_ssusb_xhci", "top_ssusb_xhci", 12),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_TOP_P1_REF, "infra_ao_ssusb_p1_ref", "clk26m", 13),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_TOP_P1_XHCI,
+ "infra_ao_ssusb_p1_xhci", "top_ssusb_xhci_1p", 14),
+ /* infra_ao_sspm is the main clock of the co-processor and must not be disabled by Linux. */
+ GATE_INFRA_AO2_FLAGS(CLK_INFRA_AO_SSPM, "infra_ao_sspm", "top_sspm", 15, CLK_IS_CRITICAL),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_TOP_P1_SYS,
+ "infra_ao_ssusb_p1_sys", "top_ssusb_1p", 16),
+ GATE_INFRA_AO2(CLK_INFRA_AO_I2C5, "infra_ao_i2c5", "top_i2c", 18),
+ GATE_INFRA_AO2(CLK_INFRA_AO_I2C5_ARBITER, "infra_ao_i2c5a", "top_i2c", 19),
+ GATE_INFRA_AO2(CLK_INFRA_AO_I2C5_IMM, "infra_ao_i2c5_imm", "top_i2c", 20),
+ GATE_INFRA_AO2(CLK_INFRA_AO_I2C1_ARBITER, "infra_ao_i2c1a", "top_i2c", 21),
+ GATE_INFRA_AO2(CLK_INFRA_AO_I2C1_IMM, "infra_ao_i2c1_imm", "top_i2c", 22),
+ GATE_INFRA_AO2(CLK_INFRA_AO_I2C2_ARBITER, "infra_ao_i2c2a", "top_i2c", 23),
+ GATE_INFRA_AO2(CLK_INFRA_AO_I2C2_IMM, "infra_ao_i2c2_imm", "top_i2c", 24),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI4, "infra_ao_spi4", "top_spi", 25),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI5, "infra_ao_spi5", "top_spi", 26),
+ GATE_INFRA_AO2(CLK_INFRA_AO_CQ_DMA, "infra_ao_cq_dma", "top_axi", 27),
+ GATE_INFRA_AO2(CLK_INFRA_AO_BIST2FPC, "infra_ao_bist2fpc", "f_bist2fpc_ck", 28),
+ /* INFRA_AO3 */
+ GATE_INFRA_AO3(CLK_INFRA_AO_MSDC0_SELF, "infra_ao_msdc0sf", "top_msdc50_0", 0),
+ GATE_INFRA_AO3(CLK_INFRA_AO_SPINOR, "infra_ao_spinor", "top_spinor", 1),
+ /*
+ * infra_ao_sspm_26m and infra_ao_sspm_32k are main clocks of the
+ * co-processor and must not be disabled by Linux.
+ */
+ GATE_INFRA_AO3_FLAGS(CLK_INFRA_AO_SSPM_26M_SELF, "infra_ao_sspm_26m", "clk26m", 3,
+ CLK_IS_CRITICAL),
+ GATE_INFRA_AO3_FLAGS(CLK_INFRA_AO_SSPM_32K_SELF, "infra_ao_sspm_32k", "clk32k", 4,
+ CLK_IS_CRITICAL),
+ GATE_INFRA_AO3(CLK_INFRA_AO_I2C6, "infra_ao_i2c6", "top_i2c", 6),
+ GATE_INFRA_AO3(CLK_INFRA_AO_AP_MSDC0, "infra_ao_ap_msdc0", "top_axi", 7),
+ GATE_INFRA_AO3(CLK_INFRA_AO_MD_MSDC0, "infra_ao_md_msdc0", "top_axi", 8),
+ GATE_INFRA_AO3(CLK_INFRA_AO_MSDC0_SRC, "infra_ao_msdc0_clk", "top_msdc50_0", 9),
+ GATE_INFRA_AO3(CLK_INFRA_AO_MSDC1_SRC, "infra_ao_msdc1_clk", "top_msdc30_1", 10),
+ /* infra_ao_sej_f13m is the main clock for the secure engine with JTAG support */
+ GATE_INFRA_AO3_FLAGS(CLK_INFRA_AO_SEJ_F13M,
+ "infra_ao_sej_f13m", "clk26m", 15, CLK_IS_CRITICAL),
+ /* infra_ao_aes_top0_bclk is for secure encryption */
+ GATE_INFRA_AO3_FLAGS(CLK_INFRA_AO_AES_TOP0_BCLK,
+ "infra_ao_aes_top0_bclk", "top_axi", 16, CLK_IS_CRITICAL),
+ GATE_INFRA_AO3(CLK_INFRA_AO_MCU_PM_BCLK, "infra_ao_mcu_pm_bclk", "top_axi", 17),
+ GATE_INFRA_AO3(CLK_INFRA_AO_CCIF2_AP, "infra_ao_ccif2_ap", "top_axi", 18),
+ GATE_INFRA_AO3(CLK_INFRA_AO_CCIF2_MD, "infra_ao_ccif2_md", "top_axi", 19),
+ GATE_INFRA_AO3(CLK_INFRA_AO_CCIF3_AP, "infra_ao_ccif3_ap", "top_axi", 20),
+ GATE_INFRA_AO3(CLK_INFRA_AO_CCIF3_MD, "infra_ao_ccif3_md", "top_axi", 21),
+ GATE_INFRA_AO3(CLK_INFRA_AO_FADSP_26M, "infra_ao_fadsp_26m", "clk26m", 22),
+ GATE_INFRA_AO3(CLK_INFRA_AO_FADSP_32K, "infra_ao_fadsp_32k", "clk32k", 23),
+ GATE_INFRA_AO3(CLK_INFRA_AO_CCIF4_AP, "infra_ao_ccif4_ap", "top_axi", 24),
+ GATE_INFRA_AO3(CLK_INFRA_AO_CCIF4_MD, "infra_ao_ccif4_md", "top_axi", 25),
+ GATE_INFRA_AO3(CLK_INFRA_AO_FADSP, "infra_ao_fadsp", "top_audiodsp", 27),
+ GATE_INFRA_AO3(CLK_INFRA_AO_FLASHIF_133M, "infra_ao_flashif_133m", "top_axi", 28),
+ GATE_INFRA_AO3(CLK_INFRA_AO_FLASHIF_66M, "infra_ao_flashif_66m", "top_axi", 29),
+};
+
+static const struct mtk_clk_desc infra_ao_desc = {
+ .clks = infra_ao_clks,
+ .num_clks = ARRAY_SIZE(infra_ao_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8186_infra_ao[] = {
+ {
+ .compatible = "mediatek,mt8186-infracfg_ao",
+ .data = &infra_ao_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8186_infra_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8186-infra-ao",
+ .of_match_table = of_match_clk_mt8186_infra_ao,
+ },
+};
+builtin_platform_driver(clk_mt8186_infra_ao_drv);
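
Several infra_ao gates above are registered with CLK_IS_CRITICAL, so the clk framework enables them at registration time and never gates them, not even during the late disable-unused pass. As a rough illustration of the flag using a plain clk-gate (assumption: this is not the MediaTek gate wrapper used in the file, and the name and bit index are made up):

#include <linux/clk-provider.h>

static struct clk_hw *example_register_critical_gate(struct device *dev,
						     void __iomem *reg)
{
	/* bit 5 mirrors infra_ao_sej above; the framework enables this at registration */
	return clk_hw_register_gate(dev, "example_ao_gate", "top_axi",
				    CLK_IS_CRITICAL, reg, 5, 0, NULL);
}
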
diff --git a/drivers/clk/mediatek/clk-mt8186-ipe.c b/drivers/clk/mediatek/clk-mt8186-ipe.c
new file mode 100644
index 000000000000..8fca148effa6
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8186-ipe.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/mt8186-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ipe_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IPE(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipe_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate ipe_clks[] = {
+ GATE_IPE(CLK_IPE_LARB19, "ipe_larb19", "top_ipe", 0),
+ GATE_IPE(CLK_IPE_LARB20, "ipe_larb20", "top_ipe", 1),
+ GATE_IPE(CLK_IPE_SMI_SUBCOM, "ipe_smi_subcom", "top_ipe", 2),
+ GATE_IPE(CLK_IPE_FD, "ipe_fd", "top_ipe", 3),
+ GATE_IPE(CLK_IPE_FE, "ipe_fe", "top_ipe", 4),
+ GATE_IPE(CLK_IPE_RSC, "ipe_rsc", "top_ipe", 5),
+ GATE_IPE(CLK_IPE_DPE, "ipe_dpe", "top_ipe", 6),
+ GATE_IPE(CLK_IPE_GALS_IPE, "ipe_gals_ipe", "top_img1", 8),
+};
+
+static const struct mtk_clk_desc ipe_desc = {
+ .clks = ipe_clks,
+ .num_clks = ARRAY_SIZE(ipe_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8186_ipe[] = {
+ {
+ .compatible = "mediatek,mt8186-ipesys",
+ .data = &ipe_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8186_ipe_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8186-ipe",
+ .of_match_table = of_match_clk_mt8186_ipe,
+ },
+};
+builtin_platform_driver(clk_mt8186_ipe_drv);
diff --git a/drivers/clk/mediatek/clk-mt8186-mcu.c b/drivers/clk/mediatek/clk-mt8186-mcu.c
new file mode 100644
index 000000000000..dfc305c1fc5d
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8186-mcu.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/mt8186-clk.h>
+
+#include "clk-mtk.h"
+
+static const char * const mcu_armpll_ll_parents[] = {
+ "clk26m",
+ "armpll_ll",
+ "mainpll",
+ "univpll_d2"
+};
+
+static const char * const mcu_armpll_bl_parents[] = {
+ "clk26m",
+ "armpll_bl",
+ "mainpll",
+ "univpll_d2"
+};
+
+static const char * const mcu_armpll_bus_parents[] = {
+ "clk26m",
+ "ccipll",
+ "mainpll",
+ "univpll_d2"
+};
+
+/*
+ * The CPU muxes are only reconfigured when the MediaTek CPUFreq driver
+ * adjusts the CPU frequency. Other fields, such as the dividers, keep the
+ * values programmed once by the bootloader.
+ */
+static struct mtk_composite mcu_muxes[] = {
+ /* CPU_PLLDIV_CFG0 */
+ MUX(CLK_MCU_ARMPLL_LL_SEL, "mcu_armpll_ll_sel", mcu_armpll_ll_parents, 0x2A0, 9, 2),
+ /* CPU_PLLDIV_CFG1 */
+ MUX(CLK_MCU_ARMPLL_BL_SEL, "mcu_armpll_bl_sel", mcu_armpll_bl_parents, 0x2A4, 9, 2),
+ /* BUS_PLLDIV_CFG */
+ MUX(CLK_MCU_ARMPLL_BUS_SEL, "mcu_armpll_bus_sel", mcu_armpll_bus_parents, 0x2E0, 9, 2),
+};
+
+static const struct of_device_id of_match_clk_mt8186_mcu[] = {
+ { .compatible = "mediatek,mt8186-mcusys", },
+ {}
+};
+
+static int clk_mt8186_mcu_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+ void __iomem *base;
+
+ clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ r = PTR_ERR(base);
+ goto free_mcu_data;
+ }
+
+ r = mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
+ NULL, clk_data);
+ if (r)
+ goto free_mcu_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_composite_muxes;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_composite_muxes:
+ mtk_clk_unregister_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), clk_data);
+free_mcu_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static int clk_mt8186_mcu_remove(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt8186_mcu_drv = {
+ .probe = clk_mt8186_mcu_probe,
+ .remove = clk_mt8186_mcu_remove,
+ .driver = {
+ .name = "clk-mt8186-mcu",
+ .of_match_table = of_match_clk_mt8186_mcu,
+ },
+};
+builtin_platform_driver(clk_mt8186_mcu_drv);
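
The comment above mcu_muxes notes that only the CPUFreq driver touches these selectors. A hedged sketch of that usage pattern follows — the function and variable names are illustrative, not the actual mediatek-cpufreq code: park the CPU mux on a stable intermediate parent, retune the ARM PLL, then switch back.

#include <linux/clk.h>

static int example_scale_cpu(struct clk *cpu_mux, struct clk *armpll,
			     struct clk *intermediate, unsigned long rate)
{
	int ret;

	/* e.g. mcu_armpll_ll_sel -> univpll_d2 while armpll_ll is retuned */
	ret = clk_set_parent(cpu_mux, intermediate);
	if (ret)
		return ret;

	ret = clk_set_rate(armpll, rate);
	if (ret)
		return ret;

	/* switch the mux back to the freshly programmed PLL */
	return clk_set_parent(cpu_mux, armpll);
}
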
diff --git a/drivers/clk/mediatek/clk-mt8186-mdp.c b/drivers/clk/mediatek/clk-mt8186-mdp.c
new file mode 100644
index 000000000000..05174088ef20
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8186-mdp.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/mt8186-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mdp0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mdp2_cg_regs = {
+ .set_ofs = 0x124,
+ .clr_ofs = 0x128,
+ .sta_ofs = 0x120,
+};
+
+#define GATE_MDP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mdp0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_MDP2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mdp2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mdp_clks[] = {
+ /* MDP0 */
+ GATE_MDP0(CLK_MDP_RDMA0, "mdp_rdma0", "top_mdp", 0),
+ GATE_MDP0(CLK_MDP_TDSHP0, "mdp_tdshp0", "top_mdp", 1),
+ GATE_MDP0(CLK_MDP_IMG_DL_ASYNC0, "mdp_img_dl_async0", "top_mdp", 2),
+ GATE_MDP0(CLK_MDP_IMG_DL_ASYNC1, "mdp_img_dl_async1", "top_mdp", 3),
+ GATE_MDP0(CLK_MDP_DISP_RDMA, "mdp_disp_rdma", "top_mdp", 4),
+ GATE_MDP0(CLK_MDP_HMS, "mdp_hms", "top_mdp", 5),
+ GATE_MDP0(CLK_MDP_SMI0, "mdp_smi0", "top_mdp", 6),
+ GATE_MDP0(CLK_MDP_APB_BUS, "mdp_apb_bus", "top_mdp", 7),
+ GATE_MDP0(CLK_MDP_WROT0, "mdp_wrot0", "top_mdp", 8),
+ GATE_MDP0(CLK_MDP_RSZ0, "mdp_rsz0", "top_mdp", 9),
+ GATE_MDP0(CLK_MDP_HDR0, "mdp_hdr0", "top_mdp", 10),
+ GATE_MDP0(CLK_MDP_MUTEX0, "mdp_mutex0", "top_mdp", 11),
+ GATE_MDP0(CLK_MDP_WROT1, "mdp_wrot1", "top_mdp", 12),
+ GATE_MDP0(CLK_MDP_RSZ1, "mdp_rsz1", "top_mdp", 13),
+ GATE_MDP0(CLK_MDP_FAKE_ENG0, "mdp_fake_eng0", "top_mdp", 14),
+ GATE_MDP0(CLK_MDP_AAL0, "mdp_aal0", "top_mdp", 15),
+ GATE_MDP0(CLK_MDP_DISP_WDMA, "mdp_disp_wdma", "top_mdp", 16),
+ GATE_MDP0(CLK_MDP_COLOR, "mdp_color", "top_mdp", 17),
+ GATE_MDP0(CLK_MDP_IMG_DL_ASYNC2, "mdp_img_dl_async2", "top_mdp", 18),
+ /* MDP2 */
+ GATE_MDP2(CLK_MDP_IMG_DL_RELAY0_ASYNC0, "mdp_img_dl_rel0_as0", "top_mdp", 0),
+ GATE_MDP2(CLK_MDP_IMG_DL_RELAY1_ASYNC1, "mdp_img_dl_rel1_as1", "top_mdp", 8),
+ GATE_MDP2(CLK_MDP_IMG_DL_RELAY2_ASYNC2, "mdp_img_dl_rel2_as2", "top_mdp", 24),
+};
+
+static const struct mtk_clk_desc mdp_desc = {
+ .clks = mdp_clks,
+ .num_clks = ARRAY_SIZE(mdp_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8186_mdp[] = {
+ {
+ .compatible = "mediatek,mt8186-mdpsys",
+ .data = &mdp_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8186_mdp_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8186-mdp",
+ .of_match_table = of_match_clk_mt8186_mdp,
+ },
+};
+builtin_platform_driver(clk_mt8186_mdp_drv);
diff --git a/drivers/clk/mediatek/clk-mt8186-mfg.c b/drivers/clk/mediatek/clk-mt8186-mfg.c
new file mode 100644
index 000000000000..f1f92216f894
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8186-mfg.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/mt8186-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mfg_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mfg_clks[] = {
+ GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "top_mfg", 0),
+};
+
+static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8186_mfg[] = {
+ {
+ .compatible = "mediatek,mt8186-mfgsys",
+ .data = &mfg_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8186_mfg_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8186-mfg",
+ .of_match_table = of_match_clk_mt8186_mfg,
+ },
+};
+builtin_platform_driver(clk_mt8186_mfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt8186-mm.c b/drivers/clk/mediatek/clk-mt8186-mm.c
new file mode 100644
index 000000000000..1d33be407947
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8186-mm.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/mt8186-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x1a4,
+ .clr_ofs = 0x1a8,
+ .sta_ofs = 0x1a0,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM0 */
+ GATE_MM0(CLK_MM_DISP_MUTEX0, "mm_disp_mutex0", "top_disp", 0),
+ GATE_MM0(CLK_MM_APB_MM_BUS, "mm_apb_mm_bus", "top_disp", 1),
+ GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "top_disp", 2),
+ GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "top_disp", 3),
+ GATE_MM0(CLK_MM_DISP_OVL0_2L, "mm_disp_ovl0_2l", "top_disp", 4),
+ GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "top_disp", 5),
+ GATE_MM0(CLK_MM_DISP_RSZ0, "mm_disp_rsz0", "top_disp", 7),
+ GATE_MM0(CLK_MM_DISP_AAL0, "mm_disp_aal0", "top_disp", 8),
+ GATE_MM0(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "top_disp", 9),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "top_disp", 10),
+ GATE_MM0(CLK_MM_SMI_INFRA, "mm_smi_infra", "top_disp", 11),
+ GATE_MM0(CLK_MM_DISP_DSC_WRAP0, "mm_disp_dsc_wrap0", "top_disp", 12),
+ GATE_MM0(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "top_disp", 13),
+ GATE_MM0(CLK_MM_DISP_POSTMASK0, "mm_disp_postmask0", "top_disp", 14),
+ GATE_MM0(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "top_disp", 16),
+ GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "top_disp", 17),
+ GATE_MM0(CLK_MM_DSI0, "mm_dsi0", "top_disp", 19),
+ GATE_MM0(CLK_MM_DISP_FAKE_ENG0, "mm_disp_fake_eng0", "top_disp", 20),
+ GATE_MM0(CLK_MM_DISP_FAKE_ENG1, "mm_disp_fake_eng1", "top_disp", 21),
+ GATE_MM0(CLK_MM_SMI_GALS, "mm_smi_gals", "top_disp", 22),
+ GATE_MM0(CLK_MM_SMI_IOMMU, "mm_smi_iommu", "top_disp", 24),
+ GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "top_disp", 25),
+ GATE_MM0(CLK_MM_DISP_DPI, "mm_disp_dpi", "top_disp", 26),
+ /* MM1 */
+ GATE_MM1(CLK_MM_DSI0_DSI_CK_DOMAIN, "mm_dsi0_dsi_domain", "top_disp", 0),
+ GATE_MM1(CLK_MM_DISP_26M, "mm_disp_26m_ck", "top_disp", 10),
+};
+
+static int clk_mt8186_mm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct clk_hw_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data);
+ if (r)
+ goto free_mm_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_gates;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_gates:
+ mtk_clk_unregister_gates(mm_clks, ARRAY_SIZE(mm_clks), clk_data);
+free_mm_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static int clk_mt8186_mm_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_gates(mm_clks, ARRAY_SIZE(mm_clks), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt8186_mm_drv = {
+ .probe = clk_mt8186_mm_probe,
+ .remove = clk_mt8186_mm_remove,
+ .driver = {
+ .name = "clk-mt8186-mm",
+ },
+};
+builtin_platform_driver(clk_mt8186_mm_drv);
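
Unlike the other drivers in this series, clk-mt8186-mm has no of_match_table and resolves dev->parent->of_node: it expects to be created as a child platform device, presumably by the multimedia (mmsys) driver. A hedged sketch of that instantiation, illustrative only and not the actual mtk-mmsys code:

#include <linux/platform_device.h>

static struct platform_device *example_spawn_mm_clks(struct device *mmsys_dev)
{
	/* clk_mt8186_mm_probe() will resolve dev->parent->of_node to mmsys_dev's node */
	return platform_device_register_data(mmsys_dev, "clk-mt8186-mm",
					     PLATFORM_DEVID_AUTO, NULL, 0);
}
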
diff --git a/drivers/clk/mediatek/clk-mt8186-topckgen.c b/drivers/clk/mediatek/clk-mt8186-topckgen.c
new file mode 100644
index 000000000000..d7f2c4663c85
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8186-topckgen.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/mt8186-clk.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+static DEFINE_SPINLOCK(mt8186_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_ULPOSC1, "ulposc1", NULL, 250000000),
+ FIXED_CLK(CLK_TOP_466M_FMEM, "hd_466m_fmem_ck", NULL, 466000000),
+ FIXED_CLK(CLK_TOP_MPLL, "mpll", NULL, 208000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_MAINPLL_D2, "mainpll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D2_D2, "mainpll_d2_d2", "mainpll_d2", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D2_D4, "mainpll_d2_d4", "mainpll_d2", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D2_D16, "mainpll_d2_d16", "mainpll_d2", 1, 16),
+ FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_MAINPLL_D3_D2, "mainpll_d3_d2", "mainpll_d3", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D3_D4, "mainpll_d3_d4", "mainpll_d3", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_MAINPLL_D5_D2, "mainpll_d5_d2", "mainpll_d5", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D5_D4, "mainpll_d5_d4", "mainpll_d5", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_MAINPLL_D7_D2, "mainpll_d7_d2", "mainpll_d7", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D7_D4, "mainpll_d7_d4", "mainpll_d7", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL, "univpll", "univ2pll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D2_D2, "univpll_d2_d2", "univpll_d2", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D2_D4, "univpll_d2_d4", "univpll_d2", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL_D3_D2, "univpll_d3_d2", "univpll_d3", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D3_D4, "univpll_d3_d4", "univpll_d3", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D3_D8, "univpll_d3_d8", "univpll_d3", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D3_D32, "univpll_d3_d32", "univpll_d3", 1, 32),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll_d5", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll_d5", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7),
+ FACTOR(CLK_TOP_UNIVPLL_192M, "univpll_192m", "univ2pll", 1, 13),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D4, "univpll_192m_d4", "univpll_192m", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D8, "univpll_192m_d8", "univpll_192m", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D16, "univpll_192m_d16", "univpll_192m", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D32, "univpll_192m_d32", "univpll_192m", 1, 32),
+ FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1", 1, 2),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1", 1, 4),
+ FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1", 1, 8),
+ FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2", 1, 2),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2", 1, 4),
+ FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "apll2", 1, 8),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll", 1, 8),
+ FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll", 1, 16),
+ FACTOR(CLK_TOP_TVDPLL_D32, "tvdpll_d32", "tvdpll", 1, 32),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_ULPOSC1_D2, "ulposc1_d2", "ulposc1", 1, 2),
+ FACTOR(CLK_TOP_ULPOSC1_D4, "ulposc1_d4", "ulposc1", 1, 4),
+ FACTOR(CLK_TOP_ULPOSC1_D8, "ulposc1_d8", "ulposc1", 1, 8),
+ FACTOR(CLK_TOP_ULPOSC1_D10, "ulposc1_d10", "ulposc1", 1, 10),
+ FACTOR(CLK_TOP_ULPOSC1_D16, "ulposc1_d16", "ulposc1", 1, 16),
+ FACTOR(CLK_TOP_ULPOSC1_D32, "ulposc1_d32", "ulposc1", 1, 32),
+ FACTOR(CLK_TOP_ADSPPLL_D2, "adsppll_d2", "adsppll", 1, 2),
+ FACTOR(CLK_TOP_ADSPPLL_D4, "adsppll_d4", "adsppll", 1, 4),
+ FACTOR(CLK_TOP_ADSPPLL_D8, "adsppll_d8", "adsppll", 1, 8),
+ FACTOR(CLK_TOP_NNAPLL_D2, "nnapll_d2", "nnapll", 1, 2),
+ FACTOR(CLK_TOP_NNAPLL_D4, "nnapll_d4", "nnapll", 1, 4),
+ FACTOR(CLK_TOP_NNAPLL_D8, "nnapll_d8", "nnapll", 1, 8),
+ FACTOR(CLK_TOP_NNA2PLL_D2, "nna2pll_d2", "nna2pll", 1, 2),
+ FACTOR(CLK_TOP_NNA2PLL_D4, "nna2pll_d4", "nna2pll", 1, 4),
+ FACTOR(CLK_TOP_NNA2PLL_D8, "nna2pll_d8", "nna2pll", 1, 8),
+ FACTOR(CLK_TOP_F_BIST2FPC, "f_bist2fpc_ck", "univpll_d3_d2", 1, 1),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "mainpll_d7",
+ "mainpll_d2_d4",
+ "univpll_d7"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "mainpll_d2_d4",
+ "mainpll_d5",
+ "mainpll_d2_d2",
+ "mainpll_d3",
+ "univpll_d3"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mfgpll",
+ "mainpll_d3",
+ "mainpll_d5"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "univpll_192m_d8",
+ "univpll_d3_d8",
+ "univpll_192m_d4",
+ "univpll_d3_d32",
+ "univpll_192m_d16",
+ "univpll_192m_d32"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll_d3_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "mainpll_d5_d4",
+ "mainpll_d3_d4",
+ "mainpll_d5_d2",
+ "mainpll_d2_d4",
+ "mainpll_d7",
+ "mainpll_d3_d2",
+ "mainpll_d5"
+};
+
+static const char * const msdc5hclk_parents[] = {
+ "clk26m",
+ "mainpll_d2_d2",
+ "mainpll_d7",
+ "mainpll_d3_d2"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll",
+ "univpll_d3",
+ "msdcpll_d2",
+ "mainpll_d7",
+ "mainpll_d3_d2",
+ "univpll_d2_d2"
+};
+
+static const char * const msdc30_1_parents[] = {
+ "clk26m",
+ "msdcpll_d2",
+ "univpll_d3_d2",
+ "mainpll_d3_d2",
+ "mainpll_d7"
+};
+
+static const char * const audio_parents[] = {
+ "clk26m",
+ "mainpll_d5_d4",
+ "mainpll_d7_d4",
+ "mainpll_d2_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "mainpll_d2_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "apll1"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "apll2"
+};
+
+static const char * const aud_engen1_parents[] = {
+ "clk26m",
+ "apll1_d2",
+ "apll1_d4",
+ "apll1_d8"
+};
+
+static const char * const aud_engen2_parents[] = {
+ "clk26m",
+ "apll2_d2",
+ "apll2_d4",
+ "apll2_d8"
+};
+
+static const char * const disp_pwm_parents[] = {
+ "clk26m",
+ "univpll_d5_d2",
+ "univpll_d3_d4",
+ "ulposc1_d2",
+ "ulposc1_d8"
+};
+
+static const char * const sspm_parents[] = {
+ "clk26m",
+ "mainpll_d2_d2",
+ "mainpll_d3_d2",
+ "mainpll_d5",
+ "mainpll_d3"
+};
+
+static const char * const dxcc_parents[] = {
+ "clk26m",
+ "mainpll_d2_d2",
+ "mainpll_d2_d4"
+};
+
+static const char * const usb_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const srck_parents[] = {
+ "clk32k",
+ "clk26m",
+ "ulposc1_d10"
+};
+
+static const char * const spm_parents[] = {
+ "clk32k",
+ "ulposc1_d10",
+ "clk26m",
+ "mainpll_d7_d2"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d3_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "univpll_d3_d8",
+ "univpll_d3_d4",
+ "univpll_d2_d4"
+};
+
+static const char * const seninf_parents[] = {
+ "clk26m",
+ "univpll_d2_d4",
+ "univpll_d2_d2",
+ "univpll_d3_d2"
+};
+
+static const char * const aes_msdcfde_parents[] = {
+ "clk26m",
+ "univpll_d3",
+ "mainpll_d3",
+ "univpll_d2_d2",
+ "mainpll_d2_d2",
+ "mainpll_d2_d4"
+};
+
+static const char * const pwrap_ulposc_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "ulposc1_d4",
+ "ulposc1_d8",
+ "ulposc1_d10",
+ "ulposc1_d16",
+ "ulposc1_d32"
+};
+
+static const char * const camtm_parents[] = {
+ "clk26m",
+ "univpll_d2_d4",
+ "univpll_d3_d2"
+};
+
+static const char * const venc_parents[] = {
+ "clk26m",
+ "mmpll",
+ "mainpll_d2_d2",
+ "mainpll_d2",
+ "univpll_d3",
+ "univpll_d2_d2",
+ "mainpll_d3",
+ "mmpll"
+};
+
+static const char * const isp_parents[] = {
+ "clk26m",
+ "mainpll_d2",
+ "mainpll_d2_d2",
+ "univpll_d3",
+ "mainpll_d3",
+ "mmpll",
+ "univpll_d5",
+ "univpll_d2_d2",
+ "mmpll_d2"
+};
+
+static const char * const dpmaif_parents[] = {
+ "clk26m",
+ "univpll_d2_d2",
+ "mainpll_d3",
+ "mainpll_d2_d2",
+ "univpll_d3_d2"
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "mainpll_d3",
+ "mainpll_d2_d2",
+ "univpll_d5",
+ "mainpll_d2",
+ "univpll_d3",
+ "univpll_d2_d2"
+};
+
+static const char * const disp_parents[] = {
+ "clk26m",
+ "univpll_d3_d2",
+ "mainpll_d5",
+ "univpll_d5",
+ "univpll_d2_d2",
+ "mainpll_d3",
+ "univpll_d3",
+ "mainpll_d2",
+ "mmpll"
+};
+
+static const char * const mdp_parents[] = {
+ "clk26m",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d2_d2",
+ "univpll_d2_d2",
+ "mainpll_d3",
+ "univpll_d3",
+ "mainpll_d2",
+ "mmpll"
+};
+
+static const char * const audio_h_parents[] = {
+ "clk26m",
+ "univpll_d7",
+ "apll1",
+ "apll2"
+};
+
+static const char * const ufs_parents[] = {
+ "clk26m",
+ "mainpll_d7",
+ "univpll_d2_d4",
+ "mainpll_d2_d4"
+};
+
+static const char * const aes_fde_parents[] = {
+ "clk26m",
+ "univpll_d3",
+ "mainpll_d2_d2",
+ "univpll_d5"
+};
+
+static const char * const audiodsp_parents[] = {
+ "clk26m",
+ "ulposc1_d10",
+ "adsppll",
+ "adsppll_d2",
+ "adsppll_d4",
+ "adsppll_d8"
+};
+
+static const char * const dvfsrc_parents[] = {
+ "clk26m",
+ "ulposc1_d10",
+};
+
+static const char * const dsi_occ_parents[] = {
+ "clk26m",
+ "univpll_d3_d2",
+ "mpll",
+ "mainpll_d5"
+};
+
+static const char * const spmi_mst_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "ulposc1_d4",
+ "ulposc1_d8",
+ "ulposc1_d10",
+ "ulposc1_d16",
+ "ulposc1_d32"
+};
+
+static const char * const spinor_parents[] = {
+ "clk26m",
+ "clk13m",
+ "mainpll_d7_d4",
+ "univpll_d3_d8",
+ "univpll_d5_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const nna_parents[] = {
+ "clk26m",
+ "univpll_d3_d8",
+ "mainpll_d2_d4",
+ "univpll_d3_d2",
+ "mainpll_d2_d2",
+ "univpll_d2_d2",
+ "mainpll_d3",
+ "univpll_d3",
+ "mmpll",
+ "mainpll_d2",
+ "univpll_d2",
+ "nnapll_d2",
+ "nnapll_d4",
+ "nnapll_d8",
+ "nnapll",
+ "nna2pll"
+};
+
+static const char * const nna2_parents[] = {
+ "clk26m",
+ "univpll_d3_d8",
+ "mainpll_d2_d4",
+ "univpll_d3_d2",
+ "mainpll_d2_d2",
+ "univpll_d2_d2",
+ "mainpll_d3",
+ "univpll_d3",
+ "mmpll",
+ "mainpll_d2",
+ "univpll_d2",
+ "nna2pll_d2",
+ "nna2pll_d4",
+ "nna2pll_d8",
+ "nnapll",
+ "nna2pll"
+};
+
+static const char * const ssusb_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const wpe_parents[] = {
+ "clk26m",
+ "univpll_d3_d2",
+ "mainpll_d5",
+ "univpll_d5",
+ "univpll_d2_d2",
+ "mainpll_d3",
+ "univpll_d3",
+ "mainpll_d2",
+ "mmpll"
+};
+
+static const char * const dpi_parents[] = {
+ "clk26m",
+ "tvdpll",
+ "tvdpll_d2",
+ "tvdpll_d4",
+ "tvdpll_d8",
+ "tvdpll_d16",
+ "tvdpll_d32"
+};
+
+static const char * const u3_occ_250m_parents[] = {
+ "clk26m",
+ "univpll_d5"
+};
+
+static const char * const u3_occ_500m_parents[] = {
+ "clk26m",
+ "nna2pll_d2"
+};
+
+static const char * const adsp_bus_parents[] = {
+ "clk26m",
+ "ulposc1_d2",
+ "mainpll_d5",
+ "mainpll_d2_d2",
+ "mainpll_d3",
+ "mainpll_d2",
+ "univpll_d3"
+};
+
+static const char * const apll_mck_parents[] = {
+ "top_aud_1",
+ "top_aud_2"
+};
+
+static const struct mtk_mux top_mtk_muxes[] = {
+ /*
+ * CLK_CFG_0
+ * top_axi is the bus clock and must not be disabled by Linux.
+ * top_scp is the main clock of the always-on co-processor.
+ */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_AXI, "top_axi", axi_parents,
+ 0x0040, 0x0044, 0x0048, 0, 2, 7, 0x0004, 0,
+ CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SCP, "top_scp", scp_parents,
+ 0x0040, 0x0044, 0x0048, 8, 3, 15, 0x0004, 1,
+ CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG, "top_mfg",
+ mfg_parents, 0x0040, 0x0044, 0x0048, 16, 2, 23, 0x0004, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG, "top_camtg",
+ camtg_parents, 0x0040, 0x0044, 0x0048, 24, 3, 31, 0x0004, 3),
+ /* CLK_CFG_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG1, "top_camtg1",
+ camtg_parents, 0x0050, 0x0054, 0x0058, 0, 3, 7, 0x0004, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG2, "top_camtg2",
+ camtg_parents, 0x0050, 0x0054, 0x0058, 8, 3, 15, 0x0004, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG3, "top_camtg3",
+ camtg_parents, 0x0050, 0x0054, 0x0058, 16, 3, 23, 0x0004, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG4, "top_camtg4",
+ camtg_parents, 0x0050, 0x0054, 0x0058, 24, 3, 31, 0x0004, 7),
+ /* CLK_CFG_2 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG5, "top_camtg5",
+ camtg_parents, 0x0060, 0x0064, 0x0068, 0, 3, 7, 0x0004, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG6, "top_camtg6",
+ camtg_parents, 0x0060, 0x0064, 0x0068, 8, 3, 15, 0x0004, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART, "top_uart",
+ uart_parents, 0x0060, 0x0064, 0x0068, 16, 1, 23, 0x0004, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI, "top_spi",
+ spi_parents, 0x0060, 0x0064, 0x0068, 24, 3, 31, 0x0004, 11),
+ /* CLK_CFG_3 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_HCLK, "top_msdc5hclk",
+ msdc5hclk_parents, 0x0070, 0x0074, 0x0078, 0, 2, 7, 0x0004, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0, "top_msdc50_0",
+ msdc50_0_parents, 0x0070, 0x0074, 0x0078, 8, 3, 15, 0x0004, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1, "top_msdc30_1",
+ msdc30_1_parents, 0x0070, 0x0074, 0x0078, 16, 3, 23, 0x0004, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO, "top_audio",
+ audio_parents, 0x0070, 0x0074, 0x0078, 24, 2, 31, 0x0004, 15),
+ /* CLK_CFG_4 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_INTBUS, "top_aud_intbus",
+ aud_intbus_parents, 0x0080, 0x0084, 0x0088, 0, 2, 7, 0x0004, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_1, "top_aud_1",
+ aud_1_parents, 0x0080, 0x0084, 0x0088, 8, 1, 15, 0x0004, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_2, "top_aud_2",
+ aud_2_parents, 0x0080, 0x0084, 0x0088, 16, 1, 23, 0x0004, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_ENGEN1, "top_aud_engen1",
+ aud_engen1_parents, 0x0080, 0x0084, 0x0088, 24, 2, 31, 0x0004, 19),
+ /*
+ * CLK_CFG_5
+ * top_sspm is the main clock of the always-on co-processor and must not
+ * be disabled by Linux.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_ENGEN2, "top_aud_engen2",
+ aud_engen2_parents, 0x0090, 0x0094, 0x0098, 0, 2, 7, 0x0004, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM, "top_disp_pwm",
+ disp_pwm_parents, 0x0090, 0x0094, 0x0098, 8, 3, 15, 0x0004, 21),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SSPM, "top_sspm", sspm_parents,
+ 0x0090, 0x0094, 0x0098, 16, 3, 23, 0x0004, 22,
+ CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DXCC, "top_dxcc",
+ dxcc_parents, 0x0090, 0x0094, 0x0098, 24, 2, 31, 0x0004, 23),
+ /*
+ * CLK_CFG_6
+ * top_spm and top_srck are the main clocks of the always-on co-processor.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP, "top_usb",
+ usb_parents, 0x00a0, 0x00a4, 0x00a8, 0, 2, 7, 0x0004, 24),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SRCK, "top_srck", srck_parents,
+ 0x00a0, 0x00a4, 0x00a8, 8, 2, 15, 0x0004, 25,
+ CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SPM, "top_spm", spm_parents,
+ 0x00a0, 0x00a4, 0x00a8, 16, 2, 23, 0x0004, 26,
+ CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C, "top_i2c",
+ i2c_parents, 0x00a0, 0x00a4, 0x00a8, 24, 2, 31, 0x0004, 27),
+ /* CLK_CFG_7 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM, "top_pwm",
+ pwm_parents, 0x00b0, 0x00b4, 0x00b8, 0, 2, 7, 0x0004, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF, "top_seninf",
+ seninf_parents, 0x00b0, 0x00b4, 0x00b8, 8, 2, 15, 0x0004, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF1, "top_seninf1",
+ seninf_parents, 0x00b0, 0x00b4, 0x00b8, 16, 2, 23, 0x0004, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF2, "top_seninf2",
+ seninf_parents, 0x00b0, 0x00b4, 0x00b8, 24, 2, 31, 0x0008, 0),
+ /* CLK_CFG_8 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF3, "top_seninf3",
+ seninf_parents, 0x00c0, 0x00c4, 0x00c8, 0, 2, 7, 0x0008, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_MSDCFDE, "top_aes_msdcfde",
+ aes_msdcfde_parents, 0x00c0, 0x00c4, 0x00c8, 8, 3, 15, 0x0008, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWRAP_ULPOSC, "top_pwrap_ulposc",
+ pwrap_ulposc_parents, 0x00c0, 0x00c4, 0x00c8, 16, 3, 23, 0x0008, 3),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTM, "top_camtm",
+ camtm_parents, 0x00c0, 0x00c4, 0x00c8, 24, 2, 31, 0x0008, 4),
+ /* CLK_CFG_9 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VENC, "top_venc",
+ venc_parents, 0x00d0, 0x00d4, 0x00d8, 0, 3, 7, 0x0008, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAM, "top_cam",
+ isp_parents, 0x00d0, 0x00d4, 0x00d8, 8, 4, 15, 0x0008, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IMG1, "top_img1",
+ isp_parents, 0x00d0, 0x00d4, 0x00d8, 16, 4, 23, 0x0008, 7),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IPE, "top_ipe",
+ isp_parents, 0x00d0, 0x00d4, 0x00d8, 24, 4, 31, 0x0008, 8),
+ /* CLK_CFG_10 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPMAIF, "top_dpmaif",
+ dpmaif_parents, 0x00e0, 0x00e4, 0x00e8, 0, 3, 7, 0x0008, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VDEC, "top_vdec",
+ vdec_parents, 0x00e0, 0x00e4, 0x00e8, 8, 3, 15, 0x0008, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP, "top_disp",
+ disp_parents, 0x00e0, 0x00e4, 0x00e8, 16, 4, 23, 0x0008, 11),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MDP, "top_mdp",
+ mdp_parents, 0x00e0, 0x00e4, 0x00e8, 24, 4, 31, 0x0008, 12),
+ /* CLK_CFG_11 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_H, "top_audio_h",
+ audio_h_parents, 0x00ec, 0x00f0, 0x00f4, 0, 2, 7, 0x0008, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UFS, "top_ufs",
+ ufs_parents, 0x00ec, 0x00f0, 0x00f4, 8, 2, 15, 0x0008, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_FDE, "top_aes_fde",
+ aes_fde_parents, 0x00ec, 0x00f0, 0x00f4, 16, 2, 23, 0x0008, 15),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIODSP, "top_audiodsp",
+ audiodsp_parents, 0x00ec, 0x00f0, 0x00f4, 24, 3, 31, 0x0008, 16),
+ /*
+ * CLK_CFG_12
+ * dvfsrc is for internal DVFS usage and must not be disabled by Linux.
+ */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_DVFSRC, "top_dvfsrc", dvfsrc_parents,
+ 0x0100, 0x0104, 0x0108, 0, 1, 7, 0x0008, 17,
+ CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSI_OCC, "top_dsi_occ",
+ dsi_occ_parents, 0x0100, 0x0104, 0x0108, 8, 2, 15, 0x0008, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPMI_MST, "top_spmi_mst",
+ spmi_mst_parents, 0x0100, 0x0104, 0x0108, 16, 3, 23, 0x0008, 19),
+ /* CLK_CFG_13 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPINOR, "top_spinor",
+ spinor_parents, 0x0110, 0x0114, 0x0118, 0, 3, 6, 0x0008, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NNA, "top_nna",
+ nna_parents, 0x0110, 0x0114, 0x0118, 7, 4, 14, 0x0008, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NNA1, "top_nna1",
+ nna_parents, 0x0110, 0x0114, 0x0118, 15, 4, 22, 0x0008, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NNA2, "top_nna2",
+ nna2_parents, 0x0110, 0x0114, 0x0118, 23, 4, 30, 0x0008, 23),
+ /* CLK_CFG_14 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI, "top_ssusb_xhci",
+ ssusb_parents, 0x0120, 0x0124, 0x0128, 0, 2, 5, 0x0008, 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_TOP_1P, "top_ssusb_1p",
+ ssusb_parents, 0x0120, 0x0124, 0x0128, 6, 2, 11, 0x0008, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI_1P, "top_ssusb_xhci_1p",
+ ssusb_parents, 0x0120, 0x0124, 0x0128, 12, 2, 17, 0x0008, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_WPE, "top_wpe",
+ wpe_parents, 0x0120, 0x0124, 0x0128, 18, 4, 25, 0x0008, 27),
+ /* CLK_CFG_15 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI, "top_dpi",
+ dpi_parents, 0x0180, 0x0184, 0x0188, 0, 3, 6, 0x0008, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_U3_OCC_250M, "top_u3_occ_250m",
+ u3_occ_250m_parents, 0x0180, 0x0184, 0x0188, 7, 1, 11, 0x0008, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_U3_OCC_500M, "top_u3_occ_500m",
+ u3_occ_500m_parents, 0x0180, 0x0184, 0x0188, 12, 1, 16, 0x0008, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ADSP_BUS, "top_adsp_bus",
+ adsp_bus_parents, 0x0180, 0x0184, 0x0188, 17, 3, 23, 0x0008, 31),
+};
+
+static struct mtk_composite top_muxes[] = {
+ /* CLK_AUDDIV_0 */
+ MUX(CLK_TOP_APLL_I2S0_MCK_SEL, "apll_i2s0_mck_sel", apll_mck_parents, 0x0320, 16, 1),
+ MUX(CLK_TOP_APLL_I2S1_MCK_SEL, "apll_i2s1_mck_sel", apll_mck_parents, 0x0320, 17, 1),
+ MUX(CLK_TOP_APLL_I2S2_MCK_SEL, "apll_i2s2_mck_sel", apll_mck_parents, 0x0320, 18, 1),
+ MUX(CLK_TOP_APLL_I2S4_MCK_SEL, "apll_i2s4_mck_sel", apll_mck_parents, 0x0320, 19, 1),
+ MUX(CLK_TOP_APLL_TDMOUT_MCK_SEL, "apll_tdmout_mck_sel", apll_mck_parents,
+ 0x0320, 20, 1),
+};
+
+static const struct mtk_composite top_adj_divs[] = {
+ DIV_GATE(CLK_TOP_APLL12_CK_DIV0, "apll12_div0", "apll_i2s0_mck_sel",
+ 0x0320, 0, 0x0328, 8, 0),
+ DIV_GATE(CLK_TOP_APLL12_CK_DIV1, "apll12_div1", "apll_i2s1_mck_sel",
+ 0x0320, 1, 0x0328, 8, 8),
+ DIV_GATE(CLK_TOP_APLL12_CK_DIV2, "apll12_div2", "apll_i2s2_mck_sel",
+ 0x0320, 2, 0x0328, 8, 16),
+ DIV_GATE(CLK_TOP_APLL12_CK_DIV4, "apll12_div4", "apll_i2s4_mck_sel",
+ 0x0320, 3, 0x0328, 8, 24),
+ DIV_GATE(CLK_TOP_APLL12_CK_DIV_TDMOUT_M, "apll12_div_tdmout_m", "apll_tdmout_mck_sel",
+ 0x0320, 4, 0x0334, 8, 0),
+};
+
+static const struct of_device_id of_match_clk_mt8186_topck[] = {
+ { .compatible = "mediatek,mt8186-topckgen", },
+ {}
+};
+
+static int clk_mt8186_topck_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+ void __iomem *base;
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ r = PTR_ERR(base);
+ goto free_top_data;
+ }
+
+ r = mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+ if (r)
+ goto free_top_data;
+
+ r = mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+ if (r)
+ goto unregister_fixed_clks;
+
+ r = mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node,
+ &mt8186_clk_lock, clk_data);
+ if (r)
+ goto unregister_factors;
+
+ r = mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+ &mt8186_clk_lock, clk_data);
+ if (r)
+ goto unregister_muxes;
+
+ r = mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
+ &mt8186_clk_lock, clk_data);
+ if (r)
+ goto unregister_composite_muxes;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_composite_divs;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_composite_divs:
+ mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), clk_data);
+unregister_composite_muxes:
+ mtk_clk_unregister_composites(top_muxes, ARRAY_SIZE(top_muxes), clk_data);
+unregister_muxes:
+ mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), clk_data);
+unregister_factors:
+ mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+unregister_fixed_clks:
+ mtk_clk_unregister_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), clk_data);
+free_top_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static int clk_mt8186_topck_remove(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), clk_data);
+ mtk_clk_unregister_composites(top_muxes, ARRAY_SIZE(top_muxes), clk_data);
+ mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), clk_data);
+ mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+ mtk_clk_unregister_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt8186_topck_drv = {
+ .probe = clk_mt8186_topck_probe,
+ .remove = clk_mt8186_topck_remove,
+ .driver = {
+ .name = "clk-mt8186-topck",
+ .of_match_table = of_match_clk_mt8186_topck,
+ },
+};
+builtin_platform_driver(clk_mt8186_topck_drv);
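
top_adj_divs above appears to pair a gate bit with an 8-bit divider for the audio MCLK outputs. A hypothetical consumer sketch (names are illustrative): select top_aud_1, which can source apll1, as the I2S0 MCLK parent, then let the divider derive the requested MCLK rate.

#include <linux/clk.h>

static int example_setup_i2s0_mclk(struct clk *mck_sel, struct clk *aud1,
				   struct clk *apll12_div0, unsigned long mclk_rate)
{
	int ret;

	/* route top_aud_1 to the I2S0 MCLK mux (apll_i2s0_mck_sel) */
	ret = clk_set_parent(mck_sel, aud1);
	if (ret)
		return ret;

	/* program the 8-bit divider so apll12_div0 hits the requested MCLK rate */
	return clk_set_rate(apll12_div0, mclk_rate);
}
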
diff --git a/drivers/clk/mediatek/clk-mt8186-vdec.c b/drivers/clk/mediatek/clk-mt8186-vdec.c
new file mode 100644
index 000000000000..5ad7e1ae0bac
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8186-vdec.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8186-clk.h>
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x190,
+ .clr_ofs = 0x190,
+ .sta_ofs = 0x190,
+};
+
+static const struct mtk_gate_regs vdec2_cg_regs = {
+ .set_ofs = 0x200,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x200,
+};
+
+static const struct mtk_gate_regs vdec3_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_VDEC0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_VDEC1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_VDEC2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec2_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_VDEC3(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec3_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate vdec_clks[] = {
+ /* VDEC0 */
+ GATE_VDEC0(CLK_VDEC_CKEN, "vdec_cken", "top_vdec", 0),
+ GATE_VDEC0(CLK_VDEC_ACTIVE, "vdec_active", "top_vdec", 4),
+ GATE_VDEC0(CLK_VDEC_CKEN_ENG, "vdec_cken_eng", "top_vdec", 8),
+ /* VDEC1 */
+ GATE_VDEC1(CLK_VDEC_MINI_MDP_CKEN_CFG_RG, "vdec_mini_mdp_cken_cfg_rg", "top_vdec", 0),
+ /* VDEC2 */
+ GATE_VDEC2(CLK_VDEC_LAT_CKEN, "vdec_lat_cken", "top_vdec", 0),
+ GATE_VDEC2(CLK_VDEC_LAT_ACTIVE, "vdec_lat_active", "top_vdec", 4),
+ GATE_VDEC2(CLK_VDEC_LAT_CKEN_ENG, "vdec_lat_cken_eng", "top_vdec", 8),
+ /* VDEC3 */
+ GATE_VDEC3(CLK_VDEC_LARB1_CKEN, "vdec_larb1_cken", "top_vdec", 0),
+};
+
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8186_vdec[] = {
+ {
+ .compatible = "mediatek,mt8186-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8186_vdec_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8186-vdec",
+ .of_match_table = of_match_clk_mt8186_vdec,
+ },
+};
+builtin_platform_driver(clk_mt8186_vdec_drv);
diff --git a/drivers/clk/mediatek/clk-mt8186-venc.c b/drivers/clk/mediatek/clk-mt8186-venc.c
new file mode 100644
index 000000000000..f5519f794c45
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8186-venc.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/mt8186-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate venc_clks[] = {
+ GATE_VENC(CLK_VENC_CKE0_LARB, "venc_cke0_larb", "top_venc", 0),
+ GATE_VENC(CLK_VENC_CKE1_VENC, "venc_cke1_venc", "top_venc", 4),
+ GATE_VENC(CLK_VENC_CKE2_JPGENC, "venc_cke2_jpgenc", "top_venc", 8),
+ GATE_VENC(CLK_VENC_CKE5_GALS, "venc_cke5_gals", "top_venc", 28),
+};
+
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8186_venc[] = {
+ {
+ .compatible = "mediatek,mt8186-vencsys",
+ .data = &venc_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8186_venc_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8186-venc",
+ .of_match_table = of_match_clk_mt8186_venc,
+ },
+};
+builtin_platform_driver(clk_mt8186_venc_drv);
diff --git a/drivers/clk/mediatek/clk-mt8186-wpe.c b/drivers/clk/mediatek/clk-mt8186-wpe.c
new file mode 100644
index 000000000000..8db3e9178a1e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8186-wpe.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/mt8186-clk.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs wpe_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_WPE(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &wpe_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate wpe_clks[] = {
+ GATE_WPE(CLK_WPE_CK_EN, "wpe", "top_wpe", 17),
+ GATE_WPE(CLK_WPE_SMI_LARB8_CK_EN, "wpe_smi_larb8", "top_wpe", 19),
+ GATE_WPE(CLK_WPE_SYS_EVENT_TX_CK_EN, "wpe_sys_event_tx", "top_wpe", 20),
+ GATE_WPE(CLK_WPE_SMI_LARB8_PCLK_EN, "wpe_smi_larb8_p_en", "top_wpe", 25),
+};
+
+static const struct mtk_clk_desc wpe_desc = {
+ .clks = wpe_clks,
+ .num_clks = ARRAY_SIZE(wpe_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8186_wpe[] = {
+ {
+ .compatible = "mediatek,mt8186-wpesys",
+ .data = &wpe_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8186_wpe_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8186-wpe",
+ .of_match_table = of_match_clk_mt8186_wpe,
+ },
+};
+builtin_platform_driver(clk_mt8186_wpe_drv);
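
The three new MT8186 drivers above carry no probe logic of their own: each one fills a mtk_clk_desc, points the .data field of its of_device_id entry at it, and lets mtk_clk_simple_probe do the work. A minimal userspace sketch of that "compatible string selects a descriptor" pattern follows; clk_desc, match and find_desc are invented names that only mirror the shape of the kernel structures.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for mtk_clk_desc and of_device_id. */
struct clk_desc {
	const char *const *clk_names;
	size_t num_clks;
};

struct match {
	const char *compatible;
	const struct clk_desc *data;	/* like of_device_id::data */
};

static const char *const vdec_names[] = { "vdec_cken", "vdec_lat_cken" };
static const struct clk_desc vdec_desc = { vdec_names, 2 };

static const struct match matches[] = {
	{ "mediatek,mt8186-vdecsys", &vdec_desc },
	{ NULL, NULL },			/* sentinel, as in the of_device_id table */
};

/* The generic probe only needs the descriptor picked by the match. */
static const struct clk_desc *find_desc(const char *compatible)
{
	for (const struct match *m = matches; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct clk_desc *d = find_desc("mediatek,mt8186-vdecsys");

	for (size_t i = 0; d && i < d->num_clks; i++)
		printf("would register %s\n", d->clk_names[i]);
	return 0;
}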
diff --git a/drivers/clk/mediatek/clk-mt8192-aud.c b/drivers/clk/mediatek/clk-mt8192-aud.c
index f28d56628045..8c989bffd8c7 100644
--- a/drivers/clk/mediatek/clk-mt8192-aud.c
+++ b/drivers/clk/mediatek/clk-mt8192-aud.c
@@ -79,7 +79,7 @@ static const struct mtk_gate aud_clks[] = {
static int clk_mt8192_aud_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -91,7 +91,7 @@ static int clk_mt8192_aud_probe(struct platform_device *pdev)
if (r)
return r;
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
return r;
diff --git a/drivers/clk/mediatek/clk-mt8192-mm.c b/drivers/clk/mediatek/clk-mt8192-mm.c
index 4a0b4c4bc06a..1be3ff4d407d 100644
--- a/drivers/clk/mediatek/clk-mt8192-mm.c
+++ b/drivers/clk/mediatek/clk-mt8192-mm.c
@@ -84,7 +84,7 @@ static int clk_mt8192_mm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->parent->of_node;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
@@ -95,7 +95,7 @@ static int clk_mt8192_mm_probe(struct platform_device *pdev)
if (r)
return r;
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static struct platform_driver clk_mt8192_mm_drv = {
diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c
index ab27cd66b866..dda211b7a745 100644
--- a/drivers/clk/mediatek/clk-mt8192.c
+++ b/drivers/clk/mediatek/clk-mt8192.c
@@ -1178,7 +1178,7 @@ static const struct mtk_pll_data plls[] = {
0, 0, 32, 0x0330, 24, 0, 0, 0, 0x0334, 0),
};
-static struct clk_onecell_data *top_clk_data;
+static struct clk_hw_onecell_data *top_clk_data;
static void clk_mt8192_top_init_early(struct device_node *node)
{
@@ -1189,11 +1189,11 @@ static void clk_mt8192_top_init_early(struct device_node *node)
return;
for (i = 0; i < CLK_TOP_NR_CLK; i++)
- top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+ top_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), top_clk_data);
- of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+ of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
}
CLK_OF_DECLARE_DRIVER(mt8192_topckgen, "mediatek,mt8192-topckgen",
@@ -1222,12 +1222,13 @@ static int clk_mt8192_top_probe(struct platform_device *pdev)
if (r)
return r;
- return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+ top_clk_data);
}
static int clk_mt8192_infra_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -1239,7 +1240,7 @@ static int clk_mt8192_infra_probe(struct platform_device *pdev)
if (r)
goto free_clk_data;
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
goto free_clk_data;
@@ -1252,7 +1253,7 @@ free_clk_data:
static int clk_mt8192_peri_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -1264,7 +1265,7 @@ static int clk_mt8192_peri_probe(struct platform_device *pdev)
if (r)
goto free_clk_data;
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
goto free_clk_data;
@@ -1277,7 +1278,7 @@ free_clk_data:
static int clk_mt8192_apmixed_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -1290,7 +1291,7 @@ static int clk_mt8192_apmixed_probe(struct platform_device *pdev)
if (r)
goto free_clk_data;
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
goto free_clk_data;
diff --git a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
index eecc7035a56a..0dfed6ec4d15 100644
--- a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
@@ -112,7 +112,7 @@ static const struct of_device_id of_match_clk_mt8195_apmixed[] = {
static int clk_mt8195_apmixed_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -128,7 +128,7 @@ static int clk_mt8195_apmixed_probe(struct platform_device *pdev)
if (r)
goto unregister_plls;
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
goto unregister_gates;
@@ -148,7 +148,7 @@ free_apmixed_data:
static int clk_mt8195_apmixed_remove(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
- struct clk_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
of_clk_del_provider(node);
mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
diff --git a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
index 8cd88dfc3283..0b52f6a009c4 100644
--- a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
+++ b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
@@ -58,7 +58,7 @@ static const struct mtk_pll_data apusys_plls[] = {
static int clk_mt8195_apusys_pll_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -70,7 +70,7 @@ static int clk_mt8195_apusys_pll_probe(struct platform_device *pdev)
if (r)
goto free_apusys_pll_data;
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
goto unregister_plls;
@@ -87,7 +87,7 @@ free_apusys_pll_data:
static int clk_mt8195_apusys_pll_remove(struct platform_device *pdev)
{
- struct clk_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
of_clk_del_provider(node);
diff --git a/drivers/clk/mediatek/clk-mt8195-topckgen.c b/drivers/clk/mediatek/clk-mt8195-topckgen.c
index b602fcd7f1d1..ec70e1f65eaf 100644
--- a/drivers/clk/mediatek/clk-mt8195-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8195-topckgen.c
@@ -1224,7 +1224,7 @@ static const struct of_device_id of_match_clk_mt8195_topck[] = {
static int clk_mt8195_topck_probe(struct platform_device *pdev)
{
- struct clk_onecell_data *top_clk_data;
+ struct clk_hw_onecell_data *top_clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
void __iomem *base;
@@ -1267,7 +1267,7 @@ static int clk_mt8195_topck_probe(struct platform_device *pdev)
if (r)
goto unregister_composite_divs;
- r = of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
if (r)
goto unregister_gates;
@@ -1294,7 +1294,7 @@ free_top_data:
static int clk_mt8195_topck_remove(struct platform_device *pdev)
{
- struct clk_onecell_data *top_clk_data = platform_get_drvdata(pdev);
+ struct clk_hw_onecell_data *top_clk_data = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
of_clk_del_provider(node);
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo0.c b/drivers/clk/mediatek/clk-mt8195-vdo0.c
index 3bc7ed19d550..261a7f76dd3c 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo0.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo0.c
@@ -92,7 +92,7 @@ static int clk_mt8195_vdo0_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->parent->of_node;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_VDO0_NR_CLK);
@@ -103,7 +103,7 @@ static int clk_mt8195_vdo0_probe(struct platform_device *pdev)
if (r)
goto free_vdo0_data;
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
goto unregister_gates;
@@ -122,7 +122,7 @@ static int clk_mt8195_vdo0_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->parent->of_node;
- struct clk_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
of_clk_del_provider(node);
mtk_clk_unregister_gates(vdo0_clks, ARRAY_SIZE(vdo0_clks), clk_data);
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo1.c b/drivers/clk/mediatek/clk-mt8195-vdo1.c
index 90c738a85ff1..3378487d2c90 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo1.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo1.c
@@ -109,7 +109,7 @@ static int clk_mt8195_vdo1_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->parent->of_node;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_VDO1_NR_CLK);
@@ -120,7 +120,7 @@ static int clk_mt8195_vdo1_probe(struct platform_device *pdev)
if (r)
goto free_vdo1_data;
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
goto unregister_gates;
@@ -139,7 +139,7 @@ static int clk_mt8195_vdo1_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->parent->of_node;
- struct clk_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
of_clk_del_provider(node);
mtk_clk_unregister_gates(vdo1_clks, ARRAY_SIZE(vdo1_clks), clk_data);
diff --git a/drivers/clk/mediatek/clk-mt8516-aud.c b/drivers/clk/mediatek/clk-mt8516-aud.c
index 6ab3a06dc9d5..90f48068a8de 100644
--- a/drivers/clk/mediatek/clk-mt8516-aud.c
+++ b/drivers/clk/mediatek/clk-mt8516-aud.c
@@ -49,14 +49,14 @@ static const struct mtk_gate aud_clks[] __initconst = {
static void __init mtk_audsys_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c
index a37143f920ce..b96db88893e2 100644
--- a/drivers/clk/mediatek/clk-mt8516.c
+++ b/drivers/clk/mediatek/clk-mt8516.c
@@ -677,7 +677,7 @@ static const struct mtk_gate top_clks[] __initconst = {
static void __init mtk_topckgen_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
void __iomem *base;
@@ -699,7 +699,7 @@ static void __init mtk_topckgen_init(struct device_node *node)
mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
base, &mt8516_clk_lock, clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -708,7 +708,7 @@ CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8516-topckgen", mtk_topckgen_init);
static void __init mtk_infracfg_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
int r;
void __iomem *base;
@@ -723,7 +723,7 @@ static void __init mtk_infracfg_init(struct device_node *node)
mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base,
&mt8516_clk_lock, clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
@@ -771,23 +771,23 @@ static const struct mtk_pll_div_table mmpll_div_table[] = {
};
static const struct mtk_pll_data plls[] = {
- PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0x00000001, 0,
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0, 0,
21, 0x0104, 24, 0, 0x0104, 0),
- PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0x00000001,
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0,
HAVE_RST_BAR, 21, 0x0124, 24, 0, 0x0124, 0),
- PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000001,
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000000,
HAVE_RST_BAR, 7, 0x0144, 24, 0, 0x0144, 0),
- PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0x00000001, 0,
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0, 0,
21, 0x0164, 24, 0, 0x0164, 0, mmpll_div_table),
- PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0x00000001, 0,
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0, 0,
31, 0x0180, 1, 0x0194, 0x0184, 0),
- PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0x00000001, 0,
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0, 0,
31, 0x01A0, 1, 0x01B4, 0x01A4, 0),
};
static void __init mtk_apmixedsys_init(struct device_node *node)
{
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
void __iomem *base;
int r;
@@ -801,7 +801,7 @@ static void __init mtk_apmixedsys_init(struct device_node *node)
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index b4063261cf56..b9188000ab3c 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -18,46 +18,35 @@
#include "clk-mtk.h"
#include "clk-gate.h"
-struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num)
+struct clk_hw_onecell_data *mtk_alloc_clk_data(unsigned int clk_num)
{
int i;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
- clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ clk_data = kzalloc(struct_size(clk_data, hws, clk_num), GFP_KERNEL);
if (!clk_data)
return NULL;
- clk_data->clks = kcalloc(clk_num, sizeof(*clk_data->clks), GFP_KERNEL);
- if (!clk_data->clks)
- goto err_out;
-
- clk_data->clk_num = clk_num;
+ clk_data->num = clk_num;
for (i = 0; i < clk_num; i++)
- clk_data->clks[i] = ERR_PTR(-ENOENT);
+ clk_data->hws[i] = ERR_PTR(-ENOENT);
return clk_data;
-err_out:
- kfree(clk_data);
-
- return NULL;
}
EXPORT_SYMBOL_GPL(mtk_alloc_clk_data);
-void mtk_free_clk_data(struct clk_onecell_data *clk_data)
+void mtk_free_clk_data(struct clk_hw_onecell_data *clk_data)
{
- if (!clk_data)
- return;
-
- kfree(clk_data->clks);
kfree(clk_data);
}
+EXPORT_SYMBOL_GPL(mtk_free_clk_data);
int mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks, int num,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
int i;
- struct clk *clk;
+ struct clk_hw *hw;
if (!clk_data)
return -ENOMEM;
@@ -65,20 +54,21 @@ int mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks, int num,
for (i = 0; i < num; i++) {
const struct mtk_fixed_clk *rc = &clks[i];
- if (!IS_ERR_OR_NULL(clk_data->clks[rc->id])) {
+ if (!IS_ERR_OR_NULL(clk_data->hws[rc->id])) {
pr_warn("Trying to register duplicate clock ID: %d\n", rc->id);
continue;
}
- clk = clk_register_fixed_rate(NULL, rc->name, rc->parent, 0,
+ hw = clk_hw_register_fixed_rate(NULL, rc->name, rc->parent, 0,
rc->rate);
- if (IS_ERR(clk)) {
- pr_err("Failed to register clk %s: %pe\n", rc->name, clk);
+ if (IS_ERR(hw)) {
+ pr_err("Failed to register clk %s: %pe\n", rc->name,
+ hw);
goto err;
}
- clk_data->clks[rc->id] = clk;
+ clk_data->hws[rc->id] = hw;
}
return 0;
@@ -87,19 +77,19 @@ err:
while (--i >= 0) {
const struct mtk_fixed_clk *rc = &clks[i];
- if (IS_ERR_OR_NULL(clk_data->clks[rc->id]))
+ if (IS_ERR_OR_NULL(clk_data->hws[rc->id]))
continue;
- clk_unregister_fixed_rate(clk_data->clks[rc->id]);
- clk_data->clks[rc->id] = ERR_PTR(-ENOENT);
+ clk_unregister_fixed_rate(clk_data->hws[rc->id]->clk);
+ clk_data->hws[rc->id] = ERR_PTR(-ENOENT);
}
- return PTR_ERR(clk);
+ return PTR_ERR(hw);
}
EXPORT_SYMBOL_GPL(mtk_clk_register_fixed_clks);
void mtk_clk_unregister_fixed_clks(const struct mtk_fixed_clk *clks, int num,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
int i;
@@ -109,20 +99,20 @@ void mtk_clk_unregister_fixed_clks(const struct mtk_fixed_clk *clks, int num,
for (i = num; i > 0; i--) {
const struct mtk_fixed_clk *rc = &clks[i - 1];
- if (IS_ERR_OR_NULL(clk_data->clks[rc->id]))
+ if (IS_ERR_OR_NULL(clk_data->hws[rc->id]))
continue;
- clk_unregister_fixed_rate(clk_data->clks[rc->id]);
- clk_data->clks[rc->id] = ERR_PTR(-ENOENT);
+ clk_unregister_fixed_rate(clk_data->hws[rc->id]->clk);
+ clk_data->hws[rc->id] = ERR_PTR(-ENOENT);
}
}
EXPORT_SYMBOL_GPL(mtk_clk_unregister_fixed_clks);
int mtk_clk_register_factors(const struct mtk_fixed_factor *clks, int num,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
int i;
- struct clk *clk;
+ struct clk_hw *hw;
if (!clk_data)
return -ENOMEM;
@@ -130,20 +120,21 @@ int mtk_clk_register_factors(const struct mtk_fixed_factor *clks, int num,
for (i = 0; i < num; i++) {
const struct mtk_fixed_factor *ff = &clks[i];
- if (!IS_ERR_OR_NULL(clk_data->clks[ff->id])) {
+ if (!IS_ERR_OR_NULL(clk_data->hws[ff->id])) {
pr_warn("Trying to register duplicate clock ID: %d\n", ff->id);
continue;
}
- clk = clk_register_fixed_factor(NULL, ff->name, ff->parent_name,
+ hw = clk_hw_register_fixed_factor(NULL, ff->name, ff->parent_name,
CLK_SET_RATE_PARENT, ff->mult, ff->div);
- if (IS_ERR(clk)) {
- pr_err("Failed to register clk %s: %pe\n", ff->name, clk);
+ if (IS_ERR(hw)) {
+ pr_err("Failed to register clk %s: %pe\n", ff->name,
+ hw);
goto err;
}
- clk_data->clks[ff->id] = clk;
+ clk_data->hws[ff->id] = hw;
}
return 0;
@@ -152,19 +143,19 @@ err:
while (--i >= 0) {
const struct mtk_fixed_factor *ff = &clks[i];
- if (IS_ERR_OR_NULL(clk_data->clks[ff->id]))
+ if (IS_ERR_OR_NULL(clk_data->hws[ff->id]))
continue;
- clk_unregister_fixed_factor(clk_data->clks[ff->id]);
- clk_data->clks[ff->id] = ERR_PTR(-ENOENT);
+ clk_unregister_fixed_factor(clk_data->hws[ff->id]->clk);
+ clk_data->hws[ff->id] = ERR_PTR(-ENOENT);
}
- return PTR_ERR(clk);
+ return PTR_ERR(hw);
}
EXPORT_SYMBOL_GPL(mtk_clk_register_factors);
void mtk_clk_unregister_factors(const struct mtk_fixed_factor *clks, int num,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
int i;
@@ -174,19 +165,19 @@ void mtk_clk_unregister_factors(const struct mtk_fixed_factor *clks, int num,
for (i = num; i > 0; i--) {
const struct mtk_fixed_factor *ff = &clks[i - 1];
- if (IS_ERR_OR_NULL(clk_data->clks[ff->id]))
+ if (IS_ERR_OR_NULL(clk_data->hws[ff->id]))
continue;
- clk_unregister_fixed_factor(clk_data->clks[ff->id]);
- clk_data->clks[ff->id] = ERR_PTR(-ENOENT);
+ clk_unregister_fixed_factor(clk_data->hws[ff->id]->clk);
+ clk_data->hws[ff->id] = ERR_PTR(-ENOENT);
}
}
EXPORT_SYMBOL_GPL(mtk_clk_unregister_factors);
-struct clk *mtk_clk_register_composite(const struct mtk_composite *mc,
+static struct clk_hw *mtk_clk_register_composite(const struct mtk_composite *mc,
void __iomem *base, spinlock_t *lock)
{
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_mux *mux = NULL;
struct clk_gate *gate = NULL;
struct clk_divider *div = NULL;
@@ -250,18 +241,18 @@ struct clk *mtk_clk_register_composite(const struct mtk_composite *mc,
div_ops = &clk_divider_ops;
}
- clk = clk_register_composite(NULL, mc->name, parent_names, num_parents,
+ hw = clk_hw_register_composite(NULL, mc->name, parent_names, num_parents,
mux_hw, mux_ops,
div_hw, div_ops,
gate_hw, gate_ops,
mc->flags);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
goto err_out;
}
- return clk;
+ return hw;
err_out:
kfree(div);
kfree(gate);
@@ -270,15 +261,13 @@ err_out:
return ERR_PTR(ret);
}
-static void mtk_clk_unregister_composite(struct clk *clk)
+static void mtk_clk_unregister_composite(struct clk_hw *hw)
{
- struct clk_hw *hw;
struct clk_composite *composite;
struct clk_mux *mux = NULL;
struct clk_gate *gate = NULL;
struct clk_divider *div = NULL;
- hw = __clk_get_hw(clk);
if (!hw)
return;
@@ -290,7 +279,7 @@ static void mtk_clk_unregister_composite(struct clk *clk)
if (composite->rate_hw)
div = to_clk_divider(composite->rate_hw);
- clk_unregister_composite(clk);
+ clk_hw_unregister_composite(hw);
kfree(div);
kfree(gate);
kfree(mux);
@@ -298,9 +287,9 @@ static void mtk_clk_unregister_composite(struct clk *clk)
int mtk_clk_register_composites(const struct mtk_composite *mcs, int num,
void __iomem *base, spinlock_t *lock,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
- struct clk *clk;
+ struct clk_hw *hw;
int i;
if (!clk_data)
@@ -309,20 +298,21 @@ int mtk_clk_register_composites(const struct mtk_composite *mcs, int num,
for (i = 0; i < num; i++) {
const struct mtk_composite *mc = &mcs[i];
- if (!IS_ERR_OR_NULL(clk_data->clks[mc->id])) {
+ if (!IS_ERR_OR_NULL(clk_data->hws[mc->id])) {
pr_warn("Trying to register duplicate clock ID: %d\n",
mc->id);
continue;
}
- clk = mtk_clk_register_composite(mc, base, lock);
+ hw = mtk_clk_register_composite(mc, base, lock);
- if (IS_ERR(clk)) {
- pr_err("Failed to register clk %s: %pe\n", mc->name, clk);
+ if (IS_ERR(hw)) {
+ pr_err("Failed to register clk %s: %pe\n", mc->name,
+ hw);
goto err;
}
- clk_data->clks[mc->id] = clk;
+ clk_data->hws[mc->id] = hw;
}
return 0;
@@ -331,19 +321,19 @@ err:
while (--i >= 0) {
const struct mtk_composite *mc = &mcs[i];
- if (IS_ERR_OR_NULL(clk_data->clks[mcs->id]))
+ if (IS_ERR_OR_NULL(clk_data->hws[mc->id]))
continue;
- mtk_clk_unregister_composite(clk_data->clks[mc->id]);
- clk_data->clks[mc->id] = ERR_PTR(-ENOENT);
+ mtk_clk_unregister_composite(clk_data->hws[mc->id]);
+ clk_data->hws[mc->id] = ERR_PTR(-ENOENT);
}
- return PTR_ERR(clk);
+ return PTR_ERR(hw);
}
EXPORT_SYMBOL_GPL(mtk_clk_register_composites);
void mtk_clk_unregister_composites(const struct mtk_composite *mcs, int num,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
int i;
@@ -353,20 +343,20 @@ void mtk_clk_unregister_composites(const struct mtk_composite *mcs, int num,
for (i = num; i > 0; i--) {
const struct mtk_composite *mc = &mcs[i - 1];
- if (IS_ERR_OR_NULL(clk_data->clks[mc->id]))
+ if (IS_ERR_OR_NULL(clk_data->hws[mc->id]))
continue;
- mtk_clk_unregister_composite(clk_data->clks[mc->id]);
- clk_data->clks[mc->id] = ERR_PTR(-ENOENT);
+ mtk_clk_unregister_composite(clk_data->hws[mc->id]);
+ clk_data->hws[mc->id] = ERR_PTR(-ENOENT);
}
}
EXPORT_SYMBOL_GPL(mtk_clk_unregister_composites);
int mtk_clk_register_dividers(const struct mtk_clk_divider *mcds, int num,
void __iomem *base, spinlock_t *lock,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
- struct clk *clk;
+ struct clk_hw *hw;
int i;
if (!clk_data)
@@ -375,22 +365,23 @@ int mtk_clk_register_dividers(const struct mtk_clk_divider *mcds, int num,
for (i = 0; i < num; i++) {
const struct mtk_clk_divider *mcd = &mcds[i];
- if (!IS_ERR_OR_NULL(clk_data->clks[mcd->id])) {
+ if (!IS_ERR_OR_NULL(clk_data->hws[mcd->id])) {
pr_warn("Trying to register duplicate clock ID: %d\n",
mcd->id);
continue;
}
- clk = clk_register_divider(NULL, mcd->name, mcd->parent_name,
+ hw = clk_hw_register_divider(NULL, mcd->name, mcd->parent_name,
mcd->flags, base + mcd->div_reg, mcd->div_shift,
mcd->div_width, mcd->clk_divider_flags, lock);
- if (IS_ERR(clk)) {
- pr_err("Failed to register clk %s: %pe\n", mcd->name, clk);
+ if (IS_ERR(hw)) {
+ pr_err("Failed to register clk %s: %pe\n", mcd->name,
+ hw);
goto err;
}
- clk_data->clks[mcd->id] = clk;
+ clk_data->hws[mcd->id] = hw;
}
return 0;
@@ -399,18 +390,18 @@ err:
while (--i >= 0) {
const struct mtk_clk_divider *mcd = &mcds[i];
- if (IS_ERR_OR_NULL(clk_data->clks[mcd->id]))
+ if (IS_ERR_OR_NULL(clk_data->hws[mcd->id]))
continue;
- mtk_clk_unregister_composite(clk_data->clks[mcd->id]);
- clk_data->clks[mcd->id] = ERR_PTR(-ENOENT);
+ mtk_clk_unregister_composite(clk_data->hws[mcd->id]);
+ clk_data->hws[mcd->id] = ERR_PTR(-ENOENT);
}
- return PTR_ERR(clk);
+ return PTR_ERR(hw);
}
void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
int i;
@@ -420,18 +411,18 @@ void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num,
for (i = num; i > 0; i--) {
const struct mtk_clk_divider *mcd = &mcds[i - 1];
- if (IS_ERR_OR_NULL(clk_data->clks[mcd->id]))
+ if (IS_ERR_OR_NULL(clk_data->hws[mcd->id]))
continue;
- clk_unregister_divider(clk_data->clks[mcd->id]);
- clk_data->clks[mcd->id] = ERR_PTR(-ENOENT);
+ clk_unregister_divider(clk_data->hws[mcd->id]->clk);
+ clk_data->hws[mcd->id] = ERR_PTR(-ENOENT);
}
}
int mtk_clk_simple_probe(struct platform_device *pdev)
{
const struct mtk_clk_desc *mcd;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int r;
@@ -447,7 +438,7 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
if (r)
goto free_data;
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
goto unregister_clks;
@@ -465,7 +456,7 @@ free_data:
int mtk_clk_simple_remove(struct platform_device *pdev)
{
const struct mtk_clk_desc *mcd = of_device_get_match_data(&pdev->dev);
- struct clk_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
of_clk_del_provider(node);
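
The clk-mtk.c hunk above also changes how the provider data is allocated: clk_hw_onecell_data ends in a flexible array of clk_hw pointers, so a single struct_size()-sized kzalloc() replaces the old kzalloc() plus kcalloc() pair and the separate error path disappears. Here is a hedged userspace sketch of the same idea; the onecell type and the ERR_PTR macro are simplified stand-ins, not the kernel definitions.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct clk_hw;				/* opaque for this sketch */

/* Simplified stand-in for struct clk_hw_onecell_data. */
struct onecell {
	unsigned int num;
	struct clk_hw *hws[];		/* flexible array member */
};

/* Minimal ERR_PTR-style sentinel, not the kernel macro. */
#define ERR_PTR(err) ((struct clk_hw *)(long)(err))

static struct onecell *alloc_clk_data(unsigned int clk_num)
{
	/* one allocation covers header plus array, like struct_size() */
	struct onecell *cd = calloc(1, sizeof(*cd) + clk_num * sizeof(cd->hws[0]));

	if (!cd)
		return NULL;

	cd->num = clk_num;
	for (unsigned int i = 0; i < clk_num; i++)
		cd->hws[i] = ERR_PTR(-ENOENT);	/* "not provided yet" marker */
	return cd;
}

int main(void)
{
	struct onecell *cd = alloc_clk_data(4);

	printf("allocated %u slots in one chunk\n", cd ? cd->num : 0);
	free(cd);
	return 0;
}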
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index bf6565aa7319..adb1304d35d4 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -35,9 +35,9 @@ struct mtk_fixed_clk {
}
int mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks, int num,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
void mtk_clk_unregister_fixed_clks(const struct mtk_fixed_clk *clks, int num,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
struct mtk_fixed_factor {
int id;
@@ -56,9 +56,9 @@ struct mtk_fixed_factor {
}
int mtk_clk_register_factors(const struct mtk_fixed_factor *clks, int num,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
void mtk_clk_unregister_factors(const struct mtk_fixed_factor *clks, int num,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
struct mtk_composite {
int id;
@@ -147,14 +147,11 @@ struct mtk_composite {
.flags = 0, \
}
-struct clk *mtk_clk_register_composite(const struct mtk_composite *mc,
- void __iomem *base, spinlock_t *lock);
-
int mtk_clk_register_composites(const struct mtk_composite *mcs, int num,
void __iomem *base, spinlock_t *lock,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
void mtk_clk_unregister_composites(const struct mtk_composite *mcs, int num,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
struct mtk_clk_divider {
int id;
@@ -180,14 +177,14 @@ struct mtk_clk_divider {
int mtk_clk_register_dividers(const struct mtk_clk_divider *mcds, int num,
void __iomem *base, spinlock_t *lock,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
-struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num);
-void mtk_free_clk_data(struct clk_onecell_data *clk_data);
+struct clk_hw_onecell_data *mtk_alloc_clk_data(unsigned int clk_num);
+void mtk_free_clk_data(struct clk_hw_onecell_data *clk_data);
-struct clk *mtk_clk_register_ref2usb_tx(const char *name,
+struct clk_hw *mtk_clk_register_ref2usb_tx(const char *name,
const char *parent_name, void __iomem *reg);
void mtk_register_reset_controller(struct device_node *np,
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index 21ad5a4afd65..cd5f9fd8cb98 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -143,13 +143,13 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
-static struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
+static struct clk_hw *mtk_clk_register_mux(const struct mtk_mux *mux,
struct regmap *regmap,
spinlock_t *lock)
{
struct mtk_clk_mux *clk_mux;
struct clk_init_data init = {};
- struct clk *clk;
+ int ret;
clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL);
if (!clk_mux)
@@ -166,37 +166,34 @@ static struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
clk_mux->lock = lock;
clk_mux->hw.init = &init;
- clk = clk_register(NULL, &clk_mux->hw);
- if (IS_ERR(clk)) {
+ ret = clk_hw_register(NULL, &clk_mux->hw);
+ if (ret) {
kfree(clk_mux);
- return clk;
+ return ERR_PTR(ret);
}
- return clk;
+ return &clk_mux->hw;
}
-static void mtk_clk_unregister_mux(struct clk *clk)
+static void mtk_clk_unregister_mux(struct clk_hw *hw)
{
struct mtk_clk_mux *mux;
- struct clk_hw *hw;
-
- hw = __clk_get_hw(clk);
if (!hw)
return;
mux = to_mtk_clk_mux(hw);
- clk_unregister(clk);
+ clk_hw_unregister(hw);
kfree(mux);
}
int mtk_clk_register_muxes(const struct mtk_mux *muxes,
int num, struct device_node *node,
spinlock_t *lock,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
struct regmap *regmap;
- struct clk *clk;
+ struct clk_hw *hw;
int i;
regmap = device_node_to_regmap(node);
@@ -208,20 +205,21 @@ int mtk_clk_register_muxes(const struct mtk_mux *muxes,
for (i = 0; i < num; i++) {
const struct mtk_mux *mux = &muxes[i];
- if (!IS_ERR_OR_NULL(clk_data->clks[mux->id])) {
+ if (!IS_ERR_OR_NULL(clk_data->hws[mux->id])) {
pr_warn("%pOF: Trying to register duplicate clock ID: %d\n",
node, mux->id);
continue;
}
- clk = mtk_clk_register_mux(mux, regmap, lock);
+ hw = mtk_clk_register_mux(mux, regmap, lock);
- if (IS_ERR(clk)) {
- pr_err("Failed to register clk %s: %pe\n", mux->name, clk);
+ if (IS_ERR(hw)) {
+ pr_err("Failed to register clk %s: %pe\n", mux->name,
+ hw);
goto err;
}
- clk_data->clks[mux->id] = clk;
+ clk_data->hws[mux->id] = hw;
}
return 0;
@@ -230,19 +228,19 @@ err:
while (--i >= 0) {
const struct mtk_mux *mux = &muxes[i];
- if (IS_ERR_OR_NULL(clk_data->clks[mux->id]))
+ if (IS_ERR_OR_NULL(clk_data->hws[mux->id]))
continue;
- mtk_clk_unregister_mux(clk_data->clks[mux->id]);
- clk_data->clks[mux->id] = ERR_PTR(-ENOENT);
+ mtk_clk_unregister_mux(clk_data->hws[mux->id]);
+ clk_data->hws[mux->id] = ERR_PTR(-ENOENT);
}
- return PTR_ERR(clk);
+ return PTR_ERR(hw);
}
EXPORT_SYMBOL_GPL(mtk_clk_register_muxes);
void mtk_clk_unregister_muxes(const struct mtk_mux *muxes, int num,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
int i;
@@ -252,11 +250,11 @@ void mtk_clk_unregister_muxes(const struct mtk_mux *muxes, int num,
for (i = num; i > 0; i--) {
const struct mtk_mux *mux = &muxes[i - 1];
- if (IS_ERR_OR_NULL(clk_data->clks[mux->id]))
+ if (IS_ERR_OR_NULL(clk_data->hws[mux->id]))
continue;
- mtk_clk_unregister_mux(clk_data->clks[mux->id]);
- clk_data->clks[mux->id] = ERR_PTR(-ENOENT);
+ mtk_clk_unregister_mux(clk_data->hws[mux->id]);
+ clk_data->hws[mux->id] = ERR_PTR(-ENOENT);
}
}
EXPORT_SYMBOL_GPL(mtk_clk_unregister_muxes);
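
clk-mux.c shows the other half of the conversion: clk_register() returned a struct clk pointer (or an error pointer), while clk_hw_register() returns an int, so the helper now hands back &clk_mux->hw on success and ERR_PTR(ret) on failure. Below is a self-contained sketch of that return-value shape; the ERR_PTR/PTR_ERR/IS_ERR macros and register_hw() are toy imitations of <linux/err.h> and the clk framework, not the real ones.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy error-pointer helpers in the spirit of <linux/err.h>. */
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)

struct hw { const char *name; };
struct my_mux { struct hw hw; int shift; };

/* Imitates clk_hw_register(): 0 on success, -errno on failure. */
static int register_hw(struct hw *hw)
{
	return hw->name ? 0 : -EINVAL;
}

/* Imitates the converted helper: return the embedded hw or ERR_PTR(). */
static struct hw *register_mux(const char *name, int shift)
{
	struct my_mux *mux = calloc(1, sizeof(*mux));
	int ret;

	if (!mux)
		return ERR_PTR(-ENOMEM);

	mux->hw.name = name;
	mux->shift = shift;

	ret = register_hw(&mux->hw);
	if (ret) {
		free(mux);
		return ERR_PTR(ret);
	}
	return &mux->hw;
}

int main(void)
{
	struct hw *hw = register_mux("top_mux", 4);

	if (IS_ERR(hw))
		printf("failed: %ld\n", PTR_ERR(hw));
	else
		printf("registered %s\n", hw->name);
	return 0;
}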
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index 903a3c937959..6539c58f5d7d 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -11,7 +11,7 @@
#include <linux/types.h>
struct clk;
-struct clk_onecell_data;
+struct clk_hw_onecell_data;
struct clk_ops;
struct device_node;
@@ -84,9 +84,9 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
int mtk_clk_register_muxes(const struct mtk_mux *muxes,
int num, struct device_node *node,
spinlock_t *lock,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
void mtk_clk_unregister_muxes(const struct mtk_mux *muxes, int num,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
#endif /* __DRV_CLK_MTK_MUX_H */
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index ccaa2085ab4d..54e6cfd29dfc 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -243,7 +243,6 @@ static int mtk_pll_prepare(struct clk_hw *hw)
{
struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
u32 r;
- u32 div_en_mask;
r = readl(pll->pwr_addr) | CON0_PWR_ON;
writel(r, pll->pwr_addr);
@@ -256,9 +255,8 @@ static int mtk_pll_prepare(struct clk_hw *hw)
r = readl(pll->en_addr) | BIT(pll->data->pll_en_bit);
writel(r, pll->en_addr);
- div_en_mask = pll->data->en_mask & ~CON0_BASE_EN;
- if (div_en_mask) {
- r = readl(pll->base_addr + REG_CON0) | div_en_mask;
+ if (pll->data->en_mask) {
+ r = readl(pll->base_addr + REG_CON0) | pll->data->en_mask;
writel(r, pll->base_addr + REG_CON0);
}
@@ -279,7 +277,6 @@ static void mtk_pll_unprepare(struct clk_hw *hw)
{
struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
u32 r;
- u32 div_en_mask;
if (pll->data->flags & HAVE_RST_BAR) {
r = readl(pll->base_addr + REG_CON0);
@@ -289,9 +286,8 @@ static void mtk_pll_unprepare(struct clk_hw *hw)
__mtk_pll_tuner_disable(pll);
- div_en_mask = pll->data->en_mask & ~CON0_BASE_EN;
- if (div_en_mask) {
- r = readl(pll->base_addr + REG_CON0) & ~div_en_mask;
+ if (pll->data->en_mask) {
+ r = readl(pll->base_addr + REG_CON0) & ~pll->data->en_mask;
writel(r, pll->base_addr + REG_CON0);
}
@@ -314,12 +310,12 @@ static const struct clk_ops mtk_pll_ops = {
.set_rate = mtk_pll_set_rate,
};
-static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
+static struct clk_hw *mtk_clk_register_pll(const struct mtk_pll_data *data,
void __iomem *base)
{
struct mtk_clk_pll *pll;
struct clk_init_data init = {};
- struct clk *clk;
+ int ret;
const char *parent_name = "clk26m";
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
@@ -354,36 +350,36 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
init.parent_names = &parent_name;
init.num_parents = 1;
- clk = clk_register(NULL, &pll->hw);
+ ret = clk_hw_register(NULL, &pll->hw);
- if (IS_ERR(clk))
+ if (ret) {
kfree(pll);
+ return ERR_PTR(ret);
+ }
- return clk;
+ return &pll->hw;
}
-static void mtk_clk_unregister_pll(struct clk *clk)
+static void mtk_clk_unregister_pll(struct clk_hw *hw)
{
- struct clk_hw *hw;
struct mtk_clk_pll *pll;
- hw = __clk_get_hw(clk);
if (!hw)
return;
pll = to_mtk_clk_pll(hw);
- clk_unregister(clk);
+ clk_hw_unregister(hw);
kfree(pll);
}
int mtk_clk_register_plls(struct device_node *node,
const struct mtk_pll_data *plls, int num_plls,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
void __iomem *base;
int i;
- struct clk *clk;
+ struct clk_hw *hw;
base = of_iomap(node, 0);
if (!base) {
@@ -394,20 +390,21 @@ int mtk_clk_register_plls(struct device_node *node,
for (i = 0; i < num_plls; i++) {
const struct mtk_pll_data *pll = &plls[i];
- if (!IS_ERR_OR_NULL(clk_data->clks[pll->id])) {
+ if (!IS_ERR_OR_NULL(clk_data->hws[pll->id])) {
pr_warn("%pOF: Trying to register duplicate clock ID: %d\n",
node, pll->id);
continue;
}
- clk = mtk_clk_register_pll(pll, base);
+ hw = mtk_clk_register_pll(pll, base);
- if (IS_ERR(clk)) {
- pr_err("Failed to register clk %s: %pe\n", pll->name, clk);
+ if (IS_ERR(hw)) {
+ pr_err("Failed to register clk %s: %pe\n", pll->name,
+ hw);
goto err;
}
- clk_data->clks[pll->id] = clk;
+ clk_data->hws[pll->id] = hw;
}
return 0;
@@ -416,27 +413,26 @@ err:
while (--i >= 0) {
const struct mtk_pll_data *pll = &plls[i];
- mtk_clk_unregister_pll(clk_data->clks[pll->id]);
- clk_data->clks[pll->id] = ERR_PTR(-ENOENT);
+ mtk_clk_unregister_pll(clk_data->hws[pll->id]);
+ clk_data->hws[pll->id] = ERR_PTR(-ENOENT);
}
iounmap(base);
- return PTR_ERR(clk);
+ return PTR_ERR(hw);
}
EXPORT_SYMBOL_GPL(mtk_clk_register_plls);
-static __iomem void *mtk_clk_pll_get_base(struct clk *clk,
+static __iomem void *mtk_clk_pll_get_base(struct clk_hw *hw,
const struct mtk_pll_data *data)
{
- struct clk_hw *hw = __clk_get_hw(clk);
struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
return pll->base_addr - data->reg;
}
void mtk_clk_unregister_plls(const struct mtk_pll_data *plls, int num_plls,
- struct clk_onecell_data *clk_data)
+ struct clk_hw_onecell_data *clk_data)
{
__iomem void *base = NULL;
int i;
@@ -447,7 +443,7 @@ void mtk_clk_unregister_plls(const struct mtk_pll_data *plls, int num_plls,
for (i = num_plls; i > 0; i--) {
const struct mtk_pll_data *pll = &plls[i - 1];
- if (IS_ERR_OR_NULL(clk_data->clks[pll->id]))
+ if (IS_ERR_OR_NULL(clk_data->hws[pll->id]))
continue;
/*
@@ -456,10 +452,10 @@ void mtk_clk_unregister_plls(const struct mtk_pll_data *plls, int num_plls,
* pointer to the I/O region base address. We have to fetch
* it from one of the registered clks.
*/
- base = mtk_clk_pll_get_base(clk_data->clks[pll->id], pll);
+ base = mtk_clk_pll_get_base(clk_data->hws[pll->id], pll);
- mtk_clk_unregister_pll(clk_data->clks[pll->id]);
- clk_data->clks[pll->id] = ERR_PTR(-ENOENT);
+ mtk_clk_unregister_pll(clk_data->hws[pll->id]);
+ clk_data->hws[pll->id] = ERR_PTR(-ENOENT);
}
iounmap(base);
diff --git a/drivers/clk/mediatek/clk-pll.h b/drivers/clk/mediatek/clk-pll.h
index bf06e44caef9..fe3199715688 100644
--- a/drivers/clk/mediatek/clk-pll.h
+++ b/drivers/clk/mediatek/clk-pll.h
@@ -10,7 +10,7 @@
#include <linux/types.h>
struct clk_ops;
-struct clk_onecell_data;
+struct clk_hw_onecell_data;
struct device_node;
struct mtk_pll_div_table {
@@ -50,8 +50,8 @@ struct mtk_pll_data {
int mtk_clk_register_plls(struct device_node *node,
const struct mtk_pll_data *plls, int num_plls,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
void mtk_clk_unregister_plls(const struct mtk_pll_data *plls, int num_plls,
- struct clk_onecell_data *clk_data);
+ struct clk_hw_onecell_data *clk_data);
#endif /* __DRV_CLK_MTK_PLL_H */
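
The clk-pll.c and clk-mt8516.c hunks belong together: en_mask used to mix the CON0 base-enable bit with extra divider-enable bits, so prepare/unprepare had to strip CON0_BASE_EN back out; with the base enable always driven through pll_en_bit, en_mask now carries only the extra bits and the stray 0x1 is dropped from each PLL table entry. The sketch below models the before/after write sequence with a plain variable instead of MMIO and assumes the common case where the enable bit lives in CON0 bit 0; it is an illustration, not the driver code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CON0_BASE_EN	(1u << 0)	/* PLL enable bit in CON0 */

/* Fake CON0 register; readl/writel become plain assignments here. */
static uint32_t con0;

/* Old scheme: en_mask still contained CON0_BASE_EN, so it had to be
 * stripped before OR-ing in the remaining divider-enable bits. */
static void prepare_old(uint32_t en_mask)
{
	uint32_t div_en_mask = en_mask & ~CON0_BASE_EN;

	con0 |= CON0_BASE_EN;		/* enable the PLL itself */
	if (div_en_mask)
		con0 |= div_en_mask;	/* then any extra enables */
}

/* New scheme: the enable bit is driven on its own (pll_en_bit) and
 * en_mask holds only the extra divider-enable bits. */
static void prepare_new(uint32_t en_mask)
{
	con0 |= CON0_BASE_EN;
	if (en_mask)
		con0 |= en_mask;
}

int main(void)
{
	con0 = 0;
	prepare_old(0x30000001);	/* old-style univpll mask */
	uint32_t old_result = con0;

	con0 = 0;
	prepare_new(0x30000000);	/* same PLL after the cleanup */

	assert(con0 == old_result);	/* behaviour is unchanged */
	printf("CON0 = 0x%08x either way\n", (unsigned int)con0);
	return 0;
}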
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index cfc79f942b07..03de634efc52 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -11,6 +11,7 @@
#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/soc/pxa/smemc.h>
#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"
@@ -94,7 +95,8 @@ void __init clkdev_pxa_register(int ckid, const char *con_id,
clk_register_clkdev(clk, con_id, dev_id);
}
-int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
+int __init clk_pxa_cken_init(const struct desc_clk_cken *clks,
+ int nb_clks, void __iomem *clk_regs)
{
int i;
struct pxa_clk *pxa_clk;
@@ -106,6 +108,7 @@ int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
pxa_clk->lp = clks[i].lp;
pxa_clk->hp = clks[i].hp;
pxa_clk->gate = clks[i].gate;
+ pxa_clk->gate.reg = clk_regs + clks[i].cken_reg;
pxa_clk->gate.lock = &pxa_clk_lock;
clk = clk_register_composite(NULL, clks[i].name,
clks[i].parent_names, 2,
@@ -150,12 +153,13 @@ void pxa2xx_core_turbo_switch(bool on)
}
void pxa2xx_cpll_change(struct pxa2xx_freq *freq,
- u32 (*mdrefr_dri)(unsigned int), void __iomem *mdrefr,
+ u32 (*mdrefr_dri)(unsigned int),
void __iomem *cccr)
{
unsigned int clkcfg = freq->clkcfg;
unsigned int unused, preset_mdrefr, postset_mdrefr;
unsigned long flags;
+ void __iomem *mdrefr = pxa_smemc_get_mdrefr();
local_irq_save(flags);
diff --git a/drivers/clk/pxa/clk-pxa.h b/drivers/clk/pxa/clk-pxa.h
index 5768e0f728ce..7ec2d2821d8f 100644
--- a/drivers/clk/pxa/clk-pxa.h
+++ b/drivers/clk/pxa/clk-pxa.h
@@ -105,6 +105,7 @@
struct desc_clk_cken {
struct clk_hw hw;
int ckid;
+ int cken_reg;
const char *name;
const char *dev_id;
const char *con_id;
@@ -119,11 +120,12 @@ struct desc_clk_cken {
#define PXA_CKEN(_dev_id, _con_id, _name, parents, _mult_lp, _div_lp, \
_mult_hp, _div_hp, is_lp, _cken_reg, _cken_bit, flag) \
{ .ckid = CLK_ ## _name, .name = #_name, \
+ .cken_reg = _cken_reg, \
.dev_id = _dev_id, .con_id = _con_id, .parent_names = parents,\
.lp = { .mult = _mult_lp, .div = _div_lp }, \
.hp = { .mult = _mult_hp, .div = _div_hp }, \
.is_in_low_power = is_lp, \
- .gate = { .reg = (void __iomem *)_cken_reg, .bit_idx = _cken_bit }, \
+ .gate = { .bit_idx = _cken_bit }, \
.flags = flag, \
}
#define PXA_CKEN_1RATE(dev_id, con_id, name, parents, cken_reg, \
@@ -146,12 +148,13 @@ static inline int dummy_clk_set_parent(struct clk_hw *hw, u8 index)
extern void clkdev_pxa_register(int ckid, const char *con_id,
const char *dev_id, struct clk *clk);
-extern int clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks);
+extern int clk_pxa_cken_init(const struct desc_clk_cken *clks,
+ int nb_clks, void __iomem *clk_regs);
void clk_pxa_dt_common_init(struct device_node *np);
void pxa2xx_core_turbo_switch(bool on);
void pxa2xx_cpll_change(struct pxa2xx_freq *freq,
- u32 (*mdrefr_dri)(unsigned int), void __iomem *mdrefr,
+ u32 (*mdrefr_dri)(unsigned int),
void __iomem *cccr);
int pxa2xx_determine_rate(struct clk_rate_request *req,
struct pxa2xx_freq *freqs, int nb_freqs);
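
The clk-pxa.h change above stops baking an absolute virtual address into the PXA_CKEN() initialiser: the descriptor now records only the register offset (cken_reg) and clk_pxa_cken_init() resolves gate.reg as clk_regs + offset once it has been handed the mapped base. A userspace sketch of that descriptor-plus-base pattern follows; desc, resolve_regs and the sample offsets are illustrative names only.

#include <stdint.h>
#include <stdio.h>

/* Descriptor stores an offset, like the new desc_clk_cken::cken_reg. */
struct desc {
	const char *name;
	unsigned int cken_reg;		/* byte offset into the clock block */
	unsigned int bit_idx;
	volatile uint32_t *reg;		/* filled in at init time */
};

static struct desc clks[] = {
	{ "ffuart", 0x0, 6 },
	{ "stuart", 0x0, 5 },
};

/* Stand-in for the ioremap()ed clock register block. */
static uint32_t fake_clk_block[4];

/* Like clk_pxa_cken_init(): bind offsets to the mapped base. */
static void resolve_regs(struct desc *d, int n, volatile uint32_t *base)
{
	for (int i = 0; i < n; i++)
		d[i].reg = base + d[i].cken_reg / sizeof(*base);
}

int main(void)
{
	resolve_regs(clks, 2, fake_clk_block);

	*clks[0].reg |= 1u << clks[0].bit_idx;	/* "enable" ffuart */
	printf("CKEN = 0x%08x\n", (unsigned int)fake_clk_block[0]);
	return 0;
}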
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
index d0f957996acb..93d5907b8530 100644
--- a/drivers/clk/pxa/clk-pxa25x.c
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -14,11 +14,11 @@
#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <mach/pxa2xx-regs.h>
-#include <mach/smemc.h>
+#include <linux/soc/pxa/smemc.h>
#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"
+#include "clk-pxa2xx.h"
#define KHz 1000
#define MHz (1000 * 1000)
@@ -33,15 +33,13 @@ enum {
((T) ? CLKCFG_TURBO : 0))
#define PXA25x_CCCR(N2, M, L) (N2 << 7 | M << 5 | L)
-#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
-#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
-
/* Define the refresh period in mSec for the SDRAM and the number of rows */
#define SDRAM_TREF 64 /* standard 64ms SDRAM */
/*
* Various clock factors driven by the CCCR register.
*/
+static void __iomem *clk_regs;
/* Crystal Frequency to Memory Frequency Multiplier (L) */
static unsigned char L_clk_mult[32] = { 0, 27, 32, 36, 40, 45, 0, };
@@ -57,30 +55,9 @@ static const char * const get_freq_khz[] = {
"core", "run", "cpll", "memory"
};
-static int get_sdram_rows(void)
-{
- static int sdram_rows;
- unsigned int drac2 = 0, drac0 = 0;
- u32 mdcnfg;
-
- if (sdram_rows)
- return sdram_rows;
-
- mdcnfg = readl_relaxed(MDCNFG);
-
- if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
- drac2 = MDCNFG_DRAC2(mdcnfg);
-
- if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
- drac0 = MDCNFG_DRAC0(mdcnfg);
-
- sdram_rows = 1 << (11 + max(drac0, drac2));
- return sdram_rows;
-}
-
static u32 mdrefr_dri(unsigned int freq_khz)
{
- u32 interval = freq_khz * SDRAM_TREF / get_sdram_rows();
+ u32 interval = freq_khz * SDRAM_TREF / pxa2xx_smemc_get_sdram_rows();
return interval / 32;
}
@@ -121,7 +98,7 @@ unsigned int pxa25x_get_clk_frequency_khz(int info)
static unsigned long clk_pxa25x_memory_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long cccr = readl(CCCR);
+ unsigned long cccr = readl(clk_regs + CCCR);
unsigned int m = M_clk_mult[(cccr >> 5) & 0x03];
return parent_rate / m;
@@ -225,7 +202,7 @@ MUX_OPS(clk_pxa25x_core, "core", CLK_SET_RATE_PARENT);
static unsigned long clk_pxa25x_run_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long cccr = readl(CCCR);
+ unsigned long cccr = readl(clk_regs + CCCR);
unsigned int n2 = N2_clk_mult[(cccr >> 7) & 0x07];
return (parent_rate / n2) * 2;
@@ -236,7 +213,7 @@ RATE_RO_OPS(clk_pxa25x_run, "run");
static unsigned long clk_pxa25x_cpll_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long clkcfg, cccr = readl(CCCR);
+ unsigned long clkcfg, cccr = readl(clk_regs + CCCR);
unsigned int l, m, n2, t;
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
@@ -268,7 +245,7 @@ static int clk_pxa25x_cpll_set_rate(struct clk_hw *hw, unsigned long rate,
if (i >= ARRAY_SIZE(pxa25x_freqs))
return -EINVAL;
- pxa2xx_cpll_change(&pxa25x_freqs[i], mdrefr_dri, MDREFR, CCCR);
+ pxa2xx_cpll_change(&pxa25x_freqs[i], mdrefr_dri, clk_regs + CCCR);
return 0;
}
@@ -345,16 +322,17 @@ static void __init pxa25x_dummy_clocks_init(void)
}
}
-int __init pxa25x_clocks_init(void)
+int __init pxa25x_clocks_init(void __iomem *regs)
{
+ clk_regs = regs;
pxa25x_base_clocks_init();
pxa25x_dummy_clocks_init();
- return clk_pxa_cken_init(pxa25x_clocks, ARRAY_SIZE(pxa25x_clocks));
+ return clk_pxa_cken_init(pxa25x_clocks, ARRAY_SIZE(pxa25x_clocks), clk_regs);
}
static void __init pxa25x_dt_clocks_init(struct device_node *np)
{
- pxa25x_clocks_init();
+ pxa25x_clocks_init(ioremap(0x41300000ul, 0x10));
clk_pxa_dt_common_init(np);
}
CLK_OF_DECLARE(pxa25x_clks, "marvell,pxa250-core-clocks",
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 7b123105b5de..116c6ac666e3 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -7,16 +7,15 @@
* Heavily inspired from former arch/arm/mach-pxa/clock.c.
*/
#include <linux/clk-provider.h>
-#include <mach/pxa2xx-regs.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/of.h>
-
-#include <mach/smemc.h>
+#include <linux/soc/pxa/smemc.h>
#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"
+#include "clk-pxa2xx.h"
#define KHz 1000
#define MHz (1000 * 1000)
@@ -50,41 +49,19 @@ enum {
((T) ? CLKCFG_TURBO : 0))
#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L)
-#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
-#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
-
/* Define the refresh period in mSec for the SDRAM and the number of rows */
#define SDRAM_TREF 64 /* standard 64ms SDRAM */
+static void __iomem *clk_regs;
+
static const char * const get_freq_khz[] = {
"core", "run", "cpll", "memory",
"system_bus"
};
-static int get_sdram_rows(void)
-{
- static int sdram_rows;
- unsigned int drac2 = 0, drac0 = 0;
- u32 mdcnfg;
-
- if (sdram_rows)
- return sdram_rows;
-
- mdcnfg = readl_relaxed(MDCNFG);
-
- if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
- drac2 = MDCNFG_DRAC2(mdcnfg);
-
- if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
- drac0 = MDCNFG_DRAC0(mdcnfg);
-
- sdram_rows = 1 << (11 + max(drac0, drac2));
- return sdram_rows;
-}
-
static u32 mdrefr_dri(unsigned int freq_khz)
{
- u32 interval = freq_khz * SDRAM_TREF / get_sdram_rows();
+ u32 interval = freq_khz * SDRAM_TREF / pxa2xx_smemc_get_sdram_rows();
return (interval - 31) / 32;
}
@@ -124,7 +101,7 @@ unsigned int pxa27x_get_clk_frequency_khz(int info)
bool pxa27x_is_ppll_disabled(void)
{
- unsigned long ccsr = readl(CCSR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
return ccsr & (1 << CCCR_PPDIS_BIT);
}
@@ -226,7 +203,7 @@ static unsigned long clk_pxa27x_cpll_get_rate(struct clk_hw *hw,
unsigned long clkcfg;
unsigned int t, ht;
unsigned int l, L, n2, N;
- unsigned long ccsr = readl(CCSR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
t = clkcfg & (1 << 0);
@@ -260,7 +237,7 @@ static int clk_pxa27x_cpll_set_rate(struct clk_hw *hw, unsigned long rate,
if (i >= ARRAY_SIZE(pxa27x_freqs))
return -EINVAL;
- pxa2xx_cpll_change(&pxa27x_freqs[i], mdrefr_dri, MDREFR, CCCR);
+ pxa2xx_cpll_change(&pxa27x_freqs[i], mdrefr_dri, clk_regs + CCCR);
return 0;
}
@@ -271,8 +248,8 @@ static unsigned long clk_pxa27x_lcd_base_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
unsigned int l, osc_forced;
- unsigned long ccsr = readl(CCSR);
- unsigned long cccr = readl(CCCR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
+ unsigned long cccr = readl(clk_regs + CCCR);
l = ccsr & CCSR_L_MASK;
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
@@ -293,7 +270,7 @@ static unsigned long clk_pxa27x_lcd_base_get_rate(struct clk_hw *hw,
static u8 clk_pxa27x_lcd_base_get_parent(struct clk_hw *hw)
{
unsigned int osc_forced;
- unsigned long ccsr = readl(CCSR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
if (osc_forced)
@@ -322,7 +299,7 @@ static u8 clk_pxa27x_core_get_parent(struct clk_hw *hw)
{
unsigned long clkcfg;
unsigned int t, ht, osc_forced;
- unsigned long ccsr = readl(CCSR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
if (osc_forced)
@@ -359,7 +336,7 @@ MUX_OPS(clk_pxa27x_core, "core", CLK_SET_RATE_PARENT);
static unsigned long clk_pxa27x_run_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long ccsr = readl(CCSR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
unsigned int n2 = (ccsr & CCSR_N2_MASK) >> CCSR_N2_SHIFT;
return (parent_rate / n2) * 2;
@@ -382,7 +359,7 @@ static unsigned long clk_pxa27x_system_bus_get_rate(struct clk_hw *hw,
{
unsigned long clkcfg;
unsigned int b, osc_forced;
- unsigned long ccsr = readl(CCSR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
@@ -399,7 +376,7 @@ static unsigned long clk_pxa27x_system_bus_get_rate(struct clk_hw *hw,
static u8 clk_pxa27x_system_bus_get_parent(struct clk_hw *hw)
{
unsigned int osc_forced;
- unsigned long ccsr = readl(CCSR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
if (osc_forced)
@@ -415,8 +392,8 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
unsigned int a, l, osc_forced;
- unsigned long cccr = readl(CCCR);
- unsigned long ccsr = readl(CCSR);
+ unsigned long cccr = readl(clk_regs + CCCR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
a = cccr & (1 << CCCR_A_BIT);
@@ -434,8 +411,8 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
static u8 clk_pxa27x_memory_get_parent(struct clk_hw *hw)
{
unsigned int osc_forced, a;
- unsigned long cccr = readl(CCCR);
- unsigned long ccsr = readl(CCSR);
+ unsigned long cccr = readl(clk_regs + CCCR);
+ unsigned long ccsr = readl(clk_regs + CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
a = cccr & (1 << CCCR_A_BIT);
@@ -490,16 +467,17 @@ static void __init pxa27x_base_clocks_init(void)
clk_register_clk_pxa27x_lcd_base();
}
-int __init pxa27x_clocks_init(void)
+int __init pxa27x_clocks_init(void __iomem *regs)
{
+ clk_regs = regs;
pxa27x_base_clocks_init();
pxa27x_dummy_clocks_init();
- return clk_pxa_cken_init(pxa27x_clocks, ARRAY_SIZE(pxa27x_clocks));
+ return clk_pxa_cken_init(pxa27x_clocks, ARRAY_SIZE(pxa27x_clocks), regs);
}
static void __init pxa27x_dt_clocks_init(struct device_node *np)
{
- pxa27x_clocks_init();
+ pxa27x_clocks_init(ioremap(0x41300000ul, 0x10));
clk_pxa_dt_common_init(np);
}
CLK_OF_DECLARE(pxa_clks, "marvell,pxa270-clocks", pxa27x_dt_clocks_init);
diff --git a/drivers/clk/pxa/clk-pxa2xx.h b/drivers/clk/pxa/clk-pxa2xx.h
new file mode 100644
index 000000000000..94b03d0e32ff
--- /dev/null
+++ b/drivers/clk/pxa/clk-pxa2xx.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __CLK_PXA2XX_H
+#define __CLK_PXA2XX_H
+
+#define CCCR (0x0000) /* Core Clock Configuration Register */
+#define CCSR (0x000C) /* Core Clock Status Register */
+#define CKEN (0x0004) /* Clock Enable Register */
+#define OSCC (0x0008) /* Oscillator Configuration Register */
+
+#define CCCR_N_MASK 0x0380 /* Run Mode Frequency to Turbo Mode Frequency Multiplier */
+#define CCCR_M_MASK 0x0060 /* Memory Frequency to Run Mode Frequency Multiplier */
+#define CCCR_L_MASK 0x001f /* Crystal Frequency to Memory Frequency Multiplier */
+
+#define CCCR_CPDIS_BIT (31)
+#define CCCR_PPDIS_BIT (30)
+#define CCCR_LCD_26_BIT (27)
+#define CCCR_A_BIT (25)
+
+#define CCSR_N2_MASK CCCR_N_MASK
+#define CCSR_M_MASK CCCR_M_MASK
+#define CCSR_L_MASK CCCR_L_MASK
+#define CCSR_N2_SHIFT 7
+
+#define CKEN_AC97CONF (31) /* AC97 Controller Configuration */
+#define CKEN_CAMERA (24) /* Camera Interface Clock Enable */
+#define CKEN_SSP1 (23) /* SSP1 Unit Clock Enable */
+#define CKEN_MEMC (22) /* Memory Controller Clock Enable */
+#define CKEN_MEMSTK (21) /* Memory Stick Host Controller */
+#define CKEN_IM (20) /* Internal Memory Clock Enable */
+#define CKEN_KEYPAD (19) /* Keypad Interface Clock Enable */
+#define CKEN_USIM (18) /* USIM Unit Clock Enable */
+#define CKEN_MSL (17) /* MSL Unit Clock Enable */
+#define CKEN_LCD (16) /* LCD Unit Clock Enable */
+#define CKEN_PWRI2C (15) /* PWR I2C Unit Clock Enable */
+#define CKEN_I2C (14) /* I2C Unit Clock Enable */
+#define CKEN_FICP (13) /* FICP Unit Clock Enable */
+#define CKEN_MMC (12) /* MMC Unit Clock Enable */
+#define CKEN_USB (11) /* USB Unit Clock Enable */
+#define CKEN_ASSP (10) /* ASSP (SSP3) Clock Enable */
+#define CKEN_USBHOST (10) /* USB Host Unit Clock Enable */
+#define CKEN_OSTIMER (9) /* OS Timer Unit Clock Enable */
+#define CKEN_NSSP (9) /* NSSP (SSP2) Clock Enable */
+#define CKEN_I2S (8) /* I2S Unit Clock Enable */
+#define CKEN_BTUART (7) /* BTUART Unit Clock Enable */
+#define CKEN_FFUART (6) /* FFUART Unit Clock Enable */
+#define CKEN_STUART (5) /* STUART Unit Clock Enable */
+#define CKEN_HWUART (4) /* HWUART Unit Clock Enable */
+#define CKEN_SSP3 (4) /* SSP3 Unit Clock Enable */
+#define CKEN_SSP (3) /* SSP Unit Clock Enable */
+#define CKEN_SSP2 (3) /* SSP2 Unit Clock Enable */
+#define CKEN_AC97 (2) /* AC97 Unit Clock Enable */
+#define CKEN_PWM1 (1) /* PWM1 Clock Enable */
+#define CKEN_PWM0 (0) /* PWM0 Clock Enable */
+
+#define OSCC_OON (1 << 1) /* 32.768kHz OON (write-once only bit) */
+#define OSCC_OOK (1 << 0) /* 32.768kHz OOK (read-only bit) */
+
+#endif
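
The new clk-pxa2xx.h keeps the CCCR/CCSR layout that clk-pxa25x.c and clk-pxa27x.c now read through clk_regs + offset instead of the old <mach/...> globals. As a quick illustration of how those masks are meant to be used, here is a sketch that decodes the L and M multiplier fields from a raw CCCR value; decode_cccr() and the sample value are invented for the example, and the lookup tables copy only the first entries of L_clk_mult/M_clk_mult from clk-pxa25x.c.

#include <stdint.h>
#include <stdio.h>

/* Field layout copied from the new clk-pxa2xx.h. */
#define CCCR_M_MASK	0x0060	/* memory-to-run multiplier, bits 6:5 */
#define CCCR_L_MASK	0x001f	/* crystal-to-memory multiplier, bits 4:0 */

/* Lookup tables in the spirit of L_clk_mult/M_clk_mult in clk-pxa25x.c
 * (only the first few entries, enough for the demo). */
static const unsigned char L_mult[32] = { 0, 27, 32, 36, 40, 45 };
static const unsigned char M_mult[4] = { 0, 1, 2, 4 };

static void decode_cccr(uint32_t cccr)
{
	unsigned int l = cccr & CCCR_L_MASK;
	unsigned int m = (cccr & CCCR_M_MASK) >> 5;

	printf("L index %u -> x%u, M index %u -> x%u\n",
	       l, L_mult[l], m, M_mult[m]);
}

int main(void)
{
	decode_cccr(0x00000122);	/* sample value: M index 1, L index 2 */
	return 0;
}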
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 60db92772e72..42958a542662 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -14,8 +14,9 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/of.h>
-#include <mach/smemc.h>
-#include <mach/pxa3xx-regs.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/soc/pxa/smemc.h>
+#include <linux/clk/pxa.h>
#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"
@@ -23,6 +24,84 @@
#define KHz 1000
#define MHz (1000 * 1000)
+#define ACCR (0x0000) /* Application Subsystem Clock Configuration Register */
+#define ACSR (0x0004) /* Application Subsystem Clock Status Register */
+#define AICSR (0x0008) /* Application Subsystem Interrupt Control/Status Register */
+#define CKENA (0x000C) /* A Clock Enable Register */
+#define CKENB (0x0010) /* B Clock Enable Register */
+#define CKENC (0x0024) /* C Clock Enable Register */
+#define AC97_DIV (0x0014) /* AC97 clock divisor value register */
+
+#define ACCR_XPDIS (1 << 31) /* Core PLL Output Disable */
+#define ACCR_SPDIS (1 << 30) /* System PLL Output Disable */
+#define ACCR_D0CS (1 << 26) /* D0 Mode Clock Select */
+#define ACCR_PCCE (1 << 11) /* Power Mode Change Clock Enable */
+#define ACCR_DDR_D0CS (1 << 7) /* DDR SDRAM clock frequency in D0CS (PXA31x only) */
+
+#define ACCR_SMCFS_MASK (0x7 << 23) /* Static Memory Controller Frequency Select */
+#define ACCR_SFLFS_MASK (0x3 << 18) /* Frequency Select for Internal Memory Controller */
+#define ACCR_XSPCLK_MASK (0x3 << 16) /* Core Frequency during Frequency Change */
+#define ACCR_HSS_MASK (0x3 << 14) /* System Bus-Clock Frequency Select */
+#define ACCR_DMCFS_MASK (0x3 << 12) /* Dynamic Memory Controller Clock Frequency Select */
+#define ACCR_XN_MASK (0x7 << 8) /* Core PLL Turbo-Mode-to-Run-Mode Ratio */
+#define ACCR_XL_MASK (0x1f) /* Core PLL Run-Mode-to-Oscillator Ratio */
+
+#define ACCR_SMCFS(x) (((x) & 0x7) << 23)
+#define ACCR_SFLFS(x) (((x) & 0x3) << 18)
+#define ACCR_XSPCLK(x) (((x) & 0x3) << 16)
+#define ACCR_HSS(x) (((x) & 0x3) << 14)
+#define ACCR_DMCFS(x) (((x) & 0x3) << 12)
+#define ACCR_XN(x) (((x) & 0x7) << 8)
+#define ACCR_XL(x) ((x) & 0x1f)
+
+/*
+ * Clock Enable Bit
+ */
+#define CKEN_LCD 1 /* < LCD Clock Enable */
+#define CKEN_USBH 2 /* < USB host clock enable */
+#define CKEN_CAMERA 3 /* < Camera interface clock enable */
+#define CKEN_NAND 4 /* < NAND Flash Controller Clock Enable */
+#define CKEN_USB2 6 /* < USB 2.0 client clock enable. */
+#define CKEN_DMC 8 /* < Dynamic Memory Controller clock enable */
+#define CKEN_SMC 9 /* < Static Memory Controller clock enable */
+#define CKEN_ISC 10 /* < Internal SRAM Controller clock enable */
+#define CKEN_BOOT 11 /* < Boot rom clock enable */
+#define CKEN_MMC1 12 /* < MMC1 Clock enable */
+#define CKEN_MMC2 13 /* < MMC2 clock enable */
+#define CKEN_KEYPAD 14 /* < Keypad Controller Clock Enable */
+#define CKEN_CIR 15 /* < Consumer IR Clock Enable */
+#define CKEN_USIM0 17 /* < USIM[0] Clock Enable */
+#define CKEN_USIM1 18 /* < USIM[1] Clock Enable */
+#define CKEN_TPM 19 /* < TPM clock enable */
+#define CKEN_UDC 20 /* < UDC clock enable */
+#define CKEN_BTUART 21 /* < BTUART clock enable */
+#define CKEN_FFUART 22 /* < FFUART clock enable */
+#define CKEN_STUART 23 /* < STUART clock enable */
+#define CKEN_AC97 24 /* < AC97 clock enable */
+#define CKEN_TOUCH 25 /* < Touch screen Interface Clock Enable */
+#define CKEN_SSP1 26 /* < SSP1 clock enable */
+#define CKEN_SSP2 27 /* < SSP2 clock enable */
+#define CKEN_SSP3 28 /* < SSP3 clock enable */
+#define CKEN_SSP4 29 /* < SSP4 clock enable */
+#define CKEN_MSL0 30 /* < MSL0 clock enable */
+#define CKEN_PWM0 32 /* < PWM[0] clock enable */
+#define CKEN_PWM1 33 /* < PWM[1] clock enable */
+#define CKEN_I2C 36 /* < I2C clock enable */
+#define CKEN_INTC 38 /* < Interrupt controller clock enable */
+#define CKEN_GPIO 39 /* < GPIO clock enable */
+#define CKEN_1WIRE 40 /* < 1-wire clock enable */
+#define CKEN_HSIO2 41 /* < HSIO2 clock enable */
+#define CKEN_MINI_IM 48 /* < Mini-IM */
+#define CKEN_MINI_LCD 49 /* < Mini LCD */
+
+#define CKEN_MMC3 5 /* < MMC3 Clock Enable */
+#define CKEN_MVED 43 /* < MVED clock enable */
+
+/* Note: GCU clock enable bit differs on PXA300/PXA310 and PXA320 */
+#define CKEN_PXA300_GCU 42 /* Graphics controller clock enable */
+#define CKEN_PXA320_GCU 7 /* Graphics controller clock enable */
+
+
enum {
PXA_CORE_60Mhz = 0,
PXA_CORE_RUN,
@@ -39,12 +118,12 @@ static unsigned char hss_mult[4] = { 8, 12, 16, 24 };
/* crystal frequency to static memory controller multiplier (SMCFS) */
static unsigned int smcfs_mult[8] = { 6, 0, 8, 0, 0, 16, };
-static unsigned int df_clkdiv[4] = { 1, 2, 4, 1 };
-
static const char * const get_freq_khz[] = {
"core", "ring_osc_60mhz", "run", "cpll", "system_bus"
};
+static void __iomem *clk_regs;
+
/*
* Get the clock frequency as reflected by ACSR and the turbo flag.
* We assume these values have been applied via a fcs.
@@ -78,12 +157,27 @@ unsigned int pxa3xx_get_clk_frequency_khz(int info)
return (unsigned int)clks[0] / KHz;
}
+void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask)
+{
+ u32 accr = readl(clk_regs + ACCR);
+
+ accr &= ~disable;
+ accr |= enable;
+
+ writel(accr, clk_regs + ACCR);
+ if (xclkcfg)
+ __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg));
+
+ while ((readl(clk_regs + ACSR) & mask) != (accr & mask))
+ cpu_relax();
+}
+
static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
unsigned long ac97_div, rate;
- ac97_div = AC97_DIV;
+ ac97_div = readl(clk_regs + AC97_DIV);
/* This may lose precision for some rates but won't for the
* standard 24.576MHz.
@@ -100,18 +194,18 @@ RATE_RO_OPS(clk_pxa3xx_ac97, "ac97");
static unsigned long clk_pxa3xx_smemc_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long acsr = ACSR;
- unsigned long memclkcfg = __raw_readl(MEMCLKCFG);
+ unsigned long acsr = readl(clk_regs + ACSR);
return (parent_rate / 48) * smcfs_mult[(acsr >> 23) & 0x7] /
- df_clkdiv[(memclkcfg >> 16) & 0x3];
+ pxa3xx_smemc_get_memclkdiv();
+
}
PARENTS(clk_pxa3xx_smemc) = { "spll_624mhz" };
RATE_RO_OPS(clk_pxa3xx_smemc, "smemc");
static bool pxa3xx_is_ring_osc_forced(void)
{
- unsigned long acsr = ACSR;
+ unsigned long acsr = readl(clk_regs + ACSR);
return acsr & ACCR_D0CS;
}
@@ -123,7 +217,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? CKENB : CKENA)
#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
div_hp, bit, is_lp, flags) \
PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \
@@ -191,7 +285,7 @@ static struct desc_clk_cken pxa93x_clocks[] __initdata = {
static unsigned long clk_pxa3xx_system_bus_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long acsr = ACSR;
+ unsigned long acsr = readl(clk_regs + ACSR);
unsigned int hss = (acsr >> 14) & 0x3;
if (pxa3xx_is_ring_osc_forced())
@@ -238,7 +332,7 @@ MUX_RO_RATE_RO_OPS(clk_pxa3xx_core, "core");
static unsigned long clk_pxa3xx_run_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long acsr = ACSR;
+ unsigned long acsr = readl(clk_regs + ACSR);
unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
unsigned int t, xclkcfg;
@@ -254,7 +348,7 @@ RATE_RO_OPS(clk_pxa3xx_run, "run");
static unsigned long clk_pxa3xx_cpll_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long acsr = ACSR;
+ unsigned long acsr = readl(clk_regs + ACSR);
unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
unsigned int xl = acsr & ACCR_XL_MASK;
unsigned int t, xclkcfg;
@@ -325,7 +419,7 @@ static void __init pxa3xx_dummy_clocks_init(void)
}
}
-static void __init pxa3xx_base_clocks_init(void)
+static void __init pxa3xx_base_clocks_init(void __iomem *oscc_reg)
{
struct clk *clk;
@@ -335,34 +429,35 @@ static void __init pxa3xx_base_clocks_init(void)
clk_register_clk_pxa3xx_ac97();
clk_register_clk_pxa3xx_smemc();
clk = clk_register_gate(NULL, "CLK_POUT",
- "osc_13mhz", 0, OSCC, 11, 0, NULL);
+ "osc_13mhz", 0, oscc_reg, 11, 0, NULL);
clk_register_clkdev(clk, "CLK_POUT", NULL);
clkdev_pxa_register(CLK_OSTIMER, "OSTIMER0", NULL,
clk_register_fixed_factor(NULL, "os-timer0",
"osc_13mhz", 0, 1, 4));
}
-int __init pxa3xx_clocks_init(void)
+int __init pxa3xx_clocks_init(void __iomem *regs, void __iomem *oscc_reg)
{
int ret;
- pxa3xx_base_clocks_init();
+ clk_regs = regs;
+ pxa3xx_base_clocks_init(oscc_reg);
pxa3xx_dummy_clocks_init();
- ret = clk_pxa_cken_init(pxa3xx_clocks, ARRAY_SIZE(pxa3xx_clocks));
+ ret = clk_pxa_cken_init(pxa3xx_clocks, ARRAY_SIZE(pxa3xx_clocks), regs);
if (ret)
return ret;
if (cpu_is_pxa320())
return clk_pxa_cken_init(pxa320_clocks,
- ARRAY_SIZE(pxa320_clocks));
+ ARRAY_SIZE(pxa320_clocks), regs);
if (cpu_is_pxa300() || cpu_is_pxa310())
return clk_pxa_cken_init(pxa300_310_clocks,
- ARRAY_SIZE(pxa300_310_clocks));
- return clk_pxa_cken_init(pxa93x_clocks, ARRAY_SIZE(pxa93x_clocks));
+ ARRAY_SIZE(pxa300_310_clocks), regs);
+ return clk_pxa_cken_init(pxa93x_clocks, ARRAY_SIZE(pxa93x_clocks), regs);
}
static void __init pxa3xx_dt_clocks_init(struct device_node *np)
{
- pxa3xx_clocks_init();
+ pxa3xx_clocks_init(ioremap(0x41340000, 0x10), ioremap(0x41350000, 4));
clk_pxa_dt_common_init(np);
}
CLK_OF_DECLARE(pxa_clks, "marvell,pxa300-clocks", pxa3xx_dt_clocks_init);
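
A minimal sketch of the read-modify-write-then-poll pattern that pxa3xx_clk_update_accr() now performs through the ioremapped base, assuming a memory-mapped ACCR/ACSR pair as in the hunk above; register access is modelled with plain pointers and the names are illustrative only:

/*
 * Illustrative model of the ACCR update sequence: clear the "disable" bits,
 * set the "enable" bits, then spin until the status register agrees under
 * "mask". Plain pointers stand in for readl()/writel().
 */
#include <stdint.h>

static void accr_update(volatile uint32_t *accr, volatile uint32_t *acsr,
			uint32_t disable, uint32_t enable, uint32_t mask)
{
	uint32_t val = *accr;

	val &= ~disable;
	val |= enable;
	*accr = val;

	/* Wait for the hardware to report the new configuration. */
	while ((*acsr & mask) != (val & mask))
		;
}
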
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index d01436be6d7a..bc4dcf356d82 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -419,6 +419,15 @@ config SC_GCC_8180X
Say Y if you want to use peripheral devices such as UART, SPI,
I2C, USB, UFS, SDCC, etc.
+config SC_GCC_8280XP
+ tristate "SC8280XP Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SC8280XP devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, UFS, SDCC, etc.
+
config SC_GPUCC_7180
tristate "SC7180 Graphics Clock Controller"
select SC_GCC_7180
@@ -452,6 +461,16 @@ config SC_LPASS_CORECC_7180
Say Y if you want to use LPASS clocks and power domains of the LPASS
core clock controller.
+config SC_LPASS_CORECC_7280
+ tristate "SC7280 LPASS Core & Audio Clock Controller"
+ select SC_GCC_7280
+ select QCOM_GDSC
+ help
+ Support for the LPASS(Low Power Audio Subsystem) core and audio clock
+ controller on SC7280 devices.
+ Say Y if you want to use LPASS clocks and power domains of the LPASS
+ core clock controller.
+
config SC_MSS_7180
tristate "SC7180 Modem Clock Controller"
select SC_GCC_7180
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 671cf5821af1..36789f5233ef 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -67,10 +67,12 @@ obj-$(CONFIG_SC_DISPCC_7280) += dispcc-sc7280.o
obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
obj-$(CONFIG_SC_GCC_7280) += gcc-sc7280.o
obj-$(CONFIG_SC_GCC_8180X) += gcc-sc8180x.o
+obj-$(CONFIG_SC_GCC_8280XP) += gcc-sc8280xp.o
obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
obj-$(CONFIG_SC_GPUCC_7280) += gpucc-sc7280.o
obj-$(CONFIG_SC_LPASSCC_7280) += lpasscc-sc7280.o
obj-$(CONFIG_SC_LPASS_CORECC_7180) += lpasscorecc-sc7180.o
+obj-$(CONFIG_SC_LPASS_CORECC_7280) += lpasscorecc-sc7280.o lpassaudiocc-sc7280.o
obj-$(CONFIG_SC_MSS_7180) += mss-sc7180.o
obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o
obj-$(CONFIG_SC_VIDEOCC_7280) += videocc-sc7280.o
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 00cea508d49e..012e745794fd 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -140,6 +140,7 @@ extern const struct clk_ops clk_dyn_rcg_ops;
* @freq_tbl: frequency table
* @clkr: regmap clock handle
* @cfg_off: defines the cfg register offset from the CMD_RCGR + CFG_REG
+ * @parked_cfg: cached value of the CFG register for parked RCGs
*/
struct clk_rcg2 {
u32 cmd_rcgr;
@@ -150,6 +151,7 @@ struct clk_rcg2 {
const struct freq_tbl *freq_tbl;
struct clk_regmap clkr;
u8 cfg_off;
+ u32 parked_cfg;
};
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index e9c357309fd9..8e5dce09d162 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -73,16 +73,11 @@ static int clk_rcg2_is_enabled(struct clk_hw *hw)
return (cmd & CMD_ROOT_OFF) == 0;
}
-static u8 clk_rcg2_get_parent(struct clk_hw *hw)
+static u8 __clk_rcg2_get_parent(struct clk_hw *hw, u32 cfg)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
int num_parents = clk_hw_get_num_parents(hw);
- u32 cfg;
- int i, ret;
-
- ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
- if (ret)
- goto err;
+ int i;
cfg &= CFG_SRC_SEL_MASK;
cfg >>= CFG_SRC_SEL_SHIFT;
@@ -91,12 +86,27 @@ static u8 clk_rcg2_get_parent(struct clk_hw *hw)
if (cfg == rcg->parent_map[i].cfg)
return i;
-err:
pr_debug("%s: Clock %s has invalid parent, using default.\n",
__func__, clk_hw_get_name(hw));
return 0;
}
+static u8 clk_rcg2_get_parent(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg;
+ int ret;
+
+ ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
+ if (ret) {
+ pr_debug("%s: Unable to read CFG register for %s\n",
+ __func__, clk_hw_get_name(hw));
+ return 0;
+ }
+
+ return __clk_rcg2_get_parent(hw, cfg);
+}
+
static int update_config(struct clk_rcg2 *rcg)
{
int count, ret;
@@ -163,12 +173,10 @@ calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
}
static unsigned long
-clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+__clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
-
- regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
+ u32 hid_div, m = 0, n = 0, mode = 0, mask;
if (rcg->mnd_width) {
mask = BIT(rcg->mnd_width) - 1;
@@ -189,6 +197,17 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return calc_rate(parent_rate, m, n, mode, hid_div);
}
+static unsigned long
+clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg;
+
+ regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
+
+ return __clk_rcg2_recalc_rate(hw, parent_rate, cfg);
+}
+
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
struct clk_rate_request *req,
enum freq_policy policy)
@@ -262,7 +281,8 @@ static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}
-static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
+ u32 *_cfg)
{
u32 cfg, mask, d_val, not2d_val, n_minus_m;
struct clk_hw *hw = &rcg->clkr.hw;
@@ -304,15 +324,27 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
if (rcg->mnd_width && f->n && (f->m != f->n))
cfg |= CFG_MODE_DUAL_EDGE;
- return regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
- mask, cfg);
+
+ *_cfg &= ~mask;
+ *_cfg |= cfg;
+
+ return 0;
}
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
+ u32 cfg;
int ret;
- ret = __clk_rcg2_configure(rcg, f);
+ ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
+ if (ret)
+ return ret;
+
+ ret = __clk_rcg2_configure(rcg, f, &cfg);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
if (ret)
return ret;
@@ -979,11 +1011,12 @@ static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
/*
- * In case clock is disabled, update the CFG, M, N and D registers
- * and don't hit the update bit of CMD register.
+ * In case clock is disabled, update the M, N and D registers, cache
+ * the CFG value in parked_cfg and don't hit the update bit of CMD
+ * register.
*/
- if (!__clk_is_enabled(hw->clk))
- return __clk_rcg2_configure(rcg, f);
+ if (!clk_hw_is_enabled(hw))
+ return __clk_rcg2_configure(rcg, f, &rcg->parked_cfg);
return clk_rcg2_shared_force_enable_clear(hw, f);
}
@@ -1007,6 +1040,11 @@ static int clk_rcg2_shared_enable(struct clk_hw *hw)
if (ret)
return ret;
+ /* Write back the stored configuration corresponding to current rate */
+ ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, rcg->parked_cfg);
+ if (ret)
+ return ret;
+
ret = update_config(rcg);
if (ret)
return ret;
@@ -1017,13 +1055,12 @@ static int clk_rcg2_shared_enable(struct clk_hw *hw)
static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- u32 cfg;
/*
* Store current configuration as switching to safe source would clear
* the SRC and DIV of CFG register
*/
- regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);
/*
* Park the RCG at a safe configuration - sourced off of safe source.
@@ -1041,17 +1078,52 @@ static void clk_rcg2_shared_disable(struct clk_hw *hw)
update_config(rcg);
clk_rcg2_clear_force_enable(hw);
+}
- /* Write back the stored configuration corresponding to current rate */
- regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
+static u8 clk_rcg2_shared_get_parent(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ /* If the shared rcg is parked use the cached cfg instead */
+ if (!clk_hw_is_enabled(hw))
+ return __clk_rcg2_get_parent(hw, rcg->parked_cfg);
+
+ return clk_rcg2_get_parent(hw);
+}
+
+static int clk_rcg2_shared_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ /* If the shared rcg is parked only update the cached cfg */
+ if (!clk_hw_is_enabled(hw)) {
+ rcg->parked_cfg &= ~CFG_SRC_SEL_MASK;
+ rcg->parked_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+
+ return 0;
+ }
+
+ return clk_rcg2_set_parent(hw, index);
+}
+
+static unsigned long
+clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ /* If the shared rcg is parked use the cached cfg instead */
+ if (!clk_hw_is_enabled(hw))
+ return __clk_rcg2_recalc_rate(hw, parent_rate, rcg->parked_cfg);
+
+ return clk_rcg2_recalc_rate(hw, parent_rate);
}
const struct clk_ops clk_rcg2_shared_ops = {
.enable = clk_rcg2_shared_enable,
.disable = clk_rcg2_shared_disable,
- .get_parent = clk_rcg2_get_parent,
- .set_parent = clk_rcg2_set_parent,
- .recalc_rate = clk_rcg2_recalc_rate,
+ .get_parent = clk_rcg2_shared_get_parent,
+ .set_parent = clk_rcg2_shared_set_parent,
+ .recalc_rate = clk_rcg2_shared_recalc_rate,
.determine_rate = clk_rcg2_determine_rate,
.set_rate = clk_rcg2_shared_set_rate,
.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
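
A minimal model, not the driver itself, of the parked-CFG caching idea introduced above: while a shared RCG is parked at its safe source, the source/divider configuration lives in parked_cfg and is only written back to hardware on enable, so get_parent and recalc_rate can be answered from the cache. The struct and function names below are illustrative assumptions:

/*
 * Illustrative model of the parked-CFG cache: reads go to the cache while
 * the clock is parked, the live CFG is captured on park, and the cached
 * value is restored on unpark. Not the driver's actual data structures.
 */
#include <stdbool.h>
#include <stdint.h>

struct parked_rcg {
	volatile uint32_t *cfg_reg;	/* hardware CFG register */
	uint32_t parked_cfg;		/* cached CFG while parked */
	bool enabled;
};

static uint32_t rcg_read_cfg(struct parked_rcg *rcg)
{
	return rcg->enabled ? *rcg->cfg_reg : rcg->parked_cfg;
}

static void rcg_park(struct parked_rcg *rcg, uint32_t safe_cfg)
{
	rcg->parked_cfg = *rcg->cfg_reg;	/* remember the live configuration */
	*rcg->cfg_reg = safe_cfg;		/* switch to the safe source */
	rcg->enabled = false;
}

static void rcg_unpark(struct parked_rcg *rcg)
{
	*rcg->cfg_reg = rcg->parked_cfg;	/* restore the cached configuration */
	rcg->enabled = true;
}
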
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index afc6dc930011..10b4e6d8d10f 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -563,17 +563,19 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8974 = {
.num_clks = ARRAY_SIZE(msm8974_clks),
};
-DEFINE_CLK_SMD_RPM(msm8976, mmssnoc_ahb_clk, mmssnoc_ahb_a_clk,
- QCOM_SMD_RPM_BUS_CLK, 2);
DEFINE_CLK_SMD_RPM(msm8976, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
static struct clk_smd_rpm *msm8976_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
[RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
[RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
[RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
[RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
[RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
[RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
+ [RPM_SMD_SYSMMNOC_CLK] = &msm8936_sysmmnoc_clk,
+ [RPM_SMD_SYSMMNOC_A_CLK] = &msm8936_sysmmnoc_a_clk,
[RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
[RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
[RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
@@ -586,8 +588,6 @@ static struct clk_smd_rpm *msm8976_clks[] = {
[RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
[RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
[RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
- [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8976_mmssnoc_ahb_clk,
- [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8976_mmssnoc_ahb_a_clk,
[RPM_SMD_DIV_CLK2] = &msm8974_div_clk2,
[RPM_SMD_DIV_A_CLK2] = &msm8974_div_a_clk2,
[RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
diff --git a/drivers/clk/qcom/gcc-msm8976.c b/drivers/clk/qcom/gcc-msm8976.c
index a8b15814933e..6b112984694c 100644
--- a/drivers/clk/qcom/gcc-msm8976.c
+++ b/drivers/clk/qcom/gcc-msm8976.c
@@ -1486,7 +1486,7 @@ static const struct clk_init_data sdcc1_apps_clk_src_8976v1_1_init = {
.name = "sdcc1_apps_clk_src",
.parent_data = gcc_parent_data_v1_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_v1_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
};
static struct clk_rcg2 sdcc1_apps_clk_src = {
@@ -1499,7 +1499,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.name = "sdcc1_apps_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -1547,7 +1547,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.name = "sdcc2_apps_clk_src",
.parent_data = gcc_parent_data_4_8,
.num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -4056,6 +4056,7 @@ static const struct qcom_reset_map gcc_msm8976_resets[] = {
[RST_CAMSS_CSI_VFE1_BCR] = { 0x58070 },
[RST_CAMSS_VFE1_BCR] = { 0x5807c },
[RST_CAMSS_CPP_BCR] = { 0x58080 },
+ [RST_MSS_BCR] = { 0x71000 },
};
static struct gdsc *gcc_msm8976_gdscs[] = {
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 407e2c5caea4..33473c52eb90 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -2833,6 +2833,58 @@ static struct clk_branch gcc_rx1_usb2_clkref_clk = {
},
};
+static struct clk_branch gcc_im_sleep_clk = {
+ .halt_reg = 0x4300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gcc_im_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch aggre2_snoc_north_axi_clk = {
+ .halt_reg = 0x83010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x83010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "aggre2_snoc_north_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ssc_xo_clk = {
+ .halt_reg = 0x63018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x63018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "ssc_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ssc_cnoc_ahbs_clk = {
+ .halt_reg = 0x6300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "ssc_cnoc_ahbs_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc pcie_0_gdsc = {
.gdscr = 0x6b004,
.gds_hw_ctrl = 0x0,
@@ -3036,6 +3088,10 @@ static struct clk_regmap *gcc_msm8998_clocks[] = {
[GCC_MSS_MNOC_BIMC_AXI_CLK] = &gcc_mss_mnoc_bimc_axi_clk.clkr,
[GCC_MMSS_GPLL0_CLK] = &gcc_mmss_gpll0_clk.clkr,
[HMSS_GPLL0_CLK_SRC] = &hmss_gpll0_clk_src.clkr,
+ [GCC_IM_SLEEP] = &gcc_im_sleep_clk.clkr,
+ [AGGRE2_SNOC_NORTH_AXI] = &aggre2_snoc_north_axi_clk.clkr,
+ [SSC_XO] = &ssc_xo_clk.clkr,
+ [SSC_CNOC_AHBS_CLK] = &ssc_cnoc_ahbs_clk.clkr,
};
static struct gdsc *gcc_msm8998_gdscs[] = {
diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
new file mode 100644
index 000000000000..4b894442fdf5
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sc8280xp.c
@@ -0,0 +1,7488 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sc8280xp.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_UFS_PHY_RX_SYMBOL_0_CLK,
+ DT_UFS_PHY_RX_SYMBOL_1_CLK,
+ DT_UFS_PHY_TX_SYMBOL_0_CLK,
+ DT_UFS_CARD_RX_SYMBOL_0_CLK,
+ DT_UFS_CARD_RX_SYMBOL_1_CLK,
+ DT_UFS_CARD_TX_SYMBOL_0_CLK,
+ DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+ DT_GCC_USB4_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_PHY_DP_GMUX_CLK_SRC,
+ DT_GCC_USB4_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ DT_QUSB4PHY_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_GCC_USB4_RX1_CLK,
+ DT_USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK,
+ DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_DP_GMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ DT_QUSB4PHY_1_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_1_GCC_USB4_RX1_CLK,
+ DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK,
+ DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK,
+ DT_PCIE_2A_PIPE_CLK,
+ DT_PCIE_2B_PIPE_CLK,
+ DT_PCIE_3A_PIPE_CLK,
+ DT_PCIE_3B_PIPE_CLK,
+ DT_PCIE_4_PIPE_CLK,
+ DT_RXC0_REF_CLK,
+ DT_RXC1_REF_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL2_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL8_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC,
+ P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC,
+ P_GCC_USB4_1_PHY_DP_GMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC,
+ P_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_PHY_DP_GMUX_CLK_SRC,
+ P_GCC_USB4_PHY_PCIE_PIPE_CLK_SRC,
+ P_GCC_USB4_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_PCIE_2A_PIPE_CLK,
+ P_PCIE_2B_PIPE_CLK,
+ P_PCIE_3A_PIPE_CLK,
+ P_PCIE_3B_PIPE_CLK,
+ P_PCIE_4_PIPE_CLK,
+ P_QUSB4PHY_1_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_1_GCC_USB4_RX1_CLK,
+ P_QUSB4PHY_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_GCC_USB4_RX1_CLK,
+ P_RXC0_REF_CLK,
+ P_RXC1_REF_CLK,
+ P_SLEEP_CLK,
+ P_UFS_CARD_RX_SYMBOL_0_CLK,
+ P_UFS_CARD_RX_SYMBOL_1_CLK,
+ P_UFS_CARD_TX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK,
+ P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK,
+ P_USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK,
+ P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ P_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+};
+
+static const struct clk_parent_data gcc_parent_data_tcxo = { .index = DT_BI_TCXO };
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &gcc_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_5lpe_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll2 = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll2",
+ .parent_data = &gcc_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &gcc_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x1a000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &gcc_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll8 = {
+ .offset = 0x1b000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll8",
+ .parent_data = &gcc_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x1c000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &gcc_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_phy_pcie_pipe_clk_src;
+static struct clk_rcg2 gcc_usb4_phy_pcie_pipe_clk_src;
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL8_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll8.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL2_OUT_MAIN, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll2.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_RXC0_REF_CLK, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .index = DT_RXC0_REF_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_RXC1_REF_CLK, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .index = DT_RXC1_REF_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_PCIE_2A_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_PCIE_2A_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_PCIE_2B_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_PCIE_2B_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_PCIE_3A_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_PCIE_3A_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_PCIE_3B_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .index = DT_PCIE_3B_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_PCIE_4_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .index = DT_PCIE_4_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_15[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_15[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_16[] = {
+ { P_UFS_CARD_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_16[] = {
+ { .index = DT_UFS_CARD_RX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_17[] = {
+ { P_UFS_CARD_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_17[] = {
+ { .index = DT_UFS_CARD_RX_SYMBOL_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_18[] = {
+ { P_UFS_CARD_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_18[] = {
+ { .index = DT_UFS_CARD_TX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_19[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_19[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_20[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_20[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_21[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_21[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_22[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_22[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_23[] = {
+ { P_USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_23[] = {
+ { .index = DT_USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0xf060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_22,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_22,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_22),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
+ .reg = 0x10060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_23,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_23,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_23),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_24[] = {
+ { P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_24[] = {
+ { .index = DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_25[] = {
+ { P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_25[] = {
+ { .index = DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_26[] = {
+ { P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_26[] = {
+ { .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static const struct parent_map gcc_parent_map_27[] = {
+ { P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_27[] = {
+ { .hw = &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static const struct parent_map gcc_parent_map_28[] = {
+ { P_GCC_USB4_1_PHY_DP_GMUX_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_28[] = {
+ { .index = DT_GCC_USB4_1_PHY_DP_GMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_29[] = {
+ { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_29[] = {
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_30[] = {
+ { P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_30[] = {
+ { .index = DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .hw = &gcc_usb4_1_phy_pcie_pipe_clk_src.clkr.hw },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_pcie_pipegmux_clk_src = {
+ .reg = 0xb80dc,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_30,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipegmux_clk_src",
+ .parent_data = gcc_parent_data_30,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_30),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_31[] = {
+ { P_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_31[] = {
+ { .hw = &gcc_usb4_1_phy_pcie_pipegmux_clk_src.clkr.hw },
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_32[] = {
+ { P_QUSB4PHY_1_GCC_USB4_RX0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_32[] = {
+ { .index = DT_QUSB4PHY_1_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_33[] = {
+ { P_QUSB4PHY_1_GCC_USB4_RX1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_33[] = {
+ { .index = DT_QUSB4PHY_1_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_34[] = {
+ { P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_34[] = {
+ { .index = DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_35[] = {
+ { P_GCC_USB4_PHY_DP_GMUX_CLK_SRC, 0 },
+ { P_USB4_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_35[] = {
+ { .index = DT_GCC_USB4_PHY_DP_GMUX_CLK_SRC },
+ { .index = DT_USB4_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_36[] = {
+ { P_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_36[] = {
+ { .index = DT_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_37[] = {
+ { P_GCC_USB4_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_GCC_USB4_PHY_PCIE_PIPE_CLK_SRC, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_37[] = {
+ { .index = DT_GCC_USB4_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .hw = &gcc_usb4_phy_pcie_pipe_clk_src.clkr.hw },
+};
+
+static struct clk_regmap_mux gcc_usb4_phy_pcie_pipegmux_clk_src = {
+ .reg = 0x2a0dc,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_37,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_phy_pcie_pipegmux_clk_src",
+ .parent_data = gcc_parent_data_37,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_37),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_38[] = {
+ { P_GCC_USB4_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_38[] = {
+ { .hw = &gcc_usb4_phy_pcie_pipegmux_clk_src.clkr.hw },
+ { .index = DT_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_39[] = {
+ { P_QUSB4PHY_GCC_USB4_RX0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_39[] = {
+ { .index = DT_QUSB4PHY_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_40[] = {
+ { P_QUSB4PHY_GCC_USB4_RX1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_40[] = {
+ { .index = DT_QUSB4PHY_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_41[] = {
+ { P_GCC_USB4_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_41[] = {
+ { .index = DT_GCC_USB4_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static struct clk_regmap_mux gcc_pcie_2a_pipe_clk_src = {
+ .reg = 0x9d05c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2a_pipe_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_2b_pipe_clk_src = {
+ .reg = 0x9e05c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_11,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2b_pipe_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_3a_pipe_clk_src = {
+ .reg = 0xa005c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_12,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_pipe_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_3b_pipe_clk_src = {
+ .reg = 0xa205c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_13,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_4_pipe_clk_src = {
+ .reg = 0x6b05c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_14,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_14),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_card_rx_symbol_0_clk_src = {
+ .reg = 0x75058,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_16,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_16,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_16),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_card_rx_symbol_1_clk_src = {
+ .reg = 0x750c8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_17,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_17,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_17),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_card_tx_symbol_0_clk_src = {
+ .reg = 0x75048,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_18,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_18,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_18),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x77058,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_19,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_19,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_19),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770c8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_20,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_20,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_20),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x77048,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_21,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_21,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_21),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb34_prim_phy_pipe_clk_src = {
+ .reg = 0xf064,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_26,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_26,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_26),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb34_sec_phy_pipe_clk_src = {
+ .reg = 0x10064,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_27,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_27,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_27),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_mp_phy_pipe_0_clk_src = {
+ .reg = 0xab060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_24,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_0_clk_src",
+ .parent_data = gcc_parent_data_24,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_24),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_mp_phy_pipe_1_clk_src = {
+ .reg = 0xab068,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_25,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_1_clk_src",
+ .parent_data = gcc_parent_data_25,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_25),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_dp_clk_src = {
+ .reg = 0xb8050,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_28,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_dp_clk_src",
+ .parent_data = gcc_parent_data_28,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_28),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0xb80b0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_29,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_29,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_29),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0xb80e0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_31,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_31,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_31),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_rx0_clk_src = {
+ .reg = 0xb8090,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_32,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_32,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_32),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_rx1_clk_src = {
+ .reg = 0xb809c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_33,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_33,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_33),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_sys_clk_src = {
+ .reg = 0xb80c0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_34,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_34,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_34),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_phy_dp_clk_src = {
+ .reg = 0x2a050,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_35,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_phy_dp_clk_src",
+ .parent_data = gcc_parent_data_35,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_35),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0x2a0b0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_36,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_36,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_36),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0x2a0e0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_38,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_38,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_38),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_phy_rx0_clk_src = {
+ .reg = 0x2a090,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_39,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_39,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_39),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_phy_rx1_clk_src = {
+ .reg = 0x2a09c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_40,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_40,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_40),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_phy_sys_clk_src = {
+ .reg = 0x2a0c0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_41,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_41,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_41),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac0_ptp_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(125000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(230400000, P_GCC_GPLL4_OUT_MAIN, 3.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac0_ptp_clk_src = {
+ .cmd_rcgr = 0xaa020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_emac0_ptp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_ptp_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac0_rgmii_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(125000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GCC_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac0_rgmii_clk_src = {
+ .cmd_rcgr = 0xaa040,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_emac0_rgmii_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_rgmii_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_emac1_ptp_clk_src = {
+ .cmd_rcgr = 0xba020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_emac0_ptp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_ptp_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_emac1_rgmii_clk_src = {
+ .cmd_rcgr = 0xba040,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_emac0_rgmii_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_rgmii_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp4_clk_src = {
+ .cmd_rcgr = 0xc2004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp4_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp5_clk_src = {
+ .cmd_rcgr = 0xc3004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp5_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0xa4054,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xa403c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_1_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x8d054,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x8d03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2a_aux_clk_src = {
+ .cmd_rcgr = 0x9d064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2a_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2a_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x9d044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2a_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2b_aux_clk_src = {
+ .cmd_rcgr = 0x9e064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2b_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2b_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x9e044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2b_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3a_aux_clk_src = {
+ .cmd_rcgr = 0xa0064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3a_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xa0044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3b_aux_clk_src = {
+ .cmd_rcgr = 0xa2064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3b_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xa2044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_4_aux_clk_src = {
+ .cmd_rcgr = 0x6b064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_4_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x6b044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_rscc_xo_clk_src = {
+ .cmd_rcgr = 0xae00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_xo_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_EVEN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
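+/*
+ * QUPv3 serial-engine clock sources.  All serial engines share the M/N
+ * frequency tables below; each one keeps its own named clk_init_data so
+ * the RCG can be referenced individually (for example if the engine is
+ * later registered for DFS).
+ */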
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x173a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x174d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x17608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s6_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x17868,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s6_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x17998,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s6_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x183a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x184d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x18608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x18868,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s6_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x18998,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s6_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x1e148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x1e278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x1e3a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x1e4d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x1e608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x1e738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
+ .cmd_rcgr = 0x1e868,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s6_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
+ .cmd_rcgr = 0x1e998,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s6_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_15,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_15,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_15),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
+ .cmd_rcgr = 0x75024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = {
+ .cmd_rcgr = 0x7506c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = {
+ .cmd_rcgr = 0x750a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = {
+ .cmd_rcgr = 0x75084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7706c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mp_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_mp_master_clk_src = {
+ .cmd_rcgr = 0xab020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_mp_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xab038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0x10020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x10038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_mp_phy_aux_clk_src = {
+ .cmd_rcgr = 0xab06c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf068,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0x10068,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_1_master_clk_src[] = {
+ F(85714286, P_GCC_GPLL0_OUT_EVEN, 3.5, 0, 0),
+ F(175000000, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0),
+ F(350000000, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_1_master_clk_src = {
+ .cmd_rcgr = 0xb8018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_usb4_1_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_master_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_1_phy_pcie_pipe_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(125000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GCC_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_1_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0xb80c4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_usb4_1_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_sb_if_clk_src = {
+ .cmd_rcgr = 0xb8070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sb_if_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_1_tmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(250000000, P_GCC_GPLL2_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_1_tmu_clk_src = {
+ .cmd_rcgr = 0xb8054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_usb4_1_tmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_tmu_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_master_clk_src = {
+ .cmd_rcgr = 0x2a018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_usb4_1_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_master_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0x2a0c4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_usb4_1_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_sb_if_clk_src = {
+ .cmd_rcgr = 0x2a070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_sb_if_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_tmu_clk_src = {
+ .cmd_rcgr = 0x2a054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_usb4_1_tmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_tmu_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
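+/*
+ * Read-only post-dividers: clk_regmap_div_ro_ops only reads back the divider
+ * value left by hardware/firmware and never reprograms it.  Each divider has
+ * a single parent and CLK_SET_RATE_PARENT, so rate requests are forwarded to
+ * the parent clock source.
+ */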
+static struct clk_regmap_div gcc_pcie_2a_pipe_div_clk_src = {
+ .reg = 0x9d060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2a_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_2a_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_2b_pipe_div_clk_src = {
+ .reg = 0x9e060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2b_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_2b_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_3a_pipe_div_clk_src = {
+ .reg = 0xa0060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_3a_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_3b_pipe_div_clk_src = {
+ .reg = 0xa2060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_3b_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_4_pipe_div_clk_src = {
+ .reg = 0x6b060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_4_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s4_div_clk_src = {
+ .reg = 0x17ac8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s4_div_clk_src = {
+ .reg = 0x18ac8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap2_s4_div_clk_src = {
+ .reg = 0x1eac8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s4_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_mp_mock_utmi_postdiv_clk_src = {
+ .reg = 0xab050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_mp_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0xf050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_sec_mock_utmi_postdiv_clk_src = {
+ .reg = 0x10050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
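+/*
+ * Branch (gate) clocks.  halt_reg is polled according to halt_check; branches
+ * whose enable_reg points into the 0x52xxx range are controlled through the
+ * shared voting registers rather than their local CBCR, and hwcg_reg/hwcg_bit
+ * describe the hardware clock-gating control for the branch.
+ */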
+static struct clk_branch gcc_aggre_noc_pcie0_tunnel_axi_clk = {
+ .halt_reg = 0xa41a8,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xa41a8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie0_tunnel_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie1_tunnel_axi_clk = {
+ .halt_reg = 0x8d07c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8d07c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie1_tunnel_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_4_axi_clk = {
+ .halt_reg = 0x6b1b8,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x6b1b8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_4_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_south_sf_axi_clk = {
+ .halt_reg = 0xbf13c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xbf13c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_south_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_card_axi_clk = {
+ .halt_reg = 0x750cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x750cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x750cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_card_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_card_axi_hw_ctl_clk = {
+ .halt_reg = 0x750cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x750cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x750cc,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_card_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x770cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770cc,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_mp_axi_clk = {
+ .halt_reg = 0xab084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xab084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xab084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0xf080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xf080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xf080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0x10080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x10080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_1_axi_clk = {
+ .halt_reg = 0xb80e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb80e4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb80e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_1_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_axi_clk = {
+ .halt_reg = 0x2a0e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2a0e4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2a0e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb_noc_axi_clk = {
+ .halt_reg = 0x5d024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x5d024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5d024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb_noc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb_noc_north_axi_clk = {
+ .halt_reg = 0x5d020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x5d020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5d020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb_noc_north_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb_noc_south_axi_clk = {
+ .halt_reg = 0x5d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x5d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5d01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb_noc_south_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy0_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy2_clk = {
+ .halt_reg = 0x6a008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6a008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy2_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x26014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_throttle_nrt_axi_clk = {
+ .halt_reg = 0x2601c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2601c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_throttle_nrt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_throttle_rt_axi_clk = {
+ .halt_reg = 0x26018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_throttle_rt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_throttle_xo_clk = {
+ .halt_reg = 0x26024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_throttle_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_mp_axi_clk = {
+ .halt_reg = 0xab088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xab088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xab088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0xf084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xf084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xf084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0x10084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x10084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie0_tunnel_clk = {
+ .halt_reg = 0xa4074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie0_tunnel_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie1_tunnel_clk = {
+ .halt_reg = 0x8d074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie1_tunnel_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie4_qx_clk = {
+ .halt_reg = 0x6b084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie4_qx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x7115c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x7115c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7115c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_tbu_clk = {
+ .halt_reg = 0xa602c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xa602c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_pcie_sf_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp1_hf_axi_clk = {
+ .halt_reg = 0xbb010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xbb010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xbb010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp1_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp1_sf_axi_clk = {
+ .halt_reg = 0xbb018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xbb018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xbb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp1_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp1_throttle_nrt_axi_clk = {
+ .halt_reg = 0xbb024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xbb024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xbb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp1_throttle_nrt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp1_throttle_rt_axi_clk = {
+ .halt_reg = 0xbb020,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xbb020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xbb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp1_throttle_rt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x27010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x27010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_sf_axi_clk = {
+ .halt_reg = 0x27018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x27018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_throttle_nrt_axi_clk = {
+ .halt_reg = 0x27024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x27024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_throttle_nrt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_throttle_rt_axi_clk = {
+ .halt_reg = 0x27020,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x27020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_throttle_rt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_axi_clk = {
+ .halt_reg = 0xaa010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xaa010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xaa010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_ptp_clk = {
+ .halt_reg = 0xaa01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xaa01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_ptp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_emac0_ptp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_rgmii_clk = {
+ .halt_reg = 0xaa038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xaa038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_rgmii_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_emac0_rgmii_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_slv_ahb_clk = {
+ .halt_reg = 0xaa018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xaa018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xaa018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_axi_clk = {
+ .halt_reg = 0xba010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_ptp_clk = {
+ .halt_reg = 0xba01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xba01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_ptp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_emac1_ptp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_rgmii_clk = {
+ .halt_reg = 0xba038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xba038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_rgmii_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_emac1_rgmii_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_slv_ahb_clk = {
+ .halt_reg = 0xba018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp4_clk = {
+ .halt_reg = 0xc2000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc2000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp4_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp5_clk = {
+ .halt_reg = 0xc3000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc3000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp5_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gp5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_en = {
+ .halt_reg = 0x8c014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_iref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_tcu_throttle_ahb_clk = {
+ .halt_reg = 0x71008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_tcu_throttle_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_tcu_throttle_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_tcu_throttle_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_phy_rchng_clk = {
+ .halt_reg = 0xa4038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_phy_rchng_clk = {
+ .halt_reg = 0x8d038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2a_phy_rchng_clk = {
+ .halt_reg = 0x9d040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2a_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_2a_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2b_phy_rchng_clk = {
+ .halt_reg = 0x9e040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2b_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_2b_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3a_phy_rchng_clk = {
+ .halt_reg = 0xa0040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3a_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_3a_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3b_phy_rchng_clk = {
+ .halt_reg = 0xa2040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3b_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_3b_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie4_phy_rchng_clk = {
+ .halt_reg = 0x6b040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie4_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_4_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0xa4028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0xa4024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa4024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0xa401c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xa401c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0xa4030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0xa4014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa4014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0xa4010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x8d028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x8d024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x8d030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x8d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2a2b_clkref_clk = {
+ .halt_reg = 0x8c034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2a2b_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2a_aux_clk = {
+ .halt_reg = 0x9d028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2a_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_2a_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2a_cfg_ahb_clk = {
+ .halt_reg = 0x9d024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2a_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2a_mstr_axi_clk = {
+ .halt_reg = 0x9d01c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2a_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2a_pipe_clk = {
+ .halt_reg = 0x9d030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2a_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_2a_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2a_pipediv2_clk = {
+ .halt_reg = 0x9d038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2a_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_2a_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2a_slv_axi_clk = {
+ .halt_reg = 0x9d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2a_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2a_slv_q2a_axi_clk = {
+ .halt_reg = 0x9d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2a_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2b_aux_clk = {
+ .halt_reg = 0x9e028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2b_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_2b_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2b_cfg_ahb_clk = {
+ .halt_reg = 0x9e024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9e024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2b_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2b_mstr_axi_clk = {
+ .halt_reg = 0x9e01c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9e01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2b_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2b_pipe_clk = {
+ .halt_reg = 0x9e030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2b_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_2b_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2b_pipediv2_clk = {
+ .halt_reg = 0x9e038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2b_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_2b_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2b_slv_axi_clk = {
+ .halt_reg = 0x9e014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9e014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2b_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2b_slv_q2a_axi_clk = {
+ .halt_reg = 0x9e010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2b_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a3b_clkref_clk = {
+ .halt_reg = 0x8c038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a3b_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_aux_clk = {
+ .halt_reg = 0xa0028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_3a_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_cfg_ahb_clk = {
+ .halt_reg = 0xa0024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa0024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_mstr_axi_clk = {
+ .halt_reg = 0xa001c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xa001c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_pipe_clk = {
+ .halt_reg = 0xa0030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_3a_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_pipediv2_clk = {
+ .halt_reg = 0xa0038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_3a_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_slv_axi_clk = {
+ .halt_reg = 0xa0014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa0014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_slv_q2a_axi_clk = {
+ .halt_reg = 0xa0010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_aux_clk = {
+ .halt_reg = 0xa2028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_3b_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_cfg_ahb_clk = {
+ .halt_reg = 0xa2024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa2024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_mstr_axi_clk = {
+ .halt_reg = 0xa201c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xa201c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_pipe_clk = {
+ .halt_reg = 0xa2030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_3b_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_pipediv2_clk = {
+ .halt_reg = 0xa2038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_3b_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_slv_axi_clk = {
+ .halt_reg = 0xa2014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa2014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_slv_q2a_axi_clk = {
+ .halt_reg = 0xa2010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_aux_clk = {
+ .halt_reg = 0x6b028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_4_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_cfg_ahb_clk = {
+ .halt_reg = 0x6b024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_clkref_clk = {
+ .halt_reg = 0x8c030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_mstr_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x6b01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_pipe_clk = {
+ .halt_reg = 0x6b030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_4_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_pipediv2_clk = {
+ .halt_reg = 0x6b038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_4_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_slv_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_ahb_clk = {
+ .halt_reg = 0xae008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xae008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_xo_clk = {
+ .halt_reg = 0xae004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_xo_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pcie_rscc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_throttle_cfg_clk = {
+ .halt_reg = 0xa6028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_throttle_cfg_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp1_ahb_clk = {
+ .halt_reg = 0xbb008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xbb008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xbb008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp1_rot_ahb_clk = {
+ .halt_reg = 0xbb00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xbb00c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xbb00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp1_rot_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_rot_ahb_clk = {
+ .halt_reg = 0x2700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp_rot_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x28008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x28008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x28008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x2800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2800c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi0_clk = {
+ .halt_reg = 0x17ac4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x17144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x17274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x173a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x174d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x17604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s4_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x17734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x17864,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x17994,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x18014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi0_clk = {
+ .halt_reg = 0x18ac4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x183a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x184d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x18604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s4_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x18864,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x18994,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0x1e014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0x1e00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_qspi0_clk = {
+ .halt_reg = 0x1eac4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_qspi0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x1e144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x1e274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x1e3a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x1e4d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x1e604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s4_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x1e734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
+ .halt_reg = 0x1e864,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s6_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s7_clk = {
+ .halt_reg = 0x1e994,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s7_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_qupv3_wrap2_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_usb_axi_clk = {
+ .halt_reg = 0x5d000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x5d000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sys_noc_usb_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_1_card_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_1_card_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ahb_clk = {
+ .halt_reg = 0x75018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_hw_ctl_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_clkref_clk = {
+ .halt_reg = 0x8c054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_clk = {
+ .halt_reg = 0x75064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x75064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75064,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_ice_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_clk = {
+ .halt_reg = 0x7509c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7509c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7509c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x7509c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7509c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7509c,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_phy_aux_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = {
+ .halt_reg = 0x75020,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x75020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = {
+ .halt_reg = 0x750b8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x750b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = {
+ .halt_reg = 0x7501c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_clk = {
+ .halt_reg = 0x7505c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7505c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7505c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x7505c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7505c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7505c,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_unipro_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_card_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x77064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77064,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x7709c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7709c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7709c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x7709c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7709c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7709c,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x77020,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770b8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x7705c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7705c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7705c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x7705c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7705c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7705c,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_ref_clkref_clk = {
+ .halt_reg = 0x8c058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_ref_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_hs0_clkref_clk = {
+ .halt_reg = 0x8c044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_hs0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_hs1_clkref_clk = {
+ .halt_reg = 0x8c048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_hs1_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_hs2_clkref_clk = {
+ .halt_reg = 0x8c04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_hs2_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_hs3_clkref_clk = {
+ .halt_reg = 0x8c050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_hs3_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_master_clk = {
+ .halt_reg = 0xab010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xab010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_master_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_mock_utmi_clk = {
+ .halt_reg = 0xab01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xab01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_mp_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_sleep_clk = {
+ .halt_reg = 0xab018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xab018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0x10010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0x1001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0x10018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp0_clkref_clk = {
+ .halt_reg = 0x8c03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp1_clkref_clk = {
+ .halt_reg = 0x8c040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp1_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_aux_clk = {
+ .halt_reg = 0xab054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xab054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_com_aux_clk = {
+ .halt_reg = 0xab058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xab058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_pipe_0_clk = {
+ .halt_reg = 0xab05c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0xab05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_mp_phy_pipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_pipe_1_clk = {
+ .halt_reg = 0xab064,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0xab064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_mp_phy_pipe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0xf05c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0xf05c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xf05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0x10054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0x10058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_reg = 0x1005c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x1005c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1005c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_cfg_ahb_clk = {
+ .halt_reg = 0xb808c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb808c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_dp_clk = {
+ .halt_reg = 0xb8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_dp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_1_phy_dp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_master_clk = {
+ .halt_reg = 0xb8010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb8010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_master_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_1_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0xb80b4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0xb80b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
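+/*
+ * This branch and gcc_usb4_phy_pcie_pipe_clk further down are enabled
+ * through a shared enable register (0x52020, one bit per branch) rather
+ * than a register local to the branch; BRANCH_HALT_DELAY means the halt
+ * bit is not polled and a fixed delay is used instead.
+ */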
+static struct clk_branch gcc_usb4_1_phy_pcie_pipe_clk = {
+ .halt_reg = 0xb8038,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_rx0_clk = {
+ .halt_reg = 0xb8094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb8094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_1_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_rx1_clk = {
+ .halt_reg = 0xb80a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb80a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_1_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_usb_pipe_clk = {
+ .halt_reg = 0xb8088,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0xb8088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_sb_if_clk = {
+ .halt_reg = 0xb8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_1_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_sys_clk = {
+ .halt_reg = 0xb8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sys_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_1_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_tmu_clk = {
+ .halt_reg = 0xb806c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb806c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_1_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_cfg_ahb_clk = {
+ .halt_reg = 0x2a08c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2a08c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2a08c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_clkref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_dp_clk = {
+ .halt_reg = 0x2a048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_dp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_phy_dp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_eud_clkref_clk = {
+ .halt_reg = 0x8c02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_eud_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_master_clk = {
+ .halt_reg = 0x2a010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_master_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0x2a0b4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2a0b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_phy_pcie_pipe_clk = {
+ .halt_reg = 0x2a038,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_phy_rx0_clk = {
+ .halt_reg = 0x2a094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_phy_rx1_clk = {
+ .halt_reg = 0x2a0a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_phy_usb_pipe_clk = {
+ .halt_reg = 0x2a088,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x2a088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2a088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_sb_if_clk = {
+ .halt_reg = 0x2a034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_sys_clk = {
+ .halt_reg = 0x2a040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_sys_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_tmu_clk = {
+ .halt_reg = 0x2a06c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2a06c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2a06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb4_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x28010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x28010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x28010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x28018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x28018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x28018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_cvp_throttle_clk = {
+ .halt_reg = 0x28024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x28024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x28024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_cvp_throttle_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_vcodec_throttle_clk = {
+ .halt_reg = 0x28020,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x28020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x28020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_vcodec_throttle_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
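+/*
+ * GDSCs: power domains for the PCIe, UFS and USB controller blocks. They
+ * are registered alongside the clocks below and exposed as generic power
+ * domains so client drivers can power the blocks on and off.
+ */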
+static struct gdsc pcie_0_tunnel_gdsc = {
+ .gdscr = 0xa4004,
+ .pd = {
+ .name = "pcie_0_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_1_tunnel_gdsc = {
+ .gdscr = 0x8d004,
+ .pd = {
+ .name = "pcie_1_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_2a_gdsc = {
+ .gdscr = 0x9d004,
+ .pd = {
+ .name = "pcie_2a_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_2b_gdsc = {
+ .gdscr = 0x9e004,
+ .pd = {
+ .name = "pcie_2b_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_3a_gdsc = {
+ .gdscr = 0xa0004,
+ .pd = {
+ .name = "pcie_3a_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_3b_gdsc = {
+ .gdscr = 0xa2004,
+ .pd = {
+ .name = "pcie_3b_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_4_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "pcie_4_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_card_gdsc = {
+ .gdscr = 0x75004,
+ .pd = {
+ .name = "ufs_card_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_mp_gdsc = {
+ .gdscr = 0xab004,
+ .pd = {
+ .name = "usb30_mp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_sec_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
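+/*
+ * The index of each entry below is the clock constant from the dt-bindings
+ * header, so devicetree clock specifiers resolve directly to the matching
+ * struct clk_regmap.
+ */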
+static struct clk_regmap *gcc_sc8280xp_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE0_TUNNEL_AXI_CLK] = &gcc_aggre_noc_pcie0_tunnel_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE1_TUNNEL_AXI_CLK] = &gcc_aggre_noc_pcie1_tunnel_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_4_AXI_CLK] = &gcc_aggre_noc_pcie_4_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_SOUTH_SF_AXI_CLK] = &gcc_aggre_noc_pcie_south_sf_axi_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_card_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_USB3_MP_AXI_CLK] = &gcc_aggre_usb3_mp_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_AGGRE_USB4_1_AXI_CLK] = &gcc_aggre_usb4_1_axi_clk.clkr,
+ [GCC_AGGRE_USB4_AXI_CLK] = &gcc_aggre_usb4_axi_clk.clkr,
+ [GCC_AGGRE_USB_NOC_AXI_CLK] = &gcc_aggre_usb_noc_axi_clk.clkr,
+ [GCC_AGGRE_USB_NOC_NORTH_AXI_CLK] = &gcc_aggre_usb_noc_north_axi_clk.clkr,
+ [GCC_AGGRE_USB_NOC_SOUTH_AXI_CLK] = &gcc_aggre_usb_noc_south_axi_clk.clkr,
+ [GCC_AHB2PHY0_CLK] = &gcc_ahb2phy0_clk.clkr,
+ [GCC_AHB2PHY2_CLK] = &gcc_ahb2phy2_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CAMERA_THROTTLE_NRT_AXI_CLK] = &gcc_camera_throttle_nrt_axi_clk.clkr,
+ [GCC_CAMERA_THROTTLE_RT_AXI_CLK] = &gcc_camera_throttle_rt_axi_clk.clkr,
+ [GCC_CAMERA_THROTTLE_XO_CLK] = &gcc_camera_throttle_xo_clk.clkr,
+ [GCC_CFG_NOC_USB3_MP_AXI_CLK] = &gcc_cfg_noc_usb3_mp_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CNOC_PCIE0_TUNNEL_CLK] = &gcc_cnoc_pcie0_tunnel_clk.clkr,
+ [GCC_CNOC_PCIE1_TUNNEL_CLK] = &gcc_cnoc_pcie1_tunnel_clk.clkr,
+ [GCC_CNOC_PCIE4_QX_CLK] = &gcc_cnoc_pcie4_qx_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_TBU_CLK] = &gcc_ddrss_pcie_sf_tbu_clk.clkr,
+ [GCC_DISP1_HF_AXI_CLK] = &gcc_disp1_hf_axi_clk.clkr,
+ [GCC_DISP1_SF_AXI_CLK] = &gcc_disp1_sf_axi_clk.clkr,
+ [GCC_DISP1_THROTTLE_NRT_AXI_CLK] = &gcc_disp1_throttle_nrt_axi_clk.clkr,
+ [GCC_DISP1_THROTTLE_RT_AXI_CLK] = &gcc_disp1_throttle_rt_axi_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr,
+ [GCC_DISP_THROTTLE_NRT_AXI_CLK] = &gcc_disp_throttle_nrt_axi_clk.clkr,
+ [GCC_DISP_THROTTLE_RT_AXI_CLK] = &gcc_disp_throttle_rt_axi_clk.clkr,
+ [GCC_EMAC0_AXI_CLK] = &gcc_emac0_axi_clk.clkr,
+ [GCC_EMAC0_PTP_CLK] = &gcc_emac0_ptp_clk.clkr,
+ [GCC_EMAC0_PTP_CLK_SRC] = &gcc_emac0_ptp_clk_src.clkr,
+ [GCC_EMAC0_RGMII_CLK] = &gcc_emac0_rgmii_clk.clkr,
+ [GCC_EMAC0_RGMII_CLK_SRC] = &gcc_emac0_rgmii_clk_src.clkr,
+ [GCC_EMAC0_SLV_AHB_CLK] = &gcc_emac0_slv_ahb_clk.clkr,
+ [GCC_EMAC1_AXI_CLK] = &gcc_emac1_axi_clk.clkr,
+ [GCC_EMAC1_PTP_CLK] = &gcc_emac1_ptp_clk.clkr,
+ [GCC_EMAC1_PTP_CLK_SRC] = &gcc_emac1_ptp_clk_src.clkr,
+ [GCC_EMAC1_RGMII_CLK] = &gcc_emac1_rgmii_clk.clkr,
+ [GCC_EMAC1_RGMII_CLK_SRC] = &gcc_emac1_rgmii_clk_src.clkr,
+ [GCC_EMAC1_SLV_AHB_CLK] = &gcc_emac1_slv_ahb_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GP4_CLK] = &gcc_gp4_clk.clkr,
+ [GCC_GP4_CLK_SRC] = &gcc_gp4_clk_src.clkr,
+ [GCC_GP5_CLK] = &gcc_gp5_clk.clkr,
+ [GCC_GP5_CLK_SRC] = &gcc_gp5_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL2] = &gcc_gpll2.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL8] = &gcc_gpll8.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_EN] = &gcc_gpu_iref_en.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_TCU_THROTTLE_AHB_CLK] = &gcc_gpu_tcu_throttle_ahb_clk.clkr,
+ [GCC_GPU_TCU_THROTTLE_CLK] = &gcc_gpu_tcu_throttle_clk.clkr,
+ [GCC_PCIE0_PHY_RCHNG_CLK] = &gcc_pcie0_phy_rchng_clk.clkr,
+ [GCC_PCIE1_PHY_RCHNG_CLK] = &gcc_pcie1_phy_rchng_clk.clkr,
+ [GCC_PCIE2A_PHY_RCHNG_CLK] = &gcc_pcie2a_phy_rchng_clk.clkr,
+ [GCC_PCIE2B_PHY_RCHNG_CLK] = &gcc_pcie2b_phy_rchng_clk.clkr,
+ [GCC_PCIE3A_PHY_RCHNG_CLK] = &gcc_pcie3a_phy_rchng_clk.clkr,
+ [GCC_PCIE3B_PHY_RCHNG_CLK] = &gcc_pcie3b_phy_rchng_clk.clkr,
+ [GCC_PCIE4_PHY_RCHNG_CLK] = &gcc_pcie4_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_2A2B_CLKREF_CLK] = &gcc_pcie_2a2b_clkref_clk.clkr,
+ [GCC_PCIE_2A_AUX_CLK] = &gcc_pcie_2a_aux_clk.clkr,
+ [GCC_PCIE_2A_AUX_CLK_SRC] = &gcc_pcie_2a_aux_clk_src.clkr,
+ [GCC_PCIE_2A_CFG_AHB_CLK] = &gcc_pcie_2a_cfg_ahb_clk.clkr,
+ [GCC_PCIE_2A_MSTR_AXI_CLK] = &gcc_pcie_2a_mstr_axi_clk.clkr,
+ [GCC_PCIE_2A_PHY_RCHNG_CLK_SRC] = &gcc_pcie_2a_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_2A_PIPE_CLK] = &gcc_pcie_2a_pipe_clk.clkr,
+ [GCC_PCIE_2A_PIPE_CLK_SRC] = &gcc_pcie_2a_pipe_clk_src.clkr,
+ [GCC_PCIE_2A_PIPE_DIV_CLK_SRC] = &gcc_pcie_2a_pipe_div_clk_src.clkr,
+ [GCC_PCIE_2A_PIPEDIV2_CLK] = &gcc_pcie_2a_pipediv2_clk.clkr,
+ [GCC_PCIE_2A_SLV_AXI_CLK] = &gcc_pcie_2a_slv_axi_clk.clkr,
+ [GCC_PCIE_2A_SLV_Q2A_AXI_CLK] = &gcc_pcie_2a_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_2B_AUX_CLK] = &gcc_pcie_2b_aux_clk.clkr,
+ [GCC_PCIE_2B_AUX_CLK_SRC] = &gcc_pcie_2b_aux_clk_src.clkr,
+ [GCC_PCIE_2B_CFG_AHB_CLK] = &gcc_pcie_2b_cfg_ahb_clk.clkr,
+ [GCC_PCIE_2B_MSTR_AXI_CLK] = &gcc_pcie_2b_mstr_axi_clk.clkr,
+ [GCC_PCIE_2B_PHY_RCHNG_CLK_SRC] = &gcc_pcie_2b_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_2B_PIPE_CLK] = &gcc_pcie_2b_pipe_clk.clkr,
+ [GCC_PCIE_2B_PIPE_CLK_SRC] = &gcc_pcie_2b_pipe_clk_src.clkr,
+ [GCC_PCIE_2B_PIPE_DIV_CLK_SRC] = &gcc_pcie_2b_pipe_div_clk_src.clkr,
+ [GCC_PCIE_2B_PIPEDIV2_CLK] = &gcc_pcie_2b_pipediv2_clk.clkr,
+ [GCC_PCIE_2B_SLV_AXI_CLK] = &gcc_pcie_2b_slv_axi_clk.clkr,
+ [GCC_PCIE_2B_SLV_Q2A_AXI_CLK] = &gcc_pcie_2b_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_3A3B_CLKREF_CLK] = &gcc_pcie_3a3b_clkref_clk.clkr,
+ [GCC_PCIE_3A_AUX_CLK] = &gcc_pcie_3a_aux_clk.clkr,
+ [GCC_PCIE_3A_AUX_CLK_SRC] = &gcc_pcie_3a_aux_clk_src.clkr,
+ [GCC_PCIE_3A_CFG_AHB_CLK] = &gcc_pcie_3a_cfg_ahb_clk.clkr,
+ [GCC_PCIE_3A_MSTR_AXI_CLK] = &gcc_pcie_3a_mstr_axi_clk.clkr,
+ [GCC_PCIE_3A_PHY_RCHNG_CLK_SRC] = &gcc_pcie_3a_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_3A_PIPE_CLK] = &gcc_pcie_3a_pipe_clk.clkr,
+ [GCC_PCIE_3A_PIPE_CLK_SRC] = &gcc_pcie_3a_pipe_clk_src.clkr,
+ [GCC_PCIE_3A_PIPE_DIV_CLK_SRC] = &gcc_pcie_3a_pipe_div_clk_src.clkr,
+ [GCC_PCIE_3A_PIPEDIV2_CLK] = &gcc_pcie_3a_pipediv2_clk.clkr,
+ [GCC_PCIE_3A_SLV_AXI_CLK] = &gcc_pcie_3a_slv_axi_clk.clkr,
+ [GCC_PCIE_3A_SLV_Q2A_AXI_CLK] = &gcc_pcie_3a_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_3B_AUX_CLK] = &gcc_pcie_3b_aux_clk.clkr,
+ [GCC_PCIE_3B_AUX_CLK_SRC] = &gcc_pcie_3b_aux_clk_src.clkr,
+ [GCC_PCIE_3B_CFG_AHB_CLK] = &gcc_pcie_3b_cfg_ahb_clk.clkr,
+ [GCC_PCIE_3B_MSTR_AXI_CLK] = &gcc_pcie_3b_mstr_axi_clk.clkr,
+ [GCC_PCIE_3B_PHY_RCHNG_CLK_SRC] = &gcc_pcie_3b_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_3B_PIPE_CLK] = &gcc_pcie_3b_pipe_clk.clkr,
+ [GCC_PCIE_3B_PIPE_CLK_SRC] = &gcc_pcie_3b_pipe_clk_src.clkr,
+ [GCC_PCIE_3B_PIPE_DIV_CLK_SRC] = &gcc_pcie_3b_pipe_div_clk_src.clkr,
+ [GCC_PCIE_3B_PIPEDIV2_CLK] = &gcc_pcie_3b_pipediv2_clk.clkr,
+ [GCC_PCIE_3B_SLV_AXI_CLK] = &gcc_pcie_3b_slv_axi_clk.clkr,
+ [GCC_PCIE_3B_SLV_Q2A_AXI_CLK] = &gcc_pcie_3b_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_4_AUX_CLK] = &gcc_pcie_4_aux_clk.clkr,
+ [GCC_PCIE_4_AUX_CLK_SRC] = &gcc_pcie_4_aux_clk_src.clkr,
+ [GCC_PCIE_4_CFG_AHB_CLK] = &gcc_pcie_4_cfg_ahb_clk.clkr,
+ [GCC_PCIE_4_CLKREF_CLK] = &gcc_pcie_4_clkref_clk.clkr,
+ [GCC_PCIE_4_MSTR_AXI_CLK] = &gcc_pcie_4_mstr_axi_clk.clkr,
+ [GCC_PCIE_4_PHY_RCHNG_CLK_SRC] = &gcc_pcie_4_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_4_PIPE_CLK] = &gcc_pcie_4_pipe_clk.clkr,
+ [GCC_PCIE_4_PIPE_CLK_SRC] = &gcc_pcie_4_pipe_clk_src.clkr,
+ [GCC_PCIE_4_PIPE_DIV_CLK_SRC] = &gcc_pcie_4_pipe_div_clk_src.clkr,
+ [GCC_PCIE_4_PIPEDIV2_CLK] = &gcc_pcie_4_pipediv2_clk.clkr,
+ [GCC_PCIE_4_SLV_AXI_CLK] = &gcc_pcie_4_slv_axi_clk.clkr,
+ [GCC_PCIE_4_SLV_Q2A_AXI_CLK] = &gcc_pcie_4_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_RSCC_AHB_CLK] = &gcc_pcie_rscc_ahb_clk.clkr,
+ [GCC_PCIE_RSCC_XO_CLK] = &gcc_pcie_rscc_xo_clk.clkr,
+ [GCC_PCIE_RSCC_XO_CLK_SRC] = &gcc_pcie_rscc_xo_clk_src.clkr,
+ [GCC_PCIE_THROTTLE_CFG_CLK] = &gcc_pcie_throttle_cfg_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP1_AHB_CLK] = &gcc_qmip_disp1_ahb_clk.clkr,
+ [GCC_QMIP_DISP1_ROT_AHB_CLK] = &gcc_qmip_disp1_rot_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_DISP_ROT_AHB_CLK] = &gcc_qmip_disp_rot_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI0_CLK] = &gcc_qupv3_wrap0_qspi0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_DIV_CLK_SRC] = &gcc_qupv3_wrap0_s4_div_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI0_CLK] = &gcc_qupv3_wrap1_qspi0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_DIV_CLK_SRC] = &gcc_qupv3_wrap1_s4_div_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI0_CLK] = &gcc_qupv3_wrap2_qspi0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_DIV_CLK_SRC] = &gcc_qupv3_wrap2_s4_div_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK] = &gcc_qupv3_wrap2_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK_SRC] = &gcc_qupv3_wrap2_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK] = &gcc_qupv3_wrap2_s7_clk.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK_SRC] = &gcc_qupv3_wrap2_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_SYS_NOC_USB_AXI_CLK] = &gcc_sys_noc_usb_axi_clk.clkr,
+ [GCC_UFS_1_CARD_CLKREF_CLK] = &gcc_ufs_1_card_clkref_clk.clkr,
+ [GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
+ [GCC_UFS_CARD_AXI_HW_CTL_CLK] = &gcc_ufs_card_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_CLKREF_CLK] = &gcc_ufs_card_clkref_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr,
+ [GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_card_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
+ [GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_card_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_card_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_card_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_card_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_card_unipro_core_clk_src.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_card_unipro_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
+ [GCC_UFS_REF_CLKREF_CLK] = &gcc_ufs_ref_clkref_clk.clkr,
+ [GCC_USB2_HS0_CLKREF_CLK] = &gcc_usb2_hs0_clkref_clk.clkr,
+ [GCC_USB2_HS1_CLKREF_CLK] = &gcc_usb2_hs1_clkref_clk.clkr,
+ [GCC_USB2_HS2_CLKREF_CLK] = &gcc_usb2_hs2_clkref_clk.clkr,
+ [GCC_USB2_HS3_CLKREF_CLK] = &gcc_usb2_hs3_clkref_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK] = &gcc_usb30_mp_master_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK_SRC] = &gcc_usb30_mp_master_clk_src.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK] = &gcc_usb30_mp_mock_utmi_clk.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mp_mock_utmi_clk_src.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_mp_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_MP_SLEEP_CLK] = &gcc_usb30_mp_sleep_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB34_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb34_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB34_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb34_sec_phy_pipe_clk_src.clkr,
+ [GCC_USB3_MP0_CLKREF_CLK] = &gcc_usb3_mp0_clkref_clk.clkr,
+ [GCC_USB3_MP1_CLKREF_CLK] = &gcc_usb3_mp1_clkref_clk.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK] = &gcc_usb3_mp_phy_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK_SRC] = &gcc_usb3_mp_phy_aux_clk_src.clkr,
+ [GCC_USB3_MP_PHY_COM_AUX_CLK] = &gcc_usb3_mp_phy_com_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_0_CLK] = &gcc_usb3_mp_phy_pipe_0_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_0_CLK_SRC] = &gcc_usb3_mp_phy_pipe_0_clk_src.clkr,
+ [GCC_USB3_MP_PHY_PIPE_1_CLK] = &gcc_usb3_mp_phy_pipe_1_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_1_CLK_SRC] = &gcc_usb3_mp_phy_pipe_1_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb3_sec_phy_pipe_clk_src.clkr,
+ [GCC_USB4_1_CFG_AHB_CLK] = &gcc_usb4_1_cfg_ahb_clk.clkr,
+ [GCC_USB4_1_DP_CLK] = &gcc_usb4_1_dp_clk.clkr,
+ [GCC_USB4_1_MASTER_CLK] = &gcc_usb4_1_master_clk.clkr,
+ [GCC_USB4_1_MASTER_CLK_SRC] = &gcc_usb4_1_master_clk_src.clkr,
+ [GCC_USB4_1_PHY_DP_CLK_SRC] = &gcc_usb4_1_phy_dp_clk_src.clkr,
+ [GCC_USB4_1_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_1_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_CLK] = &gcc_usb4_1_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipegmux_clk_src.clkr,
+ [GCC_USB4_1_PHY_RX0_CLK] = &gcc_usb4_1_phy_rx0_clk.clkr,
+ [GCC_USB4_1_PHY_RX0_CLK_SRC] = &gcc_usb4_1_phy_rx0_clk_src.clkr,
+ [GCC_USB4_1_PHY_RX1_CLK] = &gcc_usb4_1_phy_rx1_clk.clkr,
+ [GCC_USB4_1_PHY_RX1_CLK_SRC] = &gcc_usb4_1_phy_rx1_clk_src.clkr,
+ [GCC_USB4_1_PHY_SYS_CLK_SRC] = &gcc_usb4_1_phy_sys_clk_src.clkr,
+ [GCC_USB4_1_PHY_USB_PIPE_CLK] = &gcc_usb4_1_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_1_SB_IF_CLK] = &gcc_usb4_1_sb_if_clk.clkr,
+ [GCC_USB4_1_SB_IF_CLK_SRC] = &gcc_usb4_1_sb_if_clk_src.clkr,
+ [GCC_USB4_1_SYS_CLK] = &gcc_usb4_1_sys_clk.clkr,
+ [GCC_USB4_1_TMU_CLK] = &gcc_usb4_1_tmu_clk.clkr,
+ [GCC_USB4_1_TMU_CLK_SRC] = &gcc_usb4_1_tmu_clk_src.clkr,
+ [GCC_USB4_CFG_AHB_CLK] = &gcc_usb4_cfg_ahb_clk.clkr,
+ [GCC_USB4_CLKREF_CLK] = &gcc_usb4_clkref_clk.clkr,
+ [GCC_USB4_DP_CLK] = &gcc_usb4_dp_clk.clkr,
+ [GCC_USB4_EUD_CLKREF_CLK] = &gcc_usb4_eud_clkref_clk.clkr,
+ [GCC_USB4_MASTER_CLK] = &gcc_usb4_master_clk.clkr,
+ [GCC_USB4_MASTER_CLK_SRC] = &gcc_usb4_master_clk_src.clkr,
+ [GCC_USB4_PHY_DP_CLK_SRC] = &gcc_usb4_phy_dp_clk_src.clkr,
+ [GCC_USB4_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_phy_p2rr2p_pipe_clk_src.clkr,
+ [GCC_USB4_PHY_PCIE_PIPE_CLK] = &gcc_usb4_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_phy_pcie_pipe_mux_clk_src.clkr,
+ [GCC_USB4_PHY_PCIE_PIPEGMUX_CLK_SRC] = &gcc_usb4_phy_pcie_pipegmux_clk_src.clkr,
+ [GCC_USB4_PHY_RX0_CLK] = &gcc_usb4_phy_rx0_clk.clkr,
+ [GCC_USB4_PHY_RX0_CLK_SRC] = &gcc_usb4_phy_rx0_clk_src.clkr,
+ [GCC_USB4_PHY_RX1_CLK] = &gcc_usb4_phy_rx1_clk.clkr,
+ [GCC_USB4_PHY_RX1_CLK_SRC] = &gcc_usb4_phy_rx1_clk_src.clkr,
+ [GCC_USB4_PHY_SYS_CLK_SRC] = &gcc_usb4_phy_sys_clk_src.clkr,
+ [GCC_USB4_PHY_USB_PIPE_CLK] = &gcc_usb4_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_SB_IF_CLK] = &gcc_usb4_sb_if_clk.clkr,
+ [GCC_USB4_SB_IF_CLK_SRC] = &gcc_usb4_sb_if_clk_src.clkr,
+ [GCC_USB4_SYS_CLK] = &gcc_usb4_sys_clk.clkr,
+ [GCC_USB4_TMU_CLK] = &gcc_usb4_tmu_clk.clkr,
+ [GCC_USB4_TMU_CLK_SRC] = &gcc_usb4_tmu_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+ [GCC_VIDEO_CVP_THROTTLE_CLK] = &gcc_video_cvp_throttle_clk.clkr,
+ [GCC_VIDEO_VCODEC_THROTTLE_CLK] = &gcc_video_vcodec_throttle_clk.clkr,
+};
+
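+/*
+ * Block Control Register (BCR) offsets; each entry lets the reset
+ * controller assert and deassert the reset for the named block.
+ */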
+static const struct qcom_reset_map gcc_sc8280xp_resets[] = {
+ [GCC_EMAC0_BCR] = { 0xaa000 },
+ [GCC_EMAC1_BCR] = { 0xba000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 },
+ [GCC_PCIE_0_TUNNEL_BCR] = { 0xa4000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x8e014 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x8e020 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x8e000 },
+ [GCC_PCIE_1_TUNNEL_BCR] = { 0x8d000 },
+ [GCC_PCIE_2A_BCR] = { 0x9d000 },
+ [GCC_PCIE_2A_LINK_DOWN_BCR] = { 0x9d13c },
+ [GCC_PCIE_2A_NOCSR_COM_PHY_BCR] = { 0x9d148 },
+ [GCC_PCIE_2A_PHY_BCR] = { 0x9d144 },
+ [GCC_PCIE_2A_PHY_NOCSR_COM_PHY_BCR] = { 0x9d14c },
+ [GCC_PCIE_2B_BCR] = { 0x9e000 },
+ [GCC_PCIE_2B_LINK_DOWN_BCR] = { 0x9e084 },
+ [GCC_PCIE_2B_NOCSR_COM_PHY_BCR] = { 0x9e090 },
+ [GCC_PCIE_2B_PHY_BCR] = { 0x9e08c },
+ [GCC_PCIE_2B_PHY_NOCSR_COM_PHY_BCR] = { 0x9e094 },
+ [GCC_PCIE_3A_BCR] = { 0xa0000 },
+ [GCC_PCIE_3A_LINK_DOWN_BCR] = { 0xa00f0 },
+ [GCC_PCIE_3A_NOCSR_COM_PHY_BCR] = { 0xa00fc },
+ [GCC_PCIE_3A_PHY_BCR] = { 0xa00e0 },
+ [GCC_PCIE_3A_PHY_NOCSR_COM_PHY_BCR] = { 0xa00e4 },
+ [GCC_PCIE_3B_BCR] = { 0xa2000 },
+ [GCC_PCIE_3B_LINK_DOWN_BCR] = { 0xa20e0 },
+ [GCC_PCIE_3B_NOCSR_COM_PHY_BCR] = { 0xa20ec },
+ [GCC_PCIE_3B_PHY_BCR] = { 0xa20e8 },
+ [GCC_PCIE_3B_PHY_NOCSR_COM_PHY_BCR] = { 0xa20f0 },
+ [GCC_PCIE_4_BCR] = { 0x6b000 },
+ [GCC_PCIE_4_LINK_DOWN_BCR] = { 0x6b300 },
+ [GCC_PCIE_4_NOCSR_COM_PHY_BCR] = { 0x6b30c },
+ [GCC_PCIE_4_PHY_BCR] = { 0x6b308 },
+ [GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR] = { 0x6b310 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x6f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_PCIE_RSCC_BCR] = { 0xae000 },
+ [GCC_QUSB2PHY_HS0_MP_BCR] = { 0x12008 },
+ [GCC_QUSB2PHY_HS1_MP_BCR] = { 0x1200c },
+ [GCC_QUSB2PHY_HS2_MP_BCR] = { 0x12010 },
+ [GCC_QUSB2PHY_HS3_MP_BCR] = { 0x12014 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_UFS_CARD_BCR] = { 0x75000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB2_PHY_PRIM_BCR] = { 0x50028 },
+ [GCC_USB2_PHY_SEC_BCR] = { 0x5002c },
+ [GCC_USB30_MP_BCR] = { 0xab000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB30_SEC_BCR] = { 0x10000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3_UNIPHY_MP0_BCR] = { 0x50018 },
+ [GCC_USB3_UNIPHY_MP1_BCR] = { 0x5001c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x50020 },
+ [GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x50024 },
+ [GCC_USB4_1_BCR] = { 0xb8000 },
+ [GCC_USB4_1_DP_PHY_PRIM_BCR] = { 0xb9020 },
+ [GCC_USB4_1_DPPHY_AUX_BCR] = { 0xb9024 },
+ [GCC_USB4_1_PHY_PRIM_BCR] = { 0xb9018 },
+ [GCC_USB4_BCR] = { 0x2a000 },
+ [GCC_USB4_DP_PHY_PRIM_BCR] = { 0x4a008 },
+ [GCC_USB4_DPPHY_AUX_BCR] = { 0x4a00c },
+ [GCC_USB4_PHY_PRIM_BCR] = { 0x4a000 },
+ [GCC_USB4PHY_1_PHY_PRIM_BCR] = { 0xb901c },
+ [GCC_USB4PHY_PHY_PRIM_BCR] = { 0x4a004 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_VIDEO_BCR] = { 0x28000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x28010, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x28018, 2 },
+};
+
+static struct gdsc *gcc_sc8280xp_gdscs[] = {
+ [PCIE_0_TUNNEL_GDSC] = &pcie_0_tunnel_gdsc,
+ [PCIE_1_TUNNEL_GDSC] = &pcie_1_tunnel_gdsc,
+ [PCIE_2A_GDSC] = &pcie_2a_gdsc,
+ [PCIE_2B_GDSC] = &pcie_2b_gdsc,
+ [PCIE_3A_GDSC] = &pcie_3a_gdsc,
+ [PCIE_3B_GDSC] = &pcie_3b_gdsc,
+ [PCIE_4_GDSC] = &pcie_4_gdsc,
+ [UFS_CARD_GDSC] = &ufs_card_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_MP_GDSC] = &usb30_mp_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB30_SEC_GDSC] = &usb30_sec_gdsc,
+};
+
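+/*
+ * QUP serial-engine RCGs that support Dynamic Frequency Switching (DFS);
+ * probe passes this table to qcom_cc_register_rcg_dfs() so the frequency
+ * configurations already programmed into the DFS registers are reused.
+ */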
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s7_clk_src),
+};
+
+static const struct regmap_config gcc_sc8280xp_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xc3014,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sc8280xp_desc = {
+ .config = &gcc_sc8280xp_regmap_config,
+ .clks = gcc_sc8280xp_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sc8280xp_clocks),
+ .resets = gcc_sc8280xp_resets,
+ .num_resets = ARRAY_SIZE(gcc_sc8280xp_resets),
+ .gdscs = gcc_sc8280xp_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sc8280xp_gdscs),
+};
+
+static int gcc_sc8280xp_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sc8280xp_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Keep these clocks always on:
+ * GCC_CAMERA_AHB_CLK, GCC_CAMERA_XO_CLK, GCC_DISP_AHB_CLK,
+ * GCC_DISP_XO_CLK, GCC_GPU_CFG_AHB_CLK, GCC_VIDEO_AHB_CLK,
+ * GCC_VIDEO_XO_CLK, GCC_DISP1_AHB_CLK, GCC_DISP1_XO_CLK
+ */
+ regmap_update_bits(regmap, 0x26004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x26020, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x27004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x27028, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x28004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x28028, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xbb004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xbb028, BIT(0), BIT(0));
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ return qcom_cc_really_probe(pdev, &gcc_sc8280xp_desc, regmap);
+}
+
+static const struct of_device_id gcc_sc8280xp_match_table[] = {
+ { .compatible = "qcom,gcc-sc8280xp" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sc8280xp_match_table);
+
+static struct platform_driver gcc_sc8280xp_driver = {
+ .probe = gcc_sc8280xp_probe,
+ .driver = {
+ .name = "gcc-sc8280xp",
+ .of_match_table = gcc_sc8280xp_match_table,
+ },
+};
+
+static int __init gcc_sc8280xp_init(void)
+{
+ return platform_driver_register(&gcc_sc8280xp_driver);
+}
+subsys_initcall(gcc_sc8280xp_init);
+
+static void __exit gcc_sc8280xp_exit(void)
+{
+ platform_driver_unregister(&gcc_sc8280xp_driver);
+}
+module_exit(gcc_sc8280xp_exit);
+
+MODULE_DESCRIPTION("Qualcomm SC8280XP GCC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
new file mode 100644
index 000000000000..6ab6e5a34c72
--- /dev/null
+++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
@@ -0,0 +1,838 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lpassaudiocc-sc7280.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+ P_BI_TCXO,
+ P_LPASS_AON_CC_PLL_OUT_EVEN,
+ P_LPASS_AON_CC_PLL_OUT_MAIN,
+ P_LPASS_AON_CC_PLL_OUT_MAIN_CDIV_DIV_CLK_SRC,
+ P_LPASS_AON_CC_PLL_OUT_ODD,
+ P_LPASS_AUDIO_CC_PLL_OUT_AUX,
+ P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC,
+ P_LPASS_AUDIO_CC_PLL_MAIN_DIV_CLK,
+};
+
+static const struct pll_vco zonda_vco[] = {
+ { 595200000UL, 3600000000UL, 0 },
+};
+
+/* 1128.96MHz configuration */
+static const struct alpha_pll_config lpass_audio_cc_pll_config = {
+ .l = 0x3a,
+ .alpha = 0xcccc,
+ .config_ctl_val = 0x08200920,
+ .config_ctl_hi_val = 0x05002001,
+ .config_ctl_hi1_val = 0x00000000,
+ .user_ctl_val = 0x03000101,
+};
+
+static struct clk_alpha_pll lpass_audio_cc_pll = {
+ .offset = 0x0,
+ .vco_table = zonda_vco,
+ .num_vco = ARRAY_SIZE(zonda_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_audio_cc_pll",
+ .parent_data = &(const struct clk_parent_data){
+ .index = 0,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_zonda_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_lpass_audio_cc_pll_out_aux2[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv lpass_audio_cc_pll_out_aux2 = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_lpass_audio_cc_pll_out_aux2,
+ .num_post_div = ARRAY_SIZE(post_div_table_lpass_audio_cc_pll_out_aux2),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "lpass_audio_cc_pll_out_aux2",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_audio_cc_pll.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_zonda_ops,
+ },
+};
+
+static const struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+/* 614.4 MHz configuration */
+static const struct alpha_pll_config lpass_aon_cc_pll_config = {
+ .l = 0x20,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00005100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll lpass_aon_cc_pll = {
+ .offset = 0x0,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_aon_cc_pll",
+ .parent_data = &(const struct clk_parent_data){
+ .index = 0,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_lpass_aon_cc_pll_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv lpass_aon_cc_pll_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_lpass_aon_cc_pll_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_lpass_aon_cc_pll_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "lpass_aon_cc_pll_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_aon_cc_pll.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_lpass_aon_cc_pll_out_odd[] = {
+ { 0x5, 5 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv lpass_aon_cc_pll_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_lpass_aon_cc_pll_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_lpass_aon_cc_pll_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "lpass_aon_cc_pll_out_odd",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_aon_cc_pll.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static const struct parent_map lpass_audio_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_LPASS_AUDIO_CC_PLL_OUT_AUX, 3 },
+ { P_LPASS_AON_CC_PLL_OUT_ODD, 5 },
+ { P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 6 },
+};
+
+static struct clk_regmap_div lpass_audio_cc_pll_out_aux2_div_clk_src;
+static struct clk_regmap_div lpass_audio_cc_pll_out_main_div_clk_src;
+
+static const struct clk_parent_data lpass_audio_cc_parent_data_0[] = {
+ { .index = 0 },
+ { .hw = &lpass_audio_cc_pll.clkr.hw },
+ { .hw = &lpass_aon_cc_pll_out_odd.clkr.hw },
+ { .hw = &lpass_audio_cc_pll_out_aux2_div_clk_src.clkr.hw },
+};
+
+static const struct parent_map lpass_aon_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_LPASS_AON_CC_PLL_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data lpass_aon_cc_parent_data_0[] = {
+ { .index = 0 },
+ { .hw = &lpass_aon_cc_pll_out_even.clkr.hw },
+};
+
+static const struct parent_map lpass_aon_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_LPASS_AON_CC_PLL_OUT_ODD, 1 },
+ { P_LPASS_AUDIO_CC_PLL_MAIN_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data lpass_aon_cc_parent_data_1[] = {
+ { .index = 0 },
+ { .hw = &lpass_aon_cc_pll_out_odd.clkr.hw },
+ { .hw = &lpass_audio_cc_pll_out_main_div_clk_src.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_lpass_aon_cc_main_rcg_clk_src[] = {
+ F(38400000, P_LPASS_AON_CC_PLL_OUT_EVEN, 8, 0, 0),
+ F(76800000, P_LPASS_AON_CC_PLL_OUT_EVEN, 4, 0, 0),
+ F(153600000, P_LPASS_AON_CC_PLL_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 lpass_aon_cc_main_rcg_clk_src = {
+ .cmd_rcgr = 0x1000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = lpass_aon_cc_parent_map_0,
+ .freq_tbl = ftbl_lpass_aon_cc_main_rcg_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "lpass_aon_cc_main_rcg_clk_src",
+ .parent_data = lpass_aon_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(lpass_aon_cc_parent_data_0),
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_lpass_aon_cc_tx_mclk_rcg_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24576000, P_LPASS_AON_CC_PLL_OUT_ODD, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 lpass_aon_cc_tx_mclk_rcg_clk_src = {
+ .cmd_rcgr = 0x13004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = lpass_aon_cc_parent_map_1,
+ .freq_tbl = ftbl_lpass_aon_cc_tx_mclk_rcg_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "lpass_aon_cc_tx_mclk_rcg_clk_src",
+ .parent_data = lpass_aon_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(lpass_aon_cc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div lpass_audio_cc_pll_out_aux2_div_clk_src = {
+ .reg = 0x48,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "lpass_audio_cc_pll_out_aux2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_audio_cc_pll_out_aux2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div lpass_audio_cc_pll_out_main_div_clk_src = {
+ .reg = 0x3c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "lpass_audio_cc_pll_out_main_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_audio_cc_pll.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div lpass_aon_cc_cdiv_tx_mclk_div_clk_src = {
+ .reg = 0x13010,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "lpass_aon_cc_cdiv_tx_mclk_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_aon_cc_tx_mclk_rcg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div lpass_aon_cc_pll_out_main_cdiv_div_clk_src = {
+ .reg = 0x80,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "lpass_aon_cc_pll_out_main_cdiv_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_aon_cc_pll.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_lpass_audio_cc_ext_mclk0_clk_src[] = {
+ F(256000, P_LPASS_AON_CC_PLL_OUT_ODD, 15, 1, 32),
+ F(352800, P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 10, 1, 32),
+ F(512000, P_LPASS_AON_CC_PLL_OUT_ODD, 15, 1, 16),
+ F(705600, P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 10, 1, 16),
+ F(768000, P_LPASS_AON_CC_PLL_OUT_ODD, 10, 1, 16),
+ F(1024000, P_LPASS_AON_CC_PLL_OUT_ODD, 15, 1, 8),
+ F(1411200, P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 10, 1, 8),
+ F(1536000, P_LPASS_AON_CC_PLL_OUT_ODD, 10, 1, 8),
+ F(2048000, P_LPASS_AON_CC_PLL_OUT_ODD, 15, 1, 4),
+ F(2822400, P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 10, 1, 4),
+ F(3072000, P_LPASS_AON_CC_PLL_OUT_ODD, 10, 1, 4),
+ F(4096000, P_LPASS_AON_CC_PLL_OUT_ODD, 15, 1, 2),
+ F(5644800, P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 10, 1, 2),
+ F(6144000, P_LPASS_AON_CC_PLL_OUT_ODD, 10, 1, 2),
+ F(8192000, P_LPASS_AON_CC_PLL_OUT_ODD, 15, 0, 0),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(11289600, P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 10, 0, 0),
+ F(12288000, P_LPASS_AON_CC_PLL_OUT_ODD, 10, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(22579200, P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 5, 0, 0),
+ F(24576000, P_LPASS_AON_CC_PLL_OUT_ODD, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 lpass_audio_cc_ext_mclk0_clk_src = {
+ .cmd_rcgr = 0x20004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = lpass_audio_cc_parent_map_0,
+ .freq_tbl = ftbl_lpass_audio_cc_ext_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "lpass_audio_cc_ext_mclk0_clk_src",
+ .parent_data = lpass_audio_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(lpass_audio_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 lpass_audio_cc_ext_mclk1_clk_src = {
+ .cmd_rcgr = 0x21004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = lpass_audio_cc_parent_map_0,
+ .freq_tbl = ftbl_lpass_audio_cc_ext_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "lpass_audio_cc_ext_mclk1_clk_src",
+ .parent_data = lpass_audio_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(lpass_audio_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 lpass_audio_cc_rx_mclk_clk_src = {
+ .cmd_rcgr = 0x24004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = lpass_audio_cc_parent_map_0,
+ .freq_tbl = ftbl_lpass_audio_cc_ext_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "lpass_audio_cc_rx_mclk_clk_src",
+ .parent_data = lpass_audio_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(lpass_audio_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div lpass_audio_cc_cdiv_rx_mclk_div_clk_src = {
+ .reg = 0x240d0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "lpass_audio_cc_cdiv_rx_mclk_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_audio_cc_rx_mclk_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch lpass_aon_cc_audio_hm_h_clk;
+
+static struct clk_branch lpass_audio_cc_codec_mem0_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_audio_cc_codec_mem0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_aon_cc_audio_hm_h_clk.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_audio_cc_codec_mem1_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_audio_cc_codec_mem1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_aon_cc_audio_hm_h_clk.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_audio_cc_codec_mem2_clk = {
+ .halt_reg = 0x1e00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_audio_cc_codec_mem2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_aon_cc_audio_hm_h_clk.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_audio_cc_codec_mem_clk = {
+ .halt_reg = 0x1e000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_audio_cc_codec_mem_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_aon_cc_audio_hm_h_clk.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_audio_cc_ext_mclk0_clk = {
+ .halt_reg = 0x20018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_audio_cc_ext_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_audio_cc_ext_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_audio_cc_ext_mclk1_clk = {
+ .halt_reg = 0x21018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x21018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_audio_cc_ext_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_audio_cc_ext_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_audio_cc_rx_mclk_2x_clk = {
+ .halt_reg = 0x240cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x240cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_audio_cc_rx_mclk_2x_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_audio_cc_rx_mclk_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_audio_cc_rx_mclk_clk = {
+ .halt_reg = 0x240d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x240d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_audio_cc_rx_mclk_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_audio_cc_cdiv_rx_mclk_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_aon_cc_audio_hm_h_clk = {
+ .halt_reg = 0x9014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_aon_cc_audio_hm_h_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_aon_cc_main_rcg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_aon_cc_va_mem0_clk = {
+ .halt_reg = 0x9028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_aon_cc_va_mem0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_aon_cc_main_rcg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_aon_cc_tx_mclk_2x_clk = {
+ .halt_reg = 0x1300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_aon_cc_tx_mclk_2x_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_aon_cc_tx_mclk_rcg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_aon_cc_tx_mclk_clk = {
+ .halt_reg = 0x13014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_aon_cc_tx_mclk_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_aon_cc_cdiv_tx_mclk_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc lpass_aon_cc_lpass_audio_hm_gdsc = {
+ .gdscr = 0x9090,
+ .pd = {
+ .name = "lpass_aon_cc_lpass_audio_hm_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *lpass_aon_cc_sc7280_clocks[] = {
+ [LPASS_AON_CC_AUDIO_HM_H_CLK] = &lpass_aon_cc_audio_hm_h_clk.clkr,
+ [LPASS_AON_CC_VA_MEM0_CLK] = &lpass_aon_cc_va_mem0_clk.clkr,
+ [LPASS_AON_CC_CDIV_TX_MCLK_DIV_CLK_SRC] = &lpass_aon_cc_cdiv_tx_mclk_div_clk_src.clkr,
+ [LPASS_AON_CC_MAIN_RCG_CLK_SRC] = &lpass_aon_cc_main_rcg_clk_src.clkr,
+ [LPASS_AON_CC_PLL] = &lpass_aon_cc_pll.clkr,
+ [LPASS_AON_CC_PLL_OUT_EVEN] = &lpass_aon_cc_pll_out_even.clkr,
+ [LPASS_AON_CC_PLL_OUT_MAIN_CDIV_DIV_CLK_SRC] =
+ &lpass_aon_cc_pll_out_main_cdiv_div_clk_src.clkr,
+ [LPASS_AON_CC_PLL_OUT_ODD] = &lpass_aon_cc_pll_out_odd.clkr,
+ [LPASS_AON_CC_TX_MCLK_2X_CLK] = &lpass_aon_cc_tx_mclk_2x_clk.clkr,
+ [LPASS_AON_CC_TX_MCLK_CLK] = &lpass_aon_cc_tx_mclk_clk.clkr,
+ [LPASS_AON_CC_TX_MCLK_RCG_CLK_SRC] = &lpass_aon_cc_tx_mclk_rcg_clk_src.clkr,
+};
+
+static struct gdsc *lpass_aon_cc_sc7280_gdscs[] = {
+ [LPASS_AON_CC_LPASS_AUDIO_HM_GDSC] = &lpass_aon_cc_lpass_audio_hm_gdsc,
+};
+
+static struct clk_regmap *lpass_audio_cc_sc7280_clocks[] = {
+ [LPASS_AUDIO_CC_CDIV_RX_MCLK_DIV_CLK_SRC] = &lpass_audio_cc_cdiv_rx_mclk_div_clk_src.clkr,
+ [LPASS_AUDIO_CC_CODEC_MEM0_CLK] = &lpass_audio_cc_codec_mem0_clk.clkr,
+ [LPASS_AUDIO_CC_CODEC_MEM1_CLK] = &lpass_audio_cc_codec_mem1_clk.clkr,
+ [LPASS_AUDIO_CC_CODEC_MEM2_CLK] = &lpass_audio_cc_codec_mem2_clk.clkr,
+ [LPASS_AUDIO_CC_CODEC_MEM_CLK] = &lpass_audio_cc_codec_mem_clk.clkr,
+ [LPASS_AUDIO_CC_EXT_MCLK0_CLK] = &lpass_audio_cc_ext_mclk0_clk.clkr,
+ [LPASS_AUDIO_CC_EXT_MCLK0_CLK_SRC] = &lpass_audio_cc_ext_mclk0_clk_src.clkr,
+ [LPASS_AUDIO_CC_EXT_MCLK1_CLK] = &lpass_audio_cc_ext_mclk1_clk.clkr,
+ [LPASS_AUDIO_CC_EXT_MCLK1_CLK_SRC] = &lpass_audio_cc_ext_mclk1_clk_src.clkr,
+ [LPASS_AUDIO_CC_PLL] = &lpass_audio_cc_pll.clkr,
+ [LPASS_AUDIO_CC_PLL_OUT_AUX2] = &lpass_audio_cc_pll_out_aux2.clkr,
+ [LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC] = &lpass_audio_cc_pll_out_aux2_div_clk_src.clkr,
+ [LPASS_AUDIO_CC_PLL_OUT_MAIN_DIV_CLK_SRC] = &lpass_audio_cc_pll_out_main_div_clk_src.clkr,
+ [LPASS_AUDIO_CC_RX_MCLK_2X_CLK] = &lpass_audio_cc_rx_mclk_2x_clk.clkr,
+ [LPASS_AUDIO_CC_RX_MCLK_CLK] = &lpass_audio_cc_rx_mclk_clk.clkr,
+ [LPASS_AUDIO_CC_RX_MCLK_CLK_SRC] = &lpass_audio_cc_rx_mclk_clk_src.clkr,
+};
+
+static struct regmap_config lpass_audio_cc_sc7280_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc lpass_audio_cc_sc7280_desc = {
+ .config = &lpass_audio_cc_sc7280_regmap_config,
+ .clks = lpass_audio_cc_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(lpass_audio_cc_sc7280_clocks),
+};
+
+static const struct of_device_id lpass_audio_cc_sc7280_match_table[] = {
+ { .compatible = "qcom,sc7280-lpassaudiocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpass_audio_cc_sc7280_match_table);
+
+static void lpassaudio_pm_runtime_disable(void *data)
+{
+ pm_runtime_disable(data);
+}
+
+static void lpassaudio_pm_clk_destroy(void *data)
+{
+ pm_clk_destroy(data);
+}
+
+static int lpassaudio_create_pm_clks(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_add_action_or_reset(&pdev->dev, lpassaudio_pm_runtime_disable, &pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&pdev->dev, lpassaudio_pm_clk_destroy, &pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_clk_add(&pdev->dev, "iface");
+ if (ret < 0)
+ dev_err(&pdev->dev, "failed to acquire iface clock\n");
+
+ return ret;
+}
+
+static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc;
+ struct regmap *regmap;
+ int ret;
+
+ ret = lpassaudio_create_pm_clks(pdev);
+ if (ret)
+ return ret;
+
+ lpass_audio_cc_sc7280_regmap_config.name = "lpassaudio_cc";
+ lpass_audio_cc_sc7280_regmap_config.max_register = 0x2f000;
+ desc = &lpass_audio_cc_sc7280_desc;
+
+ regmap = qcom_cc_map(pdev, desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_disable(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_zonda_pll_configure(&lpass_audio_cc_pll, regmap, &lpass_audio_cc_pll_config);
+
+ /* PLL settings */
+ regmap_write(regmap, 0x4, 0x3b);
+ regmap_write(regmap, 0x8, 0xff05);
+
+ ret = qcom_cc_really_probe(pdev, &lpass_audio_cc_sc7280_desc, regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC clocks\n");
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops lpass_audio_cc_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static struct platform_driver lpass_audio_cc_sc7280_driver = {
+ .probe = lpass_audio_cc_sc7280_probe,
+ .driver = {
+ .name = "lpass_audio_cc-sc7280",
+ .of_match_table = lpass_audio_cc_sc7280_match_table,
+ .pm = &lpass_audio_cc_pm_ops,
+ },
+};
+
+static const struct qcom_cc_desc lpass_aon_cc_sc7280_desc = {
+ .config = &lpass_audio_cc_sc7280_regmap_config,
+ .clks = lpass_aon_cc_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(lpass_aon_cc_sc7280_clocks),
+ .gdscs = lpass_aon_cc_sc7280_gdscs,
+ .num_gdscs = ARRAY_SIZE(lpass_aon_cc_sc7280_gdscs),
+};
+
+static const struct of_device_id lpass_aon_cc_sc7280_match_table[] = {
+ { .compatible = "qcom,sc7280-lpassaoncc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpass_aon_cc_sc7280_match_table);
+
+static int lpass_aon_cc_sc7280_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc;
+ struct regmap *regmap;
+ int ret;
+
+ ret = lpassaudio_create_pm_clks(pdev);
+ if (ret)
+ return ret;
+
+ lpass_audio_cc_sc7280_regmap_config.name = "lpasscc_aon";
+ lpass_audio_cc_sc7280_regmap_config.max_register = 0xa0008;
+ desc = &lpass_aon_cc_sc7280_desc;
+
+ regmap = qcom_cc_map(pdev, desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_pll_configure(&lpass_aon_cc_pll, regmap, &lpass_aon_cc_pll_config);
+
+ ret = qcom_cc_really_probe(pdev, &lpass_aon_cc_sc7280_desc, regmap);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to register LPASS AON CC clocks\n");
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver lpass_aon_cc_sc7280_driver = {
+ .probe = lpass_aon_cc_sc7280_probe,
+ .driver = {
+ .name = "lpass_aon_cc-sc7280",
+ .of_match_table = lpass_aon_cc_sc7280_match_table,
+ .pm = &lpass_audio_cc_pm_ops,
+ },
+};
+
+static int __init lpass_audio_cc_sc7280_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&lpass_aon_cc_sc7280_driver);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&lpass_audio_cc_sc7280_driver);
+}
+subsys_initcall(lpass_audio_cc_sc7280_init);
+
+static void __exit lpass_audio_cc_sc7280_exit(void)
+{
+ platform_driver_unregister(&lpass_audio_cc_sc7280_driver);
+ platform_driver_unregister(&lpass_aon_cc_sc7280_driver);
+}
+module_exit(lpass_audio_cc_sc7280_exit);
+
+MODULE_DESCRIPTION("QTI LPASS_AUDIO_CC SC7280 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/lpasscorecc-sc7280.c b/drivers/clk/qcom/lpasscorecc-sc7280.c
new file mode 100644
index 000000000000..1f1f1bd1b68e
--- /dev/null
+++ b/drivers/clk/qcom/lpasscorecc-sc7280.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lpasscorecc-sc7280.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+ P_BI_TCXO,
+ P_LPASS_CORE_CC_DIG_PLL_OUT_MAIN,
+ P_LPASS_CORE_CC_DIG_PLL_OUT_MAIN_DIV_CLK_SRC,
+ P_LPASS_CORE_CC_DIG_PLL_OUT_ODD,
+};
+
+static const struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+/* 614.4MHz configuration */
+static const struct alpha_pll_config lpass_core_cc_dig_pll_config = {
+ .l = 0x20,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0xB2923BBC,
+ .user_ctl_val = 0x00005100,
+ .user_ctl_hi_val = 0x00050805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll lpass_core_cc_dig_pll = {
+ .offset = 0x1000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_core_cc_dig_pll",
+ .parent_data = &(const struct clk_parent_data){
+ .index = 0,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_lpass_core_cc_dig_pll_out_odd[] = {
+ { 0x5, 5 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv lpass_core_cc_dig_pll_out_odd = {
+ .offset = 0x1000,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_lpass_core_cc_dig_pll_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_lpass_core_cc_dig_pll_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "lpass_core_cc_dig_pll_out_odd",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_core_cc_dig_pll.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct clk_regmap_div lpass_core_cc_dig_pll_out_main_div_clk_src = {
+ .reg = 0x1054,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "lpass_core_cc_dig_pll_out_main_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_core_cc_dig_pll.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+
+static const struct parent_map lpass_core_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 5 },
+};
+
+static const struct clk_parent_data lpass_core_cc_parent_data_0[] = {
+ { .index = 0 },
+ { .hw = &lpass_core_cc_dig_pll_out_odd.clkr.hw },
+};
+
+static const struct parent_map lpass_core_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_LPASS_CORE_CC_DIG_PLL_OUT_MAIN, 1 },
+ { P_LPASS_CORE_CC_DIG_PLL_OUT_MAIN_DIV_CLK_SRC, 2 },
+};
+
+static const struct clk_parent_data lpass_core_cc_parent_data_ao_2[] = {
+ { .index = 1 },
+ { .hw = &lpass_core_cc_dig_pll.clkr.hw },
+ { .hw = &lpass_core_cc_dig_pll_out_main_div_clk_src.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_lpass_core_cc_core_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(51200000, P_LPASS_CORE_CC_DIG_PLL_OUT_MAIN_DIV_CLK_SRC, 6, 0, 0),
+ F(102400000, P_LPASS_CORE_CC_DIG_PLL_OUT_MAIN_DIV_CLK_SRC, 3, 0, 0),
+ F(204800000, P_LPASS_CORE_CC_DIG_PLL_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 lpass_core_cc_core_clk_src = {
+ .cmd_rcgr = 0x1d000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = lpass_core_cc_parent_map_2,
+ .freq_tbl = ftbl_lpass_core_cc_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "lpass_core_cc_core_clk_src",
+ .parent_data = lpass_core_cc_parent_data_ao_2,
+ .num_parents = ARRAY_SIZE(lpass_core_cc_parent_data_ao_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_lpass_core_cc_ext_if0_clk_src[] = {
+ F(256000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 15, 1, 32),
+ F(512000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 15, 1, 16),
+ F(768000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 10, 1, 16),
+ F(1024000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 15, 1, 8),
+ F(1536000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 10, 1, 8),
+ F(2048000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 15, 1, 4),
+ F(3072000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 10, 1, 4),
+ F(4096000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 15, 1, 2),
+ F(6144000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 10, 1, 2),
+ F(8192000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 15, 0, 0),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(12288000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 10, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24576000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 lpass_core_cc_ext_if0_clk_src = {
+ .cmd_rcgr = 0x10000,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = lpass_core_cc_parent_map_0,
+ .freq_tbl = ftbl_lpass_core_cc_ext_if0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "lpass_core_cc_ext_if0_clk_src",
+ .parent_data = lpass_core_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(lpass_core_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 lpass_core_cc_ext_if1_clk_src = {
+ .cmd_rcgr = 0x11000,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = lpass_core_cc_parent_map_0,
+ .freq_tbl = ftbl_lpass_core_cc_ext_if0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "lpass_core_cc_ext_if1_clk_src",
+ .parent_data = lpass_core_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(lpass_core_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+
+static struct clk_branch lpass_core_cc_core_clk = {
+ .halt_reg = 0x1f000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1f000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_core_cc_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_core_cc_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_core_cc_ext_if0_ibit_clk = {
+ .halt_reg = 0x10018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_core_cc_ext_if0_ibit_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_core_cc_ext_if0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_core_cc_ext_if1_ibit_clk = {
+ .halt_reg = 0x11018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_core_cc_ext_if1_ibit_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_core_cc_ext_if1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_core_cc_lpm_core_clk = {
+ .halt_reg = 0x1e000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_core_cc_lpm_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_core_cc_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_core_cc_lpm_mem0_core_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_core_cc_lpm_mem0_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_core_cc_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_core_cc_sysnoc_mport_core_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x23000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_core_cc_sysnoc_mport_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_core_cc_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc lpass_core_cc_lpass_core_hm_gdsc = {
+ .gdscr = 0x0,
+ .pd = {
+ .name = "lpass_core_cc_lpass_core_hm_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *lpass_core_cc_sc7280_clocks[] = {
+ [LPASS_CORE_CC_CORE_CLK] = &lpass_core_cc_core_clk.clkr,
+ [LPASS_CORE_CC_CORE_CLK_SRC] = &lpass_core_cc_core_clk_src.clkr,
+ [LPASS_CORE_CC_DIG_PLL] = &lpass_core_cc_dig_pll.clkr,
+ [LPASS_CORE_CC_DIG_PLL_OUT_MAIN_DIV_CLK_SRC] =
+ &lpass_core_cc_dig_pll_out_main_div_clk_src.clkr,
+ [LPASS_CORE_CC_DIG_PLL_OUT_ODD] = &lpass_core_cc_dig_pll_out_odd.clkr,
+ [LPASS_CORE_CC_EXT_IF0_CLK_SRC] = &lpass_core_cc_ext_if0_clk_src.clkr,
+ [LPASS_CORE_CC_EXT_IF0_IBIT_CLK] = &lpass_core_cc_ext_if0_ibit_clk.clkr,
+ [LPASS_CORE_CC_EXT_IF1_CLK_SRC] = &lpass_core_cc_ext_if1_clk_src.clkr,
+ [LPASS_CORE_CC_EXT_IF1_IBIT_CLK] = &lpass_core_cc_ext_if1_ibit_clk.clkr,
+ [LPASS_CORE_CC_LPM_CORE_CLK] = &lpass_core_cc_lpm_core_clk.clkr,
+ [LPASS_CORE_CC_LPM_MEM0_CORE_CLK] = &lpass_core_cc_lpm_mem0_core_clk.clkr,
+ [LPASS_CORE_CC_SYSNOC_MPORT_CORE_CLK] = &lpass_core_cc_sysnoc_mport_core_clk.clkr,
+};
+
+static struct regmap_config lpass_core_cc_sc7280_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc lpass_core_cc_sc7280_desc = {
+ .config = &lpass_core_cc_sc7280_regmap_config,
+ .clks = lpass_core_cc_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(lpass_core_cc_sc7280_clocks),
+};
+
+static const struct of_device_id lpass_core_cc_sc7280_match_table[] = {
+ { .compatible = "qcom,sc7280-lpasscorecc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpass_core_cc_sc7280_match_table);
+
+static struct gdsc *lpass_core_hm_sc7280_gdscs[] = {
+ [LPASS_CORE_CC_LPASS_CORE_HM_GDSC] = &lpass_core_cc_lpass_core_hm_gdsc,
+};
+
+static const struct qcom_cc_desc lpass_core_hm_sc7280_desc = {
+ .config = &lpass_core_cc_sc7280_regmap_config,
+ .gdscs = lpass_core_hm_sc7280_gdscs,
+ .num_gdscs = ARRAY_SIZE(lpass_core_hm_sc7280_gdscs),
+};
+
+static int lpass_core_cc_sc7280_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc;
+ struct regmap *regmap;
+
+ lpass_core_cc_sc7280_regmap_config.name = "lpass_core_cc";
+ lpass_core_cc_sc7280_regmap_config.max_register = 0x4f004;
+ desc = &lpass_core_cc_sc7280_desc;
+
+ regmap = qcom_cc_map(pdev, desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_pll_configure(&lpass_core_cc_dig_pll, regmap, &lpass_core_cc_dig_pll_config);
+
+ return qcom_cc_really_probe(pdev, &lpass_core_cc_sc7280_desc, regmap);
+}
+
+static struct platform_driver lpass_core_cc_sc7280_driver = {
+ .probe = lpass_core_cc_sc7280_probe,
+ .driver = {
+ .name = "lpass_core_cc-sc7280",
+ .of_match_table = lpass_core_cc_sc7280_match_table,
+ },
+};
+
+static int lpass_hm_core_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc;
+
+ lpass_core_cc_sc7280_regmap_config.name = "lpass_hm_core";
+ lpass_core_cc_sc7280_regmap_config.max_register = 0x24;
+ desc = &lpass_core_hm_sc7280_desc;
+
+ return qcom_cc_probe_by_index(pdev, 0, desc);
+}
+
+static const struct of_device_id lpass_hm_sc7280_match_table[] = {
+ { .compatible = "qcom,sc7280-lpasshm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpass_hm_sc7280_match_table);
+
+static struct platform_driver lpass_hm_sc7280_driver = {
+ .probe = lpass_hm_core_probe,
+ .driver = {
+ .name = "lpass_hm-sc7280",
+ .of_match_table = lpass_hm_sc7280_match_table,
+ },
+};
+
+static int __init lpass_core_cc_sc7280_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&lpass_hm_sc7280_driver);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&lpass_core_cc_sc7280_driver);
+}
+subsys_initcall(lpass_core_cc_sc7280_init);
+
+static void __exit lpass_core_cc_sc7280_exit(void)
+{
+ platform_driver_unregister(&lpass_core_cc_sc7280_driver);
+ platform_driver_unregister(&lpass_hm_sc7280_driver);
+}
+module_exit(lpass_core_cc_sc7280_exit);
+
+MODULE_DESCRIPTION("QTI LPASS_CORE_CC SC7280 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index c281f3af5716..cacaf9b87d26 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -32,9 +32,12 @@ config CLK_RENESAS
select CLK_R8A77995 if ARCH_R8A77995
select CLK_R8A779A0 if ARCH_R8A779A0
select CLK_R8A779F0 if ARCH_R8A779F0
+ select CLK_R8A779G0 if ARCH_R8A779G0
select CLK_R9A06G032 if ARCH_R9A06G032
+ select CLK_R9A07G043 if ARCH_R9A07G043
select CLK_R9A07G044 if ARCH_R9A07G044
select CLK_R9A07G054 if ARCH_R9A07G054
+ select CLK_R9A09G011 if ARCH_R9A09G011
select CLK_SH73A0 if ARCH_SH73A0
if CLK_RENESAS
@@ -157,9 +160,17 @@ config CLK_R8A779F0
bool "R-Car S4-8 clock support" if COMPILE_TEST
select CLK_RCAR_GEN4_CPG
+config CLK_R8A779G0
+ bool "R-Car V4H clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN4_CPG
+
config CLK_R9A06G032
bool "RZ/N1D clock support" if COMPILE_TEST
+config CLK_R9A07G043
+ bool "RZ/G2UL clock support" if COMPILE_TEST
+ select CLK_RZG2L
+
config CLK_R9A07G044
bool "RZ/G2L clock support" if COMPILE_TEST
select CLK_RZG2L
@@ -168,6 +179,10 @@ config CLK_R9A07G054
bool "RZ/V2L clock support" if COMPILE_TEST
select CLK_RZG2L
+config CLK_R9A09G011
+ bool "RZ/V2M clock support" if COMPILE_TEST
+ select CLK_RZG2L
+
config CLK_SH73A0
bool "SH-Mobile AG5 clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
@@ -200,7 +215,7 @@ config CLK_RCAR_USB2_CLOCK_SEL
This is a driver for R-Car USB2 clock selector
config CLK_RZG2L
- bool "Renesas RZ/{G2L,V2L} family clock support" if COMPILE_TEST
+ bool "Renesas RZ/{G2L,G2UL,V2L} family clock support" if COMPILE_TEST
select RESET_CONTROLLER
# Generic
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index d5e571699a30..de907623fe3f 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -29,9 +29,12 @@ obj-$(CONFIG_CLK_R8A77990) += r8a77990-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o
obj-$(CONFIG_CLK_R8A779A0) += r8a779a0-cpg-mssr.o
obj-$(CONFIG_CLK_R8A779F0) += r8a779f0-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A779G0) += r8a779g0-cpg-mssr.o
obj-$(CONFIG_CLK_R9A06G032) += r9a06g032-clocks.o
+obj-$(CONFIG_CLK_R9A07G043) += r9a07g043-cpg.o
obj-$(CONFIG_CLK_R9A07G044) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A07G054) += r9a07g044-cpg.o
+obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
# Family
diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
index 95dd56b64d64..ad03c09ebc1f 100644
--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
@@ -68,12 +68,8 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
- DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
- DEF_BASE("rpc", R8A774A1_CLK_RPC, CLK_TYPE_GEN3_RPC,
- CLK_RPCSRC),
- DEF_BASE("rpcd2", R8A774A1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
- R8A774A1_CLK_RPC),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -109,6 +105,9 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = {
DEF_GEN3_SD("sd2", R8A774A1_CLK_SD2, R8A774A1_CLK_SD2H, 0x268),
DEF_GEN3_SD("sd3", R8A774A1_CLK_SD3, R8A774A1_CLK_SD3H, 0x26c),
+ DEF_BASE("rpc", R8A774A1_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A774A1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A774A1_CLK_RPC),
+
DEF_FIXED("cl", R8A774A1_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A774A1_CLK_CPEX, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/renesas/r8a774b1-cpg-mssr.c b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
index 56061b9b8437..ab087b02ef90 100644
--- a/drivers/clk/renesas/r8a774b1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
@@ -66,12 +66,8 @@ static const struct cpg_core_clk r8a774b1_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
- DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
- DEF_BASE("rpc", R8A774B1_CLK_RPC, CLK_TYPE_GEN3_RPC,
- CLK_RPCSRC),
- DEF_BASE("rpcd2", R8A774B1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
- R8A774B1_CLK_RPC),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -106,6 +102,9 @@ static const struct cpg_core_clk r8a774b1_core_clks[] __initconst = {
DEF_GEN3_SD("sd2", R8A774B1_CLK_SD2, R8A774B1_CLK_SD2H, 0x268),
DEF_GEN3_SD("sd3", R8A774B1_CLK_SD3, R8A774B1_CLK_SD3H, 0x26c),
+ DEF_BASE("rpc", R8A774B1_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A774B1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A774B1_CLK_RPC),
+
DEF_FIXED("cl", R8A774B1_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A774B1_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A774B1_CLK_CPEX, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
index b5eb5dc45d62..c9c8fde0f0a6 100644
--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
@@ -77,11 +77,6 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
DEF_FIXED_RPCSRC_E3(".rpcsrc", CLK_RPCSRC, CLK_PLL0, CLK_PLL1),
- DEF_BASE("rpc", R8A774C0_CLK_RPC, CLK_TYPE_GEN3_RPC,
- CLK_RPCSRC),
- DEF_BASE("rpcd2", R8A774C0_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
- R8A774C0_CLK_RPC),
-
DEF_DIV6_RO(".r", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000),
@@ -108,6 +103,9 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A774C0_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A774C0_CLK_S3D4, CLK_S3, 4, 1),
+ DEF_BASE("rpc", R8A774C0_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A774C0_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A774C0_CLK_RPC),
+
DEF_GEN3_SDH("sd0h", R8A774C0_CLK_SD0H, CLK_SDSRC, 0x0074),
DEF_GEN3_SDH("sd1h", R8A774C0_CLK_SD1H, CLK_SDSRC, 0x0078),
DEF_GEN3_SDH("sd3h", R8A774C0_CLK_SD3H, CLK_SDSRC, 0x026c),
diff --git a/drivers/clk/renesas/r8a774e1-cpg-mssr.c b/drivers/clk/renesas/r8a774e1-cpg-mssr.c
index 2950f0db90ae..a790061db877 100644
--- a/drivers/clk/renesas/r8a774e1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774e1-cpg-mssr.c
@@ -68,12 +68,8 @@ static const struct cpg_core_clk r8a774e1_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
- DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
- DEF_BASE("rpc", R8A774E1_CLK_RPC, CLK_TYPE_GEN3_RPC,
- CLK_RPCSRC),
- DEF_BASE("rpcd2", R8A774E1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
- R8A774E1_CLK_RPC),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -109,6 +105,9 @@ static const struct cpg_core_clk r8a774e1_core_clks[] __initconst = {
DEF_GEN3_SD("sd2", R8A774E1_CLK_SD2, R8A774E1_CLK_SD2H, 0x268),
DEF_GEN3_SD("sd3", R8A774E1_CLK_SD3, R8A774E1_CLK_SD3H, 0x26c),
+ DEF_BASE("rpc", R8A774E1_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A774E1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A774E1_CLK_RPC),
+
DEF_FIXED("cl", R8A774E1_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cr", R8A774E1_CLK_CR, CLK_PLL1_DIV4, 2, 1),
DEF_FIXED("cp", R8A774E1_CLK_CP, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index 991a44315d71..301475c74f50 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -71,12 +71,8 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
- DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
- DEF_BASE("rpc", R8A7795_CLK_RPC, CLK_TYPE_GEN3_RPC,
- CLK_RPCSRC),
- DEF_BASE("rpcd2", R8A7795_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
- R8A7795_CLK_RPC),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -113,6 +109,9 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, R8A7795_CLK_SD2H, 0x268),
DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, R8A7795_CLK_SD3H, 0x26c),
+ DEF_BASE("rpc", R8A7795_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A7795_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A7795_CLK_RPC),
+
DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cr", R8A7795_CLK_CR, CLK_PLL1_DIV4, 2, 1),
DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index 7950313611ef..c4969318508e 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -73,12 +73,8 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
- DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
- DEF_BASE("rpc", R8A7796_CLK_RPC, CLK_TYPE_GEN3_RPC,
- CLK_RPCSRC),
- DEF_BASE("rpcd2", R8A7796_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
- R8A7796_CLK_RPC),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -115,6 +111,9 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_GEN3_SD("sd2", R8A7796_CLK_SD2, R8A7796_CLK_SD2H, 0x268),
DEF_GEN3_SD("sd3", R8A7796_CLK_SD3, R8A7796_CLK_SD3H, 0x26c),
+ DEF_BASE("rpc", R8A7796_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A7796_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A7796_CLK_RPC),
+
DEF_FIXED("cl", R8A7796_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cr", R8A7796_CLK_CR, CLK_PLL1_DIV4, 2, 1),
DEF_FIXED("cp", R8A7796_CLK_CP, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index d687c29efa3c..78f6e530848e 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -69,12 +69,8 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
- DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
- DEF_BASE("rpc", R8A77965_CLK_RPC, CLK_TYPE_GEN3_RPC,
- CLK_RPCSRC),
- DEF_BASE("rpcd2", R8A77965_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
- R8A77965_CLK_RPC),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -110,6 +106,9 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
DEF_GEN3_SD("sd2", R8A77965_CLK_SD2, R8A77965_CLK_SD2H, 0x268),
DEF_GEN3_SD("sd3", R8A77965_CLK_SD3, R8A77965_CLK_SD3H, 0x26c),
+ DEF_BASE("rpc", R8A77965_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A77965_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A77965_CLK_RPC),
+
DEF_FIXED("cl", R8A77965_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cr", R8A77965_CLK_CR, CLK_PLL1_DIV4, 2, 1),
DEF_FIXED("cp", R8A77965_CLK_CP, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c
index f3cd64de4dc6..06f925aff407 100644
--- a/drivers/clk/renesas/r8a77980-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c
@@ -66,13 +66,10 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+
DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
- DEF_RATE(".oco", CLK_OCO, 32768),
- DEF_BASE("rpc", R8A77980_CLK_RPC, CLK_TYPE_GEN3_RPC,
- CLK_RPCSRC),
- DEF_BASE("rpcd2", R8A77980_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
- R8A77980_CLK_RPC),
+ DEF_RATE(".oco", CLK_OCO, 32768),
/* Core Clock Outputs */
DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
@@ -99,6 +96,9 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = {
DEF_GEN3_SDH("sd0h", R8A77980_CLK_SD0H, CLK_SDSRC, 0x0074),
DEF_GEN3_SD("sd0", R8A77980_CLK_SD0, R8A77980_CLK_SD0H, 0x0074),
+ DEF_BASE("rpc", R8A77980_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A77980_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A77980_CLK_RPC),
+
DEF_FIXED("cl", R8A77980_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A77980_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A77980_CLK_CPEX, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
index d34d97baab35..b666d099365e 100644
--- a/drivers/clk/renesas/r8a77990-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
@@ -44,6 +44,7 @@ enum clk_ids {
CLK_S2,
CLK_S3,
CLK_SDSRC,
+ CLK_RPCSRC,
CLK_RINT,
CLK_OCO,
@@ -74,6 +75,8 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = {
DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1),
+ DEF_FIXED_RPCSRC_E3(".rpcsrc", CLK_RPCSRC, CLK_PLL0, CLK_PLL1),
+
DEF_DIV6_RO(".r", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000),
@@ -107,6 +110,9 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = {
DEF_GEN3_SD("sd1", R8A77990_CLK_SD1, R8A77990_CLK_SD1H, 0x0078),
DEF_GEN3_SD("sd3", R8A77990_CLK_SD3, R8A77990_CLK_SD3H, 0x026c),
+ DEF_BASE("rpc", R8A77990_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A77990_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A77990_CLK_RPC),
+
DEF_FIXED("cl", R8A77990_CLK_CL, CLK_PLL1, 48, 1),
DEF_FIXED("cr", R8A77990_CLK_CR, CLK_PLL1D2, 2, 1),
DEF_FIXED("cp", R8A77990_CLK_CP, CLK_EXTAL, 2, 1),
@@ -215,6 +221,7 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
DEF_MOD("can-fd", 914, R8A77990_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A77990_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A77990_CLK_S3D4),
+ DEF_MOD("rpc-if", 917, R8A77990_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A77990_CLK_S3D2),
DEF_MOD("i2c5", 919, R8A77990_CLK_S3D2),
DEF_MOD("i2c-dvfs", 926, R8A77990_CLK_CP),
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
index 525eef197fd9..24ba9093a72f 100644
--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -42,6 +42,7 @@ enum clk_ids {
CLK_S2,
CLK_S3,
CLK_SDSRC,
+ CLK_RPCSRC,
CLK_RINT,
CLK_OCO,
@@ -70,6 +71,8 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1),
+ DEF_FIXED_RPCSRC_D3(".rpcsrc", CLK_RPCSRC, CLK_PLL0, CLK_PLL1),
+
DEF_DIV6_RO(".r", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000),
@@ -103,8 +106,11 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
DEF_GEN3_PE("s3d2c", R8A77995_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2),
DEF_GEN3_PE("s3d4c", R8A77995_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4),
- DEF_GEN3_SDH("sd0h", R8A77995_CLK_SD0H, CLK_SDSRC, 0x268),
- DEF_GEN3_SD("sd0", R8A77995_CLK_SD0, R8A77995_CLK_SD0H, 0x268),
+ DEF_GEN3_SDH("sd0h", R8A77995_CLK_SD0H, CLK_SDSRC, 0x268),
+ DEF_GEN3_SD("sd0", R8A77995_CLK_SD0, R8A77995_CLK_SD0H, 0x268),
+
+ DEF_BASE("rpc", R8A77995_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A77995_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A77995_CLK_RPC),
DEF_DIV6P1("canfd", R8A77995_CLK_CANFD, CLK_PLL0D3, 0x244),
DEF_DIV6P1("mso", R8A77995_CLK_MSO, CLK_PLL1D2, 0x014),
@@ -174,6 +180,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
DEF_MOD("can-fd", 914, R8A77995_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A77995_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A77995_CLK_S3D4),
+ DEF_MOD("rpc-if", 917, R8A77995_CLK_RPCD2),
DEF_MOD("i2c3", 928, R8A77995_CLK_S3D2),
DEF_MOD("i2c2", 929, R8A77995_CLK_S3D2),
DEF_MOD("i2c1", 930, R8A77995_CLK_S3D2),
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index fadd8a1718c6..d74d46833012 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -85,11 +85,10 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 2, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL5_DIV4, 1, 1),
+
DEF_RATE(".oco", CLK_OCO, 32768),
- DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5),
- DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
- DEF_BASE("rpcd2", R8A779A0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2,
- R8A779A0_CLK_RPC),
+
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5),
/* Core Clock Outputs */
DEF_GEN4_Z("z0", R8A779A0_CLK_Z0, CLK_TYPE_GEN4_Z, CLK_PLL20, 2, 0),
@@ -120,6 +119,10 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_GEN4_SDH("sdh0", R8A779A0_CLK_SD0H, CLK_SDSRC, 0x870),
DEF_GEN4_SD("sd0", R8A779A0_CLK_SD0, R8A779A0_CLK_SD0H, 0x870),
+ DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A779A0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2,
+ R8A779A0_CLK_RPC),
+
DEF_DIV6P1("mso", R8A779A0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
DEF_DIV6P1("csi0", R8A779A0_CLK_CSI0, CLK_PLL5_DIV4, 0x880),
@@ -241,7 +244,7 @@ static const unsigned int r8a779a0_crit_mod_clks[] __initconst = {
/*
* MD EXTAL PLL1 PLL20 PLL30 PLL4 PLL5 OSC
* 14 13 (MHz) 21 31
- * --------------------------------------------------------
+ * ----------------------------------------------------------------
* 0 0 16.66 x 1 x128 x216 x128 x144 x192 /16
* 0 1 20 x 1 x106 x180 x106 x120 x160 /19
* 1 0 Prohibited setting
@@ -250,11 +253,11 @@ static const unsigned int r8a779a0_crit_mod_clks[] __initconst = {
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
(((md) & BIT(13)) >> 13))
static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
- /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
- { 1, 128, 1, 0, 0, 0, 0, 192, 1, 0, 0, 16, },
- { 1, 106, 1, 0, 0, 0, 0, 160, 1, 0, 0, 19, },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- { 2, 128, 1, 0, 0, 0, 0, 192, 1, 0, 0, 32, },
+ /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
+ { 1, 128, 1, 0, 0, 0, 0, 144, 1, 192, 1, 0, 0, 16, },
+ { 1, 106, 1, 0, 0, 0, 0, 120, 1, 160, 1, 0, 0, 19, },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ { 2, 128, 1, 0, 0, 0, 0, 144, 1, 192, 1, 0, 0, 32, },
};
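
A quick check of the index macro against the values above: with mode pins MD14 = 0 and MD13 = 1 (20 MHz EXTAL), CPG_PLL_CONFIG_INDEX() evaluates to (0 | (BIT(13) >> 13)) = 1, so the second row of cpg_pll_configs[] is used: EXTAL divider 1, PLL1 x106, PLL4 x120, PLL5 x160 and OSC prescaler /19, matching the "0 1" line of the mode-pin comment.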
diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
index 76b441965037..c17ebe6b5992 100644
--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
@@ -70,12 +70,11 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
DEF_FIXED(".pll5_div4", CLK_PLL5_DIV4, CLK_PLL5_DIV2, 2, 1),
DEF_FIXED(".pll6_div2", CLK_PLL6_DIV2, CLK_PLL6, 2, 1),
DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1),
+
DEF_BASE(".sdsrc", CLK_SDSRC, CLK_TYPE_GEN4_SDSRC, CLK_PLL5),
DEF_RATE(".oco", CLK_OCO, 32768),
- DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5),
- DEF_BASE(".rpc", R8A779F0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
- DEF_BASE("rpcd2", R8A779F0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779F0_CLK_RPC),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5),
/* Core Clock Outputs */
DEF_FIXED("s0d2", R8A779F0_CLK_S0D2, CLK_S0, 2, 1),
@@ -108,6 +107,10 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
DEF_FIXED("cpex", R8A779F0_CLK_CPEX, CLK_EXTAL, 2, 1),
DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, CLK_SDSRC, 0x870),
+
+ DEF_BASE("rpc", R8A779F0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A779F0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779F0_CLK_RPC),
+
DEF_DIV6P1("mso", R8A779F0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
DEF_GEN4_OSC("osc", R8A779F0_CLK_OSC, CLK_EXTAL, 8),
@@ -129,6 +132,7 @@ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
DEF_MOD("sys-dmac1", 710, R8A779F0_CLK_S0D3_PER),
DEF_MOD("wdt", 907, R8A779F0_CLK_R),
DEF_MOD("pfc0", 915, R8A779F0_CLK_CL16M),
+ DEF_MOD("ufs", 1514, R8A779F0_CLK_S0D4_HSC),
};
static const unsigned int r8a779f0_crit_mod_clks[] __initconst = {
@@ -139,23 +143,23 @@ static const unsigned int r8a779f0_crit_mod_clks[] __initconst = {
* CPG Clock Data
*/
/*
- * MD EXTAL PLL1 PLL2 PLL3 PLL5 PLL6 OSC
+ * MD EXTAL PLL1 PLL2 PLL3 PLL4 PLL5 PLL6 OSC
* 14 13 (MHz)
- * ----------------------------------------------------------------
- * 0 0 16 / 1 x200 x150 x200 x200 x134 /15
- * 0 1 20 / 1 x160 x120 x160 x160 x106 /19
+ * ------------------------------------------------------------------------
+ * 0 0 16 / 1 x200 x150 x200 n/a x200 x134 /15
+ * 0 1 20 / 1 x160 x120 x160 n/a x160 x106 /19
* 1 0 Prohibited setting
- * 1 1 40 / 2 x160 x120 x160 x160 x106 /38
+ * 1 1 40 / 2 x160 x120 x160 n/a x160 x106 /38
*/
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
(((md) & BIT(13)) >> 13))
static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
- /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
- { 1, 200, 1, 150, 1, 200, 1, 200, 1, 134, 1, 15, },
- { 1, 160, 1, 120, 1, 160, 1, 160, 1, 106, 1, 19, },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- { 2, 160, 1, 120, 1, 160, 1, 160, 1, 106, 1, 38, },
+ /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
+ { 1, 200, 1, 150, 1, 200, 1, 0, 0, 200, 1, 134, 1, 15, },
+ { 1, 160, 1, 120, 1, 160, 1, 0, 0, 160, 1, 106, 1, 19, },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ { 2, 160, 1, 120, 1, 160, 1, 0, 0, 160, 1, 106, 1, 38, },
};
static int __init r8a779f0_cpg_mssr_init(struct device *dev)
diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
new file mode 100644
index 000000000000..3fc4233b1ead
--- /dev/null
+++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a779g0 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ *
+ * Based on r8a779f0-cpg-mssr.c
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a779g0-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen4-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A779G0_CLK_R,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_EXTALR,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL1,
+ CLK_PLL2,
+ CLK_PLL3,
+ CLK_PLL4,
+ CLK_PLL5,
+ CLK_PLL6,
+ CLK_PLL1_DIV2,
+ CLK_PLL2_DIV2,
+ CLK_PLL3_DIV2,
+ CLK_PLL4_DIV2,
+ CLK_PLL5_DIV2,
+ CLK_PLL5_DIV4,
+ CLK_PLL6_DIV2,
+ CLK_S0,
+ CLK_S0_VIO,
+ CLK_S0_VC,
+ CLK_S0_HSC,
+ CLK_SV_VIP,
+ CLK_SV_IR,
+ CLK_SDSRC,
+ CLK_RPCSRC,
+ CLK_VIO,
+ CLK_VC,
+ CLK_OCO,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("extalr", CLK_EXTALR),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
+ DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN),
+ DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN4_PLL4, CLK_MAIN),
+ DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
+ DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1),
+ DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 2, 1),
+ DEF_FIXED(".pll4_div2", CLK_PLL4_DIV2, CLK_PLL4, 2, 1),
+ DEF_FIXED(".pll5_div2", CLK_PLL5_DIV2, CLK_PLL5, 2, 1),
+ DEF_FIXED(".pll5_div4", CLK_PLL5_DIV4, CLK_PLL5_DIV2, 2, 1),
+ DEF_FIXED(".pll6_div2", CLK_PLL6_DIV2, CLK_PLL6, 2, 1),
+ DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s0_vio", CLK_S0_VIO, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s0_vc", CLK_S0_VC, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s0_hsc", CLK_S0_HSC, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".sv_vip", CLK_SV_VIP, CLK_PLL1, 5, 1),
+ DEF_FIXED(".sv_ir", CLK_SV_IR, CLK_PLL1, 5, 1),
+ DEF_BASE(".sdsrc", CLK_SDSRC, CLK_TYPE_GEN4_SDSRC, CLK_PLL5),
+ DEF_RATE(".oco", CLK_OCO, 32768),
+
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5),
+ DEF_FIXED(".vio", CLK_VIO, CLK_PLL5_DIV2, 3, 1),
+ DEF_FIXED(".vc", CLK_VC, CLK_PLL5_DIV2, 3, 1),
+
+ /* Core Clock Outputs */
+ DEF_FIXED("s0d2", R8A779G0_CLK_S0D2, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3", R8A779G0_CLK_S0D3, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4", R8A779G0_CLK_S0D4, CLK_S0, 4, 1),
+ DEF_FIXED("cl16m", R8A779G0_CLK_CL16M, CLK_S0, 48, 1),
+ DEF_FIXED("s0d1_vio", R8A779G0_CLK_S0D1_VIO, CLK_S0_VIO, 1, 1),
+ DEF_FIXED("s0d2_vio", R8A779G0_CLK_S0D2_VIO, CLK_S0_VIO, 2, 1),
+ DEF_FIXED("s0d4_vio", R8A779G0_CLK_S0D4_VIO, CLK_S0_VIO, 4, 1),
+ DEF_FIXED("s0d8_vio", R8A779G0_CLK_S0D8_VIO, CLK_S0_VIO, 8, 1),
+ DEF_FIXED("s0d1_vc", R8A779G0_CLK_S0D1_VC, CLK_S0_VC, 1, 1),
+ DEF_FIXED("s0d2_vc", R8A779G0_CLK_S0D2_VC, CLK_S0_VC, 2, 1),
+ DEF_FIXED("s0d4_vc", R8A779G0_CLK_S0D4_VC, CLK_S0_VC, 4, 1),
+ DEF_FIXED("s0d2_mm", R8A779G0_CLK_S0D2_MM, CLK_S0, 2, 1),
+ DEF_FIXED("s0d4_mm", R8A779G0_CLK_S0D4_MM, CLK_S0, 4, 1),
+ DEF_FIXED("cl16m_mm", R8A779G0_CLK_CL16M_MM, CLK_S0, 48, 1),
+ DEF_FIXED("s0d2_u3dg", R8A779G0_CLK_S0D2_U3DG, CLK_S0, 2, 1),
+ DEF_FIXED("s0d4_u3dg", R8A779G0_CLK_S0D4_U3DG, CLK_S0, 4, 1),
+ DEF_FIXED("s0d2_rt", R8A779G0_CLK_S0D2_RT, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3_rt", R8A779G0_CLK_S0D3_RT, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4_rt", R8A779G0_CLK_S0D4_RT, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6_rt", R8A779G0_CLK_S0D6_RT, CLK_S0, 6, 1),
+ DEF_FIXED("s0d24_rt", R8A779G0_CLK_S0D24_RT, CLK_S0, 24, 1),
+ DEF_FIXED("cl16m_rt", R8A779G0_CLK_CL16M_RT, CLK_S0, 48, 1),
+ DEF_FIXED("s0d2_per", R8A779G0_CLK_S0D2_PER, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3_per", R8A779G0_CLK_S0D3_PER, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4_per", R8A779G0_CLK_S0D4_PER, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6_per", R8A779G0_CLK_S0D6_PER, CLK_S0, 6, 1),
+ DEF_FIXED("s0d12_per", R8A779G0_CLK_S0D12_PER, CLK_S0, 12, 1),
+ DEF_FIXED("s0d24_per", R8A779G0_CLK_S0D24_PER, CLK_S0, 24, 1),
+ DEF_FIXED("cl16m_per", R8A779G0_CLK_CL16M_PER, CLK_S0, 48, 1),
+ DEF_FIXED("s0d1_hsc", R8A779G0_CLK_S0D1_HSC, CLK_S0_HSC, 1, 1),
+ DEF_FIXED("s0d2_hsc", R8A779G0_CLK_S0D2_HSC, CLK_S0_HSC, 2, 1),
+ DEF_FIXED("s0d4_hsc", R8A779G0_CLK_S0D4_HSC, CLK_S0_HSC, 4, 1),
+ DEF_FIXED("cl16m_hsc", R8A779G0_CLK_CL16M_HSC, CLK_S0_HSC, 48, 1),
+ DEF_FIXED("s0d2_cc", R8A779G0_CLK_S0D2_CC, CLK_S0, 2, 1),
+ DEF_FIXED("svd1_ir", R8A779G0_CLK_SVD1_IR, CLK_SV_IR, 1, 1),
+ DEF_FIXED("svd2_ir", R8A779G0_CLK_SVD2_IR, CLK_SV_IR, 2, 1),
+ DEF_FIXED("svd1_vip", R8A779G0_CLK_SVD1_VIP, CLK_SV_VIP, 1, 1),
+ DEF_FIXED("svd2_vip", R8A779G0_CLK_SVD2_VIP, CLK_SV_VIP, 2, 1),
+ DEF_FIXED("cbfusa", R8A779G0_CLK_CBFUSA, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A779G0_CLK_CPEX, CLK_EXTAL, 2, 1),
+ DEF_FIXED("viobus", R8A779G0_CLK_VIOBUS, CLK_VIO, 1, 1),
+ DEF_FIXED("viobusd2", R8A779G0_CLK_VIOBUSD2, CLK_VIO, 2, 1),
+ DEF_FIXED("vcbus", R8A779G0_CLK_VCBUS, CLK_VC, 1, 1),
+ DEF_FIXED("vcbusd2", R8A779G0_CLK_VCBUSD2, CLK_VC, 2, 1),
+
+ DEF_GEN4_SD("sd0", R8A779G0_CLK_SD0, CLK_SDSRC, 0x870),
+ DEF_DIV6P1("mso", R8A779G0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
+
+ DEF_BASE("rpc", R8A779G0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A779G0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779G0_CLK_RPC),
+
+ DEF_GEN4_OSC("osc", R8A779G0_CLK_OSC, CLK_EXTAL, 8),
+ DEF_GEN4_MDSEL("r", R8A779G0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
+};
+
+static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
+ DEF_MOD("hscif0", 514, R8A779G0_CLK_S0D3_PER),
+ DEF_MOD("hscif1", 515, R8A779G0_CLK_S0D3_PER),
+ DEF_MOD("hscif2", 516, R8A779G0_CLK_S0D3_PER),
+ DEF_MOD("hscif3", 517, R8A779G0_CLK_S0D3_PER),
+};
+
+/*
+ * CPG Clock Data
+ */
+/*
+ * MD EXTAL PLL1 PLL2 PLL3 PLL4 PLL5 PLL6 OSC
+ * 14 13 (MHz)
+ * ------------------------------------------------------------------------
+ * 0 0 16.66 / 1 x192 x204 x192 x144 x192 x168 /15
+ * 0 1 20 / 1 x160 x170 x160 x120 x160 x140 /19
+ * 1 0 Prohibited setting
+ * 1 1 33.33 / 2 x192 x204 x192 x144 x192 x168 /38
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
+ (((md) & BIT(13)) >> 13))
+
+static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
+ /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
+ { 1, 192, 1, 204, 1, 192, 1, 144, 1, 192, 1, 168, 1, 15, },
+ { 1, 160, 1, 170, 1, 160, 1, 120, 1, 160, 1, 140, 1, 19, },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ { 2, 192, 1, 204, 1, 192, 1, 144, 1, 192, 1, 168, 1, 38, },
+};
+
+static int __init r8a779g0_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen4_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+ if (!cpg_pll_config->extal_div) {
+ dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode);
+ return -EINVAL;
+ }
+
+ return rcar_gen4_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a779g0_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a779g0_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a779g0_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a779g0_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a779g0_mod_clks),
+ .num_hw_mod_clks = 30 * 32,
+
+ /* Callbacks */
+ .init = r8a779g0_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen4_cpg_clk_register,
+
+ .reg_layout = CLK_REG_LAYOUT_RCAR_GEN4,
+};
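
As elsewhere in the CPG/MSSR drivers, each module-clock number encodes its control register and bit: DEF_MOD("hscif0", 514, ...) means MSTP register 5, bit 14 (number = register x 100 + bit), and .num_hw_mod_clks = 30 * 32 reserves space for 30 such 32-bit registers. The HSCIF parent R8A779G0_CLK_S0D3_PER is the s0d3_per core clock defined in the table above.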
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index c99942f0e4d4..35ffc462af1a 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -16,13 +16,17 @@
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
+#include <linux/soc/renesas/r9a06g032-sysctrl.h>
#include <linux/spinlock.h>
#include <dt-bindings/clock/r9a06g032-sysctrl.h>
+#define R9A06G032_SYSCTRL_DMAMUX 0xA0
+
struct r9a06g032_gate {
u16 gate, reset, ready, midle,
scon, mirack, mistat;
@@ -256,7 +260,7 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = {
D_MODULE(HCLK_QSPI0, "hclk_qspi0", CLK_REF_SYNC_D4, 0x2a0, 0x2a1, 0x2a2, 0x2a3, 0x300, 0x301, 0x302),
D_MODULE(HCLK_QSPI1, "hclk_qspi1", CLK_REF_SYNC_D4, 0x480, 0x481, 0x482, 0x483, 0x4c0, 0x4c1, 0x4c2),
D_MODULE(HCLK_ROM, "hclk_rom", CLK_REF_SYNC_D4, 0xaa0, 0xaa1, 0xaa2, 0, 0xb80, 0, 0),
- D_MODULE(HCLK_RTC, "hclk_rtc", CLK_REF_SYNC_D8, 0xa00, 0, 0, 0, 0, 0, 0),
+ D_MODULE(HCLK_RTC, "hclk_rtc", CLK_REF_SYNC_D8, 0xa00, 0xa03, 0, 0xa02, 0, 0, 0),
D_MODULE(HCLK_SDIO0, "hclk_sdio0", CLK_REF_SYNC_D4, 0x60, 0x61, 0x62, 0x63, 0x80, 0x81, 0x82),
D_MODULE(HCLK_SDIO1, "hclk_sdio1", CLK_REF_SYNC_D4, 0x640, 0x641, 0x642, 0x643, 0x660, 0x661, 0x662),
D_MODULE(HCLK_SEMAP, "hclk_semap", CLK_REF_SYNC_D4, 0x7a3, 0x7a4, 0x7a5, 0, 0xb21, 0, 0),
@@ -315,6 +319,30 @@ struct r9a06g032_priv {
void __iomem *reg;
};
+static struct r9a06g032_priv *sysctrl_priv;
+
+/* Exported helper to access the DMAMUX register */
+int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val)
+{
+ unsigned long flags;
+ u32 dmamux;
+
+ if (!sysctrl_priv)
+ return -EPROBE_DEFER;
+
+ spin_lock_irqsave(&sysctrl_priv->lock, flags);
+
+ dmamux = readl(sysctrl_priv->reg + R9A06G032_SYSCTRL_DMAMUX);
+ dmamux &= ~mask;
+ dmamux |= val & mask;
+ writel(dmamux, sysctrl_priv->reg + R9A06G032_SYSCTRL_DMAMUX);
+
+ spin_unlock_irqrestore(&sysctrl_priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(r9a06g032_sysctrl_set_dmamux);
+
/* register/bit pairs are encoded as an uint16_t */
static void
clk_rdesc_set(struct r9a06g032_priv *clocks,
@@ -963,7 +991,17 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
if (error)
return error;
- return r9a06g032_add_clk_domain(dev);
+ error = r9a06g032_add_clk_domain(dev);
+ if (error)
+ return error;
+
+ sysctrl_priv = clocks;
+
+ error = of_platform_populate(np, NULL, NULL, dev);
+ if (error)
+ dev_err(dev, "Failed to populate children (%d)\n", error);
+
+ return 0;
}
static const struct of_device_id r9a06g032_match[] = {
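
The exported r9a06g032_sysctrl_set_dmamux() helper gives other drivers a lock-protected read-modify-write of the DMAMUX register at offset 0xA0 in the sysctrl block, returning -EPROBE_DEFER until the clock driver has probed and stored sysctrl_priv. A minimal consumer sketch under that assumption (the function name and the chosen bit are hypothetical; only the helper itself comes from this file):

#include <linux/bits.h>
#include <linux/types.h>
#include <linux/soc/renesas/r9a06g032-sysctrl.h>

/* Route one (illustrative) DMA request line through the alternate mux input. */
static int example_select_dma_request(void)
{
	u32 mask = BIT(5);	/* bit position chosen only for the example */

	/* Returns -EPROBE_DEFER until the sysctrl/clock driver has probed. */
	return r9a06g032_sysctrl_set_dmamux(mask, mask);
}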
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
new file mode 100644
index 000000000000..33c2bd8df2e5
--- /dev/null
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/G2UL CPG driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/r9a07g043-cpg.h>
+
+#include "rzg2l-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R9A07G043_CLK_P0_DIV2,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_OSC_DIV1000,
+ CLK_PLL1,
+ CLK_PLL2,
+ CLK_PLL2_DIV2,
+ CLK_PLL2_DIV2_8,
+ CLK_PLL2_DIV2_10,
+ CLK_PLL3,
+ CLK_PLL3_400,
+ CLK_PLL3_533,
+ CLK_PLL3_DIV2,
+ CLK_PLL3_DIV2_4,
+ CLK_PLL3_DIV2_4_2,
+ CLK_SEL_PLL3_3,
+ CLK_DIV_PLL3_C,
+ CLK_PLL5,
+ CLK_PLL5_500,
+ CLK_PLL5_250,
+ CLK_PLL6,
+ CLK_PLL6_250,
+ CLK_P1_DIV2,
+ CLK_PLL2_800,
+ CLK_PLL2_SDHI_533,
+ CLK_PLL2_SDHI_400,
+ CLK_PLL2_SDHI_266,
+ CLK_SD0_DIV4,
+ CLK_SD1_DIV4,
+
+ /* Module Clocks */
+ MOD_CLK_BASE,
+};
+
+/* Divider tables */
+static const struct clk_div_table dtable_1_8[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_1_32[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {4, 32},
+ {0, 0},
+};
+
+/* Mux clock tables */
+static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
+static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" };
+static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" };
+
+static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_FIXED(".osc", R9A07G043_OSCCLK, CLK_EXTAL, 1, 1),
+ DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000),
+ DEF_SAMPLL(".pll1", CLK_PLL1, CLK_EXTAL, PLL146_CONF(0)),
+ DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 200, 3),
+ DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 1, 2),
+ DEF_FIXED(".clk_800", CLK_PLL2_800, CLK_PLL2, 1, 2),
+ DEF_FIXED(".clk_533", CLK_PLL2_SDHI_533, CLK_PLL2, 1, 3),
+ DEF_FIXED(".clk_400", CLK_PLL2_SDHI_400, CLK_PLL2_800, 1, 2),
+ DEF_FIXED(".clk_266", CLK_PLL2_SDHI_266, CLK_PLL2_SDHI_533, 1, 2),
+ DEF_FIXED(".pll2_div2_8", CLK_PLL2_DIV2_8, CLK_PLL2_DIV2, 1, 8),
+ DEF_FIXED(".pll2_div2_10", CLK_PLL2_DIV2_10, CLK_PLL2_DIV2, 1, 10),
+ DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 200, 3),
+ DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 1, 2),
+ DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4),
+ DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2),
+ DEF_FIXED(".pll3_400", CLK_PLL3_400, CLK_PLL3, 1, 4),
+ DEF_FIXED(".pll3_533", CLK_PLL3_533, CLK_PLL3, 1, 3),
+ DEF_MUX_RO(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3, sel_pll3_3),
+ DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3, DIVPL3C, dtable_1_32),
+ DEF_FIXED(".pll5", CLK_PLL5, CLK_EXTAL, 125, 1),
+ DEF_FIXED(".pll5_500", CLK_PLL5_500, CLK_PLL5, 1, 6),
+ DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_500, 1, 2),
+ DEF_FIXED(".pll6", CLK_PLL6, CLK_EXTAL, 125, 6),
+ DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2),
+
+ /* Core output clk */
+ DEF_DIV("I", R9A07G043_CLK_I, CLK_PLL1, DIVPL1A, dtable_1_8),
+ DEF_DIV("P0", R9A07G043_CLK_P0, CLK_PLL2_DIV2_8, DIVPL2A, dtable_1_32),
+ DEF_FIXED("P0_DIV2", R9A07G043_CLK_P0_DIV2, R9A07G043_CLK_P0, 1, 2),
+ DEF_FIXED("TSU", R9A07G043_CLK_TSU, CLK_PLL2_DIV2_10, 1, 1),
+ DEF_DIV("P1", R9A07G043_CLK_P1, CLK_PLL3_DIV2_4, DIVPL3B, dtable_1_32),
+ DEF_FIXED("P1_DIV2", CLK_P1_DIV2, R9A07G043_CLK_P1, 1, 2),
+ DEF_DIV("P2", R9A07G043_CLK_P2, CLK_PLL3_DIV2_4_2, DIVPL3A, dtable_1_32),
+ DEF_FIXED("M0", R9A07G043_CLK_M0, CLK_PLL3_DIV2_4, 1, 1),
+ DEF_FIXED("ZT", R9A07G043_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1),
+ DEF_MUX("HP", R9A07G043_CLK_HP, SEL_PLL6_2, sel_pll6_2),
+ DEF_FIXED("SPI0", R9A07G043_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
+ DEF_FIXED("SPI1", R9A07G043_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
+ DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, sel_shdi),
+ DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, sel_shdi),
+ DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G043_CLK_SD0, 1, 4),
+ DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G043_CLK_SD1, 1, 4),
+};
+
+static struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
+ DEF_MOD("gic", R9A07G043_GIC600_GICCLK, R9A07G043_CLK_P1,
+ 0x514, 0),
+ DEF_MOD("ia55_pclk", R9A07G043_IA55_PCLK, R9A07G043_CLK_P2,
+ 0x518, 0),
+ DEF_MOD("ia55_clk", R9A07G043_IA55_CLK, R9A07G043_CLK_P1,
+ 0x518, 1),
+ DEF_MOD("dmac_aclk", R9A07G043_DMAC_ACLK, R9A07G043_CLK_P1,
+ 0x52c, 0),
+ DEF_MOD("dmac_pclk", R9A07G043_DMAC_PCLK, CLK_P1_DIV2,
+ 0x52c, 1),
+ DEF_MOD("ostm0_pclk", R9A07G043_OSTM0_PCLK, R9A07G043_CLK_P0,
+ 0x534, 0),
+ DEF_MOD("ostm1_pclk", R9A07G043_OSTM1_PCLK, R9A07G043_CLK_P0,
+ 0x534, 1),
+ DEF_MOD("ostm2_pclk", R9A07G043_OSTM2_PCLK, R9A07G043_CLK_P0,
+ 0x534, 2),
+ DEF_MOD("wdt0_pclk", R9A07G043_WDT0_PCLK, R9A07G043_CLK_P0,
+ 0x548, 0),
+ DEF_MOD("wdt0_clk", R9A07G043_WDT0_CLK, R9A07G043_OSCCLK,
+ 0x548, 1),
+ DEF_MOD("wdt2_pclk", R9A07G043_WDT2_PCLK, R9A07G043_CLK_P0,
+ 0x548, 4),
+ DEF_MOD("wdt2_clk", R9A07G043_WDT2_CLK, R9A07G043_OSCCLK,
+ 0x548, 5),
+ DEF_MOD("spi_clk2", R9A07G043_SPI_CLK2, R9A07G043_CLK_SPI1,
+ 0x550, 0),
+ DEF_MOD("spi_clk", R9A07G043_SPI_CLK, R9A07G043_CLK_SPI0,
+ 0x550, 1),
+ DEF_MOD("sdhi0_imclk", R9A07G043_SDHI0_IMCLK, CLK_SD0_DIV4,
+ 0x554, 0),
+ DEF_MOD("sdhi0_imclk2", R9A07G043_SDHI0_IMCLK2, CLK_SD0_DIV4,
+ 0x554, 1),
+ DEF_MOD("sdhi0_clk_hs", R9A07G043_SDHI0_CLK_HS, R9A07G043_CLK_SD0,
+ 0x554, 2),
+ DEF_MOD("sdhi0_aclk", R9A07G043_SDHI0_ACLK, R9A07G043_CLK_P1,
+ 0x554, 3),
+ DEF_MOD("sdhi1_imclk", R9A07G043_SDHI1_IMCLK, CLK_SD1_DIV4,
+ 0x554, 4),
+ DEF_MOD("sdhi1_imclk2", R9A07G043_SDHI1_IMCLK2, CLK_SD1_DIV4,
+ 0x554, 5),
+ DEF_MOD("sdhi1_clk_hs", R9A07G043_SDHI1_CLK_HS, R9A07G043_CLK_SD1,
+ 0x554, 6),
+ DEF_MOD("sdhi1_aclk", R9A07G043_SDHI1_ACLK, R9A07G043_CLK_P1,
+ 0x554, 7),
+ DEF_MOD("ssi0_pclk", R9A07G043_SSI0_PCLK2, R9A07G043_CLK_P0,
+ 0x570, 0),
+ DEF_MOD("ssi0_sfr", R9A07G043_SSI0_PCLK_SFR, R9A07G043_CLK_P0,
+ 0x570, 1),
+ DEF_MOD("ssi1_pclk", R9A07G043_SSI1_PCLK2, R9A07G043_CLK_P0,
+ 0x570, 2),
+ DEF_MOD("ssi1_sfr", R9A07G043_SSI1_PCLK_SFR, R9A07G043_CLK_P0,
+ 0x570, 3),
+ DEF_MOD("ssi2_pclk", R9A07G043_SSI2_PCLK2, R9A07G043_CLK_P0,
+ 0x570, 4),
+ DEF_MOD("ssi2_sfr", R9A07G043_SSI2_PCLK_SFR, R9A07G043_CLK_P0,
+ 0x570, 5),
+ DEF_MOD("ssi3_pclk", R9A07G043_SSI3_PCLK2, R9A07G043_CLK_P0,
+ 0x570, 6),
+ DEF_MOD("ssi3_sfr", R9A07G043_SSI3_PCLK_SFR, R9A07G043_CLK_P0,
+ 0x570, 7),
+ DEF_MOD("usb0_host", R9A07G043_USB_U2H0_HCLK, R9A07G043_CLK_P1,
+ 0x578, 0),
+ DEF_MOD("usb1_host", R9A07G043_USB_U2H1_HCLK, R9A07G043_CLK_P1,
+ 0x578, 1),
+ DEF_MOD("usb0_func", R9A07G043_USB_U2P_EXR_CPUCLK, R9A07G043_CLK_P1,
+ 0x578, 2),
+ DEF_MOD("usb_pclk", R9A07G043_USB_PCLK, R9A07G043_CLK_P1,
+ 0x578, 3),
+ DEF_COUPLED("eth0_axi", R9A07G043_ETH0_CLK_AXI, R9A07G043_CLK_M0,
+ 0x57c, 0),
+ DEF_COUPLED("eth0_chi", R9A07G043_ETH0_CLK_CHI, R9A07G043_CLK_ZT,
+ 0x57c, 0),
+ DEF_COUPLED("eth1_axi", R9A07G043_ETH1_CLK_AXI, R9A07G043_CLK_M0,
+ 0x57c, 1),
+ DEF_COUPLED("eth1_chi", R9A07G043_ETH1_CLK_CHI, R9A07G043_CLK_ZT,
+ 0x57c, 1),
+ DEF_MOD("i2c0", R9A07G043_I2C0_PCLK, R9A07G043_CLK_P0,
+ 0x580, 0),
+ DEF_MOD("i2c1", R9A07G043_I2C1_PCLK, R9A07G043_CLK_P0,
+ 0x580, 1),
+ DEF_MOD("i2c2", R9A07G043_I2C2_PCLK, R9A07G043_CLK_P0,
+ 0x580, 2),
+ DEF_MOD("i2c3", R9A07G043_I2C3_PCLK, R9A07G043_CLK_P0,
+ 0x580, 3),
+ DEF_MOD("scif0", R9A07G043_SCIF0_CLK_PCK, R9A07G043_CLK_P0,
+ 0x584, 0),
+ DEF_MOD("scif1", R9A07G043_SCIF1_CLK_PCK, R9A07G043_CLK_P0,
+ 0x584, 1),
+ DEF_MOD("scif2", R9A07G043_SCIF2_CLK_PCK, R9A07G043_CLK_P0,
+ 0x584, 2),
+ DEF_MOD("scif3", R9A07G043_SCIF3_CLK_PCK, R9A07G043_CLK_P0,
+ 0x584, 3),
+ DEF_MOD("scif4", R9A07G043_SCIF4_CLK_PCK, R9A07G043_CLK_P0,
+ 0x584, 4),
+ DEF_MOD("sci0", R9A07G043_SCI0_CLKP, R9A07G043_CLK_P0,
+ 0x588, 0),
+ DEF_MOD("sci1", R9A07G043_SCI1_CLKP, R9A07G043_CLK_P0,
+ 0x588, 1),
+ DEF_MOD("rspi0", R9A07G043_RSPI0_CLKB, R9A07G043_CLK_P0,
+ 0x590, 0),
+ DEF_MOD("rspi1", R9A07G043_RSPI1_CLKB, R9A07G043_CLK_P0,
+ 0x590, 1),
+ DEF_MOD("rspi2", R9A07G043_RSPI2_CLKB, R9A07G043_CLK_P0,
+ 0x590, 2),
+ DEF_MOD("canfd", R9A07G043_CANFD_PCLK, R9A07G043_CLK_P0,
+ 0x594, 0),
+ DEF_MOD("gpio", R9A07G043_GPIO_HCLK, R9A07G043_OSCCLK,
+ 0x598, 0),
+ DEF_MOD("adc_adclk", R9A07G043_ADC_ADCLK, R9A07G043_CLK_TSU,
+ 0x5a8, 0),
+ DEF_MOD("adc_pclk", R9A07G043_ADC_PCLK, R9A07G043_CLK_P0,
+ 0x5a8, 1),
+ DEF_MOD("tsu_pclk", R9A07G043_TSU_PCLK, R9A07G043_CLK_TSU,
+ 0x5ac, 0),
+};
+
+static struct rzg2l_reset r9a07g043_resets[] = {
+ DEF_RST(R9A07G043_GIC600_GICRESET_N, 0x814, 0),
+ DEF_RST(R9A07G043_GIC600_DBG_GICRESET_N, 0x814, 1),
+ DEF_RST(R9A07G043_IA55_RESETN, 0x818, 0),
+ DEF_RST(R9A07G043_DMAC_ARESETN, 0x82c, 0),
+ DEF_RST(R9A07G043_DMAC_RST_ASYNC, 0x82c, 1),
+ DEF_RST(R9A07G043_OSTM0_PRESETZ, 0x834, 0),
+ DEF_RST(R9A07G043_OSTM1_PRESETZ, 0x834, 1),
+ DEF_RST(R9A07G043_OSTM2_PRESETZ, 0x834, 2),
+ DEF_RST(R9A07G043_WDT0_PRESETN, 0x848, 0),
+ DEF_RST(R9A07G043_WDT2_PRESETN, 0x848, 2),
+ DEF_RST(R9A07G043_SPI_RST, 0x850, 0),
+ DEF_RST(R9A07G043_SDHI0_IXRST, 0x854, 0),
+ DEF_RST(R9A07G043_SDHI1_IXRST, 0x854, 1),
+ DEF_RST(R9A07G043_SSI0_RST_M2_REG, 0x870, 0),
+ DEF_RST(R9A07G043_SSI1_RST_M2_REG, 0x870, 1),
+ DEF_RST(R9A07G043_SSI2_RST_M2_REG, 0x870, 2),
+ DEF_RST(R9A07G043_SSI3_RST_M2_REG, 0x870, 3),
+ DEF_RST(R9A07G043_USB_U2H0_HRESETN, 0x878, 0),
+ DEF_RST(R9A07G043_USB_U2H1_HRESETN, 0x878, 1),
+ DEF_RST(R9A07G043_USB_U2P_EXL_SYSRST, 0x878, 2),
+ DEF_RST(R9A07G043_USB_PRESETN, 0x878, 3),
+ DEF_RST(R9A07G043_ETH0_RST_HW_N, 0x87c, 0),
+ DEF_RST(R9A07G043_ETH1_RST_HW_N, 0x87c, 1),
+ DEF_RST(R9A07G043_I2C0_MRST, 0x880, 0),
+ DEF_RST(R9A07G043_I2C1_MRST, 0x880, 1),
+ DEF_RST(R9A07G043_I2C2_MRST, 0x880, 2),
+ DEF_RST(R9A07G043_I2C3_MRST, 0x880, 3),
+ DEF_RST(R9A07G043_SCIF0_RST_SYSTEM_N, 0x884, 0),
+ DEF_RST(R9A07G043_SCIF1_RST_SYSTEM_N, 0x884, 1),
+ DEF_RST(R9A07G043_SCIF2_RST_SYSTEM_N, 0x884, 2),
+ DEF_RST(R9A07G043_SCIF3_RST_SYSTEM_N, 0x884, 3),
+ DEF_RST(R9A07G043_SCIF4_RST_SYSTEM_N, 0x884, 4),
+ DEF_RST(R9A07G043_SCI0_RST, 0x888, 0),
+ DEF_RST(R9A07G043_SCI1_RST, 0x888, 1),
+ DEF_RST(R9A07G043_RSPI0_RST, 0x890, 0),
+ DEF_RST(R9A07G043_RSPI1_RST, 0x890, 1),
+ DEF_RST(R9A07G043_RSPI2_RST, 0x890, 2),
+ DEF_RST(R9A07G043_CANFD_RSTP_N, 0x894, 0),
+ DEF_RST(R9A07G043_CANFD_RSTC_N, 0x894, 1),
+ DEF_RST(R9A07G043_GPIO_RSTN, 0x898, 0),
+ DEF_RST(R9A07G043_GPIO_PORT_RESETN, 0x898, 1),
+ DEF_RST(R9A07G043_GPIO_SPARE_RESETN, 0x898, 2),
+ DEF_RST(R9A07G043_ADC_PRESETN, 0x8a8, 0),
+ DEF_RST(R9A07G043_ADC_ADRST_N, 0x8a8, 1),
+ DEF_RST(R9A07G043_TSU_PRESETN, 0x8ac, 0),
+};
+
+static const unsigned int r9a07g043_crit_mod_clks[] __initconst = {
+ MOD_CLK_BASE + R9A07G043_GIC600_GICCLK,
+ MOD_CLK_BASE + R9A07G043_IA55_CLK,
+ MOD_CLK_BASE + R9A07G043_DMAC_ACLK,
+};
+
+const struct rzg2l_cpg_info r9a07g043_cpg_info = {
+ /* Core Clocks */
+ .core_clks = r9a07g043_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a07g043_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r9a07g043_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r9a07g043_crit_mod_clks),
+
+ /* Module Clocks */
+ .mod_clks = r9a07g043_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a07g043_mod_clks),
+ .num_hw_mod_clks = R9A07G043_TSU_PCLK + 1,
+
+ /* Resets */
+ .resets = r9a07g043_resets,
+ .num_resets = R9A07G043_TSU_PRESETN + 1, /* Last reset ID + 1 */
+
+ .has_clk_mon_regs = true,
+};
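
Each DEF_MOD() entry carries the clock name, its DT binding ID, the parent core clock, and the gate register offset plus bit: "scif0", for example, is gated by bit 0 of the register at 0x584, and its reset is DEF_RST() at 0x884 bit 0. The DEF_COUPLED() pairs (eth0_axi/eth0_chi both on 0x57c bit 0, the eth1 pair on bit 1) describe two clocks sharing a single gate bit; the driver tracks such pairs as siblings, as the clock->sibling check visible in a later rzg2l-cpg.c hunk shows.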
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index bdfabb992a20..b288897852c7 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -32,6 +32,7 @@ enum clk_ids {
CLK_PLL3,
CLK_PLL3_400,
CLK_PLL3_533,
+ CLK_M2_DIV2,
CLK_PLL3_DIV2,
CLK_PLL3_DIV2_2,
CLK_PLL3_DIV2_4,
@@ -40,6 +41,8 @@ enum clk_ids {
CLK_DIV_PLL3_C,
CLK_PLL4,
CLK_PLL5,
+ CLK_PLL5_FOUTPOSTDIV,
+ CLK_PLL5_FOUT1PH0,
CLK_PLL5_FOUT3,
CLK_PLL5_250,
CLK_PLL6,
@@ -52,6 +55,11 @@ enum clk_ids {
CLK_SD0_DIV4,
CLK_SD1_DIV4,
CLK_SEL_GPU2,
+ CLK_SEL_PLL5_4,
+ CLK_DSI_DIV,
+ CLK_PLL2_533,
+ CLK_PLL2_533_DIV2,
+ CLK_DIV_DSI_LPCLK,
/* Module Clocks */
MOD_CLK_BASE,
@@ -75,14 +83,23 @@ static const struct clk_div_table dtable_1_32[] = {
{0, 0},
};
+static const struct clk_div_table dtable_16_128[] = {
+ {0, 16},
+ {1, 32},
+ {2, 64},
+ {3, 128},
+ {0, 0},
+};
+
/* Mux clock tables */
static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
+static const char * const sel_pll5_4[] = { ".pll5_foutpostdiv", ".pll5_fout1ph0" };
static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" };
static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" };
static const char * const sel_gpu2[] = { ".pll6", ".pll3_div2_2" };
static const struct {
- struct cpg_core_clk common[44];
+ struct cpg_core_clk common[56];
#ifdef CONFIG_CLK_R9A07G054
struct cpg_core_clk drp[0];
#endif
@@ -96,6 +113,7 @@ static const struct {
DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000),
DEF_SAMPLL(".pll1", CLK_PLL1, CLK_EXTAL, PLL146_CONF(0)),
DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 200, 3),
+ DEF_FIXED(".pll2_533", CLK_PLL2_533, CLK_PLL2, 1, 3),
DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 200, 3),
DEF_FIXED(".pll3_400", CLK_PLL3_400, CLK_PLL3, 1, 4),
DEF_FIXED(".pll3_533", CLK_PLL3_533, CLK_PLL3, 1, 3),
@@ -114,46 +132,48 @@ static const struct {
DEF_FIXED(".pll2_div2_8", CLK_PLL2_DIV2_8, CLK_PLL2_DIV2, 1, 8),
DEF_FIXED(".pll2_div2_10", CLK_PLL2_DIV2_10, CLK_PLL2_DIV2, 1, 10),
+ DEF_FIXED(".pll2_533_div2", CLK_PLL2_533_DIV2, CLK_PLL2_533, 1, 2),
+
DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 1, 2),
DEF_FIXED(".pll3_div2_2", CLK_PLL3_DIV2_2, CLK_PLL3_DIV2, 1, 2),
DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4),
DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2),
- DEF_MUX(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3,
- sel_pll3_3, ARRAY_SIZE(sel_pll3_3), 0, CLK_MUX_READ_ONLY),
- DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3,
- DIVPL3C, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+ DEF_MUX_RO(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3, sel_pll3_3),
+ DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3, DIVPL3C, dtable_1_32),
DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_FOUT3, 1, 2),
DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2),
- DEF_MUX(".sel_gpu2", CLK_SEL_GPU2, SEL_GPU2,
- sel_gpu2, ARRAY_SIZE(sel_gpu2), 0, CLK_MUX_READ_ONLY),
+ DEF_MUX_RO(".sel_gpu2", CLK_SEL_GPU2, SEL_GPU2, sel_gpu2),
+ DEF_PLL5_FOUTPOSTDIV(".pll5_foutpostdiv", CLK_PLL5_FOUTPOSTDIV, CLK_EXTAL),
+ DEF_FIXED(".pll5_fout1ph0", CLK_PLL5_FOUT1PH0, CLK_PLL5_FOUTPOSTDIV, 1, 2),
+ DEF_PLL5_4_MUX(".sel_pll5_4", CLK_SEL_PLL5_4, SEL_PLL5_4, sel_pll5_4),
+ DEF_DIV(".div_dsi_lpclk", CLK_DIV_DSI_LPCLK, CLK_PLL2_533_DIV2,
+ DIVDSILPCLK, dtable_16_128),
/* Core output clk */
- DEF_DIV("I", R9A07G044_CLK_I, CLK_PLL1, DIVPL1A, dtable_1_8,
- CLK_DIVIDER_HIWORD_MASK),
- DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV2_8, DIVPL2A,
- dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+ DEF_DIV("I", R9A07G044_CLK_I, CLK_PLL1, DIVPL1A, dtable_1_8),
+ DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV2_8, DIVPL2A, dtable_1_32),
DEF_FIXED("P0_DIV2", R9A07G044_CLK_P0_DIV2, R9A07G044_CLK_P0, 1, 2),
DEF_FIXED("TSU", R9A07G044_CLK_TSU, CLK_PLL2_DIV2_10, 1, 1),
- DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV2_4,
- DIVPL3B, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+ DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV2_4, DIVPL3B, dtable_1_32),
DEF_FIXED("P1_DIV2", CLK_P1_DIV2, R9A07G044_CLK_P1, 1, 2),
- DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2,
- DIVPL3A, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+ DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2, DIVPL3A, dtable_1_32),
DEF_FIXED("M0", R9A07G044_CLK_M0, CLK_PLL3_DIV2_4, 1, 1),
DEF_FIXED("ZT", R9A07G044_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1),
- DEF_MUX("HP", R9A07G044_CLK_HP, SEL_PLL6_2,
- sel_pll6_2, ARRAY_SIZE(sel_pll6_2), 0, CLK_MUX_HIWORD_MASK),
+ DEF_MUX("HP", R9A07G044_CLK_HP, SEL_PLL6_2, sel_pll6_2),
DEF_FIXED("SPI0", R9A07G044_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
DEF_FIXED("SPI1", R9A07G044_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
- DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0,
- sel_shdi, ARRAY_SIZE(sel_shdi)),
- DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1,
- sel_shdi, ARRAY_SIZE(sel_shdi)),
+ DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, sel_shdi),
+ DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, sel_shdi),
DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4),
DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4),
- DEF_DIV("G", R9A07G044_CLK_G, CLK_SEL_GPU2, DIVGPU, dtable_1_8,
- CLK_DIVIDER_HIWORD_MASK),
+ DEF_DIV("G", R9A07G044_CLK_G, CLK_SEL_GPU2, DIVGPU, dtable_1_8),
+ DEF_FIXED("M1", R9A07G044_CLK_M1, CLK_PLL5_FOUTPOSTDIV, 1, 1),
+ DEF_FIXED("M2", R9A07G044_CLK_M2, CLK_PLL3_533, 1, 2),
+ DEF_FIXED("M2_DIV2", CLK_M2_DIV2, R9A07G044_CLK_M2, 1, 2),
+ DEF_DSI_DIV("DSI_DIV", CLK_DSI_DIV, CLK_SEL_PLL5_4, CLK_SET_RATE_PARENT),
+ DEF_FIXED("M3", R9A07G044_CLK_M3, CLK_DSI_DIV, 1, 1),
+ DEF_FIXED("M4", R9A07G044_CLK_M4, CLK_DIV_DSI_LPCLK, 1, 1),
},
#ifdef CONFIG_CLK_R9A07G054
.drp = {
@@ -162,7 +182,7 @@ static const struct {
};
static const struct {
- struct rzg2l_mod_clk common[62];
+ struct rzg2l_mod_clk common[71];
#ifdef CONFIG_CLK_R9A07G054
struct rzg2l_mod_clk drp[0];
#endif
@@ -180,7 +200,7 @@ static const struct {
0x52c, 1),
DEF_MOD("ostm0_pclk", R9A07G044_OSTM0_PCLK, R9A07G044_CLK_P0,
0x534, 0),
- DEF_MOD("ostm1_clk", R9A07G044_OSTM1_PCLK, R9A07G044_CLK_P0,
+ DEF_MOD("ostm1_pclk", R9A07G044_OSTM1_PCLK, R9A07G044_CLK_P0,
0x534, 1),
DEF_MOD("ostm2_pclk", R9A07G044_OSTM2_PCLK, R9A07G044_CLK_P0,
0x534, 2),
@@ -222,6 +242,24 @@ static const struct {
0x558, 1),
DEF_MOD("gpu_ace_clk", R9A07G044_GPU_ACE_CLK, R9A07G044_CLK_P1,
0x558, 2),
+ DEF_MOD("dsi_pll_clk", R9A07G044_MIPI_DSI_PLLCLK, R9A07G044_CLK_M1,
+ 0x568, 0),
+ DEF_MOD("dsi_sys_clk", R9A07G044_MIPI_DSI_SYSCLK, CLK_M2_DIV2,
+ 0x568, 1),
+ DEF_MOD("dsi_aclk", R9A07G044_MIPI_DSI_ACLK, R9A07G044_CLK_P1,
+ 0x568, 2),
+ DEF_MOD("dsi_pclk", R9A07G044_MIPI_DSI_PCLK, R9A07G044_CLK_P2,
+ 0x568, 3),
+ DEF_MOD("dsi_vclk", R9A07G044_MIPI_DSI_VCLK, R9A07G044_CLK_M3,
+ 0x568, 4),
+ DEF_MOD("dsi_lpclk", R9A07G044_MIPI_DSI_LPCLK, R9A07G044_CLK_M4,
+ 0x568, 5),
+ DEF_COUPLED("lcdc_a", R9A07G044_LCDC_CLK_A, R9A07G044_CLK_M0,
+ 0x56c, 0),
+ DEF_COUPLED("lcdc_p", R9A07G044_LCDC_CLK_P, R9A07G044_CLK_ZT,
+ 0x56c, 0),
+ DEF_MOD("lcdc_clk_d", R9A07G044_LCDC_CLK_D, R9A07G044_CLK_M3,
+ 0x56c, 1),
DEF_MOD("ssi0_pclk", R9A07G044_SSI0_PCLK2, R9A07G044_CLK_P0,
0x570, 0),
DEF_MOD("ssi0_sfr", R9A07G044_SSI0_PCLK_SFR, R9A07G044_CLK_P0,
@@ -317,6 +355,10 @@ static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_GPU_RESETN, 0x858, 0),
DEF_RST(R9A07G044_GPU_AXI_RESETN, 0x858, 1),
DEF_RST(R9A07G044_GPU_ACE_RESETN, 0x858, 2),
+ DEF_RST(R9A07G044_MIPI_DSI_CMN_RSTB, 0x868, 0),
+ DEF_RST(R9A07G044_MIPI_DSI_ARESET_N, 0x868, 1),
+ DEF_RST(R9A07G044_MIPI_DSI_PRESET_N, 0x868, 2),
+ DEF_RST(R9A07G044_LCDC_RESET_N, 0x86c, 0),
DEF_RST(R9A07G044_SSI0_RST_M2_REG, 0x870, 0),
DEF_RST(R9A07G044_SSI1_RST_M2_REG, 0x870, 1),
DEF_RST(R9A07G044_SSI2_RST_M2_REG, 0x870, 2),
@@ -376,6 +418,8 @@ const struct rzg2l_cpg_info r9a07g044_cpg_info = {
/* Resets */
.resets = r9a07g044_resets,
.num_resets = R9A07G044_TSU_PRESETN + 1, /* Last reset ID + 1 */
+
+ .has_clk_mon_regs = true,
};
#ifdef CONFIG_CLK_R9A07G054
@@ -398,5 +442,7 @@ const struct rzg2l_cpg_info r9a07g054_cpg_info = {
/* Resets */
.resets = r9a07g044_resets,
.num_resets = R9A07G054_STPAI_ARESETN + 1, /* Last reset ID + 1 */
+
+ .has_clk_mon_regs = true,
};
#endif
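
The new MIPI DSI and LCDC module clocks hang off the PLL5 path added in the same file: dsi_pll_clk takes M1 (CLK_PLL5_FOUTPOSTDIV), dsi_vclk takes M3 (the DSI_DIV output), dsi_lpclk takes M4 (the DIVDSILPCLK divider), and lcdc_a/lcdc_p form another coupled pair sharing 0x56c bit 0.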
diff --git a/drivers/clk/renesas/r9a09g011-cpg.c b/drivers/clk/renesas/r9a09g011-cpg.c
new file mode 100644
index 000000000000..40693bb85b80
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g011-cpg.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/V2M Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ *
+ * Based on r9a07g044-cpg.c
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/r9a09g011-cpg.h>
+
+#include "rzg2l-cpg.h"
+
+#define RZV2M_SAMPLL4_CLK1 0x104
+#define RZV2M_SAMPLL4_CLK2 0x108
+
+#define PLL4_CONF (RZV2M_SAMPLL4_CLK1 << 22 | RZV2M_SAMPLL4_CLK2 << 12)
+
+#define DIV_A DDIV_PACK(0x200, 0, 3)
+#define DIV_B DDIV_PACK(0x204, 0, 2)
+#define DIV_E DDIV_PACK(0x204, 8, 1)
+#define DIV_W DDIV_PACK(0x328, 0, 3)
+
+#define SEL_B SEL_PLL_PACK(0x214, 0, 1)
+#define SEL_E SEL_PLL_PACK(0x214, 2, 1)
+#define SEL_W0 SEL_PLL_PACK(0x32C, 0, 1)
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = 0,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_MAIN_24,
+ CLK_MAIN_2,
+ CLK_PLL1,
+ CLK_PLL2,
+ CLK_PLL2_800,
+ CLK_PLL2_400,
+ CLK_PLL2_200,
+ CLK_PLL2_100,
+ CLK_PLL4,
+ CLK_DIV_A,
+ CLK_DIV_B,
+ CLK_DIV_E,
+ CLK_DIV_W,
+ CLK_SEL_B,
+ CLK_SEL_B_D2,
+ CLK_SEL_E,
+ CLK_SEL_W0,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+/* Divider tables */
+static const struct clk_div_table dtable_diva[] = {
+ {0, 1},
+ {1, 2},
+ {2, 3},
+ {3, 4},
+ {4, 6},
+ {5, 12},
+ {6, 24},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_divb[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_divw[] = {
+ {0, 6},
+ {1, 7},
+ {2, 8},
+ {3, 9},
+ {4, 10},
+ {5, 11},
+ {6, 12},
+ {0, 0},
+};
+
+/* Mux clock tables */
+static const char * const sel_b[] = { ".main", ".divb" };
+static const char * const sel_e[] = { ".main", ".dive" };
+static const char * const sel_w[] = { ".main", ".divw" };
+
+static const struct cpg_core_clk r9a09g011_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_FIXED(".main", CLK_MAIN, CLK_EXTAL, 1, 1),
+ DEF_FIXED(".main_24", CLK_MAIN_24, CLK_MAIN, 1, 2),
+ DEF_FIXED(".main_2", CLK_MAIN_2, CLK_MAIN, 1, 24),
+ DEF_FIXED(".pll1", CLK_PLL1, CLK_MAIN_2, 498, 1),
+ DEF_FIXED(".pll2", CLK_PLL2, CLK_MAIN_2, 800, 1),
+ DEF_FIXED(".pll2_800", CLK_PLL2_800, CLK_PLL2, 1, 2),
+ DEF_FIXED(".pll2_400", CLK_PLL2_400, CLK_PLL2_800, 1, 2),
+ DEF_FIXED(".pll2_200", CLK_PLL2_200, CLK_PLL2_800, 1, 4),
+ DEF_FIXED(".pll2_100", CLK_PLL2_100, CLK_PLL2_800, 1, 8),
+ DEF_SAMPLL(".pll4", CLK_PLL4, CLK_MAIN_2, PLL4_CONF),
+
+ DEF_DIV_RO(".diva", CLK_DIV_A, CLK_PLL1, DIV_A, dtable_diva),
+ DEF_DIV_RO(".divb", CLK_DIV_B, CLK_PLL2_400, DIV_B, dtable_divb),
+ DEF_DIV_RO(".dive", CLK_DIV_E, CLK_PLL2_100, DIV_E, NULL),
+ DEF_DIV_RO(".divw", CLK_DIV_W, CLK_PLL4, DIV_W, dtable_divw),
+
+ DEF_MUX_RO(".selb", CLK_SEL_B, SEL_B, sel_b),
+ DEF_MUX_RO(".sele", CLK_SEL_E, SEL_E, sel_e),
+ DEF_MUX(".selw0", CLK_SEL_W0, SEL_W0, sel_w),
+
+ DEF_FIXED(".selb_d2", CLK_SEL_B_D2, CLK_SEL_B, 1, 2),
+};
+
+static const struct rzg2l_mod_clk r9a09g011_mod_clks[] __initconst = {
+ DEF_MOD("gic", R9A09G011_GIC_CLK, CLK_SEL_B_D2, 0x400, 5),
+ DEF_COUPLED("eth_axi", R9A09G011_ETH0_CLK_AXI, CLK_PLL2_200, 0x40c, 8),
+ DEF_COUPLED("eth_chi", R9A09G011_ETH0_CLK_CHI, CLK_PLL2_100, 0x40c, 8),
+ DEF_MOD("eth_clk_gptp", R9A09G011_ETH0_GPTP_EXT, CLK_PLL2_100, 0x40c, 9),
+ DEF_MOD("syc_cnt_clk", R9A09G011_SYC_CNT_CLK, CLK_MAIN_24, 0x41c, 12),
+ DEF_MOD("urt_pclk", R9A09G011_URT_PCLK, CLK_SEL_E, 0x438, 4),
+ DEF_MOD("urt0_clk", R9A09G011_URT0_CLK, CLK_SEL_W0, 0x438, 5),
+ DEF_MOD("ca53", R9A09G011_CA53_CLK, CLK_DIV_A, 0x448, 0),
+};
+
+static const struct rzg2l_reset r9a09g011_resets[] = {
+ DEF_RST_MON(R9A09G011_ETH0_RST_HW_N, 0x608, 11, 11),
+ DEF_RST_MON(R9A09G011_SYC_RST_N, 0x610, 9, 13),
+};
+
+static const unsigned int r9a09g011_crit_mod_clks[] __initconst = {
+ MOD_CLK_BASE + R9A09G011_CA53_CLK,
+ MOD_CLK_BASE + R9A09G011_GIC_CLK,
+ MOD_CLK_BASE + R9A09G011_SYC_CNT_CLK,
+ MOD_CLK_BASE + R9A09G011_URT_PCLK,
+};
+
+const struct rzg2l_cpg_info r9a09g011_cpg_info = {
+ /* Core Clocks */
+ .core_clks = r9a09g011_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a09g011_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r9a09g011_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r9a09g011_crit_mod_clks),
+
+ /* Module Clocks */
+ .mod_clks = r9a09g011_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a09g011_mod_clks),
+ .num_hw_mod_clks = R9A09G011_CA53_CLK + 1,
+
+ /* Resets */
+ .resets = r9a09g011_resets,
+ .num_resets = ARRAY_SIZE(r9a09g011_resets),
+
+ .has_clk_mon_regs = false,
+};
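
RZ/V2M has no CLK_MON registers (.has_clk_mon_regs = false), so each observable reset carries an explicit monitor bit: DEF_RST_MON(R9A09G011_SYC_RST_N, 0x610, 9, 13) drives the reset through bit 9 of the register at 0x610 and reads its state back from bit 13 of CPG_RST_MON (0x680). The reworked rzg2l_cpg_status() later in this series takes that path whenever monbit >= 0.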
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index 2bc0afadf604..9028bf4295ce 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -25,7 +25,7 @@ enum rcar_gen3_clk_types {
CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */
CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */
CLK_TYPE_GEN3_RPCSRC,
- CLK_TYPE_GEN3_E3_RPCSRC,
+ CLK_TYPE_GEN3_E3_RPCSRC,/* Select parent/divider using RPCCKCR.DIV */
CLK_TYPE_GEN3_RPC,
CLK_TYPE_GEN3_RPCD2,
@@ -62,6 +62,9 @@ enum rcar_gen3_clk_types {
#define DEF_FIXED_RPCSRC_E3(_name, _id, _parent0, _parent1) \
DEF_BASE(_name, _id, CLK_TYPE_GEN3_E3_RPCSRC, \
(_parent0) << 16 | (_parent1), .div = 8)
+#define DEF_FIXED_RPCSRC_D3(_name, _id, _parent0, _parent1) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN3_E3_RPCSRC, \
+ (_parent0) << 16 | (_parent1), .div = 5)
struct rcar_gen3_cpg_pll_config {
u8 extal_div;
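
Both RPCSRC helpers pack their two candidate parents into a single parent field (parent0 in the upper 16 bits, parent1 in the lower 16) and differ only in the fixed divider baked into .div: 8 for the E3 variant, 5 for the new D3 one. Which parent actually feeds RPCSRC is decided from RPCCKCR at registration time, as the comment added to CLK_TYPE_GEN3_E3_RPCSRC notes; the decode itself lives in rcar-gen3-cpg.c and is not part of this hunk.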
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index 54ebf4b3c128..c7ed43d6aa67 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -215,6 +215,11 @@ struct clk * __init rcar_gen4_cpg_clk_register(struct device *dev,
div = cpg_pll_config->pll3_div;
break;
+ case CLK_TYPE_GEN4_PLL4:
+ mult = cpg_pll_config->pll4_mult;
+ div = cpg_pll_config->pll4_div;
+ break;
+
case CLK_TYPE_GEN4_PLL5:
mult = cpg_pll_config->pll5_mult;
div = cpg_pll_config->pll5_div;
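
Like the neighbouring PLL cases, CLK_TYPE_GEN4_PLL4 only selects a multiplier and divider from the active cpg_pll_config row; the resulting clock is then, presumably via the common fixed-factor registration at the end of rcar_gen4_cpg_clk_register(), .main x pll4_mult / pll4_div. The r8a779f0 table leaves the new pll4 fields at zero because its mode-pin comment marks PLL4 as n/a, while r8a779a0 and r8a779g0 fill in x144/x120.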
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.h b/drivers/clk/renesas/rcar-gen4-cpg.h
index afc8c024d538..0b15dcfdca7b 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.h
+++ b/drivers/clk/renesas/rcar-gen4-cpg.h
@@ -16,6 +16,7 @@ enum rcar_gen4_clk_types {
CLK_TYPE_GEN4_PLL2X_3X, /* r8a779a0 only */
CLK_TYPE_GEN4_PLL3,
CLK_TYPE_GEN4_PLL5,
+ CLK_TYPE_GEN4_PLL4,
CLK_TYPE_GEN4_PLL6,
CLK_TYPE_GEN4_SDSRC,
CLK_TYPE_GEN4_SDH,
@@ -56,6 +57,8 @@ struct rcar_gen4_cpg_pll_config {
u8 pll2_div;
u8 pll3_mult;
u8 pll3_div;
+ u8 pll4_mult;
+ u8 pll4_div;
u8 pll5_mult;
u8 pll5_div;
u8 pll6_mult;
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 5d2c3edbaa14..1a0cdf001b2f 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -854,6 +854,12 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a779f0_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A779G0
+ {
+ .compatible = "renesas,r8a779g0-cpg-mssr",
+ .data = &r8a779g0_cpg_mssr_info,
+ },
+#endif
{ /* sentinel */ }
};
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 16810dd4e6ac..1c3c057d17f5 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -179,6 +179,7 @@ extern const struct cpg_mssr_info r8a77990_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77995_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779a0_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779f0_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a779g0_cpg_mssr_info;
void __init cpg_mssr_early_init(struct device_node *np,
const struct cpg_mssr_info *info);
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 486d0656c58a..e2999ab2b53c 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -27,6 +27,7 @@
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
+#include <linux/units.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -56,6 +57,8 @@
#define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff)
+#define MAX_VCLK_FREQ (148500000)
+
struct sd_hw_data {
struct clk_hw hw;
u32 conf;
@@ -64,6 +67,21 @@ struct sd_hw_data {
#define to_sd_hw_data(_hw) container_of(_hw, struct sd_hw_data, hw)
+struct rzg2l_pll5_param {
+ u32 pl5_fracin;
+ u8 pl5_refdiv;
+ u8 pl5_intin;
+ u8 pl5_postdiv1;
+ u8 pl5_postdiv2;
+ u8 pl5_spread;
+};
+
+struct rzg2l_pll5_mux_dsi_div_param {
+ u8 clksrc;
+ u8 dsi_div_a;
+ u8 dsi_div_b;
+};
+
/**
* struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
*
@@ -76,8 +94,8 @@ struct sd_hw_data {
* @num_mod_clks: Number of Module Clocks in clks[]
* @num_resets: Number of Module Resets in info->resets[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
- * @notifiers: Notifier chain to save/restore clock state for system resume
* @info: Pointer to platform data
+ * @mux_dsi_div_params: pll5 mux and dsi div parameters
*/
struct rzg2l_cpg_priv {
struct reset_controller_dev rcdev;
@@ -91,8 +109,9 @@ struct rzg2l_cpg_priv {
unsigned int num_resets;
unsigned int last_dt_core_clk;
- struct raw_notifier_head notifiers;
const struct rzg2l_cpg_info *info;
+
+ struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};
static void rzg2l_cpg_del_clk_provider(void *data)
@@ -266,6 +285,406 @@ rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
return clk_hw->clk;
}
+static unsigned long
+rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
+ unsigned long rate)
+{
+ unsigned long foutpostdiv_rate;
+
+ params->pl5_intin = rate / MEGA;
+ params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
+ params->pl5_refdiv = 2;
+ params->pl5_postdiv1 = 1;
+ params->pl5_postdiv2 = 1;
+ params->pl5_spread = 0x16;
+
+ foutpostdiv_rate =
+ EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
+ ((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
+ (params->pl5_postdiv1 * params->pl5_postdiv2);
+
+ return foutpostdiv_rate;
+}
+
+struct dsi_div_hw_data {
+ struct clk_hw hw;
+ u32 conf;
+ unsigned long rate;
+ struct rzg2l_cpg_priv *priv;
+};
+
+#define to_dsi_div_hw_data(_hw) container_of(_hw, struct dsi_div_hw_data, hw)
+
+static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
+ unsigned long rate = dsi_div->rate;
+
+ if (!rate)
+ rate = parent_rate;
+
+ return rate;
+}
+
+static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
+ unsigned long rate)
+{
+ struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
+ struct rzg2l_cpg_priv *priv = dsi_div->priv;
+ struct rzg2l_pll5_param params;
+ unsigned long parent_rate;
+
+ parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);
+
+ if (priv->mux_dsi_div_params.clksrc)
+ parent_rate /= 2;
+
+ return parent_rate;
+}
+
+static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ if (req->rate > MAX_VCLK_FREQ)
+ req->rate = MAX_VCLK_FREQ;
+
+ req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);
+
+ return 0;
+}
+
+static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
+ struct rzg2l_cpg_priv *priv = dsi_div->priv;
+
+ /*
+ * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
+ *
+ * Based on the dot clock, the DSI divider clock sets the divider value,
+ * calculates the pll parameters for generating FOUTPOSTDIV and the clk
+ * source for the MUX and propagates that info to the parents.
+ */
+
+ if (!rate || rate > MAX_VCLK_FREQ)
+ return -EINVAL;
+
+ dsi_div->rate = rate;
+ writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
+ (priv->mux_dsi_div_params.dsi_div_a << 0) |
+ (priv->mux_dsi_div_params.dsi_div_b << 8),
+ priv->base + CPG_PL5_SDIV);
+
+ return 0;
+}
+
+static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
+ .recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
+ .determine_rate = rzg2l_cpg_dsi_div_determine_rate,
+ .set_rate = rzg2l_cpg_dsi_div_set_rate,
+};
+
+static struct clk * __init
+rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
+ struct clk **clks,
+ struct rzg2l_cpg_priv *priv)
+{
+ struct dsi_div_hw_data *clk_hw_data;
+ const struct clk *parent;
+ const char *parent_name;
+ struct clk_init_data init;
+ struct clk_hw *clk_hw;
+ int ret;
+
+ parent = clks[core->parent & 0xffff];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
+ if (!clk_hw_data)
+ return ERR_PTR(-ENOMEM);
+
+ clk_hw_data->priv = priv;
+
+ parent_name = __clk_get_name(parent);
+ init.name = core->name;
+ init.ops = &rzg2l_cpg_dsi_div_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk_hw = &clk_hw_data->hw;
+ clk_hw->init = &init;
+
+ ret = devm_clk_hw_register(priv->dev, clk_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk_hw->clk;
+}
+
+struct pll5_mux_hw_data {
+ struct clk_hw hw;
+ u32 conf;
+ unsigned long rate;
+ struct rzg2l_cpg_priv *priv;
+};
+
+#define to_pll5_mux_hw_data(_hw) container_of(_hw, struct pll5_mux_hw_data, hw)
+
+static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *parent;
+ struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
+ struct rzg2l_cpg_priv *priv = hwdata->priv;
+
+ parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
+ req->best_parent_hw = parent;
+ req->best_parent_rate = req->rate;
+
+ return 0;
+}
+
+static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
+ struct rzg2l_cpg_priv *priv = hwdata->priv;
+
+ /*
+ * FOUTPOSTDIV--->|
+ * | | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
+ * |--FOUT1PH0-->|
+ *
+ * Based on the dot clock, the DSI divider clock calculates the parent
+ * rate and clk source for the MUX. It propagates that info to
+ * pll5_4_clk_mux which sets the clock source for DSI divider clock.
+ */
+
+ writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
+ priv->base + CPG_OTHERFUNC1_REG);
+
+ return 0;
+}
+
+static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
+ struct rzg2l_cpg_priv *priv = hwdata->priv;
+
+ return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
+}
+
+static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
+ .determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
+ .set_parent = rzg2l_cpg_pll5_4_clk_mux_set_parent,
+ .get_parent = rzg2l_cpg_pll5_4_clk_mux_get_parent,
+};
+
+static struct clk * __init
+rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
+ struct rzg2l_cpg_priv *priv)
+{
+ struct pll5_mux_hw_data *clk_hw_data;
+ struct clk_init_data init;
+ struct clk_hw *clk_hw;
+ int ret;
+
+ clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
+ if (!clk_hw_data)
+ return ERR_PTR(-ENOMEM);
+
+ clk_hw_data->priv = priv;
+ clk_hw_data->conf = core->conf;
+
+ init.name = core->name;
+ init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.num_parents = core->num_parents;
+ init.parent_names = core->parent_names;
+
+ clk_hw = &clk_hw_data->hw;
+ clk_hw->init = &init;
+
+ ret = devm_clk_hw_register(priv->dev, clk_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk_hw->clk;
+}
+
+struct sipll5 {
+ struct clk_hw hw;
+ u32 conf;
+ unsigned long foutpostdiv_rate;
+ struct rzg2l_cpg_priv *priv;
+};
+
+#define to_sipll5(_hw) container_of(_hw, struct sipll5, hw)
+
+static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
+ unsigned long rate)
+{
+ struct sipll5 *sipll5 = to_sipll5(hw);
+ struct rzg2l_cpg_priv *priv = sipll5->priv;
+ unsigned long vclk;
+
+ vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
+ (priv->mux_dsi_div_params.dsi_div_b + 1));
+
+ if (priv->mux_dsi_div_params.clksrc)
+ vclk /= 2;
+
+ return vclk;
+}
+
+static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sipll5 *sipll5 = to_sipll5(hw);
+ unsigned long pll5_rate = sipll5->foutpostdiv_rate;
+
+ if (!pll5_rate)
+ pll5_rate = parent_rate;
+
+ return pll5_rate;
+}
+
+static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
+static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct sipll5 *sipll5 = to_sipll5(hw);
+ struct rzg2l_cpg_priv *priv = sipll5->priv;
+ struct rzg2l_pll5_param params;
+ unsigned long vclk_rate;
+ int ret;
+ u32 val;
+
+ /*
+ * OSC --> PLL5 --> FOUTPOSTDIV-->|
+ * | | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
+ * |--FOUT1PH0-->|
+ *
+ * Based on the dot clock, the DSI divider clock calculates the parent
+ * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
+ * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
+ *
+ * OSC --> PLL5 --> FOUTPOSTDIV
+ */
+
+ if (!rate)
+ return -EINVAL;
+
+ vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
+ sipll5->foutpostdiv_rate =
+ rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);
+
+ /* Put PLL5 into standby mode */
+ writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
+ ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
+ !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
+ if (ret) {
+ dev_err(priv->dev, "failed to release pll5 lock");
+ return ret;
+ }
+
+ /* Output clock setting 1 */
+ writel(CPG_SIPLL5_CLK1_POSTDIV1_WEN | CPG_SIPLL5_CLK1_POSTDIV2_WEN |
+ CPG_SIPLL5_CLK1_REFDIV_WEN | (params.pl5_postdiv1 << 0) |
+ (params.pl5_postdiv2 << 4) | (params.pl5_refdiv << 8),
+ priv->base + CPG_SIPLL5_CLK1);
+
+ /* Output clock setting, SSCG modulation value setting 3 */
+ writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);
+
+ /* Output clock setting 4 */
+ writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
+ priv->base + CPG_SIPLL5_CLK4);
+
+ /* Output clock setting 5 */
+ writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);
+
+ /* PLL normal mode setting */
+ writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
+ CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
+ priv->base + CPG_SIPLL5_STBY);
+
+ /* PLL normal mode transition, output clock stability check */
+ ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
+ (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
+ if (ret) {
+ dev_err(priv->dev, "failed to lock pll5");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct clk_ops rzg2l_cpg_sipll5_ops = {
+ .recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
+ .round_rate = rzg2l_cpg_sipll5_round_rate,
+ .set_rate = rzg2l_cpg_sipll5_set_rate,
+};
+
+static struct clk * __init
+rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
+ struct clk **clks,
+ struct rzg2l_cpg_priv *priv)
+{
+ const struct clk *parent;
+ struct clk_init_data init;
+ const char *parent_name;
+ struct sipll5 *sipll5;
+ struct clk_hw *clk_hw;
+ int ret;
+
+ parent = clks[core->parent & 0xffff];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
+ if (!sipll5)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = core->name;
+ parent_name = __clk_get_name(parent);
+ init.ops = &rzg2l_cpg_sipll5_ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ sipll5->hw.init = &init;
+ sipll5->conf = core->conf;
+ sipll5->priv = priv;
+
+ writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
+ CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);
+
+ clk_hw = &sipll5->hw;
+ clk_hw->init = &init;
+
+ ret = devm_clk_hw_register(priv->dev, clk_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
+ priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
+ priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */
+
+ return clk_hw->clk;
+}
+
struct pll_clk {
struct clk_hw hw;
unsigned int conf;
@@ -291,7 +710,7 @@ static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
mult = MDIV(val1) + KDIV(val1) / 65536;
- div = PDIV(val1) * (1 << SDIV(val2));
+ div = PDIV(val1) << SDIV(val2);
return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div);
}
@@ -420,6 +839,9 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
clk = rzg2l_cpg_pll_clk_register(core, priv->clks,
priv->base, priv);
break;
+ case CLK_TYPE_SIPLL5:
+ clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
+ break;
case CLK_TYPE_DIV:
clk = rzg2l_cpg_div_clk_register(core, priv->clks,
priv->base, priv);
@@ -430,6 +852,12 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
case CLK_TYPE_SD_MUX:
clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
break;
+ case CLK_TYPE_PLL5_4_MUX:
+ clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
+ break;
+ case CLK_TYPE_DSI_DIV:
+ clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);
+ break;
default:
goto fail;
}
@@ -498,6 +926,9 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
if (!enable)
return 0;
+ if (!priv->info->has_clk_mon_regs)
+ return 0;
+
for (i = 1000; i > 0; --i) {
if (((readl(priv->base + CLK_MON_R(reg))) & bitmask))
break;
@@ -568,7 +999,10 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
if (clock->sibling)
return clock->enabled;
- value = readl(priv->base + CLK_MON_R(clock->off));
+ if (priv->info->has_clk_mon_regs)
+ value = readl(priv->base + CLK_MON_R(clock->off));
+ else
+ value = readl(priv->base + clock->off);
return value & bitmask;
}
@@ -743,8 +1177,16 @@ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
const struct rzg2l_cpg_info *info = priv->info;
unsigned int reg = info->resets[id].off;
u32 bitmask = BIT(info->resets[id].bit);
+ s8 monbit = info->resets[id].monbit;
+
+ if (info->has_clk_mon_regs) {
+ return !(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
+ } else if (monbit >= 0) {
+ u32 monbitmask = BIT(monbit);
- return !(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
+ return !!(readl(priv->base + CPG_RST_MON) & monbitmask);
+ }
+ return -ENOTSUPP;
}
static const struct reset_control_ops rzg2l_cpg_reset_ops = {
@@ -947,6 +1389,12 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev)
}
static const struct of_device_id rzg2l_cpg_match[] = {
+#ifdef CONFIG_CLK_R9A07G043
+ {
+ .compatible = "renesas,r9a07g043-cpg",
+ .data = &r9a07g043_cpg_info,
+ },
+#endif
#ifdef CONFIG_CLK_R9A07G044
{
.compatible = "renesas,r9a07g044-cpg",
@@ -959,6 +1407,12 @@ static const struct of_device_id rzg2l_cpg_match[] = {
.data = &r9a07g054_cpg_info,
},
#endif
+#ifdef CONFIG_CLK_R9A09G011
+ {
+ .compatible = "renesas,r9a09g011-cpg",
+ .data = &r9a09g011_cpg_info,
+ },
+#endif
{ /* sentinel */ }
};
diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
index ce657beaf160..cecbdf5e4f93 100644
--- a/drivers/clk/renesas/rzg2l-cpg.h
+++ b/drivers/clk/renesas/rzg2l-cpg.h
@@ -9,6 +9,12 @@
#ifndef __RENESAS_RZG2L_CPG_H__
#define __RENESAS_RZG2L_CPG_H__
+#define CPG_SIPLL5_STBY (0x140)
+#define CPG_SIPLL5_CLK1 (0x144)
+#define CPG_SIPLL5_CLK3 (0x14C)
+#define CPG_SIPLL5_CLK4 (0x150)
+#define CPG_SIPLL5_CLK5 (0x154)
+#define CPG_SIPLL5_MON (0x15C)
#define CPG_PL1_DDIV (0x200)
#define CPG_PL2_DDIV (0x204)
#define CPG_PL3A_DDIV (0x208)
@@ -18,6 +24,24 @@
#define CPG_PL3_SSEL (0x408)
#define CPG_PL6_SSEL (0x414)
#define CPG_PL6_ETH_SSEL (0x418)
+#define CPG_PL5_SDIV (0x420)
+#define CPG_RST_MON (0x680)
+#define CPG_OTHERFUNC1_REG (0xBE8)
+
+#define CPG_SIPLL5_STBY_RESETB BIT(0)
+#define CPG_SIPLL5_STBY_RESETB_WEN BIT(16)
+#define CPG_SIPLL5_STBY_SSCG_EN_WEN BIT(18)
+#define CPG_SIPLL5_STBY_DOWNSPREAD_WEN BIT(20)
+#define CPG_SIPLL5_CLK1_POSTDIV1_WEN BIT(16)
+#define CPG_SIPLL5_CLK1_POSTDIV2_WEN BIT(20)
+#define CPG_SIPLL5_CLK1_REFDIV_WEN BIT(24)
+#define CPG_SIPLL5_CLK4_RESV_LSB (0xFF)
+#define CPG_SIPLL5_MON_PLL5_LOCK BIT(4)
+
+#define CPG_OTHERFUNC1_REG_RES0_ON_WEN BIT(16)
+
+#define CPG_PL5_SDIV_DIV_DSI_A_WEN BIT(16)
+#define CPG_PL5_SDIV_DIV_DSI_B_WEN BIT(24)
#define CPG_CLKSTATUS_SELSDHI0_STS BIT(28)
#define CPG_CLKSTATUS_SELSDHI1_STS BIT(29)
@@ -34,6 +58,7 @@
(((offset) << 20) | ((bitpos) << 12) | ((size) << 8))
#define DIVPL1A DDIV_PACK(CPG_PL1_DDIV, 0, 2)
#define DIVPL2A DDIV_PACK(CPG_PL2_DDIV, 0, 3)
+#define DIVDSILPCLK DDIV_PACK(CPG_PL2_DDIV, 12, 2)
#define DIVPL3A DDIV_PACK(CPG_PL3A_DDIV, 0, 3)
#define DIVPL3B DDIV_PACK(CPG_PL3A_DDIV, 4, 3)
#define DIVPL3C DDIV_PACK(CPG_PL3A_DDIV, 8, 3)
@@ -43,12 +68,15 @@
(((offset) << 20) | ((bitpos) << 12) | ((size) << 8))
#define SEL_PLL3_3 SEL_PLL_PACK(CPG_PL3_SSEL, 8, 1)
+#define SEL_PLL5_4 SEL_PLL_PACK(CPG_OTHERFUNC1_REG, 0, 1)
#define SEL_PLL6_2 SEL_PLL_PACK(CPG_PL6_ETH_SSEL, 0, 1)
#define SEL_GPU2 SEL_PLL_PACK(CPG_PL6_SSEL, 12, 1)
#define SEL_SDHI0 DDIV_PACK(CPG_PL2SDHI_DSEL, 0, 2)
#define SEL_SDHI1 DDIV_PACK(CPG_PL2SDHI_DSEL, 4, 2)
+#define EXTAL_FREQ_IN_MEGA_HZ (24)
+
/**
* Definitions of CPG Core Clocks
*
@@ -86,6 +114,16 @@ enum clk_types {
/* Clock with SD clock source selector */
CLK_TYPE_SD_MUX,
+
+ /* Clock for SIPLL5 */
+ CLK_TYPE_SIPLL5,
+
+ /* Clock for PLL5_4 clock source selector */
+ CLK_TYPE_PLL5_4_MUX,
+
+ /* Clock for DSI divider */
+ CLK_TYPE_DSI_DIV,
+
};
#define DEF_TYPE(_name, _id, _type...) \
@@ -98,17 +136,36 @@ enum clk_types {
DEF_TYPE(_name, _id, CLK_TYPE_IN)
#define DEF_FIXED(_name, _id, _parent, _mult, _div) \
DEF_BASE(_name, _id, CLK_TYPE_FF, _parent, .div = _div, .mult = _mult)
-#define DEF_DIV(_name, _id, _parent, _conf, _dtable, _flag) \
+#define DEF_DIV(_name, _id, _parent, _conf, _dtable) \
+ DEF_TYPE(_name, _id, CLK_TYPE_DIV, .conf = _conf, \
+ .parent = _parent, .dtable = _dtable, \
+ .flag = CLK_DIVIDER_HIWORD_MASK)
+#define DEF_DIV_RO(_name, _id, _parent, _conf, _dtable) \
DEF_TYPE(_name, _id, CLK_TYPE_DIV, .conf = _conf, \
- .parent = _parent, .dtable = _dtable, .flag = _flag)
-#define DEF_MUX(_name, _id, _conf, _parent_names, _num_parents, _flag, \
- _mux_flags) \
+ .parent = _parent, .dtable = _dtable, \
+ .flag = CLK_DIVIDER_READ_ONLY)
+#define DEF_MUX(_name, _id, _conf, _parent_names) \
DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = _conf, \
- .parent_names = _parent_names, .num_parents = _num_parents, \
- .flag = _flag, .mux_flags = _mux_flags)
-#define DEF_SD_MUX(_name, _id, _conf, _parent_names, _num_parents) \
+ .parent_names = _parent_names, \
+ .num_parents = ARRAY_SIZE(_parent_names), \
+ .mux_flags = CLK_MUX_HIWORD_MASK)
+#define DEF_MUX_RO(_name, _id, _conf, _parent_names) \
+ DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = _conf, \
+ .parent_names = _parent_names, \
+ .num_parents = ARRAY_SIZE(_parent_names), \
+ .mux_flags = CLK_MUX_READ_ONLY)
+#define DEF_SD_MUX(_name, _id, _conf, _parent_names) \
DEF_TYPE(_name, _id, CLK_TYPE_SD_MUX, .conf = _conf, \
- .parent_names = _parent_names, .num_parents = _num_parents)
+ .parent_names = _parent_names, \
+ .num_parents = ARRAY_SIZE(_parent_names))
+#define DEF_PLL5_FOUTPOSTDIV(_name, _id, _parent) \
+ DEF_TYPE(_name, _id, CLK_TYPE_SIPLL5, .parent = _parent)
+#define DEF_PLL5_4_MUX(_name, _id, _conf, _parent_names) \
+ DEF_TYPE(_name, _id, CLK_TYPE_PLL5_4_MUX, .conf = _conf, \
+ .parent_names = _parent_names, \
+ .num_parents = ARRAY_SIZE(_parent_names))
+#define DEF_DSI_DIV(_name, _id, _parent, _flag) \
+ DEF_TYPE(_name, _id, CLK_TYPE_DSI_DIV, .parent = _parent, .flag = _flag)
/**
* struct rzg2l_mod_clk - Module Clocks definitions
@@ -150,17 +207,22 @@ struct rzg2l_mod_clk {
*
* @off: register offset
* @bit: reset bit
+ * @monbit: monitor bit in CPG_RST_MON register, -1 if none
*/
struct rzg2l_reset {
u16 off;
u8 bit;
+ s8 monbit;
};
-#define DEF_RST(_id, _off, _bit) \
+#define DEF_RST_MON(_id, _off, _bit, _monbit) \
[_id] = { \
.off = (_off), \
- .bit = (_bit) \
+ .bit = (_bit), \
+ .monbit = (_monbit) \
}
+#define DEF_RST(_id, _off, _bit) \
+ DEF_RST_MON(_id, _off, _bit, -1)
/**
* struct rzg2l_cpg_info - SoC-specific CPG Description
@@ -180,6 +242,7 @@ struct rzg2l_reset {
* @crit_mod_clks: Array with Module Clock IDs of critical clocks that
* should not be disabled without a knowledgeable driver
* @num_crit_mod_clks: Number of entries in crit_mod_clks[]
+ * @has_clk_mon_regs: Flag indicating whether the SoC has CLK_MON registers
*/
struct rzg2l_cpg_info {
/* Core Clocks */
@@ -200,9 +263,13 @@ struct rzg2l_cpg_info {
/* Critical Module Clocks that should not be disabled */
const unsigned int *crit_mod_clks;
unsigned int num_crit_mod_clks;
+
+ bool has_clk_mon_regs;
};
+extern const struct rzg2l_cpg_info r9a07g043_cpg_info;
extern const struct rzg2l_cpg_info r9a07g044_cpg_info;
extern const struct rzg2l_cpg_info r9a07g054_cpg_info;
+extern const struct rzg2l_cpg_info r9a09g011_cpg_info;
#endif
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index 606ae6cd918b..f85902e2590c 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -1591,6 +1591,7 @@ static const char *const rk3568_cru_critical_clocks[] __initconst = {
"hclk_php",
"pclk_php",
"hclk_usb",
+ "hclk_vo",
};
static const char *const rk3568_pmucru_critical_clocks[] __initconst = {
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 17e5d1cb9da2..239d9eead77f 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos-arm64.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov9.o
obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
diff --git a/drivers/clk/samsung/clk-exynosautov9.c b/drivers/clk/samsung/clk-exynosautov9.c
new file mode 100644
index 000000000000..d9e1f8e4a7b4
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynosautov9.c
@@ -0,0 +1,1733 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd.
+ * Author: Chanho Park <chanho61.park@samsung.com>
+ *
+ * Common Clock Framework support for ExynosAuto V9 SoC.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/samsung,exynosautov9.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/* ---- CMU_TOP ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_TOP (0x1b240000) */
+#define PLL_LOCKTIME_PLL_SHARED0 0x0000
+#define PLL_LOCKTIME_PLL_SHARED1 0x0004
+#define PLL_LOCKTIME_PLL_SHARED2 0x0008
+#define PLL_LOCKTIME_PLL_SHARED3 0x000c
+#define PLL_LOCKTIME_PLL_SHARED4 0x0010
+#define PLL_CON0_PLL_SHARED0 0x0100
+#define PLL_CON3_PLL_SHARED0 0x010c
+#define PLL_CON0_PLL_SHARED1 0x0140
+#define PLL_CON3_PLL_SHARED1 0x014c
+#define PLL_CON0_PLL_SHARED2 0x0180
+#define PLL_CON3_PLL_SHARED2 0x018c
+#define PLL_CON0_PLL_SHARED3 0x01c0
+#define PLL_CON3_PLL_SHARED3 0x01cc
+#define PLL_CON0_PLL_SHARED4 0x0200
+#define PLL_CON3_PLL_SHARED4 0x020c
+
+/* MUX */
+#define CLK_CON_MUX_MUX_CLKCMU_ACC_BUS 0x1000
+#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1004
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_BUS 0x1008
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_CPU 0x100c
+#define CLK_CON_MUX_MUX_CLKCMU_BUSC_BUS 0x1010
+#define CLK_CON_MUX_MUX_CLKCMU_BUSMC_BUS 0x1018
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1020
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_CLUSTER 0x1024
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x102c
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_CLUSTER 0x1030
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x1034
+#define CLK_CON_MUX_MUX_CLKCMU_DPTX_BUS 0x1040
+#define CLK_CON_MUX_MUX_CLKCMU_DPTX_DPGTC 0x1044
+#define CLK_CON_MUX_MUX_CLKCMU_DPUM_BUS 0x1048
+#define CLK_CON_MUX_MUX_CLKCMU_DPUS0_BUS 0x104c
+#define CLK_CON_MUX_MUX_CLKCMU_DPUS1_BUS 0x1050
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_BUS 0x1054
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_PCIE 0x1058
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_BUS 0x105c
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_MMC_CARD 0x1060
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_USBDRD 0x1064
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS2_BUS 0x1068
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS2_ETHERNET 0x106c
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS2_UFS_EMBD 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_G2D 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL 0x1078
+#define CLK_CON_MUX_MUX_CLKCMU_G3D00_SWITCH 0x107c
+#define CLK_CON_MUX_MUX_CLKCMU_G3D01_SWITCH 0x1080
+#define CLK_CON_MUX_MUX_CLKCMU_G3D1_SWITCH 0x1084
+#define CLK_CON_MUX_MUX_CLKCMU_ISPB_BUS 0x108c
+#define CLK_CON_MUX_MUX_CLKCMU_MFC_MFC 0x1090
+#define CLK_CON_MUX_MUX_CLKCMU_MFC_WFD 0x1094
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x109c
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP 0x1098
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x109c
+#define CLK_CON_MUX_MUX_CLKCMU_NPU_BUS 0x10a0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS 0x10a4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP 0x10a8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS 0x10ac
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP 0x10b0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS 0x10b4
+#define CLK_CON_MUX_MUX_CMU_CMUREF 0x10c0
+
+/* DIV */
+#define CLK_CON_DIV_CLKCMU_ACC_BUS 0x1800
+#define CLK_CON_DIV_CLKCMU_APM_BUS 0x1804
+#define CLK_CON_DIV_CLKCMU_AUD_BUS 0x1808
+#define CLK_CON_DIV_CLKCMU_AUD_CPU 0x180c
+#define CLK_CON_DIV_CLKCMU_BUSC_BUS 0x1810
+#define CLK_CON_DIV_CLKCMU_BUSMC_BUS 0x1818
+#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x181c
+#define CLK_CON_DIV_CLKCMU_CPUCL0_CLUSTER 0x1820
+#define CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1828
+#define CLK_CON_DIV_CLKCMU_CPUCL1_CLUSTER 0x182c
+#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x1830
+#define CLK_CON_DIV_CLKCMU_DPTX_BUS 0x183c
+#define CLK_CON_DIV_CLKCMU_DPTX_DPGTC 0x1840
+#define CLK_CON_DIV_CLKCMU_DPUM_BUS 0x1844
+#define CLK_CON_DIV_CLKCMU_DPUS0_BUS 0x1848
+#define CLK_CON_DIV_CLKCMU_DPUS1_BUS 0x184c
+#define CLK_CON_DIV_CLKCMU_FSYS0_BUS 0x1850
+#define CLK_CON_DIV_CLKCMU_FSYS0_PCIE 0x1854
+#define CLK_CON_DIV_CLKCMU_FSYS1_BUS 0x1858
+#define CLK_CON_DIV_CLKCMU_FSYS1_USBDRD 0x185c
+#define CLK_CON_DIV_CLKCMU_FSYS2_BUS 0x1860
+#define CLK_CON_DIV_CLKCMU_FSYS2_ETHERNET 0x1864
+#define CLK_CON_DIV_CLKCMU_FSYS2_UFS_EMBD 0x1868
+#define CLK_CON_DIV_CLKCMU_G2D_G2D 0x186c
+#define CLK_CON_DIV_CLKCMU_G2D_MSCL 0x1870
+#define CLK_CON_DIV_CLKCMU_G3D00_SWITCH 0x1874
+#define CLK_CON_DIV_CLKCMU_G3D01_SWITCH 0x1878
+#define CLK_CON_DIV_CLKCMU_G3D1_SWITCH 0x187c
+#define CLK_CON_DIV_CLKCMU_ISPB_BUS 0x1884
+#define CLK_CON_DIV_CLKCMU_MFC_MFC 0x1888
+#define CLK_CON_DIV_CLKCMU_MFC_WFD 0x188c
+#define CLK_CON_DIV_CLKCMU_MIF_BUSP 0x1890
+#define CLK_CON_DIV_CLKCMU_NPU_BUS 0x1894
+#define CLK_CON_DIV_CLKCMU_PERIC0_BUS 0x1898
+#define CLK_CON_DIV_CLKCMU_PERIC0_IP 0x189c
+#define CLK_CON_DIV_CLKCMU_PERIC1_BUS 0x18a0
+#define CLK_CON_DIV_CLKCMU_PERIC1_IP 0x18a4
+#define CLK_CON_DIV_CLKCMU_PERIS_BUS 0x18a8
+#define CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST 0x18b4
+
+#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x18b8
+#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x18bc
+#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x18c0
+#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x18c4
+#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x18c8
+#define CLK_CON_DIV_PLL_SHARED2_DIV2 0x18cc
+#define CLK_CON_DIV_PLL_SHARED2_DIV3 0x18d0
+#define CLK_CON_DIV_PLL_SHARED2_DIV4 0x18d4
+#define CLK_CON_DIV_PLL_SHARED4_DIV2 0x18d4
+#define CLK_CON_DIV_PLL_SHARED4_DIV4 0x18d8
+
+/* GATE */
+#define CLK_CON_GAT_CLKCMU_CMU_BUSC_BOOST 0x2000
+#define CLK_CON_GAT_CLKCMU_CMU_BUSMC_BOOST 0x2004
+#define CLK_CON_GAT_CLKCMU_CMU_CORE_BOOST 0x2008
+#define CLK_CON_GAT_CLKCMU_CMU_CPUCL0_BOOST 0x2010
+#define CLK_CON_GAT_CLKCMU_CMU_CPUCL1_BOOST 0x2018
+#define CLK_CON_GAT_CLKCMU_CMU_MIF_BOOST 0x2020
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_MMC_CARD 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_MIF_SWITCH 0x2028
+#define CLK_CON_GAT_GATE_CLKCMU_ACC_BUS 0x202c
+#define CLK_CON_GAT_GATE_CLKCMU_APM_BUS 0x2030
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_BUS 0x2034
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_CPU 0x2038
+#define CLK_CON_GAT_GATE_CLKCMU_BUSC_BUS 0x203c
+#define CLK_CON_GAT_GATE_CLKCMU_BUSMC_BUS 0x2044
+#define CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST 0x2048
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_CLUSTER 0x2050
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH 0x2058
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_CLUSTER 0x205c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH 0x2060
+#define CLK_CON_GAT_GATE_CLKCMU_DPTX_BUS 0x206c
+#define CLK_CON_GAT_GATE_CLKCMU_DPTX_DPGTC 0x2070
+#define CLK_CON_GAT_GATE_CLKCMU_DPUM_BUS 0x2060
+#define CLK_CON_GAT_GATE_CLKCMU_DPUS0_BUS 0x2064
+#define CLK_CON_GAT_GATE_CLKCMU_DPUS1_BUS 0x207c
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_BUS 0x2080
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_PCIE 0x2084
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_BUS 0x2088
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_USBDRD 0x208c
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS2_BUS 0x2090
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS2_ETHERNET 0x2094
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS2_UFS_EMBD 0x2098
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_G2D 0x209c
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL 0x20a0
+#define CLK_CON_GAT_GATE_CLKCMU_G3D00_SWITCH 0x20a4
+#define CLK_CON_GAT_GATE_CLKCMU_G3D01_SWITCH 0x20a8
+#define CLK_CON_GAT_GATE_CLKCMU_G3D1_SWITCH 0x20ac
+#define CLK_CON_GAT_GATE_CLKCMU_ISPB_BUS 0x20b4
+#define CLK_CON_GAT_GATE_CLKCMU_MFC_MFC 0x20b8
+#define CLK_CON_GAT_GATE_CLKCMU_MFC_WFD 0x20bc
+#define CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP 0x20c0
+#define CLK_CON_GAT_GATE_CLKCMU_NPU_BUS 0x20c4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS 0x20c8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP 0x20cc
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS 0x20d0
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP 0x20d4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS 0x20d8
+
+static const unsigned long top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_LOCKTIME_PLL_SHARED2,
+ PLL_LOCKTIME_PLL_SHARED3,
+ PLL_LOCKTIME_PLL_SHARED4,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON3_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ PLL_CON3_PLL_SHARED1,
+ PLL_CON0_PLL_SHARED2,
+ PLL_CON3_PLL_SHARED2,
+ PLL_CON0_PLL_SHARED3,
+ PLL_CON3_PLL_SHARED3,
+ PLL_CON0_PLL_SHARED4,
+ PLL_CON3_PLL_SHARED4,
+ CLK_CON_MUX_MUX_CLKCMU_ACC_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_CPU,
+ CLK_CON_MUX_MUX_CLKCMU_BUSC_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_CLUSTER,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_CLUSTER,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_DPTX_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DPTX_DPGTC,
+ CLK_CON_MUX_MUX_CLKCMU_DPUM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DPUS0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DPUS1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_PCIE,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS1_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS1_USBDRD,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS2_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS2_ETHERNET,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS2_UFS_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_G2D,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL,
+ CLK_CON_MUX_MUX_CLKCMU_G3D00_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_G3D01_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_G3D1_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_ISPB_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_MFC_MFC,
+ CLK_CON_MUX_MUX_CLKCMU_MFC_WFD,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_NPU_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS,
+ CLK_CON_MUX_MUX_CMU_CMUREF,
+ CLK_CON_DIV_CLKCMU_ACC_BUS,
+ CLK_CON_DIV_CLKCMU_APM_BUS,
+ CLK_CON_DIV_CLKCMU_AUD_BUS,
+ CLK_CON_DIV_CLKCMU_AUD_CPU,
+ CLK_CON_DIV_CLKCMU_BUSC_BUS,
+ CLK_CON_DIV_CLKCMU_BUSMC_BUS,
+ CLK_CON_DIV_CLKCMU_CORE_BUS,
+ CLK_CON_DIV_CLKCMU_CPUCL0_CLUSTER,
+ CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL1_CLUSTER,
+ CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_DIV_CLKCMU_DPTX_BUS,
+ CLK_CON_DIV_CLKCMU_DPTX_DPGTC,
+ CLK_CON_DIV_CLKCMU_DPUM_BUS,
+ CLK_CON_DIV_CLKCMU_DPUS0_BUS,
+ CLK_CON_DIV_CLKCMU_DPUS1_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS0_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS0_PCIE,
+ CLK_CON_DIV_CLKCMU_FSYS1_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS1_USBDRD,
+ CLK_CON_DIV_CLKCMU_FSYS2_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS2_ETHERNET,
+ CLK_CON_DIV_CLKCMU_FSYS2_UFS_EMBD,
+ CLK_CON_DIV_CLKCMU_G2D_G2D,
+ CLK_CON_DIV_CLKCMU_G2D_MSCL,
+ CLK_CON_DIV_CLKCMU_G3D00_SWITCH,
+ CLK_CON_DIV_CLKCMU_G3D01_SWITCH,
+ CLK_CON_DIV_CLKCMU_G3D1_SWITCH,
+ CLK_CON_DIV_CLKCMU_ISPB_BUS,
+ CLK_CON_DIV_CLKCMU_MFC_MFC,
+ CLK_CON_DIV_CLKCMU_MFC_WFD,
+ CLK_CON_DIV_CLKCMU_MIF_BUSP,
+ CLK_CON_DIV_CLKCMU_NPU_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC0_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC0_IP,
+ CLK_CON_DIV_CLKCMU_PERIC1_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC1_IP,
+ CLK_CON_DIV_CLKCMU_PERIS_BUS,
+ CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST,
+ CLK_CON_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_PLL_SHARED0_DIV3,
+ CLK_CON_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_PLL_SHARED1_DIV3,
+ CLK_CON_DIV_PLL_SHARED1_DIV4,
+ CLK_CON_DIV_PLL_SHARED2_DIV2,
+ CLK_CON_DIV_PLL_SHARED2_DIV3,
+ CLK_CON_DIV_PLL_SHARED2_DIV4,
+ CLK_CON_DIV_PLL_SHARED4_DIV2,
+ CLK_CON_DIV_PLL_SHARED4_DIV4,
+ CLK_CON_GAT_CLKCMU_CMU_BUSC_BOOST,
+ CLK_CON_GAT_CLKCMU_CMU_BUSMC_BOOST,
+ CLK_CON_GAT_CLKCMU_CMU_CORE_BOOST,
+ CLK_CON_GAT_CLKCMU_CMU_CPUCL0_BOOST,
+ CLK_CON_GAT_CLKCMU_CMU_CPUCL1_BOOST,
+ CLK_CON_GAT_CLKCMU_CMU_MIF_BOOST,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS1_MMC_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_MIF_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_ACC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_CPU,
+ CLK_CON_GAT_GATE_CLKCMU_BUSC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUSMC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_CLUSTER,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_CLUSTER,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_DPTX_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DPTX_DPGTC,
+ CLK_CON_GAT_GATE_CLKCMU_DPUM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DPUS0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DPUS1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS0_PCIE,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS1_USBDRD,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS2_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS2_ETHERNET,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS2_UFS_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_G2D,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL,
+ CLK_CON_GAT_GATE_CLKCMU_G3D00_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_G3D01_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_G3D1_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_ISPB_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_MFC_MFC,
+ CLK_CON_GAT_GATE_CLKCMU_MFC_WFD,
+ CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP,
+ CLK_CON_GAT_GATE_CLKCMU_NPU_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP,
+ CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS,
+};
+
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL),
+ PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL),
+ PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL),
+ PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared3_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL),
+ PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared4_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL),
+};
+
+/* List of parent clocks for Muxes in CMU_TOP */
+PNAME(mout_shared0_pll_p) = { "oscclk", "fout_shared0_pll" };
+PNAME(mout_shared1_pll_p) = { "oscclk", "fout_shared1_pll" };
+PNAME(mout_shared2_pll_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_shared3_pll_p) = { "oscclk", "fout_shared3_pll" };
+PNAME(mout_shared4_pll_p) = { "oscclk", "fout_shared4_pll" };
+
+PNAME(mout_clkcmu_cmu_boost_p) = { "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4" };
+PNAME(mout_clkcmu_cmu_cmuref_p) = { "oscclk", "dout_cmu_boost" };
+PNAME(mout_clkcmu_acc_bus_p) = { "dout_shared1_div3", "dout_shared2_div3",
+ "dout_shared1_div4", "dout_shared2_div4" };
+PNAME(mout_clkcmu_apm_bus_p) = { "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4" };
+PNAME(mout_clkcmu_aud_cpu_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "fout_shared3_pll" };
+PNAME(mout_clkcmu_aud_bus_p) = { "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "dout_shared1_div4" };
+PNAME(mout_clkcmu_busc_bus_p) = { "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4" };
+PNAME(mout_clkcmu_core_bus_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared3_pll" };
+PNAME(mout_clkcmu_cpucl0_switch_p) = {
+ "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2" };
+PNAME(mout_clkcmu_cpucl0_cluster_p) = {
+ "fout_shared2_pll", "fout_shared4_pll",
+ "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2",
+ "dout_shared2_div3", "fout_shared3_pll" };
+PNAME(mout_clkcmu_dptx_bus_p) = { "dout_shared4_div2", "dout_shared2_div3",
+ "dout_shared1_div4", "dout_shared2_div4" };
+PNAME(mout_clkcmu_dptx_dpgtc_p) = { "oscclk", "dout_shared2_div3",
+ "dout_shared2_div4", "dout_shared4_div4" };
+PNAME(mout_clkcmu_dpum_bus_p) = { "dout_shared1_div3", "dout_shared2_div3",
+ "dout_shared1_div4", "dout_shared2_div4",
+ "dout_shared4_div4", "fout_shared3_pll" };
+PNAME(mout_clkcmu_fsys0_bus_p) = {
+ "dout_shared4_div2", "dout_shared2_div3",
+ "dout_shared1_div4", "dout_shared2_div4" };
+PNAME(mout_clkcmu_fsys0_pcie_p) = { "oscclk", "dout_shared2_div4" };
+PNAME(mout_clkcmu_fsys1_bus_p) = { "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4" };
+PNAME(mout_clkcmu_fsys1_usbdrd_p) = {
+ "oscclk", "dout_shared2_div3",
+ "dout_shared2_div4", "dout_shared4_div4" };
+PNAME(mout_clkcmu_fsys1_mmc_card_p) = {
+ "oscclk", "dout_shared2_div2",
+ "dout_shared4_div2", "dout_shared2_div3" };
+PNAME(mout_clkcmu_fsys2_ethernet_p) = {
+ "oscclk", "dout_shared2_div2",
+ "dout_shared0_div3", "dout_shared2_div3",
+ "dout_shared1_div4", "fout_shared3_pll" };
+PNAME(mout_clkcmu_g2d_g2d_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4" };
+PNAME(mout_clkcmu_g3d0_switch_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2" };
+PNAME(mout_clkcmu_g3d1_switch_p) = { "dout_shared2_div2", "dout_shared4_div2",
+ "dout_shared2_div3", "dout_shared1_div4" };
+PNAME(mout_clkcmu_mif_switch_p) = { "fout_shared0_pll", "fout_shared1_pll",
+ "fout_shared2_pll", "fout_shared4_pll",
+ "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "fout_shared3_pll" };
+PNAME(mout_clkcmu_npu_bus_p) = { "dout_shared1_div2", "dout_shared2_div2",
+ "dout_shared0_div3", "dout_shared4_div2",
+ "dout_shared1_div3", "dout_shared2_div3",
+ "dout_shared1_div4", "fout_shared3_pll" };
+PNAME(mout_clkcmu_peric0_bus_p) = { "dout_shared2_div3", "dout_shared2_div4" };
+
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ MUX(MOUT_SHARED0_PLL, "mout_shared0_pll", mout_shared0_pll_p,
+ PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(MOUT_SHARED1_PLL, "mout_shared1_pll", mout_shared1_pll_p,
+ PLL_CON0_PLL_SHARED1, 4, 1),
+ MUX(MOUT_SHARED2_PLL, "mout_shared2_pll", mout_shared2_pll_p,
+ PLL_CON0_PLL_SHARED2, 4, 1),
+ MUX(MOUT_SHARED3_PLL, "mout_shared3_pll", mout_shared3_pll_p,
+ PLL_CON0_PLL_SHARED3, 4, 1),
+ MUX(MOUT_SHARED4_PLL, "mout_shared4_pll", mout_shared4_pll_p,
+ PLL_CON0_PLL_SHARED4, 4, 1),
+
+ /* BOOST */
+ MUX(MOUT_CLKCMU_CMU_BOOST, "mout_clkcmu_cmu_boost",
+ mout_clkcmu_cmu_boost_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST, 0, 2),
+ MUX(MOUT_CLKCMU_CMU_CMUREF, "mout_clkcmu_cmu_cmuref",
+ mout_clkcmu_cmu_cmuref_p, CLK_CON_MUX_MUX_CMU_CMUREF, 0, 1),
+
+ /* ACC */
+ MUX(MOUT_CLKCMU_ACC_BUS, "mout_clkcmu_acc_bus", mout_clkcmu_acc_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_ACC_BUS, 0, 2),
+
+ /* APM */
+ MUX(MOUT_CLKCMU_APM_BUS, "mout_clkcmu_apm_bus", mout_clkcmu_apm_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 2),
+
+ /* AUD */
+ MUX(MOUT_CLKCMU_AUD_CPU, "mout_clkcmu_aud_cpu", mout_clkcmu_aud_cpu_p,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_CPU, 0, 3),
+ MUX(MOUT_CLKCMU_AUD_BUS, "mout_clkcmu_aud_bus", mout_clkcmu_aud_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_BUS, 0, 2),
+
+ /* BUSC */
+ MUX(MOUT_CLKCMU_BUSC_BUS, "mout_clkcmu_busc_bus",
+ mout_clkcmu_busc_bus_p, CLK_CON_MUX_MUX_CLKCMU_BUSC_BUS, 0, 2),
+
+ /* BUSMC */
+ MUX(MOUT_CLKCMU_BUSMC_BUS, "mout_clkcmu_busmc_bus",
+ mout_clkcmu_busc_bus_p, CLK_CON_MUX_MUX_CLKCMU_BUSMC_BUS, 0, 2),
+
+ /* CORE */
+ MUX(MOUT_CLKCMU_CORE_BUS, "mout_clkcmu_core_bus",
+ mout_clkcmu_core_bus_p, CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 3),
+
+ /* CPUCL0 */
+ MUX(MOUT_CLKCMU_CPUCL0_SWITCH, "mout_clkcmu_cpucl0_switch",
+ mout_clkcmu_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ 0, 2),
+ MUX(MOUT_CLKCMU_CPUCL0_CLUSTER, "mout_clkcmu_cpucl0_cluster",
+ mout_clkcmu_cpucl0_cluster_p,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_CLUSTER, 0, 3),
+
+ /* CPUCL1 */
+ MUX(MOUT_CLKCMU_CPUCL1_SWITCH, "mout_clkcmu_cpucl1_switch",
+ mout_clkcmu_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ 0, 2),
+ MUX(MOUT_CLKCMU_CPUCL1_CLUSTER, "mout_clkcmu_cpucl1_cluster",
+ mout_clkcmu_cpucl0_cluster_p,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_CLUSTER, 0, 3),
+
+ /* DPTX */
+ MUX(MOUT_CLKCMU_DPTX_BUS, "mout_clkcmu_dptx_bus",
+ mout_clkcmu_dptx_bus_p, CLK_CON_MUX_MUX_CLKCMU_DPTX_BUS, 0, 2),
+ MUX(MOUT_CLKCMU_DPTX_DPGTC, "mout_clkcmu_dptx_dpgtc",
+ mout_clkcmu_dptx_dpgtc_p, CLK_CON_MUX_MUX_CLKCMU_DPTX_DPGTC, 0, 2),
+
+ /* DPUM */
+ MUX(MOUT_CLKCMU_DPUM_BUS, "mout_clkcmu_dpum_bus",
+ mout_clkcmu_dpum_bus_p, CLK_CON_MUX_MUX_CLKCMU_DPUM_BUS, 0, 3),
+
+ /* DPUS */
+ MUX(MOUT_CLKCMU_DPUS0_BUS, "mout_clkcmu_dpus0_bus",
+ mout_clkcmu_dpum_bus_p, CLK_CON_MUX_MUX_CLKCMU_DPUS0_BUS, 0, 3),
+ MUX(MOUT_CLKCMU_DPUS1_BUS, "mout_clkcmu_dpus1_bus",
+ mout_clkcmu_dpum_bus_p, CLK_CON_MUX_MUX_CLKCMU_DPUS1_BUS, 0, 3),
+
+ /* FSYS0 */
+ MUX(MOUT_CLKCMU_FSYS0_BUS, "mout_clkcmu_fsys0_bus",
+ mout_clkcmu_fsys0_bus_p, CLK_CON_MUX_MUX_CLKCMU_FSYS0_BUS, 0, 2),
+ MUX(MOUT_CLKCMU_FSYS0_PCIE, "mout_clkcmu_fsys0_pcie",
+ mout_clkcmu_fsys0_pcie_p, CLK_CON_MUX_MUX_CLKCMU_FSYS0_PCIE, 0, 1),
+
+ /* FSYS1 */
+ MUX(MOUT_CLKCMU_FSYS1_BUS, "mout_clkcmu_fsys1_bus",
+ mout_clkcmu_fsys1_bus_p, CLK_CON_MUX_MUX_CLKCMU_FSYS1_BUS, 0, 2),
+ MUX(MOUT_CLKCMU_FSYS1_USBDRD, "mout_clkcmu_fsys1_usbdrd",
+ mout_clkcmu_fsys1_usbdrd_p, CLK_CON_MUX_MUX_CLKCMU_FSYS1_USBDRD,
+ 0, 2),
+ MUX(MOUT_CLKCMU_FSYS1_MMC_CARD, "mout_clkcmu_fsys1_mmc_card",
+ mout_clkcmu_fsys1_mmc_card_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS1_MMC_CARD, 0, 2),
+
+ /* FSYS2 */
+ MUX(MOUT_CLKCMU_FSYS2_BUS, "mout_clkcmu_fsys2_bus",
+ mout_clkcmu_fsys0_bus_p, CLK_CON_MUX_MUX_CLKCMU_FSYS2_BUS, 0, 2),
+ MUX(MOUT_CLKCMU_FSYS2_UFS_EMBD, "mout_clkcmu_fsys2_ufs_embd",
+ mout_clkcmu_fsys1_usbdrd_p, CLK_CON_MUX_MUX_CLKCMU_FSYS2_UFS_EMBD,
+ 0, 2),
+ MUX(MOUT_CLKCMU_FSYS2_ETHERNET, "mout_clkcmu_fsys2_ethernet",
+ mout_clkcmu_fsys2_ethernet_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS2_ETHERNET, 0, 3),
+
+ /* G2D */
+ MUX(MOUT_CLKCMU_G2D_G2D, "mout_clkcmu_g2d_g2d", mout_clkcmu_g2d_g2d_p,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, 0, 3),
+ MUX(MOUT_CLKCMU_G2D_MSCL, "mout_clkcmu_g2d_mscl",
+ mout_clkcmu_fsys1_bus_p, CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, 0, 2),
+
+ /* G3D0 */
+ MUX(MOUT_CLKCMU_G3D00_SWITCH, "mout_clkcmu_g3d00_switch",
+ mout_clkcmu_g3d0_switch_p, CLK_CON_MUX_MUX_CLKCMU_G3D00_SWITCH,
+ 0, 2),
+ MUX(MOUT_CLKCMU_G3D01_SWITCH, "mout_clkcmu_g3d01_switch",
+ mout_clkcmu_g3d0_switch_p, CLK_CON_MUX_MUX_CLKCMU_G3D01_SWITCH,
+ 0, 2),
+
+ /* G3D1 */
+ MUX(MOUT_CLKCMU_G3D1_SWITCH, "mout_clkcmu_g3d1_switch",
+ mout_clkcmu_g3d1_switch_p, CLK_CON_MUX_MUX_CLKCMU_G3D1_SWITCH,
+ 0, 2),
+
+ /* ISPB */
+ MUX(MOUT_CLKCMU_ISPB_BUS, "mout_clkcmu_ispb_bus",
+ mout_clkcmu_acc_bus_p, CLK_CON_MUX_MUX_CLKCMU_ISPB_BUS, 0, 2),
+
+ /* MFC */
+ MUX(MOUT_CLKCMU_MFC_MFC, "mout_clkcmu_mfc_mfc",
+ mout_clkcmu_g3d1_switch_p, CLK_CON_MUX_MUX_CLKCMU_MFC_MFC, 0, 2),
+ MUX(MOUT_CLKCMU_MFC_WFD, "mout_clkcmu_mfc_wfd",
+ mout_clkcmu_fsys0_bus_p, CLK_CON_MUX_MUX_CLKCMU_MFC_WFD, 0, 2),
+
+ /* MIF */
+ MUX(MOUT_CLKCMU_MIF_SWITCH, "mout_clkcmu_mif_switch",
+ mout_clkcmu_mif_switch_p, CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, 0, 3),
+ MUX(MOUT_CLKCMU_MIF_BUSP, "mout_clkcmu_mif_busp",
+ mout_clkcmu_fsys1_bus_p, CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP, 0, 2),
+
+ /* NPU */
+ MUX(MOUT_CLKCMU_NPU_BUS, "mout_clkcmu_npu_bus", mout_clkcmu_npu_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_NPU_BUS, 0, 3),
+
+ /* PERIC0 */
+ MUX(MOUT_CLKCMU_PERIC0_BUS, "mout_clkcmu_peric0_bus",
+ mout_clkcmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS, 0, 1),
+ MUX(MOUT_CLKCMU_PERIC0_IP, "mout_clkcmu_peric0_ip",
+ mout_clkcmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP, 0, 1),
+
+ /* PERIC1 */
+ MUX(MOUT_CLKCMU_PERIC1_BUS, "mout_clkcmu_peric1_bus",
+ mout_clkcmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS, 0, 1),
+ MUX(MOUT_CLKCMU_PERIC1_IP, "mout_clkcmu_peric1_ip",
+ mout_clkcmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP, 0, 1),
+
+ /* PERIS */
+ MUX(MOUT_CLKCMU_PERIS_BUS, "mout_clkcmu_peris_bus",
+ mout_clkcmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS, 0, 1),
+};
+
+static const struct samsung_div_clock top_div_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ DIV(DOUT_SHARED0_DIV3, "dout_shared0_div3", "mout_shared0_pll",
+ CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
+ DIV(DOUT_SHARED0_DIV2, "dout_shared0_div2", "mout_shared0_pll",
+ CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
+
+ DIV(DOUT_SHARED1_DIV3, "dout_shared1_div3", "mout_shared1_pll",
+ CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
+ DIV(DOUT_SHARED1_DIV2, "dout_shared1_div2", "mout_shared1_pll",
+ CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
+ DIV(DOUT_SHARED1_DIV4, "dout_shared1_div4", "dout_shared1_div2",
+ CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
+
+ DIV(DOUT_SHARED2_DIV3, "dout_shared2_div3", "mout_shared2_pll",
+ CLK_CON_DIV_PLL_SHARED2_DIV3, 0, 2),
+ DIV(DOUT_SHARED2_DIV2, "dout_shared2_div2", "mout_shared2_pll",
+ CLK_CON_DIV_PLL_SHARED2_DIV2, 0, 1),
+ DIV(DOUT_SHARED2_DIV4, "dout_shared2_div4", "dout_shared2_div2",
+ CLK_CON_DIV_PLL_SHARED2_DIV4, 0, 1),
+
+ DIV(DOUT_SHARED4_DIV2, "dout_shared4_div2", "mout_shared4_pll",
+ CLK_CON_DIV_PLL_SHARED4_DIV2, 0, 1),
+ DIV(DOUT_SHARED4_DIV4, "dout_shared4_div4", "dout_shared4_div2",
+ CLK_CON_DIV_PLL_SHARED4_DIV4, 0, 1),
+
+ /* BOOST */
+ DIV(DOUT_CLKCMU_CMU_BOOST, "dout_clkcmu_cmu_boost",
+ "gout_clkcmu_cmu_boost", CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST, 0, 2),
+
+ /* ACC */
+ DIV(DOUT_CLKCMU_ACC_BUS, "dout_clkcmu_acc_bus", "gout_clkcmu_acc_bus",
+ CLK_CON_DIV_CLKCMU_ACC_BUS, 0, 4),
+
+ /* APM */
+ DIV(DOUT_CLKCMU_APM_BUS, "dout_clkcmu_apm_bus", "gout_clkcmu_apm_bus",
+ CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3),
+
+ /* AUD */
+ DIV(DOUT_CLKCMU_AUD_CPU, "dout_clkcmu_aud_cpu", "gout_clkcmu_aud_cpu",
+ CLK_CON_DIV_CLKCMU_AUD_CPU, 0, 3),
+ DIV(DOUT_CLKCMU_AUD_BUS, "dout_clkcmu_aud_bus", "gout_clkcmu_aud_bus",
+ CLK_CON_DIV_CLKCMU_AUD_BUS, 0, 4),
+
+ /* BUSC */
+ DIV(DOUT_CLKCMU_BUSC_BUS, "dout_clkcmu_busc_bus",
+ "gout_clkcmu_busc_bus", CLK_CON_DIV_CLKCMU_BUSC_BUS, 0, 4),
+
+ /* BUSMC */
+ DIV(DOUT_CLKCMU_BUSMC_BUS, "dout_clkcmu_busmc_bus",
+ "gout_clkcmu_busmc_bus", CLK_CON_DIV_CLKCMU_BUSMC_BUS, 0, 4),
+
+ /* CORE */
+ DIV(DOUT_CLKCMU_CORE_BUS, "dout_clkcmu_core_bus",
+ "gout_clkcmu_core_bus", CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
+
+ /* CPUCL0 */
+ DIV(DOUT_CLKCMU_CPUCL0_SWITCH, "dout_clkcmu_cpucl0_switch",
+ "gout_clkcmu_cpucl0_switch", CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH,
+ 0, 3),
+ DIV(DOUT_CLKCMU_CPUCL0_CLUSTER, "dout_clkcmu_cpucl0_cluster",
+ "gout_clkcmu_cpucl0_cluster", CLK_CON_DIV_CLKCMU_CPUCL0_CLUSTER,
+ 0, 3),
+
+ /* CPUCL1 */
+ DIV(DOUT_CLKCMU_CPUCL1_SWITCH, "dout_clkcmu_cpucl1_switch",
+ "gout_clkcmu_cpucl1_switch", CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH,
+ 0, 3),
+ DIV(DOUT_CLKCMU_CPUCL1_CLUSTER, "dout_clkcmu_cpucl1_cluster",
+ "gout_clkcmu_cpucl1_cluster", CLK_CON_DIV_CLKCMU_CPUCL1_CLUSTER,
+ 0, 3),
+
+ /* DPTX */
+ DIV(DOUT_CLKCMU_DPTX_BUS, "dout_clkcmu_dptx_bus",
+ "gout_clkcmu_dptx_bus", CLK_CON_DIV_CLKCMU_DPTX_BUS, 0, 4),
+ DIV(DOUT_CLKCMU_DPTX_DPGTC, "dout_clkcmu_dptx_dpgtc",
+ "gout_clkcmu_dptx_dpgtc", CLK_CON_DIV_CLKCMU_DPTX_DPGTC, 0, 3),
+
+ /* DPUM */
+ DIV(DOUT_CLKCMU_DPUM_BUS, "dout_clkcmu_dpum_bus",
+ "gout_clkcmu_dpum_bus", CLK_CON_DIV_CLKCMU_DPUM_BUS, 0, 4),
+
+ /* DPUS */
+ DIV(DOUT_CLKCMU_DPUS0_BUS, "dout_clkcmu_dpus0_bus",
+ "gout_clkcmu_dpus0_bus", CLK_CON_DIV_CLKCMU_DPUS0_BUS, 0, 4),
+ DIV(DOUT_CLKCMU_DPUS1_BUS, "dout_clkcmu_dpus1_bus",
+ "gout_clkcmu_dpus1_bus", CLK_CON_DIV_CLKCMU_DPUS1_BUS, 0, 4),
+
+ /* FSYS0 */
+ DIV(DOUT_CLKCMU_FSYS0_BUS, "dout_clkcmu_fsys0_bus",
+ "gout_clkcmu_fsys0_bus", CLK_CON_DIV_CLKCMU_FSYS0_BUS, 0, 4),
+
+ /* FSYS1 */
+ DIV(DOUT_CLKCMU_FSYS1_BUS, "dout_clkcmu_fsys1_bus",
+ "gout_clkcmu_fsys1_bus", CLK_CON_DIV_CLKCMU_FSYS1_BUS, 0, 4),
+ DIV(DOUT_CLKCMU_FSYS1_USBDRD, "dout_clkcmu_fsys1_usbdrd",
+ "gout_clkcmu_fsys1_usbdrd", CLK_CON_DIV_CLKCMU_FSYS1_USBDRD, 0, 4),
+
+ /* FSYS2 */
+ DIV(DOUT_CLKCMU_FSYS2_BUS, "dout_clkcmu_fsys2_bus",
+ "gout_clkcmu_fsys2_bus", CLK_CON_DIV_CLKCMU_FSYS2_BUS, 0, 4),
+ DIV(DOUT_CLKCMU_FSYS2_UFS_EMBD, "dout_clkcmu_fsys2_ufs_embd",
+ "gout_clkcmu_fsys2_ufs_embd", CLK_CON_DIV_CLKCMU_FSYS2_UFS_EMBD,
+ 0, 3),
+ DIV(DOUT_CLKCMU_FSYS2_ETHERNET, "dout_clkcmu_fsys2_ethernet",
+ "gout_clkcmu_fsys2_ethernet", CLK_CON_DIV_CLKCMU_FSYS2_ETHERNET,
+ 0, 3),
+
+ /* G2D */
+ DIV(DOUT_CLKCMU_G2D_G2D, "dout_clkcmu_g2d_g2d", "gout_clkcmu_g2d_g2d",
+ CLK_CON_DIV_CLKCMU_G2D_G2D, 0, 4),
+ DIV(DOUT_CLKCMU_G2D_MSCL, "dout_clkcmu_g2d_mscl",
+ "gout_clkcmu_g2d_mscl", CLK_CON_DIV_CLKCMU_G2D_MSCL, 0, 4),
+
+ /* G3D0 */
+ DIV(DOUT_CLKCMU_G3D00_SWITCH, "dout_clkcmu_g3d00_switch",
+ "gout_clkcmu_g3d00_switch", CLK_CON_DIV_CLKCMU_G3D00_SWITCH, 0, 3),
+ DIV(DOUT_CLKCMU_G3D01_SWITCH, "dout_clkcmu_g3d01_switch",
+ "gout_clkcmu_g3d01_switch", CLK_CON_DIV_CLKCMU_G3D01_SWITCH, 0, 3),
+
+ /* G3D1 */
+ DIV(DOUT_CLKCMU_G3D1_SWITCH, "dout_clkcmu_g3d1_switch",
+ "gout_clkcmu_g3d1_switch", CLK_CON_DIV_CLKCMU_G3D1_SWITCH, 0, 3),
+
+ /* ISPB */
+ DIV(DOUT_CLKCMU_ISPB_BUS, "dout_clkcmu_ispb_bus",
+ "gout_clkcmu_ispb_bus", CLK_CON_DIV_CLKCMU_ISPB_BUS, 0, 4),
+
+ /* MFC */
+ DIV(DOUT_CLKCMU_MFC_MFC, "dout_clkcmu_mfc_mfc", "gout_clkcmu_mfc_mfc",
+ CLK_CON_DIV_CLKCMU_MFC_MFC, 0, 4),
+ DIV(DOUT_CLKCMU_MFC_WFD, "dout_clkcmu_mfc_wfd", "gout_clkcmu_mfc_wfd",
+ CLK_CON_DIV_CLKCMU_MFC_WFD, 0, 4),
+
+ /* MIF */
+ DIV(DOUT_CLKCMU_MIF_BUSP, "dout_clkcmu_mif_busp",
+ "gout_clkcmu_mif_busp", CLK_CON_DIV_CLKCMU_MIF_BUSP, 0, 4),
+
+ /* NPU */
+ DIV(DOUT_CLKCMU_NPU_BUS, "dout_clkcmu_npu_bus", "gout_clkcmu_npu_bus",
+ CLK_CON_DIV_CLKCMU_NPU_BUS, 0, 4),
+
+ /* PERIC0 */
+ DIV(DOUT_CLKCMU_PERIC0_BUS, "dout_clkcmu_peric0_bus",
+ "gout_clkcmu_peric0_bus", CLK_CON_DIV_CLKCMU_PERIC0_BUS, 0, 4),
+ DIV(DOUT_CLKCMU_PERIC0_IP, "dout_clkcmu_peric0_ip",
+ "gout_clkcmu_peric0_ip", CLK_CON_DIV_CLKCMU_PERIC0_IP, 0, 4),
+
+ /* PERIC1 */
+ DIV(DOUT_CLKCMU_PERIC1_BUS, "dout_clkcmu_peric1_bus",
+ "gout_clkcmu_peric1_bus", CLK_CON_DIV_CLKCMU_PERIC1_BUS, 0, 4),
+ DIV(DOUT_CLKCMU_PERIC1_IP, "dout_clkcmu_peric1_ip",
+ "gout_clkcmu_peric1_ip", CLK_CON_DIV_CLKCMU_PERIC1_IP, 0, 4),
+
+ /* PERIS */
+ DIV(DOUT_CLKCMU_PERIS_BUS, "dout_clkcmu_peris_bus",
+ "gout_clkcmu_peris_bus", CLK_CON_DIV_CLKCMU_PERIS_BUS, 0, 4),
+};
+
+static const struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initconst = {
+ FFACTOR(DOUT_CLKCMU_FSYS0_PCIE, "dout_clkcmu_fsys0_pcie",
+ "gout_clkcmu_fsys0_pcie", 1, 4, 0),
+};
+
+static const struct samsung_gate_clock top_gate_clks[] __initconst = {
+ /* BOOST */
+ GATE(GOUT_CLKCMU_CMU_BOOST, "gout_clkcmu_cmu_boost",
+ "mout_clkcmu_cmu_boost", CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST,
+ 21, 0, 0),
+
+ GATE(GOUT_CLKCMU_CPUCL0_BOOST, "gout_clkcmu_cpucl0_boost",
+ "dout_cmu_boost", CLK_CON_GAT_CLKCMU_CMU_CPUCL0_BOOST, 21, 0, 0),
+ GATE(GOUT_CLKCMU_CPUCL1_BOOST, "gout_clkcmu_cpucl1_boost",
+ "dout_cmu_boost", CLK_CON_GAT_CLKCMU_CMU_CPUCL1_BOOST, 21, 0, 0),
+ GATE(GOUT_CLKCMU_CORE_BOOST, "gout_clkcmu_core_boost",
+ "dout_cmu_boost", CLK_CON_GAT_CLKCMU_CMU_CORE_BOOST, 21, 0, 0),
+ GATE(GOUT_CLKCMU_BUSC_BOOST, "gout_clkcmu_busc_boost",
+ "dout_cmu_boost", CLK_CON_GAT_CLKCMU_CMU_BUSC_BOOST, 21, 0, 0),
+
+ GATE(GOUT_CLKCMU_BUSMC_BOOST, "gout_clkcmu_busmc_boost",
+ "dout_cmu_boost", CLK_CON_GAT_CLKCMU_CMU_BUSMC_BOOST, 21, 0, 0),
+ GATE(GOUT_CLKCMU_MIF_BOOST, "gout_clkcmu_mif_boost", "dout_cmu_boost",
+ CLK_CON_GAT_CLKCMU_CMU_MIF_BOOST, 21, 0, 0),
+
+ /* ACC */
+ GATE(GOUT_CLKCMU_ACC_BUS, "gout_clkcmu_acc_bus", "mout_clkcmu_acc_bus",
+ CLK_CON_GAT_GATE_CLKCMU_ACC_BUS, 21, 0, 0),
+
+ /* APM */
+ GATE(GOUT_CLKCMU_APM_BUS, "gout_clkcmu_apm_bus", "mout_clkcmu_apm_bus",
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS, 21, CLK_IGNORE_UNUSED, 0),
+
+ /* AUD */
+ GATE(GOUT_CLKCMU_AUD_CPU, "gout_clkcmu_aud_cpu", "mout_clkcmu_aud_cpu",
+ CLK_CON_GAT_GATE_CLKCMU_AUD_CPU, 21, 0, 0),
+ GATE(GOUT_CLKCMU_AUD_BUS, "gout_clkcmu_aud_bus", "mout_clkcmu_aud_bus",
+ CLK_CON_GAT_GATE_CLKCMU_AUD_BUS, 21, 0, 0),
+
+ /* BUSC */
+ GATE(GOUT_CLKCMU_BUSC_BUS, "gout_clkcmu_busc_bus",
+ "mout_clkcmu_busc_bus", CLK_CON_GAT_GATE_CLKCMU_BUSC_BUS, 21,
+ CLK_IS_CRITICAL, 0),
+
+ /* BUSMC */
+ GATE(GOUT_CLKCMU_BUSMC_BUS, "gout_clkcmu_busmc_bus",
+ "mout_clkcmu_busmc_bus", CLK_CON_GAT_GATE_CLKCMU_BUSMC_BUS, 21,
+ CLK_IS_CRITICAL, 0),
+
+ /* CORE */
+ GATE(GOUT_CLKCMU_CORE_BUS, "gout_clkcmu_core_bus",
+ "mout_clkcmu_core_bus", CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
+ 21, 0, 0),
+
+ /* CPUCL0 */
+ GATE(GOUT_CLKCMU_CPUCL0_SWITCH, "gout_clkcmu_cpucl0_switch",
+ "mout_clkcmu_cpucl0_switch",
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(GOUT_CLKCMU_CPUCL0_CLUSTER, "gout_clkcmu_cpucl0_cluster",
+ "mout_clkcmu_cpucl0_cluster",
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_CLUSTER, 21, CLK_IGNORE_UNUSED, 0),
+
+ /* CPUCL1 */
+ GATE(GOUT_CLKCMU_CPUCL1_SWITCH, "gout_clkcmu_cpucl1_switch",
+ "mout_clkcmu_cpucl1_switch",
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(GOUT_CLKCMU_CPUCL1_CLUSTER, "gout_clkcmu_cpucl1_cluster",
+ "mout_clkcmu_cpucl1_cluster",
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_CLUSTER, 21, CLK_IGNORE_UNUSED, 0),
+
+ /* DPTX */
+ GATE(GOUT_CLKCMU_DPTX_BUS, "gout_clkcmu_dptx_bus",
+ "mout_clkcmu_dptx_bus", CLK_CON_GAT_GATE_CLKCMU_DPTX_BUS,
+ 21, 0, 0),
+ GATE(GOUT_CLKCMU_DPTX_DPGTC, "gout_clkcmu_dptx_dpgtc",
+ "mout_clkcmu_dptx_dpgtc", CLK_CON_GAT_GATE_CLKCMU_DPTX_DPGTC,
+ 21, 0, 0),
+
+ /* DPUM */
+ GATE(GOUT_CLKCMU_DPUM_BUS, "gout_clkcmu_dpum_bus",
+ "mout_clkcmu_dpum_bus", CLK_CON_GAT_GATE_CLKCMU_DPUM_BUS,
+ 21, 0, 0),
+
+ /* DPUS */
+ GATE(GOUT_CLKCMU_DPUS0_BUS, "gout_clkcmu_dpus0_bus",
+ "mout_clkcmu_dpus0_bus", CLK_CON_GAT_GATE_CLKCMU_DPUS0_BUS,
+ 21, 0, 0),
+ GATE(GOUT_CLKCMU_DPUS1_BUS, "gout_clkcmu_dpus1_bus",
+ "mout_clkcmu_dpus1_bus", CLK_CON_GAT_GATE_CLKCMU_DPUS1_BUS,
+ 21, 0, 0),
+
+ /* FSYS0 */
+ GATE(GOUT_CLKCMU_FSYS0_BUS, "gout_clkcmu_fsys0_bus",
+ "mout_clkcmu_fsys0_bus", CLK_CON_GAT_GATE_CLKCMU_FSYS0_BUS,
+ 21, 0, 0),
+ GATE(GOUT_CLKCMU_FSYS0_PCIE, "gout_clkcmu_fsys0_pcie",
+ "mout_clkcmu_fsys0_pcie", CLK_CON_GAT_GATE_CLKCMU_FSYS0_PCIE,
+ 21, 0, 0),
+
+ /* FSYS1 */
+ GATE(GOUT_CLKCMU_FSYS1_BUS, "gout_clkcmu_fsys1_bus",
+ "mout_clkcmu_fsys1_bus", CLK_CON_GAT_GATE_CLKCMU_FSYS1_BUS,
+ 21, 0, 0),
+ GATE(GOUT_CLKCMU_FSYS1_USBDRD, "gout_clkcmu_fsys1_usbdrd",
+ "mout_clkcmu_fsys1_usbdrd", CLK_CON_GAT_GATE_CLKCMU_FSYS1_USBDRD,
+ 21, 0, 0),
+ GATE(GOUT_CLKCMU_FSYS1_MMC_CARD, "gout_clkcmu_fsys1_mmc_card",
+ "mout_clkcmu_fsys1_mmc_card",
+ CLK_CON_GAT_GATE_CLKCMU_FSYS1_MMC_CARD, 21, 0, 0),
+
+ /* FSYS2 */
+ GATE(GOUT_CLKCMU_FSYS2_BUS, "gout_clkcmu_fsys2_bus",
+ "mout_clkcmu_fsys2_bus", CLK_CON_GAT_GATE_CLKCMU_FSYS2_BUS,
+ 21, 0, 0),
+ GATE(GOUT_CLKCMU_FSYS2_UFS_EMBD, "gout_clkcmu_fsys2_ufs_embd",
+ "mout_clkcmu_fsys2_ufs_embd",
+ CLK_CON_GAT_GATE_CLKCMU_FSYS2_UFS_EMBD, 21, 0, 0),
+ GATE(GOUT_CLKCMU_FSYS2_ETHERNET, "gout_clkcmu_fsys2_ethernet",
+ "mout_clkcmu_fsys2_ethernet",
+ CLK_CON_GAT_GATE_CLKCMU_FSYS2_ETHERNET, 21, 0, 0),
+
+ /* G2D */
+ GATE(GOUT_CLKCMU_G2D_G2D, "gout_clkcmu_g2d_g2d",
+ "mout_clkcmu_g2d_g2d", CLK_CON_GAT_GATE_CLKCMU_G2D_G2D, 21, 0, 0),
+ GATE(GOUT_CLKCMU_G2D_MSCL, "gout_clkcmu_g2d_mscl",
+ "mout_clkcmu_g2d_mscl", CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL,
+ 21, 0, 0),
+
+ /* G3D0 */
+ GATE(GOUT_CLKCMU_G3D00_SWITCH, "gout_clkcmu_g3d00_switch",
+ "mout_clkcmu_g3d00_switch", CLK_CON_GAT_GATE_CLKCMU_G3D00_SWITCH,
+ 21, 0, 0),
+ GATE(GOUT_CLKCMU_G3D01_SWITCH, "gout_clkcmu_g3d01_switch",
+ "mout_clkcmu_g3d01_switch", CLK_CON_GAT_GATE_CLKCMU_G3D01_SWITCH,
+ 21, 0, 0),
+
+ /* G3D1 */
+ GATE(GOUT_CLKCMU_G3D1_SWITCH, "gout_clkcmu_g3d1_switch",
+ "mout_clkcmu_g3d1_switch", CLK_CON_GAT_GATE_CLKCMU_G3D1_SWITCH,
+ 21, 0, 0),
+
+ /* ISPB */
+ GATE(GOUT_CLKCMU_ISPB_BUS, "gout_clkcmu_ispb_bus",
+ "mout_clkcmu_ispb_bus", CLK_CON_GAT_GATE_CLKCMU_ISPB_BUS,
+ 21, 0, 0),
+
+ /* MFC */
+ GATE(GOUT_CLKCMU_MFC_MFC, "gout_clkcmu_mfc_mfc", "mout_clkcmu_mfc_mfc",
+ CLK_CON_GAT_GATE_CLKCMU_MFC_MFC, 21, 0, 0),
+ GATE(GOUT_CLKCMU_MFC_WFD, "gout_clkcmu_mfc_wfd", "mout_clkcmu_mfc_wfd",
+ CLK_CON_GAT_GATE_CLKCMU_MFC_WFD, 21, 0, 0),
+
+ /* MIF */
+ GATE(GOUT_CLKCMU_MIF_SWITCH, "gout_clkcmu_mif_switch",
+ "mout_clkcmu_mif_switch", CLK_CON_GAT_GATE_CLKCMU_MIF_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(GOUT_CLKCMU_MIF_BUSP, "gout_clkcmu_mif_busp",
+ "mout_clkcmu_mif_busp", CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP,
+ 21, CLK_IGNORE_UNUSED, 0),
+
+ /* NPU */
+ GATE(GOUT_CLKCMU_NPU_BUS, "gout_clkcmu_npu_bus", "mout_clkcmu_npu_bus",
+ CLK_CON_GAT_GATE_CLKCMU_NPU_BUS, 21, 0, 0),
+
+ /* PERIC0 */
+ GATE(GOUT_CLKCMU_PERIC0_BUS, "gout_clkcmu_peric0_bus",
+ "mout_clkcmu_peric0_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ 21, 0, 0),
+ GATE(GOUT_CLKCMU_PERIC0_IP, "gout_clkcmu_peric0_ip",
+ "mout_clkcmu_peric0_ip", CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP,
+ 21, 0, 0),
+
+ /* PERIC1 */
+ GATE(GOUT_CLKCMU_PERIC1_BUS, "gout_clkcmu_peric1_bus",
+ "mout_clkcmu_peric1_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ 21, 0, 0),
+ GATE(GOUT_CLKCMU_PERIC1_IP, "gout_clkcmu_peric1_ip",
+ "mout_clkcmu_peric1_ip", CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP,
+ 21, 0, 0),
+
+ /* PERIS */
+ GATE(GOUT_CLKCMU_PERIS_BUS, "gout_clkcmu_peris_bus",
+ "mout_clkcmu_peris_bus", CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .fixed_factor_clks = top_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(top_fixed_factor_clks),
+ .gate_clks = top_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(top_gate_clks),
+ .nr_clk_ids = TOP_NR_CLK,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
+static void __init exynosautov9_cmu_top_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
+}
+
+/* Register CMU_TOP early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(exynosautov9_cmu_top, "samsung,exynosautov9-cmu-top",
+ exynosautov9_cmu_top_init);
+
+/* ---- CMU_BUSMC ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_BUSMC (0x1b200000) */
+#define PLL_CON0_MUX_CLKCMU_BUSMC_BUS_USER 0x0600
+#define CLK_CON_DIV_DIV_CLK_BUSMC_BUSP 0x1800
+#define CLK_CON_GAT_GOUT_BLK_BUSMC_UID_QE_PDMA0_IPCLKPORT_PCLK 0x2078
+#define CLK_CON_GAT_GOUT_BLK_BUSMC_UID_QE_SPDMA_IPCLKPORT_PCLK 0x2080
+
+static const unsigned long busmc_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_BUSMC_BUS_USER,
+ CLK_CON_DIV_DIV_CLK_BUSMC_BUSP,
+ CLK_CON_GAT_GOUT_BLK_BUSMC_UID_QE_PDMA0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_BUSMC_UID_QE_SPDMA_IPCLKPORT_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_BUSMC */
+PNAME(mout_busmc_bus_user_p) = { "oscclk", "dout_clkcmu_busmc_bus" };
+
+static const struct samsung_mux_clock busmc_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_BUSMC_BUS_USER, "mout_busmc_bus_user",
+ mout_busmc_bus_user_p, PLL_CON0_MUX_CLKCMU_BUSMC_BUS_USER, 4, 1),
+};
+
+static const struct samsung_div_clock busmc_div_clks[] __initconst = {
+ DIV(CLK_DOUT_BUSMC_BUSP, "dout_busmc_busp", "mout_busmc_bus_user",
+ CLK_CON_DIV_DIV_CLK_BUSMC_BUSP, 0, 3),
+};
+
+static const struct samsung_gate_clock busmc_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_BUSMC_PDMA0_PCLK, "gout_busmc_pdma0_pclk",
+ "dout_busmc_busp",
+ CLK_CON_GAT_GOUT_BLK_BUSMC_UID_QE_PDMA0_IPCLKPORT_PCLK, 21,
+ 0, 0),
+ GATE(CLK_GOUT_BUSMC_SPDMA_PCLK, "gout_busmc_spdma_pclk",
+ "dout_busmc_busp",
+ CLK_CON_GAT_GOUT_BLK_BUSMC_UID_QE_SPDMA_IPCLKPORT_PCLK, 21,
+ 0, 0),
+};
+
+static const struct samsung_cmu_info busmc_cmu_info __initconst = {
+ .mux_clks = busmc_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(busmc_mux_clks),
+ .div_clks = busmc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(busmc_div_clks),
+ .gate_clks = busmc_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(busmc_gate_clks),
+ .nr_clk_ids = BUSMC_NR_CLK,
+ .clk_regs = busmc_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(busmc_clk_regs),
+ .clk_name = "dout_clkcmu_busmc_bus",
+};
+
+/* ---- CMU_CORE ----------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CORE (0x1b030000) */
+#define PLL_CON0_MUX_CLKCMU_CORE_BUS_USER 0x0600
+#define CLK_CON_MUX_MUX_CORE_CMUREF 0x1000
+#define CLK_CON_DIV_DIV_CLK_CORE_BUSP 0x1800
+#define CLK_CON_GAT_CLK_BLK_CORE_UID_CCI_IPCLKPORT_CLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_CORE_UID_CCI_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_CORE_UID_CORE_CMU_CORE_IPCLKPORT_PCLK 0x2008
+
+static const unsigned long core_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_CORE_BUS_USER,
+ CLK_CON_MUX_MUX_CORE_CMUREF,
+ CLK_CON_DIV_DIV_CLK_CORE_BUSP,
+ CLK_CON_GAT_CLK_BLK_CORE_UID_CCI_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CORE_UID_CCI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CORE_UID_CORE_CMU_CORE_IPCLKPORT_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_CORE */
+PNAME(mout_core_bus_user_p) = { "oscclk", "dout_clkcmu_core_bus" };
+
+static const struct samsung_mux_clock core_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CORE_BUS_USER, "mout_core_bus_user", mout_core_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_CORE_BUS_USER, 4, 1),
+};
+
+static const struct samsung_div_clock core_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CORE_BUSP, "dout_core_busp", "mout_core_bus_user",
+ CLK_CON_DIV_DIV_CLK_CORE_BUSP, 0, 3),
+};
+
+static const struct samsung_gate_clock core_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CORE_CCI_CLK, "gout_core_cci_clk", "mout_core_bus_user",
+ CLK_CON_GAT_CLK_BLK_CORE_UID_CCI_IPCLKPORT_CLK, 21,
+ CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_CORE_CCI_PCLK, "gout_core_cci_pclk", "dout_core_busp",
+ CLK_CON_GAT_CLK_BLK_CORE_UID_CCI_IPCLKPORT_PCLK, 21,
+ CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_CORE_CMU_CORE_PCLK, "gout_core_cmu_core_pclk",
+ "dout_core_busp",
+ CLK_CON_GAT_CLK_BLK_CORE_UID_CORE_CMU_CORE_IPCLKPORT_PCLK, 21,
+ CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info core_cmu_info __initconst = {
+ .mux_clks = core_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(core_mux_clks),
+ .div_clks = core_div_clks,
+ .nr_div_clks = ARRAY_SIZE(core_div_clks),
+ .gate_clks = core_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(core_gate_clks),
+ .nr_clk_ids = CORE_NR_CLK,
+ .clk_regs = core_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(core_clk_regs),
+ .clk_name = "dout_clkcmu_core_bus",
+};
+
+/* ---- CMU_FSYS2 ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_FSYS2 (0x17c00000) */
+#define PLL_CON0_MUX_CLKCMU_FSYS2_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_FSYS2_UFS_EMBD_USER 0x0620
+#define PLL_CON0_MUX_CLKCMU_FSYS2_ETHERNET_USER 0x0610
+#define CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD0_IPCLKPORT_I_ACLK 0x2098
+#define CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD0_IPCLKPORT_I_CLK_UNIPRO 0x209c
+#define CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD1_IPCLKPORT_I_ACLK 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD1_IPCLKPORT_I_CLK_UNIPRO 0x20a8
+
+static const unsigned long fsys2_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_FSYS2_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS2_UFS_EMBD_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS2_ETHERNET_USER,
+ CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD0_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD0_IPCLKPORT_I_CLK_UNIPRO,
+ CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD1_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD1_IPCLKPORT_I_CLK_UNIPRO,
+};
+
+/* List of parent clocks for Muxes in CMU_FSYS2 */
+PNAME(mout_fsys2_bus_user_p) = { "oscclk", "dout_clkcmu_fsys2_bus" };
+PNAME(mout_fsys2_ufs_embd_user_p) = { "oscclk", "dout_clkcmu_fsys2_ufs_embd" };
+PNAME(mout_fsys2_ethernet_user_p) = { "oscclk", "dout_clkcmu_fsys2_ethernet" };
+
+static const struct samsung_mux_clock fsys2_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_FSYS2_BUS_USER, "mout_fsys2_bus_user",
+ mout_fsys2_bus_user_p, PLL_CON0_MUX_CLKCMU_FSYS2_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS2_UFS_EMBD_USER, "mout_fsys2_ufs_embd_user",
+ mout_fsys2_ufs_embd_user_p,
+ PLL_CON0_MUX_CLKCMU_FSYS2_UFS_EMBD_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS2_ETHERNET_USER, "mout_fsys2_ethernet_user",
+ mout_fsys2_ethernet_user_p,
+ PLL_CON0_MUX_CLKCMU_FSYS2_ETHERNET_USER, 4, 1),
+};
+
+static const struct samsung_gate_clock fsys2_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_FSYS2_UFS_EMBD0_ACLK, "gout_fsys2_ufs_embd0_aclk",
+ "mout_fsys2_ufs_embd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD0_IPCLKPORT_I_ACLK, 21,
+ 0, 0),
+ GATE(CLK_GOUT_FSYS2_UFS_EMBD0_UNIPRO, "gout_fsys2_ufs_embd0_unipro",
+ "mout_fsys2_ufs_embd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD0_IPCLKPORT_I_CLK_UNIPRO,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS2_UFS_EMBD1_ACLK, "gout_fsys2_ufs_embd1_aclk",
+ "mout_fsys2_ufs_embd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD1_IPCLKPORT_I_ACLK, 21,
+ 0, 0),
+ GATE(CLK_GOUT_FSYS2_UFS_EMBD1_UNIPRO, "gout_fsys2_ufs_embd1_unipro",
+ "mout_fsys2_ufs_embd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD1_IPCLKPORT_I_CLK_UNIPRO,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info fsys2_cmu_info __initconst = {
+ .mux_clks = fsys2_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys2_mux_clks),
+ .gate_clks = fsys2_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys2_gate_clks),
+ .nr_clk_ids = FSYS2_NR_CLK,
+ .clk_regs = fsys2_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys2_clk_regs),
+ .clk_name = "dout_clkcmu_fsys2_bus",
+};
+
+/* ---- CMU_PERIC0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC0 (0x10200000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_PERIC0_IP_USER 0x0610
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI00_USI 0x1000
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI01_USI 0x1004
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI02_USI 0x1008
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI03_USI 0x100c
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI04_USI 0x1010
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI05_USI 0x1014
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI_I2C 0x1018
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C 0x1818
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_0 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_1 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_2 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_3 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_0 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_1 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_2 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_3 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11 0x2050
+
+static const unsigned long peric0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_IP_USER,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI00_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI01_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI02_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI03_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI04_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI05_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11,
+};
+
+/* List of parent clocks for Muxes in CMU_PERIC0 */
+PNAME(mout_peric0_bus_user_p) = { "oscclk", "dout_clkcmu_peric0_bus" };
+PNAME(mout_peric0_ip_user_p) = { "oscclk", "dout_clkcmu_peric0_ip" };
+PNAME(mout_peric0_usi_p) = { "oscclk", "mout_peric0_ip_user" };
+
+static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC0_BUS_USER, "mout_peric0_bus_user",
+ mout_peric0_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_IP_USER, "mout_peric0_ip_user",
+ mout_peric0_ip_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_IP_USER, 4, 1),
+ /* USI00 ~ USI05 */
+ MUX(CLK_MOUT_PERIC0_USI00_USI, "mout_peric0_usi00_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI00_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI01_USI, "mout_peric0_usi01_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI01_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI02_USI, "mout_peric0_usi02_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI02_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI03_USI, "mout_peric0_usi03_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI03_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI04_USI, "mout_peric0_usi04_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI04_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI05_USI, "mout_peric0_usi05_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI05_USI, 0, 1),
+ /* USI_I2C */
+ MUX(CLK_MOUT_PERIC0_USI_I2C, "mout_peric0_usi_i2c",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI_I2C, 0, 1),
+};
+
+static const struct samsung_div_clock peric0_div_clks[] __initconst = {
+ /* USI00 ~ USI05 */
+ DIV(CLK_DOUT_PERIC0_USI00_USI, "dout_peric0_usi00_usi",
+ "mout_peric0_usi00_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI01_USI, "dout_peric0_usi01_usi",
+ "mout_peric0_usi01_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI02_USI, "dout_peric0_usi02_usi",
+ "mout_peric0_usi02_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI03_USI, "dout_peric0_usi03_usi",
+ "mout_peric0_usi03_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI04_USI, "dout_peric0_usi04_usi",
+ "mout_peric0_usi04_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI05_USI, "dout_peric0_usi05_usi",
+ "mout_peric0_usi05_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI,
+ 0, 4),
+ /* USI_I2C */
+ DIV(CLK_DOUT_PERIC0_USI_I2C, "dout_peric0_usi_i2c",
+ "mout_peric0_usi_i2c", CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C, 0, 4),
+};
+
+static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
+ /* IPCLK */
+ GATE(CLK_GOUT_PERIC0_IPCLK_0, "gout_peric0_ipclk_0",
+ "dout_peric0_usi00_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_IPCLK_1, "gout_peric0_ipclk_1",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_IPCLK_2, "gout_peric0_ipclk_2",
+ "dout_peric0_usi01_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_IPCLK_3, "gout_peric0_ipclk_3",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_IPCLK_4, "gout_peric0_ipclk_4",
+ "dout_peric0_usi02_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_IPCLK_5, "gout_peric0_ipclk_5",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_IPCLK_6, "gout_peric0_ipclk_6",
+ "dout_peric0_usi03_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_IPCLK_7, "gout_peric0_ipclk_7",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_IPCLK_8, "gout_peric0_ipclk_8",
+ "dout_peric0_usi04_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_IPCLK_9, "gout_peric0_ipclk_9",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_IPCLK_10, "gout_peric0_ipclk_10",
+ "dout_peric0_usi05_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_IPCLK_11, "gout_peric0_ipclk_11",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11,
+ 21, 0, 0),
+
+ /* PCLK */
+ GATE(CLK_GOUT_PERIC0_PCLK_0, "gout_peric0_pclk_0",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PCLK_2, "gout_peric0_pclk_2",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PCLK_3, "gout_peric0_pclk_3",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PCLK_4, "gout_peric0_pclk_4",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PCLK_5, "gout_peric0_pclk_5",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PCLK_6, "gout_peric0_pclk_6",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PCLK_7, "gout_peric0_pclk_7",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PCLK_8, "gout_peric0_pclk_8",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PCLK_9, "gout_peric0_pclk_9",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PCLK_10, "gout_peric0_pclk_10",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PCLK_11, "gout_peric0_pclk_11",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info peric0_cmu_info __initconst = {
+ .mux_clks = peric0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
+ .div_clks = peric0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric0_div_clks),
+ .gate_clks = peric0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric0_gate_clks),
+ .nr_clk_ids = PERIC0_NR_CLK,
+ .clk_regs = peric0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs),
+ .clk_name = "dout_clkcmu_peric0_bus",
+};
+
+/* ---- CMU_PERIC1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC1 (0x10800000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_PERIC1_IP_USER 0x0610
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI06_USI 0x1000
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI07_USI 0x1004
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI08_USI 0x1008
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI09_USI 0x100c
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI10_USI 0x1010
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI11_USI 0x1014
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI_I2C 0x1018
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C 0x1818
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_0 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_1 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_2 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_3 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_5 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_6 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_7 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_8 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_9 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_0 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_1 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_3 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_7 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_5 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_6 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_8 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_9 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11 0x2050
+
+static const unsigned long peric1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_IP_USER,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI06_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI07_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI08_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI09_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI10_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI11_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11,
+};
+
+/* List of parent clocks for Muxes in CMU_PERIC1 */
+PNAME(mout_peric1_bus_user_p) = { "oscclk", "dout_clkcmu_peric1_bus" };
+PNAME(mout_peric1_ip_user_p) = { "oscclk", "dout_clkcmu_peric1_ip" };
+PNAME(mout_peric1_usi_p) = { "oscclk", "mout_peric1_ip_user" };
+
+static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC1_BUS_USER, "mout_peric1_bus_user",
+ mout_peric1_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_IP_USER, "mout_peric1_ip_user",
+ mout_peric1_ip_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_IP_USER, 4, 1),
+ /* USI06 ~ USI11 */
+ MUX(CLK_MOUT_PERIC1_USI06_USI, "mout_peric1_usi06_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI06_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI07_USI, "mout_peric1_usi07_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI07_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI08_USI, "mout_peric1_usi08_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI08_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI09_USI, "mout_peric1_usi09_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI09_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI10_USI, "mout_peric1_usi10_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI10_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI11_USI, "mout_peric1_usi11_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI11_USI, 0, 1),
+ /* USI_I2C */
+ MUX(CLK_MOUT_PERIC1_USI_I2C, "mout_peric1_usi_i2c",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI_I2C, 0, 1),
+};
+
+static const struct samsung_div_clock peric1_div_clks[] __initconst = {
+ /* USI06 ~ USI11 */
+ DIV(CLK_DOUT_PERIC1_USI06_USI, "dout_peric1_usi06_usi",
+ "mout_peric1_usi06_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI07_USI, "dout_peric1_usi07_usi",
+ "mout_peric1_usi07_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI08_USI, "dout_peric1_usi08_usi",
+ "mout_peric1_usi08_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI09_USI, "dout_peric1_usi09_usi",
+ "mout_peric1_usi09_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI10_USI, "dout_peric1_usi10_usi",
+ "mout_peric1_usi10_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI11_USI, "dout_peric1_usi11_usi",
+ "mout_peric1_usi11_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI,
+ 0, 4),
+ /* USI_I2C */
+ DIV(CLK_DOUT_PERIC1_USI_I2C, "dout_peric1_usi_i2c",
+ "mout_peric1_usi_i2c", CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C, 0, 4),
+};
+
+static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
+ /* IPCLK */
+ GATE(CLK_GOUT_PERIC1_IPCLK_0, "gout_peric1_ipclk_0",
+ "dout_peric1_usi06_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_IPCLK_1, "gout_peric1_ipclk_1",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_IPCLK_2, "gout_peric1_ipclk_2",
+ "dout_peric1_usi07_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_IPCLK_3, "gout_peric1_ipclk_3",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_IPCLK_4, "gout_peric1_ipclk_4",
+ "dout_peric1_usi08_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_IPCLK_5, "gout_peric1_ipclk_5",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_IPCLK_6, "gout_peric1_ipclk_6",
+ "dout_peric1_usi09_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_IPCLK_7, "gout_peric1_ipclk_7",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_IPCLK_8, "gout_peric1_ipclk_8",
+ "dout_peric1_usi10_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_IPCLK_9, "gout_peric1_ipclk_9",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_IPCLK_10, "gout_peric1_ipclk_10",
+ "dout_peric1_usi11_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_IPCLK_11, "gout_peric1_ipclk_11",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11,
+ 21, 0, 0),
+
+ /* PCLK */
+ GATE(CLK_GOUT_PERIC1_PCLK_0, "gout_peric1_pclk_0",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PCLK_2, "gout_peric1_pclk_2",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PCLK_3, "gout_peric1_pclk_3",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PCLK_4, "gout_peric1_pclk_4",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PCLK_5, "gout_peric1_pclk_5",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PCLK_6, "gout_peric1_pclk_6",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PCLK_7, "gout_peric1_pclk_7",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PCLK_8, "gout_peric1_pclk_8",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PCLK_9, "gout_peric1_pclk_9",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PCLK_10, "gout_peric1_pclk_10",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PCLK_11, "gout_peric1_pclk_11",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info peric1_cmu_info __initconst = {
+ .mux_clks = peric1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
+ .div_clks = peric1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric1_div_clks),
+ .gate_clks = peric1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric1_gate_clks),
+ .nr_clk_ids = PERIC1_NR_CLK,
+ .clk_regs = peric1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs),
+ .clk_name = "dout_clkcmu_peric1_bus",
+};
+
+/* ---- CMU_PERIS ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIS (0x10020000) */
+#define PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER 0x0600
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER1_IPCLKPORT_PCLK 0x2060
+
+static const unsigned long peris_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER1_IPCLKPORT_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_PERIS */
+PNAME(mout_peris_bus_user_p) = { "oscclk", "dout_clkcmu_peris_bus" };
+
+static const struct samsung_mux_clock peris_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIS_BUS_USER, "mout_peris_bus_user",
+ mout_peris_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER, 4, 1),
+};
+
+static const struct samsung_gate_clock peris_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_SYSREG_PERIS_PCLK, "gout_sysreg_peris_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_WDT_CLUSTER0, "gout_wdt_cluster0", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_WDT_CLUSTER1, "gout_wdt_cluster1", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info peris_cmu_info __initconst = {
+ .mux_clks = peris_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peris_mux_clks),
+ .gate_clks = peris_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peris_gate_clks),
+ .nr_clk_ids = PERIS_NR_CLK,
+ .clk_regs = peris_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peris_clk_regs),
+ .clk_name = "dout_clkcmu_peris_bus",
+};
+
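+/*
+ * All leaf CMUs handled below (BUSMC, CORE, FSYS2, PERIC0/1, PERIS)
+ * share the same probe path: the per-CMU description is looked up from
+ * the matched compatible and passed to the common arm64 CMU
+ * registration helper.
+ */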
+static int __init exynosautov9_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id exynosautov9_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynosautov9-cmu-busmc",
+ .data = &busmc_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov9-cmu-core",
+ .data = &core_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov9-cmu-fsys2",
+ .data = &fsys2_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov9-cmu-peric0",
+ .data = &peric0_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov9-cmu-peric1",
+ .data = &peric1_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov9-cmu-peris",
+ .data = &peris_cmu_info,
+ }, {
+ },
+};
+
+static struct platform_driver exynosautov9_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynosautov9-cmu",
+ .of_match_table = exynosautov9_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynosautov9_cmu_probe,
+};
+
+static int __init exynosautov9_cmu_init(void)
+{
+ return platform_driver_register(&exynosautov9_cmu_driver);
+}
+core_initcall(exynosautov9_cmu_init);
diff --git a/drivers/clk/stm32/Makefile b/drivers/clk/stm32/Makefile
new file mode 100644
index 000000000000..95bd2230bba0
--- /dev/null
+++ b/drivers/clk/stm32/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_COMMON_CLK_STM32MP135) += clk-stm32mp13.o clk-stm32-core.o reset-stm32.o
diff --git a/drivers/clk/stm32/clk-stm32-core.c b/drivers/clk/stm32/clk-stm32-core.c
new file mode 100644
index 000000000000..45a279e73779
--- /dev/null
+++ b/drivers/clk/stm32/clk-stm32-core.c
@@ -0,0 +1,695 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2022 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "clk-stm32-core.h"
+#include "reset-stm32.h"
+
+static DEFINE_SPINLOCK(rlock);
+
+static int stm32_rcc_clock_init(struct device *dev,
+ const struct of_device_id *match,
+ void __iomem *base)
+{
+ const struct stm32_rcc_match_data *data = match->data;
+ struct clk_hw_onecell_data *clk_data = data->hw_clks;
+ struct device_node *np = dev_of_node(dev);
+ struct clk_hw **hws;
+ int n, max_binding;
+
+ max_binding = data->maxbinding;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, max_binding), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = max_binding;
+
+ hws = clk_data->hws;
+
+ for (n = 0; n < max_binding; n++)
+ hws[n] = ERR_PTR(-ENOENT);
+
+ for (n = 0; n < data->num_clocks; n++) {
+ const struct clock_config *cfg_clock = &data->tab_clocks[n];
+ struct clk_hw *hw = ERR_PTR(-ENOENT);
+
+ if (data->check_security &&
+ data->check_security(base, cfg_clock))
+ continue;
+
+ if (cfg_clock->func)
+ hw = (*cfg_clock->func)(dev, data, base, &rlock,
+ cfg_clock);
+
+ if (IS_ERR(hw)) {
+ dev_err(dev, "Can't register clk %d: %ld\n", n,
+ PTR_ERR(hw));
+ return PTR_ERR(hw);
+ }
+
+ if (cfg_clock->id != NO_ID)
+ hws[cfg_clock->id] = hw;
+ }
+
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+}
+
+int stm32_rcc_init(struct device *dev, const struct of_device_id *match_data,
+ void __iomem *base)
+{
+ const struct of_device_id *match;
+ int err;
+
+ match = of_match_node(match_data, dev_of_node(dev));
+ if (!match) {
+ dev_err(dev, "match data not found\n");
+ return -ENODEV;
+ }
+
+ /* RCC Reset Configuration */
+ err = stm32_rcc_reset_init(dev, match, base);
+ if (err) {
+ pr_err("stm32 reset failed to initialize\n");
+ return err;
+ }
+
+ /* RCC Clock Configuration */
+ err = stm32_rcc_clock_init(dev, match, base);
+ if (err) {
+ pr_err("stm32 clock failed to initialize\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static u8 stm32_mux_get_parent(void __iomem *base,
+ struct clk_stm32_clock_data *data,
+ u16 mux_id)
+{
+ const struct stm32_mux_cfg *mux = &data->muxes[mux_id];
+ u32 mask = BIT(mux->width) - 1;
+ u32 val;
+
+ val = readl(base + mux->offset) >> mux->shift;
+ val &= mask;
+
+ return val;
+}
+
+static int stm32_mux_set_parent(void __iomem *base,
+ struct clk_stm32_clock_data *data,
+ u16 mux_id, u8 index)
+{
+ const struct stm32_mux_cfg *mux = &data->muxes[mux_id];
+
+ u32 mask = BIT(mux->width) - 1;
+ u32 reg = readl(base + mux->offset);
+ u32 val = index << mux->shift;
+
+ reg &= ~(mask << mux->shift);
+ reg |= val;
+
+ writel(reg, base + mux->offset);
+
+ return 0;
+}
+
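+/*
+ * Several clocks may share one gate bit, so a per-gate reference count
+ * (gate_cpt) is kept: the hardware bit is only written on the first
+ * enable and the last disable. Gates with a separate clear register
+ * (set_clr != 0) use write-to-set / write-to-clear instead of
+ * read-modify-write.
+ */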
+static void stm32_gate_endisable(void __iomem *base,
+ struct clk_stm32_clock_data *data,
+ u16 gate_id, int enable)
+{
+ const struct stm32_gate_cfg *gate = &data->gates[gate_id];
+ void __iomem *addr = base + gate->offset;
+
+ if (enable) {
+ if (data->gate_cpt[gate_id]++ > 0)
+ return;
+
+ if (gate->set_clr != 0)
+ writel(BIT(gate->bit_idx), addr);
+ else
+ writel(readl(addr) | BIT(gate->bit_idx), addr);
+ } else {
+ if (--data->gate_cpt[gate_id] > 0)
+ return;
+
+ if (gate->set_clr != 0)
+ writel(BIT(gate->bit_idx), addr + gate->set_clr);
+ else
+ writel(readl(addr) & ~BIT(gate->bit_idx), addr);
+ }
+}
+
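+/*
+ * Late-init disabling of unused gates: only force the bit off if no
+ * clock sharing this gate has been enabled through the framework.
+ */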
+static void stm32_gate_disable_unused(void __iomem *base,
+ struct clk_stm32_clock_data *data,
+ u16 gate_id)
+{
+ const struct stm32_gate_cfg *gate = &data->gates[gate_id];
+ void __iomem *addr = base + gate->offset;
+
+ if (data->gate_cpt[gate_id] > 0)
+ return;
+
+ if (gate->set_clr != 0)
+ writel(BIT(gate->bit_idx), addr + gate->set_clr);
+ else
+ writel(readl(addr) & ~BIT(gate->bit_idx), addr);
+}
+
+static int stm32_gate_is_enabled(void __iomem *base,
+ struct clk_stm32_clock_data *data,
+ u16 gate_id)
+{
+ const struct stm32_gate_cfg *gate = &data->gates[gate_id];
+
+ return (readl(base + gate->offset) & BIT(gate->bit_idx)) != 0;
+}
+
+static unsigned int _get_table_div(const struct clk_div_table *table,
+ unsigned int val)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->val == val)
+ return clkt->div;
+ return 0;
+}
+
+static unsigned int _get_div(const struct clk_div_table *table,
+ unsigned int val, unsigned long flags, u8 width)
+{
+ if (flags & CLK_DIVIDER_ONE_BASED)
+ return val;
+ if (flags & CLK_DIVIDER_POWER_OF_TWO)
+ return 1 << val;
+ if (table)
+ return _get_table_div(table, val);
+ return val + 1;
+}
+
+static unsigned long stm32_divider_get_rate(void __iomem *base,
+ struct clk_stm32_clock_data *data,
+ u16 div_id,
+ unsigned long parent_rate)
+{
+ const struct stm32_div_cfg *divider = &data->dividers[div_id];
+ unsigned int val;
+ unsigned int div;
+
+ val = readl(base + divider->offset) >> divider->shift;
+ val &= clk_div_mask(divider->width);
+ div = _get_div(divider->table, val, divider->flags, divider->width);
+
+ if (!div) {
+ WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
+ "%d: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
+ div_id);
+ return parent_rate;
+ }
+
+ return DIV_ROUND_UP_ULL((u64)parent_rate, div);
+}
+
+static int stm32_divider_set_rate(void __iomem *base,
+ struct clk_stm32_clock_data *data,
+ u16 div_id, unsigned long rate,
+ unsigned long parent_rate)
+{
+ const struct stm32_div_cfg *divider = &data->dividers[div_id];
+ int value;
+ u32 val;
+
+ value = divider_get_val(rate, parent_rate, divider->table,
+ divider->width, divider->flags);
+ if (value < 0)
+ return value;
+
+ if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
+ val = clk_div_mask(divider->width) << (divider->shift + 16);
+ } else {
+ val = readl(base + divider->offset);
+ val &= ~(clk_div_mask(divider->width) << divider->shift);
+ }
+
+ val |= (u32)value << divider->shift;
+
+ writel(val, base + divider->offset);
+
+ return 0;
+}
+
+static u8 clk_stm32_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_stm32_mux *mux = to_clk_stm32_mux(hw);
+
+ return stm32_mux_get_parent(mux->base, mux->clock_data, mux->mux_id);
+}
+
+static int clk_stm32_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_stm32_mux *mux = to_clk_stm32_mux(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(mux->lock, flags);
+
+ stm32_mux_set_parent(mux->base, mux->clock_data, mux->mux_id, index);
+
+ spin_unlock_irqrestore(mux->lock, flags);
+
+ return 0;
+}
+
+const struct clk_ops clk_stm32_mux_ops = {
+ .get_parent = clk_stm32_mux_get_parent,
+ .set_parent = clk_stm32_mux_set_parent,
+};
+
+static void clk_stm32_gate_endisable(struct clk_hw *hw, int enable)
+{
+ struct clk_stm32_gate *gate = to_clk_stm32_gate(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ stm32_gate_endisable(gate->base, gate->clock_data, gate->gate_id, enable);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int clk_stm32_gate_enable(struct clk_hw *hw)
+{
+ clk_stm32_gate_endisable(hw, 1);
+
+ return 0;
+}
+
+static void clk_stm32_gate_disable(struct clk_hw *hw)
+{
+ clk_stm32_gate_endisable(hw, 0);
+}
+
+static int clk_stm32_gate_is_enabled(struct clk_hw *hw)
+{
+ struct clk_stm32_gate *gate = to_clk_stm32_gate(hw);
+
+ return stm32_gate_is_enabled(gate->base, gate->clock_data, gate->gate_id);
+}
+
+static void clk_stm32_gate_disable_unused(struct clk_hw *hw)
+{
+ struct clk_stm32_gate *gate = to_clk_stm32_gate(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ stm32_gate_disable_unused(gate->base, gate->clock_data, gate->gate_id);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+const struct clk_ops clk_stm32_gate_ops = {
+ .enable = clk_stm32_gate_enable,
+ .disable = clk_stm32_gate_disable,
+ .is_enabled = clk_stm32_gate_is_enabled,
+ .disable_unused = clk_stm32_gate_disable_unused,
+};
+
+static int clk_stm32_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_stm32_div *div = to_clk_stm32_divider(hw);
+ unsigned long flags = 0;
+ int ret;
+
+ if (div->div_id == NO_STM32_DIV)
+ return rate;
+
+ spin_lock_irqsave(div->lock, flags);
+
+ ret = stm32_divider_set_rate(div->base, div->clock_data, div->div_id, rate, parent_rate);
+
+ spin_unlock_irqrestore(div->lock, flags);
+
+ return ret;
+}
+
+static long clk_stm32_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_stm32_div *div = to_clk_stm32_divider(hw);
+ const struct stm32_div_cfg *divider;
+
+ if (div->div_id == NO_STM32_DIV)
+ return rate;
+
+ divider = &div->clock_data->dividers[div->div_id];
+
+ /* if read only, round to the rate given by the current divider value */
+ if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+ u32 val;
+
+ val = readl(div->base + divider->offset) >> divider->shift;
+ val &= clk_div_mask(divider->width);
+
+ return divider_ro_round_rate(hw, rate, prate, divider->table,
+ divider->width, divider->flags,
+ val);
+ }
+
+ return divider_round_rate_parent(hw, clk_hw_get_parent(hw),
+ rate, prate, divider->table,
+ divider->width, divider->flags);
+}
+
+static unsigned long clk_stm32_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_stm32_div *div = to_clk_stm32_divider(hw);
+
+ if (div->div_id == NO_STM32_DIV)
+ return parent_rate;
+
+ return stm32_divider_get_rate(div->base, div->clock_data, div->div_id, parent_rate);
+}
+
+const struct clk_ops clk_stm32_divider_ops = {
+ .recalc_rate = clk_stm32_divider_recalc_rate,
+ .round_rate = clk_stm32_divider_round_rate,
+ .set_rate = clk_stm32_divider_set_rate,
+};
+
+static int clk_stm32_composite_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
+ unsigned long flags = 0;
+ int ret;
+
+ if (composite->div_id == NO_STM32_DIV)
+ return rate;
+
+ spin_lock_irqsave(composite->lock, flags);
+
+ ret = stm32_divider_set_rate(composite->base, composite->clock_data,
+ composite->div_id, rate, parent_rate);
+
+ spin_unlock_irqrestore(composite->lock, flags);
+
+ return ret;
+}
+
+static unsigned long clk_stm32_composite_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
+
+ if (composite->div_id == NO_STM32_DIV)
+ return parent_rate;
+
+ return stm32_divider_get_rate(composite->base, composite->clock_data,
+ composite->div_id, parent_rate);
+}
+
+static long clk_stm32_composite_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
+
+ const struct stm32_div_cfg *divider;
+
+ if (composite->div_id == NO_STM32_DIV)
+ return rate;
+
+ divider = &composite->clock_data->dividers[composite->div_id];
+
+ /* if read only, round to the rate given by the current divider value */
+ if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+ u32 val;
+
+ val = readl(composite->base + divider->offset) >> divider->shift;
+ val &= clk_div_mask(divider->width);
+
+ return divider_ro_round_rate(hw, rate, prate, divider->table,
+ divider->width, divider->flags,
+ val);
+ }
+
+ return divider_round_rate_parent(hw, clk_hw_get_parent(hw),
+ rate, prate, divider->table,
+ divider->width, divider->flags);
+}
+
+static u8 clk_stm32_composite_get_parent(struct clk_hw *hw)
+{
+ struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
+
+ return stm32_mux_get_parent(composite->base, composite->clock_data, composite->mux_id);
+}
+
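+/*
+ * Some composites share one hardware mux (is_multi_mux); when one of
+ * them switches parent, the sibling clk_hw is reparented so the clock
+ * tree matches the new hardware selection.
+ */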
+static int clk_stm32_composite_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(composite->lock, flags);
+
+ stm32_mux_set_parent(composite->base, composite->clock_data, composite->mux_id, index);
+
+ spin_unlock_irqrestore(composite->lock, flags);
+
+ if (composite->clock_data->is_multi_mux) {
+ struct clk_hw *other_mux_hw = composite->clock_data->is_multi_mux(hw);
+
+ if (other_mux_hw) {
+ struct clk_hw *hwp = clk_hw_get_parent_by_index(hw, index);
+
+ clk_hw_reparent(other_mux_hw, hwp);
+ }
+ }
+
+ return 0;
+}
+
+static int clk_stm32_composite_is_enabled(struct clk_hw *hw)
+{
+ struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
+
+ if (composite->gate_id == NO_STM32_GATE)
+ return (__clk_get_enable_count(hw->clk) > 0);
+
+ return stm32_gate_is_enabled(composite->base, composite->clock_data, composite->gate_id);
+}
+
+#define MUX_SAFE_POSITION 0
+
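+/*
+ * Muxes flagged MUX_SAFE are parked on parent 0 while their gate is
+ * disabled (unless a sibling sharing the mux is still enabled), and the
+ * previously selected parent is restored on the next enable.
+ */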
+static int clk_stm32_has_safe_mux(struct clk_hw *hw)
+{
+ struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
+ const struct stm32_mux_cfg *mux = &composite->clock_data->muxes[composite->mux_id];
+
+ return !!(mux->flags & MUX_SAFE);
+}
+
+static void clk_stm32_set_safe_position_mux(struct clk_hw *hw)
+{
+ struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
+
+ if (!clk_stm32_composite_is_enabled(hw)) {
+ unsigned long flags = 0;
+
+ if (composite->clock_data->is_multi_mux) {
+ struct clk_hw *other_mux_hw = NULL;
+
+ other_mux_hw = composite->clock_data->is_multi_mux(hw);
+
+ if (!other_mux_hw || clk_stm32_composite_is_enabled(other_mux_hw))
+ return;
+ }
+
+ spin_lock_irqsave(composite->lock, flags);
+
+ stm32_mux_set_parent(composite->base, composite->clock_data,
+ composite->mux_id, MUX_SAFE_POSITION);
+
+ spin_unlock_irqrestore(composite->lock, flags);
+ }
+}
+
+static void clk_stm32_safe_restore_position_mux(struct clk_hw *hw)
+{
+ struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
+ int sel = clk_hw_get_parent_index(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(composite->lock, flags);
+
+ stm32_mux_set_parent(composite->base, composite->clock_data, composite->mux_id, sel);
+
+ spin_unlock_irqrestore(composite->lock, flags);
+}
+
+static void clk_stm32_composite_gate_endisable(struct clk_hw *hw, int enable)
+{
+ struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(composite->lock, flags);
+
+ stm32_gate_endisable(composite->base, composite->clock_data, composite->gate_id, enable);
+
+ spin_unlock_irqrestore(composite->lock, flags);
+}
+
+static int clk_stm32_composite_gate_enable(struct clk_hw *hw)
+{
+ struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
+
+ if (composite->gate_id == NO_STM32_GATE)
+ return 0;
+
+ clk_stm32_composite_gate_endisable(hw, 1);
+
+ if (composite->mux_id != NO_STM32_MUX && clk_stm32_has_safe_mux(hw))
+ clk_stm32_safe_restore_position_mux(hw);
+
+ return 0;
+}
+
+static void clk_stm32_composite_gate_disable(struct clk_hw *hw)
+{
+ struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
+
+ if (composite->gate_id == NO_STM32_GATE)
+ return;
+
+ clk_stm32_composite_gate_endisable(hw, 0);
+
+ if (composite->mux_id != NO_STM32_MUX && clk_stm32_has_safe_mux(hw))
+ clk_stm32_set_safe_position_mux(hw);
+}
+
+static void clk_stm32_composite_disable_unused(struct clk_hw *hw)
+{
+ struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
+ unsigned long flags = 0;
+
+ if (composite->gate_id == NO_STM32_GATE)
+ return;
+
+ spin_lock_irqsave(composite->lock, flags);
+
+ stm32_gate_disable_unused(composite->base, composite->clock_data, composite->gate_id);
+
+ spin_unlock_irqrestore(composite->lock, flags);
+}
+
+const struct clk_ops clk_stm32_composite_ops = {
+ .set_rate = clk_stm32_composite_set_rate,
+ .recalc_rate = clk_stm32_composite_recalc_rate,
+ .round_rate = clk_stm32_composite_round_rate,
+ .get_parent = clk_stm32_composite_get_parent,
+ .set_parent = clk_stm32_composite_set_parent,
+ .enable = clk_stm32_composite_gate_enable,
+ .disable = clk_stm32_composite_gate_disable,
+ .is_enabled = clk_stm32_composite_is_enabled,
+ .disable_unused = clk_stm32_composite_disable_unused,
+};
+
+struct clk_hw *clk_stm32_mux_register(struct device *dev,
+ const struct stm32_rcc_match_data *data,
+ void __iomem *base,
+ spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct clk_stm32_mux *mux = cfg->clock_cfg;
+ struct clk_hw *hw = &mux->hw;
+ int err;
+
+ mux->base = base;
+ mux->lock = lock;
+ mux->clock_data = data->clock_data;
+
+ err = clk_hw_register(dev, hw);
+ if (err)
+ return ERR_PTR(err);
+
+ return hw;
+}
+
+struct clk_hw *clk_stm32_gate_register(struct device *dev,
+ const struct stm32_rcc_match_data *data,
+ void __iomem *base,
+ spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct clk_stm32_gate *gate = cfg->clock_cfg;
+ struct clk_hw *hw = &gate->hw;
+ int err;
+
+ gate->base = base;
+ gate->lock = lock;
+ gate->clock_data = data->clock_data;
+
+ err = clk_hw_register(dev, hw);
+ if (err)
+ return ERR_PTR(err);
+
+ return hw;
+}
+
+struct clk_hw *clk_stm32_div_register(struct device *dev,
+ const struct stm32_rcc_match_data *data,
+ void __iomem *base,
+ spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct clk_stm32_div *div = cfg->clock_cfg;
+ struct clk_hw *hw = &div->hw;
+ int err;
+
+ div->base = base;
+ div->lock = lock;
+ div->clock_data = data->clock_data;
+
+ err = clk_hw_register(dev, hw);
+ if (err)
+ return ERR_PTR(err);
+
+ return hw;
+}
+
+struct clk_hw *clk_stm32_composite_register(struct device *dev,
+ const struct stm32_rcc_match_data *data,
+ void __iomem *base,
+ spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct clk_stm32_composite *composite = cfg->clock_cfg;
+ struct clk_hw *hw = &composite->hw;
+ int err;
+
+ composite->base = base;
+ composite->lock = lock;
+ composite->clock_data = data->clock_data;
+
+ err = clk_hw_register(dev, hw);
+ if (err)
+ return ERR_PTR(err);
+
+ return hw;
+}
diff --git a/drivers/clk/stm32/clk-stm32-core.h b/drivers/clk/stm32/clk-stm32-core.h
new file mode 100644
index 000000000000..76cffda02308
--- /dev/null
+++ b/drivers/clk/stm32/clk-stm32-core.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics 2022 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk-provider.h>
+
+struct stm32_rcc_match_data;
+
+struct stm32_mux_cfg {
+ u16 offset;
+ u8 shift;
+ u8 width;
+ u8 flags;
+ u32 *table;
+ u8 ready;
+};
+
+struct stm32_gate_cfg {
+ u16 offset;
+ u8 bit_idx;
+ u8 set_clr;
+};
+
+struct stm32_div_cfg {
+ u16 offset;
+ u8 shift;
+ u8 width;
+ u8 flags;
+ u8 ready;
+ const struct clk_div_table *table;
+};
+
+struct stm32_composite_cfg {
+ int mux;
+ int gate;
+ int div;
+};
+
+#define NO_ID 0xFFFFFFFF
+
+#define NO_STM32_MUX 0xFFFF
+#define NO_STM32_DIV 0xFFFF
+#define NO_STM32_GATE 0xFFFF
+
+struct clock_config {
+ unsigned long id;
+ int sec_id;
+ void *clock_cfg;
+
+ struct clk_hw *(*func)(struct device *dev,
+ const struct stm32_rcc_match_data *data,
+ void __iomem *base,
+ spinlock_t *lock,
+ const struct clock_config *cfg);
+};
+
+struct clk_stm32_clock_data {
+ u16 *gate_cpt;
+ const struct stm32_gate_cfg *gates;
+ const struct stm32_mux_cfg *muxes;
+ const struct stm32_div_cfg *dividers;
+ struct clk_hw *(*is_multi_mux)(struct clk_hw *hw);
+};
+
+struct stm32_rcc_match_data {
+ struct clk_hw_onecell_data *hw_clks;
+ unsigned int num_clocks;
+ const struct clock_config *tab_clocks;
+ unsigned int maxbinding;
+ struct clk_stm32_clock_data *clock_data;
+ u32 clear_offset;
+ int (*check_security)(void __iomem *base,
+ const struct clock_config *cfg);
+ int (*multi_mux)(void __iomem *base, const struct clock_config *cfg);
+};
+
+int stm32_rcc_reset_init(struct device *dev, const struct of_device_id *match,
+ void __iomem *base);
+
+int stm32_rcc_init(struct device *dev, const struct of_device_id *match_data,
+ void __iomem *base);
+
+/* MUX define */
+#define MUX_NO_RDY 0xFF
+#define MUX_SAFE BIT(7)
+
+/* DIV define */
+#define DIV_NO_RDY 0xFF
+
+/* Definition of clock structure */
+struct clk_stm32_mux {
+ u16 mux_id;
+ struct clk_hw hw;
+ void __iomem *base;
+ struct clk_stm32_clock_data *clock_data;
+ spinlock_t *lock; /* spin lock */
+};
+
+#define to_clk_stm32_mux(_hw) container_of(_hw, struct clk_stm32_mux, hw)
+
+struct clk_stm32_gate {
+ u16 gate_id;
+ struct clk_hw hw;
+ void __iomem *base;
+ struct clk_stm32_clock_data *clock_data;
+ spinlock_t *lock; /* spin lock */
+};
+
+#define to_clk_stm32_gate(_hw) container_of(_hw, struct clk_stm32_gate, hw)
+
+struct clk_stm32_div {
+ u16 div_id;
+ struct clk_hw hw;
+ void __iomem *base;
+ struct clk_stm32_clock_data *clock_data;
+ spinlock_t *lock; /* spin lock */
+};
+
+#define to_clk_stm32_divider(_hw) container_of(_hw, struct clk_stm32_div, hw)
+
+struct clk_stm32_composite {
+ u16 gate_id;
+ u16 mux_id;
+ u16 div_id;
+ struct clk_hw hw;
+ void __iomem *base;
+ struct clk_stm32_clock_data *clock_data;
+ spinlock_t *lock; /* spin lock */
+};
+
+#define to_clk_stm32_composite(_hw) container_of(_hw, struct clk_stm32_composite, hw)
+
+/* Clock operators */
+extern const struct clk_ops clk_stm32_mux_ops;
+extern const struct clk_ops clk_stm32_gate_ops;
+extern const struct clk_ops clk_stm32_divider_ops;
+extern const struct clk_ops clk_stm32_composite_ops;
+
+/* Clock registering */
+struct clk_hw *clk_stm32_mux_register(struct device *dev,
+ const struct stm32_rcc_match_data *data,
+ void __iomem *base,
+ spinlock_t *lock,
+ const struct clock_config *cfg);
+
+struct clk_hw *clk_stm32_gate_register(struct device *dev,
+ const struct stm32_rcc_match_data *data,
+ void __iomem *base,
+ spinlock_t *lock,
+ const struct clock_config *cfg);
+
+struct clk_hw *clk_stm32_div_register(struct device *dev,
+ const struct stm32_rcc_match_data *data,
+ void __iomem *base,
+ spinlock_t *lock,
+ const struct clock_config *cfg);
+
+struct clk_hw *clk_stm32_composite_register(struct device *dev,
+ const struct stm32_rcc_match_data *data,
+ void __iomem *base,
+ spinlock_t *lock,
+ const struct clock_config *cfg);
+
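+/*
+ * Each clock is described by a statically allocated clk_stm32_* struct;
+ * the macros below wrap it in a clock_config entry binding the DT clock
+ * index, an optional security ID and the matching registration helper.
+ */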
+#define STM32_CLOCK_CFG(_binding, _clk, _sec_id, _struct, _register)\
+{\
+ .id = (_binding),\
+ .sec_id = (_sec_id),\
+ .clock_cfg = (_struct) {_clk},\
+ .func = (_register),\
+}
+
+#define STM32_MUX_CFG(_binding, _clk, _sec_id)\
+ STM32_CLOCK_CFG(_binding, &(_clk), _sec_id, struct clk_stm32_mux *,\
+ &clk_stm32_mux_register)
+
+#define STM32_GATE_CFG(_binding, _clk, _sec_id)\
+ STM32_CLOCK_CFG(_binding, &(_clk), _sec_id, struct clk_stm32_gate *,\
+ &clk_stm32_gate_register)
+
+#define STM32_DIV_CFG(_binding, _clk, _sec_id)\
+ STM32_CLOCK_CFG(_binding, &(_clk), _sec_id, struct clk_stm32_div *,\
+ &clk_stm32_div_register)
+
+#define STM32_COMPOSITE_CFG(_binding, _clk, _sec_id)\
+ STM32_CLOCK_CFG(_binding, &(_clk), _sec_id, struct clk_stm32_composite *,\
+ &clk_stm32_composite_register)
diff --git a/drivers/clk/stm32/clk-stm32mp13.c b/drivers/clk/stm32/clk-stm32mp13.c
new file mode 100644
index 000000000000..1192eee8abe4
--- /dev/null
+++ b/drivers/clk/stm32/clk-stm32mp13.c
@@ -0,0 +1,1620 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2022 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/stm32mp13-clks.h>
+#include "clk-stm32-core.h"
+#include "stm32mp13_rcc.h"
+
+#define RCC_CLR_OFFSET 0x4
+
+/* STM32 Gates definition */
+enum enum_gate_cfg {
+ GATE_MCO1,
+ GATE_MCO2,
+ GATE_DBGCK,
+ GATE_TRACECK,
+ GATE_DDRC1,
+ GATE_DDRC1LP,
+ GATE_DDRPHYC,
+ GATE_DDRPHYCLP,
+ GATE_DDRCAPB,
+ GATE_DDRCAPBLP,
+ GATE_AXIDCG,
+ GATE_DDRPHYCAPB,
+ GATE_DDRPHYCAPBLP,
+ GATE_TIM2,
+ GATE_TIM3,
+ GATE_TIM4,
+ GATE_TIM5,
+ GATE_TIM6,
+ GATE_TIM7,
+ GATE_LPTIM1,
+ GATE_SPI2,
+ GATE_SPI3,
+ GATE_USART3,
+ GATE_UART4,
+ GATE_UART5,
+ GATE_UART7,
+ GATE_UART8,
+ GATE_I2C1,
+ GATE_I2C2,
+ GATE_SPDIF,
+ GATE_TIM1,
+ GATE_TIM8,
+ GATE_SPI1,
+ GATE_USART6,
+ GATE_SAI1,
+ GATE_SAI2,
+ GATE_DFSDM,
+ GATE_ADFSDM,
+ GATE_FDCAN,
+ GATE_LPTIM2,
+ GATE_LPTIM3,
+ GATE_LPTIM4,
+ GATE_LPTIM5,
+ GATE_VREF,
+ GATE_DTS,
+ GATE_PMBCTRL,
+ GATE_HDP,
+ GATE_SYSCFG,
+ GATE_DCMIPP,
+ GATE_DDRPERFM,
+ GATE_IWDG2APB,
+ GATE_USBPHY,
+ GATE_STGENRO,
+ GATE_LTDC,
+ GATE_RTCAPB,
+ GATE_TZC,
+ GATE_ETZPC,
+ GATE_IWDG1APB,
+ GATE_BSEC,
+ GATE_STGENC,
+ GATE_USART1,
+ GATE_USART2,
+ GATE_SPI4,
+ GATE_SPI5,
+ GATE_I2C3,
+ GATE_I2C4,
+ GATE_I2C5,
+ GATE_TIM12,
+ GATE_TIM13,
+ GATE_TIM14,
+ GATE_TIM15,
+ GATE_TIM16,
+ GATE_TIM17,
+ GATE_DMA1,
+ GATE_DMA2,
+ GATE_DMAMUX1,
+ GATE_DMA3,
+ GATE_DMAMUX2,
+ GATE_ADC1,
+ GATE_ADC2,
+ GATE_USBO,
+ GATE_TSC,
+ GATE_GPIOA,
+ GATE_GPIOB,
+ GATE_GPIOC,
+ GATE_GPIOD,
+ GATE_GPIOE,
+ GATE_GPIOF,
+ GATE_GPIOG,
+ GATE_GPIOH,
+ GATE_GPIOI,
+ GATE_PKA,
+ GATE_SAES,
+ GATE_CRYP1,
+ GATE_HASH1,
+ GATE_RNG1,
+ GATE_BKPSRAM,
+ GATE_AXIMC,
+ GATE_MCE,
+ GATE_ETH1CK,
+ GATE_ETH1TX,
+ GATE_ETH1RX,
+ GATE_ETH1MAC,
+ GATE_FMC,
+ GATE_QSPI,
+ GATE_SDMMC1,
+ GATE_SDMMC2,
+ GATE_CRC1,
+ GATE_USBH,
+ GATE_ETH2CK,
+ GATE_ETH2TX,
+ GATE_ETH2RX,
+ GATE_ETH2MAC,
+ GATE_ETH1STP,
+ GATE_ETH2STP,
+ GATE_MDMA,
+ GATE_NB
+};
+
+#define _CFG_GATE(_id, _offset, _bit_idx, _offset_clr)\
+ [(_id)] = {\
+ .offset = (_offset),\
+ .bit_idx = (_bit_idx),\
+ .set_clr = (_offset_clr),\
+ }
+
+#define CFG_GATE(_id, _offset, _bit_idx)\
+ _CFG_GATE(_id, _offset, _bit_idx, 0)
+
+#define CFG_GATE_SETCLR(_id, _offset, _bit_idx)\
+ _CFG_GATE(_id, _offset, _bit_idx, RCC_CLR_OFFSET)
+
+static struct stm32_gate_cfg stm32mp13_gates[] = {
+ CFG_GATE(GATE_MCO1, RCC_MCO1CFGR, 12),
+ CFG_GATE(GATE_MCO2, RCC_MCO2CFGR, 12),
+ CFG_GATE(GATE_DBGCK, RCC_DBGCFGR, 8),
+ CFG_GATE(GATE_TRACECK, RCC_DBGCFGR, 9),
+ CFG_GATE(GATE_DDRC1, RCC_DDRITFCR, 0),
+ CFG_GATE(GATE_DDRC1LP, RCC_DDRITFCR, 1),
+ CFG_GATE(GATE_DDRPHYC, RCC_DDRITFCR, 4),
+ CFG_GATE(GATE_DDRPHYCLP, RCC_DDRITFCR, 5),
+ CFG_GATE(GATE_DDRCAPB, RCC_DDRITFCR, 6),
+ CFG_GATE(GATE_DDRCAPBLP, RCC_DDRITFCR, 7),
+ CFG_GATE(GATE_AXIDCG, RCC_DDRITFCR, 8),
+ CFG_GATE(GATE_DDRPHYCAPB, RCC_DDRITFCR, 9),
+ CFG_GATE(GATE_DDRPHYCAPBLP, RCC_DDRITFCR, 10),
+ CFG_GATE_SETCLR(GATE_TIM2, RCC_MP_APB1ENSETR, 0),
+ CFG_GATE_SETCLR(GATE_TIM3, RCC_MP_APB1ENSETR, 1),
+ CFG_GATE_SETCLR(GATE_TIM4, RCC_MP_APB1ENSETR, 2),
+ CFG_GATE_SETCLR(GATE_TIM5, RCC_MP_APB1ENSETR, 3),
+ CFG_GATE_SETCLR(GATE_TIM6, RCC_MP_APB1ENSETR, 4),
+ CFG_GATE_SETCLR(GATE_TIM7, RCC_MP_APB1ENSETR, 5),
+ CFG_GATE_SETCLR(GATE_LPTIM1, RCC_MP_APB1ENSETR, 9),
+ CFG_GATE_SETCLR(GATE_SPI2, RCC_MP_APB1ENSETR, 11),
+ CFG_GATE_SETCLR(GATE_SPI3, RCC_MP_APB1ENSETR, 12),
+ CFG_GATE_SETCLR(GATE_USART3, RCC_MP_APB1ENSETR, 15),
+ CFG_GATE_SETCLR(GATE_UART4, RCC_MP_APB1ENSETR, 16),
+ CFG_GATE_SETCLR(GATE_UART5, RCC_MP_APB1ENSETR, 17),
+ CFG_GATE_SETCLR(GATE_UART7, RCC_MP_APB1ENSETR, 18),
+ CFG_GATE_SETCLR(GATE_UART8, RCC_MP_APB1ENSETR, 19),
+ CFG_GATE_SETCLR(GATE_I2C1, RCC_MP_APB1ENSETR, 21),
+ CFG_GATE_SETCLR(GATE_I2C2, RCC_MP_APB1ENSETR, 22),
+ CFG_GATE_SETCLR(GATE_SPDIF, RCC_MP_APB1ENSETR, 26),
+ CFG_GATE_SETCLR(GATE_TIM1, RCC_MP_APB2ENSETR, 0),
+ CFG_GATE_SETCLR(GATE_TIM8, RCC_MP_APB2ENSETR, 1),
+ CFG_GATE_SETCLR(GATE_SPI1, RCC_MP_APB2ENSETR, 8),
+ CFG_GATE_SETCLR(GATE_USART6, RCC_MP_APB2ENSETR, 13),
+ CFG_GATE_SETCLR(GATE_SAI1, RCC_MP_APB2ENSETR, 16),
+ CFG_GATE_SETCLR(GATE_SAI2, RCC_MP_APB2ENSETR, 17),
+ CFG_GATE_SETCLR(GATE_DFSDM, RCC_MP_APB2ENSETR, 20),
+ CFG_GATE_SETCLR(GATE_ADFSDM, RCC_MP_APB2ENSETR, 21),
+ CFG_GATE_SETCLR(GATE_FDCAN, RCC_MP_APB2ENSETR, 24),
+ CFG_GATE_SETCLR(GATE_LPTIM2, RCC_MP_APB3ENSETR, 0),
+ CFG_GATE_SETCLR(GATE_LPTIM3, RCC_MP_APB3ENSETR, 1),
+ CFG_GATE_SETCLR(GATE_LPTIM4, RCC_MP_APB3ENSETR, 2),
+ CFG_GATE_SETCLR(GATE_LPTIM5, RCC_MP_APB3ENSETR, 3),
+ CFG_GATE_SETCLR(GATE_VREF, RCC_MP_APB3ENSETR, 13),
+ CFG_GATE_SETCLR(GATE_DTS, RCC_MP_APB3ENSETR, 16),
+ CFG_GATE_SETCLR(GATE_PMBCTRL, RCC_MP_APB3ENSETR, 17),
+ CFG_GATE_SETCLR(GATE_HDP, RCC_MP_APB3ENSETR, 20),
+ CFG_GATE_SETCLR(GATE_SYSCFG, RCC_MP_NS_APB3ENSETR, 0),
+ CFG_GATE_SETCLR(GATE_DCMIPP, RCC_MP_APB4ENSETR, 1),
+ CFG_GATE_SETCLR(GATE_DDRPERFM, RCC_MP_APB4ENSETR, 8),
+ CFG_GATE_SETCLR(GATE_IWDG2APB, RCC_MP_APB4ENSETR, 15),
+ CFG_GATE_SETCLR(GATE_USBPHY, RCC_MP_APB4ENSETR, 16),
+ CFG_GATE_SETCLR(GATE_STGENRO, RCC_MP_APB4ENSETR, 20),
+ CFG_GATE_SETCLR(GATE_LTDC, RCC_MP_NS_APB4ENSETR, 0),
+ CFG_GATE_SETCLR(GATE_RTCAPB, RCC_MP_APB5ENSETR, 8),
+ CFG_GATE_SETCLR(GATE_TZC, RCC_MP_APB5ENSETR, 11),
+ CFG_GATE_SETCLR(GATE_ETZPC, RCC_MP_APB5ENSETR, 13),
+ CFG_GATE_SETCLR(GATE_IWDG1APB, RCC_MP_APB5ENSETR, 15),
+ CFG_GATE_SETCLR(GATE_BSEC, RCC_MP_APB5ENSETR, 16),
+ CFG_GATE_SETCLR(GATE_STGENC, RCC_MP_APB5ENSETR, 20),
+ CFG_GATE_SETCLR(GATE_USART1, RCC_MP_APB6ENSETR, 0),
+ CFG_GATE_SETCLR(GATE_USART2, RCC_MP_APB6ENSETR, 1),
+ CFG_GATE_SETCLR(GATE_SPI4, RCC_MP_APB6ENSETR, 2),
+ CFG_GATE_SETCLR(GATE_SPI5, RCC_MP_APB6ENSETR, 3),
+ CFG_GATE_SETCLR(GATE_I2C3, RCC_MP_APB6ENSETR, 4),
+ CFG_GATE_SETCLR(GATE_I2C4, RCC_MP_APB6ENSETR, 5),
+ CFG_GATE_SETCLR(GATE_I2C5, RCC_MP_APB6ENSETR, 6),
+ CFG_GATE_SETCLR(GATE_TIM12, RCC_MP_APB6ENSETR, 7),
+ CFG_GATE_SETCLR(GATE_TIM13, RCC_MP_APB6ENSETR, 8),
+ CFG_GATE_SETCLR(GATE_TIM14, RCC_MP_APB6ENSETR, 9),
+ CFG_GATE_SETCLR(GATE_TIM15, RCC_MP_APB6ENSETR, 10),
+ CFG_GATE_SETCLR(GATE_TIM16, RCC_MP_APB6ENSETR, 11),
+ CFG_GATE_SETCLR(GATE_TIM17, RCC_MP_APB6ENSETR, 12),
+ CFG_GATE_SETCLR(GATE_DMA1, RCC_MP_AHB2ENSETR, 0),
+ CFG_GATE_SETCLR(GATE_DMA2, RCC_MP_AHB2ENSETR, 1),
+ CFG_GATE_SETCLR(GATE_DMAMUX1, RCC_MP_AHB2ENSETR, 2),
+ CFG_GATE_SETCLR(GATE_DMA3, RCC_MP_AHB2ENSETR, 3),
+ CFG_GATE_SETCLR(GATE_DMAMUX2, RCC_MP_AHB2ENSETR, 4),
+ CFG_GATE_SETCLR(GATE_ADC1, RCC_MP_AHB2ENSETR, 5),
+ CFG_GATE_SETCLR(GATE_ADC2, RCC_MP_AHB2ENSETR, 6),
+ CFG_GATE_SETCLR(GATE_USBO, RCC_MP_AHB2ENSETR, 8),
+ CFG_GATE_SETCLR(GATE_TSC, RCC_MP_AHB4ENSETR, 15),
+ CFG_GATE_SETCLR(GATE_GPIOA, RCC_MP_NS_AHB4ENSETR, 0),
+ CFG_GATE_SETCLR(GATE_GPIOB, RCC_MP_NS_AHB4ENSETR, 1),
+ CFG_GATE_SETCLR(GATE_GPIOC, RCC_MP_NS_AHB4ENSETR, 2),
+ CFG_GATE_SETCLR(GATE_GPIOD, RCC_MP_NS_AHB4ENSETR, 3),
+ CFG_GATE_SETCLR(GATE_GPIOE, RCC_MP_NS_AHB4ENSETR, 4),
+ CFG_GATE_SETCLR(GATE_GPIOF, RCC_MP_NS_AHB4ENSETR, 5),
+ CFG_GATE_SETCLR(GATE_GPIOG, RCC_MP_NS_AHB4ENSETR, 6),
+ CFG_GATE_SETCLR(GATE_GPIOH, RCC_MP_NS_AHB4ENSETR, 7),
+ CFG_GATE_SETCLR(GATE_GPIOI, RCC_MP_NS_AHB4ENSETR, 8),
+ CFG_GATE_SETCLR(GATE_PKA, RCC_MP_AHB5ENSETR, 2),
+ CFG_GATE_SETCLR(GATE_SAES, RCC_MP_AHB5ENSETR, 3),
+ CFG_GATE_SETCLR(GATE_CRYP1, RCC_MP_AHB5ENSETR, 4),
+ CFG_GATE_SETCLR(GATE_HASH1, RCC_MP_AHB5ENSETR, 5),
+ CFG_GATE_SETCLR(GATE_RNG1, RCC_MP_AHB5ENSETR, 6),
+ CFG_GATE_SETCLR(GATE_BKPSRAM, RCC_MP_AHB5ENSETR, 8),
+ CFG_GATE_SETCLR(GATE_AXIMC, RCC_MP_AHB5ENSETR, 16),
+ CFG_GATE_SETCLR(GATE_MCE, RCC_MP_AHB6ENSETR, 1),
+ CFG_GATE_SETCLR(GATE_ETH1CK, RCC_MP_AHB6ENSETR, 7),
+ CFG_GATE_SETCLR(GATE_ETH1TX, RCC_MP_AHB6ENSETR, 8),
+ CFG_GATE_SETCLR(GATE_ETH1RX, RCC_MP_AHB6ENSETR, 9),
+ CFG_GATE_SETCLR(GATE_ETH1MAC, RCC_MP_AHB6ENSETR, 10),
+ CFG_GATE_SETCLR(GATE_FMC, RCC_MP_AHB6ENSETR, 12),
+ CFG_GATE_SETCLR(GATE_QSPI, RCC_MP_AHB6ENSETR, 14),
+ CFG_GATE_SETCLR(GATE_SDMMC1, RCC_MP_AHB6ENSETR, 16),
+ CFG_GATE_SETCLR(GATE_SDMMC2, RCC_MP_AHB6ENSETR, 17),
+ CFG_GATE_SETCLR(GATE_CRC1, RCC_MP_AHB6ENSETR, 20),
+ CFG_GATE_SETCLR(GATE_USBH, RCC_MP_AHB6ENSETR, 24),
+ CFG_GATE_SETCLR(GATE_ETH2CK, RCC_MP_AHB6ENSETR, 27),
+ CFG_GATE_SETCLR(GATE_ETH2TX, RCC_MP_AHB6ENSETR, 28),
+ CFG_GATE_SETCLR(GATE_ETH2RX, RCC_MP_AHB6ENSETR, 29),
+ CFG_GATE_SETCLR(GATE_ETH2MAC, RCC_MP_AHB6ENSETR, 30),
+ CFG_GATE_SETCLR(GATE_ETH1STP, RCC_MP_AHB6LPENSETR, 11),
+ CFG_GATE_SETCLR(GATE_ETH2STP, RCC_MP_AHB6LPENSETR, 31),
+ CFG_GATE_SETCLR(GATE_MDMA, RCC_MP_NS_AHB6ENSETR, 0),
+};
+
+/* STM32 Dividers definition */
+enum enum_div_cfg {
+ DIV_RTC,
+ DIV_HSI,
+ DIV_MCO1,
+ DIV_MCO2,
+ DIV_TRACE,
+ DIV_ETH1PTP,
+ DIV_ETH2PTP,
+ DIV_NB
+};
+
+static const struct clk_div_table ck_trace_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
+ { 4, 16 }, { 5, 16 }, { 6, 16 }, { 7, 16 },
+ { 0 },
+};
+
+#define CFG_DIV(_id, _offset, _shift, _width, _flags, _table, _ready)\
+ [(_id)] = {\
+ .offset = (_offset),\
+ .shift = (_shift),\
+ .width = (_width),\
+ .flags = (_flags),\
+ .table = (_table),\
+ .ready = (_ready),\
+ }
+
+static const struct stm32_div_cfg stm32mp13_dividers[DIV_NB] = {
+ CFG_DIV(DIV_RTC, RCC_RTCDIVR, 0, 6, 0, NULL, DIV_NO_RDY),
+ CFG_DIV(DIV_MCO1, RCC_MCO1CFGR, 4, 4, 0, NULL, DIV_NO_RDY),
+ CFG_DIV(DIV_MCO2, RCC_MCO2CFGR, 4, 4, 0, NULL, DIV_NO_RDY),
+ CFG_DIV(DIV_TRACE, RCC_DBGCFGR, 0, 3, 0, ck_trace_div_table, DIV_NO_RDY),
+ CFG_DIV(DIV_ETH1PTP, RCC_ETH12CKSELR, 4, 4, 0, NULL, DIV_NO_RDY),
+ CFG_DIV(DIV_ETH2PTP, RCC_ETH12CKSELR, 12, 4, 0, NULL, DIV_NO_RDY),
+};
+
+/* STM32 Muxes definition */
+enum enum_mux_cfg {
+ MUX_ADC1,
+ MUX_ADC2,
+ MUX_DCMIPP,
+ MUX_ETH1,
+ MUX_ETH2,
+ MUX_FDCAN,
+ MUX_FMC,
+ MUX_I2C12,
+ MUX_I2C3,
+ MUX_I2C4,
+ MUX_I2C5,
+ MUX_LPTIM1,
+ MUX_LPTIM2,
+ MUX_LPTIM3,
+ MUX_LPTIM45,
+ MUX_MCO1,
+ MUX_MCO2,
+ MUX_QSPI,
+ MUX_RNG1,
+ MUX_SAES,
+ MUX_SAI1,
+ MUX_SAI2,
+ MUX_SDMMC1,
+ MUX_SDMMC2,
+ MUX_SPDIF,
+ MUX_SPI1,
+ MUX_SPI23,
+ MUX_SPI4,
+ MUX_SPI5,
+ MUX_STGEN,
+ MUX_UART1,
+ MUX_UART2,
+ MUX_UART4,
+ MUX_UART6,
+ MUX_UART35,
+ MUX_UART78,
+ MUX_USBO,
+ MUX_USBPHY,
+ MUX_NB
+};
+
+#define _CFG_MUX(_id, _offset, _shift, _width, _ready, _flags)\
+ [_id] = {\
+ .offset = (_offset),\
+ .shift = (_shift),\
+ .width = (_width),\
+ .ready = (_ready),\
+ .flags = (_flags),\
+ }
+
+#define CFG_MUX(_id, _offset, _shift, _width)\
+ _CFG_MUX(_id, _offset, _shift, _width, MUX_NO_RDY, 0)
+
+#define CFG_MUX_SAFE(_id, _offset, _shift, _width)\
+ _CFG_MUX(_id, _offset, _shift, _width, MUX_NO_RDY, MUX_SAFE)
+
+static const struct stm32_mux_cfg stm32mp13_muxes[] = {
+ CFG_MUX(MUX_I2C12, RCC_I2C12CKSELR, 0, 3),
+ CFG_MUX(MUX_LPTIM45, RCC_LPTIM45CKSELR, 0, 3),
+ CFG_MUX(MUX_SPI23, RCC_SPI2S23CKSELR, 0, 3),
+ CFG_MUX(MUX_UART35, RCC_UART35CKSELR, 0, 3),
+ CFG_MUX(MUX_UART78, RCC_UART78CKSELR, 0, 3),
+ CFG_MUX(MUX_ADC1, RCC_ADC12CKSELR, 0, 2),
+ CFG_MUX(MUX_ADC2, RCC_ADC12CKSELR, 2, 2),
+ CFG_MUX(MUX_DCMIPP, RCC_DCMIPPCKSELR, 0, 2),
+ CFG_MUX(MUX_ETH1, RCC_ETH12CKSELR, 0, 2),
+ CFG_MUX(MUX_ETH2, RCC_ETH12CKSELR, 8, 2),
+ CFG_MUX(MUX_FDCAN, RCC_FDCANCKSELR, 0, 2),
+ CFG_MUX(MUX_I2C3, RCC_I2C345CKSELR, 0, 3),
+ CFG_MUX(MUX_I2C4, RCC_I2C345CKSELR, 3, 3),
+ CFG_MUX(MUX_I2C5, RCC_I2C345CKSELR, 6, 3),
+ CFG_MUX(MUX_LPTIM1, RCC_LPTIM1CKSELR, 0, 3),
+ CFG_MUX(MUX_LPTIM2, RCC_LPTIM23CKSELR, 0, 3),
+ CFG_MUX(MUX_LPTIM3, RCC_LPTIM23CKSELR, 3, 3),
+ CFG_MUX(MUX_MCO1, RCC_MCO1CFGR, 0, 3),
+ CFG_MUX(MUX_MCO2, RCC_MCO2CFGR, 0, 3),
+ CFG_MUX(MUX_RNG1, RCC_RNG1CKSELR, 0, 2),
+ CFG_MUX(MUX_SAES, RCC_SAESCKSELR, 0, 2),
+ CFG_MUX(MUX_SAI1, RCC_SAI1CKSELR, 0, 3),
+ CFG_MUX(MUX_SAI2, RCC_SAI2CKSELR, 0, 3),
+ CFG_MUX(MUX_SPDIF, RCC_SPDIFCKSELR, 0, 2),
+ CFG_MUX(MUX_SPI1, RCC_SPI2S1CKSELR, 0, 3),
+ CFG_MUX(MUX_SPI4, RCC_SPI45CKSELR, 0, 3),
+ CFG_MUX(MUX_SPI5, RCC_SPI45CKSELR, 3, 3),
+ CFG_MUX(MUX_STGEN, RCC_STGENCKSELR, 0, 2),
+ CFG_MUX(MUX_UART1, RCC_UART12CKSELR, 0, 3),
+ CFG_MUX(MUX_UART2, RCC_UART12CKSELR, 3, 3),
+ CFG_MUX(MUX_UART4, RCC_UART4CKSELR, 0, 3),
+ CFG_MUX(MUX_UART6, RCC_UART6CKSELR, 0, 3),
+ CFG_MUX(MUX_USBO, RCC_USBCKSELR, 4, 1),
+ CFG_MUX(MUX_USBPHY, RCC_USBCKSELR, 0, 2),
+ CFG_MUX_SAFE(MUX_FMC, RCC_FMCCKSELR, 0, 2),
+ CFG_MUX_SAFE(MUX_QSPI, RCC_QSPICKSELR, 0, 2),
+ CFG_MUX_SAFE(MUX_SDMMC1, RCC_SDMMC12CKSELR, 0, 3),
+ CFG_MUX_SAFE(MUX_SDMMC2, RCC_SDMMC12CKSELR, 3, 3),
+};
+
+struct clk_stm32_security {
+ u32 offset;
+ u8 bit_idx;
+ unsigned long scmi_id;
+};
+
+enum security_clk {
+ SECF_NONE,
+ SECF_LPTIM2,
+ SECF_LPTIM3,
+ SECF_VREF,
+ SECF_DCMIPP,
+ SECF_USBPHY,
+ SECF_TZC,
+ SECF_ETZPC,
+ SECF_IWDG1,
+ SECF_BSEC,
+ SECF_STGENC,
+ SECF_STGENRO,
+ SECF_USART1,
+ SECF_USART2,
+ SECF_SPI4,
+ SECF_SPI5,
+ SECF_I2C3,
+ SECF_I2C4,
+ SECF_I2C5,
+ SECF_TIM12,
+ SECF_TIM13,
+ SECF_TIM14,
+ SECF_TIM15,
+ SECF_TIM16,
+ SECF_TIM17,
+ SECF_DMA3,
+ SECF_DMAMUX2,
+ SECF_ADC1,
+ SECF_ADC2,
+ SECF_USBO,
+ SECF_TSC,
+ SECF_PKA,
+ SECF_SAES,
+ SECF_CRYP1,
+ SECF_HASH1,
+ SECF_RNG1,
+ SECF_BKPSRAM,
+ SECF_MCE,
+ SECF_FMC,
+ SECF_QSPI,
+ SECF_SDMMC1,
+ SECF_SDMMC2,
+ SECF_ETH1CK,
+ SECF_ETH1TX,
+ SECF_ETH1RX,
+ SECF_ETH1MAC,
+ SECF_ETH1STP,
+ SECF_ETH2CK,
+ SECF_ETH2TX,
+ SECF_ETH2RX,
+ SECF_ETH2MAC,
+ SECF_ETH2STP,
+ SECF_MCO1,
+ SECF_MCO2
+};
+
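+/*
+ * Map each SECF_* identifier to the RCC *SECSR register and bit used to
+ * report that the corresponding peripheral clock is secured.
+ */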
+#define SECF(_sec_id, _offset, _bit_idx)[_sec_id] = {\
+ .offset = _offset,\
+ .bit_idx = _bit_idx,\
+ .scmi_id = -1,\
+}
+
+static const struct clk_stm32_security stm32mp13_security[] = {
+ SECF(SECF_LPTIM2, RCC_APB3SECSR, RCC_APB3SECSR_LPTIM2SECF),
+ SECF(SECF_LPTIM3, RCC_APB3SECSR, RCC_APB3SECSR_LPTIM3SECF),
+ SECF(SECF_VREF, RCC_APB3SECSR, RCC_APB3SECSR_VREFSECF),
+ SECF(SECF_DCMIPP, RCC_APB4SECSR, RCC_APB4SECSR_DCMIPPSECF),
+ SECF(SECF_USBPHY, RCC_APB4SECSR, RCC_APB4SECSR_USBPHYSECF),
+ SECF(SECF_TZC, RCC_APB5SECSR, RCC_APB5SECSR_TZCSECF),
+ SECF(SECF_ETZPC, RCC_APB5SECSR, RCC_APB5SECSR_ETZPCSECF),
+ SECF(SECF_IWDG1, RCC_APB5SECSR, RCC_APB5SECSR_IWDG1SECF),
+ SECF(SECF_BSEC, RCC_APB5SECSR, RCC_APB5SECSR_BSECSECF),
+ SECF(SECF_STGENC, RCC_APB5SECSR, RCC_APB5SECSR_STGENCSECF),
+ SECF(SECF_STGENRO, RCC_APB5SECSR, RCC_APB5SECSR_STGENROSECF),
+ SECF(SECF_USART1, RCC_APB6SECSR, RCC_APB6SECSR_USART1SECF),
+ SECF(SECF_USART2, RCC_APB6SECSR, RCC_APB6SECSR_USART2SECF),
+ SECF(SECF_SPI4, RCC_APB6SECSR, RCC_APB6SECSR_SPI4SECF),
+ SECF(SECF_SPI5, RCC_APB6SECSR, RCC_APB6SECSR_SPI5SECF),
+ SECF(SECF_I2C3, RCC_APB6SECSR, RCC_APB6SECSR_I2C3SECF),
+ SECF(SECF_I2C4, RCC_APB6SECSR, RCC_APB6SECSR_I2C4SECF),
+ SECF(SECF_I2C5, RCC_APB6SECSR, RCC_APB6SECSR_I2C5SECF),
+ SECF(SECF_TIM12, RCC_APB6SECSR, RCC_APB6SECSR_TIM12SECF),
+ SECF(SECF_TIM13, RCC_APB6SECSR, RCC_APB6SECSR_TIM13SECF),
+ SECF(SECF_TIM14, RCC_APB6SECSR, RCC_APB6SECSR_TIM14SECF),
+ SECF(SECF_TIM15, RCC_APB6SECSR, RCC_APB6SECSR_TIM15SECF),
+ SECF(SECF_TIM16, RCC_APB6SECSR, RCC_APB6SECSR_TIM16SECF),
+ SECF(SECF_TIM17, RCC_APB6SECSR, RCC_APB6SECSR_TIM17SECF),
+ SECF(SECF_DMA3, RCC_AHB2SECSR, RCC_AHB2SECSR_DMA3SECF),
+ SECF(SECF_DMAMUX2, RCC_AHB2SECSR, RCC_AHB2SECSR_DMAMUX2SECF),
+ SECF(SECF_ADC1, RCC_AHB2SECSR, RCC_AHB2SECSR_ADC1SECF),
+ SECF(SECF_ADC2, RCC_AHB2SECSR, RCC_AHB2SECSR_ADC2SECF),
+ SECF(SECF_USBO, RCC_AHB2SECSR, RCC_AHB2SECSR_USBOSECF),
+ SECF(SECF_TSC, RCC_AHB4SECSR, RCC_AHB4SECSR_TSCSECF),
+ SECF(SECF_PKA, RCC_AHB5SECSR, RCC_AHB5SECSR_PKASECF),
+ SECF(SECF_SAES, RCC_AHB5SECSR, RCC_AHB5SECSR_SAESSECF),
+ SECF(SECF_CRYP1, RCC_AHB5SECSR, RCC_AHB5SECSR_CRYP1SECF),
+ SECF(SECF_HASH1, RCC_AHB5SECSR, RCC_AHB5SECSR_HASH1SECF),
+ SECF(SECF_RNG1, RCC_AHB5SECSR, RCC_AHB5SECSR_RNG1SECF),
+ SECF(SECF_BKPSRAM, RCC_AHB5SECSR, RCC_AHB5SECSR_BKPSRAMSECF),
+ SECF(SECF_MCE, RCC_AHB6SECSR, RCC_AHB6SECSR_MCESECF),
+ SECF(SECF_FMC, RCC_AHB6SECSR, RCC_AHB6SECSR_FMCSECF),
+ SECF(SECF_QSPI, RCC_AHB6SECSR, RCC_AHB6SECSR_QSPISECF),
+ SECF(SECF_SDMMC1, RCC_AHB6SECSR, RCC_AHB6SECSR_SDMMC1SECF),
+ SECF(SECF_SDMMC2, RCC_AHB6SECSR, RCC_AHB6SECSR_SDMMC2SECF),
+ SECF(SECF_ETH1CK, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH1CKSECF),
+ SECF(SECF_ETH1TX, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH1TXSECF),
+ SECF(SECF_ETH1RX, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH1RXSECF),
+ SECF(SECF_ETH1MAC, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH1MACSECF),
+ SECF(SECF_ETH1STP, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH1STPSECF),
+ SECF(SECF_ETH2CK, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH2CKSECF),
+ SECF(SECF_ETH2TX, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH2TXSECF),
+ SECF(SECF_ETH2RX, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH2RXSECF),
+ SECF(SECF_ETH2MAC, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH2MACSECF),
+ SECF(SECF_ETH2STP, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH2STPSECF),
+ SECF(SECF_MCO1, RCC_SECCFGR, RCC_SECCFGR_MCO1SEC),
+ SECF(SECF_MCO2, RCC_SECCFGR, RCC_SECCFGR_MCO2SEC),
+};
+
+static const char * const adc12_src[] = {
+ "pll4_r", "ck_per", "pll3_q"
+};
+
+static const char * const dcmipp_src[] = {
+ "ck_axi", "pll2_q", "pll4_p", "ck_per",
+};
+
+static const char * const eth12_src[] = {
+ "pll4_p", "pll3_q"
+};
+
+static const char * const fdcan_src[] = {
+ "ck_hse", "pll3_q", "pll4_q", "pll4_r"
+};
+
+static const char * const fmc_src[] = {
+ "ck_axi", "pll3_r", "pll4_p", "ck_per"
+};
+
+static const char * const i2c12_src[] = {
+ "pclk1", "pll4_r", "ck_hsi", "ck_csi"
+};
+
+static const char * const i2c345_src[] = {
+ "pclk6", "pll4_r", "ck_hsi", "ck_csi"
+};
+
+static const char * const lptim1_src[] = {
+ "pclk1", "pll4_p", "pll3_q", "ck_lse", "ck_lsi", "ck_per"
+};
+
+static const char * const lptim23_src[] = {
+ "pclk3", "pll4_q", "ck_per", "ck_lse", "ck_lsi"
+};
+
+static const char * const lptim45_src[] = {
+ "pclk3", "pll4_p", "pll3_q", "ck_lse", "ck_lsi", "ck_per"
+};
+
+static const char * const mco1_src[] = {
+ "ck_hsi", "ck_hse", "ck_csi", "ck_lsi", "ck_lse"
+};
+
+static const char * const mco2_src[] = {
+ "ck_mpu", "ck_axi", "ck_mlahb", "pll4_p", "ck_hse", "ck_hsi"
+};
+
+static const char * const qspi_src[] = {
+ "ck_axi", "pll3_r", "pll4_p", "ck_per"
+};
+
+static const char * const rng1_src[] = {
+ "ck_csi", "pll4_r", "ck_lse", "ck_lsi"
+};
+
+static const char * const saes_src[] = {
+ "ck_axi", "ck_per", "pll4_r", "ck_lsi"
+};
+
+static const char * const sai1_src[] = {
+ "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "pll3_r"
+};
+
+static const char * const sai2_src[] = {
+ "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb", "pll3_r"
+};
+
+static const char * const sdmmc12_src[] = {
+ "ck_axi", "pll3_r", "pll4_p", "ck_hsi"
+};
+
+static const char * const spdif_src[] = {
+ "pll4_p", "pll3_q", "ck_hsi"
+};
+
+static const char * const spi123_src[] = {
+ "pll4_p", "pll3_q", "i2s_ckin", "ck_per", "pll3_r"
+};
+
+static const char * const spi4_src[] = {
+ "pclk6", "pll4_q", "ck_hsi", "ck_csi", "ck_hse", "i2s_ckin"
+};
+
+static const char * const spi5_src[] = {
+ "pclk6", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
+};
+
+static const char * const stgen_src[] = {
+ "ck_hsi", "ck_hse"
+};
+
+static const char * const usart12_src[] = {
+ "pclk6", "pll3_q", "ck_hsi", "ck_csi", "pll4_q", "ck_hse"
+};
+
+static const char * const usart34578_src[] = {
+ "pclk1", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
+};
+
+static const char * const usart6_src[] = {
+ "pclk2", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
+};
+
+static const char * const usbo_src[] = {
+ "pll4_r", "ck_usbo_48m"
+};
+
+static const char * const usbphy_src[] = {
+ "ck_hse", "pll4_r", "clk-hse-div2"
+};
+
+/* Timer clocks */
+static struct clk_stm32_gate tim2_k = {
+ .gate_id = GATE_TIM2,
+ .hw.init = CLK_HW_INIT("tim2_k", "timg1_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate tim3_k = {
+ .gate_id = GATE_TIM3,
+ .hw.init = CLK_HW_INIT("tim3_k", "timg1_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate tim4_k = {
+ .gate_id = GATE_TIM4,
+ .hw.init = CLK_HW_INIT("tim4_k", "timg1_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate tim5_k = {
+ .gate_id = GATE_TIM5,
+ .hw.init = CLK_HW_INIT("tim5_k", "timg1_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate tim6_k = {
+ .gate_id = GATE_TIM6,
+ .hw.init = CLK_HW_INIT("tim6_k", "timg1_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate tim7_k = {
+ .gate_id = GATE_TIM7,
+ .hw.init = CLK_HW_INIT("tim7_k", "timg1_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate tim1_k = {
+ .gate_id = GATE_TIM1,
+ .hw.init = CLK_HW_INIT("tim1_k", "timg2_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate tim8_k = {
+ .gate_id = GATE_TIM8,
+ .hw.init = CLK_HW_INIT("tim8_k", "timg2_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate tim12_k = {
+ .gate_id = GATE_TIM12,
+ .hw.init = CLK_HW_INIT("tim12_k", "timg3_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate tim13_k = {
+ .gate_id = GATE_TIM13,
+ .hw.init = CLK_HW_INIT("tim13_k", "timg3_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate tim14_k = {
+ .gate_id = GATE_TIM14,
+ .hw.init = CLK_HW_INIT("tim14_k", "timg3_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate tim15_k = {
+ .gate_id = GATE_TIM15,
+ .hw.init = CLK_HW_INIT("tim15_k", "timg3_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate tim16_k = {
+ .gate_id = GATE_TIM16,
+ .hw.init = CLK_HW_INIT("tim16_k", "timg3_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate tim17_k = {
+ .gate_id = GATE_TIM17,
+ .hw.init = CLK_HW_INIT("tim17_k", "timg3_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT),
+};
+
+/* Peripheral clocks */
+static struct clk_stm32_gate sai1 = {
+ .gate_id = GATE_SAI1,
+ .hw.init = CLK_HW_INIT("sai1", "pclk2", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate sai2 = {
+ .gate_id = GATE_SAI2,
+ .hw.init = CLK_HW_INIT("sai2", "pclk2", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate syscfg = {
+ .gate_id = GATE_SYSCFG,
+ .hw.init = CLK_HW_INIT("syscfg", "pclk3", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate vref = {
+ .gate_id = GATE_VREF,
+ .hw.init = CLK_HW_INIT("vref", "pclk3", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate dts = {
+ .gate_id = GATE_DTS,
+ .hw.init = CLK_HW_INIT("dts", "pclk3", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate pmbctrl = {
+ .gate_id = GATE_PMBCTRL,
+ .hw.init = CLK_HW_INIT("pmbctrl", "pclk3", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate hdp = {
+ .gate_id = GATE_HDP,
+ .hw.init = CLK_HW_INIT("hdp", "pclk3", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate iwdg2 = {
+ .gate_id = GATE_IWDG2APB,
+ .hw.init = CLK_HW_INIT("iwdg2", "pclk4", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate stgenro = {
+ .gate_id = GATE_STGENRO,
+ .hw.init = CLK_HW_INIT("stgenro", "pclk4", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate gpioa = {
+ .gate_id = GATE_GPIOA,
+ .hw.init = CLK_HW_INIT("gpioa", "pclk4", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate gpiob = {
+ .gate_id = GATE_GPIOB,
+ .hw.init = CLK_HW_INIT("gpiob", "pclk4", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate gpioc = {
+ .gate_id = GATE_GPIOC,
+ .hw.init = CLK_HW_INIT("gpioc", "pclk4", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate gpiod = {
+ .gate_id = GATE_GPIOD,
+ .hw.init = CLK_HW_INIT("gpiod", "pclk4", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate gpioe = {
+ .gate_id = GATE_GPIOE,
+ .hw.init = CLK_HW_INIT("gpioe", "pclk4", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate gpiof = {
+ .gate_id = GATE_GPIOF,
+ .hw.init = CLK_HW_INIT("gpiof", "pclk4", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate gpiog = {
+ .gate_id = GATE_GPIOG,
+ .hw.init = CLK_HW_INIT("gpiog", "pclk4", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate gpioh = {
+ .gate_id = GATE_GPIOH,
+ .hw.init = CLK_HW_INIT("gpioh", "pclk4", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate gpioi = {
+ .gate_id = GATE_GPIOI,
+ .hw.init = CLK_HW_INIT("gpioi", "pclk4", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate tsc = {
+ .gate_id = GATE_TSC,
+ .hw.init = CLK_HW_INIT("tsc", "pclk4", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ddrperfm = {
+ .gate_id = GATE_DDRPERFM,
+ .hw.init = CLK_HW_INIT("ddrperfm", "pclk4", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate tzpc = {
+ .gate_id = GATE_TZC,
+ .hw.init = CLK_HW_INIT("tzpc", "pclk5", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate iwdg1 = {
+ .gate_id = GATE_IWDG1APB,
+ .hw.init = CLK_HW_INIT("iwdg1", "pclk5", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate bsec = {
+ .gate_id = GATE_BSEC,
+ .hw.init = CLK_HW_INIT("bsec", "pclk5", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate dma1 = {
+ .gate_id = GATE_DMA1,
+ .hw.init = CLK_HW_INIT("dma1", "ck_mlahb", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate dma2 = {
+ .gate_id = GATE_DMA2,
+ .hw.init = CLK_HW_INIT("dma2", "ck_mlahb", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate dmamux1 = {
+ .gate_id = GATE_DMAMUX1,
+ .hw.init = CLK_HW_INIT("dmamux1", "ck_mlahb", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate dma3 = {
+ .gate_id = GATE_DMA3,
+ .hw.init = CLK_HW_INIT("dma3", "ck_mlahb", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate dmamux2 = {
+ .gate_id = GATE_DMAMUX2,
+ .hw.init = CLK_HW_INIT("dmamux2", "ck_mlahb", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate adc1 = {
+ .gate_id = GATE_ADC1,
+ .hw.init = CLK_HW_INIT("adc1", "ck_mlahb", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate adc2 = {
+ .gate_id = GATE_ADC2,
+ .hw.init = CLK_HW_INIT("adc2", "ck_mlahb", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate pka = {
+ .gate_id = GATE_PKA,
+ .hw.init = CLK_HW_INIT("pka", "ck_axi", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate cryp1 = {
+ .gate_id = GATE_CRYP1,
+ .hw.init = CLK_HW_INIT("cryp1", "ck_axi", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate hash1 = {
+ .gate_id = GATE_HASH1,
+ .hw.init = CLK_HW_INIT("hash1", "ck_axi", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate bkpsram = {
+ .gate_id = GATE_BKPSRAM,
+ .hw.init = CLK_HW_INIT("bkpsram", "ck_axi", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate mdma = {
+ .gate_id = GATE_MDMA,
+ .hw.init = CLK_HW_INIT("mdma", "ck_axi", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate eth1tx = {
+ .gate_id = GATE_ETH1TX,
+ .hw.init = CLK_HW_INIT("eth1tx", "ck_axi", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate eth1rx = {
+ .gate_id = GATE_ETH1RX,
+ .hw.init = CLK_HW_INIT("eth1rx", "ck_axi", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate eth1mac = {
+ .gate_id = GATE_ETH1MAC,
+ .hw.init = CLK_HW_INIT("eth1mac", "ck_axi", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate eth2tx = {
+ .gate_id = GATE_ETH2TX,
+ .hw.init = CLK_HW_INIT("eth2tx", "ck_axi", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate eth2rx = {
+ .gate_id = GATE_ETH2RX,
+ .hw.init = CLK_HW_INIT("eth2rx", "ck_axi", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate eth2mac = {
+ .gate_id = GATE_ETH2MAC,
+ .hw.init = CLK_HW_INIT("eth2mac", "ck_axi", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate crc1 = {
+ .gate_id = GATE_CRC1,
+ .hw.init = CLK_HW_INIT("crc1", "ck_axi", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate usbh = {
+ .gate_id = GATE_USBH,
+ .hw.init = CLK_HW_INIT("usbh", "ck_axi", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate eth1stp = {
+ .gate_id = GATE_ETH1STP,
+ .hw.init = CLK_HW_INIT("eth1stp", "ck_axi", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate eth2stp = {
+ .gate_id = GATE_ETH2STP,
+ .hw.init = CLK_HW_INIT("eth2stp", "ck_axi", &clk_stm32_gate_ops, 0),
+};
+
+/* Kernel clocks */
+static struct clk_stm32_composite sdmmc1_k = {
+ .gate_id = GATE_SDMMC1,
+ .mux_id = MUX_SDMMC1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("sdmmc1_k", sdmmc12_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite sdmmc2_k = {
+ .gate_id = GATE_SDMMC2,
+ .mux_id = MUX_SDMMC2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("sdmmc2_k", sdmmc12_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite fmc_k = {
+ .gate_id = GATE_FMC,
+ .mux_id = MUX_FMC,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("fmc_k", fmc_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite qspi_k = {
+ .gate_id = GATE_QSPI,
+ .mux_id = MUX_QSPI,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("qspi_k", qspi_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite spi2_k = {
+ .gate_id = GATE_SPI2,
+ .mux_id = MUX_SPI23,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("spi2_k", spi123_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite spi3_k = {
+ .gate_id = GATE_SPI3,
+ .mux_id = MUX_SPI23,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("spi3_k", spi123_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite i2c1_k = {
+ .gate_id = GATE_I2C1,
+ .mux_id = MUX_I2C12,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("i2c1_k", i2c12_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite i2c2_k = {
+ .gate_id = GATE_I2C2,
+ .mux_id = MUX_I2C12,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("i2c2_k", i2c12_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite lptim4_k = {
+ .gate_id = GATE_LPTIM4,
+ .mux_id = MUX_LPTIM45,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("lptim4_k", lptim45_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite lptim5_k = {
+ .gate_id = GATE_LPTIM5,
+ .mux_id = MUX_LPTIM45,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("lptim5_k", lptim45_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite usart3_k = {
+ .gate_id = GATE_USART3,
+ .mux_id = MUX_UART35,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("usart3_k", usart34578_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite uart5_k = {
+ .gate_id = GATE_UART5,
+ .mux_id = MUX_UART35,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("uart5_k", usart34578_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite uart7_k = {
+ .gate_id = GATE_UART7,
+ .mux_id = MUX_UART78,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("uart7_k", usart34578_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite uart8_k = {
+ .gate_id = GATE_UART8,
+ .mux_id = MUX_UART78,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("uart8_k", usart34578_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite sai1_k = {
+ .gate_id = GATE_SAI1,
+ .mux_id = MUX_SAI1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("sai1_k", sai1_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite adfsdm_k = {
+ .gate_id = GATE_ADFSDM,
+ .mux_id = MUX_SAI1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("adfsdm_k", sai1_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite sai2_k = {
+ .gate_id = GATE_SAI2,
+ .mux_id = MUX_SAI2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("sai2_k", sai2_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite adc1_k = {
+ .gate_id = GATE_ADC1,
+ .mux_id = MUX_ADC1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("adc1_k", adc12_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite adc2_k = {
+ .gate_id = GATE_ADC2,
+ .mux_id = MUX_ADC2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("adc2_k", adc12_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite rng1_k = {
+ .gate_id = GATE_RNG1,
+ .mux_id = MUX_RNG1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("rng1_k", rng1_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite usbphy_k = {
+ .gate_id = GATE_USBPHY,
+ .mux_id = MUX_USBPHY,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("usbphy_k", usbphy_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite stgen_k = {
+ .gate_id = GATE_STGENC,
+ .mux_id = MUX_STGEN,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("stgen_k", stgen_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite spdif_k = {
+ .gate_id = GATE_SPDIF,
+ .mux_id = MUX_SPDIF,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("spdif_k", spdif_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite spi1_k = {
+ .gate_id = GATE_SPI1,
+ .mux_id = MUX_SPI1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("spi1_k", spi123_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite spi4_k = {
+ .gate_id = GATE_SPI4,
+ .mux_id = MUX_SPI4,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("spi4_k", spi4_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite spi5_k = {
+ .gate_id = GATE_SPI5,
+ .mux_id = MUX_SPI5,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("spi5_k", spi5_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite i2c3_k = {
+ .gate_id = GATE_I2C3,
+ .mux_id = MUX_I2C3,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("i2c3_k", i2c345_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite i2c4_k = {
+ .gate_id = GATE_I2C4,
+ .mux_id = MUX_I2C4,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("i2c4_k", i2c345_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite i2c5_k = {
+ .gate_id = GATE_I2C5,
+ .mux_id = MUX_I2C5,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("i2c5_k", i2c345_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite lptim1_k = {
+ .gate_id = GATE_LPTIM1,
+ .mux_id = MUX_LPTIM1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("lptim1_k", lptim1_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite lptim2_k = {
+ .gate_id = GATE_LPTIM2,
+ .mux_id = MUX_LPTIM2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("lptim2_k", lptim23_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite lptim3_k = {
+ .gate_id = GATE_LPTIM3,
+ .mux_id = MUX_LPTIM3,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("lptim3_k", lptim23_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite usart1_k = {
+ .gate_id = GATE_USART1,
+ .mux_id = MUX_UART1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("usart1_k", usart12_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite usart2_k = {
+ .gate_id = GATE_USART2,
+ .mux_id = MUX_UART2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("usart2_k", usart12_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite uart4_k = {
+ .gate_id = GATE_UART4,
+ .mux_id = MUX_UART4,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("uart4_k", usart34578_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite uart6_k = {
+ .gate_id = GATE_USART6,
+ .mux_id = MUX_UART6,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("uart6_k", usart6_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite fdcan_k = {
+ .gate_id = GATE_FDCAN,
+ .mux_id = MUX_FDCAN,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("fdcan_k", fdcan_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite dcmipp_k = {
+ .gate_id = GATE_DCMIPP,
+ .mux_id = MUX_DCMIPP,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("dcmipp_k", dcmipp_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite usbo_k = {
+ .gate_id = GATE_USBO,
+ .mux_id = MUX_USBO,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("usbo_k", usbo_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite saes_k = {
+ .gate_id = GATE_SAES,
+ .mux_id = MUX_SAES,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS("saes_k", saes_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_gate dfsdm_k = {
+ .gate_id = GATE_DFSDM,
+ .hw.init = CLK_HW_INIT("dfsdm_k", "ck_mlahb", &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ltdc_px = {
+ .gate_id = GATE_LTDC,
+ .hw.init = CLK_HW_INIT("ltdc_px", "pll4_q", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_mux ck_ker_eth1 = {
+ .mux_id = MUX_ETH1,
+ .hw.init = CLK_HW_INIT_PARENTS("ck_ker_eth1", eth12_src, &clk_stm32_mux_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_gate eth1ck_k = {
+ .gate_id = GATE_ETH1CK,
+ .hw.init = CLK_HW_INIT_HW("eth1ck_k", &ck_ker_eth1.hw, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_div eth1ptp_k = {
+ .div_id = DIV_ETH1PTP,
+ .hw.init = CLK_HW_INIT_HW("eth1ptp_k", &ck_ker_eth1.hw, &clk_stm32_divider_ops,
+ CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_mux ck_ker_eth2 = {
+ .mux_id = MUX_ETH2,
+ .hw.init = CLK_HW_INIT_PARENTS("ck_ker_eth2", eth12_src, &clk_stm32_mux_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_gate eth2ck_k = {
+ .gate_id = GATE_ETH2CK,
+ .hw.init = CLK_HW_INIT_HW("eth2ck_k", &ck_ker_eth2.hw, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_div eth2ptp_k = {
+ .div_id = DIV_ETH2PTP,
+ .hw.init = CLK_HW_INIT_HW("eth2ptp_k", &ck_ker_eth2.hw, &clk_stm32_divider_ops,
+ CLK_SET_RATE_NO_REPARENT),
+};
+
+static struct clk_stm32_composite ck_mco1 = {
+ .gate_id = GATE_MCO1,
+ .mux_id = MUX_MCO1,
+ .div_id = DIV_MCO1,
+ .hw.init = CLK_HW_INIT_PARENTS("ck_mco1", mco1_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT |
+ CLK_IGNORE_UNUSED),
+};
+
+static struct clk_stm32_composite ck_mco2 = {
+ .gate_id = GATE_MCO2,
+ .mux_id = MUX_MCO2,
+ .div_id = DIV_MCO2,
+ .hw.init = CLK_HW_INIT_PARENTS("ck_mco2", mco2_src, &clk_stm32_composite_ops,
+ CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT |
+ CLK_IGNORE_UNUSED),
+};
+
+/* Debug clocks */
+static struct clk_stm32_gate ck_sys_dbg = {
+ .gate_id = GATE_DBGCK,
+ .hw.init = CLK_HW_INIT("ck_sys_dbg", "ck_axi", &clk_stm32_gate_ops, CLK_IS_CRITICAL),
+};
+
+static struct clk_stm32_composite ck_trace = {
+ .gate_id = GATE_TRACECK,
+ .mux_id = NO_STM32_MUX,
+ .div_id = DIV_TRACE,
+ .hw.init = CLK_HW_INIT("ck_trace", "ck_axi", &clk_stm32_composite_ops, CLK_IGNORE_UNUSED),
+};
+
+static const struct clock_config stm32mp13_clock_cfg[] = {
+ /* Timer clocks */
+ STM32_GATE_CFG(TIM2_K, tim2_k, SECF_NONE),
+ STM32_GATE_CFG(TIM3_K, tim3_k, SECF_NONE),
+ STM32_GATE_CFG(TIM4_K, tim4_k, SECF_NONE),
+ STM32_GATE_CFG(TIM5_K, tim5_k, SECF_NONE),
+ STM32_GATE_CFG(TIM6_K, tim6_k, SECF_NONE),
+ STM32_GATE_CFG(TIM7_K, tim7_k, SECF_NONE),
+ STM32_GATE_CFG(TIM1_K, tim1_k, SECF_NONE),
+ STM32_GATE_CFG(TIM8_K, tim8_k, SECF_NONE),
+ STM32_GATE_CFG(TIM12_K, tim12_k, SECF_TIM12),
+ STM32_GATE_CFG(TIM13_K, tim13_k, SECF_TIM13),
+ STM32_GATE_CFG(TIM14_K, tim14_k, SECF_TIM14),
+ STM32_GATE_CFG(TIM15_K, tim15_k, SECF_TIM15),
+ STM32_GATE_CFG(TIM16_K, tim16_k, SECF_TIM16),
+ STM32_GATE_CFG(TIM17_K, tim17_k, SECF_TIM17),
+
+ /* Peripheral clocks */
+ STM32_GATE_CFG(SAI1, sai1, SECF_NONE),
+ STM32_GATE_CFG(SAI2, sai2, SECF_NONE),
+ STM32_GATE_CFG(SYSCFG, syscfg, SECF_NONE),
+ STM32_GATE_CFG(VREF, vref, SECF_VREF),
+ STM32_GATE_CFG(DTS, dts, SECF_NONE),
+ STM32_GATE_CFG(PMBCTRL, pmbctrl, SECF_NONE),
+ STM32_GATE_CFG(HDP, hdp, SECF_NONE),
+ STM32_GATE_CFG(IWDG2, iwdg2, SECF_NONE),
+ STM32_GATE_CFG(STGENRO, stgenro, SECF_STGENRO),
+ STM32_GATE_CFG(TZPC, tzpc, SECF_TZC),
+ STM32_GATE_CFG(IWDG1, iwdg1, SECF_IWDG1),
+ STM32_GATE_CFG(BSEC, bsec, SECF_BSEC),
+ STM32_GATE_CFG(DMA1, dma1, SECF_NONE),
+ STM32_GATE_CFG(DMA2, dma2, SECF_NONE),
+ STM32_GATE_CFG(DMAMUX1, dmamux1, SECF_NONE),
+ STM32_GATE_CFG(DMA3, dma3, SECF_DMA3),
+ STM32_GATE_CFG(DMAMUX2, dmamux2, SECF_DMAMUX2),
+ STM32_GATE_CFG(ADC1, adc1, SECF_ADC1),
+ STM32_GATE_CFG(ADC2, adc2, SECF_ADC2),
+ STM32_GATE_CFG(GPIOA, gpioa, SECF_NONE),
+ STM32_GATE_CFG(GPIOB, gpiob, SECF_NONE),
+ STM32_GATE_CFG(GPIOC, gpioc, SECF_NONE),
+ STM32_GATE_CFG(GPIOD, gpiod, SECF_NONE),
+ STM32_GATE_CFG(GPIOE, gpioe, SECF_NONE),
+ STM32_GATE_CFG(GPIOF, gpiof, SECF_NONE),
+ STM32_GATE_CFG(GPIOG, gpiog, SECF_NONE),
+ STM32_GATE_CFG(GPIOH, gpioh, SECF_NONE),
+ STM32_GATE_CFG(GPIOI, gpioi, SECF_NONE),
+ STM32_GATE_CFG(TSC, tsc, SECF_TZC),
+ STM32_GATE_CFG(PKA, pka, SECF_PKA),
+ STM32_GATE_CFG(CRYP1, cryp1, SECF_CRYP1),
+ STM32_GATE_CFG(HASH1, hash1, SECF_HASH1),
+ STM32_GATE_CFG(BKPSRAM, bkpsram, SECF_BKPSRAM),
+ STM32_GATE_CFG(MDMA, mdma, SECF_NONE),
+ STM32_GATE_CFG(ETH1TX, eth1tx, SECF_ETH1TX),
+ STM32_GATE_CFG(ETH1RX, eth1rx, SECF_ETH1RX),
+ STM32_GATE_CFG(ETH1MAC, eth1mac, SECF_ETH1MAC),
+ STM32_GATE_CFG(ETH2TX, eth2tx, SECF_ETH2TX),
+ STM32_GATE_CFG(ETH2RX, eth2rx, SECF_ETH2RX),
+ STM32_GATE_CFG(ETH2MAC, eth2mac, SECF_ETH2MAC),
+ STM32_GATE_CFG(CRC1, crc1, SECF_NONE),
+ STM32_GATE_CFG(USBH, usbh, SECF_NONE),
+ STM32_GATE_CFG(DDRPERFM, ddrperfm, SECF_NONE),
+ STM32_GATE_CFG(ETH1STP, eth1stp, SECF_ETH1STP),
+ STM32_GATE_CFG(ETH2STP, eth2stp, SECF_ETH2STP),
+
+ /* Kernel clocks */
+ STM32_COMPOSITE_CFG(SDMMC1_K, sdmmc1_k, SECF_SDMMC1),
+ STM32_COMPOSITE_CFG(SDMMC2_K, sdmmc2_k, SECF_SDMMC2),
+ STM32_COMPOSITE_CFG(FMC_K, fmc_k, SECF_FMC),
+ STM32_COMPOSITE_CFG(QSPI_K, qspi_k, SECF_QSPI),
+ STM32_COMPOSITE_CFG(SPI2_K, spi2_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(SPI3_K, spi3_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(I2C1_K, i2c1_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(I2C2_K, i2c2_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(LPTIM4_K, lptim4_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(LPTIM5_K, lptim5_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(USART3_K, usart3_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(UART5_K, uart5_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(UART7_K, uart7_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(UART8_K, uart8_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(SAI1_K, sai1_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(SAI2_K, sai2_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(ADFSDM_K, adfsdm_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(ADC1_K, adc1_k, SECF_ADC1),
+ STM32_COMPOSITE_CFG(ADC2_K, adc2_k, SECF_ADC2),
+ STM32_COMPOSITE_CFG(RNG1_K, rng1_k, SECF_RNG1),
+ STM32_COMPOSITE_CFG(USBPHY_K, usbphy_k, SECF_USBPHY),
+ STM32_COMPOSITE_CFG(STGEN_K, stgen_k, SECF_STGENC),
+ STM32_COMPOSITE_CFG(SPDIF_K, spdif_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(SPI1_K, spi1_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(SPI4_K, spi4_k, SECF_SPI4),
+ STM32_COMPOSITE_CFG(SPI5_K, spi5_k, SECF_SPI5),
+ STM32_COMPOSITE_CFG(I2C3_K, i2c3_k, SECF_I2C3),
+ STM32_COMPOSITE_CFG(I2C4_K, i2c4_k, SECF_I2C4),
+ STM32_COMPOSITE_CFG(I2C5_K, i2c5_k, SECF_I2C5),
+ STM32_COMPOSITE_CFG(LPTIM1_K, lptim1_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(LPTIM2_K, lptim2_k, SECF_LPTIM2),
+ STM32_COMPOSITE_CFG(LPTIM3_K, lptim3_k, SECF_LPTIM3),
+ STM32_COMPOSITE_CFG(USART1_K, usart1_k, SECF_USART1),
+ STM32_COMPOSITE_CFG(USART2_K, usart2_k, SECF_USART2),
+ STM32_COMPOSITE_CFG(UART4_K, uart4_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(USART6_K, uart6_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(FDCAN_K, fdcan_k, SECF_NONE),
+ STM32_COMPOSITE_CFG(DCMIPP_K, dcmipp_k, SECF_DCMIPP),
+ STM32_COMPOSITE_CFG(USBO_K, usbo_k, SECF_USBO),
+ STM32_COMPOSITE_CFG(SAES_K, saes_k, SECF_SAES),
+ STM32_GATE_CFG(DFSDM_K, dfsdm_k, SECF_NONE),
+ STM32_GATE_CFG(LTDC_PX, ltdc_px, SECF_NONE),
+
+ STM32_MUX_CFG(NO_ID, ck_ker_eth1, SECF_ETH1CK),
+ STM32_GATE_CFG(ETH1CK_K, eth1ck_k, SECF_ETH1CK),
+ STM32_DIV_CFG(ETH1PTP_K, eth1ptp_k, SECF_ETH1CK),
+
+ STM32_MUX_CFG(NO_ID, ck_ker_eth2, SECF_ETH2CK),
+ STM32_GATE_CFG(ETH2CK_K, eth2ck_k, SECF_ETH2CK),
+ STM32_DIV_CFG(ETH2PTP_K, eth2ptp_k, SECF_ETH2CK),
+
+ STM32_GATE_CFG(CK_DBG, ck_sys_dbg, SECF_NONE),
+ STM32_COMPOSITE_CFG(CK_TRACE, ck_trace, SECF_NONE),
+
+ STM32_COMPOSITE_CFG(CK_MCO1, ck_mco1, SECF_MCO1),
+ STM32_COMPOSITE_CFG(CK_MCO2, ck_mco2, SECF_MCO2),
+};
+
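+/*
+ * Report whether the clock described by @cfg is flagged as secure in the
+ * RCC *SECSR registers; clocks with no security id are never secure.
+ */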
+static int stm32mp13_clock_is_provided_by_secure(void __iomem *base,
+ const struct clock_config *cfg)
+{
+ int sec_id = cfg->sec_id;
+
+ if (sec_id != SECF_NONE) {
+ const struct clk_stm32_security *secf;
+
+ secf = &stm32mp13_security[sec_id];
+
+ return !!(readl(base + secf->offset) & BIT(secf->bit_idx));
+ }
+
+ return 0;
+}
+
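+/*
+ * Pairs of kernel clocks sharing a single mux field in the RCC;
+ * stm32mp13_is_multi_mux() returns the other member of the pair so the
+ * clock core can act on both when the shared parent changes.
+ */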
+struct multi_mux {
+ struct clk_hw *hw1;
+ struct clk_hw *hw2;
+};
+
+static struct multi_mux *stm32_mp13_multi_mux[MUX_NB] = {
+ [MUX_SPI23] = &(struct multi_mux){ &spi2_k.hw, &spi3_k.hw },
+ [MUX_I2C12] = &(struct multi_mux){ &i2c1_k.hw, &i2c2_k.hw },
+ [MUX_LPTIM45] = &(struct multi_mux){ &lptim4_k.hw, &lptim5_k.hw },
+ [MUX_UART35] = &(struct multi_mux){ &usart3_k.hw, &uart5_k.hw },
+ [MUX_UART78] = &(struct multi_mux){ &uart7_k.hw, &uart8_k.hw },
+ [MUX_SAI1] = &(struct multi_mux){ &sai1_k.hw, &adfsdm_k.hw },
+};
+
+static struct clk_hw *stm32mp13_is_multi_mux(struct clk_hw *hw)
+{
+ struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
+ struct multi_mux *mmux = stm32_mp13_multi_mux[composite->mux_id];
+
+ if (mmux) {
+ if (mmux->hw1 != hw)
+ return mmux->hw1;
+ else
+ return mmux->hw2;
+ }
+
+ return NULL;
+}
+
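+/* Per-gate counters consumed by the common stm32 clock core. */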
+static u16 stm32mp13_cpt_gate[GATE_NB];
+
+static struct clk_stm32_clock_data stm32mp13_clock_data = {
+ .gate_cpt = stm32mp13_cpt_gate,
+ .gates = stm32mp13_gates,
+ .muxes = stm32mp13_muxes,
+ .dividers = stm32mp13_dividers,
+ .is_multi_mux = stm32mp13_is_multi_mux,
+};
+
+static const struct stm32_rcc_match_data stm32mp13_data = {
+ .tab_clocks = stm32mp13_clock_cfg,
+ .num_clocks = ARRAY_SIZE(stm32mp13_clock_cfg),
+ .clock_data = &stm32mp13_clock_data,
+ .check_security = &stm32mp13_clock_is_provided_by_secure,
+ .maxbinding = STM32MP1_LAST_CLK,
+ .clear_offset = RCC_CLR_OFFSET,
+};
+
+static const struct of_device_id stm32mp13_match_data[] = {
+ {
+ .compatible = "st,stm32mp13-rcc",
+ .data = &stm32mp13_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, stm32mp13_match_data);
+
+static int stm32mp1_rcc_init(struct device *dev)
+{
+ void __iomem *rcc_base;
+ int ret = -ENOMEM;
+
+ rcc_base = of_iomap(dev_of_node(dev), 0);
+ if (!rcc_base) {
+ dev_err(dev, "%pOFn: unable to map resource\n", dev_of_node(dev));
+ goto out;
+ }
+
+ ret = stm32_rcc_init(dev, stm32mp13_match_data, rcc_base);
+out:
+ if (ret) {
+ if (rcc_base)
+ iounmap(rcc_base);
+
+ of_node_put(dev_of_node(dev));
+ }
+
+ return ret;
+}
+
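+/*
+ * Take devm-managed references on the "hsi", "hse", "csi", "lsi" and "lse"
+ * oscillators listed in the device tree so they are held for the lifetime
+ * of the RCC device; a missing entry is not treated as an error.
+ */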
+static int get_clock_deps(struct device *dev)
+{
+ static const char * const clock_deps_name[] = {
+ "hsi", "hse", "csi", "lsi", "lse",
+ };
+ size_t deps_size = sizeof(struct clk *) * ARRAY_SIZE(clock_deps_name);
+ struct clk **clk_deps;
+ int i;
+
+ clk_deps = devm_kzalloc(dev, deps_size, GFP_KERNEL);
+ if (!clk_deps)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(clock_deps_name); i++) {
+ struct clk *clk = of_clk_get_by_name(dev_of_node(dev),
+ clock_deps_name[i]);
+
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EINVAL && PTR_ERR(clk) != -ENOENT)
+ return PTR_ERR(clk);
+ } else {
+ /* Device gets a reference count on the clock */
+ clk_deps[i] = devm_clk_get(dev, __clk_get_name(clk));
+ clk_put(clk);
+ }
+ }
+
+ return 0;
+}
+
+static int stm32mp1_rcc_clocks_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret = get_clock_deps(dev);
+
+ if (!ret)
+ ret = stm32mp1_rcc_init(dev);
+
+ return ret;
+}
+
+static int stm32mp1_rcc_clocks_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *child, *np = dev_of_node(dev);
+
+ for_each_available_child_of_node(np, child)
+ of_clk_del_provider(child);
+
+ return 0;
+}
+
+static struct platform_driver stm32mp13_rcc_clocks_driver = {
+ .driver = {
+ .name = "stm32mp13_rcc",
+ .of_match_table = stm32mp13_match_data,
+ },
+ .probe = stm32mp1_rcc_clocks_probe,
+ .remove = stm32mp1_rcc_clocks_remove,
+};
+
+static int __init stm32mp13_clocks_init(void)
+{
+ return platform_driver_register(&stm32mp13_rcc_clocks_driver);
+}
+core_initcall(stm32mp13_clocks_init);
diff --git a/drivers/clk/stm32/reset-stm32.c b/drivers/clk/stm32/reset-stm32.c
new file mode 100644
index 000000000000..040870130e4b
--- /dev/null
+++ b/drivers/clk/stm32/reset-stm32.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2022 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "clk-stm32-core.h"
+
+#define STM32_RESET_ID_MASK GENMASK(15, 0)
+
+struct stm32_reset_data {
+ /* reset lock */
+ spinlock_t lock;
+ struct reset_controller_dev rcdev;
+ void __iomem *membase;
+ u32 clear_offset;
+};
+
+static inline struct stm32_reset_data *
+to_stm32_reset_data(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct stm32_reset_data, rcdev);
+}
+
+static int stm32_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct stm32_reset_data *data = to_stm32_reset_data(rcdev);
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
+
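+ /*
+ * Banks with a dedicated clear register are updated with a single
+ * write and need no locking; plain banks go through a
+ * read-modify-write sequence under the spinlock.
+ */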
+ if (data->clear_offset) {
+ void __iomem *addr;
+
+ addr = data->membase + (bank * reg_width);
+ if (!assert)
+ addr += data->clear_offset;
+
+ writel(BIT(offset), addr);
+
+ } else {
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + (bank * reg_width));
+
+ if (assert)
+ reg |= BIT(offset);
+ else
+ reg &= ~BIT(offset);
+
+ writel(reg, data->membase + (bank * reg_width));
+
+ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ return 0;
+}
+
+static int stm32_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return stm32_reset_update(rcdev, id, true);
+}
+
+static int stm32_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return stm32_reset_update(rcdev, id, false);
+}
+
+static int stm32_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct stm32_reset_data *data = to_stm32_reset_data(rcdev);
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
+ u32 reg;
+
+ reg = readl(data->membase + (bank * reg_width));
+
+ return !!(reg & BIT(offset));
+}
+
+static const struct reset_control_ops stm32_reset_ops = {
+ .assert = stm32_reset_assert,
+ .deassert = stm32_reset_deassert,
+ .status = stm32_reset_status,
+};
+
+int stm32_rcc_reset_init(struct device *dev, const struct of_device_id *match,
+ void __iomem *base)
+{
+ const struct stm32_rcc_match_data *data = match->data;
+ struct stm32_reset_data *reset_data;
+
+ reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
+ if (!reset_data)
+ return -ENOMEM;
+
+ reset_data->membase = base;
+ reset_data->rcdev.owner = THIS_MODULE;
+ reset_data->rcdev.ops = &stm32_reset_ops;
+ reset_data->rcdev.of_node = dev_of_node(dev);
+ reset_data->rcdev.nr_resets = STM32_RESET_ID_MASK;
+ reset_data->clear_offset = data->clear_offset;
+
+ return reset_controller_register(&reset_data->rcdev);
+}
diff --git a/drivers/clk/stm32/reset-stm32.h b/drivers/clk/stm32/reset-stm32.h
new file mode 100644
index 000000000000..6eb6ea4b55ab
--- /dev/null
+++ b/drivers/clk/stm32/reset-stm32.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics 2022 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+int stm32_rcc_reset_init(struct device *dev, const struct of_device_id *match,
+ void __iomem *base);
diff --git a/drivers/clk/stm32/stm32mp13_rcc.h b/drivers/clk/stm32/stm32mp13_rcc.h
new file mode 100644
index 000000000000..a82512ae08f2
--- /dev/null
+++ b/drivers/clk/stm32/stm32mp13_rcc.h
@@ -0,0 +1,1748 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (C) 2020, STMicroelectronics - All Rights Reserved
+ *
+ * Configuration settings for the STM32MP13x CPU
+ */
+
+#ifndef STM32MP13_RCC_H
+#define STM32MP13_RCC_H
+/* RCC registers */
+#define RCC_SECCFGR 0x0
+#define RCC_MP_SREQSETR 0x100
+#define RCC_MP_SREQCLRR 0x104
+#define RCC_MP_APRSTCR 0x108
+#define RCC_MP_APRSTSR 0x10c
+#define RCC_PWRLPDLYCR 0x110
+#define RCC_MP_GRSTCSETR 0x114
+#define RCC_BR_RSTSCLRR 0x118
+#define RCC_MP_RSTSSETR 0x11c
+#define RCC_MP_RSTSCLRR 0x120
+#define RCC_MP_IWDGFZSETR 0x124
+#define RCC_MP_IWDGFZCLRR 0x128
+#define RCC_MP_CIER 0x200
+#define RCC_MP_CIFR 0x204
+#define RCC_BDCR 0x400
+#define RCC_RDLSICR 0x404
+#define RCC_OCENSETR 0x420
+#define RCC_OCENCLRR 0x424
+#define RCC_OCRDYR 0x428
+#define RCC_HSICFGR 0x440
+#define RCC_CSICFGR 0x444
+#define RCC_MCO1CFGR 0x460
+#define RCC_MCO2CFGR 0x464
+#define RCC_DBGCFGR 0x468
+#define RCC_RCK12SELR 0x480
+#define RCC_RCK3SELR 0x484
+#define RCC_RCK4SELR 0x488
+#define RCC_PLL1CR 0x4a0
+#define RCC_PLL1CFGR1 0x4a4
+#define RCC_PLL1CFGR2 0x4a8
+#define RCC_PLL1FRACR 0x4ac
+#define RCC_PLL1CSGR 0x4b0
+#define RCC_PLL2CR 0x4d0
+#define RCC_PLL2CFGR1 0x4d4
+#define RCC_PLL2CFGR2 0x4d8
+#define RCC_PLL2FRACR 0x4dc
+#define RCC_PLL2CSGR 0x4e0
+#define RCC_PLL3CR 0x500
+#define RCC_PLL3CFGR1 0x504
+#define RCC_PLL3CFGR2 0x508
+#define RCC_PLL3FRACR 0x50c
+#define RCC_PLL3CSGR 0x510
+#define RCC_PLL4CR 0x520
+#define RCC_PLL4CFGR1 0x524
+#define RCC_PLL4CFGR2 0x528
+#define RCC_PLL4FRACR 0x52c
+#define RCC_PLL4CSGR 0x530
+#define RCC_MPCKSELR 0x540
+#define RCC_ASSCKSELR 0x544
+#define RCC_MSSCKSELR 0x548
+#define RCC_CPERCKSELR 0x54c
+#define RCC_RTCDIVR 0x560
+#define RCC_MPCKDIVR 0x564
+#define RCC_AXIDIVR 0x568
+#define RCC_MLAHBDIVR 0x56c
+#define RCC_APB1DIVR 0x570
+#define RCC_APB2DIVR 0x574
+#define RCC_APB3DIVR 0x578
+#define RCC_APB4DIVR 0x57c
+#define RCC_APB5DIVR 0x580
+#define RCC_APB6DIVR 0x584
+#define RCC_TIMG1PRER 0x5a0
+#define RCC_TIMG2PRER 0x5a4
+#define RCC_TIMG3PRER 0x5a8
+#define RCC_DDRITFCR 0x5c0
+#define RCC_I2C12CKSELR 0x600
+#define RCC_I2C345CKSELR 0x604
+#define RCC_SPI2S1CKSELR 0x608
+#define RCC_SPI2S23CKSELR 0x60c
+#define RCC_SPI45CKSELR 0x610
+#define RCC_UART12CKSELR 0x614
+#define RCC_UART35CKSELR 0x618
+#define RCC_UART4CKSELR 0x61c
+#define RCC_UART6CKSELR 0x620
+#define RCC_UART78CKSELR 0x624
+#define RCC_LPTIM1CKSELR 0x628
+#define RCC_LPTIM23CKSELR 0x62c
+#define RCC_LPTIM45CKSELR 0x630
+#define RCC_SAI1CKSELR 0x634
+#define RCC_SAI2CKSELR 0x638
+#define RCC_FDCANCKSELR 0x63c
+#define RCC_SPDIFCKSELR 0x640
+#define RCC_ADC12CKSELR 0x644
+#define RCC_SDMMC12CKSELR 0x648
+#define RCC_ETH12CKSELR 0x64c
+#define RCC_USBCKSELR 0x650
+#define RCC_QSPICKSELR 0x654
+#define RCC_FMCCKSELR 0x658
+#define RCC_RNG1CKSELR 0x65c
+#define RCC_STGENCKSELR 0x660
+#define RCC_DCMIPPCKSELR 0x664
+#define RCC_SAESCKSELR 0x668
+#define RCC_APB1RSTSETR 0x6a0
+#define RCC_APB1RSTCLRR 0x6a4
+#define RCC_APB2RSTSETR 0x6a8
+#define RCC_APB2RSTCLRR 0x6ac
+#define RCC_APB3RSTSETR 0x6b0
+#define RCC_APB3RSTCLRR 0x6b4
+#define RCC_APB4RSTSETR 0x6b8
+#define RCC_APB4RSTCLRR 0x6bc
+#define RCC_APB5RSTSETR 0x6c0
+#define RCC_APB5RSTCLRR 0x6c4
+#define RCC_APB6RSTSETR 0x6c8
+#define RCC_APB6RSTCLRR 0x6cc
+#define RCC_AHB2RSTSETR 0x6d0
+#define RCC_AHB2RSTCLRR 0x6d4
+#define RCC_AHB4RSTSETR 0x6e0
+#define RCC_AHB4RSTCLRR 0x6e4
+#define RCC_AHB5RSTSETR 0x6e8
+#define RCC_AHB5RSTCLRR 0x6ec
+#define RCC_AHB6RSTSETR 0x6f0
+#define RCC_AHB6RSTCLRR 0x6f4
+#define RCC_MP_APB1ENSETR 0x700
+#define RCC_MP_APB1ENCLRR 0x704
+#define RCC_MP_APB2ENSETR 0x708
+#define RCC_MP_APB2ENCLRR 0x70c
+#define RCC_MP_APB3ENSETR 0x710
+#define RCC_MP_APB3ENCLRR 0x714
+#define RCC_MP_S_APB3ENSETR 0x718
+#define RCC_MP_S_APB3ENCLRR 0x71c
+#define RCC_MP_NS_APB3ENSETR 0x720
+#define RCC_MP_NS_APB3ENCLRR 0x724
+#define RCC_MP_APB4ENSETR 0x728
+#define RCC_MP_APB4ENCLRR 0x72c
+#define RCC_MP_S_APB4ENSETR 0x730
+#define RCC_MP_S_APB4ENCLRR 0x734
+#define RCC_MP_NS_APB4ENSETR 0x738
+#define RCC_MP_NS_APB4ENCLRR 0x73c
+#define RCC_MP_APB5ENSETR 0x740
+#define RCC_MP_APB5ENCLRR 0x744
+#define RCC_MP_APB6ENSETR 0x748
+#define RCC_MP_APB6ENCLRR 0x74c
+#define RCC_MP_AHB2ENSETR 0x750
+#define RCC_MP_AHB2ENCLRR 0x754
+#define RCC_MP_AHB4ENSETR 0x760
+#define RCC_MP_AHB4ENCLRR 0x764
+#define RCC_MP_S_AHB4ENSETR 0x768
+#define RCC_MP_S_AHB4ENCLRR 0x76c
+#define RCC_MP_NS_AHB4ENSETR 0x770
+#define RCC_MP_NS_AHB4ENCLRR 0x774
+#define RCC_MP_AHB5ENSETR 0x778
+#define RCC_MP_AHB5ENCLRR 0x77c
+#define RCC_MP_AHB6ENSETR 0x780
+#define RCC_MP_AHB6ENCLRR 0x784
+#define RCC_MP_S_AHB6ENSETR 0x788
+#define RCC_MP_S_AHB6ENCLRR 0x78c
+#define RCC_MP_NS_AHB6ENSETR 0x790
+#define RCC_MP_NS_AHB6ENCLRR 0x794
+#define RCC_MP_APB1LPENSETR 0x800
+#define RCC_MP_APB1LPENCLRR 0x804
+#define RCC_MP_APB2LPENSETR 0x808
+#define RCC_MP_APB2LPENCLRR 0x80c
+#define RCC_MP_APB3LPENSETR 0x810
+#define RCC_MP_APB3LPENCLRR 0x814
+#define RCC_MP_S_APB3LPENSETR 0x818
+#define RCC_MP_S_APB3LPENCLRR 0x81c
+#define RCC_MP_NS_APB3LPENSETR 0x820
+#define RCC_MP_NS_APB3LPENCLRR 0x824
+#define RCC_MP_APB4LPENSETR 0x828
+#define RCC_MP_APB4LPENCLRR 0x82c
+#define RCC_MP_S_APB4LPENSETR 0x830
+#define RCC_MP_S_APB4LPENCLRR 0x834
+#define RCC_MP_NS_APB4LPENSETR 0x838
+#define RCC_MP_NS_APB4LPENCLRR 0x83c
+#define RCC_MP_APB5LPENSETR 0x840
+#define RCC_MP_APB5LPENCLRR 0x844
+#define RCC_MP_APB6LPENSETR 0x848
+#define RCC_MP_APB6LPENCLRR 0x84c
+#define RCC_MP_AHB2LPENSETR 0x850
+#define RCC_MP_AHB2LPENCLRR 0x854
+#define RCC_MP_AHB4LPENSETR 0x858
+#define RCC_MP_AHB4LPENCLRR 0x85c
+#define RCC_MP_S_AHB4LPENSETR 0x868
+#define RCC_MP_S_AHB4LPENCLRR 0x86c
+#define RCC_MP_NS_AHB4LPENSETR 0x870
+#define RCC_MP_NS_AHB4LPENCLRR 0x874
+#define RCC_MP_AHB5LPENSETR 0x878
+#define RCC_MP_AHB5LPENCLRR 0x87c
+#define RCC_MP_AHB6LPENSETR 0x880
+#define RCC_MP_AHB6LPENCLRR 0x884
+#define RCC_MP_S_AHB6LPENSETR 0x888
+#define RCC_MP_S_AHB6LPENCLRR 0x88c
+#define RCC_MP_NS_AHB6LPENSETR 0x890
+#define RCC_MP_NS_AHB6LPENCLRR 0x894
+#define RCC_MP_S_AXIMLPENSETR 0x898
+#define RCC_MP_S_AXIMLPENCLRR 0x89c
+#define RCC_MP_NS_AXIMLPENSETR 0x8a0
+#define RCC_MP_NS_AXIMLPENCLRR 0x8a4
+#define RCC_MP_MLAHBLPENSETR 0x8a8
+#define RCC_MP_MLAHBLPENCLRR 0x8ac
+#define RCC_APB3SECSR 0x8c0
+#define RCC_APB4SECSR 0x8c4
+#define RCC_APB5SECSR 0x8c8
+#define RCC_APB6SECSR 0x8cc
+#define RCC_AHB2SECSR 0x8d0
+#define RCC_AHB4SECSR 0x8d4
+#define RCC_AHB5SECSR 0x8d8
+#define RCC_AHB6SECSR 0x8dc
+#define RCC_VERR 0xff4
+#define RCC_IDR 0xff8
+#define RCC_SIDR 0xffc
+
+/* RCC_SECCFGR register fields */
+#define RCC_SECCFGR_HSISEC 0
+#define RCC_SECCFGR_CSISEC 1
+#define RCC_SECCFGR_HSESEC 2
+#define RCC_SECCFGR_LSISEC 3
+#define RCC_SECCFGR_LSESEC 4
+#define RCC_SECCFGR_PLL12SEC 8
+#define RCC_SECCFGR_PLL3SEC 9
+#define RCC_SECCFGR_PLL4SEC 10
+#define RCC_SECCFGR_MPUSEC 11
+#define RCC_SECCFGR_AXISEC 12
+#define RCC_SECCFGR_MLAHBSEC 13
+#define RCC_SECCFGR_APB3DIVSEC 16
+#define RCC_SECCFGR_APB4DIVSEC 17
+#define RCC_SECCFGR_APB5DIVSEC 18
+#define RCC_SECCFGR_APB6DIVSEC 19
+#define RCC_SECCFGR_TIMG3SEC 20
+#define RCC_SECCFGR_CPERSEC 21
+#define RCC_SECCFGR_MCO1SEC 22
+#define RCC_SECCFGR_MCO2SEC 23
+#define RCC_SECCFGR_STPSEC 24
+#define RCC_SECCFGR_RSTSEC 25
+#define RCC_SECCFGR_PWRSEC 31
+
+/* RCC_MP_SREQSETR register fields */
+#define RCC_MP_SREQSETR_STPREQ_P0 BIT(0)
+
+/* RCC_MP_SREQCLRR register fields */
+#define RCC_MP_SREQCLRR_STPREQ_P0 BIT(0)
+
+/* RCC_MP_APRSTCR register fields */
+#define RCC_MP_APRSTCR_RDCTLEN BIT(0)
+#define RCC_MP_APRSTCR_RSTTO_MASK GENMASK(14, 8)
+#define RCC_MP_APRSTCR_RSTTO_SHIFT 8
+
+/* RCC_MP_APRSTSR register fields */
+#define RCC_MP_APRSTSR_RSTTOV_MASK GENMASK(14, 8)
+#define RCC_MP_APRSTSR_RSTTOV_SHIFT 8
+
+/* RCC_PWRLPDLYCR register fields */
+#define RCC_PWRLPDLYCR_PWRLP_DLY_MASK GENMASK(21, 0)
+#define RCC_PWRLPDLYCR_PWRLP_DLY_SHIFT 0
+
+/* RCC_MP_GRSTCSETR register fields */
+#define RCC_MP_GRSTCSETR_MPSYSRST BIT(0)
+#define RCC_MP_GRSTCSETR_MPUP0RST BIT(4)
+
+/* RCC_BR_RSTSCLRR register fields */
+#define RCC_BR_RSTSCLRR_PORRSTF BIT(0)
+#define RCC_BR_RSTSCLRR_BORRSTF BIT(1)
+#define RCC_BR_RSTSCLRR_PADRSTF BIT(2)
+#define RCC_BR_RSTSCLRR_HCSSRSTF BIT(3)
+#define RCC_BR_RSTSCLRR_VCORERSTF BIT(4)
+#define RCC_BR_RSTSCLRR_VCPURSTF BIT(5)
+#define RCC_BR_RSTSCLRR_MPSYSRSTF BIT(6)
+#define RCC_BR_RSTSCLRR_IWDG1RSTF BIT(8)
+#define RCC_BR_RSTSCLRR_IWDG2RSTF BIT(9)
+#define RCC_BR_RSTSCLRR_MPUP0RSTF BIT(13)
+
+/* RCC_MP_RSTSSETR register fields */
+#define RCC_MP_RSTSSETR_PORRSTF BIT(0)
+#define RCC_MP_RSTSSETR_BORRSTF BIT(1)
+#define RCC_MP_RSTSSETR_PADRSTF BIT(2)
+#define RCC_MP_RSTSSETR_HCSSRSTF BIT(3)
+#define RCC_MP_RSTSSETR_VCORERSTF BIT(4)
+#define RCC_MP_RSTSSETR_VCPURSTF BIT(5)
+#define RCC_MP_RSTSSETR_MPSYSRSTF BIT(6)
+#define RCC_MP_RSTSSETR_IWDG1RSTF BIT(8)
+#define RCC_MP_RSTSSETR_IWDG2RSTF BIT(9)
+#define RCC_MP_RSTSSETR_STP2RSTF BIT(10)
+#define RCC_MP_RSTSSETR_STDBYRSTF BIT(11)
+#define RCC_MP_RSTSSETR_CSTDBYRSTF BIT(12)
+#define RCC_MP_RSTSSETR_MPUP0RSTF BIT(13)
+#define RCC_MP_RSTSSETR_SPARE BIT(15)
+
+/* RCC_MP_RSTSCLRR register fields */
+#define RCC_MP_RSTSCLRR_PORRSTF BIT(0)
+#define RCC_MP_RSTSCLRR_BORRSTF BIT(1)
+#define RCC_MP_RSTSCLRR_PADRSTF BIT(2)
+#define RCC_MP_RSTSCLRR_HCSSRSTF BIT(3)
+#define RCC_MP_RSTSCLRR_VCORERSTF BIT(4)
+#define RCC_MP_RSTSCLRR_VCPURSTF BIT(5)
+#define RCC_MP_RSTSCLRR_MPSYSRSTF BIT(6)
+#define RCC_MP_RSTSCLRR_IWDG1RSTF BIT(8)
+#define RCC_MP_RSTSCLRR_IWDG2RSTF BIT(9)
+#define RCC_MP_RSTSCLRR_STP2RSTF BIT(10)
+#define RCC_MP_RSTSCLRR_STDBYRSTF BIT(11)
+#define RCC_MP_RSTSCLRR_CSTDBYRSTF BIT(12)
+#define RCC_MP_RSTSCLRR_MPUP0RSTF BIT(13)
+#define RCC_MP_RSTSCLRR_SPARE BIT(15)
+
+/* RCC_MP_IWDGFZSETR register fields */
+#define RCC_MP_IWDGFZSETR_FZ_IWDG1 BIT(0)
+#define RCC_MP_IWDGFZSETR_FZ_IWDG2 BIT(1)
+
+/* RCC_MP_IWDGFZCLRR register fields */
+#define RCC_MP_IWDGFZCLRR_FZ_IWDG1 BIT(0)
+#define RCC_MP_IWDGFZCLRR_FZ_IWDG2 BIT(1)
+
+/* RCC_MP_CIER register fields */
+#define RCC_MP_CIER_LSIRDYIE BIT(0)
+#define RCC_MP_CIER_LSERDYIE BIT(1)
+#define RCC_MP_CIER_HSIRDYIE BIT(2)
+#define RCC_MP_CIER_HSERDYIE BIT(3)
+#define RCC_MP_CIER_CSIRDYIE BIT(4)
+#define RCC_MP_CIER_PLL1DYIE BIT(8)
+#define RCC_MP_CIER_PLL2DYIE BIT(9)
+#define RCC_MP_CIER_PLL3DYIE BIT(10)
+#define RCC_MP_CIER_PLL4DYIE BIT(11)
+#define RCC_MP_CIER_LSECSSIE BIT(16)
+#define RCC_MP_CIER_WKUPIE BIT(20)
+
+/* RCC_MP_CIFR register fields */
+#define RCC_MP_CIFR_LSIRDYF BIT(0)
+#define RCC_MP_CIFR_LSERDYF BIT(1)
+#define RCC_MP_CIFR_HSIRDYF BIT(2)
+#define RCC_MP_CIFR_HSERDYF BIT(3)
+#define RCC_MP_CIFR_CSIRDYF BIT(4)
+#define RCC_MP_CIFR_PLL1DYF BIT(8)
+#define RCC_MP_CIFR_PLL2DYF BIT(9)
+#define RCC_MP_CIFR_PLL3DYF BIT(10)
+#define RCC_MP_CIFR_PLL4DYF BIT(11)
+#define RCC_MP_CIFR_LSECSSF BIT(16)
+#define RCC_MP_CIFR_WKUPF BIT(20)
+
+/* RCC_BDCR register fields */
+#define RCC_BDCR_LSEON BIT(0)
+#define RCC_BDCR_LSEBYP BIT(1)
+#define RCC_BDCR_LSERDY BIT(2)
+#define RCC_BDCR_DIGBYP BIT(3)
+#define RCC_BDCR_LSEDRV_MASK GENMASK(5, 4)
+#define RCC_BDCR_LSECSSON BIT(8)
+#define RCC_BDCR_LSECSSD BIT(9)
+#define RCC_BDCR_RTCSRC_MASK GENMASK(17, 16)
+#define RCC_BDCR_RTCCKEN BIT(20)
+#define RCC_BDCR_VSWRST BIT(31)
+#define RCC_BDCR_LSEDRV_SHIFT 4
+#define RCC_BDCR_RTCSRC_SHIFT 16
+
+/* RCC_RDLSICR register fields */
+#define RCC_RDLSICR_LSION BIT(0)
+#define RCC_RDLSICR_LSIRDY BIT(1)
+#define RCC_RDLSICR_MRD_MASK GENMASK(20, 16)
+#define RCC_RDLSICR_EADLY_MASK GENMASK(26, 24)
+#define RCC_RDLSICR_SPARE_MASK GENMASK(31, 27)
+#define RCC_RDLSICR_MRD_SHIFT 16
+#define RCC_RDLSICR_EADLY_SHIFT 24
+#define RCC_RDLSICR_SPARE_SHIFT 27
+
+/* RCC_OCENSETR register fields */
+#define RCC_OCENSETR_HSION BIT(0)
+#define RCC_OCENSETR_HSIKERON BIT(1)
+#define RCC_OCENSETR_CSION BIT(4)
+#define RCC_OCENSETR_CSIKERON BIT(5)
+#define RCC_OCENSETR_DIGBYP BIT(7)
+#define RCC_OCENSETR_HSEON BIT(8)
+#define RCC_OCENSETR_HSEKERON BIT(9)
+#define RCC_OCENSETR_HSEBYP BIT(10)
+#define RCC_OCENSETR_HSECSSON BIT(11)
+
+/* RCC_OCENCLRR register fields */
+#define RCC_OCENCLRR_HSION BIT(0)
+#define RCC_OCENCLRR_HSIKERON BIT(1)
+#define RCC_OCENCLRR_CSION BIT(4)
+#define RCC_OCENCLRR_CSIKERON BIT(5)
+#define RCC_OCENCLRR_DIGBYP BIT(7)
+#define RCC_OCENCLRR_HSEON BIT(8)
+#define RCC_OCENCLRR_HSEKERON BIT(9)
+#define RCC_OCENCLRR_HSEBYP BIT(10)
+
+/* RCC_OCRDYR register fields */
+#define RCC_OCRDYR_HSIRDY BIT(0)
+#define RCC_OCRDYR_HSIDIVRDY BIT(2)
+#define RCC_OCRDYR_CSIRDY BIT(4)
+#define RCC_OCRDYR_HSERDY BIT(8)
+#define RCC_OCRDYR_MPUCKRDY BIT(23)
+#define RCC_OCRDYR_AXICKRDY BIT(24)
+
+/* RCC_HSICFGR register fields */
+#define RCC_HSICFGR_HSIDIV_MASK GENMASK(1, 0)
+#define RCC_HSICFGR_HSITRIM_MASK GENMASK(14, 8)
+#define RCC_HSICFGR_HSICAL_MASK GENMASK(27, 16)
+#define RCC_HSICFGR_HSIDIV_SHIFT 0
+#define RCC_HSICFGR_HSITRIM_SHIFT 8
+#define RCC_HSICFGR_HSICAL_SHIFT 16
+
+/* RCC_CSICFGR register fields */
+#define RCC_CSICFGR_CSITRIM_MASK GENMASK(12, 8)
+#define RCC_CSICFGR_CSICAL_MASK GENMASK(23, 16)
+#define RCC_CSICFGR_CSITRIM_SHIFT 8
+#define RCC_CSICFGR_CSICAL_SHIFT 16
+
+/* RCC_MCO1CFGR register fields */
+#define RCC_MCO1CFGR_MCO1SEL_MASK GENMASK(2, 0)
+#define RCC_MCO1CFGR_MCO1DIV_MASK GENMASK(7, 4)
+#define RCC_MCO1CFGR_MCO1ON BIT(12)
+#define RCC_MCO1CFGR_MCO1SEL_SHIFT 0
+#define RCC_MCO1CFGR_MCO1DIV_SHIFT 4
+
+/* RCC_MCO2CFGR register fields */
+#define RCC_MCO2CFGR_MCO2SEL_MASK GENMASK(2, 0)
+#define RCC_MCO2CFGR_MCO2DIV_MASK GENMASK(7, 4)
+#define RCC_MCO2CFGR_MCO2ON BIT(12)
+#define RCC_MCO2CFGR_MCO2SEL_SHIFT 0
+#define RCC_MCO2CFGR_MCO2DIV_SHIFT 4
+
+/* RCC_DBGCFGR register fields */
+#define RCC_DBGCFGR_TRACEDIV_MASK GENMASK(2, 0)
+#define RCC_DBGCFGR_DBGCKEN BIT(8)
+#define RCC_DBGCFGR_TRACECKEN BIT(9)
+#define RCC_DBGCFGR_DBGRST BIT(12)
+#define RCC_DBGCFGR_TRACEDIV_SHIFT 0
+
+/* RCC_RCK12SELR register fields */
+#define RCC_RCK12SELR_PLL12SRC_MASK GENMASK(1, 0)
+#define RCC_RCK12SELR_PLL12SRCRDY BIT(31)
+#define RCC_RCK12SELR_PLL12SRC_SHIFT 0
+
+/* RCC_RCK3SELR register fields */
+#define RCC_RCK3SELR_PLL3SRC_MASK GENMASK(1, 0)
+#define RCC_RCK3SELR_PLL3SRCRDY BIT(31)
+#define RCC_RCK3SELR_PLL3SRC_SHIFT 0
+
+/* RCC_RCK4SELR register fields */
+#define RCC_RCK4SELR_PLL4SRC_MASK GENMASK(1, 0)
+#define RCC_RCK4SELR_PLL4SRCRDY BIT(31)
+#define RCC_RCK4SELR_PLL4SRC_SHIFT 0
+
+/* RCC_PLL1CR register fields */
+#define RCC_PLL1CR_PLLON BIT(0)
+#define RCC_PLL1CR_PLL1RDY BIT(1)
+#define RCC_PLL1CR_SSCG_CTRL BIT(2)
+#define RCC_PLL1CR_DIVPEN BIT(4)
+#define RCC_PLL1CR_DIVQEN BIT(5)
+#define RCC_PLL1CR_DIVREN BIT(6)
+
+/* RCC_PLL1CFGR1 register fields */
+#define RCC_PLL1CFGR1_DIVN_MASK GENMASK(8, 0)
+#define RCC_PLL1CFGR1_DIVM1_MASK GENMASK(21, 16)
+#define RCC_PLL1CFGR1_DIVN_SHIFT 0
+#define RCC_PLL1CFGR1_DIVM1_SHIFT 16
+
+/* RCC_PLL1CFGR2 register fields */
+#define RCC_PLL1CFGR2_DIVP_MASK GENMASK(6, 0)
+#define RCC_PLL1CFGR2_DIVQ_MASK GENMASK(14, 8)
+#define RCC_PLL1CFGR2_DIVR_MASK GENMASK(22, 16)
+#define RCC_PLL1CFGR2_DIVP_SHIFT 0
+#define RCC_PLL1CFGR2_DIVQ_SHIFT 8
+#define RCC_PLL1CFGR2_DIVR_SHIFT 16
+
+/* RCC_PLL1FRACR register fields */
+#define RCC_PLL1FRACR_FRACV_MASK GENMASK(15, 3)
+#define RCC_PLL1FRACR_FRACLE BIT(16)
+#define RCC_PLL1FRACR_FRACV_SHIFT 3
+
+/* RCC_PLL1CSGR register fields */
+#define RCC_PLL1CSGR_MOD_PER_MASK GENMASK(12, 0)
+#define RCC_PLL1CSGR_TPDFN_DIS BIT(13)
+#define RCC_PLL1CSGR_RPDFN_DIS BIT(14)
+#define RCC_PLL1CSGR_SSCG_MODE BIT(15)
+#define RCC_PLL1CSGR_INC_STEP_MASK GENMASK(30, 16)
+#define RCC_PLL1CSGR_MOD_PER_SHIFT 0
+#define RCC_PLL1CSGR_INC_STEP_SHIFT 16
+
+/* RCC_PLL2CR register fields */
+#define RCC_PLL2CR_PLLON BIT(0)
+#define RCC_PLL2CR_PLL2RDY BIT(1)
+#define RCC_PLL2CR_SSCG_CTRL BIT(2)
+#define RCC_PLL2CR_DIVPEN BIT(4)
+#define RCC_PLL2CR_DIVQEN BIT(5)
+#define RCC_PLL2CR_DIVREN BIT(6)
+
+/* RCC_PLL2CFGR1 register fields */
+#define RCC_PLL2CFGR1_DIVN_MASK GENMASK(8, 0)
+#define RCC_PLL2CFGR1_DIVM2_MASK GENMASK(21, 16)
+#define RCC_PLL2CFGR1_DIVN_SHIFT 0
+#define RCC_PLL2CFGR1_DIVM2_SHIFT 16
+
+/* RCC_PLL2CFGR2 register fields */
+#define RCC_PLL2CFGR2_DIVP_MASK GENMASK(6, 0)
+#define RCC_PLL2CFGR2_DIVQ_MASK GENMASK(14, 8)
+#define RCC_PLL2CFGR2_DIVR_MASK GENMASK(22, 16)
+#define RCC_PLL2CFGR2_DIVP_SHIFT 0
+#define RCC_PLL2CFGR2_DIVQ_SHIFT 8
+#define RCC_PLL2CFGR2_DIVR_SHIFT 16
+
+/* RCC_PLL2FRACR register fields */
+#define RCC_PLL2FRACR_FRACV_MASK GENMASK(15, 3)
+#define RCC_PLL2FRACR_FRACLE BIT(16)
+#define RCC_PLL2FRACR_FRACV_SHIFT 3
+
+/* RCC_PLL2CSGR register fields */
+#define RCC_PLL2CSGR_MOD_PER_MASK GENMASK(12, 0)
+#define RCC_PLL2CSGR_TPDFN_DIS BIT(13)
+#define RCC_PLL2CSGR_RPDFN_DIS BIT(14)
+#define RCC_PLL2CSGR_SSCG_MODE BIT(15)
+#define RCC_PLL2CSGR_INC_STEP_MASK GENMASK(30, 16)
+#define RCC_PLL2CSGR_MOD_PER_SHIFT 0
+#define RCC_PLL2CSGR_INC_STEP_SHIFT 16
+
+/* RCC_PLL3CR register fields */
+#define RCC_PLL3CR_PLLON BIT(0)
+#define RCC_PLL3CR_PLL3RDY BIT(1)
+#define RCC_PLL3CR_SSCG_CTRL BIT(2)
+#define RCC_PLL3CR_DIVPEN BIT(4)
+#define RCC_PLL3CR_DIVQEN BIT(5)
+#define RCC_PLL3CR_DIVREN BIT(6)
+
+/* RCC_PLL3CFGR1 register fields */
+#define RCC_PLL3CFGR1_DIVN_MASK GENMASK(8, 0)
+#define RCC_PLL3CFGR1_DIVM3_MASK GENMASK(21, 16)
+#define RCC_PLL3CFGR1_IFRGE_MASK GENMASK(25, 24)
+#define RCC_PLL3CFGR1_DIVN_SHIFT 0
+#define RCC_PLL3CFGR1_DIVM3_SHIFT 16
+#define RCC_PLL3CFGR1_IFRGE_SHIFT 24
+
+/* RCC_PLL3CFGR2 register fields */
+#define RCC_PLL3CFGR2_DIVP_MASK GENMASK(6, 0)
+#define RCC_PLL3CFGR2_DIVQ_MASK GENMASK(14, 8)
+#define RCC_PLL3CFGR2_DIVR_MASK GENMASK(22, 16)
+#define RCC_PLL3CFGR2_DIVP_SHIFT 0
+#define RCC_PLL3CFGR2_DIVQ_SHIFT 8
+#define RCC_PLL3CFGR2_DIVR_SHIFT 16
+
+/* RCC_PLL3FRACR register fields */
+#define RCC_PLL3FRACR_FRACV_MASK GENMASK(15, 3)
+#define RCC_PLL3FRACR_FRACLE BIT(16)
+#define RCC_PLL3FRACR_FRACV_SHIFT 3
+
+/* RCC_PLL3CSGR register fields */
+#define RCC_PLL3CSGR_MOD_PER_MASK GENMASK(12, 0)
+#define RCC_PLL3CSGR_TPDFN_DIS BIT(13)
+#define RCC_PLL3CSGR_RPDFN_DIS BIT(14)
+#define RCC_PLL3CSGR_SSCG_MODE BIT(15)
+#define RCC_PLL3CSGR_INC_STEP_MASK GENMASK(30, 16)
+#define RCC_PLL3CSGR_MOD_PER_SHIFT 0
+#define RCC_PLL3CSGR_INC_STEP_SHIFT 16
+
+/* RCC_PLL4CR register fields */
+#define RCC_PLL4CR_PLLON BIT(0)
+#define RCC_PLL4CR_PLL4RDY BIT(1)
+#define RCC_PLL4CR_SSCG_CTRL BIT(2)
+#define RCC_PLL4CR_DIVPEN BIT(4)
+#define RCC_PLL4CR_DIVQEN BIT(5)
+#define RCC_PLL4CR_DIVREN BIT(6)
+
+/* RCC_PLL4CFGR1 register fields */
+#define RCC_PLL4CFGR1_DIVN_MASK GENMASK(8, 0)
+#define RCC_PLL4CFGR1_DIVM4_MASK GENMASK(21, 16)
+#define RCC_PLL4CFGR1_IFRGE_MASK GENMASK(25, 24)
+#define RCC_PLL4CFGR1_DIVN_SHIFT 0
+#define RCC_PLL4CFGR1_DIVM4_SHIFT 16
+#define RCC_PLL4CFGR1_IFRGE_SHIFT 24
+
+/* RCC_PLL4CFGR2 register fields */
+#define RCC_PLL4CFGR2_DIVP_MASK GENMASK(6, 0)
+#define RCC_PLL4CFGR2_DIVQ_MASK GENMASK(14, 8)
+#define RCC_PLL4CFGR2_DIVR_MASK GENMASK(22, 16)
+#define RCC_PLL4CFGR2_DIVP_SHIFT 0
+#define RCC_PLL4CFGR2_DIVQ_SHIFT 8
+#define RCC_PLL4CFGR2_DIVR_SHIFT 16
+
+/* RCC_PLL4FRACR register fields */
+#define RCC_PLL4FRACR_FRACV_MASK GENMASK(15, 3)
+#define RCC_PLL4FRACR_FRACLE BIT(16)
+#define RCC_PLL4FRACR_FRACV_SHIFT 3
+
+/* RCC_PLL4CSGR register fields */
+#define RCC_PLL4CSGR_MOD_PER_MASK GENMASK(12, 0)
+#define RCC_PLL4CSGR_TPDFN_DIS BIT(13)
+#define RCC_PLL4CSGR_RPDFN_DIS BIT(14)
+#define RCC_PLL4CSGR_SSCG_MODE BIT(15)
+#define RCC_PLL4CSGR_INC_STEP_MASK GENMASK(30, 16)
+#define RCC_PLL4CSGR_MOD_PER_SHIFT 0
+#define RCC_PLL4CSGR_INC_STEP_SHIFT 16
+
+/* RCC_MPCKSELR register fields */
+#define RCC_MPCKSELR_MPUSRC_MASK GENMASK(1, 0)
+#define RCC_MPCKSELR_MPUSRCRDY BIT(31)
+#define RCC_MPCKSELR_MPUSRC_SHIFT 0
+
+/* RCC_ASSCKSELR register fields */
+#define RCC_ASSCKSELR_AXISSRC_MASK GENMASK(2, 0)
+#define RCC_ASSCKSELR_AXISSRCRDY BIT(31)
+#define RCC_ASSCKSELR_AXISSRC_SHIFT 0
+
+/* RCC_MSSCKSELR register fields */
+#define RCC_MSSCKSELR_MLAHBSSRC_MASK GENMASK(1, 0)
+#define RCC_MSSCKSELR_MLAHBSSRCRDY BIT(31)
+#define RCC_MSSCKSELR_MLAHBSSRC_SHIFT 0
+
+/* RCC_CPERCKSELR register fields */
+#define RCC_CPERCKSELR_CKPERSRC_MASK GENMASK(1, 0)
+#define RCC_CPERCKSELR_CKPERSRC_SHIFT 0
+
+/* RCC_RTCDIVR register fields */
+#define RCC_RTCDIVR_RTCDIV_MASK GENMASK(5, 0)
+#define RCC_RTCDIVR_RTCDIV_SHIFT 0
+
+/* RCC_MPCKDIVR register fields */
+#define RCC_MPCKDIVR_MPUDIV_MASK GENMASK(3, 0)
+#define RCC_MPCKDIVR_MPUDIVRDY BIT(31)
+#define RCC_MPCKDIVR_MPUDIV_SHIFT 0
+
+/* RCC_AXIDIVR register fields */
+#define RCC_AXIDIVR_AXIDIV_MASK GENMASK(2, 0)
+#define RCC_AXIDIVR_AXIDIVRDY BIT(31)
+#define RCC_AXIDIVR_AXIDIV_SHIFT 0
+
+/* RCC_MLAHBDIVR register fields */
+#define RCC_MLAHBDIVR_MLAHBDIV_MASK GENMASK(3, 0)
+#define RCC_MLAHBDIVR_MLAHBDIVRDY BIT(31)
+#define RCC_MLAHBDIVR_MLAHBDIV_SHIFT 0
+
+/* RCC_APB1DIVR register fields */
+#define RCC_APB1DIVR_APB1DIV_MASK GENMASK(2, 0)
+#define RCC_APB1DIVR_APB1DIVRDY BIT(31)
+#define RCC_APB1DIVR_APB1DIV_SHIFT 0
+
+/* RCC_APB2DIVR register fields */
+#define RCC_APB2DIVR_APB2DIV_MASK GENMASK(2, 0)
+#define RCC_APB2DIVR_APB2DIVRDY BIT(31)
+#define RCC_APB2DIVR_APB2DIV_SHIFT 0
+
+/* RCC_APB3DIVR register fields */
+#define RCC_APB3DIVR_APB3DIV_MASK GENMASK(2, 0)
+#define RCC_APB3DIVR_APB3DIVRDY BIT(31)
+#define RCC_APB3DIVR_APB3DIV_SHIFT 0
+
+/* RCC_APB4DIVR register fields */
+#define RCC_APB4DIVR_APB4DIV_MASK GENMASK(2, 0)
+#define RCC_APB4DIVR_APB4DIVRDY BIT(31)
+#define RCC_APB4DIVR_APB4DIV_SHIFT 0
+
+/* RCC_APB5DIVR register fields */
+#define RCC_APB5DIVR_APB5DIV_MASK GENMASK(2, 0)
+#define RCC_APB5DIVR_APB5DIVRDY BIT(31)
+#define RCC_APB5DIVR_APB5DIV_SHIFT 0
+
+/* RCC_APB6DIVR register fields */
+#define RCC_APB6DIVR_APB6DIV_MASK GENMASK(2, 0)
+#define RCC_APB6DIVR_APB6DIVRDY BIT(31)
+#define RCC_APB6DIVR_APB6DIV_SHIFT 0
+
+/* RCC_TIMG1PRER register fields */
+#define RCC_TIMG1PRER_TIMG1PRE BIT(0)
+#define RCC_TIMG1PRER_TIMG1PRERDY BIT(31)
+
+/* RCC_TIMG2PRER register fields */
+#define RCC_TIMG2PRER_TIMG2PRE BIT(0)
+#define RCC_TIMG2PRER_TIMG2PRERDY BIT(31)
+
+/* RCC_TIMG3PRER register fields */
+#define RCC_TIMG3PRER_TIMG3PRE BIT(0)
+#define RCC_TIMG3PRER_TIMG3PRERDY BIT(31)
+
+/* RCC_DDRITFCR register fields */
+#define RCC_DDRITFCR_DDRC1EN BIT(0)
+#define RCC_DDRITFCR_DDRC1LPEN BIT(1)
+#define RCC_DDRITFCR_DDRPHYCEN BIT(4)
+#define RCC_DDRITFCR_DDRPHYCLPEN BIT(5)
+#define RCC_DDRITFCR_DDRCAPBEN BIT(6)
+#define RCC_DDRITFCR_DDRCAPBLPEN BIT(7)
+#define RCC_DDRITFCR_AXIDCGEN BIT(8)
+#define RCC_DDRITFCR_DDRPHYCAPBEN BIT(9)
+#define RCC_DDRITFCR_DDRPHYCAPBLPEN BIT(10)
+#define RCC_DDRITFCR_KERDCG_DLY_MASK GENMASK(13, 11)
+#define RCC_DDRITFCR_DDRCAPBRST BIT(14)
+#define RCC_DDRITFCR_DDRCAXIRST BIT(15)
+#define RCC_DDRITFCR_DDRCORERST BIT(16)
+#define RCC_DDRITFCR_DPHYAPBRST BIT(17)
+#define RCC_DDRITFCR_DPHYRST BIT(18)
+#define RCC_DDRITFCR_DPHYCTLRST BIT(19)
+#define RCC_DDRITFCR_DDRCKMOD_MASK GENMASK(22, 20)
+#define RCC_DDRITFCR_GSKPMOD BIT(23)
+#define RCC_DDRITFCR_GSKPCTRL BIT(24)
+#define RCC_DDRITFCR_DFILP_WIDTH_MASK GENMASK(27, 25)
+#define RCC_DDRITFCR_GSKP_DUR_MASK GENMASK(31, 28)
+#define RCC_DDRITFCR_KERDCG_DLY_SHIFT 11
+#define RCC_DDRITFCR_DDRCKMOD_SHIFT 20
+#define RCC_DDRITFCR_DFILP_WIDTH_SHIFT 25
+#define RCC_DDRITFCR_GSKP_DUR_SHIFT 28
+
+/* RCC_I2C12CKSELR register fields */
+#define RCC_I2C12CKSELR_I2C12SRC_MASK GENMASK(2, 0)
+#define RCC_I2C12CKSELR_I2C12SRC_SHIFT 0
+
+/* RCC_I2C345CKSELR register fields */
+#define RCC_I2C345CKSELR_I2C3SRC_MASK GENMASK(2, 0)
+#define RCC_I2C345CKSELR_I2C4SRC_MASK GENMASK(5, 3)
+#define RCC_I2C345CKSELR_I2C5SRC_MASK GENMASK(8, 6)
+#define RCC_I2C345CKSELR_I2C3SRC_SHIFT 0
+#define RCC_I2C345CKSELR_I2C4SRC_SHIFT 3
+#define RCC_I2C345CKSELR_I2C5SRC_SHIFT 6
+
+/* RCC_SPI2S1CKSELR register fields */
+#define RCC_SPI2S1CKSELR_SPI1SRC_MASK GENMASK(2, 0)
+#define RCC_SPI2S1CKSELR_SPI1SRC_SHIFT 0
+
+/* RCC_SPI2S23CKSELR register fields */
+#define RCC_SPI2S23CKSELR_SPI23SRC_MASK GENMASK(2, 0)
+#define RCC_SPI2S23CKSELR_SPI23SRC_SHIFT 0
+
+/* RCC_SPI45CKSELR register fields */
+#define RCC_SPI45CKSELR_SPI4SRC_MASK GENMASK(2, 0)
+#define RCC_SPI45CKSELR_SPI5SRC_MASK GENMASK(5, 3)
+#define RCC_SPI45CKSELR_SPI4SRC_SHIFT 0
+#define RCC_SPI45CKSELR_SPI5SRC_SHIFT 3
+
+/* RCC_UART12CKSELR register fields */
+#define RCC_UART12CKSELR_UART1SRC_MASK GENMASK(2, 0)
+#define RCC_UART12CKSELR_UART2SRC_MASK GENMASK(5, 3)
+#define RCC_UART12CKSELR_UART1SRC_SHIFT 0
+#define RCC_UART12CKSELR_UART2SRC_SHIFT 3
+
+/* RCC_UART35CKSELR register fields */
+#define RCC_UART35CKSELR_UART35SRC_MASK GENMASK(2, 0)
+#define RCC_UART35CKSELR_UART35SRC_SHIFT 0
+
+/* RCC_UART4CKSELR register fields */
+#define RCC_UART4CKSELR_UART4SRC_MASK GENMASK(2, 0)
+#define RCC_UART4CKSELR_UART4SRC_SHIFT 0
+
+/* RCC_UART6CKSELR register fields */
+#define RCC_UART6CKSELR_UART6SRC_MASK GENMASK(2, 0)
+#define RCC_UART6CKSELR_UART6SRC_SHIFT 0
+
+/* RCC_UART78CKSELR register fields */
+#define RCC_UART78CKSELR_UART78SRC_MASK GENMASK(2, 0)
+#define RCC_UART78CKSELR_UART78SRC_SHIFT 0
+
+/* RCC_LPTIM1CKSELR register fields */
+#define RCC_LPTIM1CKSELR_LPTIM1SRC_MASK GENMASK(2, 0)
+#define RCC_LPTIM1CKSELR_LPTIM1SRC_SHIFT 0
+
+/* RCC_LPTIM23CKSELR register fields */
+#define RCC_LPTIM23CKSELR_LPTIM2SRC_MASK GENMASK(2, 0)
+#define RCC_LPTIM23CKSELR_LPTIM3SRC_MASK GENMASK(5, 3)
+#define RCC_LPTIM23CKSELR_LPTIM2SRC_SHIFT 0
+#define RCC_LPTIM23CKSELR_LPTIM3SRC_SHIFT 3
+
+/* RCC_LPTIM45CKSELR register fields */
+#define RCC_LPTIM45CKSELR_LPTIM45SRC_MASK GENMASK(2, 0)
+#define RCC_LPTIM45CKSELR_LPTIM45SRC_SHIFT 0
+
+/* RCC_SAI1CKSELR register fields */
+#define RCC_SAI1CKSELR_SAI1SRC_MASK GENMASK(2, 0)
+#define RCC_SAI1CKSELR_SAI1SRC_SHIFT 0
+
+/* RCC_SAI2CKSELR register fields */
+#define RCC_SAI2CKSELR_SAI2SRC_MASK GENMASK(2, 0)
+#define RCC_SAI2CKSELR_SAI2SRC_SHIFT 0
+
+/* RCC_FDCANCKSELR register fields */
+#define RCC_FDCANCKSELR_FDCANSRC_MASK GENMASK(1, 0)
+#define RCC_FDCANCKSELR_FDCANSRC_SHIFT 0
+
+/* RCC_SPDIFCKSELR register fields */
+#define RCC_SPDIFCKSELR_SPDIFSRC_MASK GENMASK(1, 0)
+#define RCC_SPDIFCKSELR_SPDIFSRC_SHIFT 0
+
+/* RCC_ADC12CKSELR register fields */
+#define RCC_ADC12CKSELR_ADC1SRC_MASK GENMASK(1, 0)
+#define RCC_ADC12CKSELR_ADC2SRC_MASK GENMASK(3, 2)
+#define RCC_ADC12CKSELR_ADC1SRC_SHIFT 0
+#define RCC_ADC12CKSELR_ADC2SRC_SHIFT 2
+
+/* RCC_SDMMC12CKSELR register fields */
+#define RCC_SDMMC12CKSELR_SDMMC1SRC_MASK GENMASK(2, 0)
+#define RCC_SDMMC12CKSELR_SDMMC2SRC_MASK GENMASK(5, 3)
+#define RCC_SDMMC12CKSELR_SDMMC1SRC_SHIFT 0
+#define RCC_SDMMC12CKSELR_SDMMC2SRC_SHIFT 3
+
+/* RCC_ETH12CKSELR register fields */
+#define RCC_ETH12CKSELR_ETH1SRC_MASK GENMASK(1, 0)
+#define RCC_ETH12CKSELR_ETH1PTPDIV_MASK GENMASK(7, 4)
+#define RCC_ETH12CKSELR_ETH2SRC_MASK GENMASK(9, 8)
+#define RCC_ETH12CKSELR_ETH2PTPDIV_MASK GENMASK(15, 12)
+#define RCC_ETH12CKSELR_ETH1SRC_SHIFT 0
+#define RCC_ETH12CKSELR_ETH1PTPDIV_SHIFT 4
+#define RCC_ETH12CKSELR_ETH2SRC_SHIFT 8
+#define RCC_ETH12CKSELR_ETH2PTPDIV_SHIFT 12
+
+/* RCC_USBCKSELR register fields */
+#define RCC_USBCKSELR_USBPHYSRC_MASK GENMASK(1, 0)
+#define RCC_USBCKSELR_USBOSRC BIT(4)
+#define RCC_USBCKSELR_USBPHYSRC_SHIFT 0
+
+/* RCC_QSPICKSELR register fields */
+#define RCC_QSPICKSELR_QSPISRC_MASK GENMASK(1, 0)
+#define RCC_QSPICKSELR_QSPISRC_SHIFT 0
+
+/* RCC_FMCCKSELR register fields */
+#define RCC_FMCCKSELR_FMCSRC_MASK GENMASK(1, 0)
+#define RCC_FMCCKSELR_FMCSRC_SHIFT 0
+
+/* RCC_RNG1CKSELR register fields */
+#define RCC_RNG1CKSELR_RNG1SRC_MASK GENMASK(1, 0)
+#define RCC_RNG1CKSELR_RNG1SRC_SHIFT 0
+
+/* RCC_STGENCKSELR register fields */
+#define RCC_STGENCKSELR_STGENSRC_MASK GENMASK(1, 0)
+#define RCC_STGENCKSELR_STGENSRC_SHIFT 0
+
+/* RCC_DCMIPPCKSELR register fields */
+#define RCC_DCMIPPCKSELR_DCMIPPSRC_MASK GENMASK(1, 0)
+#define RCC_DCMIPPCKSELR_DCMIPPSRC_SHIFT 0
+
+/* RCC_SAESCKSELR register fields */
+#define RCC_SAESCKSELR_SAESSRC_MASK GENMASK(1, 0)
+#define RCC_SAESCKSELR_SAESSRC_SHIFT 0
+
+/* RCC_APB1RSTSETR register fields */
+#define RCC_APB1RSTSETR_TIM2RST BIT(0)
+#define RCC_APB1RSTSETR_TIM3RST BIT(1)
+#define RCC_APB1RSTSETR_TIM4RST BIT(2)
+#define RCC_APB1RSTSETR_TIM5RST BIT(3)
+#define RCC_APB1RSTSETR_TIM6RST BIT(4)
+#define RCC_APB1RSTSETR_TIM7RST BIT(5)
+#define RCC_APB1RSTSETR_LPTIM1RST BIT(9)
+#define RCC_APB1RSTSETR_SPI2RST BIT(11)
+#define RCC_APB1RSTSETR_SPI3RST BIT(12)
+#define RCC_APB1RSTSETR_USART3RST BIT(15)
+#define RCC_APB1RSTSETR_UART4RST BIT(16)
+#define RCC_APB1RSTSETR_UART5RST BIT(17)
+#define RCC_APB1RSTSETR_UART7RST BIT(18)
+#define RCC_APB1RSTSETR_UART8RST BIT(19)
+#define RCC_APB1RSTSETR_I2C1RST BIT(21)
+#define RCC_APB1RSTSETR_I2C2RST BIT(22)
+#define RCC_APB1RSTSETR_SPDIFRST BIT(26)
+
+/* RCC_APB1RSTCLRR register fields */
+#define RCC_APB1RSTCLRR_TIM2RST BIT(0)
+#define RCC_APB1RSTCLRR_TIM3RST BIT(1)
+#define RCC_APB1RSTCLRR_TIM4RST BIT(2)
+#define RCC_APB1RSTCLRR_TIM5RST BIT(3)
+#define RCC_APB1RSTCLRR_TIM6RST BIT(4)
+#define RCC_APB1RSTCLRR_TIM7RST BIT(5)
+#define RCC_APB1RSTCLRR_LPTIM1RST BIT(9)
+#define RCC_APB1RSTCLRR_SPI2RST BIT(11)
+#define RCC_APB1RSTCLRR_SPI3RST BIT(12)
+#define RCC_APB1RSTCLRR_USART3RST BIT(15)
+#define RCC_APB1RSTCLRR_UART4RST BIT(16)
+#define RCC_APB1RSTCLRR_UART5RST BIT(17)
+#define RCC_APB1RSTCLRR_UART7RST BIT(18)
+#define RCC_APB1RSTCLRR_UART8RST BIT(19)
+#define RCC_APB1RSTCLRR_I2C1RST BIT(21)
+#define RCC_APB1RSTCLRR_I2C2RST BIT(22)
+#define RCC_APB1RSTCLRR_SPDIFRST BIT(26)
+
+/* RCC_APB2RSTSETR register fields */
+#define RCC_APB2RSTSETR_TIM1RST BIT(0)
+#define RCC_APB2RSTSETR_TIM8RST BIT(1)
+#define RCC_APB2RSTSETR_SPI1RST BIT(8)
+#define RCC_APB2RSTSETR_USART6RST BIT(13)
+#define RCC_APB2RSTSETR_SAI1RST BIT(16)
+#define RCC_APB2RSTSETR_SAI2RST BIT(17)
+#define RCC_APB2RSTSETR_DFSDMRST BIT(20)
+#define RCC_APB2RSTSETR_FDCANRST BIT(24)
+
+/* RCC_APB2RSTCLRR register fields */
+#define RCC_APB2RSTCLRR_TIM1RST BIT(0)
+#define RCC_APB2RSTCLRR_TIM8RST BIT(1)
+#define RCC_APB2RSTCLRR_SPI1RST BIT(8)
+#define RCC_APB2RSTCLRR_USART6RST BIT(13)
+#define RCC_APB2RSTCLRR_SAI1RST BIT(16)
+#define RCC_APB2RSTCLRR_SAI2RST BIT(17)
+#define RCC_APB2RSTCLRR_DFSDMRST BIT(20)
+#define RCC_APB2RSTCLRR_FDCANRST BIT(24)
+
+/* RCC_APB3RSTSETR register fields */
+#define RCC_APB3RSTSETR_LPTIM2RST BIT(0)
+#define RCC_APB3RSTSETR_LPTIM3RST BIT(1)
+#define RCC_APB3RSTSETR_LPTIM4RST BIT(2)
+#define RCC_APB3RSTSETR_LPTIM5RST BIT(3)
+#define RCC_APB3RSTSETR_SYSCFGRST BIT(11)
+#define RCC_APB3RSTSETR_VREFRST BIT(13)
+#define RCC_APB3RSTSETR_DTSRST BIT(16)
+#define RCC_APB3RSTSETR_PMBCTRLRST BIT(17)
+
+/* RCC_APB3RSTCLRR register fields */
+#define RCC_APB3RSTCLRR_LPTIM2RST BIT(0)
+#define RCC_APB3RSTCLRR_LPTIM3RST BIT(1)
+#define RCC_APB3RSTCLRR_LPTIM4RST BIT(2)
+#define RCC_APB3RSTCLRR_LPTIM5RST BIT(3)
+#define RCC_APB3RSTCLRR_SYSCFGRST BIT(11)
+#define RCC_APB3RSTCLRR_VREFRST BIT(13)
+#define RCC_APB3RSTCLRR_DTSRST BIT(16)
+#define RCC_APB3RSTCLRR_PMBCTRLRST BIT(17)
+
+/* RCC_APB4RSTSETR register fields */
+#define RCC_APB4RSTSETR_LTDCRST BIT(0)
+#define RCC_APB4RSTSETR_DCMIPPRST BIT(1)
+#define RCC_APB4RSTSETR_DDRPERFMRST BIT(8)
+#define RCC_APB4RSTSETR_USBPHYRST BIT(16)
+
+/* RCC_APB4RSTCLRR register fields */
+#define RCC_APB4RSTCLRR_LTDCRST BIT(0)
+#define RCC_APB4RSTCLRR_DCMIPPRST BIT(1)
+#define RCC_APB4RSTCLRR_DDRPERFMRST BIT(8)
+#define RCC_APB4RSTCLRR_USBPHYRST BIT(16)
+
+/* RCC_APB5RSTSETR register fields */
+#define RCC_APB5RSTSETR_STGENRST BIT(20)
+
+/* RCC_APB5RSTCLRR register fields */
+#define RCC_APB5RSTCLRR_STGENRST BIT(20)
+
+/* RCC_APB6RSTSETR register fields */
+#define RCC_APB6RSTSETR_USART1RST BIT(0)
+#define RCC_APB6RSTSETR_USART2RST BIT(1)
+#define RCC_APB6RSTSETR_SPI4RST BIT(2)
+#define RCC_APB6RSTSETR_SPI5RST BIT(3)
+#define RCC_APB6RSTSETR_I2C3RST BIT(4)
+#define RCC_APB6RSTSETR_I2C4RST BIT(5)
+#define RCC_APB6RSTSETR_I2C5RST BIT(6)
+#define RCC_APB6RSTSETR_TIM12RST BIT(7)
+#define RCC_APB6RSTSETR_TIM13RST BIT(8)
+#define RCC_APB6RSTSETR_TIM14RST BIT(9)
+#define RCC_APB6RSTSETR_TIM15RST BIT(10)
+#define RCC_APB6RSTSETR_TIM16RST BIT(11)
+#define RCC_APB6RSTSETR_TIM17RST BIT(12)
+
+/* RCC_APB6RSTCLRR register fields */
+#define RCC_APB6RSTCLRR_USART1RST BIT(0)
+#define RCC_APB6RSTCLRR_USART2RST BIT(1)
+#define RCC_APB6RSTCLRR_SPI4RST BIT(2)
+#define RCC_APB6RSTCLRR_SPI5RST BIT(3)
+#define RCC_APB6RSTCLRR_I2C3RST BIT(4)
+#define RCC_APB6RSTCLRR_I2C4RST BIT(5)
+#define RCC_APB6RSTCLRR_I2C5RST BIT(6)
+#define RCC_APB6RSTCLRR_TIM12RST BIT(7)
+#define RCC_APB6RSTCLRR_TIM13RST BIT(8)
+#define RCC_APB6RSTCLRR_TIM14RST BIT(9)
+#define RCC_APB6RSTCLRR_TIM15RST BIT(10)
+#define RCC_APB6RSTCLRR_TIM16RST BIT(11)
+#define RCC_APB6RSTCLRR_TIM17RST BIT(12)
+
+/* RCC_AHB2RSTSETR register fields */
+#define RCC_AHB2RSTSETR_DMA1RST BIT(0)
+#define RCC_AHB2RSTSETR_DMA2RST BIT(1)
+#define RCC_AHB2RSTSETR_DMAMUX1RST BIT(2)
+#define RCC_AHB2RSTSETR_DMA3RST BIT(3)
+#define RCC_AHB2RSTSETR_DMAMUX2RST BIT(4)
+#define RCC_AHB2RSTSETR_ADC1RST BIT(5)
+#define RCC_AHB2RSTSETR_ADC2RST BIT(6)
+#define RCC_AHB2RSTSETR_USBORST BIT(8)
+
+/* RCC_AHB2RSTCLRR register fields */
+#define RCC_AHB2RSTCLRR_DMA1RST BIT(0)
+#define RCC_AHB2RSTCLRR_DMA2RST BIT(1)
+#define RCC_AHB2RSTCLRR_DMAMUX1RST BIT(2)
+#define RCC_AHB2RSTCLRR_DMA3RST BIT(3)
+#define RCC_AHB2RSTCLRR_DMAMUX2RST BIT(4)
+#define RCC_AHB2RSTCLRR_ADC1RST BIT(5)
+#define RCC_AHB2RSTCLRR_ADC2RST BIT(6)
+#define RCC_AHB2RSTCLRR_USBORST BIT(8)
+
+/* RCC_AHB4RSTSETR register fields */
+#define RCC_AHB4RSTSETR_GPIOARST BIT(0)
+#define RCC_AHB4RSTSETR_GPIOBRST BIT(1)
+#define RCC_AHB4RSTSETR_GPIOCRST BIT(2)
+#define RCC_AHB4RSTSETR_GPIODRST BIT(3)
+#define RCC_AHB4RSTSETR_GPIOERST BIT(4)
+#define RCC_AHB4RSTSETR_GPIOFRST BIT(5)
+#define RCC_AHB4RSTSETR_GPIOGRST BIT(6)
+#define RCC_AHB4RSTSETR_GPIOHRST BIT(7)
+#define RCC_AHB4RSTSETR_GPIOIRST BIT(8)
+#define RCC_AHB4RSTSETR_TSCRST BIT(15)
+
+/* RCC_AHB4RSTCLRR register fields */
+#define RCC_AHB4RSTCLRR_GPIOARST BIT(0)
+#define RCC_AHB4RSTCLRR_GPIOBRST BIT(1)
+#define RCC_AHB4RSTCLRR_GPIOCRST BIT(2)
+#define RCC_AHB4RSTCLRR_GPIODRST BIT(3)
+#define RCC_AHB4RSTCLRR_GPIOERST BIT(4)
+#define RCC_AHB4RSTCLRR_GPIOFRST BIT(5)
+#define RCC_AHB4RSTCLRR_GPIOGRST BIT(6)
+#define RCC_AHB4RSTCLRR_GPIOHRST BIT(7)
+#define RCC_AHB4RSTCLRR_GPIOIRST BIT(8)
+#define RCC_AHB4RSTCLRR_TSCRST BIT(15)
+
+/* RCC_AHB5RSTSETR register fields */
+#define RCC_AHB5RSTSETR_PKARST BIT(2)
+#define RCC_AHB5RSTSETR_SAESRST BIT(3)
+#define RCC_AHB5RSTSETR_CRYP1RST BIT(4)
+#define RCC_AHB5RSTSETR_HASH1RST BIT(5)
+#define RCC_AHB5RSTSETR_RNG1RST BIT(6)
+#define RCC_AHB5RSTSETR_AXIMCRST BIT(16)
+
+/* RCC_AHB5RSTCLRR register fields */
+#define RCC_AHB5RSTCLRR_PKARST BIT(2)
+#define RCC_AHB5RSTCLRR_SAESRST BIT(3)
+#define RCC_AHB5RSTCLRR_CRYP1RST BIT(4)
+#define RCC_AHB5RSTCLRR_HASH1RST BIT(5)
+#define RCC_AHB5RSTCLRR_RNG1RST BIT(6)
+#define RCC_AHB5RSTCLRR_AXIMCRST BIT(16)
+
+/* RCC_AHB6RSTSETR register fields */
+#define RCC_AHB6RSTSETR_MDMARST BIT(0)
+#define RCC_AHB6RSTSETR_MCERST BIT(1)
+#define RCC_AHB6RSTSETR_ETH1MACRST BIT(10)
+#define RCC_AHB6RSTSETR_FMCRST BIT(12)
+#define RCC_AHB6RSTSETR_QSPIRST BIT(14)
+#define RCC_AHB6RSTSETR_SDMMC1RST BIT(16)
+#define RCC_AHB6RSTSETR_SDMMC2RST BIT(17)
+#define RCC_AHB6RSTSETR_CRC1RST BIT(20)
+#define RCC_AHB6RSTSETR_USBHRST BIT(24)
+#define RCC_AHB6RSTSETR_ETH2MACRST BIT(30)
+
+/* RCC_AHB6RSTCLRR register fields */
+#define RCC_AHB6RSTCLRR_MDMARST BIT(0)
+#define RCC_AHB6RSTCLRR_MCERST BIT(1)
+#define RCC_AHB6RSTCLRR_ETH1MACRST BIT(10)
+#define RCC_AHB6RSTCLRR_FMCRST BIT(12)
+#define RCC_AHB6RSTCLRR_QSPIRST BIT(14)
+#define RCC_AHB6RSTCLRR_SDMMC1RST BIT(16)
+#define RCC_AHB6RSTCLRR_SDMMC2RST BIT(17)
+#define RCC_AHB6RSTCLRR_CRC1RST BIT(20)
+#define RCC_AHB6RSTCLRR_USBHRST BIT(24)
+#define RCC_AHB6RSTCLRR_ETH2MACRST BIT(30)
+
+/* RCC_MP_APB1ENSETR register fields */
+#define RCC_MP_APB1ENSETR_TIM2EN BIT(0)
+#define RCC_MP_APB1ENSETR_TIM3EN BIT(1)
+#define RCC_MP_APB1ENSETR_TIM4EN BIT(2)
+#define RCC_MP_APB1ENSETR_TIM5EN BIT(3)
+#define RCC_MP_APB1ENSETR_TIM6EN BIT(4)
+#define RCC_MP_APB1ENSETR_TIM7EN BIT(5)
+#define RCC_MP_APB1ENSETR_LPTIM1EN BIT(9)
+#define RCC_MP_APB1ENSETR_SPI2EN BIT(11)
+#define RCC_MP_APB1ENSETR_SPI3EN BIT(12)
+#define RCC_MP_APB1ENSETR_USART3EN BIT(15)
+#define RCC_MP_APB1ENSETR_UART4EN BIT(16)
+#define RCC_MP_APB1ENSETR_UART5EN BIT(17)
+#define RCC_MP_APB1ENSETR_UART7EN BIT(18)
+#define RCC_MP_APB1ENSETR_UART8EN BIT(19)
+#define RCC_MP_APB1ENSETR_I2C1EN BIT(21)
+#define RCC_MP_APB1ENSETR_I2C2EN BIT(22)
+#define RCC_MP_APB1ENSETR_SPDIFEN BIT(26)
+
+/* RCC_MP_APB1ENCLRR register fields */
+#define RCC_MP_APB1ENCLRR_TIM2EN BIT(0)
+#define RCC_MP_APB1ENCLRR_TIM3EN BIT(1)
+#define RCC_MP_APB1ENCLRR_TIM4EN BIT(2)
+#define RCC_MP_APB1ENCLRR_TIM5EN BIT(3)
+#define RCC_MP_APB1ENCLRR_TIM6EN BIT(4)
+#define RCC_MP_APB1ENCLRR_TIM7EN BIT(5)
+#define RCC_MP_APB1ENCLRR_LPTIM1EN BIT(9)
+#define RCC_MP_APB1ENCLRR_SPI2EN BIT(11)
+#define RCC_MP_APB1ENCLRR_SPI3EN BIT(12)
+#define RCC_MP_APB1ENCLRR_USART3EN BIT(15)
+#define RCC_MP_APB1ENCLRR_UART4EN BIT(16)
+#define RCC_MP_APB1ENCLRR_UART5EN BIT(17)
+#define RCC_MP_APB1ENCLRR_UART7EN BIT(18)
+#define RCC_MP_APB1ENCLRR_UART8EN BIT(19)
+#define RCC_MP_APB1ENCLRR_I2C1EN BIT(21)
+#define RCC_MP_APB1ENCLRR_I2C2EN BIT(22)
+#define RCC_MP_APB1ENCLRR_SPDIFEN BIT(26)
+
+/* RCC_MP_APB2ENSETR register fields */
+#define RCC_MP_APB2ENSETR_TIM1EN BIT(0)
+#define RCC_MP_APB2ENSETR_TIM8EN BIT(1)
+#define RCC_MP_APB2ENSETR_SPI1EN BIT(8)
+#define RCC_MP_APB2ENSETR_USART6EN BIT(13)
+#define RCC_MP_APB2ENSETR_SAI1EN BIT(16)
+#define RCC_MP_APB2ENSETR_SAI2EN BIT(17)
+#define RCC_MP_APB2ENSETR_DFSDMEN BIT(20)
+#define RCC_MP_APB2ENSETR_ADFSDMEN BIT(21)
+#define RCC_MP_APB2ENSETR_FDCANEN BIT(24)
+
+/* RCC_MP_APB2ENCLRR register fields */
+#define RCC_MP_APB2ENCLRR_TIM1EN BIT(0)
+#define RCC_MP_APB2ENCLRR_TIM8EN BIT(1)
+#define RCC_MP_APB2ENCLRR_SPI1EN BIT(8)
+#define RCC_MP_APB2ENCLRR_USART6EN BIT(13)
+#define RCC_MP_APB2ENCLRR_SAI1EN BIT(16)
+#define RCC_MP_APB2ENCLRR_SAI2EN BIT(17)
+#define RCC_MP_APB2ENCLRR_DFSDMEN BIT(20)
+#define RCC_MP_APB2ENCLRR_ADFSDMEN BIT(21)
+#define RCC_MP_APB2ENCLRR_FDCANEN BIT(24)
+
+/* RCC_MP_APB3ENSETR register fields */
+#define RCC_MP_APB3ENSETR_LPTIM2EN BIT(0)
+#define RCC_MP_APB3ENSETR_LPTIM3EN BIT(1)
+#define RCC_MP_APB3ENSETR_LPTIM4EN BIT(2)
+#define RCC_MP_APB3ENSETR_LPTIM5EN BIT(3)
+#define RCC_MP_APB3ENSETR_VREFEN BIT(13)
+#define RCC_MP_APB3ENSETR_DTSEN BIT(16)
+#define RCC_MP_APB3ENSETR_PMBCTRLEN BIT(17)
+#define RCC_MP_APB3ENSETR_HDPEN BIT(20)
+
+/* RCC_MP_APB3ENCLRR register fields */
+#define RCC_MP_APB3ENCLRR_LPTIM2EN BIT(0)
+#define RCC_MP_APB3ENCLRR_LPTIM3EN BIT(1)
+#define RCC_MP_APB3ENCLRR_LPTIM4EN BIT(2)
+#define RCC_MP_APB3ENCLRR_LPTIM5EN BIT(3)
+#define RCC_MP_APB3ENCLRR_VREFEN BIT(13)
+#define RCC_MP_APB3ENCLRR_DTSEN BIT(16)
+#define RCC_MP_APB3ENCLRR_PMBCTRLEN BIT(17)
+#define RCC_MP_APB3ENCLRR_HDPEN BIT(20)
+
+/* RCC_MP_S_APB3ENSETR register fields */
+#define RCC_MP_S_APB3ENSETR_SYSCFGEN BIT(0)
+
+/* RCC_MP_S_APB3ENCLRR register fields */
+#define RCC_MP_S_APB3ENCLRR_SYSCFGEN BIT(0)
+
+/* RCC_MP_NS_APB3ENSETR register fields */
+#define RCC_MP_NS_APB3ENSETR_SYSCFGEN BIT(0)
+
+/* RCC_MP_NS_APB3ENCLRR register fields */
+#define RCC_MP_NS_APB3ENCLRR_SYSCFGEN BIT(0)
+
+/* RCC_MP_APB4ENSETR register fields */
+#define RCC_MP_APB4ENSETR_DCMIPPEN BIT(1)
+#define RCC_MP_APB4ENSETR_DDRPERFMEN BIT(8)
+#define RCC_MP_APB4ENSETR_IWDG2APBEN BIT(15)
+#define RCC_MP_APB4ENSETR_USBPHYEN BIT(16)
+#define RCC_MP_APB4ENSETR_STGENROEN BIT(20)
+
+/* RCC_MP_APB4ENCLRR register fields */
+#define RCC_MP_APB4ENCLRR_DCMIPPEN BIT(1)
+#define RCC_MP_APB4ENCLRR_DDRPERFMEN BIT(8)
+#define RCC_MP_APB4ENCLRR_IWDG2APBEN BIT(15)
+#define RCC_MP_APB4ENCLRR_USBPHYEN BIT(16)
+#define RCC_MP_APB4ENCLRR_STGENROEN BIT(20)
+
+/* RCC_MP_S_APB4ENSETR register fields */
+#define RCC_MP_S_APB4ENSETR_LTDCEN BIT(0)
+
+/* RCC_MP_S_APB4ENCLRR register fields */
+#define RCC_MP_S_APB4ENCLRR_LTDCEN BIT(0)
+
+/* RCC_MP_NS_APB4ENSETR register fields */
+#define RCC_MP_NS_APB4ENSETR_LTDCEN BIT(0)
+
+/* RCC_MP_NS_APB4ENCLRR register fields */
+#define RCC_MP_NS_APB4ENCLRR_LTDCEN BIT(0)
+
+/* RCC_MP_APB5ENSETR register fields */
+#define RCC_MP_APB5ENSETR_RTCAPBEN BIT(8)
+#define RCC_MP_APB5ENSETR_TZCEN BIT(11)
+#define RCC_MP_APB5ENSETR_ETZPCEN BIT(13)
+#define RCC_MP_APB5ENSETR_IWDG1APBEN BIT(15)
+#define RCC_MP_APB5ENSETR_BSECEN BIT(16)
+#define RCC_MP_APB5ENSETR_STGENCEN BIT(20)
+
+/* RCC_MP_APB5ENCLRR register fields */
+#define RCC_MP_APB5ENCLRR_RTCAPBEN BIT(8)
+#define RCC_MP_APB5ENCLRR_TZCEN BIT(11)
+#define RCC_MP_APB5ENCLRR_ETZPCEN BIT(13)
+#define RCC_MP_APB5ENCLRR_IWDG1APBEN BIT(15)
+#define RCC_MP_APB5ENCLRR_BSECEN BIT(16)
+#define RCC_MP_APB5ENCLRR_STGENCEN BIT(20)
+
+/* RCC_MP_APB6ENSETR register fields */
+#define RCC_MP_APB6ENSETR_USART1EN BIT(0)
+#define RCC_MP_APB6ENSETR_USART2EN BIT(1)
+#define RCC_MP_APB6ENSETR_SPI4EN BIT(2)
+#define RCC_MP_APB6ENSETR_SPI5EN BIT(3)
+#define RCC_MP_APB6ENSETR_I2C3EN BIT(4)
+#define RCC_MP_APB6ENSETR_I2C4EN BIT(5)
+#define RCC_MP_APB6ENSETR_I2C5EN BIT(6)
+#define RCC_MP_APB6ENSETR_TIM12EN BIT(7)
+#define RCC_MP_APB6ENSETR_TIM13EN BIT(8)
+#define RCC_MP_APB6ENSETR_TIM14EN BIT(9)
+#define RCC_MP_APB6ENSETR_TIM15EN BIT(10)
+#define RCC_MP_APB6ENSETR_TIM16EN BIT(11)
+#define RCC_MP_APB6ENSETR_TIM17EN BIT(12)
+
+/* RCC_MP_APB6ENCLRR register fields */
+#define RCC_MP_APB6ENCLRR_USART1EN BIT(0)
+#define RCC_MP_APB6ENCLRR_USART2EN BIT(1)
+#define RCC_MP_APB6ENCLRR_SPI4EN BIT(2)
+#define RCC_MP_APB6ENCLRR_SPI5EN BIT(3)
+#define RCC_MP_APB6ENCLRR_I2C3EN BIT(4)
+#define RCC_MP_APB6ENCLRR_I2C4EN BIT(5)
+#define RCC_MP_APB6ENCLRR_I2C5EN BIT(6)
+#define RCC_MP_APB6ENCLRR_TIM12EN BIT(7)
+#define RCC_MP_APB6ENCLRR_TIM13EN BIT(8)
+#define RCC_MP_APB6ENCLRR_TIM14EN BIT(9)
+#define RCC_MP_APB6ENCLRR_TIM15EN BIT(10)
+#define RCC_MP_APB6ENCLRR_TIM16EN BIT(11)
+#define RCC_MP_APB6ENCLRR_TIM17EN BIT(12)
+
+/* RCC_MP_AHB2ENSETR register fields */
+#define RCC_MP_AHB2ENSETR_DMA1EN BIT(0)
+#define RCC_MP_AHB2ENSETR_DMA2EN BIT(1)
+#define RCC_MP_AHB2ENSETR_DMAMUX1EN BIT(2)
+#define RCC_MP_AHB2ENSETR_DMA3EN BIT(3)
+#define RCC_MP_AHB2ENSETR_DMAMUX2EN BIT(4)
+#define RCC_MP_AHB2ENSETR_ADC1EN BIT(5)
+#define RCC_MP_AHB2ENSETR_ADC2EN BIT(6)
+#define RCC_MP_AHB2ENSETR_USBOEN BIT(8)
+
+/* RCC_MP_AHB2ENCLRR register fields */
+#define RCC_MP_AHB2ENCLRR_DMA1EN BIT(0)
+#define RCC_MP_AHB2ENCLRR_DMA2EN BIT(1)
+#define RCC_MP_AHB2ENCLRR_DMAMUX1EN BIT(2)
+#define RCC_MP_AHB2ENCLRR_DMA3EN BIT(3)
+#define RCC_MP_AHB2ENCLRR_DMAMUX2EN BIT(4)
+#define RCC_MP_AHB2ENCLRR_ADC1EN BIT(5)
+#define RCC_MP_AHB2ENCLRR_ADC2EN BIT(6)
+#define RCC_MP_AHB2ENCLRR_USBOEN BIT(8)
+
+/* RCC_MP_AHB4ENSETR register fields */
+#define RCC_MP_AHB4ENSETR_TSCEN BIT(15)
+
+/* RCC_MP_AHB4ENCLRR register fields */
+#define RCC_MP_AHB4ENCLRR_TSCEN BIT(15)
+
+/* RCC_MP_S_AHB4ENSETR register fields */
+#define RCC_MP_S_AHB4ENSETR_GPIOAEN BIT(0)
+#define RCC_MP_S_AHB4ENSETR_GPIOBEN BIT(1)
+#define RCC_MP_S_AHB4ENSETR_GPIOCEN BIT(2)
+#define RCC_MP_S_AHB4ENSETR_GPIODEN BIT(3)
+#define RCC_MP_S_AHB4ENSETR_GPIOEEN BIT(4)
+#define RCC_MP_S_AHB4ENSETR_GPIOFEN BIT(5)
+#define RCC_MP_S_AHB4ENSETR_GPIOGEN BIT(6)
+#define RCC_MP_S_AHB4ENSETR_GPIOHEN BIT(7)
+#define RCC_MP_S_AHB4ENSETR_GPIOIEN BIT(8)
+
+/* RCC_MP_S_AHB4ENCLRR register fields */
+#define RCC_MP_S_AHB4ENCLRR_GPIOAEN BIT(0)
+#define RCC_MP_S_AHB4ENCLRR_GPIOBEN BIT(1)
+#define RCC_MP_S_AHB4ENCLRR_GPIOCEN BIT(2)
+#define RCC_MP_S_AHB4ENCLRR_GPIODEN BIT(3)
+#define RCC_MP_S_AHB4ENCLRR_GPIOEEN BIT(4)
+#define RCC_MP_S_AHB4ENCLRR_GPIOFEN BIT(5)
+#define RCC_MP_S_AHB4ENCLRR_GPIOGEN BIT(6)
+#define RCC_MP_S_AHB4ENCLRR_GPIOHEN BIT(7)
+#define RCC_MP_S_AHB4ENCLRR_GPIOIEN BIT(8)
+
+/* RCC_MP_NS_AHB4ENSETR register fields */
+#define RCC_MP_NS_AHB4ENSETR_GPIOAEN BIT(0)
+#define RCC_MP_NS_AHB4ENSETR_GPIOBEN BIT(1)
+#define RCC_MP_NS_AHB4ENSETR_GPIOCEN BIT(2)
+#define RCC_MP_NS_AHB4ENSETR_GPIODEN BIT(3)
+#define RCC_MP_NS_AHB4ENSETR_GPIOEEN BIT(4)
+#define RCC_MP_NS_AHB4ENSETR_GPIOFEN BIT(5)
+#define RCC_MP_NS_AHB4ENSETR_GPIOGEN BIT(6)
+#define RCC_MP_NS_AHB4ENSETR_GPIOHEN BIT(7)
+#define RCC_MP_NS_AHB4ENSETR_GPIOIEN BIT(8)
+
+/* RCC_MP_NS_AHB4ENCLRR register fields */
+#define RCC_MP_NS_AHB4ENCLRR_GPIOAEN BIT(0)
+#define RCC_MP_NS_AHB4ENCLRR_GPIOBEN BIT(1)
+#define RCC_MP_NS_AHB4ENCLRR_GPIOCEN BIT(2)
+#define RCC_MP_NS_AHB4ENCLRR_GPIODEN BIT(3)
+#define RCC_MP_NS_AHB4ENCLRR_GPIOEEN BIT(4)
+#define RCC_MP_NS_AHB4ENCLRR_GPIOFEN BIT(5)
+#define RCC_MP_NS_AHB4ENCLRR_GPIOGEN BIT(6)
+#define RCC_MP_NS_AHB4ENCLRR_GPIOHEN BIT(7)
+#define RCC_MP_NS_AHB4ENCLRR_GPIOIEN BIT(8)
+
+/* RCC_MP_AHB5ENSETR register fields */
+#define RCC_MP_AHB5ENSETR_PKAEN BIT(2)
+#define RCC_MP_AHB5ENSETR_SAESEN BIT(3)
+#define RCC_MP_AHB5ENSETR_CRYP1EN BIT(4)
+#define RCC_MP_AHB5ENSETR_HASH1EN BIT(5)
+#define RCC_MP_AHB5ENSETR_RNG1EN BIT(6)
+#define RCC_MP_AHB5ENSETR_BKPSRAMEN BIT(8)
+#define RCC_MP_AHB5ENSETR_AXIMCEN BIT(16)
+
+/* RCC_MP_AHB5ENCLRR register fields */
+#define RCC_MP_AHB5ENCLRR_PKAEN BIT(2)
+#define RCC_MP_AHB5ENCLRR_SAESEN BIT(3)
+#define RCC_MP_AHB5ENCLRR_CRYP1EN BIT(4)
+#define RCC_MP_AHB5ENCLRR_HASH1EN BIT(5)
+#define RCC_MP_AHB5ENCLRR_RNG1EN BIT(6)
+#define RCC_MP_AHB5ENCLRR_BKPSRAMEN BIT(8)
+#define RCC_MP_AHB5ENCLRR_AXIMCEN BIT(16)
+
+/* RCC_MP_AHB6ENSETR register fields */
+#define RCC_MP_AHB6ENSETR_MCEEN BIT(1)
+#define RCC_MP_AHB6ENSETR_ETH1CKEN BIT(7)
+#define RCC_MP_AHB6ENSETR_ETH1TXEN BIT(8)
+#define RCC_MP_AHB6ENSETR_ETH1RXEN BIT(9)
+#define RCC_MP_AHB6ENSETR_ETH1MACEN BIT(10)
+#define RCC_MP_AHB6ENSETR_FMCEN BIT(12)
+#define RCC_MP_AHB6ENSETR_QSPIEN BIT(14)
+#define RCC_MP_AHB6ENSETR_SDMMC1EN BIT(16)
+#define RCC_MP_AHB6ENSETR_SDMMC2EN BIT(17)
+#define RCC_MP_AHB6ENSETR_CRC1EN BIT(20)
+#define RCC_MP_AHB6ENSETR_USBHEN BIT(24)
+#define RCC_MP_AHB6ENSETR_ETH2CKEN BIT(27)
+#define RCC_MP_AHB6ENSETR_ETH2TXEN BIT(28)
+#define RCC_MP_AHB6ENSETR_ETH2RXEN BIT(29)
+#define RCC_MP_AHB6ENSETR_ETH2MACEN BIT(30)
+
+/* RCC_MP_AHB6ENCLRR register fields */
+#define RCC_MP_AHB6ENCLRR_MCEEN BIT(1)
+#define RCC_MP_AHB6ENCLRR_ETH1CKEN BIT(7)
+#define RCC_MP_AHB6ENCLRR_ETH1TXEN BIT(8)
+#define RCC_MP_AHB6ENCLRR_ETH1RXEN BIT(9)
+#define RCC_MP_AHB6ENCLRR_ETH1MACEN BIT(10)
+#define RCC_MP_AHB6ENCLRR_FMCEN BIT(12)
+#define RCC_MP_AHB6ENCLRR_QSPIEN BIT(14)
+#define RCC_MP_AHB6ENCLRR_SDMMC1EN BIT(16)
+#define RCC_MP_AHB6ENCLRR_SDMMC2EN BIT(17)
+#define RCC_MP_AHB6ENCLRR_CRC1EN BIT(20)
+#define RCC_MP_AHB6ENCLRR_USBHEN BIT(24)
+#define RCC_MP_AHB6ENCLRR_ETH2CKEN BIT(27)
+#define RCC_MP_AHB6ENCLRR_ETH2TXEN BIT(28)
+#define RCC_MP_AHB6ENCLRR_ETH2RXEN BIT(29)
+#define RCC_MP_AHB6ENCLRR_ETH2MACEN BIT(30)
+
+/* RCC_MP_S_AHB6ENSETR register fields */
+#define RCC_MP_S_AHB6ENSETR_MDMAEN BIT(0)
+
+/* RCC_MP_S_AHB6ENCLRR register fields */
+#define RCC_MP_S_AHB6ENCLRR_MDMAEN BIT(0)
+
+/* RCC_MP_NS_AHB6ENSETR register fields */
+#define RCC_MP_NS_AHB6ENSETR_MDMAEN BIT(0)
+
+/* RCC_MP_NS_AHB6ENCLRR register fields */
+#define RCC_MP_NS_AHB6ENCLRR_MDMAEN BIT(0)
+
+/* RCC_MP_APB1LPENSETR register fields */
+#define RCC_MP_APB1LPENSETR_TIM2LPEN BIT(0)
+#define RCC_MP_APB1LPENSETR_TIM3LPEN BIT(1)
+#define RCC_MP_APB1LPENSETR_TIM4LPEN BIT(2)
+#define RCC_MP_APB1LPENSETR_TIM5LPEN BIT(3)
+#define RCC_MP_APB1LPENSETR_TIM6LPEN BIT(4)
+#define RCC_MP_APB1LPENSETR_TIM7LPEN BIT(5)
+#define RCC_MP_APB1LPENSETR_LPTIM1LPEN BIT(9)
+#define RCC_MP_APB1LPENSETR_SPI2LPEN BIT(11)
+#define RCC_MP_APB1LPENSETR_SPI3LPEN BIT(12)
+#define RCC_MP_APB1LPENSETR_USART3LPEN BIT(15)
+#define RCC_MP_APB1LPENSETR_UART4LPEN BIT(16)
+#define RCC_MP_APB1LPENSETR_UART5LPEN BIT(17)
+#define RCC_MP_APB1LPENSETR_UART7LPEN BIT(18)
+#define RCC_MP_APB1LPENSETR_UART8LPEN BIT(19)
+#define RCC_MP_APB1LPENSETR_I2C1LPEN BIT(21)
+#define RCC_MP_APB1LPENSETR_I2C2LPEN BIT(22)
+#define RCC_MP_APB1LPENSETR_SPDIFLPEN BIT(26)
+
+/* RCC_MP_APB1LPENCLRR register fields */
+#define RCC_MP_APB1LPENCLRR_TIM2LPEN BIT(0)
+#define RCC_MP_APB1LPENCLRR_TIM3LPEN BIT(1)
+#define RCC_MP_APB1LPENCLRR_TIM4LPEN BIT(2)
+#define RCC_MP_APB1LPENCLRR_TIM5LPEN BIT(3)
+#define RCC_MP_APB1LPENCLRR_TIM6LPEN BIT(4)
+#define RCC_MP_APB1LPENCLRR_TIM7LPEN BIT(5)
+#define RCC_MP_APB1LPENCLRR_LPTIM1LPEN BIT(9)
+#define RCC_MP_APB1LPENCLRR_SPI2LPEN BIT(11)
+#define RCC_MP_APB1LPENCLRR_SPI3LPEN BIT(12)
+#define RCC_MP_APB1LPENCLRR_USART3LPEN BIT(15)
+#define RCC_MP_APB1LPENCLRR_UART4LPEN BIT(16)
+#define RCC_MP_APB1LPENCLRR_UART5LPEN BIT(17)
+#define RCC_MP_APB1LPENCLRR_UART7LPEN BIT(18)
+#define RCC_MP_APB1LPENCLRR_UART8LPEN BIT(19)
+#define RCC_MP_APB1LPENCLRR_I2C1LPEN BIT(21)
+#define RCC_MP_APB1LPENCLRR_I2C2LPEN BIT(22)
+#define RCC_MP_APB1LPENCLRR_SPDIFLPEN BIT(26)
+
+/* RCC_MP_APB2LPENSETR register fields */
+#define RCC_MP_APB2LPENSETR_TIM1LPEN BIT(0)
+#define RCC_MP_APB2LPENSETR_TIM8LPEN BIT(1)
+#define RCC_MP_APB2LPENSETR_SPI1LPEN BIT(8)
+#define RCC_MP_APB2LPENSETR_USART6LPEN BIT(13)
+#define RCC_MP_APB2LPENSETR_SAI1LPEN BIT(16)
+#define RCC_MP_APB2LPENSETR_SAI2LPEN BIT(17)
+#define RCC_MP_APB2LPENSETR_DFSDMLPEN BIT(20)
+#define RCC_MP_APB2LPENSETR_ADFSDMLPEN BIT(21)
+#define RCC_MP_APB2LPENSETR_FDCANLPEN BIT(24)
+
+/* RCC_MP_APB2LPENCLRR register fields */
+#define RCC_MP_APB2LPENCLRR_TIM1LPEN BIT(0)
+#define RCC_MP_APB2LPENCLRR_TIM8LPEN BIT(1)
+#define RCC_MP_APB2LPENCLRR_SPI1LPEN BIT(8)
+#define RCC_MP_APB2LPENCLRR_USART6LPEN BIT(13)
+#define RCC_MP_APB2LPENCLRR_SAI1LPEN BIT(16)
+#define RCC_MP_APB2LPENCLRR_SAI2LPEN BIT(17)
+#define RCC_MP_APB2LPENCLRR_DFSDMLPEN BIT(20)
+#define RCC_MP_APB2LPENCLRR_ADFSDMLPEN BIT(21)
+#define RCC_MP_APB2LPENCLRR_FDCANLPEN BIT(24)
+
+/* RCC_MP_APB3LPENSETR register fields */
+#define RCC_MP_APB3LPENSETR_LPTIM2LPEN BIT(0)
+#define RCC_MP_APB3LPENSETR_LPTIM3LPEN BIT(1)
+#define RCC_MP_APB3LPENSETR_LPTIM4LPEN BIT(2)
+#define RCC_MP_APB3LPENSETR_LPTIM5LPEN BIT(3)
+#define RCC_MP_APB3LPENSETR_VREFLPEN BIT(13)
+#define RCC_MP_APB3LPENSETR_DTSLPEN BIT(16)
+#define RCC_MP_APB3LPENSETR_PMBCTRLLPEN BIT(17)
+
+/* RCC_MP_APB3LPENCLRR register fields */
+#define RCC_MP_APB3LPENCLRR_LPTIM2LPEN BIT(0)
+#define RCC_MP_APB3LPENCLRR_LPTIM3LPEN BIT(1)
+#define RCC_MP_APB3LPENCLRR_LPTIM4LPEN BIT(2)
+#define RCC_MP_APB3LPENCLRR_LPTIM5LPEN BIT(3)
+#define RCC_MP_APB3LPENCLRR_VREFLPEN BIT(13)
+#define RCC_MP_APB3LPENCLRR_DTSLPEN BIT(16)
+#define RCC_MP_APB3LPENCLRR_PMBCTRLLPEN BIT(17)
+
+/* RCC_MP_S_APB3LPENSETR register fields */
+#define RCC_MP_S_APB3LPENSETR_SYSCFGLPEN BIT(0)
+
+/* RCC_MP_S_APB3LPENCLRR register fields */
+#define RCC_MP_S_APB3LPENCLRR_SYSCFGLPEN BIT(0)
+
+/* RCC_MP_NS_APB3LPENSETR register fields */
+#define RCC_MP_NS_APB3LPENSETR_SYSCFGLPEN BIT(0)
+
+/* RCC_MP_NS_APB3LPENCLRR register fields */
+#define RCC_MP_NS_APB3LPENCLRR_SYSCFGLPEN BIT(0)
+
+/* RCC_MP_APB4LPENSETR register fields */
+#define RCC_MP_APB4LPENSETR_DCMIPPLPEN BIT(1)
+#define RCC_MP_APB4LPENSETR_DDRPERFMLPEN BIT(8)
+#define RCC_MP_APB4LPENSETR_IWDG2APBLPEN BIT(15)
+#define RCC_MP_APB4LPENSETR_USBPHYLPEN BIT(16)
+#define RCC_MP_APB4LPENSETR_STGENROLPEN BIT(20)
+#define RCC_MP_APB4LPENSETR_STGENROSTPEN BIT(21)
+
+/* RCC_MP_APB4LPENCLRR register fields */
+#define RCC_MP_APB4LPENCLRR_DCMIPPLPEN BIT(1)
+#define RCC_MP_APB4LPENCLRR_DDRPERFMLPEN BIT(8)
+#define RCC_MP_APB4LPENCLRR_IWDG2APBLPEN BIT(15)
+#define RCC_MP_APB4LPENCLRR_USBPHYLPEN BIT(16)
+#define RCC_MP_APB4LPENCLRR_STGENROLPEN BIT(20)
+#define RCC_MP_APB4LPENCLRR_STGENROSTPEN BIT(21)
+
+/* RCC_MP_S_APB4LPENSETR register fields */
+#define RCC_MP_S_APB4LPENSETR_LTDCLPEN BIT(0)
+
+/* RCC_MP_S_APB4LPENCLRR register fields */
+#define RCC_MP_S_APB4LPENCLRR_LTDCLPEN BIT(0)
+
+/* RCC_MP_NS_APB4LPENSETR register fields */
+#define RCC_MP_NS_APB4LPENSETR_LTDCLPEN BIT(0)
+
+/* RCC_MP_NS_APB4LPENCLRR register fields */
+#define RCC_MP_NS_APB4LPENCLRR_LTDCLPEN BIT(0)
+
+/* RCC_MP_APB5LPENSETR register fields */
+#define RCC_MP_APB5LPENSETR_RTCAPBLPEN BIT(8)
+#define RCC_MP_APB5LPENSETR_TZCLPEN BIT(11)
+#define RCC_MP_APB5LPENSETR_ETZPCLPEN BIT(13)
+#define RCC_MP_APB5LPENSETR_IWDG1APBLPEN BIT(15)
+#define RCC_MP_APB5LPENSETR_BSECLPEN BIT(16)
+#define RCC_MP_APB5LPENSETR_STGENCLPEN BIT(20)
+#define RCC_MP_APB5LPENSETR_STGENCSTPEN BIT(21)
+
+/* RCC_MP_APB5LPENCLRR register fields */
+#define RCC_MP_APB5LPENCLRR_RTCAPBLPEN BIT(8)
+#define RCC_MP_APB5LPENCLRR_TZCLPEN BIT(11)
+#define RCC_MP_APB5LPENCLRR_ETZPCLPEN BIT(13)
+#define RCC_MP_APB5LPENCLRR_IWDG1APBLPEN BIT(15)
+#define RCC_MP_APB5LPENCLRR_BSECLPEN BIT(16)
+#define RCC_MP_APB5LPENCLRR_STGENCLPEN BIT(20)
+#define RCC_MP_APB5LPENCLRR_STGENCSTPEN BIT(21)
+
+/* RCC_MP_APB6LPENSETR register fields */
+#define RCC_MP_APB6LPENSETR_USART1LPEN BIT(0)
+#define RCC_MP_APB6LPENSETR_USART2LPEN BIT(1)
+#define RCC_MP_APB6LPENSETR_SPI4LPEN BIT(2)
+#define RCC_MP_APB6LPENSETR_SPI5LPEN BIT(3)
+#define RCC_MP_APB6LPENSETR_I2C3LPEN BIT(4)
+#define RCC_MP_APB6LPENSETR_I2C4LPEN BIT(5)
+#define RCC_MP_APB6LPENSETR_I2C5LPEN BIT(6)
+#define RCC_MP_APB6LPENSETR_TIM12LPEN BIT(7)
+#define RCC_MP_APB6LPENSETR_TIM13LPEN BIT(8)
+#define RCC_MP_APB6LPENSETR_TIM14LPEN BIT(9)
+#define RCC_MP_APB6LPENSETR_TIM15LPEN BIT(10)
+#define RCC_MP_APB6LPENSETR_TIM16LPEN BIT(11)
+#define RCC_MP_APB6LPENSETR_TIM17LPEN BIT(12)
+
+/* RCC_MP_APB6LPENCLRR register fields */
+#define RCC_MP_APB6LPENCLRR_USART1LPEN BIT(0)
+#define RCC_MP_APB6LPENCLRR_USART2LPEN BIT(1)
+#define RCC_MP_APB6LPENCLRR_SPI4LPEN BIT(2)
+#define RCC_MP_APB6LPENCLRR_SPI5LPEN BIT(3)
+#define RCC_MP_APB6LPENCLRR_I2C3LPEN BIT(4)
+#define RCC_MP_APB6LPENCLRR_I2C4LPEN BIT(5)
+#define RCC_MP_APB6LPENCLRR_I2C5LPEN BIT(6)
+#define RCC_MP_APB6LPENCLRR_TIM12LPEN BIT(7)
+#define RCC_MP_APB6LPENCLRR_TIM13LPEN BIT(8)
+#define RCC_MP_APB6LPENCLRR_TIM14LPEN BIT(9)
+#define RCC_MP_APB6LPENCLRR_TIM15LPEN BIT(10)
+#define RCC_MP_APB6LPENCLRR_TIM16LPEN BIT(11)
+#define RCC_MP_APB6LPENCLRR_TIM17LPEN BIT(12)
+
+/* RCC_MP_AHB2LPENSETR register fields */
+#define RCC_MP_AHB2LPENSETR_DMA1LPEN BIT(0)
+#define RCC_MP_AHB2LPENSETR_DMA2LPEN BIT(1)
+#define RCC_MP_AHB2LPENSETR_DMAMUX1LPEN BIT(2)
+#define RCC_MP_AHB2LPENSETR_DMA3LPEN BIT(3)
+#define RCC_MP_AHB2LPENSETR_DMAMUX2LPEN BIT(4)
+#define RCC_MP_AHB2LPENSETR_ADC1LPEN BIT(5)
+#define RCC_MP_AHB2LPENSETR_ADC2LPEN BIT(6)
+#define RCC_MP_AHB2LPENSETR_USBOLPEN BIT(8)
+
+/* RCC_MP_AHB2LPENCLRR register fields */
+#define RCC_MP_AHB2LPENCLRR_DMA1LPEN BIT(0)
+#define RCC_MP_AHB2LPENCLRR_DMA2LPEN BIT(1)
+#define RCC_MP_AHB2LPENCLRR_DMAMUX1LPEN BIT(2)
+#define RCC_MP_AHB2LPENCLRR_DMA3LPEN BIT(3)
+#define RCC_MP_AHB2LPENCLRR_DMAMUX2LPEN BIT(4)
+#define RCC_MP_AHB2LPENCLRR_ADC1LPEN BIT(5)
+#define RCC_MP_AHB2LPENCLRR_ADC2LPEN BIT(6)
+#define RCC_MP_AHB2LPENCLRR_USBOLPEN BIT(8)
+
+/* RCC_MP_AHB4LPENSETR register fields */
+#define RCC_MP_AHB4LPENSETR_TSCLPEN BIT(15)
+
+/* RCC_MP_AHB4LPENCLRR register fields */
+#define RCC_MP_AHB4LPENCLRR_TSCLPEN BIT(15)
+
+/* RCC_MP_S_AHB4LPENSETR register fields */
+#define RCC_MP_S_AHB4LPENSETR_GPIOALPEN BIT(0)
+#define RCC_MP_S_AHB4LPENSETR_GPIOBLPEN BIT(1)
+#define RCC_MP_S_AHB4LPENSETR_GPIOCLPEN BIT(2)
+#define RCC_MP_S_AHB4LPENSETR_GPIODLPEN BIT(3)
+#define RCC_MP_S_AHB4LPENSETR_GPIOELPEN BIT(4)
+#define RCC_MP_S_AHB4LPENSETR_GPIOFLPEN BIT(5)
+#define RCC_MP_S_AHB4LPENSETR_GPIOGLPEN BIT(6)
+#define RCC_MP_S_AHB4LPENSETR_GPIOHLPEN BIT(7)
+#define RCC_MP_S_AHB4LPENSETR_GPIOILPEN BIT(8)
+
+/* RCC_MP_S_AHB4LPENCLRR register fields */
+#define RCC_MP_S_AHB4LPENCLRR_GPIOALPEN BIT(0)
+#define RCC_MP_S_AHB4LPENCLRR_GPIOBLPEN BIT(1)
+#define RCC_MP_S_AHB4LPENCLRR_GPIOCLPEN BIT(2)
+#define RCC_MP_S_AHB4LPENCLRR_GPIODLPEN BIT(3)
+#define RCC_MP_S_AHB4LPENCLRR_GPIOELPEN BIT(4)
+#define RCC_MP_S_AHB4LPENCLRR_GPIOFLPEN BIT(5)
+#define RCC_MP_S_AHB4LPENCLRR_GPIOGLPEN BIT(6)
+#define RCC_MP_S_AHB4LPENCLRR_GPIOHLPEN BIT(7)
+#define RCC_MP_S_AHB4LPENCLRR_GPIOILPEN BIT(8)
+
+/* RCC_MP_NS_AHB4LPENSETR register fields */
+#define RCC_MP_NS_AHB4LPENSETR_GPIOALPEN BIT(0)
+#define RCC_MP_NS_AHB4LPENSETR_GPIOBLPEN BIT(1)
+#define RCC_MP_NS_AHB4LPENSETR_GPIOCLPEN BIT(2)
+#define RCC_MP_NS_AHB4LPENSETR_GPIODLPEN BIT(3)
+#define RCC_MP_NS_AHB4LPENSETR_GPIOELPEN BIT(4)
+#define RCC_MP_NS_AHB4LPENSETR_GPIOFLPEN BIT(5)
+#define RCC_MP_NS_AHB4LPENSETR_GPIOGLPEN BIT(6)
+#define RCC_MP_NS_AHB4LPENSETR_GPIOHLPEN BIT(7)
+#define RCC_MP_NS_AHB4LPENSETR_GPIOILPEN BIT(8)
+
+/* RCC_MP_NS_AHB4LPENCLRR register fields */
+#define RCC_MP_NS_AHB4LPENCLRR_GPIOALPEN BIT(0)
+#define RCC_MP_NS_AHB4LPENCLRR_GPIOBLPEN BIT(1)
+#define RCC_MP_NS_AHB4LPENCLRR_GPIOCLPEN BIT(2)
+#define RCC_MP_NS_AHB4LPENCLRR_GPIODLPEN BIT(3)
+#define RCC_MP_NS_AHB4LPENCLRR_GPIOELPEN BIT(4)
+#define RCC_MP_NS_AHB4LPENCLRR_GPIOFLPEN BIT(5)
+#define RCC_MP_NS_AHB4LPENCLRR_GPIOGLPEN BIT(6)
+#define RCC_MP_NS_AHB4LPENCLRR_GPIOHLPEN BIT(7)
+#define RCC_MP_NS_AHB4LPENCLRR_GPIOILPEN BIT(8)
+
+/* RCC_MP_AHB5LPENSETR register fields */
+#define RCC_MP_AHB5LPENSETR_PKALPEN BIT(2)
+#define RCC_MP_AHB5LPENSETR_SAESLPEN BIT(3)
+#define RCC_MP_AHB5LPENSETR_CRYP1LPEN BIT(4)
+#define RCC_MP_AHB5LPENSETR_HASH1LPEN BIT(5)
+#define RCC_MP_AHB5LPENSETR_RNG1LPEN BIT(6)
+#define RCC_MP_AHB5LPENSETR_BKPSRAMLPEN BIT(8)
+
+/* RCC_MP_AHB5LPENCLRR register fields */
+#define RCC_MP_AHB5LPENCLRR_PKALPEN BIT(2)
+#define RCC_MP_AHB5LPENCLRR_SAESLPEN BIT(3)
+#define RCC_MP_AHB5LPENCLRR_CRYP1LPEN BIT(4)
+#define RCC_MP_AHB5LPENCLRR_HASH1LPEN BIT(5)
+#define RCC_MP_AHB5LPENCLRR_RNG1LPEN BIT(6)
+#define RCC_MP_AHB5LPENCLRR_BKPSRAMLPEN BIT(8)
+
+/* RCC_MP_AHB6LPENSETR register fields */
+#define RCC_MP_AHB6LPENSETR_MCELPEN BIT(1)
+#define RCC_MP_AHB6LPENSETR_ETH1CKLPEN BIT(7)
+#define RCC_MP_AHB6LPENSETR_ETH1TXLPEN BIT(8)
+#define RCC_MP_AHB6LPENSETR_ETH1RXLPEN BIT(9)
+#define RCC_MP_AHB6LPENSETR_ETH1MACLPEN BIT(10)
+#define RCC_MP_AHB6LPENSETR_ETH1STPEN BIT(11)
+#define RCC_MP_AHB6LPENSETR_FMCLPEN BIT(12)
+#define RCC_MP_AHB6LPENSETR_QSPILPEN BIT(14)
+#define RCC_MP_AHB6LPENSETR_SDMMC1LPEN BIT(16)
+#define RCC_MP_AHB6LPENSETR_SDMMC2LPEN BIT(17)
+#define RCC_MP_AHB6LPENSETR_CRC1LPEN BIT(20)
+#define RCC_MP_AHB6LPENSETR_USBHLPEN BIT(24)
+#define RCC_MP_AHB6LPENSETR_ETH2CKLPEN BIT(27)
+#define RCC_MP_AHB6LPENSETR_ETH2TXLPEN BIT(28)
+#define RCC_MP_AHB6LPENSETR_ETH2RXLPEN BIT(29)
+#define RCC_MP_AHB6LPENSETR_ETH2MACLPEN BIT(30)
+#define RCC_MP_AHB6LPENSETR_ETH2STPEN BIT(31)
+
+/* RCC_MP_AHB6LPENCLRR register fields */
+#define RCC_MP_AHB6LPENCLRR_MCELPEN BIT(1)
+#define RCC_MP_AHB6LPENCLRR_ETH1CKLPEN BIT(7)
+#define RCC_MP_AHB6LPENCLRR_ETH1TXLPEN BIT(8)
+#define RCC_MP_AHB6LPENCLRR_ETH1RXLPEN BIT(9)
+#define RCC_MP_AHB6LPENCLRR_ETH1MACLPEN BIT(10)
+#define RCC_MP_AHB6LPENCLRR_ETH1STPEN BIT(11)
+#define RCC_MP_AHB6LPENCLRR_FMCLPEN BIT(12)
+#define RCC_MP_AHB6LPENCLRR_QSPILPEN BIT(14)
+#define RCC_MP_AHB6LPENCLRR_SDMMC1LPEN BIT(16)
+#define RCC_MP_AHB6LPENCLRR_SDMMC2LPEN BIT(17)
+#define RCC_MP_AHB6LPENCLRR_CRC1LPEN BIT(20)
+#define RCC_MP_AHB6LPENCLRR_USBHLPEN BIT(24)
+#define RCC_MP_AHB6LPENCLRR_ETH2CKLPEN BIT(27)
+#define RCC_MP_AHB6LPENCLRR_ETH2TXLPEN BIT(28)
+#define RCC_MP_AHB6LPENCLRR_ETH2RXLPEN BIT(29)
+#define RCC_MP_AHB6LPENCLRR_ETH2MACLPEN BIT(30)
+#define RCC_MP_AHB6LPENCLRR_ETH2STPEN BIT(31)
+
+/* RCC_MP_S_AHB6LPENSETR register fields */
+#define RCC_MP_S_AHB6LPENSETR_MDMALPEN BIT(0)
+
+/* RCC_MP_S_AHB6LPENCLRR register fields */
+#define RCC_MP_S_AHB6LPENCLRR_MDMALPEN BIT(0)
+
+/* RCC_MP_NS_AHB6LPENSETR register fields */
+#define RCC_MP_NS_AHB6LPENSETR_MDMALPEN BIT(0)
+
+/* RCC_MP_NS_AHB6LPENCLRR register fields */
+#define RCC_MP_NS_AHB6LPENCLRR_MDMALPEN BIT(0)
+
+/* RCC_MP_S_AXIMLPENSETR register fields */
+#define RCC_MP_S_AXIMLPENSETR_SYSRAMLPEN BIT(0)
+
+/* RCC_MP_S_AXIMLPENCLRR register fields */
+#define RCC_MP_S_AXIMLPENCLRR_SYSRAMLPEN BIT(0)
+
+/* RCC_MP_NS_AXIMLPENSETR register fields */
+#define RCC_MP_NS_AXIMLPENSETR_SYSRAMLPEN BIT(0)
+
+/* RCC_MP_NS_AXIMLPENCLRR register fields */
+#define RCC_MP_NS_AXIMLPENCLRR_SYSRAMLPEN BIT(0)
+
+/* RCC_MP_MLAHBLPENSETR register fields */
+#define RCC_MP_MLAHBLPENSETR_SRAM1LPEN BIT(0)
+#define RCC_MP_MLAHBLPENSETR_SRAM2LPEN BIT(1)
+#define RCC_MP_MLAHBLPENSETR_SRAM3LPEN BIT(2)
+
+/* RCC_MP_MLAHBLPENCLRR register fields */
+#define RCC_MP_MLAHBLPENCLRR_SRAM1LPEN BIT(0)
+#define RCC_MP_MLAHBLPENCLRR_SRAM2LPEN BIT(1)
+#define RCC_MP_MLAHBLPENCLRR_SRAM3LPEN BIT(2)
+
+/* RCC_APB3SECSR register fields */
+#define RCC_APB3SECSR_LPTIM2SECF 0
+#define RCC_APB3SECSR_LPTIM3SECF 1
+#define RCC_APB3SECSR_VREFSECF 13
+
+/* RCC_APB4SECSR register fields */
+#define RCC_APB4SECSR_DCMIPPSECF 1
+#define RCC_APB4SECSR_USBPHYSECF 16
+
+/* RCC_APB5SECSR register fields */
+#define RCC_APB5SECSR_RTCSECF 8
+#define RCC_APB5SECSR_TZCSECF 11
+#define RCC_APB5SECSR_ETZPCSECF 13
+#define RCC_APB5SECSR_IWDG1SECF 15
+#define RCC_APB5SECSR_BSECSECF 16
+#define RCC_APB5SECSR_STGENCSECF_MASK GENMASK(21, 20)
+#define RCC_APB5SECSR_STGENCSECF 20
+#define RCC_APB5SECSR_STGENROSECF 21
+
+/* RCC_APB6SECSR register fields */
+#define RCC_APB6SECSR_USART1SECF 0
+#define RCC_APB6SECSR_USART2SECF 1
+#define RCC_APB6SECSR_SPI4SECF 2
+#define RCC_APB6SECSR_SPI5SECF 3
+#define RCC_APB6SECSR_I2C3SECF 4
+#define RCC_APB6SECSR_I2C4SECF 5
+#define RCC_APB6SECSR_I2C5SECF 6
+#define RCC_APB6SECSR_TIM12SECF 7
+#define RCC_APB6SECSR_TIM13SECF 8
+#define RCC_APB6SECSR_TIM14SECF 9
+#define RCC_APB6SECSR_TIM15SECF 10
+#define RCC_APB6SECSR_TIM16SECF 11
+#define RCC_APB6SECSR_TIM17SECF 12
+
+/* RCC_AHB2SECSR register fields */
+#define RCC_AHB2SECSR_DMA3SECF 3
+#define RCC_AHB2SECSR_DMAMUX2SECF 4
+#define RCC_AHB2SECSR_ADC1SECF 5
+#define RCC_AHB2SECSR_ADC2SECF 6
+#define RCC_AHB2SECSR_USBOSECF 8
+
+/* RCC_AHB4SECSR register fields */
+#define RCC_AHB4SECSR_TSCSECF 15
+
+/* RCC_AHB5SECSR register fields */
+#define RCC_AHB5SECSR_PKASECF 2
+#define RCC_AHB5SECSR_SAESSECF 3
+#define RCC_AHB5SECSR_CRYP1SECF 4
+#define RCC_AHB5SECSR_HASH1SECF 5
+#define RCC_AHB5SECSR_RNG1SECF 6
+#define RCC_AHB5SECSR_BKPSRAMSECF 8
+
+/* RCC_AHB6SECSR register fields */
+#define RCC_AHB6SECSR_MCESECF 1
+#define RCC_AHB6SECSR_FMCSECF 12
+#define RCC_AHB6SECSR_QSPISECF 14
+#define RCC_AHB6SECSR_SDMMC1SECF 16
+#define RCC_AHB6SECSR_SDMMC2SECF 17
+
+#define RCC_AHB6SECSR_ETH1SECF_MASK GENMASK(11, 7)
+#define RCC_AHB6SECSR_ETH2SECF_MASK GENMASK(31, 27)
+#define RCC_AHB6SECSR_ETH1SECF_SHIFT 7
+#define RCC_AHB6SECSR_ETH2SECF_SHIFT 27
+
+#define RCC_AHB6SECSR_ETH1CKSECF 7
+#define RCC_AHB6SECSR_ETH1TXSECF 8
+#define RCC_AHB6SECSR_ETH1RXSECF 9
+#define RCC_AHB6SECSR_ETH1MACSECF 10
+#define RCC_AHB6SECSR_ETH1STPSECF 11
+
+#define RCC_AHB6SECSR_ETH2CKSECF 27
+#define RCC_AHB6SECSR_ETH2TXSECF 28
+#define RCC_AHB6SECSR_ETH2RXSECF 29
+#define RCC_AHB6SECSR_ETH2MACSECF 30
+#define RCC_AHB6SECSR_ETH2STPSECF 31
+
+/* RCC_VERR register fields */
+#define RCC_VERR_MINREV_MASK GENMASK(3, 0)
+#define RCC_VERR_MAJREV_MASK GENMASK(7, 4)
+#define RCC_VERR_MINREV_SHIFT 0
+#define RCC_VERR_MAJREV_SHIFT 4
+
+/* RCC_IDR register fields */
+#define RCC_IDR_ID_MASK GENMASK(31, 0)
+#define RCC_IDR_ID_SHIFT 0
+
+/* RCC_SIDR register fields */
+#define RCC_SIDR_SID_MASK GENMASK(31, 0)
+#define RCC_SIDR_SID_SHIFT 0
+
+#endif /* STM32MP13_RCC_H */
+
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index 712e103382d8..29a8c710ae06 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -98,6 +98,8 @@ static SUNXI_CCU_GATE(r_apb1_ir_clk, "r-apb1-ir", "r-apb1",
0x1cc, BIT(0), 0);
static SUNXI_CCU_GATE(r_apb1_w1_clk, "r-apb1-w1", "r-apb1",
0x1ec, BIT(0), 0);
+static SUNXI_CCU_GATE(r_apb1_rtc_clk, "r-apb1-rtc", "r-apb1",
+ 0x20c, BIT(0), CLK_IGNORE_UNUSED);
/* Information of IR(RX) mod clock is gathered from BSP source code */
static const char * const r_mod0_default_parents[] = { "osc32k", "osc24M" };
@@ -147,6 +149,7 @@ static struct ccu_common *sun50i_h616_r_ccu_clks[] = {
&r_apb2_i2c_clk.common,
&r_apb2_rsb_clk.common,
&r_apb1_ir_clk.common,
+ &r_apb1_rtc_clk.common,
&ir_clk.common,
};
@@ -164,6 +167,7 @@ static struct clk_hw_onecell_data sun50i_h6_r_hw_clks = {
[CLK_R_APB2_RSB] = &r_apb2_rsb_clk.common.hw,
[CLK_R_APB1_IR] = &r_apb1_ir_clk.common.hw,
[CLK_R_APB1_W1] = &r_apb1_w1_clk.common.hw,
+ [CLK_R_APB1_RTC] = &r_apb1_rtc_clk.common.hw,
[CLK_IR] = &ir_clk.common.hw,
[CLK_W1] = &w1_clk.common.hw,
},
@@ -179,6 +183,7 @@ static struct clk_hw_onecell_data sun50i_h616_r_hw_clks = {
[CLK_R_APB2_I2C] = &r_apb2_i2c_clk.common.hw,
[CLK_R_APB2_RSB] = &r_apb2_rsb_clk.common.hw,
[CLK_R_APB1_IR] = &r_apb1_ir_clk.common.hw,
+ [CLK_R_APB1_RTC] = &r_apb1_rtc_clk.common.hw,
[CLK_IR] = &ir_clk.common.hw,
},
.num = CLK_NUMBER,
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h
index 7e290b840803..10e9b66afc6a 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h
@@ -14,6 +14,6 @@
#define CLK_R_APB2 3
-#define CLK_NUMBER (CLK_R_APB2_RSB + 1)
+#define CLK_NUMBER (CLK_R_APB1_RTC + 1)
#endif /* _CCU_SUN50I_H6_R_H */
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index 49a2474cf314..21e918582aa5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -704,6 +704,13 @@ static CLK_FIXED_FACTOR_HWS(pll_periph0_2x_clk, "pll-periph0-2x",
pll_periph0_parents,
1, 2, 0);
+static const struct clk_hw *pll_periph0_2x_hws[] = {
+ &pll_periph0_2x_clk.hw
+};
+
+static CLK_FIXED_FACTOR_HWS(pll_system_32k_clk, "pll-system-32k",
+ pll_periph0_2x_hws, 36621, 1, 0);
+
static const struct clk_hw *pll_periph1_parents[] = {
&pll_periph1_clk.common.hw
};
@@ -852,6 +859,7 @@ static struct clk_hw_onecell_data sun50i_h616_hw_clks = {
[CLK_PLL_DDR1] = &pll_ddr1_clk.common.hw,
[CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw,
[CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw,
+ [CLK_PLL_SYSTEM_32K] = &pll_system_32k_clk.hw,
[CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw,
[CLK_PLL_PERIPH1_2X] = &pll_periph1_2x_clk.hw,
[CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.h b/drivers/clk/sunxi-ng/ccu-sun50i-h616.h
index dd671b413f22..fdd2f4d5103f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.h
@@ -51,6 +51,6 @@
#define CLK_BUS_DRAM 56
-#define CLK_NUMBER (CLK_BUS_HDCP + 1)
+#define CLK_NUMBER (CLK_PLL_SYSTEM_32K + 1)
#endif /* _CCU_SUN50I_H616_H_ */
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index 6ecf18f71c32..3748a39dae7c 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -164,15 +164,18 @@ static unsigned long tegra_bpmp_clk_recalc_rate(struct clk_hw *hw,
return response.rate;
}
-static long tegra_bpmp_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int tegra_bpmp_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *rate_req)
{
struct tegra_bpmp_clk *clk = to_tegra_bpmp_clk(hw);
struct cmd_clk_round_rate_response response;
struct cmd_clk_round_rate_request request;
struct tegra_bpmp_clk_message msg;
+ unsigned long rate;
int err;
+ rate = min(max(rate_req->rate, rate_req->min_rate), rate_req->max_rate);
+
memset(&request, 0, sizeof(request));
request.rate = min_t(u64, rate, S64_MAX);
@@ -188,7 +191,9 @@ static long tegra_bpmp_clk_round_rate(struct clk_hw *hw, unsigned long rate,
if (err < 0)
return err;
- return response.rate;
+ rate_req->rate = (unsigned long)response.rate;
+
+ return 0;
}
static int tegra_bpmp_clk_set_parent(struct clk_hw *hw, u8 index)
@@ -290,7 +295,7 @@ static const struct clk_ops tegra_bpmp_clk_rate_ops = {
.unprepare = tegra_bpmp_clk_unprepare,
.is_prepared = tegra_bpmp_clk_is_prepared,
.recalc_rate = tegra_bpmp_clk_recalc_rate,
- .round_rate = tegra_bpmp_clk_round_rate,
+ .determine_rate = tegra_bpmp_clk_determine_rate,
.set_rate = tegra_bpmp_clk_set_rate,
};
@@ -299,7 +304,7 @@ static const struct clk_ops tegra_bpmp_clk_mux_rate_ops = {
.unprepare = tegra_bpmp_clk_unprepare,
.is_prepared = tegra_bpmp_clk_is_prepared,
.recalc_rate = tegra_bpmp_clk_recalc_rate,
- .round_rate = tegra_bpmp_clk_round_rate,
+ .determine_rate = tegra_bpmp_clk_determine_rate,
.set_parent = tegra_bpmp_clk_set_parent,
.get_parent = tegra_bpmp_clk_get_parent,
.set_rate = tegra_bpmp_clk_set_rate,
@@ -448,15 +453,29 @@ static int tegra_bpmp_probe_clocks(struct tegra_bpmp *bpmp,
return count;
}
+static unsigned int
+tegra_bpmp_clk_id_to_index(const struct tegra_bpmp_clk_info *clocks,
+ unsigned int num_clocks, unsigned int id)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_clocks; i++)
+ if (clocks[i].id == id)
+ return i;
+
+ return UINT_MAX;
+}
+
static const struct tegra_bpmp_clk_info *
tegra_bpmp_clk_find(const struct tegra_bpmp_clk_info *clocks,
unsigned int num_clocks, unsigned int id)
{
unsigned int i;
- for (i = 0; i < num_clocks; i++)
- if (clocks[i].id == id)
- return &clocks[i];
+ i = tegra_bpmp_clk_id_to_index(clocks, num_clocks, id);
+
+ if (i < num_clocks)
+ return &clocks[i];
return NULL;
}
@@ -539,31 +558,57 @@ tegra_bpmp_clk_register(struct tegra_bpmp *bpmp,
return clk;
}
+static void tegra_bpmp_register_clocks_one(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_clk_info *infos,
+ unsigned int i,
+ unsigned int count)
+{
+ unsigned int j;
+ struct tegra_bpmp_clk_info *info;
+ struct tegra_bpmp_clk *clk;
+
+ if (bpmp->clocks[i]) {
+ /* already registered */
+ return;
+ }
+
+ info = &infos[i];
+ for (j = 0; j < info->num_parents; ++j) {
+ unsigned int p_id = info->parents[j];
+ unsigned int p_i = tegra_bpmp_clk_id_to_index(infos, count,
+ p_id);
+ if (p_i < count)
+ tegra_bpmp_register_clocks_one(bpmp, infos, p_i, count);
+ }
+
+ clk = tegra_bpmp_clk_register(bpmp, info, infos, count);
+ if (IS_ERR(clk)) {
+ dev_err(bpmp->dev,
+ "failed to register clock %u (%s): %ld\n",
+ info->id, info->name, PTR_ERR(clk));
+		/*
+		 * Intentionally store the error pointer to bpmp->clocks[i]
+		 * to avoid re-attempting the registration later.
+		 */
+ }
+
+ bpmp->clocks[i] = clk;
+}
+
static int tegra_bpmp_register_clocks(struct tegra_bpmp *bpmp,
struct tegra_bpmp_clk_info *infos,
unsigned int count)
{
- struct tegra_bpmp_clk *clk;
unsigned int i;
bpmp->num_clocks = count;
- bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(clk), GFP_KERNEL);
+	bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(*bpmp->clocks), GFP_KERNEL);
if (!bpmp->clocks)
return -ENOMEM;
for (i = 0; i < count; i++) {
- struct tegra_bpmp_clk_info *info = &infos[i];
-
- clk = tegra_bpmp_clk_register(bpmp, info, infos, count);
- if (IS_ERR(clk)) {
- dev_err(bpmp->dev,
- "failed to register clock %u (%s): %ld\n",
- info->id, info->name, PTR_ERR(clk));
- continue;
- }
-
- bpmp->clocks[i] = clk;
+ tegra_bpmp_register_clocks_one(bpmp, infos, i, count);
}
return 0;
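The helper above effectively performs a depth-first, parents-before-children walk over the firmware's clock list, so every clock's parents exist by the time the clock itself is registered. Reduced to its bare shape (a simplified sketch, not the driver's real data structures):

struct fake_clk {
	int parent;		/* index of the parent, or -1 for a root */
	bool registered;
};

static void register_with_parents(struct fake_clk *clks, int i)
{
	if (clks[i].registered)
		return;

	if (clks[i].parent >= 0)
		register_with_parents(clks, clks[i].parent);	/* parents first */

	clks[i].registered = true;				/* then this clock */
}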
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 6144447f86c6..41433927b55c 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -271,6 +271,7 @@ struct tegra_dfll {
struct clk *ref_clk;
struct clk *i2c_clk;
struct clk *dfll_clk;
+ struct reset_control *dfll_rst;
struct reset_control *dvco_rst;
unsigned long ref_rate;
unsigned long i2c_clk_rate;
@@ -666,7 +667,7 @@ static int dfll_force_output(struct tegra_dfll *td, unsigned int out_sel)
}
/**
- * dfll_load_lut - load the voltage lookup table
+ * dfll_load_i2c_lut - load the voltage lookup table
* @td: struct tegra_dfll *
*
* Load the voltage-to-PMIC register value lookup table into the DFLL
@@ -897,7 +898,7 @@ static void dfll_set_frequency_request(struct tegra_dfll *td,
}
/**
- * tegra_dfll_request_rate - set the next rate for the DFLL to tune to
+ * dfll_request_rate - set the next rate for the DFLL to tune to
* @td: DFLL instance
* @rate: clock rate to target
*
@@ -1005,7 +1006,7 @@ static void dfll_set_open_loop_config(struct tegra_dfll *td)
}
/**
- * tegra_dfll_lock - switch from open-loop to closed-loop mode
+ * dfll_lock - switch from open-loop to closed-loop mode
* @td: DFLL instance
*
* Switch from OPEN_LOOP state to CLOSED_LOOP state. Returns 0 upon success,
@@ -1046,7 +1047,7 @@ static int dfll_lock(struct tegra_dfll *td)
}
/**
- * tegra_dfll_unlock - switch from closed-loop to open-loop mode
+ * dfll_unlock - switch from closed-loop to open-loop mode
* @td: DFLL instance
*
* Switch from CLOSED_LOOP state to OPEN_LOOP state. Returns 0 upon success,
@@ -1464,6 +1465,7 @@ static int dfll_init(struct tegra_dfll *td)
return -EINVAL;
}
+ reset_control_deassert(td->dfll_rst);
reset_control_deassert(td->dvco_rst);
ret = clk_prepare(td->ref_clk);
@@ -1509,6 +1511,7 @@ di_err1:
clk_unprepare(td->ref_clk);
reset_control_assert(td->dvco_rst);
+ reset_control_assert(td->dfll_rst);
return ret;
}
@@ -1530,6 +1533,7 @@ int tegra_dfll_suspend(struct device *dev)
}
reset_control_assert(td->dvco_rst);
+ reset_control_assert(td->dfll_rst);
return 0;
}
@@ -1548,6 +1552,7 @@ int tegra_dfll_resume(struct device *dev)
{
struct tegra_dfll *td = dev_get_drvdata(dev);
+ reset_control_deassert(td->dfll_rst);
reset_control_deassert(td->dvco_rst);
pm_runtime_get_sync(td->dev);
@@ -1951,6 +1956,12 @@ int tegra_dfll_register(struct platform_device *pdev,
td->soc = soc;
+ td->dfll_rst = devm_reset_control_get_optional(td->dev, "dfll");
+ if (IS_ERR(td->dfll_rst)) {
+ dev_err(td->dev, "couldn't get dfll reset\n");
+ return PTR_ERR(td->dfll_rst);
+ }
+
td->dvco_rst = devm_reset_control_get(td->dev, "dvco");
if (IS_ERR(td->dvco_rst)) {
dev_err(td->dev, "couldn't get dvco reset\n");
@@ -2087,6 +2098,7 @@ struct tegra_dfll_soc_data *tegra_dfll_unregister(struct platform_device *pdev)
clk_unprepare(td->i2c_clk);
reset_control_assert(td->dvco_rst);
+ reset_control_assert(td->dfll_rst);
return td->soc;
}
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 064066e9e85b..617360e20d86 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -232,8 +232,7 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
void *data)
{
struct omap_clkctrl_provider *provider = data;
- struct omap_clkctrl_clk *entry;
- bool found = false;
+ struct omap_clkctrl_clk *entry = NULL, *iter;
if (clkspec->args_count != 2)
return ERR_PTR(-EINVAL);
@@ -241,15 +240,15 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
pr_debug("%s: looking for %x:%x\n", __func__,
clkspec->args[0], clkspec->args[1]);
- list_for_each_entry(entry, &provider->clocks, node) {
- if (entry->reg_offset == clkspec->args[0] &&
- entry->bit_offset == clkspec->args[1]) {
- found = true;
+ list_for_each_entry(iter, &provider->clocks, node) {
+ if (iter->reg_offset == clkspec->args[0] &&
+ iter->bit_offset == clkspec->args[1]) {
+ entry = iter;
break;
}
}
- if (!found)
+ if (!entry)
return ERR_PTR(-EINVAL);
return entry->clk;
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index 8d60319be368..779b9900f636 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -255,7 +255,7 @@ int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw,
return -EINVAL;
}
- parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
+ parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
if (!parent_names)
return -ENOMEM;
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index 937b6bb82b30..4deb37f19a7c 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -14,27 +14,28 @@
#include "clk.h"
#define to_clk_prcmu(_hw) container_of(_hw, struct clk_prcmu, hw)
+#define to_clk_prcmu_clkout(_hw) container_of(_hw, struct clk_prcmu_clkout, hw)
struct clk_prcmu {
struct clk_hw hw;
u8 cg_sel;
- int is_prepared;
- int is_enabled;
int opp_requested;
};
+struct clk_prcmu_clkout {
+ struct clk_hw hw;
+ u8 clkout_id;
+ u8 source;
+ u8 divider;
+};
+
/* PRCMU clock operations. */
static int clk_prcmu_prepare(struct clk_hw *hw)
{
- int ret;
struct clk_prcmu *clk = to_clk_prcmu(hw);
- ret = prcmu_request_clock(clk->cg_sel, true);
- if (!ret)
- clk->is_prepared = 1;
-
- return ret;
+ return prcmu_request_clock(clk->cg_sel, true);
}
static void clk_prcmu_unprepare(struct clk_hw *hw)
@@ -42,34 +43,7 @@ static void clk_prcmu_unprepare(struct clk_hw *hw)
struct clk_prcmu *clk = to_clk_prcmu(hw);
if (prcmu_request_clock(clk->cg_sel, false))
pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
- clk_hw_get_name(hw));
- else
- clk->is_prepared = 0;
-}
-
-static int clk_prcmu_is_prepared(struct clk_hw *hw)
-{
- struct clk_prcmu *clk = to_clk_prcmu(hw);
- return clk->is_prepared;
-}
-
-static int clk_prcmu_enable(struct clk_hw *hw)
-{
- struct clk_prcmu *clk = to_clk_prcmu(hw);
- clk->is_enabled = 1;
- return 0;
-}
-
-static void clk_prcmu_disable(struct clk_hw *hw)
-{
- struct clk_prcmu *clk = to_clk_prcmu(hw);
- clk->is_enabled = 0;
-}
-
-static int clk_prcmu_is_enabled(struct clk_hw *hw)
-{
- struct clk_prcmu *clk = to_clk_prcmu(hw);
- return clk->is_enabled;
+ clk_hw_get_name(hw));
}
static unsigned long clk_prcmu_recalc_rate(struct clk_hw *hw,
@@ -118,7 +92,6 @@ static int clk_prcmu_opp_prepare(struct clk_hw *hw)
return err;
}
- clk->is_prepared = 1;
return 0;
}
@@ -137,8 +110,6 @@ static void clk_prcmu_opp_unprepare(struct clk_hw *hw)
(char *)clk_hw_get_name(hw));
clk->opp_requested = 0;
}
-
- clk->is_prepared = 0;
}
static int clk_prcmu_opp_volt_prepare(struct clk_hw *hw)
@@ -163,7 +134,6 @@ static int clk_prcmu_opp_volt_prepare(struct clk_hw *hw)
return err;
}
- clk->is_prepared = 1;
return 0;
}
@@ -181,17 +151,11 @@ static void clk_prcmu_opp_volt_unprepare(struct clk_hw *hw)
prcmu_request_ape_opp_100_voltage(false);
clk->opp_requested = 0;
}
-
- clk->is_prepared = 0;
}
static const struct clk_ops clk_prcmu_scalable_ops = {
.prepare = clk_prcmu_prepare,
.unprepare = clk_prcmu_unprepare,
- .is_prepared = clk_prcmu_is_prepared,
- .enable = clk_prcmu_enable,
- .disable = clk_prcmu_disable,
- .is_enabled = clk_prcmu_is_enabled,
.recalc_rate = clk_prcmu_recalc_rate,
.round_rate = clk_prcmu_round_rate,
.set_rate = clk_prcmu_set_rate,
@@ -200,57 +164,43 @@ static const struct clk_ops clk_prcmu_scalable_ops = {
static const struct clk_ops clk_prcmu_gate_ops = {
.prepare = clk_prcmu_prepare,
.unprepare = clk_prcmu_unprepare,
- .is_prepared = clk_prcmu_is_prepared,
- .enable = clk_prcmu_enable,
- .disable = clk_prcmu_disable,
- .is_enabled = clk_prcmu_is_enabled,
.recalc_rate = clk_prcmu_recalc_rate,
};
static const struct clk_ops clk_prcmu_scalable_rate_ops = {
- .is_enabled = clk_prcmu_is_enabled,
.recalc_rate = clk_prcmu_recalc_rate,
.round_rate = clk_prcmu_round_rate,
.set_rate = clk_prcmu_set_rate,
};
static const struct clk_ops clk_prcmu_rate_ops = {
- .is_enabled = clk_prcmu_is_enabled,
.recalc_rate = clk_prcmu_recalc_rate,
};
static const struct clk_ops clk_prcmu_opp_gate_ops = {
.prepare = clk_prcmu_opp_prepare,
.unprepare = clk_prcmu_opp_unprepare,
- .is_prepared = clk_prcmu_is_prepared,
- .enable = clk_prcmu_enable,
- .disable = clk_prcmu_disable,
- .is_enabled = clk_prcmu_is_enabled,
.recalc_rate = clk_prcmu_recalc_rate,
};
static const struct clk_ops clk_prcmu_opp_volt_scalable_ops = {
.prepare = clk_prcmu_opp_volt_prepare,
.unprepare = clk_prcmu_opp_volt_unprepare,
- .is_prepared = clk_prcmu_is_prepared,
- .enable = clk_prcmu_enable,
- .disable = clk_prcmu_disable,
- .is_enabled = clk_prcmu_is_enabled,
.recalc_rate = clk_prcmu_recalc_rate,
.round_rate = clk_prcmu_round_rate,
.set_rate = clk_prcmu_set_rate,
};
-static struct clk *clk_reg_prcmu(const char *name,
- const char *parent_name,
- u8 cg_sel,
- unsigned long rate,
- unsigned long flags,
- const struct clk_ops *clk_prcmu_ops)
+static struct clk_hw *clk_reg_prcmu(const char *name,
+ const char *parent_name,
+ u8 cg_sel,
+ unsigned long rate,
+ unsigned long flags,
+ const struct clk_ops *clk_prcmu_ops)
{
struct clk_prcmu *clk;
struct clk_init_data clk_prcmu_init;
- struct clk *clk_reg;
+ int ret;
if (!name) {
pr_err("clk_prcmu: %s invalid arguments passed\n", __func__);
@@ -262,8 +212,6 @@ static struct clk *clk_reg_prcmu(const char *name,
return ERR_PTR(-ENOMEM);
clk->cg_sel = cg_sel;
- clk->is_prepared = 1;
- clk->is_enabled = 1;
clk->opp_requested = 0;
/* "rate" can be used for changing the initial frequency */
if (rate)
@@ -276,11 +224,11 @@ static struct clk *clk_reg_prcmu(const char *name,
clk_prcmu_init.num_parents = (parent_name ? 1 : 0);
clk->hw.init = &clk_prcmu_init;
- clk_reg = clk_register(NULL, &clk->hw);
- if (IS_ERR_OR_NULL(clk_reg))
+ ret = clk_hw_register(NULL, &clk->hw);
+ if (ret)
goto free_clk;
- return clk_reg;
+ return &clk->hw;
free_clk:
kfree(clk);
@@ -288,59 +236,165 @@ free_clk:
return ERR_PTR(-ENOMEM);
}
-struct clk *clk_reg_prcmu_scalable(const char *name,
- const char *parent_name,
- u8 cg_sel,
- unsigned long rate,
- unsigned long flags)
+struct clk_hw *clk_reg_prcmu_scalable(const char *name,
+ const char *parent_name,
+ u8 cg_sel,
+ unsigned long rate,
+ unsigned long flags)
{
return clk_reg_prcmu(name, parent_name, cg_sel, rate, flags,
&clk_prcmu_scalable_ops);
}
-struct clk *clk_reg_prcmu_gate(const char *name,
- const char *parent_name,
- u8 cg_sel,
- unsigned long flags)
+struct clk_hw *clk_reg_prcmu_gate(const char *name,
+ const char *parent_name,
+ u8 cg_sel,
+ unsigned long flags)
{
return clk_reg_prcmu(name, parent_name, cg_sel, 0, flags,
&clk_prcmu_gate_ops);
}
-struct clk *clk_reg_prcmu_scalable_rate(const char *name,
- const char *parent_name,
- u8 cg_sel,
- unsigned long rate,
- unsigned long flags)
+struct clk_hw *clk_reg_prcmu_scalable_rate(const char *name,
+ const char *parent_name,
+ u8 cg_sel,
+ unsigned long rate,
+ unsigned long flags)
{
return clk_reg_prcmu(name, parent_name, cg_sel, rate, flags,
&clk_prcmu_scalable_rate_ops);
}
-struct clk *clk_reg_prcmu_rate(const char *name,
- const char *parent_name,
- u8 cg_sel,
- unsigned long flags)
+struct clk_hw *clk_reg_prcmu_rate(const char *name,
+ const char *parent_name,
+ u8 cg_sel,
+ unsigned long flags)
{
return clk_reg_prcmu(name, parent_name, cg_sel, 0, flags,
&clk_prcmu_rate_ops);
}
-struct clk *clk_reg_prcmu_opp_gate(const char *name,
- const char *parent_name,
- u8 cg_sel,
- unsigned long flags)
+struct clk_hw *clk_reg_prcmu_opp_gate(const char *name,
+ const char *parent_name,
+ u8 cg_sel,
+ unsigned long flags)
{
return clk_reg_prcmu(name, parent_name, cg_sel, 0, flags,
&clk_prcmu_opp_gate_ops);
}
-struct clk *clk_reg_prcmu_opp_volt_scalable(const char *name,
- const char *parent_name,
- u8 cg_sel,
- unsigned long rate,
- unsigned long flags)
+struct clk_hw *clk_reg_prcmu_opp_volt_scalable(const char *name,
+ const char *parent_name,
+ u8 cg_sel,
+ unsigned long rate,
+ unsigned long flags)
{
return clk_reg_prcmu(name, parent_name, cg_sel, rate, flags,
&clk_prcmu_opp_volt_scalable_ops);
}
+
+/* The clkout (external) clock is special and needs special ops */
+
+static int clk_prcmu_clkout_prepare(struct clk_hw *hw)
+{
+ struct clk_prcmu_clkout *clk = to_clk_prcmu_clkout(hw);
+
+ return prcmu_config_clkout(clk->clkout_id, clk->source, clk->divider);
+}
+
+static void clk_prcmu_clkout_unprepare(struct clk_hw *hw)
+{
+ struct clk_prcmu_clkout *clk = to_clk_prcmu_clkout(hw);
+ int ret;
+
+	/* The clkout clock is disabled by setting the divider to 0 */
+ ret = prcmu_config_clkout(clk->clkout_id, clk->source, 0);
+ if (ret)
+ pr_err("clk_prcmu: %s failed to disable %s\n", __func__,
+ clk_hw_get_name(hw));
+}
+
+static unsigned long clk_prcmu_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_prcmu_clkout *clk = to_clk_prcmu_clkout(hw);
+
+ return (parent_rate / clk->divider);
+}
+
+static u8 clk_prcmu_clkout_get_parent(struct clk_hw *hw)
+{
+ struct clk_prcmu_clkout *clk = to_clk_prcmu_clkout(hw);
+
+ return clk->source;
+}
+
+static int clk_prcmu_clkout_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_prcmu_clkout *clk = to_clk_prcmu_clkout(hw);
+
+ clk->source = index;
+ /* Make sure the change reaches the hardware immediately */
+ if (clk_hw_is_prepared(hw))
+ return clk_prcmu_clkout_prepare(hw);
+ return 0;
+}
+
+static const struct clk_ops clk_prcmu_clkout_ops = {
+ .prepare = clk_prcmu_clkout_prepare,
+ .unprepare = clk_prcmu_clkout_unprepare,
+ .recalc_rate = clk_prcmu_clkout_recalc_rate,
+ .get_parent = clk_prcmu_clkout_get_parent,
+ .set_parent = clk_prcmu_clkout_set_parent,
+};
+
+struct clk_hw *clk_reg_prcmu_clkout(const char *name,
+ const char * const *parent_names,
+ int num_parents,
+ u8 source, u8 divider)
+
+{
+ struct clk_prcmu_clkout *clk;
+ struct clk_init_data clk_prcmu_clkout_init;
+ u8 clkout_id;
+ int ret;
+
+ if (!name) {
+ pr_err("clk_prcmu_clkout: %s invalid arguments passed\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!strcmp(name, "clkout1"))
+ clkout_id = 0;
+ else if (!strcmp(name, "clkout2"))
+ clkout_id = 1;
+ else {
+ pr_err("clk_prcmu_clkout: %s bad clock name\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk)
+ return ERR_PTR(-ENOMEM);
+
+ clk->clkout_id = clkout_id;
+ clk->source = source;
+ clk->divider = divider;
+
+ clk_prcmu_clkout_init.name = name;
+ clk_prcmu_clkout_init.ops = &clk_prcmu_clkout_ops;
+ clk_prcmu_clkout_init.flags = CLK_GET_RATE_NOCACHE;
+ clk_prcmu_clkout_init.parent_names = parent_names;
+ clk_prcmu_clkout_init.num_parents = num_parents;
+ clk->hw.init = &clk_prcmu_clkout_init;
+
+ ret = clk_hw_register(NULL, &clk->hw);
+ if (ret)
+ goto free_clkout;
+
+ return &clk->hw;
+free_clkout:
+ kfree(clk);
+ pr_err("clk_prcmu_clkout: %s failed to register clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+}
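As a usage sketch (the parent list and numbers here are illustrative, not taken from a board), a provider could register the first clkout pin sourced from entry 2 of its parent array with a divide-by-8:

	static const char * const demo_clkout_parents[] = {
		"clk38m_to_clkgen", "aclk", "ab8500_sysclk",
	};
	struct clk_hw *hw;

	hw = clk_reg_prcmu_clkout("clkout1", demo_clkout_parents,
				  ARRAY_SIZE(demo_clkout_parents),
				  2, 8);	/* source index 2, divide by 8 */
	if (IS_ERR(hw))
		pr_err("failed to register clkout1\n");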
diff --git a/drivers/clk/ux500/clk.h b/drivers/clk/ux500/clk.h
index 40cd9fc95b8b..91003cf8003c 100644
--- a/drivers/clk/ux500/clk.h
+++ b/drivers/clk/ux500/clk.h
@@ -13,6 +13,7 @@
#include <linux/types.h>
struct clk;
+struct clk_hw;
struct clk *clk_reg_prcc_pclk(const char *name,
const char *parent_name,
@@ -26,38 +27,43 @@ struct clk *clk_reg_prcc_kclk(const char *name,
u32 cg_sel,
unsigned long flags);
-struct clk *clk_reg_prcmu_scalable(const char *name,
- const char *parent_name,
- u8 cg_sel,
- unsigned long rate,
- unsigned long flags);
-
-struct clk *clk_reg_prcmu_gate(const char *name,
- const char *parent_name,
- u8 cg_sel,
- unsigned long flags);
-
-struct clk *clk_reg_prcmu_scalable_rate(const char *name,
- const char *parent_name,
- u8 cg_sel,
- unsigned long rate,
- unsigned long flags);
-
-struct clk *clk_reg_prcmu_rate(const char *name,
- const char *parent_name,
- u8 cg_sel,
- unsigned long flags);
-
-struct clk *clk_reg_prcmu_opp_gate(const char *name,
- const char *parent_name,
- u8 cg_sel,
- unsigned long flags);
-
-struct clk *clk_reg_prcmu_opp_volt_scalable(const char *name,
- const char *parent_name,
- u8 cg_sel,
- unsigned long rate,
- unsigned long flags);
+struct clk_hw *clk_reg_prcmu_scalable(const char *name,
+ const char *parent_name,
+ u8 cg_sel,
+ unsigned long rate,
+ unsigned long flags);
+
+struct clk_hw *clk_reg_prcmu_gate(const char *name,
+ const char *parent_name,
+ u8 cg_sel,
+ unsigned long flags);
+
+struct clk_hw *clk_reg_prcmu_scalable_rate(const char *name,
+ const char *parent_name,
+ u8 cg_sel,
+ unsigned long rate,
+ unsigned long flags);
+
+struct clk_hw *clk_reg_prcmu_rate(const char *name,
+ const char *parent_name,
+ u8 cg_sel,
+ unsigned long flags);
+
+struct clk_hw *clk_reg_prcmu_opp_gate(const char *name,
+ const char *parent_name,
+ u8 cg_sel,
+ unsigned long flags);
+
+struct clk_hw *clk_reg_prcmu_opp_volt_scalable(const char *name,
+ const char *parent_name,
+ u8 cg_sel,
+ unsigned long rate,
+ unsigned long flags);
+
+struct clk_hw *clk_reg_prcmu_clkout(const char *name,
+ const char * const *parent_names,
+ int num_parents,
+ u8 source, u8 divider);
struct clk *clk_reg_sysctrl_gate(struct device *dev,
const char *name,
diff --git a/drivers/clk/ux500/reset-prcc.c b/drivers/clk/ux500/reset-prcc.c
index fcd5d042806a..f7e48941fbc7 100644
--- a/drivers/clk/ux500/reset-prcc.c
+++ b/drivers/clk/ux500/reset-prcc.c
@@ -58,7 +58,7 @@ static void __iomem *u8500_prcc_reset_base(struct u8500_prcc_reset *ur,
prcc_num = id / PRCC_PERIPHS_PER_CLUSTER;
index = prcc_num_to_index(prcc_num);
- if (index > ARRAY_SIZE(ur->base))
+ if (index >= ARRAY_SIZE(ur->base))
return NULL;
return ur->base[index];
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
index e86ed2eec3fd..8e2f6c65db2a 100644
--- a/drivers/clk/ux500/u8500_of_clk.c
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -15,9 +15,9 @@
#include "prcc.h"
#include "reset-prcc.h"
-static struct clk *prcmu_clk[PRCMU_NUM_CLKS];
static struct clk *prcc_pclk[(PRCC_NUM_PERIPH_CLUSTERS + 1) * PRCC_PERIPHS_PER_CLUSTER];
static struct clk *prcc_kclk[(PRCC_NUM_PERIPH_CLUSTERS + 1) * PRCC_PERIPHS_PER_CLUSTER];
+static struct clk_hw *clkout_clk[2];
#define PRCC_SHOW(clk, base, bit) \
clk[(base * PRCC_PERIPHS_PER_CLUSTER) + bit]
@@ -46,6 +46,82 @@ static struct clk *ux500_twocell_get(struct of_phandle_args *clkspec,
return PRCC_SHOW(clk_data, base, bit);
}
+static struct clk_hw_onecell_data u8500_prcmu_hw_clks = {
+ .hws = {
+ /*
+ * This assignment makes sure the dynamic array
+ * gets the right size.
+ */
+ [PRCMU_NUM_CLKS] = NULL,
+ },
+ .num = PRCMU_NUM_CLKS,
+};
+
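The [PRCMU_NUM_CLKS] = NULL entry leans on a GNU C behaviour the kernel relies on elsewhere: statically initializing a flexible array member with a designated initializer reserves storage up to the highest index. The same idiom in isolation (names invented for illustration):

struct demo {
	unsigned int num;
	int vals[];			/* flexible array member */
};

static struct demo demo_table = {
	.num = 10,
	.vals = { [9] = 0 },		/* reserves storage for vals[0..9] */
};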
+/* Essentially names for the first PRCMU_CLKSRC_* defines */
+static const char * const u8500_clkout_parents[] = {
+ "clk38m_to_clkgen",
+ "aclk",
+ /* Just called "sysclk" in documentation */
+ "ab8500_sysclk",
+ "lcdclk",
+ "sdmmcclk",
+ "tvclk",
+ "timclk",
+ /* CLK009 is not implemented, add it if you need it */
+ "clk009",
+};
+
+static struct clk_hw *ux500_clkout_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ u32 id, source, divider;
+ struct clk_hw *clkout;
+
+ if (clkspec->args_count != 3)
+ return ERR_PTR(-EINVAL);
+
+ id = clkspec->args[0];
+ source = clkspec->args[1];
+ divider = clkspec->args[2];
+
+ if (id > 1) {
+ pr_err("%s: invalid clkout ID %d\n", __func__, id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (clkout_clk[id]) {
+ pr_info("%s: clkout%d already registered, not reconfiguring\n",
+ __func__, id + 1);
+ return clkout_clk[id];
+ }
+
+ if (source > 7) {
+ pr_err("%s: invalid source ID %d\n", __func__, source);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (divider == 0 || divider > 63) {
+ pr_err("%s: invalid divider %d\n", __func__, divider);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pr_debug("registering clkout%d with source %d and divider %d\n",
+ id + 1, source, divider);
+
+ clkout = clk_reg_prcmu_clkout(id ? "clkout2" : "clkout1",
+ u8500_clkout_parents,
+ ARRAY_SIZE(u8500_clkout_parents),
+ source, divider);
+ if (IS_ERR(clkout)) {
+ pr_err("failed to register clkout%d\n", id + 1);
+ return ERR_CAST(clkout);
+ }
+
+ clkout_clk[id] = clkout;
+
+ return clkout;
+}
+
static void u8500_clk_init(struct device_node *np)
{
struct prcmu_fw_version *fw_version;
@@ -77,19 +153,29 @@ static void u8500_clk_init(struct device_node *np)
}
/* Clock sources */
- clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0,
- CLK_IGNORE_UNUSED);
- prcmu_clk[PRCMU_PLLSOC0] = clk;
+ u8500_prcmu_hw_clks.hws[PRCMU_PLLSOC0] =
+ clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0,
+ CLK_IGNORE_UNUSED);
- clk = clk_reg_prcmu_gate("soc1_pll", NULL, PRCMU_PLLSOC1,
- CLK_IGNORE_UNUSED);
- prcmu_clk[PRCMU_PLLSOC1] = clk;
+ u8500_prcmu_hw_clks.hws[PRCMU_PLLSOC1] =
+ clk_reg_prcmu_gate("soc1_pll", NULL, PRCMU_PLLSOC1,
+ CLK_IGNORE_UNUSED);
- clk = clk_reg_prcmu_gate("ddr_pll", NULL, PRCMU_PLLDDR,
- CLK_IGNORE_UNUSED);
- prcmu_clk[PRCMU_PLLDDR] = clk;
+ u8500_prcmu_hw_clks.hws[PRCMU_PLLDDR] =
+ clk_reg_prcmu_gate("ddr_pll", NULL, PRCMU_PLLDDR,
+ CLK_IGNORE_UNUSED);
- /* FIXME: Add sys, ulp and int clocks here. */
+ /*
+ * Read-only clocks that only return their current rate, only used
+ * as parents to other clocks and not visible in the device tree.
+ * clk38m_to_clkgen is the same as the SYSCLK, i.e. the root clock.
+ */
+ clk_reg_prcmu_rate("clk38m_to_clkgen", NULL, PRCMU_SYSCLK,
+ CLK_IGNORE_UNUSED);
+ clk_reg_prcmu_rate("aclk", NULL, PRCMU_ACLK,
+ CLK_IGNORE_UNUSED);
+
+ /* TODO: add CLK009 if needed */
rtc_clk = clk_register_fixed_rate(NULL, "rtc32k", "NULL",
CLK_IGNORE_UNUSED,
@@ -113,146 +199,106 @@ static void u8500_clk_init(struct device_node *np)
}
if (sgaclk_parent)
- clk = clk_reg_prcmu_gate("sgclk", sgaclk_parent,
- PRCMU_SGACLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_SGACLK] =
+ clk_reg_prcmu_gate("sgclk", sgaclk_parent,
+ PRCMU_SGACLK, 0);
else
- clk = clk_reg_prcmu_gate("sgclk", NULL, PRCMU_SGACLK, 0);
- prcmu_clk[PRCMU_SGACLK] = clk;
-
- clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, 0);
- prcmu_clk[PRCMU_UARTCLK] = clk;
-
- clk = clk_reg_prcmu_gate("msp02clk", NULL, PRCMU_MSP02CLK, 0);
- prcmu_clk[PRCMU_MSP02CLK] = clk;
-
- clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, 0);
- prcmu_clk[PRCMU_MSP1CLK] = clk;
-
- clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, 0);
- prcmu_clk[PRCMU_I2CCLK] = clk;
-
- clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, 0);
- prcmu_clk[PRCMU_SLIMCLK] = clk;
-
- clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, 0);
- prcmu_clk[PRCMU_PER1CLK] = clk;
-
- clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, 0);
- prcmu_clk[PRCMU_PER2CLK] = clk;
-
- clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, 0);
- prcmu_clk[PRCMU_PER3CLK] = clk;
-
- clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, 0);
- prcmu_clk[PRCMU_PER5CLK] = clk;
-
- clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, 0);
- prcmu_clk[PRCMU_PER6CLK] = clk;
-
- clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, 0);
- prcmu_clk[PRCMU_PER7CLK] = clk;
-
- clk = clk_reg_prcmu_scalable("lcdclk", NULL, PRCMU_LCDCLK, 0,
- CLK_SET_RATE_GATE);
- prcmu_clk[PRCMU_LCDCLK] = clk;
-
- clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, 0);
- prcmu_clk[PRCMU_BMLCLK] = clk;
-
- clk = clk_reg_prcmu_scalable("hsitxclk", NULL, PRCMU_HSITXCLK, 0,
- CLK_SET_RATE_GATE);
- prcmu_clk[PRCMU_HSITXCLK] = clk;
-
- clk = clk_reg_prcmu_scalable("hsirxclk", NULL, PRCMU_HSIRXCLK, 0,
- CLK_SET_RATE_GATE);
- prcmu_clk[PRCMU_HSIRXCLK] = clk;
-
- clk = clk_reg_prcmu_scalable("hdmiclk", NULL, PRCMU_HDMICLK, 0,
- CLK_SET_RATE_GATE);
- prcmu_clk[PRCMU_HDMICLK] = clk;
-
- clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, 0);
- prcmu_clk[PRCMU_APEATCLK] = clk;
-
- clk = clk_reg_prcmu_scalable("apetraceclk", NULL, PRCMU_APETRACECLK, 0,
- CLK_SET_RATE_GATE);
- prcmu_clk[PRCMU_APETRACECLK] = clk;
-
- clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, 0);
- prcmu_clk[PRCMU_MCDECLK] = clk;
-
- clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK, 0);
- prcmu_clk[PRCMU_IPI2CCLK] = clk;
-
- clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK, 0);
- prcmu_clk[PRCMU_DSIALTCLK] = clk;
-
- clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, 0);
- prcmu_clk[PRCMU_DMACLK] = clk;
-
- clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, 0);
- prcmu_clk[PRCMU_B2R2CLK] = clk;
-
- clk = clk_reg_prcmu_scalable("tvclk", NULL, PRCMU_TVCLK, 0,
- CLK_SET_RATE_GATE);
- prcmu_clk[PRCMU_TVCLK] = clk;
-
- clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, 0);
- prcmu_clk[PRCMU_SSPCLK] = clk;
-
- clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, 0);
- prcmu_clk[PRCMU_RNGCLK] = clk;
-
- clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, 0);
- prcmu_clk[PRCMU_UICCCLK] = clk;
-
- clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, 0);
- prcmu_clk[PRCMU_TIMCLK] = clk;
-
- clk = clk_reg_prcmu_gate("ab8500_sysclk", NULL, PRCMU_SYSCLK, 0);
- prcmu_clk[PRCMU_SYSCLK] = clk;
-
- clk = clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL, PRCMU_SDMMCCLK,
- 100000000, CLK_SET_RATE_GATE);
- prcmu_clk[PRCMU_SDMMCCLK] = clk;
-
- clk = clk_reg_prcmu_scalable("dsi_pll", "hdmiclk",
- PRCMU_PLLDSI, 0, CLK_SET_RATE_GATE);
- prcmu_clk[PRCMU_PLLDSI] = clk;
-
- clk = clk_reg_prcmu_scalable("dsi0clk", "dsi_pll",
- PRCMU_DSI0CLK, 0, CLK_SET_RATE_GATE);
- prcmu_clk[PRCMU_DSI0CLK] = clk;
-
- clk = clk_reg_prcmu_scalable("dsi1clk", "dsi_pll",
- PRCMU_DSI1CLK, 0, CLK_SET_RATE_GATE);
- prcmu_clk[PRCMU_DSI1CLK] = clk;
-
- clk = clk_reg_prcmu_scalable("dsi0escclk", "tvclk",
- PRCMU_DSI0ESCCLK, 0, CLK_SET_RATE_GATE);
- prcmu_clk[PRCMU_DSI0ESCCLK] = clk;
-
- clk = clk_reg_prcmu_scalable("dsi1escclk", "tvclk",
- PRCMU_DSI1ESCCLK, 0, CLK_SET_RATE_GATE);
- prcmu_clk[PRCMU_DSI1ESCCLK] = clk;
-
- clk = clk_reg_prcmu_scalable("dsi2escclk", "tvclk",
- PRCMU_DSI2ESCCLK, 0, CLK_SET_RATE_GATE);
- prcmu_clk[PRCMU_DSI2ESCCLK] = clk;
-
- clk = clk_reg_prcmu_scalable_rate("armss", NULL,
- PRCMU_ARMSS, 0, CLK_IGNORE_UNUSED);
- prcmu_clk[PRCMU_ARMSS] = clk;
+ u8500_prcmu_hw_clks.hws[PRCMU_SGACLK] =
+ clk_reg_prcmu_gate("sgclk", NULL, PRCMU_SGACLK, 0);
+
+ u8500_prcmu_hw_clks.hws[PRCMU_UARTCLK] =
+ clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_MSP02CLK] =
+ clk_reg_prcmu_gate("msp02clk", NULL, PRCMU_MSP02CLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_MSP1CLK] =
+ clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_I2CCLK] =
+ clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_SLIMCLK] =
+ clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_PER1CLK] =
+ clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_PER2CLK] =
+ clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_PER3CLK] =
+ clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_PER5CLK] =
+ clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_PER6CLK] =
+ clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_PER7CLK] =
+ clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_LCDCLK] =
+ clk_reg_prcmu_scalable("lcdclk", NULL, PRCMU_LCDCLK, 0,
+ CLK_SET_RATE_GATE);
+ u8500_prcmu_hw_clks.hws[PRCMU_BMLCLK] =
+ clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_HSITXCLK] =
+ clk_reg_prcmu_scalable("hsitxclk", NULL, PRCMU_HSITXCLK, 0,
+ CLK_SET_RATE_GATE);
+ u8500_prcmu_hw_clks.hws[PRCMU_HSIRXCLK] =
+ clk_reg_prcmu_scalable("hsirxclk", NULL, PRCMU_HSIRXCLK, 0,
+ CLK_SET_RATE_GATE);
+ u8500_prcmu_hw_clks.hws[PRCMU_HDMICLK] =
+ clk_reg_prcmu_scalable("hdmiclk", NULL, PRCMU_HDMICLK, 0,
+ CLK_SET_RATE_GATE);
+ u8500_prcmu_hw_clks.hws[PRCMU_APEATCLK] =
+ clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_APETRACECLK] =
+ clk_reg_prcmu_scalable("apetraceclk", NULL, PRCMU_APETRACECLK, 0,
+ CLK_SET_RATE_GATE);
+ u8500_prcmu_hw_clks.hws[PRCMU_MCDECLK] =
+ clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_IPI2CCLK] =
+ clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_DSIALTCLK] =
+ clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_DMACLK] =
+ clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_B2R2CLK] =
+ clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_TVCLK] =
+ clk_reg_prcmu_scalable("tvclk", NULL, PRCMU_TVCLK, 0,
+ CLK_SET_RATE_GATE);
+ u8500_prcmu_hw_clks.hws[PRCMU_SSPCLK] =
+ clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_RNGCLK] =
+ clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_UICCCLK] =
+ clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_TIMCLK] =
+ clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_SYSCLK] =
+ clk_reg_prcmu_gate("ab8500_sysclk", NULL, PRCMU_SYSCLK, 0);
+ u8500_prcmu_hw_clks.hws[PRCMU_SDMMCCLK] =
+ clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL,
+ PRCMU_SDMMCCLK, 100000000,
+ CLK_SET_RATE_GATE);
+ u8500_prcmu_hw_clks.hws[PRCMU_PLLDSI] =
+ clk_reg_prcmu_scalable("dsi_pll", "hdmiclk",
+ PRCMU_PLLDSI, 0, CLK_SET_RATE_GATE);
+ u8500_prcmu_hw_clks.hws[PRCMU_DSI0CLK] =
+ clk_reg_prcmu_scalable("dsi0clk", "dsi_pll",
+ PRCMU_DSI0CLK, 0, CLK_SET_RATE_GATE);
+ u8500_prcmu_hw_clks.hws[PRCMU_DSI1CLK] =
+ clk_reg_prcmu_scalable("dsi1clk", "dsi_pll",
+ PRCMU_DSI1CLK, 0, CLK_SET_RATE_GATE);
+ u8500_prcmu_hw_clks.hws[PRCMU_DSI0ESCCLK] =
+ clk_reg_prcmu_scalable("dsi0escclk", "tvclk",
+ PRCMU_DSI0ESCCLK, 0, CLK_SET_RATE_GATE);
+ u8500_prcmu_hw_clks.hws[PRCMU_DSI1ESCCLK] =
+ clk_reg_prcmu_scalable("dsi1escclk", "tvclk",
+ PRCMU_DSI1ESCCLK, 0, CLK_SET_RATE_GATE);
+ u8500_prcmu_hw_clks.hws[PRCMU_DSI2ESCCLK] =
+ clk_reg_prcmu_scalable("dsi2escclk", "tvclk",
+ PRCMU_DSI2ESCCLK, 0, CLK_SET_RATE_GATE);
+ u8500_prcmu_hw_clks.hws[PRCMU_ARMSS] =
+ clk_reg_prcmu_scalable_rate("armss", NULL,
+ PRCMU_ARMSS, 0, CLK_IGNORE_UNUSED);
twd_clk = clk_register_fixed_factor(NULL, "smp_twd", "armss",
CLK_IGNORE_UNUSED, 1, 2);
- /*
- * FIXME: Add special handled PRCMU clocks here:
- * 1. clkout0yuv, use PRCMU as parent + need regulator + pinctrl.
- * 2. ab9540_clkout1yuv, see clkout0yuv
- */
-
/* PRCC P-clocks */
clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", bases[CLKRST1_INDEX],
BIT(0), 0);
@@ -546,13 +592,13 @@ static void u8500_clk_init(struct device_node *np)
PRCC_KCLK_STORE(clk, 6, 0);
for_each_child_of_node(np, child) {
- static struct clk_onecell_data clk_data;
+ if (of_node_name_eq(child, "prcmu-clock"))
+ of_clk_add_hw_provider(child, of_clk_hw_onecell_get,
+ &u8500_prcmu_hw_clks);
+
+ if (of_node_name_eq(child, "clkout-clock"))
+ of_clk_add_hw_provider(child, ux500_clkout_get, NULL);
- if (of_node_name_eq(child, "prcmu-clock")) {
- clk_data.clks = prcmu_clk;
- clk_data.clk_num = ARRAY_SIZE(prcmu_clk);
- of_clk_add_provider(child, of_clk_src_onecell_get, &clk_data);
- }
if (of_node_name_eq(child, "prcc-periph-clock"))
of_clk_add_provider(child, ux500_twocell_get, prcc_pclk);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index fe3f05dfafd9..b5c86d37caa9 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -597,6 +597,14 @@ config CLKSRC_ST_LPC
Enable this option to use the Low Power controller timer
as clocksource.
+config GXP_TIMER
+ bool "GXP timer driver" if COMPILE_TEST && !ARCH_HPE
+ default ARCH_HPE
+ select TIMER_OF if OF
+ help
+	  Provides a driver for the timer control found on HPE
+	  GXP SoCs. This is required for all GXP SoCs.
+
config RISCV_TIMER
bool "Timer for the RISC-V platform" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && RISCV && RISCV_SBI
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 833cfb7a96c1..6ca640019e10 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -86,3 +86,4 @@ obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o
obj-$(CONFIG_MICROCHIP_PIT64B) += timer-microchip-pit64b.o
obj-$(CONFIG_MSC313E_TIMER) += timer-msc313e.o
obj-$(CONFIG_GOLDFISH_TIMER) += timer-goldfish.o
+obj-$(CONFIG_GXP_TIMER) += timer-gxp.o
diff --git a/drivers/clocksource/timer-gxp.c b/drivers/clocksource/timer-gxp.c
new file mode 100644
index 000000000000..8b38b3212388
--- /dev/null
+++ b/drivers/clocksource/timer-gxp.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/sched_clock.h>
+
+#define TIMER0_FREQ 1000000
+#define GXP_TIMER_CNT_OFS 0x00
+#define GXP_TIMESTAMP_OFS 0x08
+#define GXP_TIMER_CTRL_OFS 0x14
+
+/* TCS Stands for Timer Control/Status: these are masks to be used in */
+/* the Timer Count Registers */
+#define MASK_TCS_ENABLE 0x01
+#define MASK_TCS_PERIOD 0x02
+#define MASK_TCS_RELOAD 0x04
+#define MASK_TCS_TC 0x80
+
+struct gxp_timer {
+ void __iomem *counter;
+ void __iomem *control;
+ struct clock_event_device evt;
+};
+
+static struct gxp_timer *gxp_timer;
+
+static void __iomem *system_clock __ro_after_init;
+
+static inline struct gxp_timer *to_gxp_timer(struct clock_event_device *evt_dev)
+{
+ return container_of(evt_dev, struct gxp_timer, evt);
+}
+
+static u64 notrace gxp_sched_read(void)
+{
+ return readl_relaxed(system_clock);
+}
+
+static int gxp_time_set_next_event(unsigned long event, struct clock_event_device *evt_dev)
+{
+ struct gxp_timer *timer = to_gxp_timer(evt_dev);
+
+ /* Stop counting and disable interrupt before updating */
+ writeb_relaxed(MASK_TCS_TC, timer->control);
+ writel_relaxed(event, timer->counter);
+ writeb_relaxed(MASK_TCS_TC | MASK_TCS_ENABLE, timer->control);
+
+ return 0;
+}
+
+static irqreturn_t gxp_timer_interrupt(int irq, void *dev_id)
+{
+ struct gxp_timer *timer = (struct gxp_timer *)dev_id;
+
+ if (!(readb_relaxed(timer->control) & MASK_TCS_TC))
+ return IRQ_NONE;
+
+ writeb_relaxed(MASK_TCS_TC, timer->control);
+
+ timer->evt.event_handler(&timer->evt);
+
+ return IRQ_HANDLED;
+}
+
+static int __init gxp_timer_init(struct device_node *node)
+{
+ void __iomem *base;
+ struct clk *clk;
+ u32 freq;
+ int ret, irq;
+
+ gxp_timer = kzalloc(sizeof(*gxp_timer), GFP_KERNEL);
+ if (!gxp_timer) {
+ ret = -ENOMEM;
+ pr_err("Can't allocate gxp_timer");
+ return ret;
+ }
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ ret = (int)PTR_ERR(clk);
+ pr_err("%pOFn clock not found: %d\n", node, ret);
+ goto err_free;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("%pOFn clock enable failed: %d\n", node, ret);
+ goto err_clk_enable;
+ }
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ ret = -ENXIO;
+ pr_err("Can't map timer base registers");
+ goto err_iomap;
+ }
+
+ /* Set the offsets to the clock register and timer registers */
+ gxp_timer->counter = base + GXP_TIMER_CNT_OFS;
+ gxp_timer->control = base + GXP_TIMER_CTRL_OFS;
+ system_clock = base + GXP_TIMESTAMP_OFS;
+
+ gxp_timer->evt.name = node->name;
+ gxp_timer->evt.rating = 300;
+ gxp_timer->evt.features = CLOCK_EVT_FEAT_ONESHOT;
+ gxp_timer->evt.set_next_event = gxp_time_set_next_event;
+ gxp_timer->evt.cpumask = cpumask_of(0);
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ ret = -EINVAL;
+ pr_err("GXP Timer Can't parse IRQ %d", irq);
+ goto err_exit;
+ }
+
+ freq = clk_get_rate(clk);
+
+ ret = clocksource_mmio_init(system_clock, node->name, freq,
+ 300, 32, clocksource_mmio_readl_up);
+ if (ret) {
+ pr_err("%pOFn init clocksource failed: %d", node, ret);
+ goto err_exit;
+ }
+
+ sched_clock_register(gxp_sched_read, 32, freq);
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ ret = -EINVAL;
+ pr_err("%pOFn Can't parse IRQ %d", node, irq);
+ goto err_exit;
+ }
+
+ clockevents_config_and_register(&gxp_timer->evt, TIMER0_FREQ,
+ 0xf, 0xffffffff);
+
+ ret = request_irq(irq, gxp_timer_interrupt, IRQF_TIMER | IRQF_SHARED,
+ node->name, gxp_timer);
+ if (ret) {
+ pr_err("%pOFn request_irq() failed: %d", node, ret);
+ goto err_exit;
+ }
+
+ pr_debug("gxp: system timer (irq = %d)\n", irq);
+ return 0;
+
+err_exit:
+ iounmap(base);
+err_iomap:
+ clk_disable_unprepare(clk);
+err_clk_enable:
+ clk_put(clk);
+err_free:
+ kfree(gxp_timer);
+ return ret;
+}
+
+/*
+ * This probe gets called after the timer is already up and running. This will create
+ * the watchdog device as a child since the registers are shared.
+ */
+
+static int gxp_timer_probe(struct platform_device *pdev)
+{
+ struct platform_device *gxp_watchdog_device;
+ struct device *dev = &pdev->dev;
+
+ if (!gxp_timer) {
+		pr_err("GXP timer not initialized, cannot create watchdog");
+ return -ENOMEM;
+ }
+
+ gxp_watchdog_device = platform_device_alloc("gxp-wdt", -1);
+ if (!gxp_watchdog_device) {
+ pr_err("Timer failed to allocate gxp-wdt");
+ return -ENOMEM;
+ }
+
+ /* Pass the base address (counter) as platform data and nothing else */
+ gxp_watchdog_device->dev.platform_data = gxp_timer->counter;
+ gxp_watchdog_device->dev.parent = dev;
+
+ return platform_device_add(gxp_watchdog_device);
+}
+
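On the receiving end, the watchdog child only gets that one pointer, so a consumer would fetch it with dev_get_platdata(). A hypothetical sketch of the gxp-wdt side (this is not the actual watchdog driver, just an illustration of the hand-off):

static int demo_gxp_wdt_probe(struct platform_device *pdev)
{
	/* Counter base shared by the parent timer driver via platform data */
	void __iomem *base = dev_get_platdata(&pdev->dev);

	if (!base)
		return -ENODEV;

	/* ... program the watchdog registers relative to 'base' ... */
	return 0;
}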
+static const struct of_device_id gxp_timer_of_match[] = {
+ { .compatible = "hpe,gxp-timer", },
+ {},
+};
+
+static struct platform_driver gxp_timer_driver = {
+ .probe = gxp_timer_probe,
+ .driver = {
+ .name = "gxp-timer",
+ .of_match_table = gxp_timer_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(gxp_timer_driver);
+
+TIMER_OF_DECLARE(gxp, "hpe,gxp-timer", gxp_timer_init);
diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
index 8eb1f699a857..d4e2ed709bfc 100644
--- a/drivers/comedi/drivers.c
+++ b/drivers/comedi/drivers.c
@@ -854,7 +854,7 @@ int comedi_load_firmware(struct comedi_device *dev,
release_firmware(fw);
}
- return ret < 0 ? ret : 0;
+ return min(ret, 0);
}
EXPORT_SYMBOL_GPL(comedi_load_firmware);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index d092c9bb4ba3..24eaf0ec344d 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -61,6 +61,8 @@ static struct cppc_workaround_oem_info wa_info[] = {
}
};
+static struct cpufreq_driver cppc_cpufreq_driver;
+
#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
/* Frequency invariance support */
@@ -75,7 +77,6 @@ struct cppc_freq_invariance {
static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;
-static struct cpufreq_driver cppc_cpufreq_driver;
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
struct cppc_perf_fb_ctrs *fb_ctrs_t0,
@@ -440,6 +441,14 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
}
return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
+#else
+static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+{
+ return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+}
+#endif
+
+#if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL)
static DEFINE_PER_CPU(unsigned int, efficiency_class);
static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);
@@ -620,21 +629,12 @@ static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
}
#else
-
-static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
-{
- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
-}
static int populate_efficiency_class(void)
{
return 0;
}
-static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
-{
-}
#endif
-
static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
{
struct cppc_cpudata *cpu_data;
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 866163883b48..37a1eb20f5ba 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -8,18 +8,22 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
-#include <linux/thermal.h>
-#define MIN_VOLT_SHIFT (100000)
-#define MAX_VOLT_SHIFT (200000)
-#define MAX_VOLT_LIMIT (1150000)
-#define VOLT_TOL (10000)
+struct mtk_cpufreq_platform_data {
+ int min_volt_shift;
+ int max_volt_shift;
+ int proc_max_volt;
+ int sram_min_volt;
+ int sram_max_volt;
+ bool ccifreq_supported;
+};
/*
* The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS
@@ -35,6 +39,7 @@
struct mtk_cpu_dvfs_info {
struct cpumask cpus;
struct device *cpu_dev;
+ struct device *cci_dev;
struct regulator *proc_reg;
struct regulator *sram_reg;
struct clk *cpu_clk;
@@ -42,8 +47,20 @@ struct mtk_cpu_dvfs_info {
struct list_head list_head;
int intermediate_voltage;
bool need_voltage_tracking;
+ int vproc_on_boot;
+ int pre_vproc;
+	/* Avoid races on the regulators between the OPP notifier and set_target */
+ struct mutex reg_lock;
+ struct notifier_block opp_nb;
+ unsigned int opp_cpu;
+ unsigned long current_freq;
+ const struct mtk_cpufreq_platform_data *soc_data;
+ int vtrack_max;
+ bool ccifreq_bound;
};
+static struct platform_device *cpufreq_pdev;
+
static LIST_HEAD(dvfs_info_list);
static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
@@ -61,142 +78,123 @@ static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
int new_vproc)
{
+ const struct mtk_cpufreq_platform_data *soc_data = info->soc_data;
struct regulator *proc_reg = info->proc_reg;
struct regulator *sram_reg = info->sram_reg;
- int old_vproc, old_vsram, new_vsram, vsram, vproc, ret;
-
- old_vproc = regulator_get_voltage(proc_reg);
- if (old_vproc < 0) {
- pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
- return old_vproc;
- }
- /* Vsram should not exceed the maximum allowed voltage of SoC. */
- new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT);
-
- if (old_vproc < new_vproc) {
- /*
- * When scaling up voltages, Vsram and Vproc scale up step
- * by step. At each step, set Vsram to (Vproc + 200mV) first,
- * then set Vproc to (Vsram - 100mV).
- * Keep doing it until Vsram and Vproc hit target voltages.
- */
- do {
- old_vsram = regulator_get_voltage(sram_reg);
- if (old_vsram < 0) {
- pr_err("%s: invalid Vsram value: %d\n",
- __func__, old_vsram);
- return old_vsram;
- }
- old_vproc = regulator_get_voltage(proc_reg);
- if (old_vproc < 0) {
- pr_err("%s: invalid Vproc value: %d\n",
- __func__, old_vproc);
- return old_vproc;
- }
-
- vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT);
+ int pre_vproc, pre_vsram, new_vsram, vsram, vproc, ret;
+ int retry = info->vtrack_max;
+
+ pre_vproc = regulator_get_voltage(proc_reg);
+ if (pre_vproc < 0) {
+ dev_err(info->cpu_dev,
+ "invalid Vproc value: %d\n", pre_vproc);
+ return pre_vproc;
+ }
- if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
- vsram = MAX_VOLT_LIMIT;
+ pre_vsram = regulator_get_voltage(sram_reg);
+ if (pre_vsram < 0) {
+ dev_err(info->cpu_dev, "invalid Vsram value: %d\n", pre_vsram);
+ return pre_vsram;
+ }
- /*
- * If the target Vsram hits the maximum voltage,
- * try to set the exact voltage value first.
- */
- ret = regulator_set_voltage(sram_reg, vsram,
- vsram);
- if (ret)
- ret = regulator_set_voltage(sram_reg,
- vsram - VOLT_TOL,
- vsram);
+ new_vsram = clamp(new_vproc + soc_data->min_volt_shift,
+ soc_data->sram_min_volt, soc_data->sram_max_volt);
- vproc = new_vproc;
- } else {
- ret = regulator_set_voltage(sram_reg, vsram,
- vsram + VOLT_TOL);
+ do {
+ if (pre_vproc <= new_vproc) {
+ vsram = clamp(pre_vproc + soc_data->max_volt_shift,
+ soc_data->sram_min_volt, new_vsram);
+ ret = regulator_set_voltage(sram_reg, vsram,
+ soc_data->sram_max_volt);
- vproc = vsram - MIN_VOLT_SHIFT;
- }
if (ret)
return ret;
+ if (vsram == soc_data->sram_max_volt ||
+ new_vsram == soc_data->sram_min_volt)
+ vproc = new_vproc;
+ else
+ vproc = vsram - soc_data->min_volt_shift;
+
ret = regulator_set_voltage(proc_reg, vproc,
- vproc + VOLT_TOL);
+ soc_data->proc_max_volt);
if (ret) {
- regulator_set_voltage(sram_reg, old_vsram,
- old_vsram);
+ regulator_set_voltage(sram_reg, pre_vsram,
+ soc_data->sram_max_volt);
return ret;
}
- } while (vproc < new_vproc || vsram < new_vsram);
- } else if (old_vproc > new_vproc) {
- /*
- * When scaling down voltages, Vsram and Vproc scale down step
- * by step. At each step, set Vproc to (Vsram - 200mV) first,
- * then set Vproc to (Vproc + 100mV).
- * Keep doing it until Vsram and Vproc hit target voltages.
- */
- do {
- old_vproc = regulator_get_voltage(proc_reg);
- if (old_vproc < 0) {
- pr_err("%s: invalid Vproc value: %d\n",
- __func__, old_vproc);
- return old_vproc;
- }
- old_vsram = regulator_get_voltage(sram_reg);
- if (old_vsram < 0) {
- pr_err("%s: invalid Vsram value: %d\n",
- __func__, old_vsram);
- return old_vsram;
- }
-
- vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT);
+ } else if (pre_vproc > new_vproc) {
+ vproc = max(new_vproc,
+ pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(proc_reg, vproc,
- vproc + VOLT_TOL);
+ soc_data->proc_max_volt);
if (ret)
return ret;
if (vproc == new_vproc)
vsram = new_vsram;
else
- vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT);
-
- if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
- vsram = MAX_VOLT_LIMIT;
-
- /*
- * If the target Vsram hits the maximum voltage,
- * try to set the exact voltage value first.
- */
- ret = regulator_set_voltage(sram_reg, vsram,
- vsram);
- if (ret)
- ret = regulator_set_voltage(sram_reg,
- vsram - VOLT_TOL,
- vsram);
- } else {
- ret = regulator_set_voltage(sram_reg, vsram,
- vsram + VOLT_TOL);
- }
+ vsram = max(new_vsram,
+ vproc + soc_data->min_volt_shift);
+ ret = regulator_set_voltage(sram_reg, vsram,
+ soc_data->sram_max_volt);
if (ret) {
- regulator_set_voltage(proc_reg, old_vproc,
- old_vproc);
+ regulator_set_voltage(proc_reg, pre_vproc,
+ soc_data->proc_max_volt);
return ret;
}
- } while (vproc > new_vproc + VOLT_TOL ||
- vsram > new_vsram + VOLT_TOL);
- }
+ }
+
+ pre_vproc = vproc;
+ pre_vsram = vsram;
+
+ if (--retry < 0) {
+ dev_err(info->cpu_dev,
+				"loop count exceeded, failed to set voltage\n");
+ return -EINVAL;
+ }
+ } while (vproc != new_vproc || vsram != new_vsram);
return 0;
}
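To make the clamp()-based staircase concrete, here is one illustrative scale-up trace (voltages in microvolts, SRAM/proc limits assumed wide enough not to bind; none of these numbers come from a real SoC). With min_volt_shift = 100000 and max_volt_shift = 250000, raising Vproc from 600000 to 1000000 (so new_vsram = 1100000) converges in three iterations:

	/*
	 * step 1: vsram = clamp(600000 + 250000, ...) = 850000,  vproc = 750000
	 * step 2: vsram = clamp(750000 + 250000, ...) = 1000000, vproc = 900000
	 * step 3: vsram = clamp(900000 + 250000, ...) = 1100000, vproc = 1000000
	 *         (both targets reached, loop exits well within vtrack_max)
	 */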
static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc)
{
+ const struct mtk_cpufreq_platform_data *soc_data = info->soc_data;
+ int ret;
+
if (info->need_voltage_tracking)
- return mtk_cpufreq_voltage_tracking(info, vproc);
+ ret = mtk_cpufreq_voltage_tracking(info, vproc);
else
- return regulator_set_voltage(info->proc_reg, vproc,
- vproc + VOLT_TOL);
+ ret = regulator_set_voltage(info->proc_reg, vproc,
+ soc_data->proc_max_volt);
+ if (!ret)
+ info->pre_vproc = vproc;
+
+ return ret;
+}
+
+static bool is_ccifreq_ready(struct mtk_cpu_dvfs_info *info)
+{
+ struct device_link *sup_link;
+
+ if (info->ccifreq_bound)
+ return true;
+
+ sup_link = device_link_add(info->cpu_dev, info->cci_dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!sup_link) {
+ dev_err(info->cpu_dev, "cpu%d: sup_link is NULL\n", info->opp_cpu);
+ return false;
+ }
+
+ if (sup_link->supplier->links.status != DL_DEV_DRIVER_BOUND)
+ return false;
+
+ info->ccifreq_bound = true;
+
+ return true;
}
static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
@@ -208,219 +206,367 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
struct mtk_cpu_dvfs_info *info = policy->driver_data;
struct device *cpu_dev = info->cpu_dev;
struct dev_pm_opp *opp;
- long freq_hz, old_freq_hz;
- int vproc, old_vproc, inter_vproc, target_vproc, ret;
+ long freq_hz, pre_freq_hz;
+ int vproc, pre_vproc, inter_vproc, target_vproc, ret;
inter_vproc = info->intermediate_voltage;
- old_freq_hz = clk_get_rate(cpu_clk);
- old_vproc = regulator_get_voltage(info->proc_reg);
- if (old_vproc < 0) {
- pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
- return old_vproc;
+ pre_freq_hz = clk_get_rate(cpu_clk);
+
+ mutex_lock(&info->reg_lock);
+
+ if (unlikely(info->pre_vproc <= 0))
+ pre_vproc = regulator_get_voltage(info->proc_reg);
+ else
+ pre_vproc = info->pre_vproc;
+
+ if (pre_vproc < 0) {
+ dev_err(cpu_dev, "invalid Vproc value: %d\n", pre_vproc);
+ ret = pre_vproc;
+ goto out;
}
freq_hz = freq_table[index].frequency * 1000;
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
if (IS_ERR(opp)) {
- pr_err("cpu%d: failed to find OPP for %ld\n",
- policy->cpu, freq_hz);
- return PTR_ERR(opp);
+ dev_err(cpu_dev, "cpu%d: failed to find OPP for %ld\n",
+ policy->cpu, freq_hz);
+ ret = PTR_ERR(opp);
+ goto out;
}
vproc = dev_pm_opp_get_voltage(opp);
dev_pm_opp_put(opp);
/*
+	 * If MediaTek CCI is supported but not yet ready, use
+	 * max(target cpu voltage, booting voltage) to prevent a
+	 * high-frequency, low-voltage crash.
+ */
+ if (info->soc_data->ccifreq_supported && !is_ccifreq_ready(info))
+ vproc = max(vproc, info->vproc_on_boot);
+
+ /*
* If the new voltage or the intermediate voltage is higher than the
* current voltage, scale up voltage first.
*/
- target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc;
- if (old_vproc < target_vproc) {
+ target_vproc = max(inter_vproc, vproc);
+ if (pre_vproc <= target_vproc) {
ret = mtk_cpufreq_set_voltage(info, target_vproc);
if (ret) {
- pr_err("cpu%d: failed to scale up voltage!\n",
- policy->cpu);
- mtk_cpufreq_set_voltage(info, old_vproc);
- return ret;
+ dev_err(cpu_dev,
+ "cpu%d: failed to scale up voltage!\n", policy->cpu);
+ mtk_cpufreq_set_voltage(info, pre_vproc);
+ goto out;
}
}
/* Reparent the CPU clock to intermediate clock. */
ret = clk_set_parent(cpu_clk, info->inter_clk);
if (ret) {
- pr_err("cpu%d: failed to re-parent cpu clock!\n",
- policy->cpu);
- mtk_cpufreq_set_voltage(info, old_vproc);
- WARN_ON(1);
- return ret;
+ dev_err(cpu_dev,
+ "cpu%d: failed to re-parent cpu clock!\n", policy->cpu);
+ mtk_cpufreq_set_voltage(info, pre_vproc);
+ goto out;
}
/* Set the original PLL to target rate. */
ret = clk_set_rate(armpll, freq_hz);
if (ret) {
- pr_err("cpu%d: failed to scale cpu clock rate!\n",
- policy->cpu);
+ dev_err(cpu_dev,
+ "cpu%d: failed to scale cpu clock rate!\n", policy->cpu);
clk_set_parent(cpu_clk, armpll);
- mtk_cpufreq_set_voltage(info, old_vproc);
- return ret;
+ mtk_cpufreq_set_voltage(info, pre_vproc);
+ goto out;
}
/* Set parent of CPU clock back to the original PLL. */
ret = clk_set_parent(cpu_clk, armpll);
if (ret) {
- pr_err("cpu%d: failed to re-parent cpu clock!\n",
- policy->cpu);
+ dev_err(cpu_dev,
+ "cpu%d: failed to re-parent cpu clock!\n", policy->cpu);
mtk_cpufreq_set_voltage(info, inter_vproc);
- WARN_ON(1);
- return ret;
+ goto out;
}
/*
* If the new voltage is lower than the intermediate voltage or the
* original voltage, scale down to the new voltage.
*/
- if (vproc < inter_vproc || vproc < old_vproc) {
+ if (vproc < inter_vproc || vproc < pre_vproc) {
ret = mtk_cpufreq_set_voltage(info, vproc);
if (ret) {
- pr_err("cpu%d: failed to scale down voltage!\n",
- policy->cpu);
+ dev_err(cpu_dev,
+ "cpu%d: failed to scale down voltage!\n", policy->cpu);
clk_set_parent(cpu_clk, info->inter_clk);
- clk_set_rate(armpll, old_freq_hz);
+ clk_set_rate(armpll, pre_freq_hz);
clk_set_parent(cpu_clk, armpll);
- return ret;
+ goto out;
}
}
- return 0;
+ info->current_freq = freq_hz;
+
+out:
+ mutex_unlock(&info->reg_lock);
+
+ return ret;
}
#define DYNAMIC_POWER "dynamic-power-coefficient"
+static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct dev_pm_opp *opp = data;
+ struct dev_pm_opp *new_opp;
+ struct mtk_cpu_dvfs_info *info;
+ unsigned long freq, volt;
+ struct cpufreq_policy *policy;
+ int ret = 0;
+
+ info = container_of(nb, struct mtk_cpu_dvfs_info, opp_nb);
+
+ if (event == OPP_EVENT_ADJUST_VOLTAGE) {
+ freq = dev_pm_opp_get_freq(opp);
+
+ mutex_lock(&info->reg_lock);
+ if (info->current_freq == freq) {
+ volt = dev_pm_opp_get_voltage(opp);
+ ret = mtk_cpufreq_set_voltage(info, volt);
+ if (ret)
+ dev_err(info->cpu_dev,
+ "failed to scale voltage: %d\n", ret);
+ }
+ mutex_unlock(&info->reg_lock);
+ } else if (event == OPP_EVENT_DISABLE) {
+ freq = dev_pm_opp_get_freq(opp);
+
+		/* Handle the case where the currently used OPP item is disabled */
+ if (info->current_freq == freq) {
+ freq = 1;
+ new_opp = dev_pm_opp_find_freq_ceil(info->cpu_dev,
+ &freq);
+ if (IS_ERR(new_opp)) {
+ dev_err(info->cpu_dev,
+ "all opp items are disabled\n");
+ ret = PTR_ERR(new_opp);
+ return notifier_from_errno(ret);
+ }
+
+ dev_pm_opp_put(new_opp);
+ policy = cpufreq_cpu_get(info->opp_cpu);
+ if (policy) {
+ cpufreq_driver_target(policy, freq / 1000,
+ CPUFREQ_RELATION_L);
+ cpufreq_cpu_put(policy);
+ }
+ }
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct device *of_get_cci(struct device *cpu_dev)
+{
+ struct device_node *np;
+ struct platform_device *pdev;
+
+ np = of_parse_phandle(cpu_dev->of_node, "mediatek,cci", 0);
+ if (IS_ERR_OR_NULL(np))
+ return NULL;
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (IS_ERR_OR_NULL(pdev))
+ return NULL;
+
+ return &pdev->dev;
+}
+
static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
{
struct device *cpu_dev;
- struct regulator *proc_reg = ERR_PTR(-ENODEV);
- struct regulator *sram_reg = ERR_PTR(-ENODEV);
- struct clk *cpu_clk = ERR_PTR(-ENODEV);
- struct clk *inter_clk = ERR_PTR(-ENODEV);
struct dev_pm_opp *opp;
unsigned long rate;
int ret;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
- pr_err("failed to get cpu%d device\n", cpu);
+ dev_err(cpu_dev, "failed to get cpu%d device\n", cpu);
return -ENODEV;
}
+ info->cpu_dev = cpu_dev;
- cpu_clk = clk_get(cpu_dev, "cpu");
- if (IS_ERR(cpu_clk)) {
- if (PTR_ERR(cpu_clk) == -EPROBE_DEFER)
- pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu);
- else
- pr_err("failed to get cpu clk for cpu%d\n", cpu);
-
- ret = PTR_ERR(cpu_clk);
- return ret;
+ info->ccifreq_bound = false;
+ if (info->soc_data->ccifreq_supported) {
+ info->cci_dev = of_get_cci(info->cpu_dev);
+ if (IS_ERR_OR_NULL(info->cci_dev)) {
+ ret = PTR_ERR(info->cci_dev);
+ dev_err(cpu_dev, "cpu%d: failed to get cci device\n", cpu);
+ return -ENODEV;
+ }
}
- inter_clk = clk_get(cpu_dev, "intermediate");
- if (IS_ERR(inter_clk)) {
- if (PTR_ERR(inter_clk) == -EPROBE_DEFER)
- pr_warn("intermediate clk for cpu%d not ready, retry.\n",
- cpu);
- else
- pr_err("failed to get intermediate clk for cpu%d\n",
- cpu);
+ info->cpu_clk = clk_get(cpu_dev, "cpu");
+ if (IS_ERR(info->cpu_clk)) {
+ ret = PTR_ERR(info->cpu_clk);
+ return dev_err_probe(cpu_dev, ret,
+ "cpu%d: failed to get cpu clk\n", cpu);
+ }
- ret = PTR_ERR(inter_clk);
+ info->inter_clk = clk_get(cpu_dev, "intermediate");
+ if (IS_ERR(info->inter_clk)) {
+ ret = PTR_ERR(info->inter_clk);
+ dev_err_probe(cpu_dev, ret,
+ "cpu%d: failed to get intermediate clk\n", cpu);
goto out_free_resources;
}
- proc_reg = regulator_get_optional(cpu_dev, "proc");
- if (IS_ERR(proc_reg)) {
- if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
- pr_warn("proc regulator for cpu%d not ready, retry.\n",
- cpu);
- else
- pr_err("failed to get proc regulator for cpu%d\n",
- cpu);
+ info->proc_reg = regulator_get_optional(cpu_dev, "proc");
+ if (IS_ERR(info->proc_reg)) {
+ ret = PTR_ERR(info->proc_reg);
+ dev_err_probe(cpu_dev, ret,
+ "cpu%d: failed to get proc regulator\n", cpu);
+ goto out_free_resources;
+ }
- ret = PTR_ERR(proc_reg);
+ ret = regulator_enable(info->proc_reg);
+ if (ret) {
+ dev_warn(cpu_dev, "cpu%d: failed to enable vproc\n", cpu);
goto out_free_resources;
}
/* Both presence and absence of sram regulator are valid cases. */
- sram_reg = regulator_get_exclusive(cpu_dev, "sram");
+ info->sram_reg = regulator_get_optional(cpu_dev, "sram");
+ if (IS_ERR(info->sram_reg))
+ info->sram_reg = NULL;
+ else {
+ ret = regulator_enable(info->sram_reg);
+ if (ret) {
+ dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu);
+ goto out_free_resources;
+ }
+ }
/* Get OPP-sharing information from "operating-points-v2" bindings */
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus);
if (ret) {
- pr_err("failed to get OPP-sharing information for cpu%d\n",
- cpu);
+ dev_err(cpu_dev,
+ "cpu%d: failed to get OPP-sharing information\n", cpu);
goto out_free_resources;
}
ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);
if (ret) {
- pr_warn("no OPP table for cpu%d\n", cpu);
+ dev_warn(cpu_dev, "cpu%d: no OPP table\n", cpu);
goto out_free_resources;
}
+ ret = clk_prepare_enable(info->cpu_clk);
+ if (ret)
+ goto out_free_opp_table;
+
+ ret = clk_prepare_enable(info->inter_clk);
+ if (ret)
+ goto out_disable_mux_clock;
+
+ if (info->soc_data->ccifreq_supported) {
+ info->vproc_on_boot = regulator_get_voltage(info->proc_reg);
+		if (info->vproc_on_boot < 0) {
+			ret = info->vproc_on_boot;
+			dev_err(info->cpu_dev,
+				"invalid Vproc value: %d\n", info->vproc_on_boot);
+			goto out_disable_inter_clock;
+		}
+ }
+
/* Search a safe voltage for intermediate frequency. */
- rate = clk_get_rate(inter_clk);
+ rate = clk_get_rate(info->inter_clk);
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
if (IS_ERR(opp)) {
- pr_err("failed to get intermediate opp for cpu%d\n", cpu);
+ dev_err(cpu_dev, "cpu%d: failed to get intermediate opp\n", cpu);
ret = PTR_ERR(opp);
- goto out_free_opp_table;
+ goto out_disable_inter_clock;
}
info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
dev_pm_opp_put(opp);
- info->cpu_dev = cpu_dev;
- info->proc_reg = proc_reg;
- info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg;
- info->cpu_clk = cpu_clk;
- info->inter_clk = inter_clk;
+ mutex_init(&info->reg_lock);
+ info->current_freq = clk_get_rate(info->cpu_clk);
+
+ info->opp_cpu = cpu;
+ info->opp_nb.notifier_call = mtk_cpufreq_opp_notifier;
+ ret = dev_pm_opp_register_notifier(cpu_dev, &info->opp_nb);
+ if (ret) {
+ dev_err(cpu_dev, "cpu%d: failed to register opp notifier\n", cpu);
+ goto out_disable_inter_clock;
+ }
/*
* If SRAM regulator is present, software "voltage tracking" is needed
* for this CPU power domain.
*/
- info->need_voltage_tracking = !IS_ERR(sram_reg);
+ info->need_voltage_tracking = (info->sram_reg != NULL);
+
+	/*
+	 * We assume the minimum voltage is 0 and track the target voltage
+	 * in steps of min_volt_shift per iteration.
+	 * vtrack_max is 3 times the expected iteration count.
+	 */
+ info->vtrack_max = 3 * DIV_ROUND_UP(max(info->soc_data->sram_max_volt,
+ info->soc_data->proc_max_volt),
+ info->soc_data->min_volt_shift);
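+	/*
+	 * Illustration (using the mt2701 platform data added later in this
+	 * patch): max(sram_max_volt, proc_max_volt) = 1150000 uV and
+	 * min_volt_shift = 100000 uV, so DIV_ROUND_UP() gives 12 and
+	 * vtrack_max = 3 * 12 = 36 iterations at most.
+	 */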
return 0;
+out_disable_inter_clock:
+ clk_disable_unprepare(info->inter_clk);
+
+out_disable_mux_clock:
+ clk_disable_unprepare(info->cpu_clk);
+
out_free_opp_table:
dev_pm_opp_of_cpumask_remove_table(&info->cpus);
out_free_resources:
- if (!IS_ERR(proc_reg))
- regulator_put(proc_reg);
- if (!IS_ERR(sram_reg))
- regulator_put(sram_reg);
- if (!IS_ERR(cpu_clk))
- clk_put(cpu_clk);
- if (!IS_ERR(inter_clk))
- clk_put(inter_clk);
+ if (regulator_is_enabled(info->proc_reg))
+ regulator_disable(info->proc_reg);
+ if (info->sram_reg && regulator_is_enabled(info->sram_reg))
+ regulator_disable(info->sram_reg);
+
+ if (!IS_ERR(info->proc_reg))
+ regulator_put(info->proc_reg);
+ if (!IS_ERR(info->sram_reg))
+ regulator_put(info->sram_reg);
+ if (!IS_ERR(info->cpu_clk))
+ clk_put(info->cpu_clk);
+ if (!IS_ERR(info->inter_clk))
+ clk_put(info->inter_clk);
return ret;
}
static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
{
- if (!IS_ERR(info->proc_reg))
+ if (!IS_ERR(info->proc_reg)) {
+ regulator_disable(info->proc_reg);
regulator_put(info->proc_reg);
- if (!IS_ERR(info->sram_reg))
+ }
+ if (!IS_ERR(info->sram_reg)) {
+ regulator_disable(info->sram_reg);
regulator_put(info->sram_reg);
- if (!IS_ERR(info->cpu_clk))
+ }
+ if (!IS_ERR(info->cpu_clk)) {
+ clk_disable_unprepare(info->cpu_clk);
clk_put(info->cpu_clk);
- if (!IS_ERR(info->inter_clk))
+ }
+ if (!IS_ERR(info->inter_clk)) {
+ clk_disable_unprepare(info->inter_clk);
clk_put(info->inter_clk);
+ }
dev_pm_opp_of_cpumask_remove_table(&info->cpus);
+ dev_pm_opp_unregister_notifier(info->cpu_dev, &info->opp_nb);
}
static int mtk_cpufreq_init(struct cpufreq_policy *policy)
@@ -432,14 +578,15 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
info = mtk_cpu_dvfs_info_lookup(policy->cpu);
if (!info) {
pr_err("dvfs info for cpu%d is not initialized.\n",
- policy->cpu);
+ policy->cpu);
return -EINVAL;
}
ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
if (ret) {
- pr_err("failed to init cpufreq table for cpu%d: %d\n",
- policy->cpu, ret);
+ dev_err(info->cpu_dev,
+ "failed to init cpufreq table for cpu%d: %d\n",
+ policy->cpu, ret);
return ret;
}
@@ -476,9 +623,17 @@ static struct cpufreq_driver mtk_cpufreq_driver = {
static int mtk_cpufreq_probe(struct platform_device *pdev)
{
+ const struct mtk_cpufreq_platform_data *data;
struct mtk_cpu_dvfs_info *info, *tmp;
int cpu, ret;
+ data = dev_get_platdata(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev,
+ "failed to get mtk cpufreq platform data\n");
+ return -ENODEV;
+ }
+
for_each_possible_cpu(cpu) {
info = mtk_cpu_dvfs_info_lookup(cpu);
if (info)
@@ -490,6 +645,7 @@ static int mtk_cpufreq_probe(struct platform_device *pdev)
goto release_dvfs_info_list;
}
+ info->soc_data = data;
ret = mtk_cpu_dvfs_info_init(info, cpu);
if (ret) {
dev_err(&pdev->dev,
@@ -525,20 +681,47 @@ static struct platform_driver mtk_cpufreq_platdrv = {
.probe = mtk_cpufreq_probe,
};
+static const struct mtk_cpufreq_platform_data mt2701_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 1150000,
+ .sram_min_volt = 0,
+ .sram_max_volt = 1150000,
+ .ccifreq_supported = false,
+};
+
+static const struct mtk_cpufreq_platform_data mt8183_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 1150000,
+ .sram_min_volt = 0,
+ .sram_max_volt = 1150000,
+ .ccifreq_supported = true,
+};
+
+static const struct mtk_cpufreq_platform_data mt8186_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 250000,
+ .proc_max_volt = 1118750,
+ .sram_min_volt = 850000,
+ .sram_max_volt = 1118750,
+ .ccifreq_supported = true,
+};
+
/* List of machines supported by this driver */
static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
- { .compatible = "mediatek,mt2701", },
- { .compatible = "mediatek,mt2712", },
- { .compatible = "mediatek,mt7622", },
- { .compatible = "mediatek,mt7623", },
- { .compatible = "mediatek,mt8167", },
- { .compatible = "mediatek,mt817x", },
- { .compatible = "mediatek,mt8173", },
- { .compatible = "mediatek,mt8176", },
- { .compatible = "mediatek,mt8183", },
- { .compatible = "mediatek,mt8365", },
- { .compatible = "mediatek,mt8516", },
-
+ { .compatible = "mediatek,mt2701", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt7622", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt7623", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt8167", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt817x", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt8173", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt8176", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt8183", .data = &mt8183_platform_data },
+ { .compatible = "mediatek,mt8186", .data = &mt8186_platform_data },
+ { .compatible = "mediatek,mt8365", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt8516", .data = &mt2701_platform_data },
{ }
};
MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines);
@@ -547,7 +730,7 @@ static int __init mtk_cpufreq_driver_init(void)
{
struct device_node *np;
const struct of_device_id *match;
- struct platform_device *pdev;
+ const struct mtk_cpufreq_platform_data *data;
int err;
np = of_find_node_by_path("/");
@@ -560,6 +743,7 @@ static int __init mtk_cpufreq_driver_init(void)
pr_debug("Machine is not compatible with mtk-cpufreq\n");
return -ENODEV;
}
+ data = match->data;
err = platform_driver_register(&mtk_cpufreq_platdrv);
if (err)
@@ -571,16 +755,24 @@ static int __init mtk_cpufreq_driver_init(void)
* and the device registration codes are put here to handle defer
* probing.
*/
- pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0);
- if (IS_ERR(pdev)) {
+ cpufreq_pdev = platform_device_register_data(NULL, "mtk-cpufreq", -1,
+ data, sizeof(*data));
+ if (IS_ERR(cpufreq_pdev)) {
pr_err("failed to register mtk-cpufreq platform device\n");
platform_driver_unregister(&mtk_cpufreq_platdrv);
- return PTR_ERR(pdev);
+ return PTR_ERR(cpufreq_pdev);
}
return 0;
}
-device_initcall(mtk_cpufreq_driver_init);
+module_init(mtk_cpufreq_driver_init)
+
+static void __exit mtk_cpufreq_driver_exit(void)
+{
+ platform_device_unregister(cpufreq_pdev);
+ platform_driver_unregister(&mtk_cpufreq_platdrv);
+}
+module_exit(mtk_cpufreq_driver_exit)
MODULE_DESCRIPTION("MediaTek CPUFreq driver");
MODULE_AUTHOR("Pi-Cheng Chen <pi-cheng.chen@linaro.org>");
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index f0b6f52eb2c3..ed1ae061a687 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -24,11 +24,9 @@
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
+#include <linux/soc/pxa/cpu.h>
#include <linux/io.h>
-#include <mach/pxa2xx-regs.h>
-#include <mach/smemc.h>
-
#ifdef DEBUG
static unsigned int freq_debug;
module_param(freq_debug, uint, 0);
@@ -106,8 +104,6 @@ static struct pxa_freqs pxa27x_freqs[] = {
static struct cpufreq_frequency_table
pxa27x_freq_table[NUM_PXA27x_FREQS+1];
-extern unsigned get_clk_frequency_khz(int info);
-
#ifdef CONFIG_REGULATOR
static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
index 32f993c94675..4afa48d172db 100644
--- a/drivers/cpufreq/pxa3xx-cpufreq.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -8,12 +8,11 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/clk/pxa.h>
#include <linux/slab.h>
#include <linux/io.h>
-#include <mach/generic.h>
-#include <mach/pxa3xx-regs.h>
-
#define HSS_104M (0)
#define HSS_156M (1)
#define HSS_208M (2)
@@ -34,6 +33,28 @@
#define DMCFS_26M (0)
#define DMCFS_260M (3)
+#define ACCR_XPDIS (1 << 31) /* Core PLL Output Disable */
+#define ACCR_SPDIS (1 << 30) /* System PLL Output Disable */
+#define ACCR_D0CS (1 << 26) /* D0 Mode Clock Select */
+#define ACCR_PCCE (1 << 11) /* Power Mode Change Clock Enable */
+#define ACCR_DDR_D0CS (1 << 7) /* DDR SDRAM clock frequency in D0CS (PXA31x only) */
+
+#define ACCR_SMCFS_MASK (0x7 << 23) /* Static Memory Controller Frequency Select */
+#define ACCR_SFLFS_MASK (0x3 << 18) /* Frequency Select for Internal Memory Controller */
+#define ACCR_XSPCLK_MASK (0x3 << 16) /* Core Frequency during Frequency Change */
+#define ACCR_HSS_MASK (0x3 << 14) /* System Bus-Clock Frequency Select */
+#define ACCR_DMCFS_MASK (0x3 << 12) /* Dynamic Memory Controller Clock Frequency Select */
+#define ACCR_XN_MASK (0x7 << 8) /* Core PLL Turbo-Mode-to-Run-Mode Ratio */
+#define ACCR_XL_MASK (0x1f) /* Core PLL Run-Mode-to-Oscillator Ratio */
+
+#define ACCR_SMCFS(x) (((x) & 0x7) << 23)
+#define ACCR_SFLFS(x) (((x) & 0x3) << 18)
+#define ACCR_XSPCLK(x) (((x) & 0x3) << 16)
+#define ACCR_HSS(x) (((x) & 0x3) << 14)
+#define ACCR_DMCFS(x) (((x) & 0x3) << 12)
+#define ACCR_XN(x) (((x) & 0x7) << 8)
+#define ACCR_XL(x) ((x) & 0x1f)
+
struct pxa3xx_freq_info {
unsigned int cpufreq_mhz;
unsigned int core_xl : 5;
@@ -111,41 +132,29 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
static void __update_core_freq(struct pxa3xx_freq_info *info)
{
- uint32_t mask = ACCR_XN_MASK | ACCR_XL_MASK;
- uint32_t accr = ACCR;
- uint32_t xclkcfg;
-
- accr &= ~(ACCR_XN_MASK | ACCR_XL_MASK | ACCR_XSPCLK_MASK);
- accr |= ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl);
+ u32 mask, disable, enable, xclkcfg;
+ mask = ACCR_XN_MASK | ACCR_XL_MASK;
+ disable = mask | ACCR_XSPCLK_MASK;
+ enable = ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl);
/* No clock until core PLL is re-locked */
- accr |= ACCR_XSPCLK(XSPCLK_NONE);
-
+ enable |= ACCR_XSPCLK(XSPCLK_NONE);
xclkcfg = (info->core_xn == 2) ? 0x3 : 0x2; /* turbo bit */
- ACCR = accr;
- __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg));
-
- while ((ACSR & mask) != (accr & mask))
- cpu_relax();
+ pxa3xx_clk_update_accr(disable, enable, xclkcfg, mask);
}
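
/*
 * pxa3xx_clk_update_accr() is provided by the PXA clock driver; conceptually
 * it performs what the removed code above did inline (a sketch of the
 * intended behaviour, not the exact implementation):
 *
 *	accr = (ACCR & ~disable) | enable;
 *	ACCR = accr;
 *	write xclkcfg to CP14 c6 when it is non-zero;
 *	while ((ACSR & mask) != (accr & mask))
 *		cpu_relax();
 */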
static void __update_bus_freq(struct pxa3xx_freq_info *info)
{
- uint32_t mask;
- uint32_t accr = ACCR;
-
- mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK |
- ACCR_DMCFS_MASK;
-
- accr &= ~mask;
- accr |= ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) |
- ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs);
+ u32 mask, disable, enable;
- ACCR = accr;
+ mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK |
+ ACCR_DMCFS_MASK;
+ disable = mask;
+ enable = ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) |
+ ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs);
- while ((ACSR & mask) != (accr & mask))
- cpu_relax();
+ pxa3xx_clk_update_accr(disable, enable, 0, mask);
}
static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index ac381db25dbe..2a6a98764a8c 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved
+ * Copyright (c) 2020 - 2022, NVIDIA CORPORATION. All rights reserved
*/
#include <linux/cpu.h>
@@ -24,6 +24,17 @@
#define CPUFREQ_TBL_STEP_HZ (50 * KHZ * KHZ)
#define MAX_CNT ~0U
+#define NDIV_MASK 0x1FF
+
+#define CORE_OFFSET(cpu) (cpu * 8)
+#define CMU_CLKS_BASE 0x2000
+#define SCRATCH_FREQ_CORE_REG(data, cpu) (data->regs + CMU_CLKS_BASE + CORE_OFFSET(cpu))
+
+#define MMCRAB_CLUSTER_BASE(cl) (0x30000 + (cl * 0x10000))
+#define CLUSTER_ACTMON_BASE(data, cl) \
+ (data->regs + (MMCRAB_CLUSTER_BASE(cl) + data->soc->actmon_cntr_base))
+#define CORE_ACTMON_CNTR_REG(data, cl, cpu) (CLUSTER_ACTMON_BASE(data, cl) + CORE_OFFSET(cpu))
+
/* cpufreq transition latency */
#define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
@@ -35,12 +46,6 @@ enum cluster {
MAX_CLUSTERS,
};
-struct tegra194_cpufreq_data {
- void __iomem *regs;
- size_t num_clusters;
- struct cpufreq_frequency_table **tables;
-};
-
struct tegra_cpu_ctr {
u32 cpu;
u32 coreclk_cnt, last_coreclk_cnt;
@@ -52,13 +57,127 @@ struct read_counters_work {
struct tegra_cpu_ctr c;
};
+struct tegra_cpufreq_ops {
+ void (*read_counters)(struct tegra_cpu_ctr *c);
+ void (*set_cpu_ndiv)(struct cpufreq_policy *policy, u64 ndiv);
+ void (*get_cpu_cluster_id)(u32 cpu, u32 *cpuid, u32 *clusterid);
+ int (*get_cpu_ndiv)(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv);
+};
+
+struct tegra_cpufreq_soc {
+ struct tegra_cpufreq_ops *ops;
+ int maxcpus_per_cluster;
+ phys_addr_t actmon_cntr_base;
+};
+
+struct tegra194_cpufreq_data {
+ void __iomem *regs;
+ size_t num_clusters;
+ struct cpufreq_frequency_table **tables;
+ const struct tegra_cpufreq_soc *soc;
+};
+
static struct workqueue_struct *read_counters_wq;
-static void get_cpu_cluster(void *cluster)
+static void tegra_get_cpu_mpidr(void *mpidr)
+{
+ *((u64 *)mpidr) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+}
+
+static void tegra234_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
+{
+ u64 mpidr;
+
+ smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true);
+
+ if (cpuid)
+ *cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ if (clusterid)
+ *clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+}
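+/*
+ * Example (illustrative value): an MPIDR of 0x10200 decodes as Aff0 = 0x00,
+ * Aff1 = 0x02 and Aff2 = 0x01, so on Tegra234 this is core 2 (cpuid) of
+ * cluster 1 (clusterid).
+ */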
+
+static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
{
- u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+ void __iomem *freq_core_reg;
+ u64 mpidr_id;
+
+ /* use physical id to get address of per core frequency register */
+ mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
+ freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
+
+ *ndiv = readl(freq_core_reg) & NDIV_MASK;
+
+ return 0;
+}
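+/*
+ * Illustration of the SCRATCH_FREQ_CORE_REG() addressing used above: with
+ * maxcpus_per_cluster = 4 (Tegra234), clusterid = 1 and cpuid = 2 give
+ * mpidr_id = 1 * 4 + 2 = 6, so the per-core register lives at
+ * regs + CMU_CLKS_BASE + 6 * 8 = regs + 0x2030.
+ */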
- *((uint32_t *)cluster) = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+static void tegra234_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
+{
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+ void __iomem *freq_core_reg;
+ u32 cpu, cpuid, clusterid;
+ u64 mpidr_id;
+
+ for_each_cpu_and(cpu, policy->cpus, cpu_online_mask) {
+ data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
+
+ /* use physical id to get address of per core frequency register */
+ mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
+ freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
+
+ writel(ndiv, freq_core_reg);
+ }
+}
+
+/*
+ * This register provides access to two counter values with a single
+ * 64-bit read. The counter values are used to determine the average
+ * actual frequency a core has run at over a period of time.
+ * [63:32] PLLP counter: Counts at fixed frequency (408 MHz)
+ * [31:0] Core clock counter: Counts on every core clock cycle
+ */
+static void tegra234_read_counters(struct tegra_cpu_ctr *c)
+{
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+ void __iomem *actmon_reg;
+ u32 cpuid, clusterid;
+ u64 val;
+
+ data->soc->ops->get_cpu_cluster_id(c->cpu, &cpuid, &clusterid);
+ actmon_reg = CORE_ACTMON_CNTR_REG(data, clusterid, cpuid);
+
+ val = readq(actmon_reg);
+ c->last_refclk_cnt = upper_32_bits(val);
+ c->last_coreclk_cnt = lower_32_bits(val);
+ udelay(US_DELAY);
+ val = readq(actmon_reg);
+ c->refclk_cnt = upper_32_bits(val);
+ c->coreclk_cnt = lower_32_bits(val);
+}
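+/*
+ * Worked example for the counters read above: the PLLP counter ticks at a
+ * fixed 408 MHz, so if the core counter advances five times as much as the
+ * PLLP counter over the US_DELAY window, the core averaged 5 * 408 =
+ * 2040 MHz. The actual conversion is done later in tegra194_calculate_speed().
+ */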
+
+static struct tegra_cpufreq_ops tegra234_cpufreq_ops = {
+ .read_counters = tegra234_read_counters,
+ .get_cpu_cluster_id = tegra234_get_cpu_cluster_id,
+ .get_cpu_ndiv = tegra234_get_cpu_ndiv,
+ .set_cpu_ndiv = tegra234_set_cpu_ndiv,
+};
+
+const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
+ .ops = &tegra234_cpufreq_ops,
+ .actmon_cntr_base = 0x9000,
+ .maxcpus_per_cluster = 4,
+};
+
+static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
+{
+ u64 mpidr;
+
+ smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true);
+
+ if (cpuid)
+ *cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ if (clusterid)
+ *clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
}
/*
@@ -85,11 +204,24 @@ static inline u32 map_ndiv_to_freq(struct mrq_cpu_ndiv_limits_response
return nltbl->ref_clk_hz / KHZ * ndiv / (nltbl->pdiv * nltbl->mdiv);
}
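/*
 * Example for map_ndiv_to_freq() above (illustrative numbers): with
 * ref_clk_hz = 38400000, pdiv = mdiv = 1 and ndiv = 75 the result is
 * 38400000 / 1000 * 75 / 1 = 2880000 kHz, i.e. 2.88 GHz.
 */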
+static void tegra194_read_counters(struct tegra_cpu_ctr *c)
+{
+ u64 val;
+
+ val = read_freq_feedback();
+ c->last_refclk_cnt = lower_32_bits(val);
+ c->last_coreclk_cnt = upper_32_bits(val);
+ udelay(US_DELAY);
+ val = read_freq_feedback();
+ c->refclk_cnt = lower_32_bits(val);
+ c->coreclk_cnt = upper_32_bits(val);
+}
+
static void tegra_read_counters(struct work_struct *work)
{
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
struct read_counters_work *read_counters_work;
struct tegra_cpu_ctr *c;
- u64 val;
/*
* ref_clk_counter(32 bit counter) runs on constant clk,
@@ -107,13 +239,7 @@ static void tegra_read_counters(struct work_struct *work)
work);
c = &read_counters_work->c;
- val = read_freq_feedback();
- c->last_refclk_cnt = lower_32_bits(val);
- c->last_coreclk_cnt = upper_32_bits(val);
- udelay(US_DELAY);
- val = read_freq_feedback();
- c->refclk_cnt = lower_32_bits(val);
- c->coreclk_cnt = upper_32_bits(val);
+ data->soc->ops->read_counters(c);
}
/*
@@ -177,7 +303,7 @@ static unsigned int tegra194_calculate_speed(u32 cpu)
return (rate_mhz * KHZ); /* in KHz */
}
-static void get_cpu_ndiv(void *ndiv)
+static void tegra194_get_cpu_ndiv_sysreg(void *ndiv)
{
u64 ndiv_val;
@@ -186,30 +312,43 @@ static void get_cpu_ndiv(void *ndiv)
*(u64 *)ndiv = ndiv_val;
}
-static void set_cpu_ndiv(void *data)
+static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
+{
+ int ret;
+
+ ret = smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true);
+
+ return ret;
+}
+
+static void tegra194_set_cpu_ndiv_sysreg(void *data)
{
- struct cpufreq_frequency_table *tbl = data;
- u64 ndiv_val = (u64)tbl->driver_data;
+ u64 ndiv_val = *(u64 *)data;
asm volatile("msr s3_0_c15_c0_4, %0" : : "r" (ndiv_val));
}
+static void tegra194_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
+{
+ on_each_cpu_mask(policy->cpus, tegra194_set_cpu_ndiv_sysreg, &ndiv, true);
+}
+
static unsigned int tegra194_get_speed(u32 cpu)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
struct cpufreq_frequency_table *pos;
+ u32 cpuid, clusterid;
unsigned int rate;
u64 ndiv;
int ret;
- u32 cl;
- smp_call_function_single(cpu, get_cpu_cluster, &cl, true);
+ data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
/* reconstruct actual cpu freq using counters */
rate = tegra194_calculate_speed(cpu);
/* get last written ndiv value */
- ret = smp_call_function_single(cpu, get_cpu_ndiv, &ndiv, true);
+ ret = data->soc->ops->get_cpu_ndiv(cpu, cpuid, clusterid, &ndiv);
if (WARN_ON_ONCE(ret))
return rate;
@@ -219,7 +358,7 @@ static unsigned int tegra194_get_speed(u32 cpu)
* to the last written ndiv value from freq_table. This is
* done to return consistent value.
*/
- cpufreq_for_each_valid_entry(pos, data->tables[cl]) {
+ cpufreq_for_each_valid_entry(pos, data->tables[clusterid]) {
if (pos->driver_data != ndiv)
continue;
@@ -237,19 +376,22 @@ static unsigned int tegra194_get_speed(u32 cpu)
static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
- u32 cpu;
- u32 cl;
+ int maxcpus_per_cluster = data->soc->maxcpus_per_cluster;
+ u32 start_cpu, cpu;
+ u32 clusterid;
- smp_call_function_single(policy->cpu, get_cpu_cluster, &cl, true);
+ data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
- if (cl >= data->num_clusters || !data->tables[cl])
+ if (clusterid >= data->num_clusters || !data->tables[clusterid])
return -EINVAL;
+ start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
/* set same policy for all cpus in a cluster */
- for (cpu = (cl * 2); cpu < ((cl + 1) * 2); cpu++)
- cpumask_set_cpu(cpu, policy->cpus);
-
- policy->freq_table = data->tables[cl];
+ for (cpu = start_cpu; cpu < (start_cpu + maxcpus_per_cluster); cpu++) {
+ if (cpu_possible(cpu))
+ cpumask_set_cpu(cpu, policy->cpus);
+ }
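+	/*
+	 * Example: with maxcpus_per_cluster = 2 (Tegra194), policy->cpu = 5
+	 * gives start_cpu = rounddown(5, 2) = 4, so CPUs 4 and 5 share the
+	 * policy; with maxcpus_per_cluster = 4 (Tegra234) the same CPU gives
+	 * start_cpu = 4 and CPUs 4-7 share it.
+	 */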
+ policy->freq_table = data->tables[clusterid];
policy->cpuinfo.transition_latency = TEGRA_CPUFREQ_TRANSITION_LATENCY;
return 0;
@@ -259,13 +401,14 @@ static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct cpufreq_frequency_table *tbl = policy->freq_table + index;
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
/*
* Each core writes frequency in per core register. Then both cores
* in a cluster run at same frequency which is the maximum frequency
* request out of the values requested by both cores in that cluster.
*/
- on_each_cpu_mask(policy->cpus, set_cpu_ndiv, tbl, true);
+ data->soc->ops->set_cpu_ndiv(policy, (u64)tbl->driver_data);
return 0;
}
@@ -280,6 +423,18 @@ static struct cpufreq_driver tegra194_cpufreq_driver = {
.attr = cpufreq_generic_attr,
};
+static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
+ .read_counters = tegra194_read_counters,
+ .get_cpu_cluster_id = tegra194_get_cpu_cluster_id,
+ .get_cpu_ndiv = tegra194_get_cpu_ndiv,
+ .set_cpu_ndiv = tegra194_set_cpu_ndiv,
+};
+
+const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
+ .ops = &tegra194_cpufreq_ops,
+ .maxcpus_per_cluster = 2,
+};
+
static void tegra194_cpufreq_free_resources(void)
{
destroy_workqueue(read_counters_wq);
@@ -359,6 +514,7 @@ init_freq_table(struct platform_device *pdev, struct tegra_bpmp *bpmp,
static int tegra194_cpufreq_probe(struct platform_device *pdev)
{
+ const struct tegra_cpufreq_soc *soc;
struct tegra194_cpufreq_data *data;
struct tegra_bpmp *bpmp;
int err, i;
@@ -367,12 +523,28 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
+ soc = of_device_get_match_data(&pdev->dev);
+
+ if (soc->ops && soc->maxcpus_per_cluster) {
+ data->soc = soc;
+ } else {
+ dev_err(&pdev->dev, "soc data missing\n");
+ return -EINVAL;
+ }
+
data->num_clusters = MAX_CLUSTERS;
data->tables = devm_kcalloc(&pdev->dev, data->num_clusters,
sizeof(*data->tables), GFP_KERNEL);
if (!data->tables)
return -ENOMEM;
+ if (soc->actmon_cntr_base) {
+		/* mmio registers are used for frequency request and reconstruction */
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->regs))
+ return PTR_ERR(data->regs);
+ }
+
platform_set_drvdata(pdev, data);
bpmp = tegra_bpmp_get(&pdev->dev);
@@ -416,10 +588,10 @@ static int tegra194_cpufreq_remove(struct platform_device *pdev)
}
static const struct of_device_id tegra194_cpufreq_of_match[] = {
- { .compatible = "nvidia,tegra194-ccplex", },
+ { .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc },
+ { .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, tegra194_cpufreq_of_match);
static struct platform_driver tegra194_ccplex_driver = {
.driver = {
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 7b2d138bc83e..ee99c02c84e8 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -216,9 +216,9 @@ config CRYPTO_AES_S390
config CRYPTO_CHACHA_S390
tristate "ChaCha20 stream cipher"
depends on S390
- select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
- select CRYPTO_CHACHA20
+ select CRYPTO_LIB_CHACHA_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
help
This is the s390 SIMD implementation of the ChaCha20 stream
cipher (RFC 7539).
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 0a4fff23d272..f81703a86b98 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_CRYPTO_DEV_ALLWINNER) += allwinner/
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
+# __init ordering requires atmel-i2c to come before atmel-ecc and atmel-sha204a.
obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += atmel-i2c.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index dec79fa3ebaf..10fe9f73a5fb 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -20,7 +20,6 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
u32 mode = ctx->mode;
- void *backup_iv = NULL;
/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
u32 rx_cnt = SS_RX_DEFAULT;
u32 tx_cnt = 0;
@@ -48,10 +47,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
}
if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
- backup_iv = kzalloc(ivsize, GFP_KERNEL);
- if (!backup_iv)
- return -ENOMEM;
- scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
+ scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
+ areq->cryptlen - ivsize, ivsize, 0);
}
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
@@ -134,8 +131,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
if (areq->iv) {
if (mode & SS_DECRYPTION) {
- memcpy(areq->iv, backup_iv, ivsize);
- kfree_sensitive(backup_iv);
+ memcpy(areq->iv, ctx->backup_iv, ivsize);
+ memzero_explicit(ctx->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
ivsize, 0);
@@ -199,7 +196,6 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
unsigned int ileft = areq->cryptlen;
unsigned int oleft = areq->cryptlen;
unsigned int todo;
- void *backup_iv = NULL;
struct sg_mapping_iter mi, mo;
unsigned long pi = 0, po = 0; /* progress for in and out */
bool miter_err;
@@ -244,10 +240,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
return sun4i_ss_cipher_poll_fallback(areq);
if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
- backup_iv = kzalloc(ivsize, GFP_KERNEL);
- if (!backup_iv)
- return -ENOMEM;
- scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
+ scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
+ areq->cryptlen - ivsize, ivsize, 0);
}
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
@@ -384,8 +378,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
}
if (areq->iv) {
if (mode & SS_DECRYPTION) {
- memcpy(areq->iv, backup_iv, ivsize);
- kfree_sensitive(backup_iv);
+ memcpy(areq->iv, ctx->backup_iv, ivsize);
+ memzero_explicit(ctx->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
ivsize, 0);
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
index 0fee6f4e2d90..ba59c7a48825 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
@@ -183,6 +183,7 @@ struct sun4i_tfm_ctx {
struct sun4i_cipher_req_ctx {
u32 mode;
+ u8 backup_iv[AES_BLOCK_SIZE];
struct skcipher_request fallback_req; // keep at the end
};
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 35e3cadccac2..74b4e910a38d 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -25,26 +25,62 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct scatterlist *sg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ce_alg_template *algt;
+ unsigned int todo, len;
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+
+ if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
+ sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
+ algt->stat_fb_maxsg++;
+ return true;
+ }
- if (sg_nents(areq->src) > MAX_SG || sg_nents(areq->dst) > MAX_SG)
+ if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) {
+ algt->stat_fb_leniv++;
return true;
+ }
- if (areq->cryptlen < crypto_skcipher_ivsize(tfm))
+ if (areq->cryptlen == 0) {
+ algt->stat_fb_len0++;
return true;
+ }
- if (areq->cryptlen == 0 || areq->cryptlen % 16)
+ if (areq->cryptlen % 16) {
+ algt->stat_fb_mod16++;
return true;
+ }
+ len = areq->cryptlen;
sg = areq->src;
while (sg) {
- if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+ algt->stat_fb_srcali++;
+ return true;
+ }
+ todo = min(len, sg->length);
+ if (todo % 4) {
+ algt->stat_fb_srclen++;
return true;
+ }
+ len -= todo;
sg = sg_next(sg);
}
+
+ len = areq->cryptlen;
sg = areq->dst;
while (sg) {
- if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+ algt->stat_fb_dstali++;
+ return true;
+ }
+ todo = min(len, sg->length);
+ if (todo % 4) {
+ algt->stat_fb_dstlen++;
return true;
+ }
+ len -= todo;
sg = sg_next(sg);
}
return false;
@@ -94,6 +130,8 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
int nr_sgs = 0;
int nr_sgd = 0;
int err = 0;
+ int ns = sg_nents_for_len(areq->src, areq->cryptlen);
+ int nd = sg_nents_for_len(areq->dst, areq->cryptlen);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
@@ -152,23 +190,13 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
ivsize = crypto_skcipher_ivsize(tfm);
if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
rctx->ivlen = ivsize;
- rctx->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
- if (!rctx->bounce_iv) {
- err = -ENOMEM;
- goto theend_key;
- }
if (rctx->op_dir & CE_DECRYPTION) {
- rctx->backup_iv = kzalloc(ivsize, GFP_KERNEL);
- if (!rctx->backup_iv) {
- err = -ENOMEM;
- goto theend_key;
- }
offset = areq->cryptlen - ivsize;
- scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
+ scatterwalk_map_and_copy(chan->backup_iv, areq->src,
offset, ivsize, 0);
}
- memcpy(rctx->bounce_iv, areq->iv, ivsize);
- rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen,
+ memcpy(chan->bounce_iv, areq->iv, ivsize);
+ rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, rctx->ivlen,
DMA_TO_DEVICE);
if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
dev_err(ce->dev, "Cannot DMA MAP IV\n");
@@ -179,8 +207,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
}
if (areq->src == areq->dst) {
- nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
- DMA_BIDIRECTIONAL);
+ nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
@@ -188,15 +215,13 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
}
nr_sgd = nr_sgs;
} else {
- nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
- DMA_TO_DEVICE);
+ nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
goto theend_iv;
}
- nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
- DMA_FROM_DEVICE);
+ nr_sgd = dma_map_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
err = -EINVAL;
@@ -241,14 +266,11 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
theend_sgs:
if (areq->src == areq->dst) {
- dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
} else {
if (nr_sgs > 0)
- dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
- DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
- DMA_FROM_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
}
theend_iv:
@@ -257,16 +279,15 @@ theend_iv:
dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
- memcpy(areq->iv, rctx->backup_iv, ivsize);
- kfree_sensitive(rctx->backup_iv);
+ memcpy(areq->iv, chan->backup_iv, ivsize);
+ memzero_explicit(chan->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0);
}
- kfree(rctx->bounce_iv);
+ memzero_explicit(chan->bounce_iv, ivsize);
}
-theend_key:
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
theend:
@@ -322,13 +343,13 @@ static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_r
dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
- memcpy(areq->iv, rctx->backup_iv, ivsize);
- kfree_sensitive(rctx->backup_iv);
+ memcpy(areq->iv, chan->backup_iv, ivsize);
+ memzero_explicit(chan->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0);
}
- kfree(rctx->bounce_iv);
+ memzero_explicit(chan->bounce_iv, ivsize);
}
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
@@ -398,10 +419,9 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm);
-
- dev_info(op->ce->dev, "Fallback for %s is %s\n",
- crypto_tfm_alg_driver_name(&sktfm->base),
- crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
+ memcpy(algt->fbname,
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
+ CRYPTO_MAX_ALG_NAME);
op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index d8623c7e0d1d..9f6594699835 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -283,7 +283,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
@@ -310,7 +310,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
@@ -336,7 +336,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
@@ -363,7 +363,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
@@ -595,19 +595,47 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
continue;
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
- seq_printf(seq, "%s %s %lu %lu\n",
+ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
ce_algs[i].alg.skcipher.base.cra_driver_name,
ce_algs[i].alg.skcipher.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
+ seq_printf(seq, "\tLast fallback is: %s\n",
+ ce_algs[i].fbname);
+ seq_printf(seq, "\tFallback due to 0 length: %lu\n",
+ ce_algs[i].stat_fb_len0);
+ seq_printf(seq, "\tFallback due to length !mod16: %lu\n",
+ ce_algs[i].stat_fb_mod16);
+ seq_printf(seq, "\tFallback due to length < IV: %lu\n",
+ ce_algs[i].stat_fb_leniv);
+ seq_printf(seq, "\tFallback due to source alignment: %lu\n",
+ ce_algs[i].stat_fb_srcali);
+ seq_printf(seq, "\tFallback due to dest alignment: %lu\n",
+ ce_algs[i].stat_fb_dstali);
+ seq_printf(seq, "\tFallback due to source length: %lu\n",
+ ce_algs[i].stat_fb_srclen);
+ seq_printf(seq, "\tFallback due to dest length: %lu\n",
+ ce_algs[i].stat_fb_dstlen);
+ seq_printf(seq, "\tFallback due to SG numbers: %lu\n",
+ ce_algs[i].stat_fb_maxsg);
break;
case CRYPTO_ALG_TYPE_AHASH:
- seq_printf(seq, "%s %s %lu %lu\n",
+ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
ce_algs[i].alg.hash.halg.base.cra_driver_name,
ce_algs[i].alg.hash.halg.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
+ seq_printf(seq, "\tLast fallback is: %s\n",
+ ce_algs[i].fbname);
+ seq_printf(seq, "\tFallback due to 0 length: %lu\n",
+ ce_algs[i].stat_fb_len0);
+ seq_printf(seq, "\tFallback due to length: %lu\n",
+ ce_algs[i].stat_fb_srclen);
+ seq_printf(seq, "\tFallback due to alignment: %lu\n",
+ ce_algs[i].stat_fb_srcali);
+ seq_printf(seq, "\tFallback due to SG numbers: %lu\n",
+ ce_algs[i].stat_fb_maxsg);
break;
case CRYPTO_ALG_TYPE_RNG:
- seq_printf(seq, "%s %s %lu %lu\n",
+ seq_printf(seq, "%s %s reqs=%lu bytes=%lu\n",
ce_algs[i].alg.rng.base.cra_driver_name,
ce_algs[i].alg.rng.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_bytes);
@@ -673,6 +701,18 @@ static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce)
err = -ENOMEM;
goto error_engine;
}
+ ce->chanlist[i].bounce_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!ce->chanlist[i].bounce_iv) {
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ ce->chanlist[i].backup_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
+ GFP_KERNEL);
+ if (!ce->chanlist[i].backup_iv) {
+ err = -ENOMEM;
+ goto error_engine;
+ }
}
return 0;
error_engine:
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 859b7522faaa..8b5b9b9d04c3 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -50,9 +50,9 @@ int sun8i_ce_hash_crainit(struct crypto_tfm *tfm)
sizeof(struct sun8i_ce_hash_reqctx) +
crypto_ahash_reqsize(op->fallback_tfm));
- dev_info(op->ce->dev, "Fallback for %s is %s\n",
- crypto_tfm_alg_driver_name(tfm),
- crypto_tfm_alg_driver_name(&op->fallback_tfm->base));
+ memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base),
+ CRYPTO_MAX_ALG_NAME);
+
err = pm_runtime_get_sync(op->ce->dev);
if (err < 0)
goto error_pm;
@@ -199,17 +199,32 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ce_alg_template *algt;
struct scatterlist *sg;
- if (areq->nbytes == 0)
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+
+ if (areq->nbytes == 0) {
+ algt->stat_fb_len0++;
return true;
+ }
/* we need to reserve one SG for padding one */
- if (sg_nents(areq->src) > MAX_SG - 1)
+ if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) {
+ algt->stat_fb_maxsg++;
return true;
+ }
sg = areq->src;
while (sg) {
- if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ if (sg->length % 4) {
+ algt->stat_fb_srclen++;
return true;
+ }
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+ algt->stat_fb_srcali++;
+ return true;
+ }
sg = sg_next(sg);
}
return false;
@@ -229,7 +244,7 @@ int sun8i_ce_hash_digest(struct ahash_request *areq)
if (sun8i_ce_hash_need_fallback(areq))
return sun8i_ce_hash_digest_fb(areq);
- nr_sgs = sg_nents(areq->src);
+ nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
if (nr_sgs > MAX_SG - 1)
return sun8i_ce_hash_digest_fb(areq);
@@ -248,6 +263,64 @@ int sun8i_ce_hash_digest(struct ahash_request *areq)
return crypto_transfer_hash_request_to_engine(engine, areq);
}
+static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
+{
+ u64 fill, min_fill, j, k;
+ __be64 *bebits;
+ __le64 *lebits;
+
+ j = padi;
+ buf[j++] = cpu_to_le32(0x80);
+
+ if (bs == 64) {
+ fill = 64 - (byte_count % 64);
+ min_fill = 2 * sizeof(u32) + sizeof(u32);
+ } else {
+ fill = 128 - (byte_count % 128);
+ min_fill = 4 * sizeof(u32) + sizeof(u32);
+ }
+
+ if (fill < min_fill)
+ fill += bs;
+
+ k = j;
+ j += (fill - min_fill) / sizeof(u32);
+ if (j * 4 > bufsize) {
+ pr_err("%s OVERFLOW %llu\n", __func__, j);
+ return 0;
+ }
+ for (; k < j; k++)
+ buf[k] = 0;
+
+ if (le) {
+ /* MD5 */
+ lebits = (__le64 *)&buf[j];
+ *lebits = cpu_to_le64(byte_count << 3);
+ j += 2;
+ } else {
+ if (bs == 64) {
+ /* sha1 sha224 sha256 */
+ bebits = (__be64 *)&buf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ } else {
+			/* sha384 sha512 */
+ bebits = (__be64 *)&buf[j];
+ *bebits = cpu_to_be64(byte_count >> 61);
+ j += 2;
+ bebits = (__be64 *)&buf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ }
+ }
+ if (j * 4 > bufsize) {
+ pr_err("%s OVERFLOW %llu\n", __func__, j);
+ return 0;
+ }
+
+ return j;
+}
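+/*
+ * Worked example for hash_pad(): for SHA-256 (bs = 64, le = false) with
+ * byte_count = 64, buf[0] becomes the 0x80 marker word, buf[1..13] are
+ * zeroed and buf[14..15] hold the big-endian bit count (512), i.e. one
+ * full extra 64-byte block, matching the standard SHA-2 padding for a
+ * message that exactly fills a block.
+ */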
+
int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
{
struct ahash_request *areq = container_of(breq, struct ahash_request, base);
@@ -266,14 +339,11 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
__le32 *bf;
void *buf = NULL;
int j, i, todo;
- int nbw = 0;
- u64 fill, min_fill;
- __be64 *bebits;
- __le64 *lebits;
void *result = NULL;
u64 bs;
int digestsize;
dma_addr_t addr_res, addr_pad;
+ int ns = sg_nents_for_len(areq->src, areq->nbytes);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
ce = algt->ce;
@@ -318,7 +388,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
cet->t_sym_ctl = 0;
cet->t_asym_ctl = 0;
- nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
@@ -348,44 +418,25 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
byte_count = areq->nbytes;
j = 0;
- bf[j++] = cpu_to_le32(0x80);
-
- if (bs == 64) {
- fill = 64 - (byte_count % 64);
- min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
- } else {
- fill = 128 - (byte_count % 128);
- min_fill = 4 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
- }
-
- if (fill < min_fill)
- fill += bs;
-
- j += (fill - min_fill) / sizeof(u32);
switch (algt->ce_algo_id) {
case CE_ID_HASH_MD5:
- lebits = (__le64 *)&bf[j];
- *lebits = cpu_to_le64(byte_count << 3);
- j += 2;
+ j = hash_pad(bf, 2 * bs, j, byte_count, true, bs);
break;
case CE_ID_HASH_SHA1:
case CE_ID_HASH_SHA224:
case CE_ID_HASH_SHA256:
- bebits = (__be64 *)&bf[j];
- *bebits = cpu_to_be64(byte_count << 3);
- j += 2;
+ j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
break;
case CE_ID_HASH_SHA384:
case CE_ID_HASH_SHA512:
- bebits = (__be64 *)&bf[j];
- *bebits = cpu_to_be64(byte_count >> 61);
- j += 2;
- bebits = (__be64 *)&bf[j];
- *bebits = cpu_to_be64(byte_count << 3);
- j += 2;
+ j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
break;
}
+ if (!j) {
+ err = -EINVAL;
+ goto theend;
+ }
addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
cet->t_src[i].addr = cpu_to_le32(addr_pad);
@@ -406,8 +457,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
- DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
index b3a9bbfb8831..b3cc43ea6c8a 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -108,11 +108,9 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
goto err_dst;
}
- err = pm_runtime_get_sync(ce->dev);
- if (err < 0) {
- pm_runtime_put_noidle(ce->dev);
+ err = pm_runtime_resume_and_get(ce->dev);
+ if (err < 0)
goto err_pm;
- }
mutex_lock(&ce->rnglock);
chan = &ce->chanlist[flow];
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 624a5926f21f..8177aaba4434 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -186,6 +186,8 @@ struct ce_task {
* @status: set to 1 by interrupt if task is done
* @t_phy: Physical address of task
* @tl: pointer to the current ce_task for this flow
+ * @backup_iv: buffer which contains the next IV to store
+ * @bounce_iv: buffer which contains the IV
* @stat_req: number of request done by this flow
*/
struct sun8i_ce_flow {
@@ -195,6 +197,8 @@ struct sun8i_ce_flow {
dma_addr_t t_phy;
int timeout;
struct ce_task *tl;
+ void *backup_iv;
+ void *bounce_iv;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
unsigned long stat_req;
#endif
@@ -241,8 +245,6 @@ struct sun8i_ce_dev {
* struct sun8i_cipher_req_ctx - context for a skcipher request
* @op_dir: direction (encrypt vs decrypt) for this request
* @flow: the flow to use for this request
- * @backup_iv: buffer which contain the next IV to store
- * @bounce_iv: buffer which contain the IV
* @ivlen: size of bounce_iv
* @nr_sgs: The number of source SG (as given by dma_map_sg())
* @nr_sgd: The number of destination SG (as given by dma_map_sg())
@@ -253,8 +255,6 @@ struct sun8i_ce_dev {
struct sun8i_cipher_req_ctx {
u32 op_dir;
int flow;
- void *backup_iv;
- void *bounce_iv;
unsigned int ivlen;
int nr_sgs;
int nr_sgd;
@@ -333,11 +333,18 @@ struct sun8i_ce_alg_template {
struct ahash_alg hash;
struct rng_alg rng;
} alg;
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
unsigned long stat_req;
unsigned long stat_fb;
unsigned long stat_bytes;
-#endif
+ unsigned long stat_fb_maxsg;
+ unsigned long stat_fb_leniv;
+ unsigned long stat_fb_len0;
+ unsigned long stat_fb_mod16;
+ unsigned long stat_fb_srcali;
+ unsigned long stat_fb_srclen;
+ unsigned long stat_fb_dstali;
+ unsigned long stat_fb_dstlen;
+ char fbname[CRYPTO_MAX_ALG_NAME];
};
int sun8i_ce_enqueue(struct crypto_async_request *areq, u32 type);
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 554e400d41ca..5bb950182026 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -22,34 +22,53 @@
static bool sun8i_ss_need_fallback(struct skcipher_request *areq)
{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
struct scatterlist *in_sg = areq->src;
struct scatterlist *out_sg = areq->dst;
struct scatterlist *sg;
+ unsigned int todo, len;
- if (areq->cryptlen == 0 || areq->cryptlen % 16)
+ if (areq->cryptlen == 0 || areq->cryptlen % 16) {
+ algt->stat_fb_len++;
return true;
+ }
- if (sg_nents(areq->src) > 8 || sg_nents(areq->dst) > 8)
+ if (sg_nents_for_len(areq->src, areq->cryptlen) > 8 ||
+ sg_nents_for_len(areq->dst, areq->cryptlen) > 8) {
+ algt->stat_fb_sgnum++;
return true;
+ }
+ len = areq->cryptlen;
sg = areq->src;
while (sg) {
- if ((sg->length % 16) != 0)
- return true;
- if ((sg_dma_len(sg) % 16) != 0)
+ todo = min(len, sg->length);
+ if ((todo % 16) != 0) {
+ algt->stat_fb_sglen++;
return true;
- if (!IS_ALIGNED(sg->offset, 16))
+ }
+ if (!IS_ALIGNED(sg->offset, 16)) {
+ algt->stat_fb_align++;
return true;
+ }
+ len -= todo;
sg = sg_next(sg);
}
+ len = areq->cryptlen;
sg = areq->dst;
while (sg) {
- if ((sg->length % 16) != 0)
- return true;
- if ((sg_dma_len(sg) % 16) != 0)
+ todo = min(len, sg->length);
+ if ((todo % 16) != 0) {
+ algt->stat_fb_sglen++;
return true;
- if (!IS_ALIGNED(sg->offset, 16))
+ }
+ if (!IS_ALIGNED(sg->offset, 16)) {
+ algt->stat_fb_align++;
return true;
+ }
+ len -= todo;
sg = sg_next(sg);
}
@@ -93,6 +112,68 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
return err;
}
+static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ss_dev *ss = op->ss;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct scatterlist *sg = areq->src;
+ unsigned int todo, offset;
+ unsigned int len = areq->cryptlen;
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
+ int i = 0;
+ u32 a;
+ int err;
+
+ rctx->ivlen = ivsize;
+ if (rctx->op_dir & SS_DECRYPTION) {
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(sf->biv, areq->src, offset,
+ ivsize, 0);
+ }
+
+	/* we need to copy all IVs from the source in case DMA is bidirectional */
+ while (sg && len) {
+ if (sg_dma_len(sg) == 0) {
+ sg = sg_next(sg);
+ continue;
+ }
+ if (i == 0)
+ memcpy(sf->iv[0], areq->iv, ivsize);
+ a = dma_map_single(ss->dev, sf->iv[i], ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, a)) {
+ memzero_explicit(sf->iv[i], ivsize);
+ dev_err(ss->dev, "Cannot DMA MAP IV\n");
+ err = -EFAULT;
+ goto dma_iv_error;
+ }
+ rctx->p_iv[i] = a;
+		/* the remaining IVs only need to be set up when decrypting */
+ if (rctx->op_dir & SS_ENCRYPTION)
+ return 0;
+ todo = min(len, sg_dma_len(sg));
+ len -= todo;
+ i++;
+ if (i < MAX_SG) {
+ offset = sg->length - ivsize;
+ scatterwalk_map_and_copy(sf->iv[i], sg, offset, ivsize, 0);
+ }
+ rctx->niv = i;
+ sg = sg_next(sg);
+ }
+
+ return 0;
+dma_iv_error:
+ i--;
+	while (i >= 0) {
+		dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
+		memzero_explicit(sf->iv[i], ivsize);
+		i--;
+	}
+ return err;
+}
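+/*
+ * Rationale, as implied by the code above: for CBC decryption the IV of
+ * chunk i + 1 is the last ciphertext block of chunk i. When a request spans
+ * several SG entries and the DMA may rewrite the source in place (when
+ * src == dst), those blocks must be saved into sf->iv[] before the
+ * operation starts, hence one pre-copied IV per SG entry.
+ */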
+
static int sun8i_ss_cipher(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
@@ -101,12 +182,14 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ss_alg_template *algt;
+ struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
struct scatterlist *sg;
unsigned int todo, len, offset, ivsize;
- void *backup_iv = NULL;
int nr_sgs = 0;
int nr_sgd = 0;
int err = 0;
+ int nsgs = sg_nents_for_len(areq->src, areq->cryptlen);
+ int nsgd = sg_nents_for_len(areq->dst, areq->cryptlen);
int i;
algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
@@ -134,34 +217,12 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
ivsize = crypto_skcipher_ivsize(tfm);
if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
- rctx->ivlen = ivsize;
- rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
- if (!rctx->biv) {
- err = -ENOMEM;
+ err = sun8i_ss_setup_ivs(areq);
+ if (err)
goto theend_key;
- }
- if (rctx->op_dir & SS_DECRYPTION) {
- backup_iv = kzalloc(ivsize, GFP_KERNEL);
- if (!backup_iv) {
- err = -ENOMEM;
- goto theend_key;
- }
- offset = areq->cryptlen - ivsize;
- scatterwalk_map_and_copy(backup_iv, areq->src, offset,
- ivsize, 0);
- }
- memcpy(rctx->biv, areq->iv, ivsize);
- rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(ss->dev, rctx->p_iv)) {
- dev_err(ss->dev, "Cannot DMA MAP IV\n");
- err = -ENOMEM;
- goto theend_iv;
- }
}
if (areq->src == areq->dst) {
- nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
- DMA_BIDIRECTIONAL);
+ nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL);
if (nr_sgs <= 0 || nr_sgs > 8) {
dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
@@ -169,15 +230,13 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
}
nr_sgd = nr_sgs;
} else {
- nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
- DMA_TO_DEVICE);
+ nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE);
if (nr_sgs <= 0 || nr_sgs > 8) {
dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
goto theend_iv;
}
- nr_sgd = dma_map_sg(ss->dev, areq->dst, sg_nents(areq->dst),
- DMA_FROM_DEVICE);
+ nr_sgd = dma_map_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE);
if (nr_sgd <= 0 || nr_sgd > 8) {
dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd);
err = -EINVAL;
@@ -233,31 +292,26 @@ sgd_next:
theend_sgs:
if (areq->src == areq->dst) {
- dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
- DMA_TO_DEVICE);
- dma_unmap_sg(ss->dev, areq->dst, sg_nents(areq->dst),
- DMA_FROM_DEVICE);
+ dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE);
}
theend_iv:
- if (rctx->p_iv)
- dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen,
- DMA_TO_DEVICE);
-
if (areq->iv && ivsize > 0) {
- if (rctx->biv) {
- offset = areq->cryptlen - ivsize;
- if (rctx->op_dir & SS_DECRYPTION) {
- memcpy(areq->iv, backup_iv, ivsize);
- kfree_sensitive(backup_iv);
- } else {
- scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
- ivsize, 0);
- }
- kfree(rctx->biv);
+ for (i = 0; i < rctx->niv; i++) {
+ dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
+ memzero_explicit(sf->iv[i], ivsize);
+ }
+
+ offset = areq->cryptlen - ivsize;
+ if (rctx->op_dir & SS_DECRYPTION) {
+ memcpy(areq->iv, sf->biv, ivsize);
+ memzero_explicit(sf->biv, ivsize);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
}
}
@@ -349,9 +403,9 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
crypto_skcipher_reqsize(op->fallback_tfm);
- dev_info(op->ss->dev, "Fallback for %s is %s\n",
- crypto_tfm_alg_driver_name(&sktfm->base),
- crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
+ memcpy(algt->fbname,
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
+ CRYPTO_MAX_ALG_NAME);
op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
op->enginectx.op.prepare_request = NULL;
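
A side note on the multi-IV handling introduced in this file: for CBC decryption, the IV of each chunk after the first is the last ciphertext block of the previous chunk, so it must be copied out of the source scatterlist before the engine processes (and, for in-place requests, possibly overwrites) that data. A condensed, hypothetical sketch of that rule; example_save_next_iv() is an illustrative name and not part of the patch:

#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/* sketch: stash the next chunk's CBC-decrypt IV before this chunk is consumed */
static void example_save_next_iv(struct scatterlist *sg, u8 *iv_buf,
				 unsigned int ivsize)
{
	unsigned int offset = sg->length - ivsize;

	/* last ciphertext block of this chunk is the IV for the following chunk */
	scatterwalk_map_and_copy(iv_buf, sg, offset, ivsize, 0);
}
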
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 319fe3279a71..98593a0cff69 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -66,6 +66,7 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx
const char *name)
{
int flow = rctx->flow;
+ unsigned int ivlen = rctx->ivlen;
u32 v = SS_START;
int i;
@@ -104,15 +105,14 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx
mutex_lock(&ss->mlock);
writel(rctx->p_key, ss->base + SS_KEY_ADR_REG);
- if (i == 0) {
- if (rctx->p_iv)
- writel(rctx->p_iv, ss->base + SS_IV_ADR_REG);
- } else {
- if (rctx->biv) {
- if (rctx->op_dir == SS_ENCRYPTION)
- writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+ if (ivlen) {
+ if (rctx->op_dir == SS_ENCRYPTION) {
+ if (i == 0)
+ writel(rctx->p_iv[0], ss->base + SS_IV_ADR_REG);
else
- writel(rctx->t_src[i - 1].addr + rctx->t_src[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+ writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - ivlen, ss->base + SS_IV_ADR_REG);
+ } else {
+ writel(rctx->p_iv[i], ss->base + SS_IV_ADR_REG);
}
}
@@ -409,6 +409,37 @@ static struct sun8i_ss_alg_template ss_algs[] = {
}
}
},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ss_algo_id = SS_ID_HASH_SHA1,
+ .alg.hash = {
+ .init = sun8i_ss_hash_init,
+ .update = sun8i_ss_hash_update,
+ .final = sun8i_ss_hash_final,
+ .finup = sun8i_ss_hash_finup,
+ .digest = sun8i_ss_hash_digest,
+ .export = sun8i_ss_hash_export,
+ .import = sun8i_ss_hash_import,
+ .setkey = sun8i_ss_hmac_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "hmac-sha1-sun8i-ss",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ss_hash_crainit,
+ .cra_exit = sun8i_ss_hash_craexit,
+ }
+ }
+ }
+},
#endif
};
@@ -430,6 +461,17 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
ss_algs[i].alg.skcipher.base.cra_driver_name,
ss_algs[i].alg.skcipher.base.cra_name,
ss_algs[i].stat_req, ss_algs[i].stat_fb);
+
+ seq_printf(seq, "\tLast fallback is: %s\n",
+ ss_algs[i].fbname);
+ seq_printf(seq, "\tFallback due to length: %lu\n",
+ ss_algs[i].stat_fb_len);
+ seq_printf(seq, "\tFallback due to SG length: %lu\n",
+ ss_algs[i].stat_fb_sglen);
+ seq_printf(seq, "\tFallback due to alignment: %lu\n",
+ ss_algs[i].stat_fb_align);
+ seq_printf(seq, "\tFallback due to SG numbers: %lu\n",
+ ss_algs[i].stat_fb_sgnum);
break;
case CRYPTO_ALG_TYPE_RNG:
seq_printf(seq, "%s %s reqs=%lu tsize=%lu\n",
@@ -442,6 +484,16 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
ss_algs[i].alg.hash.halg.base.cra_driver_name,
ss_algs[i].alg.hash.halg.base.cra_name,
ss_algs[i].stat_req, ss_algs[i].stat_fb);
+ seq_printf(seq, "\tLast fallback is: %s\n",
+ ss_algs[i].fbname);
+ seq_printf(seq, "\tFallback due to length: %lu\n",
+ ss_algs[i].stat_fb_len);
+ seq_printf(seq, "\tFallback due to SG length: %lu\n",
+ ss_algs[i].stat_fb_sglen);
+ seq_printf(seq, "\tFallback due to alignment: %lu\n",
+ ss_algs[i].stat_fb_align);
+ seq_printf(seq, "\tFallback due to SG numbers: %lu\n",
+ ss_algs[i].stat_fb_sgnum);
break;
}
}
@@ -464,7 +516,7 @@ static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i)
*/
static int allocate_flows(struct sun8i_ss_dev *ss)
{
- int i, err;
+ int i, j, err;
ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow),
GFP_KERNEL);
@@ -474,6 +526,28 @@ static int allocate_flows(struct sun8i_ss_dev *ss)
for (i = 0; i < MAXFLOW; i++) {
init_completion(&ss->flows[i].complete);
+ ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->flows[i].biv)
+ goto error_engine;
+
+ for (j = 0; j < MAX_SG; j++) {
+ ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->flows[i].iv[j])
+ goto error_engine;
+ }
+
+ /* the padding could be up to two blocks. */
+ ss->flows[i].pad = devm_kmalloc(ss->dev, MAX_PAD_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->flows[i].pad)
+ goto error_engine;
+ ss->flows[i].result = devm_kmalloc(ss->dev, SHA256_DIGEST_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->flows[i].result)
+ goto error_engine;
+
ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true);
if (!ss->flows[i].engine) {
dev_err(ss->dev, "Cannot allocate engine\n");
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 1a71ed49d233..ac417a6b39e5 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -14,11 +14,99 @@
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
+#include <crypto/hmac.h>
+#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include "sun8i-ss.h"
+static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_shash *xtfm;
+ struct shash_desc *sdesc;
+ size_t len;
+ int ret = 0;
+
+ xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (!xtfm)
+ return -ENOMEM;
+
+ len = sizeof(*sdesc) + crypto_shash_descsize(xtfm);
+ sdesc = kmalloc(len, GFP_KERNEL);
+ if (!sdesc) {
+ ret = -ENOMEM;
+ goto err_hashkey_sdesc;
+ }
+ sdesc->tfm = xtfm;
+
+ ret = crypto_shash_init(sdesc);
+ if (ret) {
+ dev_err(tfmctx->ss->dev, "shash init error ret=%d\n", ret);
+ goto err_hashkey;
+ }
+ ret = crypto_shash_finup(sdesc, key, keylen, tfmctx->key);
+ if (ret)
+ dev_err(tfmctx->ss->dev, "shash finup error\n");
+err_hashkey:
+ kfree(sdesc);
+err_hashkey_sdesc:
+ crypto_free_shash(xtfm);
+ return ret;
+}
+
+int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash);
+ struct ahash_alg *alg = __crypto_ahash_alg(ahash->base.__crt_alg);
+ struct sun8i_ss_alg_template *algt;
+ int digestsize, i;
+ int bs = crypto_ahash_blocksize(ahash);
+ int ret;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ digestsize = algt->alg.hash.halg.digestsize;
+
+ if (keylen > bs) {
+ ret = sun8i_ss_hashkey(tfmctx, key, keylen);
+ if (ret)
+ return ret;
+ tfmctx->keylen = digestsize;
+ } else {
+ tfmctx->keylen = keylen;
+ memcpy(tfmctx->key, key, keylen);
+ }
+
+ tfmctx->ipad = kzalloc(bs, GFP_KERNEL | GFP_DMA);
+ if (!tfmctx->ipad)
+ return -ENOMEM;
+ tfmctx->opad = kzalloc(bs, GFP_KERNEL | GFP_DMA);
+ if (!tfmctx->opad) {
+ ret = -ENOMEM;
+ goto err_opad;
+ }
+
+ memset(tfmctx->key + tfmctx->keylen, 0, bs - tfmctx->keylen);
+ memcpy(tfmctx->ipad, tfmctx->key, tfmctx->keylen);
+ memcpy(tfmctx->opad, tfmctx->key, tfmctx->keylen);
+ for (i = 0; i < bs; i++) {
+ tfmctx->ipad[i] ^= HMAC_IPAD_VALUE;
+ tfmctx->opad[i] ^= HMAC_OPAD_VALUE;
+ }
+
+ ret = crypto_ahash_setkey(tfmctx->fallback_tfm, key, keylen);
+ if (!ret)
+ return 0;
+
+ memzero_explicit(tfmctx->key, keylen);
+ kfree_sensitive(tfmctx->opad);
+err_opad:
+ kfree_sensitive(tfmctx->ipad);
+ return ret;
+}
+
int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
{
struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
@@ -50,9 +138,8 @@ int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
sizeof(struct sun8i_ss_hash_reqctx) +
crypto_ahash_reqsize(op->fallback_tfm));
- dev_info(op->ss->dev, "Fallback for %s is %s\n",
- crypto_tfm_alg_driver_name(tfm),
- crypto_tfm_alg_driver_name(&op->fallback_tfm->base));
+ memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base), CRYPTO_MAX_ALG_NAME);
+
err = pm_runtime_get_sync(op->ss->dev);
if (err < 0)
goto error_pm;
@@ -67,6 +154,9 @@ void sun8i_ss_hash_craexit(struct crypto_tfm *tfm)
{
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
+ kfree_sensitive(tfmctx->ipad);
+ kfree_sensitive(tfmctx->opad);
+
crypto_free_ahash(tfmctx->fallback_tfm);
pm_runtime_put_sync_suspend(tfmctx->ss->dev);
}
@@ -258,23 +348,48 @@ static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ss_alg_template *algt;
struct scatterlist *sg;
- if (areq->nbytes == 0)
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+
+ if (areq->nbytes == 0) {
+ algt->stat_fb_len++;
+ return true;
+ }
+
+ if (areq->nbytes >= MAX_PAD_SIZE - 64) {
+ algt->stat_fb_len++;
return true;
+ }
+
/* we need to reserve one SG for the padding one */
- if (sg_nents(areq->src) > MAX_SG - 1)
+ if (sg_nents(areq->src) > MAX_SG - 1) {
+ algt->stat_fb_sgnum++;
return true;
+ }
+
sg = areq->src;
while (sg) {
/* SS can operate hash only on full block size
* since SS support only MD5,sha1,sha224 and sha256, blocksize
* is always 64
- * TODO: handle request if last SG is not len%64
- * but this will need to copy data on a new SG of size=64
*/
- if (sg->length % 64 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ /* Only the last block could be bounced to the pad buffer */
+ if (sg->length % 64 && sg_next(sg)) {
+ algt->stat_fb_sglen++;
+ return true;
+ }
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+ algt->stat_fb_align++;
+ return true;
+ }
+ if (sg->length % 4) {
+ algt->stat_fb_sglen++;
return true;
+ }
sg = sg_next(sg);
}
return false;
@@ -288,21 +403,11 @@ int sun8i_ss_hash_digest(struct ahash_request *areq)
struct sun8i_ss_alg_template *algt;
struct sun8i_ss_dev *ss;
struct crypto_engine *engine;
- struct scatterlist *sg;
- int nr_sgs, e, i;
+ int e;
if (sun8i_ss_hash_need_fallback(areq))
return sun8i_ss_hash_digest_fb(areq);
- nr_sgs = sg_nents(areq->src);
- if (nr_sgs > MAX_SG - 1)
- return sun8i_ss_hash_digest_fb(areq);
-
- for_each_sg(areq->src, sg, nr_sgs, i) {
- if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
- return sun8i_ss_hash_digest_fb(areq);
- }
-
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
ss = algt->ss;
@@ -313,6 +418,64 @@ int sun8i_ss_hash_digest(struct ahash_request *areq)
return crypto_transfer_hash_request_to_engine(engine, areq);
}
+static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
+{
+ u64 fill, min_fill, j, k;
+ __be64 *bebits;
+ __le64 *lebits;
+
+ j = padi;
+ buf[j++] = cpu_to_le32(0x80);
+
+ if (bs == 64) {
+ fill = 64 - (byte_count % 64);
+ min_fill = 2 * sizeof(u32) + sizeof(u32);
+ } else {
+ fill = 128 - (byte_count % 128);
+ min_fill = 4 * sizeof(u32) + sizeof(u32);
+ }
+
+ if (fill < min_fill)
+ fill += bs;
+
+ k = j;
+ j += (fill - min_fill) / sizeof(u32);
+ if (j * 4 > bufsize) {
+ pr_err("%s OVERFLOW %llu\n", __func__, j);
+ return 0;
+ }
+ for (; k < j; k++)
+ buf[k] = 0;
+
+ if (le) {
+ /* MD5 */
+ lebits = (__le64 *)&buf[j];
+ *lebits = cpu_to_le64(byte_count << 3);
+ j += 2;
+ } else {
+ if (bs == 64) {
+ /* sha1 sha224 sha256 */
+ bebits = (__be64 *)&buf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ } else {
+ /* sha384 sha512 */
+ bebits = (__be64 *)&buf[j];
+ *bebits = cpu_to_be64(byte_count >> 61);
+ j += 2;
+ bebits = (__be64 *)&buf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ }
+ }
+ if (j * 4 > bufsize) {
+ pr_err("%s OVERFLOW %llu\n", __func__, j);
+ return 0;
+ }
+
+ return j;
+}
+
/* sun8i_ss_hash_run - run an ahash request
* Send the data of the request to the SS along with an extra SG with padding
*/
@@ -320,20 +483,26 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
{
struct ahash_request *areq = container_of(breq, struct ahash_request, base);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
struct sun8i_ss_alg_template *algt;
struct sun8i_ss_dev *ss;
struct scatterlist *sg;
+ int bs = crypto_ahash_blocksize(tfm);
int nr_sgs, err, digestsize;
unsigned int len;
- u64 fill, min_fill, byte_count;
+ u64 byte_count;
void *pad, *result;
- int j, i, todo;
- __be64 *bebits;
- __le64 *lebits;
- dma_addr_t addr_res, addr_pad;
+ int j, i, k, todo;
+ dma_addr_t addr_res, addr_pad, addr_xpad;
__le32 *bf;
+ /* HMAC step:
+ * 0: normal hashing
+ * 1: IPAD
+ * 2: OPAD
+ */
+ int hmac = 0;
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
ss = algt->ss;
@@ -342,18 +511,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
if (digestsize == SHA224_DIGEST_SIZE)
digestsize = SHA256_DIGEST_SIZE;
- /* the padding could be up to two block. */
- pad = kzalloc(algt->alg.hash.halg.base.cra_blocksize * 2, GFP_KERNEL | GFP_DMA);
- if (!pad)
- return -ENOMEM;
+ result = ss->flows[rctx->flow].result;
+ pad = ss->flows[rctx->flow].pad;
bf = (__le32 *)pad;
- result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
- if (!result) {
- kfree(pad);
- return -ENOMEM;
- }
-
for (i = 0; i < MAX_SG; i++) {
rctx->t_dst[i].addr = 0;
rctx->t_dst[i].len = 0;
@@ -376,17 +537,33 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
if (dma_mapping_error(ss->dev, addr_res)) {
dev_err(ss->dev, "DMA map dest\n");
err = -EINVAL;
- goto theend;
+ goto err_dma_result;
}
+ j = 0;
len = areq->nbytes;
- for_each_sg(areq->src, sg, nr_sgs, i) {
- rctx->t_src[i].addr = sg_dma_address(sg);
+ sg = areq->src;
+ i = 0;
+ while (len > 0 && sg) {
+ if (sg_dma_len(sg) == 0) {
+ sg = sg_next(sg);
+ continue;
+ }
todo = min(len, sg_dma_len(sg));
- rctx->t_src[i].len = todo / 4;
- len -= todo;
- rctx->t_dst[i].addr = addr_res;
- rctx->t_dst[i].len = digestsize / 4;
+ /* only the last SG may have a length that is not a multiple of 64 */
+ if (todo % 64 == 0) {
+ rctx->t_src[i].addr = sg_dma_address(sg);
+ rctx->t_src[i].len = todo / 4;
+ rctx->t_dst[i].addr = addr_res;
+ rctx->t_dst[i].len = digestsize / 4;
+ len -= todo;
+ } else {
+ scatterwalk_map_and_copy(bf, sg, 0, todo, 0);
+ j += todo / 4;
+ len -= todo;
+ }
+ sg = sg_next(sg);
+ i++;
}
if (len > 0) {
dev_err(ss->dev, "remaining len %d\n", len);
@@ -394,55 +571,135 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
goto theend;
}
+ if (j > 0)
+ i--;
+
+retry:
byte_count = areq->nbytes;
- j = 0;
- bf[j++] = cpu_to_le32(0x80);
+ if (tfmctx->keylen && hmac == 0) {
+ hmac = 1;
+ /* shift all SG one slot up, to free slot 0 for IPAD */
+ for (k = 6; k >= 0; k--) {
+ rctx->t_src[k + 1].addr = rctx->t_src[k].addr;
+ rctx->t_src[k + 1].len = rctx->t_src[k].len;
+ rctx->t_dst[k + 1].addr = rctx->t_dst[k].addr;
+ rctx->t_dst[k + 1].len = rctx->t_dst[k].len;
+ }
+ addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, addr_xpad)) {
+ dev_err(ss->dev, "Fail to create DMA mapping of ipad\n");
+ goto err_dma_xpad;
+ }
+ rctx->t_src[0].addr = addr_xpad;
+ rctx->t_src[0].len = bs / 4;
+ rctx->t_dst[0].addr = addr_res;
+ rctx->t_dst[0].len = digestsize / 4;
+ i++;
+ byte_count = areq->nbytes + bs;
+ }
+ if (tfmctx->keylen && hmac == 2) {
+ for (i = 0; i < MAX_SG; i++) {
+ rctx->t_src[i].addr = 0;
+ rctx->t_src[i].len = 0;
+ rctx->t_dst[i].addr = 0;
+ rctx->t_dst[i].len = 0;
+ }
- fill = 64 - (byte_count % 64);
- min_fill = 3 * sizeof(u32);
+ addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ss->dev, addr_res)) {
+ dev_err(ss->dev, "Fail to create DMA mapping of result\n");
+ err = -EINVAL;
+ goto err_dma_result;
+ }
+ addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, addr_xpad)) {
+ dev_err(ss->dev, "Fail to create DMA mapping of opad\n");
+ goto err_dma_xpad;
+ }
+ rctx->t_src[0].addr = addr_xpad;
+ rctx->t_src[0].len = bs / 4;
- if (fill < min_fill)
- fill += 64;
+ memcpy(bf, result, digestsize);
+ j = digestsize / 4;
+ i = 1;
+ byte_count = digestsize + bs;
- j += (fill - min_fill) / sizeof(u32);
+ rctx->t_dst[0].addr = addr_res;
+ rctx->t_dst[0].len = digestsize / 4;
+ }
switch (algt->ss_algo_id) {
case SS_ID_HASH_MD5:
- lebits = (__le64 *)&bf[j];
- *lebits = cpu_to_le64(byte_count << 3);
- j += 2;
+ j = hash_pad(bf, 4096, j, byte_count, true, bs);
break;
case SS_ID_HASH_SHA1:
case SS_ID_HASH_SHA224:
case SS_ID_HASH_SHA256:
- bebits = (__be64 *)&bf[j];
- *bebits = cpu_to_be64(byte_count << 3);
- j += 2;
+ j = hash_pad(bf, 4096, j, byte_count, false, bs);
break;
}
+ if (!j) {
+ err = -EINVAL;
+ goto theend;
+ }
addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE);
- rctx->t_src[i].addr = addr_pad;
- rctx->t_src[i].len = j;
- rctx->t_dst[i].addr = addr_res;
- rctx->t_dst[i].len = digestsize / 4;
if (dma_mapping_error(ss->dev, addr_pad)) {
dev_err(ss->dev, "DMA error on padding SG\n");
err = -EINVAL;
- goto theend;
+ goto err_dma_pad;
}
+ rctx->t_src[i].addr = addr_pad;
+ rctx->t_src[i].len = j;
+ rctx->t_dst[i].addr = addr_res;
+ rctx->t_dst[i].len = digestsize / 4;
err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));
+ /*
+ * mini helper for checking dma map/unmap
+ * flow start for hmac = 0 (and HMAC = 1)
+ * HMAC = 0
+ * MAP src
+ * MAP res
+ *
+ * retry:
+ * if hmac then hmac = 1
+ * MAP xpad (ipad)
+ * if hmac == 2
+ * MAP res
+ * MAP xpad (opad)
+ * MAP pad
+ * ACTION!
+ * UNMAP pad
+ * if hmac
+ * UNMAP xpad
+ * UNMAP res
+ * if hmac < 2
+ * UNMAP SRC
+ *
+ * if hmac = 1 then hmac = 2 goto retry
+ */
+
dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
- DMA_TO_DEVICE);
+
+err_dma_pad:
+ if (hmac > 0)
+ dma_unmap_single(ss->dev, addr_xpad, bs, DMA_TO_DEVICE);
+err_dma_xpad:
dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+err_dma_result:
+ if (hmac < 2)
+ dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ if (hmac == 1 && !err) {
+ hmac = 2;
+ goto retry;
+ }
- memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
+ if (!err)
+ memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
theend:
- kfree(pad);
- kfree(result);
local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
local_bh_enable();
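
For orientation, the hardware HMAC flow added in this file is the standard two-pass construction HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)): the first pass hashes the ipad block followed by the message, the second hashes the opad block followed by the inner digest. Below is a software-only sketch of the same computation via the shash API, given purely as an illustration and assuming a key no longer than the block size; example_hmac_sha1() is a hypothetical helper, not part of the patch:

#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/sha1.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_hmac_sha1(const u8 *key, unsigned int keylen,
			     const u8 *msg, unsigned int len, u8 *out)
{
	u8 ipad[SHA1_BLOCK_SIZE], opad[SHA1_BLOCK_SIZE];
	u8 ihash[SHA1_DIGEST_SIZE];
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int i, ret;

	/* keys longer than one block would first be hashed down (omitted here) */
	if (keylen > SHA1_BLOCK_SIZE)
		return -EINVAL;

	memset(ipad, 0, sizeof(ipad));
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, sizeof(opad));
	for (i = 0; i < SHA1_BLOCK_SIZE; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	/* first pass: inner digest H(ipad || msg) */
	ret = crypto_shash_init(desc);
	if (!ret)
		ret = crypto_shash_update(desc, ipad, SHA1_BLOCK_SIZE);
	if (!ret)
		ret = crypto_shash_update(desc, msg, len);
	if (!ret)
		ret = crypto_shash_final(desc, ihash);

	/* second pass: outer digest H(opad || inner digest) */
	if (!ret)
		ret = crypto_shash_init(desc);
	if (!ret)
		ret = crypto_shash_update(desc, opad, SHA1_BLOCK_SIZE);
	if (!ret)
		ret = crypto_shash_update(desc, ihash, SHA1_DIGEST_SIZE);
	if (!ret)
		ret = crypto_shash_final(desc, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return ret;
}

In the driver itself the two passes are run on the SS engine instead, with the pre-computed ipad/opad blocks DMA-mapped into slot 0 of the task descriptor.
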
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
index 246a6782674c..dd677e9ed06f 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
@@ -112,11 +112,9 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
goto err_iv;
}
- err = pm_runtime_get_sync(ss->dev);
- if (err < 0) {
- pm_runtime_put_noidle(ss->dev);
+ err = pm_runtime_resume_and_get(ss->dev);
+ if (err < 0)
goto err_pm;
- }
err = 0;
mutex_lock(&ss->mlock);
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
index 28188685b910..df6f08f6092f 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -82,6 +82,8 @@
#define PRNG_DATA_SIZE (160 / 8)
#define PRNG_SEED_SIZE DIV_ROUND_UP(175, 8)
+#define MAX_PAD_SIZE 4096
+
/*
* struct ss_clock - Describe clocks used by sun8i-ss
* @name: Name of clock needed by this variant
@@ -121,11 +123,19 @@ struct sginfo {
* @complete: completion for the current task on this flow
* @status: set to 1 by interrupt if task is done
* @stat_req: number of request done by this flow
+ * @iv: list of IVs to use for each step
+ * @biv: buffer which contains the backed-up IV
+ * @pad: padding buffer for hash operations
+ * @result: buffer for storing the result of hash operations
*/
struct sun8i_ss_flow {
struct crypto_engine *engine;
struct completion complete;
int status;
+ u8 *iv[MAX_SG];
+ u8 *biv;
+ void *pad;
+ void *result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
unsigned long stat_req;
#endif
@@ -164,28 +174,28 @@ struct sun8i_ss_dev {
* @t_src: list of mapped SGs with their size
* @t_dst: list of mapped SGs with their size
* @p_key: DMA address of the key
- * @p_iv: DMA address of the IV
+ * @p_iv: DMA address of the IVs
+ * @niv: Number of IVs DMA mapped
* @method: current algorithm for this request
* @op_mode: op_mode for this request
* @op_dir: direction (encrypt vs decrypt) for this request
* @flow: the flow to use for this request
- * @ivlen: size of biv
+ * @ivlen: size of IVs
* @keylen: keylen for this request
- * @biv: buffer which contain the IV
* @fallback_req: request struct for invoking the fallback skcipher TFM
*/
struct sun8i_cipher_req_ctx {
struct sginfo t_src[MAX_SG];
struct sginfo t_dst[MAX_SG];
u32 p_key;
- u32 p_iv;
+ u32 p_iv[MAX_SG];
+ int niv;
u32 method;
u32 op_mode;
u32 op_dir;
int flow;
unsigned int ivlen;
unsigned int keylen;
- void *biv;
struct skcipher_request fallback_req; // keep at the end
};
@@ -229,6 +239,10 @@ struct sun8i_ss_hash_tfm_ctx {
struct crypto_engine_ctx enginectx;
struct crypto_ahash *fallback_tfm;
struct sun8i_ss_dev *ss;
+ u8 *ipad;
+ u8 *opad;
+ u8 key[SHA256_BLOCK_SIZE];
+ int keylen;
};
/*
@@ -269,11 +283,14 @@ struct sun8i_ss_alg_template {
struct rng_alg rng;
struct ahash_alg hash;
} alg;
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
unsigned long stat_req;
unsigned long stat_fb;
unsigned long stat_bytes;
-#endif
+ unsigned long stat_fb_len;
+ unsigned long stat_fb_sglen;
+ unsigned long stat_fb_align;
+ unsigned long stat_fb_sgnum;
+ char fbname[CRYPTO_MAX_ALG_NAME];
};
int sun8i_ss_enqueue(struct crypto_async_request *areq, u32 type);
@@ -306,3 +323,5 @@ int sun8i_ss_hash_update(struct ahash_request *areq);
int sun8i_ss_hash_finup(struct ahash_request *areq);
int sun8i_ss_hash_digest(struct ahash_request *areq);
int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq);
+int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen);
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 333fbefbbccb..59a57279e77b 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -398,7 +398,7 @@ static int __init atmel_ecc_init(void)
static void __exit atmel_ecc_exit(void)
{
- flush_scheduled_work();
+ atmel_i2c_flush_queue();
i2c_del_driver(&atmel_ecc_driver);
}
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index 6fd3e969211d..81ce09bedda8 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -263,6 +263,8 @@ static void atmel_i2c_work_handler(struct work_struct *work)
work_data->cbk(work_data, work_data->areq, status);
}
+static struct workqueue_struct *atmel_wq;
+
void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
void (*cbk)(struct atmel_i2c_work_data *work_data,
void *areq, int status),
@@ -272,10 +274,16 @@ void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
work_data->areq = areq;
INIT_WORK(&work_data->work, atmel_i2c_work_handler);
- schedule_work(&work_data->work);
+ queue_work(atmel_wq, &work_data->work);
}
EXPORT_SYMBOL(atmel_i2c_enqueue);
+void atmel_i2c_flush_queue(void)
+{
+ flush_workqueue(atmel_wq);
+}
+EXPORT_SYMBOL(atmel_i2c_flush_queue);
+
static inline size_t atmel_i2c_wake_token_sz(u32 bus_clk_rate)
{
u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);
@@ -364,14 +372,24 @@ int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
i2c_set_clientdata(client, i2c_priv);
- ret = device_sanity_check(client);
- if (ret)
- return ret;
-
- return 0;
+ return device_sanity_check(client);
}
EXPORT_SYMBOL(atmel_i2c_probe);
+static int __init atmel_i2c_init(void)
+{
+ atmel_wq = alloc_workqueue("atmel_wq", 0, 0);
+ return atmel_wq ? 0 : -ENOMEM;
+}
+
+static void __exit atmel_i2c_exit(void)
+{
+ destroy_workqueue(atmel_wq);
+}
+
+module_init(atmel_i2c_init);
+module_exit(atmel_i2c_exit);
+
MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h
index 63b97b104f16..48929efe2a5b 100644
--- a/drivers/crypto/atmel-i2c.h
+++ b/drivers/crypto/atmel-i2c.h
@@ -173,6 +173,7 @@ void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
void (*cbk)(struct atmel_i2c_work_data *work_data,
void *areq, int status),
void *areq);
+void atmel_i2c_flush_queue(void);
int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd);
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index c96c14e7dab1..e4087bdd2475 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -121,23 +121,24 @@ static int atmel_sha204a_remove(struct i2c_client *client)
struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
if (atomic_read(&i2c_priv->tfm_count)) {
- dev_err(&client->dev, "Device is busy\n");
- return -EBUSY;
+ dev_emerg(&client->dev, "Device is busy, will remove it anyhow\n");
+ return 0;
}
- if (i2c_priv->hwrng.priv)
- kfree((void *)i2c_priv->hwrng.priv);
+ kfree((void *)i2c_priv->hwrng.priv);
return 0;
}
static const struct of_device_id atmel_sha204a_dt_ids[] = {
+ { .compatible = "atmel,atsha204", },
{ .compatible = "atmel,atsha204a", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_sha204a_dt_ids);
static const struct i2c_device_id atmel_sha204a_id[] = {
+ { "atsha204", 0 },
{ "atsha204a", 0 },
{ /* sentinel */ }
};
@@ -159,7 +160,7 @@ static int __init atmel_sha204a_init(void)
static void __exit atmel_sha204a_exit(void)
{
- flush_scheduled_work();
+ atmel_i2c_flush_queue();
i2c_del_driver(&atmel_sha204a_driver);
}
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index ea9f8b1ae981..ec6a9e6ad4d2 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -151,6 +151,14 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
Selecting this will register the SEC4 hardware rng to
the hw_random API for supplying the kernel entropy pool.
+config CRYPTO_DEV_FSL_CAAM_PRNG_API
+ bool "Register Pseudo random number generation implementation with Crypto API"
+ default y
+ select CRYPTO_RNG
+ help
+ Selecting this will register the SEC hardware prng to
+ the Crypto API.
+
config CRYPTO_DEV_FSL_CAAM_BLOB_GEN
bool
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 25f7ae5a4642..acf1b197eb84 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -20,6 +20,7 @@ caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API) += caamprng.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_BLOB_GEN) += blob_gen.o
diff --git a/drivers/crypto/caam/caamprng.c b/drivers/crypto/caam/caamprng.c
new file mode 100644
index 000000000000..4839e66300a2
--- /dev/null
+++ b/drivers/crypto/caam/caamprng.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver to expose SEC4 PRNG via crypto RNG API
+ *
+ * Copyright 2022 NXP
+ *
+ */
+
+#include <linux/completion.h>
+#include <crypto/internal/rng.h>
+#include "compat.h"
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+
+/*
+ * Length of used descriptors, see caam_init_desc()
+ */
+#define CAAM_PRNG_MAX_DESC_LEN (CAAM_CMD_SZ + \
+ CAAM_CMD_SZ + \
+ CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)
+
+/* prng per-device context */
+struct caam_prng_ctx {
+ int err;
+ struct completion done;
+};
+
+struct caam_prng_alg {
+ struct rng_alg rng;
+ bool registered;
+};
+
+static void caam_prng_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct caam_prng_ctx *jctx = context;
+
+ jctx->err = err ? caam_jr_strstatus(jrdev, err) : 0;
+
+ complete(&jctx->done);
+}
+
+static u32 *caam_init_reseed_desc(u32 *desc)
+{
+ init_job_desc(desc, 0); /* + 1 cmd_sz */
+ /* Generate random bytes: + 1 cmd_sz */
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ OP_ALG_AS_FINALIZE);
+
+ print_hex_dump_debug("prng reseed desc@: ", DUMP_PREFIX_ADDRESS,
+ 16, 4, desc, desc_bytes(desc), 1);
+
+ return desc;
+}
+
+static u32 *caam_init_prng_desc(u32 *desc, dma_addr_t dst_dma, u32 len)
+{
+ init_job_desc(desc, 0); /* + 1 cmd_sz */
+ /* Generate random bytes: + 1 cmd_sz */
+ append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
+ /* Store bytes: + 1 cmd_sz + caam_ptr_sz */
+ append_fifo_store(desc, dst_dma,
+ len, FIFOST_TYPE_RNGSTORE);
+
+ print_hex_dump_debug("prng job desc@: ", DUMP_PREFIX_ADDRESS,
+ 16, 4, desc, desc_bytes(desc), 1);
+
+ return desc;
+}
+
+static int caam_prng_generate(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int dlen)
+{
+ struct caam_prng_ctx ctx;
+ struct device *jrdev;
+ dma_addr_t dst_dma;
+ u32 *desc;
+ u8 *buf;
+ int ret;
+
+ buf = kzalloc(dlen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ jrdev = caam_jr_alloc();
+ ret = PTR_ERR_OR_ZERO(jrdev);
+ if (ret) {
+ pr_err("Job Ring Device allocation failed\n");
+ kfree(buf);
+ return ret;
+ }
+
+ desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL | GFP_DMA);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ dst_dma = dma_map_single(jrdev, buf, dlen, DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dst_dma)) {
+ dev_err(jrdev, "Failed to map destination buffer memory\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ init_completion(&ctx.done);
+ ret = caam_jr_enqueue(jrdev,
+ caam_init_prng_desc(desc, dst_dma, dlen),
+ caam_prng_done, &ctx);
+
+ if (ret == -EINPROGRESS) {
+ wait_for_completion(&ctx.done);
+ ret = ctx.err;
+ }
+
+ dma_unmap_single(jrdev, dst_dma, dlen, DMA_FROM_DEVICE);
+
+ if (!ret)
+ memcpy(dst, buf, dlen);
+out:
+ kfree(desc);
+out1:
+ caam_jr_free(jrdev);
+ kfree(buf);
+ return ret;
+}
+
+static void caam_prng_exit(struct crypto_tfm *tfm) {}
+
+static int caam_prng_init(struct crypto_tfm *tfm)
+{
+ return 0;
+}
+
+static int caam_prng_seed(struct crypto_rng *tfm,
+ const u8 *seed, unsigned int slen)
+{
+ struct caam_prng_ctx ctx;
+ struct device *jrdev;
+ u32 *desc;
+ int ret;
+
+ if (slen) {
+ pr_err("Seed length should be zero\n");
+ return -EINVAL;
+ }
+
+ jrdev = caam_jr_alloc();
+ ret = PTR_ERR_OR_ZERO(jrdev);
+ if (ret) {
+ pr_err("Job Ring Device allocation failed\n");
+ return ret;
+ }
+
+ desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL | GFP_DMA);
+ if (!desc) {
+ caam_jr_free(jrdev);
+ return -ENOMEM;
+ }
+
+ init_completion(&ctx.done);
+ ret = caam_jr_enqueue(jrdev,
+ caam_init_reseed_desc(desc),
+ caam_prng_done, &ctx);
+
+ if (ret == -EINPROGRESS) {
+ wait_for_completion(&ctx.done);
+ ret = ctx.err;
+ }
+
+ kfree(desc);
+ caam_jr_free(jrdev);
+ return ret;
+}
+
+static struct caam_prng_alg caam_prng_alg = {
+ .rng = {
+ .generate = caam_prng_generate,
+ .seed = caam_prng_seed,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "prng-caam",
+ .cra_priority = 500,
+ .cra_ctxsize = sizeof(struct caam_prng_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = caam_prng_init,
+ .cra_exit = caam_prng_exit,
+ },
+ }
+};
+
+void caam_prng_unregister(void *data)
+{
+ if (caam_prng_alg.registered)
+ crypto_unregister_rng(&caam_prng_alg.rng);
+}
+
+int caam_prng_register(struct device *ctrldev)
+{
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
+ u32 rng_inst;
+ int ret = 0;
+
+ /* Check for available RNG blocks before registration */
+ if (priv->era < 10)
+ rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
+ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+ else
+ rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK;
+
+ if (!rng_inst) {
+ dev_dbg(ctrldev, "RNG block is not available... skipping registering algorithm\n");
+ return ret;
+ }
+
+ ret = crypto_register_rng(&caam_prng_alg.rng);
+ if (ret) {
+ dev_err(ctrldev,
+ "couldn't register rng crypto alg: %d\n",
+ ret);
+ return ret;
+ }
+
+ caam_prng_alg.registered = true;
+
+ dev_info(ctrldev,
+ "rng crypto API alg registered %s\n", caam_prng_alg.rng.base.cra_driver_name);
+
+ return 0;
+}
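
The new "stdrng" provider is consumed through the generic crypto RNG API rather than hw_random. A minimal, hypothetical consumer sketch follows (not part of the patch); note that crypto_alloc_rng("stdrng", 0, 0) simply returns the highest-priority registered stdrng, which may or may not be prng-caam depending on what else is loaded:

#include <crypto/rng.h>
#include <linux/err.h>

static int example_get_random(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* caam_prng_seed() only accepts an empty seed and triggers a reseed */
	ret = crypto_rng_reset(rng, NULL, 0);
	if (!ret)
		ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}
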
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 38c4d88a9d03..32253a064d0f 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -609,6 +609,13 @@ static bool check_version(struct fsl_mc_version *mc_version, u32 major,
}
#endif
+static bool needs_entropy_delay_adjustment(void)
+{
+ if (of_machine_is_compatible("fsl,imx6sx"))
+ return true;
+ return false;
+}
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
@@ -868,6 +875,8 @@ static int caam_probe(struct platform_device *pdev)
* Also, if a handle was instantiated, do not change
* the TRNG parameters.
*/
+ if (needs_entropy_delay_adjustment())
+ ent_delay = 12000;
if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
dev_info(dev,
"Entropy delay = %u\n",
@@ -884,6 +893,15 @@ static int caam_probe(struct platform_device *pdev)
*/
ret = instantiate_rng(dev, inst_handles,
gen_sk);
+ /*
+ * Entropy delay is determined via TRNG characterization.
+ * TRNG characterization is run across different voltages
+ * and temperatures.
+ * If the worst-case value for ent_dly is identified,
+ * the loop can be skipped for that platform.
+ */
+ if (needs_entropy_delay_adjustment())
+ break;
if (ret == -EAGAIN)
/*
* if here, the loop will rerun,
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index e92210e2ab76..572cf66c887a 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -186,6 +186,21 @@ static inline void caam_rng_exit(struct device *dev) {}
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API
+
+int caam_prng_register(struct device *dev);
+void caam_prng_unregister(void *data);
+
+#else
+
+static inline int caam_prng_register(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_prng_unregister(void *data) {}
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API */
+
#ifdef CONFIG_CAAM_QI
int caam_qi_algapi_init(struct device *dev);
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 7f2b1101f567..724fdec18bf9 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -39,6 +39,7 @@ static void register_algs(struct caam_drv_private_jr *jrpriv,
caam_algapi_hash_init(dev);
caam_pkc_init(dev);
jrpriv->hwrng = !caam_rng_init(dev);
+ caam_prng_register(dev);
caam_qi_algapi_init(dev);
algs_unlock:
@@ -53,7 +54,7 @@ static void unregister_algs(void)
goto algs_unlock;
caam_qi_algapi_exit();
-
+ caam_prng_unregister(NULL);
caam_pkc_exit();
caam_algapi_hash_exit();
caam_algapi_exit();
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 6c61817996a3..432a61aca0c5 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -269,15 +269,17 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
struct nitrox_device *nitrox_get_first_device(void)
{
- struct nitrox_device *ndev;
+ struct nitrox_device *ndev = NULL, *iter;
mutex_lock(&devlist_lock);
- list_for_each_entry(ndev, &ndevlist, list) {
- if (nitrox_ready(ndev))
+ list_for_each_entry(iter, &ndevlist, list) {
+ if (nitrox_ready(iter)) {
+ ndev = iter;
break;
+ }
}
mutex_unlock(&devlist_lock);
- if (&ndev->list == &ndevlist)
+ if (!ndev)
return NULL;
refcount_inc(&ndev->refcnt);
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index ae7b44599914..c9c741ac8442 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -70,17 +70,23 @@ static unsigned int psp_get_capability(struct psp_device *psp)
*/
if (val == 0xffffffff) {
dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
- return 0;
+ return -ENODEV;
}
+ psp->capability = val;
+
+ /* Detect if TSME and SME are both enabled */
+ if (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING &&
+ psp->capability & (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET) &&
+ cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+ dev_notice(psp->dev, "psp: Both TSME and SME are active, SME is unnecessary when TSME is active.\n");
- return val;
+ return 0;
}
-static int psp_check_sev_support(struct psp_device *psp,
- unsigned int capability)
+static int psp_check_sev_support(struct psp_device *psp)
{
/* Check if device supports SEV feature */
- if (!(capability & 1)) {
+ if (!(psp->capability & PSP_CAPABILITY_SEV)) {
dev_dbg(psp->dev, "psp does not support SEV\n");
return -ENODEV;
}
@@ -88,11 +94,10 @@ static int psp_check_sev_support(struct psp_device *psp,
return 0;
}
-static int psp_check_tee_support(struct psp_device *psp,
- unsigned int capability)
+static int psp_check_tee_support(struct psp_device *psp)
{
/* Check if device supports TEE feature */
- if (!(capability & 2)) {
+ if (!(psp->capability & PSP_CAPABILITY_TEE)) {
dev_dbg(psp->dev, "psp does not support TEE\n");
return -ENODEV;
}
@@ -100,30 +105,17 @@ static int psp_check_tee_support(struct psp_device *psp,
return 0;
}
-static int psp_check_support(struct psp_device *psp,
- unsigned int capability)
-{
- int sev_support = psp_check_sev_support(psp, capability);
- int tee_support = psp_check_tee_support(psp, capability);
-
- /* Return error if device neither supports SEV nor TEE */
- if (sev_support && tee_support)
- return -ENODEV;
-
- return 0;
-}
-
-static int psp_init(struct psp_device *psp, unsigned int capability)
+static int psp_init(struct psp_device *psp)
{
int ret;
- if (!psp_check_sev_support(psp, capability)) {
+ if (!psp_check_sev_support(psp)) {
ret = sev_dev_init(psp);
if (ret)
return ret;
}
- if (!psp_check_tee_support(psp, capability)) {
+ if (!psp_check_tee_support(psp)) {
ret = tee_dev_init(psp);
if (ret)
return ret;
@@ -136,7 +128,6 @@ int psp_dev_init(struct sp_device *sp)
{
struct device *dev = sp->dev;
struct psp_device *psp;
- unsigned int capability;
int ret;
ret = -ENOMEM;
@@ -155,11 +146,7 @@ int psp_dev_init(struct sp_device *sp)
psp->io_regs = sp->io_map;
- capability = psp_get_capability(psp);
- if (!capability)
- goto e_disable;
-
- ret = psp_check_support(psp, capability);
+ ret = psp_get_capability(psp);
if (ret)
goto e_disable;
@@ -174,7 +161,7 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
- ret = psp_init(psp, capability);
+ ret = psp_init(psp);
if (ret)
goto e_irq;
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index ef38e4135d81..d528eb04c3ef 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -45,6 +45,8 @@ struct psp_device {
void *sev_data;
void *tee_data;
+
+ unsigned int capability;
};
void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
@@ -57,4 +59,24 @@ void psp_clear_tee_irq_handler(struct psp_device *psp);
struct psp_device *psp_get_master_device(void);
+#define PSP_CAPABILITY_SEV BIT(0)
+#define PSP_CAPABILITY_TEE BIT(1)
+#define PSP_CAPABILITY_PSP_SECURITY_REPORTING BIT(7)
+
+#define PSP_CAPABILITY_PSP_SECURITY_OFFSET 8
+/*
+ * The PSP doesn't directly store these bits in the capability register
+ * but instead copies them from the results of the query command.
+ *
+ * The offsets from the query command are below, and shifted when used.
+ */
+#define PSP_SECURITY_FUSED_PART BIT(0)
+#define PSP_SECURITY_DEBUG_LOCK_ON BIT(2)
+#define PSP_SECURITY_TSME_STATUS BIT(5)
+#define PSP_SECURITY_ANTI_ROLLBACK_STATUS BIT(7)
+#define PSP_SECURITY_RPMC_PRODUCTION_ENABLED BIT(8)
+#define PSP_SECURITY_RPMC_SPIROM_AVAILABLE BIT(9)
+#define PSP_SECURITY_HSP_TPM_AVAILABLE BIT(10)
+#define PSP_SECURITY_ROM_ARMOR_ENFORCED BIT(11)
+
#endif /* __PSP_DEV_H */
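
As the new macros hint, the security status bits returned by the PSP query command are shifted up by PSP_CAPABILITY_PSP_SECURITY_OFFSET when cached in psp->capability. A tiny hypothetical helper (not in the patch) that tests one of them:

#include <linux/types.h>

/* sketch: test a PSP_SECURITY_* bit against the cached capability word */
static inline bool example_psp_security_bit(unsigned int capability, u32 bit)
{
	return capability & (bit << PSP_CAPABILITY_PSP_SECURITY_OFFSET);
}

/* e.g. example_psp_security_bit(psp->capability, PSP_SECURITY_TSME_STATUS) */
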
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 6ab93dfd478a..799b476fc3e8 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -23,6 +23,7 @@
#include <linux/gfp.h>
#include <linux/cpufeature.h>
#include <linux/fs.h>
+#include <linux/fs_struct.h>
#include <asm/smp.h>
@@ -170,6 +171,31 @@ static void *sev_fw_alloc(unsigned long len)
return page_address(page);
}
+static struct file *open_file_as_root(const char *filename, int flags, umode_t mode)
+{
+ struct file *fp;
+ struct path root;
+ struct cred *cred;
+ const struct cred *old_cred;
+
+ task_lock(&init_task);
+ get_fs_root(init_task.fs, &root);
+ task_unlock(&init_task);
+
+ cred = prepare_creds();
+ if (!cred)
+ return ERR_PTR(-ENOMEM);
+ cred->fsuid = GLOBAL_ROOT_UID;
+ old_cred = override_creds(cred);
+
+ fp = file_open_root(&root, filename, flags, mode);
+ path_put(&root);
+
+ revert_creds(old_cred);
+
+ return fp;
+}
+
static int sev_read_init_ex_file(void)
{
struct sev_device *sev = psp_master->sev_data;
@@ -181,7 +207,7 @@ static int sev_read_init_ex_file(void)
if (!sev_init_ex_buffer)
return -EOPNOTSUPP;
- fp = filp_open(init_ex_path, O_RDONLY, 0);
+ fp = open_file_as_root(init_ex_path, O_RDONLY, 0);
if (IS_ERR(fp)) {
int ret = PTR_ERR(fp);
@@ -217,7 +243,7 @@ static void sev_write_init_ex_file(void)
if (!sev_init_ex_buffer)
return;
- fp = filp_open(init_ex_path, O_CREAT | O_WRONLY, 0600);
+ fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600);
if (IS_ERR(fp)) {
dev_err(sev->dev,
"SEV: could not open file for write, error %ld\n",
@@ -435,7 +461,7 @@ static int __sev_platform_init_locked(int *error)
* initialization function should succeed by replacing the state
* with a reset state.
*/
- dev_dbg(sev->dev, "SEV: retrying INIT command");
+ dev_err(sev->dev, "SEV: retrying INIT command because of SECURE_DATA_INVALID error. Retrying once to reset PSP SEV state.");
rc = init_function(&psp_ret);
}
if (error)
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 88c672ad27e4..b5970ae54d0e 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -32,6 +32,67 @@ struct sp_pci {
};
static struct sp_device *sp_dev_master;
+#define attribute_show(name, def) \
+static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct sp_device *sp = dev_get_drvdata(d); \
+ struct psp_device *psp = sp->psp_data; \
+ int bit = PSP_SECURITY_##def << PSP_CAPABILITY_PSP_SECURITY_OFFSET; \
+ return sysfs_emit(buf, "%d\n", (psp->capability & bit) > 0); \
+}
+
+attribute_show(fused_part, FUSED_PART)
+static DEVICE_ATTR_RO(fused_part);
+attribute_show(debug_lock_on, DEBUG_LOCK_ON)
+static DEVICE_ATTR_RO(debug_lock_on);
+attribute_show(tsme_status, TSME_STATUS)
+static DEVICE_ATTR_RO(tsme_status);
+attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS)
+static DEVICE_ATTR_RO(anti_rollback_status);
+attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED)
+static DEVICE_ATTR_RO(rpmc_production_enabled);
+attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE)
+static DEVICE_ATTR_RO(rpmc_spirom_available);
+attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE)
+static DEVICE_ATTR_RO(hsp_tpm_available);
+attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED)
+static DEVICE_ATTR_RO(rom_armor_enforced);
+
+static struct attribute *psp_attrs[] = {
+ &dev_attr_fused_part.attr,
+ &dev_attr_debug_lock_on.attr,
+ &dev_attr_tsme_status.attr,
+ &dev_attr_anti_rollback_status.attr,
+ &dev_attr_rpmc_production_enabled.attr,
+ &dev_attr_rpmc_spirom_available.attr,
+ &dev_attr_hsp_tpm_available.attr,
+ &dev_attr_rom_armor_enforced.attr,
+ NULL
+};
+
+static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sp_device *sp = dev_get_drvdata(dev);
+ struct psp_device *psp = sp->psp_data;
+
+ if (psp && (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING))
+ return 0444;
+
+ return 0;
+}
+
+static struct attribute_group psp_attr_group = {
+ .attrs = psp_attrs,
+ .is_visible = psp_security_is_visible,
+};
+
+static const struct attribute_group *psp_groups[] = {
+ &psp_attr_group,
+ NULL,
+};
+
static int sp_get_msix_irqs(struct sp_device *sp)
{
struct sp_pci *sp_pci = sp->dev_specific;
@@ -391,6 +452,7 @@ static struct pci_driver sp_pci_driver = {
.remove = sp_pci_remove,
.shutdown = sp_pci_shutdown,
.driver.pm = &sp_pci_pm_ops,
+ .dev_groups = psp_groups,
};
int sp_pci_init(void)
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 11e0278c8631..6140e4927322 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -356,12 +356,14 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
req_ctx->mlli_params.mlli_dma_addr);
}
- dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
- dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
-
if (src != dst) {
- dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+ dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+ } else {
+ dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
}
}
@@ -377,6 +379,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
u32 dummy = 0;
int rc = 0;
u32 mapped_nents = 0;
+ int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
mlli_params->curr_pool = NULL;
@@ -399,7 +402,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
}
/* Map the src SGL */
- rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+ rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
if (rc)
goto cipher_exit;
@@ -416,7 +419,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
}
} else {
/* Map the dst sg */
- rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+ rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
&req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
&dummy, &mapped_nents);
if (rc)
@@ -456,6 +459,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+ int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
if (areq_ctx->mac_buf_dma_addr) {
dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -514,13 +518,11 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
areq_ctx->assoclen, req->cryptlen);
- dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
- dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
}
if (drvdata->coherent &&
areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
@@ -843,7 +845,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
else
size_for_map -= authsize;
- rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
+ rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
&areq_ctx->dst.mapped_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
&dst_mapped_nents);
@@ -1056,7 +1058,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
size_to_map += authsize;
}
- rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+ rc = cc_map_sg(dev, req->src, size_to_map,
+ (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
&areq_ctx->src.mapped_nents,
(LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
LLI_MAX_NUM_OF_DATA_ENTRIES),
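
The ccree changes above all apply the same direction rule when mapping and unmapping cipher/AEAD buffers. Stated once as a condensed hypothetical helper (example_pick_dma_dirs() is illustrative, not part of the patch):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* sketch: in-place requests need bidirectional mappings, out-of-place ones don't */
static void example_pick_dma_dirs(struct scatterlist *src, struct scatterlist *dst,
				  enum dma_data_direction *src_dir,
				  enum dma_data_direction *dst_dir)
{
	if (src == dst) {
		*src_dir = DMA_BIDIRECTIONAL;
		*dst_dir = DMA_BIDIRECTIONAL;
	} else {
		*src_dir = DMA_TO_DEVICE;	/* source is only read by the device */
		*dst_dir = DMA_FROM_DEVICE;	/* destination is only written */
	}
}
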
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 790fa9058a36..7d1bee86d581 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -529,24 +529,26 @@ static int init_cc_resources(struct platform_device *plat_dev)
goto post_req_mgr_err;
}
- /* Allocate crypto algs */
- rc = cc_cipher_alloc(new_drvdata);
+ /* hash must be allocated first due to use of send_request_init()
+ * and dependency of AEAD on it
+ */
+ rc = cc_hash_alloc(new_drvdata);
if (rc) {
- dev_err(dev, "cc_cipher_alloc failed\n");
+ dev_err(dev, "cc_hash_alloc failed\n");
goto post_buf_mgr_err;
}
- /* hash must be allocated before aead since hash exports APIs */
- rc = cc_hash_alloc(new_drvdata);
+ /* Allocate crypto algs */
+ rc = cc_cipher_alloc(new_drvdata);
if (rc) {
- dev_err(dev, "cc_hash_alloc failed\n");
- goto post_cipher_err;
+ dev_err(dev, "cc_cipher_alloc failed\n");
+ goto post_hash_err;
}
rc = cc_aead_alloc(new_drvdata);
if (rc) {
dev_err(dev, "cc_aead_alloc failed\n");
- goto post_hash_err;
+ goto post_cipher_err;
}
/* If we got here and FIPS mode is enabled
@@ -558,10 +560,10 @@ static int init_cc_resources(struct platform_device *plat_dev)
pm_runtime_put(dev);
return 0;
-post_hash_err:
- cc_hash_free(new_drvdata);
post_cipher_err:
cc_cipher_free(new_drvdata);
+post_hash_err:
+ cc_hash_free(new_drvdata);
post_buf_mgr_err:
cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
@@ -593,8 +595,8 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
(struct cc_drvdata *)platform_get_drvdata(plat_dev);
cc_aead_free(drvdata);
- cc_hash_free(drvdata);
cc_cipher_free(drvdata);
+ cc_hash_free(drvdata);
cc_buffer_mgr_fini(drvdata);
cc_req_mgr_fini(drvdata);
cc_fips_fini(drvdata);
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index e572f9982d4e..27e1fa912063 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -26,6 +26,7 @@ config CRYPTO_DEV_HISI_SEC2
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
+ select CRYPTO_SM4
depends on PCI && PCI_MSI
depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 36ab30e9e654..9d529df0eab9 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -36,6 +36,12 @@
#define HPRE_DATA_WUSER_CFG 0x301040
#define HPRE_INT_MASK 0x301400
#define HPRE_INT_STATUS 0x301800
+#define HPRE_HAC_INT_MSK 0x301400
+#define HPRE_HAC_RAS_CE_ENB 0x301410
+#define HPRE_HAC_RAS_NFE_ENB 0x301414
+#define HPRE_HAC_RAS_FE_ENB 0x301418
+#define HPRE_HAC_INT_SET 0x301500
+#define HPRE_RNG_TIMEOUT_NUM 0x301A34
#define HPRE_CORE_INT_ENABLE 0
#define HPRE_CORE_INT_DISABLE GENMASK(21, 0)
#define HPRE_RDCHN_INI_ST 0x301a00
@@ -107,6 +113,15 @@
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 24
+#define HPRE_DFX_BASE 0x301000
+#define HPRE_DFX_COMMON1 0x301400
+#define HPRE_DFX_COMMON2 0x301A00
+#define HPRE_DFX_CORE 0x302000
+#define HPRE_DFX_BASE_LEN 0x55
+#define HPRE_DFX_COMMON1_LEN 0x41
+#define HPRE_DFX_COMMON2_LEN 0xE
+#define HPRE_DFX_CORE_LEN 0x43
+
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -192,28 +207,32 @@ static const u64 hpre_cluster_offsets[] = {
};
static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
- {"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET},
- {"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET},
- {"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET},
- {"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET},
- {"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET},
+ {"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET},
+ {"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET},
+ {"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET},
+ {"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET},
+ {"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET},
};
static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
- {"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE},
- {"AXQOS ", HPRE_VFG_AXQOS},
- {"AWUSR_CFG ", HPRE_AWUSR_FP_CFG},
- {"QM_ARUSR_MCFG1 ", QM_ARUSER_M_CFG_1},
- {"QM_AWUSR_MCFG1 ", QM_AWUSER_M_CFG_1},
- {"BD_ENDIAN ", HPRE_BD_ENDIAN},
- {"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS},
- {"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG},
- {"POISON_BYPASS ", HPRE_POISON_BYPASS},
- {"BD_ARUSER ", HPRE_BD_ARUSR_CFG},
- {"BD_AWUSER ", HPRE_BD_AWUSR_CFG},
- {"DATA_ARUSER ", HPRE_DATA_RUSER_CFG},
- {"DATA_AWUSER ", HPRE_DATA_WUSER_CFG},
- {"INT_STATUS ", HPRE_INT_STATUS},
+ {"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE},
+ {"AXQOS ", HPRE_VFG_AXQOS},
+ {"AWUSR_CFG ", HPRE_AWUSR_FP_CFG},
+ {"BD_ENDIAN ", HPRE_BD_ENDIAN},
+ {"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS},
+ {"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG},
+ {"POISON_BYPASS ", HPRE_POISON_BYPASS},
+ {"BD_ARUSER ", HPRE_BD_ARUSR_CFG},
+ {"BD_AWUSER ", HPRE_BD_AWUSR_CFG},
+ {"DATA_ARUSER ", HPRE_DATA_RUSER_CFG},
+ {"DATA_AWUSER ", HPRE_DATA_WUSER_CFG},
+ {"INT_STATUS ", HPRE_INT_STATUS},
+ {"INT_MASK ", HPRE_HAC_INT_MSK},
+ {"RAS_CE_ENB ", HPRE_HAC_RAS_CE_ENB},
+ {"RAS_NFE_ENB ", HPRE_HAC_RAS_NFE_ENB},
+ {"RAS_FE_ENB ", HPRE_HAC_RAS_FE_ENB},
+ {"INT_SET ", HPRE_HAC_INT_SET},
+ {"RNG_TIMEOUT_NUM ", HPRE_RNG_TIMEOUT_NUM},
};
static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
@@ -226,6 +245,53 @@ static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
"invalid_req_cnt"
};
+/* define the HPRE's dfx regs region and region length */
+static struct dfx_diff_registers hpre_diff_regs[] = {
+ {
+ .reg_offset = HPRE_DFX_BASE,
+ .reg_len = HPRE_DFX_BASE_LEN,
+ }, {
+ .reg_offset = HPRE_DFX_COMMON1,
+ .reg_len = HPRE_DFX_COMMON1_LEN,
+ }, {
+ .reg_offset = HPRE_DFX_COMMON2,
+ .reg_len = HPRE_DFX_COMMON2_LEN,
+ }, {
+ .reg_offset = HPRE_DFX_CORE,
+ .reg_len = HPRE_DFX_CORE_LEN,
+ },
+};
+
+static int hpre_diff_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+
+ hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
+ ARRAY_SIZE(hpre_diff_regs));
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hpre_diff_regs);
+
+static int hpre_com_regs_show(struct seq_file *s, void *unused)
+{
+ hisi_qm_regs_dump(s, s->private);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hpre_com_regs);
+
+static int hpre_cluster_regs_show(struct seq_file *s, void *unused)
+{
+ hisi_qm_regs_dump(s, s->private);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs);
+
static const struct kernel_param_ops hpre_uacce_mode_ops = {
.set = uacce_mode_set,
.get = param_get_int,
@@ -779,24 +845,6 @@ static int hpre_debugfs_atomic64_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
hpre_debugfs_atomic64_set, "%llu\n");
-static int hpre_com_regs_show(struct seq_file *s, void *unused)
-{
- hisi_qm_regs_dump(s, s->private);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(hpre_com_regs);
-
-static int hpre_cluster_regs_show(struct seq_file *s, void *unused)
-{
- hisi_qm_regs_dump(s, s->private);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs);
-
static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
@@ -895,6 +943,7 @@ static int hpre_ctrl_debug_init(struct hisi_qm *qm)
static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
+ struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs;
struct hpre *hpre = container_of(qm, struct hpre, qm);
struct hpre_dfx *dfx = hpre->debug.dfx;
struct dentry *parent;
@@ -906,6 +955,10 @@ static void hpre_dfx_debug_init(struct hisi_qm *qm)
debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
&hpre_atomic64_ops);
}
+
+ if (qm->fun_type == QM_HW_PF && hpre_regs)
+ debugfs_create_file("diff_regs", 0444, parent,
+ qm, &hpre_diff_regs_fops);
}
static int hpre_debugfs_init(struct hisi_qm *qm)
@@ -918,6 +971,13 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
+ ret = hisi_qm_diff_regs_init(qm, hpre_diff_regs,
+ ARRAY_SIZE(hpre_diff_regs));
+ if (ret) {
+ dev_warn(dev, "Failed to init HPRE diff regs!\n");
+ goto debugfs_remove;
+ }
+
hisi_qm_debug_init(qm);
if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) {
@@ -931,12 +991,16 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
return 0;
failed_to_create:
+ hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
+debugfs_remove:
debugfs_remove_recursive(qm->debug.debug_root);
return ret;
}
static void hpre_debugfs_exit(struct hisi_qm *qm)
{
+ hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
+
debugfs_remove_recursive(qm->debug.debug_root);
}
@@ -969,6 +1033,82 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return hisi_qm_init(qm);
}
+static int hpre_show_last_regs_init(struct hisi_qm *qm)
+{
+ int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
+ int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
+ u8 clusters_num = hpre_cluster_num(qm);
+ struct qm_debug *debug = &qm->debug;
+ void __iomem *io_base;
+ int i, j, idx;
+
+ debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
+ com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
+ if (!debug->last_words)
+ return -ENOMEM;
+
+ for (i = 0; i < com_dfx_regs_num; i++)
+ debug->last_words[i] = readl_relaxed(qm->io_base +
+ hpre_com_dfx_regs[i].offset);
+
+ for (i = 0; i < clusters_num; i++) {
+ io_base = qm->io_base + hpre_cluster_offsets[i];
+ for (j = 0; j < cluster_dfx_regs_num; j++) {
+ idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j;
+ debug->last_words[idx] = readl_relaxed(
+ io_base + hpre_cluster_dfx_regs[j].offset);
+ }
+ }
+
+ return 0;
+}
+
+static void hpre_show_last_regs_uninit(struct hisi_qm *qm)
+{
+ struct qm_debug *debug = &qm->debug;
+
+ if (qm->fun_type == QM_HW_VF || !debug->last_words)
+ return;
+
+ kfree(debug->last_words);
+ debug->last_words = NULL;
+}
+
+static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
+{
+ int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
+ int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
+ u8 clusters_num = hpre_cluster_num(qm);
+ struct qm_debug *debug = &qm->debug;
+ struct pci_dev *pdev = qm->pdev;
+ void __iomem *io_base;
+ int i, j, idx;
+ u32 val;
+
+ if (qm->fun_type == QM_HW_VF || !debug->last_words)
+ return;
+
+ /* dumps last word of the debugging registers during controller reset */
+ for (i = 0; i < com_dfx_regs_num; i++) {
+ val = readl_relaxed(qm->io_base + hpre_com_dfx_regs[i].offset);
+ if (debug->last_words[i] != val)
+ pci_info(pdev, "Common_core:%s \t= 0x%08x => 0x%08x\n",
+ hpre_com_dfx_regs[i].name, debug->last_words[i], val);
+ }
+
+ for (i = 0; i < clusters_num; i++) {
+ io_base = qm->io_base + hpre_cluster_offsets[i];
+ for (j = 0; j < cluster_dfx_regs_num; j++) {
+ val = readl_relaxed(io_base +
+ hpre_cluster_dfx_regs[j].offset);
+ idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j;
+ if (debug->last_words[idx] != val)
+ pci_info(pdev, "cluster-%d:%s \t= 0x%08x => 0x%08x\n",
+ i, hpre_cluster_dfx_regs[j].name, debug->last_words[idx], val);
+ }
+ }
+}
+
static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
const struct hpre_hw_error *err = hpre_hw_errors;
@@ -1027,6 +1167,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.open_axi_master_ooo = hpre_open_axi_master_ooo,
.open_sva_prefetch = hpre_open_sva_prefetch,
.close_sva_prefetch = hpre_close_sva_prefetch,
+ .show_last_dfx_regs = hpre_show_last_dfx_regs,
.err_info_init = hpre_err_info_init,
};
@@ -1044,8 +1185,11 @@ static int hpre_pf_probe_init(struct hpre *hpre)
qm->err_ini = &hpre_err_ini;
qm->err_ini->err_info_init(qm);
hisi_qm_dev_err_init(qm);
+ ret = hpre_show_last_regs_init(qm);
+ if (ret)
+ pci_err(qm->pdev, "Failed to init last word regs!\n");
- return 0;
+ return ret;
}
static int hpre_probe_init(struct hpre *hpre)
@@ -1131,6 +1275,7 @@ err_with_qm_start:
hisi_qm_stop(qm, QM_NORMAL);
err_with_err_init:
+ hpre_show_last_regs_uninit(qm);
hisi_qm_dev_err_uninit(qm);
err_with_qm_init:
@@ -1161,6 +1306,7 @@ static void hpre_remove(struct pci_dev *pdev)
if (qm->fun_type == QM_HW_PF) {
hpre_cnt_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
+ hpre_show_last_regs_uninit(qm);
hisi_qm_dev_err_uninit(qm);
}
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 009132333d2b..b4ca2eb034d7 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -253,7 +253,15 @@
#define QM_QOS_MAX_CIR_U 6
#define QM_QOS_MAX_CIR_S 11
#define QM_QOS_VAL_MAX_LEN 32
-
+#define QM_DFX_BASE 0x0100000
+#define QM_DFX_STATE1 0x0104000
+#define QM_DFX_STATE2 0x01040C8
+#define QM_DFX_COMMON 0x0000
+#define QM_DFX_BASE_LEN 0x5A
+#define QM_DFX_STATE1_LEN 0x2E
+#define QM_DFX_STATE2_LEN 0x11
+#define QM_DFX_COMMON_LEN 0xC3
+#define QM_DFX_REGS_LEN 4UL
#define QM_AUTOSUSPEND_DELAY 3000
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
@@ -467,6 +475,23 @@ static const struct hisi_qm_hw_error qm_hw_error[] = {
{ /* sentinel */ }
};
+/* define the QM's dfx regs region and region length */
+static struct dfx_diff_registers qm_diff_regs[] = {
+ {
+ .reg_offset = QM_DFX_BASE,
+ .reg_len = QM_DFX_BASE_LEN,
+ }, {
+ .reg_offset = QM_DFX_STATE1,
+ .reg_len = QM_DFX_STATE1_LEN,
+ }, {
+ .reg_offset = QM_DFX_STATE2,
+ .reg_len = QM_DFX_STATE2_LEN,
+ }, {
+ .reg_offset = QM_DFX_COMMON,
+ .reg_len = QM_DFX_COMMON_LEN,
+ },
+};
+
static const char * const qm_db_timeout[] = {
"sq", "cq", "eq", "aeq",
};
@@ -687,13 +712,13 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
if (!IS_ENABLED(CONFIG_ARM64)) {
memcpy_toio(fun_base, src, 16);
- wmb();
+ dma_wmb();
return;
}
asm volatile("ldp %0, %1, %3\n"
"stp %0, %1, %2\n"
- "dsb sy\n"
+ "dmb oshst\n"
: "=&r" (tmp0),
"=&r" (tmp1),
"+Q" (*((char __iomem *)fun_base))
@@ -982,7 +1007,7 @@ static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
*addr = 1;
/* make sure setup is completed */
- mb();
+ smp_wmb();
}
static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
@@ -1625,6 +1650,156 @@ static int qm_regs_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(qm_regs);
+static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
+ const struct dfx_diff_registers *cregs, int reg_len)
+{
+ struct dfx_diff_registers *diff_regs;
+ u32 j, base_offset;
+ int i;
+
+ diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL);
+ if (!diff_regs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < reg_len; i++) {
+ if (!cregs[i].reg_len)
+ continue;
+
+ diff_regs[i].reg_offset = cregs[i].reg_offset;
+ diff_regs[i].reg_len = cregs[i].reg_len;
+ diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len,
+ GFP_KERNEL);
+ if (!diff_regs[i].regs)
+ goto alloc_error;
+
+ for (j = 0; j < diff_regs[i].reg_len; j++) {
+ base_offset = diff_regs[i].reg_offset +
+ j * QM_DFX_REGS_LEN;
+ diff_regs[i].regs[j] = readl(qm->io_base + base_offset);
+ }
+ }
+
+ return diff_regs;
+
+alloc_error:
+ while (i > 0) {
+ i--;
+ kfree(diff_regs[i].regs);
+ }
+ kfree(diff_regs);
+ return ERR_PTR(-ENOMEM);
+}
+
+static void dfx_regs_uninit(struct hisi_qm *qm,
+ struct dfx_diff_registers *dregs, int reg_len)
+{
+ int i;
+
+ /* Set the pointers to NULL to prevent a double free */
+ for (i = 0; i < reg_len; i++) {
+ kfree(dregs[i].regs);
+ dregs[i].regs = NULL;
+ }
+ kfree(dregs);
+ dregs = NULL;
+}
+
+/**
+ * hisi_qm_diff_regs_init() - Allocate memory for registers.
+ * @qm: device qm handle.
+ * @dregs: diff registers handle.
+ * @reg_len: diff registers region length.
+ */
+int hisi_qm_diff_regs_init(struct hisi_qm *qm,
+ struct dfx_diff_registers *dregs, int reg_len)
+{
+ if (!qm || !dregs || reg_len <= 0)
+ return -EINVAL;
+
+ if (qm->fun_type != QM_HW_PF)
+ return 0;
+
+ qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs,
+ ARRAY_SIZE(qm_diff_regs));
+ if (IS_ERR(qm->debug.qm_diff_regs))
+ return PTR_ERR(qm->debug.qm_diff_regs);
+
+ qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
+ if (IS_ERR(qm->debug.acc_diff_regs)) {
+ dfx_regs_uninit(qm, qm->debug.qm_diff_regs,
+ ARRAY_SIZE(qm_diff_regs));
+ return PTR_ERR(qm->debug.acc_diff_regs);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_init);
+
+/**
+ * hisi_qm_diff_regs_uninit() - Free memory for registers.
+ * @qm: device qm handle.
+ * @reg_len: diff registers region length.
+ */
+void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len)
+{
+ if (!qm || reg_len <= 0 || qm->fun_type != QM_HW_PF)
+ return;
+
+ dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
+ dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+}
+EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_uninit);
+
+/**
+ * hisi_qm_acc_diff_regs_dump() - Dump registers' values.
+ * @qm: device qm handle.
+ * @s: Debugfs file handle.
+ * @dregs: diff registers handle.
+ * @regs_len: diff registers region length.
+ */
+void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
+ struct dfx_diff_registers *dregs, int regs_len)
+{
+ u32 j, val, base_offset;
+ int i, ret;
+
+ if (!qm || !s || !dregs || regs_len <= 0)
+ return;
+
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return;
+
+ down_read(&qm->qps_lock);
+ for (i = 0; i < regs_len; i++) {
+ if (!dregs[i].reg_len)
+ continue;
+
+ for (j = 0; j < dregs[i].reg_len; j++) {
+ base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN;
+ val = readl(qm->io_base + base_offset);
+ if (val != dregs[i].regs[j])
+ seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n",
+ base_offset, dregs[i].regs[j], val);
+ }
+ }
+ up_read(&qm->qps_lock);
+
+ hisi_qm_put_dfx_access(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump);
+
+static int qm_diff_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+
+ hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs,
+ ARRAY_SIZE(qm_diff_regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(qm_diff_regs);
+
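For reference, the HPRE, SEC and ZIP hunks in this patch consume the new diff-regs helpers through the same three-step sequence: snapshot at debugfs init, expose a "diff_regs" file on the PF, free at exit. A condensed sketch of that pattern follows; the foo_* names and FOO_DFX_* offsets are placeholders, not symbols from the patch.

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/hisi_acc_qm.h>

static struct dfx_diff_registers foo_diff_regs[] = {
	{ .reg_offset = FOO_DFX_BASE, .reg_len = FOO_DFX_BASE_LEN },
};

static int foo_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	/* print only the registers whose value changed since the snapshot */
	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
				   ARRAY_SIZE(foo_diff_regs));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_diff_regs);

static int foo_debugfs_init(struct hisi_qm *qm)
{
	int ret;

	/* snapshot the QM and accelerator regions once; no-op on a VF */
	ret = hisi_qm_diff_regs_init(qm, foo_diff_regs,
				     ARRAY_SIZE(foo_diff_regs));
	if (ret)
		return ret;

	/* qm->debug.debug_root is assumed to have been created already */
	if (qm->fun_type == QM_HW_PF && qm->debug.acc_diff_regs)
		debugfs_create_file("diff_regs", 0444, qm->debug.debug_root,
				    qm, &foo_diff_regs_fops);
	return 0;
}

static void foo_debugfs_exit(struct hisi_qm *qm)
{
	hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(foo_diff_regs));
}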
static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *pos)
{
@@ -2660,7 +2835,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
* return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating
* qp memory fails.
*/
-struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
+static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
{
struct hisi_qp *qp;
int ret;
@@ -2678,7 +2853,6 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
return qp;
}
-EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
/**
* hisi_qm_release_qp() - Release a qp back to its qm.
@@ -2686,7 +2860,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
*
* This function releases the resource of a qp.
*/
-void hisi_qm_release_qp(struct hisi_qp *qp)
+static void hisi_qm_release_qp(struct hisi_qp *qp)
{
struct hisi_qm *qm = qp->qm;
@@ -2704,7 +2878,6 @@ void hisi_qm_release_qp(struct hisi_qp *qp)
qm_pm_put_sync(qm);
}
-EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
@@ -3053,9 +3226,17 @@ static void qm_qp_event_notifier(struct hisi_qp *qp)
wake_up_interruptible(&qp->uacce_q->wait);
}
+ /* This function returns the number of free qps in the qm. */
static int hisi_qm_get_available_instances(struct uacce_device *uacce)
{
- return hisi_qm_get_free_qp_num(uacce->priv);
+ struct hisi_qm *qm = uacce->priv;
+ int ret;
+
+ down_read(&qm->qps_lock);
+ ret = qm->qp_num - qm->qp_in_used;
+ up_read(&qm->qps_lock);
+
+ return ret;
}
static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset)
@@ -3367,24 +3548,6 @@ void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
-/**
- * hisi_qm_get_free_qp_num() - Get free number of qp in qm.
- * @qm: The qm which want to get free qp.
- *
- * This function return free number of qp in qm.
- */
-int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
-{
- int ret;
-
- down_read(&qm->qps_lock);
- ret = qm->qp_num - qm->qp_in_used;
- up_read(&qm->qps_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
-
static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
{
struct device *dev = &qm->pdev->dev;
@@ -3498,6 +3661,17 @@ static void hisi_qm_set_state(struct hisi_qm *qm, u8 state)
writel(state, qm->io_base + QM_VF_STATE);
}
+static void qm_last_regs_uninit(struct hisi_qm *qm)
+{
+ struct qm_debug *debug = &qm->debug;
+
+ if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
+ return;
+
+ kfree(debug->qm_last_words);
+ debug->qm_last_words = NULL;
+}
+
/**
* hisi_qm_uninit() - Uninitialize qm.
* @qm: The qm needed uninit.
@@ -3509,6 +3683,8 @@ void hisi_qm_uninit(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
struct device *dev = &pdev->dev;
+ qm_last_regs_uninit(qm);
+
qm_cmd_uninit(qm);
kfree(qm->factor);
down_write(&qm->qps_lock);
@@ -3550,7 +3726,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_uninit);
*
* qm hw v1 does not support this interface.
*/
-int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
+static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
{
if (!base || !number)
return -EINVAL;
@@ -3562,7 +3738,6 @@ int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
return qm->ops->get_vft(qm, base, number);
}
-EXPORT_SYMBOL_GPL(hisi_qm_get_vft);
/**
* hisi_qm_set_vft() - Set vft to a qm.
@@ -4484,6 +4659,7 @@ static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
*/
void hisi_qm_debug_init(struct hisi_qm *qm)
{
+ struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs;
struct qm_dfx *dfx = &qm->debug.dfx;
struct dentry *qm_d;
void *data;
@@ -4499,6 +4675,10 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
qm_create_debugfs_file(qm, qm->debug.qm_d, i);
}
+ if (qm_regs)
+ debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
+ qm, &qm_diff_regs_fops);
+
debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
@@ -5181,6 +5361,24 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
return 0;
}
+static void qm_show_last_dfx_regs(struct hisi_qm *qm)
+{
+ struct qm_debug *debug = &qm->debug;
+ struct pci_dev *pdev = qm->pdev;
+ u32 val;
+ int i;
+
+ if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) {
+ val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset);
+ if (debug->qm_last_words[i] != val)
+ pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
+ qm_dfx_regs[i].name, debug->qm_last_words[i], val);
+ }
+}
+
static int qm_controller_reset(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -5196,6 +5394,10 @@ static int qm_controller_reset(struct hisi_qm *qm)
return ret;
}
+ qm_show_last_dfx_regs(qm);
+ if (qm->err_ini->show_last_dfx_regs)
+ qm->err_ini->show_last_dfx_regs(qm);
+
ret = qm_soft_reset(qm);
if (ret) {
pci_err(pdev, "Controller reset failed (%d)\n", ret);
@@ -5906,6 +6108,26 @@ err_alloc_qdma:
return ret;
}
+static void qm_last_regs_init(struct hisi_qm *qm)
+{
+ int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs);
+ struct qm_debug *debug = &qm->debug;
+ int i;
+
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+ debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!debug->qm_last_words)
+ return;
+
+ for (i = 0; i < dfx_regs_num; i++) {
+ debug->qm_last_words[i] = readl_relaxed(qm->io_base +
+ qm_dfx_regs[i].offset);
+ }
+}
+
/**
* hisi_qm_init() - Initialize configures about qm.
* @qm: The qm needing init.
@@ -5958,6 +6180,8 @@ int hisi_qm_init(struct hisi_qm *qm)
qm_cmd_init(qm);
atomic_set(&qm->status.flags, QM_INIT);
+ qm_last_regs_init(qm);
+
return 0;
err_alloc_uacce:
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index a91635c348b5..6eebe739893c 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -2113,7 +2113,6 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.cra_driver_name = "hisi_sec_"sec_cra_name,\
.cra_priority = SEC_PRIORITY,\
.cra_flags = CRYPTO_ALG_ASYNC |\
- CRYPTO_ALG_ALLOCATES_MEMORY |\
CRYPTO_ALG_NEED_FALLBACK,\
.cra_blocksize = blk_size,\
.cra_ctxsize = sizeof(struct sec_ctx),\
@@ -2366,7 +2365,6 @@ static int sec_aead_decrypt(struct aead_request *a_req)
.cra_driver_name = "hisi_sec_"sec_cra_name,\
.cra_priority = SEC_PRIORITY,\
.cra_flags = CRYPTO_ALG_ASYNC |\
- CRYPTO_ALG_ALLOCATES_MEMORY |\
CRYPTO_ALG_NEED_FALLBACK,\
.cra_blocksize = blk_size,\
.cra_ctxsize = sizeof(struct sec_ctx),\
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 92fae706bdb2..4d85d2cbf376 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -110,6 +110,15 @@
#define SEC_SQE_MASK_LEN 48
#define SEC_SHAPER_TYPE_RATE 400
+#define SEC_DFX_BASE 0x301000
+#define SEC_DFX_CORE 0x302100
+#define SEC_DFX_COMMON1 0x301600
+#define SEC_DFX_COMMON2 0x301C00
+#define SEC_DFX_BASE_LEN 0x9D
+#define SEC_DFX_CORE_LEN 0x32B
+#define SEC_DFX_COMMON1_LEN 0x45
+#define SEC_DFX_COMMON2_LEN 0xBA
+
struct sec_hw_error {
u32 int_msk;
const char *msg;
@@ -226,6 +235,34 @@ static const struct debugfs_reg32 sec_dfx_regs[] = {
{"SEC_BD_SAA8 ", 0x301C40},
};
+/* define the SEC's dfx regs region and region length */
+static struct dfx_diff_registers sec_diff_regs[] = {
+ {
+ .reg_offset = SEC_DFX_BASE,
+ .reg_len = SEC_DFX_BASE_LEN,
+ }, {
+ .reg_offset = SEC_DFX_COMMON1,
+ .reg_len = SEC_DFX_COMMON1_LEN,
+ }, {
+ .reg_offset = SEC_DFX_COMMON2,
+ .reg_len = SEC_DFX_COMMON2_LEN,
+ }, {
+ .reg_offset = SEC_DFX_CORE,
+ .reg_len = SEC_DFX_CORE_LEN,
+ },
+};
+
+static int sec_diff_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+
+ hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
+ ARRAY_SIZE(sec_diff_regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(sec_diff_regs);
+
static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
@@ -729,6 +766,7 @@ DEFINE_SHOW_ATTRIBUTE(sec_regs);
static int sec_core_debug_init(struct hisi_qm *qm)
{
+ struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs;
struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
struct device *dev = &qm->pdev->dev;
struct sec_dfx *dfx = &sec->debug.dfx;
@@ -749,6 +787,9 @@ static int sec_core_debug_init(struct hisi_qm *qm)
if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF)
debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops);
+ if (qm->fun_type == QM_HW_PF && sec_regs)
+ debugfs_create_file("diff_regs", 0444, tmp_d,
+ qm, &sec_diff_regs_fops);
for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
@@ -790,6 +831,14 @@ static int sec_debugfs_init(struct hisi_qm *qm)
sec_debugfs_root);
qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
+
+ ret = hisi_qm_diff_regs_init(qm, sec_diff_regs,
+ ARRAY_SIZE(sec_diff_regs));
+ if (ret) {
+ dev_warn(dev, "Failed to init SEC diff regs!\n");
+ goto debugfs_remove;
+ }
+
hisi_qm_debug_init(qm);
ret = sec_debug_init(qm);
@@ -799,15 +848,66 @@ static int sec_debugfs_init(struct hisi_qm *qm)
return 0;
failed_to_create:
+ hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
+debugfs_remove:
debugfs_remove_recursive(sec_debugfs_root);
return ret;
}
static void sec_debugfs_exit(struct hisi_qm *qm)
{
+ hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
+
debugfs_remove_recursive(qm->debug.debug_root);
}
+static int sec_show_last_regs_init(struct hisi_qm *qm)
+{
+ struct qm_debug *debug = &qm->debug;
+ int i;
+
+ debug->last_words = kcalloc(ARRAY_SIZE(sec_dfx_regs),
+ sizeof(unsigned int), GFP_KERNEL);
+ if (!debug->last_words)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
+ debug->last_words[i] = readl_relaxed(qm->io_base +
+ sec_dfx_regs[i].offset);
+
+ return 0;
+}
+
+static void sec_show_last_regs_uninit(struct hisi_qm *qm)
+{
+ struct qm_debug *debug = &qm->debug;
+
+ if (qm->fun_type == QM_HW_VF || !debug->last_words)
+ return;
+
+ kfree(debug->last_words);
+ debug->last_words = NULL;
+}
+
+static void sec_show_last_dfx_regs(struct hisi_qm *qm)
+{
+ struct qm_debug *debug = &qm->debug;
+ struct pci_dev *pdev = qm->pdev;
+ u32 val;
+ int i;
+
+ if (qm->fun_type == QM_HW_VF || !debug->last_words)
+ return;
+
+ /* dumps last word of the debugging registers during controller reset */
+ for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) {
+ val = readl_relaxed(qm->io_base + sec_dfx_regs[i].offset);
+ if (val != debug->last_words[i])
+ pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
+ sec_dfx_regs[i].name, debug->last_words[i], val);
+ }
+}
+
static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
const struct sec_hw_error *errs = sec_hw_errors;
@@ -874,6 +974,7 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.open_axi_master_ooo = sec_open_axi_master_ooo,
.open_sva_prefetch = sec_open_sva_prefetch,
.close_sva_prefetch = sec_close_sva_prefetch,
+ .show_last_dfx_regs = sec_show_last_dfx_regs,
.err_info_init = sec_err_info_init,
};
@@ -892,8 +993,11 @@ static int sec_pf_probe_init(struct sec_dev *sec)
sec_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
sec_debug_regs_clear(qm);
+ ret = sec_show_last_regs_init(qm);
+ if (ret)
+ pci_err(qm->pdev, "Failed to init last word regs!\n");
- return 0;
+ return ret;
}
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
@@ -1067,6 +1171,7 @@ err_qm_stop:
sec_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
+ sec_show_last_regs_uninit(qm);
sec_probe_uninit(qm);
err_qm_uninit:
sec_qm_uninit(qm);
@@ -1091,6 +1196,7 @@ static void sec_remove(struct pci_dev *pdev)
if (qm->fun_type == QM_HW_PF)
sec_debug_regs_clear(qm);
+ sec_show_last_regs_uninit(qm);
sec_probe_uninit(qm);
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index f7efc02b065f..2b6f2281cfd6 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
+#include <linux/align.h>
#include <linux/dma-mapping.h>
#include <linux/hisi_acc_qm.h>
#include <linux/module.h>
@@ -64,8 +65,9 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
if (!dev || !count || !sge_nr || sge_nr > HISI_ACC_SGL_SGE_NR_MAX)
return ERR_PTR(-EINVAL);
- sgl_size = sizeof(struct acc_hw_sge) * sge_nr +
- sizeof(struct hisi_acc_hw_sgl);
+ sgl_size = ALIGN(sizeof(struct acc_hw_sge) * sge_nr +
+ sizeof(struct hisi_acc_hw_sgl),
+ HISI_ACC_SGL_ALIGN_SIZE);
/*
* the pool may allocate a block of memory of size PAGE_SIZE * 2^(MAX_ORDER - 1),
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 9520a4113c81..67869513e48c 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -521,7 +521,7 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
{
hisi_qm_stop_qp(ctx->qp);
- hisi_qm_release_qp(ctx->qp);
+ hisi_qm_free_qps(&ctx->qp, 1);
}
static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = {
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 4534e1e107d1..9c925e9c0a2d 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -49,14 +49,18 @@
#define HZIP_QM_IDEL_STATUS 0x3040e4
-#define HZIP_CORE_DEBUG_COMP_0 0x302000
-#define HZIP_CORE_DEBUG_COMP_1 0x303000
-#define HZIP_CORE_DEBUG_DECOMP_0 0x304000
-#define HZIP_CORE_DEBUG_DECOMP_1 0x305000
-#define HZIP_CORE_DEBUG_DECOMP_2 0x306000
-#define HZIP_CORE_DEBUG_DECOMP_3 0x307000
-#define HZIP_CORE_DEBUG_DECOMP_4 0x308000
-#define HZIP_CORE_DEBUG_DECOMP_5 0x309000
+#define HZIP_CORE_DFX_BASE 0x301000
+#define HZIP_CLOCK_GATED_CONTL 0x301004
+#define HZIP_CORE_DFX_COMP_0 0x302000
+#define HZIP_CORE_DFX_COMP_1 0x303000
+#define HZIP_CORE_DFX_DECOMP_0 0x304000
+#define HZIP_CORE_DFX_DECOMP_1 0x305000
+#define HZIP_CORE_DFX_DECOMP_2 0x306000
+#define HZIP_CORE_DFX_DECOMP_3 0x307000
+#define HZIP_CORE_DFX_DECOMP_4 0x308000
+#define HZIP_CORE_DFX_DECOMP_5 0x309000
+#define HZIP_CORE_REGS_BASE_LEN 0xB0
+#define HZIP_CORE_REGS_DFX_LEN 0x28
#define HZIP_CORE_INT_SOURCE 0x3010A0
#define HZIP_CORE_INT_MASK_REG 0x3010A4
@@ -230,6 +234,64 @@ static const struct debugfs_reg32 hzip_dfx_regs[] = {
{"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull},
};
+static const struct debugfs_reg32 hzip_com_dfx_regs[] = {
+ {"HZIP_CLOCK_GATE_CTRL ", 0x301004},
+ {"HZIP_CORE_INT_RAS_CE_ENB ", 0x301160},
+ {"HZIP_CORE_INT_RAS_NFE_ENB ", 0x301164},
+ {"HZIP_CORE_INT_RAS_FE_ENB ", 0x301168},
+ {"HZIP_UNCOM_ERR_RAS_CTRL ", 0x30116C},
+};
+
+static const struct debugfs_reg32 hzip_dump_dfx_regs[] = {
+ {"HZIP_GET_BD_NUM ", 0x00ull},
+ {"HZIP_GET_RIGHT_BD ", 0x04ull},
+ {"HZIP_GET_ERROR_BD ", 0x08ull},
+ {"HZIP_DONE_BD_NUM ", 0x0cull},
+ {"HZIP_MAX_DELAY ", 0x20ull},
+};
+
+/* define the ZIP's dfx regs region and region length */
+static struct dfx_diff_registers hzip_diff_regs[] = {
+ {
+ .reg_offset = HZIP_CORE_DFX_BASE,
+ .reg_len = HZIP_CORE_REGS_BASE_LEN,
+ }, {
+ .reg_offset = HZIP_CORE_DFX_COMP_0,
+ .reg_len = HZIP_CORE_REGS_DFX_LEN,
+ }, {
+ .reg_offset = HZIP_CORE_DFX_COMP_1,
+ .reg_len = HZIP_CORE_REGS_DFX_LEN,
+ }, {
+ .reg_offset = HZIP_CORE_DFX_DECOMP_0,
+ .reg_len = HZIP_CORE_REGS_DFX_LEN,
+ }, {
+ .reg_offset = HZIP_CORE_DFX_DECOMP_1,
+ .reg_len = HZIP_CORE_REGS_DFX_LEN,
+ }, {
+ .reg_offset = HZIP_CORE_DFX_DECOMP_2,
+ .reg_len = HZIP_CORE_REGS_DFX_LEN,
+ }, {
+ .reg_offset = HZIP_CORE_DFX_DECOMP_3,
+ .reg_len = HZIP_CORE_REGS_DFX_LEN,
+ }, {
+ .reg_offset = HZIP_CORE_DFX_DECOMP_4,
+ .reg_len = HZIP_CORE_REGS_DFX_LEN,
+ }, {
+ .reg_offset = HZIP_CORE_DFX_DECOMP_5,
+ .reg_len = HZIP_CORE_REGS_DFX_LEN,
+ },
+};
+
+static int hzip_diff_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+
+ hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
+ ARRAY_SIZE(hzip_diff_regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(hzip_diff_regs);
static const struct kernel_param_ops zip_uacce_mode_ops = {
.set = uacce_mode_set,
.get = param_get_int,
@@ -621,6 +683,7 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
{
+ struct dfx_diff_registers *hzip_regs = qm->debug.acc_diff_regs;
struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
struct hisi_zip_dfx *dfx = &zip->dfx;
struct dentry *tmp_dir;
@@ -634,6 +697,10 @@ static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
0644, tmp_dir, data,
&zip_atomic64_ops);
}
+
+ if (qm->fun_type == QM_HW_PF && hzip_regs)
+ debugfs_create_file("diff_regs", 0444, tmp_dir,
+ qm, &hzip_diff_regs_fops);
}
static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
@@ -666,6 +733,13 @@ static int hisi_zip_debugfs_init(struct hisi_qm *qm)
qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
qm->debug.debug_root = dev_d;
+ ret = hisi_qm_diff_regs_init(qm, hzip_diff_regs,
+ ARRAY_SIZE(hzip_diff_regs));
+ if (ret) {
+ dev_warn(dev, "Failed to init ZIP diff regs!\n");
+ goto debugfs_remove;
+ }
+
hisi_qm_debug_init(qm);
if (qm->fun_type == QM_HW_PF) {
@@ -679,6 +753,8 @@ static int hisi_zip_debugfs_init(struct hisi_qm *qm)
return 0;
failed_to_create:
+ hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
+debugfs_remove:
debugfs_remove_recursive(hzip_debugfs_root);
return ret;
}
@@ -703,6 +779,8 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
{
+ hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
+
debugfs_remove_recursive(qm->debug.debug_root);
if (qm->fun_type == QM_HW_PF) {
@@ -711,6 +789,87 @@ static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
}
}
+static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
+{
+ int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs);
+ int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
+ struct qm_debug *debug = &qm->debug;
+ void __iomem *io_base;
+ int i, j, idx;
+
+ debug->last_words = kcalloc(core_dfx_regs_num * HZIP_CORE_NUM +
+ com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
+ if (!debug->last_words)
+ return -ENOMEM;
+
+ for (i = 0; i < com_dfx_regs_num; i++) {
+ io_base = qm->io_base + hzip_com_dfx_regs[i].offset;
+ debug->last_words[i] = readl_relaxed(io_base);
+ }
+
+ for (i = 0; i < HZIP_CORE_NUM; i++) {
+ io_base = qm->io_base + core_offsets[i];
+ for (j = 0; j < core_dfx_regs_num; j++) {
+ idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
+ debug->last_words[idx] = readl_relaxed(
+ io_base + hzip_dump_dfx_regs[j].offset);
+ }
+ }
+
+ return 0;
+}
+
+static void hisi_zip_show_last_regs_uninit(struct hisi_qm *qm)
+{
+ struct qm_debug *debug = &qm->debug;
+
+ if (qm->fun_type == QM_HW_VF || !debug->last_words)
+ return;
+
+ kfree(debug->last_words);
+ debug->last_words = NULL;
+}
+
+static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
+{
+ int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs);
+ int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
+ struct qm_debug *debug = &qm->debug;
+ char buf[HZIP_BUF_SIZE];
+ void __iomem *base;
+ int i, j, idx;
+ u32 val;
+
+ if (qm->fun_type == QM_HW_VF || !debug->last_words)
+ return;
+
+ for (i = 0; i < com_dfx_regs_num; i++) {
+ val = readl_relaxed(qm->io_base + hzip_com_dfx_regs[i].offset);
+ if (debug->last_words[i] != val)
+ pci_info(qm->pdev, "com_dfx: %s \t= 0x%08x => 0x%08x\n",
+ hzip_com_dfx_regs[i].name, debug->last_words[i], val);
+ }
+
+ for (i = 0; i < HZIP_CORE_NUM; i++) {
+ if (i < HZIP_COMP_CORE_NUM)
+ scnprintf(buf, sizeof(buf), "Comp_core-%d", i);
+ else
+ scnprintf(buf, sizeof(buf), "Decomp_core-%d",
+ i - HZIP_COMP_CORE_NUM);
+ base = qm->io_base + core_offsets[i];
+
+ pci_info(qm->pdev, "==>%s:\n", buf);
+ /* dump the last word of the dfx regs during controller reset */
+ for (j = 0; j < core_dfx_regs_num; j++) {
+ idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
+ val = readl_relaxed(base + hzip_dump_dfx_regs[j].offset);
+ if (debug->last_words[idx] != val)
+ pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n",
+ hzip_dump_dfx_regs[j].name, debug->last_words[idx], val);
+ }
+ }
+}
+
static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
const struct hisi_zip_hw_error *err = zip_hw_error;
@@ -798,6 +957,7 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
.open_sva_prefetch = hisi_zip_open_sva_prefetch,
.close_sva_prefetch = hisi_zip_close_sva_prefetch,
+ .show_last_dfx_regs = hisi_zip_show_last_dfx_regs,
.err_info_init = hisi_zip_err_info_init,
};
@@ -805,6 +965,7 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
{
struct hisi_qm *qm = &hisi_zip->qm;
struct hisi_zip_ctrl *ctrl;
+ int ret;
ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
@@ -820,7 +981,11 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(qm);
- return 0;
+ ret = hisi_zip_show_last_regs_init(qm);
+ if (ret)
+ pci_err(qm->pdev, "Failed to init last word regs!\n");
+
+ return ret;
}
static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
@@ -964,6 +1129,7 @@ err_qm_stop:
hisi_qm_stop(qm, QM_NORMAL);
err_dev_err_uninit:
+ hisi_zip_show_last_regs_uninit(qm);
hisi_qm_dev_err_uninit(qm);
err_qm_uninit:
@@ -985,6 +1151,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
+ hisi_zip_show_last_regs_uninit(qm);
hisi_qm_dev_err_uninit(qm);
hisi_zip_qm_uninit(qm);
}
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 9ff885d50edf..9b1a158aec29 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1997,3 +1997,12 @@ MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+
+MODULE_FIRMWARE("ifpp.bin");
+MODULE_FIRMWARE("ipue.bin");
+MODULE_FIRMWARE("inside-secure/eip197b/ifpp.bin");
+MODULE_FIRMWARE("inside-secure/eip197b/ipue.bin");
+MODULE_FIRMWARE("inside-secure/eip197d/ifpp.bin");
+MODULE_FIRMWARE("inside-secure/eip197d/ipue.bin");
+MODULE_FIRMWARE("inside-secure/eip197_minifw/ifpp.bin");
+MODULE_FIRMWARE("inside-secure/eip197_minifw/ipue.bin");
diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c
index e2a39fdaf623..9953f5590ac4 100644
--- a/drivers/crypto/keembay/keembay-ocs-aes-core.c
+++ b/drivers/crypto/keembay/keembay-ocs-aes-core.c
@@ -1598,7 +1598,6 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ocs_aes_dev *aes_dev;
- struct resource *aes_mem;
int rc;
aes_dev = devm_kzalloc(dev, sizeof(*aes_dev), GFP_KERNEL);
@@ -1616,13 +1615,7 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
}
/* Get base register address. */
- aes_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!aes_mem) {
- dev_err(dev, "Could not retrieve io mem resource\n");
- return -ENODEV;
- }
-
- aes_dev->base_reg = devm_ioremap_resource(&pdev->dev, aes_mem);
+ aes_dev->base_reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(aes_dev->base_reg))
return PTR_ERR(aes_dev->base_reg);
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index b739d3b873dc..c6f2fa753b7c 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -624,7 +624,6 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
.decrypt = mv_cesa_ecb_des3_ede_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "mv-ecb-des3-ede",
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index f8f8542ce3e4..67530e90bbfe 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -896,7 +896,6 @@ static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
struct crypto_authenc_key_param *param;
int enckeylen = 0, authkeylen = 0;
struct rtattr *rta = (void *)key;
- int status;
if (!RTA_OK(rta, keylen))
return -EINVAL;
@@ -938,11 +937,7 @@ static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
ctx->enc_key_len = enckeylen;
ctx->auth_key_len = authkeylen;
- status = aead_hmac_init(cipher);
- if (status)
- return status;
-
- return 0;
+ return aead_hmac_init(cipher);
}
static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 32a036ada5d0..f418817c0f43 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -827,7 +827,7 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
goto err_out;
vas_init_rx_win_attr(&rxattr, coproc->ct);
- rxattr.rx_fifo = (void *)rx_fifo;
+ rxattr.rx_fifo = rx_fifo;
rxattr.rx_fifo_size = fifo_size;
rxattr.lnotify_lpid = lpid;
rxattr.lnotify_pid = pid;
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
index fa4c350c1bf9..181fa1c8b3c7 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c
@@ -14,6 +14,7 @@
static const struct pci_device_id adf_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
+ { PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -75,13 +76,6 @@ static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
- /* Temporarily set the number of crypto instances to zero to avoid
- * registering the crypto algorithms.
- * This will be removed when the algorithms will support the
- * CRYPTO_TFM_REQ_MAY_BACKLOG flag
- */
- instances = 0;
-
for (i = 0; i < instances; i++) {
val = i;
bank = i * 2;
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index b941fe3713ff..50d5afa26a9b 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -78,19 +78,6 @@ static const u32 *adf_get_arbiter_mapping(void)
return thrd_to_arb_map;
}
-static void adf_enable_ints(struct adf_accel_dev *accel_dev)
-{
- void __iomem *addr;
-
- addr = (&GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR])->virt_addr;
-
- /* Enable bundle and misc interrupts */
- ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF0_MASK_OFFSET,
- ADF_C3XXX_SMIA0_MASK);
- ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF1_MASK_OFFSET,
- ADF_C3XXX_SMIA1_MASK);
-}
-
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
adf_gen2_cfg_iov_thds(accel_dev, enable,
@@ -133,7 +120,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
- hw_data->enable_ints = adf_enable_ints;
+ hw_data->enable_ints = adf_gen2_enable_ints;
hw_data->reset_device = adf_reset_flr;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
hw_data->disable_iov = adf_disable_sriov;
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
index 1b86f828725c..336a06f11dbd 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
@@ -13,10 +13,6 @@
#define ADF_C3XXX_ACCELERATORS_MASK 0x7
#define ADF_C3XXX_ACCELENGINES_MASK 0x3F
#define ADF_C3XXX_ETR_MAX_BANKS 16
-#define ADF_C3XXX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
-#define ADF_C3XXX_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
-#define ADF_C3XXX_SMIA0_MASK 0xFFFF
-#define ADF_C3XXX_SMIA1_MASK 0x1
#define ADF_C3XXX_SOFTSTRAP_CSR_OFFSET 0x2EC
/* AE to function mapping */
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index b1eac2f81faa..c00386fe6587 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -80,19 +80,6 @@ static const u32 *adf_get_arbiter_mapping(void)
return thrd_to_arb_map;
}
-static void adf_enable_ints(struct adf_accel_dev *accel_dev)
-{
- void __iomem *addr;
-
- addr = (&GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR])->virt_addr;
-
- /* Enable bundle and misc interrupts */
- ADF_CSR_WR(addr, ADF_C62X_SMIAPF0_MASK_OFFSET,
- ADF_C62X_SMIA0_MASK);
- ADF_CSR_WR(addr, ADF_C62X_SMIAPF1_MASK_OFFSET,
- ADF_C62X_SMIA1_MASK);
-}
-
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
adf_gen2_cfg_iov_thds(accel_dev, enable,
@@ -135,7 +122,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
- hw_data->enable_ints = adf_enable_ints;
+ hw_data->enable_ints = adf_gen2_enable_ints;
hw_data->reset_device = adf_reset_flr;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
hw_data->disable_iov = adf_disable_sriov;
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
index 68c3436bd3aa..008c0a3a9769 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
@@ -13,10 +13,6 @@
#define ADF_C62X_ACCELERATORS_MASK 0x1F
#define ADF_C62X_ACCELENGINES_MASK 0x3FF
#define ADF_C62X_ETR_MAX_BANKS 16
-#define ADF_C62X_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
-#define ADF_C62X_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
-#define ADF_C62X_SMIA0_MASK 0xFFFF
-#define ADF_C62X_SMIA1_MASK 0x1
#define ADF_C62X_SOFTSTRAP_CSR_OFFSET 0x2EC
/* AE to function mapping */
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index f25a6c8edfc7..04f058acc4d3 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -16,6 +16,7 @@ intel_qat-objs := adf_cfg.o \
qat_crypto.o \
qat_algs.o \
qat_asym_algs.o \
+ qat_algs_send.o \
qat_uclo.o \
qat_hal.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index a03c6cf72331..ede6458c9dbf 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -19,6 +19,8 @@
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_4XXX_PCI_DEVICE_ID 0x4940
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
+#define ADF_401XX_PCI_DEVICE_ID 0x4942
+#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
@@ -152,9 +154,9 @@ struct adf_pfvf_ops {
int (*enable_comms)(struct adf_accel_dev *accel_dev);
u32 (*get_pf2vf_offset)(u32 i);
u32 (*get_vf2pf_offset)(u32 i);
- u32 (*get_vf2pf_sources)(void __iomem *pmisc_addr);
void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
- void (*disable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
+ void (*disable_all_vf2pf_interrupts)(void __iomem *pmisc_addr);
+ u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr);
int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
u32 pfvf_offset, struct mutex *csr_lock);
struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index e8c9b77c0d66..0464fa257929 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -195,10 +195,8 @@ bool adf_misc_wq_queue_work(struct work_struct *work);
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
-void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
- u32 vf_mask);
-void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
- u32 vf_mask);
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask);
+void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev);
bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr);
int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev);
@@ -217,14 +215,6 @@ static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
}
-static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
-{
-}
-
-static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
-{
-}
-
static inline int adf_init_pf_wq(void)
{
return 0;
@@ -243,10 +233,6 @@ static inline void adf_exit_vf_wq(void)
{
}
-static inline void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
-{
-}
-
#endif
static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev)
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
index 57035b7dd4b2..d1884547b5a1 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
@@ -98,6 +98,19 @@ void adf_gen2_get_arb_info(struct arb_info *arb_info)
}
EXPORT_SYMBOL_GPL(adf_gen2_get_arb_info);
+void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr = adf_get_pmisc_base(accel_dev);
+ u32 val;
+
+ val = accel_dev->pf.vf_info ? 0 : BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1;
+
+ /* Enable bundle and misc interrupts */
+ ADF_CSR_WR(addr, ADF_GEN2_SMIAPF0_MASK_OFFSET, val);
+ ADF_CSR_WR(addr, ADF_GEN2_SMIAPF1_MASK_OFFSET, ADF_GEN2_SMIA1_MASK);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_enable_ints);
+
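As a quick sanity check on the new generic mask: with no VFs configured, BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1 reproduces the 0xFFFF value that the removed ADF_C3XXX_SMIA0_MASK / ADF_C62X_SMIA0_MASK constants hard-coded, since both devices declare 16 ETR banks. A stand-alone illustration (user-space C, bank count assumed from ADF_C3XXX/C62X_ETR_MAX_BANKS):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int max_banks = 16;			/* ADF_C3XXX/C62X_ETR_MAX_BANKS */
	uint64_t bundle_mask = (1ULL << max_banks) - 1;	/* BIT_ULL(n) - 1 */

	/* prints 0xffff; with SR-IOV VFs configured the driver uses 0 instead */
	printf("bundle mask = 0x%llx\n", (unsigned long long)bundle_mask);
	return 0;
}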
static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
{
return BUILD_RING_BASE_ADDR(addr, size);
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
index f2e0451b11c0..e4bc07529be4 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
@@ -145,6 +145,11 @@ do { \
#define ADF_GEN2_CERRSSMSH(i) ((i) * 0x4000 + 0x10)
#define ADF_GEN2_ERRSSMSH_EN BIT(3)
+/* Interrupts */
+#define ADF_GEN2_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
+#define ADF_GEN2_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
+#define ADF_GEN2_SMIA1_MASK 0x1
+
u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self);
u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
@@ -153,6 +158,7 @@ void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info);
void adf_gen2_get_arb_info(struct arb_info *arb_info);
+void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev);
u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev);
void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
index 1a9072aac2ca..70ef11963938 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
+++ b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
@@ -13,8 +13,9 @@
#include "adf_pfvf_utils.h"
/* VF2PF interrupts */
+#define ADF_GEN2_VF_MSK 0xFFFF
#define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
-#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask) (((vf_mask) & 0xFFFF) << 9)
+#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask) (((vf_mask) & ADF_GEN2_VF_MSK) << 9)
#define ADF_GEN2_PF_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
#define ADF_GEN2_VF_PF2VF_OFFSET 0x200
@@ -50,43 +51,60 @@ static u32 adf_gen2_vf_get_pfvf_offset(u32 i)
return ADF_GEN2_VF_PF2VF_OFFSET;
}
-static u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_addr)
-{
- u32 errsou3, errmsk3, vf_int_mask;
-
- /* Get the interrupt sources triggered by VFs */
- errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
- vf_int_mask = ADF_GEN2_ERR_REG_VF2PF(errsou3);
-
- /* To avoid adding duplicate entries to work queue, clear
- * vf_int_mask_sets bits that are already masked in ERRMSK register.
- */
- errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
- vf_int_mask &= ~ADF_GEN2_ERR_REG_VF2PF(errmsk3);
-
- return vf_int_mask;
-}
-
-static void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr,
- u32 vf_mask)
+static void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
/* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
- if (vf_mask & 0xFFFF) {
+ if (vf_mask & ADF_GEN2_VF_MSK) {
u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
& ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
}
}
-static void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr,
- u32 vf_mask)
+static void adf_gen2_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
{
/* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
- if (vf_mask & 0xFFFF) {
- u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
- | ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
- ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
- }
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ | ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+}
+
+static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+ u32 sources, disabled, pending;
+ u32 errsou3, errmsk3;
+
+ /* Get the interrupt sources triggered by VFs */
+ errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+ sources = ADF_GEN2_ERR_REG_VF2PF(errsou3);
+
+ if (!sources)
+ return 0;
+
+ /* Get the already disabled interrupts */
+ errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+ disabled = ADF_GEN2_ERR_REG_VF2PF(errmsk3);
+
+ pending = sources & ~disabled;
+ if (!pending)
+ return 0;
+
+ /* Due to HW limitations, when disabling the interrupts, we can't
+ * just disable the requested sources, as this would lead to missed
+ * interrupts if ERRSOU3 changes just before writing to ERRMSK3.
+ * To work around it, disable all and re-enable only the sources that
+ * are neither pending nor already disabled. Re-enabling will
+ * trigger a new interrupt for the sources that have changed in the
+ * meantime, if any.
+ */
+ errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+
+ errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+
+ /* Return the sources of the (new) interrupt(s) */
+ return pending;
}
static u32 gen2_csr_get_int_bit(enum gen2_csr_pos offset)
@@ -362,9 +380,9 @@ void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
pfvf_ops->get_pf2vf_offset = adf_gen2_pf_get_pfvf_offset;
pfvf_ops->get_vf2pf_offset = adf_gen2_pf_get_pfvf_offset;
- pfvf_ops->get_vf2pf_sources = adf_gen2_get_vf2pf_sources;
pfvf_ops->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
- pfvf_ops->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts;
+ pfvf_ops->disable_all_vf2pf_interrupts = adf_gen2_disable_all_vf2pf_interrupts;
+ pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen2_disable_pending_vf2pf_interrupts;
pfvf_ops->send_msg = adf_gen2_pf2vf_send;
pfvf_ops->recv_msg = adf_gen2_vf2pf_recv;
}
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
index d80d493a7756..8e8efe93f3ee 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
+++ b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
@@ -15,6 +15,7 @@
/* VF2PF interrupt source registers */
#define ADF_4XXX_VM2PF_SOU 0x41A180
#define ADF_4XXX_VM2PF_MSK 0x41A1C0
+#define ADF_GEN4_VF_MSK 0xFFFF
#define ADF_PFVF_GEN4_MSGTYPE_SHIFT 2
#define ADF_PFVF_GEN4_MSGTYPE_MASK 0x3F
@@ -36,32 +37,48 @@ static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
return ADF_4XXX_VM2PF_OFFSET(i);
}
-static u32 adf_gen4_get_vf2pf_sources(void __iomem *pmisc_addr)
+static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
- u32 sou, mask;
+ u32 val;
- sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
- mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
-
- return sou & ~mask;
+ val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) & ~vf_mask;
+ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
}
-static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr,
- u32 vf_mask)
+static void adf_gen4_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
{
- unsigned int val;
-
- val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) & ~vf_mask;
- ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
+ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
}
-static void adf_gen4_disable_vf2pf_interrupts(void __iomem *pmisc_addr,
- u32 vf_mask)
+static u32 adf_gen4_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
{
- unsigned int val;
+ u32 sources, disabled, pending;
+
+ /* Get the interrupt sources triggered by VFs */
+ sources = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
+ if (!sources)
+ return 0;
+
+ /* Get the already disabled interrupts */
+ disabled = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
+
+ pending = sources & ~disabled;
+ if (!pending)
+ return 0;
+
+ /* Due to HW limitations, when disabling the interrupts, we can't
+ * just disable the requested sources, as this would lead to missed
+ * interrupts if VM2PF_SOU changes just before writing to VM2PF_MSK.
+ * To work around it, disable all and re-enable only the sources that
+ * are neither pending nor already disabled. Re-enabling will
+ * trigger a new interrupt for the sources that have changed in the
+ * meantime, if any.
+ */
+ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, disabled | sources);
- val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) | vf_mask;
- ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
+ /* Return the sources of the (new) interrupt(s) */
+ return pending;
}
static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
@@ -96,10 +113,16 @@ static struct pfvf_message adf_gen4_pfvf_recv(struct adf_accel_dev *accel_dev,
u32 pfvf_offset, u8 compat_ver)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ struct pfvf_message msg = { 0 };
u32 csr_val;
/* Read message from the CSR */
csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+ if (!(csr_val & ADF_PFVF_INT)) {
+ dev_info(&GET_DEV(accel_dev),
+ "Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val);
+ return msg;
+ }
/* We can now acknowledge the message reception by clearing the
* interrupt bit
@@ -115,9 +138,9 @@ void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
pfvf_ops->get_pf2vf_offset = adf_gen4_pf_get_pf2vf_offset;
pfvf_ops->get_vf2pf_offset = adf_gen4_pf_get_vf2pf_offset;
- pfvf_ops->get_vf2pf_sources = adf_gen4_get_vf2pf_sources;
pfvf_ops->enable_vf2pf_interrupts = adf_gen4_enable_vf2pf_interrupts;
- pfvf_ops->disable_vf2pf_interrupts = adf_gen4_disable_vf2pf_interrupts;
+ pfvf_ops->disable_all_vf2pf_interrupts = adf_gen4_disable_all_vf2pf_interrupts;
+ pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen4_disable_pending_vf2pf_interrupts;
pfvf_ops->send_msg = adf_gen4_pfvf_send;
pfvf_ops->recv_msg = adf_gen4_pfvf_recv;
}
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index a35149f8bf1e..ad9e135b8560 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -66,42 +66,39 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}
-void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
unsigned long flags;
spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
- GET_PFVF_OPS(accel_dev)->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
+ GET_PFVF_OPS(accel_dev)->disable_all_vf2pf_interrupts(pmisc_addr);
spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}
-static void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
- u32 vf_mask)
+static u32 adf_disable_pending_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ u32 pending;
spin_lock(&accel_dev->pf.vf2pf_ints_lock);
- GET_PFVF_OPS(accel_dev)->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
+ pending = GET_PFVF_OPS(accel_dev)->disable_pending_vf2pf_interrupts(pmisc_addr);
spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
+
+ return pending;
}
static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
{
- void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
bool irq_handled = false;
unsigned long vf_mask;
- /* Get the interrupt sources triggered by VFs */
- vf_mask = GET_PFVF_OPS(accel_dev)->get_vf2pf_sources(pmisc_addr);
-
+ /* Get the interrupt sources triggered by VFs, except for those already disabled */
+ vf_mask = adf_disable_pending_vf2pf_interrupts(accel_dev);
if (vf_mask) {
struct adf_accel_vf_info *vf_info;
int i;
- /* Disable VF2PF interrupts for VFs with pending ints */
- adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask);
-
/*
* Handle VF2PF interrupt unless the VF is malicious and
* is attempting to flood the host OS with VF2PF interrupts.
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/qat/qat_common/adf_pfvf_msg.h
index 9c37a2661392..204a42438992 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_msg.h
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_msg.h
@@ -8,8 +8,8 @@
/*
* PF<->VF Gen2 Messaging format
*
- * The PF has an array of 32-bit PF2VF registers, one for each VF. The
- * PF can access all these registers; each VF can access only the one
+ * The PF has an array of 32-bit PF2VF registers, one for each VF. The
+ * PF can access all these registers while each VF can access only the one
* register associated with that particular VF.
*
* The register functionality is split into two parts:
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c
index 588352de1ef0..388e58bcbcaf 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c
@@ -154,7 +154,7 @@ static struct pfvf_message handle_blkmsg_req(struct adf_accel_vf_info *vf_info,
if (FIELD_GET(ADF_VF2PF_BLOCK_CRC_REQ_MASK, req.data)) {
dev_dbg(&GET_DEV(vf_info->accel_dev),
"BlockMsg of type %d for CRC over %d bytes received from VF%d\n",
- blk_type, blk_byte, vf_info->vf_nr);
+ blk_type, blk_byte + 1, vf_info->vf_nr);
if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
byte_max, &resp_data,
@@ -242,7 +242,9 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
"VersionRequest received from VF%d (vers %d) to PF (vers %d)\n",
vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
- if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
+ if (vf_compat_ver == 0)
+ compat = ADF_PF2VF_VF_INCOMPATIBLE;
+ else if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
compat = ADF_PF2VF_VF_COMPATIBLE;
else
compat = ADF_PF2VF_VF_COMPAT_UNKNOWN;
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index b960bca1f9d2..f38b2ffde146 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -3,7 +3,6 @@
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/device.h>
-#include <linux/iommu.h>
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_pfvf_pf_msg.h"
@@ -74,8 +73,7 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
hw_data->configure_iov_threads(accel_dev, true);
/* Enable VF to PF interrupts for all VFs */
- if (hw_data->pfvf_ops.get_pf2vf_offset)
- adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);
+ adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);
/*
* Due to the hardware design, when SR-IOV and the ring arbiter
@@ -104,22 +102,18 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
if (!accel_dev->pf.vf_info)
return;
- if (hw_data->pfvf_ops.get_pf2vf_offset)
- adf_pf2vf_notify_restarting(accel_dev);
-
+ adf_pf2vf_notify_restarting(accel_dev);
pci_disable_sriov(accel_to_pci_dev(accel_dev));
/* Disable VF to PF interrupts */
- if (hw_data->pfvf_ops.get_pf2vf_offset)
- adf_disable_vf2pf_interrupts(accel_dev, GENMASK(31, 0));
+ adf_disable_all_vf2pf_interrupts(accel_dev);
/* Clear Valid bits in AE Thread to PCIe Function Mapping */
if (hw_data->configure_iov_threads)
hw_data->configure_iov_threads(accel_dev, false);
- for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
+ for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
mutex_destroy(&vf->pf2vf_lock);
- }
kfree(accel_dev->pf.vf_info);
accel_dev->pf.vf_info = NULL;
@@ -176,7 +170,7 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
return -EFAULT;
}
- if (!iommu_present(&pci_bus_type))
+ if (!device_iommu_mapped(&pdev->dev))
dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");
if (accel_dev->pf.vf_info) {
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 8ba28409fb74..630d0483c4e0 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -8,6 +8,9 @@
#include "adf_cfg.h"
#include "adf_common_drv.h"
+#define ADF_MAX_RING_THRESHOLD 80
+#define ADF_PERCENT(tot, percent) (((tot) * (percent)) / 100)
+
static inline u32 adf_modulo(u32 data, u32 shift)
{
u32 div = data >> shift;
@@ -77,6 +80,11 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
bank->irq_mask);
}
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring)
+{
+ return atomic_read(ring->inflights) > ring->threshold;
+}
+
int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
@@ -217,6 +225,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
struct adf_etr_bank_data *bank;
struct adf_etr_ring_data *ring;
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ int max_inflights;
u32 ring_num;
int ret;
@@ -263,6 +272,8 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
ring->head = 0;
ring->tail = 0;
+ max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
+ ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD);
atomic_set(ring->inflights, 0);
ret = adf_init_ring(ring);
if (ret)
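For orientation, a minimal sketch of what the new near-full check amounts to, assuming a ring that can hold 256 in-flight requests (illustrative only, not part of the patch; the real capacity comes from ADF_MAX_INFLIGHTS(ring_size, msg_size)):

static bool example_nearly_full(int inflights)
{
	int max_inflights = 256;			/* assumed ring capacity */
	int threshold = (max_inflights * 80) / 100;	/* ADF_PERCENT(256, 80) == 204 */

	/* mirrors adf_ring_nearly_full(): true above ~80% occupancy */
	return inflights > threshold;
}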
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h
index 2c95f1697c76..e6ef6f9b7691 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.h
+++ b/drivers/crypto/qat/qat_common/adf_transport.h
@@ -14,6 +14,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
const char *ring_name, adf_callback_fn callback,
int poll_mode, struct adf_etr_ring_data **ring_ptr);
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring);
int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg);
void adf_remove_ring(struct adf_etr_ring_data *ring);
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
index 501bcf0f1809..8b2c92ba7ca1 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_internal.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -22,6 +22,7 @@ struct adf_etr_ring_data {
spinlock_t lock; /* protects ring data struct */
u16 head;
u16 tail;
+ u32 threshold;
u8 ring_number;
u8 ring_size;
u8 msg_size;
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index 86c3bd0c9c2b..8c95fcd8e64b 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -70,6 +70,7 @@ static void adf_dev_stop_async(struct work_struct *work)
container_of(work, struct adf_vf_stop_data, work);
struct adf_accel_dev *accel_dev = stop_data->accel_dev;
+ adf_dev_restarting_notify(accel_dev);
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index f998ed58457c..148edbe379e3 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -17,7 +17,7 @@
#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
-#include "adf_transport.h"
+#include "qat_algs_send.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
@@ -46,19 +46,6 @@
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
-struct qat_alg_buf {
- u32 len;
- u32 resrvd;
- u64 addr;
-} __packed;
-
-struct qat_alg_buf_list {
- u64 resrvd;
- u32 num_bufs;
- u32 num_mapped_bufs;
- struct qat_alg_buf bufers[];
-} __packed __aligned(64);
-
/* Common content descriptor */
struct qat_alg_cd {
union {
@@ -693,7 +680,10 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
bl->bufers[i].len, DMA_BIDIRECTIONAL);
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
- kfree(bl);
+
+ if (!qat_req->buf.sgl_src_valid)
+ kfree(bl);
+
if (blp != blpout) {
/* If out of place operation dma unmap only data */
int bufless = blout->num_bufs - blout->num_mapped_bufs;
@@ -704,14 +694,17 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
DMA_BIDIRECTIONAL);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
- kfree(blout);
+
+ if (!qat_req->buf.sgl_dst_valid)
+ kfree(blout);
}
}
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct scatterlist *sgl,
struct scatterlist *sglout,
- struct qat_crypto_request *qat_req)
+ struct qat_crypto_request *qat_req,
+ gfp_t flags)
{
struct device *dev = &GET_DEV(inst->accel_dev);
int i, sg_nctr = 0;
@@ -721,15 +714,24 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
dma_addr_t blp = DMA_MAPPING_ERROR;
dma_addr_t bloutp = DMA_MAPPING_ERROR;
struct scatterlist *sg;
- size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
+ size_t sz_out, sz = struct_size(bufl, bufers, n);
+ int node = dev_to_node(&GET_DEV(inst->accel_dev));
if (unlikely(!n))
return -EINVAL;
- bufl = kzalloc_node(sz, GFP_ATOMIC,
- dev_to_node(&GET_DEV(inst->accel_dev)));
- if (unlikely(!bufl))
- return -ENOMEM;
+ qat_req->buf.sgl_src_valid = false;
+ qat_req->buf.sgl_dst_valid = false;
+
+ if (n > QAT_MAX_BUFF_DESC) {
+ bufl = kzalloc_node(sz, flags, node);
+ if (unlikely(!bufl))
+ return -ENOMEM;
+ } else {
+ bufl = &qat_req->buf.sgl_src.sgl_hdr;
+ memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+ qat_req->buf.sgl_src_valid = true;
+ }
for_each_sg(sgl, sg, n, i)
bufl->bufers[i].addr = DMA_MAPPING_ERROR;
@@ -760,12 +762,18 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct qat_alg_buf *bufers;
n = sg_nents(sglout);
- sz_out = struct_size(buflout, bufers, n + 1);
+ sz_out = struct_size(buflout, bufers, n);
sg_nctr = 0;
- buflout = kzalloc_node(sz_out, GFP_ATOMIC,
- dev_to_node(&GET_DEV(inst->accel_dev)));
- if (unlikely(!buflout))
- goto err_in;
+
+ if (n > QAT_MAX_BUFF_DESC) {
+ buflout = kzalloc_node(sz_out, flags, node);
+ if (unlikely(!buflout))
+ goto err_in;
+ } else {
+ buflout = &qat_req->buf.sgl_dst.sgl_hdr;
+ memset(buflout, 0, sizeof(struct qat_alg_buf_list));
+ qat_req->buf.sgl_dst_valid = true;
+ }
bufers = buflout->bufers;
for_each_sg(sglout, sg, n, i)
@@ -810,7 +818,9 @@ err_out:
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
DMA_BIDIRECTIONAL);
- kfree(buflout);
+
+ if (!qat_req->buf.sgl_dst_valid)
+ kfree(buflout);
err_in:
if (!dma_mapping_error(dev, blp))
@@ -823,7 +833,8 @@ err_in:
bufl->bufers[i].len,
DMA_BIDIRECTIONAL);
- kfree(bufl);
+ if (!qat_req->buf.sgl_src_valid)
+ kfree(bufl);
dev_err(dev, "Failed to map buf for dma\n");
return -ENOMEM;
@@ -925,8 +936,25 @@ void qat_alg_callback(void *resp)
struct icp_qat_fw_la_resp *qat_resp = resp;
struct qat_crypto_request *qat_req =
(void *)(__force long)qat_resp->opaque_data;
+ struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
qat_req->cb(qat_resp, qat_req);
+
+ qat_alg_send_backlog(backlog);
+}
+
+static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
+ struct qat_crypto_instance *inst,
+ struct crypto_async_request *base)
+{
+ struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+ alg_req->fw_req = (u32 *)&qat_req->req;
+ alg_req->tx_ring = inst->sym_tx;
+ alg_req->base = base;
+ alg_req->backlog = &inst->backlog;
+
+ return qat_alg_send_message(alg_req);
}
static int qat_alg_aead_dec(struct aead_request *areq)
@@ -939,14 +967,15 @@ static int qat_alg_aead_dec(struct aead_request *areq)
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
int digst_size = crypto_aead_authsize(aead_tfm);
- int ret, ctr = 0;
+ gfp_t f = qat_algs_alloc_flags(&areq->base);
+ int ret;
u32 cipher_len;
cipher_len = areq->cryptlen - digst_size;
if (cipher_len % AES_BLOCK_SIZE != 0)
return -EINVAL;
- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
if (unlikely(ret))
return ret;
@@ -965,15 +994,12 @@ static int qat_alg_aead_dec(struct aead_request *areq)
auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
auth_param->auth_off = 0;
auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
static int qat_alg_aead_enc(struct aead_request *areq)
@@ -984,14 +1010,15 @@ static int qat_alg_aead_enc(struct aead_request *areq)
struct qat_crypto_request *qat_req = aead_request_ctx(areq);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
+ gfp_t f = qat_algs_alloc_flags(&areq->base);
struct icp_qat_fw_la_bulk_req *msg;
u8 *iv = areq->iv;
- int ret, ctr = 0;
+ int ret;
if (areq->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
if (unlikely(ret))
return ret;
@@ -1013,15 +1040,11 @@ static int qat_alg_aead_enc(struct aead_request *areq)
auth_param->auth_off = 0;
auth_param->auth_len = areq->assoclen + areq->cryptlen;
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
-
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
@@ -1173,13 +1196,14 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ gfp_t f = qat_algs_alloc_flags(&req->base);
struct icp_qat_fw_la_bulk_req *msg;
- int ret, ctr = 0;
+ int ret;
if (req->cryptlen == 0)
return 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
if (unlikely(ret))
return ret;
@@ -1198,15 +1222,11 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
qat_alg_set_req_iv(qat_req);
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
-
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
@@ -1242,13 +1262,14 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ gfp_t f = qat_algs_alloc_flags(&req->base);
struct icp_qat_fw_la_bulk_req *msg;
- int ret, ctr = 0;
+ int ret;
if (req->cryptlen == 0)
return 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
if (unlikely(ret))
return ret;
@@ -1268,15 +1289,11 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
qat_alg_set_req_iv(qat_req);
qat_alg_update_iv(qat_req);
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
-
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/qat/qat_common/qat_algs_send.c
new file mode 100644
index 000000000000..ff5b4347f783
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_algs_send.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_transport.h"
+#include "qat_algs_send.h"
+#include "qat_crypto.h"
+
+#define ADF_MAX_RETRIES 20
+
+static int qat_alg_send_message_retry(struct qat_alg_req *req)
+{
+ int ret = 0, ctr = 0;
+
+ do {
+ ret = adf_send_message(req->tx_ring, req->fw_req);
+ } while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES);
+
+ if (ret == -EAGAIN)
+ return -ENOSPC;
+
+ return -EINPROGRESS;
+}
+
+void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
+{
+ struct qat_alg_req *req, *tmp;
+
+ spin_lock_bh(&backlog->lock);
+ list_for_each_entry_safe(req, tmp, &backlog->list, list) {
+ if (adf_send_message(req->tx_ring, req->fw_req)) {
+ /* The HW ring is full. Do nothing.
+ * qat_alg_send_backlog() will be invoked again by
+ * another callback.
+ */
+ break;
+ }
+ list_del(&req->list);
+ req->base->complete(req->base, -EINPROGRESS);
+ }
+ spin_unlock_bh(&backlog->lock);
+}
+
+static void qat_alg_backlog_req(struct qat_alg_req *req,
+ struct qat_instance_backlog *backlog)
+{
+ INIT_LIST_HEAD(&req->list);
+
+ spin_lock_bh(&backlog->lock);
+ list_add_tail(&req->list, &backlog->list);
+ spin_unlock_bh(&backlog->lock);
+}
+
+static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
+{
+ struct qat_instance_backlog *backlog = req->backlog;
+ struct adf_etr_ring_data *tx_ring = req->tx_ring;
+ u32 *fw_req = req->fw_req;
+
+ /* If any request is already backlogged, then add to backlog list */
+ if (!list_empty(&backlog->list))
+ goto enqueue;
+
+ /* If ring is nearly full, then add to backlog list */
+ if (adf_ring_nearly_full(tx_ring))
+ goto enqueue;
+
+ /* If adding request to HW ring fails, then add to backlog list */
+ if (adf_send_message(tx_ring, fw_req))
+ goto enqueue;
+
+ return -EINPROGRESS;
+
+enqueue:
+ qat_alg_backlog_req(req, backlog);
+
+ return -EBUSY;
+}
+
+int qat_alg_send_message(struct qat_alg_req *req)
+{
+ u32 flags = req->base->flags;
+
+ if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ return qat_alg_send_message_maybacklog(req);
+ else
+ return qat_alg_send_message_retry(req);
+}
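In short, the send helper returns -EINPROGRESS when the request made it onto the HW ring, -EBUSY when it was parked on the backlog list (to be resubmitted from qat_alg_send_backlog() once a response frees up space), and -ENOSPC only for requests without CRYPTO_TFM_REQ_MAY_BACKLOG that could not be queued after the retries. A hedged caller sketch, not from the patch, where submit_one() and cleanup() are made-up names standing in for the real callers and their unmapping (e.g. qat_alg_free_bufl()):

static int submit_one(struct qat_alg_req *req, void (*cleanup)(void *), void *ctx)
{
	int ret = qat_alg_send_message(req);

	/* only -ENOSPC means the request was dropped here and now;
	 * -EINPROGRESS and -EBUSY both complete asynchronously later
	 */
	if (ret == -ENOSPC)
		cleanup(ctx);

	return ret;
}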
diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.h b/drivers/crypto/qat/qat_common/qat_algs_send.h
new file mode 100644
index 000000000000..5ce9f4f69d8f
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_algs_send.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef QAT_ALGS_SEND_H
+#define QAT_ALGS_SEND_H
+
+#include "qat_crypto.h"
+
+int qat_alg_send_message(struct qat_alg_req *req);
+void qat_alg_send_backlog(struct qat_instance_backlog *backlog);
+
+#endif
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index b0b78445418b..16d97db9ea15 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -12,6 +12,7 @@
#include <crypto/scatterwalk.h>
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
+#include "qat_algs_send.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
@@ -135,8 +136,23 @@ struct qat_asym_request {
} areq;
int err;
void (*cb)(struct icp_qat_fw_pke_resp *resp);
+ struct qat_alg_req alg_req;
} __aligned(64);
+static int qat_alg_send_asym_message(struct qat_asym_request *qat_req,
+ struct qat_crypto_instance *inst,
+ struct crypto_async_request *base)
+{
+ struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+ alg_req->fw_req = (u32 *)&qat_req->req;
+ alg_req->tx_ring = inst->pke_tx;
+ alg_req->base = base;
+ alg_req->backlog = &inst->backlog;
+
+ return qat_alg_send_message(alg_req);
+}
+
static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
struct qat_asym_request *req = (void *)(__force long)resp->opaque;
@@ -148,26 +164,21 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
if (areq->src) {
- if (req->src_align)
- dma_free_coherent(dev, req->ctx.dh->p_size,
- req->src_align, req->in.dh.in.b);
- else
- dma_unmap_single(dev, req->in.dh.in.b,
- req->ctx.dh->p_size, DMA_TO_DEVICE);
+ dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
+ DMA_TO_DEVICE);
+ kfree_sensitive(req->src_align);
}
areq->dst_len = req->ctx.dh->p_size;
if (req->dst_align) {
scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
areq->dst_len, 1);
-
- dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
- req->out.dh.r);
- } else {
- dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
- DMA_FROM_DEVICE);
+ kfree_sensitive(req->dst_align);
}
+ dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+ DMA_FROM_DEVICE);
+
dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
DMA_TO_DEVICE);
dma_unmap_single(dev, req->phy_out,
@@ -213,8 +224,10 @@ static int qat_dh_compute_value(struct kpp_request *req)
struct qat_asym_request *qat_req =
PTR_ALIGN(kpp_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
- int ret, ctr = 0;
+ gfp_t flags = qat_algs_alloc_flags(&req->base);
int n_input_params = 0;
+ u8 *vaddr;
+ int ret;
if (unlikely(!ctx->xa))
return -EINVAL;
@@ -223,6 +236,10 @@ static int qat_dh_compute_value(struct kpp_request *req)
req->dst_len = ctx->p_size;
return -EOVERFLOW;
}
+
+ if (req->src_len > ctx->p_size)
+ return -EINVAL;
+
memset(msg, '\0', sizeof(*msg));
ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
ICP_QAT_FW_COMN_REQ_FLAG_SET);
@@ -271,27 +288,24 @@ static int qat_dh_compute_value(struct kpp_request *req)
*/
if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
qat_req->src_align = NULL;
- qat_req->in.dh.in.b = dma_map_single(dev,
- sg_virt(req->src),
- req->src_len,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev,
- qat_req->in.dh.in.b)))
- return ret;
-
+ vaddr = sg_virt(req->src);
} else {
int shift = ctx->p_size - req->src_len;
- qat_req->src_align = dma_alloc_coherent(dev,
- ctx->p_size,
- &qat_req->in.dh.in.b,
- GFP_KERNEL);
+ qat_req->src_align = kzalloc(ctx->p_size, flags);
if (unlikely(!qat_req->src_align))
return ret;
scatterwalk_map_and_copy(qat_req->src_align + shift,
req->src, 0, req->src_len, 0);
+
+ vaddr = qat_req->src_align;
}
+
+ qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
+ goto unmap_src;
}
/*
* dst can be of any size in valid range, but HW expects it to be the
@@ -302,20 +316,18 @@ static int qat_dh_compute_value(struct kpp_request *req)
*/
if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
qat_req->dst_align = NULL;
- qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
- req->dst_len,
- DMA_FROM_DEVICE);
-
- if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
- goto unmap_src;
-
+ vaddr = sg_virt(req->dst);
} else {
- qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
- &qat_req->out.dh.r,
- GFP_KERNEL);
+ qat_req->dst_align = kzalloc(ctx->p_size, flags);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
+
+ vaddr = qat_req->dst_align;
}
+ qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
+ goto unmap_dst;
qat_req->in.dh.in_tab[n_input_params] = 0;
qat_req->out.dh.out_tab[1] = 0;
@@ -338,13 +350,13 @@ static int qat_dh_compute_value(struct kpp_request *req)
msg->input_param_count = n_input_params;
msg->output_param_count = 1;
- do {
- ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
- } while (ret == -EBUSY && ctr++ < 100);
+ ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+ if (ret == -ENOSPC)
+ goto unmap_all;
- if (!ret)
- return -EINPROGRESS;
+ return ret;
+unmap_all:
if (!dma_mapping_error(dev, qat_req->phy_out))
dma_unmap_single(dev, qat_req->phy_out,
sizeof(struct qat_dh_output_params),
@@ -355,23 +367,17 @@ unmap_in_params:
sizeof(struct qat_dh_input_params),
DMA_TO_DEVICE);
unmap_dst:
- if (qat_req->dst_align)
- dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
- qat_req->out.dh.r);
- else
- if (!dma_mapping_error(dev, qat_req->out.dh.r))
- dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
- DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.dh.r))
+ dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
+ DMA_FROM_DEVICE);
+ kfree_sensitive(qat_req->dst_align);
unmap_src:
if (req->src) {
- if (qat_req->src_align)
- dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
- qat_req->in.dh.in.b);
- else
- if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
- dma_unmap_single(dev, qat_req->in.dh.in.b,
- ctx->p_size,
- DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
+ dma_unmap_single(dev, qat_req->in.dh.in.b,
+ ctx->p_size,
+ DMA_TO_DEVICE);
+ kfree_sensitive(qat_req->src_align);
}
return ret;
}
@@ -420,14 +426,17 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
{
if (ctx->g) {
+ memset(ctx->g, 0, ctx->p_size);
dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
ctx->g = NULL;
}
if (ctx->xa) {
+ memset(ctx->xa, 0, ctx->p_size);
dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
ctx->xa = NULL;
}
if (ctx->p) {
+ memset(ctx->p, 0, ctx->p_size);
dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
ctx->p = NULL;
}
@@ -510,25 +519,22 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
- if (req->src_align)
- dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
- req->in.rsa.enc.m);
- else
- dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
- DMA_TO_DEVICE);
+ kfree_sensitive(req->src_align);
+
+ dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+ DMA_TO_DEVICE);
areq->dst_len = req->ctx.rsa->key_sz;
if (req->dst_align) {
scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
areq->dst_len, 1);
- dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
- req->out.rsa.enc.c);
- } else {
- dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
- DMA_FROM_DEVICE);
+ kfree_sensitive(req->dst_align);
}
+ dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+ DMA_FROM_DEVICE);
+
dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
dma_unmap_single(dev, req->phy_out,
@@ -542,8 +548,11 @@ void qat_alg_asym_callback(void *_resp)
{
struct icp_qat_fw_pke_resp *resp = _resp;
struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
+ struct qat_instance_backlog *backlog = areq->alg_req.backlog;
areq->cb(resp);
+
+ qat_alg_send_backlog(backlog);
}
#define PKE_RSA_EP_512 0x1c161b21
@@ -642,7 +651,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
struct qat_asym_request *qat_req =
PTR_ALIGN(akcipher_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
- int ret, ctr = 0;
+ gfp_t flags = qat_algs_alloc_flags(&req->base);
+ u8 *vaddr;
+ int ret;
if (unlikely(!ctx->n || !ctx->e))
return -EINVAL;
@@ -651,6 +662,10 @@ static int qat_rsa_enc(struct akcipher_request *req)
req->dst_len = ctx->key_sz;
return -EOVERFLOW;
}
+
+ if (req->src_len > ctx->key_sz)
+ return -EINVAL;
+
memset(msg, '\0', sizeof(*msg));
ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
ICP_QAT_FW_COMN_REQ_FLAG_SET);
@@ -679,40 +694,39 @@ static int qat_rsa_enc(struct akcipher_request *req)
*/
if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
qat_req->src_align = NULL;
- qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
- req->src_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
- return ret;
-
+ vaddr = sg_virt(req->src);
} else {
int shift = ctx->key_sz - req->src_len;
- qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
- &qat_req->in.rsa.enc.m,
- GFP_KERNEL);
+ qat_req->src_align = kzalloc(ctx->key_sz, flags);
if (unlikely(!qat_req->src_align))
return ret;
scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
0, req->src_len, 0);
+ vaddr = qat_req->src_align;
}
- if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
- qat_req->dst_align = NULL;
- qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
- req->dst_len,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
- goto unmap_src;
+ qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
+ goto unmap_src;
+ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+ qat_req->dst_align = NULL;
+ vaddr = sg_virt(req->dst);
} else {
- qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
- &qat_req->out.rsa.enc.c,
- GFP_KERNEL);
+ qat_req->dst_align = kzalloc(ctx->key_sz, flags);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
-
+ vaddr = qat_req->dst_align;
}
+
+ qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
+ goto unmap_dst;
+
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
@@ -732,13 +746,14 @@ static int qat_rsa_enc(struct akcipher_request *req)
msg->pke_mid.opaque = (u64)(__force long)qat_req;
msg->input_param_count = 3;
msg->output_param_count = 1;
- do {
- ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
- } while (ret == -EBUSY && ctr++ < 100);
- if (!ret)
- return -EINPROGRESS;
+ ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+ if (ret == -ENOSPC)
+ goto unmap_all;
+ return ret;
+
+unmap_all:
if (!dma_mapping_error(dev, qat_req->phy_out))
dma_unmap_single(dev, qat_req->phy_out,
sizeof(struct qat_rsa_output_params),
@@ -749,21 +764,15 @@ unmap_in_params:
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
unmap_dst:
- if (qat_req->dst_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
- qat_req->out.rsa.enc.c);
- else
- if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
- dma_unmap_single(dev, qat_req->out.rsa.enc.c,
- ctx->key_sz, DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+ dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+ ctx->key_sz, DMA_FROM_DEVICE);
+ kfree_sensitive(qat_req->dst_align);
unmap_src:
- if (qat_req->src_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.rsa.enc.m);
- else
- if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
- dma_unmap_single(dev, qat_req->in.rsa.enc.m,
- ctx->key_sz, DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+ dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
+ DMA_TO_DEVICE);
+ kfree_sensitive(qat_req->src_align);
return ret;
}
@@ -776,7 +785,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
struct qat_asym_request *qat_req =
PTR_ALIGN(akcipher_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
- int ret, ctr = 0;
+ gfp_t flags = qat_algs_alloc_flags(&req->base);
+ u8 *vaddr;
+ int ret;
if (unlikely(!ctx->n || !ctx->d))
return -EINVAL;
@@ -785,6 +796,10 @@ static int qat_rsa_dec(struct akcipher_request *req)
req->dst_len = ctx->key_sz;
return -EOVERFLOW;
}
+
+ if (req->src_len > ctx->key_sz)
+ return -EINVAL;
+
memset(msg, '\0', sizeof(*msg));
ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
ICP_QAT_FW_COMN_REQ_FLAG_SET);
@@ -823,40 +838,37 @@ static int qat_rsa_dec(struct akcipher_request *req)
*/
if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
qat_req->src_align = NULL;
- qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
- req->dst_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
- return ret;
-
+ vaddr = sg_virt(req->src);
} else {
int shift = ctx->key_sz - req->src_len;
- qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
- &qat_req->in.rsa.dec.c,
- GFP_KERNEL);
+ qat_req->src_align = kzalloc(ctx->key_sz, flags);
if (unlikely(!qat_req->src_align))
return ret;
scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
0, req->src_len, 0);
+ vaddr = qat_req->src_align;
}
- if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
- qat_req->dst_align = NULL;
- qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
- req->dst_len,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
- goto unmap_src;
+ qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
+ goto unmap_src;
+ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+ qat_req->dst_align = NULL;
+ vaddr = sg_virt(req->dst);
} else {
- qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
- &qat_req->out.rsa.dec.m,
- GFP_KERNEL);
+ qat_req->dst_align = kzalloc(ctx->key_sz, flags);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
-
+ vaddr = qat_req->dst_align;
}
+ qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
+ goto unmap_dst;
if (ctx->crt_mode)
qat_req->in.rsa.in_tab[6] = 0;
@@ -884,13 +896,14 @@ static int qat_rsa_dec(struct akcipher_request *req)
msg->input_param_count = 3;
msg->output_param_count = 1;
- do {
- ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
- } while (ret == -EBUSY && ctr++ < 100);
- if (!ret)
- return -EINPROGRESS;
+ ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+ if (ret == -ENOSPC)
+ goto unmap_all;
+
+ return ret;
+unmap_all:
if (!dma_mapping_error(dev, qat_req->phy_out))
dma_unmap_single(dev, qat_req->phy_out,
sizeof(struct qat_rsa_output_params),
@@ -901,21 +914,15 @@ unmap_in_params:
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
unmap_dst:
- if (qat_req->dst_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
- qat_req->out.rsa.dec.m);
- else
- if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
- dma_unmap_single(dev, qat_req->out.rsa.dec.m,
- ctx->key_sz, DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+ dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+ ctx->key_sz, DMA_FROM_DEVICE);
+ kfree_sensitive(qat_req->dst_align);
unmap_src:
- if (qat_req->src_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.rsa.dec.c);
- else
- if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
- dma_unmap_single(dev, qat_req->in.rsa.dec.c,
- ctx->key_sz, DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+ dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
+ DMA_TO_DEVICE);
+ kfree_sensitive(qat_req->src_align);
return ret;
}
@@ -1233,18 +1240,8 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
- if (ctx->n)
- dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
- if (ctx->e)
- dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
- if (ctx->d) {
- memset(ctx->d, '\0', ctx->key_sz);
- dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
- }
+ qat_rsa_clear_ctx(dev, ctx);
qat_crypto_put_instance(ctx->inst);
- ctx->n = NULL;
- ctx->e = NULL;
- ctx->d = NULL;
}
static struct akcipher_alg rsa = {
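The same conversion repeats for every temporary buffer in this file: dma_alloc_coherent()/dma_free_coherent() is replaced by kzalloc() (with gfp flags derived from the request) plus a streaming dma_map_single(), and kfree_sensitive() wipes the buffer on release. A condensed sketch of the new pattern, illustrative only, with dev, len and flags standing in for the values the real code takes from the QAT instance and the crypto request:

static int map_padded_buffer(struct device *dev, size_t len, gfp_t flags)
{
	dma_addr_t handle;
	u8 *buf;

	buf = kzalloc(len, flags);
	if (!buf)
		return -ENOMEM;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		kfree_sensitive(buf);		/* zeroes before freeing */
		return -ENOMEM;
	}

	/* ... hand 'handle' to the firmware request; on completion: */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	kfree_sensitive(buf);
	return 0;
}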
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 67c9588e89df..9341d892533a 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -161,13 +161,6 @@ int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
- /* Temporarily set the number of crypto instances to zero to avoid
- * registering the crypto algorithms.
- * This will be removed when the algorithms will support the
- * CRYPTO_TFM_REQ_MAY_BACKLOG flag
- */
- instances = 0;
-
for (i = 0; i < instances; i++) {
val = i;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
@@ -353,6 +346,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
&inst->pke_rx);
if (ret)
goto err;
+
+ INIT_LIST_HEAD(&inst->backlog.list);
+ spin_lock_init(&inst->backlog.lock);
}
return 0;
err:
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index b6a4c95ae003..df3c738ce323 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -9,6 +9,19 @@
#include "adf_accel_devices.h"
#include "icp_qat_fw_la.h"
+struct qat_instance_backlog {
+ struct list_head list;
+ spinlock_t lock; /* protects backlog list */
+};
+
+struct qat_alg_req {
+ u32 *fw_req;
+ struct adf_etr_ring_data *tx_ring;
+ struct crypto_async_request *base;
+ struct list_head list;
+ struct qat_instance_backlog *backlog;
+};
+
struct qat_crypto_instance {
struct adf_etr_ring_data *sym_tx;
struct adf_etr_ring_data *sym_rx;
@@ -19,8 +32,29 @@ struct qat_crypto_instance {
unsigned long state;
int id;
atomic_t refctr;
+ struct qat_instance_backlog backlog;
};
+#define QAT_MAX_BUFF_DESC 4
+
+struct qat_alg_buf {
+ u32 len;
+ u32 resrvd;
+ u64 addr;
+} __packed;
+
+struct qat_alg_buf_list {
+ u64 resrvd;
+ u32 num_bufs;
+ u32 num_mapped_bufs;
+ struct qat_alg_buf bufers[];
+} __packed;
+
+struct qat_alg_fixed_buf_list {
+ struct qat_alg_buf_list sgl_hdr;
+ struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
+} __packed __aligned(64);
+
struct qat_crypto_request_buffs {
struct qat_alg_buf_list *bl;
dma_addr_t blp;
@@ -28,6 +62,10 @@ struct qat_crypto_request_buffs {
dma_addr_t bloutp;
size_t sz;
size_t sz_out;
+ bool sgl_src_valid;
+ bool sgl_dst_valid;
+ struct qat_alg_fixed_buf_list sgl_src;
+ struct qat_alg_fixed_buf_list sgl_dst;
};
struct qat_crypto_request;
@@ -53,6 +91,7 @@ struct qat_crypto_request {
u8 iv[AES_BLOCK_SIZE];
};
bool encryption;
+ struct qat_alg_req alg_req;
};
static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
@@ -70,4 +109,9 @@ static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
return true;
}
+static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
+{
+ return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
+
#endif
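Two details above tie the data-path changes together: requests with at most QAT_MAX_BUFF_DESC (4) scatterlist entries reuse the pre-allocated sgl_src/sgl_dst area embedded in the request instead of allocating a descriptor list on the fly, and qat_algs_alloc_flags() only permits GFP_KERNEL when the crypto request may sleep. A hedged sketch of the source-list selection, mirroring qat_alg_sgl_to_bufl(); pick_src_list() is a made-up name:

static struct qat_alg_buf_list *pick_src_list(struct qat_crypto_request *qat_req,
					      int n, gfp_t flags, int node)
{
	struct qat_alg_buf_list *bufl;

	if (n > QAT_MAX_BUFF_DESC)
		return kzalloc_node(struct_size(bufl, bufers, n), flags, node);

	bufl = &qat_req->buf.sgl_src.sgl_hdr;
	memset(bufl, 0, sizeof(*bufl));
	qat_req->buf.sgl_src_valid = true;	/* tells the free path not to kfree() */
	return bufl;
}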
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 4bfd8f3566f7..7bba35280dac 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -695,6 +695,7 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->pci_dev = pci_info->pci_dev;
switch (handle->pci_dev->device) {
case ADF_4XXX_PCI_DEVICE_ID:
+ case ADF_401XX_PCI_DEVICE_ID:
handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = false;
handle->chip_info->lm2lm3 = true;
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 6356402a2c9e..0fe5a474aa45 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -519,7 +519,7 @@ qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
return NULL;
}
-static unsigned int
+static int
qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
struct icp_qat_uof_image *image)
{
@@ -731,6 +731,7 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
return ICP_QAT_AC_C3XXX_DEV_TYPE;
case ADF_4XXX_PCI_DEVICE_ID:
+ case ADF_401XX_PCI_DEVICE_ID:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
default:
pr_err("QAT: unsupported device 0x%x\n",
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 09599fe4d2f3..cb3bdd3618fb 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -7,6 +7,8 @@
#include "adf_dh895xcc_hw_data.h"
#include "icp_qat_hw.h"
+#define ADF_DH895XCC_VF_MSK 0xFFFFFFFF
+
/* Worker thread to service arbiter mappings */
static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
@@ -58,17 +60,24 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
- ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_COMPRESSION;
/* Read accelerator capabilities mask */
pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
- if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE)
+ /* A set bit in legfuses means the feature is OFF in this SKU */
+ if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
- if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
+ if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
@@ -100,43 +109,6 @@ static const u32 *adf_get_arbiter_mapping(void)
return thrd_to_arb_map;
}
-static void adf_enable_ints(struct adf_accel_dev *accel_dev)
-{
- void __iomem *addr;
-
- addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
-
- /* Enable bundle and misc interrupts */
- ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
- accel_dev->pf.vf_info ? 0 :
- BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1);
- ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
- ADF_DH895XCC_SMIA1_MASK);
-}
-
-static u32 get_vf2pf_sources(void __iomem *pmisc_bar)
-{
- u32 errsou3, errmsk3, errsou5, errmsk5, vf_int_mask;
-
- /* Get the interrupt sources triggered by VFs */
- errsou3 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU3);
- vf_int_mask = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3);
-
- /* To avoid adding duplicate entries to work queue, clear
- * vf_int_mask_sets bits that are already masked in ERRMSK register.
- */
- errmsk3 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK3);
- vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3);
-
- /* Do the same for ERRSOU5 */
- errsou5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU5);
- errmsk5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK5);
- vf_int_mask |= ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
- vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
-
- return vf_int_mask;
-}
-
static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
/* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
@@ -150,27 +122,71 @@ static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
if (vf_mask >> 16) {
u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
& ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
-
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
}
}
-static void disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+static void disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
{
+ u32 val;
+
/* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
- if (vf_mask & 0xFFFF) {
- u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
- | ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask);
- ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
- }
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ | ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
/* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
- if (vf_mask >> 16) {
- u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
- | ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+ | ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+}
- ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
- }
+static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+ u32 sources, pending, disabled;
+ u32 errsou3, errmsk3;
+ u32 errsou5, errmsk5;
+
+ /* Get the interrupt sources triggered by VFs */
+ errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+ errsou5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU5);
+ sources = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3)
+ | ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
+
+ if (!sources)
+ return 0;
+
+ /* Get the already disabled interrupts */
+ errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+ errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5);
+ disabled = ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3)
+ | ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
+
+ pending = sources & ~disabled;
+ if (!pending)
+ return 0;
+
+ /* Due to HW limitations, when disabling the interrupts, we can't
+ * just disable the requested sources, as this would lead to missed
+ * interrupts if the sources change just before writing to ERRMSK3 and
+ * ERRMSK5.
+ * To work around it, disable all and re-enable only the sources that
+ * are not pending and were not already disabled. Re-enabling will
+ * trigger a new interrupt for the sources that have changed in the
+ * meantime, if any.
+ */
+ errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
+ errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+
+ errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
+ errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+
+ /* Return the sources of the (new) interrupt(s) */
+ return pending;
}
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
@@ -215,14 +231,14 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
- hw_data->enable_ints = adf_enable_ints;
+ hw_data->enable_ints = adf_gen2_enable_ints;
hw_data->reset_device = adf_reset_sbr;
hw_data->disable_iov = adf_disable_sriov;
adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
- hw_data->pfvf_ops.get_vf2pf_sources = get_vf2pf_sources;
hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
- hw_data->pfvf_ops.disable_vf2pf_interrupts = disable_vf2pf_interrupts;
+ hw_data->pfvf_ops.disable_all_vf2pf_interrupts = disable_all_vf2pf_interrupts;
+ hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts;
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
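A concrete walk-through of the ERRMSK dance above, with bit i standing for VF i (the real code maps these bits into ERRMSK3/ERRMSK5 positions); the values are hypothetical and the function is illustrative only:

static u32 example_disable_pending(void)
{
	u32 sources  = BIT(0) | BIT(3);		/* VF0 and VF3 raised an interrupt */
	u32 disabled = BIT(3);			/* VF3 was already masked */
	u32 pending  = sources & ~disabled;	/* only VF0 is new work */
	u32 errmsk   = disabled;

	errmsk |= GENMASK(31, 0);		/* step 1: mask every VF */
	errmsk &= (sources | disabled);		/* step 2: unmask the idle ones */

	/* errmsk ends up as BIT(0) | BIT(3): VF0 stays masked until its
	 * message is handled, VF3 stays masked as before; BIT(0) is what
	 * gets returned and scheduled for servicing.
	 */
	return pending;
}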
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index aa17272a1507..7b674bbe4192 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -19,10 +19,6 @@
#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F
#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF
#define ADF_DH895XCC_ETR_MAX_BANKS 32
-#define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
-#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
-#define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF
-#define ADF_DH895XCC_SMIA1_MASK 0x1
/* Masks for VF2PF interrupts */
#define ADF_DH895XCC_ERR_REG_VF2PF_L(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 51b58e57153f..6957a125b447 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -2379,6 +2379,7 @@ static const struct of_device_id of_match[] = {
{ .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, },
{ .compatible = "ti,am654-sa2ul", .data = &am654_match_data, },
{ .compatible = "ti,am64-sa2ul", .data = &am64_match_data, },
+ { .compatible = "ti,am62-sa3ul", .data = &am64_match_data, },
{},
};
MODULE_DEVICE_TABLE(of, of_match);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 25c9f825b8b5..c9ad6c213090 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1709,7 +1709,7 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
struct talitos_desc *desc2 = (struct talitos_desc *)
(edesc->buf + edesc->dma_len);
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+ unmap_single_talitos_ptr(dev, &desc->ptr[5], DMA_FROM_DEVICE);
if (desc->next_desc &&
desc->ptr[5].ptr != desc2->ptr[5].ptr)
unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
@@ -1721,8 +1721,8 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
/* When using hashctx-in, must unmap it. */
- if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
+ if (from_talitos_ptr_len(&desc->ptr[1], is_sec1))
+ unmap_single_talitos_ptr(dev, &desc->ptr[1],
DMA_TO_DEVICE);
else if (desc->next_desc)
unmap_single_talitos_ptr(dev, &desc2->ptr[1],
@@ -1736,8 +1736,8 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
DMA_BIDIRECTIONAL);
- if (edesc->desc.next_desc)
- dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
+ if (desc->next_desc)
+ dma_unmap_single(dev, be32_to_cpu(desc->next_desc),
TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 5157c118d642..265ef3e96fdd 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -877,9 +877,7 @@ static int hash_dma_final(struct ahash_request *req)
__func__);
goto out;
}
- }
-
- if (!req_ctx->updated) {
+ } else {
ret = hash_setconfiguration(device_data, &ctx->config);
if (ret) {
dev_err(device_data->dev,
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index f3ec9420215e..2a60d0525cde 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -90,9 +90,12 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
}
akcipher_req = vc_akcipher_req->akcipher_req;
- if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY)
+ if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY) {
+ /* actual length may be less than the dst buffer */
+ akcipher_req->dst_len = len - sizeof(vc_req->status);
sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
vc_akcipher_req->dst_buf, akcipher_req->dst_len);
+ }
virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
}
@@ -103,54 +106,56 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
struct virtio_crypto *vcrypto = ctx->vcrypto;
uint8_t *pkey;
- unsigned int inlen;
int err;
unsigned int num_out = 0, num_in = 0;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct virtio_crypto_session_input *input;
+ struct virtio_crypto_ctrl_request *vc_ctrl_req;
pkey = kmemdup(key, keylen, GFP_ATOMIC);
if (!pkey)
return -ENOMEM;
- spin_lock(&vcrypto->ctrl_lock);
- memcpy(&vcrypto->ctrl.header, header, sizeof(vcrypto->ctrl.header));
- memcpy(&vcrypto->ctrl.u, para, sizeof(vcrypto->ctrl.u));
- vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+ vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+ if (!vc_ctrl_req) {
+ err = -ENOMEM;
+ goto out;
+ }
- sg_init_one(&outhdr_sg, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+ ctrl = &vc_ctrl_req->ctrl;
+ memcpy(&ctrl->header, header, sizeof(ctrl->header));
+ memcpy(&ctrl->u, para, sizeof(ctrl->u));
+ input = &vc_ctrl_req->input;
+ input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+
+ sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr_sg;
sg_init_one(&key_sg, pkey, keylen);
sgs[num_out++] = &key_sg;
- sg_init_one(&inhdr_sg, &vcrypto->input, sizeof(vcrypto->input));
+ sg_init_one(&inhdr_sg, input, sizeof(*input));
sgs[num_out + num_in++] = &inhdr_sg;
- err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in, vcrypto, GFP_ATOMIC);
+ err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
if (err < 0)
goto out;
- virtqueue_kick(vcrypto->ctrl_vq);
- while (!virtqueue_get_buf(vcrypto->ctrl_vq, &inlen) &&
- !virtqueue_is_broken(vcrypto->ctrl_vq))
- cpu_relax();
-
- if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
+ if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
+ pr_err("virtio_crypto: Create session failed status: %u\n",
+ le32_to_cpu(input->status));
err = -EINVAL;
goto out;
}
- ctx->session_id = le64_to_cpu(vcrypto->input.session_id);
+ ctx->session_id = le64_to_cpu(input->session_id);
ctx->session_valid = true;
err = 0;
out:
- spin_unlock(&vcrypto->ctrl_lock);
+ kfree(vc_ctrl_req);
kfree_sensitive(pkey);
- if (err < 0)
- pr_err("virtio_crypto: Create session failed status: %u\n",
- le32_to_cpu(vcrypto->input.status));
-
return err;
}
@@ -159,37 +164,41 @@ static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akciphe
struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
struct virtio_crypto_destroy_session_req *destroy_session;
struct virtio_crypto *vcrypto = ctx->vcrypto;
- unsigned int num_out = 0, num_in = 0, inlen;
+ unsigned int num_out = 0, num_in = 0;
int err;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct virtio_crypto_inhdr *ctrl_status;
+ struct virtio_crypto_ctrl_request *vc_ctrl_req;
- spin_lock(&vcrypto->ctrl_lock);
- if (!ctx->session_valid) {
- err = 0;
- goto out;
- }
- vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
- vcrypto->ctrl.header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
- vcrypto->ctrl.header.queue_id = 0;
+ if (!ctx->session_valid)
+ return 0;
+
+ vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+ if (!vc_ctrl_req)
+ return -ENOMEM;
+
+ ctrl_status = &vc_ctrl_req->ctrl_status;
+ ctrl_status->status = VIRTIO_CRYPTO_ERR;
+ ctrl = &vc_ctrl_req->ctrl;
+ ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
+ ctrl->header.queue_id = 0;
- destroy_session = &vcrypto->ctrl.u.destroy_session;
+ destroy_session = &ctrl->u.destroy_session;
destroy_session->session_id = cpu_to_le64(ctx->session_id);
- sg_init_one(&outhdr_sg, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+ sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr_sg;
- sg_init_one(&inhdr_sg, &vcrypto->ctrl_status.status, sizeof(vcrypto->ctrl_status.status));
+ sg_init_one(&inhdr_sg, &ctrl_status->status, sizeof(ctrl_status->status));
sgs[num_out + num_in++] = &inhdr_sg;
- err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in, vcrypto, GFP_ATOMIC);
+ err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
if (err < 0)
goto out;
- virtqueue_kick(vcrypto->ctrl_vq);
- while (!virtqueue_get_buf(vcrypto->ctrl_vq, &inlen) &&
- !virtqueue_is_broken(vcrypto->ctrl_vq))
- cpu_relax();
-
- if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
+ if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
+ pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
+ ctrl_status->status, destroy_session->session_id);
err = -EINVAL;
goto out;
}
@@ -198,11 +207,7 @@ static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akciphe
ctx->session_valid = false;
out:
- spin_unlock(&vcrypto->ctrl_lock);
- if (err < 0) {
- pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
- vcrypto->ctrl_status.status, destroy_session->session_id);
- }
+ kfree(vc_ctrl_req);
return err;
}
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index e693d4ee83a6..59a4c0259456 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -13,6 +13,7 @@
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
+#include <uapi/linux/virtio_crypto.h>
/* Internal representation of a data virtqueue */
@@ -65,11 +66,6 @@ struct virtio_crypto {
/* Maximum size of per request */
u64 max_size;
- /* Control VQ buffers: protected by the ctrl_lock */
- struct virtio_crypto_op_ctrl_req ctrl;
- struct virtio_crypto_session_input input;
- struct virtio_crypto_inhdr ctrl_status;
-
unsigned long status;
atomic_t ref_count;
struct list_head list;
@@ -85,6 +81,18 @@ struct virtio_crypto_sym_session_info {
__u64 session_id;
};
+/*
+ * Note: there are padding fields in the request; clear them to zero before
+ * sending to the host to avoid divulging any information.
+ * E.g. virtio_crypto_ctrl_request::ctrl::u::destroy_session::padding[48]
+ */
+struct virtio_crypto_ctrl_request {
+ struct virtio_crypto_op_ctrl_req ctrl;
+ struct virtio_crypto_session_input input;
+ struct virtio_crypto_inhdr ctrl_status;
+ struct completion compl;
+};
+
struct virtio_crypto_request;
typedef void (*virtio_crypto_data_callback)
(struct virtio_crypto_request *vc_req, int len);
@@ -134,5 +142,8 @@ int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto);
int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);
+int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
+ unsigned int out_sgs, unsigned int in_sgs,
+ struct virtio_crypto_ctrl_request *vc_ctrl_req);
#endif /* _VIRTIO_CRYPTO_COMMON_H */
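
The new struct virtio_crypto_ctrl_request is always allocated with kzalloc() precisely because of the padding note above: the whole buffer, including compiler-inserted and explicit padding bytes, must be zero before it is handed to the host. The following is a small user-space sketch of the same rule, using calloc() in place of kzalloc(); the struct layout is illustrative.

/* Illustrative user-space sketch only, not part of the patch. It mimics the
 * "zero the whole request so struct padding cannot leak stale data" rule
 * described in the comment added above. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct demo_req {
	uint32_t opcode;	/* 4 bytes */
	/* the compiler inserts 4 bytes of padding here on LP64 targets */
	uint64_t session_id;
	uint8_t  padding[48];	/* explicit padding, as in destroy_session */
};

int main(void)
{
	struct demo_req *req = calloc(1, sizeof(*req));	/* like kzalloc() */

	if (!req)
		return 1;
	req->opcode = 0x2;
	req->session_id = 0x1234;
	/* Every byte not explicitly written is guaranteed to be zero, so a
	 * host reading the full buffer sees no stale memory contents. */
	for (size_t i = 0; i < sizeof(req->padding); i++)
		if (req->padding[i])
			return 2;
	puts("padding is zeroed");
	free(req);
	return 0;
}
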
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index c6f482db0bc0..1198bd306365 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -22,6 +22,56 @@ virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
}
}
+static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
+{
+ complete(&vc_ctrl_req->compl);
+}
+
+static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
+{
+ struct virtio_crypto *vcrypto = vq->vdev->priv;
+ struct virtio_crypto_ctrl_request *vc_ctrl_req;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
+ do {
+ virtqueue_disable_cb(vq);
+ while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
+ spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+ virtio_crypto_ctrlq_callback(vc_ctrl_req);
+ spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
+ }
+ if (unlikely(virtqueue_is_broken(vq)))
+ break;
+ } while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+}
+
+int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
+ unsigned int out_sgs, unsigned int in_sgs,
+ struct virtio_crypto_ctrl_request *vc_ctrl_req)
+{
+ int err;
+ unsigned long flags;
+
+ init_completion(&vc_ctrl_req->compl);
+
+ spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
+ err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
+ if (err < 0) {
+ spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+ return err;
+ }
+
+ virtqueue_kick(vcrypto->ctrl_vq);
+ spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+
+ wait_for_completion(&vc_ctrl_req->compl);
+
+ return 0;
+}
+
static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
struct virtio_crypto *vcrypto = vq->vdev->priv;
@@ -73,7 +123,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
goto err_names;
/* Parameters for control virtqueue */
- callbacks[total_vqs - 1] = NULL;
+ callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
names[total_vqs - 1] = "controlq";
/* Allocate/initialize parameters for data virtqueues */
@@ -94,7 +144,8 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
spin_lock_init(&vi->data_vq[i].lock);
vi->data_vq[i].vq = vqs[i];
/* Initialize crypto engine */
- vi->data_vq[i].engine = crypto_engine_alloc_init(dev, 1);
+ vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
+ virtqueue_get_vring_size(vqs[i]));
if (!vi->data_vq[i].engine) {
ret = -ENOMEM;
goto err_engine;
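
virtio_crypto_ctrl_vq_request() above replaces the old busy-wait on virtqueue_get_buf() with a per-request completion that the new controlq callback signals. A rough user-space analogue of that pattern follows, using a pthread condition variable; all names are hypothetical and only the shape of the submit/sleep/complete flow is meant to carry over.

/* User-space analogue (assumed names, not kernel code) of the per-request
 * completion pattern: submit, then sleep until the "interrupt handler"
 * (here, a worker thread) completes this specific request. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct ctrl_req {
	pthread_mutex_t lock;
	pthread_cond_t  done_cv;
	int             done;
};

static void complete_req(struct ctrl_req *req)	/* plays the vq callback */
{
	pthread_mutex_lock(&req->lock);
	req->done = 1;
	pthread_cond_signal(&req->done_cv);
	pthread_mutex_unlock(&req->lock);
}

static void wait_for_req(struct ctrl_req *req)	/* plays wait_for_completion() */
{
	pthread_mutex_lock(&req->lock);
	while (!req->done)
		pthread_cond_wait(&req->done_cv, &req->lock);
	pthread_mutex_unlock(&req->lock);
}

static void *device_side(void *arg)
{
	usleep(1000);		/* pretend the host processed the request */
	complete_req(arg);
	return NULL;
}

int main(void)
{
	struct ctrl_req req = {
		.lock    = PTHREAD_MUTEX_INITIALIZER,
		.done_cv = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, device_side, &req);
	wait_for_req(&req);	/* the caller sleeps instead of spinning */
	pthread_join(t, NULL);
	puts("request completed");
	return 0;
}
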
diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
index a618c46a52b8..e553ccadbcbc 100644
--- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
@@ -118,11 +118,14 @@ static int virtio_crypto_alg_skcipher_init_session(
int encrypt)
{
struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
- unsigned int tmp;
struct virtio_crypto *vcrypto = ctx->vcrypto;
int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
int err;
unsigned int num_out = 0, num_in = 0;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct virtio_crypto_session_input *input;
+ struct virtio_crypto_sym_create_session_req *sym_create_session;
+ struct virtio_crypto_ctrl_request *vc_ctrl_req;
/*
* Avoid to do DMA from the stack, switch to using
@@ -133,26 +136,29 @@ static int virtio_crypto_alg_skcipher_init_session(
if (!cipher_key)
return -ENOMEM;
- spin_lock(&vcrypto->ctrl_lock);
+ vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+ if (!vc_ctrl_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
/* Pad ctrl header */
- vcrypto->ctrl.header.opcode =
- cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
- vcrypto->ctrl.header.algo = cpu_to_le32(alg);
+ ctrl = &vc_ctrl_req->ctrl;
+ ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
+ ctrl->header.algo = cpu_to_le32(alg);
/* Set the default dataqueue id to 0 */
- vcrypto->ctrl.header.queue_id = 0;
+ ctrl->header.queue_id = 0;
- vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+ input = &vc_ctrl_req->input;
+ input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
/* Pad cipher's parameters */
- vcrypto->ctrl.u.sym_create_session.op_type =
- cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
- vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
- vcrypto->ctrl.header.algo;
- vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
- cpu_to_le32(keylen);
- vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
- cpu_to_le32(op);
-
- sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+ sym_create_session = &ctrl->u.sym_create_session;
+ sym_create_session->op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+ sym_create_session->u.cipher.para.algo = ctrl->header.algo;
+ sym_create_session->u.cipher.para.keylen = cpu_to_le32(keylen);
+ sym_create_session->u.cipher.para.op = cpu_to_le32(op);
+
+ sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr;
/* Set key */
@@ -160,45 +166,30 @@ static int virtio_crypto_alg_skcipher_init_session(
sgs[num_out++] = &key_sg;
/* Return status and session id back */
- sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
+ sg_init_one(&inhdr, input, sizeof(*input));
sgs[num_out + num_in++] = &inhdr;
- err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
- num_in, vcrypto, GFP_ATOMIC);
- if (err < 0) {
- spin_unlock(&vcrypto->ctrl_lock);
- kfree_sensitive(cipher_key);
- return err;
- }
- virtqueue_kick(vcrypto->ctrl_vq);
+ err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
+ if (err < 0)
+ goto out;
- /*
- * Trapping into the hypervisor, so the request should be
- * handled immediately.
- */
- while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
- !virtqueue_is_broken(vcrypto->ctrl_vq))
- cpu_relax();
-
- if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
- spin_unlock(&vcrypto->ctrl_lock);
+ if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
pr_err("virtio_crypto: Create session failed status: %u\n",
- le32_to_cpu(vcrypto->input.status));
- kfree_sensitive(cipher_key);
- return -EINVAL;
+ le32_to_cpu(input->status));
+ err = -EINVAL;
+ goto out;
}
if (encrypt)
- ctx->enc_sess_info.session_id =
- le64_to_cpu(vcrypto->input.session_id);
+ ctx->enc_sess_info.session_id = le64_to_cpu(input->session_id);
else
- ctx->dec_sess_info.session_id =
- le64_to_cpu(vcrypto->input.session_id);
-
- spin_unlock(&vcrypto->ctrl_lock);
+ ctx->dec_sess_info.session_id = le64_to_cpu(input->session_id);
+ err = 0;
+out:
+ kfree(vc_ctrl_req);
kfree_sensitive(cipher_key);
- return 0;
+ return err;
}
static int virtio_crypto_alg_skcipher_close_session(
@@ -206,60 +197,55 @@ static int virtio_crypto_alg_skcipher_close_session(
int encrypt)
{
struct scatterlist outhdr, status_sg, *sgs[2];
- unsigned int tmp;
struct virtio_crypto_destroy_session_req *destroy_session;
struct virtio_crypto *vcrypto = ctx->vcrypto;
int err;
unsigned int num_out = 0, num_in = 0;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct virtio_crypto_inhdr *ctrl_status;
+ struct virtio_crypto_ctrl_request *vc_ctrl_req;
- spin_lock(&vcrypto->ctrl_lock);
- vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
+ vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+ if (!vc_ctrl_req)
+ return -ENOMEM;
+
+ ctrl_status = &vc_ctrl_req->ctrl_status;
+ ctrl_status->status = VIRTIO_CRYPTO_ERR;
/* Pad ctrl header */
- vcrypto->ctrl.header.opcode =
- cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
+ ctrl = &vc_ctrl_req->ctrl;
+ ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
/* Set the default virtqueue id to 0 */
- vcrypto->ctrl.header.queue_id = 0;
+ ctrl->header.queue_id = 0;
- destroy_session = &vcrypto->ctrl.u.destroy_session;
+ destroy_session = &ctrl->u.destroy_session;
if (encrypt)
- destroy_session->session_id =
- cpu_to_le64(ctx->enc_sess_info.session_id);
+ destroy_session->session_id = cpu_to_le64(ctx->enc_sess_info.session_id);
else
- destroy_session->session_id =
- cpu_to_le64(ctx->dec_sess_info.session_id);
+ destroy_session->session_id = cpu_to_le64(ctx->dec_sess_info.session_id);
- sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+ sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr;
/* Return status and session id back */
- sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
- sizeof(vcrypto->ctrl_status.status));
+ sg_init_one(&status_sg, &ctrl_status->status, sizeof(ctrl_status->status));
sgs[num_out + num_in++] = &status_sg;
- err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
- num_in, vcrypto, GFP_ATOMIC);
- if (err < 0) {
- spin_unlock(&vcrypto->ctrl_lock);
- return err;
- }
- virtqueue_kick(vcrypto->ctrl_vq);
-
- while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
- !virtqueue_is_broken(vcrypto->ctrl_vq))
- cpu_relax();
+ err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
+ if (err < 0)
+ goto out;
- if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
- spin_unlock(&vcrypto->ctrl_lock);
+ if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
- vcrypto->ctrl_status.status,
- destroy_session->session_id);
+ ctrl_status->status, destroy_session->session_id);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
- spin_unlock(&vcrypto->ctrl_lock);
- return 0;
+ err = 0;
+out:
+ kfree(vc_ctrl_req);
+ return err;
}
static int virtio_crypto_alg_skcipher_init_sessions(
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index 709670d2b553..2560cfea1dec 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -2,21 +2,10 @@
obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
-ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
-override flavour := linux-ppc64le
-else
-override flavour := linux-ppc64
-endif
-
-quiet_cmd_perl = PERL $@
- cmd_perl = $(PERL) $(<) $(flavour) > $(@)
+quiet_cmd_perl = PERL $@
+ cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@
targets += aesp8-ppc.S ghashp8-ppc.S
-$(obj)/aesp8-ppc.S: $(src)/aesp8-ppc.pl FORCE
- $(call if_changed,perl)
-
-$(obj)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl FORCE
+$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
$(call if_changed,perl)
-
-clean-files := aesp8-ppc.S ghashp8-ppc.S
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index b88ab956bb7c..f64e3984689f 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -98,4 +98,8 @@ config CXL_PORT
default CXL_BUS
tristate
+config CXL_SUSPEND
+ def_bool y
+ depends on SUSPEND && CXL_MEM
+
endif
diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile
index ce267ef11d93..a78270794150 100644
--- a/drivers/cxl/Makefile
+++ b/drivers/cxl/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CXL_BUS) += core/
+obj-y += core/
obj-$(CONFIG_CXL_PCI) += cxl_pci.o
obj-$(CONFIG_CXL_MEM) += cxl_mem.o
obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index d15a6aec0331..40286f5df812 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -275,6 +275,13 @@ static int add_root_nvdimm_bridge(struct device *match, void *data)
return 1;
}
+static struct lock_class_key cxl_root_key;
+
+static void cxl_acpi_lock_reset_class(void *dev)
+{
+ device_lock_reset_class(dev);
+}
+
static int cxl_acpi_probe(struct platform_device *pdev)
{
int rc;
@@ -283,6 +290,12 @@ static int cxl_acpi_probe(struct platform_device *pdev)
struct acpi_device *adev = ACPI_COMPANION(host);
struct cxl_cfmws_context ctx;
+ device_lock_set_class(&pdev->dev, &cxl_root_key);
+ rc = devm_add_action_or_reset(&pdev->dev, cxl_acpi_lock_reset_class,
+ &pdev->dev);
+ if (rc)
+ return rc;
+
root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
if (IS_ERR(root_port))
return PTR_ERR(root_port);
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 6d37cd78b151..9d35085d25af 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CXL_BUS) += cxl_core.o
+obj-$(CONFIG_CXL_SUSPEND) += suspend.o
ccflags-y += -I$(srctree)/drivers/cxl
cxl_core-y := port.o
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index be61a0d8016b..54f434733b56 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -35,6 +35,7 @@ static bool cxl_raw_allow_all;
.flags = _flags, \
}
+#define CXL_VARIABLE_PAYLOAD ~0U
/*
* This table defines the supported mailbox commands for the driver. This table
* is made up of a UAPI structure. Non-negative values as parameters in the
@@ -44,26 +45,26 @@ static bool cxl_raw_allow_all;
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
- CXL_CMD(RAW, ~0, ~0, 0),
+ CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
- CXL_CMD(GET_SUPPORTED_LOGS, 0, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
+ CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
- CXL_CMD(GET_LSA, 0x8, ~0, 0),
+ CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
- CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
+ CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
- CXL_CMD(SET_LSA, ~0, 0, 0),
+ CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
- CXL_CMD(GET_POISON, 0x10, ~0, 0),
+ CXL_CMD(GET_POISON, 0x10, CXL_VARIABLE_PAYLOAD, 0),
CXL_CMD(INJECT_POISON, 0x8, 0, 0),
CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
- CXL_CMD(GET_SCAN_MEDIA, 0, ~0, 0),
+ CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0),
};
/*
@@ -127,6 +128,17 @@ static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
return NULL;
}
+static const char *cxl_mem_opcode_to_name(u16 opcode)
+{
+ struct cxl_mem_command *c;
+
+ c = cxl_mem_find_command(opcode);
+ if (!c)
+ return NULL;
+
+ return cxl_command_names[c->info.id].name;
+}
+
/**
* cxl_mbox_send_cmd() - Send a mailbox command to a device.
* @cxlds: The device data for the operation
@@ -136,7 +148,7 @@ static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
* @out: Caller allocated buffer for the output.
* @out_size: Expected size of output.
*
- * Context: Any context. Will acquire and release mbox_mutex.
+ * Context: Any context.
* Return:
* * %>=0 - Number of bytes returned in @out.
* * %-E2BIG - Payload is too large for hardware.
@@ -169,17 +181,17 @@ int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
if (rc)
return rc;
- /* TODO: Map return code to proper kernel style errno */
- if (mbox_cmd.return_code != CXL_MBOX_SUCCESS)
- return -ENXIO;
+ if (mbox_cmd.return_code != CXL_MBOX_CMD_RC_SUCCESS)
+ return cxl_mbox_cmd_rc2errno(&mbox_cmd);
/*
* Variable sized commands can't be validated and so it's up to the
* caller to do that if they wish.
*/
- if (cmd->info.size_out >= 0 && mbox_cmd.size_out != out_size)
- return -EIO;
-
+ if (cmd->info.size_out != CXL_VARIABLE_PAYLOAD) {
+ if (mbox_cmd.size_out != out_size)
+ return -EIO;
+ }
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mbox_send_cmd, CXL);
@@ -208,75 +220,122 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
}
/**
- * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
- * @cxlds: The device data for the operation
- * @send_cmd: &struct cxl_send_command copied in from userspace.
- * @out_cmd: Sanitized and populated &struct cxl_mem_command.
+ * cxl_payload_from_user_allowed() - Check contents of in_payload.
+ * @opcode: The mailbox command opcode.
+ * @payload_in: Pointer to the input payload passed in from user space.
*
* Return:
- * * %0 - @out_cmd is ready to send.
- * * %-ENOTTY - Invalid command specified.
- * * %-EINVAL - Reserved fields or invalid values were used.
- * * %-ENOMEM - Input or output buffer wasn't sized properly.
- * * %-EPERM - Attempted to use a protected command.
- * * %-EBUSY - Kernel has claimed exclusive access to this opcode
+ * * true - payload_in passes check for @opcode.
+ * * false - payload_in contains invalid or unsupported values.
*
- * The result of this command is a fully validated command in @out_cmd that is
- * safe to send to the hardware.
+ * The driver may inspect payload contents before sending a mailbox
+ * command from user space to the device. The intent is to reject
+ * commands with input payloads that are known to be unsafe. This
+ * check is not intended to replace the user's careful selection of
+ * mailbox command parameters and makes no guarantee that the user
+ * command will succeed, nor that it is appropriate.
*
- * See handle_mailbox_cmd_from_user()
+ * The specific checks are determined by the opcode.
*/
-static int cxl_validate_cmd_from_user(struct cxl_dev_state *cxlds,
- const struct cxl_send_command *send_cmd,
- struct cxl_mem_command *out_cmd)
+static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
- const struct cxl_command_info *info;
- struct cxl_mem_command *c;
+ switch (opcode) {
+ case CXL_MBOX_OP_SET_PARTITION_INFO: {
+ struct cxl_mbox_set_partition_info *pi = payload_in;
- if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
- return -ENOTTY;
+ if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
+ return false;
+ break;
+ }
+ default:
+ break;
+ }
+ return true;
+}
- /*
- * The user can never specify an input payload larger than what hardware
- * supports, but output can be arbitrarily large (simply write out as
- * much data as the hardware provides).
- */
- if (send_cmd->in.size > cxlds->payload_size)
+static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
+ struct cxl_dev_state *cxlds, u16 opcode,
+ size_t in_size, size_t out_size, u64 in_payload)
+{
+ *mbox = (struct cxl_mbox_cmd) {
+ .opcode = opcode,
+ .size_in = in_size,
+ };
+
+ if (in_size) {
+ mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
+ in_size);
+ if (IS_ERR(mbox->payload_in))
+ return PTR_ERR(mbox->payload_in);
+
+ if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
+ dev_dbg(cxlds->dev, "%s: input payload not allowed\n",
+ cxl_mem_opcode_to_name(opcode));
+ kvfree(mbox->payload_in);
+ return -EBUSY;
+ }
+ }
+
+ /* Prepare to handle a full payload for variable sized output */
+ if (out_size == CXL_VARIABLE_PAYLOAD)
+ mbox->size_out = cxlds->payload_size;
+ else
+ mbox->size_out = out_size;
+
+ if (mbox->size_out) {
+ mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
+ if (!mbox->payload_out) {
+ kvfree(mbox->payload_in);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
+{
+ kvfree(mbox->payload_in);
+ kvfree(mbox->payload_out);
+}
+
+static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
+ const struct cxl_send_command *send_cmd,
+ struct cxl_dev_state *cxlds)
+{
+ if (send_cmd->raw.rsvd)
return -EINVAL;
/*
- * Checks are bypassed for raw commands but a WARN/taint will occur
- * later in the callchain
+ * Unlike supported commands, the output size of RAW commands
+ * gets passed along without further checking, so it must be
+ * validated here.
*/
- if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) {
- const struct cxl_mem_command temp = {
- .info = {
- .id = CXL_MEM_COMMAND_ID_RAW,
- .flags = 0,
- .size_in = send_cmd->in.size,
- .size_out = send_cmd->out.size,
- },
- .opcode = send_cmd->raw.opcode
- };
+ if (send_cmd->out.size > cxlds->payload_size)
+ return -EINVAL;
- if (send_cmd->raw.rsvd)
- return -EINVAL;
+ if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
+ return -EPERM;
- /*
- * Unlike supported commands, the output size of RAW commands
- * gets passed along without further checking, so it must be
- * validated here.
- */
- if (send_cmd->out.size > cxlds->payload_size)
- return -EINVAL;
+ dev_WARN_ONCE(cxlds->dev, true, "raw command path used\n");
- if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
- return -EPERM;
+ *mem_cmd = (struct cxl_mem_command) {
+ .info = {
+ .id = CXL_MEM_COMMAND_ID_RAW,
+ .size_in = send_cmd->in.size,
+ .size_out = send_cmd->out.size,
+ },
+ .opcode = send_cmd->raw.opcode
+ };
- memcpy(out_cmd, &temp, sizeof(temp));
+ return 0;
+}
- return 0;
- }
+static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
+ const struct cxl_send_command *send_cmd,
+ struct cxl_dev_state *cxlds)
+{
+ struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
+ const struct cxl_command_info *info = &c->info;
if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
return -EINVAL;
@@ -287,10 +346,6 @@ static int cxl_validate_cmd_from_user(struct cxl_dev_state *cxlds,
if (send_cmd->in.rsvd || send_cmd->out.rsvd)
return -EINVAL;
- /* Convert user's command into the internal representation */
- c = &cxl_mem_commands[send_cmd->id];
- info = &c->info;
-
/* Check that the command is enabled for hardware */
if (!test_bit(info->id, cxlds->enabled_cmds))
return -ENOTTY;
@@ -300,22 +355,74 @@ static int cxl_validate_cmd_from_user(struct cxl_dev_state *cxlds,
return -EBUSY;
/* Check the input buffer is the expected size */
- if (info->size_in >= 0 && info->size_in != send_cmd->in.size)
+ if (info->size_in != send_cmd->in.size)
return -ENOMEM;
/* Check the output buffer is at least large enough */
- if (info->size_out >= 0 && send_cmd->out.size < info->size_out)
+ if (send_cmd->out.size < info->size_out)
return -ENOMEM;
- memcpy(out_cmd, c, sizeof(*c));
- out_cmd->info.size_in = send_cmd->in.size;
+ *mem_cmd = (struct cxl_mem_command) {
+ .info = {
+ .id = info->id,
+ .flags = info->flags,
+ .size_in = send_cmd->in.size,
+ .size_out = send_cmd->out.size,
+ },
+ .opcode = c->opcode
+ };
+
+ return 0;
+}
+
+/**
+ * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
+ * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
+ * @cxlds: The device data for the operation
+ * @send_cmd: &struct cxl_send_command copied in from userspace.
+ *
+ * Return:
+ * * %0 - @mbox_cmd is ready to send.
+ * * %-ENOTTY - Invalid command specified.
+ * * %-EINVAL - Reserved fields or invalid values were used.
+ * * %-ENOMEM - Input or output buffer wasn't sized properly.
+ * * %-EPERM - Attempted to use a protected command.
+ * * %-EBUSY - Kernel has claimed exclusive access to this opcode
+ *
+ * The result of this command is a fully validated command in @mbox_cmd that is
+ * safe to send to the hardware.
+ */
+static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
+ struct cxl_dev_state *cxlds,
+ const struct cxl_send_command *send_cmd)
+{
+ struct cxl_mem_command mem_cmd;
+ int rc;
+
+ if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
+ return -ENOTTY;
+
/*
- * XXX: out_cmd->info.size_out will be controlled by the driver, and the
- * specified number of bytes @send_cmd->out.size will be copied back out
- * to userspace.
+ * The user can never specify an input payload larger than what hardware
+ * supports, but output can be arbitrarily large (simply write out as
+ * much data as the hardware provides).
*/
+ if (send_cmd->in.size > cxlds->payload_size)
+ return -EINVAL;
- return 0;
+ /* Sanitize and construct a cxl_mem_command */
+ if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
+ rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxlds);
+ else
+ rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxlds);
+
+ if (rc)
+ return rc;
+
+ /* Sanitize and construct a cxl_mbox_cmd */
+ return cxl_mbox_cmd_ctor(mbox_cmd, cxlds, mem_cmd.opcode,
+ mem_cmd.info.size_in, mem_cmd.info.size_out,
+ send_cmd->in.payload);
}
int cxl_query_cmd(struct cxl_memdev *cxlmd,
@@ -355,8 +462,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
/**
* handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
* @cxlds: The device data for the operation
- * @cmd: The validated command.
- * @in_payload: Pointer to userspace's input payload.
+ * @mbox_cmd: The validated mailbox command.
* @out_payload: Pointer to userspace's output payload.
* @size_out: (Input) Max payload size to copy out.
* (Output) Payload size hardware generated.
@@ -371,51 +477,27 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
* * %-EINTR - Mailbox acquisition interrupted.
* * %-EXXX - Transaction level failures.
*
- * Creates the appropriate mailbox command and dispatches it on behalf of a
- * userspace request. The input and output payloads are copied between
- * userspace.
+ * Dispatches a mailbox command on behalf of a userspace request.
+ * The output payload is copied to userspace.
*
* See cxl_send_cmd().
*/
static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
- const struct cxl_mem_command *cmd,
- u64 in_payload, u64 out_payload,
- s32 *size_out, u32 *retval)
+ struct cxl_mbox_cmd *mbox_cmd,
+ u64 out_payload, s32 *size_out,
+ u32 *retval)
{
struct device *dev = cxlds->dev;
- struct cxl_mbox_cmd mbox_cmd = {
- .opcode = cmd->opcode,
- .size_in = cmd->info.size_in,
- .size_out = cmd->info.size_out,
- };
int rc;
- if (cmd->info.size_out) {
- mbox_cmd.payload_out = kvzalloc(cmd->info.size_out, GFP_KERNEL);
- if (!mbox_cmd.payload_out)
- return -ENOMEM;
- }
-
- if (cmd->info.size_in) {
- mbox_cmd.payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
- cmd->info.size_in);
- if (IS_ERR(mbox_cmd.payload_in)) {
- kvfree(mbox_cmd.payload_out);
- return PTR_ERR(mbox_cmd.payload_in);
- }
- }
-
dev_dbg(dev,
"Submitting %s command for user\n"
"\topcode: %x\n"
- "\tsize: %ub\n",
- cxl_command_names[cmd->info.id].name, mbox_cmd.opcode,
- cmd->info.size_in);
-
- dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW,
- "raw command path used\n");
+ "\tsize: %zx\n",
+ cxl_mem_opcode_to_name(mbox_cmd->opcode),
+ mbox_cmd->opcode, mbox_cmd->size_in);
- rc = cxlds->mbox_send(cxlds, &mbox_cmd);
+ rc = cxlds->mbox_send(cxlds, mbox_cmd);
if (rc)
goto out;
@@ -424,22 +506,21 @@ static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
* to userspace. While the payload may have written more output than
* this it will have to be ignored.
*/
- if (mbox_cmd.size_out) {
- dev_WARN_ONCE(dev, mbox_cmd.size_out > *size_out,
+ if (mbox_cmd->size_out) {
+ dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
"Invalid return size\n");
if (copy_to_user(u64_to_user_ptr(out_payload),
- mbox_cmd.payload_out, mbox_cmd.size_out)) {
+ mbox_cmd->payload_out, mbox_cmd->size_out)) {
rc = -EFAULT;
goto out;
}
}
- *size_out = mbox_cmd.size_out;
- *retval = mbox_cmd.return_code;
+ *size_out = mbox_cmd->size_out;
+ *retval = mbox_cmd->return_code;
out:
- kvfree(mbox_cmd.payload_in);
- kvfree(mbox_cmd.payload_out);
+ cxl_mbox_cmd_dtor(mbox_cmd);
return rc;
}
@@ -448,7 +529,7 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &cxlmd->dev;
struct cxl_send_command send;
- struct cxl_mem_command c;
+ struct cxl_mbox_cmd mbox_cmd;
int rc;
dev_dbg(dev, "Send IOCTL\n");
@@ -456,17 +537,12 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
if (copy_from_user(&send, s, sizeof(send)))
return -EFAULT;
- rc = cxl_validate_cmd_from_user(cxlmd->cxlds, &send, &c);
+ rc = cxl_validate_cmd_from_user(&mbox_cmd, cxlmd->cxlds, &send);
if (rc)
return rc;
- /* Prepare to handle a full payload for variable sized output */
- if (c.info.size_out < 0)
- c.info.size_out = cxlds->payload_size;
-
- rc = handle_mailbox_cmd_from_user(cxlds, &c, send.in.payload,
- send.out.payload, &send.out.size,
- &send.retval);
+ rc = handle_mailbox_cmd_from_user(cxlds, &mbox_cmd, send.out.payload,
+ &send.out.size, &send.retval);
if (rc)
return rc;
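
The CXL_VARIABLE_PAYLOAD changes above exist because the payload size fields are unsigned, so the old "size >= 0" tests could never reject anything; a named all-ones sentinel marks variable-sized commands explicitly. A standalone sketch of the resulting check, with illustrative constants:

/* Standalone sketch, not driver code: with unsigned size fields, "size >= 0"
 * is always true, so the variable-payload case must be compared against a
 * named sentinel instead. */
#include <stdio.h>

#define VARIABLE_PAYLOAD (~0U)

static int check_out_size(unsigned int expected, unsigned int got)
{
	if (expected != VARIABLE_PAYLOAD && got != expected)
		return -1;	/* fixed-size command returned the wrong length */
	return 0;		/* variable-size commands are caller-checked */
}

int main(void)
{
	printf("fixed ok:    %d\n", check_out_size(0x50, 0x50));
	printf("fixed bad:   %d\n", check_out_size(0x50, 0x10));
	printf("variable ok: %d\n", check_out_size(VARIABLE_PAYLOAD, 0x123));
	return 0;
}
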
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 1f76b28f9826..f7cdcd33504a 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -228,6 +228,8 @@ static void detach_memdev(struct work_struct *work)
put_device(&cxlmd->dev);
}
+static struct lock_class_key cxl_memdev_key;
+
static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
const struct file_operations *fops)
{
@@ -247,6 +249,7 @@ static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
dev = &cxlmd->dev;
device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &cxl_memdev_key);
dev->parent = cxlds->dev;
dev->bus = &cxl_bus_type;
dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index c9a494d6976a..c4c99ff7b55e 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
+#include <linux/delay.h>
#include <linux/pci.h>
#include <cxlpci.h>
+#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
@@ -13,6 +16,10 @@
* a set of helpers for CXL interactions which occur via PCIe.
*/
+static unsigned short media_ready_timeout = 60;
+module_param(media_ready_timeout, ushort, 0644);
+MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");
+
struct cxl_walk_context {
struct pci_bus *bus;
struct cxl_port *port;
@@ -94,3 +101,360 @@ int devm_cxl_port_enumerate_dports(struct cxl_port *port)
return ctx.count;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL);
+
+/*
+ * Wait up to @media_ready_timeout for the device to report memory
+ * active.
+ */
+int cxl_await_media_ready(struct cxl_dev_state *cxlds)
+{
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ int d = cxlds->cxl_dvsec;
+ bool active = false;
+ u64 md_status;
+ int rc, i;
+
+ for (i = media_ready_timeout; i; i--) {
+ u32 temp;
+
+ rc = pci_read_config_dword(
+ pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp);
+ if (rc)
+ return rc;
+
+ active = FIELD_GET(CXL_DVSEC_MEM_ACTIVE, temp);
+ if (active)
+ break;
+ msleep(1000);
+ }
+
+ if (!active) {
+ dev_err(&pdev->dev,
+ "timeout awaiting memory active after %d seconds\n",
+ media_ready_timeout);
+ return -ETIMEDOUT;
+ }
+
+ md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
+ if (!CXLMDEV_READY(md_status))
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);
+
+static int wait_for_valid(struct cxl_dev_state *cxlds)
+{
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ int d = cxlds->cxl_dvsec, rc;
+ u32 val;
+
+ /*
+ * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
+ * and Size Low registers are valid. Must be set within 1 second of
+ * deassertion of reset to CXL device. Likely it is already set by the
+ * time this runs, but otherwise give a 1.5 second timeout in case of
+ * clock skew.
+ */
+ rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
+ if (rc)
+ return rc;
+
+ if (val & CXL_DVSEC_MEM_INFO_VALID)
+ return 0;
+
+ msleep(1500);
+
+ rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
+ if (rc)
+ return rc;
+
+ if (val & CXL_DVSEC_MEM_INFO_VALID)
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val)
+{
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ int d = cxlds->cxl_dvsec;
+ u16 ctrl;
+ int rc;
+
+ rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
+ if (rc < 0)
+ return rc;
+
+ if ((ctrl & CXL_DVSEC_MEM_ENABLE) == val)
+ return 1;
+ ctrl &= ~CXL_DVSEC_MEM_ENABLE;
+ ctrl |= val;
+
+ rc = pci_write_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, ctrl);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static void clear_mem_enable(void *cxlds)
+{
+ cxl_set_mem_enable(cxlds, 0);
+}
+
+static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
+{
+ int rc;
+
+ rc = cxl_set_mem_enable(cxlds, CXL_DVSEC_MEM_ENABLE);
+ if (rc < 0)
+ return rc;
+ if (rc > 0)
+ return 0;
+ return devm_add_action_or_reset(host, clear_mem_enable, cxlds);
+}
+
+static bool range_contains(struct range *r1, struct range *r2)
+{
+ return r1->start <= r2->start && r1->end >= r2->end;
+}
+
+/* require dvsec ranges to be covered by a locked platform window */
+static int dvsec_range_allowed(struct device *dev, void *arg)
+{
+ struct range *dev_range = arg;
+ struct cxl_decoder *cxld;
+ struct range root_range;
+
+ if (!is_root_decoder(dev))
+ return 0;
+
+ cxld = to_cxl_decoder(dev);
+
+ if (!(cxld->flags & CXL_DECODER_F_LOCK))
+ return 0;
+ if (!(cxld->flags & CXL_DECODER_F_RAM))
+ return 0;
+
+ root_range = (struct range) {
+ .start = cxld->platform_res.start,
+ .end = cxld->platform_res.end,
+ };
+
+ return range_contains(&root_range, dev_range);
+}
+
+static void disable_hdm(void *_cxlhdm)
+{
+ u32 global_ctrl;
+ struct cxl_hdm *cxlhdm = _cxlhdm;
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+
+ global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
+ writel(global_ctrl & ~CXL_HDM_DECODER_ENABLE,
+ hdm + CXL_HDM_DECODER_CTRL_OFFSET);
+}
+
+static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
+{
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ u32 global_ctrl;
+
+ global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
+ writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
+ hdm + CXL_HDM_DECODER_CTRL_OFFSET);
+
+ return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
+}
+
+static bool __cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
+ struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info)
+{
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ struct cxl_port *port = cxlhdm->port;
+ struct device *dev = cxlds->dev;
+ struct cxl_port *root;
+ int i, rc, allowed;
+ u32 global_ctrl;
+
+ global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
+
+ /*
+ * If the HDM Decoder Capability is already enabled then assume
+ * that some other agent like platform firmware set it up.
+ */
+ if (global_ctrl & CXL_HDM_DECODER_ENABLE) {
+ rc = devm_cxl_enable_mem(&port->dev, cxlds);
+ if (rc)
+ return false;
+ return true;
+ }
+
+ root = to_cxl_port(port->dev.parent);
+ while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
+ root = to_cxl_port(root->dev.parent);
+ if (!is_cxl_root(root)) {
+ dev_err(dev, "Failed to acquire root port for HDM enable\n");
+ return false;
+ }
+
+ for (i = 0, allowed = 0; info->mem_enabled && i < info->ranges; i++) {
+ struct device *cxld_dev;
+
+ cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i],
+ dvsec_range_allowed);
+ if (!cxld_dev) {
+ dev_dbg(dev, "DVSEC Range%d denied by platform\n", i);
+ continue;
+ }
+ dev_dbg(dev, "DVSEC Range%d allowed by platform\n", i);
+ put_device(cxld_dev);
+ allowed++;
+ }
+
+ if (!allowed) {
+ cxl_set_mem_enable(cxlds, 0);
+ info->mem_enabled = 0;
+ }
+
+ /*
+ * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
+ * [High,Low] when HDM operation is enabled the range register values
+ * are ignored by the device, but the spec also recommends matching the
+ * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
+ * are expected even though Linux does not require or maintain that
+ * match. If at least one DVSEC range is enabled and allowed, skip HDM
+ * Decoder Capability Enable.
+ */
+ if (info->mem_enabled)
+ return false;
+
+ rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
+ if (rc)
+ return false;
+
+ rc = devm_cxl_enable_mem(&port->dev, cxlds);
+ if (rc)
+ return false;
+
+ return true;
+}
+
+/**
+ * cxl_hdm_decode_init() - Setup HDM decoding for the endpoint
+ * @cxlds: Device state
+ * @cxlhdm: Mapped HDM decoder Capability
+ *
+ * Try to enable the endpoint's HDM Decoder Capability
+ */
+int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm)
+{
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ struct cxl_endpoint_dvsec_info info = { 0 };
+ int hdm_count, rc, i, ranges = 0;
+ struct device *dev = &pdev->dev;
+ int d = cxlds->cxl_dvsec;
+ u16 cap, ctrl;
+
+ if (!d) {
+ dev_dbg(dev, "No DVSEC Capability\n");
+ return -ENXIO;
+ }
+
+ rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap);
+ if (rc)
+ return rc;
+
+ rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
+ if (rc)
+ return rc;
+
+ if (!(cap & CXL_DVSEC_MEM_CAPABLE)) {
+ dev_dbg(dev, "Not MEM Capable\n");
+ return -ENXIO;
+ }
+
+ /*
+ * It is not allowed by spec for MEM.capable to be set and have 0 legacy
+ * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this
+ * driver is for a spec defined class code which must be CXL.mem
+ * capable, there is no point in continuing to enable CXL.mem.
+ */
+ hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
+ if (!hdm_count || hdm_count > 2)
+ return -EINVAL;
+
+ rc = wait_for_valid(cxlds);
+ if (rc) {
+ dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
+ return rc;
+ }
+
+ /*
+ * The current DVSEC values are moot if the memory capability is
+ * disabled, and they will remain moot after the HDM Decoder
+ * capability is enabled.
+ */
+ info.mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
+ if (!info.mem_enabled)
+ goto hdm_init;
+
+ for (i = 0; i < hdm_count; i++) {
+ u64 base, size;
+ u32 temp;
+
+ rc = pci_read_config_dword(
+ pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
+ if (rc)
+ return rc;
+
+ size = (u64)temp << 32;
+
+ rc = pci_read_config_dword(
+ pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp);
+ if (rc)
+ return rc;
+
+ size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
+
+ rc = pci_read_config_dword(
+ pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
+ if (rc)
+ return rc;
+
+ base = (u64)temp << 32;
+
+ rc = pci_read_config_dword(
+ pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp);
+ if (rc)
+ return rc;
+
+ base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;
+
+ info.dvsec_range[i] = (struct range) {
+ .start = base,
+ .end = base + size - 1
+ };
+
+ if (size)
+ ranges++;
+ }
+
+ info.ranges = ranges;
+
+ /*
+ * If DVSEC ranges are being used instead of HDM decoder registers there
+ * is no use in trying to manage those.
+ */
+hdm_init:
+ if (!__cxl_hdm_decode_init(cxlds, cxlhdm, &info)) {
+ dev_err(dev,
+ "Legacy range registers configuration prevents HDM operation.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
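
dvsec_range_allowed() above admits a DVSEC range only when a locked, RAM-capable root decoder window fully contains it, via the inclusive range_contains() helper. Below is a standalone sketch of that containment test; the addresses are made up for illustration.

/* Standalone sketch of the inclusive-interval containment test used by
 * dvsec_range_allowed(): a DVSEC range is only allowed when a locked root
 * decoder window fully covers it. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range {
	uint64_t start;
	uint64_t end;	/* inclusive */
};

static bool range_contains(const struct range *r1, const struct range *r2)
{
	return r1->start <= r2->start && r1->end >= r2->end;
}

int main(void)
{
	struct range window = { .start = 0x100000000ULL, .end = 0x1ffffffffULL };
	struct range dvsec  = { .start = 0x140000000ULL, .end = 0x17fffffffULL };
	struct range stray  = { .start = 0x0ULL,         .end = 0x3fffffffULL };

	printf("dvsec allowed: %d\n", range_contains(&window, &dvsec)); /* 1 */
	printf("stray allowed: %d\n", range_contains(&window, &stray)); /* 0 */
	return 0;
}
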
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index 8de240c4d96b..bec7cfb54ebf 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -80,6 +80,8 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
}
EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, CXL);
+static struct lock_class_key cxl_nvdimm_bridge_key;
+
static struct cxl_nvdimm_bridge *cxl_nvdimm_bridge_alloc(struct cxl_port *port)
{
struct cxl_nvdimm_bridge *cxl_nvb;
@@ -99,6 +101,7 @@ static struct cxl_nvdimm_bridge *cxl_nvdimm_bridge_alloc(struct cxl_port *port)
cxl_nvb->port = port;
cxl_nvb->state = CXL_NVB_NEW;
device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &cxl_nvdimm_bridge_key);
device_set_pm_not_required(dev);
dev->parent = &port->dev;
dev->bus = &cxl_bus_type;
@@ -121,10 +124,10 @@ static void unregister_nvb(void *_cxl_nvb)
* work to flush. Once the state has been changed to 'dead' then no new
* work can be queued by user-triggered bind.
*/
- cxl_device_lock(&cxl_nvb->dev);
+ device_lock(&cxl_nvb->dev);
flush = cxl_nvb->state != CXL_NVB_NEW;
cxl_nvb->state = CXL_NVB_DEAD;
- cxl_device_unlock(&cxl_nvb->dev);
+ device_unlock(&cxl_nvb->dev);
/*
* Even though the device core will trigger device_release_driver()
@@ -214,6 +217,8 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL);
+static struct lock_class_key cxl_nvdimm_key;
+
static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
{
struct cxl_nvdimm *cxl_nvd;
@@ -226,6 +231,7 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
dev = &cxl_nvd->dev;
cxl_nvd->cxlmd = cxlmd;
device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &cxl_nvdimm_key);
device_set_pm_not_required(dev);
dev->parent = &cxlmd->dev;
dev->bus = &cxl_bus_type;
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 2ab1ba4499b3..ea60abda6500 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -312,10 +312,10 @@ static void cxl_port_release(struct device *dev)
struct cxl_port *port = to_cxl_port(dev);
struct cxl_ep *ep, *_e;
- cxl_device_lock(dev);
+ device_lock(dev);
list_for_each_entry_safe(ep, _e, &port->endpoints, list)
cxl_ep_release(ep);
- cxl_device_unlock(dev);
+ device_unlock(dev);
ida_free(&cxl_port_ida, port->id);
kfree(port);
}
@@ -391,6 +391,8 @@ static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}
+static struct lock_class_key cxl_port_key;
+
static struct cxl_port *cxl_port_alloc(struct device *uport,
resource_size_t component_reg_phys,
struct cxl_port *parent_port)
@@ -415,9 +417,10 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
* description.
*/
dev = &port->dev;
- if (parent_port)
+ if (parent_port) {
dev->parent = &parent_port->dev;
- else
+ port->depth = parent_port->depth + 1;
+ } else
dev->parent = uport;
port->uport = uport;
@@ -427,6 +430,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
INIT_LIST_HEAD(&port->endpoints);
device_initialize(dev);
+ lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
device_set_pm_not_required(dev);
dev->bus = &cxl_bus_type;
dev->type = &cxl_port_type;
@@ -457,8 +461,6 @@ struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
if (IS_ERR(port))
return port;
- if (parent_port)
- port->depth = parent_port->depth + 1;
dev = &port->dev;
if (is_cxl_memdev(uport))
rc = dev_set_name(dev, "endpoint%d", port->id);
@@ -554,7 +556,7 @@ static int match_root_child(struct device *dev, const void *match)
return 0;
port = to_cxl_port(dev);
- cxl_device_lock(dev);
+ device_lock(dev);
list_for_each_entry(dport, &port->dports, list) {
iter = match;
while (iter) {
@@ -564,7 +566,7 @@ static int match_root_child(struct device *dev, const void *match)
}
}
out:
- cxl_device_unlock(dev);
+ device_unlock(dev);
return !!iter;
}
@@ -623,13 +625,13 @@ static int add_dport(struct cxl_port *port, struct cxl_dport *new)
static void cond_cxl_root_lock(struct cxl_port *port)
{
if (is_cxl_root(port))
- cxl_device_lock(&port->dev);
+ device_lock(&port->dev);
}
static void cond_cxl_root_unlock(struct cxl_port *port)
{
if (is_cxl_root(port))
- cxl_device_unlock(&port->dev);
+ device_unlock(&port->dev);
}
static void cxl_dport_remove(void *data)
@@ -736,15 +738,15 @@ static int add_ep(struct cxl_port *port, struct cxl_ep *new)
{
struct cxl_ep *dup;
- cxl_device_lock(&port->dev);
+ device_lock(&port->dev);
if (port->dead) {
- cxl_device_unlock(&port->dev);
+ device_unlock(&port->dev);
return -ENXIO;
}
dup = find_ep(port, new->ep);
if (!dup)
list_add_tail(&new->list, &port->endpoints);
- cxl_device_unlock(&port->dev);
+ device_unlock(&port->dev);
return dup ? -EEXIST : 0;
}
@@ -854,12 +856,12 @@ static void delete_endpoint(void *data)
goto out;
parent = &parent_port->dev;
- cxl_device_lock(parent);
+ device_lock(parent);
if (parent->driver && endpoint->uport) {
devm_release_action(parent, cxl_unlink_uport, endpoint);
devm_release_action(parent, unregister_port, endpoint);
}
- cxl_device_unlock(parent);
+ device_unlock(parent);
put_device(parent);
out:
put_device(&endpoint->dev);
@@ -920,7 +922,7 @@ static void cxl_detach_ep(void *data)
}
parent_port = to_cxl_port(port->dev.parent);
- cxl_device_lock(&parent_port->dev);
+ device_lock(&parent_port->dev);
if (!parent_port->dev.driver) {
/*
* The bottom-up race to delete the port lost to a
@@ -928,12 +930,12 @@ static void cxl_detach_ep(void *data)
* parent_port ->remove() will have cleaned up all
* descendants.
*/
- cxl_device_unlock(&parent_port->dev);
+ device_unlock(&parent_port->dev);
put_device(&port->dev);
continue;
}
- cxl_device_lock(&port->dev);
+ device_lock(&port->dev);
ep = find_ep(port, &cxlmd->dev);
dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
@@ -948,7 +950,7 @@ static void cxl_detach_ep(void *data)
port->dead = true;
list_splice_init(&port->dports, &reap_dports);
}
- cxl_device_unlock(&port->dev);
+ device_unlock(&port->dev);
if (!list_empty(&reap_dports)) {
dev_dbg(&cxlmd->dev, "delete %s\n",
@@ -956,7 +958,7 @@ static void cxl_detach_ep(void *data)
delete_switch_port(port, &reap_dports);
}
put_device(&port->dev);
- cxl_device_unlock(&parent_port->dev);
+ device_unlock(&parent_port->dev);
}
}
@@ -1004,7 +1006,7 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
return -EAGAIN;
}
- cxl_device_lock(&parent_port->dev);
+ device_lock(&parent_port->dev);
if (!parent_port->dev.driver) {
dev_warn(&cxlmd->dev,
"port %s:%s disabled, failed to enumerate CXL.mem\n",
@@ -1022,7 +1024,7 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
get_device(&port->dev);
}
out:
- cxl_device_unlock(&parent_port->dev);
+ device_unlock(&parent_port->dev);
if (IS_ERR(port))
rc = PTR_ERR(port);
@@ -1133,14 +1135,14 @@ struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
{
struct cxl_dport *dport;
- cxl_device_lock(&port->dev);
+ device_lock(&port->dev);
list_for_each_entry(dport, &port->dports, list)
if (dport->dport == dev) {
- cxl_device_unlock(&port->dev);
+ device_unlock(&port->dev);
return dport;
}
- cxl_device_unlock(&port->dev);
+ device_unlock(&port->dev);
return NULL;
}
EXPORT_SYMBOL_NS_GPL(cxl_find_dport_by_dev, CXL);
@@ -1173,6 +1175,8 @@ static int decoder_populate_targets(struct cxl_decoder *cxld,
return rc;
}
+static struct lock_class_key cxl_decoder_key;
+
/**
* cxl_decoder_alloc - Allocate a new CXL decoder
* @port: owning port of this decoder
@@ -1214,6 +1218,7 @@ static struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port,
seqlock_init(&cxld->target_lock);
dev = &cxld->dev;
device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &cxl_decoder_key);
device_set_pm_not_required(dev);
dev->parent = &port->dev;
dev->bus = &cxl_bus_type;
@@ -1379,9 +1384,9 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
port = to_cxl_port(cxld->dev.parent);
- cxl_device_lock(&port->dev);
+ device_lock(&port->dev);
rc = cxl_decoder_add_locked(cxld, target_map);
- cxl_device_unlock(&port->dev);
+ device_unlock(&port->dev);
return rc;
}
@@ -1452,14 +1457,7 @@ static int cxl_bus_probe(struct device *dev)
{
int rc;
- /*
- * Take the CXL nested lock since the driver core only holds
- * @dev->mutex and not @dev->lockdep_mutex.
- */
- cxl_nested_lock(dev);
rc = to_cxl_drv(dev->driver)->probe(dev);
- cxl_nested_unlock(dev);
-
dev_dbg(dev, "probe: %d\n", rc);
return rc;
}
@@ -1468,10 +1466,8 @@ static void cxl_bus_remove(struct device *dev)
{
struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);
- cxl_nested_lock(dev);
if (cxl_drv->remove)
cxl_drv->remove(dev);
- cxl_nested_unlock(dev);
}
static struct workqueue_struct *cxl_bus_wq;
diff --git a/drivers/cxl/core/suspend.c b/drivers/cxl/core/suspend.c
new file mode 100644
index 000000000000..a5984d96ea1d
--- /dev/null
+++ b/drivers/cxl/core/suspend.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#include <linux/atomic.h>
+#include <linux/export.h>
+#include "cxlmem.h"
+
+static atomic_t mem_active;
+
+bool cxl_mem_active(void)
+{
+ return atomic_read(&mem_active) != 0;
+}
+
+void cxl_mem_active_inc(void)
+{
+ atomic_inc(&mem_active);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mem_active_inc, CXL);
+
+void cxl_mem_active_dec(void)
+{
+ atomic_dec(&mem_active);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mem_active_dec, CXL);
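
The new suspend.c keeps a bare atomic count of active CXL memory expanders so the PM core can refuse suspend while any are bound. A user-space analogue of that gate using C11 atomics follows; the names are illustrative, not the kernel API.

/* User-space analogue (C11 atomics) of the cxl_mem_active counter: endpoints
 * increment it while bound, and the suspend path only proceeds when the
 * count is zero. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int mem_active;

static void mem_active_inc(void) { atomic_fetch_add(&mem_active, 1); }
static void mem_active_dec(void) { atomic_fetch_sub(&mem_active, 1); }
static bool mem_is_active(void)  { return atomic_load(&mem_active) != 0; }

int main(void)
{
	printf("suspend allowed: %d\n", !mem_is_active());	/* 1 */
	mem_active_inc();					/* endpoint probed */
	printf("suspend allowed: %d\n", !mem_is_active());	/* 0 */
	mem_active_dec();					/* endpoint removed */
	printf("suspend allowed: %d\n", !mem_is_active());	/* 1 */
	return 0;
}
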
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 990b6670222e..140dc3278cde 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -405,82 +405,4 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd);
#define __mock static
#endif
-#ifdef CONFIG_PROVE_CXL_LOCKING
-enum cxl_lock_class {
- CXL_ANON_LOCK,
- CXL_NVDIMM_LOCK,
- CXL_NVDIMM_BRIDGE_LOCK,
- CXL_PORT_LOCK,
- /*
- * Be careful to add new lock classes here, CXL_PORT_LOCK is
- * extended by the port depth, so a maximum CXL port topology
- * depth would need to be defined first.
- */
-};
-
-static inline void cxl_nested_lock(struct device *dev)
-{
- if (is_cxl_port(dev)) {
- struct cxl_port *port = to_cxl_port(dev);
-
- mutex_lock_nested(&dev->lockdep_mutex,
- CXL_PORT_LOCK + port->depth);
- } else if (is_cxl_decoder(dev)) {
- struct cxl_port *port = to_cxl_port(dev->parent);
-
- /*
- * A decoder is the immediate child of a port, so set
- * its lock class equal to other child device siblings.
- */
- mutex_lock_nested(&dev->lockdep_mutex,
- CXL_PORT_LOCK + port->depth + 1);
- } else if (is_cxl_nvdimm_bridge(dev))
- mutex_lock_nested(&dev->lockdep_mutex, CXL_NVDIMM_BRIDGE_LOCK);
- else if (is_cxl_nvdimm(dev))
- mutex_lock_nested(&dev->lockdep_mutex, CXL_NVDIMM_LOCK);
- else
- mutex_lock_nested(&dev->lockdep_mutex, CXL_ANON_LOCK);
-}
-
-static inline void cxl_nested_unlock(struct device *dev)
-{
- mutex_unlock(&dev->lockdep_mutex);
-}
-
-static inline void cxl_device_lock(struct device *dev)
-{
- /*
- * For double lock errors the lockup will happen before lockdep
- * warns at cxl_nested_lock(), so assert explicitly.
- */
- lockdep_assert_not_held(&dev->lockdep_mutex);
-
- device_lock(dev);
- cxl_nested_lock(dev);
-}
-
-static inline void cxl_device_unlock(struct device *dev)
-{
- cxl_nested_unlock(dev);
- device_unlock(dev);
-}
-#else
-static inline void cxl_nested_lock(struct device *dev)
-{
-}
-
-static inline void cxl_nested_unlock(struct device *dev)
-{
-}
-
-static inline void cxl_device_lock(struct device *dev)
-{
- device_lock(dev);
-}
-
-static inline void cxl_device_unlock(struct device *dev)
-{
- device_unlock(dev);
-}
-#endif
#endif /* __CXL_H__ */
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 5d33ce24fe09..60d10ee1e7fc 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -85,10 +85,61 @@ struct cxl_mbox_cmd {
size_t size_in;
size_t size_out;
u16 return_code;
-#define CXL_MBOX_SUCCESS 0
};
/*
+ * Per CXL 2.0 Section 8.2.8.4.5.1
+ */
+#define CMD_CMD_RC_TABLE \
+ C(SUCCESS, 0, NULL), \
+ C(BACKGROUND, -ENXIO, "background cmd started successfully"), \
+ C(INPUT, -ENXIO, "cmd input was invalid"), \
+ C(UNSUPPORTED, -ENXIO, "cmd is not supported"), \
+ C(INTERNAL, -ENXIO, "internal device error"), \
+ C(RETRY, -ENXIO, "temporary error, retry once"), \
+ C(BUSY, -ENXIO, "ongoing background operation"), \
+ C(MEDIADISABLED, -ENXIO, "media access is disabled"), \
+ C(FWINPROGRESS, -ENXIO, "one FW package can be transferred at a time"), \
+ C(FWOOO, -ENXIO, "FW package content was transferred out of order"), \
+ C(FWAUTH, -ENXIO, "FW package authentication failed"), \
+ C(FWSLOT, -ENXIO, "FW slot is not supported for requested operation"), \
+ C(FWROLLBACK, -ENXIO, "rolled back to the previous active FW"), \
+ C(FWRESET, -ENXIO, "FW failed to activate, needs cold reset"), \
+ C(HANDLE, -ENXIO, "one or more Event Record Handles were invalid"), \
+ C(PADDR, -ENXIO, "physical address specified is invalid"), \
+ C(POISONLMT, -ENXIO, "poison injection limit has been reached"), \
+ C(MEDIAFAILURE, -ENXIO, "permanent issue with the media"), \
+ C(ABORT, -ENXIO, "background cmd was aborted by device"), \
+ C(SECURITY, -ENXIO, "not valid in the current security state"), \
+ C(PASSPHRASE, -ENXIO, "phrase doesn't match current set passphrase"), \
+ C(MBUNSUPPORTED, -ENXIO, "unsupported on the mailbox it was issued on"),\
+ C(PAYLOADLEN, -ENXIO, "invalid payload length")
+
+#undef C
+#define C(a, b, c) CXL_MBOX_CMD_RC_##a
+enum { CMD_CMD_RC_TABLE };
+#undef C
+#define C(a, b, c) { b, c }
+struct cxl_mbox_cmd_rc {
+ int err;
+ const char *desc;
+};
+
+static const
+struct cxl_mbox_cmd_rc cxl_mbox_cmd_rctable[] = { CMD_CMD_RC_TABLE };
+#undef C
+
+static inline const char *cxl_mbox_cmd_rc2str(struct cxl_mbox_cmd *mbox_cmd)
+{
+ return cxl_mbox_cmd_rctable[mbox_cmd->return_code].desc;
+}
+
+static inline int cxl_mbox_cmd_rc2errno(struct cxl_mbox_cmd *mbox_cmd)
+{
+ return cxl_mbox_cmd_rctable[mbox_cmd->return_code].err;
+}
+
+/*
* CXL 2.0 - Memory capacity multiplier
* See Section 8.2.9.5
*
@@ -141,7 +192,6 @@ struct cxl_endpoint_dvsec_info {
* @info: Cached DVSEC information about the device.
* @serial: PCIe Device Serial Number
* @mbox_send: @dev specific transport for transmitting mailbox commands
- * @wait_media_ready: @dev specific method to await media ready
*
* See section 8.2.9.5.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
@@ -172,11 +222,9 @@ struct cxl_dev_state {
u64 next_persistent_bytes;
resource_size_t component_reg_phys;
- struct cxl_endpoint_dvsec_info info;
u64 serial;
int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
- int (*wait_media_ready)(struct cxl_dev_state *cxlds);
};
enum cxl_opcode {
@@ -262,6 +310,13 @@ struct cxl_mbox_set_lsa {
u8 data[];
} __packed;
+struct cxl_mbox_set_partition_info {
+ __le64 volatile_capacity;
+ u8 flags;
+} __packed;
+
+#define CXL_SET_PARTITION_IMMEDIATE_FLAG BIT(0)
+
/**
* struct cxl_mem_command - Driver representation of a memory device command
* @info: Command information as it exists for the UAPI
@@ -290,11 +345,23 @@ struct cxl_mem_command {
int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
size_t in_size, void *out, size_t out_size);
int cxl_dev_state_identify(struct cxl_dev_state *cxlds);
+int cxl_await_media_ready(struct cxl_dev_state *cxlds);
int cxl_enumerate_cmds(struct cxl_dev_state *cxlds);
int cxl_mem_create_range_info(struct cxl_dev_state *cxlds);
struct cxl_dev_state *cxl_dev_state_create(struct device *dev);
void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
+#ifdef CONFIG_CXL_SUSPEND
+void cxl_mem_active_inc(void);
+void cxl_mem_active_dec(void);
+#else
+static inline void cxl_mem_active_inc(void)
+{
+}
+static inline void cxl_mem_active_dec(void)
+{
+}
+#endif
struct cxl_hdm {
struct cxl_component_regs regs;
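
CMD_CMD_RC_TABLE above is an X-macro: the same table expands once into the CXL_MBOX_CMD_RC_* enum and once into the parallel array of { errno, description } entries, so the two can never drift apart. A standalone sketch of the pattern with a shortened, purely illustrative table:

/* Standalone sketch of the X-macro pattern: one table definition expands
 * first into an enum and then into a parallel array of { errno, description }
 * entries, keeping both in sync. Error numbers here are illustrative. */
#include <stdio.h>

#define RC_TABLE				\
	C(SUCCESS, 0,   NULL),			\
	C(INPUT,  -5,   "input was invalid"),	\
	C(BUSY,   -16,  "ongoing background operation")

#define C(name, err, desc) RC_##name
enum { RC_TABLE, RC_MAX };
#undef C

#define C(name, err, desc) { err, desc }
static const struct { int err; const char *desc; } rc_table[] = { RC_TABLE };
#undef C

int main(void)
{
	for (int i = 0; i < RC_MAX; i++)
		printf("%d: err=%d desc=%s\n", i, rc_table[i].err,
		       rc_table[i].desc ? rc_table[i].desc : "(none)");
	return 0;
}
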
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 329e7ea3f36a..fce1c11729c2 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -72,4 +72,6 @@ static inline resource_size_t cxl_regmap_to_base(struct pci_dev *pdev,
}
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
+struct cxl_dev_state;
+int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm);
#endif /* __CXL_PCI_H__ */
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 49a4b1c47299..c310f1fd3db0 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -24,27 +24,6 @@
* in higher level operations.
*/
-static int wait_for_media(struct cxl_memdev *cxlmd)
-{
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_endpoint_dvsec_info *info = &cxlds->info;
- int rc;
-
- if (!info->mem_enabled)
- return -EBUSY;
-
- rc = cxlds->wait_media_ready(cxlds);
- if (rc)
- return rc;
-
- /*
- * We know the device is active, and enabled, if any ranges are non-zero
- * we'll need to check later before adding the port since that owns the
- * HDM decoder registers.
- */
- return 0;
-}
-
static int create_endpoint(struct cxl_memdev *cxlmd,
struct cxl_port *parent_port)
{
@@ -67,72 +46,14 @@ static int create_endpoint(struct cxl_memdev *cxlmd,
return cxl_endpoint_autoremove(cxlmd, endpoint);
}
-/**
- * cxl_dvsec_decode_init() - Setup HDM decoding for the endpoint
- * @cxlds: Device state
- *
- * Additionally, enables global HDM decoding. Warning: don't call this outside
- * of probe. Once probe is complete, the port driver owns all access to the HDM
- * decoder registers.
- *
- * Returns: false if DVSEC Ranges are being used instead of HDM
- * decoders, or if it can not be determined if DVSEC Ranges are in use.
- * Otherwise, returns true.
- */
-__mock bool cxl_dvsec_decode_init(struct cxl_dev_state *cxlds)
+static void enable_suspend(void *data)
{
- struct cxl_endpoint_dvsec_info *info = &cxlds->info;
- struct cxl_register_map map;
- struct cxl_component_reg_map *cmap = &map.component_map;
- bool global_enable, do_hdm_init = false;
- void __iomem *crb;
- u32 global_ctrl;
-
- /* map hdm decoder */
- crb = ioremap(cxlds->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
- if (!crb) {
- dev_dbg(cxlds->dev, "Failed to map component registers\n");
- return false;
- }
-
- cxl_probe_component_regs(cxlds->dev, crb, cmap);
- if (!cmap->hdm_decoder.valid) {
- dev_dbg(cxlds->dev, "Invalid HDM decoder registers\n");
- goto out;
- }
-
- global_ctrl = readl(crb + cmap->hdm_decoder.offset +
- CXL_HDM_DECODER_CTRL_OFFSET);
- global_enable = global_ctrl & CXL_HDM_DECODER_ENABLE;
- if (!global_enable && info->ranges) {
- dev_dbg(cxlds->dev,
- "DVSEC ranges already programmed and HDM decoders not enabled.\n");
- goto out;
- }
-
- do_hdm_init = true;
-
- /*
- * Permanently (for this boot at least) opt the device into HDM
- * operation. Individual HDM decoders still need to be enabled after
- * this point.
- */
- if (!global_enable) {
- dev_dbg(cxlds->dev, "Enabling HDM decode\n");
- writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
- crb + cmap->hdm_decoder.offset +
- CXL_HDM_DECODER_CTRL_OFFSET);
- }
-
-out:
- iounmap(crb);
- return do_hdm_init;
+ cxl_mem_active_dec();
}
static int cxl_mem_probe(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_port *parent_port;
int rc;
@@ -147,44 +68,6 @@ static int cxl_mem_probe(struct device *dev)
if (work_pending(&cxlmd->detach_work))
return -EBUSY;
- rc = wait_for_media(cxlmd);
- if (rc) {
- dev_err(dev, "Media not active (%d)\n", rc);
- return rc;
- }
-
- /*
- * If DVSEC ranges are being used instead of HDM decoder registers there
- * is no use in trying to manage those.
- */
- if (!cxl_dvsec_decode_init(cxlds)) {
- struct cxl_endpoint_dvsec_info *info = &cxlds->info;
- int i;
-
- /* */
- for (i = 0; i < 2; i++) {
- u64 base, size;
-
- /*
- * Give a nice warning to the user that BIOS has really
- * botched things for them if it didn't place DVSEC
- * ranges in the memory map.
- */
- base = info->dvsec_range[i].start;
- size = range_len(&info->dvsec_range[i]);
- if (size && !region_intersects(base, size,
- IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE)) {
- dev_err(dev,
- "DVSEC range %#llx-%#llx must be reserved by BIOS, but isn't\n",
- base, base + size - 1);
- }
- }
- dev_err(dev,
- "Active DVSEC range registers in use. Will not bind.\n");
- return -EBUSY;
- }
-
rc = devm_cxl_enumerate_ports(cxlmd);
if (rc)
return rc;
@@ -195,19 +78,36 @@ static int cxl_mem_probe(struct device *dev)
return -ENXIO;
}
- cxl_device_lock(&parent_port->dev);
+ device_lock(&parent_port->dev);
if (!parent_port->dev.driver) {
dev_err(dev, "CXL port topology %s not enabled\n",
dev_name(&parent_port->dev));
rc = -ENXIO;
- goto out;
+ goto unlock;
}
rc = create_endpoint(cxlmd, parent_port);
-out:
- cxl_device_unlock(&parent_port->dev);
+unlock:
+ device_unlock(&parent_port->dev);
put_device(&parent_port->dev);
- return rc;
+ if (rc)
+ return rc;
+
+ /*
+ * The kernel may be operating out of CXL memory on this device,
+ * there is no spec defined way to determine whether this device
+ * preserves contents over suspend, and there is no simple way
+ * to arrange for the suspend image to avoid CXL memory which
+ * would setup a circular dependency between PCI resume and save
+ * state restoration.
+ *
+ * TODO: support suspend when all the regions this device is
+ * hosting are locked and covered by the system address map,
+ * i.e. platform firmware owns restoring the HDM configuration
+ * that it locked.
+ */
+ cxl_mem_active_inc();
+ return devm_add_action_or_reset(dev, enable_suspend, NULL);
}
static struct cxl_driver cxl_mem_driver = {
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 3f2182d66829..5a0ae46d4989 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -48,8 +48,7 @@
*/
static unsigned short mbox_ready_timeout = 60;
module_param(mbox_ready_timeout, ushort, 0644);
-MODULE_PARM_DESC(mbox_ready_timeout,
- "seconds to wait for mailbox ready / memory active status");
+MODULE_PARM_DESC(mbox_ready_timeout, "seconds to wait for mailbox ready");
static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
{
@@ -177,9 +176,10 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
mbox_cmd->return_code =
FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);
- if (mbox_cmd->return_code != 0) {
- dev_dbg(dev, "Mailbox operation had an error\n");
- return 0;
+ if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) {
+ dev_dbg(dev, "Mailbox operation had an error: %s\n",
+ cxl_mbox_cmd_rc2str(mbox_cmd));
+ return 0; /* completed but caller must check return_code */
}
/* #7 */
@@ -386,164 +386,6 @@ static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
return rc;
}
-static int wait_for_valid(struct cxl_dev_state *cxlds)
-{
- struct pci_dev *pdev = to_pci_dev(cxlds->dev);
- int d = cxlds->cxl_dvsec, rc;
- u32 val;
-
- /*
- * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
- * and Size Low registers are valid. Must be set within 1 second of
- * deassertion of reset to CXL device. Likely it is already set by the
- * time this runs, but otherwise give a 1.5 second timeout in case of
- * clock skew.
- */
- rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
- if (rc)
- return rc;
-
- if (val & CXL_DVSEC_MEM_INFO_VALID)
- return 0;
-
- msleep(1500);
-
- rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
- if (rc)
- return rc;
-
- if (val & CXL_DVSEC_MEM_INFO_VALID)
- return 0;
-
- return -ETIMEDOUT;
-}
-
-/*
- * Wait up to @mbox_ready_timeout for the device to report memory
- * active.
- */
-static int wait_for_media_ready(struct cxl_dev_state *cxlds)
-{
- struct pci_dev *pdev = to_pci_dev(cxlds->dev);
- int d = cxlds->cxl_dvsec;
- bool active = false;
- u64 md_status;
- int rc, i;
-
- rc = wait_for_valid(cxlds);
- if (rc)
- return rc;
-
- for (i = mbox_ready_timeout; i; i--) {
- u32 temp;
-
- rc = pci_read_config_dword(
- pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp);
- if (rc)
- return rc;
-
- active = FIELD_GET(CXL_DVSEC_MEM_ACTIVE, temp);
- if (active)
- break;
- msleep(1000);
- }
-
- if (!active) {
- dev_err(&pdev->dev,
- "timeout awaiting memory active after %d seconds\n",
- mbox_ready_timeout);
- return -ETIMEDOUT;
- }
-
- md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
- if (!CXLMDEV_READY(md_status))
- return -EIO;
-
- return 0;
-}
-
-static int cxl_dvsec_ranges(struct cxl_dev_state *cxlds)
-{
- struct cxl_endpoint_dvsec_info *info = &cxlds->info;
- struct pci_dev *pdev = to_pci_dev(cxlds->dev);
- int d = cxlds->cxl_dvsec;
- int hdm_count, rc, i;
- u16 cap, ctrl;
-
- if (!d)
- return -ENXIO;
-
- rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap);
- if (rc)
- return rc;
-
- rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
- if (rc)
- return rc;
-
- if (!(cap & CXL_DVSEC_MEM_CAPABLE))
- return -ENXIO;
-
- /*
- * It is not allowed by spec for MEM.capable to be set and have 0 legacy
- * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this
- * driver is for a spec defined class code which must be CXL.mem
- * capable, there is no point in continuing to enable CXL.mem.
- */
- hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
- if (!hdm_count || hdm_count > 2)
- return -EINVAL;
-
- rc = wait_for_valid(cxlds);
- if (rc)
- return rc;
-
- info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
-
- for (i = 0; i < hdm_count; i++) {
- u64 base, size;
- u32 temp;
-
- rc = pci_read_config_dword(
- pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
- if (rc)
- return rc;
-
- size = (u64)temp << 32;
-
- rc = pci_read_config_dword(
- pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp);
- if (rc)
- return rc;
-
- size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
-
- rc = pci_read_config_dword(
- pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
- if (rc)
- return rc;
-
- base = (u64)temp << 32;
-
- rc = pci_read_config_dword(
- pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp);
- if (rc)
- return rc;
-
- base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;
-
- info->dvsec_range[i] = (struct range) {
- .start = base,
- .end = base + size - 1
- };
-
- if (size)
- info->ranges++;
- }
-
- return 0;
-}
-
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct cxl_register_map map;
@@ -573,8 +415,6 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_warn(&pdev->dev,
"Device DVSEC not present, skip CXL.mem init\n");
- cxlds->wait_media_ready = wait_for_media_ready;
-
rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
if (rc)
return rc;
@@ -610,11 +450,6 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- rc = cxl_dvsec_ranges(cxlds);
- if (rc)
- dev_warn(&pdev->dev,
- "Failed to get DVSEC range information (%d)\n", rc);
-
cxlmd = devm_cxl_add_memdev(cxlds);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 15ad666ab03e..bbeef91e637e 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -43,7 +43,7 @@ static int cxl_nvdimm_probe(struct device *dev)
if (!cxl_nvb)
return -ENXIO;
- cxl_device_lock(&cxl_nvb->dev);
+ device_lock(&cxl_nvb->dev);
if (!cxl_nvb->nvdimm_bus) {
rc = -ENXIO;
goto out;
@@ -68,7 +68,7 @@ static int cxl_nvdimm_probe(struct device *dev)
dev_set_drvdata(dev, nvdimm);
rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
- cxl_device_unlock(&cxl_nvb->dev);
+ device_unlock(&cxl_nvb->dev);
put_device(&cxl_nvb->dev);
return rc;
@@ -233,7 +233,7 @@ static void cxl_nvb_update_state(struct work_struct *work)
struct nvdimm_bus *victim_bus = NULL;
bool release = false, rescan = false;
- cxl_device_lock(&cxl_nvb->dev);
+ device_lock(&cxl_nvb->dev);
switch (cxl_nvb->state) {
case CXL_NVB_ONLINE:
if (!online_nvdimm_bus(cxl_nvb)) {
@@ -251,7 +251,7 @@ static void cxl_nvb_update_state(struct work_struct *work)
default:
break;
}
- cxl_device_unlock(&cxl_nvb->dev);
+ device_unlock(&cxl_nvb->dev);
if (release)
device_release_driver(&cxl_nvb->dev);
@@ -327,9 +327,9 @@ static int cxl_nvdimm_bridge_reset(struct device *dev, void *data)
return 0;
cxl_nvb = to_cxl_nvdimm_bridge(dev);
- cxl_device_lock(dev);
+ device_lock(dev);
cxl_nvb->state = CXL_NVB_NEW;
- cxl_device_unlock(dev);
+ device_unlock(dev);
return 0;
}
@@ -344,7 +344,6 @@ static __init int cxl_pmem_init(void)
{
int rc;
- set_bit(CXL_MEM_COMMAND_ID_SET_PARTITION_INFO, exclusive_cmds);
set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index d420da5fc39c..3cf308f114c4 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -36,14 +36,8 @@ static int cxl_port_probe(struct device *dev)
struct cxl_hdm *cxlhdm;
int rc;
- if (is_cxl_endpoint(port)) {
- struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
- get_device(&cxlmd->dev);
- rc = devm_add_action_or_reset(dev, schedule_detach, cxlmd);
- if (rc)
- return rc;
- } else {
+ if (!is_cxl_endpoint(port)) {
rc = devm_cxl_port_enumerate_dports(port);
if (rc < 0)
return rc;
@@ -55,6 +49,26 @@ static int cxl_port_probe(struct device *dev)
if (IS_ERR(cxlhdm))
return PTR_ERR(cxlhdm);
+ if (is_cxl_endpoint(port)) {
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ get_device(&cxlmd->dev);
+ rc = devm_add_action_or_reset(dev, schedule_detach, cxlmd);
+ if (rc)
+ return rc;
+
+ rc = cxl_hdm_decode_init(cxlds, cxlhdm);
+ if (rc)
+ return rc;
+
+ rc = cxl_await_media_ready(cxlds);
+ if (rc) {
+ dev_err(dev, "Media not active (%d)\n", rc);
+ return rc;
+ }
+ }
+
rc = devm_cxl_enumerate_decoders(cxlhdm);
if (rc) {
dev_err(dev, "Couldn't enumerate decoders (%d)\n", rc);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 0211e6f7b47a..50a08b2ec247 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -117,6 +117,7 @@ enum dax_device_flags {
* @dax_dev: a dax_device instance representing the logical memory range
* @pgoff: offset in pages from the start of the device to translate
* @nr_pages: number of consecutive pages caller can handle relative to @pfn
+ * @mode: indicator on normal access or recovery write
* @kaddr: output parameter that returns a virtual address mapping of pfn
* @pfn: output parameter that returns an absolute pfn translation of @pgoff
*
@@ -124,7 +125,7 @@ enum dax_device_flags {
* pages accessible at the device relative @pgoff.
*/
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
- void **kaddr, pfn_t *pfn)
+ enum dax_access_mode mode, void **kaddr, pfn_t *pfn)
{
long avail;
@@ -138,7 +139,7 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
return -EINVAL;
avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
- kaddr, pfn);
+ mode, kaddr, pfn);
if (!avail)
return -ERANGE;
return min(avail, nr_pages);
@@ -194,6 +195,15 @@ int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
+size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *iter)
+{
+ if (!dax_dev->ops->recovery_write)
+ return 0;
+ return dax_dev->ops->recovery_write(dax_dev, pgoff, addr, bytes, iter);
+}
+EXPORT_SYMBOL_GPL(dax_recovery_write);
+
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c
index 005a82f671c3..0e5a5662d5a4 100644
--- a/drivers/dio/dio.c
+++ b/drivers/dio/dio.c
@@ -216,8 +216,11 @@ static int __init dio_init(void)
/* Found a board, allocate it an entry in the list */
dev = kzalloc(sizeof(struct dio_dev), GFP_KERNEL);
- if (!dev)
+ if (!dev) {
+ if (scode >= DIOII_SCBASE)
+ iounmap(va);
return -ENOMEM;
+ }
dev->bus = &dio_bus;
dev->dev.parent = &dio_bus.dev;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d5de3f77d3aa..487ed4ddc3be 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -163,7 +163,7 @@ config DMA_SUN4I
config DMA_SUN6I
tristate "Allwinner A31 SoCs DMA support"
- depends on MACH_SUN6I || MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+ depends on ARCH_SUNXI || COMPILE_TEST
depends on RESET_CONTROLLER
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
@@ -629,6 +629,18 @@ config TXX9_DMAC
Support the TXx9 SoC internal DMA controller. This can be
integrated in chips such as the Toshiba TX4927/38/39.
+config TEGRA186_GPC_DMA
+ tristate "NVIDIA Tegra GPC DMA support"
+ depends on (ARCH_TEGRA || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT
+ depends on IOMMU_API
+ select DMA_ENGINE
+ help
+ Support for the NVIDIA Tegra General Purpose Central DMA controller.
+ The DMA controller has multiple DMA channels which can be configured
+ for different peripherals like UART, SPI, etc which are on APB bus.
+ This DMA controller transfers data from memory to peripheral FIFO
+ or vice versa. It also supports memory to memory data transfer.
+
config TEGRA20_APB_DMA
tristate "NVIDIA Tegra20 APB DMA support"
depends on ARCH_TEGRA || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 616d926cf2a5..2f1b87ffd7ab 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -72,6 +72,7 @@ obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o
obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+obj-$(CONFIG_TEGRA186_GPC_DMA) += tegra186-gpc-dma.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index a24882ba3764..a4a794e62ac2 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1535,14 +1535,6 @@ static void pl08x_free_chan_resources(struct dma_chan *chan)
vchan_free_chan_resources(to_virt_chan(chan));
}
-static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
- struct dma_chan *chan, unsigned long flags)
-{
- struct dma_async_tx_descriptor *retval = NULL;
-
- return retval;
-}
-
/*
* Code accessing dma_async_is_complete() in a tight loop may give problems.
* If slaves are relying on interrupts to signal completion this function
@@ -2760,7 +2752,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->memcpy.dev = &adev->dev;
pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
- pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
pl08x->memcpy.device_config = pl08x_config;
@@ -2787,8 +2778,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->slave.dev = &adev->dev;
pl08x->slave.device_free_chan_resources =
pl08x_free_chan_resources;
- pl08x->slave.device_prep_dma_interrupt =
- pl08x_prep_dma_interrupt;
pl08x->slave.device_tx_status = pl08x_dma_tx_status;
pl08x->slave.device_issue_pending = pl08x_issue_pending;
pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 30ae36124b1d..5a50423b7378 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -942,6 +942,7 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
struct at_desc *desc;
void __iomem *vaddr;
dma_addr_t paddr;
+ char fill_pattern;
dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
&dest, value, len, flags);
@@ -963,7 +964,14 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
__func__);
return NULL;
}
- *(u32*)vaddr = value;
+
+ /* Only the first byte of value is to be used according to dmaengine */
+ fill_pattern = (char)value;
+
+ *(u32*)vaddr = (fill_pattern << 24) |
+ (fill_pattern << 16) |
+ (fill_pattern << 8) |
+ fill_pattern;
desc = atc_create_memset_desc(chan, paddr, dest, len);
if (!desc) {
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index def564d1e8fa..3e9d726504e2 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1202,6 +1202,7 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
unsigned long flags;
size_t ublen;
u32 dwidth;
+ char pattern;
/*
* WARNING: The channel configuration is set here since there is no
* dmaengine_slave_config call in this case. Moreover we don't know the
@@ -1244,10 +1245,16 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
+ /* Only the first byte of value is to be used according to dmaengine */
+ pattern = (char)value;
+
ublen = len >> dwidth;
desc->lld.mbr_da = dst_addr;
- desc->lld.mbr_ds = value;
+ desc->lld.mbr_ds = (pattern << 24) |
+ (pattern << 16) |
+ (pattern << 8) |
+ pattern;
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
| AT_XDMAC_MBR_UBC_NDEN
| AT_XDMAC_MBR_UBC_NSEN
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index 8c42e5ca00a9..1822a7034630 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -17,7 +17,9 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index fc513eb2b289..e2ec540e6519 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -911,6 +912,14 @@ static int jz4780_dma_probe(struct platform_device *pdev)
dd = &jzdma->dma_device;
+ /*
+ * The real segment size limit is dependent on the size unit selected
+ * for the transfer. Because the size unit is selected automatically
+ * and may be as small as 1 byte, use a safe limit of 2^24-1 bytes to
+ * ensure the 24-bit transfer count in the descriptor cannot overflow.
+ */
+ dma_set_max_seg_size(dev, 0xffffff);
+
dma_cap_set(DMA_MEMCPY, dd->cap_mask);
dma_cap_set(DMA_SLAVE, dd->cap_mask);
dma_cap_set(DMA_CYCLIC, dd->cap_mask);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 2cfa8458b51b..e80feeea0e01 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1053,9 +1053,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
* When the chan_id is a negative value, we are dynamically adding
* the channel. Otherwise we are static enumerating.
*/
- mutex_lock(&device->chan_mutex);
chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
- mutex_unlock(&device->chan_mutex);
if (chan->chan_id < 0) {
pr_err("%s: unable to alloc ida for chan: %d\n",
__func__, chan->chan_id);
@@ -1078,9 +1076,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
return 0;
err_out_ida:
- mutex_lock(&device->chan_mutex);
ida_free(&device->chan_ida, chan->chan_id);
- mutex_unlock(&device->chan_mutex);
err_free_dev:
kfree(chan->dev);
err_free_local:
@@ -1113,9 +1109,7 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
device->chancnt--;
chan->dev->chan = NULL;
mutex_unlock(&dma_list_mutex);
- mutex_lock(&device->chan_mutex);
ida_free(&device->chan_ida, chan->chan_id);
- mutex_unlock(&device->chan_mutex);
device_unregister(&chan->dev->device);
free_percpu(chan->local);
}
@@ -1250,7 +1244,6 @@ int dma_async_device_register(struct dma_device *device)
if (rc != 0)
return rc;
- mutex_init(&device->chan_mutex);
ida_init(&device->chan_ida);
/* represent channels in sysfs. Probably want devs too */
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index f696246f57fd..0a2168a4ccb0 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -675,10 +675,16 @@ static int dmatest_func(void *data)
/*
* src and dst buffers are freed by ourselves below
*/
- if (params->polled)
+ if (params->polled) {
flags = DMA_CTRL_ACK;
- else
- flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ } else {
+ if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask)) {
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ } else {
+ pr_err("Channel does not support interrupt!\n");
+ goto err_pq_array;
+ }
+ }
ktime = ktime_get();
while (!(kthread_should_stop() ||
@@ -906,6 +912,7 @@ error_unmap_continue:
runtime = ktime_to_us(ktime);
ret = 0;
+err_pq_array:
kfree(dma_pq);
err_srcs_array:
kfree(srcs);
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index db25f9b7778c..a9828ddd6d06 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -16,6 +16,15 @@ config DW_DMAC
Support the Synopsys DesignWare AHB DMA controller. This
can be integrated in chips such as the Intel Cherrytrail.
+config RZN1_DMAMUX
+ tristate "Renesas RZ/N1 DMAMUX driver"
+ depends on DW_DMAC
+ depends on ARCH_RZN1 || COMPILE_TEST
+ help
+ Support the Renesas RZ/N1 DMAMUX which is located in front of
+ the Synopsys DesignWare AHB DMA controller located on Renesas
+ SoCs.
+
config DW_DMAC_PCI
tristate "Synopsys DesignWare AHB DMA PCI driver"
depends on PCI
diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile
index a6f358ad8591..e1796015f213 100644
--- a/drivers/dma/dw/Makefile
+++ b/drivers/dma/dw/Makefile
@@ -9,3 +9,5 @@ dw_dmac-$(CONFIG_OF) += of.o
obj-$(CONFIG_DW_DMAC_PCI) += dw_dmac_pci.o
dw_dmac_pci-y := pci.o
+
+obj-$(CONFIG_RZN1_DMAMUX) += rzn1-dmamux.o
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 246118955877..47f2292dba98 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -137,6 +137,7 @@ static void dw_shutdown(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
{ .compatible = "snps,dma-spear1340", .data = &dw_dma_chip_pdata },
+ { .compatible = "renesas,rzn1-dma", .data = &dw_dma_chip_pdata },
{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
new file mode 100644
index 000000000000..11d254e450b0
--- /dev/null
+++ b/drivers/dma/dw/rzn1-dmamux.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Schneider-Electric
+ * Author: Miquel Raynal <miquel.raynal@bootlin.com>
+ * Based on TI crossbar driver written by Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+#include <linux/bitops.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/slab.h>
+#include <linux/soc/renesas/r9a06g032-sysctrl.h>
+#include <linux/types.h>
+
+#define RNZ1_DMAMUX_NCELLS 6
+#define RZN1_DMAMUX_MAX_LINES 64
+#define RZN1_DMAMUX_LINES_PER_CTLR 16
+
+struct rzn1_dmamux_data {
+ struct dma_router dmarouter;
+ DECLARE_BITMAP(used_chans, 2 * RZN1_DMAMUX_LINES_PER_CTLR);
+};
+
+struct rzn1_dmamux_map {
+ unsigned int req_idx;
+};
+
+static void rzn1_dmamux_free(struct device *dev, void *route_data)
+{
+ struct rzn1_dmamux_data *dmamux = dev_get_drvdata(dev);
+ struct rzn1_dmamux_map *map = route_data;
+
+ dev_dbg(dev, "Unmapping DMAMUX request %u\n", map->req_idx);
+
+ clear_bit(map->req_idx, dmamux->used_chans);
+
+ kfree(map);
+}
+
+static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+ struct rzn1_dmamux_data *dmamux = platform_get_drvdata(pdev);
+ struct rzn1_dmamux_map *map;
+ unsigned int dmac_idx, chan, val;
+ u32 mask;
+ int ret;
+
+ if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS)
+ return ERR_PTR(-EINVAL);
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+
+ chan = dma_spec->args[0];
+ map->req_idx = dma_spec->args[4];
+ val = dma_spec->args[5];
+ dma_spec->args_count -= 2;
+
+ if (chan >= RZN1_DMAMUX_LINES_PER_CTLR) {
+ dev_err(&pdev->dev, "Invalid DMA request line: %u\n", chan);
+ ret = -EINVAL;
+ goto free_map;
+ }
+
+ if (map->req_idx >= RZN1_DMAMUX_MAX_LINES ||
+ (map->req_idx % RZN1_DMAMUX_LINES_PER_CTLR) != chan) {
+ dev_err(&pdev->dev, "Invalid MUX request line: %u\n", map->req_idx);
+ ret = -EINVAL;
+ goto free_map;
+ }
+
+ dmac_idx = map->req_idx >= RZN1_DMAMUX_LINES_PER_CTLR ? 1 : 0;
+ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", dmac_idx);
+ if (!dma_spec->np) {
+ dev_err(&pdev->dev, "Can't get DMA master\n");
+ ret = -EINVAL;
+ goto free_map;
+ }
+
+ dev_dbg(&pdev->dev, "Mapping DMAMUX request %u to DMAC%u request %u\n",
+ map->req_idx, dmac_idx, chan);
+
+ if (test_and_set_bit(map->req_idx, dmamux->used_chans)) {
+ ret = -EBUSY;
+ goto free_map;
+ }
+
+ mask = BIT(map->req_idx);
+ ret = r9a06g032_sysctrl_set_dmamux(mask, val ? mask : 0);
+ if (ret)
+ goto clear_bitmap;
+
+ return map;
+
+clear_bitmap:
+ clear_bit(map->req_idx, dmamux->used_chans);
+free_map:
+ kfree(map);
+
+ return ERR_PTR(ret);
+}
+
+static const struct of_device_id rzn1_dmac_match[] = {
+ { .compatible = "renesas,rzn1-dma" },
+ {}
+};
+
+static int rzn1_dmamux_probe(struct platform_device *pdev)
+{
+ struct device_node *mux_node = pdev->dev.of_node;
+ const struct of_device_id *match;
+ struct device_node *dmac_node;
+ struct rzn1_dmamux_data *dmamux;
+
+ dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL);
+ if (!dmamux)
+ return -ENOMEM;
+
+ dmac_node = of_parse_phandle(mux_node, "dma-masters", 0);
+ if (!dmac_node)
+ return dev_err_probe(&pdev->dev, -ENODEV, "Can't get DMA master node\n");
+
+ match = of_match_node(rzn1_dmac_match, dmac_node);
+ of_node_put(dmac_node);
+ if (!match)
+ return dev_err_probe(&pdev->dev, -EINVAL, "DMA master is not supported\n");
+
+ dmamux->dmarouter.dev = &pdev->dev;
+ dmamux->dmarouter.route_free = rzn1_dmamux_free;
+
+ platform_set_drvdata(pdev, dmamux);
+
+ return of_dma_router_register(mux_node, rzn1_dmamux_route_allocate,
+ &dmamux->dmarouter);
+}
+
+static const struct of_device_id rzn1_dmamux_match[] = {
+ { .compatible = "renesas,rzn1-dmamux" },
+ {}
+};
+
+static struct platform_driver rzn1_dmamux_driver = {
+ .driver = {
+ .name = "renesas,rzn1-dmamux",
+ .of_match_table = rzn1_dmamux_match,
+ },
+ .probe = rzn1_dmamux_probe,
+};
+module_platform_driver(rzn1_dmamux_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com");
+MODULE_DESCRIPTION("Renesas RZ/N1 DMAMUX driver");
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 98f9ee70362e..971ff5f9ae84 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -132,7 +132,7 @@ struct ep93xx_dma_desc {
/**
* struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
* @chan: dmaengine API channel
- * @edma: pointer to to the engine device
+ * @edma: pointer to the engine device
* @regs: memory mapped registers
* @irq: interrupt number of the channel
* @clk: clock used by this channel
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index b9b2b4a4124e..c2808fd081d6 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -99,7 +99,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
ctx->wq = wq;
filp->private_data = ctx;
- if (device_pasid_enabled(idxd)) {
+ if (device_user_pasid_enabled(idxd)) {
sva = iommu_sva_bind_device(dev, current->mm, NULL);
if (IS_ERR(sva)) {
rc = PTR_ERR(sva);
@@ -152,7 +152,7 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
if (wq_shared(wq)) {
idxd_device_drain_pasid(idxd, ctx->pasid);
} else {
- if (device_pasid_enabled(idxd)) {
+ if (device_user_pasid_enabled(idxd)) {
/* The wq disable in the disable pasid function will drain the wq */
rc = idxd_wq_disable_pasid(wq);
if (rc < 0)
@@ -314,7 +314,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock);
wq->type = IDXD_WQT_USER;
- rc = __drv_enable_wq(wq);
+ rc = drv_enable_wq(wq);
if (rc < 0)
goto err;
@@ -329,7 +329,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
return 0;
err_cdev:
- __drv_disable_wq(wq);
+ drv_disable_wq(wq);
err:
wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
@@ -342,7 +342,7 @@ static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock);
idxd_wq_del_cdev(wq);
- __drv_disable_wq(wq);
+ drv_disable_wq(wq);
wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
}
@@ -369,10 +369,16 @@ int idxd_cdev_register(void)
rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
ictx[i].name);
if (rc)
- return rc;
+ goto err_free_chrdev_region;
}
return 0;
+
+err_free_chrdev_region:
+ for (i--; i >= 0; i--)
+ unregister_chrdev_region(ictx[i].devt, MINORMASK);
+
+ return rc;
}
void idxd_cdev_remove(void)
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index f652da6ab47d..ff0ea60051f0 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -184,7 +184,7 @@ int idxd_wq_enable(struct idxd_wq *wq)
if (wq->state == IDXD_WQ_ENABLED) {
dev_dbg(dev, "WQ %d already enabled\n", wq->id);
- return -ENXIO;
+ return 0;
}
idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
@@ -299,24 +299,46 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd)
}
}
-int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
+static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
{
struct idxd_device *idxd = wq->idxd;
- int rc;
union wqcfg wqcfg;
unsigned int offset;
- rc = idxd_wq_disable(wq, false);
- if (rc < 0)
- return rc;
+ offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
+ spin_lock(&idxd->dev_lock);
+ wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
+ wqcfg.priv = priv;
+ wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
+ iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
+ spin_unlock(&idxd->dev_lock);
+}
+
+static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
+{
+ struct idxd_device *idxd = wq->idxd;
+ union wqcfg wqcfg;
+ unsigned int offset;
offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
spin_lock(&idxd->dev_lock);
wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
wqcfg.pasid_en = 1;
wqcfg.pasid = pasid;
+ wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX];
iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
spin_unlock(&idxd->dev_lock);
+}
+
+int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
+{
+ int rc;
+
+ rc = idxd_wq_disable(wq, false);
+ if (rc < 0)
+ return rc;
+
+ __idxd_wq_set_pasid_locked(wq, pasid);
rc = idxd_wq_enable(wq);
if (rc < 0)
@@ -555,19 +577,15 @@ int idxd_device_disable(struct idxd_device *idxd)
return -ENXIO;
}
- spin_lock(&idxd->dev_lock);
idxd_device_clear_state(idxd);
- idxd->state = IDXD_DEV_DISABLED;
- spin_unlock(&idxd->dev_lock);
return 0;
}
void idxd_device_reset(struct idxd_device *idxd)
{
idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
- spin_lock(&idxd->dev_lock);
idxd_device_clear_state(idxd);
- idxd->state = IDXD_DEV_DISABLED;
+ spin_lock(&idxd->dev_lock);
idxd_unmask_error_interrupts(idxd);
spin_unlock(&idxd->dev_lock);
}
@@ -694,15 +712,16 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
int i;
- lockdep_assert_held(&idxd->dev_lock);
for (i = 0; i < idxd->max_wqs; i++) {
struct idxd_wq *wq = idxd->wqs[i];
+ mutex_lock(&wq->wq_lock);
if (wq->state == IDXD_WQ_ENABLED) {
idxd_wq_disable_cleanup(wq);
wq->state = IDXD_WQ_DISABLED;
}
idxd_wq_device_reset_cleanup(wq);
+ mutex_unlock(&wq->wq_lock);
}
}
@@ -711,9 +730,12 @@ void idxd_device_clear_state(struct idxd_device *idxd)
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return;
+ idxd_device_wqs_clear_state(idxd);
+ spin_lock(&idxd->dev_lock);
idxd_groups_clear_state(idxd);
idxd_engines_clear_state(idxd);
- idxd_device_wqs_clear_state(idxd);
+ idxd->state = IDXD_DEV_DISABLED;
+ spin_unlock(&idxd->dev_lock);
}
static void idxd_group_config_write(struct idxd_group *group)
@@ -799,7 +821,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
*/
for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
- wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
+ wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset);
}
if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
@@ -815,14 +837,8 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
if (wq_dedicated(wq))
wq->wqcfg->mode = 1;
- if (device_pasid_enabled(idxd)) {
- wq->wqcfg->pasid_en = 1;
- if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
- wq->wqcfg->pasid = idxd->pasid;
- }
-
/*
- * Here the priv bit is set depending on the WQ type. priv = 1 if the
+ * The WQ priv bit is set depending on the WQ type. priv = 1 if the
* WQ type is kernel to indicate privileged access. This setting only
* matters for dedicated WQ. According to the DSA spec:
* If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
@@ -832,7 +848,6 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
* In the case of a dedicated kernel WQ that is not able to support
* the PASID cap, then the configuration will be rejected.
*/
- wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
!idxd_device_pasid_priv_enabled(idxd) &&
wq->type == IDXD_WQT_KERNEL) {
@@ -953,7 +968,7 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
if (!wq->group)
continue;
- if (wq_shared(wq) && !device_swq_supported(idxd)) {
+ if (wq_shared(wq) && !wq_shared_supported(wq)) {
idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
dev_warn(dev, "No shared wq support but configured.\n");
return -EINVAL;
@@ -1018,6 +1033,9 @@ static int idxd_wq_load_config(struct idxd_wq *wq)
wq->priority = wq->wqcfg->priority;
+ wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
+ wq->max_batch_size = 1ULL << wq->wqcfg->max_batch_shift;
+
for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
@@ -1161,7 +1179,9 @@ void idxd_wq_free_irq(struct idxd_wq *wq)
struct idxd_device *idxd = wq->idxd;
struct idxd_irq_entry *ie = &wq->ie;
- synchronize_irq(ie->vector);
+ if (wq->type != IDXD_WQT_KERNEL)
+ return;
+
free_irq(ie->vector, ie);
idxd_flush_pending_descs(ie);
if (idxd->request_int_handles)
@@ -1180,6 +1200,9 @@ int idxd_wq_request_irq(struct idxd_wq *wq)
struct idxd_irq_entry *ie;
int rc;
+ if (wq->type != IDXD_WQT_KERNEL)
+ return 0;
+
ie = &wq->ie;
ie->vector = pci_irq_vector(pdev, ie->id);
ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
@@ -1211,7 +1234,7 @@ err_irq:
return rc;
}
-int __drv_enable_wq(struct idxd_wq *wq)
+int drv_enable_wq(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
@@ -1245,7 +1268,7 @@ int __drv_enable_wq(struct idxd_wq *wq)
/* Shared WQ checks */
if (wq_shared(wq)) {
- if (!device_swq_supported(idxd)) {
+ if (!wq_shared_supported(wq)) {
idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
dev_dbg(dev, "PASID not enabled and shared wq.\n");
goto err;
@@ -1265,6 +1288,29 @@ int __drv_enable_wq(struct idxd_wq *wq)
}
}
+ /*
+ * In the event that the WQ is configurable for pasid and priv bits.
+ * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit.
+ * However, for non-kernel wq, the driver should only set the pasid_en bit for
+ * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
+ * pasid_en later on so there is no need to setup.
+ */
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+ int priv = 0;
+
+ if (wq_pasid_enabled(wq)) {
+ if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
+ u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
+
+ __idxd_wq_set_pasid_locked(wq, pasid);
+ }
+ }
+
+ if (is_idxd_wq_kernel(wq))
+ priv = 1;
+ __idxd_wq_set_priv_locked(wq, priv);
+ }
+
rc = 0;
spin_lock(&idxd->dev_lock);
if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
@@ -1289,8 +1335,36 @@ int __drv_enable_wq(struct idxd_wq *wq)
}
wq->client_count = 0;
+
+ rc = idxd_wq_request_irq(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
+ dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
+ goto err_irq;
+ }
+
+ rc = idxd_wq_alloc_resources(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
+ dev_dbg(dev, "WQ resource alloc failed\n");
+ goto err_res_alloc;
+ }
+
+ rc = idxd_wq_init_percpu_ref(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
+ dev_dbg(dev, "percpu_ref setup failed\n");
+ goto err_ref;
+ }
+
return 0;
+err_ref:
+ idxd_wq_free_resources(wq);
+err_res_alloc:
+ idxd_wq_free_irq(wq);
+err_irq:
+ idxd_wq_unmap_portal(wq);
err_map_portal:
rc = idxd_wq_disable(wq, false);
if (rc < 0)
@@ -1299,17 +1373,7 @@ err:
return rc;
}
-int drv_enable_wq(struct idxd_wq *wq)
-{
- int rc;
-
- mutex_lock(&wq->wq_lock);
- rc = __drv_enable_wq(wq);
- mutex_unlock(&wq->wq_lock);
- return rc;
-}
-
-void __drv_disable_wq(struct idxd_wq *wq)
+void drv_disable_wq(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
@@ -1320,21 +1384,16 @@ void __drv_disable_wq(struct idxd_wq *wq)
dev_warn(dev, "Clients has claim on wq %d: %d\n",
wq->id, idxd_wq_refcount(wq));
+ idxd_wq_free_resources(wq);
idxd_wq_unmap_portal(wq);
-
idxd_wq_drain(wq);
+ idxd_wq_free_irq(wq);
idxd_wq_reset(wq);
-
+ percpu_ref_exit(&wq->wq_active);
+ wq->type = IDXD_WQT_NONE;
wq->client_count = 0;
}
-void drv_disable_wq(struct idxd_wq *wq)
-{
- mutex_lock(&wq->wq_lock);
- __drv_disable_wq(wq);
- mutex_unlock(&wq->wq_lock);
-}
-
int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index bfff59617d04..e0874cb4721c 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -88,6 +88,27 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
}
static struct dma_async_tx_descriptor *
+idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
+{
+ struct idxd_wq *wq = to_idxd_wq(c);
+ u32 desc_flags;
+ struct idxd_desc *desc;
+
+ if (wq->state != IDXD_WQ_ENABLED)
+ return NULL;
+
+ op_flag_setup(flags, &desc_flags);
+ desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(desc))
+ return NULL;
+
+ idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
+ 0, 0, 0, desc->compl_dma, desc_flags);
+ desc->txd.flags = flags;
+ return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
dma_addr_t dma_src, size_t len, unsigned long flags)
{
@@ -193,10 +214,12 @@ int idxd_register_dma_device(struct idxd_device *idxd)
INIT_LIST_HEAD(&dma->channels);
dma->dev = dev;
+ dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
dma->device_release = idxd_dma_release;
+ dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
@@ -227,7 +250,7 @@ void idxd_unregister_dma_device(struct idxd_device *idxd)
dma_async_device_unregister(&idxd->idxd_dma->dma);
}
-int idxd_register_dma_channel(struct idxd_wq *wq)
+static int idxd_register_dma_channel(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct dma_device *dma = &idxd->idxd_dma->dma;
@@ -264,7 +287,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq)
return 0;
}
-void idxd_unregister_dma_channel(struct idxd_wq *wq)
+static void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
struct dma_chan *chan = &idxd_chan->chan;
@@ -290,34 +313,13 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock);
wq->type = IDXD_WQT_KERNEL;
- rc = idxd_wq_request_irq(wq);
- if (rc < 0) {
- idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
- dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
- goto err_irq;
- }
-
- rc = __drv_enable_wq(wq);
+ rc = drv_enable_wq(wq);
if (rc < 0) {
dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
rc = -ENXIO;
goto err;
}
- rc = idxd_wq_alloc_resources(wq);
- if (rc < 0) {
- idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
- dev_dbg(dev, "WQ resource alloc failed\n");
- goto err_res_alloc;
- }
-
- rc = idxd_wq_init_percpu_ref(wq);
- if (rc < 0) {
- idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
- dev_dbg(dev, "percpu_ref setup failed\n");
- goto err_ref;
- }
-
rc = idxd_register_dma_channel(wq);
if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
@@ -330,15 +332,8 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
return 0;
err_dma:
- __idxd_wq_quiesce(wq);
- percpu_ref_exit(&wq->wq_active);
-err_ref:
- idxd_wq_free_resources(wq);
-err_res_alloc:
- __drv_disable_wq(wq);
+ drv_disable_wq(wq);
err:
- idxd_wq_free_irq(wq);
-err_irq:
wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
return rc;
@@ -351,11 +346,7 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock);
__idxd_wq_quiesce(wq);
idxd_unregister_dma_channel(wq);
- idxd_wq_free_resources(wq);
- __drv_disable_wq(wq);
- percpu_ref_exit(&wq->wq_active);
- idxd_wq_free_irq(wq);
- wq->type = IDXD_WQT_NONE;
+ drv_disable_wq(wq);
mutex_unlock(&wq->wq_lock);
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index da72eb15f610..fed0dfc1eaa8 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -239,6 +239,7 @@ enum idxd_device_flag {
IDXD_FLAG_CONFIGURABLE = 0,
IDXD_FLAG_CMD_RUNNING,
IDXD_FLAG_PASID_ENABLED,
+ IDXD_FLAG_USER_PASID_ENABLED,
};
struct idxd_dma_dev {
@@ -469,9 +470,20 @@ static inline bool device_pasid_enabled(struct idxd_device *idxd)
return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}
-static inline bool device_swq_supported(struct idxd_device *idxd)
+static inline bool device_user_pasid_enabled(struct idxd_device *idxd)
{
- return (support_enqcmd && device_pasid_enabled(idxd));
+ return test_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+}
+
+static inline bool wq_pasid_enabled(struct idxd_wq *wq)
+{
+ return (is_idxd_wq_kernel(wq) && device_pasid_enabled(wq->idxd)) ||
+ (is_idxd_wq_user(wq) && device_user_pasid_enabled(wq->idxd));
+}
+
+static inline bool wq_shared_supported(struct idxd_wq *wq)
+{
+ return (support_enqcmd && wq_pasid_enabled(wq));
}
enum idxd_portal_prot {
@@ -559,9 +571,7 @@ void idxd_unregister_idxd_drv(void);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int drv_enable_wq(struct idxd_wq *wq);
-int __drv_enable_wq(struct idxd_wq *wq);
void drv_disable_wq(struct idxd_wq *wq);
-void __drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
@@ -602,8 +612,6 @@ int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
-int idxd_register_dma_channel(struct idxd_wq *wq);
-void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
enum idxd_complete_type comp_type, bool free_desc);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 993a5dcca24f..355fb3ef4cbf 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -512,18 +512,15 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
- rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
- if (rc == 0) {
- rc = idxd_enable_system_pasid(idxd);
- if (rc < 0) {
- iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
- dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
- } else {
- set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
- }
- } else {
- dev_warn(dev, "Unable to turn on SVA feature.\n");
- }
+ if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
+ dev_warn(dev, "Unable to turn on user SVA feature.\n");
+ else
+ set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+
+ if (idxd_enable_system_pasid(idxd))
+ dev_warn(dev, "No in-kernel DMA with PASID.\n");
+ else
+ set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
} else if (!sva) {
dev_warn(dev, "User forced SVA off via module param.\n");
}
@@ -561,7 +558,8 @@ static int idxd_probe(struct idxd_device *idxd)
err:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+ if (device_user_pasid_enabled(idxd))
+ iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
return rc;
}
@@ -574,7 +572,8 @@ static void idxd_cleanup(struct idxd_device *idxd)
idxd_cleanup_internals(idxd);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+ if (device_user_pasid_enabled(idxd))
+ iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -691,7 +690,8 @@ static void idxd_remove(struct pci_dev *pdev)
free_irq(irq_entry->vector, irq_entry);
pci_free_irq_vectors(pdev);
pci_iounmap(pdev, idxd->reg_base);
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ if (device_user_pasid_enabled(idxd))
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
pci_disable_device(pdev);
destroy_workqueue(idxd->wq);
perfmon_pmu_remove(idxd);
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index aa642aecdc0b..02449aa9c454 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -353,6 +353,7 @@ union wqcfg {
} __packed;
#define WQCFG_PASID_IDX 2
+#define WQCFG_PRIVL_IDX 2
#define WQCFG_OCCUP_IDX 6
#define WQCFG_OCCUP_MASK 0xffff
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index dfd549685c46..3f262a57441b 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -588,7 +588,7 @@ static ssize_t wq_mode_store(struct device *dev,
if (sysfs_streq(buf, "dedicated")) {
set_bit(WQ_FLAG_DEDICATED, &wq->flags);
wq->threshold = 0;
- } else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
+ } else if (sysfs_streq(buf, "shared")) {
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
} else {
return -EINVAL;
@@ -832,6 +832,7 @@ static ssize_t wq_name_store(struct device *dev,
size_t count)
{
struct idxd_wq *wq = confdev_to_wq(dev);
+ char *input, *pos;
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
@@ -846,9 +847,14 @@ static ssize_t wq_name_store(struct device *dev,
if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
return -EOPNOTSUPP;
+ input = kstrndup(buf, count, GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ pos = strim(input);
memset(wq->name, 0, WQ_NAME_SIZE + 1);
- strncpy(wq->name, buf, WQ_NAME_SIZE);
- strreplace(wq->name, '\n', '\0');
+ sprintf(wq->name, "%s", pos);
+ kfree(input);
return count;
}
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 41ef9f15d3d5..f8847c48ba03 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -751,7 +751,6 @@ static int mtk_cqdma_probe(struct platform_device *pdev)
struct mtk_cqdma_device *cqdma;
struct mtk_cqdma_vchan *vc;
struct dma_device *dd;
- struct resource *res;
int err;
u32 i;
@@ -824,13 +823,10 @@ static int mtk_cqdma_probe(struct platform_device *pdev)
return PTR_ERR(cqdma->pc[i]->base);
/* allocate IRQ resource */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (!res) {
- dev_err(&pdev->dev, "No irq resource for %s\n",
- dev_name(&pdev->dev));
- return -EINVAL;
- }
- cqdma->pc[i]->irq = res->start;
+ err = platform_get_irq(pdev, i);
+ if (err < 0)
+ return err;
+ cqdma->pc[i]->irq = err;
err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq,
mtk_cqdma_irq, 0, dev_name(&pdev->dev),
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 6ad8afbb95f2..9ebd9231f62f 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -601,7 +601,7 @@ static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma)
cb->flag = 0;
}
- cb->vd = 0;
+ cb->vd = NULL;
/*
* Recycle the RXD with the helper WRITE_ONCE that can ensure
@@ -923,13 +923,10 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
return PTR_ERR(hsdma->clk);
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev, "No irq resource for %s\n",
- dev_name(&pdev->dev));
- return -EINVAL;
- }
- hsdma->irq = res->start;
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ return err;
+ hsdma->irq = err;
refcount_set(&hsdma->pc_refcnt, 0);
spin_lock_init(&hsdma->lock);
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 5a53d7fcef01..e8d71b35593e 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -1043,13 +1043,17 @@ static int mmp_pdma_probe(struct platform_device *op)
return PTR_ERR(pdev->base);
of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
- if (of_id)
- of_property_read_u32(pdev->dev->of_node, "#dma-channels",
- &dma_channels);
- else if (pdata && pdata->dma_channels)
+ if (of_id) {
+ /* Parse new and deprecated dma-channels properties */
+ if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
+ &dma_channels))
+ of_property_read_u32(pdev->dev->of_node, "#dma-channels",
+ &dma_channels);
+ } else if (pdata && pdata->dma_channels) {
dma_channels = pdata->dma_channels;
- else
+ } else {
dma_channels = 32; /* default 32 channel */
+ }
pdev->dma_channels = dma_channels;
for (i = 0; i < dma_channels; i++) {
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 9c8b4084ba2f..f10b29034da1 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -591,14 +591,14 @@ static void mv_xor_v2_tasklet(struct tasklet_struct *t)
dma_run_dependencies(&next_pending_sw_desc->async_tx);
/* Lock the channel */
- spin_lock_bh(&xor_dev->lock);
+ spin_lock(&xor_dev->lock);
/* add the SW descriptor to the free descriptors list */
list_add(&next_pending_sw_desc->free_list,
&xor_dev->free_sw_desc);
/* Release the channel */
- spin_unlock_bh(&xor_dev->lock);
+ spin_unlock(&xor_dev->lock);
/* increment the next descriptor */
pending_ptr++;
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 9c52c57919c6..a7063e9cd551 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1294,7 +1294,7 @@ static int nbpf_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct nbpf_device *nbpf;
struct dma_device *dma_dev;
- struct resource *iomem, *irq_res;
+ struct resource *iomem;
const struct nbpf_config *cfg;
int num_channels;
int ret, irq, eirq, i;
@@ -1335,13 +1335,11 @@ static int nbpf_probe(struct platform_device *pdev)
nbpf->config = cfg;
for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (!irq_res)
- break;
-
- for (irq = irq_res->start; irq <= irq_res->end;
- irq++, irqs++)
- irqbuf[irqs] = irq;
+ irq = platform_get_irq_optional(pdev, i);
+ if (irq < 0 && irq != -ENXIO)
+ return irq;
+ if (irq > 0)
+ irqbuf[irqs++] = irq;
}
/*
diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c
index 1ffcb5ca9788..12725fa1655f 100644
--- a/drivers/dma/plx_dma.c
+++ b/drivers/dma/plx_dma.c
@@ -137,7 +137,7 @@ static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
struct plx_dma_desc *desc;
u32 flags;
- spin_lock_bh(&plxdev->ring_lock);
+ spin_lock(&plxdev->ring_lock);
while (plxdev->tail != plxdev->head) {
desc = plx_dma_get_desc(plxdev, plxdev->tail);
@@ -165,7 +165,7 @@ static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
plxdev->tail++;
}
- spin_unlock_bh(&plxdev->ring_lock);
+ spin_unlock(&plxdev->ring_lock);
}
static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
index daafea5bc35d..377da23012ac 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/ptdma/ptdma-dev.c
@@ -100,6 +100,7 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
struct pt_passthru_engine *pt_engine)
{
struct ptdma_desc desc;
+ struct pt_device *pt = container_of(cmd_q, struct pt_device, cmd_q);
cmd_q->cmd_error = 0;
cmd_q->total_pt_ops++;
@@ -111,17 +112,12 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
desc.dst_lo = lower_32_bits(pt_engine->dst_dma);
desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma);
- return pt_core_execute_cmd(&desc, cmd_q);
-}
-
-static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
-{
- iowrite32(0, pt->cmd_q.reg_control + 0x000C);
-}
+ if (cmd_q->int_en)
+ pt_core_enable_queue_interrupts(pt);
+ else
+ pt_core_disable_queue_interrupts(pt);
-static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
-{
- iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
+ return pt_core_execute_cmd(&desc, cmd_q);
}
static void pt_do_cmd_complete(unsigned long data)
@@ -144,14 +140,10 @@ static void pt_do_cmd_complete(unsigned long data)
cmd->pt_cmd_callback(cmd->data, cmd->ret);
}
-static irqreturn_t pt_core_irq_handler(int irq, void *data)
+void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
{
- struct pt_device *pt = data;
- struct pt_cmd_queue *cmd_q = &pt->cmd_q;
u32 status;
- pt_core_disable_queue_interrupts(pt);
- pt->total_interrupts++;
status = ioread32(cmd_q->reg_control + 0x0010);
if (status) {
cmd_q->int_status = status;
@@ -162,11 +154,21 @@ static irqreturn_t pt_core_irq_handler(int irq, void *data)
if ((status & INT_ERROR) && !cmd_q->cmd_error)
cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
- /* Acknowledge the interrupt */
+ /* Acknowledge the completion */
iowrite32(status, cmd_q->reg_control + 0x0010);
- pt_core_enable_queue_interrupts(pt);
pt_do_cmd_complete((ulong)&pt->tdata);
}
+}
+
+static irqreturn_t pt_core_irq_handler(int irq, void *data)
+{
+ struct pt_device *pt = data;
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+
+ pt_core_disable_queue_interrupts(pt);
+ pt->total_interrupts++;
+ pt_check_status_trans(pt, cmd_q);
+ pt_core_enable_queue_interrupts(pt);
return IRQ_HANDLED;
}
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
index 91b93e8d9779..cc22d162ce25 100644
--- a/drivers/dma/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/ptdma/ptdma-dmaengine.c
@@ -171,6 +171,7 @@ static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
vchan_tx_prep(&chan->vc, &desc->vd, flags);
desc->pt = chan->pt;
+ desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT);
desc->issued_to_hw = 0;
desc->status = DMA_IN_PROGRESS;
@@ -257,6 +258,17 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
pt_cmd_callback(desc, 0);
}
+static enum dma_status
+pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct pt_device *pt = to_pt_chan(c)->pt;
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+
+ pt_check_status_trans(pt, cmd_q);
+ return dma_cookie_status(c, cookie, txstate);
+}
+
static int pt_pause(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
@@ -291,8 +303,10 @@ static int pt_terminate_all(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
unsigned long flags;
+ struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
LIST_HEAD(head);
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
spin_lock_irqsave(&chan->vc.lock, flags);
vchan_get_all_descriptors(&chan->vc, &head);
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -362,7 +376,7 @@ int pt_dmaengine_register(struct pt_device *pt)
dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
dma_dev->device_issue_pending = pt_issue_pending;
- dma_dev->device_tx_status = dma_cookie_status;
+ dma_dev->device_tx_status = pt_tx_status;
dma_dev->device_pause = pt_pause;
dma_dev->device_resume = pt_resume;
dma_dev->device_terminate_all = pt_terminate_all;
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
index afbf192c9230..d093c43b7d13 100644
--- a/drivers/dma/ptdma/ptdma.h
+++ b/drivers/dma/ptdma/ptdma.h
@@ -206,6 +206,9 @@ struct pt_cmd_queue {
unsigned int active;
unsigned int suspended;
+ /* Interrupt flag */
+ bool int_en;
+
/* Register addresses for queue */
void __iomem *reg_control;
u32 qcontrol; /* Cached control register */
@@ -318,7 +321,17 @@ void pt_core_destroy(struct pt_device *pt);
int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
struct pt_passthru_engine *pt_engine);
+void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q);
void pt_start_queue(struct pt_cmd_queue *cmd_q);
void pt_stop_queue(struct pt_cmd_queue *cmd_q);
+static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
+{
+ iowrite32(0, pt->cmd_q.reg_control + 0x000C);
+}
+
+static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
+{
+ iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
+}
#endif
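
With the interrupt enable/disable helpers moved into ptdma.h and device_tx_status pointing at pt_tx_status(), completion no longer depends on the interrupt path: when a client omits DMA_PREP_INTERRUPT, int_en stays false, the queue interrupt is left masked, and status queries poll the hardware through pt_check_status_trans(). A minimal client-side sketch of that polled mode, using only generic dmaengine calls (ptdma_polled_copy is an illustrative name, not a driver function):

	#include <linux/dmaengine.h>

	/* Sketch: polled memcpy on a ptdma channel, no completion interrupt requested. */
	static int ptdma_polled_copy(struct dma_chan *chan, dma_addr_t dst,
				     dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		/* flags = 0: no DMA_PREP_INTERRUPT, so the driver keeps queue interrupts masked */
		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
		if (!tx)
			return -EINVAL;

		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(chan);

		/* each status query lands in pt_tx_status() -> pt_check_status_trans() */
		while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_COMPLETE)
			cpu_relax();

		return 0;
	}
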
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 6078cc81892e..e7034f6f3994 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1365,10 +1365,17 @@ static int pxad_probe(struct platform_device *op)
of_id = of_match_device(pxad_dt_ids, &op->dev);
if (of_id) {
- of_property_read_u32(op->dev.of_node, "#dma-channels",
- &dma_channels);
- ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
+ /* Parse new and deprecated dma-channels properties */
+ if (of_property_read_u32(op->dev.of_node, "dma-channels",
+ &dma_channels))
+ of_property_read_u32(op->dev.of_node, "#dma-channels",
+ &dma_channels);
+ /* Parse new and deprecated dma-requests properties */
+ ret = of_property_read_u32(op->dev.of_node, "dma-requests",
&nb_requestors);
+ if (ret)
+ ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
+ &nb_requestors);
if (ret) {
dev_warn(pdev->slave.dev,
"#dma-requests set to default 32 as missing in OF: %d",
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 94f3648f7483..8f0c9c4e2efd 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1754,10 +1754,14 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
tre->dword[2] = u32_encode_bits(spi->rx_len, TRE_RX_LEN);
tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
- if (spi->cmd == SPI_RX)
+ if (spi->cmd == SPI_RX) {
tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
- else
+ } else if (spi->cmd == SPI_TX) {
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ } else { /* SPI_DUPLEX */
tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
+ }
}
/* create the dma tre */
@@ -2148,6 +2152,7 @@ static int gpi_probe(struct platform_device *pdev)
{
struct gpi_dev *gpi_dev;
unsigned int i;
+ u32 ee_offset;
int ret;
gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
@@ -2175,6 +2180,9 @@ static int gpi_probe(struct platform_device *pdev)
return ret;
}
+ ee_offset = (uintptr_t)device_get_match_data(gpi_dev->dev);
+ gpi_dev->ee_base = gpi_dev->ee_base - ee_offset;
+
gpi_dev->ev_factor = EV_FACTOR;
ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
@@ -2278,9 +2286,12 @@ static int gpi_probe(struct platform_device *pdev)
}
static const struct of_device_id gpi_of_match[] = {
- { .compatible = "qcom,sdm845-gpi-dma" },
- { .compatible = "qcom,sm8150-gpi-dma" },
- { .compatible = "qcom,sm8250-gpi-dma" },
+ { .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 },
+ { .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 },
+ { .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 },
+ { .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 },
+ { .compatible = "qcom,sm8350-gpi-dma", .data = (void *)0x10000 },
+ { .compatible = "qcom,sm8450-gpi-dma", .data = (void *)0x10000 },
{ },
};
MODULE_DEVICE_TABLE(of, gpi_of_match);
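
Each new compatible in the table above carries the execution-environment register offset as match data, so the probe path can subtract it from the mapped base without per-SoC branches. A hedged sketch of how such pointer-encoded match data is recovered (gpi_apply_ee_offset is an illustrative name, not a driver function):

	#include <linux/property.h>

	/* Sketch: recover a per-SoC constant stored in the .data field of the OF match table. */
	static void __iomem *gpi_apply_ee_offset(struct device *dev, void __iomem *ee_base)
	{
		/* the offset is stored directly as a pointer-sized integer, e.g. (void *)0x10000 */
		uintptr_t ee_offset = (uintptr_t)device_get_match_data(dev);

		return ee_base - ee_offset;
	}
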
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 51587cf8196b..210f1a9eb441 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -431,6 +431,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
struct hidma_desc *mdesc = NULL;
struct hidma_dev *mdma = mchan->dmadev;
unsigned long irqflags;
+ u64 byte_pattern, fill_pattern;
/* Get free descriptor */
spin_lock_irqsave(&mchan->lock, irqflags);
@@ -443,9 +444,19 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
if (!mdesc)
return NULL;
+ byte_pattern = (char)value;
+ fill_pattern = (byte_pattern << 56) |
+ (byte_pattern << 48) |
+ (byte_pattern << 40) |
+ (byte_pattern << 32) |
+ (byte_pattern << 24) |
+ (byte_pattern << 16) |
+ (byte_pattern << 8) |
+ byte_pattern;
+
mdesc->desc.flags = flags;
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
- value, dest, len, flags,
+ fill_pattern, dest, len, flags,
HIDMA_TRE_MEMSET);
/* Place descriptor in prepared list */
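
The hidma hunk above widens the single memset byte into a 64-bit fill pattern by shifting and OR-ing the low byte into every byte lane. The same replication can be written more compactly with a multiply; a minimal sketch (not the driver's code, and using u8 to sidestep sign extension of a plain char cast):

	#include <linux/types.h>

	/* Sketch: replicate the low byte of 'value' across all eight bytes of a u64. */
	static u64 build_fill_pattern(int value)
	{
		u64 byte = (u8)value;

		return byte * 0x0101010101010101ULL;	/* e.g. 0xAB -> 0xABABABABABABABAB */
	}
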
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index f12606aeff87..db5a4ef76077 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -482,23 +482,30 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma)
static int sf_pdma_probe(struct platform_device *pdev)
{
struct sf_pdma *pdma;
- struct sf_pdma_chan *chan;
struct resource *res;
- int len, chans;
- int ret;
+ int ret, n_chans;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
DMA_SLAVE_BUSWIDTH_64_BYTES;
- chans = PDMA_NR_CH;
- len = sizeof(*pdma) + sizeof(*chan) * chans;
- pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ ret = of_property_read_u32(pdev->dev.of_node, "dma-channels", &n_chans);
+ if (ret) {
+ /* backwards-compatibility for no dma-channels property */
+ dev_dbg(&pdev->dev, "set number of channels to default value: 4\n");
+ n_chans = PDMA_MAX_NR_CH;
+ } else if (n_chans > PDMA_MAX_NR_CH) {
+ dev_err(&pdev->dev, "the number of channels exceeds the maximum\n");
+ return -EINVAL;
+ }
+
+ pdma = devm_kzalloc(&pdev->dev, struct_size(pdma, chans, n_chans),
+ GFP_KERNEL);
if (!pdma)
return -ENOMEM;
- pdma->n_chans = chans;
+ pdma->n_chans = n_chans;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pdma->membase = devm_ioremap_resource(&pdev->dev, res);
@@ -556,7 +563,7 @@ static int sf_pdma_remove(struct platform_device *pdev)
struct sf_pdma_chan *ch;
int i;
- for (i = 0; i < PDMA_NR_CH; i++) {
+ for (i = 0; i < pdma->n_chans; i++) {
ch = &pdma->chans[i];
devm_free_irq(&pdev->dev, ch->txirq, ch);
@@ -574,6 +581,7 @@ static int sf_pdma_remove(struct platform_device *pdev)
static const struct of_device_id sf_pdma_dt_ids[] = {
{ .compatible = "sifive,fu540-c000-pdma" },
+ { .compatible = "sifive,pdma0" },
{},
};
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h
index 0c20167b097d..dcb3687bd5da 100644
--- a/drivers/dma/sf-pdma/sf-pdma.h
+++ b/drivers/dma/sf-pdma/sf-pdma.h
@@ -22,11 +22,7 @@
#include "../dmaengine.h"
#include "../virt-dma.h"
-#define PDMA_NR_CH 4
-
-#if (PDMA_NR_CH != 4)
-#error "Please define PDMA_NR_CH to 4"
-#endif
+#define PDMA_MAX_NR_CH 4
#define PDMA_BASE_ADDR 0x3000000
#define PDMA_CHAN_OFFSET 0x1000
@@ -118,7 +114,7 @@ struct sf_pdma {
void __iomem *membase;
void __iomem *mappedbase;
u32 n_chans;
- struct sf_pdma_chan chans[PDMA_NR_CH];
+ struct sf_pdma_chan chans[];
};
#endif /* _SF_PDMA_H */
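
Replacing the fixed PDMA_NR_CH array with a flexible array member is what makes the "dma-channels" property usable: the channel array is now sized at probe time with struct_size(), which also guards the multiplication against overflow. A small sketch of the allocation idiom, with hypothetical demo_ names:

	#include <linux/device.h>
	#include <linux/overflow.h>

	struct demo_pdma {
		u32 n_chans;
		struct sf_pdma_chan chans[];	/* flexible array member, counted at runtime */
	};

	/* Sketch: allocate the controller plus n_chans trailing channel structs in one go. */
	static struct demo_pdma *demo_pdma_alloc(struct device *dev, u32 n_chans)
	{
		struct demo_pdma *pdma;

		/* struct_size() == sizeof(*pdma) + n_chans * sizeof(pdma->chans[0]), overflow-checked */
		pdma = devm_kzalloc(dev, struct_size(pdma, chans, n_chans), GFP_KERNEL);
		if (pdma)
			pdma->n_chans = n_chans;

		return pdma;
	}
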
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index b35d705f79e7..c0b2997ab7fd 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -50,7 +50,7 @@ config RENESAS_USB_DMAC
config RZ_DMAC
tristate "Renesas RZ/{G2L,V2L} DMA Controller"
- depends on ARCH_R9A07G044 || ARCH_R9A07G054 || COMPILE_TEST
+ depends on ARCH_RZG2L || COMPILE_TEST
select RENESAS_DMA
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 7f158ef5672d..2138b80435ab 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1117,7 +1117,11 @@ static int sprd_dma_probe(struct platform_device *pdev)
u32 chn_count;
int ret, i;
- ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count);
+ /* Parse new and deprecated dma-channels properties */
+ ret = device_property_read_u32(&pdev->dev, "dma-channels", &chn_count);
+ if (ret)
+ ret = device_property_read_u32(&pdev->dev, "#dma-channels",
+ &chn_count);
if (ret) {
dev_err(&pdev->dev, "get dma channels count failed\n");
return ret;
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index d2365fab1b7a..adb25a11c70f 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -208,6 +208,7 @@ struct stm32_dma_chan {
u32 threshold;
u32 mem_burst;
u32 mem_width;
+ enum dma_status status;
};
struct stm32_dma_device {
@@ -485,6 +486,7 @@ static void stm32_dma_stop(struct stm32_dma_chan *chan)
}
chan->busy = false;
+ chan->status = DMA_COMPLETE;
}
static int stm32_dma_terminate_all(struct dma_chan *c)
@@ -535,6 +537,13 @@ static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
}
+static void stm32_dma_sg_inc(struct stm32_dma_chan *chan)
+{
+ chan->next_sg++;
+ if (chan->desc->cyclic && (chan->next_sg == chan->desc->num_sgs))
+ chan->next_sg = 0;
+}
+
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);
static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
@@ -575,7 +584,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);
- chan->next_sg++;
+ stm32_dma_sg_inc(chan);
/* Clear interrupt status if it is there */
status = stm32_dma_irq_status(chan);
@@ -588,11 +597,11 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
stm32_dma_dump_reg(chan);
/* Start DMA */
+ chan->busy = true;
+ chan->status = DMA_IN_PROGRESS;
reg->dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
- chan->busy = true;
-
dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}
@@ -605,41 +614,131 @@ static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
id = chan->id;
dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
- if (dma_scr & STM32_DMA_SCR_DBM) {
- if (chan->next_sg == chan->desc->num_sgs)
- chan->next_sg = 0;
+ sg_req = &chan->desc->sg_req[chan->next_sg];
- sg_req = &chan->desc->sg_req[chan->next_sg];
+ if (dma_scr & STM32_DMA_SCR_CT) {
+ dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
+ stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
+ dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
+ stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
+ } else {
+ dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
+ stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
+ dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
+ stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
+ }
+}
- if (dma_scr & STM32_DMA_SCR_CT) {
- dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
- stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
- dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
- stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
- } else {
- dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
- stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
- dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
- stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
- }
+static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
+{
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ u32 dma_scr;
+
+ /*
+ * Read and store current remaining data items and peripheral/memory addresses to be
+ * updated on resume
+ */
+ dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+ /*
+ * Transfer can be paused while between a previous resume and reconfiguration on transfer
+ * complete. If transfer is cyclic and CIRC and DBM have been deactivated for resume, need
+ * to set it here in SCR backup to ensure a good reconfiguration on transfer complete.
+ */
+ if (chan->desc && chan->desc->cyclic) {
+ if (chan->desc->num_sgs == 1)
+ dma_scr |= STM32_DMA_SCR_CIRC;
+ else
+ dma_scr |= STM32_DMA_SCR_DBM;
+ }
+ chan->chan_reg.dma_scr = dma_scr;
+
+ /*
+ * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt, otherwise
+ * on resume NDTR autoreload value will be wrong (lower than the initial period length)
+ */
+ if (chan->desc && chan->desc->cyclic) {
+ dma_scr &= ~(STM32_DMA_SCR_DBM | STM32_DMA_SCR_CIRC);
+ stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+ }
+
+ chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
+
+ dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
+}
+
+static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
+{
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ struct stm32_dma_sg_req *sg_req;
+ u32 dma_scr, status, id;
+
+ id = chan->id;
+ dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+
+ /* Clear interrupt status if it is there */
+ status = stm32_dma_irq_status(chan);
+ if (status)
+ stm32_dma_irq_clear(chan, status);
+
+ if (!chan->next_sg)
+ sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
+ else
+ sg_req = &chan->desc->sg_req[chan->next_sg - 1];
+
+ /* Reconfigure NDTR with the initial value */
+ stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), sg_req->chan_reg.dma_sndtr);
+
+ /* Restore SPAR */
+ stm32_dma_write(dmadev, STM32_DMA_SPAR(id), sg_req->chan_reg.dma_spar);
+
+ /* Restore SM0AR/SM1AR whatever DBM/CT as they may have been modified */
+ stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sg_req->chan_reg.dma_sm0ar);
+ stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sg_req->chan_reg.dma_sm1ar);
+
+ /* Reactivate CIRC/DBM if needed */
+ if (chan->chan_reg.dma_scr & STM32_DMA_SCR_DBM) {
+ dma_scr |= STM32_DMA_SCR_DBM;
+ /* Restore CT */
+ if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CT)
+ dma_scr &= ~STM32_DMA_SCR_CT;
+ else
+ dma_scr |= STM32_DMA_SCR_CT;
+ } else if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CIRC) {
+ dma_scr |= STM32_DMA_SCR_CIRC;
}
+ stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+
+ stm32_dma_configure_next_sg(chan);
+
+ stm32_dma_dump_reg(chan);
+
+ dma_scr |= STM32_DMA_SCR_EN;
+ stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+
+ dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan);
}
-static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
+static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
{
- if (chan->desc) {
- if (chan->desc->cyclic) {
- vchan_cyclic_callback(&chan->desc->vdesc);
- chan->next_sg++;
+ if (!chan->desc)
+ return;
+
+ if (chan->desc->cyclic) {
+ vchan_cyclic_callback(&chan->desc->vdesc);
+ stm32_dma_sg_inc(chan);
+ /* cyclic while CIRC/DBM disable => post resume reconfiguration needed */
+ if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
+ stm32_dma_post_resume_reconfigure(chan);
+ else if (scr & STM32_DMA_SCR_DBM)
stm32_dma_configure_next_sg(chan);
- } else {
- chan->busy = false;
- if (chan->next_sg == chan->desc->num_sgs) {
- vchan_cookie_complete(&chan->desc->vdesc);
- chan->desc = NULL;
- }
- stm32_dma_start_transfer(chan);
+ } else {
+ chan->busy = false;
+ chan->status = DMA_COMPLETE;
+ if (chan->next_sg == chan->desc->num_sgs) {
+ vchan_cookie_complete(&chan->desc->vdesc);
+ chan->desc = NULL;
}
+ stm32_dma_start_transfer(chan);
}
}
@@ -675,8 +774,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
if (status & STM32_DMA_TCI) {
stm32_dma_irq_clear(chan, STM32_DMA_TCI);
- if (scr & STM32_DMA_SCR_TCIE)
- stm32_dma_handle_chan_done(chan);
+ if (scr & STM32_DMA_SCR_TCIE) {
+ if (chan->status == DMA_PAUSED && !(scr & STM32_DMA_SCR_EN))
+ stm32_dma_handle_chan_paused(chan);
+ else
+ stm32_dma_handle_chan_done(chan, scr);
+ }
status &= ~STM32_DMA_TCI;
}
@@ -711,6 +814,107 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
+static int stm32_dma_pause(struct dma_chan *c)
+{
+ struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+ unsigned long flags;
+ int ret;
+
+ if (chan->status != DMA_IN_PROGRESS)
+ return -EPERM;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ ret = stm32_dma_disable_chan(chan);
+ /*
+ * A transfer complete flag is set to indicate the end of transfer due to the stream
+ * interruption, so wait for interrupt
+ */
+ if (!ret)
+ chan->status = DMA_PAUSED;
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ return ret;
+}
+
+static int stm32_dma_resume(struct dma_chan *c)
+{
+ struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ struct stm32_dma_chan_reg chan_reg = chan->chan_reg;
+ u32 id = chan->id, scr, ndtr, offset, spar, sm0ar, sm1ar;
+ struct stm32_dma_sg_req *sg_req;
+ unsigned long flags;
+
+ if (chan->status != DMA_PAUSED)
+ return -EPERM;
+
+ scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+ if (WARN_ON(scr & STM32_DMA_SCR_EN))
+ return -EPERM;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ /* sg_reg[prev_sg] contains original ndtr, sm0ar and sm1ar before pausing the transfer */
+ if (!chan->next_sg)
+ sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
+ else
+ sg_req = &chan->desc->sg_req[chan->next_sg - 1];
+
+ ndtr = sg_req->chan_reg.dma_sndtr;
+ offset = (ndtr - chan_reg.dma_sndtr) << STM32_DMA_SCR_PSIZE_GET(chan_reg.dma_scr);
+ spar = sg_req->chan_reg.dma_spar;
+ sm0ar = sg_req->chan_reg.dma_sm0ar;
+ sm1ar = sg_req->chan_reg.dma_sm1ar;
+
+ /*
+ * The peripheral and/or memory addresses have to be updated in order to adjust the
+ * address pointers. Need to check increment.
+ */
+ if (chan_reg.dma_scr & STM32_DMA_SCR_PINC)
+ stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar + offset);
+ else
+ stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar);
+
+ if (!(chan_reg.dma_scr & STM32_DMA_SCR_MINC))
+ offset = 0;
+
+ /*
+ * In case of DBM, the current target could be SM1AR.
+ * Need to temporarily deactivate CIRC/DBM to finish the current transfer, so
+ * SM0AR becomes the current target and must be updated with SM1AR + offset if CT=1.
+ */
+ if ((chan_reg.dma_scr & STM32_DMA_SCR_DBM) && (chan_reg.dma_scr & STM32_DMA_SCR_CT))
+ stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sm1ar + offset);
+ else
+ stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sm0ar + offset);
+
+ /* NDTR must be restored otherwise internal HW counter won't be correctly reset */
+ stm32_dma_write(dmadev, STM32_DMA_SNDTR(id), chan_reg.dma_sndtr);
+
+ /*
+ * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt,
+ * otherwise NDTR autoreload value will be wrong (lower than the initial period length)
+ */
+ if (chan_reg.dma_scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM))
+ chan_reg.dma_scr &= ~(STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM);
+
+ if (chan_reg.dma_scr & STM32_DMA_SCR_DBM)
+ stm32_dma_configure_next_sg(chan);
+
+ stm32_dma_dump_reg(chan);
+
+ /* The stream may then be re-enabled to restart transfer from the point it was stopped */
+ chan->status = DMA_IN_PROGRESS;
+ chan_reg.dma_scr |= STM32_DMA_SCR_EN;
+ stm32_dma_write(dmadev, STM32_DMA_SCR(id), chan_reg.dma_scr);
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
+
+ return 0;
+}
+
static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
enum dma_transfer_direction direction,
enum dma_slave_buswidth *buswidth,
@@ -978,10 +1182,12 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
}
/* Enable Circular mode or double buffer mode */
- if (buf_len == period_len)
+ if (buf_len == period_len) {
chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
- else
+ } else {
chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
+ chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
+ }
/* Clear periph ctrl if client set it */
chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
@@ -1091,24 +1297,36 @@ static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
{
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
struct stm32_dma_sg_req *sg_req;
- u32 dma_scr, dma_smar, id;
+ u32 dma_scr, dma_smar, id, period_len;
id = chan->id;
dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+ /* In cyclic CIRC but not DBM, CT is not used */
if (!(dma_scr & STM32_DMA_SCR_DBM))
return true;
sg_req = &chan->desc->sg_req[chan->next_sg];
+ period_len = sg_req->len;
+ /* DBM - take care of a previous pause/resume not yet post reconfigured */
if (dma_scr & STM32_DMA_SCR_CT) {
dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
- return (dma_smar == sg_req->chan_reg.dma_sm0ar);
+ /*
+ * If the transfer has been paused/resumed,
+ * SM0AR is in the range of [SM0AR:SM0AR+period_len]
+ */
+ return (dma_smar >= sg_req->chan_reg.dma_sm0ar &&
+ dma_smar < sg_req->chan_reg.dma_sm0ar + period_len);
}
dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));
-
- return (dma_smar == sg_req->chan_reg.dma_sm1ar);
+ /*
+ * If the transfer has been paused/resumed,
+ * SM1AR is in the range of [SM1AR:SM1AR+period_len]
+ */
+ return (dma_smar >= sg_req->chan_reg.dma_sm1ar &&
+ dma_smar < sg_req->chan_reg.dma_sm1ar + period_len);
}
static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
@@ -1148,7 +1366,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
residue = stm32_dma_get_remaining_bytes(chan);
- if (!stm32_dma_is_current_sg(chan)) {
+ if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) {
n_sg++;
if (n_sg == chan->desc->num_sgs)
n_sg = 0;
@@ -1188,7 +1406,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
u32 residue = 0;
status = dma_cookie_status(c, cookie, state);
- if (status == DMA_COMPLETE || !state)
+ if (status == DMA_COMPLETE)
+ return status;
+
+ status = chan->status;
+
+ if (!state)
return status;
spin_lock_irqsave(&chan->vchan.lock, flags);
@@ -1377,6 +1600,8 @@ static int stm32_dma_probe(struct platform_device *pdev)
dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
dd->device_config = stm32_dma_slave_config;
+ dd->device_pause = stm32_dma_pause;
+ dd->device_resume = stm32_dma_resume;
dd->device_terminate_all = stm32_dma_terminate_all;
dd->device_synchronize = stm32_dma_synchronize;
dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
@@ -1482,7 +1707,7 @@ static int stm32_dma_runtime_resume(struct device *dev)
#endif
#ifdef CONFIG_PM_SLEEP
-static int stm32_dma_suspend(struct device *dev)
+static int stm32_dma_pm_suspend(struct device *dev)
{
struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
int id, ret, scr;
@@ -1506,14 +1731,14 @@ static int stm32_dma_suspend(struct device *dev)
return 0;
}
-static int stm32_dma_resume(struct device *dev)
+static int stm32_dma_pm_resume(struct device *dev)
{
return pm_runtime_force_resume(dev);
}
#endif
static const struct dev_pm_ops stm32_dma_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_pm_suspend, stm32_dma_pm_resume)
SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
stm32_dma_runtime_resume, NULL)
};
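
From a client's point of view, the new stm32-dma pause/resume support is reached through the generic dmaengine wrappers: pause is only accepted while a transfer is in progress (-EPERM otherwise), and resume restores NDTR and the SxM0AR/SxM1AR pointers before re-enabling the stream. A minimal usage sketch, assuming the channel was requested elsewhere:

	#include <linux/dmaengine.h>

	/* Sketch: pause and later resume an ongoing (e.g. cyclic) transfer on a channel. */
	static int pause_then_resume(struct dma_chan *chan)
	{
		int ret;

		ret = dmaengine_pause(chan);	/* -EPERM if nothing is in progress */
		if (ret)
			return ret;

		/* ... the residue can still be queried here via dmaengine_tx_status() ... */

		return dmaengine_resume(chan);	/* restarts from the point the stream was stopped */
	}
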
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index d5d55732adba..eee0c5aa5fb5 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -267,7 +267,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
ret = PTR_ERR(rst);
if (ret == -EPROBE_DEFER)
goto err_clk;
- } else {
+ } else if (count > 1) { /* Don't reset if there is only one dma-master */
reset_control_assert(rst);
udelay(2);
reset_control_deassert(rst);
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 6f57ff0e7b37..caf0cce8f528 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -34,7 +34,6 @@
#include "virt-dma.h"
#define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */
-#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */
/* MDMA Channel x interrupt/status register */
#define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */
@@ -73,6 +72,7 @@
#define STM32_MDMA_CCR_WEX BIT(14)
#define STM32_MDMA_CCR_HEX BIT(13)
#define STM32_MDMA_CCR_BEX BIT(12)
+#define STM32_MDMA_CCR_SM BIT(8)
#define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6)
#define STM32_MDMA_CCR_PL(n) FIELD_PREP(STM32_MDMA_CCR_PL_MASK, (n))
#define STM32_MDMA_CCR_TCIE BIT(5)
@@ -168,7 +168,7 @@
#define STM32_MDMA_MAX_BUF_LEN 128
#define STM32_MDMA_MAX_BLOCK_LEN 65536
-#define STM32_MDMA_MAX_CHANNELS 63
+#define STM32_MDMA_MAX_CHANNELS 32
#define STM32_MDMA_MAX_REQUESTS 256
#define STM32_MDMA_MAX_BURST 128
#define STM32_MDMA_VERY_HIGH_PRIORITY 0x3
@@ -248,6 +248,7 @@ struct stm32_mdma_device {
u32 nr_channels;
u32 nr_requests;
u32 nr_ahb_addr_masks;
+ u32 chan_reserved;
struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS];
u32 ahb_addr_masks[];
};
@@ -1317,26 +1318,16 @@ static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
{
struct stm32_mdma_device *dmadev = devid;
- struct stm32_mdma_chan *chan = devid;
+ struct stm32_mdma_chan *chan;
u32 reg, id, ccr, ien, status;
/* Find out which channel generates the interrupt */
status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
- if (status) {
- id = __ffs(status);
- } else {
- status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1);
- if (!status) {
- dev_dbg(mdma2dev(dmadev), "spurious it\n");
- return IRQ_NONE;
- }
- id = __ffs(status);
- /*
- * As GISR0 provides status for channel id from 0 to 31,
- * so GISR1 provides status for channel id from 32 to 62
- */
- id += 32;
+ if (!status) {
+ dev_dbg(mdma2dev(dmadev), "spurious it\n");
+ return IRQ_NONE;
}
+ id = __ffs(status);
chan = &dmadev->chan[id];
if (!chan) {
@@ -1354,9 +1345,12 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
if (!(status & ien)) {
spin_unlock(&chan->vchan.lock);
- dev_warn(chan2dev(chan),
- "spurious it (status=0x%04x, ien=0x%04x)\n",
- status, ien);
+ if (chan->busy)
+ dev_warn(chan2dev(chan),
+ "spurious it (status=0x%04x, ien=0x%04x)\n", status, ien);
+ else
+ dev_dbg(chan2dev(chan),
+ "spurious it (status=0x%04x, ien=0x%04x)\n", status, ien);
return IRQ_NONE;
}
@@ -1456,10 +1450,23 @@ static void stm32_mdma_free_chan_resources(struct dma_chan *c)
chan->desc_pool = NULL;
}
+static bool stm32_mdma_filter_fn(struct dma_chan *c, void *fn_param)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+
+ /* Check if chan is marked Secure */
+ if (dmadev->chan_reserved & BIT(chan->id))
+ return false;
+
+ return true;
+}
+
static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct stm32_mdma_device *dmadev = ofdma->of_dma_data;
+ dma_cap_mask_t mask = dmadev->ddev.cap_mask;
struct stm32_mdma_chan *chan;
struct dma_chan *c;
struct stm32_mdma_chan_config config;
@@ -1485,7 +1492,7 @@ static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
return NULL;
}
- c = dma_get_any_slave_channel(&dmadev->ddev);
+ c = __dma_request_channel(&mask, stm32_mdma_filter_fn, &config, ofdma->of_node);
if (!c) {
dev_err(mdma2dev(dmadev), "No more channels available\n");
return NULL;
@@ -1615,6 +1622,10 @@ static int stm32_mdma_probe(struct platform_device *pdev)
for (i = 0; i < dmadev->nr_channels; i++) {
chan = &dmadev->chan[i];
chan->id = i;
+
+ if (stm32_mdma_read(dmadev, STM32_MDMA_CCR(i)) & STM32_MDMA_CCR_SM)
+ dmadev->chan_reserved |= BIT(i);
+
chan->vchan.desc_free = stm32_mdma_desc_free;
vchan_init(&chan->vchan, dd);
}
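
Channels whose CCR has the new SM bit set at probe time are collected into chan_reserved, and the xlate path now goes through __dma_request_channel() with a filter so those secure channels are never handed out. The filter idea in isolation, with hypothetical demo_ names standing in for the driver's structures:

	#include <linux/bits.h>
	#include <linux/dmaengine.h>

	struct demo_dmadev {
		u32 chan_reserved;	/* bit i set => channel i is secure/reserved */
	};

	/* Sketch: a filter that refuses channels marked reserved in a per-controller bitmap. */
	static bool demo_filter_fn(struct dma_chan *c, void *fn_param)
	{
		struct demo_dmadev *dmadev = fn_param;

		return !(dmadev->chan_reserved & BIT(c->chan_id));
	}

	/* Used as: c = __dma_request_channel(&mask, demo_filter_fn, dmadev, ofdma->of_node); */
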
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 5cadd4d2b824..b7557f437936 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -90,6 +90,14 @@
#define DMA_CHAN_CUR_PARA 0x1c
+/*
+ * LLI address mangling
+ *
+ * The LLI link physical address is also mangled, but we avoid dealing
+ * with that by allocating LLIs from the DMA32 zone.
+ */
+#define SRC_HIGH_ADDR(x) (((x) & 0x3U) << 16)
+#define DST_HIGH_ADDR(x) (((x) & 0x3U) << 18)
/*
* Various hardware related defines
@@ -132,6 +140,7 @@ struct sun6i_dma_config {
u32 dst_burst_lengths;
u32 src_addr_widths;
u32 dst_addr_widths;
+ bool has_high_addr;
bool has_mbus_clk;
};
@@ -241,9 +250,7 @@ static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev)
static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
struct sun6i_pchan *pchan)
{
- phys_addr_t reg = virt_to_phys(pchan->base);
-
- dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n"
+ dev_dbg(sdev->slave.dev, "Chan %d reg:\n"
"\t___en(%04x): \t0x%08x\n"
"\tpause(%04x): \t0x%08x\n"
"\tstart(%04x): \t0x%08x\n"
@@ -252,7 +259,7 @@ static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
"\t__dst(%04x): \t0x%08x\n"
"\tcount(%04x): \t0x%08x\n"
"\t_para(%04x): \t0x%08x\n\n",
- pchan->idx, &reg,
+ pchan->idx,
DMA_CHAN_ENABLE,
readl(pchan->base + DMA_CHAN_ENABLE),
DMA_CHAN_PAUSE,
@@ -385,17 +392,16 @@ static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
}
static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
- struct sun6i_dma_lli *lli)
+ struct sun6i_dma_lli *v_lli,
+ dma_addr_t p_lli)
{
- phys_addr_t p_lli = virt_to_phys(lli);
-
dev_dbg(chan2dev(&vchan->vc.chan),
- "\n\tdesc: p - %pa v - 0x%p\n"
+ "\n\tdesc:\tp - %pad v - 0x%p\n"
"\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n"
"\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n",
- &p_lli, lli,
- lli->cfg, lli->src, lli->dst,
- lli->len, lli->para, lli->p_lli_next);
+ &p_lli, v_lli,
+ v_lli->cfg, v_lli->src, v_lli->dst,
+ v_lli->len, v_lli->para, v_lli->p_lli_next);
}
static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
@@ -445,7 +451,7 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
pchan->desc = to_sun6i_desc(&desc->tx);
pchan->done = NULL;
- sun6i_dma_dump_lli(vchan, pchan->desc->v_lli);
+ sun6i_dma_dump_lli(vchan, pchan->desc->v_lli, pchan->desc->p_lli);
irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;
@@ -626,6 +632,18 @@ static int set_config(struct sun6i_dma_dev *sdev,
return 0;
}
+static inline void sun6i_dma_set_addr(struct sun6i_dma_dev *sdev,
+ struct sun6i_dma_lli *v_lli,
+ dma_addr_t src, dma_addr_t dst)
+{
+ v_lli->src = lower_32_bits(src);
+ v_lli->dst = lower_32_bits(dst);
+
+ if (sdev->cfg->has_high_addr)
+ v_lli->para |= SRC_HIGH_ADDR(upper_32_bits(src)) |
+ DST_HIGH_ADDR(upper_32_bits(dst));
+}
+
static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
@@ -648,16 +666,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
if (!txd)
return NULL;
- v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
+ v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli);
if (!v_lli) {
dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
goto err_txd_free;
}
- v_lli->src = src;
- v_lli->dst = dest;
v_lli->len = len;
v_lli->para = NORMAL_WAIT;
+ sun6i_dma_set_addr(sdev, v_lli, src, dest);
burst = convert_burst(8);
width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
@@ -670,7 +687,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);
- sun6i_dma_dump_lli(vchan, v_lli);
+ sun6i_dma_dump_lli(vchan, v_lli, p_lli);
return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
@@ -708,7 +725,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
return NULL;
for_each_sg(sgl, sg, sg_len, i) {
- v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
+ v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli);
if (!v_lli)
goto err_lli_free;
@@ -716,8 +733,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
v_lli->para = NORMAL_WAIT;
if (dir == DMA_MEM_TO_DEV) {
- v_lli->src = sg_dma_address(sg);
- v_lli->dst = sconfig->dst_addr;
+ sun6i_dma_set_addr(sdev, v_lli,
+ sg_dma_address(sg),
+ sconfig->dst_addr);
v_lli->cfg = lli_cfg;
sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port);
sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE);
@@ -729,8 +747,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
sg_dma_len(sg), flags);
} else {
- v_lli->src = sconfig->src_addr;
- v_lli->dst = sg_dma_address(sg);
+ sun6i_dma_set_addr(sdev, v_lli,
+ sconfig->src_addr,
+ sg_dma_address(sg));
v_lli->cfg = lli_cfg;
sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM);
sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE);
@@ -746,14 +765,16 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
}
dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli);
- for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
- sun6i_dma_dump_lli(vchan, prev);
+ for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli;
+ p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next)
+ sun6i_dma_dump_lli(vchan, v_lli, p_lli);
return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
err_lli_free:
- for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
- dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
+ for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli;
+ p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next)
+ dma_pool_free(sdev->pool, v_lli, p_lli);
kfree(txd);
return NULL;
}
@@ -787,7 +808,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
return NULL;
for (i = 0; i < periods; i++) {
- v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
+ v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli);
if (!v_lli) {
dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
goto err_lli_free;
@@ -797,14 +818,16 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
v_lli->para = NORMAL_WAIT;
if (dir == DMA_MEM_TO_DEV) {
- v_lli->src = buf_addr + period_len * i;
- v_lli->dst = sconfig->dst_addr;
+ sun6i_dma_set_addr(sdev, v_lli,
+ buf_addr + period_len * i,
+ sconfig->dst_addr);
v_lli->cfg = lli_cfg;
sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port);
sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE);
} else {
- v_lli->src = sconfig->src_addr;
- v_lli->dst = buf_addr + period_len * i;
+ sun6i_dma_set_addr(sdev, v_lli,
+ sconfig->src_addr,
+ buf_addr + period_len * i);
v_lli->cfg = lli_cfg;
sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM);
sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE);
@@ -820,8 +843,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
err_lli_free:
- for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
- dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
+ for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli;
+ p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next)
+ dma_pool_free(sdev->pool, v_lli, p_lli);
kfree(txd);
return NULL;
}
@@ -1174,8 +1198,6 @@ static struct sun6i_dma_config sun50i_a64_dma_cfg = {
};
/*
- * TODO: Add support for more than 4g physical addressing.
- *
* The A100 binding uses the number of dma channels from the
* device tree node.
*/
@@ -1194,6 +1216,7 @@ static struct sun6i_dma_config sun50i_a100_dma_cfg = {
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+ .has_high_addr = true,
.has_mbus_clk = true,
};
@@ -1248,6 +1271,7 @@ static const struct of_device_id sun6i_dma_match[] = {
{ .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg },
{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
{ .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
+ { .compatible = "allwinner,sun20i-d1-dma", .data = &sun50i_a100_dma_cfg },
{ .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg },
{ .compatible = "allwinner,sun50i-a100-dma", .data = &sun50i_a100_dma_cfg },
{ .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg },
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
new file mode 100644
index 000000000000..05cd451f541d
--- /dev/null
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -0,0 +1,1498 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DMA driver for NVIDIA Tegra GPC DMA controller.
+ *
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <dt-bindings/memory/tegra186-mc.h>
+#include "virt-dma.h"
+
+/* CSR register */
+#define TEGRA_GPCDMA_CHAN_CSR 0x00
+#define TEGRA_GPCDMA_CSR_ENB BIT(31)
+#define TEGRA_GPCDMA_CSR_IE_EOC BIT(30)
+#define TEGRA_GPCDMA_CSR_ONCE BIT(27)
+
+#define TEGRA_GPCDMA_CSR_FC_MODE GENMASK(25, 24)
+#define TEGRA_GPCDMA_CSR_FC_MODE_NO_MMIO \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 0)
+#define TEGRA_GPCDMA_CSR_FC_MODE_ONE_MMIO \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 1)
+#define TEGRA_GPCDMA_CSR_FC_MODE_TWO_MMIO \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 2)
+#define TEGRA_GPCDMA_CSR_FC_MODE_FOUR_MMIO \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 3)
+
+#define TEGRA_GPCDMA_CSR_DMA GENMASK(23, 21)
+#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_NO_FC \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 0)
+#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 1)
+#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_NO_FC \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 2)
+#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 3)
+#define TEGRA_GPCDMA_CSR_DMA_MEM2MEM \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 4)
+#define TEGRA_GPCDMA_CSR_DMA_FIXED_PAT \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 6)
+
+#define TEGRA_GPCDMA_CSR_REQ_SEL_MASK GENMASK(20, 16)
+#define TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, 4)
+#define TEGRA_GPCDMA_CSR_IRQ_MASK BIT(15)
+#define TEGRA_GPCDMA_CSR_WEIGHT GENMASK(13, 10)
+
+/* STATUS register */
+#define TEGRA_GPCDMA_CHAN_STATUS 0x004
+#define TEGRA_GPCDMA_STATUS_BUSY BIT(31)
+#define TEGRA_GPCDMA_STATUS_ISE_EOC BIT(30)
+#define TEGRA_GPCDMA_STATUS_PING_PONG BIT(28)
+#define TEGRA_GPCDMA_STATUS_DMA_ACTIVITY BIT(27)
+#define TEGRA_GPCDMA_STATUS_CHANNEL_PAUSE BIT(26)
+#define TEGRA_GPCDMA_STATUS_CHANNEL_RX BIT(25)
+#define TEGRA_GPCDMA_STATUS_CHANNEL_TX BIT(24)
+#define TEGRA_GPCDMA_STATUS_IRQ_INTR_STA BIT(23)
+#define TEGRA_GPCDMA_STATUS_IRQ_STA BIT(21)
+#define TEGRA_GPCDMA_STATUS_IRQ_TRIG_STA BIT(20)
+
+#define TEGRA_GPCDMA_CHAN_CSRE 0x008
+#define TEGRA_GPCDMA_CHAN_CSRE_PAUSE BIT(31)
+
+/* Source address */
+#define TEGRA_GPCDMA_CHAN_SRC_PTR 0x00C
+
+/* Destination address */
+#define TEGRA_GPCDMA_CHAN_DST_PTR 0x010
+
+/* High address pointer */
+#define TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR 0x014
+#define TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR GENMASK(7, 0)
+#define TEGRA_GPCDMA_HIGH_ADDR_DST_PTR GENMASK(23, 16)
+
+/* MC sequence register */
+#define TEGRA_GPCDMA_CHAN_MCSEQ 0x18
+#define TEGRA_GPCDMA_MCSEQ_DATA_SWAP BIT(31)
+#define TEGRA_GPCDMA_MCSEQ_REQ_COUNT GENMASK(30, 25)
+#define TEGRA_GPCDMA_MCSEQ_BURST GENMASK(24, 23)
+#define TEGRA_GPCDMA_MCSEQ_BURST_2 \
+ FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 0)
+#define TEGRA_GPCDMA_MCSEQ_BURST_16 \
+ FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 3)
+#define TEGRA_GPCDMA_MCSEQ_WRAP1 GENMASK(22, 20)
+#define TEGRA_GPCDMA_MCSEQ_WRAP0 GENMASK(19, 17)
+#define TEGRA_GPCDMA_MCSEQ_WRAP_NONE 0
+
+#define TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK GENMASK(13, 7)
+#define TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK GENMASK(6, 0)
+
+/* MMIO sequence register */
+#define TEGRA_GPCDMA_CHAN_MMIOSEQ 0x01c
+#define TEGRA_GPCDMA_MMIOSEQ_DBL_BUF BIT(31)
+#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH GENMASK(30, 28)
+#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8 \
+ FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 0)
+#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16 \
+ FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 1)
+#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32 \
+ FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 2)
+#define TEGRA_GPCDMA_MMIOSEQ_DATA_SWAP BIT(27)
+#define TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT 23
+#define TEGRA_GPCDMA_MMIOSEQ_BURST_MIN 2U
+#define TEGRA_GPCDMA_MMIOSEQ_BURST_MAX 32U
+#define TEGRA_GPCDMA_MMIOSEQ_BURST(bs) \
+ (GENMASK((fls(bs) - 2), 0) << TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT)
+#define TEGRA_GPCDMA_MMIOSEQ_MASTER_ID GENMASK(22, 19)
+#define TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD GENMASK(18, 16)
+#define TEGRA_GPCDMA_MMIOSEQ_MMIO_PROT GENMASK(8, 7)
+
+/* Channel WCOUNT */
+#define TEGRA_GPCDMA_CHAN_WCOUNT 0x20
+
+/* Transfer count */
+#define TEGRA_GPCDMA_CHAN_XFER_COUNT 0x24
+
+/* DMA byte count status */
+#define TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS 0x28
+
+/* Error Status Register */
+#define TEGRA_GPCDMA_CHAN_ERR_STATUS 0x30
+#define TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT 8
+#define TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK 0xF
+#define TEGRA_GPCDMA_CHAN_ERR_TYPE(err) ( \
+ ((err) >> TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT) & \
+ TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK)
+#define TEGRA_DMA_BM_FIFO_FULL_ERR 0xF
+#define TEGRA_DMA_PERIPH_FIFO_FULL_ERR 0xE
+#define TEGRA_DMA_PERIPH_ID_ERR 0xD
+#define TEGRA_DMA_STREAM_ID_ERR 0xC
+#define TEGRA_DMA_MC_SLAVE_ERR 0xB
+#define TEGRA_DMA_MMIO_SLAVE_ERR 0xA
+
+/* Fixed Pattern */
+#define TEGRA_GPCDMA_CHAN_FIXED_PATTERN 0x34
+
+#define TEGRA_GPCDMA_CHAN_TZ 0x38
+#define TEGRA_GPCDMA_CHAN_TZ_MMIO_PROT_1 BIT(0)
+#define TEGRA_GPCDMA_CHAN_TZ_MC_PROT_1 BIT(1)
+
+#define TEGRA_GPCDMA_CHAN_SPARE 0x3c
+#define TEGRA_GPCDMA_CHAN_SPARE_EN_LEGACY_FC BIT(16)
+
+/*
+ * If any burst is in flight and the DMA is paused, this is the time needed to
+ * complete the in-flight burst and update the DMA status register.
+ */
+#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 20
+#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 100
+
+/* Channel base address offset from GPCDMA base address */
+#define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET 0x20000
+
+struct tegra_dma;
+struct tegra_dma_channel;
+
+/*
+ * tegra_dma_chip_data Tegra chip specific DMA data
+ * @nr_channels: Number of channels available in the controller.
+ * @channel_reg_size: Channel register size.
+ * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
+ * @hw_support_pause: DMA HW engine support pause of the channel.
+ */
+struct tegra_dma_chip_data {
+ bool hw_support_pause;
+ unsigned int nr_channels;
+ unsigned int channel_reg_size;
+ unsigned int max_dma_count;
+ int (*terminate)(struct tegra_dma_channel *tdc);
+};
+
+/* DMA channel registers */
+struct tegra_dma_channel_regs {
+ u32 csr;
+ u32 src_ptr;
+ u32 dst_ptr;
+ u32 high_addr_ptr;
+ u32 mc_seq;
+ u32 mmio_seq;
+ u32 wcount;
+ u32 fixed_pattern;
+};
+
+/*
+ * tegra_dma_sg_req: DMA request details to configure hardware. This
+ * contains the details for one transfer to configure DMA hw.
+ * The client's request for data transfer can be broken into multiple
+ * sub-transfers as per requester details and hw support. These sub-transfers
+ * are added as an array in the Tegra DMA desc, which manages the transfer details.
+ */
+struct tegra_dma_sg_req {
+ unsigned int len;
+ struct tegra_dma_channel_regs ch_regs;
+};
+
+/*
+ * tegra_dma_desc: Tegra DMA descriptors which uses virt_dma_desc to
+ * manage client request and keep track of transfer status, callbacks
+ * and request counts etc.
+ */
+struct tegra_dma_desc {
+ bool cyclic;
+ unsigned int bytes_req;
+ unsigned int bytes_xfer;
+ unsigned int sg_idx;
+ unsigned int sg_count;
+ struct virt_dma_desc vd;
+ struct tegra_dma_channel *tdc;
+ struct tegra_dma_sg_req sg_req[];
+};
+
+/*
+ * tegra_dma_channel: Channel specific information
+ */
+struct tegra_dma_channel {
+ bool config_init;
+ char name[30];
+ enum dma_transfer_direction sid_dir;
+ int id;
+ int irq;
+ int slave_id;
+ struct tegra_dma *tdma;
+ struct virt_dma_chan vc;
+ struct tegra_dma_desc *dma_desc;
+ struct dma_slave_config dma_sconfig;
+ unsigned int stream_id;
+ unsigned long chan_base_offset;
+};
+
+/*
+ * tegra_dma: Tegra DMA specific information
+ */
+struct tegra_dma {
+ const struct tegra_dma_chip_data *chip_data;
+ unsigned long sid_m2d_reserved;
+ unsigned long sid_d2m_reserved;
+ void __iomem *base_addr;
+ struct device *dev;
+ struct dma_device dma_dev;
+ struct reset_control *rst;
+ struct tegra_dma_channel channels[];
+};
+
+static inline void tdc_write(struct tegra_dma_channel *tdc,
+ u32 reg, u32 val)
+{
+ writel_relaxed(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+}
+
+static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
+{
+ return readl_relaxed(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+}
+
+static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
+{
+ return container_of(dc, struct tegra_dma_channel, vc.chan);
+}
+
+static inline struct tegra_dma_desc *vd_to_tegra_dma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct tegra_dma_desc, vd);
+}
+
+static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
+{
+ return tdc->vc.chan.device->dev;
+}
+
+static void tegra_dma_dump_chan_regs(struct tegra_dma_channel *tdc)
+{
+ dev_dbg(tdc2dev(tdc), "DMA Channel %d name %s register dump:\n",
+ tdc->id, tdc->name);
+ dev_dbg(tdc2dev(tdc), "CSR %x STA %x CSRE %x SRC %x DST %x\n",
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_DST_PTR)
+ );
+ dev_dbg(tdc2dev(tdc), "MCSEQ %x IOSEQ %x WCNT %x XFER %x BSTA %x\n",
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_WCOUNT),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS)
+ );
+ dev_dbg(tdc2dev(tdc), "DMA ERR_STA %x\n",
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS));
+}
+
+static int tegra_dma_sid_reserve(struct tegra_dma_channel *tdc,
+ enum dma_transfer_direction direction)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+ int sid = tdc->slave_id;
+
+ if (!is_slave_direction(direction))
+ return 0;
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ if (test_and_set_bit(sid, &tdma->sid_m2d_reserved)) {
+ dev_err(tdma->dev, "slave id already in use\n");
+ return -EINVAL;
+ }
+ break;
+ case DMA_DEV_TO_MEM:
+ if (test_and_set_bit(sid, &tdma->sid_d2m_reserved)) {
+ dev_err(tdma->dev, "slave id already in use\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ tdc->sid_dir = direction;
+
+ return 0;
+}
+
+static void tegra_dma_sid_free(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+ int sid = tdc->slave_id;
+
+ switch (tdc->sid_dir) {
+ case DMA_MEM_TO_DEV:
+ clear_bit(sid, &tdma->sid_m2d_reserved);
+ break;
+ case DMA_DEV_TO_MEM:
+ clear_bit(sid, &tdma->sid_d2m_reserved);
+ break;
+ default:
+ break;
+ }
+
+ tdc->sid_dir = DMA_TRANS_NONE;
+}
+
+static void tegra_dma_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(container_of(vd, struct tegra_dma_desc, vd));
+}
+
+static int tegra_dma_slave_config(struct dma_chan *dc,
+ struct dma_slave_config *sconfig)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+
+ memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
+ tdc->config_init = true;
+
+ return 0;
+}
+
+static int tegra_dma_pause(struct tegra_dma_channel *tdc)
+{
+ int ret;
+ u32 val;
+
+ val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
+ val |= TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
+
+ /* Wait until busy bit is de-asserted */
+ ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
+ tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
+ val,
+ !(val & TEGRA_GPCDMA_STATUS_BUSY),
+ TEGRA_GPCDMA_BURST_COMPLETE_TIME,
+ TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
+
+ if (ret) {
+ dev_err(tdc2dev(tdc), "DMA pause timed out\n");
+ tegra_dma_dump_chan_regs(tdc);
+ }
+
+ return ret;
+}
+
+static int tegra_dma_device_pause(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned long flags;
+ int ret;
+
+ if (!tdc->tdma->chip_data->hw_support_pause)
+ return -ENOSYS;
+
+ spin_lock_irqsave(&tdc->vc.lock, flags);
+ ret = tegra_dma_pause(tdc);
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+
+ return ret;
+}
+
+static void tegra_dma_resume(struct tegra_dma_channel *tdc)
+{
+ u32 val;
+
+ val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
+ val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
+}
+
+static int tegra_dma_device_resume(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned long flags;
+
+ if (!tdc->tdma->chip_data->hw_support_pause)
+ return -ENOSYS;
+
+ spin_lock_irqsave(&tdc->vc.lock, flags);
+ tegra_dma_resume(tdc);
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+
+ return 0;
+}
+
+static void tegra_dma_disable(struct tegra_dma_channel *tdc)
+{
+ u32 csr, status;
+
+ csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);
+
+ /* Disable interrupts */
+ csr &= ~TEGRA_GPCDMA_CSR_IE_EOC;
+
+ /* Disable DMA */
+ csr &= ~TEGRA_GPCDMA_CSR_ENB;
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);
+
+ /* Clear interrupt status if it is there */
+ status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
+ if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) {
+ dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, status);
+ }
+}
+
+static void tegra_dma_configure_next_sg(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_desc *dma_desc = tdc->dma_desc;
+ struct tegra_dma_channel_regs *ch_regs;
+ int ret;
+ u32 val;
+
+ dma_desc->sg_idx++;
+
+ /* Reset the sg index for cyclic transfers */
+ if (dma_desc->sg_idx == dma_desc->sg_count)
+ dma_desc->sg_idx = 0;
+
+ /* Configure next transfer immediately after DMA is busy */
+ ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
+ tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
+ val,
+ (val & TEGRA_GPCDMA_STATUS_BUSY), 0,
+ TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
+ if (ret)
+ return;
+
+ ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;
+
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);
+
+ /* Start DMA */
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
+ ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
+}
+
+static void tegra_dma_start(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_desc *dma_desc = tdc->dma_desc;
+ struct tegra_dma_channel_regs *ch_regs;
+ struct virt_dma_desc *vdesc;
+
+ if (!dma_desc) {
+ vdesc = vchan_next_desc(&tdc->vc);
+ if (!vdesc)
+ return;
+
+ dma_desc = vd_to_tegra_dma_desc(vdesc);
+ list_del(&vdesc->node);
+ dma_desc->tdc = tdc;
+ tdc->dma_desc = dma_desc;
+
+ tegra_dma_resume(tdc);
+ }
+
+ ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;
+
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 0);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_FIXED_PATTERN, ch_regs->fixed_pattern);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ, ch_regs->mmio_seq);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, ch_regs->mc_seq);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, ch_regs->csr);
+
+ /* Start DMA */
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
+ ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
+}
+
+static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)
+{
+ vchan_cookie_complete(&tdc->dma_desc->vd);
+
+ tegra_dma_sid_free(tdc);
+ tdc->dma_desc = NULL;
+}
+
+static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
+ unsigned int err_status)
+{
+ switch (TEGRA_GPCDMA_CHAN_ERR_TYPE(err_status)) {
+ case TEGRA_DMA_BM_FIFO_FULL_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d bm fifo full\n", tdc->id);
+ break;
+
+ case TEGRA_DMA_PERIPH_FIFO_FULL_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d peripheral fifo full\n", tdc->id);
+ break;
+
+ case TEGRA_DMA_PERIPH_ID_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d illegal peripheral id\n", tdc->id);
+ break;
+
+ case TEGRA_DMA_STREAM_ID_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d illegal stream id\n", tdc->id);
+ break;
+
+ case TEGRA_DMA_MC_SLAVE_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d mc slave error\n", tdc->id);
+ break;
+
+ case TEGRA_DMA_MMIO_SLAVE_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d mmio slave error\n", tdc->id);
+ break;
+
+ default:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d security violation %x\n", tdc->id,
+ err_status);
+ }
+}
+
+static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
+{
+ struct tegra_dma_channel *tdc = dev_id;
+ struct tegra_dma_desc *dma_desc = tdc->dma_desc;
+ struct tegra_dma_sg_req *sg_req;
+ u32 status;
+
+ /* Check channel error status register */
+ status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS);
+ if (status) {
+ tegra_dma_chan_decode_error(tdc, status);
+ tegra_dma_dump_chan_regs(tdc);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS, 0xFFFFFFFF);
+ }
+
+ spin_lock(&tdc->vc.lock);
+ status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
+ if (!(status & TEGRA_GPCDMA_STATUS_ISE_EOC))
+ goto irq_done;
+
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS,
+ TEGRA_GPCDMA_STATUS_ISE_EOC);
+
+ if (!dma_desc)
+ goto irq_done;
+
+ sg_req = dma_desc->sg_req;
+ dma_desc->bytes_xfer += sg_req[dma_desc->sg_idx].len;
+
+ if (dma_desc->cyclic) {
+ vchan_cyclic_callback(&dma_desc->vd);
+ tegra_dma_configure_next_sg(tdc);
+ } else {
+ dma_desc->sg_idx++;
+ if (dma_desc->sg_idx == dma_desc->sg_count)
+ tegra_dma_xfer_complete(tdc);
+ else
+ tegra_dma_start(tdc);
+ }
+
+irq_done:
+ spin_unlock(&tdc->vc.lock);
+ return IRQ_HANDLED;
+}
+
+static void tegra_dma_issue_pending(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned long flags;
+
+ if (tdc->dma_desc)
+ return;
+
+ spin_lock_irqsave(&tdc->vc.lock, flags);
+ if (vchan_issue_pending(&tdc->vc))
+ tegra_dma_start(tdc);
+
+ /*
+ * For cyclic DMA transfers, program the second
+ * transfer parameters as soon as the first DMA
+ * transfer is started in order for the DMA
+ * controller to trigger the second transfer
+ * with the correct parameters.
+ */
+ if (tdc->dma_desc && tdc->dma_desc->cyclic)
+ tegra_dma_configure_next_sg(tdc);
+
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+}
+
+static int tegra_dma_stop_client(struct tegra_dma_channel *tdc)
+{
+ int ret;
+ u32 status, csr;
+
+ /*
+ * Change the client associated with the DMA channel
+ * to stop DMA engine from starting any more bursts for
+ * the given client and wait for in flight bursts to complete
+ */
+ csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);
+ csr &= ~(TEGRA_GPCDMA_CSR_REQ_SEL_MASK);
+ csr |= TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED;
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);
+
+ /* Wait for in flight data transfer to finish */
+ udelay(TEGRA_GPCDMA_BURST_COMPLETE_TIME);
+
+ /*
+ * If TX/RX path is still active, wait till it becomes
+ * inactive.
+ */
+
+ ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
+ tdc->chan_base_offset +
+ TEGRA_GPCDMA_CHAN_STATUS,
+ status,
+ !(status & (TEGRA_GPCDMA_STATUS_CHANNEL_TX |
+ TEGRA_GPCDMA_STATUS_CHANNEL_RX)),
+ 5,
+ TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
+ if (ret) {
+ dev_err(tdc2dev(tdc), "Timeout waiting for DMA burst completion!\n");
+ tegra_dma_dump_chan_regs(tdc);
+ }
+
+ return ret;
+}
+
+static int tegra_dma_terminate_all(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned long flags;
+ LIST_HEAD(head);
+ int err;
+
+ spin_lock_irqsave(&tdc->vc.lock, flags);
+
+ if (tdc->dma_desc) {
+ err = tdc->tdma->chip_data->terminate(tdc);
+ if (err) {
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+ return err;
+ }
+
+ tegra_dma_disable(tdc);
+ tdc->dma_desc = NULL;
+ }
+
+ tegra_dma_sid_free(tdc);
+ vchan_get_all_descriptors(&tdc->vc, &head);
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&tdc->vc, &head);
+
+ return 0;
+}
+
+static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_desc *dma_desc = tdc->dma_desc;
+ struct tegra_dma_sg_req *sg_req = dma_desc->sg_req;
+ unsigned int bytes_xfer, residual;
+ u32 wcount = 0, status;
+
+ wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT);
+
+ /*
+ * Set wcount = 0 if the EOC bit is set. The transfer would have
+ * already completed and CHAN_XFER_COUNT could already have been
+ * updated for the next transfer, specifically for cyclic transfers.
+ */
+ status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
+ if (status & TEGRA_GPCDMA_STATUS_ISE_EOC)
+ wcount = 0;
+
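+ /*
+ * Bytes transferred so far: completed segments plus the current
+ * segment, minus the words (4 bytes each) still pending in the
+ * transfer count register. The modulo keeps the residual within
+ * one request length for cyclic transfers.
+ */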
+ bytes_xfer = dma_desc->bytes_xfer +
+ sg_req[dma_desc->sg_idx].len - (wcount * 4);
+
+ residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
+
+ return residual;
+}
+
+static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_desc *dma_desc;
+ struct virt_dma_desc *vd;
+ unsigned int residual;
+ unsigned long flags;
+ enum dma_status ret;
+
+ ret = dma_cookie_status(dc, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ spin_lock_irqsave(&tdc->vc.lock, flags);
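+ /*
+ * A descriptor still on the pending list has not started, so the
+ * full requested length is outstanding; for the in-flight
+ * descriptor the residual is computed from the transfer count
+ * register.
+ */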
+ vd = vchan_find_desc(&tdc->vc, cookie);
+ if (vd) {
+ dma_desc = vd_to_tegra_dma_desc(vd);
+ residual = dma_desc->bytes_req;
+ dma_set_residue(txstate, residual);
+ } else if (tdc->dma_desc && tdc->dma_desc->vd.tx.cookie == cookie) {
+ residual = tegra_dma_get_residual(tdc);
+ dma_set_residue(txstate, residual);
+ } else {
+ dev_err(tdc2dev(tdc), "cookie %d is not found\n", cookie);
+ }
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+
+ return ret;
+}
+
+static inline int get_bus_width(struct tegra_dma_channel *tdc,
+ enum dma_slave_buswidth slave_bw)
+{
+ switch (slave_bw) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32;
+ default:
+ dev_err(tdc2dev(tdc), "given slave bus width is not supported\n");
+ return -EINVAL;
+ }
+}
+
+static unsigned int get_burst_size(struct tegra_dma_channel *tdc,
+ u32 burst_size, enum dma_slave_buswidth slave_bw,
+ int len)
+{
+ unsigned int burst_mmio_width, burst_byte;
+
+ /*
+ * burst_size from the client is in terms of the bus width;
+ * convert that into words.
+ * If burst_size is not specified by the client, use len to
+ * calculate the optimum burst size.
+ */
+ burst_byte = burst_size ? burst_size * slave_bw : len;
+ burst_mmio_width = burst_byte / 4;
+
+ if (burst_mmio_width < TEGRA_GPCDMA_MMIOSEQ_BURST_MIN)
+ return 0;
+
+ burst_mmio_width = min(burst_mmio_width, TEGRA_GPCDMA_MMIOSEQ_BURST_MAX);
+
+ return TEGRA_GPCDMA_MMIOSEQ_BURST(burst_mmio_width);
+}
+
+static int get_transfer_param(struct tegra_dma_channel *tdc,
+ enum dma_transfer_direction direction,
+ u32 *apb_addr,
+ u32 *mmio_seq,
+ u32 *csr,
+ unsigned int *burst_size,
+ enum dma_slave_buswidth *slave_bw)
+{
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ *apb_addr = tdc->dma_sconfig.dst_addr;
+ *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
+ *burst_size = tdc->dma_sconfig.dst_maxburst;
+ *slave_bw = tdc->dma_sconfig.dst_addr_width;
+ *csr = TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC;
+ return 0;
+ case DMA_DEV_TO_MEM:
+ *apb_addr = tdc->dma_sconfig.src_addr;
+ *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
+ *burst_size = tdc->dma_sconfig.src_maxburst;
+ *slave_bw = tdc->dma_sconfig.src_addr_width;
+ *csr = TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC;
+ return 0;
+ default:
+ dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
+ }
+
+ return -EINVAL;
+}
+
+static struct dma_async_tx_descriptor *
+tegra_dma_prep_dma_memset(struct dma_chan *dc, dma_addr_t dest, int value,
+ size_t len, unsigned long flags)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
+ struct tegra_dma_sg_req *sg_req;
+ struct tegra_dma_desc *dma_desc;
+ u32 csr, mc_seq;
+
+ if ((len & 3) || (dest & 3) || len > max_dma_count) {
+ dev_err(tdc2dev(tdc),
+ "DMA length/memory address is not supported\n");
+ return NULL;
+ }
+
+ /* Set DMA mode to fixed pattern */
+ csr = TEGRA_GPCDMA_CSR_DMA_FIXED_PAT;
+ /* Enable once (non-continuous) transfer mode */
+ csr |= TEGRA_GPCDMA_CSR_ONCE;
+ /* Enable IRQ mask */
+ csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
+ /* Enable the DMA interrupt */
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_GPCDMA_CSR_IE_EOC;
+ /* Configure default priority weight for the channel */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
+
+ mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
+ /* Retain the stream ID and clear the rest */
+ mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;
+
+ /* Set the address wrapping */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+
+ /* Program outstanding MC requests */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
+ /* Set burst size */
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
+
+ dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
+ if (!dma_desc)
+ return NULL;
+
+ dma_desc->bytes_req = len;
+ dma_desc->sg_count = 1;
+ sg_req = dma_desc->sg_req;
+
+ sg_req[0].ch_regs.src_ptr = 0;
+ sg_req[0].ch_regs.dst_ptr = dest;
+ sg_req[0].ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
+ sg_req[0].ch_regs.fixed_pattern = value;
+ /* The word count register takes the value as (N + 1) words */
+ sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
+ sg_req[0].ch_regs.csr = csr;
+ sg_req[0].ch_regs.mmio_seq = 0;
+ sg_req[0].ch_regs.mc_seq = mc_seq;
+ sg_req[0].len = len;
+
+ dma_desc->cyclic = false;
+ return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+tegra_dma_prep_dma_memcpy(struct dma_chan *dc, dma_addr_t dest,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_sg_req *sg_req;
+ struct tegra_dma_desc *dma_desc;
+ unsigned int max_dma_count;
+ u32 csr, mc_seq;
+
+ max_dma_count = tdc->tdma->chip_data->max_dma_count;
+ if ((len & 3) || (src & 3) || (dest & 3) || len > max_dma_count) {
+ dev_err(tdc2dev(tdc),
+ "DMA length/memory address is not supported\n");
+ return NULL;
+ }
+
+ /* Set DMA mode to memory to memory transfer */
+ csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM;
+ /* Enable once (non-continuous) transfer mode */
+ csr |= TEGRA_GPCDMA_CSR_ONCE;
+ /* Enable IRQ mask */
+ csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
+ /* Enable the DMA interrupt */
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_GPCDMA_CSR_IE_EOC;
+ /* Configure default priority weight for the channel */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
+
+ mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
+ /* Retain the stream IDs and clear the rest */
+ mc_seq &= (TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK) |
+ (TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);
+
+ /* Set the address wrapping */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+
+ /* Program outstanding MC requests */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
+ /* Set burst size */
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
+
+ dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
+ if (!dma_desc)
+ return NULL;
+
+ dma_desc->bytes_req = len;
+ dma_desc->sg_count = 1;
+ sg_req = dma_desc->sg_req;
+
+ sg_req[0].ch_regs.src_ptr = src;
+ sg_req[0].ch_regs.dst_ptr = dest;
+ sg_req[0].ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (src >> 32));
+ sg_req[0].ch_regs.high_addr_ptr |=
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
+ /* The word count register takes the value as (N + 1) words */
+ sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
+ sg_req[0].ch_regs.csr = csr;
+ sg_req[0].ch_regs.mmio_seq = 0;
+ sg_req[0].ch_regs.mc_seq = mc_seq;
+ sg_req[0].len = len;
+
+ dma_desc->cyclic = false;
+ return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+tegra_dma_prep_slave_sg(struct dma_chan *dc, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
+ enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0;
+ struct tegra_dma_sg_req *sg_req;
+ struct tegra_dma_desc *dma_desc;
+ struct scatterlist *sg;
+ u32 burst_size;
+ unsigned int i;
+ int ret;
+
+ if (!tdc->config_init) {
+ dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
+ return NULL;
+ }
+ if (sg_len < 1) {
+ dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
+ return NULL;
+ }
+
+ ret = tegra_dma_sid_reserve(tdc, direction);
+ if (ret)
+ return NULL;
+
+ ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
+ &burst_size, &slave_bw);
+ if (ret < 0)
+ return NULL;
+
+ /* Enable once (non-continuous) transfer mode */
+ csr |= TEGRA_GPCDMA_CSR_ONCE;
+ /* Program the slave id in requestor select */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
+ /* Enable IRQ mask */
+ csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
+ /* Configure default priority weight for the channel */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
+
+ /* Enable the DMA interrupt */
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_GPCDMA_CSR_IE_EOC;
+
+ mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
+ /* Retain the stream ID and clear the rest */
+ mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;
+
+ /* Set the address wrapping on both MC and MMIO side */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+ mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);
+
+ /* Program 2 MC outstanding requests by default. */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
+
+ /* Set the MC burst size based on the MMIO burst size */
+ if (burst_size == 64)
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
+ else
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;
+
+ dma_desc = kzalloc(struct_size(dma_desc, sg_req, sg_len), GFP_NOWAIT);
+ if (!dma_desc)
+ return NULL;
+
+ dma_desc->sg_count = sg_len;
+ sg_req = dma_desc->sg_req;
+
+ /* Make transfer requests */
+ for_each_sg(sgl, sg, sg_len, i) {
+ u32 len;
+ dma_addr_t mem;
+
+ mem = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ if ((len & 3) || (mem & 3) || len > max_dma_count) {
+ dev_err(tdc2dev(tdc),
+ "DMA length/memory address is not supported\n");
+ kfree(dma_desc);
+ return NULL;
+ }
+
+ mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
+ dma_desc->bytes_req += len;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ sg_req[i].ch_regs.src_ptr = mem;
+ sg_req[i].ch_regs.dst_ptr = apb_ptr;
+ sg_req[i].ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
+ } else if (direction == DMA_DEV_TO_MEM) {
+ sg_req[i].ch_regs.src_ptr = apb_ptr;
+ sg_req[i].ch_regs.dst_ptr = mem;
+ sg_req[i].ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
+ }
+
+ /*
+ * The word count register takes its input in words. Writing a
+ * value of N into the word count register requests (N + 1) words.
+ */
+ sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
+ sg_req[i].ch_regs.csr = csr;
+ sg_req[i].ch_regs.mmio_seq = mmio_seq;
+ sg_req[i].ch_regs.mc_seq = mc_seq;
+ sg_req[i].len = len;
+ }
+
+ dma_desc->cyclic = false;
+ return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0, burst_size;
+ unsigned int max_dma_count, len, period_count, i;
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_desc *dma_desc;
+ struct tegra_dma_sg_req *sg_req;
+ dma_addr_t mem = buf_addr;
+ int ret;
+
+ if (!buf_len || !period_len) {
+ dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
+ return NULL;
+ }
+
+ if (!tdc->config_init) {
+ dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
+ return NULL;
+ }
+
+ ret = tegra_dma_sid_reserve(tdc, direction);
+ if (ret)
+ return NULL;
+
+ /*
+ * We only support cyclic transfers when buf_len is a multiple
+ * of period_len.
+ */
+ if (buf_len % period_len) {
+ dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
+ return NULL;
+ }
+
+ len = period_len;
+ max_dma_count = tdc->tdma->chip_data->max_dma_count;
+ if ((len & 3) || (buf_addr & 3) || len > max_dma_count) {
+ dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
+ return NULL;
+ }
+
+ ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
+ &burst_size, &slave_bw);
+ if (ret < 0)
+ return NULL;
+
+ /* Enable continuous (cyclic) transfer mode */
+ csr &= ~TEGRA_GPCDMA_CSR_ONCE;
+ /* Program the slave id in requestor select */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
+ /* Enable IRQ mask */
+ csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
+ /* Configure default priority weight for the channel */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
+
+ /* Enable the DMA interrupt */
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_GPCDMA_CSR_IE_EOC;
+
+ mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);
+
+ mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
+ /* Retain the stream ID and clear the rest */
+ mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;
+
+ /* Set the address wrapping on both MC and MMIO side */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+
+ /* Program 2 MC outstanding requests by default. */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
+ /* Set the MC burst size based on the MMIO burst size */
+ if (burst_size == 64)
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
+ else
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;
+
+ period_count = buf_len / period_len;
+ dma_desc = kzalloc(struct_size(dma_desc, sg_req, period_count),
+ GFP_NOWAIT);
+ if (!dma_desc)
+ return NULL;
+
+ dma_desc->bytes_req = buf_len;
+ dma_desc->sg_count = period_count;
+ sg_req = dma_desc->sg_req;
+
+ /* Split the transfer into period-sized segments */
+ for (i = 0; i < period_count; i++) {
+ mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
+ if (direction == DMA_MEM_TO_DEV) {
+ sg_req[i].ch_regs.src_ptr = mem;
+ sg_req[i].ch_regs.dst_ptr = apb_ptr;
+ sg_req[i].ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
+ } else if (direction == DMA_DEV_TO_MEM) {
+ sg_req[i].ch_regs.src_ptr = apb_ptr;
+ sg_req[i].ch_regs.dst_ptr = mem;
+ sg_req[i].ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
+ }
+ /*
+ * The word count register takes its input in words. Writing a
+ * value of N into the word count register requests (N + 1) words.
+ */
+ sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
+ sg_req[i].ch_regs.csr = csr;
+ sg_req[i].ch_regs.mmio_seq = mmio_seq;
+ sg_req[i].ch_regs.mc_seq = mc_seq;
+ sg_req[i].len = len;
+
+ mem += len;
+ }
+
+ dma_desc->cyclic = true;
+
+ return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
+}
+
+static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ int ret;
+
+ ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
+ if (ret) {
+ dev_err(tdc2dev(tdc), "request_irq failed for %s\n", tdc->name);
+ return ret;
+ }
+
+ dma_cookie_init(&tdc->vc.chan);
+ tdc->config_init = false;
+ return 0;
+}
+
+static void tegra_dma_chan_synchronize(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+
+ synchronize_irq(tdc->irq);
+ vchan_synchronize(&tdc->vc);
+}
+
+static void tegra_dma_free_chan_resources(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+
+ dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
+
+ tegra_dma_terminate_all(dc);
+ synchronize_irq(tdc->irq);
+
+ tasklet_kill(&tdc->vc.task);
+ tdc->config_init = false;
+ tdc->slave_id = -1;
+ tdc->sid_dir = DMA_TRANS_NONE;
+ free_irq(tdc->irq, tdc);
+
+ vchan_free_chan_resources(&tdc->vc);
+}
+
+static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct tegra_dma *tdma = ofdma->of_dma_data;
+ struct tegra_dma_channel *tdc;
+ struct dma_chan *chan;
+
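+ /*
+ * Any free channel can service the request; record the
+ * requestor (slave) ID passed in the DT cells.
+ */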
+ chan = dma_get_any_slave_channel(&tdma->dma_dev);
+ if (!chan)
+ return NULL;
+
+ tdc = to_tegra_dma_chan(chan);
+ tdc->slave_id = dma_spec->args[0];
+
+ return chan;
+}
+
+static const struct tegra_dma_chip_data tegra186_dma_chip_data = {
+ .nr_channels = 31,
+ .channel_reg_size = SZ_64K,
+ .max_dma_count = SZ_1G,
+ .hw_support_pause = false,
+ .terminate = tegra_dma_stop_client,
+};
+
+static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
+ .nr_channels = 31,
+ .channel_reg_size = SZ_64K,
+ .max_dma_count = SZ_1G,
+ .hw_support_pause = true,
+ .terminate = tegra_dma_pause,
+};
+
+static const struct of_device_id tegra_dma_of_match[] = {
+ {
+ .compatible = "nvidia,tegra186-gpcdma",
+ .data = &tegra186_dma_chip_data,
+ }, {
+ .compatible = "nvidia,tegra194-gpcdma",
+ .data = &tegra194_dma_chip_data,
+ }, {
+ },
+};
+MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
+
+static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id)
+{
+ unsigned int reg_val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
+
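+ /* Program the same stream ID into both MC stream-ID fields */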
+ reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK);
+ reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);
+
+ reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK, stream_id);
+ reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK, stream_id);
+
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, reg_val);
+ return 0;
+}
+
+static int tegra_dma_probe(struct platform_device *pdev)
+{
+ const struct tegra_dma_chip_data *cdata = NULL;
+ struct iommu_fwspec *iommu_spec;
+ unsigned int stream_id, i;
+ struct tegra_dma *tdma;
+ int ret;
+
+ cdata = of_device_get_match_data(&pdev->dev);
+
+ tdma = devm_kzalloc(&pdev->dev,
+ struct_size(tdma, channels, cdata->nr_channels),
+ GFP_KERNEL);
+ if (!tdma)
+ return -ENOMEM;
+
+ tdma->dev = &pdev->dev;
+ tdma->chip_data = cdata;
+ platform_set_drvdata(pdev, tdma);
+
+ tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
+
+ tdma->rst = devm_reset_control_get_exclusive(&pdev->dev, "gpcdma");
+ if (IS_ERR(tdma->rst)) {
+ return dev_err_probe(&pdev->dev, PTR_ERR(tdma->rst),
+ "Missing controller reset\n");
+ }
+ reset_control_reset(tdma->rst);
+
+ tdma->dma_dev.dev = &pdev->dev;
+
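+ /*
+ * All channels share the stream ID taken from the low 16 bits
+ * of the first IOMMU fwspec ID of the GPCDMA device.
+ */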
+ iommu_spec = dev_iommu_fwspec_get(&pdev->dev);
+ if (!iommu_spec) {
+ dev_err(&pdev->dev, "Missing iommu stream-id\n");
+ return -EINVAL;
+ }
+ stream_id = iommu_spec->ids[0] & 0xffff;
+
+ INIT_LIST_HEAD(&tdma->dma_dev.channels);
+ for (i = 0; i < cdata->nr_channels; i++) {
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
+
+ tdc->irq = platform_get_irq(pdev, i);
+ if (tdc->irq < 0)
+ return tdc->irq;
+
+ tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET +
+ i * cdata->channel_reg_size;
+ snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i);
+ tdc->tdma = tdma;
+ tdc->id = i;
+ tdc->slave_id = -1;
+
+ vchan_init(&tdc->vc, &tdma->dma_dev);
+ tdc->vc.desc_free = tegra_dma_desc_free;
+
+ /* Program the stream ID for this channel */
+ tegra_dma_program_sid(tdc, stream_id);
+ tdc->stream_id = stream_id;
+ }
+
+ dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_MEMCPY, tdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_MEMSET, tdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
+
+ /*
+ * Only word aligned transfers are supported. Set the copy
+ * alignment shift.
+ */
+ tdma->dma_dev.copy_align = 2;
+ tdma->dma_dev.fill_align = 2;
+ tdma->dma_dev.device_alloc_chan_resources =
+ tegra_dma_alloc_chan_resources;
+ tdma->dma_dev.device_free_chan_resources =
+ tegra_dma_free_chan_resources;
+ tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
+ tdma->dma_dev.device_prep_dma_memcpy = tegra_dma_prep_dma_memcpy;
+ tdma->dma_dev.device_prep_dma_memset = tegra_dma_prep_dma_memset;
+ tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
+ tdma->dma_dev.device_config = tegra_dma_slave_config;
+ tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
+ tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
+ tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
+ tdma->dma_dev.device_pause = tegra_dma_device_pause;
+ tdma->dma_dev.device_resume = tegra_dma_device_resume;
+ tdma->dma_dev.device_synchronize = tegra_dma_chan_synchronize;
+ tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ ret = dma_async_device_register(&tdma->dma_dev);
+ if (ret < 0) {
+ dev_err_probe(&pdev->dev, ret,
+ "GPC DMA driver registration failed\n");
+ return ret;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ tegra_dma_of_xlate, tdma);
+ if (ret < 0) {
+ dev_err_probe(&pdev->dev, ret,
+ "GPC DMA OF registration failed\n");
+
+ dma_async_device_unregister(&tdma->dma_dev);
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "GPC DMA driver registered %d channels\n",
+ cdata->nr_channels);
+
+ return 0;
+}
+
+static int tegra_dma_remove(struct platform_device *pdev)
+{
+ struct tegra_dma *tdma = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&tdma->dma_dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_dma_pm_suspend(struct device *dev)
+{
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
+ unsigned int i;
+
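+ /* Refuse to suspend while any channel still has an active descriptor */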
+ for (i = 0; i < tdma->chip_data->nr_channels; i++) {
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
+
+ if (tdc->dma_desc) {
+ dev_err(tdma->dev, "channel %u busy\n", i);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_dma_pm_resume(struct device *dev)
+{
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
+ unsigned int i;
+
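+ /* Reset the controller and reprogram the per-channel stream IDs */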
+ reset_control_reset(tdma->rst);
+
+ for (i = 0; i < tdma->chip_data->nr_channels; i++) {
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
+
+ tegra_dma_program_sid(tdc, tdc->stream_id);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
+};
+
+static struct platform_driver tegra_dma_driver = {
+ .driver = {
+ .name = "tegra-gpcdma",
+ .pm = &tegra_dma_dev_pm_ops,
+ .of_match_table = tegra_dma_of_match,
+ },
+ .probe = tegra_dma_probe,
+ .remove = tegra_dma_remove,
+};
+
+module_platform_driver(tegra_dma_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra GPC DMA Controller driver");
+MODULE_AUTHOR("Pavan Kunapuli <pkunapuli@nvidia.com>");
+MODULE_AUTHOR("Rajesh Gumasta <rgumasta@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
index 062bd9bd4de0..695915dba707 100644
--- a/drivers/dma/ti/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -1105,8 +1105,12 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cdd->qmgr_num_pend = glue_info->qmgr_num_pend;
cdd->first_completion_queue = glue_info->first_completion_queue;
+ /* Parse new and deprecated dma-channels properties */
ret = of_property_read_u32(dev->of_node,
- "#dma-channels", &cdd->n_chans);
+ "dma-channels", &cdd->n_chans);
+ if (ret)
+ ret = of_property_read_u32(dev->of_node,
+ "#dma-channels", &cdd->n_chans);
if (ret)
goto err_get_n_chans;
diff --git a/drivers/dma/ti/k3-psil-am62.c b/drivers/dma/ti/k3-psil-am62.c
index d431e2033237..2b6fd6e37c61 100644
--- a/drivers/dma/ti/k3-psil-am62.c
+++ b/drivers/dma/ti/k3-psil-am62.c
@@ -70,10 +70,10 @@
/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
static struct psil_ep am62_src_ep_map[] = {
/* SAUL */
- PSIL_SAUL(0x7500, 20, 35, 8, 35, 0),
- PSIL_SAUL(0x7501, 21, 35, 8, 36, 0),
- PSIL_SAUL(0x7502, 22, 43, 8, 43, 0),
- PSIL_SAUL(0x7503, 23, 43, 8, 44, 0),
+ PSIL_SAUL(0x7504, 20, 35, 8, 35, 0),
+ PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
+ PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
+ PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
/* PDMA_MAIN0 - SPI0-3 */
PSIL_PDMA_XY_PKT(0x4302),
PSIL_PDMA_XY_PKT(0x4303),
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 7aa63b652027..dc299ab36818 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -229,7 +229,7 @@ struct zynqmp_dma_chan {
bool is_dmacoherent;
struct tasklet_struct tasklet;
bool idle;
- u32 desc_size;
+ size_t desc_size;
bool err;
u32 bus_width;
u32 src_burst_len;
@@ -486,7 +486,8 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
}
chan->desc_pool_v = dma_alloc_coherent(chan->dev,
- (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+ (2 * ZYNQMP_DMA_DESC_SIZE(chan) *
+ ZYNQMP_DMA_NUM_DESCS),
&chan->desc_pool_p, GFP_KERNEL);
if (!chan->desc_pool_v)
return -ENOMEM;
@@ -1077,7 +1078,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
pm_runtime_use_autosuspend(zdev->dev);
pm_runtime_enable(zdev->dev);
- pm_runtime_get_sync(zdev->dev);
+ ret = pm_runtime_resume_and_get(zdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "device wakeup failed.\n");
+ pm_runtime_disable(zdev->dev);
+ }
if (!pm_runtime_enabled(zdev->dev)) {
ret = zynqmp_dma_runtime_resume(zdev->dev);
if (ret)
@@ -1093,7 +1098,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
p->src_addr_widths = BIT(zdev->chan->bus_width / 8);
- dma_async_device_register(&zdev->common);
+ ret = dma_async_device_register(&zdev->common);
+ if (ret) {
+ dev_err(zdev->dev, "failed to register the dma device\n");
+ goto free_chan_resources;
+ }
ret = of_dma_controller_register(pdev->dev.of_node,
of_zynqmp_dma_xlate, zdev);
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 0d42e49105dd..dca7cecb37e3 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -131,6 +131,7 @@ config EXTCON_PALMAS
config EXTCON_PTN5150
tristate "NXP PTN5150 CC LOGIC USB EXTCON support"
depends on I2C && (GPIOLIB || COMPILE_TEST)
+ depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
select REGMAP_I2C
help
Say Y here to enable support for USB peripheral and USB host
@@ -156,7 +157,7 @@ config EXTCON_RT8973A
from abnormal high input voltage (up to 28V).
config EXTCON_SM5502
- tristate "Silicon Mitus SM5502/SM5504 EXTCON support"
+ tristate "Silicon Mitus SM5502/SM5504/SM5703 EXTCON support"
depends on I2C
select IRQ_DOMAIN
select REGMAP_I2C
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 7c6d5857ff25..180be768c215 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -394,8 +394,8 @@ static int axp288_extcon_probe(struct platform_device *pdev)
if (adev) {
info->id_extcon = extcon_get_extcon_dev(acpi_dev_name(adev));
put_device(&adev->dev);
- if (!info->id_extcon)
- return -EPROBE_DEFER;
+ if (IS_ERR(info->id_extcon))
+ return PTR_ERR(info->id_extcon);
dev_info(dev, "controlling USB role\n");
} else {
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index fb527c23639e..ded1a85a5549 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#define INT3496_GPIO_USB_ID 0
#define INT3496_GPIO_VBUS_EN 1
@@ -30,7 +31,9 @@ struct int3496_data {
struct gpio_desc *gpio_usb_id;
struct gpio_desc *gpio_vbus_en;
struct gpio_desc *gpio_usb_mux;
+ struct regulator *vbus_boost;
int usb_id_irq;
+ bool vbus_boost_enabled;
};
static const unsigned int int3496_cable[] = {
@@ -53,6 +56,27 @@ static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = {
{ },
};
+static void int3496_set_vbus_boost(struct int3496_data *data, bool enable)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(data->vbus_boost))
+ return;
+
+ if (data->vbus_boost_enabled == enable)
+ return;
+
+ if (enable)
+ ret = regulator_enable(data->vbus_boost);
+ else
+ ret = regulator_disable(data->vbus_boost);
+
+ if (ret == 0)
+ data->vbus_boost_enabled = enable;
+ else
+ dev_err(data->dev, "Error updating Vbus boost regulator: %d\n", ret);
+}
+
static void int3496_do_usb_id(struct work_struct *work)
{
struct int3496_data *data =
@@ -71,6 +95,8 @@ static void int3496_do_usb_id(struct work_struct *work)
if (!IS_ERR(data->gpio_vbus_en))
gpiod_direction_output(data->gpio_vbus_en, !id);
+ else
+ int3496_set_vbus_boost(data, !id);
extcon_set_state_sync(data->edev, EXTCON_USB_HOST, !id);
}
@@ -91,10 +117,12 @@ static int int3496_probe(struct platform_device *pdev)
struct int3496_data *data;
int ret;
- ret = devm_acpi_dev_add_driver_gpios(dev, acpi_int3496_default_gpios);
- if (ret) {
- dev_err(dev, "can't add GPIO ACPI mapping\n");
- return ret;
+ if (has_acpi_companion(dev)) {
+ ret = devm_acpi_dev_add_driver_gpios(dev, acpi_int3496_default_gpios);
+ if (ret) {
+ dev_err(dev, "can't add GPIO ACPI mapping\n");
+ return ret;
+ }
}
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
@@ -106,7 +134,8 @@ static int int3496_probe(struct platform_device *pdev)
if (ret)
return ret;
- data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN);
+ data->gpio_usb_id =
+ devm_gpiod_get(dev, "id", GPIOD_IN | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(data->gpio_usb_id)) {
ret = PTR_ERR(data->gpio_usb_id);
dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
@@ -120,12 +149,14 @@ static int int3496_probe(struct platform_device *pdev)
}
data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS);
- if (IS_ERR(data->gpio_vbus_en))
- dev_info(dev, "can't request VBUS EN GPIO\n");
+ if (IS_ERR(data->gpio_vbus_en)) {
+ dev_dbg(dev, "can't request VBUS EN GPIO\n");
+ data->vbus_boost = devm_regulator_get_optional(dev, "vbus");
+ }
data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS);
if (IS_ERR(data->gpio_usb_mux))
- dev_info(dev, "can't request USB MUX GPIO\n");
+ dev_dbg(dev, "can't request USB MUX GPIO\n");
/* register extcon device */
data->edev = devm_extcon_dev_allocate(dev, int3496_cable);
@@ -164,12 +195,19 @@ static const struct acpi_device_id int3496_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, int3496_acpi_match);
+static const struct platform_device_id int3496_ids[] = {
+ { .name = "intel-int3496" },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, int3496_ids);
+
static struct platform_driver int3496_driver = {
.driver = {
.name = "intel-int3496",
.acpi_match_table = int3496_acpi_match,
},
.probe = int3496_probe,
+ .id_table = int3496_ids,
};
module_platform_driver(int3496_driver);
diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c
index 5b9a3cf8df26..017a07197f38 100644
--- a/drivers/extcon/extcon-ptn5150.c
+++ b/drivers/extcon/extcon-ptn5150.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/extcon-provider.h>
#include <linux/gpio/consumer.h>
+#include <linux/usb/role.h>
/* PTN5150 registers */
#define PTN5150_REG_DEVICE_ID 0x01
@@ -52,6 +53,7 @@ struct ptn5150_info {
int irq;
struct work_struct irq_work;
struct mutex mutex;
+ struct usb_role_switch *role_sw;
};
/* List of detectable cables */
@@ -70,6 +72,7 @@ static const struct regmap_config ptn5150_regmap_config = {
static void ptn5150_check_state(struct ptn5150_info *info)
{
unsigned int port_status, reg_data, vbus;
+ enum usb_role usb_role = USB_ROLE_NONE;
int ret;
ret = regmap_read(info->regmap, PTN5150_REG_CC_STATUS, &reg_data);
@@ -85,6 +88,7 @@ static void ptn5150_check_state(struct ptn5150_info *info)
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false);
gpiod_set_value_cansleep(info->vbus_gpiod, 0);
extcon_set_state_sync(info->edev, EXTCON_USB, true);
+ usb_role = USB_ROLE_DEVICE;
break;
case PTN5150_UFP_ATTACHED:
extcon_set_state_sync(info->edev, EXTCON_USB, false);
@@ -95,10 +99,18 @@ static void ptn5150_check_state(struct ptn5150_info *info)
gpiod_set_value_cansleep(info->vbus_gpiod, 1);
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true);
+ usb_role = USB_ROLE_HOST;
break;
default:
break;
}
+
+ if (usb_role) {
+ ret = usb_role_switch_set_role(info->role_sw, usb_role);
+ if (ret)
+ dev_err(info->dev, "failed to set %s role: %d\n",
+ usb_role_string(usb_role), ret);
+ }
}
static void ptn5150_irq_work(struct work_struct *work)
@@ -133,6 +145,13 @@ static void ptn5150_irq_work(struct work_struct *work)
extcon_set_state_sync(info->edev,
EXTCON_USB, false);
gpiod_set_value_cansleep(info->vbus_gpiod, 0);
+
+ ret = usb_role_switch_set_role(info->role_sw,
+ USB_ROLE_NONE);
+ if (ret)
+ dev_err(info->dev,
+ "failed to set none role: %d\n",
+ ret);
}
}
@@ -194,6 +213,14 @@ static int ptn5150_init_dev_type(struct ptn5150_info *info)
return 0;
}
+static void ptn5150_work_sync_and_put(void *data)
+{
+ struct ptn5150_info *info = data;
+
+ cancel_work_sync(&info->irq_work);
+ usb_role_switch_put(info->role_sw);
+}
+
static int ptn5150_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
@@ -284,6 +311,15 @@ static int ptn5150_i2c_probe(struct i2c_client *i2c)
if (ret)
return -EINVAL;
+ info->role_sw = usb_role_switch_get(info->dev);
+ if (IS_ERR(info->role_sw))
+ return dev_err_probe(info->dev, PTR_ERR(info->role_sw),
+ "failed to get role switch\n");
+
+ ret = devm_add_action_or_reset(dev, ptn5150_work_sync_and_put, info);
+ if (ret)
+ return ret;
+
/*
* Update current extcon state if for example OTG connection was there
* before the probe
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 93da2d8379b1..f706f5288257 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -798,6 +798,7 @@ static const struct sm5502_type sm5504_data = {
static const struct of_device_id sm5502_dt_match[] = {
{ .compatible = "siliconmitus,sm5502-muic", .data = &sm5502_data },
{ .compatible = "siliconmitus,sm5504-muic", .data = &sm5504_data },
+ { .compatible = "siliconmitus,sm5703-muic", .data = &sm5502_data },
{ },
};
MODULE_DEVICE_TABLE(of, sm5502_dt_match);
@@ -830,6 +831,7 @@ static SIMPLE_DEV_PM_OPS(sm5502_muic_pm_ops,
static const struct i2c_device_id sm5502_i2c_id[] = {
{ "sm5502", (kernel_ulong_t)&sm5502_data },
{ "sm5504", (kernel_ulong_t)&sm5504_data },
+ { "sm5703-muic", (kernel_ulong_t)&sm5502_data },
{ }
};
MODULE_DEVICE_TABLE(i2c, sm5502_i2c_id);
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index f2b65d967384..40d967a11e87 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -226,16 +226,6 @@ static int usb_extcon_suspend(struct device *dev)
}
}
- /*
- * We don't want to process any IRQs after this point
- * as GPIOs used behind I2C subsystem might not be
- * accessible until resume completes. So disable IRQ.
- */
- if (info->id_gpiod)
- disable_irq(info->id_irq);
- if (info->vbus_gpiod)
- disable_irq(info->vbus_irq);
-
if (!device_may_wakeup(dev))
pinctrl_pm_select_sleep_state(dev);
@@ -267,11 +257,6 @@ static int usb_extcon_resume(struct device *dev)
}
}
- if (info->id_gpiod)
- enable_irq(info->id_irq);
- if (info->vbus_gpiod)
- enable_irq(info->vbus_irq);
-
queue_delayed_work(system_power_efficient_wq,
&info->wq_detcable, 0);
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
index 5290cc2d19d9..fde1db62be0d 100644
--- a/drivers/extcon/extcon-usbc-cros-ec.c
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -68,7 +68,7 @@ static int cros_ec_pd_command(struct cros_ec_extcon_info *info,
struct cros_ec_command *msg;
int ret;
- msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+ msg = kzalloc(struct_size(msg, data, max(outsize, insize)), GFP_KERNEL);
if (!msg)
return -ENOMEM;
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index a09e704fd0fa..d3a32b806499 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -399,6 +399,7 @@ static ssize_t cable_state_show(struct device *dev,
/**
* extcon_sync() - Synchronize the state for an external connector.
* @edev: the extcon device
+ * @id: the unique id indicating an external connector
*
* Note that this function send a notification in order to synchronize
* the state and property of an external connector.
@@ -736,6 +737,9 @@ EXPORT_SYMBOL_GPL(extcon_set_property);
/**
* extcon_set_property_sync() - Set property of an external connector with sync.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
* @prop_val: the pointer including the new value of extcon property
*
* Note that when setting the property value of external connector,
@@ -851,6 +855,8 @@ EXPORT_SYMBOL_GPL(extcon_set_property_capability);
* @extcon_name: the extcon name provided with extcon_dev_register()
*
* Return the pointer of extcon device if success or ERR_PTR(err) if fail.
+ * NOTE: This function returns -EPROBE_DEFER so it may only be called from
+ * probe() functions.
*/
struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
@@ -864,7 +870,7 @@ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
if (!strcmp(sd->name, extcon_name))
goto out;
}
- sd = NULL;
+ sd = ERR_PTR(-EPROBE_DEFER);
out:
mutex_unlock(&extcon_dev_list_lock);
return sd;
@@ -1218,19 +1224,14 @@ int extcon_dev_register(struct extcon_dev *edev)
edev->dev.type = &edev->extcon_dev_type;
}
- ret = device_register(&edev->dev);
- if (ret) {
- put_device(&edev->dev);
- goto err_dev;
- }
-
spin_lock_init(&edev->lock);
- edev->nh = devm_kcalloc(&edev->dev, edev->max_supported,
- sizeof(*edev->nh), GFP_KERNEL);
- if (!edev->nh) {
- ret = -ENOMEM;
- device_unregister(&edev->dev);
- goto err_dev;
+ if (edev->max_supported) {
+ edev->nh = kcalloc(edev->max_supported, sizeof(*edev->nh),
+ GFP_KERNEL);
+ if (!edev->nh) {
+ ret = -ENOMEM;
+ goto err_alloc_nh;
+ }
}
for (index = 0; index < edev->max_supported; index++)
@@ -1241,6 +1242,12 @@ int extcon_dev_register(struct extcon_dev *edev)
dev_set_drvdata(&edev->dev, edev);
edev->state = 0;
+ ret = device_register(&edev->dev);
+ if (ret) {
+ put_device(&edev->dev);
+ goto err_dev;
+ }
+
mutex_lock(&extcon_dev_list_lock);
list_add(&edev->entry, &extcon_dev_list);
mutex_unlock(&extcon_dev_list_lock);
@@ -1249,6 +1256,9 @@ int extcon_dev_register(struct extcon_dev *edev)
err_dev:
if (edev->max_supported)
+ kfree(edev->nh);
+err_alloc_nh:
+ if (edev->max_supported)
kfree(edev->extcon_dev_type.groups);
err_alloc_groups:
if (edev->max_supported && edev->mutually_exclusive) {
@@ -1308,6 +1318,7 @@ void extcon_dev_unregister(struct extcon_dev *edev)
if (edev->max_supported) {
kfree(edev->extcon_dev_type.groups);
kfree(edev->cables);
+ kfree(edev->nh);
}
put_device(&edev->dev);
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 1be0e8295222..28fcddcd688f 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -32,8 +32,7 @@ obj-y += broadcom/
obj-y += cirrus/
obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
-obj-$(CONFIG_EFI) += efi/
-obj-$(CONFIG_UEFI_CPER) += efi/
+obj-y += efi/
obj-y += imx/
obj-y += psci/
obj-y += smccc/
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index 3a353776bd34..66727ad3361b 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -604,7 +604,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
"%d-%d", dh->type, entry->instance);
if (*ret) {
- kfree(entry);
+ kobject_put(&entry->kobj);
return;
}
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index 69353dd0ea22..5cc238916551 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -685,8 +685,7 @@ static void edd_populate_dir(struct edd_device * edev)
int i;
for (i = 0; (attr = edd_attrs[i]) && !error; i++) {
- if (!attr->test ||
- (attr->test && attr->test(edev)))
+ if (!attr->test || attr->test(edev))
error = sysfs_create_file(&edev->kobj,&attr->attr);
}
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 4720ba98cec3..7aa4717cdcac 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -193,6 +193,9 @@ config EFI_TEST
Say Y here to enable the runtime services support via /dev/efi_test.
If unsure, say N.
+config EFI_DEV_PATH_PARSER
+ bool
+
config APPLE_PROPERTIES
bool "Apple Device Properties"
depends on EFI_STUB && X86
@@ -255,40 +258,15 @@ config EFI_DISABLE_PCI_DMA
options "efi=disable_early_pci_dma" or "efi=no_disable_early_pci_dma"
may be used to override this option.
-endmenu
-
-config EFI_EMBEDDED_FIRMWARE
- bool
- depends on EFI
- select CRYPTO_LIB_SHA256
-
-config UEFI_CPER
- bool
-
-config UEFI_CPER_ARM
- bool
- depends on UEFI_CPER && ( ARM || ARM64 )
- default y
-
-config UEFI_CPER_X86
- bool
- depends on UEFI_CPER && X86
- default y
-
-config EFI_DEV_PATH_PARSER
- bool
- depends on ACPI
- default n
-
config EFI_EARLYCON
def_bool y
- depends on EFI && SERIAL_EARLYCON && !ARM && !IA64
+ depends on SERIAL_EARLYCON && !ARM && !IA64
select FONT_SUPPORT
select ARCH_USE_MEMREMAP_PROT
config EFI_CUSTOM_SSDT_OVERLAYS
bool "Load custom ACPI SSDT overlay from an EFI variable"
- depends on EFI && ACPI
+ depends on ACPI
default ACPI_TABLE_UPGRADE
help
Allow loading of an ACPI SSDT overlay from an EFI variable specified
@@ -314,7 +292,6 @@ config EFI_DISABLE_RUNTIME
config EFI_COCO_SECRET
bool "EFI Confidential Computing Secret Area Support"
- depends on EFI
help
Confidential Computing platforms (such as AMD SEV) allow the
Guest Owner to securely inject secrets during guest VM launch.
@@ -327,3 +304,22 @@ config EFI_COCO_SECRET
for usage inside the kernel. This will allow the
virt/coco/efi_secret module to access the secrets, which in turn
allows userspace programs to access the injected secrets.
+
+config EFI_EMBEDDED_FIRMWARE
+ bool
+ select CRYPTO_LIB_SHA256
+
+endmenu
+
+config UEFI_CPER
+ bool
+
+config UEFI_CPER_ARM
+ bool
+ depends on UEFI_CPER && ( ARM || ARM64 )
+ default y
+
+config UEFI_CPER_X86
+ bool
+ depends on UEFI_CPER && X86
+ default y
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index b14e88ccefca..05ae8bcc9d67 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -260,10 +260,10 @@ adjust_memory_range_protection(unsigned long start, unsigned long size)
EFI_MEMORY_WB);
if (status != EFI_SUCCESS) {
- efi_warn("Unable to unprotect memory range [%08lx,%08lx]: %d\n",
+ efi_warn("Unable to unprotect memory range [%08lx,%08lx]: %lx\n",
unprotect_start,
unprotect_start + unprotect_size,
- (int)status);
+ status);
}
}
}
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 8177a0fae11d..14663f671323 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -948,17 +948,17 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory);
void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
{
struct stratix10_svc_data_mem *pmem;
- size_t size = 0;
list_for_each_entry(pmem, &svc_data_mem, node)
if (pmem->vaddr == kaddr) {
- size = pmem->size;
- break;
+ gen_pool_free(chan->ctrl->genpool,
+ (unsigned long)kaddr, pmem->size);
+ pmem->vaddr = NULL;
+ list_del(&pmem->node);
+ return;
}
- gen_pool_free(chan->ctrl->genpool, (unsigned long)kaddr, size);
- pmem->vaddr = NULL;
- list_del(&pmem->node);
+ list_del(&svc_data_mem);
}
EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index f21ece56695e..7977a494a651 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -36,8 +36,16 @@
/* BOOT_PIN_CTRL_MASK- out_val[11:8], out_en[3:0] */
#define CRL_APB_BOOTPIN_CTRL_MASK 0xF0FU
+/* IOCTL/QUERY feature payload size */
+#define FEATURE_PAYLOAD_SIZE 2
+
+/* Firmware feature check version mask */
+#define FIRMWARE_VERSION_MASK GENMASK(15, 0)
+
static bool feature_check_enabled;
static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
+static u32 ioctl_features[FEATURE_PAYLOAD_SIZE];
+static u32 query_features[FEATURE_PAYLOAD_SIZE];
static struct platform_device *em_dev;
@@ -167,21 +175,28 @@ static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
}
-/**
- * zynqmp_pm_feature() - Check weather given feature is supported or not
- * @api_id: API ID to check
- *
- * Return: Returns status, either success or error+reason
- */
-int zynqmp_pm_feature(const u32 api_id)
+static int __do_feature_check_call(const u32 api_id, u32 *ret_payload)
{
int ret;
- u32 ret_payload[PAYLOAD_ARG_CNT];
u64 smc_arg[2];
- struct pm_api_feature_data *feature_data;
- if (!feature_check_enabled)
- return 0;
+ smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
+ smc_arg[1] = api_id;
+
+ ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
+ if (ret)
+ ret = -EOPNOTSUPP;
+ else
+ ret = ret_payload[1];
+
+ return ret;
+}
+
+static int do_feature_check_call(const u32 api_id)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ struct pm_api_feature_data *feature_data;
/* Check for existing entry in hash table for given api */
hash_for_each_possible(pm_api_features_map, feature_data, hentry,
@@ -196,23 +211,86 @@ int zynqmp_pm_feature(const u32 api_id)
return -ENOMEM;
feature_data->pm_api_id = api_id;
- smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
- smc_arg[1] = api_id;
-
- ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
- if (ret)
- ret = -EOPNOTSUPP;
- else
- ret = ret_payload[1];
+ ret = __do_feature_check_call(api_id, ret_payload);
feature_data->feature_status = ret;
hash_add(pm_api_features_map, &feature_data->hentry, api_id);
+ if (api_id == PM_IOCTL)
+ /* Store supported IOCTL IDs mask */
+ memcpy(ioctl_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4);
+ else if (api_id == PM_QUERY_DATA)
+ /* Store supported QUERY IDs mask */
+ memcpy(query_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4);
+
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_feature);
/**
+ * zynqmp_pm_feature() - Check whether given feature is supported or not and
+ * store supported IOCTL/QUERY ID mask
+ * @api_id: API ID to check
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_feature(const u32 api_id)
+{
+ int ret;
+
+ if (!feature_check_enabled)
+ return 0;
+
+ ret = do_feature_check_call(api_id);
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_is_function_supported() - Check whether given IOCTL/QUERY function
+ * is supported or not
+ * @api_id: PM_IOCTL or PM_QUERY_DATA
+ * @id: IOCTL or QUERY function IDs
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
+{
+ int ret;
+ u32 *bit_mask;
+
+ /* Input arguments validation */
+ if (id >= 64 || (api_id != PM_IOCTL && api_id != PM_QUERY_DATA))
+ return -EINVAL;
+
+ /* Check feature check API version */
+ ret = do_feature_check_call(PM_FEATURE_CHECK);
+ if (ret < 0)
+ return ret;
+
+ /* Check if feature check version 2 is supported or not */
+ if ((ret & FIRMWARE_VERSION_MASK) == PM_API_VERSION_2) {
+ /*
+ * Call feature check for IOCTL/QUERY API to get IOCTL ID or
+ * QUERY ID feature status.
+ */
+ ret = do_feature_check_call(api_id);
+ if (ret < 0)
+ return ret;
+
+ bit_mask = (api_id == PM_IOCTL) ? ioctl_features : query_features;
+
+ if ((bit_mask[(id / 32)] & BIT((id % 32))) == 0U)
+ return -EOPNOTSUPP;
+ } else {
+ return -ENODATA;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported);
+
+/**
* zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
* caller function depending on the configuration
* @pm_api_id: Requested PM-API call
@@ -1584,6 +1662,10 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
struct zynqmp_devinfo *devinfo;
int ret;
+ ret = get_set_conduit_method(dev->of_node);
+ if (ret)
+ return ret;
+
np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp");
if (!np) {
np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
@@ -1592,11 +1674,14 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
feature_check_enabled = true;
}
- of_node_put(np);
- ret = get_set_conduit_method(dev->of_node);
- if (ret)
- return ret;
+ if (!feature_check_enabled) {
+ ret = do_feature_check_call(PM_FEATURE_CHECK);
+ if (ret >= 0)
+ feature_check_enabled = true;
+ }
+
+ of_node_put(np);
devinfo = devm_kzalloc(dev, sizeof(*devinfo), GFP_KERNEL);
if (!devinfo)
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 0bff783d1b61..5935b3d0abd5 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -18,9 +18,9 @@ obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o
obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o
obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o
-obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA) += versal-fpga.o
-obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o
-obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o
+obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA) += versal-fpga.o
+obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o
+obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o
# FPGA Bridge Drivers
obj-$(CONFIG_FPGA_BRIDGE) += fpga-bridge.o
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index 717ac9715970..fd1fa55c9113 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -259,6 +259,15 @@ static int find_dfls_by_default(struct pci_dev *pcidev,
*/
bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
+ if (bar == FME_PORT_OFST_BAR_SKIP) {
+ continue;
+ } else if (bar >= PCI_STD_NUM_BARS) {
+ dev_err(&pcidev->dev, "bad BAR %d for port %d\n",
+ bar, i);
+ ret = -EINVAL;
+ break;
+ }
+
start = pci_resource_start(pcidev, bar) + offset;
len = pci_resource_len(pcidev, bar) - offset;
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 599bb21d86af..6bff39ff21a0 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -940,9 +940,12 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
{
void __iomem *base = binfo->ioaddr + ofst;
unsigned int i, ibase, inr = 0;
+ enum dfl_id_type type;
int virq;
u64 v;
+ type = feature_dev_id_type(binfo->feature_dev);
+
/*
* Ideally DFL framework should only read info from DFL header, but
* current version DFL only provides mmio resources information for
@@ -957,22 +960,25 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
* code will be added. But in order to be compatible to old version
* DFL, the driver may still fall back to these quirks.
*/
- switch (fid) {
- case PORT_FEATURE_ID_UINT:
- v = readq(base + PORT_UINT_CAP);
- ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
- inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
- break;
- case PORT_FEATURE_ID_ERROR:
- v = readq(base + PORT_ERROR_CAP);
- ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
- inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
- break;
- case FME_FEATURE_ID_GLOBAL_ERR:
- v = readq(base + FME_ERROR_CAP);
- ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
- inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
- break;
+ if (type == PORT_ID) {
+ switch (fid) {
+ case PORT_FEATURE_ID_UINT:
+ v = readq(base + PORT_UINT_CAP);
+ ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
+ inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
+ break;
+ case PORT_FEATURE_ID_ERROR:
+ v = readq(base + PORT_ERROR_CAP);
+ ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
+ break;
+ }
+ } else if (type == FME_ID) {
+ if (fid == FME_FEATURE_ID_GLOBAL_ERR) {
+ v = readq(base + FME_ERROR_CAP);
+ ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
+ }
}
if (!inr) {
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 53572c7aced0..06cfcd5e84bb 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -89,6 +89,7 @@
#define FME_HDR_NEXT_AFU NEXT_AFU
#define FME_HDR_CAP 0x30
#define FME_HDR_PORT_OFST(n) (0x38 + ((n) * 0x8))
+#define FME_PORT_OFST_BAR_SKIP 7
#define FME_HDR_BITSTREAM_ID 0x60
#define FME_HDR_BITSTREAM_MD 0x68
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index d49a9ce34568..a3595ecc3f79 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -148,11 +148,12 @@ static int fpga_mgr_write_init_buf(struct fpga_manager *mgr,
int ret;
mgr->state = FPGA_MGR_STATE_WRITE_INIT;
- if (!mgr->mops->initial_header_size)
+ if (!mgr->mops->initial_header_size) {
ret = fpga_mgr_write_init(mgr, info, NULL, 0);
- else
- ret = fpga_mgr_write_init(
- mgr, info, buf, min(mgr->mops->initial_header_size, count));
+ } else {
+ count = min(mgr->mops->initial_header_size, count);
+ ret = fpga_mgr_write_init(mgr, info, buf, count);
+ }
if (ret) {
dev_err(&mgr->dev, "Error preparing FPGA for writing\n");
@@ -730,6 +731,8 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res)
* @parent: fpga manager device from pdev
* @info: parameters for fpga manager
*
+ * Return: fpga manager pointer on success, negative error code otherwise.
+ *
* This is the devres variant of fpga_mgr_register_full() for which the unregister
* function will be called automatically when the managing device is detached.
*/
@@ -763,6 +766,8 @@ EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full);
* @mops: pointer to structure of fpga manager ops
* @priv: fpga manager private data
*
+ * Return: fpga manager pointer on success, negative error code otherwise.
+ *
* This is the devres variant of fpga_mgr_register() for which the
* unregister function will be called automatically when the managing
* device is detached.
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index b0ac18de4885..485948e3c0db 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -18,9 +18,9 @@
static DEFINE_IDA(fpga_region_ida);
static struct class *fpga_region_class;
-struct fpga_region *fpga_region_class_find(
- struct device *start, const void *data,
- int (*match)(struct device *, const void *))
+struct fpga_region *
+fpga_region_class_find(struct device *start, const void *data,
+ int (*match)(struct device *, const void *))
{
struct device *dev;
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index 50b83057c048..ae82532fc127 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -28,7 +28,7 @@ MODULE_DEVICE_TABLE(of, fpga_region_of_match);
*
* Caller will need to put_device(&region->dev) when done.
*
- * Returns FPGA Region struct or NULL
+ * Return: FPGA Region struct or NULL
*/
static struct fpga_region *of_fpga_region_find(struct device_node *np)
{
@@ -80,7 +80,7 @@ static struct fpga_manager *of_fpga_region_get_mgr(struct device_node *np)
* Caller should call fpga_bridges_put(&region->bridge_list) when
* done with the bridges.
*
- * Return 0 for success (even if there are no bridges specified)
+ * Return: 0 for success (even if there are no bridges specified)
* or -EBUSY if any of the bridges are in use.
*/
static int of_fpga_region_get_bridges(struct fpga_region *region)
@@ -139,13 +139,13 @@ static int of_fpga_region_get_bridges(struct fpga_region *region)
}
/**
- * child_regions_with_firmware
+ * child_regions_with_firmware - Used to check the child region info.
* @overlay: device node of the overlay
*
* If the overlay adds child FPGA regions, they are not allowed to have
* firmware-name property.
*
- * Return 0 for OK or -EINVAL if child FPGA region adds firmware-name.
+ * Return: 0 for OK or -EINVAL if child FPGA region adds firmware-name.
*/
static int child_regions_with_firmware(struct device_node *overlay)
{
@@ -184,14 +184,14 @@ static int child_regions_with_firmware(struct device_node *overlay)
* Given an overlay applied to an FPGA region, parse the FPGA image specific
* info in the overlay and do some checking.
*
- * Returns:
+ * Return:
* NULL if overlay doesn't direct us to program the FPGA.
* fpga_image_info struct if there is an image to program.
* error code for invalid overlay.
*/
-static struct fpga_image_info *of_fpga_region_parse_ov(
- struct fpga_region *region,
- struct device_node *overlay)
+static struct fpga_image_info *
+of_fpga_region_parse_ov(struct fpga_region *region,
+ struct device_node *overlay)
{
struct device *dev = &region->dev;
struct fpga_image_info *info;
@@ -279,7 +279,7 @@ ret_no_info:
* If the checks fail, overlay is rejected and does not get added to the
* live tree.
*
- * Returns 0 for success or negative error code for failure.
+ * Return: 0 for success or negative error code for failure.
*/
static int of_fpga_region_notify_pre_apply(struct fpga_region *region,
struct of_overlay_notify_data *nd)
@@ -339,7 +339,7 @@ static void of_fpga_region_notify_post_remove(struct fpga_region *region,
* This notifier handles programming an FPGA when a "firmware-name" property is
* added to an fpga-region.
*
- * Returns NOTIFY_OK or error if FPGA programming fails.
+ * Return: NOTIFY_OK or error if FPGA programming fails.
*/
static int of_fpga_region_notify(struct notifier_block *nb,
unsigned long action, void *arg)
@@ -446,6 +446,8 @@ static struct platform_driver of_fpga_region_driver = {
/**
* of_fpga_region_init - init function for fpga_region class
* Creates the fpga_region class and registers a reconfig notifier.
+ *
+ * Return: 0 on success, negative error code otherwise.
*/
static int __init of_fpga_region_init(void)
{
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index f1e4ac90e7d3..e388e75103f4 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -406,12 +406,6 @@ static int adp5588_gpio_probe(struct i2c_client *client)
if (ret)
return ret;
- if (pdata && pdata->setup) {
- ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context);
- if (ret < 0)
- dev_warn(&client->dev, "setup failed, %d\n", ret);
- }
-
i2c_set_clientdata(client, dev);
return 0;
@@ -419,20 +413,7 @@ static int adp5588_gpio_probe(struct i2c_client *client)
static int adp5588_gpio_remove(struct i2c_client *client)
{
- struct adp5588_gpio_platform_data *pdata =
- dev_get_platdata(&client->dev);
struct adp5588_gpio *dev = i2c_get_clientdata(client);
- int ret;
-
- if (pdata && pdata->teardown) {
- ret = pdata->teardown(client,
- dev->gpio_chip.base, dev->gpio_chip.ngpio,
- pdata->context);
- if (ret < 0) {
- dev_err(&client->dev, "teardown failed %d\n", ret);
- return ret;
- }
- }
if (dev->client->irq)
free_irq(dev->client->irq, dev);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index b444c6ab958b..08bc52c3cdcb 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -1120,20 +1120,21 @@ static int pca953x_regcache_sync(struct device *dev)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
int ret;
+ u8 regaddr;
/*
* The ordering between direction and output is important,
* sync these registers first and only then sync the rest.
*/
- ret = regcache_sync_region(chip->regmap, chip->regs->direction,
- chip->regs->direction + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
return ret;
}
- ret = regcache_sync_region(chip->regmap, chip->regs->output,
- chip->regs->output + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
return ret;
@@ -1141,16 +1142,18 @@ static int pca953x_regcache_sync(struct device *dev)
#ifdef CONFIG_GPIO_PCA953X_IRQ
if (chip->driver_data & PCA_PCAL) {
- ret = regcache_sync_region(chip->regmap, PCAL953X_IN_LATCH,
- PCAL953X_IN_LATCH + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync INT latch registers: %d\n",
ret);
return ret;
}
- ret = regcache_sync_region(chip->regmap, PCAL953X_INT_MASK,
- PCAL953X_INT_MASK + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync INT mask registers: %d\n",
ret);
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index 099e358d2491..e342a6dc4c6c 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -19,6 +19,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
+#include <linux/pinctrl/pinconf-generic.h>
#include <linux/regmap.h>
#include "../pinctrl/core.h"
@@ -706,7 +707,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
struct device_node *pctlnp = of_get_parent(np);
struct pinctrl_dev *pctldev = NULL;
struct rockchip_pin_bank *bank = NULL;
- struct rockchip_pin_output_deferred *cfg;
+ struct rockchip_pin_deferred *cfg;
static int gpio;
int id, ret;
@@ -747,15 +748,27 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
return ret;
}
- while (!list_empty(&bank->deferred_output)) {
- cfg = list_first_entry(&bank->deferred_output,
- struct rockchip_pin_output_deferred, head);
+ while (!list_empty(&bank->deferred_pins)) {
+ cfg = list_first_entry(&bank->deferred_pins,
+ struct rockchip_pin_deferred, head);
list_del(&cfg->head);
- ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
- if (ret)
- dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin, cfg->arg);
-
+ switch (cfg->param) {
+ case PIN_CONFIG_OUTPUT:
+ ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
+ if (ret)
+ dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin,
+ cfg->arg);
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ ret = rockchip_gpio_direction_input(&bank->gpio_chip, cfg->pin);
+ if (ret)
+ dev_warn(dev, "setting input pin %u failed\n", cfg->pin);
+ break;
+ default:
+ dev_warn(dev, "unknown deferred config param %d\n", cfg->param);
+ break;
+ }
kfree(cfg);
}
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 84c4f1e9fb0c..de28a68daea0 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2017 NVIDIA Corporation
+ * Copyright (c) 2016-2022 NVIDIA Corporation
*
* Author: Thierry Reding <treding@nvidia.com>
+ * Dipen Patel <dpatel@nvidia.com>
*/
#include <linux/gpio/driver.h>
@@ -11,6 +12,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/hte.h>
#include <dt-bindings/gpio/tegra186-gpio.h>
#include <dt-bindings/gpio/tegra194-gpio.h>
@@ -36,6 +38,7 @@
#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL BIT(4)
#define TEGRA186_GPIO_ENABLE_CONFIG_DEBOUNCE BIT(5)
#define TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT BIT(6)
+#define TEGRA186_GPIO_ENABLE_CONFIG_TIMESTAMP_FUNC BIT(7)
#define TEGRA186_GPIO_DEBOUNCE_CONTROL 0x04
#define TEGRA186_GPIO_DEBOUNCE_CONTROL_THRESHOLD(x) ((x) & 0xff)
@@ -76,6 +79,7 @@ struct tegra_gpio_soc {
const struct tegra186_pin_range *pin_ranges;
unsigned int num_pin_ranges;
const char *pinmux;
+ bool has_gte;
};
struct tegra_gpio {
@@ -193,6 +197,76 @@ static int tegra186_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
+#define HTE_BOTH_EDGES (HTE_RISING_EDGE_TS | HTE_FALLING_EDGE_TS)
+
+static int tegra186_gpio_en_hw_ts(struct gpio_chip *gc, u32 offset,
+ unsigned long flags)
+{
+ struct tegra_gpio *gpio;
+ void __iomem *base;
+ int value;
+
+ if (!gc)
+ return -EINVAL;
+
+ gpio = gpiochip_get_data(gc);
+ if (!gpio)
+ return -ENODEV;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return -EINVAL;
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TIMESTAMP_FUNC;
+
+ if (flags == HTE_BOTH_EDGES) {
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE;
+ } else if (flags == HTE_RISING_EDGE_TS) {
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL;
+ } else if (flags == HTE_FALLING_EDGE_TS) {
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+ }
+
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+
+ return 0;
+}
+
+static int tegra186_gpio_dis_hw_ts(struct gpio_chip *gc, u32 offset,
+ unsigned long flags)
+{
+ struct tegra_gpio *gpio;
+ void __iomem *base;
+ int value;
+
+ if (!gc)
+ return -EINVAL;
+
+ gpio = gpiochip_get_data(gc);
+ if (!gpio)
+ return -ENODEV;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return -EINVAL;
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TIMESTAMP_FUNC;
+ if (flags == HTE_BOTH_EDGES) {
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE;
+ } else if (flags == HTE_RISING_EDGE_TS) {
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL;
+ } else if (flags == HTE_FALLING_EDGE_TS) {
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+ }
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+
+ return 0;
+}
+
static int tegra186_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct tegra_gpio *gpio = gpiochip_get_data(chip);
@@ -747,6 +821,10 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->gpio.set = tegra186_gpio_set;
gpio->gpio.set_config = tegra186_gpio_set_config;
gpio->gpio.add_pin_ranges = tegra186_gpio_add_pin_ranges;
+ if (gpio->soc->has_gte) {
+ gpio->gpio.en_hw_timestamp = tegra186_gpio_en_hw_ts;
+ gpio->gpio.dis_hw_timestamp = tegra186_gpio_dis_hw_ts;
+ }
gpio->gpio.base = -1;
@@ -991,6 +1069,7 @@ static const struct tegra_gpio_soc tegra194_aon_soc = {
.name = "tegra194-gpio-aon",
.instance = 1,
.num_irqs_per_bank = 8,
+ .has_gte = true,
};
#define TEGRA234_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index c2900b1be69d..f5aa5f93342a 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -24,6 +24,7 @@
#include <linux/timekeeping.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
+#include <linux/hte.h>
#include <uapi/linux/gpio.h>
#include "gpiolib.h"
@@ -464,6 +465,25 @@ struct line {
* stale value.
*/
unsigned int level;
+ /*
+ * -- hte specific fields --
+ */
+ struct hte_ts_desc hdesc;
+ /*
+ * The HTE provider sets the line level at the time of the event. The
+ * valid value is 0 or 1; a negative value indicates an error.
+ */
+ int raw_level;
+ /*
+ * When sw_debounce is set on an HTE-enabled line, this is a running
+ * counter of the discarded events.
+ */
+ u32 total_discard_seq;
+ /*
+ * When sw_debounce is set on an HTE-enabled line, this variable records
+ * the last sequence number before the debounce period expires.
+ */
+ u32 last_seqno;
};
/**
@@ -518,6 +538,7 @@ struct linereq {
GPIO_V2_LINE_DRIVE_FLAGS | \
GPIO_V2_LINE_EDGE_FLAGS | \
GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
+ GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
GPIO_V2_LINE_BIAS_FLAGS)
static void linereq_put_event(struct linereq *lr,
@@ -542,10 +563,98 @@ static u64 line_event_timestamp(struct line *line)
{
if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
return ktime_get_real_ns();
+ else if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
+ return line->timestamp_ns;
return ktime_get_ns();
}
+static enum hte_return process_hw_ts_thread(void *p)
+{
+ struct line *line;
+ struct linereq *lr;
+ struct gpio_v2_line_event le;
+ int level;
+ u64 eflags;
+
+ if (!p)
+ return HTE_CB_HANDLED;
+
+ line = p;
+ lr = line->req;
+
+ memset(&le, 0, sizeof(le));
+
+ le.timestamp_ns = line->timestamp_ns;
+ eflags = READ_ONCE(line->eflags);
+
+ if (eflags == GPIO_V2_LINE_FLAG_EDGE_BOTH) {
+ if (line->raw_level >= 0) {
+ if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
+ level = !line->raw_level;
+ else
+ level = line->raw_level;
+ } else {
+ level = gpiod_get_value_cansleep(line->desc);
+ }
+
+ if (level)
+ le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
+ else
+ le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
+ } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
+ /* Emit low-to-high event */
+ le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
+ } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
+ /* Emit high-to-low event */
+ le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
+ } else {
+ return HTE_CB_HANDLED;
+ }
+ le.line_seqno = line->line_seqno;
+ le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
+ le.offset = gpio_chip_hwgpio(line->desc);
+
+ linereq_put_event(lr, &le);
+
+ return HTE_CB_HANDLED;
+}
+
+static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
+{
+ struct line *line;
+ struct linereq *lr;
+ int diff_seqno = 0;
+
+ if (!ts || !p)
+ return HTE_CB_HANDLED;
+
+ line = p;
+ line->timestamp_ns = ts->tsc;
+ line->raw_level = ts->raw_level;
+ lr = line->req;
+
+ if (READ_ONCE(line->sw_debounced)) {
+ line->total_discard_seq++;
+ line->last_seqno = ts->seq;
+ mod_delayed_work(system_wq, &line->work,
+ usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
+ } else {
+ if (unlikely(ts->seq < line->line_seqno))
+ return HTE_CB_HANDLED;
+
+ diff_seqno = ts->seq - line->line_seqno;
+ line->line_seqno = ts->seq;
+ if (lr->num_lines != 1)
+ line->req_seqno = atomic_add_return(diff_seqno,
+ &lr->seqno);
+
+ return HTE_RUN_SECOND_CB;
+ }
+
+ return HTE_CB_HANDLED;
+}
+
static irqreturn_t edge_irq_thread(int irq, void *p)
{
struct line *line = p;
@@ -651,10 +760,16 @@ static void debounce_work_func(struct work_struct *work)
struct gpio_v2_line_event le;
struct line *line = container_of(work, struct line, work.work);
struct linereq *lr;
- int level;
+ int level, diff_seqno;
u64 eflags;
- level = gpiod_get_raw_value_cansleep(line->desc);
+ if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) {
+ level = line->raw_level;
+ if (level < 0)
+ level = gpiod_get_raw_value_cansleep(line->desc);
+ } else {
+ level = gpiod_get_raw_value_cansleep(line->desc);
+ }
if (level < 0) {
pr_debug_ratelimited("debouncer failed to read line value\n");
return;
@@ -685,10 +800,21 @@ static void debounce_work_func(struct work_struct *work)
lr = line->req;
le.timestamp_ns = line_event_timestamp(line);
le.offset = gpio_chip_hwgpio(line->desc);
- line->line_seqno++;
- le.line_seqno = line->line_seqno;
- le.seqno = (lr->num_lines == 1) ?
- le.line_seqno : atomic_inc_return(&lr->seqno);
+ if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) {
+ /* discard events except the last one */
+ line->total_discard_seq -= 1;
+ diff_seqno = line->last_seqno - line->total_discard_seq -
+ line->line_seqno;
+ line->line_seqno = line->last_seqno - line->total_discard_seq;
+ le.line_seqno = line->line_seqno;
+ le.seqno = (lr->num_lines == 1) ?
+ le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
+ } else {
+ line->line_seqno++;
+ le.line_seqno = line->line_seqno;
+ le.seqno = (lr->num_lines == 1) ?
+ le.line_seqno : atomic_inc_return(&lr->seqno);
+ }
if (level)
/* Emit low-to-high event */
@@ -700,8 +826,34 @@ static void debounce_work_func(struct work_struct *work)
linereq_put_event(lr, &le);
}
+static int hte_edge_setup(struct line *line, u64 eflags)
+{
+ int ret;
+ unsigned long flags = 0;
+ struct hte_ts_desc *hdesc = &line->hdesc;
+
+ if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
+ flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ HTE_FALLING_EDGE_TS : HTE_RISING_EDGE_TS;
+ if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
+ flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ HTE_RISING_EDGE_TS : HTE_FALLING_EDGE_TS;
+
+ line->total_discard_seq = 0;
+
+ hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags,
+ NULL, line->desc);
+
+ ret = hte_ts_get(NULL, hdesc, 0);
+ if (ret)
+ return ret;
+
+ return hte_request_ts_ns(hdesc, process_hw_ts,
+ process_hw_ts_thread, line);
+}
+
static int debounce_setup(struct line *line,
- unsigned int debounce_period_us)
+ unsigned int debounce_period_us, bool hte_req)
{
unsigned long irqflags;
int ret, level, irq;
@@ -721,19 +873,27 @@ static int debounce_setup(struct line *line,
if (level < 0)
return level;
- irq = gpiod_to_irq(line->desc);
- if (irq < 0)
- return -ENXIO;
+ if (!hte_req) {
+ irq = gpiod_to_irq(line->desc);
+ if (irq < 0)
+ return -ENXIO;
- WRITE_ONCE(line->level, level);
- irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
- ret = request_irq(irq, debounce_irq_handler, irqflags,
- line->req->label, line);
- if (ret)
- return ret;
+ irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
+ ret = request_irq(irq, debounce_irq_handler, irqflags,
+ line->req->label, line);
+ if (ret)
+ return ret;
+ line->irq = irq;
+ } else {
+ ret = hte_edge_setup(line,
+ GPIO_V2_LINE_FLAG_EDGE_RISING |
+ GPIO_V2_LINE_FLAG_EDGE_FALLING);
+ if (ret)
+ return ret;
+ }
+ WRITE_ONCE(line->level, level);
WRITE_ONCE(line->sw_debounced, 1);
- line->irq = irq;
}
return 0;
}
@@ -766,13 +926,16 @@ static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
return 0;
}
-static void edge_detector_stop(struct line *line)
+static void edge_detector_stop(struct line *line, bool hte_en)
{
- if (line->irq) {
+ if (line->irq && !hte_en) {
free_irq(line->irq, line);
line->irq = 0;
}
+ if (hte_en)
+ hte_ts_put(&line->hdesc);
+
cancel_delayed_work_sync(&line->work);
WRITE_ONCE(line->sw_debounced, 0);
WRITE_ONCE(line->eflags, 0);
@@ -784,7 +947,7 @@ static void edge_detector_stop(struct line *line)
static int edge_detector_setup(struct line *line,
struct gpio_v2_line_config *lc,
unsigned int line_idx,
- u64 eflags)
+ u64 eflags, bool hte_req)
{
u32 debounce_period_us;
unsigned long irqflags = 0;
@@ -799,7 +962,7 @@ static int edge_detector_setup(struct line *line,
WRITE_ONCE(line->eflags, eflags);
if (gpio_v2_line_config_debounced(lc, line_idx)) {
debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
- ret = debounce_setup(line, debounce_period_us);
+ ret = debounce_setup(line, debounce_period_us, hte_req);
if (ret)
return ret;
WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
@@ -809,6 +972,9 @@ static int edge_detector_setup(struct line *line,
if (!eflags || READ_ONCE(line->sw_debounced))
return 0;
+ if (hte_req)
+ return hte_edge_setup(line, eflags);
+
irq = gpiod_to_irq(line->desc);
if (irq < 0)
return -ENXIO;
@@ -834,13 +1000,18 @@ static int edge_detector_setup(struct line *line,
static int edge_detector_update(struct line *line,
struct gpio_v2_line_config *lc,
unsigned int line_idx,
- u64 eflags, bool polarity_change)
+ u64 flags, bool polarity_change,
+ bool prev_hte_flag)
{
+ u64 eflags = flags & GPIO_V2_LINE_EDGE_FLAGS;
unsigned int debounce_period_us =
- gpio_v2_line_config_debounce_period(lc, line_idx);
+ gpio_v2_line_config_debounce_period(lc, line_idx);
+ bool hte_change = (prev_hte_flag !=
+ ((flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) != 0));
if ((READ_ONCE(line->eflags) == eflags) && !polarity_change &&
- (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
+ (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us)
+ && !hte_change)
return 0;
/* sw debounced and still will be...*/
@@ -851,11 +1022,12 @@ static int edge_detector_update(struct line *line,
}
/* reconfiguring edge detection or sw debounce being disabled */
- if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
+ if ((line->irq && !READ_ONCE(line->sw_debounced)) || prev_hte_flag ||
(!debounce_period_us && READ_ONCE(line->sw_debounced)))
- edge_detector_stop(line);
+ edge_detector_stop(line, prev_hte_flag);
- return edge_detector_setup(line, lc, line_idx, eflags);
+ return edge_detector_setup(line, lc, line_idx, eflags,
+ flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
}
static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
@@ -891,7 +1063,6 @@ static int gpio_v2_line_flags_validate(u64 flags)
/* Return an error if an unknown flag is set */
if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
return -EINVAL;
-
/*
* Do not allow both INPUT and OUTPUT flags to be set as they are
* contradictory.
@@ -900,6 +1071,11 @@ static int gpio_v2_line_flags_validate(u64 flags)
(flags & GPIO_V2_LINE_FLAG_OUTPUT))
return -EINVAL;
+ /* Only allow one event clock source */
+ if ((flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
+ (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
+ return -EINVAL;
+
/* Edge detection requires explicit input. */
if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
!(flags & GPIO_V2_LINE_FLAG_INPUT))
@@ -992,6 +1168,8 @@ static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
+ assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
+ flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
}
static long linereq_get_values(struct linereq *lr, void __user *ip)
@@ -1121,6 +1299,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
unsigned int i;
u64 flags;
bool polarity_change;
+ bool prev_hte_flag;
int ret;
for (i = 0; i < lr->num_lines; i++) {
@@ -1130,6 +1309,8 @@ static long linereq_set_config_unlocked(struct linereq *lr,
(!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) !=
((flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW) != 0));
+ prev_hte_flag = !!test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags);
+
gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
/*
* Lines have to be requested explicitly for input
@@ -1138,7 +1319,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
int val = gpio_v2_line_config_output_value(lc, i);
- edge_detector_stop(&lr->lines[i]);
+ edge_detector_stop(&lr->lines[i], prev_hte_flag);
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
@@ -1148,8 +1329,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
return ret;
ret = edge_detector_update(&lr->lines[i], lc, i,
- flags & GPIO_V2_LINE_EDGE_FLAGS,
- polarity_change);
+ flags, polarity_change, prev_hte_flag);
if (ret)
return ret;
}
@@ -1280,9 +1460,12 @@ static ssize_t linereq_read(struct file *file,
static void linereq_free(struct linereq *lr)
{
unsigned int i;
+ bool hte;
for (i = 0; i < lr->num_lines; i++) {
- edge_detector_stop(&lr->lines[i]);
+ hte = !!test_bit(FLAG_EVENT_CLOCK_HTE,
+ &lr->lines[i].desc->flags);
+ edge_detector_stop(&lr->lines[i], hte);
if (lr->lines[i].desc)
gpiod_free(lr->lines[i].desc);
}
@@ -1408,7 +1591,8 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
goto out_free_linereq;
ret = edge_detector_setup(&lr->lines[i], lc, i,
- flags & GPIO_V2_LINE_EDGE_FLAGS);
+ flags & GPIO_V2_LINE_EDGE_FLAGS,
+ flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
if (ret)
goto out_free_linereq;
}
@@ -1961,6 +2145,8 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags))
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
+ else if (test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags))
+ info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
debounce_period_us = READ_ONCE(desc->debounce_period_us);
if (debounce_period_us) {
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index b7ac07f43b95..3d6c3ffd5576 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -930,6 +930,11 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
if (!np)
return 0;
+ if (!of_property_read_bool(np, "gpio-ranges") &&
+ chip->of_gpio_ranges_fallback) {
+ return chip->of_gpio_ranges_fallback(chip, np);
+ }
+
group_names = of_find_property(np, group_names_propname, NULL);
for (;; index++) {
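A hypothetical driver-side sketch of the of_gpio_ranges_fallback hook that this hunk starts honouring when a node lacks a "gpio-ranges" property; the foo_* names and the 1:1 mapping are assumptions, not taken from the series.

/* Map all of the chip's GPIOs 1:1 onto the parent pin controller. */
static int foo_of_gpio_ranges_fallback(struct gpio_chip *gc,
				       struct device_node *np)
{
	struct pinctrl_dev *pctldev = of_pinctrl_get(of_get_parent(np));

	if (!pctldev)
		return -EPROBE_DEFER;

	return gpiochip_add_pin_range(gc, pinctrl_dev_get_devname(pctldev),
				      0, 0, gc->ngpio);
}

/* In probe(), before registering the chip: */
	gc->of_gpio_ranges_fallback = foo_of_gpio_ranges_fallback;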
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 9fff4f464ca3..9535f48e18d1 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2454,6 +2454,64 @@ set_output_flag:
EXPORT_SYMBOL_GPL(gpiod_direction_output);
/**
+ * gpiod_enable_hw_timestamp_ns - Enable hardware timestamp in nanoseconds.
+ *
+ * @desc: GPIO to enable.
+ * @flags: Flags related to GPIO edge.
+ *
+ * Return: 0 in case of success, else negative error code.
+ */
+int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
+{
+ int ret = 0;
+ struct gpio_chip *gc;
+
+ VALIDATE_DESC(desc);
+
+ gc = desc->gdev->chip;
+ if (!gc->en_hw_timestamp) {
+ gpiod_warn(desc, "%s: hw ts not supported\n", __func__);
+ return -ENOTSUPP;
+ }
+
+ ret = gc->en_hw_timestamp(gc, gpio_chip_hwgpio(desc), flags);
+ if (ret)
+ gpiod_warn(desc, "%s: hw ts request failed\n", __func__);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gpiod_enable_hw_timestamp_ns);
+
+/**
+ * gpiod_disable_hw_timestamp_ns - Disable hardware timestamp.
+ *
+ * @desc: GPIO to disable.
+ * @flags: Flags related to GPIO edge, same value as used during enable call.
+ *
+ * Return: 0 in case of success, else negative error code.
+ */
+int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
+{
+ int ret = 0;
+ struct gpio_chip *gc;
+
+ VALIDATE_DESC(desc);
+
+ gc = desc->gdev->chip;
+ if (!gc->dis_hw_timestamp) {
+ gpiod_warn(desc, "%s: hw ts not supported\n", __func__);
+ return -ENOTSUPP;
+ }
+
+ ret = gc->dis_hw_timestamp(gc, gpio_chip_hwgpio(desc), flags);
+ if (ret)
+ gpiod_warn(desc, "%s: hw ts release failed\n", __func__);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gpiod_disable_hw_timestamp_ns);
+
+/**
* gpiod_set_config - sets @config for a GPIO
* @desc: descriptor of the GPIO for which to set the configuration
* @config: Same packed config format as generic pinconf
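A minimal in-kernel consumer sketch for the two new helpers, assuming the line's parent controller implements the en_hw_timestamp/dis_hw_timestamp hooks as the Tegra186 driver above does; "dev" and the "sample" con-id are placeholders.

	struct gpio_desc *in = gpiod_get(dev, "sample", GPIOD_IN);
	unsigned long edges = HTE_RISING_EDGE_TS | HTE_FALLING_EDGE_TS;
	int ret;

	if (IS_ERR(in))
		return PTR_ERR(in);

	ret = gpiod_enable_hw_timestamp_ns(in, edges);
	if (ret)
		dev_warn(dev, "hardware timestamping unavailable: %d\n", ret);

	/* ... consume timestamped edges through the HTE subsystem ... */

	gpiod_disable_hw_timestamp_ns(in, edges);
	gpiod_put(in);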
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index eef3ec073d9e..d900ecdbac46 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -161,6 +161,7 @@ struct gpio_desc {
#define FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
#define FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
#define FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
+#define FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
/* Connection label */
const char *label;
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index 835c88318cec..8997f0096545 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -2,7 +2,6 @@
# drm/tegra depends on host1x, so if both drivers are built-in care must be
# taken to initialize them in the correct order. Link order is the only way
# to ensure this currently.
-obj-$(CONFIG_TEGRA_HOST1X) += host1x/
-obj-y += drm/ vga/
+obj-y += host1x/ drm/ vga/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
obj-$(CONFIG_TRACE_GPU_MEM) += trace/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 8b5452a8d330..67abf8dcd30a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1621,7 +1621,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
mutex_lock(&mem->lock);
- /* Unpin MMIO/DOORBELL BO's that were pinnned during allocation */
+ /* Unpin MMIO/DOORBELL BO's that were pinned during allocation */
if (mem->alloc_flags &
(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 63e0293edc5f..fd8f3731758e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -188,13 +188,17 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
vram_type = AMDGPU_VRAM_TYPE_DDR3;
break;
case Ddr4MemType:
- case LpDdr4MemType:
vram_type = AMDGPU_VRAM_TYPE_DDR4;
break;
+ case LpDdr4MemType:
+ vram_type = AMDGPU_VRAM_TYPE_LPDDR4;
+ break;
case Ddr5MemType:
- case LpDdr5MemType:
vram_type = AMDGPU_VRAM_TYPE_DDR5;
break;
+ case LpDdr5MemType:
+ vram_type = AMDGPU_VRAM_TYPE_LPDDR5;
+ break;
default:
vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e552a2004868..b28af04b0c3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -116,7 +116,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
int ret;
if (cs->in.num_chunks == 0)
- return 0;
+ return -EINVAL;
chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (!chunk_array)
@@ -1252,7 +1252,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
p->fence = dma_fence_get(&job->base.s_fence->finished);
- amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
+ seq = amdgpu_ctx_add_fence(p->ctx, entity, p->fence);
amdgpu_cs_post_dependencies(p);
if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c317078d1afd..7dc92ef36b2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -135,9 +135,9 @@ static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_
static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
{
- struct amdgpu_device *adev = ctx->adev;
- int32_t ctx_prio;
+ struct amdgpu_device *adev = ctx->mgr->adev;
unsigned int hw_prio;
+ int32_t ctx_prio;
ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
@@ -162,17 +162,50 @@ static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
return hw_prio;
}
+/* Calculate the time spent on the hw */
+static ktime_t amdgpu_ctx_fence_time(struct dma_fence *fence)
+{
+ struct drm_sched_fence *s_fence;
+
+ if (!fence)
+ return ns_to_ktime(0);
+
+ /* When the fence is not even scheduled it can't have spent any time */
+ s_fence = to_drm_sched_fence(fence);
+ if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
+ return ns_to_ktime(0);
+
+ /* When it is still running, account how much time it has already spent */
+ if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
+ return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);
+
+ return ktime_sub(s_fence->finished.timestamp,
+ s_fence->scheduled.timestamp);
+}
+
+static ktime_t amdgpu_ctx_entity_time(struct amdgpu_ctx *ctx,
+ struct amdgpu_ctx_entity *centity)
+{
+ ktime_t res = ns_to_ktime(0);
+ uint32_t i;
+
+ spin_lock(&ctx->ring_lock);
+ for (i = 0; i < amdgpu_sched_jobs; i++) {
+ res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));
+ }
+ spin_unlock(&ctx->ring_lock);
+ return res;
+}
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
const u32 ring)
{
- struct amdgpu_device *adev = ctx->adev;
- struct amdgpu_ctx_entity *entity;
struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
- unsigned num_scheds = 0;
- int32_t ctx_prio;
- unsigned int hw_prio;
+ struct amdgpu_device *adev = ctx->mgr->adev;
+ struct amdgpu_ctx_entity *entity;
enum drm_sched_priority drm_prio;
+ unsigned int hw_prio, num_scheds;
+ int32_t ctx_prio;
int r;
entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
@@ -182,6 +215,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
+ entity->hw_ip = hw_ip;
entity->sequence = 1;
hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);
@@ -220,10 +254,25 @@ error_free_entity:
return r;
}
-static int amdgpu_ctx_init(struct amdgpu_device *adev,
- int32_t priority,
- struct drm_file *filp,
- struct amdgpu_ctx *ctx)
+static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
+{
+ ktime_t res = ns_to_ktime(0);
+ int i;
+
+ if (!entity)
+ return res;
+
+ for (i = 0; i < amdgpu_sched_jobs; ++i) {
+ res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i]));
+ dma_fence_put(entity->fences[i]);
+ }
+
+ kfree(entity);
+ return res;
+}
+
+static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
+ struct drm_file *filp, struct amdgpu_ctx *ctx)
{
int r;
@@ -233,15 +282,14 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
memset(ctx, 0, sizeof(*ctx));
- ctx->adev = adev;
-
kref_init(&ctx->refcount);
+ ctx->mgr = mgr;
spin_lock_init(&ctx->ring_lock);
mutex_init(&ctx->lock);
- ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+ ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
- ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+ ctx->vram_lost_counter = atomic_read(&mgr->adev->vram_lost_counter);
ctx->init_priority = priority;
ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
ctx->stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
@@ -249,24 +297,10 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
return 0;
}
-static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
-{
-
- int i;
-
- if (!entity)
- return;
-
- for (i = 0; i < amdgpu_sched_jobs; ++i)
- dma_fence_put(entity->fences[i]);
-
- kfree(entity);
-}
-
static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
u32 *stable_pstate)
{
- struct amdgpu_device *adev = ctx->adev;
+ struct amdgpu_device *adev = ctx->mgr->adev;
enum amd_dpm_forced_level current_level;
current_level = amdgpu_dpm_get_performance_level(adev);
@@ -294,7 +328,7 @@ static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
u32 stable_pstate)
{
- struct amdgpu_device *adev = ctx->adev;
+ struct amdgpu_device *adev = ctx->mgr->adev;
enum amd_dpm_forced_level level;
u32 current_stable_pstate;
int r;
@@ -345,7 +379,8 @@ done:
static void amdgpu_ctx_fini(struct kref *ref)
{
struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
- struct amdgpu_device *adev = ctx->adev;
+ struct amdgpu_ctx_mgr *mgr = ctx->mgr;
+ struct amdgpu_device *adev = mgr->adev;
unsigned i, j, idx;
if (!adev)
@@ -353,8 +388,10 @@ static void amdgpu_ctx_fini(struct kref *ref)
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
- amdgpu_ctx_fini_entity(ctx->entities[i][j]);
- ctx->entities[i][j] = NULL;
+ ktime_t spend;
+
+ spend = amdgpu_ctx_fini_entity(ctx->entities[i][j]);
+ atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]);
}
}
@@ -421,7 +458,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
}
*id = (uint32_t)r;
- r = amdgpu_ctx_init(adev, priority, filp, ctx);
+ r = amdgpu_ctx_init(mgr, priority, filp, ctx);
if (r) {
idr_remove(&mgr->ctx_handles, *id);
*id = 0;
@@ -671,9 +708,9 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
return 0;
}
-void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
- struct drm_sched_entity *entity,
- struct dma_fence *fence, uint64_t *handle)
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
+ struct drm_sched_entity *entity,
+ struct dma_fence *fence)
{
struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
uint64_t seq = centity->sequence;
@@ -682,8 +719,7 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
idx = seq & (amdgpu_sched_jobs - 1);
other = centity->fences[idx];
- if (other)
- BUG_ON(!dma_fence_is_signaled(other));
+ WARN_ON(other && !dma_fence_is_signaled(other));
dma_fence_get(fence);
@@ -692,9 +728,11 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
centity->sequence++;
spin_unlock(&ctx->ring_lock);
+ atomic64_add(ktime_to_ns(amdgpu_ctx_fence_time(other)),
+ &ctx->mgr->time_spend[centity->hw_ip]);
+
dma_fence_put(other);
- if (handle)
- *handle = seq;
+ return seq;
}
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
@@ -731,7 +769,7 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
int hw_ip,
int32_t priority)
{
- struct amdgpu_device *adev = ctx->adev;
+ struct amdgpu_device *adev = ctx->mgr->adev;
unsigned int hw_prio;
struct drm_gpu_scheduler **scheds = NULL;
unsigned num_scheds;
@@ -796,10 +834,17 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
return r;
}
-void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
+ struct amdgpu_device *adev)
{
+ unsigned int i;
+
+ mgr->adev = adev;
mutex_init(&mgr->lock);
idr_init(&mgr->ctx_handles);
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
+ atomic64_set(&mgr->time_spend[i], 0);
}
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
@@ -875,80 +920,38 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
mutex_destroy(&mgr->lock);
}
-static void amdgpu_ctx_fence_time(struct amdgpu_ctx *ctx,
- struct amdgpu_ctx_entity *centity, ktime_t *total, ktime_t *max)
-{
- ktime_t now, t1;
- uint32_t i;
-
- *total = *max = 0;
-
- now = ktime_get();
- for (i = 0; i < amdgpu_sched_jobs; i++) {
- struct dma_fence *fence;
- struct drm_sched_fence *s_fence;
-
- spin_lock(&ctx->ring_lock);
- fence = dma_fence_get(centity->fences[i]);
- spin_unlock(&ctx->ring_lock);
- if (!fence)
- continue;
- s_fence = to_drm_sched_fence(fence);
- if (!dma_fence_is_signaled(&s_fence->scheduled)) {
- dma_fence_put(fence);
- continue;
- }
- t1 = s_fence->scheduled.timestamp;
- if (!ktime_before(t1, now)) {
- dma_fence_put(fence);
- continue;
- }
- if (dma_fence_is_signaled(&s_fence->finished) &&
- s_fence->finished.timestamp < now)
- *total += ktime_sub(s_fence->finished.timestamp, t1);
- else
- *total += ktime_sub(now, t1);
- t1 = ktime_sub(now, t1);
- dma_fence_put(fence);
- *max = max(t1, *max);
- }
-}
-
-ktime_t amdgpu_ctx_mgr_fence_usage(struct amdgpu_ctx_mgr *mgr, uint32_t hwip,
- uint32_t idx, uint64_t *elapsed)
+void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
+ ktime_t usage[AMDGPU_HW_IP_NUM])
{
- struct idr *idp;
struct amdgpu_ctx *ctx;
+ unsigned int hw_ip, i;
uint32_t id;
- struct amdgpu_ctx_entity *centity;
- ktime_t total = 0, max = 0;
- if (idx >= AMDGPU_MAX_ENTITY_NUM)
- return 0;
- idp = &mgr->ctx_handles;
+ /*
+ * This is a little bit racy because a ctx or a fence can be
+ * destroyed just at the moment we try to account them. But that is ok
+ * since exactly that case is explicitly allowed by the interface.
+ */
mutex_lock(&mgr->lock);
- idr_for_each_entry(idp, ctx, id) {
- ktime_t ttotal, tmax;
+ for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
+ uint64_t ns = atomic64_read(&mgr->time_spend[hw_ip]);
- if (!ctx->entities[hwip][idx])
- continue;
-
- centity = ctx->entities[hwip][idx];
- amdgpu_ctx_fence_time(ctx, centity, &ttotal, &tmax);
+ usage[hw_ip] = ns_to_ktime(ns);
+ }
- /* Harmonic mean approximation diverges for very small
- * values. If ratio < 0.01% ignore
- */
- if (AMDGPU_CTX_FENCE_USAGE_MIN_RATIO(tmax, ttotal))
- continue;
+ idr_for_each_entry(&mgr->ctx_handles, ctx, id) {
+ for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
+ for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i) {
+ struct amdgpu_ctx_entity *centity;
+ ktime_t spend;
- total = ktime_add(total, ttotal);
- max = ktime_after(tmax, max) ? tmax : max;
+ centity = ctx->entities[hw_ip][i];
+ if (!centity)
+ continue;
+ spend = amdgpu_ctx_entity_time(ctx, centity);
+ usage[hw_ip] = ktime_add(usage[hw_ip], spend);
+ }
+ }
}
-
mutex_unlock(&mgr->lock);
- if (elapsed)
- *elapsed = max;
-
- return total;
}
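A worked example with made-up numbers of how the reworked accounting composes; the figures are illustrative, not from the patch.

	/*
	 * fence A: scheduled at 100 ms, finished at 130 ms
	 *          -> contributes 30 ms to mgr->time_spend[hw_ip] when it is
	 *             replaced in amdgpu_ctx_add_fence() or freed in
	 *             amdgpu_ctx_fini_entity()
	 * fence B: scheduled at 150 ms, still running when queried at 170 ms
	 *          -> amdgpu_ctx_fence_time() returns ktime_sub(now, scheduled)
	 *             = 20 ms, collected via amdgpu_ctx_entity_time()
	 *
	 * amdgpu_ctx_mgr_usage() therefore reports
	 *          usage[hw_ip] = 30 ms + 20 ms = 50 ms
	 */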
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 142f2f87d44c..cc7c8afff414 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -23,16 +23,20 @@
#ifndef __AMDGPU_CTX_H__
#define __AMDGPU_CTX_H__
+#include <linux/ktime.h>
+#include <linux/types.h>
+
#include "amdgpu_ring.h"
struct drm_device;
struct drm_file;
struct amdgpu_fpriv;
+struct amdgpu_ctx_mgr;
#define AMDGPU_MAX_ENTITY_NUM 4
-#define AMDGPU_CTX_FENCE_USAGE_MIN_RATIO(max, total) ((max) > 16384ULL*(total))
struct amdgpu_ctx_entity {
+ uint32_t hw_ip;
uint64_t sequence;
struct drm_sched_entity entity;
struct dma_fence *fences[];
@@ -40,7 +44,7 @@ struct amdgpu_ctx_entity {
struct amdgpu_ctx {
struct kref refcount;
- struct amdgpu_device *adev;
+ struct amdgpu_ctx_mgr *mgr;
unsigned reset_counter;
unsigned reset_counter_query;
uint32_t vram_lost_counter;
@@ -61,6 +65,7 @@ struct amdgpu_ctx_mgr {
struct mutex lock;
/* protected by lock */
struct idr ctx_handles;
+ atomic64_t time_spend[AMDGPU_HW_IP_NUM];
};
extern const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM];
@@ -70,9 +75,9 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
u32 ring, struct drm_sched_entity **entity);
-void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
- struct drm_sched_entity *entity,
- struct dma_fence *fence, uint64_t *seq);
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
+ struct drm_sched_entity *entity,
+ struct dma_fence *fence);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct drm_sched_entity *entity,
uint64_t seq);
@@ -85,10 +90,12 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
struct drm_sched_entity *entity);
-void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
+ struct amdgpu_device *adev);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
-ktime_t amdgpu_ctx_mgr_fence_usage(struct amdgpu_ctx_mgr *mgr, uint32_t hwip,
- uint32_t idx, uint64_t *elapsed);
+void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
+ ktime_t usage[AMDGPU_HW_IP_NUM]);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9af8d7a1d011..625424f3082b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1556,9 +1556,6 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- amdgpu_gmc_tmz_set(adev);
-
-
return 0;
}
@@ -3701,6 +3698,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ /* Enable TMZ based on IP_VERSION */
+ amdgpu_gmc_tmz_set(adev);
+
amdgpu_gmc_noretry_set(adev);
/* Need to get xgmi info early to decide the reset behavior*/
if (adev->gmc.xgmi.supported) {
@@ -5219,6 +5219,10 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
r = amdgpu_device_reset_sriov(adev, job ? false : true);
if (r)
adev->asic_reset_res = r;
+
+ /* Aldebaran supports RAS in SRIOV, so RAS needs to be resumed during reset */
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+ amdgpu_ras_resume(adev);
} else {
r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
if (r && r == -EAGAIN)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 881570dced41..47f0344205ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1130,13 +1130,24 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
ip->revision & 0xc0;
ip->revision &= ~0xc0;
- adev->vcn.num_vcn_inst++;
+ if (adev->vcn.num_vcn_inst < AMDGPU_MAX_VCN_INSTANCES)
+ adev->vcn.num_vcn_inst++;
+ else
+ dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
+ adev->vcn.num_vcn_inst + 1,
+ AMDGPU_MAX_VCN_INSTANCES);
}
if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
- le16_to_cpu(ip->hw_id) == SDMA3_HWID)
- adev->sdma.num_instances++;
+ le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
+ if (adev->sdma.num_instances < AMDGPU_MAX_SDMA_INSTANCES)
+ adev->sdma.num_instances++;
+ else
+ dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
+ adev->sdma.num_instances + 1,
+ AMDGPU_MAX_SDMA_INSTANCES);
+ }
if (le16_to_cpu(ip->hw_id) == UMC_HWID)
adev->gmc.num_umc++;
@@ -1361,7 +1372,7 @@ union mall_info {
struct mall_info_v1_0 v1;
};
-int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
+static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
union mall_info *mall_info;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8592d43a79b0..8890300766a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -99,10 +99,11 @@
* - 3.43.0 - Add device hot plug/unplug support
* - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B
* - 3.45.0 - Add context ioctl stable pstate interface
- * * 3.46.0 - To enable hot plug amdgpu tests in libdrm
+ * - 3.46.0 - To enable hot plug amdgpu tests in libdrm
+ * * 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 46
+#define KMS_DRIVER_MINOR 47
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit;
@@ -1940,6 +1941,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+ {0x1002, 0x7424, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{ PCI_DEVICE(0x1002, PCI_ANY_ID),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 5a6857c44bb6..99a7855ab1bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -32,6 +32,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
@@ -54,58 +55,49 @@ static const char *amdgpu_ip_name[AMDGPU_HW_IP_NUM] = {
void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
{
- struct amdgpu_fpriv *fpriv;
- uint32_t bus, dev, fn, i, domain;
- uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
struct drm_file *file = f->private_data;
struct amdgpu_device *adev = drm_to_adev(file->minor->dev);
- struct amdgpu_bo *root;
+ struct amdgpu_fpriv *fpriv = file->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+
+ uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
+ ktime_t usage[AMDGPU_HW_IP_NUM];
+ uint32_t bus, dev, fn, domain;
+ unsigned int hw_ip;
int ret;
- ret = amdgpu_file_to_fpriv(f, &fpriv);
- if (ret)
- return;
bus = adev->pdev->bus->number;
domain = pci_domain_nr(adev->pdev->bus);
dev = PCI_SLOT(adev->pdev->devfn);
fn = PCI_FUNC(adev->pdev->devfn);
- root = amdgpu_bo_ref(fpriv->vm.root.bo);
- if (!root)
+ ret = amdgpu_bo_reserve(vm->root.bo, false);
+ if (ret)
return;
- ret = amdgpu_bo_reserve(root, false);
- if (ret) {
- DRM_ERROR("Fail to reserve bo\n");
- return;
- }
- amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem);
- amdgpu_bo_unreserve(root);
- amdgpu_bo_unref(&root);
+ amdgpu_vm_get_memory(vm, &vram_mem, &gtt_mem, &cpu_mem);
+ amdgpu_bo_unreserve(vm->root.bo);
- seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus,
- dev, fn, fpriv->vm.pasid);
- seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
- seq_printf(m, "gtt mem:\t%llu kB\n", gtt_mem/1024UL);
- seq_printf(m, "cpu mem:\t%llu kB\n", cpu_mem/1024UL);
- for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
- uint32_t count = amdgpu_ctx_num_entities[i];
- int idx = 0;
- uint64_t total = 0, min = 0;
- uint32_t perc, frac;
+ amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage);
- for (idx = 0; idx < count; idx++) {
- total = amdgpu_ctx_mgr_fence_usage(&fpriv->ctx_mgr,
- i, idx, &min);
- if ((total == 0) || (min == 0))
- continue;
+ /*
+ * ******************************************************************
+ * For text output format description please see drm-usage-stats.rst!
+ * ******************************************************************
+ */
- perc = div64_u64(10000 * total, min);
- frac = perc % 100;
+ seq_printf(m, "pasid:\t%u\n", fpriv->vm.pasid);
+ seq_printf(m, "drm-driver:\t%s\n", file->minor->dev->driver->name);
+ seq_printf(m, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn);
+ seq_printf(m, "drm-client-id:\t%Lu\n", vm->immediate.fence_context);
+ seq_printf(m, "drm-memory-vram:\t%llu KiB\n", vram_mem/1024UL);
+ seq_printf(m, "drm-memory-gtt: \t%llu KiB\n", gtt_mem/1024UL);
+ seq_printf(m, "drm-memory-cpu: \t%llu KiB\n", cpu_mem/1024UL);
+ for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
+ if (!usage[hw_ip])
+ continue;
- seq_printf(m, "%s%d:\t%d.%d%%\n",
- amdgpu_ip_name[i],
- idx, perc/100, frac);
- }
+ seq_printf(m, "drm-engine-%s:\t%Ld ns\n", amdgpu_ip_name[hw_ip],
+ ktime_to_ns(usage[hw_ip]));
}
}
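With this rework the fdinfo output follows the common drm-usage-stats key/value format; an illustrative, made-up example for one client could look like the following (engines with zero usage are skipped).

	pasid:	32771
	drm-driver:	amdgpu
	drm-pdev:	0000:0b:00.0
	drm-client-id:	42
	drm-memory-vram:	524288 KiB
	drm-memory-gtt: 	131072 KiB
	drm-memory-cpu: 	4096 KiB
	drm-engine-gfx:	1234567890 ns
	drm-engine-dma:	98765 ns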
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 652571267077..8ef31d687ef3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -296,8 +296,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
AMDGPU_GEM_CREATE_VRAM_CLEARED |
AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
- AMDGPU_GEM_CREATE_ENCRYPTED))
-
+ AMDGPU_GEM_CREATE_ENCRYPTED |
+ AMDGPU_GEM_CREATE_DISCARDABLE))
return -EINVAL;
/* reject invalid gem domains */
@@ -645,6 +645,8 @@ uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
pte_flag |= AMDGPU_PTE_WRITEABLE;
if (flags & AMDGPU_VM_PAGE_PRT)
pte_flag |= AMDGPU_PTE_PRT;
+ if (flags & AMDGPU_VM_PAGE_NOALLOC)
+ pte_flag |= AMDGPU_PTE_NOALLOC;
if (adev->gmc.gmc_funcs->map_mtype)
pte_flag |= amdgpu_gmc_map_mtype(adev,
@@ -658,7 +660,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
{
const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
- AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
+ AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK |
+ AMDGPU_VM_PAGE_NOALLOC;
const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
AMDGPU_VM_PAGE_PRT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 88b852b3a2cb..798c56214a23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -512,9 +512,12 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
*/
void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ /* RAVEN */
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ /* RENOIR looks like RAVEN */
+ case IP_VERSION(9, 3, 0):
if (amdgpu_tmz == 0) {
adev->gmc.tmz_enabled = false;
dev_info(adev->dev,
@@ -525,12 +528,20 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
"Trusted Memory Zone (TMZ) feature enabled\n");
}
break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
- case CHIP_IP_DISCOVERY:
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ /* VANGOGH */
+ case IP_VERSION(10, 3, 1):
+ /* YELLOW_CARP */
+ case IP_VERSION(10, 3, 3):
+ /* GC 10.3.7 */
+ case IP_VERSION(10, 3, 7):
/* Don't enable it by default yet.
*/
if (amdgpu_tmz < 1) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 497478f8a5d3..801f6fa692e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1152,7 +1152,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
mutex_init(&fpriv->bo_list_lock);
idr_init(&fpriv->bo_list_handles);
- amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
+ amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
file_priv->driver_priv = fpriv;
goto out_suspend;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5444515c1476..2c82b1d5a0d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -567,6 +567,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
bp->domain;
bo->allowed_domains = bo->preferred_domains;
if (bp->type != ttm_bo_type_kernel &&
+ !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
@@ -1018,7 +1019,9 @@ static const char *amdgpu_vram_names[] = {
"DDR3",
"DDR4",
"GDDR6",
- "DDR5"
+ "DDR5",
+ "LPDDR4",
+ "LPDDR5"
};
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 4c9cbdc66995..147b79c10cbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -41,7 +41,6 @@
/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_CREATE_USERPTR_BO (1ULL << 63)
-#define AMDGPU_AMDKFD_CREATE_SVM_BO (1ULL << 62)
#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 214e4e89a028..e9411c28d88b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1177,7 +1177,7 @@ int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool lo
psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
- if (!psp->xgmi_context.context.initialized) {
+ if (!psp->xgmi_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 035891ec59d5..2de9309a4193 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -726,7 +726,9 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
/* Do not enable if it is not allowed. */
WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
- if (!amdgpu_ras_intr_triggered()) {
+ /* RAS feature enablement is only handled on the host side */
+ if (!amdgpu_sriov_vf(adev) &&
+ !amdgpu_ras_intr_triggered()) {
ret = psp_ras_enable_features(&adev->psp, info, enable);
if (ret) {
dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
@@ -1523,7 +1525,9 @@ static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
*/
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
{
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF))
+ /* Fatal error events are handled on host side */
+ if (amdgpu_sriov_vf(adev) ||
+ !amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF))
return;
if (adev->nbio.ras &&
@@ -2270,10 +2274,14 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
{
adev->ras_hw_enabled = adev->ras_enabled = 0;
- if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
+ if (!adev->is_atom_fw ||
!amdgpu_ras_asic_supported(adev))
return;
+ if (!(amdgpu_sriov_vf(adev) &&
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2))))
+ return;
+
if (!adev->gmc.xgmi.connected_to_cpu) {
if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
dev_info(adev->dev, "MEM ECC is active.\n");
@@ -2285,15 +2293,21 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
dev_info(adev->dev, "SRAM ECC is active.\n");
- adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
- 1 << AMDGPU_RAS_BLOCK__DF);
-
- if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0))
- adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
- 1 << AMDGPU_RAS_BLOCK__JPEG);
- else
- adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
- 1 << AMDGPU_RAS_BLOCK__JPEG);
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+
+ if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0))
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
+ else
+ adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
+ } else {
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
+ 1 << AMDGPU_RAS_BLOCK__SDMA |
+ 1 << AMDGPU_RAS_BLOCK__GFX);
+ }
} else {
dev_info(adev->dev, "SRAM ECC is not presented.\n");
}
@@ -2637,6 +2651,10 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
struct amdgpu_ras_block_object *obj;
int r;
+ /* Guest side doesn't need to init RAS features */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
if (!node->ras_obj) {
dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 8e221a1ba937..42c1f050542f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -124,6 +124,10 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+
+ if (amdgpu_sriov_vf(adev))
+ return AMDGPU_RAS_SUCCESS;
+
amdgpu_ras_reset_gpu(adev);
return AMDGPU_RAS_SUCCESS;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ec26edd4f4d8..be6f76a30ac6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -117,7 +117,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
}
abo = ttm_to_amdgpu_bo(bo);
- if (abo->flags & AMDGPU_AMDKFD_CREATE_SVM_BO) {
+ if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
placement->num_placement = 0;
placement->num_busy_placement = 0;
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 65a4126135b0..c5f46d264b23 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -5111,7 +5111,7 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
/* Initialize all compute VMIDs to have no GDS, GWS, or OA
- acccess. These should be enabled by FW for target VMIDs. */
+ access. These should be enabled by FW for target VMIDs. */
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
@@ -6898,7 +6898,7 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(prop->queue_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
- ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+ (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
@@ -6919,23 +6919,6 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
- tmp = 0;
- /* enable the doorbell if requested */
- if (prop->use_doorbell) {
- tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_OFFSET, prop->doorbell_index);
-
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_EN, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_SOURCE, 0);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_HIT, 0);
- }
-
- mqd->cp_hqd_pq_doorbell_control = tmp;
-
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
@@ -6973,20 +6956,6 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
/* disable wptr polling */
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
- /* write the EOP addr */
- WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
- mqd->cp_hqd_eop_base_addr_lo);
- WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
- mqd->cp_hqd_eop_base_addr_hi);
-
- /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
- mqd->cp_hqd_eop_control);
-
- /* enable doorbell? */
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
- mqd->cp_hqd_pq_doorbell_control);
-
/* disable the queue if it's active */
if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
@@ -7005,6 +6974,19 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
mqd->cp_hqd_pq_wptr_hi);
}
+ /* disable doorbells */
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+ /* write the EOP addr */
+ WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
+ mqd->cp_hqd_eop_base_addr_lo);
+ WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
+ mqd->cp_hqd_eop_base_addr_hi);
+
+ /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+ WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
+ mqd->cp_hqd_eop_control);
+
/* set the pointer to the MQD */
WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
mqd->cp_mqd_base_addr_lo);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 8773cbd1f03b..8c0a3fc7aaa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -4082,7 +4082,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(prop->queue_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
- ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+ (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 90f64219d291..7f0b18b0d4c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3714,7 +3714,7 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
/* Initialize all compute VMIDs to have no GDS, GWS, or OA
- acccess. These should be enabled by FW for target VMIDs. */
+ access. These should be enabled by FW for target VMIDs. */
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
@@ -4490,7 +4490,7 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(ring->ring_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
- ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+ (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
@@ -5815,7 +5815,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
- /* write cmd to Set CGCG Overrride */
+ /* write cmd to Set CGCG Override */
gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 83639b5ea6a9..5349ca4d19e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2535,7 +2535,7 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
/* Initialize all compute VMIDs to have no GDS, GWS, or OA
- acccess. These should be enabled by FW for target VMIDs. */
+ access. These should be enabled by FW for target VMIDs. */
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
@@ -3514,7 +3514,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(ring->ring_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
- ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+ (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
@@ -3535,23 +3535,6 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
- tmp = 0;
- /* enable the doorbell if requested */
- if (ring->use_doorbell) {
- tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_OFFSET, ring->doorbell_index);
-
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_EN, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_SOURCE, 0);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_HIT, 0);
- }
-
- mqd->cp_hqd_pq_doorbell_control = tmp;
-
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
ring->wptr = 0;
mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index b8c79789e1e4..9077dfccaf3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -613,6 +613,9 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ *flags &= ~AMDGPU_PTE_NOALLOC;
+ *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+
if (mapping->flags & AMDGPU_PTE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 477f67d9b07c..a0c0b7d9f444 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -500,6 +500,9 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ *flags &= ~AMDGPU_PTE_NOALLOC;
+ *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+
if (mapping->flags & AMDGPU_PTE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index d6d79e97def9..9e1ef81933ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -32,13 +32,10 @@
MODULE_FIRMWARE("amdgpu/aldebaran_sos.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_ta.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_cap.bin");
-MODULE_FIRMWARE("amdgpu/yellow_carp_asd.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_toc.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_ta.bin");
-MODULE_FIRMWARE("amdgpu/psp_13_0_5_asd.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_5_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_5_ta.bin");
-MODULE_FIRMWARE("amdgpu/psp_13_0_8_asd.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_8_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_8_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin");
@@ -85,17 +82,17 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
err = psp_init_sos_microcode(psp, chip_name);
if (err)
return err;
- err = psp_init_ta_microcode(&adev->psp, chip_name);
- if (err)
- return err;
+ /* It's not necessary to load the RAS TA on the guest side */
+ if (!amdgpu_sriov_vf(adev)) {
+ err = psp_init_ta_microcode(&adev->psp, chip_name);
+ if (err)
+ return err;
+ }
break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 8):
- err = psp_init_asd_microcode(psp, chip_name);
- if (err)
- return err;
err = psp_init_toc_microcode(psp, chip_name);
if (err)
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index c6a8520053bb..9e18a2b22607 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -42,6 +42,7 @@
#include "soc15.h"
#include "soc15_common.h"
+#include "soc21.h"
static const struct amd_ip_funcs soc21_common_ip_funcs;
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 475f89700c74..60a81649cf12 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -166,7 +166,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
0x807c847c, 0x806eff6e,
0x00000400, 0xbf0a757c,
0xbf85ffef, 0xbf9c0000,
- 0xbf8200cd, 0xbef8007e,
+ 0xbf8200ce, 0xbef8007e,
0x8679ff7f, 0x0000ffff,
0x8779ff79, 0x00040000,
0xbefa0080, 0xbefb00ff,
@@ -212,304 +212,310 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
0x761e0000, 0xe0524100,
0x761e0100, 0xe0524200,
0x761e0200, 0xe0524300,
- 0x761e0300, 0xb8f22a05,
- 0x80728172, 0x8e728a72,
- 0xb8f61605, 0x80768176,
- 0x8e768676, 0x80727672,
- 0x80f2c072, 0xb8f31605,
- 0x80738173, 0x8e738473,
- 0x8e7a8273, 0xbefa00ff,
- 0x01000000, 0xbefc0073,
- 0xc031003c, 0x00000072,
- 0x80f2c072, 0xbf8c007f,
- 0x80fc907c, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff1, 0xb8f22a05,
- 0x80728172, 0x8e728a72,
- 0xb8f61605, 0x80768176,
- 0x8e768676, 0x80727672,
- 0xbefa0084, 0xbefa00ff,
- 0x01000000, 0xc0211cfc,
+ 0x761e0300, 0xbf8c0f70,
+ 0xb8f22a05, 0x80728172,
+ 0x8e728a72, 0xb8f61605,
+ 0x80768176, 0x8e768676,
+ 0x80727672, 0x80f2c072,
+ 0xb8f31605, 0x80738173,
+ 0x8e738473, 0x8e7a8273,
+ 0xbefa00ff, 0x01000000,
+ 0xbefc0073, 0xc031003c,
+ 0x00000072, 0x80f2c072,
+ 0xbf8c007f, 0x80fc907c,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff1,
+ 0xb8f22a05, 0x80728172,
+ 0x8e728a72, 0xb8f61605,
+ 0x80768176, 0x8e768676,
+ 0x80727672, 0xbefa0084,
+ 0xbefa00ff, 0x01000000,
+ 0xc0211cfc, 0x00000072,
+ 0x80728472, 0xc0211c3c,
0x00000072, 0x80728472,
- 0xc0211c3c, 0x00000072,
- 0x80728472, 0xc0211c7c,
+ 0xc0211c7c, 0x00000072,
+ 0x80728472, 0xc0211bbc,
0x00000072, 0x80728472,
- 0xc0211bbc, 0x00000072,
- 0x80728472, 0xc0211bfc,
+ 0xc0211bfc, 0x00000072,
+ 0x80728472, 0xc0211d3c,
0x00000072, 0x80728472,
- 0xc0211d3c, 0x00000072,
- 0x80728472, 0xc0211d7c,
+ 0xc0211d7c, 0x00000072,
+ 0x80728472, 0xc0211a3c,
0x00000072, 0x80728472,
- 0xc0211a3c, 0x00000072,
- 0x80728472, 0xc0211a7c,
+ 0xc0211a7c, 0x00000072,
+ 0x80728472, 0xc0211dfc,
0x00000072, 0x80728472,
- 0xc0211dfc, 0x00000072,
- 0x80728472, 0xc0211b3c,
+ 0xc0211b3c, 0x00000072,
+ 0x80728472, 0xc0211b7c,
0x00000072, 0x80728472,
- 0xc0211b7c, 0x00000072,
- 0x80728472, 0xbf8c007f,
- 0xbefc0073, 0xbefe006e,
- 0xbeff006f, 0x867375ff,
- 0x000003ff, 0xb9734803,
- 0x867375ff, 0xfffff800,
- 0x8f738b73, 0xb973a2c3,
- 0xb977f801, 0x8673ff71,
- 0xf0000000, 0x8f739c73,
- 0x8e739073, 0xbef60080,
- 0x87767376, 0x8673ff71,
- 0x08000000, 0x8f739b73,
- 0x8e738f73, 0x87767376,
- 0x8673ff74, 0x00800000,
- 0x8f739773, 0xb976f807,
- 0x8671ff71, 0x0000ffff,
- 0x86fe7e7e, 0x86ea6a6a,
- 0x8f768374, 0xb976e0c2,
- 0xbf800002, 0xb9740002,
- 0xbf8a0000, 0x95807370,
- 0xbf810000, 0x00000000,
+ 0xbf8c007f, 0xbefc0073,
+ 0xbefe006e, 0xbeff006f,
+ 0x867375ff, 0x000003ff,
+ 0xb9734803, 0x867375ff,
+ 0xfffff800, 0x8f738b73,
+ 0xb973a2c3, 0xb977f801,
+ 0x8673ff71, 0xf0000000,
+ 0x8f739c73, 0x8e739073,
+ 0xbef60080, 0x87767376,
+ 0x8673ff71, 0x08000000,
+ 0x8f739b73, 0x8e738f73,
+ 0x87767376, 0x8673ff74,
+ 0x00800000, 0x8f739773,
+ 0xb976f807, 0x8671ff71,
+ 0x0000ffff, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f768374,
+ 0xb976e0c2, 0xbf800002,
+ 0xb9740002, 0xbf8a0000,
+ 0x95807370, 0xbf810000,
};
static const uint32_t cwsr_trap_gfx9_hex[] = {
- 0xbf820001, 0xbf820248,
- 0xb8f8f802, 0x89788678,
- 0xb8eef801, 0x866eff6e,
- 0x00000800, 0xbf840003,
+ 0xbf820001, 0xbf820254,
+ 0xb8f8f802, 0x8978ff78,
+ 0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
- 0xbf840016, 0xb8fbf803,
+ 0xbf840009, 0x866eff6d,
+ 0x00ff0000, 0xbf85001e,
0x866eff7b, 0x00000400,
- 0xbf85003b, 0x866eff7b,
- 0x00000800, 0xbf850003,
- 0x866eff7b, 0x00000100,
- 0xbf84000c, 0x866eff78,
- 0x00002000, 0xbf840005,
- 0xbf8e0010, 0xb8eef803,
- 0x866eff6e, 0x00000400,
- 0xbf84fffb, 0x8778ff78,
- 0x00002000, 0x80ec886c,
- 0x82ed806d, 0xb8eef807,
- 0x866fff6e, 0x001f8000,
- 0x8e6f8b6f, 0x8977ff77,
- 0xfc000000, 0x87776f77,
- 0x896eff6e, 0x001f8000,
- 0xb96ef807, 0xb8faf812,
+ 0xbf850051, 0xbf8e0010,
+ 0xb8fbf803, 0xbf82fffa,
+ 0x866eff7b, 0x00000900,
+ 0xbf850015, 0x866eff7b,
+ 0x000071ff, 0xbf840008,
+ 0x866fff7b, 0x00007080,
+ 0xbf840001, 0xbeee1a87,
+ 0xb8eff801, 0x8e6e8c6e,
+ 0x866e6f6e, 0xbf85000a,
+ 0x866eff6d, 0x00ff0000,
+ 0xbf850007, 0xb8eef801,
+ 0x866eff6e, 0x00000800,
+ 0xbf850003, 0x866eff7b,
+ 0x00000400, 0xbf850036,
+ 0xb8faf807, 0x867aff7a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8977ff77, 0xfc000000,
+ 0x87777a77, 0xba7ff807,
+ 0x00000000, 0xb8faf812,
0xb8fbf813, 0x8efa887a,
- 0xc0071bbd, 0x00000000,
- 0xbf8cc07f, 0xc0071ebd,
- 0x00000008, 0xbf8cc07f,
- 0x86ee6e6e, 0xbf840001,
- 0xbe801d6e, 0xb8fbf803,
- 0x867bff7b, 0x000001ff,
+ 0xc0031bbd, 0x00000010,
+ 0xbf8cc07f, 0x8e6e976e,
+ 0x8977ff77, 0x00800000,
+ 0x87776e77, 0xc0071bbd,
+ 0x00000000, 0xbf8cc07f,
+ 0xc0071ebd, 0x00000008,
+ 0xbf8cc07f, 0x86ee6e6e,
+ 0xbf840001, 0xbe801d6e,
+ 0x866eff6d, 0x01ff0000,
+ 0xbf850005, 0x8778ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbf820005,
+ 0x866eff6d, 0x01000000,
0xbf850002, 0x806c846c,
0x826d806d, 0x866dff6d,
- 0x0000ffff, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x86fe7e7e,
+ 0x0000ffff, 0x8f7a8b77,
+ 0x867aff7a, 0x001f8000,
+ 0xb97af807, 0x86fe7e7e,
0x86ea6a6a, 0x8f6e8378,
0xb96ee0c2, 0xbf800002,
0xb9780002, 0xbe801f6c,
0x866dff6d, 0x0000ffff,
0xbefa0080, 0xb97a0283,
- 0xb8fa2407, 0x8e7a9b7a,
- 0x876d7a6d, 0xb8fa03c7,
- 0x8e7a9a7a, 0x876d7a6d,
0xb8faf807, 0x867aff7a,
- 0x00007fff, 0xb97af807,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbf900004,
- 0x877a8478, 0xb97af802,
- 0xbf8e0002, 0xbf88fffe,
- 0xb8fa2a05, 0x807a817a,
- 0x8e7a8a7a, 0xb8fb1605,
- 0x807b817b, 0x8e7b867b,
- 0x807a7b7a, 0x807a7e7a,
- 0x827b807f, 0x867bff7b,
- 0x0000ffff, 0xc04b1c3d,
- 0x00000050, 0xbf8cc07f,
- 0xc04b1d3d, 0x00000060,
- 0xbf8cc07f, 0xc0431e7d,
- 0x00000074, 0xbf8cc07f,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x867aff7f, 0x08000000,
- 0x8f7a837a, 0x87777a77,
- 0x867aff7f, 0x70000000,
- 0x8f7a817a, 0x87777a77,
- 0xbef1007c, 0xbef00080,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xbefe007c, 0xbefc0070,
- 0xc0611c7a, 0x0000007c,
- 0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611b3a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8977ff77, 0xfc000000,
+ 0x87777a77, 0xba7ff807,
+ 0x00000000, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbf900004, 0x877a8478,
+ 0xb97af802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8fa2a05,
+ 0x807a817a, 0x8e7a8a7a,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b867b, 0x807a7b7a,
+ 0x807a7e7a, 0x827b807f,
+ 0x867bff7b, 0x0000ffff,
+ 0xc04b1c3d, 0x00000050,
+ 0xbf8cc07f, 0xc04b1d3d,
+ 0x00000060, 0xbf8cc07f,
+ 0xc0431e7d, 0x00000074,
+ 0xbf8cc07f, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0xbef1007c,
+ 0xbef00080, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611b7a, 0x0000007c,
+ 0xc0611b3a, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611bba,
+ 0xbefc0070, 0xc0611b7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611bfa, 0x0000007c,
+ 0xc0611bba, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611e3a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8fbf803, 0xbefe007c,
- 0xbefc0070, 0xc0611efa,
+ 0xbefc0070, 0xc0611bfa,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611a3a, 0x0000007c,
+ 0xc0611e3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8fbf803,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611efa, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611a7a,
+ 0xbefc0070, 0xc0611a3a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
- 0xb8f1f801, 0xbefe007c,
- 0xbefc0070, 0xc0611c7a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0x867aff7f, 0x04000000,
- 0xbeef0080, 0x876f6f7a,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0xb8fb1605,
- 0x807b817b, 0x8e7b847b,
- 0x8e76827b, 0xbef600ff,
- 0x01000000, 0xbef20174,
- 0x80747074, 0x82758075,
- 0xbefc0080, 0xbf800000,
- 0xbe802b00, 0xbe822b02,
- 0xbe842b04, 0xbe862b06,
- 0xbe882b08, 0xbe8a2b0a,
- 0xbe8c2b0c, 0xbe8e2b0e,
- 0xc06b003a, 0x00000000,
- 0xbf8cc07f, 0xc06b013a,
- 0x00000010, 0xbf8cc07f,
- 0xc06b023a, 0x00000020,
- 0xbf8cc07f, 0xc06b033a,
- 0x00000030, 0xbf8cc07f,
- 0x8074c074, 0x82758075,
- 0x807c907c, 0xbf0a7b7c,
- 0xbf85ffe7, 0xbef40172,
- 0xbef00080, 0xbefe00c1,
- 0xbeff00c1, 0xbee80080,
- 0xbee90080, 0xbef600ff,
- 0x01000000, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf85004d,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8f1f801,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0x867aff7f,
+ 0x04000000, 0xbeef0080,
+ 0x876f6f7a, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf85004d, 0xbe840080,
+ 0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
+ 0x00000900, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
+ 0xd2890000, 0x00000902,
0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
+ 0x00000902, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0xbf820008, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb4306,
- 0x867bc17b, 0xbf840063,
- 0xbf8a0000, 0x867aff6f,
- 0x04000000, 0xbf84005f,
- 0x8e7b867b, 0x8e7b827b,
- 0xbef6007b, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0xb8fa1605, 0x807a817a,
- 0x8e7a867a, 0x80707a70,
- 0x8070ff70, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xbefc0080, 0xd28c0002,
- 0x000100c1, 0xd28d0003,
- 0x000204c1, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850030,
- 0x24040682, 0xd86e4000,
- 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840063, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf84005f, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0xb8fa1605,
+ 0x807a817a, 0x8e7a867a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xd28c0002, 0x000100c1,
+ 0xd28d0003, 0x000204c1,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850030, 0x24040682,
+ 0xd86e4000, 0x00000002,
+ 0xbf8cc07f, 0xbe840080,
+ 0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
+ 0x00000900, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x680404ff, 0x00000200,
+ 0xbe840080, 0xd2890000,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
+ 0x80048104, 0xd2890002,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x680404ff,
+ 0x00000200, 0xd0c9006a,
+ 0x0000f702, 0xbf87ffd2,
+ 0xbf820015, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe800077,
+ 0x8677ff77, 0xff7fffff,
+ 0x8777ff77, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8cc07f, 0xe0765000,
+ 0x701d0002, 0x68040702,
0xd0c9006a, 0x0000f702,
- 0xbf87ffd2, 0xbf820015,
- 0xd1060002, 0x00011103,
- 0x7e0602ff, 0x00000200,
- 0xbefc00ff, 0x00010000,
- 0xbe800077, 0x8677ff77,
- 0xff7fffff, 0x8777ff77,
- 0x00058000, 0xd8ec0000,
- 0x00000002, 0xbf8cc07f,
- 0xe0765000, 0x701d0002,
- 0x68040702, 0xd0c9006a,
- 0x0000f702, 0xbf87fff7,
- 0xbef70000, 0xbef000ff,
- 0x00000400, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb2a05,
- 0x807b817b, 0x8e7b827b,
- 0x8e76887b, 0xbef600ff,
+ 0xbf87fff7, 0xbef70000,
+ 0xbef000ff, 0x00000400,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8fb2a05, 0x807b817b,
+ 0x8e7b827b, 0xbef600ff,
0x01000000, 0xbefc0084,
0xbf0a7b7c, 0xbf84006d,
0xbf11017c, 0x807bff7b,
@@ -566,15 +572,11 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x701d0300, 0x807c847c,
0x8070ff70, 0x00000400,
0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xbf8200da,
+ 0xbf9c0000, 0xbf8200c7,
0xbef4007e, 0x8675ff7f,
0x0000ffff, 0x8775ff75,
0x00040000, 0xbef60080,
0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x08000000,
- 0x8f6e836e, 0x87776e77,
- 0x866eff7f, 0x70000000,
- 0x8f6e816e, 0x87776e77,
0x866eff7f, 0x04000000,
0xbf84001e, 0xbefe00c1,
0xbeff00c1, 0xb8ef4306,
@@ -591,28 +593,28 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x781d0000, 0x807cff7c,
0x00000200, 0x8078ff78,
0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbef80080,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8ef2a05, 0x806f816f,
- 0x8e6f826f, 0x8e76886f,
- 0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefc0084,
- 0xbf11087c, 0x806fff6f,
- 0x00008000, 0xe0524000,
- 0x781d0000, 0xe0524100,
- 0x781d0100, 0xe0524200,
- 0x781d0200, 0xe0524300,
- 0x781d0300, 0xbf8c0f70,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
- 0x807c847c, 0x8078ff78,
- 0x00000400, 0xbf0a6f7c,
- 0xbf85ffee, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
+ 0xbf85fff6, 0xbefe00c1,
+ 0xbeff00c1, 0xbef600ff,
+ 0x01000000, 0xb8ef2a05,
+ 0x806f816f, 0x8e6f826f,
+ 0x806fff6f, 0x00008000,
+ 0xbef80080, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
0xb8f82a05, 0x80788178,
0x8e788a78, 0xb8ee1605,
0x806e816e, 0x8e6e866e,
@@ -663,90 +665,101 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0xc00b1c37, 0x00000050,
0xc00b1d37, 0x00000060,
0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x866fff6d,
- 0xf8000000, 0x8f6f9b6f,
- 0x8e6f906f, 0xbeee0080,
- 0x876e6f6e, 0x866fff6d,
- 0x04000000, 0x8f6f9a6f,
- 0x8e6f8f6f, 0x876e6f6e,
- 0x866fff7a, 0x00800000,
- 0x8f6f976f, 0xb96ef807,
- 0x866dff6d, 0x0000ffff,
- 0x86fe7e7e, 0x86ea6a6a,
- 0x8f6e837a, 0xb96ee0c2,
- 0xbf800002, 0xb97a0002,
- 0xbf8a0000, 0x95806f6c,
- 0xbf810000, 0x00000000,
+ 0xbf8cc07f, 0x8f6e8b77,
+ 0x866eff6e, 0x001f8000,
+ 0xb96ef807, 0x866dff6d,
+ 0x0000ffff, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e837a,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb97a0002, 0xbf8a0000,
+ 0xbe801f6c, 0xbf810000,
};
static const uint32_t cwsr_trap_nv1x_hex[] = {
- 0xbf820001, 0xbf8201cd,
+ 0xbf820001, 0xbf8201f1,
0xb0804004, 0xb978f802,
- 0x8a788678, 0xb96ef801,
- 0x876eff6e, 0x00000800,
- 0xbf840003, 0x876eff78,
+ 0x8a78ff78, 0x00020006,
+ 0xb97bf803, 0x876eff78,
0x00002000, 0xbf840009,
- 0xb97bf803, 0x876eff7b,
- 0x00000400, 0xbf850033,
- 0x876eff7b, 0x00000100,
- 0xbf840002, 0x8878ff78,
- 0x00002000, 0x8a77ff77,
- 0xff000000, 0xb96ef807,
- 0x876fff6e, 0x02000000,
- 0x8f6f866f, 0x88776f77,
- 0x876fff6e, 0x003f8000,
- 0x8f6f896f, 0x88776f77,
- 0x8a6eff6e, 0x023f8000,
- 0xb9eef807, 0xb97af812,
+ 0x876eff6d, 0x00ff0000,
+ 0xbf85001e, 0x876eff7b,
+ 0x00000400, 0xbf850057,
+ 0xbf8e0010, 0xb97bf803,
+ 0xbf82fffa, 0x876eff7b,
+ 0x00000900, 0xbf850015,
+ 0x876eff7b, 0x000071ff,
+ 0xbf840008, 0x876fff7b,
+ 0x00007080, 0xbf840001,
+ 0xbeee1d87, 0xb96ff801,
+ 0x8f6e8c6e, 0x876e6f6e,
+ 0xbf85000a, 0x876eff6d,
+ 0x00ff0000, 0xbf850007,
+ 0xb96ef801, 0x876eff6e,
+ 0x00000800, 0xbf850003,
+ 0x876eff7b, 0x00000400,
+ 0xbf85003c, 0x8a77ff77,
+ 0xff000000, 0xb97af807,
+ 0x877bff7a, 0x02000000,
+ 0x8f7b867b, 0x88777b77,
+ 0x877bff7a, 0x003f8000,
+ 0x8f7b897b, 0x88777b77,
+ 0x8a7aff7a, 0x023f8000,
+ 0xb9faf807, 0xb97af812,
0xb97bf813, 0x8ffa887a,
- 0xf4051bbd, 0xfa000000,
- 0xbf8cc07f, 0xf4051ebd,
- 0xfa000008, 0xbf8cc07f,
- 0x87ee6e6e, 0xbf840001,
- 0xbe80206e, 0xb97bf803,
- 0x877bff7b, 0x000001ff,
+ 0xf4011bbd, 0xfa000010,
+ 0xbf8cc07f, 0x8f6e976e,
+ 0x8a77ff77, 0x00800000,
+ 0x88776e77, 0xf4051bbd,
+ 0xfa000000, 0xbf8cc07f,
+ 0xf4051ebd, 0xfa000008,
+ 0xbf8cc07f, 0x87ee6e6e,
+ 0xbf840001, 0xbe80206e,
+ 0x876eff6d, 0x01ff0000,
+ 0xbf850005, 0x8878ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbf820005,
+ 0x876eff6d, 0x01000000,
0xbf850002, 0x806c846c,
0x826d806d, 0x876dff6d,
- 0x0000ffff, 0x906e8977,
- 0x876fff6e, 0x003f8000,
- 0x906e8677, 0x876eff6e,
- 0x02000000, 0x886e6f6e,
- 0xb9eef807, 0x87fe7e7e,
+ 0x0000ffff, 0x907a8977,
+ 0x877bff7a, 0x003f8000,
+ 0x907a8677, 0x877aff7a,
+ 0x02000000, 0x887a7b7a,
+ 0xb9faf807, 0x87fe7e7e,
0x87ea6a6a, 0xb9f8f802,
0xbe80226c, 0x876dff6d,
0x0000ffff, 0xbefa0380,
- 0xb9fa0283, 0xb97a2c07,
- 0x8f7a9a7a, 0x886d7a6d,
- 0xb97a03c7, 0x8f7a997a,
- 0x886d7a6d, 0xb97a0647,
- 0x8f7a987a, 0x886d7a6d,
- 0xb97af807, 0x877aff7a,
- 0x00007fff, 0xb9faf807,
- 0xbeee037e, 0xbeef037f,
- 0xbefe0480, 0xbf900004,
- 0xbf8e0002, 0xbf88fffe,
- 0xb97b02dc, 0x8f7b997b,
- 0x887b7b7f, 0xb97a2a05,
+ 0xb9fa0283, 0x8a77ff77,
+ 0xff000000, 0xb97af807,
+ 0x877bff7a, 0x02000000,
+ 0x8f7b867b, 0x88777b77,
+ 0x877bff7a, 0x003f8000,
+ 0x8f7b897b, 0x88777b77,
+ 0x8a7aff7a, 0x023f8000,
+ 0xb9faf807, 0xbeee037e,
+ 0xbeef037f, 0xbefe0480,
+ 0xbf900004, 0xbf8e0002,
+ 0xbf88fffe, 0x877aff7f,
+ 0x04000000, 0x8f7a857a,
+ 0x886d7a6d, 0xb97b02dc,
+ 0x8f7b997b, 0xb97a2a05,
0x807a817a, 0xbf0d997b,
0xbf850002, 0x8f7a897a,
0xbf820001, 0x8f7a8a7a,
- 0x877bff7f, 0x0000ffff,
- 0x807aff7a, 0x00000200,
- 0x807a7e7a, 0x827b807b,
- 0xf4491c3d, 0xfa000050,
- 0xf4491d3d, 0xfa000060,
- 0xf4411e7d, 0xfa000074,
- 0xbef4037e, 0x8775ff7f,
- 0x0000ffff, 0x8875ff75,
- 0x00040000, 0xbef60380,
- 0xbef703ff, 0x10807fac,
- 0x877aff7f, 0x08000000,
- 0x907a837a, 0x88777a77,
- 0x877aff7f, 0x70000000,
- 0x907a817a, 0x88777a77,
- 0xbef1037c, 0xbef00380,
- 0xb97302dc, 0x8f739973,
- 0x8873737f, 0xb97bf816,
+ 0xb97b1e06, 0x8f7b8a7b,
+ 0x807a7b7a, 0x877bff7f,
+ 0x0000ffff, 0x807aff7a,
+ 0x00000200, 0x807a7e7a,
+ 0x827b807b, 0xf4491c3d,
+ 0xfa000050, 0xf4491d3d,
+ 0xfa000060, 0xf4411e7d,
+ 0xfa000074, 0xbef4037e,
+ 0x8775ff7f, 0x0000ffff,
+ 0x8875ff75, 0x00040000,
+ 0xbef60380, 0xbef703ff,
+ 0x10807fac, 0xbef1037c,
+ 0xbef00380, 0xb97302dc,
+ 0x8f739973, 0xb97bf816,
0xba80f816, 0x00000000,
0xbefe03c1, 0x907c9973,
0x877c817c, 0xbf06817c,
@@ -763,7 +776,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xe0704100, 0x705d0100,
0xe0704200, 0x705d0200,
0xe0704300, 0x705d0300,
- 0xb9702a05, 0x80708170,
+ 0xb9703a05, 0x80708170,
0xbf0d9973, 0xbf850002,
0x8f708970, 0xbf820001,
0x8f708a70, 0xb97a1e06,
@@ -776,8 +789,9 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbefe037c, 0xbefc0370,
0xf4611b3a, 0xf8000000,
0x80708470, 0xbefc037e,
+ 0x8a7aff6d, 0x80000000,
0xbefe037c, 0xbefc0370,
- 0xf4611b7a, 0xf8000000,
+ 0xf4611eba, 0xf8000000,
0x80708470, 0xbefc037e,
0xbefe037c, 0xbefc0370,
0xf4611bba, 0xf8000000,
@@ -838,10 +852,10 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbf820001, 0xbeff03c1,
0xb97b4306, 0x877bc17b,
0xbf840044, 0xbf8a0000,
- 0x877aff73, 0x04000000,
+ 0x877aff6d, 0x80000000,
0xbf840040, 0x8f7b867b,
0x8f7b827b, 0xbef6037b,
- 0xb9702a05, 0x80708170,
+ 0xb9703a05, 0x80708170,
0xbf0d9973, 0xbf850002,
0x8f708970, 0xbf820001,
0x8f708a70, 0xb97a1e06,
@@ -877,7 +891,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbef003ff, 0x00000200,
0xbeff0380, 0xbf820003,
0xbef003ff, 0x00000400,
- 0xbeff03c1, 0xb97b2a05,
+ 0xbeff03c1, 0xb97b3a05,
0x807b817b, 0x8f7b827b,
0x907c9973, 0x877c817c,
0xbf06817c, 0xbf850017,
@@ -894,7 +908,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbf0a7b7c, 0xbf85ffef,
0xbf820025, 0xbef603ff,
0x01000000, 0xbefc0384,
- 0xbf0a7b7c, 0xbf840020,
+ 0xbf0a7b7c, 0xbf840011,
0x7e008700, 0x7e028701,
0x7e048702, 0x7e068703,
0xe0704000, 0x705d0000,
@@ -911,71 +925,69 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0x705d0000, 0x807c817c,
0x8070ff70, 0x00000080,
0xbf0a7b7c, 0xbf85fff8,
- 0xbf820151, 0xbef4037e,
+ 0xbf820144, 0xbef4037e,
0x8775ff7f, 0x0000ffff,
0x8875ff75, 0x00040000,
0xbef60380, 0xbef703ff,
- 0x10807fac, 0x876eff7f,
- 0x08000000, 0x906e836e,
- 0x88776e77, 0x876eff7f,
- 0x70000000, 0x906e816e,
- 0x88776e77, 0xb97202dc,
- 0x8f729972, 0x8872727f,
- 0x876eff7f, 0x04000000,
- 0xbf840034, 0xbefe03c1,
- 0x907c9972, 0x877c817c,
- 0xbf06817c, 0xbf850002,
- 0xbeff0380, 0xbf820001,
- 0xbeff03c1, 0xb96f4306,
- 0x876fc16f, 0xbf840029,
- 0x8f6f866f, 0x8f6f826f,
- 0xbef6036f, 0xb9782a05,
- 0x80788178, 0xbf0d9972,
- 0xbf850002, 0x8f788978,
- 0xbf820001, 0x8f788a78,
- 0xb96e1e06, 0x8f6e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0x8078ff78,
- 0x00000080, 0xbef603ff,
- 0x01000000, 0x907c9972,
- 0x877c817c, 0xbf06817c,
- 0xbefc0380, 0xbf850009,
- 0xe0310000, 0x781d0000,
- 0x807cff7c, 0x00000080,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7c, 0xbf85fff8,
- 0xbf820008, 0xe0310000,
- 0x781d0000, 0x807cff7c,
- 0x00000100, 0x8078ff78,
- 0x00000100, 0xbf0a6f7c,
- 0xbf85fff8, 0xbef80380,
+ 0x10807fac, 0xb97202dc,
+ 0x8f729972, 0x876eff7f,
+ 0x04000000, 0xbf840034,
0xbefe03c1, 0x907c9972,
0x877c817c, 0xbf06817c,
0xbf850002, 0xbeff0380,
0xbf820001, 0xbeff03c1,
- 0xb96f2a05, 0x806f816f,
- 0x8f6f826f, 0x907c9972,
- 0x877c817c, 0xbf06817c,
- 0xbf850021, 0xbef603ff,
- 0x01000000, 0xbeee0378,
+ 0xb96f4306, 0x876fc16f,
+ 0xbf840029, 0x8f6f866f,
+ 0x8f6f826f, 0xbef6036f,
+ 0xb9783a05, 0x80788178,
+ 0xbf0d9972, 0xbf850002,
+ 0x8f788978, 0xbf820001,
+ 0x8f788a78, 0xb96e1e06,
+ 0x8f6e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0xbefc0384, 0xe0304000,
- 0x785d0000, 0xe0304080,
- 0x785d0100, 0xe0304100,
- 0x785d0200, 0xe0304180,
- 0x785d0300, 0xbf8c3f70,
- 0x7e008500, 0x7e028501,
- 0x7e048502, 0x7e068503,
- 0x807c847c, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85ffee, 0xe0304000,
- 0x6e5d0000, 0xe0304080,
- 0x6e5d0100, 0xe0304100,
- 0x6e5d0200, 0xe0304180,
- 0x6e5d0300, 0xbf820032,
+ 0x8078ff78, 0x00000080,
+ 0xbef603ff, 0x01000000,
+ 0x907c9972, 0x877c817c,
+ 0xbf06817c, 0xbefc0380,
+ 0xbf850009, 0xe0310000,
+ 0x781d0000, 0x807cff7c,
+ 0x00000080, 0x8078ff78,
+ 0x00000080, 0xbf0a6f7c,
+ 0xbf85fff8, 0xbf820008,
+ 0xe0310000, 0x781d0000,
+ 0x807cff7c, 0x00000100,
+ 0x8078ff78, 0x00000100,
+ 0xbf0a6f7c, 0xbf85fff8,
+ 0xbef80380, 0xbefe03c1,
+ 0x907c9972, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0xbeff0380, 0xbf820001,
+ 0xbeff03c1, 0xb96f3a05,
+ 0x806f816f, 0x8f6f826f,
+ 0x907c9972, 0x877c817c,
+ 0xbf06817c, 0xbf850024,
+ 0xbef603ff, 0x01000000,
+ 0xbeee0378, 0x8078ff78,
+ 0x00000200, 0xbefc0384,
+ 0xbf0a6f7c, 0xbf840050,
+ 0xe0304000, 0x785d0000,
+ 0xe0304080, 0x785d0100,
+ 0xe0304100, 0x785d0200,
+ 0xe0304180, 0x785d0300,
+ 0xbf8c3f70, 0x7e008500,
+ 0x7e028501, 0x7e048502,
+ 0x7e068503, 0x807c847c,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xe0304000, 0x6e5d0000,
+ 0xe0304080, 0x6e5d0100,
+ 0xe0304100, 0x6e5d0200,
+ 0xe0304180, 0x6e5d0300,
+ 0xbf8c3f70, 0xbf820034,
0xbef603ff, 0x01000000,
0xbeee0378, 0x8078ff78,
0x00000400, 0xbefc0384,
+ 0xbf0a6f7c, 0xbf840012,
0xe0304000, 0x785d0000,
0xe0304100, 0x785d0100,
0xe0304200, 0x785d0200,
@@ -998,7 +1010,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0x6e5d0100, 0xe0304200,
0x6e5d0200, 0xe0304300,
0x6e5d0300, 0xbf8c3f70,
- 0xb9782a05, 0x80788178,
+ 0xb9783a05, 0x80788178,
0xbf0d9972, 0xbf850002,
0x8f788978, 0xbf820001,
0x8f788a78, 0xb96e1e06,
@@ -1025,7 +1037,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbe8c310c, 0xbe8e310e,
0xbf06807c, 0xbf84fff0,
0xba80f801, 0x00000000,
- 0xbf8a0000, 0xb9782a05,
+ 0xbf8a0000, 0xb9783a05,
0x80788178, 0xbf0d9972,
0xbf850002, 0x8f788978,
0xbf820001, 0x8f788a78,
@@ -1060,270 +1072,272 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xb96e2a05, 0x806e816e,
0xbf0d9972, 0xbf850002,
0x8f6e896e, 0xbf820001,
- 0x8f6e8a6e, 0x806eff6e,
- 0x00000200, 0x806e746e,
- 0x826f8075, 0x876fff6f,
- 0x0000ffff, 0xf4091c37,
- 0xfa000050, 0xf4091d37,
- 0xfa000060, 0xf4011e77,
- 0xfa000074, 0xbf8cc07f,
- 0x876fff6d, 0xfc000000,
- 0x906f9a6f, 0x8f6f906f,
- 0xbeee0380, 0x886e6f6e,
- 0x876fff6d, 0x02000000,
- 0x906f996f, 0x8f6f8f6f,
- 0x886e6f6e, 0x876fff6d,
- 0x01000000, 0x906f986f,
- 0x8f6f996f, 0x886e6f6e,
- 0x876fff7a, 0x00800000,
- 0x906f976f, 0xb9eef807,
- 0x876dff6d, 0x0000ffff,
- 0x87fe7e7e, 0x87ea6a6a,
- 0xb9faf802, 0xbe80226c,
- 0xbf810000, 0xbf9f0000,
+ 0x8f6e8a6e, 0xb96f1e06,
+ 0x8f6f8a6f, 0x806e6f6e,
+ 0x806eff6e, 0x00000200,
+ 0x806e746e, 0x826f8075,
+ 0x876fff6f, 0x0000ffff,
+ 0xf4091c37, 0xfa000050,
+ 0xf4091d37, 0xfa000060,
+ 0xf4011e77, 0xfa000074,
+ 0xbf8cc07f, 0x906e8977,
+ 0x876fff6e, 0x003f8000,
+ 0x906e8677, 0x876eff6e,
+ 0x02000000, 0x886e6f6e,
+ 0xb9eef807, 0x876dff6d,
+ 0x0000ffff, 0x87fe7e7e,
+ 0x87ea6a6a, 0xb9faf802,
+ 0xbe80226c, 0xbf810000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0x00000000,
};
static const uint32_t cwsr_trap_arcturus_hex[] = {
- 0xbf820001, 0xbf8202c4,
- 0xb8f8f802, 0x89788678,
- 0xb8eef801, 0x866eff6e,
- 0x00000800, 0xbf840003,
+ 0xbf820001, 0xbf8202d0,
+ 0xb8f8f802, 0x8978ff78,
+ 0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
- 0xbf840016, 0xb8fbf803,
+ 0xbf840009, 0x866eff6d,
+ 0x00ff0000, 0xbf85001e,
0x866eff7b, 0x00000400,
- 0xbf85003b, 0x866eff7b,
- 0x00000800, 0xbf850003,
- 0x866eff7b, 0x00000100,
- 0xbf84000c, 0x866eff78,
- 0x00002000, 0xbf840005,
- 0xbf8e0010, 0xb8eef803,
- 0x866eff6e, 0x00000400,
- 0xbf84fffb, 0x8778ff78,
- 0x00002000, 0x80ec886c,
- 0x82ed806d, 0xb8eef807,
- 0x866fff6e, 0x001f8000,
- 0x8e6f8b6f, 0x8977ff77,
- 0xfc000000, 0x87776f77,
- 0x896eff6e, 0x001f8000,
- 0xb96ef807, 0xb8faf812,
+ 0xbf850051, 0xbf8e0010,
+ 0xb8fbf803, 0xbf82fffa,
+ 0x866eff7b, 0x00000900,
+ 0xbf850015, 0x866eff7b,
+ 0x000071ff, 0xbf840008,
+ 0x866fff7b, 0x00007080,
+ 0xbf840001, 0xbeee1a87,
+ 0xb8eff801, 0x8e6e8c6e,
+ 0x866e6f6e, 0xbf85000a,
+ 0x866eff6d, 0x00ff0000,
+ 0xbf850007, 0xb8eef801,
+ 0x866eff6e, 0x00000800,
+ 0xbf850003, 0x866eff7b,
+ 0x00000400, 0xbf850036,
+ 0xb8faf807, 0x867aff7a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8977ff77, 0xfc000000,
+ 0x87777a77, 0xba7ff807,
+ 0x00000000, 0xb8faf812,
0xb8fbf813, 0x8efa887a,
- 0xc0071bbd, 0x00000000,
- 0xbf8cc07f, 0xc0071ebd,
- 0x00000008, 0xbf8cc07f,
- 0x86ee6e6e, 0xbf840001,
- 0xbe801d6e, 0xb8fbf803,
- 0x867bff7b, 0x000001ff,
+ 0xc0031bbd, 0x00000010,
+ 0xbf8cc07f, 0x8e6e976e,
+ 0x8977ff77, 0x00800000,
+ 0x87776e77, 0xc0071bbd,
+ 0x00000000, 0xbf8cc07f,
+ 0xc0071ebd, 0x00000008,
+ 0xbf8cc07f, 0x86ee6e6e,
+ 0xbf840001, 0xbe801d6e,
+ 0x866eff6d, 0x01ff0000,
+ 0xbf850005, 0x8778ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbf820005,
+ 0x866eff6d, 0x01000000,
0xbf850002, 0x806c846c,
0x826d806d, 0x866dff6d,
- 0x0000ffff, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x86fe7e7e,
+ 0x0000ffff, 0x8f7a8b77,
+ 0x867aff7a, 0x001f8000,
+ 0xb97af807, 0x86fe7e7e,
0x86ea6a6a, 0x8f6e8378,
0xb96ee0c2, 0xbf800002,
0xb9780002, 0xbe801f6c,
0x866dff6d, 0x0000ffff,
0xbefa0080, 0xb97a0283,
- 0xb8fa2407, 0x8e7a9b7a,
- 0x876d7a6d, 0xb8fa03c7,
- 0x8e7a9a7a, 0x876d7a6d,
0xb8faf807, 0x867aff7a,
- 0x00007fff, 0xb97af807,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbf900004,
- 0x877a8478, 0xb97af802,
- 0xbf8e0002, 0xbf88fffe,
- 0xb8fa2a05, 0x807a817a,
- 0x8e7a8a7a, 0x8e7a817a,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b867b, 0x807a7b7a,
- 0x807a7e7a, 0x827b807f,
- 0x867bff7b, 0x0000ffff,
- 0xc04b1c3d, 0x00000050,
- 0xbf8cc07f, 0xc04b1d3d,
- 0x00000060, 0xbf8cc07f,
- 0xc0431e7d, 0x00000074,
- 0xbf8cc07f, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x867aff7f,
- 0x08000000, 0x8f7a837a,
- 0x87777a77, 0x867aff7f,
- 0x70000000, 0x8f7a817a,
- 0x87777a77, 0xbef1007c,
- 0xbef00080, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xbefe007c, 0xbefc0070,
- 0xc0611c7a, 0x0000007c,
- 0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611b3a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8977ff77, 0xfc000000,
+ 0x87777a77, 0xba7ff807,
+ 0x00000000, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbf900004, 0x877a8478,
+ 0xb97af802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8fa2a05,
+ 0x807a817a, 0x8e7a8a7a,
+ 0x8e7a817a, 0xb8fb1605,
+ 0x807b817b, 0x8e7b867b,
+ 0x807a7b7a, 0x807a7e7a,
+ 0x827b807f, 0x867bff7b,
+ 0x0000ffff, 0xc04b1c3d,
+ 0x00000050, 0xbf8cc07f,
+ 0xc04b1d3d, 0x00000060,
+ 0xbf8cc07f, 0xc0431e7d,
+ 0x00000074, 0xbf8cc07f,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0xbef1007c, 0xbef00080,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611b7a, 0x0000007c,
+ 0xc0611b3a, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611bba,
+ 0xbefc0070, 0xc0611b7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611bfa, 0x0000007c,
+ 0xc0611bba, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611e3a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8fbf803, 0xbefe007c,
- 0xbefc0070, 0xc0611efa,
+ 0xbefc0070, 0xc0611bfa,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611a3a, 0x0000007c,
+ 0xc0611e3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8fbf803,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611efa, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611a7a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8f1f801, 0xbefe007c,
- 0xbefc0070, 0xc0611c7a,
+ 0xbefc0070, 0xc0611a3a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
- 0x867aff7f, 0x04000000,
- 0xbeef0080, 0x876f6f7a,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0x8e708170,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b847b, 0x8e76827b,
- 0xbef600ff, 0x01000000,
- 0xbef20174, 0x80747074,
- 0x82758075, 0xbefc0080,
- 0xbf800000, 0xbe802b00,
- 0xbe822b02, 0xbe842b04,
- 0xbe862b06, 0xbe882b08,
- 0xbe8a2b0a, 0xbe8c2b0c,
- 0xbe8e2b0e, 0xc06b003a,
- 0x00000000, 0xbf8cc07f,
- 0xc06b013a, 0x00000010,
- 0xbf8cc07f, 0xc06b023a,
- 0x00000020, 0xbf8cc07f,
- 0xc06b033a, 0x00000030,
- 0xbf8cc07f, 0x8074c074,
- 0x82758075, 0x807c907c,
- 0xbf0a7b7c, 0xbf85ffe7,
- 0xbef40172, 0xbef00080,
- 0xbefe00c1, 0xbeff00c1,
- 0xbee80080, 0xbee90080,
- 0xbef600ff, 0x01000000,
- 0x867aff78, 0x00400000,
- 0xbf850003, 0xb8faf803,
- 0x897a7aff, 0x10000000,
- 0xbf85004d, 0xbe840080,
- 0xd2890000, 0x00000900,
- 0x80048104, 0xd2890001,
- 0x00000900, 0x80048104,
- 0xd2890002, 0x00000900,
- 0x80048104, 0xd2890003,
- 0x00000900, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8f1f801,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0x867aff7f,
+ 0x04000000, 0xbeef0080,
+ 0x876f6f7a, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0x8e708170, 0xb8fb1605,
+ 0x807b817b, 0x8e7b847b,
+ 0x8e76827b, 0xbef600ff,
+ 0x01000000, 0xbef20174,
+ 0x80747074, 0x82758075,
+ 0xbefc0080, 0xbf800000,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003a, 0x00000000,
+ 0xbf8cc07f, 0xc06b013a,
+ 0x00000010, 0xbf8cc07f,
+ 0xc06b023a, 0x00000020,
+ 0xbf8cc07f, 0xc06b033a,
+ 0x00000030, 0xbf8cc07f,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0a7b7c,
+ 0xbf85ffe7, 0xbef40172,
+ 0xbef00080, 0xbefe00c1,
+ 0xbeff00c1, 0xbee80080,
+ 0xbee90080, 0xbef600ff,
+ 0x01000000, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf85004d,
0xbe840080, 0xd2890000,
- 0x00000901, 0x80048104,
- 0xd2890001, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
0x80048104, 0xd2890002,
- 0x00000901, 0x80048104,
- 0xd2890003, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000902,
+ 0xd2890000, 0x00000901,
0x80048104, 0xd2890001,
- 0x00000902, 0x80048104,
- 0xd2890002, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
0x80048104, 0xd2890003,
- 0x00000902, 0x80048104,
+ 0x00000901, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000903, 0x80048104,
- 0xd2890001, 0x00000903,
- 0x80048104, 0xd2890002,
- 0x00000903, 0x80048104,
- 0xd2890003, 0x00000903,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbf820008,
- 0xe0724000, 0x701d0000,
- 0xe0724100, 0x701d0100,
- 0xe0724200, 0x701d0200,
- 0xe0724300, 0x701d0300,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8fb4306, 0x867bc17b,
- 0xbf840064, 0xbf8a0000,
- 0x867aff6f, 0x04000000,
- 0xbf840060, 0x8e7b867b,
- 0x8e7b827b, 0xbef6007b,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0x8e708170,
- 0xb8fa1605, 0x807a817a,
- 0x8e7a867a, 0x80707a70,
- 0x8070ff70, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xbefc0080, 0xd28c0002,
- 0x000100c1, 0xd28d0003,
- 0x000204c1, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850030,
- 0x24040682, 0xd86e4000,
- 0x00000002, 0xbf8cc07f,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
+ 0xd2890000, 0x00000903,
0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbf820008, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb4306,
+ 0x867bc17b, 0xbf840064,
+ 0xbf8a0000, 0x867aff6f,
+ 0x04000000, 0xbf840060,
+ 0x8e7b867b, 0x8e7b827b,
+ 0xbef6007b, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0x8e708170, 0xb8fa1605,
+ 0x807a817a, 0x8e7a867a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xd28c0002, 0x000100c1,
+ 0xd28d0003, 0x000204c1,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850030, 0x24040682,
+ 0xd86e4000, 0x00000002,
+ 0xbf8cc07f, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x680404ff, 0x00000200,
+ 0xbe840080, 0xd2890000,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
+ 0x80048104, 0xd2890002,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x680404ff,
+ 0x00000200, 0xd0c9006a,
+ 0x0000f702, 0xbf87ffd2,
+ 0xbf820015, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe800077,
+ 0x8677ff77, 0xff7fffff,
+ 0x8777ff77, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8cc07f, 0xe0765000,
+ 0x701d0002, 0x68040702,
0xd0c9006a, 0x0000f702,
- 0xbf87ffd2, 0xbf820015,
- 0xd1060002, 0x00011103,
- 0x7e0602ff, 0x00000200,
- 0xbefc00ff, 0x00010000,
- 0xbe800077, 0x8677ff77,
- 0xff7fffff, 0x8777ff77,
- 0x00058000, 0xd8ec0000,
- 0x00000002, 0xbf8cc07f,
- 0xe0765000, 0x701d0002,
- 0x68040702, 0xd0c9006a,
- 0x0000f702, 0xbf87fff7,
- 0xbef70000, 0xbef000ff,
- 0x00000400, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb2a05,
- 0x807b817b, 0x8e7b827b,
- 0x8e76887b, 0xbef600ff,
+ 0xbf87fff7, 0xbef70000,
+ 0xbef000ff, 0x00000400,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8fb2a05, 0x807b817b,
+ 0x8e7b827b, 0xbef600ff,
0x01000000, 0xbefc0084,
0xbf0a7b7c, 0xbf84006d,
0xbf11017c, 0x807bff7b,
@@ -1440,15 +1454,11 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0x701d0300, 0x807c847c,
0x8070ff70, 0x00000400,
0xbf0a7b7c, 0xbf85ffeb,
- 0xbf9c0000, 0xbf820106,
+ 0xbf9c0000, 0xbf8200e3,
0xbef4007e, 0x8675ff7f,
0x0000ffff, 0x8775ff75,
0x00040000, 0xbef60080,
0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x08000000,
- 0x8f6e836e, 0x87776e77,
- 0x866eff7f, 0x70000000,
- 0x8f6e816e, 0x87776e77,
0x866eff7f, 0x04000000,
0xbf84001f, 0xbefe00c1,
0xbeff00c1, 0xb8ef4306,
@@ -1466,26 +1476,14 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0x807cff7c, 0x00000200,
0x8078ff78, 0x00000200,
0xbf0a6f7c, 0xbf85fff6,
- 0xbef80080, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef2a05,
- 0x806f816f, 0x8e6f826f,
- 0x8e76886f, 0xbef90076,
+ 0xbefe00c1, 0xbeff00c1,
0xbef600ff, 0x01000000,
+ 0xb8ef2a05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbef30079,
- 0x8079ff79, 0x00000400,
- 0xbefc0084, 0xbf11087c,
- 0x806fff6f, 0x00008000,
- 0xe0524000, 0x791d0000,
- 0xe0524100, 0x791d0100,
- 0xe0524200, 0x791d0200,
- 0xe0524300, 0x791d0300,
- 0x8079ff79, 0x00000400,
- 0xbf8c0f70, 0xd3d94000,
- 0x18000100, 0xd3d94001,
- 0x18000101, 0xd3d94002,
- 0x18000102, 0xd3d94003,
- 0x18000103, 0xe0524000,
+ 0x00000400, 0xbefc0084,
+ 0xbf11087c, 0xe0524000,
0x781d0000, 0xe0524100,
0x781d0100, 0xe0524200,
0x781d0200, 0xe0524300,
@@ -1494,20 +1492,24 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0x7e040302, 0x7e060303,
0x807c847c, 0x8078ff78,
0x00000400, 0xbf0a6f7c,
- 0xbf85ffdb, 0xbf9c0000,
- 0xe0524000, 0x731d0000,
- 0xe0524100, 0x731d0100,
- 0xe0524200, 0x731d0200,
- 0xe0524300, 0x731d0300,
- 0xbf8c0f70, 0xd3d94000,
- 0x18000100, 0xd3d94001,
- 0x18000101, 0xd3d94002,
- 0x18000102, 0xd3d94003,
- 0x18000103, 0xe0524000,
- 0x6e1d0000, 0xe0524100,
- 0x6e1d0100, 0xe0524200,
- 0x6e1d0200, 0xe0524300,
- 0x6e1d0300, 0xb8f82a05,
+ 0xbf85ffee, 0xbefc0080,
+ 0xbf11087c, 0xe0524000,
+ 0x781d0000, 0xe0524100,
+ 0x781d0100, 0xe0524200,
+ 0x781d0200, 0xe0524300,
+ 0x781d0300, 0xbf8c0f70,
+ 0xd3d94000, 0x18000100,
+ 0xd3d94001, 0x18000101,
+ 0xd3d94002, 0x18000102,
+ 0xd3d94003, 0x18000103,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffea, 0xbf9c0000,
+ 0xe0524000, 0x6e1d0000,
+ 0xe0524100, 0x6e1d0100,
+ 0xe0524200, 0x6e1d0200,
+ 0xe0524300, 0x6e1d0300,
+ 0xbf8c0f70, 0xb8f82a05,
0x80788178, 0x8e788a78,
0x8e788178, 0xb8ee1605,
0x806e816e, 0x8e6e866e,
@@ -1559,224 +1561,268 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0xc00b1c37, 0x00000050,
0xc00b1d37, 0x00000060,
0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x866fff6d,
- 0xf8000000, 0x8f6f9b6f,
- 0x8e6f906f, 0xbeee0080,
- 0x876e6f6e, 0x866fff6d,
- 0x04000000, 0x8f6f9a6f,
- 0x8e6f8f6f, 0x876e6f6e,
- 0x866fff7a, 0x00800000,
- 0x8f6f976f, 0xb96ef807,
- 0x866dff6d, 0x0000ffff,
- 0x86fe7e7e, 0x86ea6a6a,
- 0x8f6e837a, 0xb96ee0c2,
- 0xbf800002, 0xb97a0002,
- 0xbf8a0000, 0x95806f6c,
- 0xbf810000, 0x00000000,
+ 0xbf8cc07f, 0x8f6e8b77,
+ 0x866eff6e, 0x001f8000,
+ 0xb96ef807, 0x866dff6d,
+ 0x0000ffff, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e837a,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb97a0002, 0xbf8a0000,
+ 0xbe801f6c, 0xbf810000,
};
static const uint32_t cwsr_trap_aldebaran_hex[] = {
- 0xbf820001, 0xbf8202ce,
- 0xb8f8f802, 0x89788678,
- 0xb8eef801, 0x866eff6e,
- 0x00000800, 0xbf840003,
+ 0xbf820001, 0xbf8202db,
+ 0xb8f8f802, 0x8978ff78,
+ 0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
- 0xbf840016, 0xb8fbf803,
+ 0xbf840009, 0x866eff6d,
+ 0x00ff0000, 0xbf85001e,
0x866eff7b, 0x00000400,
- 0xbf85003b, 0x866eff7b,
- 0x00000800, 0xbf850003,
- 0x866eff7b, 0x00000100,
- 0xbf84000c, 0x866eff78,
- 0x00002000, 0xbf840005,
- 0xbf8e0010, 0xb8eef803,
- 0x866eff6e, 0x00000400,
- 0xbf84fffb, 0x8778ff78,
- 0x00002000, 0x80ec886c,
- 0x82ed806d, 0xb8eef807,
- 0x866fff6e, 0x001f8000,
- 0x8e6f8b6f, 0x8977ff77,
- 0xfc000000, 0x87776f77,
- 0x896eff6e, 0x001f8000,
- 0xb96ef807, 0xb8faf812,
+ 0xbf850051, 0xbf8e0010,
+ 0xb8fbf803, 0xbf82fffa,
+ 0x866eff7b, 0x00000900,
+ 0xbf850015, 0x866eff7b,
+ 0x000071ff, 0xbf840008,
+ 0x866fff7b, 0x00007080,
+ 0xbf840001, 0xbeee1a87,
+ 0xb8eff801, 0x8e6e8c6e,
+ 0x866e6f6e, 0xbf85000a,
+ 0x866eff6d, 0x00ff0000,
+ 0xbf850007, 0xb8eef801,
+ 0x866eff6e, 0x00000800,
+ 0xbf850003, 0x866eff7b,
+ 0x00000400, 0xbf850036,
+ 0xb8faf807, 0x867aff7a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8977ff77, 0xfc000000,
+ 0x87777a77, 0xba7ff807,
+ 0x00000000, 0xb8faf812,
0xb8fbf813, 0x8efa887a,
- 0xc0071bbd, 0x00000000,
- 0xbf8cc07f, 0xc0071ebd,
- 0x00000008, 0xbf8cc07f,
- 0x86ee6e6e, 0xbf840001,
- 0xbe801d6e, 0xb8fbf803,
- 0x867bff7b, 0x000001ff,
+ 0xc0031bbd, 0x00000010,
+ 0xbf8cc07f, 0x8e6e976e,
+ 0x8977ff77, 0x00800000,
+ 0x87776e77, 0xc0071bbd,
+ 0x00000000, 0xbf8cc07f,
+ 0xc0071ebd, 0x00000008,
+ 0xbf8cc07f, 0x86ee6e6e,
+ 0xbf840001, 0xbe801d6e,
+ 0x866eff6d, 0x01ff0000,
+ 0xbf850005, 0x8778ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbf820005,
+ 0x866eff6d, 0x01000000,
0xbf850002, 0x806c846c,
0x826d806d, 0x866dff6d,
- 0x0000ffff, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x86fe7e7e,
+ 0x0000ffff, 0x8f7a8b77,
+ 0x867aff7a, 0x001f8000,
+ 0xb97af807, 0x86fe7e7e,
0x86ea6a6a, 0x8f6e8378,
0xb96ee0c2, 0xbf800002,
0xb9780002, 0xbe801f6c,
0x866dff6d, 0x0000ffff,
0xbefa0080, 0xb97a0283,
- 0xb8fa2407, 0x8e7a9b7a,
- 0x876d7a6d, 0xb8fa03c7,
- 0x8e7a9a7a, 0x876d7a6d,
0xb8faf807, 0x867aff7a,
- 0x00007fff, 0xb97af807,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbf900004,
- 0x877a8478, 0xb97af802,
- 0xbf8e0002, 0xbf88fffe,
- 0xb8fa2985, 0x807a817a,
- 0x8e7a8a7a, 0x8e7a817a,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b867b, 0x807a7b7a,
- 0x807a7e7a, 0x827b807f,
- 0x867bff7b, 0x0000ffff,
- 0xc04b1c3d, 0x00000050,
- 0xbf8cc07f, 0xc04b1d3d,
- 0x00000060, 0xbf8cc07f,
- 0xc0431e7d, 0x00000074,
- 0xbf8cc07f, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x867aff7f,
- 0x08000000, 0x8f7a837a,
- 0x87777a77, 0x867aff7f,
- 0x70000000, 0x8f7a817a,
- 0x87777a77, 0xbef1007c,
- 0xbef00080, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xbefe007c, 0xbefc0070,
- 0xc0611c7a, 0x0000007c,
- 0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611b3a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8977ff77, 0xfc000000,
+ 0x87777a77, 0xba7ff807,
+ 0x00000000, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbf900004, 0x877a8478,
+ 0xb97af802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8fa2985,
+ 0x807a817a, 0x8e7a8a7a,
+ 0x8e7a817a, 0xb8fb1605,
+ 0x807b817b, 0x8e7b867b,
+ 0x807a7b7a, 0x807a7e7a,
+ 0x827b807f, 0x867bff7b,
+ 0x0000ffff, 0xc04b1c3d,
+ 0x00000050, 0xbf8cc07f,
+ 0xc04b1d3d, 0x00000060,
+ 0xbf8cc07f, 0xc0431e7d,
+ 0x00000074, 0xbf8cc07f,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0xbef1007c, 0xbef00080,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611b7a, 0x0000007c,
+ 0xc0611b3a, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611bba,
+ 0xbefc0070, 0xc0611b7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611bfa, 0x0000007c,
+ 0xc0611bba, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611e3a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8fbf803, 0xbefe007c,
- 0xbefc0070, 0xc0611efa,
+ 0xbefc0070, 0xc0611bfa,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611a3a, 0x0000007c,
+ 0xc0611e3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8fbf803,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611efa, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611a7a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8f1f801, 0xbefe007c,
- 0xbefc0070, 0xc0611c7a,
+ 0xbefc0070, 0xc0611a3a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
- 0x867aff7f, 0x04000000,
- 0xbeef0080, 0x876f6f7a,
- 0xb8f02985, 0x80708170,
- 0x8e708a70, 0x8e708170,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b847b, 0x8e76827b,
- 0xbef600ff, 0x01000000,
- 0xbef20174, 0x80747074,
- 0x82758075, 0xbefc0080,
- 0xbf800000, 0xbe802b00,
- 0xbe822b02, 0xbe842b04,
- 0xbe862b06, 0xbe882b08,
- 0xbe8a2b0a, 0xbe8c2b0c,
- 0xbe8e2b0e, 0xc06b003a,
- 0x00000000, 0xbf8cc07f,
- 0xc06b013a, 0x00000010,
- 0xbf8cc07f, 0xc06b023a,
- 0x00000020, 0xbf8cc07f,
- 0xc06b033a, 0x00000030,
- 0xbf8cc07f, 0x8074c074,
- 0x82758075, 0x807c907c,
- 0xbf0a7b7c, 0xbf85ffe7,
- 0xbef40172, 0xbef00080,
- 0xbefe00c1, 0xbeff00c1,
- 0xbee80080, 0xbee90080,
- 0xbef600ff, 0x01000000,
- 0x867aff78, 0x00400000,
- 0xbf850003, 0xb8faf803,
- 0x897a7aff, 0x10000000,
- 0xbf85004d, 0xbe840080,
- 0xd2890000, 0x00000900,
- 0x80048104, 0xd2890001,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8f1f801,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0x867aff7f,
+ 0x04000000, 0xbeef0080,
+ 0x876f6f7a, 0xb8f02985,
+ 0x80708170, 0x8e708a70,
+ 0x8e708170, 0xb8fb1605,
+ 0x807b817b, 0x8e7b847b,
+ 0x8e76827b, 0xbef600ff,
+ 0x01000000, 0xbef20174,
+ 0x80747074, 0x82758075,
+ 0xbefc0080, 0xbf800000,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003a, 0x00000000,
+ 0xbf8cc07f, 0xc06b013a,
+ 0x00000010, 0xbf8cc07f,
+ 0xc06b023a, 0x00000020,
+ 0xbf8cc07f, 0xc06b033a,
+ 0x00000030, 0xbf8cc07f,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0a7b7c,
+ 0xbf85ffe7, 0xbef40172,
+ 0xbef00080, 0xbefe00c1,
+ 0xbeff00c1, 0xbee80080,
+ 0xbee90080, 0xbef600ff,
+ 0x01000000, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf85004d,
+ 0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
- 0xd2890002, 0x00000900,
- 0x80048104, 0xd2890003,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000901, 0x80048104,
- 0xd2890001, 0x00000901,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
0x80048104, 0xd2890002,
- 0x00000901, 0x80048104,
- 0xd2890003, 0x00000901,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000902,
+ 0xd2890000, 0x00000903,
0x80048104, 0xd2890001,
- 0x00000902, 0x80048104,
- 0xd2890002, 0x00000902,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
0x80048104, 0xd2890003,
- 0x00000902, 0x80048104,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbf820008, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb4306,
+ 0x867bc17b, 0xbf840064,
+ 0xbf8a0000, 0x867aff6f,
+ 0x04000000, 0xbf840060,
+ 0x8e7b867b, 0x8e7b827b,
+ 0xbef6007b, 0xb8f02985,
+ 0x80708170, 0x8e708a70,
+ 0x8e708170, 0xb8fa1605,
+ 0x807a817a, 0x8e7a867a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xd28c0002, 0x000100c1,
+ 0xd28d0003, 0x000204c1,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850030, 0x24040682,
+ 0xd86e4000, 0x00000002,
+ 0xbf8cc07f, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000903, 0x80048104,
- 0xd2890001, 0x00000903,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000903, 0x80048104,
- 0xd2890003, 0x00000903,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbf820008,
- 0xe0724000, 0x701d0000,
- 0xe0724100, 0x701d0100,
- 0xe0724200, 0x701d0200,
- 0xe0724300, 0x701d0300,
+ 0xbf84ffee, 0x680404ff,
+ 0x00000200, 0xd0c9006a,
+ 0x0000f702, 0xbf87ffd2,
+ 0xbf820015, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe800077,
+ 0x8677ff77, 0xff7fffff,
+ 0x8777ff77, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8cc07f, 0xe0765000,
+ 0x701d0002, 0x68040702,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87fff7, 0xbef70000,
+ 0xbef000ff, 0x00000400,
0xbefe00c1, 0xbeff00c1,
- 0xb8fb4306, 0x867bc17b,
- 0xbf840064, 0xbf8a0000,
- 0x867aff6f, 0x04000000,
- 0xbf840060, 0x8e7b867b,
- 0x8e7b827b, 0xbef6007b,
- 0xb8f02985, 0x80708170,
- 0x8e708a70, 0x8e708170,
- 0xb8fa1605, 0x807a817a,
- 0x8e7a867a, 0x80707a70,
- 0x8070ff70, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xbefc0080, 0xd28c0002,
- 0x000100c1, 0xd28d0003,
- 0x000204c1, 0x867aff78,
+ 0xb8fb2b05, 0x807b817b,
+ 0x8e7b827b, 0xbef600ff,
+ 0x01000000, 0xbefc0084,
+ 0xbf0a7b7c, 0xbf84006d,
+ 0xbf11017c, 0x807bff7b,
+ 0x00001000, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850030,
- 0x24040682, 0xd86e4000,
- 0x00000002, 0xbf8cc07f,
+ 0x10000000, 0xbf850051,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -1796,31 +1842,51 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x680404ff, 0x00000200,
- 0xd0c9006a, 0x0000f702,
- 0xbf87ffd2, 0xbf820015,
- 0xd1060002, 0x00011103,
- 0x7e0602ff, 0x00000200,
- 0xbefc00ff, 0x00010000,
- 0xbe800077, 0x8677ff77,
- 0xff7fffff, 0x8777ff77,
- 0x00058000, 0xd8ec0000,
- 0x00000002, 0xbf8cc07f,
- 0xe0765000, 0x701d0002,
- 0x68040702, 0xd0c9006a,
- 0x0000f702, 0xbf87fff7,
- 0xbef70000, 0xbef000ff,
- 0x00000400, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb2b05,
- 0x807b817b, 0x8e7b827b,
- 0xbef600ff, 0x01000000,
- 0xbefc0084, 0xbf0a7b7c,
- 0xbf84006d, 0xbf11017c,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffb1, 0xbf9c0000,
+ 0xbf820012, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffef,
+ 0xbf9c0000, 0xb8fb2985,
+ 0x807b817b, 0x8e7b837b,
+ 0xb8fa2b05, 0x807a817a,
+ 0x8e7a827a, 0x80fb7a7b,
+ 0x867b7b7b, 0xbf84007a,
0x807bff7b, 0x00001000,
+ 0xbefc0080, 0xbf11017c,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850051, 0xbe840080,
+ 0xbf850059, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -1859,233 +1925,178 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0x807c847c,
- 0xbf0a7b7c, 0xbf85ffb1,
- 0xbf9c0000, 0xbf820012,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
+ 0xbf0a7b7c, 0xbf85ffa9,
+ 0xbf9c0000, 0xbf820016,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
0xe0724000, 0x701d0000,
0xe0724100, 0x701d0100,
0xe0724200, 0x701d0200,
0xe0724300, 0x701d0300,
0x807c847c, 0x8070ff70,
0x00000400, 0xbf0a7b7c,
- 0xbf85ffef, 0xbf9c0000,
- 0xb8fb2985, 0x807b817b,
- 0x8e7b837b, 0xb8fa2b05,
- 0x807a817a, 0x8e7a827a,
- 0x80fb7a7b, 0x867b7b7b,
- 0xbf84007a, 0x807bff7b,
- 0x00001000, 0xbefc0080,
- 0xbf11017c, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850059,
- 0xd3d84000, 0x18000100,
- 0xd3d84001, 0x18000101,
- 0xd3d84002, 0x18000102,
- 0xd3d84003, 0x18000103,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
- 0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
- 0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffa9, 0xbf9c0000,
- 0xbf820016, 0xd3d84000,
- 0x18000100, 0xd3d84001,
- 0x18000101, 0xd3d84002,
- 0x18000102, 0xd3d84003,
- 0x18000103, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffeb,
- 0xbf9c0000, 0xbf820101,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x08000000,
- 0x8f6e836e, 0x87776e77,
- 0x866eff7f, 0x70000000,
- 0x8f6e816e, 0x87776e77,
- 0x866eff7f, 0x04000000,
- 0xbf84001f, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef4306,
- 0x866fc16f, 0xbf84001a,
- 0x8e6f866f, 0x8e6f826f,
- 0xbef6006f, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x8078ff78,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xe0510000, 0x781d0000,
- 0xe0510100, 0x781d0000,
- 0x807cff7c, 0x00000200,
- 0x8078ff78, 0x00000200,
- 0xbf0a6f7c, 0xbf85fff6,
+ 0xbf85ffeb, 0xbf9c0000,
+ 0xbf8200ee, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x866eff7f,
+ 0x04000000, 0xbf84001f,
0xbefe00c1, 0xbeff00c1,
+ 0xb8ef4306, 0x866fc16f,
+ 0xbf84001a, 0x8e6f866f,
+ 0x8e6f826f, 0xbef6006f,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x8078ff78, 0x00000080,
0xbef600ff, 0x01000000,
- 0xb8ef2b05, 0x806f816f,
- 0x8e6f826f, 0x806fff6f,
- 0x00008000, 0xbef80080,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefc0084,
+ 0xbefc0080, 0xe0510000,
+ 0x781d0000, 0xe0510100,
+ 0x781d0000, 0x807cff7c,
+ 0x00000200, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7c,
+ 0xbf85fff6, 0xbefe00c1,
+ 0xbeff00c1, 0xbef600ff,
+ 0x01000000, 0xb8ef2b05,
+ 0x806f816f, 0x8e6f826f,
+ 0x806fff6f, 0x00008000,
+ 0xbef80080, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xb8ef2985, 0x806f816f,
+ 0x8e6f836f, 0xb8f92b05,
+ 0x80798179, 0x8e798279,
+ 0x80ef796f, 0x866f6f6f,
+ 0xbf84001a, 0x806fff6f,
+ 0x00008000, 0xbefc0080,
0xbf11087c, 0xe0524000,
0x781d0000, 0xe0524100,
0x781d0100, 0xe0524200,
0x781d0200, 0xe0524300,
0x781d0300, 0xbf8c0f70,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
+ 0xd3d94000, 0x18000100,
+ 0xd3d94001, 0x18000101,
+ 0xd3d94002, 0x18000102,
+ 0xd3d94003, 0x18000103,
0x807c847c, 0x8078ff78,
0x00000400, 0xbf0a6f7c,
- 0xbf85ffee, 0xb8ef2985,
- 0x806f816f, 0x8e6f836f,
- 0xb8f92b05, 0x80798179,
- 0x8e798279, 0x80ef796f,
- 0x866f6f6f, 0xbf84001a,
- 0x806fff6f, 0x00008000,
- 0xbefc0080, 0xbf11087c,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0xd3d94000,
- 0x18000100, 0xd3d94001,
- 0x18000101, 0xd3d94002,
- 0x18000102, 0xd3d94003,
- 0x18000103, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffea,
- 0xbf9c0000, 0xe0524000,
- 0x6e1d0000, 0xe0524100,
- 0x6e1d0100, 0xe0524200,
- 0x6e1d0200, 0xe0524300,
- 0x6e1d0300, 0xbf8c0f70,
- 0xb8f82985, 0x80788178,
- 0x8e788a78, 0x8e788178,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0x80f8c078, 0xb8ef1605,
- 0x806f816f, 0x8e6f846f,
- 0x8e76826f, 0xbef600ff,
- 0x01000000, 0xbefc006f,
- 0xc031003a, 0x00000078,
- 0x80f8c078, 0xbf8cc07f,
- 0x80fc907c, 0xbf800000,
- 0xbe802d00, 0xbe822d02,
- 0xbe842d04, 0xbe862d06,
- 0xbe882d08, 0xbe8a2d0a,
- 0xbe8c2d0c, 0xbe8e2d0e,
- 0xbf06807c, 0xbf84fff0,
- 0xb8f82985, 0x80788178,
- 0x8e788a78, 0x8e788178,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xc0211bfa,
+ 0xbf85ffea, 0xbf9c0000,
+ 0xe0524000, 0x6e1d0000,
+ 0xe0524100, 0x6e1d0100,
+ 0xe0524200, 0x6e1d0200,
+ 0xe0524300, 0x6e1d0300,
+ 0xbf8c0f70, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x80f8c078,
+ 0xb8ef1605, 0x806f816f,
+ 0x8e6f846f, 0x8e76826f,
+ 0xbef600ff, 0x01000000,
+ 0xbefc006f, 0xc031003a,
+ 0x00000078, 0x80f8c078,
+ 0xbf8cc07f, 0x80fc907c,
+ 0xbf800000, 0xbe802d00,
+ 0xbe822d02, 0xbe842d04,
+ 0xbe862d06, 0xbe882d08,
+ 0xbe8a2d0a, 0xbe8c2d0c,
+ 0xbe8e2d0e, 0xbf06807c,
+ 0xbf84fff0, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0xbef60084,
+ 0xbef600ff, 0x01000000,
+ 0xc0211bfa, 0x00000078,
+ 0x80788478, 0xc0211b3a,
0x00000078, 0x80788478,
- 0xc0211b3a, 0x00000078,
- 0x80788478, 0xc0211b7a,
+ 0xc0211b7a, 0x00000078,
+ 0x80788478, 0xc0211c3a,
0x00000078, 0x80788478,
- 0xc0211c3a, 0x00000078,
- 0x80788478, 0xc0211c7a,
+ 0xc0211c7a, 0x00000078,
+ 0x80788478, 0xc0211eba,
0x00000078, 0x80788478,
- 0xc0211eba, 0x00000078,
- 0x80788478, 0xc0211efa,
+ 0xc0211efa, 0x00000078,
+ 0x80788478, 0xc0211a3a,
0x00000078, 0x80788478,
- 0xc0211a3a, 0x00000078,
- 0x80788478, 0xc0211a7a,
+ 0xc0211a7a, 0x00000078,
+ 0x80788478, 0xc0211cfa,
0x00000078, 0x80788478,
- 0xc0211cfa, 0x00000078,
- 0x80788478, 0xbf8cc07f,
- 0xbefc006f, 0xbefe0070,
- 0xbeff0071, 0x866f7bff,
- 0x000003ff, 0xb96f4803,
- 0x866f7bff, 0xfffff800,
- 0x8f6f8b6f, 0xb96fa2c3,
- 0xb973f801, 0xb8ee2985,
- 0x806e816e, 0x8e6e8a6e,
- 0x8e6e816e, 0xb8ef1605,
- 0x806f816f, 0x8e6f866f,
- 0x806e6f6e, 0x806e746e,
- 0x826f8075, 0x866fff6f,
- 0x0000ffff, 0xc00b1c37,
- 0x00000050, 0xc00b1d37,
- 0x00000060, 0xc0031e77,
- 0x00000074, 0xbf8cc07f,
- 0x866fff6d, 0xf8000000,
- 0x8f6f9b6f, 0x8e6f906f,
- 0xbeee0080, 0x876e6f6e,
- 0x866fff6d, 0x04000000,
- 0x8f6f9a6f, 0x8e6f8f6f,
- 0x876e6f6e, 0x866fff7a,
- 0x00800000, 0x8f6f976f,
+ 0xbf8cc07f, 0xbefc006f,
+ 0xbefe0070, 0xbeff0071,
+ 0x866f7bff, 0x000003ff,
+ 0xb96f4803, 0x866f7bff,
+ 0xfffff800, 0x8f6f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee2985, 0x806e816e,
+ 0x8e6e8a6e, 0x8e6e816e,
+ 0xb8ef1605, 0x806f816f,
+ 0x8e6f866f, 0x806e6f6e,
+ 0x806e746e, 0x826f8075,
+ 0x866fff6f, 0x0000ffff,
+ 0xc00b1c37, 0x00000050,
+ 0xc00b1d37, 0x00000060,
+ 0xc0031e77, 0x00000074,
+ 0xbf8cc07f, 0x8f6e8b77,
+ 0x866eff6e, 0x001f8000,
0xb96ef807, 0x866dff6d,
0x0000ffff, 0x86fe7e7e,
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
- 0x95806f6c, 0xbf810000,
+ 0xbe801f6c, 0xbf810000,
};
static const uint32_t cwsr_trap_gfx10_hex[] = {
- 0xbf820001, 0xbf8201cf,
+ 0xbf820001, 0xbf82021c,
0xb0804004, 0xb978f802,
- 0x8a788678, 0xb96ef801,
- 0x876eff6e, 0x00000800,
- 0xbf840003, 0x876eff78,
+ 0x8a78ff78, 0x00020006,
+ 0xb97bf803, 0x876eff78,
0x00002000, 0xbf840009,
- 0xb97bf803, 0x876eff7b,
- 0x00000400, 0xbf85001d,
- 0x876eff7b, 0x00000100,
- 0xbf840002, 0x8878ff78,
- 0x00002000, 0xb97af812,
+ 0x876eff6d, 0x00ff0000,
+ 0xbf85001e, 0x876eff7b,
+ 0x00000400, 0xbf850041,
+ 0xbf8e0010, 0xb97bf803,
+ 0xbf82fffa, 0x876eff7b,
+ 0x00000900, 0xbf850015,
+ 0x876eff7b, 0x000071ff,
+ 0xbf840008, 0x876fff7b,
+ 0x00007080, 0xbf840001,
+ 0xbeee1d87, 0xb96ff801,
+ 0x8f6e8c6e, 0x876e6f6e,
+ 0xbf85000a, 0x876eff6d,
+ 0x00ff0000, 0xbf850007,
+ 0xb96ef801, 0x876eff6e,
+ 0x00000800, 0xbf850003,
+ 0x876eff7b, 0x00000400,
+ 0xbf850026, 0xb97af812,
0xb97bf813, 0x8ffa887a,
- 0xf4051bbd, 0xfa000000,
- 0xbf8cc07f, 0xf4051ebd,
- 0xfa000008, 0xbf8cc07f,
- 0x87ee6e6e, 0xbf840001,
- 0xbe80206e, 0xb97bf803,
- 0x877bff7b, 0x000001ff,
+ 0xf4011bbd, 0xfa000010,
+ 0xbf8cc07f, 0x8f6e976e,
+ 0x8a77ff77, 0x00800000,
+ 0x88776e77, 0xf4051bbd,
+ 0xfa000000, 0xbf8cc07f,
+ 0xf4051ebd, 0xfa000008,
+ 0xbf8cc07f, 0x87ee6e6e,
+ 0xbf840001, 0xbe80206e,
+ 0x876eff6d, 0x01ff0000,
+ 0xbf850005, 0x8878ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbf820005,
+ 0x876eff6d, 0x01000000,
0xbf850002, 0x806c846c,
0x826d806d, 0x876dff6d,
0x0000ffff, 0x87fe7e7e,
@@ -2095,37 +2106,55 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xb9fa0283, 0xbeee037e,
0xbeef037f, 0xbefe0480,
0xbf900004, 0xbf8cc07f,
+ 0x877aff7f, 0x04000000,
+ 0x8f7a857a, 0x886d7a6d,
+ 0xbefa037e, 0x877bff7f,
+ 0x0000ffff, 0xbefe03c1,
+ 0xbeff03c1, 0xdc5f8000,
+ 0x007a0000, 0x7e000280,
+ 0xbefe037a, 0xbeff037b,
0xb97b02dc, 0x8f7b997b,
- 0x887b7b7f, 0xb97a2a05,
- 0x807a817a, 0xbf0d997b,
- 0xbf850002, 0x8f7a897a,
- 0xbf820001, 0x8f7a8a7a,
+ 0xb97a2a05, 0x807a817a,
+ 0xbf0d997b, 0xbf850002,
+ 0x8f7a897a, 0xbf820001,
+ 0x8f7a8a7a, 0xb97b1e06,
+ 0x8f7b8a7b, 0x807a7b7a,
0x877bff7f, 0x0000ffff,
0x807aff7a, 0x00000200,
0x807a7e7a, 0x827b807b,
- 0xbef4037e, 0x8775ff7f,
- 0x0000ffff, 0x8875ff75,
- 0x00040000, 0xbef60380,
- 0xbef703ff, 0x10807fac,
- 0x877aff7f, 0x08000000,
- 0x907a837a, 0x88777a77,
- 0x877aff7f, 0x70000000,
- 0x907a817a, 0x88777a77,
- 0xbef1037c, 0xbef00380,
- 0xb97302dc, 0x8f739973,
- 0x8873737f, 0xbefe03c1,
+ 0xd7610000, 0x00010870,
+ 0xd7610000, 0x00010a71,
+ 0xd7610000, 0x00010c72,
+ 0xd7610000, 0x00010e73,
+ 0xd7610000, 0x00011074,
+ 0xd7610000, 0x00011275,
+ 0xd7610000, 0x00011476,
+ 0xd7610000, 0x00011677,
+ 0xd7610000, 0x00011a79,
+ 0xd7610000, 0x00011c7e,
+ 0xd7610000, 0x00011e7f,
+ 0xbefe03ff, 0x00003fff,
+ 0xbeff0380, 0xdc5f8040,
+ 0x007a0000, 0xd760007a,
+ 0x00011d00, 0xd760007b,
+ 0x00011f00, 0xbefe037a,
+ 0xbeff037b, 0xbef4037e,
+ 0x8775ff7f, 0x0000ffff,
+ 0x8875ff75, 0x00040000,
+ 0xbef60380, 0xbef703ff,
+ 0x10807fac, 0xbef1037c,
+ 0xbef00380, 0xb97302dc,
+ 0x8f739973, 0xbefe03c1,
0x907c9973, 0x877c817c,
0xbf06817c, 0xbf850002,
0xbeff0380, 0xbf820002,
- 0xbeff03c1, 0xbf82000b,
+ 0xbeff03c1, 0xbf820009,
0xbef603ff, 0x01000000,
- 0xe0704000, 0x705d0000,
0xe0704080, 0x705d0100,
0xe0704100, 0x705d0200,
0xe0704180, 0x705d0300,
- 0xbf82000a, 0xbef603ff,
- 0x01000000, 0xe0704000,
- 0x705d0000, 0xe0704100,
+ 0xbf820008, 0xbef603ff,
+ 0x01000000, 0xe0704100,
0x705d0100, 0xe0704200,
0x705d0200, 0xe0704300,
0x705d0300, 0xb9702a05,
@@ -2140,8 +2169,9 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbefc0380, 0xd7610002,
0x0000f871, 0x807c817c,
0xd7610002, 0x0000f86c,
- 0x807c817c, 0xd7610002,
- 0x0000f86d, 0x807c817c,
+ 0x807c817c, 0x8a7aff6d,
+ 0x80000000, 0xd7610002,
+ 0x0000f87a, 0x807c817c,
0xd7610002, 0x0000f86e,
0x807c817c, 0xd7610002,
0x0000f86f, 0x807c817c,
@@ -2156,160 +2186,157 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x0000f871, 0x807c817c,
0xb971f815, 0xd7610002,
0x0000f871, 0x807c817c,
+ 0xbefe03ff, 0x0000ffff,
0xbeff0380, 0xe0704000,
- 0x705d0200, 0xb9702a05,
- 0x80708170, 0xbf0d9973,
- 0xbf850002, 0x8f708970,
- 0xbf820001, 0x8f708a70,
- 0xb97a1e06, 0x8f7a8a7a,
- 0x80707a70, 0xbef603ff,
- 0x01000000, 0xbef90380,
- 0xbefc0380, 0xbf800000,
- 0xbe802f00, 0xbe822f02,
- 0xbe842f04, 0xbe862f06,
- 0xbe882f08, 0xbe8a2f0a,
- 0xbe8c2f0c, 0xbe8e2f0e,
- 0xd7610002, 0x0000f200,
- 0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
- 0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
+ 0x705d0200, 0xbefe03c1,
+ 0xb9702a05, 0x80708170,
+ 0xbf0d9973, 0xbf850002,
+ 0x8f708970, 0xbf820001,
+ 0x8f708a70, 0xb97a1e06,
+ 0x8f7a8a7a, 0x80707a70,
+ 0xbef603ff, 0x01000000,
+ 0xbef90380, 0xbefc0380,
+ 0xbf800000, 0xbe802f00,
+ 0xbe822f02, 0xbe842f04,
+ 0xbe862f06, 0xbe882f08,
+ 0xbe8a2f0a, 0xbe8c2f0c,
+ 0xbe8e2f0e, 0xd7610002,
+ 0x0000f200, 0x80798179,
+ 0xd7610002, 0x0000f201,
0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
+ 0x0000f202, 0x80798179,
+ 0xd7610002, 0x0000f203,
0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
+ 0x0000f204, 0x80798179,
+ 0xd7610002, 0x0000f205,
0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
+ 0x0000f206, 0x80798179,
+ 0xd7610002, 0x0000f207,
0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xd7610002, 0x0000f20c,
+ 0x0000f208, 0x80798179,
+ 0xd7610002, 0x0000f209,
0x80798179, 0xd7610002,
- 0x0000f20d, 0x80798179,
- 0xd7610002, 0x0000f20e,
+ 0x0000f20a, 0x80798179,
+ 0xd7610002, 0x0000f20b,
0x80798179, 0xd7610002,
- 0x0000f20f, 0x80798179,
- 0xbf06a079, 0xbf840006,
- 0xe0704000, 0x705d0200,
- 0x8070ff70, 0x00000080,
- 0xbef90380, 0x7e040280,
- 0x807c907c, 0xbf0aff7c,
- 0x00000060, 0xbf85ffbc,
- 0xbe802f00, 0xbe822f02,
- 0xbe842f04, 0xbe862f06,
- 0xbe882f08, 0xbe8a2f0a,
- 0xd7610002, 0x0000f200,
+ 0x0000f20c, 0x80798179,
+ 0xd7610002, 0x0000f20d,
0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
+ 0x0000f20e, 0x80798179,
+ 0xd7610002, 0x0000f20f,
+ 0x80798179, 0xbf06a079,
+ 0xbf840006, 0xe0704000,
+ 0x705d0200, 0x8070ff70,
+ 0x00000080, 0xbef90380,
+ 0x7e040280, 0x807c907c,
+ 0xbf0aff7c, 0x00000060,
+ 0xbf85ffbc, 0xbe802f00,
+ 0xbe822f02, 0xbe842f04,
+ 0xbe862f06, 0xbe882f08,
+ 0xbe8a2f0a, 0xd7610002,
+ 0x0000f200, 0x80798179,
+ 0xd7610002, 0x0000f201,
0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
+ 0x0000f202, 0x80798179,
+ 0xd7610002, 0x0000f203,
0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
+ 0x0000f204, 0x80798179,
+ 0xd7610002, 0x0000f205,
0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
+ 0x0000f206, 0x80798179,
+ 0xd7610002, 0x0000f207,
0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
+ 0x0000f208, 0x80798179,
+ 0xd7610002, 0x0000f209,
0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xe0704000, 0x705d0200,
+ 0x0000f20a, 0x80798179,
+ 0xd7610002, 0x0000f20b,
+ 0x80798179, 0xe0704000,
+ 0x705d0200, 0xbefe03c1,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0xbeff0380, 0xbf820001,
+ 0xbeff03c1, 0xb97b4306,
+ 0x877bc17b, 0xbf840044,
+ 0xbf8a0000, 0x877aff6d,
+ 0x80000000, 0xbf840040,
+ 0x8f7b867b, 0x8f7b827b,
+ 0xbef6037b, 0xb9703a05,
+ 0x80708170, 0xbf0d9973,
+ 0xbf850002, 0x8f708970,
+ 0xbf820001, 0x8f708a70,
+ 0xb97a1e06, 0x8f7a8a7a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000200, 0x8070ff70,
+ 0x00000080, 0xbef603ff,
+ 0x01000000, 0xd7650000,
+ 0x000100c1, 0xd7660000,
+ 0x000200c1, 0x16000084,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbefc0380,
+ 0xbf850012, 0xbe8303ff,
+ 0x00000080, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf8c0000, 0xe0704000,
+ 0x705d0100, 0x807c037c,
+ 0x80700370, 0xd5250000,
+ 0x0001ff00, 0x00000080,
+ 0xbf0a7b7c, 0xbf85fff4,
+ 0xbf820011, 0xbe8303ff,
+ 0x00000100, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf8c0000, 0xe0704000,
+ 0x705d0100, 0x807c037c,
+ 0x80700370, 0xd5250000,
+ 0x0001ff00, 0x00000100,
+ 0xbf0a7b7c, 0xbf85fff4,
0xbefe03c1, 0x907c9973,
0x877c817c, 0xbf06817c,
- 0xbf850002, 0xbeff0380,
- 0xbf820001, 0xbeff03c1,
- 0xb97b4306, 0x877bc17b,
- 0xbf840044, 0xbf8a0000,
- 0x877aff73, 0x04000000,
- 0xbf840040, 0x8f7b867b,
- 0x8f7b827b, 0xbef6037b,
- 0xb9702a05, 0x80708170,
- 0xbf0d9973, 0xbf850002,
- 0x8f708970, 0xbf820001,
- 0x8f708a70, 0xb97a1e06,
- 0x8f7a8a7a, 0x80707a70,
- 0x8070ff70, 0x00000200,
- 0x8070ff70, 0x00000080,
- 0xbef603ff, 0x01000000,
- 0xd7650000, 0x000100c1,
- 0xd7660000, 0x000200c1,
- 0x16000084, 0x907c9973,
+ 0xbf850004, 0xbef003ff,
+ 0x00000200, 0xbeff0380,
+ 0xbf820003, 0xbef003ff,
+ 0x00000400, 0xbeff03c1,
+ 0xb97b3a05, 0x807b817b,
+ 0x8f7b827b, 0x907c9973,
0x877c817c, 0xbf06817c,
- 0xbefc0380, 0xbf850012,
- 0xbe8303ff, 0x00000080,
- 0xbf800000, 0xbf800000,
- 0xbf800000, 0xd8d80000,
- 0x01000000, 0xbf8c0000,
- 0xe0704000, 0x705d0100,
- 0x807c037c, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000080, 0xbf0a7b7c,
- 0xbf85fff4, 0xbf820011,
- 0xbe8303ff, 0x00000100,
- 0xbf800000, 0xbf800000,
- 0xbf800000, 0xd8d80000,
- 0x01000000, 0xbf8c0000,
- 0xe0704000, 0x705d0100,
- 0x807c037c, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000100, 0xbf0a7b7c,
- 0xbf85fff4, 0xbefe03c1,
- 0x907c9973, 0x877c817c,
- 0xbf06817c, 0xbf850004,
- 0xbef003ff, 0x00000200,
- 0xbeff0380, 0xbf820003,
- 0xbef003ff, 0x00000400,
- 0xbeff03c1, 0xb97b2a05,
- 0x807b817b, 0x8f7b827b,
- 0x907c9973, 0x877c817c,
- 0xbf06817c, 0xbf850017,
+ 0xbf850017, 0xbef603ff,
+ 0x01000000, 0xbefc0384,
+ 0xbf0a7b7c, 0xbf840037,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
+ 0xe0704000, 0x705d0000,
+ 0xe0704080, 0x705d0100,
+ 0xe0704100, 0x705d0200,
+ 0xe0704180, 0x705d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000200, 0xbf0a7b7c,
+ 0xbf85ffef, 0xbf820025,
0xbef603ff, 0x01000000,
0xbefc0384, 0xbf0a7b7c,
- 0xbf840037, 0x7e008700,
+ 0xbf840011, 0x7e008700,
0x7e028701, 0x7e048702,
0x7e068703, 0xe0704000,
- 0x705d0000, 0xe0704080,
- 0x705d0100, 0xe0704100,
- 0x705d0200, 0xe0704180,
+ 0x705d0000, 0xe0704100,
+ 0x705d0100, 0xe0704200,
+ 0x705d0200, 0xe0704300,
0x705d0300, 0x807c847c,
- 0x8070ff70, 0x00000200,
+ 0x8070ff70, 0x00000400,
0xbf0a7b7c, 0xbf85ffef,
- 0xbf820025, 0xbef603ff,
- 0x01000000, 0xbefc0384,
- 0xbf0a7b7c, 0xbf840020,
- 0x7e008700, 0x7e028701,
- 0x7e048702, 0x7e068703,
+ 0xb97b1e06, 0x877bc17b,
+ 0xbf84000c, 0x8f7b837b,
+ 0x807b7c7b, 0xbefe03c1,
+ 0xbeff0380, 0x7e008700,
0xe0704000, 0x705d0000,
- 0xe0704100, 0x705d0100,
- 0xe0704200, 0x705d0200,
- 0xe0704300, 0x705d0300,
- 0x807c847c, 0x8070ff70,
- 0x00000400, 0xbf0a7b7c,
- 0xbf85ffef, 0xb97b1e06,
- 0x877bc17b, 0xbf84000c,
- 0x8f7b837b, 0x807b7c7b,
- 0xbefe03c1, 0xbeff0380,
- 0x7e008700, 0xe0704000,
- 0x705d0000, 0x807c817c,
- 0x8070ff70, 0x00000080,
- 0xbf0a7b7c, 0xbf85fff8,
- 0xbf82013c, 0xbef4037e,
- 0x8775ff7f, 0x0000ffff,
- 0x8875ff75, 0x00040000,
- 0xbef60380, 0xbef703ff,
- 0x10807fac, 0x876eff7f,
- 0x08000000, 0x906e836e,
- 0x88776e77, 0x876eff7f,
- 0x70000000, 0x906e816e,
- 0x88776e77, 0xb97202dc,
- 0x8f729972, 0x8872727f,
+ 0x807c817c, 0x8070ff70,
+ 0x00000080, 0xbf0a7b7c,
+ 0xbf85fff8, 0xbf82013b,
+ 0xbef4037e, 0x8775ff7f,
+ 0x0000ffff, 0x8875ff75,
+ 0x00040000, 0xbef60380,
+ 0xbef703ff, 0x10807fac,
+ 0xb97202dc, 0x8f729972,
0x876eff7f, 0x04000000,
0xbf840034, 0xbefe03c1,
0x907c9972, 0x877c817c,
@@ -2318,7 +2345,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbeff03c1, 0xb96f4306,
0x876fc16f, 0xbf840029,
0x8f6f866f, 0x8f6f826f,
- 0xbef6036f, 0xb9782a05,
+ 0xbef6036f, 0xb9783a05,
0x80788178, 0xbf0d9972,
0xbf850002, 0x8f788978,
0xbf820001, 0x8f788a78,
@@ -2342,13 +2369,14 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x877c817c, 0xbf06817c,
0xbf850002, 0xbeff0380,
0xbf820001, 0xbeff03c1,
- 0xb96f2a05, 0x806f816f,
+ 0xb96f3a05, 0x806f816f,
0x8f6f826f, 0x907c9972,
0x877c817c, 0xbf06817c,
- 0xbf850021, 0xbef603ff,
+ 0xbf850024, 0xbef603ff,
0x01000000, 0xbeee0378,
0x8078ff78, 0x00000200,
- 0xbefc0384, 0xe0304000,
+ 0xbefc0384, 0xbf0a6f7c,
+ 0xbf840050, 0xe0304000,
0x785d0000, 0xe0304080,
0x785d0100, 0xe0304100,
0x785d0200, 0xe0304180,
@@ -2361,94 +2389,97 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x6e5d0000, 0xe0304080,
0x6e5d0100, 0xe0304100,
0x6e5d0200, 0xe0304180,
- 0x6e5d0300, 0xbf820032,
- 0xbef603ff, 0x01000000,
- 0xbeee0378, 0x8078ff78,
- 0x00000400, 0xbefc0384,
+ 0x6e5d0300, 0xbf8c3f70,
+ 0xbf820034, 0xbef603ff,
+ 0x01000000, 0xbeee0378,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0384, 0xbf0a6f7c,
+ 0xbf840012, 0xe0304000,
+ 0x785d0000, 0xe0304100,
+ 0x785d0100, 0xe0304200,
+ 0x785d0200, 0xe0304300,
+ 0x785d0300, 0xbf8c3f70,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffee, 0xb96f1e06,
+ 0x876fc16f, 0xbf84000e,
+ 0x8f6f836f, 0x806f7c6f,
+ 0xbefe03c1, 0xbeff0380,
0xe0304000, 0x785d0000,
- 0xe0304100, 0x785d0100,
- 0xe0304200, 0x785d0200,
- 0xe0304300, 0x785d0300,
0xbf8c3f70, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xb96f1e06, 0x876fc16f,
- 0xbf84000e, 0x8f6f836f,
- 0x806f7c6f, 0xbefe03c1,
- 0xbeff0380, 0xe0304000,
- 0x785d0000, 0xbf8c3f70,
- 0x7e008500, 0x807c817c,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7c, 0xbf85fff7,
- 0xbeff03c1, 0xe0304000,
- 0x6e5d0000, 0xe0304100,
- 0x6e5d0100, 0xe0304200,
- 0x6e5d0200, 0xe0304300,
- 0x6e5d0300, 0xbf8c3f70,
- 0xb9782a05, 0x80788178,
+ 0x807c817c, 0x8078ff78,
+ 0x00000080, 0xbf0a6f7c,
+ 0xbf85fff7, 0xbeff03c1,
+ 0xe0304000, 0x6e5d0000,
+ 0xe0304100, 0x6e5d0100,
+ 0xe0304200, 0x6e5d0200,
+ 0xe0304300, 0x6e5d0300,
+ 0xbf8c3f70, 0xb9783a05,
+ 0x80788178, 0xbf0d9972,
+ 0xbf850002, 0x8f788978,
+ 0xbf820001, 0x8f788a78,
+ 0xb96e1e06, 0x8f6e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x80f8ff78,
+ 0x00000050, 0xbef603ff,
+ 0x01000000, 0xbefc03ff,
+ 0x0000006c, 0x80f89078,
+ 0xf429003a, 0xf0000000,
+ 0xbf8cc07f, 0x80fc847c,
+ 0xbf800000, 0xbe803100,
+ 0xbe823102, 0x80f8a078,
+ 0xf42d003a, 0xf0000000,
+ 0xbf8cc07f, 0x80fc887c,
+ 0xbf800000, 0xbe803100,
+ 0xbe823102, 0xbe843104,
+ 0xbe863106, 0x80f8c078,
+ 0xf431003a, 0xf0000000,
+ 0xbf8cc07f, 0x80fc907c,
+ 0xbf800000, 0xbe803100,
+ 0xbe823102, 0xbe843104,
+ 0xbe863106, 0xbe883108,
+ 0xbe8a310a, 0xbe8c310c,
+ 0xbe8e310e, 0xbf06807c,
+ 0xbf84fff0, 0xba80f801,
+ 0x00000000, 0xbf8a0000,
+ 0xb9783a05, 0x80788178,
0xbf0d9972, 0xbf850002,
0x8f788978, 0xbf820001,
0x8f788a78, 0xb96e1e06,
0x8f6e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0x80f8ff78, 0x00000050,
0xbef603ff, 0x01000000,
- 0xbefc03ff, 0x0000006c,
- 0x80f89078, 0xf429003a,
- 0xf0000000, 0xbf8cc07f,
- 0x80fc847c, 0xbf800000,
- 0xbe803100, 0xbe823102,
- 0x80f8a078, 0xf42d003a,
- 0xf0000000, 0xbf8cc07f,
- 0x80fc887c, 0xbf800000,
- 0xbe803100, 0xbe823102,
- 0xbe843104, 0xbe863106,
- 0x80f8c078, 0xf431003a,
- 0xf0000000, 0xbf8cc07f,
- 0x80fc907c, 0xbf800000,
- 0xbe803100, 0xbe823102,
- 0xbe843104, 0xbe863106,
- 0xbe883108, 0xbe8a310a,
- 0xbe8c310c, 0xbe8e310e,
- 0xbf06807c, 0xbf84fff0,
- 0xba80f801, 0x00000000,
- 0xbf8a0000, 0xb9782a05,
- 0x80788178, 0xbf0d9972,
- 0xbf850002, 0x8f788978,
- 0xbf820001, 0x8f788a78,
- 0xb96e1e06, 0x8f6e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0xbef603ff,
- 0x01000000, 0xf4211bfa,
+ 0xf4211bfa, 0xf0000000,
+ 0x80788478, 0xf4211b3a,
0xf0000000, 0x80788478,
- 0xf4211b3a, 0xf0000000,
- 0x80788478, 0xf4211b7a,
+ 0xf4211b7a, 0xf0000000,
+ 0x80788478, 0xf4211c3a,
0xf0000000, 0x80788478,
- 0xf4211c3a, 0xf0000000,
- 0x80788478, 0xf4211c7a,
+ 0xf4211c7a, 0xf0000000,
+ 0x80788478, 0xf4211eba,
0xf0000000, 0x80788478,
- 0xf4211eba, 0xf0000000,
- 0x80788478, 0xf4211efa,
+ 0xf4211efa, 0xf0000000,
+ 0x80788478, 0xf4211e7a,
0xf0000000, 0x80788478,
- 0xf4211e7a, 0xf0000000,
- 0x80788478, 0xf4211cfa,
+ 0xf4211cfa, 0xf0000000,
+ 0x80788478, 0xf4211bba,
0xf0000000, 0x80788478,
+ 0xbf8cc07f, 0xb9eef814,
0xf4211bba, 0xf0000000,
0x80788478, 0xbf8cc07f,
- 0xb9eef814, 0xf4211bba,
- 0xf0000000, 0x80788478,
- 0xbf8cc07f, 0xb9eef815,
- 0xbefc036f, 0xbefe0370,
- 0xbeff0371, 0x876f7bff,
- 0x000003ff, 0xb9ef4803,
- 0x876f7bff, 0xfffff800,
- 0x906f8b6f, 0xb9efa2c3,
- 0xb9f3f801, 0xb96e2a05,
- 0x806e816e, 0xbf0d9972,
- 0xbf850002, 0x8f6e896e,
- 0xbf820001, 0x8f6e8a6e,
+ 0xb9eef815, 0xbefc036f,
+ 0xbefe0370, 0xbeff0371,
+ 0x876f7bff, 0x000003ff,
+ 0xb9ef4803, 0x876f7bff,
+ 0xfffff800, 0x906f8b6f,
+ 0xb9efa2c3, 0xb9f3f801,
+ 0xb96e2a05, 0x806e816e,
+ 0xbf0d9972, 0xbf850002,
+ 0x8f6e896e, 0xbf820001,
+ 0x8f6e8a6e, 0xb96f1e06,
+ 0x8f6f8a6f, 0x806e6f6e,
0x806eff6e, 0x00000200,
0x806e746e, 0x826f8075,
0x876fff6f, 0x0000ffff,
@@ -2463,3 +2494,440 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
};
+
+static const uint32_t cwsr_trap_gfx11_hex[] = {
+ 0xbfa00001, 0xbfa0021b,
+ 0xb0804006, 0xb8f8f802,
+ 0x91788678, 0xb8fbf803,
+ 0x8b6eff78, 0x00002000,
+ 0xbfa10009, 0x8b6eff6d,
+ 0x00ff0000, 0xbfa2001e,
+ 0x8b6eff7b, 0x00000400,
+ 0xbfa20041, 0xbf830010,
+ 0xb8fbf803, 0xbfa0fffa,
+ 0x8b6eff7b, 0x00000900,
+ 0xbfa20015, 0x8b6eff7b,
+ 0x000071ff, 0xbfa10008,
+ 0x8b6fff7b, 0x00007080,
+ 0xbfa10001, 0xbeee1287,
+ 0xb8eff801, 0x846e8c6e,
+ 0x8b6e6f6e, 0xbfa2000a,
+ 0x8b6eff6d, 0x00ff0000,
+ 0xbfa20007, 0xb8eef801,
+ 0x8b6eff6e, 0x00000800,
+ 0xbfa20003, 0x8b6eff7b,
+ 0x00000400, 0xbfa20026,
+ 0xbefa4d82, 0xbf89fc07,
+ 0x84fa887a, 0xf4005bbd,
+ 0xf8000010, 0xbf89fc07,
+ 0x846e976e, 0x9177ff77,
+ 0x00800000, 0x8c776e77,
+ 0xf4045bbd, 0xf8000000,
+ 0xbf89fc07, 0xf4045ebd,
+ 0xf8000008, 0xbf89fc07,
+ 0x8bee6e6e, 0xbfa10001,
+ 0xbe80486e, 0x8b6eff6d,
+ 0x01ff0000, 0xbfa20005,
+ 0x8c78ff78, 0x00002000,
+ 0x80ec886c, 0x82ed806d,
+ 0xbfa00005, 0x8b6eff6d,
+ 0x01000000, 0xbfa20002,
+ 0x806c846c, 0x826d806d,
+ 0x8b6dff6d, 0x0000ffff,
+ 0x8bfe7e7e, 0x8bea6a6a,
+ 0xb978f802, 0xbe804a6c,
+ 0x8b6dff6d, 0x0000ffff,
+ 0xbefa0080, 0xb97a0283,
+ 0xbeee007e, 0xbeef007f,
+ 0xbefe0180, 0xbefe4d84,
+ 0xbf89fc07, 0x8b7aff7f,
+ 0x04000000, 0x847a857a,
+ 0x8c6d7a6d, 0xbefa007e,
+ 0x8b7bff7f, 0x0000ffff,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xdca6c000, 0x007a0000,
+ 0x7e000280, 0xbefe007a,
+ 0xbeff007b, 0xb8fb02dc,
+ 0x847b997b, 0xb8fa3b05,
+ 0x807a817a, 0xbf0d997b,
+ 0xbfa20002, 0x847a897a,
+ 0xbfa00001, 0x847a8a7a,
+ 0xb8fb1e06, 0x847b8a7b,
+ 0x807a7b7a, 0x8b7bff7f,
+ 0x0000ffff, 0x807aff7a,
+ 0x00000200, 0x807a7e7a,
+ 0x827b807b, 0xd7610000,
+ 0x00010870, 0xd7610000,
+ 0x00010a71, 0xd7610000,
+ 0x00010c72, 0xd7610000,
+ 0x00010e73, 0xd7610000,
+ 0x00011074, 0xd7610000,
+ 0x00011275, 0xd7610000,
+ 0x00011476, 0xd7610000,
+ 0x00011677, 0xd7610000,
+ 0x00011a79, 0xd7610000,
+ 0x00011c7e, 0xd7610000,
+ 0x00011e7f, 0xbefe00ff,
+ 0x00003fff, 0xbeff0080,
+ 0xdca6c040, 0x007a0000,
+ 0xd760007a, 0x00011d00,
+ 0xd760007b, 0x00011f00,
+ 0xbefe007a, 0xbeff007b,
+ 0xbef4007e, 0x8b75ff7f,
+ 0x0000ffff, 0x8c75ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x10807fac,
+ 0xbef1007d, 0xbef00080,
+ 0xb8f302dc, 0x84739973,
+ 0xbefe00c1, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00002, 0xbeff00c1,
+ 0xbfa00009, 0xbef600ff,
+ 0x01000000, 0xe0685080,
+ 0x701d0100, 0xe0685100,
+ 0x701d0200, 0xe0685180,
+ 0x701d0300, 0xbfa00008,
+ 0xbef600ff, 0x01000000,
+ 0xe0685100, 0x701d0100,
+ 0xe0685200, 0x701d0200,
+ 0xe0685300, 0x701d0300,
+ 0xb8f03b05, 0x80708170,
+ 0xbf0d9973, 0xbfa20002,
+ 0x84708970, 0xbfa00001,
+ 0x84708a70, 0xb8fa1e06,
+ 0x847a8a7a, 0x80707a70,
+ 0x8070ff70, 0x00000200,
+ 0xbef600ff, 0x01000000,
+ 0x7e000280, 0x7e020280,
+ 0x7e040280, 0xbefd0080,
+ 0xd7610002, 0x0000fa71,
+ 0x807d817d, 0xd7610002,
+ 0x0000fa6c, 0x807d817d,
+ 0x917aff6d, 0x80000000,
+ 0xd7610002, 0x0000fa7a,
+ 0x807d817d, 0xd7610002,
+ 0x0000fa6e, 0x807d817d,
+ 0xd7610002, 0x0000fa6f,
+ 0x807d817d, 0xd7610002,
+ 0x0000fa78, 0x807d817d,
+ 0xb8faf803, 0xd7610002,
+ 0x0000fa7a, 0x807d817d,
+ 0xd7610002, 0x0000fa7b,
+ 0x807d817d, 0xb8f1f801,
+ 0xd7610002, 0x0000fa71,
+ 0x807d817d, 0xb8f1f814,
+ 0xd7610002, 0x0000fa71,
+ 0x807d817d, 0xb8f1f815,
+ 0xd7610002, 0x0000fa71,
+ 0x807d817d, 0xbefe00ff,
+ 0x0000ffff, 0xbeff0080,
+ 0xe0685000, 0x701d0200,
+ 0xbefe00c1, 0xb8f03b05,
+ 0x80708170, 0xbf0d9973,
+ 0xbfa20002, 0x84708970,
+ 0xbfa00001, 0x84708a70,
+ 0xb8fa1e06, 0x847a8a7a,
+ 0x80707a70, 0xbef600ff,
+ 0x01000000, 0xbef90080,
+ 0xbefd0080, 0xbf800000,
+ 0xbe804100, 0xbe824102,
+ 0xbe844104, 0xbe864106,
+ 0xbe884108, 0xbe8a410a,
+ 0xbe8c410c, 0xbe8e410e,
+ 0xd7610002, 0x0000f200,
+ 0x80798179, 0xd7610002,
+ 0x0000f201, 0x80798179,
+ 0xd7610002, 0x0000f202,
+ 0x80798179, 0xd7610002,
+ 0x0000f203, 0x80798179,
+ 0xd7610002, 0x0000f204,
+ 0x80798179, 0xd7610002,
+ 0x0000f205, 0x80798179,
+ 0xd7610002, 0x0000f206,
+ 0x80798179, 0xd7610002,
+ 0x0000f207, 0x80798179,
+ 0xd7610002, 0x0000f208,
+ 0x80798179, 0xd7610002,
+ 0x0000f209, 0x80798179,
+ 0xd7610002, 0x0000f20a,
+ 0x80798179, 0xd7610002,
+ 0x0000f20b, 0x80798179,
+ 0xd7610002, 0x0000f20c,
+ 0x80798179, 0xd7610002,
+ 0x0000f20d, 0x80798179,
+ 0xd7610002, 0x0000f20e,
+ 0x80798179, 0xd7610002,
+ 0x0000f20f, 0x80798179,
+ 0xbf06a079, 0xbfa10006,
+ 0xe0685000, 0x701d0200,
+ 0x8070ff70, 0x00000080,
+ 0xbef90080, 0x7e040280,
+ 0x807d907d, 0xbf0aff7d,
+ 0x00000060, 0xbfa2ffbc,
+ 0xbe804100, 0xbe824102,
+ 0xbe844104, 0xbe864106,
+ 0xbe884108, 0xbe8a410a,
+ 0xd7610002, 0x0000f200,
+ 0x80798179, 0xd7610002,
+ 0x0000f201, 0x80798179,
+ 0xd7610002, 0x0000f202,
+ 0x80798179, 0xd7610002,
+ 0x0000f203, 0x80798179,
+ 0xd7610002, 0x0000f204,
+ 0x80798179, 0xd7610002,
+ 0x0000f205, 0x80798179,
+ 0xd7610002, 0x0000f206,
+ 0x80798179, 0xd7610002,
+ 0x0000f207, 0x80798179,
+ 0xd7610002, 0x0000f208,
+ 0x80798179, 0xd7610002,
+ 0x0000f209, 0x80798179,
+ 0xd7610002, 0x0000f20a,
+ 0x80798179, 0xd7610002,
+ 0x0000f20b, 0x80798179,
+ 0xe0685000, 0x701d0200,
+ 0xbefe00c1, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8fb4306, 0x8b7bc17b,
+ 0xbfa10044, 0xbfbd0000,
+ 0x8b7aff6d, 0x80000000,
+ 0xbfa10040, 0x847b867b,
+ 0x847b827b, 0xbef6007b,
+ 0xb8f03b05, 0x80708170,
+ 0xbf0d9973, 0xbfa20002,
+ 0x84708970, 0xbfa00001,
+ 0x84708a70, 0xb8fa1e06,
+ 0x847a8a7a, 0x80707a70,
+ 0x8070ff70, 0x00000200,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xd71f0000, 0x000100c1,
+ 0xd7200000, 0x000200c1,
+ 0x16000084, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa20012,
+ 0xbe8300ff, 0x00000080,
+ 0xbf800000, 0xbf800000,
+ 0xbf800000, 0xd8d80000,
+ 0x01000000, 0xbf890000,
+ 0xe0685000, 0x701d0100,
+ 0x807d037d, 0x80700370,
+ 0xd5250000, 0x0001ff00,
+ 0x00000080, 0xbf0a7b7d,
+ 0xbfa2fff4, 0xbfa00011,
+ 0xbe8300ff, 0x00000100,
+ 0xbf800000, 0xbf800000,
+ 0xbf800000, 0xd8d80000,
+ 0x01000000, 0xbf890000,
+ 0xe0685000, 0x701d0100,
+ 0x807d037d, 0x80700370,
+ 0xd5250000, 0x0001ff00,
+ 0x00000100, 0xbf0a7b7d,
+ 0xbfa2fff4, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20004,
+ 0xbef000ff, 0x00000200,
+ 0xbeff0080, 0xbfa00003,
+ 0xbef000ff, 0x00000400,
+ 0xbeff00c1, 0xb8fb3b05,
+ 0x807b817b, 0x847b827b,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20017,
+ 0xbef600ff, 0x01000000,
+ 0xbefd0084, 0xbf0a7b7d,
+ 0xbfa10037, 0x7e008700,
+ 0x7e028701, 0x7e048702,
+ 0x7e068703, 0xe0685000,
+ 0x701d0000, 0xe0685080,
+ 0x701d0100, 0xe0685100,
+ 0x701d0200, 0xe0685180,
+ 0x701d0300, 0x807d847d,
+ 0x8070ff70, 0x00000200,
+ 0xbf0a7b7d, 0xbfa2ffef,
+ 0xbfa00025, 0xbef600ff,
+ 0x01000000, 0xbefd0084,
+ 0xbf0a7b7d, 0xbfa10011,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
+ 0xe0685000, 0x701d0000,
+ 0xe0685100, 0x701d0100,
+ 0xe0685200, 0x701d0200,
+ 0xe0685300, 0x701d0300,
+ 0x807d847d, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7d,
+ 0xbfa2ffef, 0xb8fb1e06,
+ 0x8b7bc17b, 0xbfa1000c,
+ 0x847b837b, 0x807b7d7b,
+ 0xbefe00c1, 0xbeff0080,
+ 0x7e008700, 0xe0685000,
+ 0x701d0000, 0x807d817d,
+ 0x8070ff70, 0x00000080,
+ 0xbf0a7b7d, 0xbfa2fff8,
+ 0xbfa00141, 0xbef4007e,
+ 0x8b75ff7f, 0x0000ffff,
+ 0x8c75ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x10807fac, 0xb8f202dc,
+ 0x84729972, 0x8b6eff7f,
+ 0x04000000, 0xbfa1003a,
+ 0xbefe00c1, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8ef4306, 0x8b6fc16f,
+ 0xbfa1002f, 0x846f866f,
+ 0x846f826f, 0xbef6006f,
+ 0xb8f83b05, 0x80788178,
+ 0xbf0d9972, 0xbfa20002,
+ 0x84788978, 0xbfa00001,
+ 0x84788a78, 0xb8ee1e06,
+ 0x846e8a6e, 0x80786e78,
+ 0x8078ff78, 0x00000200,
+ 0x8078ff78, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0x857d9972, 0x8b7d817d,
+ 0xbf06817d, 0xbefd0080,
+ 0xbfa2000c, 0xe0500000,
+ 0x781d0000, 0xbf8903f7,
+ 0xdac00000, 0x00000000,
+ 0x807dff7d, 0x00000080,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff5,
+ 0xbfa0000b, 0xe0500000,
+ 0x781d0000, 0xbf8903f7,
+ 0xdac00000, 0x00000000,
+ 0x807dff7d, 0x00000100,
+ 0x8078ff78, 0x00000100,
+ 0xbf0a6f7d, 0xbfa2fff5,
+ 0xbef80080, 0xbefe00c1,
+ 0x857d9972, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20002,
+ 0xbeff0080, 0xbfa00001,
+ 0xbeff00c1, 0xb8ef3b05,
+ 0x806f816f, 0x846f826f,
+ 0x857d9972, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20024,
+ 0xbef600ff, 0x01000000,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000200, 0xbefd0084,
+ 0xbf0a6f7d, 0xbfa10050,
+ 0xe0505000, 0x781d0000,
+ 0xe0505080, 0x781d0100,
+ 0xe0505100, 0x781d0200,
+ 0xe0505180, 0x781d0300,
+ 0xbf8903f7, 0x7e008500,
+ 0x7e028501, 0x7e048502,
+ 0x7e068503, 0x807d847d,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7d, 0xbfa2ffee,
+ 0xe0505000, 0x6e1d0000,
+ 0xe0505080, 0x6e1d0100,
+ 0xe0505100, 0x6e1d0200,
+ 0xe0505180, 0x6e1d0300,
+ 0xbf8903f7, 0xbfa00034,
+ 0xbef600ff, 0x01000000,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefd0084,
+ 0xbf0a6f7d, 0xbfa10012,
+ 0xe0505000, 0x781d0000,
+ 0xe0505100, 0x781d0100,
+ 0xe0505200, 0x781d0200,
+ 0xe0505300, 0x781d0300,
+ 0xbf8903f7, 0x7e008500,
+ 0x7e028501, 0x7e048502,
+ 0x7e068503, 0x807d847d,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7d, 0xbfa2ffee,
+ 0xb8ef1e06, 0x8b6fc16f,
+ 0xbfa1000e, 0x846f836f,
+ 0x806f7d6f, 0xbefe00c1,
+ 0xbeff0080, 0xe0505000,
+ 0x781d0000, 0xbf8903f7,
+ 0x7e008500, 0x807d817d,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff7,
+ 0xbeff00c1, 0xe0505000,
+ 0x6e1d0000, 0xe0505100,
+ 0x6e1d0100, 0xe0505200,
+ 0x6e1d0200, 0xe0505300,
+ 0x6e1d0300, 0xbf8903f7,
+ 0xb8f83b05, 0x80788178,
+ 0xbf0d9972, 0xbfa20002,
+ 0x84788978, 0xbfa00001,
+ 0x84788a78, 0xb8ee1e06,
+ 0x846e8a6e, 0x80786e78,
+ 0x8078ff78, 0x00000200,
+ 0x80f8ff78, 0x00000050,
+ 0xbef600ff, 0x01000000,
+ 0xbefd00ff, 0x0000006c,
+ 0x80f89078, 0xf428403a,
+ 0xf0000000, 0xbf89fc07,
+ 0x80fd847d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0x80f8a078, 0xf42c403a,
+ 0xf0000000, 0xbf89fc07,
+ 0x80fd887d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0x80f8c078, 0xf430403a,
+ 0xf0000000, 0xbf89fc07,
+ 0x80fd907d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0xbe884308, 0xbe8a430a,
+ 0xbe8c430c, 0xbe8e430e,
+ 0xbf06807d, 0xbfa1fff0,
+ 0xb980f801, 0x00000000,
+ 0xbfbd0000, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0xbef600ff,
+ 0x01000000, 0xf4205bfa,
+ 0xf0000000, 0x80788478,
+ 0xf4205b3a, 0xf0000000,
+ 0x80788478, 0xf4205b7a,
+ 0xf0000000, 0x80788478,
+ 0xf4205c3a, 0xf0000000,
+ 0x80788478, 0xf4205c7a,
+ 0xf0000000, 0x80788478,
+ 0xf4205eba, 0xf0000000,
+ 0x80788478, 0xf4205efa,
+ 0xf0000000, 0x80788478,
+ 0xf4205e7a, 0xf0000000,
+ 0x80788478, 0xf4205cfa,
+ 0xf0000000, 0x80788478,
+ 0xf4205bba, 0xf0000000,
+ 0x80788478, 0xbf89fc07,
+ 0xb96ef814, 0xf4205bba,
+ 0xf0000000, 0x80788478,
+ 0xbf89fc07, 0xb96ef815,
+ 0xbefd006f, 0xbefe0070,
+ 0xbeff0071, 0x8b6f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x8b6f7bff, 0xfffff800,
+ 0x856f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee3b05,
+ 0x806e816e, 0xbf0d9972,
+ 0xbfa20002, 0x846e896e,
+ 0xbfa00001, 0x846e8a6e,
+ 0xb8ef1e06, 0x846f8a6f,
+ 0x806e6f6e, 0x806eff6e,
+ 0x00000200, 0x806e746e,
+ 0x826f8075, 0x8b6fff6f,
+ 0x0000ffff, 0xf4085c37,
+ 0xf8000050, 0xf4085d37,
+ 0xf8000060, 0xf4005e77,
+ 0xf8000074, 0xbf89fc07,
+ 0x8b6dff6d, 0x0000ffff,
+ 0x8bfe7e7e, 0x8bea6a6a,
+ 0xb97af802, 0xbe804a6c,
+ 0xbfb00000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+};
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index 5081f91190b8..250ab007399b 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -23,37 +23,52 @@
/* To compile this assembly code:
*
* Navi1x:
- * cpp -DASIC_TARGET_NAVI1X=1 cwsr_trap_handler_gfx10.asm -P -o nv1x.sp3
- * sp3-nv1x nv1x.sp3 -hex nv1x.hex
+ * cpp -DASIC_FAMILY=CHIP_NAVI10 cwsr_trap_handler_gfx10.asm -P -o nv1x.sp3
+ * sp3 nv1x.sp3 -hex nv1x.hex
*
- * Others:
- * cpp -DASIC_TARGET_NAVI1X=0 cwsr_trap_handler_gfx10.asm -P -o gfx10.sp3
- * sp3-gfx10 gfx10.sp3 -hex gfx10.hex
+ * gfx10:
+ * cpp -DASIC_FAMILY=CHIP_SIENNA_CICHLID cwsr_trap_handler_gfx10.asm -P -o gfx10.sp3
+ * sp3 gfx10.sp3 -hex gfx10.hex
+ *
+ * gfx11:
+ * cpp -DASIC_FAMILY=CHIP_PLUM_BONITO cwsr_trap_handler_gfx10.asm -P -o gfx11.sp3
+ * sp3 gfx11.sp3 -hex gfx11.hex
*/
-#define NO_SQC_STORE !ASIC_TARGET_NAVI1X
+#define CHIP_NAVI10 26
+#define CHIP_SIENNA_CICHLID 30
+#define CHIP_PLUM_BONITO 36
+
+#define NO_SQC_STORE (ASIC_FAMILY >= CHIP_SIENNA_CICHLID)
+#define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID)
+#define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO)
+#define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO)
var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
-var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
-var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
+var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
-var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
-var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6
-var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24
-var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 4
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 8
var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT = 24
var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE = 4
var SQ_WAVE_IB_STS2_WAVE64_SHIFT = 11
var SQ_WAVE_IB_STS2_WAVE64_SIZE = 1
+#if ASIC_FAMILY < CHIP_PLUM_BONITO
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
+#else
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 12
+#endif
+
var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400
-var SQ_WAVE_TRAPSTS_EXCE_MASK = 0x1FF
+var SQ_WAVE_TRAPSTS_EXCP_MASK = 0x1FF
var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10
+var SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK = 0x80
+var SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT = 7
var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100
var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8
var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF
@@ -63,46 +78,37 @@ var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800
+var SQ_WAVE_TRAPSTS_EXCP_HI_MASK = 0x7000
+
+var SQ_WAVE_MODE_EXCP_EN_SHIFT = 12
+var SQ_WAVE_MODE_EXCP_EN_ADDR_WATCH_SHIFT = 19
-var SQ_WAVE_IB_STS_RCNT_SHIFT = 16
var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15
var SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT = 25
-var SQ_WAVE_IB_STS_REPLAY_W64H_SIZE = 1
var SQ_WAVE_IB_STS_REPLAY_W64H_MASK = 0x02000000
-var SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE = 1
-var SQ_WAVE_IB_STS_RCNT_SIZE = 6
var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x003F8000
-var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF
var SQ_WAVE_MODE_DEBUG_EN_MASK = 0x800
-var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24
-var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27
-
// bits [31:24] unused by SPI debug data
var TTMP11_SAVE_REPLAY_W64H_SHIFT = 31
var TTMP11_SAVE_REPLAY_W64H_MASK = 0x80000000
var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT = 24
var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK = 0x7F000000
+var TTMP11_DEBUG_TRAP_ENABLED_SHIFT = 23
+var TTMP11_DEBUG_TRAP_ENABLED_MASK = 0x800000
// SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14]
// when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000
var S_SAVE_BUF_RSRC_WORD3_MISC = 0x10807FAC
-
-var S_SAVE_SPI_INIT_ATC_MASK = 0x08000000
-var S_SAVE_SPI_INIT_ATC_SHIFT = 27
-var S_SAVE_SPI_INIT_MTYPE_MASK = 0x70000000
-var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28
+var S_SAVE_PC_HI_TRAP_ID_MASK = 0x00FF0000
+var S_SAVE_PC_HI_HT_MASK = 0x01000000
var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26
-var S_SAVE_PC_HI_RCNT_SHIFT = 26
-var S_SAVE_PC_HI_RCNT_MASK = 0xFC000000
-var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 25
-var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x02000000
-var S_SAVE_PC_HI_REPLAY_W64H_SHIFT = 24
-var S_SAVE_PC_HI_REPLAY_W64H_MASK = 0x01000000
+var S_SAVE_PC_HI_FIRST_WAVE_MASK = 0x80000000
+var S_SAVE_PC_HI_FIRST_WAVE_SHIFT = 31
var s_sgpr_save_num = 108
@@ -130,19 +136,10 @@ var s_save_ttmps_hi = s_save_trapsts
var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC
-var S_RESTORE_SPI_INIT_ATC_MASK = 0x08000000
-var S_RESTORE_SPI_INIT_ATC_SHIFT = 27
-var S_RESTORE_SPI_INIT_MTYPE_MASK = 0x70000000
-var S_RESTORE_SPI_INIT_MTYPE_SHIFT = 28
var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26
var S_WAVE_SIZE = 25
-var S_RESTORE_PC_HI_RCNT_SHIFT = S_SAVE_PC_HI_RCNT_SHIFT
-var S_RESTORE_PC_HI_RCNT_MASK = S_SAVE_PC_HI_RCNT_MASK
-var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
-var S_RESTORE_PC_HI_FIRST_REPLAY_MASK = S_SAVE_PC_HI_FIRST_REPLAY_MASK
-
var s_restore_spi_init_lo = exec_lo
var s_restore_spi_init_hi = exec_hi
var s_restore_mem_offset = ttmp12
@@ -179,84 +176,133 @@ L_JUMP_TO_RESTORE:
L_SKIP_RESTORE:
s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
- s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK
-if SINGLE_STEP_MISSED_WORKAROUND
- // No single step exceptions if MODE.DEBUG_EN=0.
- s_getreg_b32 ttmp2, hwreg(HW_REG_MODE)
- s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
- s_cbranch_scc0 L_NO_SINGLE_STEP_WORKAROUND
+ // Clear SPI_PRIO: do not save with elevated priority.
+ // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd.
+ s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK|SQ_WAVE_STATUS_ECC_ERR_MASK
+
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- // Second-level trap already handled exception if STATUS.HALT=1.
s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+ s_cbranch_scc0 L_NOT_HALTED
+
+L_HALTED:
+ // Host trap may occur while wave is halted.
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+L_CHECK_SAVE:
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK
+ s_cbranch_scc1 L_SAVE
+
+ // Wave is halted but neither host trap nor SAVECTX is raised.
+ // Caused by instruction fetch memory violation.
+ // Spin wait until context saved to prevent interrupt storm.
+ s_sleep 0x10
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_branch L_CHECK_SAVE
+
+L_NOT_HALTED:
+ // Let second-level handle non-SAVECTX exception or trap.
+ // Any concurrent SAVECTX will be handled upon re-entry once halted.
+
+ // Check non-maskable exceptions. memory_violation, illegal_instruction
+ // and xnack_error exceptions always cause the wave to enter the trap
+ // handler.
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+ // Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
+ // Maskable exceptions only cause the wave to enter the trap handler if
+ // their respective bit in mode.excp_en is set.
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCP_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
+ s_cbranch_scc0 L_CHECK_TRAP_ID
+
+ s_and_b32 ttmp3, s_save_trapsts, SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
+ s_cbranch_scc0 L_NOT_ADDR_WATCH
+ s_bitset1_b32 ttmp2, SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT // Check all addr_watch[123] exceptions against excp_en.addr_watch
+
+L_NOT_ADDR_WATCH:
+ s_getreg_b32 ttmp3, hwreg(HW_REG_MODE)
+ s_lshl_b32 ttmp2, ttmp2, SQ_WAVE_MODE_EXCP_EN_SHIFT
+ s_and_b32 ttmp2, ttmp2, ttmp3
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+L_CHECK_TRAP_ID:
+ // Check trap_id != 0
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+if SINGLE_STEP_MISSED_WORKAROUND
// Prioritize single step exception over context save.
// Second-level trap will halt wave and RFE, re-entering for SAVECTX.
- s_cbranch_scc0 L_FETCH_2ND_TRAP
-
-L_NO_SINGLE_STEP_WORKAROUND:
+ s_getreg_b32 ttmp2, hwreg(HW_REG_MODE)
+ s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
end
-
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK
s_cbranch_scc1 L_SAVE
- // If STATUS.MEM_VIOL is asserted then halt the wave to prevent
- // the exception raising again and blocking context save.
- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
- s_cbranch_scc0 L_FETCH_2ND_TRAP
- s_or_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
-
L_FETCH_2ND_TRAP:
-
-#if ASIC_TARGET_NAVI1X
- // Preserve and clear scalar XNACK state before issuing scalar loads.
- // Save IB_STS.REPLAY_W64H[25], RCNT[21:16], FIRST_REPLAY[15] into
- // unused space ttmp11[31:24].
- s_andn2_b32 ttmp11, ttmp11, (TTMP11_SAVE_REPLAY_W64H_MASK | TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK)
- s_getreg_b32 ttmp2, hwreg(HW_REG_IB_STS)
- s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
- s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
- s_or_b32 ttmp11, ttmp11, ttmp3
- s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
- s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
- s_or_b32 ttmp11, ttmp11, ttmp3
- s_andn2_b32 ttmp2, ttmp2, (SQ_WAVE_IB_STS_REPLAY_W64H_MASK | SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK)
- s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
+#if HAVE_XNACK
+ save_and_clear_ib_sts(ttmp14, ttmp15)
#endif
// Read second-level TBA/TMA from first-level TMA and jump if available.
// ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
// ttmp12 holds SQ_WAVE_STATUS
+#if HAVE_SENDMSG_RTN
+ s_sendmsg_rtn_b64 [ttmp14, ttmp15], sendmsg(MSG_RTN_GET_TMA)
+ s_waitcnt lgkmcnt(0)
+#else
s_getreg_b32 ttmp14, hwreg(HW_REG_SHADER_TMA_LO)
s_getreg_b32 ttmp15, hwreg(HW_REG_SHADER_TMA_HI)
+#endif
s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+
+ s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 glc:1 // debug trap enabled flag
+ s_waitcnt lgkmcnt(0)
+ s_lshl_b32 ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT
+ s_andn2_b32 ttmp11, ttmp11, TTMP11_DEBUG_TRAP_ENABLED_MASK
+ s_or_b32 ttmp11, ttmp11, ttmp2
+
s_load_dwordx2 [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 glc:1 // second-level TBA
s_waitcnt lgkmcnt(0)
s_load_dwordx2 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 glc:1 // second-level TMA
s_waitcnt lgkmcnt(0)
+
s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
 s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler has not been set
s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler
L_NO_NEXT_TRAP:
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK
- s_cbranch_scc1 L_EXCP_CASE // Exception, jump back to the shader program directly.
- s_add_u32 ttmp0, ttmp0, 4 // S_TRAP case, add 4 to ttmp0
- s_addc_u32 ttmp1, ttmp1, 0
-L_EXCP_CASE:
+ // If not caused by trap then halt wave to prevent re-entry.
+ s_and_b32 ttmp2, s_save_pc_hi, (S_SAVE_PC_HI_TRAP_ID_MASK|S_SAVE_PC_HI_HT_MASK)
+ s_cbranch_scc1 L_TRAP_CASE
+ s_or_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+
+ // If the PC points to S_ENDPGM then context save will fail if STATUS.HALT is set.
+ // Rewind the PC to prevent this from occurring.
+ s_sub_u32 ttmp0, ttmp0, 0x8
+ s_subb_u32 ttmp1, ttmp1, 0x0
+
+ s_branch L_EXIT_TRAP
+
+L_TRAP_CASE:
+ // Host trap will not cause trap re-entry.
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_HT_MASK
+ s_cbranch_scc1 L_EXIT_TRAP
+
+ // Advance past trap instruction to prevent re-entry.
+ s_add_u32 ttmp0, ttmp0, 0x4
+ s_addc_u32 ttmp1, ttmp1, 0x0
+
+L_EXIT_TRAP:
s_and_b32 ttmp1, ttmp1, 0xFFFF
-#if ASIC_TARGET_NAVI1X
- // Restore SQ_WAVE_IB_STS.
- s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
- s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
- s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
- s_and_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
- s_or_b32 ttmp2, ttmp2, ttmp3
- s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
+#if HAVE_XNACK
+ restore_ib_sts(ttmp14, ttmp15)
#endif
// Restore SQ_WAVE_STATUS.
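As a reading aid: the three scalar loads above imply a small fixed layout for the first-level TMA. The C sketch below is illustrative only; the struct and field names are hypothetical, the 0x0/0x8/0x10 offsets come from the loads, and the 256-byte granularity of the HW_REG_SHADER_TMA value is inferred from the s_lshl_b64 by 8.

#include <stdint.h>

/* Hypothetical names; offsets mirror the s_load instructions above. */
struct first_level_tma {
	uint64_t second_level_tba;   /* 0x00: entry point of the second-level trap handler */
	uint64_t second_level_tma;   /* 0x08: TMA handed on to the second-level handler */
	uint32_t debug_trap_enabled; /* 0x10: shifted into ttmp11 (TTMP11_DEBUG_TRAP_ENABLED) */
};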
@@ -271,20 +317,8 @@ L_SAVE:
s_mov_b32 s_save_tmp, 0
s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
-#if ASIC_TARGET_NAVI1X
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE)
- s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
- s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE)
- s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
- s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT, SQ_WAVE_IB_STS_REPLAY_W64H_SIZE)
- s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_REPLAY_W64H_SHIFT
- s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS) //clear RCNT and FIRST_REPLAY and REPLAY_W64H in IB_STS
- s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG
-
- s_setreg_b32 hwreg(HW_REG_IB_STS), s_save_tmp
+#if HAVE_XNACK
+ save_and_clear_ib_sts(s_save_tmp, s_save_trapsts)
#endif
/* inform SPI the readiness and wait for SPI's go signal */
@@ -292,9 +326,13 @@ L_SAVE:
s_mov_b32 s_save_exec_hi, exec_hi
s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
+#if HAVE_SENDMSG_RTN
+ s_sendmsg_rtn_b64 [exec_lo, exec_hi], sendmsg(MSG_RTN_SAVE_WAVE)
+#else
s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
+#endif
-#if ASIC_TARGET_NAVI1X
+#if ASIC_FAMILY < CHIP_SIENNA_CICHLID
L_SLEEP:
// sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause
// SQ hang, since the 7,8th wave could not get arbit to exec inst, while
@@ -305,16 +343,57 @@ L_SLEEP:
s_waitcnt lgkmcnt(0)
#endif
+ // Save first_wave flag so we can clear high bits of save address.
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
+ s_lshl_b32 s_save_tmp, s_save_tmp, (S_SAVE_PC_HI_FIRST_WAVE_SHIFT - S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT)
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+
+#if NO_SQC_STORE
+ // Trap temporaries must be saved via VGPR but all VGPRs are in use.
+ // There is no ttmp space to hold the resource constant for VGPR save.
+ // Save v0 by itself since it requires only two SGPRs.
+ s_mov_b32 s_save_ttmps_lo, exec_lo
+ s_and_b32 s_save_ttmps_hi, exec_hi, 0xFFFF
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+ global_store_dword_addtid v0, [s_save_ttmps_lo, s_save_ttmps_hi] slc:1 glc:1
+ v_mov_b32 v0, 0x0
+ s_mov_b32 exec_lo, s_save_ttmps_lo
+ s_mov_b32 exec_hi, s_save_ttmps_hi
+#endif
+
// Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
- // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
+ // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
get_wave_size(s_save_ttmps_hi)
get_vgpr_size_bytes(s_save_ttmps_lo, s_save_ttmps_hi)
+ get_svgpr_size_bytes(s_save_ttmps_hi)
+ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi
s_and_b32 s_save_ttmps_hi, s_save_spi_init_hi, 0xFFFF
s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, get_sgpr_size_bytes()
s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
s_addc_u32 s_save_ttmps_hi, s_save_ttmps_hi, 0x0
-#if ASIC_TARGET_NAVI1X
+#if NO_SQC_STORE
+ v_writelane_b32 v0, ttmp4, 0x4
+ v_writelane_b32 v0, ttmp5, 0x5
+ v_writelane_b32 v0, ttmp6, 0x6
+ v_writelane_b32 v0, ttmp7, 0x7
+ v_writelane_b32 v0, ttmp8, 0x8
+ v_writelane_b32 v0, ttmp9, 0x9
+ v_writelane_b32 v0, ttmp10, 0xA
+ v_writelane_b32 v0, ttmp11, 0xB
+ v_writelane_b32 v0, ttmp13, 0xD
+ v_writelane_b32 v0, exec_lo, 0xE
+ v_writelane_b32 v0, exec_hi, 0xF
+
+ s_mov_b32 exec_lo, 0x3FFF
+ s_mov_b32 exec_hi, 0x0
+ global_store_dword_addtid v0, [s_save_ttmps_lo, s_save_ttmps_hi] inst_offset:0x40 slc:1 glc:1
+ v_readlane_b32 ttmp14, v0, 0xE
+ v_readlane_b32 ttmp15, v0, 0xF
+ s_mov_b32 exec_lo, ttmp14
+ s_mov_b32 exec_hi, ttmp15
+#else
s_store_dwordx4 [ttmp4, ttmp5, ttmp6, ttmp7], [s_save_ttmps_lo, s_save_ttmps_hi], 0x50 glc:1
s_store_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_save_ttmps_lo, s_save_ttmps_hi], 0x60 glc:1
s_store_dword ttmp13, [s_save_ttmps_lo, s_save_ttmps_hi], 0x74 glc:1
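A reading aid for the two store paths above: with global_store_dword_addtid each lane n writes to byte offset inst_offset + 4*n, so the writelane slots chosen in the NO_SQC_STORE path land the ttmp values at the same offsets the s_store path uses, assuming addtid addressing behaves as described. The helper below is a hypothetical illustration, not driver code.

/* Illustration only: offset of a ttmp value saved through the addtid path. */
static unsigned int ttmp_save_offset(unsigned int lane)
{
	return 0x40 + 4 * lane; /* lane 0x4 (ttmp4) -> 0x50, lane 0xD (ttmp13) -> 0x74 */
}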
@@ -326,12 +405,6 @@ L_SLEEP:
s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
 s_mov_b32 s_save_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) although not necessarily inited
s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
- s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK
- s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)
- s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or ATC
- s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK
- s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)
- s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or MTYPE
s_mov_b32 s_save_m0, m0
@@ -339,7 +412,7 @@ L_SLEEP:
s_mov_b32 s_save_mem_offset, 0x0
get_wave_size(s_wave_size)
-#if ASIC_TARGET_NAVI1X
+#if HAVE_XNACK
// Save and clear vector XNACK state late to free up SGPRs.
s_getreg_b32 s_save_xnack_mask, hwreg(HW_REG_SHADER_XNACK_MASK)
s_setreg_imm32_b32 hwreg(HW_REG_SHADER_XNACK_MASK), 0x0
@@ -361,7 +434,9 @@ L_SAVE_4VGPR_WAVE32:
// VGPR Allocated in 4-GPR granularity
+#if !NO_SQC_STORE
buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+#endif
buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*2
buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*3
@@ -372,7 +447,9 @@ L_SAVE_4VGPR_WAVE64:
// VGPR Allocated in 4-GPR granularity
+#if !NO_SQC_STORE
buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+#endif
buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
@@ -397,7 +474,8 @@ L_SAVE_HWREG:
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)
- write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
+ s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+ write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_status, s_save_buf_rsrc0, s_save_mem_offset)
@@ -418,9 +496,13 @@ L_SAVE_HWREG:
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
#if NO_SQC_STORE
- // Write HWREG/SGPRs with 32 VGPR lanes, wave32 is common case.
+ // Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
+ s_mov_b32 exec_lo, 0xFFFF
s_mov_b32 exec_hi, 0x0
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+
+ // Write SGPRs with 32 VGPR lanes. This works in wave32 and wave64 mode.
+ s_mov_b32 exec_lo, 0xFFFFFFFF
#endif
/* save SGPRs */
@@ -506,7 +588,7 @@ L_SAVE_LDS_NORMAL:
s_cbranch_scc0 L_SAVE_LDS_DONE //no lds used? jump to L_SAVE_DONE
s_barrier //LDS is used? wait for other waves in the same TG
- s_and_b32 s_save_tmp, s_wave_size, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
+ s_and_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
s_cbranch_scc0 L_SAVE_LDS_DONE
// first wave do LDS save;
@@ -628,7 +710,7 @@ L_SAVE_VGPR_WAVE64:
// VGPR store using dw burst
s_mov_b32 m0, 0x4 //VGPR initial index value =4
s_cmp_lt_u32 m0, s_save_alloc_size
- s_cbranch_scc0 L_SAVE_VGPR_END
+ s_cbranch_scc0 L_SAVE_SHARED_VGPR
L_SAVE_VGPR_W64_LOOP:
v_movrels_b32 v0, v0 //v0 = v[0+m0]
@@ -646,6 +728,7 @@ L_SAVE_VGPR_W64_LOOP:
s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
s_cbranch_scc1 L_SAVE_VGPR_W64_LOOP //VGPR save is complete?
+L_SAVE_SHARED_VGPR:
//Below part will be the save shared vgpr part (new for gfx10)
s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
@@ -674,12 +757,7 @@ L_RESTORE:
s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK
- s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)
- s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK
- s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)
- s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE
+
//determine it is wave32 or wave64
get_wave_size(s_restore_size)
@@ -722,7 +800,13 @@ L_RESTORE_LDS_NORMAL:
s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64
L_RESTORE_LDS_LOOP_W32:
+#if HAVE_BUFFER_LDS_LOAD
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
+#else
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
+ s_waitcnt vmcnt(0)
+ ds_store_addtid_b32 v0
+#endif
s_add_u32 m0, m0, 128 // 128 DW
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 //mem offset increased by 128DW
s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
@@ -730,7 +814,13 @@ L_RESTORE_LDS_LOOP_W32:
s_branch L_RESTORE_VGPR
L_RESTORE_LDS_LOOP_W64:
+#if HAVE_BUFFER_LDS_LOAD
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
+#else
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
+ s_waitcnt vmcnt(0)
+ ds_store_addtid_b32 v0
+#endif
s_add_u32 m0, m0, 256 // 256 DW
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //mem offset increased by 256DW
s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
@@ -765,6 +855,8 @@ L_RESTORE_VGPR_NORMAL:
s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4
s_mov_b32 m0, 4 //VGPR initial index value = 4
+ s_cmp_lt_u32 m0, s_restore_alloc_size
+ s_cbranch_scc0 L_RESTORE_SGPR
L_RESTORE_VGPR_WAVE32_LOOP:
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
@@ -786,6 +878,7 @@ L_RESTORE_VGPR_WAVE32_LOOP:
buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128
buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*2
buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*3
+ s_waitcnt vmcnt(0)
s_branch L_RESTORE_SGPR
@@ -796,6 +889,8 @@ L_RESTORE_VGPR_WAVE64:
s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v4, v0 will be the last
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
s_mov_b32 m0, 4 //VGPR initial index value = 4
+ s_cmp_lt_u32 m0, s_restore_alloc_size
+ s_cbranch_scc0 L_RESTORE_SHARED_VGPR
L_RESTORE_VGPR_WAVE64_LOOP:
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
@@ -812,6 +907,7 @@ L_RESTORE_VGPR_WAVE64_LOOP:
s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
s_cbranch_scc1 L_RESTORE_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
+L_RESTORE_SHARED_VGPR:
//Below part will be the restore shared vgpr part (new for gfx10)
s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size
s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
@@ -935,7 +1031,7 @@ L_RESTORE_HWREG:
s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts
s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE), s_restore_m0
-#if ASIC_TARGET_NAVI1X
+#if HAVE_XNACK
s_setreg_b32 hwreg(HW_REG_SHADER_XNACK_MASK), s_restore_xnack_mask
#endif
@@ -945,8 +1041,10 @@ L_RESTORE_HWREG:
s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode
// Restore trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
- // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
+ // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
get_vgpr_size_bytes(s_restore_ttmps_lo, s_restore_size)
+ get_svgpr_size_bytes(s_restore_ttmps_hi)
+ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi
s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, get_sgpr_size_bytes()
s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
s_addc_u32 s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
@@ -956,24 +1054,8 @@ L_RESTORE_HWREG:
s_load_dword ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 glc:1
s_waitcnt lgkmcnt(0)
-#if ASIC_TARGET_NAVI1X
- s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
- s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT
- s_mov_b32 s_restore_tmp, 0x0
- s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
- s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_FIRST_REPLAY_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
- s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT
- s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
- s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_REPLAY_W64H_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_REPLAY_W64H_SHIFT
- s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT
- s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
-
- s_and_b32 s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
- s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp
+#if HAVE_XNACK
+ restore_ib_sts(s_restore_tmp, s_restore_m0)
#endif
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
@@ -1089,5 +1171,29 @@ end
function get_wave_size(s_reg)
s_getreg_b32 s_reg, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE
- s_or_b32 s_reg, s_save_spi_init_hi, s_reg //share with exec_hi, it's at bit25
+end
+
+function save_and_clear_ib_sts(tmp1, tmp2)
+ // Preserve and clear scalar XNACK state before issuing scalar loads.
+ // Save IB_STS.REPLAY_W64H[25], RCNT[21:16], FIRST_REPLAY[15] into
+ // unused space ttmp11[31:24].
+ s_andn2_b32 ttmp11, ttmp11, (TTMP11_SAVE_REPLAY_W64H_MASK | TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK)
+ s_getreg_b32 tmp1, hwreg(HW_REG_IB_STS)
+ s_and_b32 tmp2, tmp1, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
+ s_lshl_b32 tmp2, tmp2, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
+ s_or_b32 ttmp11, ttmp11, tmp2
+ s_and_b32 tmp2, tmp1, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+ s_lshl_b32 tmp2, tmp2, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+ s_or_b32 ttmp11, ttmp11, tmp2
+ s_andn2_b32 tmp1, tmp1, (SQ_WAVE_IB_STS_REPLAY_W64H_MASK | SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK)
+ s_setreg_b32 hwreg(HW_REG_IB_STS), tmp1
+end
+
+function restore_ib_sts(tmp1, tmp2)
+ s_lshr_b32 tmp1, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+ s_and_b32 tmp2, tmp1, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+ s_lshr_b32 tmp1, ttmp11, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
+ s_and_b32 tmp1, tmp1, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
+ s_or_b32 tmp1, tmp1, tmp2
+ s_setreg_b32 hwreg(HW_REG_IB_STS), tmp1
end
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index eed78a04e7c7..6770cbe3250a 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -46,8 +46,6 @@ var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN
/**************************************************************************/
/* variables */
/**************************************************************************/
-var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
-var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
@@ -56,6 +54,7 @@ var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
var SQ_WAVE_STATUS_ALLOW_REPLAY_MASK = 0x400000
+var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -72,8 +71,10 @@ var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
#endif
var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400
-var SQ_WAVE_TRAPSTS_EXCE_MASK = 0x1FF // Exception mask
+var SQ_WAVE_TRAPSTS_EXCP_MASK = 0x1FF
var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10
+var SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK = 0x80
+var SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT = 7
var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100
var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8
var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF
@@ -83,37 +84,30 @@ var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800
+var SQ_WAVE_TRAPSTS_EXCP_HI_MASK = 0x7000
var SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK = 0x10000000
-var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME
+var SQ_WAVE_MODE_EXCP_EN_SHIFT = 12
+var SQ_WAVE_MODE_EXCP_EN_ADDR_WATCH_SHIFT = 19
+
var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME
var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x1F8000
-var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME
var SQ_WAVE_MODE_DEBUG_EN_MASK = 0x800
-var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24
-var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27
-
var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT = 26 // bits [31:26] unused by SPI debug data
var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK = 0xFC000000
+var TTMP11_DEBUG_TRAP_ENABLED_SHIFT = 23
+var TTMP11_DEBUG_TRAP_ENABLED_MASK = 0x800000
/* Save */
var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 //stride is 4 bytes
var S_SAVE_BUF_RSRC_WORD3_MISC = 0x00807FAC //SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14] when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
-
-var S_SAVE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit
-var S_SAVE_SPI_INIT_ATC_SHIFT = 27
-var S_SAVE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype
-var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28
+var S_SAVE_PC_HI_TRAP_ID_MASK = 0x00FF0000
+var S_SAVE_PC_HI_HT_MASK = 0x01000000
var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG
var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26
-var S_SAVE_PC_HI_RCNT_SHIFT = 27 //FIXME check with Brian to ensure all fields other than PC[47:0] can be used
-var S_SAVE_PC_HI_RCNT_MASK = 0xF8000000 //FIXME
-var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 26 //FIXME
-var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x04000000 //FIXME
-
var s_save_spi_init_lo = exec_lo
var s_save_spi_init_hi = exec_hi
@@ -140,18 +134,9 @@ var s_save_ttmps_hi = s_save_trapsts //no conflict
var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC
-var S_RESTORE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit
-var S_RESTORE_SPI_INIT_ATC_SHIFT = 27
-var S_RESTORE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype
-var S_RESTORE_SPI_INIT_MTYPE_SHIFT = 28
var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG
var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26
-var S_RESTORE_PC_HI_RCNT_SHIFT = S_SAVE_PC_HI_RCNT_SHIFT
-var S_RESTORE_PC_HI_RCNT_MASK = S_SAVE_PC_HI_RCNT_MASK
-var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
-var S_RESTORE_PC_HI_FIRST_REPLAY_MASK = S_SAVE_PC_HI_FIRST_REPLAY_MASK
-
var s_restore_spi_init_lo = exec_lo
var s_restore_spi_init_hi = exec_hi
@@ -199,71 +184,77 @@ L_JUMP_TO_RESTORE:
L_SKIP_RESTORE:
s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
- s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //check whether this is for save
-if SINGLE_STEP_MISSED_WORKAROUND
- // No single step exceptions if MODE.DEBUG_EN=0.
- s_getreg_b32 ttmp2, hwreg(HW_REG_MODE)
- s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
- s_cbranch_scc0 L_NO_SINGLE_STEP_WORKAROUND
+ // Clear SPI_PRIO: do not save with elevated priority.
+ // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd.
+ s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK|SQ_WAVE_STATUS_ECC_ERR_MASK
- // Second-level trap already handled exception if STATUS.HALT=1.
- s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- // Prioritize single step exception over context save.
- // Second-level trap will halt wave and RFE, re-entering for SAVECTX.
- s_cbranch_scc0 L_FETCH_2ND_TRAP
+ s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+ s_cbranch_scc0 L_NOT_HALTED
-L_NO_SINGLE_STEP_WORKAROUND:
-end
+L_HALTED:
+ // Host trap may occur while wave is halted.
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+L_CHECK_SAVE:
s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
s_cbranch_scc1 L_SAVE //this is the operation for save
- // ********* Handle non-CWSR traps *******************
-
- // Illegal instruction is a non-maskable exception which blocks context save.
- // Halt the wavefront and return from the trap.
- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
- s_cbranch_scc1 L_HALT_WAVE
-
- // If STATUS.MEM_VIOL is asserted then we cannot fetch from the TMA.
- // Instead, halt the wavefront and return from the trap.
- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
- s_cbranch_scc0 L_FETCH_2ND_TRAP
-
-L_HALT_WAVE:
- // If STATUS.HALT is set then this fault must come from SQC instruction fetch.
- // We cannot prevent further faults. Spin wait until context saved.
- s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
- s_cbranch_scc0 L_NOT_ALREADY_HALTED
-
-L_WAIT_CTX_SAVE:
+ // Wave is halted but neither host trap nor SAVECTX is raised.
+ // Caused by instruction fetch memory violation.
+ // Spin wait until context saved to prevent interrupt storm.
s_sleep 0x10
- s_getreg_b32 ttmp2, hwreg(HW_REG_TRAPSTS)
- s_and_b32 ttmp2, ttmp2, SQ_WAVE_TRAPSTS_SAVECTX_MASK
- s_cbranch_scc0 L_WAIT_CTX_SAVE
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_branch L_CHECK_SAVE
+
+L_NOT_HALTED:
+ // Let second-level handle non-SAVECTX exception or trap.
+ // Any concurrent SAVECTX will be handled upon re-entry once halted.
+
+ // Check non-maskable exceptions. memory_violation, illegal_instruction
+ // and xnack_error exceptions always cause the wave to enter the trap
+ // handler.
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+ // Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
+ // Maskable exceptions only cause the wave to enter the trap handler if
+ // their respective bit in mode.excp_en is set.
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCP_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
+ s_cbranch_scc0 L_CHECK_TRAP_ID
+
+ s_and_b32 ttmp3, s_save_trapsts, SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
+ s_cbranch_scc0 L_NOT_ADDR_WATCH
+ s_bitset1_b32 ttmp2, SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT // Check all addr_watch[123] exceptions against excp_en.addr_watch
+
+L_NOT_ADDR_WATCH:
+ s_getreg_b32 ttmp3, hwreg(HW_REG_MODE)
+ s_lshl_b32 ttmp2, ttmp2, SQ_WAVE_MODE_EXCP_EN_SHIFT
+ s_and_b32 ttmp2, ttmp2, ttmp3
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+L_CHECK_TRAP_ID:
+ // Check trap_id != 0
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
-L_NOT_ALREADY_HALTED:
- s_or_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+if SINGLE_STEP_MISSED_WORKAROUND
+ // Prioritize single step exception over context save.
+ // Second-level trap will halt wave and RFE, re-entering for SAVECTX.
+ s_getreg_b32 ttmp2, hwreg(HW_REG_MODE)
+ s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+end
- // If the PC points to S_ENDPGM then context save will fail if STATUS.HALT is set.
- // Rewind the PC to prevent this from occurring. The debugger compensates for this.
- s_sub_u32 ttmp0, ttmp0, 0x8
- s_subb_u32 ttmp1, ttmp1, 0x0
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK
+ s_cbranch_scc1 L_SAVE
L_FETCH_2ND_TRAP:
// Preserve and clear scalar XNACK state before issuing scalar reads.
- // Save IB_STS.FIRST_REPLAY[15] and IB_STS.RCNT[20:16] into unused space ttmp11[31:26].
- s_getreg_b32 ttmp2, hwreg(HW_REG_IB_STS)
- s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
- s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
- s_andn2_b32 ttmp11, ttmp11, TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK
- s_or_b32 ttmp11, ttmp11, ttmp3
-
- s_andn2_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
- s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
+ save_and_clear_ib_sts(ttmp14)
// Read second-level TBA/TMA from first-level TMA and jump if available.
// ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
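In summary, the first-level routing introduced by the hunk above decides as follows (condensed outline, not part of the patch):

    halted wave:    trap_id != 0                -> fetch second-level handler
                    TRAPSTS.SAVECTX             -> L_SAVE
                    otherwise                   -> sleep and re-check SAVECTX
    running wave:   MEM_VIOL or ILLEGAL_INST                   -> fetch second-level handler
                    maskable exception enabled in MODE.excp_en -> fetch second-level handler
                    trap_id != 0                               -> fetch second-level handler
                    MODE.DEBUG_EN (single-step workaround)     -> fetch second-level handler
                    TRAPSTS.SAVECTX                            -> L_SAVE
                    anything else falls through to the second-level fetch and is halted on exit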
@@ -271,27 +262,48 @@ L_FETCH_2ND_TRAP:
s_getreg_b32 ttmp14, hwreg(HW_REG_SQ_SHADER_TMA_LO)
s_getreg_b32 ttmp15, hwreg(HW_REG_SQ_SHADER_TMA_HI)
s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+
+ s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 glc:1 // debug trap enabled flag
+ s_waitcnt lgkmcnt(0)
+ s_lshl_b32 ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT
+ s_andn2_b32 ttmp11, ttmp11, TTMP11_DEBUG_TRAP_ENABLED_MASK
+ s_or_b32 ttmp11, ttmp11, ttmp2
+
s_load_dwordx2 [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 glc:1 // second-level TBA
s_waitcnt lgkmcnt(0)
s_load_dwordx2 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 glc:1 // second-level TMA
s_waitcnt lgkmcnt(0)
+
s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
 s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler has not been set
s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler
L_NO_NEXT_TRAP:
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK // Check whether it is an exception
- s_cbranch_scc1 L_EXCP_CASE // Exception, jump back to the shader program directly.
- s_add_u32 ttmp0, ttmp0, 4 // S_TRAP case, add 4 to ttmp0
- s_addc_u32 ttmp1, ttmp1, 0
-L_EXCP_CASE:
+ // If not caused by trap then halt wave to prevent re-entry.
+ s_and_b32 ttmp2, s_save_pc_hi, (S_SAVE_PC_HI_TRAP_ID_MASK|S_SAVE_PC_HI_HT_MASK)
+ s_cbranch_scc1 L_TRAP_CASE
+ s_or_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+
+ // If the PC points to S_ENDPGM then context save will fail if STATUS.HALT is set.
+ // Rewind the PC to prevent this from occurring.
+ s_sub_u32 ttmp0, ttmp0, 0x8
+ s_subb_u32 ttmp1, ttmp1, 0x0
+
+ s_branch L_EXIT_TRAP
+
+L_TRAP_CASE:
+ // Host trap will not cause trap re-entry.
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_HT_MASK
+ s_cbranch_scc1 L_EXIT_TRAP
+
+ // Advance past trap instruction to prevent re-entry.
+ s_add_u32 ttmp0, ttmp0, 0x4
+ s_addc_u32 ttmp1, ttmp1, 0x0
+
+L_EXIT_TRAP:
s_and_b32 ttmp1, ttmp1, 0xFFFF
- // Restore SQ_WAVE_IB_STS.
- s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
- s_and_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
- s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
+ restore_ib_sts(ttmp14)
// Restore SQ_WAVE_STATUS.
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
@@ -312,16 +324,7 @@ L_SAVE:
s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE) //save RCNT
- s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
- s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE) //save FIRST_REPLAY
- s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
- s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS) //clear RCNT and FIRST_REPLAY in IB_STS
- s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG
-
- s_setreg_b32 hwreg(HW_REG_IB_STS), s_save_tmp
+ save_and_clear_ib_sts(s_save_tmp)
/* inform SPI the readiness and wait for SPI's go signal */
s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI
@@ -360,12 +363,6 @@ L_SAVE:
s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
 s_mov_b32 s_save_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) although not necessarily inited
s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
- s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK
- s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position
- s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or ATC
- s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK
- s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position
- s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or MTYPE
//FIXME right now s_save_m0/s_save_mem_offset use tma_lo/tma_hi (might need to save them before using them?)
s_mov_b32 s_save_m0, m0 //save M0
@@ -690,12 +687,6 @@ L_RESTORE:
s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK
- s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position
- s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK
- s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position
- s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE
/* global mem offset */
// s_mov_b32 s_restore_mem_offset, 0x0 //mem offset initial value = 0
@@ -889,19 +880,7 @@ L_RESTORE:
s_load_dword ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 glc:1
s_waitcnt lgkmcnt(0)
- //reuse s_restore_m0 as a temp register
- s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
- s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT
- s_mov_b32 s_restore_tmp, 0x0 //IB_STS is zero
- s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
- s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_FIRST_REPLAY_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
- s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT
- s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
- s_and_b32 s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
- s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp
+ restore_ib_sts(s_restore_tmp)
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
@@ -910,8 +889,7 @@ L_RESTORE:
s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
-// s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
- s_rfe_restore_b64 s_restore_pc_lo, s_restore_m0 // s_restore_m0[0] is used to set STATUS.inst_atc
+ s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
/**************************************************************************/
@@ -1078,3 +1056,19 @@ function set_status_without_spi_prio(status, tmp)
s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
end
+
+function save_and_clear_ib_sts(tmp)
+ // Save IB_STS.FIRST_REPLAY[15] and IB_STS.RCNT[20:16] into unused space ttmp11[31:26].
+ s_getreg_b32 tmp, hwreg(HW_REG_IB_STS)
+ s_and_b32 tmp, tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+ s_lshl_b32 tmp, tmp, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+ s_andn2_b32 ttmp11, ttmp11, TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK
+ s_or_b32 ttmp11, ttmp11, tmp
+ s_setreg_imm32_b32 hwreg(HW_REG_IB_STS), 0x0
+end
+
+function restore_ib_sts(tmp)
+ s_lshr_b32 tmp, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+ s_and_b32 tmp, tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+ s_setreg_b32 hwreg(HW_REG_IB_STS), tmp
+end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index f1a225a20719..8667e3df2d0b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -441,10 +441,14 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_nv1x_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
- } else {
+ } else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx10_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
+ } else {
+ BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
+ kfd->cwsr_isa = cwsr_trap_gfx11_hex;
+ kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
}
kfd->cwsr_enabled = true;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 29e9ebf6d8d5..2ebf0132c25b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -531,7 +531,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
- bp.flags |= AMDGPU_AMDKFD_CREATE_SVM_BO;
+ bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
bp.type = ttm_bo_type_device;
bp.resv = NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 2e20f54bb147..8d50d207cf66 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1271,6 +1271,12 @@ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
if (!peer_dev)
continue;
+ /* Include the CPU peer in GPU hive if connected over xGMI. */
+ if (!peer_dev->gpu && !peer_dev->node_props.hive_id &&
+ dev->node_props.hive_id &&
+ dev->gpu->adev->gmc.xgmi.connected_to_cpu)
+ peer_dev->node_props.hive_id = dev->node_props.hive_id;
+
list_for_each_entry(inbound_link, &peer_dev->io_link_props,
list) {
if (inbound_link->node_to != link->node_from)
@@ -1302,22 +1308,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
- /* Include the CPU in xGMI hive if xGMI connected by assigning it the hive ID. */
- if (gpu->hive_id && gpu->adev->gmc.xgmi.connected_to_cpu) {
- struct kfd_topology_device *top_dev;
-
- down_read(&topology_lock);
-
- list_for_each_entry(top_dev, &topology_device_list, list) {
- if (top_dev->gpu)
- break;
-
- top_dev->node_props.hive_id = gpu->hive_id;
- }
-
- up_read(&topology_lock);
- }
-
/* Check to see if this gpu device exists in the topology_device_list.
* If so, assign the gpu to that device,
* else create a Virtual CRAT for this gpu device and then parse that
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a92cfb055c15..70be67a56673 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -769,7 +769,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
do {
dc_stat_get_dmub_notification(adev->dm.dc, &notify);
- if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
+ if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
DRM_ERROR("DM: notify type %d invalid!", notify.type);
continue;
}
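The comparison change above fixes an off-by-one: ARRAY_SIZE() is the element count, so the count itself is already out of range. A minimal standalone sketch of the idiom, with hypothetical names:

#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *notify_name(size_t type)
{
	static const char * const names[] = { "aux_reply", "hpd", "hpd_irq" };

	if (type >= ARRAY_SIZE(names)) /* '>' alone would let type == 3 through */
		return NULL;
	return names[type];
}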
@@ -5381,17 +5381,19 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
- bool *per_pixel_alpha, bool *global_alpha,
- int *global_alpha_value)
+ bool *per_pixel_alpha, bool *pre_multiplied_alpha,
+ bool *global_alpha, int *global_alpha_value)
{
*per_pixel_alpha = false;
+ *pre_multiplied_alpha = true;
*global_alpha = false;
*global_alpha_value = 0xff;
if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
return;
- if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+ if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
+ plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
static const uint32_t alpha_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888,
@@ -5406,6 +5408,9 @@ fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
break;
}
}
+
+ if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
+ *pre_multiplied_alpha = false;
}
if (plane_state->alpha < 0xffff) {
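For context on the new pre_multiplied_alpha flag: DRM_MODE_BLEND_PREMULTI supplies colour already scaled by its alpha, while DRM_MODE_BLEND_COVERAGE supplies straight alpha that the hardware must still multiply in. A per-channel sketch (illustrative, not DC code):

/* Illustration only: compositing one colour channel of src over dst. */
static float blend_channel(float src, float src_a, float dst, int premultiplied)
{
	if (premultiplied)                         /* DRM_MODE_BLEND_PREMULTI */
		return src + (1.0f - src_a) * dst;
	return src_a * src + (1.0f - src_a) * dst; /* DRM_MODE_BLEND_COVERAGE */
}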
@@ -5568,7 +5573,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
return ret;
fill_blending_from_plane_state(
- plane_state, &plane_info->per_pixel_alpha,
+ plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
&plane_info->global_alpha, &plane_info->global_alpha_value);
return 0;
@@ -5615,6 +5620,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->tiling_info = plane_info.tiling_info;
dc_plane_state->visible = plane_info.visible;
dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
+ dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
dc_plane_state->global_alpha = plane_info.global_alpha;
dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
dc_plane_state->dcc = plane_info.dcc;
@@ -7911,7 +7917,8 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
plane_cap && plane_cap->per_pixel_alpha) {
unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
- BIT(DRM_MODE_BLEND_PREMULTI);
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE);
drm_plane_create_alpha_property(plane);
drm_plane_create_blend_mode_property(plane, blend_caps);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 02943ca65807..cf1b5f354ae9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -122,7 +122,7 @@ static void rn_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
dpp_inst = clk_mgr->base.ctx->dc->res_pool->dpps[i]->inst;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
- prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
+ prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[dpp_inst];
if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
clk_mgr->dccg->funcs->update_dpp_dto(
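The indexing fix above matters because pipe_dppclk_khz[] is cached per DPP instance while the loop variable i walks pipes, and pipe i is not guaranteed to be driven by DPP instance i. A hypothetical illustration (values invented):

/* Illustration only: pipe 0 happens to be driven by DPP instance 2. */
static int cached_dppclk_khz[4] = { 600000, 600000, 300000, 300000 };

static int prev_clock(int pipe_i, int dpp_inst)
{
	(void)pipe_i;                       /* indexing by pipe_i would read DPP 0's entry */
	return cached_dppclk_khz[dpp_inst]; /* the cache is keyed by DPP instance */
}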
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index 27501b735a9c..a2ade6e93f5e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -91,7 +91,8 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
+ dc_is_virtual_signal(pipe->stream->signal))) {
if (disable)
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
else
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 3121dd2d2a91..fc3af81ed6c6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -122,7 +122,8 @@ static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
+ dc_is_virtual_signal(pipe->stream->signal))) {
if (disable)
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
else
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e41a48f596a3..f14449401188 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2901,14 +2901,15 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg);
}
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, true);
- else
+ } else {
/* Lock the top pipe while updating plane addrs, since freesync requires
* plane addr update event triggers to be synchronized.
* top_pipe_to_program is expected to never be NULL
*/
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
+ }
// Stream updates
if (stream_update)
@@ -2924,10 +2925,11 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, false);
- else
+ } else {
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+ }
dc->hwss.post_unlock_program_front_end(dc, context);
return;
}
@@ -3052,10 +3054,11 @@ static void commit_planes_for_stream(struct dc *dc,
}
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, false);
- else
+ } else {
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+ }
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 67ef357e5798..a789ea8af27f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -33,6 +33,7 @@
#include "gpio_service_interface.h"
#include "core_status.h"
#include "dc_link_dp.h"
+#include "dc_link_dpia.h"
#include "dc_link_ddc.h"
#include "link_hwss.h"
#include "opp.h"
@@ -240,7 +241,7 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
/* Link may not have physical HPD pin. */
if (link->ep_type != DISPLAY_ENDPOINT_PHY) {
- if (link->is_hpd_pending || !link->hpd_status)
+ if (link->is_hpd_pending || !dc_link_dpia_query_hpd_status(link))
*type = dc_connection_none;
else
*type = dc_connection_single;
@@ -1604,8 +1605,25 @@ static bool dc_link_construct_legacy(struct dc_link *link,
if (link->hpd_gpio) {
if (!link->dc->config.allow_edp_hotplug_detection)
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
- link->irq_source_hpd_rx =
- dal_irq_get_rx_source(link->hpd_gpio);
+
+ switch (link->dc->config.allow_edp_hotplug_detection) {
+ case 1: // only the 1st eDP handles hotplug
+ if (link->link_index == 0)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+ else
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ break;
+ case 2: // only the 2nd eDP handles hotplug
+ if (link->link_index == 1)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+ else
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ break;
+ default:
+ break;
+ }
}
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 340b5f90a82a..dc30ac366a50 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2783,31 +2783,37 @@ bool perform_link_training_with_retries(
struct dc_link *link = stream->link;
enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
- struct dc_link_settings current_setting = *link_setting;
+ struct dc_link_settings cur_link_settings = *link_setting;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
int fail_count = 0;
+ bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */
+ bool is_link_bw_min = /* RBR x 1 */
+ (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE);
dp_trace_commit_lt_init(link);
- if (dp_get_link_encoding_format(&current_setting) == DP_8b_10b_ENCODING)
+ if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
/* We need to do this before the link training to ensure the idle
* pattern in SST mode will be sent right after the link training
*/
link_hwss->setup_stream_encoder(pipe_ctx);
dp_trace_set_lt_start_timestamp(link, false);
- for (j = 0; j < attempts; ++j) {
+ j = 0;
+ while (j < attempts && fail_count < (attempts * 10)) {
- DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d\n",
- __func__, (unsigned int)j + 1, attempts);
+ DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d @ rate(%d) x lane(%d)\n",
+ __func__, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+ cur_link_settings.lane_count);
dp_enable_link_phy(
link,
&pipe_ctx->link_res,
signal,
pipe_ctx->clock_source->id,
- &current_setting);
+ &cur_link_settings);
if (stream->sink_patches.dppowerup_delay > 0) {
int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
@@ -2832,30 +2838,30 @@ bool perform_link_training_with_retries(
dp_set_panel_mode(link, panel_mode);
if (link->aux_access_disabled) {
- dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &current_setting);
+ dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
return true;
} else {
/** @todo Consolidate USB4 DP and DPx.x training. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
status = dc_link_dpia_perform_link_training(link,
&pipe_ctx->link_res,
- &current_setting,
+ &cur_link_settings,
skip_video_pattern);
/* Transmit idle pattern once training successful. */
- if (status == LINK_TRAINING_SUCCESS)
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
} else {
status = dc_link_dp_perform_link_training(link,
&pipe_ctx->link_res,
- &current_setting,
+ &cur_link_settings,
skip_video_pattern);
}
dp_trace_lt_total_count_increment(link, false);
dp_trace_lt_result_update(link, status, false);
dp_trace_set_lt_end_timestamp(link, false);
- if (status == LINK_TRAINING_SUCCESS)
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
return true;
}
@@ -2866,8 +2872,9 @@ bool perform_link_training_with_retries(
if (j == (attempts - 1) && link->ep_type == DISPLAY_ENDPOINT_PHY)
break;
- DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n",
- __func__, (unsigned int)j + 1, attempts);
+ DC_LOG_WARNING("%s: Link training attempt %u of %d failed @ rate(%d) x lane(%d)\n",
+ __func__, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+ cur_link_settings.lane_count);
dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
@@ -2876,27 +2883,49 @@ bool perform_link_training_with_retries(
enum dc_connection_type type = dc_connection_none;
dc_link_detect_sink(link, &type);
- if (type == dc_connection_none)
+ if (type == dc_connection_none) {
+ DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__);
break;
- } else if (do_fallback) {
+ }
+ }
+
+ /* Try to train again at original settings if:
+ * - not falling back between training attempts;
+ * - aborted previous attempt due to reasons other than sink unplug;
+ * - successfully trained but at a link rate lower than that required by stream;
+ * - reached minimum link bandwidth.
+ */
+ if (!do_fallback || (status == LINK_TRAINING_ABORT) ||
+ (status == LINK_TRAINING_SUCCESS && is_link_bw_low) ||
+ is_link_bw_min) {
+ j++;
+ cur_link_settings = *link_setting;
+ delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
+ is_link_bw_low = false;
+ is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE);
+
+ } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */
uint32_t req_bw;
uint32_t link_bw;
- decide_fallback_link_setting(link, *link_setting, &current_setting, status);
- /* Fail link training if reduced link bandwidth no longer meets
- * stream requirements.
+ decide_fallback_link_setting(link, *link_setting, &cur_link_settings, status);
+ /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to
+ * minimum link bandwidth.
*/
req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
- link_bw = dc_link_bandwidth_kbps(link, &current_setting);
- if (req_bw > link_bw)
- break;
+ link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings);
+ is_link_bw_low = (req_bw > link_bw);
+ is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE));
+
+ if (is_link_bw_low)
+ DC_LOG_WARNING("%s: Link bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
+ __func__, req_bw, link_bw);
}
msleep(delay_between_attempts);
-
- delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
}
-
return false;
}
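Taken together, the changes above turn the loop from failing once fallback drops below the stream bandwidth into alternating between fallback and the originally requested settings. A condensed C-style sketch of that policy (pseudocode; train(), sink_present(), stream_bw_kbps() and link_bw_kbps() are hypothetical stand-ins and several details are omitted):

/* Pseudocode sketch only, mirroring the control flow of the hunks above. */
j = 0;
while (j < attempts && fail_count < attempts * 10) {
	status = train(&cur_link_settings);
	if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
		return true;
	fail_count++;
	if (!sink_present())
		break;                             /* sink unplugged, stop retrying */
	if (!do_fallback || status == LINK_TRAINING_ABORT ||
	    (status == LINK_TRAINING_SUCCESS && is_link_bw_low) || is_link_bw_min) {
		j++;                               /* burn one attempt and retry at */
		cur_link_settings = *link_setting; /* the original settings */
		is_link_bw_low = false;
	} else {
		decide_fallback_link_setting(link, *link_setting, &cur_link_settings, status);
		is_link_bw_low = stream_bw_kbps() > link_bw_kbps(&cur_link_settings);
	}
	is_link_bw_min = cur_link_settings.link_rate <= LINK_RATE_LOW &&
			 cur_link_settings.lane_count <= LANE_COUNT_ONE;
	msleep(delay_between_attempts);
}
return false;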
@@ -5097,13 +5126,16 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
return true;
}
-void dp_retrieve_lttpr_cap(struct dc_link *link)
+bool dp_retrieve_lttpr_cap(struct dc_link *link)
{
+ uint8_t lttpr_dpcd_data[8];
bool allow_lttpr_non_transparent_mode = 0;
+ bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
enum dc_status status = DC_ERROR_UNEXPECTED;
+ bool is_lttpr_present = false;
- memset(link->lttpr_dpcd_data, '\0', sizeof(link->lttpr_dpcd_data));
+ memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
@@ -5113,116 +5145,82 @@ void dp_retrieve_lttpr_cap(struct dc_link *link)
allow_lttpr_non_transparent_mode = 1;
}
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
- link->lttpr_support = LTTPR_UNSUPPORTED;
-
/*
- * Logic to determine LTTPR support
+ * Logic to determine LTTPR mode
*/
- if (vbios_lttpr_interop)
- link->lttpr_support = LTTPR_SUPPORTED;
- else if (link->dc->config.allow_lttpr_non_transparent_mode.raw == 0
- || !link->dc->caps.extended_aux_timeout_support)
- link->lttpr_support = LTTPR_UNSUPPORTED;
- else
- link->lttpr_support = LTTPR_CHECK_EXT_SUPPORT;
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+ if (vbios_lttpr_enable && vbios_lttpr_interop)
+ link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+ else if (!vbios_lttpr_enable && vbios_lttpr_interop) {
+ if (allow_lttpr_non_transparent_mode)
+ link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+ else
+ link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
+ } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) {
+ if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support)
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+ else
+ link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Check DP tunnel LTTPR mode debug option. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
link->dc->debug.dpia_debug.bits.force_non_lttpr)
- link->lttpr_support = LTTPR_UNSUPPORTED;
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+#endif
- if (link->lttpr_support > LTTPR_UNSUPPORTED) {
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
/* By reading LTTPR capability, RX assumes that we will enable
* LTTPR extended aux timeout if LTTPR is present.
*/
status = core_link_read_dpcd(
link,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
- link->lttpr_dpcd_data,
- sizeof(link->lttpr_dpcd_data));
- }
-}
-
-bool dp_parse_lttpr_mode(struct dc_link *link)
-{
- bool dpcd_allow_lttpr_non_transparent_mode = false;
- bool is_lttpr_present = false;
-
- bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
-
- if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
- link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
- dpcd_allow_lttpr_non_transparent_mode = true;
- } else if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
- !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
- dpcd_allow_lttpr_non_transparent_mode = true;
+ lttpr_dpcd_data,
+ sizeof(lttpr_dpcd_data));
+
+ link->dpcd_caps.lttpr_caps.revision.raw =
+ lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_link_rate =
+ lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
+ lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_lane_count =
+ lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.mode =
+ lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_ext_timeout =
+ lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
+ lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
+ lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
+ is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
+ link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
+ if (is_lttpr_present) {
+ CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+ configure_lttpr_mode_transparent(link);
+ } else
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
}
-
- /*
- * Logic to determine LTTPR mode
- */
- if (link->lttpr_support == LTTPR_SUPPORTED)
- if (vbios_lttpr_enable)
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- else if (dpcd_allow_lttpr_non_transparent_mode)
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- else
- link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
- else // lttpr_support == LTTPR_CHECK_EXT_SUPPORT
- if (dpcd_allow_lttpr_non_transparent_mode) {
- link->lttpr_support = LTTPR_SUPPORTED;
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- } else {
- link->lttpr_support = LTTPR_UNSUPPORTED;
- }
-
- if (link->lttpr_support == LTTPR_UNSUPPORTED)
- return false;
-
- link->dpcd_caps.lttpr_caps.revision.raw =
- link->lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_link_rate =
- link->lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
- link->lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_lane_count =
- link->lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.mode =
- link->lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_ext_timeout =
- link->lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
- link->lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
- link->lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
-
- /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
- is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
- link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
- link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
- if (is_lttpr_present) {
- CONN_DATA_DETECT(link, link->lttpr_dpcd_data, sizeof(link->lttpr_dpcd_data), "LTTPR Caps: ");
- configure_lttpr_mode_transparent(link);
- } else
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
-
return is_lttpr_present;
}
@@ -5374,8 +5372,7 @@ static bool retrieve_link_cap(struct dc_link *link)
status = wa_try_to_wake_dprx(link, timeout_ms);
}
- dp_retrieve_lttpr_cap(link);
-
+ is_lttpr_present = dp_retrieve_lttpr_cap(link);
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
@@ -5411,9 +5408,6 @@ static bool retrieve_link_cap(struct dc_link *link)
return false;
}
- if (link->lttpr_support > LTTPR_UNSUPPORTED)
- is_lttpr_present = dp_parse_lttpr_mode(link);
-
if (!is_lttpr_present)
dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
index a5765f36d86f..1b7a8774b0c9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
@@ -34,6 +34,7 @@
#include "dm_helpers.h"
#include "dmub/inc/dmub_cmd.h"
#include "inc/link_dpcd.h"
+#include "dc_dmub_srv.h"
#define DC_LOGGER \
link->ctx->logger
@@ -69,6 +70,24 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
return status;
}
+bool dc_link_dpia_query_hpd_status(struct dc_link *link)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
+ bool is_hpd_high = false;
+
+ /* prepare QUERY_HPD command */
+ cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
+ cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
+ cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
+
+ /* Return HPD status reported by DMUB if query successfully executed. */
+ if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
+ is_hpd_high = cmd.query_hpd.data.result;
+
+ return is_hpd_high;
+}
+
/* Configure link as prescribed in link_setting; set LTTPR mode; and
* Initialize link training settings.
* Abort link training if sink unplug detected.
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index e6b9c6a71841..5bc6ff2fa73e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -61,6 +61,8 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl
plane_state->blend_tf->type = TF_TYPE_BYPASS;
}
+ plane_state->pre_multiplied_alpha = true;
+
}
static void dc_plane_destruct(struct dc_plane_state *plane_state)
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 26c24db8f1da..3960c74482be 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.185"
+#define DC_VER "3.2.186"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -329,7 +329,7 @@ struct dc_config {
bool disable_dmcu;
bool enable_4to1MPC;
bool enable_windowed_mpo_odm;
- bool allow_edp_hotplug_detection;
+ uint32_t allow_edp_hotplug_detection;
bool clamp_min_dcfclk;
uint64_t vblank_alignment_dto_params;
uint8_t vblank_alignment_max_frame_time_diff;
@@ -1011,6 +1011,7 @@ struct dc_plane_state {
bool is_tiling_rotated;
bool per_pixel_alpha;
+ bool pre_multiplied_alpha;
bool global_alpha;
int global_alpha_value;
bool visible;
@@ -1045,6 +1046,7 @@ struct dc_plane_info {
bool horizontal_mirror;
bool visible;
bool per_pixel_alpha;
+ bool pre_multiplied_alpha;
bool global_alpha;
int global_alpha_value;
bool input_csc_enabled;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 251f2bbc96b9..a3c37ee3f849 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -129,8 +129,6 @@ struct dc_link {
bool link_state_valid;
bool aux_access_disabled;
bool sync_lt_in_progress;
- uint8_t lttpr_dpcd_data[8];
- enum lttpr_support lttpr_support;
enum lttpr_mode lttpr_mode;
bool is_internal_display;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 29e20d92b0bb..9e39cd7b203e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -87,7 +87,8 @@ static void release_engine(
engine->ddc = NULL;
- REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+ REG_UPDATE_2(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1,
+ AUX_SW_USE_AUX_REG_REQ, 0);
}
#define SW_CAN_ACCESS_AUX 1
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 5e6fea85a7b5..845aa8a1027d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1101,9 +1101,12 @@ static bool get_pixel_clk_frequency_100hz(
* not be programmed equal to DPREFCLK
*/
modulo_hz = REG_READ(MODULO[inst]);
- *pixel_clk_khz = div_u64((uint64_t)clock_hz*
- clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
- modulo_hz);
+ if (modulo_hz)
+ *pixel_clk_khz = div_u64((uint64_t)clock_hz*
+ clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
+ modulo_hz);
+ else
+ *pixel_clk_khz = 0;
} else {
/* NOTE: There is agreement with VBIOS here that MODULO is
* programmed equal to DPREFCLK, in which case PHASE will be
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index e02ac75afbf7..e3a62873c0e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2550,12 +2550,21 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
- if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
- blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
- } else if (per_pixel_alpha) {
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ if (per_pixel_alpha) {
+ /* DCN1.0 has output CM before MPC which seems to screw with
+ * pre-multiplied alpha.
+ */
+ blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
+ pipe_ctx->stream->output_color_space)
+ && pipe_ctx->plane_state->pre_multiplied_alpha);
+ if (pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ }
} else {
+ blnd_cfg.pre_multiplied_alpha = false;
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
}
@@ -2564,14 +2573,6 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
else
blnd_cfg.global_alpha = 0xff;
- /* DCN1.0 has output CM before MPC which seems to screw with
- * pre-multiplied alpha.
- */
- blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
- pipe_ctx->stream->output_color_space)
- && per_pixel_alpha;
-
-
/*
* TODO: remove hack
* Note: currently there is a bug in init_hw such that
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index e1f87bd72e4a..ec6aa8d8b251 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1773,7 +1773,6 @@ void dcn20_post_unlock_program_front_end(
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) {
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
@@ -2346,12 +2345,16 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
- if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
- blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
- } else if (per_pixel_alpha) {
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ if (per_pixel_alpha) {
+ blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
+ if (pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ }
} else {
+ blnd_cfg.pre_multiplied_alpha = false;
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
}
@@ -2365,7 +2368,7 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
blnd_cfg.top_gain = 0x1f000;
blnd_cfg.bottom_inside_gain = 0x1f000;
blnd_cfg.bottom_outside_gain = 0x1f000;
- blnd_cfg.pre_multiplied_alpha = per_pixel_alpha;
+
if (pipe_ctx->plane_state->format
== SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
blnd_cfg.pre_multiplied_alpha = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index f5e8916601d3..b604fb26f288 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -28,6 +28,8 @@
#include "dc.h"
#include "dcn_calc_math.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#define REG(reg)\
optc1->tg_regs->reg
@@ -184,6 +186,14 @@ void optc3_set_dsc_config(struct timing_generator *optc,
}
+void optc3_set_vrr_m_const(struct timing_generator *optc,
+ double vtotal_avg)
+{
+ DC_FP_START();
+ optc3_fpu_set_vrr_m_const(optc, vtotal_avg);
+ DC_FP_END();
+}
+
void optc3_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 336b2ce6a636..1c1a67c4cec1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -84,6 +84,7 @@
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
+#include "dml/dcn30/dcn30_fpu.h"
#include "dml/dcn30/display_mode_vba_30.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
@@ -91,137 +92,6 @@
#define DC_LOGGER_INIT(logger)
-struct _vcs_dpi_ip_params_st dcn3_0_ip = {
- .use_min_dcfclk = 0,
- .clamp_min_dcfclk = 0,
- .odm_capable = 1,
- .gpuvm_enable = 0,
- .hostvm_enable = 0,
- .gpuvm_max_page_table_levels = 4,
- .hostvm_max_page_table_levels = 4,
- .hostvm_cached_page_table_levels = 0,
- .pte_group_size_bytes = 2048,
- .num_dsc = 6,
- .rob_buffer_size_kbytes = 184,
- .det_buffer_size_kbytes = 184,
- .dpte_buffer_size_in_pte_reqs_luma = 84,
- .pde_proc_buffer_size_64k_reqs = 48,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .pte_enable = 1,
- .max_page_table_levels = 2,
- .pte_chunk_size_kbytes = 2, // ?
- .meta_chunk_size_kbytes = 2,
- .writeback_chunk_size_kbytes = 8,
- .line_buffer_size_bits = 789504,
- .is_line_buffer_bpp_fixed = 0, // ?
- .line_buffer_fixed_bpp = 0, // ?
- .dcc_supported = true,
- .writeback_interface_buffer_size_kbytes = 90,
- .writeback_line_buffer_buffer_size = 0,
- .max_line_buffer_lines = 12,
- .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
- .writeback_chroma_buffer_size_kbytes = 8,
- .writeback_chroma_line_buffer_width_pixels = 4,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .writeback_line_buffer_luma_buffer_size = 0,
- .writeback_line_buffer_chroma_buffer_size = 14643,
- .cursor_buffer_size = 8,
- .cursor_chunk_size = 2,
- .max_num_otg = 6,
- .max_num_dpp = 6,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .hscl_mults = 4,
- .vscl_mults = 4,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dispclk_ramp_margin_percent = 1,
- .underscan_factor = 1.11,
- .min_vblank_lines = 32,
- .dppclk_delay_subtotal = 46,
- .dynamic_metadata_vm_enabled = true,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_scl = 50,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dcfclk_cstate_latency = 5.2, // SRExitTime
- .max_inter_dcn_tile_repeaters = 8,
- .odm_combine_4to1_supported = true,
-
- .xfc_supported = false,
- .xfc_fill_bw_overhead_percent = 10.0,
- .xfc_fill_constant_bytes = 0,
- .gfx7_compat_tiling_supported = 0,
- .number_of_cursors = 1,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 562.0,
- .dppclk_mhz = 300.0,
- .phyclk_mhz = 300.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 405.6,
- },
- },
- .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
- .num_states = 1,
- .sr_exit_time_us = 15.5,
- .sr_enter_plus_exit_time_us = 20,
- .urgent_latency_us = 4.0,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 40.0,
- .writeback_latency_us = 12.0,
- .max_request_size_bytes = 256,
- .fabric_datapath_to_dcn_data_return_bytes = 64,
- .dcn_downspread_percent = 0.5,
- .downspread_percent = 0.38,
- .dram_page_open_time_ns = 50.0,
- .dram_rw_turnaround_time_ns = 17.5,
- .dram_return_buffer_per_channel_bytes = 8192,
- .round_trip_ping_latency_dcfclk_cycles = 191,
- .urgent_out_of_order_return_per_channel_bytes = 4096,
- .channel_interleave_bytes = 256,
- .num_banks = 8,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 404,
- .dummy_pstate_latency_us = 5,
- .writeback_dram_clock_change_latency_us = 23.0,
- .return_bus_width_bytes = 64,
- .dispclk_dppclk_vco_speed_mhz = 3650,
- .xfc_bus_transport_time_us = 20, // ?
- .xfc_xbuf_latency_tolerance_us = 4, // ?
- .use_urgent_burst_bw = 1, // ?
- .do_urgent_latency_adjustment = true,
- .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
-};
-
enum dcn30_clk_src_array_id {
DCN30_CLK_SRC_PLL0,
DCN30_CLK_SRC_PLL1,
@@ -1480,90 +1350,9 @@ int dcn30_populate_dml_pipes_from_context(
void dcn30_populate_dml_writeback_from_context(
struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
{
- int pipe_cnt, i, j;
- double max_calc_writeback_dispclk;
- double writeback_dispclk;
- struct writeback_st dout_wb;
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_stream_state *stream = res_ctx->pipe_ctx[i].stream;
-
- if (!stream)
- continue;
- max_calc_writeback_dispclk = 0;
-
- /* Set writeback information */
- pipes[pipe_cnt].dout.wb_enable = 0;
- pipes[pipe_cnt].dout.num_active_wb = 0;
- for (j = 0; j < stream->num_wb_info; j++) {
- struct dc_writeback_info *wb_info = &stream->writeback_info[j];
-
- if (wb_info->wb_enabled && wb_info->writeback_source_plane &&
- (wb_info->writeback_source_plane == res_ctx->pipe_ctx[i].plane_state)) {
- pipes[pipe_cnt].dout.wb_enable = 1;
- pipes[pipe_cnt].dout.num_active_wb++;
- dout_wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_en ?
- wb_info->dwb_params.cnv_params.crop_height :
- wb_info->dwb_params.cnv_params.src_height;
- dout_wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_en ?
- wb_info->dwb_params.cnv_params.crop_width :
- wb_info->dwb_params.cnv_params.src_width;
- dout_wb.wb_dst_width = wb_info->dwb_params.dest_width;
- dout_wb.wb_dst_height = wb_info->dwb_params.dest_height;
-
- /* For IP that doesn't support WB scaling, set h/v taps to 1 to avoid DML validation failure */
- if (dc->dml.ip.writeback_max_hscl_taps > 1) {
- dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps;
- dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;
- } else {
- dout_wb.wb_htaps_luma = 1;
- dout_wb.wb_vtaps_luma = 1;
- }
- dout_wb.wb_htaps_chroma = 0;
- dout_wb.wb_vtaps_chroma = 0;
- dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ?
- (double)wb_info->dwb_params.cnv_params.crop_width /
- (double)wb_info->dwb_params.dest_width :
- (double)wb_info->dwb_params.cnv_params.src_width /
- (double)wb_info->dwb_params.dest_width;
- dout_wb.wb_vratio = wb_info->dwb_params.cnv_params.crop_en ?
- (double)wb_info->dwb_params.cnv_params.crop_height /
- (double)wb_info->dwb_params.dest_height :
- (double)wb_info->dwb_params.cnv_params.src_height /
- (double)wb_info->dwb_params.dest_height;
- if (wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
- wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
- dout_wb.wb_pixel_format = dm_444_64;
- else
- dout_wb.wb_pixel_format = dm_444_32;
-
- /* Workaround for cases where multiple writebacks are connected to same plane
- * In which case, need to compute worst case and set the associated writeback parameters
- * This workaround is necessary due to DML computation assuming only 1 set of writeback
- * parameters per pipe
- */
- writeback_dispclk = dml30_CalculateWriteBackDISPCLK(
- dout_wb.wb_pixel_format,
- pipes[pipe_cnt].pipe.dest.pixel_rate_mhz,
- dout_wb.wb_hratio,
- dout_wb.wb_vratio,
- dout_wb.wb_htaps_luma,
- dout_wb.wb_vtaps_luma,
- dout_wb.wb_src_width,
- dout_wb.wb_dst_width,
- pipes[pipe_cnt].pipe.dest.htotal,
- dc->current_state->bw_ctx.dml.ip.writeback_line_buffer_buffer_size);
-
- if (writeback_dispclk > max_calc_writeback_dispclk) {
- max_calc_writeback_dispclk = writeback_dispclk;
- pipes[pipe_cnt].dout.wb = dout_wb;
- }
- }
- }
-
- pipe_cnt++;
- }
-
+ DC_FP_START();
+ dcn30_fpu_populate_dml_writeback_from_context(dc, res_ctx, pipes);
+ DC_FP_END();
}
unsigned int dcn30_calc_max_scaled_time(
@@ -1598,7 +1387,7 @@ void dcn30_set_mcif_arb_params(
enum mmhubbub_wbif_mode wbif_mode;
struct display_mode_lib *dml = &context->bw_ctx.dml;
struct mcif_arb_params *wb_arb_params;
- int i, j, k, dwb_pipe;
+ int i, j, dwb_pipe;
/* Writeback MCIF_WB arbitration parameters */
dwb_pipe = 0;
@@ -1622,17 +1411,15 @@ void dcn30_set_mcif_arb_params(
else
wbif_mode = PACKED_444;
- for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
- wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(dml, pipes, pipe_cnt) * 1000;
- wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
- }
+ DC_FP_START();
+ dcn30_fpu_set_mcif_arb_params(wb_arb_params, dml, pipes, pipe_cnt, j);
+ DC_FP_END();
wb_arb_params->time_per_pixel = (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* time_per_pixel should be in u6.6 format */
wb_arb_params->slice_lines = 32;
wb_arb_params->arbitration_slice = 2; /* irrelevant since there is no YUV output */
wb_arb_params->max_scaled_time = dcn30_calc_max_scaled_time(wb_arb_params->time_per_pixel,
wbif_mode,
wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */
- wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[j] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */
dwb_pipe++;
@@ -2111,178 +1898,11 @@ validate_out:
return out;
}
-/*
- * This must be noinline to ensure anything that deals with FP registers
- * is contained within this call; previously our compiling with hard-float
- * would result in fp instructions being emitted outside of the boundaries
- * of the DC_FP_START/END macros, which makes sense as the compiler has no
- * idea about what is wrapped and what is not
- *
- * This is largely just a workaround to avoid breakage introduced with 5.6,
- * ideally all fp-using code should be moved into its own file, only that
- * should be compiled with hard-float, and all code exported from there
- * should be strictly wrapped with DC_FP_START/END
- */
-static noinline void dcn30_calculate_wm_and_dlg_fp(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel)
+void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
{
- int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
- int i, pipe_idx;
- double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
- bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
-
- if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
- dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-
- /* Set B:
- * DCFCLK: 1GHz or min required above 1GHz
- * FCLK/UCLK: Max
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
- if (vlevel == 0) {
- pipes[0].clks_cfg.voltage = 1;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
- }
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-
- /* Set D:
- * DCFCLK: Min Required
- * FCLK(proportional to UCLK): 1GHz or Max
- * MALL stutter, sr_enter_exit = 4, sr_exit = 2us
- */
- /*
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- */
-
- /* Set C:
- * DCFCLK: Min Required
- * FCLK(proportional to UCLK): 1GHz or Max
- * pstate latency overridden to 5us
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
- unsigned int min_dram_speed_mts_margin = 160;
-
- if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
- min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
-
- /* find largest table entry that is lower than dram speed, but lower than DPM0 still uses DPM0 */
- for (i = 3; i > 0; i--)
- if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
- break;
-
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
- }
-
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- if (!pstate_en) {
- /* The only difference between A and C is p-state latency, if p-state is not supported we want to
- * calculate DLG based on dummy p-state latency, and max out the set A p-state watermark
- */
- context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
- } else {
- /* Set A:
- * DCFCLK: Min Required
- * FCLK(proportional to UCLK): 1GHz or Max
- *
- * Set A calculated last so that following calculations are based on Set A
- */
- dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
- context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- }
-
- context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
-
- /* Make set D = set A until set D is enabled */
- context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
- pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
- if (dc->config.forced_clocks) {
- pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
- pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
- if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
- if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
- pipe_idx++;
- }
-
DC_FP_START();
- dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ dcn30_fpu_update_soc_for_wm_a(dc, context);
DC_FP_END();
-
- if (!pstate_en)
- /* Restore full p-state latency */
- context->bw_ctx.dml.soc.dram_clock_change_latency_us =
- dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
-}
-
-void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
-{
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
- }
}
void dcn30_calculate_wm_and_dlg(
@@ -2292,7 +1912,7 @@ void dcn30_calculate_wm_and_dlg(
int vlevel)
{
DC_FP_START();
- dcn30_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
+ dcn30_fpu_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
DC_FP_END();
}
@@ -2351,40 +1971,6 @@ validate_out:
return out;
}
-/*
- * This must be noinline to ensure anything that deals with FP registers
- * is contained within this call; previously our compiling with hard-float
- * would result in fp instructions being emitted outside of the boundaries
- * of the DC_FP_START/END macros, which makes sense as the compiler has no
- * idea about what is wrapped and what is not
- *
- * This is largely just a workaround to avoid breakage introduced with 5.6,
- * ideally all fp-using code should be moved into its own file, only that
- * should be compiled with hard-float, and all code exported from there
- * should be strictly wrapped with DC_FP_START/END
- */
-static noinline void dcn30_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
- unsigned int *optimal_dcfclk,
- unsigned int *optimal_fclk)
-{
- double bw_from_dram, bw_from_dram1, bw_from_dram2;
-
- bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
- dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
- bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
- dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
-
- bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
-
- if (optimal_fclk)
- *optimal_fclk = bw_from_dram /
- (dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
-
- if (optimal_dcfclk)
- *optimal_dcfclk = bw_from_dram /
- (dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
-}
-
void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
unsigned int i, j;
@@ -2399,47 +1985,43 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
unsigned int num_dcfclk_sta_targets = 4;
unsigned int num_uclk_states;
+ struct dc_bounding_box_max_clk dcn30_bb_max_clk;
+
+ memset(&dcn30_bb_max_clk, 0, sizeof(dcn30_bb_max_clk));
+
if (dc->ctx->dc_bios->vram_info.num_chans)
dcn3_0_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
- if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
- dcn3_0_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
-
- dcn3_0_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ DC_FP_START();
+ dcn30_fpu_update_dram_channel_width_bytes(dc);
+ DC_FP_END();
if (bw_params->clk_table.entries[0].memclk_mhz) {
- int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
- if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
- max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
- if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
- if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
- if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
- max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > dcn30_bb_max_clk.max_dcfclk_mhz)
+ dcn30_bb_max_clk.max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > dcn30_bb_max_clk.max_dispclk_mhz)
+ dcn30_bb_max_clk.max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > dcn30_bb_max_clk.max_dppclk_mhz)
+ dcn30_bb_max_clk.max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > dcn30_bb_max_clk.max_phyclk_mhz)
+ dcn30_bb_max_clk.max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
}
- if (!max_dcfclk_mhz)
- max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
- if (!max_dispclk_mhz)
- max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
- if (!max_dppclk_mhz)
- max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
- if (!max_phyclk_mhz)
- max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;
+ DC_FP_START();
+ dcn30_fpu_update_max_clk(&dcn30_bb_max_clk);
+ DC_FP_END();
- if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ if (dcn30_bb_max_clk.max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
- dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
+ dcfclk_sta_targets[num_dcfclk_sta_targets] = dcn30_bb_max_clk.max_dcfclk_mhz;
num_dcfclk_sta_targets++;
- } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ } else if (dcn30_bb_max_clk.max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
for (i = 0; i < num_dcfclk_sta_targets; i++) {
- if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
- dcfclk_sta_targets[i] = max_dcfclk_mhz;
+ if (dcfclk_sta_targets[i] > dcn30_bb_max_clk.max_dcfclk_mhz) {
+ dcfclk_sta_targets[i] = dcn30_bb_max_clk.max_dcfclk_mhz;
break;
}
}
@@ -2452,7 +2034,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
// Calculate optimal dcfclk for each uclk
for (i = 0; i < num_uclk_states; i++) {
DC_FP_START();
- dcn30_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
+ dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
&optimal_dcfclk_for_uclk[i], NULL);
DC_FP_END();
if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
@@ -2479,7 +2061,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
- if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+ if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
} else {
@@ -2494,33 +2076,15 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
}
while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
- optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+ optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
}
dcn3_0_soc.num_states = num_states;
- for (i = 0; i < dcn3_0_soc.num_states; i++) {
- dcn3_0_soc.clock_limits[i].state = i;
- dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
- dcn3_0_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
- dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
-
- /* Fill all states with max values of all other clocks */
- dcn3_0_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
- dcn3_0_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
- dcn3_0_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
- dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
- /* These clocks cannot come from bw_params, always fill from dcn3_0_soc[1] */
- /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
- dcn3_0_soc.clock_limits[i].phyclk_d18_mhz = dcn3_0_soc.clock_limits[0].phyclk_d18_mhz;
- dcn3_0_soc.clock_limits[i].socclk_mhz = dcn3_0_soc.clock_limits[0].socclk_mhz;
- dcn3_0_soc.clock_limits[i].dscclk_mhz = dcn3_0_soc.clock_limits[0].dscclk_mhz;
- }
- /* re-init DML with updated bb */
- dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
- if (dc->current_state)
- dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+ DC_FP_START();
+ dcn30_fpu_update_bw_bounding_box(dc, bw_params, &dcn30_bb_max_clk, dcfclk_mhz, dram_speed_mts);
+ DC_FP_END();
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
index b92e4cc0232f..3330a1026fa5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
@@ -35,6 +35,9 @@ struct dc;
struct resource_pool;
struct _vcs_dpi_display_pipe_params_st;
+extern struct _vcs_dpi_ip_params_st dcn3_0_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc;
+
struct dcn30_resource_pool {
struct resource_pool base;
};
@@ -96,4 +99,6 @@ enum dc_status dcn30_add_stream_to_ctx(
void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+void dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context);
+
#endif /* _DCN30_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 4daf8931aa7c..a5df74110284 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -81,6 +81,8 @@
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dml/dcn30/display_mode_vba_30.h"
#include "dml/dcn301/dcn301_fpu.h"
#include "vm_helper.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index f0938653bb88..f537888f4fa6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -43,6 +43,8 @@
#include "dcn20/dcn20_dsc.h"
#include "dcn20/dcn20_resource.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dcn10/dcn10_resource.h"
#include "dce/dce_abm.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 4fcbc0502808..76f863eb86ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -25,6 +25,8 @@
#include "dcn20/dcn20_dsc.h"
#include "dcn20/dcn20_resource.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dcn10/dcn10_resource.h"
#include "dc_link_ddc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index ccf1b71a8269..3d9f07d4770b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -36,6 +36,8 @@
#include "dcn20/dcn20_resource.h"
#include "dcn30/dcn30_resource.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dcn10/dcn10_ipp.h"
#include "dcn30/dcn30_hubbub.h"
#include "dcn31/dcn31_hubbub.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index ee911452c048..a64b88ca01a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -71,6 +71,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
@@ -113,7 +114,7 @@ DML += dcn20/dcn20_fpu.o
DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
-DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
+DML += dcn30/dcn30_fpu.o dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
DML += dcn31/dcn31_fpu.o
DML += dcn301/dcn301_fpu.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
new file mode 100644
index 000000000000..574676a0711a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright 2020-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "resource.h"
+#include "clk_mgr.h"
+#include "reg_helper.h"
+#include "dcn_calc_math.h"
+#include "dcn20/dcn20_resource.h"
+#include "dcn30/dcn30_resource.h"
+
+
+#include "display_mode_vba_30.h"
+#include "dcn30_fpu.h"
+
+#define REG(reg)\
+ optc1->tg_regs->reg
+
+#define CTX \
+ optc1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ optc1->tg_shift->field_name, optc1->tg_mask->field_name
+
+
+struct _vcs_dpi_ip_params_st dcn3_0_ip = {
+ .use_min_dcfclk = 0,
+ .clamp_min_dcfclk = 0,
+ .odm_capable = 1,
+ .gpuvm_enable = 0,
+ .hostvm_enable = 0,
+ .gpuvm_max_page_table_levels = 4,
+ .hostvm_max_page_table_levels = 4,
+ .hostvm_cached_page_table_levels = 0,
+ .pte_group_size_bytes = 2048,
+ .num_dsc = 6,
+ .rob_buffer_size_kbytes = 184,
+ .det_buffer_size_kbytes = 184,
+ .dpte_buffer_size_in_pte_reqs_luma = 84,
+ .pde_proc_buffer_size_64k_reqs = 48,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .pte_enable = 1,
+ .max_page_table_levels = 2,
+ .pte_chunk_size_kbytes = 2, // ?
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 8,
+ .line_buffer_size_bits = 789504,
+ .is_line_buffer_bpp_fixed = 0, // ?
+ .line_buffer_fixed_bpp = 0, // ?
+ .dcc_supported = true,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .writeback_line_buffer_buffer_size = 0,
+ .max_line_buffer_lines = 12,
+ .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .writeback_chroma_line_buffer_width_pixels = 4,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .writeback_line_buffer_luma_buffer_size = 0,
+ .writeback_line_buffer_chroma_buffer_size = 14643,
+ .cursor_buffer_size = 8,
+ .cursor_chunk_size = 2,
+ .max_num_otg = 6,
+ .max_num_dpp = 6,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.11,
+ .min_vblank_lines = 32,
+ .dppclk_delay_subtotal = 46,
+ .dynamic_metadata_vm_enabled = true,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dcfclk_cstate_latency = 5.2, // SRExitTime
+ .max_inter_dcn_tile_repeaters = 8,
+ .max_num_hdmi_frl_outputs = 1,
+ .odm_combine_4to1_supported = true,
+
+ .xfc_supported = false,
+ .xfc_fill_bw_overhead_percent = 10.0,
+ .xfc_fill_constant_bytes = 0,
+ .gfx7_compat_tiling_supported = 0,
+ .number_of_cursors = 1,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 562.0,
+ .dppclk_mhz = 300.0,
+ .phyclk_mhz = 300.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 405.6,
+ },
+ },
+
+ .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
+ .num_states = 1,
+ .sr_exit_time_us = 15.5,
+ .sr_enter_plus_exit_time_us = 20,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 40.0,
+ .writeback_latency_us = 12.0,
+ .max_request_size_bytes = 256,
+ .fabric_datapath_to_dcn_data_return_bytes = 64,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.38,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 191,
+ .urgent_out_of_order_return_per_channel_bytes = 4096,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 404,
+ .dummy_pstate_latency_us = 5,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3650,
+ .xfc_bus_transport_time_us = 20, // ?
+ .xfc_xbuf_latency_tolerance_us = 4, // ?
+ .use_urgent_burst_bw = 1, // ?
+ .do_urgent_latency_adjustment = true,
+ .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
+};
+
+
+void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
+ double vtotal_avg)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ double vtotal_min, vtotal_max;
+ double ratio, modulo, phase;
+ uint32_t vblank_start;
+ uint32_t v_total_mask_value = 0;
+
+ dc_assert_fp_enabled();
+
+ /* Compute VTOTAL_MIN and VTOTAL_MAX, so that
+ * VTOTAL_MAX - VTOTAL_MIN = 1
+ */
+ v_total_mask_value = 16;
+ vtotal_min = dcn_bw_floor(vtotal_avg);
+ vtotal_max = dcn_bw_ceil(vtotal_avg);
+
+ /* Check that bottom VBLANK is at least 2 lines tall when running with
+ * VTOTAL_MIN. Note that VTOTAL registers are defined as 'total number
+ * of lines in a frame - 1'.
+ */
+ REG_GET(OTG_V_BLANK_START_END, OTG_V_BLANK_START,
+ &vblank_start);
+ ASSERT(vtotal_min >= vblank_start + 1);
+
+ /* Special case where the average frame rate can be achieved
+ * without using the DTO
+ */
+ if (vtotal_min == vtotal_max) {
+ REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
+
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
+ REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
+ REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
+ return;
+ }
+
+ ratio = vtotal_max - vtotal_avg;
+ modulo = 65536.0 * 65536.0 - 1.0; /* 2^32 - 1 */
+ phase = ratio * modulo;
+
+ /* Special cases where the DTO phase gets rounded to 0 or
+ * to DTO modulo
+ */
+ if (phase <= 0 || phase >= modulo) {
+ REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL,
+ phase <= 0 ?
+ (uint32_t)vtotal_max : (uint32_t)vtotal_min);
+ REG_SET(OTG_V_TOTAL_MIN, 0, OTG_V_TOTAL_MIN, 0);
+ REG_SET(OTG_V_TOTAL_MAX, 0, OTG_V_TOTAL_MAX, 0);
+ REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
+ REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
+ REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
+ return;
+ }
+ REG_UPDATE_6(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 1,
+ OTG_SET_V_TOTAL_MIN_MASK, v_total_mask_value,
+ OTG_VTOTAL_MID_REPLACING_MIN_EN, 0,
+ OTG_VTOTAL_MID_REPLACING_MAX_EN, 0);
+ REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
+ optc->funcs->set_vtotal_min_max(optc, vtotal_min, vtotal_max);
+ REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, (uint32_t)phase);
+ REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, (uint32_t)modulo);
+}
+
+void dcn30_fpu_populate_dml_writeback_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
+{
+ int pipe_cnt, i, j;
+ double max_calc_writeback_dispclk;
+ double writeback_dispclk;
+ struct writeback_st dout_wb;
+
+ dc_assert_fp_enabled();
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *stream = res_ctx->pipe_ctx[i].stream;
+
+ if (!stream)
+ continue;
+ max_calc_writeback_dispclk = 0;
+
+ /* Set writeback information */
+ pipes[pipe_cnt].dout.wb_enable = 0;
+ pipes[pipe_cnt].dout.num_active_wb = 0;
+ for (j = 0; j < stream->num_wb_info; j++) {
+ struct dc_writeback_info *wb_info = &stream->writeback_info[j];
+
+ if (wb_info->wb_enabled && wb_info->writeback_source_plane &&
+ (wb_info->writeback_source_plane == res_ctx->pipe_ctx[i].plane_state)) {
+ pipes[pipe_cnt].dout.wb_enable = 1;
+ pipes[pipe_cnt].dout.num_active_wb++;
+ dout_wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_height :
+ wb_info->dwb_params.cnv_params.src_height;
+ dout_wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_width :
+ wb_info->dwb_params.cnv_params.src_width;
+ dout_wb.wb_dst_width = wb_info->dwb_params.dest_width;
+ dout_wb.wb_dst_height = wb_info->dwb_params.dest_height;
+
+ /* For IP that doesn't support WB scaling, set h/v taps to 1 to avoid DML validation failure */
+ if (dc->dml.ip.writeback_max_hscl_taps > 1) {
+ dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps;
+ dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;
+ } else {
+ dout_wb.wb_htaps_luma = 1;
+ dout_wb.wb_vtaps_luma = 1;
+ }
+ dout_wb.wb_htaps_chroma = 0;
+ dout_wb.wb_vtaps_chroma = 0;
+ dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_width /
+ (double)wb_info->dwb_params.dest_width :
+ (double)wb_info->dwb_params.cnv_params.src_width /
+ (double)wb_info->dwb_params.dest_width;
+ dout_wb.wb_vratio = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_height /
+ (double)wb_info->dwb_params.dest_height :
+ (double)wb_info->dwb_params.cnv_params.src_height /
+ (double)wb_info->dwb_params.dest_height;
+ if (wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
+ wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
+ dout_wb.wb_pixel_format = dm_444_64;
+ else
+ dout_wb.wb_pixel_format = dm_444_32;
+
+ /* Workaround for cases where multiple writebacks are connected to the same plane,
+ * in which case we need to compute the worst case and set the associated writeback
+ * parameters. This workaround is necessary because the DML computation assumes only
+ * one set of writeback parameters per pipe.
+ */
+ writeback_dispclk = dml30_CalculateWriteBackDISPCLK(
+ dout_wb.wb_pixel_format,
+ pipes[pipe_cnt].pipe.dest.pixel_rate_mhz,
+ dout_wb.wb_hratio,
+ dout_wb.wb_vratio,
+ dout_wb.wb_htaps_luma,
+ dout_wb.wb_vtaps_luma,
+ dout_wb.wb_src_width,
+ dout_wb.wb_dst_width,
+ pipes[pipe_cnt].pipe.dest.htotal,
+ dc->current_state->bw_ctx.dml.ip.writeback_line_buffer_buffer_size);
+
+ if (writeback_dispclk > max_calc_writeback_dispclk) {
+ max_calc_writeback_dispclk = writeback_dispclk;
+ pipes[pipe_cnt].dout.wb = dout_wb;
+ }
+ }
+ }
+
+ pipe_cnt++;
+ }
+}
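The wb_hratio/wb_vratio expressions above are plain source-over-destination scaling factors, with the crop rectangle taking precedence when cropping is enabled. A tiny illustration with invented dimensions, not part of the patch:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	/* Hypothetical writeback: 3840x2160 source, cropped to 1920x1080,
	 * written out at 1280x720.
	 */
	bool crop_en = true;
	double src_w = 3840.0, src_h = 2160.0;
	double crop_w = 1920.0, crop_h = 1080.0;
	double dst_w = 1280.0, dst_h = 720.0;

	double hratio = (crop_en ? crop_w : src_w) / dst_w; /* 1.5 */
	double vratio = (crop_en ? crop_h : src_h) / dst_h; /* 1.5 */

	printf("wb_hratio = %.2f, wb_vratio = %.2f\n", hratio, vratio);
	return 0;
}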
+
+void dcn30_fpu_set_mcif_arb_params(struct mcif_arb_params *wb_arb_params,
+ struct display_mode_lib *dml,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int cur_pipe)
+{
+ int i;
+
+ dc_assert_fp_enabled();
+
+ for (i = 0; i < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); i++) {
+ wb_arb_params->cli_watermark[i] = get_wm_writeback_urgent(dml, pipes, pipe_cnt) * 1000;
+ wb_arb_params->pstate_watermark[i] = get_wm_writeback_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
+ }
+
+ wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[cur_pipe] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */
+}
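The "num_clock_cycles = us * MHz" identity used above follows from MHz being cycles per microsecond. A throwaway check with made-up numbers (not the driver's values):

#include <stdio.h>

int main(void)
{
	/* Hypothetical: 9 us of allowed DRAM-clock-change end position
	 * at a 400 MHz reference clock.
	 */
	double end_position_us = 9.0;
	double refclk_mhz = 400.0;

	printf("num_clock_cycles = %.0f\n", end_position_us * refclk_mhz); /* 3600 */
	return 0;
}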
+
+void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+{
+
+ dc_assert_fp_enabled();
+
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+ }
+}
+
+void dcn30_fpu_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
+ int i, pipe_idx;
+ double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
+ bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
+
+ dc_assert_fp_enabled();
+
+ if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
+ dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+
+ /* Set B:
+ * DCFCLK: 1GHz or min required above 1GHz
+ * FCLK/UCLK: Max
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
+ if (vlevel == 0) {
+ pipes[0].clks_cfg.voltage = 1;
+ pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
+ }
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+
+ /* Set D:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ * MALL stutter, sr_enter_exit = 4, sr_exit = 2us
+ */
+ /*
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ */
+
+ /* Set C:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ * pstate latency overridden to 5us
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+ unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
+ unsigned int min_dram_speed_mts_margin = 160;
+
+ if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
+ min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
+
+ /* find the largest table entry that is lower than the dram speed; anything lower than DPM0 still uses DPM0 */
+ for (i = 3; i > 0; i--)
+ if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
+ break;
+
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
+ }
+
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ if (!pstate_en) {
+ /* The only difference between A and C is p-state latency; if p-state is not supported,
+ * calculate DLG based on the dummy p-state latency and max out the set A p-state watermark.
+ */
+ context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
+ } else {
+ /* Set A:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ *
+ * Set A calculated last so that following calculations are based on Set A
+ */
+ dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ }
+
+ context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+
+ /* Make set D = set A until set D is enabled */
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ if (dc->config.forced_clocks) {
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+ }
+ if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+ pipe_idx++;
+ }
+
+ DC_FP_START();
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ DC_FP_END();
+
+ if (!pstate_en)
+ /* Restore full p-state latency */
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+
+}
+
+void dcn30_fpu_update_dram_channel_width_bytes(struct dc *dc)
+{
+ dc_assert_fp_enabled();
+
+ if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+ dcn3_0_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+}
+
+void dcn30_fpu_update_max_clk(struct dc_bounding_box_max_clk *dcn30_bb_max_clk)
+{
+ dc_assert_fp_enabled();
+
+ if (!dcn30_bb_max_clk->max_dcfclk_mhz)
+ dcn30_bb_max_clk->max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
+ if (!dcn30_bb_max_clk->max_dispclk_mhz)
+ dcn30_bb_max_clk->max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
+ if (!dcn30_bb_max_clk->max_dppclk_mhz)
+ dcn30_bb_max_clk->max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
+ if (!dcn30_bb_max_clk->max_phyclk_mhz)
+ dcn30_bb_max_clk->max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;
+}
+
+void dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+ unsigned int *optimal_dcfclk,
+ unsigned int *optimal_fclk)
+{
+ double bw_from_dram, bw_from_dram1, bw_from_dram2;
+
+ dc_assert_fp_enabled();
+
+ bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
+ dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
+ bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
+ dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
+
+ bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
+
+ if (optimal_fclk)
+ *optimal_fclk = bw_from_dram /
+ (dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
+
+ if (optimal_dcfclk)
+ *optimal_dcfclk = bw_from_dram /
+ (dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
+}
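The optimal-clock helper above is a pure bandwidth balance: the DRAM bandwidth implied by the UCLK is derated by the normal-use percentages, and the fabric and return-bus clocks are sized so that their byte-per-clock widths carry the same bandwidth. A worked example with invented SoC parameters (placeholders, not the dcn3_0_soc values):

#include <stdio.h>

int main(void)
{
	double uclk_mts = 1000.0;        /* memory speed in MT/s (hypothetical) */
	double num_chans = 8.0;
	double chan_width_bytes = 2.0;
	double dram_bw_pct = 60.0;       /* max_avg_dram_bw_use_normal_percent */
	double sdp_bw_pct = 60.0;        /* max_avg_sdp_bw_use_normal_percent */
	double fabric_bytes = 32.0;      /* fabric_datapath_to_dcn_data_return_bytes */
	double return_bus_bytes = 64.0;  /* return_bus_width_bytes */

	double bw1 = uclk_mts * num_chans * chan_width_bytes * (dram_bw_pct / 100.0);
	double bw2 = uclk_mts * num_chans * chan_width_bytes * (sdp_bw_pct / 100.0);
	double bw = (bw1 < bw2) ? bw1 : bw2;   /* 9600 MB/s with these numbers */

	double optimal_fclk = bw / (fabric_bytes * (sdp_bw_pct / 100.0));        /* 500 MHz */
	double optimal_dcfclk = bw / (return_bus_bytes * (sdp_bw_pct / 100.0));  /* 250 MHz */

	printf("bw=%.0f fclk=%.0f MHz dcfclk=%.0f MHz\n", bw, optimal_fclk, optimal_dcfclk);
	return 0;
}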
+
+void dcn30_fpu_update_bw_bounding_box(struct dc *dc,
+ struct clk_bw_params *bw_params,
+ struct dc_bounding_box_max_clk *dcn30_bb_max_clk,
+ unsigned int *dcfclk_mhz,
+ unsigned int *dram_speed_mts)
+{
+ unsigned int i;
+
+ dc_assert_fp_enabled();
+
+ dcn3_0_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+ for (i = 0; i < dcn3_0_soc.num_states; i++) {
+ dcn3_0_soc.clock_limits[i].state = i;
+ dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
+ dcn3_0_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
+ dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
+
+ /* Fill all states with max values of all other clocks */
+ dcn3_0_soc.clock_limits[i].dispclk_mhz = dcn30_bb_max_clk->max_dispclk_mhz;
+ dcn3_0_soc.clock_limits[i].dppclk_mhz = dcn30_bb_max_clk->max_dppclk_mhz;
+ dcn3_0_soc.clock_limits[i].phyclk_mhz = dcn30_bb_max_clk->max_phyclk_mhz;
+ dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
+ /* These clocks cannot come from bw_params, always fill from dcn3_0_soc[0] */
+ /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+ dcn3_0_soc.clock_limits[i].phyclk_d18_mhz = dcn3_0_soc.clock_limits[0].phyclk_d18_mhz;
+ dcn3_0_soc.clock_limits[i].socclk_mhz = dcn3_0_soc.clock_limits[0].socclk_mhz;
+ dcn3_0_soc.clock_limits[i].dscclk_mhz = dcn3_0_soc.clock_limits[0].dscclk_mhz;
+ }
+ /* re-init DML with updated bb */
+ dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+ if (dc->current_state)
+ dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
new file mode 100644
index 000000000000..dedfe7b5f173
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2020-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN30_FPU_H__
+#define __DCN30_FPU_H__
+
+#include "core_types.h"
+#include "dcn20/dcn20_optc.h"
+
+void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
+ double vtotal_avg);
+
+void dcn30_fpu_populate_dml_writeback_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+
+void dcn30_fpu_set_mcif_arb_params(struct mcif_arb_params *wb_arb_params,
+ struct display_mode_lib *dml,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int cur_pipe);
+
+void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
+
+void dcn30_fpu_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
+
+void dcn30_fpu_update_dram_channel_width_bytes(struct dc *dc);
+
+void dcn30_fpu_update_max_clk(struct dc_bounding_box_max_clk *dcn30_bb_max_clk);
+
+void dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+ unsigned int *optimal_dcfclk,
+ unsigned int *optimal_fclk);
+
+void dcn30_fpu_update_bw_bounding_box(struct dc *dc,
+ struct clk_bw_params *bw_params,
+ struct dc_bounding_box_max_clk *dcn30_bb_max_clk,
+ unsigned int *dcfclk_mhz,
+ unsigned int *dram_speed_mts);
+
+#endif /* __DCN30_FPU_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 26f3a55c35d7..555d4d9e1454 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -486,4 +486,11 @@ struct dc_state {
} perf_params;
};
+struct dc_bounding_box_max_clk {
+ int max_dcfclk_mhz;
+ int max_dispclk_mhz;
+ int max_dppclk_mhz;
+ int max_phyclk_mhz;
+};
+
#endif /* _CORE_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 851b98299063..a3c1e9c56d8b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -217,8 +217,7 @@ void disable_dp_hpo_output(struct dc_link *link,
void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable);
bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);
-void dp_retrieve_lttpr_cap(struct dc_link *link);
-bool dp_apply_lttpr_mode(struct dc_link *link);
+bool dp_retrieve_lttpr_cap(struct dc_link *link);
void edp_panel_backlight_power_on(struct dc_link *link);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
index 74dafd0f9d3d..39c1d1d07357 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
@@ -87,6 +87,11 @@ union dpia_set_config_data {
*/
enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
+/* Query hot plug status of USB4 DP tunnel.
+ * Returns true if HPD high.
+ */
+bool dc_link_dpia_query_hpd_status(struct dc_link *link);
+
/* Train DP tunneling link for USB4 DPIA display endpoint.
 * DPIA equivalent of dc_link_dp_perform_link_training.
* Aborts link training upon detection of sink unplug.
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 9f465b4d626e..447a56286dd0 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -80,12 +80,6 @@ enum link_training_result {
DP_128b_132b_CDS_DONE_TIMEOUT,
};
-enum lttpr_support {
- LTTPR_UNSUPPORTED,
- LTTPR_CHECK_EXT_SUPPORT,
- LTTPR_SUPPORTED,
-};
-
enum lttpr_mode {
LTTPR_MODE_NON_LTTPR,
LTTPR_MODE_TRANSPARENT,
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h
index c755f43aaaf8..7a2c6b12c249 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h
@@ -6070,6 +6070,8 @@
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x200
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x1000
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x3f0000
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h
index 14a3bacfcfd1..fa1f4374fafe 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h
@@ -6058,6 +6058,8 @@
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x200
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x1000
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x3f0000
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
index 106094ed0661..39f6fde6db1d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
@@ -7142,6 +7142,8 @@
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x200
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x1000
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x3f0000
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
index bcd190a3fcdd..c5f4afac3b39 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
@@ -37285,12 +37285,14 @@
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
//DIG0_HDMI_INFOFRAME_CONTROL0
#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND__SHIFT 0x0
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
index 9b6825b74cc1..23580907663b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
@@ -5584,6 +5584,8 @@
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x200
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x1000
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x3f0000
#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
index e7c0cad41081..a788ff3b68c0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
@@ -30357,12 +30357,14 @@
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
//DIG0_HDMI_INFOFRAME_CONTROL0
#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
index dc8ce7aaa0cf..c70f7ba94d8f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
@@ -39439,12 +39439,14 @@
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
//DIG0_HDMI_INFOFRAME_CONTROL0
#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h
index 91969554e36a..ca1e1eb39256 100755
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h
@@ -16956,7 +16956,7 @@
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
-
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
@@ -16964,7 +16964,7 @@
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
-
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
//DIG0_HDMI_INFOFRAME_CONTROL0
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
index 2f780aefc722..6104ae304099 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
@@ -35487,12 +35487,14 @@
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
//DIG0_HDMI_INFOFRAME_CONTROL0
#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 5472f9936feb..d1bf073adf54 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -770,6 +770,9 @@ enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_dpm_forced_level level;
+ if (!pp_funcs)
+ return AMD_DPM_FORCED_LEVEL_AUTO;
+
mutex_lock(&adev->pm.mutex);
if (pp_funcs->get_performance_level)
level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 6016b325b6b5..a601024ba4de 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1436,6 +1436,7 @@ static int smu_disable_dpms(struct smu_context *smu)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
+ case IP_VERSION(13, 0, 0):
return 0;
default:
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index ecc6411dfc8d..c1f76236da26 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -671,8 +671,8 @@ typedef struct {
uint16_t reserved[2];
//Frequency changes
- uint16_t GfxclkFmin; // MHz
- uint16_t GfxclkFmax; // MHz
+ int16_t GfxclkFmin; // MHz
+ int16_t GfxclkFmax; // MHz
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
@@ -683,15 +683,14 @@ typedef struct {
//Fan control
uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS];
- uint16_t FanMaximumRpm;
uint16_t FanMinimumPwm;
- uint16_t FanAcousticLimitRpm;
+ uint16_t AcousticTargetRpmThreshold;
+ uint16_t AcousticLimitRpmThreshold;
 uint16_t FanTargetTemperature; // Degree Celsius
uint8_t FanZeroRpmEnable;
uint8_t FanZeroRpmStopTemp;
uint8_t FanMode;
- uint8_t Padding[1];
-
+ uint8_t MaxOpTemp;
uint32_t Spare[13];
uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
@@ -719,15 +718,14 @@ typedef struct {
uint8_t FanLinearPwmPoints;
uint8_t FanLinearTempPoints;
- uint16_t FanMaximumRpm;
uint16_t FanMinimumPwm;
- uint16_t FanAcousticLimitRpm;
+ uint16_t AcousticTargetRpmThreshold;
+ uint16_t AcousticLimitRpmThreshold;
 uint16_t FanTargetTemperature; // Degree Celsius
uint8_t FanZeroRpmEnable;
uint8_t FanZeroRpmStopTemp;
uint8_t FanMode;
- uint8_t Padding[1];
-
+ uint8_t MaxOpTemp;
uint32_t Spare[13];
@@ -997,7 +995,8 @@ typedef struct {
uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
- uint32_t SpareVmin[12];
+ QuadraticInt_t Vmin_droop;
+ uint32_t SpareVmin[9];
//SECTION: DPM Configuration 1
@@ -1286,7 +1285,6 @@ typedef struct {
uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
-
// SECTION: Board Reserved
uint32_t BoardSpare[64];
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 2b44d41a5157..afa1991e26f9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -30,7 +30,7 @@
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x27
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x28
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x28
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index d68be8f8850e..78f3d9e722bb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -697,12 +697,28 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
uint32_t apu_percent = 0;
uint32_t dgpu_percent = 0;
- if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu->smc_fw_version >= 0x3A4900))
- use_metrics_v3 = true;
- else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu->smc_fw_version >= 0x3A4300))
- use_metrics_v2 = true;
+ switch (smu->adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 7):
+ if (smu->smc_fw_version >= 0x3A4900)
+ use_metrics_v3 = true;
+ else if (smu->smc_fw_version >= 0x3A4300)
+ use_metrics_v2 = true;
+ break;
+ case IP_VERSION(11, 0, 11):
+ if (smu->smc_fw_version >= 0x412D00)
+ use_metrics_v2 = true;
+ break;
+ case IP_VERSION(11, 0, 12):
+ if (smu->smc_fw_version >= 0x3B2300)
+ use_metrics_v2 = true;
+ break;
+ case IP_VERSION(11, 0, 13):
+ if (smu->smc_fw_version >= 0x491100)
+ use_metrics_v2 = true;
+ break;
+ default:
+ break;
+ }
ret = smu_cmn_get_metrics_table(smu,
NULL,
@@ -3833,13 +3849,28 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
uint16_t average_gfx_activity;
int ret = 0;
- if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu->smc_fw_version >= 0x3A4900))
- use_metrics_v3 = true;
- else if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu->smc_fw_version >= 0x3A4300))
- use_metrics_v2 = true;
-
+ switch (smu->adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 7):
+ if (smu->smc_fw_version >= 0x3A4900)
+ use_metrics_v3 = true;
+ else if (smu->smc_fw_version >= 0x3A4300)
+ use_metrics_v2 = true;
+ break;
+ case IP_VERSION(11, 0, 11):
+ if (smu->smc_fw_version >= 0x412D00)
+ use_metrics_v2 = true;
+ break;
+ case IP_VERSION(11, 0, 12):
+ if (smu->smc_fw_version >= 0x3B2300)
+ use_metrics_v2 = true;
+ break;
+ case IP_VERSION(11, 0, 13):
+ if (smu->smc_fw_version >= 0x491100)
+ use_metrics_v2 = true;
+ break;
+ default:
+ break;
+ }
ret = smu_cmn_get_metrics_table(smu,
&metrics_external,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index b87f550af26b..5f8809f6990d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -781,7 +781,7 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
goto failed;
}
- bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+ bitmap_to_arr32(feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
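The bitmap_copy() to bitmap_to_arr32() change in the smu_v*_0_set_allowed_mask() hunks replaces a raw copy through an unsigned long pointer cast with the helper that explicitly converts a CPU bitmap into an array of u32 words, letting the helper handle word-size and endianness details. A userspace sketch of the same low/high split, using a hypothetical 64-bit mask rather than the real SMU feature bits:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical feature bitmap; in the driver the bits come from
	 * feature->allowed.
	 */
	uint64_t allowed = 0x00000003000000ffULL;
	uint32_t feature_mask[2];

	feature_mask[0] = (uint32_t)(allowed & 0xffffffffu);  /* low word  */
	feature_mask[1] = (uint32_t)(allowed >> 32);          /* high word */

	/* The visible hunk then sends feature_mask[1] as the argument of
	 * SMU_MSG_SetAllowedFeaturesMaskHigh.
	 */
	printf("low=0x%08x high=0x%08x\n",
	       (unsigned)feature_mask[0], (unsigned)feature_mask[1]);
	return 0;
}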
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 38af648cb857..fb130409309c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1666,6 +1666,7 @@ static const struct throttling_logging_label {
uint32_t feature_mask;
const char *label;
} logging_label[] = {
+ {(1U << THROTTLER_TEMP_GPU_BIT), "GPU"},
{(1U << THROTTLER_TEMP_MEM_BIT), "HBM"},
{(1U << THROTTLER_TEMP_VR_GFX_BIT), "VR of GFX rail"},
{(1U << THROTTLER_TEMP_VR_MEM_BIT), "VR of HBM rail"},
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index ae6321af9d88..ef9b56de143b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -218,13 +218,25 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
pptable_id == 3688)
pptable_id = 36881;
/*
- * Temporary solution for SMU V13.0.0:
- * - use 99991 signed pptable when SCPM enabled
- * TODO: drop this when the pptable carried in vbios
- * is ready.
+ * Temporary solution for SMU V13.0.0 with SCPM enabled:
+ * - use 36831 signed pptable when pp_table_id is 3683
+ * - use 36641 signed pptable when pp_table_id is 3664 or 0
+ * TODO: drop these when the pptable carried in vbios is ready.
*/
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0))
- pptable_id = 99991;
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
+ switch (pptable_id) {
+ case 0:
+ case 3664:
+ pptable_id = 36641;
+ break;
+ case 3683:
+ pptable_id = 36831;
+ break;
+ default:
+ dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
+ return -EINVAL;
+ }
+ }
}
/* "pptable_id == 0" means vbios carries the pptable. */
@@ -448,13 +460,24 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
pptable_id = smu->smu_table.boot_values.pp_table_id;
/*
- * Temporary solution for SMU V13.0.0:
- * - use 9999 unsigned pptable when SCPM disabled
- * TODO: drop this when the pptable carried in vbios
- * is ready.
+ * Temporary solution for SMU V13.0.0 with SCPM disabled:
+ * - use 3664 or 3683 on request
+ * - use 3664 when pptable_id is 0
+ * TODO: drop these when the pptable carried in vbios is ready.
*/
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0))
- pptable_id = 9999;
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
+ switch (pptable_id) {
+ case 0:
+ pptable_id = 3664;
+ break;
+ case 3664:
+ case 3683:
+ break;
+ default:
+ dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
+ return -EINVAL;
+ }
+ }
}
/* force using vbios pptable in sriov mode */
@@ -814,7 +837,7 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu)
feature->feature_num < 64)
return -EINVAL;
- bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+ bitmap_to_arr32(feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 197a0e2ff063..7432b3e76d3d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -275,9 +275,7 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
}
-#if 0
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT);
-#endif
if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
@@ -296,6 +294,12 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
+
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 7d6ff141b43f..5a17b51aa0f9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -644,42 +644,40 @@ static int smu_v13_0_4_set_watermarks_table(struct smu_context *smu,
if (!table || !clock_ranges)
return -EINVAL;
- if (clock_ranges) {
- if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
- clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
- return -EINVAL;
-
- for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
- table->WatermarkRow[WM_DCFCLK][i].MinClock =
- clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
- table->WatermarkRow[WM_DCFCLK][i].MaxClock =
- clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
- table->WatermarkRow[WM_DCFCLK][i].MinMclk =
- clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
- table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
- clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
-
- table->WatermarkRow[WM_DCFCLK][i].WmSetting =
- clock_ranges->reader_wm_sets[i].wm_inst;
- }
+ if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
+ clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
+ return -EINVAL;
- for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
- table->WatermarkRow[WM_SOCCLK][i].MinClock =
- clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
- table->WatermarkRow[WM_SOCCLK][i].MaxClock =
- clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
- table->WatermarkRow[WM_SOCCLK][i].MinMclk =
- clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
- table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
- clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
-
- table->WatermarkRow[WM_SOCCLK][i].WmSetting =
- clock_ranges->writer_wm_sets[i].wm_inst;
- }
+ for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
+ table->WatermarkRow[WM_DCFCLK][i].MinClock =
+ clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
+ table->WatermarkRow[WM_DCFCLK][i].MaxClock =
+ clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
+ table->WatermarkRow[WM_DCFCLK][i].MinMclk =
+ clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
+ table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
+ clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
+
+ table->WatermarkRow[WM_DCFCLK][i].WmSetting =
+ clock_ranges->reader_wm_sets[i].wm_inst;
+ }
- smu->watermarks_bitmap |= WATERMARKS_EXIST;
+ for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
+ table->WatermarkRow[WM_SOCCLK][i].MinClock =
+ clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+ clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MinMclk =
+ clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
+ clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
+
+ table->WatermarkRow[WM_SOCCLK][i].WmSetting =
+ clock_ranges->writer_wm_sets[i].wm_inst;
}
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+
/* pass data to smu controller */
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 87257b1b028f..feff4f8c927c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -190,6 +190,9 @@ static int yellow_carp_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index e957d4851dc0..f024dc93939e 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -69,7 +69,7 @@ static pgprot_t drm_io_prot(struct drm_local_map *map,
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
- defined(__mips__)
+ defined(__mips__) || defined(__loongarch__)
if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
tmp = pgprot_noncached(tmp);
else
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
index e82b815f83a6..27f4fcb058f9 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -123,8 +123,11 @@ static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
if (fb->format->format != DRM_FORMAT_XRGB8888)
return -EINVAL;
- if (fb->pitches[0] * fb->height > hv->fb_size)
+ if (fb->pitches[0] * fb->height > hv->fb_size) {
+ drm_err(&hv->dev, "fb size requested by %s for %dX%d (pitch %d) greater than %ld\n",
+ current->comm, fb->width, fb->height, fb->pitches[0], hv->fb_size);
return -EINVAL;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
index c0155c6271bf..76a182a9a765 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
@@ -18,16 +18,16 @@
#define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
#define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff)
#define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16)
+
+/* Support for VERSION_WIN7 is removed. #define is retained for reference. */
#define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0)
#define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2)
#define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5)
-#define SYNTHVID_DEPTH_WIN7 16
#define SYNTHVID_DEPTH_WIN8 32
-#define SYNTHVID_FB_SIZE_WIN7 (4 * 1024 * 1024)
+#define SYNTHVID_WIDTH_WIN8 1600
+#define SYNTHVID_HEIGHT_WIN8 1200
#define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
-#define SYNTHVID_WIDTH_MAX_WIN7 1600
-#define SYNTHVID_HEIGHT_MAX_WIN7 1200
enum pipe_msg_type {
PIPE_MSG_INVALID,
@@ -496,12 +496,6 @@ int hyperv_connect_vsp(struct hv_device *hdev)
case VERSION_WIN8:
case VERSION_WIN8_1:
ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN8);
- if (!ret)
- break;
- fallthrough;
- case VERSION_WS2008:
- case VERSION_WIN7:
- ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN7);
break;
default:
ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN10);
@@ -513,18 +507,15 @@ int hyperv_connect_vsp(struct hv_device *hdev)
goto error;
}
- if (hv->synthvid_version == SYNTHVID_VERSION_WIN7)
- hv->screen_depth = SYNTHVID_DEPTH_WIN7;
- else
- hv->screen_depth = SYNTHVID_DEPTH_WIN8;
+ hv->screen_depth = SYNTHVID_DEPTH_WIN8;
if (hyperv_version_ge(hv->synthvid_version, SYNTHVID_VERSION_WIN10)) {
ret = hyperv_get_supported_resolution(hdev);
if (ret)
drm_err(dev, "Failed to get supported resolution from host, use default\n");
} else {
- hv->screen_width_max = SYNTHVID_WIDTH_MAX_WIN7;
- hv->screen_height_max = SYNTHVID_HEIGHT_MAX_WIN7;
+ hv->screen_width_max = SYNTHVID_WIDTH_WIN8;
+ hv->screen_height_max = SYNTHVID_HEIGHT_WIN8;
}
hv->mmio_megabytes = hdev->channel->offermsg.offer.mmio_megabytes;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 9c5cc2800975..b4f69364f9a1 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -51,7 +51,7 @@ static int preallocated_oos_pages = 8192;
static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
{
- struct kvm *kvm = vgpu->kvm;
+ struct kvm *kvm = vgpu->vfio_device.kvm;
int idx;
bool ret;
@@ -1185,7 +1185,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
if (!vgpu->attached)
return -EINVAL;
- pfn = gfn_to_pfn(vgpu->kvm, ops->get_pfn(entry));
+ pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
if (is_error_noslot_pfn(pfn))
return -EINVAL;
return PageTransHuge(pfn_to_page(pfn));
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 03ecffc2ba56..aee1a45da74b 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -227,11 +227,7 @@ struct intel_vgpu {
struct mutex cache_lock;
struct notifier_block iommu_notifier;
- struct notifier_block group_notifier;
- struct kvm *kvm;
- struct work_struct release_work;
atomic_t released;
- struct vfio_group *vfio_group;
struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
@@ -732,7 +728,7 @@ static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
{
if (!vgpu->attached)
return -ESRCH;
- return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false);
+ return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
}
/**
@@ -750,7 +746,7 @@ static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
{
if (!vgpu->attached)
return -ESRCH;
- return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, true);
+ return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
}
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 0787ba5c301f..e2f6c56ab342 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -228,8 +228,6 @@ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
}
}
-static void intel_vgpu_release_work(struct work_struct *work);
-
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
@@ -243,7 +241,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
for (npage = 0; npage < total_pages; npage++) {
unsigned long cur_gfn = gfn + npage;
- ret = vfio_group_unpin_pages(vgpu->vfio_group, &cur_gfn, 1);
+ ret = vfio_unpin_pages(&vgpu->vfio_device, &cur_gfn, 1);
drm_WARN_ON(&i915->drm, ret != 1);
}
}
@@ -266,8 +264,8 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long cur_gfn = gfn + npage;
unsigned long pfn;
- ret = vfio_group_pin_pages(vgpu->vfio_group, &cur_gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
+ ret = vfio_pin_pages(&vgpu->vfio_device, &cur_gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
if (ret != 1) {
gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
cur_gfn, ret);
@@ -761,23 +759,6 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int intel_vgpu_group_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct intel_vgpu *vgpu =
- container_of(nb, struct intel_vgpu, group_notifier);
-
- /* the only action we care about */
- if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
- vgpu->kvm = data;
-
- if (!data)
- schedule_work(&vgpu->release_work);
- }
-
- return NOTIFY_OK;
-}
-
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
{
struct intel_vgpu *itr;
@@ -789,7 +770,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
if (!itr->attached)
continue;
- if (vgpu->kvm == itr->kvm) {
+ if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
ret = true;
goto out;
}
@@ -804,61 +785,44 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned long events;
int ret;
- struct vfio_group *vfio_group;
vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
- vgpu->group_notifier.notifier_call = intel_vgpu_group_notifier;
events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY, &events,
- &vgpu->iommu_notifier);
+ ret = vfio_register_notifier(vfio_dev, VFIO_IOMMU_NOTIFY, &events,
+ &vgpu->iommu_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
ret);
goto out;
}
- events = VFIO_GROUP_NOTIFY_SET_KVM;
- ret = vfio_register_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY, &events,
- &vgpu->group_notifier);
- if (ret != 0) {
- gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
- ret);
- goto undo_iommu;
- }
-
- vfio_group =
- vfio_group_get_external_user_from_dev(vgpu->vfio_device.dev);
- if (IS_ERR_OR_NULL(vfio_group)) {
- ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
- gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
- goto undo_register;
- }
- vgpu->vfio_group = vfio_group;
-
ret = -EEXIST;
if (vgpu->attached)
- goto undo_group;
+ goto undo_iommu;
ret = -ESRCH;
- if (!vgpu->kvm || vgpu->kvm->mm != current->mm) {
+ if (!vgpu->vfio_device.kvm ||
+ vgpu->vfio_device.kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
- goto undo_group;
+ goto undo_iommu;
}
+ kvm_get_kvm(vgpu->vfio_device.kvm);
+
ret = -EEXIST;
if (__kvmgt_vgpu_exist(vgpu))
- goto undo_group;
+ goto undo_iommu;
vgpu->attached = true;
- kvm_get_kvm(vgpu->kvm);
kvmgt_protect_table_init(vgpu);
gvt_cache_init(vgpu);
vgpu->track_node.track_write = kvmgt_page_track_write;
vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
- kvm_page_track_register_notifier(vgpu->kvm, &vgpu->track_node);
+ kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
+ &vgpu->track_node);
debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
&vgpu->nr_cache_entries);
@@ -868,17 +832,9 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
atomic_set(&vgpu->released, 0);
return 0;
-undo_group:
- vfio_group_put_external_user(vgpu->vfio_group);
- vgpu->vfio_group = NULL;
-
-undo_register:
- vfio_unregister_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY,
- &vgpu->group_notifier);
-
undo_iommu:
- vfio_unregister_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY,
- &vgpu->iommu_notifier);
+ vfio_unregister_notifier(vfio_dev, VFIO_IOMMU_NOTIFY,
+ &vgpu->iommu_notifier);
out:
return ret;
}
@@ -894,8 +850,9 @@ static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
}
}
-static void __intel_vgpu_release(struct intel_vgpu *vgpu)
+static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
{
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int ret;
@@ -907,41 +864,24 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
intel_gvt_release_vgpu(vgpu);
- ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_IOMMU_NOTIFY,
- &vgpu->iommu_notifier);
+ ret = vfio_unregister_notifier(&vgpu->vfio_device, VFIO_IOMMU_NOTIFY,
+ &vgpu->iommu_notifier);
drm_WARN(&i915->drm, ret,
"vfio_unregister_notifier for iommu failed: %d\n", ret);
- ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_GROUP_NOTIFY,
- &vgpu->group_notifier);
- drm_WARN(&i915->drm, ret,
- "vfio_unregister_notifier for group failed: %d\n", ret);
-
debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
- kvm_page_track_unregister_notifier(vgpu->kvm, &vgpu->track_node);
- kvm_put_kvm(vgpu->kvm);
+ kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
+ &vgpu->track_node);
kvmgt_protect_table_destroy(vgpu);
gvt_cache_destroy(vgpu);
intel_vgpu_release_msi_eventfd_ctx(vgpu);
- vfio_group_put_external_user(vgpu->vfio_group);
- vgpu->kvm = NULL;
vgpu->attached = false;
-}
-
-static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
-{
- __intel_vgpu_release(vfio_dev_to_vgpu(vfio_dev));
-}
-
-static void intel_vgpu_release_work(struct work_struct *work)
-{
- struct intel_vgpu *vgpu =
- container_of(work, struct intel_vgpu, release_work);
- __intel_vgpu_release(vgpu);
+ if (vgpu->vfio_device.kvm)
+ kvm_put_kvm(vgpu->vfio_device.kvm);
}
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -1690,7 +1630,6 @@ static int intel_vgpu_probe(struct mdev_device *mdev)
return PTR_ERR(vgpu);
}
- INIT_WORK(&vgpu->release_work, intel_vgpu_release_work);
vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev,
&intel_vgpu_dev_ops);
@@ -1728,7 +1667,7 @@ static struct mdev_driver intel_vgpu_mdev_driver = {
int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
{
- struct kvm *kvm = info->kvm;
+ struct kvm *kvm = info->vfio_device.kvm;
struct kvm_memory_slot *slot;
int idx;
@@ -1758,7 +1697,7 @@ out:
int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
{
- struct kvm *kvm = info->kvm;
+ struct kvm *kvm = info->vfio_device.kvm;
struct kvm_memory_slot *slot;
int idx;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 3e3b09588fd3..958b37123bf1 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -1047,7 +1047,7 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
GEM_BUG_ON(!pmu->base.event_init);
/* Select the first online CPU as a designated reader. */
- if (!cpumask_weight(&i915_pmu_cpumask))
+ if (cpumask_empty(&i915_pmu_cpumask))
cpumask_set_cpu(cpu, &i915_pmu_cpumask);
return 0;
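
The hunk above replaces !cpumask_weight() with cpumask_empty(). A minimal sketch of the idiom (hypothetical helper, not part of the patch): cpumask_empty() asks the question directly and can stop at the first set bit, while cpumask_weight() always counts the whole bitmap before comparing against zero.

#include <linux/cpumask.h>

/* Hypothetical helper: mark @cpu as the designated reader if none is set yet. */
static void example_pick_reader(struct cpumask *mask, unsigned int cpu)
{
	/* Preferred: explicit emptiness check, may short-circuit. */
	if (cpumask_empty(mask))
		cpumask_set_cpu(cpu, mask);
}
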
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 52516eb20cb8..3a462e327e0e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -541,7 +541,6 @@ static int dpu_encoder_virt_atomic_check(
struct dpu_encoder_virt *dpu_enc;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
- const struct drm_display_mode *mode;
struct drm_display_mode *adj_mode;
struct msm_display_topology topology;
struct dpu_global_state *global_state;
@@ -559,7 +558,6 @@ static int dpu_encoder_virt_atomic_check(
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
- mode = &crtc_state->mode;
adj_mode = &crtc_state->adjusted_mode;
global_state = dpu_kms_get_global_state(crtc_state->state);
if (IS_ERR(global_state))
@@ -1814,7 +1812,6 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
}
}
- dsc_common_mode = 0;
pic_width = dsc->drm->pic_width;
dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 4829d1ce0cf8..59da348ff339 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -574,11 +574,11 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
*/
static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
{
- DPU_DEBUG("[wb:%d]\n", phys_enc->wb_idx - WB_0);
-
if (!phys_enc)
return;
+ DPU_DEBUG("[wb:%d]\n", phys_enc->wb_idx - WB_0);
+
kfree(phys_enc);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index bce47647d891..e23e2552e802 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -49,8 +49,6 @@
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
-#define MIN_IB_BW 400000000ULL /* Min ib vote 400MB */
-
static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
@@ -1305,15 +1303,9 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_encoder *encoder;
struct drm_device *ddev;
- int i;
ddev = dpu_kms->dev;
- WARN_ON(!(dpu_kms->num_paths));
- /* Min vote of BW is required before turning on AXI clk */
- for (i = 0; i < dpu_kms->num_paths; i++)
- icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));
-
rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
if (rc) {
DPU_ERROR("clock enable failed rc:%d\n", rc);
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index d21971baa24c..b7f5b8d3bbd6 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1390,8 +1390,13 @@ void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable)
dp_catalog_ctrl_reset(ctrl->catalog);
- if (enable)
- dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
+	/*
+	 * The DP controller's programmable registers are not reset to their
+	 * default values by DP_SW_RESET, therefore the interrupt mask bits
+	 * have to be updated to enable/disable interrupts.
+	 */
+ dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
}
void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 0454a571adf7..e13c5c12b775 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/interconnect.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdesc.h>
@@ -25,6 +26,8 @@
#define UBWC_CTRL_2 0x150
#define UBWC_PREDICTION_MODE 0x154
+#define MIN_IB_BW 400000000UL /* Min ib vote 400MB */
+
struct msm_mdss {
struct device *dev;
@@ -36,8 +39,47 @@ struct msm_mdss {
unsigned long enabled_mask;
struct irq_domain *domain;
} irq_controller;
+ struct icc_path *path[2];
+ u32 num_paths;
};
+static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
+ struct msm_mdss *msm_mdss)
+{
+ struct icc_path *path0 = of_icc_get(dev, "mdp0-mem");
+ struct icc_path *path1 = of_icc_get(dev, "mdp1-mem");
+
+ if (IS_ERR_OR_NULL(path0))
+ return PTR_ERR_OR_ZERO(path0);
+
+ msm_mdss->path[0] = path0;
+ msm_mdss->num_paths = 1;
+
+ if (!IS_ERR_OR_NULL(path1)) {
+ msm_mdss->path[1] = path1;
+ msm_mdss->num_paths++;
+ }
+
+ return 0;
+}
+
+static void msm_mdss_put_icc_path(void *data)
+{
+ struct msm_mdss *msm_mdss = data;
+ int i;
+
+ for (i = 0; i < msm_mdss->num_paths; i++)
+ icc_put(msm_mdss->path[i]);
+}
+
+static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
+{
+ int i;
+
+ for (i = 0; i < msm_mdss->num_paths; i++)
+ icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
+}
+
static void msm_mdss_irq(struct irq_desc *desc)
{
struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
@@ -136,6 +178,13 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
int ret;
+ /*
+ * Several components have AXI clocks that can only be turned on if
+ * the interconnect is enabled (non-zero bandwidth). Let's make sure
+	 * that the interconnects carry at least a minimum bandwidth vote.
+ */
+ msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);
+
ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
if (ret) {
dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
@@ -178,6 +227,7 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
+ msm_mdss_icc_request_bw(msm_mdss, 0);
return 0;
}
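
For context on the interconnect calls introduced above (of_icc_get(), icc_set_bw(), icc_put()), here is a minimal consumer sketch. The "mdp0-mem" path name and the 400 MB/s figure are taken from this patch; everything else is illustrative only.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect.h>

/* Illustrative only: vote a minimum instantaneous bandwidth on one DT path. */
static int example_vote_min_bw(struct device *dev)
{
	struct icc_path *path;
	int ret;

	path = of_icc_get(dev, "mdp0-mem");	/* look up "interconnects" entry by name */
	if (IS_ERR_OR_NULL(path))
		return PTR_ERR_OR_ZERO(path);

	/* avg_bw = 0, peak_bw = 400 MB/s; icc expects kBps, hence Bps_to_icc() */
	ret = icc_set_bw(path, 0, Bps_to_icc(400000000UL));

	icc_put(path);	/* drop the path (and its vote) again */
	return ret;
}
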
@@ -271,6 +321,13 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
+ ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
+ if (ret)
+ return ERR_PTR(ret);
+ ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
+ if (ret)
+ return ERR_PTR(ret);
+
if (is_mdp5)
ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
else
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index a16892c16f60..58db79921cd3 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -473,6 +473,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
native_mode->vdisplay != 0 &&
native_mode->clock != 0) {
mode = drm_mode_duplicate(dev, native_mode);
+ if (!mode)
+ return NULL;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
@@ -487,6 +489,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
* simpler.
*/
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+ if (!mode)
+ return NULL;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
}
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index a3ad7c9736ec..b3fffe7b5062 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -74,7 +74,7 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
#endif /* CONFIG_UML */
#endif /* __i386__ || __x86_64__ */
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
- defined(__powerpc__) || defined(__mips__)
+ defined(__powerpc__) || defined(__mips__) || defined(__loongarch__)
if (caching == ttm_write_combined)
tmp = pgprot_writecombine(tmp);
else
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index 6815b4db17c1..1861a8180d3f 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -1,8 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
+
+config TEGRA_HOST1X_CONTEXT_BUS
+ bool
+
config TEGRA_HOST1X
tristate "NVIDIA Tegra host1x driver"
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
select DMA_SHARED_BUFFER
+ select TEGRA_HOST1X_CONTEXT_BUS
select IOMMU_IOVA
help
Driver for the NVIDIA Tegra host1x hardware.
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index d2b6f7de0498..c891a3e33844 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -18,3 +18,4 @@ host1x-y = \
hw/host1x07.o
obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
+obj-$(CONFIG_TEGRA_HOST1X_CONTEXT_BUS) += context_bus.o
diff --git a/drivers/gpu/host1x/context_bus.c b/drivers/gpu/host1x/context_bus.c
new file mode 100644
index 000000000000..b0d35b2bbe89
--- /dev/null
+++ b/drivers/gpu/host1x/context_bus.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, NVIDIA Corporation.
+ */
+
+#include <linux/device.h>
+#include <linux/of.h>
+
+struct bus_type host1x_context_device_bus_type = {
+ .name = "host1x-context",
+};
+EXPORT_SYMBOL_GPL(host1x_context_device_bus_type);
+
+static int __init host1x_context_device_bus_init(void)
+{
+ int err;
+
+ if (!of_machine_is_compatible("nvidia,tegra186") &&
+ !of_machine_is_compatible("nvidia,tegra194") &&
+ !of_machine_is_compatible("nvidia,tegra234"))
+ return 0;
+
+ err = bus_register(&host1x_context_device_bus_type);
+ if (err < 0) {
+ pr_err("bus type registration failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+postcore_initcall(host1x_context_device_bus_init);
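
The new bus type above only registers an empty bus; devices are expected to be added to it elsewhere in the series. A hypothetical sketch of how a device could be placed on this bus with plain driver-core calls (not part of this patch):

#include <linux/device.h>

extern struct bus_type host1x_context_device_bus_type;	/* exported above */

static struct device example_ctx_dev;

static int example_add_context_device(void)
{
	example_ctx_dev.bus = &host1x_context_device_bus_type;
	dev_set_name(&example_ctx_dev, "host1x-ctx0");	/* illustrative name */
	return device_register(&example_ctx_dev);	/* initialize + add */
}
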
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 54752c85604b..4490e2f7252a 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -387,7 +387,7 @@ static int hid_submit_ctrl(struct hid_device *hid)
usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0);
maxpacket = usb_maxpacket(hid_to_usb_dev(hid),
- usbhid->urbctrl->pipe, 0);
+ usbhid->urbctrl->pipe);
len += (len == 0); /* Don't allow 0-length reports */
len = round_up(len, maxpacket);
if (len > usbhid->bufsize)
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index df02002066ce..b4b007c4beb6 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -279,7 +279,7 @@ static int usb_kbd_probe(struct usb_interface *iface,
return -ENODEV;
pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(dev, pipe);
kbd = kzalloc(sizeof(struct usb_kbd), GFP_KERNEL);
input_dev = input_allocate_device();
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index c89332017d5d..fb1d7d1f6999 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -123,7 +123,7 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
return -ENODEV;
pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(dev, pipe);
mouse = kzalloc(sizeof(struct usb_mouse), GFP_KERNEL);
input_dev = input_allocate_device();
diff --git a/drivers/hte/Kconfig b/drivers/hte/Kconfig
new file mode 100644
index 000000000000..cf29e0218bae
--- /dev/null
+++ b/drivers/hte/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig HTE
+ bool "Hardware Timestamping Engine (HTE) Support"
+ help
+ Hardware Timestamping Engine (HTE) Support.
+
+	  Some devices provide a hardware timestamping engine which can
+	  timestamp certain device lines/signals in realtime. This benefits
+	  applications that need accurate event timestamps with low jitter.
+	  This framework provides a generic interface to such HTE providers
+	  and consumer devices.
+
+ If unsure, say no.
+
+if HTE
+
+config HTE_TEGRA194
+ tristate "NVIDIA Tegra194 HTE Support"
+ depends on ARCH_TEGRA_194_SOC
+ help
+	  Enable this option for the integrated hardware timestamping engine,
+	  also known as the generic timestamping engine (GTE), on NVIDIA
+	  Tegra194 systems-on-chip. The driver supports 352 LIC IRQ lines and
+	  39 AON GPIO lines for timestamping in realtime.
+
+config HTE_TEGRA194_TEST
+ tristate "NVIDIA Tegra194 HTE Test"
+ depends on HTE_TEGRA194
+ help
+	  The NVIDIA Tegra194 GTE test driver demonstrates how to use the HTE
+	  framework to timestamp GPIO and LIC IRQ lines.
+
+endif
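
Before the full Tegra194 provider and test driver below, here is a minimal sketch of the consumer side of the HTE API added by this series. It assumes a consumer device whose DT node has a "timestamps" phandle at index 0; all names are illustrative and the error handling is simplified.

#include <linux/device.h>
#include <linux/hte.h>
#include <linux/printk.h>

/* Illustrative callback: runs in the provider's (possibly atomic) push path. */
static enum hte_return example_ts_cb(struct hte_ts_data *ts, void *data)
{
	pr_info("seq:%llu ts:%llu ns\n", ts->seq, ts->tsc);
	return HTE_CB_HANDLED;
}

static int example_hte_consumer_setup(struct device *dev,
				      struct hte_ts_desc *desc)
{
	int ret;

	/* line id and name come from the DT entry, so pass zeros/NULL here */
	ret = hte_init_line_attr(desc, 0, 0, NULL, NULL);
	if (ret)
		return ret;

	/* bind desc to the provider referenced by "timestamps" index 0 */
	ret = hte_ts_get(dev, desc, 0);
	if (ret)
		return ret;

	/* request + enable; released automatically when the consumer unbinds */
	return devm_hte_request_ts_ns(dev, desc, example_ts_cb, NULL, NULL);
}
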
diff --git a/drivers/hte/Makefile b/drivers/hte/Makefile
new file mode 100644
index 000000000000..8cca124849d2
--- /dev/null
+++ b/drivers/hte/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_HTE) += hte.o
+obj-$(CONFIG_HTE_TEGRA194) += hte-tegra194.o
+obj-$(CONFIG_HTE_TEGRA194_TEST) += hte-tegra194-test.o
diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c
new file mode 100644
index 000000000000..5d776a185bd6
--- /dev/null
+++ b/drivers/hte/hte-tegra194-test.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022 NVIDIA Corporation
+ *
+ * Author: Dipen Patel <dipenp@nvidia.com>
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/hte.h>
+
+/*
+ * This sample HTE GPIO test driver demonstrates HTE API usage by enabling
+ * hardware timestamp on gpio_in and specified LIC IRQ lines.
+ *
+ * Note: gpio_out and gpio_in need to be shorted externally in order for this
+ * test driver to work for the GPIO monitoring. The test driver has been
+ * tested on the Jetson AGX Xavier platform by shorting pins 32 and 16 on the
+ * 40 pin header.
+ *
+ * Device tree snippet to activate this driver:
+ * tegra_hte_test {
+ * compatible = "nvidia,tegra194-hte-test";
+ * in-gpio = <&gpio_aon TEGRA194_AON_GPIO(BB, 1)>;
+ * out-gpio = <&gpio_aon TEGRA194_AON_GPIO(BB, 0)>;
+ * timestamps = <&tegra_hte_aon TEGRA194_AON_GPIO(BB, 1)>,
+ * <&tegra_hte_lic 0x19>;
+ * timestamp-names = "hte-gpio", "hte-i2c-irq";
+ * status = "okay";
+ * };
+ *
+ * How to run the test driver:
+ * - Load the test driver.
+ * - For the GPIO, at a regular interval the gpio_out pin toggles, triggering
+ * HTE on the rising edge of the gpio_in pin.
+ *
+ * - For the LIC IRQ line, it uses interrupt 0x19, which is I2C controller 1.
+ * - Run i2cdetect -y 1 1>/dev/null; this command will generate I2C bus
+ * transactions which create timestamp data.
+ * - It prints the message below for both lines:
+ * HW timestamp(<line id>:<ts seq number>): <timestamp>, edge: <edge>.
+ * - Unloading the driver disables and deallocates the HTE.
+ */
+
+static struct tegra_hte_test {
+ int gpio_in_irq;
+ struct device *pdev;
+ struct gpio_desc *gpio_in;
+ struct gpio_desc *gpio_out;
+ struct hte_ts_desc *desc;
+ struct timer_list timer;
+ struct kobject *kobj;
+} hte;
+
+static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
+{
+ char *edge;
+ struct hte_ts_desc *desc = p;
+
+ if (!ts || !p)
+ return HTE_CB_HANDLED;
+
+ if (ts->raw_level < 0)
+ edge = "Unknown";
+
+ pr_info("HW timestamp(%u: %llu): %llu, edge: %s\n",
+ desc->attr.line_id, ts->seq, ts->tsc,
+ (ts->raw_level >= 0) ? ((ts->raw_level == 0) ?
+ "falling" : "rising") : edge);
+
+ return HTE_CB_HANDLED;
+}
+
+static void gpio_timer_cb(struct timer_list *t)
+{
+ (void)t;
+
+ gpiod_set_value(hte.gpio_out, !gpiod_get_value(hte.gpio_out));
+ mod_timer(&hte.timer, jiffies + msecs_to_jiffies(8000));
+}
+
+static irqreturn_t tegra_hte_test_gpio_isr(int irq, void *data)
+{
+ (void)irq;
+ (void)data;
+
+ return IRQ_HANDLED;
+}
+
+static const struct of_device_id tegra_hte_test_of_match[] = {
+ { .compatible = "nvidia,tegra194-hte-test"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_hte_test_of_match);
+
+static int tegra_hte_test_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i, cnt;
+
+ dev_set_drvdata(&pdev->dev, &hte);
+ hte.pdev = &pdev->dev;
+
+ hte.gpio_out = gpiod_get(&pdev->dev, "out", 0);
+ if (IS_ERR(hte.gpio_out)) {
+ dev_err(&pdev->dev, "failed to get gpio out\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hte.gpio_in = gpiod_get(&pdev->dev, "in", 0);
+ if (IS_ERR(hte.gpio_in)) {
+ dev_err(&pdev->dev, "failed to get gpio in\n");
+ ret = -EINVAL;
+ goto free_gpio_out;
+ }
+
+ ret = gpiod_direction_output(hte.gpio_out, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set output\n");
+ ret = -EINVAL;
+ goto free_gpio_in;
+ }
+
+ ret = gpiod_direction_input(hte.gpio_in);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set input\n");
+ ret = -EINVAL;
+ goto free_gpio_in;
+ }
+
+ ret = gpiod_to_irq(hte.gpio_in);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to map GPIO to IRQ: %d\n", ret);
+ ret = -ENXIO;
+ goto free_gpio_in;
+ }
+
+ hte.gpio_in_irq = ret;
+ ret = request_irq(ret, tegra_hte_test_gpio_isr,
+ IRQF_TRIGGER_RISING,
+ "tegra_hte_gpio_test_isr", &hte);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to acquire IRQ\n");
+ ret = -ENXIO;
+ goto free_irq;
+ }
+
+ cnt = of_hte_req_count(hte.pdev);
+ if (cnt < 0)
+ goto free_irq;
+
+ dev_info(&pdev->dev, "Total requested lines:%d\n", cnt);
+
+ hte.desc = devm_kzalloc(hte.pdev, sizeof(*hte.desc) * cnt, GFP_KERNEL);
+ if (!hte.desc) {
+ ret = -ENOMEM;
+ goto free_irq;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (i == 0)
+ /*
+ * GPIO hte init, line_id and name will be parsed from
+ * the device tree node. The edge_flag is implicitly
+ * set by request_irq call. Only line_data is needed to be
+ * set.
+ */
+ hte_init_line_attr(&hte.desc[i], 0, 0, NULL,
+ hte.gpio_in);
+ else
+ /*
+ * same comment as above except that IRQ does not need
+ * line data.
+ */
+ hte_init_line_attr(&hte.desc[i], 0, 0, NULL, NULL);
+
+ ret = hte_ts_get(hte.pdev, &hte.desc[i], i);
+ if (ret)
+ goto ts_put;
+
+ ret = devm_hte_request_ts_ns(hte.pdev, &hte.desc[i],
+ process_hw_ts, NULL,
+ &hte.desc[i]);
+ if (ret) /* no need to ts_put, request API takes care */
+ goto free_irq;
+ }
+
+ timer_setup(&hte.timer, gpio_timer_cb, 0);
+ mod_timer(&hte.timer, jiffies + msecs_to_jiffies(5000));
+
+ return 0;
+
+ts_put:
+ cnt = i;
+ for (i = 0; i < cnt; i++)
+ hte_ts_put(&hte.desc[i]);
+free_irq:
+ free_irq(hte.gpio_in_irq, &hte);
+free_gpio_in:
+ gpiod_put(hte.gpio_in);
+free_gpio_out:
+ gpiod_put(hte.gpio_out);
+out:
+
+ return ret;
+}
+
+static int tegra_hte_test_remove(struct platform_device *pdev)
+{
+ (void)pdev;
+
+ free_irq(hte.gpio_in_irq, &hte);
+ gpiod_put(hte.gpio_in);
+ gpiod_put(hte.gpio_out);
+ del_timer_sync(&hte.timer);
+
+ return 0;
+}
+
+static struct platform_driver tegra_hte_test_driver = {
+ .probe = tegra_hte_test_probe,
+ .remove = tegra_hte_test_remove,
+ .driver = {
+ .name = "tegra_hte_test",
+ .of_match_table = tegra_hte_test_of_match,
+ },
+};
+module_platform_driver(tegra_hte_test_driver);
+
+MODULE_AUTHOR("Dipen Patel <dipenp@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hte/hte-tegra194.c b/drivers/hte/hte-tegra194.c
new file mode 100644
index 000000000000..49a27af22742
--- /dev/null
+++ b/drivers/hte/hte-tegra194.c
@@ -0,0 +1,730 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022 NVIDIA Corporation
+ *
+ * Author: Dipen Patel <dipenp@nvidia.com>
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/hte.h>
+#include <linux/uaccess.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
+
+#define HTE_SUSPEND 0
+
+/* HTE source clock TSC is 31.25MHz */
+#define HTE_TS_CLK_RATE_HZ 31250000ULL
+#define HTE_CLK_RATE_NS 32
+#define HTE_TS_NS_SHIFT __builtin_ctz(HTE_CLK_RATE_NS)
+
+#define NV_AON_SLICE_INVALID -1
+#define NV_LINES_IN_SLICE 32
+
+/* AON HTE line map for slice 1 */
+#define NV_AON_HTE_SLICE1_IRQ_GPIO_28 12
+#define NV_AON_HTE_SLICE1_IRQ_GPIO_29 13
+
+/* AON HTE line map for slice 2 */
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_0 0
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_1 1
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_2 2
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_3 3
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_4 4
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_5 5
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_6 6
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_7 7
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_8 8
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_9 9
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_10 10
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_11 11
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_12 12
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_13 13
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_14 14
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_15 15
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_16 16
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_17 17
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_18 18
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_19 19
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_20 20
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_21 21
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_22 22
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_23 23
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_24 24
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_25 25
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_26 26
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_27 27
+
+#define HTE_TECTRL 0x0
+#define HTE_TETSCH 0x4
+#define HTE_TETSCL 0x8
+#define HTE_TESRC 0xC
+#define HTE_TECCV 0x10
+#define HTE_TEPCV 0x14
+#define HTE_TECMD 0x1C
+#define HTE_TESTATUS 0x20
+#define HTE_SLICE0_TETEN 0x40
+#define HTE_SLICE1_TETEN 0x60
+
+#define HTE_SLICE_SIZE (HTE_SLICE1_TETEN - HTE_SLICE0_TETEN)
+
+#define HTE_TECTRL_ENABLE_ENABLE 0x1
+
+#define HTE_TECTRL_OCCU_SHIFT 0x8
+#define HTE_TECTRL_INTR_SHIFT 0x1
+#define HTE_TECTRL_INTR_ENABLE 0x1
+
+#define HTE_TESRC_SLICE_SHIFT 16
+#define HTE_TESRC_SLICE_DEFAULT_MASK 0xFF
+
+#define HTE_TECMD_CMD_POP 0x1
+
+#define HTE_TESTATUS_OCCUPANCY_SHIFT 8
+#define HTE_TESTATUS_OCCUPANCY_MASK 0xFF
+
+enum tegra_hte_type {
+ HTE_TEGRA_TYPE_GPIO = 1U << 0,
+ HTE_TEGRA_TYPE_LIC = 1U << 1,
+};
+
+struct hte_slices {
+ u32 r_val;
+ unsigned long flags;
+ /* to prevent lines mapped to same slice updating its register */
+ spinlock_t s_lock;
+};
+
+struct tegra_hte_line_mapped {
+ int slice;
+ u32 bit_index;
+};
+
+struct tegra_hte_line_data {
+ unsigned long flags;
+ void *data;
+};
+
+struct tegra_hte_data {
+ enum tegra_hte_type type;
+ u32 map_sz;
+ u32 sec_map_sz;
+ const struct tegra_hte_line_mapped *map;
+ const struct tegra_hte_line_mapped *sec_map;
+};
+
+struct tegra_hte_soc {
+ int hte_irq;
+ u32 itr_thrshld;
+ u32 conf_rval;
+ struct hte_slices *sl;
+ const struct tegra_hte_data *prov_data;
+ struct tegra_hte_line_data *line_data;
+ struct hte_chip *chip;
+ struct gpio_chip *c;
+ void __iomem *regs;
+};
+
+static const struct tegra_hte_line_mapped tegra194_aon_gpio_map[] = {
+ /* gpio, slice, bit_index */
+ /* AA port */
+ [0] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_11},
+ [1] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_10},
+ [2] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_9},
+ [3] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_8},
+ [4] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_7},
+ [5] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_6},
+ [6] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_5},
+ [7] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_4},
+ /* BB port */
+ [8] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_3},
+ [9] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_2},
+ [10] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_1},
+ [11] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_0},
+ /* CC port */
+ [12] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_22},
+ [13] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_21},
+ [14] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_20},
+ [15] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_19},
+ [16] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_18},
+ [17] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_17},
+ [18] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_16},
+ [19] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_15},
+ /* DD port */
+ [20] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_14},
+ [21] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_13},
+ [22] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_12},
+ /* EE port */
+ [23] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_29},
+ [24] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_28},
+ [25] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_27},
+ [26] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_26},
+ [27] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_25},
+ [28] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_24},
+ [29] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_23},
+};
+
+static const struct tegra_hte_line_mapped tegra194_aon_gpio_sec_map[] = {
+ /* gpio, slice, bit_index */
+ /* AA port */
+ [0] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_11},
+ [1] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_10},
+ [2] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_9},
+ [3] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_8},
+ [4] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_7},
+ [5] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_6},
+ [6] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_5},
+ [7] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_4},
+ /* BB port */
+ [8] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_3},
+ [9] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_2},
+ [10] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_1},
+ [11] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_0},
+ [12] = {NV_AON_SLICE_INVALID, 0},
+ [13] = {NV_AON_SLICE_INVALID, 0},
+ [14] = {NV_AON_SLICE_INVALID, 0},
+ [15] = {NV_AON_SLICE_INVALID, 0},
+ /* CC port */
+ [16] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_22},
+ [17] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_21},
+ [18] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_20},
+ [19] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_19},
+ [20] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_18},
+ [21] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_17},
+ [22] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_16},
+ [23] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_15},
+ /* DD port */
+ [24] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_14},
+ [25] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_13},
+ [26] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_12},
+ [27] = {NV_AON_SLICE_INVALID, 0},
+ [28] = {NV_AON_SLICE_INVALID, 0},
+ [29] = {NV_AON_SLICE_INVALID, 0},
+ [30] = {NV_AON_SLICE_INVALID, 0},
+ [31] = {NV_AON_SLICE_INVALID, 0},
+ /* EE port */
+ [32] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_29},
+ [33] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_28},
+ [34] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_27},
+ [35] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_26},
+ [36] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_25},
+ [37] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_24},
+ [38] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_23},
+ [39] = {NV_AON_SLICE_INVALID, 0},
+};
+
+static const struct tegra_hte_data aon_hte = {
+ .map_sz = ARRAY_SIZE(tegra194_aon_gpio_map),
+ .map = tegra194_aon_gpio_map,
+ .sec_map_sz = ARRAY_SIZE(tegra194_aon_gpio_sec_map),
+ .sec_map = tegra194_aon_gpio_sec_map,
+ .type = HTE_TEGRA_TYPE_GPIO,
+};
+
+static const struct tegra_hte_data lic_hte = {
+ .map_sz = 0,
+ .map = NULL,
+ .type = HTE_TEGRA_TYPE_LIC,
+};
+
+static inline u32 tegra_hte_readl(struct tegra_hte_soc *hte, u32 reg)
+{
+ return readl(hte->regs + reg);
+}
+
+static inline void tegra_hte_writel(struct tegra_hte_soc *hte, u32 reg,
+ u32 val)
+{
+ writel(val, hte->regs + reg);
+}
+
+static int tegra_hte_map_to_line_id(u32 eid,
+ const struct tegra_hte_line_mapped *m,
+ u32 map_sz, u32 *mapped)
+{
+
+ if (m) {
+ if (eid > map_sz)
+ return -EINVAL;
+ if (m[eid].slice == NV_AON_SLICE_INVALID)
+ return -EINVAL;
+
+ *mapped = (m[eid].slice << 5) + m[eid].bit_index;
+ } else {
+ *mapped = eid;
+ }
+
+ return 0;
+}
+
+static int tegra_hte_line_xlate(struct hte_chip *gc,
+ const struct of_phandle_args *args,
+ struct hte_ts_desc *desc, u32 *xlated_id)
+{
+ int ret = 0;
+ u32 line_id;
+ struct tegra_hte_soc *gs;
+ const struct tegra_hte_line_mapped *map = NULL;
+ u32 map_sz = 0;
+
+ if (!gc || !desc || !xlated_id)
+ return -EINVAL;
+
+ if (args) {
+ if (gc->of_hte_n_cells < 1)
+ return -EINVAL;
+
+ if (args->args_count != gc->of_hte_n_cells)
+ return -EINVAL;
+
+ desc->attr.line_id = args->args[0];
+ }
+
+ gs = gc->data;
+ if (!gs || !gs->prov_data)
+ return -EINVAL;
+
+	/*
+	 * There are two paths GPIO consumers can take, as follows:
+	 * 1) The consumer (gpiolib-cdev for example) uses the global GPIO
+	 * number, which gets assigned at run time.
+	 * 2) The consumer passes the GPIO from the DT, where it is assigned
+	 * statically, for example by using the TEGRA194_AON_GPIO DT binding.
+	 *
+	 * The code below addresses both consumer use cases and maps them into
+	 * the HTE/GTE namespace.
+	 */
+ if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO && !args) {
+ line_id = desc->attr.line_id - gs->c->base;
+ map = gs->prov_data->map;
+ map_sz = gs->prov_data->map_sz;
+ } else if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO && args) {
+ line_id = desc->attr.line_id;
+ map = gs->prov_data->sec_map;
+ map_sz = gs->prov_data->sec_map_sz;
+ } else {
+ line_id = desc->attr.line_id;
+ }
+
+ ret = tegra_hte_map_to_line_id(line_id, map, map_sz, xlated_id);
+ if (ret < 0) {
+ dev_err(gc->dev, "line_id:%u mapping failed\n",
+ desc->attr.line_id);
+ return ret;
+ }
+
+ if (*xlated_id > gc->nlines)
+ return -EINVAL;
+
+ dev_dbg(gc->dev, "requested id:%u, xlated id:%u\n",
+ desc->attr.line_id, *xlated_id);
+
+ return 0;
+}
+
+static int tegra_hte_line_xlate_plat(struct hte_chip *gc,
+ struct hte_ts_desc *desc, u32 *xlated_id)
+{
+ return tegra_hte_line_xlate(gc, NULL, desc, xlated_id);
+}
+
+static int tegra_hte_en_dis_common(struct hte_chip *chip, u32 line_id, bool en)
+{
+ u32 slice, sl_bit_shift, line_bit, val, reg;
+ struct tegra_hte_soc *gs;
+
+ sl_bit_shift = __builtin_ctz(HTE_SLICE_SIZE);
+
+ if (!chip)
+ return -EINVAL;
+
+ gs = chip->data;
+
+ if (line_id > chip->nlines) {
+ dev_err(chip->dev,
+ "line id: %u is not supported by this controller\n",
+ line_id);
+ return -EINVAL;
+ }
+
+ slice = line_id >> sl_bit_shift;
+ line_bit = line_id & (HTE_SLICE_SIZE - 1);
+ reg = (slice << sl_bit_shift) + HTE_SLICE0_TETEN;
+
+ spin_lock(&gs->sl[slice].s_lock);
+
+ if (test_bit(HTE_SUSPEND, &gs->sl[slice].flags)) {
+ spin_unlock(&gs->sl[slice].s_lock);
+ dev_dbg(chip->dev, "device suspended");
+ return -EBUSY;
+ }
+
+ val = tegra_hte_readl(gs, reg);
+ if (en)
+ val = val | (1 << line_bit);
+ else
+ val = val & (~(1 << line_bit));
+ tegra_hte_writel(gs, reg, val);
+
+ spin_unlock(&gs->sl[slice].s_lock);
+
+ dev_dbg(chip->dev, "line: %u, slice %u, line_bit %u, reg:0x%x\n",
+ line_id, slice, line_bit, reg);
+
+ return 0;
+}
+
+static int tegra_hte_enable(struct hte_chip *chip, u32 line_id)
+{
+ if (!chip)
+ return -EINVAL;
+
+ return tegra_hte_en_dis_common(chip, line_id, true);
+}
+
+static int tegra_hte_disable(struct hte_chip *chip, u32 line_id)
+{
+ if (!chip)
+ return -EINVAL;
+
+ return tegra_hte_en_dis_common(chip, line_id, false);
+}
+
+static int tegra_hte_request(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 line_id)
+{
+ int ret;
+ struct tegra_hte_soc *gs;
+ struct hte_line_attr *attr;
+
+ if (!chip || !chip->data || !desc)
+ return -EINVAL;
+
+ gs = chip->data;
+ attr = &desc->attr;
+
+ if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+ if (!attr->line_data)
+ return -EINVAL;
+
+ ret = gpiod_enable_hw_timestamp_ns(attr->line_data,
+ attr->edge_flags);
+ if (ret)
+ return ret;
+
+ gs->line_data[line_id].data = attr->line_data;
+ gs->line_data[line_id].flags = attr->edge_flags;
+ }
+
+ return tegra_hte_en_dis_common(chip, line_id, true);
+}
+
+static int tegra_hte_release(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 line_id)
+{
+ struct tegra_hte_soc *gs;
+ struct hte_line_attr *attr;
+ int ret;
+
+ if (!chip || !chip->data || !desc)
+ return -EINVAL;
+
+ gs = chip->data;
+ attr = &desc->attr;
+
+ if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+ ret = gpiod_disable_hw_timestamp_ns(attr->line_data,
+ gs->line_data[line_id].flags);
+ if (ret)
+ return ret;
+
+ gs->line_data[line_id].data = NULL;
+ gs->line_data[line_id].flags = 0;
+ }
+
+ return tegra_hte_en_dis_common(chip, line_id, false);
+}
+
+static int tegra_hte_clk_src_info(struct hte_chip *chip,
+ struct hte_clk_info *ci)
+{
+ (void)chip;
+
+ if (!ci)
+ return -EINVAL;
+
+ ci->hz = HTE_TS_CLK_RATE_HZ;
+ ci->type = CLOCK_MONOTONIC;
+
+ return 0;
+}
+
+static int tegra_hte_get_level(struct tegra_hte_soc *gs, u32 line_id)
+{
+ struct gpio_desc *desc;
+
+ if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+ desc = gs->line_data[line_id].data;
+ if (desc)
+ return gpiod_get_raw_value(desc);
+ }
+
+ return -1;
+}
+
+static void tegra_hte_read_fifo(struct tegra_hte_soc *gs)
+{
+ u32 tsh, tsl, src, pv, cv, acv, slice, bit_index, line_id;
+ u64 tsc;
+ struct hte_ts_data el;
+
+ while ((tegra_hte_readl(gs, HTE_TESTATUS) >>
+ HTE_TESTATUS_OCCUPANCY_SHIFT) &
+ HTE_TESTATUS_OCCUPANCY_MASK) {
+ tsh = tegra_hte_readl(gs, HTE_TETSCH);
+ tsl = tegra_hte_readl(gs, HTE_TETSCL);
+ tsc = (((u64)tsh << 32) | tsl);
+
+ src = tegra_hte_readl(gs, HTE_TESRC);
+ slice = (src >> HTE_TESRC_SLICE_SHIFT) &
+ HTE_TESRC_SLICE_DEFAULT_MASK;
+
+ pv = tegra_hte_readl(gs, HTE_TEPCV);
+ cv = tegra_hte_readl(gs, HTE_TECCV);
+ acv = pv ^ cv;
+ while (acv) {
+ bit_index = __builtin_ctz(acv);
+ line_id = bit_index + (slice << 5);
+ el.tsc = tsc << HTE_TS_NS_SHIFT;
+ el.raw_level = tegra_hte_get_level(gs, line_id);
+ hte_push_ts_ns(gs->chip, line_id, &el);
+ acv &= ~BIT(bit_index);
+ }
+ tegra_hte_writel(gs, HTE_TECMD, HTE_TECMD_CMD_POP);
+ }
+}
+
+static irqreturn_t tegra_hte_isr(int irq, void *dev_id)
+{
+ struct tegra_hte_soc *gs = dev_id;
+ (void)irq;
+
+ tegra_hte_read_fifo(gs);
+
+ return IRQ_HANDLED;
+}
+
+static bool tegra_hte_match_from_linedata(const struct hte_chip *chip,
+ const struct hte_ts_desc *hdesc)
+{
+ struct tegra_hte_soc *hte_dev = chip->data;
+
+ if (!hte_dev || (hte_dev->prov_data->type != HTE_TEGRA_TYPE_GPIO))
+ return false;
+
+ return hte_dev->c == gpiod_to_chip(hdesc->attr.line_data);
+}
+
+static const struct of_device_id tegra_hte_of_match[] = {
+ { .compatible = "nvidia,tegra194-gte-lic", .data = &lic_hte},
+ { .compatible = "nvidia,tegra194-gte-aon", .data = &aon_hte},
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_hte_of_match);
+
+static const struct hte_ops g_ops = {
+ .request = tegra_hte_request,
+ .release = tegra_hte_release,
+ .enable = tegra_hte_enable,
+ .disable = tegra_hte_disable,
+ .get_clk_src_info = tegra_hte_clk_src_info,
+};
+
+static void tegra_gte_disable(void *data)
+{
+ struct platform_device *pdev = data;
+ struct tegra_hte_soc *gs = dev_get_drvdata(&pdev->dev);
+
+ tegra_hte_writel(gs, HTE_TECTRL, 0);
+}
+
+static int tegra_get_gpiochip_from_name(struct gpio_chip *chip, void *data)
+{
+ return !strcmp(chip->label, data);
+}
+
+static int tegra_hte_probe(struct platform_device *pdev)
+{
+ int ret;
+ u32 i, slices, val = 0;
+ u32 nlines;
+ struct device *dev;
+ struct tegra_hte_soc *hte_dev;
+ struct hte_chip *gc;
+
+ dev = &pdev->dev;
+
+ ret = of_property_read_u32(dev->of_node, "nvidia,slices", &slices);
+ if (ret != 0) {
+ dev_err(dev, "Could not read slices\n");
+ return -EINVAL;
+ }
+ nlines = slices << 5;
+
+ hte_dev = devm_kzalloc(dev, sizeof(*hte_dev), GFP_KERNEL);
+ if (!hte_dev)
+ return -ENOMEM;
+
+ gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+ if (!gc)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, hte_dev);
+ hte_dev->prov_data = of_device_get_match_data(&pdev->dev);
+
+ hte_dev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hte_dev->regs))
+ return PTR_ERR(hte_dev->regs);
+
+ ret = of_property_read_u32(dev->of_node, "nvidia,int-threshold",
+ &hte_dev->itr_thrshld);
+ if (ret != 0)
+ hte_dev->itr_thrshld = 1;
+
+ hte_dev->sl = devm_kcalloc(dev, slices, sizeof(*hte_dev->sl),
+ GFP_KERNEL);
+ if (!hte_dev->sl)
+ return -ENOMEM;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "failed to get irq\n");
+ return ret;
+ }
+ hte_dev->hte_irq = ret;
+ ret = devm_request_irq(dev, hte_dev->hte_irq, tegra_hte_isr, 0,
+ dev_name(dev), hte_dev);
+ if (ret < 0) {
+ dev_err(dev, "request irq failed.\n");
+ return ret;
+ }
+
+ gc->nlines = nlines;
+ gc->ops = &g_ops;
+ gc->dev = dev;
+ gc->data = hte_dev;
+ gc->xlate_of = tegra_hte_line_xlate;
+ gc->xlate_plat = tegra_hte_line_xlate_plat;
+ gc->of_hte_n_cells = 1;
+
+ if (hte_dev->prov_data &&
+ hte_dev->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+ hte_dev->line_data = devm_kcalloc(dev, nlines,
+ sizeof(*hte_dev->line_data),
+ GFP_KERNEL);
+ if (!hte_dev->line_data)
+ return -ENOMEM;
+
+ gc->match_from_linedata = tegra_hte_match_from_linedata;
+
+ hte_dev->c = gpiochip_find("tegra194-gpio-aon",
+ tegra_get_gpiochip_from_name);
+ if (!hte_dev->c)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "wait for gpio controller\n");
+ }
+
+ hte_dev->chip = gc;
+
+ ret = devm_hte_register_chip(hte_dev->chip);
+ if (ret) {
+ dev_err(gc->dev, "hte chip register failed");
+ return ret;
+ }
+
+ for (i = 0; i < slices; i++) {
+ hte_dev->sl[i].flags = 0;
+ spin_lock_init(&hte_dev->sl[i].s_lock);
+ }
+
+ val = HTE_TECTRL_ENABLE_ENABLE |
+ (HTE_TECTRL_INTR_ENABLE << HTE_TECTRL_INTR_SHIFT) |
+ (hte_dev->itr_thrshld << HTE_TECTRL_OCCU_SHIFT);
+ tegra_hte_writel(hte_dev, HTE_TECTRL, val);
+
+ ret = devm_add_action_or_reset(&pdev->dev, tegra_gte_disable, pdev);
+ if (ret)
+ return ret;
+
+ dev_dbg(gc->dev, "lines: %d, slices:%d", gc->nlines, slices);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_hte_resume_early(struct device *dev)
+{
+ u32 i;
+ struct tegra_hte_soc *gs = dev_get_drvdata(dev);
+ u32 slices = gs->chip->nlines / NV_LINES_IN_SLICE;
+ u32 sl_bit_shift = __builtin_ctz(HTE_SLICE_SIZE);
+
+ tegra_hte_writel(gs, HTE_TECTRL, gs->conf_rval);
+
+ for (i = 0; i < slices; i++) {
+ spin_lock(&gs->sl[i].s_lock);
+ tegra_hte_writel(gs,
+ ((i << sl_bit_shift) + HTE_SLICE0_TETEN),
+ gs->sl[i].r_val);
+ clear_bit(HTE_SUSPEND, &gs->sl[i].flags);
+ spin_unlock(&gs->sl[i].s_lock);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_hte_suspend_late(struct device *dev)
+{
+ u32 i;
+ struct tegra_hte_soc *gs = dev_get_drvdata(dev);
+ u32 slices = gs->chip->nlines / NV_LINES_IN_SLICE;
+ u32 sl_bit_shift = __builtin_ctz(HTE_SLICE_SIZE);
+
+ gs->conf_rval = tegra_hte_readl(gs, HTE_TECTRL);
+ for (i = 0; i < slices; i++) {
+ spin_lock(&gs->sl[i].s_lock);
+ gs->sl[i].r_val = tegra_hte_readl(gs,
+ ((i << sl_bit_shift) + HTE_SLICE0_TETEN));
+ set_bit(HTE_SUSPEND, &gs->sl[i].flags);
+ spin_unlock(&gs->sl[i].s_lock);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_hte_pm = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(tegra_hte_suspend_late,
+ tegra_hte_resume_early)
+};
+
+static struct platform_driver tegra_hte_driver = {
+ .probe = tegra_hte_probe,
+ .driver = {
+ .name = "tegra_hte",
+ .pm = &tegra_hte_pm,
+ .of_match_table = tegra_hte_of_match,
+ },
+};
+
+module_platform_driver(tegra_hte_driver);
+
+MODULE_AUTHOR("Dipen Patel <dipenp@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra HTE (Hardware Timestamping Engine) driver");
+MODULE_LICENSE("GPL");
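
A quick unit check on tegra_hte_read_fifo() above (my arithmetic, not part of the patch): the GTE timestamp counter runs at HTE_TS_CLK_RATE_HZ = 31.25 MHz, so one tick is 1 / 31250000 s = 32 ns = HTE_CLK_RATE_NS, and since 32 = 2^5, HTE_TS_NS_SHIFT is 5. That is why the driver converts raw ticks to nanoseconds with a left shift:

#include <linux/types.h>

/* Illustrative only: ticks -> nanoseconds, matching el.tsc = tsc << HTE_TS_NS_SHIFT */
static inline u64 example_gte_ticks_to_ns(u64 ticks)
{
	return ticks << 5;	/* 1 tick = 32 ns at 31.25 MHz, 32 == 1 << 5 */
}
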
diff --git a/drivers/hte/hte.c b/drivers/hte/hte.c
new file mode 100644
index 000000000000..7c3b4476f890
--- /dev/null
+++ b/drivers/hte/hte.c
@@ -0,0 +1,947 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022 NVIDIA Corporation
+ *
+ * Author: Dipen Patel <dipenp@nvidia.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/hte.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+
+#define HTE_TS_NAME_LEN 10
+
+/* Global list of the HTE devices */
+static DEFINE_SPINLOCK(hte_lock);
+static LIST_HEAD(hte_devices);
+
+enum {
+ HTE_TS_REGISTERED,
+ HTE_TS_REQ,
+ HTE_TS_DISABLE,
+ HTE_TS_QUEUE_WK,
+};
+
+/**
+ * struct hte_ts_info - Information related to requested timestamp.
+ *
+ * @xlated_id: Timestamp ID as understood between HTE subsys and HTE provider,
+ * See xlate callback API.
+ * @flags: Flags holding state information.
+ * @hte_cb_flags: Callback related flags.
+ * @seq: Timestamp sequence counter.
+ * @line_name: HTE allocated line name.
+ * @free_attr_name: If set, free the attr name.
+ * @cb: A nonsleeping callback function provided by clients.
+ * @tcb: A secondary sleeping callback function provided by clients.
+ * @dropped_ts: Dropped timestamps.
+ * @slock: Spin lock to synchronize between disable/enable,
+ * request/release APIs.
+ * @cb_work: callback workqueue, used when tcb is specified.
+ * @req_mlock: Lock during timestamp request/release APIs.
+ * @ts_dbg_root: Root for the debug fs.
+ * @gdev: HTE abstract device that this timestamp information belongs to.
+ * @cl_data: Client specific data.
+ */
+struct hte_ts_info {
+ u32 xlated_id;
+ unsigned long flags;
+ unsigned long hte_cb_flags;
+ u64 seq;
+ char *line_name;
+ bool free_attr_name;
+ hte_ts_cb_t cb;
+ hte_ts_sec_cb_t tcb;
+ atomic_t dropped_ts;
+ spinlock_t slock;
+ struct work_struct cb_work;
+ struct mutex req_mlock;
+ struct dentry *ts_dbg_root;
+ struct hte_device *gdev;
+ void *cl_data;
+};
+
+/**
+ * struct hte_device - HTE abstract device
+ * @nlines: Number of entities this device supports.
+ * @ts_req: Total number of entities requested.
+ * @sdev: Device used at various debug prints.
+ * @dbg_root: Root directory for debug fs.
+ * @list: List node to store hte_device for each provider.
+ * @chip: HTE chip providing this HTE device.
+ * @owner: helps prevent removal of modules when in use.
+ * @ei: Timestamp information.
+ */
+struct hte_device {
+ u32 nlines;
+ atomic_t ts_req;
+ struct device *sdev;
+ struct dentry *dbg_root;
+ struct list_head list;
+ struct hte_chip *chip;
+ struct module *owner;
+ struct hte_ts_info ei[];
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *hte_root;
+
+static int __init hte_subsys_dbgfs_init(void)
+{
+ /* creates /sys/kernel/debug/hte/ */
+ hte_root = debugfs_create_dir("hte", NULL);
+
+ return 0;
+}
+subsys_initcall(hte_subsys_dbgfs_init);
+
+static void hte_chip_dbgfs_init(struct hte_device *gdev)
+{
+ const struct hte_chip *chip = gdev->chip;
+ const char *name = chip->name ? chip->name : dev_name(chip->dev);
+
+ gdev->dbg_root = debugfs_create_dir(name, hte_root);
+
+ debugfs_create_atomic_t("ts_requested", 0444, gdev->dbg_root,
+ &gdev->ts_req);
+ debugfs_create_u32("total_ts", 0444, gdev->dbg_root,
+ &gdev->nlines);
+}
+
+static void hte_ts_dbgfs_init(const char *name, struct hte_ts_info *ei)
+{
+ if (!ei->gdev->dbg_root || !name)
+ return;
+
+ ei->ts_dbg_root = debugfs_create_dir(name, ei->gdev->dbg_root);
+
+ debugfs_create_atomic_t("dropped_timestamps", 0444, ei->ts_dbg_root,
+ &ei->dropped_ts);
+}
+
+#else
+
+static void hte_chip_dbgfs_init(struct hte_device *gdev)
+{
+}
+
+static void hte_ts_dbgfs_init(const char *name, struct hte_ts_info *ei)
+{
+}
+
+#endif
+
+/**
+ * hte_ts_put() - Release and disable timestamp for the given desc.
+ *
+ * @desc: timestamp descriptor.
+ *
+ * Context: debugfs_remove_recursive() function call may use sleeping locks,
+ * not suitable from atomic context.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_ts_put(struct hte_ts_desc *desc)
+{
+ int ret = 0;
+ unsigned long flag;
+ struct hte_device *gdev;
+ struct hte_ts_info *ei;
+
+ if (!desc)
+ return -EINVAL;
+
+ ei = desc->hte_data;
+
+ if (!ei || !ei->gdev)
+ return -EINVAL;
+
+ gdev = ei->gdev;
+
+ mutex_lock(&ei->req_mlock);
+
+ if (unlikely(!test_bit(HTE_TS_REQ, &ei->flags) &&
+ !test_bit(HTE_TS_REGISTERED, &ei->flags))) {
+ dev_info(gdev->sdev, "id:%d is not requested\n",
+ desc->attr.line_id);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (unlikely(!test_bit(HTE_TS_REQ, &ei->flags) &&
+ test_bit(HTE_TS_REGISTERED, &ei->flags))) {
+ dev_info(gdev->sdev, "id:%d is registered but not requested\n",
+ desc->attr.line_id);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (test_bit(HTE_TS_REQ, &ei->flags) &&
+ !test_bit(HTE_TS_REGISTERED, &ei->flags)) {
+ clear_bit(HTE_TS_REQ, &ei->flags);
+ desc->hte_data = NULL;
+ ret = 0;
+ goto mod_put;
+ }
+
+ ret = gdev->chip->ops->release(gdev->chip, desc, ei->xlated_id);
+ if (ret) {
+ dev_err(gdev->sdev, "id: %d free failed\n",
+ desc->attr.line_id);
+ goto unlock;
+ }
+
+ kfree(ei->line_name);
+ if (ei->free_attr_name)
+ kfree_const(desc->attr.name);
+
+ debugfs_remove_recursive(ei->ts_dbg_root);
+
+ spin_lock_irqsave(&ei->slock, flag);
+
+ if (test_bit(HTE_TS_QUEUE_WK, &ei->flags)) {
+ spin_unlock_irqrestore(&ei->slock, flag);
+ flush_work(&ei->cb_work);
+ spin_lock_irqsave(&ei->slock, flag);
+ }
+
+ atomic_dec(&gdev->ts_req);
+ atomic_set(&ei->dropped_ts, 0);
+
+ ei->seq = 1;
+ ei->flags = 0;
+ desc->hte_data = NULL;
+
+ spin_unlock_irqrestore(&ei->slock, flag);
+
+ ei->cb = NULL;
+ ei->tcb = NULL;
+ ei->cl_data = NULL;
+
+mod_put:
+ module_put(gdev->owner);
+unlock:
+ mutex_unlock(&ei->req_mlock);
+ dev_dbg(gdev->sdev, "release id: %d\n", desc->attr.line_id);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hte_ts_put);
+
+static int hte_ts_dis_en_common(struct hte_ts_desc *desc, bool en)
+{
+ u32 ts_id;
+ struct hte_device *gdev;
+ struct hte_ts_info *ei;
+ int ret;
+ unsigned long flag;
+
+ if (!desc)
+ return -EINVAL;
+
+ ei = desc->hte_data;
+
+ if (!ei || !ei->gdev)
+ return -EINVAL;
+
+ gdev = ei->gdev;
+ ts_id = desc->attr.line_id;
+
+ mutex_lock(&ei->req_mlock);
+
+ if (!test_bit(HTE_TS_REGISTERED, &ei->flags)) {
+ dev_dbg(gdev->sdev, "id:%d is not registered", ts_id);
+ ret = -EUSERS;
+ goto out;
+ }
+
+ spin_lock_irqsave(&ei->slock, flag);
+
+ if (en) {
+ if (!test_bit(HTE_TS_DISABLE, &ei->flags)) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ spin_unlock_irqrestore(&ei->slock, flag);
+ ret = gdev->chip->ops->enable(gdev->chip, ei->xlated_id);
+ if (ret) {
+ dev_warn(gdev->sdev, "id: %d enable failed\n",
+ ts_id);
+ goto out;
+ }
+
+ spin_lock_irqsave(&ei->slock, flag);
+ clear_bit(HTE_TS_DISABLE, &ei->flags);
+ } else {
+ if (test_bit(HTE_TS_DISABLE, &ei->flags)) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ spin_unlock_irqrestore(&ei->slock, flag);
+ ret = gdev->chip->ops->disable(gdev->chip, ei->xlated_id);
+ if (ret) {
+ dev_warn(gdev->sdev, "id: %d disable failed\n",
+ ts_id);
+ goto out;
+ }
+
+ spin_lock_irqsave(&ei->slock, flag);
+ set_bit(HTE_TS_DISABLE, &ei->flags);
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&ei->slock, flag);
+out:
+ mutex_unlock(&ei->req_mlock);
+ return ret;
+}
+
+/**
+ * hte_disable_ts() - Disable timestamp on given descriptor.
+ *
+ * The API does not release any resources associated with desc.
+ *
+ * @desc: ts descriptor, this is the same as returned by the request API.
+ *
+ * Context: Holds mutex lock, not suitable from atomic context.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_disable_ts(struct hte_ts_desc *desc)
+{
+ return hte_ts_dis_en_common(desc, false);
+}
+EXPORT_SYMBOL_GPL(hte_disable_ts);
+
+/**
+ * hte_enable_ts() - Enable timestamp on given descriptor.
+ *
+ * @desc: ts descriptor, this is the same as returned by the request API.
+ *
+ * Context: Holds mutex lock, not suitable from atomic context.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_enable_ts(struct hte_ts_desc *desc)
+{
+ return hte_ts_dis_en_common(desc, true);
+}
+EXPORT_SYMBOL_GPL(hte_enable_ts);
+
+static void hte_do_cb_work(struct work_struct *w)
+{
+ unsigned long flag;
+ struct hte_ts_info *ei = container_of(w, struct hte_ts_info, cb_work);
+
+ if (unlikely(!ei->tcb))
+ return;
+
+ ei->tcb(ei->cl_data);
+
+ spin_lock_irqsave(&ei->slock, flag);
+ clear_bit(HTE_TS_QUEUE_WK, &ei->flags);
+ spin_unlock_irqrestore(&ei->slock, flag);
+}
+
+static int __hte_req_ts(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data)
+{
+ int ret;
+ struct hte_device *gdev;
+ struct hte_ts_info *ei = desc->hte_data;
+
+ gdev = ei->gdev;
+ /*
+	 * There is a chance that multiple consumers request the same entity;
+	 * lock here.
+ */
+ mutex_lock(&ei->req_mlock);
+
+ if (test_bit(HTE_TS_REGISTERED, &ei->flags) ||
+ !test_bit(HTE_TS_REQ, &ei->flags)) {
+ dev_dbg(gdev->chip->dev, "id:%u req failed\n",
+ desc->attr.line_id);
+ ret = -EUSERS;
+ goto unlock;
+ }
+
+ ei->cb = cb;
+ ei->tcb = tcb;
+ if (tcb)
+ INIT_WORK(&ei->cb_work, hte_do_cb_work);
+
+ ret = gdev->chip->ops->request(gdev->chip, desc, ei->xlated_id);
+ if (ret < 0) {
+ dev_err(gdev->chip->dev, "ts request failed\n");
+ goto unlock;
+ }
+
+ ei->cl_data = data;
+ ei->seq = 1;
+
+ atomic_inc(&gdev->ts_req);
+
+ ei->line_name = NULL;
+ if (!desc->attr.name) {
+ ei->line_name = kzalloc(HTE_TS_NAME_LEN, GFP_KERNEL);
+ if (ei->line_name)
+ scnprintf(ei->line_name, HTE_TS_NAME_LEN, "ts_%u",
+ desc->attr.line_id);
+ }
+
+ hte_ts_dbgfs_init(desc->attr.name == NULL ?
+ ei->line_name : desc->attr.name, ei);
+ set_bit(HTE_TS_REGISTERED, &ei->flags);
+
+ dev_dbg(gdev->chip->dev, "id: %u, xlated id:%u",
+ desc->attr.line_id, ei->xlated_id);
+
+ ret = 0;
+
+unlock:
+ mutex_unlock(&ei->req_mlock);
+
+ return ret;
+}
+
+static int hte_bind_ts_info_locked(struct hte_ts_info *ei,
+ struct hte_ts_desc *desc, u32 x_id)
+{
+ int ret = 0;
+
+ mutex_lock(&ei->req_mlock);
+
+ if (test_bit(HTE_TS_REQ, &ei->flags)) {
+ dev_dbg(ei->gdev->chip->dev, "id:%u is already requested\n",
+ desc->attr.line_id);
+ ret = -EUSERS;
+ goto out;
+ }
+
+ set_bit(HTE_TS_REQ, &ei->flags);
+ desc->hte_data = ei;
+ ei->xlated_id = x_id;
+
+out:
+ mutex_unlock(&ei->req_mlock);
+
+ return ret;
+}
+
+static struct hte_device *of_node_to_htedevice(struct device_node *np)
+{
+ struct hte_device *gdev;
+
+ spin_lock(&hte_lock);
+
+ list_for_each_entry(gdev, &hte_devices, list)
+ if (gdev->chip && gdev->chip->dev &&
+ gdev->chip->dev->of_node == np) {
+ spin_unlock(&hte_lock);
+ return gdev;
+ }
+
+ spin_unlock(&hte_lock);
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct hte_device *hte_find_dev_from_linedata(struct hte_ts_desc *desc)
+{
+ struct hte_device *gdev;
+
+ spin_lock(&hte_lock);
+
+ list_for_each_entry(gdev, &hte_devices, list)
+ if (gdev->chip && gdev->chip->match_from_linedata) {
+ if (!gdev->chip->match_from_linedata(gdev->chip, desc))
+ continue;
+ spin_unlock(&hte_lock);
+ return gdev;
+ }
+
+ spin_unlock(&hte_lock);
+
+ return ERR_PTR(-ENODEV);
+}
+
+/**
+ * of_hte_req_count() - Return the number of entities to timestamp.
+ *
+ * The function returns the total count of the requested entities to timestamp
+ * by parsing the device tree.
+ *
+ * @dev: The HTE consumer.
+ *
+ * Returns: Positive number on success, -ENOENT if no entries,
+ * -EINVAL for other errors.
+ */
+int of_hte_req_count(struct device *dev)
+{
+ int count;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ count = of_count_phandle_with_args(dev->of_node, "timestamps",
+ "#timestamp-cells");
+
+ return count ? count : -ENOENT;
+}
+EXPORT_SYMBOL_GPL(of_hte_req_count);
+
+static inline struct hte_device *hte_get_dev(struct hte_ts_desc *desc)
+{
+ return hte_find_dev_from_linedata(desc);
+}
+
+static struct hte_device *hte_of_get_dev(struct device *dev,
+ struct hte_ts_desc *desc,
+ int index,
+ struct of_phandle_args *args,
+ bool *free_name)
+{
+ int ret;
+ struct device_node *np;
+ char *temp;
+
+ if (!dev->of_node)
+ return ERR_PTR(-EINVAL);
+
+ np = dev->of_node;
+
+ if (!of_find_property(np, "timestamp-names", NULL)) {
+ /* Let hte core construct it during request time */
+ desc->attr.name = NULL;
+ } else {
+ ret = of_property_read_string_index(np, "timestamp-names",
+ index, &desc->attr.name);
+ if (ret) {
+ pr_err("can't parse \"timestamp-names\" property\n");
+ return ERR_PTR(ret);
+ }
+ *free_name = false;
+ if (desc->attr.name) {
+ temp = skip_spaces(desc->attr.name);
+ if (!*temp)
+ desc->attr.name = NULL;
+ }
+ }
+
+ ret = of_parse_phandle_with_args(np, "timestamps", "#timestamp-cells",
+ index, args);
+ if (ret) {
+ pr_err("%s(): can't parse \"timestamps\" property\n",
+ __func__);
+ return ERR_PTR(ret);
+ }
+
+ of_node_put(args->np);
+
+ return of_node_to_htedevice(args->np);
+}
+
+/**
+ * hte_ts_get() - The function to initialize and obtain HTE desc.
+ *
+ * The function initializes the consumer-provided HTE descriptor. If the
+ * consumer has a device tree node, the index is used to parse the line id and
+ * other details. The function needs to be called before using any request APIs.
+ *
+ * @dev: HTE consumer/client device, used in case of parsing device tree node.
+ * @desc: Pre-allocated timestamp descriptor.
+ * @index: The index will be used as an index to parse line_id from the
+ * device tree node if node is present.
+ *
+ * Context: Holds mutex lock.
+ * Returns: Returns 0 on success or negative error code on failure.
+ */
+int hte_ts_get(struct device *dev, struct hte_ts_desc *desc, int index)
+{
+ struct hte_device *gdev;
+ struct hte_ts_info *ei;
+ const struct fwnode_handle *fwnode;
+ struct of_phandle_args args;
+ u32 xlated_id;
+ int ret;
+ bool free_name = false;
+
+ if (!desc)
+ return -EINVAL;
+
+ fwnode = dev ? dev_fwnode(dev) : NULL;
+
+ if (is_of_node(fwnode))
+ gdev = hte_of_get_dev(dev, desc, index, &args, &free_name);
+ else
+ gdev = hte_get_dev(desc);
+
+ if (IS_ERR(gdev)) {
+ pr_err("%s() no hte dev found\n", __func__);
+ return PTR_ERR(gdev);
+ }
+
+ if (!try_module_get(gdev->owner))
+ return -ENODEV;
+
+ if (!gdev->chip) {
+ pr_err("%s(): requested id does not have provider\n",
+ __func__);
+ ret = -ENODEV;
+ goto put;
+ }
+
+ if (is_of_node(fwnode)) {
+ if (!gdev->chip->xlate_of)
+ ret = -EINVAL;
+ else
+ ret = gdev->chip->xlate_of(gdev->chip, &args,
+ desc, &xlated_id);
+ } else {
+ if (!gdev->chip->xlate_plat)
+ ret = -EINVAL;
+ else
+ ret = gdev->chip->xlate_plat(gdev->chip, desc,
+ &xlated_id);
+ }
+
+ if (ret < 0)
+ goto put;
+
+ ei = &gdev->ei[xlated_id];
+
+ ret = hte_bind_ts_info_locked(ei, desc, xlated_id);
+ if (ret)
+ goto put;
+
+ ei->free_attr_name = free_name;
+
+ return 0;
+
+put:
+ module_put(gdev->owner);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hte_ts_get);
+
+static void __devm_hte_release_ts(void *res)
+{
+ hte_ts_put(res);
+}
+
+/**
+ * hte_request_ts_ns() - The API to request and enable hardware timestamp in
+ * nanoseconds.
+ *
+ * The entity is provider specific, for example GPIO lines, signals, buses,
+ * etc. The API allocates the necessary resources and enables the timestamp.
+ *
+ * @desc: Pre-allocated and initialized timestamp descriptor.
+ * @cb: Callback to push the timestamp data to the consumer.
+ * @tcb: Optional callback. If it is provided, the subsystem initializes a
+ * workqueue. It is called when cb returns HTE_RUN_SECOND_CB.
+ * @data: Client data, used during cb and tcb callbacks.
+ *
+ * Context: Holds mutex lock.
+ * Returns: Returns 0 on success or negative error code on failure.
+ */
+int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data)
+{
+ int ret;
+ struct hte_ts_info *ei;
+
+ if (!desc || !desc->hte_data || !cb)
+ return -EINVAL;
+
+ ei = desc->hte_data;
+ if (!ei || !ei->gdev)
+ return -EINVAL;
+
+ ret = __hte_req_ts(desc, cb, tcb, data);
+ if (ret < 0) {
+ dev_err(ei->gdev->chip->dev,
+ "failed to request id: %d\n", desc->attr.line_id);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hte_request_ts_ns);
+
+/**
+ * devm_hte_request_ts_ns() - Resource managed API to request and enable
+ * hardware timestamp in nanoseconds.
+ *
+ * The entity is provider specific, for example GPIO lines, signals, buses,
+ * etc. The API allocates the necessary resources and enables the timestamp.
+ * It deallocates and disables them automatically when the consumer exits.
+ *
+ * @dev: HTE consumer/client device.
+ * @desc: Pre-allocated and initialized timestamp descriptor.
+ * @cb: Callback to push the timestamp data to the consumer.
+ * @tcb: Optional callback. If it is provided, the subsystem initializes a
+ * workqueue. It is called when cb returns HTE_RUN_SECOND_CB.
+ * @data: Client data, used during cb and tcb callbacks.
+ *
+ * Context: Holds mutex lock.
+ * Returns: Returns 0 on success or negative error code on failure.
+ */
+int devm_hte_request_ts_ns(struct device *dev, struct hte_ts_desc *desc,
+ hte_ts_cb_t cb, hte_ts_sec_cb_t tcb,
+ void *data)
+{
+ int err;
+
+ if (!dev)
+ return -EINVAL;
+
+ err = hte_request_ts_ns(desc, cb, tcb, data);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(dev, __devm_hte_release_ts, desc);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_hte_request_ts_ns);
+
+/**
+ * hte_init_line_attr() - Initialize line attributes.
+ *
+ * Zeroes out line attributes and initializes with provided arguments.
+ * The function needs to be called before calling any consumer facing
+ * functions.
+ *
+ * @desc: Pre-allocated timestamp descriptor.
+ * @line_id: line id.
+ * @edge_flags: edge flags related to line_id.
+ * @name: name of the line.
+ * @data: line data related to line_id.
+ *
+ * Context: Any.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+ unsigned long edge_flags, const char *name, void *data)
+{
+ if (!desc)
+ return -EINVAL;
+
+ memset(&desc->attr, 0, sizeof(desc->attr));
+
+ desc->attr.edge_flags = edge_flags;
+ desc->attr.line_id = line_id;
+ desc->attr.line_data = data;
+ if (name) {
+ name = kstrdup_const(name, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ }
+
+ desc->attr.name = name;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hte_init_line_attr);
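The descriptor used in the request sketches above would typically be prepared with this helper; the line id, the name and the HTE_RISING_EDGE_TS flag below are illustrative assumptions, not values taken from this patch:

	struct hte_ts_desc desc;
	int ret;

	ret = hte_init_line_attr(&desc, 42 /* illustrative line id */,
				 HTE_RISING_EDGE_TS /* assumed edge flag name */,
				 "my-line", NULL);
	if (ret)
		return ret;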
+
+/**
+ * hte_get_clk_src_info() - Get the clock source information for a ts
+ * descriptor.
+ *
+ * @desc: ts descriptor, same as returned from request API.
+ * @ci: The API fills this structure with the clock information data.
+ *
+ * Context: Any context.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+ struct hte_clk_info *ci)
+{
+ struct hte_chip *chip;
+ struct hte_ts_info *ei;
+
+ if (!desc || !desc->hte_data || !ci) {
+ pr_debug("%s:%d\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ ei = desc->hte_data;
+ if (!ei->gdev || !ei->gdev->chip)
+ return -EINVAL;
+
+ chip = ei->gdev->chip;
+ if (!chip->ops->get_clk_src_info)
+ return -EOPNOTSUPP;
+
+ return chip->ops->get_clk_src_info(chip, ci);
+}
+EXPORT_SYMBOL_GPL(hte_get_clk_src_info);
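Consumers that need to scale or interpret the raw timestamps can query the provider clock; struct hte_clk_info is not shown in this hunk, so the hz field used below is an assumption:

	struct hte_clk_info ci;

	if (!hte_get_clk_src_info(&desc, &ci))
		dev_info(dev, "HTE clock runs at %llu Hz\n", ci.hz); /* 'hz' assumed */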
+
+/**
+ * hte_push_ts_ns() - Push timestamp data in nanoseconds.
+ *
+ * It is used by the provider to push timestamp data.
+ *
+ * @chip: The HTE chip, used during the registration.
+ * @xlated_id: entity id understood by both the subsystem and the provider;
+ * it is obtained from the xlate callback during the request API.
+ * @data: timestamp data.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_push_ts_ns(const struct hte_chip *chip, u32 xlated_id,
+ struct hte_ts_data *data)
+{
+ enum hte_return ret;
+ int st = 0;
+ struct hte_ts_info *ei;
+ unsigned long flag;
+
+ if (!chip || !data || !chip->gdev)
+ return -EINVAL;
+
+ if (xlated_id >= chip->nlines)
+ return -EINVAL;
+
+ ei = &chip->gdev->ei[xlated_id];
+
+ spin_lock_irqsave(&ei->slock, flag);
+
+ /* timestamp sequence counter */
+ data->seq = ei->seq++;
+
+ if (!test_bit(HTE_TS_REGISTERED, &ei->flags) ||
+ test_bit(HTE_TS_DISABLE, &ei->flags)) {
+ dev_dbg(chip->dev, "Unknown timestamp push\n");
+ atomic_inc(&ei->dropped_ts);
+ st = -EINVAL;
+ goto unlock;
+ }
+
+ ret = ei->cb(data, ei->cl_data);
+ if (ret == HTE_RUN_SECOND_CB && ei->tcb) {
+ queue_work(system_unbound_wq, &ei->cb_work);
+ set_bit(HTE_TS_QUEUE_WK, &ei->flags);
+ }
+
+unlock:
+ spin_unlock_irqrestore(&ei->slock, flag);
+
+ return st;
+}
+EXPORT_SYMBOL_GPL(hte_push_ts_ns);
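On the provider side, hte_push_ts_ns() is typically called from the line's interrupt handler; a minimal sketch, where the tv_nsec field name and the per-line bookkeeping structure are assumptions for illustration:

static irqreturn_t my_line_isr(int irq, void *dev_id)
{
	struct my_provider_line *line = dev_id;	/* hypothetical per-line state */
	struct hte_ts_data el = {
		.tv_nsec = ktime_get_ns(),	/* assumed field name */
	};

	/* el.seq is overwritten by the core (see above) */
	hte_push_ts_ns(line->chip, line->xlated_id, &el);

	return IRQ_HANDLED;
}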
+
+static int hte_register_chip(struct hte_chip *chip)
+{
+ struct hte_device *gdev;
+ u32 i;
+
+ if (!chip || !chip->dev || !chip->dev->of_node)
+ return -EINVAL;
+
+ if (!chip->ops || !chip->ops->request || !chip->ops->release) {
+ dev_err(chip->dev, "Driver needs to provide ops\n");
+ return -EINVAL;
+ }
+
+ gdev = kzalloc(struct_size(gdev, ei, chip->nlines), GFP_KERNEL);
+ if (!gdev)
+ return -ENOMEM;
+
+ gdev->chip = chip;
+ chip->gdev = gdev;
+ gdev->nlines = chip->nlines;
+ gdev->sdev = chip->dev;
+
+ for (i = 0; i < chip->nlines; i++) {
+ gdev->ei[i].gdev = gdev;
+ mutex_init(&gdev->ei[i].req_mlock);
+ spin_lock_init(&gdev->ei[i].slock);
+ }
+
+ if (chip->dev->driver)
+ gdev->owner = chip->dev->driver->owner;
+ else
+ gdev->owner = THIS_MODULE;
+
+ of_node_get(chip->dev->of_node);
+
+ INIT_LIST_HEAD(&gdev->list);
+
+ spin_lock(&hte_lock);
+ list_add_tail(&gdev->list, &hte_devices);
+ spin_unlock(&hte_lock);
+
+ hte_chip_dbgfs_init(gdev);
+
+ dev_dbg(chip->dev, "Added hte chip\n");
+
+ return 0;
+}
+
+static int hte_unregister_chip(struct hte_chip *chip)
+{
+ struct hte_device *gdev;
+
+ if (!chip)
+ return -EINVAL;
+
+ gdev = chip->gdev;
+
+ spin_lock(&hte_lock);
+ list_del(&gdev->list);
+ spin_unlock(&hte_lock);
+
+ gdev->chip = NULL;
+
+ of_node_put(chip->dev->of_node);
+ debugfs_remove_recursive(gdev->dbg_root);
+ kfree(gdev);
+
+ dev_dbg(chip->dev, "Removed hte chip\n");
+
+ return 0;
+}
+
+static void _hte_devm_unregister_chip(void *chip)
+{
+ hte_unregister_chip(chip);
+}
+
+/**
+ * devm_hte_register_chip() - Resource managed API to register HTE chip.
+ *
+ * It is used by the provider to register itself with the HTE subsystem.
+ * The unregistration is done automatically when the provider exits.
+ *
+ * @chip: the HTE chip to add to subsystem.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int devm_hte_register_chip(struct hte_chip *chip)
+{
+ int err;
+
+ err = hte_register_chip(chip);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(chip->dev, _hte_devm_unregister_chip,
+ chip);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_hte_register_chip);
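A provider registration sketch: hte_register_chip() above only checks .dev, .nlines and an ops table with .request and .release, so everything else here (the wrapper structure and the ops tag) is assumed for illustration:

static int my_hte_probe(struct platform_device *pdev)
{
	struct my_hte *hte;		/* hypothetical wrapper around struct hte_chip */

	hte = devm_kzalloc(&pdev->dev, sizeof(*hte), GFP_KERNEL);
	if (!hte)
		return -ENOMEM;

	hte->chip.dev = &pdev->dev;
	hte->chip.nlines = 4;			/* illustrative */
	hte->chip.ops = &my_hte_ops;		/* must provide .request and .release */

	return devm_hte_register_chip(&hte->chip);
}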
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index dc5c35210c16..56f7e06c673e 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -1022,11 +1022,13 @@ void vmbus_close(struct vmbus_channel *channel)
EXPORT_SYMBOL_GPL(vmbus_close);
/**
- * vmbus_sendpacket() - Send the specified buffer on the given channel
+ * vmbus_sendpacket_getid() - Send the specified buffer on the given channel
* @channel: Pointer to vmbus_channel structure
* @buffer: Pointer to the buffer you want to send the data from.
* @bufferlen: Maximum size of what the buffer holds.
* @requestid: Identifier of the request
+ * @trans_id: Identifier of the transaction associated with this request, if
+ * the send is successful; undefined otherwise.
* @type: Type of packet that is being sent e.g. negotiate, time
* packet etc.
* @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
@@ -1036,8 +1038,8 @@ EXPORT_SYMBOL_GPL(vmbus_close);
*
* Mainly used by Hyper-V drivers.
*/
-int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
- u32 bufferlen, u64 requestid,
+int vmbus_sendpacket_getid(struct vmbus_channel *channel, void *buffer,
+ u32 bufferlen, u64 requestid, u64 *trans_id,
enum vmbus_packet_type type, u32 flags)
{
struct vmpacket_descriptor desc;
@@ -1063,7 +1065,31 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, num_vecs, requestid);
+ return hv_ringbuffer_write(channel, bufferlist, num_vecs, requestid, trans_id);
+}
+EXPORT_SYMBOL(vmbus_sendpacket_getid);
+
+/**
+ * vmbus_sendpacket() - Send the specified buffer on the given channel
+ * @channel: Pointer to vmbus_channel structure
+ * @buffer: Pointer to the buffer you want to send the data from.
+ * @bufferlen: Maximum size of what the buffer holds.
+ * @requestid: Identifier of the request
+ * @type: Type of packet that is being sent e.g. negotiate, time
+ * packet etc.
+ * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
+ *
+ * Sends data in @buffer directly to Hyper-V via the vmbus.
+ * This will send the data unparsed to Hyper-V.
+ *
+ * Mainly used by Hyper-V drivers.
+ */
+int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
+ u32 bufferlen, u64 requestid,
+ enum vmbus_packet_type type, u32 flags)
+{
+ return vmbus_sendpacket_getid(channel, buffer, bufferlen,
+ requestid, NULL, type, flags);
}
EXPORT_SYMBOL(vmbus_sendpacket);
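Callers that need the transaction id of an in-flight request, for example to match or cancel it later with vmbus_request_addr_match(), use the new _getid variant; in the sketch below the request structure and the helper recording trans_id are hypothetical:

	u64 trans_id;
	int ret;

	ret = vmbus_sendpacket_getid(channel, &req, sizeof(req),
				     (unsigned long)&req, &trans_id,
				     VM_PKT_DATA_INBAND,
				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (!ret)
		my_record_trans_id(&req, trans_id);	/* hypothetical helper */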
@@ -1122,7 +1148,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, 3, requestid);
+ return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
@@ -1160,7 +1186,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, 3, requestid);
+ return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
@@ -1226,12 +1252,12 @@ u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
if (!channel->rqstor_size)
return VMBUS_NO_RQSTOR;
- spin_lock_irqsave(&rqstor->req_lock, flags);
+ lock_requestor(channel, flags);
current_id = rqstor->next_request_id;
/* Requestor array is full */
if (current_id >= rqstor->size) {
- spin_unlock_irqrestore(&rqstor->req_lock, flags);
+ unlock_requestor(channel, flags);
return VMBUS_RQST_ERROR;
}
@@ -1241,27 +1267,23 @@ u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
/* The already held spin lock provides atomicity */
bitmap_set(rqstor->req_bitmap, current_id, 1);
- spin_unlock_irqrestore(&rqstor->req_lock, flags);
+ unlock_requestor(channel, flags);
/*
* Cannot return an ID of 0, which is reserved for an unsolicited
- * message from Hyper-V.
+ * message from Hyper-V; Hyper-V does not acknowledge (respond to)
+ * VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED requests with ID of
+ * 0 sent by the guest.
*/
return current_id + 1;
}
EXPORT_SYMBOL_GPL(vmbus_next_request_id);
-/*
- * vmbus_request_addr - Returns the memory address stored at @trans_id
- * in @rqstor. Uses a spin lock to avoid race conditions.
- * @channel: Pointer to the VMbus channel struct
- * @trans_id: Request id sent back from Hyper-V. Becomes the requestor's
- * next request id.
- */
-u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id)
+/* As in vmbus_request_addr_match() but without the requestor lock */
+u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr)
{
struct vmbus_requestor *rqstor = &channel->requestor;
- unsigned long flags;
u64 req_addr;
/* Check rqstor has been initialized */
@@ -1270,27 +1292,61 @@ u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id)
/* Hyper-V can send an unsolicited message with ID of 0 */
if (!trans_id)
- return trans_id;
-
- spin_lock_irqsave(&rqstor->req_lock, flags);
+ return VMBUS_RQST_ERROR;
/* Data corresponding to trans_id is stored at trans_id - 1 */
trans_id--;
/* Invalid trans_id */
- if (trans_id >= rqstor->size || !test_bit(trans_id, rqstor->req_bitmap)) {
- spin_unlock_irqrestore(&rqstor->req_lock, flags);
+ if (trans_id >= rqstor->size || !test_bit(trans_id, rqstor->req_bitmap))
return VMBUS_RQST_ERROR;
- }
req_addr = rqstor->req_arr[trans_id];
- rqstor->req_arr[trans_id] = rqstor->next_request_id;
- rqstor->next_request_id = trans_id;
+ if (rqst_addr == VMBUS_RQST_ADDR_ANY || req_addr == rqst_addr) {
+ rqstor->req_arr[trans_id] = rqstor->next_request_id;
+ rqstor->next_request_id = trans_id;
- /* The already held spin lock provides atomicity */
- bitmap_clear(rqstor->req_bitmap, trans_id, 1);
+ /* The already held spin lock provides atomicity */
+ bitmap_clear(rqstor->req_bitmap, trans_id, 1);
+ }
- spin_unlock_irqrestore(&rqstor->req_lock, flags);
return req_addr;
}
+EXPORT_SYMBOL_GPL(__vmbus_request_addr_match);
+
+/*
+ * vmbus_request_addr_match - Clears/removes @trans_id from the @channel's
+ * requestor, provided the memory address stored at @trans_id equals @rqst_addr
+ * (or provided @rqst_addr matches the sentinel value VMBUS_RQST_ADDR_ANY).
+ *
+ * Returns the memory address stored at @trans_id, or VMBUS_RQST_ERROR if
+ * @trans_id is not contained in the requestor.
+ *
+ * Acquires and releases the requestor spin lock.
+ */
+u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr)
+{
+ unsigned long flags;
+ u64 req_addr;
+
+ lock_requestor(channel, flags);
+ req_addr = __vmbus_request_addr_match(channel, trans_id, rqst_addr);
+ unlock_requestor(channel, flags);
+
+ return req_addr;
+}
+EXPORT_SYMBOL_GPL(vmbus_request_addr_match);
+
+/*
+ * vmbus_request_addr - Returns the memory address stored at @trans_id
+ * in @rqstor. Uses a spin lock to avoid race conditions.
+ * @channel: Pointer to the VMbus channel struct
+ * @trans_id: Request id sent back from Hyper-V. Becomes the requestor's
+ * next request id.
+ */
+u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id)
+{
+ return vmbus_request_addr_match(channel, trans_id, VMBUS_RQST_ADDR_ANY);
+}
EXPORT_SYMBOL_GPL(vmbus_request_addr);
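A sketch of how a completion path could use the new matching helper to release the requestor slot only when the completion really belongs to the request being retired; the req pointer and its completion are hypothetical, while desc is the received struct vmpacket_descriptor:

	u64 addr;

	addr = vmbus_request_addr_match(channel, desc->trans_id,
					(unsigned long)req);
	if (addr == VMBUS_RQST_ERROR)
		return;				/* not ours, or already retired */

	complete(&req->done);			/* hypothetical completion */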
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 85a2142c9384..b60f13481bdc 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -152,6 +152,7 @@ static const struct {
{ HV_AVMA1_GUID },
{ HV_AVMA2_GUID },
{ HV_RDV_GUID },
+ { HV_IMC_GUID },
};
/*
@@ -442,7 +443,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel)
/*
* Upon suspend, an in-use hv_sock channel is removed from the array of
* channels and the relid is invalidated. After hibernation, when the
- * user-space appplication destroys the channel, it's unnecessary and
+ * user-space application destroys the channel, it's unnecessary and
* unsafe to remove the channel from the array of channels. See also
* the inline comments before the call of vmbus_release_relid() below.
*/
@@ -713,15 +714,13 @@ static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn)
static int next_numa_node_id;
/*
- * Starting with Win8, we can statically distribute the incoming
- * channel interrupt load by binding a channel to VCPU.
+ * We can statically distribute the incoming channel interrupt load
+ * by binding a channel to VCPU.
*
- * For pre-win8 hosts or non-performance critical channels we assign the
- * VMBUS_CONNECT_CPU.
- *
- * Starting with win8, performance critical channels will be distributed
- * evenly among all the available NUMA nodes. Once the node is assigned,
- * we will assign the CPU based on a simple round robin scheme.
+ * For non-performance critical channels we assign the VMBUS_CONNECT_CPU.
+ * Performance critical channels will be distributed evenly among all
+ * the available NUMA nodes. Once the node is assigned, we will assign
+ * the CPU based on a simple round robin scheme.
*/
static void init_vp_index(struct vmbus_channel *channel)
{
@@ -732,13 +731,10 @@ static void init_vp_index(struct vmbus_channel *channel)
u32 target_cpu;
int numa_node;
- if ((vmbus_proto_version == VERSION_WS2008) ||
- (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
+ if (!perf_chn ||
!alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
/*
- * Prior to win8, all channel interrupts are
- * delivered on VMBUS_CONNECT_CPU.
- * Also if the channel is not a performance critical
+ * If the channel is not a performance critical
* channel, bind it to VMBUS_CONNECT_CPU.
* In case alloc_cpumask_var() fails, bind it to
* VMBUS_CONNECT_CPU.
@@ -931,11 +927,9 @@ static void vmbus_setup_channel_state(struct vmbus_channel *channel,
*/
channel->sig_event = VMBUS_EVENT_CONNECTION_ID;
- if (vmbus_proto_version != VERSION_WS2008) {
- channel->is_dedicated_interrupt =
- (offer->is_dedicated_interrupt != 0);
- channel->sig_event = offer->connection_id;
- }
+ channel->is_dedicated_interrupt =
+ (offer->is_dedicated_interrupt != 0);
+ channel->sig_event = offer->connection_id;
memcpy(&channel->offermsg, offer,
sizeof(struct vmbus_channel_offer_channel));
@@ -975,13 +969,17 @@ find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
return channel;
}
-static bool vmbus_is_valid_device(const guid_t *guid)
+static bool vmbus_is_valid_offer(const struct vmbus_channel_offer_channel *offer)
{
+ const guid_t *guid = &offer->offer.if_type;
u16 i;
if (!hv_is_isolation_supported())
return true;
+ if (is_hvsock_offer(offer))
+ return true;
+
for (i = 0; i < ARRAY_SIZE(vmbus_devs); i++) {
if (guid_equal(guid, &vmbus_devs[i].guid))
return vmbus_devs[i].allowed_in_isolated;
@@ -1003,7 +1001,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
trace_vmbus_onoffer(offer);
- if (!vmbus_is_valid_device(&offer->offer.if_type)) {
+ if (!vmbus_is_valid_offer(offer)) {
pr_err_ratelimited("Invalid offer %d from the host supporting isolation\n",
offer->child_relid);
atomic_dec(&vmbus_connection.offer_in_progress);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index a3d8be8d6cfb..6218bbf6863a 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -47,6 +47,8 @@ EXPORT_SYMBOL_GPL(vmbus_proto_version);
/*
* Table of VMBus versions listed from newest to oldest.
+ * VERSION_WIN7 and VERSION_WS2008 are no longer supported in
+ * Linux guests and are not listed.
*/
static __u32 vmbus_versions[] = {
VERSION_WIN10_V5_3,
@@ -56,9 +58,7 @@ static __u32 vmbus_versions[] = {
VERSION_WIN10_V4_1,
VERSION_WIN10,
VERSION_WIN8_1,
- VERSION_WIN8,
- VERSION_WIN7,
- VERSION_WS2008
+ VERSION_WIN8
};
/*
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 3248b48f37f6..91e8a72eee14 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1842,7 +1842,7 @@ static int balloon_probe(struct hv_device *dev,
ret = balloon_connect_vsp(dev);
if (ret != 0)
- return ret;
+ goto connect_error;
enable_page_reporting();
dm_device.state = DM_INITIALIZED;
@@ -1861,6 +1861,7 @@ probe_error:
dm_device.thread = NULL;
disable_page_reporting();
vmbus_close(dev->channel);
+connect_error:
#ifdef CONFIG_MEMORY_HOTPLUG
unregister_memory_notifier(&hv_memory_nb);
restore_online_page_callback(&hv_online_page);
@@ -1882,12 +1883,21 @@ static int balloon_remove(struct hv_device *dev)
cancel_work_sync(&dm->ha_wrk.wrk);
kthread_stop(dm->thread);
- disable_page_reporting();
- vmbus_close(dev->channel);
+
+ /*
+ * This is to handle the case when balloon_resume()
+ * call has failed and some cleanup has been done as
+ * a part of the error handling.
+ */
+ if (dm_device.state != DM_INIT_ERROR) {
+ disable_page_reporting();
+ vmbus_close(dev->channel);
#ifdef CONFIG_MEMORY_HOTPLUG
- unregister_memory_notifier(&hv_memory_nb);
- restore_online_page_callback(&hv_online_page);
+ unregister_memory_notifier(&hv_memory_nb);
+ restore_online_page_callback(&hv_online_page);
#endif
+ }
+
spin_lock_irqsave(&dm_device.ha_lock, flags);
list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
@@ -1948,6 +1958,7 @@ close_channel:
vmbus_close(dev->channel);
out:
dm_device.state = DM_INIT_ERROR;
+ disable_page_reporting();
#ifdef CONFIG_MEMORY_HOTPLUG
unregister_memory_notifier(&hv_memory_nb);
restore_online_page_callback(&hv_online_page);
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 6b45c22bb717..4f5b824b16cf 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -181,7 +181,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct vmbus_channel *channel,
const struct kvec *kv_list, u32 kv_count,
- u64 requestid);
+ u64 requestid, u64 *trans_id);
int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 3d215d9dec43..59a4aa86d1f3 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -283,7 +283,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
const struct kvec *kv_list, u32 kv_count,
- u64 requestid)
+ u64 requestid, u64 *trans_id)
{
int i;
u32 bytes_avail_towrite;
@@ -294,7 +294,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
unsigned long flags;
struct hv_ring_buffer_info *outring_info = &channel->outbound;
struct vmpacket_descriptor *desc = kv_list[0].iov_base;
- u64 rqst_id = VMBUS_NO_RQSTOR;
+ u64 __trans_id, rqst_id = VMBUS_NO_RQSTOR;
if (channel->rescind)
return -ENODEV;
@@ -353,7 +353,15 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
}
}
desc = hv_get_ring_buffer(outring_info) + old_write;
- desc->trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;
+ __trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;
+ /*
+ * Ensure the compiler doesn't generate code that reads the value of
+ * the transaction ID from the ring buffer, which is shared with the
+ * Hyper-V host and subject to being changed at any time.
+ */
+ WRITE_ONCE(desc->trans_id, __trans_id);
+ if (trans_id)
+ *trans_id = __trans_id;
/* Set previous packet start */
prev_indices = hv_get_ring_bufferindices(outring_info);
@@ -421,7 +429,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
memcpy(buffer, (const char *)desc + offset, packetlen);
/* Advance ring index to next packet descriptor */
- __hv_pkt_iter_next(channel, desc, true);
+ __hv_pkt_iter_next(channel, desc);
/* Notify host of update */
hv_pkt_iter_close(channel);
@@ -457,22 +465,6 @@ static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
}
/*
- * Get first vmbus packet without copying it out of the ring buffer
- */
-struct vmpacket_descriptor *hv_pkt_iter_first_raw(struct vmbus_channel *channel)
-{
- struct hv_ring_buffer_info *rbi = &channel->inbound;
-
- hv_debug_delay_test(channel, MESSAGE_DELAY);
-
- if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
- return NULL;
-
- return (struct vmpacket_descriptor *)(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
-}
-EXPORT_SYMBOL_GPL(hv_pkt_iter_first_raw);
-
-/*
* Get first vmbus packet from ring buffer after read_index
*
* If ring buffer is empty, returns NULL and no other action needed.
@@ -483,11 +475,14 @@ struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
struct vmpacket_descriptor *desc, *desc_copy;
u32 bytes_avail, pkt_len, pkt_offset;
- desc = hv_pkt_iter_first_raw(channel);
- if (!desc)
+ hv_debug_delay_test(channel, MESSAGE_DELAY);
+
+ bytes_avail = hv_pkt_iter_avail(rbi);
+ if (bytes_avail < sizeof(struct vmpacket_descriptor))
return NULL;
+ bytes_avail = min(rbi->pkt_buffer_size, bytes_avail);
- bytes_avail = min(rbi->pkt_buffer_size, hv_pkt_iter_avail(rbi));
+ desc = (struct vmpacket_descriptor *)(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
/*
* Ensure the compiler does not use references to incoming Hyper-V values (which
@@ -534,8 +529,7 @@ EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
*/
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
- const struct vmpacket_descriptor *desc,
- bool copy)
+ const struct vmpacket_descriptor *desc)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;
u32 packetlen = desc->len8 << 3;
@@ -548,7 +542,7 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
rbi->priv_read_index -= dsize;
/* more data? */
- return copy ? hv_pkt_iter_first(channel) : hv_pkt_iter_first_raw(channel);
+ return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 14de17087864..714d549b7b46 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -575,31 +575,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- char *driver_override, *old, *cp;
-
- /* We need to keep extra room for a newline */
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- device_lock(dev);
- old = hv_dev->driver_override;
- if (strlen(driver_override)) {
- hv_dev->driver_override = driver_override;
- } else {
- kfree(driver_override);
- hv_dev->driver_override = NULL;
- }
- device_unlock(dev);
+ int ret;
- kfree(old);
+ ret = driver_set_override(dev, &hv_dev->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
@@ -1263,23 +1243,17 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
unsigned long *recv_int_page;
u32 maxbits, relid;
- if (vmbus_proto_version < VERSION_WIN8) {
- maxbits = MAX_NUM_CHANNELS_SUPPORTED;
- recv_int_page = vmbus_connection.recv_int_page;
- } else {
- /*
- * When the host is win8 and beyond, the event page
- * can be directly checked to get the id of the channel
- * that has the interrupt pending.
- */
- void *page_addr = hv_cpu->synic_event_page;
- union hv_synic_event_flags *event
- = (union hv_synic_event_flags *)page_addr +
- VMBUS_MESSAGE_SINT;
+ /*
+ * The event page can be directly checked to get the id of
+ * the channel that has the interrupt pending.
+ */
+ void *page_addr = hv_cpu->synic_event_page;
+ union hv_synic_event_flags *event
+ = (union hv_synic_event_flags *)page_addr +
+ VMBUS_MESSAGE_SINT;
- maxbits = HV_EVENT_FLAGS_COUNT;
- recv_int_page = event->flags;
- }
+ maxbits = HV_EVENT_FLAGS_COUNT;
+ recv_int_page = event->flags;
if (unlikely(!recv_int_page))
return;
@@ -1351,40 +1325,10 @@ static void vmbus_isr(void)
{
struct hv_per_cpu_context *hv_cpu
= this_cpu_ptr(hv_context.cpu_context);
- void *page_addr = hv_cpu->synic_event_page;
+ void *page_addr;
struct hv_message *msg;
- union hv_synic_event_flags *event;
- bool handled = false;
-
- if (unlikely(page_addr == NULL))
- return;
-
- event = (union hv_synic_event_flags *)page_addr +
- VMBUS_MESSAGE_SINT;
- /*
- * Check for events before checking for messages. This is the order
- * in which events and messages are checked in Windows guests on
- * Hyper-V, and the Windows team suggested we do the same.
- */
-
- if ((vmbus_proto_version == VERSION_WS2008) ||
- (vmbus_proto_version == VERSION_WIN7)) {
-
- /* Since we are a child, we only need to check bit 0 */
- if (sync_test_and_clear_bit(0, event->flags))
- handled = true;
- } else {
- /*
- * Our host is win8 or above. The signaling mechanism
- * has changed and we can directly look at the event page.
- * If bit n is set then we have an interrup on the channel
- * whose id is n.
- */
- handled = true;
- }
- if (handled)
- vmbus_chan_sched(hv_cpu);
+ vmbus_chan_sched(hv_cpu);
page_addr = hv_cpu->synic_message_page;
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index af00dca8d1ac..ee6ce92ab4c3 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -1379,7 +1379,7 @@ static int coresight_fixup_device_conns(struct coresight_device *csdev)
continue;
conn->child_dev =
coresight_find_csdev_by_fwnode(conn->child_fwnode);
- if (conn->child_dev) {
+ if (conn->child_dev && conn->child_dev->has_conns_grp) {
ret = coresight_make_links(csdev, conn,
conn->child_dev);
if (ret)
@@ -1571,6 +1571,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
int nr_refcnts = 1;
atomic_t *refcnts = NULL;
struct coresight_device *csdev;
+ bool registered = false;
csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
if (!csdev) {
@@ -1591,7 +1592,8 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL);
if (!refcnts) {
ret = -ENOMEM;
- goto err_free_csdev;
+ kfree(csdev);
+ goto err_out;
}
csdev->refcnt = refcnts;
@@ -1616,6 +1618,13 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev));
dev_set_name(&csdev->dev, "%s", desc->name);
+ /*
+ * Make sure the device registration and the connection fixup
+ * are synchronised, so that we don't see uninitialised devices
+ * on the coresight bus while trying to resolve the connections.
+ */
+ mutex_lock(&coresight_mutex);
+
ret = device_register(&csdev->dev);
if (ret) {
put_device(&csdev->dev);
@@ -1623,7 +1632,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
* All resources are free'd explicitly via
* coresight_device_release(), triggered from put_device().
*/
- goto err_out;
+ goto out_unlock;
}
if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
@@ -1638,11 +1647,11 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
* from put_device(), which is in turn called from
* function device_unregister().
*/
- goto err_out;
+ goto out_unlock;
}
}
-
- mutex_lock(&coresight_mutex);
+ /* Device is now registered */
+ registered = true;
ret = coresight_create_conns_sysfs_group(csdev);
if (!ret)
@@ -1652,16 +1661,18 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
if (!ret && cti_assoc_ops && cti_assoc_ops->add)
cti_assoc_ops->add(csdev);
+out_unlock:
mutex_unlock(&coresight_mutex);
- if (ret) {
+ /* Success */
+ if (!ret)
+ return csdev;
+
+ /* Unregister the device if needed */
+ if (registered) {
coresight_unregister(csdev);
return ERR_PTR(ret);
}
- return csdev;
-
-err_free_csdev:
- kfree(csdev);
err_out:
/* Cleanup the connection information */
coresight_release_platform_data(NULL, desc->pdata);
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 8845ec4b4402..1874df7c6a73 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -380,9 +380,10 @@ static int debug_notifier_call(struct notifier_block *self,
int cpu;
struct debug_drvdata *drvdata;
- mutex_lock(&debug_lock);
+ /* Bail out if we can't acquire the mutex or the functionality is off */
+ if (!mutex_trylock(&debug_lock))
+ return NOTIFY_DONE;
- /* Bail out if the functionality is disabled */
if (!debug_enable)
goto skip_dump;
@@ -401,7 +402,7 @@ static int debug_notifier_call(struct notifier_block *self,
skip_dump:
mutex_unlock(&debug_lock);
- return 0;
+ return NOTIFY_DONE;
}
static struct notifier_block debug_notifier = {
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 7d413ba8b823..d0ab9933472b 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -204,7 +204,7 @@ void etm_set_default(struct etm_config *config)
* set all bits in register 0x007, the ETMTECR2, to 0
* set register 0x008, the ETMTEEVR, to 0x6F (TRUE).
*/
- config->enable_ctrl1 = BIT(24);
+ config->enable_ctrl1 = ETMTECR1_INC_EXC;
config->enable_ctrl2 = 0x0;
config->enable_event = ETM_HARD_WIRE_RES_A;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index e8c7649f123e..68fcbf4ce7a8 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -474,7 +474,7 @@ static ssize_t addr_start_store(struct device *dev,
config->addr_val[idx] = val;
config->addr_type[idx] = ETM_ADDR_TYPE_START;
config->startstop_ctrl |= (1 << idx);
- config->enable_ctrl1 |= BIT(25);
+ config->enable_ctrl1 |= ETMTECR1_START_STOP;
spin_unlock(&drvdata->spinlock);
return size;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 7f416a12000e..87299e99dabb 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -443,7 +443,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
/* always clear status bit on restart if using single-shot */
if (config->ss_ctrl[i] || config->ss_pe_cmp[i])
- config->ss_status[i] &= ~BIT(31);
+ config->ss_status[i] &= ~TRCSSCSRn_STATUS;
etm4x_relaxed_write32(csa, config->ss_ctrl[i], TRCSSCCRn(i));
etm4x_relaxed_write32(csa, config->ss_status[i], TRCSSCSRn(i));
if (etm4x_sspcicrn_present(drvdata, i))
@@ -633,7 +633,7 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
/* Go from generic option to ETMv4 specifics */
if (attr->config & BIT(ETM_OPT_CYCACC)) {
- config->cfg |= BIT(4);
+ config->cfg |= TRCCONFIGR_CCI;
/* TRM: Must program this for cycacc to work */
config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
}
@@ -653,14 +653,14 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
goto out;
/* bit[11], Global timestamp tracing bit */
- config->cfg |= BIT(11);
+ config->cfg |= TRCCONFIGR_TS;
}
/* Only trace contextID when runs in root PID namespace */
if ((attr->config & BIT(ETM_OPT_CTXTID)) &&
task_is_in_init_pid_ns(current))
/* bit[6], Context ID tracing bit */
- config->cfg |= BIT(ETM4_CFG_BIT_CTXTID);
+ config->cfg |= TRCCONFIGR_CID;
/*
* If set bit ETM_OPT_CTXTID2 in perf config, this asks to trace VMID
@@ -672,17 +672,15 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
ret = -EINVAL;
goto out;
}
-
/* Only trace virtual contextID when runs in root PID namespace */
if (task_is_in_init_pid_ns(current))
- config->cfg |= BIT(ETM4_CFG_BIT_VMID) |
- BIT(ETM4_CFG_BIT_VMID_OPT);
+ config->cfg |= TRCCONFIGR_VMID | TRCCONFIGR_VMIDOPT;
}
/* return stack - enable if selected and supported */
if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
/* bit[12], Return stack enable bit */
- config->cfg |= BIT(12);
+ config->cfg |= TRCCONFIGR_RS;
/*
* Set any selected configuration and preset.
@@ -1097,107 +1095,67 @@ static void etm4_init_arch_data(void *info)
etmidr0 = etm4x_relaxed_read32(csa, TRCIDR0);
/* INSTP0, bits[2:1] P0 tracing support field */
- if (BMVAL(etmidr0, 1, 2) == 0b11)
- drvdata->instrp0 = true;
- else
- drvdata->instrp0 = false;
-
+ drvdata->instrp0 = !!(FIELD_GET(TRCIDR0_INSTP0_MASK, etmidr0) == 0b11);
/* TRCBB, bit[5] Branch broadcast tracing support bit */
- if (BMVAL(etmidr0, 5, 5))
- drvdata->trcbb = true;
- else
- drvdata->trcbb = false;
-
+ drvdata->trcbb = !!(etmidr0 & TRCIDR0_TRCBB);
/* TRCCOND, bit[6] Conditional instruction tracing support bit */
- if (BMVAL(etmidr0, 6, 6))
- drvdata->trccond = true;
- else
- drvdata->trccond = false;
-
+ drvdata->trccond = !!(etmidr0 & TRCIDR0_TRCCOND);
/* TRCCCI, bit[7] Cycle counting instruction bit */
- if (BMVAL(etmidr0, 7, 7))
- drvdata->trccci = true;
- else
- drvdata->trccci = false;
-
+ drvdata->trccci = !!(etmidr0 & TRCIDR0_TRCCCI);
/* RETSTACK, bit[9] Return stack bit */
- if (BMVAL(etmidr0, 9, 9))
- drvdata->retstack = true;
- else
- drvdata->retstack = false;
-
+ drvdata->retstack = !!(etmidr0 & TRCIDR0_RETSTACK);
/* NUMEVENT, bits[11:10] Number of events field */
- drvdata->nr_event = BMVAL(etmidr0, 10, 11);
+ drvdata->nr_event = FIELD_GET(TRCIDR0_NUMEVENT_MASK, etmidr0);
/* QSUPP, bits[16:15] Q element support field */
- drvdata->q_support = BMVAL(etmidr0, 15, 16);
+ drvdata->q_support = FIELD_GET(TRCIDR0_QSUPP_MASK, etmidr0);
/* TSSIZE, bits[28:24] Global timestamp size field */
- drvdata->ts_size = BMVAL(etmidr0, 24, 28);
+ drvdata->ts_size = FIELD_GET(TRCIDR0_TSSIZE_MASK, etmidr0);
/* maximum size of resources */
etmidr2 = etm4x_relaxed_read32(csa, TRCIDR2);
/* CIDSIZE, bits[9:5] Indicates the Context ID size */
- drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
+ drvdata->ctxid_size = FIELD_GET(TRCIDR2_CIDSIZE_MASK, etmidr2);
/* VMIDSIZE, bits[14:10] Indicates the VMID size */
- drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
+ drvdata->vmid_size = FIELD_GET(TRCIDR2_VMIDSIZE_MASK, etmidr2);
/* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
- drvdata->ccsize = BMVAL(etmidr2, 25, 28);
+ drvdata->ccsize = FIELD_GET(TRCIDR2_CCSIZE_MASK, etmidr2);
etmidr3 = etm4x_relaxed_read32(csa, TRCIDR3);
/* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
- drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
+ drvdata->ccitmin = FIELD_GET(TRCIDR3_CCITMIN_MASK, etmidr3);
/* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
- drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
+ drvdata->s_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_S_MASK, etmidr3);
drvdata->config.s_ex_level = drvdata->s_ex_level;
/* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
- drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
-
+ drvdata->ns_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_NS_MASK, etmidr3);
/*
* TRCERR, bit[24] whether a trace unit can trace a
* system error exception.
*/
- if (BMVAL(etmidr3, 24, 24))
- drvdata->trc_error = true;
- else
- drvdata->trc_error = false;
-
+ drvdata->trc_error = !!(etmidr3 & TRCIDR3_TRCERR);
/* SYNCPR, bit[25] implementation has a fixed synchronization period? */
- if (BMVAL(etmidr3, 25, 25))
- drvdata->syncpr = true;
- else
- drvdata->syncpr = false;
-
+ drvdata->syncpr = !!(etmidr3 & TRCIDR3_SYNCPR);
/* STALLCTL, bit[26] is stall control implemented? */
- if (BMVAL(etmidr3, 26, 26))
- drvdata->stallctl = true;
- else
- drvdata->stallctl = false;
-
+ drvdata->stallctl = !!(etmidr3 & TRCIDR3_STALLCTL);
/* SYSSTALL, bit[27] implementation can support stall control? */
- if (BMVAL(etmidr3, 27, 27))
- drvdata->sysstall = true;
- else
- drvdata->sysstall = false;
-
+ drvdata->sysstall = !!(etmidr3 & TRCIDR3_SYSSTALL);
/*
* NUMPROC - the number of PEs available for tracing, 5bits
* = TRCIDR3.bits[13:12]bits[30:28]
* bits[4:3] = TRCIDR3.bits[13:12] (since etm-v4.2, otherwise RES0)
* bits[3:0] = TRCIDR3.bits[30:28]
*/
- drvdata->nr_pe = (BMVAL(etmidr3, 12, 13) << 3) | BMVAL(etmidr3, 28, 30);
-
+ drvdata->nr_pe = (FIELD_GET(TRCIDR3_NUMPROC_HI_MASK, etmidr3) << 3) |
+ FIELD_GET(TRCIDR3_NUMPROC_LO_MASK, etmidr3);
/* NOOVERFLOW, bit[31] is trace overflow prevention supported */
- if (BMVAL(etmidr3, 31, 31))
- drvdata->nooverflow = true;
- else
- drvdata->nooverflow = false;
+ drvdata->nooverflow = !!(etmidr3 & TRCIDR3_NOOVERFLOW);
/* number of resources trace unit supports */
etmidr4 = etm4x_relaxed_read32(csa, TRCIDR4);
/* NUMACPAIRS, bits[0:3] number of addr comparator pairs for tracing */
- drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
+ drvdata->nr_addr_cmp = FIELD_GET(TRCIDR4_NUMACPAIRS_MASK, etmidr4);
/* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
- drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
+ drvdata->nr_pe_cmp = FIELD_GET(TRCIDR4_NUMPC_MASK, etmidr4);
/*
* NUMRSPAIR, bits[19:16]
* The number of resource pairs conveyed by the HW starts at 0, i.e a
@@ -1208,7 +1166,7 @@ static void etm4_init_arch_data(void *info)
* the default TRUE and FALSE resource selectors are omitted.
* Otherwise for values 0x1 and above the number is N + 1 as per v4.2.
*/
- drvdata->nr_resource = BMVAL(etmidr4, 16, 19);
+ drvdata->nr_resource = FIELD_GET(TRCIDR4_NUMRSPAIR_MASK, etmidr4);
if ((drvdata->arch < ETM_ARCH_V4_3) || (drvdata->nr_resource > 0))
drvdata->nr_resource += 1;
/*
@@ -1216,45 +1174,39 @@ static void etm4_init_arch_data(void *info)
* comparator control for tracing. Read any status regs as these
* also contain RO capability data.
*/
- drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
+ drvdata->nr_ss_cmp = FIELD_GET(TRCIDR4_NUMSSCC_MASK, etmidr4);
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
drvdata->config.ss_status[i] =
etm4x_relaxed_read32(csa, TRCSSCSRn(i));
}
/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
- drvdata->numcidc = BMVAL(etmidr4, 24, 27);
+ drvdata->numcidc = FIELD_GET(TRCIDR4_NUMCIDC_MASK, etmidr4);
/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
- drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
+ drvdata->numvmidc = FIELD_GET(TRCIDR4_NUMVMIDC_MASK, etmidr4);
etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
/* NUMEXTIN, bits[8:0] number of external inputs implemented */
- drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
+ drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5);
/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
- drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
+ drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5);
/* ATBTRIG, bit[22] implementation can support ATB triggers? */
- if (BMVAL(etmidr5, 22, 22))
- drvdata->atbtrig = true;
- else
- drvdata->atbtrig = false;
+ drvdata->atbtrig = !!(etmidr5 & TRCIDR5_ATBTRIG);
/*
* LPOVERRIDE, bit[23] implementation supports
* low-power state override
*/
- if (BMVAL(etmidr5, 23, 23) && (!drvdata->skip_power_up))
- drvdata->lpoverride = true;
- else
- drvdata->lpoverride = false;
+ drvdata->lpoverride = (etmidr5 & TRCIDR5_LPOVERRIDE) && (!drvdata->skip_power_up);
/* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
- drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
+ drvdata->nrseqstate = FIELD_GET(TRCIDR5_NUMSEQSTATE_MASK, etmidr5);
/* NUMCNTR, bits[30:28] number of counters available for tracing */
- drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
+ drvdata->nr_cntr = FIELD_GET(TRCIDR5_NUMCNTR_MASK, etmidr5);
etm4_cs_lock(drvdata, csa);
cpu_detect_trace_filtering(drvdata);
}
static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config)
{
- return etm4_get_access_type(config) << TRCVICTLR_EXLEVEL_SHIFT;
+ return etm4_get_access_type(config) << __bf_shf(TRCVICTLR_EXLEVEL_MASK);
}
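For readers following the BMVAL()-to-bitfield.h conversion in this hunk, the <linux/bitfield.h> helpers relate as sketched below; both masks referenced are ones added elsewhere in this series:

	/* old: drvdata->nr_event = BMVAL(etmidr0, 10, 11); */
	drvdata->nr_event = FIELD_GET(TRCIDR0_NUMEVENT_MASK, etmidr0);

	/* writing a field: the shift is derived from the mask itself */
	config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
	/* equivalent to: 0x01 << __bf_shf(TRCVICTLR_EVENT_MASK) */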
/* Set ELx trace filter access in the TRCVICTLR register */
@@ -1280,7 +1232,7 @@ static void etm4_set_default_config(struct etmv4_config *config)
config->ts_ctrl = 0x0;
/* TRCVICTLR::EVENT = 0x01, select the always on logic */
- config->vinst_ctrl = BIT(0);
+ config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
/* TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering */
etm4_set_victlr_access(config);
@@ -1389,7 +1341,7 @@ static void etm4_set_default_filter(struct etmv4_config *config)
* TRCVICTLR::SSSTATUS == 1, the start-stop logic is
* in the started state
*/
- config->vinst_ctrl |= BIT(9);
+ config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
/* No start-stop filtering for ViewInst */
@@ -1493,7 +1445,7 @@ static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
* TRCVICTLR::SSSTATUS == 1, the start-stop logic is
* in the started state
*/
- config->vinst_ctrl |= BIT(9);
+ config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
/* No start-stop filtering for ViewInst */
config->vissctlr = 0x0;
@@ -1521,7 +1473,7 @@ static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
* etm4_disable_perf().
*/
if (filters->ssstatus)
- config->vinst_ctrl |= BIT(9);
+ config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
/* No include/exclude filtering for ViewInst */
config->viiectlr = 0x0;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 21687cc1e4e2..6ea8181816fc 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -22,7 +22,7 @@ static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
* TRCACATRn.TYPE bit[1:0]: type of comparison
* the trace unit performs
*/
- if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
+ if (FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]) == TRCACATRn_TYPE_ADDR) {
if (idx % 2 != 0)
return -EINVAL;
@@ -180,12 +180,12 @@ static ssize_t reset_store(struct device *dev,
/* Disable data tracing: do not trace load and store data transfers */
config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
- config->cfg &= ~(BIT(1) | BIT(2));
+ config->cfg &= ~(TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE);
/* Disable data value and data address tracing */
config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
ETM_MODE_DATA_TRACE_VAL);
- config->cfg &= ~(BIT(16) | BIT(17));
+ config->cfg &= ~(TRCCONFIGR_DA | TRCCONFIGR_DV);
/* Disable all events tracing */
config->eventctrl0 = 0x0;
@@ -206,11 +206,11 @@ static ssize_t reset_store(struct device *dev,
* started state. ARM recommends start-stop logic is set before
* each trace run.
*/
- config->vinst_ctrl = BIT(0);
+ config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
if (drvdata->nr_addr_cmp > 0) {
config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
/* SSSTATUS, bit[9] */
- config->vinst_ctrl |= BIT(9);
+ config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
}
/* No address range filtering for ViewInst */
@@ -304,134 +304,134 @@ static ssize_t mode_store(struct device *dev,
if (drvdata->instrp0 == true) {
/* start by clearing instruction P0 field */
- config->cfg &= ~(BIT(1) | BIT(2));
+ config->cfg &= ~TRCCONFIGR_INSTP0_LOAD_STORE;
if (config->mode & ETM_MODE_LOAD)
/* 0b01 Trace load instructions as P0 instructions */
- config->cfg |= BIT(1);
+ config->cfg |= TRCCONFIGR_INSTP0_LOAD;
if (config->mode & ETM_MODE_STORE)
/* 0b10 Trace store instructions as P0 instructions */
- config->cfg |= BIT(2);
+ config->cfg |= TRCCONFIGR_INSTP0_STORE;
if (config->mode & ETM_MODE_LOAD_STORE)
/*
* 0b11 Trace load and store instructions
* as P0 instructions
*/
- config->cfg |= BIT(1) | BIT(2);
+ config->cfg |= TRCCONFIGR_INSTP0_LOAD_STORE;
}
/* bit[3], Branch broadcast mode */
if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
- config->cfg |= BIT(3);
+ config->cfg |= TRCCONFIGR_BB;
else
- config->cfg &= ~BIT(3);
+ config->cfg &= ~TRCCONFIGR_BB;
/* bit[4], Cycle counting instruction trace bit */
if ((config->mode & ETMv4_MODE_CYCACC) &&
(drvdata->trccci == true))
- config->cfg |= BIT(4);
+ config->cfg |= TRCCONFIGR_CCI;
else
- config->cfg &= ~BIT(4);
+ config->cfg &= ~TRCCONFIGR_CCI;
/* bit[6], Context ID tracing bit */
if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
- config->cfg |= BIT(6);
+ config->cfg |= TRCCONFIGR_CID;
else
- config->cfg &= ~BIT(6);
+ config->cfg &= ~TRCCONFIGR_CID;
if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
- config->cfg |= BIT(7);
+ config->cfg |= TRCCONFIGR_VMID;
else
- config->cfg &= ~BIT(7);
+ config->cfg &= ~TRCCONFIGR_VMID;
/* bits[10:8], Conditional instruction tracing bit */
mode = ETM_MODE_COND(config->mode);
if (drvdata->trccond == true) {
- config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
- config->cfg |= mode << 8;
+ config->cfg &= ~TRCCONFIGR_COND_MASK;
+ config->cfg |= mode << __bf_shf(TRCCONFIGR_COND_MASK);
}
/* bit[11], Global timestamp tracing bit */
if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
- config->cfg |= BIT(11);
+ config->cfg |= TRCCONFIGR_TS;
else
- config->cfg &= ~BIT(11);
+ config->cfg &= ~TRCCONFIGR_TS;
/* bit[12], Return stack enable bit */
if ((config->mode & ETM_MODE_RETURNSTACK) &&
(drvdata->retstack == true))
- config->cfg |= BIT(12);
+ config->cfg |= TRCCONFIGR_RS;
else
- config->cfg &= ~BIT(12);
+ config->cfg &= ~TRCCONFIGR_RS;
/* bits[14:13], Q element enable field */
mode = ETM_MODE_QELEM(config->mode);
/* start by clearing QE bits */
- config->cfg &= ~(BIT(13) | BIT(14));
+ config->cfg &= ~(TRCCONFIGR_QE_W_COUNTS | TRCCONFIGR_QE_WO_COUNTS);
/*
* if supported, Q elements with instruction counts are enabled.
* Always set the low bit for any requested mode. Valid combos are
* 0b00, 0b01 and 0b11.
*/
if (mode && drvdata->q_support)
- config->cfg |= BIT(13);
+ config->cfg |= TRCCONFIGR_QE_W_COUNTS;
/*
* if supported, Q elements with and without instruction
* counts are enabled
*/
if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
- config->cfg |= BIT(14);
+ config->cfg |= TRCCONFIGR_QE_WO_COUNTS;
/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
(drvdata->atbtrig == true))
- config->eventctrl1 |= BIT(11);
+ config->eventctrl1 |= TRCEVENTCTL1R_ATB;
else
- config->eventctrl1 &= ~BIT(11);
+ config->eventctrl1 &= ~TRCEVENTCTL1R_ATB;
/* bit[12], Low-power state behavior override bit */
if ((config->mode & ETM_MODE_LPOVERRIDE) &&
(drvdata->lpoverride == true))
- config->eventctrl1 |= BIT(12);
+ config->eventctrl1 |= TRCEVENTCTL1R_LPOVERRIDE;
else
- config->eventctrl1 &= ~BIT(12);
+ config->eventctrl1 &= ~TRCEVENTCTL1R_LPOVERRIDE;
/* bit[8], Instruction stall bit */
if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
- config->stall_ctrl |= BIT(8);
+ config->stall_ctrl |= TRCSTALLCTLR_ISTALL;
else
- config->stall_ctrl &= ~BIT(8);
+ config->stall_ctrl &= ~TRCSTALLCTLR_ISTALL;
/* bit[10], Prioritize instruction trace bit */
if (config->mode & ETM_MODE_INSTPRIO)
- config->stall_ctrl |= BIT(10);
+ config->stall_ctrl |= TRCSTALLCTLR_INSTPRIORITY;
else
- config->stall_ctrl &= ~BIT(10);
+ config->stall_ctrl &= ~TRCSTALLCTLR_INSTPRIORITY;
/* bit[13], Trace overflow prevention bit */
if ((config->mode & ETM_MODE_NOOVERFLOW) &&
(drvdata->nooverflow == true))
- config->stall_ctrl |= BIT(13);
+ config->stall_ctrl |= TRCSTALLCTLR_NOOVERFLOW;
else
- config->stall_ctrl &= ~BIT(13);
+ config->stall_ctrl &= ~TRCSTALLCTLR_NOOVERFLOW;
/* bit[9] Start/stop logic control bit */
if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
- config->vinst_ctrl |= BIT(9);
+ config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
else
- config->vinst_ctrl &= ~BIT(9);
+ config->vinst_ctrl &= ~TRCVICTLR_SSSTATUS;
/* bit[10], Whether a trace unit must trace a Reset exception */
if (config->mode & ETM_MODE_TRACE_RESET)
- config->vinst_ctrl |= BIT(10);
+ config->vinst_ctrl |= TRCVICTLR_TRCRESET;
else
- config->vinst_ctrl &= ~BIT(10);
+ config->vinst_ctrl &= ~TRCVICTLR_TRCRESET;
/* bit[11], Whether a trace unit must trace a system error exception */
if ((config->mode & ETM_MODE_TRACE_ERR) &&
(drvdata->trc_error == true))
- config->vinst_ctrl |= BIT(11);
+ config->vinst_ctrl |= TRCVICTLR_TRCERR;
else
- config->vinst_ctrl &= ~BIT(11);
+ config->vinst_ctrl &= ~TRCVICTLR_TRCERR;
if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
etm4_config_trace_mode(config);
@@ -534,7 +534,7 @@ static ssize_t event_instren_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- val = BMVAL(config->eventctrl1, 0, 3);
+ val = FIELD_GET(TRCEVENTCTL1R_INSTEN_MASK, config->eventctrl1);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -551,23 +551,28 @@ static ssize_t event_instren_store(struct device *dev,
spin_lock(&drvdata->spinlock);
/* start by clearing all instruction event enable bits */
- config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
+ config->eventctrl1 &= ~TRCEVENTCTL1R_INSTEN_MASK;
switch (drvdata->nr_event) {
case 0x0:
/* generate Event element for event 1 */
- config->eventctrl1 |= val & BIT(1);
+ config->eventctrl1 |= val & TRCEVENTCTL1R_INSTEN_1;
break;
case 0x1:
/* generate Event element for event 1 and 2 */
- config->eventctrl1 |= val & (BIT(0) | BIT(1));
+ config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 | TRCEVENTCTL1R_INSTEN_1);
break;
case 0x2:
/* generate Event element for event 1, 2 and 3 */
- config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
+ config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
+ TRCEVENTCTL1R_INSTEN_1 |
+ TRCEVENTCTL1R_INSTEN_2);
break;
case 0x3:
/* generate Event element for all 4 events */
- config->eventctrl1 |= val & 0xF;
+ config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
+ TRCEVENTCTL1R_INSTEN_1 |
+ TRCEVENTCTL1R_INSTEN_2 |
+ TRCEVENTCTL1R_INSTEN_3);
break;
default:
break;
@@ -702,10 +707,10 @@ static ssize_t bb_ctrl_store(struct device *dev,
* individual range comparators. If include then at least 1
* range must be selected.
*/
- if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
+ if ((val & TRCBBCTLR_MODE) && (FIELD_GET(TRCBBCTLR_RANGE_MASK, val) == 0))
return -EINVAL;
- config->bb_ctrl = val & GENMASK(8, 0);
+ config->bb_ctrl = val & (TRCBBCTLR_MODE | TRCBBCTLR_RANGE_MASK);
return size;
}
static DEVICE_ATTR_RW(bb_ctrl);
@@ -718,7 +723,7 @@ static ssize_t event_vinst_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- val = config->vinst_ctrl & ETMv4_EVENT_MASK;
+ val = FIELD_GET(TRCVICTLR_EVENT_MASK, config->vinst_ctrl);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -734,9 +739,9 @@ static ssize_t event_vinst_store(struct device *dev,
return -EINVAL;
spin_lock(&drvdata->spinlock);
- val &= ETMv4_EVENT_MASK;
- config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
- config->vinst_ctrl |= val;
+ val &= TRCVICTLR_EVENT_MASK >> __bf_shf(TRCVICTLR_EVENT_MASK);
+ config->vinst_ctrl &= ~TRCVICTLR_EVENT_MASK;
+ config->vinst_ctrl |= FIELD_PREP(TRCVICTLR_EVENT_MASK, val);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -750,7 +755,7 @@ static ssize_t s_exlevel_vinst_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_S_MASK) >> TRCVICTLR_EXLEVEL_S_SHIFT;
+ val = FIELD_GET(TRCVICTLR_EXLEVEL_S_MASK, config->vinst_ctrl);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -767,10 +772,10 @@ static ssize_t s_exlevel_vinst_store(struct device *dev,
spin_lock(&drvdata->spinlock);
/* clear all EXLEVEL_S bits */
- config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_S_MASK);
+ config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_S_MASK;
/* enable instruction tracing for corresponding exception level */
val &= drvdata->s_ex_level;
- config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_S_SHIFT);
+ config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_S_MASK);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -785,7 +790,7 @@ static ssize_t ns_exlevel_vinst_show(struct device *dev,
struct etmv4_config *config = &drvdata->config;
/* EXLEVEL_NS, bits[23:20] */
- val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_NS_MASK) >> TRCVICTLR_EXLEVEL_NS_SHIFT;
+ val = FIELD_GET(TRCVICTLR_EXLEVEL_NS_MASK, config->vinst_ctrl);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -802,10 +807,10 @@ static ssize_t ns_exlevel_vinst_store(struct device *dev,
spin_lock(&drvdata->spinlock);
/* clear EXLEVEL_NS bits */
- config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_NS_MASK);
+ config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_NS_MASK;
/* enable instruction tracing for corresponding exception level */
val &= drvdata->ns_ex_level;
- config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_NS_SHIFT);
+ config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -858,11 +863,11 @@ static ssize_t addr_instdatatype_show(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
- val = BMVAL(config->addr_acc[idx], 0, 1);
+ val = FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]);
len = scnprintf(buf, PAGE_SIZE, "%s\n",
- val == ETM_INSTR_ADDR ? "instr" :
- (val == ETM_DATA_LOAD_ADDR ? "data_load" :
- (val == ETM_DATA_STORE_ADDR ? "data_store" :
+ val == TRCACATRn_TYPE_ADDR ? "instr" :
+ (val == TRCACATRn_TYPE_DATA_LOAD_ADDR ? "data_load" :
+ (val == TRCACATRn_TYPE_DATA_STORE_ADDR ? "data_store" :
"data_load_store")));
spin_unlock(&drvdata->spinlock);
return len;
@@ -886,7 +891,7 @@ static ssize_t addr_instdatatype_store(struct device *dev,
idx = config->addr_idx;
if (!strcmp(str, "instr"))
/* TYPE, bits[1:0] */
- config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
+ config->addr_acc[idx] &= ~TRCACATRn_TYPE_MASK;
spin_unlock(&drvdata->spinlock);
return size;
@@ -1144,7 +1149,7 @@ static ssize_t addr_ctxtype_show(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* CONTEXTTYPE, bits[3:2] */
- val = BMVAL(config->addr_acc[idx], 2, 3);
+ val = FIELD_GET(TRCACATRn_CONTEXTTYPE_MASK, config->addr_acc[idx]);
len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
(val == ETM_CTX_CTXID ? "ctxid" :
(val == ETM_CTX_VMID ? "vmid" : "all")));
@@ -1170,18 +1175,18 @@ static ssize_t addr_ctxtype_store(struct device *dev,
idx = config->addr_idx;
if (!strcmp(str, "none"))
/* start by clearing context type bits */
- config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
+ config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_MASK;
else if (!strcmp(str, "ctxid")) {
/* 0b01 The trace unit performs a Context ID */
if (drvdata->numcidc) {
- config->addr_acc[idx] |= BIT(2);
- config->addr_acc[idx] &= ~BIT(3);
+ config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
+ config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_VMID;
}
} else if (!strcmp(str, "vmid")) {
/* 0b10 The trace unit performs a VMID */
if (drvdata->numvmidc) {
- config->addr_acc[idx] &= ~BIT(2);
- config->addr_acc[idx] |= BIT(3);
+ config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_CTXID;
+ config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
}
} else if (!strcmp(str, "all")) {
/*
@@ -1189,9 +1194,9 @@ static ssize_t addr_ctxtype_store(struct device *dev,
* comparison and a VMID
*/
if (drvdata->numcidc)
- config->addr_acc[idx] |= BIT(2);
+ config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
if (drvdata->numvmidc)
- config->addr_acc[idx] |= BIT(3);
+ config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
}
spin_unlock(&drvdata->spinlock);
return size;
@@ -1210,7 +1215,7 @@ static ssize_t addr_context_show(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* context ID comparator bits[6:4] */
- val = BMVAL(config->addr_acc[idx], 4, 6);
+ val = FIELD_GET(TRCACATRn_CONTEXT_MASK, config->addr_acc[idx]);
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1235,8 +1240,8 @@ static ssize_t addr_context_store(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* clear context ID comparator bits[6:4] */
- config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
- config->addr_acc[idx] |= (val << 4);
+ config->addr_acc[idx] &= ~TRCACATRn_CONTEXT_MASK;
+ config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_CONTEXT_MASK);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1253,7 +1258,7 @@ static ssize_t addr_exlevel_s_ns_show(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
- val = BMVAL(config->addr_acc[idx], 8, 14);
+ val = FIELD_GET(TRCACATRn_EXLEVEL_MASK, config->addr_acc[idx]);
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1270,14 +1275,14 @@ static ssize_t addr_exlevel_s_ns_store(struct device *dev,
if (kstrtoul(buf, 0, &val))
return -EINVAL;
- if (val & ~((GENMASK(14, 8) >> 8)))
+ if (val & ~(TRCACATRn_EXLEVEL_MASK >> __bf_shf(TRCACATRn_EXLEVEL_MASK)))
return -EINVAL;
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
- config->addr_acc[idx] &= ~(GENMASK(14, 8));
- config->addr_acc[idx] |= (val << 8);
+ config->addr_acc[idx] &= ~TRCACATRn_EXLEVEL_MASK;
+ config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_EXLEVEL_MASK);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1721,8 +1726,11 @@ static ssize_t res_ctrl_store(struct device *dev,
/* For odd idx the pair inversion bit is RES0 */
if (idx % 2 != 0)
/* PAIRINV, bit[21] */
- val &= ~BIT(21);
- config->res_ctrl[idx] = val & GENMASK(21, 0);
+ val &= ~TRCRSCTLRn_PAIRINV;
+ config->res_ctrl[idx] = val & (TRCRSCTLRn_PAIRINV |
+ TRCRSCTLRn_INV |
+ TRCRSCTLRn_GROUP_MASK |
+ TRCRSCTLRn_SELECT_MASK);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1787,9 +1795,9 @@ static ssize_t sshot_ctrl_store(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->ss_idx;
- config->ss_ctrl[idx] = val & GENMASK(24, 0);
+ config->ss_ctrl[idx] = FIELD_PREP(TRCSSCCRn_SAC_ARC_RST_MASK, val);
/* must clear bit 31 in related status register on programming */
- config->ss_status[idx] &= ~BIT(31);
+ config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1837,9 +1845,9 @@ static ssize_t sshot_pe_ctrl_store(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->ss_idx;
- config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
+ config->ss_pe_cmp[idx] = FIELD_PREP(TRCSSPCICRn_PC_MASK, val);
/* must clear bit 31 in related status register on programming */
- config->ss_status[idx] &= ~BIT(31);
+ config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
spin_unlock(&drvdata->spinlock);
return size;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 3c4d69b096ca..33869c1d20c3 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -131,6 +131,104 @@
#define TRCRSR_TA BIT(12)
/*
+ * Bit positions of registers that are defined above, in the sysreg.h style
+ * of _MASK for multi bit fields and BIT() for single bits.
+ */
+#define TRCIDR0_INSTP0_MASK GENMASK(2, 1)
+#define TRCIDR0_TRCBB BIT(5)
+#define TRCIDR0_TRCCOND BIT(6)
+#define TRCIDR0_TRCCCI BIT(7)
+#define TRCIDR0_RETSTACK BIT(9)
+#define TRCIDR0_NUMEVENT_MASK GENMASK(11, 10)
+#define TRCIDR0_QSUPP_MASK GENMASK(16, 15)
+#define TRCIDR0_TSSIZE_MASK GENMASK(28, 24)
+
+#define TRCIDR2_CIDSIZE_MASK GENMASK(9, 5)
+#define TRCIDR2_VMIDSIZE_MASK GENMASK(14, 10)
+#define TRCIDR2_CCSIZE_MASK GENMASK(28, 25)
+
+#define TRCIDR3_CCITMIN_MASK GENMASK(11, 0)
+#define TRCIDR3_EXLEVEL_S_MASK GENMASK(19, 16)
+#define TRCIDR3_EXLEVEL_NS_MASK GENMASK(23, 20)
+#define TRCIDR3_TRCERR BIT(24)
+#define TRCIDR3_SYNCPR BIT(25)
+#define TRCIDR3_STALLCTL BIT(26)
+#define TRCIDR3_SYSSTALL BIT(27)
+#define TRCIDR3_NUMPROC_LO_MASK GENMASK(30, 28)
+#define TRCIDR3_NUMPROC_HI_MASK GENMASK(13, 12)
+#define TRCIDR3_NOOVERFLOW BIT(31)
+
+#define TRCIDR4_NUMACPAIRS_MASK GENMASK(3, 0)
+#define TRCIDR4_NUMPC_MASK GENMASK(15, 12)
+#define TRCIDR4_NUMRSPAIR_MASK GENMASK(19, 16)
+#define TRCIDR4_NUMSSCC_MASK GENMASK(23, 20)
+#define TRCIDR4_NUMCIDC_MASK GENMASK(27, 24)
+#define TRCIDR4_NUMVMIDC_MASK GENMASK(31, 28)
+
+#define TRCIDR5_NUMEXTIN_MASK GENMASK(8, 0)
+#define TRCIDR5_TRACEIDSIZE_MASK GENMASK(21, 16)
+#define TRCIDR5_ATBTRIG BIT(22)
+#define TRCIDR5_LPOVERRIDE BIT(23)
+#define TRCIDR5_NUMSEQSTATE_MASK GENMASK(27, 25)
+#define TRCIDR5_NUMCNTR_MASK GENMASK(30, 28)
+
+#define TRCCONFIGR_INSTP0_LOAD BIT(1)
+#define TRCCONFIGR_INSTP0_STORE BIT(2)
+#define TRCCONFIGR_INSTP0_LOAD_STORE (TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE)
+#define TRCCONFIGR_BB BIT(3)
+#define TRCCONFIGR_CCI BIT(4)
+#define TRCCONFIGR_CID BIT(6)
+#define TRCCONFIGR_VMID BIT(7)
+#define TRCCONFIGR_COND_MASK GENMASK(10, 8)
+#define TRCCONFIGR_TS BIT(11)
+#define TRCCONFIGR_RS BIT(12)
+#define TRCCONFIGR_QE_W_COUNTS BIT(13)
+#define TRCCONFIGR_QE_WO_COUNTS BIT(14)
+#define TRCCONFIGR_VMIDOPT BIT(15)
+#define TRCCONFIGR_DA BIT(16)
+#define TRCCONFIGR_DV BIT(17)
+
+#define TRCEVENTCTL1R_INSTEN_MASK GENMASK(3, 0)
+#define TRCEVENTCTL1R_INSTEN_0 BIT(0)
+#define TRCEVENTCTL1R_INSTEN_1 BIT(1)
+#define TRCEVENTCTL1R_INSTEN_2 BIT(2)
+#define TRCEVENTCTL1R_INSTEN_3 BIT(3)
+#define TRCEVENTCTL1R_ATB BIT(11)
+#define TRCEVENTCTL1R_LPOVERRIDE BIT(12)
+
+#define TRCSTALLCTLR_ISTALL BIT(8)
+#define TRCSTALLCTLR_INSTPRIORITY BIT(10)
+#define TRCSTALLCTLR_NOOVERFLOW BIT(13)
+
+#define TRCVICTLR_EVENT_MASK GENMASK(7, 0)
+#define TRCVICTLR_SSSTATUS BIT(9)
+#define TRCVICTLR_TRCRESET BIT(10)
+#define TRCVICTLR_TRCERR BIT(11)
+#define TRCVICTLR_EXLEVEL_MASK GENMASK(22, 16)
+#define TRCVICTLR_EXLEVEL_S_MASK GENMASK(19, 16)
+#define TRCVICTLR_EXLEVEL_NS_MASK GENMASK(22, 20)
+
+#define TRCACATRn_TYPE_MASK GENMASK(1, 0)
+#define TRCACATRn_CONTEXTTYPE_MASK GENMASK(3, 2)
+#define TRCACATRn_CONTEXTTYPE_CTXID BIT(2)
+#define TRCACATRn_CONTEXTTYPE_VMID BIT(3)
+#define TRCACATRn_CONTEXT_MASK GENMASK(6, 4)
+#define TRCACATRn_EXLEVEL_MASK GENMASK(14, 8)
+
+#define TRCSSCSRn_STATUS BIT(31)
+#define TRCSSCCRn_SAC_ARC_RST_MASK GENMASK(24, 0)
+
+#define TRCSSPCICRn_PC_MASK GENMASK(7, 0)
+
+#define TRCBBCTLR_MODE BIT(8)
+#define TRCBBCTLR_RANGE_MASK GENMASK(7, 0)
+
+#define TRCRSCTLRn_PAIRINV BIT(21)
+#define TRCRSCTLRn_INV BIT(20)
+#define TRCRSCTLRn_GROUP_MASK GENMASK(19, 16)
+#define TRCRSCTLRn_SELECT_MASK GENMASK(15, 0)
+
+/*
* System instructions to access ETM registers.
* See ETMv4.4 spec ARM IHI0064F section 4.3.6 System instructions
*/
@@ -630,23 +728,9 @@
#define ETM_EXLEVEL_NS_OS BIT(5) /* NonSecure EL1 */
#define ETM_EXLEVEL_NS_HYP BIT(6) /* NonSecure EL2 */
-#define ETM_EXLEVEL_MASK (GENMASK(6, 0))
-#define ETM_EXLEVEL_S_MASK (GENMASK(3, 0))
-#define ETM_EXLEVEL_NS_MASK (GENMASK(6, 4))
-
/* access level controls in TRCACATRn */
#define TRCACATR_EXLEVEL_SHIFT 8
-/* access level control in TRCVICTLR */
-#define TRCVICTLR_EXLEVEL_SHIFT 16
-#define TRCVICTLR_EXLEVEL_S_SHIFT 16
-#define TRCVICTLR_EXLEVEL_NS_SHIFT 20
-
-/* secure / non secure masks - TRCVICTLR, IDR3 */
-#define TRCVICTLR_EXLEVEL_MASK (ETM_EXLEVEL_MASK << TRCVICTLR_EXLEVEL_SHIFT)
-#define TRCVICTLR_EXLEVEL_S_MASK (ETM_EXLEVEL_S_MASK << TRCVICTLR_EXLEVEL_SHIFT)
-#define TRCVICTLR_EXLEVEL_NS_MASK (ETM_EXLEVEL_NS_MASK << TRCVICTLR_EXLEVEL_SHIFT)
-
#define ETM_TRCIDR1_ARCH_MAJOR_SHIFT 8
#define ETM_TRCIDR1_ARCH_MAJOR_MASK (0xfU << ETM_TRCIDR1_ARCH_MAJOR_SHIFT)
#define ETM_TRCIDR1_ARCH_MAJOR(x) \
@@ -986,10 +1070,10 @@ struct etmv4_drvdata {
/* Address comparator access types */
enum etm_addr_acctype {
- ETM_INSTR_ADDR,
- ETM_DATA_LOAD_ADDR,
- ETM_DATA_STORE_ADDR,
- ETM_DATA_LOAD_STORE_ADDR,
+ TRCACATRn_TYPE_ADDR,
+ TRCACATRn_TYPE_DATA_LOAD_ADDR,
+ TRCACATRn_TYPE_DATA_STORE_ADDR,
+ TRCACATRn_TYPE_DATA_LOAD_STORE_ADDR,
};
/* Address comparator context types */
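The ETM4x hunks above replace open-coded shifts and BMVAL() extractions with sysreg-style *_MASK definitions consumed through FIELD_GET(), FIELD_PREP() and __bf_shf(), which is why the separate *_SHIFT constants can be deleted. As a rough illustration of that idiom, here is a minimal userspace sketch; the GENMASK/FIELD_* macros below are simplified 32-bit stand-ins for the real <linux/bits.h> and <linux/bitfield.h> helpers, and the EXLEVEL value is an arbitrary example rather than a real trace configuration.

#include <stdint.h>
#include <stdio.h>

/* simplified 32-bit stand-ins for the kernel's GENMASK()/__bf_shf()/FIELD_*() */
#define GENMASK(h, l)     ((~0u << (l)) & (~0u >> (31 - (h))))
#define BF_SHF(mask)      ((unsigned int)__builtin_ctz(mask))
#define FIELD_PREP(m, v)  (((v) << BF_SHF(m)) & (m))
#define FIELD_GET(m, v)   (((v) & (m)) >> BF_SHF(m))

#define TRCVICTLR_EXLEVEL_NS_MASK GENMASK(22, 20)

int main(void)
{
	uint32_t vinst_ctrl = 0;
	uint32_t val = 0x5;	/* example exception-level bits, not a real config */

	/* equivalent of: vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK) */
	vinst_ctrl &= ~TRCVICTLR_EXLEVEL_NS_MASK;
	vinst_ctrl |= FIELD_PREP(TRCVICTLR_EXLEVEL_NS_MASK, val);

	printf("vinst_ctrl = %#x, field reads back as %#x\n",
	       vinst_ctrl, FIELD_GET(TRCVICTLR_EXLEVEL_NS_MASK, vinst_ctrl));
	return 0;
}

Deriving the shift from the mask is what makes the removed TRCVICTLR_EXLEVEL_*_SHIFT constants redundant.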
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index b0eae94909f4..c0c35785a0dc 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -656,6 +656,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
unsigned int_addr_flag = 0;
struct i2c_msg *m_start = msg;
bool is_read;
+ u8 *dma_buf = NULL;
dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
@@ -703,7 +704,17 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
dev->msg = m_start;
dev->recv_len_abort = false;
+ if (dev->use_dma) {
+ dma_buf = i2c_get_dma_safe_msg_buf(m_start, 1);
+ if (!dma_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ dev->buf = dma_buf;
+ }
+
ret = at91_do_twi_transfer(dev);
+ i2c_put_dma_safe_msg_buf(dma_buf, m_start, !ret);
ret = (ret < 0) ? ret : num;
out:
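The at91 change wraps the transfer in i2c_get_dma_safe_msg_buf() and i2c_put_dma_safe_msg_buf(), which bounce the message through a DMA-safe buffer and copy read data back to the caller only when the transfer succeeded (hence the !ret argument on the put side). The sketch below models that contract in plain userspace C; get_dma_safe_buf() and put_dma_safe_buf() are hypothetical stand-ins invented for this sketch, not the kernel API itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_msg {
	bool is_read;
	size_t len;
	uint8_t *buf;			/* caller's, possibly not DMA-safe */
};

/* hypothetical stand-in for i2c_get_dma_safe_msg_buf(msg, threshold) */
static uint8_t *get_dma_safe_buf(struct fake_msg *m)
{
	uint8_t *bounce = malloc(m->len);

	if (bounce && !m->is_read)
		memcpy(bounce, m->buf, m->len);	/* writes: snapshot the data */
	return bounce;
}

/* hypothetical stand-in for i2c_put_dma_safe_msg_buf(buf, msg, xferred) */
static void put_dma_safe_buf(uint8_t *bounce, struct fake_msg *m, bool xferred)
{
	if (bounce && m->is_read && xferred)
		memcpy(m->buf, bounce, m->len);	/* reads: copy back on success only */
	free(bounce);
}

int main(void)
{
	uint8_t data[4] = { 0 };
	struct fake_msg m = { .is_read = true, .len = sizeof(data), .buf = data };
	uint8_t *bounce = get_dma_safe_buf(&m);

	if (!bounce)
		return 1;			/* the driver's -ENOMEM path */

	memset(bounce, 0x5a, m.len);		/* pretend the DMA engine filled it */
	put_dma_safe_buf(bounce, &m, true);	/* "true" plays the role of !ret */
	printf("caller buffer after put: %#x %#x %#x %#x\n",
	       data[0], data[1], data[2], data[3]);
	return 0;
}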
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 805c77143a0f..b4c1ad19cdae 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -760,7 +760,7 @@ static void cdns_i2c_master_reset(struct i2c_adapter *adap)
static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
struct i2c_adapter *adap)
{
- unsigned long time_left;
+ unsigned long time_left, msg_timeout;
u32 reg;
id->p_msg = msg;
@@ -785,8 +785,16 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
else
cdns_i2c_msend(id);
+ /* Minimal time to execute this message */
+ msg_timeout = msecs_to_jiffies((1000 * msg->len * BITS_PER_BYTE) / id->i2c_clk);
+ /* Plus some wiggle room */
+ msg_timeout += msecs_to_jiffies(500);
+
+ if (msg_timeout < adap->timeout)
+ msg_timeout = adap->timeout;
+
/* Wait for the signal of completion */
- time_left = wait_for_completion_timeout(&id->xfer_done, adap->timeout);
+ time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout);
if (time_left == 0) {
cdns_i2c_master_reset(adap);
dev_err(id->adap.dev.parent,
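The cadence hunk sizes the completion timeout from the message itself: the wire time is roughly len * 8 bit-times at the bus clock, padded by 500 ms and never allowed to fall below the adapter default. The standalone sketch below replays that arithmetic; the bus clock, message length, HZ value and 1 s adapter default are assumed example numbers, not values taken from the driver.

#include <stdio.h>

#define BITS_PER_BYTE	8
#define HZ		250		/* assumed CONFIG_HZ for the example */
#define MSEC_PER_SEC	1000

static unsigned long msecs_to_jiffies_approx(unsigned long ms)
{
	return (ms * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;	/* round up */
}

int main(void)
{
	unsigned long i2c_clk = 100000;		/* 100 kHz bus, assumed */
	unsigned long len = 32768;		/* long EEPROM-style read, assumed */
	unsigned long adap_timeout = msecs_to_jiffies_approx(1000);	/* assumed default */
	unsigned long wire_ms = (1000UL * len * BITS_PER_BYTE) / i2c_clk;
	unsigned long msg_timeout;

	/* minimal time to clock the message out, plus some wiggle room */
	msg_timeout = msecs_to_jiffies_approx(wire_ms);
	msg_timeout += msecs_to_jiffies_approx(500);

	if (msg_timeout < adap_timeout)
		msg_timeout = adap_timeout;

	printf("wire time ~%lu ms -> timeout %lu jiffies (adapter default %lu)\n",
	       wire_ms, msg_timeout, adap_timeout);
	return 0;
}

With a 32 KiB read at 100 kHz the wire time alone is about 2.6 s, which is why a fixed adap->timeout could previously expire in the middle of a long transfer.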
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index e9d07323c604..9e09db31a937 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -539,10 +539,9 @@ i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
- ret = pm_runtime_get_sync(dev->dev);
+ ret = pm_runtime_resume_and_get(dev->dev);
if (ret < 0) {
dev_err(dev->dev, "Failed to runtime_get device: %d\n", ret);
- pm_runtime_put_noidle(dev->dev);
return ret;
}
@@ -821,10 +820,9 @@ static int davinci_i2c_probe(struct platform_device *pdev)
pm_runtime_enable(dev->dev);
- r = pm_runtime_get_sync(dev->dev);
+ r = pm_runtime_resume_and_get(dev->dev);
if (r < 0) {
dev_err(dev->dev, "failed to runtime_get device: %d\n", r);
- pm_runtime_put_noidle(dev->dev);
return r;
}
@@ -898,11 +896,9 @@ static int davinci_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&dev->adapter);
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
return ret;
- }
davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
diff --git a/drivers/i2c/busses/i2c-designware-amdpsp.c b/drivers/i2c/busses/i2c-designware-amdpsp.c
index 9b37f2b95abc..b624356c945f 100644
--- a/drivers/i2c/busses/i2c-designware-amdpsp.c
+++ b/drivers/i2c/busses/i2c-designware-amdpsp.c
@@ -16,8 +16,8 @@
#define PSP_CMD_TIMEOUT_US (500 * USEC_PER_MSEC)
#define PSP_I2C_REQ_BUS_CMD 0x64
-#define PSP_I2C_REQ_RETRY_CNT 10
-#define PSP_I2C_REQ_RETRY_DELAY_US (50 * USEC_PER_MSEC)
+#define PSP_I2C_REQ_RETRY_CNT 400
+#define PSP_I2C_REQ_RETRY_DELAY_US (25 * USEC_PER_MSEC)
#define PSP_I2C_REQ_STS_OK 0x0
#define PSP_I2C_REQ_STS_BUS_BUSY 0x1
#define PSP_I2C_REQ_STS_INV_PARAM 0x3
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 9f8574320eb2..e7d316b1401a 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -266,9 +266,9 @@ int i2c_dw_acpi_configure(struct device *device)
* selected speed modes.
*/
i2c_dw_acpi_params(device, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, &ss_ht);
+ i2c_dw_acpi_params(device, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht);
i2c_dw_acpi_params(device, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, &fp_ht);
i2c_dw_acpi_params(device, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht);
- i2c_dw_acpi_params(device, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht);
switch (t->bus_freq_hz) {
case I2C_MAX_STANDARD_MODE_FREQ:
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index c16157ee8c52..6078fa0c0d48 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -528,6 +528,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
case I2C_SMBUS_BLOCK_PROC_CALL:
dev_dbg(dev, "I2C_SMBUS_BLOCK_PROC_CALL\n");
+ if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
+ return -EINVAL;
+
dma_size = I2C_SMBUS_BLOCK_MAX;
desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, 1);
desc->wr_len_cmd = data->block[0] + 1;
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 07eb819072c4..61cc5b2462c6 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -30,18 +30,21 @@
#define REG_TOK_RDATA1 0x1c
/* Control register fields */
-#define REG_CTRL_START BIT(0)
-#define REG_CTRL_ACK_IGNORE BIT(1)
-#define REG_CTRL_STATUS BIT(2)
-#define REG_CTRL_ERROR BIT(3)
-#define REG_CTRL_CLKDIV GENMASK(21, 12)
-#define REG_CTRL_CLKDIVEXT GENMASK(29, 28)
-
-#define REG_SLV_ADDR GENMASK(7, 0)
-#define REG_SLV_SDA_FILTER GENMASK(10, 8)
-#define REG_SLV_SCL_FILTER GENMASK(13, 11)
-#define REG_SLV_SCL_LOW GENMASK(27, 16)
-#define REG_SLV_SCL_LOW_EN BIT(28)
+#define REG_CTRL_START BIT(0)
+#define REG_CTRL_ACK_IGNORE BIT(1)
+#define REG_CTRL_STATUS BIT(2)
+#define REG_CTRL_ERROR BIT(3)
+#define REG_CTRL_CLKDIV_SHIFT 12
+#define REG_CTRL_CLKDIV_MASK GENMASK(21, REG_CTRL_CLKDIV_SHIFT)
+#define REG_CTRL_CLKDIVEXT_SHIFT 28
+#define REG_CTRL_CLKDIVEXT_MASK GENMASK(29, REG_CTRL_CLKDIVEXT_SHIFT)
+
+#define REG_SLV_ADDR_MASK GENMASK(7, 0)
+#define REG_SLV_SDA_FILTER_MASK GENMASK(10, 8)
+#define REG_SLV_SCL_FILTER_MASK GENMASK(13, 11)
+#define REG_SLV_SCL_LOW_SHIFT 16
+#define REG_SLV_SCL_LOW_MASK GENMASK(27, REG_SLV_SCL_LOW_SHIFT)
+#define REG_SLV_SCL_LOW_EN BIT(28)
#define I2C_TIMEOUT_MS 500
#define FILTER_DELAY 15
@@ -62,10 +65,6 @@ enum {
STATE_WRITE,
};
-struct meson_i2c_data {
- unsigned char div_factor;
-};
-
/**
* struct meson_i2c - Meson I2C device private data
*
@@ -83,7 +82,7 @@ struct meson_i2c_data {
* @done: Completion used to wait for transfer termination
* @tokens: Sequence of tokens to be written to the device
* @num_tokens: Number of tokens
- * @data: Pointer to the controlller's platform data
+ * @data: Pointer to the controller's platform data
*/
struct meson_i2c {
struct i2c_adapter adap;
@@ -106,6 +105,10 @@ struct meson_i2c {
const struct meson_i2c_data *data;
};
+struct meson_i2c_data {
+ void (*set_clk_div)(struct meson_i2c *i2c, unsigned int freq);
+};
+
static void meson_i2c_set_mask(struct meson_i2c *i2c, int reg, u32 mask,
u32 val)
{
@@ -134,14 +137,62 @@ static void meson_i2c_add_token(struct meson_i2c *i2c, int token)
i2c->num_tokens++;
}
-static void meson_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq)
+static void meson_gxbb_axg_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq)
+{
+ unsigned long clk_rate = clk_get_rate(i2c->clk);
+ unsigned int div_h, div_l;
+
+ /* According to the I2C-bus spec 2.1, in Fast-mode the minimum LOW period is 1.3us and
+ * the minimum HIGH period is 0.6us.
+ * For a 400000 Hz bus the period is 2.5us. To stay within the spec, give 40% of the period
+ * to HIGH and 60% to LOW. This means HIGH at 1.0us and LOW at 1.5us.
+ * The same applies for Fast-mode Plus, where the minimum LOW is 0.5us and the minimum HIGH is 0.26us.
+ * Duty = H/(H + L) = 2/5
+ */
+ if (freq <= I2C_MAX_STANDARD_MODE_FREQ) {
+ div_h = DIV_ROUND_UP(clk_rate, freq);
+ div_l = DIV_ROUND_UP(div_h, 4);
+ div_h = DIV_ROUND_UP(div_h, 2) - FILTER_DELAY;
+ } else {
+ div_h = DIV_ROUND_UP(clk_rate * 2, freq * 5) - FILTER_DELAY;
+ div_l = DIV_ROUND_UP(clk_rate * 3, freq * 5 * 2);
+ }
+
+ /* clock divider has 12 bits */
+ if (div_h > GENMASK(11, 0)) {
+ dev_err(i2c->dev, "requested bus frequency too low\n");
+ div_h = GENMASK(11, 0);
+ }
+ if (div_l > GENMASK(11, 0)) {
+ dev_err(i2c->dev, "requested bus frequency too low\n");
+ div_l = GENMASK(11, 0);
+ }
+
+ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV_MASK,
+ FIELD_PREP(REG_CTRL_CLKDIV_MASK, div_h & GENMASK(9, 0)));
+
+ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT_MASK,
+ FIELD_PREP(REG_CTRL_CLKDIVEXT_MASK, div_h >> 10));
+
+ /* set SCL low delay */
+ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_MASK,
+ FIELD_PREP(REG_SLV_SCL_LOW_MASK, div_l));
+
+ /* Enable HIGH/LOW mode */
+ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_EN, REG_SLV_SCL_LOW_EN);
+
+ dev_dbg(i2c->dev, "%s: clk %lu, freq %u, divh %u, divl %u\n", __func__,
+ clk_rate, freq, div_h, div_l);
+}
+
+static void meson6_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq)
{
unsigned long clk_rate = clk_get_rate(i2c->clk);
unsigned int div;
div = DIV_ROUND_UP(clk_rate, freq);
div -= FILTER_DELAY;
- div = DIV_ROUND_UP(div, i2c->data->div_factor);
+ div = DIV_ROUND_UP(div, 4);
/* clock divider has 12 bits */
if (div > GENMASK(11, 0)) {
@@ -149,11 +200,11 @@ static void meson_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq)
div = GENMASK(11, 0);
}
- meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV,
- FIELD_PREP(REG_CTRL_CLKDIV, div & GENMASK(9, 0)));
+ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV_MASK,
+ FIELD_PREP(REG_CTRL_CLKDIV_MASK, div & GENMASK(9, 0)));
- meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT,
- FIELD_PREP(REG_CTRL_CLKDIVEXT, div >> 10));
+ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT_MASK,
+ FIELD_PREP(REG_CTRL_CLKDIVEXT_MASK, div >> 10));
/* Disable HIGH/LOW mode */
meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_EN, 0);
@@ -292,8 +343,8 @@ static void meson_i2c_do_start(struct meson_i2c *i2c, struct i2c_msg *msg)
TOKEN_SLAVE_ADDR_WRITE;
- meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_ADDR,
- FIELD_PREP(REG_SLV_ADDR, msg->addr << 1));
+ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_ADDR_MASK,
+ FIELD_PREP(REG_SLV_ADDR_MASK, msg->addr << 1));
meson_i2c_add_token(i2c, TOKEN_START);
meson_i2c_add_token(i2c, token);
@@ -467,9 +518,13 @@ static int meson_i2c_probe(struct platform_device *pdev)
/* Disable filtering */
meson_i2c_set_mask(i2c, REG_SLAVE_ADDR,
- REG_SLV_SDA_FILTER | REG_SLV_SCL_FILTER, 0);
+ REG_SLV_SDA_FILTER_MASK | REG_SLV_SCL_FILTER_MASK, 0);
- meson_i2c_set_clk_div(i2c, timings.bus_freq_hz);
+ if (!i2c->data->set_clk_div) {
+ clk_disable_unprepare(i2c->clk);
+ return -EINVAL;
+ }
+ i2c->data->set_clk_div(i2c, timings.bus_freq_hz);
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0) {
@@ -491,15 +546,15 @@ static int meson_i2c_remove(struct platform_device *pdev)
}
static const struct meson_i2c_data i2c_meson6_data = {
- .div_factor = 4,
+ .set_clk_div = meson6_i2c_set_clk_div,
};
static const struct meson_i2c_data i2c_gxbb_data = {
- .div_factor = 4,
+ .set_clk_div = meson_gxbb_axg_i2c_set_clk_div,
};
static const struct meson_i2c_data i2c_axg_data = {
- .div_factor = 3,
+ .set_clk_div = meson_gxbb_axg_i2c_set_clk_div,
};
static const struct of_device_id meson_i2c_match[] = {
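The new meson_gxbb_axg_i2c_set_clk_div() derives separate HIGH and LOW counts so fast-mode SCL gets roughly a 2/5 duty cycle instead of the symmetric divider kept for Meson6. The sketch below only replays that divider arithmetic in userspace; the 200 MHz peripheral clock is an assumed example value, and what the hardware does with the resulting counts is exactly as in the driver code above.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))
#define FILTER_DELAY			15
#define I2C_MAX_STANDARD_MODE_FREQ	100000UL

static void calc(unsigned long clk_rate, unsigned long freq)
{
	unsigned long div_h, div_l;

	if (freq <= I2C_MAX_STANDARD_MODE_FREQ) {
		div_h = DIV_ROUND_UP(clk_rate, freq);
		div_l = DIV_ROUND_UP(div_h, 4);
		div_h = DIV_ROUND_UP(div_h, 2) - FILTER_DELAY;
	} else {
		/* fast mode: ~40% high / 60% low, i.e. duty = 2/5 */
		div_h = DIV_ROUND_UP(clk_rate * 2, freq * 5) - FILTER_DELAY;
		div_l = DIV_ROUND_UP(clk_rate * 3, freq * 5 * 2);
	}
	printf("clk=%lu Hz freq=%lu Hz -> div_h=%lu div_l=%lu\n",
	       clk_rate, freq, div_h, div_l);
}

int main(void)
{
	calc(200000000UL, 100000UL);	/* standard mode */
	calc(200000000UL, 400000UL);	/* fast mode */
	return 0;
}

At 400 kHz with the assumed 200 MHz clock this computes div_h = 185 and div_l = 150.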
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index f651d3e124d6..bdecb78bfc26 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -1177,7 +1177,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
int left_num = num;
struct mtk_i2c *i2c = i2c_get_adapdata(adap);
- ret = clk_bulk_prepare_enable(I2C_MT65XX_CLK_MAX, i2c->clocks);
+ ret = clk_bulk_enable(I2C_MT65XX_CLK_MAX, i2c->clocks);
if (ret)
return ret;
@@ -1231,7 +1231,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
ret = num;
err_exit:
- clk_bulk_disable_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
+ clk_bulk_disable(I2C_MT65XX_CLK_MAX, i2c->clocks);
return ret;
}
@@ -1412,7 +1412,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
return ret;
}
mtk_i2c_init_hw(i2c);
- clk_bulk_disable_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
+ clk_bulk_disable(I2C_MT65XX_CLK_MAX, i2c->clocks);
ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq,
IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
@@ -1439,6 +1439,8 @@ static int mtk_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
+ clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
+
return 0;
}
@@ -1448,6 +1450,7 @@ static int mtk_i2c_suspend_noirq(struct device *dev)
struct mtk_i2c *i2c = dev_get_drvdata(dev);
i2c_mark_adapter_suspended(&i2c->adap);
+ clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
return 0;
}
@@ -1465,7 +1468,7 @@ static int mtk_i2c_resume_noirq(struct device *dev)
mtk_i2c_init_hw(i2c);
- clk_bulk_disable_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
+ clk_bulk_disable(I2C_MT65XX_CLK_MAX, i2c->clocks);
i2c_mark_adapter_resumed(&i2c->adap);
diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
index 901f0fb04fee..cfe6de8175dd 100644
--- a/drivers/i2c/busses/i2c-mt7621.c
+++ b/drivers/i2c/busses/i2c-mt7621.c
@@ -270,18 +270,15 @@ static void mtk_i2c_init(struct mtk_i2c *i2c)
static int mtk_i2c_probe(struct platform_device *pdev)
{
- struct resource *res;
struct mtk_i2c *i2c;
struct i2c_adapter *adap;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
i2c = devm_kzalloc(&pdev->dev, sizeof(struct mtk_i2c), GFP_KERNEL);
if (!i2c)
return -ENOMEM;
- i2c->base = devm_ioremap_resource(&pdev->dev, res);
+ i2c->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(i2c->base))
return PTR_ERR(i2c->base);
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 71aad029425d..5960ccde6574 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -314,6 +314,7 @@ struct npcm_i2c {
u64 rec_fail_cnt;
u64 nack_cnt;
u64 timeout_cnt;
+ u64 tx_complete_cnt;
};
static inline void npcm_i2c_select_bank(struct npcm_i2c *bus,
@@ -359,14 +360,14 @@ static int npcm_i2c_get_SCL(struct i2c_adapter *_adap)
{
struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap);
- return !!(I2CCTL3_SCL_LVL & ioread32(bus->reg + NPCM_I2CCTL3));
+ return !!(I2CCTL3_SCL_LVL & ioread8(bus->reg + NPCM_I2CCTL3));
}
static int npcm_i2c_get_SDA(struct i2c_adapter *_adap)
{
struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap);
- return !!(I2CCTL3_SDA_LVL & ioread32(bus->reg + NPCM_I2CCTL3));
+ return !!(I2CCTL3_SDA_LVL & ioread8(bus->reg + NPCM_I2CCTL3));
}
static inline u16 npcm_i2c_get_index(struct npcm_i2c *bus)
@@ -563,6 +564,15 @@ static inline void npcm_i2c_nack(struct npcm_i2c *bus)
iowrite8(val, bus->reg + NPCM_I2CCTL1);
}
+static inline void npcm_i2c_clear_master_status(struct npcm_i2c *bus)
+{
+ u8 val;
+
+ /* Clear NEGACK, STASTR and BER bits */
+ val = NPCM_I2CST_BER | NPCM_I2CST_NEGACK | NPCM_I2CST_STASTR;
+ iowrite8(val, bus->reg + NPCM_I2CST);
+}
+
#if IS_ENABLED(CONFIG_I2C_SLAVE)
static void npcm_i2c_slave_int_enable(struct npcm_i2c *bus, bool enable)
{
@@ -642,8 +652,8 @@ static void npcm_i2c_reset(struct npcm_i2c *bus)
iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST);
iowrite8(0xFF, bus->reg + NPCM_I2CST);
- /* Clear EOB bit */
- iowrite8(NPCM_I2CCST3_EO_BUSY, bus->reg + NPCM_I2CCST3);
+ /* Clear and disable EOB */
+ npcm_i2c_eob_int(bus, false);
/* Clear all fifo bits: */
iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS);
@@ -655,6 +665,9 @@ static void npcm_i2c_reset(struct npcm_i2c *bus)
}
#endif
+ /* clear status bits for spurious interrupts */
+ npcm_i2c_clear_master_status(bus);
+
bus->state = I2C_IDLE;
}
@@ -684,6 +697,8 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
switch (op_status) {
case I2C_MASTER_DONE_IND:
bus->cmd_err = bus->msgs_num;
+ if (bus->tx_complete_cnt < ULLONG_MAX)
+ bus->tx_complete_cnt++;
fallthrough;
case I2C_BLOCK_BYTES_ERR_IND:
/* Master tx finished and all transmit bytes were sent */
@@ -815,15 +830,6 @@ static void npcm_i2c_read_fifo(struct npcm_i2c *bus, u8 bytes_in_fifo)
}
}
-static inline void npcm_i2c_clear_master_status(struct npcm_i2c *bus)
-{
- u8 val;
-
- /* Clear NEGACK, STASTR and BER bits */
- val = NPCM_I2CST_BER | NPCM_I2CST_NEGACK | NPCM_I2CST_STASTR;
- iowrite8(val, bus->reg + NPCM_I2CST);
-}
-
static void npcm_i2c_master_abort(struct npcm_i2c *bus)
{
/* Only current master is allowed to issue a stop condition */
@@ -1231,7 +1237,16 @@ static irqreturn_t npcm_i2c_int_slave_handler(struct npcm_i2c *bus)
ret = IRQ_HANDLED;
} /* SDAST */
- return ret;
+ /*
+ * if irq is not one of the above, make sure EOB is disabled and all
+ * status bits are cleared.
+ */
+ if (ret == IRQ_NONE) {
+ npcm_i2c_eob_int(bus, false);
+ npcm_i2c_clear_master_status(bus);
+ }
+
+ return IRQ_HANDLED;
}
static int npcm_i2c_reg_slave(struct i2c_client *client)
@@ -1467,6 +1482,9 @@ static void npcm_i2c_irq_handle_nack(struct npcm_i2c *bus)
npcm_i2c_eob_int(bus, false);
npcm_i2c_master_stop(bus);
+ /* Clear SDA Status bit (by reading dummy byte) */
+ npcm_i2c_rd_byte(bus);
+
/*
* The bus is released from stall only after the SW clears
* NEGACK bit. Then a Stop condition is sent.
@@ -1474,6 +1492,8 @@ static void npcm_i2c_irq_handle_nack(struct npcm_i2c *bus)
npcm_i2c_clear_master_status(bus);
readx_poll_timeout_atomic(ioread8, bus->reg + NPCM_I2CCST, val,
!(val & NPCM_I2CCST_BUSY), 10, 200);
+ /* verify no status bits are still set after bus is released */
+ npcm_i2c_clear_master_status(bus);
}
bus->state = I2C_IDLE;
@@ -1672,10 +1692,10 @@ static int npcm_i2c_recovery_tgclk(struct i2c_adapter *_adap)
int iter = 27;
if ((npcm_i2c_get_SDA(_adap) == 1) && (npcm_i2c_get_SCL(_adap) == 1)) {
- dev_dbg(bus->dev, "bus%d recovery skipped, bus not stuck",
- bus->num);
+ dev_dbg(bus->dev, "bus%d-0x%x recovery skipped, bus not stuck",
+ bus->num, bus->dest_addr);
npcm_i2c_reset(bus);
- return status;
+ return 0;
}
npcm_i2c_int_enable(bus, false);
@@ -1909,6 +1929,7 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode,
bus_freq_hz < I2C_FREQ_MIN_HZ || bus_freq_hz > I2C_FREQ_MAX_HZ)
return -EINVAL;
+ npcm_i2c_int_enable(bus, false);
npcm_i2c_disable(bus);
/* Configure FIFO mode : */
@@ -1937,10 +1958,17 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode,
val = (val | NPCM_I2CCTL1_NMINTE) & ~NPCM_I2CCTL1_RWS;
iowrite8(val, bus->reg + NPCM_I2CCTL1);
- npcm_i2c_int_enable(bus, true);
-
npcm_i2c_reset(bus);
+ /* check HW is OK: SDA and SCL should be high at this point. */
+ if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) {
+ dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num);
+ dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap),
+ npcm_i2c_get_SCL(&bus->adap));
+ return -ENXIO;
+ }
+
+ npcm_i2c_int_enable(bus, true);
return 0;
}
@@ -1988,10 +2016,14 @@ static irqreturn_t npcm_i2c_bus_irq(int irq, void *dev_id)
#if IS_ENABLED(CONFIG_I2C_SLAVE)
if (bus->slave) {
bus->master_or_slave = I2C_SLAVE;
- return npcm_i2c_int_slave_handler(bus);
+ if (npcm_i2c_int_slave_handler(bus))
+ return IRQ_HANDLED;
}
#endif
- return IRQ_NONE;
+ /* clear status bits for spurious interrupts */
+ npcm_i2c_clear_master_status(bus);
+
+ return IRQ_HANDLED;
}
static bool npcm_i2c_master_start_xmit(struct npcm_i2c *bus,
@@ -2047,8 +2079,7 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
u16 nwrite, nread;
u8 *write_data, *read_data;
u8 slave_addr;
- int timeout;
- int ret = 0;
+ unsigned long timeout;
bool read_block = false;
bool read_PEC = false;
u8 bus_busy;
@@ -2099,13 +2130,13 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
* 9: bits per transaction (including the ack/nack)
*/
timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite);
- timeout = max(msecs_to_jiffies(35), usecs_to_jiffies(timeout_usec));
+ timeout = max_t(unsigned long, bus->adap.timeout, usecs_to_jiffies(timeout_usec));
if (nwrite >= 32 * 1024 || nread >= 32 * 1024) {
dev_err(bus->dev, "i2c%d buffer too big\n", bus->num);
return -EINVAL;
}
- time_left = jiffies + msecs_to_jiffies(DEFAULT_STALL_COUNT) + 1;
+ time_left = jiffies + timeout + 1;
do {
/*
* we must clear slave address immediately when the bus is not
@@ -2138,12 +2169,12 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
bus->read_block_use = read_block;
reinit_completion(&bus->cmd_complete);
- if (!npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread,
- write_data, read_data, read_PEC,
- read_block))
- ret = -EBUSY;
- if (ret != -EBUSY) {
+ npcm_i2c_int_enable(bus, true);
+
+ if (npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread,
+ write_data, read_data, read_PEC,
+ read_block)) {
time_left = wait_for_completion_timeout(&bus->cmd_complete,
timeout);
@@ -2157,26 +2188,31 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
}
}
- ret = bus->cmd_err;
/* if there was BER, check if need to recover the bus: */
if (bus->cmd_err == -EAGAIN)
- ret = i2c_recover_bus(adap);
+ bus->cmd_err = i2c_recover_bus(adap);
/*
* After any type of error, check if LAST bit is still set,
* due to a HW issue.
* It cannot be cleared without resetting the module.
*/
- if (bus->cmd_err &&
- (NPCM_I2CRXF_CTL_LAST_PEC & ioread8(bus->reg + NPCM_I2CRXF_CTL)))
+ else if (bus->cmd_err &&
+ (NPCM_I2CRXF_CTL_LAST_PEC & ioread8(bus->reg + NPCM_I2CRXF_CTL)))
npcm_i2c_reset(bus);
+ /* after any xfer, successful or not, stall and EOB must be disabled */
+ npcm_i2c_stall_after_start(bus, false);
+ npcm_i2c_eob_int(bus, false);
+
#if IS_ENABLED(CONFIG_I2C_SLAVE)
/* reenable slave if it was enabled */
if (bus->slave)
iowrite8((bus->slave->addr & 0x7F) | NPCM_I2CADDR_SAEN,
bus->reg + NPCM_I2CADDR1);
+#else
+ npcm_i2c_int_enable(bus, false);
#endif
return bus->cmd_err;
}
@@ -2223,17 +2259,18 @@ static void npcm_i2c_init_debugfs(struct platform_device *pdev,
debugfs_create_u64("rec_succ_cnt", 0444, d, &bus->rec_succ_cnt);
debugfs_create_u64("rec_fail_cnt", 0444, d, &bus->rec_fail_cnt);
debugfs_create_u64("timeout_cnt", 0444, d, &bus->timeout_cnt);
+ debugfs_create_u64("tx_complete_cnt", 0444, d, &bus->tx_complete_cnt);
bus->debugfs = d;
}
static int npcm_i2c_probe_bus(struct platform_device *pdev)
{
- struct npcm_i2c *bus;
+ struct device_node *np = pdev->dev.of_node;
+ static struct regmap *gcr_regmap;
struct i2c_adapter *adap;
+ struct npcm_i2c *bus;
struct clk *i2c_clk;
- static struct regmap *gcr_regmap;
- static struct regmap *clk_regmap;
int irq;
int ret;
@@ -2250,15 +2287,14 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
return PTR_ERR(i2c_clk);
bus->apb_clk = clk_get_rate(i2c_clk);
- gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
+ gcr_regmap = syscon_regmap_lookup_by_phandle(np, "nuvoton,sys-mgr");
+ if (IS_ERR(gcr_regmap))
+ gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
+
if (IS_ERR(gcr_regmap))
return PTR_ERR(gcr_regmap);
regmap_write(gcr_regmap, NPCM_I2CSEGCTL, NPCM_I2CSEGCTL_INIT_VAL);
- clk_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-clk");
- if (IS_ERR(clk_regmap))
- return PTR_ERR(clk_regmap);
-
bus->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bus->reg))
return PTR_ERR(bus->reg);
@@ -2269,7 +2305,7 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
adap = &bus->adap;
adap->owner = THIS_MODULE;
adap->retries = 3;
- adap->timeout = HZ;
+ adap->timeout = msecs_to_jiffies(35);
adap->algo = &npcm_i2c_algo;
adap->quirks = &npcm_i2c_quirks;
adap->algo_data = bus;
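The npcm7xx xfer path now bounds the wait with a per-transfer estimate: about two stretched 9-bit byte times per byte, covering the address phase plus the read and write payloads, never less than the adapter timeout (now 35 ms). A small standalone sketch of that estimate follows; the bus frequency and byte counts are assumed example values.

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

int main(void)
{
	unsigned long bus_freq = 100000;	/* 100 kHz, assumed */
	unsigned int nwrite = 2, nread = 32;	/* assumed example transfer */

	/* two stretched 9-bit byte times per byte, incl. address bytes */
	unsigned long timeout_usec =
		(2UL * 9 * USEC_PER_SEC / bus_freq) * (2 + nread + nwrite);

	printf("timeout ~%lu us (~%lu ms) for %u written / %u read bytes\n",
	       timeout_usec, timeout_usec / 1000, nwrite, nread);
	return 0;
}

For a 2-byte write plus 32-byte read at 100 kHz this comes to roughly 6.5 ms, so the 35 ms adapter default still dominates for short transfers.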
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 5241e6f414e9..2e74747eec9c 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -15,7 +15,7 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of_irq.h>
-#include <asm/prom.h>
+
#include <asm/pmac_low_i2c.h>
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 5b920f0fc7dd..6ac402ea58fb 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -727,16 +727,14 @@ static int setup_gpi_dma(struct geni_i2c_dev *gi2c)
if (IS_ERR(gi2c->tx_c)) {
ret = dev_err_probe(gi2c->se.dev, PTR_ERR(gi2c->tx_c),
"Failed to get tx DMA ch\n");
- if (ret < 0)
- goto err_tx;
+ goto err_tx;
}
gi2c->rx_c = dma_request_chan(gi2c->se.dev, "rx");
if (IS_ERR(gi2c->rx_c)) {
ret = dev_err_probe(gi2c->se.dev, PTR_ERR(gi2c->rx_c),
"Failed to get rx DMA ch\n");
- if (ret < 0)
- goto err_rx;
+ goto err_rx;
}
dev_dbg(gi2c->se.dev, "Grabbed GPI dma channels\n");
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 0db3d7559066..6e7be9d9f504 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -45,44 +45,44 @@
#define ICDMAER 0x3c /* DMA enable (Gen3) */
/* ICSCR */
-#define SDBS (1 << 3) /* slave data buffer select */
-#define SIE (1 << 2) /* slave interface enable */
-#define GCAE (1 << 1) /* general call address enable */
-#define FNA (1 << 0) /* forced non acknowledgment */
+#define SDBS BIT(3) /* slave data buffer select */
+#define SIE BIT(2) /* slave interface enable */
+#define GCAE BIT(1) /* general call address enable */
+#define FNA BIT(0) /* forced non acknowledgment */
/* ICMCR */
-#define MDBS (1 << 7) /* non-fifo mode switch */
-#define FSCL (1 << 6) /* override SCL pin */
-#define FSDA (1 << 5) /* override SDA pin */
-#define OBPC (1 << 4) /* override pins */
-#define MIE (1 << 3) /* master if enable */
-#define TSBE (1 << 2)
-#define FSB (1 << 1) /* force stop bit */
-#define ESG (1 << 0) /* enable start bit gen */
+#define MDBS BIT(7) /* non-fifo mode switch */
+#define FSCL BIT(6) /* override SCL pin */
+#define FSDA BIT(5) /* override SDA pin */
+#define OBPC BIT(4) /* override pins */
+#define MIE BIT(3) /* master if enable */
+#define TSBE BIT(2)
+#define FSB BIT(1) /* force stop bit */
+#define ESG BIT(0) /* enable start bit gen */
/* ICSSR (also for ICSIER) */
-#define GCAR (1 << 6) /* general call received */
-#define STM (1 << 5) /* slave transmit mode */
-#define SSR (1 << 4) /* stop received */
-#define SDE (1 << 3) /* slave data empty */
-#define SDT (1 << 2) /* slave data transmitted */
-#define SDR (1 << 1) /* slave data received */
-#define SAR (1 << 0) /* slave addr received */
+#define GCAR BIT(6) /* general call received */
+#define STM BIT(5) /* slave transmit mode */
+#define SSR BIT(4) /* stop received */
+#define SDE BIT(3) /* slave data empty */
+#define SDT BIT(2) /* slave data transmitted */
+#define SDR BIT(1) /* slave data received */
+#define SAR BIT(0) /* slave addr received */
/* ICMSR (also for ICMIE) */
-#define MNR (1 << 6) /* nack received */
-#define MAL (1 << 5) /* arbitration lost */
-#define MST (1 << 4) /* sent a stop */
-#define MDE (1 << 3)
-#define MDT (1 << 2)
-#define MDR (1 << 1)
-#define MAT (1 << 0) /* slave addr xfer done */
+#define MNR BIT(6) /* nack received */
+#define MAL BIT(5) /* arbitration lost */
+#define MST BIT(4) /* sent a stop */
+#define MDE BIT(3)
+#define MDT BIT(2)
+#define MDR BIT(1)
+#define MAT BIT(0) /* slave addr xfer done */
/* ICDMAER */
-#define RSDMAE (1 << 3) /* DMA Slave Received Enable */
-#define TSDMAE (1 << 2) /* DMA Slave Transmitted Enable */
-#define RMDMAE (1 << 1) /* DMA Master Received Enable */
-#define TMDMAE (1 << 0) /* DMA Master Transmitted Enable */
+#define RSDMAE BIT(3) /* DMA Slave Received Enable */
+#define TSDMAE BIT(2) /* DMA Slave Transmitted Enable */
+#define RMDMAE BIT(1) /* DMA Master Received Enable */
+#define TMDMAE BIT(0) /* DMA Master Transmitted Enable */
/* ICFBSCR */
#define TCYC17 0x0f /* 17*Tcyc delay 1st bit between SDA and SCL */
@@ -97,17 +97,15 @@
#define RCAR_IRQ_RECV (MNR | MAL | MST | MAT | MDR)
#define RCAR_IRQ_STOP (MST)
-#define RCAR_IRQ_ACK_SEND (~(MAT | MDE) & 0x7F)
-#define RCAR_IRQ_ACK_RECV (~(MAT | MDR) & 0x7F)
-
-#define ID_LAST_MSG (1 << 0)
-#define ID_FIRST_MSG (1 << 1)
-#define ID_DONE (1 << 2)
-#define ID_ARBLOST (1 << 3)
-#define ID_NACK (1 << 4)
+#define ID_LAST_MSG BIT(0)
+#define ID_REP_AFTER_RD BIT(1)
+#define ID_DONE BIT(2)
+#define ID_ARBLOST BIT(3)
+#define ID_NACK BIT(4)
+#define ID_EPROTO BIT(5)
/* persistent flags */
-#define ID_P_HOST_NOTIFY BIT(28)
-#define ID_P_REP_AFTER_RD BIT(29)
+#define ID_P_NOT_ATOMIC BIT(28)
+#define ID_P_HOST_NOTIFY BIT(29)
#define ID_P_NO_RXDMA BIT(30) /* HW forbids RXDMA sometimes */
#define ID_P_PM_BLOCKED BIT(31)
#define ID_P_MASK GENMASK(31, 28)
@@ -141,7 +139,6 @@ struct rcar_i2c_priv {
enum dma_data_direction dma_direction;
struct reset_control *rstc;
- bool atomic_xfer;
int irq;
struct i2c_client *host_notify_client;
@@ -160,6 +157,11 @@ static u32 rcar_i2c_read(struct rcar_i2c_priv *priv, int reg)
return readl(priv->io + reg);
}
+static void rcar_i2c_clear_irq(struct rcar_i2c_priv *priv, u32 val)
+{
+ writel(~val & 0x7f, priv->io + ICMSR);
+}
+
static int rcar_i2c_get_scl(struct i2c_adapter *adap)
{
struct rcar_i2c_priv *priv = i2c_get_adapdata(adap);
@@ -330,41 +332,46 @@ scgd_find:
return 0;
}
+/*
+ * We don't have a test case but the HW engineers say that the write order of
+ * ICMSR and ICMCR depends on whether we issue START or REP_START. So, ICMSR
+ * handling is outside of this function. The first message clears ICMSR before
+ * this function; the interrupt handlers clear the relevant bits after it.
+ */
static void rcar_i2c_prepare_msg(struct rcar_i2c_priv *priv)
{
int read = !!rcar_i2c_is_recv(priv);
+ bool rep_start = !(priv->flags & ID_REP_AFTER_RD);
priv->pos = 0;
+ priv->flags &= ID_P_MASK;
+
if (priv->msgs_left == 1)
priv->flags |= ID_LAST_MSG;
rcar_i2c_write(priv, ICMAR, i2c_8bit_addr_from_msg(priv->msg));
- if (!priv->atomic_xfer)
+ if (priv->flags & ID_P_NOT_ATOMIC)
rcar_i2c_write(priv, ICMIER, read ? RCAR_IRQ_RECV : RCAR_IRQ_SEND);
- /*
- * We don't have a test case but the HW engineers say that the write order
- * of ICMSR and ICMCR depends on whether we issue START or REP_START. Since
- * it didn't cause a drawback for me, let's rather be safe than sorry.
- */
- if (priv->flags & ID_FIRST_MSG) {
- rcar_i2c_write(priv, ICMSR, 0);
+ if (rep_start)
rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
- } else {
- if (priv->flags & ID_P_REP_AFTER_RD)
- priv->flags &= ~ID_P_REP_AFTER_RD;
- else
- rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
- rcar_i2c_write(priv, ICMSR, 0);
- }
+}
+
+static void rcar_i2c_first_msg(struct rcar_i2c_priv *priv,
+ struct i2c_msg *msgs, int num)
+{
+ priv->msg = msgs;
+ priv->msgs_left = num;
+ rcar_i2c_write(priv, ICMSR, 0); /* must be before preparing msg */
+ rcar_i2c_prepare_msg(priv);
}
static void rcar_i2c_next_msg(struct rcar_i2c_priv *priv)
{
priv->msg++;
priv->msgs_left--;
- priv->flags &= ID_P_MASK;
rcar_i2c_prepare_msg(priv);
+ /* ICMSR handling must come afterwards in the irq handler */
}
static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv, bool terminate)
@@ -413,7 +420,7 @@ static bool rcar_i2c_dma(struct rcar_i2c_priv *priv)
int len;
/* Do various checks to see if DMA is feasible at all */
- if (priv->atomic_xfer || IS_ERR(chan) || msg->len < RCAR_MIN_DMA_LEN ||
+ if (!(priv->flags & ID_P_NOT_ATOMIC) || IS_ERR(chan) || msg->len < RCAR_MIN_DMA_LEN ||
!(msg->flags & I2C_M_DMA_SAFE) || (read && priv->flags & ID_P_NO_RXDMA))
return false;
@@ -475,11 +482,15 @@ static bool rcar_i2c_dma(struct rcar_i2c_priv *priv)
static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
{
struct i2c_msg *msg = priv->msg;
+ u32 irqs_to_clear = MDE;
/* FIXME: sometimes, unknown interrupt happened. Do nothing */
if (!(msr & MDE))
return;
+ if (msr & MAT)
+ irqs_to_clear |= MAT;
+
/* Check if DMA can be enabled and take over */
if (priv->pos == 1 && rcar_i2c_dma(priv))
return;
@@ -503,31 +514,32 @@ static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
* [ICRXTX] -> [SHIFT] -> [I2C bus]
*/
- if (priv->flags & ID_LAST_MSG) {
+ if (priv->flags & ID_LAST_MSG)
/*
* If current msg is the _LAST_ msg,
* prepare stop condition here.
* ID_DONE will be set on STOP irq.
*/
rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP);
- } else {
+ else
rcar_i2c_next_msg(priv);
- return;
- }
}
- rcar_i2c_write(priv, ICMSR, RCAR_IRQ_ACK_SEND);
+ rcar_i2c_clear_irq(priv, irqs_to_clear);
}
static void rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
{
struct i2c_msg *msg = priv->msg;
+ bool recv_len_init = priv->pos == 0 && msg->flags & I2C_M_RECV_LEN;
+ u32 irqs_to_clear = MDR;
/* FIXME: sometimes, unknown interrupt happened. Do nothing */
if (!(msr & MDR))
return;
if (msr & MAT) {
+ irqs_to_clear |= MAT;
/*
* Address transfer phase finished, but no data at this point.
* Try to use DMA to receive data.
@@ -535,24 +547,41 @@ static void rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
rcar_i2c_dma(priv);
} else if (priv->pos < msg->len) {
/* get received data */
- msg->buf[priv->pos] = rcar_i2c_read(priv, ICRXTX);
+ u8 data = rcar_i2c_read(priv, ICRXTX);
+
+ msg->buf[priv->pos] = data;
+ if (recv_len_init) {
+ if (data == 0 || data > I2C_SMBUS_BLOCK_MAX) {
+ priv->flags |= ID_DONE | ID_EPROTO;
+ return;
+ }
+ msg->len += msg->buf[0];
+ /* Enough data for DMA? */
+ if (rcar_i2c_dma(priv))
+ return;
+ /* new length after RECV_LEN now properly initialized */
+ recv_len_init = false;
+ }
priv->pos++;
}
- /* If next received data is the _LAST_, go to new phase. */
- if (priv->pos + 1 == msg->len) {
+ /*
+ * If next received data is the _LAST_ and we are not waiting for a new
+ * length because of RECV_LEN, then go to a new phase.
+ */
+ if (priv->pos + 1 == msg->len && !recv_len_init) {
if (priv->flags & ID_LAST_MSG) {
rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP);
} else {
rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
- priv->flags |= ID_P_REP_AFTER_RD;
+ priv->flags |= ID_REP_AFTER_RD;
}
}
if (priv->pos == msg->len && !(priv->flags & ID_LAST_MSG))
rcar_i2c_next_msg(priv);
- else
- rcar_i2c_write(priv, ICMSR, RCAR_IRQ_ACK_RECV);
+
+ rcar_i2c_clear_irq(priv, irqs_to_clear);
}
static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
@@ -641,7 +670,7 @@ static irqreturn_t rcar_i2c_irq(int irq, struct rcar_i2c_priv *priv, u32 msr)
/* Nack */
if (msr & MNR) {
/* HW automatically sends STOP after received NACK */
- if (!priv->atomic_xfer)
+ if (priv->flags & ID_P_NOT_ATOMIC)
rcar_i2c_write(priv, ICMIER, RCAR_IRQ_STOP);
priv->flags |= ID_NACK;
goto out;
@@ -663,7 +692,7 @@ out:
if (priv->flags & ID_DONE) {
rcar_i2c_write(priv, ICMIER, 0);
rcar_i2c_write(priv, ICMSR, 0);
- if (!priv->atomic_xfer)
+ if (priv->flags & ID_P_NOT_ATOMIC)
wake_up(&priv->wait);
}
@@ -676,12 +705,12 @@ static irqreturn_t rcar_i2c_gen2_irq(int irq, void *ptr)
u32 msr;
/* Clear START or STOP immediately, except for REPSTART after read */
- if (likely(!(priv->flags & ID_P_REP_AFTER_RD)))
+ if (likely(!(priv->flags & ID_REP_AFTER_RD)))
rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
/* Only handle interrupts that are currently enabled */
msr = rcar_i2c_read(priv, ICMSR);
- if (!priv->atomic_xfer)
+ if (priv->flags & ID_P_NOT_ATOMIC)
msr &= rcar_i2c_read(priv, ICMIER);
return rcar_i2c_irq(irq, priv, msr);
@@ -694,14 +723,14 @@ static irqreturn_t rcar_i2c_gen3_irq(int irq, void *ptr)
/* Only handle interrupts that are currently enabled */
msr = rcar_i2c_read(priv, ICMSR);
- if (!priv->atomic_xfer)
+ if (priv->flags & ID_P_NOT_ATOMIC)
msr &= rcar_i2c_read(priv, ICMIER);
/*
* Clear START or STOP immediately, except for REPSTART after read or
* if a spurious interrupt was detected.
*/
- if (likely(!(priv->flags & ID_P_REP_AFTER_RD) && msr))
+ if (likely(!(priv->flags & ID_REP_AFTER_RD) && msr))
rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
return rcar_i2c_irq(irq, priv, msr);
@@ -803,7 +832,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
int i, ret;
long time_left;
- priv->atomic_xfer = false;
+ priv->flags |= ID_P_NOT_ATOMIC;
pm_runtime_get_sync(dev);
@@ -827,11 +856,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
for (i = 0; i < num; i++)
rcar_i2c_request_dma(priv, msgs + i);
- /* init first message */
- priv->msg = msgs;
- priv->msgs_left = num;
- priv->flags = (priv->flags & ID_P_MASK) | ID_FIRST_MSG;
- rcar_i2c_prepare_msg(priv);
+ rcar_i2c_first_msg(priv, msgs, num);
time_left = wait_event_timeout(priv->wait, priv->flags & ID_DONE,
num * adap->timeout);
@@ -847,6 +872,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
ret = -ENXIO;
} else if (priv->flags & ID_ARBLOST) {
ret = -EAGAIN;
+ } else if (priv->flags & ID_EPROTO) {
+ ret = -EPROTO;
} else {
ret = num - priv->msgs_left; /* The number of transfer */
}
@@ -869,7 +896,7 @@ static int rcar_i2c_master_xfer_atomic(struct i2c_adapter *adap,
bool time_left;
int ret;
- priv->atomic_xfer = true;
+ priv->flags &= ~ID_P_NOT_ATOMIC;
pm_runtime_get_sync(dev);
@@ -879,12 +906,7 @@ static int rcar_i2c_master_xfer_atomic(struct i2c_adapter *adap,
goto out;
rcar_i2c_init(priv);
-
- /* init first message */
- priv->msg = msgs;
- priv->msgs_left = num;
- priv->flags = (priv->flags & ID_P_MASK) | ID_FIRST_MSG;
- rcar_i2c_prepare_msg(priv);
+ rcar_i2c_first_msg(priv, msgs, num);
j = jiffies + num * adap->timeout;
do {
@@ -909,6 +931,8 @@ static int rcar_i2c_master_xfer_atomic(struct i2c_adapter *adap,
ret = -ENXIO;
} else if (priv->flags & ID_ARBLOST) {
ret = -EAGAIN;
+ } else if (priv->flags & ID_EPROTO) {
+ ret = -EPROTO;
} else {
ret = num - priv->msgs_left; /* The number of transfer */
}
@@ -975,7 +999,7 @@ static u32 rcar_i2c_func(struct i2c_adapter *adap)
* I2C_M_IGNORE_NAK (automatically sends STOP after NAK)
*/
u32 func = I2C_FUNC_I2C | I2C_FUNC_SLAVE |
- (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+ (I2C_FUNC_SMBUS_EMUL_ALL & ~I2C_FUNC_SMBUS_QUICK);
if (priv->flags & ID_P_HOST_NOTIFY)
func |= I2C_FUNC_SMBUS_HOST_NOTIFY;
@@ -1063,8 +1087,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
ret = rcar_i2c_clock_calculate(priv);
- if (ret < 0)
- goto out_pm_put;
+ if (ret < 0) {
+ pm_runtime_put(dev);
+ goto out_pm_disable;
+ }
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
@@ -1093,19 +1119,19 @@ static int rcar_i2c_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto out_pm_disable;
+ goto out_pm_put;
priv->irq = ret;
ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
if (ret < 0) {
dev_err(dev, "cannot get irq %d\n", priv->irq);
- goto out_pm_disable;
+ goto out_pm_put;
}
platform_set_drvdata(pdev, priv);
ret = i2c_add_numbered_adapter(adap);
if (ret < 0)
- goto out_pm_disable;
+ goto out_pm_put;
if (priv->flags & ID_P_HOST_NOTIFY) {
priv->host_notify_client = i2c_new_slave_host_notify_device(adap);
@@ -1122,7 +1148,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
out_del_device:
i2c_del_adapter(&priv->adap);
out_pm_put:
- pm_runtime_put(dev);
+ if (priv->flags & ID_P_PM_BLOCKED)
+ pm_runtime_put(dev);
out_pm_disable:
pm_runtime_disable(dev);
return ret;
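The rcar receive path now handles I2C_M_RECV_LEN (SMBus block reads): the first byte received is the block length, it must be between 1 and I2C_SMBUS_BLOCK_MAX, msg->len is extended by it before the remaining payload is collected, and an out-of-range length ends the transfer with -EPROTO. The loop below is a plain userspace sketch of that length handling; the fake wire buffer and values are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define I2C_SMBUS_BLOCK_MAX 32

int main(void)
{
	uint8_t wire[] = { 3, 0xaa, 0xbb, 0xcc };	/* block length + payload */
	uint8_t buf[1 + I2C_SMBUS_BLOCK_MAX];
	unsigned int len = 1, pos = 0;			/* RECV_LEN starts with len 1 */

	while (pos < len) {
		uint8_t data = wire[pos];

		buf[pos] = data;
		if (pos == 0) {				/* the recv_len_init case */
			if (data == 0 || data > I2C_SMBUS_BLOCK_MAX) {
				fprintf(stderr, "bad block length -> -EPROTO\n");
				return 1;
			}
			len += buf[0];			/* msg->len += msg->buf[0] */
		}
		pos++;
	}

	printf("received %u bytes, payload starts at buf[1]=%#x\n", len, buf[1]);
	return 0;
}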
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index ffefe3c482e9..9a1c3f8b7048 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -78,24 +78,23 @@ struct xiic_i2c {
bool singlemaster;
};
-
#define XIIC_MSB_OFFSET 0
-#define XIIC_REG_OFFSET (0x100+XIIC_MSB_OFFSET)
+#define XIIC_REG_OFFSET (0x100 + XIIC_MSB_OFFSET)
/*
* Register offsets in bytes from RegisterBase. Three is added to the
* base offset to access LSB (IBM style) of the word
*/
-#define XIIC_CR_REG_OFFSET (0x00+XIIC_REG_OFFSET) /* Control Register */
-#define XIIC_SR_REG_OFFSET (0x04+XIIC_REG_OFFSET) /* Status Register */
-#define XIIC_DTR_REG_OFFSET (0x08+XIIC_REG_OFFSET) /* Data Tx Register */
-#define XIIC_DRR_REG_OFFSET (0x0C+XIIC_REG_OFFSET) /* Data Rx Register */
-#define XIIC_ADR_REG_OFFSET (0x10+XIIC_REG_OFFSET) /* Address Register */
-#define XIIC_TFO_REG_OFFSET (0x14+XIIC_REG_OFFSET) /* Tx FIFO Occupancy */
-#define XIIC_RFO_REG_OFFSET (0x18+XIIC_REG_OFFSET) /* Rx FIFO Occupancy */
-#define XIIC_TBA_REG_OFFSET (0x1C+XIIC_REG_OFFSET) /* 10 Bit Address reg */
-#define XIIC_RFD_REG_OFFSET (0x20+XIIC_REG_OFFSET) /* Rx FIFO Depth reg */
-#define XIIC_GPO_REG_OFFSET (0x24+XIIC_REG_OFFSET) /* Output Register */
+#define XIIC_CR_REG_OFFSET (0x00 + XIIC_REG_OFFSET) /* Control Register */
+#define XIIC_SR_REG_OFFSET (0x04 + XIIC_REG_OFFSET) /* Status Register */
+#define XIIC_DTR_REG_OFFSET (0x08 + XIIC_REG_OFFSET) /* Data Tx Register */
+#define XIIC_DRR_REG_OFFSET (0x0C + XIIC_REG_OFFSET) /* Data Rx Register */
+#define XIIC_ADR_REG_OFFSET (0x10 + XIIC_REG_OFFSET) /* Address Register */
+#define XIIC_TFO_REG_OFFSET (0x14 + XIIC_REG_OFFSET) /* Tx FIFO Occupancy */
+#define XIIC_RFO_REG_OFFSET (0x18 + XIIC_REG_OFFSET) /* Rx FIFO Occupancy */
+#define XIIC_TBA_REG_OFFSET (0x1C + XIIC_REG_OFFSET) /* 10 Bit Address reg */
+#define XIIC_RFD_REG_OFFSET (0x20 + XIIC_REG_OFFSET) /* Rx FIFO Depth reg */
+#define XIIC_GPO_REG_OFFSET (0x24 + XIIC_REG_OFFSET) /* Output Register */
/* Control Register masks */
#define XIIC_CR_ENABLE_DEVICE_MASK 0x01 /* Device enable = 1 */
@@ -233,18 +232,21 @@ static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg)
static inline void xiic_irq_dis(struct xiic_i2c *i2c, u32 mask)
{
u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
+
xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier & ~mask);
}
static inline void xiic_irq_en(struct xiic_i2c *i2c, u32 mask)
{
u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
+
xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier | mask);
}
static inline void xiic_irq_clr(struct xiic_i2c *i2c, u32 mask)
{
u32 isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
+
xiic_setreg32(i2c, XIIC_IISR_OFFSET, isr & mask);
}
@@ -355,7 +357,8 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
while (len--) {
u16 data = i2c->tx_msg->buf[i2c->tx_pos++];
- if ((xiic_tx_space(i2c) == 0) && (i2c->nmsgs == 1)) {
+
+ if (!xiic_tx_space(i2c) && i2c->nmsgs == 1) {
/* last message in transfer -> STOP */
data |= XIIC_TX_DYN_STOP_MASK;
dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
@@ -381,6 +384,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
int xfer_more = 0;
int wakeup_req = 0;
int wakeup_code = 0;
+ int ret;
/* Get the interrupt Status from the IPIF. There is no clearing of
* interrupts in the IPIF. Interrupts must be cleared at the source.
@@ -401,8 +405,8 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
/* Service requesting interrupt */
if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
- ((pend & XIIC_INTR_TX_ERROR_MASK) &&
- !(pend & XIIC_INTR_RX_FULL_MASK))) {
+ ((pend & XIIC_INTR_TX_ERROR_MASK) &&
+ !(pend & XIIC_INTR_RX_FULL_MASK))) {
/* bus arbitration lost, or...
* Transmit error _OR_ RX completed
* if this happens when RX_FULL is not set
@@ -415,7 +419,9 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
* fifos and the next message is a TX with len 0 (only addr)
* reset the IP instead of just flush fifos
*/
- xiic_reinit(i2c);
+ ret = xiic_reinit(i2c);
+ if (ret < 0)
+ dev_dbg(i2c->adap.dev.parent, "reinit failed\n");
if (i2c->rx_msg) {
wakeup_req = 1;
@@ -462,24 +468,6 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
}
}
}
- if (pend & XIIC_INTR_BNB_MASK) {
- /* IIC bus has transitioned to not busy */
- clr |= XIIC_INTR_BNB_MASK;
-
- /* The bus is not busy, disable BusNotBusy interrupt */
- xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);
-
- if (!i2c->tx_msg)
- goto out;
-
- wakeup_req = 1;
-
- if (i2c->nmsgs == 1 && !i2c->rx_msg &&
- xiic_tx_space(i2c) == 0)
- wakeup_code = STATE_DONE;
- else
- wakeup_code = STATE_ERROR;
- }
if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
/* Transmit register/FIFO is empty or ½ empty */
@@ -516,6 +504,26 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
*/
xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
}
+
+ if (pend & XIIC_INTR_BNB_MASK) {
+ /* IIC bus has transitioned to not busy */
+ clr |= XIIC_INTR_BNB_MASK;
+
+ /* The bus is not busy, disable BusNotBusy interrupt */
+ xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);
+
+ if (!i2c->tx_msg)
+ goto out;
+
+ wakeup_req = 1;
+
+ if (i2c->nmsgs == 1 && !i2c->rx_msg &&
+ xiic_tx_space(i2c) == 0)
+ wakeup_code = STATE_DONE;
+ else
+ wakeup_code = STATE_ERROR;
+ }
+
out:
dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr);
@@ -570,7 +578,7 @@ static int xiic_busy(struct xiic_i2c *i2c)
static void xiic_start_recv(struct xiic_i2c *i2c)
{
- u8 rx_watermark;
+ u16 rx_watermark;
struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
/* Clear and enable Rx full interrupt. */
@@ -585,7 +593,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
rx_watermark = msg->len;
if (rx_watermark > IIC_RX_FIFO_DEPTH)
rx_watermark = IIC_RX_FIFO_DEPTH;
- xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
+ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, (u8)(rx_watermark - 1));
if (!(msg->flags & I2C_M_NOSTART))
/* write the address */
@@ -638,6 +646,7 @@ static void xiic_start_send(struct xiic_i2c *i2c)
static void __xiic_start_xfer(struct xiic_i2c *i2c)
{
int fifo_space = xiic_tx_fifo_space(i2c);
+
dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifos space: %d\n",
__func__, i2c->tx_msg, fifo_space);
@@ -739,7 +748,6 @@ static const struct i2c_adapter xiic_adapter = {
.quirks = &xiic_quirks,
};
-
static int xiic_i2c_probe(struct platform_device *pdev)
{
struct xiic_i2c *i2c;
@@ -899,6 +907,7 @@ static const struct dev_pm_ops xiic_dev_pm_ops = {
SET_RUNTIME_PM_OPS(xiic_i2c_runtime_suspend,
xiic_i2c_runtime_resume, NULL)
};
+
static struct platform_driver xiic_i2c_driver = {
.probe = xiic_i2c_probe,
.remove = xiic_i2c_remove,
@@ -914,4 +923,3 @@ module_platform_driver(xiic_i2c_driver);
MODULE_AUTHOR("info@mocean-labs.com");
MODULE_DESCRIPTION("Xilinx I2C bus driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:"DRIVER_NAME);
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index 8c01123dc4ed..6aef5ce43cc1 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -768,13 +768,8 @@ static int i3c_hci_probe(struct platform_device *pdev)
static int i3c_hci_remove(struct platform_device *pdev)
{
struct i3c_hci *hci = platform_get_drvdata(pdev);
- int ret;
- ret = i3c_master_unregister(&hci->master);
- if (ret)
- return ret;
-
- return 0;
+ return i3c_master_unregister(&hci->master);
}
static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 7550dad64ecf..d6e9ed74cdcf 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -1597,12 +1597,11 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
{
struct svc_i3c_master *master = dev_get_drvdata(dev);
- int ret = 0;
pinctrl_pm_select_default_state(dev);
svc_i3c_master_prepare_clks(master);
- return ret;
+ return 0;
}
static const struct dev_pm_ops svc_i3c_pm_ops = {
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index eac3f02662ae..b53f010f3e40 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -290,7 +290,6 @@ config DA311
config DMARD06
tristate "Domintech DMARD06 Digital Accelerometer Driver"
- depends on OF || COMPILE_TEST
depends on I2C
help
Say yes here to build support for the Domintech low-g tri-axial
diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c
index e9c10c8c32f0..7561399daef3 100644
--- a/drivers/iio/accel/adxl355_core.c
+++ b/drivers/iio/accel/adxl355_core.c
@@ -18,7 +18,7 @@
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/of_irq.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/units.h>
@@ -745,10 +745,7 @@ int adxl355_core_probe(struct device *dev, struct regmap *regmap,
return ret;
}
- /*
- * TODO: Would be good to move it to the generic version.
- */
- irq = of_irq_get_byname(dev->of_node, "DRDY");
+ irq = fwnode_irq_get_byname(dev_fwnode(dev), "DRDY");
if (irq > 0) {
ret = adxl355_probe_trigger(indio_dev, irq);
if (ret)
diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
index 62960134ea19..0289ed8cf2c6 100644
--- a/drivers/iio/accel/adxl367.c
+++ b/drivers/iio/accel/adxl367.c
@@ -1567,7 +1567,6 @@ int adxl367_probe(struct device *dev, const struct adxl367_ops *ops,
return ret;
ret = devm_iio_kfifo_buffer_setup_ext(st->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&adxl367_buffer_ops,
adxl367_fifo_attributes);
if (ret)
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 7516d7dde1af..57e8a8350cd1 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -1525,7 +1525,7 @@ static int bmc150_accel_buffer_postenable(struct iio_dev *indio_dev)
struct bmc150_accel_data *data = iio_priv(indio_dev);
int ret = 0;
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
+ if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
return 0;
mutex_lock(&data->mutex);
@@ -1557,7 +1557,7 @@ static int bmc150_accel_buffer_predisable(struct iio_dev *indio_dev)
{
struct bmc150_accel_data *data = iio_priv(indio_dev);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
+ if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
return 0;
mutex_lock(&data->mutex);
diff --git a/drivers/iio/accel/dmard09.c b/drivers/iio/accel/dmard09.c
index 53ab6078cb7f..cb0246ca72f3 100644
--- a/drivers/iio/accel/dmard09.c
+++ b/drivers/iio/accel/dmard09.c
@@ -24,7 +24,7 @@
#define DMARD09_AXIS_Y 1
#define DMARD09_AXIS_Z 2
#define DMARD09_AXIS_X_OFFSET ((DMARD09_AXIS_X + 1) * 2)
-#define DMARD09_AXIS_Y_OFFSET ((DMARD09_AXIS_Y + 1 )* 2)
+#define DMARD09_AXIS_Y_OFFSET ((DMARD09_AXIS_Y + 1) * 2)
#define DMARD09_AXIS_Z_OFFSET ((DMARD09_AXIS_Z + 1) * 2)
struct dmard09_data {
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index a9d2f10d5d45..8874d6d61725 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -1217,7 +1217,6 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq)
return ret;
ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&fxls8962af_buffer_ops);
if (ret)
return ret;
diff --git a/drivers/iio/accel/kxsd9-spi.c b/drivers/iio/accel/kxsd9-spi.c
index ec17e35e573e..b7b5af45429e 100644
--- a/drivers/iio/accel/kxsd9-spi.c
+++ b/drivers/iio/accel/kxsd9-spi.c
@@ -44,8 +44,8 @@ static const struct spi_device_id kxsd9_spi_id[] = {
MODULE_DEVICE_TABLE(spi, kxsd9_spi_id);
static const struct of_device_id kxsd9_of_match[] = {
- { .compatible = "kionix,kxsd9" },
- { },
+ { .compatible = "kionix,kxsd9" },
+ { }
};
MODULE_DEVICE_TABLE(of, kxsd9_of_match);
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 9c02c681c84c..912a447e6310 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -166,6 +166,7 @@ static const struct mma8452_event_regs trans_ev_regs = {
/**
* struct mma_chip_info - chip specific data
+ * @name: part number of device reported via 'name' attr
* @chip_id: WHO_AM_I register's value
* @channels: struct iio_chan_spec matching the device's
* capabilities
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index 83c81072511e..29a68a7d34cd 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -1474,7 +1474,6 @@ static int sca3000_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
ret = devm_iio_kfifo_buffer_setup(&spi->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&sca3000_ring_setup_ops);
if (ret)
return ret;
diff --git a/drivers/iio/accel/ssp_accel_sensor.c b/drivers/iio/accel/ssp_accel_sensor.c
index a1164b439f41..7ca9d0d543e0 100644
--- a/drivers/iio/accel/ssp_accel_sensor.c
+++ b/drivers/iio/accel/ssp_accel_sensor.c
@@ -113,7 +113,6 @@ static int ssp_accel_probe(struct platform_device *pdev)
indio_dev->available_scan_masks = ssp_accel_scan_mask;
ret = devm_iio_kfifo_buffer_setup(&pdev->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&ssp_accel_buffer_ops);
if (ret)
return ret;
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 00e056c21bfc..5b0f54e33d9e 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -14,32 +14,6 @@
#include <linux/types.h>
#include <linux/iio/common/st_sensors.h>
-enum st_accel_type {
- LSM303DLH,
- LSM303DLHC,
- LIS3DH,
- LSM330D,
- LSM330DL,
- LSM330DLC,
- LIS331DLH,
- LSM303DL,
- LSM303DLM,
- LSM330,
- LSM303AGR,
- LIS2DH12,
- LIS3L02DQ,
- LNG2DM,
- H3LIS331DL,
- LIS331DL,
- LIS3LV02DL,
- LIS2DW12,
- LIS3DHH,
- LIS2DE12,
- LIS2HH12,
- SC7A20,
- ST_ACCEL_MAX,
-};
-
#define H3LIS331DL_ACCEL_DEV_NAME "h3lis331dl_accel"
#define LIS3LV02DL_ACCEL_DEV_NAME "lis3lv02dl_accel"
#define LSM303DLHC_ACCEL_DEV_NAME "lsm303dlhc_accel"
@@ -62,8 +36,10 @@ enum st_accel_type {
#define LIS3DE_ACCEL_DEV_NAME "lis3de"
#define LIS2DE12_ACCEL_DEV_NAME "lis2de12"
#define LIS2HH12_ACCEL_DEV_NAME "lis2hh12"
+#define LIS302DL_ACCEL_DEV_NAME "lis302dl"
#define SC7A20_ACCEL_DEV_NAME "sc7a20"
+
#ifdef CONFIG_IIO_BUFFER
int st_accel_allocate_ring(struct iio_dev *indio_dev);
int st_accel_trig_set_state(struct iio_trigger *trig, bool state);
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 5c5da6fdb490..c8c8eb15c34e 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -444,6 +444,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS331DL_ACCEL_DEV_NAME,
+ [1] = LIS302DL_ACCEL_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_8bit_channels,
.odr = {
@@ -1209,28 +1210,21 @@ read_error:
static int st_accel_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long mask)
{
- int err;
-
switch (mask) {
case IIO_CHAN_INFO_SCALE: {
int gain;
gain = val * 1000000 + val2;
- err = st_sensors_set_fullscale_by_gain(indio_dev, gain);
- break;
+ return st_sensors_set_fullscale_by_gain(indio_dev, gain);
}
case IIO_CHAN_INFO_SAMP_FREQ:
if (val2)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
- err = st_sensors_set_odr(indio_dev, val);
- mutex_unlock(&indio_dev->mlock);
- return err;
+
+ return st_sensors_set_odr(indio_dev, val);
default:
return -EINVAL;
}
-
- return err;
}
static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 96adc4344f4a..45ee0ddc133c 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -108,6 +108,10 @@ static const struct of_device_id st_accel_of_match[] = {
.data = LIS2HH12_ACCEL_DEV_NAME,
},
{
+ .compatible = "st,lis302dl",
+ .data = LIS302DL_ACCEL_DEV_NAME,
+ },
+ {
.compatible = "silan,sc7a20",
.data = SC7A20_ACCEL_DEV_NAME,
},
@@ -146,6 +150,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LIS3DE_ACCEL_DEV_NAME },
{ LIS2DE12_ACCEL_DEV_NAME },
{ LIS2HH12_ACCEL_DEV_NAME },
+ { LIS302DL_ACCEL_DEV_NAME },
{ SC7A20_ACCEL_DEV_NAME },
{},
};
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 108b63d0146c..6c0917750288 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -92,6 +92,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lis3de",
.data = LIS3DE_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lis302dl",
+ .data = LIS302DL_ACCEL_DEV_NAME,
+ },
{}
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -147,6 +151,7 @@ static const struct spi_device_id st_accel_id_table[] = {
{ LIS2DW12_ACCEL_DEV_NAME },
{ LIS3DHH_ACCEL_DEV_NAME },
{ LIS3DE_ACCEL_DEV_NAME },
+ { LIS302DL_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_accel_id_table);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 71ab0a06aa82..48ace7412874 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -910,7 +910,7 @@ config ROCKCHIP_SARADC
config RZG2L_ADC
tristate "Renesas RZ/G2L ADC driver"
- depends on ARCH_R9A07G044 || COMPILE_TEST
+ depends on ARCH_RZG2L || COMPILE_TEST
help
Say yes here to build support for the ADC found in Renesas
RZ/G2L family.
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index c47ead15f6e5..c5b785d8b241 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -43,6 +43,8 @@
#define AD7124_STATUS_POR_FLAG_MSK BIT(4)
/* AD7124_ADC_CONTROL */
+#define AD7124_ADC_STATUS_EN_MSK BIT(10)
+#define AD7124_ADC_STATUS_EN(x) FIELD_PREP(AD7124_ADC_STATUS_EN_MSK, x)
#define AD7124_ADC_CTRL_REF_EN_MSK BIT(8)
#define AD7124_ADC_CTRL_REF_EN(x) FIELD_PREP(AD7124_ADC_CTRL_REF_EN_MSK, x)
#define AD7124_ADC_CTRL_PWR_MSK GENMASK(7, 6)
@@ -188,7 +190,6 @@ static const struct iio_chan_spec ad7124_channel_template = {
.sign = 'u',
.realbits = 24,
.storagebits = 32,
- .shift = 8,
.endianness = IIO_BE,
},
};
@@ -501,26 +502,70 @@ static int ad7124_prepare_read(struct ad7124_state *st, int address)
return ad7124_enable_channel(st, &st->channels[address]);
}
+static int __ad7124_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
+{
+ struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
+
+ return ad7124_prepare_read(st, channel);
+}
+
static int ad7124_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
{
struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
int ret;
mutex_lock(&st->cfgs_lock);
- ret = ad7124_prepare_read(st, channel);
+ ret = __ad7124_set_channel(sd, channel);
mutex_unlock(&st->cfgs_lock);
return ret;
}
+static int ad7124_append_status(struct ad_sigma_delta *sd, bool append)
+{
+ struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
+ unsigned int adc_control = st->adc_control;
+ int ret;
+
+ adc_control &= ~AD7124_ADC_STATUS_EN_MSK;
+ adc_control |= AD7124_ADC_STATUS_EN(append);
+
+ ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, adc_control);
+ if (ret < 0)
+ return ret;
+
+ st->adc_control = adc_control;
+
+ return 0;
+}
+
+static int ad7124_disable_all(struct ad_sigma_delta *sd)
+{
+ struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
+ int ret;
+ int i;
+
+ for (i = 0; i < st->num_channels; i++) {
+ ret = ad7124_spi_write_mask(st, AD7124_CHANNEL(i), AD7124_CHANNEL_EN_MSK, 0, 2);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct ad_sigma_delta_info ad7124_sigma_delta_info = {
.set_channel = ad7124_set_channel,
+ .append_status = ad7124_append_status,
+ .disable_all = ad7124_disable_all,
.set_mode = ad7124_set_mode,
.has_registers = true,
.addr_shift = 0,
.read_mask = BIT(6),
+ .status_ch_mask = GENMASK(3, 0),
.data_reg = AD7124_DATA,
- .irq_flags = IRQF_TRIGGER_FALLING
+ .num_slots = 8,
+ .irq_flags = IRQF_TRIGGER_FALLING,
};
static int ad7124_read_raw(struct iio_dev *indio_dev,
@@ -670,11 +715,40 @@ static const struct attribute_group ad7124_attrs_group = {
.attrs = ad7124_attributes,
};
+static int ad7124_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct ad7124_state *st = iio_priv(indio_dev);
+ bool bit_set;
+ int ret;
+ int i;
+
+ mutex_lock(&st->cfgs_lock);
+ for (i = 0; i < st->num_channels; i++) {
+ bit_set = test_bit(i, scan_mask);
+ if (bit_set)
+ ret = __ad7124_set_channel(&st->sd, i);
+ else
+ ret = ad7124_spi_write_mask(st, AD7124_CHANNEL(i), AD7124_CHANNEL_EN_MSK,
+ 0, 2);
+ if (ret < 0) {
+ mutex_unlock(&st->cfgs_lock);
+
+ return ret;
+ }
+ }
+
+ mutex_unlock(&st->cfgs_lock);
+
+ return 0;
+}
+
static const struct iio_info ad7124_info = {
.read_raw = ad7124_read_raw,
.write_raw = ad7124_write_raw,
.debugfs_reg_access = &ad7124_reg_access,
.validate_trigger = ad_sd_validate_trigger,
+ .update_scan_mode = ad7124_update_scan_mode,
.attrs = &ad7124_attrs_group,
};
@@ -886,12 +960,14 @@ static int ad7124_probe(struct spi_device *spi)
st->chip_info = info;
- ad_sd_init(&st->sd, indio_dev, spi, &ad7124_sigma_delta_info);
-
indio_dev->name = st->chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &ad7124_info;
+ ret = ad_sd_init(&st->sd, indio_dev, spi, &ad7124_sigma_delta_info);
+ if (ret < 0)
+ return ret;
+
ret = ad7124_of_parse_channel_config(indio_dev, spi->dev.of_node);
if (ret < 0)
return ret;
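
The ad7124_append_status() helper added above is a plain read-modify-write of the STATUS_EN bit in the ADC control register, followed by a register write. The following is a minimal standalone sketch of that bit manipulation only; the starting register value is illustrative and the SPI write done by the driver is omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STATUS_EN_MSK	(1u << 10)	/* mirrors AD7124_ADC_STATUS_EN_MSK */

/* Clear the status-enable bit, then set it according to 'append'. */
static uint32_t set_status_append(uint32_t adc_control, bool append)
{
	adc_control &= ~STATUS_EN_MSK;
	if (append)
		adc_control |= STATUS_EN_MSK;
	return adc_control;
}

int main(void)
{
	uint32_t ctrl = 0x01c0;	/* illustrative starting value */

	printf("status appended: 0x%04x\n", (unsigned)set_status_append(ctrl, true));
	printf("status dropped:  0x%04x\n", (unsigned)set_status_append(ctrl, false));
	return 0;
}

Updating the cached copy (st->adc_control) only after the write succeeds, as the driver does, keeps the cache from drifting out of sync with the hardware on I/O errors.
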
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 770b4e59238f..d71977be7d22 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -58,7 +58,8 @@
/* Mode Register Bit Designations (AD7192_REG_MODE) */
#define AD7192_MODE_SEL(x) (((x) & 0x7) << 21) /* Operation Mode Select */
#define AD7192_MODE_SEL_MASK (0x7 << 21) /* Operation Mode Select Mask */
-#define AD7192_MODE_DAT_STA BIT(20) /* Status Register transmission */
+#define AD7192_MODE_STA(x) (((x) & 0x1) << 20) /* Status Register transmission */
+#define AD7192_MODE_STA_MASK BIT(20) /* Status Register transmission Mask */
#define AD7192_MODE_CLKSRC(x) (((x) & 0x3) << 18) /* Clock Source Select */
#define AD7192_MODE_SINC3 BIT(15) /* SINC3 Filter Select */
#define AD7192_MODE_ACX BIT(14) /* AC excitation enable(AD7195 only)*/
@@ -225,7 +226,7 @@ static ssize_t ad7192_write_syscalib(struct iio_dev *indio_dev,
bool sys_calib;
int ret, temp;
- ret = strtobool(buf, &sys_calib);
+ ret = kstrtobool(buf, &sys_calib);
if (ret)
return ret;
@@ -288,12 +289,51 @@ static int ad7192_set_mode(struct ad_sigma_delta *sd,
return ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
}
+static int ad7192_append_status(struct ad_sigma_delta *sd, bool append)
+{
+ struct ad7192_state *st = ad_sigma_delta_to_ad7192(sd);
+ unsigned int mode = st->mode;
+ int ret;
+
+ mode &= ~AD7192_MODE_STA_MASK;
+ mode |= AD7192_MODE_STA(append);
+
+ ret = ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, mode);
+ if (ret < 0)
+ return ret;
+
+ st->mode = mode;
+
+ return 0;
+}
+
+static int ad7192_disable_all(struct ad_sigma_delta *sd)
+{
+ struct ad7192_state *st = ad_sigma_delta_to_ad7192(sd);
+ u32 conf = st->conf;
+ int ret;
+
+ conf &= ~AD7192_CONF_CHAN_MASK;
+
+ ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf);
+ if (ret < 0)
+ return ret;
+
+ st->conf = conf;
+
+ return 0;
+}
+
static const struct ad_sigma_delta_info ad7192_sigma_delta_info = {
.set_channel = ad7192_set_channel,
+ .append_status = ad7192_append_status,
+ .disable_all = ad7192_disable_all,
.set_mode = ad7192_set_mode,
.has_registers = true,
.addr_shift = 3,
.read_mask = BIT(6),
+ .status_ch_mask = GENMASK(3, 0),
+ .num_slots = 4,
.irq_flags = IRQF_TRIGGER_FALLING,
};
@@ -457,7 +497,7 @@ static ssize_t ad7192_set(struct device *dev,
int ret;
bool val;
- ret = strtobool(buf, &val);
+ ret = kstrtobool(buf, &val);
if (ret < 0)
return ret;
@@ -783,6 +823,26 @@ static int ad7192_read_avail(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int ad7192_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+ u32 conf = st->conf;
+ int ret;
+ int i;
+
+ conf &= ~AD7192_CONF_CHAN_MASK;
+ for_each_set_bit(i, scan_mask, 8)
+ conf |= AD7192_CONF_CHAN(i);
+
+ ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf);
+ if (ret < 0)
+ return ret;
+
+ st->conf = conf;
+
+ return 0;
+}
+
static const struct iio_info ad7192_info = {
.read_raw = ad7192_read_raw,
.write_raw = ad7192_write_raw,
@@ -790,6 +850,7 @@ static const struct iio_info ad7192_info = {
.read_avail = ad7192_read_avail,
.attrs = &ad7192_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
+ .update_scan_mode = ad7192_update_scan_mode,
};
static const struct iio_info ad7195_info = {
@@ -799,6 +860,7 @@ static const struct iio_info ad7195_info = {
.read_avail = ad7192_read_avail,
.attrs = &ad7195_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
+ .update_scan_mode = ad7192_update_scan_mode,
};
#define __AD719x_CHANNEL(_si, _channel1, _channel2, _address, _extend_name, \
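
The new ad7192_update_scan_mode() translates the IIO scan mask into the channel-enable field of the configuration register. Below is a standalone sketch of that translation, assuming one enable bit per channel in bits 15:8; the macro names are stand-ins, and the real AD7192_CONF_CHAN() layout is defined elsewhere in the driver.

#include <stdint.h>
#include <stdio.h>

#define CHAN_MASK	0xff00u			/* stand-in for AD7192_CONF_CHAN_MASK */
#define CHAN(i)		(1u << (8 + (i)))	/* stand-in for AD7192_CONF_CHAN(i) */

/* Clear the channel field, then enable every channel set in scan_mask. */
static uint32_t build_conf(uint32_t conf, uint8_t scan_mask)
{
	unsigned int i;

	conf &= ~CHAN_MASK;
	for (i = 0; i < 8; i++)
		if (scan_mask & (1u << i))
			conf |= CHAN(i);
	return conf;
}

int main(void)
{
	/* channels 0 and 2 enabled */
	printf("conf = 0x%04x\n", (unsigned)build_conf(0, 0x05));
	return 0;
}
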
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index c17d9b5fbaf6..f20d39f0bc01 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -378,6 +378,11 @@ static const char * const ad7266_gpio_labels[] = {
"ad0", "ad1", "ad2",
};
+static void ad7266_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int ad7266_probe(struct spi_device *spi)
{
struct ad7266_platform_data *pdata = spi->dev.platform_data;
@@ -398,9 +403,13 @@ static int ad7266_probe(struct spi_device *spi)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&spi->dev, ad7266_reg_disable, st->reg);
+ if (ret)
+ return ret;
+
ret = regulator_get_voltage(st->reg);
if (ret < 0)
- goto error_disable_reg;
+ return ret;
st->vref_mv = ret / 1000;
} else {
@@ -423,7 +432,7 @@ static int ad7266_probe(struct spi_device *spi)
GPIOD_OUT_LOW);
if (IS_ERR(st->gpios[i])) {
ret = PTR_ERR(st->gpios[i]);
- goto error_disable_reg;
+ return ret;
}
}
}
@@ -433,7 +442,6 @@ static int ad7266_probe(struct spi_device *spi)
st->mode = AD7266_MODE_DIFF;
}
- spi_set_drvdata(spi, indio_dev);
st->spi = spi;
indio_dev->name = spi_get_device_id(spi)->name;
@@ -459,35 +467,12 @@ static int ad7266_probe(struct spi_device *spi)
spi_message_add_tail(&st->single_xfer[1], &st->single_msg);
spi_message_add_tail(&st->single_xfer[2], &st->single_msg);
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, &iio_pollfunc_store_time,
&ad7266_trigger_handler, &iio_triggered_buffer_setup_ops);
if (ret)
- goto error_disable_reg;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_buffer_cleanup;
-
- return 0;
-
-error_buffer_cleanup:
- iio_triggered_buffer_cleanup(indio_dev);
-error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-
- return ret;
-}
-
-static void ad7266_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad7266_state *st = iio_priv(indio_dev);
+ return ret;
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id ad7266_id[] = {
@@ -502,7 +487,6 @@ static struct spi_driver ad7266_driver = {
.name = "ad7266",
},
.probe = ad7266_probe,
- .remove = ad7266_remove,
.id_table = ad7266_id,
};
module_spi_driver(ad7266_driver);
diff --git a/drivers/iio/adc/ad7280a.c b/drivers/iio/adc/ad7280a.c
index ec9acbf12b9a..3bdf3d9422f2 100644
--- a/drivers/iio/adc/ad7280a.c
+++ b/drivers/iio/adc/ad7280a.c
@@ -488,7 +488,7 @@ static ssize_t ad7280_store_balance_sw(struct iio_dev *indio_dev,
bool readin;
int ret;
- ret = strtobool(buf, &readin);
+ ret = kstrtobool(buf, &readin);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index ebcd52526cac..261a9a6b45e1 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -6,6 +6,7 @@
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
+#include <linux/align.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -342,15 +343,49 @@ EXPORT_SYMBOL_NS_GPL(ad_sigma_delta_single_conversion, IIO_AD_SIGMA_DELTA);
static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
{
struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+ unsigned int i, slot, samples_buf_size;
unsigned int channel;
+ uint8_t *samples_buf;
int ret;
- channel = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
- ret = ad_sigma_delta_set_channel(sigma_delta,
- indio_dev->channels[channel].address);
- if (ret)
- return ret;
+ if (sigma_delta->num_slots == 1) {
+ channel = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+ ret = ad_sigma_delta_set_channel(sigma_delta,
+ indio_dev->channels[channel].address);
+ if (ret)
+ return ret;
+ slot = 1;
+ } else {
+ /*
+		 * At this point, update_scan_mode has already enabled the required channels.
+ * For sigma-delta sequencer drivers with multiple slots, an update_scan_mode
+ * implementation is mandatory.
+ */
+ slot = 0;
+ for_each_set_bit(i, indio_dev->active_scan_mask, indio_dev->masklength) {
+ sigma_delta->slots[slot] = indio_dev->channels[i].address;
+ slot++;
+ }
+ }
+
+ sigma_delta->active_slots = slot;
+ sigma_delta->current_slot = 0;
+
+ if (sigma_delta->active_slots > 1) {
+ ret = ad_sigma_delta_append_status(sigma_delta, true);
+ if (ret)
+ return ret;
+ }
+
+ samples_buf_size = ALIGN(slot * indio_dev->channels[0].scan_type.storagebits, 8);
+ samples_buf_size += sizeof(int64_t);
+ samples_buf = devm_krealloc(&sigma_delta->spi->dev, sigma_delta->samples_buf,
+ samples_buf_size, GFP_KERNEL);
+ if (!samples_buf)
+ return -ENOMEM;
+
+ sigma_delta->samples_buf = samples_buf;
spi_bus_lock(sigma_delta->spi->master);
sigma_delta->bus_locked = true;
@@ -386,6 +421,10 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
sigma_delta->keep_cs_asserted = false;
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+ if (sigma_delta->status_appended)
+ ad_sigma_delta_append_status(sigma_delta, false);
+
+ ad_sigma_delta_disable_all(sigma_delta);
sigma_delta->bus_locked = false;
return spi_bus_unlock(sigma_delta->spi->master);
}
@@ -396,6 +435,10 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
uint8_t *data = sigma_delta->rx_buf;
+ unsigned int transfer_size;
+ unsigned int sample_size;
+ unsigned int sample_pos;
+ unsigned int status_pos;
unsigned int reg_size;
unsigned int data_reg;
@@ -408,21 +451,69 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
else
data_reg = AD_SD_REG_DATA;
+ /* Status word will be appended to the sample during transfer */
+ if (sigma_delta->status_appended)
+ transfer_size = reg_size + 1;
+ else
+ transfer_size = reg_size;
+
switch (reg_size) {
case 4:
case 2:
case 1:
- ad_sd_read_reg_raw(sigma_delta, data_reg, reg_size, &data[0]);
+ status_pos = reg_size;
+ ad_sd_read_reg_raw(sigma_delta, data_reg, transfer_size, &data[0]);
break;
case 3:
+ /*
+ * Data array after transfer will look like (if status is appended):
+ * data[] = { [0][sample][sample][sample][status] }
+		 * Keeping the first byte 0 shifts the status position by 1 byte to the right.
+ */
+ status_pos = reg_size + 1;
+
/* We store 24 bit samples in a 32 bit word. Keep the upper
* byte set to zero. */
- ad_sd_read_reg_raw(sigma_delta, data_reg, reg_size, &data[1]);
+ ad_sd_read_reg_raw(sigma_delta, data_reg, transfer_size, &data[1]);
break;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
+ /*
+	 * For devices sampling only one channel at a time, there is no
+	 * need for sample number tracking.
+ */
+ if (sigma_delta->active_slots == 1) {
+ iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
+ goto irq_handled;
+ }
+
+ if (sigma_delta->status_appended) {
+ u8 converted_channel;
+
+ converted_channel = data[status_pos] & sigma_delta->info->status_ch_mask;
+ if (converted_channel != sigma_delta->slots[sigma_delta->current_slot]) {
+ /*
+ * Desync occurred during continuous sampling of multiple channels.
+ * Drop this incomplete sample and start from first channel again.
+ */
+
+ sigma_delta->current_slot = 0;
+ goto irq_handled;
+ }
+ }
+
+ sample_size = indio_dev->channels[0].scan_type.storagebits / 8;
+ sample_pos = sample_size * sigma_delta->current_slot;
+ memcpy(&sigma_delta->samples_buf[sample_pos], data, sample_size);
+ sigma_delta->current_slot++;
+ if (sigma_delta->current_slot == sigma_delta->active_slots) {
+ sigma_delta->current_slot = 0;
+ iio_push_to_buffers_with_timestamp(indio_dev, sigma_delta->samples_buf,
+ pf->timestamp);
+ }
+
+irq_handled:
iio_trigger_notify_done(indio_dev->trig);
sigma_delta->irq_dis = false;
enable_irq(sigma_delta->spi->irq);
@@ -430,10 +521,17 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
return IRQ_HANDLED;
}
+static bool ad_sd_validate_scan_mask(struct iio_dev *indio_dev, const unsigned long *mask)
+{
+ struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+
+ return bitmap_weight(mask, indio_dev->masklength) <= sigma_delta->num_slots;
+}
+
static const struct iio_buffer_setup_ops ad_sd_buffer_setup_ops = {
.postenable = &ad_sd_buffer_postenable,
.postdisable = &ad_sd_buffer_postdisable,
- .validate_scan_mask = &iio_validate_scan_mask_onehot,
+ .validate_scan_mask = &ad_sd_validate_scan_mask,
};
static irqreturn_t ad_sd_data_rdy_trig_poll(int irq, void *private)
@@ -513,8 +611,14 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de
*/
int devm_ad_sd_setup_buffer_and_trigger(struct device *dev, struct iio_dev *indio_dev)
{
+ struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
int ret;
+ sigma_delta->slots = devm_kcalloc(dev, sigma_delta->num_slots,
+ sizeof(*sigma_delta->slots), GFP_KERNEL);
+ if (!sigma_delta->slots)
+ return -ENOMEM;
+
ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
&iio_pollfunc_store_time,
&ad_sd_trigger_handler,
@@ -541,6 +645,25 @@ int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
{
sigma_delta->spi = spi;
sigma_delta->info = info;
+
+	/* If the field is unset in ad_sigma_delta_info, assume there can only be 1 slot. */
+ if (!info->num_slots)
+ sigma_delta->num_slots = 1;
+ else
+ sigma_delta->num_slots = info->num_slots;
+
+ if (sigma_delta->num_slots > 1) {
+ if (!indio_dev->info->update_scan_mode) {
+ dev_err(&spi->dev, "iio_dev lacks update_scan_mode().\n");
+ return -EINVAL;
+ }
+
+ if (!info->disable_all) {
+ dev_err(&spi->dev, "ad_sigma_delta_info lacks disable_all().\n");
+ return -EINVAL;
+ }
+ }
+
iio_device_set_drvdata(indio_dev, sigma_delta);
return 0;
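
The sizing done in ad_sd_buffer_postenable() rounds the per-scan payload up to a multiple of 8 and reserves room for the 64-bit timestamp pushed by iio_push_to_buffers_with_timestamp(). A standalone sketch of the same arithmetic, with illustrative values in place of the real channel description:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int active_slots = 3;	/* channels enabled in the scan mask */
	unsigned int storagebits = 32;	/* per-channel storage, e.g. ad7124 */
	unsigned int samples_buf_size;

	/* Same arithmetic as ad_sd_buffer_postenable(): round the payload up
	 * to a multiple of 8, then append room for the 64-bit timestamp. */
	samples_buf_size = ALIGN_UP(active_slots * storagebits, 8);
	samples_buf_size += sizeof(int64_t);

	printf("samples_buf_size = %u\n", samples_buf_size);
	return 0;
}
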
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 854b1f81d807..b764823ce57e 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -1117,7 +1117,7 @@ static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
return at91_adc_configure_touch(st, true);
/* if we are not in triggered mode, we cannot enable the buffer. */
- if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
+ if (!(iio_device_get_current_mode(indio_dev) & INDIO_ALL_TRIGGERED_MODES))
return -EINVAL;
/* we continue with the triggered buffer */
@@ -1159,7 +1159,7 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
return at91_adc_configure_touch(st, false);
/* if we are not in triggered mode, nothing to do here */
- if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
+ if (!(iio_device_get_current_mode(indio_dev) & INDIO_ALL_TRIGGERED_MODES))
return -EINVAL;
/*
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 8d902a32a0fd..abad16803849 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -550,7 +550,7 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev,
bool val;
int ret;
- ret = strtobool(buf, &val);
+ ret = kstrtobool(buf, &val);
if (ret)
return ret;
@@ -1027,7 +1027,6 @@ static int ina2xx_probe(struct i2c_client *client,
indio_dev->name = id->name;
ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&ina2xx_setup_ops);
if (ret)
return ret;
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 61e80bf3d05e..fd000345ec5c 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -376,7 +376,8 @@ static int palmas_gpadc_get_calibrated_code(struct palmas_gpadc *adc,
adc->adc_info[adc_chan].gain_error;
if (val < 0) {
- dev_err(adc->dev, "Mismatch with calibration\n");
+ if (val < -10)
+ dev_err(adc->dev, "Mismatch with calibration var = %d\n", val);
return 0;
}
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
index 00098caf6d9e..e9ff2d6a8a57 100644
--- a/drivers/iio/adc/sc27xx_adc.c
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -9,12 +9,16 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
/* PMIC global registers definition */
-#define SC27XX_MODULE_EN 0xc08
+#define SC2730_MODULE_EN 0x1808
+#define SC2731_MODULE_EN 0xc08
#define SC27XX_MODULE_ADC_EN BIT(5)
-#define SC27XX_ARM_CLK_EN 0xc10
+#define SC2721_ARM_CLK_EN 0xc0c
+#define SC2730_ARM_CLK_EN 0x180c
+#define SC2731_ARM_CLK_EN 0xc10
#define SC27XX_CLK_ADC_EN BIT(5)
#define SC27XX_CLK_ADC_CLK_EN BIT(6)
@@ -36,8 +40,10 @@
/* Bits and mask definition for SC27XX_ADC_CH_CFG register */
#define SC27XX_ADC_CHN_ID_MASK GENMASK(4, 0)
-#define SC27XX_ADC_SCALE_MASK GENMASK(10, 8)
-#define SC27XX_ADC_SCALE_SHIFT 8
+#define SC27XX_ADC_SCALE_MASK GENMASK(10, 9)
+#define SC2721_ADC_SCALE_MASK BIT(5)
+#define SC27XX_ADC_SCALE_SHIFT 9
+#define SC2721_ADC_SCALE_SHIFT 5
/* Bits definitions for SC27XX_ADC_INT_EN registers */
#define SC27XX_ADC_IRQ_EN BIT(0)
@@ -67,8 +73,15 @@
#define SC27XX_RATIO_NUMERATOR_OFFSET 16
#define SC27XX_RATIO_DENOMINATOR_MASK GENMASK(15, 0)
+/* ADC specific channel reference voltage 3.5V */
+#define SC27XX_ADC_REFVOL_VDD35 3500000
+
+/* ADC default channel reference voltage is 2.8V */
+#define SC27XX_ADC_REFVOL_VDD28 2800000
+
struct sc27xx_adc_data {
struct device *dev;
+ struct regulator *volref;
struct regmap *regmap;
/*
* One hardware spinlock to synchronize between the multiple
@@ -78,6 +91,24 @@ struct sc27xx_adc_data {
int channel_scale[SC27XX_ADC_CHANNEL_MAX];
u32 base;
int irq;
+ const struct sc27xx_adc_variant_data *var_data;
+};
+
+/*
+ * Since different PMICs of the SC27xx series can have different
+ * register addresses and ratios, save the ratio config and base
+ * addresses in the device data structure.
+ */
+struct sc27xx_adc_variant_data {
+ u32 module_en;
+ u32 clk_en;
+ u32 scale_shift;
+ u32 scale_mask;
+ const struct sc27xx_adc_linear_graph *bscale_cal;
+ const struct sc27xx_adc_linear_graph *sscale_cal;
+ void (*init_scale)(struct sc27xx_adc_data *data);
+ int (*get_ratio)(int channel, int scale);
+ bool set_volref;
};
struct sc27xx_adc_linear_graph {
@@ -103,6 +134,16 @@ static struct sc27xx_adc_linear_graph small_scale_graph = {
100, 341,
};
+static const struct sc27xx_adc_linear_graph sc2731_big_scale_graph_calib = {
+ 4200, 850,
+ 3600, 728,
+};
+
+static const struct sc27xx_adc_linear_graph sc2731_small_scale_graph_calib = {
+ 1000, 838,
+ 100, 84,
+};
+
static const struct sc27xx_adc_linear_graph big_scale_graph_calib = {
4200, 856,
3600, 733,
@@ -118,49 +159,225 @@ static int sc27xx_adc_get_calib_data(u32 calib_data, int calib_adc)
return ((calib_data & 0xff) + calib_adc - 128) * 4;
}
+/* get the adc nvmem cell calibration data */
+static int adc_nvmem_cell_calib_data(struct sc27xx_adc_data *data, const char *cell_name)
+{
+ struct nvmem_cell *cell;
+ void *buf;
+ u32 origin_calib_data = 0;
+ size_t len;
+
+ if (!data)
+ return -EINVAL;
+
+ cell = nvmem_cell_get(data->dev, cell_name);
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ buf = nvmem_cell_read(cell, &len);
+ if (IS_ERR(buf)) {
+ nvmem_cell_put(cell);
+ return PTR_ERR(buf);
+ }
+
+ memcpy(&origin_calib_data, buf, min(len, sizeof(u32)));
+
+ kfree(buf);
+ nvmem_cell_put(cell);
+ return origin_calib_data;
+}
+
static int sc27xx_adc_scale_calibration(struct sc27xx_adc_data *data,
bool big_scale)
{
const struct sc27xx_adc_linear_graph *calib_graph;
struct sc27xx_adc_linear_graph *graph;
- struct nvmem_cell *cell;
const char *cell_name;
u32 calib_data = 0;
- void *buf;
- size_t len;
if (big_scale) {
- calib_graph = &big_scale_graph_calib;
+ calib_graph = data->var_data->bscale_cal;
graph = &big_scale_graph;
cell_name = "big_scale_calib";
} else {
- calib_graph = &small_scale_graph_calib;
+ calib_graph = data->var_data->sscale_cal;
graph = &small_scale_graph;
cell_name = "small_scale_calib";
}
- cell = nvmem_cell_get(data->dev, cell_name);
- if (IS_ERR(cell))
- return PTR_ERR(cell);
-
- buf = nvmem_cell_read(cell, &len);
- nvmem_cell_put(cell);
-
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- memcpy(&calib_data, buf, min(len, sizeof(u32)));
+ calib_data = adc_nvmem_cell_calib_data(data, cell_name);
/* Only need to calibrate the adc values in the linear graph. */
graph->adc0 = sc27xx_adc_get_calib_data(calib_data, calib_graph->adc0);
graph->adc1 = sc27xx_adc_get_calib_data(calib_data >> 8,
calib_graph->adc1);
- kfree(buf);
return 0;
}
-static int sc27xx_adc_get_ratio(int channel, int scale)
+static int sc2720_adc_get_ratio(int channel, int scale)
+{
+ switch (channel) {
+ case 14:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(68, 900);
+ case 1:
+ return SC27XX_VOLT_RATIO(68, 1760);
+ case 2:
+ return SC27XX_VOLT_RATIO(68, 2327);
+ case 3:
+ return SC27XX_VOLT_RATIO(68, 3654);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ case 16:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(48, 100);
+ case 1:
+ return SC27XX_VOLT_RATIO(480, 1955);
+ case 2:
+ return SC27XX_VOLT_RATIO(480, 2586);
+ case 3:
+ return SC27XX_VOLT_RATIO(48, 406);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ case 21:
+ case 22:
+ case 23:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(3, 8);
+ case 1:
+ return SC27XX_VOLT_RATIO(375, 1955);
+ case 2:
+ return SC27XX_VOLT_RATIO(375, 2586);
+ case 3:
+ return SC27XX_VOLT_RATIO(300, 3248);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ default:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(1, 1);
+ case 1:
+ return SC27XX_VOLT_RATIO(1000, 1955);
+ case 2:
+ return SC27XX_VOLT_RATIO(1000, 2586);
+ case 3:
+ return SC27XX_VOLT_RATIO(100, 406);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ }
+ return SC27XX_VOLT_RATIO(1, 1);
+}
+
+static int sc2721_adc_get_ratio(int channel, int scale)
+{
+ switch (channel) {
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ return scale ? SC27XX_VOLT_RATIO(400, 1025) :
+ SC27XX_VOLT_RATIO(1, 1);
+ case 5:
+ return SC27XX_VOLT_RATIO(7, 29);
+ case 7:
+ case 9:
+ return scale ? SC27XX_VOLT_RATIO(100, 125) :
+ SC27XX_VOLT_RATIO(1, 1);
+ case 14:
+ return SC27XX_VOLT_RATIO(68, 900);
+ case 16:
+ return SC27XX_VOLT_RATIO(48, 100);
+ case 19:
+ return SC27XX_VOLT_RATIO(1, 3);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ return SC27XX_VOLT_RATIO(1, 1);
+}
+
+static int sc2730_adc_get_ratio(int channel, int scale)
+{
+ switch (channel) {
+ case 14:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(68, 900);
+ case 1:
+ return SC27XX_VOLT_RATIO(68, 1760);
+ case 2:
+ return SC27XX_VOLT_RATIO(68, 2327);
+ case 3:
+ return SC27XX_VOLT_RATIO(68, 3654);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ case 15:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(1, 3);
+ case 1:
+ return SC27XX_VOLT_RATIO(1000, 5865);
+ case 2:
+ return SC27XX_VOLT_RATIO(500, 3879);
+ case 3:
+ return SC27XX_VOLT_RATIO(500, 6090);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ case 16:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(48, 100);
+ case 1:
+ return SC27XX_VOLT_RATIO(480, 1955);
+ case 2:
+ return SC27XX_VOLT_RATIO(480, 2586);
+ case 3:
+ return SC27XX_VOLT_RATIO(48, 406);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ case 21:
+ case 22:
+ case 23:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(3, 8);
+ case 1:
+ return SC27XX_VOLT_RATIO(375, 1955);
+ case 2:
+ return SC27XX_VOLT_RATIO(375, 2586);
+ case 3:
+ return SC27XX_VOLT_RATIO(300, 3248);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ default:
+ switch (scale) {
+ case 0:
+ return SC27XX_VOLT_RATIO(1, 1);
+ case 1:
+ return SC27XX_VOLT_RATIO(1000, 1955);
+ case 2:
+ return SC27XX_VOLT_RATIO(1000, 2586);
+ case 3:
+ return SC27XX_VOLT_RATIO(1000, 4060);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+ }
+ return SC27XX_VOLT_RATIO(1, 1);
+}
+
+static int sc2731_adc_get_ratio(int channel, int scale)
{
switch (channel) {
case 1:
@@ -185,10 +402,87 @@ static int sc27xx_adc_get_ratio(int channel, int scale)
return SC27XX_VOLT_RATIO(1, 1);
}
+/*
+ * According to the datasheet, set a specific scale value on some channels.
+ */
+static void sc2720_adc_scale_init(struct sc27xx_adc_data *data)
+{
+ int i;
+
+ for (i = 0; i < SC27XX_ADC_CHANNEL_MAX; i++) {
+ switch (i) {
+ case 5:
+ data->channel_scale[i] = 3;
+ break;
+ case 7:
+ case 9:
+ data->channel_scale[i] = 2;
+ break;
+ case 13:
+ data->channel_scale[i] = 1;
+ break;
+ case 19:
+ case 30:
+ case 31:
+ data->channel_scale[i] = 3;
+ break;
+ default:
+ data->channel_scale[i] = 0;
+ break;
+ }
+ }
+}
+
+static void sc2730_adc_scale_init(struct sc27xx_adc_data *data)
+{
+ int i;
+
+ for (i = 0; i < SC27XX_ADC_CHANNEL_MAX; i++) {
+ switch (i) {
+ case 5:
+ case 10:
+ case 19:
+ case 30:
+ case 31:
+ data->channel_scale[i] = 3;
+ break;
+ case 7:
+ case 9:
+ data->channel_scale[i] = 2;
+ break;
+ case 13:
+ data->channel_scale[i] = 1;
+ break;
+ default:
+ data->channel_scale[i] = 0;
+ break;
+ }
+ }
+}
+
+static void sc2731_adc_scale_init(struct sc27xx_adc_data *data)
+{
+ int i;
+ /*
+	 * In the current software design, the SC2731 supports 2 scales:
+	 * channel 5 uses the big scale, the others use the small scale.
+ */
+ for (i = 0; i < SC27XX_ADC_CHANNEL_MAX; i++) {
+ switch (i) {
+ case 5:
+ data->channel_scale[i] = 1;
+ break;
+ default:
+ data->channel_scale[i] = 0;
+ break;
+ }
+ }
+}
+
static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
int scale, int *val)
{
- int ret;
+ int ret, ret_volref;
u32 tmp, value, status;
ret = hwspin_lock_timeout_raw(data->hwlock, SC27XX_ADC_HWLOCK_TIMEOUT);
@@ -197,10 +491,25 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
return ret;
}
+ /*
+ * According to the sc2721 chip data sheet, the reference voltage of
+ * specific channel 30 and channel 31 in ADC module needs to be set from
+ * the default 2.8v to 3.5v.
+ */
+ if ((data->var_data->set_volref) && (channel == 30 || channel == 31)) {
+ ret = regulator_set_voltage(data->volref,
+ SC27XX_ADC_REFVOL_VDD35,
+ SC27XX_ADC_REFVOL_VDD35);
+ if (ret) {
+ dev_err(data->dev, "failed to set the volref 3.5v\n");
+ goto unlock_adc;
+ }
+ }
+
ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
SC27XX_ADC_EN, SC27XX_ADC_EN);
if (ret)
- goto unlock_adc;
+ goto regulator_restore;
ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_INT_CLR,
SC27XX_ADC_IRQ_CLR, SC27XX_ADC_IRQ_CLR);
@@ -208,10 +517,11 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
goto disable_adc;
/* Configure the channel id and scale */
- tmp = (scale << SC27XX_ADC_SCALE_SHIFT) & SC27XX_ADC_SCALE_MASK;
+ tmp = (scale << data->var_data->scale_shift) & data->var_data->scale_mask;
tmp |= channel & SC27XX_ADC_CHN_ID_MASK;
ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CH_CFG,
- SC27XX_ADC_CHN_ID_MASK | SC27XX_ADC_SCALE_MASK,
+ SC27XX_ADC_CHN_ID_MASK |
+ data->var_data->scale_mask,
tmp);
if (ret)
goto disable_adc;
@@ -249,6 +559,17 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
disable_adc:
regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
SC27XX_ADC_EN, 0);
+regulator_restore:
+ if ((data->var_data->set_volref) && (channel == 30 || channel == 31)) {
+ ret_volref = regulator_set_voltage(data->volref,
+ SC27XX_ADC_REFVOL_VDD28,
+ SC27XX_ADC_REFVOL_VDD28);
+ if (ret_volref) {
+			dev_err(data->dev, "failed to set the volref 2.8v, ret_volref = 0x%x\n",
+ ret_volref);
+ ret = ret || ret_volref;
+ }
+ }
unlock_adc:
hwspin_unlock_raw(data->hwlock);
@@ -262,13 +583,14 @@ static void sc27xx_adc_volt_ratio(struct sc27xx_adc_data *data,
int channel, int scale,
u32 *div_numerator, u32 *div_denominator)
{
- u32 ratio = sc27xx_adc_get_ratio(channel, scale);
+ u32 ratio;
+ ratio = data->var_data->get_ratio(channel, scale);
*div_numerator = ratio >> SC27XX_RATIO_NUMERATOR_OFFSET;
*div_denominator = ratio & SC27XX_RATIO_DENOMINATOR_MASK;
}
-static int sc27xx_adc_to_volt(struct sc27xx_adc_linear_graph *graph,
+static int adc_to_volt(struct sc27xx_adc_linear_graph *graph,
int raw_adc)
{
int tmp;
@@ -277,6 +599,16 @@ static int sc27xx_adc_to_volt(struct sc27xx_adc_linear_graph *graph,
tmp /= (graph->adc0 - graph->adc1);
tmp += graph->volt1;
+ return tmp;
+}
+
+static int sc27xx_adc_to_volt(struct sc27xx_adc_linear_graph *graph,
+ int raw_adc)
+{
+ int tmp;
+
+ tmp = adc_to_volt(graph, raw_adc);
+
return tmp < 0 ? 0 : tmp;
}
@@ -432,13 +764,13 @@ static int sc27xx_adc_enable(struct sc27xx_adc_data *data)
{
int ret;
- ret = regmap_update_bits(data->regmap, SC27XX_MODULE_EN,
+ ret = regmap_update_bits(data->regmap, data->var_data->module_en,
SC27XX_MODULE_ADC_EN, SC27XX_MODULE_ADC_EN);
if (ret)
return ret;
/* Enable ADC work clock and controller clock */
- ret = regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
+ ret = regmap_update_bits(data->regmap, data->var_data->clk_en,
SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN,
SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN);
if (ret)
@@ -456,10 +788,10 @@ static int sc27xx_adc_enable(struct sc27xx_adc_data *data)
return 0;
disable_clk:
- regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
+ regmap_update_bits(data->regmap, data->var_data->clk_en,
SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, 0);
disable_adc:
- regmap_update_bits(data->regmap, SC27XX_MODULE_EN,
+ regmap_update_bits(data->regmap, data->var_data->module_en,
SC27XX_MODULE_ADC_EN, 0);
return ret;
@@ -470,21 +802,76 @@ static void sc27xx_adc_disable(void *_data)
struct sc27xx_adc_data *data = _data;
/* Disable ADC work clock and controller clock */
- regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
+ regmap_update_bits(data->regmap, data->var_data->clk_en,
SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, 0);
- regmap_update_bits(data->regmap, SC27XX_MODULE_EN,
+ regmap_update_bits(data->regmap, data->var_data->module_en,
SC27XX_MODULE_ADC_EN, 0);
}
+static const struct sc27xx_adc_variant_data sc2731_data = {
+ .module_en = SC2731_MODULE_EN,
+ .clk_en = SC2731_ARM_CLK_EN,
+ .scale_shift = SC27XX_ADC_SCALE_SHIFT,
+ .scale_mask = SC27XX_ADC_SCALE_MASK,
+ .bscale_cal = &sc2731_big_scale_graph_calib,
+ .sscale_cal = &sc2731_small_scale_graph_calib,
+ .init_scale = sc2731_adc_scale_init,
+ .get_ratio = sc2731_adc_get_ratio,
+ .set_volref = false,
+};
+
+static const struct sc27xx_adc_variant_data sc2730_data = {
+ .module_en = SC2730_MODULE_EN,
+ .clk_en = SC2730_ARM_CLK_EN,
+ .scale_shift = SC27XX_ADC_SCALE_SHIFT,
+ .scale_mask = SC27XX_ADC_SCALE_MASK,
+ .bscale_cal = &big_scale_graph_calib,
+ .sscale_cal = &small_scale_graph_calib,
+ .init_scale = sc2730_adc_scale_init,
+ .get_ratio = sc2730_adc_get_ratio,
+ .set_volref = false,
+};
+
+static const struct sc27xx_adc_variant_data sc2721_data = {
+ .module_en = SC2731_MODULE_EN,
+ .clk_en = SC2721_ARM_CLK_EN,
+ .scale_shift = SC2721_ADC_SCALE_SHIFT,
+ .scale_mask = SC2721_ADC_SCALE_MASK,
+ .bscale_cal = &sc2731_big_scale_graph_calib,
+ .sscale_cal = &sc2731_small_scale_graph_calib,
+ .init_scale = sc2731_adc_scale_init,
+ .get_ratio = sc2721_adc_get_ratio,
+ .set_volref = true,
+};
+
+static const struct sc27xx_adc_variant_data sc2720_data = {
+ .module_en = SC2731_MODULE_EN,
+ .clk_en = SC2721_ARM_CLK_EN,
+ .scale_shift = SC27XX_ADC_SCALE_SHIFT,
+ .scale_mask = SC27XX_ADC_SCALE_MASK,
+ .bscale_cal = &big_scale_graph_calib,
+ .sscale_cal = &small_scale_graph_calib,
+ .init_scale = sc2720_adc_scale_init,
+ .get_ratio = sc2720_adc_get_ratio,
+ .set_volref = false,
+};
+
static int sc27xx_adc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct sc27xx_adc_data *sc27xx_data;
+ const struct sc27xx_adc_variant_data *pdata;
struct iio_dev *indio_dev;
int ret;
+ pdata = of_device_get_match_data(dev);
+ if (!pdata) {
+ dev_err(dev, "No matching driver data found\n");
+ return -EINVAL;
+ }
+
indio_dev = devm_iio_device_alloc(dev, sizeof(*sc27xx_data));
if (!indio_dev)
return -ENOMEM;
@@ -520,6 +907,16 @@ static int sc27xx_adc_probe(struct platform_device *pdev)
}
sc27xx_data->dev = dev;
+ if (pdata->set_volref) {
+ sc27xx_data->volref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(sc27xx_data->volref)) {
+ ret = PTR_ERR(sc27xx_data->volref);
+ return dev_err_probe(dev, ret, "failed to get ADC volref\n");
+ }
+ }
+
+ sc27xx_data->var_data = pdata;
+ sc27xx_data->var_data->init_scale(sc27xx_data);
ret = sc27xx_adc_enable(sc27xx_data);
if (ret) {
@@ -546,7 +943,10 @@ static int sc27xx_adc_probe(struct platform_device *pdev)
}
static const struct of_device_id sc27xx_adc_of_match[] = {
- { .compatible = "sprd,sc2731-adc", },
+ { .compatible = "sprd,sc2731-adc", .data = &sc2731_data},
+ { .compatible = "sprd,sc2730-adc", .data = &sc2730_data},
+ { .compatible = "sprd,sc2721-adc", .data = &sc2721_data},
+ { .compatible = "sprd,sc2720-adc", .data = &sc2720_data},
{ }
};
MODULE_DEVICE_TABLE(of, sc27xx_adc_of_match);
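
The per-variant get_ratio() callbacks all return a value packed with SC27XX_VOLT_RATIO(), which sc27xx_adc_volt_ratio() then splits using SC27XX_RATIO_NUMERATOR_OFFSET and SC27XX_RATIO_DENOMINATOR_MASK. A standalone sketch of that 16:16 packing; the macro names below are stand-ins, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Numerator in the upper 16 bits, denominator in the lower 16 bits. */
#define VOLT_RATIO(num, den)	(((num) << 16) | ((den) & 0xffff))
#define RATIO_NUMERATOR(r)	((r) >> 16)
#define RATIO_DENOMINATOR(r)	((r) & 0xffff)

int main(void)
{
	uint32_t ratio = VOLT_RATIO(48, 100);	/* e.g. channel 16, scale 0 */

	printf("num=%u den=%u\n",
	       (unsigned)RATIO_NUMERATOR(ratio),
	       (unsigned)RATIO_DENOMINATOR(ratio));
	return 0;
}
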
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 9704cf0b9753..6d21ea84fa82 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -466,8 +466,7 @@ static int stm32_dfsdm_channels_configure(struct iio_dev *indio_dev,
* In continuous mode, use fast mode configuration,
* if it provides a better resolution.
*/
- if (adc->nconv == 1 && !trig &&
- (indio_dev->currentmode & INDIO_BUFFER_SOFTWARE)) {
+ if (adc->nconv == 1 && !trig && iio_buffer_enabled(indio_dev)) {
if (fl->flo[1].res >= fl->flo[0].res) {
fl->fast = 1;
flo = &fl->flo[1];
@@ -562,7 +561,7 @@ static int stm32_dfsdm_filter_configure(struct iio_dev *indio_dev,
cr1 = DFSDM_CR1_RCH(chan->channel);
/* Continuous conversions triggered by SPI clk in buffer mode */
- if (indio_dev->currentmode & INDIO_BUFFER_SOFTWARE)
+ if (iio_buffer_enabled(indio_dev))
cr1 |= DFSDM_CR1_RCONT(1);
cr1 |= DFSDM_CR1_RSYNC(fl->sync_mode);
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index d2d405388499..000e5cfecb43 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -61,7 +61,7 @@ struct stmpe_adc {
static int stmpe_read_voltage(struct stmpe_adc *info,
struct iio_chan_spec const *chan, int *val)
{
- long ret;
+ unsigned long ret;
mutex_lock(&info->lock);
@@ -79,7 +79,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info,
ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
- if (ret <= 0) {
+ if (ret == 0) {
stmpe_reg_write(info->stmpe, STMPE_REG_ADC_INT_STA,
STMPE_ADC_CH(info->channel));
mutex_unlock(&info->lock);
@@ -96,7 +96,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info,
static int stmpe_read_temp(struct stmpe_adc *info,
struct iio_chan_spec const *chan, int *val)
{
- long ret;
+ unsigned long ret;
mutex_lock(&info->lock);
@@ -114,7 +114,7 @@ static int stmpe_read_temp(struct stmpe_adc *info,
ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
- if (ret <= 0) {
+ if (ret == 0) {
mutex_unlock(&info->lock);
return -ETIMEDOUT;
}
@@ -345,21 +345,22 @@ static int __maybe_unused stmpe_adc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(stmpe_adc_pm_ops, NULL, stmpe_adc_resume);
+static const struct of_device_id stmpe_adc_ids[] = {
+ { .compatible = "st,stmpe-adc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, stmpe_adc_ids);
+
static struct platform_driver stmpe_adc_driver = {
.probe = stmpe_adc_probe,
.driver = {
.name = "stmpe-adc",
.pm = &stmpe_adc_pm_ops,
+ .of_match_table = stmpe_adc_ids,
},
};
module_platform_driver(stmpe_adc_driver);
-static const struct of_device_id stmpe_adc_ids[] = {
- { .compatible = "st,stmpe-adc", },
- { },
-};
-MODULE_DEVICE_TABLE(of, stmpe_adc_ids);
-
MODULE_AUTHOR("Stefan Agner <stefan.agner@toradex.com>");
MODULE_DESCRIPTION("STMPEXXX ADC driver");
MODULE_LICENSE("GPL v2");
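
The type change in stmpe_read_voltage()/stmpe_read_temp() reflects that wait_for_completion_timeout() returns an unsigned long: 0 on timeout, otherwise the remaining jiffies. With an unsigned variable only '== 0' is a meaningful timeout check; the old '<= 0' test on a signed long could never observe a negative value. A standalone sketch of the corrected pattern, with a trivial stand-in for the kernel API:

#include <stdio.h>

/* Stand-in for wait_for_completion_timeout(): 0 means timeout,
 * any other value is the remaining time. */
static unsigned long fake_wait_for_completion_timeout(unsigned long timeout)
{
	(void)timeout;
	return 0;	/* simulate a timeout */
}

int main(void)
{
	unsigned long ret = fake_wait_for_completion_timeout(100);

	if (ret == 0)
		printf("timed out\n");
	else
		printf("%lu units remaining\n", ret);
	return 0;
}
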
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 068efbce1710..5544da80b636 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -76,10 +76,15 @@
#define ADS1015_DEFAULT_DATA_RATE 4
#define ADS1015_DEFAULT_CHAN 0
-enum chip_ids {
- ADSXXXX = 0,
- ADS1015,
- ADS1115,
+struct ads1015_chip_data {
+ struct iio_chan_spec const *channels;
+ int num_channels;
+ const struct iio_info *info;
+ const int *data_rate;
+ const int data_rate_len;
+ const int *scale;
+ const int scale_len;
+ bool has_comparator;
};
enum ads1015_channels {
@@ -94,11 +99,11 @@ enum ads1015_channels {
ADS1015_TIMESTAMP,
};
-static const unsigned int ads1015_data_rate[] = {
+static const int ads1015_data_rate[] = {
128, 250, 490, 920, 1600, 2400, 3300, 3300
};
-static const unsigned int ads1115_data_rate[] = {
+static const int ads1115_data_rate[] = {
8, 16, 32, 64, 128, 250, 475, 860
};
@@ -106,10 +111,28 @@ static const unsigned int ads1115_data_rate[] = {
* Translation from PGA bits to full-scale positive and negative input voltage
* range in mV
*/
-static int ads1015_fullscale_range[] = {
+static const int ads1015_fullscale_range[] = {
6144, 4096, 2048, 1024, 512, 256, 256, 256
};
+static const int ads1015_scale[] = { /* 12bit ADC */
+ 256, 11,
+ 512, 11,
+ 1024, 11,
+ 2048, 11,
+ 4096, 11,
+ 6144, 11
+};
+
+static const int ads1115_scale[] = { /* 16bit ADC */
+ 256, 15,
+ 512, 15,
+ 1024, 15,
+ 2048, 15,
+ 4096, 15,
+ 6144, 15
+};
+
/*
* Translation from COMP_QUE field value to the number of successive readings
* exceed the threshold values before an interrupt is generated
@@ -134,71 +157,53 @@ static const struct iio_event_spec ads1015_events[] = {
},
};
-#define ADS1015_V_CHAN(_chan, _addr) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .address = _addr, \
- .channel = _chan, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_SCALE) | \
- BIT(IIO_CHAN_INFO_SAMP_FREQ), \
- .scan_index = _addr, \
- .scan_type = { \
- .sign = 's', \
- .realbits = 12, \
- .storagebits = 16, \
- .shift = 4, \
- .endianness = IIO_CPU, \
- }, \
- .event_spec = ads1015_events, \
- .num_event_specs = ARRAY_SIZE(ads1015_events), \
- .datasheet_name = "AIN"#_chan, \
-}
-
-#define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr) { \
+/*
+ * Compile-time check whether _fitbits can accommodate up to _testbits
+ * bits. Returns _fitbits on success, fails to compile otherwise.
+ *
+ * The test works such that it multiplies constant _fitbits by constant
+ * double-negation of size of a non-empty structure, i.e. it multiplies
+ * constant _fitbits by constant 1 in each successful compilation case.
+ * The non-empty structure may contain C11 _Static_assert(), make use of
+ * this and place the kernel variant of static assert in there, so that
+ * it performs the compile-time check for _testbits <= _fitbits. Note
+ * that it is not possible to directly use static_assert in compound
+ * statements, hence this convoluted construct.
+ */
+#define FIT_CHECK(_testbits, _fitbits) \
+ ( \
+ (_fitbits) * \
+ !!sizeof(struct { \
+ static_assert((_testbits) <= (_fitbits)); \
+ int pad; \
+ }) \
+ )
+
+#define ADS1015_V_CHAN(_chan, _addr, _realbits, _shift, _event_spec, _num_event_specs) { \
.type = IIO_VOLTAGE, \
- .differential = 1, \
.indexed = 1, \
.address = _addr, \
.channel = _chan, \
- .channel2 = _chan2, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
- .scan_index = _addr, \
- .scan_type = { \
- .sign = 's', \
- .realbits = 12, \
- .storagebits = 16, \
- .shift = 4, \
- .endianness = IIO_CPU, \
- }, \
- .event_spec = ads1015_events, \
- .num_event_specs = ARRAY_SIZE(ads1015_events), \
- .datasheet_name = "AIN"#_chan"-AIN"#_chan2, \
-}
-
-#define ADS1115_V_CHAN(_chan, _addr) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .address = _addr, \
- .channel = _chan, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ .info_mask_shared_by_all_available = \
BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
.scan_index = _addr, \
.scan_type = { \
.sign = 's', \
- .realbits = 16, \
- .storagebits = 16, \
+ .realbits = (_realbits), \
+ .storagebits = FIT_CHECK((_realbits) + (_shift), 16), \
+ .shift = (_shift), \
.endianness = IIO_CPU, \
}, \
- .event_spec = ads1015_events, \
- .num_event_specs = ARRAY_SIZE(ads1015_events), \
+ .event_spec = (_event_spec), \
+ .num_event_specs = (_num_event_specs), \
.datasheet_name = "AIN"#_chan, \
}
-#define ADS1115_V_DIFF_CHAN(_chan, _chan2, _addr) { \
+#define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr, _realbits, _shift, _event_spec, _num_event_specs) { \
.type = IIO_VOLTAGE, \
.differential = 1, \
.indexed = 1, \
@@ -208,15 +213,19 @@ static const struct iio_event_spec ads1015_events[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
.scan_index = _addr, \
.scan_type = { \
.sign = 's', \
- .realbits = 16, \
- .storagebits = 16, \
+ .realbits = (_realbits), \
+ .storagebits = FIT_CHECK((_realbits) + (_shift), 16), \
+ .shift = (_shift), \
.endianness = IIO_CPU, \
}, \
- .event_spec = ads1015_events, \
- .num_event_specs = ARRAY_SIZE(ads1015_events), \
+ .event_spec = (_event_spec), \
+ .num_event_specs = (_num_event_specs), \
.datasheet_name = "AIN"#_chan"-AIN"#_chan2, \
}
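
The FIT_CHECK() macro above relies on the sizeof-of-a-struct trick so that a static assertion can live inside an ordinary constant expression. A standalone C11 sketch of the same idea; the names here are hypothetical, not the driver's:

#include <stdio.h>

/* Multiply _fitbits by !!sizeof() of a struct whose only purpose is to host
 * a _Static_assert. If _testbits does not fit, compilation fails; otherwise
 * the expression evaluates to _fitbits. */
#define FITS(_testbits, _fitbits) \
	((_fitbits) * !!sizeof(struct { \
		_Static_assert((_testbits) <= (_fitbits), "does not fit"); \
		int pad; \
	}))

int main(void)
{
	int storagebits = FITS(12 + 4, 16);	/* 12 real bits + 4-bit shift */

	printf("storagebits = %d\n", storagebits);
	/* FITS(13 + 4, 16) would fail to compile. */
	return 0;
}
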
@@ -245,7 +254,7 @@ struct ads1015_data {
unsigned int comp_mode;
struct ads1015_thresh_data thresh_data[ADS1015_CHANNELS];
- unsigned int *data_rate;
+ const struct ads1015_chip_data *chip;
/*
* Set to true when the ADC is switched to the continuous-conversion
* mode and exits from a power-down state. This flag is used to avoid
@@ -273,49 +282,91 @@ static void ads1015_event_channel_disable(struct ads1015_data *data, int chan)
data->event_channel = ADS1015_CHANNELS;
}
-static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case ADS1015_CFG_REG:
- case ADS1015_LO_THRESH_REG:
- case ADS1015_HI_THRESH_REG:
- return true;
- default:
- return false;
- }
-}
+static const struct regmap_range ads1015_writeable_ranges[] = {
+ regmap_reg_range(ADS1015_CFG_REG, ADS1015_HI_THRESH_REG),
+};
+
+static const struct regmap_access_table ads1015_writeable_table = {
+ .yes_ranges = ads1015_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ads1015_writeable_ranges),
+};
static const struct regmap_config ads1015_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
.max_register = ADS1015_HI_THRESH_REG,
- .writeable_reg = ads1015_is_writeable_reg,
+ .wr_table = &ads1015_writeable_table,
+};
+
+static const struct regmap_range tla2024_writeable_ranges[] = {
+ regmap_reg_range(ADS1015_CFG_REG, ADS1015_CFG_REG),
+};
+
+static const struct regmap_access_table tla2024_writeable_table = {
+ .yes_ranges = tla2024_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tla2024_writeable_ranges),
+};
+
+static const struct regmap_config tla2024_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = ADS1015_CFG_REG,
+ .wr_table = &tla2024_writeable_table,
};
static const struct iio_chan_spec ads1015_channels[] = {
- ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1),
- ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3),
- ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3),
- ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3),
- ADS1015_V_CHAN(0, ADS1015_AIN0),
- ADS1015_V_CHAN(1, ADS1015_AIN1),
- ADS1015_V_CHAN(2, ADS1015_AIN2),
- ADS1015_V_CHAN(3, ADS1015_AIN3),
+ ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(0, ADS1015_AIN0, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(1, ADS1015_AIN1, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(2, ADS1015_AIN2, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(3, ADS1015_AIN3, 12, 4,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
};
static const struct iio_chan_spec ads1115_channels[] = {
- ADS1115_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1),
- ADS1115_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3),
- ADS1115_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3),
- ADS1115_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3),
- ADS1115_V_CHAN(0, ADS1015_AIN0),
- ADS1115_V_CHAN(1, ADS1015_AIN1),
- ADS1115_V_CHAN(2, ADS1015_AIN2),
- ADS1115_V_CHAN(3, ADS1015_AIN3),
+ ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(0, ADS1015_AIN0, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(1, ADS1015_AIN1, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(2, ADS1015_AIN2, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
+ ADS1015_V_CHAN(3, ADS1015_AIN3, 16, 0,
+ ads1015_events, ARRAY_SIZE(ads1015_events)),
IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
};
+static const struct iio_chan_spec tla2024_channels[] = {
+ ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1, 12, 4, NULL, 0),
+ ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3, 12, 4, NULL, 0),
+ ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3, 12, 4, NULL, 0),
+ ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3, 12, 4, NULL, 0),
+ ADS1015_V_CHAN(0, ADS1015_AIN0, 12, 4, NULL, 0),
+ ADS1015_V_CHAN(1, ADS1015_AIN1, 12, 4, NULL, 0),
+ ADS1015_V_CHAN(2, ADS1015_AIN2, 12, 4, NULL, 0),
+ ADS1015_V_CHAN(3, ADS1015_AIN3, 12, 4, NULL, 0),
+ IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
+};
+
+
#ifdef CONFIG_PM
static int ads1015_set_power_state(struct ads1015_data *data, bool on)
{
@@ -344,6 +395,7 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on)
static
int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
{
+ const int *data_rate = data->chip->data_rate;
int ret, pga, dr, dr_old, conv_time;
unsigned int old, mask, cfg;
@@ -378,8 +430,8 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
}
if (data->conv_invalid) {
dr_old = (old & ADS1015_CFG_DR_MASK) >> ADS1015_CFG_DR_SHIFT;
- conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]);
- conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
+ conv_time = DIV_ROUND_UP(USEC_PER_SEC, data_rate[dr_old]);
+ conv_time += DIV_ROUND_UP(USEC_PER_SEC, data_rate[dr]);
conv_time += conv_time / 10; /* 10% internal clock inaccuracy */
usleep_range(conv_time, conv_time + 1);
data->conv_invalid = false;
@@ -445,8 +497,8 @@ static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
{
int i;
- for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++) {
- if (data->data_rate[i] == rate) {
+ for (i = 0; i < data->chip->data_rate_len; i++) {
+ if (data->chip->data_rate[i] == rate) {
data->channel_data[chan].data_rate = i;
return 0;
}
@@ -455,6 +507,32 @@ static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
return -EINVAL;
}
+static int ads1015_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct ads1015_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_VOLTAGE)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *type = IIO_VAL_FRACTIONAL_LOG2;
+ *vals = data->chip->scale;
+ *length = data->chip->scale_len;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *type = IIO_VAL_INT;
+ *vals = data->chip->data_rate;
+ *length = data->chip->data_rate_len;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
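
With read_avail() wired up (and the hand-rolled *_available attributes dropped further down), the IIO core generates the scale_available and sampling_frequency_available sysfs files from the lists returned here, together with the *_available entries in the channel masks. A minimal sketch of the callback shape, using placeholder rates rather than the driver's tables:

static const int example_rates[] = { 128, 250, 490, 920, 1600, 2400, 3300 };

static int example_read_avail(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan,
			      const int **vals, int *type, int *length,
			      long mask)
{
	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		*vals = example_rates;
		*length = ARRAY_SIZE(example_rates);
		*type = IIO_VAL_INT;
		return IIO_AVAIL_LIST;	/* core formats the *_available file */
	default:
		return -EINVAL;
	}
}
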
static int ads1015_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
@@ -504,7 +582,7 @@ release_direct:
break;
case IIO_CHAN_INFO_SAMP_FREQ:
idx = data->channel_data[chan->address].data_rate;
- *val = data->data_rate[idx];
+ *val = data->chip->data_rate[idx];
ret = IIO_VAL_INT;
break;
default:
@@ -564,7 +642,7 @@ static int ads1015_read_event(struct iio_dev *indio_dev,
dr = data->channel_data[chan->address].data_rate;
comp_queue = data->thresh_data[chan->address].comp_queue;
period = ads1015_comp_queue[comp_queue] *
- USEC_PER_SEC / data->data_rate[dr];
+ USEC_PER_SEC / data->chip->data_rate[dr];
*val = period / USEC_PER_SEC;
*val2 = period % USEC_PER_SEC;
@@ -586,6 +664,7 @@ static int ads1015_write_event(struct iio_dev *indio_dev,
int val2)
{
struct ads1015_data *data = iio_priv(indio_dev);
+ const int *data_rate = data->chip->data_rate;
int realbits = chan->scan_type.realbits;
int ret = 0;
long long period;
@@ -611,7 +690,7 @@ static int ads1015_write_event(struct iio_dev *indio_dev,
for (i = 0; i < ARRAY_SIZE(ads1015_comp_queue) - 1; i++) {
if (period <= ads1015_comp_queue[i] *
- USEC_PER_SEC / data->data_rate[dr])
+ USEC_PER_SEC / data_rate[dr])
break;
}
data->thresh_data[chan->address].comp_queue = i;
@@ -802,54 +881,20 @@ static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = {
.validate_scan_mask = &iio_validate_scan_mask_onehot,
};
-static IIO_CONST_ATTR_NAMED(ads1015_scale_available, scale_available,
- "3 2 1 0.5 0.25 0.125");
-static IIO_CONST_ATTR_NAMED(ads1115_scale_available, scale_available,
- "0.1875 0.125 0.0625 0.03125 0.015625 0.007813");
-
-static IIO_CONST_ATTR_NAMED(ads1015_sampling_frequency_available,
- sampling_frequency_available, "128 250 490 920 1600 2400 3300");
-static IIO_CONST_ATTR_NAMED(ads1115_sampling_frequency_available,
- sampling_frequency_available, "8 16 32 64 128 250 475 860");
-
-static struct attribute *ads1015_attributes[] = {
- &iio_const_attr_ads1015_scale_available.dev_attr.attr,
- &iio_const_attr_ads1015_sampling_frequency_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ads1015_attribute_group = {
- .attrs = ads1015_attributes,
-};
-
-static struct attribute *ads1115_attributes[] = {
- &iio_const_attr_ads1115_scale_available.dev_attr.attr,
- &iio_const_attr_ads1115_sampling_frequency_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ads1115_attribute_group = {
- .attrs = ads1115_attributes,
-};
-
static const struct iio_info ads1015_info = {
+ .read_avail = ads1015_read_avail,
.read_raw = ads1015_read_raw,
.write_raw = ads1015_write_raw,
.read_event_value = ads1015_read_event,
.write_event_value = ads1015_write_event,
.read_event_config = ads1015_read_event_config,
.write_event_config = ads1015_write_event_config,
- .attrs = &ads1015_attribute_group,
};
-static const struct iio_info ads1115_info = {
+static const struct iio_info tla2024_info = {
+ .read_avail = ads1015_read_avail,
.read_raw = ads1015_read_raw,
.write_raw = ads1015_write_raw,
- .read_event_value = ads1015_read_event,
- .write_event_value = ads1015_write_event,
- .read_event_config = ads1015_read_event_config,
- .write_event_config = ads1015_write_event_config,
- .attrs = &ads1115_attribute_group,
};
static int ads1015_client_get_channels_config(struct i2c_client *client)
@@ -932,12 +977,18 @@ static int ads1015_set_conv_mode(struct ads1015_data *data, int mode)
static int ads1015_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const struct ads1015_chip_data *chip;
struct iio_dev *indio_dev;
struct ads1015_data *data;
int ret;
- enum chip_ids chip;
int i;
+ chip = device_get_match_data(&client->dev);
+ if (!chip)
+ chip = (const struct ads1015_chip_data *)id->driver_data;
+ if (!chip)
+ return dev_err_probe(&client->dev, -EINVAL, "Unknown chip\n");
+
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
@@ -950,28 +1001,12 @@ static int ads1015_probe(struct i2c_client *client,
indio_dev->name = ADS1015_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
- chip = (uintptr_t)device_get_match_data(&client->dev);
- if (chip == ADSXXXX)
- chip = id->driver_data;
- switch (chip) {
- case ADS1015:
- indio_dev->channels = ads1015_channels;
- indio_dev->num_channels = ARRAY_SIZE(ads1015_channels);
- indio_dev->info = &ads1015_info;
- data->data_rate = (unsigned int *) &ads1015_data_rate;
- break;
- case ADS1115:
- indio_dev->channels = ads1115_channels;
- indio_dev->num_channels = ARRAY_SIZE(ads1115_channels);
- indio_dev->info = &ads1115_info;
- data->data_rate = (unsigned int *) &ads1115_data_rate;
- break;
- default:
- dev_err(&client->dev, "Unknown chip %d\n", chip);
- return -EINVAL;
- }
-
+ indio_dev->channels = chip->channels;
+ indio_dev->num_channels = chip->num_channels;
+ indio_dev->info = chip->info;
+ data->chip = chip;
data->event_channel = ADS1015_CHANNELS;
+
/*
* Set default lower and upper threshold to min and max value
* respectively.
@@ -986,7 +1021,9 @@ static int ads1015_probe(struct i2c_client *client,
/* we need to keep this ABI the same as used by hwmon ADS1015 driver */
ads1015_get_channels_config(client);
- data->regmap = devm_regmap_init_i2c(client, &ads1015_regmap_config);
+ data->regmap = devm_regmap_init_i2c(client, chip->has_comparator ?
+ &ads1015_regmap_config :
+ &tla2024_regmap_config);
if (IS_ERR(data->regmap)) {
dev_err(&client->dev, "Failed to allocate register map\n");
return PTR_ERR(data->regmap);
@@ -1000,7 +1037,7 @@ static int ads1015_probe(struct i2c_client *client,
return ret;
}
- if (client->irq) {
+ if (client->irq && chip->has_comparator) {
unsigned long irq_trig =
irqd_get_trigger_type(irq_get_irq_data(client->irq));
unsigned int cfg_comp_mask = ADS1015_CFG_COMP_QUE_MASK |
@@ -1099,22 +1136,51 @@ static const struct dev_pm_ops ads1015_pm_ops = {
ads1015_runtime_resume, NULL)
};
+static const struct ads1015_chip_data ads1015_data = {
+ .channels = ads1015_channels,
+ .num_channels = ARRAY_SIZE(ads1015_channels),
+ .info = &ads1015_info,
+ .data_rate = ads1015_data_rate,
+ .data_rate_len = ARRAY_SIZE(ads1015_data_rate),
+ .scale = ads1015_scale,
+ .scale_len = ARRAY_SIZE(ads1015_scale),
+ .has_comparator = true,
+};
+
+static const struct ads1015_chip_data ads1115_data = {
+ .channels = ads1115_channels,
+ .num_channels = ARRAY_SIZE(ads1115_channels),
+ .info = &ads1015_info,
+ .data_rate = ads1115_data_rate,
+ .data_rate_len = ARRAY_SIZE(ads1115_data_rate),
+ .scale = ads1115_scale,
+ .scale_len = ARRAY_SIZE(ads1115_scale),
+ .has_comparator = true,
+};
+
+static const struct ads1015_chip_data tla2024_data = {
+ .channels = tla2024_channels,
+ .num_channels = ARRAY_SIZE(tla2024_channels),
+ .info = &tla2024_info,
+ .data_rate = ads1015_data_rate,
+ .data_rate_len = ARRAY_SIZE(ads1015_data_rate),
+ .scale = ads1015_scale,
+ .scale_len = ARRAY_SIZE(ads1015_scale),
+ .has_comparator = false,
+};
+
static const struct i2c_device_id ads1015_id[] = {
- {"ads1015", ADS1015},
- {"ads1115", ADS1115},
+ { "ads1015", (kernel_ulong_t)&ads1015_data },
+ { "ads1115", (kernel_ulong_t)&ads1115_data },
+ { "tla2024", (kernel_ulong_t)&tla2024_data },
{}
};
MODULE_DEVICE_TABLE(i2c, ads1015_id);
static const struct of_device_id ads1015_of_match[] = {
- {
- .compatible = "ti,ads1015",
- .data = (void *)ADS1015
- },
- {
- .compatible = "ti,ads1115",
- .data = (void *)ADS1115
- },
+ { .compatible = "ti,ads1015", .data = &ads1015_data },
+ { .compatible = "ti,ads1115", .data = &ads1115_data },
+ { .compatible = "ti,tla2024", .data = &tla2024_data },
{}
};
MODULE_DEVICE_TABLE(of, ads1015_of_match);
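
The probe rewrite above replaces the chip-id enum switch with per-chip data looked up through the match tables. A condensed sketch of that lookup order (the surrounding function body is illustrative; only device_get_match_data() and the id->driver_data cast mirror the patch):

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	const struct ads1015_chip_data *chip;

	/* OF/ACPI match data first, then the legacy i2c id table. */
	chip = device_get_match_data(&client->dev);
	if (!chip && id)
		chip = (const struct ads1015_chip_data *)id->driver_data;
	if (!chip)
		return dev_err_probe(&client->dev, -EINVAL, "Unknown chip\n");

	dev_dbg(&client->dev, "matched chip data\n");
	return 0;
}

Because both the of_device_id and i2c_device_id tables now carry pointers to the same chip-data structs, adding a new variant (as done for the tla2024 here) only requires a new table entry.
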
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 22c2583eedd0..708cca0a63be 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -508,6 +508,7 @@ MODULE_DEVICE_TABLE(of, ads8688_of_match);
static struct spi_driver ads8688_driver = {
.driver = {
.name = "ads8688",
+ .of_match_table = ads8688_of_match,
},
.probe = ads8688_probe,
.remove = ads8688_remove,
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index dbdc1ef48566..567d43a30955 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -376,9 +376,7 @@ static int tiadc_iio_buffered_hardware_setup(struct device *dev,
{
int ret;
- ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
- setup_ops);
+ ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops);
if (ret)
return ret;
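
This call-site change follows from the kfifo_buf.c hunk further below: devm_iio_kfifo_buffer_setup() no longer takes a mode_flags argument and always marks the device INDIO_BUFFER_SOFTWARE. A converted call site therefore reduces to roughly the following (the example_setup_ops name is illustrative):

static const struct iio_buffer_setup_ops example_setup_ops;

static int example_setup_buffer(struct device *dev, struct iio_dev *indio_dev)
{
	/* INDIO_BUFFER_SOFTWARE is now set by the helper itself. */
	return devm_iio_kfifo_buffer_setup(dev, indio_dev, &example_setup_ops);
}
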
diff --git a/drivers/iio/afe/Kconfig b/drivers/iio/afe/Kconfig
index 4fa397822cff..9a1d95c1c7ed 100644
--- a/drivers/iio/afe/Kconfig
+++ b/drivers/iio/afe/Kconfig
@@ -8,7 +8,6 @@ menu "Analog Front Ends"
config IIO_RESCALE
tristate "IIO rescale"
- depends on OF || COMPILE_TEST
help
Say yes here to build support for the IIO rescaling
that handles voltage dividers, current sense shunts and
diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c
index 7e511293d6d1..c6cf709f0f05 100644
--- a/drivers/iio/afe/iio-rescale.c
+++ b/drivers/iio/afe/iio-rescale.c
@@ -10,9 +10,8 @@
#include <linux/err.h>
#include <linux/gcd.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -536,7 +535,7 @@ static int rescale_probe(struct platform_device *pdev)
rescale = iio_priv(indio_dev);
- rescale->cfg = of_device_get_match_data(dev);
+ rescale->cfg = device_get_match_data(dev);
rescale->numerator = 1;
rescale->denominator = 1;
rescale->offset = 0;
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index 416d35a61ae2..35d8b4077376 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -259,8 +259,6 @@ static struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
* devm_iio_kfifo_buffer_setup_ext - Allocate a kfifo buffer & attach it to an IIO device
* @dev: Device object to which to attach the life-time of this kfifo buffer
* @indio_dev: The device the buffer should be attached to
- * @mode_flags: The mode flags for this buffer (INDIO_BUFFER_SOFTWARE and/or
- * INDIO_BUFFER_TRIGGERED).
* @setup_ops: The setup_ops required to configure the HW part of the buffer (optional)
* @buffer_attrs: Extra sysfs buffer attributes for this IIO buffer
*
@@ -271,22 +269,16 @@ static struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
*/
int devm_iio_kfifo_buffer_setup_ext(struct device *dev,
struct iio_dev *indio_dev,
- int mode_flags,
const struct iio_buffer_setup_ops *setup_ops,
const struct attribute **buffer_attrs)
{
struct iio_buffer *buffer;
- if (!mode_flags)
- return -EINVAL;
-
buffer = devm_iio_kfifo_allocate(dev);
if (!buffer)
return -ENOMEM;
- mode_flags &= kfifo_access_funcs.modes;
-
- indio_dev->modes |= mode_flags;
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
indio_dev->setup_ops = setup_ops;
buffer->attrs = buffer_attrs;
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index b2725c6adc7f..5976aca48e3b 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -333,8 +333,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
* We can not use trigger here, as events are generated
* as soon as sample_frequency is set.
*/
- ret = devm_iio_kfifo_buffer_setup_ext(dev, indio_dev,
- INDIO_BUFFER_SOFTWARE, NULL,
+ ret = devm_iio_kfifo_buffer_setup_ext(dev, indio_dev, NULL,
cros_ec_sensor_fifo_attributes);
if (ret)
return ret;
@@ -413,7 +412,7 @@ static ssize_t cros_ec_sensors_calibrate(struct iio_dev *indio_dev,
int ret, i;
bool calibrate;
- ret = strtobool(buf, &calibrate);
+ ret = kstrtobool(buf, &calibrate);
if (ret < 0)
return ret;
if (!calibrate)
diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c
index d538bf3ab1ef..793d628db55f 100644
--- a/drivers/iio/common/scmi_sensors/scmi_iio.c
+++ b/drivers/iio/common/scmi_sensors/scmi_iio.c
@@ -686,7 +686,6 @@ static int scmi_iio_dev_probe(struct scmi_device *sdev)
err = devm_iio_kfifo_buffer_setup(&scmi_iio_dev->dev,
scmi_iio_dev,
- INDIO_BUFFER_SOFTWARE,
&scmi_iio_buffer_ops);
if (err < 0) {
dev_err(dev,
diff --git a/drivers/iio/common/ssp_sensors/ssp_spi.c b/drivers/iio/common/ssp_sensors/ssp_spi.c
index 769bd9280524..f32b04b63ea1 100644
--- a/drivers/iio/common/ssp_sensors/ssp_spi.c
+++ b/drivers/iio/common/ssp_sensors/ssp_spi.c
@@ -331,12 +331,11 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
/* threaded irq */
int ssp_irq_msg(struct ssp_data *data)
{
- bool found = false;
char *buffer;
u8 msg_type;
int ret;
u16 length, msg_options;
- struct ssp_msg *msg, *n;
+ struct ssp_msg *msg = NULL, *iter, *n;
ret = spi_read(data->spi, data->header_buffer, SSP_HEADER_BUFFER_SIZE);
if (ret < 0) {
@@ -362,15 +361,15 @@ int ssp_irq_msg(struct ssp_data *data)
* received with no order
*/
mutex_lock(&data->pending_lock);
- list_for_each_entry_safe(msg, n, &data->pending_list, list) {
- if (msg->options == msg_options) {
- list_del(&msg->list);
- found = true;
+ list_for_each_entry_safe(iter, n, &data->pending_list, list) {
+ if (iter->options == msg_options) {
+ list_del(&iter->list);
+ msg = iter;
break;
}
}
- if (!found) {
+ if (!msg) {
/*
* here can be implemented dead messages handling
* but the slave should not send such ones - it is to
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index fa9bcdf0d190..9910ba1da085 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -71,16 +71,18 @@ st_sensors_match_odr_error:
int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
{
- int err;
+ int err = 0;
struct st_sensor_odr_avl odr_out = {0, 0};
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ mutex_lock(&sdata->odr_lock);
+
if (!sdata->sensor_settings->odr.mask)
- return 0;
+ goto unlock_mutex;
err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
if (err < 0)
- goto st_sensors_match_odr_error;
+ goto unlock_mutex;
if ((sdata->sensor_settings->odr.addr ==
sdata->sensor_settings->pw.addr) &&
@@ -103,7 +105,9 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
if (err >= 0)
sdata->odr = odr_out.hz;
-st_sensors_match_odr_error:
+unlock_mutex:
+ mutex_unlock(&sdata->odr_lock);
+
return err;
}
EXPORT_SYMBOL_NS(st_sensors_set_odr, IIO_ST_SENSORS);
@@ -361,6 +365,8 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
struct st_sensors_platform_data *of_pdata;
int err = 0;
+ mutex_init(&sdata->odr_lock);
+
/* If OF/DT pdata exists, it will take precedence of anything else */
of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata);
if (IS_ERR(of_pdata))
@@ -549,26 +555,28 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
int err;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
- err = -EBUSY;
+ err = iio_device_claim_direct_mode(indio_dev);
+ if (err)
+ return err;
+
+ mutex_lock(&sdata->odr_lock);
+
+ err = st_sensors_set_enable(indio_dev, true);
+ if (err < 0)
goto out;
- } else {
- err = st_sensors_set_enable(indio_dev, true);
- if (err < 0)
- goto out;
- msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr);
- err = st_sensors_read_axis_data(indio_dev, ch, val);
- if (err < 0)
- goto out;
+ msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr);
+ err = st_sensors_read_axis_data(indio_dev, ch, val);
+ if (err < 0)
+ goto out;
- *val = *val >> ch->scan_type.shift;
+ *val = *val >> ch->scan_type.shift;
+
+ err = st_sensors_set_enable(indio_dev, false);
- err = st_sensors_set_enable(indio_dev, false);
- }
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&sdata->odr_lock);
+ iio_device_release_direct_mode(indio_dev);
return err;
}
@@ -641,7 +649,6 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct st_sensor_data *sdata = iio_priv(indio_dev);
- mutex_lock(&indio_dev->mlock);
for (i = 0; i < ST_SENSORS_ODR_LIST_MAX; i++) {
if (sdata->sensor_settings->odr.odr_avl[i].hz == 0)
break;
@@ -649,7 +656,6 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
sdata->sensor_settings->odr.odr_avl[i].hz);
}
- mutex_unlock(&indio_dev->mlock);
buf[len - 1] = '\n';
return len;
@@ -663,7 +669,6 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct st_sensor_data *sdata = iio_priv(indio_dev);
- mutex_lock(&indio_dev->mlock);
for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
if (sdata->sensor_settings->fs.fs_avl[i].num == 0)
break;
@@ -673,7 +678,6 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r);
}
- mutex_unlock(&indio_dev->mlock);
buf[len - 1] = '\n';
return len;
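
The st_sensors changes above replace the indio_dev->mlock usage with a driver-private odr_lock plus iio_device_claim_direct_mode(), which fails while buffered capture is running. A stripped-down sketch of that pattern (struct example_data and example_read_hw() are hypothetical):

static int example_read_raw_locked(struct iio_dev *indio_dev, int *val)
{
	struct example_data *data = iio_priv(indio_dev);
	int ret;

	ret = iio_device_claim_direct_mode(indio_dev);	/* -EBUSY if buffering */
	if (ret)
		return ret;

	mutex_lock(&data->lock);			/* driver-private state lock */
	ret = example_read_hw(data, val);		/* hypothetical helper */
	mutex_unlock(&data->lock);

	iio_device_release_direct_mode(indio_dev);
	return ret;
}
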
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index c0bf0d84197f..d1c7bde8aece 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -285,7 +285,6 @@ config CIO_DAC
config DPOT_DAC
tristate "DAC emulation using a DPOT"
- depends on OF
help
Say yes here to build support for DAC emulation using a digital
potentiometer.
@@ -305,7 +304,7 @@ config DS4424
config LPC18XX_DAC
tristate "NXP LPC18xx DAC driver"
depends on ARCH_LPC18XX || COMPILE_TEST
- depends on OF && HAS_IOMEM
+ depends on HAS_IOMEM
help
Say yes here to build support for NXP LPC18XX DAC.
@@ -442,7 +441,6 @@ config TI_DAC7612
config VF610_DAC
tristate "Vybrid vf610 DAC driver"
- depends on OF
depends on HAS_IOMEM
help
Say yes here to support Vybrid board digital-to-analog converter.
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index 27ee2c63c5d4..d87cf14daabe 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -288,7 +288,7 @@ static ssize_t ad5064_write_dac_powerdown(struct iio_dev *indio_dev,
bool pwr_down;
int ret;
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
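
This hunk, like the cros_ec change above and the rest of the DAC hunks below, is a mechanical strtobool() → kstrtobool() rename; strtobool() has been a thin wrapper around kstrtobool() for some time and is being retired. Usage is unchanged, roughly:

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	bool pwr_down;
	int ret;

	ret = kstrtobool(buf, &pwr_down);	/* accepts "y"/"n", "1"/"0", ... */
	if (ret)
		return ret;

	return len;
}
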
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index ecbc6a51d60f..22b000a40828 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -284,7 +284,7 @@ static ssize_t ad5360_write_dac_powerdown(struct device *dev,
bool pwr_down;
int ret;
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 82e1d9bd773e..a44c83242fb1 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -96,7 +96,7 @@ static ssize_t ad5380_write_dac_powerdown(struct iio_dev *indio_dev,
bool pwr_down;
int ret;
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index fdf824041497..09e242949cd0 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -114,7 +114,7 @@ static ssize_t ad5446_write_dac_powerdown(struct iio_dev *indio_dev,
bool powerdown;
int ret;
- ret = strtobool(buf, &powerdown);
+ ret = kstrtobool(buf, &powerdown);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 8507573aa13e..a0817e799cc0 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -182,7 +182,7 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
int ret;
struct ad5504_state *st = iio_priv(indio_dev);
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 371e812850eb..7e6f824de299 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -129,7 +129,7 @@ static ssize_t ad5624r_write_dac_powerdown(struct iio_dev *indio_dev,
int ret;
struct ad5624r_state *st = iio_priv(indio_dev);
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index f78dd3f33199..15361d8bbf94 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -73,7 +73,7 @@ static ssize_t ad5686_write_dac_powerdown(struct iio_dev *indio_dev,
unsigned int val, ref_bit_msk;
u8 shift, address = 0;
- ret = strtobool(buf, &readin);
+ ret = kstrtobool(buf, &readin);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 7a62e6e1d5f1..1a63b8456725 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -502,7 +502,7 @@ static ssize_t ad5755_write_powerdown(struct iio_dev *indio_dev, uintptr_t priv,
bool pwr_down;
int ret;
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 2b14914b4050..339564fe47d1 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -188,7 +188,7 @@ static ssize_t ad5791_write_dac_powerdown(struct iio_dev *indio_dev,
int ret;
struct ad5791_state *st = iio_priv(indio_dev);
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index 91eaaf793b3e..03edf046dec6 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -77,7 +77,7 @@ static ssize_t ad7303_write_dac_powerdown(struct iio_dev *indio_dev,
bool pwr_down;
int ret;
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index aed46c80757e..3a3c4f4874e4 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -10,6 +10,7 @@
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <asm/unaligned.h>
@@ -149,7 +150,7 @@ static ssize_t ltc2632_write_dac_powerdown(struct iio_dev *indio_dev,
int ret;
struct ltc2632_state *st = iio_priv(indio_dev);
- ret = strtobool(buf, &pwr_down);
+ ret = kstrtobool(buf, &pwr_down);
if (ret)
return ret;
@@ -362,8 +363,7 @@ static int ltc2632_probe(struct spi_device *spi)
}
}
- indio_dev->name = dev_of_node(&spi->dev) ? dev_of_node(&spi->dev)->name
- : spi_get_device_id(spi)->name;
+ indio_dev->name = fwnode_get_name(dev_fwnode(&spi->dev)) ?: spi_get_device_id(spi)->name;
indio_dev->info = &ltc2632_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = chip_info->channels;
@@ -469,7 +469,7 @@ MODULE_DEVICE_TABLE(of, ltc2632_of_match);
static struct spi_driver ltc2632_driver = {
.driver = {
.name = "ltc2632",
- .of_match_table = of_match_ptr(ltc2632_of_match),
+ .of_match_table = ltc2632_of_match,
},
.probe = ltc2632_probe,
.remove = ltc2632_remove,
diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c
index 2f9c384885f4..937b0d25a11c 100644
--- a/drivers/iio/dac/ltc2688.c
+++ b/drivers/iio/dac/ltc2688.c
@@ -703,21 +703,20 @@ static int ltc2688_tgp_clk_setup(struct ltc2688_state *st,
struct ltc2688_chan *chan,
struct fwnode_handle *node, int tgp)
{
+ struct device *dev = &st->spi->dev;
unsigned long rate;
struct clk *clk;
int ret, f;
- clk = devm_get_clk_from_child(&st->spi->dev, to_of_node(node), NULL);
+ clk = devm_get_clk_from_child(dev, to_of_node(node), NULL);
if (IS_ERR(clk))
- return dev_err_probe(&st->spi->dev, PTR_ERR(clk),
- "failed to get tgp clk.\n");
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to get tgp clk.\n");
ret = clk_prepare_enable(clk);
if (ret)
- return dev_err_probe(&st->spi->dev, ret,
- "failed to enable tgp clk.\n");
+ return dev_err_probe(dev, ret, "failed to enable tgp clk.\n");
- ret = devm_add_action_or_reset(&st->spi->dev, ltc2688_clk_disable, clk);
+ ret = devm_add_action_or_reset(dev, ltc2688_clk_disable, clk);
if (ret)
return ret;
@@ -858,6 +857,7 @@ static int ltc2688_channel_config(struct ltc2688_state *st)
static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref)
{
+ struct device *dev = &st->spi->dev;
struct gpio_desc *gpio;
int ret;
@@ -865,10 +865,9 @@ static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref)
* If we have a reset pin, use that to reset the board, If not, use
* the reset bit.
*/
- gpio = devm_gpiod_get_optional(&st->spi->dev, "clr", GPIOD_OUT_HIGH);
+ gpio = devm_gpiod_get_optional(dev, "clr", GPIOD_OUT_HIGH);
if (IS_ERR(gpio))
- return dev_err_probe(&st->spi->dev, PTR_ERR(gpio),
- "Failed to get reset gpio");
+ return dev_err_probe(dev, PTR_ERR(gpio), "Failed to get reset gpio");
if (gpio) {
usleep_range(1000, 1200);
/* bring device out of reset */
@@ -887,7 +886,7 @@ static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref)
* Duplicate the default channel configuration as it can change during
* @ltc2688_channel_config()
*/
- st->iio_chan = devm_kmemdup(&st->spi->dev, ltc2688_channels,
+ st->iio_chan = devm_kmemdup(dev, ltc2688_channels,
sizeof(ltc2688_channels), GFP_KERNEL);
if (!st->iio_chan)
return -ENOMEM;
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
index fce640b7f1c8..540f9ea7cada 100644
--- a/drivers/iio/dac/max5821.c
+++ b/drivers/iio/dac/max5821.c
@@ -116,7 +116,7 @@ static ssize_t max5821_write_dac_powerdown(struct iio_dev *indio_dev,
bool powerdown;
int ret;
- ret = strtobool(buf, &powerdown);
+ ret = kstrtobool(buf, &powerdown);
if (ret)
return ret;
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 842bad57cb88..7fcb86288823 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -80,7 +80,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
bool state;
int ret;
- ret = strtobool(buf, &state);
+ ret = kstrtobool(buf, &state);
if (ret < 0)
return ret;
@@ -178,7 +178,7 @@ static ssize_t mcp4725_write_powerdown(struct iio_dev *indio_dev,
bool state;
int ret;
- ret = strtobool(buf, &state);
+ ret = kstrtobool(buf, &state);
if (ret)
return ret;
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index b20192a071cb..daa42bcbae83 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -220,7 +220,7 @@ static ssize_t stm32_dac_write_powerdown(struct iio_dev *indio_dev,
bool powerdown;
int ret;
- ret = strtobool(buf, &powerdown);
+ ret = kstrtobool(buf, &powerdown);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ti-dac082s085.c b/drivers/iio/dac/ti-dac082s085.c
index 4e1156e6deb2..106ce3546419 100644
--- a/drivers/iio/dac/ti-dac082s085.c
+++ b/drivers/iio/dac/ti-dac082s085.c
@@ -133,7 +133,7 @@ static ssize_t ti_dac_write_powerdown(struct iio_dev *indio_dev,
bool powerdown;
int ret;
- ret = strtobool(buf, &powerdown);
+ ret = kstrtobool(buf, &powerdown);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index 0b775f943db3..4b6b04038e94 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -179,7 +179,7 @@ static ssize_t dac5571_write_powerdown(struct iio_dev *indio_dev,
bool powerdown;
int ret;
- ret = strtobool(buf, &powerdown);
+ ret = kstrtobool(buf, &powerdown);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c
index e10d17e60ed3..4afc411725d9 100644
--- a/drivers/iio/dac/ti-dac7311.c
+++ b/drivers/iio/dac/ti-dac7311.c
@@ -123,7 +123,7 @@ static ssize_t ti_dac_write_powerdown(struct iio_dev *indio_dev,
u8 power;
int ret;
- ret = strtobool(buf, &powerdown);
+ ret = kstrtobool(buf, &powerdown);
if (ret)
return ret;
diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c
index c0b7ef900735..c24f609c2ade 100644
--- a/drivers/iio/dummy/iio_simple_dummy.c
+++ b/drivers/iio/dummy/iio_simple_dummy.c
@@ -575,10 +575,9 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
*/
swd = kzalloc(sizeof(*swd), GFP_KERNEL);
- if (!swd) {
- ret = -ENOMEM;
- goto error_kzalloc;
- }
+ if (!swd)
+ return ERR_PTR(-ENOMEM);
+
/*
* Allocate an IIO device.
*
@@ -590,7 +589,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
indio_dev = iio_device_alloc(parent, sizeof(*st));
if (!indio_dev) {
ret = -ENOMEM;
- goto error_ret;
+ goto error_free_swd;
}
st = iio_priv(indio_dev);
@@ -616,6 +615,10 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
* indio_dev->name = spi_get_device_id(spi)->name;
*/
indio_dev->name = kstrdup(name, GFP_KERNEL);
+ if (!indio_dev->name) {
+ ret = -ENOMEM;
+ goto error_free_device;
+ }
/* Provide description of available channels */
indio_dev->channels = iio_dummy_channels;
@@ -632,7 +635,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
ret = iio_simple_dummy_events_register(indio_dev);
if (ret < 0)
- goto error_free_device;
+ goto error_free_name;
ret = iio_simple_dummy_configure_buffer(indio_dev);
if (ret < 0)
@@ -649,11 +652,12 @@ error_unconfigure_buffer:
iio_simple_dummy_unconfigure_buffer(indio_dev);
error_unregister_events:
iio_simple_dummy_events_unregister(indio_dev);
+error_free_name:
+ kfree(indio_dev->name);
error_free_device:
iio_device_free(indio_dev);
-error_ret:
+error_free_swd:
kfree(swd);
-error_kzalloc:
return ERR_PTR(ret);
}
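
The reworked error path above adds a label per allocation so each failure unwinds exactly what has been set up, in reverse order — including the newly checked kstrdup(). A condensed sketch of that convention (struct example and example_register() are made up for illustration):

struct example {
	const char *name;
};

static int example_create(const char *name)
{
	struct example *ex;
	int ret;

	ex = kzalloc(sizeof(*ex), GFP_KERNEL);
	if (!ex)
		return -ENOMEM;

	ex->name = kstrdup(name, GFP_KERNEL);
	if (!ex->name) {
		ret = -ENOMEM;
		goto err_free_ex;
	}

	ret = example_register(ex);	/* hypothetical registration step */
	if (ret)
		goto err_free_name;

	return 0;

err_free_name:
	kfree(ex->name);
err_free_ex:
	kfree(ex);
	return ret;
}
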
diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c
index d81c2b2dad82..9b2f99449a82 100644
--- a/drivers/iio/dummy/iio_simple_dummy_buffer.c
+++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c
@@ -45,41 +45,31 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
+ int i = 0, j;
u16 *data;
data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
if (!data)
goto done;
- if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
- /*
- * Three common options here:
- * hardware scans: certain combinations of channels make
- * up a fast read. The capture will consist of all of them.
- * Hence we just call the grab data function and fill the
- * buffer without processing.
- * software scans: can be considered to be random access
- * so efficient reading is just a case of minimal bus
- * transactions.
- * software culled hardware scans:
- * occasionally a driver may process the nearest hardware
- * scan to avoid storing elements that are not desired. This
- * is the fiddliest option by far.
- * Here let's pretend we have random access. And the values are
- * in the constant table fakedata.
- */
- int i, j;
-
- for (i = 0, j = 0;
- i < bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength);
- i++, j++) {
- j = find_next_bit(indio_dev->active_scan_mask,
- indio_dev->masklength, j);
- /* random access read from the 'device' */
- data[i] = fakedata[j];
- }
- }
+ /*
+ * Three common options here:
+ * hardware scans:
+ * certain combinations of channels make up a fast read. The capture
+ * will consist of all of them. Hence we just call the grab data
+ * function and fill the buffer without processing.
+ * software scans:
+ * can be considered to be random access so efficient reading is just
+ * a case of minimal bus transactions.
+ * software culled hardware scans:
+ * occasionally a driver may process the nearest hardware scan to avoid
+ * storing elements that are not desired. This is the fiddliest option
+ * by far.
+ * Here let's pretend we have random access. And the values are in the
+ * constant table fakedata.
+ */
+ for_each_set_bit(j, indio_dev->active_scan_mask, indio_dev->masklength)
+ data[i++] = fakedata[j];
iio_push_to_buffers_with_timestamp(indio_dev, data,
iio_get_time_ns(indio_dev));
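
The handler rewrite above drops the bitmap_weight()/find_next_bit() bookkeeping in favour of for_each_set_bit(), which visits only the bits that are set. A tiny illustration of the helper on its own (names hypothetical):

static void example_dump_mask(const unsigned long *mask, unsigned int nbits)
{
	unsigned int bit;

	for_each_set_bit(bit, mask, nbits)	/* skips cleared bits entirely */
		pr_info("channel %u enabled\n", bit);
}
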
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index a0f92c336fc4..942870539268 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -516,7 +516,7 @@ static ssize_t ad9523_store(struct device *dev,
bool state;
int ret;
- ret = strtobool(buf, &state);
+ ret = kstrtobool(buf, &state);
if (ret < 0)
return ret;
diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c
index 410e5e9f2672..0923fd793492 100644
--- a/drivers/iio/gyro/fxas21002c_core.c
+++ b/drivers/iio/gyro/fxas21002c_core.c
@@ -7,9 +7,9 @@
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/of_irq.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -822,7 +822,6 @@ static int fxas21002c_trigger_probe(struct fxas21002c_data *data)
{
struct device *dev = regmap_get_device(data->regmap);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct device_node *np = indio_dev->dev.of_node;
unsigned long irq_trig;
bool irq_open_drain;
int irq1;
@@ -831,8 +830,7 @@ static int fxas21002c_trigger_probe(struct fxas21002c_data *data)
if (!data->irq)
return 0;
- irq1 = of_irq_get_byname(np, "INT1");
-
+ irq1 = fwnode_irq_get_byname(dev_fwnode(dev), "INT1");
if (irq1 == data->irq) {
dev_info(dev, "using interrupt line INT1\n");
ret = regmap_field_write(data->regmap_fields[F_INT_CFG_DRDY],
@@ -843,7 +841,7 @@ static int fxas21002c_trigger_probe(struct fxas21002c_data *data)
dev_info(dev, "using interrupt line INT2\n");
- irq_open_drain = of_property_read_bool(np, "drive-open-drain");
+ irq_open_drain = device_property_read_bool(dev, "drive-open-drain");
data->dready_trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
indio_dev->name,
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index ea387efab62d..4f19dc7ffe57 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/random.h>
#include <linux/slab.h>
@@ -1050,6 +1051,7 @@ static const struct iio_trigger_ops mpu3050_trigger_ops = {
static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
{
struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ struct device *dev = mpu3050->dev;
unsigned long irq_trig;
int ret;
@@ -1061,8 +1063,7 @@ static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
return -ENOMEM;
/* Check if IRQ is open drain */
- if (of_property_read_bool(mpu3050->dev->of_node, "drive-open-drain"))
- mpu3050->irq_opendrain = true;
+ mpu3050->irq_opendrain = device_property_read_bool(dev, "drive-open-drain");
irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
/*
@@ -1118,13 +1119,12 @@ static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
mpu3050->trig->name,
mpu3050->trig);
if (ret) {
- dev_err(mpu3050->dev,
- "can't get IRQ %d, error %d\n", irq, ret);
+ dev_err(dev, "can't get IRQ %d, error %d\n", irq, ret);
return ret;
}
mpu3050->irq = irq;
- mpu3050->trig->dev.parent = mpu3050->dev;
+ mpu3050->trig->dev.parent = dev;
mpu3050->trig->ops = &mpu3050_trigger_ops;
iio_trigger_set_drvdata(mpu3050->trig, indio_dev);
@@ -1263,7 +1263,7 @@ err_power_down:
}
EXPORT_SYMBOL(mpu3050_common_probe);
-int mpu3050_common_remove(struct device *dev)
+void mpu3050_common_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct mpu3050 *mpu3050 = iio_priv(indio_dev);
@@ -1276,8 +1276,6 @@ int mpu3050_common_remove(struct device *dev)
free_irq(mpu3050->irq, mpu3050);
iio_device_unregister(indio_dev);
mpu3050_power_down(mpu3050);
-
- return 0;
}
EXPORT_SYMBOL(mpu3050_common_remove);
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index ef5bcbc4b45b..5b5f58baaf7f 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -86,7 +86,9 @@ static int mpu3050_i2c_remove(struct i2c_client *client)
if (mpu3050->i2cmux)
i2c_mux_del_adapters(mpu3050->i2cmux);
- return mpu3050_common_remove(&client->dev);
+ mpu3050_common_remove(&client->dev);
+
+ return 0;
}
/*
diff --git a/drivers/iio/gyro/mpu3050.h b/drivers/iio/gyro/mpu3050.h
index 835b0249c376..faf4168a3b07 100644
--- a/drivers/iio/gyro/mpu3050.h
+++ b/drivers/iio/gyro/mpu3050.h
@@ -91,7 +91,7 @@ int mpu3050_common_probe(struct device *dev,
struct regmap *map,
int irq,
const char *name);
-int mpu3050_common_remove(struct device *dev);
+void mpu3050_common_remove(struct device *dev);
/* PM ops */
extern const struct dev_pm_ops mpu3050_dev_pm_ops;
diff --git a/drivers/iio/gyro/ssp_gyro_sensor.c b/drivers/iio/gyro/ssp_gyro_sensor.c
index 5fd1bf9902ea..d332474bc484 100644
--- a/drivers/iio/gyro/ssp_gyro_sensor.c
+++ b/drivers/iio/gyro/ssp_gyro_sensor.c
@@ -113,7 +113,6 @@ static int ssp_gyro_probe(struct platform_device *pdev)
indio_dev->available_scan_masks = ssp_gyro_scan_mask;
ret = devm_iio_kfifo_buffer_setup(&pdev->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&ssp_gyro_buffer_ops);
if (ret)
return ret;
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 62172e18d0d8..eaa35da42b33 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -406,24 +406,17 @@ read_error:
static int st_gyro_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long mask)
{
- int err;
-
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
- break;
+ return st_sensors_set_fullscale_by_gain(indio_dev, val2);
case IIO_CHAN_INFO_SAMP_FREQ:
if (val2)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
- err = st_sensors_set_odr(indio_dev, val);
- mutex_unlock(&indio_dev->mlock);
- return err;
+
+ return st_sensors_set_odr(indio_dev, val);
default:
- err = -EINVAL;
+ return -EINVAL;
}
-
- return err;
}
static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 36ba7611d9ce..ad5717965223 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -433,7 +433,6 @@ static int max30100_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&max30100_buffer_setup_ops);
if (ret)
return ret;
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index 2292876c55e2..abbcef563807 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -542,7 +542,6 @@ static int max30102_probe(struct i2c_client *client,
}
ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&max30102_buffer_setup_ops);
if (ret)
return ret;
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 44bbe3d19907..fe520194a837 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -7,14 +7,16 @@
#include <linux/clk.h>
#include <linux/bitfield.h>
-#include <linux/of_irq.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/math.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/lcm.h>
+#include <linux/property.h>
#include <linux/swab.h>
#include <linux/crc32.h>
@@ -1119,6 +1121,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct adis16480 *st = iio_priv(indio_dev);
struct adis *adis = &st->adis;
+ struct device *dev = &adis->spi->dev;
int ret, bit, offset, i = 0;
__be16 *buffer;
u32 crc;
@@ -1130,7 +1133,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
adis->tx[1] = 0;
ret = spi_write(adis->spi, adis->tx, 2);
if (ret) {
- dev_err(&adis->spi->dev, "Failed to change device page: %d\n", ret);
+ dev_err(dev, "Failed to change device page: %d\n", ret);
adis_dev_unlock(adis);
goto irq_done;
}
@@ -1140,7 +1143,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
ret = spi_sync(adis->spi, &adis->msg);
if (ret) {
- dev_err(&adis->spi->dev, "Failed to read data: %d\n", ret);
+ dev_err(dev, "Failed to read data: %d\n", ret);
adis_dev_unlock(adis);
goto irq_done;
}
@@ -1168,14 +1171,14 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
}
if (offset == 4) {
- dev_err(&adis->spi->dev, "Invalid burst data\n");
+ dev_err(dev, "Invalid burst data\n");
goto irq_done;
}
crc = be16_to_cpu(buffer[offset + 16]) << 16 | be16_to_cpu(buffer[offset + 15]);
valid = adis16480_validate_crc((u16 *)&buffer[offset], 15, crc);
if (!valid) {
- dev_err(&adis->spi->dev, "Invalid crc\n");
+ dev_err(dev, "Invalid crc\n");
goto irq_done;
}
@@ -1214,12 +1217,12 @@ static const struct iio_info adis16480_info = {
static int adis16480_stop_device(struct iio_dev *indio_dev)
{
struct adis16480 *st = iio_priv(indio_dev);
+ struct device *dev = &st->adis.spi->dev;
int ret;
ret = adis_write_reg_16(&st->adis, ADIS16480_REG_SLP_CNT, BIT(9));
if (ret)
- dev_err(&indio_dev->dev,
- "Could not power down device: %d\n", ret);
+ dev_err(dev, "Could not power down device: %d\n", ret);
return ret;
}
@@ -1239,9 +1242,10 @@ static int adis16480_enable_irq(struct adis *adis, bool enable)
return __adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, val);
}
-static int adis16480_config_irq_pin(struct device_node *of_node,
- struct adis16480 *st)
+static int adis16480_config_irq_pin(struct adis16480 *st)
{
+ struct device *dev = &st->adis.spi->dev;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct irq_data *desc;
enum adis16480_int_pin pin;
unsigned int irq_type;
@@ -1250,7 +1254,7 @@ static int adis16480_config_irq_pin(struct device_node *of_node,
desc = irq_get_irq_data(st->adis.spi->irq);
if (!desc) {
- dev_err(&st->adis.spi->dev, "Could not find IRQ %d\n", irq);
+ dev_err(dev, "Could not find IRQ %d\n", irq);
return -EINVAL;
}
@@ -1267,7 +1271,7 @@ static int adis16480_config_irq_pin(struct device_node *of_node,
*/
pin = ADIS16480_PIN_DIO1;
for (i = 0; i < ARRAY_SIZE(adis16480_int_pin_names); i++) {
- irq = of_irq_get_byname(of_node, adis16480_int_pin_names[i]);
+ irq = fwnode_irq_get_byname(fwnode, adis16480_int_pin_names[i]);
if (irq > 0) {
pin = i;
break;
@@ -1287,23 +1291,22 @@ static int adis16480_config_irq_pin(struct device_node *of_node,
} else if (irq_type == IRQ_TYPE_EDGE_FALLING) {
val |= ADIS16480_DRDY_POL(0);
} else {
- dev_err(&st->adis.spi->dev,
- "Invalid interrupt type 0x%x specified\n", irq_type);
+ dev_err(dev, "Invalid interrupt type 0x%x specified\n", irq_type);
return -EINVAL;
}
/* Write the data ready configuration to the FNCTIO_CTRL register */
return adis_write_reg_16(&st->adis, ADIS16480_REG_FNCTIO_CTRL, val);
}
-static int adis16480_of_get_ext_clk_pin(struct adis16480 *st,
- struct device_node *of_node)
+static int adis16480_fw_get_ext_clk_pin(struct adis16480 *st)
{
+ struct device *dev = &st->adis.spi->dev;
const char *ext_clk_pin;
enum adis16480_int_pin pin;
int i;
pin = ADIS16480_PIN_DIO2;
- if (of_property_read_string(of_node, "adi,ext-clk-pin", &ext_clk_pin))
+ if (device_property_read_string(dev, "adi,ext-clk-pin", &ext_clk_pin))
goto clk_input_not_found;
for (i = 0; i < ARRAY_SIZE(adis16480_int_pin_names); i++) {
@@ -1312,15 +1315,13 @@ static int adis16480_of_get_ext_clk_pin(struct adis16480 *st,
}
clk_input_not_found:
- dev_info(&st->adis.spi->dev,
- "clk input line not specified, using DIO2\n");
+ dev_info(dev, "clk input line not specified, using DIO2\n");
return pin;
}
-static int adis16480_ext_clk_config(struct adis16480 *st,
- struct device_node *of_node,
- bool enable)
+static int adis16480_ext_clk_config(struct adis16480 *st, bool enable)
{
+ struct device *dev = &st->adis.spi->dev;
unsigned int mode, mask;
enum adis16480_int_pin pin;
uint16_t val;
@@ -1330,16 +1331,14 @@ static int adis16480_ext_clk_config(struct adis16480 *st,
if (ret)
return ret;
- pin = adis16480_of_get_ext_clk_pin(st, of_node);
+ pin = adis16480_fw_get_ext_clk_pin(st);
/*
* Each DIOx pin supports only one function at a time. When a single pin
* has two assignments, the enable bit for a lower priority function
* automatically resets to zero (disabling the lower priority function).
*/
if (pin == ADIS16480_DRDY_SEL(val))
- dev_warn(&st->adis.spi->dev,
- "DIO%x pin supports only one function at a time\n",
- pin + 1);
+ dev_warn(dev, "DIO%x pin supports only one function at a time\n", pin + 1);
mode = ADIS16480_SYNC_EN(enable) | ADIS16480_SYNC_SEL(pin);
mask = ADIS16480_SYNC_EN_MSK | ADIS16480_SYNC_SEL_MSK;
@@ -1361,31 +1360,27 @@ static int adis16480_ext_clk_config(struct adis16480 *st,
static int adis16480_get_ext_clocks(struct adis16480 *st)
{
- st->clk_mode = ADIS16480_CLK_INT;
- st->ext_clk = devm_clk_get(&st->adis.spi->dev, "sync");
- if (!IS_ERR_OR_NULL(st->ext_clk)) {
+ struct device *dev = &st->adis.spi->dev;
+
+ st->ext_clk = devm_clk_get_optional(dev, "sync");
+ if (IS_ERR(st->ext_clk))
+ return dev_err_probe(dev, PTR_ERR(st->ext_clk), "failed to get ext clk\n");
+ if (st->ext_clk) {
st->clk_mode = ADIS16480_CLK_SYNC;
return 0;
}
- if (PTR_ERR(st->ext_clk) != -ENOENT) {
- dev_err(&st->adis.spi->dev, "failed to get ext clk\n");
- return PTR_ERR(st->ext_clk);
- }
-
if (st->chip_info->has_pps_clk_mode) {
- st->ext_clk = devm_clk_get(&st->adis.spi->dev, "pps");
- if (!IS_ERR_OR_NULL(st->ext_clk)) {
+ st->ext_clk = devm_clk_get_optional(dev, "pps");
+ if (IS_ERR(st->ext_clk))
+ return dev_err_probe(dev, PTR_ERR(st->ext_clk), "failed to get ext clk\n");
+ if (st->ext_clk) {
st->clk_mode = ADIS16480_CLK_PPS;
return 0;
}
-
- if (PTR_ERR(st->ext_clk) != -ENOENT) {
- dev_err(&st->adis.spi->dev, "failed to get ext clk\n");
- return PTR_ERR(st->ext_clk);
- }
}
+ st->clk_mode = ADIS16480_CLK_INT;
return 0;
}
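
The clock handling above moves to devm_clk_get_optional(): a missing "sync"/"pps" clock now comes back as NULL instead of -ENOENT, so only genuine lookup failures need dev_err_probe(). In sketch form (the helper and its out parameter are illustrative):

static int example_get_sync_clk(struct device *dev, struct clk **out)
{
	struct clk *clk;

	clk = devm_clk_get_optional(dev, "sync");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "failed to get ext clk\n");

	*out = clk;	/* NULL here simply means no clock was wired up */
	return 0;
}
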
@@ -1404,11 +1399,12 @@ static int adis16480_probe(struct spi_device *spi)
const struct spi_device_id *id = spi_get_device_id(spi);
const struct adis_data *adis16480_data;
irq_handler_t trigger_handler = NULL;
+ struct device *dev = &spi->dev;
struct iio_dev *indio_dev;
struct adis16480 *st;
int ret;
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
@@ -1432,13 +1428,12 @@ static int adis16480_probe(struct spi_device *spi)
return ret;
if (st->chip_info->has_sleep_cnt) {
- ret = devm_add_action_or_reset(&spi->dev, adis16480_stop,
- indio_dev);
+ ret = devm_add_action_or_reset(dev, adis16480_stop, indio_dev);
if (ret)
return ret;
}
- ret = adis16480_config_irq_pin(spi->dev.of_node, st);
+ ret = adis16480_config_irq_pin(st);
if (ret)
return ret;
@@ -1446,12 +1441,12 @@ static int adis16480_probe(struct spi_device *spi)
if (ret)
return ret;
- if (!IS_ERR_OR_NULL(st->ext_clk)) {
- ret = adis16480_ext_clk_config(st, spi->dev.of_node, true);
+ if (st->ext_clk) {
+ ret = adis16480_ext_clk_config(st, true);
if (ret)
return ret;
- ret = devm_add_action_or_reset(&spi->dev, adis16480_clk_disable, st->ext_clk);
+ ret = devm_add_action_or_reset(dev, adis16480_clk_disable, st->ext_clk);
if (ret)
return ret;
@@ -1484,7 +1479,7 @@ static int adis16480_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = devm_iio_device_register(&spi->dev, indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret)
return ret;
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index 01336105792e..e7aec56ea136 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -11,10 +11,9 @@
*/
#include <linux/module.h>
#include <linux/regmap.h>
-#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/irq.h>
-#include <linux/of_irq.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
@@ -525,17 +524,6 @@ static const struct iio_info bmi160_info = {
.attrs = &bmi160_attrs_group,
};
-static const char *bmi160_match_acpi_device(struct device *dev)
-{
- const struct acpi_device_id *id;
-
- id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!id)
- return NULL;
-
- return dev_name(dev);
-}
-
static int bmi160_write_conf_reg(struct regmap *regmap, unsigned int reg,
unsigned int mask, unsigned int bits,
unsigned int write_usleep)
@@ -647,18 +635,18 @@ int bmi160_enable_irq(struct regmap *regmap, bool enable)
}
EXPORT_SYMBOL(bmi160_enable_irq);
-static int bmi160_get_irq(struct device_node *of_node, enum bmi160_int_pin *pin)
+static int bmi160_get_irq(struct fwnode_handle *fwnode, enum bmi160_int_pin *pin)
{
int irq;
/* Use INT1 if possible, otherwise fall back to INT2. */
- irq = of_irq_get_byname(of_node, "INT1");
+ irq = fwnode_irq_get_byname(fwnode, "INT1");
if (irq > 0) {
*pin = BMI160_PIN_INT1;
return irq;
}
- irq = of_irq_get_byname(of_node, "INT2");
+ irq = fwnode_irq_get_byname(fwnode, "INT2");
if (irq > 0)
*pin = BMI160_PIN_INT2;
@@ -688,7 +676,7 @@ static int bmi160_config_device_irq(struct iio_dev *indio_dev, int irq_type,
return -EINVAL;
}
- open_drain = of_property_read_bool(dev->of_node, "drive-open-drain");
+ open_drain = device_property_read_bool(dev, "drive-open-drain");
return bmi160_config_pin(data->regmap, pin, open_drain, irq_mask,
BMI160_NORMAL_WRITE_USLEEP);
@@ -872,9 +860,6 @@ int bmi160_core_probe(struct device *dev, struct regmap *regmap,
if (ret)
return ret;
- if (!name && ACPI_HANDLE(dev))
- name = bmi160_match_acpi_device(dev);
-
indio_dev->channels = bmi160_channels;
indio_dev->num_channels = ARRAY_SIZE(bmi160_channels);
indio_dev->name = name;
@@ -887,7 +872,7 @@ int bmi160_core_probe(struct device *dev, struct regmap *regmap,
if (ret)
return ret;
- irq = bmi160_get_irq(dev->of_node, &int_pin);
+ irq = bmi160_get_irq(dev_fwnode(dev), &int_pin);
if (irq > 0) {
ret = bmi160_setup_irq(indio_dev, irq, int_pin);
if (ret)
diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c
index 26398614eddf..02f149d37b17 100644
--- a/drivers/iio/imu/bmi160/bmi160_i2c.c
+++ b/drivers/iio/imu/bmi160/bmi160_i2c.c
@@ -8,10 +8,9 @@
* - 0x68 if SDO is pulled to GND
* - 0x69 if SDO is pulled to VDDIO
*/
-#include <linux/acpi.h>
#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include "bmi160.h"
@@ -20,7 +19,7 @@ static int bmi160_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct regmap *regmap;
- const char *name = NULL;
+ const char *name;
regmap = devm_regmap_init_i2c(client, &bmi160_regmap_config);
if (IS_ERR(regmap)) {
@@ -31,6 +30,8 @@ static int bmi160_i2c_probe(struct i2c_client *client,
if (id)
name = id->name;
+ else
+ name = dev_name(&client->dev);
return bmi160_core_probe(&client->dev, regmap, name, false);
}
@@ -47,19 +48,17 @@ static const struct acpi_device_id bmi160_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
-#ifdef CONFIG_OF
static const struct of_device_id bmi160_of_match[] = {
{ .compatible = "bosch,bmi160" },
{ },
};
MODULE_DEVICE_TABLE(of, bmi160_of_match);
-#endif
static struct i2c_driver bmi160_i2c_driver = {
.driver = {
.name = "bmi160_i2c",
- .acpi_match_table = ACPI_PTR(bmi160_acpi_match),
- .of_match_table = of_match_ptr(bmi160_of_match),
+ .acpi_match_table = bmi160_acpi_match,
+ .of_match_table = bmi160_of_match,
},
.probe = bmi160_i2c_probe,
.id_table = bmi160_i2c_id,
diff --git a/drivers/iio/imu/bmi160/bmi160_spi.c b/drivers/iio/imu/bmi160/bmi160_spi.c
index 61389b41c6d9..24f7d75c7903 100644
--- a/drivers/iio/imu/bmi160/bmi160_spi.c
+++ b/drivers/iio/imu/bmi160/bmi160_spi.c
@@ -5,9 +5,8 @@
* Copyright (c) 2016, Intel Corporation.
*
*/
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
@@ -17,6 +16,7 @@ static int bmi160_spi_probe(struct spi_device *spi)
{
struct regmap *regmap;
const struct spi_device_id *id = spi_get_device_id(spi);
+ const char *name;
regmap = devm_regmap_init_spi(spi, &bmi160_regmap_config);
if (IS_ERR(regmap)) {
@@ -24,7 +24,13 @@ static int bmi160_spi_probe(struct spi_device *spi)
regmap);
return PTR_ERR(regmap);
}
- return bmi160_core_probe(&spi->dev, regmap, id->name, true);
+
+ if (id)
+ name = id->name;
+ else
+ name = dev_name(&spi->dev);
+
+ return bmi160_core_probe(&spi->dev, regmap, name, true);
}
static const struct spi_device_id bmi160_spi_id[] = {
@@ -39,20 +45,18 @@ static const struct acpi_device_id bmi160_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
-#ifdef CONFIG_OF
static const struct of_device_id bmi160_of_match[] = {
{ .compatible = "bosch,bmi160" },
{ },
};
MODULE_DEVICE_TABLE(of, bmi160_of_match);
-#endif
static struct spi_driver bmi160_spi_driver = {
.probe = bmi160_spi_probe,
.id_table = bmi160_spi_id,
.driver = {
- .acpi_match_table = ACPI_PTR(bmi160_acpi_match),
- .of_match_table = of_match_ptr(bmi160_of_match),
+ .acpi_match_table = bmi160_acpi_match,
+ .of_match_table = bmi160_of_match,
.name = "bmi160_spi",
},
};
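
The bmi160 changes above (and the fxas21002c/adis16480 hunks earlier) switch from OF-only helpers to the fwnode/property API, which resolves the same lookups from whatever firmware node backs the device and lets the match tables be used without of_match_ptr()/ACPI_PTR(). A sketch of the named-interrupt lookup, with device_property_read_bool() usable the same way for flags such as "drive-open-drain":

static int example_get_irq(struct device *dev)
{
	int irq;

	/* Resolves named IRQs from the device's firmware node. */
	irq = fwnode_irq_get_byname(dev_fwnode(dev), "INT1");
	if (irq > 0)
		return irq;

	return fwnode_irq_get_byname(dev_fwnode(dev), "INT2");
}
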
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
index 383cc3250342..c3f433ad3af6 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
@@ -731,7 +731,6 @@ struct iio_dev *inv_icm42600_accel_init(struct inv_icm42600_state *st)
indio_dev->available_scan_masks = inv_icm42600_accel_scan_masks;
ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&inv_icm42600_buffer_ops);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
index cec1dd0e0464..9d94a8518e3c 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
@@ -743,7 +743,6 @@ struct iio_dev *inv_icm42600_gyro_init(struct inv_icm42600_state *st)
indio_dev->setup_ops = &inv_icm42600_buffer_ops;
ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&inv_icm42600_buffer_ops);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 9c625517173a..3636b1bc90f1 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -16,7 +16,7 @@ config INV_MPU6050_I2C
select REGMAP_I2C
help
This driver supports the Invensense MPU6050/9150,
- MPU6500/6515/6880/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690
+ MPU6500/6515/6880/9250/9255, ICM20608(D)/20609/20689, ICM20602/ICM20690
and IAM20680 motion tracking devices over I2C.
This driver can be built as a module. The module will be called
inv-mpu6050-i2c.
@@ -28,7 +28,7 @@ config INV_MPU6050_SPI
select REGMAP_SPI
help
This driver supports the Invensense MPU6000,
- MPU6500/6515/6880/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690
+ MPU6500/6515/6880/9250/9255, ICM20608(D)/20609/20689, ICM20602/ICM20690
and IAM20680 motion tracking devices over SPI.
This driver can be built as a module. The module will be called
inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 597768c29a72..86fbbe904050 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -218,6 +218,15 @@ static const struct inv_mpu6050_hw hw_info[] = {
.startup_time = {INV_MPU6500_GYRO_STARTUP_TIME, INV_MPU6500_ACCEL_STARTUP_TIME},
},
{
+ .whoami = INV_ICM20608D_WHOAMI_VALUE,
+ .name = "ICM20608D",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6500,
+ .fifo_size = 512,
+ .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
+ .startup_time = {INV_MPU6500_GYRO_STARTUP_TIME, INV_MPU6500_ACCEL_STARTUP_TIME},
+ },
+ {
.whoami = INV_ICM20609_WHOAMI_VALUE,
.name = "ICM20609",
.reg = &reg_set_6500,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 55cffb5fa115..2aa647704a79 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -29,6 +29,7 @@ static bool inv_mpu_i2c_aux_bus(struct device *dev)
switch (st->chip_type) {
case INV_ICM20608:
+ case INV_ICM20608D:
case INV_ICM20609:
case INV_ICM20689:
case INV_ICM20602:
@@ -182,6 +183,7 @@ static const struct i2c_device_id inv_mpu_id[] = {
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
{"icm20608", INV_ICM20608},
+ {"icm20608d", INV_ICM20608D},
{"icm20609", INV_ICM20609},
{"icm20689", INV_ICM20689},
{"icm20602", INV_ICM20602},
@@ -226,6 +228,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_ICM20608
},
{
+ .compatible = "invensense,icm20608d",
+ .data = (void *)INV_ICM20608D
+ },
+ {
.compatible = "invensense,icm20609",
.data = (void *)INV_ICM20609
},
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index c6aa36ee966a..8e14f20b1314 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -76,6 +76,7 @@ enum inv_devices {
INV_MPU9250,
INV_MPU9255,
INV_ICM20608,
+ INV_ICM20608D,
INV_ICM20609,
INV_ICM20689,
INV_ICM20602,
@@ -394,6 +395,7 @@ struct inv_mpu6050_state {
#define INV_MPU9255_WHOAMI_VALUE 0x73
#define INV_MPU6515_WHOAMI_VALUE 0x74
#define INV_ICM20608_WHOAMI_VALUE 0xAF
+#define INV_ICM20608D_WHOAMI_VALUE 0xAE
#define INV_ICM20609_WHOAMI_VALUE 0xA6
#define INV_ICM20689_WHOAMI_VALUE 0x98
#define INV_ICM20602_WHOAMI_VALUE 0x12
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 26a7c2521dc4..e6107b0cc38f 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -73,6 +73,7 @@ static const struct spi_device_id inv_mpu_id[] = {
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
{"icm20608", INV_ICM20608},
+ {"icm20608d", INV_ICM20608D},
{"icm20609", INV_ICM20609},
{"icm20689", INV_ICM20689},
{"icm20602", INV_ICM20602},
@@ -113,6 +114,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_ICM20608
},
{
+ .compatible = "invensense,icm20608d",
+ .data = (void *)INV_ICM20608D
+ },
+ {
.compatible = "invensense,icm20609",
.data = (void *)INV_ICM20609
},
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index 85860217aaf3..fefd0b939100 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -11,9 +11,9 @@ config IIO_ST_LSM6DSX
help
Say yes here to build support for STMicroelectronics LSM6DSx imu
sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm,
- ism330dlc, lsm6dso, lsm6dsox, asm330lhh, lsm6dsr, lsm6ds3tr-c,
- ism330dhcx, lsm6dsrx, lsm6ds0, lsm6dsop, the accelerometer/gyroscope
- of lsm9ds1 and lsm6dst.
+ ism330dlc, lsm6dso, lsm6dsox, asm330lhh, asm330lhhx, lsm6dsr,
+ lsm6ds3tr-c, ism330dhcx, lsm6dsrx, lsm6ds0, lsm6dsop,
+ the accelerometer/gyroscope of lsm9ds1 and lsm6dst.
To compile this driver as a module, choose M here: the module
will be called st_lsm6dsx.
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 6ac4eac36458..a86dd29a4738 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -31,6 +31,7 @@
#define ST_LSM6DSRX_DEV_NAME "lsm6dsrx"
#define ST_LSM6DST_DEV_NAME "lsm6dst"
#define ST_LSM6DSOP_DEV_NAME "lsm6dsop"
+#define ST_ASM330LHHX_DEV_NAME "asm330lhhx"
enum st_lsm6dsx_hw_id {
ST_LSM6DS3_ID,
@@ -49,6 +50,7 @@ enum st_lsm6dsx_hw_id {
ST_LSM6DSRX_ID,
ST_LSM6DST_ID,
ST_LSM6DSOP_ID,
+ ST_ASM330LHHX_ID,
ST_LSM6DSX_MAX_ID,
};
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 16730a780964..c7d3730ab1c5 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -14,7 +14,8 @@
* (e.g. Gx, Gy, Gz, Ax, Ay, Az), then data are repeated depending on the
* value of the decimation factor and ODR set for each FIFO data set.
*
- * LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/LSM6DSRX/ISM330DHCX/LSM6DST/LSM6DSOP:
+ * LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/LSM6DSRX/ISM330DHCX/
+ * LSM6DST/LSM6DSOP:
* The FIFO buffer can be configured to store data from gyroscope and
* accelerometer. Each sample is queued with a tag (1B) indicating data
* source (gyroscope, accelerometer, hw timer).
@@ -746,7 +747,6 @@ int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw)
continue;
ret = devm_iio_kfifo_buffer_setup(hw->dev, hw->iio_devs[i],
- INDIO_BUFFER_SOFTWARE,
&st_lsm6dsx_buffer_ops);
if (ret)
return ret;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index b1d8d5a66f01..910397716833 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -26,7 +26,7 @@
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 4KB
*
- * - LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP:
+ * - LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP:
* - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416,
* 833
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
@@ -786,6 +786,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.hw_id = ST_LSM6DST_ID,
.name = ST_LSM6DST_DEV_NAME,
.wai = 0x6d,
+ }, {
+ .hw_id = ST_ASM330LHHX_ID,
+ .name = ST_ASM330LHHX_DEV_NAME,
+ .wai = 0x6b,
},
},
.channels = {
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index 8b4fc2c15622..715fbdc8190e 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -101,6 +101,10 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
.compatible = "st,lsm6dsop",
.data = (void *)ST_LSM6DSOP_ID,
},
+ {
+ .compatible = "st,asm330lhhx",
+ .data = (void *)ST_ASM330LHHX_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match);
@@ -122,6 +126,7 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = {
{ ST_LSM6DSRX_DEV_NAME, ST_LSM6DSRX_ID },
{ ST_LSM6DST_DEV_NAME, ST_LSM6DST_ID },
{ ST_LSM6DSOP_DEV_NAME, ST_LSM6DSOP_ID },
+ { ST_ASM330LHHX_DEV_NAME, ST_ASM330LHHX_ID },
{},
};
MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index e80110b6b280..f5767cf76c1d 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -101,6 +101,10 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
.compatible = "st,lsm6dsop",
.data = (void *)ST_LSM6DSOP_ID,
},
+ {
+ .compatible = "st,asm330lhhx",
+ .data = (void *)ST_ASM330LHHX_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match);
@@ -122,6 +126,7 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = {
{ ST_LSM6DSRX_DEV_NAME, ST_LSM6DSRX_ID },
{ ST_LSM6DST_DEV_NAME, ST_LSM6DST_ID },
{ ST_LSM6DSOP_DEV_NAME, ST_LSM6DSOP_ID },
+ { ST_ASM330LHHX_DEV_NAME, ST_ASM330LHHX_ID },
{},
};
MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index b078eb2f3c9d..06141ca27e1f 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -510,7 +510,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_buffer *buffer = this_attr->buffer;
- ret = strtobool(buf, &state);
+ ret = kstrtobool(buf, &state);
if (ret < 0)
return ret;
mutex_lock(&indio_dev->mlock);
@@ -557,7 +557,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
bool state;
- ret = strtobool(buf, &state);
+ ret = kstrtobool(buf, &state);
if (ret < 0)
return ret;
@@ -915,7 +915,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
if (scan_mask == NULL)
return -EINVAL;
} else {
- scan_mask = compound_mask;
+ scan_mask = compound_mask;
}
config->scan_bytes = iio_compute_scan_bytes(indio_dev,
@@ -1059,13 +1059,13 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
struct iio_device_config *config)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- struct iio_buffer *buffer;
+ struct iio_buffer *buffer, *tmp = NULL;
int ret;
indio_dev->active_scan_mask = config->scan_mask;
indio_dev->scan_timestamp = config->scan_timestamp;
indio_dev->scan_bytes = config->scan_bytes;
- indio_dev->currentmode = config->mode;
+ iio_dev_opaque->currentmode = config->mode;
iio_update_demux(indio_dev);
@@ -1097,11 +1097,13 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
ret = iio_buffer_enable(buffer, indio_dev);
- if (ret)
+ if (ret) {
+ tmp = buffer;
goto err_disable_buffers;
+ }
}
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
ret = iio_trigger_attach_poll_func(indio_dev->trig,
indio_dev->pollfunc);
if (ret)
@@ -1120,11 +1122,12 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
return 0;
err_detach_pollfunc:
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
iio_trigger_detach_poll_func(indio_dev->trig,
indio_dev->pollfunc);
}
err_disable_buffers:
+ buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list);
list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
buffer_list)
iio_buffer_disable(buffer, indio_dev);
@@ -1132,7 +1135,7 @@ err_run_postdisable:
if (indio_dev->setup_ops->postdisable)
indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
- indio_dev->currentmode = INDIO_DIRECT_MODE;
+ iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
indio_dev->active_scan_mask = NULL;
return ret;
@@ -1162,7 +1165,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
ret = ret2;
}
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
iio_trigger_detach_poll_func(indio_dev->trig,
indio_dev->pollfunc);
}
@@ -1181,7 +1184,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
indio_dev->active_scan_mask = NULL;
- indio_dev->currentmode = INDIO_DIRECT_MODE;
+ iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
return ret;
}
@@ -1300,7 +1303,7 @@ static ssize_t iio_buffer_store_enable(struct device *dev,
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
bool inlist;
- ret = strtobool(buf, &requested_state);
+ ret = kstrtobool(buf, &requested_state);
if (ret < 0)
return ret;
@@ -1629,6 +1632,19 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
if (channels[i].scan_index < 0)
continue;
+ /* Verify that sample bits fit into storage */
+ if (channels[i].scan_type.storagebits <
+ channels[i].scan_type.realbits +
+ channels[i].scan_type.shift) {
+ dev_err(&indio_dev->dev,
+ "Channel %d storagebits (%d) < shifted realbits (%d + %d)\n",
+ i, channels[i].scan_type.storagebits,
+ channels[i].scan_type.realbits,
+ channels[i].scan_type.shift);
+ ret = -EINVAL;
+ goto error_cleanup_dynamic;
+ }
+
ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
&channels[i]);
if (ret < 0)
@@ -1649,7 +1665,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
}
attrn = buffer_attrcount + scan_el_attrcount + ARRAY_SIZE(iio_buffer_attrs);
- attr = kcalloc(attrn + 1, sizeof(* attr), GFP_KERNEL);
+ attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
if (!attr) {
ret = -ENOMEM;
goto error_free_scan_mask;
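The new check added in __iio_buffer_alloc_sysfs_and_mask() rejects channels whose realbits plus shift do not fit into storagebits. As an illustration (not taken from the patch), a scan_type like the hypothetical one below would now fail buffer setup with -EINVAL:

static const struct iio_chan_spec bad_chan = {
	.type = IIO_VOLTAGE,
	.scan_index = 0,
	.scan_type = {
		.sign = 's',
		.realbits = 14,
		.storagebits = 16,
		.shift = 4,		/* 14 + 4 > 16: rejected at buffer setup */
		.endianness = IIO_BE,
	},
};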
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index e1ed44dec2ab..adf054c7a75e 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -185,6 +185,20 @@ int iio_device_id(struct iio_dev *indio_dev)
EXPORT_SYMBOL_GPL(iio_device_id);
/**
+ * iio_buffer_enabled() - helper function to test if the buffer is enabled
+ * @indio_dev: IIO device structure for device
+ */
+bool iio_buffer_enabled(struct iio_dev *indio_dev)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
+ return iio_dev_opaque->currentmode
+ & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
+ INDIO_BUFFER_SOFTWARE);
+}
+EXPORT_SYMBOL_GPL(iio_buffer_enabled);
+
+/**
* iio_sysfs_match_string_with_gaps - matches given string in an array with gaps
* @array: array of strings
* @n: number of strings in the array
@@ -892,8 +906,7 @@ static int __iio_str_to_fixpoint(const char *str, int fract_mult,
} else if (*str == '\n') {
if (*(str + 1) == '\0')
break;
- else
- return -EINVAL;
+ return -EINVAL;
} else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) {
/* Ignore the dB suffix */
str += sizeof(" dB") - 1;
@@ -1894,20 +1907,22 @@ static const struct iio_buffer_setup_ops noop_ring_setup_ops;
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- const char *label;
+ struct fwnode_handle *fwnode;
int ret;
if (!indio_dev->info)
return -EINVAL;
iio_dev_opaque->driver_module = this_mod;
- /* If the calling driver did not initialize of_node, do it here */
- if (!indio_dev->dev.of_node && indio_dev->dev.parent)
- indio_dev->dev.of_node = indio_dev->dev.parent->of_node;
- label = of_get_property(indio_dev->dev.of_node, "label", NULL);
- if (label)
- indio_dev->label = label;
+ /* If the calling driver did not initialize firmware node, do it here */
+ if (dev_fwnode(&indio_dev->dev))
+ fwnode = dev_fwnode(&indio_dev->dev);
+ else
+ fwnode = dev_fwnode(indio_dev->dev.parent);
+ device_set_node(&indio_dev->dev, fwnode);
+
+ fwnode_property_read_string(fwnode, "label", &indio_dev->label);
ret = iio_check_unique_scan_index(indio_dev);
if (ret < 0)
@@ -2059,6 +2074,19 @@ void iio_device_release_direct_mode(struct iio_dev *indio_dev)
}
EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);
+/**
+ * iio_device_get_current_mode() - helper function providing read-only access to
+ * the opaque @currentmode variable
+ * @indio_dev: IIO device structure for device
+ */
+int iio_device_get_current_mode(struct iio_dev *indio_dev)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
+ return iio_dev_opaque->currentmode;
+}
+EXPORT_SYMBOL_GPL(iio_device_get_current_mode);
+
subsys_initcall(iio_init);
module_exit(iio_exit);
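Drivers are expected to use the two helpers exported above, iio_buffer_enabled() and iio_device_get_current_mode(), instead of dereferencing indio_dev->currentmode, which this series moves into the opaque structure. A sketch of the intended call site, assuming a hypothetical my_state holding a trigger and a completion:

struct my_state {
	struct iio_trigger *trig;
	struct completion done;
};

static irqreturn_t my_drdy_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct my_state *st = iio_priv(indio_dev);

	if (iio_buffer_enabled(indio_dev))
		iio_trigger_poll(st->trig);	/* buffered capture in flight */
	else
		complete(&st->done);		/* direct (one-shot) read */

	return IRQ_HANDLED;
}

Where the exact mode matters, iio_device_get_current_mode(indio_dev) can be compared against INDIO_BUFFER_TRIGGERED and friends, as the trigger core does below.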
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index ce8b102ce52f..b5e059e15b0a 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -274,7 +274,7 @@ static ssize_t iio_ev_state_store(struct device *dev,
int ret;
bool val;
- ret = strtobool(buf, &val);
+ ret = kstrtobool(buf, &val);
if (ret < 0)
return ret;
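The strtobool() to kstrtobool() conversions in this series are mechanical, since strtobool() is an inline wrapper around kstrtobool(); the helper accepts the usual "1"/"0", "y"/"n" and "on"/"off" spellings. A minimal usage sketch with a hypothetical sysfs store callback:

static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len)
{
	bool val;
	int ret;

	ret = kstrtobool(buf, &val);	/* "1"/"0", "y"/"n", "on"/"off" */
	if (ret)
		return ret;

	/* ... apply val ... */
	return len;
}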
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index f504ed351b3e..585b6cef8fcc 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -444,7 +444,7 @@ static ssize_t iio_trigger_write_current(struct device *dev,
int ret;
mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
mutex_unlock(&indio_dev->mlock);
return -EBUSY;
}
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index a62c7b4b8678..8537e88f02e3 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -155,7 +155,6 @@ config CM3323
config CM3605
tristate "Capella CM3605 ambient light and proximity sensor"
- depends on OF
help
Say Y here if you want to build a driver for Capella CM3605
ambient light and short range proximity sensor.
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 4141c0fa7bc4..09b831f9f40b 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -1003,7 +1003,6 @@ static int apds9960_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&apds9960_buffer_setup_ops);
if (ret)
return ret;
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 1d02dfbc29d1..b578b46276cc 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -106,6 +106,7 @@ struct stk3310_data {
struct mutex lock;
bool als_enabled;
bool ps_enabled;
+ uint32_t ps_near_level;
u64 timestamp;
struct regmap *regmap;
struct regmap_field *reg_state;
@@ -135,6 +136,25 @@ static const struct iio_event_spec stk3310_events[] = {
},
};
+static ssize_t stk3310_read_near_level(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct stk3310_data *data = iio_priv(indio_dev);
+
+ return sprintf(buf, "%u\n", data->ps_near_level);
+}
+
+static const struct iio_chan_spec_ext_info stk3310_ext_info[] = {
+ {
+ .name = "nearlevel",
+ .shared = IIO_SEPARATE,
+ .read = stk3310_read_near_level,
+ },
+ { /* sentinel */ }
+};
+
static const struct iio_chan_spec stk3310_channels[] = {
{
.type = IIO_LIGHT,
@@ -151,6 +171,7 @@ static const struct iio_chan_spec stk3310_channels[] = {
BIT(IIO_CHAN_INFO_INT_TIME),
.event_spec = stk3310_events,
.num_event_specs = ARRAY_SIZE(stk3310_events),
+ .ext_info = stk3310_ext_info,
}
};
@@ -581,6 +602,10 @@ static int stk3310_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
data->client = client;
i2c_set_clientdata(client, indio_dev);
+
+ device_property_read_u32(&client->dev, "proximity-near-level",
+ &data->ps_near_level);
+
mutex_init(&data->lock);
ret = stk3310_regmap_init(data);
diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
index 729f14d9f2a4..dd9051f1cc1a 100644
--- a/drivers/iio/light/tsl2772.c
+++ b/drivers/iio/light/tsl2772.c
@@ -15,7 +15,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/property.h>
#include <linux/slab.h>
+
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -549,10 +551,10 @@ prox_poll_err:
static int tsl2772_read_prox_led_current(struct tsl2772_chip *chip)
{
- struct device_node *of_node = chip->client->dev.of_node;
+ struct device *dev = &chip->client->dev;
int ret, tmp, i;
- ret = of_property_read_u32(of_node, "led-max-microamp", &tmp);
+ ret = device_property_read_u32(dev, "led-max-microamp", &tmp);
if (ret < 0)
return ret;
@@ -563,20 +565,18 @@ static int tsl2772_read_prox_led_current(struct tsl2772_chip *chip)
}
}
- dev_err(&chip->client->dev, "Invalid value %d for led-max-microamp\n",
- tmp);
+ dev_err(dev, "Invalid value %d for led-max-microamp\n", tmp);
return -EINVAL;
-
}
static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
{
- struct device_node *of_node = chip->client->dev.of_node;
+ struct device *dev = &chip->client->dev;
int i, ret, num_leds, prox_diode_mask;
u32 leds[TSL2772_MAX_PROX_LEDS];
- ret = of_property_count_u32_elems(of_node, "amstaos,proximity-diodes");
+ ret = device_property_count_u32(dev, "amstaos,proximity-diodes");
if (ret < 0)
return ret;
@@ -584,12 +584,9 @@ static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
if (num_leds > TSL2772_MAX_PROX_LEDS)
num_leds = TSL2772_MAX_PROX_LEDS;
- ret = of_property_read_u32_array(of_node, "amstaos,proximity-diodes",
- leds, num_leds);
+ ret = device_property_read_u32_array(dev, "amstaos,proximity-diodes", leds, num_leds);
if (ret < 0) {
- dev_err(&chip->client->dev,
- "Invalid value for amstaos,proximity-diodes: %d.\n",
- ret);
+ dev_err(dev, "Invalid value for amstaos,proximity-diodes: %d.\n", ret);
return ret;
}
@@ -600,9 +597,7 @@ static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
else if (leds[i] == 1)
prox_diode_mask |= TSL2772_DIODE1;
else {
- dev_err(&chip->client->dev,
- "Invalid value %d in amstaos,proximity-diodes.\n",
- leds[i]);
+ dev_err(dev, "Invalid value %d in amstaos,proximity-diodes.\n", leds[i]);
return -EINVAL;
}
}
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 54445365c4bc..07eb619bcfe8 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -9,7 +9,6 @@ menu "Magnetometer sensors"
config AK8974
tristate "Asahi Kasei AK8974 3-Axis Magnetometer"
depends on I2C
- depends on OF
select REGMAP_I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
index 26195733ea3e..707ba25360b8 100644
--- a/drivers/iio/magnetometer/rm3100-core.c
+++ b/drivers/iio/magnetometer/rm3100-core.c
@@ -141,18 +141,10 @@ static irqreturn_t rm3100_irq_handler(int irq, void *d)
struct iio_dev *indio_dev = d;
struct rm3100_data *data = iio_priv(indio_dev);
- switch (indio_dev->currentmode) {
- case INDIO_DIRECT_MODE:
+ if (!iio_buffer_enabled(indio_dev))
complete(&data->measuring_done);
- break;
- case INDIO_BUFFER_TRIGGERED:
+ else
iio_trigger_poll(data->drdy_trig);
- break;
- default:
- dev_err(indio_dev->dev.parent,
- "device mode out of control, current mode: %d",
- indio_dev->currentmode);
- }
return IRQ_WAKE_THREAD;
}
@@ -377,7 +369,7 @@ static int rm3100_set_samp_freq(struct iio_dev *indio_dev, int val, int val2)
goto unlock_return;
}
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_buffer_enabled(indio_dev)) {
/* Writing TMRC registers requires CMM reset. */
ret = regmap_write(regmap, RM3100_REG_CMM, 0);
if (ret < 0)
@@ -553,7 +545,6 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
indio_dev->channels = rm3100_channels;
indio_dev->num_channels = ARRAY_SIZE(rm3100_channels);
indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_TRIGGERED;
- indio_dev->currentmode = INDIO_DIRECT_MODE;
if (!irq)
data->use_interrupt = false;
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 74435f4a427d..e2fd233b3626 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -540,24 +540,17 @@ read_error:
static int st_magn_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long mask)
{
- int err;
-
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
- break;
+ return st_sensors_set_fullscale_by_gain(indio_dev, val2);
case IIO_CHAN_INFO_SAMP_FREQ:
if (val2)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
- err = st_sensors_set_odr(indio_dev, val);
- mutex_unlock(&indio_dev->mlock);
- return err;
+
+ return st_sensors_set_odr(indio_dev, val);
default:
- err = -EINVAL;
+ return -EINVAL;
}
-
- return err;
}
static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
diff --git a/drivers/iio/multiplexer/Kconfig b/drivers/iio/multiplexer/Kconfig
index a1e1332d1206..928f424a1ed3 100644
--- a/drivers/iio/multiplexer/Kconfig
+++ b/drivers/iio/multiplexer/Kconfig
@@ -9,7 +9,6 @@ menu "Multiplexers"
config IIO_MUX
tristate "IIO multiplexer driver"
select MULTIPLEXER
- depends on OF || COMPILE_TEST
help
Say yes here to build support for the IIO multiplexer.
diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c
index f422d44377df..93558fddfa9b 100644
--- a/drivers/iio/multiplexer/iio-mux.c
+++ b/drivers/iio/multiplexer/iio-mux.c
@@ -10,11 +10,12 @@
#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/iio/iio.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mux/consumer.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
struct mux_ext_info_cache {
char *data;
@@ -324,37 +325,21 @@ static int mux_configure_channel(struct device *dev, struct mux *mux,
return 0;
}
-/*
- * Same as of_property_for_each_string(), but also keeps track of the
- * index of each string.
- */
-#define of_property_for_each_string_index(np, propname, prop, s, i) \
- for (prop = of_find_property(np, propname, NULL), \
- s = of_prop_next_string(prop, NULL), \
- i = 0; \
- s; \
- s = of_prop_next_string(prop, s), \
- i++)
-
static int mux_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
struct iio_dev *indio_dev;
struct iio_channel *parent;
struct mux *mux;
- struct property *prop;
- const char *label;
+ const char **labels;
+ int all_children;
+ int children;
u32 state;
int sizeof_ext_info;
- int children;
int sizeof_priv;
int i;
int ret;
- if (!np)
- return -ENODEV;
-
parent = devm_iio_channel_get(dev, "parent");
if (IS_ERR(parent))
return dev_err_probe(dev, PTR_ERR(parent),
@@ -366,9 +351,21 @@ static int mux_probe(struct platform_device *pdev)
sizeof_ext_info *= sizeof(*mux->ext_info);
}
+ all_children = device_property_string_array_count(dev, "channels");
+ if (all_children < 0)
+ return all_children;
+
+ labels = devm_kmalloc_array(dev, all_children, sizeof(*labels), GFP_KERNEL);
+ if (!labels)
+ return -ENOMEM;
+
+ ret = device_property_read_string_array(dev, "channels", labels, all_children);
+ if (ret < 0)
+ return ret;
+
children = 0;
- of_property_for_each_string(np, "channels", prop, label) {
- if (*label)
+ for (state = 0; state < all_children; state++) {
+ if (*labels[state])
children++;
}
if (children <= 0) {
@@ -395,7 +392,7 @@ static int mux_probe(struct platform_device *pdev)
mux->cached_state = -1;
mux->delay_us = 0;
- of_property_read_u32(np, "settle-time-us", &mux->delay_us);
+ device_property_read_u32(dev, "settle-time-us", &mux->delay_us);
indio_dev->name = dev_name(dev);
indio_dev->info = &mux_info;
@@ -426,11 +423,11 @@ static int mux_probe(struct platform_device *pdev)
}
i = 0;
- of_property_for_each_string_index(np, "channels", prop, label, state) {
- if (!*label)
+ for (state = 0; state < all_children; state++) {
+ if (!*labels[state])
continue;
- ret = mux_configure_channel(dev, mux, state, label, i++);
+ ret = mux_configure_channel(dev, mux, state, labels[state], i++);
if (ret < 0)
return ret;
}
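The iio-mux conversion above replaces the OF-only of_property_for_each_string() with the generic property API, using the count/allocate/read sequence. The same idiom, reduced to a hypothetical helper that counts the non-empty entries of a "channels" string array (needs <linux/property.h>):

static int count_used_channels(struct device *dev)
{
	const char **labels;
	int n, i, used = 0;

	n = device_property_string_array_count(dev, "channels");
	if (n < 0)
		return n;

	labels = devm_kmalloc_array(dev, n, sizeof(*labels), GFP_KERNEL);
	if (!labels)
		return -ENOMEM;

	n = device_property_read_string_array(dev, "channels", labels, n);
	if (n < 0)
		return n;

	for (i = 0; i < n; i++)
		if (*labels[i])		/* empty strings mark unused states */
			used++;

	return used;
}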
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 5b93933a2e27..76913a2028d2 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -560,16 +560,12 @@ static int st_press_write_raw(struct iio_dev *indio_dev,
int val2,
long mask)
{
- int err;
-
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
if (val2)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
- err = st_sensors_set_odr(indio_dev, val);
- mutex_unlock(&indio_dev->mlock);
- return err;
+
+ return st_sensors_set_odr(indio_dev, val);
default:
return -EINVAL;
}
diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c
index ad4b1fb2607a..0bca5f74de68 100644
--- a/drivers/iio/proximity/mb1232.c
+++ b/drivers/iio/proximity/mb1232.c
@@ -10,12 +10,14 @@
* https://www.maxbotix.com/documents/I2CXL-MaxSonar-EZ_Datasheet.pdf
*/
+#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/i2c.h>
-#include <linux/of_irq.h>
#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/bitops.h>
+#include <linux/property.h>
+
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
@@ -209,7 +211,7 @@ static int mb1232_probe(struct i2c_client *client,
init_completion(&data->ranging);
- data->irqnr = irq_of_parse_and_map(dev->of_node, 0);
+ data->irqnr = fwnode_irq_get(dev_fwnode(&client->dev), 0);
if (data->irqnr <= 0) {
/* usage of interrupt is optional */
data->irqnr = -1;
diff --git a/drivers/iio/proximity/ping.c b/drivers/iio/proximity/ping.c
index 24a97d41e115..d56e037378de 100644
--- a/drivers/iio/proximity/ping.c
+++ b/drivers/iio/proximity/ping.c
@@ -29,9 +29,8 @@
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/sched.h>
@@ -288,7 +287,7 @@ static int ping_probe(struct platform_device *pdev)
data = iio_priv(indio_dev);
data->dev = dev;
- data->cfg = of_device_get_match_data(dev);
+ data->cfg = device_get_match_data(dev);
mutex_init(&data->lock);
init_completion(&data->rising);
diff --git a/drivers/iio/proximity/vl53l0x-i2c.c b/drivers/iio/proximity/vl53l0x-i2c.c
index 661a79ea200d..a284b20529fb 100644
--- a/drivers/iio/proximity/vl53l0x-i2c.c
+++ b/drivers/iio/proximity/vl53l0x-i2c.c
@@ -104,6 +104,7 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data,
u16 tries = 20;
u8 buffer[12];
int ret;
+ unsigned long time_left;
ret = i2c_smbus_write_byte_data(client, VL_REG_SYSRANGE_START, 1);
if (ret < 0)
@@ -112,10 +113,8 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data,
if (data->client->irq) {
reinit_completion(&data->completion);
- ret = wait_for_completion_timeout(&data->completion, HZ/10);
- if (ret < 0)
- return ret;
- else if (ret == 0)
+ time_left = wait_for_completion_timeout(&data->completion, HZ/10);
+ if (time_left == 0)
return -ETIMEDOUT;
vl53l0x_clear_irq(data);
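wait_for_completion_timeout() returns an unsigned long count of remaining jiffies and never a negative error, so the old `ret < 0` branch was dead code; only a zero return signals a timeout. A condensed sketch of the corrected pattern, wrapped in a hypothetical helper:

static int wait_measurement(struct completion *done)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(done, HZ / 10);
	if (!time_left)
		return -ETIMEDOUT;	/* 0: the 100 ms window elapsed */

	return 0;			/* > 0: jiffies left, data is ready */
}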
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
index 301c3f13fb26..4fc654275155 100644
--- a/drivers/iio/temperature/ltc2983.c
+++ b/drivers/iio/temperature/ltc2983.c
@@ -12,11 +12,15 @@
#include <linux/iio/iio.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
/* register map */
#define LTC2983_STATUS_REG 0x0000
#define LTC2983_TEMP_RES_START_REG 0x0010
@@ -219,7 +223,7 @@ struct ltc2983_sensor {
struct ltc2983_custom_sensor {
/* raw table sensor data */
- u8 *table;
+ void *table;
size_t size;
/* address offset */
s8 offset;
@@ -377,25 +381,25 @@ static int __ltc2983_chan_custom_sensor_assign(struct ltc2983_data *st,
return regmap_bulk_write(st->regmap, reg, custom->table, custom->size);
}
-static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
- struct ltc2983_data *st,
- const struct device_node *np,
- const char *propname,
- const bool is_steinhart,
- const u32 resolution,
- const bool has_signed)
+static struct ltc2983_custom_sensor *
+__ltc2983_custom_sensor_new(struct ltc2983_data *st, const struct fwnode_handle *fn,
+ const char *propname, const bool is_steinhart,
+ const u32 resolution, const bool has_signed)
{
struct ltc2983_custom_sensor *new_custom;
- u8 index, n_entries, tbl = 0;
struct device *dev = &st->spi->dev;
/*
* For custom steinhart, the full u32 is taken. For all the others
* the MSB is discarded.
*/
const u8 n_size = is_steinhart ? 4 : 3;
- const u8 e_size = is_steinhart ? sizeof(u32) : sizeof(u64);
+ u8 index, n_entries;
+ int ret;
- n_entries = of_property_count_elems_of_size(np, propname, e_size);
+ if (is_steinhart)
+ n_entries = fwnode_property_count_u32(fn, propname);
+ else
+ n_entries = fwnode_property_count_u64(fn, propname);
/* n_entries must be an even number */
if (!n_entries || (n_entries % 2) != 0) {
dev_err(dev, "Number of entries either 0 or not even\n");
@@ -409,8 +413,8 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
new_custom->size = n_entries * n_size;
/* check Steinhart size */
if (is_steinhart && new_custom->size != LTC2983_CUSTOM_STEINHART_SIZE) {
- dev_err(dev, "Steinhart sensors size(%zu) must be 24",
- new_custom->size);
+ dev_err(dev, "Steinhart sensors size(%zu) must be %u\n", new_custom->size,
+ LTC2983_CUSTOM_STEINHART_SIZE);
return ERR_PTR(-EINVAL);
}
/* Check space on the table. */
@@ -423,21 +427,33 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
}
/* allocate the table */
- new_custom->table = devm_kzalloc(dev, new_custom->size, GFP_KERNEL);
+ if (is_steinhart)
+ new_custom->table = devm_kcalloc(dev, n_entries, sizeof(u32), GFP_KERNEL);
+ else
+ new_custom->table = devm_kcalloc(dev, n_entries, sizeof(u64), GFP_KERNEL);
if (!new_custom->table)
return ERR_PTR(-ENOMEM);
- for (index = 0; index < n_entries; index++) {
- u64 temp = 0, j;
- /*
- * Steinhart sensors are configured with raw values in the
- * devicetree. For the other sensors we must convert the
- * value to raw. The odd index's correspond to temperarures
- * and always have 1/1024 of resolution. Temperatures also
- * come in kelvin, so signed values is not possible
- */
- if (!is_steinhart) {
- of_property_read_u64_index(np, propname, index, &temp);
+ /*
+ * Steinhart sensors are configured with raw values in the firmware
+ * node. For the other sensors we must convert the value to raw.
+ * The odd indexes correspond to temperatures and always have 1/1024
+ * of resolution. Temperatures also come in Kelvin, so signed values
+ * are not possible.
+ */
+ if (is_steinhart) {
+ ret = fwnode_property_read_u32_array(fn, propname, new_custom->table, n_entries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ cpu_to_be32_array(new_custom->table, new_custom->table, n_entries);
+ } else {
+ ret = fwnode_property_read_u64_array(fn, propname, new_custom->table, n_entries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ for (index = 0; index < n_entries; index++) {
+ u64 temp = ((u64 *)new_custom->table)[index];
if ((index % 2) != 0)
temp = __convert_to_raw(temp, 1024);
@@ -445,16 +461,9 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
temp = __convert_to_raw_sign(temp, resolution);
else
temp = __convert_to_raw(temp, resolution);
- } else {
- u32 t32;
- of_property_read_u32_index(np, propname, index, &t32);
- temp = t32;
+ put_unaligned_be24(temp, new_custom->table + index * 3);
}
-
- for (j = 0; j < n_size; j++)
- new_custom->table[tbl++] =
- temp >> (8 * (n_size - j - 1));
}
new_custom->is_steinhart = is_steinhart;
@@ -597,13 +606,12 @@ static int ltc2983_adc_assign_chan(struct ltc2983_data *st,
return __ltc2983_chan_assign_common(st, sensor, chan_val);
}
-static struct ltc2983_sensor *ltc2983_thermocouple_new(
- const struct device_node *child,
- struct ltc2983_data *st,
- const struct ltc2983_sensor *sensor)
+static struct ltc2983_sensor *
+ltc2983_thermocouple_new(const struct fwnode_handle *child, struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
{
struct ltc2983_thermocouple *thermo;
- struct device_node *phandle;
+ struct fwnode_handle *ref;
u32 oc_current;
int ret;
@@ -611,11 +619,10 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new(
if (!thermo)
return ERR_PTR(-ENOMEM);
- if (of_property_read_bool(child, "adi,single-ended"))
+ if (fwnode_property_read_bool(child, "adi,single-ended"))
thermo->sensor_config = LTC2983_THERMOCOUPLE_SGL(1);
- ret = of_property_read_u32(child, "adi,sensor-oc-current-microamp",
- &oc_current);
+ ret = fwnode_property_read_u32(child, "adi,sensor-oc-current-microamp", &oc_current);
if (!ret) {
switch (oc_current) {
case 10:
@@ -651,20 +658,18 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new(
return ERR_PTR(-EINVAL);
}
- phandle = of_parse_phandle(child, "adi,cold-junction-handle", 0);
- if (phandle) {
- int ret;
-
- ret = of_property_read_u32(phandle, "reg",
- &thermo->cold_junction_chan);
+ ref = fwnode_find_reference(child, "adi,cold-junction-handle", 0);
+ if (IS_ERR(ref)) {
+ ref = NULL;
+ } else {
+ ret = fwnode_property_read_u32(ref, "reg", &thermo->cold_junction_chan);
if (ret) {
/*
* This would be catched later but we can just return
* the error right away.
*/
dev_err(&st->spi->dev, "Property reg must be given\n");
- of_node_put(phandle);
- return ERR_PTR(-EINVAL);
+ goto fail;
}
}
@@ -676,8 +681,8 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new(
propname, false,
16384, true);
if (IS_ERR(thermo->custom)) {
- of_node_put(phandle);
- return ERR_CAST(thermo->custom);
+ ret = PTR_ERR(thermo->custom);
+ goto fail;
}
}
@@ -685,37 +690,41 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new(
thermo->sensor.fault_handler = ltc2983_thermocouple_fault_handler;
thermo->sensor.assign_chan = ltc2983_thermocouple_assign_chan;
- of_node_put(phandle);
+ fwnode_handle_put(ref);
return &thermo->sensor;
+
+fail:
+ fwnode_handle_put(ref);
+ return ERR_PTR(ret);
}
-static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
- struct ltc2983_data *st,
- const struct ltc2983_sensor *sensor)
+static struct ltc2983_sensor *
+ltc2983_rtd_new(const struct fwnode_handle *child, struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
{
struct ltc2983_rtd *rtd;
int ret = 0;
struct device *dev = &st->spi->dev;
- struct device_node *phandle;
+ struct fwnode_handle *ref;
u32 excitation_current = 0, n_wires = 0;
rtd = devm_kzalloc(dev, sizeof(*rtd), GFP_KERNEL);
if (!rtd)
return ERR_PTR(-ENOMEM);
- phandle = of_parse_phandle(child, "adi,rsense-handle", 0);
- if (!phandle) {
+ ref = fwnode_find_reference(child, "adi,rsense-handle", 0);
+ if (IS_ERR(ref)) {
dev_err(dev, "Property adi,rsense-handle missing or invalid");
- return ERR_PTR(-EINVAL);
+ return ERR_CAST(ref);
}
- ret = of_property_read_u32(phandle, "reg", &rtd->r_sense_chan);
+ ret = fwnode_property_read_u32(ref, "reg", &rtd->r_sense_chan);
if (ret) {
dev_err(dev, "Property reg must be given\n");
goto fail;
}
- ret = of_property_read_u32(child, "adi,number-of-wires", &n_wires);
+ ret = fwnode_property_read_u32(child, "adi,number-of-wires", &n_wires);
if (!ret) {
switch (n_wires) {
case 2:
@@ -738,9 +747,9 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
}
}
- if (of_property_read_bool(child, "adi,rsense-share")) {
+ if (fwnode_property_read_bool(child, "adi,rsense-share")) {
/* Current rotation is only available with rsense sharing */
- if (of_property_read_bool(child, "adi,current-rotate")) {
+ if (fwnode_property_read_bool(child, "adi,current-rotate")) {
if (n_wires == 2 || n_wires == 3) {
dev_err(dev,
"Rotation not allowed for 2/3 Wire RTDs");
@@ -803,8 +812,8 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
"adi,custom-rtd",
false, 2048, false);
if (IS_ERR(rtd->custom)) {
- of_node_put(phandle);
- return ERR_CAST(rtd->custom);
+ ret = PTR_ERR(rtd->custom);
+ goto fail;
}
}
@@ -812,8 +821,8 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
rtd->sensor.fault_handler = ltc2983_common_fault_handler;
rtd->sensor.assign_chan = ltc2983_rtd_assign_chan;
- ret = of_property_read_u32(child, "adi,excitation-current-microamp",
- &excitation_current);
+ ret = fwnode_property_read_u32(child, "adi,excitation-current-microamp",
+ &excitation_current);
if (ret) {
/* default to 5uA */
rtd->excitation_current = 1;
@@ -852,23 +861,22 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
}
}
- of_property_read_u32(child, "adi,rtd-curve", &rtd->rtd_curve);
+ fwnode_property_read_u32(child, "adi,rtd-curve", &rtd->rtd_curve);
- of_node_put(phandle);
+ fwnode_handle_put(ref);
return &rtd->sensor;
fail:
- of_node_put(phandle);
+ fwnode_handle_put(ref);
return ERR_PTR(ret);
}
-static struct ltc2983_sensor *ltc2983_thermistor_new(
- const struct device_node *child,
- struct ltc2983_data *st,
- const struct ltc2983_sensor *sensor)
+static struct ltc2983_sensor *
+ltc2983_thermistor_new(const struct fwnode_handle *child, struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
{
struct ltc2983_thermistor *thermistor;
struct device *dev = &st->spi->dev;
- struct device_node *phandle;
+ struct fwnode_handle *ref;
u32 excitation_current = 0;
int ret = 0;
@@ -876,23 +884,23 @@ static struct ltc2983_sensor *ltc2983_thermistor_new(
if (!thermistor)
return ERR_PTR(-ENOMEM);
- phandle = of_parse_phandle(child, "adi,rsense-handle", 0);
- if (!phandle) {
+ ref = fwnode_find_reference(child, "adi,rsense-handle", 0);
+ if (IS_ERR(ref)) {
dev_err(dev, "Property adi,rsense-handle missing or invalid");
- return ERR_PTR(-EINVAL);
+ return ERR_CAST(ref);
}
- ret = of_property_read_u32(phandle, "reg", &thermistor->r_sense_chan);
+ ret = fwnode_property_read_u32(ref, "reg", &thermistor->r_sense_chan);
if (ret) {
dev_err(dev, "rsense channel must be configured...\n");
goto fail;
}
- if (of_property_read_bool(child, "adi,single-ended")) {
+ if (fwnode_property_read_bool(child, "adi,single-ended")) {
thermistor->sensor_config = LTC2983_THERMISTOR_SGL(1);
- } else if (of_property_read_bool(child, "adi,rsense-share")) {
+ } else if (fwnode_property_read_bool(child, "adi,rsense-share")) {
/* rotation is only possible if sharing rsense */
- if (of_property_read_bool(child, "adi,current-rotate"))
+ if (fwnode_property_read_bool(child, "adi,current-rotate"))
thermistor->sensor_config =
LTC2983_THERMISTOR_C_ROTATE(1);
else
@@ -926,16 +934,16 @@ static struct ltc2983_sensor *ltc2983_thermistor_new(
steinhart,
64, false);
if (IS_ERR(thermistor->custom)) {
- of_node_put(phandle);
- return ERR_CAST(thermistor->custom);
+ ret = PTR_ERR(thermistor->custom);
+ goto fail;
}
}
/* set common parameters */
thermistor->sensor.fault_handler = ltc2983_common_fault_handler;
thermistor->sensor.assign_chan = ltc2983_thermistor_assign_chan;
- ret = of_property_read_u32(child, "adi,excitation-current-nanoamp",
- &excitation_current);
+ ret = fwnode_property_read_u32(child, "adi,excitation-current-nanoamp",
+ &excitation_current);
if (ret) {
/* Auto range is not allowed for custom sensors */
if (sensor->type >= LTC2983_SENSOR_THERMISTOR_STEINHART)
@@ -999,17 +1007,16 @@ static struct ltc2983_sensor *ltc2983_thermistor_new(
}
}
- of_node_put(phandle);
+ fwnode_handle_put(ref);
return &thermistor->sensor;
fail:
- of_node_put(phandle);
+ fwnode_handle_put(ref);
return ERR_PTR(ret);
}
-static struct ltc2983_sensor *ltc2983_diode_new(
- const struct device_node *child,
- const struct ltc2983_data *st,
- const struct ltc2983_sensor *sensor)
+static struct ltc2983_sensor *
+ltc2983_diode_new(const struct fwnode_handle *child, const struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
{
struct ltc2983_diode *diode;
u32 temp = 0, excitation_current = 0;
@@ -1019,13 +1026,13 @@ static struct ltc2983_sensor *ltc2983_diode_new(
if (!diode)
return ERR_PTR(-ENOMEM);
- if (of_property_read_bool(child, "adi,single-ended"))
+ if (fwnode_property_read_bool(child, "adi,single-ended"))
diode->sensor_config = LTC2983_DIODE_SGL(1);
- if (of_property_read_bool(child, "adi,three-conversion-cycles"))
+ if (fwnode_property_read_bool(child, "adi,three-conversion-cycles"))
diode->sensor_config |= LTC2983_DIODE_3_CONV_CYCLE(1);
- if (of_property_read_bool(child, "adi,average-on"))
+ if (fwnode_property_read_bool(child, "adi,average-on"))
diode->sensor_config |= LTC2983_DIODE_AVERAGE_ON(1);
/* validate channel index */
@@ -1040,8 +1047,8 @@ static struct ltc2983_sensor *ltc2983_diode_new(
diode->sensor.fault_handler = ltc2983_common_fault_handler;
diode->sensor.assign_chan = ltc2983_diode_assign_chan;
- ret = of_property_read_u32(child, "adi,excitation-current-microamp",
- &excitation_current);
+ ret = fwnode_property_read_u32(child, "adi,excitation-current-microamp",
+ &excitation_current);
if (!ret) {
switch (excitation_current) {
case 10:
@@ -1064,7 +1071,7 @@ static struct ltc2983_sensor *ltc2983_diode_new(
}
}
- of_property_read_u32(child, "adi,ideal-factor-value", &temp);
+ fwnode_property_read_u32(child, "adi,ideal-factor-value", &temp);
/* 2^20 resolution */
diode->ideal_factor_value = __convert_to_raw(temp, 1048576);
@@ -1072,7 +1079,7 @@ static struct ltc2983_sensor *ltc2983_diode_new(
return &diode->sensor;
}
-static struct ltc2983_sensor *ltc2983_r_sense_new(struct device_node *child,
+static struct ltc2983_sensor *ltc2983_r_sense_new(struct fwnode_handle *child,
struct ltc2983_data *st,
const struct ltc2983_sensor *sensor)
{
@@ -1091,7 +1098,7 @@ static struct ltc2983_sensor *ltc2983_r_sense_new(struct device_node *child,
return ERR_PTR(-EINVAL);
}
- ret = of_property_read_u32(child, "adi,rsense-val-milli-ohms", &temp);
+ ret = fwnode_property_read_u32(child, "adi,rsense-val-milli-ohms", &temp);
if (ret) {
dev_err(&st->spi->dev, "Property adi,rsense-val-milli-ohms missing\n");
return ERR_PTR(-EINVAL);
@@ -1110,7 +1117,7 @@ static struct ltc2983_sensor *ltc2983_r_sense_new(struct device_node *child,
return &rsense->sensor;
}
-static struct ltc2983_sensor *ltc2983_adc_new(struct device_node *child,
+static struct ltc2983_sensor *ltc2983_adc_new(struct fwnode_handle *child,
struct ltc2983_data *st,
const struct ltc2983_sensor *sensor)
{
@@ -1120,7 +1127,7 @@ static struct ltc2983_sensor *ltc2983_adc_new(struct device_node *child,
if (!adc)
return ERR_PTR(-ENOMEM);
- if (of_property_read_bool(child, "adi,single-ended"))
+ if (fwnode_property_read_bool(child, "adi,single-ended"))
adc->single_ended = true;
if (!adc->single_ended &&
@@ -1264,17 +1271,15 @@ static irqreturn_t ltc2983_irq_handler(int irq, void *data)
static int ltc2983_parse_dt(struct ltc2983_data *st)
{
- struct device_node *child;
struct device *dev = &st->spi->dev;
+ struct fwnode_handle *child;
int ret = 0, chan = 0, channel_avail_mask = 0;
- of_property_read_u32(dev->of_node, "adi,mux-delay-config-us",
- &st->mux_delay_config);
+ device_property_read_u32(dev, "adi,mux-delay-config-us", &st->mux_delay_config);
- of_property_read_u32(dev->of_node, "adi,filter-notch-freq",
- &st->filter_notch_freq);
+ device_property_read_u32(dev, "adi,filter-notch-freq", &st->filter_notch_freq);
- st->num_channels = of_get_available_child_count(dev->of_node);
+ st->num_channels = device_get_child_node_count(dev);
if (!st->num_channels) {
dev_err(&st->spi->dev, "At least one channel must be given!");
return -EINVAL;
@@ -1286,10 +1291,10 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
return -ENOMEM;
st->iio_channels = st->num_channels;
- for_each_available_child_of_node(dev->of_node, child) {
+ device_for_each_child_node(dev, child) {
struct ltc2983_sensor sensor;
- ret = of_property_read_u32(child, "reg", &sensor.chan);
+ ret = fwnode_property_read_u32(child, "reg", &sensor.chan);
if (ret) {
dev_err(dev, "reg property must given for child nodes\n");
goto put_child;
@@ -1299,8 +1304,8 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
if (sensor.chan < LTC2983_MIN_CHANNELS_NR ||
sensor.chan > LTC2983_MAX_CHANNELS_NR) {
ret = -EINVAL;
- dev_err(dev,
- "chan:%d must be from 1 to 20\n", sensor.chan);
+ dev_err(dev, "chan:%d must be from %u to %u\n", sensor.chan,
+ LTC2983_MIN_CHANNELS_NR, LTC2983_MAX_CHANNELS_NR);
goto put_child;
} else if (channel_avail_mask & BIT(sensor.chan)) {
ret = -EINVAL;
@@ -1308,8 +1313,7 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
goto put_child;
}
- ret = of_property_read_u32(child, "adi,sensor-type",
- &sensor.type);
+ ret = fwnode_property_read_u32(child, "adi,sensor-type", &sensor.type);
if (ret) {
dev_err(dev,
"adi,sensor-type property must given for child nodes\n");
@@ -1363,7 +1367,7 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
return 0;
put_child:
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
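device_for_each_child_node() holds a reference on the current child fwnode, so any early exit from the loop must drop it, which is what the put_child label above now does with fwnode_handle_put(). A minimal sketch of that idiom, using a hypothetical parse_children() that only reads the "reg" property (needs <linux/property.h>):

static int parse_children(struct device *dev)
{
	struct fwnode_handle *child;
	u32 reg;
	int ret;

	device_for_each_child_node(dev, child) {
		ret = fwnode_property_read_u32(child, "reg", &reg);
		if (ret) {
			fwnode_handle_put(child);	/* drop the loop's reference */
			return ret;
		}
		dev_dbg(dev, "child at reg %u\n", reg);
	}

	return 0;
}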
diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c
index 54840881259a..8307aae2cb45 100644
--- a/drivers/iio/temperature/max31856.c
+++ b/drivers/iio/temperature/max31856.c
@@ -7,9 +7,11 @@
*/
#include <linux/ctype.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -422,9 +424,7 @@ static int max31856_probe(struct spi_device *spi)
indio_dev->channels = max31856_channels;
indio_dev->num_channels = ARRAY_SIZE(max31856_channels);
- ret = of_property_read_u32(spi->dev.of_node, "thermocouple-type",
- &data->thermocouple_type);
-
+ ret = device_property_read_u32(&spi->dev, "thermocouple-type", &data->thermocouple_type);
if (ret) {
dev_info(&spi->dev,
"Could not read thermocouple type DT property, configuring as a K-Type\n");
diff --git a/drivers/iio/temperature/max31865.c b/drivers/iio/temperature/max31865.c
index 86c3f3509a26..e3bb78184c6e 100644
--- a/drivers/iio/temperature/max31865.c
+++ b/drivers/iio/temperature/max31865.c
@@ -12,9 +12,11 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#include <asm/unaligned.h>
@@ -305,7 +307,7 @@ static int max31865_probe(struct spi_device *spi)
indio_dev->channels = max31865_channels;
indio_dev->num_channels = ARRAY_SIZE(max31865_channels);
- if (of_property_read_bool(spi->dev.of_node, "maxim,3-wire")) {
+ if (device_property_read_bool(&spi->dev, "maxim,3-wire")) {
/* select 3 wire */
data->three_wire = 1;
} else {
diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
index 2a4b75897910..f1a8704e6cc1 100644
--- a/drivers/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/iio/trigger/iio-trig-sysfs.c
@@ -176,16 +176,15 @@ out1:
static int iio_sysfs_trigger_remove(int id)
{
- bool foundit = false;
- struct iio_sysfs_trig *t;
+ struct iio_sysfs_trig *t = NULL, *iter;
mutex_lock(&iio_sysfs_trig_list_mut);
- list_for_each_entry(t, &iio_sysfs_trig_list, l)
- if (id == t->id) {
- foundit = true;
+ list_for_each_entry(iter, &iio_sysfs_trig_list, l)
+ if (id == iter->id) {
+ t = iter;
break;
}
- if (!foundit) {
+ if (!t) {
mutex_unlock(&iio_sysfs_trig_list_mut);
return -EINVAL;
}
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e5a668ce884d..1365c9dfb5f2 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1793,8 +1793,6 @@ EXPORT_SYMBOL(input_reset_device);
static int input_inhibit_device(struct input_dev *dev)
{
- int ret = 0;
-
mutex_lock(&dev->mutex);
if (dev->inhibited)
@@ -1816,7 +1814,7 @@ static int input_inhibit_device(struct input_dev *dev)
out:
mutex_unlock(&dev->mutex);
- return ret;
+ return 0;
}
static int input_uninhibit_device(struct input_dev *dev)
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 3b23078bc7b5..505a032e2786 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -399,4 +399,15 @@ config JOYSTICK_N64
Say Y here if you want enable support for the four
built-in controller ports on the Nintendo 64 console.
+config JOYSTICK_SENSEHAT
+ tristate "Raspberry Pi Sense HAT joystick"
+ depends on INPUT && I2C
+ select MFD_SIMPLE_MFD_I2C
+ help
+ Say Y here if you want to enable the driver for the
+ Raspberry Pi Sense HAT.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sensehat_joystick.
+
endif
diff --git a/drivers/input/joystick/Makefile b/drivers/input/joystick/Makefile
index 5174b8aba2dd..3937535f0098 100644
--- a/drivers/input/joystick/Makefile
+++ b/drivers/input/joystick/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_JOYSTICK_N64) += n64joy.o
obj-$(CONFIG_JOYSTICK_PSXPAD_SPI) += psxpad-spi.o
obj-$(CONFIG_JOYSTICK_PXRC) += pxrc.o
obj-$(CONFIG_JOYSTICK_QWIIC) += qwiic-joystick.o
+obj-$(CONFIG_JOYSTICK_SENSEHAT) += sensehat-joystick.o
obj-$(CONFIG_JOYSTICK_SIDEWINDER) += sidewinder.o
obj-$(CONFIG_JOYSTICK_SPACEBALL) += spaceball.o
obj-$(CONFIG_JOYSTICK_SPACEORB) += spaceorb.o
diff --git a/drivers/input/joystick/sensehat-joystick.c b/drivers/input/joystick/sensehat-joystick.c
new file mode 100644
index 000000000000..5ad1fe4ff496
--- /dev/null
+++ b/drivers/input/joystick/sensehat-joystick.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Raspberry Pi Sense HAT joystick driver
+ * http://raspberrypi.org
+ *
+ * Copyright (C) 2015 Raspberry Pi
+ * Copyright (C) 2021 Charles Mirabile, Mwesigwa Guma, Joel Savitz
+ *
+ * Original Author: Serge Schneider
+ * Revised for upstream Linux by: Charles Mirabile, Mwesigwa Guma, Joel Savitz
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/property.h>
+
+#define JOYSTICK_SMB_REG 0xf2
+
+struct sensehat_joystick {
+ struct platform_device *pdev;
+ struct input_dev *keys_dev;
+ unsigned long prev_states;
+ struct regmap *regmap;
+};
+
+static const unsigned int keymap[] = {
+ BTN_DPAD_DOWN, BTN_DPAD_RIGHT, BTN_DPAD_UP, BTN_SELECT, BTN_DPAD_LEFT,
+};
+
+static irqreturn_t sensehat_joystick_report(int irq, void *cookie)
+{
+ struct sensehat_joystick *sensehat_joystick = cookie;
+ unsigned long curr_states, changes;
+ unsigned int keys;
+ int error;
+ int i;
+
+ error = regmap_read(sensehat_joystick->regmap, JOYSTICK_SMB_REG, &keys);
+ if (error < 0) {
+ dev_err(&sensehat_joystick->pdev->dev,
+ "Failed to read joystick state: %d", error);
+ return IRQ_NONE;
+ }
+ curr_states = keys;
+ bitmap_xor(&changes, &curr_states, &sensehat_joystick->prev_states,
+ ARRAY_SIZE(keymap));
+
+ for_each_set_bit(i, &changes, ARRAY_SIZE(keymap))
+ input_report_key(sensehat_joystick->keys_dev, keymap[i],
+ curr_states & BIT(i));
+
+ input_sync(sensehat_joystick->keys_dev);
+ sensehat_joystick->prev_states = keys;
+ return IRQ_HANDLED;
+}
+
+static int sensehat_joystick_probe(struct platform_device *pdev)
+{
+ struct sensehat_joystick *sensehat_joystick;
+ int error, i, irq;
+
+ sensehat_joystick = devm_kzalloc(&pdev->dev, sizeof(*sensehat_joystick),
+ GFP_KERNEL);
+ if (!sensehat_joystick)
+ return -ENOMEM;
+
+ sensehat_joystick->pdev = pdev;
+
+ sensehat_joystick->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!sensehat_joystick->regmap) {
+ dev_err(&pdev->dev, "unable to get sensehat regmap");
+ return -ENODEV;
+ }
+
+ sensehat_joystick->keys_dev = devm_input_allocate_device(&pdev->dev);
+ if (!sensehat_joystick->keys_dev) {
+ dev_err(&pdev->dev, "Could not allocate input device");
+ return -ENOMEM;
+ }
+
+ sensehat_joystick->keys_dev->name = "Raspberry Pi Sense HAT Joystick";
+ sensehat_joystick->keys_dev->phys = "sensehat-joystick/input0";
+ sensehat_joystick->keys_dev->id.bustype = BUS_I2C;
+
+ __set_bit(EV_KEY, sensehat_joystick->keys_dev->evbit);
+ __set_bit(EV_REP, sensehat_joystick->keys_dev->evbit);
+ for (i = 0; i < ARRAY_SIZE(keymap); i++)
+ __set_bit(keymap[i], sensehat_joystick->keys_dev->keybit);
+
+ error = input_register_device(sensehat_joystick->keys_dev);
+ if (error) {
+ dev_err(&pdev->dev, "Could not register input device");
+ return error;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Could not retrieve interrupt request");
+ return irq;
+ }
+
+ error = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, sensehat_joystick_report,
+ IRQF_ONESHOT, "keys",
+ sensehat_joystick);
+ if (error) {
+ dev_err(&pdev->dev, "IRQ request failed");
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sensehat_joystick_device_id[] = {
+ { .compatible = "raspberrypi,sensehat-joystick" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sensehat_joystick_device_id);
+
+static struct platform_driver sensehat_joystick_driver = {
+ .probe = sensehat_joystick_probe,
+ .driver = {
+ .name = "sensehat-joystick",
+ .of_match_table = sensehat_joystick_device_id,
+ },
+};
+
+module_platform_driver(sensehat_joystick_driver);
+
+MODULE_DESCRIPTION("Raspberry Pi Sense HAT joystick driver");
+MODULE_AUTHOR("Charles Mirabile <cmirabil@redhat.com>");
+MODULE_AUTHOR("Serge Schneider <serge@raspberrypi.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/bcm-keypad.c b/drivers/input/keyboard/bcm-keypad.c
index 2b771c3a5578..166d6023a538 100644
--- a/drivers/input/keyboard/bcm-keypad.c
+++ b/drivers/input/keyboard/bcm-keypad.c
@@ -183,8 +183,7 @@ static void bcm_kp_stop(const struct bcm_kp *kp)
writel(0xFFFFFFFF, kp->base + KPICR0_OFFSET);
writel(0xFFFFFFFF, kp->base + KPICR1_OFFSET);
- if (kp->clk)
- clk_disable_unprepare(kp->clk);
+ clk_disable_unprepare(kp->clk);
}
static int bcm_kp_open(struct input_dev *dev)
diff --git a/drivers/input/keyboard/clps711x-keypad.c b/drivers/input/keyboard/clps711x-keypad.c
index 019dd6ed2c29..939c88655fc0 100644
--- a/drivers/input/keyboard/clps711x-keypad.c
+++ b/drivers/input/keyboard/clps711x-keypad.c
@@ -95,8 +95,7 @@ static int clps711x_keypad_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->syscon =
- syscon_regmap_lookup_by_compatible("cirrus,ep7209-syscon1");
+ priv->syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR(priv->syscon))
return PTR_ERR(priv->syscon);
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 6534dfca60b4..cc73a149da28 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -435,10 +435,13 @@ static __maybe_unused int cros_ec_keyb_resume(struct device *dev)
* but the ckdev->bs_idev will remain NULL when this function exits.
*
* @ckdev: The keyboard device
+ * @expect_buttons_switches: Indicates that EC must report button and/or
+ * switch events
*
* Returns 0 if no error or -error upon error.
*/
-static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev)
+static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev,
+ bool expect_buttons_switches)
{
struct cros_ec_device *ec_dev = ckdev->ec;
struct device *dev = ckdev->dev;
@@ -465,7 +468,7 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev)
switches = get_unaligned_le32(&event_data.switches);
if (!buttons && !switches)
- return 0;
+ return expect_buttons_switches ? -EINVAL : 0;
/*
* We call the non-matrix buttons/switches 'input1', if present.
@@ -516,7 +519,7 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev)
}
/**
- * cros_ec_keyb_register_bs - Register matrix keys
+ * cros_ec_keyb_register_matrix - Register matrix keys
*
* Handles all the bits of the keyboard driver related to matrix keys.
*
@@ -648,12 +651,12 @@ static const struct attribute_group cros_ec_keyb_attr_group = {
.attrs = cros_ec_keyb_attrs,
};
-
static int cros_ec_keyb_probe(struct platform_device *pdev)
{
struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct cros_ec_keyb *ckdev;
+ bool buttons_switches_only = device_get_match_data(dev);
int err;
if (!dev->of_node)
@@ -667,13 +670,16 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
ckdev->dev = dev;
dev_set_drvdata(dev, ckdev);
- err = cros_ec_keyb_register_matrix(ckdev);
- if (err) {
- dev_err(dev, "cannot register matrix inputs: %d\n", err);
- return err;
+ if (!buttons_switches_only) {
+ err = cros_ec_keyb_register_matrix(ckdev);
+ if (err) {
+ dev_err(dev, "cannot register matrix inputs: %d\n",
+ err);
+ return err;
+ }
}
- err = cros_ec_keyb_register_bs(ckdev);
+ err = cros_ec_keyb_register_bs(ckdev, buttons_switches_only);
if (err) {
dev_err(dev, "cannot register non-matrix inputs: %d\n", err);
return err;
@@ -681,7 +687,7 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
err = devm_device_add_group(dev, &cros_ec_keyb_attr_group);
if (err) {
- dev_err(dev, "failed to create attributes. err=%d\n", err);
+ dev_err(dev, "failed to create attributes: %d\n", err);
return err;
}
@@ -710,7 +716,8 @@ static int cros_ec_keyb_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id cros_ec_keyb_of_match[] = {
{ .compatible = "google,cros-ec-keyb" },
- {},
+ { .compatible = "google,cros-ec-keyb-switches", .data = (void *)true },
+ {}
};
MODULE_DEVICE_TABLE(of, cros_ec_keyb_of_match);
#endif
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 272a4f1c6e81..7a3b0664ab4f 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -231,7 +231,6 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
struct ep93xx_keypad *keypad;
const struct matrix_keymap_data *keymap_data;
struct input_dev *input_dev;
- struct resource *res;
int err;
keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL);
@@ -250,11 +249,7 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
if (keypad->irq < 0)
return keypad->irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
-
- keypad->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ keypad->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(keypad->mmio_base))
return PTR_ERR(keypad->mmio_base);
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index d75a8b179a8a..a5dc4ab87fa1 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -131,7 +131,7 @@ static void gpio_keys_quiesce_key(void *data)
if (!bdata->gpiod)
hrtimer_cancel(&bdata->release_timer);
- if (bdata->debounce_use_hrtimer)
+ else if (bdata->debounce_use_hrtimer)
hrtimer_cancel(&bdata->debounce_timer);
else
cancel_delayed_work_sync(&bdata->work);
diff --git a/drivers/input/keyboard/mt6779-keypad.c b/drivers/input/keyboard/mt6779-keypad.c
index 0dbbddc7f298..2e7c9187c10f 100644
--- a/drivers/input/keyboard/mt6779-keypad.c
+++ b/drivers/input/keyboard/mt6779-keypad.c
@@ -24,7 +24,6 @@ struct mt6779_keypad {
struct regmap *regmap;
struct input_dev *input_dev;
struct clk *clk;
- void __iomem *base;
u32 n_rows;
u32 n_cols;
DECLARE_BITMAP(keymap_state, MTK_KPD_NUM_BITS);
@@ -91,6 +90,7 @@ static void mt6779_keypad_clk_disable(void *data)
static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
{
struct mt6779_keypad *keypad;
+ void __iomem *base;
int irq;
u32 debounce;
bool wakeup;
@@ -100,11 +100,11 @@ static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
if (!keypad)
return -ENOMEM;
- keypad->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(keypad->base))
- return PTR_ERR(keypad->base);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- keypad->regmap = devm_regmap_init_mmio(&pdev->dev, keypad->base,
+ keypad->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&mt6779_keypad_regmap_cfg);
if (IS_ERR(keypad->regmap)) {
dev_err(&pdev->dev,
diff --git a/drivers/input/keyboard/sun4i-lradc-keys.c b/drivers/input/keyboard/sun4i-lradc-keys.c
index 4a796bed48ac..15c15c0958b0 100644
--- a/drivers/input/keyboard/sun4i-lradc-keys.c
+++ b/drivers/input/keyboard/sun4i-lradc-keys.c
@@ -14,6 +14,7 @@
* there are no boards known to use channel 1.
*/
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/input.h>
@@ -22,7 +23,10 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/pm_wakeup.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#define LRADC_CTRL 0x00
@@ -58,10 +62,12 @@
/* struct lradc_variant - Describe sun4i-a10-lradc-keys hardware variant
* @divisor_numerator: The numerator of lradc Vref internally divisor
* @divisor_denominator: The denominator of lradc Vref internally divisor
+ * @has_clock_reset: If the binding requires a clock and reset
*/
struct lradc_variant {
u8 divisor_numerator;
u8 divisor_denominator;
+ bool has_clock_reset;
};
static const struct lradc_variant lradc_variant_a10 = {
@@ -74,6 +80,12 @@ static const struct lradc_variant r_lradc_variant_a83t = {
.divisor_denominator = 4
};
+static const struct lradc_variant lradc_variant_r329 = {
+ .divisor_numerator = 3,
+ .divisor_denominator = 4,
+ .has_clock_reset = true,
+};
+
struct sun4i_lradc_keymap {
u32 voltage;
u32 keycode;
@@ -83,6 +95,8 @@ struct sun4i_lradc_data {
struct device *dev;
struct input_dev *input;
void __iomem *base;
+ struct clk *clk;
+ struct reset_control *reset;
struct regulator *vref_supply;
struct sun4i_lradc_keymap *chan0_map;
const struct lradc_variant *variant;
@@ -140,6 +154,14 @@ static int sun4i_lradc_open(struct input_dev *dev)
if (error)
return error;
+ error = reset_control_deassert(lradc->reset);
+ if (error)
+ goto err_disable_reg;
+
+ error = clk_prepare_enable(lradc->clk);
+ if (error)
+ goto err_assert_reset;
+
lradc->vref = regulator_get_voltage(lradc->vref_supply) *
lradc->variant->divisor_numerator /
lradc->variant->divisor_denominator;
@@ -153,6 +175,13 @@ static int sun4i_lradc_open(struct input_dev *dev)
writel(CHAN0_KEYUP_IRQ | CHAN0_KEYDOWN_IRQ, lradc->base + LRADC_INTC);
return 0;
+
+err_assert_reset:
+ reset_control_assert(lradc->reset);
+err_disable_reg:
+ regulator_disable(lradc->vref_supply);
+
+ return error;
}
static void sun4i_lradc_close(struct input_dev *dev)
@@ -164,6 +193,8 @@ static void sun4i_lradc_close(struct input_dev *dev)
SAMPLE_RATE(2), lradc->base + LRADC_CTRL);
writel(0, lradc->base + LRADC_INTC);
+ clk_disable_unprepare(lradc->clk);
+ reset_control_assert(lradc->reset);
regulator_disable(lradc->vref_supply);
}
@@ -226,8 +257,7 @@ static int sun4i_lradc_probe(struct platform_device *pdev)
{
struct sun4i_lradc_data *lradc;
struct device *dev = &pdev->dev;
- int i;
- int error;
+ int error, i, irq;
lradc = devm_kzalloc(dev, sizeof(struct sun4i_lradc_data), GFP_KERNEL);
if (!lradc)
@@ -243,6 +273,16 @@ static int sun4i_lradc_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (lradc->variant->has_clock_reset) {
+ lradc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(lradc->clk))
+ return PTR_ERR(lradc->clk);
+
+ lradc->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(lradc->reset))
+ return PTR_ERR(lradc->reset);
+ }
+
lradc->vref_supply = devm_regulator_get(dev, "vref");
if (IS_ERR(lradc->vref_supply))
return PTR_ERR(lradc->vref_supply);
@@ -272,8 +312,11 @@ static int sun4i_lradc_probe(struct platform_device *pdev)
if (IS_ERR(lradc->base))
return PTR_ERR(lradc->base);
- error = devm_request_irq(dev, platform_get_irq(pdev, 0),
- sun4i_lradc_irq, 0,
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ error = devm_request_irq(dev, irq, sun4i_lradc_irq, 0,
"sun4i-a10-lradc-keys", lradc);
if (error)
return error;
@@ -282,6 +325,16 @@ static int sun4i_lradc_probe(struct platform_device *pdev)
if (error)
return error;
+ if (device_property_read_bool(dev, "wakeup-source")) {
+ error = dev_pm_set_wake_irq(dev, irq);
+ if (error)
+ dev_warn(dev,
+ "Failed to set IRQ %d as a wake IRQ: %d\n",
+ irq, error);
+ else
+ device_set_wakeup_capable(dev, true);
+ }
+
return 0;
}
@@ -290,6 +343,8 @@ static const struct of_device_id sun4i_lradc_of_match[] = {
.data = &lradc_variant_a10 },
{ .compatible = "allwinner,sun8i-a83t-r-lradc",
.data = &r_lradc_variant_a83t },
+ { .compatible = "allwinner,sun50i-r329-lradc",
+ .data = &lradc_variant_r329 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun4i_lradc_of_match);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index dd5227cf8696..a18ab7358d8f 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -762,6 +762,16 @@ config INPUT_IQS626A
To compile this driver as a module, choose M here: the
module will be called iqs626a.
+config INPUT_IQS7222
+ tristate "Azoteq IQS7222A/B/C capacitive touch controller"
+ depends on I2C
+ help
+ Say Y to enable support for the Azoteq IQS7222A/B/C family
+ of capacitive touch controllers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called iqs7222.
+
config INPUT_CMA3000
tristate "VTI CMA3000 Tri-axis accelerometer"
help
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index b92c53a6b5ae..28dfc444f0a9 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
obj-$(CONFIG_INPUT_IQS269A) += iqs269a.o
obj-$(CONFIG_INPUT_IQS626A) += iqs626a.o
+obj-$(CONFIG_INPUT_IQS7222) += iqs7222.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 8a36d78fed63..946bf75aa106 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -639,7 +639,7 @@ static int ati_remote2_urb_init(struct ati_remote2 *ar2)
return -ENOMEM;
pipe = usb_rcvintpipe(udev, ar2->ep[i]->bEndpointAddress);
- maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(udev, pipe);
maxp = maxp > 4 ? 4 : maxp;
usb_fill_int_urb(ar2->urb[i], udev, pipe, ar2->buf[i], maxp,
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index f515fae465c3..728325a2d574 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -745,7 +745,7 @@ static int cm109_usb_probe(struct usb_interface *intf,
/* get a handle to the interrupt data pipe */
pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
- ret = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ ret = usb_maxpacket(udev, pipe);
if (ret != USB_PKT_LEN)
dev_err(&intf->dev, "invalid payload size %d, expected %d\n",
ret, USB_PKT_LEN);
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
new file mode 100644
index 000000000000..6b4138771a3f
--- /dev/null
+++ b/drivers/input/misc/iqs7222.c
@@ -0,0 +1,2446 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Azoteq IQS7222A/B/C Capacitive Touch Controller
+ *
+ * Copyright (C) 2022 Jeff LaBundy <jeff@labundy.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define IQS7222_PROD_NUM 0x00
+#define IQS7222_PROD_NUM_A 840
+#define IQS7222_PROD_NUM_B 698
+#define IQS7222_PROD_NUM_C 863
+
+#define IQS7222_SYS_STATUS 0x10
+#define IQS7222_SYS_STATUS_RESET BIT(3)
+#define IQS7222_SYS_STATUS_ATI_ERROR BIT(1)
+#define IQS7222_SYS_STATUS_ATI_ACTIVE BIT(0)
+
+#define IQS7222_CHAN_SETUP_0_REF_MODE_MASK GENMASK(15, 14)
+#define IQS7222_CHAN_SETUP_0_REF_MODE_FOLLOW BIT(15)
+#define IQS7222_CHAN_SETUP_0_REF_MODE_REF BIT(14)
+#define IQS7222_CHAN_SETUP_0_CHAN_EN BIT(8)
+
+#define IQS7222_SLDR_SETUP_0_CHAN_CNT_MASK GENMASK(2, 0)
+#define IQS7222_SLDR_SETUP_2_RES_MASK GENMASK(15, 8)
+#define IQS7222_SLDR_SETUP_2_RES_SHIFT 8
+#define IQS7222_SLDR_SETUP_2_TOP_SPEED_MASK GENMASK(7, 0)
+#define IQS7222_SLDR_SETUP_3_CHAN_SEL_MASK GENMASK(9, 0)
+
+#define IQS7222_GPIO_SETUP_0_GPIO_EN BIT(0)
+
+#define IQS7222_SYS_SETUP 0xD0
+#define IQS7222_SYS_SETUP_INTF_MODE_MASK GENMASK(7, 6)
+#define IQS7222_SYS_SETUP_INTF_MODE_TOUCH BIT(7)
+#define IQS7222_SYS_SETUP_INTF_MODE_EVENT BIT(6)
+#define IQS7222_SYS_SETUP_PWR_MODE_MASK GENMASK(5, 4)
+#define IQS7222_SYS_SETUP_PWR_MODE_AUTO IQS7222_SYS_SETUP_PWR_MODE_MASK
+#define IQS7222_SYS_SETUP_REDO_ATI BIT(2)
+#define IQS7222_SYS_SETUP_ACK_RESET BIT(0)
+
+#define IQS7222_EVENT_MASK_ATI BIT(12)
+
+#define IQS7222_COMMS_HOLD BIT(0)
+#define IQS7222_COMMS_ERROR 0xEEEE
+#define IQS7222_COMMS_RETRY_MS 50
+#define IQS7222_COMMS_TIMEOUT_MS 100
+#define IQS7222_RESET_TIMEOUT_MS 250
+#define IQS7222_ATI_TIMEOUT_MS 2000
+
+#define IQS7222_MAX_COLS_STAT 8
+#define IQS7222_MAX_COLS_CYCLE 3
+#define IQS7222_MAX_COLS_GLBL 3
+#define IQS7222_MAX_COLS_BTN 3
+#define IQS7222_MAX_COLS_CHAN 6
+#define IQS7222_MAX_COLS_FILT 2
+#define IQS7222_MAX_COLS_SLDR 11
+#define IQS7222_MAX_COLS_GPIO 3
+#define IQS7222_MAX_COLS_SYS 13
+
+#define IQS7222_MAX_CHAN 20
+#define IQS7222_MAX_SLDR 2
+
+#define IQS7222_NUM_RETRIES 5
+#define IQS7222_REG_OFFSET 0x100
+
+enum iqs7222_reg_key_id {
+ IQS7222_REG_KEY_NONE,
+ IQS7222_REG_KEY_PROX,
+ IQS7222_REG_KEY_TOUCH,
+ IQS7222_REG_KEY_DEBOUNCE,
+ IQS7222_REG_KEY_TAP,
+ IQS7222_REG_KEY_AXIAL,
+ IQS7222_REG_KEY_WHEEL,
+ IQS7222_REG_KEY_NO_WHEEL,
+ IQS7222_REG_KEY_RESERVED
+};
+
+enum iqs7222_reg_grp_id {
+ IQS7222_REG_GRP_STAT,
+ IQS7222_REG_GRP_CYCLE,
+ IQS7222_REG_GRP_GLBL,
+ IQS7222_REG_GRP_BTN,
+ IQS7222_REG_GRP_CHAN,
+ IQS7222_REG_GRP_FILT,
+ IQS7222_REG_GRP_SLDR,
+ IQS7222_REG_GRP_GPIO,
+ IQS7222_REG_GRP_SYS,
+ IQS7222_NUM_REG_GRPS
+};
+
+static const char * const iqs7222_reg_grp_names[] = {
+ [IQS7222_REG_GRP_CYCLE] = "cycle",
+ [IQS7222_REG_GRP_CHAN] = "channel",
+ [IQS7222_REG_GRP_SLDR] = "slider",
+ [IQS7222_REG_GRP_GPIO] = "gpio",
+};
+
+static const unsigned int iqs7222_max_cols[] = {
+ [IQS7222_REG_GRP_STAT] = IQS7222_MAX_COLS_STAT,
+ [IQS7222_REG_GRP_CYCLE] = IQS7222_MAX_COLS_CYCLE,
+ [IQS7222_REG_GRP_GLBL] = IQS7222_MAX_COLS_GLBL,
+ [IQS7222_REG_GRP_BTN] = IQS7222_MAX_COLS_BTN,
+ [IQS7222_REG_GRP_CHAN] = IQS7222_MAX_COLS_CHAN,
+ [IQS7222_REG_GRP_FILT] = IQS7222_MAX_COLS_FILT,
+ [IQS7222_REG_GRP_SLDR] = IQS7222_MAX_COLS_SLDR,
+ [IQS7222_REG_GRP_GPIO] = IQS7222_MAX_COLS_GPIO,
+ [IQS7222_REG_GRP_SYS] = IQS7222_MAX_COLS_SYS,
+};
+
+static const unsigned int iqs7222_gpio_links[] = { 2, 5, 6, };
+
+struct iqs7222_event_desc {
+ const char *name;
+ u16 mask;
+ u16 val;
+ u16 enable;
+ enum iqs7222_reg_key_id reg_key;
+};
+
+static const struct iqs7222_event_desc iqs7222_kp_events[] = {
+ {
+ .name = "event-prox",
+ .enable = BIT(0),
+ .reg_key = IQS7222_REG_KEY_PROX,
+ },
+ {
+ .name = "event-touch",
+ .enable = BIT(1),
+ .reg_key = IQS7222_REG_KEY_TOUCH,
+ },
+};
+
+static const struct iqs7222_event_desc iqs7222_sl_events[] = {
+ { .name = "event-press", },
+ {
+ .name = "event-tap",
+ .mask = BIT(0),
+ .val = BIT(0),
+ .enable = BIT(0),
+ .reg_key = IQS7222_REG_KEY_TAP,
+ },
+ {
+ .name = "event-swipe-pos",
+ .mask = BIT(5) | BIT(1),
+ .val = BIT(1),
+ .enable = BIT(1),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-swipe-neg",
+ .mask = BIT(5) | BIT(1),
+ .val = BIT(5) | BIT(1),
+ .enable = BIT(1),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-flick-pos",
+ .mask = BIT(5) | BIT(2),
+ .val = BIT(2),
+ .enable = BIT(2),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-flick-neg",
+ .mask = BIT(5) | BIT(2),
+ .val = BIT(5) | BIT(2),
+ .enable = BIT(2),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+};
+
+struct iqs7222_reg_grp_desc {
+ u16 base;
+ int num_row;
+ int num_col;
+};
+
+struct iqs7222_dev_desc {
+ u16 prod_num;
+ u16 fw_major;
+ u16 fw_minor;
+ u16 sldr_res;
+ u16 touch_link;
+ u16 wheel_enable;
+ int allow_offset;
+ int event_offset;
+ int comms_offset;
+ struct iqs7222_reg_grp_desc reg_grps[IQS7222_NUM_REG_GRPS];
+};
+
+static const struct iqs7222_dev_desc iqs7222_devs[] = {
+ {
+ .prod_num = IQS7222_PROD_NUM_A,
+ .fw_major = 1,
+ .fw_minor = 12,
+ .sldr_res = U8_MAX * 16,
+ .touch_link = 1768,
+ .allow_offset = 9,
+ .event_offset = 10,
+ .comms_offset = 12,
+ .reg_grps = {
+ [IQS7222_REG_GRP_STAT] = {
+ .base = IQS7222_SYS_STATUS,
+ .num_row = 1,
+ .num_col = 8,
+ },
+ [IQS7222_REG_GRP_CYCLE] = {
+ .base = 0x8000,
+ .num_row = 7,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_GLBL] = {
+ .base = 0x8700,
+ .num_row = 1,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_BTN] = {
+ .base = 0x9000,
+ .num_row = 12,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_CHAN] = {
+ .base = 0xA000,
+ .num_row = 12,
+ .num_col = 6,
+ },
+ [IQS7222_REG_GRP_FILT] = {
+ .base = 0xAC00,
+ .num_row = 1,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_SLDR] = {
+ .base = 0xB000,
+ .num_row = 2,
+ .num_col = 11,
+ },
+ [IQS7222_REG_GRP_GPIO] = {
+ .base = 0xC000,
+ .num_row = 1,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_SYS] = {
+ .base = IQS7222_SYS_SETUP,
+ .num_row = 1,
+ .num_col = 13,
+ },
+ },
+ },
+ {
+ .prod_num = IQS7222_PROD_NUM_B,
+ .fw_major = 1,
+ .fw_minor = 43,
+ .event_offset = 10,
+ .comms_offset = 11,
+ .reg_grps = {
+ [IQS7222_REG_GRP_STAT] = {
+ .base = IQS7222_SYS_STATUS,
+ .num_row = 1,
+ .num_col = 6,
+ },
+ [IQS7222_REG_GRP_CYCLE] = {
+ .base = 0x8000,
+ .num_row = 10,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_GLBL] = {
+ .base = 0x8A00,
+ .num_row = 1,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_BTN] = {
+ .base = 0x9000,
+ .num_row = 20,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_CHAN] = {
+ .base = 0xB000,
+ .num_row = 20,
+ .num_col = 4,
+ },
+ [IQS7222_REG_GRP_FILT] = {
+ .base = 0xC400,
+ .num_row = 1,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_SYS] = {
+ .base = IQS7222_SYS_SETUP,
+ .num_row = 1,
+ .num_col = 13,
+ },
+ },
+ },
+ {
+ .prod_num = IQS7222_PROD_NUM_B,
+ .fw_major = 1,
+ .fw_minor = 27,
+ .reg_grps = {
+ [IQS7222_REG_GRP_STAT] = {
+ .base = IQS7222_SYS_STATUS,
+ .num_row = 1,
+ .num_col = 6,
+ },
+ [IQS7222_REG_GRP_CYCLE] = {
+ .base = 0x8000,
+ .num_row = 10,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_GLBL] = {
+ .base = 0x8A00,
+ .num_row = 1,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_BTN] = {
+ .base = 0x9000,
+ .num_row = 20,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_CHAN] = {
+ .base = 0xB000,
+ .num_row = 20,
+ .num_col = 4,
+ },
+ [IQS7222_REG_GRP_FILT] = {
+ .base = 0xC400,
+ .num_row = 1,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_SYS] = {
+ .base = IQS7222_SYS_SETUP,
+ .num_row = 1,
+ .num_col = 10,
+ },
+ },
+ },
+ {
+ .prod_num = IQS7222_PROD_NUM_C,
+ .fw_major = 2,
+ .fw_minor = 6,
+ .sldr_res = U16_MAX,
+ .touch_link = 1686,
+ .wheel_enable = BIT(3),
+ .event_offset = 9,
+ .comms_offset = 10,
+ .reg_grps = {
+ [IQS7222_REG_GRP_STAT] = {
+ .base = IQS7222_SYS_STATUS,
+ .num_row = 1,
+ .num_col = 6,
+ },
+ [IQS7222_REG_GRP_CYCLE] = {
+ .base = 0x8000,
+ .num_row = 5,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_GLBL] = {
+ .base = 0x8500,
+ .num_row = 1,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_BTN] = {
+ .base = 0x9000,
+ .num_row = 10,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_CHAN] = {
+ .base = 0xA000,
+ .num_row = 10,
+ .num_col = 6,
+ },
+ [IQS7222_REG_GRP_FILT] = {
+ .base = 0xAA00,
+ .num_row = 1,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_SLDR] = {
+ .base = 0xB000,
+ .num_row = 2,
+ .num_col = 10,
+ },
+ [IQS7222_REG_GRP_GPIO] = {
+ .base = 0xC000,
+ .num_row = 3,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_SYS] = {
+ .base = IQS7222_SYS_SETUP,
+ .num_row = 1,
+ .num_col = 12,
+ },
+ },
+ },
+ {
+ .prod_num = IQS7222_PROD_NUM_C,
+ .fw_major = 1,
+ .fw_minor = 13,
+ .sldr_res = U16_MAX,
+ .touch_link = 1674,
+ .wheel_enable = BIT(3),
+ .event_offset = 9,
+ .comms_offset = 10,
+ .reg_grps = {
+ [IQS7222_REG_GRP_STAT] = {
+ .base = IQS7222_SYS_STATUS,
+ .num_row = 1,
+ .num_col = 6,
+ },
+ [IQS7222_REG_GRP_CYCLE] = {
+ .base = 0x8000,
+ .num_row = 5,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_GLBL] = {
+ .base = 0x8500,
+ .num_row = 1,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_BTN] = {
+ .base = 0x9000,
+ .num_row = 10,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_CHAN] = {
+ .base = 0xA000,
+ .num_row = 10,
+ .num_col = 6,
+ },
+ [IQS7222_REG_GRP_FILT] = {
+ .base = 0xAA00,
+ .num_row = 1,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_SLDR] = {
+ .base = 0xB000,
+ .num_row = 2,
+ .num_col = 10,
+ },
+ [IQS7222_REG_GRP_GPIO] = {
+ .base = 0xC000,
+ .num_row = 1,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_SYS] = {
+ .base = IQS7222_SYS_SETUP,
+ .num_row = 1,
+ .num_col = 11,
+ },
+ },
+ },
+};
+
+struct iqs7222_prop_desc {
+ const char *name;
+ enum iqs7222_reg_grp_id reg_grp;
+ enum iqs7222_reg_key_id reg_key;
+ int reg_offset;
+ int reg_shift;
+ int reg_width;
+ int val_pitch;
+ int val_min;
+ int val_max;
+ bool invert;
+ const char *label;
+};
+
+static const struct iqs7222_prop_desc iqs7222_props[] = {
+ {
+ .name = "azoteq,conv-period",
+ .reg_grp = IQS7222_REG_GRP_CYCLE,
+ .reg_offset = 0,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "conversion period",
+ },
+ {
+ .name = "azoteq,conv-frac",
+ .reg_grp = IQS7222_REG_GRP_CYCLE,
+ .reg_offset = 0,
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "conversion frequency fractional divider",
+ },
+ {
+ .name = "azoteq,rx-float-inactive",
+ .reg_grp = IQS7222_REG_GRP_CYCLE,
+ .reg_offset = 1,
+ .reg_shift = 6,
+ .reg_width = 1,
+ .invert = true,
+ },
+ {
+ .name = "azoteq,dead-time-enable",
+ .reg_grp = IQS7222_REG_GRP_CYCLE,
+ .reg_offset = 1,
+ .reg_shift = 5,
+ .reg_width = 1,
+ },
+ {
+ .name = "azoteq,tx-freq-fosc",
+ .reg_grp = IQS7222_REG_GRP_CYCLE,
+ .reg_offset = 1,
+ .reg_shift = 4,
+ .reg_width = 1,
+ },
+ {
+ .name = "azoteq,vbias-enable",
+ .reg_grp = IQS7222_REG_GRP_CYCLE,
+ .reg_offset = 1,
+ .reg_shift = 3,
+ .reg_width = 1,
+ },
+ {
+ .name = "azoteq,sense-mode",
+ .reg_grp = IQS7222_REG_GRP_CYCLE,
+ .reg_offset = 1,
+ .reg_shift = 0,
+ .reg_width = 3,
+ .val_max = 3,
+ .label = "sensing mode",
+ },
+ {
+ .name = "azoteq,iref-enable",
+ .reg_grp = IQS7222_REG_GRP_CYCLE,
+ .reg_offset = 2,
+ .reg_shift = 10,
+ .reg_width = 1,
+ },
+ {
+ .name = "azoteq,iref-level",
+ .reg_grp = IQS7222_REG_GRP_CYCLE,
+ .reg_offset = 2,
+ .reg_shift = 4,
+ .reg_width = 4,
+ .label = "current reference level",
+ },
+ {
+ .name = "azoteq,iref-trim",
+ .reg_grp = IQS7222_REG_GRP_CYCLE,
+ .reg_offset = 2,
+ .reg_shift = 0,
+ .reg_width = 4,
+ .label = "current reference trim",
+ },
+ {
+ .name = "azoteq,rf-filt-enable",
+ .reg_grp = IQS7222_REG_GRP_GLBL,
+ .reg_offset = 0,
+ .reg_shift = 15,
+ .reg_width = 1,
+ },
+ {
+ .name = "azoteq,max-counts",
+ .reg_grp = IQS7222_REG_GRP_GLBL,
+ .reg_offset = 0,
+ .reg_shift = 13,
+ .reg_width = 2,
+ .label = "maximum counts",
+ },
+ {
+ .name = "azoteq,auto-mode",
+ .reg_grp = IQS7222_REG_GRP_GLBL,
+ .reg_offset = 0,
+ .reg_shift = 2,
+ .reg_width = 2,
+ .label = "number of conversions",
+ },
+ {
+ .name = "azoteq,ati-frac-div-fine",
+ .reg_grp = IQS7222_REG_GRP_GLBL,
+ .reg_offset = 1,
+ .reg_shift = 9,
+ .reg_width = 5,
+ .label = "ATI fine fractional divider",
+ },
+ {
+ .name = "azoteq,ati-frac-div-coarse",
+ .reg_grp = IQS7222_REG_GRP_GLBL,
+ .reg_offset = 1,
+ .reg_shift = 0,
+ .reg_width = 5,
+ .label = "ATI coarse fractional divider",
+ },
+ {
+ .name = "azoteq,ati-comp-select",
+ .reg_grp = IQS7222_REG_GRP_GLBL,
+ .reg_offset = 2,
+ .reg_shift = 0,
+ .reg_width = 10,
+ .label = "ATI compensation selection",
+ },
+ {
+ .name = "azoteq,ati-band",
+ .reg_grp = IQS7222_REG_GRP_CHAN,
+ .reg_offset = 0,
+ .reg_shift = 12,
+ .reg_width = 2,
+ .label = "ATI band",
+ },
+ {
+ .name = "azoteq,global-halt",
+ .reg_grp = IQS7222_REG_GRP_CHAN,
+ .reg_offset = 0,
+ .reg_shift = 11,
+ .reg_width = 1,
+ },
+ {
+ .name = "azoteq,invert-enable",
+ .reg_grp = IQS7222_REG_GRP_CHAN,
+ .reg_offset = 0,
+ .reg_shift = 10,
+ .reg_width = 1,
+ },
+ {
+ .name = "azoteq,dual-direction",
+ .reg_grp = IQS7222_REG_GRP_CHAN,
+ .reg_offset = 0,
+ .reg_shift = 9,
+ .reg_width = 1,
+ },
+ {
+ .name = "azoteq,samp-cap-double",
+ .reg_grp = IQS7222_REG_GRP_CHAN,
+ .reg_offset = 0,
+ .reg_shift = 3,
+ .reg_width = 1,
+ },
+ {
+ .name = "azoteq,vref-half",
+ .reg_grp = IQS7222_REG_GRP_CHAN,
+ .reg_offset = 0,
+ .reg_shift = 2,
+ .reg_width = 1,
+ },
+ {
+ .name = "azoteq,proj-bias",
+ .reg_grp = IQS7222_REG_GRP_CHAN,
+ .reg_offset = 0,
+ .reg_shift = 0,
+ .reg_width = 2,
+ .label = "projected bias current",
+ },
+ {
+ .name = "azoteq,ati-target",
+ .reg_grp = IQS7222_REG_GRP_CHAN,
+ .reg_offset = 1,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_pitch = 8,
+ .label = "ATI target",
+ },
+ {
+ .name = "azoteq,ati-base",
+ .reg_grp = IQS7222_REG_GRP_CHAN,
+ .reg_offset = 1,
+ .reg_shift = 3,
+ .reg_width = 5,
+ .val_pitch = 16,
+ .label = "ATI base",
+ },
+ {
+ .name = "azoteq,ati-mode",
+ .reg_grp = IQS7222_REG_GRP_CHAN,
+ .reg_offset = 1,
+ .reg_shift = 0,
+ .reg_width = 3,
+ .val_max = 5,
+ .label = "ATI mode",
+ },
+ {
+ .name = "azoteq,ati-frac-div-fine",
+ .reg_grp = IQS7222_REG_GRP_CHAN,
+ .reg_offset = 2,
+ .reg_shift = 9,
+ .reg_width = 5,
+ .label = "ATI fine fractional divider",
+ },
+ {
+ .name = "azoteq,ati-frac-mult-coarse",
+ .reg_grp = IQS7222_REG_GRP_CHAN,
+ .reg_offset = 2,
+ .reg_shift = 5,
+ .reg_width = 4,
+ .label = "ATI coarse fractional multiplier",
+ },
+ {
+ .name = "azoteq,ati-frac-div-coarse",
+ .reg_grp = IQS7222_REG_GRP_CHAN,
+ .reg_offset = 2,
+ .reg_shift = 0,
+ .reg_width = 5,
+ .label = "ATI coarse fractional divider",
+ },
+ {
+ .name = "azoteq,ati-comp-div",
+ .reg_grp = IQS7222_REG_GRP_CHAN,
+ .reg_offset = 3,
+ .reg_shift = 11,
+ .reg_width = 5,
+ .label = "ATI compensation divider",
+ },
+ {
+ .name = "azoteq,ati-comp-select",
+ .reg_grp = IQS7222_REG_GRP_CHAN,
+ .reg_offset = 3,
+ .reg_shift = 0,
+ .reg_width = 10,
+ .label = "ATI compensation selection",
+ },
+ {
+ .name = "azoteq,debounce-exit",
+ .reg_grp = IQS7222_REG_GRP_BTN,
+ .reg_key = IQS7222_REG_KEY_DEBOUNCE,
+ .reg_offset = 0,
+ .reg_shift = 12,
+ .reg_width = 4,
+ .label = "debounce exit factor",
+ },
+ {
+ .name = "azoteq,debounce-enter",
+ .reg_grp = IQS7222_REG_GRP_BTN,
+ .reg_key = IQS7222_REG_KEY_DEBOUNCE,
+ .reg_offset = 0,
+ .reg_shift = 8,
+ .reg_width = 4,
+ .label = "debounce entrance factor",
+ },
+ {
+ .name = "azoteq,thresh",
+ .reg_grp = IQS7222_REG_GRP_BTN,
+ .reg_key = IQS7222_REG_KEY_PROX,
+ .reg_offset = 0,
+ .reg_shift = 0,
+ .reg_width = 8,
+ .val_max = 127,
+ .label = "threshold",
+ },
+ {
+ .name = "azoteq,thresh",
+ .reg_grp = IQS7222_REG_GRP_BTN,
+ .reg_key = IQS7222_REG_KEY_TOUCH,
+ .reg_offset = 1,
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "threshold",
+ },
+ {
+ .name = "azoteq,hyst",
+ .reg_grp = IQS7222_REG_GRP_BTN,
+ .reg_key = IQS7222_REG_KEY_TOUCH,
+ .reg_offset = 1,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "hysteresis",
+ },
+ {
+ .name = "azoteq,lta-beta-lp",
+ .reg_grp = IQS7222_REG_GRP_FILT,
+ .reg_offset = 0,
+ .reg_shift = 12,
+ .reg_width = 4,
+ .label = "low-power mode long-term average beta",
+ },
+ {
+ .name = "azoteq,lta-beta-np",
+ .reg_grp = IQS7222_REG_GRP_FILT,
+ .reg_offset = 0,
+ .reg_shift = 8,
+ .reg_width = 4,
+ .label = "normal-power mode long-term average beta",
+ },
+ {
+ .name = "azoteq,counts-beta-lp",
+ .reg_grp = IQS7222_REG_GRP_FILT,
+ .reg_offset = 0,
+ .reg_shift = 4,
+ .reg_width = 4,
+ .label = "low-power mode counts beta",
+ },
+ {
+ .name = "azoteq,counts-beta-np",
+ .reg_grp = IQS7222_REG_GRP_FILT,
+ .reg_offset = 0,
+ .reg_shift = 0,
+ .reg_width = 4,
+ .label = "normal-power mode counts beta",
+ },
+ {
+ .name = "azoteq,lta-fast-beta-lp",
+ .reg_grp = IQS7222_REG_GRP_FILT,
+ .reg_offset = 1,
+ .reg_shift = 4,
+ .reg_width = 4,
+ .label = "low-power mode long-term average fast beta",
+ },
+ {
+ .name = "azoteq,lta-fast-beta-np",
+ .reg_grp = IQS7222_REG_GRP_FILT,
+ .reg_offset = 1,
+ .reg_shift = 0,
+ .reg_width = 4,
+ .label = "normal-power mode long-term average fast beta",
+ },
+ {
+ .name = "azoteq,lower-cal",
+ .reg_grp = IQS7222_REG_GRP_SLDR,
+ .reg_offset = 0,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "lower calibration",
+ },
+ {
+ .name = "azoteq,static-beta",
+ .reg_grp = IQS7222_REG_GRP_SLDR,
+ .reg_key = IQS7222_REG_KEY_NO_WHEEL,
+ .reg_offset = 0,
+ .reg_shift = 6,
+ .reg_width = 1,
+ },
+ {
+ .name = "azoteq,bottom-beta",
+ .reg_grp = IQS7222_REG_GRP_SLDR,
+ .reg_key = IQS7222_REG_KEY_NO_WHEEL,
+ .reg_offset = 0,
+ .reg_shift = 3,
+ .reg_width = 3,
+ .label = "bottom beta",
+ },
+ {
+ .name = "azoteq,static-beta",
+ .reg_grp = IQS7222_REG_GRP_SLDR,
+ .reg_key = IQS7222_REG_KEY_WHEEL,
+ .reg_offset = 0,
+ .reg_shift = 7,
+ .reg_width = 1,
+ },
+ {
+ .name = "azoteq,bottom-beta",
+ .reg_grp = IQS7222_REG_GRP_SLDR,
+ .reg_key = IQS7222_REG_KEY_WHEEL,
+ .reg_offset = 0,
+ .reg_shift = 4,
+ .reg_width = 3,
+ .label = "bottom beta",
+ },
+ {
+ .name = "azoteq,bottom-speed",
+ .reg_grp = IQS7222_REG_GRP_SLDR,
+ .reg_offset = 1,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "bottom speed",
+ },
+ {
+ .name = "azoteq,upper-cal",
+ .reg_grp = IQS7222_REG_GRP_SLDR,
+ .reg_offset = 1,
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "upper calibration",
+ },
+ {
+ .name = "azoteq,gesture-max-ms",
+ .reg_grp = IQS7222_REG_GRP_SLDR,
+ .reg_key = IQS7222_REG_KEY_TAP,
+ .reg_offset = 9,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_pitch = 4,
+ .label = "maximum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-min-ms",
+ .reg_grp = IQS7222_REG_GRP_SLDR,
+ .reg_key = IQS7222_REG_KEY_TAP,
+ .reg_offset = 9,
+ .reg_shift = 3,
+ .reg_width = 5,
+ .val_pitch = 4,
+ .label = "minimum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-dist",
+ .reg_grp = IQS7222_REG_GRP_SLDR,
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ .reg_offset = 10,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_pitch = 16,
+ .label = "gesture distance",
+ },
+ {
+ .name = "azoteq,gesture-max-ms",
+ .reg_grp = IQS7222_REG_GRP_SLDR,
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ .reg_offset = 10,
+ .reg_shift = 0,
+ .reg_width = 8,
+ .val_pitch = 4,
+ .label = "maximum gesture time",
+ },
+ {
+ .name = "drive-open-drain",
+ .reg_grp = IQS7222_REG_GRP_GPIO,
+ .reg_offset = 0,
+ .reg_shift = 1,
+ .reg_width = 1,
+ },
+ {
+ .name = "azoteq,timeout-ati-ms",
+ .reg_grp = IQS7222_REG_GRP_SYS,
+ .reg_offset = 1,
+ .reg_shift = 0,
+ .reg_width = 16,
+ .val_pitch = 500,
+ .label = "ATI error timeout",
+ },
+ {
+ .name = "azoteq,rate-ati-ms",
+ .reg_grp = IQS7222_REG_GRP_SYS,
+ .reg_offset = 2,
+ .reg_shift = 0,
+ .reg_width = 16,
+ .label = "ATI report rate",
+ },
+ {
+ .name = "azoteq,timeout-np-ms",
+ .reg_grp = IQS7222_REG_GRP_SYS,
+ .reg_offset = 3,
+ .reg_shift = 0,
+ .reg_width = 16,
+ .label = "normal-power mode timeout",
+ },
+ {
+ .name = "azoteq,rate-np-ms",
+ .reg_grp = IQS7222_REG_GRP_SYS,
+ .reg_offset = 4,
+ .reg_shift = 0,
+ .reg_width = 16,
+ .val_max = 3000,
+ .label = "normal-power mode report rate",
+ },
+ {
+ .name = "azoteq,timeout-lp-ms",
+ .reg_grp = IQS7222_REG_GRP_SYS,
+ .reg_offset = 5,
+ .reg_shift = 0,
+ .reg_width = 16,
+ .label = "low-power mode timeout",
+ },
+ {
+ .name = "azoteq,rate-lp-ms",
+ .reg_grp = IQS7222_REG_GRP_SYS,
+ .reg_offset = 6,
+ .reg_shift = 0,
+ .reg_width = 16,
+ .val_max = 3000,
+ .label = "low-power mode report rate",
+ },
+ {
+ .name = "azoteq,timeout-ulp-ms",
+ .reg_grp = IQS7222_REG_GRP_SYS,
+ .reg_offset = 7,
+ .reg_shift = 0,
+ .reg_width = 16,
+ .label = "ultra-low-power mode timeout",
+ },
+ {
+ .name = "azoteq,rate-ulp-ms",
+ .reg_grp = IQS7222_REG_GRP_SYS,
+ .reg_offset = 8,
+ .reg_shift = 0,
+ .reg_width = 16,
+ .val_max = 3000,
+ .label = "ultra-low-power mode report rate",
+ },
+};
+
+struct iqs7222_private {
+ const struct iqs7222_dev_desc *dev_desc;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *irq_gpio;
+ struct i2c_client *client;
+ struct input_dev *keypad;
+ unsigned int kp_type[IQS7222_MAX_CHAN][ARRAY_SIZE(iqs7222_kp_events)];
+ unsigned int kp_code[IQS7222_MAX_CHAN][ARRAY_SIZE(iqs7222_kp_events)];
+ unsigned int sl_code[IQS7222_MAX_SLDR][ARRAY_SIZE(iqs7222_sl_events)];
+ unsigned int sl_axis[IQS7222_MAX_SLDR];
+ u16 cycle_setup[IQS7222_MAX_CHAN / 2][IQS7222_MAX_COLS_CYCLE];
+ u16 glbl_setup[IQS7222_MAX_COLS_GLBL];
+ u16 btn_setup[IQS7222_MAX_CHAN][IQS7222_MAX_COLS_BTN];
+ u16 chan_setup[IQS7222_MAX_CHAN][IQS7222_MAX_COLS_CHAN];
+ u16 filt_setup[IQS7222_MAX_COLS_FILT];
+ u16 sldr_setup[IQS7222_MAX_SLDR][IQS7222_MAX_COLS_SLDR];
+ u16 gpio_setup[ARRAY_SIZE(iqs7222_gpio_links)][IQS7222_MAX_COLS_GPIO];
+ u16 sys_setup[IQS7222_MAX_COLS_SYS];
+};
+
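+/* Return the driver's cached copy of the requested register group (and row, where applicable). */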
+static u16 *iqs7222_setup(struct iqs7222_private *iqs7222,
+ enum iqs7222_reg_grp_id reg_grp, int row)
+{
+ switch (reg_grp) {
+ case IQS7222_REG_GRP_CYCLE:
+ return iqs7222->cycle_setup[row];
+
+ case IQS7222_REG_GRP_GLBL:
+ return iqs7222->glbl_setup;
+
+ case IQS7222_REG_GRP_BTN:
+ return iqs7222->btn_setup[row];
+
+ case IQS7222_REG_GRP_CHAN:
+ return iqs7222->chan_setup[row];
+
+ case IQS7222_REG_GRP_FILT:
+ return iqs7222->filt_setup;
+
+ case IQS7222_REG_GRP_SLDR:
+ return iqs7222->sldr_setup[row];
+
+ case IQS7222_REG_GRP_GPIO:
+ return iqs7222->gpio_setup[row];
+
+ case IQS7222_REG_GRP_SYS:
+ return iqs7222->sys_setup;
+
+ default:
+ return NULL;
+ }
+}
+
+static int iqs7222_irq_poll(struct iqs7222_private *iqs7222, u16 timeout_ms)
+{
+ ktime_t irq_timeout = ktime_add_ms(ktime_get(), timeout_ms);
+ int ret;
+
+ do {
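+ /* Poll the RDY (interrupt) GPIO until the device asserts it or the timeout expires. */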
+ usleep_range(1000, 1100);
+
+ ret = gpiod_get_value_cansleep(iqs7222->irq_gpio);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ return 0;
+ } while (ktime_compare(ktime_get(), irq_timeout) < 0);
+
+ return -EBUSY;
+}
+
+static int iqs7222_hard_reset(struct iqs7222_private *iqs7222)
+{
+ struct i2c_client *client = iqs7222->client;
+ int error;
+
+ if (!iqs7222->reset_gpio)
+ return 0;
+
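+ /* Pulse the reset pin, then wait for the device to assert RDY. */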
+ gpiod_set_value_cansleep(iqs7222->reset_gpio, 1);
+ usleep_range(1000, 1100);
+
+ gpiod_set_value_cansleep(iqs7222->reset_gpio, 0);
+
+ error = iqs7222_irq_poll(iqs7222, IQS7222_RESET_TIMEOUT_MS);
+ if (error)
+ dev_err(&client->dev, "Failed to reset device: %d\n", error);
+
+ return error;
+}
+
+static int iqs7222_force_comms(struct iqs7222_private *iqs7222)
+{
+ u8 msg_buf[] = { 0xFF, 0x00, };
+ int ret;
+
+ /*
+ * The device cannot communicate until it asserts its interrupt (RDY)
+ * pin. Attempts to do so while RDY is deasserted return an ACK; how-
+ * ever all write data is ignored, and all read data returns 0xEE.
+ *
+ * Unsolicited communication must be preceded by a special force com-
+ * munication command, after which the device eventually asserts its
+ * RDY pin and agrees to communicate.
+ *
+ * Regardless of whether communication is forced or the result of an
+ * interrupt, the device automatically deasserts its RDY pin once it
+ * detects an I2C stop condition, or a timeout expires.
+ */
+ ret = gpiod_get_value_cansleep(iqs7222->irq_gpio);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ return 0;
+
+ ret = i2c_master_send(iqs7222->client, msg_buf, sizeof(msg_buf));
+ if (ret < (int)sizeof(msg_buf)) {
+ if (ret >= 0)
+ ret = -EIO;
+
+ /*
+ * The datasheet states that the host must wait to retry any
+ * failed attempt to communicate over I2C.
+ */
+ msleep(IQS7222_COMMS_RETRY_MS);
+ return ret;
+ }
+
+ return iqs7222_irq_poll(iqs7222, IQS7222_COMMS_TIMEOUT_MS);
+}
+
+static int iqs7222_read_burst(struct iqs7222_private *iqs7222,
+ u16 reg, void *val, u16 num_val)
+{
+ u8 reg_buf[sizeof(__be16)];
+ int ret, i;
+ struct i2c_client *client = iqs7222->client;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = reg > U8_MAX ? sizeof(reg) : sizeof(u8),
+ .buf = reg_buf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = num_val * sizeof(__le16),
+ .buf = (u8 *)val,
+ },
+ };
+
+ if (reg > U8_MAX)
+ put_unaligned_be16(reg, reg_buf);
+ else
+ *reg_buf = (u8)reg;
+
+ /*
+ * The following loop protects against an edge case in which the RDY
+ * pin is automatically deasserted just as the read is initiated. In
+ * that case, the read must be retried using forced communication.
+ */
+ for (i = 0; i < IQS7222_NUM_RETRIES; i++) {
+ ret = iqs7222_force_comms(iqs7222);
+ if (ret < 0)
+ continue;
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret < (int)ARRAY_SIZE(msg)) {
+ if (ret >= 0)
+ ret = -EIO;
+
+ msleep(IQS7222_COMMS_RETRY_MS);
+ continue;
+ }
+
+ if (get_unaligned_le16(msg[1].buf) == IQS7222_COMMS_ERROR) {
+ ret = -ENODATA;
+ continue;
+ }
+
+ ret = 0;
+ break;
+ }
+
+ /*
+ * The following delay ensures the device has deasserted the RDY pin
+ * following the I2C stop condition.
+ */
+ usleep_range(50, 100);
+
+ if (ret < 0)
+ dev_err(&client->dev,
+ "Failed to read from address 0x%04X: %d\n", reg, ret);
+
+ return ret;
+}
+
+static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val)
+{
+ __le16 val_buf;
+ int error;
+
+ error = iqs7222_read_burst(iqs7222, reg, &val_buf, 1);
+ if (error)
+ return error;
+
+ *val = le16_to_cpu(val_buf);
+
+ return 0;
+}
+
+static int iqs7222_write_burst(struct iqs7222_private *iqs7222,
+ u16 reg, const void *val, u16 num_val)
+{
+ int reg_len = reg > U8_MAX ? sizeof(reg) : sizeof(u8);
+ int val_len = num_val * sizeof(__le16);
+ int msg_len = reg_len + val_len;
+ int ret, i;
+ struct i2c_client *client = iqs7222->client;
+ u8 *msg_buf;
+
+ msg_buf = kzalloc(msg_len, GFP_KERNEL);
+ if (!msg_buf)
+ return -ENOMEM;
+
+ if (reg > U8_MAX)
+ put_unaligned_be16(reg, msg_buf);
+ else
+ *msg_buf = (u8)reg;
+
+ memcpy(msg_buf + reg_len, val, val_len);
+
+ /*
+ * The following loop protects against an edge case in which the RDY
+ * pin is automatically asserted just before the force communication
+ * command is sent.
+ *
+ * In that case, the subsequent I2C stop condition tricks the device
+ * into preemptively deasserting the RDY pin and the command must be
+ * sent again.
+ */
+ for (i = 0; i < IQS7222_NUM_RETRIES; i++) {
+ ret = iqs7222_force_comms(iqs7222);
+ if (ret < 0)
+ continue;
+
+ ret = i2c_master_send(client, msg_buf, msg_len);
+ if (ret < msg_len) {
+ if (ret >= 0)
+ ret = -EIO;
+
+ msleep(IQS7222_COMMS_RETRY_MS);
+ continue;
+ }
+
+ ret = 0;
+ break;
+ }
+
+ kfree(msg_buf);
+
+ usleep_range(50, 100);
+
+ if (ret < 0)
+ dev_err(&client->dev,
+ "Failed to write to address 0x%04X: %d\n", reg, ret);
+
+ return ret;
+}
+
+static int iqs7222_write_word(struct iqs7222_private *iqs7222, u16 reg, u16 val)
+{
+ __le16 val_buf = cpu_to_le16(val);
+
+ return iqs7222_write_burst(iqs7222, reg, &val_buf, 1);
+}
+
+static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
+{
+ struct i2c_client *client = iqs7222->client;
+ ktime_t ati_timeout;
+ u16 sys_status = 0;
+ u16 sys_setup = iqs7222->sys_setup[0] & ~IQS7222_SYS_SETUP_ACK_RESET;
+ int error, i;
+
+ for (i = 0; i < IQS7222_NUM_RETRIES; i++) {
+ /*
+ * Trigger ATI from streaming and normal-power modes so that
+ * the RDY pin continues to be asserted during ATI.
+ */
+ error = iqs7222_write_word(iqs7222, IQS7222_SYS_SETUP,
+ sys_setup |
+ IQS7222_SYS_SETUP_REDO_ATI);
+ if (error)
+ return error;
+
+ ati_timeout = ktime_add_ms(ktime_get(), IQS7222_ATI_TIMEOUT_MS);
+
+ do {
+ error = iqs7222_irq_poll(iqs7222,
+ IQS7222_COMMS_TIMEOUT_MS);
+ if (error)
+ continue;
+
+ error = iqs7222_read_word(iqs7222, IQS7222_SYS_STATUS,
+ &sys_status);
+ if (error)
+ return error;
+
+ if (sys_status & IQS7222_SYS_STATUS_ATI_ACTIVE)
+ continue;
+
+ if (sys_status & IQS7222_SYS_STATUS_ATI_ERROR)
+ break;
+
+ /*
+ * Use stream-in-touch mode if either slider reports
+ * absolute position.
+ */
+ sys_setup |= test_bit(EV_ABS, iqs7222->keypad->evbit)
+ ? IQS7222_SYS_SETUP_INTF_MODE_TOUCH
+ : IQS7222_SYS_SETUP_INTF_MODE_EVENT;
+ sys_setup |= IQS7222_SYS_SETUP_PWR_MODE_AUTO;
+
+ return iqs7222_write_word(iqs7222, IQS7222_SYS_SETUP,
+ sys_setup);
+ } while (ktime_compare(ktime_get(), ati_timeout) < 0);
+
+ dev_err(&client->dev,
+ "ATI attempt %d of %d failed with status 0x%02X, %s\n",
+ i + 1, IQS7222_NUM_RETRIES, (u8)sys_status,
+ i + 1 < IQS7222_NUM_RETRIES ? "retrying..." : "stopping");
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
+{
+ const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ int comms_offset = dev_desc->comms_offset;
+ int error, i, j, k;
+
+ /*
+ * Take advantage of the stop-bit disable function, if available, to
+ * save the trouble of having to reopen a communication window after
+ * each burst read or write.
+ */
+ if (comms_offset) {
+ u16 comms_setup;
+
+ error = iqs7222_read_word(iqs7222,
+ IQS7222_SYS_SETUP + comms_offset,
+ &comms_setup);
+ if (error)
+ return error;
+
+ error = iqs7222_write_word(iqs7222,
+ IQS7222_SYS_SETUP + comms_offset,
+ comms_setup | IQS7222_COMMS_HOLD);
+ if (error)
+ return error;
+ }
+
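+ /* Mirror each register group, row by row, between the device and the cached setup arrays. */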
+ for (i = 0; i < IQS7222_NUM_REG_GRPS; i++) {
+ int num_row = dev_desc->reg_grps[i].num_row;
+ int num_col = dev_desc->reg_grps[i].num_col;
+ u16 reg = dev_desc->reg_grps[i].base;
+ __le16 *val_buf;
+ u16 *val;
+
+ if (!num_col)
+ continue;
+
+ val = iqs7222_setup(iqs7222, i, 0);
+ if (!val)
+ continue;
+
+ val_buf = kcalloc(num_col, sizeof(__le16), GFP_KERNEL);
+ if (!val_buf)
+ return -ENOMEM;
+
+ for (j = 0; j < num_row; j++) {
+ switch (dir) {
+ case READ:
+ error = iqs7222_read_burst(iqs7222, reg,
+ val_buf, num_col);
+ for (k = 0; k < num_col; k++)
+ val[k] = le16_to_cpu(val_buf[k]);
+ break;
+
+ case WRITE:
+ for (k = 0; k < num_col; k++)
+ val_buf[k] = cpu_to_le16(val[k]);
+ error = iqs7222_write_burst(iqs7222, reg,
+ val_buf, num_col);
+ break;
+
+ default:
+ error = -EINVAL;
+ }
+
+ if (error)
+ break;
+
+ reg += IQS7222_REG_OFFSET;
+ val += iqs7222_max_cols[i];
+ }
+
+ kfree(val_buf);
+
+ if (error)
+ return error;
+ }
+
+ if (comms_offset) {
+ u16 comms_setup;
+
+ error = iqs7222_read_word(iqs7222,
+ IQS7222_SYS_SETUP + comms_offset,
+ &comms_setup);
+ if (error)
+ return error;
+
+ error = iqs7222_write_word(iqs7222,
+ IQS7222_SYS_SETUP + comms_offset,
+ comms_setup & ~IQS7222_COMMS_HOLD);
+ if (error)
+ return error;
+ }
+
+ if (dir == READ)
+ return 0;
+
+ return iqs7222_ati_trigger(iqs7222);
+}
+
+static int iqs7222_dev_info(struct iqs7222_private *iqs7222)
+{
+ struct i2c_client *client = iqs7222->client;
+ bool prod_num_valid = false;
+ __le16 dev_id[3];
+ int error, i;
+
+ error = iqs7222_read_burst(iqs7222, IQS7222_PROD_NUM, dev_id,
+ ARRAY_SIZE(dev_id));
+ if (error)
+ return error;
+
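+ /* Match the reported product number and firmware revision against the device table. */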
+ for (i = 0; i < ARRAY_SIZE(iqs7222_devs); i++) {
+ if (le16_to_cpu(dev_id[0]) != iqs7222_devs[i].prod_num)
+ continue;
+
+ prod_num_valid = true;
+
+ if (le16_to_cpu(dev_id[1]) < iqs7222_devs[i].fw_major)
+ continue;
+
+ if (le16_to_cpu(dev_id[2]) < iqs7222_devs[i].fw_minor)
+ continue;
+
+ iqs7222->dev_desc = &iqs7222_devs[i];
+ return 0;
+ }
+
+ if (prod_num_valid)
+ dev_err(&client->dev, "Unsupported firmware revision: %u.%u\n",
+ le16_to_cpu(dev_id[1]), le16_to_cpu(dev_id[2]));
+ else
+ dev_err(&client->dev, "Unrecognized product number: %u\n",
+ le16_to_cpu(dev_id[0]));
+
+ return -EINVAL;
+}
+
+static int iqs7222_gpio_select(struct iqs7222_private *iqs7222,
+ struct fwnode_handle *child_node,
+ int child_enable, u16 child_link)
+{
+ const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ struct i2c_client *client = iqs7222->client;
+ int num_gpio = dev_desc->reg_grps[IQS7222_REG_GRP_GPIO].num_row;
+ int error, count, i;
+ unsigned int gpio_sel[ARRAY_SIZE(iqs7222_gpio_links)];
+
+ if (!num_gpio)
+ return 0;
+
+ if (!fwnode_property_present(child_node, "azoteq,gpio-select"))
+ return 0;
+
+ count = fwnode_property_count_u32(child_node, "azoteq,gpio-select");
+ if (count > num_gpio) {
+ dev_err(&client->dev, "Invalid number of %s GPIOs\n",
+ fwnode_get_name(child_node));
+ return -EINVAL;
+ } else if (count < 0) {
+ dev_err(&client->dev, "Failed to count %s GPIOs: %d\n",
+ fwnode_get_name(child_node), count);
+ return count;
+ }
+
+ error = fwnode_property_read_u32_array(child_node,
+ "azoteq,gpio-select",
+ gpio_sel, count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read %s GPIOs: %d\n",
+ fwnode_get_name(child_node), error);
+ return error;
+ }
+
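+ /* Enable each selected GPIO and record the event's enable mask and link target. */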
+ for (i = 0; i < count; i++) {
+ u16 *gpio_setup;
+
+ if (gpio_sel[i] >= num_gpio) {
+ dev_err(&client->dev, "Invalid %s GPIO: %u\n",
+ fwnode_get_name(child_node), gpio_sel[i]);
+ return -EINVAL;
+ }
+
+ gpio_setup = iqs7222->gpio_setup[gpio_sel[i]];
+
+ if (gpio_setup[2] && child_link != gpio_setup[2]) {
+ dev_err(&client->dev,
+ "Conflicting GPIO %u event types\n",
+ gpio_sel[i]);
+ return -EINVAL;
+ }
+
+ gpio_setup[0] |= IQS7222_GPIO_SETUP_0_GPIO_EN;
+ gpio_setup[1] |= child_enable;
+ gpio_setup[2] = child_link;
+ }
+
+ return 0;
+}
+
+static int iqs7222_parse_props(struct iqs7222_private *iqs7222,
+ struct fwnode_handle **child_node,
+ int child_index,
+ enum iqs7222_reg_grp_id reg_grp,
+ enum iqs7222_reg_key_id reg_key)
+{
+ u16 *setup = iqs7222_setup(iqs7222, reg_grp, child_index);
+ struct i2c_client *client = iqs7222->client;
+ struct fwnode_handle *reg_grp_node;
+ char reg_grp_name[16];
+ int i;
+
+ switch (reg_grp) {
+ case IQS7222_REG_GRP_CYCLE:
+ case IQS7222_REG_GRP_CHAN:
+ case IQS7222_REG_GRP_SLDR:
+ case IQS7222_REG_GRP_GPIO:
+ case IQS7222_REG_GRP_BTN:
+ /*
+ * These groups derive a child node and return it to the caller
+ * for additional group-specific processing. In some cases, the
+ * child node may have already been derived.
+ */
+ reg_grp_node = *child_node;
+ if (reg_grp_node)
+ break;
+
+ snprintf(reg_grp_name, sizeof(reg_grp_name), "%s-%d",
+ iqs7222_reg_grp_names[reg_grp], child_index);
+
+ reg_grp_node = device_get_named_child_node(&client->dev,
+ reg_grp_name);
+ if (!reg_grp_node)
+ return 0;
+
+ *child_node = reg_grp_node;
+ break;
+
+ case IQS7222_REG_GRP_GLBL:
+ case IQS7222_REG_GRP_FILT:
+ case IQS7222_REG_GRP_SYS:
+ /*
+ * These groups are not organized beneath a child node, nor are
+ * they subject to any additional processing by the caller.
+ */
+ reg_grp_node = dev_fwnode(&client->dev);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
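+ /* Apply each property that targets this register group and key to the cached setup words. */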
+ for (i = 0; i < ARRAY_SIZE(iqs7222_props); i++) {
+ const char *name = iqs7222_props[i].name;
+ int reg_offset = iqs7222_props[i].reg_offset;
+ int reg_shift = iqs7222_props[i].reg_shift;
+ int reg_width = iqs7222_props[i].reg_width;
+ int val_pitch = iqs7222_props[i].val_pitch ? : 1;
+ int val_min = iqs7222_props[i].val_min;
+ int val_max = iqs7222_props[i].val_max;
+ bool invert = iqs7222_props[i].invert;
+ const char *label = iqs7222_props[i].label ? : name;
+ unsigned int val;
+ int error;
+
+ if (iqs7222_props[i].reg_grp != reg_grp ||
+ iqs7222_props[i].reg_key != reg_key)
+ continue;
+
+ /*
+ * Boolean register fields are one bit wide; they are forcibly
+ * reset to provide a means to undo changes by a bootloader if
+ * necessary.
+ *
+ * Scalar fields, on the other hand, are left untouched unless
+ * their corresponding properties are present.
+ */
+ if (reg_width == 1) {
+ if (invert)
+ setup[reg_offset] |= BIT(reg_shift);
+ else
+ setup[reg_offset] &= ~BIT(reg_shift);
+ }
+
+ if (!fwnode_property_present(reg_grp_node, name))
+ continue;
+
+ if (reg_width == 1) {
+ if (invert)
+ setup[reg_offset] &= ~BIT(reg_shift);
+ else
+ setup[reg_offset] |= BIT(reg_shift);
+
+ continue;
+ }
+
+ error = fwnode_property_read_u32(reg_grp_node, name, &val);
+ if (error) {
+ dev_err(&client->dev, "Failed to read %s %s: %d\n",
+ fwnode_get_name(reg_grp_node), label, error);
+ return error;
+ }
+
+ if (!val_max)
+ val_max = GENMASK(reg_width - 1, 0) * val_pitch;
+
+ if (val < val_min || val > val_max) {
+ dev_err(&client->dev, "Invalid %s %s: %u\n",
+ fwnode_get_name(reg_grp_node), label, val);
+ return -EINVAL;
+ }
+
+ setup[reg_offset] &= ~GENMASK(reg_shift + reg_width - 1,
+ reg_shift);
+ setup[reg_offset] |= (val / val_pitch << reg_shift);
+ }
+
+ return 0;
+}
+
+static int iqs7222_parse_cycle(struct iqs7222_private *iqs7222, int cycle_index)
+{
+ u16 *cycle_setup = iqs7222->cycle_setup[cycle_index];
+ struct i2c_client *client = iqs7222->client;
+ struct fwnode_handle *cycle_node = NULL;
+ unsigned int pins[9];
+ int error, count, i;
+
+ /*
+ * Each channel shares a cycle with one other channel; the mapping of
+ * channels to cycles is fixed. Properties defined for a cycle impact
+ * both channels tied to the cycle.
+ */
+ error = iqs7222_parse_props(iqs7222, &cycle_node, cycle_index,
+ IQS7222_REG_GRP_CYCLE,
+ IQS7222_REG_KEY_NONE);
+ if (error)
+ return error;
+
+ if (!cycle_node)
+ return 0;
+
+ /*
+ * Unlike channels which are restricted to a select range of CRx pins
+ * based on channel number, any cycle can claim any of the device's 9
+ * CTx pins (CTx0-8).
+ */
+ if (!fwnode_property_present(cycle_node, "azoteq,tx-enable"))
+ return 0;
+
+ count = fwnode_property_count_u32(cycle_node, "azoteq,tx-enable");
+ if (count < 0) {
+ dev_err(&client->dev, "Failed to count %s CTx pins: %d\n",
+ fwnode_get_name(cycle_node), count);
+ return count;
+ } else if (count > ARRAY_SIZE(pins)) {
+ dev_err(&client->dev, "Invalid number of %s CTx pins\n",
+ fwnode_get_name(cycle_node));
+ return -EINVAL;
+ }
+
+ error = fwnode_property_read_u32_array(cycle_node, "azoteq,tx-enable",
+ pins, count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read %s CTx pins: %d\n",
+ fwnode_get_name(cycle_node), error);
+ return error;
+ }
+
+ cycle_setup[1] &= ~GENMASK(7 + ARRAY_SIZE(pins) - 1, 7);
+
+ for (i = 0; i < count; i++) {
+ if (pins[i] > 8) {
+ dev_err(&client->dev, "Invalid %s CTx pin: %u\n",
+ fwnode_get_name(cycle_node), pins[i]);
+ return -EINVAL;
+ }
+
+ cycle_setup[1] |= BIT(pins[i] + 7);
+ }
+
+ return 0;
+}
+
+static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
+{
+ const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ struct i2c_client *client = iqs7222->client;
+ struct fwnode_handle *chan_node = NULL;
+ int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
+ int ext_chan = rounddown(num_chan, 10);
+ int error, i;
+ u16 *chan_setup = iqs7222->chan_setup[chan_index];
+ u16 *sys_setup = iqs7222->sys_setup;
+ unsigned int val;
+
+ error = iqs7222_parse_props(iqs7222, &chan_node, chan_index,
+ IQS7222_REG_GRP_CHAN,
+ IQS7222_REG_KEY_NONE);
+ if (error)
+ return error;
+
+ if (!chan_node)
+ return 0;
+
+ if (dev_desc->allow_offset) {
+ sys_setup[dev_desc->allow_offset] |= BIT(chan_index);
+ if (fwnode_property_present(chan_node, "azoteq,ulp-allow"))
+ sys_setup[dev_desc->allow_offset] &= ~BIT(chan_index);
+ }
+
+ chan_setup[0] |= IQS7222_CHAN_SETUP_0_CHAN_EN;
+
+ /*
+ * The reference channel function allows for differential measurements
+ * and is only available in the case of IQS7222A or IQS7222C.
+ */
+ if (dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_col > 4 &&
+ fwnode_property_present(chan_node, "azoteq,ref-select")) {
+ u16 *ref_setup;
+
+ error = fwnode_property_read_u32(chan_node, "azoteq,ref-select",
+ &val);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read %s reference channel: %d\n",
+ fwnode_get_name(chan_node), error);
+ return error;
+ }
+
+ if (val >= ext_chan) {
+ dev_err(&client->dev,
+ "Invalid %s reference channel: %u\n",
+ fwnode_get_name(chan_node), val);
+ return -EINVAL;
+ }
+
+ ref_setup = iqs7222->chan_setup[val];
+
+ /*
+ * Configure the current channel as a follower of the selected
+ * reference channel.
+ */
+ chan_setup[0] |= IQS7222_CHAN_SETUP_0_REF_MODE_FOLLOW;
+ chan_setup[4] = val * 42 + 1048;
+
+ if (!fwnode_property_read_u32(chan_node, "azoteq,ref-weight",
+ &val)) {
+ if (val > U16_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s reference weight: %u\n",
+ fwnode_get_name(chan_node), val);
+ return -EINVAL;
+ }
+
+ chan_setup[5] = val;
+ }
+
+ /*
+ * Configure the selected channel as a reference channel which
+ * serves the current channel.
+ */
+ ref_setup[0] |= IQS7222_CHAN_SETUP_0_REF_MODE_REF;
+ ref_setup[5] |= BIT(chan_index);
+
+ ref_setup[4] = dev_desc->touch_link;
+ if (fwnode_property_present(chan_node, "azoteq,use-prox"))
+ ref_setup[4] -= 2;
+ }
+
+ if (fwnode_property_present(chan_node, "azoteq,rx-enable")) {
+ /*
+ * Each channel can claim up to 4 CRx pins. The first half of
+ * the channels can use CRx0-3, while the second half can use
+ * CRx4-7.
+ */
+ unsigned int pins[4];
+ int count;
+
+ count = fwnode_property_count_u32(chan_node,
+ "azoteq,rx-enable");
+ if (count < 0) {
+ dev_err(&client->dev,
+ "Failed to count %s CRx pins: %d\n",
+ fwnode_get_name(chan_node), count);
+ return count;
+ } else if (count > ARRAY_SIZE(pins)) {
+ dev_err(&client->dev,
+ "Invalid number of %s CRx pins\n",
+ fwnode_get_name(chan_node));
+ return -EINVAL;
+ }
+
+ error = fwnode_property_read_u32_array(chan_node,
+ "azoteq,rx-enable",
+ pins, count);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read %s CRx pins: %d\n",
+ fwnode_get_name(chan_node), error);
+ return error;
+ }
+
+ chan_setup[0] &= ~GENMASK(4 + ARRAY_SIZE(pins) - 1, 4);
+
+ for (i = 0; i < count; i++) {
+ int min_crx = chan_index < ext_chan / 2 ? 0 : 4;
+
+ if (pins[i] < min_crx || pins[i] > min_crx + 3) {
+ dev_err(&client->dev,
+ "Invalid %s CRx pin: %u\n",
+ fwnode_get_name(chan_node), pins[i]);
+ return -EINVAL;
+ }
+
+ chan_setup[0] |= BIT(pins[i] + 4 - min_crx);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(iqs7222_kp_events); i++) {
+ const char *event_name = iqs7222_kp_events[i].name;
+ u16 event_enable = iqs7222_kp_events[i].enable;
+ struct fwnode_handle *event_node;
+
+ event_node = fwnode_get_named_child_node(chan_node, event_name);
+ if (!event_node)
+ continue;
+
+ error = iqs7222_parse_props(iqs7222, &event_node, chan_index,
+ IQS7222_REG_GRP_BTN,
+ iqs7222_kp_events[i].reg_key);
+ if (error)
+ return error;
+
+ error = iqs7222_gpio_select(iqs7222, event_node,
+ BIT(chan_index),
+ dev_desc->touch_link - (i ? 0 : 2));
+ if (error)
+ return error;
+
+ if (!fwnode_property_read_u32(event_node,
+ "azoteq,timeout-press-ms",
+ &val)) {
+ /*
+ * The IQS7222B employs a global pair of press timeout
+ * registers as opposed to channel-specific registers.
+ */
+ u16 *setup = dev_desc->reg_grps
+ [IQS7222_REG_GRP_BTN].num_col > 2 ?
+ &iqs7222->btn_setup[chan_index][2] :
+ &sys_setup[9];
+
+ if (val > U8_MAX * 500) {
+ dev_err(&client->dev,
+ "Invalid %s press timeout: %u\n",
+ fwnode_get_name(chan_node), val);
+ return -EINVAL;
+ }
+
+ *setup &= ~(U8_MAX << i * 8);
+ *setup |= (val / 500 << i * 8);
+ }
+
+ error = fwnode_property_read_u32(event_node, "linux,code",
+ &val);
+ if (error) {
+ dev_err(&client->dev, "Failed to read %s code: %d\n",
+ fwnode_get_name(chan_node), error);
+ return error;
+ }
+
+ iqs7222->kp_code[chan_index][i] = val;
+ iqs7222->kp_type[chan_index][i] = EV_KEY;
+
+ if (fwnode_property_present(event_node, "linux,input-type")) {
+ error = fwnode_property_read_u32(event_node,
+ "linux,input-type",
+ &val);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read %s input type: %d\n",
+ fwnode_get_name(chan_node), error);
+ return error;
+ }
+
+ if (val != EV_KEY && val != EV_SW) {
+ dev_err(&client->dev,
+ "Invalid %s input type: %u\n",
+ fwnode_get_name(chan_node), val);
+ return -EINVAL;
+ }
+
+ iqs7222->kp_type[chan_index][i] = val;
+ }
+
+ /*
+ * Reference channels can opt out of event reporting by using
+ * KEY_RESERVED in place of a true key or switch code.
+ */
+ if (iqs7222->kp_type[chan_index][i] == EV_KEY &&
+ iqs7222->kp_code[chan_index][i] == KEY_RESERVED)
+ continue;
+
+ input_set_capability(iqs7222->keypad,
+ iqs7222->kp_type[chan_index][i],
+ iqs7222->kp_code[chan_index][i]);
+
+ if (!dev_desc->event_offset)
+ continue;
+
+ sys_setup[dev_desc->event_offset] |= event_enable;
+ }
+
+ /*
+ * The following call handles a special pair of properties that apply
+ * to a channel node, but reside within the button (event) group.
+ */
+ return iqs7222_parse_props(iqs7222, &chan_node, chan_index,
+ IQS7222_REG_GRP_BTN,
+ IQS7222_REG_KEY_DEBOUNCE);
+}
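The CRx claim logic above always lands the selected pins in bits 7:4 of chan_setup[0], whichever bank (CRx0-3 or CRx4-7) the channel draws from. A standalone sketch of that mapping, separate from the driver and compilable as ordinary user-space C, with an invented helper name:

#include <stdio.h>

#define BIT(n) (1U << (n))

/* Invented helper for illustration; mirrors the pin-to-bit math above. */
static unsigned int crx_to_setup_bit(int chan_index, int ext_chan,
				     unsigned int pin)
{
	int min_crx = chan_index < ext_chan / 2 ? 0 : 4;

	return BIT(pin + 4 - min_crx);
}

int main(void)
{
	/*
	 * Channel 2 of 10 claiming CRx1 and channel 7 claiming CRx5 both
	 * resolve to bit 5 (0x20) of the setup word.
	 */
	printf("0x%02x 0x%02x\n",
	       crx_to_setup_bit(2, 10, 1), crx_to_setup_bit(7, 10, 5));
	return 0;
}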
+
+static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
+{
+ const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ struct i2c_client *client = iqs7222->client;
+ struct fwnode_handle *sldr_node = NULL;
+ int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
+ int ext_chan = rounddown(num_chan, 10);
+ int count, error, reg_offset, i;
+ u16 *sldr_setup = iqs7222->sldr_setup[sldr_index];
+ u16 *sys_setup = iqs7222->sys_setup;
+ unsigned int chan_sel[4], val;
+
+ error = iqs7222_parse_props(iqs7222, &sldr_node, sldr_index,
+ IQS7222_REG_GRP_SLDR,
+ IQS7222_REG_KEY_NONE);
+ if (error)
+ return error;
+
+ if (!sldr_node)
+ return 0;
+
+ /*
+ * Each slider can be spread across 3 to 4 channels. It is possible to
+ * select only 2 channels, but doing so prevents the slider from using
+ * the specified resolution.
+ */
+ count = fwnode_property_count_u32(sldr_node, "azoteq,channel-select");
+ if (count < 0) {
+ dev_err(&client->dev, "Failed to count %s channels: %d\n",
+ fwnode_get_name(sldr_node), count);
+ return count;
+ } else if (count < 3 || count > ARRAY_SIZE(chan_sel)) {
+ dev_err(&client->dev, "Invalid number of %s channels\n",
+ fwnode_get_name(sldr_node));
+ return -EINVAL;
+ }
+
+ error = fwnode_property_read_u32_array(sldr_node,
+ "azoteq,channel-select",
+ chan_sel, count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read %s channels: %d\n",
+ fwnode_get_name(sldr_node), error);
+ return error;
+ }
+
+ /*
+ * Resolution and top speed, if small enough, are packed into a single
+ * register. Otherwise, each occupies its own register and the rest of
+ * the slider-related register addresses are offset by one.
+ */
+ reg_offset = dev_desc->sldr_res < U16_MAX ? 0 : 1;
+
+ sldr_setup[0] |= count;
+ sldr_setup[3 + reg_offset] &= ~IQS7222_SLDR_SETUP_3_CHAN_SEL_MASK;
+
+ for (i = 0; i < ARRAY_SIZE(chan_sel); i++) {
+ sldr_setup[5 + reg_offset + i] = 0;
+ if (i >= count)
+ continue;
+
+ if (chan_sel[i] >= ext_chan) {
+ dev_err(&client->dev, "Invalid %s channel: %u\n",
+ fwnode_get_name(sldr_node), chan_sel[i]);
+ return -EINVAL;
+ }
+
+ /*
+ * The following fields indicate which channels participate in
+ * the slider, as well as each channel's relative placement.
+ */
+ sldr_setup[3 + reg_offset] |= BIT(chan_sel[i]);
+ sldr_setup[5 + reg_offset + i] = chan_sel[i] * 42 + 1080;
+ }
+
+ sldr_setup[4 + reg_offset] = dev_desc->touch_link;
+ if (fwnode_property_present(sldr_node, "azoteq,use-prox"))
+ sldr_setup[4 + reg_offset] -= 2;
+
+ if (!fwnode_property_read_u32(sldr_node, "azoteq,slider-size", &val)) {
+ if (!val || val > dev_desc->sldr_res) {
+ dev_err(&client->dev, "Invalid %s size: %u\n",
+ fwnode_get_name(sldr_node), val);
+ return -EINVAL;
+ }
+
+ if (reg_offset) {
+ sldr_setup[3] = val;
+ } else {
+ sldr_setup[2] &= ~IQS7222_SLDR_SETUP_2_RES_MASK;
+ sldr_setup[2] |= (val / 16 <<
+ IQS7222_SLDR_SETUP_2_RES_SHIFT);
+ }
+ }
+
+ if (!fwnode_property_read_u32(sldr_node, "azoteq,top-speed", &val)) {
+ if (val > (reg_offset ? U16_MAX : U8_MAX * 4)) {
+ dev_err(&client->dev, "Invalid %s top speed: %u\n",
+ fwnode_get_name(sldr_node), val);
+ return -EINVAL;
+ }
+
+ if (reg_offset) {
+ sldr_setup[2] = val;
+ } else {
+ sldr_setup[2] &= ~IQS7222_SLDR_SETUP_2_TOP_SPEED_MASK;
+ sldr_setup[2] |= (val / 4);
+ }
+ }
+
+ if (!fwnode_property_read_u32(sldr_node, "linux,axis", &val)) {
+ u16 sldr_max = sldr_setup[3] - 1;
+
+ if (!reg_offset) {
+ sldr_max = sldr_setup[2];
+
+ sldr_max &= IQS7222_SLDR_SETUP_2_RES_MASK;
+ sldr_max >>= IQS7222_SLDR_SETUP_2_RES_SHIFT;
+
+ sldr_max = sldr_max * 16 - 1;
+ }
+
+ input_set_abs_params(iqs7222->keypad, val, 0, sldr_max, 0, 0);
+ iqs7222->sl_axis[sldr_index] = val;
+ }
+
+ if (dev_desc->wheel_enable) {
+ sldr_setup[0] &= ~dev_desc->wheel_enable;
+ if (iqs7222->sl_axis[sldr_index] == ABS_WHEEL)
+ sldr_setup[0] |= dev_desc->wheel_enable;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(iqs7222_sl_events); i++) {
+ const char *event_name = iqs7222_sl_events[i].name;
+ struct fwnode_handle *event_node;
+
+ /*
+ * The absence of a register offset means the remaining fields
+ * in the group represent gesture settings.
+ */
+ if (iqs7222_sl_events[i].enable && !reg_offset)
+ sldr_setup[9] &= ~iqs7222_sl_events[i].enable;
+
+ event_node = fwnode_get_named_child_node(sldr_node, event_name);
+ if (!event_node)
+ continue;
+
+ error = iqs7222_parse_props(iqs7222, &event_node, sldr_index,
+ IQS7222_REG_GRP_SLDR,
+ reg_offset ?
+ IQS7222_REG_KEY_RESERVED :
+ iqs7222_sl_events[i].reg_key);
+ if (error)
+ return error;
+
+ error = fwnode_property_read_u32(event_node, "linux,code",
+ &val);
+ if (error) {
+ dev_err(&client->dev, "Failed to read %s code: %d\n",
+ fwnode_get_name(sldr_node), error);
+ return error;
+ }
+
+ iqs7222->sl_code[sldr_index][i] = val;
+ input_set_capability(iqs7222->keypad, EV_KEY, val);
+
+ /*
+ * The press/release event is determined based on whether the
+ * coordinate field reports 0xFFFF and has no explicit enable
+ * control.
+ */
+ if (!iqs7222_sl_events[i].enable || reg_offset)
+ continue;
+
+ sldr_setup[9] |= iqs7222_sl_events[i].enable;
+
+ error = iqs7222_gpio_select(iqs7222, event_node,
+ iqs7222_sl_events[i].enable,
+ 1568 + sldr_index * 30);
+ if (error)
+ return error;
+
+ if (!dev_desc->event_offset)
+ continue;
+
+ sys_setup[dev_desc->event_offset] |= BIT(10 + sldr_index);
+ }
+
+ /*
+ * The following call handles a special pair of properties that shift
+ * to make room for a wheel enable control in the case of IQS7222C.
+ */
+ return iqs7222_parse_props(iqs7222, &sldr_node, sldr_index,
+ IQS7222_REG_GRP_SLDR,
+ dev_desc->wheel_enable ?
+ IQS7222_REG_KEY_WHEEL :
+ IQS7222_REG_KEY_NO_WHEEL);
+}
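Where the slider's resolution and top speed share one register (reg_offset == 0), the size is programmed in steps of 16 and the top speed in steps of 4. The following user-space sketch is not driver code; the field position of 8 for the resolution byte is an assumption made only for this illustration:

#include <stdio.h>
#include <stdint.h>

#define SLDR_RES_SHIFT	8			/* assumed field position */
#define SLDR_RES_MASK	(0xffu << SLDR_RES_SHIFT)
#define SLDR_SPEED_MASK	0xffu

int main(void)
{
	uint16_t setup = 0;
	unsigned int size = 2048, top_speed = 200;

	/* Slider size in steps of 16 occupies the assumed upper byte. */
	setup &= (uint16_t)~SLDR_RES_MASK;
	setup |= (uint16_t)((size / 16) << SLDR_RES_SHIFT);

	/* Top speed in steps of 4 occupies the low byte. */
	setup &= (uint16_t)~SLDR_SPEED_MASK;
	setup |= top_speed / 4;

	printf("sldr_setup[2] = 0x%04x\n", setup);	/* prints 0x8032 */
	return 0;
}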
+
+static int iqs7222_parse_all(struct iqs7222_private *iqs7222)
+{
+ const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ const struct iqs7222_reg_grp_desc *reg_grps = dev_desc->reg_grps;
+ u16 *sys_setup = iqs7222->sys_setup;
+ int error, i;
+
+ if (dev_desc->event_offset)
+ sys_setup[dev_desc->event_offset] = IQS7222_EVENT_MASK_ATI;
+
+ for (i = 0; i < reg_grps[IQS7222_REG_GRP_CYCLE].num_row; i++) {
+ error = iqs7222_parse_cycle(iqs7222, i);
+ if (error)
+ return error;
+ }
+
+ error = iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_GLBL,
+ IQS7222_REG_KEY_NONE);
+ if (error)
+ return error;
+
+ for (i = 0; i < reg_grps[IQS7222_REG_GRP_GPIO].num_row; i++) {
+ struct fwnode_handle *gpio_node = NULL;
+ u16 *gpio_setup = iqs7222->gpio_setup[i];
+ int j;
+
+ gpio_setup[0] &= ~IQS7222_GPIO_SETUP_0_GPIO_EN;
+ gpio_setup[1] = 0;
+ gpio_setup[2] = 0;
+
+ error = iqs7222_parse_props(iqs7222, &gpio_node, i,
+ IQS7222_REG_GRP_GPIO,
+ IQS7222_REG_KEY_NONE);
+ if (error)
+ return error;
+
+ if (reg_grps[IQS7222_REG_GRP_GPIO].num_row == 1)
+ continue;
+
+ /*
+ * The IQS7222C exposes multiple GPIOs and must be informed
+ * as to which GPIO this group represents.
+ */
+ for (j = 0; j < ARRAY_SIZE(iqs7222_gpio_links); j++)
+ gpio_setup[0] &= ~BIT(iqs7222_gpio_links[j]);
+
+ gpio_setup[0] |= BIT(iqs7222_gpio_links[i]);
+ }
+
+ for (i = 0; i < reg_grps[IQS7222_REG_GRP_CHAN].num_row; i++) {
+ u16 *chan_setup = iqs7222->chan_setup[i];
+
+ chan_setup[0] &= ~IQS7222_CHAN_SETUP_0_REF_MODE_MASK;
+ chan_setup[0] &= ~IQS7222_CHAN_SETUP_0_CHAN_EN;
+
+ chan_setup[5] = 0;
+ }
+
+ for (i = 0; i < reg_grps[IQS7222_REG_GRP_CHAN].num_row; i++) {
+ error = iqs7222_parse_chan(iqs7222, i);
+ if (error)
+ return error;
+ }
+
+ error = iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_FILT,
+ IQS7222_REG_KEY_NONE);
+ if (error)
+ return error;
+
+ for (i = 0; i < reg_grps[IQS7222_REG_GRP_SLDR].num_row; i++) {
+ u16 *sldr_setup = iqs7222->sldr_setup[i];
+
+ sldr_setup[0] &= ~IQS7222_SLDR_SETUP_0_CHAN_CNT_MASK;
+
+ error = iqs7222_parse_sldr(iqs7222, i);
+ if (error)
+ return error;
+ }
+
+ sys_setup[0] &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
+ sys_setup[0] &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
+
+ sys_setup[0] |= IQS7222_SYS_SETUP_ACK_RESET;
+
+ return iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_SYS,
+ IQS7222_REG_KEY_NONE);
+}
+
+static int iqs7222_report(struct iqs7222_private *iqs7222)
+{
+ const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ struct i2c_client *client = iqs7222->client;
+ int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
+ int num_stat = dev_desc->reg_grps[IQS7222_REG_GRP_STAT].num_col;
+ int error, i, j;
+ __le16 status[IQS7222_MAX_COLS_STAT];
+
+ error = iqs7222_read_burst(iqs7222, IQS7222_SYS_STATUS, status,
+ num_stat);
+ if (error)
+ return error;
+
+ if (le16_to_cpu(status[0]) & IQS7222_SYS_STATUS_RESET) {
+ dev_err(&client->dev, "Unexpected device reset\n");
+ return iqs7222_dev_init(iqs7222, WRITE);
+ }
+
+ if (le16_to_cpu(status[0]) & IQS7222_SYS_STATUS_ATI_ERROR) {
+ dev_err(&client->dev, "Unexpected ATI error\n");
+ return iqs7222_ati_trigger(iqs7222);
+ }
+
+ if (le16_to_cpu(status[0]) & IQS7222_SYS_STATUS_ATI_ACTIVE)
+ return 0;
+
+ for (i = 0; i < num_chan; i++) {
+ u16 *chan_setup = iqs7222->chan_setup[i];
+
+ if (!(chan_setup[0] & IQS7222_CHAN_SETUP_0_CHAN_EN))
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(iqs7222_kp_events); j++) {
+ /*
+ * Proximity state begins at offset 2 and spills into
+ * offset 3 for devices with more than 16 channels.
+ *
+ * Touch state begins at the first offset immediately
+ * following proximity state.
+ */
+ int k = 2 + j * (num_chan > 16 ? 2 : 1);
+ u16 state = le16_to_cpu(status[k + i / 16]);
+
+ input_event(iqs7222->keypad,
+ iqs7222->kp_type[i][j],
+ iqs7222->kp_code[i][j],
+ !!(state & BIT(i % 16)));
+ }
+ }
+
+ for (i = 0; i < dev_desc->reg_grps[IQS7222_REG_GRP_SLDR].num_row; i++) {
+ u16 *sldr_setup = iqs7222->sldr_setup[i];
+ u16 sldr_pos = le16_to_cpu(status[4 + i]);
+ u16 state = le16_to_cpu(status[6 + i]);
+
+ if (!(sldr_setup[0] & IQS7222_SLDR_SETUP_0_CHAN_CNT_MASK))
+ continue;
+
+ if (sldr_pos < dev_desc->sldr_res)
+ input_report_abs(iqs7222->keypad, iqs7222->sl_axis[i],
+ sldr_pos);
+
+ for (j = 0; j < ARRAY_SIZE(iqs7222_sl_events); j++) {
+ u16 mask = iqs7222_sl_events[j].mask;
+ u16 val = iqs7222_sl_events[j].val;
+
+ if (!iqs7222_sl_events[j].enable) {
+ input_report_key(iqs7222->keypad,
+ iqs7222->sl_code[i][j],
+ sldr_pos < dev_desc->sldr_res);
+ continue;
+ }
+
+ /*
+ * The remaining offsets represent gesture state, and
+ * are discarded in the case of IQS7222C because only
+ * absolute position is reported.
+ */
+ if (num_stat < IQS7222_MAX_COLS_STAT)
+ continue;
+
+ input_report_key(iqs7222->keypad,
+ iqs7222->sl_code[i][j],
+ (state & mask) == val);
+ }
+ }
+
+ input_sync(iqs7222->keypad);
+
+ return 0;
+}
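The per-channel state lookup in iqs7222_report() folds the comments above into the index math: proximity words start at status[2], touch words follow, and each block doubles in width past 16 channels. A small stand-alone C sketch (not driver code) of the same arithmetic:

#include <stdio.h>

/* event 0 = proximity, event 1 = touch; mirrors the k + i / 16 math above. */
static int status_index(int num_chan, int chan, int event)
{
	int k = 2 + event * (num_chan > 16 ? 2 : 1);

	return k + chan / 16;
}

int main(void)
{
	/*
	 * A 20-channel device keeps the touch state of channel 18 in
	 * status[5]; a 10-channel device keeps channel 3 in status[3].
	 */
	printf("%d %d\n", status_index(20, 18, 1), status_index(10, 3, 1));
	return 0;
}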
+
+static irqreturn_t iqs7222_irq(int irq, void *context)
+{
+ struct iqs7222_private *iqs7222 = context;
+
+ return iqs7222_report(iqs7222) ? IRQ_NONE : IRQ_HANDLED;
+}
+
+static int iqs7222_probe(struct i2c_client *client)
+{
+ struct iqs7222_private *iqs7222;
+ unsigned long irq_flags;
+ int error, irq;
+
+ iqs7222 = devm_kzalloc(&client->dev, sizeof(*iqs7222), GFP_KERNEL);
+ if (!iqs7222)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, iqs7222);
+ iqs7222->client = client;
+
+ iqs7222->keypad = devm_input_allocate_device(&client->dev);
+ if (!iqs7222->keypad)
+ return -ENOMEM;
+
+ iqs7222->keypad->name = client->name;
+ iqs7222->keypad->id.bustype = BUS_I2C;
+
+ /*
+ * The RDY pin behaves as an interrupt, but must also be polled ahead
+ * of unsolicited I2C communication. As such, it is first opened as a
+ * GPIO and then passed to gpiod_to_irq() to register the interrupt.
+ */
+ iqs7222->irq_gpio = devm_gpiod_get(&client->dev, "irq", GPIOD_IN);
+ if (IS_ERR(iqs7222->irq_gpio)) {
+ error = PTR_ERR(iqs7222->irq_gpio);
+ dev_err(&client->dev, "Failed to request IRQ GPIO: %d\n",
+ error);
+ return error;
+ }
+
+ iqs7222->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(iqs7222->reset_gpio)) {
+ error = PTR_ERR(iqs7222->reset_gpio);
+ dev_err(&client->dev, "Failed to request reset GPIO: %d\n",
+ error);
+ return error;
+ }
+
+ error = iqs7222_hard_reset(iqs7222);
+ if (error)
+ return error;
+
+ error = iqs7222_dev_info(iqs7222);
+ if (error)
+ return error;
+
+ error = iqs7222_dev_init(iqs7222, READ);
+ if (error)
+ return error;
+
+ error = iqs7222_parse_all(iqs7222);
+ if (error)
+ return error;
+
+ error = iqs7222_dev_init(iqs7222, WRITE);
+ if (error)
+ return error;
+
+ error = iqs7222_report(iqs7222);
+ if (error)
+ return error;
+
+ error = input_register_device(iqs7222->keypad);
+ if (error) {
+ dev_err(&client->dev, "Failed to register device: %d\n", error);
+ return error;
+ }
+
+ irq = gpiod_to_irq(iqs7222->irq_gpio);
+ if (irq < 0)
+ return irq;
+
+ irq_flags = gpiod_is_active_low(iqs7222->irq_gpio) ? IRQF_TRIGGER_LOW
+ : IRQF_TRIGGER_HIGH;
+ irq_flags |= IRQF_ONESHOT;
+
+ error = devm_request_threaded_irq(&client->dev, irq, NULL, iqs7222_irq,
+ irq_flags, client->name, iqs7222);
+ if (error)
+ dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
+
+ return error;
+}
+
+static const struct of_device_id iqs7222_of_match[] = {
+ { .compatible = "azoteq,iqs7222a" },
+ { .compatible = "azoteq,iqs7222b" },
+ { .compatible = "azoteq,iqs7222c" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, iqs7222_of_match);
+
+static struct i2c_driver iqs7222_i2c_driver = {
+ .driver = {
+ .name = "iqs7222",
+ .of_match_table = iqs7222_of_match,
+ },
+ .probe_new = iqs7222_probe,
+};
+module_i2c_driver(iqs7222_i2c_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS7222A/B/C Capacitive Touch Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index 89af52498c96..549df01b6ee3 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -9,9 +9,11 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
@@ -19,6 +21,16 @@
#define PON_REV2 0x01
+#define PON_SUBTYPE 0x05
+
+#define PON_SUBTYPE_PRIMARY 0x01
+#define PON_SUBTYPE_SECONDARY 0x02
+#define PON_SUBTYPE_1REG 0x03
+#define PON_SUBTYPE_GEN2_PRIMARY 0x04
+#define PON_SUBTYPE_GEN2_SECONDARY 0x05
+#define PON_SUBTYPE_GEN3_PBS 0x08
+#define PON_SUBTYPE_GEN3_HLOS 0x09
+
#define PON_RT_STS 0x10
#define PON_KPDPWR_N_SET BIT(0)
#define PON_RESIN_N_SET BIT(1)
@@ -45,6 +57,7 @@ struct pm8941_data {
unsigned int status_bit;
bool supports_ps_hold_poff_config;
bool supports_debounce_config;
+ bool has_pon_pbs;
const char *name;
const char *phys;
};
@@ -53,13 +66,18 @@ struct pm8941_pwrkey {
struct device *dev;
int irq;
u32 baseaddr;
+ u32 pon_pbs_baseaddr;
struct regmap *regmap;
struct input_dev *input;
unsigned int revision;
+ unsigned int subtype;
struct notifier_block reboot_notifier;
u32 code;
+ u32 sw_debounce_time_us;
+ ktime_t sw_debounce_end_time;
+ bool last_status;
const struct pm8941_data *data;
};
@@ -129,20 +147,76 @@ static irqreturn_t pm8941_pwrkey_irq(int irq, void *_data)
{
struct pm8941_pwrkey *pwrkey = _data;
unsigned int sts;
- int error;
+ int err;
+
+ if (pwrkey->sw_debounce_time_us) {
+ if (ktime_before(ktime_get(), pwrkey->sw_debounce_end_time)) {
+ dev_dbg(pwrkey->dev,
+ "ignoring key event received before debounce end %llu us\n",
+ pwrkey->sw_debounce_end_time);
+ return IRQ_HANDLED;
+ }
+ }
- error = regmap_read(pwrkey->regmap,
- pwrkey->baseaddr + PON_RT_STS, &sts);
- if (error)
+ err = regmap_read(pwrkey->regmap, pwrkey->baseaddr + PON_RT_STS, &sts);
+ if (err)
return IRQ_HANDLED;
- input_report_key(pwrkey->input, pwrkey->code,
- sts & pwrkey->data->status_bit);
+ sts &= pwrkey->data->status_bit;
+
+ if (pwrkey->sw_debounce_time_us && !sts)
+ pwrkey->sw_debounce_end_time = ktime_add_us(ktime_get(),
+ pwrkey->sw_debounce_time_us);
+
+ /*
+ * Simulate a press event in case a release event occurred without a
+ * corresponding press event.
+ */
+ if (!pwrkey->last_status && !sts) {
+ input_report_key(pwrkey->input, pwrkey->code, 1);
+ input_sync(pwrkey->input);
+ }
+ pwrkey->last_status = sts;
+
+ input_report_key(pwrkey->input, pwrkey->code, sts);
input_sync(pwrkey->input);
return IRQ_HANDLED;
}
+static int pm8941_pwrkey_sw_debounce_init(struct pm8941_pwrkey *pwrkey)
+{
+ unsigned int val, addr, mask;
+ int error;
+
+ if (pwrkey->data->has_pon_pbs && !pwrkey->pon_pbs_baseaddr) {
+ dev_err(pwrkey->dev,
+ "PON_PBS address missing, can't read HW debounce time\n");
+ return 0;
+ }
+
+ if (pwrkey->pon_pbs_baseaddr)
+ addr = pwrkey->pon_pbs_baseaddr + PON_DBC_CTL;
+ else
+ addr = pwrkey->baseaddr + PON_DBC_CTL;
+ error = regmap_read(pwrkey->regmap, addr, &val);
+ if (error)
+ return error;
+
+ if (pwrkey->subtype >= PON_SUBTYPE_GEN2_PRIMARY)
+ mask = 0xf;
+ else
+ mask = 0x7;
+
+ pwrkey->sw_debounce_time_us =
+ 2 * USEC_PER_SEC / (1 << (mask - (val & mask)));
+
+ dev_dbg(pwrkey->dev, "SW debounce time = %u us\n",
+ pwrkey->sw_debounce_time_us);
+
+ return 0;
+}
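The debounce value read back from PON_DBC_CTL selects a power-of-two divider of 2 seconds, using a 4-bit field on GEN2 and later PONs and a 3-bit field before that. A user-space sketch of the same conversion (illustration only, not driver code):

#include <stdio.h>

#define USEC_PER_SEC	1000000U

static unsigned int sw_debounce_us(unsigned int val, int gen2_or_later)
{
	unsigned int mask = gen2_or_later ? 0xf : 0x7;

	return 2 * USEC_PER_SEC / (1U << (mask - (val & mask)));
}

int main(void)
{
	/*
	 * A GEN2 PON with the field at 0xf keeps the full 2 s of debounce;
	 * a legacy PON with the field at 0x5 ends up with 500 ms.
	 */
	printf("%u us, %u us\n",
	       sw_debounce_us(0xf, 1), sw_debounce_us(0x5, 0));
	return 0;
}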
+
static int __maybe_unused pm8941_pwrkey_suspend(struct device *dev)
{
struct pm8941_pwrkey *pwrkey = dev_get_drvdata(dev);
@@ -171,6 +245,8 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
struct pm8941_pwrkey *pwrkey;
bool pull_up;
struct device *parent;
+ struct device_node *regmap_node;
+ const __be32 *addr;
u32 req_delay;
int error;
@@ -192,8 +268,10 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
pwrkey->data = of_device_get_match_data(&pdev->dev);
parent = pdev->dev.parent;
+ regmap_node = pdev->dev.of_node;
pwrkey->regmap = dev_get_regmap(parent, NULL);
if (!pwrkey->regmap) {
+ regmap_node = parent->of_node;
/*
* We failed to get regmap for parent. Let's see if we are
* a child of pon node and read regmap and reg from its
@@ -204,15 +282,21 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to locate regmap\n");
return -ENODEV;
}
+ }
- error = of_property_read_u32(parent->of_node,
- "reg", &pwrkey->baseaddr);
- } else {
- error = of_property_read_u32(pdev->dev.of_node, "reg",
- &pwrkey->baseaddr);
+ addr = of_get_address(regmap_node, 0, NULL, NULL);
+ if (!addr) {
+ dev_err(&pdev->dev, "reg property missing\n");
+ return -EINVAL;
+ }
+ pwrkey->baseaddr = be32_to_cpup(addr);
+
+ if (pwrkey->data->has_pon_pbs) {
+ /* PON_PBS base address is optional */
+ addr = of_get_address(regmap_node, 1, NULL, NULL);
+ if (addr)
+ pwrkey->pon_pbs_baseaddr = be32_to_cpup(addr);
}
- if (error)
- return error;
pwrkey->irq = platform_get_irq(pdev, 0);
if (pwrkey->irq < 0)
@@ -221,7 +305,14 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
error = regmap_read(pwrkey->regmap, pwrkey->baseaddr + PON_REV2,
&pwrkey->revision);
if (error) {
- dev_err(&pdev->dev, "failed to set debounce: %d\n", error);
+ dev_err(&pdev->dev, "failed to read revision: %d\n", error);
+ return error;
+ }
+
+ error = regmap_read(pwrkey->regmap, pwrkey->baseaddr + PON_SUBTYPE,
+ &pwrkey->subtype);
+ if (error) {
+ dev_err(&pdev->dev, "failed to read subtype: %d\n", error);
return error;
}
@@ -259,6 +350,10 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
}
}
+ error = pm8941_pwrkey_sw_debounce_init(pwrkey);
+ if (error)
+ return error;
+
if (pwrkey->data->pull_up_bit) {
error = regmap_update_bits(pwrkey->regmap,
pwrkey->baseaddr + PON_PULL_CTL,
@@ -320,6 +415,7 @@ static const struct pm8941_data pwrkey_data = {
.phys = "pm8941_pwrkey/input0",
.supports_ps_hold_poff_config = true,
.supports_debounce_config = true,
+ .has_pon_pbs = false,
};
static const struct pm8941_data resin_data = {
@@ -329,6 +425,7 @@ static const struct pm8941_data resin_data = {
.phys = "pm8941_resin/input0",
.supports_ps_hold_poff_config = true,
.supports_debounce_config = true,
+ .has_pon_pbs = false,
};
static const struct pm8941_data pon_gen3_pwrkey_data = {
@@ -337,6 +434,7 @@ static const struct pm8941_data pon_gen3_pwrkey_data = {
.phys = "pmic_pwrkey/input0",
.supports_ps_hold_poff_config = false,
.supports_debounce_config = false,
+ .has_pon_pbs = true,
};
static const struct pm8941_data pon_gen3_resin_data = {
@@ -345,6 +443,7 @@ static const struct pm8941_data pon_gen3_resin_data = {
.phys = "pmic_resin/input0",
.supports_ps_hold_poff_config = false,
.supports_debounce_config = false,
+ .has_pon_pbs = true,
};
static const struct of_device_id pm8941_pwr_key_id_table[] = {
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index c4e0e1886061..c1c733a9cb89 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -374,7 +374,7 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
/* get a handle to the interrupt data pipe */
pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(udev, pipe);
if (maxp < POWERMATE_PAYLOAD_SIZE_MIN || maxp > POWERMATE_PAYLOAD_SIZE_MAX) {
printk(KERN_WARNING "powermate: Expected payload of %d--%d bytes, found %d bytes!\n",
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index fe43e5557ed7..cdcb7737c46a 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -205,6 +205,7 @@ static int bbc_beep_probe(struct platform_device *op)
info = &state->u.bbc;
info->clock_freq = of_getintprop_default(dp, "clock-frequency", 0);
+ of_node_put(dp);
if (!info->clock_freq)
goto out_free;
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 1fc9b3e7007f..8d8ebdc2039b 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -481,7 +481,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
error_evtchan:
xenbus_free_evtchn(dev, evtchn);
error_grant:
- gnttab_end_foreign_access(info->gref, 0UL);
+ gnttab_end_foreign_access(info->gref, NULL);
info->gref = -1;
return ret;
}
@@ -492,7 +492,7 @@ static void xenkbd_disconnect_backend(struct xenkbd_info *info)
unbind_from_irqhandler(info->irq, info);
info->irq = -1;
if (info->gref >= 0)
- gnttab_end_foreign_access(info->gref, 0UL);
+ gnttab_end_foreign_access(info->gref, NULL);
info->gref = -1;
}
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 8ab01c7601b1..69420781db30 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -905,7 +905,7 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
/* get a handle to the interrupt data pipe */
pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
- ret = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ ret = usb_maxpacket(udev, pipe);
if (ret != USB_PKT_LEN)
dev_err(&intf->dev, "invalid payload size %d, expected %zd\n",
ret, USB_PKT_LEN);
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index 5f868009d35b..d272f1ec27ba 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -696,7 +696,7 @@ int cypress_init(struct psmouse *psmouse)
err_exit:
/*
* Reset Cypress Trackpad as a standard mouse. Then
- * let psmouse driver commmunicating with it as default PS2 mouse.
+ * let the psmouse driver communicate with it as a default PS/2 mouse.
*/
cypress_reset(psmouse);
diff --git a/drivers/input/mouse/psmouse-smbus.c b/drivers/input/mouse/psmouse-smbus.c
index 164f6c757f6b..2a2459b1b4f2 100644
--- a/drivers/input/mouse/psmouse-smbus.c
+++ b/drivers/input/mouse/psmouse-smbus.c
@@ -26,6 +26,8 @@ struct psmouse_smbus_dev {
static LIST_HEAD(psmouse_smbus_list);
static DEFINE_MUTEX(psmouse_smbus_mutex);
+static struct workqueue_struct *psmouse_smbus_wq;
+
static void psmouse_smbus_check_adapter(struct i2c_adapter *adapter)
{
struct psmouse_smbus_dev *smbdev;
@@ -161,7 +163,7 @@ static void psmouse_smbus_schedule_remove(struct i2c_client *client)
INIT_WORK(&rwork->work, psmouse_smbus_remove_i2c_device);
rwork->client = client;
- schedule_work(&rwork->work);
+ queue_work(psmouse_smbus_wq, &rwork->work);
}
}
@@ -305,9 +307,14 @@ int __init psmouse_smbus_module_init(void)
{
int error;
+ psmouse_smbus_wq = alloc_workqueue("psmouse-smbus", 0, 0);
+ if (!psmouse_smbus_wq)
+ return -ENOMEM;
+
error = bus_register_notifier(&i2c_bus_type, &psmouse_smbus_notifier);
if (error) {
pr_err("failed to register i2c bus notifier: %d\n", error);
+ destroy_workqueue(psmouse_smbus_wq);
return error;
}
@@ -317,5 +324,5 @@ int __init psmouse_smbus_module_init(void)
void psmouse_smbus_module_exit(void)
{
bus_unregister_notifier(&i2c_bus_type, &psmouse_smbus_notifier);
- flush_scheduled_work();
+ destroy_workqueue(psmouse_smbus_wq);
}
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index 3332b77eef2a..f04ba12dbfa8 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <linux/slab.h>
-#include <mach/hardware.h>
#include <linux/platform_data/mouse-pxa930_trkball.h>
/* Trackball Controller Register Definitions */
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
index 42443ffba7c4..ea9eff7c8099 100644
--- a/drivers/input/mouse/vmmouse.c
+++ b/drivers/input/mouse/vmmouse.c
@@ -366,6 +366,19 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
}
/**
+ * vmmouse_reset - Disable vmmouse and reset
+ *
+ * @psmouse: Pointer to the psmouse struct
+ *
+ * Tries to disable vmmouse mode before entering suspend.
+ */
+static void vmmouse_reset(struct psmouse *psmouse)
+{
+ vmmouse_disable(psmouse);
+ psmouse_reset(psmouse);
+}
+
+/**
* vmmouse_disconnect - Take down vmmouse driver
*
* @psmouse: Pointer to the psmouse struct
@@ -472,6 +485,7 @@ int vmmouse_init(struct psmouse *psmouse)
psmouse->protocol_handler = vmmouse_process_byte;
psmouse->disconnect = vmmouse_disconnect;
psmouse->reconnect = vmmouse_reconnect;
+ psmouse->cleanup = vmmouse_reset;
return 0;
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index 93b328c796c6..c5ce907535ef 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -733,7 +733,6 @@ remove_v4l2:
v4l2_device_unregister(&f54->v4l2);
remove_wq:
cancel_delayed_work_sync(&f54->work);
- flush_workqueue(f54->workqueue);
destroy_workqueue(f54->workqueue);
return ret;
}
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index a38d1fe97334..56c7e471ac32 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -130,7 +130,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
return -ENODEV;
pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(dev, pipe);
acecad = kzalloc(sizeof(struct usb_acecad), GFP_KERNEL);
input_dev = input_allocate_device();
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 1581f6ef0927..24ec4844a5c3 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -931,8 +931,7 @@ aiptek_query(struct aiptek *aiptek, unsigned char command, unsigned char data)
}
msleep(aiptek->curSetting.programmableDelay);
- if ((ret =
- aiptek_get_report(aiptek, 3, 2, buf, sizeof_buf)) != sizeof_buf) {
+ if (aiptek_get_report(aiptek, 3, 2, buf, sizeof_buf) != sizeof_buf) {
dev_dbg(&aiptek->intf->dev,
"aiptek_query failed: returned 0x%02x 0x%02x 0x%02x\n",
buf[0], buf[1], buf[2]);
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
index 749edbdb7ffa..c608ac505d1b 100644
--- a/drivers/input/tablet/pegasus_notetaker.c
+++ b/drivers/input/tablet/pegasus_notetaker.c
@@ -296,7 +296,7 @@ static int pegasus_probe(struct usb_interface *intf,
pegasus->intf = intf;
pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
- pegasus->data_len = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+ pegasus->data_len = usb_maxpacket(dev, pipe);
pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL,
&pegasus->data_dma);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 43c7d6e5bdc0..2d70c945b20a 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -902,6 +902,7 @@ config TOUCHSCREEN_WM9713
config TOUCHSCREEN_WM97XX_MAINSTONE
tristate "WM97xx Mainstone/Palm accelerated touch"
depends on TOUCHSCREEN_WM97XX && ARCH_PXA
+ depends on SND_PXA2XX_LIB_AC97
help
Say Y here for support for streaming mode with WM97xx touchscreens
on Mainstone, Palm Tungsten T5, TX and LifeDrive systems.
@@ -914,6 +915,7 @@ config TOUCHSCREEN_WM97XX_MAINSTONE
config TOUCHSCREEN_WM97XX_ZYLONITE
tristate "Zylonite accelerated touch"
depends on TOUCHSCREEN_WM97XX && MACH_ZYLONITE
+ depends on SND_PXA2XX_LIB_AC97
select TOUCHSCREEN_WM9713
help
Say Y here for support for streaming mode with the touchscreen
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index f8564b398eb3..c39f49720fe4 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -21,13 +21,14 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/wm97xx.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/wm97xx.h>
-#include <mach/regs-ac97.h>
+#include <sound/pxa2xx-lib.h>
#include <asm/mach-types.h>
@@ -41,24 +42,23 @@ struct continuous {
#define WM_READS(sp) ((sp / HZ) + 1)
static const struct continuous cinfo[] = {
- {WM9705_ID2, 0, WM_READS(94), 94},
- {WM9705_ID2, 1, WM_READS(188), 188},
- {WM9705_ID2, 2, WM_READS(375), 375},
- {WM9705_ID2, 3, WM_READS(750), 750},
- {WM9712_ID2, 0, WM_READS(94), 94},
- {WM9712_ID2, 1, WM_READS(188), 188},
- {WM9712_ID2, 2, WM_READS(375), 375},
- {WM9712_ID2, 3, WM_READS(750), 750},
- {WM9713_ID2, 0, WM_READS(94), 94},
- {WM9713_ID2, 1, WM_READS(120), 120},
- {WM9713_ID2, 2, WM_READS(154), 154},
- {WM9713_ID2, 3, WM_READS(188), 188},
+ { WM9705_ID2, 0, WM_READS(94), 94 },
+ { WM9705_ID2, 1, WM_READS(188), 188 },
+ { WM9705_ID2, 2, WM_READS(375), 375 },
+ { WM9705_ID2, 3, WM_READS(750), 750 },
+ { WM9712_ID2, 0, WM_READS(94), 94 },
+ { WM9712_ID2, 1, WM_READS(188), 188 },
+ { WM9712_ID2, 2, WM_READS(375), 375 },
+ { WM9712_ID2, 3, WM_READS(750), 750 },
+ { WM9713_ID2, 0, WM_READS(94), 94 },
+ { WM9713_ID2, 1, WM_READS(120), 120 },
+ { WM9713_ID2, 2, WM_READS(154), 154 },
+ { WM9713_ID2, 3, WM_READS(188), 188 },
};
/* continuous speed index */
static int sp_idx;
-static u16 last, tries;
-static int irq;
+static struct gpio_desc *gpiod_irq;
/*
* Pen sampling frequency (Hz) in continuous mode.
@@ -97,44 +97,40 @@ MODULE_PARM_DESC(ac97_touch_slot, "Touch screen data slot AC97 number");
/* flush AC97 slot 5 FIFO on pxa machines */
-#ifdef CONFIG_PXA27x
-static void wm97xx_acc_pen_up(struct wm97xx *wm)
-{
- schedule_timeout_uninterruptible(1);
-
- while (MISR & (1 << 2))
- MODR;
-}
-#else
static void wm97xx_acc_pen_up(struct wm97xx *wm)
{
unsigned int count;
- schedule_timeout_uninterruptible(1);
+ msleep(1);
- for (count = 0; count < 16; count++)
- MODR;
+ if (cpu_is_pxa27x()) {
+ while (pxa2xx_ac97_read_misr() & (1 << 2))
+ pxa2xx_ac97_read_modr();
+ } else if (cpu_is_pxa3xx()) {
+ for (count = 0; count < 16; count++)
+ pxa2xx_ac97_read_modr();
+ }
}
-#endif
static int wm97xx_acc_pen_down(struct wm97xx *wm)
{
u16 x, y, p = 0x100 | WM97XX_ADCSEL_PRES;
int reads = 0;
+ static u16 last, tries;
/* When the AC97 queue has been drained we need to allow time
* to buffer up samples otherwise we end up spinning polling
* for samples. The controller can't have a suitably low
* threshold set to use the notifications it gives.
*/
- schedule_timeout_uninterruptible(1);
+ msleep(1);
if (tries > 5) {
tries = 0;
return RC_PENUP;
}
- x = MODR;
+ x = pxa2xx_ac97_read_modr();
if (x == last) {
tries++;
return RC_AGAIN;
@@ -142,10 +138,10 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
last = x;
do {
if (reads)
- x = MODR;
- y = MODR;
+ x = pxa2xx_ac97_read_modr();
+ y = pxa2xx_ac97_read_modr();
if (pressure)
- p = MODR;
+ p = pxa2xx_ac97_read_modr();
dev_dbg(wm->dev, "Raw coordinates: x=%x, y=%x, p=%x\n",
x, y, p);
@@ -194,28 +190,23 @@ static int wm97xx_acc_startup(struct wm97xx *wm)
/* IRQ driven touchscreen is used on Palm hardware */
if (machine_is_palmt5() || machine_is_palmtx() || machine_is_palmld()) {
pen_int = 1;
- irq = 27;
/* There is some obscure mutant of WM9712 interbred with WM9713
* used on Palm HW */
wm->variant = WM97xx_WM1613;
- } else if (machine_is_mainstone() && pen_int)
- irq = 4;
-
- if (irq) {
- ret = gpio_request(irq, "Touchscreen IRQ");
- if (ret)
- goto out;
-
- ret = gpio_direction_input(irq);
- if (ret) {
- gpio_free(irq);
- goto out;
- }
+ } else if (machine_is_zylonite()) {
+ pen_int = 1;
+ }
- wm->pen_irq = gpio_to_irq(irq);
+ if (pen_int) {
+ gpiod_irq = gpiod_get(wm->dev, "touch", GPIOD_IN);
+ if (IS_ERR(gpiod_irq))
+ pen_int = 0;
+ }
+
+ if (pen_int) {
+ wm->pen_irq = gpiod_to_irq(gpiod_irq);
irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH);
- } else /* pen irq not supported */
- pen_int = 0;
+ }
/* codec specific irq config */
if (pen_int) {
@@ -242,7 +233,6 @@ static int wm97xx_acc_startup(struct wm97xx *wm)
}
}
-out:
return ret;
}
@@ -250,28 +240,19 @@ static void wm97xx_acc_shutdown(struct wm97xx *wm)
{
/* codec specific deconfig */
if (pen_int) {
- if (irq)
- gpio_free(irq);
+ if (gpiod_irq)
+ gpiod_put(gpiod_irq);
wm->pen_irq = 0;
}
}
-static void wm97xx_irq_enable(struct wm97xx *wm, int enable)
-{
- if (enable)
- enable_irq(wm->pen_irq);
- else
- disable_irq_nosync(wm->pen_irq);
-}
-
static struct wm97xx_mach_ops mainstone_mach_ops = {
- .acc_enabled = 1,
- .acc_pen_up = wm97xx_acc_pen_up,
- .acc_pen_down = wm97xx_acc_pen_down,
- .acc_startup = wm97xx_acc_startup,
- .acc_shutdown = wm97xx_acc_shutdown,
- .irq_enable = wm97xx_irq_enable,
- .irq_gpio = WM97XX_GPIO_2,
+ .acc_enabled = 1,
+ .acc_pen_up = wm97xx_acc_pen_up,
+ .acc_pen_down = wm97xx_acc_pen_down,
+ .acc_startup = wm97xx_acc_startup,
+ .acc_shutdown = wm97xx_acc_shutdown,
+ .irq_gpio = WM97XX_GPIO_2,
};
static int mainstone_wm97xx_probe(struct platform_device *pdev)
@@ -286,14 +267,15 @@ static int mainstone_wm97xx_remove(struct platform_device *pdev)
struct wm97xx *wm = platform_get_drvdata(pdev);
wm97xx_unregister_mach_ops(wm);
+
return 0;
}
static struct platform_driver mainstone_wm97xx_driver = {
- .probe = mainstone_wm97xx_probe,
- .remove = mainstone_wm97xx_remove,
- .driver = {
- .name = "wm97xx-touch",
+ .probe = mainstone_wm97xx_probe,
+ .remove = mainstone_wm97xx_remove,
+ .driver = {
+ .name = "wm97xx-touch",
},
};
module_platform_driver(mainstone_wm97xx_driver);
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index 72e0b767e1ba..c175d44c52f3 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -337,13 +337,15 @@ static int stmfts_input_open(struct input_dev *dev)
struct stmfts_data *sdata = input_get_drvdata(dev);
int err;
- err = pm_runtime_get_sync(&sdata->client->dev);
- if (err < 0)
- goto out;
+ err = pm_runtime_resume_and_get(&sdata->client->dev);
+ if (err)
+ return err;
err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON);
- if (err)
- goto out;
+ if (err) {
+ pm_runtime_put_sync(&sdata->client->dev);
+ return err;
+ }
mutex_lock(&sdata->mutex);
sdata->running = true;
@@ -366,9 +368,7 @@ static int stmfts_input_open(struct input_dev *dev)
"failed to enable touchkey\n");
}
-out:
- pm_runtime_put_noidle(&sdata->client->dev);
- return err;
+ return 0;
}
static void stmfts_input_close(struct input_dev *dev)
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 1b58611c8084..2757c7768ffe 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -285,11 +285,12 @@ void wm97xx_set_suspend_mode(struct wm97xx *wm, u16 mode)
EXPORT_SYMBOL_GPL(wm97xx_set_suspend_mode);
/*
- * Handle a pen down interrupt.
+ * Codec PENDOWN irq handler
+ *
*/
-static void wm97xx_pen_irq_worker(struct work_struct *work)
+static irqreturn_t wm97xx_pen_interrupt(int irq, void *dev_id)
{
- struct wm97xx *wm = container_of(work, struct wm97xx, pen_event_work);
+ struct wm97xx *wm = dev_id;
int pen_was_down = wm->pen_is_down;
/* do we need to enable the touch panel reader */
@@ -343,27 +344,6 @@ static void wm97xx_pen_irq_worker(struct work_struct *work)
if (!wm->pen_is_down && wm->mach_ops->acc_enabled)
wm->mach_ops->acc_pen_up(wm);
- wm->mach_ops->irq_enable(wm, 1);
-}
-
-/*
- * Codec PENDOWN irq handler
- *
- * We have to disable the codec interrupt in the handler because it
- * can take up to 1ms to clear the interrupt source. We schedule a task
- * in a work queue to do the actual interaction with the chip. The
- * interrupt is then enabled again in the slow handler when the source
- * has been cleared.
- */
-static irqreturn_t wm97xx_pen_interrupt(int irq, void *dev_id)
-{
- struct wm97xx *wm = dev_id;
-
- if (!work_pending(&wm->pen_event_work)) {
- wm->mach_ops->irq_enable(wm, 0);
- queue_work(wm->ts_workq, &wm->pen_event_work);
- }
-
return IRQ_HANDLED;
}
@@ -374,12 +354,9 @@ static int wm97xx_init_pen_irq(struct wm97xx *wm)
{
u16 reg;
- /* If an interrupt is supplied an IRQ enable operation must also be
- * provided. */
- BUG_ON(!wm->mach_ops->irq_enable);
-
- if (request_irq(wm->pen_irq, wm97xx_pen_interrupt, IRQF_SHARED,
- "wm97xx-pen", wm)) {
+ if (request_threaded_irq(wm->pen_irq, NULL, wm97xx_pen_interrupt,
+ IRQF_SHARED | IRQF_ONESHOT,
+ "wm97xx-pen", wm)) {
dev_err(wm->dev,
"Failed to register pen down interrupt, polling");
wm->pen_irq = 0;
@@ -509,7 +486,6 @@ static int wm97xx_ts_input_open(struct input_dev *idev)
wm->codec->dig_enable(wm, 1);
INIT_DELAYED_WORK(&wm->ts_reader, wm97xx_ts_reader);
- INIT_WORK(&wm->pen_event_work, wm97xx_pen_irq_worker);
wm->ts_reader_min_interval = HZ >= 100 ? HZ / 100 : 1;
if (wm->ts_reader_min_interval < 1)
@@ -560,10 +536,6 @@ static void wm97xx_ts_input_close(struct input_dev *idev)
wm->pen_is_down = 0;
- /* Balance out interrupt disables/enables */
- if (cancel_work_sync(&wm->pen_event_work))
- wm->mach_ops->irq_enable(wm, 1);
-
/* ts_reader rearms itself so we need to explicitly stop it
* before we destroy the workqueue.
*/
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c
index 0f4ac7f844ce..a70fe4abe520 100644
--- a/drivers/input/touchscreen/zylonite-wm97xx.c
+++ b/drivers/input/touchscreen/zylonite-wm97xx.c
@@ -17,15 +17,14 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/soc/pxa/cpu.h>
#include <linux/wm97xx.h>
-#include <mach/hardware.h>
-#include <mach/mfp.h>
-#include <mach/regs-ac97.h>
+#include <sound/pxa2xx-lib.h>
struct continuous {
u16 id; /* codec id */
@@ -80,7 +79,7 @@ static void wm97xx_acc_pen_up(struct wm97xx *wm)
msleep(1);
for (i = 0; i < 16; i++)
- MODR;
+ pxa2xx_ac97_read_modr();
}
static int wm97xx_acc_pen_down(struct wm97xx *wm)
@@ -101,7 +100,7 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
return RC_PENUP;
}
- x = MODR;
+ x = pxa2xx_ac97_read_modr();
if (x == last) {
tries++;
return RC_AGAIN;
@@ -109,10 +108,10 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
last = x;
do {
if (reads)
- x = MODR;
- y = MODR;
+ x = pxa2xx_ac97_read_modr();
+ y = pxa2xx_ac97_read_modr();
if (pressure)
- p = MODR;
+ p = pxa2xx_ac97_read_modr();
dev_dbg(wm->dev, "Raw coordinates: x=%x, y=%x, p=%x\n",
x, y, p);
@@ -161,34 +160,28 @@ static int wm97xx_acc_startup(struct wm97xx *wm)
return 0;
}
-static void wm97xx_irq_enable(struct wm97xx *wm, int enable)
-{
- if (enable)
- enable_irq(wm->pen_irq);
- else
- disable_irq_nosync(wm->pen_irq);
-}
-
static struct wm97xx_mach_ops zylonite_mach_ops = {
.acc_enabled = 1,
.acc_pen_up = wm97xx_acc_pen_up,
.acc_pen_down = wm97xx_acc_pen_down,
.acc_startup = wm97xx_acc_startup,
- .irq_enable = wm97xx_irq_enable,
.irq_gpio = WM97XX_GPIO_2,
};
static int zylonite_wm97xx_probe(struct platform_device *pdev)
{
struct wm97xx *wm = platform_get_drvdata(pdev);
- int gpio_touch_irq;
-
- if (cpu_is_pxa320())
- gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO15);
- else
- gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26);
+ struct gpio_desc *gpio_touch_irq;
+ int err;
+
+ gpio_touch_irq = devm_gpiod_get(&pdev->dev, "touch", GPIOD_IN);
+ err = PTR_ERR_OR_ZERO(gpio_touch_irq);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot get irq gpio: %d\n", err);
+ return err;
+ }
- wm->pen_irq = gpio_to_irq(gpio_touch_irq);
+ wm->pen_irq = gpiod_to_irq(gpio_touch_irq);
irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH);
wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 91353e651a52..22adff5d7f53 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -110,6 +110,15 @@ config INTERCONNECT_QCOM_SC8180X
This is a driver for the Qualcomm Network-on-Chip on sc8180x-based
platforms.
+config INTERCONNECT_QCOM_SC8280XP
+ tristate "Qualcomm SC8280XP interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on SC8280XP-based
+ platforms.
+
config INTERCONNECT_QCOM_SDM660
tristate "Qualcomm SDM660 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -137,6 +146,15 @@ config INTERCONNECT_QCOM_SDX55
This is a driver for the Qualcomm Network-on-Chip on sdx55-based
platforms.
+config INTERCONNECT_QCOM_SDX65
+ tristate "Qualcomm SDX65 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sdx65-based
+ platforms.
+
config INTERCONNECT_QCOM_SM8150
tristate "Qualcomm SM8150 interconnect driver"
depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index ceae9bb566c6..8d1fe9d38ac3 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -12,9 +12,11 @@ icc-rpmh-obj := icc-rpmh.o
qnoc-sc7180-objs := sc7180.o
qnoc-sc7280-objs := sc7280.o
qnoc-sc8180x-objs := sc8180x.o
+qnoc-sc8280xp-objs := sc8280xp.o
qnoc-sdm660-objs := sdm660.o
qnoc-sdm845-objs := sdm845.o
qnoc-sdx55-objs := sdx55.o
+qnoc-sdx65-objs := sdx65.o
qnoc-sm8150-objs := sm8150.o
qnoc-sm8250-objs := sm8250.o
qnoc-sm8350-objs := sm8350.o
@@ -33,9 +35,11 @@ obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC7280) += qnoc-sc7280.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC8180X) += qnoc-sc8180x.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SC8280XP) += qnoc-sc8280xp.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM660) += qnoc-sdm660.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SDX65) += qnoc-sdx65.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index 34125e8f8b60..fb013191c29b 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -274,20 +274,19 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
do_div(rate, qn->buswidth);
rate = min_t(u64, rate, LONG_MAX);
- if (qn->rate == rate)
- return 0;
-
for (i = 0; i < qp->num_clks; i++) {
+ if (qp->bus_clk_rate[i] == rate)
+ continue;
+
ret = clk_set_rate(qp->bus_clks[i].clk, rate);
if (ret) {
pr_err("%s clk_set_rate error: %d\n",
qp->bus_clks[i].id, ret);
return ret;
}
+ qp->bus_clk_rate[i] = rate;
}
- qn->rate = rate;
-
return 0;
}
@@ -301,7 +300,7 @@ int qnoc_probe(struct platform_device *pdev)
const struct qcom_icc_desc *desc;
struct icc_onecell_data *data;
struct icc_provider *provider;
- struct qcom_icc_node **qnodes;
+ struct qcom_icc_node * const *qnodes;
struct qcom_icc_provider *qp;
struct icc_node *node;
size_t num_nodes, i;
@@ -332,6 +331,11 @@ int qnoc_probe(struct platform_device *pdev)
if (!qp)
return -ENOMEM;
+ qp->bus_clk_rate = devm_kcalloc(dev, cd_num, sizeof(*qp->bus_clk_rate),
+ GFP_KERNEL);
+ if (!qp->bus_clk_rate)
+ return -ENOMEM;
+
data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
GFP_KERNEL);
if (!data)
diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h
index 26dad006034f..ebee9009301e 100644
--- a/drivers/interconnect/qcom/icc-rpm.h
+++ b/drivers/interconnect/qcom/icc-rpm.h
@@ -26,6 +26,7 @@ enum qcom_icc_type {
* @type: the ICC provider type
* @qos_offset: offset to QoS registers
* @regmap: regmap for QoS registers read/write access
+ * @bus_clk_rate: bus clock rate in Hz
*/
struct qcom_icc_provider {
struct icc_provider provider;
@@ -33,6 +34,7 @@ struct qcom_icc_provider {
enum qcom_icc_type type;
struct regmap *regmap;
unsigned int qos_offset;
+ u64 *bus_clk_rate;
struct clk_bulk_data bus_clks[];
};
@@ -66,7 +68,6 @@ struct qcom_icc_qos {
* @mas_rpm_id: RPM id for devices that are bus masters
* @slv_rpm_id: RPM id for devices that are bus slaves
* @qos: NoC QoS setting parameters
- * @rate: current bus clock rate in Hz
*/
struct qcom_icc_node {
unsigned char *name;
@@ -77,11 +78,10 @@ struct qcom_icc_node {
int mas_rpm_id;
int slv_rpm_id;
struct qcom_icc_qos qos;
- u64 rate;
};
struct qcom_icc_desc {
- struct qcom_icc_node **nodes;
+ struct qcom_icc_node * const *nodes;
size_t num_nodes;
const char * const *clocks;
size_t num_clocks;
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index 2c8e12549804..3c40076eb5fb 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -189,7 +189,7 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct icc_onecell_data *data;
struct icc_provider *provider;
- struct qcom_icc_node **qnodes, *qn;
+ struct qcom_icc_node * const *qnodes, *qn;
struct qcom_icc_provider *qp;
struct icc_node *node;
size_t num_nodes, i, j;
diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h
index 4bfc060529ba..d29929461c17 100644
--- a/drivers/interconnect/qcom/icc-rpmh.h
+++ b/drivers/interconnect/qcom/icc-rpmh.h
@@ -22,7 +22,7 @@
struct qcom_icc_provider {
struct icc_provider provider;
struct device *dev;
- struct qcom_icc_bcm **bcms;
+ struct qcom_icc_bcm * const *bcms;
size_t num_bcms;
struct bcm_voter *voter;
};
@@ -112,9 +112,9 @@ struct qcom_icc_fabric {
};
struct qcom_icc_desc {
- struct qcom_icc_node **nodes;
+ struct qcom_icc_node * const *nodes;
size_t num_nodes;
- struct qcom_icc_bcm **bcms;
+ struct qcom_icc_bcm * const *bcms;
size_t num_bcms;
};
diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
index 2f397a7c3322..5c4ba2f37c8e 100644
--- a/drivers/interconnect/qcom/msm8916.c
+++ b/drivers/interconnect/qcom/msm8916.c
@@ -1191,7 +1191,7 @@ static struct qcom_icc_node snoc_pcnoc_slv = {
.links = snoc_pcnoc_slv_links,
};
-static struct qcom_icc_node *msm8916_snoc_nodes[] = {
+static struct qcom_icc_node * const msm8916_snoc_nodes[] = {
[BIMC_SNOC_SLV] = &bimc_snoc_slv,
[MASTER_JPEG] = &mas_jpeg,
[MASTER_MDP_PORT0] = &mas_mdp,
@@ -1228,7 +1228,7 @@ static const struct regmap_config msm8916_snoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc msm8916_snoc = {
+static const struct qcom_icc_desc msm8916_snoc = {
.type = QCOM_ICC_NOC,
.nodes = msm8916_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_snoc_nodes),
@@ -1236,7 +1236,7 @@ static struct qcom_icc_desc msm8916_snoc = {
.qos_offset = 0x7000,
};
-static struct qcom_icc_node *msm8916_bimc_nodes[] = {
+static struct qcom_icc_node * const msm8916_bimc_nodes[] = {
[BIMC_SNOC_MAS] = &bimc_snoc_mas,
[MASTER_AMPSS_M0] = &mas_apss,
[MASTER_GRAPHICS_3D] = &mas_gfx,
@@ -1256,7 +1256,7 @@ static const struct regmap_config msm8916_bimc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc msm8916_bimc = {
+static const struct qcom_icc_desc msm8916_bimc = {
.type = QCOM_ICC_BIMC,
.nodes = msm8916_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_bimc_nodes),
@@ -1264,7 +1264,7 @@ static struct qcom_icc_desc msm8916_bimc = {
.qos_offset = 0x8000,
};
-static struct qcom_icc_node *msm8916_pcnoc_nodes[] = {
+static struct qcom_icc_node * const msm8916_pcnoc_nodes[] = {
[MASTER_BLSP_1] = &mas_blsp_1,
[MASTER_DEHR] = &mas_dehr,
[MASTER_LPASS] = &mas_audio,
@@ -1325,7 +1325,7 @@ static const struct regmap_config msm8916_pcnoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc msm8916_pcnoc = {
+static const struct qcom_icc_desc msm8916_pcnoc = {
.type = QCOM_ICC_NOC,
.nodes = msm8916_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_pcnoc_nodes),
diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c
index f9c2d7d3100d..63b31deea722 100644
--- a/drivers/interconnect/qcom/msm8939.c
+++ b/drivers/interconnect/qcom/msm8939.c
@@ -1251,7 +1251,7 @@ static struct qcom_icc_node snoc_pcnoc_slv = {
.links = snoc_pcnoc_slv_links,
};
-static struct qcom_icc_node *msm8939_snoc_nodes[] = {
+static struct qcom_icc_node * const msm8939_snoc_nodes[] = {
[BIMC_SNOC_SLV] = &bimc_snoc_slv,
[MASTER_QDSS_BAM] = &mas_qdss_bam,
[MASTER_QDSS_ETR] = &mas_qdss_etr,
@@ -1281,7 +1281,7 @@ static const struct regmap_config msm8939_snoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc msm8939_snoc = {
+static const struct qcom_icc_desc msm8939_snoc = {
.type = QCOM_ICC_NOC,
.nodes = msm8939_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_snoc_nodes),
@@ -1289,7 +1289,7 @@ static struct qcom_icc_desc msm8939_snoc = {
.qos_offset = 0x7000,
};
-static struct qcom_icc_node *msm8939_snoc_mm_nodes[] = {
+static struct qcom_icc_node * const msm8939_snoc_mm_nodes[] = {
[MASTER_VIDEO_P0] = &mas_video,
[MASTER_JPEG] = &mas_jpeg,
[MASTER_VFE] = &mas_vfe,
@@ -1301,7 +1301,7 @@ static struct qcom_icc_node *msm8939_snoc_mm_nodes[] = {
[SNOC_MM_INT_2] = &mm_int_2,
};
-static struct qcom_icc_desc msm8939_snoc_mm = {
+static const struct qcom_icc_desc msm8939_snoc_mm = {
.type = QCOM_ICC_NOC,
.nodes = msm8939_snoc_mm_nodes,
.num_nodes = ARRAY_SIZE(msm8939_snoc_mm_nodes),
@@ -1309,7 +1309,7 @@ static struct qcom_icc_desc msm8939_snoc_mm = {
.qos_offset = 0x7000,
};
-static struct qcom_icc_node *msm8939_bimc_nodes[] = {
+static struct qcom_icc_node * const msm8939_bimc_nodes[] = {
[BIMC_SNOC_MAS] = &bimc_snoc_mas,
[MASTER_AMPSS_M0] = &mas_apss,
[MASTER_GRAPHICS_3D] = &mas_gfx,
@@ -1329,7 +1329,7 @@ static const struct regmap_config msm8939_bimc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc msm8939_bimc = {
+static const struct qcom_icc_desc msm8939_bimc = {
.type = QCOM_ICC_BIMC,
.nodes = msm8939_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_bimc_nodes),
@@ -1337,7 +1337,7 @@ static struct qcom_icc_desc msm8939_bimc = {
.qos_offset = 0x8000,
};
-static struct qcom_icc_node *msm8939_pcnoc_nodes[] = {
+static struct qcom_icc_node * const msm8939_pcnoc_nodes[] = {
[MASTER_BLSP_1] = &mas_blsp_1,
[MASTER_DEHR] = &mas_dehr,
[MASTER_LPASS] = &mas_audio,
@@ -1400,7 +1400,7 @@ static const struct regmap_config msm8939_pcnoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc msm8939_pcnoc = {
+static const struct qcom_icc_desc msm8939_pcnoc = {
.type = QCOM_ICC_NOC,
.nodes = msm8939_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_pcnoc_nodes),
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
index da68ce375a89..6fa0ad90fc3d 100644
--- a/drivers/interconnect/qcom/msm8974.c
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -220,7 +220,7 @@ struct msm8974_icc_node {
};
struct msm8974_icc_desc {
- struct msm8974_icc_node **nodes;
+ struct msm8974_icc_node * const *nodes;
size_t num_nodes;
};
@@ -244,7 +244,7 @@ DEFINE_QNODE(bimc_to_snoc, MSM8974_BIMC_TO_SNOC, 8, 3, 2, MSM8974_SNOC_TO_BIMC,
DEFINE_QNODE(slv_ebi_ch0, MSM8974_BIMC_SLV_EBI_CH0, 8, -1, 0);
DEFINE_QNODE(slv_ampss_l2, MSM8974_BIMC_SLV_AMPSS_L2, 8, -1, 1);
-static struct msm8974_icc_node *msm8974_bimc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_bimc_nodes[] = {
[BIMC_MAS_AMPSS_M0] = &mas_ampss_m0,
[BIMC_MAS_AMPSS_M1] = &mas_ampss_m1,
[BIMC_MAS_MSS_PROC] = &mas_mss_proc,
@@ -254,7 +254,7 @@ static struct msm8974_icc_node *msm8974_bimc_nodes[] = {
[BIMC_SLV_AMPSS_L2] = &slv_ampss_l2,
};
-static struct msm8974_icc_desc msm8974_bimc = {
+static const struct msm8974_icc_desc msm8974_bimc = {
.nodes = msm8974_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_bimc_nodes),
};
@@ -297,7 +297,7 @@ DEFINE_QNODE(slv_ebi1_phy_cfg, MSM8974_CNOC_SLV_EBI1_PHY_CFG, 8, -1, 73);
DEFINE_QNODE(slv_rpm, MSM8974_CNOC_SLV_RPM, 8, -1, 74);
DEFINE_QNODE(slv_service_cnoc, MSM8974_CNOC_SLV_SERVICE_CNOC, 8, -1, 76);
-static struct msm8974_icc_node *msm8974_cnoc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_cnoc_nodes[] = {
[CNOC_MAS_RPM_INST] = &mas_rpm_inst,
[CNOC_MAS_RPM_DATA] = &mas_rpm_data,
[CNOC_MAS_RPM_SYS] = &mas_rpm_sys,
@@ -337,7 +337,7 @@ static struct msm8974_icc_node *msm8974_cnoc_nodes[] = {
[CNOC_SLV_SERVICE_CNOC] = &slv_service_cnoc,
};
-static struct msm8974_icc_desc msm8974_cnoc = {
+static const struct msm8974_icc_desc msm8974_cnoc = {
.nodes = msm8974_cnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_cnoc_nodes),
};
@@ -365,7 +365,7 @@ DEFINE_QNODE(slv_mnoc_mpu_cfg, MSM8974_MNOC_SLV_MNOC_MPU_CFG, 16, -1, 14);
DEFINE_QNODE(slv_onoc_mpu_cfg, MSM8974_MNOC_SLV_ONOC_MPU_CFG, 16, -1, 15);
DEFINE_QNODE(slv_service_mnoc, MSM8974_MNOC_SLV_SERVICE_MNOC, 16, -1, 17);
-static struct msm8974_icc_node *msm8974_mnoc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_mnoc_nodes[] = {
[MNOC_MAS_GRAPHICS_3D] = &mas_graphics_3d,
[MNOC_MAS_JPEG] = &mas_jpeg,
[MNOC_MAS_MDP_PORT0] = &mas_mdp_port0,
@@ -390,7 +390,7 @@ static struct msm8974_icc_node *msm8974_mnoc_nodes[] = {
[MNOC_SLV_SERVICE_MNOC] = &slv_service_mnoc,
};
-static struct msm8974_icc_desc msm8974_mnoc = {
+static const struct msm8974_icc_desc msm8974_mnoc = {
.nodes = msm8974_mnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_mnoc_nodes),
};
@@ -410,7 +410,7 @@ DEFINE_QNODE(ocmem_vnoc_to_onoc, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC, 16, 56, 79, MS
DEFINE_QNODE(ocmem_vnoc_to_snoc, MSM8974_OCMEM_VNOC_TO_SNOC, 8, 57, 80);
DEFINE_QNODE(mas_v_ocmem_gfx3d, MSM8974_OCMEM_VNOC_MAS_GFX3D, 8, 55, -1, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC);
-static struct msm8974_icc_node *msm8974_onoc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_onoc_nodes[] = {
[OCMEM_NOC_TO_OCMEM_VNOC] = &ocmem_noc_to_ocmem_vnoc,
[OCMEM_MAS_JPEG_OCMEM] = &mas_jpeg_ocmem,
[OCMEM_MAS_MDP_OCMEM] = &mas_mdp_ocmem,
@@ -425,7 +425,7 @@ static struct msm8974_icc_node *msm8974_onoc_nodes[] = {
[OCMEM_SLV_OCMEM] = &slv_ocmem,
};
-static struct msm8974_icc_desc msm8974_onoc = {
+static const struct msm8974_icc_desc msm8974_onoc = {
.nodes = msm8974_onoc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_onoc_nodes),
};
@@ -458,7 +458,7 @@ DEFINE_QNODE(slv_pnoc_mpu_cfg, MSM8974_PNOC_SLV_PNOC_MPU_CFG, 8, -1, 43);
DEFINE_QNODE(slv_prng, MSM8974_PNOC_SLV_PRNG, 8, -1, 44, MSM8974_PNOC_TO_SNOC);
DEFINE_QNODE(slv_service_pnoc, MSM8974_PNOC_SLV_SERVICE_PNOC, 8, -1, 46);
-static struct msm8974_icc_node *msm8974_pnoc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_pnoc_nodes[] = {
[PNOC_MAS_PNOC_CFG] = &mas_pnoc_cfg,
[PNOC_MAS_SDCC_1] = &mas_sdcc_1,
[PNOC_MAS_SDCC_3] = &mas_sdcc_3,
@@ -488,7 +488,7 @@ static struct msm8974_icc_node *msm8974_pnoc_nodes[] = {
[PNOC_SLV_SERVICE_PNOC] = &slv_service_pnoc,
};
-static struct msm8974_icc_desc msm8974_pnoc = {
+static const struct msm8974_icc_desc msm8974_pnoc = {
.nodes = msm8974_pnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_pnoc_nodes),
};
@@ -518,7 +518,7 @@ DEFINE_QNODE(slv_snoc_ocmem, MSM8974_SNOC_SLV_SNOC_OCMEM, 8, -1, 27);
DEFINE_QNODE(slv_service_snoc, MSM8974_SNOC_SLV_SERVICE_SNOC, 8, -1, 29);
DEFINE_QNODE(slv_qdss_stm, MSM8974_SNOC_SLV_QDSS_STM, 8, -1, 30);
-static struct msm8974_icc_node *msm8974_snoc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_snoc_nodes[] = {
[SNOC_MAS_LPASS_AHB] = &mas_lpass_ahb,
[SNOC_MAS_QDSS_BAM] = &mas_qdss_bam,
[SNOC_MAS_SNOC_CFG] = &mas_snoc_cfg,
@@ -545,7 +545,7 @@ static struct msm8974_icc_node *msm8974_snoc_nodes[] = {
[SNOC_SLV_QDSS_STM] = &slv_qdss_stm,
};
-static struct msm8974_icc_desc msm8974_snoc = {
+static const struct msm8974_icc_desc msm8974_snoc = {
.nodes = msm8974_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_snoc_nodes),
};
@@ -648,7 +648,7 @@ static int msm8974_get_bw(struct icc_node *node, u32 *avg, u32 *peak)
static int msm8974_icc_probe(struct platform_device *pdev)
{
const struct msm8974_icc_desc *desc;
- struct msm8974_icc_node **qnodes;
+ struct msm8974_icc_node * const *qnodes;
struct msm8974_icc_provider *qp;
struct device *dev = &pdev->dev;
struct icc_onecell_data *data;
diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c
index 499e11fbbd2e..c2903ae3b3bc 100644
--- a/drivers/interconnect/qcom/msm8996.c
+++ b/drivers/interconnect/qcom/msm8996.c
@@ -1796,7 +1796,7 @@ static struct qcom_icc_node slv_srvc_snoc = {
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
-static struct qcom_icc_node *a0noc_nodes[] = {
+static struct qcom_icc_node * const a0noc_nodes[] = {
[MASTER_PCIE_0] = &mas_pcie_0,
[MASTER_PCIE_1] = &mas_pcie_1,
[MASTER_PCIE_2] = &mas_pcie_2
@@ -1820,7 +1820,7 @@ static const struct qcom_icc_desc msm8996_a0noc = {
.regmap_cfg = &msm8996_a0noc_regmap_config
};
-static struct qcom_icc_node *a1noc_nodes[] = {
+static struct qcom_icc_node * const a1noc_nodes[] = {
[MASTER_CNOC_A1NOC] = &mas_cnoc_a1noc,
[MASTER_CRYPTO_CORE0] = &mas_crypto_c0,
[MASTER_PNOC_A1NOC] = &mas_pnoc_a1noc
@@ -1841,7 +1841,7 @@ static const struct qcom_icc_desc msm8996_a1noc = {
.regmap_cfg = &msm8996_a1noc_regmap_config
};
-static struct qcom_icc_node *a2noc_nodes[] = {
+static struct qcom_icc_node * const a2noc_nodes[] = {
[MASTER_USB3] = &mas_usb3,
[MASTER_IPA] = &mas_ipa,
[MASTER_UFS] = &mas_ufs
@@ -1862,7 +1862,7 @@ static const struct qcom_icc_desc msm8996_a2noc = {
.regmap_cfg = &msm8996_a2noc_regmap_config
};
-static struct qcom_icc_node *bimc_nodes[] = {
+static struct qcom_icc_node * const bimc_nodes[] = {
[MASTER_AMPSS_M0] = &mas_apps_proc,
[MASTER_GRAPHICS_3D] = &mas_oxili,
[MASTER_MNOC_BIMC] = &mas_mnoc_bimc,
@@ -1888,7 +1888,7 @@ static const struct qcom_icc_desc msm8996_bimc = {
.regmap_cfg = &msm8996_bimc_regmap_config
};
-static struct qcom_icc_node *cnoc_nodes[] = {
+static struct qcom_icc_node * const cnoc_nodes[] = {
[MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
[MASTER_QDSS_DAP] = &mas_qdss_dap,
[SLAVE_CNOC_A1NOC] = &slv_cnoc_a1noc,
@@ -1946,7 +1946,7 @@ static const struct qcom_icc_desc msm8996_cnoc = {
.regmap_cfg = &msm8996_cnoc_regmap_config
};
-static struct qcom_icc_node *mnoc_nodes[] = {
+static struct qcom_icc_node * const mnoc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &mas_cnoc_mnoc_cfg,
[MASTER_CPP] = &mas_cpp,
[MASTER_JPEG] = &mas_jpeg,
@@ -2001,7 +2001,7 @@ static const struct qcom_icc_desc msm8996_mnoc = {
.regmap_cfg = &msm8996_mnoc_regmap_config
};
-static struct qcom_icc_node *pnoc_nodes[] = {
+static struct qcom_icc_node * const pnoc_nodes[] = {
[MASTER_SNOC_PNOC] = &mas_snoc_pnoc,
[MASTER_SDCC_1] = &mas_sdcc_1,
[MASTER_SDCC_2] = &mas_sdcc_2,
@@ -2037,7 +2037,7 @@ static const struct qcom_icc_desc msm8996_pnoc = {
.regmap_cfg = &msm8996_pnoc_regmap_config
};
-static struct qcom_icc_node *snoc_nodes[] = {
+static struct qcom_icc_node * const snoc_nodes[] = {
[MASTER_HMSS] = &mas_hmss,
[MASTER_QDSS_BAM] = &mas_qdss_bam,
[MASTER_SNOC_CFG] = &mas_snoc_cfg,
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index eec13099a6a3..4198656f4e59 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -67,7 +67,7 @@ struct qcom_osm_l3_node {
};
struct qcom_osm_l3_desc {
- const struct qcom_osm_l3_node **nodes;
+ const struct qcom_osm_l3_node * const *nodes;
size_t num_nodes;
unsigned int lut_row_size;
unsigned int reg_freq_lut;
@@ -86,7 +86,7 @@ struct qcom_osm_l3_desc {
DEFINE_QNODE(sdm845_osm_apps_l3, SDM845_MASTER_OSM_L3_APPS, 16, SDM845_SLAVE_OSM_L3);
DEFINE_QNODE(sdm845_osm_l3, SDM845_SLAVE_OSM_L3, 16);
-static const struct qcom_osm_l3_node *sdm845_osm_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sdm845_osm_l3_nodes[] = {
[MASTER_OSM_L3_APPS] = &sdm845_osm_apps_l3,
[SLAVE_OSM_L3] = &sdm845_osm_l3,
};
@@ -102,7 +102,7 @@ static const struct qcom_osm_l3_desc sdm845_icc_osm_l3 = {
DEFINE_QNODE(sc7180_osm_apps_l3, SC7180_MASTER_OSM_L3_APPS, 16, SC7180_SLAVE_OSM_L3);
DEFINE_QNODE(sc7180_osm_l3, SC7180_SLAVE_OSM_L3, 16);
-static const struct qcom_osm_l3_node *sc7180_osm_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sc7180_osm_l3_nodes[] = {
[MASTER_OSM_L3_APPS] = &sc7180_osm_apps_l3,
[SLAVE_OSM_L3] = &sc7180_osm_l3,
};
@@ -118,7 +118,7 @@ static const struct qcom_osm_l3_desc sc7180_icc_osm_l3 = {
DEFINE_QNODE(sc7280_epss_apps_l3, SC7280_MASTER_EPSS_L3_APPS, 32, SC7280_SLAVE_EPSS_L3);
DEFINE_QNODE(sc7280_epss_l3, SC7280_SLAVE_EPSS_L3, 32);
-static const struct qcom_osm_l3_node *sc7280_epss_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sc7280_epss_l3_nodes[] = {
[MASTER_EPSS_L3_APPS] = &sc7280_epss_apps_l3,
[SLAVE_EPSS_L3_SHARED] = &sc7280_epss_l3,
};
@@ -134,7 +134,7 @@ static const struct qcom_osm_l3_desc sc7280_icc_epss_l3 = {
DEFINE_QNODE(sc8180x_osm_apps_l3, SC8180X_MASTER_OSM_L3_APPS, 32, SC8180X_SLAVE_OSM_L3);
DEFINE_QNODE(sc8180x_osm_l3, SC8180X_SLAVE_OSM_L3, 32);
-static const struct qcom_osm_l3_node *sc8180x_osm_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sc8180x_osm_l3_nodes[] = {
[MASTER_OSM_L3_APPS] = &sc8180x_osm_apps_l3,
[SLAVE_OSM_L3] = &sc8180x_osm_l3,
};
@@ -150,7 +150,7 @@ static const struct qcom_osm_l3_desc sc8180x_icc_osm_l3 = {
DEFINE_QNODE(sm8150_osm_apps_l3, SM8150_MASTER_OSM_L3_APPS, 32, SM8150_SLAVE_OSM_L3);
DEFINE_QNODE(sm8150_osm_l3, SM8150_SLAVE_OSM_L3, 32);
-static const struct qcom_osm_l3_node *sm8150_osm_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sm8150_osm_l3_nodes[] = {
[MASTER_OSM_L3_APPS] = &sm8150_osm_apps_l3,
[SLAVE_OSM_L3] = &sm8150_osm_l3,
};
@@ -166,7 +166,7 @@ static const struct qcom_osm_l3_desc sm8150_icc_osm_l3 = {
DEFINE_QNODE(sm8250_epss_apps_l3, SM8250_MASTER_EPSS_L3_APPS, 32, SM8250_SLAVE_EPSS_L3);
DEFINE_QNODE(sm8250_epss_l3, SM8250_SLAVE_EPSS_L3, 32);
-static const struct qcom_osm_l3_node *sm8250_epss_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sm8250_epss_l3_nodes[] = {
[MASTER_EPSS_L3_APPS] = &sm8250_epss_apps_l3,
[SLAVE_EPSS_L3_SHARED] = &sm8250_epss_l3,
};
@@ -228,7 +228,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
const struct qcom_osm_l3_desc *desc;
struct icc_onecell_data *data;
struct icc_provider *provider;
- const struct qcom_osm_l3_node **qnodes;
+ const struct qcom_osm_l3_node * const *qnodes;
struct icc_node *node;
size_t num_nodes;
struct clk *clk;
diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
index 74404e0b2080..0da612d6398c 100644
--- a/drivers/interconnect/qcom/qcm2290.c
+++ b/drivers/interconnect/qcom/qcm2290.c
@@ -1174,7 +1174,7 @@ static struct qcom_icc_node slv_anoc_snoc = {
};
/* NoC descriptors */
-static struct qcom_icc_node *qcm2290_bimc_nodes[] = {
+static struct qcom_icc_node * const qcm2290_bimc_nodes[] = {
[MASTER_APPSS_PROC] = &mas_appss_proc,
[MASTER_SNOC_BIMC_RT] = &mas_snoc_bimc_rt,
[MASTER_SNOC_BIMC_NRT] = &mas_snoc_bimc_nrt,
@@ -1193,7 +1193,7 @@ static const struct regmap_config qcm2290_bimc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc qcm2290_bimc = {
+static const struct qcom_icc_desc qcm2290_bimc = {
.type = QCOM_ICC_BIMC,
.nodes = qcm2290_bimc_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_bimc_nodes),
@@ -1202,7 +1202,7 @@ static struct qcom_icc_desc qcm2290_bimc = {
.qos_offset = 0x8000,
};
-static struct qcom_icc_node *qcm2290_cnoc_nodes[] = {
+static struct qcom_icc_node * const qcm2290_cnoc_nodes[] = {
[MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
[MASTER_QDSS_DAP] = &mas_qdss_dap,
[SLAVE_BIMC_CFG] = &slv_bimc_cfg,
@@ -1248,14 +1248,14 @@ static const struct regmap_config qcm2290_cnoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc qcm2290_cnoc = {
+static const struct qcom_icc_desc qcm2290_cnoc = {
.type = QCOM_ICC_NOC,
.nodes = qcm2290_cnoc_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_cnoc_nodes),
.regmap_cfg = &qcm2290_cnoc_regmap_config,
};
-static struct qcom_icc_node *qcm2290_snoc_nodes[] = {
+static struct qcom_icc_node * const qcm2290_snoc_nodes[] = {
[MASTER_CRYPTO_CORE0] = &mas_crypto_core0,
[MASTER_SNOC_CFG] = &mas_snoc_cfg,
[MASTER_TIC] = &mas_tic,
@@ -1289,7 +1289,7 @@ static const struct regmap_config qcm2290_snoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc qcm2290_snoc = {
+static const struct qcom_icc_desc qcm2290_snoc = {
.type = QCOM_ICC_QNOC,
.nodes = qcm2290_snoc_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_snoc_nodes),
@@ -1298,25 +1298,25 @@ static struct qcom_icc_desc qcm2290_snoc = {
.qos_offset = 0x15000,
};
-static struct qcom_icc_node *qcm2290_qup_virt_nodes[] = {
+static struct qcom_icc_node * const qcm2290_qup_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &mas_qup_core_0,
[SLAVE_QUP_CORE_0] = &slv_qup_core_0
};
-static struct qcom_icc_desc qcm2290_qup_virt = {
+static const struct qcom_icc_desc qcm2290_qup_virt = {
.type = QCOM_ICC_QNOC,
.nodes = qcm2290_qup_virt_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_qup_virt_nodes),
};
-static struct qcom_icc_node *qcm2290_mmnrt_virt_nodes[] = {
+static struct qcom_icc_node * const qcm2290_mmnrt_virt_nodes[] = {
[MASTER_CAMNOC_SF] = &mas_camnoc_sf,
[MASTER_VIDEO_P0] = &mas_video_p0,
[MASTER_VIDEO_PROC] = &mas_video_proc,
[SLAVE_SNOC_BIMC_NRT] = &slv_snoc_bimc_nrt,
};
-static struct qcom_icc_desc qcm2290_mmnrt_virt = {
+static const struct qcom_icc_desc qcm2290_mmnrt_virt = {
.type = QCOM_ICC_QNOC,
.nodes = qcm2290_mmnrt_virt_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_mmnrt_virt_nodes),
@@ -1324,13 +1324,13 @@ static struct qcom_icc_desc qcm2290_mmnrt_virt = {
.qos_offset = 0x15000,
};
-static struct qcom_icc_node *qcm2290_mmrt_virt_nodes[] = {
+static struct qcom_icc_node * const qcm2290_mmrt_virt_nodes[] = {
[MASTER_CAMNOC_HF] = &mas_camnoc_hf,
[MASTER_MDP0] = &mas_mdp0,
[SLAVE_SNOC_BIMC_RT] = &slv_snoc_bimc_rt,
};
-static struct qcom_icc_desc qcm2290_mmrt_virt = {
+static const struct qcom_icc_desc qcm2290_mmrt_virt = {
.type = QCOM_ICC_QNOC,
.nodes = qcm2290_mmrt_virt_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_mmrt_virt_nodes),
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index 416c8bff8efa..fae155344332 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -974,7 +974,7 @@ static struct qcom_icc_node slv_lpass = {
.slv_rpm_id = -1,
};
-static struct qcom_icc_node *qcs404_bimc_nodes[] = {
+static struct qcom_icc_node * const qcs404_bimc_nodes[] = {
[MASTER_AMPSS_M0] = &mas_apps_proc,
[MASTER_OXILI] = &mas_oxili,
[MASTER_MDP_PORT0] = &mas_mdp,
@@ -984,12 +984,12 @@ static struct qcom_icc_node *qcs404_bimc_nodes[] = {
[SLAVE_BIMC_SNOC] = &slv_bimc_snoc,
};
-static struct qcom_icc_desc qcs404_bimc = {
+static const struct qcom_icc_desc qcs404_bimc = {
.nodes = qcs404_bimc_nodes,
.num_nodes = ARRAY_SIZE(qcs404_bimc_nodes),
};
-static struct qcom_icc_node *qcs404_pcnoc_nodes[] = {
+static struct qcom_icc_node * const qcs404_pcnoc_nodes[] = {
[MASTER_SPDM] = &mas_spdm,
[MASTER_BLSP_1] = &mas_blsp_1,
[MASTER_BLSP_2] = &mas_blsp_2,
@@ -1038,12 +1038,12 @@ static struct qcom_icc_node *qcs404_pcnoc_nodes[] = {
[SLAVE_PCNOC_SNOC] = &slv_pcnoc_snoc,
};
-static struct qcom_icc_desc qcs404_pcnoc = {
+static const struct qcom_icc_desc qcs404_pcnoc = {
.nodes = qcs404_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(qcs404_pcnoc_nodes),
};
-static struct qcom_icc_node *qcs404_snoc_nodes[] = {
+static struct qcom_icc_node * const qcs404_snoc_nodes[] = {
[MASTER_QDSS_BAM] = &mas_qdss_bam,
[MASTER_BIMC_SNOC] = &mas_bimc_snoc,
[MASTER_PCNOC_SNOC] = &mas_pcnoc_snoc,
@@ -1066,7 +1066,7 @@ static struct qcom_icc_node *qcs404_snoc_nodes[] = {
[SLAVE_LPASS] = &slv_lpass,
};
-static struct qcom_icc_desc qcs404_snoc = {
+static const struct qcom_icc_desc qcs404_snoc = {
.nodes = qcs404_snoc_nodes,
.num_nodes = ARRAY_SIZE(qcs404_snoc_nodes),
};
diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
index 5f7c0f85fa8e..35cd448efdfb 100644
--- a/drivers/interconnect/qcom/sc7180.c
+++ b/drivers/interconnect/qcom/sc7180.c
@@ -178,11 +178,11 @@ DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_aggre2_noc);
DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gemnoc);
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_cn1,
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
[MASTER_QSPI] = &qhm_qspi,
[MASTER_QUP_0] = &qhm_qup_0,
@@ -193,18 +193,18 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
-static struct qcom_icc_desc sc7180_aggre1_noc = {
+static const struct qcom_icc_desc sc7180_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_1] = &qhm_qup_1,
@@ -216,56 +216,56 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
-static struct qcom_icc_desc sc7180_aggre2_noc = {
+static const struct qcom_icc_desc sc7180_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *camnoc_virt_bcms[] = {
+static struct qcom_icc_bcm * const camnoc_virt_bcms[] = {
&bcm_mm1,
};
-static struct qcom_icc_node *camnoc_virt_nodes[] = {
+static struct qcom_icc_node * const camnoc_virt_nodes[] = {
[MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
[MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
[MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
[SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
};
-static struct qcom_icc_desc sc7180_camnoc_virt = {
+static const struct qcom_icc_desc sc7180_camnoc_virt = {
.nodes = camnoc_virt_nodes,
.num_nodes = ARRAY_SIZE(camnoc_virt_nodes),
.bcms = camnoc_virt_bcms,
.num_bcms = ARRAY_SIZE(camnoc_virt_bcms),
};
-static struct qcom_icc_bcm *compute_noc_bcms[] = {
+static struct qcom_icc_bcm * const compute_noc_bcms[] = {
&bcm_co0,
&bcm_co2,
&bcm_co3,
};
-static struct qcom_icc_node *compute_noc_nodes[] = {
+static struct qcom_icc_node * const compute_noc_nodes[] = {
[MASTER_NPU] = &qnm_npu,
[MASTER_NPU_PROC] = &qxm_npu_dsp,
[SLAVE_CDSP_GEM_NOC] = &qns_cdsp_gemnoc,
};
-static struct qcom_icc_desc sc7180_compute_noc = {
+static const struct qcom_icc_desc sc7180_compute_noc = {
.nodes = compute_noc_nodes,
.num_nodes = ARRAY_SIZE(compute_noc_nodes),
.bcms = compute_noc_bcms,
.num_bcms = ARRAY_SIZE(compute_noc_bcms),
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
&bcm_cn1,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_SNOC_CNOC] = &qnm_snoc,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
[SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
@@ -321,32 +321,32 @@ static struct qcom_icc_node *config_noc_nodes[] = {
[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
};
-static struct qcom_icc_desc sc7180_config_noc = {
+static const struct qcom_icc_desc sc7180_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
[SLAVE_GEM_NOC_CFG] = &qhs_gemnoc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
};
-static struct qcom_icc_desc sc7180_dc_noc = {
+static const struct qcom_icc_desc sc7180_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
&bcm_sh4,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_APPSS_PROC] = &acm_apps0,
[MASTER_SYS_TCU] = &acm_sys_tcu,
[MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg,
@@ -362,7 +362,7 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
[SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
};
-static struct qcom_icc_desc sc7180_gem_noc = {
+static const struct qcom_icc_desc sc7180_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
@@ -374,25 +374,25 @@ static struct qcom_icc_bcm *mc_virt_bcms[] = {
&bcm_mc0,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
};
-static struct qcom_icc_desc sc7180_mc_virt = {
+static const struct qcom_icc_desc sc7180_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
[MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
@@ -406,14 +406,14 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
-static struct qcom_icc_desc sc7180_mmss_noc = {
+static const struct qcom_icc_desc sc7180_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_node *npu_noc_nodes[] = {
+static struct qcom_icc_node * const npu_noc_nodes[] = {
[MASTER_NPU_SYS] = &amm_npu_sys,
[MASTER_NPU_NOC_CFG] = &qhm_npu_cfg,
[SLAVE_NPU_CAL_DP0] = &qhs_cal_dp0,
@@ -427,30 +427,30 @@ static struct qcom_icc_node *npu_noc_nodes[] = {
[SLAVE_SERVICE_NPU_NOC] = &srvc_noc,
};
-static struct qcom_icc_desc sc7180_npu_noc = {
+static const struct qcom_icc_desc sc7180_npu_noc = {
.nodes = npu_noc_nodes,
.num_nodes = ARRAY_SIZE(npu_noc_nodes),
};
-static struct qcom_icc_bcm *qup_virt_bcms[] = {
+static struct qcom_icc_bcm * const qup_virt_bcms[] = {
&bcm_qup0,
};
-static struct qcom_icc_node *qup_virt_nodes[] = {
+static struct qcom_icc_node * const qup_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup_core_master_1,
[MASTER_QUP_CORE_1] = &qup_core_master_2,
[SLAVE_QUP_CORE_0] = &qup_core_slave_1,
[SLAVE_QUP_CORE_1] = &qup_core_slave_2,
};
-static struct qcom_icc_desc sc7180_qup_virt = {
+static const struct qcom_icc_desc sc7180_qup_virt = {
.nodes = qup_virt_nodes,
.num_nodes = ARRAY_SIZE(qup_virt_nodes),
.bcms = qup_virt_bcms,
.num_bcms = ARRAY_SIZE(qup_virt_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn2,
@@ -461,7 +461,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn12,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
@@ -478,7 +478,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static struct qcom_icc_desc sc7180_system_noc = {
+static const struct qcom_icc_desc sc7180_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c
index f8b34f6cbb0d..971f538bc98a 100644
--- a/drivers/interconnect/qcom/sc7280.c
+++ b/drivers/interconnect/qcom/sc7280.c
@@ -1476,13 +1476,13 @@ static struct qcom_icc_bcm bcm_sn14 = {
.nodes = { &qns_pcie_mem_noc },
};
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_sn5,
&bcm_sn6,
&bcm_sn14,
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_QSPI_0] = &qhm_qspi,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_QUP_1] = &qhm_qup1,
@@ -1500,18 +1500,18 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
-static struct qcom_icc_desc sc7280_aggre1_noc = {
+static const struct qcom_icc_desc sc7280_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_A2NOC_CFG] = &qnm_a2noc_cfg,
[MASTER_CNOC_A2NOC] = &qnm_cnoc_datapath,
@@ -1522,38 +1522,38 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
-static struct qcom_icc_desc sc7280_aggre2_noc = {
+static const struct qcom_icc_desc sc7280_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *clk_virt_bcms[] = {
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
&bcm_qup0,
&bcm_qup1,
};
-static struct qcom_icc_node *clk_virt_nodes[] = {
+static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
};
-static struct qcom_icc_desc sc7280_clk_virt = {
+static const struct qcom_icc_desc sc7280_clk_virt = {
.nodes = clk_virt_nodes,
.num_nodes = ARRAY_SIZE(clk_virt_nodes),
.bcms = clk_virt_bcms,
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
};
-static struct qcom_icc_bcm *cnoc2_bcms[] = {
+static struct qcom_icc_bcm * const cnoc2_bcms[] = {
&bcm_cn1,
&bcm_cn2,
};
-static struct qcom_icc_node *cnoc2_nodes[] = {
+static struct qcom_icc_node * const cnoc2_nodes[] = {
[MASTER_CNOC3_CNOC2] = &qnm_cnoc3_cnoc2,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
[SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
@@ -1603,21 +1603,21 @@ static struct qcom_icc_node *cnoc2_nodes[] = {
[SLAVE_SNOC_CFG] = &qns_snoc_cfg,
};
-static struct qcom_icc_desc sc7280_cnoc2 = {
+static const struct qcom_icc_desc sc7280_cnoc2 = {
.nodes = cnoc2_nodes,
.num_nodes = ARRAY_SIZE(cnoc2_nodes),
.bcms = cnoc2_bcms,
.num_bcms = ARRAY_SIZE(cnoc2_bcms),
};
-static struct qcom_icc_bcm *cnoc3_bcms[] = {
+static struct qcom_icc_bcm * const cnoc3_bcms[] = {
&bcm_cn0,
&bcm_cn1,
&bcm_sn3,
&bcm_sn4,
};
-static struct qcom_icc_node *cnoc3_nodes[] = {
+static struct qcom_icc_node * const cnoc3_nodes[] = {
[MASTER_CNOC2_CNOC3] = &qnm_cnoc2_cnoc3,
[MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
@@ -1635,37 +1635,37 @@ static struct qcom_icc_node *cnoc3_nodes[] = {
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static struct qcom_icc_desc sc7280_cnoc3 = {
+static const struct qcom_icc_desc sc7280_cnoc3 = {
.nodes = cnoc3_nodes,
.num_nodes = ARRAY_SIZE(cnoc3_nodes),
.bcms = cnoc3_bcms,
.num_bcms = ARRAY_SIZE(cnoc3_bcms),
};
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
};
-static struct qcom_icc_desc sc7280_dc_noc = {
+static const struct qcom_icc_desc sc7280_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
&bcm_sh4,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_APPSS_PROC] = &chm_apps,
@@ -1687,17 +1687,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
[SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
};
-static struct qcom_icc_desc sc7280_gem_noc = {
+static const struct qcom_icc_desc sc7280_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
};
-static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
[MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
[SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
[SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
@@ -1707,38 +1707,38 @@ static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
[SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
};
-static struct qcom_icc_desc sc7280_lpass_ag_noc = {
+static const struct qcom_icc_desc sc7280_lpass_ag_noc = {
.nodes = lpass_ag_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
.bcms = lpass_ag_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
};
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
};
-static struct qcom_icc_desc sc7280_mc_virt = {
+static const struct qcom_icc_desc sc7280_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm4,
&bcm_mm5,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg,
[MASTER_VIDEO_P0] = &qnm_video0,
[MASTER_VIDEO_PROC] = &qnm_video_cpu,
@@ -1751,40 +1751,40 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
-static struct qcom_icc_desc sc7280_mmss_noc = {
+static const struct qcom_icc_desc sc7280_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_bcm *nsp_noc_bcms[] = {
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
&bcm_co0,
&bcm_co3,
};
-static struct qcom_icc_node *nsp_noc_nodes[] = {
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
[MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
[MASTER_CDSP_PROC] = &qxm_nsp,
[SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
[SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
};
-static struct qcom_icc_desc sc7280_nsp_noc = {
+static const struct qcom_icc_desc sc7280_nsp_noc = {
.nodes = nsp_noc_nodes,
.num_nodes = ARRAY_SIZE(nsp_noc_nodes),
.bcms = nsp_noc_bcms,
.num_bcms = ARRAY_SIZE(nsp_noc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn2,
&bcm_sn7,
&bcm_sn8,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
[MASTER_SNOC_CFG] = &qnm_snoc_cfg,
@@ -1795,7 +1795,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
};
-static struct qcom_icc_desc sc7280_system_noc = {
+static const struct qcom_icc_desc sc7280_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
index e9adf05b9330..8e32ca958824 100644
--- a/drivers/interconnect/qcom/sc8180x.c
+++ b/drivers/interconnect/qcom/sc8180x.c
@@ -15,229 +15,1611 @@
#include "icc-rpmh.h"
#include "sc8180x.h"
-DEFINE_QNODE(mas_qhm_a1noc_cfg, SC8180X_MASTER_A1NOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_A1NOC);
-DEFINE_QNODE(mas_xm_ufs_card, SC8180X_MASTER_UFS_CARD, 1, 8, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_ufs_g4, SC8180X_MASTER_UFS_GEN4, 1, 8, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_ufs_mem, SC8180X_MASTER_UFS_MEM, 1, 8, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_usb3_0, SC8180X_MASTER_USB3, 1, 8, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_usb3_1, SC8180X_MASTER_USB3_1, 1, 8, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_usb3_2, SC8180X_MASTER_USB3_2, 1, 16, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_a2noc_cfg, SC8180X_MASTER_A2NOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_A2NOC);
-DEFINE_QNODE(mas_qhm_qdss_bam, SC8180X_MASTER_QDSS_BAM, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_qspi, SC8180X_MASTER_QSPI_0, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_qspi1, SC8180X_MASTER_QSPI_1, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_qup0, SC8180X_MASTER_QUP_0, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_qup1, SC8180X_MASTER_QUP_1, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_qup2, SC8180X_MASTER_QUP_2, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_sensorss_ahb, SC8180X_MASTER_SENSORS_AHB, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qxm_crypto, SC8180X_MASTER_CRYPTO_CORE_0, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qxm_ipa, SC8180X_MASTER_IPA, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_emac, SC8180X_MASTER_EMAC, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_pcie3_0, SC8180X_MASTER_PCIE, 1, 8, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(mas_xm_pcie3_1, SC8180X_MASTER_PCIE_1, 1, 16, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(mas_xm_pcie3_2, SC8180X_MASTER_PCIE_2, 1, 8, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(mas_xm_pcie3_3, SC8180X_MASTER_PCIE_3, 1, 16, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(mas_xm_qdss_etr, SC8180X_MASTER_QDSS_ETR, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_sdc2, SC8180X_MASTER_SDCC_2, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_sdc4, SC8180X_MASTER_SDCC_4, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qxm_camnoc_hf0_uncomp, SC8180X_MASTER_CAMNOC_HF0_UNCOMP, 1, 32, SC8180X_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(mas_qxm_camnoc_hf1_uncomp, SC8180X_MASTER_CAMNOC_HF1_UNCOMP, 1, 32, SC8180X_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(mas_qxm_camnoc_sf_uncomp, SC8180X_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SC8180X_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(mas_qnm_npu, SC8180X_MASTER_NPU, 1, 32, SC8180X_SLAVE_CDSP_MEM_NOC);
-DEFINE_QNODE(mas_qnm_snoc, SC8180X_SNOC_CNOC_MAS, 1, 8, SC8180X_SLAVE_TLMM_SOUTH, SC8180X_SLAVE_CDSP_CFG, SC8180X_SLAVE_SPSS_CFG, SC8180X_SLAVE_CAMERA_CFG, SC8180X_SLAVE_SDCC_4, SC8180X_SLAVE_AHB2PHY_CENTER, SC8180X_SLAVE_SDCC_2, SC8180X_SLAVE_PCIE_2_CFG, SC8180X_SLAVE_CNOC_MNOC_CFG, SC8180X_SLAVE_EMAC_CFG, SC8180X_SLAVE_QSPI_0, SC8180X_SLAVE_QSPI_1, SC8180X_SLAVE_TLMM_EAST, SC8180X_SLAVE_SNOC_CFG, SC8180X_SLAVE_AHB2PHY_EAST, SC8180X_SLAVE_GLM, SC8180X_SLAVE_PDM, SC8180X_SLAVE_PCIE_1_CFG, SC8180X_SLAVE_A2NOC_CFG, SC8180X_SLAVE_QDSS_CFG, SC8180X_SLAVE_DISPLAY_CFG, SC8180X_SLAVE_TCSR, SC8180X_SLAVE_UFS_MEM_0_CFG, SC8180X_SLAVE_CNOC_DDRSS, SC8180X_SLAVE_PCIE_0_CFG, SC8180X_SLAVE_QUP_1, SC8180X_SLAVE_QUP_2, SC8180X_SLAVE_NPU_CFG, SC8180X_SLAVE_CRYPTO_0_CFG, SC8180X_SLAVE_GRAPHICS_3D_CFG, SC8180X_SLAVE_VENUS_CFG, SC8180X_SLAVE_TSIF, SC8180X_SLAVE_IPA_CFG, SC8180X_SLAVE_CLK_CTL, SC8180X_SLAVE_SECURITY, SC8180X_SLAVE_AOP, SC8180X_SLAVE_AHB2PHY_WEST, SC8180X_SLAVE_AHB2PHY_SOUTH, SC8180X_SLAVE_SERVICE_CNOC, SC8180X_SLAVE_UFS_CARD_CFG, SC8180X_SLAVE_USB3_1, SC8180X_SLAVE_USB3_2, SC8180X_SLAVE_PCIE_3_CFG, SC8180X_SLAVE_RBCPR_CX_CFG, SC8180X_SLAVE_TLMM_WEST, SC8180X_SLAVE_A1NOC_CFG, SC8180X_SLAVE_AOSS, SC8180X_SLAVE_PRNG, SC8180X_SLAVE_VSENSE_CTRL_CFG, SC8180X_SLAVE_QUP_0, SC8180X_SLAVE_USB3, SC8180X_SLAVE_RBCPR_MMCX_CFG, SC8180X_SLAVE_PIMEM_CFG, SC8180X_SLAVE_UFS_MEM_1_CFG, SC8180X_SLAVE_RBCPR_MX_CFG, SC8180X_SLAVE_IMEM_CFG);
-DEFINE_QNODE(mas_qhm_cnoc_dc_noc, SC8180X_MASTER_CNOC_DC_NOC, 1, 4, SC8180X_SLAVE_LLCC_CFG, SC8180X_SLAVE_GEM_NOC_CFG);
-DEFINE_QNODE(mas_acm_apps, SC8180X_MASTER_AMPSS_M0, 4, 64, SC8180X_SLAVE_ECC, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_acm_gpu_tcu, SC8180X_MASTER_GPU_TCU, 1, 8, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_acm_sys_tcu, SC8180X_MASTER_SYS_TCU, 1, 8, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_qhm_gemnoc_cfg, SC8180X_MASTER_GEM_NOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_GEM_NOC_1, SC8180X_SLAVE_SERVICE_GEM_NOC, SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG);
-DEFINE_QNODE(mas_qnm_cmpnoc, SC8180X_MASTER_COMPUTE_NOC, 2, 32, SC8180X_SLAVE_ECC, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_qnm_gpu, SC8180X_MASTER_GRAPHICS_3D, 4, 32, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_qnm_mnoc_hf, SC8180X_MASTER_MNOC_HF_MEM_NOC, 2, 32, SC8180X_SLAVE_LLCC);
-DEFINE_QNODE(mas_qnm_mnoc_sf, SC8180X_MASTER_MNOC_SF_MEM_NOC, 1, 32, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_qnm_pcie, SC8180X_MASTER_GEM_NOC_PCIE_SNOC, 1, 32, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_qnm_snoc_gc, SC8180X_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC8180X_SLAVE_LLCC);
-DEFINE_QNODE(mas_qnm_snoc_sf, SC8180X_MASTER_SNOC_SF_MEM_NOC, 1, 32, SC8180X_SLAVE_LLCC);
-DEFINE_QNODE(mas_qxm_ecc, SC8180X_MASTER_ECC, 2, 32, SC8180X_SLAVE_LLCC);
-DEFINE_QNODE(mas_ipa_core_master, SC8180X_MASTER_IPA_CORE, 1, 8, SC8180X_SLAVE_IPA_CORE);
-DEFINE_QNODE(mas_llcc_mc, SC8180X_MASTER_LLCC, 8, 4, SC8180X_SLAVE_EBI_CH0);
-DEFINE_QNODE(mas_qhm_mnoc_cfg, SC8180X_MASTER_CNOC_MNOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_MNOC);
-DEFINE_QNODE(mas_qxm_camnoc_hf0, SC8180X_MASTER_CAMNOC_HF0, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_camnoc_hf1, SC8180X_MASTER_CAMNOC_HF1, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_camnoc_sf, SC8180X_MASTER_CAMNOC_SF, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_mdp0, SC8180X_MASTER_MDP_PORT0, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_mdp1, SC8180X_MASTER_MDP_PORT1, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_rot, SC8180X_MASTER_ROTATOR, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_venus0, SC8180X_MASTER_VIDEO_P0, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_venus1, SC8180X_MASTER_VIDEO_P1, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_venus_arm9, SC8180X_MASTER_VIDEO_PROC, 1, 8, SC8180X_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(mas_qhm_snoc_cfg, SC8180X_MASTER_SNOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_SNOC);
-DEFINE_QNODE(mas_qnm_aggre1_noc, SC8180X_A1NOC_SNOC_MAS, 1, 32, SC8180X_SLAVE_SNOC_GEM_NOC_SF, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_OCIMEM, SC8180X_SLAVE_APPSS, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_QDSS_STM);
-DEFINE_QNODE(mas_qnm_aggre2_noc, SC8180X_A2NOC_SNOC_MAS, 1, 16, SC8180X_SLAVE_SNOC_GEM_NOC_SF, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_PCIE_3, SC8180X_SLAVE_OCIMEM, SC8180X_SLAVE_APPSS, SC8180X_SLAVE_PCIE_2, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_PCIE_0, SC8180X_SLAVE_PCIE_1, SC8180X_SLAVE_TCU, SC8180X_SLAVE_QDSS_STM);
-DEFINE_QNODE(mas_qnm_gemnoc, SC8180X_MASTER_GEM_NOC_SNOC, 1, 8, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_OCIMEM, SC8180X_SLAVE_APPSS, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_TCU, SC8180X_SLAVE_QDSS_STM);
-DEFINE_QNODE(mas_qxm_pimem, SC8180X_MASTER_PIMEM, 1, 8, SC8180X_SLAVE_SNOC_GEM_NOC_GC, SC8180X_SLAVE_OCIMEM);
-DEFINE_QNODE(mas_xm_gic, SC8180X_MASTER_GIC, 1, 8, SC8180X_SLAVE_SNOC_GEM_NOC_GC, SC8180X_SLAVE_OCIMEM);
-DEFINE_QNODE(slv_qns_a1noc_snoc, SC8180X_A1NOC_SNOC_SLV, 1, 32, SC8180X_A1NOC_SNOC_MAS);
-DEFINE_QNODE(slv_srvc_aggre1_noc, SC8180X_SLAVE_SERVICE_A1NOC, 1, 4);
-DEFINE_QNODE(slv_qns_a2noc_snoc, SC8180X_A2NOC_SNOC_SLV, 1, 16, SC8180X_A2NOC_SNOC_MAS);
-DEFINE_QNODE(slv_qns_pcie_mem_noc, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC, 1, 32, SC8180X_MASTER_GEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(slv_srvc_aggre2_noc, SC8180X_SLAVE_SERVICE_A2NOC, 1, 4);
-DEFINE_QNODE(slv_qns_camnoc_uncomp, SC8180X_SLAVE_CAMNOC_UNCOMP, 1, 32);
-DEFINE_QNODE(slv_qns_cdsp_mem_noc, SC8180X_SLAVE_CDSP_MEM_NOC, 2, 32, SC8180X_MASTER_COMPUTE_NOC);
-DEFINE_QNODE(slv_qhs_a1_noc_cfg, SC8180X_SLAVE_A1NOC_CFG, 1, 4, SC8180X_MASTER_A1NOC_CFG);
-DEFINE_QNODE(slv_qhs_a2_noc_cfg, SC8180X_SLAVE_A2NOC_CFG, 1, 4, SC8180X_MASTER_A2NOC_CFG);
-DEFINE_QNODE(slv_qhs_ahb2phy_refgen_center, SC8180X_SLAVE_AHB2PHY_CENTER, 1, 4);
-DEFINE_QNODE(slv_qhs_ahb2phy_refgen_east, SC8180X_SLAVE_AHB2PHY_EAST, 1, 4);
-DEFINE_QNODE(slv_qhs_ahb2phy_refgen_west, SC8180X_SLAVE_AHB2PHY_WEST, 1, 4);
-DEFINE_QNODE(slv_qhs_ahb2phy_south, SC8180X_SLAVE_AHB2PHY_SOUTH, 1, 4);
-DEFINE_QNODE(slv_qhs_aop, SC8180X_SLAVE_AOP, 1, 4);
-DEFINE_QNODE(slv_qhs_aoss, SC8180X_SLAVE_AOSS, 1, 4);
-DEFINE_QNODE(slv_qhs_camera_cfg, SC8180X_SLAVE_CAMERA_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_clk_ctl, SC8180X_SLAVE_CLK_CTL, 1, 4);
-DEFINE_QNODE(slv_qhs_compute_dsp, SC8180X_SLAVE_CDSP_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_cpr_cx, SC8180X_SLAVE_RBCPR_CX_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_cpr_mmcx, SC8180X_SLAVE_RBCPR_MMCX_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_cpr_mx, SC8180X_SLAVE_RBCPR_MX_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_crypto0_cfg, SC8180X_SLAVE_CRYPTO_0_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_ddrss_cfg, SC8180X_SLAVE_CNOC_DDRSS, 1, 4, SC8180X_MASTER_CNOC_DC_NOC);
-DEFINE_QNODE(slv_qhs_display_cfg, SC8180X_SLAVE_DISPLAY_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_emac_cfg, SC8180X_SLAVE_EMAC_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_glm, SC8180X_SLAVE_GLM, 1, 4);
-DEFINE_QNODE(slv_qhs_gpuss_cfg, SC8180X_SLAVE_GRAPHICS_3D_CFG, 1, 8);
-DEFINE_QNODE(slv_qhs_imem_cfg, SC8180X_SLAVE_IMEM_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_ipa, SC8180X_SLAVE_IPA_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_mnoc_cfg, SC8180X_SLAVE_CNOC_MNOC_CFG, 1, 4, SC8180X_MASTER_CNOC_MNOC_CFG);
-DEFINE_QNODE(slv_qhs_npu_cfg, SC8180X_SLAVE_NPU_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_pcie0_cfg, SC8180X_SLAVE_PCIE_0_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_pcie1_cfg, SC8180X_SLAVE_PCIE_1_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_pcie2_cfg, SC8180X_SLAVE_PCIE_2_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_pcie3_cfg, SC8180X_SLAVE_PCIE_3_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_pdm, SC8180X_SLAVE_PDM, 1, 4);
-DEFINE_QNODE(slv_qhs_pimem_cfg, SC8180X_SLAVE_PIMEM_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_prng, SC8180X_SLAVE_PRNG, 1, 4);
-DEFINE_QNODE(slv_qhs_qdss_cfg, SC8180X_SLAVE_QDSS_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_qspi_0, SC8180X_SLAVE_QSPI_0, 1, 4);
-DEFINE_QNODE(slv_qhs_qspi_1, SC8180X_SLAVE_QSPI_1, 1, 4);
-DEFINE_QNODE(slv_qhs_qupv3_east0, SC8180X_SLAVE_QUP_1, 1, 4);
-DEFINE_QNODE(slv_qhs_qupv3_east1, SC8180X_SLAVE_QUP_2, 1, 4);
-DEFINE_QNODE(slv_qhs_qupv3_west, SC8180X_SLAVE_QUP_0, 1, 4);
-DEFINE_QNODE(slv_qhs_sdc2, SC8180X_SLAVE_SDCC_2, 1, 4);
-DEFINE_QNODE(slv_qhs_sdc4, SC8180X_SLAVE_SDCC_4, 1, 4);
-DEFINE_QNODE(slv_qhs_security, SC8180X_SLAVE_SECURITY, 1, 4);
-DEFINE_QNODE(slv_qhs_snoc_cfg, SC8180X_SLAVE_SNOC_CFG, 1, 4, SC8180X_MASTER_SNOC_CFG);
-DEFINE_QNODE(slv_qhs_spss_cfg, SC8180X_SLAVE_SPSS_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_tcsr, SC8180X_SLAVE_TCSR, 1, 4);
-DEFINE_QNODE(slv_qhs_tlmm_east, SC8180X_SLAVE_TLMM_EAST, 1, 4);
-DEFINE_QNODE(slv_qhs_tlmm_south, SC8180X_SLAVE_TLMM_SOUTH, 1, 4);
-DEFINE_QNODE(slv_qhs_tlmm_west, SC8180X_SLAVE_TLMM_WEST, 1, 4);
-DEFINE_QNODE(slv_qhs_tsif, SC8180X_SLAVE_TSIF, 1, 4);
-DEFINE_QNODE(slv_qhs_ufs_card_cfg, SC8180X_SLAVE_UFS_CARD_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_ufs_mem0_cfg, SC8180X_SLAVE_UFS_MEM_0_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_ufs_mem1_cfg, SC8180X_SLAVE_UFS_MEM_1_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_usb3_0, SC8180X_SLAVE_USB3, 1, 4);
-DEFINE_QNODE(slv_qhs_usb3_1, SC8180X_SLAVE_USB3_1, 1, 4);
-DEFINE_QNODE(slv_qhs_usb3_2, SC8180X_SLAVE_USB3_2, 1, 4);
-DEFINE_QNODE(slv_qhs_venus_cfg, SC8180X_SLAVE_VENUS_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_vsense_ctrl_cfg, SC8180X_SLAVE_VSENSE_CTRL_CFG, 1, 4);
-DEFINE_QNODE(slv_srvc_cnoc, SC8180X_SLAVE_SERVICE_CNOC, 1, 4);
-DEFINE_QNODE(slv_qhs_gemnoc, SC8180X_SLAVE_GEM_NOC_CFG, 1, 4, SC8180X_MASTER_GEM_NOC_CFG);
-DEFINE_QNODE(slv_qhs_llcc, SC8180X_SLAVE_LLCC_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_mdsp_ms_mpu_cfg, SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
-DEFINE_QNODE(slv_qns_ecc, SC8180X_SLAVE_ECC, 1, 32);
-DEFINE_QNODE(slv_qns_gem_noc_snoc, SC8180X_SLAVE_GEM_NOC_SNOC, 1, 8, SC8180X_MASTER_GEM_NOC_SNOC);
-DEFINE_QNODE(slv_qns_llcc, SC8180X_SLAVE_LLCC, 8, 16, SC8180X_MASTER_LLCC);
-DEFINE_QNODE(slv_srvc_gemnoc, SC8180X_SLAVE_SERVICE_GEM_NOC, 1, 4);
-DEFINE_QNODE(slv_srvc_gemnoc1, SC8180X_SLAVE_SERVICE_GEM_NOC_1, 1, 4);
-DEFINE_QNODE(slv_ipa_core_slave, SC8180X_SLAVE_IPA_CORE, 1, 8);
-DEFINE_QNODE(slv_ebi, SC8180X_SLAVE_EBI_CH0, 8, 4);
-DEFINE_QNODE(slv_qns2_mem_noc, SC8180X_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC8180X_MASTER_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(slv_qns_mem_noc_hf, SC8180X_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SC8180X_MASTER_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(slv_srvc_mnoc, SC8180X_SLAVE_SERVICE_MNOC, 1, 4);
-DEFINE_QNODE(slv_qhs_apss, SC8180X_SLAVE_APPSS, 1, 8);
-DEFINE_QNODE(slv_qns_cnoc, SC8180X_SNOC_CNOC_SLV, 1, 8, SC8180X_SNOC_CNOC_MAS);
-DEFINE_QNODE(slv_qns_gemnoc_gc, SC8180X_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SC8180X_MASTER_SNOC_GC_MEM_NOC);
-DEFINE_QNODE(slv_qns_gemnoc_sf, SC8180X_SLAVE_SNOC_GEM_NOC_SF, 1, 32, SC8180X_MASTER_SNOC_SF_MEM_NOC);
-DEFINE_QNODE(slv_qxs_imem, SC8180X_SLAVE_OCIMEM, 1, 8);
-DEFINE_QNODE(slv_qxs_pimem, SC8180X_SLAVE_PIMEM, 1, 8);
-DEFINE_QNODE(slv_srvc_snoc, SC8180X_SLAVE_SERVICE_SNOC, 1, 4);
-DEFINE_QNODE(slv_xs_pcie_0, SC8180X_SLAVE_PCIE_0, 1, 8);
-DEFINE_QNODE(slv_xs_pcie_1, SC8180X_SLAVE_PCIE_1, 1, 8);
-DEFINE_QNODE(slv_xs_pcie_2, SC8180X_SLAVE_PCIE_2, 1, 8);
-DEFINE_QNODE(slv_xs_pcie_3, SC8180X_SLAVE_PCIE_3, 1, 8);
-DEFINE_QNODE(slv_xs_qdss_stm, SC8180X_SLAVE_QDSS_STM, 1, 4);
-DEFINE_QNODE(slv_xs_sys_tcu_cfg, SC8180X_SLAVE_TCU, 1, 8);
-
-DEFINE_QBCM(bcm_acv, "ACV", false, &slv_ebi);
-DEFINE_QBCM(bcm_mc0, "MC0", false, &slv_ebi);
-DEFINE_QBCM(bcm_sh0, "SH0", false, &slv_qns_llcc);
-DEFINE_QBCM(bcm_mm0, "MM0", false, &slv_qns_mem_noc_hf);
-DEFINE_QBCM(bcm_co0, "CO0", false, &slv_qns_cdsp_mem_noc);
-DEFINE_QBCM(bcm_ce0, "CE0", false, &mas_qxm_crypto);
-DEFINE_QBCM(bcm_cn0, "CN0", false, &mas_qnm_snoc, &slv_qhs_a1_noc_cfg, &slv_qhs_a2_noc_cfg, &slv_qhs_ahb2phy_refgen_center, &slv_qhs_ahb2phy_refgen_east, &slv_qhs_ahb2phy_refgen_west, &slv_qhs_ahb2phy_south, &slv_qhs_aop, &slv_qhs_aoss, &slv_qhs_camera_cfg, &slv_qhs_clk_ctl, &slv_qhs_compute_dsp, &slv_qhs_cpr_cx, &slv_qhs_cpr_mmcx, &slv_qhs_cpr_mx, &slv_qhs_crypto0_cfg, &slv_qhs_ddrss_cfg, &slv_qhs_display_cfg, &slv_qhs_emac_cfg, &slv_qhs_glm, &slv_qhs_gpuss_cfg, &slv_qhs_imem_cfg, &slv_qhs_ipa, &slv_qhs_mnoc_cfg, &slv_qhs_npu_cfg, &slv_qhs_pcie0_cfg, &slv_qhs_pcie1_cfg, &slv_qhs_pcie2_cfg, &slv_qhs_pcie3_cfg, &slv_qhs_pdm, &slv_qhs_pimem_cfg, &slv_qhs_prng, &slv_qhs_qdss_cfg, &slv_qhs_qspi_0, &slv_qhs_qspi_1, &slv_qhs_qupv3_east0, &slv_qhs_qupv3_east1, &slv_qhs_qupv3_west, &slv_qhs_sdc2, &slv_qhs_sdc4, &slv_qhs_security, &slv_qhs_snoc_cfg, &slv_qhs_spss_cfg, &slv_qhs_tcsr, &slv_qhs_tlmm_east, &slv_qhs_tlmm_south, &slv_qhs_tlmm_west, &slv_qhs_tsif, &slv_qhs_ufs_card_cfg, &slv_qhs_ufs_mem0_cfg, &slv_qhs_ufs_mem1_cfg, &slv_qhs_usb3_0, &slv_qhs_usb3_1, &slv_qhs_usb3_2, &slv_qhs_venus_cfg, &slv_qhs_vsense_ctrl_cfg, &slv_srvc_cnoc);
-DEFINE_QBCM(bcm_mm1, "MM1", false, &mas_qxm_camnoc_hf0_uncomp, &mas_qxm_camnoc_hf1_uncomp, &mas_qxm_camnoc_sf_uncomp, &mas_qxm_camnoc_hf0, &mas_qxm_camnoc_hf1, &mas_qxm_mdp0, &mas_qxm_mdp1);
-DEFINE_QBCM(bcm_qup0, "QUP0", false, &mas_qhm_qup0, &mas_qhm_qup1, &mas_qhm_qup2);
-DEFINE_QBCM(bcm_sh2, "SH2", false, &slv_qns_gem_noc_snoc);
-DEFINE_QBCM(bcm_mm2, "MM2", false, &mas_qxm_camnoc_sf, &mas_qxm_rot, &mas_qxm_venus0, &mas_qxm_venus1, &mas_qxm_venus_arm9, &slv_qns2_mem_noc);
-DEFINE_QBCM(bcm_sh3, "SH3", false, &mas_acm_apps);
-DEFINE_QBCM(bcm_sn0, "SN0", false, &slv_qns_gemnoc_sf);
-DEFINE_QBCM(bcm_sn1, "SN1", false, &slv_qxs_imem);
-DEFINE_QBCM(bcm_sn2, "SN2", false, &slv_qns_gemnoc_gc);
-DEFINE_QBCM(bcm_co2, "CO2", false, &mas_qnm_npu);
-DEFINE_QBCM(bcm_ip0, "IP0", false, &slv_ipa_core_slave);
-DEFINE_QBCM(bcm_sn3, "SN3", false, &slv_srvc_aggre1_noc, &slv_qns_cnoc);
-DEFINE_QBCM(bcm_sn4, "SN4", false, &slv_qxs_pimem);
-DEFINE_QBCM(bcm_sn8, "SN8", false, &slv_xs_pcie_0, &slv_xs_pcie_1, &slv_xs_pcie_2, &slv_xs_pcie_3);
-DEFINE_QBCM(bcm_sn9, "SN9", false, &mas_qnm_aggre1_noc);
-DEFINE_QBCM(bcm_sn11, "SN11", false, &mas_qnm_aggre2_noc);
-DEFINE_QBCM(bcm_sn14, "SN14", false, &slv_qns_pcie_mem_noc);
-DEFINE_QBCM(bcm_sn15, "SN15", false, &mas_qnm_gemnoc);
-
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_node mas_qhm_a1noc_cfg = {
+ .name = "mas_qhm_a1noc_cfg",
+ .id = SC8180X_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_SERVICE_A1NOC }
+};
+
+static struct qcom_icc_node mas_xm_ufs_card = {
+ .name = "mas_xm_ufs_card",
+ .id = SC8180X_MASTER_UFS_CARD,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_ufs_g4 = {
+ .name = "mas_xm_ufs_g4",
+ .id = SC8180X_MASTER_UFS_GEN4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_ufs_mem = {
+ .name = "mas_xm_ufs_mem",
+ .id = SC8180X_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_usb3_0 = {
+ .name = "mas_xm_usb3_0",
+ .id = SC8180X_MASTER_USB3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_usb3_1 = {
+ .name = "mas_xm_usb3_1",
+ .id = SC8180X_MASTER_USB3_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_usb3_2 = {
+ .name = "mas_xm_usb3_2",
+ .id = SC8180X_MASTER_USB3_2,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_a2noc_cfg = {
+ .name = "mas_qhm_a2noc_cfg",
+ .id = SC8180X_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_SERVICE_A2NOC }
+};
+
+static struct qcom_icc_node mas_qhm_qdss_bam = {
+ .name = "mas_qhm_qdss_bam",
+ .id = SC8180X_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_qspi = {
+ .name = "mas_qhm_qspi",
+ .id = SC8180X_MASTER_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_qspi1 = {
+ .name = "mas_qhm_qspi1",
+ .id = SC8180X_MASTER_QSPI_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_qup0 = {
+ .name = "mas_qhm_qup0",
+ .id = SC8180X_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_qup1 = {
+ .name = "mas_qhm_qup1",
+ .id = SC8180X_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_qup2 = {
+ .name = "mas_qhm_qup2",
+ .id = SC8180X_MASTER_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_sensorss_ahb = {
+ .name = "mas_qhm_sensorss_ahb",
+ .id = SC8180X_MASTER_SENSORS_AHB,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qxm_crypto = {
+ .name = "mas_qxm_crypto",
+ .id = SC8180X_MASTER_CRYPTO_CORE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qxm_ipa = {
+ .name = "mas_qxm_ipa",
+ .id = SC8180X_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_emac = {
+ .name = "mas_xm_emac",
+ .id = SC8180X_MASTER_EMAC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_pcie3_0 = {
+ .name = "mas_xm_pcie3_0",
+ .id = SC8180X_MASTER_PCIE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+};
+
+static struct qcom_icc_node mas_xm_pcie3_1 = {
+ .name = "mas_xm_pcie3_1",
+ .id = SC8180X_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+};
+
+static struct qcom_icc_node mas_xm_pcie3_2 = {
+ .name = "mas_xm_pcie3_2",
+ .id = SC8180X_MASTER_PCIE_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+};
+
+static struct qcom_icc_node mas_xm_pcie3_3 = {
+ .name = "mas_xm_pcie3_3",
+ .id = SC8180X_MASTER_PCIE_3,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+};
+
+static struct qcom_icc_node mas_xm_qdss_etr = {
+ .name = "mas_xm_qdss_etr",
+ .id = SC8180X_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_sdc2 = {
+ .name = "mas_xm_sdc2",
+ .id = SC8180X_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_sdc4 = {
+ .name = "mas_xm_sdc4",
+ .id = SC8180X_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_hf0_uncomp = {
+ .name = "mas_qxm_camnoc_hf0_uncomp",
+ .id = SC8180X_MASTER_CAMNOC_HF0_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_CAMNOC_UNCOMP }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_hf1_uncomp = {
+ .name = "mas_qxm_camnoc_hf1_uncomp",
+ .id = SC8180X_MASTER_CAMNOC_HF1_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_CAMNOC_UNCOMP }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_sf_uncomp = {
+ .name = "mas_qxm_camnoc_sf_uncomp",
+ .id = SC8180X_MASTER_CAMNOC_SF_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_CAMNOC_UNCOMP }
+};
+
+static struct qcom_icc_node mas_qnm_npu = {
+ .name = "mas_qnm_npu",
+ .id = SC8180X_MASTER_NPU,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_CDSP_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qnm_snoc = {
+ .name = "mas_qnm_snoc",
+ .id = SC8180X_SNOC_CNOC_MAS,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 56,
+ .links = { SC8180X_SLAVE_TLMM_SOUTH,
+ SC8180X_SLAVE_CDSP_CFG,
+ SC8180X_SLAVE_SPSS_CFG,
+ SC8180X_SLAVE_CAMERA_CFG,
+ SC8180X_SLAVE_SDCC_4,
+ SC8180X_SLAVE_AHB2PHY_CENTER,
+ SC8180X_SLAVE_SDCC_2,
+ SC8180X_SLAVE_PCIE_2_CFG,
+ SC8180X_SLAVE_CNOC_MNOC_CFG,
+ SC8180X_SLAVE_EMAC_CFG,
+ SC8180X_SLAVE_QSPI_0,
+ SC8180X_SLAVE_QSPI_1,
+ SC8180X_SLAVE_TLMM_EAST,
+ SC8180X_SLAVE_SNOC_CFG,
+ SC8180X_SLAVE_AHB2PHY_EAST,
+ SC8180X_SLAVE_GLM,
+ SC8180X_SLAVE_PDM,
+ SC8180X_SLAVE_PCIE_1_CFG,
+ SC8180X_SLAVE_A2NOC_CFG,
+ SC8180X_SLAVE_QDSS_CFG,
+ SC8180X_SLAVE_DISPLAY_CFG,
+ SC8180X_SLAVE_TCSR,
+ SC8180X_SLAVE_UFS_MEM_0_CFG,
+ SC8180X_SLAVE_CNOC_DDRSS,
+ SC8180X_SLAVE_PCIE_0_CFG,
+ SC8180X_SLAVE_QUP_1,
+ SC8180X_SLAVE_QUP_2,
+ SC8180X_SLAVE_NPU_CFG,
+ SC8180X_SLAVE_CRYPTO_0_CFG,
+ SC8180X_SLAVE_GRAPHICS_3D_CFG,
+ SC8180X_SLAVE_VENUS_CFG,
+ SC8180X_SLAVE_TSIF,
+ SC8180X_SLAVE_IPA_CFG,
+ SC8180X_SLAVE_CLK_CTL,
+ SC8180X_SLAVE_SECURITY,
+ SC8180X_SLAVE_AOP,
+ SC8180X_SLAVE_AHB2PHY_WEST,
+ SC8180X_SLAVE_AHB2PHY_SOUTH,
+ SC8180X_SLAVE_SERVICE_CNOC,
+ SC8180X_SLAVE_UFS_CARD_CFG,
+ SC8180X_SLAVE_USB3_1,
+ SC8180X_SLAVE_USB3_2,
+ SC8180X_SLAVE_PCIE_3_CFG,
+ SC8180X_SLAVE_RBCPR_CX_CFG,
+ SC8180X_SLAVE_TLMM_WEST,
+ SC8180X_SLAVE_A1NOC_CFG,
+ SC8180X_SLAVE_AOSS,
+ SC8180X_SLAVE_PRNG,
+ SC8180X_SLAVE_VSENSE_CTRL_CFG,
+ SC8180X_SLAVE_QUP_0,
+ SC8180X_SLAVE_USB3,
+ SC8180X_SLAVE_RBCPR_MMCX_CFG,
+ SC8180X_SLAVE_PIMEM_CFG,
+ SC8180X_SLAVE_UFS_MEM_1_CFG,
+ SC8180X_SLAVE_RBCPR_MX_CFG,
+ SC8180X_SLAVE_IMEM_CFG }
+};
+
+static struct qcom_icc_node mas_qhm_cnoc_dc_noc = {
+ .name = "mas_qhm_cnoc_dc_noc",
+ .id = SC8180X_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_LLCC_CFG,
+ SC8180X_SLAVE_GEM_NOC_CFG }
+};
+
+static struct qcom_icc_node mas_acm_apps = {
+ .name = "mas_acm_apps",
+ .id = SC8180X_MASTER_AMPSS_M0,
+ .channels = 4,
+ .buswidth = 64,
+ .num_links = 3,
+ .links = { SC8180X_SLAVE_ECC,
+ SC8180X_SLAVE_LLCC,
+ SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_acm_gpu_tcu = {
+ .name = "mas_acm_gpu_tcu",
+ .id = SC8180X_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_LLCC,
+ SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_acm_sys_tcu = {
+ .name = "mas_acm_sys_tcu",
+ .id = SC8180X_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_LLCC,
+ SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_qhm_gemnoc_cfg = {
+ .name = "mas_qhm_gemnoc_cfg",
+ .id = SC8180X_MASTER_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 3,
+ .links = { SC8180X_SLAVE_SERVICE_GEM_NOC_1,
+ SC8180X_SLAVE_SERVICE_GEM_NOC,
+ SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG }
+};
+
+static struct qcom_icc_node mas_qnm_cmpnoc = {
+ .name = "mas_qnm_cmpnoc",
+ .id = SC8180X_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SC8180X_SLAVE_ECC,
+ SC8180X_SLAVE_LLCC,
+ SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_qnm_gpu = {
+ .name = "mas_qnm_gpu",
+ .id = SC8180X_MASTER_GRAPHICS_3D,
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_LLCC,
+ SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_qnm_mnoc_hf = {
+ .name = "mas_qnm_mnoc_hf",
+ .id = SC8180X_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_LLCC }
+};
+
+static struct qcom_icc_node mas_qnm_mnoc_sf = {
+ .name = "mas_qnm_mnoc_sf",
+ .id = SC8180X_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_LLCC,
+ SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_qnm_pcie = {
+ .name = "mas_qnm_pcie",
+ .id = SC8180X_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_LLCC,
+ SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_qnm_snoc_gc = {
+ .name = "mas_qnm_snoc_gc",
+ .id = SC8180X_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_LLCC }
+};
+
+static struct qcom_icc_node mas_qnm_snoc_sf = {
+ .name = "mas_qnm_snoc_sf",
+ .id = SC8180X_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_LLCC }
+};
+
+static struct qcom_icc_node mas_qxm_ecc = {
+ .name = "mas_qxm_ecc",
+ .id = SC8180X_MASTER_ECC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_LLCC }
+};
+
+static struct qcom_icc_node mas_ipa_core_master = {
+ .name = "mas_ipa_core_master",
+ .id = SC8180X_MASTER_IPA_CORE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_IPA_CORE }
+};
+
+static struct qcom_icc_node mas_llcc_mc = {
+ .name = "mas_llcc_mc",
+ .id = SC8180X_MASTER_LLCC,
+ .channels = 8,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_EBI_CH0 }
+};
+
+static struct qcom_icc_node mas_qhm_mnoc_cfg = {
+ .name = "mas_qhm_mnoc_cfg",
+ .id = SC8180X_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_SERVICE_MNOC }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_hf0 = {
+ .name = "mas_qxm_camnoc_hf0",
+ .id = SC8180X_MASTER_CAMNOC_HF0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_hf1 = {
+ .name = "mas_qxm_camnoc_hf1",
+ .id = SC8180X_MASTER_CAMNOC_HF1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_sf = {
+ .name = "mas_qxm_camnoc_sf",
+ .id = SC8180X_MASTER_CAMNOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_mdp0 = {
+ .name = "mas_qxm_mdp0",
+ .id = SC8180X_MASTER_MDP_PORT0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_mdp1 = {
+ .name = "mas_qxm_mdp1",
+ .id = SC8180X_MASTER_MDP_PORT1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_rot = {
+ .name = "mas_qxm_rot",
+ .id = SC8180X_MASTER_ROTATOR,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_venus0 = {
+ .name = "mas_qxm_venus0",
+ .id = SC8180X_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_venus1 = {
+ .name = "mas_qxm_venus1",
+ .id = SC8180X_MASTER_VIDEO_P1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_venus_arm9 = {
+ .name = "mas_qxm_venus_arm9",
+ .id = SC8180X_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qhm_snoc_cfg = {
+ .name = "mas_qhm_snoc_cfg",
+ .id = SC8180X_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_SERVICE_SNOC }
+};
+
+static struct qcom_icc_node mas_qnm_aggre1_noc = {
+ .name = "mas_qnm_aggre1_noc",
+ .id = SC8180X_A1NOC_SNOC_MAS,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 6,
+ .links = { SC8180X_SLAVE_SNOC_GEM_NOC_SF,
+ SC8180X_SLAVE_PIMEM,
+ SC8180X_SLAVE_OCIMEM,
+ SC8180X_SLAVE_APPSS,
+ SC8180X_SNOC_CNOC_SLV,
+ SC8180X_SLAVE_QDSS_STM }
+};
+
+static struct qcom_icc_node mas_qnm_aggre2_noc = {
+ .name = "mas_qnm_aggre2_noc",
+ .id = SC8180X_A2NOC_SNOC_MAS,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 11,
+ .links = { SC8180X_SLAVE_SNOC_GEM_NOC_SF,
+ SC8180X_SLAVE_PIMEM,
+ SC8180X_SLAVE_PCIE_3,
+ SC8180X_SLAVE_OCIMEM,
+ SC8180X_SLAVE_APPSS,
+ SC8180X_SLAVE_PCIE_2,
+ SC8180X_SNOC_CNOC_SLV,
+ SC8180X_SLAVE_PCIE_0,
+ SC8180X_SLAVE_PCIE_1,
+ SC8180X_SLAVE_TCU,
+ SC8180X_SLAVE_QDSS_STM }
+};
+
+static struct qcom_icc_node mas_qnm_gemnoc = {
+ .name = "mas_qnm_gemnoc",
+ .id = SC8180X_MASTER_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 6,
+ .links = { SC8180X_SLAVE_PIMEM,
+ SC8180X_SLAVE_OCIMEM,
+ SC8180X_SLAVE_APPSS,
+ SC8180X_SNOC_CNOC_SLV,
+ SC8180X_SLAVE_TCU,
+ SC8180X_SLAVE_QDSS_STM }
+};
+
+static struct qcom_icc_node mas_qxm_pimem = {
+ .name = "mas_qxm_pimem",
+ .id = SC8180X_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_SNOC_GEM_NOC_GC,
+ SC8180X_SLAVE_OCIMEM }
+};
+
+static struct qcom_icc_node mas_xm_gic = {
+ .name = "mas_xm_gic",
+ .id = SC8180X_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC8180X_SLAVE_SNOC_GEM_NOC_GC,
+ SC8180X_SLAVE_OCIMEM }
+};
+
+static struct qcom_icc_node mas_qup_core_0 = {
+ .name = "mas_qup_core_0",
+ .id = SC8180X_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_QUP_CORE_0 }
+};
+
+static struct qcom_icc_node mas_qup_core_1 = {
+ .name = "mas_qup_core_1",
+ .id = SC8180X_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_QUP_CORE_1 }
+};
+
+static struct qcom_icc_node mas_qup_core_2 = {
+ .name = "mas_qup_core_2",
+ .id = SC8180X_MASTER_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_SLAVE_QUP_CORE_2 }
+};
+
+static struct qcom_icc_node slv_qns_a1noc_snoc = {
+ .name = "slv_qns_a1noc_snoc",
+ .id = SC8180X_A1NOC_SNOC_SLV,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_A1NOC_SNOC_MAS }
+};
+
+static struct qcom_icc_node slv_srvc_aggre1_noc = {
+ .name = "slv_srvc_aggre1_noc",
+ .id = SC8180X_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qns_a2noc_snoc = {
+ .name = "slv_qns_a2noc_snoc",
+ .id = SC8180X_A2NOC_SNOC_SLV,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8180X_A2NOC_SNOC_MAS }
+};
+
+static struct qcom_icc_node slv_qns_pcie_mem_noc = {
+ .name = "slv_qns_pcie_mem_noc",
+ .id = SC8180X_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_GEM_NOC_PCIE_SNOC }
+};
+
+static struct qcom_icc_node slv_srvc_aggre2_noc = {
+ .name = "slv_srvc_aggre2_noc",
+ .id = SC8180X_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qns_camnoc_uncomp = {
+ .name = "slv_qns_camnoc_uncomp",
+ .id = SC8180X_SLAVE_CAMNOC_UNCOMP,
+ .channels = 1,
+ .buswidth = 32
+};
+
+static struct qcom_icc_node slv_qns_cdsp_mem_noc = {
+ .name = "slv_qns_cdsp_mem_noc",
+ .id = SC8180X_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_COMPUTE_NOC }
+};
+
+static struct qcom_icc_node slv_qhs_a1_noc_cfg = {
+ .name = "slv_qhs_a1_noc_cfg",
+ .id = SC8180X_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_A1NOC_CFG }
+};
+
+static struct qcom_icc_node slv_qhs_a2_noc_cfg = {
+ .name = "slv_qhs_a2_noc_cfg",
+ .id = SC8180X_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_A2NOC_CFG }
+};
+
+static struct qcom_icc_node slv_qhs_ahb2phy_refgen_center = {
+ .name = "slv_qhs_ahb2phy_refgen_center",
+ .id = SC8180X_SLAVE_AHB2PHY_CENTER,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ahb2phy_refgen_east = {
+ .name = "slv_qhs_ahb2phy_refgen_east",
+ .id = SC8180X_SLAVE_AHB2PHY_EAST,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ahb2phy_refgen_west = {
+ .name = "slv_qhs_ahb2phy_refgen_west",
+ .id = SC8180X_SLAVE_AHB2PHY_WEST,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ahb2phy_south = {
+ .name = "slv_qhs_ahb2phy_south",
+ .id = SC8180X_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_aop = {
+ .name = "slv_qhs_aop",
+ .id = SC8180X_SLAVE_AOP,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_aoss = {
+ .name = "slv_qhs_aoss",
+ .id = SC8180X_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_camera_cfg = {
+ .name = "slv_qhs_camera_cfg",
+ .id = SC8180X_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_clk_ctl = {
+ .name = "slv_qhs_clk_ctl",
+ .id = SC8180X_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_compute_dsp = {
+ .name = "slv_qhs_compute_dsp",
+ .id = SC8180X_SLAVE_CDSP_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_cpr_cx = {
+ .name = "slv_qhs_cpr_cx",
+ .id = SC8180X_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_cpr_mmcx = {
+ .name = "slv_qhs_cpr_mmcx",
+ .id = SC8180X_SLAVE_RBCPR_MMCX_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_cpr_mx = {
+ .name = "slv_qhs_cpr_mx",
+ .id = SC8180X_SLAVE_RBCPR_MX_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_crypto0_cfg = {
+ .name = "slv_qhs_crypto0_cfg",
+ .id = SC8180X_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ddrss_cfg = {
+ .name = "slv_qhs_ddrss_cfg",
+ .id = SC8180X_SLAVE_CNOC_DDRSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_CNOC_DC_NOC }
+};
+
+static struct qcom_icc_node slv_qhs_display_cfg = {
+ .name = "slv_qhs_display_cfg",
+ .id = SC8180X_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_emac_cfg = {
+ .name = "slv_qhs_emac_cfg",
+ .id = SC8180X_SLAVE_EMAC_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_glm = {
+ .name = "slv_qhs_glm",
+ .id = SC8180X_SLAVE_GLM,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_gpuss_cfg = {
+ .name = "slv_qhs_gpuss_cfg",
+ .id = SC8180X_SLAVE_GRAPHICS_3D_CFG,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_qhs_imem_cfg = {
+ .name = "slv_qhs_imem_cfg",
+ .id = SC8180X_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ipa = {
+ .name = "slv_qhs_ipa",
+ .id = SC8180X_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_mnoc_cfg = {
+ .name = "slv_qhs_mnoc_cfg",
+ .id = SC8180X_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_CNOC_MNOC_CFG }
+};
+
+static struct qcom_icc_node slv_qhs_npu_cfg = {
+ .name = "slv_qhs_npu_cfg",
+ .id = SC8180X_SLAVE_NPU_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pcie0_cfg = {
+ .name = "slv_qhs_pcie0_cfg",
+ .id = SC8180X_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pcie1_cfg = {
+ .name = "slv_qhs_pcie1_cfg",
+ .id = SC8180X_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pcie2_cfg = {
+ .name = "slv_qhs_pcie2_cfg",
+ .id = SC8180X_SLAVE_PCIE_2_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pcie3_cfg = {
+ .name = "slv_qhs_pcie3_cfg",
+ .id = SC8180X_SLAVE_PCIE_3_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pdm = {
+ .name = "slv_qhs_pdm",
+ .id = SC8180X_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pimem_cfg = {
+ .name = "slv_qhs_pimem_cfg",
+ .id = SC8180X_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_prng = {
+ .name = "slv_qhs_prng",
+ .id = SC8180X_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qdss_cfg = {
+ .name = "slv_qhs_qdss_cfg",
+ .id = SC8180X_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qspi_0 = {
+ .name = "slv_qhs_qspi_0",
+ .id = SC8180X_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qspi_1 = {
+ .name = "slv_qhs_qspi_1",
+ .id = SC8180X_SLAVE_QSPI_1,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qupv3_east0 = {
+ .name = "slv_qhs_qupv3_east0",
+ .id = SC8180X_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qupv3_east1 = {
+ .name = "slv_qhs_qupv3_east1",
+ .id = SC8180X_SLAVE_QUP_2,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qupv3_west = {
+ .name = "slv_qhs_qupv3_west",
+ .id = SC8180X_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_sdc2 = {
+ .name = "slv_qhs_sdc2",
+ .id = SC8180X_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_sdc4 = {
+ .name = "slv_qhs_sdc4",
+ .id = SC8180X_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_security = {
+ .name = "slv_qhs_security",
+ .id = SC8180X_SLAVE_SECURITY,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_snoc_cfg = {
+ .name = "slv_qhs_snoc_cfg",
+ .id = SC8180X_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_SNOC_CFG }
+};
+
+static struct qcom_icc_node slv_qhs_spss_cfg = {
+ .name = "slv_qhs_spss_cfg",
+ .id = SC8180X_SLAVE_SPSS_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_tcsr = {
+ .name = "slv_qhs_tcsr",
+ .id = SC8180X_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_tlmm_east = {
+ .name = "slv_qhs_tlmm_east",
+ .id = SC8180X_SLAVE_TLMM_EAST,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_tlmm_south = {
+ .name = "slv_qhs_tlmm_south",
+ .id = SC8180X_SLAVE_TLMM_SOUTH,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_tlmm_west = {
+ .name = "slv_qhs_tlmm_west",
+ .id = SC8180X_SLAVE_TLMM_WEST,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_tsif = {
+ .name = "slv_qhs_tsif",
+ .id = SC8180X_SLAVE_TSIF,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ufs_card_cfg = {
+ .name = "slv_qhs_ufs_card_cfg",
+ .id = SC8180X_SLAVE_UFS_CARD_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ufs_mem0_cfg = {
+ .name = "slv_qhs_ufs_mem0_cfg",
+ .id = SC8180X_SLAVE_UFS_MEM_0_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ufs_mem1_cfg = {
+ .name = "slv_qhs_ufs_mem1_cfg",
+ .id = SC8180X_SLAVE_UFS_MEM_1_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_usb3_0 = {
+ .name = "slv_qhs_usb3_0",
+ .id = SC8180X_SLAVE_USB3,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_usb3_1 = {
+ .name = "slv_qhs_usb3_1",
+ .id = SC8180X_SLAVE_USB3_1,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_usb3_2 = {
+ .name = "slv_qhs_usb3_2",
+ .id = SC8180X_SLAVE_USB3_2,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_venus_cfg = {
+ .name = "slv_qhs_venus_cfg",
+ .id = SC8180X_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_vsense_ctrl_cfg = {
+ .name = "slv_qhs_vsense_ctrl_cfg",
+ .id = SC8180X_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_srvc_cnoc = {
+ .name = "slv_srvc_cnoc",
+ .id = SC8180X_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_gemnoc = {
+ .name = "slv_qhs_gemnoc",
+ .id = SC8180X_SLAVE_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_GEM_NOC_CFG }
+};
+
+static struct qcom_icc_node slv_qhs_llcc = {
+ .name = "slv_qhs_llcc",
+ .id = SC8180X_SLAVE_LLCC_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_mdsp_ms_mpu_cfg = {
+ .name = "slv_qhs_mdsp_ms_mpu_cfg",
+ .id = SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qns_ecc = {
+ .name = "slv_qns_ecc",
+ .id = SC8180X_SLAVE_ECC,
+ .channels = 1,
+ .buswidth = 32
+};
+
+static struct qcom_icc_node slv_qns_gem_noc_snoc = {
+ .name = "slv_qns_gem_noc_snoc",
+ .id = SC8180X_SLAVE_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node slv_qns_llcc = {
+ .name = "slv_qns_llcc",
+ .id = SC8180X_SLAVE_LLCC,
+ .channels = 8,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_LLCC }
+};
+
+static struct qcom_icc_node slv_srvc_gemnoc = {
+ .name = "slv_srvc_gemnoc",
+ .id = SC8180X_SLAVE_SERVICE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_srvc_gemnoc1 = {
+ .name = "slv_srvc_gemnoc1",
+ .id = SC8180X_SLAVE_SERVICE_GEM_NOC_1,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_ipa_core_slave = {
+ .name = "slv_ipa_core_slave",
+ .id = SC8180X_SLAVE_IPA_CORE,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_ebi = {
+ .name = "slv_ebi",
+ .id = SC8180X_SLAVE_EBI_CH0,
+ .channels = 8,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qns2_mem_noc = {
+ .name = "slv_qns2_mem_noc",
+ .id = SC8180X_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node slv_qns_mem_noc_hf = {
+ .name = "slv_qns_mem_noc_hf",
+ .id = SC8180X_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_MNOC_HF_MEM_NOC }
+};
+
+static struct qcom_icc_node slv_srvc_mnoc = {
+ .name = "slv_srvc_mnoc",
+ .id = SC8180X_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_apss = {
+ .name = "slv_qhs_apss",
+ .id = SC8180X_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_qns_cnoc = {
+ .name = "slv_qns_cnoc",
+ .id = SC8180X_SNOC_CNOC_SLV,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_SNOC_CNOC_MAS }
+};
+
+static struct qcom_icc_node slv_qns_gemnoc_gc = {
+ .name = "slv_qns_gemnoc_gc",
+ .id = SC8180X_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_SNOC_GC_MEM_NOC }
+};
+
+static struct qcom_icc_node slv_qns_gemnoc_sf = {
+ .name = "slv_qns_gemnoc_sf",
+ .id = SC8180X_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8180X_MASTER_SNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node slv_qxs_imem = {
+ .name = "slv_qxs_imem",
+ .id = SC8180X_SLAVE_OCIMEM,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_qxs_pimem = {
+ .name = "slv_qxs_pimem",
+ .id = SC8180X_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_srvc_snoc = {
+ .name = "slv_srvc_snoc",
+ .id = SC8180X_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_xs_pcie_0 = {
+ .name = "slv_xs_pcie_0",
+ .id = SC8180X_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_xs_pcie_1 = {
+ .name = "slv_xs_pcie_1",
+ .id = SC8180X_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_xs_pcie_2 = {
+ .name = "slv_xs_pcie_2",
+ .id = SC8180X_SLAVE_PCIE_2,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_xs_pcie_3 = {
+ .name = "slv_xs_pcie_3",
+ .id = SC8180X_SLAVE_PCIE_3,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_xs_qdss_stm = {
+ .name = "slv_xs_qdss_stm",
+ .id = SC8180X_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_xs_sys_tcu_cfg = {
+ .name = "slv_xs_sys_tcu_cfg",
+ .id = SC8180X_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8
+};
+
+static struct qcom_icc_node slv_qup_core_0 = {
+ .name = "slv_qup_core_0",
+ .id = SC8180X_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qup_core_1 = {
+ .name = "slv_qup_core_1",
+ .id = SC8180X_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qup_core_2 = {
+ .name = "slv_qup_core_2",
+ .id = SC8180X_SLAVE_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &slv_ebi }
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &slv_ebi }
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &slv_qns_llcc }
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &slv_qns_mem_noc_hf }
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .num_nodes = 1,
+ .nodes = { &slv_qns_cdsp_mem_noc }
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &mas_qxm_crypto }
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 57,
+ .nodes = { &mas_qnm_snoc,
+ &slv_qhs_a1_noc_cfg,
+ &slv_qhs_a2_noc_cfg,
+ &slv_qhs_ahb2phy_refgen_center,
+ &slv_qhs_ahb2phy_refgen_east,
+ &slv_qhs_ahb2phy_refgen_west,
+ &slv_qhs_ahb2phy_south,
+ &slv_qhs_aop,
+ &slv_qhs_aoss,
+ &slv_qhs_camera_cfg,
+ &slv_qhs_clk_ctl,
+ &slv_qhs_compute_dsp,
+ &slv_qhs_cpr_cx,
+ &slv_qhs_cpr_mmcx,
+ &slv_qhs_cpr_mx,
+ &slv_qhs_crypto0_cfg,
+ &slv_qhs_ddrss_cfg,
+ &slv_qhs_display_cfg,
+ &slv_qhs_emac_cfg,
+ &slv_qhs_glm,
+ &slv_qhs_gpuss_cfg,
+ &slv_qhs_imem_cfg,
+ &slv_qhs_ipa,
+ &slv_qhs_mnoc_cfg,
+ &slv_qhs_npu_cfg,
+ &slv_qhs_pcie0_cfg,
+ &slv_qhs_pcie1_cfg,
+ &slv_qhs_pcie2_cfg,
+ &slv_qhs_pcie3_cfg,
+ &slv_qhs_pdm,
+ &slv_qhs_pimem_cfg,
+ &slv_qhs_prng,
+ &slv_qhs_qdss_cfg,
+ &slv_qhs_qspi_0,
+ &slv_qhs_qspi_1,
+ &slv_qhs_qupv3_east0,
+ &slv_qhs_qupv3_east1,
+ &slv_qhs_qupv3_west,
+ &slv_qhs_sdc2,
+ &slv_qhs_sdc4,
+ &slv_qhs_security,
+ &slv_qhs_snoc_cfg,
+ &slv_qhs_spss_cfg,
+ &slv_qhs_tcsr,
+ &slv_qhs_tlmm_east,
+ &slv_qhs_tlmm_south,
+ &slv_qhs_tlmm_west,
+ &slv_qhs_tsif,
+ &slv_qhs_ufs_card_cfg,
+ &slv_qhs_ufs_mem0_cfg,
+ &slv_qhs_ufs_mem1_cfg,
+ &slv_qhs_usb3_0,
+ &slv_qhs_usb3_1,
+ &slv_qhs_usb3_2,
+ &slv_qhs_venus_cfg,
+ &slv_qhs_vsense_ctrl_cfg,
+ &slv_srvc_cnoc }
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .num_nodes = 7,
+ .nodes = { &mas_qxm_camnoc_hf0_uncomp,
+ &mas_qxm_camnoc_hf1_uncomp,
+ &mas_qxm_camnoc_sf_uncomp,
+ &mas_qxm_camnoc_hf0,
+ &mas_qxm_camnoc_hf1,
+ &mas_qxm_mdp0,
+ &mas_qxm_mdp1 }
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .num_nodes = 3,
+ .nodes = { &mas_qup_core_0,
+ &mas_qup_core_1,
+ &mas_qup_core_2 }
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+ .name = "SH2",
+ .num_nodes = 1,
+ .nodes = { &slv_qns_gem_noc_snoc }
+};
+
+static struct qcom_icc_bcm bcm_mm2 = {
+ .name = "MM2",
+ .num_nodes = 6,
+ .nodes = { &mas_qxm_camnoc_sf,
+ &mas_qxm_rot,
+ &mas_qxm_venus0,
+ &mas_qxm_venus1,
+ &mas_qxm_venus_arm9,
+ &slv_qns2_mem_noc }
+};
+
+static struct qcom_icc_bcm bcm_sh3 = {
+ .name = "SH3",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &mas_acm_apps }
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .nodes = { &slv_qns_gemnoc_sf }
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .nodes = { &slv_qxs_imem }
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .keepalive = true,
+ .nodes = { &slv_qns_gemnoc_gc }
+};
+
+static struct qcom_icc_bcm bcm_co2 = {
+ .name = "CO2",
+ .nodes = { &mas_qnm_npu }
+};
+
+static struct qcom_icc_bcm bcm_ip0 = {
+ .name = "IP0",
+ .nodes = { &slv_ipa_core_slave }
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .keepalive = true,
+ .nodes = { &slv_srvc_aggre1_noc,
+ &slv_qns_cnoc }
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .nodes = { &slv_qxs_pimem }
+};
+
+static struct qcom_icc_bcm bcm_sn8 = {
+ .name = "SN8",
+ .num_nodes = 4,
+ .nodes = { &slv_xs_pcie_0,
+ &slv_xs_pcie_1,
+ &slv_xs_pcie_2,
+ &slv_xs_pcie_3 }
+};
+
+static struct qcom_icc_bcm bcm_sn9 = {
+ .name = "SN9",
+ .num_nodes = 1,
+ .nodes = { &mas_qnm_aggre1_noc }
+};
+
+static struct qcom_icc_bcm bcm_sn11 = {
+ .name = "SN11",
+ .num_nodes = 1,
+ .nodes = { &mas_qnm_aggre2_noc }
+};
+
+static struct qcom_icc_bcm bcm_sn14 = {
+ .name = "SN14",
+ .num_nodes = 1,
+ .nodes = { &slv_qns_pcie_mem_noc }
+};
+
+static struct qcom_icc_bcm bcm_sn15 = {
+ .name = "SN15",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &mas_qnm_gemnoc }
+};
+
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_sn3,
&bcm_ce0,
- &bcm_qup0,
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_sn14,
&bcm_ce0,
- &bcm_qup0,
};
-static struct qcom_icc_bcm *camnoc_virt_bcms[] = {
+static struct qcom_icc_bcm * const camnoc_virt_bcms[] = {
&bcm_mm1,
};
-static struct qcom_icc_bcm *compute_noc_bcms[] = {
+static struct qcom_icc_bcm * const compute_noc_bcms[] = {
&bcm_co0,
&bcm_co2,
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
};
-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
+static struct qcom_icc_bcm * const ipa_virt_bcms[] = {
&bcm_ip0,
};
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_mc0,
&bcm_acv,
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn2,
@@ -249,7 +1631,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn15,
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &mas_qhm_a1noc_cfg,
[MASTER_UFS_CARD] = &mas_xm_ufs_card,
[MASTER_UFS_GEN4] = &mas_xm_ufs_g4,
@@ -261,7 +1643,7 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_SERVICE_A1NOC] = &slv_srvc_aggre1_noc,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &mas_qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &mas_qhm_qdss_bam,
[MASTER_QSPI_0] = &mas_qhm_qspi,
@@ -285,19 +1667,19 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_SERVICE_A2NOC] = &slv_srvc_aggre2_noc,
};
-static struct qcom_icc_node *camnoc_virt_nodes[] = {
+static struct qcom_icc_node * const camnoc_virt_nodes[] = {
[MASTER_CAMNOC_HF0_UNCOMP] = &mas_qxm_camnoc_hf0_uncomp,
[MASTER_CAMNOC_HF1_UNCOMP] = &mas_qxm_camnoc_hf1_uncomp,
[MASTER_CAMNOC_SF_UNCOMP] = &mas_qxm_camnoc_sf_uncomp,
[SLAVE_CAMNOC_UNCOMP] = &slv_qns_camnoc_uncomp,
};
-static struct qcom_icc_node *compute_noc_nodes[] = {
+static struct qcom_icc_node * const compute_noc_nodes[] = {
[MASTER_NPU] = &mas_qnm_npu,
[SLAVE_CDSP_MEM_NOC] = &slv_qns_cdsp_mem_noc,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[SNOC_CNOC_MAS] = &mas_qnm_snoc,
[SLAVE_A1NOC_CFG] = &slv_qhs_a1_noc_cfg,
[SLAVE_A2NOC_CFG] = &slv_qhs_a2_noc_cfg,
@@ -357,13 +1739,13 @@ static struct qcom_icc_node *config_noc_nodes[] = {
[SLAVE_SERVICE_CNOC] = &slv_srvc_cnoc,
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &mas_qhm_cnoc_dc_noc,
[SLAVE_GEM_NOC_CFG] = &slv_qhs_gemnoc,
[SLAVE_LLCC_CFG] = &slv_qhs_llcc,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_AMPSS_M0] = &mas_acm_apps,
[MASTER_GPU_TCU] = &mas_acm_gpu_tcu,
[MASTER_SYS_TCU] = &mas_acm_sys_tcu,
@@ -384,17 +1766,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
[SLAVE_SERVICE_GEM_NOC_1] = &slv_srvc_gemnoc1,
};
-static struct qcom_icc_node *ipa_virt_nodes[] = {
+static struct qcom_icc_node * const ipa_virt_nodes[] = {
[MASTER_IPA_CORE] = &mas_ipa_core_master,
[SLAVE_IPA_CORE] = &slv_ipa_core_slave,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &mas_llcc_mc,
[SLAVE_EBI_CH0] = &slv_ebi,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &mas_qhm_mnoc_cfg,
[MASTER_CAMNOC_HF0] = &mas_qxm_camnoc_hf0,
[MASTER_CAMNOC_HF1] = &mas_qxm_camnoc_hf1,
@@ -410,7 +1792,7 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
[SLAVE_SERVICE_MNOC] = &slv_srvc_mnoc,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &mas_qhm_snoc_cfg,
[A1NOC_SNOC_MAS] = &mas_qnm_aggre1_noc,
[A2NOC_SNOC_MAS] = &mas_qnm_aggre2_noc,
@@ -503,97 +1885,25 @@ static const struct qcom_icc_desc sc8180x_system_noc = {
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
-static int qnoc_probe(struct platform_device *pdev)
-{
- const struct qcom_icc_desc *desc;
- struct icc_onecell_data *data;
- struct icc_provider *provider;
- struct qcom_icc_node **qnodes;
- struct qcom_icc_provider *qp;
- struct icc_node *node;
- size_t num_nodes, i;
- int ret;
-
- desc = device_get_match_data(&pdev->dev);
- if (!desc)
- return -EINVAL;
-
- qnodes = desc->nodes;
- num_nodes = desc->num_nodes;
-
- qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return -ENOMEM;
-
- data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- provider = &qp->provider;
- provider->dev = &pdev->dev;
- provider->set = qcom_icc_set;
- provider->pre_aggregate = qcom_icc_pre_aggregate;
- provider->aggregate = qcom_icc_aggregate;
- provider->xlate = of_icc_xlate_onecell;
- INIT_LIST_HEAD(&provider->nodes);
- provider->data = data;
-
- qp->dev = &pdev->dev;
- qp->bcms = desc->bcms;
- qp->num_bcms = desc->num_bcms;
-
- qp->voter = of_bcm_voter_get(qp->dev, NULL);
- if (IS_ERR(qp->voter))
- return PTR_ERR(qp->voter);
-
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(&pdev->dev, "error adding interconnect provider\n");
- return ret;
- }
-
- for (i = 0; i < qp->num_bcms; i++)
- qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
-
- for (i = 0; i < num_nodes; i++) {
- size_t j;
-
- if (!qnodes[i])
- continue;
-
- node = icc_node_create(qnodes[i]->id);
- if (IS_ERR(node)) {
- ret = PTR_ERR(node);
- goto err;
- }
-
- node->name = qnodes[i]->name;
- node->data = qnodes[i];
- icc_node_add(node, provider);
-
- for (j = 0; j < qnodes[i]->num_links; j++)
- icc_link_create(node, qnodes[i]->links[j]);
-
- data->nodes[i] = node;
- }
- data->num_nodes = num_nodes;
-
- platform_set_drvdata(pdev, qp);
-
- return 0;
-err:
- icc_nodes_remove(provider);
- icc_provider_del(provider);
- return ret;
-}
-
-static int qnoc_remove(struct platform_device *pdev)
-{
- struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
-
- icc_nodes_remove(&qp->provider);
- return icc_provider_del(&qp->provider);
-}
+static struct qcom_icc_bcm * const qup_virt_bcms[] = {
+ &bcm_qup0,
+};
+
+static struct qcom_icc_node *qup_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &mas_qup_core_0,
+ [MASTER_QUP_CORE_1] = &mas_qup_core_1,
+ [MASTER_QUP_CORE_2] = &mas_qup_core_2,
+ [SLAVE_QUP_CORE_0] = &slv_qup_core_0,
+ [SLAVE_QUP_CORE_1] = &slv_qup_core_1,
+ [SLAVE_QUP_CORE_2] = &slv_qup_core_2,
+};
+
+static const struct qcom_icc_desc sc8180x_qup_virt = {
+ .nodes = qup_virt_nodes,
+ .num_nodes = ARRAY_SIZE(qup_virt_nodes),
+ .bcms = qup_virt_bcms,
+ .num_bcms = ARRAY_SIZE(qup_virt_bcms),
+};
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sc8180x-aggre1-noc", .data = &sc8180x_aggre1_noc },
@@ -606,14 +1916,15 @@ static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sc8180x-ipa-virt", .data = &sc8180x_ipa_virt },
{ .compatible = "qcom,sc8180x-mc-virt", .data = &sc8180x_mc_virt },
{ .compatible = "qcom,sc8180x-mmss-noc", .data = &sc8180x_mmss_noc },
+ { .compatible = "qcom,sc8180x-qup-virt", .data = &sc8180x_qup_virt },
{ .compatible = "qcom,sc8180x-system-noc", .data = &sc8180x_system_noc },
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
- .probe = qnoc_probe,
- .remove = qnoc_remove,
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sc8180x",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sc8180x.h b/drivers/interconnect/qcom/sc8180x.h
index e70cf7032f80..2eafd35543c7 100644
--- a/drivers/interconnect/qcom/sc8180x.h
+++ b/drivers/interconnect/qcom/sc8180x.h
@@ -171,4 +171,11 @@
#define SC8180X_MASTER_OSM_L3_APPS 161
#define SC8180X_SLAVE_OSM_L3 162
+#define SC8180X_MASTER_QUP_CORE_0 163
+#define SC8180X_MASTER_QUP_CORE_1 164
+#define SC8180X_MASTER_QUP_CORE_2 165
+#define SC8180X_SLAVE_QUP_CORE_0 166
+#define SC8180X_SLAVE_QUP_CORE_1 167
+#define SC8180X_SLAVE_QUP_CORE_2 168
+
#endif
diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c
new file mode 100644
index 000000000000..507fe5f89791
--- /dev/null
+++ b/drivers/interconnect/qcom/sc8280xp.c
@@ -0,0 +1,2438 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sc8280xp.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sc8280xp.h"
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .id = SC8280XP_MASTER_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = SC8280XP_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .id = SC8280XP_MASTER_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_a1noc_cfg = {
+ .name = "qnm_a1noc_cfg",
+ .id = SC8280XP_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .links = { SC8280XP_SLAVE_SERVICE_A1NOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SC8280XP_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_emac_1 = {
+ .name = "xm_emac_1",
+ .id = SC8280XP_MASTER_EMAC_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SC8280XP_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SC8280XP_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SC8280XP_MASTER_USB3_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_1 = {
+ .name = "xm_usb3_1",
+ .id = SC8280XP_MASTER_USB3_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_mp = {
+ .name = "xm_usb3_mp",
+ .id = SC8280XP_MASTER_USB3_MP,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb4_host0 = {
+ .name = "xm_usb4_host0",
+ .id = SC8280XP_MASTER_USB4_0,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb4_host1 = {
+ .name = "xm_usb4_host1",
+ .id = SC8280XP_MASTER_USB4_1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SC8280XP_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .id = SC8280XP_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_a2noc_cfg = {
+ .name = "qnm_a2noc_cfg",
+ .id = SC8280XP_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SERVICE_A2NOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SC8280XP_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sensorss_q6 = {
+ .name = "qxm_sensorss_q6",
+ .id = SC8280XP_MASTER_SENSORS_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sp = {
+ .name = "qxm_sp",
+ .id = SC8280XP_MASTER_SP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_emac_0 = {
+ .name = "xm_emac_0",
+ .id = SC8280XP_MASTER_EMAC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+ .name = "xm_pcie3_0",
+ .id = SC8280XP_MASTER_PCIE_0,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_1 = {
+ .name = "xm_pcie3_1",
+ .id = SC8280XP_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_2a = {
+ .name = "xm_pcie3_2a",
+ .id = SC8280XP_MASTER_PCIE_2A,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_2b = {
+ .name = "xm_pcie3_2b",
+ .id = SC8280XP_MASTER_PCIE_2B,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_3a = {
+ .name = "xm_pcie3_3a",
+ .id = SC8280XP_MASTER_PCIE_3A,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_3b = {
+ .name = "xm_pcie3_3b",
+ .id = SC8280XP_MASTER_PCIE_3B,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_4 = {
+ .name = "xm_pcie3_4",
+ .id = SC8280XP_MASTER_PCIE_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+ .name = "xm_qdss_etr",
+ .id = SC8280XP_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SC8280XP_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_card = {
+ .name = "xm_ufs_card",
+ .id = SC8280XP_MASTER_UFS_CARD,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node ipa_core_master = {
+ .name = "ipa_core_master",
+ .id = SC8280XP_MASTER_IPA_CORE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_IPA_CORE },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = SC8280XP_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .id = SC8280XP_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+ .name = "qup2_core_master",
+ .id = SC8280XP_MASTER_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_QUP_CORE_2 },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+ .name = "qnm_gemnoc_cnoc",
+ .id = SC8280XP_MASTER_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 76,
+ .links = { SC8280XP_SLAVE_AHB2PHY_0,
+ SC8280XP_SLAVE_AHB2PHY_1,
+ SC8280XP_SLAVE_AHB2PHY_2,
+ SC8280XP_SLAVE_AOSS,
+ SC8280XP_SLAVE_APPSS,
+ SC8280XP_SLAVE_CAMERA_CFG,
+ SC8280XP_SLAVE_CLK_CTL,
+ SC8280XP_SLAVE_CDSP_CFG,
+ SC8280XP_SLAVE_CDSP1_CFG,
+ SC8280XP_SLAVE_RBCPR_CX_CFG,
+ SC8280XP_SLAVE_RBCPR_MMCX_CFG,
+ SC8280XP_SLAVE_RBCPR_MX_CFG,
+ SC8280XP_SLAVE_CPR_NSPCX,
+ SC8280XP_SLAVE_CRYPTO_0_CFG,
+ SC8280XP_SLAVE_CX_RDPM,
+ SC8280XP_SLAVE_DCC_CFG,
+ SC8280XP_SLAVE_DISPLAY_CFG,
+ SC8280XP_SLAVE_DISPLAY1_CFG,
+ SC8280XP_SLAVE_EMAC_CFG,
+ SC8280XP_SLAVE_EMAC1_CFG,
+ SC8280XP_SLAVE_GFX3D_CFG,
+ SC8280XP_SLAVE_HWKM,
+ SC8280XP_SLAVE_IMEM_CFG,
+ SC8280XP_SLAVE_IPA_CFG,
+ SC8280XP_SLAVE_IPC_ROUTER_CFG,
+ SC8280XP_SLAVE_LPASS,
+ SC8280XP_SLAVE_MX_RDPM,
+ SC8280XP_SLAVE_MXC_RDPM,
+ SC8280XP_SLAVE_PCIE_0_CFG,
+ SC8280XP_SLAVE_PCIE_1_CFG,
+ SC8280XP_SLAVE_PCIE_2A_CFG,
+ SC8280XP_SLAVE_PCIE_2B_CFG,
+ SC8280XP_SLAVE_PCIE_3A_CFG,
+ SC8280XP_SLAVE_PCIE_3B_CFG,
+ SC8280XP_SLAVE_PCIE_4_CFG,
+ SC8280XP_SLAVE_PCIE_RSC_CFG,
+ SC8280XP_SLAVE_PDM,
+ SC8280XP_SLAVE_PIMEM_CFG,
+ SC8280XP_SLAVE_PKA_WRAPPER_CFG,
+ SC8280XP_SLAVE_PMU_WRAPPER_CFG,
+ SC8280XP_SLAVE_QDSS_CFG,
+ SC8280XP_SLAVE_QSPI_0,
+ SC8280XP_SLAVE_QUP_0,
+ SC8280XP_SLAVE_QUP_1,
+ SC8280XP_SLAVE_QUP_2,
+ SC8280XP_SLAVE_SDCC_2,
+ SC8280XP_SLAVE_SDCC_4,
+ SC8280XP_SLAVE_SECURITY,
+ SC8280XP_SLAVE_SMMUV3_CFG,
+ SC8280XP_SLAVE_SMSS_CFG,
+ SC8280XP_SLAVE_SPSS_CFG,
+ SC8280XP_SLAVE_TCSR,
+ SC8280XP_SLAVE_TLMM,
+ SC8280XP_SLAVE_UFS_CARD_CFG,
+ SC8280XP_SLAVE_UFS_MEM_CFG,
+ SC8280XP_SLAVE_USB3_0,
+ SC8280XP_SLAVE_USB3_1,
+ SC8280XP_SLAVE_USB3_MP,
+ SC8280XP_SLAVE_USB4_0,
+ SC8280XP_SLAVE_USB4_1,
+ SC8280XP_SLAVE_VENUS_CFG,
+ SC8280XP_SLAVE_VSENSE_CTRL_CFG,
+ SC8280XP_SLAVE_VSENSE_CTRL_R_CFG,
+ SC8280XP_SLAVE_A1NOC_CFG,
+ SC8280XP_SLAVE_A2NOC_CFG,
+ SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG,
+ SC8280XP_SLAVE_DDRSS_CFG,
+ SC8280XP_SLAVE_CNOC_MNOC_CFG,
+ SC8280XP_SLAVE_SNOC_CFG,
+ SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG,
+ SC8280XP_SLAVE_IMEM,
+ SC8280XP_SLAVE_PIMEM,
+ SC8280XP_SLAVE_SERVICE_CNOC,
+ SC8280XP_SLAVE_QDSS_STM,
+ SC8280XP_SLAVE_SMSS,
+ SC8280XP_SLAVE_TCU
+ },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .id = SC8280XP_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 7,
+ .links = { SC8280XP_SLAVE_PCIE_0,
+ SC8280XP_SLAVE_PCIE_1,
+ SC8280XP_SLAVE_PCIE_2A,
+ SC8280XP_SLAVE_PCIE_2B,
+ SC8280XP_SLAVE_PCIE_3A,
+ SC8280XP_SLAVE_PCIE_3B,
+ SC8280XP_SLAVE_PCIE_4
+ },
+};
+
+static struct qcom_icc_node qnm_cnoc_dc_noc = {
+ .name = "qnm_cnoc_dc_noc",
+ .id = SC8280XP_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_LLCC_CFG,
+ SC8280XP_SLAVE_GEM_NOC_CFG
+ },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .id = SC8280XP_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node alm_pcie_tcu = {
+ .name = "alm_pcie_tcu",
+ .id = SC8280XP_MASTER_PCIE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .id = SC8280XP_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .id = SC8280XP_MASTER_APPSS_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC,
+ SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC
+ },
+};
+
+static struct qcom_icc_node qnm_cmpnoc0 = {
+ .name = "qnm_cmpnoc0",
+ .id = SC8280XP_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_cmpnoc1 = {
+ .name = "qnm_cmpnoc1",
+ .id = SC8280XP_MASTER_COMPUTE_NOC_1,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cfg = {
+ .name = "qnm_gemnoc_cfg",
+ .id = SC8280XP_MASTER_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 3,
+ .links = { SC8280XP_SLAVE_SERVICE_GEM_NOC_1,
+ SC8280XP_SLAVE_SERVICE_GEM_NOC_2,
+ SC8280XP_SLAVE_SERVICE_GEM_NOC
+ },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .id = SC8280XP_MASTER_GFX3D,
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SC8280XP_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_LLCC,
+ SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC
+ },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SC8280XP_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SC8280XP_MASTER_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SC8280XP_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SC8280XP_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+ SC8280XP_SLAVE_LLCC,
+ SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC },
+};
+
+static struct qcom_icc_node qhm_config_noc = {
+ .name = "qhm_config_noc",
+ .id = SC8280XP_MASTER_CNOC_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 6,
+ .links = { SC8280XP_SLAVE_LPASS_CORE_CFG,
+ SC8280XP_SLAVE_LPASS_LPI_CFG,
+ SC8280XP_SLAVE_LPASS_MPU_CFG,
+ SC8280XP_SLAVE_LPASS_TOP_CFG,
+ SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC,
+ SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC
+ },
+};
+
+static struct qcom_icc_node qxm_lpass_dsp = {
+ .name = "qxm_lpass_dsp",
+ .id = SC8280XP_MASTER_LPASS_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 4,
+ .links = { SC8280XP_SLAVE_LPASS_TOP_CFG,
+ SC8280XP_SLAVE_LPASS_SNOC,
+ SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC,
+ SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC
+ },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SC8280XP_MASTER_LLCC,
+ .channels = 8,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .id = SC8280XP_MASTER_CAMNOC_HF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp0_0 = {
+ .name = "qnm_mdp0_0",
+ .id = SC8280XP_MASTER_MDP0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp0_1 = {
+ .name = "qnm_mdp0_1",
+ .id = SC8280XP_MASTER_MDP1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp1_0 = {
+ .name = "qnm_mdp1_0",
+ .id = SC8280XP_MASTER_MDP_CORE1_0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp1_1 = {
+ .name = "qnm_mdp1_1",
+ .id = SC8280XP_MASTER_MDP_CORE1_1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_cfg = {
+ .name = "qnm_mnoc_cfg",
+ .id = SC8280XP_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qnm_rot_0 = {
+ .name = "qnm_rot_0",
+ .id = SC8280XP_MASTER_ROTATOR,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_rot_1 = {
+ .name = "qnm_rot_1",
+ .id = SC8280XP_MASTER_ROTATOR_1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video0 = {
+ .name = "qnm_video0",
+ .id = SC8280XP_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video1 = {
+ .name = "qnm_video1",
+ .id = SC8280XP_MASTER_VIDEO_P1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cvp = {
+ .name = "qnm_video_cvp",
+ .id = SC8280XP_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_icp = {
+ .name = "qxm_camnoc_icp",
+ .id = SC8280XP_MASTER_CAMNOC_ICP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf = {
+ .name = "qxm_camnoc_sf",
+ .id = SC8280XP_MASTER_CAMNOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qhm_nsp_noc_config = {
+ .name = "qhm_nsp_noc_config",
+ .id = SC8280XP_MASTER_CDSP_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SERVICE_NSP_NOC },
+};
+
+static struct qcom_icc_node qxm_nsp = {
+ .name = "qxm_nsp",
+ .id = SC8280XP_MASTER_CDSP_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_CDSP_MEM_NOC,
+ SC8280XP_SLAVE_NSP_XFR
+ },
+};
+
+static struct qcom_icc_node qhm_nspb_noc_config = {
+ .name = "qhm_nspb_noc_config",
+ .id = SC8280XP_MASTER_CDSPB_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SERVICE_NSPB_NOC },
+};
+
+static struct qcom_icc_node qxm_nspb = {
+ .name = "qxm_nspb",
+ .id = SC8280XP_MASTER_CDSP_PROC_B,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC8280XP_SLAVE_CDSPB_MEM_NOC,
+ SC8280XP_SLAVE_NSPB_XFR
+ },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SC8280XP_MASTER_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SC8280XP_MASTER_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre_usb_noc = {
+ .name = "qnm_aggre_usb_noc",
+ .id = SC8280XP_MASTER_USB_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_lpass_noc = {
+ .name = "qnm_lpass_noc",
+ .id = SC8280XP_MASTER_LPASS_ANOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_snoc_cfg = {
+ .name = "qnm_snoc_cfg",
+ .id = SC8280XP_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SC8280XP_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SC8280XP_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SC8280XP_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_aggre_usb_snoc = {
+ .name = "qns_aggre_usb_snoc",
+ .id = SC8280XP_SLAVE_USB_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre1_noc = {
+ .name = "srvc_aggre1_noc",
+ .id = SC8280XP_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SC8280XP_SLAVE_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_pcie_gem_noc = {
+ .name = "qns_pcie_gem_noc",
+ .id = SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node srvc_aggre2_noc = {
+ .name = "srvc_aggre2_noc",
+ .id = SC8280XP_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node ipa_core_slave = {
+ .name = "ipa_core_slave",
+ .id = SC8280XP_SLAVE_IPA_CORE,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = SC8280XP_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .id = SC8280XP_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+ .name = "qup2_core_slave",
+ .id = SC8280XP_SLAVE_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .id = SC8280XP_SLAVE_AHB2PHY_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .id = SC8280XP_SLAVE_AHB2PHY_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy2 = {
+ .name = "qhs_ahb2phy2",
+ .id = SC8280XP_SLAVE_AHB2PHY_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SC8280XP_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SC8280XP_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SC8280XP_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SC8280XP_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_compute0_cfg = {
+ .name = "qhs_compute0_cfg",
+ .id = SC8280XP_SLAVE_CDSP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_CDSP_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_compute1_cfg = {
+ .name = "qhs_compute1_cfg",
+ .id = SC8280XP_SLAVE_CDSP1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_CDSPB_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SC8280XP_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mmcx = {
+ .name = "qhs_cpr_mmcx",
+ .id = SC8280XP_SLAVE_RBCPR_MMCX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mx = {
+ .name = "qhs_cpr_mx",
+ .id = SC8280XP_SLAVE_RBCPR_MX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_nspcx = {
+ .name = "qhs_cpr_nspcx",
+ .id = SC8280XP_SLAVE_CPR_NSPCX,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SC8280XP_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cx_rdpm = {
+ .name = "qhs_cx_rdpm",
+ .id = SC8280XP_SLAVE_CX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_dcc_cfg = {
+ .name = "qhs_dcc_cfg",
+ .id = SC8280XP_SLAVE_DCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display0_cfg = {
+ .name = "qhs_display0_cfg",
+ .id = SC8280XP_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display1_cfg = {
+ .name = "qhs_display1_cfg",
+ .id = SC8280XP_SLAVE_DISPLAY1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_emac0_cfg = {
+ .name = "qhs_emac0_cfg",
+ .id = SC8280XP_SLAVE_EMAC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_emac1_cfg = {
+ .name = "qhs_emac1_cfg",
+ .id = SC8280XP_SLAVE_EMAC1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SC8280XP_SLAVE_GFX3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_hwkm = {
+ .name = "qhs_hwkm",
+ .id = SC8280XP_SLAVE_HWKM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SC8280XP_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SC8280XP_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .id = SC8280XP_SLAVE_IPC_ROUTER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_cfg = {
+ .name = "qhs_lpass_cfg",
+ .id = SC8280XP_SLAVE_LPASS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_CNOC_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qhs_mx_rdpm = {
+ .name = "qhs_mx_rdpm",
+ .id = SC8280XP_SLAVE_MX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mxc_rdpm = {
+ .name = "qhs_mxc_rdpm",
+ .id = SC8280XP_SLAVE_MXC_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .id = SC8280XP_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .id = SC8280XP_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie2a_cfg = {
+ .name = "qhs_pcie2a_cfg",
+ .id = SC8280XP_SLAVE_PCIE_2A_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie2b_cfg = {
+ .name = "qhs_pcie2b_cfg",
+ .id = SC8280XP_SLAVE_PCIE_2B_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie3a_cfg = {
+ .name = "qhs_pcie3a_cfg",
+ .id = SC8280XP_SLAVE_PCIE_3A_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie3b_cfg = {
+ .name = "qhs_pcie3b_cfg",
+ .id = SC8280XP_SLAVE_PCIE_3B_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie4_cfg = {
+ .name = "qhs_pcie4_cfg",
+ .id = SC8280XP_SLAVE_PCIE_4_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie_rsc_cfg = {
+ .name = "qhs_pcie_rsc_cfg",
+ .id = SC8280XP_SLAVE_PCIE_RSC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SC8280XP_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SC8280XP_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pka_wrapper_cfg = {
+ .name = "qhs_pka_wrapper_cfg",
+ .id = SC8280XP_SLAVE_PKA_WRAPPER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pmu_wrapper_cfg = {
+ .name = "qhs_pmu_wrapper_cfg",
+ .id = SC8280XP_SLAVE_PMU_WRAPPER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SC8280XP_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .id = SC8280XP_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .id = SC8280XP_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = SC8280XP_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .id = SC8280XP_SLAVE_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SC8280XP_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SC8280XP_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_security = {
+ .name = "qhs_security",
+ .id = SC8280XP_SLAVE_SECURITY,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_smmuv3_cfg = {
+ .name = "qhs_smmuv3_cfg",
+ .id = SC8280XP_SLAVE_SMMUV3_CFG,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_smss_cfg = {
+ .name = "qhs_smss_cfg",
+ .id = SC8280XP_SLAVE_SMSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_spss_cfg = {
+ .name = "qhs_spss_cfg",
+ .id = SC8280XP_SLAVE_SPSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SC8280XP_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = SC8280XP_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_card_cfg = {
+ .name = "qhs_ufs_card_cfg",
+ .id = SC8280XP_SLAVE_UFS_CARD_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SC8280XP_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SC8280XP_SLAVE_USB3_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_1 = {
+ .name = "qhs_usb3_1",
+ .id = SC8280XP_SLAVE_USB3_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_mp = {
+ .name = "qhs_usb3_mp",
+ .id = SC8280XP_SLAVE_USB3_MP,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb4_host_0 = {
+ .name = "qhs_usb4_host_0",
+ .id = SC8280XP_SLAVE_USB4_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb4_host_1 = {
+ .name = "qhs_usb4_host_1",
+ .id = SC8280XP_SLAVE_USB4_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SC8280XP_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SC8280XP_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_r_cfg = {
+ .name = "qhs_vsense_ctrl_r_cfg",
+ .id = SC8280XP_SLAVE_VSENSE_CTRL_R_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a1_noc_cfg = {
+ .name = "qns_a1_noc_cfg",
+ .id = SC8280XP_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qns_a2_noc_cfg = {
+ .name = "qns_a2_noc_cfg",
+ .id = SC8280XP_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_A2NOC_CFG },
+};
+
+static struct qcom_icc_node qns_anoc_pcie_bridge_cfg = {
+ .name = "qns_anoc_pcie_bridge_cfg",
+ .id = SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_ddrss_cfg = {
+ .name = "qns_ddrss_cfg",
+ .id = SC8280XP_SLAVE_DDRSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_CNOC_DC_NOC },
+};
+
+static struct qcom_icc_node qns_mnoc_cfg = {
+ .name = "qns_mnoc_cfg",
+ .id = SC8280XP_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qns_snoc_cfg = {
+ .name = "qns_snoc_cfg",
+ .id = SC8280XP_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qns_snoc_sf_bridge_cfg = {
+ .name = "qns_snoc_sf_bridge_cfg",
+ .id = SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SC8280XP_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SC8280XP_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+ .name = "srvc_cnoc",
+ .id = SC8280XP_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .id = SC8280XP_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .id = SC8280XP_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_2a = {
+ .name = "xs_pcie_2a",
+ .id = SC8280XP_SLAVE_PCIE_2A,
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_2b = {
+ .name = "xs_pcie_2b",
+ .id = SC8280XP_SLAVE_PCIE_2B,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_pcie_3a = {
+ .name = "xs_pcie_3a",
+ .id = SC8280XP_SLAVE_PCIE_3A,
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_3b = {
+ .name = "xs_pcie_3b",
+ .id = SC8280XP_SLAVE_PCIE_3B,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_pcie_4 = {
+ .name = "xs_pcie_4",
+ .id = SC8280XP_SLAVE_PCIE_4,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SC8280XP_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_smss = {
+ .name = "xs_smss",
+ .id = SC8280XP_SLAVE_SMSS,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SC8280XP_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_llcc = {
+ .name = "qhs_llcc",
+ .id = SC8280XP_SLAVE_LLCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_gemnoc = {
+ .name = "qns_gemnoc",
+ .id = SC8280XP_SLAVE_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_GEM_NOC_CFG },
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+ .name = "qns_gem_noc_cnoc",
+ .id = SC8280XP_SLAVE_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SC8280XP_SLAVE_LLCC,
+ .channels = 8,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+ .name = "qns_pcie",
+ .id = SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node srvc_even_gemnoc = {
+ .name = "srvc_even_gemnoc",
+ .id = SC8280XP_SLAVE_SERVICE_GEM_NOC_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_odd_gemnoc = {
+ .name = "srvc_odd_gemnoc",
+ .id = SC8280XP_SLAVE_SERVICE_GEM_NOC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_sys_gemnoc = {
+ .name = "srvc_sys_gemnoc",
+ .id = SC8280XP_SLAVE_SERVICE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_core = {
+ .name = "qhs_lpass_core",
+ .id = SC8280XP_SLAVE_LPASS_CORE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_lpi = {
+ .name = "qhs_lpass_lpi",
+ .id = SC8280XP_SLAVE_LPASS_LPI_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_mpu = {
+ .name = "qhs_lpass_mpu",
+ .id = SC8280XP_SLAVE_LPASS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_top = {
+ .name = "qhs_lpass_top",
+ .id = SC8280XP_SLAVE_LPASS_TOP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_sysnoc = {
+ .name = "qns_sysnoc",
+ .id = SC8280XP_SLAVE_LPASS_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_LPASS_ANOC },
+};
+
+static struct qcom_icc_node srvc_niu_aml_noc = {
+ .name = "srvc_niu_aml_noc",
+ .id = SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_niu_lpass_agnoc = {
+ .name = "srvc_niu_lpass_agnoc",
+ .id = SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SC8280XP_SLAVE_EBI1,
+ .channels = 8,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SC8280XP_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .id = SC8280XP_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SC8280XP_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+ .name = "qns_nsp_gemnoc",
+ .id = SC8280XP_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qxs_nsp_xfr = {
+ .name = "qxs_nsp_xfr",
+ .id = SC8280XP_SLAVE_NSP_XFR,
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node service_nsp_noc = {
+ .name = "service_nsp_noc",
+ .id = SC8280XP_SLAVE_SERVICE_NSP_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_nspb_gemnoc = {
+ .name = "qns_nspb_gemnoc",
+ .id = SC8280XP_SLAVE_CDSPB_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_COMPUTE_NOC_1 },
+};
+
+static struct qcom_icc_node qxs_nspb_xfr = {
+ .name = "qxs_nspb_xfr",
+ .id = SC8280XP_SLAVE_NSPB_XFR,
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node service_nspb_noc = {
+ .name = "service_nspb_noc",
+ .id = SC8280XP_SLAVE_SERVICE_NSPB_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+ .name = "qns_gemnoc_gc",
+ .id = SC8280XP_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SC8280XP_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC8280XP_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SC8280XP_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 9,
+ .nodes = { &qnm_gemnoc_cnoc,
+ &qnm_gemnoc_pcie,
+ &xs_pcie_0,
+ &xs_pcie_1,
+ &xs_pcie_2a,
+ &xs_pcie_2b,
+ &xs_pcie_3a,
+ &xs_pcie_3b,
+ &xs_pcie_4
+ },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+ .name = "CN1",
+ .num_nodes = 67,
+ .nodes = { &qhs_ahb2phy0,
+ &qhs_ahb2phy1,
+ &qhs_ahb2phy2,
+ &qhs_aoss,
+ &qhs_apss,
+ &qhs_camera_cfg,
+ &qhs_clk_ctl,
+ &qhs_compute0_cfg,
+ &qhs_compute1_cfg,
+ &qhs_cpr_cx,
+ &qhs_cpr_mmcx,
+ &qhs_cpr_mx,
+ &qhs_cpr_nspcx,
+ &qhs_crypto0_cfg,
+ &qhs_cx_rdpm,
+ &qhs_dcc_cfg,
+ &qhs_display0_cfg,
+ &qhs_display1_cfg,
+ &qhs_emac0_cfg,
+ &qhs_emac1_cfg,
+ &qhs_gpuss_cfg,
+ &qhs_hwkm,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_ipc_router,
+ &qhs_lpass_cfg,
+ &qhs_mx_rdpm,
+ &qhs_mxc_rdpm,
+ &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg,
+ &qhs_pcie2a_cfg,
+ &qhs_pcie2b_cfg,
+ &qhs_pcie3a_cfg,
+ &qhs_pcie3b_cfg,
+ &qhs_pcie4_cfg,
+ &qhs_pcie_rsc_cfg,
+ &qhs_pdm,
+ &qhs_pimem_cfg,
+ &qhs_pka_wrapper_cfg,
+ &qhs_pmu_wrapper_cfg,
+ &qhs_qdss_cfg,
+ &qhs_sdc2,
+ &qhs_sdc4,
+ &qhs_security,
+ &qhs_smmuv3_cfg,
+ &qhs_smss_cfg,
+ &qhs_spss_cfg,
+ &qhs_tcsr,
+ &qhs_tlmm,
+ &qhs_ufs_card_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_usb3_1,
+ &qhs_usb3_mp,
+ &qhs_usb4_host_0,
+ &qhs_usb4_host_1,
+ &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qhs_vsense_ctrl_r_cfg,
+ &qns_a1_noc_cfg,
+ &qns_a2_noc_cfg,
+ &qns_anoc_pcie_bridge_cfg,
+ &qns_ddrss_cfg,
+ &qns_mnoc_cfg,
+ &qns_snoc_cfg,
+ &qns_snoc_sf_bridge_cfg,
+ &srvc_cnoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_cn2 = {
+ .name = "CN2",
+ .num_nodes = 4,
+ .nodes = { &qhs_qspi,
+ &qhs_qup0,
+ &qhs_qup1,
+ &qhs_qup2
+ },
+};
+
+static struct qcom_icc_bcm bcm_cn3 = {
+ .name = "CN3",
+ .num_nodes = 3,
+ .nodes = { &qxs_imem,
+ &xs_smss,
+ &xs_sys_tcu_cfg
+ },
+};
+
+static struct qcom_icc_bcm bcm_ip0 = {
+ .name = "IP0",
+ .num_nodes = 1,
+ .nodes = { &ipa_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .keepalive = true,
+ .num_nodes = 5,
+ .nodes = { &qnm_camnoc_hf,
+ &qnm_mdp0_0,
+ &qnm_mdp0_1,
+ &qnm_mdp1_0,
+ &qns_mem_noc_hf
+ },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .num_nodes = 8,
+ .nodes = { &qnm_rot_0,
+ &qnm_rot_1,
+ &qnm_video0,
+ &qnm_video1,
+ &qnm_video_cvp,
+ &qxm_camnoc_icp,
+ &qxm_camnoc_sf,
+ &qns_mem_noc_sf
+ },
+};
+
+static struct qcom_icc_bcm bcm_nsa0 = {
+ .name = "NSA0",
+ .num_nodes = 2,
+ .nodes = { &qns_nsp_gemnoc,
+ &qxs_nsp_xfr
+ },
+};
+
+static struct qcom_icc_bcm bcm_nsa1 = {
+ .name = "NSA1",
+ .num_nodes = 1,
+ .nodes = { &qxm_nsp },
+};
+
+static struct qcom_icc_bcm bcm_nsb0 = {
+ .name = "NSB0",
+ .num_nodes = 2,
+ .nodes = { &qns_nspb_gemnoc,
+ &qxs_nspb_xfr
+ },
+};
+
+static struct qcom_icc_bcm bcm_nsb1 = {
+ .name = "NSB1",
+ .num_nodes = 1,
+ .nodes = { &qxm_nspb },
+};
+
+static struct qcom_icc_bcm bcm_pci0 = {
+ .name = "PCI0",
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_gem_noc },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+ .name = "QUP1",
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+ .name = "QUP2",
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+ .name = "SH2",
+ .num_nodes = 1,
+ .nodes = { &chm_apps },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 1,
+ .nodes = { &qxs_pimem },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .num_nodes = 2,
+ .nodes = { &qns_a1noc_snoc,
+ &qnm_aggre1_noc
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .num_nodes = 2,
+ .nodes = { &qns_a2noc_snoc,
+ &qnm_aggre2_noc
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn5 = {
+ .name = "SN5",
+ .num_nodes = 2,
+ .nodes = { &qns_aggre_usb_snoc,
+ &qnm_aggre_usb_noc
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn9 = {
+ .name = "SN9",
+ .num_nodes = 2,
+ .nodes = { &qns_sysnoc,
+ &qnm_lpass_noc
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn10 = {
+ .name = "SN10",
+ .num_nodes = 1,
+ .nodes = { &xs_qdss_stm },
+};
+
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ &bcm_sn3,
+ &bcm_sn5,
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_EMAC_1] = &xm_emac_1,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [MASTER_USB3_1] = &xm_usb3_1,
+ [MASTER_USB3_MP] = &xm_usb3_mp,
+ [MASTER_USB4_0] = &xm_usb4_host0,
+ [MASTER_USB4_1] = &xm_usb4_host1,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_USB_NOC_SNOC] = &qns_aggre_usb_snoc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static const struct qcom_icc_desc sc8280xp_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_pci0,
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_A2NOC_CFG] = &qnm_a2noc_cfg,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_SENSORS_PROC] = &qxm_sensorss_q6,
+ [MASTER_SP] = &qxm_sp,
+ [MASTER_EMAC] = &xm_emac_0,
+ [MASTER_PCIE_0] = &xm_pcie3_0,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [MASTER_PCIE_2A] = &xm_pcie3_2a,
+ [MASTER_PCIE_2B] = &xm_pcie3_2b,
+ [MASTER_PCIE_3A] = &xm_pcie3_3a,
+ [MASTER_PCIE_3B] = &xm_pcie3_3b,
+ [MASTER_PCIE_4] = &xm_pcie3_4,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_UFS_CARD] = &xm_ufs_card,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_gem_noc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static const struct qcom_icc_desc sc8280xp_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+ &bcm_ip0,
+ &bcm_qup0,
+ &bcm_qup1,
+ &bcm_qup2,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+ [MASTER_IPA_CORE] = &ipa_core_master,
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_QUP_CORE_2] = &qup2_core_master,
+ [SLAVE_IPA_CORE] = &ipa_core_slave,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static const struct qcom_icc_desc sc8280xp_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+ &bcm_cn2,
+ &bcm_cn3,
+ &bcm_sn2,
+ &bcm_sn10,
+};
+
+static struct qcom_icc_node * const config_noc_nodes[] = {
+ [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [SLAVE_AHB2PHY_0] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_1] = &qhs_ahb2phy1,
+ [SLAVE_AHB2PHY_2] = &qhs_ahb2phy2,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute0_cfg,
+ [SLAVE_CDSP1_CFG] = &qhs_compute1_cfg,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
+ [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+ [SLAVE_CPR_NSPCX] = &qhs_cpr_nspcx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CX_RDPM] = &qhs_cx_rdpm,
+ [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display0_cfg,
+ [SLAVE_DISPLAY1_CFG] = &qhs_display1_cfg,
+ [SLAVE_EMAC_CFG] = &qhs_emac0_cfg,
+ [SLAVE_EMAC1_CFG] = &qhs_emac1_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_HWKM] = &qhs_hwkm,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_LPASS] = &qhs_lpass_cfg,
+ [SLAVE_MX_RDPM] = &qhs_mx_rdpm,
+ [SLAVE_MXC_RDPM] = &qhs_mxc_rdpm,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_PCIE_2A_CFG] = &qhs_pcie2a_cfg,
+ [SLAVE_PCIE_2B_CFG] = &qhs_pcie2b_cfg,
+ [SLAVE_PCIE_3A_CFG] = &qhs_pcie3a_cfg,
+ [SLAVE_PCIE_3B_CFG] = &qhs_pcie3b_cfg,
+ [SLAVE_PCIE_4_CFG] = &qhs_pcie4_cfg,
+ [SLAVE_PCIE_RSC_CFG] = &qhs_pcie_rsc_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PKA_WRAPPER_CFG] = &qhs_pka_wrapper_cfg,
+ [SLAVE_PMU_WRAPPER_CFG] = &qhs_pmu_wrapper_cfg,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SECURITY] = &qhs_security,
+ [SLAVE_SMMUV3_CFG] = &qhs_smmuv3_cfg,
+ [SLAVE_SMSS_CFG] = &qhs_smss_cfg,
+ [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0,
+ [SLAVE_USB3_1] = &qhs_usb3_1,
+ [SLAVE_USB3_MP] = &qhs_usb3_mp,
+ [SLAVE_USB4_0] = &qhs_usb4_host_0,
+ [SLAVE_USB4_1] = &qhs_usb4_host_1,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_VSENSE_CTRL_R_CFG] = &qhs_vsense_ctrl_r_cfg,
+ [SLAVE_A1NOC_CFG] = &qns_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qns_a2_noc_cfg,
+ [SLAVE_ANOC_PCIE_BRIDGE_CFG] = &qns_anoc_pcie_bridge_cfg,
+ [SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &qns_mnoc_cfg,
+ [SLAVE_SNOC_CFG] = &qns_snoc_cfg,
+ [SLAVE_SNOC_SF_BRIDGE_CFG] = &qns_snoc_sf_bridge_cfg,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+ [SLAVE_PCIE_2A] = &xs_pcie_2a,
+ [SLAVE_PCIE_2B] = &xs_pcie_2b,
+ [SLAVE_PCIE_3A] = &xs_pcie_3a,
+ [SLAVE_PCIE_3B] = &xs_pcie_3b,
+ [SLAVE_PCIE_4] = &xs_pcie_4,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_SMSS] = &xs_smss,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sc8280xp_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+ [SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
+};
+
+static const struct qcom_icc_desc sc8280xp_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+ .bcms = dc_noc_bcms,
+ .num_bcms = ARRAY_SIZE(dc_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh2,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_PCIE_TCU] = &alm_pcie_tcu,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_COMPUTE_NOC] = &qnm_cmpnoc0,
+ [MASTER_COMPUTE_NOC_1] = &qnm_cmpnoc1,
+ [MASTER_GEM_NOC_CFG] = &qnm_gemnoc_cfg,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_GEM_NOC_PCIE_CNOC] = &qns_pcie,
+ [SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc,
+ [SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc,
+ [SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
+};
+
+static const struct qcom_icc_desc sc8280xp_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
+ &bcm_sn9,
+};
+
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
+ [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
+ [MASTER_LPASS_PROC] = &qxm_lpass_dsp,
+ [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
+ [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
+ [SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu,
+ [SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top,
+ [SLAVE_LPASS_SNOC] = &qns_sysnoc,
+ [SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc,
+ [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
+};
+
+static const struct qcom_icc_desc sc8280xp_lpass_ag_noc = {
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+ .bcms = lpass_ag_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc sc8280xp_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_MDP0] = &qnm_mdp0_0,
+ [MASTER_MDP1] = &qnm_mdp0_1,
+ [MASTER_MDP_CORE1_0] = &qnm_mdp1_0,
+ [MASTER_MDP_CORE1_1] = &qnm_mdp1_1,
+ [MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg,
+ [MASTER_ROTATOR] = &qnm_rot_0,
+ [MASTER_ROTATOR_1] = &qnm_rot_1,
+ [MASTER_VIDEO_P0] = &qnm_video0,
+ [MASTER_VIDEO_P1] = &qnm_video1,
+ [MASTER_VIDEO_PROC] = &qnm_video_cvp,
+ [MASTER_CAMNOC_ICP] = &qxm_camnoc_icp,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static const struct qcom_icc_desc sc8280xp_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const nspa_noc_bcms[] = {
+ &bcm_nsa0,
+ &bcm_nsa1,
+};
+
+static struct qcom_icc_node * const nspa_noc_nodes[] = {
+ [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
+ [MASTER_CDSP_PROC] = &qxm_nsp,
+ [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+ [SLAVE_NSP_XFR] = &qxs_nsp_xfr,
+ [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
+};
+
+static const struct qcom_icc_desc sc8280xp_nspa_noc = {
+ .nodes = nspa_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nspa_noc_nodes),
+ .bcms = nspa_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nspa_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const nspb_noc_bcms[] = {
+ &bcm_nsb0,
+ &bcm_nsb1,
+};
+
+static struct qcom_icc_node * const nspb_noc_nodes[] = {
+ [MASTER_CDSPB_NOC_CFG] = &qhm_nspb_noc_config,
+ [MASTER_CDSP_PROC_B] = &qxm_nspb,
+ [SLAVE_CDSPB_MEM_NOC] = &qns_nspb_gemnoc,
+ [SLAVE_NSPB_XFR] = &qxs_nspb_xfr,
+ [SLAVE_SERVICE_NSPB_NOC] = &service_nspb_noc,
+};
+
+static const struct qcom_icc_desc sc8280xp_nspb_noc = {
+ .nodes = nspb_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nspb_noc_nodes),
+ .bcms = nspb_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nspb_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_main_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn3,
+ &bcm_sn4,
+ &bcm_sn5,
+ &bcm_sn9,
+};
+
+static struct qcom_icc_node * const system_noc_main_nodes[] = {
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_USB_NOC_SNOC] = &qnm_aggre_usb_noc,
+ [MASTER_LPASS_ANOC] = &qnm_lpass_noc,
+ [MASTER_SNOC_CFG] = &qnm_snoc_cfg,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+};
+
+static const struct qcom_icc_desc sc8280xp_system_noc_main = {
+ .nodes = system_noc_main_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_main_nodes),
+ .bcms = system_noc_main_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_main_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sc8280xp-aggre1-noc", .data = &sc8280xp_aggre1_noc, },
+ { .compatible = "qcom,sc8280xp-aggre2-noc", .data = &sc8280xp_aggre2_noc, },
+ { .compatible = "qcom,sc8280xp-clk-virt", .data = &sc8280xp_clk_virt, },
+ { .compatible = "qcom,sc8280xp-config-noc", .data = &sc8280xp_config_noc, },
+ { .compatible = "qcom,sc8280xp-dc-noc", .data = &sc8280xp_dc_noc, },
+ { .compatible = "qcom,sc8280xp-gem-noc", .data = &sc8280xp_gem_noc, },
+ { .compatible = "qcom,sc8280xp-lpass-ag-noc", .data = &sc8280xp_lpass_ag_noc, },
+ { .compatible = "qcom,sc8280xp-mc-virt", .data = &sc8280xp_mc_virt, },
+ { .compatible = "qcom,sc8280xp-mmss-noc", .data = &sc8280xp_mmss_noc, },
+ { .compatible = "qcom,sc8280xp-nspa-noc", .data = &sc8280xp_nspa_noc, },
+ { .compatible = "qcom,sc8280xp-nspb-noc", .data = &sc8280xp_nspb_noc, },
+ { .compatible = "qcom,sc8280xp-system-noc", .data = &sc8280xp_system_noc_main, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-sc8280xp",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("Qualcomm SC8280XP NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/sc8280xp.h b/drivers/interconnect/qcom/sc8280xp.h
new file mode 100644
index 000000000000..74d8fa412d65
--- /dev/null
+++ b/drivers/interconnect/qcom/sc8280xp.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SC8280XP_H
+#define __DRIVERS_INTERCONNECT_QCOM_SC8280XP_H
+
+#define SC8280XP_MASTER_GPU_TCU 0
+#define SC8280XP_MASTER_PCIE_TCU 1
+#define SC8280XP_MASTER_SYS_TCU 2
+#define SC8280XP_MASTER_APPSS_PROC 3
+#define SC8280XP_MASTER_IPA_CORE 4
+#define SC8280XP_MASTER_LLCC 5
+#define SC8280XP_MASTER_CNOC_LPASS_AG_NOC 6
+#define SC8280XP_MASTER_CDSP_NOC_CFG 7
+#define SC8280XP_MASTER_CDSPB_NOC_CFG 8
+#define SC8280XP_MASTER_QDSS_BAM 9
+#define SC8280XP_MASTER_QSPI_0 10
+#define SC8280XP_MASTER_QUP_0 11
+#define SC8280XP_MASTER_QUP_1 12
+#define SC8280XP_MASTER_QUP_2 13
+#define SC8280XP_MASTER_A1NOC_CFG 14
+#define SC8280XP_MASTER_A2NOC_CFG 15
+#define SC8280XP_MASTER_A1NOC_SNOC 16
+#define SC8280XP_MASTER_A2NOC_SNOC 17
+#define SC8280XP_MASTER_USB_NOC_SNOC 18
+#define SC8280XP_MASTER_CAMNOC_HF 19
+#define SC8280XP_MASTER_COMPUTE_NOC 20
+#define SC8280XP_MASTER_COMPUTE_NOC_1 21
+#define SC8280XP_MASTER_CNOC_DC_NOC 22
+#define SC8280XP_MASTER_GEM_NOC_CFG 23
+#define SC8280XP_MASTER_GEM_NOC_CNOC 24
+#define SC8280XP_MASTER_GEM_NOC_PCIE_SNOC 25
+#define SC8280XP_MASTER_GFX3D 26
+#define SC8280XP_MASTER_LPASS_ANOC 27
+#define SC8280XP_MASTER_MDP0 28
+#define SC8280XP_MASTER_MDP1 29
+#define SC8280XP_MASTER_MDP_CORE1_0 30
+#define SC8280XP_MASTER_MDP_CORE1_1 31
+#define SC8280XP_MASTER_CNOC_MNOC_CFG 32
+#define SC8280XP_MASTER_MNOC_HF_MEM_NOC 33
+#define SC8280XP_MASTER_MNOC_SF_MEM_NOC 34
+#define SC8280XP_MASTER_ANOC_PCIE_GEM_NOC 35
+#define SC8280XP_MASTER_ROTATOR 36
+#define SC8280XP_MASTER_ROTATOR_1 37
+#define SC8280XP_MASTER_SNOC_CFG 38
+#define SC8280XP_MASTER_SNOC_GC_MEM_NOC 39
+#define SC8280XP_MASTER_SNOC_SF_MEM_NOC 40
+#define SC8280XP_MASTER_VIDEO_P0 41
+#define SC8280XP_MASTER_VIDEO_P1 42
+#define SC8280XP_MASTER_VIDEO_PROC 43
+#define SC8280XP_MASTER_QUP_CORE_0 44
+#define SC8280XP_MASTER_QUP_CORE_1 45
+#define SC8280XP_MASTER_QUP_CORE_2 46
+#define SC8280XP_MASTER_CAMNOC_ICP 47
+#define SC8280XP_MASTER_CAMNOC_SF 48
+#define SC8280XP_MASTER_CRYPTO 49
+#define SC8280XP_MASTER_IPA 50
+#define SC8280XP_MASTER_LPASS_PROC 51
+#define SC8280XP_MASTER_CDSP_PROC 52
+#define SC8280XP_MASTER_CDSP_PROC_B 53
+#define SC8280XP_MASTER_PIMEM 54
+#define SC8280XP_MASTER_SENSORS_PROC 55
+#define SC8280XP_MASTER_SP 56
+#define SC8280XP_MASTER_EMAC 57
+#define SC8280XP_MASTER_EMAC_1 58
+#define SC8280XP_MASTER_GIC 59
+#define SC8280XP_MASTER_PCIE_0 60
+#define SC8280XP_MASTER_PCIE_1 61
+#define SC8280XP_MASTER_PCIE_2A 62
+#define SC8280XP_MASTER_PCIE_2B 63
+#define SC8280XP_MASTER_PCIE_3A 64
+#define SC8280XP_MASTER_PCIE_3B 65
+#define SC8280XP_MASTER_PCIE_4 66
+#define SC8280XP_MASTER_QDSS_ETR 67
+#define SC8280XP_MASTER_SDCC_2 68
+#define SC8280XP_MASTER_SDCC_4 69
+#define SC8280XP_MASTER_UFS_CARD 70
+#define SC8280XP_MASTER_UFS_MEM 71
+#define SC8280XP_MASTER_USB3_0 72
+#define SC8280XP_MASTER_USB3_1 73
+#define SC8280XP_MASTER_USB3_MP 74
+#define SC8280XP_MASTER_USB4_0 75
+#define SC8280XP_MASTER_USB4_1 76
+#define SC8280XP_SLAVE_EBI1 512
+#define SC8280XP_SLAVE_IPA_CORE 513
+#define SC8280XP_SLAVE_AHB2PHY_0 514
+#define SC8280XP_SLAVE_AHB2PHY_1 515
+#define SC8280XP_SLAVE_AHB2PHY_2 516
+#define SC8280XP_SLAVE_AOSS 517
+#define SC8280XP_SLAVE_APPSS 518
+#define SC8280XP_SLAVE_CAMERA_CFG 519
+#define SC8280XP_SLAVE_CLK_CTL 520
+#define SC8280XP_SLAVE_CDSP_CFG 521
+#define SC8280XP_SLAVE_CDSP1_CFG 522
+#define SC8280XP_SLAVE_RBCPR_CX_CFG 523
+#define SC8280XP_SLAVE_RBCPR_MMCX_CFG 524
+#define SC8280XP_SLAVE_RBCPR_MX_CFG 525
+#define SC8280XP_SLAVE_CPR_NSPCX 526
+#define SC8280XP_SLAVE_CRYPTO_0_CFG 527
+#define SC8280XP_SLAVE_CX_RDPM 528
+#define SC8280XP_SLAVE_DCC_CFG 529
+#define SC8280XP_SLAVE_DISPLAY_CFG 530
+#define SC8280XP_SLAVE_DISPLAY1_CFG 531
+#define SC8280XP_SLAVE_EMAC_CFG 532
+#define SC8280XP_SLAVE_EMAC1_CFG 533
+#define SC8280XP_SLAVE_GFX3D_CFG 534
+#define SC8280XP_SLAVE_HWKM 535
+#define SC8280XP_SLAVE_IMEM_CFG 536
+#define SC8280XP_SLAVE_IPA_CFG 537
+#define SC8280XP_SLAVE_IPC_ROUTER_CFG 538
+#define SC8280XP_SLAVE_LLCC_CFG 539
+#define SC8280XP_SLAVE_LPASS 540
+#define SC8280XP_SLAVE_LPASS_CORE_CFG 541
+#define SC8280XP_SLAVE_LPASS_LPI_CFG 542
+#define SC8280XP_SLAVE_LPASS_MPU_CFG 543
+#define SC8280XP_SLAVE_LPASS_TOP_CFG 544
+#define SC8280XP_SLAVE_MX_RDPM 545
+#define SC8280XP_SLAVE_MXC_RDPM 546
+#define SC8280XP_SLAVE_PCIE_0_CFG 547
+#define SC8280XP_SLAVE_PCIE_1_CFG 548
+#define SC8280XP_SLAVE_PCIE_2A_CFG 549
+#define SC8280XP_SLAVE_PCIE_2B_CFG 550
+#define SC8280XP_SLAVE_PCIE_3A_CFG 551
+#define SC8280XP_SLAVE_PCIE_3B_CFG 552
+#define SC8280XP_SLAVE_PCIE_4_CFG 553
+#define SC8280XP_SLAVE_PCIE_RSC_CFG 554
+#define SC8280XP_SLAVE_PDM 555
+#define SC8280XP_SLAVE_PIMEM_CFG 556
+#define SC8280XP_SLAVE_PKA_WRAPPER_CFG 557
+#define SC8280XP_SLAVE_PMU_WRAPPER_CFG 558
+#define SC8280XP_SLAVE_QDSS_CFG 559
+#define SC8280XP_SLAVE_QSPI_0 560
+#define SC8280XP_SLAVE_QUP_0 561
+#define SC8280XP_SLAVE_QUP_1 562
+#define SC8280XP_SLAVE_QUP_2 563
+#define SC8280XP_SLAVE_SDCC_2 564
+#define SC8280XP_SLAVE_SDCC_4 565
+#define SC8280XP_SLAVE_SECURITY 566
+#define SC8280XP_SLAVE_SMMUV3_CFG 567
+#define SC8280XP_SLAVE_SMSS_CFG 568
+#define SC8280XP_SLAVE_SPSS_CFG 569
+#define SC8280XP_SLAVE_TCSR 570
+#define SC8280XP_SLAVE_TLMM 571
+#define SC8280XP_SLAVE_UFS_CARD_CFG 572
+#define SC8280XP_SLAVE_UFS_MEM_CFG 573
+#define SC8280XP_SLAVE_USB3_0 574
+#define SC8280XP_SLAVE_USB3_1 575
+#define SC8280XP_SLAVE_USB3_MP 576
+#define SC8280XP_SLAVE_USB4_0 577
+#define SC8280XP_SLAVE_USB4_1 578
+#define SC8280XP_SLAVE_VENUS_CFG 579
+#define SC8280XP_SLAVE_VSENSE_CTRL_CFG 580
+#define SC8280XP_SLAVE_VSENSE_CTRL_R_CFG 581
+#define SC8280XP_SLAVE_A1NOC_CFG 582
+#define SC8280XP_SLAVE_A1NOC_SNOC 583
+#define SC8280XP_SLAVE_A2NOC_CFG 584
+#define SC8280XP_SLAVE_A2NOC_SNOC 585
+#define SC8280XP_SLAVE_USB_NOC_SNOC 586
+#define SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG 587
+#define SC8280XP_SLAVE_DDRSS_CFG 588
+#define SC8280XP_SLAVE_GEM_NOC_CNOC 589
+#define SC8280XP_SLAVE_GEM_NOC_CFG 590
+#define SC8280XP_SLAVE_SNOC_GEM_NOC_GC 591
+#define SC8280XP_SLAVE_SNOC_GEM_NOC_SF 592
+#define SC8280XP_SLAVE_LLCC 593
+#define SC8280XP_SLAVE_MNOC_HF_MEM_NOC 594
+#define SC8280XP_SLAVE_MNOC_SF_MEM_NOC 595
+#define SC8280XP_SLAVE_CNOC_MNOC_CFG 596
+#define SC8280XP_SLAVE_CDSP_MEM_NOC 597
+#define SC8280XP_SLAVE_CDSPB_MEM_NOC 598
+#define SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC 599
+#define SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC 600
+#define SC8280XP_SLAVE_SNOC_CFG 601
+#define SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG 602
+#define SC8280XP_SLAVE_LPASS_SNOC 603
+#define SC8280XP_SLAVE_QUP_CORE_0 604
+#define SC8280XP_SLAVE_QUP_CORE_1 605
+#define SC8280XP_SLAVE_QUP_CORE_2 606
+#define SC8280XP_SLAVE_IMEM 607
+#define SC8280XP_SLAVE_NSP_XFR 608
+#define SC8280XP_SLAVE_NSPB_XFR 609
+#define SC8280XP_SLAVE_PIMEM 610
+#define SC8280XP_SLAVE_SERVICE_NSP_NOC 611
+#define SC8280XP_SLAVE_SERVICE_NSPB_NOC 612
+#define SC8280XP_SLAVE_SERVICE_A1NOC 613
+#define SC8280XP_SLAVE_SERVICE_A2NOC 614
+#define SC8280XP_SLAVE_SERVICE_CNOC 615
+#define SC8280XP_SLAVE_SERVICE_GEM_NOC_1 616
+#define SC8280XP_SLAVE_SERVICE_MNOC 617
+#define SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC 618
+#define SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC 619
+#define SC8280XP_SLAVE_SERVICE_GEM_NOC_2 620
+#define SC8280XP_SLAVE_SERVICE_SNOC 621
+#define SC8280XP_SLAVE_SERVICE_GEM_NOC 622
+#define SC8280XP_SLAVE_PCIE_0 623
+#define SC8280XP_SLAVE_PCIE_1 624
+#define SC8280XP_SLAVE_PCIE_2A 625
+#define SC8280XP_SLAVE_PCIE_2B 626
+#define SC8280XP_SLAVE_PCIE_3A 627
+#define SC8280XP_SLAVE_PCIE_3B 628
+#define SC8280XP_SLAVE_PCIE_4 629
+#define SC8280XP_SLAVE_QDSS_STM 630
+#define SC8280XP_SLAVE_SMSS 631
+#define SC8280XP_SLAVE_TCU 632
+
+#endif
+
diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c
index 274a7139fe1a..8d879b0bcabc 100644
--- a/drivers/interconnect/qcom/sdm660.c
+++ b/drivers/interconnect/qcom/sdm660.c
@@ -1490,7 +1490,7 @@ static struct qcom_icc_node slv_srvc_snoc = {
.slv_rpm_id = 29,
};
-static struct qcom_icc_node *sdm660_a2noc_nodes[] = {
+static struct qcom_icc_node * const sdm660_a2noc_nodes[] = {
[MASTER_IPA] = &mas_ipa,
[MASTER_CNOC_A2NOC] = &mas_cnoc_a2noc,
[MASTER_SDCC_1] = &mas_sdcc_1,
@@ -1512,7 +1512,7 @@ static const struct regmap_config sdm660_a2noc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc sdm660_a2noc = {
+static const struct qcom_icc_desc sdm660_a2noc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_a2noc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_a2noc_nodes),
@@ -1521,7 +1521,7 @@ static struct qcom_icc_desc sdm660_a2noc = {
.regmap_cfg = &sdm660_a2noc_regmap_config,
};
-static struct qcom_icc_node *sdm660_bimc_nodes[] = {
+static struct qcom_icc_node * const sdm660_bimc_nodes[] = {
[MASTER_GNOC_BIMC] = &mas_gnoc_bimc,
[MASTER_OXILI] = &mas_oxili,
[MASTER_MNOC_BIMC] = &mas_mnoc_bimc,
@@ -1540,14 +1540,14 @@ static const struct regmap_config sdm660_bimc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc sdm660_bimc = {
+static const struct qcom_icc_desc sdm660_bimc = {
.type = QCOM_ICC_BIMC,
.nodes = sdm660_bimc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_bimc_nodes),
.regmap_cfg = &sdm660_bimc_regmap_config,
};
-static struct qcom_icc_node *sdm660_cnoc_nodes[] = {
+static struct qcom_icc_node * const sdm660_cnoc_nodes[] = {
[MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
[MASTER_QDSS_DAP] = &mas_qdss_dap,
[SLAVE_CNOC_A2NOC] = &slv_cnoc_a2noc,
@@ -1594,14 +1594,14 @@ static const struct regmap_config sdm660_cnoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc sdm660_cnoc = {
+static const struct qcom_icc_desc sdm660_cnoc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_cnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_cnoc_nodes),
.regmap_cfg = &sdm660_cnoc_regmap_config,
};
-static struct qcom_icc_node *sdm660_gnoc_nodes[] = {
+static struct qcom_icc_node * const sdm660_gnoc_nodes[] = {
[MASTER_APSS_PROC] = &mas_apss_proc,
[SLAVE_GNOC_BIMC] = &slv_gnoc_bimc,
[SLAVE_GNOC_SNOC] = &slv_gnoc_snoc,
@@ -1615,14 +1615,14 @@ static const struct regmap_config sdm660_gnoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc sdm660_gnoc = {
+static const struct qcom_icc_desc sdm660_gnoc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_gnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_gnoc_nodes),
.regmap_cfg = &sdm660_gnoc_regmap_config,
};
-static struct qcom_icc_node *sdm660_mnoc_nodes[] = {
+static struct qcom_icc_node * const sdm660_mnoc_nodes[] = {
[MASTER_CPP] = &mas_cpp,
[MASTER_JPEG] = &mas_jpeg,
[MASTER_MDP_P0] = &mas_mdp_p0,
@@ -1655,7 +1655,7 @@ static const struct regmap_config sdm660_mnoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc sdm660_mnoc = {
+static const struct qcom_icc_desc sdm660_mnoc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_mnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_mnoc_nodes),
@@ -1664,7 +1664,7 @@ static struct qcom_icc_desc sdm660_mnoc = {
.regmap_cfg = &sdm660_mnoc_regmap_config,
};
-static struct qcom_icc_node *sdm660_snoc_nodes[] = {
+static struct qcom_icc_node * const sdm660_snoc_nodes[] = {
[MASTER_QDSS_ETR] = &mas_qdss_etr,
[MASTER_QDSS_BAM] = &mas_qdss_bam,
[MASTER_SNOC_CFG] = &mas_snoc_cfg,
@@ -1692,7 +1692,7 @@ static const struct regmap_config sdm660_snoc_regmap_config = {
.fast_io = true,
};
-static struct qcom_icc_desc sdm660_snoc = {
+static const struct qcom_icc_desc sdm660_snoc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_snoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_snoc_nodes),
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index d2195079c228..954e7bd13fc4 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -175,12 +175,12 @@ DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gladiator_sodv, &xm_gic);
DEFINE_QBCM(bcm_sn14, "SN14", false, &qnm_pcie_anoc);
DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_memnoc);
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_sn9,
&bcm_qup0,
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
[MASTER_TSIF] = &qhm_tsif,
[MASTER_SDCC_2] = &xm_sdc2,
@@ -201,13 +201,13 @@ static const struct qcom_icc_desc sdm845_aggre1_noc = {
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_sn11,
&bcm_qup0,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_CNOC_A2NOC] = &qnm_cnoc,
@@ -230,11 +230,11 @@ static const struct qcom_icc_desc sdm845_aggre2_noc = {
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_SPDM] = &qhm_spdm,
[MASTER_TIC] = &qhm_tic,
[MASTER_SNOC_CNOC] = &qnm_snoc,
@@ -291,10 +291,10 @@ static const struct qcom_icc_desc sdm845_config_noc = {
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
@@ -307,10 +307,10 @@ static const struct qcom_icc_desc sdm845_dc_noc = {
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
-static struct qcom_icc_bcm *gladiator_noc_bcms[] = {
+static struct qcom_icc_bcm * const gladiator_noc_bcms[] = {
};
-static struct qcom_icc_node *gladiator_noc_nodes[] = {
+static struct qcom_icc_node * const gladiator_noc_nodes[] = {
[MASTER_APPSS_PROC] = &acm_l3,
[MASTER_GNOC_CFG] = &pm_gnoc_cfg,
[SLAVE_GNOC_SNOC] = &qns_gladiator_sodv,
@@ -325,7 +325,7 @@ static const struct qcom_icc_desc sdm845_gladiator_noc = {
.num_bcms = ARRAY_SIZE(gladiator_noc_bcms),
};
-static struct qcom_icc_bcm *mem_noc_bcms[] = {
+static struct qcom_icc_bcm * const mem_noc_bcms[] = {
&bcm_mc0,
&bcm_acv,
&bcm_sh0,
@@ -335,7 +335,7 @@ static struct qcom_icc_bcm *mem_noc_bcms[] = {
&bcm_sh5,
};
-static struct qcom_icc_node *mem_noc_nodes[] = {
+static struct qcom_icc_node * const mem_noc_nodes[] = {
[MASTER_TCU_0] = &acm_tcu,
[MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
[MASTER_GNOC_MEM_NOC] = &qnm_apps,
@@ -360,14 +360,14 @@ static const struct qcom_icc_desc sdm845_mem_noc = {
.num_bcms = ARRAY_SIZE(mem_noc_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
&bcm_mm3,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
[MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
@@ -394,7 +394,7 @@ static const struct qcom_icc_desc sdm845_mmss_noc = {
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn2,
@@ -411,7 +411,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn15,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
diff --git a/drivers/interconnect/qcom/sdx55.c b/drivers/interconnect/qcom/sdx55.c
index e3ac25a997b7..130a828c3873 100644
--- a/drivers/interconnect/qcom/sdx55.c
+++ b/drivers/interconnect/qcom/sdx55.c
@@ -99,11 +99,11 @@ DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_memnoc);
DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_memnoc_pcie);
DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_ipa, &xm_ipa2pcie_slv);
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_mc0,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI_CH0] = &ebi,
};
@@ -115,13 +115,13 @@ static const struct qcom_icc_desc sdx55_mc_virt = {
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mem_noc_bcms[] = {
+static struct qcom_icc_bcm * const mem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh3,
&bcm_sh4,
};
-static struct qcom_icc_node *mem_noc_nodes[] = {
+static struct qcom_icc_node * const mem_noc_nodes[] = {
[MASTER_TCU_0] = &acm_tcu,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_AMPSS_M0] = &xm_apps_rdwr,
@@ -137,7 +137,7 @@ static const struct qcom_icc_desc sdx55_mem_noc = {
.num_bcms = ARRAY_SIZE(mem_noc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_ce0,
&bcm_pn0,
&bcm_pn1,
@@ -156,7 +156,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn11,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_AUDIO] = &qhm_audio,
[MASTER_BLSP_1] = &qhm_blsp1,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
diff --git a/drivers/interconnect/qcom/sdx65.c b/drivers/interconnect/qcom/sdx65.c
new file mode 100644
index 000000000000..b16d31d53e9b
--- /dev/null
+++ b/drivers/interconnect/qcom/sdx65.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sdx65.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sdx65.h"
+
+DEFINE_QNODE(llcc_mc, SDX65_MASTER_LLCC, 1, 4, SDX65_SLAVE_EBI1);
+DEFINE_QNODE(acm_tcu, SDX65_MASTER_TCU_0, 1, 8, SDX65_SLAVE_LLCC, SDX65_SLAVE_MEM_NOC_SNOC, SDX65_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qnm_snoc_gc, SDX65_MASTER_SNOC_GC_MEM_NOC, 1, 16, SDX65_SLAVE_LLCC);
+DEFINE_QNODE(xm_apps_rdwr, SDX65_MASTER_APPSS_PROC, 1, 16, SDX65_SLAVE_LLCC, SDX65_SLAVE_MEM_NOC_SNOC, SDX65_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qhm_audio, SDX65_MASTER_AUDIO, 1, 4, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qhm_blsp1, SDX65_MASTER_BLSP_1, 1, 4, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qhm_qdss_bam, SDX65_MASTER_QDSS_BAM, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_TCU);
+DEFINE_QNODE(qhm_qpic, SDX65_MASTER_QPIC, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qhm_snoc_cfg, SDX65_MASTER_SNOC_CFG, 1, 4, SDX65_SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qhm_spmi_fetcher1, SDX65_MASTER_SPMI_FETCHER, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qnm_aggre_noc, SDX65_MASTER_ANOC_SNOC, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_APPSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_PCIE_0, SDX65_SLAVE_QDSS_STM, SDX65_SLAVE_TCU);
+DEFINE_QNODE(qnm_ipa, SDX65_MASTER_IPA, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_PCIE_0, SDX65_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_memnoc, SDX65_MASTER_MEM_NOC_SNOC, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_APPSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_IMEM, SDX65_SLAVE_QDSS_STM, SDX65_SLAVE_TCU);
+DEFINE_QNODE(qnm_memnoc_pcie, SDX65_MASTER_MEM_NOC_PCIE_SNOC, 1, 8, SDX65_SLAVE_PCIE_0);
+DEFINE_QNODE(qxm_crypto, SDX65_MASTER_CRYPTO, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(xm_ipa2pcie_slv, SDX65_MASTER_IPA_PCIE, 1, 8, SDX65_SLAVE_PCIE_0);
+DEFINE_QNODE(xm_pcie, SDX65_MASTER_PCIE_0, 1, 8, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(xm_qdss_etr, SDX65_MASTER_QDSS_ETR, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_TCU);
+DEFINE_QNODE(xm_sdc1, SDX65_MASTER_SDCC_1, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(xm_usb3, SDX65_MASTER_USB3, 1, 8, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(ebi, SDX65_SLAVE_EBI1, 1, 4);
+DEFINE_QNODE(qns_llcc, SDX65_SLAVE_LLCC, 1, 16, SDX65_MASTER_LLCC);
+DEFINE_QNODE(qns_memnoc_snoc, SDX65_SLAVE_MEM_NOC_SNOC, 1, 8, SDX65_MASTER_MEM_NOC_SNOC);
+DEFINE_QNODE(qns_sys_pcie, SDX65_SLAVE_MEM_NOC_PCIE_SNOC, 1, 8, SDX65_MASTER_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qhs_aoss, SDX65_SLAVE_AOSS, 1, 4);
+DEFINE_QNODE(qhs_apss, SDX65_SLAVE_APPSS, 1, 4);
+DEFINE_QNODE(qhs_audio, SDX65_SLAVE_AUDIO, 1, 4);
+DEFINE_QNODE(qhs_blsp1, SDX65_SLAVE_BLSP_1, 1, 4);
+DEFINE_QNODE(qhs_clk_ctl, SDX65_SLAVE_CLK_CTL, 1, 4);
+DEFINE_QNODE(qhs_crypto0_cfg, SDX65_SLAVE_CRYPTO_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_ddrss_cfg, SDX65_SLAVE_CNOC_DDRSS, 1, 4);
+DEFINE_QNODE(qhs_ecc_cfg, SDX65_SLAVE_ECC_CFG, 1, 4);
+DEFINE_QNODE(qhs_imem_cfg, SDX65_SLAVE_IMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipa, SDX65_SLAVE_IPA_CFG, 1, 4);
+DEFINE_QNODE(qhs_mss_cfg, SDX65_SLAVE_CNOC_MSS, 1, 4);
+DEFINE_QNODE(qhs_pcie_parf, SDX65_SLAVE_PCIE_PARF, 1, 4);
+DEFINE_QNODE(qhs_pdm, SDX65_SLAVE_PDM, 1, 4);
+DEFINE_QNODE(qhs_prng, SDX65_SLAVE_PRNG, 1, 4);
+DEFINE_QNODE(qhs_qdss_cfg, SDX65_SLAVE_QDSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_qpic, SDX65_SLAVE_QPIC, 1, 4);
+DEFINE_QNODE(qhs_sdc1, SDX65_SLAVE_SDCC_1, 1, 4);
+DEFINE_QNODE(qhs_snoc_cfg, SDX65_SLAVE_SNOC_CFG, 1, 4, SDX65_MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_spmi_fetcher, SDX65_SLAVE_SPMI_FETCHER, 1, 4);
+DEFINE_QNODE(qhs_spmi_vgi_coex, SDX65_SLAVE_SPMI_VGI_COEX, 1, 4);
+DEFINE_QNODE(qhs_tcsr, SDX65_SLAVE_TCSR, 1, 4);
+DEFINE_QNODE(qhs_tlmm, SDX65_SLAVE_TLMM, 1, 4);
+DEFINE_QNODE(qhs_usb3, SDX65_SLAVE_USB3, 1, 4);
+DEFINE_QNODE(qhs_usb3_phy, SDX65_SLAVE_USB3_PHY_CFG, 1, 4);
+DEFINE_QNODE(qns_aggre_noc, SDX65_SLAVE_ANOC_SNOC, 1, 8, SDX65_MASTER_ANOC_SNOC);
+DEFINE_QNODE(qns_snoc_memnoc, SDX65_SLAVE_SNOC_MEM_NOC_GC, 1, 16, SDX65_MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SDX65_SLAVE_IMEM, 1, 8);
+DEFINE_QNODE(srvc_snoc, SDX65_SLAVE_SERVICE_SNOC, 1, 4);
+DEFINE_QNODE(xs_pcie, SDX65_SLAVE_PCIE_0, 1, 8);
+DEFINE_QNODE(xs_qdss_stm, SDX65_SLAVE_QDSS_STM, 1, 4);
+DEFINE_QNODE(xs_sys_tcu_cfg, SDX65_SLAVE_TCU, 1, 8);
+
+DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+DEFINE_QBCM(bcm_pn0, "PN0", true, &qhm_snoc_cfg, &qhs_aoss, &qhs_apss, &qhs_audio, &qhs_blsp1, &qhs_clk_ctl, &qhs_crypto0_cfg, &qhs_ddrss_cfg, &qhs_ecc_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mss_cfg, &qhs_pcie_parf, &qhs_pdm, &qhs_prng, &qhs_qdss_cfg, &qhs_qpic, &qhs_sdc1, &qhs_snoc_cfg, &qhs_spmi_fetcher, &qhs_spmi_vgi_coex, &qhs_tcsr, &qhs_tlmm, &qhs_usb3, &qhs_usb3_phy, &srvc_snoc);
+DEFINE_QBCM(bcm_pn1, "PN1", false, &xm_sdc1);
+DEFINE_QBCM(bcm_pn2, "PN2", false, &qhm_audio, &qhm_spmi_fetcher1);
+DEFINE_QBCM(bcm_pn3, "PN3", false, &qhm_blsp1, &qhm_qpic);
+DEFINE_QBCM(bcm_pn4, "PN4", false, &qxm_crypto);
+DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_memnoc_snoc);
+DEFINE_QBCM(bcm_sh3, "SH3", false, &xm_apps_rdwr);
+DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_snoc_memnoc);
+DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+DEFINE_QBCM(bcm_sn2, "SN2", false, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn3, "SN3", false, &xs_sys_tcu_cfg);
+DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_pcie);
+DEFINE_QBCM(bcm_sn6, "SN6", false, &qhm_qdss_bam, &xm_qdss_etr);
+DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre_noc, &xm_pcie, &xm_usb3, &qns_aggre_noc);
+DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_memnoc);
+DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_memnoc_pcie);
+DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_ipa, &xm_ipa2pcie_slv);
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc sdx65_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh1,
+ &bcm_sh3,
+};
+
+static struct qcom_icc_node * const mem_noc_nodes[] = {
+ [MASTER_TCU_0] = &acm_tcu,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_APPSS_PROC] = &xm_apps_rdwr,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_sys_pcie,
+};
+
+static const struct qcom_icc_desc sdx65_mem_noc = {
+ .nodes = mem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mem_noc_nodes),
+ .bcms = mem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mem_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_pn0,
+ &bcm_pn1,
+ &bcm_pn2,
+ &bcm_pn3,
+ &bcm_pn4,
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn5,
+ &bcm_sn6,
+ &bcm_sn7,
+ &bcm_sn8,
+ &bcm_sn9,
+ &bcm_sn10,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_AUDIO] = &qhm_audio,
+ [MASTER_BLSP_1] = &qhm_blsp1,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QPIC] = &qhm_qpic,
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [MASTER_SPMI_FETCHER] = &qhm_spmi_fetcher1,
+ [MASTER_ANOC_SNOC] = &qnm_aggre_noc,
+ [MASTER_IPA] = &qnm_ipa,
+ [MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
+ [MASTER_MEM_NOC_PCIE_SNOC] = &qnm_memnoc_pcie,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_IPA_PCIE] = &xm_ipa2pcie_slv,
+ [MASTER_PCIE_0] = &xm_pcie,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_SDCC_1] = &xm_sdc1,
+ [MASTER_USB3] = &xm_usb3,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_AUDIO] = &qhs_audio,
+ [SLAVE_BLSP_1] = &qhs_blsp1,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_ECC_CFG] = &qhs_ecc_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_PCIE_PARF] = &qhs_pcie_parf,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QPIC] = &qhs_qpic,
+ [SLAVE_SDCC_1] = &qhs_sdc1,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_SPMI_FETCHER] = &qhs_spmi_fetcher,
+ [SLAVE_SPMI_VGI_COEX] = &qhs_spmi_vgi_coex,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_USB3] = &qhs_usb3,
+ [SLAVE_USB3_PHY_CFG] = &qhs_usb3_phy,
+ [SLAVE_ANOC_SNOC] = &qns_aggre_noc,
+ [SLAVE_SNOC_MEM_NOC_GC] = &qns_snoc_memnoc,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_PCIE_0] = &xs_pcie,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sdx65_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sdx65-mc-virt",
+ .data = &sdx65_mc_virt},
+ { .compatible = "qcom,sdx65-mem-noc",
+ .data = &sdx65_mem_noc},
+ { .compatible = "qcom,sdx65-system-noc",
+ .data = &sdx65_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-sdx65",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_DESCRIPTION("Qualcomm SDX65 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sdx65.h b/drivers/interconnect/qcom/sdx65.h
new file mode 100644
index 000000000000..5dca6e8b32c9
--- /dev/null
+++ b/drivers/interconnect/qcom/sdx65.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SDX65_H
+#define __DRIVERS_INTERCONNECT_QCOM_SDX65_H
+
+#define SDX65_MASTER_TCU_0 0
+#define SDX65_MASTER_LLCC 1
+#define SDX65_MASTER_AUDIO 2
+#define SDX65_MASTER_BLSP_1 3
+#define SDX65_MASTER_QDSS_BAM 4
+#define SDX65_MASTER_QPIC 5
+#define SDX65_MASTER_SNOC_CFG 6
+#define SDX65_MASTER_SPMI_FETCHER 7
+#define SDX65_MASTER_ANOC_SNOC 8
+#define SDX65_MASTER_IPA 9
+#define SDX65_MASTER_MEM_NOC_SNOC 10
+#define SDX65_MASTER_MEM_NOC_PCIE_SNOC 11
+#define SDX65_MASTER_SNOC_GC_MEM_NOC 12
+#define SDX65_MASTER_CRYPTO 13
+#define SDX65_MASTER_APPSS_PROC 14
+#define SDX65_MASTER_IPA_PCIE 15
+#define SDX65_MASTER_PCIE_0 16
+#define SDX65_MASTER_QDSS_ETR 17
+#define SDX65_MASTER_SDCC_1 18
+#define SDX65_MASTER_USB3 19
+#define SDX65_SLAVE_EBI1 512
+#define SDX65_SLAVE_AOSS 513
+#define SDX65_SLAVE_APPSS 514
+#define SDX65_SLAVE_AUDIO 515
+#define SDX65_SLAVE_BLSP_1 516
+#define SDX65_SLAVE_CLK_CTL 517
+#define SDX65_SLAVE_CRYPTO_0_CFG 518
+#define SDX65_SLAVE_CNOC_DDRSS 519
+#define SDX65_SLAVE_ECC_CFG 520
+#define SDX65_SLAVE_IMEM_CFG 521
+#define SDX65_SLAVE_IPA_CFG 522
+#define SDX65_SLAVE_CNOC_MSS 523
+#define SDX65_SLAVE_PCIE_PARF 524
+#define SDX65_SLAVE_PDM 525
+#define SDX65_SLAVE_PRNG 526
+#define SDX65_SLAVE_QDSS_CFG 527
+#define SDX65_SLAVE_QPIC 528
+#define SDX65_SLAVE_SDCC_1 529
+#define SDX65_SLAVE_SNOC_CFG 530
+#define SDX65_SLAVE_SPMI_FETCHER 531
+#define SDX65_SLAVE_SPMI_VGI_COEX 532
+#define SDX65_SLAVE_TCSR 533
+#define SDX65_SLAVE_TLMM 534
+#define SDX65_SLAVE_USB3 535
+#define SDX65_SLAVE_USB3_PHY_CFG 536
+#define SDX65_SLAVE_ANOC_SNOC 537
+#define SDX65_SLAVE_LLCC 538
+#define SDX65_SLAVE_MEM_NOC_SNOC 539
+#define SDX65_SLAVE_SNOC_MEM_NOC_GC 540
+#define SDX65_SLAVE_MEM_NOC_PCIE_SNOC 541
+#define SDX65_SLAVE_IMEM 542
+#define SDX65_SLAVE_SERVICE_SNOC 543
+#define SDX65_SLAVE_PCIE_0 544
+#define SDX65_SLAVE_QDSS_STM 545
+#define SDX65_SLAVE_TCU 546
+
+#endif
diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
index 745e3c36a61a..1d04a4bfea80 100644
--- a/drivers/interconnect/qcom/sm8150.c
+++ b/drivers/interconnect/qcom/sm8150.c
@@ -186,12 +186,12 @@ DEFINE_QBCM(bcm_sn12, "SN12", false, &qxm_pimem, &xm_gic);
DEFINE_QBCM(bcm_sn14, "SN14", false, &qns_pcie_mem_noc);
DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_gemnoc);
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_qup0,
&bcm_sn3,
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_EMAC] = &xm_emac,
@@ -202,21 +202,21 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
-static struct qcom_icc_desc sm8150_aggre1_noc = {
+static const struct qcom_icc_desc sm8150_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_qup0,
&bcm_sn14,
&bcm_sn3,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QSPI] = &qhm_qspi,
@@ -237,53 +237,53 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
-static struct qcom_icc_desc sm8150_aggre2_noc = {
+static const struct qcom_icc_desc sm8150_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *camnoc_virt_bcms[] = {
+static struct qcom_icc_bcm * const camnoc_virt_bcms[] = {
&bcm_mm1,
};
-static struct qcom_icc_node *camnoc_virt_nodes[] = {
+static struct qcom_icc_node * const camnoc_virt_nodes[] = {
[MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
[MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
[MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
[SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
};
-static struct qcom_icc_desc sm8150_camnoc_virt = {
+static const struct qcom_icc_desc sm8150_camnoc_virt = {
.nodes = camnoc_virt_nodes,
.num_nodes = ARRAY_SIZE(camnoc_virt_nodes),
.bcms = camnoc_virt_bcms,
.num_bcms = ARRAY_SIZE(camnoc_virt_bcms),
};
-static struct qcom_icc_bcm *compute_noc_bcms[] = {
+static struct qcom_icc_bcm * const compute_noc_bcms[] = {
&bcm_co0,
&bcm_co1,
};
-static struct qcom_icc_node *compute_noc_nodes[] = {
+static struct qcom_icc_node * const compute_noc_nodes[] = {
[MASTER_NPU] = &qnm_npu,
[SLAVE_CDSP_MEM_NOC] = &qns_cdsp_mem_noc,
};
-static struct qcom_icc_desc sm8150_compute_noc = {
+static const struct qcom_icc_desc sm8150_compute_noc = {
.nodes = compute_noc_nodes,
.num_nodes = ARRAY_SIZE(compute_noc_nodes),
.bcms = compute_noc_bcms,
.num_bcms = ARRAY_SIZE(compute_noc_bcms),
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_SPDM] = &qhm_spdm,
[SNOC_CNOC_MAS] = &qnm_snoc,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
@@ -340,30 +340,30 @@ static struct qcom_icc_node *config_noc_nodes[] = {
[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
};
-static struct qcom_icc_desc sm8150_config_noc = {
+static const struct qcom_icc_desc sm8150_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qhs_memnoc,
};
-static struct qcom_icc_desc sm8150_dc_noc = {
+static const struct qcom_icc_desc sm8150_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
@@ -371,7 +371,7 @@ static struct qcom_icc_bcm *gem_noc_bcms[] = {
&bcm_sh5,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_AMPSS_M0] = &acm_apps,
[MASTER_GPU_TCU] = &acm_gpu_tcu,
[MASTER_SYS_TCU] = &acm_sys_tcu,
@@ -391,54 +391,54 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
[SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
};
-static struct qcom_icc_desc sm8150_gem_noc = {
+static const struct qcom_icc_desc sm8150_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
+static struct qcom_icc_bcm * const ipa_virt_bcms[] = {
&bcm_ip0,
};
-static struct qcom_icc_node *ipa_virt_nodes[] = {
+static struct qcom_icc_node * const ipa_virt_nodes[] = {
[MASTER_IPA_CORE] = &ipa_core_master,
[SLAVE_IPA_CORE] = &ipa_core_slave,
};
-static struct qcom_icc_desc sm8150_ipa_virt = {
+static const struct qcom_icc_desc sm8150_ipa_virt = {
.nodes = ipa_virt_nodes,
.num_nodes = ARRAY_SIZE(ipa_virt_nodes),
.bcms = ipa_virt_bcms,
.num_bcms = ARRAY_SIZE(ipa_virt_bcms),
};
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI_CH0] = &ebi,
};
-static struct qcom_icc_desc sm8150_mc_virt = {
+static const struct qcom_icc_desc sm8150_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
&bcm_mm3,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
[MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
@@ -454,14 +454,14 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
-static struct qcom_icc_desc sm8150_mmss_noc = {
+static const struct qcom_icc_desc sm8150_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn11,
@@ -475,7 +475,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn9,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[A1NOC_SNOC_MAS] = &qnm_aggre1_noc,
[A2NOC_SNOC_MAS] = &qnm_aggre2_noc,
@@ -495,7 +495,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static struct qcom_icc_desc sm8150_system_noc = {
+static const struct qcom_icc_desc sm8150_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
index aa707582ea01..5cdb058fa095 100644
--- a/drivers/interconnect/qcom/sm8250.c
+++ b/drivers/interconnect/qcom/sm8250.c
@@ -195,12 +195,12 @@ DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_gemnoc_pcie);
DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_gemnoc);
DEFINE_QBCM(bcm_sn12, "SN12", false, &qns_pcie_modem_mem_noc, &qns_pcie_mem_noc);
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_qup0,
&bcm_sn12,
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
[MASTER_QSPI_0] = &qhm_qspi,
[MASTER_QUP_1] = &qhm_qup1,
@@ -216,20 +216,20 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
-static struct qcom_icc_desc sm8250_aggre1_noc = {
+static const struct qcom_icc_desc sm8250_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_qup0,
&bcm_sn12,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_0] = &qhm_qup0,
@@ -246,35 +246,35 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
-static struct qcom_icc_desc sm8250_aggre2_noc = {
+static const struct qcom_icc_desc sm8250_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *compute_noc_bcms[] = {
+static struct qcom_icc_bcm * const compute_noc_bcms[] = {
&bcm_co0,
&bcm_co2,
};
-static struct qcom_icc_node *compute_noc_nodes[] = {
+static struct qcom_icc_node * const compute_noc_nodes[] = {
[MASTER_NPU] = &qnm_npu,
[SLAVE_CDSP_MEM_NOC] = &qns_cdsp_mem_noc,
};
-static struct qcom_icc_desc sm8250_compute_noc = {
+static const struct qcom_icc_desc sm8250_compute_noc = {
.nodes = compute_noc_nodes,
.num_nodes = ARRAY_SIZE(compute_noc_nodes),
.bcms = compute_noc_bcms,
.num_bcms = ARRAY_SIZE(compute_noc_bcms),
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[SNOC_CNOC_MAS] = &qnm_snoc,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
[SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
@@ -329,37 +329,37 @@ static struct qcom_icc_node *config_noc_nodes[] = {
[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
};
-static struct qcom_icc_desc sm8250_config_noc = {
+static const struct qcom_icc_desc sm8250_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qhs_memnoc,
};
-static struct qcom_icc_desc sm8250_dc_noc = {
+static const struct qcom_icc_desc sm8250_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
&bcm_sh4,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_AMPSS_M0] = &chm_apps,
@@ -379,54 +379,54 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
[SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
};
-static struct qcom_icc_desc sm8250_gem_noc = {
+static const struct qcom_icc_desc sm8250_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
+static struct qcom_icc_bcm * const ipa_virt_bcms[] = {
&bcm_ip0,
};
-static struct qcom_icc_node *ipa_virt_nodes[] = {
+static struct qcom_icc_node * const ipa_virt_nodes[] = {
[MASTER_IPA_CORE] = &ipa_core_master,
[SLAVE_IPA_CORE] = &ipa_core_slave,
};
-static struct qcom_icc_desc sm8250_ipa_virt = {
+static const struct qcom_icc_desc sm8250_ipa_virt = {
.nodes = ipa_virt_nodes,
.num_nodes = ARRAY_SIZE(ipa_virt_nodes),
.bcms = ipa_virt_bcms,
.num_bcms = ARRAY_SIZE(ipa_virt_bcms),
};
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI_CH0] = &ebi,
};
-static struct qcom_icc_desc sm8250_mc_virt = {
+static const struct qcom_icc_desc sm8250_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
&bcm_mm3,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
@@ -442,17 +442,17 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
-static struct qcom_icc_desc sm8250_mmss_noc = {
+static const struct qcom_icc_desc sm8250_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_bcm *npu_noc_bcms[] = {
+static struct qcom_icc_bcm * const npu_noc_bcms[] = {
};
-static struct qcom_icc_node *npu_noc_nodes[] = {
+static struct qcom_icc_node * const npu_noc_nodes[] = {
[MASTER_NPU_SYS] = &amm_npu_sys,
[MASTER_NPU_CDP] = &amm_npu_sys_cdp_w,
[MASTER_NPU_NOC_CFG] = &qhm_cfg,
@@ -468,14 +468,14 @@ static struct qcom_icc_node *npu_noc_nodes[] = {
[SLAVE_SERVICE_NPU_NOC] = &srvc_noc,
};
-static struct qcom_icc_desc sm8250_npu_noc = {
+static const struct qcom_icc_desc sm8250_npu_noc = {
.nodes = npu_noc_nodes,
.num_nodes = ARRAY_SIZE(npu_noc_nodes),
.bcms = npu_noc_bcms,
.num_bcms = ARRAY_SIZE(npu_noc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn11,
@@ -489,7 +489,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn9,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[A1NOC_SNOC_MAS] = &qnm_aggre1_noc,
[A2NOC_SNOC_MAS] = &qnm_aggre2_noc,
@@ -511,7 +511,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static struct qcom_icc_desc sm8250_system_noc = {
+static const struct qcom_icc_desc sm8250_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
index c79f93a1ac73..5398e7c8d826 100644
--- a/drivers/interconnect/qcom/sm8350.c
+++ b/drivers/interconnect/qcom/sm8350.c
@@ -198,10 +198,10 @@ DEFINE_QBCM(bcm_mm4_disp, "MM4", false, &qns_mem_noc_sf_disp);
DEFINE_QBCM(bcm_mm5_disp, "MM5", false, &qxm_rot_disp);
DEFINE_QBCM(bcm_sh0_disp, "SH0", false, &qns_llcc_disp);
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_QSPI_0] = &qhm_qspi,
[MASTER_QUP_1] = &qhm_qup1,
[MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
@@ -213,21 +213,21 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
-static struct qcom_icc_desc sm8350_aggre1_noc = {
+static const struct qcom_icc_desc sm8350_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_sn5,
&bcm_sn6,
&bcm_sn14,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_QUP_2] = &qhm_qup2,
@@ -244,14 +244,14 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
-static struct qcom_icc_desc sm8350_aggre2_noc = {
+static const struct qcom_icc_desc sm8350_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
&bcm_cn1,
&bcm_cn2,
@@ -259,7 +259,7 @@ static struct qcom_icc_bcm *config_noc_bcms[] = {
&bcm_sn4,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
@@ -323,30 +323,30 @@ static struct qcom_icc_node *config_noc_nodes[] = {
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static struct qcom_icc_desc sm8350_config_noc = {
+static const struct qcom_icc_desc sm8350_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
};
-static struct qcom_icc_desc sm8350_dc_noc = {
+static const struct qcom_icc_desc sm8350_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
@@ -354,7 +354,7 @@ static struct qcom_icc_bcm *gem_noc_bcms[] = {
&bcm_sh0_disp,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_APPSS_PROC] = &chm_apps,
@@ -379,17 +379,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
[SLAVE_LLCC_DISP] = &qns_llcc_disp,
};
-static struct qcom_icc_desc sm8350_gem_noc = {
+static const struct qcom_icc_desc sm8350_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
};
-static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
[MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
[SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
[SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
@@ -399,35 +399,35 @@ static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
[SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
};
-static struct qcom_icc_desc sm8350_lpass_ag_noc = {
+static const struct qcom_icc_desc sm8350_lpass_ag_noc = {
.nodes = lpass_ag_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
.bcms = lpass_ag_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
};
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
&bcm_acv_disp,
&bcm_mc0_disp,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
[MASTER_LLCC_DISP] = &llcc_mc_disp,
[SLAVE_EBI1_DISP] = &ebi_disp,
};
-static struct qcom_icc_desc sm8350_mc_virt = {
+static const struct qcom_icc_desc sm8350_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm4,
@@ -438,7 +438,7 @@ static struct qcom_icc_bcm *mmss_noc_bcms[] = {
&bcm_mm5_disp,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
[MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
@@ -459,40 +459,40 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
[SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp,
};
-static struct qcom_icc_desc sm8350_mmss_noc = {
+static const struct qcom_icc_desc sm8350_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_bcm *nsp_noc_bcms[] = {
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
&bcm_co0,
&bcm_co3,
};
-static struct qcom_icc_node *nsp_noc_nodes[] = {
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
[MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
[MASTER_CDSP_PROC] = &qxm_nsp,
[SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
[SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
};
-static struct qcom_icc_desc sm8350_compute_noc = {
+static const struct qcom_icc_desc sm8350_compute_noc = {
.nodes = nsp_noc_nodes,
.num_nodes = ARRAY_SIZE(nsp_noc_nodes),
.bcms = nsp_noc_bcms,
.num_bcms = ARRAY_SIZE(nsp_noc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn2,
&bcm_sn7,
&bcm_sn8,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
[MASTER_SNOC_CFG] = &qnm_snoc_cfg,
@@ -503,7 +503,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
};
-static struct qcom_icc_desc sm8350_system_noc = {
+static const struct qcom_icc_desc sm8350_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
index 8d99ee6421df..7e3d372b712f 100644
--- a/drivers/interconnect/qcom/sm8450.c
+++ b/drivers/interconnect/qcom/sm8450.c
@@ -1526,10 +1526,10 @@ static struct qcom_icc_bcm bcm_sh1_disp = {
.nodes = { &qnm_pcie_disp },
};
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_QSPI_0] = &qhm_qspi,
[MASTER_QUP_1] = &qhm_qup1,
[MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
@@ -1540,18 +1540,18 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
-static struct qcom_icc_desc sm8450_aggre1_noc = {
+static const struct qcom_icc_desc sm8450_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_QUP_2] = &qhm_qup2,
@@ -1567,20 +1567,20 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
-static struct qcom_icc_desc sm8450_aggre2_noc = {
+static const struct qcom_icc_desc sm8450_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *clk_virt_bcms[] = {
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
&bcm_qup0,
&bcm_qup1,
&bcm_qup2,
};
-static struct qcom_icc_node *clk_virt_nodes[] = {
+static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
@@ -1589,18 +1589,18 @@ static struct qcom_icc_node *clk_virt_nodes[] = {
[SLAVE_QUP_CORE_2] = &qup2_core_slave,
};
-static struct qcom_icc_desc sm8450_clk_virt = {
+static const struct qcom_icc_desc sm8450_clk_virt = {
.nodes = clk_virt_nodes,
.num_nodes = ARRAY_SIZE(clk_virt_nodes),
.bcms = clk_virt_bcms,
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
[SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
@@ -1658,21 +1658,21 @@ static struct qcom_icc_node *config_noc_nodes[] = {
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static struct qcom_icc_desc sm8450_config_noc = {
+static const struct qcom_icc_desc sm8450_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh1,
&bcm_sh0_disp,
&bcm_sh1_disp,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_APPSS_PROC] = &chm_apps,
@@ -1693,17 +1693,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
[SLAVE_LLCC_DISP] = &qns_llcc_disp,
};
-static struct qcom_icc_desc sm8450_gem_noc = {
+static const struct qcom_icc_desc sm8450_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
};
-static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
[MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
[MASTER_LPASS_PROC] = &qxm_lpass_dsp,
[SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
@@ -1715,42 +1715,42 @@ static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
[SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
};
-static struct qcom_icc_desc sm8450_lpass_ag_noc = {
+static const struct qcom_icc_desc sm8450_lpass_ag_noc = {
.nodes = lpass_ag_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
.bcms = lpass_ag_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
};
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
&bcm_acv_disp,
&bcm_mc0_disp,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
[MASTER_LLCC_DISP] = &llcc_mc_disp,
[SLAVE_EBI1_DISP] = &ebi_disp,
};
-static struct qcom_icc_desc sm8450_mc_virt = {
+static const struct qcom_icc_desc sm8450_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm0_disp,
&bcm_mm1_disp,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
[MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
@@ -1771,36 +1771,36 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
[SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp,
};
-static struct qcom_icc_desc sm8450_mmss_noc = {
+static const struct qcom_icc_desc sm8450_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_bcm *nsp_noc_bcms[] = {
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
&bcm_co0,
};
-static struct qcom_icc_node *nsp_noc_nodes[] = {
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
[MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
[MASTER_CDSP_PROC] = &qxm_nsp,
[SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
[SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
};
-static struct qcom_icc_desc sm8450_nsp_noc = {
+static const struct qcom_icc_desc sm8450_nsp_noc = {
.nodes = nsp_noc_nodes,
.num_nodes = ARRAY_SIZE(nsp_noc_nodes),
.bcms = nsp_noc_bcms,
.num_bcms = ARRAY_SIZE(nsp_noc_bcms),
};
-static struct qcom_icc_bcm *pcie_anoc_bcms[] = {
+static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
&bcm_sn7,
};
-static struct qcom_icc_node *pcie_anoc_nodes[] = {
+static struct qcom_icc_node * const pcie_anoc_nodes[] = {
[MASTER_PCIE_ANOC_CFG] = &qnm_pcie_anoc_cfg,
[MASTER_PCIE_0] = &xm_pcie3_0,
[MASTER_PCIE_1] = &xm_pcie3_1,
@@ -1808,14 +1808,14 @@ static struct qcom_icc_node *pcie_anoc_nodes[] = {
[SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc,
};
-static struct qcom_icc_desc sm8450_pcie_anoc = {
+static const struct qcom_icc_desc sm8450_pcie_anoc = {
.nodes = pcie_anoc_nodes,
.num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
.bcms = pcie_anoc_bcms,
.num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn2,
@@ -1823,7 +1823,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn4,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_GIC_AHB] = &qhm_gic,
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
@@ -1836,7 +1836,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
};
-static struct qcom_icc_desc sm8450_system_noc = {
+static const struct qcom_icc_desc sm8450_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
@@ -1848,7 +1848,7 @@ static int qnoc_probe(struct platform_device *pdev)
const struct qcom_icc_desc *desc;
struct icc_onecell_data *data;
struct icc_provider *provider;
- struct qcom_icc_node **qnodes;
+ struct qcom_icc_node * const *qnodes;
struct qcom_icc_provider *qp;
struct icc_node *node;
size_t num_nodes, i;
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 47108ed44fbb..72d0f5e2f651 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -407,6 +407,7 @@
/* IOMMU IVINFO */
#define IOMMU_IVINFO_OFFSET 36
#define IOMMU_IVINFO_EFRSUP BIT(0)
+#define IOMMU_IVINFO_DMA_REMAP BIT(1)
/* IOMMU Feature Reporting Field (for IVHD type 10h */
#define IOMMU_FEAT_GASUP_SHIFT 6
@@ -449,6 +450,9 @@ extern struct irq_remap_table **irq_lookup_table;
/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;
+/* IVRS indicates that pre-boot remapping was enabled */
+extern bool amdr_ivrs_remap_support;
+
/* kmem_cache to get tables with 128 byte alignement */
extern struct kmem_cache *amd_iommu_irq_cache;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 1a3ad58ba846..1d08f87e734b 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -83,7 +83,7 @@
#define ACPI_DEVFLAG_LINT1 0x80
#define ACPI_DEVFLAG_ATSDIS 0x10000000
-#define LOOP_TIMEOUT 100000
+#define LOOP_TIMEOUT 2000000
/*
* ACPI table definitions
*
@@ -181,6 +181,7 @@ u32 amd_iommu_max_pasid __read_mostly = ~0;
bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;
+bool amdr_ivrs_remap_support __read_mostly;
bool amd_iommu_force_isolation __read_mostly;
@@ -325,6 +326,8 @@ static void __init early_iommu_features_init(struct amd_iommu *iommu,
{
if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
iommu->features = h->efr_reg;
+ if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
+ amdr_ivrs_remap_support = true;
}
/* Access to l1 and l2 indexed register spaces */
@@ -1985,8 +1988,7 @@ static int __init amd_iommu_init_pci(void)
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
- if (!ret)
- print_iommu_info();
+ print_iommu_info();
out:
return ret;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index b47220ac09ea..840831d5d2ad 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1838,20 +1838,10 @@ void amd_iommu_domain_update(struct protection_domain *domain)
amd_iommu_domain_flush_complete(domain);
}
-static void __init amd_iommu_init_dma_ops(void)
-{
- if (iommu_default_passthrough() || sme_me_mask)
- x86_swiotlb_enable = true;
- else
- x86_swiotlb_enable = false;
-}
-
int __init amd_iommu_init_api(void)
{
int err;
- amd_iommu_init_dma_ops();
-
err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
if (err)
return err;
@@ -2165,6 +2155,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return (irq_remapping_enabled == 1);
case IOMMU_CAP_NOEXEC:
return false;
+ case IOMMU_CAP_PRE_BOOT_PROTECTION:
+ return amdr_ivrs_remap_support;
default:
break;
}
@@ -2274,6 +2266,12 @@ static int amd_iommu_def_domain_type(struct device *dev)
return 0;
}
+static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
+{
+ /* IOMMU_PTE_FC is always set */
+ return true;
+}
+
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
@@ -2296,6 +2294,7 @@ const struct iommu_ops amd_iommu_ops = {
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
.iotlb_sync = amd_iommu_iotlb_sync,
.free = amd_iommu_domain_free,
+ .enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
}
};
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index e56b137ceabd..afb3efd565b7 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -956,6 +956,7 @@ static void __exit amd_iommu_v2_exit(void)
{
struct device_state *dev_state, *next;
unsigned long flags;
+ LIST_HEAD(freelist);
if (!amd_iommu_v2_supported())
return;
@@ -975,11 +976,20 @@ static void __exit amd_iommu_v2_exit(void)
put_device_state(dev_state);
list_del(&dev_state->list);
- free_device_state(dev_state);
+ list_add_tail(&dev_state->list, &freelist);
}
spin_unlock_irqrestore(&state_lock, flags);
+ /*
+ * Since free_device_state waits on the count to be zero,
+ * we need to free dev_state outside the spinlock.
+ */
+ list_for_each_entry_safe(dev_state, next, &freelist, list) {
+ list_del(&dev_state->list);
+ free_device_state(dev_state);
+ }
+
destroy_workqueue(iommu_wq);
}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index c623dae1e115..1ef7bbb4acf3 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -6,6 +6,7 @@
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
+#include <linux/sched/mm.h>
#include <linux/slab.h>
#include "arm-smmu-v3.h"
@@ -96,9 +97,14 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_ctx_desc *ret = NULL;
+ /* Don't free the mm until we release the ASID */
+ mmgrab(mm);
+
asid = arm64_mm_context_get(mm);
- if (!asid)
- return ERR_PTR(-ESRCH);
+ if (!asid) {
+ err = -ESRCH;
+ goto out_drop_mm;
+ }
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
if (!cd) {
@@ -165,6 +171,8 @@ out_free_cd:
kfree(cd);
out_put_context:
arm64_mm_context_put(mm);
+out_drop_mm:
+ mmdrop(mm);
return err < 0 ? ERR_PTR(err) : ret;
}
@@ -173,6 +181,7 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
if (arm_smmu_free_asid(cd)) {
/* Unpin ASID */
arm64_mm_context_put(cd->mm);
+ mmdrop(cd->mm);
kfree(cd);
}
}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 627a3ed5ee8f..88817a3376ef 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3770,6 +3770,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Base address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
if (resource_size(res) < arm_smmu_resource_size(smmu)) {
dev_err(dev, "MMIO region too small (%pr)\n", res);
return -EINVAL;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index 2c25cce38060..658f3cc83278 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -211,7 +211,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
smmu->impl = &calxeda_impl;
- if (of_device_is_compatible(np, "nvidia,tegra194-smmu") ||
+ if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
+ of_device_is_compatible(np, "nvidia,tegra194-smmu") ||
of_device_is_compatible(np, "nvidia,tegra186-smmu"))
return nvidia_smmu_impl_init(smmu);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index ba6298c7140e..7820711c4560 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -408,6 +408,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,sc7180-smmu-500" },
{ .compatible = "qcom,sc7280-smmu-500" },
{ .compatible = "qcom,sc8180x-smmu-500" },
+ { .compatible = "qcom,sc8280xp-smmu-500" },
{ .compatible = "qcom,sdm630-smmu-v2" },
{ .compatible = "qcom,sdm845-smmu-500" },
{ .compatible = "qcom,sm6125-smmu-500" },
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 568cce590ccc..2ed3594f384e 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -1574,6 +1574,9 @@ static int arm_smmu_def_domain_type(struct device *dev)
struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
const struct arm_smmu_impl *impl = cfg->smmu->impl;
+ if (using_legacy_binding)
+ return IOMMU_DOMAIN_IDENTITY;
+
if (impl && impl->def_domain_type)
return impl->def_domain_type(dev);
@@ -2092,11 +2095,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (err)
return err;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ioaddr = res->start;
- smmu->base = devm_ioremap_resource(dev, res);
+ smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
+ ioaddr = res->start;
/*
* The resource size should effectively match the value of SMMU_TOP;
* stash that temporarily until we know PAGESIZE to validate it with.
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 09f6e1c0f9c0..f90251572a5d 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -20,6 +20,7 @@
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
+#include <linux/list_sort.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@@ -414,6 +415,15 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
return 0;
}
+static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
+ const struct list_head *b)
+{
+ struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
+ struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);
+
+ return res_a->res->start > res_b->res->start;
+}
+
static int iova_reserve_pci_windows(struct pci_dev *dev,
struct iova_domain *iovad)
{
@@ -432,6 +442,7 @@ static int iova_reserve_pci_windows(struct pci_dev *dev,
}
/* Get reserved DMA windows from host bridge */
+ list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
resource_list_for_each_entry(window, &bridge->dma_ranges) {
end = window->res->start - window->offset;
resv_iova:
@@ -440,7 +451,7 @@ resv_iova:
hi = iova_pfn(iovad, end);
reserve_iova(iovad, lo, hi);
} else if (end < start) {
- /* dma_ranges list should be sorted */
+ /* DMA ranges should be non-overlapping */
dev_err(&dev->dev,
"Failed to reserve IOVA [%pa-%pa]\n",
&start, &end);
@@ -776,6 +787,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
dma_addr_t iova;
+ ssize_t ret;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, domain))
@@ -813,8 +825,8 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
- if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
- < size)
+ ret = iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot);
+ if (ret < 0 || ret < size)
goto out_free_sg;
sgt->sgl->dma_address = iova;
@@ -971,6 +983,11 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
void *padding_start;
size_t padding_size, aligned_size;
+ if (!is_swiotlb_active(dev)) {
+ dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
+ return DMA_MAPPING_ERROR;
+ }
+
aligned_size = iova_align(iovad, size);
phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
iova_mask(iovad), dir, attrs);
@@ -1209,7 +1226,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* implementation - it knows better than we do.
*/
ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
- if (ret < iova_len)
+ if (ret < 0 || ret < iova_len)
goto out_free_iova;
return __finalise_sg(dev, sg, nents, iova);
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index fc38b1fba7cf..0d03f837a5d4 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -11,6 +11,9 @@
#include <linux/fsl/guts.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <asm/mpc85xx.h>
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 69a4a62dc3b9..94b4589dc67c 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -9,6 +9,7 @@
#include "fsl_pamu_domain.h"
+#include <linux/platform_device.h>
#include <sysdev/fsl_pci.h>
/*
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index ba9a63cac47c..44016594831d 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -533,33 +533,6 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
rcu_read_unlock();
}
-static bool domain_update_iommu_snooping(struct intel_iommu *skip)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool ret = true;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (iommu != skip) {
- /*
- * If the hardware is operating in the scalable mode,
- * the snooping control is always supported since we
- * always set PASID-table-entry.PGSNP bit if the domain
- * is managed outside (UNMANAGED).
- */
- if (!sm_supported(iommu) &&
- !ecap_sc_support(iommu->ecap)) {
- ret = false;
- break;
- }
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
static int domain_update_iommu_superpage(struct dmar_domain *domain,
struct intel_iommu *skip)
{
@@ -641,7 +614,6 @@ static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
domain_update_iommu_coherency(domain);
- domain->iommu_snooping = domain_update_iommu_snooping(NULL);
domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
/*
@@ -2460,7 +2432,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
if (level == 5)
flags |= PASID_FLAG_FL5LP;
- if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
+ if (domain->force_snooping)
flags |= PASID_FLAG_PAGE_SNOOP;
return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
@@ -2474,64 +2446,6 @@ static bool dev_is_real_dma_subdevice(struct device *dev)
pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
}
-static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
- int bus, int devfn,
- struct device *dev,
- struct dmar_domain *domain)
-{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- info->domain = domain;
- spin_lock(&iommu->lock);
- ret = domain_attach_iommu(domain, iommu);
- spin_unlock(&iommu->lock);
- if (ret) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
- return NULL;
- }
- list_add(&info->link, &domain->devices);
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- /* PASID table is mandatory for a PCI device in scalable mode. */
- if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
- ret = intel_pasid_alloc_table(dev);
- if (ret) {
- dev_err(dev, "PASID table allocation failed\n");
- dmar_remove_one_dev_info(dev);
- return NULL;
- }
-
- /* Setup the PASID entry for requests without PASID: */
- spin_lock_irqsave(&iommu->lock, flags);
- if (hw_pass_through && domain_type_is_si(domain))
- ret = intel_pasid_setup_pass_through(iommu, domain,
- dev, PASID_RID2PASID);
- else if (domain_use_first_level(domain))
- ret = domain_setup_first_level(iommu, domain, dev,
- PASID_RID2PASID);
- else
- ret = intel_pasid_setup_second_level(iommu, domain,
- dev, PASID_RID2PASID);
- spin_unlock_irqrestore(&iommu->lock, flags);
- if (ret) {
- dev_err(dev, "Setup RID2PASID failed\n");
- dmar_remove_one_dev_info(dev);
- return NULL;
- }
- }
-
- if (dev && domain_context_mapping(domain, dev)) {
- dev_err(dev, "Domain context map failed\n");
- dmar_remove_one_dev_info(dev);
- return NULL;
- }
-
- return domain;
-}
-
static int iommu_domain_identity_map(struct dmar_domain *domain,
unsigned long first_vpfn,
unsigned long last_vpfn)
@@ -2607,17 +2521,62 @@ static int __init si_domain_init(int hw)
static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
- struct dmar_domain *ndomain;
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu;
+ unsigned long flags;
u8 bus, devfn;
+ int ret;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
- ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
- if (ndomain != domain)
- return -EBUSY;
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info->domain = domain;
+ spin_lock(&iommu->lock);
+ ret = domain_attach_iommu(domain, iommu);
+ spin_unlock(&iommu->lock);
+ if (ret) {
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ return ret;
+ }
+ list_add(&info->link, &domain->devices);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ /* PASID table is mandatory for a PCI device in scalable mode. */
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
+ ret = intel_pasid_alloc_table(dev);
+ if (ret) {
+ dev_err(dev, "PASID table allocation failed\n");
+ dmar_remove_one_dev_info(dev);
+ return ret;
+ }
+
+ /* Setup the PASID entry for requests without PASID: */
+ spin_lock_irqsave(&iommu->lock, flags);
+ if (hw_pass_through && domain_type_is_si(domain))
+ ret = intel_pasid_setup_pass_through(iommu, domain,
+ dev, PASID_RID2PASID);
+ else if (domain_use_first_level(domain))
+ ret = domain_setup_first_level(iommu, domain, dev,
+ PASID_RID2PASID);
+ else
+ ret = intel_pasid_setup_second_level(iommu, domain,
+ dev, PASID_RID2PASID);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ if (ret) {
+ dev_err(dev, "Setup RID2PASID failed\n");
+ dmar_remove_one_dev_info(dev);
+ return ret;
+ }
+ }
+
+ ret = domain_context_mapping(domain, dev);
+ if (ret) {
+ dev_err(dev, "Domain context map failed\n");
+ dmar_remove_one_dev_info(dev);
+ return ret;
+ }
return 0;
}
@@ -3607,12 +3566,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
iommu->name);
return -ENXIO;
}
- if (!ecap_sc_support(iommu->ecap) &&
- domain_update_iommu_snooping(iommu)) {
- pr_warn("%s: Doesn't support snooping.\n",
- iommu->name);
- return -ENXIO;
- }
+
sp = domain_update_iommu_superpage(NULL, iommu) - 1;
if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
pr_warn("%s: Doesn't support large page.\n",
@@ -4304,7 +4258,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
domain->agaw = width_to_agaw(adjust_width);
domain->iommu_coherency = false;
- domain->iommu_snooping = false;
domain->iommu_superpage = 0;
domain->max_addr = 0;
@@ -4369,6 +4322,9 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
if (!iommu)
return -ENODEV;
+ if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
+ return -EOPNOTSUPP;
+
/* check if this iommu agaw is sufficient for max mapped address */
addr_width = agaw_to_width(iommu->agaw);
if (addr_width > cap_mgaw(iommu->cap))
@@ -4443,7 +4399,7 @@ static int intel_iommu_map(struct iommu_domain *domain,
prot |= DMA_PTE_READ;
if (iommu_prot & IOMMU_WRITE)
prot |= DMA_PTE_WRITE;
- if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
+ if (dmar_domain->set_pte_snp)
prot |= DMA_PTE_SNP;
max_addr = iova + size;
@@ -4566,12 +4522,71 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
return phys;
}
+static bool domain_support_force_snooping(struct dmar_domain *domain)
+{
+ struct device_domain_info *info;
+ bool support = true;
+
+ assert_spin_locked(&device_domain_lock);
+ list_for_each_entry(info, &domain->devices, link) {
+ if (!ecap_sc_support(info->iommu->ecap)) {
+ support = false;
+ break;
+ }
+ }
+
+ return support;
+}
+
+static void domain_set_force_snooping(struct dmar_domain *domain)
+{
+ struct device_domain_info *info;
+
+ assert_spin_locked(&device_domain_lock);
+
+ /*
+ * Second level page table supports per-PTE snoop control. The
+ * iommu_map() interface will handle this by setting SNP bit.
+ */
+ if (!domain_use_first_level(domain)) {
+ domain->set_pte_snp = true;
+ return;
+ }
+
+ list_for_each_entry(info, &domain->devices, link)
+ intel_pasid_setup_page_snoop_control(info->iommu, info->dev,
+ PASID_RID2PASID);
+}
+
+static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long flags;
+
+ if (dmar_domain->force_snooping)
+ return true;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ if (!domain_support_force_snooping(dmar_domain)) {
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ return false;
+ }
+
+ domain_set_force_snooping(dmar_domain);
+ dmar_domain->force_snooping = true;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return true;
+}
+
static bool intel_iommu_capable(enum iommu_cap cap)
{
if (cap == IOMMU_CAP_CACHE_COHERENCY)
- return domain_update_iommu_snooping(NULL);
+ return true;
if (cap == IOMMU_CAP_INTR_REMAP)
return irq_remapping_enabled == 1;
+ if (cap == IOMMU_CAP_PRE_BOOT_PROTECTION)
+ return dmar_platform_optin();
return false;
}
@@ -4919,6 +4934,7 @@ const struct iommu_ops intel_iommu_ops = {
.iotlb_sync = intel_iommu_tlb_sync,
.iova_to_phys = intel_iommu_iova_to_phys,
.free = intel_iommu_domain_free,
+ .enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
}
};
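
Illustrative sketch, not part of the patch: the per-domain force_snooping path above replaces the old global iommu_snooping flag. A minimal caller, assuming iommu_enforce_cache_coherency() is the core wrapper around the new enforce_cache_coherency op introduced elsewhere in this series:

#include <linux/errno.h>
#include <linux/iommu.h>

/*
 * Hypothetical caller: request that all DMA through @domain snoops the
 * CPU caches, regardless of what the endpoint puts in its TLPs.
 */
static int example_make_domain_coherent(struct iommu_domain *domain)
{
	if (!iommu_enforce_cache_coherency(domain))
		return -EOPNOTSUPP;	/* an attached IOMMU lacks ecap SC */
	return 0;
}

Note that for second-level domains the Intel driver only sets DMA_PTE_SNP in page-table entries created after force_snooping is set, so a caller would enforce coherency before populating the domain with mappings.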
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index f8d215d85695..cb4c1d0cf25c 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -710,9 +710,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
pasid_set_fault_enable(pte);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
- if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
- pasid_set_pgsnp(pte);
-
/*
* Since it is a second level only translation setup, we should
* set SRE bit as well (addresses are expected to be GPAs).
@@ -762,3 +759,45 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
return 0;
}
+
+/*
+ * Set the page snoop control for a pasid entry which has been set up.
+ */
+void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
+ struct device *dev, u32 pasid)
+{
+ struct pasid_entry *pte;
+ u16 did;
+
+ spin_lock(&iommu->lock);
+ pte = intel_pasid_get_entry(dev, pasid);
+ if (WARN_ON(!pte || !pasid_pte_is_present(pte))) {
+ spin_unlock(&iommu->lock);
+ return;
+ }
+
+ pasid_set_pgsnp(pte);
+ did = pasid_get_domain_id(pte);
+ spin_unlock(&iommu->lock);
+
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(pte, sizeof(*pte));
+
+ /*
+	 * VT-d spec 3.4, table 23, gives guidance for cache invalidation:
+ *
+ * - PASID-selective-within-Domain PASID-cache invalidation
+ * - PASID-selective PASID-based IOTLB invalidation
+ * - If (pasid is RID_PASID)
+ * - Global Device-TLB invalidation to affected functions
+ * Else
+ * - PASID-based Device-TLB invalidation (with S=1 and
+ * Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
+ */
+ pasid_cache_invalidation_with_pasid(iommu, did, pasid);
+ qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+
+ /* Device IOTLB doesn't need to be flushed in caching mode. */
+ if (!cap_caching_mode(iommu->cap))
+ devtlb_invalidation_with_pasid(iommu, dev, pasid);
+}
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index ab4408c824a5..583ea67fc783 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -123,4 +123,6 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
bool fault_ignore);
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid);
void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid);
+void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
+ struct device *dev, u32 pasid);
#endif /* __INTEL_PASID_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 857d4c2fd1a2..847ad47a2dfd 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -18,7 +18,6 @@
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
-#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
@@ -40,14 +39,16 @@ struct iommu_group {
struct kobject *devices_kobj;
struct list_head devices;
struct mutex mutex;
- struct blocking_notifier_head notifier;
void *iommu_data;
void (*iommu_data_release)(void *iommu_data);
char *name;
int id;
struct iommu_domain *default_domain;
+ struct iommu_domain *blocking_domain;
struct iommu_domain *domain;
struct list_head entry;
+ unsigned int owner_cnt;
+ void *owner;
};
struct group_device {
@@ -82,8 +83,8 @@ static int __iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
-static void __iommu_detach_group(struct iommu_domain *domain,
- struct iommu_group *group);
+static int __iommu_group_set_domain(struct iommu_group *group,
+ struct iommu_domain *new_domain);
static int iommu_create_device_direct_mappings(struct iommu_group *group,
struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
@@ -294,7 +295,11 @@ int iommu_probe_device(struct device *dev)
mutex_lock(&group->mutex);
iommu_alloc_default_domain(group, dev);
- if (group->default_domain) {
+ /*
+ * If device joined an existing group which has been claimed, don't
+ * attach the default domain.
+ */
+ if (group->default_domain && !group->owner) {
ret = __iommu_attach_device(group->default_domain, dev);
if (ret) {
mutex_unlock(&group->mutex);
@@ -599,6 +604,8 @@ static void iommu_group_release(struct kobject *kobj)
if (group->default_domain)
iommu_domain_free(group->default_domain);
+ if (group->blocking_domain)
+ iommu_domain_free(group->blocking_domain);
kfree(group->name);
kfree(group);
@@ -633,7 +640,6 @@ struct iommu_group *iommu_group_alloc(void)
mutex_init(&group->mutex);
INIT_LIST_HEAD(&group->devices);
INIT_LIST_HEAD(&group->entry);
- BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
if (ret < 0) {
@@ -906,10 +912,6 @@ rename:
if (ret)
goto err_put_group;
- /* Notify any listeners about change to group. */
- blocking_notifier_call_chain(&group->notifier,
- IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
-
trace_add_device_to_group(group->id, dev);
dev_info(dev, "Adding to iommu group %d\n", group->id);
@@ -951,10 +953,6 @@ void iommu_group_remove_device(struct device *dev)
dev_info(dev, "Removing from iommu group %d\n", group->id);
- /* Pre-notify listeners that a device is being removed. */
- blocking_notifier_call_chain(&group->notifier,
- IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
-
mutex_lock(&group->mutex);
list_for_each_entry(tmp_device, &group->devices, list) {
if (tmp_device->dev == dev) {
@@ -1077,36 +1075,6 @@ void iommu_group_put(struct iommu_group *group)
EXPORT_SYMBOL_GPL(iommu_group_put);
/**
- * iommu_group_register_notifier - Register a notifier for group changes
- * @group: the group to watch
- * @nb: notifier block to signal
- *
- * This function allows iommu group users to track changes in a group.
- * See include/linux/iommu.h for actions sent via this notifier. Caller
- * should hold a reference to the group throughout notifier registration.
- */
-int iommu_group_register_notifier(struct iommu_group *group,
- struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(&group->notifier, nb);
-}
-EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
-
-/**
- * iommu_group_unregister_notifier - Unregister a notifier
- * @group: the group to watch
- * @nb: notifier block to signal
- *
- * Unregister a previously registered group notifier block.
- */
-int iommu_group_unregister_notifier(struct iommu_group *group,
- struct notifier_block *nb)
-{
- return blocking_notifier_chain_unregister(&group->notifier, nb);
-}
-EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
-
-/**
* iommu_register_device_fault_handler() - Register a device fault handler
* @dev: the device
* @handler: the fault handler
@@ -1651,14 +1619,8 @@ static int remove_iommu_group(struct device *dev, void *data)
static int iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- unsigned long group_action = 0;
struct device *dev = data;
- struct iommu_group *group;
- /*
- * ADD/DEL call into iommu driver ops if provided, which may
- * result in ADD/DEL notifiers to group->notifier
- */
if (action == BUS_NOTIFY_ADD_DEVICE) {
int ret;
@@ -1669,34 +1631,6 @@ static int iommu_bus_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
- /*
- * Remaining BUS_NOTIFYs get filtered and republished to the
- * group, if anyone is listening
- */
- group = iommu_group_get(dev);
- if (!group)
- return 0;
-
- switch (action) {
- case BUS_NOTIFY_BIND_DRIVER:
- group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
- break;
- case BUS_NOTIFY_BOUND_DRIVER:
- group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
- break;
- case BUS_NOTIFY_UNBIND_DRIVER:
- group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
- break;
- case BUS_NOTIFY_UNBOUND_DRIVER:
- group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
- break;
- }
-
- if (group_action)
- blocking_notifier_call_chain(&group->notifier,
- group_action, dev);
-
- iommu_group_put(group);
return 0;
}
@@ -1913,6 +1847,29 @@ bool iommu_present(struct bus_type *bus)
}
EXPORT_SYMBOL_GPL(iommu_present);
+/**
+ * device_iommu_capable() - check for a general IOMMU capability
+ * @dev: device to which the capability would be relevant, if available
+ * @cap: IOMMU capability
+ *
+ * Return: true if an IOMMU is present and supports the given capability
+ * for the given device, otherwise false.
+ */
+bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
+{
+ const struct iommu_ops *ops;
+
+ if (!dev->iommu || !dev->iommu->iommu_dev)
+ return false;
+
+ ops = dev_iommu_ops(dev);
+ if (!ops->capable)
+ return false;
+
+ return ops->capable(cap);
+}
+EXPORT_SYMBOL_GPL(device_iommu_capable);
+
bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
if (!bus->iommu_ops || !bus->iommu_ops->capable)
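
A hedged usage sketch, not part of the patch: device_iommu_capable() answers the capability query through the device's own IOMMU instance instead of the whole bus. The policy shown below is hypothetical:

#include <linux/device.h>
#include <linux/iommu.h>

/*
 * Hypothetical consumer: decide whether @dev was protected from DMA
 * before the kernel took over.  With the Intel driver above this
 * reports dmar_platform_optin().
 */
static bool example_dev_has_pre_boot_protection(struct device *dev)
{
	return device_iommu_capable(dev, IOMMU_CAP_PRE_BOOT_PROTECTION);
}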
@@ -1983,6 +1940,24 @@ void iommu_domain_free(struct iommu_domain *domain)
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
+/*
+ * Put the group's domain back to the appropriate core-owned domain - either the
+ * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
+ */
+static void __iommu_group_set_core_domain(struct iommu_group *group)
+{
+ struct iommu_domain *new_domain;
+ int ret;
+
+ if (group->owner)
+ new_domain = group->blocking_domain;
+ else
+ new_domain = group->default_domain;
+
+ ret = __iommu_group_set_domain(group, new_domain);
+ WARN(ret, "iommu driver failed to attach the default/blocking domain");
+}
+
static int __iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
@@ -2039,9 +2014,6 @@ static void __iommu_detach_device(struct iommu_domain *domain,
if (iommu_is_attach_deferred(dev))
return;
- if (unlikely(domain->ops->detach_dev == NULL))
- return;
-
domain->ops->detach_dev(domain, dev);
trace_detach_device_from_domain(dev);
}
@@ -2055,12 +2027,10 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
return;
mutex_lock(&group->mutex);
- if (iommu_group_device_count(group) != 1) {
- WARN_ON(1);
+ if (WARN_ON(domain != group->domain) ||
+ WARN_ON(iommu_group_device_count(group) != 1))
goto out_unlock;
- }
-
- __iommu_detach_group(domain, group);
+ __iommu_group_set_core_domain(group);
out_unlock:
mutex_unlock(&group->mutex);
@@ -2116,7 +2086,8 @@ static int __iommu_attach_group(struct iommu_domain *domain,
{
int ret;
- if (group->default_domain && group->domain != group->default_domain)
+ if (group->domain && group->domain != group->default_domain &&
+ group->domain != group->blocking_domain)
return -EBUSY;
ret = __iommu_group_for_each_dev(group, domain,
@@ -2148,34 +2119,49 @@ static int iommu_group_do_detach_device(struct device *dev, void *data)
return 0;
}
-static void __iommu_detach_group(struct iommu_domain *domain,
- struct iommu_group *group)
+static int __iommu_group_set_domain(struct iommu_group *group,
+ struct iommu_domain *new_domain)
{
int ret;
- if (!group->default_domain) {
- __iommu_group_for_each_dev(group, domain,
+ if (group->domain == new_domain)
+ return 0;
+
+ /*
+ * New drivers should support default domains and so the detach_dev() op
+ * will never be called. Otherwise the NULL domain represents some
+ * platform specific behavior.
+ */
+ if (!new_domain) {
+ if (WARN_ON(!group->domain->ops->detach_dev))
+ return -EINVAL;
+ __iommu_group_for_each_dev(group, group->domain,
iommu_group_do_detach_device);
group->domain = NULL;
- return;
+ return 0;
}
- if (group->domain == group->default_domain)
- return;
-
- /* Detach by re-attaching to the default domain */
- ret = __iommu_group_for_each_dev(group, group->default_domain,
+ /*
+ * Changing the domain is done by calling attach_dev() on the new
+ * domain. This switch does not have to be atomic and DMA can be
+ * discarded during the transition. DMA must only be able to access
+ * either new_domain or group->domain, never something else.
+ *
+	 * Note that this is called in error unwind paths; attaching to a
+	 * domain that has already been attached cannot fail.
+ */
+ ret = __iommu_group_for_each_dev(group, new_domain,
iommu_group_do_attach_device);
- if (ret != 0)
- WARN_ON(1);
- else
- group->domain = group->default_domain;
+ if (ret)
+ return ret;
+ group->domain = new_domain;
+ return 0;
}
void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
mutex_lock(&group->mutex);
- __iommu_detach_group(domain, group);
+ __iommu_group_set_core_domain(group);
mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);
@@ -3102,3 +3088,167 @@ out:
return ret;
}
+
+/**
+ * iommu_device_use_default_domain() - Device driver wants to handle device
+ * DMA through the kernel DMA API.
+ * @dev: The device.
+ *
+ * The device driver about to bind @dev wants to do DMA through the kernel
+ * DMA API. Return 0 if it is allowed, otherwise an error.
+ */
+int iommu_device_use_default_domain(struct device *dev)
+{
+ struct iommu_group *group = iommu_group_get(dev);
+ int ret = 0;
+
+ if (!group)
+ return 0;
+
+ mutex_lock(&group->mutex);
+ if (group->owner_cnt) {
+ if (group->domain != group->default_domain ||
+ group->owner) {
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+ }
+
+ group->owner_cnt++;
+
+unlock_out:
+ mutex_unlock(&group->mutex);
+ iommu_group_put(group);
+
+ return ret;
+}
+
+/**
+ * iommu_device_unuse_default_domain() - Device driver stops handling device
+ * DMA through the kernel DMA API.
+ * @dev: The device.
+ *
+ * The device driver no longer wants to do DMA through the kernel DMA API.
+ * It must be called after iommu_device_use_default_domain().
+ */
+void iommu_device_unuse_default_domain(struct device *dev)
+{
+ struct iommu_group *group = iommu_group_get(dev);
+
+ if (!group)
+ return;
+
+ mutex_lock(&group->mutex);
+ if (!WARN_ON(!group->owner_cnt))
+ group->owner_cnt--;
+
+ mutex_unlock(&group->mutex);
+ iommu_group_put(group);
+}
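
A sketch for illustration, not part of the patch: these two helpers are meant to bracket driver binding so a kernel driver doing DMA-API DMA cannot bind while the group is claimed for user-space ownership. The real call sites live in the driver core elsewhere in this series; the pairing and names below are hypothetical:

#include <linux/device.h>
#include <linux/iommu.h>

static int example_do_probe(struct device *dev)
{
	return 0;	/* stand-in for the real probe path */
}

static int example_bind_driver(struct device *dev)
{
	int ret;

	ret = iommu_device_use_default_domain(dev);
	if (ret)
		return ret;	/* -EBUSY: group claimed for user DMA ownership */

	ret = example_do_probe(dev);
	if (ret)
		iommu_device_unuse_default_domain(dev);
	return ret;
}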
+
+static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
+{
+ struct group_device *dev =
+ list_first_entry(&group->devices, struct group_device, list);
+
+ if (group->blocking_domain)
+ return 0;
+
+ group->blocking_domain =
+ __iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
+ if (!group->blocking_domain) {
+ /*
+		 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED,
+		 * create an empty domain instead.
+ */
+ group->blocking_domain = __iommu_domain_alloc(
+ dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
+ if (!group->blocking_domain)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * iommu_group_claim_dma_owner() - Set DMA ownership of a group
+ * @group: The group.
+ * @owner: Caller specified pointer. Used for exclusive ownership.
+ *
+ * This is to support backward compatibility for vfio, which manages
+ * DMA ownership at the iommu_group level. New users of this
+ * interface should be avoided.
+ */
+int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
+{
+ int ret = 0;
+
+ mutex_lock(&group->mutex);
+ if (group->owner_cnt) {
+ ret = -EPERM;
+ goto unlock_out;
+ } else {
+ if (group->domain && group->domain != group->default_domain) {
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+
+ ret = __iommu_group_alloc_blocking_domain(group);
+ if (ret)
+ goto unlock_out;
+
+ ret = __iommu_group_set_domain(group, group->blocking_domain);
+ if (ret)
+ goto unlock_out;
+ group->owner = owner;
+ }
+
+ group->owner_cnt++;
+unlock_out:
+ mutex_unlock(&group->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
+
+/**
+ * iommu_group_release_dma_owner() - Release DMA ownership of a group
+ * @group: The group.
+ *
+ * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
+ */
+void iommu_group_release_dma_owner(struct iommu_group *group)
+{
+ int ret;
+
+ mutex_lock(&group->mutex);
+ if (WARN_ON(!group->owner_cnt || !group->owner))
+ goto unlock_out;
+
+ group->owner_cnt = 0;
+ group->owner = NULL;
+ ret = __iommu_group_set_domain(group, group->default_domain);
+ WARN(ret, "iommu driver failed to attach the default domain");
+
+unlock_out:
+ mutex_unlock(&group->mutex);
+}
+EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);
+
+/**
+ * iommu_group_dma_owner_claimed() - Query group dma ownership status
+ * @group: The group.
+ *
+ * This provides status query on a given group. It is racy and only for
+ * non-binding status reporting.
+ */
+bool iommu_group_dma_owner_claimed(struct iommu_group *group)
+{
+ unsigned int user;
+
+ mutex_lock(&group->mutex);
+ user = group->owner_cnt;
+ mutex_unlock(&group->mutex);
+
+ return user;
+}
+EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
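
For illustration only, not part of the patch: the expected claim -> attach -> detach -> release sequence for a VFIO-like owner, based on the functions above. "my_token" is any cookie unique to the claiming driver instance, and error handling is simplified:

#include <linux/iommu.h>

static int example_take_group(struct iommu_group *group,
			      struct iommu_domain *domain, void *my_token)
{
	int ret;

	ret = iommu_group_claim_dma_owner(group, my_token);
	if (ret)
		return ret;	/* -EPERM/-EBUSY: group already in use */

	/* Until the attach below, devices sit on the blocking domain. */
	ret = iommu_attach_group(domain, group);
	if (ret)
		iommu_group_release_dma_owner(group);
	return ret;
}

static void example_put_group(struct iommu_group *group,
			      struct iommu_domain *domain)
{
	iommu_detach_group(domain, group);	/* falls back to the blocking domain */
	iommu_group_release_dma_owner(group);	/* re-attaches the default domain */
}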
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 50f57624610f..f09aedfdd462 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -583,7 +583,7 @@ static void print_ctx_regs(void __iomem *base, int ctx)
GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}
-static void insert_iommu_master(struct device *dev,
+static int insert_iommu_master(struct device *dev,
struct msm_iommu_dev **iommu,
struct of_phandle_args *spec)
{
@@ -592,6 +592,10 @@ static void insert_iommu_master(struct device *dev,
if (list_empty(&(*iommu)->ctx_list)) {
master = kzalloc(sizeof(*master), GFP_ATOMIC);
+ if (!master) {
+ dev_err(dev, "Failed to allocate iommu_master\n");
+ return -ENOMEM;
+ }
master->of_node = dev->of_node;
list_add(&master->list, &(*iommu)->ctx_list);
dev_iommu_priv_set(dev, master);
@@ -601,30 +605,34 @@ static void insert_iommu_master(struct device *dev,
if (master->mids[sid] == spec->args[0]) {
dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
sid);
- return;
+ return 0;
}
master->mids[master->num_mids++] = spec->args[0];
+ return 0;
}
static int qcom_iommu_of_xlate(struct device *dev,
struct of_phandle_args *spec)
{
- struct msm_iommu_dev *iommu;
+ struct msm_iommu_dev *iommu = NULL, *iter;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&msm_iommu_lock, flags);
- list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
- if (iommu->dev->of_node == spec->np)
+ list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
+ if (iter->dev->of_node == spec->np) {
+ iommu = iter;
break;
+ }
+ }
- if (!iommu || iommu->dev->of_node != spec->np) {
+ if (!iommu) {
ret = -ENODEV;
goto fail;
}
- insert_iommu_master(dev, &iommu, spec);
+ ret = insert_iommu_master(dev, &iommu, spec);
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
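
Not part of the patch, for illustration: the msm change above switches the lookup loop to a separate iterator so the result pointer stays NULL unless a match was found, rather than testing the loop cursor after the walk. The same pattern in isolation, with made-up types:

#include <linux/list.h>
#include <linux/types.h>

struct example_item {
	struct list_head node;
	int id;
};

static struct example_item *example_find(struct list_head *head, int id)
{
	struct example_item *found = NULL, *iter;

	list_for_each_entry(iter, head, node) {
		if (iter->id == id) {
			found = iter;
			break;
		}
	}
	return found;	/* NULL when nothing matched */
}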
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 6fd75a60abd6..bb9dd92c9898 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -14,12 +14,14 @@
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
+#include <linux/io-pgtable.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -29,7 +31,7 @@
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
-#include "mtk_iommu.h"
+#include <dt-bindings/memory/mtk-memory-port.h>
#define REG_MMU_PT_BASE_ADDR 0x000
#define MMU_PT_ADDR_MASK GENMASK(31, 7)
@@ -51,6 +53,8 @@
#define F_MMU_STANDARD_AXI_MODE_MASK (BIT(3) | BIT(19))
#define REG_MMU_DCM_DIS 0x050
+#define F_MMU_DCM BIT(8)
+
#define REG_MMU_WR_LEN_CTRL 0x054
#define F_MMU_WR_THROT_DIS_MASK (BIT(5) | BIT(21))
@@ -103,10 +107,15 @@
#define REG_MMU1_INT_ID 0x154
#define F_MMU_INT_ID_COMM_ID(a) (((a) >> 9) & 0x7)
#define F_MMU_INT_ID_SUB_COMM_ID(a) (((a) >> 7) & 0x3)
+#define F_MMU_INT_ID_COMM_ID_EXT(a) (((a) >> 10) & 0x7)
+#define F_MMU_INT_ID_SUB_COMM_ID_EXT(a) (((a) >> 7) & 0x7)
#define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)
#define MTK_PROTECT_PA_ALIGN 256
+#define MTK_IOMMU_BANK_SZ 0x1000
+
+#define PERICFG_IOMMU_1 0x714
#define HAS_4GB_MODE BIT(0)
/* HW will use the EMI clock if there isn't the "bclk". */
@@ -114,25 +123,147 @@
#define HAS_VLD_PA_RNG BIT(2)
#define RESET_AXI BIT(3)
#define OUT_ORDER_WR_EN BIT(4)
-#define HAS_SUB_COMM BIT(5)
-#define WR_THROT_EN BIT(6)
-#define HAS_LEGACY_IVRP_PADDR BIT(7)
-#define IOVA_34_EN BIT(8)
+#define HAS_SUB_COMM_2BITS BIT(5)
+#define HAS_SUB_COMM_3BITS BIT(6)
+#define WR_THROT_EN BIT(7)
+#define HAS_LEGACY_IVRP_PADDR BIT(8)
+#define IOVA_34_EN BIT(9)
+#define SHARE_PGTABLE BIT(10) /* 2 HW share pgtable */
+#define DCM_DISABLE BIT(11)
+#define STD_AXI_MODE BIT(12) /* For non MM iommu */
+/* 2 bits: iommu type */
+#define MTK_IOMMU_TYPE_MM (0x0 << 13)
+#define MTK_IOMMU_TYPE_INFRA (0x1 << 13)
+#define MTK_IOMMU_TYPE_MASK (0x3 << 13)
+/* PM and clock always on. e.g. infra iommu */
+#define PM_CLK_AO BIT(15)
+#define IFA_IOMMU_PCIE_SUPPORT BIT(16)
+
+#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \
+ ((((pdata)->flags) & (mask)) == (_x))
+
+#define MTK_IOMMU_HAS_FLAG(pdata, _x) MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, _x)
+#define MTK_IOMMU_IS_TYPE(pdata, _x) MTK_IOMMU_HAS_FLAG_MASK(pdata, _x,\
+ MTK_IOMMU_TYPE_MASK)
+
+#define MTK_INVALID_LARBID MTK_LARB_NR_MAX
+
+#define MTK_LARB_COM_MAX 8
+#define MTK_LARB_SUBCOM_MAX 8
+
+#define MTK_IOMMU_GROUP_MAX 8
+#define MTK_IOMMU_BANK_MAX 5
+
+enum mtk_iommu_plat {
+ M4U_MT2712,
+ M4U_MT6779,
+ M4U_MT8167,
+ M4U_MT8173,
+ M4U_MT8183,
+ M4U_MT8186,
+ M4U_MT8192,
+ M4U_MT8195,
+};
+
+struct mtk_iommu_iova_region {
+ dma_addr_t iova_base;
+ unsigned long long size;
+};
+
+struct mtk_iommu_suspend_reg {
+ u32 misc_ctrl;
+ u32 dcm_dis;
+ u32 ctrl_reg;
+ u32 vld_pa_rng;
+ u32 wr_len_ctrl;
+
+ u32 int_control[MTK_IOMMU_BANK_MAX];
+ u32 int_main_control[MTK_IOMMU_BANK_MAX];
+ u32 ivrp_paddr[MTK_IOMMU_BANK_MAX];
+};
+
+struct mtk_iommu_plat_data {
+ enum mtk_iommu_plat m4u_plat;
+ u32 flags;
+ u32 inv_sel_reg;
+
+ char *pericfg_comp_str;
+ struct list_head *hw_list;
+ unsigned int iova_region_nr;
+ const struct mtk_iommu_iova_region *iova_region;
+
+ u8 banks_num;
+ bool banks_enable[MTK_IOMMU_BANK_MAX];
+ unsigned int banks_portmsk[MTK_IOMMU_BANK_MAX];
+ unsigned char larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX];
+};
+
+struct mtk_iommu_bank_data {
+ void __iomem *base;
+ int irq;
+ u8 id;
+ struct device *parent_dev;
+ struct mtk_iommu_data *parent_data;
+ spinlock_t tlb_lock; /* lock for tlb range flush */
+ struct mtk_iommu_domain *m4u_dom; /* Each bank has a domain */
+};
+
+struct mtk_iommu_data {
+ struct device *dev;
+ struct clk *bclk;
+ phys_addr_t protect_base; /* protect memory base */
+ struct mtk_iommu_suspend_reg reg;
+ struct iommu_group *m4u_group[MTK_IOMMU_GROUP_MAX];
+ bool enable_4GB;
-#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
- ((((pdata)->flags) & (_x)) == (_x))
+ struct iommu_device iommu;
+ const struct mtk_iommu_plat_data *plat_data;
+ struct device *smicomm_dev;
+
+ struct mtk_iommu_bank_data *bank;
+
+ struct dma_iommu_mapping *mapping; /* For mtk_iommu_v1.c */
+ struct regmap *pericfg;
+
+ struct mutex mutex; /* Protect m4u_group/m4u_dom above */
+
+ /*
+	 * In the sharing pgtable case, add data->list to the global list (e.g. m4ulist).
+	 * In the non-sharing pgtable case, add data->list to its own hw_list_head.
+ */
+ struct list_head *hw_list;
+ struct list_head hw_list_head;
+ struct list_head list;
+ struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
+};
struct mtk_iommu_domain {
struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop;
- struct mtk_iommu_data *data;
+ struct mtk_iommu_bank_data *bank;
struct iommu_domain domain;
+
+ struct mutex mutex; /* Protect "data" in this structure */
};
+static int mtk_iommu_bind(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+
+ return component_bind_all(dev, &data->larb_imu);
+}
+
+static void mtk_iommu_unbind(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+
+ component_unbind_all(dev, &data->larb_imu);
+}
+
static const struct iommu_ops mtk_iommu_ops;
-static int mtk_iommu_hw_init(const struct mtk_iommu_data *data);
+static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int bankid);
#define MTK_IOMMU_TLB_ADDR(iova) ({ \
dma_addr_t _addr = iova; \
@@ -165,42 +296,28 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data);
static LIST_HEAD(m4ulist); /* List all the M4U HWs */
-#define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list)
-
-struct mtk_iommu_iova_region {
- dma_addr_t iova_base;
- unsigned long long size;
-};
+#define for_each_m4u(data, head) list_for_each_entry(data, head, list)
static const struct mtk_iommu_iova_region single_domain[] = {
{.iova_base = 0, .size = SZ_4G},
};
static const struct mtk_iommu_iova_region mt8192_multi_dom[] = {
- { .iova_base = 0x0, .size = SZ_4G}, /* disp: 0 ~ 4G */
+ { .iova_base = 0x0, .size = SZ_4G}, /* 0 ~ 4G */
#if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)
- { .iova_base = SZ_4G, .size = SZ_4G}, /* vdec: 4G ~ 8G */
- { .iova_base = SZ_4G * 2, .size = SZ_4G}, /* CAM/MDP: 8G ~ 12G */
+ { .iova_base = SZ_4G, .size = SZ_4G}, /* 4G ~ 8G */
+ { .iova_base = SZ_4G * 2, .size = SZ_4G}, /* 8G ~ 12G */
+ { .iova_base = SZ_4G * 3, .size = SZ_4G}, /* 12G ~ 16G */
+
{ .iova_base = 0x240000000ULL, .size = 0x4000000}, /* CCU0 */
{ .iova_base = 0x244000000ULL, .size = 0x4000000}, /* CCU1 */
#endif
};
-/*
- * There may be 1 or 2 M4U HWs, But we always expect they are in the same domain
- * for the performance.
- *
- * Here always return the mtk_iommu_data of the first probed M4U where the
- * iommu domain information is recorded.
- */
-static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
+/* If 2 M4U share a domain (i.e. use the same hwlist), put the corresponding info in the first data. */
+static struct mtk_iommu_data *mtk_iommu_get_frst_data(struct list_head *hwlist)
{
- struct mtk_iommu_data *data;
-
- for_each_m4u(data)
- return data;
-
- return NULL;
+ return list_first_entry(hwlist, struct mtk_iommu_data, list);
}
static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
@@ -210,46 +327,72 @@ static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
+	/* TLB flush all is always done through bank0. */
+ struct mtk_iommu_bank_data *bank = &data->bank[0];
+ void __iomem *base = bank->base;
unsigned long flags;
- spin_lock_irqsave(&data->tlb_lock, flags);
- writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
- data->base + data->plat_data->inv_sel_reg);
- writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
+ spin_lock_irqsave(&bank->tlb_lock, flags);
+ writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, base + data->plat_data->inv_sel_reg);
+ writel_relaxed(F_ALL_INVLD, base + REG_MMU_INVALIDATE);
wmb(); /* Make sure the tlb flush all done */
- spin_unlock_irqrestore(&data->tlb_lock, flags);
+ spin_unlock_irqrestore(&bank->tlb_lock, flags);
}
static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
- size_t granule,
- struct mtk_iommu_data *data)
+ struct mtk_iommu_bank_data *bank)
{
+ struct list_head *head = bank->parent_data->hw_list;
+ struct mtk_iommu_bank_data *curbank;
+ struct mtk_iommu_data *data;
+ bool check_pm_status;
unsigned long flags;
+ void __iomem *base;
int ret;
u32 tmp;
- for_each_m4u(data) {
- if (pm_runtime_get_if_in_use(data->dev) <= 0)
- continue;
+ for_each_m4u(data, head) {
+ /*
+		 * To avoid resuming the iommu device too frequently while it is
+		 * not active, don't always call pm_runtime_get here; the tlb
+		 * flush then relies on the tlb-flush-all done in runtime resume.
+		 *
+		 * There are 2 special cases:
+		 *
+		 * Case1: The iommu dev doesn't have a power domain but has bclk.
+		 * This case should also avoid the tlb flush while the dev is not
+		 * active, to mute the tlb timeout log, e.g. on mt8173.
+		 *
+		 * Case2: The power/clock of the infra iommu is always on, and it
+		 * doesn't have a device link with the master devices. This case
+		 * should skip the PM status check.
+ */
+ check_pm_status = !MTK_IOMMU_HAS_FLAG(data->plat_data, PM_CLK_AO);
- spin_lock_irqsave(&data->tlb_lock, flags);
+ if (check_pm_status) {
+ if (pm_runtime_get_if_in_use(data->dev) <= 0)
+ continue;
+ }
+
+ curbank = &data->bank[bank->id];
+ base = curbank->base;
+
+ spin_lock_irqsave(&curbank->tlb_lock, flags);
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
- data->base + data->plat_data->inv_sel_reg);
+ base + data->plat_data->inv_sel_reg);
- writel_relaxed(MTK_IOMMU_TLB_ADDR(iova),
- data->base + REG_MMU_INVLD_START_A);
+ writel_relaxed(MTK_IOMMU_TLB_ADDR(iova), base + REG_MMU_INVLD_START_A);
writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1),
- data->base + REG_MMU_INVLD_END_A);
- writel_relaxed(F_MMU_INV_RANGE,
- data->base + REG_MMU_INVALIDATE);
+ base + REG_MMU_INVLD_END_A);
+ writel_relaxed(F_MMU_INV_RANGE, base + REG_MMU_INVALIDATE);
/* tlb sync */
- ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
+ ret = readl_poll_timeout_atomic(base + REG_MMU_CPE_DONE,
tmp, tmp != 0, 10, 1000);
/* Clear the CPE status */
- writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
- spin_unlock_irqrestore(&data->tlb_lock, flags);
+ writel_relaxed(0, base + REG_MMU_CPE_DONE);
+ spin_unlock_irqrestore(&curbank->tlb_lock, flags);
if (ret) {
dev_warn(data->dev,
@@ -257,70 +400,103 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
mtk_iommu_tlb_flush_all(data);
}
- pm_runtime_put(data->dev);
+ if (check_pm_status)
+ pm_runtime_put(data->dev);
}
}
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
- struct mtk_iommu_data *data = dev_id;
- struct mtk_iommu_domain *dom = data->m4u_dom;
- unsigned int fault_larb, fault_port, sub_comm = 0;
+ struct mtk_iommu_bank_data *bank = dev_id;
+ struct mtk_iommu_data *data = bank->parent_data;
+ struct mtk_iommu_domain *dom = bank->m4u_dom;
+ unsigned int fault_larb = MTK_INVALID_LARBID, fault_port = 0, sub_comm = 0;
u32 int_state, regval, va34_32, pa34_32;
+ const struct mtk_iommu_plat_data *plat_data = data->plat_data;
+ void __iomem *base = bank->base;
u64 fault_iova, fault_pa;
bool layer, write;
/* Read error info from registers */
- int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
+ int_state = readl_relaxed(base + REG_MMU_FAULT_ST1);
if (int_state & F_REG_MMU0_FAULT_MASK) {
- regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
- fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
- fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
+ regval = readl_relaxed(base + REG_MMU0_INT_ID);
+ fault_iova = readl_relaxed(base + REG_MMU0_FAULT_VA);
+ fault_pa = readl_relaxed(base + REG_MMU0_INVLD_PA);
} else {
- regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
- fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
- fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
+ regval = readl_relaxed(base + REG_MMU1_INT_ID);
+ fault_iova = readl_relaxed(base + REG_MMU1_FAULT_VA);
+ fault_pa = readl_relaxed(base + REG_MMU1_INVLD_PA);
}
layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
- if (MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN)) {
+ if (MTK_IOMMU_HAS_FLAG(plat_data, IOVA_34_EN)) {
va34_32 = FIELD_GET(F_MMU_INVAL_VA_34_32_MASK, fault_iova);
- pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova);
fault_iova = fault_iova & F_MMU_INVAL_VA_31_12_MASK;
fault_iova |= (u64)va34_32 << 32;
- fault_pa |= (u64)pa34_32 << 32;
}
-
- fault_port = F_MMU_INT_ID_PORT_ID(regval);
- if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
- fault_larb = F_MMU_INT_ID_COMM_ID(regval);
- sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
- } else {
- fault_larb = F_MMU_INT_ID_LARB_ID(regval);
+ pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova);
+ fault_pa |= (u64)pa34_32 << 32;
+
+ if (MTK_IOMMU_IS_TYPE(plat_data, MTK_IOMMU_TYPE_MM)) {
+ fault_port = F_MMU_INT_ID_PORT_ID(regval);
+ if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_2BITS)) {
+ fault_larb = F_MMU_INT_ID_COMM_ID(regval);
+ sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
+ } else if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_3BITS)) {
+ fault_larb = F_MMU_INT_ID_COMM_ID_EXT(regval);
+ sub_comm = F_MMU_INT_ID_SUB_COMM_ID_EXT(regval);
+ } else {
+ fault_larb = F_MMU_INT_ID_LARB_ID(regval);
+ }
+ fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
}
- fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
- if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
+ if (report_iommu_fault(&dom->domain, bank->parent_dev, fault_iova,
write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
dev_err_ratelimited(
- data->dev,
- "fault type=0x%x iova=0x%llx pa=0x%llx larb=%d port=%d layer=%d %s\n",
- int_state, fault_iova, fault_pa, fault_larb, fault_port,
+ bank->parent_dev,
+ "fault type=0x%x iova=0x%llx pa=0x%llx master=0x%x(larb=%d port=%d) layer=%d %s\n",
+ int_state, fault_iova, fault_pa, regval, fault_larb, fault_port,
layer, write ? "write" : "read");
}
/* Interrupt clear */
- regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
+ regval = readl_relaxed(base + REG_MMU_INT_CONTROL0);
regval |= F_INT_CLR_BIT;
- writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
+ writel_relaxed(regval, base + REG_MMU_INT_CONTROL0);
mtk_iommu_tlb_flush_all(data);
return IRQ_HANDLED;
}
-static int mtk_iommu_get_domain_id(struct device *dev,
- const struct mtk_iommu_plat_data *plat_data)
+static unsigned int mtk_iommu_get_bank_id(struct device *dev,
+ const struct mtk_iommu_plat_data *plat_data)
+{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ unsigned int i, portmsk = 0, bankid = 0;
+
+ if (plat_data->banks_num == 1)
+ return bankid;
+
+ for (i = 0; i < fwspec->num_ids; i++)
+ portmsk |= BIT(MTK_M4U_TO_PORT(fwspec->ids[i]));
+
+ for (i = 0; i < plat_data->banks_num && i < MTK_IOMMU_BANK_MAX; i++) {
+ if (!plat_data->banks_enable[i])
+ continue;
+
+ if (portmsk & plat_data->banks_portmsk[i]) {
+ bankid = i;
+ break;
+ }
+ }
+ return bankid; /* default is 0 */
+}
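
A standalone restatement of the selection rule in mtk_iommu_get_bank_id() above, for illustration only and not part of the patch, with made-up mask values (the banks_enable[] check is omitted for brevity):

#include <linux/bits.h>

static unsigned int example_pick_bank(void)
{
	/* Pretend bank 4 covers ports 7 and 8; the other banks cover none. */
	unsigned int banks_portmsk[5] = { 0, 0, 0, 0, BIT(7) | BIT(8) };
	unsigned int portmsk = BIT(7);	/* the device uses port 7 */
	unsigned int i, bankid = 0;

	for (i = 0; i < 5; i++) {
		if (portmsk & banks_portmsk[i]) {
			bankid = i;
			break;
		}
	}
	return bankid;	/* 4 here; stays 0 when no mask overlaps */
}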
+
+static int mtk_iommu_get_iova_region_id(struct device *dev,
+ const struct mtk_iommu_plat_data *plat_data)
{
const struct mtk_iommu_iova_region *rgn = plat_data->iova_region;
const struct bus_dma_region *dma_rgn = dev->dma_range_map;
@@ -349,46 +525,65 @@ static int mtk_iommu_get_domain_id(struct device *dev,
return -EINVAL;
}
-static void mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
- bool enable, unsigned int domid)
+static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
+ bool enable, unsigned int regionid)
{
struct mtk_smi_larb_iommu *larb_mmu;
unsigned int larbid, portid;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
const struct mtk_iommu_iova_region *region;
- int i;
+ u32 peri_mmuen, peri_mmuen_msk;
+ int i, ret = 0;
for (i = 0; i < fwspec->num_ids; ++i) {
larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
- larb_mmu = &data->larb_imu[larbid];
-
- region = data->plat_data->iova_region + domid;
- larb_mmu->bank[portid] = upper_32_bits(region->iova_base);
-
- dev_dbg(dev, "%s iommu for larb(%s) port %d dom %d bank %d.\n",
- enable ? "enable" : "disable", dev_name(larb_mmu->dev),
- portid, domid, larb_mmu->bank[portid]);
-
- if (enable)
- larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
- else
- larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
+ if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
+ larb_mmu = &data->larb_imu[larbid];
+
+ region = data->plat_data->iova_region + regionid;
+ larb_mmu->bank[portid] = upper_32_bits(region->iova_base);
+
+ dev_dbg(dev, "%s iommu for larb(%s) port %d region %d rgn-bank %d.\n",
+ enable ? "enable" : "disable", dev_name(larb_mmu->dev),
+ portid, regionid, larb_mmu->bank[portid]);
+
+ if (enable)
+ larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
+ else
+ larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
+ } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
+ peri_mmuen_msk = BIT(portid);
+ /* PCI dev has only one output id, enable the next writing bit for PCIe */
+ if (dev_is_pci(dev))
+ peri_mmuen_msk |= BIT(portid + 1);
+
+ peri_mmuen = enable ? peri_mmuen_msk : 0;
+ ret = regmap_update_bits(data->pericfg, PERICFG_IOMMU_1,
+ peri_mmuen_msk, peri_mmuen);
+ if (ret)
+ dev_err(dev, "%s iommu(%s) inframaster 0x%x fail(%d).\n",
+ enable ? "enable" : "disable",
+ dev_name(data->dev), peri_mmuen_msk, ret);
+ }
}
+ return ret;
}
static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
struct mtk_iommu_data *data,
- unsigned int domid)
+ unsigned int region_id)
{
const struct mtk_iommu_iova_region *region;
-
- /* Use the exist domain as there is only one pgtable here. */
- if (data->m4u_dom) {
- dom->iop = data->m4u_dom->iop;
- dom->cfg = data->m4u_dom->cfg;
- dom->domain.pgsize_bitmap = data->m4u_dom->cfg.pgsize_bitmap;
+ struct mtk_iommu_domain *m4u_dom;
+
+ /* Always use bank0 in sharing pgtable case */
+ m4u_dom = data->bank[0].m4u_dom;
+ if (m4u_dom) {
+ dom->iop = m4u_dom->iop;
+ dom->cfg = m4u_dom->cfg;
+ dom->domain.pgsize_bitmap = m4u_dom->cfg.pgsize_bitmap;
goto update_iova_region;
}
@@ -417,7 +612,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
update_iova_region:
/* Update the iova region for this domain */
- region = data->plat_data->iova_region + domid;
+ region = data->plat_data->iova_region + region_id;
dom->domain.geometry.aperture_start = region->iova_base;
dom->domain.geometry.aperture_end = region->iova_base + region->size - 1;
dom->domain.geometry.force_aperture = true;
@@ -428,12 +623,13 @@ static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
struct mtk_iommu_domain *dom;
- if (type != IOMMU_DOMAIN_DMA)
+ if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
dom = kzalloc(sizeof(*dom), GFP_KERNEL);
if (!dom)
return NULL;
+ mutex_init(&dom->mutex);
return &dom->domain;
}
@@ -446,40 +642,60 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain)
static int mtk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
+ struct mtk_iommu_data *data = dev_iommu_priv_get(dev), *frstdata;
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct list_head *hw_list = data->hw_list;
struct device *m4udev = data->dev;
- int ret, domid;
+ struct mtk_iommu_bank_data *bank;
+ unsigned int bankid;
+ int ret, region_id;
- domid = mtk_iommu_get_domain_id(dev, data->plat_data);
- if (domid < 0)
- return domid;
+ region_id = mtk_iommu_get_iova_region_id(dev, data->plat_data);
+ if (region_id < 0)
+ return region_id;
- if (!dom->data) {
- if (mtk_iommu_domain_finalise(dom, data, domid))
+ bankid = mtk_iommu_get_bank_id(dev, data->plat_data);
+ mutex_lock(&dom->mutex);
+ if (!dom->bank) {
+ /* Data is in the frstdata in sharing pgtable case. */
+ frstdata = mtk_iommu_get_frst_data(hw_list);
+
+ ret = mtk_iommu_domain_finalise(dom, frstdata, region_id);
+ if (ret) {
+ mutex_unlock(&dom->mutex);
return -ENODEV;
- dom->data = data;
+ }
+ dom->bank = &data->bank[bankid];
}
+ mutex_unlock(&dom->mutex);
- if (!data->m4u_dom) { /* Initialize the M4U HW */
+ mutex_lock(&data->mutex);
+ bank = &data->bank[bankid];
+	if (!bank->m4u_dom) { /* Initialize the M4U HW for each bank */
ret = pm_runtime_resume_and_get(m4udev);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ dev_err(m4udev, "pm get fail(%d) in attach.\n", ret);
+ goto err_unlock;
+ }
- ret = mtk_iommu_hw_init(data);
+ ret = mtk_iommu_hw_init(data, bankid);
if (ret) {
pm_runtime_put(m4udev);
- return ret;
+ goto err_unlock;
}
- data->m4u_dom = dom;
+ bank->m4u_dom = dom;
writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
- data->base + REG_MMU_PT_BASE_ADDR);
+ bank->base + REG_MMU_PT_BASE_ADDR);
pm_runtime_put(m4udev);
}
+ mutex_unlock(&data->mutex);
- mtk_iommu_config(data, dev, true, domid);
- return 0;
+ return mtk_iommu_config(data, dev, true, region_id);
+
+err_unlock:
+ mutex_unlock(&data->mutex);
+ return ret;
}
static void mtk_iommu_detach_device(struct iommu_domain *domain,
@@ -496,7 +712,7 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
/* The "4GB mode" M4U physically can not use the lower remap of Dram. */
- if (dom->data->enable_4GB)
+ if (dom->bank->parent_data->enable_4GB)
paddr |= BIT_ULL(32);
/* Synchronize with the tlb_lock */
@@ -517,7 +733,7 @@ static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- mtk_iommu_tlb_flush_all(dom->data);
+ mtk_iommu_tlb_flush_all(dom->bank->parent_data);
}
static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
@@ -526,8 +742,7 @@ static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
size_t length = gather->end - gather->start + 1;
- mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
- dom->data);
+ mtk_iommu_tlb_flush_range_sync(gather->start, length, dom->bank);
}
static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
@@ -535,7 +750,7 @@ static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- mtk_iommu_tlb_flush_range_sync(iova, size, size, dom->data);
+ mtk_iommu_tlb_flush_range_sync(iova, size, dom->bank);
}
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -546,7 +761,7 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
pa = dom->iop->iova_to_phys(dom->iop, iova);
if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) &&
- dom->data->enable_4GB &&
+ dom->bank->parent_data->enable_4GB &&
pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
pa &= ~BIT_ULL(32);
@@ -566,12 +781,18 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
data = dev_iommu_priv_get(dev);
+ if (!MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
+ return &data->iommu;
+
/*
* Link the consumer device with the smi-larb device(supplier).
* The device that connects with each a larb is a independent HW.
* All the ports in each a device should be in the same larbs.
*/
larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
+ if (larbid >= MTK_LARB_NR_MAX)
+ return ERR_PTR(-EINVAL);
+
for (i = 1; i < fwspec->num_ids; i++) {
larbidx = MTK_M4U_TO_LARB(fwspec->ids[i]);
if (larbid != larbidx) {
@@ -581,6 +802,9 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
}
}
larbdev = data->larb_imu[larbid].dev;
+ if (!larbdev)
+ return ERR_PTR(-EINVAL);
+
link = device_link_add(dev, larbdev,
DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
if (!link)
@@ -599,34 +823,55 @@ static void mtk_iommu_release_device(struct device *dev)
return;
data = dev_iommu_priv_get(dev);
- larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
- larbdev = data->larb_imu[larbid].dev;
- device_link_remove(dev, larbdev);
+ if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
+ larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
+ larbdev = data->larb_imu[larbid].dev;
+ device_link_remove(dev, larbdev);
+ }
iommu_fwspec_free(dev);
}
+static int mtk_iommu_get_group_id(struct device *dev, const struct mtk_iommu_plat_data *plat_data)
+{
+ unsigned int bankid;
+
+ /*
+	 * If the bank function is enabled, each bank is an iommu group/domain.
+	 * Otherwise, each iova region is an iommu group/domain.
+ */
+ bankid = mtk_iommu_get_bank_id(dev, plat_data);
+ if (bankid)
+ return bankid;
+
+ return mtk_iommu_get_iova_region_id(dev, plat_data);
+}
+
static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
- struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+ struct mtk_iommu_data *c_data = dev_iommu_priv_get(dev), *data;
+ struct list_head *hw_list = c_data->hw_list;
struct iommu_group *group;
- int domid;
+ int groupid;
+ data = mtk_iommu_get_frst_data(hw_list);
if (!data)
return ERR_PTR(-ENODEV);
- domid = mtk_iommu_get_domain_id(dev, data->plat_data);
- if (domid < 0)
- return ERR_PTR(domid);
+ groupid = mtk_iommu_get_group_id(dev, data->plat_data);
+ if (groupid < 0)
+ return ERR_PTR(groupid);
- group = data->m4u_group[domid];
+ mutex_lock(&data->mutex);
+ group = data->m4u_group[groupid];
if (!group) {
group = iommu_group_alloc();
if (!IS_ERR(group))
- data->m4u_group[domid] = group;
+ data->m4u_group[groupid] = group;
} else {
iommu_group_ref_get(group);
}
+ mutex_unlock(&data->mutex);
return group;
}
@@ -656,14 +901,14 @@ static void mtk_iommu_get_resv_regions(struct device *dev,
struct list_head *head)
{
struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
- unsigned int domid = mtk_iommu_get_domain_id(dev, data->plat_data), i;
+ unsigned int regionid = mtk_iommu_get_iova_region_id(dev, data->plat_data), i;
const struct mtk_iommu_iova_region *resv, *curdom;
struct iommu_resv_region *region;
int prot = IOMMU_WRITE | IOMMU_READ;
- if ((int)domid < 0)
+ if ((int)regionid < 0)
return;
- curdom = data->plat_data->iova_region + domid;
+ curdom = data->plat_data->iova_region + regionid;
for (i = 0; i < data->plat_data->iova_region_nr; i++) {
resv = data->plat_data->iova_region + i;
@@ -704,42 +949,24 @@ static const struct iommu_ops mtk_iommu_ops = {
}
};
-static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
+static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int bankid)
{
+ const struct mtk_iommu_bank_data *bankx = &data->bank[bankid];
+ const struct mtk_iommu_bank_data *bank0 = &data->bank[0];
u32 regval;
+ /*
+	 * Global control settings are in bank0. These global registers may be
+	 * re-initialized here since we cannot be sure there are any bank0 consumers.
+ */
if (data->plat_data->m4u_plat == M4U_MT8173) {
regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
} else {
- regval = readl_relaxed(data->base + REG_MMU_CTRL_REG);
+ regval = readl_relaxed(bank0->base + REG_MMU_CTRL_REG);
regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR;
}
- writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
-
- regval = F_L2_MULIT_HIT_EN |
- F_TABLE_WALK_FAULT_INT_EN |
- F_PREETCH_FIFO_OVERFLOW_INT_EN |
- F_MISS_FIFO_OVERFLOW_INT_EN |
- F_PREFETCH_FIFO_ERR_INT_EN |
- F_MISS_FIFO_ERR_INT_EN;
- writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
-
- regval = F_INT_TRANSLATION_FAULT |
- F_INT_MAIN_MULTI_HIT_FAULT |
- F_INT_INVALID_PA_FAULT |
- F_INT_ENTRY_REPLACEMENT_FAULT |
- F_INT_TLB_MISS_FAULT |
- F_INT_MISS_TRANSACTION_FIFO_FAULT |
- F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
- writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
-
- if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR))
- regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
- else
- regval = lower_32_bits(data->protect_base) |
- upper_32_bits(data->protect_base);
- writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
+ writel_relaxed(regval, bank0->base + REG_MMU_CTRL_REG);
if (data->enable_4GB &&
MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
@@ -748,31 +975,61 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
* 0x1_0000_0000 to 0x1_ffff_ffff. here record bit[32:30].
*/
regval = F_MMU_VLD_PA_RNG(7, 4);
- writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
+ writel_relaxed(regval, bank0->base + REG_MMU_VLD_PA_RNG);
}
- writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, DCM_DISABLE))
+ writel_relaxed(F_MMU_DCM, bank0->base + REG_MMU_DCM_DIS);
+ else
+ writel_relaxed(0, bank0->base + REG_MMU_DCM_DIS);
+
if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
/* write command throttling mode */
- regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL);
+ regval = readl_relaxed(bank0->base + REG_MMU_WR_LEN_CTRL);
regval &= ~F_MMU_WR_THROT_DIS_MASK;
- writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL);
+ writel_relaxed(regval, bank0->base + REG_MMU_WR_LEN_CTRL);
}
if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
/* The register is called STANDARD_AXI_MODE in this case */
regval = 0;
} else {
- regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL);
- regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
+ regval = readl_relaxed(bank0->base + REG_MMU_MISC_CTRL);
+ if (!MTK_IOMMU_HAS_FLAG(data->plat_data, STD_AXI_MODE))
+ regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
}
- writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL);
+ writel_relaxed(regval, bank0->base + REG_MMU_MISC_CTRL);
- if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
- dev_name(data->dev), (void *)data)) {
- writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
- dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
+ /* Independent settings for each bank */
+ regval = F_L2_MULIT_HIT_EN |
+ F_TABLE_WALK_FAULT_INT_EN |
+ F_PREETCH_FIFO_OVERFLOW_INT_EN |
+ F_MISS_FIFO_OVERFLOW_INT_EN |
+ F_PREFETCH_FIFO_ERR_INT_EN |
+ F_MISS_FIFO_ERR_INT_EN;
+ writel_relaxed(regval, bankx->base + REG_MMU_INT_CONTROL0);
+
+ regval = F_INT_TRANSLATION_FAULT |
+ F_INT_MAIN_MULTI_HIT_FAULT |
+ F_INT_INVALID_PA_FAULT |
+ F_INT_ENTRY_REPLACEMENT_FAULT |
+ F_INT_TLB_MISS_FAULT |
+ F_INT_MISS_TRANSACTION_FIFO_FAULT |
+ F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
+ writel_relaxed(regval, bankx->base + REG_MMU_INT_MAIN_CONTROL);
+
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR))
+ regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
+ else
+ regval = lower_32_bits(data->protect_base) |
+ upper_32_bits(data->protect_base);
+ writel_relaxed(regval, bankx->base + REG_MMU_IVRP_PADDR);
+
+ if (devm_request_irq(bankx->parent_dev, bankx->irq, mtk_iommu_isr, 0,
+ dev_name(bankx->parent_dev), (void *)bankx)) {
+ writel_relaxed(0, bankx->base + REG_MMU_PT_BASE_ADDR);
+ dev_err(bankx->parent_dev, "Failed @ IRQ-%d Request\n", bankx->irq);
return -ENODEV;
}
@@ -784,21 +1041,91 @@ static const struct component_master_ops mtk_iommu_com_ops = {
.unbind = mtk_iommu_unbind,
};
+static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **match,
+ struct mtk_iommu_data *data)
+{
+ struct device_node *larbnode, *smicomm_node, *smi_subcomm_node;
+ struct platform_device *plarbdev;
+ struct device_link *link;
+ int i, larb_nr, ret;
+
+ larb_nr = of_count_phandle_with_args(dev->of_node, "mediatek,larbs", NULL);
+ if (larb_nr < 0)
+ return larb_nr;
+
+ for (i = 0; i < larb_nr; i++) {
+ u32 id;
+
+ larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
+ if (!larbnode)
+ return -EINVAL;
+
+ if (!of_device_is_available(larbnode)) {
+ of_node_put(larbnode);
+ continue;
+ }
+
+ ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
+		if (ret) /* The id is consecutive if this property is absent */
+ id = i;
+
+ plarbdev = of_find_device_by_node(larbnode);
+ if (!plarbdev) {
+ of_node_put(larbnode);
+ return -ENODEV;
+ }
+ if (!plarbdev->dev.driver) {
+ of_node_put(larbnode);
+ return -EPROBE_DEFER;
+ }
+ data->larb_imu[id].dev = &plarbdev->dev;
+
+ component_match_add_release(dev, match, component_release_of,
+ component_compare_of, larbnode);
+ }
+
+ /* Get smi-(sub)-common dev from the last larb. */
+ smi_subcomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0);
+ if (!smi_subcomm_node)
+ return -EINVAL;
+
+ /*
+	 * There may be two levels of smi-common. The node is a smi-sub-common if
+	 * it has a further mediatek,smi property; otherwise it is the smi-common.
+ */
+ smicomm_node = of_parse_phandle(smi_subcomm_node, "mediatek,smi", 0);
+ if (smicomm_node)
+ of_node_put(smi_subcomm_node);
+ else
+ smicomm_node = smi_subcomm_node;
+
+ plarbdev = of_find_device_by_node(smicomm_node);
+ of_node_put(smicomm_node);
+ data->smicomm_dev = &plarbdev->dev;
+
+ link = device_link_add(data->smicomm_dev, dev,
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+ if (!link) {
+ dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev));
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int mtk_iommu_probe(struct platform_device *pdev)
{
struct mtk_iommu_data *data;
struct device *dev = &pdev->dev;
- struct device_node *larbnode, *smicomm_node;
- struct platform_device *plarbdev;
- struct device_link *link;
struct resource *res;
resource_size_t ioaddr;
struct component_match *match = NULL;
struct regmap *infracfg;
void *protect;
- int i, larb_nr, ret;
+ int ret, banks_num, i = 0;
u32 val;
char *p;
+ struct mtk_iommu_bank_data *bank;
+ void __iomem *base;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -835,15 +1162,36 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->enable_4GB = !!(val & F_DDR_4GB_SUPPORT_EN);
}
+ banks_num = data->plat_data->banks_num;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(data->base))
- return PTR_ERR(data->base);
+ if (resource_size(res) < banks_num * MTK_IOMMU_BANK_SZ) {
+ dev_err(dev, "banknr %d. res %pR is not enough.\n", banks_num, res);
+ return -EINVAL;
+ }
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
ioaddr = res->start;
- data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0)
- return data->irq;
+ data->bank = devm_kmalloc(dev, banks_num * sizeof(*data->bank), GFP_KERNEL);
+ if (!data->bank)
+ return -ENOMEM;
+
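+ /* Initialise only the banks enabled on this SoC; each bank has its own MMIO window and interrupt. */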
+ do {
+ if (!data->plat_data->banks_enable[i])
+ continue;
+ bank = &data->bank[i];
+ bank->id = i;
+ bank->base = base + i * MTK_IOMMU_BANK_SZ;
+ bank->m4u_dom = NULL;
+
+ bank->irq = platform_get_irq(pdev, i);
+ if (bank->irq < 0)
+ return bank->irq;
+ bank->parent_dev = dev;
+ bank->parent_data = data;
+ spin_lock_init(&bank->tlb_lock);
+ } while (++i < banks_num);
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
data->bclk = devm_clk_get(dev, "bclk");
@@ -851,62 +1199,27 @@ static int mtk_iommu_probe(struct platform_device *pdev)
return PTR_ERR(data->bclk);
}
- larb_nr = of_count_phandle_with_args(dev->of_node,
- "mediatek,larbs", NULL);
- if (larb_nr < 0)
- return larb_nr;
-
- for (i = 0; i < larb_nr; i++) {
- u32 id;
-
- larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
- if (!larbnode)
- return -EINVAL;
-
- if (!of_device_is_available(larbnode)) {
- of_node_put(larbnode);
- continue;
- }
-
- ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
- if (ret)/* The id is consecutive if there is no this property */
- id = i;
+ pm_runtime_enable(dev);
- plarbdev = of_find_device_by_node(larbnode);
- if (!plarbdev) {
- of_node_put(larbnode);
- return -ENODEV;
+ if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
+ ret = mtk_iommu_mm_dts_parse(dev, &match, data);
+ if (ret) {
+ dev_err(dev, "mm dts parse fail(%d).", ret);
+ goto out_runtime_disable;
}
- if (!plarbdev->dev.driver) {
- of_node_put(larbnode);
- return -EPROBE_DEFER;
+ } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
+ data->plat_data->pericfg_comp_str) {
+ infracfg = syscon_regmap_lookup_by_compatible(data->plat_data->pericfg_comp_str);
+ if (IS_ERR(infracfg)) {
+ ret = PTR_ERR(infracfg);
+ goto out_runtime_disable;
}
- data->larb_imu[id].dev = &plarbdev->dev;
-
- component_match_add_release(dev, &match, component_release_of,
- component_compare_of, larbnode);
- }
-
- /* Get smi-common dev from the last larb. */
- smicomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0);
- if (!smicomm_node)
- return -EINVAL;
-
- plarbdev = of_find_device_by_node(smicomm_node);
- of_node_put(smicomm_node);
- data->smicomm_dev = &plarbdev->dev;
-
- pm_runtime_enable(dev);
- link = device_link_add(data->smicomm_dev, dev,
- DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
- if (!link) {
- dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev));
- ret = -EINVAL;
- goto out_runtime_disable;
+ data->pericfg = infracfg;
}
platform_set_drvdata(pdev, data);
+ mutex_init(&data->mutex);
ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
"mtk-iommu.%pa", &ioaddr);
@@ -917,8 +1230,14 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret)
goto out_sysfs_remove;
- spin_lock_init(&data->tlb_lock);
- list_add_tail(&data->list, &m4ulist);
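+ /* HW instances sharing a pagetable join the common list from the platform data; otherwise each instance keeps its own single-entry list. */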
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) {
+ list_add_tail(&data->list, data->plat_data->hw_list);
+ data->hw_list = data->plat_data->hw_list;
+ } else {
+ INIT_LIST_HEAD(&data->hw_list_head);
+ list_add_tail(&data->list, &data->hw_list_head);
+ data->hw_list = &data->hw_list_head;
+ }
if (!iommu_present(&platform_bus_type)) {
ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
@@ -926,9 +1245,20 @@ static int mtk_iommu_probe(struct platform_device *pdev)
goto out_list_del;
}
- ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
- if (ret)
- goto out_bus_set_null;
+ if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
+ ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+ if (ret)
+ goto out_bus_set_null;
+ } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
+ MTK_IOMMU_HAS_FLAG(data->plat_data, IFA_IOMMU_PCIE_SUPPORT)) {
+#ifdef CONFIG_PCI
+ if (!iommu_present(&pci_bus_type)) {
+ ret = bus_set_iommu(&pci_bus_type, &mtk_iommu_ops);
+ if (ret) /* A PCIe failure doesn't affect platform_bus. */
+ goto out_list_del;
+ }
+#endif
+ }
return ret;
out_bus_set_null:
@@ -939,7 +1269,8 @@ out_list_del:
out_sysfs_remove:
iommu_device_sysfs_remove(&data->iommu);
out_link_remove:
- device_link_remove(data->smicomm_dev, dev);
+ if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
+ device_link_remove(data->smicomm_dev, dev);
out_runtime_disable:
pm_runtime_disable(dev);
return ret;
@@ -948,18 +1279,30 @@ out_runtime_disable:
static int mtk_iommu_remove(struct platform_device *pdev)
{
struct mtk_iommu_data *data = platform_get_drvdata(pdev);
+ struct mtk_iommu_bank_data *bank;
+ int i;
iommu_device_sysfs_remove(&data->iommu);
iommu_device_unregister(&data->iommu);
- if (iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, NULL);
+ list_del(&data->list);
- clk_disable_unprepare(data->bclk);
- device_link_remove(data->smicomm_dev, &pdev->dev);
+ if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
+ device_link_remove(data->smicomm_dev, &pdev->dev);
+ component_master_del(&pdev->dev, &mtk_iommu_com_ops);
+ } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
+ MTK_IOMMU_HAS_FLAG(data->plat_data, IFA_IOMMU_PCIE_SUPPORT)) {
+#ifdef CONFIG_PCI
+ bus_set_iommu(&pci_bus_type, NULL);
+#endif
+ }
pm_runtime_disable(&pdev->dev);
- devm_free_irq(&pdev->dev, data->irq, data);
- component_master_del(&pdev->dev, &mtk_iommu_com_ops);
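+ /* Release the per-bank IRQs for every bank that has an attached domain. */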
+ for (i = 0; i < data->plat_data->banks_num; i++) {
+ bank = &data->bank[i];
+ if (!bank->m4u_dom)
+ continue;
+ devm_free_irq(&pdev->dev, bank->irq, bank);
+ }
return 0;
}
@@ -967,16 +1310,23 @@ static int __maybe_unused mtk_iommu_runtime_suspend(struct device *dev)
{
struct mtk_iommu_data *data = dev_get_drvdata(dev);
struct mtk_iommu_suspend_reg *reg = &data->reg;
- void __iomem *base = data->base;
+ void __iomem *base;
+ int i = 0;
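+ /* These registers are read once via bank 0; the interrupt registers below are saved per bank. */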
+ base = data->bank[i].base;
reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL);
reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
- reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
- reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
- reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
+ do {
+ if (!data->plat_data->banks_enable[i])
+ continue;
+ base = data->bank[i].base;
+ reg->int_control[i] = readl_relaxed(base + REG_MMU_INT_CONTROL0);
+ reg->int_main_control[i] = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
+ reg->ivrp_paddr[i] = readl_relaxed(base + REG_MMU_IVRP_PADDR);
+ } while (++i < data->plat_data->banks_num);
clk_disable_unprepare(data->bclk);
return 0;
}
@@ -985,9 +1335,9 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
{
struct mtk_iommu_data *data = dev_get_drvdata(dev);
struct mtk_iommu_suspend_reg *reg = &data->reg;
- struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
- void __iomem *base = data->base;
- int ret;
+ struct mtk_iommu_domain *m4u_dom;
+ void __iomem *base;
+ int ret, i = 0;
ret = clk_prepare_enable(data->bclk);
if (ret) {
@@ -999,18 +1349,26 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
 * Upon first resume, only enable the clk and return, since the values of the
* registers are not yet set.
*/
- if (!m4u_dom)
+ if (!reg->wr_len_ctrl)
return 0;
+ base = data->bank[i].base;
writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
- writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
- writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
- writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
- writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, base + REG_MMU_PT_BASE_ADDR);
+ do {
+ m4u_dom = data->bank[i].m4u_dom;
+ if (!data->plat_data->banks_enable[i] || !m4u_dom)
+ continue;
+ base = data->bank[i].base;
+ writel_relaxed(reg->int_control[i], base + REG_MMU_INT_CONTROL0);
+ writel_relaxed(reg->int_main_control[i], base + REG_MMU_INT_MAIN_CONTROL);
+ writel_relaxed(reg->ivrp_paddr[i], base + REG_MMU_IVRP_PADDR);
+ writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
+ base + REG_MMU_PT_BASE_ADDR);
+ } while (++i < data->plat_data->banks_num);
/*
* Users may allocate dma buffer before they call pm_runtime_get,
@@ -1029,17 +1387,24 @@ static const struct dev_pm_ops mtk_iommu_pm_ops = {
static const struct mtk_iommu_plat_data mt2712_data = {
.m4u_plat = M4U_MT2712,
- .flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG,
+ .flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG | SHARE_PGTABLE |
+ MTK_IOMMU_TYPE_MM,
+ .hw_list = &m4ulist,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
.iova_region = single_domain,
+ .banks_num = 1,
+ .banks_enable = {true},
.iova_region_nr = ARRAY_SIZE(single_domain),
.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
};
static const struct mtk_iommu_plat_data mt6779_data = {
.m4u_plat = M4U_MT6779,
- .flags = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN,
+ .flags = HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | WR_THROT_EN |
+ MTK_IOMMU_TYPE_MM,
.inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
.iova_region = single_domain,
.iova_region_nr = ARRAY_SIZE(single_domain),
.larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
@@ -1047,8 +1412,10 @@ static const struct mtk_iommu_plat_data mt6779_data = {
static const struct mtk_iommu_plat_data mt8167_data = {
.m4u_plat = M4U_MT8167,
- .flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR,
+ .flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .banks_num = 1,
+ .banks_enable = {true},
.iova_region = single_domain,
.iova_region_nr = ARRAY_SIZE(single_domain),
.larbid_remap = {{0}, {1}, {2}}, /* Linear mapping. */
@@ -1057,8 +1424,10 @@ static const struct mtk_iommu_plat_data mt8167_data = {
static const struct mtk_iommu_plat_data mt8173_data = {
.m4u_plat = M4U_MT8173,
.flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
- HAS_LEGACY_IVRP_PADDR,
+ HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .banks_num = 1,
+ .banks_enable = {true},
.iova_region = single_domain,
.iova_region_nr = ARRAY_SIZE(single_domain),
.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
@@ -1066,31 +1435,100 @@ static const struct mtk_iommu_plat_data mt8173_data = {
static const struct mtk_iommu_plat_data mt8183_data = {
.m4u_plat = M4U_MT8183,
- .flags = RESET_AXI,
+ .flags = RESET_AXI | MTK_IOMMU_TYPE_MM,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .banks_num = 1,
+ .banks_enable = {true},
.iova_region = single_domain,
.iova_region_nr = ARRAY_SIZE(single_domain),
.larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
};
+static const struct mtk_iommu_plat_data mt8186_data_mm = {
+ .m4u_plat = M4U_MT8186,
+ .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
+ WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM,
+ .larbid_remap = {{0}, {1, MTK_INVALID_LARBID, 8}, {4}, {7}, {2}, {9, 11, 19, 20},
+ {MTK_INVALID_LARBID, 14, 16},
+ {MTK_INVALID_LARBID, 13, MTK_INVALID_LARBID, 17}},
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = mt8192_multi_dom,
+ .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+};
+
static const struct mtk_iommu_plat_data mt8192_data = {
.m4u_plat = M4U_MT8192,
- .flags = HAS_BCLK | HAS_SUB_COMM | OUT_ORDER_WR_EN |
- WR_THROT_EN | IOVA_34_EN,
+ .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
+ WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM,
.inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
.iova_region = mt8192_multi_dom,
.iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
.larbid_remap = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20},
{0, 14, 16}, {0, 13, 18, 17}},
};
+static const struct mtk_iommu_plat_data mt8195_data_infra = {
+ .m4u_plat = M4U_MT8195,
+ .flags = WR_THROT_EN | DCM_DISABLE | STD_AXI_MODE | PM_CLK_AO |
+ MTK_IOMMU_TYPE_INFRA | IFA_IOMMU_PCIE_SUPPORT,
+ .pericfg_comp_str = "mediatek,mt8195-pericfg_ao",
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 5,
+ .banks_enable = {true, false, false, false, true},
+ .banks_portmsk = {[0] = GENMASK(19, 16), /* PCIe */
+ [4] = GENMASK(31, 20), /* USB */
+ },
+ .iova_region = single_domain,
+ .iova_region_nr = ARRAY_SIZE(single_domain),
+};
+
+static const struct mtk_iommu_plat_data mt8195_data_vdo = {
+ .m4u_plat = M4U_MT8195,
+ .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
+ WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM,
+ .hw_list = &m4ulist,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = mt8192_multi_dom,
+ .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .larbid_remap = {{2, 0}, {21}, {24}, {7}, {19}, {9, 10, 11},
+ {13, 17, 15/* 17b */, 25}, {5}},
+};
+
+static const struct mtk_iommu_plat_data mt8195_data_vpp = {
+ .m4u_plat = M4U_MT8195,
+ .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN |
+ WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM,
+ .hw_list = &m4ulist,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = mt8192_multi_dom,
+ .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .larbid_remap = {{1}, {3},
+ {22, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 23},
+ {8}, {20}, {12},
+ /* 16: 16a; 29: 16b; 30: CCUtop0; 31: CCUtop1 */
+ {14, 16, 29, 26, 30, 31, 18},
+ {4, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 6}},
+};
+
static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
{ .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
+ { .compatible = "mediatek,mt8186-iommu-mm", .data = &mt8186_data_mm}, /* mm: m4u */
{ .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data},
+ { .compatible = "mediatek,mt8195-iommu-infra", .data = &mt8195_data_infra},
+ { .compatible = "mediatek,mt8195-iommu-vdo", .data = &mt8195_data_vdo},
+ { .compatible = "mediatek,mt8195-iommu-vpp", .data = &mt8195_data_vpp},
{}
};
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
deleted file mode 100644
index b742432220c5..000000000000
--- a/drivers/iommu/mtk_iommu.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2016 MediaTek Inc.
- * Author: Honghui Zhang <honghui.zhang@mediatek.com>
- */
-
-#ifndef _MTK_IOMMU_H_
-#define _MTK_IOMMU_H_
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/io-pgtable.h>
-#include <linux/iommu.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
-#include <soc/mediatek/smi.h>
-#include <dt-bindings/memory/mtk-memory-port.h>
-
-#define MTK_LARB_COM_MAX 8
-#define MTK_LARB_SUBCOM_MAX 4
-
-#define MTK_IOMMU_GROUP_MAX 8
-
-struct mtk_iommu_suspend_reg {
- union {
- u32 standard_axi_mode;/* v1 */
- u32 misc_ctrl;/* v2 */
- };
- u32 dcm_dis;
- u32 ctrl_reg;
- u32 int_control0;
- u32 int_main_control;
- u32 ivrp_paddr;
- u32 vld_pa_rng;
- u32 wr_len_ctrl;
-};
-
-enum mtk_iommu_plat {
- M4U_MT2701,
- M4U_MT2712,
- M4U_MT6779,
- M4U_MT8167,
- M4U_MT8173,
- M4U_MT8183,
- M4U_MT8192,
-};
-
-struct mtk_iommu_iova_region;
-
-struct mtk_iommu_plat_data {
- enum mtk_iommu_plat m4u_plat;
- u32 flags;
- u32 inv_sel_reg;
-
- unsigned int iova_region_nr;
- const struct mtk_iommu_iova_region *iova_region;
- unsigned char larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX];
-};
-
-struct mtk_iommu_domain;
-
-struct mtk_iommu_data {
- void __iomem *base;
- int irq;
- struct device *dev;
- struct clk *bclk;
- phys_addr_t protect_base; /* protect memory base */
- struct mtk_iommu_suspend_reg reg;
- struct mtk_iommu_domain *m4u_dom;
- struct iommu_group *m4u_group[MTK_IOMMU_GROUP_MAX];
- bool enable_4GB;
- spinlock_t tlb_lock; /* lock for tlb range flush */
-
- struct iommu_device iommu;
- const struct mtk_iommu_plat_data *plat_data;
- struct device *smicomm_dev;
-
- struct dma_iommu_mapping *mapping; /* For mtk_iommu_v1.c */
-
- struct list_head list;
- struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
-};
-
-static inline int mtk_iommu_bind(struct device *dev)
-{
- struct mtk_iommu_data *data = dev_get_drvdata(dev);
-
- return component_bind_all(dev, &data->larb_imu);
-}
-
-static inline void mtk_iommu_unbind(struct device *dev)
-{
- struct mtk_iommu_data *data = dev_get_drvdata(dev);
-
- component_unbind_all(dev, &data->larb_imu);
-}
-
-#endif
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index ecff800656e6..e1cb51b9866c 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -7,7 +7,6 @@
*
* Based on driver/iommu/mtk_iommu.c
*/
-#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
@@ -28,10 +27,9 @@
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <asm/dma-iommu.h>
-#include <linux/init.h>
+#include <dt-bindings/memory/mtk-memory-port.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>
-#include "mtk_iommu.h"
#define REG_MMU_PT_BASE_ADDR 0x000
@@ -80,6 +78,7 @@
/* MTK generation one iommu HW only support 4K size mapping */
#define MT2701_IOMMU_PAGE_SHIFT 12
#define MT2701_IOMMU_PAGE_SIZE (1UL << MT2701_IOMMU_PAGE_SHIFT)
+#define MT2701_LARB_NR_MAX 3
/*
* MTK m4u support 4GB iova address space, and only support 4K page
@@ -87,17 +86,53 @@
*/
#define M2701_IOMMU_PGT_SIZE SZ_4M
-struct mtk_iommu_domain {
+struct mtk_iommu_v1_suspend_reg {
+ u32 standard_axi_mode;
+ u32 dcm_dis;
+ u32 ctrl_reg;
+ u32 int_control0;
+};
+
+struct mtk_iommu_v1_data {
+ void __iomem *base;
+ int irq;
+ struct device *dev;
+ struct clk *bclk;
+ phys_addr_t protect_base; /* protect memory base */
+ struct mtk_iommu_v1_domain *m4u_dom;
+
+ struct iommu_device iommu;
+ struct dma_iommu_mapping *mapping;
+ struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
+
+ struct mtk_iommu_v1_suspend_reg reg;
+};
+
+struct mtk_iommu_v1_domain {
spinlock_t pgtlock; /* lock for page table */
struct iommu_domain domain;
u32 *pgt_va;
dma_addr_t pgt_pa;
- struct mtk_iommu_data *data;
+ struct mtk_iommu_v1_data *data;
};
-static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
+static int mtk_iommu_v1_bind(struct device *dev)
+{
+ struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
+
+ return component_bind_all(dev, &data->larb_imu);
+}
+
+static void mtk_iommu_v1_unbind(struct device *dev)
+{
+ struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
+
+ component_unbind_all(dev, &data->larb_imu);
+}
+
+static struct mtk_iommu_v1_domain *to_mtk_domain(struct iommu_domain *dom)
{
- return container_of(dom, struct mtk_iommu_domain, domain);
+ return container_of(dom, struct mtk_iommu_v1_domain, domain);
}
static const int mt2701_m4u_in_larb[] = {
@@ -123,7 +158,7 @@ static inline int mt2701_m4u_to_port(int id)
return id - mt2701_m4u_in_larb[larb];
}
-static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
+static void mtk_iommu_v1_tlb_flush_all(struct mtk_iommu_v1_data *data)
{
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
data->base + REG_MMU_INV_SEL);
@@ -131,8 +166,8 @@ static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
wmb(); /* Make sure the tlb flush all done */
}
-static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
- unsigned long iova, size_t size)
+static void mtk_iommu_v1_tlb_flush_range(struct mtk_iommu_v1_data *data,
+ unsigned long iova, size_t size)
{
int ret;
u32 tmp;
@@ -150,16 +185,16 @@ static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
if (ret) {
dev_warn(data->dev,
"Partial TLB flush timed out, falling back to full flush\n");
- mtk_iommu_tlb_flush_all(data);
+ mtk_iommu_v1_tlb_flush_all(data);
}
/* Clear the CPE status */
writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}
-static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
+static irqreturn_t mtk_iommu_v1_isr(int irq, void *dev_id)
{
- struct mtk_iommu_data *data = dev_id;
- struct mtk_iommu_domain *dom = data->m4u_dom;
+ struct mtk_iommu_v1_data *data = dev_id;
+ struct mtk_iommu_v1_domain *dom = data->m4u_dom;
u32 int_state, regval, fault_iova, fault_pa;
unsigned int fault_larb, fault_port;
@@ -189,13 +224,13 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
regval |= F_INT_CLR_BIT;
writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);
- mtk_iommu_tlb_flush_all(data);
+ mtk_iommu_v1_tlb_flush_all(data);
return IRQ_HANDLED;
}
-static void mtk_iommu_config(struct mtk_iommu_data *data,
- struct device *dev, bool enable)
+static void mtk_iommu_v1_config(struct mtk_iommu_v1_data *data,
+ struct device *dev, bool enable)
{
struct mtk_smi_larb_iommu *larb_mmu;
unsigned int larbid, portid;
@@ -217,9 +252,9 @@ static void mtk_iommu_config(struct mtk_iommu_data *data,
}
}
-static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
+static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data)
{
- struct mtk_iommu_domain *dom = data->m4u_dom;
+ struct mtk_iommu_v1_domain *dom = data->m4u_dom;
spin_lock_init(&dom->pgtlock);
@@ -235,9 +270,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
return 0;
}
-static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *mtk_iommu_v1_domain_alloc(unsigned type)
{
- struct mtk_iommu_domain *dom;
+ struct mtk_iommu_v1_domain *dom;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
@@ -249,21 +284,20 @@ static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
return &dom->domain;
}
-static void mtk_iommu_domain_free(struct iommu_domain *domain)
+static void mtk_iommu_v1_domain_free(struct iommu_domain *domain)
{
- struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- struct mtk_iommu_data *data = dom->data;
+ struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_v1_data *data = dom->data;
dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
dom->pgt_va, dom->pgt_pa);
kfree(to_mtk_domain(domain));
}
-static int mtk_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device *dev)
{
- struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
- struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
+ struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
struct dma_iommu_mapping *mtk_mapping;
int ret;
@@ -274,29 +308,28 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
if (!data->m4u_dom) {
data->m4u_dom = dom;
- ret = mtk_iommu_domain_finalise(data);
+ ret = mtk_iommu_v1_domain_finalise(data);
if (ret) {
data->m4u_dom = NULL;
return ret;
}
}
- mtk_iommu_config(data, dev, true);
+ mtk_iommu_v1_config(data, dev, true);
return 0;
}
-static void mtk_iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
+static void mtk_iommu_v1_detach_device(struct iommu_domain *domain, struct device *dev)
{
- struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
+ struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
- mtk_iommu_config(data, dev, false);
+ mtk_iommu_v1_config(data, dev, false);
}
-static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
- struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
unsigned long flags;
unsigned int i;
@@ -317,16 +350,15 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
spin_unlock_irqrestore(&dom->pgtlock, flags);
- mtk_iommu_tlb_flush_range(dom->data, iova, size);
+ mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);
return map_size == size ? 0 : -EEXIST;
}
-static size_t mtk_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size,
- struct iommu_iotlb_gather *gather)
+static size_t mtk_iommu_v1_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *gather)
{
- struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
unsigned long flags;
u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
@@ -335,15 +367,14 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
memset(pgt_base_iova, 0, page_num * sizeof(u32));
spin_unlock_irqrestore(&dom->pgtlock, flags);
- mtk_iommu_tlb_flush_range(dom->data, iova, size);
+ mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);
return size;
}
-static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
+static phys_addr_t mtk_iommu_v1_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
- struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
unsigned long flags;
phys_addr_t pa;
@@ -355,17 +386,16 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
return pa;
}
-static const struct iommu_ops mtk_iommu_ops;
+static const struct iommu_ops mtk_iommu_v1_ops;
/*
* MTK generation one iommu HW only support one iommu domain, and all the client
* sharing the same iova address space.
*/
-static int mtk_iommu_create_mapping(struct device *dev,
- struct of_phandle_args *args)
+static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_args *args)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct mtk_iommu_data *data;
+ struct mtk_iommu_v1_data *data;
struct platform_device *m4updev;
struct dma_iommu_mapping *mtk_mapping;
int ret;
@@ -377,11 +407,11 @@ static int mtk_iommu_create_mapping(struct device *dev,
}
if (!fwspec) {
- ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_ops);
+ ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_v1_ops);
if (ret)
return ret;
fwspec = dev_iommu_fwspec_get(dev);
- } else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_ops) {
+ } else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_v1_ops) {
return -EINVAL;
}
@@ -413,16 +443,16 @@ static int mtk_iommu_create_mapping(struct device *dev,
return 0;
}
-static int mtk_iommu_def_domain_type(struct device *dev)
+static int mtk_iommu_v1_def_domain_type(struct device *dev)
{
return IOMMU_DOMAIN_UNMANAGED;
}
-static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
+static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct of_phandle_args iommu_spec;
- struct mtk_iommu_data *data;
+ struct mtk_iommu_v1_data *data;
int err, idx = 0, larbid, larbidx;
struct device_link *link;
struct device *larbdev;
@@ -440,7 +470,7 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
"#iommu-cells",
idx, &iommu_spec)) {
- err = mtk_iommu_create_mapping(dev, &iommu_spec);
+ err = mtk_iommu_v1_create_mapping(dev, &iommu_spec);
of_node_put(iommu_spec.np);
if (err)
return ERR_PTR(err);
@@ -450,13 +480,16 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
idx++;
}
- if (!fwspec || fwspec->ops != &mtk_iommu_ops)
+ if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
return ERR_PTR(-ENODEV); /* Not a iommu client device */
data = dev_iommu_priv_get(dev);
/* Link the consumer device with the smi-larb device(supplier) */
larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
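+ /* Reject larb indexes beyond what this SoC provides before using them as an array index. */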
+ if (larbid >= MT2701_LARB_NR_MAX)
+ return ERR_PTR(-EINVAL);
+
for (idx = 1; idx < fwspec->num_ids; idx++) {
larbidx = mt2701_m4u_to_larb(fwspec->ids[idx]);
if (larbid != larbidx) {
@@ -467,6 +500,9 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
}
larbdev = data->larb_imu[larbid].dev;
+ if (!larbdev)
+ return ERR_PTR(-EINVAL);
+
link = device_link_add(dev, larbdev,
DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
if (!link)
@@ -475,10 +511,10 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
return &data->iommu;
}
-static void mtk_iommu_probe_finalize(struct device *dev)
+static void mtk_iommu_v1_probe_finalize(struct device *dev)
{
struct dma_iommu_mapping *mtk_mapping;
- struct mtk_iommu_data *data;
+ struct mtk_iommu_v1_data *data;
int err;
data = dev_iommu_priv_get(dev);
@@ -489,14 +525,14 @@ static void mtk_iommu_probe_finalize(struct device *dev)
dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}
-static void mtk_iommu_release_device(struct device *dev)
+static void mtk_iommu_v1_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct mtk_iommu_data *data;
+ struct mtk_iommu_v1_data *data;
struct device *larbdev;
unsigned int larbid;
- if (!fwspec || fwspec->ops != &mtk_iommu_ops)
+ if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
return;
data = dev_iommu_priv_get(dev);
@@ -507,7 +543,7 @@ static void mtk_iommu_release_device(struct device *dev)
iommu_fwspec_free(dev);
}
-static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
+static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
{
u32 regval;
int ret;
@@ -537,7 +573,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);
- if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
+ if (devm_request_irq(data->dev, data->irq, mtk_iommu_v1_isr, 0,
dev_name(data->dev), (void *)data)) {
writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
clk_disable_unprepare(data->bclk);
@@ -548,39 +584,39 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
return 0;
}
-static const struct iommu_ops mtk_iommu_ops = {
- .domain_alloc = mtk_iommu_domain_alloc,
- .probe_device = mtk_iommu_probe_device,
- .probe_finalize = mtk_iommu_probe_finalize,
- .release_device = mtk_iommu_release_device,
- .def_domain_type = mtk_iommu_def_domain_type,
+static const struct iommu_ops mtk_iommu_v1_ops = {
+ .domain_alloc = mtk_iommu_v1_domain_alloc,
+ .probe_device = mtk_iommu_v1_probe_device,
+ .probe_finalize = mtk_iommu_v1_probe_finalize,
+ .release_device = mtk_iommu_v1_release_device,
+ .def_domain_type = mtk_iommu_v1_def_domain_type,
.device_group = generic_device_group,
.pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
- .attach_dev = mtk_iommu_attach_device,
- .detach_dev = mtk_iommu_detach_device,
- .map = mtk_iommu_map,
- .unmap = mtk_iommu_unmap,
- .iova_to_phys = mtk_iommu_iova_to_phys,
- .free = mtk_iommu_domain_free,
+ .attach_dev = mtk_iommu_v1_attach_device,
+ .detach_dev = mtk_iommu_v1_detach_device,
+ .map = mtk_iommu_v1_map,
+ .unmap = mtk_iommu_v1_unmap,
+ .iova_to_phys = mtk_iommu_v1_iova_to_phys,
+ .free = mtk_iommu_v1_domain_free,
}
};
-static const struct of_device_id mtk_iommu_of_ids[] = {
+static const struct of_device_id mtk_iommu_v1_of_ids[] = {
{ .compatible = "mediatek,mt2701-m4u", },
{}
};
-static const struct component_master_ops mtk_iommu_com_ops = {
- .bind = mtk_iommu_bind,
- .unbind = mtk_iommu_unbind,
+static const struct component_master_ops mtk_iommu_v1_com_ops = {
+ .bind = mtk_iommu_v1_bind,
+ .unbind = mtk_iommu_v1_unbind,
};
-static int mtk_iommu_probe(struct platform_device *pdev)
+static int mtk_iommu_v1_probe(struct platform_device *pdev)
{
- struct mtk_iommu_data *data;
struct device *dev = &pdev->dev;
+ struct mtk_iommu_v1_data *data;
struct resource *res;
struct component_match *match = NULL;
void *protect;
@@ -647,7 +683,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- ret = mtk_iommu_hw_init(data);
+ ret = mtk_iommu_v1_hw_init(data);
if (ret)
return ret;
@@ -656,17 +692,17 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
+ ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev);
if (ret)
goto out_sysfs_remove;
if (!iommu_present(&platform_bus_type)) {
- ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+ ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_v1_ops);
if (ret)
goto out_dev_unreg;
}
- ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+ ret = component_master_add_with_match(dev, &mtk_iommu_v1_com_ops, match);
if (ret)
goto out_bus_set_null;
return ret;
@@ -680,9 +716,9 @@ out_sysfs_remove:
return ret;
}
-static int mtk_iommu_remove(struct platform_device *pdev)
+static int mtk_iommu_v1_remove(struct platform_device *pdev)
{
- struct mtk_iommu_data *data = platform_get_drvdata(pdev);
+ struct mtk_iommu_v1_data *data = platform_get_drvdata(pdev);
iommu_device_sysfs_remove(&data->iommu);
iommu_device_unregister(&data->iommu);
@@ -692,14 +728,14 @@ static int mtk_iommu_remove(struct platform_device *pdev)
clk_disable_unprepare(data->bclk);
devm_free_irq(&pdev->dev, data->irq, data);
- component_master_del(&pdev->dev, &mtk_iommu_com_ops);
+ component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops);
return 0;
}
-static int __maybe_unused mtk_iommu_suspend(struct device *dev)
+static int __maybe_unused mtk_iommu_v1_suspend(struct device *dev)
{
- struct mtk_iommu_data *data = dev_get_drvdata(dev);
- struct mtk_iommu_suspend_reg *reg = &data->reg;
+ struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
+ struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
void __iomem *base = data->base;
reg->standard_axi_mode = readl_relaxed(base +
@@ -710,10 +746,10 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused mtk_iommu_resume(struct device *dev)
+static int __maybe_unused mtk_iommu_v1_resume(struct device *dev)
{
- struct mtk_iommu_data *data = dev_get_drvdata(dev);
- struct mtk_iommu_suspend_reg *reg = &data->reg;
+ struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
+ struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
void __iomem *base = data->base;
writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
@@ -726,20 +762,20 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops mtk_iommu_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
+static const struct dev_pm_ops mtk_iommu_v1_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_v1_suspend, mtk_iommu_v1_resume)
};
-static struct platform_driver mtk_iommu_driver = {
- .probe = mtk_iommu_probe,
- .remove = mtk_iommu_remove,
+static struct platform_driver mtk_iommu_v1_driver = {
+ .probe = mtk_iommu_v1_probe,
+ .remove = mtk_iommu_v1_remove,
.driver = {
.name = "mtk-iommu-v1",
- .of_match_table = mtk_iommu_of_ids,
- .pm = &mtk_iommu_pm_ops,
+ .of_match_table = mtk_iommu_v1_of_ids,
+ .pm = &mtk_iommu_v1_pm_ops,
}
};
-module_platform_driver(mtk_iommu_driver);
+module_platform_driver(mtk_iommu_v1_driver);
MODULE_DESCRIPTION("IOMMU API for MediaTek M4U v1 implementations");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 3833e86c6e7b..c898bcbbce11 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -99,7 +99,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
if (!domain_device)
return -ENOMEM;
- if (zdev->dma_table) {
+ if (zdev->dma_table && !zdev->s390_domain) {
cc = zpci_dma_exit_device(zdev);
if (cc) {
rc = -EIO;
@@ -107,6 +107,9 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
}
}
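+ /* If the device was already attached to a domain, unregister its old I/O address translation first. */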
+ if (zdev->s390_domain)
+ zpci_unregister_ioat(zdev, 0);
+
zdev->dma_table = s390_domain->dma_table;
cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
virt_to_phys(zdev->dma_table));
@@ -136,7 +139,13 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
return 0;
out_restore:
- zpci_dma_init_device(zdev);
+ if (!zdev->s390_domain) {
+ zpci_dma_init_device(zdev);
+ } else {
+ zdev->dma_table = zdev->s390_domain->dma_table;
+ zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+ virt_to_phys(zdev->dma_table));
+ }
out_free:
kfree(domain_device);
@@ -167,7 +176,7 @@ static void s390_iommu_detach_device(struct iommu_domain *domain,
}
spin_unlock_irqrestore(&s390_domain->list_lock, flags);
- if (found) {
+ if (found && (zdev->s390_domain == s390_domain)) {
zdev->s390_domain = NULL;
zpci_unregister_ioat(zdev, 0);
zpci_dma_init_device(zdev);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 44fb8843e80e..4ab1038b5482 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -557,7 +557,7 @@ config LOONGSON_LIOINTC
config LOONGSON_HTPIC
bool "Loongson3 HyperTransport PIC Controller"
- depends on MACH_LOONGSON64
+ depends on MACH_LOONGSON64 && MIPS
default y
select IRQ_DOMAIN
select GENERIC_IRQ_CHIP
@@ -565,12 +565,12 @@ config LOONGSON_HTPIC
Support for the Loongson-3 HyperTransport PIC Controller.
config LOONGSON_HTVEC
- bool "Loongson3 HyperTransport Interrupt Vector Controller"
+ bool "Loongson HyperTransport Interrupt Vector Controller"
depends on MACH_LOONGSON64
default MACH_LOONGSON64
select IRQ_DOMAIN_HIERARCHY
help
- Support for the Loongson3 HyperTransport Interrupt Vector Controller.
+ Support for the Loongson HyperTransport Interrupt Vector Controller.
config LOONGSON_PCH_PIC
bool "Loongson PCH PIC Controller"
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index 649c58391618..aed88857d90f 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -16,7 +16,11 @@
#include <linux/smp.h>
#include <linux/irqchip/chained_irq.h>
+#ifdef CONFIG_MIPS
#include <loongson.h>
+#else
+#include <asm/loongson.h>
+#endif
#define LIOINTC_CHIP_IRQ 32
#define LIOINTC_NUM_PARENT 4
@@ -53,7 +57,7 @@ static void liointc_chained_handle_irq(struct irq_desc *desc)
struct liointc_handler_data *handler = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_chip_generic *gc = handler->priv->gc;
- int core = get_ebase_cpunum() % LIOINTC_NUM_CORES;
+ int core = cpu_logical_map(smp_processor_id()) % LIOINTC_NUM_CORES;
u32 pending;
chained_irq_enter(chip, desc);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 6090e647daee..a49979f41eee 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -869,6 +869,9 @@ source "drivers/leds/blink/Kconfig"
comment "Flash and Torch LED drivers"
source "drivers/leds/flash/Kconfig"
+comment "RGB LED drivers"
+source "drivers/leds/rgb/Kconfig"
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e58ecb36360f..4fd2f92cd198 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -99,6 +99,9 @@ obj-$(CONFIG_LEDS_USER) += uleds.o
# Flash and Torch LED Drivers
obj-$(CONFIG_LEDS_CLASS_FLASH) += flash/
+# RGB LED Drivers
+obj-$(CONFIG_LEDS_CLASS_MULTICOLOR) += rgb/
+
# LED Triggers
obj-$(CONFIG_LEDS_TRIGGERS) += trigger/
diff --git a/drivers/leds/flash/leds-ktd2692.c b/drivers/leds/flash/leds-ktd2692.c
index ed1f20a58bf6..670f3bf2e906 100644
--- a/drivers/leds/flash/leds-ktd2692.c
+++ b/drivers/leds/flash/leds-ktd2692.c
@@ -279,17 +279,12 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS);
ret = PTR_ERR_OR_ZERO(led->ctrl_gpio);
- if (ret) {
- dev_err(dev, "cannot get ctrl-gpios %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot get ctrl-gpios\n");
- led->aux_gpio = devm_gpiod_get(dev, "aux", GPIOD_ASIS);
- ret = PTR_ERR_OR_ZERO(led->aux_gpio);
- if (ret) {
- dev_err(dev, "cannot get aux-gpios %d\n", ret);
- return ret;
- }
+ led->aux_gpio = devm_gpiod_get_optional(dev, "aux", GPIOD_ASIS);
+ if (IS_ERR(led->aux_gpio))
+ return dev_err_probe(dev, PTR_ERR(led->aux_gpio), "cannot get aux-gpios\n");
led->regulator = devm_regulator_get(dev, "vin");
if (IS_ERR(led->regulator))
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index 22c092a4394a..fc63fce38c19 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -460,8 +460,14 @@ static int is31fl32xx_probe(struct i2c_client *client,
static int is31fl32xx_remove(struct i2c_client *client)
{
struct is31fl32xx_priv *priv = i2c_get_clientdata(client);
+ int ret;
- return is31fl32xx_reset_regs(priv);
+ ret = is31fl32xx_reset_regs(priv);
+ if (ret)
+ dev_err(&client->dev, "Failed to reset registers on removal (%pe)\n",
+ ERR_PTR(ret));
+
+ return 0;
}
/*
diff --git a/drivers/leds/leds-locomo.c b/drivers/leds/leds-locomo.c
index 42dc46e3f00f..9aa3fccd71fb 100644
--- a/drivers/leds/leds-locomo.c
+++ b/drivers/leds/leds-locomo.c
@@ -11,7 +11,6 @@
#include <linux/device.h>
#include <linux/leds.h>
-#include <mach/hardware.h>
#include <asm/hardware/locomo.h>
static void locomoled_brightness_set(struct led_classdev *led_cdev,
diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
index 50b195ff96ca..e129dcc656b8 100644
--- a/drivers/leds/leds-lp50xx.c
+++ b/drivers/leds/leds-lp50xx.c
@@ -569,10 +569,8 @@ static int lp50xx_remove(struct i2c_client *client)
int ret;
ret = lp50xx_enable_disable(led, 0);
- if (ret) {
+ if (ret)
dev_err(led->dev, "Failed to disable chip\n");
- return ret;
- }
if (led->regulator) {
ret = regulator_disable(led->regulator);
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 017794bb87ae..f72b5d1be3a6 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -318,13 +318,10 @@ static int pca9532_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
}
#endif /* CONFIG_LEDS_PCA9532_GPIO */
-static int pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
+static void pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
{
int i = n_devs;
- if (!data)
- return -EINVAL;
-
while (--i >= 0) {
switch (data->leds[i].type) {
case PCA9532_TYPE_NONE:
@@ -346,8 +343,6 @@ static int pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
if (data->gpio.parent)
gpiochip_remove(&data->gpio);
#endif
-
- return 0;
}
static int pca9532_configure(struct i2c_client *client,
@@ -555,7 +550,9 @@ static int pca9532_remove(struct i2c_client *client)
{
struct pca9532_data *data = i2c_get_clientdata(client);
- return pca9532_destroy_devices(data, data->chip_info->num_leds);
+ pca9532_destroy_devices(data, data->chip_info->num_leds);
+
+ return 0;
}
module_i2c_driver(pca9532_driver);
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c
index 208c98918433..8a8b73b4e358 100644
--- a/drivers/leds/leds-regulator.c
+++ b/drivers/leds/leds-regulator.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/leds.h>
@@ -123,34 +124,37 @@ static int regulator_led_probe(struct platform_device *pdev)
{
struct led_regulator_platform_data *pdata =
dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct led_init_data init_data = {};
struct regulator_led *led;
struct regulator *vcc;
int ret = 0;
- if (pdata == NULL) {
- dev_err(&pdev->dev, "no platform data\n");
- return -ENODEV;
- }
-
- vcc = devm_regulator_get_exclusive(&pdev->dev, "vled");
+ vcc = devm_regulator_get_exclusive(dev, "vled");
if (IS_ERR(vcc)) {
- dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name);
+ dev_err(dev, "Cannot get vcc\n");
return PTR_ERR(vcc);
}
- led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+ led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
if (led == NULL)
return -ENOMEM;
+ init_data.fwnode = dev->fwnode;
+
led->cdev.max_brightness = led_regulator_get_max_brightness(vcc);
- if (pdata->brightness > led->cdev.max_brightness) {
- dev_err(&pdev->dev, "Invalid default brightness %d\n",
+ /* Legacy platform data label assignment */
+ if (pdata) {
+ if (pdata->brightness > led->cdev.max_brightness) {
+ dev_err(dev, "Invalid default brightness %d\n",
pdata->brightness);
- return -EINVAL;
+ return -EINVAL;
+ }
+ led->cdev.brightness = pdata->brightness;
+ init_data.default_label = pdata->name;
}
led->cdev.brightness_set_blocking = regulator_led_brightness_set;
- led->cdev.name = pdata->name;
led->cdev.flags |= LED_CORE_SUSPENDRESUME;
led->vcc = vcc;
@@ -162,16 +166,10 @@ static int regulator_led_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, led);
- ret = led_classdev_register(&pdev->dev, &led->cdev);
+ ret = led_classdev_register_ext(dev, &led->cdev, &init_data);
if (ret < 0)
return ret;
- /* to expose the default value to userspace */
- led->cdev.brightness = pdata->brightness;
-
- /* Set the default led status */
- regulator_led_brightness_set(&led->cdev, led->cdev.brightness);
-
return 0;
}
@@ -184,10 +182,17 @@ static int regulator_led_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id regulator_led_of_match[] = {
+ { .compatible = "regulator-led", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, regulator_led_of_match);
+
static struct platform_driver regulator_led_driver = {
.driver = {
- .name = "leds-regulator",
- },
+ .name = "leds-regulator",
+ .of_match_table = regulator_led_of_match,
+ },
.probe = regulator_led_probe,
.remove = regulator_led_remove,
};
diff --git a/drivers/leds/rgb/Kconfig b/drivers/leds/rgb/Kconfig
new file mode 100644
index 000000000000..204cf470beae
--- /dev/null
+++ b/drivers/leds/rgb/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+
+if LEDS_CLASS_MULTICOLOR
+
+config LEDS_PWM_MULTICOLOR
+ tristate "PWM driven multi-color LED Support"
+ depends on PWM
+ help
+ This option enables support for PWM driven monochrome LEDs that are
+ grouped into multicolor LEDs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-pwm-multicolor.
+
+config LEDS_QCOM_LPG
+ tristate "LED support for Qualcomm LPG"
+ depends on OF
+ depends on PWM
+ depends on SPMI
+ help
+ This option enables support for the Light Pulse Generator found in a
+ wide variety of Qualcomm PMICs. The LPG consists of a number of PWM
+ channels and typically a shared pattern lookup table and a current
+ sink, intended to drive RGB LEDs. Each channel can either be used as
+ an LED, grouped to represent an RGB LED, or exposed as a PWM channel.
+
+ If compiled as a module, the module will be named leds-qcom-lpg.
+
+endif # LEDS_CLASS_MULTICOLOR
diff --git a/drivers/leds/rgb/Makefile b/drivers/leds/rgb/Makefile
new file mode 100644
index 000000000000..0675bc0f6e18
--- /dev/null
+++ b/drivers/leds/rgb/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_LEDS_PWM_MULTICOLOR) += leds-pwm-multicolor.o
+obj-$(CONFIG_LEDS_QCOM_LPG) += leds-qcom-lpg.o
diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
new file mode 100644
index 000000000000..45e38708ecb1
--- /dev/null
+++ b/drivers/leds/rgb/leds-pwm-multicolor.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PWM-based multi-color LED control
+ *
+ * Copyright 2022 Sven Schwermer <sven.schwermer@disruptive-technologies.com>
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/pwm.h>
+
+struct pwm_led {
+ struct pwm_device *pwm;
+ struct pwm_state state;
+};
+
+struct pwm_mc_led {
+ struct led_classdev_mc mc_cdev;
+ struct mutex lock;
+ struct pwm_led leds[];
+};
+
+static int led_pwm_mc_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev);
+ struct pwm_mc_led *priv = container_of(mc_cdev, struct pwm_mc_led, mc_cdev);
+ unsigned long long duty;
+ int ret = 0;
+ int i;
+
+ led_mc_calc_color_components(mc_cdev, brightness);
+
+ mutex_lock(&priv->lock);
+
+ for (i = 0; i < mc_cdev->num_colors; i++) {
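+ /* Scale each channel's PWM period by its colour component: duty = period * brightness / max_brightness. */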
+ duty = priv->leds[i].state.period;
+ duty *= mc_cdev->subled_info[i].brightness;
+ do_div(duty, cdev->max_brightness);
+
+ priv->leds[i].state.duty_cycle = duty;
+ priv->leds[i].state.enabled = duty > 0;
+ ret = pwm_apply_state(priv->leds[i].pwm,
+ &priv->leds[i].state);
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int iterate_subleds(struct device *dev, struct pwm_mc_led *priv,
+ struct fwnode_handle *mcnode)
+{
+ struct mc_subled *subled = priv->mc_cdev.subled_info;
+ struct fwnode_handle *fwnode;
+ struct pwm_led *pwmled;
+ u32 color;
+ int ret;
+
+ /* iterate over the nodes inside the multi-led node */
+ fwnode_for_each_child_node(mcnode, fwnode) {
+ pwmled = &priv->leds[priv->mc_cdev.num_colors];
+ pwmled->pwm = devm_fwnode_pwm_get(dev, fwnode, NULL);
+ if (IS_ERR(pwmled->pwm)) {
+ ret = PTR_ERR(pwmled->pwm);
+ dev_err(dev, "unable to request PWM: %d\n", ret);
+ goto release_fwnode;
+ }
+ pwm_init_state(pwmled->pwm, &pwmled->state);
+
+ ret = fwnode_property_read_u32(fwnode, "color", &color);
+ if (ret) {
+ dev_err(dev, "cannot read color: %d\n", ret);
+ goto release_fwnode;
+ }
+
+ subled[priv->mc_cdev.num_colors].color_index = color;
+ priv->mc_cdev.num_colors++;
+ }
+
+ return 0;
+
+release_fwnode:
+ fwnode_handle_put(fwnode);
+ return ret;
+}
+
+static int led_pwm_mc_probe(struct platform_device *pdev)
+{
+ struct fwnode_handle *mcnode, *fwnode;
+ struct led_init_data init_data = {};
+ struct led_classdev *cdev;
+ struct mc_subled *subled;
+ struct pwm_mc_led *priv;
+ int count = 0;
+ int ret = 0;
+
+ mcnode = device_get_named_child_node(&pdev->dev, "multi-led");
+ if (!mcnode)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "expected multi-led node\n");
+
+ /* count the nodes inside the multi-led node */
+ fwnode_for_each_child_node(mcnode, fwnode)
+ count++;
+
+ priv = devm_kzalloc(&pdev->dev, struct_size(priv, leds, count),
+ GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto release_mcnode;
+ }
+ mutex_init(&priv->lock);
+
+ subled = devm_kcalloc(&pdev->dev, count, sizeof(*subled), GFP_KERNEL);
+ if (!subled) {
+ ret = -ENOMEM;
+ goto release_mcnode;
+ }
+ priv->mc_cdev.subled_info = subled;
+
+ /* init the multicolor LED class device */
+ cdev = &priv->mc_cdev.led_cdev;
+ fwnode_property_read_u32(mcnode, "max-brightness",
+ &cdev->max_brightness);
+ cdev->flags = LED_CORE_SUSPENDRESUME;
+ cdev->brightness_set_blocking = led_pwm_mc_set;
+
+ ret = iterate_subleds(&pdev->dev, priv, mcnode);
+ if (ret)
+ goto release_mcnode;
+
+ init_data.fwnode = mcnode;
+ ret = devm_led_classdev_multicolor_register_ext(&pdev->dev,
+ &priv->mc_cdev,
+ &init_data);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register multicolor PWM led for %s: %d\n",
+ cdev->name, ret);
+ goto release_mcnode;
+ }
+
+ ret = led_pwm_mc_set(cdev, cdev->brightness);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to set led PWM value for %s: %d",
+ cdev->name, ret);
+
+ platform_set_drvdata(pdev, priv);
+ return 0;
+
+release_mcnode:
+ fwnode_handle_put(mcnode);
+ return ret;
+}
+
+static const struct of_device_id of_pwm_leds_mc_match[] = {
+ { .compatible = "pwm-leds-multicolor", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_pwm_leds_mc_match);
+
+static struct platform_driver led_pwm_mc_driver = {
+ .probe = led_pwm_mc_probe,
+ .driver = {
+ .name = "leds_pwm_multicolor",
+ .of_match_table = of_pwm_leds_mc_match,
+ },
+};
+module_platform_driver(led_pwm_mc_driver);
+
+MODULE_AUTHOR("Sven Schwermer <sven.schwermer@disruptive-technologies.com>");
+MODULE_DESCRIPTION("multi-color PWM LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:leds-pwm-multicolor");
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
new file mode 100644
index 000000000000..02f51cc61837
--- /dev/null
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -0,0 +1,1451 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2022 Linaro Ltd
+ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ */
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define LPG_SUBTYPE_REG 0x05
+#define LPG_SUBTYPE_LPG 0x2
+#define LPG_SUBTYPE_PWM 0xb
+#define LPG_SUBTYPE_LPG_LITE 0x11
+#define LPG_PATTERN_CONFIG_REG 0x40
+#define LPG_SIZE_CLK_REG 0x41
+#define PWM_CLK_SELECT_MASK GENMASK(1, 0)
+#define LPG_PREDIV_CLK_REG 0x42
+#define PWM_FREQ_PRE_DIV_MASK GENMASK(6, 5)
+#define PWM_FREQ_EXP_MASK GENMASK(2, 0)
+#define PWM_TYPE_CONFIG_REG 0x43
+#define PWM_VALUE_REG 0x44
+#define PWM_ENABLE_CONTROL_REG 0x46
+#define PWM_SYNC_REG 0x47
+#define LPG_RAMP_DURATION_REG 0x50
+#define LPG_HI_PAUSE_REG 0x52
+#define LPG_LO_PAUSE_REG 0x54
+#define LPG_HI_IDX_REG 0x56
+#define LPG_LO_IDX_REG 0x57
+#define PWM_SEC_ACCESS_REG 0xd0
+#define PWM_DTEST_REG(x) (0xe2 + (x) - 1)
+
+#define TRI_LED_SRC_SEL 0x45
+#define TRI_LED_EN_CTL 0x46
+#define TRI_LED_ATC_CTL 0x47
+
+#define LPG_LUT_REG(x) (0x40 + (x) * 2)
+#define RAMP_CONTROL_REG 0xc8
+
+#define LPG_RESOLUTION 512
+#define LPG_MAX_M 7
+
+struct lpg_channel;
+struct lpg_data;
+
+/**
+ * struct lpg - LPG device context
+ * @dev: pointer to LPG device
+ * @map: regmap for register access
+ * @lock: used to synchronize LED and pwm callback requests
+ * @pwm: PWM-chip object, if operating in PWM mode
+ * @data: reference to version specific data
+ * @lut_base: base address of the LUT block (optional)
+ * @lut_size: number of entries in the LUT block
+ * @lut_bitmap: allocation bitmap for LUT entries
+ * @triled_base: base address of the TRILED block (optional)
+ * @triled_src: power-source for the TRILED
+ * @triled_has_atc_ctl: true if there is a TRI_LED_ATC_CTL register
+ * @triled_has_src_sel: true if there is a TRI_LED_SRC_SEL register
+ * @channels: list of PWM channels
+ * @num_channels: number of @channels
+ */
+struct lpg {
+ struct device *dev;
+ struct regmap *map;
+
+ struct mutex lock;
+
+ struct pwm_chip pwm;
+
+ const struct lpg_data *data;
+
+ u32 lut_base;
+ u32 lut_size;
+ unsigned long *lut_bitmap;
+
+ u32 triled_base;
+ u32 triled_src;
+ bool triled_has_atc_ctl;
+ bool triled_has_src_sel;
+
+ struct lpg_channel *channels;
+ unsigned int num_channels;
+};
+
+/**
+ * struct lpg_channel - per channel data
+ * @lpg: reference to parent lpg
+ * @base: base address of the PWM channel
+ * @triled_mask: mask in TRILED to enable this channel
+ * @lut_mask: mask in LUT to start pattern generator for this channel
+ * @subtype: PMIC hardware block subtype
+ * @in_use: channel is exposed to LED framework
+ * @color: color of the LED attached to this channel
+ * @dtest_line: DTEST line for output, or 0 if disabled
+ * @dtest_value: DTEST line configuration
+ * @pwm_value: duty (in microseconds) of the generated pulses, overridden by LUT
+ * @enabled: output enabled?
+ * @period: period (in nanoseconds) of the generated pulses
+ * @clk_sel: reference clock frequency selector
+ * @pre_div_sel: divider selector of the reference clock
+ * @pre_div_exp: exponential divider of the reference clock
+ * @ramp_enabled: duty cycle is driven by iterating over lookup table
+ * @ramp_ping_pong: reverse through pattern, rather than wrapping to start
+ * @ramp_oneshot: perform only a single pass over the pattern
+ * @ramp_reverse: iterate over pattern backwards
+ * @ramp_tick_ms: length (in milliseconds) of one step in the pattern
+ * @ramp_lo_pause_ms: pause (in milliseconds) before iterating over pattern
+ * @ramp_hi_pause_ms: pause (in milliseconds) after iterating over pattern
+ * @pattern_lo_idx: start index of associated pattern
+ * @pattern_hi_idx: last index of associated pattern
+ */
+struct lpg_channel {
+ struct lpg *lpg;
+
+ u32 base;
+ unsigned int triled_mask;
+ unsigned int lut_mask;
+ unsigned int subtype;
+
+ bool in_use;
+
+ int color;
+
+ u32 dtest_line;
+ u32 dtest_value;
+
+ u16 pwm_value;
+ bool enabled;
+
+ u64 period;
+ unsigned int clk_sel;
+ unsigned int pre_div_sel;
+ unsigned int pre_div_exp;
+
+ bool ramp_enabled;
+ bool ramp_ping_pong;
+ bool ramp_oneshot;
+ bool ramp_reverse;
+ unsigned short ramp_tick_ms;
+ unsigned long ramp_lo_pause_ms;
+ unsigned long ramp_hi_pause_ms;
+
+ unsigned int pattern_lo_idx;
+ unsigned int pattern_hi_idx;
+};
+
+/**
+ * struct lpg_led - logical LED object
+ * @lpg: lpg context reference
+ * @cdev: LED class device
+ * @mcdev: Multicolor LED class device
+ * @num_channels: number of @channels
+ * @channels: list of channels associated with the LED
+ */
+struct lpg_led {
+ struct lpg *lpg;
+
+ struct led_classdev cdev;
+ struct led_classdev_mc mcdev;
+
+ unsigned int num_channels;
+ struct lpg_channel *channels[];
+};
+
+/**
+ * struct lpg_channel_data - per channel initialization data
+ * @base: base address for PWM channel registers
+ * @triled_mask: bitmask for controlling this channel in TRILED
+ */
+struct lpg_channel_data {
+ unsigned int base;
+ u8 triled_mask;
+};
+
+/**
+ * struct lpg_data - initialization data
+ * @lut_base: base address of LUT block
+ * @lut_size: number of entries in LUT
+ * @triled_base: base address of TRILED
+ * @triled_has_atc_ctl: true if there is TRI_LED_ATC_CTL register
+ * @triled_has_src_sel: true if there is TRI_LED_SRC_SEL register
+ * @num_channels: number of channels in LPG
+ * @channels: list of channel initialization data
+ */
+struct lpg_data {
+ unsigned int lut_base;
+ unsigned int lut_size;
+ unsigned int triled_base;
+ bool triled_has_atc_ctl;
+ bool triled_has_src_sel;
+ int num_channels;
+ const struct lpg_channel_data *channels;
+};
+
+static int triled_set(struct lpg *lpg, unsigned int mask, unsigned int enable)
+{
+ /* Skip if we don't have a triled block */
+ if (!lpg->triled_base)
+ return 0;
+
+ return regmap_update_bits(lpg->map, lpg->triled_base + TRI_LED_EN_CTL,
+ mask, enable);
+}
+
+static int lpg_lut_store(struct lpg *lpg, struct led_pattern *pattern,
+ size_t len, unsigned int *lo_idx, unsigned int *hi_idx)
+{
+ unsigned int idx;
+ u16 val;
+ int i;
+
+ idx = bitmap_find_next_zero_area(lpg->lut_bitmap, lpg->lut_size,
+ 0, len, 0);
+ if (idx >= lpg->lut_size)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++) {
+ val = pattern[i].brightness;
+
+ regmap_bulk_write(lpg->map, lpg->lut_base + LPG_LUT_REG(idx + i),
+ &val, sizeof(val));
+ }
+
+ bitmap_set(lpg->lut_bitmap, idx, len);
+
+ *lo_idx = idx;
+ *hi_idx = idx + len - 1;
+
+ return 0;
+}
+
+static void lpg_lut_free(struct lpg *lpg, unsigned int lo_idx, unsigned int hi_idx)
+{
+ int len;
+
+ len = hi_idx - lo_idx + 1;
+ if (len == 1)
+ return;
+
+ bitmap_clear(lpg->lut_bitmap, lo_idx, len);
+}
+
+static int lpg_lut_sync(struct lpg *lpg, unsigned int mask)
+{
+ return regmap_write(lpg->map, lpg->lut_base + RAMP_CONTROL_REG, mask);
+}
+
+static const unsigned int lpg_clk_rates[] = {0, 1024, 32768, 19200000};
+static const unsigned int lpg_pre_divs[] = {1, 3, 5, 6};
+
+static int lpg_calc_freq(struct lpg_channel *chan, uint64_t period)
+{
+ unsigned int clk_sel, best_clk = 0;
+ unsigned int div, best_div = 0;
+ unsigned int m, best_m = 0;
+ unsigned int error;
+ unsigned int best_err = UINT_MAX;
+ u64 best_period = 0;
+ u64 max_period;
+
+ /*
+ * The PWM period is determined by:
+ *
+ * resolution * pre_div * 2^M
+ * period = --------------------------
+ * refclk
+ *
+ * With the resolution fixed at 2^9 (9 bits), pre_div = {1, 3, 5, 6} and
+ * M = [0..7].
+ *
+ * This allows for periods between ~27us and 384s. As the PWM framework
+ * wants a period of equal or lower length than requested, reject
+ * anything below ~27us.
+ */
+ if (period <= (u64)NSEC_PER_SEC * LPG_RESOLUTION / 19200000)
+ return -EINVAL;
+
+ /* Limit period to largest possible value, to avoid overflows */
+ max_period = (u64)NSEC_PER_SEC * LPG_RESOLUTION * 6 * (1 << LPG_MAX_M) / 1024;
+ if (period > max_period)
+ period = max_period;
+
+ /*
+ * Search for the pre_div, refclk and M by solving the rewritten formula
+ * for each refclk and pre_div value:
+ *
+ * period * refclk
+ * M = log2 -------------------------------------
+ * NSEC_PER_SEC * pre_div * resolution
+ */
+ for (clk_sel = 1; clk_sel < ARRAY_SIZE(lpg_clk_rates); clk_sel++) {
+ u64 numerator = period * lpg_clk_rates[clk_sel];
+
+ for (div = 0; div < ARRAY_SIZE(lpg_pre_divs); div++) {
+ u64 denominator = (u64)NSEC_PER_SEC * lpg_pre_divs[div] * LPG_RESOLUTION;
+ u64 actual;
+ u64 ratio;
+
+ if (numerator < denominator)
+ continue;
+
+ ratio = div64_u64(numerator, denominator);
+ m = ilog2(ratio);
+ if (m > LPG_MAX_M)
+ m = LPG_MAX_M;
+
+ actual = DIV_ROUND_UP_ULL(denominator * (1 << m), lpg_clk_rates[clk_sel]);
+
+ error = period - actual;
+ if (error < best_err) {
+ best_err = error;
+
+ best_div = div;
+ best_m = m;
+ best_clk = clk_sel;
+ best_period = actual;
+ }
+ }
+ }
+
+ chan->clk_sel = best_clk;
+ chan->pre_div_sel = best_div;
+ chan->pre_div_exp = best_m;
+ chan->period = best_period;
+
+ return 0;
+}
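
As a rough sanity check of the period range quoted in the comment above, the extremes of the formula can be evaluated in a tiny standalone user-space program (not part of the driver; shown only to make the 27us/384s bounds concrete):

#include <stdint.h>
#include <stdio.h>

/* Evaluate period = resolution * pre_div * 2^M / refclk at its extremes. */
int main(void)
{
	const uint64_t nsec_per_sec = 1000000000ULL;

	/* Shortest period: pre_div = 1, M = 0, fastest refclk (19.2 MHz) */
	uint64_t min_ns = nsec_per_sec * 512 * 1 * (1 << 0) / 19200000;

	/* Longest period: pre_div = 6, M = 7, slowest refclk (1024 Hz) */
	uint64_t max_ns = nsec_per_sec * 512 * 6 * (1 << 7) / 1024;

	printf("min period: %llu ns (~27 us)\n", (unsigned long long)min_ns);
	printf("max period: %llu ns (384 s)\n", (unsigned long long)max_ns);
	return 0;
}
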
+
+static void lpg_calc_duty(struct lpg_channel *chan, uint64_t duty)
+{
+ unsigned int max = LPG_RESOLUTION - 1;
+ unsigned int val;
+
+ val = div64_u64(duty * lpg_clk_rates[chan->clk_sel],
+ (u64)NSEC_PER_SEC * lpg_pre_divs[chan->pre_div_sel] * (1 << chan->pre_div_exp));
+
+ chan->pwm_value = min(val, max);
+}
+
+static void lpg_apply_freq(struct lpg_channel *chan)
+{
+ unsigned long val;
+ struct lpg *lpg = chan->lpg;
+
+ if (!chan->enabled)
+ return;
+
+ val = chan->clk_sel;
+
+ /* Specify 9-bit resolution, based on the subtype of the channel */
+ switch (chan->subtype) {
+ case LPG_SUBTYPE_LPG:
+ val |= GENMASK(5, 4);
+ break;
+ case LPG_SUBTYPE_PWM:
+ val |= BIT(2);
+ break;
+ case LPG_SUBTYPE_LPG_LITE:
+ default:
+ val |= BIT(4);
+ break;
+ }
+
+ regmap_write(lpg->map, chan->base + LPG_SIZE_CLK_REG, val);
+
+ val = FIELD_PREP(PWM_FREQ_PRE_DIV_MASK, chan->pre_div_sel) |
+ FIELD_PREP(PWM_FREQ_EXP_MASK, chan->pre_div_exp);
+ regmap_write(lpg->map, chan->base + LPG_PREDIV_CLK_REG, val);
+}
+
+#define LPG_ENABLE_GLITCH_REMOVAL BIT(5)
+
+static void lpg_enable_glitch(struct lpg_channel *chan)
+{
+ struct lpg *lpg = chan->lpg;
+
+ regmap_update_bits(lpg->map, chan->base + PWM_TYPE_CONFIG_REG,
+ LPG_ENABLE_GLITCH_REMOVAL, 0);
+}
+
+static void lpg_disable_glitch(struct lpg_channel *chan)
+{
+ struct lpg *lpg = chan->lpg;
+
+ regmap_update_bits(lpg->map, chan->base + PWM_TYPE_CONFIG_REG,
+ LPG_ENABLE_GLITCH_REMOVAL,
+ LPG_ENABLE_GLITCH_REMOVAL);
+}
+
+static void lpg_apply_pwm_value(struct lpg_channel *chan)
+{
+ struct lpg *lpg = chan->lpg;
+ u16 val = chan->pwm_value;
+
+ if (!chan->enabled)
+ return;
+
+ regmap_bulk_write(lpg->map, chan->base + PWM_VALUE_REG, &val, sizeof(val));
+}
+
+#define LPG_PATTERN_CONFIG_LO_TO_HI BIT(4)
+#define LPG_PATTERN_CONFIG_REPEAT BIT(3)
+#define LPG_PATTERN_CONFIG_TOGGLE BIT(2)
+#define LPG_PATTERN_CONFIG_PAUSE_HI BIT(1)
+#define LPG_PATTERN_CONFIG_PAUSE_LO BIT(0)
+
+static void lpg_apply_lut_control(struct lpg_channel *chan)
+{
+ struct lpg *lpg = chan->lpg;
+ unsigned int hi_pause;
+ unsigned int lo_pause;
+ unsigned int conf = 0;
+ unsigned int lo_idx = chan->pattern_lo_idx;
+ unsigned int hi_idx = chan->pattern_hi_idx;
+ u16 step = chan->ramp_tick_ms;
+
+ if (!chan->ramp_enabled || chan->pattern_lo_idx == chan->pattern_hi_idx)
+ return;
+
+ hi_pause = DIV_ROUND_UP(chan->ramp_hi_pause_ms, step);
+ lo_pause = DIV_ROUND_UP(chan->ramp_lo_pause_ms, step);
+
+ if (!chan->ramp_reverse)
+ conf |= LPG_PATTERN_CONFIG_LO_TO_HI;
+ if (!chan->ramp_oneshot)
+ conf |= LPG_PATTERN_CONFIG_REPEAT;
+ if (chan->ramp_ping_pong)
+ conf |= LPG_PATTERN_CONFIG_TOGGLE;
+ if (chan->ramp_hi_pause_ms)
+ conf |= LPG_PATTERN_CONFIG_PAUSE_HI;
+ if (chan->ramp_lo_pause_ms)
+ conf |= LPG_PATTERN_CONFIG_PAUSE_LO;
+
+ regmap_write(lpg->map, chan->base + LPG_PATTERN_CONFIG_REG, conf);
+ regmap_write(lpg->map, chan->base + LPG_HI_IDX_REG, hi_idx);
+ regmap_write(lpg->map, chan->base + LPG_LO_IDX_REG, lo_idx);
+
+ regmap_bulk_write(lpg->map, chan->base + LPG_RAMP_DURATION_REG, &step, sizeof(step));
+ regmap_write(lpg->map, chan->base + LPG_HI_PAUSE_REG, hi_pause);
+ regmap_write(lpg->map, chan->base + LPG_LO_PAUSE_REG, lo_pause);
+}
+
+#define LPG_ENABLE_CONTROL_OUTPUT BIT(7)
+#define LPG_ENABLE_CONTROL_BUFFER_TRISTATE BIT(5)
+#define LPG_ENABLE_CONTROL_SRC_PWM BIT(2)
+#define LPG_ENABLE_CONTROL_RAMP_GEN BIT(1)
+
+static void lpg_apply_control(struct lpg_channel *chan)
+{
+ unsigned int ctrl;
+ struct lpg *lpg = chan->lpg;
+
+ ctrl = LPG_ENABLE_CONTROL_BUFFER_TRISTATE;
+
+ if (chan->enabled)
+ ctrl |= LPG_ENABLE_CONTROL_OUTPUT;
+
+ if (chan->pattern_lo_idx != chan->pattern_hi_idx)
+ ctrl |= LPG_ENABLE_CONTROL_RAMP_GEN;
+ else
+ ctrl |= LPG_ENABLE_CONTROL_SRC_PWM;
+
+ regmap_write(lpg->map, chan->base + PWM_ENABLE_CONTROL_REG, ctrl);
+
+ /*
+ * Due to an LPG hardware bug, in PWM mode the PWM values have to be
+ * written one more time after the output has been enabled.
+ */
+ if (chan->enabled)
+ lpg_apply_pwm_value(chan);
+}
+
+#define LPG_SYNC_PWM BIT(0)
+
+static void lpg_apply_sync(struct lpg_channel *chan)
+{
+ struct lpg *lpg = chan->lpg;
+
+ regmap_write(lpg->map, chan->base + PWM_SYNC_REG, LPG_SYNC_PWM);
+}
+
+static int lpg_parse_dtest(struct lpg *lpg)
+{
+ struct lpg_channel *chan;
+ struct device_node *np = lpg->dev->of_node;
+ int count;
+ int ret;
+ int i;
+
+ count = of_property_count_u32_elems(np, "qcom,dtest");
+ if (count == -EINVAL) {
+ return 0;
+ } else if (count < 0) {
+ ret = count;
+ goto err_malformed;
+ } else if (count != lpg->data->num_channels * 2) {
+ dev_err(lpg->dev, "qcom,dtest needs to be %d items\n",
+ lpg->data->num_channels * 2);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < lpg->data->num_channels; i++) {
+ chan = &lpg->channels[i];
+
+ ret = of_property_read_u32_index(np, "qcom,dtest", i * 2,
+ &chan->dtest_line);
+ if (ret)
+ goto err_malformed;
+
+ ret = of_property_read_u32_index(np, "qcom,dtest", i * 2 + 1,
+ &chan->dtest_value);
+ if (ret)
+ goto err_malformed;
+ }
+
+ return 0;
+
+err_malformed:
+ dev_err(lpg->dev, "malformed qcom,dtest\n");
+ return ret;
+}
+
+static void lpg_apply_dtest(struct lpg_channel *chan)
+{
+ struct lpg *lpg = chan->lpg;
+
+ if (!chan->dtest_line)
+ return;
+
+ regmap_write(lpg->map, chan->base + PWM_SEC_ACCESS_REG, 0xa5);
+ regmap_write(lpg->map, chan->base + PWM_DTEST_REG(chan->dtest_line),
+ chan->dtest_value);
+}
+
+static void lpg_apply(struct lpg_channel *chan)
+{
+ lpg_disable_glitch(chan);
+ lpg_apply_freq(chan);
+ lpg_apply_pwm_value(chan);
+ lpg_apply_control(chan);
+ lpg_apply_sync(chan);
+ lpg_apply_lut_control(chan);
+ lpg_enable_glitch(chan);
+}
+
+static void lpg_brightness_set(struct lpg_led *led, struct led_classdev *cdev,
+ struct mc_subled *subleds)
+{
+ enum led_brightness brightness;
+ struct lpg_channel *chan;
+ unsigned int triled_enabled = 0;
+ unsigned int triled_mask = 0;
+ unsigned int lut_mask = 0;
+ unsigned int duty;
+ struct lpg *lpg = led->lpg;
+ int i;
+
+ for (i = 0; i < led->num_channels; i++) {
+ chan = led->channels[i];
+ brightness = subleds[i].brightness;
+
+ if (brightness == LED_OFF) {
+ chan->enabled = false;
+ chan->ramp_enabled = false;
+ } else if (chan->pattern_lo_idx != chan->pattern_hi_idx) {
+ lpg_calc_freq(chan, NSEC_PER_MSEC);
+
+ chan->enabled = true;
+ chan->ramp_enabled = true;
+
+ lut_mask |= chan->lut_mask;
+ triled_enabled |= chan->triled_mask;
+ } else {
+ lpg_calc_freq(chan, NSEC_PER_MSEC);
+
+ duty = div_u64(brightness * chan->period, cdev->max_brightness);
+ lpg_calc_duty(chan, duty);
+ chan->enabled = true;
+ chan->ramp_enabled = false;
+
+ triled_enabled |= chan->triled_mask;
+ }
+
+ triled_mask |= chan->triled_mask;
+
+ lpg_apply(chan);
+ }
+
+ /* Toggle triled lines */
+ if (triled_mask)
+ triled_set(lpg, triled_mask, triled_enabled);
+
+ /* Trigger start of ramp generator(s) */
+ if (lut_mask)
+ lpg_lut_sync(lpg, lut_mask);
+}
+
+static void lpg_brightness_single_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct lpg_led *led = container_of(cdev, struct lpg_led, cdev);
+ struct mc_subled info;
+
+ mutex_lock(&led->lpg->lock);
+
+ info.brightness = value;
+ lpg_brightness_set(led, cdev, &info);
+
+ mutex_unlock(&led->lpg->lock);
+}
+
+static void lpg_brightness_mc_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct led_classdev_mc *mc = lcdev_to_mccdev(cdev);
+ struct lpg_led *led = container_of(mc, struct lpg_led, mcdev);
+
+ mutex_lock(&led->lpg->lock);
+
+ led_mc_calc_color_components(mc, value);
+ lpg_brightness_set(led, cdev, mc->subled_info);
+
+ mutex_unlock(&led->lpg->lock);
+}
+
+static int lpg_blink_set(struct lpg_led *led,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ struct lpg_channel *chan;
+ unsigned int period;
+ unsigned int triled_mask = 0;
+ struct lpg *lpg = led->lpg;
+ u64 duty;
+ int i;
+
+ if (!*delay_on && !*delay_off) {
+ *delay_on = 500;
+ *delay_off = 500;
+ }
+
+ duty = *delay_on * NSEC_PER_MSEC;
+ period = (*delay_on + *delay_off) * NSEC_PER_MSEC;
+
+ for (i = 0; i < led->num_channels; i++) {
+ chan = led->channels[i];
+
+ lpg_calc_freq(chan, period);
+ lpg_calc_duty(chan, duty);
+
+ chan->enabled = true;
+ chan->ramp_enabled = false;
+
+ triled_mask |= chan->triled_mask;
+
+ lpg_apply(chan);
+ }
+
+ /* Enable triled lines */
+ triled_set(lpg, triled_mask, triled_mask);
+
+ chan = led->channels[0];
+ duty = div_u64(chan->pwm_value * chan->period, LPG_RESOLUTION);
+ *delay_on = div_u64(duty, NSEC_PER_MSEC);
+ *delay_off = div_u64(chan->period - duty, NSEC_PER_MSEC);
+
+ return 0;
+}
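
To make the rounding behaviour above concrete with illustrative numbers: a request of *delay_on = *delay_off = 500 ms gives duty = 500,000,000 ns and period = 1,000,000,000 ns. By my reading of lpg_calc_freq(), the search then hits an exact match with the 1024 Hz reference clock, pre_div = 1 and M = 1 (512 * 1 * 2 / 1024 = 1 s), lpg_calc_duty() yields pwm_value = 256, and the delays written back to the caller are again 500 ms on / 500 ms off. For periods that cannot be hit exactly, the reported delays reflect the quantized hardware setting rather than the original request.
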
+
+static int lpg_blink_single_set(struct led_classdev *cdev,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ struct lpg_led *led = container_of(cdev, struct lpg_led, cdev);
+ int ret;
+
+ mutex_lock(&led->lpg->lock);
+
+ ret = lpg_blink_set(led, delay_on, delay_off);
+
+ mutex_unlock(&led->lpg->lock);
+
+ return ret;
+}
+
+static int lpg_blink_mc_set(struct led_classdev *cdev,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ struct led_classdev_mc *mc = lcdev_to_mccdev(cdev);
+ struct lpg_led *led = container_of(mc, struct lpg_led, mcdev);
+ int ret;
+
+ mutex_lock(&led->lpg->lock);
+
+ ret = lpg_blink_set(led, delay_on, delay_off);
+
+ mutex_unlock(&led->lpg->lock);
+
+ return ret;
+}
+
+static int lpg_pattern_set(struct lpg_led *led, struct led_pattern *led_pattern,
+ u32 len, int repeat)
+{
+ struct lpg_channel *chan;
+ struct lpg *lpg = led->lpg;
+ struct led_pattern *pattern;
+ unsigned int brightness_a;
+ unsigned int brightness_b;
+ unsigned int actual_len;
+ unsigned int hi_pause;
+ unsigned int lo_pause;
+ unsigned int delta_t;
+ unsigned int lo_idx;
+ unsigned int hi_idx;
+ unsigned int i;
+ bool ping_pong = true;
+ int ret = -EINVAL;
+
+ /* The hardware only supports oneshot or indefinite loops */
+ if (repeat != -1 && repeat != 1)
+ return -EINVAL;
+
+ /*
+ * The standardized leds-trigger-pattern format defines that the
+ * brightness of the LED follows a linear transition from one entry
+ * in the pattern to the next, over the given delta_t time. It also
+ * describes that, to perform an instant transition, a zero-length
+ * entry should be added following a pattern entry.
+ *
+ * The LPG hardware is only able to perform the latter (no linear
+ * transitions), so require each entry in the pattern to be followed by
+ * a zero-length transition.
+ */
+ if (len % 2)
+ return -EINVAL;
+
+ pattern = kcalloc(len / 2, sizeof(*pattern), GFP_KERNEL);
+ if (!pattern)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i += 2) {
+ if (led_pattern[i].brightness != led_pattern[i + 1].brightness)
+ goto out_free_pattern;
+ if (led_pattern[i + 1].delta_t != 0)
+ goto out_free_pattern;
+
+ pattern[i / 2].brightness = led_pattern[i].brightness;
+ pattern[i / 2].delta_t = led_pattern[i].delta_t;
+ }
+
+ len /= 2;
+
+ /*
+ * Specifying a pattern of length 1 causes the hardware to iterate
+ * through the entire LUT, so prohibit this.
+ */
+ if (len < 2)
+ goto out_free_pattern;
+
+ /*
+ * The LPG plays patterns at a fixed pace; a "low pause" can be
+ * used to stretch the first delay of the pattern and a "high pause"
+ * the last one.
+ *
+ * In order to save space the pattern can be played in "ping pong"
+ * mode, in which the pattern is first played forward, then "high
+ * pause" is applied, then the pattern is played backwards and finally
+ * the "low pause" is applied.
+ *
+ * The middle elements of the pattern are used to determine delta_t and
+ * the "low pause" and "high pause" multipliers are derrived from this.
+ *
+ * The first element in the pattern is used to determine "low pause".
+ *
+ * If the specified pattern is a palindrome the ping pong mode is
+ * enabled. In this scenario the delta_t of the middle entry (i.e. the
+ * last in the programmed pattern) determines the "high pause".
+ */
+
+ /* Detect palindromes and use "ping pong" to reduce LUT usage */
+ for (i = 0; i < len / 2; i++) {
+ brightness_a = pattern[i].brightness;
+ brightness_b = pattern[len - i - 1].brightness;
+
+ if (brightness_a != brightness_b) {
+ ping_pong = false;
+ break;
+ }
+ }
+
+ /* The pattern length to be written to the LUT */
+ if (ping_pong)
+ actual_len = (len + 1) / 2;
+ else
+ actual_len = len;
+
+ /*
+ * Validate that all delta_t in the pattern are the same, with the
+ * exception of the middle element in case of ping_pong.
+ */
+ delta_t = pattern[1].delta_t;
+ for (i = 2; i < len; i++) {
+ if (pattern[i].delta_t != delta_t) {
+ /*
+ * Allow last entry in the full or shortened pattern to
+ * specify hi pause. Reject other variations.
+ */
+ if (i != actual_len - 1)
+ goto out_free_pattern;
+ }
+ }
+
+ /* LPG_RAMP_DURATION_REG is 9 bits wide */
+ if (delta_t >= BIT(9))
+ goto out_free_pattern;
+
+ /* Find "low pause" and "high pause" in the pattern */
+ lo_pause = pattern[0].delta_t;
+ hi_pause = pattern[actual_len - 1].delta_t;
+
+ mutex_lock(&lpg->lock);
+ ret = lpg_lut_store(lpg, pattern, actual_len, &lo_idx, &hi_idx);
+ if (ret < 0)
+ goto out_unlock;
+
+ for (i = 0; i < led->num_channels; i++) {
+ chan = led->channels[i];
+
+ chan->ramp_tick_ms = delta_t;
+ chan->ramp_ping_pong = ping_pong;
+ chan->ramp_oneshot = repeat != -1;
+
+ chan->ramp_lo_pause_ms = lo_pause;
+ chan->ramp_hi_pause_ms = hi_pause;
+
+ chan->pattern_lo_idx = lo_idx;
+ chan->pattern_hi_idx = hi_idx;
+ }
+
+out_unlock:
+ mutex_unlock(&lpg->lock);
+out_free_pattern:
+ kfree(pattern);
+
+ return ret;
+}
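
A hypothetical pattern (illustrative only, not part of this patch) may help picture the palindrome/ping-pong reduction performed above:

/* Six entries as handed in by the pattern trigger: each brightness is
 * followed by a zero-length entry to request an instant transition. */
static const struct led_pattern example[] = {
	{ .delta_t = 500, .brightness = 0   }, { .delta_t = 0, .brightness = 0   },
	{ .delta_t = 500, .brightness = 255 }, { .delta_t = 0, .brightness = 255 },
	{ .delta_t = 500, .brightness = 0   }, { .delta_t = 0, .brightness = 0   },
};
/*
 * Folding away the zero-length entries leaves { 0, 255, 0 }, which is a
 * palindrome, so ping-pong mode is selected and only { 0, 255 } is stored
 * in the LUT; delta_t = 500 ms, and both the "low pause" and the "high
 * pause" end up as 500 ms in this example.
 */
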
+
+static int lpg_pattern_single_set(struct led_classdev *cdev,
+ struct led_pattern *pattern, u32 len,
+ int repeat)
+{
+ struct lpg_led *led = container_of(cdev, struct lpg_led, cdev);
+ int ret;
+
+ ret = lpg_pattern_set(led, pattern, len, repeat);
+ if (ret < 0)
+ return ret;
+
+ lpg_brightness_single_set(cdev, LED_FULL);
+
+ return 0;
+}
+
+static int lpg_pattern_mc_set(struct led_classdev *cdev,
+ struct led_pattern *pattern, u32 len,
+ int repeat)
+{
+ struct led_classdev_mc *mc = lcdev_to_mccdev(cdev);
+ struct lpg_led *led = container_of(mc, struct lpg_led, mcdev);
+ int ret;
+
+ ret = lpg_pattern_set(led, pattern, len, repeat);
+ if (ret < 0)
+ return ret;
+
+ led_mc_calc_color_components(mc, LED_FULL);
+ lpg_brightness_set(led, cdev, mc->subled_info);
+
+ return 0;
+}
+
+static int lpg_pattern_clear(struct lpg_led *led)
+{
+ struct lpg_channel *chan;
+ struct lpg *lpg = led->lpg;
+ int i;
+
+ mutex_lock(&lpg->lock);
+
+ chan = led->channels[0];
+ lpg_lut_free(lpg, chan->pattern_lo_idx, chan->pattern_hi_idx);
+
+ for (i = 0; i < led->num_channels; i++) {
+ chan = led->channels[i];
+ chan->pattern_lo_idx = 0;
+ chan->pattern_hi_idx = 0;
+ }
+
+ mutex_unlock(&lpg->lock);
+
+ return 0;
+}
+
+static int lpg_pattern_single_clear(struct led_classdev *cdev)
+{
+ struct lpg_led *led = container_of(cdev, struct lpg_led, cdev);
+
+ return lpg_pattern_clear(led);
+}
+
+static int lpg_pattern_mc_clear(struct led_classdev *cdev)
+{
+ struct led_classdev_mc *mc = lcdev_to_mccdev(cdev);
+ struct lpg_led *led = container_of(mc, struct lpg_led, mcdev);
+
+ return lpg_pattern_clear(led);
+}
+
+static int lpg_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct lpg *lpg = container_of(chip, struct lpg, pwm);
+ struct lpg_channel *chan = &lpg->channels[pwm->hwpwm];
+
+ return chan->in_use ? -EBUSY : 0;
+}
+
+/*
+ * Limitations:
+ * - Updating both duty and period is not done atomically, so the output signal
+ * will momentarily be a mix of the settings.
+ * - Changed parameters take effect immediately.
+ * - A disabled channel outputs a logical 0.
+ */
+static int lpg_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct lpg *lpg = container_of(chip, struct lpg, pwm);
+ struct lpg_channel *chan = &lpg->channels[pwm->hwpwm];
+ int ret = 0;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ mutex_lock(&lpg->lock);
+
+ if (state->enabled) {
+ ret = lpg_calc_freq(chan, state->period);
+ if (ret < 0)
+ goto out_unlock;
+
+ lpg_calc_duty(chan, state->duty_cycle);
+ }
+ chan->enabled = state->enabled;
+
+ lpg_apply(chan);
+
+ triled_set(lpg, chan->triled_mask, chan->enabled ? chan->triled_mask : 0);
+
+out_unlock:
+ mutex_unlock(&lpg->lock);
+
+ return ret;
+}
+
+static void lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct lpg *lpg = container_of(chip, struct lpg, pwm);
+ struct lpg_channel *chan = &lpg->channels[pwm->hwpwm];
+ unsigned int pre_div;
+ unsigned int refclk;
+ unsigned int val;
+ unsigned int m;
+ u16 pwm_value;
+ int ret;
+
+ ret = regmap_read(lpg->map, chan->base + LPG_SIZE_CLK_REG, &val);
+ if (ret)
+ return;
+
+ refclk = lpg_clk_rates[val & PWM_CLK_SELECT_MASK];
+ if (refclk) {
+ ret = regmap_read(lpg->map, chan->base + LPG_PREDIV_CLK_REG, &val);
+ if (ret)
+ return;
+
+ pre_div = lpg_pre_divs[FIELD_GET(PWM_FREQ_PRE_DIV_MASK, val)];
+ m = FIELD_GET(PWM_FREQ_EXP_MASK, val);
+
+ ret = regmap_bulk_read(lpg->map, chan->base + PWM_VALUE_REG, &pwm_value, sizeof(pwm_value));
+ if (ret)
+ return;
+
+ state->period = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * LPG_RESOLUTION * pre_div * (1 << m), refclk);
+ state->duty_cycle = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pwm_value * pre_div * (1 << m), refclk);
+ } else {
+ state->period = 0;
+ state->duty_cycle = 0;
+ }
+
+ ret = regmap_read(lpg->map, chan->base + PWM_ENABLE_CONTROL_REG, &val);
+ if (ret)
+ return;
+
+ state->enabled = FIELD_GET(LPG_ENABLE_CONTROL_OUTPUT, val);
+ state->polarity = PWM_POLARITY_NORMAL;
+
+ if (state->duty_cycle > state->period)
+ state->duty_cycle = state->period;
+}
+
+static const struct pwm_ops lpg_pwm_ops = {
+ .request = lpg_pwm_request,
+ .apply = lpg_pwm_apply,
+ .get_state = lpg_pwm_get_state,
+ .owner = THIS_MODULE,
+};
+
+static int lpg_add_pwm(struct lpg *lpg)
+{
+ int ret;
+
+ lpg->pwm.base = -1;
+ lpg->pwm.dev = lpg->dev;
+ lpg->pwm.npwm = lpg->num_channels;
+ lpg->pwm.ops = &lpg_pwm_ops;
+
+ ret = pwmchip_add(&lpg->pwm);
+ if (ret)
+ dev_err(lpg->dev, "failed to add PWM chip: ret %d\n", ret);
+
+ return ret;
+}
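
For reference, a minimal sketch of how a consumer elsewhere in the kernel might drive one of these channels through the generic PWM API (hypothetical consumer code, not part of this patch; the channel must not already be claimed by an LED node, or lpg_pwm_request() returns -EBUSY):

#include <linux/pwm.h>

/* Illustrative only: 1 kHz, 25% duty, normal polarity. */
static int example_enable_pwm(struct device *dev)
{
	struct pwm_device *pwm;
	struct pwm_state state;

	pwm = devm_pwm_get(dev, NULL);	/* mapping comes from the DT "pwms" property */
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	pwm_init_state(pwm, &state);
	state.period = 1000000;		/* 1 ms, in nanoseconds */
	state.duty_cycle = 250000;	/* 250 us */
	state.enabled = true;

	return pwm_apply_state(pwm, &state);
}
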
+
+static int lpg_parse_channel(struct lpg *lpg, struct device_node *np,
+ struct lpg_channel **channel)
+{
+ struct lpg_channel *chan;
+ u32 color = LED_COLOR_ID_GREEN;
+ u32 reg;
+ int ret;
+
+ ret = of_property_read_u32(np, "reg", &reg);
+ if (ret || !reg || reg > lpg->num_channels) {
+ dev_err(lpg->dev, "invalid \"reg\" of %pOFn\n", np);
+ return -EINVAL;
+ }
+
+ chan = &lpg->channels[reg - 1];
+ chan->in_use = true;
+
+ ret = of_property_read_u32(np, "color", &color);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(lpg->dev, "failed to parse \"color\" of %pOF\n", np);
+ return ret;
+ }
+
+ chan->color = color;
+
+ *channel = chan;
+
+ return 0;
+}
+
+static int lpg_add_led(struct lpg *lpg, struct device_node *np)
+{
+ struct led_init_data init_data = {};
+ struct led_classdev *cdev;
+ struct device_node *child;
+ struct mc_subled *info;
+ struct lpg_led *led;
+ const char *state;
+ int num_channels;
+ u32 color = 0;
+ int ret;
+ int i;
+
+ ret = of_property_read_u32(np, "color", &color);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(lpg->dev, "failed to parse \"color\" of %pOF\n", np);
+ return ret;
+ }
+
+ if (color == LED_COLOR_ID_RGB)
+ num_channels = of_get_available_child_count(np);
+ else
+ num_channels = 1;
+
+ led = devm_kzalloc(lpg->dev, struct_size(led, channels, num_channels), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->lpg = lpg;
+ led->num_channels = num_channels;
+
+ if (color == LED_COLOR_ID_RGB) {
+ info = devm_kcalloc(lpg->dev, num_channels, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ i = 0;
+ for_each_available_child_of_node(np, child) {
+ ret = lpg_parse_channel(lpg, child, &led->channels[i]);
+ if (ret < 0)
+ return ret;
+
+ info[i].color_index = led->channels[i]->color;
+ info[i].intensity = 0;
+ i++;
+ }
+
+ led->mcdev.subled_info = info;
+ led->mcdev.num_colors = num_channels;
+
+ cdev = &led->mcdev.led_cdev;
+ cdev->brightness_set = lpg_brightness_mc_set;
+ cdev->blink_set = lpg_blink_mc_set;
+
+ /* Register pattern accessors only if we have a LUT block */
+ if (lpg->lut_base) {
+ cdev->pattern_set = lpg_pattern_mc_set;
+ cdev->pattern_clear = lpg_pattern_mc_clear;
+ }
+ } else {
+ ret = lpg_parse_channel(lpg, np, &led->channels[0]);
+ if (ret < 0)
+ return ret;
+
+ cdev = &led->cdev;
+ cdev->brightness_set = lpg_brightness_single_set;
+ cdev->blink_set = lpg_blink_single_set;
+
+ /* Register pattern accessors only if we have a LUT block */
+ if (lpg->lut_base) {
+ cdev->pattern_set = lpg_pattern_single_set;
+ cdev->pattern_clear = lpg_pattern_single_clear;
+ }
+ }
+
+ cdev->default_trigger = of_get_property(np, "linux,default-trigger", NULL);
+ cdev->max_brightness = LPG_RESOLUTION - 1;
+
+ if (!of_property_read_string(np, "default-state", &state) &&
+ !strcmp(state, "on"))
+ cdev->brightness = cdev->max_brightness;
+ else
+ cdev->brightness = LED_OFF;
+
+ cdev->brightness_set(cdev, cdev->brightness);
+
+ init_data.fwnode = of_fwnode_handle(np);
+
+ if (color == LED_COLOR_ID_RGB)
+ ret = devm_led_classdev_multicolor_register_ext(lpg->dev, &led->mcdev, &init_data);
+ else
+ ret = devm_led_classdev_register_ext(lpg->dev, &led->cdev, &init_data);
+ if (ret)
+ dev_err(lpg->dev, "unable to register %s\n", cdev->name);
+
+ return ret;
+}
+
+static int lpg_init_channels(struct lpg *lpg)
+{
+ const struct lpg_data *data = lpg->data;
+ struct lpg_channel *chan;
+ int i;
+
+ lpg->num_channels = data->num_channels;
+ lpg->channels = devm_kcalloc(lpg->dev, data->num_channels,
+ sizeof(struct lpg_channel), GFP_KERNEL);
+ if (!lpg->channels)
+ return -ENOMEM;
+
+ for (i = 0; i < data->num_channels; i++) {
+ chan = &lpg->channels[i];
+
+ chan->lpg = lpg;
+ chan->base = data->channels[i].base;
+ chan->triled_mask = data->channels[i].triled_mask;
+ chan->lut_mask = BIT(i);
+
+ regmap_read(lpg->map, chan->base + LPG_SUBTYPE_REG, &chan->subtype);
+ }
+
+ return 0;
+}
+
+static int lpg_init_triled(struct lpg *lpg)
+{
+ struct device_node *np = lpg->dev->of_node;
+ int ret;
+
+ /* Skip initialization if we don't have a triled block */
+ if (!lpg->data->triled_base)
+ return 0;
+
+ lpg->triled_base = lpg->data->triled_base;
+ lpg->triled_has_atc_ctl = lpg->data->triled_has_atc_ctl;
+ lpg->triled_has_src_sel = lpg->data->triled_has_src_sel;
+
+ if (lpg->triled_has_src_sel) {
+ ret = of_property_read_u32(np, "qcom,power-source", &lpg->triled_src);
+ if (ret || lpg->triled_src == 2 || lpg->triled_src > 3) {
+ dev_err(lpg->dev, "invalid power source\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Disable automatic trickle charge LED */
+ if (lpg->triled_has_atc_ctl)
+ regmap_write(lpg->map, lpg->triled_base + TRI_LED_ATC_CTL, 0);
+
+ /* Configure power source */
+ if (lpg->triled_has_src_sel)
+ regmap_write(lpg->map, lpg->triled_base + TRI_LED_SRC_SEL, lpg->triled_src);
+
+ /* Default all outputs to off */
+ regmap_write(lpg->map, lpg->triled_base + TRI_LED_EN_CTL, 0);
+
+ return 0;
+}
+
+static int lpg_init_lut(struct lpg *lpg)
+{
+ const struct lpg_data *data = lpg->data;
+
+ if (!data->lut_base)
+ return 0;
+
+ lpg->lut_base = data->lut_base;
+ lpg->lut_size = data->lut_size;
+
+ lpg->lut_bitmap = devm_bitmap_zalloc(lpg->dev, lpg->lut_size, GFP_KERNEL);
+ if (!lpg->lut_bitmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int lpg_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct lpg *lpg;
+ int ret;
+ int i;
+
+ lpg = devm_kzalloc(&pdev->dev, sizeof(*lpg), GFP_KERNEL);
+ if (!lpg)
+ return -ENOMEM;
+
+ lpg->data = of_device_get_match_data(&pdev->dev);
+ if (!lpg->data)
+ return -EINVAL;
+
+ platform_set_drvdata(pdev, lpg);
+
+ lpg->dev = &pdev->dev;
+ mutex_init(&lpg->lock);
+
+ lpg->map = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!lpg->map)
+ return dev_err_probe(&pdev->dev, -ENXIO, "parent regmap unavailable\n");
+
+ ret = lpg_init_channels(lpg);
+ if (ret < 0)
+ return ret;
+
+ ret = lpg_parse_dtest(lpg);
+ if (ret < 0)
+ return ret;
+
+ ret = lpg_init_triled(lpg);
+ if (ret < 0)
+ return ret;
+
+ ret = lpg_init_lut(lpg);
+ if (ret < 0)
+ return ret;
+
+ for_each_available_child_of_node(pdev->dev.of_node, np) {
+ ret = lpg_add_led(lpg, np);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < lpg->num_channels; i++)
+ lpg_apply_dtest(&lpg->channels[i]);
+
+ return lpg_add_pwm(lpg);
+}
+
+static int lpg_remove(struct platform_device *pdev)
+{
+ struct lpg *lpg = platform_get_drvdata(pdev);
+
+ pwmchip_remove(&lpg->pwm);
+
+ return 0;
+}
+
+static const struct lpg_data pm8916_pwm_data = {
+ .num_channels = 1,
+ .channels = (const struct lpg_channel_data[]) {
+ { .base = 0xbc00 },
+ },
+};
+
+static const struct lpg_data pm8941_lpg_data = {
+ .lut_base = 0xb000,
+ .lut_size = 64,
+
+ .triled_base = 0xd000,
+ .triled_has_atc_ctl = true,
+ .triled_has_src_sel = true,
+
+ .num_channels = 8,
+ .channels = (const struct lpg_channel_data[]) {
+ { .base = 0xb100 },
+ { .base = 0xb200 },
+ { .base = 0xb300 },
+ { .base = 0xb400 },
+ { .base = 0xb500, .triled_mask = BIT(5) },
+ { .base = 0xb600, .triled_mask = BIT(6) },
+ { .base = 0xb700, .triled_mask = BIT(7) },
+ { .base = 0xb800 },
+ },
+};
+
+static const struct lpg_data pm8994_lpg_data = {
+ .lut_base = 0xb000,
+ .lut_size = 64,
+
+ .num_channels = 6,
+ .channels = (const struct lpg_channel_data[]) {
+ { .base = 0xb100 },
+ { .base = 0xb200 },
+ { .base = 0xb300 },
+ { .base = 0xb400 },
+ { .base = 0xb500 },
+ { .base = 0xb600 },
+ },
+};
+
+static const struct lpg_data pmi8994_lpg_data = {
+ .lut_base = 0xb000,
+ .lut_size = 24,
+
+ .triled_base = 0xd000,
+ .triled_has_atc_ctl = true,
+ .triled_has_src_sel = true,
+
+ .num_channels = 4,
+ .channels = (const struct lpg_channel_data[]) {
+ { .base = 0xb100, .triled_mask = BIT(5) },
+ { .base = 0xb200, .triled_mask = BIT(6) },
+ { .base = 0xb300, .triled_mask = BIT(7) },
+ { .base = 0xb400 },
+ },
+};
+
+static const struct lpg_data pmi8998_lpg_data = {
+ .lut_base = 0xb000,
+ .lut_size = 49,
+
+ .triled_base = 0xd000,
+
+ .num_channels = 6,
+ .channels = (const struct lpg_channel_data[]) {
+ { .base = 0xb100 },
+ { .base = 0xb200 },
+ { .base = 0xb300, .triled_mask = BIT(5) },
+ { .base = 0xb400, .triled_mask = BIT(6) },
+ { .base = 0xb500, .triled_mask = BIT(7) },
+ { .base = 0xb600 },
+ },
+};
+
+static const struct lpg_data pm8150b_lpg_data = {
+ .lut_base = 0xb000,
+ .lut_size = 24,
+
+ .triled_base = 0xd000,
+
+ .num_channels = 2,
+ .channels = (const struct lpg_channel_data[]) {
+ { .base = 0xb100, .triled_mask = BIT(7) },
+ { .base = 0xb200, .triled_mask = BIT(6) },
+ },
+};
+
+static const struct lpg_data pm8150l_lpg_data = {
+ .lut_base = 0xb000,
+ .lut_size = 48,
+
+ .triled_base = 0xd000,
+
+ .num_channels = 5,
+ .channels = (const struct lpg_channel_data[]) {
+ { .base = 0xb100, .triled_mask = BIT(7) },
+ { .base = 0xb200, .triled_mask = BIT(6) },
+ { .base = 0xb300, .triled_mask = BIT(5) },
+ { .base = 0xbc00 },
+ { .base = 0xbd00 },
+
+ },
+};
+
+static const struct lpg_data pm8350c_pwm_data = {
+ .triled_base = 0xef00,
+
+ .num_channels = 4,
+ .channels = (const struct lpg_channel_data[]) {
+ { .base = 0xe800, .triled_mask = BIT(7) },
+ { .base = 0xe900, .triled_mask = BIT(6) },
+ { .base = 0xea00, .triled_mask = BIT(5) },
+ { .base = 0xeb00 },
+ },
+};
+
+static const struct of_device_id lpg_of_table[] = {
+ { .compatible = "qcom,pm8150b-lpg", .data = &pm8150b_lpg_data },
+ { .compatible = "qcom,pm8150l-lpg", .data = &pm8150l_lpg_data },
+ { .compatible = "qcom,pm8350c-pwm", .data = &pm8350c_pwm_data },
+ { .compatible = "qcom,pm8916-pwm", .data = &pm8916_pwm_data },
+ { .compatible = "qcom,pm8941-lpg", .data = &pm8941_lpg_data },
+ { .compatible = "qcom,pm8994-lpg", .data = &pm8994_lpg_data },
+ { .compatible = "qcom,pmi8994-lpg", .data = &pmi8994_lpg_data },
+ { .compatible = "qcom,pmi8998-lpg", .data = &pmi8998_lpg_data },
+ { .compatible = "qcom,pmc8180c-lpg", .data = &pm8150l_lpg_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lpg_of_table);
+
+static struct platform_driver lpg_driver = {
+ .probe = lpg_probe,
+ .remove = lpg_remove,
+ .driver = {
+ .name = "qcom-spmi-lpg",
+ .of_match_table = lpg_of_table,
+ },
+};
+module_platform_driver(lpg_driver);
+
+MODULE_DESCRIPTION("Qualcomm LPG LED driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 5cdc361da37c..539a2ed4e13d 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -44,6 +44,7 @@ config ADB_IOP
config ADB_CUDA
bool "Support for Cuda/Egret based Macs and PowerMacs"
depends on (ADB || PPC_PMAC) && !PPC_PMAC64
+ select RTC_LIB
help
This provides support for Cuda/Egret based Macintosh and
Power Macintosh systems. This includes most m68k based Macs,
@@ -57,6 +58,7 @@ config ADB_CUDA
config ADB_PMU
bool "Support for PMU based PowerMacs and PowerBooks"
depends on PPC_PMAC || MAC
+ select RTC_LIB
help
On PowerBooks, iBooks, and recent iMacs and Power Macintoshes, the
PMU is an embedded microprocessor whose primary function is to
@@ -67,6 +69,10 @@ config ADB_PMU
this device; you should do so if your machine is one of those
mentioned above.
+config ADB_PMU_EVENT
+ def_bool y
+ depends on ADB_PMU && INPUT=y
+
config ADB_PMU_LED
bool "Support for the Power/iBook front LED"
depends on PPC_PMAC && ADB_PMU
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index 49819b1b6f20..712edcb3e0b0 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -12,7 +12,8 @@ obj-$(CONFIG_MAC_EMUMOUSEBTN) += mac_hid.o
obj-$(CONFIG_INPUT_ADBHID) += adbhid.o
obj-$(CONFIG_ANSLCD) += ans-lcd.o
-obj-$(CONFIG_ADB_PMU) += via-pmu.o via-pmu-event.o
+obj-$(CONFIG_ADB_PMU) += via-pmu.o
+obj-$(CONFIG_ADB_PMU_EVENT) += via-pmu-event.o
obj-$(CONFIG_ADB_PMU_LED) += via-pmu-led.o
obj-$(CONFIG_PMAC_BACKLIGHT) += via-pmu-backlight.o
obj-$(CONFIG_ADB_CUDA) += via-cuda.o
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 73b396189039..439fab4eaa85 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -38,10 +38,10 @@
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/uaccess.h>
#ifdef CONFIG_PPC
-#include <asm/prom.h>
#include <asm/machdep.h>
#endif
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index 994ba5cb3678..b2fe7a3dc471 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -789,7 +789,8 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
switch (default_id) {
case ADB_KEYBOARD:
- hid->keycode = kmalloc(sizeof(adb_to_linux_keycodes), GFP_KERNEL);
+ hid->keycode = kmemdup(adb_to_linux_keycodes,
+ sizeof(adb_to_linux_keycodes), GFP_KERNEL);
if (!hid->keycode) {
err = -ENOMEM;
goto fail;
@@ -797,8 +798,6 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
sprintf(hid->name, "ADB keyboard");
- memcpy(hid->keycode, adb_to_linux_keycodes, sizeof(adb_to_linux_keycodes));
-
switch (original_handler_id) {
default:
keyboard_type = "<unknown>";
@@ -817,9 +816,7 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
case 0xC4: case 0xC7:
keyboard_type = "ISO, swapping keys";
input_dev->id.version = ADB_KEYBOARD_ISO;
- i = hid->keycode[10];
- hid->keycode[10] = hid->keycode[50];
- hid->keycode[50] = i;
+ swap(hid->keycode[10], hid->keycode[50]);
break;
case 0x12: case 0x15: case 0x16: case 0x17: case 0x1A:
diff --git a/drivers/macintosh/ams/ams-core.c b/drivers/macintosh/ams/ams-core.c
index 01eeb2336d1a..877e8cb23128 100644
--- a/drivers/macintosh/ams/ams-core.c
+++ b/drivers/macintosh/ams/ams-core.c
@@ -50,7 +50,7 @@ static ssize_t ams_show_current(struct device *dev,
ams_sensors(&x, &y, &z);
mutex_unlock(&ams_info.lock);
- return snprintf(buf, PAGE_SIZE, "%d %d %d\n", x, y, z);
+ return sysfs_emit(buf, "%d %d %d\n", x, y, z);
}
static DEVICE_ATTR(current, S_IRUGO, ams_show_current, NULL);
diff --git a/drivers/macintosh/ams/ams-i2c.c b/drivers/macintosh/ams/ams-i2c.c
index 21271b2e9259..d2f0cde6f9c7 100644
--- a/drivers/macintosh/ams/ams-i2c.c
+++ b/drivers/macintosh/ams/ams-i2c.c
@@ -256,8 +256,6 @@ static void ams_i2c_exit(void)
int __init ams_i2c_init(struct device_node *np)
{
- int result;
-
/* Set implementation stuff */
ams_info.of_node = np;
ams_info.exit = ams_i2c_exit;
@@ -266,7 +264,5 @@ int __init ams_i2c_init(struct device_node *np)
ams_info.clear_irq = ams_i2c_clear_irq;
ams_info.bustype = BUS_I2C;
- result = i2c_add_driver(&ams_i2c_driver);
-
- return result;
+ return i2c_add_driver(&ams_i2c_driver);
}
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index b4821c751d04..fa904b24a600 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -11,10 +11,10 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
+#include <linux/of.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include "ans-lcd.h"
diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
index dc634c2932fd..9b63bd2551c6 100644
--- a/drivers/macintosh/macio-adb.c
+++ b/drivers/macintosh/macio-adb.c
@@ -9,8 +9,11 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/pgtable.h>
-#include <asm/prom.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/adb.h>
+
#include <asm/io.h>
#include <asm/hydra.h>
#include <asm/irq.h>
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 1943a007e2d5..1ec1e5984563 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -20,13 +20,14 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <asm/machdep.h>
#include <asm/macio.h>
#include <asm/pmac_feature.h>
-#include <asm/prom.h>
#undef DEBUG
@@ -472,7 +473,7 @@ static void macio_pci_add_devices(struct macio_chip *chip)
root_res = &rdev->resource[0];
/* First scan 1st level */
- for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
+ for_each_child_of_node(pnode, np) {
if (macio_skip_device(np))
continue;
of_node_get(np);
@@ -489,7 +490,7 @@ static void macio_pci_add_devices(struct macio_chip *chip)
/* Add media bay devices if any */
if (mbdev) {
pnode = mbdev->ofdev.dev.of_node;
- for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
+ for_each_child_of_node(pnode, np) {
if (macio_skip_device(np))
continue;
of_node_get(np);
@@ -502,7 +503,7 @@ static void macio_pci_add_devices(struct macio_chip *chip)
/* Add serial ports if any */
if (sdev) {
pnode = sdev->ofdev.dev.of_node;
- for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
+ for_each_child_of_node(pnode, np) {
if (macio_skip_device(np))
continue;
of_node_get(np);
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index 27f5eefc508f..2bbe359b26d9 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/stat.h>
#include <asm/macio.h>
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index b17660c022eb..36070c6586d1 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -17,7 +17,7 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/pgtable.h>
-#include <asm/prom.h>
+
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 60311e8d6240..c28893e41a8b 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -27,7 +27,6 @@
#include <linux/of_irq.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/dbdma.h>
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index a4fbc3fc713d..b495bfa77896 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -41,7 +41,6 @@
#include <asm/byteorder.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/smu.h>
@@ -1087,7 +1086,7 @@ static int smu_open(struct inode *inode, struct file *file)
unsigned long flags;
pp = kzalloc(sizeof(struct smu_private), GFP_KERNEL);
- if (pp == 0)
+ if (!pp)
return -ENOMEM;
spin_lock_init(&pp->lock);
pp->mode = smu_file_commands;
@@ -1254,7 +1253,7 @@ static __poll_t smu_fpoll(struct file *file, poll_table *wait)
__poll_t mask = 0;
unsigned long flags;
- if (pp == 0)
+ if (!pp)
return 0;
if (pp->mode == smu_file_commands) {
@@ -1277,7 +1276,7 @@ static int smu_release(struct inode *inode, struct file *file)
unsigned long flags;
unsigned int busy;
- if (pp == 0)
+ if (!pp)
return 0;
file->private_data = NULL;
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index 7e218437730c..e604cbc91763 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -27,7 +27,6 @@
#include <linux/freezer.h>
#include <linux/of_platform.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/sections.h>
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index f55f6adf5e5f..9226b74fa08f 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -38,7 +38,6 @@
#include <linux/kthread.h>
#include <linux/of_platform.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/sections.h>
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index 3d0d0b9d471d..5071289063f0 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -18,8 +18,10 @@
#include <linux/cuda.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
#ifdef CONFIG_PPC
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#else
@@ -237,10 +239,10 @@ int __init find_via_cuda(void)
const u32 *reg;
int err;
- if (vias != 0)
+ if (vias)
return 1;
vias = of_find_node_by_name(NULL, "via-cuda");
- if (vias == 0)
+ if (!vias)
return 0;
reg = of_get_property(vias, "reg", NULL);
@@ -518,7 +520,7 @@ cuda_write(struct adb_request *req)
req->reply_len = 0;
spin_lock_irqsave(&cuda_lock, flags);
- if (current_req != 0) {
+ if (current_req) {
last_req->next = req;
last_req = req;
} else {
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index 50ada02ae75d..2194016122d2 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -12,7 +12,6 @@
#include <linux/adb.h>
#include <linux/pmu.h>
#include <asm/backlight.h>
-#include <asm/prom.h>
#define MAX_PMU_LEVEL 0xFF
diff --git a/drivers/macintosh/via-pmu-led.c b/drivers/macintosh/via-pmu-led.c
index ae067ab2373d..a4fb16d7db3c 100644
--- a/drivers/macintosh/via-pmu-led.c
+++ b/drivers/macintosh/via-pmu-led.c
@@ -25,7 +25,7 @@
#include <linux/leds.h>
#include <linux/adb.h>
#include <linux/pmu.h>
-#include <asm/prom.h>
+#include <linux/of.h>
static spinlock_t pmu_blink_lock;
static struct adb_request pmu_blink_req;
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 4b98bc26a94b..49657962d892 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -59,7 +59,6 @@
#include <asm/pmac_feature.h>
#include <asm/pmac_pfunc.h>
#include <asm/pmac_low_i2c.h>
-#include <asm/prom.h>
#include <asm/mmu_context.h>
#include <asm/cputable.h>
#include <asm/time.h>
@@ -161,7 +160,7 @@ static unsigned char __iomem *gpio_reg;
static int gpio_irq = 0;
static int gpio_irq_enabled = -1;
static volatile int pmu_suspended;
-static spinlock_t pmu_lock;
+static DEFINE_SPINLOCK(pmu_lock);
static u8 pmu_intr_mask;
static int pmu_version;
static int drop_interrupts;
@@ -305,8 +304,6 @@ int __init find_via_pmu(void)
goto fail;
}
- spin_lock_init(&pmu_lock);
-
pmu_has_adb = 1;
pmu_intr_mask = PMU_INT_PCEJECT |
@@ -388,8 +385,6 @@ int __init find_via_pmu(void)
pmu_kind = PMU_UNKNOWN;
- spin_lock_init(&pmu_lock);
-
pmu_has_adb = 1;
pmu_intr_mask = PMU_INT_PCEJECT |
@@ -1459,7 +1454,7 @@ next:
pmu_pass_intr(data, len);
/* len == 6 is probably a bad check. But how do I
* know what PMU versions send what events here? */
- if (len == 6) {
+ if (IS_ENABLED(CONFIG_ADB_PMU_EVENT) && len == 6) {
via_pmu_event(PMU_EVT_POWER, !!(data[1]&8));
via_pmu_event(PMU_EVT_LID, data[1]&1);
}
diff --git a/drivers/macintosh/windfarm_ad7417_sensor.c b/drivers/macintosh/windfarm_ad7417_sensor.c
index e7dec328c7cf..6ad6441abcbc 100644
--- a/drivers/macintosh/windfarm_ad7417_sensor.c
+++ b/drivers/macintosh/windfarm_ad7417_sensor.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/i2c.h>
-#include <asm/prom.h>
+
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/sections.h>
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 07f91ec1f960..5307b1e34261 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -35,8 +35,6 @@
#include <linux/mutex.h>
#include <linux/freezer.h>
-#include <asm/prom.h>
-
#include "windfarm.h"
#define VERSION "0.2"
diff --git a/drivers/macintosh/windfarm_cpufreq_clamp.c b/drivers/macintosh/windfarm_cpufreq_clamp.c
index 7b726f00f183..28d18ef22bbb 100644
--- a/drivers/macintosh/windfarm_cpufreq_clamp.c
+++ b/drivers/macintosh/windfarm_cpufreq_clamp.c
@@ -10,8 +10,6 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
-#include <asm/prom.h>
-
#include "windfarm.h"
#define VERSION "0.3"
diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c
index 2470e5a725c8..82e7b2005ae7 100644
--- a/drivers/macintosh/windfarm_fcu_controls.c
+++ b/drivers/macintosh/windfarm_fcu_controls.c
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/i2c.h>
-#include <asm/prom.h>
+
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/sections.h>
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index 29f48c2028b6..eb7e7f0bd219 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -15,7 +15,6 @@
#include <linux/wait.h>
#include <linux/i2c.h>
#include <linux/of_device.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/sections.h>
diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c
index 9fab0b47cd3d..807efdde86bc 100644
--- a/drivers/macintosh/windfarm_lm87_sensor.c
+++ b/drivers/macintosh/windfarm_lm87_sensor.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/i2c.h>
-#include <asm/prom.h>
+
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/sections.h>
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index 1e7b03d44ad9..55ee417fb878 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -10,7 +10,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <asm/prom.h>
+
#include <asm/pmac_low_i2c.h>
#include "windfarm.h"
diff --git a/drivers/macintosh/windfarm_mpu.h b/drivers/macintosh/windfarm_mpu.h
index 157ce6e3f32e..b5ce347d12d4 100644
--- a/drivers/macintosh/windfarm_mpu.h
+++ b/drivers/macintosh/windfarm_mpu.h
@@ -8,6 +8,8 @@
#ifndef __WINDFARM_MPU_H
#define __WINDFARM_MPU_H
+#include <linux/of.h>
+
typedef unsigned short fu16;
typedef int fs32;
typedef short fs16;
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c
index e8377ce0a95a..d1dec314ae30 100644
--- a/drivers/macintosh/windfarm_pm112.c
+++ b/drivers/macintosh/windfarm_pm112.c
@@ -12,7 +12,9 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
-#include <asm/prom.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
#include <asm/smu.h>
#include "windfarm.h"
diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c
index ba1ec6fc11d2..36312f163aac 100644
--- a/drivers/macintosh/windfarm_pm121.c
+++ b/drivers/macintosh/windfarm_pm121.c
@@ -201,7 +201,8 @@
#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/platform_device.h>
-#include <asm/prom.h>
+#include <linux/of.h>
+
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/sections.h>
diff --git a/drivers/macintosh/windfarm_pm72.c b/drivers/macintosh/windfarm_pm72.c
index e81746b87cff..e21f973551cc 100644
--- a/drivers/macintosh/windfarm_pm72.c
+++ b/drivers/macintosh/windfarm_pm72.c
@@ -11,7 +11,7 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
-#include <asm/prom.h>
+
#include <asm/smu.h>
#include "windfarm.h"
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index 82c67a4ee5f7..e0f4743f21cc 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -102,7 +102,8 @@
#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/platform_device.h>
-#include <asm/prom.h>
+#include <linux/of.h>
+
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/sections.h>
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c
index 3f346af9e3f7..c8535855360d 100644
--- a/drivers/macintosh/windfarm_pm91.c
+++ b/drivers/macintosh/windfarm_pm91.c
@@ -37,7 +37,8 @@
#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/platform_device.h>
-#include <asm/prom.h>
+#include <linux/of.h>
+
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/sections.h>
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index 7acd1684c451..e9eb7fdde48c 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -11,7 +11,7 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
-#include <asm/prom.h>
+
#include <asm/smu.h>
#include "windfarm.h"
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index 75966052819a..e9957ad49a2a 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -14,7 +14,8 @@
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/completion.h>
-#include <asm/prom.h>
+#include <linux/of.h>
+
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/sections.h>
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index e46e1153a0b4..5ade627eaa78 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -13,7 +13,7 @@
#include <linux/wait.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
-#include <asm/prom.h>
+
#include <asm/smu.h>
#include <asm/pmac_low_i2c.h>
diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c
index c8706cfb83fd..00c6fe25fcba 100644
--- a/drivers/macintosh/windfarm_smu_sensors.c
+++ b/drivers/macintosh/windfarm_smu_sensors.c
@@ -14,7 +14,8 @@
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/completion.h>
-#include <asm/prom.h>
+#include <linux/of.h>
+
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/sections.h>
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 9ed9c955add7..2acda9cea0f9 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -395,6 +395,13 @@ struct cached_dev {
atomic_t io_errors;
unsigned int error_limit;
unsigned int offline_seconds;
+
+ /*
+ * Retry to update writeback_rate if contention happens for
+ * down_read(dc->writeback_lock) in update_writeback_rate()
+ */
+#define BCH_WBRATE_UPDATE_MAX_SKIPS 15
+ unsigned int rate_update_retry;
};
enum alloc_reserve {
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ad9f16689419..e136d6edc1ed 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -2006,8 +2006,7 @@ int bch_btree_check(struct cache_set *c)
int i;
struct bkey *k = NULL;
struct btree_iter iter;
- struct btree_check_state *check_state;
- char name[32];
+ struct btree_check_state check_state;
/* check and mark root node keys */
for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
@@ -2018,63 +2017,59 @@ int bch_btree_check(struct cache_set *c)
if (c->root->level == 0)
return 0;
- check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL);
- if (!check_state)
- return -ENOMEM;
-
- check_state->c = c;
- check_state->total_threads = bch_btree_chkthread_nr();
- check_state->key_idx = 0;
- spin_lock_init(&check_state->idx_lock);
- atomic_set(&check_state->started, 0);
- atomic_set(&check_state->enough, 0);
- init_waitqueue_head(&check_state->wait);
+ memset(&check_state, 0, sizeof(struct btree_check_state));
+ check_state.c = c;
+ check_state.total_threads = bch_btree_chkthread_nr();
+ check_state.key_idx = 0;
+ spin_lock_init(&check_state.idx_lock);
+ atomic_set(&check_state.started, 0);
+ atomic_set(&check_state.enough, 0);
+ init_waitqueue_head(&check_state.wait);
+ rw_lock(0, c->root, c->root->level);
/*
* Run multiple threads to check btree nodes in parallel,
- * if check_state->enough is non-zero, it means current
+ * if check_state.enough is non-zero, it means current
* running check threads are enough, unncessary to create
* more.
*/
- for (i = 0; i < check_state->total_threads; i++) {
- /* fetch latest check_state->enough earlier */
+ for (i = 0; i < check_state.total_threads; i++) {
+ /* fetch latest check_state.enough earlier */
smp_mb__before_atomic();
- if (atomic_read(&check_state->enough))
+ if (atomic_read(&check_state.enough))
break;
- check_state->infos[i].result = 0;
- check_state->infos[i].state = check_state;
- snprintf(name, sizeof(name), "bch_btrchk[%u]", i);
- atomic_inc(&check_state->started);
+ check_state.infos[i].result = 0;
+ check_state.infos[i].state = &check_state;
- check_state->infos[i].thread =
+ check_state.infos[i].thread =
kthread_run(bch_btree_check_thread,
- &check_state->infos[i],
- name);
- if (IS_ERR(check_state->infos[i].thread)) {
+ &check_state.infos[i],
+ "bch_btrchk[%d]", i);
+ if (IS_ERR(check_state.infos[i].thread)) {
pr_err("fails to run thread bch_btrchk[%d]\n", i);
for (--i; i >= 0; i--)
- kthread_stop(check_state->infos[i].thread);
+ kthread_stop(check_state.infos[i].thread);
ret = -ENOMEM;
goto out;
}
+ atomic_inc(&check_state.started);
}
/*
* Must wait for all threads to stop.
*/
- wait_event_interruptible(check_state->wait,
- atomic_read(&check_state->started) == 0);
+ wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
- for (i = 0; i < check_state->total_threads; i++) {
- if (check_state->infos[i].result) {
- ret = check_state->infos[i].result;
+ for (i = 0; i < check_state.total_threads; i++) {
+ if (check_state.infos[i].result) {
+ ret = check_state.infos[i].result;
goto out;
}
}
out:
- kfree(check_state);
+ rw_unlock(0, c->root);
return ret;
}
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 50482107134f..1b5fdbc0d83e 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -226,7 +226,7 @@ struct btree_check_info {
int result;
};
-#define BCH_BTR_CHKTHREAD_MAX 64
+#define BCH_BTR_CHKTHREAD_MAX 12
struct btree_check_state {
struct cache_set *c;
int total_threads;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index df5347ea450b..e5da469a4235 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -405,6 +405,11 @@ err:
return ret;
}
+void bch_journal_space_reserve(struct journal *j)
+{
+ j->do_reserve = true;
+}
+
/* Journalling */
static void btree_flush_write(struct cache_set *c)
@@ -621,12 +626,30 @@ static void do_journal_discard(struct cache *ca)
}
}
+static unsigned int free_journal_buckets(struct cache_set *c)
+{
+ struct journal *j = &c->journal;
+ struct cache *ca = c->cache;
+ struct journal_device *ja = &c->cache->journal;
+ unsigned int n;
+
+ /* In case njournal_buckets is not power of 2 */
+ if (ja->cur_idx >= ja->discard_idx)
+ n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx;
+ else
+ n = ja->discard_idx - ja->cur_idx;
+
+ if (n > (1 + j->do_reserve))
+ return n - (1 + j->do_reserve);
+
+ return 0;
+}
+
static void journal_reclaim(struct cache_set *c)
{
struct bkey *k = &c->journal.key;
struct cache *ca = c->cache;
uint64_t last_seq;
- unsigned int next;
struct journal_device *ja = &ca->journal;
atomic_t p __maybe_unused;
@@ -649,12 +672,10 @@ static void journal_reclaim(struct cache_set *c)
if (c->journal.blocks_free)
goto out;
- next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
- /* No space available on this device */
- if (next == ja->discard_idx)
+ if (!free_journal_buckets(c))
goto out;
- ja->cur_idx = next;
+ ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
k->ptr[0] = MAKE_PTR(0,
bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
ca->sb.nr_this_dev);
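To make the new free_journal_buckets() arithmetic concrete, take illustrative values (not from the patch): sb.njournal_buckets = 8, ja->cur_idx = 6, ja->discard_idx = 2. Since cur_idx >= discard_idx the free region wraps around the end of the ring, so n = 8 + 2 - 6 = 4. Before run_cache_set() calls bch_journal_space_reserve(), do_reserve is false and the function reports n - 1 = 3 usable buckets; once do_reserve is set it reports n - 2 = 2, keeping one extra bucket back rather than letting journal_reclaim() advance cur_idx into the last free slot.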
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index f2ea34d5f431..cd316b4a1e95 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -105,6 +105,7 @@ struct journal {
spinlock_t lock;
spinlock_t flush_write_lock;
bool btree_flushing;
+ bool do_reserve;
/* used when waiting because the journal was full */
struct closure_waitlist wait;
struct closure io;
@@ -182,5 +183,6 @@ int bch_journal_replay(struct cache_set *c, struct list_head *list);
void bch_journal_free(struct cache_set *c);
int bch_journal_alloc(struct cache_set *c);
+void bch_journal_space_reserve(struct journal *j);
#endif /* _BCACHE_JOURNAL_H */
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 9c5dde73da88..f2c5a7e06fa9 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1105,6 +1105,12 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
* which would call closure_get(&dc->disk.cl)
*/
ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
+ if (!ddip) {
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio->bi_end_io(bio);
+ return;
+ }
+
ddip->d = d;
/* Count on the bcache device */
ddip->orig_bdev = orig_bdev;
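The request.c hunk handles a failed GFP_NOIO allocation by completing the bio with BLK_STS_RESOURCE instead of dereferencing a NULL ddip. Reduced to the bare pattern (hypothetical wrapper name, only the error path shown):

	static void submit_with_private(struct bcache_device *d, struct bio *bio)
	{
		struct detached_dev_io_private *ddip;

		ddip = kzalloc(sizeof(*ddip), GFP_NOIO);	/* GFP_NOIO: we are on the I/O path */
		if (!ddip) {
			/* Fail the bio back to its submitter with a retryable status. */
			bio->bi_status = BLK_STS_RESOURCE;
			bio->bi_end_io(bio);
			return;
		}
		/* ... fill ddip and continue submitting as before ... */
	}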
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 2f49e31142f6..3563d15dbaf2 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -2127,6 +2127,7 @@ static int run_cache_set(struct cache_set *c)
flash_devs_run(c);
+ bch_journal_space_reserve(&c->journal);
set_bit(CACHE_SET_RUNNING, &c->flags);
return 0;
err:
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 9ee0005874cd..3f0ff3aab6f2 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -235,19 +235,27 @@ static void update_writeback_rate(struct work_struct *work)
return;
}
- if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
- /*
- * If the whole cache set is idle, set_at_max_writeback_rate()
- * will set writeback rate to a max number. Then it is
- * unncessary to update writeback rate for an idle cache set
- * in maximum writeback rate number(s).
- */
- if (!set_at_max_writeback_rate(c, dc)) {
- down_read(&dc->writeback_lock);
+ /*
+ * If the whole cache set is idle, set_at_max_writeback_rate()
+ * will set writeback rate to a max number. Then it is
+ * unncessary to update writeback rate for an idle cache set
+ * in maximum writeback rate number(s).
+ */
+ if (atomic_read(&dc->has_dirty) && dc->writeback_percent &&
+ !set_at_max_writeback_rate(c, dc)) {
+ do {
+ if (!down_read_trylock((&dc->writeback_lock))) {
+ dc->rate_update_retry++;
+ if (dc->rate_update_retry <=
+ BCH_WBRATE_UPDATE_MAX_SKIPS)
+ break;
+ down_read(&dc->writeback_lock);
+ dc->rate_update_retry = 0;
+ }
__update_writeback_rate(dc);
update_gc_after_writeback(c);
up_read(&dc->writeback_lock);
- }
+ } while (0);
}
@@ -805,13 +813,11 @@ static int bch_writeback_thread(void *arg)
/* Init */
#define INIT_KEYS_EACH_TIME 500000
-#define INIT_KEYS_SLEEP_MS 100
struct sectors_dirty_init {
struct btree_op op;
unsigned int inode;
size_t count;
- struct bkey start;
};
static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
@@ -827,11 +833,8 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
KEY_START(k), KEY_SIZE(k));
op->count++;
- if (atomic_read(&b->c->search_inflight) &&
- !(op->count % INIT_KEYS_EACH_TIME)) {
- bkey_copy_key(&op->start, k);
- return -EAGAIN;
- }
+ if (!(op->count % INIT_KEYS_EACH_TIME))
+ cond_resched();
return MAP_CONTINUE;
}
@@ -846,24 +849,16 @@ static int bch_root_node_dirty_init(struct cache_set *c,
bch_btree_op_init(&op.op, -1);
op.inode = d->id;
op.count = 0;
- op.start = KEY(op.inode, 0, 0);
-
- do {
- ret = bcache_btree(map_keys_recurse,
- k,
- c->root,
- &op.op,
- &op.start,
- sectors_dirty_init_fn,
- 0);
- if (ret == -EAGAIN)
- schedule_timeout_interruptible(
- msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
- else if (ret < 0) {
- pr_warn("sectors dirty init failed, ret=%d!\n", ret);
- break;
- }
- } while (ret == -EAGAIN);
+
+ ret = bcache_btree(map_keys_recurse,
+ k,
+ c->root,
+ &op.op,
+ &KEY(op.inode, 0, 0),
+ sectors_dirty_init_fn,
+ 0);
+ if (ret < 0)
+ pr_warn("sectors dirty init failed, ret=%d!\n", ret);
return ret;
}
@@ -907,7 +902,6 @@ static int bch_dirty_init_thread(void *arg)
goto out;
}
skip_nr--;
- cond_resched();
}
if (p) {
@@ -917,7 +911,6 @@ static int bch_dirty_init_thread(void *arg)
p = NULL;
prev_idx = cur_idx;
- cond_resched();
}
out:
@@ -948,67 +941,56 @@ void bch_sectors_dirty_init(struct bcache_device *d)
struct btree_iter iter;
struct sectors_dirty_init op;
struct cache_set *c = d->c;
- struct bch_dirty_init_state *state;
- char name[32];
+ struct bch_dirty_init_state state;
/* Just count root keys if no leaf node */
+ rw_lock(0, c->root, c->root->level);
if (c->root->level == 0) {
bch_btree_op_init(&op.op, -1);
op.inode = d->id;
op.count = 0;
- op.start = KEY(op.inode, 0, 0);
for_each_key_filter(&c->root->keys,
k, &iter, bch_ptr_invalid)
sectors_dirty_init_fn(&op.op, c->root, k);
- return;
- }
- state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
- if (!state) {
- pr_warn("sectors dirty init failed: cannot allocate memory\n");
+ rw_unlock(0, c->root);
return;
}
- state->c = c;
- state->d = d;
- state->total_threads = bch_btre_dirty_init_thread_nr();
- state->key_idx = 0;
- spin_lock_init(&state->idx_lock);
- atomic_set(&state->started, 0);
- atomic_set(&state->enough, 0);
- init_waitqueue_head(&state->wait);
-
- for (i = 0; i < state->total_threads; i++) {
- /* Fetch latest state->enough earlier */
+ memset(&state, 0, sizeof(struct bch_dirty_init_state));
+ state.c = c;
+ state.d = d;
+ state.total_threads = bch_btre_dirty_init_thread_nr();
+ state.key_idx = 0;
+ spin_lock_init(&state.idx_lock);
+ atomic_set(&state.started, 0);
+ atomic_set(&state.enough, 0);
+ init_waitqueue_head(&state.wait);
+
+ for (i = 0; i < state.total_threads; i++) {
+ /* Fetch latest state.enough earlier */
smp_mb__before_atomic();
- if (atomic_read(&state->enough))
+ if (atomic_read(&state.enough))
break;
- state->infos[i].state = state;
- atomic_inc(&state->started);
- snprintf(name, sizeof(name), "bch_dirty_init[%d]", i);
-
- state->infos[i].thread =
- kthread_run(bch_dirty_init_thread,
- &state->infos[i],
- name);
- if (IS_ERR(state->infos[i].thread)) {
+ state.infos[i].state = &state;
+ state.infos[i].thread =
+ kthread_run(bch_dirty_init_thread, &state.infos[i],
+ "bch_dirtcnt[%d]", i);
+ if (IS_ERR(state.infos[i].thread)) {
pr_err("fails to run thread bch_dirty_init[%d]\n", i);
for (--i; i >= 0; i--)
- kthread_stop(state->infos[i].thread);
+ kthread_stop(state.infos[i].thread);
goto out;
}
+ atomic_inc(&state.started);
}
- /*
- * Must wait for all threads to stop.
- */
- wait_event_interruptible(state->wait,
- atomic_read(&state->started) == 0);
-
out:
- kfree(state);
+ /* Must wait for all threads to stop. */
+ wait_event(state.wait, atomic_read(&state.started) == 0);
+ rw_unlock(0, c->root);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -1032,6 +1014,9 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_rate_fp_term_high = 1000;
dc->writeback_rate_i_term_inverse = 10000;
+ /* For dc->writeback_lock contention in update_writeback_rate() */
+ dc->rate_update_retry = 0;
+
WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}
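The update_writeback_rate() change is easier to read flattened out of the diff: the periodic worker no longer sleeps on dc->writeback_lock unconditionally, it first tries the lock, tolerates up to BCH_WBRATE_UPDATE_MAX_SKIPS skipped update cycles while the lock is contended, and only then blocks. The do/while(0) from the hunk above, condensed and commented:

	do {
		if (!down_read_trylock(&dc->writeback_lock)) {
			dc->rate_update_retry++;
			/* Contended: skip this update cycle instead of sleeping here. */
			if (dc->rate_update_retry <= BCH_WBRATE_UPDATE_MAX_SKIPS)
				break;
			/* Skipped too many times: block once, then reset the counter. */
			down_read(&dc->writeback_lock);
			dc->rate_update_retry = 0;
		}
		__update_writeback_rate(dc);
		update_gc_after_writeback(c);
		up_read(&dc->writeback_lock);
	} while (0);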
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 02b2f9df73f6..31df716951f6 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -20,7 +20,7 @@
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64
-#define BCH_DIRTY_INIT_THRD_MAX 64
+#define BCH_DIRTY_INIT_THRD_MAX 12
/*
* 14 (16384ths) is chosen here as something that each backing device
* should be a reasonable fraction of the share, and not to blow up
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 0a6abbbe3745..3212ef6aa81b 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -165,11 +165,12 @@ static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
}
static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
- return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
+ return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
}
static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
@@ -180,9 +181,18 @@ static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}
+static size_t linear_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
+
+ return dax_recovery_write(dax_dev, pgoff, addr, bytes, i);
+}
+
#else
#define linear_dax_direct_access NULL
#define linear_dax_zero_page_range NULL
+#define linear_dax_recovery_write NULL
#endif
static struct target_type linear_target = {
@@ -200,6 +210,7 @@ static struct target_type linear_target = {
.iterate_devices = linear_iterate_devices,
.direct_access = linear_dax_direct_access,
.dax_zero_page_range = linear_dax_zero_page_range,
+ .dax_recovery_write = linear_dax_recovery_write,
};
int __init dm_linear_init(void)
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index e194226c89e5..20fd688f72e7 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -888,11 +888,12 @@ static struct dax_device *log_writes_dax_pgoff(struct dm_target *ti,
}
static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
- return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
+ return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
}
static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
@@ -903,9 +904,18 @@ static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
return dax_zero_page_range(dax_dev, pgoff, nr_pages << PAGE_SHIFT);
}
+static size_t log_writes_dax_recovery_write(struct dm_target *ti,
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+{
+ struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
+
+ return dax_recovery_write(dax_dev, pgoff, addr, bytes, i);
+}
+
#else
#define log_writes_dax_direct_access NULL
#define log_writes_dax_zero_page_range NULL
+#define log_writes_dax_recovery_write NULL
#endif
static struct target_type log_writes_target = {
@@ -923,6 +933,7 @@ static struct target_type log_writes_target = {
.io_hints = log_writes_io_hints,
.direct_access = log_writes_dax_direct_access,
.dax_zero_page_range = log_writes_dax_zero_page_range,
+ .dax_recovery_write = log_writes_dax_recovery_write,
};
static int __init dm_log_writes_init(void)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 9526ccbedafb..5e41fbae3f6b 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3725,7 +3725,7 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev);
+ md_reap_sync_thread(mddev, false);
}
} else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
return -EBUSY;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index c81d331d1afe..baa085cc67bd 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -315,11 +315,12 @@ static struct dax_device *stripe_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
}
static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
- return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
+ return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
}
static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
@@ -330,9 +331,18 @@ static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}
+static size_t stripe_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
+
+ return dax_recovery_write(dax_dev, pgoff, addr, bytes, i);
+}
+
#else
#define stripe_dax_direct_access NULL
#define stripe_dax_zero_page_range NULL
+#define stripe_dax_recovery_write NULL
#endif
/*
@@ -469,6 +479,7 @@ static struct target_type stripe_target = {
.io_hints = stripe_io_hints,
.direct_access = stripe_dax_direct_access,
.dax_zero_page_range = stripe_dax_zero_page_range,
+ .dax_recovery_write = stripe_dax_recovery_write,
};
int __init dm_stripe_init(void)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a37c7b763643..0e833a154b31 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1005,7 +1005,7 @@ bool dm_table_request_based(struct dm_table *t)
return __table_type_request_based(dm_table_get_type(t));
}
-static int dm_table_supports_poll(struct dm_table *t);
+static bool dm_table_supports_poll(struct dm_table *t);
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
@@ -1027,7 +1027,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
min_pool_size = max(min_pool_size, ti->num_flush_bios);
}
- poll_supported = !!dm_table_supports_poll(t);
+ poll_supported = dm_table_supports_poll(t);
}
t->mempools = dm_alloc_md_mempools(md, type, per_io_data_size, min_pool_size,
@@ -1547,9 +1547,20 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
-static int dm_table_supports_poll(struct dm_table *t)
+static bool dm_table_supports_poll(struct dm_table *t)
{
- return !dm_table_any_dev_attr(t, device_not_poll_capable, NULL);
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->type->iterate_devices ||
+ ti->type->iterate_devices(ti, device_not_poll_capable, NULL))
+ return false;
+ }
+
+ return true;
}
/*
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 64dd0b34fcf4..8cd5184e62f0 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/bio.h>
+#include <linux/dax.h>
#define DM_MSG_PREFIX "target"
@@ -142,7 +143,8 @@ static void io_err_release_clone_rq(struct request *clone,
}
static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
return -EIO;
}
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 80133aae0db3..d6dbd47492a8 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -1312,6 +1312,7 @@ bad:
static struct target_type verity_target = {
.name = "verity",
+ .features = DM_TARGET_IMMUTABLE,
.version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 5630b470ba42..d74c5a7a0ab4 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -286,7 +286,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
id = dax_read_lock();
- da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
+ da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, DAX_ACCESS,
+ &wc->memory_map, &pfn);
if (da < 0) {
wc->memory_map = NULL;
r = da;
@@ -308,8 +309,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
i = 0;
do {
long daa;
- daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
- NULL, &pfn);
+ daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i,
+ p - i, DAX_ACCESS, NULL, &pfn);
if (daa <= 0) {
r = daa ? daa : -EINVAL;
goto err3;
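As the dm-writecache hunks show, dax_direct_access() now takes an enum dax_access_mode argument, and existing callers pass DAX_ACCESS to keep the old behaviour (the recovery path introduced by this series uses a separate mode value). A minimal caller sketch under the new signature, assuming dax_dev, pgoff and want are already set up:

	void *kaddr;
	pfn_t pfn;
	long nr;

	/* Normal mapping request: ask for up to 'want' pages at 'pgoff'. */
	nr = dax_direct_access(dax_dev, pgoff, want, DAX_ACCESS, &kaddr, &pfn);
	if (nr < 0)
		return nr;	/* e.g. a media error */
	/* nr pages are now mapped at kaddr; this may be fewer than requested. */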
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d62f1354ecbf..dfb0a551bd88 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1143,7 +1143,8 @@ static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
}
static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
struct mapped_device *md = dax_get_private(dax_dev);
sector_t sector = pgoff * PAGE_SECTORS;
@@ -1161,7 +1162,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
if (len < 1)
goto out;
nr_pages = min(len, nr_pages);
- ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
+ ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);
out:
dm_put_live_table(md, srcu_idx);
@@ -1196,6 +1197,25 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
return ret;
}
+static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ struct mapped_device *md = dax_get_private(dax_dev);
+ sector_t sector = pgoff * PAGE_SECTORS;
+ struct dm_target *ti;
+ int srcu_idx;
+ long ret = 0;
+
+ ti = dm_dax_get_live_target(md, sector, &srcu_idx);
+ if (!ti || !ti->type->dax_recovery_write)
+ goto out;
+
+ ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
+out:
+ dm_put_live_table(md, srcu_idx);
+ return ret;
+}
+
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
@@ -3231,6 +3251,7 @@ static const struct block_device_operations dm_rq_blk_dops = {
static const struct dax_operations dm_dax_ops = {
.direct_access = dm_dax_direct_access,
.zero_page_range = dm_dax_zero_page_range,
+ .recovery_write = dm_dax_recovery_write,
};
/*
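Taken together with the dm-linear, dm-stripe and dm-log-writes hunks, the recovery_write path now mirrors direct_access: dax_recovery_write() on the mapped device's dax_device dispatches to dm_dax_recovery_write() via dm_dax_ops, which resolves the live target for the sector and calls ti->type->dax_recovery_write; each target then adjusts pgoff for its underlying device and forwards to dax_recovery_write() again. Targets that do not implement the hook fall through the !ti->type->dax_recovery_write check above and report 0 bytes written.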
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 138a3b25c5c8..6e7797b4e738 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -206,7 +206,6 @@ static void linear_free(struct mddev *mddev, void *priv)
static bool linear_make_request(struct mddev *mddev, struct bio *bio)
{
- char b[BDEVNAME_SIZE];
struct dev_info *tmp_dev;
sector_t start_sector, end_sector, data_offset;
sector_t bio_sector = bio->bi_iter.bi_sector;
@@ -256,10 +255,10 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
return true;
out_of_bounds:
- pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n",
+ pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %pg: %llu sectors, offset %llu\n",
mdname(mddev),
(unsigned long long)bio->bi_iter.bi_sector,
- bdevname(tmp_dev->rdev->bdev, b),
+ tmp_dev->rdev->bdev,
(unsigned long long)tmp_dev->rdev->sectors,
(unsigned long long)start_sector);
bio_io_error(bio);
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index 1c6dbf92c136..66edf5e72bd6 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -87,10 +87,9 @@ static void multipath_end_request(struct bio *bio)
/*
* oops, IO error:
*/
- char b[BDEVNAME_SIZE];
md_error (mp_bh->mddev, rdev);
- pr_info("multipath: %s: rescheduling sector %llu\n",
- bdevname(rdev->bdev,b),
+ pr_info("multipath: %pg: rescheduling sector %llu\n",
+ rdev->bdev,
(unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh);
} else
@@ -154,7 +153,6 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev)
static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
{
struct mpconf *conf = mddev->private;
- char b[BDEVNAME_SIZE];
if (conf->raid_disks - mddev->degraded <= 1) {
/*
@@ -177,9 +175,9 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
}
set_bit(Faulty, &rdev->flags);
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
- pr_err("multipath: IO failure on %s, disabling IO path.\n"
+ pr_err("multipath: IO failure on %pg, disabling IO path.\n"
"multipath: Operation continuing on %d IO paths.\n",
- bdevname(rdev->bdev, b),
+ rdev->bdev,
conf->raid_disks - mddev->degraded);
}
@@ -197,12 +195,11 @@ static void print_multipath_conf (struct mpconf *conf)
conf->raid_disks);
for (i = 0; i < conf->raid_disks; i++) {
- char b[BDEVNAME_SIZE];
tmp = conf->multipaths + i;
if (tmp->rdev)
- pr_debug(" disk%d, o:%d, dev:%s\n",
+ pr_debug(" disk%d, o:%d, dev:%pg\n",
i,!test_bit(Faulty, &tmp->rdev->flags),
- bdevname(tmp->rdev->bdev,b));
+ tmp->rdev->bdev);
}
}
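The bdevname() removals here and throughout the md/raid files below rely on the %pg printk format, which takes a struct block_device * directly and so needs no BDEVNAME_SIZE stack buffer. A minimal before/after illustration (hypothetical message text):

	char b[BDEVNAME_SIZE];

	pr_info("resyncing %s\n", bdevname(bdev, b));	/* old: copy the name into a local buffer */

	pr_info("resyncing %pg\n", bdev);		/* new: printk formats the bdev itself */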
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 707e802d0082..8273ac5eef06 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1021,8 +1021,6 @@ EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(struct md_rdev *rdev, int size)
{
- char b[BDEVNAME_SIZE];
-
if (rdev->sb_loaded)
return 0;
@@ -1032,8 +1030,8 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
return 0;
fail:
- pr_err("md: disabled device %s, could not read superblock.\n",
- bdevname(rdev->bdev,b));
+ pr_err("md: disabled device %pg, could not read superblock.\n",
+ rdev->bdev);
return -EINVAL;
}
@@ -1179,7 +1177,6 @@ EXPORT_SYMBOL(md_check_no_bitmap);
*/
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
- char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
mdp_super_t *sb;
int ret;
bool spare_disk = true;
@@ -1198,19 +1195,19 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
ret = -EINVAL;
- bdevname(rdev->bdev, b);
sb = page_address(rdev->sb_page);
if (sb->md_magic != MD_SB_MAGIC) {
- pr_warn("md: invalid raid superblock magic on %s\n", b);
+ pr_warn("md: invalid raid superblock magic on %pg\n",
+ rdev->bdev);
goto abort;
}
if (sb->major_version != 0 ||
sb->minor_version < 90 ||
sb->minor_version > 91) {
- pr_warn("Bad version number %d.%d on %s\n",
- sb->major_version, sb->minor_version, b);
+ pr_warn("Bad version number %d.%d on %pg\n",
+ sb->major_version, sb->minor_version, rdev->bdev);
goto abort;
}
@@ -1218,7 +1215,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
goto abort;
if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
- pr_warn("md: invalid superblock checksum on %s\n", b);
+ pr_warn("md: invalid superblock checksum on %pg\n", rdev->bdev);
goto abort;
}
@@ -1250,13 +1247,13 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
__u64 ev1, ev2;
mdp_super_t *refsb = page_address(refdev->sb_page);
if (!md_uuid_equal(refsb, sb)) {
- pr_warn("md: %s has different UUID to %s\n",
- b, bdevname(refdev->bdev,b2));
+ pr_warn("md: %pg has different UUID to %pg\n",
+ rdev->bdev, refdev->bdev);
goto abort;
}
if (!md_sb_equal(refsb, sb)) {
- pr_warn("md: %s has same UUID but different superblock to %s\n",
- b, bdevname(refdev->bdev, b2));
+ pr_warn("md: %pg has same UUID but different superblock to %pg\n",
+ rdev->bdev, refdev->bdev);
goto abort;
}
ev1 = md_event(sb);
@@ -1620,7 +1617,6 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
int ret;
sector_t sb_start;
sector_t sectors;
- char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
int bmask;
bool spare_disk = true;
@@ -1664,13 +1660,13 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
return -EINVAL;
if (calc_sb_1_csum(sb) != sb->sb_csum) {
- pr_warn("md: invalid superblock checksum on %s\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: invalid superblock checksum on %pg\n",
+ rdev->bdev);
return -EINVAL;
}
if (le64_to_cpu(sb->data_size) < 10) {
- pr_warn("md: data_size too small on %s\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: data_size too small on %pg\n",
+ rdev->bdev);
return -EINVAL;
}
if (sb->pad0 ||
@@ -1776,9 +1772,9 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
sb->level != refsb->level ||
sb->layout != refsb->layout ||
sb->chunksize != refsb->chunksize) {
- pr_warn("md: %s has strangely different superblock to %s\n",
- bdevname(rdev->bdev,b),
- bdevname(refdev->bdev,b2));
+ pr_warn("md: %pg has strangely different superblock to %pg\n",
+ rdev->bdev,
+ refdev->bdev);
return -EINVAL;
}
ev1 = le64_to_cpu(sb->events);
@@ -2365,7 +2361,6 @@ EXPORT_SYMBOL(md_integrity_register);
int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
struct blk_integrity *bi_mddev;
- char name[BDEVNAME_SIZE];
if (!mddev->gendisk)
return 0;
@@ -2376,8 +2371,8 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
return 0;
if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
- pr_err("%s: incompatible integrity profile for %s\n",
- mdname(mddev), bdevname(rdev->bdev, name));
+ pr_err("%s: incompatible integrity profile for %pg\n",
+ mdname(mddev), rdev->bdev);
return -ENXIO;
}
@@ -2486,11 +2481,9 @@ static void rdev_delayed_delete(struct work_struct *ws)
static void unbind_rdev_from_array(struct md_rdev *rdev)
{
- char b[BDEVNAME_SIZE];
-
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
- pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
+ pr_debug("md: unbind<%pg>\n", rdev->bdev);
mddev_destroy_serial_pool(rdev->mddev, rdev, false);
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
@@ -2543,9 +2536,7 @@ void md_autodetect_dev(dev_t dev);
static void export_rdev(struct md_rdev *rdev)
{
- char b[BDEVNAME_SIZE];
-
- pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
+ pr_debug("md: export_rdev(%pg)\n", rdev->bdev);
md_rdev_clear(rdev);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
@@ -2803,8 +2794,6 @@ repeat:
rewrite:
md_bitmap_update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
- char b[BDEVNAME_SIZE];
-
if (rdev->sb_loaded != 1)
continue; /* no noise on spare devices */
@@ -2812,8 +2801,8 @@ rewrite:
md_super_write(mddev,rdev,
rdev->sb_start, rdev->sb_size,
rdev->sb_page);
- pr_debug("md: (write) %s's sb offset: %llu\n",
- bdevname(rdev->bdev, b),
+ pr_debug("md: (write) %pg's sb offset: %llu\n",
+ rdev->bdev,
(unsigned long long)rdev->sb_start);
rdev->sb_events = mddev->events;
if (rdev->badblocks.size) {
@@ -2825,8 +2814,8 @@ rewrite:
}
} else
- pr_debug("md: %s (skipping faulty)\n",
- bdevname(rdev->bdev, b));
+ pr_debug("md: %pg (skipping faulty)\n",
+ rdev->bdev);
if (mddev->level == LEVEL_MULTIPATH)
/* only need to write one superblock... */
@@ -3701,7 +3690,6 @@ EXPORT_SYMBOL_GPL(md_rdev_init);
*/
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
- char b[BDEVNAME_SIZE];
int err;
struct md_rdev *rdev;
sector_t size;
@@ -3725,8 +3713,8 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
if (!size) {
- pr_warn("md: %s has zero or unknown size, marking faulty!\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: %pg has zero or unknown size, marking faulty!\n",
+ rdev->bdev);
err = -EINVAL;
goto abort_free;
}
@@ -3735,14 +3723,14 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
err = super_types[super_format].
load_super(rdev, NULL, super_minor);
if (err == -EINVAL) {
- pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
- bdevname(rdev->bdev,b),
+ pr_warn("md: %pg does not have a valid v%d.%d superblock, not importing!\n",
+ rdev->bdev,
super_format, super_minor);
goto abort_free;
}
if (err < 0) {
- pr_warn("md: could not read %s's sb, not importing!\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: could not read %pg's sb, not importing!\n",
+ rdev->bdev);
goto abort_free;
}
}
@@ -3765,7 +3753,6 @@ static int analyze_sbs(struct mddev *mddev)
{
int i;
struct md_rdev *rdev, *freshest, *tmp;
- char b[BDEVNAME_SIZE];
freshest = NULL;
rdev_for_each_safe(rdev, tmp, mddev)
@@ -3777,8 +3764,8 @@ static int analyze_sbs(struct mddev *mddev)
case 0:
break;
default:
- pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: fatal superblock inconsistency in %pg -- removing from array\n",
+ rdev->bdev);
md_kick_rdev_from_array(rdev);
}
@@ -3796,8 +3783,8 @@ static int analyze_sbs(struct mddev *mddev)
if (mddev->max_disks &&
(rdev->desc_nr >= mddev->max_disks ||
i > mddev->max_disks)) {
- pr_warn("md: %s: %s: only %d devices permitted\n",
- mdname(mddev), bdevname(rdev->bdev, b),
+ pr_warn("md: %s: %pg: only %d devices permitted\n",
+ mdname(mddev), rdev->bdev,
mddev->max_disks);
md_kick_rdev_from_array(rdev);
continue;
@@ -3805,8 +3792,8 @@ static int analyze_sbs(struct mddev *mddev)
if (rdev != freshest) {
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
- pr_warn("md: kicking non-fresh %s from array!\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: kicking non-fresh %pg from array!\n",
+ rdev->bdev);
md_kick_rdev_from_array(rdev);
continue;
}
@@ -4844,7 +4831,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev);
+ md_reap_sync_thread(mddev, true);
}
mddev_unlock(mddev);
}
@@ -5598,8 +5585,6 @@ static void md_free(struct kobject *ko)
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
- if (mddev->level != 1 && mddev->level != 10)
- bioset_exit(&mddev->io_acct_set);
kfree(mddev);
}
@@ -5912,7 +5897,6 @@ int md_run(struct mddev *mddev)
/* Warn if this is a potentially silly
* configuration.
*/
- char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct md_rdev *rdev2;
int warned = 0;
@@ -5921,10 +5905,10 @@ int md_run(struct mddev *mddev)
if (rdev < rdev2 &&
rdev->bdev->bd_disk ==
rdev2->bdev->bd_disk) {
- pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
+ pr_warn("%s: WARNING: %pg appears to be on the same physical disk as %pg.\n",
mdname(mddev),
- bdevname(rdev->bdev,b),
- bdevname(rdev2->bdev,b2));
+ rdev->bdev,
+ rdev2->bdev);
warned = 1;
}
}
@@ -6213,7 +6197,7 @@ static void __md_stop_writes(struct mddev *mddev)
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev);
+ md_reap_sync_thread(mddev, true);
}
del_timer_sync(&mddev->safemode_timer);
@@ -6285,8 +6269,6 @@ void md_stop(struct mddev *mddev)
__md_stop(mddev);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
- if (mddev->level != 1 && mddev->level != 10)
- bioset_exit(&mddev->io_acct_set);
}
EXPORT_SYMBOL_GPL(md_stop);
@@ -6452,8 +6434,7 @@ static void autorun_array(struct mddev *mddev)
pr_info("md: running: ");
rdev_for_each(rdev, mddev) {
- char b[BDEVNAME_SIZE];
- pr_cont("<%s>", bdevname(rdev->bdev,b));
+ pr_cont("<%pg>", rdev->bdev);
}
pr_cont("\n");
@@ -6480,7 +6461,6 @@ static void autorun_devices(int part)
{
struct md_rdev *rdev0, *rdev, *tmp;
struct mddev *mddev;
- char b[BDEVNAME_SIZE];
pr_info("md: autorun ...\n");
while (!list_empty(&pending_raid_disks)) {
@@ -6490,12 +6470,12 @@ static void autorun_devices(int part)
rdev0 = list_entry(pending_raid_disks.next,
struct md_rdev, same_set);
- pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
+ pr_debug("md: considering %pg ...\n", rdev0->bdev);
INIT_LIST_HEAD(&candidates);
rdev_for_each_list(rdev, tmp, &pending_raid_disks)
if (super_90_load(rdev, rdev0, 0) >= 0) {
- pr_debug("md: adding %s ...\n",
- bdevname(rdev->bdev,b));
+ pr_debug("md: adding %pg ...\n",
+ rdev->bdev);
list_move(&rdev->same_set, &candidates);
}
/*
@@ -6512,8 +6492,8 @@ static void autorun_devices(int part)
unit = MINOR(dev);
}
if (rdev0->preferred_minor != unit) {
- pr_warn("md: unit number in %s is bad: %d\n",
- bdevname(rdev0->bdev, b), rdev0->preferred_minor);
+ pr_warn("md: unit number in %pg is bad: %d\n",
+ rdev0->bdev, rdev0->preferred_minor);
break;
}
@@ -6526,8 +6506,8 @@ static void autorun_devices(int part)
pr_warn("md: %s locked, cannot run\n", mdname(mddev));
else if (mddev->raid_disks || mddev->major_version
|| !list_empty(&mddev->disks)) {
- pr_warn("md: %s already running, cannot run %s\n",
- mdname(mddev), bdevname(rdev0->bdev,b));
+ pr_warn("md: %s already running, cannot run %pg\n",
+ mdname(mddev), rdev0->bdev);
mddev_unlock(mddev);
} else {
pr_debug("md: created %s\n", mdname(mddev));
@@ -6701,7 +6681,6 @@ static int get_disk_info(struct mddev *mddev, void __user * arg)
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
{
- char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct md_rdev *rdev;
dev_t dev = MKDEV(info->major,info->minor);
@@ -6731,9 +6710,9 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) {
- pr_warn("md: %s has different UUID to %s\n",
- bdevname(rdev->bdev,b),
- bdevname(rdev0->bdev,b2));
+ pr_warn("md: %pg has different UUID to %pg\n",
+ rdev->bdev,
+ rdev0->bdev);
export_rdev(rdev);
return -EINVAL;
}
@@ -6908,7 +6887,6 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
static int hot_remove_disk(struct mddev *mddev, dev_t dev)
{
- char b[BDEVNAME_SIZE];
struct md_rdev *rdev;
if (!mddev->pers)
@@ -6943,14 +6921,13 @@ kick_rdev:
return 0;
busy:
- pr_debug("md: cannot remove active disk %s from %s ...\n",
- bdevname(rdev->bdev,b), mdname(mddev));
+ pr_debug("md: cannot remove active disk %pg from %s ...\n",
+ rdev->bdev, mdname(mddev));
return -EBUSY;
}
static int hot_add_disk(struct mddev *mddev, dev_t dev)
{
- char b[BDEVNAME_SIZE];
int err;
struct md_rdev *rdev;
@@ -6983,8 +6960,8 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
rdev->sectors = rdev->sb_start;
if (test_bit(Faulty, &rdev->flags)) {
- pr_warn("md: can not hot-add faulty %s disk to %s!\n",
- bdevname(rdev->bdev,b), mdname(mddev));
+ pr_warn("md: can not hot-add faulty %pg disk to %s!\n",
+ rdev->bdev, mdname(mddev));
err = -EINVAL;
goto abort_export;
}
@@ -7011,8 +6988,8 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
* disable on the whole MD.
*/
if (!blk_queue_nowait(bdev_get_queue(rdev->bdev))) {
- pr_info("%s: Disabling nowait because %s does not support nowait\n",
- mdname(mddev), bdevname(rdev->bdev, b));
+ pr_info("%s: Disabling nowait because %pg does not support nowait\n",
+ mdname(mddev), rdev->bdev);
blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
}
/*
@@ -7963,17 +7940,22 @@ EXPORT_SYMBOL(md_register_thread);
void md_unregister_thread(struct md_thread **threadp)
{
- struct md_thread *thread = *threadp;
- if (!thread)
- return;
- pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
- /* Locking ensures that mddev_unlock does not wake_up a
+ struct md_thread *thread;
+
+ /*
+ * Locking ensures that mddev_unlock does not wake_up a
* non-existent thread
*/
spin_lock(&pers_lock);
+ thread = *threadp;
+ if (!thread) {
+ spin_unlock(&pers_lock);
+ return;
+ }
*threadp = NULL;
spin_unlock(&pers_lock);
+ pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
kthread_stop(thread->tsk);
kfree(thread);
}
@@ -8012,10 +7994,8 @@ static void status_unused(struct seq_file *seq)
seq_printf(seq, "unused devices: ");
list_for_each_entry(rdev, &pending_raid_disks, same_set) {
- char b[BDEVNAME_SIZE];
i++;
- seq_printf(seq, "%s ",
- bdevname(rdev->bdev,b));
+ seq_printf(seq, "%pg ", rdev->bdev);
}
if (!i)
seq_printf(seq, "<none>");
@@ -8255,9 +8235,8 @@ static int md_seq_show(struct seq_file *seq, void *v)
sectors = 0;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
- char b[BDEVNAME_SIZE];
- seq_printf(seq, " %s[%d]",
- bdevname(rdev->bdev,b), rdev->desc_nr);
+ seq_printf(seq, " %pg[%d]", rdev->bdev, rdev->desc_nr);
+
if (test_bit(WriteMostly, &rdev->flags))
seq_printf(seq, "(W)");
if (test_bit(Journal, &rdev->flags))
@@ -9324,7 +9303,7 @@ void md_check_recovery(struct mddev *mddev)
* ->spare_active and clear saved_raid_disk
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev);
+ md_reap_sync_thread(mddev, true);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
@@ -9359,7 +9338,7 @@ void md_check_recovery(struct mddev *mddev)
goto unlock;
}
if (mddev->sync_thread) {
- md_reap_sync_thread(mddev);
+ md_reap_sync_thread(mddev, true);
goto unlock;
}
/* Set RUNNING before clearing NEEDED to avoid
@@ -9432,14 +9411,18 @@ void md_check_recovery(struct mddev *mddev)
}
EXPORT_SYMBOL(md_check_recovery);
-void md_reap_sync_thread(struct mddev *mddev)
+void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held)
{
struct md_rdev *rdev;
sector_t old_dev_sectors = mddev->dev_sectors;
bool is_reshaped = false;
+ if (reconfig_mutex_held)
+ mddev_unlock(mddev);
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
+ if (reconfig_mutex_held)
+ mddev_lock_nointr(mddev);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
mddev->degraded != mddev->raid_disks) {
@@ -9652,7 +9635,6 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
struct md_rdev *rdev2, *tmp;
int role, ret;
- char b[BDEVNAME_SIZE];
/*
* If size is changed in another node then we need to
@@ -9676,7 +9658,8 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
if (test_bit(Candidate, &rdev2->flags)) {
if (role == MD_DISK_ROLE_FAULTY) {
- pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
+ pr_info("md: Removing Candidate device %pg because add failed\n",
+ rdev2->bdev);
md_kick_rdev_from_array(rdev2);
continue;
}
@@ -9693,8 +9676,8 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
MD_FEATURE_RESHAPE_ACTIVE)) {
rdev2->saved_raid_disk = role;
ret = remove_and_add_spares(mddev, rdev2);
- pr_info("Activated spare: %s\n",
- bdevname(rdev2->bdev,b));
+ pr_info("Activated spare: %pg\n",
+ rdev2->bdev);
/* wakeup mddev->thread here, so array could
* perform resync with the new activated disk */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
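Besides the %pg conversions, the md.c hunks change two synchronization details: md_unregister_thread() now takes pers_lock before dereferencing *threadp, so a concurrent caller cannot clear the pointer underneath it, and md_reap_sync_thread() is told whether its caller holds the reconfig mutex so it can drop that mutex around md_unregister_thread(&mddev->sync_thread) and re-take it afterwards. Callers that hold the mutex (action_store(), __md_stop_writes(), md_check_recovery()) pass true, while dm-raid's raid_message() passes false. A caller-side sketch (illustrative, not a new call site from the patch):

	mddev_lock_nointr(mddev);
	if (mddev->sync_thread) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		/* true: we hold the reconfig mutex, let the reap drop and re-take it */
		md_reap_sync_thread(mddev, true);
	}
	mddev_unlock(mddev);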
diff --git a/drivers/md/md.h b/drivers/md/md.h
index cf2cbb17acbd..5f62c46ac2d3 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -719,7 +719,7 @@ extern struct md_thread *md_register_thread(
extern void md_unregister_thread(struct md_thread **threadp);
extern void md_wakeup_thread(struct md_thread *thread);
extern void md_check_recovery(struct mddev *mddev);
-extern void md_reap_sync_thread(struct mddev *mddev);
+extern void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held);
extern int mddev_init_writes_pending(struct mddev *mddev);
extern bool md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index e11701e394ca..78addfe4a0c9 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -37,7 +37,6 @@ static void dump_zones(struct mddev *mddev)
int j, k;
sector_t zone_size = 0;
sector_t zone_start = 0;
- char b[BDEVNAME_SIZE];
struct r0conf *conf = mddev->private;
int raid_disks = conf->strip_zone[0].nb_dev;
pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
@@ -48,9 +47,8 @@ static void dump_zones(struct mddev *mddev)
int len = 0;
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
- bdevname(conf->devlist[j*raid_disks
- + k]->bdev, b));
+ len += snprintf(line+len, 200-len, "%s%pg", k?"/":"",
+ conf->devlist[j * raid_disks + k]->bdev);
pr_debug("md: zone%d=[%s]\n", j, line);
zone_size = conf->strip_zone[j].zone_end - zone_start;
@@ -69,8 +67,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
struct strip_zone *zone;
int cnt;
- char b[BDEVNAME_SIZE];
- char b2[BDEVNAME_SIZE];
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
unsigned blksize = 512;
@@ -78,9 +74,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
if (!conf)
return -ENOMEM;
rdev_for_each(rdev1, mddev) {
- pr_debug("md/raid0:%s: looking at %s\n",
+ pr_debug("md/raid0:%s: looking at %pg\n",
mdname(mddev),
- bdevname(rdev1->bdev, b));
+ rdev1->bdev);
c = 0;
/* round size to chunk_size */
@@ -92,12 +88,12 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
rdev1->bdev->bd_disk->queue));
rdev_for_each(rdev2, mddev) {
- pr_debug("md/raid0:%s: comparing %s(%llu)"
- " with %s(%llu)\n",
+ pr_debug("md/raid0:%s: comparing %pg(%llu)"
+ " with %pg(%llu)\n",
mdname(mddev),
- bdevname(rdev1->bdev,b),
+ rdev1->bdev,
(unsigned long long)rdev1->sectors,
- bdevname(rdev2->bdev,b2),
+ rdev2->bdev,
(unsigned long long)rdev2->sectors);
if (rdev2 == rdev1) {
pr_debug("md/raid0:%s: END\n",
@@ -225,15 +221,15 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
for (j=0; j<cnt; j++) {
rdev = conf->devlist[j];
if (rdev->sectors <= zone->dev_start) {
- pr_debug("md/raid0:%s: checking %s ... nope\n",
+ pr_debug("md/raid0:%s: checking %pg ... nope\n",
mdname(mddev),
- bdevname(rdev->bdev, b));
+ rdev->bdev);
continue;
}
- pr_debug("md/raid0:%s: checking %s ..."
+ pr_debug("md/raid0:%s: checking %pg ..."
" contained as device %d\n",
mdname(mddev),
- bdevname(rdev->bdev, b), c);
+ rdev->bdev, c);
dev[c] = rdev;
c++;
if (!smallest || rdev->sectors < smallest->sectors) {
@@ -362,7 +358,6 @@ static void free_conf(struct mddev *mddev, struct r0conf *conf)
kfree(conf->strip_zone);
kfree(conf->devlist);
kfree(conf);
- mddev->private = NULL;
}
static void raid0_free(struct mddev *mddev, void *priv)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 99d5af1362d7..258d4eb2d63c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -402,10 +402,9 @@ static void raid1_end_read_request(struct bio *bio)
/*
* oops, read error:
*/
- char b[BDEVNAME_SIZE];
- pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
+ pr_err_ratelimited("md/raid1:%s: %pg: rescheduling sector %llu\n",
mdname(conf->mddev),
- bdevname(rdev->bdev, b),
+ rdev->bdev,
(unsigned long long)r1_bio->sector);
set_bit(R1BIO_ReadError, &r1_bio->state);
reschedule_retry(r1_bio);
@@ -1283,10 +1282,10 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
mirror = conf->mirrors + rdisk;
if (r1bio_existed)
- pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
+ pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %pg\n",
mdname(mddev),
(unsigned long long)r1_bio->sector,
- bdevname(mirror->rdev->bdev, b));
+ mirror->rdev->bdev);
if (test_bit(WriteMostly, &mirror->rdev->flags) &&
bitmap) {
@@ -1659,7 +1658,6 @@ static void raid1_status(struct seq_file *seq, struct mddev *mddev)
*/
static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
{
- char b[BDEVNAME_SIZE];
struct r1conf *conf = mddev->private;
unsigned long flags;
@@ -1686,9 +1684,9 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
- pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
+ pr_crit("md/raid1:%s: Disk failure on %pg, disabling device.\n"
"md/raid1:%s: Operation continuing on %d devices.\n",
- mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), rdev->bdev,
mdname(mddev), conf->raid_disks - mddev->degraded);
}
@@ -1706,13 +1704,12 @@ static void print_conf(struct r1conf *conf)
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- char b[BDEVNAME_SIZE];
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev)
- pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+ pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
i, !test_bit(In_sync, &rdev->flags),
!test_bit(Faulty, &rdev->flags),
- bdevname(rdev->bdev,b));
+ rdev->bdev);
}
rcu_read_unlock();
}
@@ -2347,7 +2344,6 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
}
d = start;
while (d != read_disk) {
- char b[BDEVNAME_SIZE];
if (d==0)
d = conf->raid_disks * 2;
d--;
@@ -2360,11 +2356,11 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) {
atomic_add(s, &rdev->corrected_errors);
- pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
+ pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
mdname(mddev), s,
(unsigned long long)(sect +
rdev->data_offset),
- bdevname(rdev->bdev, b));
+ rdev->bdev);
}
rdev_dec_pending(rdev, mddev);
} else
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index dfa576cdf11c..d589f823feb1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -397,10 +397,9 @@ static void raid10_end_read_request(struct bio *bio)
/*
* oops, read error - keep the refcount on the rdev
*/
- char b[BDEVNAME_SIZE];
- pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
+ pr_err_ratelimited("md/raid10:%s: %pg: rescheduling sector %llu\n",
mdname(conf->mddev),
- bdevname(rdev->bdev, b),
+ rdev->bdev,
(unsigned long long)r10_bio->sector);
set_bit(R10BIO_ReadError, &r10_bio->state);
reschedule_retry(r10_bio);
@@ -1187,9 +1186,9 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
return;
}
if (err_rdev)
- pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
+ pr_err_ratelimited("md/raid10:%s: %pg: redirecting sector %llu to another mirror\n",
mdname(mddev),
- bdevname(rdev->bdev, b),
+ rdev->bdev,
(unsigned long long)r10_bio->sector);
if (max_sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, max_sectors,
@@ -1987,7 +1986,6 @@ static int enough(struct r10conf *conf, int ignore)
*/
static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
{
- char b[BDEVNAME_SIZE];
struct r10conf *conf = mddev->private;
unsigned long flags;
@@ -2010,9 +2008,9 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
spin_unlock_irqrestore(&conf->device_lock, flags);
- pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
+ pr_crit("md/raid10:%s: Disk failure on %pg, disabling device.\n"
"md/raid10:%s: Operation continuing on %d devices.\n",
- mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), rdev->bdev,
mdname(mddev), conf->geo.raid_disks - mddev->degraded);
}
@@ -2032,13 +2030,12 @@ static void print_conf(struct r10conf *conf)
/* This is only called with ->reconfix_mutex held, so
* rcu protection of rdev is not needed */
for (i = 0; i < conf->geo.raid_disks; i++) {
- char b[BDEVNAME_SIZE];
rdev = conf->mirrors[i].rdev;
if (rdev)
- pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+ pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
i, !test_bit(In_sync, &rdev->flags),
!test_bit(Faulty, &rdev->flags),
- bdevname(rdev->bdev,b));
+ rdev->bdev);
}
}
@@ -2691,14 +2688,11 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
check_decay_read_errors(mddev, rdev);
atomic_inc(&rdev->read_errors);
if (atomic_read(&rdev->read_errors) > max_read_errors) {
- char b[BDEVNAME_SIZE];
- bdevname(rdev->bdev, b);
-
- pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
- mdname(mddev), b,
+ pr_notice("md/raid10:%s: %pg: Raid device exceeded read_error threshold [cur %d:max %d]\n",
+ mdname(mddev), rdev->bdev,
atomic_read(&rdev->read_errors), max_read_errors);
- pr_notice("md/raid10:%s: %s: Failing raid device\n",
- mdname(mddev), b);
+ pr_notice("md/raid10:%s: %pg: Failing raid device\n",
+ mdname(mddev), rdev->bdev);
md_error(mddev, rdev);
r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
return;
@@ -2768,8 +2762,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
/* write it back and re-read */
rcu_read_lock();
while (sl != r10_bio->read_slot) {
- char b[BDEVNAME_SIZE];
-
if (sl==0)
sl = conf->copies;
sl--;
@@ -2788,24 +2780,22 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
s, conf->tmppage, WRITE)
== 0) {
/* Well, this device is dead */
- pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
+ pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %pg)\n",
mdname(mddev), s,
(unsigned long long)(
sect +
choose_data_offset(r10_bio,
rdev)),
- bdevname(rdev->bdev, b));
- pr_notice("md/raid10:%s: %s: failing drive\n",
+ rdev->bdev);
+ pr_notice("md/raid10:%s: %pg: failing drive\n",
mdname(mddev),
- bdevname(rdev->bdev, b));
+ rdev->bdev);
}
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
}
sl = start;
while (sl != r10_bio->read_slot) {
- char b[BDEVNAME_SIZE];
-
if (sl==0)
sl = conf->copies;
sl--;
@@ -2825,23 +2815,23 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
READ)) {
case 0:
/* Well, this device is dead */
- pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
+ pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %pg)\n",
mdname(mddev), s,
(unsigned long long)(
sect +
choose_data_offset(r10_bio, rdev)),
- bdevname(rdev->bdev, b));
- pr_notice("md/raid10:%s: %s: failing drive\n",
+ rdev->bdev);
+ pr_notice("md/raid10:%s: %pg: failing drive\n",
mdname(mddev),
- bdevname(rdev->bdev, b));
+ rdev->bdev);
break;
case 1:
- pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
+ pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %pg)\n",
mdname(mddev), s,
(unsigned long long)(
sect +
choose_data_offset(r10_bio, rdev)),
- bdevname(rdev->bdev, b));
+ rdev->bdev);
atomic_add(s, &rdev->corrected_errors);
}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 094a4042589e..83c184eddbda 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -3064,11 +3064,10 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
struct request_queue *q = bdev_get_queue(rdev->bdev);
struct r5l_log *log;
- char b[BDEVNAME_SIZE];
int ret;
- pr_debug("md/raid:%s: using device %s as journal\n",
- mdname(conf->mddev), bdevname(rdev->bdev, b));
+ pr_debug("md/raid:%s: using device %pg as journal\n",
+ mdname(conf->mddev), rdev->bdev);
if (PAGE_SIZE != 4096)
return -EINVAL;
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 55d065a87b89..973e2e06f19c 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -798,7 +798,6 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
int data_disks;
int i;
int ret = 0;
- char b[BDEVNAME_SIZE];
unsigned int pp_size = le32_to_cpu(e->pp_size);
unsigned int data_size = le32_to_cpu(e->data_size);
@@ -894,8 +893,8 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
break;
}
- pr_debug("%s:%*s reading data member disk %s sector %llu\n",
- __func__, indent, "", bdevname(rdev->bdev, b),
+ pr_debug("%s:%*s reading data member disk %pg sector %llu\n",
+ __func__, indent, "", rdev->bdev,
(unsigned long long)sector);
if (!sync_page_io(rdev, sector, block_size, page2,
REQ_OP_READ, 0, false)) {
@@ -942,10 +941,10 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
conf->disks[sh.pd_idx].rdev, 1);
BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
- pr_debug("%s:%*s write parity at sector %llu, disk %s\n",
+ pr_debug("%s:%*s write parity at sector %llu, disk %pg\n",
__func__, indent, "",
(unsigned long long)parity_sector,
- bdevname(parity_rdev->bdev, b));
+ parity_rdev->bdev);
if (!sync_page_io(parity_rdev, parity_sector, block_size,
page1, REQ_OP_WRITE, 0, false)) {
pr_debug("%s:%*s parity write error!\n", __func__,
@@ -1255,7 +1254,6 @@ void ppl_exit_log(struct r5conf *conf)
static int ppl_validate_rdev(struct md_rdev *rdev)
{
- char b[BDEVNAME_SIZE];
int ppl_data_sectors;
int ppl_size_new;
@@ -1272,8 +1270,8 @@ static int ppl_validate_rdev(struct md_rdev *rdev)
RAID5_STRIPE_SECTORS((struct r5conf *)rdev->mddev->private));
if (ppl_data_sectors <= 0) {
- pr_warn("md/raid:%s: PPL space too small on %s\n",
- mdname(rdev->mddev), bdevname(rdev->bdev, b));
+ pr_warn("md/raid:%s: PPL space too small on %pg\n",
+ mdname(rdev->mddev), rdev->bdev);
return -ENOSPC;
}
@@ -1283,16 +1281,16 @@ static int ppl_validate_rdev(struct md_rdev *rdev)
rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
(rdev->ppl.sector >= rdev->data_offset &&
rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
- pr_warn("md/raid:%s: PPL space overlaps with data on %s\n",
- mdname(rdev->mddev), bdevname(rdev->bdev, b));
+ pr_warn("md/raid:%s: PPL space overlaps with data on %pg\n",
+ mdname(rdev->mddev), rdev->bdev);
return -EINVAL;
}
if (!rdev->mddev->external &&
((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
(rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
- pr_warn("md/raid:%s: PPL space overlaps with superblock on %s\n",
- mdname(rdev->mddev), bdevname(rdev->bdev, b));
+ pr_warn("md/raid:%s: PPL space overlaps with superblock on %pg\n",
+ mdname(rdev->mddev), rdev->bdev);
return -EINVAL;
}
@@ -1463,14 +1461,13 @@ int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
struct ppl_conf *ppl_conf = conf->log_private;
struct ppl_log *log;
int ret = 0;
- char b[BDEVNAME_SIZE];
if (!rdev)
return -EINVAL;
- pr_debug("%s: disk: %d operation: %s dev: %s\n",
+ pr_debug("%s: disk: %d operation: %s dev: %pg\n",
__func__, rdev->raid_disk, add ? "add" : "remove",
- bdevname(rdev->bdev, b));
+ rdev->bdev);
if (rdev->raid_disk < 0)
return 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 39038fa8b1c8..5d09256d7f81 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2686,7 +2686,6 @@ static void raid5_end_read_request(struct bio * bi)
struct stripe_head *sh = bi->bi_private;
struct r5conf *conf = sh->raid_conf;
int disks = sh->disks, i;
- char b[BDEVNAME_SIZE];
struct md_rdev *rdev = NULL;
sector_t s;
@@ -2723,10 +2722,10 @@ static void raid5_end_read_request(struct bio * bi)
* any error
*/
pr_info_ratelimited(
- "md/raid:%s: read error corrected (%lu sectors at %llu on %s)\n",
+ "md/raid:%s: read error corrected (%lu sectors at %llu on %pg)\n",
mdname(conf->mddev), RAID5_STRIPE_SECTORS(conf),
(unsigned long long)s,
- bdevname(rdev->bdev, b));
+ rdev->bdev);
atomic_add(RAID5_STRIPE_SECTORS(conf), &rdev->corrected_errors);
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
@@ -2743,7 +2742,6 @@ static void raid5_end_read_request(struct bio * bi)
if (atomic_read(&rdev->read_errors))
atomic_set(&rdev->read_errors, 0);
} else {
- const char *bdn = bdevname(rdev->bdev, b);
int retry = 0;
int set_bad = 0;
@@ -2752,25 +2750,25 @@ static void raid5_end_read_request(struct bio * bi)
atomic_inc(&rdev->read_errors);
if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
pr_warn_ratelimited(
- "md/raid:%s: read error on replacement device (sector %llu on %s).\n",
+ "md/raid:%s: read error on replacement device (sector %llu on %pg).\n",
mdname(conf->mddev),
(unsigned long long)s,
- bdn);
+ rdev->bdev);
else if (conf->mddev->degraded >= conf->max_degraded) {
set_bad = 1;
pr_warn_ratelimited(
- "md/raid:%s: read error not correctable (sector %llu on %s).\n",
+ "md/raid:%s: read error not correctable (sector %llu on %pg).\n",
mdname(conf->mddev),
(unsigned long long)s,
- bdn);
+ rdev->bdev);
} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
/* Oh, no!!! */
set_bad = 1;
pr_warn_ratelimited(
- "md/raid:%s: read error NOT corrected!! (sector %llu on %s).\n",
+ "md/raid:%s: read error NOT corrected!! (sector %llu on %pg).\n",
mdname(conf->mddev),
(unsigned long long)s,
- bdn);
+ rdev->bdev);
} else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes) {
if (!test_bit(Faulty, &rdev->flags)) {
@@ -2778,8 +2776,8 @@ static void raid5_end_read_request(struct bio * bi)
mdname(conf->mddev),
atomic_read(&rdev->read_errors),
conf->max_nr_stripes);
- pr_warn("md/raid:%s: Too many read errors, failing device %s.\n",
- mdname(conf->mddev), bdn);
+ pr_warn("md/raid:%s: Too many read errors, failing device %pg.\n",
+ mdname(conf->mddev), rdev->bdev);
}
} else
retry = 1;
@@ -2891,13 +2889,12 @@ static void raid5_end_write_request(struct bio *bi)
static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
{
- char b[BDEVNAME_SIZE];
struct r5conf *conf = mddev->private;
unsigned long flags;
pr_debug("raid456: error called\n");
- pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n",
- mdname(mddev), bdevname(rdev->bdev, b));
+ pr_crit("md/raid:%s: Disk failure on %pg, disabling device.\n",
+ mdname(mddev), rdev->bdev);
spin_lock_irqsave(&conf->device_lock, flags);
set_bit(Faulty, &rdev->flags);
@@ -7359,9 +7356,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
}
if (test_bit(In_sync, &rdev->flags)) {
- char b[BDEVNAME_SIZE];
- pr_info("md/raid:%s: device %s operational as raid disk %d\n",
- mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
+ pr_info("md/raid:%s: device %pg operational as raid disk %d\n",
+ mdname(mddev), rdev->bdev, raid_disk);
} else if (rdev->saved_raid_disk != raid_disk)
/* Cannot rely on bitmap to complete recovery */
conf->fullsync = 1;
@@ -7877,12 +7873,11 @@ static void print_raid5_conf (struct r5conf *conf)
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- char b[BDEVNAME_SIZE];
rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev)
- pr_debug(" disk %d, o:%d, dev:%s\n",
+ pr_debug(" disk %d, o:%d, dev:%pg\n",
i, !test_bit(Faulty, &rdev->flags),
- bdevname(rdev->bdev, b));
+ rdev->bdev);
}
rcu_read_unlock();
}
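Background for the raid5/raid5-ppl conversion above: the %pg printk format specifier prints the name of a struct block_device (e.g. sda1) directly from the pointer, which is what lets every BDEVNAME_SIZE stack buffer and bdevname() call be dropped. A minimal, illustrative sketch — the helper name is made up, not from the patch:

#include <linux/blkdev.h>
#include <linux/printk.h>

/* print a block device by passing the pointer itself; no name buffer needed */
static void report_bdev(struct block_device *bdev, sector_t sector)
{
	pr_info("I/O error at sector %llu on %pg\n",
		(unsigned long long)sector, bdev);
}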
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index c12dda73cdd5..3155e876616d 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -773,7 +773,7 @@ static int ati_remote_initialize(struct ati_remote *ati_remote)
/* Set up irq_urb */
pipe = usb_rcvintpipe(udev, ati_remote->endpoint_in->bEndpointAddress);
- maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(udev, pipe);
maxp = (maxp > DATA_BUFSIZE) ? DATA_BUFSIZE : maxp;
usb_fill_int_urb(ati_remote->irq_urb, udev, pipe, ati_remote->inbuf,
@@ -784,7 +784,7 @@ static int ati_remote_initialize(struct ati_remote *ati_remote)
/* Set up out_urb */
pipe = usb_sndintpipe(udev, ati_remote->endpoint_out->bEndpointAddress);
- maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(udev, pipe);
maxp = (maxp > DATA_BUFSIZE) ? DATA_BUFSIZE : maxp;
usb_fill_int_urb(ati_remote->out_urb, udev, pipe, ati_remote->outbuf,
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 2dc810f5a73f..0834d5f866fd 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1728,7 +1728,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
pipe = usb_rcvintpipe(dev, ep_in->bEndpointAddress);
else
pipe = usb_rcvbulkpipe(dev, ep_in->bEndpointAddress);
- maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(dev, pipe);
ir = kzalloc(sizeof(struct mceusb_dev), GFP_KERNEL);
if (!ir)
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 16ba85d7c090..deb85330c940 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -307,7 +307,7 @@ static int streamzap_probe(struct usb_interface *intf,
}
pipe = usb_rcvintpipe(usbdev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(usbdev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(usbdev, pipe);
if (maxp == 0) {
dev_err(&intf->dev, "%s: endpoint Max Packet Size is 0!?!\n",
diff --git a/drivers/media/rc/xbox_remote.c b/drivers/media/rc/xbox_remote.c
index 98d0b43608ad..7424b2031152 100644
--- a/drivers/media/rc/xbox_remote.c
+++ b/drivers/media/rc/xbox_remote.c
@@ -171,7 +171,7 @@ static int xbox_remote_initialize(struct xbox_remote *xbox_remote,
/* Set up irq_urb */
pipe = usb_rcvintpipe(udev, endpoint_in->bEndpointAddress);
- maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(udev, pipe);
maxp = (maxp > DATA_BUFSIZE) ? DATA_BUFSIZE : maxp;
usb_fill_int_urb(xbox_remote->irq_urb, udev, pipe, xbox_remote->inbuf,
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 8c2725e4105b..ee04973cbf93 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -120,7 +120,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
pipe = usb_rcvbulkpipe(dev->udev, dev->bulk_in.endp->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
- size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
+ size = usb_maxpacket(dev->udev, pipe);
size = size * 15; /* 512 x 8 or 12 or 15 */
dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
diff --git a/drivers/media/usb/tm6000/tm6000-input.c b/drivers/media/usb/tm6000/tm6000-input.c
index 84602edf3fe8..5136e9e202f1 100644
--- a/drivers/media/usb/tm6000/tm6000-input.c
+++ b/drivers/media/usb/tm6000/tm6000-input.c
@@ -340,7 +340,7 @@ static int __tm6000_ir_int_start(struct rc_dev *rc)
dev->int_in.endp->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
- size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
+ size = usb_maxpacket(dev->udev, pipe);
dprintk(1, "IR max size: %d\n", size);
ir->int_urb->transfer_buffer = kzalloc(size, GFP_ATOMIC);
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index e293f6f3d1bc..d855a19551f3 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -570,7 +570,7 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
dev->isoc_in.endp->desc.bEndpointAddress &
USB_ENDPOINT_NUMBER_MASK);
- size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
+ size = usb_maxpacket(dev->udev, pipe);
if (size > dev->isoc_in.maxsize)
size = dev->isoc_in.maxsize;
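Context for the repeated usb_maxpacket() change above: the two-argument form derives the transfer direction from the pipe value, so the old usb_pipeout(pipe) third argument carried no extra information. A rough sketch of the new call pattern (helper name hypothetical):

#include <linux/usb.h>

/* max packet size of an interrupt IN endpoint; direction is encoded in the pipe */
static u16 int_in_maxpacket(struct usb_device *udev,
			    const struct usb_endpoint_descriptor *ep)
{
	unsigned int pipe = usb_rcvintpipe(udev, ep->bEndpointAddress);

	return usb_maxpacket(udev, pipe);
}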
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index 6c2a421b86e3..f305643209f0 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -630,7 +630,7 @@ static irqreturn_t emif_threaded_isr(int irq, void *dev_id)
dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");
/* If we have Power OFF ability, use it, else try restarting */
- if (pm_power_off) {
+ if (kernel_can_power_off()) {
kernel_power_off();
} else {
WARN(1, "FIXME: NO pm_power_off!!! trying restart\n");
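Note on the emif change: kernel_can_power_off() reports whether any power-off handler is registered (the legacy pm_power_off hook or a registered handler), so it is the portable replacement for open-coding a pm_power_off NULL check. Illustrative sketch only; the function name is hypothetical:

#include <linux/reboot.h>

static void thermal_emergency_shutdown(void)
{
	if (kernel_can_power_off())
		kernel_power_off();	/* preferred: power the machine off */
	else
		kernel_restart(NULL);	/* fall back to a restart */
}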
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 546feef851ab..596731caf407 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -114,6 +114,9 @@ static const struct mfd_cell cros_ec_platform_cells[] = {
{ .name = "cros-ec-chardev", },
{ .name = "cros-ec-debugfs", },
{ .name = "cros-ec-sysfs", },
+};
+
+static const struct mfd_cell cros_ec_pchg_cells[] = {
{ .name = "cros-ec-pchg", },
};
@@ -137,6 +140,7 @@ static int ec_device_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct cros_ec_platform *ec_platform = dev_get_platdata(dev);
struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL);
+ struct ec_response_pchg_count pchg_count;
int i;
if (!ec)
@@ -243,6 +247,21 @@ static int ec_device_probe(struct platform_device *pdev)
}
/*
+ * The PCHG device cannot be detected by sending EC_FEATURE_GET_CMD, but
+ * it can be detected by querying the number of peripheral chargers.
+ */
+ retval = cros_ec_command(ec->ec_dev, 0, EC_CMD_PCHG_COUNT, NULL, 0,
+ &pchg_count, sizeof(pchg_count));
+ if (retval >= 0 && pchg_count.port_count) {
+ retval = mfd_add_hotplug_devices(ec->dev,
+ cros_ec_pchg_cells,
+ ARRAY_SIZE(cros_ec_pchg_cells));
+ if (retval)
+ dev_warn(ec->dev, "failed to add pchg: %d\n",
+ retval);
+ }
+
+ /*
* The following subdevices cannot be detected by sending the
* EC_FEATURE_GET_CMD to the Embedded Controller device.
*/
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index e5c8bc998eb4..965820481f1e 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -46,14 +46,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
}
clk_enable(davinci_vc->clk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- fifo_base = (dma_addr_t)res->start;
- davinci_vc->base = devm_ioremap_resource(&pdev->dev, res);
+ davinci_vc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(davinci_vc->base)) {
ret = PTR_ERR(davinci_vc->base);
goto fail;
}
+ fifo_base = (dma_addr_t)res->start;
davinci_vc->regmap = devm_regmap_init_mmio(&pdev->dev,
davinci_vc->base,
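A note on the davinci change: devm_platform_get_and_ioremap_resource() combines platform_get_resource() and devm_ioremap_resource() and optionally hands back the struct resource, so res->start is only dereferenced after the lookup is known to have succeeded. Minimal sketch; the probe function is hypothetical:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* safe to use res->start only after the helper succeeded */
	dev_dbg(&pdev->dev, "mapped %pR\n", res);
	return 0;
}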
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index 6909d075d017..a58e42ddcd0c 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -9,14 +9,13 @@
* Fei Wang <w.f@huawei.com>
*/
-#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mfd/core.h>
#include <linux/mfd/hi655x-pmic.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -94,7 +93,6 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
int ret;
struct hi655x_pmic *pmic;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
void __iomem *base;
pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
@@ -120,21 +118,12 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
hi655x_local_irq_clear(pmic->regmap);
- pmic->gpio = of_get_named_gpio(np, "pmic-gpios", 0);
- if (!gpio_is_valid(pmic->gpio)) {
- dev_err(dev, "Failed to get the pmic-gpios\n");
- return -ENODEV;
- }
-
- ret = devm_gpio_request_one(dev, pmic->gpio, GPIOF_IN,
- "hi655x_pmic_irq");
- if (ret < 0) {
- dev_err(dev, "Failed to request gpio %d ret = %d\n",
- pmic->gpio, ret);
- return ret;
- }
+ pmic->gpio = devm_gpiod_get_optional(dev, "pmic", GPIOD_IN);
+ if (IS_ERR(pmic->gpio))
+ return dev_err_probe(dev, PTR_ERR(pmic->gpio),
+ "Failed to request hi655x pmic-gpio");
- ret = regmap_add_irq_chip(pmic->regmap, gpio_to_irq(pmic->gpio),
+ ret = regmap_add_irq_chip(pmic->regmap, gpiod_to_irq(pmic->gpio),
IRQF_TRIGGER_LOW | IRQF_NO_SUSPEND, 0,
&hi655x_irq_chip, &pmic->irq_data);
if (ret) {
@@ -149,7 +138,7 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
regmap_irq_get_domain(pmic->irq_data));
if (ret) {
dev_err(dev, "Failed to register device %d\n", ret);
- regmap_del_irq_chip(gpio_to_irq(pmic->gpio), pmic->irq_data);
+ regmap_del_irq_chip(gpiod_to_irq(pmic->gpio), pmic->irq_data);
return ret;
}
@@ -160,7 +149,7 @@ static int hi655x_pmic_remove(struct platform_device *pdev)
{
struct hi655x_pmic *pmic = platform_get_drvdata(pdev);
- regmap_del_irq_chip(gpio_to_irq(pmic->gpio), pmic->irq_data);
+ regmap_del_irq_chip(gpiod_to_irq(pmic->gpio), pmic->irq_data);
mfd_remove_devices(&pdev->dev);
return 0;
}
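Context for the hi655x conversion: with the GPIO descriptor API, a con_id of "pmic" resolves to the "pmic-gpios" device-tree property, and gpiod_to_irq() replaces gpio_to_irq() on the returned descriptor. A short illustrative sketch (function name hypothetical):

#include <linux/gpio/consumer.h>

static int pmic_gpio_irq(struct device *dev)
{
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get(dev, "pmic", GPIOD_IN);	/* matches "pmic-gpios" in DT */
	if (IS_ERR(gpio))
		return dev_err_probe(dev, PTR_ERR(gpio), "no pmic gpio\n");

	return gpiod_to_irq(gpio);
}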
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 962ee14c62dd..f7950d2197df 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -319,6 +319,8 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x51c5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51c6), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51c7), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x51d8), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x51d9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51e8), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51e9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51ea), (kernel_ulong_t)&bxt_i2c_info },
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index e92eeeb67a98..4cd5ecc72211 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -403,7 +403,7 @@ static int __init micro_probe(struct platform_device *pdev)
micro_reset_comm(micro);
irq = platform_get_irq(pdev, 0);
- if (!irq)
+ if (irq < 0)
return -EINVAL;
ret = devm_request_irq(&pdev->dev, irq, micro_serial_isr,
IRQF_SHARED, "ipaq-micro",
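Context for the ipaq-micro fix: platform_get_irq() signals failure with a negative errno (and is documented not to return 0), so the previous "!irq" test could never catch an error. A sketch of the intended check:

	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER, -ENXIO, ... */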
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index bddb40054b9e..1a368ad08f58 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -54,6 +54,13 @@ static const struct resource mt6358_keys_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6358_IRQ_HOMEKEY_R, "homekey_r"),
};
+static const struct resource mt6359_keys_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6359_IRQ_PWRKEY, "powerkey"),
+ DEFINE_RES_IRQ_NAMED(MT6359_IRQ_HOMEKEY, "homekey"),
+ DEFINE_RES_IRQ_NAMED(MT6359_IRQ_PWRKEY_R, "powerkey_r"),
+ DEFINE_RES_IRQ_NAMED(MT6359_IRQ_HOMEKEY_R, "homekey_r"),
+};
+
static const struct resource mt6323_keys_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6323_IRQ_STATUS_PWRKEY, "powerkey"),
DEFINE_RES_IRQ_NAMED(MT6323_IRQ_STATUS_FCHRKEY, "homekey"),
@@ -122,6 +129,12 @@ static const struct mfd_cell mt6359_devs[] = {
.of_compatible = "mediatek,mt6358-rtc",
},
{ .name = "mt6359-sound", },
+ {
+ .name = "mtk-pmic-keys",
+ .num_resources = ARRAY_SIZE(mt6359_keys_resources),
+ .resources = mt6359_keys_resources,
+ .of_compatible = "mediatek,mt6359-keys"
+ },
};
static const struct mfd_cell mt6397_devs[] = {
diff --git a/drivers/mfd/rt4831.c b/drivers/mfd/rt4831.c
index b169781ac675..fb3bd788a3eb 100644
--- a/drivers/mfd/rt4831.c
+++ b/drivers/mfd/rt4831.c
@@ -90,9 +90,14 @@ static int rt4831_probe(struct i2c_client *client)
static int rt4831_remove(struct i2c_client *client)
{
struct regmap *regmap = dev_get_regmap(&client->dev, NULL);
+ int ret;
/* Disable WLED and DSV outputs */
- return regmap_update_bits(regmap, RT4831_REG_ENABLE, RT4831_RESET_MASK, RT4831_RESET_MASK);
+ ret = regmap_update_bits(regmap, RT4831_REG_ENABLE, RT4831_RESET_MASK, RT4831_RESET_MASK);
+ if (ret)
+ dev_warn(&client->dev, "Failed to disable outputs (%pe)\n", ERR_PTR(ret));
+
+ return 0;
}
static const struct of_device_id __maybe_unused rt4831_of_match[] = {
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 55d2c31bdfb2..d05a47c5187f 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -240,13 +240,14 @@ static int sprd_pmic_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(sprd_pmic_pm_ops, sprd_pmic_suspend, sprd_pmic_resume);
static const struct of_device_id sprd_pmic_match[] = {
- { .compatible = "sprd,sc2731", .data = &sc2731_data },
{ .compatible = "sprd,sc2730", .data = &sc2730_data },
+ { .compatible = "sprd,sc2731", .data = &sc2731_data },
{},
};
MODULE_DEVICE_TABLE(of, sprd_pmic_match);
static const struct spi_device_id sprd_pmic_spi_ids[] = {
+ { .name = "sc2730", .driver_data = (unsigned long)&sc2730_data },
{ .name = "sc2731", .driver_data = (unsigned long)&sc2731_data },
{},
};
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 3d5b14c60e20..0be5731685b4 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -22,6 +22,8 @@
#include <linux/mfd/tmio.h>
#include <linux/mfd/tc6393xb.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#define SCR_REVID 0x08 /* b Revision ID */
@@ -87,8 +89,10 @@
struct tc6393xb {
void __iomem *scr;
+ struct device *dev;
struct gpio_chip gpio;
+ struct gpio_desc *vcc_on;
struct clk *clk; /* 3,6 Mhz */
@@ -497,17 +501,93 @@ static int tc6393xb_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
-static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base)
+/*
+ * TC6393XB GPIOs as used on TOSA, the only user of this chip.
+ * GPIOs 2, 5, 8 and 13 are not connected.
+ */
+#define TOSA_GPIO_TG_ON 0
+#define TOSA_GPIO_L_MUTE 1
+#define TOSA_GPIO_BL_C20MA 3
+#define TOSA_GPIO_CARD_VCC_ON 4
+#define TOSA_GPIO_CHARGE_OFF 6
+#define TOSA_GPIO_CHARGE_OFF_JC 7
+#define TOSA_GPIO_BAT0_V_ON 9
+#define TOSA_GPIO_BAT1_V_ON 10
+#define TOSA_GPIO_BU_CHRG_ON 11
+#define TOSA_GPIO_BAT_SW_ON 12
+#define TOSA_GPIO_BAT0_TH_ON 14
+#define TOSA_GPIO_BAT1_TH_ON 15
+
+
+GPIO_LOOKUP_SINGLE(tosa_lcd_gpio_lookup, "spi2.0", "tc6393xb",
+ TOSA_GPIO_TG_ON, "tg #pwr", GPIO_ACTIVE_HIGH);
+
+GPIO_LOOKUP_SINGLE(tosa_lcd_bl_gpio_lookup, "i2c-tos-bl", "tc6393xb",
+ TOSA_GPIO_BL_C20MA, "backlight", GPIO_ACTIVE_HIGH);
+
+GPIO_LOOKUP_SINGLE(tosa_audio_gpio_lookup, "tosa-audio", "tc6393xb",
+ TOSA_GPIO_L_MUTE, NULL, GPIO_ACTIVE_HIGH);
+
+static struct gpiod_lookup_table tosa_battery_gpio_lookup = {
+ .dev_id = "wm97xx-battery",
+ .table = {
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_CHARGE_OFF,
+ "main charge off", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_CHARGE_OFF_JC,
+ "jacket charge off", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT0_V_ON,
+ "main battery", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT1_V_ON,
+ "jacket battery", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BU_CHRG_ON,
+ "backup battery", GPIO_ACTIVE_HIGH),
+ /* BAT1 and BAT0 thermistors appear to be swapped */
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT1_TH_ON,
+ "main battery temp", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT0_TH_ON,
+ "jacket battery temp", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT_SW_ON,
+ "battery switch", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
+static struct gpiod_lookup_table *tc6393xb_gpio_lookups[] = {
+ &tosa_lcd_gpio_lookup,
+ &tosa_lcd_bl_gpio_lookup,
+ &tosa_audio_gpio_lookup,
+ &tosa_battery_gpio_lookup,
+};
+
+static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb)
{
- tc6393xb->gpio.label = "tc6393xb";
- tc6393xb->gpio.base = gpio_base;
- tc6393xb->gpio.ngpio = 16;
- tc6393xb->gpio.set = tc6393xb_gpio_set;
- tc6393xb->gpio.get = tc6393xb_gpio_get;
- tc6393xb->gpio.direction_input = tc6393xb_gpio_direction_input;
- tc6393xb->gpio.direction_output = tc6393xb_gpio_direction_output;
-
- return gpiochip_add_data(&tc6393xb->gpio, tc6393xb);
+ struct gpio_chip *gc = &tc6393xb->gpio;
+ struct device *dev = tc6393xb->dev;
+ int ret;
+
+ gc->label = "tc6393xb";
+ gc->base = -1; /* Dynamic allocation */
+ gc->ngpio = 16;
+ gc->set = tc6393xb_gpio_set;
+ gc->get = tc6393xb_gpio_get;
+ gc->direction_input = tc6393xb_gpio_direction_input;
+ gc->direction_output = tc6393xb_gpio_direction_output;
+
+ ret = devm_gpiochip_add_data(dev, gc, tc6393xb);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add GPIO chip\n");
+
+ /* Register descriptor look-ups for consumers */
+ gpiod_add_lookup_tables(tc6393xb_gpio_lookups, ARRAY_SIZE(tc6393xb_gpio_lookups));
+
+ /* Request some of our own GPIOs */
+ tc6393xb->vcc_on = gpiochip_request_own_desc(gc, TOSA_GPIO_CARD_VCC_ON, "VCC ON",
+ GPIO_ACTIVE_HIGH, GPIOD_OUT_HIGH);
+ if (IS_ERR(tc6393xb->vcc_on))
+ return dev_err_probe(dev, PTR_ERR(tc6393xb->vcc_on),
+ "failed to request VCC ON GPIO\n");
+
+ return 0;
}
/*--------------------------------------------------------------------------*/
@@ -617,6 +697,7 @@ static int tc6393xb_probe(struct platform_device *dev)
ret = -ENOMEM;
goto err_kzalloc;
}
+ tc6393xb->dev = &dev->dev;
raw_spin_lock_init(&tc6393xb->lock);
@@ -676,22 +757,12 @@ static int tc6393xb_probe(struct platform_device *dev)
tmio_ioread8(tc6393xb->scr + SCR_REVID),
(unsigned long) iomem->start, tc6393xb->irq);
- tc6393xb->gpio.base = -1;
-
- if (tcpd->gpio_base >= 0) {
- ret = tc6393xb_register_gpio(tc6393xb, tcpd->gpio_base);
- if (ret)
- goto err_gpio_add;
- }
+ ret = tc6393xb_register_gpio(tc6393xb);
+ if (ret)
+ goto err_gpio_add;
tc6393xb_attach_irq(dev);
- if (tcpd->setup) {
- ret = tcpd->setup(dev);
- if (ret)
- goto err_setup;
- }
-
tc6393xb_cells[TC6393XB_CELL_NAND].platform_data = tcpd->nand_data;
tc6393xb_cells[TC6393XB_CELL_NAND].pdata_size =
sizeof(*tcpd->nand_data);
@@ -705,15 +776,8 @@ static int tc6393xb_probe(struct platform_device *dev)
if (!ret)
return 0;
- if (tcpd->teardown)
- tcpd->teardown(dev);
-
-err_setup:
tc6393xb_detach_irq(dev);
-
err_gpio_add:
- if (tc6393xb->gpio.base != -1)
- gpiochip_remove(&tc6393xb->gpio);
tcpd->disable(dev);
err_enable:
clk_disable_unprepare(tc6393xb->clk);
@@ -738,14 +802,8 @@ static int tc6393xb_remove(struct platform_device *dev)
mfd_remove_devices(&dev->dev);
- if (tcpd->teardown)
- tcpd->teardown(dev);
-
tc6393xb_detach_irq(dev);
- if (tc6393xb->gpio.base != -1)
- gpiochip_remove(&tc6393xb->gpio);
-
ret = tcpd->disable(dev);
clk_disable_unprepare(tc6393xb->clk);
iounmap(tc6393xb->scr);
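Context for the tc6393xb rework: once the MFD registers gpiod lookup tables keyed by the "tc6393xb" chip label, consumers resolve their lines by con_id instead of hard-coded global GPIO numbers, and the chip requests its own VCC line via gpiochip_request_own_desc(). A hedged sketch of the consumer side — the device and con_id come from the lookup table above, everything else is illustrative:

#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int battery_probe(struct platform_device *pdev)
{
	struct gpio_desc *charge_off;

	/* resolved through the "wm97xx-battery" lookup table registered above */
	charge_off = devm_gpiod_get(&pdev->dev, "main charge off", GPIOD_OUT_LOW);
	if (IS_ERR(charge_off))
		return PTR_ERR(charge_off);

	gpiod_set_value(charge_off, 1);	/* stop charging */
	return 0;
}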
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 289b556dede2..bd6659cf3bc0 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1036,15 +1036,11 @@ static void clocks_init(struct device *dev,
static int twl_remove(struct i2c_client *client)
{
unsigned i, num_slaves;
- int status;
if (twl_class_is_4030())
- status = twl4030_exit_irq();
+ twl4030_exit_irq();
else
- status = twl6030_exit_irq();
-
- if (status < 0)
- return status;
+ twl6030_exit_irq();
num_slaves = twl_get_num_slaves();
for (i = 0; i < num_slaves; i++) {
diff --git a/drivers/mfd/twl-core.h b/drivers/mfd/twl-core.h
index 6f96c2009a9f..b4bf6a233bd0 100644
--- a/drivers/mfd/twl-core.h
+++ b/drivers/mfd/twl-core.h
@@ -3,9 +3,9 @@
#define __TWL_CORE_H__
extern int twl6030_init_irq(struct device *dev, int irq_num);
-extern int twl6030_exit_irq(void);
+extern void twl6030_exit_irq(void);
extern int twl4030_init_irq(struct device *dev, int irq_num);
-extern int twl4030_exit_irq(void);
+extern void twl4030_exit_irq(void);
extern int twl4030_init_chip_irq(const char *chip);
#endif /* __TWL_CORE_H__ */
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index ab417438d1fa..4f576f0160a9 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -753,14 +753,11 @@ fail:
return status;
}
-int twl4030_exit_irq(void)
+void twl4030_exit_irq(void)
{
/* FIXME undo twl_init_irq() */
- if (twl4030_irq_base) {
+ if (twl4030_irq_base)
pr_err("twl4030: can't yet clean up IRQs?\n");
- return -ENOSYS;
- }
- return 0;
}
int twl4030_init_chip_irq(const char *chip)
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 97af6c2a6007..3c03681c124c 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -438,7 +438,7 @@ fail_irq:
return status;
}
-int twl6030_exit_irq(void)
+void twl6030_exit_irq(void)
{
if (twl6030_irq && twl6030_irq->twl_irq) {
unregister_pm_notifier(&twl6030_irq->pm_nb);
@@ -453,6 +453,5 @@ int twl6030_exit_irq(void)
* in this module.
*/
}
- return 0;
}
diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c
index 92c0611034b0..075f3a36d512 100644
--- a/drivers/misc/altera-stapl/altera.c
+++ b/drivers/misc/altera-stapl/altera.c
@@ -530,11 +530,8 @@ exit_done:
}
break;
case OP_SWP:
- if (altera_check_stack(stack_ptr, 2, &status)) {
- long_tmp = stack[stack_ptr - 2];
- stack[stack_ptr - 2] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, 2, &status))
+ swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
break;
case OP_ADD:
if (altera_check_stack(stack_ptr, 2, &status)) {
@@ -912,34 +909,22 @@ exit_done:
*/
/* SWP */
- if (altera_check_stack(stack_ptr, 2, &status)) {
- long_tmp = stack[stack_ptr - 2];
- stack[stack_ptr - 2] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, 2, &status))
+ swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
/* SWPN 7 */
index = 7 + 1;
- if (altera_check_stack(stack_ptr, index, &status)) {
- long_tmp = stack[stack_ptr - index];
- stack[stack_ptr - index] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, index, &status))
+ swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
/* SWP */
- if (altera_check_stack(stack_ptr, 2, &status)) {
- long_tmp = stack[stack_ptr - 2];
- stack[stack_ptr - 2] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, 2, &status))
+ swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
/* SWPN 6 */
index = 6 + 1;
- if (altera_check_stack(stack_ptr, index, &status)) {
- long_tmp = stack[stack_ptr - index];
- stack[stack_ptr - index] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, index, &status))
+ swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
/* DUPN 8 */
index = 8 + 1;
@@ -950,18 +935,12 @@ exit_done:
/* SWPN 2 */
index = 2 + 1;
- if (altera_check_stack(stack_ptr, index, &status)) {
- long_tmp = stack[stack_ptr - index];
- stack[stack_ptr - index] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, index, &status))
+ swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
/* SWP */
- if (altera_check_stack(stack_ptr, 2, &status)) {
- long_tmp = stack[stack_ptr - 2];
- stack[stack_ptr - 2] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, 2, &status))
+ swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
/* DUPN 6 */
index = 6 + 1;
@@ -1075,11 +1054,8 @@ exit_done:
* to swap with top element
*/
index = (args[0]) + 1;
- if (altera_check_stack(stack_ptr, index, &status)) {
- long_tmp = stack[stack_ptr - index];
- stack[stack_ptr - index] = stack[stack_ptr - 1];
- stack[stack_ptr - 1] = long_tmp;
- }
+ if (altera_check_stack(stack_ptr, index, &status))
+ swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
break;
case OP_DUPN:
/*
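Context for the altera-stapl cleanup: the generic swap() helper (in linux/minmax.h on recent kernels, historically linux/kernel.h) expands to the same temporary-variable exchange that each removed long_tmp block wrote by hand. Sketch:

#include <linux/minmax.h>

static void swap_top_two(long *stack, int stack_ptr)
{
	/* equivalent to: tmp = s[p-2]; s[p-2] = s[p-1]; s[p-1] = tmp; */
	swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
}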
diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.c b/drivers/misc/bcm-vk/bcm_vk_msg.c
index 066b9ef7fcd7..3c081504f38c 100644
--- a/drivers/misc/bcm-vk/bcm_vk_msg.c
+++ b/drivers/misc/bcm-vk/bcm_vk_msg.c
@@ -757,20 +757,19 @@ static struct bcm_vk_wkent *bcm_vk_dequeue_pending(struct bcm_vk *vk,
u16 q_num,
u16 msg_id)
{
- bool found = false;
- struct bcm_vk_wkent *entry;
+ struct bcm_vk_wkent *entry = NULL, *iter;
spin_lock(&chan->pendq_lock);
- list_for_each_entry(entry, &chan->pendq[q_num], node) {
- if (get_msg_id(&entry->to_v_msg[0]) == msg_id) {
- list_del(&entry->node);
- found = true;
+ list_for_each_entry(iter, &chan->pendq[q_num], node) {
+ if (get_msg_id(&iter->to_v_msg[0]) == msg_id) {
+ list_del(&iter->node);
+ entry = iter;
bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
break;
}
}
spin_unlock(&chan->pendq_lock);
- return ((found) ? entry : NULL);
+ return entry;
}
s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk)
@@ -1010,16 +1009,14 @@ ssize_t bcm_vk_read(struct file *p_file,
miscdev);
struct device *dev = &vk->pdev->dev;
struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
- struct bcm_vk_wkent *entry = NULL;
+ struct bcm_vk_wkent *entry = NULL, *iter;
u32 q_num;
u32 rsp_length;
- bool found = false;
if (!bcm_vk_drv_access_ok(vk))
return -EPERM;
dev_dbg(dev, "Buf count %zu\n", count);
- found = false;
/*
* search through the pendq on the to_h chan, and return only those
@@ -1028,13 +1025,13 @@ ssize_t bcm_vk_read(struct file *p_file,
*/
spin_lock(&chan->pendq_lock);
for (q_num = 0; q_num < chan->q_nr; q_num++) {
- list_for_each_entry(entry, &chan->pendq[q_num], node) {
- if (entry->ctx->idx == ctx->idx) {
+ list_for_each_entry(iter, &chan->pendq[q_num], node) {
+ if (iter->ctx->idx == ctx->idx) {
if (count >=
- (entry->to_h_blks * VK_MSGQ_BLK_SIZE)) {
- list_del(&entry->node);
+ (iter->to_h_blks * VK_MSGQ_BLK_SIZE)) {
+ list_del(&iter->node);
atomic_dec(&ctx->pend_cnt);
- found = true;
+ entry = iter;
} else {
/* buffer not big enough */
rc = -EMSGSIZE;
@@ -1046,7 +1043,7 @@ ssize_t bcm_vk_read(struct file *p_file,
read_loop_exit:
spin_unlock(&chan->pendq_lock);
- if (found) {
+ if (entry) {
/* retrieve the passed down msg_id */
set_msg_id(&entry->to_h_msg[0], entry->usr_msg_id);
rsp_length = entry->to_h_blks * VK_MSGQ_BLK_SIZE;
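Context for the bcm_vk changes above (and the similar fastrpc and habanalabs command_submission hunks below): after list_for_each_entry() runs to completion without a break, the iterator no longer points at a real entry, so lookup code should record the match in a separate pointer and test that, rather than a found flag or the iterator itself. A self-contained sketch with hypothetical types:

#include <linux/list.h>

struct item {
	struct list_head node;
	int id;
};

static struct item *find_item(struct list_head *head, int id)
{
	struct item *entry = NULL, *iter;

	list_for_each_entry(iter, head, node) {
		if (iter->id == id) {
			entry = iter;	/* only 'entry' is valid after the loop */
			break;
		}
	}

	return entry;
}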
diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c
index 3f514d77a843..9080f9f150a2 100644
--- a/drivers/misc/cardreader/alcor_pci.c
+++ b/drivers/misc/cardreader/alcor_pci.c
@@ -317,12 +317,15 @@ static int alcor_pci_probe(struct pci_dev *pdev,
ret = mfd_add_devices(&pdev->dev, priv->id, alcor_pci_cells,
ARRAY_SIZE(alcor_pci_cells), NULL, 0, NULL);
if (ret < 0)
- goto error_release_regions;
+ goto error_clear_drvdata;
alcor_pci_aspm_ctrl(priv, 0);
return 0;
+error_clear_drvdata:
+ pci_clear_master(pdev);
+ pci_set_drvdata(pdev, NULL);
error_release_regions:
pci_release_regions(pdev);
error_free_ida:
@@ -343,6 +346,7 @@ static void alcor_pci_remove(struct pci_dev *pdev)
ida_free(&alcor_pci_idr, priv->id);
pci_release_regions(pdev);
+ pci_clear_master(pdev);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index a77585ab0f30..749cc5a46d13 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -57,40 +57,6 @@ static void rts5261_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
0xFF, driving[drive_sel][2]);
}
-static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr)
-{
- struct pci_dev *pdev = pcr->pci;
- u32 reg;
-
- /* 0x814~0x817 */
- pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
- pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
-
- if (!rts5261_vendor_setting_valid(reg)) {
- /* Not support MMC default */
- pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
- pcr_dbg(pcr, "skip fetch vendor setting\n");
- return;
- }
-
- if (!rts5261_reg_check_mmc_support(reg))
- pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
-
- /* TO do: need to add rtd3 function */
- pcr->rtd3_en = rts5261_reg_to_rtd3(reg);
-
- if (rts5261_reg_check_reverse_socket(reg))
- pcr->flags |= PCR_REVERSE_SOCKET;
-
- /* 0x724~0x727 */
- pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
- pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
-
- pcr->aspm_en = rts5261_reg_to_aspm(reg);
- pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(reg);
- pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(reg);
-}
-
static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
/* Set relink_time to 0 */
@@ -391,11 +357,11 @@ static void rts5261_process_ocp(struct rtsx_pcr *pcr)
}
-static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
+static void rts5261_init_from_hw(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
- int retval;
- u32 lval, i;
+ u32 lval1, lval2, i;
+ u16 setting_reg1, setting_reg2;
u8 valid, efuse_valid, tmp;
rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
@@ -418,26 +384,70 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
efuse_valid = ((tmp & 0x0C) >> 2);
pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid);
- if (efuse_valid == 0) {
- retval = pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval);
- if (retval != 0)
- pcr_dbg(pcr, "read 0x814 DW fail\n");
- pcr_dbg(pcr, "DW from 0x814: 0x%x\n", lval);
- /* 0x816 */
- valid = (u8)((lval >> 16) & 0x03);
- pcr_dbg(pcr, "0x816: %d\n", valid);
- }
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval2);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, lval2);
+ /* 0x816 */
+ valid = (u8)((lval2 >> 16) & 0x03);
+
rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
REG_EFUSE_POR, 0);
pcr_dbg(pcr, "Disable efuse por!\n");
- pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval);
- lval = lval & 0x00FFFFFF;
- retval = pci_write_config_dword(pdev, PCR_SETTING_REG2, lval);
- if (retval != 0)
- pcr_dbg(pcr, "write config fail\n");
+ if (efuse_valid == 2 || efuse_valid == 3) {
+ if (valid == 3) {
+ /* Bypass efuse */
+ setting_reg1 = PCR_SETTING_REG1;
+ setting_reg2 = PCR_SETTING_REG2;
+ } else {
+ /* Use efuse data */
+ setting_reg1 = PCR_SETTING_REG4;
+ setting_reg2 = PCR_SETTING_REG5;
+ }
+ } else if (efuse_valid == 0) {
+ // default
+ setting_reg1 = PCR_SETTING_REG1;
+ setting_reg2 = PCR_SETTING_REG2;
+ }
+
+ pci_read_config_dword(pdev, setting_reg2, &lval2);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg2, lval2);
+
+ if (!rts5261_vendor_setting_valid(lval2)) {
+ /* Not support MMC default */
+ pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
+ return;
+ }
+
+ if (!rts5261_reg_check_mmc_support(lval2))
+ pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
- return retval;
+ pcr->rtd3_en = rts5261_reg_to_rtd3(lval2);
+
+ if (rts5261_reg_check_reverse_socket(lval2))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+
+ pci_read_config_dword(pdev, setting_reg1, &lval1);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg1, lval1);
+
+ pcr->aspm_en = rts5261_reg_to_aspm(lval1);
+ pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(lval1);
+ pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(lval1);
+
+ if (setting_reg1 == PCR_SETTING_REG1) {
+ /* store setting */
+ rtsx_pci_write_register(pcr, 0xFF0C, 0xFF, (u8)(lval1 & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF0D, 0xFF, (u8)((lval1 >> 8) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF0E, 0xFF, (u8)((lval1 >> 16) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF0F, 0xFF, (u8)((lval1 >> 24) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF10, 0xFF, (u8)(lval2 & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF11, 0xFF, (u8)((lval2 >> 8) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF12, 0xFF, (u8)((lval2 >> 16) & 0xFF));
+
+ pci_write_config_dword(pdev, PCR_SETTING_REG4, lval1);
+ lval2 = lval2 & 0x00FFFFFF;
+ pci_write_config_dword(pdev, PCR_SETTING_REG5, lval2);
+ }
}
static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
@@ -636,7 +646,6 @@ static void rts5261_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
}
static const struct pcr_ops rts5261_pcr_ops = {
- .fetch_vendor_settings = rtsx5261_fetch_vendor_settings,
.turn_on_led = rts5261_turn_on_led,
.turn_off_led = rts5261_turn_off_led,
.extra_init_hw = rts5261_extra_init_hw,
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index 59eda55d92a3..1ef9b61077c4 100644
--- a/drivers/misc/cardreader/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -667,6 +667,7 @@ static int rtsx_usb_probe(struct usb_interface *intf,
return 0;
out_init_fail:
+ usb_set_intfdata(ucr->pusb_intf, NULL);
usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
ucr->iobuf_dma);
return ret;
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index b493de962153..d85c56530863 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -12,6 +12,7 @@
#include <linux/pseudo_fs.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
+#include <linux/irqdomain.h>
#include "cxl.h"
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 5dc0f6093f9d..7a6dd91987fd 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -25,6 +25,8 @@
extern uint cxl_verbose;
+struct property;
+
#define CXL_TIMEOUT 5
/*
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
index 53b919856426..e5fe0a171472 100644
--- a/drivers/misc/cxl/cxllib.c
+++ b/drivers/misc/cxl/cxllib.c
@@ -5,6 +5,7 @@
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
+#include <asm/opal-api.h>
#include <asm/pnv-pci.h>
#include <misc/cxllib.h>
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index 5b93ff51d82a..eee9decc121e 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -4,6 +4,7 @@
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/of.h>
#include <asm/rtas.h>
#include "cxl.h"
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 9d485c9e3fff..3321c014913c 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -6,6 +6,8 @@
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
+#include <linux/irqdomain.h>
+#include <linux/platform_device.h>
#include "cxl.h"
#include "hcalls.h"
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 4cb829d5d873..5f0e2dcebb34 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -4,6 +4,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 43b312d06e3e..c1fbf6f588f7 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <linux/sched/task.h>
#include <asm/cputable.h>
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 1a7f22836041..50b0c44bb8d7 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
+#include <linux/irqdomain.h>
#include <asm/synch.h>
#include <asm/switch_to.h>
#include <misc/cxl-base.h>
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index d80ada8cac09..93ebd174d848 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1606,17 +1606,18 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
struct fastrpc_req_munmap *req)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
- struct fastrpc_buf *buf, *b;
+ struct fastrpc_buf *buf = NULL, *iter, *b;
struct fastrpc_munmap_req_msg req_msg;
struct device *dev = fl->sctx->dev;
int err;
u32 sc;
spin_lock(&fl->lock);
- list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
- if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
+ list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
+ if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) {
+ buf = iter;
break;
- buf = NULL;
+ }
}
spin_unlock(&fl->lock);
@@ -1747,17 +1748,18 @@ err_invoke:
static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
- struct fastrpc_map *map = NULL, *m;
+ struct fastrpc_map *map = NULL, *iter, *m;
struct fastrpc_mem_unmap_req_msg req_msg = { 0 };
int err = 0;
u32 sc;
struct device *dev = fl->sctx->dev;
spin_lock(&fl->lock);
- list_for_each_entry_safe(map, m, &fl->maps, node) {
- if ((req->fd < 0 || map->fd == req->fd) && (map->raddr == req->vaddr))
+ list_for_each_entry_safe(iter, m, &fl->maps, node) {
+ if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) {
+ map = iter;
break;
- map = NULL;
+ }
}
spin_unlock(&fl->lock);
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile
index 6ebe3c7001ff..934a3a4aedc9 100644
--- a/drivers/misc/habanalabs/common/Makefile
+++ b/drivers/misc/habanalabs/common/Makefile
@@ -11,4 +11,4 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/command_buffer.o common/hw_queue.o common/irq.o \
common/sysfs.o common/hwmon.o common/memory.o \
common/command_submission.o common/firmware_if.o \
- common/state_dump.o
+ common/state_dump.o common/memory_mgr.o
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index a507110f6443..e13b2b39c058 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -160,24 +160,6 @@ static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
}
}
-static void cb_release(struct kref *ref)
-{
- struct hl_device *hdev;
- struct hl_cb *cb;
-
- cb = container_of(ref, struct hl_cb, refcount);
- hdev = cb->hdev;
-
- hl_debugfs_remove_cb(cb);
-
- if (cb->is_mmu_mapped)
- cb_unmap_mem(cb->ctx, cb);
-
- hl_ctx_put(cb->ctx);
-
- cb_do_release(hdev, cb);
-}
-
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
int ctx_id, bool internal_cb)
{
@@ -238,168 +220,175 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
return cb;
}
-int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
- struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
- bool map_cb, u64 *handle)
+struct hl_cb_mmap_mem_alloc_args {
+ struct hl_device *hdev;
+ struct hl_ctx *ctx;
+ u32 cb_size;
+ bool internal_cb;
+ bool map_cb;
+};
+
+static void hl_cb_mmap_mem_release(struct hl_mmap_mem_buf *buf)
{
- struct hl_cb *cb;
- bool alloc_new_cb = true;
- int rc, ctx_id = ctx->asid;
+ struct hl_cb *cb = buf->private;
- /*
- * Can't use generic function to check this because of special case
- * where we create a CB as part of the reset process
- */
- if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
- dev_warn_ratelimited(hdev->dev,
- "Device is disabled or in reset. Can't create new CBs\n");
- rc = -EBUSY;
- goto out_err;
- }
+ hl_debugfs_remove_cb(cb);
- if (cb_size > SZ_2M) {
- dev_err(hdev->dev, "CB size %d must be less than %d\n",
- cb_size, SZ_2M);
- rc = -EINVAL;
- goto out_err;
- }
+ if (cb->is_mmu_mapped)
+ cb_unmap_mem(cb->ctx, cb);
+
+ hl_ctx_put(cb->ctx);
- if (!internal_cb) {
+ cb_do_release(cb->hdev, cb);
+}
+
+static int hl_cb_mmap_mem_alloc(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
+{
+ struct hl_cb_mmap_mem_alloc_args *cb_args = args;
+ struct hl_cb *cb;
+ int rc, ctx_id = cb_args->ctx->asid;
+ bool alloc_new_cb = true;
+
+ if (!cb_args->internal_cb) {
/* Minimum allocation must be PAGE SIZE */
- if (cb_size < PAGE_SIZE)
- cb_size = PAGE_SIZE;
+ if (cb_args->cb_size < PAGE_SIZE)
+ cb_args->cb_size = PAGE_SIZE;
if (ctx_id == HL_KERNEL_ASID_ID &&
- cb_size <= hdev->asic_prop.cb_pool_cb_size) {
+ cb_args->cb_size <= cb_args->hdev->asic_prop.cb_pool_cb_size) {
- spin_lock(&hdev->cb_pool_lock);
- if (!list_empty(&hdev->cb_pool)) {
- cb = list_first_entry(&hdev->cb_pool,
+ spin_lock(&cb_args->hdev->cb_pool_lock);
+ if (!list_empty(&cb_args->hdev->cb_pool)) {
+ cb = list_first_entry(&cb_args->hdev->cb_pool,
typeof(*cb), pool_list);
list_del(&cb->pool_list);
- spin_unlock(&hdev->cb_pool_lock);
+ spin_unlock(&cb_args->hdev->cb_pool_lock);
alloc_new_cb = false;
} else {
- spin_unlock(&hdev->cb_pool_lock);
- dev_dbg(hdev->dev, "CB pool is empty\n");
+ spin_unlock(&cb_args->hdev->cb_pool_lock);
+ dev_dbg(cb_args->hdev->dev, "CB pool is empty\n");
}
}
}
if (alloc_new_cb) {
- cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
- if (!cb) {
- rc = -ENOMEM;
- goto out_err;
- }
+ cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb);
+ if (!cb)
+ return -ENOMEM;
}
- cb->hdev = hdev;
- cb->ctx = ctx;
- hl_ctx_get(hdev, cb->ctx);
+ cb->hdev = cb_args->hdev;
+ cb->ctx = cb_args->ctx;
+ cb->buf = buf;
+ cb->buf->mappable_size = cb->size;
+ cb->buf->private = cb;
+
+ hl_ctx_get(cb->ctx);
- if (map_cb) {
+ if (cb_args->map_cb) {
if (ctx_id == HL_KERNEL_ASID_ID) {
- dev_err(hdev->dev,
+ dev_err(cb_args->hdev->dev,
"CB mapping is not supported for kernel context\n");
rc = -EINVAL;
goto release_cb;
}
- rc = cb_map_mem(ctx, cb);
+ rc = cb_map_mem(cb_args->ctx, cb);
if (rc)
goto release_cb;
}
- spin_lock(&mgr->cb_lock);
- rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
- spin_unlock(&mgr->cb_lock);
-
- if (rc < 0) {
- dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
- goto unmap_mem;
- }
-
- cb->id = (u64) rc;
-
- kref_init(&cb->refcount);
- spin_lock_init(&cb->lock);
-
- /*
- * idr is 32-bit so we can safely OR it with a mask that is above
- * 32 bit
- */
- *handle = cb->id | HL_MMAP_TYPE_CB;
- *handle <<= PAGE_SHIFT;
-
hl_debugfs_add_cb(cb);
return 0;
-unmap_mem:
- if (cb->is_mmu_mapped)
- cb_unmap_mem(cb->ctx, cb);
release_cb:
hl_ctx_put(cb->ctx);
- cb_do_release(hdev, cb);
-out_err:
- *handle = 0;
+ cb_do_release(cb_args->hdev, cb);
return rc;
}
-int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
+static int hl_cb_mmap(struct hl_mmap_mem_buf *buf,
+ struct vm_area_struct *vma, void *args)
{
- struct hl_cb *cb;
- u32 handle;
- int rc = 0;
+ struct hl_cb *cb = buf->private;
- /*
- * handle was given to user to do mmap, I need to shift it back to
- * how the idr module gave it to me
- */
- cb_handle >>= PAGE_SHIFT;
- handle = (u32) cb_handle;
+ return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address,
+ cb->bus_address, cb->size);
+}
- spin_lock(&mgr->cb_lock);
+static struct hl_mmap_mem_buf_behavior cb_behavior = {
+ .topic = "CB",
+ .mem_id = HL_MMAP_TYPE_CB,
+ .alloc = hl_cb_mmap_mem_alloc,
+ .release = hl_cb_mmap_mem_release,
+ .mmap = hl_cb_mmap,
+};
- cb = idr_find(&mgr->cb_handles, handle);
- if (cb) {
- idr_remove(&mgr->cb_handles, handle);
- spin_unlock(&mgr->cb_lock);
- kref_put(&cb->refcount, cb_release);
- } else {
- spin_unlock(&mgr->cb_lock);
- dev_err(hdev->dev,
- "CB destroy failed, no match to handle 0x%x\n", handle);
- rc = -EINVAL;
+int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
+ struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
+ bool map_cb, u64 *handle)
+{
+ struct hl_cb_mmap_mem_alloc_args args = {
+ .hdev = hdev,
+ .ctx = ctx,
+ .cb_size = cb_size,
+ .internal_cb = internal_cb,
+ .map_cb = map_cb,
+ };
+ struct hl_mmap_mem_buf *buf;
+ int ctx_id = ctx->asid;
+
+ if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
+ dev_warn_ratelimited(hdev->dev,
+ "Device is disabled or in reset. Can't create new CBs\n");
+ return -EBUSY;
}
- return rc;
+ if (cb_size > SZ_2M) {
+ dev_err(hdev->dev, "CB size %d must be less than %d\n",
+ cb_size, SZ_2M);
+ return -EINVAL;
+ }
+
+ buf = hl_mmap_mem_buf_alloc(
+ mmg, &cb_behavior,
+ ctx_id == HL_KERNEL_ASID_ID ? GFP_ATOMIC : GFP_KERNEL, &args);
+ if (!buf)
+ return -ENOMEM;
+
+ *handle = buf->handle;
+
+ return 0;
+}
+
+int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
+{
+ int rc;
+
+ rc = hl_mmap_mem_buf_put_handle(mmg, cb_handle);
+ if (rc < 0)
+ return rc; /* Invalid handle */
+
+ if (rc == 0)
+ dev_dbg(mmg->dev, "CB 0x%llx is destroyed while still in use\n", cb_handle);
+
+ return 0;
}
-static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
- u64 cb_handle, u32 flags, u32 *usage_cnt, u64 *device_va)
+static int hl_cb_info(struct hl_mem_mgr *mmg,
+ u64 handle, u32 flags, u32 *usage_cnt, u64 *device_va)
{
struct hl_vm_va_block *va_block;
struct hl_cb *cb;
- u32 handle;
int rc = 0;
- /* The CB handle was given to user to do mmap, so need to shift it back
- * to the value which was allocated by the IDR module.
- */
- cb_handle >>= PAGE_SHIFT;
- handle = (u32) cb_handle;
-
- spin_lock(&mgr->cb_lock);
-
- cb = idr_find(&mgr->cb_handles, handle);
+ cb = hl_cb_get(mmg, handle);
if (!cb) {
- dev_err(hdev->dev,
- "CB info failed, no match to handle 0x%x\n", handle);
- rc = -EINVAL;
- goto out;
+ dev_err(mmg->dev,
+ "CB info failed, no match to handle 0x%llx\n", handle);
+ return -EINVAL;
}
if (flags & HL_CB_FLAGS_GET_DEVICE_VA) {
@@ -407,7 +396,7 @@ static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
if (va_block) {
*device_va = va_block->start;
} else {
- dev_err(hdev->dev, "CB is not mapped to the device's MMU\n");
+ dev_err(mmg->dev, "CB is not mapped to the device's MMU\n");
rc = -EINVAL;
goto out;
}
@@ -416,7 +405,7 @@ static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
}
out:
- spin_unlock(&mgr->cb_lock);
+ hl_cb_put(cb);
return rc;
}
@@ -444,7 +433,7 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
args->in.cb_size, HL_MAX_CB_SIZE);
rc = -EINVAL;
} else {
- rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
+ rc = hl_cb_create(hdev, &hpriv->mem_mgr, hpriv->ctx,
args->in.cb_size, false,
!!(args->in.flags & HL_CB_FLAGS_MAP),
&handle);
@@ -455,12 +444,12 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
break;
case HL_CB_OP_DESTROY:
- rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
+ rc = hl_cb_destroy(&hpriv->mem_mgr,
args->in.cb_handle);
break;
case HL_CB_OP_INFO:
- rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle,
+ rc = hl_cb_info(&hpriv->mem_mgr, args->in.cb_handle,
args->in.flags,
&usage_cnt,
&device_va);
@@ -483,163 +472,20 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
return rc;
}
-static void cb_vm_close(struct vm_area_struct *vma)
+struct hl_cb *hl_cb_get(struct hl_mem_mgr *mmg, u64 handle)
{
- struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
- long new_mmap_size;
-
- new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);
-
- if (new_mmap_size > 0) {
- cb->mmap_size = new_mmap_size;
- return;
- }
-
- spin_lock(&cb->lock);
- cb->mmap = false;
- spin_unlock(&cb->lock);
-
- hl_cb_put(cb);
- vma->vm_private_data = NULL;
-}
-
-static const struct vm_operations_struct cb_vm_ops = {
- .close = cb_vm_close
-};
-
-int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
-{
- struct hl_device *hdev = hpriv->hdev;
- struct hl_cb *cb;
- u32 handle, user_cb_size;
- int rc;
-
- /* We use the page offset to hold the idr and thus we need to clear
- * it before doing the mmap itself
- */
- handle = vma->vm_pgoff;
- vma->vm_pgoff = 0;
-
- /* reference was taken here */
- cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
- if (!cb) {
- dev_err(hdev->dev,
- "CB mmap failed, no match to handle 0x%x\n", handle);
- return -EINVAL;
- }
-
- /* Validation check */
- user_cb_size = vma->vm_end - vma->vm_start;
- if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
- dev_err(hdev->dev,
- "CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
- vma->vm_end - vma->vm_start, cb->size);
- rc = -EINVAL;
- goto put_cb;
- }
-
- if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
- user_cb_size)) {
- dev_err(hdev->dev,
- "user pointer is invalid - 0x%lx\n",
- vma->vm_start);
-
- rc = -EINVAL;
- goto put_cb;
- }
-
- spin_lock(&cb->lock);
+ struct hl_mmap_mem_buf *buf;
- if (cb->mmap) {
- dev_err(hdev->dev,
- "CB mmap failed, CB already mmaped to user\n");
- rc = -EINVAL;
- goto release_lock;
- }
-
- cb->mmap = true;
-
- spin_unlock(&cb->lock);
-
- vma->vm_ops = &cb_vm_ops;
-
- /*
- * Note: We're transferring the cb reference to
- * vma->vm_private_data here.
- */
-
- vma->vm_private_data = cb;
-
- rc = hdev->asic_funcs->mmap(hdev, vma, cb->kernel_address,
- cb->bus_address, cb->size);
- if (rc) {
- spin_lock(&cb->lock);
- cb->mmap = false;
- goto release_lock;
- }
-
- cb->mmap_size = cb->size;
- vma->vm_pgoff = handle;
-
- return 0;
-
-release_lock:
- spin_unlock(&cb->lock);
-put_cb:
- hl_cb_put(cb);
- return rc;
-}
-
-struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
- u32 handle)
-{
- struct hl_cb *cb;
-
- spin_lock(&mgr->cb_lock);
- cb = idr_find(&mgr->cb_handles, handle);
-
- if (!cb) {
- spin_unlock(&mgr->cb_lock);
- dev_warn(hdev->dev,
- "CB get failed, no match to handle 0x%x\n", handle);
+ buf = hl_mmap_mem_buf_get(mmg, handle);
+ if (!buf)
return NULL;
- }
-
- kref_get(&cb->refcount);
-
- spin_unlock(&mgr->cb_lock);
-
- return cb;
+ return buf->private;
}
void hl_cb_put(struct hl_cb *cb)
{
- kref_put(&cb->refcount, cb_release);
-}
-
-void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
-{
- spin_lock_init(&mgr->cb_lock);
- idr_init(&mgr->cb_handles);
-}
-
-void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
-{
- struct hl_cb *cb;
- struct idr *idp;
- u32 id;
-
- idp = &mgr->cb_handles;
-
- idr_for_each_entry(idp, cb, id) {
- if (kref_put(&cb->refcount, cb_release) != 1)
- dev_err(hdev->dev,
- "CB %d for CTX ID %d is still alive\n",
- id, cb->ctx->asid);
- }
-
- idr_destroy(&mgr->cb_handles);
+ hl_mmap_mem_buf_put(cb->buf);
}
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
@@ -649,7 +495,7 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
struct hl_cb *cb;
int rc;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
+ rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, cb_size,
internal_cb, false, &cb_handle);
if (rc) {
dev_err(hdev->dev,
@@ -657,8 +503,7 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
return NULL;
}
- cb_handle >>= PAGE_SHIFT;
- cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
+ cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle);
/* hl_cb_get should never fail here */
if (!cb) {
dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
@@ -669,7 +514,7 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
return cb;
destroy_cb:
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, cb_handle);
return NULL;
}
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index d93ef9f1c45c..fb30b7de4aab 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -407,8 +407,7 @@ static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
{
- bool next_entry_found = false;
- struct hl_cs *next, *first_cs;
+ struct hl_cs *next = NULL, *iter, *first_cs;
if (!cs_needs_timeout(cs))
return;
@@ -443,13 +442,13 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
spin_lock(&hdev->cs_mirror_lock);
/* queue TDR for next CS */
- list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node)
- if (cs_needs_timeout(next)) {
- next_entry_found = true;
+ list_for_each_entry(iter, &hdev->cs_mirror_list, mirror_node)
+ if (cs_needs_timeout(iter)) {
+ next = iter;
break;
}
- if (next_entry_found && !next->tdr_active) {
+ if (next && !next->tdr_active) {
next->tdr_active = true;
schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
}
@@ -736,11 +735,10 @@ static void cs_timedout(struct work_struct *work)
hdev = cs->ctx->hdev;
/* Save only the first CS timeout parameters */
- rc = atomic_cmpxchg(&hdev->last_error.cs_write_disable, 0, 1);
+ rc = atomic_cmpxchg(&hdev->last_error.cs_timeout.write_disable, 0, 1);
if (!rc) {
- hdev->last_error.open_dev_timestamp = hdev->last_successful_open_ktime;
- hdev->last_error.cs_timeout_timestamp = ktime_get();
- hdev->last_error.cs_timeout_seq = cs->sequence;
+ hdev->last_error.cs_timeout.timestamp = ktime_get();
+ hdev->last_error.cs_timeout.seq = cs->sequence;
}
switch (cs->type) {
@@ -806,7 +804,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
}
/* increment refcnt for context */
- hl_ctx_get(hdev, ctx);
+ hl_ctx_get(ctx);
cs->ctx = ctx;
cs->submitted = false;
@@ -958,9 +956,9 @@ wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
spin_lock_irqsave(&interrupt->wait_list_lock, flags);
list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) {
- if (pend->ts_reg_info.ts_buff) {
+ if (pend->ts_reg_info.buf) {
list_del(&pend->wait_list_node);
- hl_ts_put(pend->ts_reg_info.ts_buff);
+ hl_mmap_mem_buf_put(pend->ts_reg_info.buf);
hl_cb_put(pend->ts_reg_info.cq_cb);
} else {
pend->fence.error = -EIO;
@@ -1072,17 +1070,14 @@ static int validate_queue_index(struct hl_device *hdev,
}
static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
- struct hl_cb_mgr *cb_mgr,
+ struct hl_mem_mgr *mmg,
struct hl_cs_chunk *chunk)
{
struct hl_cb *cb;
- u32 cb_handle;
- cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
-
- cb = hl_cb_get(hdev, cb_mgr, cb_handle);
+ cb = hl_cb_get(mmg, chunk->cb_handle);
if (!cb) {
- dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
+ dev_err(hdev->dev, "CB handle 0x%llx invalid\n", chunk->cb_handle);
return NULL;
}
@@ -1344,7 +1339,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
}
if (is_kernel_allocated_cb) {
- cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
+ cb = get_cb_from_cs_chunk(hdev, &hpriv->mem_mgr, chunk);
if (!cb) {
atomic64_inc(
&ctx->cs_counters.validation_drop_cnt);
@@ -1772,7 +1767,7 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
*/
job->patched_cb = job->user_cb;
job->job_cb_size = job->user_cb_size;
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
/* increment refcount as for external queues we get completion */
cs_get(cs);
@@ -1834,7 +1829,7 @@ static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
handle->count = count;
- hl_ctx_get(hdev, hpriv->ctx);
+ hl_ctx_get(hpriv->ctx);
handle->ctx = hpriv->ctx;
mgr = &hpriv->ctx->sig_mgr;
@@ -2528,7 +2523,7 @@ static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
if (timestamp)
*timestamp = 0;
- hl_ctx_get(hdev, ctx);
+ hl_ctx_get(ctx);
fence = hl_ctx_get_fence(ctx, seq);
@@ -2668,7 +2663,7 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
struct multi_cs_completion *mcs_compl;
struct hl_device *hdev = hpriv->hdev;
- struct multi_cs_data mcs_data = {0};
+ struct multi_cs_data mcs_data = {};
union hl_wait_cs_args *args = data;
struct hl_ctx *ctx = hpriv->ctx;
struct hl_fence **fence_arr;
@@ -2719,7 +2714,7 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
mcs_data.fence_arr = fence_arr;
mcs_data.arr_len = seq_arr_len;
- hl_ctx_get(hdev, ctx);
+ hl_ctx_get(ctx);
/* wait (with timeout) for the first CS to be completed */
mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us);
@@ -2868,12 +2863,13 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
return 0;
}
-static int ts_buff_get_kernel_ts_record(struct hl_ts_buff *ts_buff,
+static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
struct hl_cb *cq_cb,
u64 ts_offset, u64 cq_offset, u64 target_value,
spinlock_t *wait_list_lock,
struct hl_user_pending_interrupt **pend)
{
+ struct hl_ts_buff *ts_buff = buf->private;
struct hl_user_pending_interrupt *requested_offset_record =
(struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
ts_offset;
@@ -2885,7 +2881,7 @@ static int ts_buff_get_kernel_ts_record(struct hl_ts_buff *ts_buff,
/* Validate ts_offset not exceeding last max */
if (requested_offset_record > cb_last) {
- dev_err(ts_buff->hdev->dev, "Ts offset exceeds max CB offset(0x%llx)\n",
+ dev_err(buf->mmg->dev, "Ts offset exceeds max CB offset(0x%llx)\n",
(u64)(uintptr_t)cb_last);
return -EINVAL;
}
@@ -2904,18 +2900,21 @@ start_over:
list_del(&requested_offset_record->wait_list_node);
spin_unlock_irqrestore(wait_list_lock, flags);
- hl_ts_put(requested_offset_record->ts_reg_info.ts_buff);
+ hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf);
hl_cb_put(requested_offset_record->ts_reg_info.cq_cb);
- dev_dbg(ts_buff->hdev->dev, "ts node removed from interrupt list now can re-use\n");
+ dev_dbg(buf->mmg->dev,
+ "ts node removed from interrupt list now can re-use\n");
} else {
- dev_dbg(ts_buff->hdev->dev, "ts node in middle of irq handling\n");
+ dev_dbg(buf->mmg->dev,
+ "ts node in middle of irq handling\n");
/* irq handling in the middle give it time to finish */
spin_unlock_irqrestore(wait_list_lock, flags);
usleep_range(1, 10);
if (++iter_counter == MAX_TS_ITER_NUM) {
- dev_err(ts_buff->hdev->dev, "handling registration interrupt took too long!!\n");
+ dev_err(buf->mmg->dev,
+ "handling registration interrupt took too long!!\n");
return -EINVAL;
}
@@ -2927,7 +2926,7 @@ start_over:
/* Fill up the new registration node info */
requested_offset_record->ts_reg_info.in_use = 1;
- requested_offset_record->ts_reg_info.ts_buff = ts_buff;
+ requested_offset_record->ts_reg_info.buf = buf;
requested_offset_record->ts_reg_info.cq_cb = cq_cb;
requested_offset_record->ts_reg_info.timestamp_kernel_addr =
(u64 *) ts_buff->user_buff_address + ts_offset;
@@ -2937,21 +2936,20 @@ start_over:
*pend = requested_offset_record;
- dev_dbg(ts_buff->hdev->dev, "Found available node in TS kernel CB(0x%llx)\n",
+ dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB(0x%llx)\n",
(u64)(uintptr_t)requested_offset_record);
return 0;
}
static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
- struct hl_cb_mgr *cb_mgr, struct hl_ts_mgr *ts_mgr,
+ struct hl_mem_mgr *cb_mmg, struct hl_mem_mgr *mmg,
u64 timeout_us, u64 cq_counters_handle, u64 cq_counters_offset,
u64 target_value, struct hl_user_interrupt *interrupt,
bool register_ts_record, u64 ts_handle, u64 ts_offset,
u32 *status, u64 *timestamp)
{
- u32 cq_patched_handle, ts_patched_handle;
struct hl_user_pending_interrupt *pend;
- struct hl_ts_buff *ts_buff;
+ struct hl_mmap_mem_buf *buf;
struct hl_cb *cq_cb;
unsigned long timeout, flags;
long completion_rc;
@@ -2959,10 +2957,9 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
timeout = hl_usecs64_to_jiffies(timeout_us);
- hl_ctx_get(hdev, ctx);
+ hl_ctx_get(ctx);
- cq_patched_handle = lower_32_bits(cq_counters_handle >> PAGE_SHIFT);
- cq_cb = hl_cb_get(hdev, cb_mgr, cq_patched_handle);
+ cq_cb = hl_cb_get(cb_mmg, cq_counters_handle);
if (!cq_cb) {
rc = -EINVAL;
goto put_ctx;
@@ -2971,16 +2968,14 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
if (register_ts_record) {
dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n",
interrupt->interrupt_id, ts_offset, cq_counters_offset);
-
- ts_patched_handle = lower_32_bits(ts_handle >> PAGE_SHIFT);
- ts_buff = hl_ts_get(hdev, ts_mgr, ts_patched_handle);
- if (!ts_buff) {
+ buf = hl_mmap_mem_buf_get(mmg, ts_handle);
+ if (!buf) {
rc = -EINVAL;
goto put_cq_cb;
}
/* Find first available record */
- rc = ts_buff_get_kernel_ts_record(ts_buff, cq_cb, ts_offset,
+ rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset,
cq_counters_offset, target_value,
&interrupt->wait_list_lock, &pend);
if (rc)
@@ -3087,7 +3082,7 @@ ts_registration_exit:
return rc;
put_ts_buff:
- hl_ts_put(ts_buff);
+ hl_mmap_mem_buf_put(buf);
put_cq_cb:
hl_cb_put(cq_cb);
put_ctx:
@@ -3111,7 +3106,7 @@ static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_
timeout = hl_usecs64_to_jiffies(timeout_us);
- hl_ctx_get(hdev, ctx);
+ hl_ctx_get(ctx);
pend = kzalloc(sizeof(*pend), GFP_KERNEL);
if (!pend) {
@@ -3249,7 +3244,7 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
interrupt = &hdev->user_interrupt[interrupt_id - first_interrupt];
if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ)
- rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->cb_mgr, &hpriv->ts_mem_mgr,
+ rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr,
args->in.interrupt_timeout_us, args->in.cq_counters_handle,
args->in.cq_counters_offset,
args->in.target, interrupt,
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index c6360e33bce8..ed2cfd0c6e99 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -262,7 +262,7 @@ err_hw_block_mem_fini:
return rc;
}
-void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
+void hl_ctx_get(struct hl_ctx *ctx)
{
kref_get(&ctx->refcount);
}
@@ -284,7 +284,7 @@ struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
 * immediately once we find it
*/
ctx = hpriv->ctx;
- hl_ctx_get(hdev, ctx);
+ hl_ctx_get(ctx);
break;
}
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index f18495545854..c6744bfc6da4 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
+#include <linux/iommu.h>
#define MMU_ADDR_BUF_SIZE 40
#define MMU_ASID_BUF_SIZE 10
@@ -125,9 +126,9 @@ static int command_buffers_show(struct seq_file *s, void *data)
}
seq_printf(s,
" %03llu %d 0x%08x %d %d %d\n",
- cb->id, cb->ctx->asid, cb->size,
- kref_read(&cb->refcount),
- cb->mmap, atomic_read(&cb->cs_cnt));
+ cb->buf->handle, cb->ctx->asid, cb->size,
+ kref_read(&cb->buf->refcount),
+ atomic_read(&cb->buf->mmap), atomic_read(&cb->cs_cnt));
}
spin_unlock(&dev_entry->cb_spinlock);
@@ -369,8 +370,7 @@ static int userptr_lookup_show(struct seq_file *s, void *data)
if (dev_entry->userptr_lookup >= userptr->addr &&
dev_entry->userptr_lookup < userptr->addr + userptr->size) {
total_npages = 0;
- for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents,
- i) {
+ for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
npages = hl_get_sg_info(sg, &dma_addr);
sg_start = userptr->addr +
total_npages * PAGE_SIZE;
@@ -538,6 +538,39 @@ static int engines_show(struct seq_file *s, void *data)
return 0;
}
+static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u64 val = entry->memory_scrub_val;
+ int rc;
+
+ if (!hl_device_operational(hdev, NULL)) {
+ dev_warn_ratelimited(hdev->dev, "Can't scrub memory, device is not operational\n");
+ return -EIO;
+ }
+
+ mutex_lock(&hdev->fpriv_list_lock);
+ if (hdev->is_compute_ctx_active) {
+ mutex_unlock(&hdev->fpriv_list_lock);
+		dev_err(hdev->dev, "can't scrub dram, context exists\n");
+ return -EBUSY;
+ }
+ hdev->is_in_dram_scrub = true;
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ rc = hdev->asic_funcs->scrub_device_dram(hdev, val);
+
+ mutex_lock(&hdev->fpriv_list_lock);
+ hdev->is_in_dram_scrub = false;
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ if (rc)
+ return rc;
+ return count;
+}
+
static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -647,13 +680,105 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
return rc;
}
+static int hl_access_dev_mem_by_region(struct hl_device *hdev, u64 addr,
+ u64 *val, enum debugfs_access_type acc_type, bool *found)
+{
+ size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
+ sizeof(u64) : sizeof(u32);
+ struct pci_mem_region *mem_reg;
+ int i;
+
+ for (i = 0; i < PCI_REGION_NUMBER; i++) {
+ mem_reg = &hdev->pci_mem_region[i];
+ if (!mem_reg->used)
+ continue;
+ if (addr >= mem_reg->region_base &&
+ addr <= mem_reg->region_base + mem_reg->region_size - acc_size) {
+ *found = true;
+ return hdev->asic_funcs->access_dev_mem(hdev, mem_reg, i,
+ addr, val, acc_type);
+ }
+ }
+ return 0;
+}
+
+static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
+ enum debugfs_access_type acc_type)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 offset = prop->device_dma_offset_for_host_access;
+
+ switch (acc_type) {
+ case DEBUGFS_READ32:
+ *val = *(u32 *) phys_to_virt(addr - offset);
+ break;
+ case DEBUGFS_WRITE32:
+ *(u32 *) phys_to_virt(addr - offset) = *val;
+ break;
+ case DEBUGFS_READ64:
+ *val = *(u64 *) phys_to_virt(addr - offset);
+ break;
+ case DEBUGFS_WRITE64:
+ *(u64 *) phys_to_virt(addr - offset) = *val;
+ break;
+ default:
+		dev_err(hdev->dev, "hostmem access-type %d is not supported\n", acc_type);
+ break;
+ }
+}
+
+static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
+ enum debugfs_access_type acc_type)
+{
+ size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
+ sizeof(u64) : sizeof(u32);
+ u64 host_start = hdev->asic_prop.host_base_address;
+ u64 host_end = hdev->asic_prop.host_end_address;
+ bool user_address, found = false;
+ int rc;
+
+ user_address = hl_is_device_va(hdev, addr);
+ if (user_address) {
+ rc = device_va_to_pa(hdev, addr, acc_size, &addr);
+ if (rc)
+ return rc;
+ }
+
+ rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed reading addr %#llx from dev mem (%d)\n",
+ addr, rc);
+ return rc;
+ }
+
+ if (found)
+ return 0;
+
+ if (!user_address || device_iommu_mapped(&hdev->pdev->dev)) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (addr >= host_start && addr <= host_end - acc_size) {
+ hl_access_host_mem(hdev, addr, val, acc_type);
+ } else {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_err(hdev->dev, "invalid addr %#llx\n", addr);
+ return rc;
+}
+
static ssize_t hl_data_read32(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- u64 addr = entry->addr;
- bool user_address;
+ u64 value64, addr = entry->addr;
char tmp_buf[32];
ssize_t rc;
u32 val;
@@ -666,18 +791,11 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
if (*ppos)
return 0;
- user_address = hl_is_device_va(hdev, addr);
- if (user_address) {
- rc = device_va_to_pa(hdev, addr, sizeof(val), &addr);
- if (rc)
- return rc;
- }
-
- rc = hdev->asic_funcs->debugfs_read32(hdev, addr, user_address, &val);
- if (rc) {
- dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
+ rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_READ32);
+ if (rc)
return rc;
- }
+
+ val = value64; /* downcast back to 32 */
sprintf(tmp_buf, "0x%08x\n", val);
return simple_read_from_buffer(buf, count, ppos, tmp_buf,
@@ -689,8 +807,7 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- u64 addr = entry->addr;
- bool user_address;
+ u64 value64, addr = entry->addr;
u32 value;
ssize_t rc;
@@ -703,19 +820,10 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
if (rc)
return rc;
- user_address = hl_is_device_va(hdev, addr);
- if (user_address) {
- rc = device_va_to_pa(hdev, addr, sizeof(value), &addr);
- if (rc)
- return rc;
- }
-
- rc = hdev->asic_funcs->debugfs_write32(hdev, addr, user_address, value);
- if (rc) {
- dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
- value, addr);
+ value64 = value;
+ rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_WRITE32);
+ if (rc)
return rc;
- }
return count;
}
@@ -726,7 +834,6 @@ static ssize_t hl_data_read64(struct file *f, char __user *buf,
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u64 addr = entry->addr;
- bool user_address;
char tmp_buf[32];
ssize_t rc;
u64 val;
@@ -739,18 +846,9 @@ static ssize_t hl_data_read64(struct file *f, char __user *buf,
if (*ppos)
return 0;
- user_address = hl_is_device_va(hdev, addr);
- if (user_address) {
- rc = device_va_to_pa(hdev, addr, sizeof(val), &addr);
- if (rc)
- return rc;
- }
-
- rc = hdev->asic_funcs->debugfs_read64(hdev, addr, user_address, &val);
- if (rc) {
- dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
+ rc = hl_access_mem(hdev, addr, &val, DEBUGFS_READ64);
+ if (rc)
return rc;
- }
sprintf(tmp_buf, "0x%016llx\n", val);
return simple_read_from_buffer(buf, count, ppos, tmp_buf,
@@ -763,7 +861,6 @@ static ssize_t hl_data_write64(struct file *f, const char __user *buf,
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u64 addr = entry->addr;
- bool user_address;
u64 value;
ssize_t rc;
@@ -776,19 +873,9 @@ static ssize_t hl_data_write64(struct file *f, const char __user *buf,
if (rc)
return rc;
- user_address = hl_is_device_va(hdev, addr);
- if (user_address) {
- rc = device_va_to_pa(hdev, addr, sizeof(value), &addr);
- if (rc)
- return rc;
- }
-
- rc = hdev->asic_funcs->debugfs_write64(hdev, addr, user_address, value);
- if (rc) {
- dev_err(hdev->dev, "Failed to write 0x%016llx to 0x%010llx\n",
- value, addr);
+ rc = hl_access_mem(hdev, addr, &value, DEBUGFS_WRITE64);
+ if (rc)
return rc;
- }
return count;
}
@@ -829,23 +916,67 @@ static ssize_t hl_dma_size_write(struct file *f, const char __user *buf,
}
/* Free the previous allocation, if there was any */
- entry->blob_desc.size = 0;
- vfree(entry->blob_desc.data);
+ entry->data_dma_blob_desc.size = 0;
+ vfree(entry->data_dma_blob_desc.data);
- entry->blob_desc.data = vmalloc(size);
- if (!entry->blob_desc.data)
+ entry->data_dma_blob_desc.data = vmalloc(size);
+ if (!entry->data_dma_blob_desc.data)
return -ENOMEM;
rc = hdev->asic_funcs->debugfs_read_dma(hdev, addr, size,
- entry->blob_desc.data);
+ entry->data_dma_blob_desc.data);
if (rc) {
dev_err(hdev->dev, "Failed to DMA from 0x%010llx\n", addr);
- vfree(entry->blob_desc.data);
- entry->blob_desc.data = NULL;
+ vfree(entry->data_dma_blob_desc.data);
+ entry->data_dma_blob_desc.data = NULL;
+ return -EIO;
+ }
+
+ entry->data_dma_blob_desc.size = size;
+
+ return count;
+}
+
+static ssize_t hl_monitor_dump_trigger(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 size, trig;
+ ssize_t rc;
+
+ if (hdev->reset_info.in_reset) {
+ dev_warn_ratelimited(hdev->dev, "Can't dump monitors during reset\n");
+ return 0;
+ }
+ rc = kstrtouint_from_user(buf, count, 10, &trig);
+ if (rc)
+ return rc;
+
+ if (trig != 1) {
+ dev_err(hdev->dev, "Must write 1 to trigger monitor dump\n");
+ return -EINVAL;
+ }
+
+ size = sizeof(struct cpucp_monitor_dump);
+
+ /* Free the previous allocation, if there was any */
+ entry->mon_dump_blob_desc.size = 0;
+ vfree(entry->mon_dump_blob_desc.data);
+
+ entry->mon_dump_blob_desc.data = vmalloc(size);
+ if (!entry->mon_dump_blob_desc.data)
+ return -ENOMEM;
+
+ rc = hdev->asic_funcs->get_monitor_dump(hdev, entry->mon_dump_blob_desc.data);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to dump monitors\n");
+ vfree(entry->mon_dump_blob_desc.data);
+ entry->mon_dump_blob_desc.data = NULL;
return -EIO;
}
- entry->blob_desc.size = size;
+ entry->mon_dump_blob_desc.size = size;
return count;
}
@@ -1218,6 +1349,11 @@ static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
return count;
}
+static const struct file_operations hl_mem_scrub_fops = {
+ .owner = THIS_MODULE,
+ .write = hl_memory_scrub,
+};
+
static const struct file_operations hl_data32b_fops = {
.owner = THIS_MODULE,
.read = hl_data_read32,
@@ -1235,6 +1371,11 @@ static const struct file_operations hl_dma_size_fops = {
.write = hl_dma_size_write
};
+static const struct file_operations hl_monitor_dump_fops = {
+ .owner = THIS_MODULE,
+ .write = hl_monitor_dump_trigger
+};
+
static const struct file_operations hl_i2c_data_fops = {
.owner = THIS_MODULE,
.read = hl_i2c_data_read,
@@ -1350,8 +1491,10 @@ void hl_debugfs_add_device(struct hl_device *hdev)
if (!dev_entry->entry_arr)
return;
- dev_entry->blob_desc.size = 0;
- dev_entry->blob_desc.data = NULL;
+ dev_entry->data_dma_blob_desc.size = 0;
+ dev_entry->data_dma_blob_desc.data = NULL;
+ dev_entry->mon_dump_blob_desc.size = 0;
+ dev_entry->mon_dump_blob_desc.data = NULL;
INIT_LIST_HEAD(&dev_entry->file_list);
INIT_LIST_HEAD(&dev_entry->cb_list);
@@ -1370,6 +1513,17 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
hl_debug_root);
+ debugfs_create_x64("memory_scrub_val",
+ 0644,
+ dev_entry->root,
+ &dev_entry->memory_scrub_val);
+
+ debugfs_create_file("memory_scrub",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_mem_scrub_fops);
+
debugfs_create_x64("addr",
0644,
dev_entry->root,
@@ -1470,7 +1624,18 @@ void hl_debugfs_add_device(struct hl_device *hdev)
debugfs_create_blob("data_dma",
0400,
dev_entry->root,
- &dev_entry->blob_desc);
+ &dev_entry->data_dma_blob_desc);
+
+ debugfs_create_file("monitor_dump_trig",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_monitor_dump_fops);
+
+ debugfs_create_blob("monitor_dump",
+ 0400,
+ dev_entry->root,
+ &dev_entry->mon_dump_blob_desc);
debugfs_create_x8("skip_reset_on_timeout",
0644,
@@ -1509,7 +1674,8 @@ void hl_debugfs_remove_device(struct hl_device *hdev)
mutex_destroy(&entry->file_mutex);
- vfree(entry->blob_desc.data);
+ vfree(entry->data_dma_blob_desc.data);
+ vfree(entry->mon_dump_blob_desc.data);
for (i = 0; i < ARRAY_SIZE(entry->state_dump); ++i)
vfree(entry->state_dump[i]);
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index dc9341a64541..b4f14c6d3970 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -15,6 +15,182 @@
#define HL_RESET_DELAY_USEC 10000 /* 10ms */
+/*
+ * hl_set_dram_bar - sets the bar to allow later access to an address
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @addr: the address the caller wants to access.
+ *
+ * @return: the old BAR base address on success, U64_MAX for failure.
+ * The caller should set it back to the old address after use.
+ *
+ * In case the bar space does not cover the whole address space,
+ * the bar base address should be set to allow access to a given address.
+ * This function can also be called when the bar doesn't need to be set;
+ * in that case it simply won't change the base.
+ */
+static uint64_t hl_set_dram_bar(struct hl_device *hdev, u64 addr)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 bar_base_addr;
+
+ bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
+
+ return hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
+}
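
A rough usage sketch of the pattern the comment above mandates (not part of this patch; example_read_dram_u32 is an invented name): move the BAR, access the address, then restore the old base, reusing the region bookkeeping that hl_access_sram_dram_region() below relies on.

static int example_read_dram_u32(struct hl_device *hdev, u64 addr, u32 *val)
{
	struct pci_mem_region *region = &hdev->pci_mem_region[PCI_REGION_DRAM];
	u64 old_base;

	/* may return the current base unchanged if the BAR already covers addr */
	old_base = hl_set_dram_bar(hdev, addr);
	if (old_base == U64_MAX)
		return -EIO;

	*val = readl(hdev->pcie_bar[region->bar_id] +
			addr - region->region_base + region->offset_in_bar);

	/* the caller is responsible for putting the BAR back where it was */
	if (hl_set_dram_bar(hdev, old_base) == U64_MAX)
		return -EIO;

	return 0;
}
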
+
+
+static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
+ enum debugfs_access_type acc_type, enum pci_region region_type)
+{
+ struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
+ u64 old_base, rc;
+
+ if (region_type == PCI_REGION_DRAM) {
+ old_base = hl_set_dram_bar(hdev, addr);
+ if (old_base == U64_MAX)
+ return -EIO;
+ }
+
+ switch (acc_type) {
+ case DEBUGFS_READ8:
+ *val = readb(hdev->pcie_bar[region->bar_id] +
+ addr - region->region_base + region->offset_in_bar);
+ break;
+ case DEBUGFS_WRITE8:
+ writeb(*val, hdev->pcie_bar[region->bar_id] +
+ addr - region->region_base + region->offset_in_bar);
+ break;
+ case DEBUGFS_READ32:
+ *val = readl(hdev->pcie_bar[region->bar_id] +
+ addr - region->region_base + region->offset_in_bar);
+ break;
+ case DEBUGFS_WRITE32:
+ writel(*val, hdev->pcie_bar[region->bar_id] +
+ addr - region->region_base + region->offset_in_bar);
+ break;
+ case DEBUGFS_READ64:
+ *val = readq(hdev->pcie_bar[region->bar_id] +
+ addr - region->region_base + region->offset_in_bar);
+ break;
+ case DEBUGFS_WRITE64:
+ writeq(*val, hdev->pcie_bar[region->bar_id] +
+ addr - region->region_base + region->offset_in_bar);
+ break;
+ }
+
+ if (region_type == PCI_REGION_DRAM) {
+ rc = hl_set_dram_bar(hdev, old_base);
+ if (rc == U64_MAX)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct scatterlist *sg;
+ int rc, i;
+
+ rc = dma_map_sgtable(&hdev->pdev->dev, sgt, dir, 0);
+ if (rc)
+ return rc;
+
+ /* Shift to the device's base physical address of host memory if necessary */
+ if (prop->device_dma_offset_for_host_access)
+ for_each_sgtable_dma_sg(sgt, sg, i)
+ sg->dma_address += prop->device_dma_offset_for_host_access;
+
+ return 0;
+}
+
+void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct scatterlist *sg;
+ int i;
+
+ /* Cancel the device's base physical address of host memory if necessary */
+ if (prop->device_dma_offset_for_host_access)
+ for_each_sgtable_dma_sg(sgt, sg, i)
+ sg->dma_address -= prop->device_dma_offset_for_host_access;
+
+ dma_unmap_sgtable(&hdev->pdev->dev, sgt, dir, 0);
+}
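
As a hedged sketch of how these wrappers are meant to be used (example_map_userptr is an invented helper; the real call sites live elsewhere in the driver), mapping and unmapping a pinned user buffer could look like this:

static int example_map_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
{
	int rc;

	/* adds prop->device_dma_offset_for_host_access to every DMA address */
	rc = hl_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
	if (rc)
		return rc;

	/* ... program the DMA engine with the already-offset addresses ... */

	/* removes the offset again before handing the table back to the DMA core */
	hl_dma_unmap_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);

	return 0;
}
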
+
+/*
+ * hl_access_cfg_region - access the config region
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @addr: the address to access
+ * @val: the value to write from or read to
+ * @acc_type: the type of access (read/write 64/32)
+ */
+int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
+ enum debugfs_access_type acc_type)
+{
+ struct pci_mem_region *cfg_region = &hdev->pci_mem_region[PCI_REGION_CFG];
+ u32 val_h, val_l;
+
+ if (!IS_ALIGNED(addr, sizeof(u32))) {
+ dev_err(hdev->dev, "address %#llx not a multiple of %zu\n", addr, sizeof(u32));
+ return -EINVAL;
+ }
+
+ switch (acc_type) {
+ case DEBUGFS_READ32:
+ *val = RREG32(addr - cfg_region->region_base);
+ break;
+ case DEBUGFS_WRITE32:
+ WREG32(addr - cfg_region->region_base, *val);
+ break;
+ case DEBUGFS_READ64:
+ val_l = RREG32(addr - cfg_region->region_base);
+ val_h = RREG32(addr + sizeof(u32) - cfg_region->region_base);
+
+ *val = (((u64) val_h) << 32) | val_l;
+ break;
+ case DEBUGFS_WRITE64:
+ WREG32(addr - cfg_region->region_base, lower_32_bits(*val));
+ WREG32(addr + sizeof(u32) - cfg_region->region_base, upper_32_bits(*val));
+ break;
+ default:
+ dev_err(hdev->dev, "access type %d is not supported\n", acc_type);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/*
+ * hl_access_dev_mem - access device memory
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @region: the memory region the address belongs to
+ * @region_type: the type of the region the address belongs to
+ * @addr: the address to access
+ * @val: the value to write from or read to
+ * @acc_type: the type of access (r/w, 32/64)
+ */
+int hl_access_dev_mem(struct hl_device *hdev, struct pci_mem_region *region,
+ enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type)
+{
+ switch (region_type) {
+ case PCI_REGION_CFG:
+ return hl_access_cfg_region(hdev, addr, val, acc_type);
+ case PCI_REGION_SRAM:
+ case PCI_REGION_DRAM:
+ return hl_access_sram_dram_region(hdev, addr, val, acc_type,
+ region_type);
+ default:
+ return -EFAULT;
+ }
+
+ return 0;
+}
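
For illustration only (example_read_cfg_u32 is an invented helper mirroring hl_access_dev_mem_by_region() in debugfs.c), reading a 32-bit value that is known to fall inside the CFG region could be done like this:

static int example_read_cfg_u32(struct hl_device *hdev, u64 addr, u32 *out)
{
	struct pci_mem_region *region = &hdev->pci_mem_region[PCI_REGION_CFG];
	u64 val;
	int rc;

	/* addr must be u32-aligned, as hl_access_cfg_region() enforces */
	rc = hl_access_dev_mem(hdev, region, PCI_REGION_CFG, addr, &val, DEBUGFS_READ32);
	if (rc)
		return rc;

	*out = (u32)val;

	return 0;
}
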
+
enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
@@ -107,6 +283,14 @@ static void hpriv_release(struct kref *ref)
hdev->is_compute_ctx_active = false;
mutex_unlock(&hdev->fpriv_list_lock);
+ hdev->compute_ctx_in_release = 0;
+
+ /* release the eventfd */
+ if (hpriv->notifier_event.eventfd)
+ eventfd_ctx_put(hpriv->notifier_event.eventfd);
+
+ mutex_destroy(&hpriv->notifier_event.lock);
+
kfree(hpriv);
}
@@ -146,10 +330,11 @@ static int hl_device_release(struct inode *inode, struct file *filp)
*/
hl_release_pending_user_interrupts(hpriv->hdev);
- hl_cb_mgr_fini(hdev, &hpriv->cb_mgr);
- hl_ts_mgr_fini(hpriv->hdev, &hpriv->ts_mem_mgr);
+ hl_mem_mgr_fini(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
+ hdev->compute_ctx_in_release = 1;
+
if (!hl_hpriv_put(hpriv))
dev_notice(hdev->dev,
"User process closed FD but device still in use\n");
@@ -176,6 +361,11 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
list_del(&hpriv->dev_node);
mutex_unlock(&hdev->fpriv_ctrl_list_lock);
out:
+ /* release the eventfd */
+ if (hpriv->notifier_event.eventfd)
+ eventfd_ctx_put(hpriv->notifier_event.eventfd);
+
+ mutex_destroy(&hpriv->notifier_event.lock);
put_pid(hpriv->taskpid);
kfree(hpriv);
@@ -204,17 +394,15 @@ static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
}
vm_pgoff = vma->vm_pgoff;
- vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
- case HL_MMAP_TYPE_CB:
- return hl_cb_mmap(hpriv, vma);
-
case HL_MMAP_TYPE_BLOCK:
+ vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
return hl_hw_block_mmap(hpriv, vma);
+ case HL_MMAP_TYPE_CB:
case HL_MMAP_TYPE_TS_BUFF:
- return hl_ts_mmap(hpriv, vma);
+ return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL);
}
return -EINVAL;
@@ -424,18 +612,25 @@ static int device_early_init(struct hl_device *hdev)
goto free_eq_wq;
}
+ hdev->pf_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0);
+ if (!hdev->pf_wq) {
+ dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");
+ rc = -ENOMEM;
+ goto free_ts_free_wq;
+ }
+
hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
GFP_KERNEL);
if (!hdev->hl_chip_info) {
rc = -ENOMEM;
- goto free_ts_free_wq;
+ goto free_pf_wq;
}
rc = hl_mmu_if_set_funcs(hdev);
if (rc)
goto free_chip_info;
- hl_cb_mgr_init(&hdev->kernel_cb_mgr);
+ hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
hdev->device_reset_work.wq =
create_singlethread_workqueue("hl_device_reset");
@@ -464,9 +659,11 @@ static int device_early_init(struct hl_device *hdev)
return 0;
free_cb_mgr:
- hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
+ hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
free_chip_info:
kfree(hdev->hl_chip_info);
+free_pf_wq:
+ destroy_workqueue(hdev->pf_wq);
free_ts_free_wq:
destroy_workqueue(hdev->ts_free_obj_wq);
free_eq_wq:
@@ -503,10 +700,11 @@ static void device_early_fini(struct hl_device *hdev)
mutex_destroy(&hdev->clk_throttling.lock);
- hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
+ hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
kfree(hdev->hl_chip_info);
+ destroy_workqueue(hdev->pf_wq);
destroy_workqueue(hdev->ts_free_obj_wq);
destroy_workqueue(hdev->eq_wq);
destroy_workqueue(hdev->device_reset_work.wq);
@@ -703,6 +901,9 @@ static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_r
/* Go over all the queues, release all CS and their jobs */
hl_cs_rollback_all(hdev, skip_wq_flush);
+ /* flush the MMU prefetch workqueue */
+ flush_workqueue(hdev->pf_wq);
+
/* Release all pending user interrupts, each pending user interrupt
* holds a reference to user context
*/
@@ -847,10 +1048,13 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool
put_task_struct(task);
} else {
- dev_warn(hdev->dev,
- "Can't get task struct for PID so giving up on killing process\n");
- mutex_unlock(fd_lock);
- return -ETIME;
+ /*
+			 * If we got here, it means the process was killed from outside the driver
+			 * right after we started looping on fd_list and before get_pid_task, thus
+ * we don't need to kill it.
+ */
+ dev_dbg(hdev->dev,
+ "Can't get task struct for user process, assuming process was killed from outside the driver\n");
}
}
@@ -1062,9 +1266,9 @@ do_reset:
if (hard_reset)
dev_info(hdev->dev, "Going to reset device\n");
else if (reset_upon_device_release)
- dev_info(hdev->dev, "Going to reset device after release by user\n");
+ dev_dbg(hdev->dev, "Going to reset device after release by user\n");
else
- dev_info(hdev->dev, "Going to reset engines of inference device\n");
+ dev_dbg(hdev->dev, "Going to reset engines of inference device\n");
}
again:
@@ -1270,7 +1474,10 @@ kill_processes:
hdev->reset_info.needs_reset = false;
- dev_notice(hdev->dev, "Successfully finished resetting the device\n");
+ if (hard_reset)
+ dev_info(hdev->dev, "Successfully finished resetting the device\n");
+ else
+ dev_dbg(hdev->dev, "Successfully finished resetting the device\n");
if (hard_reset) {
hdev->reset_info.hard_reset_cnt++;
@@ -1323,6 +1530,43 @@ out_err:
return rc;
}
+static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event)
+{
+ mutex_lock(&notifier_event->lock);
+ notifier_event->events_mask |= event;
+ if (notifier_event->eventfd)
+ eventfd_signal(notifier_event->eventfd, 1);
+
+ mutex_unlock(&notifier_event->lock);
+}
+
+/*
+ * hl_notifier_event_send_all - notify all user processes via eventfd
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @event: the event that occurred
+ */
+void hl_notifier_event_send_all(struct hl_device *hdev, u64 event)
+{
+ struct hl_fpriv *hpriv;
+
+ mutex_lock(&hdev->fpriv_list_lock);
+
+ list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
+ hl_notifier_event_send(&hpriv->notifier_event, event);
+
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ /* control device */
+ mutex_lock(&hdev->fpriv_ctrl_list_lock);
+
+ list_for_each_entry(hpriv, &hdev->fpriv_ctrl_list, dev_node)
+ hl_notifier_event_send(&hpriv->notifier_event, event);
+
+ mutex_unlock(&hdev->fpriv_ctrl_list_lock);
+}
+
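
A minimal, hypothetical caller (EXAMPLE_EVENT_BIT is a placeholder, not a real uapi flag) that fans an event out to every open file descriptor might look like this:

#define EXAMPLE_EVENT_BIT	BIT_ULL(0)

static void example_report_event(struct hl_device *hdev)
{
	/* ORs the bit into each hpriv->notifier_event.events_mask and signals its eventfd */
	hl_notifier_event_send_all(hdev, EXAMPLE_EVENT_BIT);
}
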
/*
* hl_device_init - main initialization function for habanalabs device
*
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 3262126cc7ca..828a36af5b14 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -18,8 +18,9 @@
static char *extract_fw_ver_from_str(const char *fw_str)
{
char *str, *fw_ver, *whitespace;
+ u32 ver_offset;
- fw_ver = kmalloc(16, GFP_KERNEL);
+ fw_ver = kmalloc(VERSION_MAX_LEN, GFP_KERNEL);
if (!fw_ver)
return NULL;
@@ -29,9 +30,10 @@ static char *extract_fw_ver_from_str(const char *fw_str)
/* Skip the fw- part */
str += 3;
+ ver_offset = str - fw_str;
/* Copy until the next whitespace */
- whitespace = strnstr(str, " ", 15);
+ whitespace = strnstr(str, " ", VERSION_MAX_LEN - ver_offset);
if (!whitespace)
goto free_fw_ver;
@@ -819,6 +821,54 @@ out:
return rc;
}
+int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
+{
+ struct cpucp_monitor_dump *mon_dump_cpu_addr;
+ dma_addr_t mon_dump_dma_addr;
+ struct cpucp_packet pkt = {};
+ size_t data_size;
+ __le32 *src_ptr;
+ u32 *dst_ptr;
+ u64 result;
+ int i, rc;
+
+ data_size = sizeof(struct cpucp_monitor_dump);
+ mon_dump_cpu_addr = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, data_size,
+ &mon_dump_dma_addr);
+ if (!mon_dump_cpu_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for CPU-CP monitor-dump packet\n");
+ return -ENOMEM;
+ }
+
+ memset(mon_dump_cpu_addr, 0, data_size);
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_MONITOR_DUMP_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.addr = cpu_to_le64(mon_dump_dma_addr);
+ pkt.data_max_size = cpu_to_le32(data_size);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_CPUCP_MON_DUMP_TIMEOUT_USEC, &result);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
+ goto out;
+ }
+
+ /* result contains the actual size */
+ src_ptr = (__le32 *) mon_dump_cpu_addr;
+ dst_ptr = data;
+ for (i = 0; i < (data_size / sizeof(u32)); i++) {
+ *dst_ptr = le32_to_cpu(*src_ptr);
+ src_ptr++;
+ dst_ptr++;
+ }
+
+out:
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
+
+ return rc;
+}
+
int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
struct hl_info_pci_counters *counters)
{
@@ -1539,7 +1589,7 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
le32_to_cpu(dyn_regs->cpu_cmd_status_to_host),
status,
FIELD_GET(COMMS_STATUS_STATUS_MASK, status) == expected_status,
- hdev->fw_poll_interval_usec,
+ hdev->fw_comms_poll_interval_usec,
timeout);
if (rc) {
@@ -1909,7 +1959,7 @@ static int hl_fw_dynamic_request_descriptor(struct hl_device *hdev,
* @fwc: the firmware component
* @fw_version: fw component's version string
*/
-static void hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
+static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
enum hl_fw_component fwc,
const char *fw_version)
{
@@ -1933,23 +1983,33 @@ static void hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
VERSION_MAX_LEN);
if (preboot_ver && preboot_ver != prop->preboot_ver) {
strscpy(btl_ver, prop->preboot_ver,
- min((int) (preboot_ver - prop->preboot_ver),
- 31));
+ min((int) (preboot_ver - prop->preboot_ver), 31));
dev_info(hdev->dev, "%s\n", btl_ver);
}
preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
if (preboot_ver) {
- dev_info(hdev->dev, "preboot version %s\n",
- preboot_ver);
+ char major[8];
+ int rc;
+
+ dev_info(hdev->dev, "preboot version %s\n", preboot_ver);
+ sprintf(major, "%.2s", preboot_ver);
kfree(preboot_ver);
+
+ rc = kstrtou32(major, 10, &hdev->fw_major_version);
+ if (rc) {
+ dev_err(hdev->dev, "Error %d parsing preboot major version\n", rc);
+ return rc;
+ }
}
break;
default:
dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
- return;
+ return -EINVAL;
}
+
+ return 0;
}
/**
@@ -2121,9 +2181,10 @@ static int hl_fw_dynamic_load_image(struct hl_device *hdev,
goto release_fw;
/* read preboot version */
- hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc,
+ rc = hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc,
fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
-
+ if (rc)
+ goto release_fw;
/* update state according to boot stage */
if (cur_fwc == FW_COMP_BOOT_FIT) {
@@ -2390,9 +2451,8 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
goto protocol_err;
/* read preboot version */
- hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
+ return hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
- return 0;
}
/* load boot fit to FW */
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 1edaf6ab67bd..b0b0f3f89865 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -21,6 +21,7 @@
#include <linux/hashtable.h>
#include <linux/debugfs.h>
#include <linux/rwsem.h>
+#include <linux/eventfd.h>
#include <linux/bitfield.h>
#include <linux/genalloc.h>
#include <linux/sched/signal.h>
@@ -61,8 +62,10 @@
#define HL_CPUCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
#define HL_CPUCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */
+#define HL_CPUCP_MON_DUMP_TIMEOUT_USEC 10000000 /* 10s */
#define HL_FW_STATUS_POLL_INTERVAL_USEC 10000 /* 10ms */
+#define HL_FW_COMMS_STATUS_PLDM_POLL_INTERVAL_USEC 1000000 /* 1s */
#define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */
@@ -394,18 +397,8 @@ enum hl_device_hw_state {
* struct hl_mmu_properties - ASIC specific MMU address translation properties.
* @start_addr: virtual start address of the memory region.
* @end_addr: virtual end address of the memory region.
- * @hop0_shift: shift of hop 0 mask.
- * @hop1_shift: shift of hop 1 mask.
- * @hop2_shift: shift of hop 2 mask.
- * @hop3_shift: shift of hop 3 mask.
- * @hop4_shift: shift of hop 4 mask.
- * @hop5_shift: shift of hop 5 mask.
- * @hop0_mask: mask to get the PTE address in hop 0.
- * @hop1_mask: mask to get the PTE address in hop 1.
- * @hop2_mask: mask to get the PTE address in hop 2.
- * @hop3_mask: mask to get the PTE address in hop 3.
- * @hop4_mask: mask to get the PTE address in hop 4.
- * @hop5_mask: mask to get the PTE address in hop 5.
+ * @hop_shifts: array holds HOPs shifts.
+ * @hop_masks: array holds HOPs masks.
* @last_mask: mask to get the bit indicating this is the last hop.
* @pgt_size: size for page tables.
* @page_size: default page size used to allocate memory.
@@ -418,18 +411,8 @@ enum hl_device_hw_state {
struct hl_mmu_properties {
u64 start_addr;
u64 end_addr;
- u64 hop0_shift;
- u64 hop1_shift;
- u64 hop2_shift;
- u64 hop3_shift;
- u64 hop4_shift;
- u64 hop5_shift;
- u64 hop0_mask;
- u64 hop1_mask;
- u64 hop2_mask;
- u64 hop3_mask;
- u64 hop4_mask;
- u64 hop5_mask;
+ u64 hop_shifts[MMU_HOP_MAX];
+ u64 hop_masks[MMU_HOP_MAX];
u64 last_mask;
u64 pgt_size;
u32 page_size;
@@ -486,8 +469,10 @@ struct hl_hints_range {
* the device's MMU.
* @dram_hints_align_mask: dram va hint addresses alignment mask which is used
* for hints validity check.
- * device_dma_offset_for_host_access: the offset to add to host DMA addresses
- * to enable the device to access them.
+ * @device_dma_offset_for_host_access: the offset to add to host DMA addresses
+ * to enable the device to access them.
+ * @host_base_address: host physical start address for host DMA from device
+ * @host_end_address: host physical end address for host DMA from device
* @max_freq_value: current max clk frequency.
 * @clk_pll_index: clock PLL index that specifies which PLL determines the clock
* we display to the user
@@ -528,6 +513,10 @@ struct hl_hints_range {
* @fw_app_cpu_boot_dev_sts1: bitmap representation of application security
* status reported by FW, bit description can be
* found in CPU_BOOT_DEV_STS1
+ * @device_mem_alloc_default_page_size: may be different than dram_page_size only for ASICs for
+ * which the property supports_user_set_page_size is true
+ * (i.e. the DRAM supports multiple page sizes), otherwise
+ * it shall be equal to dram_page_size.
* @collective_first_sob: first sync object available for collective use
* @collective_first_mon: first monitor available for collective use
* @sync_stream_first_sob: first sync object available for sync stream use
@@ -568,6 +557,7 @@ struct hl_hints_range {
* @configurable_stop_on_err: is stop-on-error option configurable via debugfs.
* @set_max_power_on_device_init: true if need to set max power in F/W on device init.
* @supports_user_set_page_size: true if user can set the allocation page size.
+ * @dma_mask: the dma mask to be set for this device
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -599,6 +589,8 @@ struct asic_fixed_properties {
u64 cb_va_end_addr;
u64 dram_hints_align_mask;
u64 device_dma_offset_for_host_access;
+ u64 host_base_address;
+ u64 host_end_address;
u64 max_freq_value;
u32 clk_pll_index;
u32 mmu_pgt_size;
@@ -626,6 +618,7 @@ struct asic_fixed_properties {
u32 fw_bootfit_cpu_boot_dev_sts1;
u32 fw_app_cpu_boot_dev_sts0;
u32 fw_app_cpu_boot_dev_sts1;
+ u32 device_mem_alloc_default_page_size;
u16 collective_first_sob;
u16 collective_first_mon;
u16 sync_stream_first_sob;
@@ -654,6 +647,7 @@ struct asic_fixed_properties {
u8 configurable_stop_on_err;
u8 set_max_power_on_device_init;
u8 supports_user_set_page_size;
+ u8 dma_mask;
};
/**
@@ -711,85 +705,102 @@ struct hl_cs_compl {
*/
/**
- * struct hl_cb_mgr - describes a Command Buffer Manager.
- * @cb_lock: protects cb_handles.
- * @cb_handles: an idr to hold all command buffer handles.
- */
-struct hl_cb_mgr {
- spinlock_t cb_lock;
- struct idr cb_handles; /* protected by cb_lock */
-};
-
-/**
- * struct hl_ts_mgr - describes the timestamp registration memory manager.
- * @ts_lock: protects ts_handles.
- * @ts_handles: an idr to hold all ts bufferes handles.
- */
-struct hl_ts_mgr {
- spinlock_t ts_lock;
- struct idr ts_handles;
-};
-
-/**
* struct hl_ts_buff - describes a timestamp buffer.
- * @refcount: reference counter for usage of the buffer.
- * @hdev: pointer to device this buffer belongs to.
- * @mmap: true if the buff is currently mapped to user.
* @kernel_buff_address: Holds the internal buffer's kernel virtual address.
* @user_buff_address: Holds the user buffer's kernel virtual address.
- * @id: the buffer ID.
- * @mmap_size: Holds the buffer size that was mmaped.
* @kernel_buff_size: Holds the internal kernel buffer size.
- * @user_buff_size: Holds the user buffer size.
*/
struct hl_ts_buff {
- struct kref refcount;
- struct hl_device *hdev;
- atomic_t mmap;
void *kernel_buff_address;
void *user_buff_address;
- u32 id;
- u32 mmap_size;
u32 kernel_buff_size;
- u32 user_buff_size;
+};
+
+struct hl_mmap_mem_buf;
+
+/**
+ * struct hl_mem_mgr - describes unified memory manager for mappable memory chunks.
+ * @dev: back pointer to the owning device
+ * @lock: protects handles
+ * @handles: an idr holding all active handles to the memory buffers in the system.
+ */
+struct hl_mem_mgr {
+ struct device *dev;
+ spinlock_t lock;
+ struct idr handles;
+};
+
+/**
+ * struct hl_mmap_mem_buf_behavior - describes unified memory manager buffer behavior
+ * @topic: string identifier used for logging
+ * @mem_id: memory type identifier, embedded in the handle and used to identify
+ * the memory type by handle.
+ * @alloc: callback executed on buffer allocation, shall allocate the memory,
+ * set it under buffer private, and set mappable size.
+ * @mmap: callback executed on mmap, must map the buffer to vma
+ * @release: callback executed on release, must free the resources used by the buffer
+ */
+struct hl_mmap_mem_buf_behavior {
+ const char *topic;
+ u64 mem_id;
+
+ int (*alloc)(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args);
+ int (*mmap)(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args);
+ void (*release)(struct hl_mmap_mem_buf *buf);
+};
+
+/**
+ * struct hl_mmap_mem_buf - describes a single unified memory buffer
+ * @behavior: buffer behavior
+ * @mmg: back pointer to the unified memory manager
+ * @refcount: reference counter for buffer users
+ * @private: pointer to buffer behavior private data
+ * @mmap: atomic boolean indicating whether or not the buffer is mapped right now
+ * @real_mapped_size: the actual mapped size of the buffer; parts of it may be
+ * released later, so this may change at runtime.
+ * @mappable_size: the original mappable size of the buffer, does not change after
+ * the allocation.
+ * @handle: the buffer id in mmg handles store
+ */
+struct hl_mmap_mem_buf {
+ struct hl_mmap_mem_buf_behavior *behavior;
+ struct hl_mem_mgr *mmg;
+ struct kref refcount;
+ void *private;
+ atomic_t mmap;
+ u64 real_mapped_size;
+ u64 mappable_size;
+ u64 handle;
};
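
To make the callback contract concrete, here is a hedged sketch of a behavior definition; all example_* names are invented, and only the callback signatures and the fields being set come from the structs above.

static int example_buf_alloc(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
	/* allocate the backing memory, park it under private and publish the size */
	buf->private = kzalloc(PAGE_SIZE, gfp);
	if (!buf->private)
		return -ENOMEM;

	buf->mappable_size = PAGE_SIZE;

	return 0;
}

static void example_buf_release(struct hl_mmap_mem_buf *buf)
{
	kfree(buf->private);
}

static struct hl_mmap_mem_buf_behavior example_behavior = {
	.topic = "EXAMPLE",
	/* assumption: mem_id appears to reuse the HL_MMAP_TYPE_* encoding */
	.mem_id = HL_MMAP_TYPE_TS_BUFF,
	/* a real behavior would usually also provide a .mmap callback */
	.alloc = example_buf_alloc,
	.release = example_buf_release,
};
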
/**
* struct hl_cb - describes a Command Buffer.
- * @refcount: reference counter for usage of the CB.
* @hdev: pointer to device this CB belongs to.
* @ctx: pointer to the CB owner's context.
- * @lock: spinlock to protect mmap flows.
+ * @buf: back pointer to the parent mappable memory buffer
* @debugfs_list: node in debugfs list of command buffers.
* @pool_list: node in pool list of command buffers.
* @va_block_list: list of virtual addresses blocks of the CB if it is mapped to
* the device's MMU.
- * @id: the CB's ID.
* @kernel_address: Holds the CB's kernel virtual address.
* @bus_address: Holds the CB's DMA address.
- * @mmap_size: Holds the CB's size that was mmaped.
* @size: holds the CB's size.
* @cs_cnt: holds number of CS that this CB participates in.
- * @mmap: true if the CB is currently mmaped to user.
* @is_pool: true if CB was acquired from the pool, false otherwise.
 * @is_internal: internally allocated
* @is_mmu_mapped: true if the CB is mapped to the device's MMU.
*/
struct hl_cb {
- struct kref refcount;
struct hl_device *hdev;
struct hl_ctx *ctx;
- spinlock_t lock;
+ struct hl_mmap_mem_buf *buf;
struct list_head debugfs_list;
struct list_head pool_list;
struct list_head va_block_list;
- u64 id;
void *kernel_address;
dma_addr_t bus_address;
- u32 mmap_size;
u32 size;
atomic_t cs_cnt;
- u8 mmap;
u8 is_pool;
u8 is_internal;
u8 is_mmu_mapped;
@@ -935,12 +946,12 @@ struct hl_user_interrupt {
* struct timestamp_reg_free_node - holds the timestamp registration free objects node
* @free_objects_node: node in the list free_obj_jobs
* @cq_cb: pointer to cq command buffer to be freed
- * @ts_buff: pointer to timestamp buffer to be freed
+ * @buf: pointer to timestamp buffer to be freed
*/
struct timestamp_reg_free_node {
struct list_head free_objects_node;
struct hl_cb *cq_cb;
- struct hl_ts_buff *ts_buff;
+ struct hl_mmap_mem_buf *buf;
};
/* struct timestamp_reg_work_obj - holds the timestamp registration free objects job
@@ -957,8 +968,8 @@ struct timestamp_reg_work_obj {
};
/* struct timestamp_reg_info - holds the timestamp registration related data.
- * @ts_buff: pointer to the timestamp buffer which include both user/kernel buffers.
- * relevant only when doing timestamps records registration.
+ * @buf: pointer to the timestamp buffer which includes both user/kernel buffers.
+ * relevant only when doing timestamp records registration.
* @cq_cb: pointer to CQ counter CB.
* @timestamp_kernel_addr: timestamp handle address, where to set timestamp
* relevant only when doing timestamps records
@@ -969,7 +980,7 @@ struct timestamp_reg_work_obj {
* allocating records dynamically.
*/
struct timestamp_reg_info {
- struct hl_ts_buff *ts_buff;
+ struct hl_mmap_mem_buf *buf;
struct hl_cb *cq_cb;
u64 *timestamp_kernel_addr;
u8 in_use;
@@ -1068,6 +1079,15 @@ enum div_select_defs {
DIV_SEL_DIVIDED_PLL = 3,
};
+enum debugfs_access_type {
+ DEBUGFS_READ8,
+ DEBUGFS_WRITE8,
+ DEBUGFS_READ32,
+ DEBUGFS_WRITE32,
+ DEBUGFS_READ64,
+ DEBUGFS_WRITE64,
+};
+
enum pci_region {
PCI_REGION_CFG,
PCI_REGION_SRAM,
@@ -1229,6 +1249,7 @@ struct fw_load_mgr {
* its implementation is not trivial when the driver
* is loaded in simulation mode (not upstreamed).
* @scrub_device_mem: Scrub device memory given an address and size
+ * @scrub_device_dram: Scrub the dram memory of the device.
* @get_int_queue_base: get the internal queue base address.
* @test_queues: run simple test on all queues for sanity check.
* @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
@@ -1236,18 +1257,14 @@ struct fw_load_mgr {
* @asic_dma_pool_free: free small DMA allocation from pool.
* @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
* @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
- * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
+ * @hl_dma_unmap_sgtable: DMA unmap scatter-gather table.
* @cs_parser: parse Command Submission.
- * @asic_dma_map_sg: DMA map scatter-gather list.
+ * @asic_dma_map_sgtable: DMA map scatter-gather table.
* @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
* @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
* @update_eq_ci: update event queue CI.
* @context_switch: called upon ASID context switch.
* @restore_phase_topology: clear all SOBs amd MONs.
- * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM/Host memory.
- * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM/Host memory.
- * @debugfs_read64: debug interface for reading u64 from DRAM/SRAM/Host memory.
- * @debugfs_write64: debug interface for writing u64 to DRAM/SRAM/Host memory.
* @debugfs_read_dma: debug interface for reading up to 2MB from the device's
* internal memory via DMA engine.
* @add_device_attr: add ASIC specific device attributes.
@@ -1257,8 +1274,8 @@ struct fw_load_mgr {
* @write_pte: write MMU page table entry to DRAM.
* @mmu_invalidate_cache: flush MMU STLB host/DRAM cache, either with soft
* (L1 only) or hard (L0 & L1) flush.
- * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
- * ASID-VA-size mask.
+ * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with ASID-VA-size mask.
+ * @mmu_prefetch_cache_range: pre-fetch specific MMU STLB cache lines with ASID-VA-size mask.
* @send_heartbeat: send is-alive packet to CPU-CP and verify response.
* @debug_coresight: perform certain actions on Coresight for debugging.
* @is_device_idle: return true if device is idle, false otherwise.
@@ -1267,6 +1284,7 @@ struct fw_load_mgr {
* @hw_queues_unlock: release H/W queues lock.
* @get_pci_id: retrieve PCI ID.
* @get_eeprom_data: retrieve EEPROM data from F/W.
+ * @get_monitor_dump: retrieve monitor registers dump from F/W.
* @send_cpu_message: send message to F/W. If the message is timedout, the
* driver will eventually reset the device. The timeout can
* be determined by the calling function or it can be 0 and
@@ -1289,8 +1307,6 @@ struct fw_load_mgr {
* @gen_wait_cb: Generate a wait CB.
* @reset_sob: Reset a SOB.
* @reset_sob_group: Reset SOB group
- * @set_dma_mask_from_fw: set the DMA mask in the driver according to the
- * firmware configuration
* @get_device_time: Get the device time.
* @collective_wait_init_cs: Generate collective master/slave packets
* and place them in the relevant cs jobs
@@ -1319,6 +1335,9 @@ struct fw_load_mgr {
* @get_stream_master_qid_arr: get pointer to stream masters QID array
* @is_valid_dram_page_size: return true if page size is supported in device
* memory allocation, otherwise false.
+ * @get_valid_dram_page_orders: get valid device memory allocation page orders
+ * @access_dev_mem: access device memory
+ * @set_dram_bar_base: set the base of the DRAM BAR
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -1342,6 +1361,7 @@ struct hl_asic_funcs {
void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle);
int (*scrub_device_mem)(struct hl_device *hdev, u64 addr, u64 size);
+ int (*scrub_device_dram)(struct hl_device *hdev, u64 val);
void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
dma_addr_t *dma_handle, u16 *queue_len);
int (*test_queues)(struct hl_device *hdev);
@@ -1353,12 +1373,11 @@ struct hl_asic_funcs {
size_t size, dma_addr_t *dma_handle);
void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
size_t size, void *vaddr);
- void (*hl_dma_unmap_sg)(struct hl_device *hdev,
- struct scatterlist *sgl, int nents,
+ void (*hl_dma_unmap_sgtable)(struct hl_device *hdev,
+ struct sg_table *sgt,
enum dma_data_direction dir);
int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
- int (*asic_dma_map_sg)(struct hl_device *hdev,
- struct scatterlist *sgl, int nents,
+ int (*asic_dma_map_sgtable)(struct hl_device *hdev, struct sg_table *sgt,
enum dma_data_direction dir);
u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
struct sg_table *sgt);
@@ -1369,14 +1388,6 @@ struct hl_asic_funcs {
void (*update_eq_ci)(struct hl_device *hdev, u32 val);
int (*context_switch)(struct hl_device *hdev, u32 asid);
void (*restore_phase_topology)(struct hl_device *hdev);
- int (*debugfs_read32)(struct hl_device *hdev, u64 addr,
- bool user_address, u32 *val);
- int (*debugfs_write32)(struct hl_device *hdev, u64 addr,
- bool user_address, u32 val);
- int (*debugfs_read64)(struct hl_device *hdev, u64 addr,
- bool user_address, u64 *val);
- int (*debugfs_write64)(struct hl_device *hdev, u64 addr,
- bool user_address, u64 val);
int (*debugfs_read_dma)(struct hl_device *hdev, u64 addr, u32 size,
void *blob_addr);
void (*add_device_attr)(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp,
@@ -1391,6 +1402,7 @@ struct hl_asic_funcs {
u32 flags);
int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
u32 flags, u32 asid, u64 va, u64 size);
+ int (*mmu_prefetch_cache_range)(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
int (*debug_coresight)(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr,
@@ -1399,8 +1411,8 @@ struct hl_asic_funcs {
void (*hw_queues_lock)(struct hl_device *hdev);
void (*hw_queues_unlock)(struct hl_device *hdev);
u32 (*get_pci_id)(struct hl_device *hdev);
- int (*get_eeprom_data)(struct hl_device *hdev, void *data,
- size_t max_size);
+ int (*get_eeprom_data)(struct hl_device *hdev, void *data, size_t max_size);
+ int (*get_monitor_dump)(struct hl_device *hdev, void *data);
int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
u16 len, u32 timeout, u64 *result);
int (*pci_bars_map)(struct hl_device *hdev);
@@ -1421,7 +1433,6 @@ struct hl_asic_funcs {
struct hl_gen_wait_properties *prop);
void (*reset_sob)(struct hl_device *hdev, void *data);
void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group);
- void (*set_dma_mask_from_fw)(struct hl_device *hdev);
u64 (*get_device_time)(struct hl_device *hdev);
int (*collective_wait_init_cs)(struct hl_cs *cs);
int (*collective_wait_create_jobs)(struct hl_device *hdev,
@@ -1445,6 +1456,12 @@ struct hl_asic_funcs {
void (*set_pci_memory_regions)(struct hl_device *hdev);
u32* (*get_stream_master_qid_arr)(void);
bool (*is_valid_dram_page_size)(u32 page_size);
+ int (*mmu_get_real_page_size)(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
+ u32 page_size, u32 *real_page_size, bool is_dram_addr);
+ void (*get_valid_dram_page_orders)(struct hl_info_dev_memalloc_page_sizes *info);
+ int (*access_dev_mem)(struct hl_device *hdev, struct pci_mem_region *region,
+ enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type);
+ u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
};
@@ -1915,6 +1932,18 @@ struct hl_debug_params {
bool enable;
};
+/**
+ * struct hl_notifier_event - holds the notifier data structure
+ * @eventfd: the event file descriptor to raise the notifications
+ * @lock: mutex lock to protect the notifier data flows
+ * @events_mask: bitmap of the events that occurred
+ */
+struct hl_notifier_event {
+ struct eventfd_ctx *eventfd;
+ struct mutex lock;
+ u64 events_mask;
+};
+
/*
* FILE PRIVATE STRUCTURE
*/
@@ -1926,25 +1955,25 @@ struct hl_debug_params {
* @taskpid: current process ID.
* @ctx: current executing context. TODO: remove for multiple ctx per process
* @ctx_mgr: context manager to handle multiple context for this FD.
- * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
- * @ts_mem_mgr: timestamp registration manager for alloc/free/map timestamp buffers.
+ * @mem_mgr: manager descriptor for memory exportable via mmap
+ * @notifier_event: notifier eventfd towards user process
* @debugfs_list: list of relevant ASIC debugfs.
* @dev_node: node in the device list of file private data
* @refcount: number of related contexts.
* @restore_phase_mutex: lock for context switch and restore phase.
*/
struct hl_fpriv {
- struct hl_device *hdev;
- struct file *filp;
- struct pid *taskpid;
- struct hl_ctx *ctx;
- struct hl_ctx_mgr ctx_mgr;
- struct hl_cb_mgr cb_mgr;
- struct hl_ts_mgr ts_mem_mgr;
- struct list_head debugfs_list;
- struct list_head dev_node;
- struct kref refcount;
- struct mutex restore_phase_mutex;
+ struct hl_device *hdev;
+ struct file *filp;
+ struct pid *taskpid;
+ struct hl_ctx *ctx;
+ struct hl_ctx_mgr ctx_mgr;
+ struct hl_mem_mgr mem_mgr;
+ struct hl_notifier_event notifier_event;
+ struct list_head debugfs_list;
+ struct list_head dev_node;
+ struct kref refcount;
+ struct mutex restore_phase_mutex;
};
@@ -1992,12 +2021,14 @@ struct hl_debugfs_entry {
* @userptr_spinlock: protects userptr_list.
* @ctx_mem_hash_list: list of available contexts with MMU mappings.
* @ctx_mem_hash_spinlock: protects cb_list.
- * @blob_desc: descriptor of blob
+ * @data_dma_blob_desc: data DMA descriptor of blob.
+ * @mon_dump_blob_desc: monitor dump descriptor of blob.
* @state_dump: data of the system states in case of a bad cs.
* @state_dump_sem: protects state_dump.
* @addr: next address to read/write from/to in read/write32.
* @mmu_addr: next virtual address to translate to physical address in mmu_show.
* @userptr_lookup: the target user ptr to look up for on demand.
+ * @memory_scrub_val: the value to which the dram will be scrubbed using cb scrub_device_dram
* @mmu_asid: ASID to use while translating in mmu_show.
* @state_dump_head: index of the latest state dump
* @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
@@ -2021,12 +2052,14 @@ struct hl_dbg_device_entry {
spinlock_t userptr_spinlock;
struct list_head ctx_mem_hash_list;
spinlock_t ctx_mem_hash_spinlock;
- struct debugfs_blob_wrapper blob_desc;
+ struct debugfs_blob_wrapper data_dma_blob_desc;
+ struct debugfs_blob_wrapper mon_dump_blob_desc;
char *state_dump[HL_STATE_DUMP_HIST_LEN];
struct rw_semaphore state_dump_sem;
u64 addr;
u64 mmu_addr;
u64 userptr_lookup;
+ u64 memory_scrub_val;
u32 mmu_asid;
u32 state_dump_head;
u8 i2c_bus;
@@ -2442,6 +2475,24 @@ struct hl_mmu_funcs {
};
/**
+ * struct hl_prefetch_work - prefetch work structure handler
+ * @pf_work: actual work struct.
+ * @ctx: compute context.
+ * @va: virtual address to pre-fetch.
+ * @size: pre-fetch size.
+ * @flags: operation flags.
+ * @asid: ASID for maintenance operation.
+ */
+struct hl_prefetch_work {
+ struct work_struct pf_work;
+ struct hl_ctx *ctx;
+ u64 va;
+ u64 size;
+ u32 flags;
+ u32 asid;
+};
+
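For context, a sketch of how such a work item could be queued to the dedicated prefetch workqueue (pf_wq, added to struct hl_device below); this mirrors what the declared hl_mmu_prefetch_cache_range() is expected to do, but the exact body and helper name here are illustrative:

static int queue_prefetch_request(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size)
{
	struct hl_prefetch_work *handle_pf_work;

	handle_pf_work = kmalloc(sizeof(*handle_pf_work), GFP_KERNEL);
	if (!handle_pf_work)
		return -ENOMEM;

	INIT_WORK(&handle_pf_work->pf_work, hl_mmu_prefetch_work_function);
	handle_pf_work->ctx = ctx;
	handle_pf_work->va = va;
	handle_pf_work->size = size;
	handle_pf_work->flags = flags;
	handle_pf_work->asid = asid;

	/* the work runs asynchronously, so keep the context alive until it completes */
	hl_ctx_get(ctx);
	queue_work(ctx->hdev->pf_wq, &handle_pf_work->pf_work);

	return 0;
}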
+/*
* number of user contexts allowed to call wait_for_multi_cs ioctl in
* parallel
*/
@@ -2517,37 +2568,50 @@ struct hl_clk_throttle {
};
/**
- * struct last_error_session_info - info about last session in which CS timeout or
- * razwi error occurred.
- * @open_dev_timestamp: device open timestamp.
- * @cs_timeout_timestamp: CS timeout timestamp.
- * @razwi_timestamp: razwi timestamp.
- * @cs_write_disable: if set writing to CS parameters in the structure is disabled so the
- * first (root cause) CS timeout will not be overwritten.
- * @razwi_write_disable: if set writing to razwi parameters in the structure is disabled so the
- * first (root cause) razwi will not be overwritten.
- * @cs_timeout_seq: CS timeout sequence number.
- * @razwi_addr: address that caused razwi.
- * @razwi_engine_id_1: engine id of the razwi initiator, if it was initiated by engine that does
- * not have engine id it will be set to U16_MAX.
- * @razwi_engine_id_2: second engine id of razwi initiator. Might happen that razwi have 2 possible
- * engines which one them caused the razwi. In that case, it will contain the
- * second possible engine id, otherwise it will be set to U16_MAX.
- * @razwi_non_engine_initiator: in case the initiator of the razwi does not have engine id.
- * @razwi_type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX.
+ * struct cs_timeout_info - info of last CS timeout occurred.
+ * @timestamp: CS timeout timestamp.
+ * @write_disable: if set, writing to CS parameters in the structure is disabled so that
+ * the first (root cause) CS timeout will not be overwritten.
+ * @seq: CS timeout sequence number.
+ */
+struct cs_timeout_info {
+ ktime_t timestamp;
+ atomic_t write_disable;
+ u64 seq;
+};
+
+/**
+ * struct razwi_info - info about last razwi error occurred.
+ * @timestamp: razwi timestamp.
+ * @write_disable: if set, writing to razwi parameters in the structure is disabled so that the
+ * first (root cause) razwi will not be overwritten.
+ * @addr: address that caused razwi.
+ * @engine_id_1: engine id of the razwi initiator. If it was initiated by an engine that does
+ * not have an engine id, it will be set to U16_MAX.
+ * @engine_id_2: second engine id of the razwi initiator. A razwi may have two possible
+ * initiator engines, one of which caused it; in that case this holds the
+ * second possible engine id, otherwise it is set to U16_MAX.
+ * @non_engine_initiator: set when the razwi initiator does not have an engine id.
+ * @type: cause of the razwi, page fault or access error; otherwise it is set to U8_MAX.
+ */
+struct razwi_info {
+ ktime_t timestamp;
+ atomic_t write_disable;
+ u64 addr;
+ u16 engine_id_1;
+ u16 engine_id_2;
+ u8 non_engine_initiator;
+ u8 type;
+};
+
+/**
+ * struct last_error_session_info - info about errors that occurred in the last session.
+ * @cs_timeout: CS timeout error last information.
+ * @razwi: razwi last information.
*/
struct last_error_session_info {
- ktime_t open_dev_timestamp;
- ktime_t cs_timeout_timestamp;
- ktime_t razwi_timestamp;
- atomic_t cs_write_disable;
- atomic_t razwi_write_disable;
- u64 cs_timeout_seq;
- u64 razwi_addr;
- u16 razwi_engine_id_1;
- u16 razwi_engine_id_2;
- u8 razwi_non_engine_initiator;
- u8 razwi_type;
+ struct cs_timeout_info cs_timeout;
+ struct razwi_info razwi;
};
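The write_disable fields implement a first-error-wins policy: they are cleared on device open (see hl_device_open() below) and atomically claimed by the first error of a session, so later errors do not overwrite the root cause. A minimal sketch, with a hypothetical helper name:

static void record_cs_timeout(struct hl_device *hdev, u64 seq)
{
	struct cs_timeout_info *ti = &hdev->last_error.cs_timeout;

	/* only the first (root cause) CS timeout of the session is recorded */
	if (atomic_cmpxchg(&ti->write_disable, 0, 1))
		return;

	ti->timestamp = ktime_get();
	ti->seq = seq;
}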
/**
@@ -2614,11 +2678,12 @@ struct hl_reset_info {
* context.
* @eq_wq: work queue of event queue for executing work in process context.
* @ts_free_obj_wq: work queue for timestamp registration objects release.
+ * @pf_wq: work queue for MMU pre-fetch operations.
* @kernel_ctx: Kernel driver context structure.
* @kernel_queues: array of hl_hw_queue.
* @cs_mirror_list: CS mirror list for TDR.
* @cs_mirror_lock: protects cs_mirror_list.
- * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs.
+ * @kernel_mem_mgr: memory manager for memory buffers with lifespan of driver.
* @event_queue: event queue for IRQ from CPU-CP.
* @dma_pool: DMA pool for small allocations.
* @cpu_accessible_dma_mem: Host <-> CPU-CP shared memory CPU address.
@@ -2656,9 +2721,10 @@ struct hl_reset_info {
* @state_dump_specs: constants and dictionaries needed to dump system state.
* @multi_cs_completion: array of multi-CS completion.
* @clk_throttling: holds information about current/previous clock throttling events
- * @reset_info: holds current device reset information.
* @last_error: holds information about last session in which CS timeout or razwi error occurred.
+ * @reset_info: holds current device reset information.
* @stream_master_qid_arr: pointer to array with QIDs of master streams.
+ * @fw_major_version: major version of the currently loaded preboot
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
@@ -2678,6 +2744,9 @@ struct hl_reset_info {
* session.
* @open_counter: number of successful device open operations.
* @fw_poll_interval_usec: FW status poll interval in usec.
+ * used for CPU boot status
+ * @fw_comms_poll_interval_usec: FW comms/protocol poll interval in usec.
+ * used for COMMS protocol commands (COMMS_STS_*)
* @card_type: Various ASICs have several card types. This indicates the card
* type of the current device.
* @major: habanalabs kernel driver major.
@@ -2686,6 +2755,7 @@ struct hl_reset_info {
* @id_control: minor of the control device
* @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
* addresses.
+ * @is_in_dram_scrub: true if a dram scrub operation is ongoing.
* @disabled: is device disabled.
 * @late_init_done: whether the late init stage was done during initialization.
 * @hwmon_initialized: whether the H/W monitor sensors were initialized.
@@ -2699,7 +2769,6 @@ struct hl_reset_info {
* huge pages.
* @init_done: is the initialization of the device done.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
- * @dma_mask: the dma mask that was set for this device
* @in_debug: whether the device is in a state where the profiling/tracing infrastructure
* can be used. This indication is needed because in some ASICs we need to do
* specific operations to enable that infrastructure.
@@ -2721,6 +2790,8 @@ struct hl_reset_info {
* cases where Linux was not loaded to device CPU
* @supports_wait_for_multi_cs: true if wait for multi CS is supported
* @is_compute_ctx_active: Whether there is an active compute context executing.
+ * @compute_ctx_in_release: true if the current compute context is being released.
+ * @supports_mmu_prefetch: true if prefetch is supported, otherwise false.
*/
struct hl_device {
struct pci_dev *pdev;
@@ -2742,11 +2813,12 @@ struct hl_device {
struct workqueue_struct **cq_wq;
struct workqueue_struct *eq_wq;
struct workqueue_struct *ts_free_obj_wq;
+ struct workqueue_struct *pf_wq;
struct hl_ctx *kernel_ctx;
struct hl_hw_queue *kernel_queues;
struct list_head cs_mirror_list;
spinlock_t cs_mirror_lock;
- struct hl_cb_mgr kernel_cb_mgr;
+ struct hl_mem_mgr kernel_mem_mgr;
struct hl_eq event_queue;
struct dma_pool *dma_pool;
void *cpu_accessible_dma_mem;
@@ -2797,6 +2869,7 @@ struct hl_device {
struct hl_reset_info reset_info;
u32 *stream_master_qid_arr;
+ u32 fw_major_version;
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
@@ -2807,12 +2880,15 @@ struct hl_device {
u64 open_counter;
u64 fw_poll_interval_usec;
ktime_t last_successful_open_ktime;
+ u64 fw_comms_poll_interval_usec;
+
enum cpucp_card_types card_type;
u32 major;
u32 high_pll;
u16 id;
u16 id_control;
u16 cpu_pci_msb_addr;
+ u8 is_in_dram_scrub;
u8 disabled;
u8 late_init_done;
u8 hwmon_initialized;
@@ -2823,7 +2899,6 @@ struct hl_device {
u8 pmmu_huge_range;
u8 init_done;
u8 device_cpu_disabled;
- u8 dma_mask;
u8 in_debug;
u8 cdev_sysfs_created;
u8 stop_on_err;
@@ -2839,6 +2914,8 @@ struct hl_device {
u8 supports_wait_for_multi_cs;
u8 stream_master_qid_arr_size;
u8 is_compute_ctx_active;
+ u8 compute_ctx_in_release;
+ u8 supports_mmu_prefetch;
/* Parameters for bring-up */
u64 nic_ports_mask;
@@ -2971,6 +3048,14 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
return ((address <= range_end_address) && (range_start_address <= end_address));
}
+uint64_t hl_set_dram_bar_default(struct hl_device *hdev, u64 addr);
+int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir);
+void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
+ enum dma_data_direction dir);
+int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
+ enum debugfs_access_type acc_type);
+int hl_access_dev_mem(struct hl_device *hdev, struct pci_mem_region *region,
+ enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type);
int hl_device_open(struct inode *inode, struct file *filp);
int hl_device_open_ctrl(struct inode *inode, struct file *filp);
bool hl_device_operational(struct hl_device *hdev,
@@ -3013,7 +3098,7 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv);
void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx);
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
void hl_ctx_do_release(struct kref *ref);
-void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
+void hl_ctx_get(struct hl_ctx *ctx);
int hl_ctx_put(struct hl_ctx *ctx);
struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev);
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
@@ -3034,23 +3119,21 @@ int hl_device_utilization(struct hl_device *hdev, u32 *utilization);
int hl_build_hwmon_channel_info(struct hl_device *hdev,
struct cpucp_sensor *sensors_arr);
+void hl_notifier_event_send_all(struct hl_device *hdev, u64 event);
+
int hl_sysfs_init(struct hl_device *hdev);
void hl_sysfs_fini(struct hl_device *hdev);
int hl_hwmon_init(struct hl_device *hdev);
void hl_hwmon_fini(struct hl_device *hdev);
-int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
+int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
bool map_cb, u64 *handle);
-int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
-int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
+int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle);
int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
-struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
- u32 handle);
+struct hl_cb *hl_cb_get(struct hl_mem_mgr *mmg, u64 handle);
void hl_cb_put(struct hl_cb *cb);
-void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
-void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
bool internal_cb);
int hl_cb_pool_init(struct hl_device *hdev);
@@ -3104,6 +3187,8 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx);
void hl_mmu_ctx_fini(struct hl_ctx *ctx);
int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
u32 page_size, bool flush_pte);
+int hl_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
+ u32 page_size, u32 *real_page_size, bool is_dram_addr);
int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
bool flush_pte);
int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
@@ -3112,6 +3197,7 @@ int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size);
int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags);
int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
u32 flags, u32 asid, u64 va, u64 size);
+int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size);
u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte);
u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
u8 hop_idx, u64 hop_addr, u64 virt_addr);
@@ -3149,6 +3235,7 @@ int hl_fw_cpucp_handshake(struct hl_device *hdev,
u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
u32 boot_err1_reg);
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
+int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data);
int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
struct hl_info_pci_counters *counters);
int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
@@ -3224,11 +3311,19 @@ __printf(4, 5) int hl_snprintf_resize(char **buf, size_t *size, size_t *offset,
const char *format, ...);
char *hl_format_as_binary(char *buf, size_t buf_len, u32 n);
const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
-void hl_ts_mgr_init(struct hl_ts_mgr *mgr);
-void hl_ts_mgr_fini(struct hl_device *hdev, struct hl_ts_mgr *mgr);
-int hl_ts_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
-struct hl_ts_buff *hl_ts_get(struct hl_device *hdev, struct hl_ts_mgr *mgr, u32 handle);
-void hl_ts_put(struct hl_ts_buff *buff);
+
+void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg);
+void hl_mem_mgr_fini(struct hl_mem_mgr *mmg);
+int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
+ void *args);
+struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg,
+ u64 handle);
+int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle);
+int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf);
+struct hl_mmap_mem_buf *
+hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
+ struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
+ void *args);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index ca404ed9d9a7..37edb69a7255 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -134,13 +134,14 @@ int hl_device_open(struct inode *inode, struct file *filp)
hpriv->hdev = hdev;
filp->private_data = hpriv;
hpriv->filp = filp;
+
+ mutex_init(&hpriv->notifier_event.lock);
mutex_init(&hpriv->restore_phase_mutex);
kref_init(&hpriv->refcount);
nonseekable_open(inode, filp);
- hl_cb_mgr_init(&hpriv->cb_mgr);
hl_ctx_mgr_init(&hpriv->ctx_mgr);
- hl_ts_mgr_init(&hpriv->ts_mem_mgr);
+ hl_mem_mgr_init(hpriv->hdev->dev, &hpriv->mem_mgr);
hpriv->taskpid = get_task_pid(current, PIDTYPE_PID);
@@ -150,7 +151,28 @@ int hl_device_open(struct inode *inode, struct file *filp)
dev_err_ratelimited(hdev->dev,
"Can't open %s because it is %s\n",
dev_name(hdev->dev), hdev->status[status]);
- rc = -EPERM;
+
+ if (status == HL_DEVICE_STATUS_IN_RESET)
+ rc = -EAGAIN;
+ else
+ rc = -EPERM;
+
+ goto out_err;
+ }
+
+ if (hdev->is_in_dram_scrub) {
+ dev_dbg_ratelimited(hdev->dev,
+ "Can't open %s during dram scrub\n",
+ dev_name(hdev->dev));
+ rc = -EAGAIN;
+ goto out_err;
+ }
+
+ if (hdev->compute_ctx_in_release) {
+ dev_dbg_ratelimited(hdev->dev,
+ "Can't open %s because another user is still releasing it\n",
+ dev_name(hdev->dev));
+ rc = -EAGAIN;
goto out_err;
}
@@ -173,8 +195,8 @@ int hl_device_open(struct inode *inode, struct file *filp)
hl_debugfs_add_file(hpriv);
- atomic_set(&hdev->last_error.cs_write_disable, 0);
- atomic_set(&hdev->last_error.razwi_write_disable, 0);
+ atomic_set(&hdev->last_error.cs_timeout.write_disable, 0);
+ atomic_set(&hdev->last_error.razwi.write_disable, 0);
hdev->open_counter++;
hdev->last_successful_open_jif = jiffies;
@@ -184,11 +206,11 @@ int hl_device_open(struct inode *inode, struct file *filp)
out_err:
mutex_unlock(&hdev->fpriv_list_lock);
- hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
- hl_ts_mgr_fini(hpriv->hdev, &hpriv->ts_mem_mgr);
+ hl_mem_mgr_fini(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
filp->private_data = NULL;
mutex_destroy(&hpriv->restore_phase_mutex);
+ mutex_destroy(&hpriv->notifier_event.lock);
put_pid(hpriv->taskpid);
kfree(hpriv);
@@ -222,9 +244,11 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
hpriv->hdev = hdev;
filp->private_data = hpriv;
hpriv->filp = filp;
+
+ mutex_init(&hpriv->notifier_event.lock);
nonseekable_open(inode, filp);
- hpriv->taskpid = find_get_pid(current->pid);
+ hpriv->taskpid = get_task_pid(current, PIDTYPE_PID);
mutex_lock(&hdev->fpriv_ctrl_list_lock);
@@ -288,6 +312,7 @@ static int fixup_device_params(struct hl_device *hdev)
hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type);
hdev->fw_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
+ hdev->fw_comms_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
hdev->stop_on_err = true;
hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
@@ -296,9 +321,6 @@ static int fixup_device_params(struct hl_device *hdev)
/* Enable only after the initialization of the device */
hdev->disabled = true;
- /* Set default DMA mask to 32 bits */
- hdev->dma_mask = 32;
-
return 0;
}
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index c13a3c2a7013..c7864d6bb0a1 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -76,6 +76,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
if (hw_ip.dram_size > PAGE_SIZE)
hw_ip.dram_enabled = 1;
hw_ip.dram_page_size = prop->dram_page_size;
+ hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size;
hw_ip.num_of_events = prop->num_of_events;
memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
@@ -115,6 +116,23 @@ static int hw_events_info(struct hl_device *hdev, bool aggregate,
return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
}
+static int events_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ u32 max_size = args->return_size;
+ u64 events_mask;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((max_size < sizeof(u64)) || (!out))
+ return -EINVAL;
+
+ mutex_lock(&hpriv->notifier_event.lock);
+ events_mask = hpriv->notifier_event.events_mask;
+ hpriv->notifier_event.events_mask = 0;
+ mutex_unlock(&hpriv->notifier_event.lock);
+
+ return copy_to_user(out, &events_mask, sizeof(u64)) ? -EFAULT : 0;
+}
+
static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
@@ -497,6 +515,8 @@ static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
open_stats_info.last_open_period_ms = jiffies64_to_msecs(
hdev->last_open_session_duration_jif);
open_stats_info.open_counter = hdev->open_counter;
+ open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active;
+ open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release;
return copy_to_user(out, &open_stats_info,
min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
@@ -549,7 +569,7 @@ static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *a
if ((!max_size) || (!out))
return -EINVAL;
- info.timestamp = ktime_to_ns(hdev->last_error.open_dev_timestamp);
+ info.timestamp = ktime_to_ns(hdev->last_successful_open_ktime);
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
@@ -564,8 +584,8 @@ static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
if ((!max_size) || (!out))
return -EINVAL;
- info.seq = hdev->last_error.cs_timeout_seq;
- info.timestamp = ktime_to_ns(hdev->last_error.cs_timeout_timestamp);
+ info.seq = hdev->last_error.cs_timeout.seq;
+ info.timestamp = ktime_to_ns(hdev->last_error.cs_timeout.timestamp);
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
@@ -580,16 +600,74 @@ static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
if ((!max_size) || (!out))
return -EINVAL;
- info.timestamp = ktime_to_ns(hdev->last_error.razwi_timestamp);
- info.addr = hdev->last_error.razwi_addr;
- info.engine_id_1 = hdev->last_error.razwi_engine_id_1;
- info.engine_id_2 = hdev->last_error.razwi_engine_id_2;
- info.no_engine_id = hdev->last_error.razwi_non_engine_initiator;
- info.error_type = hdev->last_error.razwi_type;
+ info.timestamp = ktime_to_ns(hdev->last_error.razwi.timestamp);
+ info.addr = hdev->last_error.razwi.addr;
+ info.engine_id_1 = hdev->last_error.razwi.engine_id_1;
+ info.engine_id_2 = hdev->last_error.razwi.engine_id_2;
+ info.no_engine_id = hdev->last_error.razwi.non_engine_initiator;
+ info.error_type = hdev->last_error.razwi.type;
+
+ return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
+}
+
+static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ struct hl_info_dev_memalloc_page_sizes info = {0};
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ /*
+ * Future ASICs that support multiple DRAM page sizes will support only "power of 2"
+ * pages (unlike some earlier ASICs that already support multiple page sizes).
+ * For this reason, for all ASICs that do not support multiple page sizes the function
+ * returns an empty bitmask, indicating that multiple page sizes are not supported.
+ */
+ hdev->asic_funcs->get_valid_dram_page_orders(&info);
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
+static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ int rc;
+
+ /* check if an eventfd is already registered for this process */
+ mutex_lock(&hpriv->notifier_event.lock);
+ if (hpriv->notifier_event.eventfd) {
+ mutex_unlock(&hpriv->notifier_event.lock);
+ return -EINVAL;
+ }
+
+ hpriv->notifier_event.eventfd = eventfd_ctx_fdget(args->eventfd);
+ if (IS_ERR(hpriv->notifier_event.eventfd)) {
+ rc = PTR_ERR(hpriv->notifier_event.eventfd);
+ hpriv->notifier_event.eventfd = NULL;
+ mutex_unlock(&hpriv->notifier_event.lock);
+ return rc;
+ }
+
+ mutex_unlock(&hpriv->notifier_event.lock);
+ return 0;
+}
+
+static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ mutex_lock(&hpriv->notifier_event.lock);
+ if (!hpriv->notifier_event.eventfd) {
+ mutex_unlock(&hpriv->notifier_event.lock);
+ return -EINVAL;
+ }
+
+ eventfd_ctx_put(hpriv->notifier_event.eventfd);
+ hpriv->notifier_event.eventfd = NULL;
+ mutex_unlock(&hpriv->notifier_event.lock);
+ return 0;
+}
+
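From user space, the expected flow is: register an eventfd, block on it, then read and clear the pending bitmap via HL_INFO_GET_EVENTS. A sketch, assuming the standard habanalabs INFO ioctl (HL_IOCTL_INFO) and the installed uapi header; error handling omitted:

#include <stdint.h>
#include <unistd.h>
#include <poll.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* uapi header, assumed installed */

static int wait_for_device_events(int dev_fd, uint64_t *events_mask)
{
	struct hl_info_args args = {0};
	struct pollfd pfd;
	uint64_t cnt;
	int efd = eventfd(0, 0);

	/* register the eventfd with the driver */
	args.op = HL_INFO_REGISTER_EVENTFD;
	args.eventfd = efd;
	ioctl(dev_fd, HL_IOCTL_INFO, &args);

	/* block until the driver signals an event */
	pfd.fd = efd;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);
	read(efd, &cnt, sizeof(cnt));

	/* fetch and clear the pending events bitmap */
	args.op = HL_INFO_GET_EVENTS;
	args.return_pointer = (uint64_t)(uintptr_t)events_mask;
	args.return_size = sizeof(*events_mask);
	return ioctl(dev_fd, HL_IOCTL_INFO, &args);
}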
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -640,6 +718,12 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_RAZWI_EVENT:
return razwi_info(hpriv, args);
+ case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES:
+ return dev_mem_alloc_page_sizes_info(hpriv, args);
+
+ case HL_INFO_GET_EVENTS:
+ return events_info(hpriv, args);
+
default:
break;
}
@@ -690,6 +774,12 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_DRAM_PENDING_ROWS:
return dram_pending_rows_info(hpriv, args);
+ case HL_INFO_REGISTER_EVENTFD:
+ return eventfd_register(hpriv, args);
+
+ case HL_INFO_UNREGISTER_EVENTFD:
+ return eventfd_unregister(hpriv, args);
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -EINVAL;
diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c
index e2bc128f2291..8500e15ef743 100644
--- a/drivers/misc/habanalabs/common/irq.c
+++ b/drivers/misc/habanalabs/common/irq.c
@@ -152,11 +152,11 @@ static void hl_ts_free_objects(struct work_struct *work)
struct hl_device *hdev = job->hdev;
list_for_each_entry_safe(free_obj, temp_free_obj, free_list_head, free_objects_node) {
- dev_dbg(hdev->dev, "About to put refcount to ts_buff (%p) cq_cb(%p)\n",
- free_obj->ts_buff,
+ dev_dbg(hdev->dev, "About to put refcount to buf (%p) cq_cb(%p)\n",
+ free_obj->buf,
free_obj->cq_cb);
- hl_ts_put(free_obj->ts_buff);
+ hl_mmap_mem_buf_put(free_obj->buf);
hl_cb_put(free_obj->cq_cb);
kfree(free_obj);
}
@@ -210,7 +210,7 @@ static int handle_registration_node(struct hl_device *hdev, struct hl_user_pendi
/* Putting the refcount for ts_buff and cq_cb objects will be handled
* in workqueue context, just add job to free_list.
*/
- free_node->ts_buff = pend->ts_reg_info.ts_buff;
+ free_node->buf = pend->ts_reg_info.buf;
free_node->cq_cb = pend->ts_reg_info.cq_cb;
list_add(&free_node->free_objects_node, *free_list);
@@ -244,7 +244,7 @@ static void handle_user_cq(struct hl_device *hdev,
list_for_each_entry_safe(pend, temp_pend, &user_cq->wait_list_head, wait_list_node) {
if ((pend->cq_kernel_addr && *(pend->cq_kernel_addr) >= pend->cq_target_value) ||
!pend->cq_kernel_addr) {
- if (pend->ts_reg_info.ts_buff) {
+ if (pend->ts_reg_info.buf) {
if (!reg_node_handle_fail) {
rc = handle_registration_node(hdev, pend,
&ts_reg_free_list_head);
@@ -282,10 +282,6 @@ irqreturn_t hl_irq_handler_user_cq(int irq, void *arg)
struct hl_user_interrupt *user_cq = arg;
struct hl_device *hdev = user_cq->hdev;
- dev_dbg(hdev->dev,
- "got user completion interrupt id %u",
- user_cq->interrupt_id);
-
/* Handle user cq interrupts registered on all interrupts */
handle_user_cq(hdev, &hdev->common_user_interrupt);
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index a13506dd8119..663dd7e589d4 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -41,7 +41,7 @@ static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u
return -EINVAL;
}
} else {
- psize = hdev->asic_prop.dram_page_size;
+ psize = prop->device_mem_alloc_default_page_size;
}
*page_size = psize;
@@ -117,7 +117,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
paddr = gen_pool_alloc(vm->dram_pg_pool, total_size);
if (!paddr) {
dev_err(hdev->dev,
- "failed to allocate %llu contiguous pages with total size of %llu\n",
+ "Cannot allocate %llu contiguous pages with total size of %llu\n",
num_pgs, total_size);
return -ENOMEM;
}
@@ -156,9 +156,10 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
else
phys_pg_pack->pages[i] = gen_pool_alloc(vm->dram_pg_pool,
page_size);
+
if (!phys_pg_pack->pages[i]) {
dev_err(hdev->dev,
- "Failed to allocate device memory (out of memory)\n");
+ "Cannot allocate device memory (out of memory)\n");
rc = -ENOMEM;
goto page_err;
}
@@ -237,19 +238,18 @@ static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
goto pin_err;
}
- rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
- userptr->sgt->nents, DMA_BIDIRECTIONAL);
- if (rc) {
- dev_err(hdev->dev, "failed to map sgt with DMA region\n");
- goto dma_map_err;
- }
-
userptr->dma_mapped = true;
userptr->dir = DMA_BIDIRECTIONAL;
userptr->vm_type = VM_TYPE_USERPTR;
*p_userptr = userptr;
+ rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
+ if (rc) {
+ dev_err(hdev->dev, "failed to map sgt with DMA region\n");
+ goto dma_map_err;
+ }
+
return 0;
dma_map_err:
@@ -900,7 +900,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
* consecutive block.
*/
total_npages = 0;
- for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
+ for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
npages = hl_get_sg_info(sg, &dma_addr);
total_npages += npages;
@@ -929,7 +929,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
phys_pg_pack->total_size = total_npages * page_size;
j = 0;
- for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
+ for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
npages = hl_get_sg_info(sg, &dma_addr);
/* align down to physical page size and save the offset */
@@ -1102,21 +1102,24 @@ static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
* map a device virtual block to this pages and return the start address of
* this block.
*/
-static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
- u64 *device_addr)
+static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device_addr)
{
- struct hl_device *hdev = ctx->hdev;
- struct hl_vm *vm = &hdev->vm;
struct hl_vm_phys_pg_pack *phys_pg_pack;
+ enum hl_va_range_type va_range_type = 0;
+ struct hl_device *hdev = ctx->hdev;
struct hl_userptr *userptr = NULL;
+ u32 handle = 0, va_block_align;
struct hl_vm_hash_node *hnode;
+ struct hl_vm *vm = &hdev->vm;
struct hl_va_range *va_range;
- enum vm_type *vm_type;
+ bool is_userptr, do_prefetch;
u64 ret_vaddr, hint_addr;
- u32 handle = 0, va_block_align;
+ enum vm_type *vm_type;
int rc;
- bool is_userptr = args->flags & HL_MEM_USERPTR;
- enum hl_va_range_type va_range_type = 0;
+
+ /* set map flags */
+ is_userptr = args->flags & HL_MEM_USERPTR;
+ do_prefetch = hdev->supports_mmu_prefetch && (args->flags & HL_MEM_PREFETCH);
/* Assume failure */
*device_addr = 0;
@@ -1241,19 +1244,27 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
if (rc) {
- mutex_unlock(&ctx->mmu_lock);
- dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
- handle);
+ dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
goto map_err;
}
rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
ctx->asid, ret_vaddr, phys_pg_pack->total_size);
+ if (rc)
+ goto map_err;
mutex_unlock(&ctx->mmu_lock);
- if (rc)
- goto map_err;
+ /*
+ * prefetch is done upon the user's request. it is performed in a WQ and so can
+ * run outside the MMU lock. the operation itself is already protected by the mmu lock
+ */
+ if (do_prefetch) {
+ rc = hl_mmu_prefetch_cache_range(ctx, *vm_type, ctx->asid, ret_vaddr,
+ phys_pg_pack->total_size);
+ if (rc)
+ goto map_err;
+ }
ret_vaddr += phys_pg_pack->offset;
@@ -1272,6 +1283,8 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
return rc;
map_err:
+ mutex_unlock(&ctx->mmu_lock);
+
if (add_va_block(hdev, va_range, ret_vaddr,
ret_vaddr + phys_pg_pack->total_size - 1))
dev_warn(hdev->dev,
@@ -1509,7 +1522,7 @@ int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
vma->vm_ops = &hw_block_vm_ops;
vma->vm_private_data = lnode;
- hl_ctx_get(hdev, ctx);
+ hl_ctx_get(ctx);
rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
if (rc) {
@@ -1819,7 +1832,7 @@ static int export_dmabuf_common(struct hl_ctx *ctx,
}
hl_dmabuf->ctx = ctx;
- hl_ctx_get(hdev, hl_dmabuf->ctx);
+ hl_ctx_get(hl_dmabuf->ctx);
*dmabuf_fd = fd;
@@ -2076,164 +2089,34 @@ out:
return rc;
}
-static void ts_buff_release(struct kref *ref)
-{
- struct hl_ts_buff *buff;
-
- buff = container_of(ref, struct hl_ts_buff, refcount);
-
- vfree(buff->kernel_buff_address);
- vfree(buff->user_buff_address);
- kfree(buff);
-}
-
-struct hl_ts_buff *hl_ts_get(struct hl_device *hdev, struct hl_ts_mgr *mgr,
- u32 handle)
-{
- struct hl_ts_buff *buff;
-
- spin_lock(&mgr->ts_lock);
- buff = idr_find(&mgr->ts_handles, handle);
- if (!buff) {
- spin_unlock(&mgr->ts_lock);
- dev_warn(hdev->dev,
- "TS buff get failed, no match to handle 0x%x\n", handle);
- return NULL;
- }
- kref_get(&buff->refcount);
- spin_unlock(&mgr->ts_lock);
-
- return buff;
-}
-
-void hl_ts_put(struct hl_ts_buff *buff)
+static void ts_buff_release(struct hl_mmap_mem_buf *buf)
{
- kref_put(&buff->refcount, ts_buff_release);
-}
-
-static void buff_vm_close(struct vm_area_struct *vma)
-{
- struct hl_ts_buff *buff = (struct hl_ts_buff *) vma->vm_private_data;
- long new_mmap_size;
-
- new_mmap_size = buff->mmap_size - (vma->vm_end - vma->vm_start);
+ struct hl_ts_buff *ts_buff = buf->private;
- if (new_mmap_size > 0) {
- buff->mmap_size = new_mmap_size;
- return;
- }
-
- atomic_set(&buff->mmap, 0);
- hl_ts_put(buff);
- vma->vm_private_data = NULL;
+ vfree(ts_buff->kernel_buff_address);
+ vfree(ts_buff->user_buff_address);
+ kfree(ts_buff);
}
-static const struct vm_operations_struct ts_buff_vm_ops = {
- .close = buff_vm_close
-};
-
-int hl_ts_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
+static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args)
{
- struct hl_device *hdev = hpriv->hdev;
- struct hl_ts_buff *buff;
- u32 handle, user_buff_size;
- int rc;
-
- /* We use the page offset to hold the idr and thus we need to clear
- * it before doing the mmap itself
- */
- handle = vma->vm_pgoff;
- vma->vm_pgoff = 0;
-
- buff = hl_ts_get(hdev, &hpriv->ts_mem_mgr, handle);
- if (!buff) {
- dev_err(hdev->dev,
- "TS buff mmap failed, no match to handle 0x%x\n", handle);
- return -EINVAL;
- }
-
- /* Validation check */
- user_buff_size = vma->vm_end - vma->vm_start;
- if (user_buff_size != ALIGN(buff->user_buff_size, PAGE_SIZE)) {
- dev_err(hdev->dev,
- "TS buff mmap failed, mmap size 0x%x != 0x%x buff size\n",
- user_buff_size, ALIGN(buff->user_buff_size, PAGE_SIZE));
- rc = -EINVAL;
- goto put_buff;
- }
-
-#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
- if (!access_ok(VERIFY_WRITE,
- (void __user *) (uintptr_t) vma->vm_start, user_buff_size)) {
-#else
- if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
- user_buff_size)) {
-#endif
- dev_err(hdev->dev,
- "user pointer is invalid - 0x%lx\n",
- vma->vm_start);
-
- rc = -EINVAL;
- goto put_buff;
- }
+ struct hl_ts_buff *ts_buff = buf->private;
- if (atomic_cmpxchg(&buff->mmap, 0, 1)) {
- dev_err(hdev->dev, "TS buff memory mmap failed, already mmaped to user\n");
- rc = -EINVAL;
- goto put_buff;
- }
-
- vma->vm_ops = &ts_buff_vm_ops;
- vma->vm_private_data = buff;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE;
- rc = remap_vmalloc_range(vma, buff->user_buff_address, 0);
- if (rc) {
- atomic_set(&buff->mmap, 0);
- goto put_buff;
- }
-
- buff->mmap_size = buff->user_buff_size;
- vma->vm_pgoff = handle;
-
- return 0;
-
-put_buff:
- hl_ts_put(buff);
- return rc;
-}
-
-void hl_ts_mgr_init(struct hl_ts_mgr *mgr)
-{
- spin_lock_init(&mgr->ts_lock);
- idr_init(&mgr->ts_handles);
+ return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0);
}
-void hl_ts_mgr_fini(struct hl_device *hdev, struct hl_ts_mgr *mgr)
-{
- struct hl_ts_buff *buff;
- struct idr *idp;
- u32 id;
-
- idp = &mgr->ts_handles;
-
- idr_for_each_entry(idp, buff, id) {
- if (kref_put(&buff->refcount, ts_buff_release) != 1)
- dev_err(hdev->dev, "TS buff handle %d for CTX is still alive\n",
- id);
- }
-
- idr_destroy(&mgr->ts_handles);
-}
-
-static struct hl_ts_buff *hl_ts_alloc_buff(struct hl_device *hdev, u32 num_elements)
+static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
struct hl_ts_buff *ts_buff = NULL;
- u32 size;
+ u32 size, num_elements;
void *p;
+ num_elements = *(u32 *)args;
+
ts_buff = kzalloc(sizeof(*ts_buff), GFP_KERNEL);
if (!ts_buff)
- return NULL;
+ return -ENOMEM;
/* Allocate the user buffer */
size = num_elements * sizeof(u64);
@@ -2242,7 +2125,7 @@ static struct hl_ts_buff *hl_ts_alloc_buff(struct hl_device *hdev, u32 num_eleme
goto free_mem;
ts_buff->user_buff_address = p;
- ts_buff->user_buff_size = size;
+ buf->mappable_size = size;
/* Allocate the internal kernel buffer */
size = num_elements * sizeof(struct hl_user_pending_interrupt);
@@ -2253,15 +2136,25 @@ static struct hl_ts_buff *hl_ts_alloc_buff(struct hl_device *hdev, u32 num_eleme
ts_buff->kernel_buff_address = p;
ts_buff->kernel_buff_size = size;
- return ts_buff;
+ buf->private = ts_buff;
+
+ return 0;
free_user_buff:
vfree(ts_buff->user_buff_address);
free_mem:
kfree(ts_buff);
- return NULL;
+ return -ENOMEM;
}
+static struct hl_mmap_mem_buf_behavior hl_ts_behavior = {
+ .topic = "TS",
+ .mem_id = HL_MMAP_TYPE_TS_BUFF,
+ .mmap = hl_ts_mmap,
+ .alloc = hl_ts_alloc_buf,
+ .release = ts_buff_release,
+};
+
/**
* allocate_timestamps_buffers() - allocate timestamps buffers
* This function will allocate ts buffer that will later on be mapped to the user
@@ -2278,54 +2171,22 @@ free_mem:
*/
static int allocate_timestamps_buffers(struct hl_fpriv *hpriv, struct hl_mem_in *args, u64 *handle)
{
- struct hl_ts_mgr *ts_mgr = &hpriv->ts_mem_mgr;
- struct hl_device *hdev = hpriv->hdev;
- struct hl_ts_buff *ts_buff;
- int rc = 0;
+ struct hl_mem_mgr *mmg = &hpriv->mem_mgr;
+ struct hl_mmap_mem_buf *buf;
if (args->num_of_elements > TS_MAX_ELEMENTS_NUM) {
- dev_err(hdev->dev, "Num of elements exceeds Max allowed number (0x%x > 0x%x)\n",
+ dev_err(mmg->dev, "Num of elements exceeds Max allowed number (0x%x > 0x%x)\n",
args->num_of_elements, TS_MAX_ELEMENTS_NUM);
return -EINVAL;
}
- /* Allocate ts buffer object
- * This object will contain two buffers one that will be mapped to the user
- * and another internal buffer for the driver use only, which won't be mapped
- * to the user.
- */
- ts_buff = hl_ts_alloc_buff(hdev, args->num_of_elements);
- if (!ts_buff) {
- rc = -ENOMEM;
- goto out_err;
- }
-
- spin_lock(&ts_mgr->ts_lock);
- rc = idr_alloc(&ts_mgr->ts_handles, ts_buff, 1, 0, GFP_ATOMIC);
- spin_unlock(&ts_mgr->ts_lock);
- if (rc < 0) {
- dev_err(hdev->dev, "Failed to allocate IDR for a new ts buffer\n");
- goto release_ts_buff;
- }
-
- ts_buff->id = rc;
- ts_buff->hdev = hdev;
-
- kref_init(&ts_buff->refcount);
-
- /* idr is 32-bit so we can safely OR it with a mask that is above 32 bit */
- *handle = (u64) ts_buff->id | HL_MMAP_TYPE_TS_BUFF;
- *handle <<= PAGE_SHIFT;
+ buf = hl_mmap_mem_buf_alloc(mmg, &hl_ts_behavior, GFP_KERNEL, &args->num_of_elements);
+ if (!buf)
+ return -ENOMEM;
- dev_dbg(hdev->dev, "Created ts buff object handle(%u)\n", ts_buff->id);
+ *handle = buf->handle;
return 0;
-
-release_ts_buff:
- kref_put(&ts_buff->refcount, ts_buff_release);
-out_err:
- *handle = 0;
- return rc;
}
int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
@@ -2587,9 +2448,7 @@ void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
hl_debugfs_remove_userptr(hdev, userptr);
if (userptr->dma_mapped)
- hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
- userptr->sgt->nents,
- userptr->dir);
+ hdev->asic_funcs->hl_dma_unmap_sgtable(hdev, userptr->sgt, userptr->dir);
unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
kvfree(userptr->pages);
diff --git a/drivers/misc/habanalabs/common/memory_mgr.c b/drivers/misc/habanalabs/common/memory_mgr.c
new file mode 100644
index 000000000000..ea5f2bd31b0a
--- /dev/null
+++ b/drivers/misc/habanalabs/common/memory_mgr.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+/**
+ * hl_mmap_mem_buf_get - increase the buffer refcount and return a pointer to
+ * the buffer descriptor.
+ *
+ * @mmg: parent unified memory manager
+ * @handle: requested buffer handle
+ *
+ * Find the buffer in the store and return a pointer to its descriptor.
+ * Increase buffer refcount. If not found - return NULL.
+ */
+struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u64 handle)
+{
+ struct hl_mmap_mem_buf *buf;
+
+ spin_lock(&mmg->lock);
+ buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
+ if (!buf) {
+ spin_unlock(&mmg->lock);
+ dev_warn(mmg->dev,
+ "Buff get failed, no match to handle %#llx\n", handle);
+ return NULL;
+ }
+ kref_get(&buf->refcount);
+ spin_unlock(&mmg->lock);
+ return buf;
+}
+
+/**
+ * hl_mmap_mem_buf_destroy - destroy the unused buffer
+ *
+ * @buf: memory manager buffer descriptor
+ *
+ * Internal function, used as a final step of buffer release. Shall be invoked
+ * only when the buffer is no longer in use (removed from idr). Will call the
+ * release callback (if applicable), and free the memory.
+ */
+static void hl_mmap_mem_buf_destroy(struct hl_mmap_mem_buf *buf)
+{
+ if (buf->behavior->release)
+ buf->behavior->release(buf);
+
+ kfree(buf);
+}
+
+/**
+ * hl_mmap_mem_buf_release - release buffer
+ *
+ * @kref: kref that reached 0.
+ *
+ * Internal function, used as a kref release callback, when the last user of
+ * the buffer is released. Shall be called from an interrupt context.
+ */
+static void hl_mmap_mem_buf_release(struct kref *kref)
+{
+ struct hl_mmap_mem_buf *buf =
+ container_of(kref, struct hl_mmap_mem_buf, refcount);
+
+ spin_lock(&buf->mmg->lock);
+ idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
+ spin_unlock(&buf->mmg->lock);
+
+ hl_mmap_mem_buf_destroy(buf);
+}
+
+/**
+ * hl_mmap_mem_buf_remove_idr_locked - remove handle from idr
+ *
+ * @kref: kref that reached 0.
+ *
+ * Internal function, used for kref put by handle. Assumes mmg lock is taken.
+ * Will remove the buffer from idr, without destroying it.
+ */
+static void hl_mmap_mem_buf_remove_idr_locked(struct kref *kref)
+{
+ struct hl_mmap_mem_buf *buf =
+ container_of(kref, struct hl_mmap_mem_buf, refcount);
+
+ idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
+}
+
+/**
+ * hl_mmap_mem_buf_put - decrease the reference to the buffer
+ *
+ * @buf: memory manager buffer descriptor
+ *
+ * Decrease the reference to the buffer, and release it if it was the last one.
+ * Shall be called from an interrupt context.
+ */
+int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf)
+{
+ return kref_put(&buf->refcount, hl_mmap_mem_buf_release);
+}
+
+/**
+ * hl_mmap_mem_buf_put_handle - decrease the reference to the buffer with the
+ * given handle.
+ *
+ * @mmg: parent unified memory manager
+ * @handle: requested buffer handle
+ *
+ * Decrease the reference to the buffer, and release it if it was the last one.
+ * Shall not be called from an interrupt context. Return -EINVAL if handle was
+ * not found, else return the put outcome (0 or 1).
+ */
+int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle)
+{
+ struct hl_mmap_mem_buf *buf;
+
+ spin_lock(&mmg->lock);
+ buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
+ if (!buf) {
+ spin_unlock(&mmg->lock);
+ dev_dbg(mmg->dev,
+ "Buff put failed, no match to handle %#llx\n", handle);
+ return -EINVAL;
+ }
+
+ if (kref_put(&buf->refcount, hl_mmap_mem_buf_remove_idr_locked)) {
+ spin_unlock(&mmg->lock);
+ hl_mmap_mem_buf_destroy(buf);
+ return 1;
+ }
+
+ spin_unlock(&mmg->lock);
+ return 0;
+}
+
+/**
+ * hl_mmap_mem_buf_alloc - allocate a new mappable buffer
+ *
+ * @mmg: parent unified memory manager
+ * @behavior: behavior object describing this buffer's polymorphic behavior
+ * @gfp: gfp flags to use for the memory allocations
+ * @args: additional args passed to behavior->alloc
+ *
+ * Allocate and register a new memory buffer inside the given memory manager.
+ * Return the pointer to the new buffer on success or NULL on failure.
+ */
+struct hl_mmap_mem_buf *
+hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
+ struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
+ void *args)
+{
+ struct hl_mmap_mem_buf *buf;
+ int rc;
+
+ buf = kzalloc(sizeof(*buf), gfp);
+ if (!buf)
+ return NULL;
+
+ spin_lock(&mmg->lock);
+ rc = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC);
+ spin_unlock(&mmg->lock);
+ if (rc < 0) {
+ dev_err(mmg->dev,
+ "%s: Failed to allocate IDR for a new buffer, rc=%d\n",
+ behavior->topic, rc);
+ goto free_buf;
+ }
+
+ buf->mmg = mmg;
+ buf->behavior = behavior;
+ buf->handle = (((u64)rc | buf->behavior->mem_id) << PAGE_SHIFT);
+ kref_init(&buf->refcount);
+
+ rc = buf->behavior->alloc(buf, gfp, args);
+ if (rc) {
+ dev_err(mmg->dev, "%s: Failure in buffer alloc callback %d\n",
+ behavior->topic, rc);
+ goto remove_idr;
+ }
+
+ return buf;
+
+remove_idr:
+ spin_lock(&mmg->lock);
+ idr_remove(&mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
+ spin_unlock(&mmg->lock);
+free_buf:
+ kfree(buf);
+ return NULL;
+}
+
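The handle encoding above is designed so the value can be handed straight back to user space and used directly as an mmap() offset; the decode in hl_mmap_mem_buf_get() simply reverses it. Schematically (illustrative variable names):

/* encode: the 32-bit IDR id is OR'd with the buffer-type bits (mem_id, which
 * live above bit 32, e.g. HL_MMAP_TYPE_TS_BUFF) and shifted by PAGE_SHIFT
 */
u64 handle = ((u64)idr_id | behavior->mem_id) << PAGE_SHIFT;

/* decode: shifting back and taking the low 32 bits drops the type bits,
 * leaving the IDR id used for the lookup
 */
u32 id = lower_32_bits(handle >> PAGE_SHIFT);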
+/**
+ * hl_mmap_mem_buf_vm_close - handle mmap close
+ *
+ * @vma: the vma object for which mmap was closed.
+ *
+ * Put the memory buffer if it is no longer mapped.
+ */
+static void hl_mmap_mem_buf_vm_close(struct vm_area_struct *vma)
+{
+ struct hl_mmap_mem_buf *buf =
+ (struct hl_mmap_mem_buf *)vma->vm_private_data;
+ long new_mmap_size;
+
+ new_mmap_size = buf->real_mapped_size - (vma->vm_end - vma->vm_start);
+
+ if (new_mmap_size > 0) {
+ buf->real_mapped_size = new_mmap_size;
+ return;
+ }
+
+ atomic_set(&buf->mmap, 0);
+ hl_mmap_mem_buf_put(buf);
+ vma->vm_private_data = NULL;
+}
+
+static const struct vm_operations_struct hl_mmap_mem_buf_vm_ops = {
+ .close = hl_mmap_mem_buf_vm_close
+};
+
+/**
+ * hl_mem_mgr_mmap - map the given buffer to the user
+ *
+ * @mmg: unified memory manager
+ * @vma: the vma object to map the buffer into.
+ * @args: additional args passed to behavior->mmap
+ *
+ * Map the buffer specified by the vma->vm_pgoff to the given vma.
+ */
+int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
+ void *args)
+{
+ struct hl_mmap_mem_buf *buf;
+ u64 user_mem_size;
+ u64 handle;
+ int rc;
+
+ /* We use the page offset to hold the idr and thus we need to clear
+ * it before doing the mmap itself
+ */
+ handle = vma->vm_pgoff << PAGE_SHIFT;
+ vma->vm_pgoff = 0;
+
+ /* Reference was taken here */
+ buf = hl_mmap_mem_buf_get(mmg, handle);
+ if (!buf) {
+ dev_err(mmg->dev,
+ "Memory mmap failed, no match to handle %#llx\n", handle);
+ return -EINVAL;
+ }
+
+ /* Validation check */
+ user_mem_size = vma->vm_end - vma->vm_start;
+ if (user_mem_size != ALIGN(buf->mappable_size, PAGE_SIZE)) {
+ dev_err(mmg->dev,
+ "%s: Memory mmap failed, mmap VM size 0x%llx != 0x%llx allocated physical mem size\n",
+ buf->behavior->topic, user_mem_size, buf->mappable_size);
+ rc = -EINVAL;
+ goto put_mem;
+ }
+
+#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
+ if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
+ user_mem_size)) {
+#else
+ if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
+ user_mem_size)) {
+#endif
+ dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
+ buf->behavior->topic, vma->vm_start);
+
+ rc = -EINVAL;
+ goto put_mem;
+ }
+
+ if (atomic_cmpxchg(&buf->mmap, 0, 1)) {
+ dev_err(mmg->dev,
+ "%s, Memory mmap failed, already mmaped to user\n",
+ buf->behavior->topic);
+ rc = -EINVAL;
+ goto put_mem;
+ }
+
+ vma->vm_ops = &hl_mmap_mem_buf_vm_ops;
+
+ /* Note: We're transferring the memory reference to vma->vm_private_data here. */
+
+ vma->vm_private_data = buf;
+
+ rc = buf->behavior->mmap(buf, vma, args);
+ if (rc) {
+ atomic_set(&buf->mmap, 0);
+ goto put_mem;
+ }
+
+ buf->real_mapped_size = buf->mappable_size;
+ vma->vm_pgoff = handle >> PAGE_SHIFT;
+
+ return 0;
+
+put_mem:
+ hl_mmap_mem_buf_put(buf);
+ return rc;
+}
+
+/**
+ * hl_mem_mgr_init - initialize unified memory manager
+ *
+ * @dev: owner device pointer
+ * @mmg: structure to initialize
+ *
+ * Initialize an instance of unified memory manager
+ */
+void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
+{
+ mmg->dev = dev;
+ spin_lock_init(&mmg->lock);
+ idr_init(&mmg->handles);
+}
+
+/**
+ * hl_mem_mgr_fini - release unified memory manager
+ *
+ * @mmg: parent unified memory manager
+ *
+ * Release the unified memory manager. Shall be called from an interrupt context.
+ */
+void hl_mem_mgr_fini(struct hl_mem_mgr *mmg)
+{
+ struct hl_mmap_mem_buf *buf;
+ struct idr *idp;
+ const char *topic;
+ u32 id;
+
+ idp = &mmg->handles;
+
+ idr_for_each_entry(idp, buf, id) {
+ topic = buf->behavior->topic;
+ if (hl_mmap_mem_buf_put(buf) != 1)
+ dev_err(mmg->dev,
+ "%s: Buff handle %u for CTX is still alive\n",
+ topic, id);
+ }
+
+ /* TODO: can it happen that some buffer is still in use at this point? */
+
+ idr_destroy(&mmg->handles);
+}
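Putting the new manager together, a minimal lifecycle sketch (illustrative only; hl_ts_behavior is the behavior object added in memory.c above and is static there, so a real caller would supply its own behavior object):

void example_mem_mgr_lifecycle(struct device *dev)
{
	struct hl_mem_mgr mmg;
	struct hl_mmap_mem_buf *buf;
	u32 num_elements = 64;	/* illustrative */

	hl_mem_mgr_init(dev, &mmg);	/* one instance per FD (hl_fpriv) or per device */

	/* allocate a buffer, register it in the IDR and get an mmap-able handle */
	buf = hl_mmap_mem_buf_alloc(&mmg, &hl_ts_behavior, GFP_KERNEL, &num_elements);

	/* ... return buf->handle to user space; it maps it later via hl_mem_mgr_mmap() ... */

	hl_mmap_mem_buf_put(buf);	/* drop the allocation reference when done */
	hl_mem_mgr_fini(&mmg);		/* releases any buffers still registered */
}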
diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
index 810b73421ce1..f3734718d94f 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu.c
@@ -9,6 +9,20 @@
#include "../habanalabs.h"
+/**
+ * hl_mmu_get_funcs() - get MMU functions structure
+ * @hdev: habanalabs device structure.
+ * @pgt_residency: page table residency.
+ * @is_dram_addr: true if we need HMMU functions
+ *
+ * @return appropriate MMU functions structure
+ */
+static struct hl_mmu_funcs *hl_mmu_get_funcs(struct hl_device *hdev, int pgt_residency,
+ bool is_dram_addr)
+{
+ return &hdev->mmu_func[pgt_residency];
+}
+
bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -122,6 +136,53 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx)
}
/*
+ * hl_mmu_get_real_page_size - get real page size to use in map/unmap operation
+ *
+ * @hdev: pointer to device data.
+ * @mmu_prop: MMU properties.
+ * @page_size: page size
+ * @real_page_size: set here the actual page size to use for the operation
+ * @is_dram_addr: true if DRAM address, otherwise false.
+ *
+ * @return 0 on success, otherwise non 0 error code
+ *
+ * note that this is a general implementation that fits most MMU architectures. However, as this
+ * is used as an MMU function:
+ * 1. it shall not be called directly - only through an mmu_func structure instance
+ * 2. each MMU may modify the implementation internally
+ */
+int hl_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
+ u32 page_size, u32 *real_page_size, bool is_dram_addr)
+{
+ /*
+ * The H/W handles mapping of specific page sizes. Hence if the page
+ * size is bigger, we break it to sub-pages and map them separately.
+ */
+ if ((page_size % mmu_prop->page_size) == 0) {
+ *real_page_size = mmu_prop->page_size;
+ return 0;
+ }
+
+ dev_err(hdev->dev, "page size of %u is not %uKB aligned, can't map\n",
+ page_size, mmu_prop->page_size >> 10);
+
+ return -EFAULT;
+}
+
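As a concrete example, with mmu_prop->page_size = 4 KB a 2 MB mapping request satisfies 2 MB % 4 KB == 0, so *real_page_size is set to 4 KB and the callers (hl_mmu_map_page()/hl_mmu_unmap_page()) iterate over npages = 2 MB / 4 KB = 512 sub-pages; a 6 KB request would instead fail with -EFAULT because it is not 4 KB aligned.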
+static struct hl_mmu_properties *hl_mmu_get_prop(struct hl_device *hdev, u32 page_size,
+ bool is_dram_addr)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ if (is_dram_addr)
+ return &prop->dmmu;
+ else if ((page_size % prop->pmmu_huge.page_size) == 0)
+ return &prop->pmmu_huge;
+
+ return &prop->pmmu;
+}
+
+/*
* hl_mmu_unmap_page - unmaps a virtual addr
*
* @ctx: pointer to the context structure
@@ -142,60 +203,35 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx)
* For optimization reasons PCI flush may be requested once after unmapping of
* large area.
*/
-int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
- bool flush_pte)
+int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flush_pte)
{
struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_mmu_properties *mmu_prop;
- u64 real_virt_addr;
+ struct hl_mmu_funcs *mmu_funcs;
+ int i, pgt_residency, rc = 0;
u32 real_page_size, npages;
- int i, rc = 0, pgt_residency;
+ u64 real_virt_addr;
bool is_dram_addr;
if (!hdev->mmu_enable)
return 0;
is_dram_addr = hl_is_dram_va(hdev, virt_addr);
-
- if (is_dram_addr)
- mmu_prop = &prop->dmmu;
- else if ((page_size % prop->pmmu_huge.page_size) == 0)
- mmu_prop = &prop->pmmu_huge;
- else
- mmu_prop = &prop->pmmu;
+ mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr);
pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
- /*
- * The H/W handles mapping of specific page sizes. Hence if the page
- * size is bigger, we break it to sub-pages and unmap them separately.
- */
- if ((page_size % mmu_prop->page_size) == 0) {
- real_page_size = mmu_prop->page_size;
- } else {
- /*
- * MMU page size may differ from DRAM page size.
- * In such case work with the DRAM page size and let the MMU
- * scrambling routine to handle this mismatch when
- * calculating the address to remove from the MMU page table
- */
- if (is_dram_addr && ((page_size % prop->dram_page_size) == 0)) {
- real_page_size = prop->dram_page_size;
- } else {
- dev_err(hdev->dev,
- "page size of %u is not %uKB aligned, can't unmap\n",
- page_size, mmu_prop->page_size >> 10);
+ mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
- return -EFAULT;
- }
- }
+ rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size,
+ is_dram_addr);
+ if (rc)
+ return rc;
npages = page_size / real_page_size;
real_virt_addr = virt_addr;
for (i = 0 ; i < npages ; i++) {
- rc = hdev->mmu_func[pgt_residency].unmap(ctx,
- real_virt_addr, is_dram_addr);
+ rc = mmu_funcs->unmap(ctx, real_virt_addr, is_dram_addr);
if (rc)
break;
@@ -203,7 +239,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
}
if (flush_pte)
- hdev->mmu_func[pgt_residency].flush(ctx);
+ mmu_funcs->flush(ctx);
return rc;
}
@@ -230,15 +266,15 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
* For optimization reasons PCI flush may be requested once after mapping of
* large area.
*/
-int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
- u32 page_size, bool flush_pte)
+int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
+ bool flush_pte)
{
+ int i, rc, pgt_residency, mapped_cnt = 0;
struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_mmu_properties *mmu_prop;
u64 real_virt_addr, real_phys_addr;
+ struct hl_mmu_funcs *mmu_funcs;
u32 real_page_size, npages;
- int i, rc, pgt_residency, mapped_cnt = 0;
bool is_dram_addr;
@@ -246,40 +282,15 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
return 0;
is_dram_addr = hl_is_dram_va(hdev, virt_addr);
-
- if (is_dram_addr)
- mmu_prop = &prop->dmmu;
- else if ((page_size % prop->pmmu_huge.page_size) == 0)
- mmu_prop = &prop->pmmu_huge;
- else
- mmu_prop = &prop->pmmu;
+ mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr);
pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
+ mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
- /*
- * The H/W handles mapping of specific page sizes. Hence if the page
- * size is bigger, we break it to sub-pages and map them separately.
- */
- if ((page_size % mmu_prop->page_size) == 0) {
- real_page_size = mmu_prop->page_size;
- } else if (is_dram_addr && ((page_size % prop->dram_page_size) == 0) &&
- (prop->dram_page_size < mmu_prop->page_size)) {
- /*
- * MMU page size may differ from DRAM page size.
- * In such case work with the DRAM page size and let the MMU
- * scrambling routine handle this mismatch when calculating
- * the address to place in the MMU page table. (in that case
- * also make sure that the dram_page_size smaller than the
- * mmu page size)
- */
- real_page_size = prop->dram_page_size;
- } else {
- dev_err(hdev->dev,
- "page size of %u is not %uKB aligned, can't map\n",
- page_size, mmu_prop->page_size >> 10);
-
- return -EFAULT;
- }
+ rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size,
+ is_dram_addr);
+ if (rc)
+ return rc;
/*
* Verify that the phys and virt addresses are aligned with the
@@ -302,9 +313,8 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
real_phys_addr = phys_addr;
for (i = 0 ; i < npages ; i++) {
- rc = hdev->mmu_func[pgt_residency].map(ctx,
- real_virt_addr, real_phys_addr,
- real_page_size, is_dram_addr);
+ rc = mmu_funcs->map(ctx, real_virt_addr, real_phys_addr, real_page_size,
+ is_dram_addr);
if (rc)
goto err;
@@ -314,22 +324,21 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
}
if (flush_pte)
- hdev->mmu_func[pgt_residency].flush(ctx);
+ mmu_funcs->flush(ctx);
return 0;
err:
real_virt_addr = virt_addr;
for (i = 0 ; i < mapped_cnt ; i++) {
- if (hdev->mmu_func[pgt_residency].unmap(ctx,
- real_virt_addr, is_dram_addr))
+ if (mmu_funcs->unmap(ctx, real_virt_addr, is_dram_addr))
dev_warn_ratelimited(hdev->dev,
"failed to unmap va: 0x%llx\n", real_virt_addr);
real_virt_addr += real_page_size;
}
- hdev->mmu_func[pgt_residency].flush(ctx);
+ mmu_funcs->flush(ctx);
return rc;
}
@@ -480,11 +489,9 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops,
u64 *phys_addr)
{
- struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
u64 offset_mask, addr_mask, hop_shift, tmp_phys_addr;
- u32 hop0_shift_off;
- void *p;
+ struct hl_mmu_properties *mmu_prop;
/* last hop holds the phys address and flags */
if (hops->unscrambled_paddr)
@@ -493,11 +500,11 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
tmp_phys_addr = hops->hop_info[hops->used_hops - 1].hop_pte_val;
if (hops->range_type == HL_VA_RANGE_TYPE_HOST_HUGE)
- p = &prop->pmmu_huge;
+ mmu_prop = &prop->pmmu_huge;
else if (hops->range_type == HL_VA_RANGE_TYPE_HOST)
- p = &prop->pmmu;
+ mmu_prop = &prop->pmmu;
else /* HL_VA_RANGE_TYPE_DRAM */
- p = &prop->dmmu;
+ mmu_prop = &prop->dmmu;
if ((hops->range_type == HL_VA_RANGE_TYPE_DRAM) &&
!is_power_of_2(prop->dram_page_size)) {
@@ -508,7 +515,7 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
/*
* Bit arithmetics cannot be used for non power of two page
* sizes. In addition, since bit arithmetics is not used,
- * we cannot ignore dram base. All that shall be considerd.
+ * we cannot ignore dram base. All that shall be considered.
*/
dram_page_size = prop->dram_page_size;
@@ -526,10 +533,7 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
* structure in order to determine the right masks
* for the page offset.
*/
- hop0_shift_off = offsetof(struct hl_mmu_properties, hop0_shift);
- p = (char *)p + hop0_shift_off;
- p = (char *)p + ((hops->used_hops - 1) * sizeof(u64));
- hop_shift = *(u64 *)p;
+ hop_shift = mmu_prop->hop_shifts[hops->used_hops - 1];
offset_mask = (1ull << hop_shift) - 1;
addr_mask = ~(offset_mask);
*phys_addr = (tmp_phys_addr & addr_mask) |
@@ -557,40 +561,39 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops)
{
struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct asic_fixed_properties *prop;
struct hl_mmu_properties *mmu_prop;
- int rc;
+ struct hl_mmu_funcs *mmu_funcs;
+ int pgt_residency, rc;
bool is_dram_addr;
if (!hdev->mmu_enable)
return -EOPNOTSUPP;
+ prop = &hdev->asic_prop;
hops->scrambled_vaddr = virt_addr; /* assume no scrambling */
is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
- prop->dmmu.start_addr,
- prop->dmmu.end_addr);
+ prop->dmmu.start_addr,
+ prop->dmmu.end_addr);
- /* host-residency is the same in PMMU and HPMMU, use one of them */
+ /* host-residency is the same in PMMU and PMMU huge, no need to distinguish here */
mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+ pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
+ mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
mutex_lock(&ctx->mmu_lock);
-
- if (mmu_prop->host_resident)
- rc = hdev->mmu_func[MMU_HR_PGT].get_tlb_info(ctx,
- virt_addr, hops);
- else
- rc = hdev->mmu_func[MMU_DR_PGT].get_tlb_info(ctx,
- virt_addr, hops);
-
+ rc = mmu_funcs->get_tlb_info(ctx, virt_addr, hops);
mutex_unlock(&ctx->mmu_lock);
+ if (rc)
+ return rc;
+
/* add page offset to physical address */
if (hops->unscrambled_paddr)
- hl_mmu_pa_page_with_offset(ctx, virt_addr, hops,
- &hops->unscrambled_paddr);
+ hl_mmu_pa_page_with_offset(ctx, virt_addr, hops, &hops->unscrambled_paddr);
- return rc;
+ return 0;
}
int hl_mmu_if_set_funcs(struct hl_device *hdev)
@@ -662,6 +665,55 @@ int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
return rc;
}
+static void hl_mmu_prefetch_work_function(struct work_struct *work)
+{
+ struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, pf_work);
+ struct hl_ctx *ctx = pfw->ctx;
+
+ if (!hl_device_operational(ctx->hdev, NULL))
+ goto put_ctx;
+
+ mutex_lock(&ctx->mmu_lock);
+
+ ctx->hdev->asic_funcs->mmu_prefetch_cache_range(ctx, pfw->flags, pfw->asid,
+ pfw->va, pfw->size);
+
+ mutex_unlock(&ctx->mmu_lock);
+
+put_ctx:
+ /*
+ * context was taken in the common mmu prefetch function - see comment there about
+ * context handling.
+ */
+ hl_ctx_put(ctx);
+ kfree(pfw);
+}
+
+int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size)
+{
+ struct hl_prefetch_work *handle_pf_work;
+
+ handle_pf_work = kmalloc(sizeof(*handle_pf_work), GFP_KERNEL);
+ if (!handle_pf_work)
+ return -ENOMEM;
+
+ INIT_WORK(&handle_pf_work->pf_work, hl_mmu_prefetch_work_function);
+ handle_pf_work->ctx = ctx;
+ handle_pf_work->va = va;
+ handle_pf_work->size = size;
+ handle_pf_work->flags = flags;
+ handle_pf_work->asid = asid;
+
+ /*
+ * as actual prefetch is done in a WQ we must get the context (and put it
+ * at the end of the work function)
+ */
+ hl_ctx_get(ctx);
+ queue_work(ctx->hdev->pf_wq, &handle_pf_work->pf_work);
+
+ return 0;
+}
+
u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
return (curr_pte & PAGE_PRESENT_MASK) ? (curr_pte & HOP_PHYS_ADDR_MASK) : ULLONG_MAX;
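The prefetch plumbing added in this hunk is a standard deferred-work pattern: package the request, take a reference on the owning context so it stays alive while the work is pending, and release both in the work handler. A minimal, self-contained sketch of that pattern (illustrative names and structures, not the driver's exact code):

	#include <linux/types.h>
	#include <linux/workqueue.h>
	#include <linux/slab.h>
	#include <linux/kref.h>

	struct demo_ctx {
		struct kref refcount;
		/* ... per-context MMU state ... */
	};

	struct demo_prefetch_req {
		struct work_struct work;
		struct demo_ctx *ctx;
		u64 va;
		u64 size;
	};

	static void demo_ctx_release(struct kref *kref)
	{
		kfree(container_of(kref, struct demo_ctx, refcount));
	}

	static void demo_prefetch_work(struct work_struct *work)
	{
		struct demo_prefetch_req *req = container_of(work, struct demo_prefetch_req, work);

		/* ... program the MMU prefetcher for req->va / req->size ... */

		kref_put(&req->ctx->refcount, demo_ctx_release);	/* pairs with the get below */
		kfree(req);
	}

	static int demo_queue_prefetch(struct demo_ctx *ctx, u64 va, u64 size)
	{
		struct demo_prefetch_req *req = kmalloc(sizeof(*req), GFP_KERNEL);

		if (!req)
			return -ENOMEM;

		INIT_WORK(&req->work, demo_prefetch_work);
		req->ctx = ctx;
		req->va = va;
		req->size = size;

		kref_get(&ctx->refcount);	/* keep the context alive until the work runs */
		queue_work(system_wq, &req->work);

		return 0;
	}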
@@ -670,6 +722,7 @@ u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
/**
* hl_mmu_get_hop_pte_phys_addr() - extract PTE address from HOP
* @ctx: pointer to the context structure to initialize.
+ * @mmu_prop: MMU properties.
* @hop_idx: HOP index.
* @hop_addr: HOP address.
* @virt_addr: virtual address for the translation.
@@ -686,33 +739,8 @@ u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *m
return U64_MAX;
}
- /* currently max number of HOPs is 6 */
- switch (hop_idx) {
- case 0:
- mask = mmu_prop->hop0_mask;
- shift = mmu_prop->hop0_shift;
- break;
- case 1:
- mask = mmu_prop->hop1_mask;
- shift = mmu_prop->hop1_shift;
- break;
- case 2:
- mask = mmu_prop->hop2_mask;
- shift = mmu_prop->hop2_shift;
- break;
- case 3:
- mask = mmu_prop->hop3_mask;
- shift = mmu_prop->hop3_shift;
- break;
- case 4:
- mask = mmu_prop->hop4_mask;
- shift = mmu_prop->hop4_shift;
- break;
- default:
- mask = mmu_prop->hop5_mask;
- shift = mmu_prop->hop5_shift;
- break;
- }
+ shift = mmu_prop->hop_shifts[hop_idx];
+ mask = mmu_prop->hop_masks[hop_idx];
return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
}
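The hop_shifts[]/hop_masks[] lookup above replaces a hand-written six-way switch. With 512-entry hop tables (9 index bits per hop), 4 KB pages and 8-byte PTEs - values assumed here purely for illustration - the whole computation can be derived rather than enumerated. A small userspace sketch of that arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	#define HOPS		5	/* assumed depth, matching MMU_V1_MAX_HOPS below */
	#define HOP_BITS	9	/* 512 PTEs per hop table */
	#define PAGE_SHIFT_4K	12
	#define PTE_SIZE	8

	static uint64_t hop_shift(int hop) { return PAGE_SHIFT_4K + (HOPS - 1 - hop) * HOP_BITS; }
	static uint64_t hop_mask(int hop)  { return ((1ULL << HOP_BITS) - 1) << hop_shift(hop); }

	/* address of the PTE that translates 'va' inside the hop table located at 'hop_addr' */
	static uint64_t hop_pte_addr(uint64_t hop_addr, uint64_t va, int hop)
	{
		return hop_addr + PTE_SIZE * ((va & hop_mask(hop)) >> hop_shift(hop));
	}

	int main(void)
	{
		uint64_t va = 0x0000123456789000ULL;
		int hop;

		for (hop = 0; hop < HOPS; hop++)
			printf("hop%d: index %llu, pte at 0x%llx\n", hop,
			       (unsigned long long)((va & hop_mask(hop)) >> hop_shift(hop)),
			       (unsigned long long)hop_pte_addr(0x1000, va, hop));

		return 0;
	}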
diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
index d03786d0c407..e2d91a69acc2 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
@@ -10,6 +10,8 @@
#include <linux/slab.h>
+#define MMU_V1_MAX_HOPS (MMU_HOP4 + 1)
+
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
@@ -170,51 +172,15 @@ static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
return num_of_ptes_left;
}
-static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr, u64 mask, u64 shift)
-{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & mask) >> shift);
-}
-
-static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
- struct hl_mmu_properties *mmu_prop,
- u64 hop_addr, u64 vaddr)
-{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask,
- mmu_prop->hop0_shift);
-}
-
-static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
- struct hl_mmu_properties *mmu_prop,
- u64 hop_addr, u64 vaddr)
-{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask,
- mmu_prop->hop1_shift);
-}
-
-static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
- struct hl_mmu_properties *mmu_prop,
- u64 hop_addr, u64 vaddr)
+static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
+ u64 *hop_addr_arr, u64 virt_addr, enum mmu_hop_num hop_idx)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask,
- mmu_prop->hop2_shift);
-}
+ u64 mask, shift;
-static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
- struct hl_mmu_properties *mmu_prop,
- u64 hop_addr, u64 vaddr)
-{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask,
- mmu_prop->hop3_shift);
-}
-
-static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
- struct hl_mmu_properties *mmu_prop,
- u64 hop_addr, u64 vaddr)
-{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask,
- mmu_prop->hop4_shift);
+ mask = mmu_prop->hop_masks[hop_idx];
+ shift = mmu_prop->hop_shifts[hop_idx];
+ return hop_addr_arr[hop_idx] +
+ ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
}
static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
@@ -516,74 +482,50 @@ static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx)
}
}
-static int _hl_mmu_v1_unmap(struct hl_ctx *ctx,
+static int hl_mmu_v1_unmap(struct hl_ctx *ctx,
u64 virt_addr, bool is_dram_addr)
{
+ u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_mmu_properties *mmu_prop;
- u64 hop0_addr = 0, hop0_pte_addr = 0,
- hop1_addr = 0, hop1_pte_addr = 0,
- hop2_addr = 0, hop2_pte_addr = 0,
- hop3_addr = 0, hop3_pte_addr = 0,
- hop4_addr = 0, hop4_pte_addr = 0,
- curr_pte;
bool is_huge, clear_hop3 = true;
+ int hop_idx;
/* shifts and masks are the same in PMMU and HPMMU, use one of them */
mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
- hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
-
- curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
-
- hop1_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
-
- if (hop1_addr == ULLONG_MAX)
- goto not_mapped;
-
- hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
-
- curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
-
- hop2_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
-
- if (hop2_addr == ULLONG_MAX)
- goto not_mapped;
-
- hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
-
- curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
-
- hop3_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
-
- if (hop3_addr == ULLONG_MAX)
- goto not_mapped;
+ for (hop_idx = MMU_HOP0; hop_idx < MMU_HOP4; hop_idx++) {
+ if (hop_idx == MMU_HOP0) {
+ hop_addr[hop_idx] = get_hop0_addr(ctx);
+ } else {
+ hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+ if (hop_addr[hop_idx] == ULLONG_MAX)
+ goto not_mapped;
+ }
- hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
+ hop_pte_addr[hop_idx] =
+ get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
- curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
+ curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
+ }
is_huge = curr_pte & mmu_prop->last_mask;
if (is_dram_addr && !is_huge) {
- dev_err(hdev->dev,
- "DRAM unmapping should use huge pages only\n");
+ dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
return -EFAULT;
}
if (!is_huge) {
- hop4_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
-
- if (hop4_addr == ULLONG_MAX)
+ hop_idx = MMU_HOP4;
+ hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+ if (hop_addr[hop_idx] == ULLONG_MAX)
goto not_mapped;
- hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
- virt_addr);
-
- curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
-
+ hop_pte_addr[hop_idx] =
+ get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
+ curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
clear_hop3 = false;
}
@@ -605,39 +547,33 @@ static int _hl_mmu_v1_unmap(struct hl_ctx *ctx,
goto not_mapped;
}
- write_final_pte(ctx, hop3_pte_addr, default_pte);
- put_pte(ctx, hop3_addr);
+ hop_idx = MMU_HOP3;
+ write_final_pte(ctx, hop_pte_addr[hop_idx], default_pte);
+ put_pte(ctx, hop_addr[hop_idx]);
} else {
if (!(curr_pte & PAGE_PRESENT_MASK))
goto not_mapped;
- if (hop4_addr)
- clear_pte(ctx, hop4_pte_addr);
+ if (hop_addr[MMU_HOP4])
+ clear_pte(ctx, hop_pte_addr[MMU_HOP4]);
else
- clear_pte(ctx, hop3_pte_addr);
+ clear_pte(ctx, hop_pte_addr[MMU_HOP3]);
- if (hop4_addr && !put_pte(ctx, hop4_addr))
+ if (hop_addr[MMU_HOP4] && !put_pte(ctx, hop_addr[MMU_HOP4]))
clear_hop3 = true;
if (!clear_hop3)
goto mapped;
- clear_pte(ctx, hop3_pte_addr);
+ for (hop_idx = MMU_HOP3; hop_idx >= 0; hop_idx--) {
+ clear_pte(ctx, hop_pte_addr[hop_idx]);
- if (put_pte(ctx, hop3_addr))
- goto mapped;
+ if (hop_idx == MMU_HOP0)
+ break;
- clear_pte(ctx, hop2_pte_addr);
-
- if (put_pte(ctx, hop2_addr))
- goto mapped;
-
- clear_pte(ctx, hop1_pte_addr);
-
- if (put_pte(ctx, hop1_addr))
- goto mapped;
-
- clear_pte(ctx, hop0_pte_addr);
+ if (put_pte(ctx, hop_addr[hop_idx]))
+ goto mapped;
+ }
}
mapped:
@@ -650,21 +586,15 @@ not_mapped:
return -EINVAL;
}
-static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+static int hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
u32 page_size, bool is_dram_addr)
{
+ u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_mmu_properties *mmu_prop;
- u64 hop0_addr = 0, hop0_pte_addr = 0,
- hop1_addr = 0, hop1_pte_addr = 0,
- hop2_addr = 0, hop2_pte_addr = 0,
- hop3_addr = 0, hop3_pte_addr = 0,
- hop4_addr = 0, hop4_pte_addr = 0,
- curr_pte = 0;
- bool hop1_new = false, hop2_new = false, hop3_new = false,
- hop4_new = false, is_huge;
- int rc = -ENOMEM;
+ bool is_huge, hop_new[MMU_V1_MAX_HOPS] = {false};
+ int num_hops, hop_idx, prev_hop, rc = -ENOMEM;
/*
* This mapping function can map a page or a huge page. For huge page
@@ -684,39 +614,21 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
is_huge = false;
}
- hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
- curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
-
- hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
- if (hop1_addr == ULLONG_MAX)
- goto err;
-
- hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
- curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
-
- hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
- if (hop2_addr == ULLONG_MAX)
- goto err;
-
- hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
- curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
+ num_hops = is_huge ? (MMU_V1_MAX_HOPS - 1) : MMU_V1_MAX_HOPS;
- hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
- if (hop3_addr == ULLONG_MAX)
- goto err;
-
- hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
- curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
-
- if (!is_huge) {
- hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
- if (hop4_addr == ULLONG_MAX)
- goto err;
+ for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++) {
+ if (hop_idx == MMU_HOP0) {
+ hop_addr[hop_idx] = get_hop0_addr(ctx);
+ } else {
+ hop_addr[hop_idx] =
+ get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[hop_idx]);
+ if (hop_addr[hop_idx] == ULLONG_MAX)
+ goto err;
+ }
- hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
- virt_addr);
- curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
+ hop_pte_addr[hop_idx] =
+ get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
+ curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
}
if (hdev->dram_default_page_mapping && is_dram_addr) {
@@ -732,30 +644,22 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
goto err;
}
- if (hop1_new || hop2_new || hop3_new || hop4_new) {
- dev_err(hdev->dev,
- "DRAM mapping should not allocate more hops\n");
- rc = -EFAULT;
- goto err;
+ for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
+ if (hop_new[hop_idx]) {
+ dev_err(hdev->dev, "DRAM mapping should not allocate more hops\n");
+ rc = -EFAULT;
+ goto err;
+ }
}
} else if (curr_pte & PAGE_PRESENT_MASK) {
dev_err(hdev->dev,
"mapping already exists for virt_addr 0x%llx\n",
virt_addr);
- dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
- *(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr);
- dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
- *(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr);
- dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
- *(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr);
- dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
- *(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr);
-
- if (!is_huge)
- dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
- *(u64 *) (uintptr_t) hop4_pte_addr,
- hop4_pte_addr);
+ for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++)
+ dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n", hop_idx,
+ *(u64 *) (uintptr_t) hop_pte_addr[hop_idx],
+ hop_pte_addr[hop_idx]);
rc = -EINVAL;
goto err;
@@ -764,53 +668,28 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
| PAGE_PRESENT_MASK;
- if (is_huge)
- write_final_pte(ctx, hop3_pte_addr, curr_pte);
- else
- write_final_pte(ctx, hop4_pte_addr, curr_pte);
+ write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte);
- if (hop1_new) {
- curr_pte =
- (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop0_pte_addr, curr_pte);
- }
- if (hop2_new) {
- curr_pte =
- (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop1_pte_addr, curr_pte);
- get_pte(ctx, hop1_addr);
- }
- if (hop3_new) {
- curr_pte =
- (hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop2_pte_addr, curr_pte);
- get_pte(ctx, hop2_addr);
- }
+ for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
+ prev_hop = hop_idx - 1;
- if (!is_huge) {
- if (hop4_new) {
- curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) |
- PAGE_PRESENT_MASK;
- write_pte(ctx, hop3_pte_addr, curr_pte);
- get_pte(ctx, hop3_addr);
+ if (hop_new[hop_idx]) {
+ curr_pte = (hop_addr[hop_idx] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ write_pte(ctx, hop_pte_addr[prev_hop], curr_pte);
+ if (hop_idx != MMU_HOP1)
+ get_pte(ctx, hop_addr[prev_hop]);
}
-
- get_pte(ctx, hop4_addr);
- } else {
- get_pte(ctx, hop3_addr);
}
+ get_pte(ctx, hop_addr[num_hops - 1]);
+
return 0;
err:
- if (hop4_new)
- free_hop(ctx, hop4_addr);
- if (hop3_new)
- free_hop(ctx, hop3_addr);
- if (hop2_new)
- free_hop(ctx, hop2_addr);
- if (hop1_new)
- free_hop(ctx, hop1_addr);
+ for (hop_idx = num_hops; hop_idx > MMU_HOP0; hop_idx--) {
+ if (hop_new[hop_idx])
+ free_hop(ctx, hop_addr[hop_idx]);
+ }
return rc;
}
@@ -928,8 +807,8 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
mmu->fini = hl_mmu_v1_fini;
mmu->ctx_init = hl_mmu_v1_ctx_init;
mmu->ctx_fini = hl_mmu_v1_ctx_fini;
- mmu->map = _hl_mmu_v1_map;
- mmu->unmap = _hl_mmu_v1_unmap;
+ mmu->map = hl_mmu_v1_map;
+ mmu->unmap = hl_mmu_v1_unmap;
mmu->flush = flush;
mmu->swap_out = hl_mmu_v1_swap_out;
mmu->swap_in = hl_mmu_v1_swap_in;
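The map/unmap rewrites in this file collapse several copies of near-identical per-hop handling into one loop over hop_addr[]/hop_pte_addr[] arrays. A compact sketch of the resulting control flow, with mocked helpers standing in for get_hop0_addr()/get_alloc_next_hop_addr() and the PTE reads (userspace, purely illustrative):

	#include <stdint.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_HOPS 5

	static uint64_t mock_hop0_addr(void)                        { return 0x1000; }
	static uint64_t mock_alloc_next(uint64_t pte, bool *is_new) { *is_new = !pte; return pte + 0x1000; }
	static uint64_t mock_read_pte(uint64_t pte_addr)            { return pte_addr + 0x100; }

	int main(void)
	{
		uint64_t hop_addr[MAX_HOPS] = {0}, curr_pte = 0;
		bool hop_new[MAX_HOPS] = {false};
		int hop;

		/* forward walk: hop 0 is fixed, each deeper hop is derived from the previous PTE */
		for (hop = 0; hop < MAX_HOPS; hop++) {
			hop_addr[hop] = (hop == 0) ? mock_hop0_addr()
						   : mock_alloc_next(curr_pte, &hop_new[hop]);
			curr_pte = mock_read_pte(hop_addr[hop]);
			printf("hop%d table at 0x%llx%s\n", hop,
			       (unsigned long long)hop_addr[hop], hop_new[hop] ? " (new)" : "");
		}

		/* error unwind: release only the tables this walk allocated, deepest first */
		for (hop = MAX_HOPS - 1; hop > 0; hop--)
			if (hop_new[hop])
				printf("would free hop%d table\n", hop);

		return 0;
	}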
diff --git a/drivers/misc/habanalabs/common/pci/pci.c b/drivers/misc/habanalabs/common/pci/pci.c
index bb9ce22bafc4..610acd4a8057 100644
--- a/drivers/misc/habanalabs/common/pci/pci.c
+++ b/drivers/misc/habanalabs/common/pci/pci.c
@@ -392,6 +392,7 @@ enum pci_region hl_get_pci_memory_region(struct hl_device *hdev, u64 addr)
*/
int hl_pci_init(struct hl_device *hdev)
{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pci_dev *pdev = hdev->pdev;
int rc;
@@ -419,17 +420,14 @@ int hl_pci_init(struct hl_device *hdev)
}
/* Driver must sleep in order for FW to finish the iATU configuration */
- if (hdev->asic_prop.iatu_done_by_fw) {
+ if (hdev->asic_prop.iatu_done_by_fw)
usleep_range(2000, 3000);
- hdev->asic_funcs->set_dma_mask_from_fw(hdev);
- }
- rc = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(hdev->dma_mask));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(prop->dma_mask));
if (rc) {
dev_err(hdev->dev,
"Failed to set dma mask to %d bits, error %d\n",
- hdev->dma_mask, rc);
+ prop->dma_mask, rc);
goto unmap_pci_bars;
}
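The net effect of this pci.c change is that the DMA mask width becomes a fixed per-ASIC property applied once during PCI init, rather than being filled in by a separate firmware callback. The applying side reduces to the usual pattern (sketch; the parameter name is illustrative):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static int example_apply_dma_mask(struct pci_dev *pdev, unsigned int dma_mask_bits)
	{
		int rc;

		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_mask_bits));
		if (rc)
			dev_err(&pdev->dev, "failed to set %u-bit DMA mask: %d\n",
				dma_mask_bits, rc);

		return rc;
	}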
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 21c2b678ff72..fba322241096 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -95,7 +95,7 @@
#define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE 3
-#define GAUDI_ARB_WDT_TIMEOUT 0x1000000
+#define GAUDI_ARB_WDT_TIMEOUT 0xEE6b27FF /* 8 seconds */
#define GAUDI_CLK_GATE_DEBUGFS_MASK (\
BIT(GAUDI_ENGINE_ID_MME_0) |\
@@ -557,6 +557,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
}
prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
+ prop->host_base_address = HOST_PHYS_BASE;
+ prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE;
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
prop->collective_first_sob = 0;
prop->collective_first_mon = 0;
@@ -595,18 +597,19 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
+ prop->device_mem_alloc_default_page_size = prop->dram_page_size;
prop->dram_supports_virtual_memory = false;
- prop->pmmu.hop0_shift = MMU_V1_1_HOP0_SHIFT;
- prop->pmmu.hop1_shift = MMU_V1_1_HOP1_SHIFT;
- prop->pmmu.hop2_shift = MMU_V1_1_HOP2_SHIFT;
- prop->pmmu.hop3_shift = MMU_V1_1_HOP3_SHIFT;
- prop->pmmu.hop4_shift = MMU_V1_1_HOP4_SHIFT;
- prop->pmmu.hop0_mask = MMU_V1_1_HOP0_MASK;
- prop->pmmu.hop1_mask = MMU_V1_1_HOP1_MASK;
- prop->pmmu.hop2_mask = MMU_V1_1_HOP2_MASK;
- prop->pmmu.hop3_mask = MMU_V1_1_HOP3_MASK;
- prop->pmmu.hop4_mask = MMU_V1_1_HOP4_MASK;
+ prop->pmmu.hop_shifts[MMU_HOP0] = MMU_V1_1_HOP0_SHIFT;
+ prop->pmmu.hop_shifts[MMU_HOP1] = MMU_V1_1_HOP1_SHIFT;
+ prop->pmmu.hop_shifts[MMU_HOP2] = MMU_V1_1_HOP2_SHIFT;
+ prop->pmmu.hop_shifts[MMU_HOP3] = MMU_V1_1_HOP3_SHIFT;
+ prop->pmmu.hop_shifts[MMU_HOP4] = MMU_V1_1_HOP4_SHIFT;
+ prop->pmmu.hop_masks[MMU_HOP0] = MMU_V1_1_HOP0_MASK;
+ prop->pmmu.hop_masks[MMU_HOP1] = MMU_V1_1_HOP1_MASK;
+ prop->pmmu.hop_masks[MMU_HOP2] = MMU_V1_1_HOP2_MASK;
+ prop->pmmu.hop_masks[MMU_HOP3] = MMU_V1_1_HOP3_MASK;
+ prop->pmmu.hop_masks[MMU_HOP4] = MMU_V1_1_HOP4_MASK;
prop->pmmu.start_addr = VA_HOST_SPACE_START;
prop->pmmu.end_addr =
(VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1;
@@ -673,6 +676,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->set_max_power_on_device_init = true;
+ prop->dma_mask = 48;
+
return 0;
}
@@ -754,8 +759,6 @@ static int gaudi_init_iatu(struct hl_device *hdev)
if (rc)
goto done;
- hdev->asic_funcs->set_dma_mask_from_fw(hdev);
-
/* Outbound Region 0 - Point to Host */
outbound_region.addr = HOST_PHYS_BASE;
outbound_region.size = HOST_PHYS_SIZE;
@@ -1008,7 +1011,7 @@ free_job:
release_cb:
hl_cb_put(cb);
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
return rc;
}
@@ -1470,7 +1473,7 @@ static int gaudi_collective_wait_create_job(struct hl_device *hdev,
job->patched_cb = NULL;
job->job_cb_size = job->user_cb_size;
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
/* increment refcount as for external queues we get completion */
if (hw_queue_prop->type == QUEUE_TYPE_EXT)
@@ -2808,9 +2811,8 @@ static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
QM_ARB_ERR_MSG_EN_MASK);
- /* Increase ARB WDT to support streams architecture */
- WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset,
- GAUDI_ARB_WDT_TIMEOUT);
+ /* Set timeout to maximum */
+ WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset, GAUDI_ARB_WDT_TIMEOUT);
WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
QMAN_EXTERNAL_MAKE_TRUSTED);
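For reference on the "8 seconds" comment attached to the new GAUDI_ARB_WDT_TIMEOUT value above: 0xEE6B27FF is 3,999,999,999, roughly 4.0e9 arbiter clock cycles, which matches 8 s only if the watchdog counts at 500 MHz. That clock rate is inferred here from the comment, not stated anywhere in the patch:

	0xEE6B27FF = 3,999,999,999 cycles ~= 4.0e9 cycles
	4.0e9 cycles / 500e6 cycles per second = 8 s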
@@ -2987,9 +2989,8 @@ static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id,
WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
QM_ARB_ERR_MSG_EN_MASK);
- /* Increase ARB WDT to support streams architecture */
- WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset,
- GAUDI_ARB_WDT_TIMEOUT);
+ /* Set timeout to maximum */
+ WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset, GAUDI_ARB_WDT_TIMEOUT);
WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0);
WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
@@ -3124,9 +3125,8 @@ static void gaudi_init_mme_qman(struct hl_device *hdev, u32 mme_offset,
WREG32(mmMME0_QM_ARB_ERR_MSG_EN + mme_offset,
QM_ARB_ERR_MSG_EN_MASK);
- /* Increase ARB WDT to support streams architecture */
- WREG32(mmMME0_QM_ARB_SLV_CHOISE_WDT + mme_offset,
- GAUDI_ARB_WDT_TIMEOUT);
+ /* Set timeout to maximum */
+ WREG32(mmMME0_QM_ARB_SLV_CHOISE_WDT + mme_offset, GAUDI_ARB_WDT_TIMEOUT);
WREG32(mmMME0_QM_GLBL_CFG1 + mme_offset, 0);
WREG32(mmMME0_QM_GLBL_PROT + mme_offset,
@@ -3258,9 +3258,8 @@ static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset,
WREG32(mmTPC0_QM_ARB_ERR_MSG_EN + tpc_offset,
QM_ARB_ERR_MSG_EN_MASK);
- /* Increase ARB WDT to support streams architecture */
- WREG32(mmTPC0_QM_ARB_SLV_CHOISE_WDT + tpc_offset,
- GAUDI_ARB_WDT_TIMEOUT);
+ /* Set timeout to maximum */
+ WREG32(mmTPC0_QM_ARB_SLV_CHOISE_WDT + tpc_offset, GAUDI_ARB_WDT_TIMEOUT);
WREG32(mmTPC0_QM_GLBL_CFG1 + tpc_offset, 0);
WREG32(mmTPC0_QM_GLBL_PROT + tpc_offset,
@@ -3409,9 +3408,8 @@ static void gaudi_init_nic_qman(struct hl_device *hdev, u32 nic_offset,
WREG32(mmNIC0_QM0_ARB_ERR_MSG_EN + nic_offset,
QM_ARB_ERR_MSG_EN_MASK);
- /* Increase ARB WDT to support streams architecture */
- WREG32(mmNIC0_QM0_ARB_SLV_CHOISE_WDT + nic_offset,
- GAUDI_ARB_WDT_TIMEOUT);
+ /* Set timeout to maximum */
+ WREG32(mmNIC0_QM0_ARB_SLV_CHOISE_WDT + nic_offset, GAUDI_ARB_WDT_TIMEOUT);
WREG32(mmNIC0_QM0_GLBL_CFG1 + nic_offset, 0);
WREG32(mmNIC0_QM0_GLBL_PROT + nic_offset,
@@ -3792,9 +3790,6 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_
{
u32 wait_timeout_ms;
- dev_info(hdev->dev,
- "Halting compute engines and disabling interrupts\n");
-
if (hdev->pldm)
wait_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
else
@@ -4212,7 +4207,7 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset
}
if (fw_reset) {
- dev_info(hdev->dev,
+ dev_dbg(hdev->dev,
"Firmware performs HARD reset, going to wait %dms\n",
reset_timeout_ms);
@@ -4304,11 +4299,11 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
- dev_info(hdev->dev,
+ dev_dbg(hdev->dev,
"Issued HARD reset command, going to wait %dms\n",
reset_timeout_ms);
} else {
- dev_info(hdev->dev,
+ dev_dbg(hdev->dev,
"Firmware performs HARD reset, going to wait %dms\n",
reset_timeout_ms);
}
@@ -4745,12 +4740,11 @@ static void gaudi_dma_free_coherent(struct hl_device *hdev, size_t size,
dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
}
-static int gaudi_hbm_scrubbing(struct hl_device *hdev)
+static int gaudi_scrub_device_dram(struct hl_device *hdev, u64 val)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 cur_addr = DRAM_BASE_ADDR_USER;
- u32 val;
- u32 chunk_size;
+ u32 chunk_size, busy;
int rc, dma_id;
while (cur_addr < prop->dram_end_address) {
@@ -4764,8 +4758,10 @@ static int gaudi_hbm_scrubbing(struct hl_device *hdev)
"Doing HBM scrubbing for 0x%09llx - 0x%09llx\n",
cur_addr, cur_addr + chunk_size);
- WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0xdeadbeaf);
- WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0xdeadbeaf);
+ WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset,
+ lower_32_bits(val));
+ WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset,
+ upper_32_bits(val));
WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset,
lower_32_bits(cur_addr));
WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset,
@@ -4788,8 +4784,8 @@ static int gaudi_hbm_scrubbing(struct hl_device *hdev)
rc = hl_poll_timeout(
hdev,
mmDMA0_CORE_STS0 + dma_offset,
- val,
- ((val & DMA0_CORE_STS0_BUSY_MASK) == 0),
+ busy,
+ ((busy & DMA0_CORE_STS0_BUSY_MASK) == 0),
1000,
HBM_SCRUBBING_TIMEOUT_US);
@@ -4843,7 +4839,7 @@ static int gaudi_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size)
}
/* Scrub HBM using all DMA channels in parallel */
- rc = gaudi_hbm_scrubbing(hdev);
+ rc = gaudi_scrub_device_dram(hdev, 0xdeadbeaf);
if (rc)
dev_err(hdev->dev,
"Failed to clear HBM in mem scrub all\n");
@@ -5038,37 +5034,7 @@ static void gaudi_cpu_accessible_dma_pool_free(struct hl_device *hdev,
hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
}
-static int gaudi_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
-
- if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
- return -ENOMEM;
-
- /* Shift to the device's base physical address of host memory */
- for_each_sg(sgl, sg, nents, i)
- sg->dma_address += HOST_PHYS_BASE;
-
- return 0;
-}
-
-static void gaudi_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
-
- /* Cancel the device's base physical address of host memory */
- for_each_sg(sgl, sg, nents, i)
- sg->dma_address -= HOST_PHYS_BASE;
-
- dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
-}
-
-static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev,
- struct sg_table *sgt)
+static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
{
struct scatterlist *sg, *sg_next_iter;
u32 count, dma_desc_cnt;
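The removed gaudi_dma_map_sg()/gaudi_dma_unmap_sg() wrappers are replaced by common sg_table-based callbacks. Their essential job - map the whole table, then bias every DMA address by the device's view of host memory - can be sketched as below. Hedged: the real hl_dma_map_sgtable()/hl_dma_unmap_sgtable() may differ in detail, and host_phys_base is a stand-in for the HOST_PHYS_BASE-style offset:

	#include <linux/types.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static int example_dma_map_sgtable(struct device *dev, struct sg_table *sgt,
					   enum dma_data_direction dir, u64 host_phys_base)
	{
		struct scatterlist *sg;
		int rc, i;

		rc = dma_map_sgtable(dev, sgt, dir, 0);
		if (rc)
			return rc;

		/* shift to the device's base physical address of host memory */
		for_each_sgtable_dma_sg(sgt, sg, i)
			sg->dma_address += host_phys_base;

		return 0;
	}

	static void example_dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
					      enum dma_data_direction dir, u64 host_phys_base)
	{
		struct scatterlist *sg;
		int i;

		/* undo the bias before handing the table back to the DMA API */
		for_each_sgtable_dma_sg(sgt, sg, i)
			sg->dma_address -= host_phys_base;

		dma_unmap_sgtable(dev, sgt, dir, 0);
	}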
@@ -5077,8 +5043,7 @@ static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev,
dma_desc_cnt = 0;
- for_each_sg(sgt->sgl, sg, sgt->nents, count) {
-
+ for_each_sgtable_dma_sg(sgt, sg, count) {
len = sg_dma_len(sg);
addr = sg_dma_address(sg);
@@ -5132,8 +5097,7 @@ static int gaudi_pin_memory_before_cs(struct hl_device *hdev,
list_add_tail(&userptr->job_node, parser->job_userptr_list);
- rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
- userptr->sgt->nents, dir);
+ rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, dir);
if (rc) {
dev_err(hdev->dev, "failed to map sgt with DMA region\n");
goto unpin_memory;
@@ -5408,7 +5372,7 @@ static int gaudi_patch_dma_packet(struct hl_device *hdev,
sgt = userptr->sgt;
dma_desc_cnt = 0;
- for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+ for_each_sgtable_dma_sg(sgt, sg, count) {
len = sg_dma_len(sg);
dma_addr = sg_dma_address(sg);
@@ -5562,7 +5526,7 @@ static int gaudi_patch_cb(struct hl_device *hdev,
static int gaudi_parse_cb_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
- u64 patched_cb_handle;
+ u64 handle;
u32 patched_cb_size;
struct hl_cb *user_cb;
int rc;
@@ -5578,9 +5542,9 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
else
parser->patched_cb_size = parser->user_cb_size;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+ rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
parser->patched_cb_size, false, false,
- &patched_cb_handle);
+ &handle);
if (rc) {
dev_err(hdev->dev,
@@ -5589,13 +5553,10 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
return rc;
}
- patched_cb_handle >>= PAGE_SHIFT;
- parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
- (u32) patched_cb_handle);
+ parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
/* hl_cb_get should never fail */
if (!parser->patched_cb) {
- dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
rc = -EFAULT;
goto out;
}
@@ -5635,8 +5596,7 @@ out:
* cb_put will release it, but here we want to remove it from the
* idr
*/
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
- patched_cb_handle << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
return rc;
}
@@ -5644,7 +5604,7 @@ out:
static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
- u64 patched_cb_handle;
+ u64 handle;
int rc;
rc = gaudi_validate_cb(hdev, parser, false);
@@ -5652,22 +5612,19 @@ static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
if (rc)
goto free_userptr;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+ rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
parser->patched_cb_size, false, false,
- &patched_cb_handle);
+ &handle);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n", rc);
goto free_userptr;
}
- patched_cb_handle >>= PAGE_SHIFT;
- parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
- (u32) patched_cb_handle);
+ parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
/* hl_cb_get should never fail here */
if (!parser->patched_cb) {
- dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
rc = -EFAULT;
goto out;
}
@@ -5684,8 +5641,7 @@ out:
* cb_put will release it, but here we want to remove it from the
* idr
*/
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
- patched_cb_handle << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
free_userptr:
if (rc)
@@ -5798,7 +5754,6 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
struct hl_cs_job *job;
u32 cb_size, ctl, err_cause;
struct hl_cb *cb;
- u64 id;
int rc;
cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
@@ -5865,9 +5820,8 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
}
release_cb:
- id = cb->id;
hl_cb_put(cb);
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, id << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
return rc;
}
@@ -5930,7 +5884,7 @@ static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base,
release_cb:
hl_cb_put(cb);
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
return rc;
}
@@ -6101,184 +6055,6 @@ static void gaudi_restore_phase_topology(struct hl_device *hdev)
}
-static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr,
- bool user_address, u32 *val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 hbm_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
-
- *val = RREG32(addr - CFG_BASE);
-
- } else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
-
- *val = readl(hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));
-
- } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
-
- if (hbm_bar_addr != U64_MAX) {
- *val = readl(hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
- }
-
- if (hbm_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
-
- *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr,
- bool user_address, u32 val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 hbm_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
-
- WREG32(addr - CFG_BASE, val);
-
- } else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
-
- writel(val, hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));
-
- } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
-
- if (hbm_bar_addr != U64_MAX) {
- writel(val, hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
- }
-
- if (hbm_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
-
- *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr,
- bool user_address, u64 *val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 hbm_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
-
- u32 val_l = RREG32(addr - CFG_BASE);
- u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
-
- *val = (((u64) val_h) << 32) | val_l;
-
- } else if ((addr >= SRAM_BASE_ADDR) &&
- (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
-
- *val = readq(hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));
-
- } else if (addr <= DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
-
- if (hbm_bar_addr != U64_MAX) {
- *val = readq(hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
- }
-
- if (hbm_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
-
- *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr,
- bool user_address, u64 val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 hbm_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
-
- WREG32(addr - CFG_BASE, lower_32_bits(val));
- WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val));
-
- } else if ((addr >= SRAM_BASE_ADDR) &&
- (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
-
- writeq(val, hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));
-
- } else if (addr <= DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
-
- if (hbm_bar_addr != U64_MAX) {
- writeq(val, hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
- hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
- }
-
- if (hbm_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
-
- *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
static int gaudi_dma_core_transfer(struct hl_device *hdev, int dma_id, u64 addr,
u32 size_to_dma, dma_addr_t dma_addr)
{
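All four per-ASIC debugfs accessors deleted above repeat the same address classification; the common .access_dev_mem callback they were folded into can be pictured as one dispatcher over memory regions. A rough skeleton only - the region names and helpers here are illustrative, not the driver's real symbols:

	#include <linux/types.h>

	enum demo_region { DEMO_CFG, DEMO_SRAM, DEMO_DRAM, DEMO_HOST, DEMO_NONE };

	static enum demo_region demo_classify(u64 addr)
	{
		/* compare addr against the CFG / SRAM / DRAM / host windows ... */
		return DEMO_NONE;
	}

	static int demo_access_dev_mem(u64 addr, u64 *val, bool is_write)
	{
		switch (demo_classify(addr)) {
		case DEMO_CFG:	/* RREG32()/WREG32() relative to CFG_BASE */		return 0;
		case DEMO_SRAM:	/* readl()/writel() through the SRAM BAR */		return 0;
		case DEMO_DRAM:	/* move the DRAM BAR, then readl()/writel() */		return 0;
		case DEMO_HOST:	/* phys_to_virt() access when no IOMMU is present */	return 0;
		default:							return -EFAULT;
		}
	}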
@@ -7628,19 +7404,18 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, &razwi_type);
/* In case it's the first razwi, save its parameters*/
- rc = atomic_cmpxchg(&hdev->last_error.razwi_write_disable, 0, 1);
+ rc = atomic_cmpxchg(&hdev->last_error.razwi.write_disable, 0, 1);
if (!rc) {
- hdev->last_error.open_dev_timestamp = hdev->last_successful_open_ktime;
- hdev->last_error.razwi_timestamp = ktime_get();
- hdev->last_error.razwi_addr = razwi_addr;
- hdev->last_error.razwi_engine_id_1 = engine_id_1;
- hdev->last_error.razwi_engine_id_2 = engine_id_2;
+ hdev->last_error.razwi.timestamp = ktime_get();
+ hdev->last_error.razwi.addr = razwi_addr;
+ hdev->last_error.razwi.engine_id_1 = engine_id_1;
+ hdev->last_error.razwi.engine_id_2 = engine_id_2;
/*
* If first engine id holds non valid value the razwi initiator
* does not have engine id
*/
- hdev->last_error.razwi_non_engine_initiator = (engine_id_1 == U16_MAX);
- hdev->last_error.razwi_type = razwi_type;
+ hdev->last_error.razwi.non_engine_initiator = (engine_id_1 == U16_MAX);
+ hdev->last_error.razwi.type = razwi_type;
}
}
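The hdev->last_error.razwi.* accesses above imply the flat razwi_* fields were regrouped into a nested struct. Reconstructed from the usage in this hunk only (field widths are guesses, not the header's exact definition):

	#include <linux/types.h>
	#include <linux/ktime.h>
	#include <linux/atomic.h>

	struct demo_razwi_info {
		ktime_t		timestamp;
		atomic_t	write_disable;	/* only the first razwi is recorded */
		u64		addr;
		u16		engine_id_1;
		u16		engine_id_2;
		u8		non_engine_initiator;
		u8		type;
	};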
@@ -8103,7 +7878,6 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_MMU_PAGE_FAULT:
case GAUDI_EVENT_MMU_WR_PERM:
case GAUDI_EVENT_RAZWI_OR_ADC:
- case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
fallthrough;
@@ -8123,6 +7897,19 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
hl_fw_unmask_irq(hdev, event_type);
break;
+ case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
+ gaudi_print_irq_info(hdev, event_type, true);
+ gaudi_handle_qman_err(hdev, event_type);
+ hl_fw_unmask_irq(hdev, event_type);
+
+ /* In TPC QM event, notify on TPC assertion. While there isn't
+ * a specific event for an assertion yet, the FW generates a QM event.
+ * The SW upper layer will inspect an internal mapped area to determine
+ * whether the event is a TPC assertion or a TPC QM error.
+ */
+ hl_notifier_event_send_all(hdev, HL_NOTIFIER_EVENT_TPC_ASSERT);
+ break;
+
case GAUDI_EVENT_RAZWI_OR_ADC_SW:
gaudi_print_irq_info(hdev, event_type, true);
goto reset_device;
@@ -8328,8 +8115,6 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
set_default_power_values(hdev);
- hdev->max_power = prop->max_power_default;
-
return 0;
}
@@ -8501,6 +8286,16 @@ static int gaudi_get_eeprom_data(struct hl_device *hdev, void *data,
return hl_fw_get_eeprom_data(hdev, data, max_size);
}
+static int gaudi_get_monitor_dump(struct hl_device *hdev, void *data)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ return hl_fw_get_monitor_dump(hdev, data);
+}
+
/*
* this function should be used only during initialization and/or after reset,
* when there are no active users.
@@ -9066,11 +8861,6 @@ static void gaudi_reset_sob(struct hl_device *hdev, void *data)
kref_init(&hw_sob->kref);
}
-static void gaudi_set_dma_mask_from_fw(struct hl_device *hdev)
-{
- hdev->dma_mask = 48;
-}
-
static u64 gaudi_get_device_time(struct hl_device *hdev)
{
u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
@@ -9132,7 +8922,7 @@ static int gaudi_add_sync_to_engine_map_entry(
*/
if (reg_value == 0 || reg_value == 0xffffffff)
return 0;
- reg_value -= (u32)CFG_BASE;
+ reg_value -= lower_32_bits(CFG_BASE);
/* create a new hash entry */
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
@@ -9377,6 +9167,12 @@ static u32 *gaudi_get_stream_master_qid_arr(void)
return gaudi_stream_master;
}
+static void gaudi_get_valid_dram_page_orders(struct hl_info_dev_memalloc_page_sizes *info)
+{
+ /* set 0 since multiple pages are not supported */
+ info->page_order_bitmask = 0;
+}
+
static ssize_t infineon_ver_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
@@ -9418,24 +9214,21 @@ static const struct hl_asic_funcs gaudi_funcs = {
.asic_dma_alloc_coherent = gaudi_dma_alloc_coherent,
.asic_dma_free_coherent = gaudi_dma_free_coherent,
.scrub_device_mem = gaudi_scrub_device_mem,
+ .scrub_device_dram = gaudi_scrub_device_dram,
.get_int_queue_base = gaudi_get_int_queue_base,
.test_queues = gaudi_test_queues,
.asic_dma_pool_zalloc = gaudi_dma_pool_zalloc,
.asic_dma_pool_free = gaudi_dma_pool_free,
.cpu_accessible_dma_pool_alloc = gaudi_cpu_accessible_dma_pool_alloc,
.cpu_accessible_dma_pool_free = gaudi_cpu_accessible_dma_pool_free,
- .hl_dma_unmap_sg = gaudi_dma_unmap_sg,
+ .hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
.cs_parser = gaudi_cs_parser,
- .asic_dma_map_sg = gaudi_dma_map_sg,
+ .asic_dma_map_sgtable = hl_dma_map_sgtable,
.get_dma_desc_list_size = gaudi_get_dma_desc_list_size,
.add_end_of_cb_packets = gaudi_add_end_of_cb_packets,
.update_eq_ci = gaudi_update_eq_ci,
.context_switch = gaudi_context_switch,
.restore_phase_topology = gaudi_restore_phase_topology,
- .debugfs_read32 = gaudi_debugfs_read32,
- .debugfs_write32 = gaudi_debugfs_write32,
- .debugfs_read64 = gaudi_debugfs_read64,
- .debugfs_write64 = gaudi_debugfs_write64,
.debugfs_read_dma = gaudi_debugfs_read_dma,
.add_device_attr = gaudi_add_device_attr,
.handle_eqe = gaudi_handle_eqe,
@@ -9444,6 +9237,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.write_pte = gaudi_write_pte,
.mmu_invalidate_cache = gaudi_mmu_invalidate_cache,
.mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range,
+ .mmu_prefetch_cache_range = NULL,
.send_heartbeat = gaudi_send_heartbeat,
.debug_coresight = gaudi_debug_coresight,
.is_device_idle = gaudi_is_device_idle,
@@ -9452,6 +9246,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.hw_queues_unlock = gaudi_hw_queues_unlock,
.get_pci_id = gaudi_get_pci_id,
.get_eeprom_data = gaudi_get_eeprom_data,
+ .get_monitor_dump = gaudi_get_monitor_dump,
.send_cpu_message = gaudi_send_cpu_message,
.pci_bars_map = gaudi_pci_bars_map,
.init_iatu = gaudi_init_iatu,
@@ -9469,7 +9264,6 @@ static const struct hl_asic_funcs gaudi_funcs = {
.gen_wait_cb = gaudi_gen_wait_cb,
.reset_sob = gaudi_reset_sob,
.reset_sob_group = gaudi_reset_sob_group,
- .set_dma_mask_from_fw = gaudi_set_dma_mask_from_fw,
.get_device_time = gaudi_get_device_time,
.collective_wait_init_cs = gaudi_collective_wait_init_cs,
.collective_wait_create_jobs = gaudi_collective_wait_create_jobs,
@@ -9486,7 +9280,11 @@ static const struct hl_asic_funcs gaudi_funcs = {
.get_sob_addr = gaudi_get_sob_addr,
.set_pci_memory_regions = gaudi_set_pci_memory_regions,
.get_stream_master_qid_arr = gaudi_get_stream_master_qid_arr,
- .is_valid_dram_page_size = NULL
+ .is_valid_dram_page_size = NULL,
+ .mmu_get_real_page_size = hl_mmu_get_real_page_size,
+ .get_valid_dram_page_orders = gaudi_get_valid_dram_page_orders,
+ .access_dev_mem = hl_access_dev_mem,
+ .set_dram_bar_base = gaudi_set_hbm_bar_base,
};
/**
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index 54de7c599072..4fbcf3f0afe5 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -148,14 +148,14 @@
#define MME_QMAN_LENGTH 1024
#define MME_QMAN_SIZE_IN_BYTES (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
-#define HBM_DMA_QMAN_LENGTH 1024
+#define HBM_DMA_QMAN_LENGTH 4096
#define HBM_DMA_QMAN_SIZE_IN_BYTES \
(HBM_DMA_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
#define TPC_QMAN_LENGTH 1024
#define TPC_QMAN_SIZE_IN_BYTES (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
-#define NIC_QMAN_LENGTH 1024
+#define NIC_QMAN_LENGTH 4096
#define NIC_QMAN_SIZE_IN_BYTES (NIC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index ec9358bcbf0b..4cde505a7416 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -390,6 +390,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
}
prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
+ prop->host_base_address = HOST_PHYS_BASE;
+ prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE;
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
prop->dram_base_address = DRAM_PHYS_BASE;
@@ -413,18 +415,19 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
+ prop->device_mem_alloc_default_page_size = prop->dram_page_size;
prop->dram_supports_virtual_memory = true;
- prop->dmmu.hop0_shift = MMU_V1_0_HOP0_SHIFT;
- prop->dmmu.hop1_shift = MMU_V1_0_HOP1_SHIFT;
- prop->dmmu.hop2_shift = MMU_V1_0_HOP2_SHIFT;
- prop->dmmu.hop3_shift = MMU_V1_0_HOP3_SHIFT;
- prop->dmmu.hop4_shift = MMU_V1_0_HOP4_SHIFT;
- prop->dmmu.hop0_mask = MMU_V1_0_HOP0_MASK;
- prop->dmmu.hop1_mask = MMU_V1_0_HOP1_MASK;
- prop->dmmu.hop2_mask = MMU_V1_0_HOP2_MASK;
- prop->dmmu.hop3_mask = MMU_V1_0_HOP3_MASK;
- prop->dmmu.hop4_mask = MMU_V1_0_HOP4_MASK;
+ prop->dmmu.hop_shifts[MMU_HOP0] = MMU_V1_0_HOP0_SHIFT;
+ prop->dmmu.hop_shifts[MMU_HOP1] = MMU_V1_0_HOP1_SHIFT;
+ prop->dmmu.hop_shifts[MMU_HOP2] = MMU_V1_0_HOP2_SHIFT;
+ prop->dmmu.hop_shifts[MMU_HOP3] = MMU_V1_0_HOP3_SHIFT;
+ prop->dmmu.hop_shifts[MMU_HOP4] = MMU_V1_0_HOP4_SHIFT;
+ prop->dmmu.hop_masks[MMU_HOP0] = MMU_V1_0_HOP0_MASK;
+ prop->dmmu.hop_masks[MMU_HOP1] = MMU_V1_0_HOP1_MASK;
+ prop->dmmu.hop_masks[MMU_HOP2] = MMU_V1_0_HOP2_MASK;
+ prop->dmmu.hop_masks[MMU_HOP3] = MMU_V1_0_HOP3_MASK;
+ prop->dmmu.hop_masks[MMU_HOP4] = MMU_V1_0_HOP4_MASK;
prop->dmmu.start_addr = VA_DDR_SPACE_START;
prop->dmmu.end_addr = VA_DDR_SPACE_END;
prop->dmmu.page_size = PAGE_SIZE_2MB;
@@ -487,6 +490,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->set_max_power_on_device_init = true;
+ prop->dma_mask = 48;
+
return 0;
}
@@ -574,8 +579,6 @@ static int goya_init_iatu(struct hl_device *hdev)
if (rc)
goto done;
- hdev->asic_funcs->set_dma_mask_from_fw(hdev);
-
/* Outbound Region 0 - Point to Host */
outbound_region.addr = HOST_PHYS_BASE;
outbound_region.size = HOST_PHYS_SIZE;
@@ -2479,9 +2482,6 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_r
{
u32 wait_timeout_ms;
- dev_info(hdev->dev,
- "Halting compute engines and disabling interrupts\n");
-
if (hdev->pldm)
wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
else
@@ -2825,12 +2825,12 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
goya_set_pll_refclk(hdev);
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
- dev_info(hdev->dev,
+ dev_dbg(hdev->dev,
"Issued HARD reset command, going to wait %dms\n",
reset_timeout_ms);
} else {
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
- dev_info(hdev->dev,
+ dev_dbg(hdev->dev,
"Issued SOFT reset command, going to wait %dms\n",
reset_timeout_ms);
}
@@ -3311,35 +3311,6 @@ void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
}
-static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
-
- if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
- return -ENOMEM;
-
- /* Shift to the device's base physical address of host memory */
- for_each_sg(sgl, sg, nents, i)
- sg->dma_address += HOST_PHYS_BASE;
-
- return 0;
-}
-
-static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
-
- /* Cancel the device's base physical address of host memory */
- for_each_sg(sgl, sg, nents, i)
- sg->dma_address -= HOST_PHYS_BASE;
-
- dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
-}
-
u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
{
struct scatterlist *sg, *sg_next_iter;
@@ -3349,8 +3320,7 @@ u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
dma_desc_cnt = 0;
- for_each_sg(sgt->sgl, sg, sgt->nents, count) {
-
+ for_each_sgtable_dma_sg(sgt, sg, count) {
len = sg_dma_len(sg);
addr = sg_dma_address(sg);
@@ -3404,8 +3374,7 @@ static int goya_pin_memory_before_cs(struct hl_device *hdev,
list_add_tail(&userptr->job_node, parser->job_userptr_list);
- rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
- userptr->sgt->nents, dir);
+ rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, dir);
if (rc) {
dev_err(hdev->dev, "failed to map sgt with DMA region\n");
goto unpin_memory;
@@ -3869,7 +3838,7 @@ static int goya_patch_dma_packet(struct hl_device *hdev,
sgt = userptr->sgt;
dma_desc_cnt = 0;
- for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+ for_each_sgtable_dma_sg(sgt, sg, count) {
len = sg_dma_len(sg);
dma_addr = sg_dma_address(sg);
@@ -4032,7 +4001,7 @@ static int goya_patch_cb(struct hl_device *hdev,
static int goya_parse_cb_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
- u64 patched_cb_handle;
+ u64 handle;
u32 patched_cb_size;
struct hl_cb *user_cb;
int rc;
@@ -4045,9 +4014,9 @@ static int goya_parse_cb_mmu(struct hl_device *hdev,
parser->patched_cb_size = parser->user_cb_size +
sizeof(struct packet_msg_prot) * 2;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+ rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
parser->patched_cb_size, false, false,
- &patched_cb_handle);
+ &handle);
if (rc) {
dev_err(hdev->dev,
@@ -4056,13 +4025,10 @@ static int goya_parse_cb_mmu(struct hl_device *hdev,
return rc;
}
- patched_cb_handle >>= PAGE_SHIFT;
- parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
- (u32) patched_cb_handle);
+ parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
/* hl_cb_get should never fail here */
if (!parser->patched_cb) {
- dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
rc = -EFAULT;
goto out;
}
@@ -4102,8 +4068,7 @@ out:
* cb_put will release it, but here we want to remove it from the
* idr
*/
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
- patched_cb_handle << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
return rc;
}
@@ -4111,7 +4076,7 @@ out:
static int goya_parse_cb_no_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
- u64 patched_cb_handle;
+ u64 handle;
int rc;
rc = goya_validate_cb(hdev, parser, false);
@@ -4119,22 +4084,19 @@ static int goya_parse_cb_no_mmu(struct hl_device *hdev,
if (rc)
goto free_userptr;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+ rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
parser->patched_cb_size, false, false,
- &patched_cb_handle);
+ &handle);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n", rc);
goto free_userptr;
}
- patched_cb_handle >>= PAGE_SHIFT;
- parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
- (u32) patched_cb_handle);
+ parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
/* hl_cb_get should never fail here */
if (!parser->patched_cb) {
- dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
- (u32) patched_cb_handle);
+ dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
rc = -EFAULT;
goto out;
}
@@ -4151,8 +4113,7 @@ out:
* cb_put will release it, but here we want to remove it from the
* idr
*/
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
- patched_cb_handle << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
free_userptr:
if (rc)
@@ -4259,224 +4220,7 @@ static void goya_clear_sm_regs(struct hl_device *hdev)
i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
}
-/*
- * goya_debugfs_read32 - read a 32bit value from a given device or a host mapped
- * address.
- *
- * @hdev: pointer to hl_device structure
- * @addr: device or host mapped address
- * @val: returned value
- *
- * In case of DDR address that is not mapped into the default aperture that
- * the DDR bar exposes, the function will configure the iATU so that the DDR
- * bar will be positioned at a base address that allows reading from the
- * required address. Configuring the iATU during normal operation can
- * lead to undefined behavior and therefore, should be done with extreme care
- *
- */
-static int goya_debugfs_read32(struct hl_device *hdev, u64 addr,
- bool user_address, u32 *val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 ddr_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
- *val = RREG32(addr - CFG_BASE);
-
- } else if ((addr >= SRAM_BASE_ADDR) &&
- (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
-
- *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
- (addr - SRAM_BASE_ADDR));
-
- } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE +
- (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
- if (ddr_bar_addr != U64_MAX) {
- *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
- (addr - bar_base_addr));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev,
- ddr_bar_addr);
- }
- if (ddr_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
- *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-/*
- * goya_debugfs_write32 - write a 32bit value to a given device or a host mapped
- * address.
- *
- * @hdev: pointer to hl_device structure
- * @addr: device or host mapped address
- * @val: returned value
- *
- * In case of DDR address that is not mapped into the default aperture that
- * the DDR bar exposes, the function will configure the iATU so that the DDR
- * bar will be positioned at a base address that allows writing to the
- * required address. Configuring the iATU during normal operation can
- * lead to undefined behavior and therefore, should be done with extreme care
- *
- */
-static int goya_debugfs_write32(struct hl_device *hdev, u64 addr,
- bool user_address, u32 val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 ddr_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
- WREG32(addr - CFG_BASE, val);
-
- } else if ((addr >= SRAM_BASE_ADDR) &&
- (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
-
- writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
- (addr - SRAM_BASE_ADDR));
-
- } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE +
- (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
- if (ddr_bar_addr != U64_MAX) {
- writel(val, hdev->pcie_bar[DDR_BAR_ID] +
- (addr - bar_base_addr));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev,
- ddr_bar_addr);
- }
- if (ddr_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
- *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-static int goya_debugfs_read64(struct hl_device *hdev, u64 addr,
- bool user_address, u64 *val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 ddr_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
- u32 val_l = RREG32(addr - CFG_BASE);
- u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
-
- *val = (((u64) val_h) << 32) | val_l;
-
- } else if ((addr >= SRAM_BASE_ADDR) &&
- (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
-
- *val = readq(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
- (addr - SRAM_BASE_ADDR));
-
- } else if (addr <=
- DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE +
- (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
- if (ddr_bar_addr != U64_MAX) {
- *val = readq(hdev->pcie_bar[DDR_BAR_ID] +
- (addr - bar_base_addr));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev,
- ddr_bar_addr);
- }
- if (ddr_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
- *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-static int goya_debugfs_write64(struct hl_device *hdev, u64 addr,
- bool user_address, u64 val)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 ddr_bar_addr, host_phys_end;
- int rc = 0;
-
- host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
- if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
- WREG32(addr - CFG_BASE, lower_32_bits(val));
- WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val));
-
- } else if ((addr >= SRAM_BASE_ADDR) &&
- (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
-
- writeq(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
- (addr - SRAM_BASE_ADDR));
-
- } else if (addr <=
- DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
-
- u64 bar_base_addr = DRAM_PHYS_BASE +
- (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
- if (ddr_bar_addr != U64_MAX) {
- writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
- (addr - bar_base_addr));
-
- ddr_bar_addr = goya_set_ddr_bar_base(hdev,
- ddr_bar_addr);
- }
- if (ddr_bar_addr == U64_MAX)
- rc = -EIO;
-
- } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
- user_address && !iommu_present(&pci_bus_type)) {
- *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
- } else {
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-static int goya_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
- void *blob_addr)
+static int goya_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, void *blob_addr)
{
dev_err(hdev->dev, "Reading via DMA is unimplemented yet\n");
return -EPERM;
@@ -5101,7 +4845,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
release_cb:
hl_cb_put(cb);
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+ hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
return rc;
}
@@ -5561,11 +5305,6 @@ static void goya_reset_sob_group(struct hl_device *hdev, u16 sob_group)
}
-static void goya_set_dma_mask_from_fw(struct hl_device *hdev)
-{
- hdev->dma_mask = 48;
-}
-
u64 goya_get_device_time(struct hl_device *hdev)
{
u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
@@ -5678,6 +5417,22 @@ static u32 *goya_get_stream_master_qid_arr(void)
return NULL;
}
+static void goya_get_valid_dram_page_orders(struct hl_info_dev_memalloc_page_sizes *info)
+{
+ /* Set to 0 since multiple page orders are not supported */
+ info->page_order_bitmask = 0;
+}
+
+static int goya_get_monitor_dump(struct hl_device *hdev, void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static int goya_scrub_device_dram(struct hl_device *hdev, u64 val)
+{
+ return -EOPNOTSUPP;
+}
+
static const struct hl_asic_funcs goya_funcs = {
.early_init = goya_early_init,
.early_fini = goya_early_fini,
@@ -5696,24 +5451,21 @@ static const struct hl_asic_funcs goya_funcs = {
.asic_dma_alloc_coherent = goya_dma_alloc_coherent,
.asic_dma_free_coherent = goya_dma_free_coherent,
.scrub_device_mem = goya_scrub_device_mem,
+ .scrub_device_dram = goya_scrub_device_dram,
.get_int_queue_base = goya_get_int_queue_base,
.test_queues = goya_test_queues,
.asic_dma_pool_zalloc = goya_dma_pool_zalloc,
.asic_dma_pool_free = goya_dma_pool_free,
.cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
.cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
- .hl_dma_unmap_sg = goya_dma_unmap_sg,
+ .hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
.cs_parser = goya_cs_parser,
- .asic_dma_map_sg = goya_dma_map_sg,
+ .asic_dma_map_sgtable = hl_dma_map_sgtable,
.get_dma_desc_list_size = goya_get_dma_desc_list_size,
.add_end_of_cb_packets = goya_add_end_of_cb_packets,
.update_eq_ci = goya_update_eq_ci,
.context_switch = goya_context_switch,
.restore_phase_topology = goya_restore_phase_topology,
- .debugfs_read32 = goya_debugfs_read32,
- .debugfs_write32 = goya_debugfs_write32,
- .debugfs_read64 = goya_debugfs_read64,
- .debugfs_write64 = goya_debugfs_write64,
.debugfs_read_dma = goya_debugfs_read_dma,
.add_device_attr = goya_add_device_attr,
.handle_eqe = goya_handle_eqe,
@@ -5722,6 +5474,7 @@ static const struct hl_asic_funcs goya_funcs = {
.write_pte = goya_write_pte,
.mmu_invalidate_cache = goya_mmu_invalidate_cache,
.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
+ .mmu_prefetch_cache_range = NULL,
.send_heartbeat = goya_send_heartbeat,
.debug_coresight = goya_debug_coresight,
.is_device_idle = goya_is_device_idle,
@@ -5730,6 +5483,7 @@ static const struct hl_asic_funcs goya_funcs = {
.hw_queues_unlock = goya_hw_queues_unlock,
.get_pci_id = goya_get_pci_id,
.get_eeprom_data = goya_get_eeprom_data,
+ .get_monitor_dump = goya_get_monitor_dump,
.send_cpu_message = goya_send_cpu_message,
.pci_bars_map = goya_pci_bars_map,
.init_iatu = goya_init_iatu,
@@ -5747,7 +5501,6 @@ static const struct hl_asic_funcs goya_funcs = {
.gen_wait_cb = goya_gen_wait_cb,
.reset_sob = goya_reset_sob,
.reset_sob_group = goya_reset_sob_group,
- .set_dma_mask_from_fw = goya_set_dma_mask_from_fw,
.get_device_time = goya_get_device_time,
.collective_wait_init_cs = goya_collective_wait_init_cs,
.collective_wait_create_jobs = goya_collective_wait_create_jobs,
@@ -5764,7 +5517,11 @@ static const struct hl_asic_funcs goya_funcs = {
.get_sob_addr = &goya_get_sob_addr,
.set_pci_memory_regions = goya_set_pci_memory_regions,
.get_stream_master_qid_arr = goya_get_stream_master_qid_arr,
- .is_valid_dram_page_size = NULL
+ .is_valid_dram_page_size = NULL,
+ .mmu_get_real_page_size = hl_mmu_get_real_page_size,
+ .get_valid_dram_page_orders = goya_get_valid_dram_page_orders,
+ .access_dev_mem = hl_access_dev_mem,
+ .set_dram_bar_base = goya_set_ddr_bar_base,
};
/*
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index 65668dac6a5f..38e44b6cf581 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -389,6 +389,14 @@ enum pq_init_status {
*
* CPUCP_PACKET_ENGINE_CORE_ASID_SET -
* Packet to perform engine core ASID configuration
+ *
+ * CPUCP_PACKET_MONITOR_DUMP_GET -
+ * Get a dump of the monitor registers from the CpuCP kernel.
+ * The CPU will put the registers dump in a buffer allocated by the driver,
+ * whose address is passed via the CpuCP packet. In addition, the host's driver
+ * passes the max size it allows the CpuCP to write to the structure, to prevent
+ * data corruption in case of mismatched driver/FW versions.
+ * Relevant only to Gaudi.
*/
enum cpucp_packet_id {
@@ -439,6 +447,11 @@ enum cpucp_packet_id {
CPUCP_PACKET_POWER_SET, /* internal */
CPUCP_PACKET_RESERVED, /* not used */
CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */
+ CPUCP_PACKET_RESERVED2, /* not used */
+ CPUCP_PACKET_RESERVED3, /* not used */
+ CPUCP_PACKET_RESERVED4, /* not used */
+ CPUCP_PACKET_RESERVED5, /* not used */
+ CPUCP_PACKET_MONITOR_DUMP_GET, /* debugfs */
};
#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
@@ -555,6 +568,12 @@ struct cpucp_array_data_packet {
__le32 data[];
};
+enum cpucp_led_index {
+ CPUCP_LED0_INDEX = 0,
+ CPUCP_LED1_INDEX,
+ CPUCP_LED2_INDEX
+};
+
enum cpucp_packet_rc {
cpucp_packet_success,
cpucp_packet_invalid,
@@ -576,7 +595,10 @@ enum cpucp_temp_type {
cpucp_temp_offset = 19,
cpucp_temp_lowest = 21,
cpucp_temp_highest = 22,
- cpucp_temp_reset_history = 23
+ cpucp_temp_reset_history = 23,
+ cpucp_temp_warn = 24,
+ cpucp_temp_max_crit = 25,
+ cpucp_temp_max_warn = 26,
};
enum cpucp_in_attributes {
@@ -686,6 +708,7 @@ enum pll_index {
enum rl_index {
TPC_RL = 0,
MME_RL,
+ EDMA_RL,
};
enum pvt_index {
@@ -820,6 +843,7 @@ enum cpucp_serdes_type {
TYPE_2_SERDES_TYPE,
HLS1_SERDES_TYPE,
HLS1H_SERDES_TYPE,
+ HLS2_SERDES_TYPE,
UNKNOWN_SERDES_TYPE,
MAX_NUM_SERDES_TYPE = UNKNOWN_SERDES_TYPE
};
@@ -833,9 +857,28 @@ struct cpucp_nic_info {
__u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN];
__le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN];
__le16 serdes_type; /* enum cpucp_serdes_type */
+ __le16 tx_swap_map[CPUCP_MAX_NICS];
__u8 reserved[6];
};
+#define PAGE_DISCARD_MAX 64
+
+struct page_discard_info {
+ __u8 num_entries;
+ __u8 reserved[7];
+ __le32 mmu_page_idx[PAGE_DISCARD_MAX];
+};
+
+/*
+ * struct ser_val - the SER (symbol error rate) value is represented by "integer * 10 ^ -exp".
+ * @integer: the integer part of the SER value;
+ * @exp: the exponent part of the SER value.
+ */
+struct ser_val {
+ __le16 integer;
+ __le16 exp;
+};
+
/*
* struct cpucp_nic_status - describes the status of a NIC port.
* @port: NIC port index.
@@ -889,4 +932,29 @@ struct cpucp_hbm_row_replaced_rows_info {
struct cpucp_hbm_row_info replaced_rows[CPUCP_HBM_ROW_REPLACE_MAX];
};
+/*
+ * struct dcore_monitor_regs_data - DCORE monitor regs data.
+ * The structure follows the sync manager block layout. Relevant only to Gaudi.
+ * @mon_pay_addrl: array of payload address low bits.
+ * @mon_pay_addrh: array of payload address high bits.
+ * @mon_pay_data: array of payload data.
+ * @mon_arm: array of monitor arm.
+ * @mon_status: array of monitor status.
+ */
+struct dcore_monitor_regs_data {
+ __le32 mon_pay_addrl[512];
+ __le32 mon_pay_addrh[512];
+ __le32 mon_pay_data[512];
+ __le32 mon_arm[512];
+ __le32 mon_status[512];
+};
+
+/* Contains SM data for each SYNC_MNGR (relevant only to Gaudi). */
+struct cpucp_monitor_dump {
+ struct dcore_monitor_regs_data sync_mngr_w_s;
+ struct dcore_monitor_regs_data sync_mngr_e_s;
+ struct dcore_monitor_regs_data sync_mngr_w_n;
+ struct dcore_monitor_regs_data sync_mngr_e_n;
+};
+
#endif /* CPUCP_IF_H */
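
As a rough illustration of the ser_val encoding documented in this header (value = integer * 10^-exp), a user-space consumer could reconstruct the rate as a double along these lines. This is only a sketch under the assumption of a little-endian host; the mirrored struct and helper name are invented for the example (link with -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Host-side mirror of struct ser_val: value = integer * 10^-exp. */
struct ser_val_host {
	uint16_t integer;
	uint16_t exp;
};

static double ser_to_double(const struct ser_val_host *v)
{
	/* e.g. integer = 123, exp = 5  ->  1.23e-3 */
	return (double)v->integer * pow(10.0, -(double)v->exp);
}

int main(void)
{
	struct ser_val_host v = { .integer = 123, .exp = 5 };

	printf("SER = %g\n", ser_to_double(&v));
	return 0;
}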
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
index 758f246627f8..cae8ac8bc5b1 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -34,4 +34,14 @@
#define MMU_CONFIG_TIMEOUT_USEC 2000 /* 2 ms */
+enum mmu_hop_num {
+ MMU_HOP0,
+ MMU_HOP1,
+ MMU_HOP2,
+ MMU_HOP3,
+ MMU_HOP4,
+ MMU_HOP5,
+ MMU_HOP_MAX,
+};
+
#endif /* INCLUDE_MMU_GENERAL_H_ */
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index f21854ac5cc2..009239ad1d8a 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -68,40 +68,40 @@ void __init lkdtm_bugs_init(int *recur_param)
recur_count = *recur_param;
}
-void lkdtm_PANIC(void)
+static void lkdtm_PANIC(void)
{
panic("dumptest");
}
-void lkdtm_BUG(void)
+static void lkdtm_BUG(void)
{
BUG();
}
static int warn_counter;
-void lkdtm_WARNING(void)
+static void lkdtm_WARNING(void)
{
WARN_ON(++warn_counter);
}
-void lkdtm_WARNING_MESSAGE(void)
+static void lkdtm_WARNING_MESSAGE(void)
{
WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}
-void lkdtm_EXCEPTION(void)
+static void lkdtm_EXCEPTION(void)
{
*((volatile int *) 0) = 0;
}
-void lkdtm_LOOP(void)
+static void lkdtm_LOOP(void)
{
for (;;)
;
}
-void lkdtm_EXHAUST_STACK(void)
+static void lkdtm_EXHAUST_STACK(void)
{
pr_info("Calling function with %lu frame size to depth %d ...\n",
REC_STACK_SIZE, recur_count);
@@ -115,7 +115,7 @@ static noinline void __lkdtm_CORRUPT_STACK(void *stack)
}
/* This should trip the stack canary, not corrupt the return address. */
-noinline void lkdtm_CORRUPT_STACK(void)
+static noinline void lkdtm_CORRUPT_STACK(void)
{
/* Use default char array length that triggers stack protection. */
char data[8] __aligned(sizeof(void *));
@@ -125,7 +125,7 @@ noinline void lkdtm_CORRUPT_STACK(void)
}
/* Same as above but will only get a canary with -fstack-protector-strong */
-noinline void lkdtm_CORRUPT_STACK_STRONG(void)
+static noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
union {
unsigned short shorts[4];
@@ -139,7 +139,7 @@ noinline void lkdtm_CORRUPT_STACK_STRONG(void)
static pid_t stack_pid;
static unsigned long stack_addr;
-void lkdtm_REPORT_STACK(void)
+static void lkdtm_REPORT_STACK(void)
{
volatile uintptr_t magic;
pid_t pid = task_pid_nr(current);
@@ -222,7 +222,7 @@ static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
}
}
-void lkdtm_REPORT_STACK_CANARY(void)
+static void lkdtm_REPORT_STACK_CANARY(void)
{
/* Use default char array length that triggers stack protection. */
char data[8] __aligned(sizeof(void *)) = { };
@@ -230,7 +230,7 @@ void lkdtm_REPORT_STACK_CANARY(void)
__lkdtm_REPORT_STACK_CANARY((void *)&data);
}
-void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
+static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
u32 *p;
@@ -245,21 +245,21 @@ void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}
-void lkdtm_SOFTLOCKUP(void)
+static void lkdtm_SOFTLOCKUP(void)
{
preempt_disable();
for (;;)
cpu_relax();
}
-void lkdtm_HARDLOCKUP(void)
+static void lkdtm_HARDLOCKUP(void)
{
local_irq_disable();
for (;;)
cpu_relax();
}
-void lkdtm_SPINLOCKUP(void)
+static void lkdtm_SPINLOCKUP(void)
{
/* Must be called twice to trigger. */
spin_lock(&lock_me_up);
@@ -267,7 +267,7 @@ void lkdtm_SPINLOCKUP(void)
__release(&lock_me_up);
}
-void lkdtm_HUNG_TASK(void)
+static void lkdtm_HUNG_TASK(void)
{
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
@@ -276,7 +276,7 @@ void lkdtm_HUNG_TASK(void)
volatile unsigned int huge = INT_MAX - 2;
volatile unsigned int ignored;
-void lkdtm_OVERFLOW_SIGNED(void)
+static void lkdtm_OVERFLOW_SIGNED(void)
{
int value;
@@ -291,7 +291,7 @@ void lkdtm_OVERFLOW_SIGNED(void)
}
-void lkdtm_OVERFLOW_UNSIGNED(void)
+static void lkdtm_OVERFLOW_UNSIGNED(void)
{
unsigned int value;
@@ -319,7 +319,7 @@ struct array_bounds {
int three;
};
-void lkdtm_ARRAY_BOUNDS(void)
+static void lkdtm_ARRAY_BOUNDS(void)
{
struct array_bounds_flex_array *not_checked;
struct array_bounds *checked;
@@ -327,6 +327,11 @@ void lkdtm_ARRAY_BOUNDS(void)
not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
+ if (!not_checked || !checked) {
+ kfree(not_checked);
+ kfree(checked);
+ return;
+ }
pr_info("Array access within bounds ...\n");
/* For both, touch all bytes in the actual member size. */
@@ -346,10 +351,13 @@ void lkdtm_ARRAY_BOUNDS(void)
kfree(not_checked);
kfree(checked);
pr_err("FAIL: survived array bounds overflow!\n");
- pr_expected_config(CONFIG_UBSAN_BOUNDS);
+ if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
+ pr_expected_config(CONFIG_UBSAN_TRAP);
+ else
+ pr_expected_config(CONFIG_UBSAN_BOUNDS);
}
-void lkdtm_CORRUPT_LIST_ADD(void)
+static void lkdtm_CORRUPT_LIST_ADD(void)
{
/*
* Initially, an empty list via LIST_HEAD:
@@ -389,7 +397,7 @@ void lkdtm_CORRUPT_LIST_ADD(void)
}
}
-void lkdtm_CORRUPT_LIST_DEL(void)
+static void lkdtm_CORRUPT_LIST_DEL(void)
{
LIST_HEAD(test_head);
struct lkdtm_list item;
@@ -417,7 +425,7 @@ void lkdtm_CORRUPT_LIST_DEL(void)
}
/* Test that VMAP_STACK is actually allocating with a leading guard page */
-void lkdtm_STACK_GUARD_PAGE_LEADING(void)
+static void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
const unsigned char *stack = task_stack_page(current);
const unsigned char *ptr = stack - 1;
@@ -431,7 +439,7 @@ void lkdtm_STACK_GUARD_PAGE_LEADING(void)
}
/* Test that VMAP_STACK is actually allocating with a trailing guard page */
-void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
+static void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
const unsigned char *stack = task_stack_page(current);
const unsigned char *ptr = stack + THREAD_SIZE;
@@ -444,7 +452,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}
-void lkdtm_UNSET_SMEP(void)
+static void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH 64
@@ -510,7 +518,7 @@ void lkdtm_UNSET_SMEP(void)
#endif
}
-void lkdtm_DOUBLE_FAULT(void)
+static void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
/*
@@ -558,7 +566,7 @@ static noinline void change_pac_parameters(void)
}
#endif
-noinline void lkdtm_CORRUPT_PAC(void)
+static noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE 10
@@ -586,3 +594,37 @@ noinline void lkdtm_CORRUPT_PAC(void)
pr_err("XFAIL: this test is arm64-only\n");
#endif
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(PANIC),
+ CRASHTYPE(BUG),
+ CRASHTYPE(WARNING),
+ CRASHTYPE(WARNING_MESSAGE),
+ CRASHTYPE(EXCEPTION),
+ CRASHTYPE(LOOP),
+ CRASHTYPE(EXHAUST_STACK),
+ CRASHTYPE(CORRUPT_STACK),
+ CRASHTYPE(CORRUPT_STACK_STRONG),
+ CRASHTYPE(REPORT_STACK),
+ CRASHTYPE(REPORT_STACK_CANARY),
+ CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
+ CRASHTYPE(SOFTLOCKUP),
+ CRASHTYPE(HARDLOCKUP),
+ CRASHTYPE(SPINLOCKUP),
+ CRASHTYPE(HUNG_TASK),
+ CRASHTYPE(OVERFLOW_SIGNED),
+ CRASHTYPE(OVERFLOW_UNSIGNED),
+ CRASHTYPE(ARRAY_BOUNDS),
+ CRASHTYPE(CORRUPT_LIST_ADD),
+ CRASHTYPE(CORRUPT_LIST_DEL),
+ CRASHTYPE(STACK_GUARD_PAGE_LEADING),
+ CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
+ CRASHTYPE(UNSET_SMEP),
+ CRASHTYPE(DOUBLE_FAULT),
+ CRASHTYPE(CORRUPT_PAC),
+};
+
+struct crashtype_category bugs_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
index c9aeddef1044..666a7f4bc137 100644
--- a/drivers/misc/lkdtm/cfi.c
+++ b/drivers/misc/lkdtm/cfi.c
@@ -3,6 +3,7 @@
* This is for all the tests relating directly to Control Flow Integrity.
*/
#include "lkdtm.h"
+#include <asm/page.h>
static int called_count;
@@ -22,7 +23,7 @@ static noinline int lkdtm_increment_int(int *counter)
/*
* This tries to call an indirect function with a mismatched prototype.
*/
-void lkdtm_CFI_FORWARD_PROTO(void)
+static void lkdtm_CFI_FORWARD_PROTO(void)
{
/*
* Matches lkdtm_increment_void()'s prototype, but not
@@ -41,3 +42,145 @@ void lkdtm_CFI_FORWARD_PROTO(void)
pr_err("FAIL: survived mismatched prototype function call!\n");
pr_expected_config(CONFIG_CFI_CLANG);
}
+
+/*
+ * This can stay local to LKDTM, as there should not be a production reason
+ * to disable PAC && SCS.
+ */
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+# ifdef CONFIG_ARM64_BTI_KERNEL
+# define __no_pac "branch-protection=bti"
+# else
+# define __no_pac "branch-protection=none"
+# endif
+# define __no_ret_protection __noscs __attribute__((__target__(__no_pac)))
+#else
+# define __no_ret_protection __noscs
+#endif
+
+#define no_pac_addr(addr) \
+ ((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET))
+
+/* The ultimate ROP gadget. */
+static noinline __no_ret_protection
+void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr)
+{
+ /* Use of volatile is to make sure final write isn't seen as a dead store. */
+ unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
+
+ /* Make sure we've found the right place on the stack before writing it. */
+ if (no_pac_addr(*ret_addr) == expected)
+ *ret_addr = (addr);
+ else
+ /* Check architecture, stack layout, or compiler behavior... */
+ pr_warn("Eek: return address mismatch! %px != %px\n",
+ *ret_addr, addr);
+}
+
+static noinline
+void set_return_addr(unsigned long *expected, unsigned long *addr)
+{
+ /* Use of volatile is to make sure final write isn't seen as a dead store. */
+ unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
+
+ /* Make sure we've found the right place on the stack before writing it. */
+ if (no_pac_addr(*ret_addr) == expected)
+ *ret_addr = (addr);
+ else
+ /* Check architecture, stack layout, or compiler behavior... */
+ pr_warn("Eek: return address mismatch! %px != %px\n",
+ *ret_addr, addr);
+}
+
+static volatile int force_check;
+
+static void lkdtm_CFI_BACKWARD(void)
+{
+ /* Use calculated gotos to keep labels addressable. */
+ void *labels[] = {0, &&normal, &&redirected, &&check_normal, &&check_redirected};
+
+ pr_info("Attempting unchecked stack return address redirection ...\n");
+
+ /* Always false */
+ if (force_check) {
+ /*
+ * Prepare to call with NULLs to avoid parameters being treated as
+ * constants in -O2.
+ */
+ set_return_addr_unchecked(NULL, NULL);
+ set_return_addr(NULL, NULL);
+ if (force_check)
+ goto *labels[1];
+ if (force_check)
+ goto *labels[2];
+ if (force_check)
+ goto *labels[3];
+ if (force_check)
+ goto *labels[4];
+ return;
+ }
+
+ /*
+ * Use fallthrough switch case to keep basic block ordering between
+ * set_return_addr*() and the label after it.
+ */
+ switch (force_check) {
+ case 0:
+ set_return_addr_unchecked(&&normal, &&redirected);
+ fallthrough;
+ case 1:
+normal:
+ /* Always true */
+ if (!force_check) {
+ pr_err("FAIL: stack return address manipulation failed!\n");
+ /* If we can't redirect "normally", we can't test mitigations. */
+ return;
+ }
+ break;
+ default:
+redirected:
+ pr_info("ok: redirected stack return address.\n");
+ break;
+ }
+
+ pr_info("Attempting checked stack return address redirection ...\n");
+
+ switch (force_check) {
+ case 0:
+ set_return_addr(&&check_normal, &&check_redirected);
+ fallthrough;
+ case 1:
+check_normal:
+ /* Always true */
+ if (!force_check) {
+ pr_info("ok: control flow unchanged.\n");
+ return;
+ }
+
+check_redirected:
+ pr_err("FAIL: stack return address was redirected!\n");
+ break;
+ }
+
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
+ pr_expected_config(CONFIG_ARM64_PTR_AUTH_KERNEL);
+ return;
+ }
+ if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) {
+ pr_expected_config(CONFIG_SHADOW_CALL_STACK);
+ return;
+ }
+ pr_warn("This is probably expected, since this %s was built *without* %s=y nor %s=y\n",
+ lkdtm_kernel_info,
+ "CONFIG_ARM64_PTR_AUTH_KERNEL", "CONFIG_SHADOW_CALL_STACK");
+}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(CFI_FORWARD_PROTO),
+ CRASHTYPE(CFI_BACKWARD),
+};
+
+struct crashtype_category cfi_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index e2228b6fc09b..b4712ff196b4 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -86,109 +86,21 @@ static struct crashpoint crashpoints[] = {
#endif
};
-
-/* Crash types. */
-struct crashtype {
- const char *name;
- void (*func)(void);
-};
-
-#define CRASHTYPE(_name) \
- { \
- .name = __stringify(_name), \
- .func = lkdtm_ ## _name, \
- }
-
-/* Define the possible types of crashes that can be triggered. */
-static const struct crashtype crashtypes[] = {
- CRASHTYPE(PANIC),
- CRASHTYPE(BUG),
- CRASHTYPE(WARNING),
- CRASHTYPE(WARNING_MESSAGE),
- CRASHTYPE(EXCEPTION),
- CRASHTYPE(LOOP),
- CRASHTYPE(EXHAUST_STACK),
- CRASHTYPE(CORRUPT_STACK),
- CRASHTYPE(CORRUPT_STACK_STRONG),
- CRASHTYPE(REPORT_STACK),
- CRASHTYPE(REPORT_STACK_CANARY),
- CRASHTYPE(CORRUPT_LIST_ADD),
- CRASHTYPE(CORRUPT_LIST_DEL),
- CRASHTYPE(STACK_GUARD_PAGE_LEADING),
- CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
- CRASHTYPE(UNSET_SMEP),
- CRASHTYPE(CORRUPT_PAC),
- CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
- CRASHTYPE(SLAB_LINEAR_OVERFLOW),
- CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
- CRASHTYPE(WRITE_AFTER_FREE),
- CRASHTYPE(READ_AFTER_FREE),
- CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
- CRASHTYPE(READ_BUDDY_AFTER_FREE),
- CRASHTYPE(SLAB_INIT_ON_ALLOC),
- CRASHTYPE(BUDDY_INIT_ON_ALLOC),
- CRASHTYPE(SLAB_FREE_DOUBLE),
- CRASHTYPE(SLAB_FREE_CROSS),
- CRASHTYPE(SLAB_FREE_PAGE),
- CRASHTYPE(SOFTLOCKUP),
- CRASHTYPE(HARDLOCKUP),
- CRASHTYPE(SPINLOCKUP),
- CRASHTYPE(HUNG_TASK),
- CRASHTYPE(OVERFLOW_SIGNED),
- CRASHTYPE(OVERFLOW_UNSIGNED),
- CRASHTYPE(ARRAY_BOUNDS),
- CRASHTYPE(EXEC_DATA),
- CRASHTYPE(EXEC_STACK),
- CRASHTYPE(EXEC_KMALLOC),
- CRASHTYPE(EXEC_VMALLOC),
- CRASHTYPE(EXEC_RODATA),
- CRASHTYPE(EXEC_USERSPACE),
- CRASHTYPE(EXEC_NULL),
- CRASHTYPE(ACCESS_USERSPACE),
- CRASHTYPE(ACCESS_NULL),
- CRASHTYPE(WRITE_RO),
- CRASHTYPE(WRITE_RO_AFTER_INIT),
- CRASHTYPE(WRITE_KERN),
- CRASHTYPE(WRITE_OPD),
- CRASHTYPE(REFCOUNT_INC_OVERFLOW),
- CRASHTYPE(REFCOUNT_ADD_OVERFLOW),
- CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW),
- CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW),
- CRASHTYPE(REFCOUNT_DEC_ZERO),
- CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
- CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
- CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
- CRASHTYPE(REFCOUNT_INC_ZERO),
- CRASHTYPE(REFCOUNT_ADD_ZERO),
- CRASHTYPE(REFCOUNT_INC_SATURATED),
- CRASHTYPE(REFCOUNT_DEC_SATURATED),
- CRASHTYPE(REFCOUNT_ADD_SATURATED),
- CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED),
- CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED),
- CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED),
- CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED),
- CRASHTYPE(REFCOUNT_TIMING),
- CRASHTYPE(ATOMIC_TIMING),
- CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
- CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
- CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO),
- CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM),
- CRASHTYPE(USERCOPY_STACK_FRAME_TO),
- CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
- CRASHTYPE(USERCOPY_STACK_BEYOND),
- CRASHTYPE(USERCOPY_KERNEL),
- CRASHTYPE(STACKLEAK_ERASING),
- CRASHTYPE(CFI_FORWARD_PROTO),
- CRASHTYPE(FORTIFIED_OBJECT),
- CRASHTYPE(FORTIFIED_SUBOBJECT),
- CRASHTYPE(FORTIFIED_STRSCPY),
- CRASHTYPE(DOUBLE_FAULT),
+/* List of possible types for crashes that can be triggered. */
+static const struct crashtype_category *crashtype_categories[] = {
+ &bugs_crashtypes,
+ &heap_crashtypes,
+ &perms_crashtypes,
+ &refcount_crashtypes,
+ &usercopy_crashtypes,
+ &stackleak_crashtypes,
+ &cfi_crashtypes,
+ &fortify_crashtypes,
#ifdef CONFIG_PPC_64S_HASH_MMU
- CRASHTYPE(PPC_SLB_MULTIHIT),
+ &powerpc_crashtypes,
#endif
};
-
/* Global kprobe entry and crashtype. */
static struct kprobe *lkdtm_kprobe;
static struct crashpoint *lkdtm_crashpoint;
@@ -223,11 +135,16 @@ char *lkdtm_kernel_info;
/* Return the crashtype number or NULL if the name is invalid */
static const struct crashtype *find_crashtype(const char *name)
{
- int i;
+ int cat, idx;
+
+ for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) {
+ for (idx = 0; idx < crashtype_categories[cat]->len; idx++) {
+ struct crashtype *crashtype;
- for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
- if (!strcmp(name, crashtypes[i].name))
- return &crashtypes[i];
+ crashtype = &crashtype_categories[cat]->crashtypes[idx];
+ if (!strcmp(name, crashtype->name))
+ return crashtype;
+ }
}
return NULL;
@@ -347,17 +264,24 @@ static ssize_t lkdtm_debugfs_entry(struct file *f,
static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
size_t count, loff_t *off)
{
+ int n, cat, idx;
+ ssize_t out;
char *buf;
- int i, n, out;
buf = (char *)__get_free_page(GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n");
- for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
- n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
- crashtypes[i].name);
+
+ for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) {
+ for (idx = 0; idx < crashtype_categories[cat]->len; idx++) {
+ struct crashtype *crashtype;
+
+ crashtype = &crashtype_categories[cat]->crashtypes[idx];
+ n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
+ crashtype->name);
+ }
}
buf[n] = '\0';
diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c
index ab33bb5e2e7a..080293fa3c52 100644
--- a/drivers/misc/lkdtm/fortify.c
+++ b/drivers/misc/lkdtm/fortify.c
@@ -10,7 +10,7 @@
static volatile int fortify_scratch_space;
-void lkdtm_FORTIFIED_OBJECT(void)
+static void lkdtm_FORTIFIED_OBJECT(void)
{
struct target {
char a[10];
@@ -31,7 +31,7 @@ void lkdtm_FORTIFIED_OBJECT(void)
pr_expected_config(CONFIG_FORTIFY_SOURCE);
}
-void lkdtm_FORTIFIED_SUBOBJECT(void)
+static void lkdtm_FORTIFIED_SUBOBJECT(void)
{
struct target {
char a[10];
@@ -67,7 +67,7 @@ void lkdtm_FORTIFIED_SUBOBJECT(void)
* strscpy and generate a panic because there is a write overflow (i.e. src
* length is greater than dst length).
*/
-void lkdtm_FORTIFIED_STRSCPY(void)
+static void lkdtm_FORTIFIED_STRSCPY(void)
{
char *src;
char dst[5];
@@ -134,3 +134,14 @@ void lkdtm_FORTIFIED_STRSCPY(void)
kfree(src);
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(FORTIFIED_OBJECT),
+ CRASHTYPE(FORTIFIED_SUBOBJECT),
+ CRASHTYPE(FORTIFIED_STRSCPY),
+};
+
+struct crashtype_category fortify_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index 8a92f5a800fa..62516078a619 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -22,8 +22,11 @@ static volatile int __offset = 1;
/*
* If there aren't guard pages, it's likely that a consecutive allocation will
* let us overflow into the second allocation without overwriting something real.
+ *
+ * This should always be caught because there is an unconditional unmapped
+ * page after vmap allocations.
*/
-void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
+static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
{
char *one, *two;
@@ -41,8 +44,11 @@ void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
* This tries to stay within the next largest power-of-2 kmalloc cache
* to avoid actually overwriting anything important if it's not detected
* correctly.
+ *
+ * This should get caught either by memory tagging, by KASAN, or by using
+ * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
*/
-void lkdtm_SLAB_LINEAR_OVERFLOW(void)
+static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
size_t len = 1020;
u32 *data = kmalloc(len, GFP_KERNEL);
@@ -50,11 +56,12 @@ void lkdtm_SLAB_LINEAR_OVERFLOW(void)
return;
pr_info("Attempting slab linear overflow ...\n");
+ OPTIMIZER_HIDE_VAR(data);
data[1024 / sizeof(u32)] = 0x12345678;
kfree(data);
}
-void lkdtm_WRITE_AFTER_FREE(void)
+static void lkdtm_WRITE_AFTER_FREE(void)
{
int *base, *again;
size_t len = 1024;
@@ -80,7 +87,7 @@ void lkdtm_WRITE_AFTER_FREE(void)
pr_info("Hmm, didn't get the same memory range.\n");
}
-void lkdtm_READ_AFTER_FREE(void)
+static void lkdtm_READ_AFTER_FREE(void)
{
int *base, *val, saw;
size_t len = 1024;
@@ -124,7 +131,7 @@ void lkdtm_READ_AFTER_FREE(void)
kfree(val);
}
-void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
+static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
if (!p) {
@@ -144,7 +151,7 @@ void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
schedule();
}
-void lkdtm_READ_BUDDY_AFTER_FREE(void)
+static void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
int saw, *val;
@@ -181,7 +188,7 @@ void lkdtm_READ_BUDDY_AFTER_FREE(void)
kfree(val);
}
-void lkdtm_SLAB_INIT_ON_ALLOC(void)
+static void lkdtm_SLAB_INIT_ON_ALLOC(void)
{
u8 *first;
u8 *val;
@@ -213,7 +220,7 @@ void lkdtm_SLAB_INIT_ON_ALLOC(void)
kfree(val);
}
-void lkdtm_BUDDY_INIT_ON_ALLOC(void)
+static void lkdtm_BUDDY_INIT_ON_ALLOC(void)
{
u8 *first;
u8 *val;
@@ -246,7 +253,7 @@ void lkdtm_BUDDY_INIT_ON_ALLOC(void)
free_page((unsigned long)val);
}
-void lkdtm_SLAB_FREE_DOUBLE(void)
+static void lkdtm_SLAB_FREE_DOUBLE(void)
{
int *val;
@@ -263,7 +270,7 @@ void lkdtm_SLAB_FREE_DOUBLE(void)
kmem_cache_free(double_free_cache, val);
}
-void lkdtm_SLAB_FREE_CROSS(void)
+static void lkdtm_SLAB_FREE_CROSS(void)
{
int *val;
@@ -279,7 +286,7 @@ void lkdtm_SLAB_FREE_CROSS(void)
kmem_cache_free(b_cache, val);
}
-void lkdtm_SLAB_FREE_PAGE(void)
+static void lkdtm_SLAB_FREE_PAGE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
@@ -313,3 +320,22 @@ void __exit lkdtm_heap_exit(void)
kmem_cache_destroy(a_cache);
kmem_cache_destroy(b_cache);
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(SLAB_LINEAR_OVERFLOW),
+ CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
+ CRASHTYPE(WRITE_AFTER_FREE),
+ CRASHTYPE(READ_AFTER_FREE),
+ CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
+ CRASHTYPE(READ_BUDDY_AFTER_FREE),
+ CRASHTYPE(SLAB_INIT_ON_ALLOC),
+ CRASHTYPE(BUDDY_INIT_ON_ALLOC),
+ CRASHTYPE(SLAB_FREE_DOUBLE),
+ CRASHTYPE(SLAB_FREE_CROSS),
+ CRASHTYPE(SLAB_FREE_PAGE),
+};
+
+struct crashtype_category heap_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 305fc2ec3f25..015e0484026b 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -9,19 +9,19 @@
extern char *lkdtm_kernel_info;
#define pr_expected_config(kconfig) \
-{ \
+do { \
if (IS_ENABLED(kconfig)) \
pr_err("Unexpected! This %s was built with " #kconfig "=y\n", \
lkdtm_kernel_info); \
else \
pr_warn("This is probably expected, since this %s was built *without* " #kconfig "=y\n", \
lkdtm_kernel_info); \
-}
+} while (0)
#ifndef MODULE
int lkdtm_check_bool_cmdline(const char *param);
#define pr_expected_config_param(kconfig, param) \
-{ \
+do { \
if (IS_ENABLED(kconfig)) { \
switch (lkdtm_check_bool_cmdline(param)) { \
case 0: \
@@ -52,119 +52,49 @@ int lkdtm_check_bool_cmdline(const char *param);
break; \
} \
} \
-}
+} while (0)
#else
#define pr_expected_config_param(kconfig, param) pr_expected_config(kconfig)
#endif
-/* bugs.c */
+/* Crash types. */
+struct crashtype {
+ const char *name;
+ void (*func)(void);
+};
+
+#define CRASHTYPE(_name) \
+ { \
+ .name = __stringify(_name), \
+ .func = lkdtm_ ## _name, \
+ }
+
+/* Category's collection of crashtypes. */
+struct crashtype_category {
+ struct crashtype *crashtypes;
+ size_t len;
+};
+
+/* Each category's crashtypes list. */
+extern struct crashtype_category bugs_crashtypes;
+extern struct crashtype_category heap_crashtypes;
+extern struct crashtype_category perms_crashtypes;
+extern struct crashtype_category refcount_crashtypes;
+extern struct crashtype_category usercopy_crashtypes;
+extern struct crashtype_category stackleak_crashtypes;
+extern struct crashtype_category cfi_crashtypes;
+extern struct crashtype_category fortify_crashtypes;
+extern struct crashtype_category powerpc_crashtypes;
+
+/* Each category's init/exit routines. */
void __init lkdtm_bugs_init(int *recur_param);
-void lkdtm_PANIC(void);
-void lkdtm_BUG(void);
-void lkdtm_WARNING(void);
-void lkdtm_WARNING_MESSAGE(void);
-void lkdtm_EXCEPTION(void);
-void lkdtm_LOOP(void);
-void lkdtm_EXHAUST_STACK(void);
-void lkdtm_CORRUPT_STACK(void);
-void lkdtm_CORRUPT_STACK_STRONG(void);
-void lkdtm_REPORT_STACK(void);
-void lkdtm_REPORT_STACK_CANARY(void);
-void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
-void lkdtm_SOFTLOCKUP(void);
-void lkdtm_HARDLOCKUP(void);
-void lkdtm_SPINLOCKUP(void);
-void lkdtm_HUNG_TASK(void);
-void lkdtm_OVERFLOW_SIGNED(void);
-void lkdtm_OVERFLOW_UNSIGNED(void);
-void lkdtm_ARRAY_BOUNDS(void);
-void lkdtm_CORRUPT_LIST_ADD(void);
-void lkdtm_CORRUPT_LIST_DEL(void);
-void lkdtm_STACK_GUARD_PAGE_LEADING(void);
-void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
-void lkdtm_UNSET_SMEP(void);
-void lkdtm_DOUBLE_FAULT(void);
-void lkdtm_CORRUPT_PAC(void);
-
-/* heap.c */
void __init lkdtm_heap_init(void);
void __exit lkdtm_heap_exit(void);
-void lkdtm_VMALLOC_LINEAR_OVERFLOW(void);
-void lkdtm_SLAB_LINEAR_OVERFLOW(void);
-void lkdtm_WRITE_AFTER_FREE(void);
-void lkdtm_READ_AFTER_FREE(void);
-void lkdtm_WRITE_BUDDY_AFTER_FREE(void);
-void lkdtm_READ_BUDDY_AFTER_FREE(void);
-void lkdtm_SLAB_INIT_ON_ALLOC(void);
-void lkdtm_BUDDY_INIT_ON_ALLOC(void);
-void lkdtm_SLAB_FREE_DOUBLE(void);
-void lkdtm_SLAB_FREE_CROSS(void);
-void lkdtm_SLAB_FREE_PAGE(void);
-
-/* perms.c */
void __init lkdtm_perms_init(void);
-void lkdtm_WRITE_RO(void);
-void lkdtm_WRITE_RO_AFTER_INIT(void);
-void lkdtm_WRITE_KERN(void);
-void lkdtm_WRITE_OPD(void);
-void lkdtm_EXEC_DATA(void);
-void lkdtm_EXEC_STACK(void);
-void lkdtm_EXEC_KMALLOC(void);
-void lkdtm_EXEC_VMALLOC(void);
-void lkdtm_EXEC_RODATA(void);
-void lkdtm_EXEC_USERSPACE(void);
-void lkdtm_EXEC_NULL(void);
-void lkdtm_ACCESS_USERSPACE(void);
-void lkdtm_ACCESS_NULL(void);
-
-/* refcount.c */
-void lkdtm_REFCOUNT_INC_OVERFLOW(void);
-void lkdtm_REFCOUNT_ADD_OVERFLOW(void);
-void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void);
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void);
-void lkdtm_REFCOUNT_DEC_ZERO(void);
-void lkdtm_REFCOUNT_DEC_NEGATIVE(void);
-void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void);
-void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void);
-void lkdtm_REFCOUNT_INC_ZERO(void);
-void lkdtm_REFCOUNT_ADD_ZERO(void);
-void lkdtm_REFCOUNT_INC_SATURATED(void);
-void lkdtm_REFCOUNT_DEC_SATURATED(void);
-void lkdtm_REFCOUNT_ADD_SATURATED(void);
-void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void);
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void);
-void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void);
-void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void);
-void lkdtm_REFCOUNT_TIMING(void);
-void lkdtm_ATOMIC_TIMING(void);
-
-/* rodata.c */
-void lkdtm_rodata_do_nothing(void);
-
-/* usercopy.c */
void __init lkdtm_usercopy_init(void);
void __exit lkdtm_usercopy_exit(void);
-void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
-void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
-void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void);
-void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void);
-void lkdtm_USERCOPY_STACK_FRAME_TO(void);
-void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
-void lkdtm_USERCOPY_STACK_BEYOND(void);
-void lkdtm_USERCOPY_KERNEL(void);
-
-/* stackleak.c */
-void lkdtm_STACKLEAK_ERASING(void);
-
-/* cfi.c */
-void lkdtm_CFI_FORWARD_PROTO(void);
-/* fortify.c */
-void lkdtm_FORTIFIED_OBJECT(void);
-void lkdtm_FORTIFIED_SUBOBJECT(void);
-void lkdtm_FORTIFIED_STRSCPY(void);
-
-/* powerpc.c */
-void lkdtm_PPC_SLB_MULTIHIT(void);
+/* Special declaration for function-in-rodata. */
+void lkdtm_rodata_do_nothing(void);
#endif
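
For context on the pr_expected_config()/pr_expected_config_param() change in this header: wrapping a multi-statement macro body in do { ... } while (0) is the standard way to make it behave as a single statement, so it can sit in an unbraced if/else without the trailing semicolon orphaning the else. A minimal stand-alone illustration (the REPORT_* macros are invented for this sketch, not kernel code):

#include <stdio.h>

/* Unsafe form: expands to a compound statement, so the ';' written after the
 * macro call becomes an extra empty statement and a following 'else' no
 * longer pairs with the 'if'. */
#define REPORT_BAD(msg)	{ puts("report:"); puts(msg); }

/* Safe form: the do/while(0) wrapper makes the expansion one statement. */
#define REPORT_OK(msg)			\
	do {				\
		puts("report:");	\
		puts(msg);		\
	} while (0)

int main(void)
{
	int failed = 0;

	if (failed)
		REPORT_OK("failure path");	/* with REPORT_BAD() this if/else would not compile */
	else
		REPORT_OK("success path");
	return 0;
}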
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index 2c6aba3ff32b..b93404d65650 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -103,7 +103,7 @@ static void execute_user_location(void *dst)
pr_err("FAIL: func returned\n");
}
-void lkdtm_WRITE_RO(void)
+static void lkdtm_WRITE_RO(void)
{
/* Explicitly cast away "const" for the test and make volatile. */
volatile unsigned long *ptr = (unsigned long *)&rodata;
@@ -113,7 +113,7 @@ void lkdtm_WRITE_RO(void)
pr_err("FAIL: survived bad write\n");
}
-void lkdtm_WRITE_RO_AFTER_INIT(void)
+static void lkdtm_WRITE_RO_AFTER_INIT(void)
{
volatile unsigned long *ptr = &ro_after_init;
@@ -132,7 +132,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
pr_err("FAIL: survived bad write\n");
}
-void lkdtm_WRITE_KERN(void)
+static void lkdtm_WRITE_KERN(void)
{
size_t size;
volatile unsigned char *ptr;
@@ -149,7 +149,7 @@ void lkdtm_WRITE_KERN(void)
do_overwritten();
}
-void lkdtm_WRITE_OPD(void)
+static void lkdtm_WRITE_OPD(void)
{
size_t size = sizeof(func_desc_t);
void (*func)(void) = do_nothing;
@@ -166,38 +166,38 @@ void lkdtm_WRITE_OPD(void)
func();
}
-void lkdtm_EXEC_DATA(void)
+static void lkdtm_EXEC_DATA(void)
{
execute_location(data_area, CODE_WRITE);
}
-void lkdtm_EXEC_STACK(void)
+static void lkdtm_EXEC_STACK(void)
{
u8 stack_area[EXEC_SIZE];
execute_location(stack_area, CODE_WRITE);
}
-void lkdtm_EXEC_KMALLOC(void)
+static void lkdtm_EXEC_KMALLOC(void)
{
u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
execute_location(kmalloc_area, CODE_WRITE);
kfree(kmalloc_area);
}
-void lkdtm_EXEC_VMALLOC(void)
+static void lkdtm_EXEC_VMALLOC(void)
{
u32 *vmalloc_area = vmalloc(EXEC_SIZE);
execute_location(vmalloc_area, CODE_WRITE);
vfree(vmalloc_area);
}
-void lkdtm_EXEC_RODATA(void)
+static void lkdtm_EXEC_RODATA(void)
{
execute_location(dereference_function_descriptor(lkdtm_rodata_do_nothing),
CODE_AS_IS);
}
-void lkdtm_EXEC_USERSPACE(void)
+static void lkdtm_EXEC_USERSPACE(void)
{
unsigned long user_addr;
@@ -212,12 +212,12 @@ void lkdtm_EXEC_USERSPACE(void)
vm_munmap(user_addr, PAGE_SIZE);
}
-void lkdtm_EXEC_NULL(void)
+static void lkdtm_EXEC_NULL(void)
{
execute_location(NULL, CODE_AS_IS);
}
-void lkdtm_ACCESS_USERSPACE(void)
+static void lkdtm_ACCESS_USERSPACE(void)
{
unsigned long user_addr, tmp = 0;
unsigned long *ptr;
@@ -250,7 +250,7 @@ void lkdtm_ACCESS_USERSPACE(void)
vm_munmap(user_addr, PAGE_SIZE);
}
-void lkdtm_ACCESS_NULL(void)
+static void lkdtm_ACCESS_NULL(void)
{
unsigned long tmp;
volatile unsigned long *ptr = (unsigned long *)NULL;
@@ -270,3 +270,24 @@ void __init lkdtm_perms_init(void)
/* Make sure we can write to __ro_after_init values during __init */
ro_after_init |= 0xAA;
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(WRITE_RO),
+ CRASHTYPE(WRITE_RO_AFTER_INIT),
+ CRASHTYPE(WRITE_KERN),
+ CRASHTYPE(WRITE_OPD),
+ CRASHTYPE(EXEC_DATA),
+ CRASHTYPE(EXEC_STACK),
+ CRASHTYPE(EXEC_KMALLOC),
+ CRASHTYPE(EXEC_VMALLOC),
+ CRASHTYPE(EXEC_RODATA),
+ CRASHTYPE(EXEC_USERSPACE),
+ CRASHTYPE(EXEC_NULL),
+ CRASHTYPE(ACCESS_USERSPACE),
+ CRASHTYPE(ACCESS_NULL),
+};
+
+struct crashtype_category perms_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/powerpc.c b/drivers/misc/lkdtm/powerpc.c
index 077c9f9ed8d0..be385449911a 100644
--- a/drivers/misc/lkdtm/powerpc.c
+++ b/drivers/misc/lkdtm/powerpc.c
@@ -100,7 +100,7 @@ static void insert_dup_slb_entry_0(void)
preempt_enable();
}
-void lkdtm_PPC_SLB_MULTIHIT(void)
+static void lkdtm_PPC_SLB_MULTIHIT(void)
{
if (!radix_enabled()) {
pr_info("Injecting SLB multihit errors\n");
@@ -118,3 +118,12 @@ void lkdtm_PPC_SLB_MULTIHIT(void)
pr_err("XFAIL: This test is for ppc64 and with hash mode MMU only\n");
}
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(PPC_SLB_MULTIHIT),
+};
+
+struct crashtype_category powerpc_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/refcount.c b/drivers/misc/lkdtm/refcount.c
index de7c5ab528d9..5cd488f54cfa 100644
--- a/drivers/misc/lkdtm/refcount.c
+++ b/drivers/misc/lkdtm/refcount.c
@@ -24,7 +24,7 @@ static void overflow_check(refcount_t *ref)
* A refcount_inc() above the maximum value of the refcount implementation,
* should at least saturate, and at most also WARN.
*/
-void lkdtm_REFCOUNT_INC_OVERFLOW(void)
+static void lkdtm_REFCOUNT_INC_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
@@ -40,7 +40,7 @@ void lkdtm_REFCOUNT_INC_OVERFLOW(void)
}
/* refcount_add() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
+static void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
@@ -58,7 +58,7 @@ void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
}
/* refcount_inc_not_zero() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
+static void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
@@ -70,7 +70,7 @@ void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
}
/* refcount_add_not_zero() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
+static void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
@@ -103,7 +103,7 @@ static void check_zero(refcount_t *ref)
* zero it should either saturate (when inc-from-zero isn't protected)
* or stay at zero (when inc-from-zero is protected) and should WARN for both.
*/
-void lkdtm_REFCOUNT_DEC_ZERO(void)
+static void lkdtm_REFCOUNT_DEC_ZERO(void)
{
refcount_t zero = REFCOUNT_INIT(2);
@@ -142,7 +142,7 @@ static void check_negative(refcount_t *ref, int start)
}
/* A refcount_dec() going negative should saturate and may WARN. */
-void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
+static void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
{
refcount_t neg = REFCOUNT_INIT(0);
@@ -156,7 +156,7 @@ void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
* A refcount_dec_and_test() should act like refcount_dec() above when
* going negative.
*/
-void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
+static void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
{
refcount_t neg = REFCOUNT_INIT(0);
@@ -171,7 +171,7 @@ void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
* A refcount_sub_and_test() should act like refcount_dec_and_test()
* above when going negative.
*/
-void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
+static void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
{
refcount_t neg = REFCOUNT_INIT(3);
@@ -203,7 +203,7 @@ static void check_from_zero(refcount_t *ref)
/*
* A refcount_inc() from zero should pin to zero or saturate and may WARN.
*/
-void lkdtm_REFCOUNT_INC_ZERO(void)
+static void lkdtm_REFCOUNT_INC_ZERO(void)
{
refcount_t zero = REFCOUNT_INIT(0);
@@ -228,7 +228,7 @@ void lkdtm_REFCOUNT_INC_ZERO(void)
* A refcount_add() should act like refcount_inc() above when starting
* at zero.
*/
-void lkdtm_REFCOUNT_ADD_ZERO(void)
+static void lkdtm_REFCOUNT_ADD_ZERO(void)
{
refcount_t zero = REFCOUNT_INIT(0);
@@ -267,7 +267,7 @@ static void check_saturated(refcount_t *ref)
* A refcount_inc() from a saturated value should at most warn about
* being saturated already.
*/
-void lkdtm_REFCOUNT_INC_SATURATED(void)
+static void lkdtm_REFCOUNT_INC_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -278,7 +278,7 @@ void lkdtm_REFCOUNT_INC_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_DEC_SATURATED(void)
+static void lkdtm_REFCOUNT_DEC_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -289,7 +289,7 @@ void lkdtm_REFCOUNT_DEC_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_ADD_SATURATED(void)
+static void lkdtm_REFCOUNT_ADD_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -300,7 +300,7 @@ void lkdtm_REFCOUNT_ADD_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
+static void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -312,7 +312,7 @@ void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
+static void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -324,7 +324,7 @@ void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
+static void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -336,7 +336,7 @@ void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
+static void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -348,7 +348,7 @@ void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
}
/* Used to time the existing atomic_t when used for reference counting */
-void lkdtm_ATOMIC_TIMING(void)
+static void lkdtm_ATOMIC_TIMING(void)
{
unsigned int i;
atomic_t count = ATOMIC_INIT(1);
@@ -373,7 +373,7 @@ void lkdtm_ATOMIC_TIMING(void)
* cd /sys/kernel/debug/provoke-crash
* perf stat -B -- cat <(echo REFCOUNT_TIMING) > DIRECT
*/
-void lkdtm_REFCOUNT_TIMING(void)
+static void lkdtm_REFCOUNT_TIMING(void)
{
unsigned int i;
refcount_t count = REFCOUNT_INIT(1);
@@ -390,3 +390,30 @@ void lkdtm_REFCOUNT_TIMING(void)
else
pr_info("refcount timing: done\n");
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(REFCOUNT_INC_OVERFLOW),
+ CRASHTYPE(REFCOUNT_ADD_OVERFLOW),
+ CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW),
+ CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW),
+ CRASHTYPE(REFCOUNT_DEC_ZERO),
+ CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
+ CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
+ CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
+ CRASHTYPE(REFCOUNT_INC_ZERO),
+ CRASHTYPE(REFCOUNT_ADD_ZERO),
+ CRASHTYPE(REFCOUNT_INC_SATURATED),
+ CRASHTYPE(REFCOUNT_DEC_SATURATED),
+ CRASHTYPE(REFCOUNT_ADD_SATURATED),
+ CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED),
+ CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED),
+ CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED),
+ CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED),
+ CRASHTYPE(ATOMIC_TIMING),
+ CRASHTYPE(REFCOUNT_TIMING),
+};
+
+struct crashtype_category refcount_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c
index 82369c6f889e..025b133297a6 100644
--- a/drivers/misc/lkdtm/stackleak.c
+++ b/drivers/misc/lkdtm/stackleak.c
@@ -115,7 +115,7 @@ out:
}
}
-void lkdtm_STACKLEAK_ERASING(void)
+static void lkdtm_STACKLEAK_ERASING(void)
{
unsigned long flags;
@@ -124,7 +124,7 @@ void lkdtm_STACKLEAK_ERASING(void)
local_irq_restore(flags);
}
#else /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
-void lkdtm_STACKLEAK_ERASING(void)
+static void lkdtm_STACKLEAK_ERASING(void)
{
if (IS_ENABLED(CONFIG_HAVE_ARCH_STACKLEAK)) {
pr_err("XFAIL: stackleak is not enabled (CONFIG_GCC_PLUGIN_STACKLEAK=n)\n");
@@ -133,3 +133,12 @@ void lkdtm_STACKLEAK_ERASING(void)
}
}
#endif /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(STACKLEAK_ERASING),
+};
+
+struct crashtype_category stackleak_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 9161ce7ed47a..6215ec995cd3 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -5,6 +5,7 @@
*/
#include "lkdtm.h"
#include <linux/slab.h>
+#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
@@ -30,12 +31,12 @@ static const unsigned char test_text[] = "This is a test.\n";
*/
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
- return stack + 0;
+ return stack + unconst;
}
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
- unsigned char buf[32];
+ unsigned char buf[128];
int i;
/* Exercise stack to avoid everything living in registers. */
@@ -43,7 +44,12 @@ static noinline unsigned char *do_usercopy_stack_callee(int value)
buf[i] = value & 0xff;
}
- return trick_compiler(buf);
+ /*
+ * Put the target buffer in the middle of the stack allocation
+ * so that we don't step on future stack users regardless
+ * of stack growth direction.
+ */
+ return trick_compiler(&buf[(128/2)-32]);
}
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
@@ -66,6 +72,12 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
bad_stack -= sizeof(unsigned long);
}
+#ifdef ARCH_HAS_CURRENT_STACK_POINTER
+ pr_info("stack : %px\n", (void *)current_stack_pointer);
+#endif
+ pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
+ pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));
+
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
@@ -119,7 +131,7 @@ free_user:
* This checks for whole-object size validation with hardened usercopy,
* with or without usercopy whitelisting.
*/
-static void do_usercopy_heap_size(bool to_user)
+static void do_usercopy_slab_size(bool to_user)
{
unsigned long user_addr;
unsigned char *one, *two;
@@ -185,9 +197,9 @@ free_kernel:
/*
* This checks for the specific whitelist window within an object. If this
- * test passes, then do_usercopy_heap_size() tests will pass too.
+ * test passes, then do_usercopy_slab_size() tests will pass too.
*/
-static void do_usercopy_heap_whitelist(bool to_user)
+static void do_usercopy_slab_whitelist(bool to_user)
{
unsigned long user_alloc;
unsigned char *buf = NULL;
@@ -261,42 +273,42 @@ free_alloc:
}
/* Callable tests. */
-void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
+static void lkdtm_USERCOPY_SLAB_SIZE_TO(void)
{
- do_usercopy_heap_size(true);
+ do_usercopy_slab_size(true);
}
-void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
+static void lkdtm_USERCOPY_SLAB_SIZE_FROM(void)
{
- do_usercopy_heap_size(false);
+ do_usercopy_slab_size(false);
}
-void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
+static void lkdtm_USERCOPY_SLAB_WHITELIST_TO(void)
{
- do_usercopy_heap_whitelist(true);
+ do_usercopy_slab_whitelist(true);
}
-void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
+static void lkdtm_USERCOPY_SLAB_WHITELIST_FROM(void)
{
- do_usercopy_heap_whitelist(false);
+ do_usercopy_slab_whitelist(false);
}
-void lkdtm_USERCOPY_STACK_FRAME_TO(void)
+static void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
do_usercopy_stack(true, true);
}
-void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
+static void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
do_usercopy_stack(false, true);
}
-void lkdtm_USERCOPY_STACK_BEYOND(void)
+static void lkdtm_USERCOPY_STACK_BEYOND(void)
{
do_usercopy_stack(true, false);
}
-void lkdtm_USERCOPY_KERNEL(void)
+static void lkdtm_USERCOPY_KERNEL(void)
{
unsigned long user_addr;
@@ -330,6 +342,86 @@ free_user:
vm_munmap(user_addr, PAGE_SIZE);
}
+/*
+ * This expects "kaddr" to point to a PAGE_SIZE allocation, which means
+ * a more complete test that would include copy_from_user() would risk
+ * memory corruption. Just test copy_to_user() here, as that exercises
+ * almost exactly the same code paths.
+ */
+static void do_usercopy_page_span(const char *name, void *kaddr)
+{
+ unsigned long uaddr;
+
+ uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (uaddr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ /* Initialize contents. */
+ memset(kaddr, 0xAA, PAGE_SIZE);
+
+ /* Bump the kaddr forward to detect a page-spanning overflow. */
+ kaddr += PAGE_SIZE / 2;
+
+ pr_info("attempting good copy_to_user() from kernel %s: %px\n",
+ name, kaddr);
+ if (copy_to_user((void __user *)uaddr, kaddr,
+ unconst + (PAGE_SIZE / 2))) {
+ pr_err("copy_to_user() failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
+ name, kaddr);
+ if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
+ pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
+ goto free_user;
+ }
+
+ pr_err("FAIL: bad copy_to_user() not detected!\n");
+ pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
+
+free_user:
+ vm_munmap(uaddr, PAGE_SIZE);
+}
+
+static void lkdtm_USERCOPY_VMALLOC(void)
+{
+ void *addr;
+
+ addr = vmalloc(PAGE_SIZE);
+ if (!addr) {
+ pr_err("vmalloc() failed!?\n");
+ return;
+ }
+ do_usercopy_page_span("vmalloc", addr);
+ vfree(addr);
+}
+
+static void lkdtm_USERCOPY_FOLIO(void)
+{
+ struct folio *folio;
+ void *addr;
+
+ /*
+ * FIXME: Folio checking currently misses 0-order allocations, so
+ * allocate and bump forward to the last page.
+ */
+ folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
+ if (!folio) {
+ pr_err("folio_alloc() failed!?\n");
+ return;
+ }
+ addr = folio_address(folio);
+ if (addr)
+ do_usercopy_page_span("folio", addr + PAGE_SIZE);
+ else
+ pr_err("folio_address() failed?!\n");
+ folio_put(folio);
+}
+
void __init lkdtm_usercopy_init(void)
{
/* Prepare cache that lacks SLAB_USERCOPY flag. */
@@ -345,3 +437,21 @@ void __exit lkdtm_usercopy_exit(void)
{
kmem_cache_destroy(whitelist_cache);
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(USERCOPY_SLAB_SIZE_TO),
+ CRASHTYPE(USERCOPY_SLAB_SIZE_FROM),
+ CRASHTYPE(USERCOPY_SLAB_WHITELIST_TO),
+ CRASHTYPE(USERCOPY_SLAB_WHITELIST_FROM),
+ CRASHTYPE(USERCOPY_STACK_FRAME_TO),
+ CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
+ CRASHTYPE(USERCOPY_STACK_BEYOND),
+ CRASHTYPE(USERCOPY_VMALLOC),
+ CRASHTYPE(USERCOPY_FOLIO),
+ CRASHTYPE(USERCOPY_KERNEL),
+};
+
+struct crashtype_category usercopy_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index ec2a4fce8581..e889a8bd7ac8 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -784,7 +784,7 @@ static int mei_hdcp_component_match(struct device *dev, int subcomponent,
{
struct device *base = data;
- if (strcmp(dev->driver->name, "i915") ||
+ if (!dev->driver || strcmp(dev->driver->name, "i915") ||
subcomponent != I915_COMPONENT_HDCP)
return 0;
diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
index f7380d387bab..5c39457e3f53 100644
--- a/drivers/misc/mei/pxp/mei_pxp.c
+++ b/drivers/misc/mei/pxp/mei_pxp.c
@@ -131,7 +131,7 @@ static int mei_pxp_component_match(struct device *dev, int subcomponent,
{
struct device *base = data;
- if (strcmp(dev->driver->name, "i915") ||
+ if (!dev->driver || strcmp(dev->driver->name, "i915") ||
subcomponent != I915_COMPONENT_PXP)
return 0;
diff --git a/drivers/misc/ocxl/afu_irq.c b/drivers/misc/ocxl/afu_irq.c
index ecdcfae025b7..a06920b7e049 100644
--- a/drivers/misc/ocxl/afu_irq.c
+++ b/drivers/misc/ocxl/afu_irq.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <asm/pnv-ocxl.h>
#include <asm/xive.h>
#include "ocxl_internal.h"
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index d881f5e40ad9..6777c419a8da 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -556,7 +556,9 @@ int ocxl_file_register_afu(struct ocxl_afu *afu)
err_unregister:
ocxl_sysfs_unregister_afu(info); // safe to call even if register failed
+ free_minor(info);
device_unregister(&info->dev);
+ return rc;
err_put:
ocxl_afu_put(afu);
free_minor(info);
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index 9670d02c927f..4cf4c55a5f00 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -6,6 +6,7 @@
#include <linux/mm_types.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
+#include <linux/irqdomain.h>
#include <asm/copro.h>
#include <asm/pnv-ocxl.h>
#include <asm/xive.h>
diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c
index 4b8f1c7d726d..049a12006348 100644
--- a/drivers/misc/pvpanic/pvpanic.c
+++ b/drivers/misc/pvpanic/pvpanic.c
@@ -34,7 +34,9 @@ pvpanic_send_event(unsigned int event)
{
struct pvpanic_instance *pi_cur;
- spin_lock(&pvpanic_lock);
+ if (!spin_trylock(&pvpanic_lock))
+ return;
+
list_for_each_entry(pi_cur, &pvpanic_list, list) {
if (event & pi_cur->capability & pi_cur->events)
iowrite8(event, pi_cur->base);
@@ -55,9 +57,13 @@ pvpanic_panic_notify(struct notifier_block *nb, unsigned long code, void *unused
return NOTIFY_DONE;
}
+/*
+ * Call our notifier very early on panic, deferring the
+ * action taken to the hypervisor.
+ */
static struct notifier_block pvpanic_panic_nb = {
.notifier_call = pvpanic_panic_notify,
- .priority = 1, /* let this called before broken drm_fb_helper() */
+ .priority = INT_MAX,
};
static void pvpanic_remove(void *param)
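The two changes above make the pvpanic notifier safe and early in panic context: spin_trylock() avoids deadlocking if the lock holder is the CPU that panicked, and raising .priority to INT_MAX runs this notifier before lower-priority entries on the panic chain (higher priority runs earlier). A minimal sketch of registering such a notifier with the generic panic_notifier_list API follows; the demo_* names are illustrative and not part of the pvpanic driver.

#include <linux/init.h>
#include <linux/limits.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>

static int demo_panic_notify(struct notifier_block *nb,
			     unsigned long code, void *unused)
{
	/* Panic context: no sleeping, keep the work minimal. */
	return NOTIFY_DONE;
}

/* Entries with a higher .priority run earlier on the chain. */
static struct notifier_block demo_panic_nb = {
	.notifier_call	= demo_panic_notify,
	.priority	= INT_MAX,
};

static int __init demo_init(void)
{
	return atomic_notifier_chain_register(&panic_notifier_list,
					      &demo_panic_nb);
}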
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index f1d8ba6d4857..086ce77d9074 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1452,10 +1452,10 @@ static void vmballoon_reset(struct vmballoon *b)
error = vmballoon_vmci_init(b);
if (error)
- pr_err("failed to initialize vmci doorbell\n");
+ pr_err_once("failed to initialize vmci doorbell\n");
if (vmballoon_send_guest_id(b))
- pr_err("failed to send guest ID to the host\n");
+ pr_err_once("failed to send guest ID to the host\n");
unlock:
up_write(&b->conf_sem);
diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig
index 605794aadf11..b6d4d7fd686a 100644
--- a/drivers/misc/vmw_vmci/Kconfig
+++ b/drivers/misc/vmw_vmci/Kconfig
@@ -5,7 +5,7 @@
config VMWARE_VMCI
tristate "VMware VMCI Driver"
- depends on X86 && PCI
+ depends on (X86 || ARM64) && !CPU_BIG_ENDIAN && PCI
help
This is VMware's Virtual Machine Communication Interface. It enables
high-speed communication between host and guest in a virtual
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
index 6cf3e21c7604..172696abce31 100644
--- a/drivers/misc/vmw_vmci/vmci_context.c
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -665,9 +665,8 @@ int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
{
struct vmci_ctx *context;
- struct vmci_handle_list *notifier, *tmp;
+ struct vmci_handle_list *notifier = NULL, *iter, *tmp;
struct vmci_handle handle;
- bool found = false;
context = vmci_ctx_get(context_id);
if (!context)
@@ -676,23 +675,23 @@ int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
spin_lock(&context->lock);
- list_for_each_entry_safe(notifier, tmp,
+ list_for_each_entry_safe(iter, tmp,
&context->notifier_list, node) {
- if (vmci_handle_is_equal(notifier->handle, handle)) {
- list_del_rcu(&notifier->node);
+ if (vmci_handle_is_equal(iter->handle, handle)) {
+ list_del_rcu(&iter->node);
context->n_notifiers--;
- found = true;
+ notifier = iter;
break;
}
}
spin_unlock(&context->lock);
- if (found)
+ if (notifier)
kvfree_rcu(notifier);
vmci_ctx_put(context);
- return found ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
+ return notifier ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
}
static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context,
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 57a6157209a1..aa7b05de97dd 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -614,6 +614,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
}
if (!mmio_base) {
+ if (IS_ENABLED(CONFIG_ARM64)) {
+ dev_err(&pdev->dev, "MMIO base is invalid\n");
+ return -ENXIO;
+ }
error = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
if (error) {
dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 94ebf7f3fd58..8f2de1893245 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -2577,6 +2577,12 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
if (result < VMCI_SUCCESS)
return result;
+ /*
+ * This virt_wmb() ensures that data written to the queue
+ * is observable before the new producer_tail is.
+ */
+ virt_wmb();
+
vmci_q_header_add_producer_tail(produce_q->q_header, written,
produce_q_size);
return written;
@@ -2620,6 +2626,12 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
if (buf_ready < VMCI_SUCCESS)
return (ssize_t) buf_ready;
+ /*
+ * This virt_rmb() ensures that data from the queue will be read
+ * after we have determined how much is ready to be consumed.
+ */
+ virt_rmb();
+
read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
head = vmci_q_header_consumer_head(produce_q->q_header);
if (likely(head + read < consume_q_size)) {
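The barriers added in the two hunks above implement the usual shared-ring publish/consume ordering: the producer must make queued data visible before advancing producer_tail, and the consumer must order its read of the available count before reading the data it covers. Below is a minimal single-producer/single-consumer sketch of that pattern with the same virt_wmb()/virt_rmb() primitives; struct demo_ring, DEMO_RING_SIZE and the demo_* helpers are illustrative names, not VMCI structures, and fullness checks are omitted for brevity.

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/barrier.h>

#define DEMO_RING_SIZE 64	/* power of two so free-running indices wrap cleanly */

struct demo_ring {
	u32 head;			/* consumer position */
	u32 tail;			/* producer position */
	u8 data[DEMO_RING_SIZE];	/* payload slots */
};

static void demo_produce(struct demo_ring *r, u8 byte)
{
	r->data[r->tail % DEMO_RING_SIZE] = byte;
	/* Make the payload visible before the new tail is. */
	virt_wmb();
	r->tail++;
}

static int demo_consume(struct demo_ring *r, u8 *byte)
{
	if (r->head == r->tail)
		return -EAGAIN;		/* nothing published yet */
	/* Order the tail read before reading the payload it covers. */
	virt_rmb();
	*byte = r->data[r->head % DEMO_RING_SIZE];
	r->head++;
	return 0;
}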
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index af6c3c329076..d6144978e32d 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -508,7 +508,7 @@ config MMC_OMAP_HS
config MMC_WBSD
tristate "Winbond W83L51xD SD/MMC Card Interface support"
- depends on ISA_DMA_API && !M68K
+ depends on ISA_DMA_API
help
This selects the Winbond(R) W83L51xD Secure digital and
Multimedia card Interface.
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 316393c694d7..0db9490dc659 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -31,10 +31,10 @@
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/soc/pxa/cpu.h>
#include <linux/sizes.h>
-#include <mach/hardware.h>
#include <linux/platform_data/mmc-pxamci.h>
#include "pxamci.h"
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 7d96758a8f04..1749dbbacc13 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -16,8 +16,6 @@
#include <linux/mtd/partitions.h>
#include <asm/io.h>
-#include <mach/hardware.h>
-
#include <asm/mach/flash.h>
#define CACHELINESIZE 32
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 28f55f9cf715..0ee452275578 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -97,6 +97,33 @@ out:
return e;
}
+/*
+ * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
+ * @ubi: UBI device description object
+ * @is_wl_pool: whether UBI is filling wear leveling pool
+ *
+ * This helper checks whether there are enough free PEBs (after deducting
+ * the PEBs consumed by fastmap itself) to fill fm_pool and fm_wl_pool; the
+ * fastmap deduction only applies once at least one free PEB has been put
+ * into fm_wl_pool.
+ * For the wear-leveling pool, UBI must also keep free PEBs reserved for
+ * bad-PEB handling, because producing new bad PEBs could otherwise leave
+ * too few free PEBs for user volumes.
+ */
+static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
+{
+ int fm_used = 0; // fastmap non-anchor PEBs.
+ int beb_rsvd_pebs;
+
+ if (!ubi->free.rb_node)
+ return false;
+
+ beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
+ if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
+ fm_used = ubi->fm_size / ubi->leb_size - 1;
+
+ return ubi->free_count - beb_rsvd_pebs > fm_used;
+}
+
/**
* ubi_refill_pools - refills all fastmap PEB pools.
* @ubi: UBI device description object
@@ -120,21 +147,17 @@ void ubi_refill_pools(struct ubi_device *ubi)
wl_tree_add(ubi->fm_anchor, &ubi->free);
ubi->free_count++;
}
- if (ubi->fm_next_anchor) {
- wl_tree_add(ubi->fm_next_anchor, &ubi->free);
- ubi->free_count++;
- }
- /* All available PEBs are in ubi->free, now is the time to get
+ /*
+ * All available PEBs are in ubi->free, now is the time to get
* the best anchor PEBs.
*/
ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
- ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
for (;;) {
enough = 0;
if (pool->size < pool->max_size) {
- if (!ubi->free.rb_node)
+ if (!has_enough_free_count(ubi, false))
break;
e = wl_get_wle(ubi);
@@ -147,8 +170,7 @@ void ubi_refill_pools(struct ubi_device *ubi)
enough++;
if (wl_pool->size < wl_pool->max_size) {
- if (!ubi->free.rb_node ||
- (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+ if (!has_enough_free_count(ubi, true))
break;
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
@@ -253,6 +275,58 @@ out:
return ret;
}
+/**
+ * next_peb_for_wl - returns next PEB to be used internally by the
+ * WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int pnum;
+
+ if (pool->used == pool->size)
+ return NULL;
+
+ pnum = pool->pebs[pool->used];
+ return ubi->lookuptbl[pnum];
+}
+
+/**
+ * need_wear_leveling - checks whether wear-leveling work needs to be scheduled.
+ * UBI fetches free PEBs from wl_pool, but we look at free PEBs in both
+ * 'wl_pool' and 'ubi->free', because a free PEB in the 'ubi->free' tree may
+ * be moved into 'wl_pool' by ubi_refill_pools().
+ *
+ * @ubi: UBI device description object
+ */
+static bool need_wear_leveling(struct ubi_device *ubi)
+{
+ int ec;
+ struct ubi_wl_entry *e;
+
+ if (!ubi->used.rb_node)
+ return false;
+
+ e = next_peb_for_wl(ubi);
+ if (!e) {
+ if (!ubi->free.rb_node)
+ return false;
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ ec = e->ec;
+ } else {
+ ec = e->ec;
+ if (ubi->free.rb_node) {
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ ec = max(ec, e->ec);
+ }
+ }
+ e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+
+ return ec - e->ec >= UBI_WL_THRESHOLD;
+}
+
/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
*
* @ubi: UBI device description object
@@ -286,20 +360,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
+ struct ubi_wl_entry *anchor;
spin_lock(&ubi->wl_lock);
- /* Do we have a next anchor? */
- if (!ubi->fm_next_anchor) {
- ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
- if (!ubi->fm_next_anchor)
- /* Tell wear leveling to produce a new anchor PEB */
- ubi->fm_do_produce_anchor = 1;
+ /* Do we already have an anchor? */
+ if (ubi->fm_anchor) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
}
- /* Do wear leveling to get a new anchor PEB or check the
- * existing next anchor candidate.
- */
+ /* See if we can find an anchor PEB on the list of free PEBs */
+ anchor = ubi_wl_get_fm_peb(ubi, 1);
+ if (anchor) {
+ ubi->fm_anchor = anchor;
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ ubi->fm_do_produce_anchor = 1;
+ /* No luck, trigger wear leveling to produce a new anchor PEB. */
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;
@@ -381,11 +461,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
ubi->fm_anchor = NULL;
}
- if (ubi->fm_next_anchor) {
- return_unused_peb(ubi, ubi->fm_next_anchor);
- ubi->fm_next_anchor = NULL;
- }
-
if (ubi->fm) {
for (i = 0; i < ubi->fm->used_blocks; i++)
kfree(ubi->fm->e[i]);
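As a concrete illustration of the has_enough_free_count() rule introduced earlier in this file (numbers are hypothetical, and fastmap is assumed enabled with a non-empty fm_wl_pool): with free_count = 10, beb_rsvd_pebs = 4 and a fastmap spanning fm_size / leb_size = 3 LEBs, fm_used works out to 3 - 1 = 2 non-anchor PEBs. The wear-leveling pool may then only be refilled while 10 - 4 > 2 holds, i.e. while at least 7 free PEBs remain, whereas the ordinary fm_pool skips the bad-block reserve and only needs free_count > 2.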
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 6b5f1ffd961b..6e95c4b1473e 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1230,17 +1230,6 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
- if (ubi->fm_next_anchor) {
- fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
-
- fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
- set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
- fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
-
- free_peb_count++;
- fm_pos += sizeof(*fec);
- ubi_assert(fm_pos <= ubi->fm_size);
- }
fmh->free_peb_count = cpu_to_be32(free_peb_count);
ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 7c083ad58274..078112e23dfd 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -489,8 +489,7 @@ struct ubi_debug_info {
* @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
* @fast_attach: non-zero if UBI was attached by fastmap
- * @fm_anchor: The new anchor PEB used during fastmap update
- * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
+ * @fm_anchor: The next anchor PEB to use for fastmap
* @fm_do_produce_anchor: If true produce an anchor PEB in wl
*
* @used: RB-tree of used physical eraseblocks
@@ -601,7 +600,6 @@ struct ubi_device {
int fm_work_scheduled;
int fast_attach;
struct ubi_wl_entry *fm_anchor;
- struct ubi_wl_entry *fm_next_anchor;
int fm_do_produce_anchor;
/* Wear-leveling sub-system's stuff */
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 1bc7b3a05604..6ea95ade4ca6 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -309,7 +309,6 @@ out_mapping:
ubi->volumes[vol_id] = NULL;
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
- ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 8455f1d47f3c..55bae06cf408 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -670,7 +670,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
ubi_assert(!ubi->move_from && !ubi->move_to);
ubi_assert(!ubi->move_to_put);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (!next_peb_for_wl(ubi) ||
+#else
if (!ubi->free.rb_node ||
+#endif
(!ubi->used.rb_node && !ubi->scrub.rb_node)) {
/*
* No free physical eraseblocks? Well, they must be waiting in
@@ -689,16 +693,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
#ifdef CONFIG_MTD_UBI_FASTMAP
e1 = find_anchor_wl_entry(&ubi->used);
- if (e1 && ubi->fm_next_anchor &&
- (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+ if (e1 && ubi->fm_anchor &&
+ (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
ubi->fm_do_produce_anchor = 1;
- /* fm_next_anchor is no longer considered a good anchor
- * candidate.
+ /*
+ * fm_anchor is no longer considered a good anchor.
* NULL assignment also prevents multiple wear level checks
* of this PEB.
*/
- wl_tree_add(ubi->fm_next_anchor, &ubi->free);
- ubi->fm_next_anchor = NULL;
+ wl_tree_add(ubi->fm_anchor, &ubi->free);
+ ubi->fm_anchor = NULL;
ubi->free_count++;
}
@@ -1003,8 +1007,6 @@ out_cancel:
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
int err = 0;
- struct ubi_wl_entry *e1;
- struct ubi_wl_entry *e2;
struct ubi_work *wrk;
spin_lock(&ubi->wl_lock);
@@ -1017,6 +1019,13 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
* the WL worker has to be scheduled anyway.
*/
if (!ubi->scrub.rb_node) {
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (!need_wear_leveling(ubi))
+ goto out_unlock;
+#else
+ struct ubi_wl_entry *e1;
+ struct ubi_wl_entry *e2;
+
if (!ubi->used.rb_node || !ubi->free.rb_node)
/* No physical eraseblocks - no deal */
goto out_unlock;
@@ -1032,6 +1041,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
goto out_unlock;
+#endif
dbg_wl("schedule wear-leveling");
} else
dbg_wl("schedule scrubbing");
@@ -1085,12 +1095,13 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
if (!err) {
spin_lock(&ubi->wl_lock);
- if (!ubi->fm_disabled && !ubi->fm_next_anchor &&
+ if (!ubi->fm_disabled && !ubi->fm_anchor &&
e->pnum < UBI_FM_MAX_START) {
- /* Abort anchor production, if needed it will be
+ /*
+ * Abort anchor production, if needed it will be
* enabled again in the wear leveling started below.
*/
- ubi->fm_next_anchor = e;
+ ubi->fm_anchor = e;
ubi->fm_do_produce_anchor = 0;
} else {
wl_tree_add(e, &ubi->free);
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index c93a53293786..5ebe374a08ae 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -5,6 +5,8 @@
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi);
+static bool need_wear_leveling(struct ubi_device *ubi);
static void ubi_fastmap_close(struct ubi_device *ubi);
static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
{
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index de4ea518c793..ebee5f07a208 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -57,7 +57,7 @@ static char *type_str[] = {
"AMT_MSG_MEMBERSHIP_QUERY",
"AMT_MSG_MEMBERSHIP_UPDATE",
"AMT_MSG_MULTICAST_DATA",
- "AMT_MSG_TEARDOWM",
+ "AMT_MSG_TEARDOWN",
};
static char *action_str[] = {
@@ -2423,7 +2423,7 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
}
}
- return false;
+ return true;
report:
iph = ip_hdr(skb);
@@ -2679,7 +2679,7 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
amt = rcu_dereference_sk_user_data(sk);
if (!amt) {
err = true;
- goto out;
+ goto drop;
}
skb->dev = amt->dev;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3b7baaeae82c..f85372adf042 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -6159,7 +6159,9 @@ static int bond_check_params(struct bond_params *params)
strscpy_pad(params->primary, primary, sizeof(params->primary));
memcpy(params->arp_targets, arp_target, sizeof(arp_target));
+#if IS_ENABLED(CONFIG_IPV6)
memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS);
+#endif
return 0;
}
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index f427fa1737c7..6f404f9c34e3 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -290,11 +290,6 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
addr6 = nla_get_in6_addr(attr);
- if (ipv6_addr_type(&addr6) & IPV6_ADDR_LINKLOCAL) {
- NL_SET_ERR_MSG(extack, "Invalid IPv6 addr6");
- return -EINVAL;
- }
-
bond_opt_initextra(&newval, &addr6, sizeof(addr6));
err = __bond_opt_set(bond, BOND_OPT_NS_TARGETS,
&newval);
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 64f7db2627ce..1f8323ad5282 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -34,10 +34,8 @@ static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
static int bond_option_arp_ip_targets_set(struct bonding *bond,
const struct bond_opt_value *newval);
-#if IS_ENABLED(CONFIG_IPV6)
static int bond_option_ns_ip6_targets_set(struct bonding *bond,
const struct bond_opt_value *newval);
-#endif
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_arp_all_targets_set(struct bonding *bond,
@@ -299,7 +297,6 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_arp_ip_targets_set
},
-#if IS_ENABLED(CONFIG_IPV6)
[BOND_OPT_NS_TARGETS] = {
.id = BOND_OPT_NS_TARGETS,
.name = "ns_ip6_target",
@@ -307,7 +304,6 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_ns_ip6_targets_set
},
-#endif
[BOND_OPT_DOWNDELAY] = {
.id = BOND_OPT_DOWNDELAY,
.name = "downdelay",
@@ -1254,6 +1250,12 @@ static int bond_option_ns_ip6_targets_set(struct bonding *bond,
return 0;
}
+#else
+static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ return -EPERM;
+}
#endif
static int bond_option_arp_validate_set(struct bonding *bond,
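Dropping the #if IS_ENABLED(CONFIG_IPV6) guards around the option table only works because the setter now has a stub under #else, so the table entry always carries a valid .set callback. This is the common kernel pattern for config-gated handlers; a generic sketch follows, in which CONFIG_DEMO_FEATURE, struct demo_dev and demo_option_set() are made-up names.

#include <linux/errno.h>
#include <linux/kconfig.h>

struct demo_dev {
	int val;
};

#if IS_ENABLED(CONFIG_DEMO_FEATURE)
static int demo_option_set(struct demo_dev *dev, int val)
{
	/* Real implementation, compiled only when the feature is enabled. */
	dev->val = val;
	return 0;
}
#else
static int demo_option_set(struct demo_dev *dev, int val)
{
	/* Feature compiled out: fail cleanly, callers need no #ifdef. */
	return -EPERM;
}
#endif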
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index cfe37be42be4..43be458422b3 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -129,6 +129,21 @@ static void bond_info_show_master(struct seq_file *seq)
printed = 1;
}
seq_printf(seq, "\n");
+
+#if IS_ENABLED(CONFIG_IPV6)
+ printed = 0;
+ seq_printf(seq, "NS IPv6 target/s (xx::xx form):");
+
+ for (i = 0; (i < BOND_MAX_NS_TARGETS); i++) {
+ if (ipv6_addr_any(&bond->params.ns_targets[i]))
+ break;
+ if (printed)
+ seq_printf(seq, ",");
+ seq_printf(seq, " %pI6c", &bond->params.ns_targets[i]);
+ printed = 1;
+ }
+ seq_printf(seq, "\n");
+#endif
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index fbb32aa49b24..48cf344750ff 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1603,12 +1603,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
return 0;
}
- if (bitmap_weight(free_bins, dev->num_arl_bins) == 0)
- return -ENOSPC;
-
*idx = find_first_bit(free_bins, dev->num_arl_bins);
-
- return -ENOENT;
+ return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
}
static int b53_arl_op(struct b53_device *dev, int op, int port,
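The simplification above leans on a property of find_first_bit(): when no bit is set it returns the bitmap size, so a single call both locates a free bin and detects the "all bins in use" case that the removed bitmap_weight() test used to catch. A small illustration of that idiom, where demo_pick_free_slot() is a made-up helper:

#include <linux/bitmap.h>
#include <linux/errno.h>

static int demo_pick_free_slot(const unsigned long *free_slots,
			       unsigned int nbits)
{
	unsigned long idx = find_first_bit(free_slots, nbits);

	/* find_first_bit() returns nbits when no bit is set. */
	if (idx >= nbits)
		return -ENOSPC;

	return idx;
}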
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 5d2c57a7c708..0b49d243e00b 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3960,6 +3960,7 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
*/
child = of_get_child_by_name(np, "mdio");
err = mv88e6xxx_mdio_register(chip, child, false);
+ of_node_put(child);
if (err)
return err;
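The added of_node_put() balances the reference taken by of_get_child_by_name(); without it the child node's refcount would leak on every probe. The general pattern, sketched with illustrative names (demo_setup_mdio() is not part of the mv88e6xxx driver):

#include <linux/of.h>

static int demo_setup_mdio(struct device_node *np)
{
	struct device_node *child;
	int err = 0;

	/* of_get_child_by_name() returns the node with a reference held. */
	child = of_get_child_by_name(np, "mdio");
	if (child) {
		/* ... use 'child' while the reference is held ... */
		of_node_put(child);	/* drop it once we are done */
	}

	return err;
}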
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 3272aca496dc..47fc8e6963d5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2180,13 +2180,9 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
return -EOPNOTSUPP;
- /* All filters are already in use, we cannot match more rules */
- if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
- RXCHK_BRCM_TAG_MAX)
- return -ENOSPC;
-
index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
if (index >= RXCHK_BRCM_TAG_MAX)
+ /* All filters are already in use, we cannot match more rules */
return -ENOSPC;
/* Location is the classification ID, and index is the position
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index c78883c3a2c8..45634579adb6 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1,32 +1,7 @@
-/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index daf894a97050..35b8cea7f886 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -1,31 +1,6 @@
-/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
*/
#ifndef __DPAA_H
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index ee62d25cac81..4fee74c024bd 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -1,32 +1,6 @@
-/* Copyright 2008-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
*/
#include <linux/init.h>
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
index 409c1dc39430..889f89df9930 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
@@ -1,32 +1,6 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
*/
#undef TRACE_SYSTEM
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 5750f9a56393..73f07881ce2d 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -1,32 +1,6 @@
-/* Copyright 2008-2016 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
index 15f37c5b8dc1..dafb26f81f95 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -69,7 +69,7 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
return 0;
err_mdiobus_reg:
- pci_release_mem_regions(pdev);
+ pci_release_region(pdev, 0);
err_pci_mem_reg:
pci_disable_device(pdev);
err_pci_enable:
@@ -88,7 +88,7 @@ static void enetc_pci_mdio_remove(struct pci_dev *pdev)
mdiobus_unregister(bus);
mdio_priv = bus->priv;
iounmap(mdio_priv->hw->port);
- pci_release_mem_regions(pdev);
+ pci_release_region(pdev, 0);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 46f439641441..9183d480b70b 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -47,8 +47,3 @@ ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o
-
-# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
-ifndef KBUILD_EXTRA_WARN
-CFLAGS_ice_switch.o += -Wno-array-bounds
-endif
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index b25e27c4d887..05cb9dd7035a 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -601,12 +601,30 @@ struct ice_aqc_sw_rules {
__le32 addr_low;
};
+/* Add switch rule response:
+ * The content of the return buffer is the same as the input buffer. The status field and
+ * LUT index are updated as part of the response
+ */
+struct ice_aqc_sw_rules_elem_hdr {
+ __le16 type; /* Switch rule type, one of T_... */
+#define ICE_AQC_SW_RULES_T_LKUP_RX 0x0
+#define ICE_AQC_SW_RULES_T_LKUP_TX 0x1
+#define ICE_AQC_SW_RULES_T_LG_ACT 0x2
+#define ICE_AQC_SW_RULES_T_VSI_LIST_SET 0x3
+#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR 0x4
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET 0x5
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR 0x6
+ __le16 status;
+} __packed __aligned(sizeof(__le16));
+
/* Add/Update/Get/Remove lookup Rx/Tx command/response entry
* This structure describes the lookup rules and associated actions. "index"
* is returned as part of a response to a successful Add command, and can be
* used to identify the rule for Update/Get/Remove commands.
*/
struct ice_sw_rule_lkup_rx_tx {
+ struct ice_aqc_sw_rules_elem_hdr hdr;
+
__le16 recipe_id;
#define ICE_SW_RECIPE_LOGICAL_PORT_FWD 10
/* Source port for LOOKUP_RX and source VSI in case of LOOKUP_TX */
@@ -683,14 +701,16 @@ struct ice_sw_rule_lkup_rx_tx {
* lookup-type
*/
__le16 hdr_len;
- u8 hdr[];
-};
+ u8 hdr_data[];
+} __packed __aligned(sizeof(__le16));
/* Add/Update/Remove large action command/response entry
* "index" is returned as part of a response to a successful Add command, and
* can be used to identify the action for Update/Get/Remove commands.
*/
struct ice_sw_rule_lg_act {
+ struct ice_aqc_sw_rules_elem_hdr hdr;
+
__le16 index; /* Index in large action table */
__le16 size;
/* Max number of large actions */
@@ -744,45 +764,19 @@ struct ice_sw_rule_lg_act {
#define ICE_LG_ACT_STAT_COUNT_S 3
#define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S)
__le32 act[]; /* array of size for actions */
-};
+} __packed __aligned(sizeof(__le16));
/* Add/Update/Remove VSI list command/response entry
* "index" is returned as part of a response to a successful Add command, and
* can be used to identify the VSI list for Update/Get/Remove commands.
*/
struct ice_sw_rule_vsi_list {
+ struct ice_aqc_sw_rules_elem_hdr hdr;
+
__le16 index; /* Index of VSI/Prune list */
__le16 number_vsi;
__le16 vsi[]; /* Array of number_vsi VSI numbers */
-};
-
-/* Query VSI list command/response entry */
-struct ice_sw_rule_vsi_list_query {
- __le16 index;
- DECLARE_BITMAP(vsi_list, ICE_MAX_VSI);
-} __packed;
-
-/* Add switch rule response:
- * Content of return buffer is same as the input buffer. The status field and
- * LUT index are updated as part of the response
- */
-struct ice_aqc_sw_rules_elem {
- __le16 type; /* Switch rule type, one of T_... */
-#define ICE_AQC_SW_RULES_T_LKUP_RX 0x0
-#define ICE_AQC_SW_RULES_T_LKUP_TX 0x1
-#define ICE_AQC_SW_RULES_T_LG_ACT 0x2
-#define ICE_AQC_SW_RULES_T_VSI_LIST_SET 0x3
-#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR 0x4
-#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET 0x5
-#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR 0x6
- __le16 status;
- union {
- struct ice_sw_rule_lkup_rx_tx lkup_tx_rx;
- struct ice_sw_rule_lg_act lg_act;
- struct ice_sw_rule_vsi_list vsi_list;
- struct ice_sw_rule_vsi_list_query vsi_list_query;
- } __packed pdata;
-};
+} __packed __aligned(sizeof(__le16));
/* Query PFC Mode (direct 0x0302)
* Set PFC Mode (direct 0x0303)
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 9f0a4dfb4818..8d8f3eec79ee 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1282,18 +1282,13 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(tcp, 0),
};
-#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
- (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
- (DUMMY_ETH_HDR_LEN * \
- sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
-#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
- (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
-#define ICE_SW_RULE_LG_ACT_SIZE(n) \
- (offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
- ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
-#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
- (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
- ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
+#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
+#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s) \
+ ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
+#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s) \
+ ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
+#define ICE_SW_RULE_LG_ACT_SIZE(s, n) struct_size((s), act, (n))
+#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
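The rewritten size macros above rely on struct_size() from <linux/overflow.h>, which computes sizeof(*ptr) plus a count of trailing flexible-array elements with overflow checking, so the per-rule sizes no longer need hand-written offsetof() arithmetic. A minimal sketch of the same idiom on a made-up structure (struct demo_rule and demo_alloc() are not ice types):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_rule {
	__le16 index;
	__le16 number_vsi;
	__le16 vsi[];		/* flexible array member */
};

static struct demo_rule *demo_alloc(u16 num_vsi)
{
	struct demo_rule *r;

	/* sizeof(*r) + num_vsi * sizeof(r->vsi[0]), overflow-checked. */
	r = kzalloc(struct_size(r, vsi, num_vsi), GFP_KERNEL);

	return r;
}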
@@ -2376,7 +2371,8 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
*/
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
- struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
+ struct ice_sw_rule_lkup_rx_tx *s_rule,
+ enum ice_adminq_opc opc)
{
u16 vlan_id = ICE_MAX_VLAN_ID + 1;
u16 vlan_tpid = ETH_P_8021Q;
@@ -2388,15 +2384,14 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
u8 q_rgn;
if (opc == ice_aqc_opc_remove_sw_rules) {
- s_rule->pdata.lkup_tx_rx.act = 0;
- s_rule->pdata.lkup_tx_rx.index =
- cpu_to_le16(f_info->fltr_rule_id);
- s_rule->pdata.lkup_tx_rx.hdr_len = 0;
+ s_rule->act = 0;
+ s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
+ s_rule->hdr_len = 0;
return;
}
eth_hdr_sz = sizeof(dummy_eth_header);
- eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
+ eth_hdr = s_rule->hdr_data;
/* initialize the ether header with a dummy header */
memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
@@ -2481,14 +2476,14 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
}
- s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
+ s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
/* Recipe set depending on lookup type */
- s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
- s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
- s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+ s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
+ s_rule->src = cpu_to_le16(f_info->src);
+ s_rule->act = cpu_to_le32(act);
if (daddr)
ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
@@ -2502,7 +2497,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
/* Create the switch rule with the final dummy Ethernet header */
if (opc != ice_aqc_opc_update_sw_rules)
- s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
+ s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
}
/**
@@ -2519,7 +2514,8 @@ static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
u16 sw_marker, u16 l_id)
{
- struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
+ struct ice_sw_rule_lkup_rx_tx *rx_tx;
+ struct ice_sw_rule_lg_act *lg_act;
/* For software marker we need 3 large actions
* 1. FWD action: FWD TO VSI or VSI LIST
* 2. GENERIC VALUE action to hold the profile ID
@@ -2540,18 +2536,18 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
* 1. Large Action
* 2. Look up Tx Rx
*/
- lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
- rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
+ lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
+ rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
if (!lg_act)
return -ENOMEM;
- rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
+ rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);
/* Fill in the first switch rule i.e. large action */
- lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
- lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
- lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);
+ lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
+ lg_act->index = cpu_to_le16(l_id);
+ lg_act->size = cpu_to_le16(num_lg_acts);
/* First action VSI forwarding or VSI list forwarding depending on how
* many VSIs
@@ -2563,13 +2559,13 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
- lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);
+ lg_act->act[0] = cpu_to_le32(act);
/* Second action descriptor type */
act = ICE_LG_ACT_GENERIC;
act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
- lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
+ lg_act->act[1] = cpu_to_le32(act);
act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
@@ -2579,24 +2575,22 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
ICE_LG_ACT_GENERIC_VALUE_M;
- lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
+ lg_act->act[2] = cpu_to_le32(act);
/* call the fill switch rule to fill the lookup Tx Rx structure */
ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
ice_aqc_opc_update_sw_rules);
/* Update the action to point to the large action ID */
- rx_tx->pdata.lkup_tx_rx.act =
- cpu_to_le32(ICE_SINGLE_ACT_PTR |
- ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
- ICE_SINGLE_ACT_PTR_VAL_M));
+ rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
+ ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
+ ICE_SINGLE_ACT_PTR_VAL_M));
/* Use the filter rule ID of the previously created rule with single
* act. Once the update happens, hardware will treat this as large
* action
*/
- rx_tx->pdata.lkup_tx_rx.index =
- cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
+ rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
ice_aqc_opc_update_sw_rules, NULL);
@@ -2658,7 +2652,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
enum ice_sw_lkup_type lkup_type)
{
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_vsi_list *s_rule;
u16 s_rule_size;
u16 rule_type;
int status;
@@ -2681,7 +2675,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
else
return -EINVAL;
- s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
+ s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
return -ENOMEM;
@@ -2691,13 +2685,13 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
goto exit;
}
/* AQ call requires hw_vsi_id(s) */
- s_rule->pdata.vsi_list.vsi[i] =
+ s_rule->vsi[i] =
cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
}
- s_rule->type = cpu_to_le16(rule_type);
- s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
- s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
+ s_rule->hdr.type = cpu_to_le16(rule_type);
+ s_rule->number_vsi = cpu_to_le16(num_vsi);
+ s_rule->index = cpu_to_le16(vsi_list_id);
status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
@@ -2745,13 +2739,14 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw,
struct ice_fltr_list_entry *f_entry)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
enum ice_sw_lkup_type l_type;
struct ice_sw_recipe *recp;
int status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
+ GFP_KERNEL);
if (!s_rule)
return -ENOMEM;
fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
@@ -2772,17 +2767,16 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw,
ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
ice_aqc_opc_add_sw_rules);
- status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
+ status = ice_aq_sw_rules(hw, s_rule,
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
ice_aqc_opc_add_sw_rules, NULL);
if (status) {
devm_kfree(ice_hw_to_dev(hw), fm_entry);
goto ice_create_pkt_fwd_rule_exit;
}
- f_entry->fltr_info.fltr_rule_id =
- le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
- fm_entry->fltr_info.fltr_rule_id =
- le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+ f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
+ fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
/* The book keeping entries will get removed when base driver
* calls remove filter AQ command
@@ -2807,20 +2801,22 @@ ice_create_pkt_fwd_rule_exit:
static int
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
int status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
+ GFP_KERNEL);
if (!s_rule)
return -ENOMEM;
ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
- s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
+ s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
/* Update switch rule with new rule set to forward VSI list */
- status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
+ status = ice_aq_sw_rules(hw, s_rule,
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
ice_aqc_opc_update_sw_rules, NULL);
devm_kfree(ice_hw_to_dev(hw), s_rule);
@@ -3104,17 +3100,17 @@ static int
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
enum ice_sw_lkup_type lkup_type)
{
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_vsi_list *s_rule;
u16 s_rule_size;
int status;
- s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
+ s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
return -ENOMEM;
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
- s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
+ s_rule->index = cpu_to_le16(vsi_list_id);
/* Free the vsi_list resource that we allocated. It is assumed that the
* list is empty at this point.
@@ -3274,10 +3270,10 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
if (remove_rule) {
/* Remove the lookup rule */
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
GFP_KERNEL);
if (!s_rule) {
status = -ENOMEM;
@@ -3288,8 +3284,8 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
ice_aqc_opc_remove_sw_rules);
status = ice_aq_sw_rules(hw, s_rule,
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
- ice_aqc_opc_remove_sw_rules, NULL);
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
+ 1, ice_aqc_opc_remove_sw_rules, NULL);
/* Remove a book keeping from the list */
devm_kfree(ice_hw_to_dev(hw), s_rule);
@@ -3437,7 +3433,7 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
*/
int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
- struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
+ struct ice_sw_rule_lkup_rx_tx *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
struct list_head *rule_head;
u16 total_elem_left, s_rule_size;
@@ -3501,7 +3497,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
/* Allocate switch rule buffer for the bulk update for unicast */
- s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
+ s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
GFP_KERNEL);
if (!s_rule) {
@@ -3517,8 +3513,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
if (is_unicast_ether_addr(mac_addr)) {
ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
ice_aqc_opc_add_sw_rules);
- r_iter = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)r_iter + s_rule_size);
+ r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
}
}
@@ -3527,7 +3522,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
/* Call AQ switch rule in AQ_MAX chunk */
for (total_elem_left = num_unicast; total_elem_left > 0;
total_elem_left -= elem_sent) {
- struct ice_aqc_sw_rules_elem *entry = r_iter;
+ struct ice_sw_rule_lkup_rx_tx *entry = r_iter;
elem_sent = min_t(u8, total_elem_left,
(ICE_AQ_MAX_BUF_LEN / s_rule_size));
@@ -3536,7 +3531,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
NULL);
if (status)
goto ice_add_mac_exit;
- r_iter = (struct ice_aqc_sw_rules_elem *)
+ r_iter = (typeof(s_rule))
((u8 *)r_iter + (elem_sent * s_rule_size));
}
@@ -3548,8 +3543,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
struct ice_fltr_mgmt_list_entry *fm_entry;
if (is_unicast_ether_addr(mac_addr)) {
- f_info->fltr_rule_id =
- le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
+ f_info->fltr_rule_id = le16_to_cpu(r_iter->index);
f_info->fltr_act = ICE_FWD_TO_VSI;
/* Create an entry to track this MAC address */
fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
@@ -3565,8 +3559,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
*/
list_add(&fm_entry->list_entry, rule_head);
- r_iter = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)r_iter + s_rule_size);
+ r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
}
}
@@ -3865,7 +3858,7 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
*/
int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
struct ice_fltr_info f_info;
enum ice_adminq_opc opcode;
u16 s_rule_size;
@@ -3876,8 +3869,8 @@ int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
return -EINVAL;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
- s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+ s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule) :
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
@@ -3915,7 +3908,7 @@ int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
if (status || !(f_info.flag & ICE_FLTR_TX_RX))
goto out;
if (set) {
- u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+ u16 index = le16_to_cpu(s_rule->index);
if (f_info.flag & ICE_FLTR_TX) {
hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
@@ -5641,7 +5634,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
*/
static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
- struct ice_aqc_sw_rules_elem *s_rule,
+ struct ice_sw_rule_lkup_rx_tx *s_rule,
const struct ice_dummy_pkt_profile *profile)
{
u8 *pkt;
@@ -5650,7 +5643,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
/* Start with a packet with a pre-defined/dummy content. Then, fill
* in the header values to be looked up or matched.
*/
- pkt = s_rule->pdata.lkup_tx_rx.hdr;
+ pkt = s_rule->hdr_data;
memcpy(pkt, profile->pkt, profile->pkt_len);
@@ -5740,7 +5733,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
}
}
- s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(profile->pkt_len);
+ s_rule->hdr_len = cpu_to_le16(profile->pkt_len);
return 0;
}
@@ -5963,7 +5956,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
struct ice_rule_query_data *added_entry)
{
struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
- struct ice_aqc_sw_rules_elem *s_rule = NULL;
+ struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
const struct ice_dummy_pkt_profile *profile;
u16 rid = 0, i, rule_buf_sz, vsi_handle;
struct list_head *rule_head;
@@ -6040,7 +6033,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
return status;
}
- rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + profile->pkt_len;
+ rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
if (!s_rule)
return -ENOMEM;
@@ -6089,16 +6082,15 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
* by caller)
*/
if (rinfo->rx) {
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
- s_rule->pdata.lkup_tx_rx.src =
- cpu_to_le16(hw->port_info->lport);
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->src = cpu_to_le16(hw->port_info->lport);
} else {
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
- s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+ s_rule->src = cpu_to_le16(rinfo->sw_act.src);
}
- s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
- s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+ s_rule->recipe_id = cpu_to_le16(rid);
+ s_rule->act = cpu_to_le32(act);
status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
if (status)
@@ -6107,7 +6099,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (rinfo->tun_type != ICE_NON_TUN &&
rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
- s_rule->pdata.lkup_tx_rx.hdr,
+ s_rule->hdr_data,
profile->offsets);
if (status)
goto err_ice_add_adv_rule;
@@ -6135,8 +6127,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
adv_fltr->lkups_cnt = lkups_cnt;
adv_fltr->rule_info = *rinfo;
- adv_fltr->rule_info.fltr_rule_id =
- le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+ adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
sw = hw->switch_info;
sw->recp_list[rid].adv_rule = true;
rule_head = &sw->recp_list[rid].filt_rules;
@@ -6384,17 +6375,16 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
mutex_unlock(rule_lock);
if (remove_rule) {
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
u16 rule_buf_sz;
- rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+ rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
if (!s_rule)
return -ENOMEM;
- s_rule->pdata.lkup_tx_rx.act = 0;
- s_rule->pdata.lkup_tx_rx.index =
- cpu_to_le16(list_elem->rule_info.fltr_rule_id);
- s_rule->pdata.lkup_tx_rx.hdr_len = 0;
+ s_rule->act = 0;
+ s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
+ s_rule->hdr_len = 0;
status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
rule_buf_sz, 1,
ice_aqc_opc_remove_sw_rules, NULL);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index ecac75e71395..eb641e5512d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -23,9 +23,6 @@
#define ICE_PROFID_IPV6_GTPU_TEID 46
#define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70
-#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
- (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
-
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
u16 vsi_num;
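For reference, a minimal sketch of the flexible-array sizing pattern the ice rules move to above; the struct and function names here are invented for illustration and are not the driver's real definitions.

#include <asm/byteorder.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* A lookup rule that ends in a variable-length dummy packet, sized with
 * struct_size() instead of a hand-rolled offsetof() constant.
 */
struct example_lkup_rule {
	__le16 index;
	__le16 hdr_len;
	u8 hdr_data[];
};

static struct example_lkup_rule *example_alloc_rule(u16 pkt_len)
{
	struct example_lkup_rule *rule;

	rule = kzalloc(struct_size(rule, hdr_data, pkt_len), GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->hdr_len = cpu_to_le16(pkt_len);
	return rule;
}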
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index a79201a9a6f0..a9da85e418a4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -579,7 +579,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
- return blkaddr;
+ return false;
/* Registers that can be accessed from PF/VF */
if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) ||
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 54f235c216a9..2dd192b5e4e0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -355,7 +355,7 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
struct otx2_nic *pf = netdev_priv(netdev);
- if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ if (!bitmap_empty(&pf->flow_cfg->dmacflt_bmap,
pf->flow_cfg->dmacflt_max_flows))
netdev_warn(netdev,
"Add %pM to CGX/RPM DMAC filters list as well\n",
@@ -438,7 +438,7 @@ int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
return 0;
if (flow_cfg->nr_flows == flow_cfg->max_flows ||
- bitmap_weight(&flow_cfg->dmacflt_bmap,
+ !bitmap_empty(&flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows))
return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
else
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index fe3472e04c23..9106c359e64c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1120,7 +1120,7 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
- if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ if (enable && !bitmap_empty(&pf->flow_cfg->dmacflt_bmap,
pf->flow_cfg->dmacflt_max_flows))
netdev_warn(pf->netdev,
"CGX/RPM internal loopback might not work as DMAC filters are active\n");
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index a9d4fd8945bb..b3b3c079a0fa 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2212,6 +2212,9 @@ static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
+ if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
+ return -EINVAL;
+
/* only tcp dst ipv4 is meaningful, others are meaningless */
fsp->flow_type = TCP_V4_FLOW;
fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
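The mtk fix above range-checks a user-supplied ethtool rule location before using it as an array index; a hedged sketch of the same check in isolation, with made-up names.

#include <linux/errno.h>
#include <linux/types.h>

static int example_get_rule(u32 location, const __be32 *table,
			    u32 n_entries, __be32 *dst)
{
	/* location comes from user space and must be validated */
	if (location >= n_entries)
		return -EINVAL;

	*dst = table[location];
	return 0;
}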
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e10b7b04b894..c56d2194cbfc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1994,21 +1994,16 @@ static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
- int port, err;
+ int p, port, err;
struct mlx4_vport_state *vp_admin;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_slave_state *slave_state =
&priv->mfunc.master.slave_state[slave];
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
&priv->dev, slave);
- int min_port = find_first_bit(actv_ports.ports,
- priv->dev.caps.num_ports) + 1;
- int max_port = min_port - 1 +
- bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
- for (port = min_port; port <= max_port; port++) {
- if (!test_bit(port - 1, actv_ports.ports))
- continue;
+ for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
+ port = p + 1;
priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
priv->mfunc.master.vf_admin[slave].enable_smi[port];
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
@@ -2063,19 +2058,13 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
{
- int port;
+ int p, port;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
&priv->dev, slave);
- int min_port = find_first_bit(actv_ports.ports,
- priv->dev.caps.num_ports) + 1;
- int max_port = min_port - 1 +
- bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
-
- for (port = min_port; port <= max_port; port++) {
- if (!test_bit(port - 1, actv_ports.ports))
- continue;
+ for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
+ port = p + 1;
priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
MLX4_VF_SMI_DISABLED;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
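Both mlx4 hunks above replace a min_port..max_port scan plus test_bit() with for_each_set_bit(); a short sketch of that iterator, including the same 0-based-bit to 1-based-port adjustment (names are illustrative).

#include <linux/bitops.h>

static void example_walk_ports(const unsigned long *ports, unsigned int nbits)
{
	unsigned int p;

	for_each_set_bit(p, ports, nbits) {
		int port = p + 1;	/* hardware ports are numbered from 1 */

		/* per-port work would go here */
		(void)port;
	}
}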
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 11f7c03ae81b..0eb9d74547f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -571,18 +571,32 @@ static int _next_phys_dev(struct mlx5_core_dev *mdev,
return 1;
}
+static void *pci_get_other_drvdata(struct device *this, struct device *other)
+{
+ if (this->driver != other->driver)
+ return NULL;
+
+ return pci_get_drvdata(to_pci_dev(other));
+}
+
static int next_phys_dev(struct device *dev, const void *data)
{
- struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
- struct mlx5_core_dev *mdev = madev->mdev;
+ struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
+
+ mdev = pci_get_other_drvdata(this->device, dev);
+ if (!mdev)
+ return 0;
return _next_phys_dev(mdev, data);
}
static int next_phys_dev_lag(struct device *dev, const void *data)
{
- struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
- struct mlx5_core_dev *mdev = madev->mdev;
+ struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
+
+ mdev = pci_get_other_drvdata(this->device, dev);
+ if (!mdev)
+ return 0;
if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
!MLX5_CAP_GEN(mdev, lag_master) ||
@@ -596,19 +610,17 @@ static int next_phys_dev_lag(struct device *dev, const void *data)
static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
int (*match)(struct device *dev, const void *data))
{
- struct auxiliary_device *adev;
- struct mlx5_adev *madev;
+ struct device *next;
if (!mlx5_core_is_pf(dev))
return NULL;
- adev = auxiliary_find_device(NULL, dev, match);
- if (!adev)
+ next = bus_find_device(&pci_bus_type, NULL, dev, match);
+ if (!next)
return NULL;
- madev = container_of(adev, struct mlx5_adev, adev);
- put_device(&adev->dev);
- return madev->mdev;
+ put_device(next);
+ return pci_get_drvdata(to_pci_dev(next));
}
/* Must be called with intf_mutex held */
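A hedged sketch of the lookup pattern the mlx5 code moves to above: walk the PCI bus with bus_find_device(), match only peers bound to the same driver, and drop the reference the helper took once the drvdata has been read. The match callback and function names are illustrative.

#include <linux/device.h>
#include <linux/pci.h>

static int example_match(struct device *dev, const void *data)
{
	const struct device *self = data;

	return dev->driver == self->driver && dev != self;
}

static void *example_find_peer_drvdata(struct device *self)
{
	struct device *peer;
	void *drvdata;

	peer = bus_find_device(&pci_bus_type, NULL, self, example_match);
	if (!peer)
		return NULL;

	drvdata = pci_get_drvdata(to_pci_dev(peer));
	put_device(peer);	/* bus_find_device() took a reference */
	return drvdata;
}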
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 65d3c4865abf..b6c15efe92ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -764,6 +764,7 @@ struct mlx5e_rq {
u8 wq_type;
u32 rqn;
struct mlx5_core_dev *mdev;
+ struct mlx5e_channel *channel;
u32 umr_mkey;
struct mlx5e_dma_info wqe_overflow;
@@ -1076,6 +1077,9 @@ void mlx5e_close_cq(struct mlx5e_cq *cq);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
+void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c);
+void mlx5e_trigger_napi_sched(struct napi_struct *napi);
+
int mlx5e_open_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 4130a871de61..6e3a90a959e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -12,6 +12,7 @@ struct mlx5e_post_act;
enum {
MLX5E_TC_FT_LEVEL = 0,
MLX5E_TC_TTC_FT_LEVEL,
+ MLX5E_TC_MISS_LEVEL,
};
struct mlx5e_tc_table {
@@ -20,6 +21,7 @@ struct mlx5e_tc_table {
*/
struct mutex t_lock;
struct mlx5_flow_table *t;
+ struct mlx5_flow_table *miss_t;
struct mlx5_fs_chains *chains;
struct mlx5e_post_act *post_act;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 335b20b6383b..047f88f09203 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -736,6 +736,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
mlx5e_ptp_rx_set_fs(c->priv);
mlx5e_activate_rq(&c->rq);
+ mlx5e_trigger_napi_sched(&c->napi);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 2684e9da9f41..fc366e66d0b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -123,6 +123,8 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
xskrq->stats->recover++;
}
+ mlx5e_trigger_napi_icosq(icosq->channel);
+
mutex_unlock(&icosq->channel->icosq_recovery_lock);
return 0;
@@ -166,6 +168,10 @@ static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
mlx5e_activate_rq(rq);
rq->stats->recover++;
+ if (rq->channel)
+ mlx5e_trigger_napi_icosq(rq->channel);
+ else
+ mlx5e_trigger_napi_sched(rq->cq.napi);
return 0;
out:
clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index bceea7a1589e..25f51f80a9b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -715,7 +715,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_flow_attr *attr,
struct flow_rule *flow_rule,
struct mlx5e_mod_hdr_handle **mh,
- u8 zone_restore_id, bool nat)
+ u8 zone_restore_id, bool nat_table, bool has_nat)
{
DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, MLX5_CT_MIN_MOD_ACTS);
DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);
@@ -731,11 +731,12 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
&attr->ct_attr.ct_labels_id);
if (err)
return -EOPNOTSUPP;
- if (nat) {
- err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule,
- &mod_acts);
- if (err)
- goto err_mapping;
+ if (nat_table) {
+ if (has_nat) {
+ err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule, &mod_acts);
+ if (err)
+ goto err_mapping;
+ }
ct_state |= MLX5_CT_STATE_NAT_BIT;
}
@@ -750,7 +751,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
if (err)
goto err_mapping;
- if (nat) {
+ if (nat_table && has_nat) {
attr->modify_hdr = mlx5_modify_header_alloc(ct_priv->dev, ct_priv->ns_type,
mod_acts.num_actions,
mod_acts.actions);
@@ -818,7 +819,9 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
&zone_rule->mh,
- zone_restore_id, nat);
+ zone_restore_id,
+ nat,
+ mlx5_tc_ct_entry_has_nat(entry));
if (err) {
ct_dbg("Failed to create ct entry mod hdr");
goto err_mod_hdr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 857840ab1e91..11f2a7fb72a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -179,6 +179,7 @@ static void mlx5e_activate_trap(struct mlx5e_trap *trap)
{
napi_enable(&trap->napi);
mlx5e_activate_rq(&trap->rq);
+ mlx5e_trigger_napi_sched(&trap->napi);
}
void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index 279cd8f4e79f..2c520394aa1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -117,6 +117,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
goto err_remove_pool;
mlx5e_activate_xsk(c);
+ mlx5e_trigger_napi_icosq(c);
/* Don't wait for WQEs, because the newer xdpsock sample doesn't provide
* any Fill Ring entries at the setup stage.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 3ad7f1301fa8..98ed9ef3a6bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -64,6 +64,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
rq->clock = &mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
+ rq->channel = c;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
@@ -179,10 +180,6 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
mlx5e_reporter_icosq_resume_recovery(c);
/* TX queue is created active. */
-
- spin_lock_bh(&c->async_icosq_lock);
- mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock_bh(&c->async_icosq_lock);
}
void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 05c015515cce..087952b84ccb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -475,6 +475,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->clock = &mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
+ rq->channel = c;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
@@ -1066,13 +1067,6 @@ err_free_rq:
void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- if (rq->icosq) {
- mlx5e_trigger_irq(rq->icosq);
- } else {
- local_bh_disable();
- napi_schedule(rq->cq.napi);
- local_bh_enable();
- }
}
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -2227,6 +2221,20 @@ static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
return 0;
}
+void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c)
+{
+ spin_lock_bh(&c->async_icosq_lock);
+ mlx5e_trigger_irq(&c->async_icosq);
+ spin_unlock_bh(&c->async_icosq_lock);
+}
+
+void mlx5e_trigger_napi_sched(struct napi_struct *napi)
+{
+ local_bh_disable();
+ napi_schedule(napi);
+ local_bh_enable();
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
@@ -2308,6 +2316,8 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_activate_xsk(c);
+
+ mlx5e_trigger_napi_icosq(c);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -4559,6 +4569,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
unlock:
mutex_unlock(&priv->state_lock);
+
+ /* Re-evaluate netdev features; some depend on whether an XDP program is attached. */
+ if (!err)
+ netdev_update_features(netdev);
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 49dea02a12d2..34bf11cdf90f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -4714,6 +4714,33 @@ static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
return tc_tbl_size;
}
+static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table **ft = &priv->fs.tc.miss_t;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ int err = 0;
+
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.level = MLX5E_TC_MISS_LEVEL;
+ ft_attr.prio = 0;
+ ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
+
+ *ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(*ft)) {
+ err = PTR_ERR(*ft);
+ netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err);
+ }
+
+ return err;
+}
+
+static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
+{
+ mlx5_destroy_flow_table(priv->fs.tc.miss_t);
+}
+
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
@@ -4746,19 +4773,23 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
}
tc->mapping = chains_mapping;
+ err = mlx5e_tc_nic_create_miss_table(priv);
+ if (err)
+ goto err_chains;
+
if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
- attr.default_ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
+ attr.default_ft = priv->fs.tc.miss_t;
attr.mapping = chains_mapping;
tc->chains = mlx5_chains_create(dev, &attr);
if (IS_ERR(tc->chains)) {
err = PTR_ERR(tc->chains);
- goto err_chains;
+ goto err_miss;
}
tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
@@ -4781,6 +4812,8 @@ err_reg:
mlx5_tc_ct_clean(tc->ct);
mlx5e_tc_post_act_destroy(tc->post_act);
mlx5_chains_destroy(tc->chains);
+err_miss:
+ mlx5e_tc_nic_destroy_miss_table(priv);
err_chains:
mapping_destroy(chains_mapping);
err_mapping:
@@ -4821,6 +4854,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_tc_post_act_destroy(tc->post_act);
mapping_destroy(tc->mapping);
mlx5_chains_destroy(tc->chains);
+ mlx5e_tc_nic_destroy_miss_table(priv);
}
int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 84caffe4c278..fdcf7f529330 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -114,7 +114,7 @@
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
#define KERNEL_NIC_TC_NUM_PRIOS 1
-#define KERNEL_NIC_TC_NUM_LEVELS 2
+#define KERNEL_NIC_TC_NUM_LEVELS 3
#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 887ee0f729d1..2935614f6fa9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -87,6 +87,11 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
enable_vfs_hca:
num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs);
for (vf = 0; vf < num_vfs; vf++) {
+ /* Notify listeners of this VF before it is enabled so they can
+ * set up any per-VF state they need.
+ */
+ blocking_notifier_call_chain(&sriov->vfs_ctx[vf].notifier,
+ MLX5_PF_NOTIFY_ENABLE_VF, dev);
err = mlx5_core_enable_hca(dev, vf + 1);
if (err) {
mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
@@ -127,6 +132,11 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
for (vf = num_vfs - 1; vf >= 0; vf--) {
if (!sriov->vfs_ctx[vf].enabled)
continue;
+ /* Notify listeners of this VF before it is disabled so they can
+ * release any per-VF resources they hold.
+ */
+ blocking_notifier_call_chain(&sriov->vfs_ctx[vf].notifier,
+ MLX5_PF_NOTIFY_DISABLE_VF, dev);
err = mlx5_core_disable_hca(dev, vf + 1);
if (err) {
mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
@@ -257,7 +267,7 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
struct pci_dev *pdev = dev->pdev;
- int total_vfs;
+ int total_vfs, i;
if (!mlx5_core_is_pf(dev))
return 0;
@@ -269,6 +279,9 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
if (!sriov->vfs_ctx)
return -ENOMEM;
+ for (i = 0; i < total_vfs; i++)
+ BLOCKING_INIT_NOTIFIER_HEAD(&sriov->vfs_ctx[i].notifier);
+
return 0;
}
@@ -281,3 +294,53 @@ void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
kfree(sriov->vfs_ctx);
}
+
+/**
+ * mlx5_sriov_blocking_notifier_unregister - Unregister a notifier block
+ * from a VF's notification chain.
+ *
+ * @mdev: The mlx5 core device.
+ * @vf_id: The VF id.
+ * @nb: The notifier block to be unregistered.
+ */
+void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb)
+{
+ struct mlx5_vf_context *vfs_ctx;
+ struct mlx5_core_sriov *sriov;
+
+ sriov = &mdev->priv.sriov;
+ if (WARN_ON(vf_id < 0 || vf_id >= sriov->num_vfs))
+ return;
+
+ vfs_ctx = &sriov->vfs_ctx[vf_id];
+ blocking_notifier_chain_unregister(&vfs_ctx->notifier, nb);
+}
+EXPORT_SYMBOL(mlx5_sriov_blocking_notifier_unregister);
+
+/**
+ * mlx5_sriov_blocking_notifier_register - Register a notifier block on
+ * a VF's notification chain.
+ *
+ * @mdev: The mlx5 core device.
+ * @vf_id: The VF id.
+ * @nb: The notifier block to be called upon the VF events.
+ *
+ * Returns 0 on success or an error code.
+ */
+int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb)
+{
+ struct mlx5_vf_context *vfs_ctx;
+ struct mlx5_core_sriov *sriov;
+
+ sriov = &mdev->priv.sriov;
+ if (vf_id < 0 || vf_id >= sriov->num_vfs)
+ return -EINVAL;
+
+ vfs_ctx = &sriov->vfs_ctx[vf_id];
+ return blocking_notifier_chain_register(&vfs_ctx->notifier, nb);
+}
+EXPORT_SYMBOL(mlx5_sriov_blocking_notifier_register);
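A hypothetical consumer of the per-VF notifier chain added above; the MLX5_PF_NOTIFY_ENABLE_VF/MLX5_PF_NOTIFY_DISABLE_VF events and the register helper come from the patch, but the callback, context struct, and header location are assumptions made for illustration.

#include <linux/notifier.h>
#include <linux/mlx5/driver.h>

struct example_vf_ctx {
	struct notifier_block nb;
};

static int example_vf_event(struct notifier_block *nb, unsigned long event,
			    void *data)
{
	switch (event) {
	case MLX5_PF_NOTIFY_ENABLE_VF:
		/* set up per-VF state before the VF HCA is enabled */
		break;
	case MLX5_PF_NOTIFY_DISABLE_VF:
		/* tear down per-VF state before the VF HCA is disabled */
		break;
	}

	return NOTIFY_OK;
}

static int example_track_vf(struct mlx5_core_dev *mdev, int vf_id,
			    struct example_vf_ctx *ctx)
{
	ctx->nb.notifier_call = example_vf_event;
	return mlx5_sriov_blocking_notifier_register(mdev, vf_id, &ctx->nb);
}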
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 728f81882589..6a9abba92df6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -44,11 +44,10 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns,
err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
if (err && action) {
err = mlx5dr_action_destroy(action);
- if (err) {
- action = NULL;
- mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
- err);
- }
+ if (err)
+ mlx5_core_err(ns->dev,
+ "Failed to destroy action (%d)\n", err);
+ action = NULL;
}
ft->fs_dr_table.miss_action = action;
if (old_miss_action) {
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index efbddf24ba31..af81236b4b4e 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1164,9 +1164,14 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
if (!phydev)
goto return_error;
- ret = phy_connect_direct(netdev, phydev,
- lan743x_phy_link_status_change,
- PHY_INTERFACE_MODE_GMII);
+ if (adapter->is_pci11x1x)
+ ret = phy_connect_direct(netdev, phydev,
+ lan743x_phy_link_status_change,
+ PHY_INTERFACE_MODE_RGMII);
+ else
+ ret = phy_connect_direct(netdev, phydev,
+ lan743x_phy_link_status_change,
+ PHY_INTERFACE_MODE_GMII);
if (ret)
goto return_error;
}
@@ -2936,20 +2941,27 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
"SGMII operation\n");
+ adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45;
+ adapter->mdiobus->read = lan743x_mdiobus_c45_read;
+ adapter->mdiobus->write = lan743x_mdiobus_c45_write;
+ adapter->mdiobus->name = "lan743x-mdiobus-c45";
+ netif_dbg(adapter, drv, adapter->netdev,
+ "lan743x-mdiobus-c45\n");
} else {
sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
- "(R)GMII operation\n");
+ "RGMII operation\n");
+ /* Only C22 is supported when using the RGMII interface */
+ adapter->mdiobus->probe_capabilities = MDIOBUS_C22;
+ adapter->mdiobus->read = lan743x_mdiobus_read;
+ adapter->mdiobus->write = lan743x_mdiobus_write;
+ adapter->mdiobus->name = "lan743x-mdiobus";
+ netif_dbg(adapter, drv, adapter->netdev,
+ "lan743x-mdiobus\n");
}
-
- adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45;
- adapter->mdiobus->read = lan743x_mdiobus_c45_read;
- adapter->mdiobus->write = lan743x_mdiobus_c45_write;
- adapter->mdiobus->name = "lan743x-mdiobus-c45";
- netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus-c45\n");
} else {
adapter->mdiobus->read = lan743x_mdiobus_read;
adapter->mdiobus->write = lan743x_mdiobus_write;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 6ad68b422129..5784c4161e5e 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -1120,8 +1120,13 @@ static int lan966x_probe(struct platform_device *pdev)
lan966x->ports[p]->fwnode = fwnode_handle_get(portnp);
serdes = devm_of_phy_get(lan966x->dev, to_of_node(portnp), NULL);
- if (!IS_ERR(serdes))
- lan966x->ports[p]->serdes = serdes;
+ if (PTR_ERR(serdes) == -ENODEV)
+ serdes = NULL;
+ if (IS_ERR(serdes)) {
+ err = PTR_ERR(serdes);
+ goto cleanup_ports;
+ }
+ lan966x->ports[p]->serdes = serdes;
lan966x_port_init(lan966x->ports[p]);
}
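The lan966x change above treats a missing serdes phy (-ENODEV) as optional while still failing on any other error; a small sketch of that pattern with illustrative names.

#include <linux/err.h>
#include <linux/phy/phy.h>

static int example_get_optional_serdes(struct device *dev,
				       struct device_node *np,
				       struct phy **out)
{
	struct phy *serdes = devm_of_phy_get(dev, np, NULL);

	if (PTR_ERR(serdes) == -ENODEV)	/* no serdes described: not an error */
		serdes = NULL;
	if (IS_ERR(serdes))
		return PTR_ERR(serdes);

	*out = serdes;
	return 0;
}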
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index e3da9ac20e57..e509d6dcba5c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -314,7 +314,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_desc_set_dma_addr(txd, dma_addr);
+ nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
/* starts at bit 0 */
BUILD_BUG_ON(!(NFDK_DESC_TX_DMA_LEN_HEAD & 1));
@@ -339,7 +339,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_desc_set_dma_addr(txd, dma_addr);
+ nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
dma_len -= dlen_type;
dma_addr += dlen_type + 1;
@@ -929,7 +929,7 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_desc_set_dma_addr(txd, dma_addr);
+ nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
dma_len -= tmp_dlen;
@@ -940,7 +940,7 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
dma_len -= 1;
dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_desc_set_dma_addr(txd, dma_addr);
+ nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
dlen_type &= NFDK_DESC_TX_DMA_LEN;
dma_len -= dlen_type;
@@ -1332,7 +1332,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_desc_set_dma_addr(txd, dma_addr);
+ nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
dma_len -= tmp_dlen;
@@ -1343,7 +1343,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
dma_len -= 1;
dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_desc_set_dma_addr(txd, dma_addr);
+ nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
dlen_type &= NFDK_DESC_TX_DMA_LEN;
dma_len -= dlen_type;
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
index c41e0975eb73..0ea51d9f2325 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
@@ -46,8 +46,7 @@
struct nfp_nfdk_tx_desc {
union {
struct {
- u8 dma_addr_hi; /* High bits of host buf address */
- u8 padding; /* Must be zero */
+ __le16 dma_addr_hi; /* High bits of host buf address */
__le16 dma_len_type; /* Length to DMA for this desc */
__le32 dma_addr_lo; /* Low 32bit of host buf addr */
};
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 428783b7018b..3dd3a92d2e7f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -117,13 +117,22 @@ struct nfp_nfdk_tx_buf;
/* Convenience macro for writing dma address into RX/TX descriptors */
#define nfp_desc_set_dma_addr(desc, dma_addr) \
do { \
- __typeof(desc) __d = (desc); \
+ __typeof__(desc) __d = (desc); \
dma_addr_t __addr = (dma_addr); \
\
__d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr)); \
__d->dma_addr_hi = upper_32_bits(__addr) & 0xff; \
} while (0)
+#define nfp_nfdk_tx_desc_set_dma_addr(desc, dma_addr) \
+ do { \
+ __typeof__(desc) __d = (desc); \
+ dma_addr_t __addr = (dma_addr); \
+ \
+ __d->dma_addr_hi = cpu_to_le16(upper_32_bits(__addr) & 0xff); \
+ __d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr)); \
+ } while (0)
+
/**
* struct nfp_net_tx_ring - TX ring structure
* @r_vec: Back pointer to ring vector structure
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 61c8b450aafb..df0afd271a21 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -289,8 +289,6 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
/* Init to unknowns */
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
- ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
cmd->base.port = PORT_OTHER;
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;
@@ -298,6 +296,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
port = nfp_port_from_netdev(netdev);
eth_port = nfp_port_get_eth_port(port);
if (eth_port) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
AUTONEG_ENABLE : AUTONEG_DISABLE;
nfp_net_set_fec_link_mode(eth_port, cmd);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 23b668de4640..69b0ede75cae 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -319,44 +319,27 @@ free_rdma_dev:
void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, bool check)
{
- int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
- int last_line = bmap->max_count / (64 * 8);
- int last_item = last_line * 8 +
- DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
- u64 *pmap = (u64 *)bmap->bitmap;
- int line, item, offset;
- u8 str_last_line[200] = { 0 };
-
- if (!weight || !check)
+ unsigned int bit, weight, nbits;
+ unsigned long *b;
+
+ if (!check)
+ goto end;
+
+ weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+ if (!weight)
goto end;
DP_NOTICE(p_hwfn,
"%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
bmap->name, bmap->max_count, weight);
- /* print aligned non-zero lines, if any */
- for (item = 0, line = 0; line < last_line; line++, item += 8)
- if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
+ for (bit = 0; bit < bmap->max_count; bit += 512) {
+ b = bmap->bitmap + BITS_TO_LONGS(bit);
+ nbits = min(bmap->max_count - bit, 512U);
+
+ if (!bitmap_empty(b, nbits))
DP_NOTICE(p_hwfn,
- "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
- line,
- pmap[item],
- pmap[item + 1],
- pmap[item + 2],
- pmap[item + 3],
- pmap[item + 4],
- pmap[item + 5],
- pmap[item + 6], pmap[item + 7]);
-
- /* print last unaligned non-zero line, if any */
- if ((bmap->max_count % (64 * 8)) &&
- (bitmap_weight((unsigned long *)&pmap[item],
- bmap->max_count - item * 64))) {
- offset = sprintf(str_last_line, "line 0x%04x: ", line);
- for (; item < last_item; item++)
- offset += sprintf(str_last_line + offset,
- "0x%016llx ", pmap[item]);
- DP_NOTICE(p_hwfn, "%s\n", str_last_line);
+ "line 0x%04x: %*pb\n", bit / 512, nbits, b);
}
end:
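The qed cleanup above leans on the kernel's "%*pb" printk extension: the field width is the number of valid bits and the argument is the bitmap pointer, so a whole line of the map prints without hand-formatting 64-bit words. A minimal sketch:

#include <linux/bitmap.h>
#include <linux/printk.h>

static void example_dump_bitmap(const unsigned long *map, unsigned int nbits)
{
	pr_notice("bitmap: %*pb\n", nbits, map);
}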
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 071b4aeaddf2..134ecfca96a3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -76,7 +76,7 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
* We delay for a short while if an async destroy QP is still expected.
* Beyond the added delay we clear the bitmap anyway.
*/
- while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+ while (!bitmap_empty(rcid_map->bitmap, rcid_map->max_count)) {
/* If the HW device is during recovery, all resources are
* immediately reset without receiving a per-cid indication
* from HW. In this case we don't expect the cid bitmap to be
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index f4919e7ee77b..032b8c0bd788 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -298,6 +298,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1;
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->tx_channel_offset = 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
rc = pci_enable_msi(efx->pci_dev);
@@ -318,6 +319,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->tx_channel_offset = 1;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
@@ -954,10 +956,6 @@ int efx_set_channels(struct efx_nic *efx)
struct efx_channel *channel;
int rc;
- efx->tx_channel_offset =
- efx_separate_tx_channels ?
- efx->n_channels - efx->n_tx_channels : 0;
-
if (efx->xdp_tx_queue_count) {
EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 318db906a154..723bbeea5d0c 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1530,7 +1530,7 @@ static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
- return true;
+ return channel && channel->channel >= channel->efx->tx_channel_offset;
}
static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel)
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index 2465cf4d505c..017212a40df3 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -299,6 +299,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1;
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->tx_channel_offset = 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
rc = pci_enable_msi(efx->pci_dev);
@@ -319,6 +320,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->tx_channel_offset = 1;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
@@ -958,10 +960,6 @@ int efx_siena_set_channels(struct efx_nic *efx)
struct efx_channel *channel;
int rc;
- efx->tx_channel_offset =
- efx_siena_separate_tx_channels ?
- efx->n_channels - efx->n_tx_channels : 0;
-
if (efx->xdp_tx_queue_count) {
EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
index a8f6c3699c8b..c4a97fbf4672 100644
--- a/drivers/net/ethernet/sfc/siena/net_driver.h
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -1529,7 +1529,7 @@ static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
- return true;
+ return channel && channel->channel >= channel->efx->tx_channel_offset;
}
static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 0b0be0898ac5..f9f80933e0c9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -1161,6 +1161,7 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0 0x7aac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1 0x7aad
#define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G 0x54ac
+#define PCI_DEVICE_ID_INTEL_RPLP_SGMII1G 0x51ac
static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
@@ -1179,6 +1180,7 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
{ PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &tgl_sgmii1g_phy0_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3b81d4e9dc83..d1a7cf4567bc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -7129,9 +7129,9 @@ int stmmac_dvr_probe(struct device *device,
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
if (ret < 0) {
- dev_err(priv->device,
- "%s: MDIO bus (id: %d) registration failed",
- __func__, priv->plat->bus_id);
+ dev_err_probe(priv->device, ret,
+ "%s: MDIO bus (id: %d) registration failed\n",
+ __func__, priv->plat->bus_id);
goto error_mdio_register;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 9bc625fccca0..03d3d1f7aa4b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -482,7 +482,7 @@ int stmmac_mdio_register(struct net_device *ndev)
err = of_mdiobus_register(new_bus, mdio_node);
if (err != 0) {
- dev_err(dev, "Cannot register the MDIO bus\n");
+ dev_err_probe(dev, err, "Cannot register the MDIO bus\n");
goto bus_register_fail;
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 34197c67f8d9..fb92d4c1547d 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -9,6 +9,7 @@
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
@@ -1788,6 +1789,7 @@ static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
if (IS_ERR(cpts)) {
int ret = PTR_ERR(cpts);
+ of_node_put(node);
if (ret == -EOPNOTSUPP) {
dev_info(dev, "cpts disabled\n");
return 0;
@@ -1981,7 +1983,9 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
- phylink = phylink_create(&port->slave.phylink_config, dev->fwnode, port->slave.phy_if,
+ phylink = phylink_create(&port->slave.phylink_config,
+ of_node_to_fwnode(port->slave.phy_node),
+ port->slave.phy_if,
&am65_cpsw_phylink_mac_ops);
if (IS_ERR(phylink))
return PTR_ERR(phylink);
@@ -2662,9 +2666,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
if (!node)
return -ENOENT;
common->port_num = of_get_child_count(node);
+ of_node_put(node);
if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
return -ENOENT;
- of_node_put(node);
common->rx_flow_id_base = -1;
init_completion(&common->tdown_complete);
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 385aa63ab4bb..d3b3255ac3d1 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1095,7 +1095,7 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
ret = gsi_trans_page_add(trans, page, len, offset);
if (ret)
- __free_pages(page, get_order(buffer_size));
+ put_page(page);
else
trans->data = page; /* transaction owns page now */
@@ -1418,11 +1418,8 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
} else {
struct page *page = trans->data;
- if (page) {
- u32 buffer_size = endpoint->config.rx.buffer_size;
-
- __free_pages(page, get_order(buffer_size));
- }
+ if (page)
+ put_page(page);
}
}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 832f09ac075e..817577e713d7 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -99,6 +99,7 @@ struct pcpu_secy_stats {
* struct macsec_dev - private data
* @secy: SecY config
* @real_dev: pointer to underlying netdevice
+ * @dev_tracker: refcount tracker for @real_dev reference
* @stats: MACsec device stats
* @secys: linked list of SecY's on the underlying device
* @gro_cells: pointer to the Generic Receive Offload cell
@@ -107,6 +108,7 @@ struct pcpu_secy_stats {
struct macsec_dev {
struct macsec_secy secy;
struct net_device *real_dev;
+ netdevice_tracker dev_tracker;
struct pcpu_secy_stats __percpu *stats;
struct list_head secys;
struct gro_cells gro_cells;
@@ -3459,6 +3461,9 @@ static int macsec_dev_init(struct net_device *dev)
if (is_zero_ether_addr(dev->broadcast))
memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
+ /* Get macsec's reference to real_dev */
+ dev_hold_track(real_dev, &macsec->dev_tracker, GFP_KERNEL);
+
return 0;
}
@@ -3704,6 +3709,8 @@ static void macsec_free_netdev(struct net_device *dev)
free_percpu(macsec->stats);
free_percpu(macsec->secy.tx_sc.stats);
+ /* Release macsec's reference to real_dev */
+ dev_put_track(macsec->real_dev, &macsec->dev_tracker);
}
static void macsec_setup(struct net_device *dev)
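A short sketch of the tracked-reference pattern the macsec change adopts above: dev_hold_track()/dev_put_track() tie each netdev reference to a netdevice_tracker so a leaked reference can be attributed to its holder. The surrounding struct and functions are illustrative.

#include <linux/netdevice.h>

struct example_upper {
	struct net_device *lower;
	netdevice_tracker tracker;
};

static void example_attach(struct example_upper *up, struct net_device *lower)
{
	up->lower = lower;
	dev_hold_track(lower, &up->tracker, GFP_KERNEL);
}

static void example_detach(struct example_upper *up)
{
	dev_put_track(up->lower, &up->tracker);
	up->lower = NULL;
}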
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 73926006d319..6a467e7817a6 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -433,20 +433,21 @@ static void at803x_context_restore(struct phy_device *phydev,
static int at803x_set_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
- struct net_device *ndev = phydev->attached_dev;
- const u8 *mac;
int ret, irq_enabled;
- unsigned int i;
- static const unsigned int offsets[] = {
- AT803X_LOC_MAC_ADDR_32_47_OFFSET,
- AT803X_LOC_MAC_ADDR_16_31_OFFSET,
- AT803X_LOC_MAC_ADDR_0_15_OFFSET,
- };
-
- if (!ndev)
- return -ENODEV;
if (wol->wolopts & WAKE_MAGIC) {
+ struct net_device *ndev = phydev->attached_dev;
+ const u8 *mac;
+ unsigned int i;
+ static const unsigned int offsets[] = {
+ AT803X_LOC_MAC_ADDR_32_47_OFFSET,
+ AT803X_LOC_MAC_ADDR_16_31_OFFSET,
+ AT803X_LOC_MAC_ADDR_0_15_OFFSET,
+ };
+
+ if (!ndev)
+ return -ENODEV;
+
mac = (const u8 *) ndev->dev_addr;
if (!is_valid_ether_addr(mac))
@@ -857,6 +858,9 @@ static int at803x_probe(struct phy_device *phydev)
if (phydev->drv->phy_id == ATH8031_PHY_ID) {
int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
int mode_cfg;
+ struct ethtool_wolinfo wol = {
+ .wolopts = 0,
+ };
if (ccr < 0)
goto err;
@@ -872,6 +876,13 @@ static int at803x_probe(struct phy_device *phydev)
priv->is_fiber = true;
break;
}
+
+ /* Disable WOL by default */
+ ret = at803x_set_wol(phydev, &wol);
+ if (ret < 0) {
+ phydev_err(phydev, "failed to disable WOL on probe: %d\n", ret);
+ goto err;
+ }
}
return 0;
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index c65fb5f5d2dc..03abe6233bbb 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -180,7 +180,7 @@ static void fixed_phy_del(int phy_addr)
if (fp->link_gpiod)
gpiod_put(fp->link_gpiod);
kfree(fp);
- ida_simple_remove(&phy_fixed_ida, phy_addr);
+ ida_free(&phy_fixed_ida, phy_addr);
return;
}
}
@@ -244,13 +244,13 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
}
/* Get the next available PHY address, up to PHY_MAX_ADDR */
- phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
+ phy_addr = ida_alloc_max(&phy_fixed_ida, PHY_MAX_ADDR - 1, GFP_KERNEL);
if (phy_addr < 0)
return ERR_PTR(phy_addr);
ret = fixed_phy_add_gpiod(irq, phy_addr, status, gpiod);
if (ret < 0) {
- ida_simple_remove(&phy_fixed_ida, phy_addr);
+ ida_free(&phy_fixed_ida, phy_addr);
return ERR_PTR(ret);
}
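The fixed_phy conversion above moves from ida_simple_get()/ida_simple_remove() to ida_alloc_max()/ida_free(); note that ida_alloc_max() takes an inclusive upper bound, hence PHY_MAX_ADDR - 1 keeps the old [0, PHY_MAX_ADDR) range. A minimal sketch with an illustrative IDA:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_id(unsigned int nr_ids)
{
	/* allocates the lowest free id in [0, nr_ids - 1] */
	return ida_alloc_max(&example_ida, nr_ids - 1, GFP_KERNEL);
}

static void example_put_id(int id)
{
	ida_free(&example_ida, id);
}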
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index cdca00c0dc1f..d55f59ce4a31 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -441,7 +441,7 @@ static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx)
* .bind which is called before usbnet sets up dev->maxpacket
*/
if (val != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
- val % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+ val % usb_maxpacket(dev->udev, dev->out) == 0)
val++;
/* we might need to flush any pending tx buffers if running */
@@ -465,7 +465,7 @@ static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx)
usbnet_update_max_qlen(dev);
/* never pad more than 3 full USB packets per transfer */
- ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out, 1),
+ ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out),
CDC_NCM_MIN_TX_PKT, ctx->tx_max);
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 636a405844c5..3226ab33afae 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -4421,7 +4421,7 @@ static int lan78xx_probe(struct usb_interface *intf,
goto out4;
period = ep_intr->desc.bInterval;
- maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
+ maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
buf = kmalloc(maxp, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
@@ -4439,7 +4439,7 @@ static int lan78xx_probe(struct usb_interface *intf,
dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
}
- dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+ dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
/* Reject broken descriptors. */
if (dev->maxpacket == 0) {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 79f8bd849b1a..571a399c195d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1366,6 +1366,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1230, 2)}, /* Telit LE910Cx */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1250, 0)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
@@ -1388,6 +1389,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
{QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
{QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)}, /* Cinterion MV31 RmNet */
+ {QMI_FIXED_INTF(0x1e2d, 0x00b9, 0)}, /* Cinterion MV31 RmNet based on new baseline */
{QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 4e70dec30e5a..f79333fe1783 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -333,7 +333,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
net->hard_header_len += sizeof (struct rndis_data_hdr);
dev->hard_mtu = net->mtu + net->hard_header_len;
- dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);
+ dev->maxpacket = usb_maxpacket(dev->udev, dev->out);
if (dev->maxpacket == 0) {
netif_dbg(dev, probe, dev->net,
"dev->maxpacket can't be 0\n");
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 36b24ec11650..1cb6dab3e2d0 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -229,7 +229,7 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf)
pipe = usb_rcvintpipe (dev->udev,
dev->status->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
- maxp = usb_maxpacket (dev->udev, pipe, 0);
+ maxp = usb_maxpacket(dev->udev, pipe);
/* avoid 1 msec chatter: min 8 msec poll rate */
period = max ((int) dev->status->desc.bInterval,
@@ -1789,7 +1789,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
if (!dev->rx_urb_size)
dev->rx_urb_size = dev->hard_mtu;
- dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
+ dev->maxpacket = usb_maxpacket(dev->udev, dev->out);
if (dev->maxpacket == 0) {
/* that is a broken device */
status = -ENODEV;
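The usbnet-family updates above follow the usb_maxpacket() signature change: the helper no longer takes a trailing is_out flag because the direction is already encoded in the pipe. A trivial sketch:

#include <linux/usb.h>

static u16 example_out_maxpacket(struct usb_device *udev, unsigned int pipe_out)
{
	return usb_maxpacket(udev, pipe_out);
}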
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 6fc69c42f36e..bd50f52a1aad 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1090,7 +1090,7 @@ struct iwl_causes_list {
u8 addr;
};
-#define CAUSE(reg, mask) \
+#define IWL_CAUSE(reg, mask) \
{ \
.mask_reg = reg, \
.bit = ilog2(mask), \
@@ -1101,28 +1101,28 @@ struct iwl_causes_list {
}
static const struct iwl_causes_list causes_list_common[] = {
- CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
- CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
- CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
- CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
- CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
- CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
- CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
- CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
- CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
- CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
- CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
- CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
- CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
- CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
+ IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
+ IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
+ IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
+ IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
};
static const struct iwl_causes_list causes_list_pre_bz[] = {
- CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
};
static const struct iwl_causes_list causes_list_bz[] = {
- CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
};
static void iwl_pcie_map_list(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 4e3de684928b..b0b3f59dabc6 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -1053,7 +1053,6 @@ static int lbs_set_authtype(struct lbs_private *priv,
*/
#define LBS_ASSOC_MAX_CMD_SIZE \
(sizeof(struct cmd_ds_802_11_associate) \
- - 512 /* cmd_ds_802_11_associate.iebuf */ \
+ LBS_MAX_SSID_TLV_SIZE \
+ LBS_MAX_CHANNEL_TLV_SIZE \
+ LBS_MAX_CF_PARAM_TLV_SIZE \
@@ -1130,8 +1129,7 @@ static int lbs_associate(struct lbs_private *priv,
if (sme->ie && sme->ie_len)
pos += lbs_add_wpa_tlv(pos, sme->ie, sme->ie_len);
- len = (sizeof(*cmd) - sizeof(cmd->iebuf)) +
- (u16)(pos - (u8 *) &cmd->iebuf);
+ len = sizeof(*cmd) + (u16)(pos - (u8 *) &cmd->iebuf);
cmd->hdr.size = cpu_to_le16(len);
lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_CMD", (u8 *) cmd,
diff --git a/drivers/net/wireless/marvell/libertas/host.h b/drivers/net/wireless/marvell/libertas/host.h
index ceff4b92e7a1..a202b716ad5d 100644
--- a/drivers/net/wireless/marvell/libertas/host.h
+++ b/drivers/net/wireless/marvell/libertas/host.h
@@ -528,7 +528,8 @@ struct cmd_ds_802_11_associate {
__le16 listeninterval;
__le16 bcnperiod;
u8 dtimperiod;
- u8 iebuf[512]; /* Enough for required and most optional IEs */
+ /* 512 permitted - enough for required and most optional IEs */
+ u8 iebuf[];
} __packed;
struct cmd_ds_802_11_associate_response {
@@ -537,7 +538,8 @@ struct cmd_ds_802_11_associate_response {
__le16 capability;
__le16 statuscode;
__le16 aid;
- u8 iebuf[512];
+ /* max 512 */
+ u8 iebuf[];
} __packed;
struct cmd_ds_802_11_set_wep {
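
Replacing the fixed iebuf[512] with a flexible array member means sizeof(*cmd) no longer includes the IE buffer, which is why the cfg.c hunk above can drop the "- sizeof(cmd->iebuf)" term and compute the length as header size plus the IE bytes actually appended. A minimal stand-alone sketch of that sizeof() behaviour (struct names are illustrative, not the driver's own):

/* Sketch: why the length math changes with a flexible array member. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct cmd_fixed { uint16_t hdr; uint8_t iebuf[512]; }; /* old-style layout */
struct cmd_flex  { uint16_t hdr; uint8_t iebuf[];    }; /* new-style layout */

int main(void)
{
	size_t ie_bytes = 32; /* however many IE bytes were actually written */

	/* both print 34: header (2) plus appended IE bytes (32) */
	printf("old: %zu\n", sizeof(struct cmd_fixed) - 512 + ie_bytes);
	printf("new: %zu\n", sizeof(struct cmd_flex) + ie_bytes);
	return 0;
}
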
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index a85e192c9d59..1bb92ca7451b 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -1068,7 +1068,7 @@ int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
- usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
+ usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0));
if (usb->data_len < 32)
usb->data_len = 32;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 74c3d8cb3100..0827bc860bf8 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -586,10 +586,10 @@ static void rt2x00usb_assign_endpoint(struct data_queue *queue,
if (queue->qid == QID_RX) {
pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint);
- queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0);
+ queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe);
} else {
pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint);
- queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1);
+ queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe);
}
if (!queue->usb_maxpacket)
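
The mt76 and rt2x00 hunks above reflect the same helper change: usb_maxpacket() now infers the transfer direction from the pipe itself, so the trailing in/out flag is dropped at every call site. A minimal sketch of the updated call pattern (variable names are illustrative):

/* Sketch only: the direction is already encoded in the pipe value,
 * so no third argument is needed any more. */
unsigned int rx_pipe = usb_rcvbulkpipe(usb_dev, ep_in);
unsigned int tx_pipe = usb_sndbulkpipe(usb_dev, ep_out);
u16 rx_maxp = usb_maxpacket(usb_dev, rx_pipe);
u16 tx_maxp = usb_maxpacket(usb_dev, tx_pipe);
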
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 090610e48d08..c3ae631c2264 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -1602,6 +1602,16 @@ free:
return ret;
}
+void rtw_fw_update_beacon_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+ update_beacon_work);
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_fw_download_rsvd_page(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+}
+
static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
u32 *buf, u32 residue, u16 start_pg)
{
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 734113fba184..7a37675c61e8 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -809,6 +809,7 @@ void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev,
void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
struct rtw_vif *rtwvif);
int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev);
+void rtw_fw_update_beacon_work(struct work_struct *work);
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev);
int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
u32 offset, u32 size, u32 *buf);
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 30903c567cd9..4310362dc333 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -493,9 +493,7 @@ static int rtw_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
{
struct rtw_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
- rtw_fw_download_rsvd_page(rtwdev);
- mutex_unlock(&rtwdev->mutex);
+ ieee80211_queue_work(hw, &rtwdev->update_beacon_work);
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 14289f83feb5..efabd5b1bf5b 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -1442,6 +1442,7 @@ void rtw_core_stop(struct rtw_dev *rtwdev)
mutex_unlock(&rtwdev->mutex);
cancel_work_sync(&rtwdev->c2h_work);
+ cancel_work_sync(&rtwdev->update_beacon_work);
cancel_delayed_work_sync(&rtwdev->watch_dog_work);
cancel_delayed_work_sync(&coex->bt_relink_work);
cancel_delayed_work_sync(&coex->bt_reenable_work);
@@ -1998,6 +1999,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
INIT_WORK(&rtwdev->ips_work, rtw_ips_work);
INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work);
+ INIT_WORK(&rtwdev->update_beacon_work, rtw_fw_update_beacon_work);
INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
skb_queue_head_init(&rtwdev->c2h_queue);
skb_queue_head_init(&rtwdev->coex.queue);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 0baaf5a32e82..c02be4ac159e 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -2008,6 +2008,7 @@ struct rtw_dev {
struct work_struct c2h_work;
struct work_struct ips_work;
struct work_struct fw_recovery_work;
+ struct work_struct update_beacon_work;
/* used to protect txqs list */
spinlock_t txq_lock;
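
The rtw88 hunks above follow the usual mac80211 deferral pattern: the set_tim callback may run in a context where taking rtwdev->mutex is not allowed, so the rsvd-page download is moved into a work item that is queued from the callback and cancelled in rtw_core_stop(). A minimal sketch of that general pattern (names are illustrative, not the driver's exact code):

/* Sketch of the defer-to-work pattern used above (illustrative names). */
static void my_update_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, update_work);

	mutex_lock(&dev->mutex);	/* safe to sleep here */
	my_do_update(dev);
	mutex_unlock(&dev->mutex);
}

static int my_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			  bool set)
{
	struct my_dev *dev = hw->priv;

	/* cannot sleep or take dev->mutex in this context, so defer */
	ieee80211_queue_work(hw, &dev->update_work);
	return 0;
}
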
diff --git a/drivers/net/wireless/silabs/wfx/hif_tx.c b/drivers/net/wireless/silabs/wfx/hif_tx.c
index 2b92c227efbc..d35dd940d968 100644
--- a/drivers/net/wireless/silabs/wfx/hif_tx.c
+++ b/drivers/net/wireless/silabs/wfx/hif_tx.c
@@ -280,7 +280,7 @@ int wfx_hif_stop_scan(struct wfx_vif *wvif)
}
int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
- struct ieee80211_channel *channel, const u8 *ssid, int ssidlen)
+ struct ieee80211_channel *channel, const u8 *ssid, int ssid_len)
{
int ret;
struct wfx_hif_msg *hif;
@@ -288,8 +288,8 @@ int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
WARN_ON(!conf->beacon_int);
WARN_ON(!conf->basic_rates);
- WARN_ON(sizeof(body->ssid) < ssidlen);
- WARN(!conf->ibss_joined && !ssidlen, "joining an unknown BSS");
+ WARN_ON(sizeof(body->ssid) < ssid_len);
+ WARN(!conf->ibss_joined && !ssid_len, "joining an unknown BSS");
if (!hif)
return -ENOMEM;
body->infrastructure_bss_mode = !conf->ibss_joined;
@@ -300,8 +300,8 @@ int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
body->basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
memcpy(body->bssid, conf->bssid, sizeof(body->bssid));
if (ssid) {
- body->ssid_length = cpu_to_le32(ssidlen);
- memcpy(body->ssid, ssid, ssidlen);
+ body->ssid_length = cpu_to_le32(ssid_len);
+ memcpy(body->ssid, ssid, ssid_len);
}
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c
index bbfd3fa51921..e015bfb8d221 100644
--- a/drivers/net/wireless/silabs/wfx/main.c
+++ b/drivers/net/wireless/silabs/wfx/main.c
@@ -170,7 +170,7 @@ bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
*
* The PDS file is an array of Time-Length-Value structs.
*/
- int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len)
+int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len)
{
int ret, chunk_type, chunk_len, chunk_num = 0;
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index e551fa284a43..329d7f4a2b2e 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -409,8 +409,8 @@ static void wfx_join(struct wfx_vif *wvif)
struct ieee80211_bss_conf *conf = &vif->bss_conf;
struct cfg80211_bss *bss = NULL;
u8 ssid[IEEE80211_MAX_SSID_LEN];
- const u8 *ssidie = NULL;
- int ssidlen = 0;
+ const u8 *ssid_ie = NULL;
+ int ssid_len = 0;
int ret;
wfx_tx_lock_flush(wvif->wdev);
@@ -422,21 +422,21 @@ static void wfx_join(struct wfx_vif *wvif)
return;
}
- rcu_read_lock(); /* protect ssidie */
+ rcu_read_lock(); /* protect ssid_ie */
if (bss)
- ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
- if (ssidie) {
- ssidlen = ssidie[1];
- if (ssidlen > IEEE80211_MAX_SSID_LEN)
- ssidlen = IEEE80211_MAX_SSID_LEN;
- memcpy(ssid, &ssidie[2], ssidlen);
+ ssid_ie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ if (ssid_ie) {
+ ssid_len = ssid_ie[1];
+ if (ssid_len > IEEE80211_MAX_SSID_LEN)
+ ssid_len = IEEE80211_MAX_SSID_LEN;
+ memcpy(ssid, &ssid_ie[2], ssid_len);
}
rcu_read_unlock();
cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
wvif->join_in_progress = true;
- ret = wfx_hif_join(wvif, conf, wvif->channel, ssid, ssidlen);
+ ret = wfx_hif_join(wvif, conf, wvif->channel, ssid, ssid_len);
if (ret) {
ieee80211_connection_loss(vif);
wfx_reset(wvif);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0f7fd159f0f2..d93814c14a23 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -828,7 +828,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
break;
}
- work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
+ work_to_do = XEN_RING_NR_UNCONSUMED_REQUESTS(&queue->tx);
if (!work_to_do)
break;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 65ab907aca5a..8c0b9546d5a2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1386,7 +1386,7 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
queue->tx_skbs[i] = NULL;
get_page(queue->grant_tx_page[i]);
gnttab_end_foreign_access(queue->grant_tx_ref[i],
- (unsigned long)page_address(queue->grant_tx_page[i]));
+ queue->grant_tx_page[i]);
queue->grant_tx_page[i] = NULL;
queue->grant_tx_ref[i] = INVALID_GRANT_REF;
add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
@@ -1418,8 +1418,7 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
* foreign access is ended (which may be deferred).
*/
get_page(page);
- gnttab_end_foreign_access(ref,
- (unsigned long)page_address(page));
+ gnttab_end_foreign_access(ref, page);
queue->grant_rx_ref[id] = INVALID_GRANT_REF;
kfree_skb(skb);
@@ -1760,7 +1759,7 @@ static void xennet_end_access(int ref, void *page)
{
/* This frees the page as a side-effect */
if (ref != INVALID_GRANT_REF)
- gnttab_end_foreign_access(ref, (unsigned long)page);
+ gnttab_end_foreign_access(ref, virt_to_page(page));
}
static void xennet_disconnect_backend(struct netfront_info *info)
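
The xen-netfront hunks above adapt to the newer gnttab_end_foreign_access() signature, which takes the struct page to release directly instead of the page's virtual address. A minimal sketch of the call under that assumption ('ref' and 'page' are illustrative locals):

/* Sketch: end foreign access and let the grant code free the page. */
get_page(page);                         /* hold a reference while access ends */
gnttab_end_foreign_access(ref, page);   /* page pointer, not page_address() */
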
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index e5a58520d398..fabbb31f2c35 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -50,14 +50,14 @@ static ssize_t sector_size_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_btt->lbasize,
btt_lbasize_supported);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc ? rc : len;
}
@@ -79,11 +79,11 @@ static ssize_t uuid_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- nd_device_lock(dev);
+ device_lock(dev);
rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc ? rc : len;
}
@@ -108,13 +108,13 @@ static ssize_t namespace_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc;
}
@@ -126,14 +126,14 @@ static ssize_t size_show(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- nd_device_lock(dev);
+ device_lock(dev);
if (dev->driver)
rc = sprintf(buf, "%llu\n", nd_btt->size);
else {
/* no size to convey if the btt instance is disabled */
rc = -ENXIO;
}
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc;
}
@@ -178,6 +178,8 @@ bool is_nd_btt(struct device *dev)
}
EXPORT_SYMBOL(is_nd_btt);
+static struct lock_class_key nvdimm_btt_key;
+
static struct device *__nd_btt_create(struct nd_region *nd_region,
unsigned long lbasize, uuid_t *uuid,
struct nd_namespace_common *ndns)
@@ -205,6 +207,7 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
dev->parent = &nd_region->dev;
dev->type = &nd_btt_device_type;
device_initialize(&nd_btt->dev);
+ lockdep_set_class(&nd_btt->dev.mutex, &nvdimm_btt_key);
if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns)) {
dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
dev_name(ndns->claim));
@@ -225,7 +228,7 @@ struct device *nd_btt_create(struct nd_region *nd_region)
{
struct device *dev = __nd_btt_create(nd_region, 0, NULL, NULL);
- __nd_device_register(dev);
+ nd_device_register(dev);
return dev;
}
@@ -324,7 +327,7 @@ static int __nd_btt_probe(struct nd_btt *nd_btt,
if (!nd_btt->uuid)
return -ENOMEM;
- __nd_device_register(&nd_btt->dev);
+ nd_device_register(&nd_btt->dev);
return 0;
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 7b0d1443217a..a4fc17db707c 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -88,10 +88,7 @@ static int nvdimm_bus_probe(struct device *dev)
dev->driver->name, dev_name(dev));
nvdimm_bus_probe_start(nvdimm_bus);
- debug_nvdimm_lock(dev);
rc = nd_drv->probe(dev);
- debug_nvdimm_unlock(dev);
-
if ((rc == 0 || rc == -EOPNOTSUPP) &&
dev->parent && is_nd_region(dev->parent))
nd_region_advance_seeds(to_nd_region(dev->parent), dev);
@@ -111,11 +108,8 @@ static void nvdimm_bus_remove(struct device *dev)
struct module *provider = to_bus_provider(dev);
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
- if (nd_drv->remove) {
- debug_nvdimm_lock(dev);
+ if (nd_drv->remove)
nd_drv->remove(dev);
- debug_nvdimm_unlock(dev);
- }
dev_dbg(&nvdimm_bus->dev, "%s.remove(%s)\n", dev->driver->name,
dev_name(dev));
@@ -139,7 +133,7 @@ static void nvdimm_bus_shutdown(struct device *dev)
void nd_device_notify(struct device *dev, enum nvdimm_event event)
{
- nd_device_lock(dev);
+ device_lock(dev);
if (dev->driver) {
struct nd_device_driver *nd_drv;
@@ -147,7 +141,7 @@ void nd_device_notify(struct device *dev, enum nvdimm_event event)
if (nd_drv->notify)
nd_drv->notify(dev, event);
}
- nd_device_unlock(dev);
+ device_unlock(dev);
}
EXPORT_SYMBOL(nd_device_notify);
@@ -334,6 +328,8 @@ struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm)
}
EXPORT_SYMBOL_GPL(nvdimm_to_bus);
+static struct lock_class_key nvdimm_bus_key;
+
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
struct nvdimm_bus_descriptor *nd_desc)
{
@@ -360,6 +356,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
nvdimm_bus->dev.bus = &nvdimm_bus_type;
nvdimm_bus->dev.of_node = nd_desc->of_node;
device_initialize(&nvdimm_bus->dev);
+ lockdep_set_class(&nvdimm_bus->dev.mutex, &nvdimm_bus_key);
device_set_pm_not_required(&nvdimm_bus->dev);
rc = dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
if (rc)
@@ -511,7 +508,7 @@ static void nd_async_device_unregister(void *d, async_cookie_t cookie)
put_device(dev);
}
-void __nd_device_register(struct device *dev)
+void nd_device_register(struct device *dev)
{
if (!dev)
return;
@@ -537,12 +534,6 @@ void __nd_device_register(struct device *dev)
async_schedule_dev_domain(nd_async_device_register, dev,
&nd_async_domain);
}
-
-void nd_device_register(struct device *dev)
-{
- device_initialize(dev);
- __nd_device_register(dev);
-}
EXPORT_SYMBOL(nd_device_register);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
@@ -572,9 +563,9 @@ void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
* or otherwise let the async path handle it if the
* unregistration was already queued.
*/
- nd_device_lock(dev);
+ device_lock(dev);
killed = kill_device(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
if (!killed)
return;
@@ -724,6 +715,8 @@ static void ndctl_release(struct device *dev)
kfree(dev);
}
+static struct lock_class_key nvdimm_ndctl_key;
+
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
{
dev_t devt = MKDEV(nvdimm_bus_major, nvdimm_bus->id);
@@ -734,6 +727,7 @@ int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
if (!dev)
return -ENOMEM;
device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &nvdimm_ndctl_key);
device_set_pm_not_required(dev);
dev->class = nd_class;
dev->parent = &nvdimm_bus->dev;
@@ -930,10 +924,10 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
if (nvdimm_bus->probe_active == 0)
break;
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
wait_event(nvdimm_bus->wait,
nvdimm_bus->probe_active == 0);
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
} while (true);
}
@@ -1167,7 +1161,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
goto out;
}
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
if (rc)
@@ -1189,7 +1183,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
out_unlock:
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
out:
kfree(in_env);
kfree(out_env);
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 69a03358817f..d91799b71d23 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -215,7 +215,7 @@ EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);
*
* Enforce that uuids can only be changed while the device is disabled
* (driver detached)
- * LOCKING: expects nd_device_lock() is held on entry
+ * LOCKING: expects device_lock() is held on entry
*/
int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf,
size_t len)
@@ -316,15 +316,15 @@ static DEVICE_ATTR_RO(provider);
static int flush_namespaces(struct device *dev, void *data)
{
- nd_device_lock(dev);
- nd_device_unlock(dev);
+ device_lock(dev);
+ device_unlock(dev);
return 0;
}
static int flush_regions_dimms(struct device *dev, void *data)
{
- nd_device_lock(dev);
- nd_device_unlock(dev);
+ device_lock(dev);
+ device_unlock(dev);
device_for_each_child(dev, NULL, flush_namespaces);
return 0;
}
@@ -368,9 +368,7 @@ static ssize_t capability_show(struct device *dev,
if (!nd_desc->fw_ops)
return -EOPNOTSUPP;
- nvdimm_bus_lock(dev);
cap = nd_desc->fw_ops->capability(nd_desc);
- nvdimm_bus_unlock(dev);
switch (cap) {
case NVDIMM_FWA_CAP_QUIESCE:
@@ -395,10 +393,8 @@ static ssize_t activate_show(struct device *dev,
if (!nd_desc->fw_ops)
return -EOPNOTSUPP;
- nvdimm_bus_lock(dev);
cap = nd_desc->fw_ops->capability(nd_desc);
state = nd_desc->fw_ops->activate_state(nd_desc);
- nvdimm_bus_unlock(dev);
if (cap < NVDIMM_FWA_CAP_QUIESCE)
return -EOPNOTSUPP;
@@ -443,7 +439,6 @@ static ssize_t activate_store(struct device *dev,
else
return -EINVAL;
- nvdimm_bus_lock(dev);
state = nd_desc->fw_ops->activate_state(nd_desc);
switch (state) {
@@ -461,7 +456,6 @@ static ssize_t activate_store(struct device *dev,
default:
rc = -ENXIO;
}
- nvdimm_bus_unlock(dev);
if (rc == 0)
rc = len;
@@ -484,10 +478,7 @@ static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribut
if (!nd_desc->fw_ops)
return 0;
- nvdimm_bus_lock(dev);
cap = nd_desc->fw_ops->capability(nd_desc);
- nvdimm_bus_unlock(dev);
-
if (cap < NVDIMM_FWA_CAP_QUIESCE)
return 0;
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 99965077bac4..7f4a9d28b670 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -80,7 +80,7 @@ struct device *nd_dax_create(struct nd_region *nd_region)
nd_dax = nd_dax_alloc(nd_region);
if (nd_dax)
dev = nd_pfn_devinit(&nd_dax->nd_pfn, NULL);
- __nd_device_register(dev);
+ nd_device_register(dev);
return dev;
}
@@ -119,7 +119,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
nd_detach_ndns(dax_dev, &nd_pfn->ndns);
put_device(dax_dev);
} else
- __nd_device_register(dax_dev);
+ nd_device_register(dax_dev);
return rc;
}
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index ee507eed42b5..c7c980577491 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -341,9 +341,9 @@ static ssize_t available_slots_show(struct device *dev,
{
ssize_t rc;
- nd_device_lock(dev);
+ device_lock(dev);
rc = __available_slots_show(dev_get_drvdata(dev), buf);
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc;
}
@@ -386,12 +386,12 @@ static ssize_t security_store(struct device *dev,
* done while probing is idle and the DIMM is not in active use
* in any region.
*/
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = nvdimm_security_store(dev, buf, len);
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc;
}
@@ -570,6 +570,8 @@ bool is_nvdimm(struct device *dev)
return dev->type == &nvdimm_device_type;
}
+static struct lock_class_key nvdimm_key;
+
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
void *provider_data, const struct attribute_group **groups,
unsigned long flags, unsigned long cmd_mask, int num_flush,
@@ -613,6 +615,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
/* get security state and extended (master) state */
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
+ device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &nvdimm_key);
nd_device_register(dev);
return nvdimm;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 62b83b2e26e3..bf4f5c09d9b1 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -264,7 +264,7 @@ static ssize_t alt_name_store(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __alt_name_store(dev, buf, len);
@@ -272,7 +272,7 @@ static ssize_t alt_name_store(struct device *dev,
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -846,7 +846,7 @@ static ssize_t size_store(struct device *dev,
if (rc)
return rc;
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __size_store(dev, val);
@@ -868,7 +868,7 @@ static ssize_t size_store(struct device *dev,
dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1043,7 +1043,7 @@ static ssize_t uuid_store(struct device *dev,
} else
return -ENXIO;
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
if (to_ndns(dev)->claim)
@@ -1059,7 +1059,7 @@ static ssize_t uuid_store(struct device *dev,
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1118,7 +1118,7 @@ static ssize_t sector_size_store(struct device *dev,
} else
return -ENXIO;
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
if (to_ndns(dev)->claim)
rc = -EBUSY;
@@ -1129,7 +1129,7 @@ static ssize_t sector_size_store(struct device *dev,
dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
buf, buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc ? rc : len;
}
@@ -1239,9 +1239,9 @@ static ssize_t holder_show(struct device *dev,
struct nd_namespace_common *ndns = to_ndns(dev);
ssize_t rc;
- nd_device_lock(dev);
+ device_lock(dev);
rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc;
}
@@ -1278,7 +1278,7 @@ static ssize_t holder_class_store(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev->parent);
int rc;
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __holder_class_store(dev, buf);
@@ -1286,7 +1286,7 @@ static ssize_t holder_class_store(struct device *dev,
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1297,7 +1297,7 @@ static ssize_t holder_class_show(struct device *dev,
struct nd_namespace_common *ndns = to_ndns(dev);
ssize_t rc;
- nd_device_lock(dev);
+ device_lock(dev);
if (ndns->claim_class == NVDIMM_CCLASS_NONE)
rc = sprintf(buf, "\n");
else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
@@ -1309,7 +1309,7 @@ static ssize_t holder_class_show(struct device *dev,
rc = sprintf(buf, "dax\n");
else
rc = sprintf(buf, "<unknown>\n");
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc;
}
@@ -1323,7 +1323,7 @@ static ssize_t mode_show(struct device *dev,
char *mode;
ssize_t rc;
- nd_device_lock(dev);
+ device_lock(dev);
claim = ndns->claim;
if (claim && is_nd_btt(claim))
mode = "safe";
@@ -1336,7 +1336,7 @@ static ssize_t mode_show(struct device *dev,
else
mode = "raw";
rc = sprintf(buf, "%s\n", mode);
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc;
}
@@ -1456,8 +1456,8 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
* Flush any in-progess probes / removals in the driver
* for the raw personality of this namespace.
*/
- nd_device_lock(&ndns->dev);
- nd_device_unlock(&ndns->dev);
+ device_lock(&ndns->dev);
+ device_unlock(&ndns->dev);
if (ndns->dev.driver) {
dev_dbg(&ndns->dev, "is active, can't bind %s\n",
dev_name(dev));
@@ -1830,6 +1830,8 @@ static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
return dev;
}
+static struct lock_class_key nvdimm_namespace_key;
+
void nd_region_create_ns_seed(struct nd_region *nd_region)
{
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
@@ -1845,8 +1847,12 @@ void nd_region_create_ns_seed(struct nd_region *nd_region)
*/
if (!nd_region->ns_seed)
dev_err(&nd_region->dev, "failed to create namespace\n");
- else
+ else {
+ device_initialize(nd_region->ns_seed);
+ lockdep_set_class(&nd_region->ns_seed->mutex,
+ &nvdimm_namespace_key);
nd_device_register(nd_region->ns_seed);
+ }
}
void nd_region_create_dax_seed(struct nd_region *nd_region)
@@ -2200,6 +2206,8 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
if (id < 0)
break;
dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
+ device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &nvdimm_namespace_key);
nd_device_register(dev);
}
if (i)
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 448f9dcb4bb7..cc86ee09d7c0 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -106,7 +106,7 @@ void nd_region_create_dax_seed(struct nd_region *nd_region);
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
void nd_synchronize(void);
-void __nd_device_register(struct device *dev);
+void nd_device_register(struct device *dev);
struct nd_label_id;
char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid,
u32 flags);
@@ -161,70 +161,4 @@ static inline void devm_nsio_disable(struct device *dev,
{
}
#endif
-
-#ifdef CONFIG_PROVE_NVDIMM_LOCKING
-extern struct class *nd_class;
-
-enum {
- LOCK_BUS,
- LOCK_NDCTL,
- LOCK_REGION,
- LOCK_DIMM = LOCK_REGION,
- LOCK_NAMESPACE,
- LOCK_CLAIM,
-};
-
-static inline void debug_nvdimm_lock(struct device *dev)
-{
- if (is_nd_region(dev))
- mutex_lock_nested(&dev->lockdep_mutex, LOCK_REGION);
- else if (is_nvdimm(dev))
- mutex_lock_nested(&dev->lockdep_mutex, LOCK_DIMM);
- else if (is_nd_btt(dev) || is_nd_pfn(dev) || is_nd_dax(dev))
- mutex_lock_nested(&dev->lockdep_mutex, LOCK_CLAIM);
- else if (dev->parent && (is_nd_region(dev->parent)))
- mutex_lock_nested(&dev->lockdep_mutex, LOCK_NAMESPACE);
- else if (is_nvdimm_bus(dev))
- mutex_lock_nested(&dev->lockdep_mutex, LOCK_BUS);
- else if (dev->class && dev->class == nd_class)
- mutex_lock_nested(&dev->lockdep_mutex, LOCK_NDCTL);
- else
- dev_WARN(dev, "unknown lock level\n");
-}
-
-static inline void debug_nvdimm_unlock(struct device *dev)
-{
- mutex_unlock(&dev->lockdep_mutex);
-}
-
-static inline void nd_device_lock(struct device *dev)
-{
- device_lock(dev);
- debug_nvdimm_lock(dev);
-}
-
-static inline void nd_device_unlock(struct device *dev)
-{
- debug_nvdimm_unlock(dev);
- device_unlock(dev);
-}
-#else
-static inline void nd_device_lock(struct device *dev)
-{
- device_lock(dev);
-}
-
-static inline void nd_device_unlock(struct device *dev)
-{
- device_unlock(dev);
-}
-
-static inline void debug_nvdimm_lock(struct device *dev)
-{
-}
-
-static inline void debug_nvdimm_unlock(struct device *dev)
-{
-}
-#endif
#endif /* __ND_CORE_H__ */
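
The nd-core.h hunk above removes the old CONFIG_PROVE_NVDIMM_LOCKING wrappers; in their place the surrounding nvdimm hunks give each device type its own lock_class_key and call lockdep_set_class() right after device_initialize(), so plain device_lock()/device_unlock() can be used without false lockdep reports. A minimal sketch of that pattern as it appears throughout this series (my_subsys_* names are illustrative):

/* Sketch of the per-device-type lockdep class pattern used in this series. */
static struct lock_class_key my_subsys_key;

static void my_subsys_dev_init(struct device *dev)
{
	device_initialize(dev);
	/* give this device type's mutex its own class for lockdep checks */
	lockdep_set_class(&dev->mutex, &my_subsys_key);
	nd_device_register(dev);	/* registration no longer initializes */
}
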
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index c31e184bfa45..0e92ab4b3283 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -55,7 +55,7 @@ static ssize_t mode_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc = 0;
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
if (dev->driver)
rc = -EBUSY;
@@ -77,7 +77,7 @@ static ssize_t mode_store(struct device *dev,
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc ? rc : len;
}
@@ -123,14 +123,14 @@ static ssize_t align_store(struct device *dev,
unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
ssize_t rc;
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_pfn->align,
nd_pfn_supported_alignments(aligns));
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc ? rc : len;
}
@@ -152,11 +152,11 @@ static ssize_t uuid_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- nd_device_lock(dev);
+ device_lock(dev);
rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc ? rc : len;
}
@@ -181,13 +181,13 @@ static ssize_t namespace_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc;
}
@@ -199,7 +199,7 @@ static ssize_t resource_show(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- nd_device_lock(dev);
+ device_lock(dev);
if (dev->driver) {
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = __le64_to_cpu(pfn_sb->dataoff);
@@ -213,7 +213,7 @@ static ssize_t resource_show(struct device *dev,
/* no address to convey if the pfn instance is disabled */
rc = -ENXIO;
}
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc;
}
@@ -225,7 +225,7 @@ static ssize_t size_show(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- nd_device_lock(dev);
+ device_lock(dev);
if (dev->driver) {
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = __le64_to_cpu(pfn_sb->dataoff);
@@ -241,7 +241,7 @@ static ssize_t size_show(struct device *dev,
/* no size to convey if the pfn instance is disabled */
rc = -ENXIO;
}
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc;
}
@@ -291,6 +291,8 @@ bool is_nd_pfn(struct device *dev)
}
EXPORT_SYMBOL(is_nd_pfn);
+static struct lock_class_key nvdimm_pfn_key;
+
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
struct nd_namespace_common *ndns)
{
@@ -303,6 +305,7 @@ struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
nd_pfn->align = nd_pfn_default_alignment();
dev = &nd_pfn->dev;
device_initialize(&nd_pfn->dev);
+ lockdep_set_class(&nd_pfn->dev.mutex, &nvdimm_pfn_key);
if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
dev_name(ndns->claim));
@@ -346,7 +349,7 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
nd_pfn = nd_pfn_alloc(nd_region);
dev = nd_pfn_devinit(nd_pfn, NULL);
- __nd_device_register(dev);
+ nd_device_register(dev);
return dev;
}
@@ -643,7 +646,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
put_device(pfn_dev);
} else
- __nd_device_register(pfn_dev);
+ nd_device_register(pfn_dev);
return rc;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 58d95242a836..629d10fcf53b 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -45,9 +45,25 @@ static struct nd_region *to_region(struct pmem_device *pmem)
return to_nd_region(to_dev(pmem)->parent);
}
-static void hwpoison_clear(struct pmem_device *pmem,
- phys_addr_t phys, unsigned int len)
+static phys_addr_t to_phys(struct pmem_device *pmem, phys_addr_t offset)
{
+ return pmem->phys_addr + offset;
+}
+
+static sector_t to_sect(struct pmem_device *pmem, phys_addr_t offset)
+{
+ return (offset - pmem->data_offset) >> SECTOR_SHIFT;
+}
+
+static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector)
+{
+ return (sector << SECTOR_SHIFT) + pmem->data_offset;
+}
+
+static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset,
+ unsigned int len)
+{
+ phys_addr_t phys = to_phys(pmem, offset);
unsigned long pfn_start, pfn_end, pfn;
/* only pmem in the linear map supports HWPoison */
@@ -69,33 +85,40 @@ static void hwpoison_clear(struct pmem_device *pmem,
}
}
-static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
- phys_addr_t offset, unsigned int len)
+static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks)
{
- struct device *dev = to_dev(pmem);
- sector_t sector;
- long cleared;
- blk_status_t rc = BLK_STS_OK;
+ if (blks == 0)
+ return;
+ badblocks_clear(&pmem->bb, sector, blks);
+ if (pmem->bb_state)
+ sysfs_notify_dirent(pmem->bb_state);
+}
- sector = (offset - pmem->data_offset) / 512;
+static long __pmem_clear_poison(struct pmem_device *pmem,
+ phys_addr_t offset, unsigned int len)
+{
+ phys_addr_t phys = to_phys(pmem, offset);
+ long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len);
- cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
- if (cleared < len)
- rc = BLK_STS_IOERR;
- if (cleared > 0 && cleared / 512) {
- hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
- cleared /= 512;
- dev_dbg(dev, "%#llx clear %ld sector%s\n",
- (unsigned long long) sector, cleared,
- cleared > 1 ? "s" : "");
- badblocks_clear(&pmem->bb, sector, cleared);
- if (pmem->bb_state)
- sysfs_notify_dirent(pmem->bb_state);
+ if (cleared > 0) {
+ pmem_mkpage_present(pmem, offset, cleared);
+ arch_invalidate_pmem(pmem->virt_addr + offset, len);
}
+ return cleared;
+}
+
+static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
+ phys_addr_t offset, unsigned int len)
+{
+ long cleared = __pmem_clear_poison(pmem, offset, len);
- arch_invalidate_pmem(pmem->virt_addr + offset, len);
+ if (cleared < 0)
+ return BLK_STS_IOERR;
- return rc;
+ pmem_clear_bb(pmem, to_sect(pmem, offset), cleared >> SECTOR_SHIFT);
+ if (cleared < len)
+ return BLK_STS_IOERR;
+ return BLK_STS_OK;
}
static void write_pmem(void *pmem_addr, struct page *page,
@@ -143,7 +166,7 @@ static blk_status_t pmem_do_read(struct pmem_device *pmem,
sector_t sector, unsigned int len)
{
blk_status_t rc;
- phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
+ phys_addr_t pmem_off = to_offset(pmem, sector);
void *pmem_addr = pmem->virt_addr + pmem_off;
if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
@@ -158,36 +181,20 @@ static blk_status_t pmem_do_write(struct pmem_device *pmem,
struct page *page, unsigned int page_off,
sector_t sector, unsigned int len)
{
- blk_status_t rc = BLK_STS_OK;
- bool bad_pmem = false;
- phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
+ phys_addr_t pmem_off = to_offset(pmem, sector);
void *pmem_addr = pmem->virt_addr + pmem_off;
- if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
- bad_pmem = true;
+ if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) {
+ blk_status_t rc = pmem_clear_poison(pmem, pmem_off, len);
+
+ if (rc != BLK_STS_OK)
+ return rc;
+ }
- /*
- * Note that we write the data both before and after
- * clearing poison. The write before clear poison
- * handles situations where the latest written data is
- * preserved and the clear poison operation simply marks
- * the address range as valid without changing the data.
- * In this case application software can assume that an
- * interrupted write will either return the new good
- * data or an error.
- *
- * However, if pmem_clear_poison() leaves the data in an
- * indeterminate state we need to perform the write
- * after clear poison.
- */
flush_dcache_page(page);
write_pmem(pmem_addr, page, page_off, len);
- if (unlikely(bad_pmem)) {
- rc = pmem_clear_poison(pmem, pmem_off, len);
- write_pmem(pmem_addr, page, page_off, len);
- }
- return rc;
+ return BLK_STS_OK;
}
static void pmem_submit_bio(struct bio *bio)
@@ -255,24 +262,47 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
-
- if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
- PFN_PHYS(nr_pages))))
- return -EIO;
+ sector_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT;
+ unsigned int num = PFN_PHYS(nr_pages) >> SECTOR_SHIFT;
+ struct badblocks *bb = &pmem->bb;
+ sector_t first_bad;
+ int num_bad;
if (kaddr)
*kaddr = pmem->virt_addr + offset;
if (pfn)
*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+ if (bb->count &&
+ badblocks_check(bb, sector, num, &first_bad, &num_bad)) {
+ long actual_nr;
+
+ if (mode != DAX_RECOVERY_WRITE)
+ return -EIO;
+
+ /*
+ * The recovery stride is set to the kernel page size because
+ * the underlying driver and firmware clear-poison functions
+ * don't appear to handle large chunks (such as 2MiB) reliably.
+ */
+ actual_nr = PHYS_PFN(
+ PAGE_ALIGN((first_bad - sector) << SECTOR_SHIFT));
+ dev_dbg(pmem->bb.dev, "start sector(%llu), nr_pages(%ld), first_bad(%llu), actual_nr(%ld)\n",
+ sector, nr_pages, first_bad, actual_nr);
+ if (actual_nr)
+ return actual_nr;
+ return 1;
+ }
+
/*
- * If badblocks are present, limit known good range to the
- * requested range.
+ * If badblocks are present but not in the range, limit known good range
+ * to the requested range.
*/
- if (unlikely(pmem->bb.count))
+ if (bb->count)
return nr_pages;
return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
@@ -294,16 +324,73 @@ static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
}
static long pmem_dax_direct_access(struct dax_device *dax_dev,
- pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
+ pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
+ void **kaddr, pfn_t *pfn)
+{
+ struct pmem_device *pmem = dax_get_private(dax_dev);
+
+ return __pmem_direct_access(pmem, pgoff, nr_pages, mode, kaddr, pfn);
+}
+
+/*
+ * The recovery write thread starts out as a normal pwrite thread. When
+ * the filesystem is told about a potential media error in the range, it
+ * turns the normal pwrite into a dax_recovery_write.
+ *
+ * The recovery write consists of clearing the media poison, clearing the
+ * page HWPoison bit, re-enabling page-wide read-write permission, flushing
+ * the caches and finally writing. A competing pread thread is held
+ * off during the recovery process since data read back might not be
+ * valid; this is achieved by clearing the badblock records only after
+ * the recovery write is complete. Competing recovery write threads
+ * are already serialized by the writer lock held by dax_iomap_rw().
+ */
+static size_t pmem_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
{
struct pmem_device *pmem = dax_get_private(dax_dev);
+ size_t olen, len, off;
+ phys_addr_t pmem_off;
+ struct device *dev = pmem->bb.dev;
+ long cleared;
+
+ off = offset_in_page(addr);
+ len = PFN_PHYS(PFN_UP(off + bytes));
+ if (!is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) >> SECTOR_SHIFT, len))
+ return _copy_from_iter_flushcache(addr, bytes, i);
+
+ /*
+ * A range that is not page aligned cannot be recovered. This should not
+ * happen unless something else went wrong.
+ */
+ if (off || !PAGE_ALIGNED(bytes)) {
+ dev_dbg(dev, "Found poison, but addr(%p) or bytes(%#zx) not page aligned\n",
+ addr, bytes);
+ return 0;
+ }
+
+ pmem_off = PFN_PHYS(pgoff) + pmem->data_offset;
+ cleared = __pmem_clear_poison(pmem, pmem_off, len);
+ if (cleared > 0 && cleared < len) {
+ dev_dbg(dev, "poison cleared only %ld out of %zu bytes\n",
+ cleared, len);
+ return 0;
+ }
+ if (cleared < 0) {
+ dev_dbg(dev, "poison clear failed: %ld\n", cleared);
+ return 0;
+ }
+
+ olen = _copy_from_iter_flushcache(addr, bytes, i);
+ pmem_clear_bb(pmem, to_sect(pmem, pmem_off), cleared >> SECTOR_SHIFT);
- return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
+ return olen;
}
static const struct dax_operations pmem_dax_ops = {
.direct_access = pmem_dax_direct_access,
.zero_page_range = pmem_dax_zero_page_range,
+ .recovery_write = pmem_recovery_write,
};
static ssize_t write_cache_show(struct device *dev,
@@ -573,7 +660,7 @@ static void nd_pmem_remove(struct device *dev)
nvdimm_namespace_detach_btt(to_nd_btt(dev));
else {
/*
- * Note, this assumes nd_device_lock() context to not
+ * Note, this assumes device_lock() context to not
* race nd_pmem_notify()
*/
sysfs_put(pmem->bb_state);
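
Taken together, the pmem.c hunks above implement the flow described in the recovery-write comment: direct_access reports the poisoned sub-range (returning a reduced page count, or -EIO unless the caller asked for DAX_RECOVERY_WRITE), and recovery_write clears the poison, copies the data and only then clears the badblock records. A minimal caller-side sketch, under the assumption that dax_direct_access()/dax_recovery_write() take the mode argument added in this series (illustrative, not the actual fs/dax.c code):

/* Illustrative caller-side sketch (not fs/dax.c itself). */
static ssize_t try_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t bytes, struct iov_iter *iter)
{
	void *kaddr;
	long avail;

	/* ask for a mapping that tolerates known-poisoned pages */
	avail = dax_direct_access(dax_dev, pgoff, PHYS_PFN(bytes),
				  DAX_RECOVERY_WRITE, &kaddr, NULL);
	if (avail < 0)
		return avail;

	/* clears poison, copies the data, then clears the badblock records */
	return dax_recovery_write(dax_dev, pgoff, kaddr, bytes, iter);
}
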
diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h
index 1f51a2361429..392b0b38acb9 100644
--- a/drivers/nvdimm/pmem.h
+++ b/drivers/nvdimm/pmem.h
@@ -8,6 +8,8 @@
#include <linux/pfn_t.h>
#include <linux/fs.h>
+enum dax_access_mode;
+
/* this definition is in its own header for tools/testing/nvdimm to consume */
struct pmem_device {
/* One contiguous memory region per device */
@@ -28,7 +30,8 @@ struct pmem_device {
};
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn);
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn);
#ifdef CONFIG_MEMORY_FAILURE
static inline bool test_and_clear_pmem_poison(struct page *page)
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 188560b1c110..390123d293ea 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -95,7 +95,7 @@ static void nd_region_remove(struct device *dev)
nvdimm_bus_unlock(dev);
/*
- * Note, this assumes nd_device_lock() context to not race
+ * Note, this assumes device_lock() context to not race
* nd_region_notify()
*/
sysfs_put(nd_region->bb_state);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 0cb274c2b508..d976260eca7a 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -279,7 +279,7 @@ static ssize_t set_cookie_show(struct device *dev,
* the v1.1 namespace label cookie definition. To read all this
* data we need to wait for probing to settle.
*/
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
if (nd_region->ndr_mappings) {
@@ -296,7 +296,7 @@ static ssize_t set_cookie_show(struct device *dev,
}
}
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
if (rc)
return rc;
@@ -353,12 +353,12 @@ static ssize_t available_size_show(struct device *dev,
* memory nvdimm_bus_lock() is dropped, but that's userspace's
* problem to not race itself.
*/
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
available = nd_region_available_dpa(nd_region);
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
return sprintf(buf, "%llu\n", available);
}
@@ -370,12 +370,12 @@ static ssize_t max_available_extent_show(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev);
unsigned long long available = 0;
- nd_device_lock(dev);
+ device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
available = nd_region_allocatable_dpa(nd_region);
nvdimm_bus_unlock(dev);
- nd_device_unlock(dev);
+ device_unlock(dev);
return sprintf(buf, "%llu\n", available);
}
@@ -549,12 +549,12 @@ static ssize_t region_badblocks_show(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev);
ssize_t rc;
- nd_device_lock(dev);
+ device_lock(dev);
if (dev->driver)
rc = badblocks_show(&nd_region->bb, buf, 0);
else
rc = -ENXIO;
- nd_device_unlock(dev);
+ device_unlock(dev);
return rc;
}
@@ -949,6 +949,8 @@ static unsigned long default_align(struct nd_region *nd_region)
return align;
}
+static struct lock_class_key nvdimm_region_key;
+
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc,
const struct device_type *dev_type, const char *caller)
@@ -1035,6 +1037,8 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
else
nd_region->flush = NULL;
+ device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &nvdimm_region_key);
nd_device_register(dev);
return nd_region;
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 4b80150e4afa..b5aa55c61461 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -379,11 +379,6 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
|| !nvdimm->sec.flags)
return -EOPNOTSUPP;
- if (dev->driver == NULL) {
- dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
- return -EINVAL;
- }
-
rc = check_security_state(nvdimm);
if (rc)
return rc;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 72f7c955c707..24165daee3c8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1206,9 +1206,10 @@ static void nvme_keep_alive_work(struct work_struct *work)
nvme_init_request(rq, &ctrl->ka_cmd);
rq->timeout = ctrl->kato * HZ;
+ rq->end_io = nvme_keep_alive_end_io;
rq->end_io_data = ctrl;
rq->rq_flags |= RQF_QUIET;
- blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
+ blk_execute_rq_nowait(rq, false);
}
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
@@ -2227,8 +2228,16 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
- ctrl->ctrl_config |= NVME_CC_ENABLE;
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+ if (ret)
+ return ret;
+ /* Flush write to device (required if transport is PCI) */
+ ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
+ if (ret)
+ return ret;
+
+ ctrl->ctrl_config |= NVME_CC_ENABLE;
ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
if (ret)
return ret;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 7ae72c7a211b..3c778bb0c294 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1899,6 +1899,24 @@ nvme_fc_ctrl_ioerr_work(struct work_struct *work)
nvme_fc_error_recovery(ctrl, "transport detected io error");
}
+/*
+ * nvme_fc_io_getuuid - Routine called by the LLDD to get the appid field
+ * associated with a request
+ * @req: IO request from nvme fc to driver
+ * Returns: UUID if there is an appid associated with the VM, or
+ * NULL if the user/libvirt has not set an appid for the VM
+ */
+char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req)
+{
+ struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
+ struct request *rq = op->rq;
+
+ if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq->bio)
+ return NULL;
+ return blkcg_get_fc_appid(rq->bio);
+}
+EXPORT_SYMBOL_GPL(nvme_fc_io_getuuid);
+
static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 096b1b47d750..a2e89db1cd63 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -453,6 +453,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
blk_flags);
if (IS_ERR(req))
return PTR_ERR(req);
+ req->end_io = nvme_uring_cmd_end_io;
req->end_io_data = ioucmd;
/* to free bio on completion, as req->bio will be null at that time */
@@ -461,7 +462,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
pdu->meta_len = d.metadata_len;
- blk_execute_rq_nowait(req, 0, nvme_uring_cmd_end_io);
+ blk_execute_rq_nowait(req, false);
return -EIOCBQUEUED;
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5a98a7de0964..48f4f6eb877b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1438,9 +1438,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
}
nvme_init_request(abort_req, &cmd);
+ abort_req->end_io = abort_endio;
abort_req->end_io_data = NULL;
abort_req->rq_flags |= RQF_QUIET;
- blk_execute_rq_nowait(abort_req, false, abort_endio);
+ blk_execute_rq_nowait(abort_req, false);
/*
* The aborted req will be completed on receiving the abort req.
@@ -2485,12 +2486,15 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
return PTR_ERR(req);
nvme_init_request(req, &cmd);
+ if (opcode == nvme_admin_delete_cq)
+ req->end_io = nvme_del_cq_end;
+ else
+ req->end_io = nvme_del_queue_end;
req->end_io_data = nvmeq;
init_completion(&nvmeq->delete_done);
req->rq_flags |= RQF_QUIET;
- blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
- nvme_del_cq_end : nvme_del_queue_end);
+ blk_execute_rq_nowait(req, false);
return 0;
}
@@ -3453,6 +3457,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 5247c24538eb..b1f7efab3918 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -97,7 +97,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
id->sgls |= cpu_to_le32(1 << 20);
/*
- * When passsthru controller is setup using nvme-loop transport it will
+ * When passthru controller is setup using nvme-loop transport it will
* export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will fail in
* the nvme/host/core.c in the nvme_init_subsystem()->nvme_active_ctrl()
code path with a duplicate ctrl subsysnqn. In order to prevent that we
@@ -285,8 +285,9 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
req->p.rq = rq;
queue_work(nvmet_wq, &req->p.work);
} else {
+ rq->end_io = nvmet_passthru_req_done;
rq->end_io_data = req;
- blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
+ blk_execute_rq_nowait(rq, false);
}
if (ns)
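
The NVMe host and nvmet hunks above all follow the same block-layer API change: the completion callback is now stored in rq->end_io before the request is issued, and blk_execute_rq_nowait() only takes the at_head flag. A minimal sketch of the updated submission pattern (the request and callback names are placeholders):

/* Sketch of the new blk_execute_rq_nowait() calling convention. */
rq->end_io = my_end_io;			/* completion callback set on the request */
rq->end_io_data = my_cookie;		/* private data for the callback */
blk_execute_rq_nowait(rq, false);	/* second argument: at_head */
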
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 555aa77a574d..967d0084800e 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -304,6 +304,7 @@ config NVMEM_LAYERSCAPE_SFP
tristate "Layerscape SFP (Security Fuse Processor) support"
depends on ARCH_LAYERSCAPE || COMPILE_TEST
depends on HAS_IOMEM
+ select REGMAP_MMIO
help
This driver provides support to read the eFuses on Freescale
Layerscape SoCs. For example, the vendor provides a per part
@@ -324,4 +325,16 @@ config NVMEM_SUNPLUS_OCOTP
This driver can also be built as a module. If so, the module
will be called nvmem-sunplus-ocotp.
+config NVMEM_APPLE_EFUSES
+ tristate "Apple eFuse support"
+ depends on ARCH_APPLE || COMPILE_TEST
+ default ARCH_APPLE
+ help
+ Say y here to enable support for reading eFuses on Apple SoCs
+ such as the M1. These are used, for example, to store factory-programmed
+ calibration data required for the PCIe or the USB-C PHY.
+
+ This driver can also be built as a module. If so, the module will
+ be called nvmem-apple-efuses.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 891958e29d25..00e136a0a123 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -65,3 +65,5 @@ obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nvmem-layerscape-sfp.o
nvmem-layerscape-sfp-y := layerscape-sfp.o
obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvmem_sunplus_ocotp.o
nvmem_sunplus_ocotp-y := sunplus-ocotp.o
+obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o
+nvmem-apple-efuses-y := apple-efuses.o
diff --git a/drivers/nvmem/apple-efuses.c b/drivers/nvmem/apple-efuses.c
new file mode 100644
index 000000000000..9b7c87102104
--- /dev/null
+++ b/drivers/nvmem/apple-efuses.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Apple SoC eFuse driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+
+struct apple_efuses_priv {
+ void __iomem *fuses;
+};
+
+static int apple_efuses_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct apple_efuses_priv *priv = context;
+ u32 *dst = val;
+
+ while (bytes >= sizeof(u32)) {
+ *dst++ = readl_relaxed(priv->fuses + offset);
+ bytes -= sizeof(u32);
+ offset += sizeof(u32);
+ }
+
+ return 0;
+}
+
+static int apple_efuses_probe(struct platform_device *pdev)
+{
+ struct apple_efuses_priv *priv;
+ struct resource *res;
+ struct nvmem_config config = {
+ .dev = &pdev->dev,
+ .read_only = true,
+ .reg_read = apple_efuses_read,
+ .stride = sizeof(u32),
+ .word_size = sizeof(u32),
+ .name = "apple_efuses_nvmem",
+ .id = NVMEM_DEVID_AUTO,
+ .root_only = true,
+ };
+
+ priv = devm_kzalloc(config.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->fuses = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->fuses))
+ return PTR_ERR(priv->fuses);
+
+ config.priv = priv;
+ config.size = resource_size(res);
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(config.dev, &config));
+}
+
+static const struct of_device_id apple_efuses_of_match[] = {
+ { .compatible = "apple,efuses", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, apple_efuses_of_match);
+
+static struct platform_driver apple_efuses_driver = {
+ .driver = {
+ .name = "apple_efuses",
+ .of_match_table = apple_efuses_of_match,
+ },
+ .probe = apple_efuses_probe,
+};
+
+module_platform_driver(apple_efuses_driver);
+
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
index a8097511582a..dfea96c52463 100644
--- a/drivers/nvmem/bcm-ocotp.c
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -244,7 +244,7 @@ static const struct of_device_id bcm_otpc_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids);
-static const struct acpi_device_id bcm_otpc_acpi_ids[] = {
+static const struct acpi_device_id bcm_otpc_acpi_ids[] __maybe_unused = {
{ .id = "BRCM0700", .driver_data = (kernel_ulong_t)&otp_map },
{ .id = "BRCM0701", .driver_data = (kernel_ulong_t)&otp_map_v2 },
{ /* sentinel */ }
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
index 439f00b9eef6..450b927691c3 100644
--- a/drivers/nvmem/brcm_nvram.c
+++ b/drivers/nvmem/brcm_nvram.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -72,6 +73,7 @@ static int brcm_nvram_add_cells(struct brcm_nvram *priv, uint8_t *data,
return -ENOMEM;
priv->cells[idx].offset = value - (char *)data;
priv->cells[idx].bytes = strlen(value);
+ priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
}
return 0;
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index f58d9bc7aa08..1e3c754efd0d 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -467,6 +467,7 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
cell->bit_offset = info->bit_offset;
cell->nbits = info->nbits;
+ cell->np = info->np;
if (cell->nbits)
cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
diff --git a/drivers/nvmem/layerscape-sfp.c b/drivers/nvmem/layerscape-sfp.c
index e591c1511e33..e2b424561949 100644
--- a/drivers/nvmem/layerscape-sfp.c
+++ b/drivers/nvmem/layerscape-sfp.c
@@ -13,15 +13,17 @@
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/regmap.h>
#define LAYERSCAPE_SFP_OTP_OFFSET 0x0200
struct layerscape_sfp_priv {
- void __iomem *base;
+ struct regmap *regmap;
};
struct layerscape_sfp_data {
int size;
+ enum regmap_endian endian;
};
static int layerscape_sfp_read(void *context, unsigned int offset, void *val,
@@ -29,15 +31,16 @@ static int layerscape_sfp_read(void *context, unsigned int offset, void *val,
{
struct layerscape_sfp_priv *priv = context;
- memcpy_fromio(val, priv->base + LAYERSCAPE_SFP_OTP_OFFSET + offset,
- bytes);
-
- return 0;
+ return regmap_bulk_read(priv->regmap,
+ LAYERSCAPE_SFP_OTP_OFFSET + offset, val,
+ bytes / 4);
}
static struct nvmem_config layerscape_sfp_nvmem_config = {
.name = "fsl-sfp",
.reg_read = layerscape_sfp_read,
+ .word_size = 4,
+ .stride = 4,
};
static int layerscape_sfp_probe(struct platform_device *pdev)
@@ -45,16 +48,26 @@ static int layerscape_sfp_probe(struct platform_device *pdev)
const struct layerscape_sfp_data *data;
struct layerscape_sfp_priv *priv;
struct nvmem_device *nvmem;
+ struct regmap_config config = { 0 };
+ void __iomem *base;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
data = device_get_match_data(&pdev->dev);
+ config.reg_bits = 32;
+ config.reg_stride = 4;
+ config.val_bits = 32;
+ config.val_format_endian = data->endian;
+ config.max_register = LAYERSCAPE_SFP_OTP_OFFSET + data->size - 4;
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, &config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
layerscape_sfp_nvmem_config.size = data->size;
layerscape_sfp_nvmem_config.dev = &pdev->dev;
@@ -65,11 +78,18 @@ static int layerscape_sfp_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(nvmem);
}
+static const struct layerscape_sfp_data ls1021a_data = {
+ .size = 0x88,
+ .endian = REGMAP_ENDIAN_BIG,
+};
+
static const struct layerscape_sfp_data ls1028a_data = {
.size = 0x88,
+ .endian = REGMAP_ENDIAN_LITTLE,
};
static const struct of_device_id layerscape_sfp_dt_ids[] = {
+ { .compatible = "fsl,ls1021a-sfp", .data = &ls1021a_data },
{ .compatible = "fsl,ls1028a-sfp", .data = &ls1028a_data },
{},
};
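For context on the regmap conversion above, a small self-contained sketch of what the per-SoC endianness selection changes: the same four OTP bytes yield different 32-bit values depending on whether the words are interpreted big-endian (ls1021a) or little-endian (ls1028a). The byte values are made up for illustration and are not taken from the hardware.

#include <stdint.h>
#include <stdio.h>

static uint32_t read_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint32_t read_be32(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8 | (uint32_t)p[3];
}

int main(void)
{
	const uint8_t otp_word[4] = { 0x12, 0x34, 0x56, 0x78 };

	printf("little-endian view: 0x%08x\n", read_le32(otp_word));
	printf("big-endian view:    0x%08x\n", read_be32(otp_word));
	return 0;
}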
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index 162132c7dab9..c1e893c8a247 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -217,9 +217,8 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
goto err_clk_rate_set;
}
- ret = pm_runtime_get_sync(priv->dev);
+ ret = pm_runtime_resume_and_get(priv->dev);
if (ret < 0) {
- pm_runtime_put_noidle(priv->dev);
dev_err(priv->dev, "Failed to enable power-domain\n");
goto err_reg_enable;
}
diff --git a/drivers/nvmem/sunplus-ocotp.c b/drivers/nvmem/sunplus-ocotp.c
index 2dc59c22eb55..52b928a7a6d5 100644
--- a/drivers/nvmem/sunplus-ocotp.c
+++ b/drivers/nvmem/sunplus-ocotp.c
@@ -71,7 +71,7 @@ struct sp_ocotp_data {
int size;
};
-const struct sp_ocotp_data sp_otp_v0 = {
+static const struct sp_ocotp_data sp_otp_v0 = {
.size = QAC628_OTP_SIZE,
};
@@ -202,8 +202,6 @@ static int sp_ocotp_probe(struct platform_device *pdev)
(int)QAC628_OTP_NUM_BANKS, (int)OTP_WORDS_PER_BANK,
(int)OTP_WORD_SIZE, (int)QAC628_OTP_SIZE);
- dev_info(dev, "by Sunplus (C) 2020");
-
return 0;
}
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 740407252298..84063eaebb91 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -456,103 +456,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
-/**
- * dev_pm_opp_find_level_exact() - search for an exact level
- * @dev: device for which we do this operation
- * @level: level to search for
- *
- * Return: Searches for exact match in the opp table and returns pointer to the
- * matching opp if found, else returns ERR_PTR in case of error and should
- * be handled using IS_ERR. Error return values can be:
- * EINVAL: for bad pointer
- * ERANGE: no match found for search
- * ENODEV: if device not found in list of registered devices
- *
- * The callers are required to call dev_pm_opp_put() for the returned OPP after
- * use.
- */
-struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
- unsigned int level)
-{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int r = PTR_ERR(opp_table);
-
- dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->level == level) {
- opp = temp_opp;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
-
-/**
- * dev_pm_opp_find_level_ceil() - search for an rounded up level
- * @dev: device for which we do this operation
- * @level: level to search for
- *
- * Return: Searches for rounded up match in the opp table and returns pointer
- * to the matching opp if found, else returns ERR_PTR in case of error and
- * should be handled using IS_ERR. Error return values can be:
- * EINVAL: for bad pointer
- * ERANGE: no match found for search
- * ENODEV: if device not found in list of registered devices
- *
- * The callers are required to call dev_pm_opp_put() for the returned OPP after
- * use.
- */
-struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
- unsigned int *level)
-{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int r = PTR_ERR(opp_table);
-
- dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->level >= *level) {
- opp = temp_opp;
- *level = opp->level;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
-
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
unsigned long *freq)
{
@@ -729,6 +632,223 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
+/**
+ * dev_pm_opp_find_level_exact() - search for an exact level
+ * @dev: device for which we do this operation
+ * @level: level to search for
+ *
+ * Return: Searches for exact match in the opp table and returns pointer to the
+ * matching opp if found, else returns ERR_PTR in case of error and should
+ * be handled using IS_ERR. Error return values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+ unsigned int level)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ int r = PTR_ERR(opp_table);
+
+ dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
+ return ERR_PTR(r);
+ }
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->level == level) {
+ opp = temp_opp;
+
+ /* Increment the reference count of OPP */
+ dev_pm_opp_get(opp);
+ break;
+ }
+ }
+
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
+
+/**
+ * dev_pm_opp_find_level_ceil() - search for a rounded up level
+ * @dev: device for which we do this operation
+ * @level: level to search for
+ *
+ * Return: Searches for rounded up match in the opp table and returns pointer
+ * to the matching opp if found, else returns ERR_PTR in case of error and
+ * should be handled using IS_ERR. Error return values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ int r = PTR_ERR(opp_table);
+
+ dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
+ return ERR_PTR(r);
+ }
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available && temp_opp->level >= *level) {
+ opp = temp_opp;
+ *level = opp->level;
+
+ /* Increment the reference count of OPP */
+ dev_pm_opp_get(opp);
+ break;
+ }
+ }
+
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
+
+/**
+ * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth
+ * @dev: device for which we do this operation
+ * @bw: start bandwidth
+ * @index: which bandwidth to compare, in case of OPPs with several values
+ *
+ * Search for the matching ceil *available* OPP from a starting bandwidth
+ * for a device.
+ *
+ * Return: matching *opp and refreshes *bw accordingly, else returns
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
+ unsigned int *bw, int index)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ if (!dev || !bw) {
+ dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
+ return ERR_PTR(-EINVAL);
+ }
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table);
+
+ if (index >= opp_table->path_count)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available && temp_opp->bandwidth) {
+ if (temp_opp->bandwidth[index].peak >= *bw) {
+ opp = temp_opp;
+ *bw = opp->bandwidth[index].peak;
+
+ /* Increment the reference count of OPP */
+ dev_pm_opp_get(opp);
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
+
+/**
+ * dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth
+ * @dev: device for which we do this operation
+ * @bw: start bandwidth
+ * @index: which bandwidth to compare, in case of OPPs with several values
+ *
+ * Search for the matching floor *available* OPP from a starting bandwidth
+ * for a device.
+ *
+ * Return: matching *opp and refreshes *bw accordingly, else returns
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+ unsigned int *bw, int index)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ if (!dev || !bw) {
+ dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
+ return ERR_PTR(-EINVAL);
+ }
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table);
+
+ if (index >= opp_table->path_count)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available && temp_opp->bandwidth) {
+ /* go to the next node, before choosing prev */
+ if (temp_opp->bandwidth[index].peak > *bw)
+ break;
+ opp = temp_opp;
+ }
+ }
+
+ /* Increment the reference count of OPP */
+ if (!IS_ERR(opp))
+ dev_pm_opp_get(opp);
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
+ if (!IS_ERR(opp))
+ *bw = opp->bandwidth[index].peak;
+
+ return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);
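A hedged usage sketch of the bandwidth helpers added above, not taken from this patch: one way a consumer might pick the lowest OPP whose peak bandwidth satisfies a request. The function name, the requested value and the interconnect path index 0 are illustrative assumptions.

#include <linux/err.h>
#include <linux/pm_opp.h>

static int example_pick_bw_opp(struct device *dev, unsigned int request_kbps)
{
	unsigned int bw = request_kbps;	/* in, requested; out, matched peak bandwidth */
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_bw_ceil(dev, &bw, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* On success, bw was updated to the matched OPP's peak bandwidth. */
	dev_pm_opp_put(opp);

	return 0;
}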
+
static int _set_opp_voltage(struct device *dev, struct regulator *reg,
struct dev_pm_opp_supply *supply)
{
@@ -1486,9 +1606,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put);
*/
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
- struct dev_pm_opp *opp;
+ struct dev_pm_opp *opp = NULL, *iter;
struct opp_table *opp_table;
- bool found = false;
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table))
@@ -1496,16 +1615,16 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
mutex_lock(&opp_table->lock);
- list_for_each_entry(opp, &opp_table->opp_list, node) {
- if (opp->rate == freq) {
- found = true;
+ list_for_each_entry(iter, &opp_table->opp_list, node) {
+ if (iter->rate == freq) {
+ opp = iter;
break;
}
}
mutex_unlock(&opp_table->lock);
- if (found) {
+ if (opp) {
dev_pm_opp_put(opp);
/* Drop the reference taken by dev_pm_opp_add() */
@@ -2019,10 +2138,9 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
for (i = 0; i < count; i++) {
reg = regulator_get_optional(dev, names[i]);
if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "%s: no regulator (%s) found: %d\n",
- __func__, names[i], ret);
+ ret = dev_err_probe(dev, PTR_ERR(reg),
+ "%s: no regulator (%s) found\n",
+ __func__, names[i]);
goto free_regulators;
}
@@ -2168,11 +2286,8 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
/* Find clk for the device */
opp_table->clk = clk_get(dev, name);
if (IS_ERR(opp_table->clk)) {
- ret = PTR_ERR(opp_table->clk);
- if (ret != -EPROBE_DEFER) {
- dev_err(dev, "%s: Couldn't find clock: %d\n", __func__,
- ret);
- }
+ ret = dev_err_probe(dev, PTR_ERR(opp_table->clk),
+ "%s: Couldn't find clock\n", __func__);
goto err;
}
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index 3fcc1f97f2d1..1b6e5c55c3ed 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -195,14 +195,18 @@ void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
static void opp_migrate_dentry(struct opp_device *opp_dev,
struct opp_table *opp_table)
{
- struct opp_device *new_dev;
+ struct opp_device *new_dev = NULL, *iter;
const struct device *dev;
struct dentry *dentry;
/* Look for next opp-dev */
- list_for_each_entry(new_dev, &opp_table->dev_list, node)
- if (new_dev != opp_dev)
+ list_for_each_entry(iter, &opp_table->dev_list, node)
+ if (iter != opp_dev) {
+ new_dev = iter;
break;
+ }
+
+ BUG_ON(!new_dev);
/* new_dev is guaranteed to be valid here */
dev = new_dev->dev;
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 485ea980bde7..30394929d700 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -437,11 +437,11 @@ static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
/* Checking only first OPP is sufficient */
np = of_get_next_available_child(opp_np, NULL);
+ of_node_put(opp_np);
if (!np) {
dev_err(dev, "OPP table empty\n");
return -EINVAL;
}
- of_node_put(opp_np);
prop = of_find_property(np, "opp-peak-kBps", NULL);
of_node_put(np);
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 768d33f9ebc8..a82f845cc4b5 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -69,6 +69,7 @@ struct j721e_pcie_data {
enum j721e_pcie_mode mode;
unsigned int quirk_retrain_flag:1;
unsigned int quirk_detect_quiet_flag:1;
+ unsigned int quirk_disable_flr:1;
u32 linkdown_irq_regfield;
unsigned int byte_access_allowed:1;
};
@@ -307,6 +308,7 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = {
static const struct j721e_pcie_data j7200_pcie_ep_data = {
.mode = PCI_MODE_EP,
.quirk_detect_quiet_flag = true,
+ .quirk_disable_flr = true,
};
static const struct j721e_pcie_data am64_pcie_rc_data = {
@@ -405,6 +407,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+ ep->quirk_disable_flr = data->quirk_disable_flr;
cdns_pcie = &ep->pcie;
cdns_pcie->dev = dev;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 88e05b9c2e5b..b8b655d4047e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -187,8 +187,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
struct cdns_pcie *pcie = &ep->pcie;
u32 r;
- r = find_first_zero_bit(&ep->ob_region_map,
- sizeof(ep->ob_region_map) * BITS_PER_LONG);
+ r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
if (r >= ep->max_regions - 1) {
dev_err(&epc->dev, "no free outbound region\n");
return -EINVAL;
@@ -565,7 +564,8 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
struct device *dev = pcie->dev;
- int ret;
+ int max_epfs = sizeof(epc->function_num_map) * 8;
+ int ret, value, epf;
/*
* BIT(0) is hardwired to 1, hence function 0 is always enabled
@@ -573,6 +573,21 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
*/
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);
+ if (ep->quirk_disable_flr) {
+ for (epf = 0; epf < max_epfs; epf++) {
+ if (!(epc->function_num_map & BIT(epf)))
+ continue;
+
+ value = cdns_pcie_ep_fn_readl(pcie, epf,
+ CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
+ PCI_EXP_DEVCAP);
+ value &= ~PCI_EXP_DEVCAP_FLR;
+ cdns_pcie_ep_fn_writel(pcie, epf,
+ CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
+ PCI_EXP_DEVCAP, value);
+ }
+ }
+
ret = cdns_pcie_start_link(pcie);
if (ret) {
dev_err(dev, "Failed to start link\n");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index fb96d37a135c..940c7dd701d6 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -123,6 +123,14 @@ static int cdns_pcie_retrain(struct cdns_pcie *pcie)
return ret;
}
+static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
+{
+ u32 val;
+
+ val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
+}
+
static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
@@ -501,6 +509,8 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
if (rc->quirk_detect_quiet_flag)
cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
+ cdns_pcie_host_enable_ptm_response(pcie);
+
ret = cdns_pcie_start_link(pcie);
if (ret) {
dev_err(dev, "Failed to start link\n");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index c8a27b6290ce..190786e47df9 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -116,6 +116,10 @@
#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
(((aperture) - 2) << ((bar) * 8))
+/* PTM Control Register */
+#define CDNS_PCIE_LM_PTM_CTRL (CDNS_PCIE_LM_BASE + 0x0da8)
+#define CDNS_PCIE_LM_TPM_CTRL_PTMRSEN BIT(17)
+
/*
* Endpoint Function Registers (PCI configuration space for endpoint functions)
*/
@@ -123,6 +127,7 @@
#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0
+#define CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET 0xc0
#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
/*
@@ -357,6 +362,7 @@ struct cdns_pcie_epf {
* minimize time between read and write
* @epf: Structure to hold info about endpoint function
* @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+ * @quirk_disable_flr: Disable FLR (Function Level Reset) quirk flag
*/
struct cdns_pcie_ep {
struct cdns_pcie pcie;
@@ -372,6 +378,7 @@ struct cdns_pcie_ep {
spinlock_t lock;
struct cdns_pcie_epf *epf;
unsigned int quirk_detect_quiet_flag:1;
+ unsigned int quirk_disable_flr:1;
};
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 6619e3caffe2..7a285fb0f619 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -408,6 +408,11 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
dev_err(dev, "failed to disable vpcie regulator: %d\n",
ret);
}
+
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio))
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ imx6_pcie->gpio_active_high);
}
static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
@@ -540,15 +545,6 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
/* allow the clocks to stabilize */
usleep_range(200, 500);
- /* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high);
- msleep(100);
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- !imx6_pcie->gpio_active_high);
- }
-
switch (imx6_pcie->drvdata->variant) {
case IMX8MQ:
reset_control_deassert(imx6_pcie->pciephy_reset);
@@ -595,6 +591,15 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
break;
}
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ msleep(100);
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ !imx6_pcie->gpio_active_high);
+ /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+ msleep(100);
+ }
+
return;
err_ref_clk:
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 2fa86f32d964..9979302532b7 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -396,7 +396,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
sizeof(pp->msi_msg),
DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(pci->dev, pp->msi_data)) {
+ ret = dma_mapping_error(pci->dev, pp->msi_data);
+ if (ret) {
dev_err(pci->dev, "Failed to map MSI data\n");
pp->msi_data = 0;
goto err_free_msi;
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index c9b341e55cbb..8c5bb9d7cc36 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -10,9 +10,12 @@
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -26,6 +29,7 @@
*/
#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+#define HIWORD_DISABLE_BIT(val) HIWORD_UPDATE(val, ~val)
#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
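A short self-contained sketch, separate from the patch, of the write-enable-mask convention that HIWORD_UPDATE() encodes: the upper 16 bits of a written value select which of the lower 16 register bits actually change, so individual bits can be set or cleared without a read-modify-write. The apb_write() model below is a stand-in for the hardware, not driver code.

#include <stdint.h>
#include <stdio.h>

#define HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))

/* Emulate what the APB block does with a hiword-masked write: only the
 * bits flagged in the upper halfword are updated from the lower one. */
static uint32_t apb_write(uint32_t reg, uint32_t wval)
{
	uint32_t mask = wval >> 16;

	return (reg & ~mask) | (wval & mask);
}

int main(void)
{
	uint32_t intr_mask = 0;

	intr_mask = apb_write(intr_mask, HIWORD_UPDATE(0x4, 0x4)); /* set bit 2 only */
	printf("after set:   0x%08x\n", intr_mask);
	intr_mask = apb_write(intr_mask, HIWORD_UPDATE(0x4, 0x0)); /* clear bit 2 only */
	printf("after clear: 0x%08x\n", intr_mask);
	return 0;
}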
@@ -36,10 +40,12 @@
#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
#define PCIE_L0S_ENTRY 0x11
#define PCIE_CLIENT_GENERAL_CONTROL 0x0
+#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
+#define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
#define PCIE_CLIENT_GENERAL_DEBUG 0x104
-#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
#define PCIE_CLIENT_LTSSM_STATUS 0x300
-#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
+#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
struct rockchip_pcie {
@@ -51,6 +57,7 @@ struct rockchip_pcie {
struct reset_control *rst;
struct gpio_desc *rst_gpio;
struct regulator *vpcie3v3;
+ struct irq_domain *irq_domain;
};
static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip,
@@ -65,6 +72,78 @@ static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
writel_relaxed(val, rockchip->apb_base + reg);
}
+static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
+ unsigned long reg, hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_LEGACY);
+
+ for_each_set_bit(hwirq, &reg, 4)
+ generic_handle_domain_irq(rockchip->irq_domain, hwirq);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void rockchip_intx_mask(struct irq_data *data)
+{
+ rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
+ HIWORD_UPDATE_BIT(BIT(data->hwirq)),
+ PCIE_CLIENT_INTR_MASK_LEGACY);
+};
+
+static void rockchip_intx_unmask(struct irq_data *data)
+{
+ rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
+ HIWORD_DISABLE_BIT(BIT(data->hwirq)),
+ PCIE_CLIENT_INTR_MASK_LEGACY);
+};
+
+static struct irq_chip rockchip_intx_irq_chip = {
+ .name = "INTx",
+ .irq_mask = rockchip_intx_mask,
+ .irq_unmask = rockchip_intx_unmask,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &rockchip_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = rockchip_pcie_intx_map,
+};
+
+static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ struct device_node *intc;
+
+ intc = of_get_child_by_name(dev->of_node, "legacy-interrupt-controller");
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
+ of_node_put(intc);
+ if (!rockchip->irq_domain) {
+ dev_err(dev, "failed to get a INTx IRQ domain\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
{
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
@@ -111,7 +190,20 @@ static int rockchip_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ struct device *dev = rockchip->pci.dev;
u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+ int irq, ret;
+
+ irq = of_irq_get_byname(dev->of_node, "legacy");
+ if (irq < 0)
+ return irq;
+
+ ret = rockchip_pcie_init_irq_domain(rockchip);
+ if (ret < 0)
+ dev_err(dev, "failed to init irq domain\n");
+
+ irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler,
+ rockchip);
/* LTSSM enable control mode */
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
@@ -152,6 +244,11 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev,
if (IS_ERR(rockchip->rst_gpio))
return PTR_ERR(rockchip->rst_gpio);
+ rockchip->rst = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(rockchip->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst),
+ "failed to get reset lines\n");
+
return 0;
}
@@ -182,18 +279,6 @@ static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
phy_power_off(rockchip->phy);
}
-static int rockchip_pcie_reset_control_release(struct rockchip_pcie *rockchip)
-{
- struct device *dev = rockchip->pci.dev;
-
- rockchip->rst = devm_reset_control_array_get_exclusive(dev);
- if (IS_ERR(rockchip->rst))
- return dev_err_probe(dev, PTR_ERR(rockchip->rst),
- "failed to get reset lines\n");
-
- return reset_control_deassert(rockchip->rst);
-}
-
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = rockchip_pcie_link_up,
.start_link = rockchip_pcie_start_link,
@@ -222,6 +307,10 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = reset_control_assert(rockchip->rst);
+ if (ret)
+ return ret;
+
/* DON'T MOVE ME: must be enable before PHY init */
rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
if (IS_ERR(rockchip->vpcie3v3)) {
@@ -241,7 +330,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (ret)
goto disable_regulator;
- ret = rockchip_pcie_reset_control_release(rockchip);
+ ret = reset_control_deassert(rockchip->rst);
if (ret)
goto deinit_phy;
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 6ce8eddf3a37..ec99116ad05c 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -223,11 +223,8 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
disable_irq(pcie_ep->perst_irq);
}
-static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
{
- struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
- struct device *dev = pci->dev;
- u32 val, offset;
int ret;
ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
@@ -247,6 +244,38 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
if (ret)
goto err_phy_exit;
+ return 0;
+
+err_phy_exit:
+ phy_exit(pcie_ep->phy);
+err_disable_clk:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+
+ return ret;
+}
+
+static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep)
+{
+ phy_power_off(pcie_ep->phy);
+ phy_exit(pcie_ep->phy);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+}
+
+static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ struct device *dev = pci->dev;
+ u32 val, offset;
+ int ret;
+
+ ret = qcom_pcie_enable_resources(pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ return ret;
+ }
+
/* Assert WAKE# to RC to indicate device is ready */
gpiod_set_value_cansleep(pcie_ep->wake, 1);
usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
@@ -335,7 +364,7 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
- goto err_phy_power_off;
+ goto err_disable_resources;
}
/*
@@ -355,13 +384,8 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
return 0;
-err_phy_power_off:
- phy_power_off(pcie_ep->phy);
-err_phy_exit:
- phy_exit(pcie_ep->phy);
-err_disable_clk:
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+err_disable_resources:
+ qcom_pcie_disable_resources(pcie_ep);
return ret;
}
@@ -376,10 +400,7 @@ static void qcom_pcie_perst_assert(struct dw_pcie *pci)
return;
}
- phy_power_off(pcie_ep->phy);
- phy_exit(pcie_ep->phy);
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ qcom_pcie_disable_resources(pcie_ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
}
@@ -643,43 +664,26 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
- if (ret)
+ ret = qcom_pcie_enable_resources(pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
return ret;
-
- ret = qcom_pcie_ep_core_reset(pcie_ep);
- if (ret)
- goto err_disable_clk;
-
- ret = phy_init(pcie_ep->phy);
- if (ret)
- goto err_disable_clk;
-
- /* PHY needs to be powered on for dw_pcie_ep_init() */
- ret = phy_power_on(pcie_ep->phy);
- if (ret)
- goto err_phy_exit;
+ }
ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
- goto err_phy_power_off;
+ goto err_disable_resources;
}
ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
if (ret)
- goto err_phy_power_off;
+ goto err_disable_resources;
return 0;
-err_phy_power_off:
- phy_power_off(pcie_ep->phy);
-err_phy_exit:
- phy_exit(pcie_ep->phy);
-err_disable_clk:
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+err_disable_resources:
+ qcom_pcie_disable_resources(pcie_ep);
return ret;
}
@@ -691,10 +695,7 @@ static int qcom_pcie_ep_remove(struct platform_device *pdev)
if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED)
return 0;
- phy_power_off(pcie_ep->phy);
- phy_exit(pcie_ep->phy);
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ qcom_pcie_disable_resources(pcie_ep);
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 816028c0f6ed..2ea13750b492 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1238,12 +1238,6 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
goto err_disable_clocks;
}
- ret = clk_prepare_enable(res->pipe_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable pipe clock\n");
- goto err_disable_clocks;
- }
-
/* Wait for reset to complete, required on SM8450 */
usleep_range(1000, 1500);
@@ -1523,6 +1517,13 @@ static const struct qcom_pcie_cfg sdm845_cfg = {
.has_tbu_clk = true,
};
+static const struct qcom_pcie_cfg sm8150_cfg = {
+ /* sm8150 has qcom IP rev 1.5.0. However 1.5.0 ops are same as
+ * 1.9.0, so reuse the same.
+ */
+ .ops = &ops_1_9_0,
+};
+
static const struct qcom_pcie_cfg sm8250_cfg = {
.ops = &ops_1_9_0,
.has_tbu_clk = true,
@@ -1627,22 +1628,21 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pp->ops = &qcom_pcie_dw_ops;
ret = phy_init(pcie->phy);
- if (ret) {
- pm_runtime_disable(&pdev->dev);
+ if (ret)
goto err_pm_runtime_put;
- }
platform_set_drvdata(pdev, pcie);
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "cannot initialize host\n");
- pm_runtime_disable(&pdev->dev);
- goto err_pm_runtime_put;
+ goto err_phy_exit;
}
return 0;
+err_phy_exit:
+ phy_exit(pcie->phy);
err_pm_runtime_put:
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -1660,6 +1660,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq4019", .data = &ipq4019_cfg },
{ .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg },
{ .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg },
+ { .compatible = "qcom,pcie-sm8150", .data = &sm8150_cfg },
{ .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg },
{ .compatible = "qcom,pcie-sc8180x", .data = &sc8180x_cfg },
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index b1b5f836a806..cc2678490162 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -186,8 +186,6 @@
#define N_FTS_VAL 52
#define FTS_VAL 52
-#define PORT_LOGIC_MSI_CTRL_INT_0_EN 0x828
-
#define GEN3_EQ_CONTROL_OFF 0x8a8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
@@ -2189,9 +2187,6 @@ static int tegra194_pcie_suspend_noirq(struct device *dev)
if (!pcie->link_state)
return 0;
- /* Save MSI interrupt vector */
- pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci,
- PORT_LOGIC_MSI_CTRL_INT_0_EN);
tegra_pcie_downstream_dev_to_D0(pcie);
tegra194_pcie_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
@@ -2223,10 +2218,6 @@ static int tegra194_pcie_resume_noirq(struct device *dev)
if (ret < 0)
goto fail_host_init;
- /* Restore MSI interrupt vector */
- dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN,
- pcie->msi_ctrl_int);
-
return 0;
fail_host_init:
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index d270a204324e..db814f7b93ba 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -92,6 +92,13 @@ static enum pci_protocol_version_t pci_protocol_versions[] = {
#define SLOT_NAME_SIZE 11
/*
+ * Size of requestor for VMbus; the value is based on the observation
+ * that having more than one request outstanding is 'rare', and so 64
+ * should be generous in ensuring that we don't ever run out.
+ */
+#define HV_PCI_RQSTOR_SIZE 64
+
+/*
* Message Types
*/
@@ -604,17 +611,19 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data)
return cfg->vector;
}
-static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
- struct msi_desc *msi_desc)
-{
- msi_entry->address.as_uint32 = msi_desc->msg.address_lo;
- msi_entry->data.as_uint32 = msi_desc->msg.data;
-}
-
static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *info)
{
- return pci_msi_prepare(domain, dev, nvec, info);
+ int ret = pci_msi_prepare(domain, dev, nvec, info);
+
+ /*
+ * By using the interrupt remapper in the hypervisor IOMMU, contiguous
+ * CPU vectors are not needed for multi-MSI
+ */
+ if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
+ info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
+
+ return ret;
}
/**
@@ -631,6 +640,7 @@ static void hv_arch_irq_unmask(struct irq_data *data)
{
struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
struct hv_retarget_device_interrupt *params;
+ struct tran_int_desc *int_desc;
struct hv_pcibus_device *hbus;
struct cpumask *dest;
cpumask_var_t tmp;
@@ -645,6 +655,7 @@ static void hv_arch_irq_unmask(struct irq_data *data)
pdev = msi_desc_to_pci_dev(msi_desc);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+ int_desc = data->chip_data;
spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
@@ -652,7 +663,8 @@ static void hv_arch_irq_unmask(struct irq_data *data)
memset(params, 0, sizeof(*params));
params->partition_id = HV_PARTITION_ID_SELF;
params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
- hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
+ params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff;
+ params->int_entry.msi_entry.data.as_uint32 = int_desc->data;
params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
(hbus->hdev->dev_instance.b[4] << 16) |
(hbus->hdev->dev_instance.b[7] << 8) |
@@ -969,11 +981,7 @@ static void hv_pci_generic_compl(void *context, struct pci_response *resp,
{
struct hv_pci_compl *comp_pkt = context;
- if (resp_packet_size >= offsetofend(struct pci_response, status))
- comp_pkt->completion_status = resp->status;
- else
- comp_pkt->completion_status = -1;
-
+ comp_pkt->completion_status = resp->status;
complete(&comp_pkt->host_event);
}
@@ -1513,6 +1521,10 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
u8 buffer[sizeof(struct pci_delete_interrupt)];
} ctxt;
+ if (!int_desc->vector_count) {
+ kfree(int_desc);
+ return;
+ }
memset(&ctxt, 0, sizeof(ctxt));
int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
int_pkt->message_type.type =
@@ -1520,7 +1532,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
int_pkt->int_desc = *int_desc;
vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
- (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0);
+ 0, VM_PKT_DATA_INBAND, 0);
kfree(int_desc);
}
@@ -1590,19 +1602,24 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp,
struct pci_create_int_response *int_resp =
(struct pci_create_int_response *)resp;
+ if (resp_packet_size < sizeof(*int_resp)) {
+ comp_pkt->comp_pkt.completion_status = -1;
+ goto out;
+ }
comp_pkt->comp_pkt.completion_status = resp->status;
comp_pkt->int_desc = int_resp->int_desc;
+out:
complete(&comp_pkt->comp_pkt.host_event);
}
static u32 hv_compose_msi_req_v1(
struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
- u32 slot, u8 vector)
+ u32 slot, u8 vector, u8 vector_count)
{
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
- int_pkt->int_desc.vector_count = 1;
+ int_pkt->int_desc.vector_count = vector_count;
int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
/*
@@ -1625,14 +1642,14 @@ static int hv_compose_msi_req_get_cpu(struct cpumask *affinity)
static u32 hv_compose_msi_req_v2(
struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
- u32 slot, u8 vector)
+ u32 slot, u8 vector, u8 vector_count)
{
int cpu;
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
- int_pkt->int_desc.vector_count = 1;
+ int_pkt->int_desc.vector_count = vector_count;
int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
cpu = hv_compose_msi_req_get_cpu(affinity);
int_pkt->int_desc.processor_array[0] =
@@ -1644,7 +1661,7 @@ static u32 hv_compose_msi_req_v2(
static u32 hv_compose_msi_req_v3(
struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity,
- u32 slot, u32 vector)
+ u32 slot, u32 vector, u8 vector_count)
{
int cpu;
@@ -1652,7 +1669,7 @@ static u32 hv_compose_msi_req_v3(
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.reserved = 0;
- int_pkt->int_desc.vector_count = 1;
+ int_pkt->int_desc.vector_count = vector_count;
int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
cpu = hv_compose_msi_req_get_cpu(affinity);
int_pkt->int_desc.processor_array[0] =
@@ -1683,6 +1700,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct cpumask *dest;
struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc;
+ struct msi_desc *msi_desc;
+ u8 vector, vector_count;
struct {
struct pci_packet pci_pkt;
union {
@@ -1691,11 +1710,21 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct pci_create_interrupt3 v3;
} int_pkts;
} __packed ctxt;
-
+ u64 trans_id;
u32 size;
int ret;
- pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
+ /* Reuse the previous allocation */
+ if (data->chip_data) {
+ int_desc = data->chip_data;
+ msg->address_hi = int_desc->address >> 32;
+ msg->address_lo = int_desc->address & 0xffffffff;
+ msg->data = int_desc->data;
+ return;
+ }
+
+ msi_desc = irq_data_get_msi_desc(data);
+ pdev = msi_desc_to_pci_dev(msi_desc);
dest = irq_data_get_effective_affinity_mask(data);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
@@ -1704,17 +1733,40 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
if (!hpdev)
goto return_null_message;
- /* Free any previous message that might have already been composed. */
- if (data->chip_data) {
- int_desc = data->chip_data;
- data->chip_data = NULL;
- hv_int_desc_free(hpdev, int_desc);
- }
-
int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
if (!int_desc)
goto drop_reference;
+ if (!msi_desc->pci.msi_attrib.is_msix && msi_desc->nvec_used > 1) {
+ /*
+ * If this is not the first MSI of Multi MSI, we already have
+ * a mapping. Can exit early.
+ */
+ if (msi_desc->irq != data->irq) {
+ data->chip_data = int_desc;
+ int_desc->address = msi_desc->msg.address_lo |
+ (u64)msi_desc->msg.address_hi << 32;
+ int_desc->data = msi_desc->msg.data +
+ (data->irq - msi_desc->irq);
+ msg->address_hi = msi_desc->msg.address_hi;
+ msg->address_lo = msi_desc->msg.address_lo;
+ msg->data = int_desc->data;
+ put_pcichild(hpdev);
+ return;
+ }
+ /*
+ * The vector we select here is a dummy value. The correct
+ * value gets sent to the hypervisor in unmask(). This needs
+ * to be aligned with the count, and also not zero. Multi-msi
+ * is powers of 2 up to 32, so 32 will always work here.
+ */
+ vector = 32;
+ vector_count = msi_desc->nvec_used;
+ } else {
+ vector = hv_msi_get_int_vector(data);
+ vector_count = 1;
+ }
+
memset(&ctxt, 0, sizeof(ctxt));
init_completion(&comp.comp_pkt.host_event);
ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
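A self-contained sketch, not from the patch, of how the per-interrupt MSI data value falls out of the multi-MSI handling above: interrupts after the first reuse the first descriptor's address and take data = base data + (irq - base irq). The base IRQ, base data and vector count below are invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int base_irq = 40;	/* msi_desc->irq of the first MSI in the block */
	uint32_t base_data = 0x80;	/* msi_desc->msg.data composed for the block   */
	unsigned int nvec = 4;		/* msi_desc->nvec_used, a power of 2 up to 32  */
	unsigned int irq;

	for (irq = base_irq; irq < base_irq + nvec; irq++)
		printf("irq %u -> MSI data 0x%x\n", irq, base_data + (irq - base_irq));

	return 0;
}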
@@ -1725,7 +1777,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
dest,
hpdev->desc.win_slot.slot,
- hv_msi_get_int_vector(data));
+ vector,
+ vector_count);
break;
case PCI_PROTOCOL_VERSION_1_2:
@@ -1733,14 +1786,16 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
dest,
hpdev->desc.win_slot.slot,
- hv_msi_get_int_vector(data));
+ vector,
+ vector_count);
break;
case PCI_PROTOCOL_VERSION_1_4:
size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
dest,
hpdev->desc.win_slot.slot,
- hv_msi_get_int_vector(data));
+ vector,
+ vector_count);
break;
default:
@@ -1753,10 +1808,10 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
goto free_int_desc;
}
- ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
- size, (unsigned long)&ctxt.pci_pkt,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
+ size, (unsigned long)&ctxt.pci_pkt,
+ &trans_id, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret) {
dev_err(&hbus->hdev->device,
"Sending request for interrupt failed: 0x%x",
@@ -1835,6 +1890,15 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
enable_tasklet:
tasklet_enable(&channel->callback_event);
+ /*
+ * The completion packet on the stack becomes invalid after 'return';
+ * remove the ID from the VMbus requestor if the identifier is still
+ * mapped to/associated with the packet. (The identifier could have
+ * been 're-used', i.e., already removed and (re-)mapped.)
+ *
+ * Cf. hv_pci_onchannelcallback().
+ */
+ vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt);
free_int_desc:
kfree(int_desc);
drop_reference:
@@ -2082,12 +2146,17 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
}
}
if (high_size <= 1 && low_size <= 1) {
- /* Set the memory enable bit. */
- _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2,
- &command);
- command |= PCI_COMMAND_MEMORY;
- _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2,
- command);
+ /*
+ * No need to set the PCI_COMMAND_MEMORY bit as
+ * the core PCI driver doesn't require the bit
+ * to be pre-set. Actually here we intentionally
+ * keep the bit off so that the PCI BAR probing
+ * in the core PCI driver doesn't cause Hyper-V
+ * to unnecessarily unmap/map the virtual BARs
+ * from/to the physical BARs multiple times.
+ * This reduces the VM boot time significantly
+ * if the BAR sizes are huge.
+ */
break;
}
}
@@ -2223,12 +2292,14 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
struct q_res_req_compl *completion = context;
struct pci_q_res_req_response *q_res_req =
(struct pci_q_res_req_response *)resp;
+ s32 status;
int i;
- if (resp->status < 0) {
+ status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status;
+ if (status < 0) {
dev_err(&completion->hpdev->hbus->hdev->device,
"query resource requirements failed: %x\n",
- resp->status);
+ status);
} else {
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
completion->hpdev->probed_bar[i] =
@@ -2652,7 +2723,7 @@ static void hv_eject_device_work(struct work_struct *work)
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
- sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
+ sizeof(*ejct_pkt), 0,
VM_PKT_DATA_INBAND, 0);
/* For the get_pcichild() in hv_pci_eject_device() */
@@ -2699,8 +2770,9 @@ static void hv_pci_onchannelcallback(void *context)
const int packet_size = 0x100;
int ret;
struct hv_pcibus_device *hbus = context;
+ struct vmbus_channel *chan = hbus->hdev->channel;
u32 bytes_recvd;
- u64 req_id;
+ u64 req_id, req_addr;
struct vmpacket_descriptor *desc;
unsigned char *buffer;
int bufferlen = packet_size;
@@ -2712,14 +2784,15 @@ static void hv_pci_onchannelcallback(void *context)
struct pci_dev_inval_block *inval;
struct pci_dev_incoming *dev_message;
struct hv_pci_dev *hpdev;
+ unsigned long flags;
buffer = kmalloc(bufferlen, GFP_ATOMIC);
if (!buffer)
return;
while (1) {
- ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer,
- bufferlen, &bytes_recvd, &req_id);
+ ret = vmbus_recvpacket_raw(chan, buffer, bufferlen,
+ &bytes_recvd, &req_id);
if (ret == -ENOBUFS) {
kfree(buffer);
@@ -2746,15 +2819,29 @@ static void hv_pci_onchannelcallback(void *context)
switch (desc->type) {
case VM_PKT_COMP:
+ lock_requestor(chan, flags);
+ req_addr = __vmbus_request_addr_match(chan, req_id,
+ VMBUS_RQST_ADDR_ANY);
+ if (req_addr == VMBUS_RQST_ERROR) {
+ unlock_requestor(chan, flags);
+ dev_err(&hbus->hdev->device,
+ "Invalid transaction ID %llx\n",
+ req_id);
+ break;
+ }
+ comp_packet = (struct pci_packet *)req_addr;
+ response = (struct pci_response *)buffer;
/*
- * The host is trusted, and thus it's safe to interpret
- * this transaction ID as a pointer.
+ * Call ->completion_func() within the critical section to make
+ * sure that the packet pointer is still valid during the call:
+ * here 'valid' means that there's a task still waiting for the
+ * completion, and that the packet data is still on the waiting
+ * task's stack. Cf. hv_compose_msi_msg().
*/
- comp_packet = (struct pci_packet *)req_id;
- response = (struct pci_response *)buffer;
comp_packet->completion_func(comp_packet->compl_ctxt,
response,
bytes_recvd);
+ unlock_requestor(chan, flags);
break;
case VM_PKT_DATA_INBAND:
@@ -2764,7 +2851,8 @@ static void hv_pci_onchannelcallback(void *context)
case PCI_BUS_RELATIONS:
bus_rel = (struct pci_bus_relations *)buffer;
- if (bytes_recvd <
+ if (bytes_recvd < sizeof(*bus_rel) ||
+ bytes_recvd <
struct_size(bus_rel, func,
bus_rel->device_count)) {
dev_err(&hbus->hdev->device,
@@ -2778,7 +2866,8 @@ static void hv_pci_onchannelcallback(void *context)
case PCI_BUS_RELATIONS2:
bus_rel2 = (struct pci_bus_relations2 *)buffer;
- if (bytes_recvd <
+ if (bytes_recvd < sizeof(*bus_rel2) ||
+ bytes_recvd <
struct_size(bus_rel2, func,
bus_rel2->device_count)) {
dev_err(&hbus->hdev->device,
@@ -2792,6 +2881,11 @@ static void hv_pci_onchannelcallback(void *context)
case PCI_EJECT:
dev_message = (struct pci_dev_incoming *)buffer;
+ if (bytes_recvd < sizeof(*dev_message)) {
+ dev_err(&hbus->hdev->device,
+ "eject message too small\n");
+ break;
+ }
hpdev = get_pcichild_wslot(hbus,
dev_message->wslot.slot);
if (hpdev) {
@@ -2803,6 +2897,11 @@ static void hv_pci_onchannelcallback(void *context)
case PCI_INVALIDATE_BLOCK:
inval = (struct pci_dev_inval_block *)buffer;
+ if (bytes_recvd < sizeof(*inval)) {
+ dev_err(&hbus->hdev->device,
+ "invalidate message too small\n");
+ break;
+ }
hpdev = get_pcichild_wslot(hbus,
inval->wslot.slot);
if (hpdev) {
@@ -3431,6 +3530,10 @@ static int hv_pci_probe(struct hv_device *hdev,
goto free_dom;
}
+ hdev->channel->next_request_id_callback = vmbus_next_request_id;
+ hdev->channel->request_addr_callback = vmbus_request_addr;
+ hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;
+
ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
hv_pci_onchannelcallback, hbus);
if (ret)
@@ -3561,6 +3664,7 @@ free_bus:
static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
{
struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct vmbus_channel *chan = hdev->channel;
struct {
struct pci_packet teardown_packet;
u8 buffer[sizeof(struct pci_message)];
@@ -3568,13 +3672,14 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
struct hv_pci_compl comp_pkt;
struct hv_pci_dev *hpdev, *tmp;
unsigned long flags;
+ u64 trans_id;
int ret;
/*
* After the host sends the RESCIND_CHANNEL message, it doesn't
* access the per-channel ringbuffer any longer.
*/
- if (hdev->channel->rescind)
+ if (chan->rescind)
return 0;
if (!keep_devs) {
@@ -3611,16 +3716,26 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
pkt.teardown_packet.compl_ctxt = &comp_pkt;
pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
- ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
- sizeof(struct pci_message),
- (unsigned long)&pkt.teardown_packet,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message,
+ sizeof(struct pci_message),
+ (unsigned long)&pkt.teardown_packet,
+ &trans_id, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret)
return ret;
- if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0)
+ if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) {
+ /*
+ * The completion packet on the stack becomes invalid after
+ * 'return'; remove the ID from the VMbus requestor if the
+ * identifier is still mapped to/associated with the packet.
+ *
+ * Cf. hv_pci_onchannelcallback().
+ */
+ vmbus_request_addr_match(chan, trans_id,
+ (unsigned long)&pkt.teardown_packet);
return -ETIMEDOUT;
+ }
return 0;
}
@@ -3761,6 +3876,10 @@ static int hv_pci_resume(struct hv_device *hdev)
hbus->state = hv_pcibus_init;
+ hdev->channel->next_request_id_callback = vmbus_next_request_id;
+ hdev->channel->request_addr_callback = vmbus_request_addr;
+ hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;
+
ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
hv_pci_onchannelcallback, hbus);
if (ret)
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 8f76d4bda356..c1ffdb06c971 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
@@ -66,6 +67,12 @@
#define PCIE_STAT_BUS 0xff00
#define PCIE_STAT_DEV 0x1f0000
#define PCIE_STAT_LINK_DOWN BIT(0)
+#define PCIE_SSPL_OFF 0x1a0c
+#define PCIE_SSPL_VALUE_SHIFT 0
+#define PCIE_SSPL_VALUE_MASK GENMASK(7, 0)
+#define PCIE_SSPL_SCALE_SHIFT 8
+#define PCIE_SSPL_SCALE_MASK GENMASK(9, 8)
+#define PCIE_SSPL_ENABLE BIT(16)
#define PCIE_RC_RTSTA 0x1a14
#define PCIE_DEBUG_CTRL 0x1a60
#define PCIE_DEBUG_SOFT_RESET BIT(20)
@@ -111,6 +118,8 @@ struct mvebu_pcie_port {
struct mvebu_pcie_window iowin;
u32 saved_pcie_stat;
struct resource regs;
+ u8 slot_power_limit_value;
+ u8 slot_power_limit_scale;
struct irq_domain *intx_irq_domain;
raw_spinlock_t irq_lock;
int intx_irq;
@@ -239,7 +248,7 @@ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
{
- u32 ctrl, lnkcap, cmd, dev_rev, unmask;
+ u32 ctrl, lnkcap, cmd, dev_rev, unmask, sspl;
/* Setup PCIe controller to Root Complex mode. */
ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
@@ -292,6 +301,20 @@ static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
/* Point PCIe unit MBUS decode windows to DRAM space. */
mvebu_pcie_setup_wins(port);
+ /*
+ * Program Root Port to automatically send Set_Slot_Power_Limit
+ * PCIe Message when changing status from Dl_Down to Dl_Up and valid
+ * slot power limit was specified.
+ */
+ sspl = mvebu_readl(port, PCIE_SSPL_OFF);
+ sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
+ if (port->slot_power_limit_value) {
+ sspl |= port->slot_power_limit_value << PCIE_SSPL_VALUE_SHIFT;
+ sspl |= port->slot_power_limit_scale << PCIE_SSPL_SCALE_SHIFT;
+ sspl |= PCIE_SSPL_ENABLE;
+ }
+ mvebu_writel(port, sspl, PCIE_SSPL_OFF);
+
/* Mask all interrupt sources. */
mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);
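A self-contained sketch, not from the patch, of how a slot power limit value/scale pair packs into the PCIE_SSPL_OFF register per the field layout defined above (value in bits 7:0, scale in bits 9:8, enable in bit 16). The example pair of 250/1 is an assumed encoding of roughly a 25 W limit; in the driver the pair comes from of_pci_get_slot_power_limit().

#include <stdint.h>
#include <stdio.h>

#define PCIE_SSPL_VALUE_SHIFT	0
#define PCIE_SSPL_SCALE_SHIFT	8
#define PCIE_SSPL_ENABLE	(1u << 16)

/* Build the register word; a value of 0 leaves the feature disabled. */
static uint32_t sspl_pack(uint8_t value, uint8_t scale)
{
	if (!value)
		return 0;

	return ((uint32_t)value << PCIE_SSPL_VALUE_SHIFT) |
	       ((uint32_t)(scale & 0x3) << PCIE_SSPL_SCALE_SHIFT) |
	       PCIE_SSPL_ENABLE;
}

int main(void)
{
	printf("SSPL = 0x%08x\n", sspl_pack(250, 1));
	return 0;
}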
@@ -628,9 +651,24 @@ mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
(PCI_EXP_LNKSTA_DLLLA << 16) : 0);
break;
- case PCI_EXP_SLTCTL:
- *value = PCI_EXP_SLTSTA_PDS << 16;
+ case PCI_EXP_SLTCTL: {
+ u16 slotctl = le16_to_cpu(bridge->pcie_conf.slotctl);
+ u16 slotsta = le16_to_cpu(bridge->pcie_conf.slotsta);
+ u32 val = 0;
+ /*
+ * When slot power limit was not specified in DT then
+ * ASPL_DISABLE bit is stored only in emulated config space.
+ * Otherwise reflect status of PCIE_SSPL_ENABLE bit in HW.
+ */
+ if (!port->slot_power_limit_value)
+ val |= slotctl & PCI_EXP_SLTCTL_ASPL_DISABLE;
+ else if (!(mvebu_readl(port, PCIE_SSPL_OFF) & PCIE_SSPL_ENABLE))
+ val |= PCI_EXP_SLTCTL_ASPL_DISABLE;
+ /* This callback is 32-bit and in high bits is slot status. */
+ val |= slotsta << 16;
+ *value = val;
break;
+ }
case PCI_EXP_RTSTA:
*value = mvebu_readl(port, PCIE_RC_RTSTA);
@@ -774,6 +812,22 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
break;
+ case PCI_EXP_SLTCTL:
+ /*
+ * Allow changing the PCIE_SSPL_ENABLE bit only when a slot power
+ * limit was specified in DT and configured into HW.
+ */
+ if ((mask & PCI_EXP_SLTCTL_ASPL_DISABLE) &&
+ port->slot_power_limit_value) {
+ u32 sspl = mvebu_readl(port, PCIE_SSPL_OFF);
+ if (new & PCI_EXP_SLTCTL_ASPL_DISABLE)
+ sspl &= ~PCIE_SSPL_ENABLE;
+ else
+ sspl |= PCIE_SSPL_ENABLE;
+ mvebu_writel(port, sspl, PCIE_SSPL_OFF);
+ }
+ break;
+
case PCI_EXP_RTSTA:
/*
* PME Status bit in Root Status Register (PCIE_RC_RTSTA)
@@ -868,8 +922,26 @@ static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
/*
* Older mvebu hardware provides PCIe Capability structure only in
* version 1. New hardware provides it in version 2.
+ * Enable slot support which is emulated.
*/
- bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver);
+ bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver | PCI_EXP_FLAGS_SLOT);
+
+ /*
+ * Set the Presence Detect State bit permanently, as there is no support
+ * for unplugging a PCIe card from the slot. Assume that a PCIe card is
+ * always connected in the slot.
+ *
+ * Set the physical slot number to port+1, as mvebu ports are indexed from
+ * zero and the value zero is reserved for ports integrated into the same
+ * silicon as the Root Port, which is not the case for mvebu.
+ *
+ * Also set the correct slot power limit.
+ */
+ bridge->pcie_conf.slotcap = cpu_to_le32(
+ FIELD_PREP(PCI_EXP_SLTCAP_SPLV, port->slot_power_limit_value) |
+ FIELD_PREP(PCI_EXP_SLTCAP_SPLS, port->slot_power_limit_scale) |
+ FIELD_PREP(PCI_EXP_SLTCAP_PSN, port->port+1));
+ bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
bridge->subsystem_vendor_id = ssdev_id & 0xffff;
bridge->subsystem_id = ssdev_id >> 16;
@@ -1191,6 +1263,7 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
{
struct device *dev = &pcie->pdev->dev;
enum of_gpio_flags flags;
+ u32 slot_power_limit;
int reset_gpio, ret;
u32 num_lanes;
@@ -1291,6 +1364,15 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
port->reset_gpio = gpio_to_desc(reset_gpio);
}
+ slot_power_limit = of_pci_get_slot_power_limit(child,
+ &port->slot_power_limit_value,
+ &port->slot_power_limit_scale);
+ if (slot_power_limit)
+ dev_info(dev, "%s: Slot power limit %u.%uW\n",
+ port->name,
+ slot_power_limit / 1000,
+ (slot_power_limit / 100) % 10);
+
port->clk = of_clk_get_by_name(child, NULL);
if (IS_ERR(port->clk)) {
dev_err(dev, "%s: cannot get clock\n", port->name);
@@ -1588,7 +1670,7 @@ static int mvebu_pcie_remove(struct platform_device *pdev)
{
struct mvebu_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- u32 cmd;
+ u32 cmd, sspl;
int i;
/* Remove PCI bus with all devices. */
@@ -1625,6 +1707,11 @@ static int mvebu_pcie_remove(struct platform_device *pdev)
/* Free config space for emulated root bridge. */
pci_bridge_emul_cleanup(&port->bridge);
+ /* Disable sending Set_Slot_Power_Limit PCIe Message. */
+ sspl = mvebu_readl(port, PCIE_SSPL_OFF);
+ sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
+ mvebu_writel(port, sspl, PCIE_SSPL_OFF);
+
/* Disable and clear BARs and windows. */
mvebu_pcie_disable_wins(port);
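The emulated Slot Capabilities register built earlier in this file's mvebu_pci_bridge_emul_init() hunk uses FIELD_PREP() to pack the parsed power limit and the slot number. A standalone sketch of the same packing, using the standard PCI_EXP_SLTCAP_* masks from pci_regs.h and a simplified FIELD_PREP stand-in (the example values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Shift a value into place under a contiguous mask, like linux/bitfield.h FIELD_PREP(). */
#define FIELD_PREP(mask, val)  (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define PCI_EXP_SLTCAP_SPLV  0x00007f80  /* Slot Power Limit Value, bits 14:7 */
#define PCI_EXP_SLTCAP_SPLS  0x00018000  /* Slot Power Limit Scale, bits 16:15 */
#define PCI_EXP_SLTCAP_PSN   0xfff80000  /* Physical Slot Number, bits 31:19 */

int main(void)
{
	uint8_t value = 250, scale = 1;   /* 25.0 W */
	unsigned int port = 0;            /* first mvebu port, reported as slot 1 */

	uint32_t slotcap = FIELD_PREP(PCI_EXP_SLTCAP_SPLV, value) |
			   FIELD_PREP(PCI_EXP_SLTCAP_SPLS, scale) |
			   FIELD_PREP(PCI_EXP_SLTCAP_PSN, port + 1);

	printf("slotcap = %#010x\n", slotcap);	/* 0x0008fd00 */
	return 0;
}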
diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c
index 653d5d0ecf81..7991d334e0f1 100644
--- a/drivers/pci/controller/pci-versatile.c
+++ b/drivers/pci/controller/pci-versatile.c
@@ -31,10 +31,9 @@ static u32 pci_slot_ignore;
static int __init versatile_pci_slot_ignore(char *str)
{
- int retval;
int slot;
- while ((retval = get_option(&str, &slot))) {
+ while (get_option(&str, &slot)) {
if ((slot < 0) || (slot > 31))
pr_err("Illegal slot value: %d\n", slot);
else
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 375c0c40bbf8..e61058e13818 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -24,7 +24,6 @@
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/printk.h>
-#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -196,8 +195,6 @@ static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie,
static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val);
static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);
-static int brcm_pcie_linkup(struct brcm_pcie *pcie);
-static int brcm_pcie_add_bus(struct pci_bus *bus);
enum {
RGR1_SW_INIT_1,
@@ -286,14 +283,6 @@ static const struct pcie_cfg_data bcm2711_cfg = {
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};
-struct subdev_regulators {
- unsigned int num_supplies;
- struct regulator_bulk_data supplies[];
-};
-
-static int pci_subdev_regulators_add_bus(struct pci_bus *bus);
-static void pci_subdev_regulators_remove_bus(struct pci_bus *bus);
-
struct brcm_msi {
struct device *dev;
void __iomem *base;
@@ -331,9 +320,6 @@ struct brcm_pcie {
u32 hw_rev;
void (*perst_set)(struct brcm_pcie *pcie, u32 val);
void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
- bool refusal_mode;
- struct subdev_regulators *sr;
- bool ep_wakeup_capable;
};
static inline bool is_bmips(const struct brcm_pcie *pcie)
@@ -450,99 +436,6 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
return ssc && pll ? 0 : -EIO;
}
-static void *alloc_subdev_regulators(struct device *dev)
-{
- static const char * const supplies[] = {
- "vpcie3v3",
- "vpcie3v3aux",
- "vpcie12v",
- };
- const size_t size = sizeof(struct subdev_regulators)
- + sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies);
- struct subdev_regulators *sr;
- int i;
-
- sr = devm_kzalloc(dev, size, GFP_KERNEL);
- if (sr) {
- sr->num_supplies = ARRAY_SIZE(supplies);
- for (i = 0; i < ARRAY_SIZE(supplies); i++)
- sr->supplies[i].supply = supplies[i];
- }
-
- return sr;
-}
-
-static int pci_subdev_regulators_add_bus(struct pci_bus *bus)
-{
- struct device *dev = &bus->dev;
- struct subdev_regulators *sr;
- int ret;
-
- if (!dev->of_node || !bus->parent || !pci_is_root_bus(bus->parent))
- return 0;
-
- if (dev->driver_data)
- dev_err(dev, "dev.driver_data unexpectedly non-NULL\n");
-
- sr = alloc_subdev_regulators(dev);
- if (!sr)
- return -ENOMEM;
-
- dev->driver_data = sr;
- ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
- if (ret)
- return ret;
-
- ret = regulator_bulk_enable(sr->num_supplies, sr->supplies);
- if (ret) {
- dev_err(dev, "failed to enable regulators for downstream device\n");
- return ret;
- }
-
- return 0;
-}
-
-static int brcm_pcie_add_bus(struct pci_bus *bus)
-{
- struct device *dev = &bus->dev;
- struct brcm_pcie *pcie = (struct brcm_pcie *) bus->sysdata;
- int ret;
-
- if (!dev->of_node || !bus->parent || !pci_is_root_bus(bus->parent))
- return 0;
-
- ret = pci_subdev_regulators_add_bus(bus);
- if (ret)
- return ret;
-
- /* Grab the regulators for suspend/resume */
- pcie->sr = bus->dev.driver_data;
-
- /*
- * If we have failed linkup there is no point to return an error as
- * currently it will cause a WARNING() from pci_alloc_child_bus().
- * We return 0 and turn on the "refusal_mode" so that any further
- * accesses to the pci_dev just get 0xffffffff
- */
- if (brcm_pcie_linkup(pcie) != 0)
- pcie->refusal_mode = true;
-
- return 0;
-}
-
-static void pci_subdev_regulators_remove_bus(struct pci_bus *bus)
-{
- struct device *dev = &bus->dev;
- struct subdev_regulators *sr = dev->driver_data;
-
- if (!sr || !bus->parent || !pci_is_root_bus(bus->parent))
- return;
-
- if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
- dev_err(dev, "failed to disable regulators for downstream device\n");
- dev->driver_data = NULL;
-}
-
/* Limits operation to a specific generation (1, 2, or 3) */
static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
{
@@ -858,18 +751,6 @@ static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
/* Accesses to the RC go right to the RC registers if slot==0 */
if (pci_is_root_bus(bus))
return PCI_SLOT(devfn) ? NULL : base + where;
- if (pcie->refusal_mode) {
- /*
- * At this point we do not have link. There will be a CPU
- * abort -- a quirk with this controller --if Linux tries
- * to read any config-space registers besides those
- * targeting the host bridge. To prevent this we hijack
- * the address to point to a safe access that will return
- * 0xffffffff.
- */
- writel(0xffffffff, base + PCIE_MISC_RC_BAR2_CONFIG_HI);
- return base + PCIE_MISC_RC_BAR2_CONFIG_HI + (where & 0x3);
- }
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
@@ -898,8 +779,6 @@ static struct pci_ops brcm_pcie_ops = {
.map_bus = brcm_pcie_map_conf,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
- .add_bus = brcm_pcie_add_bus,
- .remove_bus = pci_subdev_regulators_remove_bus,
};
static struct pci_ops brcm_pcie_ops32 = {
@@ -1047,9 +926,16 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
u64 rc_bar2_offset, rc_bar2_size;
void __iomem *base = pcie->base;
- int ret, memc;
+ struct device *dev = pcie->dev;
+ struct resource_entry *entry;
+ bool ssc_good = false;
+ struct resource *res;
+ int num_out_wins = 0;
+ u16 nlw, cls, lnksta;
+ int i, ret, memc;
u32 tmp, burst, aspm_support;
/* Reset the bridge */
@@ -1139,40 +1025,6 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
if (pcie->gen)
brcm_pcie_set_gen(pcie, pcie->gen);
- /* Don't advertise L0s capability if 'aspm-no-l0s' */
- aspm_support = PCIE_LINK_STATE_L1;
- if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
- aspm_support |= PCIE_LINK_STATE_L0S;
- tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- u32p_replace_bits(&tmp, aspm_support,
- PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
-
- /*
- * For config space accesses on the RC, show the right class for
- * a PCIe-PCIe bridge (the default setting is to be EP mode).
- */
- tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
- u32p_replace_bits(&tmp, 0x060400,
- PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
-
- return 0;
-}
-
-static int brcm_pcie_linkup(struct brcm_pcie *pcie)
-{
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- struct device *dev = pcie->dev;
- void __iomem *base = pcie->base;
- struct resource_entry *entry;
- struct resource *res;
- int num_out_wins = 0;
- u16 nlw, cls, lnksta;
- bool ssc_good = false;
- u32 tmp;
- int ret, i;
-
/* Unassert the fundamental reset */
pcie->perst_set(pcie, 0);
@@ -1223,6 +1075,24 @@ static int brcm_pcie_linkup(struct brcm_pcie *pcie)
num_out_wins++;
}
+ /* Don't advertise L0s capability if 'aspm-no-l0s' */
+ aspm_support = PCIE_LINK_STATE_L1;
+ if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
+ aspm_support |= PCIE_LINK_STATE_L0S;
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ u32p_replace_bits(&tmp, aspm_support,
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+
+ /*
+ * For config space accesses on the RC, show the right class for
+ * a PCIe-PCIe bridge (the default setting is to be EP mode).
+ */
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ u32p_replace_bits(&tmp, 0x060400,
+ PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+
if (pcie->ssc) {
ret = brcm_pcie_set_ssc(pcie);
if (ret == 0)
@@ -1351,21 +1221,9 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
pcie->bridge_sw_init_set(pcie, 1);
}
-static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
-{
- bool *ret = data;
-
- if (device_may_wakeup(&dev->dev)) {
- *ret = true;
- dev_info(&dev->dev, "disable cancelled for wake-up device\n");
- }
- return (int) *ret;
-}
-
static int brcm_pcie_suspend(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
int ret;
brcm_pcie_turn_off(pcie);
@@ -1383,25 +1241,6 @@ static int brcm_pcie_suspend(struct device *dev)
return ret;
}
- if (pcie->sr) {
- /*
- * Now turn off the regulators, but if at least one
- * downstream device is enabled as a wake-up source, do not
- * turn off regulators.
- */
- pcie->ep_wakeup_capable = false;
- pci_walk_bus(bridge->bus, pci_dev_may_wakeup,
- &pcie->ep_wakeup_capable);
- if (!pcie->ep_wakeup_capable) {
- ret = regulator_bulk_disable(pcie->sr->num_supplies,
- pcie->sr->supplies);
- if (ret) {
- dev_err(dev, "Could not turn off regulators\n");
- reset_control_reset(pcie->rescal);
- return ret;
- }
- }
- }
clk_disable_unprepare(pcie->clk);
return 0;
@@ -1419,28 +1258,9 @@ static int brcm_pcie_resume(struct device *dev)
if (ret)
return ret;
- if (pcie->sr) {
- if (pcie->ep_wakeup_capable) {
- /*
- * We are resuming from a suspend. In the suspend we
- * did not disable the power supplies, so there is
- * no need to enable them (and falsely increase their
- * usage count).
- */
- pcie->ep_wakeup_capable = false;
- } else {
- ret = regulator_bulk_enable(pcie->sr->num_supplies,
- pcie->sr->supplies);
- if (ret) {
- dev_err(dev, "Could not turn on regulators\n");
- goto err_disable_clk;
- }
- }
- }
-
ret = reset_control_reset(pcie->rescal);
if (ret)
- goto err_regulator;
+ goto err_disable_clk;
ret = brcm_phy_start(pcie);
if (ret)
@@ -1461,10 +1281,6 @@ static int brcm_pcie_resume(struct device *dev)
if (ret)
goto err_reset;
- ret = brcm_pcie_linkup(pcie);
- if (ret)
- goto err_reset;
-
if (pcie->msi)
brcm_msi_set_regs(pcie->msi);
@@ -1472,9 +1288,6 @@ static int brcm_pcie_resume(struct device *dev)
err_reset:
reset_control_rearm(pcie->rescal);
-err_regulator:
- if (pcie->sr)
- regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_disable_clk:
clk_disable_unprepare(pcie->clk);
return ret;
@@ -1606,17 +1419,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
- ret = pci_host_probe(bridge);
- if (!ret && !brcm_pcie_link_up(pcie))
- ret = -ENODEV;
-
- if (ret) {
- brcm_pcie_remove(pdev);
- return ret;
- }
-
- return 0;
-
+ return pci_host_probe(bridge);
fail:
__brcm_pcie_remove(pcie);
return ret;
@@ -1625,8 +1428,8 @@ fail:
MODULE_DEVICE_TABLE(of, brcm_pcie_match);
static const struct dev_pm_ops brcm_pcie_pm_ops = {
- .suspend_noirq = brcm_pcie_suspend,
- .resume_noirq = brcm_pcie_resume,
+ .suspend = brcm_pcie_suspend,
+ .resume = brcm_pcie_resume,
};
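The ASPM-support and class-code writes that moved into brcm_pcie_setup() rely on u32p_replace_bits() to rewrite one field of a register image while leaving the rest untouched. A rough standalone equivalent, assuming the class-code field occupies bits 23:0 as the _CLASS_CODE_MASK name suggests (replace_bits32() is a stand-in, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Replace the bits selected by a contiguous mask, keeping the rest of the word. */
static void replace_bits32(uint32_t *p, uint32_t val, uint32_t mask)
{
	*p = (*p & ~mask) | ((val << __builtin_ctz(mask)) & mask);
}

int main(void)
{
	/* Pretend this is PCIE_RC_CFG_PRIV1_ID_VAL3 read back from the RC. */
	uint32_t tmp = 0xff000000 | 0x000b2000;	/* arbitrary example contents */

	/* Advertise a PCI-to-PCI bridge class code (0x060400) in bits 23:0. */
	replace_bits32(&tmp, 0x060400, 0x00ffffff);
	printf("id_val3 = %#010x\n", tmp);	/* 0xff060400 */
	return 0;
}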
static struct platform_driver brcm_pcie_driver = {
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 3e8d70bfabc6..5d9fd36b02d1 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -838,6 +838,14 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
if (err)
return err;
+ /*
+ * The controller may have been left out of reset by the bootloader
+ * so make sure that we get a clean start by asserting resets here.
+ */
+ reset_control_assert(pcie->phy_reset);
+ reset_control_assert(pcie->mac_reset);
+ usleep_range(10, 20);
+
/* Don't touch the hardware registers before power up */
err = mtk_pcie_power_up(pcie);
if (err)
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index ddfbd4aebdec..be8bd919cb88 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -1008,6 +1008,7 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
"mediatek,generic-pciecfg");
if (cfg_node) {
pcie->cfg = syscon_node_to_regmap(cfg_node);
+ of_node_put(cfg_node);
if (IS_ERR(pcie->cfg))
return PTR_ERR(pcie->cfg);
}
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index 29d8e81e4181..dd5dba419047 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -406,6 +406,7 @@ static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *base)
static void mc_handle_msi(struct irq_desc *desc)
{
struct mc_pcie *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct device *dev = port->dev;
struct mc_msi *msi = &port->msi;
void __iomem *bridge_base_addr =
@@ -414,8 +415,11 @@ static void mc_handle_msi(struct irq_desc *desc)
u32 bit;
int ret;
+ chained_irq_enter(chip, desc);
+
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
if (status & PM_MSI_INT_MSI_MASK) {
+ writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL);
status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
for_each_set_bit(bit, &status, msi->num_vectors) {
ret = generic_handle_domain_irq(msi->dev_domain, bit);
@@ -424,6 +428,8 @@ static void mc_handle_msi(struct irq_desc *desc)
bit);
}
}
+
+ chained_irq_exit(chip, desc);
}
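The chained_irq_enter()/chained_irq_exit() pair added above is the standard bracket around a chained interrupt demultiplexer: it acks/masks the parent interrupt before the per-bit dispatch and eois/unmasks it afterwards. A hedged skeleton of the general pattern, not compilable on its own (struct my_ctrl, my_read_pending() and my_ack() are placeholders; the irq_* and chained_irq_* calls are used as in mc_handle_msi() above):

static void my_demux_handler(struct irq_desc *desc)
{
	struct my_ctrl *ctrl = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long status;
	u32 bit;

	chained_irq_enter(chip, desc);		/* ack/mask the parent IRQ */

	status = my_read_pending(ctrl);
	for_each_set_bit(bit, &status, 32) {
		if (generic_handle_domain_irq(ctrl->domain, bit))
			my_ack(ctrl, bit);	/* nothing mapped: ack to avoid a storm */
	}

	chained_irq_exit(chip, desc);		/* eoi/unmask the parent IRQ */
}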
static void mc_msi_bottom_irq_ack(struct irq_data *data)
@@ -432,13 +438,8 @@ static void mc_msi_bottom_irq_ack(struct irq_data *data)
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
u32 bitpos = data->hwirq;
- unsigned long status;
writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
- status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
- if (!status)
- writel_relaxed(BIT(PM_MSI_INT_MSI_SHIFT),
- bridge_base_addr + ISTATUS_LOCAL);
}
static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -563,6 +564,7 @@ static int mc_allocate_msi_domains(struct mc_pcie *port)
static void mc_handle_intx(struct irq_desc *desc)
{
struct mc_pcie *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct device *dev = port->dev;
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
@@ -570,6 +572,8 @@ static void mc_handle_intx(struct irq_desc *desc)
u32 bit;
int ret;
+ chained_irq_enter(chip, desc);
+
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
if (status & PM_MSI_INT_INTX_MASK) {
status &= PM_MSI_INT_INTX_MASK;
@@ -581,6 +585,8 @@ static void mc_handle_intx(struct irq_desc *desc)
bit);
}
}
+
+ chained_irq_exit(chip, desc);
}
static void mc_ack_intx_irq(struct irq_data *data)
@@ -1115,7 +1121,7 @@ static const struct of_device_id mc_pcie_of_match[] = {
{},
};
-MODULE_DEVICE_TABLE(of, mc_pcie_of_match)
+MODULE_DEVICE_TABLE(of, mc_pcie_of_match);
static struct platform_driver mc_pcie_driver = {
.probe = pci_host_common_probe,
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 5fb9ce6e536e..d1a200b93b2b 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -264,8 +264,7 @@ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
struct rockchip_pcie *pcie = &ep->rockchip;
u32 r;
- r = find_first_zero_bit(&ep->ob_region_map,
- sizeof(ep->ob_region_map) * BITS_PER_LONG);
+ r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
/*
* Region 0 is reserved for configuration space and shouldn't
* be used elsewhere per TRM, so leave it out.
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index eb05cceab964..94a14a3d7e55 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -6,7 +6,6 @@
#include <linux/device.h>
#include <linux/interrupt.h>
-#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -813,8 +812,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
* acceptable because the guest is usually CPU-limited and MSI
* remapping doesn't become a performance bottleneck.
*/
- if (iommu_capable(vmd->dev->dev.bus, IOMMU_CAP_INTR_REMAP) ||
- !(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) ||
+ if (!(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) ||
offset[0] || offset[1]) {
ret = vmd_alloc_irqs(vmd);
if (ret)
@@ -853,6 +851,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
vmd_attach_resources(vmd);
if (vmd->irq_domain)
dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
+ else
+ dev_set_msi_domain(&vmd->bus->dev,
+ dev_get_msi_domain(&vmd->dev->dev));
vmd_acpi_begin();
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index f4c2e6e01be0..881d420637bf 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
+#include <linux/of_fdt.h>
#include <asm/opal.h>
#include <asm/pnv-pci.h>
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index e6991ff67526..980bb3afd092 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 9887c9de08c3..491986197c47 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/smp.h>
@@ -20,6 +21,7 @@
#include <asm/eeh.h> /* for eeh_add_device() */
#include <asm/rtas.h> /* rtas_call */
#include <asm/pci-bridge.h> /* for pci_controller */
+#include <asm/prom.h>
#include "../pci.h" /* for pci_add_new_bus */
/* and pci_do_scan_bus */
#include "rpaphp.h"
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index c380bdacd146..630f77057c23 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -8,6 +8,7 @@
* Send feedback to <lxie@us.ibm.com>
*
*/
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/string.h>
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 93b4a945c55d..779eab12e981 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sysfs.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/slab.h>
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index cb2e8351c2cc..196834ed44fe 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -369,7 +369,6 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
dev_dbg(dev, "Parsing dma-ranges property...\n");
for_each_of_pci_range(&parser, &range) {
- struct resource_entry *entry;
/*
* If we failed translation or got a zero-sized region
* then skip this range
@@ -393,12 +392,7 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
goto failed;
}
- /* Keep the resource list sorted */
- resource_list_for_each_entry(entry, ib_resources)
- if (entry->res->start > res->start)
- break;
-
- pci_add_resource_offset(&entry->node, res,
+ pci_add_resource_offset(ib_resources, res,
res->start - range.pci_addr);
}
@@ -633,3 +627,73 @@ int of_pci_get_max_link_speed(struct device_node *node)
return max_link_speed;
}
EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
+
+/**
+ * of_pci_get_slot_power_limit - Parses the "slot-power-limit-milliwatt"
+ * property.
+ *
+ * @node: device tree node with the slot power limit information
+ * @slot_power_limit_value: pointer where the value should be stored in PCIe
+ * Slot Capabilities Register format
+ * @slot_power_limit_scale: pointer where the scale should be stored in PCIe
+ * Slot Capabilities Register format
+ *
+ * Returns the slot power limit in milliwatts and if @slot_power_limit_value
+ * and @slot_power_limit_scale pointers are non-NULL, fills in the value and
+ * scale in format used by PCIe Slot Capabilities Register.
+ *
+ * If the property is not found or is invalid, returns 0.
+ */
+u32 of_pci_get_slot_power_limit(struct device_node *node,
+ u8 *slot_power_limit_value,
+ u8 *slot_power_limit_scale)
+{
+ u32 slot_power_limit_mw;
+ u8 value, scale;
+
+ if (of_property_read_u32(node, "slot-power-limit-milliwatt",
+ &slot_power_limit_mw))
+ slot_power_limit_mw = 0;
+
+ /* Calculate Slot Power Limit Value and Slot Power Limit Scale */
+ if (slot_power_limit_mw == 0) {
+ value = 0x00;
+ scale = 0;
+ } else if (slot_power_limit_mw <= 255) {
+ value = slot_power_limit_mw;
+ scale = 3;
+ } else if (slot_power_limit_mw <= 255*10) {
+ value = slot_power_limit_mw / 10;
+ scale = 2;
+ slot_power_limit_mw = slot_power_limit_mw / 10 * 10;
+ } else if (slot_power_limit_mw <= 255*100) {
+ value = slot_power_limit_mw / 100;
+ scale = 1;
+ slot_power_limit_mw = slot_power_limit_mw / 100 * 100;
+ } else if (slot_power_limit_mw <= 239*1000) {
+ value = slot_power_limit_mw / 1000;
+ scale = 0;
+ slot_power_limit_mw = slot_power_limit_mw / 1000 * 1000;
+ } else if (slot_power_limit_mw < 250*1000) {
+ value = 0xEF;
+ scale = 0;
+ slot_power_limit_mw = 239*1000;
+ } else if (slot_power_limit_mw <= 600*1000) {
+ value = 0xF0 + (slot_power_limit_mw / 1000 - 250) / 25;
+ scale = 0;
+ slot_power_limit_mw = slot_power_limit_mw / (1000*25) * (1000*25);
+ } else {
+ value = 0xFE;
+ scale = 0;
+ slot_power_limit_mw = 600*1000;
+ }
+
+ if (slot_power_limit_value)
+ *slot_power_limit_value = value;
+
+ if (slot_power_limit_scale)
+ *slot_power_limit_scale = scale;
+
+ return slot_power_limit_mw;
+}
+EXPORT_SYMBOL_GPL(of_pci_get_slot_power_limit);
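The encoding above maps milliwatts onto the Slot Power Limit Value/Scale pair of the PCIe Slot Capabilities register: scales 3, 2, 1 and 0 select 1 mW, 10 mW, 100 mW and 1 W units, values 0xF0-0xFE encode 250-600 W in 25 W steps, and the returned limit is rounded down to what the pair can express. A standalone re-implementation with a few worked cases (the helper name encode_spl() is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Standalone copy of the encoding used by of_pci_get_slot_power_limit(). */
static uint32_t encode_spl(uint32_t mw, uint8_t *value, uint8_t *scale)
{
	if (mw == 0) {
		*value = 0x00; *scale = 0;
	} else if (mw <= 255) {			/* 1 mW units */
		*value = mw; *scale = 3;
	} else if (mw <= 255 * 10) {		/* 10 mW units */
		*value = mw / 10; *scale = 2; mw = mw / 10 * 10;
	} else if (mw <= 255 * 100) {		/* 100 mW units */
		*value = mw / 100; *scale = 1; mw = mw / 100 * 100;
	} else if (mw <= 239 * 1000) {		/* 1 W units */
		*value = mw / 1000; *scale = 0; mw = mw / 1000 * 1000;
	} else if (mw < 250 * 1000) {		/* gap below the 250 W code point */
		*value = 0xEF; *scale = 0; mw = 239 * 1000;
	} else if (mw <= 600 * 1000) {		/* 250-600 W in 25 W steps */
		*value = 0xF0 + (mw / 1000 - 250) / 25; *scale = 0;
		mw = mw / (1000 * 25) * (1000 * 25);
	} else {
		*value = 0xFE; *scale = 0; mw = 600 * 1000;
	}
	return mw;
}

int main(void)
{
	uint32_t tests[] = { 25000, 75000, 245000, 300000 };

	for (unsigned int i = 0; i < 4; i++) {
		uint8_t v, s;
		uint32_t mw = encode_spl(tests[i], &v, &s);

		printf("%6u mW -> value %#04x scale %u (reports %u mW)\n",
		       tests[i], v, s, mw);
	}
	/* 25000->0xfa/1, 75000->0x4b/0, 245000->0xef/0 (239000), 300000->0xf2/0 */
	return 0;
}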
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 30b1df3c9d2f..462b429ad243 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -326,15 +326,16 @@ static const struct pci_p2pdma_whitelist_entry {
};
/*
- * This lookup function tries to find the PCI device corresponding to a given
- * host bridge.
+ * If the first device on host's root bus is either devfn 00.0 or a PCIe
+ * Root Port, return it. Otherwise return NULL.
*
- * It assumes the host bridge device is the first PCI device in the
- * bus->devices list and that the devfn is 00.0. These assumptions should hold
- * for all the devices in the whitelist above.
+ * We often use a devfn 00.0 "host bridge" in the pci_p2pdma_whitelist[]
+ * (though there is no PCI/PCIe requirement for such a device). On some
+ * platforms, e.g., Intel Skylake, there is no such host bridge device, and
+ * pci_p2pdma_whitelist[] may contain a Root Port at any devfn.
*
- * This function is equivalent to pci_get_slot(host->bus, 0), however it does
- * not take the pci_bus_sem lock seeing __host_bridge_whitelist() must not
+ * This function is similar to pci_get_slot(host->bus, 0), but it does
+ * not take the pci_bus_sem lock since __host_bridge_whitelist() must not
* sleep.
*
* For this to be safe, the caller should hold a reference to a device on the
@@ -350,10 +351,14 @@ static struct pci_dev *pci_host_bridge_dev(struct pci_host_bridge *host)
if (!root)
return NULL;
- if (root->devfn != PCI_DEVFN(0, 0))
- return NULL;
- return root;
+ if (root->devfn == PCI_DEVFN(0, 0))
+ return root;
+
+ if (pci_pcie_type(root) == PCI_EXP_TYPE_ROOT_PORT)
+ return root;
+
+ return NULL;
}
static bool __host_bridge_whitelist(struct pci_host_bridge *host,
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 3787876ecb24..3760d85c10d2 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -974,9 +974,11 @@ bool acpi_pci_power_manageable(struct pci_dev *dev)
bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
- const union acpi_object *obj;
- struct acpi_device *adev;
struct pci_dev *rpdev;
+ struct acpi_device *adev;
+ acpi_status status;
+ unsigned long long state;
+ const union acpi_object *obj;
if (acpi_pci_disabled || !dev->is_hotplug_bridge)
return false;
@@ -985,12 +987,6 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
if (acpi_pci_power_manageable(dev))
return true;
- /*
- * The ACPI firmware will provide the device-specific properties through
- * _DSD configuration object. Look for the 'HotPlugSupportInD3' property
- * for the root port and if it is set we know the hierarchy behind it
- * supports D3 just fine.
- */
rpdev = pcie_find_root_port(dev);
if (!rpdev)
return false;
@@ -999,11 +995,34 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
if (!adev)
return false;
- if (acpi_dev_get_property(adev, "HotPlugSupportInD3",
- ACPI_TYPE_INTEGER, &obj) < 0)
+ /*
+ * If the Root Port cannot generate wakeup signals at all, i.e., it
+ * doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug
+ * events from low-power states including D3hot and D3cold.
+ */
+ if (!adev->wakeup.flags.valid)
return false;
- return obj->integer.value == 1;
+ /*
+ * If the Root Port cannot wake itself from D3hot or D3cold, we
+ * can't use D3.
+ */
+ status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
+ if (ACPI_SUCCESS(status) && state < ACPI_STATE_D3_HOT)
+ return false;
+
+ /*
+ * The "HotPlugSupportInD3" property in a Root Port _DSD indicates
+ * the Port can signal hotplug events while in D3. We assume any
+ * bridges *below* that Root Port can also signal hotplug events
+ * while in D3.
+ */
+ if (!acpi_dev_get_property(adev, "HotPlugSupportInD3",
+ ACPI_TYPE_INTEGER, &obj) &&
+ obj->integer.value == 1)
+ return true;
+
+ return false;
}
int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
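The rewritten acpi_pci_bridge_d3() checks three things on the Root Port's ACPI companion, in order: a valid wakeup GPE (_PRW), a _S0W deep enough to wake from D3hot, and the "HotPlugSupportInD3" _DSD property. A compressed standalone restatement of that decision order, with the ACPI lookups replaced by pre-evaluated booleans/integers (purely illustrative):

#include <stdbool.h>
#include <stdio.h>

enum { D0, D1, D2, D3HOT, D3COLD };	/* stand-ins for ACPI_STATE_D* */

/* Mirror the order of checks in acpi_pci_bridge_d3(); inputs are already evaluated. */
static bool root_port_allows_d3(bool has_wakeup_gpe, bool have_s0w, int s0w,
				bool hotplug_in_d3)
{
	if (!has_wakeup_gpe)		/* no _PRW: cannot signal hotplug from D3 */
		return false;
	if (have_s0w && s0w < D3HOT)	/* _S0W says it cannot wake itself from D3 */
		return false;
	return hotplug_in_d3;		/* "HotPlugSupportInD3" == 1 */
}

int main(void)
{
	printf("%d\n", root_port_allows_d3(true, true, D3COLD, true));	/* 1 */
	printf("%d\n", root_port_allows_d3(true, true, D2, true));	/* 0 */
	return 0;
}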
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 4ceeb75fc899..49238ddd39ee 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -20,6 +20,7 @@
#include <linux/of_device.h>
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
+#include <linux/iommu.h>
#include "pci.h"
#include "pcie/portdrv.h"
@@ -522,9 +523,9 @@ static void pci_device_shutdown(struct device *dev)
pci_clear_master(pci_dev);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
-/* Auxiliary functions used for system resume and run-time resume. */
+/* Auxiliary functions used for system resume */
/**
* pci_restore_standard_config - restore standard config registers of PCI device
@@ -544,6 +545,11 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
pci_pme_restore(pci_dev);
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+
+/* Auxiliary functions used for system resume and run-time resume */
static void pci_pm_default_resume(struct pci_dev *pci_dev)
{
@@ -551,18 +557,34 @@ static void pci_pm_default_resume(struct pci_dev *pci_dev)
pci_enable_wake(pci_dev, PCI_D0, false);
}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-
-static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
+static void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
{
pci_power_up(pci_dev);
pci_update_current_state(pci_dev, PCI_D0);
+}
+
+static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
+{
+ pci_pm_power_up_and_verify_state(pci_dev);
pci_restore_state(pci_dev);
pci_pme_restore(pci_dev);
}
+static void pci_pm_bridge_power_up_actions(struct pci_dev *pci_dev)
+{
+ pci_bridge_wait_for_secondary_bus(pci_dev);
+ /*
+ * When powering on a bridge from D3cold, the whole hierarchy may be
+ * powered on into the D0uninitialized state. Resume the devices below
+ * it to give them a chance to suspend again.
+ */
+ pci_resume_bus(pci_dev->subordinate);
+}
+
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+
/*
* Default "suspend" method for devices that have no driver provided suspend,
* or not even a driver at all (second part).
@@ -934,7 +956,7 @@ static int pci_pm_resume_noirq(struct device *dev)
pcie_pme_root_status_cleanup(pci_dev);
if (!skip_bus_pm && prev_state == PCI_D3cold)
- pci_bridge_wait_for_secondary_bus(pci_dev);
+ pci_pm_bridge_power_up_actions(pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
return 0;
@@ -1068,7 +1090,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
* in case the driver's "freeze" callbacks put it into a low-power
* state.
*/
- pci_set_power_state(pci_dev, PCI_D0);
+ pci_pm_power_up_and_verify_state(pci_dev);
pci_restore_state(pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
@@ -1312,7 +1334,7 @@ static int pci_pm_runtime_resume(struct device *dev)
* to a driver because although we left it in D0, it may have gone to
* D3cold when the bridge above it runtime suspended.
*/
- pci_restore_standard_config(pci_dev);
+ pci_pm_default_resume_early(pci_dev);
if (!pci_dev->driver)
return 0;
@@ -1321,13 +1343,11 @@ static int pci_pm_runtime_resume(struct device *dev)
pci_pm_default_resume(pci_dev);
if (prev_state == PCI_D3cold)
- pci_bridge_wait_for_secondary_bus(pci_dev);
+ pci_pm_bridge_power_up_actions(pci_dev);
if (pm && pm->runtime_resume)
error = pm->runtime_resume(dev);
- pci_dev->runtime_d3cold = false;
-
return error;
}
@@ -1601,6 +1621,7 @@ static int pci_bus_num_vf(struct device *dev)
*/
static int pci_dma_configure(struct device *dev)
{
+ struct pci_driver *driver = to_pci_driver(dev->driver);
struct device *bridge;
int ret = 0;
@@ -1616,9 +1637,24 @@ static int pci_dma_configure(struct device *dev)
}
pci_put_host_bridge_device(bridge);
+
+ if (!ret && !driver->driver_managed_dma) {
+ ret = iommu_device_use_default_domain(dev);
+ if (ret)
+ arch_teardown_dma_ops(dev);
+ }
+
return ret;
}
+static void pci_dma_cleanup(struct device *dev)
+{
+ struct pci_driver *driver = to_pci_driver(dev->driver);
+
+ if (!driver->driver_managed_dma)
+ iommu_device_unuse_default_domain(dev);
+}
+
struct bus_type pci_bus_type = {
.name = "pci",
.match = pci_bus_match,
@@ -1632,6 +1668,7 @@ struct bus_type pci_bus_type = {
.pm = PCI_PM_OPS_PTR,
.num_vf = pci_bus_num_vf,
.dma_configure = pci_dma_configure,
+ .dma_cleanup = pci_dma_cleanup,
};
EXPORT_SYMBOL(pci_bus_type);
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index e408099fea52..d1f4c1ce7bd1 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -36,6 +36,7 @@ static struct pci_driver stub_driver = {
.name = "pci-stub",
.id_table = NULL, /* only dynamic id's */
.probe = pci_stub_probe,
+ .driver_managed_dma = true,
};
static int __init pci_stub_init(void)
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index c263ffc5884a..fc804e08e3cb 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -567,31 +567,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
- char *driver_override, *old, *cp;
-
- /* We need to keep extra room for a newline */
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- device_lock(dev);
- old = pdev->driver_override;
- if (strlen(driver_override)) {
- pdev->driver_override = driver_override;
- } else {
- kfree(driver_override);
- pdev->driver_override = NULL;
- }
- device_unlock(dev);
+ int ret;
- kfree(old);
+ ret = driver_set_override(dev, &pdev->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d25122fbe98a..cfaf40a540a8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1068,126 +1068,6 @@ static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
}
/**
- * pci_raw_set_power_state - Use PCI PM registers to set the power state of
- * given PCI device
- * @dev: PCI device to handle.
- * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
- *
- * RETURN VALUE:
- * -EINVAL if the requested state is invalid.
- * -EIO if device does not support PCI PM or its PM capabilities register has a
- * wrong version, or device doesn't support the requested state.
- * 0 if device already is in the requested state.
- * 0 if device's power state has been successfully changed.
- */
-static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
-{
- u16 pmcsr;
- bool need_restore = false;
-
- /* Check if we're already there */
- if (dev->current_state == state)
- return 0;
-
- if (!dev->pm_cap)
- return -EIO;
-
- if (state < PCI_D0 || state > PCI_D3hot)
- return -EINVAL;
-
- /*
- * Validate transition: We can enter D0 from any state, but if
- * we're already in a low-power state, we can only go deeper. E.g.,
- * we can go from D1 to D3, but we can't go directly from D3 to D1;
- * we'd have to go from D3 to D0, then to D1.
- */
- if (state != PCI_D0 && dev->current_state <= PCI_D3cold
- && dev->current_state > state) {
- pci_err(dev, "invalid power transition (from %s to %s)\n",
- pci_power_name(dev->current_state),
- pci_power_name(state));
- return -EINVAL;
- }
-
- /* Check if this device supports the desired state */
- if ((state == PCI_D1 && !dev->d1_support)
- || (state == PCI_D2 && !dev->d2_support))
- return -EIO;
-
- pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
- if (PCI_POSSIBLE_ERROR(pmcsr)) {
- pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
- pci_power_name(dev->current_state),
- pci_power_name(state));
- return -EIO;
- }
-
- /*
- * If we're (effectively) in D3, force entire word to 0.
- * This doesn't affect PME_Status, disables PME_En, and
- * sets PowerState to 0.
- */
- switch (dev->current_state) {
- case PCI_D0:
- case PCI_D1:
- case PCI_D2:
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
- pmcsr |= state;
- break;
- case PCI_D3hot:
- case PCI_D3cold:
- case PCI_UNKNOWN: /* Boot-up */
- if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
- && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
- need_restore = true;
- fallthrough; /* force to D0 */
- default:
- pmcsr = 0;
- break;
- }
-
- /* Enter specified state */
- pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
-
- /*
- * Mandatory power management transition delays; see PCI PM 1.1
- * 5.6.1 table 18
- */
- if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
- pci_dev_d3_sleep(dev);
- else if (state == PCI_D2 || dev->current_state == PCI_D2)
- udelay(PCI_PM_D2_DELAY);
-
- pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
- dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
- if (dev->current_state != state)
- pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
- pci_power_name(dev->current_state),
- pci_power_name(state));
-
- /*
- * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
- * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
- * from D3hot to D0 _may_ perform an internal reset, thereby
- * going to "D0 Uninitialized" rather than "D0 Initialized".
- * For example, at least some versions of the 3c905B and the
- * 3c556B exhibit this behaviour.
- *
- * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
- * devices in a D3hot state at boot. Consequently, we need to
- * restore at least the BARs so that the device will be
- * accessible to its driver.
- */
- if (need_restore)
- pci_restore_bars(dev);
-
- if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
-
- return 0;
-}
-
-/**
* pci_update_current_state - Read power state of given device and cache it
* @dev: PCI device to handle.
* @state: State to cache in case the device doesn't have the PM capability
@@ -1201,14 +1081,17 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
*/
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
- if (platform_pci_get_power_state(dev) == PCI_D3cold ||
- !pci_device_is_present(dev)) {
+ if (platform_pci_get_power_state(dev) == PCI_D3cold) {
dev->current_state = PCI_D3cold;
} else if (dev->pm_cap) {
u16 pmcsr;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
- dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+ if (PCI_POSSIBLE_ERROR(pmcsr)) {
+ dev->current_state = PCI_D3cold;
+ return;
+ }
+ dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
} else {
dev->current_state = state;
}
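pci_update_current_state() now treats an all-ones PMCSR read as "device unreachable, assume D3cold". PCI_POSSIBLE_ERROR() boils down to an all-ones check on the value read, since failed config reads return all ones. A minimal standalone illustration (GNU C typeof, as in the kernel macro):

#include <stdint.h>
#include <stdio.h>

/* Same idea as the kernel's PCI_POSSIBLE_ERROR(): failed config reads return all ones. */
#define PCI_POSSIBLE_ERROR(val)  ((val) == ((typeof(val))~0))

#define PCI_PM_CTRL_STATE_MASK   0x0003

int main(void)
{
	uint16_t pmcsr = 0xffff;	/* e.g. read from a powered-down device */

	if (PCI_POSSIBLE_ERROR(pmcsr))
		printf("config space inaccessible, assume D3cold\n");
	else
		printf("current power state D%u\n", pmcsr & PCI_PM_CTRL_STATE_MASK);
	return 0;
}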
@@ -1306,26 +1189,114 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
/**
* pci_power_up - Put the given device into D0
* @dev: PCI device to power up
+ *
+ * On success, return 0 or 1, depending on whether or not it is necessary to
+ * restore the device's BARs subsequently (1 is returned in that case).
*/
int pci_power_up(struct pci_dev *dev)
{
- pci_platform_power_transition(dev, PCI_D0);
+ bool need_restore;
+ pci_power_t state;
+ u16 pmcsr;
+
+ platform_pci_set_power_state(dev, PCI_D0);
+
+ if (!dev->pm_cap) {
+ state = platform_pci_get_power_state(dev);
+ if (state == PCI_UNKNOWN)
+ dev->current_state = PCI_D0;
+ else
+ dev->current_state = state;
+
+ if (state == PCI_D0)
+ return 0;
+
+ return -EIO;
+ }
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ if (PCI_POSSIBLE_ERROR(pmcsr)) {
+ pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
+ pci_power_name(dev->current_state));
+ dev->current_state = PCI_D3cold;
+ return -EIO;
+ }
+
+ state = pmcsr & PCI_PM_CTRL_STATE_MASK;
+
+ need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
+ !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
+
+ if (state == PCI_D0)
+ goto end;
/*
- * Mandatory power management transition delays are handled in
- * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
- * corresponding bridge.
+ * Force the entire word to 0. This doesn't affect PME_Status, disables
+ * PME_En, and sets PowerState to 0.
*/
- if (dev->runtime_d3cold) {
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);
+
+ /* Mandatory transition delays; see PCI PM 1.2. */
+ if (state == PCI_D3hot)
+ pci_dev_d3_sleep(dev);
+ else if (state == PCI_D2)
+ udelay(PCI_PM_D2_DELAY);
+
+end:
+ dev->current_state = PCI_D0;
+ if (need_restore)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * pci_set_full_power_state - Put a PCI device into D0 and update its state
+ * @dev: PCI device to power up
+ *
+ * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
+ * to confirm the state change, restore its BARs if they might be lost and
+ * reconfigure ASPM in accordance with the new power state.
+ *
+ * If pci_restore_state() is going to be called right after a power state change
+ * to D0, it is more efficient to use pci_power_up() directly instead of this
+ * function.
+ */
+static int pci_set_full_power_state(struct pci_dev *dev)
+{
+ u16 pmcsr;
+ int ret;
+
+ ret = pci_power_up(dev);
+ if (ret < 0)
+ return ret;
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
+ if (dev->current_state != PCI_D0) {
+ pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
+ pci_power_name(dev->current_state));
+ } else if (ret > 0) {
/*
- * When powering on a bridge from D3cold, the whole hierarchy
- * may be powered on into D0uninitialized state, resume them to
- * give them a chance to suspend again
+ * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
+ * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
+ * from D3hot to D0 _may_ perform an internal reset, thereby
+ * going to "D0 Uninitialized" rather than "D0 Initialized".
+ * For example, at least some versions of the 3c905B and the
+ * 3c556B exhibit this behaviour.
+ *
+ * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
+ * devices in a D3hot state at boot. Consequently, we need to
+ * restore at least the BARs so that the device will be
+ * accessible to its driver.
*/
- pci_resume_bus(dev->subordinate);
+ pci_restore_bars(dev);
}
- return pci_raw_set_power_state(dev, PCI_D0);
+ if (dev->bus->self)
+ pcie_aspm_pm_state_change(dev->bus->self);
+
+ return 0;
}
/**
@@ -1353,6 +1324,79 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
}
/**
+ * pci_set_low_power_state - Put a PCI device into a low-power state.
+ * @dev: PCI device to handle.
+ * @state: PCI power state (D1, D2, D3hot) to put the device into.
+ *
+ * Use the device's PCI_PM_CTRL register to put it into a low-power state.
+ *
+ * RETURN VALUE:
+ * -EINVAL if the requested state is invalid.
+ * -EIO if device does not support PCI PM or its PM capabilities register has a
+ * wrong version, or device doesn't support the requested state.
+ * 0 if device already is in the requested state.
+ * 0 if device's power state has been successfully changed.
+ */
+static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
+{
+ u16 pmcsr;
+
+ if (!dev->pm_cap)
+ return -EIO;
+
+ /*
+ * Validate transition: We can enter D0 from any state, but if
+ * we're already in a low-power state, we can only go deeper. E.g.,
+ * we can go from D1 to D3, but we can't go directly from D3 to D1;
+ * we'd have to go from D3 to D0, then to D1.
+ */
+ if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
+ pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
+ return -EINVAL;
+ }
+
+ /* Check if this device supports the desired state */
+ if ((state == PCI_D1 && !dev->d1_support)
+ || (state == PCI_D2 && !dev->d2_support))
+ return -EIO;
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ if (PCI_POSSIBLE_ERROR(pmcsr)) {
+ pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
+ dev->current_state = PCI_D3cold;
+ return -EIO;
+ }
+
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ pmcsr |= state;
+
+ /* Enter specified state */
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
+
+ /* Mandatory power management transition delays; see PCI PM 1.2. */
+ if (state == PCI_D3hot)
+ pci_dev_d3_sleep(dev);
+ else if (state == PCI_D2)
+ udelay(PCI_PM_D2_DELAY);
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
+ if (dev->current_state != state)
+ pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
+
+ if (dev->bus->self)
+ pcie_aspm_pm_state_change(dev->bus->self);
+
+ return 0;
+}
+
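The transition check at the top of pci_set_low_power_state() enforces the PCI PM rule that, short of going back to D0 first, a device may only move to a deeper state. A tiny standalone restatement of that predicate (illustrative only):

#include <stdbool.h>
#include <stdio.h>

enum pci_power { PCI_D0, PCI_D1, PCI_D2, PCI_D3HOT, PCI_D3COLD, PCI_UNKNOWN };

/* Short of returning to D0, a device may only move to a deeper (or equal) state. */
static bool low_power_transition_ok(enum pci_power cur, enum pci_power target)
{
	/* An unknown current state is let through, as in pci_set_low_power_state(). */
	return !(cur <= PCI_D3COLD && cur > target);
}

int main(void)
{
	printf("%d\n", low_power_transition_ok(PCI_D1, PCI_D3HOT));	/* 1 */
	printf("%d\n", low_power_transition_ok(PCI_D3HOT, PCI_D1));	/* 0: must go via D0 */
	return 0;
}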
+/**
* pci_set_power_state - Set the power state of a PCI device
* @dev: PCI device to handle.
* @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
@@ -1393,7 +1437,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return 0;
if (state == PCI_D0)
- return pci_power_up(dev);
+ return pci_set_full_power_state(dev);
/*
* This device is quirked not to be put into D3, so don't put it in
@@ -1402,19 +1446,25 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
return 0;
- /*
- * To put device in D3cold, we put device into D3hot in native
- * way, then put device into D3cold with platform ops
- */
- error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
- PCI_D3hot : state);
+ if (state == PCI_D3cold) {
+ /*
+ * To put the device in D3cold, put it into D3hot in the native
+ * way, then put it into D3cold using platform ops.
+ */
+ error = pci_set_low_power_state(dev, PCI_D3hot);
+
+ if (pci_platform_power_transition(dev, PCI_D3cold))
+ return error;
- if (pci_platform_power_transition(dev, state))
- return error;
+ /* Powering off a bridge may power off the whole hierarchy */
+ if (dev->current_state == PCI_D3cold)
+ pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+ } else {
+ error = pci_set_low_power_state(dev, state);
- /* Powering off a bridge may power off the whole hierarchy */
- if (state == PCI_D3cold)
- pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+ if (pci_platform_power_transition(dev, state))
+ return error;
+ }
return 0;
}
@@ -2718,8 +2768,6 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
- dev->runtime_d3cold = target_state == PCI_D3cold;
-
/*
* There are systems (for example, Intel mobile chips since Coffee
* Lake) where the power drawn while suspended can be significantly
@@ -2737,7 +2785,6 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
if (error) {
pci_enable_wake(dev, target_state, false);
pci_restore_ptm_state(dev);
- dev->runtime_d3cold = false;
}
return error;
@@ -2920,6 +2967,8 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
},
+ },
+ {
/*
* Downstream device is not accessible after putting a root port
* into D3cold and back into D0 on Elo i2.
@@ -5113,19 +5162,19 @@ static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
void pci_dev_lock(struct pci_dev *dev)
{
- pci_cfg_access_lock(dev);
/* block PM suspend, driver probe, etc. */
device_lock(&dev->dev);
+ pci_cfg_access_lock(dev);
}
EXPORT_SYMBOL_GPL(pci_dev_lock);
/* Return 1 on successful lock, 0 on contention */
int pci_dev_trylock(struct pci_dev *dev)
{
- if (pci_cfg_access_trylock(dev)) {
- if (device_trylock(&dev->dev))
+ if (device_trylock(&dev->dev)) {
+ if (pci_cfg_access_trylock(dev))
return 1;
- pci_cfg_access_unlock(dev);
+ device_unlock(&dev->dev);
}
return 0;
@@ -5134,8 +5183,8 @@ EXPORT_SYMBOL_GPL(pci_dev_trylock);
void pci_dev_unlock(struct pci_dev *dev)
{
- device_unlock(&dev->dev);
pci_cfg_access_unlock(dev);
+ device_unlock(&dev->dev);
}
EXPORT_SYMBOL_GPL(pci_dev_unlock);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 3d60cabde1a1..e10cdec6c56e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -627,6 +627,9 @@ struct device_node;
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
int of_pci_get_max_link_speed(struct device_node *node);
+u32 of_pci_get_slot_power_limit(struct device_node *node,
+ u8 *slot_power_limit_value,
+ u8 *slot_power_limit_scale);
void pci_set_of_node(struct pci_dev *dev);
void pci_release_of_node(struct pci_dev *dev);
void pci_set_bus_of_node(struct pci_bus *bus);
@@ -653,6 +656,18 @@ of_pci_get_max_link_speed(struct device_node *node)
return -EINVAL;
}
+static inline u32
+of_pci_get_slot_power_limit(struct device_node *node,
+ u8 *slot_power_limit_value,
+ u8 *slot_power_limit_scale)
+{
+ if (slot_power_limit_value)
+ *slot_power_limit_value = 0;
+ if (slot_power_limit_scale)
+ *slot_power_limit_scale = 0;
+ return 0;
+}
+
static inline void pci_set_of_node(struct pci_dev *dev) { }
static inline void pci_release_of_node(struct pci_dev *dev) { }
static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 9fa1f97e5b27..7952e5efd6cf 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -101,6 +101,11 @@ struct aer_stats {
#define ERR_COR_ID(d) (d & 0xffff)
#define ERR_UNCOR_ID(d) (d >> 16)
+#define AER_ERR_STATUS_MASK (PCI_ERR_ROOT_UNCOR_RCV | \
+ PCI_ERR_ROOT_COR_RCV | \
+ PCI_ERR_ROOT_MULTI_COR_RCV | \
+ PCI_ERR_ROOT_MULTI_UNCOR_RCV)
+
static int pcie_aer_disable;
static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
@@ -1196,7 +1201,7 @@ static irqreturn_t aer_irq(int irq, void *context)
struct aer_err_source e_src = {};
pci_read_config_dword(rp, aer + PCI_ERR_ROOT_STATUS, &e_src.status);
- if (!(e_src.status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV)))
+ if (!(e_src.status & AER_ERR_STATUS_MASK))
return IRQ_NONE;
pci_read_config_dword(rp, aer + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
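With the new AER_ERR_STATUS_MASK, aer_irq() claims the interrupt when any of the four receive bits in the Root Error Status register is set, not just the two single-error bits. A minimal standalone check using the standard pci_regs.h bit values (the example status word is illustrative):

#include <stdint.h>
#include <stdio.h>

#define PCI_ERR_ROOT_COR_RCV         0x00000001	/* ERR_COR received */
#define PCI_ERR_ROOT_MULTI_COR_RCV   0x00000002	/* multiple ERR_COR received */
#define PCI_ERR_ROOT_UNCOR_RCV       0x00000004	/* ERR_FATAL/NONFATAL received */
#define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008	/* multiple ERR_FATAL/NONFATAL received */

#define AER_ERR_STATUS_MASK (PCI_ERR_ROOT_UNCOR_RCV | PCI_ERR_ROOT_COR_RCV | \
			     PCI_ERR_ROOT_MULTI_COR_RCV | PCI_ERR_ROOT_MULTI_UNCOR_RCV)

int main(void)
{
	/* Only "multiple uncorrectable received" set: previously ignored, now claimed. */
	uint32_t root_status = PCI_ERR_ROOT_MULTI_UNCOR_RCV;

	printf("claim IRQ: %s\n", (root_status & AER_ERR_STATUS_MASK) ? "yes" : "no");
	return 0;
}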
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 4b8801656ffb..7f8788a970ae 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -202,6 +202,8 @@ static struct pci_driver pcie_portdriver = {
.err_handler = &pcie_portdrv_err_handler,
+ .driver_managed_dma = true,
+
.driver.pm = PCIE_PORTDRV_PM_OPS,
};
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index da829274fc66..41aeaa235132 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -12,6 +12,7 @@
* file, where their drivers can use them.
*/
+#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/export.h>
@@ -5895,3 +5896,49 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1533, rom_bar_overlap_defect);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1536, rom_bar_overlap_defect);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1537, rom_bar_overlap_defect);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1538, rom_bar_overlap_defect);
+
+#ifdef CONFIG_PCIEASPM
+/*
+ * Several Intel DG2 graphics devices advertise that they can only tolerate
+ * 1us latency when transitioning from L1 to L0, which may prevent ASPM L1
+ * from being enabled. But in fact these devices can tolerate unlimited
+ * latency. Override their Device Capabilities value to allow ASPM L1 to
+ * be enabled.
+ */
+static void aspm_l1_acceptable_latency(struct pci_dev *dev)
+{
+ u32 l1_lat = FIELD_GET(PCI_EXP_DEVCAP_L1, dev->devcap);
+
+ if (l1_lat < 7) {
+ dev->devcap |= FIELD_PREP(PCI_EXP_DEVCAP_L1, 7);
+ pci_info(dev, "ASPM: overriding L1 acceptable latency from %#x to 0x7\n",
+ l1_lat);
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f80, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f81, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f82, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f83, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f84, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f85, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f86, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f87, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f88, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5690, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5691, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5692, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5693, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5694, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5695, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a0, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a1, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a2, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a3, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a4, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a5, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a6, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56b0, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56b1, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c0, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c1, aspm_l1_acceptable_latency);
+#endif
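The quirk bumps the Endpoint L1 Acceptable Latency field of the cached Device Capabilities to 7 (no limit, i.e. more than 64 us) so that ASPM L1 is not ruled out by the advertised 1 us tolerance. A standalone illustration of the same field surgery, with simplified FIELD_GET/FIELD_PREP stand-ins and an example devcap value:

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_DEVCAP_L1   0x00000e00	/* Endpoint L1 Acceptable Latency, bits 11:9 */

#define FIELD_GET(mask, reg)   (((reg) & (mask)) >> __builtin_ctz(mask))
#define FIELD_PREP(mask, val)  (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	uint32_t devcap = 0x00002c10;	/* example: L1 latency field currently 6 (up to 64 us) */
	uint32_t l1_lat = FIELD_GET(PCI_EXP_DEVCAP_L1, devcap);

	if (l1_lat < 7) {
		/* ORing in an all-ones field value works because 7 sets every bit of the field. */
		devcap |= FIELD_PREP(PCI_EXP_DEVCAP_L1, 7);
		printf("overriding L1 acceptable latency from %#x to 0x7\n", l1_lat);
	}
	printf("devcap = %#010x\n", devcap);	/* 0x00002e10 */
	return 0;
}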
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index ec977f031bc2..bf495bf0f48a 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -151,7 +151,7 @@ config TCIC
config PCMCIA_ALCHEMY_DEVBOARD
tristate "Alchemy Db/Pb1xxx PCMCIA socket services"
- depends on MIPS_ALCHEMY && PCMCIA
+ depends on MIPS_DB1XXX && PCMCIA
help
Enable this driver if you want PCMCIA support on your Alchemy
Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200, DB1300
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index c43267b18f55..c59ddde42007 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -50,18 +50,5 @@ sa1100_cs-$(CONFIG_SA1100_SIMPAD) += sa1100_simpad.o
pxa2xx-obj-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o
pxa2xx-obj-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o
-pxa2xx-obj-$(CONFIG_ARCOM_PCMCIA) += pxa2xx_viper.o
-pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o
-pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
-pxa2xx-obj-$(CONFIG_MACH_PALMTC) += pxa2xx_palmtc.o
-pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
-pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
-pxa2xx-obj-$(CONFIG_MACH_VPAC270) += pxa2xx_vpac270.o
-pxa2xx-obj-$(CONFIG_MACH_BALLOON3) += pxa2xx_balloon3.o
-pxa2xx-obj-$(CONFIG_MACH_COLIBRI) += pxa2xx_colibri.o
-pxa2xx-obj-$(CONFIG_MACH_COLIBRI320) += pxa2xx_colibri.o
-pxa2xx-obj-$(CONFIG_MACH_H4700) += pxa2xx_hx4700.o
-
obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y)
-
obj-$(CONFIG_PCMCIA_XXS1500) += xxs1500_ss.o
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c
index 16f573173471..bb06311d0b5f 100644
--- a/drivers/pcmcia/bcm63xx_pcmcia.c
+++ b/drivers/pcmcia/bcm63xx_pcmcia.c
@@ -327,10 +327,11 @@ static int bcm63xx_drv_pcmcia_probe(struct platform_device *pdev)
{
struct bcm63xx_pcmcia_socket *skt;
struct pcmcia_socket *sock;
- struct resource *res, *irq_res;
+ struct resource *res;
unsigned int regmem_size = 0, iomem_size = 0;
u32 val;
int ret;
+ int irq;
skt = kzalloc(sizeof(*skt), GFP_KERNEL);
if (!skt)
@@ -342,9 +343,9 @@ static int bcm63xx_drv_pcmcia_probe(struct platform_device *pdev)
/* make sure we have all resources we need */
skt->common_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
skt->attr_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ irq = platform_get_irq(pdev, 0);
skt->pd = pdev->dev.platform_data;
- if (!skt->common_res || !skt->attr_res || !irq_res || !skt->pd) {
+ if (!skt->common_res || !skt->attr_res || (irq < 0) || !skt->pd) {
ret = -EINVAL;
goto err;
}
@@ -380,7 +381,7 @@ static int bcm63xx_drv_pcmcia_probe(struct platform_device *pdev)
sock->dev.parent = &pdev->dev;
sock->features = SS_CAP_STATIC_MAP | SS_CAP_PCCARD;
sock->io_offset = (unsigned long)skt->io_base;
- sock->pci_irq = irq_res->start;
+ sock->pci_irq = irq;
#ifdef CONFIG_CARDBUS
sock->cb_dev = bcm63xx_cb_dev;
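The hunk above is the usual conversion away from IORESOURCE_IRQ lookups: platform_get_irq() returns the virq directly (and already prints an error on failure), so the probe only needs a sign check instead of a NULL check on a resource. A minimal sketch of that pattern, using a made-up foo_probe() purely for illustration:

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        int irq;

        /* replaces platform_get_resource(pdev, IORESOURCE_IRQ, 0) */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;     /* platform_get_irq() already logged why */

        /* the plain IRQ number goes wherever the resource's start went */
        dev_info(&pdev->dev, "using IRQ %d\n", irq);
        return 0;
}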
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index d6d2f75f8f47..0ea41f1411e5 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -23,12 +23,11 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/soc/pxa/smemc.h>
-#include <mach/hardware.h>
-#include <mach/smemc.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <mach/pxa2xx-regs.h>
#include <asm/mach-types.h>
#include <pcmcia/ss.h>
@@ -113,7 +112,7 @@ static inline u_int pxa2xx_pcmcia_cmd_time(u_int mem_clk_10khz,
return (300000 * (pcmcia_mcxx_asst + 1) / mem_clk_10khz);
}
-static int pxa2xx_pcmcia_set_mcmem( int sock, int speed, int clock )
+static uint32_t pxa2xx_pcmcia_mcmem(int sock, int speed, int clock)
{
uint32_t val;
@@ -124,12 +123,10 @@ static int pxa2xx_pcmcia_set_mcmem( int sock, int speed, int clock )
| ((pxa2xx_mcxx_hold(speed, clock)
& MCXX_HOLD_MASK) << MCXX_HOLD_SHIFT);
- __raw_writel(val, MCMEM(sock));
-
- return 0;
+ return val;
}
-static int pxa2xx_pcmcia_set_mcio( int sock, int speed, int clock )
+static int pxa2xx_pcmcia_mcio(int sock, int speed, int clock)
{
uint32_t val;
@@ -140,12 +137,11 @@ static int pxa2xx_pcmcia_set_mcio( int sock, int speed, int clock )
| ((pxa2xx_mcxx_hold(speed, clock)
& MCXX_HOLD_MASK) << MCXX_HOLD_SHIFT);
- __raw_writel(val, MCIO(sock));
- return 0;
+ return val;
}
-static int pxa2xx_pcmcia_set_mcatt( int sock, int speed, int clock )
+static int pxa2xx_pcmcia_mcatt(int sock, int speed, int clock)
{
uint32_t val;
@@ -156,31 +152,26 @@ static int pxa2xx_pcmcia_set_mcatt( int sock, int speed, int clock )
| ((pxa2xx_mcxx_hold(speed, clock)
& MCXX_HOLD_MASK) << MCXX_HOLD_SHIFT);
- __raw_writel(val, MCATT(sock));
- return 0;
+ return val;
}
-static int pxa2xx_pcmcia_set_mcxx(struct soc_pcmcia_socket *skt, unsigned int clk)
+static int pxa2xx_pcmcia_set_timing(struct soc_pcmcia_socket *skt)
{
+ unsigned long clk = clk_get_rate(skt->clk) / 10000;
struct soc_pcmcia_timing timing;
int sock = skt->nr;
soc_common_pcmcia_get_timing(skt, &timing);
- pxa2xx_pcmcia_set_mcmem(sock, timing.mem, clk);
- pxa2xx_pcmcia_set_mcatt(sock, timing.attr, clk);
- pxa2xx_pcmcia_set_mcio(sock, timing.io, clk);
+ pxa_smemc_set_pcmcia_timing(sock,
+ pxa2xx_pcmcia_mcmem(sock, timing.mem, clk),
+ pxa2xx_pcmcia_mcatt(sock, timing.attr, clk),
+ pxa2xx_pcmcia_mcio(sock, timing.io, clk));
return 0;
}
-static int pxa2xx_pcmcia_set_timing(struct soc_pcmcia_socket *skt)
-{
- unsigned long clk = clk_get_rate(skt->clk);
- return pxa2xx_pcmcia_set_mcxx(skt, clk / 10000);
-}
-
#ifdef CONFIG_CPU_FREQ
static int
@@ -215,18 +206,13 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops)
{
- /*
- * We have at least one socket, so set MECR:CIT
- * (Card Is There)
- */
- uint32_t mecr = MECR_CIT;
+ int nr = 1;
- /* Set MECR:NOS (Number Of Sockets) */
if ((ops->first + ops->nr) > 1 ||
machine_is_viper() || machine_is_arcom_zeus())
- mecr |= MECR_NOS;
+ nr = 2;
- __raw_writel(mecr, MECR);
+ pxa_smemc_set_pcmcia_socket(nr);
}
EXPORT_SYMBOL(pxa2xx_configure_sockets);
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index 5fdd25a9e28e..b3ba858f70cb 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -15,11 +15,10 @@
#include <linux/platform_device.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/hardware/scoop.h>
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
#define NO_KEEP_VS 0x0001
#define SCOOP_DEV platform_scoop_config->devs
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 6b6c578b5f92..ad1141fddb4c 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -394,7 +394,7 @@ static int do_validate_mem(struct pcmcia_socket *s,
* do_mem_probe() checks a memory region for use by the PCMCIA subsystem.
* To do so, the area is split up into sensible parts, and then passed
* into the @validate() function. Only if @validate() and @fallback() fail,
- * the area is marked as unavaibale for use by the PCMCIA subsystem. The
+ * the area is marked as unavailable for use by the PCMCIA subsystem. The
* function returns the size of the usable memory area.
*/
static int do_mem_probe(struct pcmcia_socket *s, u_long base, u_long num,
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 29fdd174bc23..bce664bbdc98 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -17,7 +17,6 @@
#include <pcmcia/ss.h>
-#include <mach/hardware.h>
#include <asm/hardware/sa1111.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
diff --git a/drivers/pcmcia/sa1111_lubbock.c b/drivers/pcmcia/sa1111_lubbock.c
index 7feb8d61c639..f1b5160cb8fa 100644
--- a/drivers/pcmcia/sa1111_lubbock.c
+++ b/drivers/pcmcia/sa1111_lubbock.c
@@ -17,7 +17,6 @@
#include <linux/init.h>
#include <linux/delay.h>
-#include <mach/hardware.h>
#include <asm/hardware/sa1111.h>
#include <asm/mach-types.h>
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 3a8c84bb174d..61b0c8952bb5 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -46,8 +46,7 @@
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
-
-#include <mach/hardware.h>
+#include <linux/pci.h>
#include "soc_common.h"
@@ -784,8 +783,7 @@ void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt)
/* should not be required; violates some lowlevel drivers */
soc_common_pcmcia_config_skt(skt, &dead_socket);
- iounmap(skt->virt_io);
- skt->virt_io = NULL;
+ iounmap(PCI_IOBASE + skt->res_io_io.start);
release_resource(&skt->res_attr);
release_resource(&skt->res_mem);
release_resource(&skt->res_io);
@@ -818,11 +816,12 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
if (ret)
goto out_err_4;
- skt->virt_io = ioremap(skt->res_io.start, 0x10000);
- if (skt->virt_io == NULL) {
- ret = -ENOMEM;
+ skt->res_io_io = (struct resource)
+ DEFINE_RES_IO_NAMED(skt->nr * 0x1000 + 0x10000, 0x1000,
+ "PCMCIA I/O");
+ ret = pci_remap_iospace(&skt->res_io_io, skt->res_io.start);
+ if (ret)
goto out_err_5;
- }
/*
* We initialize default socket timing here, because
@@ -840,7 +839,7 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
skt->socket.resource_ops = &pccard_static_ops;
skt->socket.irq_mask = 0;
skt->socket.map_size = PAGE_SIZE;
- skt->socket.io_offset = (unsigned long)skt->virt_io;
+ skt->socket.io_offset = (unsigned long)skt->res_io_io.start;
skt->status = soc_common_pcmcia_skt_state(skt);
@@ -874,7 +873,7 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
out_err_7:
soc_pcmcia_hw_shutdown(skt);
out_err_6:
- iounmap(skt->virt_io);
+ iounmap(PCI_IOBASE + skt->res_io_io.start);
out_err_5:
release_resource(&skt->res_attr);
out_err_4:
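soc_common now maps the socket's I/O window through the generic logical I/O space instead of a private ioremap(), which is why the teardown paths address it as PCI_IOBASE plus the resource start. A rough sketch of that idiom, assuming a caller that owns a physical I/O window at phys and wants it at bus offset 0x10000 (all names below are illustrative):

#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/pci.h>

static struct resource example_io;

static int map_io_window(phys_addr_t phys)
{
        /* claim a 4 KiB slot in the logical I/O space ... */
        example_io = (struct resource)
                DEFINE_RES_IO_NAMED(0x10000, 0x1000, "example I/O");

        /* ... and back it with the physical window */
        return pci_remap_iospace(&example_io, phys);
}

static void unmap_io_window(void)
{
        /* mirrors the iounmap(PCI_IOBASE + ...) used in the patch above */
        iounmap(PCI_IOBASE + example_io.start);
}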
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index 222e81c79365..17ef05aa8afe 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -13,137 +13,19 @@
/* include the world */
#include <linux/clk.h>
#include <linux/cpufreq.h>
-#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
-
+#include <pcmcia/soc_common.h>
struct device;
struct gpio_desc;
struct pcmcia_low_level;
struct regulator;
-struct soc_pcmcia_regulator {
- struct regulator *reg;
- bool on;
-};
-
-/*
- * This structure encapsulates per-socket state which we might need to
- * use when responding to a Card Services query of some kind.
- */
-struct soc_pcmcia_socket {
- struct pcmcia_socket socket;
-
- /*
- * Info from low level handler
- */
- unsigned int nr;
- struct clk *clk;
-
- /*
- * Core PCMCIA state
- */
- const struct pcmcia_low_level *ops;
-
- unsigned int status;
- socket_state_t cs_state;
-
- unsigned short spd_io[MAX_IO_WIN];
- unsigned short spd_mem[MAX_WIN];
- unsigned short spd_attr[MAX_WIN];
-
- struct resource res_skt;
- struct resource res_io;
- struct resource res_mem;
- struct resource res_attr;
- void __iomem *virt_io;
-
- struct {
- int gpio;
- struct gpio_desc *desc;
- unsigned int irq;
- const char *name;
- } stat[6];
-#define SOC_STAT_CD 0 /* Card detect */
-#define SOC_STAT_BVD1 1 /* BATDEAD / IOSTSCHG */
-#define SOC_STAT_BVD2 2 /* BATWARN / IOSPKR */
-#define SOC_STAT_RDY 3 /* Ready / Interrupt */
-#define SOC_STAT_VS1 4 /* Voltage sense 1 */
-#define SOC_STAT_VS2 5 /* Voltage sense 2 */
-
- struct gpio_desc *gpio_reset;
- struct gpio_desc *gpio_bus_enable;
- struct soc_pcmcia_regulator vcc;
- struct soc_pcmcia_regulator vpp;
-
- unsigned int irq_state;
-
-#ifdef CONFIG_CPU_FREQ
- struct notifier_block cpufreq_nb;
-#endif
- struct timer_list poll_timer;
- struct list_head node;
- void *driver_data;
-};
-
struct skt_dev_info {
int nskt;
struct soc_pcmcia_socket skt[];
};
-struct pcmcia_state {
- unsigned detect: 1,
- ready: 1,
- bvd1: 1,
- bvd2: 1,
- wrprot: 1,
- vs_3v: 1,
- vs_Xv: 1;
-};
-
-struct pcmcia_low_level {
- struct module *owner;
-
- /* first socket in system */
- int first;
- /* nr of sockets */
- int nr;
-
- int (*hw_init)(struct soc_pcmcia_socket *);
- void (*hw_shutdown)(struct soc_pcmcia_socket *);
-
- void (*socket_state)(struct soc_pcmcia_socket *, struct pcmcia_state *);
- int (*configure_socket)(struct soc_pcmcia_socket *, const socket_state_t *);
-
- /*
- * Enable card status IRQs on (re-)initialisation. This can
- * be called at initialisation, power management event, or
- * pcmcia event.
- */
- void (*socket_init)(struct soc_pcmcia_socket *);
-
- /*
- * Disable card status IRQs and PCMCIA bus on suspend.
- */
- void (*socket_suspend)(struct soc_pcmcia_socket *);
-
- /*
- * Hardware specific timing routines.
- * If provided, the get_timing routine overrides the SOC default.
- */
- unsigned int (*get_timing)(struct soc_pcmcia_socket *, unsigned int, unsigned int);
- int (*set_timing)(struct soc_pcmcia_socket *);
- int (*show_timing)(struct soc_pcmcia_socket *, char *);
-
-#ifdef CONFIG_CPU_FREQ
- /*
- * CPUFREQ support.
- */
- int (*frequency_change)(struct soc_pcmcia_socket *, unsigned long, struct cpufreq_freqs *);
-#endif
-};
-
-
struct soc_pcmcia_timing {
unsigned short io;
unsigned short mem;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 82b63e60c5a2..300b0f2b5f84 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -64,6 +64,7 @@ config USB_LGM_PHY
config PHY_CAN_TRANSCEIVER
tristate "CAN transceiver PHY"
select GENERIC_PHY
+ select MULTIPLEXER
help
This option enables support for CAN transceivers as a PHY. This
driver provides functions for putting the transceivers in various
diff --git a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
index f0bc87d654d4..3900f1650851 100644
--- a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
+++ b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
@@ -24,6 +24,14 @@
#define SUN6I_DPHY_TX_CTL_REG 0x04
#define SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT BIT(28)
+#define SUN6I_DPHY_RX_CTL_REG 0x08
+#define SUN6I_DPHY_RX_CTL_EN_DBC BIT(31)
+#define SUN6I_DPHY_RX_CTL_RX_CLK_FORCE BIT(24)
+#define SUN6I_DPHY_RX_CTL_RX_D3_FORCE BIT(23)
+#define SUN6I_DPHY_RX_CTL_RX_D2_FORCE BIT(22)
+#define SUN6I_DPHY_RX_CTL_RX_D1_FORCE BIT(21)
+#define SUN6I_DPHY_RX_CTL_RX_D0_FORCE BIT(20)
+
#define SUN6I_DPHY_TX_TIME0_REG 0x10
#define SUN6I_DPHY_TX_TIME0_HS_TRAIL(n) (((n) & 0xff) << 24)
#define SUN6I_DPHY_TX_TIME0_HS_PREPARE(n) (((n) & 0xff) << 16)
@@ -44,12 +52,29 @@
#define SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(n) (((n) & 0xff) << 8)
#define SUN6I_DPHY_TX_TIME4_HS_TX_ANA0(n) ((n) & 0xff)
+#define SUN6I_DPHY_RX_TIME0_REG 0x30
+#define SUN6I_DPHY_RX_TIME0_HS_RX_SYNC(n) (((n) & 0xff) << 24)
+#define SUN6I_DPHY_RX_TIME0_HS_RX_CLK_MISS(n) (((n) & 0xff) << 16)
+#define SUN6I_DPHY_RX_TIME0_LP_RX(n) (((n) & 0xff) << 8)
+
+#define SUN6I_DPHY_RX_TIME1_REG 0x34
+#define SUN6I_DPHY_RX_TIME1_RX_DLY(n) (((n) & 0xfff) << 20)
+#define SUN6I_DPHY_RX_TIME1_LP_RX_ULPS_WP(n) ((n) & 0xfffff)
+
+#define SUN6I_DPHY_RX_TIME2_REG 0x38
+#define SUN6I_DPHY_RX_TIME2_HS_RX_ANA1(n) (((n) & 0xff) << 8)
+#define SUN6I_DPHY_RX_TIME2_HS_RX_ANA0(n) ((n) & 0xff)
+
+#define SUN6I_DPHY_RX_TIME3_REG 0x40
+#define SUN6I_DPHY_RX_TIME3_LPRST_DLY(n) (((n) & 0xffff) << 16)
+
#define SUN6I_DPHY_ANA0_REG 0x4c
#define SUN6I_DPHY_ANA0_REG_PWS BIT(31)
#define SUN6I_DPHY_ANA0_REG_DMPC BIT(28)
#define SUN6I_DPHY_ANA0_REG_DMPD(n) (((n) & 0xf) << 24)
#define SUN6I_DPHY_ANA0_REG_SLV(n) (((n) & 7) << 12)
#define SUN6I_DPHY_ANA0_REG_DEN(n) (((n) & 0xf) << 8)
+#define SUN6I_DPHY_ANA0_REG_SFB(n) (((n) & 3) << 2)
#define SUN6I_DPHY_ANA1_REG 0x50
#define SUN6I_DPHY_ANA1_REG_VTTMODE BIT(31)
@@ -84,6 +109,11 @@
#define SUN6I_DPHY_DBG5_REG 0xf4
+enum sun6i_dphy_direction {
+ SUN6I_DPHY_DIRECTION_TX,
+ SUN6I_DPHY_DIRECTION_RX,
+};
+
struct sun6i_dphy {
struct clk *bus_clk;
struct clk *mod_clk;
@@ -92,6 +122,8 @@ struct sun6i_dphy {
struct phy *phy;
struct phy_configure_opts_mipi_dphy config;
+
+ enum sun6i_dphy_direction direction;
};
static int sun6i_dphy_init(struct phy *phy)
@@ -119,9 +151,8 @@ static int sun6i_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
return 0;
}
-static int sun6i_dphy_power_on(struct phy *phy)
+static int sun6i_dphy_tx_power_on(struct sun6i_dphy *dphy)
{
- struct sun6i_dphy *dphy = phy_get_drvdata(phy);
u8 lanes_mask = GENMASK(dphy->config.lanes - 1, 0);
regmap_write(dphy->regs, SUN6I_DPHY_TX_CTL_REG,
@@ -211,12 +242,129 @@ static int sun6i_dphy_power_on(struct phy *phy)
return 0;
}
+static int sun6i_dphy_rx_power_on(struct sun6i_dphy *dphy)
+{
+ /* Physical clock rate is actually half of symbol rate with DDR. */
+ unsigned long mipi_symbol_rate = dphy->config.hs_clk_rate;
+ unsigned long dphy_clk_rate;
+ unsigned int rx_dly;
+ unsigned int lprst_dly;
+ u32 value;
+
+ dphy_clk_rate = clk_get_rate(dphy->mod_clk);
+ if (!dphy_clk_rate)
+ return -EINVAL;
+
+ /* Hardcoded timing parameters from the Allwinner BSP. */
+ regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME0_REG,
+ SUN6I_DPHY_RX_TIME0_HS_RX_SYNC(255) |
+ SUN6I_DPHY_RX_TIME0_HS_RX_CLK_MISS(255) |
+ SUN6I_DPHY_RX_TIME0_LP_RX(255));
+
+ /*
+ * Formula from the Allwinner BSP, with hardcoded coefficients
+ * (probably internal divider/multiplier).
+ */
+ rx_dly = 8 * (unsigned int)(dphy_clk_rate / (mipi_symbol_rate / 8));
+
+ /*
+ * The Allwinner BSP has an alternative formula for LP_RX_ULPS_WP:
+ * lp_ulps_wp_cnt = lp_ulps_wp_ms * lp_clk / 1000
+ * but does not use it and hardcodes 255 instead.
+ */
+ regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME1_REG,
+ SUN6I_DPHY_RX_TIME1_RX_DLY(rx_dly) |
+ SUN6I_DPHY_RX_TIME1_LP_RX_ULPS_WP(255));
+
+ /* HS_RX_ANA0 value is hardcoded in the Allwinner BSP. */
+ regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME2_REG,
+ SUN6I_DPHY_RX_TIME2_HS_RX_ANA0(4));
+
+ /*
+ * Formula from the Allwinner BSP, with hardcoded coefficients
+ * (probably internal divider/multiplier).
+ */
+ lprst_dly = 4 * (unsigned int)(dphy_clk_rate / (mipi_symbol_rate / 2));
+
+ regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME3_REG,
+ SUN6I_DPHY_RX_TIME3_LPRST_DLY(lprst_dly));
+
+ /* Analog parameters are hardcoded in the Allwinner BSP. */
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG,
+ SUN6I_DPHY_ANA0_REG_PWS |
+ SUN6I_DPHY_ANA0_REG_SLV(7) |
+ SUN6I_DPHY_ANA0_REG_SFB(2));
+
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA1_REG,
+ SUN6I_DPHY_ANA1_REG_SVTT(4));
+
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG,
+ SUN6I_DPHY_ANA4_REG_DMPLVC |
+ SUN6I_DPHY_ANA4_REG_DMPLVD(1));
+
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA2_REG,
+ SUN6I_DPHY_ANA2_REG_ENIB);
+
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG,
+ SUN6I_DPHY_ANA3_EN_LDOR |
+ SUN6I_DPHY_ANA3_EN_LDOC |
+ SUN6I_DPHY_ANA3_EN_LDOD);
+
+ /*
+ * Delay comes from the Allwinner BSP, likely for internal regulator
+ * ramp-up.
+ */
+ udelay(3);
+
+ value = SUN6I_DPHY_RX_CTL_EN_DBC | SUN6I_DPHY_RX_CTL_RX_CLK_FORCE;
+
+ /*
+ * Rx data lane force-enable bits are used as regular RX enable by the
+ * Allwinner BSP.
+ */
+ if (dphy->config.lanes >= 1)
+ value |= SUN6I_DPHY_RX_CTL_RX_D0_FORCE;
+ if (dphy->config.lanes >= 2)
+ value |= SUN6I_DPHY_RX_CTL_RX_D1_FORCE;
+ if (dphy->config.lanes >= 3)
+ value |= SUN6I_DPHY_RX_CTL_RX_D2_FORCE;
+ if (dphy->config.lanes == 4)
+ value |= SUN6I_DPHY_RX_CTL_RX_D3_FORCE;
+
+ regmap_write(dphy->regs, SUN6I_DPHY_RX_CTL_REG, value);
+
+ regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG,
+ SUN6I_DPHY_GCTL_LANE_NUM(dphy->config.lanes) |
+ SUN6I_DPHY_GCTL_EN);
+
+ return 0;
+}
+
+static int sun6i_dphy_power_on(struct phy *phy)
+{
+ struct sun6i_dphy *dphy = phy_get_drvdata(phy);
+
+ switch (dphy->direction) {
+ case SUN6I_DPHY_DIRECTION_TX:
+ return sun6i_dphy_tx_power_on(dphy);
+ case SUN6I_DPHY_DIRECTION_RX:
+ return sun6i_dphy_rx_power_on(dphy);
+ default:
+ return -EINVAL;
+ }
+}
+
static int sun6i_dphy_power_off(struct phy *phy)
{
struct sun6i_dphy *dphy = phy_get_drvdata(phy);
- regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA1_REG,
- SUN6I_DPHY_ANA1_REG_VTTMODE, 0);
+ regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG, 0);
+
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG, 0);
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA1_REG, 0);
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA2_REG, 0);
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG, 0);
+ regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG, 0);
return 0;
}
@@ -253,7 +401,9 @@ static int sun6i_dphy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct sun6i_dphy *dphy;
+ const char *direction;
void __iomem *regs;
+ int ret;
dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
if (!dphy)
@@ -290,6 +440,14 @@ static int sun6i_dphy_probe(struct platform_device *pdev)
return PTR_ERR(dphy->phy);
}
+ dphy->direction = SUN6I_DPHY_DIRECTION_TX;
+
+ ret = of_property_read_string(pdev->dev.of_node, "allwinner,direction",
+ &direction);
+
+ if (!ret && !strncmp(direction, "rx", 2))
+ dphy->direction = SUN6I_DPHY_DIRECTION_RX;
+
phy_set_drvdata(dphy->phy, dphy);
phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
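To make the BSP-derived delay formulas above concrete, here is the arithmetic for one assumed operating point; the clock numbers are purely illustrative and not taken from the patch:

/*
 * Assume dphy_clk_rate = 150 MHz and mipi_symbol_rate = 200 Mbit/s per lane:
 *
 *   rx_dly    = 8 * (150000000 / (200000000 / 8)) = 8 * 6 = 48
 *   lprst_dly = 4 * (150000000 / (200000000 / 2)) = 4 * 1 = 4
 *
 * Both divisions are integer divisions, so at high symbol rates relative
 * to the module clock the results round down (lprst_dly can reach 0).
 */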
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index 6b917f7bddbe..73fb99ccd525 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -83,6 +83,7 @@
#define SIERRA_DFE_BIASTRIM_PREG 0x04C
#define SIERRA_DRVCTRL_ATTEN_PREG 0x06A
#define SIERRA_DRVCTRL_BOOST_PREG 0x06F
+#define SIERRA_TX_RCVDET_OVRD_PREG 0x072
#define SIERRA_CLKPATHCTRL_TMR_PREG 0x081
#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085
#define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG 0x086
@@ -1684,6 +1685,66 @@ static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = {
.num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs),
};
+/*
+ * TI J721E:
+ * refclk100MHz_32b_PCIe_ln_no_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ti_ml_pcie_100_no_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
+ {0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
+};
+
+static struct cdns_sierra_vals ti_ml_pcie_100_no_ssc_ln_vals = {
+ .reg_pairs = ti_ml_pcie_100_no_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_ml_pcie_100_no_ssc_ln_regs),
+};
+
/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc, pcie_links_using_plllc, pipe_bw_3 */
static const struct cdns_reg_pairs pcie_100_int_ssc_plllc_cmn_regs[] = {
{0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
@@ -1765,6 +1826,69 @@ static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = {
.num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs),
};
+/*
+ * TI J721E:
+ * refclk100MHz_32b_PCIe_ln_int_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ti_ml_pcie_100_int_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
+ {0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
+};
+
+static struct cdns_sierra_vals ti_ml_pcie_100_int_ssc_ln_vals = {
+ .reg_pairs = ti_ml_pcie_100_int_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_ml_pcie_100_int_ssc_ln_regs),
+};
+
/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc, pcie_links_using_plllc, pipe_bw_3 */
static const struct cdns_reg_pairs pcie_100_ext_ssc_plllc_cmn_regs[] = {
{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
@@ -1840,6 +1964,69 @@ static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = {
.num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs),
};
+/*
+ * TI J721E:
+ * refclk100MHz_32b_PCIe_ln_ext_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ti_ml_pcie_100_ext_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
+ {0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
+};
+
+static struct cdns_sierra_vals ti_ml_pcie_100_ext_ssc_ln_vals = {
+ .reg_pairs = ti_ml_pcie_100_ext_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_ml_pcie_100_ext_ssc_ln_regs),
+};
+
/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc */
static const struct cdns_reg_pairs cdns_pcie_cmn_regs_no_ssc[] = {
{0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
@@ -2299,9 +2486,9 @@ static const struct cdns_sierra_data cdns_ti_map_sierra = {
[INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
},
[TYPE_QSGMII] = {
- [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
- [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
- [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
+ [NO_SSC] = &ti_ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ti_ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ti_ml_pcie_100_int_ssc_ln_vals,
},
},
[TYPE_USB] = {
diff --git a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
index a95572b397ca..e625b32889bf 100644
--- a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
+++ b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
@@ -4,17 +4,33 @@
* Copyright 2019 Purism SPC
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/svc/misc.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+/* Control and Status Registers(CSR) */
+#define PHY_CTRL 0x00
+#define CCM_MASK GENMASK(7, 5)
+#define CCM(n) FIELD_PREP(CCM_MASK, (n))
+#define CCM_1_2V 0x5
+#define CA_MASK GENMASK(4, 2)
+#define CA_3_51MA 0x4
+#define CA(n) FIELD_PREP(CA_MASK, (n))
+#define RFB BIT(1)
+#define LVDS_EN BIT(0)
/* DPHY registers */
#define DPHY_PD_DPHY 0x00
@@ -55,8 +71,15 @@
#define PWR_ON 0
#define PWR_OFF 1
+#define MIN_VCO_FREQ 640000000
+#define MAX_VCO_FREQ 1500000000
+
+#define MIN_LVDS_REFCLK_FREQ 24000000
+#define MAX_LVDS_REFCLK_FREQ 150000000
+
enum mixel_dphy_devtype {
MIXEL_IMX8MQ,
+ MIXEL_IMX8QXP,
};
struct mixel_dphy_devdata {
@@ -65,6 +88,7 @@ struct mixel_dphy_devdata {
u8 reg_rxlprp;
u8 reg_rxcdrp;
u8 reg_rxhs_settle;
+ bool is_combo; /* MIPI DPHY and LVDS PHY combo */
};
static const struct mixel_dphy_devdata mixel_dphy_devdata[] = {
@@ -74,6 +98,10 @@ static const struct mixel_dphy_devdata mixel_dphy_devdata[] = {
.reg_rxlprp = 0x40,
.reg_rxcdrp = 0x44,
.reg_rxhs_settle = 0x48,
+ .is_combo = false,
+ },
+ [MIXEL_IMX8QXP] = {
+ .is_combo = true,
},
};
@@ -95,8 +123,12 @@ struct mixel_dphy_cfg {
struct mixel_dphy_priv {
struct mixel_dphy_cfg cfg;
struct regmap *regmap;
+ struct regmap *lvds_regmap;
struct clk *phy_ref_clk;
const struct mixel_dphy_devdata *devdata;
+ struct imx_sc_ipc *ipc_handle;
+ bool is_slave;
+ int id;
};
static const struct regmap_config mixel_dphy_regmap_config = {
@@ -317,7 +349,8 @@ static int mixel_dphy_set_pll_params(struct phy *phy)
return 0;
}
-static int mixel_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+static int
+mixel_dphy_configure_mipi_dphy(struct phy *phy, union phy_configure_opts *opts)
{
struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
struct mixel_dphy_cfg cfg = { 0 };
@@ -345,15 +378,126 @@ static int mixel_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
return 0;
}
+static int
+mixel_dphy_configure_lvds_phy(struct phy *phy, union phy_configure_opts *opts)
+{
+ struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
+ struct phy_configure_opts_lvds *lvds_opts = &opts->lvds;
+ unsigned long data_rate;
+ unsigned long fvco;
+ u32 rsc;
+ u32 co;
+ int ret;
+
+ priv->is_slave = lvds_opts->is_slave;
+
+ /* LVDS interface pins */
+ regmap_write(priv->lvds_regmap, PHY_CTRL,
+ CCM(CCM_1_2V) | CA(CA_3_51MA) | RFB);
+
+ /* enable MODE8 only for slave LVDS PHY */
+ rsc = priv->id ? IMX_SC_R_MIPI_1 : IMX_SC_R_MIPI_0;
+ ret = imx_sc_misc_set_control(priv->ipc_handle, rsc, IMX_SC_C_DUAL_MODE,
+ lvds_opts->is_slave);
+ if (ret) {
+ dev_err(&phy->dev, "Failed to configure MODE8: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Choose an appropriate divider ratio to meet the requirement of
+ * PLL VCO frequency range.
+ *
+ * ----- 640MHz ~ 1500MHz ------------ ---------------
+ * | VCO | ----------------> | CO divider | -> | LVDS data rate|
+ * ----- FVCO ------------ ---------------
+ * 1/2/4/8 div 7 * differential_clk_rate
+ */
+ data_rate = 7 * lvds_opts->differential_clk_rate;
+ for (co = 1; co <= 8; co *= 2) {
+ fvco = data_rate * co;
+
+ if (fvco >= MIN_VCO_FREQ)
+ break;
+ }
+
+ if (fvco < MIN_VCO_FREQ || fvco > MAX_VCO_FREQ) {
+ dev_err(&phy->dev, "VCO frequency %lu is out of range\n", fvco);
+ return -ERANGE;
+ }
+
+ /*
+ * CO is configurable, while CN and CM are not,
+ * as fixed ratios 1 and 7 are applied respectively.
+ */
+ phy_write(phy, __ffs(co), DPHY_CO);
+
+ /* set reference clock rate */
+ clk_set_rate(priv->phy_ref_clk, lvds_opts->differential_clk_rate);
+
+ return ret;
+}
+
+static int mixel_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ if (!opts) {
+ dev_err(&phy->dev, "No configuration options\n");
+ return -EINVAL;
+ }
+
+ if (phy->attrs.mode == PHY_MODE_MIPI_DPHY)
+ return mixel_dphy_configure_mipi_dphy(phy, opts);
+ else if (phy->attrs.mode == PHY_MODE_LVDS)
+ return mixel_dphy_configure_lvds_phy(phy, opts);
+
+ dev_err(&phy->dev,
+ "Failed to configure PHY with invalid PHY mode: %d\n", phy->attrs.mode);
+
+ return -EINVAL;
+}
+
+static int
+mixel_dphy_validate_lvds_phy(struct phy *phy, union phy_configure_opts *opts)
+{
+ struct phy_configure_opts_lvds *lvds_cfg = &opts->lvds;
+
+ if (lvds_cfg->bits_per_lane_and_dclk_cycle != 7) {
+ dev_err(&phy->dev, "Invalid bits per LVDS data lane: %u\n",
+ lvds_cfg->bits_per_lane_and_dclk_cycle);
+ return -EINVAL;
+ }
+
+ if (lvds_cfg->lanes != 4) {
+ dev_err(&phy->dev, "Invalid LVDS data lanes: %u\n", lvds_cfg->lanes);
+ return -EINVAL;
+ }
+
+ if (lvds_cfg->differential_clk_rate < MIN_LVDS_REFCLK_FREQ ||
+ lvds_cfg->differential_clk_rate > MAX_LVDS_REFCLK_FREQ) {
+ dev_err(&phy->dev,
+ "Invalid LVDS differential clock rate: %lu\n",
+ lvds_cfg->differential_clk_rate);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int mixel_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
union phy_configure_opts *opts)
{
- struct mixel_dphy_cfg cfg = { 0 };
+ if (mode == PHY_MODE_MIPI_DPHY) {
+ struct mixel_dphy_cfg mipi_dphy_cfg = { 0 };
- if (mode != PHY_MODE_MIPI_DPHY)
- return -EINVAL;
+ return mixel_dphy_config_from_opts(phy, &opts->mipi_dphy,
+ &mipi_dphy_cfg);
+ } else if (mode == PHY_MODE_LVDS) {
+ return mixel_dphy_validate_lvds_phy(phy, opts);
+ }
- return mixel_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
+ dev_err(&phy->dev,
+ "Failed to validate PHY with invalid PHY mode: %d\n", mode);
+ return -EINVAL;
}
static int mixel_dphy_init(struct phy *phy)
@@ -373,27 +517,75 @@ static int mixel_dphy_exit(struct phy *phy)
return 0;
}
-static int mixel_dphy_power_on(struct phy *phy)
+static int mixel_dphy_power_on_mipi_dphy(struct phy *phy)
{
struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
u32 locked;
int ret;
- ret = clk_prepare_enable(priv->phy_ref_clk);
- if (ret < 0)
- return ret;
-
phy_write(phy, PWR_ON, DPHY_PD_PLL);
ret = regmap_read_poll_timeout(priv->regmap, DPHY_LOCK, locked,
locked, PLL_LOCK_SLEEP,
PLL_LOCK_TIMEOUT);
if (ret < 0) {
dev_err(&phy->dev, "Could not get DPHY lock (%d)!\n", ret);
- goto clock_disable;
+ return ret;
}
phy_write(phy, PWR_ON, DPHY_PD_DPHY);
return 0;
+}
+
+static int mixel_dphy_power_on_lvds_phy(struct phy *phy)
+{
+ struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
+ u32 locked;
+ int ret;
+
+ regmap_update_bits(priv->lvds_regmap, PHY_CTRL, LVDS_EN, LVDS_EN);
+
+ phy_write(phy, PWR_ON, DPHY_PD_DPHY);
+ phy_write(phy, PWR_ON, DPHY_PD_PLL);
+
+ /* do not wait for slave LVDS PHY being locked */
+ if (priv->is_slave)
+ return 0;
+
+ ret = regmap_read_poll_timeout(priv->regmap, DPHY_LOCK, locked,
+ locked, PLL_LOCK_SLEEP,
+ PLL_LOCK_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&phy->dev, "Could not get LVDS PHY lock (%d)!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mixel_dphy_power_on(struct phy *phy)
+{
+ struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(priv->phy_ref_clk);
+ if (ret < 0)
+ return ret;
+
+ if (phy->attrs.mode == PHY_MODE_MIPI_DPHY) {
+ ret = mixel_dphy_power_on_mipi_dphy(phy);
+ } else if (phy->attrs.mode == PHY_MODE_LVDS) {
+ ret = mixel_dphy_power_on_lvds_phy(phy);
+ } else {
+ dev_err(&phy->dev,
+ "Failed to power on PHY with invalid PHY mode: %d\n",
+ phy->attrs.mode);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto clock_disable;
+
+ return 0;
clock_disable:
clk_disable_unprepare(priv->phy_ref_clk);
return ret;
@@ -406,16 +598,51 @@ static int mixel_dphy_power_off(struct phy *phy)
phy_write(phy, PWR_OFF, DPHY_PD_PLL);
phy_write(phy, PWR_OFF, DPHY_PD_DPHY);
+ if (phy->attrs.mode == PHY_MODE_LVDS)
+ regmap_update_bits(priv->lvds_regmap, PHY_CTRL, LVDS_EN, 0);
+
clk_disable_unprepare(priv->phy_ref_clk);
return 0;
}
+static int mixel_dphy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ if (priv->devdata->is_combo && mode != PHY_MODE_LVDS) {
+ dev_err(&phy->dev, "Failed to set PHY mode for combo PHY\n");
+ return -EINVAL;
+ }
+
+ if (!priv->devdata->is_combo && mode != PHY_MODE_MIPI_DPHY) {
+ dev_err(&phy->dev, "Failed to set PHY mode to MIPI DPHY\n");
+ return -EINVAL;
+ }
+
+ if (priv->devdata->is_combo) {
+ u32 rsc = priv->id ? IMX_SC_R_MIPI_1 : IMX_SC_R_MIPI_0;
+
+ ret = imx_sc_misc_set_control(priv->ipc_handle,
+ rsc, IMX_SC_C_MODE,
+ mode == PHY_MODE_LVDS);
+ if (ret) {
+ dev_err(&phy->dev,
+ "Failed to set PHY mode via SCU ipc: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static const struct phy_ops mixel_dphy_phy_ops = {
.init = mixel_dphy_init,
.exit = mixel_dphy_exit,
.power_on = mixel_dphy_power_on,
.power_off = mixel_dphy_power_off,
+ .set_mode = mixel_dphy_set_mode,
.configure = mixel_dphy_configure,
.validate = mixel_dphy_validate,
.owner = THIS_MODULE,
@@ -424,6 +651,8 @@ static const struct phy_ops mixel_dphy_phy_ops = {
static const struct of_device_id mixel_dphy_of_match[] = {
{ .compatible = "fsl,imx8mq-mipi-dphy",
.data = &mixel_dphy_devdata[MIXEL_IMX8MQ] },
+ { .compatible = "fsl,imx8qxp-mipi-dphy",
+ .data = &mixel_dphy_devdata[MIXEL_IMX8QXP] },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mixel_dphy_of_match);
@@ -436,6 +665,7 @@ static int mixel_dphy_probe(struct platform_device *pdev)
struct mixel_dphy_priv *priv;
struct phy *phy;
void __iomem *base;
+ int ret;
if (!np)
return -ENODEV;
@@ -467,6 +697,30 @@ static int mixel_dphy_probe(struct platform_device *pdev)
dev_dbg(dev, "phy_ref clock rate: %lu\n",
clk_get_rate(priv->phy_ref_clk));
+ if (priv->devdata->is_combo) {
+ priv->lvds_regmap =
+ syscon_regmap_lookup_by_phandle(np, "fsl,syscon");
+ if (IS_ERR(priv->lvds_regmap)) {
+ ret = PTR_ERR(priv->lvds_regmap);
+ dev_err_probe(dev, ret, "Failed to get LVDS regmap\n");
+ return ret;
+ }
+
+ priv->id = of_alias_get_id(np, "mipi_dphy");
+ if (priv->id < 0) {
+ dev_err(dev, "Failed to get phy node alias id: %d\n",
+ priv->id);
+ return priv->id;
+ }
+
+ ret = imx_scu_get_handle(&priv->ipc_handle);
+ if (ret) {
+ dev_err_probe(dev, ret,
+ "Failed to get SCU ipc handle\n");
+ return ret;
+ }
+ }
+
dev_set_drvdata(dev, priv);
phy = devm_phy_create(dev, np, &mixel_dphy_phy_ops);
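The CO divider loop above doubles the divider until the VCO lands in its 640 MHz to 1.5 GHz window; a worked example with an assumed 74.25 MHz LVDS differential clock (illustrative only):

/*
 * data_rate = 7 * 74250000 = 519.75 MHz
 *
 *   co = 1: fvco = 519.75 MHz  -> below 640 MHz, try the next ratio
 *   co = 2: fvco = 1039.5 MHz  -> inside 640 MHz .. 1.5 GHz, stop
 *
 * The register write then programs __ffs(co) = __ffs(2) = 1 into DPHY_CO,
 * i.e. the divider is stored as a log2 value.
 */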
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index f1eb03ba25d6..ad7d2edfc414 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -94,15 +94,21 @@ static int imx8_pcie_phy_init(struct phy *phy)
IMX8MM_GPR_PCIE_CMN_RST);
usleep_range(200, 500);
- if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT) {
+ if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ||
+ pad_mode == IMX8_PCIE_REFCLK_PAD_UNUSED) {
/* Configure the pad as input */
val = readl(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
writel(val & ~ANA_PLL_CLK_OUT_TO_EXT_IO_EN,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
- } else if (pad_mode == IMX8_PCIE_REFCLK_PAD_OUTPUT) {
+ } else {
/* Configure the PHY to output the refclock via pad */
writel(ANA_PLL_CLK_OUT_TO_EXT_IO_EN,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
+ }
+
+ if (pad_mode == IMX8_PCIE_REFCLK_PAD_OUTPUT ||
+ pad_mode == IMX8_PCIE_REFCLK_PAD_UNUSED) {
+ /* Source clock from SoC internal PLL */
writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c
index 5fb4217fb8e0..d4bd419abc3c 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.c
@@ -120,20 +120,16 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
return PTR_ERR(hdmi_phy->regs);
ref_clk = devm_clk_get(dev, "pll_ref");
- if (IS_ERR(ref_clk)) {
- ret = PTR_ERR(ref_clk);
- dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n",
- ret);
- return ret;
- }
+ if (IS_ERR(ref_clk))
+ return dev_err_probe(dev, PTR_ERR(ref_clk),
+ "Failed to get PLL reference clock\n");
+
ref_clk_name = __clk_get_name(ref_clk);
ret = of_property_read_string(dev->of_node, "clock-output-names",
&clk_init.name);
- if (ret < 0) {
- dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to read clock-output-names\n");
hdmi_phy->dev = dev;
hdmi_phy->conf =
@@ -141,25 +137,19 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init);
hdmi_phy->pll_hw.init = &clk_init;
hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
- if (IS_ERR(hdmi_phy->pll)) {
- ret = PTR_ERR(hdmi_phy->pll);
- dev_err(dev, "Failed to register PLL: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(hdmi_phy->pll))
+ return dev_err_probe(dev, PTR_ERR(hdmi_phy->pll),
+ "Failed to register PLL\n");
ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
&hdmi_phy->ibias);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get ibias\n");
ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
&hdmi_phy->ibias_up);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get ibias_up\n");
dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
hdmi_phy->drv_imp_clk = 0x30;
@@ -168,17 +158,15 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
hdmi_phy->drv_imp_d0 = 0x30;
phy = devm_phy_create(dev, NULL, mtk_hdmi_phy_dev_get_ops(hdmi_phy));
- if (IS_ERR(phy)) {
- dev_err(dev, "Failed to create HDMI PHY\n");
- return PTR_ERR(phy);
- }
+ if (IS_ERR(phy))
+ return dev_err_probe(dev, PTR_ERR(phy), "Cannot create HDMI PHY\n");
+
phy_set_drvdata(phy, hdmi_phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider)) {
- dev_err(dev, "Failed to register HDMI PHY\n");
- return PTR_ERR(phy_provider);
- }
+ if (IS_ERR(phy_provider))
+ return dev_err_probe(dev, PTR_ERR(phy_provider),
+ "Failed to register HDMI PHY\n");
if (hdmi_phy->conf->pll_default_off)
hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy);
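The mtk-hdmi changes above are a mechanical conversion to dev_err_probe(), which returns the error code, keeps quiet for -EPROBE_DEFER and records the deferral reason for the devices_deferred debugfs file. A minimal sketch of the pattern with a made-up helper name:

#include <linux/clk.h>
#include <linux/device.h>

static int bar_get_ref_clk(struct device *dev, struct clk **clk)
{
        *clk = devm_clk_get(dev, "pll_ref");
        if (IS_ERR(*clk))
                /* one line replaces the old dev_err() + return pair */
                return dev_err_probe(dev, PTR_ERR(*clk),
                                     "Failed to get PLL reference clock\n");
        return 0;
}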
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
index 67b005d5b9e3..28506932bd91 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
@@ -154,11 +154,9 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return PTR_ERR(mipi_tx->regs);
ref_clk = devm_clk_get(dev, NULL);
- if (IS_ERR(ref_clk)) {
- ret = PTR_ERR(ref_clk);
- dev_err(dev, "Failed to get reference clock: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ref_clk))
+ return dev_err_probe(dev, PTR_ERR(ref_clk),
+ "Failed to get reference clock\n");
ret = of_property_read_u32(dev->of_node, "drive-strength-microamp",
&mipi_tx->mipitx_drive);
@@ -178,27 +176,20 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
ret = of_property_read_string(dev->of_node, "clock-output-names",
&clk_init.name);
- if (ret < 0) {
- dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to read clock-output-names\n");
clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops;
mipi_tx->pll_hw.init = &clk_init;
mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw);
- if (IS_ERR(mipi_tx->pll)) {
- ret = PTR_ERR(mipi_tx->pll);
- dev_err(dev, "Failed to register PLL: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(mipi_tx->pll))
+ return dev_err_probe(dev, PTR_ERR(mipi_tx->pll), "Failed to register PLL\n");
phy = devm_phy_create(dev, NULL, &mtk_mipi_tx_ops);
- if (IS_ERR(phy)) {
- ret = PTR_ERR(phy);
- dev_err(dev, "Failed to create MIPI D-PHY: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(phy))
+ return dev_err_probe(dev, PTR_ERR(phy), "Failed to create MIPI D-PHY\n");
+
phy_set_drvdata(phy, mipi_tx);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c
index 6f3fe37dee0e..95c6dbb52da7 100644
--- a/drivers/phy/phy-can-transceiver.c
+++ b/drivers/phy/phy-can-transceiver.c
@@ -10,6 +10,7 @@
#include<linux/module.h>
#include<linux/gpio.h>
#include<linux/gpio/consumer.h>
+#include <linux/mux/consumer.h>
struct can_transceiver_data {
u32 flags;
@@ -21,13 +22,22 @@ struct can_transceiver_phy {
struct phy *generic_phy;
struct gpio_desc *standby_gpio;
struct gpio_desc *enable_gpio;
+ struct mux_state *mux_state;
};
/* Power on function */
static int can_transceiver_phy_power_on(struct phy *phy)
{
struct can_transceiver_phy *can_transceiver_phy = phy_get_drvdata(phy);
-
+ int ret;
+
+ if (can_transceiver_phy->mux_state) {
+ ret = mux_state_select(can_transceiver_phy->mux_state);
+ if (ret) {
+ dev_err(&phy->dev, "Failed to select CAN mux: %d\n", ret);
+ return ret;
+ }
+ }
if (can_transceiver_phy->standby_gpio)
gpiod_set_value_cansleep(can_transceiver_phy->standby_gpio, 0);
if (can_transceiver_phy->enable_gpio)
@@ -45,6 +55,8 @@ static int can_transceiver_phy_power_off(struct phy *phy)
gpiod_set_value_cansleep(can_transceiver_phy->standby_gpio, 1);
if (can_transceiver_phy->enable_gpio)
gpiod_set_value_cansleep(can_transceiver_phy->enable_gpio, 0);
+ if (can_transceiver_phy->mux_state)
+ mux_state_deselect(can_transceiver_phy->mux_state);
return 0;
}
@@ -95,6 +107,16 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
match = of_match_node(can_transceiver_phy_ids, pdev->dev.of_node);
drvdata = match->data;
+ if (of_property_read_bool(dev->of_node, "mux-states")) {
+ struct mux_state *mux_state;
+
+ mux_state = devm_mux_state_get(dev, NULL);
+ if (IS_ERR(mux_state))
+ return dev_err_probe(&pdev->dev, PTR_ERR(mux_state),
+ "failed to get mux\n");
+ can_transceiver_phy->mux_state = mux_state;
+ }
+
phy = devm_phy_create(dev, dev->of_node,
&can_transceiver_phy_ops);
if (IS_ERR(phy)) {
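The CAN transceiver PHY now optionally routes its pins through a mux before power-on, using the mux-state consumer API (hence the new MULTIPLEXER select in Kconfig). A sketch of the select/deselect handshake with a hypothetical baz_phy structure; the real driver keeps this state in can_transceiver_phy:

#include <linux/mux/consumer.h>
#include <linux/device.h>

struct baz_phy {
        struct mux_state *mux_state;    /* from devm_mux_state_get(dev, NULL) */
};

static int baz_power_on(struct baz_phy *priv)
{
        int ret;

        /* route the shared pins to this transceiver before enabling it */
        ret = mux_state_select(priv->mux_state);
        if (ret)
                return ret;

        /* ... gpiod_set_value_cansleep() calls follow here ... */
        return 0;
}

static void baz_power_off(struct baz_phy *priv)
{
        /* release the mux so another consumer can claim the pins */
        mux_state_deselect(priv->mux_state);
}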
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 91e28d6ce450..d93ddf1262c5 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -229,6 +229,17 @@ void phy_pm_runtime_forbid(struct phy *phy)
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);
+/**
+ * phy_init - phy internal initialization before phy operation
+ * @phy: the phy returned by phy_get()
+ *
+ * Used to allow the phy's driver to perform phy internal initialization,
+ * such as PLL block powering, clock initialization or anything else that
+ * is required before the phy can start operating.
+ * Must be called before phy_power_on().
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
int phy_init(struct phy *phy)
{
int ret;
@@ -242,6 +253,9 @@ int phy_init(struct phy *phy)
ret = 0; /* Override possible ret == -ENOTSUPP */
mutex_lock(&phy->mutex);
+ if (phy->power_count > phy->init_count)
+ dev_warn(&phy->dev, "phy_power_on was called before phy_init\n");
+
if (phy->init_count == 0 && phy->ops->init) {
ret = phy->ops->init(phy);
if (ret < 0) {
@@ -258,6 +272,14 @@ out:
}
EXPORT_SYMBOL_GPL(phy_init);
+/**
+ * phy_exit - Phy internal un-initialization
+ * @phy: the phy returned by phy_get()
+ *
+ * Must be called after phy_power_off().
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
int phy_exit(struct phy *phy)
{
int ret;
@@ -287,6 +309,14 @@ out:
}
EXPORT_SYMBOL_GPL(phy_exit);
+/**
+ * phy_power_on - Enable the phy and enter proper operation
+ * @phy: the phy returned by phy_get()
+ *
+ * Must be called after phy_init().
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
int phy_power_on(struct phy *phy)
{
int ret = 0;
@@ -329,6 +359,14 @@ out:
}
EXPORT_SYMBOL_GPL(phy_power_on);
+/**
+ * phy_power_off - Disable the phy.
+ * @phy: the phy returned by phy_get()
+ *
+ * Must be called before phy_exit().
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
int phy_power_off(struct phy *phy)
{
int ret;
@@ -432,7 +470,7 @@ EXPORT_SYMBOL_GPL(phy_reset);
* runtime, which are otherwise lost after host controller reset and cannot
* be applied in phy_init() or phy_power_on().
*
- * Returns: 0 if successful, an negative error code otherwise
+ * Return: %0 if successful, a negative error code otherwise
*/
int phy_calibrate(struct phy *phy)
{
@@ -458,7 +496,7 @@ EXPORT_SYMBOL_GPL(phy_calibrate);
* on the phy. The configuration will be applied on the current phy
* mode, that can be changed using phy_set_mode().
*
- * Returns: 0 if successful, an negative error code otherwise
+ * Return: %0 if successful, a negative error code otherwise
*/
int phy_configure(struct phy *phy, union phy_configure_opts *opts)
{
@@ -492,7 +530,7 @@ EXPORT_SYMBOL_GPL(phy_configure);
* PHY, so calling it as many times as deemed fit will have no side
* effect.
*
- * Returns: 0 if successful, an negative error code otherwise
+ * Return: %0 if successful, a negative error code otherwise
*/
int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
union phy_configure_opts *opts)
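The new kernel-doc above pins down the required call order for PHY consumers: phy_init() before phy_power_on(), and phy_power_off() before phy_exit(). A consumer-side sketch of that ordering, with made-up qux_start()/qux_stop() wrappers:

#include <linux/phy/phy.h>

static int qux_start(struct phy *phy)
{
        int ret;

        ret = phy_init(phy);            /* internal init: PLLs, clocks */
        if (ret)
                return ret;

        ret = phy_power_on(phy);        /* must come after phy_init() */
        if (ret)
                phy_exit(phy);

        return ret;
}

static void qux_stop(struct phy *phy)
{
        phy_power_off(phy);             /* must come before phy_exit() */
        phy_exit(phy);
}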
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index b144ae1f729a..c7309e981bfb 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -2535,6 +2535,50 @@ static const struct qmp_phy_init_tbl sdx55_qmp_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
};
+static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0b),
+};
+
+static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xe4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00),
+};
+
static const struct qmp_phy_init_tbl sm8350_ufsphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0xd9),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x11),
@@ -3177,7 +3221,7 @@ struct qmp_phy_combo_cfg {
* @tx2: iomapped memory space for second lane's tx (in dual lane PHYs)
* @rx2: iomapped memory space for second lane's rx (in dual lane PHYs)
* @pcs_misc: iomapped memory space for lane's pcs_misc
- * @pipe_clk: pipe lock
+ * @pipe_clk: pipe clock
* @index: lane index
* @qmp: QMP phy to which this lane belongs
* @lane_rst: lane's reset controller
@@ -4217,6 +4261,35 @@ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
.pwrdn_delay_max = 1005, /* us */
};
+static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sdx65_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdx65_usb3_uniphy_tx_tbl),
+ .rx_tbl = sdx65_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdx65_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sm8350_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v4_sdx55_usbphy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_sdx55_usbphy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8350_usb3_uniphy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
.type = PHY_TYPE_UFS,
.nlanes = 2,
@@ -5012,7 +5085,7 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
if (ret) {
dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
- goto err_reg_enable;
+ goto err_unlock;
}
for (i = 0; i < cfg->num_resets; i++) {
@@ -5020,7 +5093,7 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
if (ret) {
dev_err(qmp->dev, "%s reset assert failed\n",
cfg->reset_list[i]);
- goto err_rst_assert;
+ goto err_disable_regulators;
}
}
@@ -5029,13 +5102,13 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
if (ret) {
dev_err(qmp->dev, "%s reset deassert failed\n",
qphy->cfg->reset_list[i]);
- goto err_rst;
+ goto err_assert_reset;
}
}
ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
if (ret)
- goto err_rst;
+ goto err_assert_reset;
if (cfg->has_phy_dp_com_ctrl) {
qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
@@ -5077,12 +5150,12 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
return 0;
-err_rst:
+err_assert_reset:
while (++i < cfg->num_resets)
reset_control_assert(qmp->resets[i]);
-err_rst_assert:
+err_disable_regulators:
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
-err_reg_enable:
+err_unlock:
mutex_unlock(&qmp->phy_mutex);
return ret;
@@ -5188,14 +5261,14 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
if (ret) {
dev_err(qmp->dev, "lane%d reset deassert failed\n",
qphy->index);
- goto err_lane_rst;
+ return ret;
}
}
ret = clk_prepare_enable(qphy->pipe_clk);
if (ret) {
dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
- goto err_clk_enable;
+ goto err_reset_lane;
}
/* Tx, Rx, and PCS configurations */
@@ -5246,7 +5319,7 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
ret = reset_control_deassert(qmp->ufs_reset);
if (ret)
- goto err_lane_rst;
+ goto err_disable_pipe_clk;
qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
cfg->pcs_misc_tbl_num);
@@ -5285,17 +5358,17 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
PHY_INIT_COMPLETE_TIMEOUT);
if (ret) {
dev_err(qmp->dev, "phy initialization timed-out\n");
- goto err_pcs_ready;
+ goto err_disable_pipe_clk;
}
}
return 0;
-err_pcs_ready:
+err_disable_pipe_clk:
clk_disable_unprepare(qphy->pipe_clk);
-err_clk_enable:
+err_reset_lane:
if (cfg->has_lane_rst)
reset_control_assert(qphy->lane_rst);
-err_lane_rst:
+
return ret;
}
@@ -5514,7 +5587,7 @@ static int qcom_qmp_phy_reset_init(struct device *dev, const struct qmp_phy_cfg
struct reset_control *rst;
const char *name = cfg->reset_list[i];
- rst = devm_reset_control_get(dev, name);
+ rst = devm_reset_control_get_exclusive(dev, name);
if (IS_ERR(rst)) {
dev_err(dev, "failed to get %s reset\n", name);
return PTR_ERR(rst);
@@ -5818,6 +5891,11 @@ static const struct phy_ops qcom_qmp_pcie_ufs_ops = {
.owner = THIS_MODULE,
};
+static void qcom_qmp_reset_control_put(void *data)
+{
+ reset_control_put(data);
+}
+
static
int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
void __iomem *serdes, const struct qmp_phy_cfg *cfg)
@@ -5890,7 +5968,7 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
* all phys that don't need this.
*/
snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
- qphy->pipe_clk = of_clk_get_by_name(np, prop_name);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
if (IS_ERR(qphy->pipe_clk)) {
if (cfg->type == PHY_TYPE_PCIE ||
cfg->type == PHY_TYPE_USB3) {
@@ -5907,11 +5985,15 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
/* Get lane reset, if any */
if (cfg->has_lane_rst) {
snprintf(prop_name, sizeof(prop_name), "lane%d", id);
- qphy->lane_rst = of_reset_control_get(np, prop_name);
+ qphy->lane_rst = of_reset_control_get_exclusive(np, prop_name);
if (IS_ERR(qphy->lane_rst)) {
dev_err(dev, "failed to get lane%d reset\n", id);
return PTR_ERR(qphy->lane_rst);
}
+ ret = devm_add_action_or_reset(dev, qcom_qmp_reset_control_put,
+ qphy->lane_rst);
+ if (ret)
+ return ret;
}
if (cfg->type == PHY_TYPE_UFS || cfg->type == PHY_TYPE_PCIE)
@@ -6008,6 +6090,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,sm6115-qmp-ufs-phy",
.data = &sm6115_ufsphy_cfg,
}, {
+ .compatible = "qcom,sm6350-qmp-ufs-phy",
+ .data = &sdm845_ufsphy_cfg,
+ }, {
.compatible = "qcom,sm8150-qmp-ufs-phy",
.data = &sm8150_ufsphy_cfg,
}, {
@@ -6047,6 +6132,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,sdx55-qmp-usb3-uni-phy",
.data = &sdx55_usb3_uniphy_cfg,
}, {
+ .compatible = "qcom,sdx65-qmp-usb3-uni-phy",
+ .data = &sdx65_usb3_uniphy_cfg,
+ }, {
.compatible = "qcom,sm8350-qmp-usb3-phy",
.data = &sm8350_usb3phy_cfg,
}, {
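The qcom-qmp hunks above switch the per-lane reset to of_reset_control_get_exclusive() and register qcom_qmp_reset_control_put() through devm_add_action_or_reset(), so the reference is released automatically on probe failure or driver unbind. A minimal sketch of that pattern, using hypothetical names and assuming the reset is looked up from a child node:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/reset.h>

static void example_reset_put(void *data)
{
	reset_control_put(data);	/* devm action: drop the reference */
}

static int example_get_lane_reset(struct device *dev, struct device_node *np)
{
	struct reset_control *rst;

	rst = of_reset_control_get_exclusive(np, "lane0");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Runs example_reset_put() immediately if registration fails, or on unbind. */
	return devm_add_action_or_reset(dev, example_reset_put, rst);
}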
diff --git a/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c b/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c
index 4df9476ef2a9..639452f47869 100644
--- a/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c
+++ b/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c
@@ -327,7 +327,6 @@ static int rk_dphy_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
const struct rk_dphy_drv_data *drv_data;
struct phy_provider *phy_provider;
- const struct of_device_id *of_id;
struct rk_dphy *priv;
struct phy *phy;
unsigned int i;
@@ -347,11 +346,7 @@ static int rk_dphy_probe(struct platform_device *pdev)
return -ENODEV;
}
- of_id = of_match_device(rk_dphy_dt_ids, dev);
- if (!of_id)
- return -EINVAL;
-
- drv_data = of_id->data;
+ drv_data = of_device_get_match_data(dev);
priv->drv_data = drv_data;
priv->clks = devm_kcalloc(&pdev->dev, drv_data->num_clks,
sizeof(*priv->clks), GFP_KERNEL);
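The probe change above drops the explicit of_match_device() lookup in favour of of_device_get_match_data(), which returns the .data pointer of the matched of_device_id entry (or NULL when nothing matched). A minimal sketch of the same lookup, with a hypothetical per-SoC data struct:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_drv_data {
	unsigned int num_clks;		/* hypothetical per-SoC parameter */
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_drv_data *data;

	/* Returns the .data of the matching of_device_id, or NULL. */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	/* ... use data->num_clks to size per-device allocations ... */
	return 0;
}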
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index eca77e44a4c1..6711659f727c 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -116,11 +116,15 @@ struct rockchip_chg_det_reg {
* @bvalid_det_en: vbus valid rise detection enable register.
* @bvalid_det_st: vbus valid rise detection status register.
* @bvalid_det_clr: vbus valid rise detection clear register.
+ * @id_det_en: id detection enable register.
+ * @id_det_st: id detection state register.
+ * @id_det_clr: id detection clear register.
* @ls_det_en: linestate detection enable register.
* @ls_det_st: linestate detection state register.
* @ls_det_clr: linestate detection clear register.
* @utmi_avalid: utmi vbus avalid status register.
* @utmi_bvalid: utmi vbus bvalid status register.
+ * @utmi_id: utmi id state register.
* @utmi_ls: utmi linestate state register.
* @utmi_hstdet: utmi host disconnect register.
*/
@@ -129,11 +133,15 @@ struct rockchip_usb2phy_port_cfg {
struct usb2phy_reg bvalid_det_en;
struct usb2phy_reg bvalid_det_st;
struct usb2phy_reg bvalid_det_clr;
+ struct usb2phy_reg id_det_en;
+ struct usb2phy_reg id_det_st;
+ struct usb2phy_reg id_det_clr;
struct usb2phy_reg ls_det_en;
struct usb2phy_reg ls_det_st;
struct usb2phy_reg ls_det_clr;
struct usb2phy_reg utmi_avalid;
struct usb2phy_reg utmi_bvalid;
+ struct usb2phy_reg utmi_id;
struct usb2phy_reg utmi_ls;
struct usb2phy_reg utmi_hstdet;
};
@@ -161,6 +169,7 @@ struct rockchip_usb2phy_cfg {
* @suspended: phy suspended flag.
* @vbus_attached: otg device vbus status.
* @bvalid_irq: IRQ number assigned for vbus valid rise detection.
+ * @id_irq: IRQ number assigned for ID pin detection.
* @ls_irq: IRQ number assigned for linestate detection.
* @otg_mux_irq: IRQ number which multiplex otg-id/otg-bvalid/linestate
* irqs to one irq in otg-port.
@@ -179,6 +188,7 @@ struct rockchip_usb2phy_port {
bool suspended;
bool vbus_attached;
int bvalid_irq;
+ int id_irq;
int ls_irq;
int otg_mux_irq;
struct mutex mutex;
@@ -253,7 +263,7 @@ static inline bool property_enabled(struct regmap *base,
return false;
tmp = (orig & mask) >> reg->bitstart;
- return tmp == reg->enable;
+ return tmp != reg->disable;
}
static int rockchip_usb2phy_clk480m_prepare(struct clk_hw *hw)
@@ -419,6 +429,19 @@ static int rockchip_usb2phy_init(struct phy *phy)
if (ret)
goto out;
+ /* clear id status and enable id detect irq */
+ ret = property_enable(rphy->grf,
+ &rport->port_cfg->id_det_clr,
+ true);
+ if (ret)
+ goto out;
+
+ ret = property_enable(rphy->grf,
+ &rport->port_cfg->id_det_en,
+ true);
+ if (ret)
+ goto out;
+
schedule_delayed_work(&rport->otg_sm_work,
OTG_SCHEDULE_DELAY * 3);
} else {
@@ -905,27 +928,40 @@ static irqreturn_t rockchip_usb2phy_bvalid_irq(int irq, void *data)
if (!property_enabled(rphy->grf, &rport->port_cfg->bvalid_det_st))
return IRQ_NONE;
- mutex_lock(&rport->mutex);
-
/* clear bvalid detect irq pending status */
property_enable(rphy->grf, &rport->port_cfg->bvalid_det_clr, true);
- mutex_unlock(&rport->mutex);
-
rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
return IRQ_HANDLED;
}
-static irqreturn_t rockchip_usb2phy_otg_mux_irq(int irq, void *data)
+static irqreturn_t rockchip_usb2phy_id_irq(int irq, void *data)
{
struct rockchip_usb2phy_port *rport = data;
struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+ bool id;
- if (property_enabled(rphy->grf, &rport->port_cfg->bvalid_det_st))
- return rockchip_usb2phy_bvalid_irq(irq, data);
- else
+ if (!property_enabled(rphy->grf, &rport->port_cfg->id_det_st))
return IRQ_NONE;
+
+ /* clear id detect irq pending status */
+ property_enable(rphy->grf, &rport->port_cfg->id_det_clr, true);
+
+ id = property_enabled(rphy->grf, &rport->port_cfg->utmi_id);
+ extcon_set_state_sync(rphy->edev, EXTCON_USB_HOST, !id);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_usb2phy_otg_mux_irq(int irq, void *data)
+{
+ irqreturn_t ret = IRQ_NONE;
+
+ ret |= rockchip_usb2phy_bvalid_irq(irq, data);
+ ret |= rockchip_usb2phy_id_irq(irq, data);
+
+ return ret;
}
static irqreturn_t rockchip_usb2phy_irq(int irq, void *data)
@@ -940,8 +976,14 @@ static irqreturn_t rockchip_usb2phy_irq(int irq, void *data)
if (!rport->phy)
continue;
- /* Handle linestate irq for both otg port and host port */
- ret = rockchip_usb2phy_linestate_irq(irq, rport);
+ switch (rport->port_id) {
+ case USB2PHY_PORT_OTG:
+ ret |= rockchip_usb2phy_otg_mux_irq(irq, rport);
+ break;
+ case USB2PHY_PORT_HOST:
+ ret |= rockchip_usb2phy_linestate_irq(irq, rport);
+ break;
+ }
}
return ret;
@@ -1015,6 +1057,25 @@ static int rockchip_usb2phy_port_irq_init(struct rockchip_usb2phy *rphy,
"failed to request otg-bvalid irq handle\n");
return ret;
}
+
+ rport->id_irq = of_irq_get_byname(child_np, "otg-id");
+ if (rport->id_irq < 0) {
+ dev_err(rphy->dev, "no otg-id irq provided\n");
+ ret = rport->id_irq;
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(rphy->dev, rport->id_irq,
+ NULL,
+ rockchip_usb2phy_id_irq,
+ IRQF_ONESHOT,
+ "rockchip_usb2phy_id",
+ rport);
+ if (ret) {
+ dev_err(rphy->dev,
+ "failed to request otg-id irq handle\n");
+ return ret;
+ }
}
break;
default:
@@ -1139,8 +1200,8 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
else {
rphy->grf = syscon_node_to_regmap(dev->parent->of_node);
- if (IS_ERR(rphy->grf))
- return PTR_ERR(rphy->grf);
+ if (IS_ERR(rphy->grf))
+ return PTR_ERR(rphy->grf);
}
if (of_device_is_compatible(np, "rockchip,rv1108-usb2phy")) {
@@ -1289,10 +1350,14 @@ static const struct rockchip_usb2phy_cfg rk3228_phy_cfgs[] = {
.bvalid_det_en = { 0x0680, 3, 3, 0, 1 },
.bvalid_det_st = { 0x0690, 3, 3, 0, 1 },
.bvalid_det_clr = { 0x06a0, 3, 3, 0, 1 },
+ .id_det_en = { 0x0680, 6, 5, 0, 3 },
+ .id_det_st = { 0x0690, 6, 5, 0, 3 },
+ .id_det_clr = { 0x06a0, 6, 5, 0, 3 },
.ls_det_en = { 0x0680, 2, 2, 0, 1 },
.ls_det_st = { 0x0690, 2, 2, 0, 1 },
.ls_det_clr = { 0x06a0, 2, 2, 0, 1 },
.utmi_bvalid = { 0x0480, 4, 4, 0, 1 },
+ .utmi_id = { 0x0480, 1, 1, 0, 1 },
.utmi_ls = { 0x0480, 3, 2, 0, 1 },
},
[USB2PHY_PORT_HOST] = {
@@ -1345,14 +1410,18 @@ static const struct rockchip_usb2phy_cfg rk3308_phy_cfgs[] = {
.port_cfgs = {
[USB2PHY_PORT_OTG] = {
.phy_sus = { 0x0100, 8, 0, 0, 0x1d1 },
- .bvalid_det_en = { 0x3020, 2, 2, 0, 1 },
- .bvalid_det_st = { 0x3024, 2, 2, 0, 1 },
- .bvalid_det_clr = { 0x3028, 2, 2, 0, 1 },
+ .bvalid_det_en = { 0x3020, 3, 2, 0, 3 },
+ .bvalid_det_st = { 0x3024, 3, 2, 0, 3 },
+ .bvalid_det_clr = { 0x3028, 3, 2, 0, 3 },
+ .id_det_en = { 0x3020, 5, 4, 0, 3 },
+ .id_det_st = { 0x3024, 5, 4, 0, 3 },
+ .id_det_clr = { 0x3028, 5, 4, 0, 3 },
.ls_det_en = { 0x3020, 0, 0, 0, 1 },
.ls_det_st = { 0x3024, 0, 0, 0, 1 },
.ls_det_clr = { 0x3028, 0, 0, 0, 1 },
.utmi_avalid = { 0x0120, 10, 10, 0, 1 },
.utmi_bvalid = { 0x0120, 9, 9, 0, 1 },
+ .utmi_id = { 0x0120, 6, 6, 0, 1 },
.utmi_ls = { 0x0120, 5, 4, 0, 1 },
},
[USB2PHY_PORT_HOST] = {
@@ -1388,14 +1457,18 @@ static const struct rockchip_usb2phy_cfg rk3328_phy_cfgs[] = {
.port_cfgs = {
[USB2PHY_PORT_OTG] = {
.phy_sus = { 0x0100, 15, 0, 0, 0x1d1 },
- .bvalid_det_en = { 0x0110, 2, 2, 0, 1 },
- .bvalid_det_st = { 0x0114, 2, 2, 0, 1 },
- .bvalid_det_clr = { 0x0118, 2, 2, 0, 1 },
+ .bvalid_det_en = { 0x0110, 3, 2, 0, 3 },
+ .bvalid_det_st = { 0x0114, 3, 2, 0, 3 },
+ .bvalid_det_clr = { 0x0118, 3, 2, 0, 3 },
+ .id_det_en = { 0x0110, 5, 4, 0, 3 },
+ .id_det_st = { 0x0114, 5, 4, 0, 3 },
+ .id_det_clr = { 0x0118, 5, 4, 0, 3 },
.ls_det_en = { 0x0110, 0, 0, 0, 1 },
.ls_det_st = { 0x0114, 0, 0, 0, 1 },
.ls_det_clr = { 0x0118, 0, 0, 0, 1 },
.utmi_avalid = { 0x0120, 10, 10, 0, 1 },
.utmi_bvalid = { 0x0120, 9, 9, 0, 1 },
+ .utmi_id = { 0x0120, 6, 6, 0, 1 },
.utmi_ls = { 0x0120, 5, 4, 0, 1 },
},
[USB2PHY_PORT_HOST] = {
@@ -1453,8 +1526,12 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
.bvalid_det_en = { 0xe3c0, 3, 3, 0, 1 },
.bvalid_det_st = { 0xe3e0, 3, 3, 0, 1 },
.bvalid_det_clr = { 0xe3d0, 3, 3, 0, 1 },
+ .id_det_en = { 0xe3c0, 5, 4, 0, 3 },
+ .id_det_st = { 0xe3e0, 5, 4, 0, 3 },
+ .id_det_clr = { 0xe3d0, 5, 4, 0, 3 },
.utmi_avalid = { 0xe2ac, 7, 7, 0, 1 },
.utmi_bvalid = { 0xe2ac, 12, 12, 0, 1 },
+ .utmi_id = { 0xe2ac, 8, 8, 0, 1 },
},
[USB2PHY_PORT_HOST] = {
.phy_sus = { 0xe458, 1, 0, 0x2, 0x1 },
@@ -1488,8 +1565,12 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
.bvalid_det_en = { 0xe3c0, 8, 8, 0, 1 },
.bvalid_det_st = { 0xe3e0, 8, 8, 0, 1 },
.bvalid_det_clr = { 0xe3d0, 8, 8, 0, 1 },
+ .id_det_en = { 0xe3c0, 10, 9, 0, 3 },
+ .id_det_st = { 0xe3e0, 10, 9, 0, 3 },
+ .id_det_clr = { 0xe3d0, 10, 9, 0, 3 },
.utmi_avalid = { 0xe2ac, 10, 10, 0, 1 },
.utmi_bvalid = { 0xe2ac, 16, 16, 0, 1 },
+ .utmi_id = { 0xe2ac, 11, 11, 0, 1 },
},
[USB2PHY_PORT_HOST] = {
.phy_sus = { 0xe468, 1, 0, 0x2, 0x1 },
@@ -1512,11 +1593,15 @@ static const struct rockchip_usb2phy_cfg rk3568_phy_cfgs[] = {
.port_cfgs = {
[USB2PHY_PORT_OTG] = {
.phy_sus = { 0x0000, 8, 0, 0, 0x1d1 },
- .bvalid_det_en = { 0x0080, 2, 2, 0, 1 },
- .bvalid_det_st = { 0x0084, 2, 2, 0, 1 },
- .bvalid_det_clr = { 0x0088, 2, 2, 0, 1 },
+ .bvalid_det_en = { 0x0080, 3, 2, 0, 3 },
+ .bvalid_det_st = { 0x0084, 3, 2, 0, 3 },
+ .bvalid_det_clr = { 0x0088, 3, 2, 0, 3 },
+ .id_det_en = { 0x0080, 5, 4, 0, 3 },
+ .id_det_st = { 0x0084, 5, 4, 0, 3 },
+ .id_det_clr = { 0x0088, 5, 4, 0, 3 },
.utmi_avalid = { 0x00c0, 10, 10, 0, 1 },
.utmi_bvalid = { 0x00c0, 9, 9, 0, 1 },
+ .utmi_id = { 0x00c0, 6, 6, 0, 1 },
},
[USB2PHY_PORT_HOST] = {
/* Select suspend control from controller */
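The rockchip-inno-usb2 hunks add a dedicated otg-id interrupt whose handler clears the pending bit, samples the UTMI ID state and reports the role through extcon, asserting EXTCON_USB_HOST when the ID pin reads low. A minimal sketch of that reporting step only, with a hypothetical helper name:

#include <linux/extcon-provider.h>

/* id is true when the ID pin floats high (peripheral role). */
static void example_report_id(struct extcon_dev *edev, bool id)
{
	/* Host role is active-low on the ID pin. */
	extcon_set_state_sync(edev, EXTCON_USB_HOST, !id);
}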
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index d2bbdc96a167..d76440ae10ff 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -1105,15 +1105,14 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct resource *res;
const struct rockchip_usb3phy_port_cfg *phy_cfgs;
- const struct of_device_id *match;
int index, ret;
tcphy = devm_kzalloc(dev, sizeof(*tcphy), GFP_KERNEL);
if (!tcphy)
return -ENOMEM;
- match = of_match_device(dev->driver->of_match_table, dev);
- if (!match || !match->data) {
+ phy_cfgs = of_device_get_match_data(dev);
+ if (!phy_cfgs) {
dev_err(dev, "phy configs are not assigned!\n");
return -EINVAL;
}
@@ -1123,7 +1122,6 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
if (IS_ERR(tcphy->base))
return PTR_ERR(tcphy->base);
- phy_cfgs = match->data;
/* find out a proper config which can be matched with dt. */
index = 0;
while (phy_cfgs[index].reg) {
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 47e433e09c5c..dad453054776 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -358,6 +358,22 @@ static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
+static int bcm2835_of_gpio_ranges_fallback(struct gpio_chip *gc,
+ struct device_node *np)
+{
+ struct pinctrl_dev *pctldev = of_pinctrl_get(np);
+
+ of_node_put(np);
+
+ if (!pctldev)
+ return 0;
+
+ gpiochip_add_pin_range(gc, pinctrl_dev_get_devname(pctldev), 0, 0,
+ gc->ngpio);
+
+ return 0;
+}
+
static const struct gpio_chip bcm2835_gpio_chip = {
.label = MODULE_NAME,
.owner = THIS_MODULE,
@@ -372,6 +388,7 @@ static const struct gpio_chip bcm2835_gpio_chip = {
.base = -1,
.ngpio = BCM2835_NUM_GPIOS,
.can_sleep = false,
+ .of_gpio_ranges_fallback = bcm2835_of_gpio_ranges_fallback,
};
static const struct gpio_chip bcm2711_gpio_chip = {
@@ -388,6 +405,7 @@ static const struct gpio_chip bcm2711_gpio_chip = {
.base = -1,
.ngpio = BCM2711_NUM_GPIOS,
.can_sleep = false,
+ .of_gpio_ranges_fallback = bcm2835_of_gpio_ranges_fallback,
};
static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
index 6a7fe929a68b..3026a3b3da2d 100644
--- a/drivers/pinctrl/berlin/berlin-bg4ct.c
+++ b/drivers/pinctrl/berlin/berlin-bg4ct.c
@@ -460,8 +460,7 @@ static int berlin4ct_pinctrl_probe(struct platform_device *pdev)
if (!rmconfig)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
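devm_platform_get_and_ioremap_resource(), used in the hunk above, folds platform_get_resource() and devm_ioremap_resource() into one call and optionally hands back the struct resource. A short sketch of the call, with hypothetical names:

#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* Looks up MEM resource 0, ioremaps it, and returns the mapping. */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* res stays valid, e.g. for resource_size(res). */
	return 0;
}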
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 453dc47f4fa4..d96b1130efd3 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -206,3 +206,10 @@ config PINCTRL_IMX23
config PINCTRL_IMX28
bool
select PINCTRL_MXS
+
+config PINCTRL_IMXRT1170
+ bool "IMXRT1170 pinctrl driver"
+ depends on ARCH_MXC
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imxrt1170 pinctrl driver
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 9f5d1c090338..647dff060477 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -32,3 +32,4 @@ obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o
obj-$(CONFIG_PINCTRL_IMX25) += pinctrl-imx25.o
obj-$(CONFIG_PINCTRL_IMX28) += pinctrl-imx28.o
obj-$(CONFIG_PINCTRL_IMXRT1050) += pinctrl-imxrt1050.o
+obj-$(CONFIG_PINCTRL_IMXRT1170) += pinctrl-imxrt1170.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imxrt1170.c b/drivers/pinctrl/freescale/pinctrl-imxrt1170.c
new file mode 100644
index 000000000000..5da1545fde91
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imxrt1170.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022
+ * Author(s): Jesse Taube <Mr.Bossman075@gmail.com>
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-imx.h"
+
+enum imxrt1170_pads {
+ IMXRT1170_PAD_RESERVE0,
+ IMXRT1170_PAD_RESERVE1,
+ IMXRT1170_PAD_RESERVE2,
+ IMXRT1170_PAD_RESERVE3,
+ IMXRT1170_PAD_EMC_B1_00,
+ IMXRT1170_PAD_EMC_B1_01,
+ IMXRT1170_PAD_EMC_B1_02,
+ IMXRT1170_PAD_EMC_B1_03,
+ IMXRT1170_PAD_EMC_B1_04,
+ IMXRT1170_PAD_EMC_B1_05,
+ IMXRT1170_PAD_EMC_B1_06,
+ IMXRT1170_PAD_EMC_B1_07,
+ IMXRT1170_PAD_EMC_B1_08,
+ IMXRT1170_PAD_EMC_B1_09,
+ IMXRT1170_PAD_EMC_B1_10,
+ IMXRT1170_PAD_EMC_B1_11,
+ IMXRT1170_PAD_EMC_B1_12,
+ IMXRT1170_PAD_EMC_B1_13,
+ IMXRT1170_PAD_EMC_B1_14,
+ IMXRT1170_PAD_EMC_B1_15,
+ IMXRT1170_PAD_EMC_B1_16,
+ IMXRT1170_PAD_EMC_B1_17,
+ IMXRT1170_PAD_EMC_B1_18,
+ IMXRT1170_PAD_EMC_B1_19,
+ IMXRT1170_PAD_EMC_B1_20,
+ IMXRT1170_PAD_EMC_B1_21,
+ IMXRT1170_PAD_EMC_B1_22,
+ IMXRT1170_PAD_EMC_B1_23,
+ IMXRT1170_PAD_EMC_B1_24,
+ IMXRT1170_PAD_EMC_B1_25,
+ IMXRT1170_PAD_EMC_B1_26,
+ IMXRT1170_PAD_EMC_B1_27,
+ IMXRT1170_PAD_EMC_B1_28,
+ IMXRT1170_PAD_EMC_B1_29,
+ IMXRT1170_PAD_EMC_B1_30,
+ IMXRT1170_PAD_EMC_B1_31,
+ IMXRT1170_PAD_EMC_B1_32,
+ IMXRT1170_PAD_EMC_B1_33,
+ IMXRT1170_PAD_EMC_B1_34,
+ IMXRT1170_PAD_EMC_B1_35,
+ IMXRT1170_PAD_EMC_B1_36,
+ IMXRT1170_PAD_EMC_B1_37,
+ IMXRT1170_PAD_EMC_B1_38,
+ IMXRT1170_PAD_EMC_B1_39,
+ IMXRT1170_PAD_EMC_B1_40,
+ IMXRT1170_PAD_EMC_B1_41,
+ IMXRT1170_PAD_EMC_B2_00,
+ IMXRT1170_PAD_EMC_B2_01,
+ IMXRT1170_PAD_EMC_B2_02,
+ IMXRT1170_PAD_EMC_B2_03,
+ IMXRT1170_PAD_EMC_B2_04,
+ IMXRT1170_PAD_EMC_B2_05,
+ IMXRT1170_PAD_EMC_B2_06,
+ IMXRT1170_PAD_EMC_B2_07,
+ IMXRT1170_PAD_EMC_B2_08,
+ IMXRT1170_PAD_EMC_B2_09,
+ IMXRT1170_PAD_EMC_B2_10,
+ IMXRT1170_PAD_EMC_B2_11,
+ IMXRT1170_PAD_EMC_B2_12,
+ IMXRT1170_PAD_EMC_B2_13,
+ IMXRT1170_PAD_EMC_B2_14,
+ IMXRT1170_PAD_EMC_B2_15,
+ IMXRT1170_PAD_EMC_B2_16,
+ IMXRT1170_PAD_EMC_B2_17,
+ IMXRT1170_PAD_EMC_B2_18,
+ IMXRT1170_PAD_EMC_B2_19,
+ IMXRT1170_PAD_EMC_B2_20,
+ IMXRT1170_PAD_AD_00,
+ IMXRT1170_PAD_AD_01,
+ IMXRT1170_PAD_AD_02,
+ IMXRT1170_PAD_AD_03,
+ IMXRT1170_PAD_AD_04,
+ IMXRT1170_PAD_AD_05,
+ IMXRT1170_PAD_AD_06,
+ IMXRT1170_PAD_AD_07,
+ IMXRT1170_PAD_AD_08,
+ IMXRT1170_PAD_AD_09,
+ IMXRT1170_PAD_AD_10,
+ IMXRT1170_PAD_AD_11,
+ IMXRT1170_PAD_AD_12,
+ IMXRT1170_PAD_AD_13,
+ IMXRT1170_PAD_AD_14,
+ IMXRT1170_PAD_AD_15,
+ IMXRT1170_PAD_AD_16,
+ IMXRT1170_PAD_AD_17,
+ IMXRT1170_PAD_AD_18,
+ IMXRT1170_PAD_AD_19,
+ IMXRT1170_PAD_AD_20,
+ IMXRT1170_PAD_AD_21,
+ IMXRT1170_PAD_AD_22,
+ IMXRT1170_PAD_AD_23,
+ IMXRT1170_PAD_AD_24,
+ IMXRT1170_PAD_AD_25,
+ IMXRT1170_PAD_AD_26,
+ IMXRT1170_PAD_AD_27,
+ IMXRT1170_PAD_AD_28,
+ IMXRT1170_PAD_AD_29,
+ IMXRT1170_PAD_AD_30,
+ IMXRT1170_PAD_AD_31,
+ IMXRT1170_PAD_AD_32,
+ IMXRT1170_PAD_AD_33,
+ IMXRT1170_PAD_AD_34,
+ IMXRT1170_PAD_AD_35,
+ IMXRT1170_PAD_SD_B1_00,
+ IMXRT1170_PAD_SD_B1_01,
+ IMXRT1170_PAD_SD_B1_02,
+ IMXRT1170_PAD_SD_B1_03,
+ IMXRT1170_PAD_SD_B1_04,
+ IMXRT1170_PAD_SD_B1_05,
+ IMXRT1170_PAD_SD_B2_00,
+ IMXRT1170_PAD_SD_B2_01,
+ IMXRT1170_PAD_SD_B2_02,
+ IMXRT1170_PAD_SD_B2_03,
+ IMXRT1170_PAD_SD_B2_04,
+ IMXRT1170_PAD_SD_B2_05,
+ IMXRT1170_PAD_SD_B2_06,
+ IMXRT1170_PAD_SD_B2_07,
+ IMXRT1170_PAD_SD_B2_08,
+ IMXRT1170_PAD_SD_B2_09,
+ IMXRT1170_PAD_SD_B2_10,
+ IMXRT1170_PAD_SD_B2_11,
+ IMXRT1170_PAD_DISP_B1_00,
+ IMXRT1170_PAD_DISP_B1_01,
+ IMXRT1170_PAD_DISP_B1_02,
+ IMXRT1170_PAD_DISP_B1_03,
+ IMXRT1170_PAD_DISP_B1_04,
+ IMXRT1170_PAD_DISP_B1_05,
+ IMXRT1170_PAD_DISP_B1_06,
+ IMXRT1170_PAD_DISP_B1_07,
+ IMXRT1170_PAD_DISP_B1_08,
+ IMXRT1170_PAD_DISP_B1_09,
+ IMXRT1170_PAD_DISP_B1_10,
+ IMXRT1170_PAD_DISP_B1_11,
+ IMXRT1170_PAD_DISP_B2_00,
+ IMXRT1170_PAD_DISP_B2_01,
+ IMXRT1170_PAD_DISP_B2_02,
+ IMXRT1170_PAD_DISP_B2_03,
+ IMXRT1170_PAD_DISP_B2_04,
+ IMXRT1170_PAD_DISP_B2_05,
+ IMXRT1170_PAD_DISP_B2_06,
+ IMXRT1170_PAD_DISP_B2_07,
+ IMXRT1170_PAD_DISP_B2_08,
+ IMXRT1170_PAD_DISP_B2_09,
+ IMXRT1170_PAD_DISP_B2_10,
+ IMXRT1170_PAD_DISP_B2_11,
+ IMXRT1170_PAD_DISP_B2_12,
+ IMXRT1170_PAD_DISP_B2_13,
+ IMXRT1170_PAD_DISP_B2_14,
+ IMXRT1170_PAD_DISP_B2_15,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imxrt1170_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_RESERVE0),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_RESERVE2),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_RESERVE3),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_00),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_01),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_02),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_03),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_04),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_05),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_06),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_07),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_08),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_09),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_10),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_11),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_12),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_13),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_14),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_15),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_16),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_17),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_18),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_19),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_20),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_21),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_22),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_23),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_24),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_25),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_26),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_27),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_28),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_29),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_30),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_31),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_32),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_33),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_34),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_35),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_36),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_37),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_38),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_39),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_40),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_41),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_00),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_01),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_02),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_03),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_04),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_05),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_06),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_07),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_08),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_09),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_10),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_11),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_12),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_13),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_14),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_15),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_16),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_17),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_18),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_19),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_20),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_00),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_01),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_02),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_03),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_04),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_05),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_06),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_07),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_08),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_09),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_10),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_11),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_12),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_13),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_14),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_15),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_16),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_17),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_18),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_19),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_20),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_21),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_22),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_23),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_24),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_25),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_26),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_27),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_28),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_29),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_30),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_31),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_32),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_33),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_34),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_35),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_00),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_01),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_02),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_03),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_04),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_05),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_00),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_01),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_02),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_03),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_04),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_05),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_06),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_07),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_08),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_09),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_10),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_11),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_00),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_01),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_02),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_03),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_04),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_05),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_06),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_07),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_08),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_09),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_10),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_11),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_00),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_01),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_02),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_03),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_04),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_05),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_06),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_07),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_08),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_09),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_10),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_11),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_12),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_13),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_14),
+ IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_15),
+};
+
+static const struct imx_pinctrl_soc_info imxrt1170_pinctrl_info = {
+ .pins = imxrt1170_pinctrl_pads,
+ .npins = ARRAY_SIZE(imxrt1170_pinctrl_pads),
+ .gpr_compatible = "fsl,imxrt1170-iomuxc-gpr",
+};
+
+static const struct of_device_id imxrt1170_pinctrl_of_match[] = {
+ { .compatible = "fsl,imxrt1170-iomuxc", .data = &imxrt1170_pinctrl_info, },
+ { /* sentinel */ }
+};
+
+static int imxrt1170_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imxrt1170_pinctrl_info);
+}
+
+static struct platform_driver imxrt1170_pinctrl_driver = {
+ .driver = {
+ .name = "imxrt1170-pinctrl",
+ .of_match_table = of_match_ptr(imxrt1170_pinctrl_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = imxrt1170_pinctrl_probe,
+};
+
+static int __init imxrt1170_pinctrl_init(void)
+{
+ return platform_driver_register(&imxrt1170_pinctrl_driver);
+}
+arch_initcall(imxrt1170_pinctrl_init);
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index f89c9fcd4e1b..31f8f271628c 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1350,15 +1350,15 @@ static void byt_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *vg = gpiochip_get_data(gc);
- unsigned int offset = irqd_to_hwirq(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
void __iomem *reg;
- reg = byt_gpio_reg(vg, offset, BYT_INT_STAT_REG);
+ reg = byt_gpio_reg(vg, hwirq, BYT_INT_STAT_REG);
if (!reg)
return;
raw_spin_lock(&byt_lock);
- writel(BIT(offset % 32), reg);
+ writel(BIT(hwirq % 32), reg);
raw_spin_unlock(&byt_lock);
}
@@ -1366,20 +1366,24 @@ static void byt_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *vg = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- byt_gpio_clear_triggering(vg, irqd_to_hwirq(d));
+ byt_gpio_clear_triggering(vg, hwirq);
+ gpiochip_disable_irq(gc, hwirq);
}
static void byt_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *vg = gpiochip_get_data(gc);
- unsigned int offset = irqd_to_hwirq(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
void __iomem *reg;
u32 value;
- reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+ gpiochip_enable_irq(gc, hwirq);
+
+ reg = byt_gpio_reg(vg, hwirq, BYT_CONF0_REG);
if (!reg)
return;
@@ -1412,12 +1416,13 @@ static void byt_irq_unmask(struct irq_data *d)
static int byt_irq_type(struct irq_data *d, unsigned int type)
{
struct intel_pinctrl *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- u32 offset = irqd_to_hwirq(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
u32 value;
unsigned long flags;
- void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+ void __iomem *reg;
- if (!reg || offset >= vg->chip.ngpio)
+ reg = byt_gpio_reg(vg, hwirq, BYT_CONF0_REG);
+ if (!reg)
return -EINVAL;
raw_spin_lock_irqsave(&byt_lock, flags);
@@ -1447,6 +1452,16 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
+static const struct irq_chip byt_gpio_irq_chip = {
+ .name = "BYT-GPIO",
+ .irq_ack = byt_irq_ack,
+ .irq_mask = byt_irq_mask,
+ .irq_unmask = byt_irq_unmask,
+ .irq_set_type = byt_irq_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_SET_TYPE_MASKED | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static void byt_gpio_irq_handler(struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
@@ -1633,15 +1648,8 @@ static int byt_gpio_probe(struct intel_pinctrl *vg)
if (irq > 0) {
struct gpio_irq_chip *girq;
- vg->irqchip.name = "BYT-GPIO",
- vg->irqchip.irq_ack = byt_irq_ack,
- vg->irqchip.irq_mask = byt_irq_mask,
- vg->irqchip.irq_unmask = byt_irq_unmask,
- vg->irqchip.irq_set_type = byt_irq_type,
- vg->irqchip.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_SET_TYPE_MASKED,
-
girq = &gc->irq;
- girq->chip = &vg->irqchip;
+ gpio_irq_chip_set_chip(girq, &byt_gpio_irq_chip);
girq->init_hw = byt_gpio_irq_init_hw;
girq->init_valid_mask = byt_init_irq_valid_mask;
girq->parent_handler = byt_gpio_irq_handler;
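The Baytrail conversion above follows the immutable irq_chip pattern: the chip becomes a const static flagged IRQCHIP_IMMUTABLE with GPIOCHIP_IRQ_RESOURCE_HELPERS, the mask/unmask callbacks call gpiochip_disable_irq()/gpiochip_enable_irq(), and the gpio_irq_chip is wired up with gpio_irq_chip_set_chip() instead of assigning girq->chip directly. A condensed sketch of the same shape, with hypothetical callbacks and hardware accesses elided:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static void example_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	/* ... mask the interrupt in hardware ... */
	gpiochip_disable_irq(gc, hwirq);
}

static void example_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	gpiochip_enable_irq(gc, hwirq);
	/* ... unmask the interrupt in hardware ... */
}

static const struct irq_chip example_irq_chip = {
	.name		= "example-gpio",
	.irq_mask	= example_irq_mask,
	.irq_unmask	= example_irq_unmask,
	.flags		= IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

/* In probe: gpio_irq_chip_set_chip(&gc->irq, &example_irq_chip); */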
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index 2be7e414f803..fb15cd10a32f 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -1035,4 +1035,5 @@ module_exit(bxt_pinctrl_exit);
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Intel Broxton SoC pinctrl/GPIO driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:apollolake-pinctrl");
MODULE_ALIAS("platform:broxton-pinctrl");
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 1d5818269076..26b2a425d201 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1242,12 +1242,12 @@ static void chv_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
- int pin = irqd_to_hwirq(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
u32 intr_line;
raw_spin_lock(&chv_lock);
- intr_line = chv_readl(pctrl, pin, CHV_PADCTRL0);
+ intr_line = chv_readl(pctrl, hwirq, CHV_PADCTRL0);
intr_line &= CHV_PADCTRL0_INTSEL_MASK;
intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
chv_pctrl_writel(pctrl, CHV_INTSTAT, BIT(intr_line));
@@ -1255,17 +1255,15 @@ static void chv_gpio_irq_ack(struct irq_data *d)
raw_spin_unlock(&chv_lock);
}
-static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
+static void chv_gpio_irq_mask_unmask(struct gpio_chip *gc, irq_hw_number_t hwirq, bool mask)
{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
- int pin = irqd_to_hwirq(d);
u32 value, intr_line;
unsigned long flags;
raw_spin_lock_irqsave(&chv_lock, flags);
- intr_line = chv_readl(pctrl, pin, CHV_PADCTRL0);
+ intr_line = chv_readl(pctrl, hwirq, CHV_PADCTRL0);
intr_line &= CHV_PADCTRL0_INTSEL_MASK;
intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
@@ -1281,12 +1279,20 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
static void chv_gpio_irq_mask(struct irq_data *d)
{
- chv_gpio_irq_mask_unmask(d, true);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ chv_gpio_irq_mask_unmask(gc, hwirq, true);
+ gpiochip_disable_irq(gc, hwirq);
}
static void chv_gpio_irq_unmask(struct irq_data *d)
{
- chv_gpio_irq_mask_unmask(d, false);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gc, hwirq);
+ chv_gpio_irq_mask_unmask(gc, hwirq, false);
}
static unsigned chv_gpio_irq_startup(struct irq_data *d)
@@ -1306,17 +1312,17 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
struct device *dev = pctrl->dev;
struct intel_community_context *cctx = &pctrl->context.communities[0];
- unsigned int pin = irqd_to_hwirq(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
irq_flow_handler_t handler;
unsigned long flags;
u32 intsel, value;
raw_spin_lock_irqsave(&chv_lock, flags);
- intsel = chv_readl(pctrl, pin, CHV_PADCTRL0);
+ intsel = chv_readl(pctrl, hwirq, CHV_PADCTRL0);
intsel &= CHV_PADCTRL0_INTSEL_MASK;
intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
- value = chv_readl(pctrl, pin, CHV_PADCTRL1);
+ value = chv_readl(pctrl, hwirq, CHV_PADCTRL1);
if (value & CHV_PADCTRL1_INTWAKECFG_LEVEL)
handler = handle_level_irq;
else
@@ -1324,9 +1330,9 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
if (cctx->intr_lines[intsel] == CHV_INVALID_HWIRQ) {
irq_set_handler_locked(d, handler);
- dev_dbg(dev, "using interrupt line %u for IRQ_TYPE_NONE on pin %u\n",
- intsel, pin);
- cctx->intr_lines[intsel] = pin;
+ dev_dbg(dev, "using interrupt line %u for IRQ_TYPE_NONE on pin %lu\n",
+ intsel, hwirq);
+ cctx->intr_lines[intsel] = hwirq;
}
raw_spin_unlock_irqrestore(&chv_lock, flags);
}
@@ -1392,14 +1398,14 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
- unsigned int pin = irqd_to_hwirq(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
u32 value;
int ret;
raw_spin_lock_irqsave(&chv_lock, flags);
- ret = chv_gpio_set_intr_line(pctrl, pin);
+ ret = chv_gpio_set_intr_line(pctrl, hwirq);
if (ret)
goto out_unlock;
@@ -1416,8 +1422,8 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
* 2. If the pin cfg is not locked in BIOS:
* Driver programs the IntWakeCfg bits and save the mapping.
*/
- if (!chv_pad_locked(pctrl, pin)) {
- value = chv_readl(pctrl, pin, CHV_PADCTRL1);
+ if (!chv_pad_locked(pctrl, hwirq)) {
+ value = chv_readl(pctrl, hwirq, CHV_PADCTRL1);
value &= ~CHV_PADCTRL1_INTWAKECFG_MASK;
value &= ~CHV_PADCTRL1_INVRXTX_MASK;
@@ -1434,7 +1440,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
value |= CHV_PADCTRL1_INVRXTX_RXDATA;
}
- chv_writel(pctrl, pin, CHV_PADCTRL1, value);
+ chv_writel(pctrl, hwirq, CHV_PADCTRL1, value);
}
if (type & IRQ_TYPE_EDGE_BOTH)
@@ -1448,6 +1454,17 @@ out_unlock:
return ret;
}
+static const struct irq_chip chv_gpio_irq_chip = {
+ .name = "chv-gpio",
+ .irq_startup = chv_gpio_irq_startup,
+ .irq_ack = chv_gpio_irq_ack,
+ .irq_mask = chv_gpio_irq_mask,
+ .irq_unmask = chv_gpio_irq_unmask,
+ .irq_set_type = chv_gpio_irq_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static void chv_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -1611,15 +1628,8 @@ static int chv_gpio_probe(struct intel_pinctrl *pctrl, int irq)
chip->base = -1;
pctrl->irq = irq;
- pctrl->irqchip.name = "chv-gpio";
- pctrl->irqchip.irq_startup = chv_gpio_irq_startup;
- pctrl->irqchip.irq_ack = chv_gpio_irq_ack;
- pctrl->irqchip.irq_mask = chv_gpio_irq_mask;
- pctrl->irqchip.irq_unmask = chv_gpio_irq_unmask;
- pctrl->irqchip.irq_set_type = chv_gpio_irq_type;
- pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE;
-
- chip->irq.chip = &pctrl->irqchip;
+
+ gpio_irq_chip_set_chip(&chip->irq, &chv_gpio_irq_chip);
chip->irq.init_hw = chv_gpio_irq_init_hw;
chip->irq.parent_handler = chv_gpio_irq_handler;
chip->irq.num_parents = 1;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 826d494f3cc6..ffc045f7bf00 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -858,6 +858,9 @@ static const struct pinctrl_desc intel_pinctrl_desc = {
* When coming through gpiolib irqchip, the GPIO offset is not
* automatically translated to pinctrl pin number. This function can be
* used to find out the corresponding pinctrl pin.
+ *
+ * Return: a pin number and pointers to the community and pad group, which
+ * the pin belongs to, or negative error code if translation can't be done.
*/
static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset,
const struct intel_community **community,
@@ -899,6 +902,8 @@ static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset,
* @pin: pin number
*
* Translate the pin number of pinctrl to GPIO offset
+ *
+ * Return: a GPIO offset, or negative error code if translation can't be done.
*/
static __maybe_unused int intel_pin_to_gpio(struct intel_pinctrl *pctrl, int pin)
{
@@ -1039,15 +1044,14 @@ static void intel_gpio_irq_ack(struct irq_data *d)
}
}
-static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
+static void intel_gpio_irq_mask_unmask(struct gpio_chip *gc, irq_hw_number_t hwirq, bool mask)
{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
const struct intel_community *community;
const struct intel_padgroup *padgrp;
int pin;
- pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), &community, &padgrp);
+ pin = intel_gpio_to_pin(pctrl, hwirq, &community, &padgrp);
if (pin >= 0) {
unsigned int gpp, gpp_offset;
unsigned long flags;
@@ -1077,12 +1081,20 @@ static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
static void intel_gpio_irq_mask(struct irq_data *d)
{
- intel_gpio_irq_mask_unmask(d, true);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ intel_gpio_irq_mask_unmask(gc, hwirq, true);
+ gpiochip_disable_irq(gc, hwirq);
}
static void intel_gpio_irq_unmask(struct irq_data *d)
{
- intel_gpio_irq_mask_unmask(d, false);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gc, hwirq);
+ intel_gpio_irq_mask_unmask(gc, hwirq, false);
}
static int intel_gpio_irq_type(struct irq_data *d, unsigned int type)
@@ -1157,6 +1169,17 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
return 0;
}
+static const struct irq_chip intel_gpio_irq_chip = {
+ .name = "intel-gpio",
+ .irq_ack = intel_gpio_irq_ack,
+ .irq_mask = intel_gpio_irq_mask,
+ .irq_unmask = intel_gpio_irq_unmask,
+ .irq_set_type = intel_gpio_irq_type,
+ .irq_set_wake = intel_gpio_irq_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
const struct intel_community *community)
{
@@ -1319,15 +1342,6 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
pctrl->chip.add_pin_ranges = intel_gpio_add_pin_ranges;
pctrl->irq = irq;
- /* Setup IRQ chip */
- pctrl->irqchip.name = dev_name(pctrl->dev);
- pctrl->irqchip.irq_ack = intel_gpio_irq_ack;
- pctrl->irqchip.irq_mask = intel_gpio_irq_mask;
- pctrl->irqchip.irq_unmask = intel_gpio_irq_unmask;
- pctrl->irqchip.irq_set_type = intel_gpio_irq_type;
- pctrl->irqchip.irq_set_wake = intel_gpio_irq_wake;
- pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND;
-
/*
* On some platforms several GPIO controllers share the same interrupt
* line.
@@ -1340,8 +1354,9 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
return ret;
}
+ /* Setup IRQ chip */
girq = &pctrl->chip.irq;
- girq->chip = &pctrl->irqchip;
+ gpio_irq_chip_set_chip(girq, &intel_gpio_irq_chip);
/* This will let us handle the IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index c4fef03b663f..710341bb67cc 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -223,7 +223,6 @@ struct intel_pinctrl_context {
* @pctldesc: Pin controller description
* @pctldev: Pointer to the pin controller device
* @chip: GPIO chip in this pin controller
- * @irqchip: IRQ chip in this pin controller
* @soc: SoC/PCH specific pin configuration data
* @communities: All communities in this pin controller
* @ncommunities: Number of communities in this pin controller
@@ -236,7 +235,6 @@ struct intel_pinctrl {
struct pinctrl_desc pctldesc;
struct pinctrl_dev *pctldev;
struct gpio_chip chip;
- struct irq_chip irqchip;
const struct intel_pinctrl_soc_data *soc;
struct intel_community *communities;
size_t ncommunities;
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index 561fa322b0b4..4fb39eb30902 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -663,7 +663,7 @@ static void lp_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *lg = gpiochip_get_data(gc);
- u32 hwirq = irqd_to_hwirq(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_STAT);
unsigned long flags;
@@ -684,10 +684,12 @@ static void lp_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *lg = gpiochip_get_data(gc);
- u32 hwirq = irqd_to_hwirq(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
unsigned long flags;
+ gpiochip_enable_irq(gc, hwirq);
+
raw_spin_lock_irqsave(&lg->lock, flags);
iowrite32(ioread32(reg) | BIT(hwirq % 32), reg);
raw_spin_unlock_irqrestore(&lg->lock, flags);
@@ -697,30 +699,33 @@ static void lp_irq_disable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *lg = gpiochip_get_data(gc);
- u32 hwirq = irqd_to_hwirq(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
unsigned long flags;
raw_spin_lock_irqsave(&lg->lock, flags);
iowrite32(ioread32(reg) & ~BIT(hwirq % 32), reg);
raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ gpiochip_disable_irq(gc, hwirq);
}
static int lp_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *lg = gpiochip_get_data(gc);
- u32 hwirq = irqd_to_hwirq(d);
- void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_CONFIG1);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
+ void __iomem *reg;
u32 value;
- if (hwirq >= lg->chip.ngpio)
+ reg = lp_gpio_reg(&lg->chip, hwirq, LP_CONFIG1);
+ if (!reg)
return -EINVAL;
/* Fail if BIOS reserved pin for ACPI use */
if (lp_gpio_acpi_use(lg, hwirq)) {
- dev_err(lg->dev, "pin %u can't be used as IRQ\n", hwirq);
+ dev_err(lg->dev, "pin %lu can't be used as IRQ\n", hwirq);
return -EBUSY;
}
@@ -755,7 +760,7 @@ static int lp_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip lp_irqchip = {
+static const struct irq_chip lp_irqchip = {
.name = "LP-GPIO",
.irq_ack = lp_irq_ack,
.irq_mask = lp_irq_mask,
@@ -763,7 +768,8 @@ static struct irq_chip lp_irqchip = {
.irq_enable = lp_irq_enable,
.irq_disable = lp_irq_disable,
.irq_set_type = lp_irq_set_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int lp_gpio_irq_init_hw(struct gpio_chip *chip)
@@ -884,7 +890,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *girq;
girq = &gc->irq;
- girq->chip = &lp_irqchip;
+ gpio_irq_chip_set_chip(girq, &lp_irqchip);
girq->init_hw = lp_gpio_irq_init_hw;
girq->parent_handler = lp_gpio_irq_handler;
girq->num_parents = 1;
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 40accd110c3d..1600a2c18eee 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -106,6 +106,13 @@ config PINCTRL_MT6779
In MTK platform, we support virtual gpio and use it to
map specific eint which doesn't have real gpio pin.
+config PINCTRL_MT6795
+ bool "Mediatek MT6795 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_PARIS
+
config PINCTRL_MT6797
bool "Mediatek MT6797 pin control"
depends on OF
@@ -166,6 +173,7 @@ config PINCTRL_MT8195
bool "Mediatek MT8195 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
config PINCTRL_MT8365
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 29018d6ad0de..c8f226ae36c9 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_PINCTRL_MT8135) += pinctrl-mt8135.o
obj-$(CONFIG_PINCTRL_MT8127) += pinctrl-mt8127.o
obj-$(CONFIG_PINCTRL_MT6765) += pinctrl-mt6765.o
obj-$(CONFIG_PINCTRL_MT6779) += pinctrl-mt6779.o
+obj-$(CONFIG_PINCTRL_MT6795) += pinctrl-mt6795.o
obj-$(CONFIG_PINCTRL_MT6797) += pinctrl-mt6797.o
obj-$(CONFIG_PINCTRL_MT7622) += pinctrl-mt7622.o
obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6795.c b/drivers/pinctrl/mediatek/pinctrl-mt6795.c
new file mode 100644
index 000000000000..f90152261a0f
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6795.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include "pinctrl-mtk-mt6795.h"
+#include "pinctrl-paris.h"
+
+#define PIN_FIELD15(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 15, 0)
+
+#define PIN_FIELD16(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 16, 0)
+
+#define PINS_FIELD16(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits)\
+ PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 16, 1)
+
+static const struct mtk_pin_field_calc mt6795_pin_dir_range[] = {
+ PIN_FIELD16(0, 196, 0x0, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6795_pin_pullen_range[] = {
+ PIN_FIELD16(0, 196, 0x100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6795_pin_pullsel_range[] = {
+ PIN_FIELD16(0, 196, 0x200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6795_pin_do_range[] = {
+ PIN_FIELD16(0, 196, 0x400, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6795_pin_di_range[] = {
+ PIN_FIELD16(0, 196, 0x500, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6795_pin_mode_range[] = {
+ PIN_FIELD15(0, 196, 0x600, 0x10, 0, 3),
+};
+
+static const struct mtk_pin_field_calc mt6795_pin_ies_range[] = {
+ PINS_FIELD16(0, 4, 0x900, 0x10, 1, 1),
+ PINS_FIELD16(5, 9, 0x900, 0x10, 2, 1),
+ PINS_FIELD16(10, 15, 0x900, 0x10, 10, 1),
+ PINS_FIELD16(16, 16, 0x900, 0x10, 2, 1),
+ PINS_FIELD16(17, 19, 0x910, 0x10, 3, 1),
+ PINS_FIELD16(20, 22, 0x910, 0x10, 4, 1),
+ PINS_FIELD16(23, 26, 0xce0, 0x10, 14, 1),
+ PINS_FIELD16(27, 27, 0xcc0, 0x10, 14, 1),
+ PINS_FIELD16(28, 28, 0xcd0, 0x10, 14, 1),
+ PINS_FIELD16(29, 32, 0x900, 0x10, 3, 1),
+ PINS_FIELD16(33, 33, 0x900, 0x10, 4, 1),
+ PINS_FIELD16(34, 36, 0x900, 0x10, 5, 1),
+ PINS_FIELD16(37, 38, 0x900, 0x10, 6, 1),
+ PINS_FIELD16(39, 39, 0x900, 0x10, 7, 1),
+ PINS_FIELD16(40, 40, 0x900, 0x10, 8, 1),
+ PINS_FIELD16(41, 42, 0x900, 0x10, 9, 1),
+ PINS_FIELD16(43, 46, 0x900, 0x10, 11, 1),
+ PINS_FIELD16(47, 61, 0x920, 0x10, 3, 1),
+ PINS_FIELD16(62, 66, 0x920, 0x10, 4, 1),
+ PINS_FIELD16(67, 67, 0x920, 0x10, 3, 1),
+ PINS_FIELD16(68, 72, 0x920, 0x10, 5, 1),
+ PINS_FIELD16(73, 77, 0x920, 0x10, 6, 1),
+ PINS_FIELD16(78, 91, 0x920, 0x10, 7, 1),
+ PINS_FIELD16(92, 92, 0x900, 0x10, 13, 1),
+ PINS_FIELD16(93, 95, 0x900, 0x10, 14, 1),
+ PINS_FIELD16(96, 99, 0x900, 0x10, 15, 1),
+ PINS_FIELD16(100, 103, 0xca0, 0x10, 14, 1),
+ PINS_FIELD16(104, 104, 0xc80, 0x10, 14, 1),
+ PINS_FIELD16(105, 105, 0xc90, 0x10, 14, 1),
+ PINS_FIELD16(106, 107, 0x910, 0x10, 0, 1),
+ PINS_FIELD16(108, 112, 0x910, 0x10, 1, 1),
+ PINS_FIELD16(113, 116, 0x910, 0x10, 2, 1),
+ PINS_FIELD16(117, 118, 0x910, 0x10, 5, 1),
+ PINS_FIELD16(119, 124, 0x910, 0x10, 6, 1),
+ PINS_FIELD16(125, 126, 0x910, 0x10, 7, 1),
+ PINS_FIELD16(129, 129, 0x910, 0x10, 8, 1),
+ PINS_FIELD16(130, 131, 0x910, 0x10, 9, 1),
+ PINS_FIELD16(132, 135, 0x910, 0x10, 8, 1),
+ PINS_FIELD16(136, 137, 0x910, 0x10, 7, 1),
+ PINS_FIELD16(154, 161, 0xc20, 0x10, 14, 1),
+ PINS_FIELD16(162, 162, 0xc10, 0x10, 14, 1),
+ PINS_FIELD16(163, 163, 0xc00, 0x10, 14, 1),
+ PINS_FIELD16(164, 164, 0xd10, 0x10, 14, 1),
+ PINS_FIELD16(165, 165, 0xd00, 0x10, 14, 1),
+ PINS_FIELD16(166, 169, 0x910, 0x10, 14, 1),
+ PINS_FIELD16(176, 179, 0x910, 0x10, 15, 1),
+ PINS_FIELD16(180, 180, 0x920, 0x10, 0, 1),
+ PINS_FIELD16(181, 184, 0x920, 0x10, 1, 1),
+ PINS_FIELD16(185, 191, 0x920, 0x10, 2, 1),
+ PINS_FIELD16(192, 192, 0x920, 0x10, 8, 1),
+ PINS_FIELD16(193, 194, 0x920, 0x10, 9, 1),
+ PINS_FIELD16(195, 196, 0x920, 0x10, 8, 1),
+};
+
+static const struct mtk_pin_field_calc mt6795_pin_smt_range[] = {
+ PINS_FIELD16(0, 4, 0x930, 0x10, 1, 1),
+ PINS_FIELD16(5, 9, 0x930, 0x10, 2, 1),
+ PINS_FIELD16(10, 15, 0x930, 0x10, 10, 1),
+ PINS_FIELD16(16, 16, 0x930, 0x10, 2, 1),
+ PINS_FIELD16(17, 19, 0x940, 0x10, 3, 1),
+ PINS_FIELD16(20, 22, 0x940, 0x10, 4, 1),
+ PINS_FIELD16(23, 26, 0xce0, 0x10, 13, 1),
+ PINS_FIELD16(27, 27, 0xcc0, 0x10, 13, 1),
+ PINS_FIELD16(28, 28, 0xcd0, 0x10, 13, 1),
+ PINS_FIELD16(29, 32, 0x930, 0x10, 3, 1),
+ PINS_FIELD16(33, 33, 0x930, 0x10, 4, 1),
+ PINS_FIELD16(34, 36, 0x930, 0x10, 5, 1),
+ PINS_FIELD16(37, 38, 0x930, 0x10, 6, 1),
+ PINS_FIELD16(39, 39, 0x930, 0x10, 7, 1),
+ PINS_FIELD16(40, 40, 0x930, 0x10, 8, 1),
+ PINS_FIELD16(41, 42, 0x930, 0x10, 9, 1),
+ PINS_FIELD16(43, 46, 0x930, 0x10, 11, 1),
+ PINS_FIELD16(47, 61, 0x950, 0x10, 3, 1),
+ PINS_FIELD16(62, 66, 0x950, 0x10, 4, 1),
+ PINS_FIELD16(67, 67, 0x950, 0x10, 3, 1),
+ PINS_FIELD16(68, 72, 0x950, 0x10, 5, 1),
+ PINS_FIELD16(73, 77, 0x950, 0x10, 6, 1),
+ PINS_FIELD16(78, 91, 0x950, 0x10, 7, 1),
+ PINS_FIELD16(92, 92, 0x930, 0x10, 13, 1),
+ PINS_FIELD16(93, 95, 0x930, 0x10, 14, 1),
+ PINS_FIELD16(96, 99, 0x930, 0x10, 15, 1),
+ PINS_FIELD16(100, 103, 0xca0, 0x10, 13, 1),
+ PINS_FIELD16(104, 104, 0xc80, 0x10, 13, 1),
+ PINS_FIELD16(105, 105, 0xc90, 0x10, 13, 1),
+ PINS_FIELD16(106, 107, 0x940, 0x10, 0, 1),
+ PINS_FIELD16(108, 112, 0x940, 0x10, 1, 1),
+ PINS_FIELD16(113, 116, 0x940, 0x10, 2, 1),
+ PINS_FIELD16(117, 118, 0x940, 0x10, 5, 1),
+ PINS_FIELD16(119, 124, 0x940, 0x10, 6, 1),
+ PINS_FIELD16(125, 126, 0x940, 0x10, 7, 1),
+ PINS_FIELD16(129, 129, 0x940, 0x10, 8, 1),
+ PINS_FIELD16(130, 131, 0x940, 0x10, 9, 1),
+ PINS_FIELD16(132, 135, 0x940, 0x10, 8, 1),
+ PINS_FIELD16(136, 137, 0x940, 0x10, 7, 1),
+ PINS_FIELD16(154, 161, 0xc20, 0x10, 13, 1),
+ PINS_FIELD16(162, 162, 0xc10, 0x10, 13, 1),
+ PINS_FIELD16(163, 163, 0xc00, 0x10, 13, 1),
+ PINS_FIELD16(164, 164, 0xd10, 0x10, 13, 1),
+ PINS_FIELD16(165, 165, 0xd00, 0x10, 13, 1),
+ PINS_FIELD16(166, 169, 0x940, 0x10, 14, 1),
+ PINS_FIELD16(176, 179, 0x940, 0x10, 15, 1),
+ PINS_FIELD16(180, 180, 0x950, 0x10, 0, 1),
+ PINS_FIELD16(181, 184, 0x950, 0x10, 1, 1),
+ PINS_FIELD16(185, 191, 0x950, 0x10, 2, 1),
+ PINS_FIELD16(192, 192, 0x950, 0x10, 8, 1),
+ PINS_FIELD16(193, 194, 0x950, 0x10, 9, 1),
+ PINS_FIELD16(195, 196, 0x950, 0x10, 8, 1),
+};
+
+
+static const struct mtk_pin_field_calc mt6795_pin_pupd_range[] = {
+ /* KROW */
+ PIN_FIELD16(119, 119, 0xe00, 0x10, 2, 1), /* KROW0 */
+ PIN_FIELD16(120, 120, 0xe00, 0x10, 6, 1), /* KROW1 */
+ PIN_FIELD16(121, 121, 0xe00, 0x10, 10, 1), /* KROW2 */
+ PIN_FIELD16(122, 122, 0xe10, 0x10, 2, 1), /* KCOL0 */
+ PIN_FIELD16(123, 123, 0xe10, 0x10, 6, 1), /* KCOL1 */
+ PIN_FIELD16(124, 124, 0xe10, 0x10, 10, 1), /* KCOL2 */
+
+ /* DPI */
+ PIN_FIELD16(138, 138, 0xd50, 0x10, 2, 1), /* CK */
+ PIN_FIELD16(139, 139, 0xd60, 0x10, 1, 1), /* DE */
+ PIN_FIELD16(140, 140, 0xd70, 0x10, 1, 1), /* data0 */
+ PIN_FIELD16(141, 141, 0xd70, 0x10, 3, 1), /* data1 */
+ PIN_FIELD16(142, 142, 0xd70, 0x10, 5, 1), /* data2 */
+ PIN_FIELD16(143, 143, 0xd70, 0x10, 7, 1), /* data3 */
+ PIN_FIELD16(144, 144, 0xd50, 0x10, 5, 1), /* data4 */
+ PIN_FIELD16(145, 145, 0xd50, 0x10, 7, 1), /* data5 */
+ PIN_FIELD16(146, 146, 0xd60, 0x10, 7, 1), /* data6 */
+ PIN_FIELD16(147, 147, 0xed0, 0x10, 6, 1), /* data7 */
+ PIN_FIELD16(148, 148, 0xed0, 0x10, 8, 1), /* data8 */
+ PIN_FIELD16(149, 149, 0xed0, 0x10, 10, 1), /* data9 */
+ PIN_FIELD16(150, 150, 0xed0, 0x10, 12, 1), /* data10 */
+ PIN_FIELD16(151, 151, 0xed0, 0x10, 14, 1), /* data11 */
+ PIN_FIELD16(152, 152, 0xd60, 0x10, 3, 1), /* hsync */
+ PIN_FIELD16(153, 153, 0xd60, 0x10, 5, 1), /* vsync */
+
+ /* MSDC0 */
+ PIN_FIELD16(154, 154, 0xc20, 0x10, 2, 1), /* DATA 0-7 */
+ PIN_FIELD16(155, 155, 0xc20, 0x10, 2, 1), /* DATA 0-7 */
+ PIN_FIELD16(156, 156, 0xc20, 0x10, 2, 1), /* DATA 0-7 */
+ PIN_FIELD16(157, 157, 0xc20, 0x10, 2, 1), /* DATA 0-7 */
+ PIN_FIELD16(158, 158, 0xc20, 0x10, 2, 1), /* DATA 0-7 */
+ PIN_FIELD16(159, 159, 0xc20, 0x10, 2, 1), /* DATA 0-7 */
+ PIN_FIELD16(160, 160, 0xc20, 0x10, 2, 1), /* DATA 0-7 */
+ PIN_FIELD16(161, 161, 0xc20, 0x10, 2, 1), /* DATA 0-7 */
+ PIN_FIELD16(162, 162, 0xc10, 0x10, 2, 1), /* CMD */
+ PIN_FIELD16(163, 163, 0xc00, 0x10, 2, 1), /* CLK */
+ PIN_FIELD16(164, 164, 0xd10, 0x10, 2, 1), /* DS */
+ PIN_FIELD16(165, 165, 0xd00, 0x10, 2, 1), /* RST */
+
+ /* MSDC1 */
+ PIN_FIELD16(170, 170, 0xc50, 0x10, 2, 1), /* CMD */
+ PIN_FIELD16(171, 171, 0xd20, 0x10, 2, 1), /* DAT0 */
+ PIN_FIELD16(172, 172, 0xd20, 0x10, 6, 1), /* DAT1 */
+ PIN_FIELD16(173, 173, 0xd20, 0x10, 10, 1), /* DAT2 */
+ PIN_FIELD16(174, 174, 0xd20, 0x10, 14, 1), /* DAT3 */
+ PIN_FIELD16(175, 175, 0xc40, 0x10, 2, 1), /* CLK */
+
+ /* MSDC2 */
+ PIN_FIELD16(100, 100, 0xd30, 0x10, 2, 1), /* DAT0 */
+ PIN_FIELD16(101, 101, 0xd30, 0x10, 6, 1), /* DAT1 */
+ PIN_FIELD16(102, 102, 0xd30, 0x10, 10, 1), /* DAT2 */
+ PIN_FIELD16(103, 103, 0xd30, 0x10, 14, 1), /* DAT3 */
+ PIN_FIELD16(104, 104, 0xc80, 0x10, 2, 1), /* CLK */
+ PIN_FIELD16(105, 105, 0xc90, 0x10, 2, 1), /* CMD */
+
+ /* MSDC3 */
+ PIN_FIELD16(23, 23, 0xd40, 0x10, 2, 1), /* DAT0 */
+ PIN_FIELD16(24, 24, 0xd40, 0x10, 6, 5), /* DAT1 */
+ PIN_FIELD16(25, 25, 0xd40, 0x10, 10, 9), /* DAT2 */
+ PIN_FIELD16(26, 26, 0xd40, 0x10, 14, 13), /* DAT3 */
+ PIN_FIELD16(27, 27, 0xcc0, 0x10, 2, 1), /* CLK */
+ PIN_FIELD16(28, 28, 0xcd0, 0x10, 2, 1) /* CMD */
+};
+
+static const struct mtk_pin_field_calc mt6795_pin_r0_range[] = {
+ PIN_FIELD16(23, 23, 0xd40, 0x10, 0, 1),
+ PIN_FIELD16(24, 24, 0xd40, 0x10, 4, 1),
+ PIN_FIELD16(25, 25, 0xd40, 0x10, 8, 1),
+ PIN_FIELD16(26, 26, 0xd40, 0x10, 12, 1),
+ PIN_FIELD16(27, 27, 0xcc0, 0x10, 0, 1),
+ PIN_FIELD16(28, 28, 0xcd0, 0x10, 0, 1),
+ PIN_FIELD16(100, 100, 0xd30, 0x10, 0, 1),
+ PIN_FIELD16(101, 101, 0xd30, 0x10, 4, 1),
+ PIN_FIELD16(102, 102, 0xd30, 0x10, 8, 1),
+ PIN_FIELD16(103, 103, 0xd30, 0x10, 12, 1),
+ PIN_FIELD16(104, 104, 0xc80, 0x10, 0, 1),
+ PIN_FIELD16(105, 105, 0xc90, 0x10, 0, 1),
+ PIN_FIELD16(119, 119, 0xe00, 0x10, 0, 1),
+ PIN_FIELD16(120, 120, 0xe00, 0x10, 4, 1),
+ PIN_FIELD16(121, 121, 0xe00, 0x10, 8, 1),
+ PIN_FIELD16(122, 122, 0xe10, 0x10, 0, 1),
+ PIN_FIELD16(123, 123, 0xe10, 0x10, 4, 1),
+ PIN_FIELD16(124, 124, 0xe10, 0x10, 8, 1),
+ PIN_FIELD16(138, 138, 0xd50, 0x10, 0, 1),
+ PIN_FIELD16(139, 139, 0xd60, 0x10, 0, 1),
+ PIN_FIELD16(140, 140, 0xd70, 0x10, 0, 1),
+ PIN_FIELD16(141, 141, 0xd70, 0x10, 1, 1),
+ PIN_FIELD16(142, 142, 0xd70, 0x10, 3, 1),
+ PIN_FIELD16(143, 143, 0xd70, 0x10, 5, 1),
+ PIN_FIELD16(144, 144, 0xd50, 0x10, 3, 1),
+ PIN_FIELD16(145, 145, 0xd50, 0x10, 5, 1),
+ PIN_FIELD16(146, 146, 0xd60, 0x10, 5, 1),
+ PIN_FIELD16(147, 147, 0xed0, 0x10, 4, 1),
+ PIN_FIELD16(148, 148, 0xed0, 0x10, 6, 1),
+ PIN_FIELD16(149, 149, 0xed0, 0x10, 8, 1),
+ PIN_FIELD16(150, 150, 0xed0, 0x10, 10, 1),
+ PIN_FIELD16(151, 151, 0xed0, 0x10, 12, 1),
+ PIN_FIELD16(152, 152, 0xd60, 0x10, 1, 1),
+ PIN_FIELD16(153, 153, 0xd60, 0x10, 3, 1),
+ PIN_FIELD16(154, 155, 0xc20, 0x10, 0, 1),
+ PIN_FIELD16(155, 156, 0xc20, 0x10, 0, 1),
+ PIN_FIELD16(156, 157, 0xc20, 0x10, 0, 1),
+ PIN_FIELD16(157, 158, 0xc20, 0x10, 0, 1),
+ PIN_FIELD16(158, 159, 0xc20, 0x10, 0, 1),
+ PIN_FIELD16(159, 160, 0xc20, 0x10, 0, 1),
+ PIN_FIELD16(160, 161, 0xc20, 0x10, 0, 1),
+ PIN_FIELD16(161, 161, 0xc20, 0x10, 0, 1),
+ PIN_FIELD16(162, 162, 0xc10, 0x10, 0, 1),
+ PIN_FIELD16(163, 163, 0xc00, 0x10, 0, 1),
+ PIN_FIELD16(164, 164, 0xd10, 0x10, 0, 1),
+ PIN_FIELD16(165, 165, 0xd00, 0x10, 0, 1),
+ PIN_FIELD16(170, 170, 0xc50, 0x10, 0, 1),
+ PIN_FIELD16(171, 171, 0xd20, 0x10, 0, 1),
+ PIN_FIELD16(172, 172, 0xd20, 0x10, 4, 1),
+ PIN_FIELD16(173, 173, 0xd20, 0x10, 8, 1),
+ PIN_FIELD16(174, 174, 0xd20, 0x10, 12, 1),
+ PIN_FIELD16(175, 175, 0xc40, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6795_pin_r1_range[] = {
+ PIN_FIELD16(23, 23, 0xd40, 0x10, 1, 1),
+ PIN_FIELD16(24, 24, 0xd40, 0x10, 5, 1),
+ PIN_FIELD16(25, 25, 0xd40, 0x10, 9, 1),
+ PIN_FIELD16(26, 26, 0xd40, 0x10, 13, 1),
+ PIN_FIELD16(27, 27, 0xcc0, 0x10, 1, 1),
+ PIN_FIELD16(28, 28, 0xcd0, 0x10, 1, 1),
+ PIN_FIELD16(100, 100, 0xd30, 0x10, 1, 1),
+ PIN_FIELD16(101, 101, 0xd30, 0x10, 5, 1),
+ PIN_FIELD16(102, 102, 0xd30, 0x10, 9, 1),
+ PIN_FIELD16(103, 103, 0xd30, 0x10, 13, 1),
+ PIN_FIELD16(104, 104, 0xc80, 0x10, 1, 1),
+ PIN_FIELD16(105, 105, 0xc90, 0x10, 1, 1),
+ PIN_FIELD16(119, 119, 0xe00, 0x10, 1, 1),
+ PIN_FIELD16(120, 120, 0xe00, 0x10, 5, 1),
+ PIN_FIELD16(121, 121, 0xe00, 0x10, 9, 1),
+ PIN_FIELD16(122, 122, 0xe10, 0x10, 1, 1),
+ PIN_FIELD16(123, 123, 0xe10, 0x10, 5, 1),
+ PIN_FIELD16(124, 124, 0xe10, 0x10, 9, 1),
+ PIN_FIELD16(138, 138, 0xd50, 0x10, 1, 1),
+ PIN_FIELD16(139, 139, 0xd60, 0x10, 0, 1),
+ PIN_FIELD16(140, 140, 0xd70, 0x10, 0, 1),
+ PIN_FIELD16(141, 141, 0xd70, 0x10, 2, 1),
+ PIN_FIELD16(142, 142, 0xd70, 0x10, 4, 1),
+ PIN_FIELD16(143, 143, 0xd70, 0x10, 6, 1),
+ PIN_FIELD16(144, 144, 0xd50, 0x10, 4, 1),
+ PIN_FIELD16(145, 145, 0xd50, 0x10, 6, 1),
+ PIN_FIELD16(146, 146, 0xd60, 0x10, 6, 1),
+ PIN_FIELD16(147, 147, 0xed0, 0x10, 5, 1),
+ PIN_FIELD16(148, 148, 0xed0, 0x10, 7, 1),
+ PIN_FIELD16(149, 149, 0xed0, 0x10, 9, 1),
+ PIN_FIELD16(150, 150, 0xed0, 0x10, 11, 1),
+ PIN_FIELD16(151, 151, 0xed0, 0x10, 13, 1),
+ PIN_FIELD16(152, 152, 0xd60, 0x10, 2, 1),
+ PIN_FIELD16(153, 153, 0xd60, 0x10, 4, 1),
+ PIN_FIELD16(154, 155, 0xc20, 0x10, 1, 1),
+ PIN_FIELD16(155, 156, 0xc20, 0x10, 1, 1),
+ PIN_FIELD16(156, 157, 0xc20, 0x10, 1, 1),
+ PIN_FIELD16(157, 158, 0xc20, 0x10, 1, 1),
+ PIN_FIELD16(158, 159, 0xc20, 0x10, 1, 1),
+ PIN_FIELD16(159, 160, 0xc20, 0x10, 1, 1),
+ PIN_FIELD16(160, 161, 0xc20, 0x10, 1, 1),
+ PIN_FIELD16(161, 161, 0xc20, 0x10, 1, 1),
+ PIN_FIELD16(162, 162, 0xc10, 0x10, 1, 1),
+ PIN_FIELD16(163, 163, 0xc00, 0x10, 1, 1),
+ PIN_FIELD16(164, 164, 0xd10, 0x10, 1, 1),
+ PIN_FIELD16(165, 165, 0xd00, 0x10, 1, 1),
+ PIN_FIELD16(170, 170, 0xc50, 0x10, 1, 1),
+ PIN_FIELD16(171, 171, 0xd20, 0x10, 1, 1),
+ PIN_FIELD16(172, 172, 0xd20, 0x10, 5, 1),
+ PIN_FIELD16(173, 173, 0xd20, 0x10, 9, 1),
+ PIN_FIELD16(174, 174, 0xd20, 0x10, 13, 1),
+ PIN_FIELD16(175, 175, 0xc40, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt6795_pin_drv_range[] = {
+ PINS_FIELD16(0, 4, 0xb30, 0x10, 13, 2),
+ PINS_FIELD16(5, 9, 0xb30, 0x10, 1, 2),
+ PINS_FIELD16(10, 15, 0xb30, 0x10, 5, 2),
+ PIN_FIELD16(16, 16, 0xb30, 0x10, 1, 2),
+ PINS_FIELD16(17, 19, 0xb70, 0x10, 5, 2),
+ PINS_FIELD16(20, 22, 0xb70, 0x10, 9, 2),
+ PINS_FIELD16(23, 26, 0xce0, 0x10, 8, 2),
+ PIN_FIELD16(27, 27, 0xcc0, 0x10, 8, 2),
+ PIN_FIELD16(28, 28, 0xcd0, 0x10, 8, 2),
+ PINS_FIELD16(29, 32, 0xb80, 0x10, 13, 2),
+ PIN_FIELD16(33, 33, 0xb10, 0x10, 13, 2),
+ PINS_FIELD16(34, 36, 0xb10, 0x10, 9, 2),
+ PINS_FIELD16(37, 38, 0xb10, 0x10, 5, 2),
+ PIN_FIELD16(39, 39, 0xb20, 0x10, 1, 2),
+ PIN_FIELD16(40, 40, 0xb20, 0x10, 5, 2),
+ PINS_FIELD16(41, 42, 0xb20, 0x10, 9, 2),
+ PINS_FIELD16(47, 61, 0xb00, 0x10, 9, 2),
+ PINS_FIELD16(62, 66, 0xb70, 0x10, 1, 2),
+ PINS_FIELD16(67, 67, 0xb00, 0x10, 9, 2),
+ PINS_FIELD16(68, 72, 0xb60, 0x10, 13, 2),
+ PINS_FIELD16(73, 77, 0xb40, 0x10, 13, 2),
+ PIN_FIELD16(78, 78, 0xb00, 0x10, 12, 3),
+ PINS_FIELD16(79, 91, 0xb00, 0x10, 13, 2),
+ PIN_FIELD16(92, 92, 0xb60, 0x10, 5, 2),
+ PINS_FIELD16(93, 95, 0xb60, 0x10, 1, 2),
+ PINS_FIELD16(96, 99, 0xb80, 0x10, 9, 2),
+ PINS_FIELD16(100, 103, 0xca0, 0x10, 8, 2),
+ PIN_FIELD16(104, 104, 0xc80, 0x10, 8, 2),
+ PIN_FIELD16(105, 105, 0xc90, 0x10, 8, 2),
+ PINS_FIELD16(106, 107, 0xb50, 0x10, 9, 2),
+ PINS_FIELD16(108, 112, 0xb50, 0x10, 1, 2),
+ PINS_FIELD16(113, 116, 0xb80, 0x10, 5, 2),
+ PINS_FIELD16(117, 118, 0xb90, 0x10, 1, 2),
+ PINS_FIELD16(119, 124, 0xb50, 0x10, 5, 2),
+ PIN_FIELD16(127, 127, 0xb70, 0x10, 5, 2),
+ PIN_FIELD16(128, 128, 0xb70, 0x10, 9, 2),
+ PIN_FIELD16(129, 129, 0xb40, 0x10, 9, 2),
+ PINS_FIELD16(130, 131, 0xb40, 0x10, 13, 2),
+ PINS_FIELD16(132, 135, 0xb40, 0x10, 9, 2),
+ PIN_FIELD16(138, 138, 0xb50, 0x10, 8, 2),
+ PIN_FIELD16(139, 139, 0xb60, 0x10, 8, 2),
+ PINS_FIELD16(140, 151, 0xb70, 0x10, 8, 2),
+ PINS_FIELD16(152, 153, 0xb60, 0x10, 8, 2),
+ PINS_FIELD16(153, 153, 0xb60, 0x10, 8, 2),
+ PINS_FIELD16(154, 161, 0xc20, 0x10, 8, 2),
+ PIN_FIELD16(162, 162, 0xc10, 0x10, 8, 2),
+ PIN_FIELD16(163, 163, 0xc00, 0x10, 8, 2),
+ PIN_FIELD16(164, 164, 0xd10, 0x10, 8, 2),
+ PIN_FIELD16(165, 165, 0xd00, 0x10, 8, 2),
+ PINS_FIELD16(166, 169, 0xb80, 0x10, 1, 2),
+ PINS_FIELD16(170, 173, 0xc60, 0x10, 8, 2),
+ PIN_FIELD16(174, 174, 0xc40, 0x10, 8, 2),
+ PIN_FIELD16(175, 175, 0xc50, 0x10, 8, 2),
+ PINS_FIELD16(176, 179, 0xb70, 0x10, 13, 2),
+ PIN_FIELD16(180, 180, 0xb00, 0x10, 5, 2),
+ PINS_FIELD16(181, 184, 0xb00, 0x10, 1, 2),
+ PINS_FIELD16(185, 191, 0xb60, 0x10, 9, 2),
+ PIN_FIELD16(192, 192, 0xb40, 0x10, 1, 2),
+ PINS_FIELD16(193, 194, 0xb40, 0x10, 5, 2),
+ PINS_FIELD16(195, 196, 0xb40, 0x10, 1, 2),
+};
+
+static const struct mtk_pin_field_calc mt6795_pin_sr_range[] = {
+ PINS_FIELD16(0, 4, 0xb30, 0x10, 15, 1),
+ PINS_FIELD16(5, 9, 0xb30, 0x10, 3, 1),
+ PINS_FIELD16(10, 15, 0xb30, 0x10, 7, 1),
+ PIN_FIELD16(16, 16, 0xb30, 0x10, 5, 1),
+ PINS_FIELD16(23, 26, 0xce0, 0x10, 12, 1),
+ PIN_FIELD16(27, 27, 0xcc0, 0x10, 12, 1),
+ PIN_FIELD16(28, 28, 0xcd0, 0x10, 12, 1),
+ PINS_FIELD16(29, 32, 0xb80, 0x10, 15, 1),
+ PIN_FIELD16(33, 33, 0xb10, 0x10, 15, 1),
+ PINS_FIELD16(34, 36, 0xb10, 0x10, 11, 1),
+ PINS_FIELD16(37, 38, 0xb10, 0x10, 7, 1),
+ PIN_FIELD16(39, 39, 0xb20, 0x10, 3, 1),
+ PIN_FIELD16(40, 40, 0xb20, 0x10, 7, 1),
+ PINS_FIELD16(41, 42, 0xb20, 0x10, 11, 1),
+ PINS_FIELD16(47, 61, 0xb00, 0x10, 11, 1),
+ PINS_FIELD16(62, 66, 0xb70, 0x10, 3, 1),
+ PINS_FIELD16(67, 67, 0xb00, 0x10, 11, 1),
+ PINS_FIELD16(68, 72, 0xb60, 0x10, 15, 1),
+ PINS_FIELD16(73, 77, 0xb40, 0x10, 15, 1),
+ PIN_FIELD16(78, 78, 0xb00, 0x10, 15, 3),
+ PINS_FIELD16(79, 91, 0xb00, 0x10, 15, 1),
+ PIN_FIELD16(92, 92, 0xb60, 0x10, 7, 1),
+ PINS_FIELD16(93, 95, 0xb60, 0x10, 3, 1),
+ PINS_FIELD16(96, 99, 0xb80, 0x10, 11, 1),
+ PINS_FIELD16(100, 103, 0xca0, 0x10, 12, 1),
+ PIN_FIELD16(104, 104, 0xc80, 0x10, 12, 1),
+ PIN_FIELD16(105, 105, 0xc90, 0x10, 12, 1),
+ PINS_FIELD16(106, 107, 0xb50, 0x10, 11, 1),
+ PINS_FIELD16(108, 112, 0xb50, 0x10, 3, 1),
+ PINS_FIELD16(113, 116, 0xb80, 0x10, 7, 1),
+ PINS_FIELD16(117, 118, 0xb90, 0x10, 3, 1),
+ PINS_FIELD16(119, 124, 0xb50, 0x10, 7, 1),
+ PIN_FIELD16(127, 127, 0xb70, 0x10, 7, 1),
+ PIN_FIELD16(128, 128, 0xb70, 0x10, 11, 1),
+ PIN_FIELD16(129, 129, 0xb40, 0x10, 11, 1),
+ PINS_FIELD16(130, 131, 0xb40, 0x10, 15, 1),
+ PINS_FIELD16(132, 135, 0xb40, 0x10, 11, 1),
+ PIN_FIELD16(138, 138, 0xb50, 0x10, 12, 1),
+ PIN_FIELD16(139, 139, 0xb60, 0x10, 12, 1),
+ PINS_FIELD16(140, 151, 0xb70, 0x10, 12, 1),
+ PINS_FIELD16(152, 153, 0xb60, 0x10, 12, 1),
+ PINS_FIELD16(153, 153, 0xb60, 0x10, 12, 1),
+ PINS_FIELD16(154, 161, 0xc20, 0x10, 12, 1),
+ PIN_FIELD16(162, 162, 0xc10, 0x10, 12, 1),
+ PIN_FIELD16(163, 163, 0xc00, 0x10, 12, 1),
+ PIN_FIELD16(164, 164, 0xd10, 0x10, 12, 1),
+ PIN_FIELD16(165, 165, 0xd00, 0x10, 12, 1),
+ PINS_FIELD16(166, 169, 0xb80, 0x10, 3, 1),
+ PINS_FIELD16(170, 173, 0xc60, 0x10, 12, 1),
+ PIN_FIELD16(174, 174, 0xc40, 0x10, 12, 1),
+ PIN_FIELD16(175, 175, 0xc50, 0x10, 12, 1),
+ PINS_FIELD16(176, 179, 0xb70, 0x10, 15, 1),
+ PIN_FIELD16(180, 180, 0xb00, 0x10, 7, 1),
+ PINS_FIELD16(181, 184, 0xb00, 0x10, 3, 1),
+ PINS_FIELD16(185, 191, 0xb60, 0x10, 11, 1),
+ PIN_FIELD16(192, 192, 0xb40, 0x10, 3, 1),
+ PINS_FIELD16(193, 194, 0xb40, 0x10, 7, 1),
+ PINS_FIELD16(195, 196, 0xb40, 0x10, 3, 1),
+};
+
+static const struct mtk_pin_reg_calc mt6795_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt6795_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt6795_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt6795_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt6795_pin_do_range),
+ [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt6795_pin_sr_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt6795_pin_smt_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt6795_pin_drv_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt6795_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt6795_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt6795_pin_r1_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt6795_pin_ies_range),
+ [PINCTRL_PIN_REG_PULLEN] = MTK_RANGE(mt6795_pin_pullen_range),
+ [PINCTRL_PIN_REG_PULLSEL] = MTK_RANGE(mt6795_pin_pullsel_range),
+};
+
+static const struct mtk_eint_hw mt6795_eint_hw = {
+ .port_mask = 7,
+ .ports = 7,
+ .ap_num = 224,
+ .db_cnt = 32,
+};
+
+static const unsigned int mt6795_pull_type[] = {
+ MTK_PULL_PULLSEL_TYPE,/*0*/ MTK_PULL_PULLSEL_TYPE,/*1*/
+ MTK_PULL_PULLSEL_TYPE,/*2*/ MTK_PULL_PULLSEL_TYPE,/*3*/
+ MTK_PULL_PULLSEL_TYPE,/*4*/ MTK_PULL_PULLSEL_TYPE,/*5*/
+ MTK_PULL_PULLSEL_TYPE,/*6*/ MTK_PULL_PULLSEL_TYPE,/*7*/
+ MTK_PULL_PULLSEL_TYPE,/*8*/ MTK_PULL_PULLSEL_TYPE,/*9*/
+ MTK_PULL_PULLSEL_TYPE,/*10*/ MTK_PULL_PULLSEL_TYPE,/*11*/
+ MTK_PULL_PULLSEL_TYPE,/*12*/ MTK_PULL_PULLSEL_TYPE,/*13*/
+ MTK_PULL_PULLSEL_TYPE,/*14*/ MTK_PULL_PULLSEL_TYPE,/*15*/
+ MTK_PULL_PULLSEL_TYPE,/*16*/ MTK_PULL_PULLSEL_TYPE,/*17*/
+ MTK_PULL_PULLSEL_TYPE,/*18*/ MTK_PULL_PULLSEL_TYPE,/*19*/
+ MTK_PULL_PULLSEL_TYPE,/*20*/ MTK_PULL_PULLSEL_TYPE,/*21*/
+ MTK_PULL_PULLSEL_TYPE,/*22*/ MTK_PULL_PUPD_R1R0_TYPE,/*23*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*24*/ MTK_PULL_PUPD_R1R0_TYPE,/*25*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*26*/ MTK_PULL_PUPD_R1R0_TYPE,/*27*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*28*/ MTK_PULL_PULLSEL_TYPE,/*29*/
+ MTK_PULL_PULLSEL_TYPE,/*30*/ MTK_PULL_PULLSEL_TYPE,/*31*/
+ MTK_PULL_PULLSEL_TYPE,/*32*/ MTK_PULL_PULLSEL_TYPE,/*33*/
+ MTK_PULL_PULLSEL_TYPE,/*34*/ MTK_PULL_PULLSEL_TYPE,/*35*/
+ MTK_PULL_PULLSEL_TYPE,/*36*/ MTK_PULL_PULLSEL_TYPE,/*37*/
+ MTK_PULL_PULLSEL_TYPE,/*38*/ MTK_PULL_PULLSEL_TYPE,/*39*/
+ MTK_PULL_PULLSEL_TYPE,/*40*/ MTK_PULL_PULLSEL_TYPE,/*41*/
+ MTK_PULL_PULLSEL_TYPE,/*42*/ MTK_PULL_PULLSEL_TYPE,/*43*/
+ MTK_PULL_PULLSEL_TYPE,/*44*/ MTK_PULL_PULLSEL_TYPE,/*45*/
+ MTK_PULL_PULLSEL_TYPE,/*46*/ MTK_PULL_PULLSEL_TYPE,/*47*/
+ MTK_PULL_PULLSEL_TYPE,/*48*/ MTK_PULL_PULLSEL_TYPE,/*49*/
+ MTK_PULL_PULLSEL_TYPE,/*50*/ MTK_PULL_PULLSEL_TYPE,/*51*/
+ MTK_PULL_PULLSEL_TYPE,/*52*/ MTK_PULL_PULLSEL_TYPE,/*53*/
+ MTK_PULL_PULLSEL_TYPE,/*54*/ MTK_PULL_PULLSEL_TYPE,/*55*/
+ MTK_PULL_PULLSEL_TYPE,/*56*/ MTK_PULL_PULLSEL_TYPE,/*57*/
+ MTK_PULL_PULLSEL_TYPE,/*58*/ MTK_PULL_PULLSEL_TYPE,/*59*/
+ MTK_PULL_PULLSEL_TYPE,/*60*/ MTK_PULL_PULLSEL_TYPE,/*61*/
+ MTK_PULL_PULLSEL_TYPE,/*62*/ MTK_PULL_PULLSEL_TYPE,/*63*/
+ MTK_PULL_PULLSEL_TYPE,/*64*/ MTK_PULL_PULLSEL_TYPE,/*65*/
+ MTK_PULL_PULLSEL_TYPE,/*66*/ MTK_PULL_PUPD_R1R0_TYPE,/*67*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*68*/ MTK_PULL_PUPD_R1R0_TYPE,/*69*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*70*/ MTK_PULL_PUPD_R1R0_TYPE,/*71*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*72*/ MTK_PULL_PUPD_R1R0_TYPE,/*73*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*74*/ MTK_PULL_PUPD_R1R0_TYPE,/*75*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*76*/ MTK_PULL_PUPD_R1R0_TYPE,/*77*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*78*/ MTK_PULL_PUPD_R1R0_TYPE,/*79*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*80*/ MTK_PULL_PUPD_R1R0_TYPE,/*81*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*82*/ MTK_PULL_PULLSEL_TYPE,/*83*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*84*/ MTK_PULL_PUPD_R1R0_TYPE,/*85*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*86*/ MTK_PULL_PUPD_R1R0_TYPE,/*87*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*88*/ MTK_PULL_PUPD_R1R0_TYPE,/*89*/
+ MTK_PULL_PULLSEL_TYPE,/*90*/ MTK_PULL_PULLSEL_TYPE,/*91*/
+ MTK_PULL_PULLSEL_TYPE,/*92*/ MTK_PULL_PULLSEL_TYPE,/*93*/
+ MTK_PULL_PULLSEL_TYPE,/*94*/ MTK_PULL_PULLSEL_TYPE,/*95*/
+ MTK_PULL_PULLSEL_TYPE,/*96*/ MTK_PULL_PULLSEL_TYPE,/*97*/
+ MTK_PULL_PULLSEL_TYPE,/*98*/ MTK_PULL_PULLSEL_TYPE,/*99*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*100*/ MTK_PULL_PUPD_R1R0_TYPE,/*101*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*102*/ MTK_PULL_PUPD_R1R0_TYPE,/*103*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*104*/ MTK_PULL_PUPD_R1R0_TYPE,/*105*/
+ MTK_PULL_PULLSEL_TYPE,/*106*/ MTK_PULL_PULLSEL_TYPE,/*107*/
+ MTK_PULL_PULLSEL_TYPE,/*108*/ MTK_PULL_PULLSEL_TYPE,/*109*/
+ MTK_PULL_PULLSEL_TYPE,/*110*/ MTK_PULL_PULLSEL_TYPE,/*111*/
+ MTK_PULL_PULLSEL_TYPE,/*112*/ MTK_PULL_PULLSEL_TYPE,/*113*/
+ MTK_PULL_PULLSEL_TYPE,/*114*/ MTK_PULL_PULLSEL_TYPE,/*115*/
+ MTK_PULL_PULLSEL_TYPE,/*116*/ MTK_PULL_PULLSEL_TYPE,/*117*/
+ MTK_PULL_PULLSEL_TYPE,/*118*/ MTK_PULL_PUPD_R1R0_TYPE,/*119*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*120*/ MTK_PULL_PUPD_R1R0_TYPE,/*121*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*122*/ MTK_PULL_PUPD_R1R0_TYPE,/*123*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*124*/ MTK_PULL_PULLSEL_TYPE,/*125*/
+ MTK_PULL_PULLSEL_TYPE,/*126*/ MTK_PULL_PULLSEL_TYPE,/*127*/
+ MTK_PULL_PULLSEL_TYPE,/*128*/ MTK_PULL_PULLSEL_TYPE,/*129*/
+ MTK_PULL_PULLSEL_TYPE,/*130*/ MTK_PULL_PULLSEL_TYPE,/*131*/
+ MTK_PULL_PULLSEL_TYPE,/*132*/ MTK_PULL_PULLSEL_TYPE,/*133*/
+ MTK_PULL_PULLSEL_TYPE,/*134*/ MTK_PULL_PULLSEL_TYPE,/*135*/
+ MTK_PULL_PULLSEL_TYPE,/*136*/ MTK_PULL_PULLSEL_TYPE,/*137*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*138*/ MTK_PULL_PUPD_R1R0_TYPE,/*139*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*140*/ MTK_PULL_PUPD_R1R0_TYPE,/*141*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*142*/ MTK_PULL_PUPD_R1R0_TYPE,/*143*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*144*/ MTK_PULL_PUPD_R1R0_TYPE,/*145*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*146*/ MTK_PULL_PUPD_R1R0_TYPE,/*147*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*148*/ MTK_PULL_PUPD_R1R0_TYPE,/*149*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*150*/ MTK_PULL_PUPD_R1R0_TYPE,/*151*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*152*/ MTK_PULL_PUPD_R1R0_TYPE,/*153*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*154*/ MTK_PULL_PUPD_R1R0_TYPE,/*155*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*156*/ MTK_PULL_PUPD_R1R0_TYPE,/*157*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*158*/ MTK_PULL_PUPD_R1R0_TYPE,/*159*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*160*/ MTK_PULL_PUPD_R1R0_TYPE,/*161*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*162*/ MTK_PULL_PUPD_R1R0_TYPE,/*163*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*164*/ MTK_PULL_PUPD_R1R0_TYPE,/*165*/
+ MTK_PULL_PULLSEL_TYPE,/*166*/ MTK_PULL_PULLSEL_TYPE,/*167*/
+ MTK_PULL_PULLSEL_TYPE,/*168*/ MTK_PULL_PULLSEL_TYPE,/*169*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*170*/ MTK_PULL_PUPD_R1R0_TYPE,/*171*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*172*/ MTK_PULL_PUPD_R1R0_TYPE,/*173*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*174*/ MTK_PULL_PUPD_R1R0_TYPE,/*175*/
+ MTK_PULL_PULLSEL_TYPE,/*176*/ MTK_PULL_PULLSEL_TYPE,/*177*/
+ MTK_PULL_PULLSEL_TYPE,/*178*/ MTK_PULL_PULLSEL_TYPE,/*179*/
+ MTK_PULL_PULLSEL_TYPE,/*180*/ MTK_PULL_PULLSEL_TYPE,/*181*/
+ MTK_PULL_PULLSEL_TYPE,/*182*/ MTK_PULL_PULLSEL_TYPE,/*183*/
+ MTK_PULL_PULLSEL_TYPE,/*184*/ MTK_PULL_PULLSEL_TYPE,/*185*/
+ MTK_PULL_PULLSEL_TYPE,/*186*/ MTK_PULL_PULLSEL_TYPE,/*187*/
+ MTK_PULL_PULLSEL_TYPE,/*188*/ MTK_PULL_PULLSEL_TYPE,/*189*/
+ MTK_PULL_PULLSEL_TYPE,/*190*/ MTK_PULL_PULLSEL_TYPE,/*191*/
+ MTK_PULL_PULLSEL_TYPE,/*192*/ MTK_PULL_PULLSEL_TYPE,/*193*/
+ MTK_PULL_PULLSEL_TYPE,/*194*/ MTK_PULL_PULLSEL_TYPE,/*195*/
+ MTK_PULL_PULLSEL_TYPE,/*196*/
+};
+
+static const struct mtk_pin_soc mt6795_data = {
+ .reg_cal = mt6795_reg_cals,
+ .pins = mtk_pins_mt6795,
+ .npins = ARRAY_SIZE(mtk_pins_mt6795),
+ .ngrps = ARRAY_SIZE(mtk_pins_mt6795),
+ .nfuncs = 8,
+ .eint_hw = &mt6795_eint_hw,
+ .gpio_m = 0,
+ .base_names = mtk_default_register_base_names,
+ .nbase_names = ARRAY_SIZE(mtk_default_register_base_names),
+ .pull_type = mt6795_pull_type,
+ .bias_disable_set = mtk_pinconf_bias_disable_set_rev1,
+ .bias_disable_get = mtk_pinconf_bias_disable_get_rev1,
+ .bias_set = mtk_pinconf_bias_set_rev1,
+ .bias_get = mtk_pinconf_bias_get_rev1,
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_pull_get = mtk_pinconf_adv_pull_get,
+ .adv_pull_set = mtk_pinconf_adv_pull_set,
+};
+
+static const struct of_device_id mt6795_pctrl_match[] = {
+ { .compatible = "mediatek,mt6795-pinctrl", .data = &mt6795_data },
+ { }
+};
+
+static struct platform_driver mt6795_pinctrl_driver = {
+ .driver = {
+ .name = "mt6795-pinctrl",
+ .of_match_table = mt6795_pctrl_match,
+ .pm = &mtk_paris_pinctrl_pm_ops,
+ },
+ .probe = mtk_paris_pinctrl_probe,
+};
+
+static int __init mtk_pinctrl_init(void)
+{
+ return platform_driver_register(&mt6795_pinctrl_driver);
+}
+arch_initcall(mtk_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt6795.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6795.h
new file mode 100644
index 000000000000..f639bd859116
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6795.h
@@ -0,0 +1,1698 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __PINCTRL_MTK_MT6795_H
+#define __PINCTRL_MTK_MT6795_H
+
+#include "pinctrl-paris.h"
+
+static const struct mtk_pin_desc mtk_pins_mt6795[] = {
+ MTK_PIN(
+ 0, "GPIO0",
+ MTK_EINT_FUNCTION(0, 0),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "IRDA_PDN"),
+ MTK_FUNCTION(2, "I2S1_WS"),
+ MTK_FUNCTION(4, "TDD_TMS"),
+ MTK_FUNCTION(5, "UTXD0")
+ ),
+ MTK_PIN(
+ 1, "GPIO1",
+ MTK_EINT_FUNCTION(0, 1),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "IRDA_RXD"),
+ MTK_FUNCTION(2, "I2S1_BCK"),
+ MTK_FUNCTION(3, "SDA4"),
+ MTK_FUNCTION(4, "TDD_TCK"),
+ MTK_FUNCTION(5, "URXD0")
+ ),
+ MTK_PIN(
+ 2, "GPIO2",
+ MTK_EINT_FUNCTION(0, 2),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "IRDA_TXD"),
+ MTK_FUNCTION(2, "I2S1_MCK"),
+ MTK_FUNCTION(3, "SCL4"),
+ MTK_FUNCTION(4, "TDD_TDI"),
+ MTK_FUNCTION(5, "UTXD3")
+ ),
+ MTK_PIN(
+ 3, "GPIO3",
+ MTK_EINT_FUNCTION(0, 3),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "DSI1_TE"),
+ MTK_FUNCTION(2, "I2S1_DO_1"),
+ MTK_FUNCTION(3, "SDA3"),
+ MTK_FUNCTION(4, "TDD_TDO"),
+ MTK_FUNCTION(5, "URXD3")
+ ),
+ MTK_PIN(
+ 4, "GPIO4",
+ MTK_EINT_FUNCTION(0, 4),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "DISP_PWM1"),
+ MTK_FUNCTION(2, "I2S1_DO_2"),
+ MTK_FUNCTION(3, "SCL3"),
+ MTK_FUNCTION(4, "TDD_TRSTN")
+ ),
+ MTK_PIN(
+ 5, "GPIO5",
+ MTK_EINT_FUNCTION(0, 5),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "PCM1_CLK"),
+ MTK_FUNCTION(2, "I2S2_WS"),
+ MTK_FUNCTION(3, "SPI_CK_3"),
+ MTK_FUNCTION(4, "LTE_MD32_JTAG_TMS"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 6, "GPIO6",
+ MTK_EINT_FUNCTION(0, 6),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "PCM1_SYNC"),
+ MTK_FUNCTION(2, "I2S2_BCK"),
+ MTK_FUNCTION(3, "SPI_MI_3"),
+ MTK_FUNCTION(4, "LTE_MD32_JTAG_TCK"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 7, "GPIO7",
+ MTK_EINT_FUNCTION(0, 7),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(1, "PCM1_DI"),
+ MTK_FUNCTION(2, "I2S2_DI_1"),
+ MTK_FUNCTION(3, "SPI_MO_3"),
+ MTK_FUNCTION(4, "LTE_MD32_JTAG_TDI"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 8, "GPIO8",
+ MTK_EINT_FUNCTION(0, 8),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(1, "PCM1_DO"),
+ MTK_FUNCTION(2, "I2S2_DI_2"),
+ MTK_FUNCTION(3, "SPI_CS_3"),
+ MTK_FUNCTION(4, "LTE_MD32_JTAG_TDO"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 9, "GPIO9",
+ MTK_EINT_FUNCTION(0, 9),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(1, "USB_DRVVBUS"),
+ MTK_FUNCTION(2, "I2S2_MCK"),
+ MTK_FUNCTION(4, "LTE_MD32_JTAG_TRST"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TRST")
+ ),
+ MTK_PIN(
+ 10, "GPIO10",
+ MTK_EINT_FUNCTION(0, 10),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(2, "I2S0_WS")
+ ),
+ MTK_PIN(
+ 11, "GPIO11",
+ MTK_EINT_FUNCTION(0, 11),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(2, "I2S0_BCK")
+ ),
+ MTK_PIN(
+ 12, "GPIO12",
+ MTK_EINT_FUNCTION(0, 12),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(2, "I2S0_MCK")
+ ),
+ MTK_PIN(
+ 13, "GPIO13",
+ MTK_EINT_FUNCTION(0, 13),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(2, "I2S0_DO")
+ ),
+ MTK_PIN(
+ 14, "GPIO14",
+ MTK_EINT_FUNCTION(0, 14),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(2, "I2S0_DI"),
+ MTK_FUNCTION(3, "DISP_PWM1"),
+ MTK_FUNCTION(4, "PWM4"),
+ MTK_FUNCTION(5, "IRDA_RXD"),
+ MTK_FUNCTION(6, "I2S1_BCK")
+ ),
+ MTK_PIN(
+ 15, "GPIO15",
+ MTK_EINT_FUNCTION(0, 15),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(2, "DSI1_TE"),
+ MTK_FUNCTION(3, "USB_DRVVBUS"),
+ MTK_FUNCTION(4, "PWM5"),
+ MTK_FUNCTION(5, "IRDA_TXD"),
+ MTK_FUNCTION(6, "I2S1_MCK")
+ ),
+ MTK_PIN(
+ 16, "GPIO16",
+ MTK_EINT_FUNCTION(0, 16),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(1, "IDDIG"),
+ MTK_FUNCTION(2, "FLASH"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(4, "PWM5")
+ ),
+ MTK_PIN(
+ 17, "GPIO17",
+ MTK_EINT_FUNCTION(0, 17),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(1, "SIM1_SCLK"),
+ MTK_FUNCTION(2, "SIM2_SCLK")
+ ),
+ MTK_PIN(
+ 18, "GPIO18",
+ MTK_EINT_FUNCTION(0, 18),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(1, "SIM1_SRST"),
+ MTK_FUNCTION(2, "SIM2_SRST")
+ ),
+ MTK_PIN(
+ 19, "GPIO19",
+ MTK_EINT_FUNCTION(0, 19),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "SIM1_SDAT"),
+ MTK_FUNCTION(2, "SIM2_SDAT")
+ ),
+ MTK_PIN(
+ 20, "GPIO20",
+ MTK_EINT_FUNCTION(0, 20),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "SIM2_SCLK"),
+ MTK_FUNCTION(2, "SIM1_SCLK")
+ ),
+ MTK_PIN(
+ 21, "GPIO21",
+ MTK_EINT_FUNCTION(0, 21),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "SIM2_SRST"),
+ MTK_FUNCTION(2, "SIM1_SRST")
+ ),
+ MTK_PIN(
+ 22, "GPIO22",
+ MTK_EINT_FUNCTION(0, 22),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(1, "SIM2_SDAT"),
+ MTK_FUNCTION(2, "SIM1_SDAT")
+ ),
+ MTK_PIN(
+ 23, "GPIO23",
+ MTK_EINT_FUNCTION(0, 23),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(1, "MSDC3_DAT0")
+ ),
+ MTK_PIN(
+ 24, "GPIO24",
+ MTK_EINT_FUNCTION(0, 24),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "MSDC3_DAT1")
+ ),
+ MTK_PIN(
+ 25, "GPIO25",
+ MTK_EINT_FUNCTION(0, 25),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "MSDC3_DAT2")
+ ),
+ MTK_PIN(
+ 26, "GPIO26",
+ MTK_EINT_FUNCTION(0, 26),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "MSDC3_DAT3")
+ ),
+ MTK_PIN(
+ 27, "GPIO27",
+ MTK_EINT_FUNCTION(0, 27),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "MSDC3_CLK")
+ ),
+ MTK_PIN(
+ 28, "GPIO28",
+ MTK_EINT_FUNCTION(0, 28),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "MSDC3_CMD")
+ ),
+ MTK_PIN(
+ 29, "GPIO29",
+ MTK_EINT_FUNCTION(0, 29),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "PTA_RXD"),
+ MTK_FUNCTION(2, "UCTS2")
+ ),
+ MTK_PIN(
+ 30, "GPIO30",
+ MTK_EINT_FUNCTION(0, 30),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "PTA_TXD"),
+ MTK_FUNCTION(2, "URTS2")
+ ),
+ MTK_PIN(
+ 31, "GPIO31",
+ MTK_EINT_FUNCTION(0, 31),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "URXD2"),
+ MTK_FUNCTION(2, "UTXD2")
+ ),
+ MTK_PIN(
+ 32, "GPIO32",
+ MTK_EINT_FUNCTION(0, 32),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "UTXD2"),
+ MTK_FUNCTION(2, "URXD2")
+ ),
+ MTK_PIN(
+ 33, "GPIO33",
+ MTK_EINT_FUNCTION(0, 33),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "MRG_CLK"),
+ MTK_FUNCTION(2, "PCM0_CLK")
+ ),
+ MTK_PIN(
+ 34, "GPIO34",
+ MTK_EINT_FUNCTION(0, 34),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "MRG_DI"),
+ MTK_FUNCTION(2, "PCM0_DI")
+ ),
+ MTK_PIN(
+ 35, "GPIO35",
+ MTK_EINT_FUNCTION(0, 35),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "MRG_DO"),
+ MTK_FUNCTION(2, "PCM0_DO")
+ ),
+ MTK_PIN(
+ 36, "GPIO36",
+ MTK_EINT_FUNCTION(0, 36),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "MRG_SYNC"),
+ MTK_FUNCTION(2, "PCM0_SYNC")
+ ),
+ MTK_PIN(
+ 37, "GPIO37",
+ MTK_EINT_FUNCTION(0, 37),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "GPS_SYNC")
+ ),
+ MTK_PIN(
+ 38, "GPIO38",
+ MTK_EINT_FUNCTION(0, 38),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(1, "DAIRSTB")
+ ),
+ MTK_PIN(
+ 39, "GPIO39",
+ MTK_EINT_FUNCTION(0, 39),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "CM2MCLK")
+ ),
+ MTK_PIN(
+ 40, "GPIO40",
+ MTK_EINT_FUNCTION(0, 40),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "CM3MCLK"),
+ MTK_FUNCTION(2, "IRDA_PDN"),
+ MTK_FUNCTION(3, "PWM6"),
+ MTK_FUNCTION(4, "I2S1_WS")
+ ),
+ MTK_PIN(
+ 41, "GPIO41",
+ MTK_EINT_FUNCTION(0, 41),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "CMPCLK"),
+ MTK_FUNCTION(2, "CMCSK"),
+ MTK_FUNCTION(3, "FLASH")
+ ),
+ MTK_PIN(
+ 42, "GPIO42",
+ MTK_EINT_FUNCTION(0, 42),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "CMMCLK")
+ ),
+ MTK_PIN(
+ 43, "GPIO43",
+ MTK_EINT_FUNCTION(0, 43),
+ DRV_FIXED,
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "SDA2")
+ ),
+ MTK_PIN(
+ 44, "GPIO44",
+ MTK_EINT_FUNCTION(0, 44),
+ DRV_FIXED,
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "SCL2")
+ ),
+ MTK_PIN(
+ 45, "GPIO45",
+ MTK_EINT_FUNCTION(0, 45),
+ DRV_FIXED,
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "SDA0")
+ ),
+ MTK_PIN(
+ 46, "GPIO46",
+ MTK_EINT_FUNCTION(0, 46),
+ DRV_FIXED,
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "SCL0")
+ ),
+ MTK_PIN(
+ 47, "GPIO47",
+ MTK_EINT_FUNCTION(0, 47),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "BPI_BUS0")
+ ),
+ MTK_PIN(
+ 48, "GPIO48",
+ MTK_EINT_FUNCTION(0, 48),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "BPI_BUS1")
+ ),
+ MTK_PIN(
+ 49, "GPIO49",
+ MTK_EINT_FUNCTION(0, 49),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "BPI_BUS2")
+ ),
+ MTK_PIN(
+ 50, "GPIO50",
+ MTK_EINT_FUNCTION(0, 50),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "BPI_BUS3")
+ ),
+ MTK_PIN(
+ 51, "GPIO51",
+ MTK_EINT_FUNCTION(0, 51),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "BPI_BUS4")
+ ),
+ MTK_PIN(
+ 52, "GPIO52",
+ MTK_EINT_FUNCTION(0, 52),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "BPI_BUS5")
+ ),
+ MTK_PIN(
+ 53, "GPIO53",
+ MTK_EINT_FUNCTION(0, 53),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "BPI_BUS6")
+ ),
+ MTK_PIN(
+ 54, "GPIO54",
+ MTK_EINT_FUNCTION(0, 54),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "BPI_BUS7")
+ ),
+ MTK_PIN(
+ 55, "GPIO55",
+ MTK_EINT_FUNCTION(0, 55),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "BPI_BUS8")
+ ),
+ MTK_PIN(
+ 56, "GPIO56",
+ MTK_EINT_FUNCTION(0, 56),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "BPI_BUS9")
+ ),
+ MTK_PIN(
+ 57, "GPIO57",
+ MTK_EINT_FUNCTION(0, 57),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "BPI_BUS10")
+ ),
+ MTK_PIN(
+ 58, "GPIO58",
+ MTK_EINT_FUNCTION(0, 58),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "BPI_BUS11")
+ ),
+ MTK_PIN(
+ 59, "GPIO59",
+ MTK_EINT_FUNCTION(0, 59),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "BPI_BUS12")
+ ),
+ MTK_PIN(
+ 60, "GPIO60",
+ MTK_EINT_FUNCTION(0, 60),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "BPI_BUS13")
+ ),
+ MTK_PIN(
+ 61, "GPIO61",
+ MTK_EINT_FUNCTION(0, 61),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "BPI_BUS14")
+ ),
+ MTK_PIN(
+ 62, "GPIO62",
+ MTK_EINT_FUNCTION(0, 62),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "RFIC1_BSI_CK")
+ ),
+ MTK_PIN(
+ 63, "GPIO63",
+ MTK_EINT_FUNCTION(0, 63),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "RFIC1_BSI_D0")
+ ),
+ MTK_PIN(
+ 64, "GPIO64",
+ MTK_EINT_FUNCTION(0, 64),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "RFIC1_BSI_D1")
+ ),
+ MTK_PIN(
+ 65, "GPIO65",
+ MTK_EINT_FUNCTION(0, 65),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "RFIC1_BSI_D2")
+ ),
+ MTK_PIN(
+ 66, "GPIO66",
+ MTK_EINT_FUNCTION(0, 66),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "RFIC1_BSI_CS")
+ ),
+ MTK_PIN(
+ 67, "GPIO67",
+ MTK_EINT_FUNCTION(0, 67),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "TD_TXBPI")
+ ),
+ MTK_PIN(
+ 68, "GPIO68",
+ MTK_EINT_FUNCTION(0, 68),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "RFIC0_BSI_CK")
+ ),
+ MTK_PIN(
+ 69, "GPIO69",
+ MTK_EINT_FUNCTION(0, 69),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "RFIC0_BSI_D0")
+ ),
+ MTK_PIN(
+ 70, "GPIO70",
+ MTK_EINT_FUNCTION(0, 70),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "RFIC0_BSI_D1")
+ ),
+ MTK_PIN(
+ 71, "GPIO71",
+ MTK_EINT_FUNCTION(0, 71),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO71"),
+ MTK_FUNCTION(1, "RFIC0_BSI_D2")
+ ),
+ MTK_PIN(
+ 72, "GPIO72",
+ MTK_EINT_FUNCTION(0, 72),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO72"),
+ MTK_FUNCTION(1, "RFIC0_BSI_CS")
+ ),
+ MTK_PIN(
+ 73, "GPIO73",
+ MTK_EINT_FUNCTION(0, 73),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO73"),
+ MTK_FUNCTION(1, "MISC_BSI_DO")
+ ),
+ MTK_PIN(
+ 74, "GPIO74",
+ MTK_EINT_FUNCTION(0, 74),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "MISC_BSI_CK")
+ ),
+ MTK_PIN(
+ 75, "GPIO75",
+ MTK_EINT_FUNCTION(0, 75),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "MISC_BSI_CS0B"),
+ MTK_FUNCTION(2, "MIPI1_SCLK")
+ ),
+ MTK_PIN(
+ 76, "GPIO76",
+ MTK_EINT_FUNCTION(0, 76),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "MISC_BSI_CS1B")
+ ),
+ MTK_PIN(
+ 77, "GPIO77",
+ MTK_EINT_FUNCTION(0, 77),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO77"),
+ MTK_FUNCTION(1, "MISC_BSI_DI"),
+ MTK_FUNCTION(2, "MIPI1_SDATA")
+ ),
+ MTK_PIN(
+ 78, "GPIO78",
+ MTK_EINT_FUNCTION(0, 78),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO78"),
+ MTK_FUNCTION(1, "LTE_TXBPI")
+ ),
+ MTK_PIN(
+ 79, "GPIO79",
+ MTK_EINT_FUNCTION(0, 79),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO79"),
+ MTK_FUNCTION(1, "BPI_BUS15")
+ ),
+ MTK_PIN(
+ 80, "GPIO80",
+ MTK_EINT_FUNCTION(0, 80),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO80"),
+ MTK_FUNCTION(1, "BPI_BUS16")
+ ),
+ MTK_PIN(
+ 81, "GPIO81",
+ MTK_EINT_FUNCTION(0, 81),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO81"),
+ MTK_FUNCTION(1, "BPI_BUS17")
+ ),
+ MTK_PIN(
+ 82, "GPIO82",
+ MTK_EINT_FUNCTION(0, 82),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO82"),
+ MTK_FUNCTION(1, "BPI_BUS18")
+ ),
+ MTK_PIN(
+ 83, "GPIO83",
+ MTK_EINT_FUNCTION(0, 83),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "BPI_BUS19")
+ ),
+ MTK_PIN(
+ 84, "GPIO84",
+ MTK_EINT_FUNCTION(0, 84),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "BPI_BUS20")
+ ),
+ MTK_PIN(
+ 85, "GPIO85",
+ MTK_EINT_FUNCTION(0, 85),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO85"),
+ MTK_FUNCTION(1, "BPI_BUS21")
+ ),
+ MTK_PIN(
+ 86, "GPIO86",
+ MTK_EINT_FUNCTION(0, 86),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO86"),
+ MTK_FUNCTION(1, "BPI_BUS22")
+ ),
+ MTK_PIN(
+ 87, "GPIO87",
+ MTK_EINT_FUNCTION(0, 87),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO87"),
+ MTK_FUNCTION(1, "BPI_BUS23")
+ ),
+ MTK_PIN(
+ 88, "GPIO88",
+ MTK_EINT_FUNCTION(0, 88),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO88"),
+ MTK_FUNCTION(1, "BPI_BUS24")
+ ),
+ MTK_PIN(
+ 89, "GPIO89",
+ MTK_EINT_FUNCTION(0, 89),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO89"),
+ MTK_FUNCTION(1, "BPI_BUS25")
+ ),
+ MTK_PIN(
+ 90, "GPIO90",
+ MTK_EINT_FUNCTION(0, 90),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO90"),
+ MTK_FUNCTION(1, "BPI_BUS26")
+ ),
+ MTK_PIN(
+ 91, "GPIO91",
+ MTK_EINT_FUNCTION(0, 91),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO91"),
+ MTK_FUNCTION(1, "BPI_BUS27")
+ ),
+ MTK_PIN(
+ 92, "GPIO92",
+ MTK_EINT_FUNCTION(0, 92),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO92"),
+ MTK_FUNCTION(1, "PCM1_CLK"),
+ MTK_FUNCTION(2, "I2S0_BCK"),
+ MTK_FUNCTION(3, "NLD6")
+ ),
+ MTK_PIN(
+ 93, "GPIO93",
+ MTK_EINT_FUNCTION(0, 93),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO93"),
+ MTK_FUNCTION(1, "PCM1_SYNC"),
+ MTK_FUNCTION(2, "I2S0_WS"),
+ MTK_FUNCTION(3, "NLD7")
+ ),
+ MTK_PIN(
+ 94, "GPIO94",
+ MTK_EINT_FUNCTION(0, 94),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO94"),
+ MTK_FUNCTION(1, "PCM1_DI"),
+ MTK_FUNCTION(2, "I2S0_DI"),
+ MTK_FUNCTION(3, "NREB")
+ ),
+ MTK_PIN(
+ 95, "GPIO95",
+ MTK_EINT_FUNCTION(0, 95),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO95"),
+ MTK_FUNCTION(1, "PCM1_DO"),
+ MTK_FUNCTION(2, "I2S0_DO"),
+ MTK_FUNCTION(3, "NRNB0")
+ ),
+ MTK_PIN(
+ 96, "GPIO96",
+ MTK_EINT_FUNCTION(0, 96),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO96"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "UTXD1"),
+ MTK_FUNCTION(3, "NWEB")
+ ),
+ MTK_PIN(
+ 97, "GPIO97",
+ MTK_EINT_FUNCTION(0, 97),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO97"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "URXD1"),
+ MTK_FUNCTION(3, "NCEB0")
+ ),
+ MTK_PIN(
+ 98, "GPIO98",
+ MTK_EINT_FUNCTION(0, 98),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO98"),
+ MTK_FUNCTION(1, "URTS1"),
+ MTK_FUNCTION(2, "UCTS1"),
+ MTK_FUNCTION(3, "NALE")
+ ),
+ MTK_PIN(
+ 99, "GPIO99",
+ MTK_EINT_FUNCTION(0, 99),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO99"),
+ MTK_FUNCTION(1, "UCTS1"),
+ MTK_FUNCTION(2, "URTS1"),
+ MTK_FUNCTION(3, "NCLE")
+ ),
+ MTK_PIN(
+ 100, "GPIO100",
+ MTK_EINT_FUNCTION(0, 100),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "MSDC2_DAT0"),
+ MTK_FUNCTION(2, "URXD1"),
+ MTK_FUNCTION(3, "USB_DRVVBUS"),
+ MTK_FUNCTION(4, "SDA4")
+ ),
+ MTK_PIN(
+ 101, "GPIO101",
+ MTK_EINT_FUNCTION(0, 101),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "MSDC2_DAT1"),
+ MTK_FUNCTION(2, "UTXD1"),
+ MTK_FUNCTION(4, "SCL4")
+ ),
+ MTK_PIN(
+ 102, "GPIO102",
+ MTK_EINT_FUNCTION(0, 102),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "MSDC2_DAT2"),
+ MTK_FUNCTION(2, "URTS1"),
+ MTK_FUNCTION(3, "UTXD0"),
+ MTK_FUNCTION(5, "PWM0"),
+ MTK_FUNCTION(6, "SPI_CK_1")
+ ),
+ MTK_PIN(
+ 103, "GPIO103",
+ MTK_EINT_FUNCTION(0, 103),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "MSDC2_DAT3"),
+ MTK_FUNCTION(2, "UCTS1"),
+ MTK_FUNCTION(3, "URXD0"),
+ MTK_FUNCTION(5, "PWM1"),
+ MTK_FUNCTION(6, "SPI_MI_1")
+ ),
+ MTK_PIN(
+ 104, "GPIO104",
+ MTK_EINT_FUNCTION(0, 104),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "MSDC2_CLK"),
+ MTK_FUNCTION(2, "NLD4"),
+ MTK_FUNCTION(3, "UTXD3"),
+ MTK_FUNCTION(4, "SDA3"),
+ MTK_FUNCTION(5, "PWM2"),
+ MTK_FUNCTION(6, "SPI_MO_1")
+ ),
+ MTK_PIN(
+ 105, "GPIO105",
+ MTK_EINT_FUNCTION(0, 105),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "MSDC2_CMD"),
+ MTK_FUNCTION(2, "NLD5"),
+ MTK_FUNCTION(3, "URXD3"),
+ MTK_FUNCTION(4, "SCL3"),
+ MTK_FUNCTION(5, "PWM3"),
+ MTK_FUNCTION(6, "SPI_CS_1")
+ ),
+ MTK_PIN(
+ 106, "GPIO106",
+ MTK_EINT_FUNCTION(0, 106),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "LCM_RST")
+ ),
+ MTK_PIN(
+ 107, "GPIO107",
+ MTK_EINT_FUNCTION(0, 107),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "DSI_TE")
+ ),
+ MTK_PIN(
+ 108, "GPIO108",
+ MTK_EINT_FUNCTION(0, 108),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "JTMS"),
+ MTK_FUNCTION(2, "MFG_JTAG_TMS"),
+ MTK_FUNCTION(3, "TDD_TMS"),
+ MTK_FUNCTION(4, "LTE_MD32_JTAG_TMS"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TMS"),
+ MTK_FUNCTION(6, "DFD_TMS")
+ ),
+ MTK_PIN(
+ 109, "GPIO109",
+ MTK_EINT_FUNCTION(0, 109),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "JTCK"),
+ MTK_FUNCTION(2, "MFG_JTAG_TCK"),
+ MTK_FUNCTION(3, "TDD_TCK"),
+ MTK_FUNCTION(4, "LTE_MD32_JTAG_TCK"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TCK"),
+ MTK_FUNCTION(6, "DFD_TCK")
+ ),
+ MTK_PIN(
+ 110, "GPIO110",
+ MTK_EINT_FUNCTION(0, 110),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "JTDI"),
+ MTK_FUNCTION(2, "MFG_JTAG_TDI"),
+ MTK_FUNCTION(3, "TDD_TDI"),
+ MTK_FUNCTION(4, "LTE_MD32_JTAG_TDI"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TDI"),
+ MTK_FUNCTION(6, "DFD_TDI")
+ ),
+ MTK_PIN(
+ 111, "GPIO111",
+ MTK_EINT_FUNCTION(0, 111),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "JTDO"),
+ MTK_FUNCTION(2, "MFG_JTAG_TDO"),
+ MTK_FUNCTION(3, "TDD_TDO"),
+ MTK_FUNCTION(4, "LTE_MD32_JTAG_TDO"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TDO"),
+ MTK_FUNCTION(6, "DFD_TDO")
+ ),
+ MTK_PIN(
+ 112, "GPIO112",
+ MTK_EINT_FUNCTION(0, 112),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "JTRST_B"),
+ MTK_FUNCTION(2, "MFG_JTAG_TRSTN"),
+ MTK_FUNCTION(3, "TDD_TRSTN"),
+ MTK_FUNCTION(4, "LTE_MD32_JTAG_TRST"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TRST"),
+ MTK_FUNCTION(6, "DFD_NTRST")
+ ),
+ MTK_PIN(
+ 113, "GPIO113",
+ MTK_EINT_FUNCTION(0, 113),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "URXD0"),
+ MTK_FUNCTION(2, "UTXD0"),
+ MTK_FUNCTION(3, "MD_URXD"),
+ MTK_FUNCTION(4, "LTE_URXD"),
+ MTK_FUNCTION(5, "TDD_TXD"),
+ MTK_FUNCTION(6, "I2S2_WS")
+ ),
+ MTK_PIN(
+ 114, "GPIO114",
+ MTK_EINT_FUNCTION(0, 114),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "UTXD0"),
+ MTK_FUNCTION(2, "URXD0"),
+ MTK_FUNCTION(3, "MD_UTXD"),
+ MTK_FUNCTION(4, "LTE_UTXD"),
+ MTK_FUNCTION(5, "TDD_TXD"),
+ MTK_FUNCTION(6, "I2S2_BCK")
+ ),
+ MTK_PIN(
+ 115, "GPIO115",
+ MTK_EINT_FUNCTION(0, 115),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "URTS0"),
+ MTK_FUNCTION(2, "UCTS0"),
+ MTK_FUNCTION(3, "MD_URXD"),
+ MTK_FUNCTION(4, "LTE_URXD"),
+ MTK_FUNCTION(5, "TDD_TXD"),
+ MTK_FUNCTION(6, "I2S2_MCK")
+ ),
+ MTK_PIN(
+ 116, "GPIO116",
+ MTK_EINT_FUNCTION(0, 116),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "UCTS0"),
+ MTK_FUNCTION(2, "URTS0"),
+ MTK_FUNCTION(3, "MD_UTXD"),
+ MTK_FUNCTION(4, "LTE_UTXD"),
+ MTK_FUNCTION(5, "TDD_TXD"),
+ MTK_FUNCTION(6, "I2S2_DI_1")
+ ),
+ MTK_PIN(
+ 117, "GPIO117",
+ MTK_EINT_FUNCTION(0, 117),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "URXD3"),
+ MTK_FUNCTION(2, "UTXD3"),
+ MTK_FUNCTION(3, "MD_URXD"),
+ MTK_FUNCTION(4, "LTE_URXD"),
+ MTK_FUNCTION(5, "TDD_TXD")
+ ),
+ MTK_PIN(
+ 118, "GPIO118",
+ MTK_EINT_FUNCTION(0, 118),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "UTXD3"),
+ MTK_FUNCTION(2, "URXD3"),
+ MTK_FUNCTION(3, "MD_UTXD"),
+ MTK_FUNCTION(4, "LTE_UTXD"),
+ MTK_FUNCTION(5, "TDD_TXD")
+ ),
+ MTK_PIN(
+ 119, "GPIO119",
+ MTK_EINT_FUNCTION(0, 119),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "KROW0")
+ ),
+ MTK_PIN(
+ 120, "GPIO120",
+ MTK_EINT_FUNCTION(0, 120),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "KROW1"),
+ MTK_FUNCTION(3, "PWM6")
+ ),
+ MTK_PIN(
+ 121, "GPIO121",
+ MTK_EINT_FUNCTION(0, 121),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "KROW2"),
+ MTK_FUNCTION(2, "IRDA_PDN"),
+ MTK_FUNCTION(3, "I2S1_DO_1"),
+ MTK_FUNCTION(4, "USB_DRVVBUS"),
+ MTK_FUNCTION(5, "SPI_CK_2"),
+ MTK_FUNCTION(6, "PWM4")
+ ),
+ MTK_PIN(
+ 122, "GPIO122",
+ MTK_EINT_FUNCTION(0, 122),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "KCOL0")
+ ),
+ MTK_PIN(
+ 123, "GPIO123",
+ MTK_EINT_FUNCTION(0, 123),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "KCOL1"),
+ MTK_FUNCTION(2, "IRDA_RXD"),
+ MTK_FUNCTION(3, "I2S2_DI_2"),
+ MTK_FUNCTION(4, "PWM5")
+ ),
+ MTK_PIN(
+ 124, "GPIO124",
+ MTK_EINT_FUNCTION(0, 124),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "KCOL2"),
+ MTK_FUNCTION(2, "IRDA_TXD"),
+ MTK_FUNCTION(3, "I2S1_DO_2"),
+ MTK_FUNCTION(4, "USB_DRVVBUS"),
+ MTK_FUNCTION(5, "SPI_MI_2"),
+ MTK_FUNCTION(6, "PWM3")
+ ),
+ MTK_PIN(
+ 125, "GPIO125",
+ MTK_EINT_FUNCTION(0, 125),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO125"),
+ MTK_FUNCTION(1, "SDA1")
+ ),
+ MTK_PIN(
+ 126, "GPIO126",
+ MTK_EINT_FUNCTION(0, 126),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO126"),
+ MTK_FUNCTION(1, "SCL1")
+ ),
+ MTK_PIN(
+ 127, "GPIO127",
+ MTK_EINT_FUNCTION(1, 127),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO127"),
+ MTK_FUNCTION(1, "MD_EINT1"),
+ MTK_FUNCTION(2, "DISP_PWM1"),
+ MTK_FUNCTION(3, "SPI_MO_2")
+ ),
+ MTK_PIN(
+ 128, "GPIO128",
+ MTK_EINT_FUNCTION(1, 128),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO128"),
+ MTK_FUNCTION(1, "MD_EINT2"),
+ MTK_FUNCTION(2, "DSI1_TE"),
+ MTK_FUNCTION(3, "SPI_CS_2")
+ ),
+ MTK_PIN(
+ 129, "GPIO129",
+ MTK_EINT_FUNCTION(0, 129),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO129"),
+ MTK_FUNCTION(1, "I2S3_WS"),
+ MTK_FUNCTION(2, "I2S2_WS"),
+ MTK_FUNCTION(3, "PWM0")
+ ),
+ MTK_PIN(
+ 130, "GPIO130",
+ MTK_EINT_FUNCTION(0, 130),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO130"),
+ MTK_FUNCTION(1, "I2S3_BCK"),
+ MTK_FUNCTION(2, "I2S2_BCK"),
+ MTK_FUNCTION(3, "PWM1")
+ ),
+ MTK_PIN(
+ 131, "GPIO131",
+ MTK_EINT_FUNCTION(0, 131),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO131"),
+ MTK_FUNCTION(1, "I2S3_MCK"),
+ MTK_FUNCTION(2, "I2S2_MCK"),
+ MTK_FUNCTION(3, "PWM2")
+ ),
+ MTK_PIN(
+ 132, "GPIO132",
+ MTK_EINT_FUNCTION(0, 132),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO132"),
+ MTK_FUNCTION(1, "I2S3_DO_1"),
+ MTK_FUNCTION(2, "I2S2_DI_1"),
+ MTK_FUNCTION(3, "PWM3")
+ ),
+ MTK_PIN(
+ 133, "GPIO133",
+ MTK_EINT_FUNCTION(0, 133),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO133"),
+ MTK_FUNCTION(1, "I2S3_DO_2"),
+ MTK_FUNCTION(2, "I2S2_DI_2"),
+ MTK_FUNCTION(3, "PWM4")
+ ),
+ MTK_PIN(
+ 134, "GPIO134",
+ MTK_EINT_FUNCTION(0, 134),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO134"),
+ MTK_FUNCTION(1, "I2S3_DO_3"),
+ MTK_FUNCTION(2, "DISP_PWM1"),
+ MTK_FUNCTION(3, "I2S1_DO_1"),
+ MTK_FUNCTION(4, "PWM5")
+ ),
+ MTK_PIN(
+ 135, "GPIO135",
+ MTK_EINT_FUNCTION(0, 135),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO135"),
+ MTK_FUNCTION(1, "I2S3_DO_4"),
+ MTK_FUNCTION(2, "DSI1_TE"),
+ MTK_FUNCTION(3, "I2S1_DO_2"),
+ MTK_FUNCTION(4, "PWM6")
+ ),
+ MTK_PIN(
+ 136, "GPIO136",
+ MTK_EINT_FUNCTION(0, 136),
+ DRV_FIXED,
+ MTK_FUNCTION(0, "GPIO136"),
+ MTK_FUNCTION(1, "SDA3")
+ ),
+ MTK_PIN(
+ 137, "GPIO137",
+ MTK_EINT_FUNCTION(0, 137),
+ DRV_FIXED,
+ MTK_FUNCTION(0, "GPIO137"),
+ MTK_FUNCTION(1, "SCL3")
+ ),
+ MTK_PIN(
+ 138, "GPIO138",
+ MTK_EINT_FUNCTION(0, 138),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO138"),
+ MTK_FUNCTION(1, "DPI_CK"),
+ MTK_FUNCTION(2, "NLD6"),
+ MTK_FUNCTION(3, "UTXD0"),
+ MTK_FUNCTION(4, "USB_DRVVBUS"),
+ MTK_FUNCTION(5, "IRDA_PDN")
+ ),
+ MTK_PIN(
+ 139, "GPIO139",
+ MTK_EINT_FUNCTION(0, 139),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO139"),
+ MTK_FUNCTION(1, "DPI_DE"),
+ MTK_FUNCTION(2, "NLD7"),
+ MTK_FUNCTION(3, "URXD0"),
+ MTK_FUNCTION(4, "MD_UTXD"),
+ MTK_FUNCTION(5, "IRDA_RXD")
+ ),
+ MTK_PIN(
+ 140, "GPIO140",
+ MTK_EINT_FUNCTION(0, 140),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO140"),
+ MTK_FUNCTION(1, "DPI_D0"),
+ MTK_FUNCTION(2, "NREB"),
+ MTK_FUNCTION(3, "UCTS0"),
+ MTK_FUNCTION(4, "MD_URXD"),
+ MTK_FUNCTION(5, "IRDA_TXD")
+ ),
+ MTK_PIN(
+ 141, "GPIO141",
+ MTK_EINT_FUNCTION(0, 141),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO141"),
+ MTK_FUNCTION(1, "DPI_D1"),
+ MTK_FUNCTION(2, "NRNB0"),
+ MTK_FUNCTION(3, "URTS0"),
+ MTK_FUNCTION(4, "LTE_UTXD"),
+ MTK_FUNCTION(5, "I2S2_WS")
+ ),
+ MTK_PIN(
+ 142, "GPIO142",
+ MTK_EINT_FUNCTION(0, 142),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO142"),
+ MTK_FUNCTION(1, "DPI_D2"),
+ MTK_FUNCTION(2, "NWEB"),
+ MTK_FUNCTION(3, "UTXD1"),
+ MTK_FUNCTION(4, "LTE_URXD"),
+ MTK_FUNCTION(5, "I2S2_BCK")
+ ),
+ MTK_PIN(
+ 143, "GPIO143",
+ MTK_EINT_FUNCTION(0, 143),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO143"),
+ MTK_FUNCTION(1, "DPI_D3"),
+ MTK_FUNCTION(2, "NCEB0"),
+ MTK_FUNCTION(3, "URXD1"),
+ MTK_FUNCTION(4, "TDD_TXD"),
+ MTK_FUNCTION(5, "I2S2_MCK")
+ ),
+ MTK_PIN(
+ 144, "GPIO144",
+ MTK_EINT_FUNCTION(0, 144),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO144"),
+ MTK_FUNCTION(1, "DPI_D4"),
+ MTK_FUNCTION(2, "NALE"),
+ MTK_FUNCTION(3, "UCTS1"),
+ MTK_FUNCTION(4, "TDD_TMS"),
+ MTK_FUNCTION(5, "I2S2_DI_1")
+ ),
+ MTK_PIN(
+ 145, "GPIO145",
+ MTK_EINT_FUNCTION(0, 145),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO145"),
+ MTK_FUNCTION(1, "DPI_D5"),
+ MTK_FUNCTION(2, "NCLE"),
+ MTK_FUNCTION(3, "URTS1"),
+ MTK_FUNCTION(4, "TDD_TCK"),
+ MTK_FUNCTION(5, "I2S2_DI_2")
+ ),
+ MTK_PIN(
+ 146, "GPIO146",
+ MTK_EINT_FUNCTION(0, 146),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO146"),
+ MTK_FUNCTION(1, "DPI_D6"),
+ MTK_FUNCTION(2, "NLD8"),
+ MTK_FUNCTION(3, "UTXD2"),
+ MTK_FUNCTION(4, "TDD_TDI")
+ ),
+ MTK_PIN(
+ 147, "GPIO147",
+ MTK_EINT_FUNCTION(0, 147),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO147"),
+ MTK_FUNCTION(1, "DPI_D7"),
+ MTK_FUNCTION(2, "NLD9"),
+ MTK_FUNCTION(3, "URXD2"),
+ MTK_FUNCTION(4, "TDD_TDO"),
+ MTK_FUNCTION(5, "I2S1_WS")
+ ),
+ MTK_PIN(
+ 148, "GPIO148",
+ MTK_EINT_FUNCTION(0, 148),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO148"),
+ MTK_FUNCTION(1, "DPI_D8"),
+ MTK_FUNCTION(2, "NLD10"),
+ MTK_FUNCTION(3, "UCTS2"),
+ MTK_FUNCTION(4, "TDD_TRSTN"),
+ MTK_FUNCTION(5, "I2S1_BCK")
+ ),
+ MTK_PIN(
+ 149, "GPIO149",
+ MTK_EINT_FUNCTION(0, 149),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO149"),
+ MTK_FUNCTION(1, "DPI_D9"),
+ MTK_FUNCTION(2, "NLD11"),
+ MTK_FUNCTION(3, "URTS2"),
+ MTK_FUNCTION(4, "LTE_MD32_JTAG_TMS"),
+ MTK_FUNCTION(5, "I2S1_MCK")
+ ),
+ MTK_PIN(
+ 150, "GPIO150",
+ MTK_EINT_FUNCTION(0, 150),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO150"),
+ MTK_FUNCTION(1, "DPI_D10"),
+ MTK_FUNCTION(2, "NLD12"),
+ MTK_FUNCTION(3, "UTXD3"),
+ MTK_FUNCTION(4, "LTE_MD32_JTAG_TCK"),
+ MTK_FUNCTION(5, "I2S1_DO_1")
+ ),
+ MTK_PIN(
+ 151, "GPIO151",
+ MTK_EINT_FUNCTION(0, 151),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO151"),
+ MTK_FUNCTION(1, "DPI_D11"),
+ MTK_FUNCTION(2, "NLD13"),
+ MTK_FUNCTION(3, "URXD3"),
+ MTK_FUNCTION(4, "LTE_MD32_JTAG_TDI"),
+ MTK_FUNCTION(5, "I2S1_DO_2")
+ ),
+ MTK_PIN(
+ 152, "GPIO152",
+ MTK_EINT_FUNCTION(0, 152),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO152"),
+ MTK_FUNCTION(1, "DPI_HSYNC"),
+ MTK_FUNCTION(2, "NLD14"),
+ MTK_FUNCTION(3, "UCTS3"),
+ MTK_FUNCTION(4, "LTE_MD32_JTAG_TDO"),
+ MTK_FUNCTION(5, "DSI1_TE")
+ ),
+ MTK_PIN(
+ 153, "GPIO153",
+ MTK_EINT_FUNCTION(0, 153),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO153"),
+ MTK_FUNCTION(1, "DPI_VSYNC"),
+ MTK_FUNCTION(2, "NLD15"),
+ MTK_FUNCTION(3, "URTS3"),
+ MTK_FUNCTION(4, "LTE_MD32_JTAG_TRST"),
+ MTK_FUNCTION(5, "DISP_PWM1")
+ ),
+ MTK_PIN(
+ 154, "GPIO154",
+ MTK_EINT_FUNCTION(0, 154),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO154"),
+ MTK_FUNCTION(1, "MSDC0_DAT0"),
+ MTK_FUNCTION(2, "NLD8")
+ ),
+ MTK_PIN(
+ 155, "GPIO155",
+ MTK_EINT_FUNCTION(0, 155),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO155"),
+ MTK_FUNCTION(1, "MSDC0_DAT1"),
+ MTK_FUNCTION(2, "NLD9")
+ ),
+ MTK_PIN(
+ 156, "GPIO156",
+ MTK_EINT_FUNCTION(0, 156),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO156"),
+ MTK_FUNCTION(1, "MSDC0_DAT2"),
+ MTK_FUNCTION(2, "NLD10")
+ ),
+ MTK_PIN(
+ 157, "GPIO157",
+ MTK_EINT_FUNCTION(0, 157),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO157"),
+ MTK_FUNCTION(1, "MSDC0_DAT3"),
+ MTK_FUNCTION(2, "NLD11")
+ ),
+ MTK_PIN(
+ 158, "GPIO158",
+ MTK_EINT_FUNCTION(0, 158),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO158"),
+ MTK_FUNCTION(1, "MSDC0_DAT4"),
+ MTK_FUNCTION(2, "NLD12")
+ ),
+ MTK_PIN(
+ 159, "GPIO159",
+ MTK_EINT_FUNCTION(0, 159),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO159"),
+ MTK_FUNCTION(1, "MSDC0_DAT5"),
+ MTK_FUNCTION(2, "NLD13")
+ ),
+ MTK_PIN(
+ 160, "GPIO160",
+ MTK_EINT_FUNCTION(0, 160),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO160"),
+ MTK_FUNCTION(1, "MSDC0_DAT6"),
+ MTK_FUNCTION(2, "NLD14")
+ ),
+ MTK_PIN(
+ 161, "GPIO161",
+ MTK_EINT_FUNCTION(0, 161),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO161"),
+ MTK_FUNCTION(1, "MSDC0_DAT7"),
+ MTK_FUNCTION(2, "NLD15")
+ ),
+ MTK_PIN(
+ 162, "GPIO162",
+ MTK_EINT_FUNCTION(0, 162),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO162"),
+ MTK_FUNCTION(1, "MSDC0_CMD")
+ ),
+ MTK_PIN(
+ 163, "GPIO163",
+ MTK_EINT_FUNCTION(0, 163),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO163"),
+ MTK_FUNCTION(1, "MSDC0_CLK")
+ ),
+ MTK_PIN(
+ 164, "GPIO164",
+ MTK_EINT_FUNCTION(0, 164),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO164"),
+ MTK_FUNCTION(1, "MSDC0_DSL")
+ ),
+ MTK_PIN(
+ 165, "GPIO165",
+ MTK_EINT_FUNCTION(0, 165),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO165"),
+ MTK_FUNCTION(1, "MSDC0_RSTB")
+ ),
+ MTK_PIN(
+ 166, "GPIO166",
+ MTK_EINT_FUNCTION(0, 166),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO166"),
+ MTK_FUNCTION(1, "SPI_CK_0"),
+ MTK_FUNCTION(3, "PWM0")
+ ),
+ MTK_PIN(
+ 167, "GPIO167",
+ MTK_EINT_FUNCTION(0, 167),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO167"),
+ MTK_FUNCTION(1, "SPI_MI_0"),
+ MTK_FUNCTION(3, "PWM1"),
+ MTK_FUNCTION(4, "SPI_MO_0")
+ ),
+ MTK_PIN(
+ 168, "GPIO168",
+ MTK_EINT_FUNCTION(2, 168),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO168"),
+ MTK_FUNCTION(1, "SPI_MO_0"),
+ MTK_FUNCTION(2, "MD_EINT3"),
+ MTK_FUNCTION(3, "PWM2"),
+ MTK_FUNCTION(4, "SPI_MI_0")
+ ),
+ MTK_PIN(
+ 169, "GPIO169",
+ MTK_EINT_FUNCTION(2, 169),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO169"),
+ MTK_FUNCTION(1, "SPI_CS_0"),
+ MTK_FUNCTION(2, "MD_EINT4"),
+ MTK_FUNCTION(3, "PWM3")
+ ),
+ MTK_PIN(
+ 170, "GPIO170",
+ MTK_EINT_FUNCTION(0, 170),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO170"),
+ MTK_FUNCTION(1, "MSDC1_CMD")
+ ),
+ MTK_PIN(
+ 171, "GPIO171",
+ MTK_EINT_FUNCTION(0, 171),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO171"),
+ MTK_FUNCTION(1, "MSDC1_DAT0")
+ ),
+ MTK_PIN(
+ 172, "GPIO172",
+ MTK_EINT_FUNCTION(0, 172),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO172"),
+ MTK_FUNCTION(1, "MSDC1_DAT1")
+ ),
+ MTK_PIN(
+ 173, "GPIO173",
+ MTK_EINT_FUNCTION(0, 173),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO173"),
+ MTK_FUNCTION(1, "MSDC1_DAT2")
+ ),
+ MTK_PIN(
+ 174, "GPIO174",
+ MTK_EINT_FUNCTION(0, 174),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO174"),
+ MTK_FUNCTION(1, "MSDC1_DAT3")
+ ),
+ MTK_PIN(
+ 175, "GPIO175",
+ MTK_EINT_FUNCTION(0, 175),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO175"),
+ MTK_FUNCTION(1, "MSDC1_CLK")
+ ),
+ MTK_PIN(
+ 176, "GPIO176",
+ MTK_EINT_FUNCTION(0, 176),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO176"),
+ MTK_FUNCTION(1, "PWRAP_SPIMI"),
+ MTK_FUNCTION(2, "PWRAP_SPIMO")
+ ),
+ MTK_PIN(
+ 177, "GPIO177",
+ MTK_EINT_FUNCTION(0, 177),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO177"),
+ MTK_FUNCTION(1, "PWRAP_SPIMO"),
+ MTK_FUNCTION(2, "PWRAP_SPIMI")
+ ),
+ MTK_PIN(
+ 178, "GPIO178",
+ MTK_EINT_FUNCTION(0, 178),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO178"),
+ MTK_FUNCTION(1, "PWRAP_SPICK")
+ ),
+ MTK_PIN(
+ 179, "GPIO179",
+ MTK_EINT_FUNCTION(0, 179),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO179"),
+ MTK_FUNCTION(1, "PWRAP_SPICS")
+ ),
+ MTK_PIN(
+ 180, "GPIO180",
+ MTK_EINT_FUNCTION(0, 180),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO180"),
+ MTK_FUNCTION(1, "AUD_CLK_MOSI"),
+ MTK_FUNCTION(2, "I2S1_WS"),
+ MTK_FUNCTION(3, "I2S2_WS"),
+ MTK_FUNCTION(4, "I2S0_WS")
+ ),
+ MTK_PIN(
+ 181, "GPIO181",
+ MTK_EINT_FUNCTION(0, 181),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO181"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO_1"),
+ MTK_FUNCTION(2, "I2S1_BCK"),
+ MTK_FUNCTION(3, "I2S2_BCK"),
+ MTK_FUNCTION(4, "I2S0_BCK")
+ ),
+ MTK_PIN(
+ 182, "GPIO182",
+ MTK_EINT_FUNCTION(0, 182),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO182"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI_1"),
+ MTK_FUNCTION(2, "I2S1_MCK"),
+ MTK_FUNCTION(3, "I2S2_MCK"),
+ MTK_FUNCTION(4, "I2S0_MCK")
+ ),
+ MTK_PIN(
+ 183, "GPIO183",
+ MTK_EINT_FUNCTION(0, 183),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO183"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO_2"),
+ MTK_FUNCTION(2, "I2S1_DO_1"),
+ MTK_FUNCTION(3, "I2S2_DI_1"),
+ MTK_FUNCTION(4, "I2S0_DO")
+ ),
+ MTK_PIN(
+ 184, "GPIO184",
+ MTK_EINT_FUNCTION(0, 184),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO184"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI_2"),
+ MTK_FUNCTION(2, "I2S1_DO_2"),
+ MTK_FUNCTION(3, "I2S2_DI_2"),
+ MTK_FUNCTION(4, "I2S0_DI")
+ ),
+ MTK_PIN(
+ 185, "GPIO185",
+ MTK_EINT_FUNCTION(0, 185),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO185"),
+ MTK_FUNCTION(1, "RTC32K_CK")
+ ),
+ MTK_PIN(
+ 186, "GPIO186",
+ MTK_EINT_FUNCTION(0, 186),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO186"),
+ MTK_FUNCTION(1, "DISP_PWM0"),
+ MTK_FUNCTION(2, "DISP_PWM1")
+ ),
+ MTK_PIN(
+ 187, "GPIO187",
+ MTK_EINT_FUNCTION(0, 187),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO187"),
+ MTK_FUNCTION(1, "SRCLKENAI")
+ ),
+ MTK_PIN(
+ 188, "GPIO188",
+ MTK_EINT_FUNCTION(0, 188),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO188"),
+ MTK_FUNCTION(1, "SRCLKENAI2")
+ ),
+ MTK_PIN(
+ 189, "GPIO189",
+ MTK_EINT_FUNCTION(0, 189),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO189"),
+ MTK_FUNCTION(1, "SRCLKENA0")
+ ),
+ MTK_PIN(
+ 190, "GPIO190",
+ MTK_EINT_FUNCTION(0, 190),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO190"),
+ MTK_FUNCTION(1, "SRCLKENA1")
+ ),
+ MTK_PIN(
+ 191, "GPIO191",
+ MTK_EINT_FUNCTION(0, 191),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO191"),
+ MTK_FUNCTION(1, "WATCHDOG_AO")
+ ),
+ MTK_PIN(
+ 192, "GPIO192",
+ MTK_EINT_FUNCTION(0, 192),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO192"),
+ MTK_FUNCTION(1, "I2S0_WS"),
+ MTK_FUNCTION(2, "I2S1_WS"),
+ MTK_FUNCTION(3, "I2S2_WS"),
+ MTK_FUNCTION(4, "NCEB1")
+ ),
+ MTK_PIN(
+ 193, "GPIO193",
+ MTK_EINT_FUNCTION(0, 193),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO193"),
+ MTK_FUNCTION(1, "I2S0_BCK"),
+ MTK_FUNCTION(2, "I2S1_BCK"),
+ MTK_FUNCTION(3, "I2S2_BCK"),
+ MTK_FUNCTION(4, "NRNB1")
+ ),
+ MTK_PIN(
+ 194, "GPIO194",
+ MTK_EINT_FUNCTION(0, 194),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO194"),
+ MTK_FUNCTION(1, "I2S0_MCK"),
+ MTK_FUNCTION(2, "I2S1_MCK"),
+ MTK_FUNCTION(3, "I2S2_MCK")
+ ),
+ MTK_PIN(
+ 195, "GPIO195",
+ MTK_EINT_FUNCTION(0, 195),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO195"),
+ MTK_FUNCTION(1, "I2S0_DO"),
+ MTK_FUNCTION(2, "I2S1_DO_1"),
+ MTK_FUNCTION(3, "I2S2_DI_1")
+ ),
+ MTK_PIN(
+ 196, "GPIO196",
+ MTK_EINT_FUNCTION(0, 196),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO196"),
+ MTK_FUNCTION(1, "I2S0_DI"),
+ MTK_FUNCTION(2, "I2S1_DO_2"),
+ MTK_FUNCTION(3, "I2S2_DI_2")
+ ),
+};
+
+#endif /* __PINCTRL_MTK_MT6795_H */
diff --git a/drivers/pinctrl/meson/pinctrl-meson-s4.c b/drivers/pinctrl/meson/pinctrl-meson-s4.c
index 3c7358f53302..cea77864b880 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-s4.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-s4.c
@@ -575,6 +575,7 @@ static struct meson_pmx_group meson_s4_periphs_groups[] = {
GROUP(tdm_d2_c, 4),
GROUP(tdm_d3_c, 4),
GROUP(tdm_fs1_c, 4),
+ GROUP(tdm_sclk1_c, 4),
GROUP(mclk_1_c, 4),
GROUP(tdm_d4_c, 4),
GROUP(tdm_d5_c, 4),
@@ -936,7 +937,7 @@ static const char * const iso7816_groups[] = {
};

static const char * const tdm_groups[] = {
- "tdm_d2_c", "tdm_d3_c", "tdm_fs1_c", "tdm_d4_c", "tdm_d5_c",
+ "tdm_d2_c", "tdm_d3_c", "tdm_fs1_c", "tdm_d4_c", "tdm_d5_c", "tdm_sclk1_c",
"tdm_fs1_d", "tdm_d4_d", "tdm_d3_d", "tdm_d2_d", "tdm_sclk1_d",
"tdm_sclk1_h", "tdm_fs1_h", "tdm_d2_h", "tdm_d3_h", "tdm_d4_h",
"tdm_d1", "tdm_d0", "tdm_fs0", "tdm_sclk0", "tdm_fs2", "tdm_sclk2",
diff --git a/drivers/pinctrl/mvebu/Kconfig b/drivers/pinctrl/mvebu/Kconfig
index 0d12894d3ee1..aa5883f09d7b 100644
--- a/drivers/pinctrl/mvebu/Kconfig
+++ b/drivers/pinctrl/mvebu/Kconfig
@@ -45,6 +45,10 @@ config PINCTRL_ORION
bool
select PINCTRL_MVEBU

+config PINCTRL_AC5
+ bool
+ select PINCTRL_MVEBU
+
config PINCTRL_ARMADA_37XX
bool
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/mvebu/Makefile b/drivers/pinctrl/mvebu/Makefile
index cd082dca4482..23458ab17c53 100644
--- a/drivers/pinctrl/mvebu/Makefile
+++ b/drivers/pinctrl/mvebu/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_PINCTRL_ARMADA_CP110) += pinctrl-armada-cp110.o
obj-$(CONFIG_PINCTRL_ARMADA_XP) += pinctrl-armada-xp.o
obj-$(CONFIG_PINCTRL_ARMADA_37XX) += pinctrl-armada-37xx.o
obj-$(CONFIG_PINCTRL_ORION) += pinctrl-orion.o
+obj-$(CONFIG_PINCTRL_AC5) += pinctrl-ac5.o
diff --git a/drivers/pinctrl/mvebu/pinctrl-ac5.c b/drivers/pinctrl/mvebu/pinctrl-ac5.c
new file mode 100644
index 000000000000..292633e61129
--- /dev/null
+++ b/drivers/pinctrl/mvebu/pinctrl-ac5.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell ac5 pinctrl driver based on mvebu pinctrl core
+ *
+ * Copyright (C) 2021 Marvell
+ *
+ * Noam Liron <lnoam@marvell.com>
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-mvebu.h"
+
+static struct mvebu_mpp_mode ac5_mpp_modes[] = {
+ MPP_MODE(0,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d0"),
+ MPP_FUNCTION(2, "nand", "io4")),
+ MPP_MODE(1,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d1"),
+ MPP_FUNCTION(2, "nand", "io3")),
+ MPP_MODE(2,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d2"),
+ MPP_FUNCTION(2, "nand", "io2")),
+ MPP_MODE(3,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d3"),
+ MPP_FUNCTION(2, "nand", "io7")),
+ MPP_MODE(4,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d4"),
+ MPP_FUNCTION(2, "nand", "io6"),
+ MPP_FUNCTION(3, "uart3", "txd"),
+ MPP_FUNCTION(4, "uart2", "txd")),
+ MPP_MODE(5,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d5"),
+ MPP_FUNCTION(2, "nand", "io5"),
+ MPP_FUNCTION(3, "uart3", "rxd"),
+ MPP_FUNCTION(4, "uart2", "rxd")),
+ MPP_MODE(6,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d6"),
+ MPP_FUNCTION(2, "nand", "io0"),
+ MPP_FUNCTION(3, "i2c1", "sck")),
+ MPP_MODE(7,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d7"),
+ MPP_FUNCTION(2, "nand", "io1"),
+ MPP_FUNCTION(3, "i2c1", "sda")),
+ MPP_MODE(8,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "clk"),
+ MPP_FUNCTION(2, "nand", "wen")),
+ MPP_MODE(9,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "cmd"),
+ MPP_FUNCTION(2, "nand", "ale")),
+ MPP_MODE(10,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "ds"),
+ MPP_FUNCTION(2, "nand", "cle")),
+ MPP_MODE(11,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "rst"),
+ MPP_FUNCTION(2, "nand", "cen")),
+ MPP_MODE(12,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "spi0", "clk")),
+ MPP_MODE(13,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "spi0", "csn")),
+ MPP_MODE(14,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "spi0", "mosi")),
+ MPP_MODE(15,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "spi0", "miso")),
+ MPP_MODE(16,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "spi0", "wpn"),
+ MPP_FUNCTION(2, "nand", "ren"),
+ MPP_FUNCTION(3, "uart1", "txd")),
+ MPP_MODE(17,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "spi0", "hold"),
+ MPP_FUNCTION(2, "nand", "rb"),
+ MPP_FUNCTION(3, "uart1", "rxd")),
+ MPP_MODE(18,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "tsen_int", NULL),
+ MPP_FUNCTION(2, "uart2", "rxd"),
+ MPP_FUNCTION(3, "wd_int", NULL)),
+ MPP_MODE(19,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev_init_done", NULL),
+ MPP_FUNCTION(2, "uart2", "txd")),
+ MPP_MODE(20,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(2, "i2c1", "sck"),
+ MPP_FUNCTION(3, "spi1", "clk"),
+ MPP_FUNCTION(4, "uart3", "txd")),
+ MPP_MODE(21,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(2, "i2c1", "sda"),
+ MPP_FUNCTION(3, "spi1", "csn"),
+ MPP_FUNCTION(4, "uart3", "rxd")),
+ MPP_MODE(22,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(3, "spi1", "mosi")),
+ MPP_MODE(23,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(3, "spi1", "miso")),
+ MPP_MODE(24,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "wd_int", NULL),
+ MPP_FUNCTION(2, "uart2", "txd"),
+ MPP_FUNCTION(3, "uartsd", "txd")),
+ MPP_MODE(25,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "int_out", NULL),
+ MPP_FUNCTION(2, "uart2", "rxd"),
+ MPP_FUNCTION(3, "uartsd", "rxd")),
+ MPP_MODE(26,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "i2c0", "sck"),
+ MPP_FUNCTION(2, "ptp", "clk1"),
+ MPP_FUNCTION(3, "uart3", "txd")),
+ MPP_MODE(27,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "i2c0", "sda"),
+ MPP_FUNCTION(2, "ptp", "pulse"),
+ MPP_FUNCTION(3, "uart3", "rxd")),
+ MPP_MODE(28,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "xg", "mdio"),
+ MPP_FUNCTION(2, "ge", "mdio"),
+ MPP_FUNCTION(3, "uart3", "txd")),
+ MPP_MODE(29,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "xg", "mdio"),
+ MPP_FUNCTION(2, "ge", "mdio"),
+ MPP_FUNCTION(3, "uart3", "rxd")),
+ MPP_MODE(30,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "xg", "mdio"),
+ MPP_FUNCTION(2, "ge", "mdio"),
+ MPP_FUNCTION(3, "ge", "mdio")),
+ MPP_MODE(31,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "xg", "mdio"),
+ MPP_FUNCTION(2, "ge", "mdio"),
+ MPP_FUNCTION(3, "ge", "mdio")),
+ MPP_MODE(32,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "uart0", "txd")),
+ MPP_MODE(33,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "uart0", "rxd"),
+ MPP_FUNCTION(2, "ptp", "clk1"),
+ MPP_FUNCTION(3, "ptp", "pulse")),
+ MPP_MODE(34,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ge", "mdio"),
+ MPP_FUNCTION(2, "uart3", "rxd")),
+ MPP_MODE(35,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ge", "mdio"),
+ MPP_FUNCTION(2, "uart3", "txd"),
+ MPP_FUNCTION(3, "pcie", "rstoutn")),
+ MPP_MODE(36,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ptp", "clk0_tp"),
+ MPP_FUNCTION(2, "ptp", "clk1_tp")),
+ MPP_MODE(37,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ptp", "pulse_tp"),
+ MPP_FUNCTION(2, "wd_int", NULL)),
+ MPP_MODE(38,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "synce", "clk_out0")),
+ MPP_MODE(39,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "synce", "clk_out1")),
+ MPP_MODE(40,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ptp", "pclk_out0"),
+ MPP_FUNCTION(2, "ptp", "pclk_out1")),
+ MPP_MODE(41,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ptp", "ref_clk"),
+ MPP_FUNCTION(2, "ptp", "clk1"),
+ MPP_FUNCTION(3, "ptp", "pulse"),
+ MPP_FUNCTION(4, "uart2", "txd"),
+ MPP_FUNCTION(5, "i2c1", "sck")),
+ MPP_MODE(42,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ptp", "clk0"),
+ MPP_FUNCTION(2, "ptp", "clk1"),
+ MPP_FUNCTION(3, "ptp", "pulse"),
+ MPP_FUNCTION(4, "uart2", "rxd"),
+ MPP_FUNCTION(5, "i2c1", "sda")),
+ MPP_MODE(43,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "led", "clk")),
+ MPP_MODE(44,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "led", "stb")),
+ MPP_MODE(45,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "led", "data")),
+};
+
+static struct mvebu_pinctrl_soc_info ac5_pinctrl_info;
+
+static const struct of_device_id ac5_pinctrl_of_match[] = {
+ {
+ .compatible = "marvell,ac5-pinctrl",
+ },
+ { },
+};
+
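+/* A single MMIO control covers all 46 MPPs (0-45). */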
+static const struct mvebu_mpp_ctrl ac5_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 45, NULL, mvebu_mmio_mpp_ctrl), };
+
+static struct pinctrl_gpio_range ac5_mpp_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 46), };
+
+static int ac5_pinctrl_probe(struct platform_device *pdev)
+{
+ struct mvebu_pinctrl_soc_info *soc = &ac5_pinctrl_info;
+
+ soc->variant = 0; /* no variants for ac5 */
+ soc->controls = ac5_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(ac5_mpp_controls);
+ soc->gpioranges = ac5_mpp_gpio_ranges;
+ soc->ngpioranges = ARRAY_SIZE(ac5_mpp_gpio_ranges);
+ soc->modes = ac5_mpp_modes;
+ soc->nmodes = ac5_mpp_controls[0].npins;
+
+ pdev->dev.platform_data = soc;
+
+ return mvebu_pinctrl_simple_mmio_probe(pdev);
+}
+
+static struct platform_driver ac5_pinctrl_driver = {
+ .driver = {
+ .name = "ac5-pinctrl",
+ .of_match_table = of_match_ptr(ac5_pinctrl_of_match),
+ },
+ .probe = ac5_pinctrl_probe,
+};
+builtin_platform_driver(ac5_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index ef4118e49f16..a140b6bfbfaa 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -764,7 +764,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
for (i = 0; i < nr_irq_parent; i++) {
int irq = irq_of_parse_and_map(np, i);
- if (irq < 0)
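+ /* irq_of_parse_and_map() returns 0 on failure, not a negative errno. */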
+ if (!irq)
continue;
girq->parents[i] = irq;
}
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index 0b9b6cbfd10c..ac3d4d91266d 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -440,6 +440,10 @@ static const unsigned mc2_a_1_pins[] = { DB8500_PIN_A5, DB8500_PIN_B4,
DB8500_PIN_C8, DB8500_PIN_A12, DB8500_PIN_C10, DB8500_PIN_B10,
DB8500_PIN_B9, DB8500_PIN_A9, DB8500_PIN_C7, DB8500_PIN_A7,
DB8500_PIN_C5 };
+/* MC2 without the feedback clock */
+static const unsigned mc2_a_2_pins[] = { DB8500_PIN_A5, DB8500_PIN_B4,
+ DB8500_PIN_A12, DB8500_PIN_C10, DB8500_PIN_B10, DB8500_PIN_B9,
+ DB8500_PIN_A9, DB8500_PIN_C7, DB8500_PIN_A7, DB8500_PIN_C5 };
static const unsigned ssp1_a_1_pins[] = { DB8500_PIN_C9, DB8500_PIN_B11,
DB8500_PIN_C12, DB8500_PIN_C11 };
static const unsigned ssp0_a_1_pins[] = { DB8500_PIN_D12, DB8500_PIN_B13,
@@ -699,6 +703,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(kp_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(kpskaskb_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc2_a_1, NMK_GPIO_ALT_A),
+ DB8500_PIN_GROUP(mc2_a_2, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(ssp1_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(ssp0_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
@@ -856,7 +861,7 @@ DB8500_FUNC_GROUPS(lcd, "lcdvsi0_a_1", "lcdvsi1_a_1", "lcd_d0_d7_a_1",
"lcd_d8_d11_a_1", "lcd_d12_d15_a_1", "lcd_d12_d23_a_1", "lcd_b_1",
"lcd_d16_d23_b_1");
DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_a_2", "kp_b_1", "kp_b_2", "kp_c_1", "kp_oc1_1");
-DB8500_FUNC_GROUPS(mc2, "mc2_a_1", "mc2rstn_c_1");
+DB8500_FUNC_GROUPS(mc2, "mc2_a_1", "mc2_a_2", "mc2rstn_c_1");
DB8500_FUNC_GROUPS(ssp1, "ssp1_a_1");
DB8500_FUNC_GROUPS(ssp0, "ssp0_a_1");
DB8500_FUNC_GROUPS(i2c0, "i2c0_a_1");
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 4757bf964d3c..640e50d94f27 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1113,6 +1113,7 @@ static int nmk_gpio_probe(struct platform_device *dev)
spin_lock_init(&nmk_chip->lock);
chip = &nmk_chip->chip;
+ chip->parent = &dev->dev;
chip->request = gpiochip_generic_request;
chip->free = gpiochip_generic_free;
chip->get_direction = nmk_gpio_get_dir;
@@ -1154,7 +1155,6 @@ static int nmk_gpio_probe(struct platform_device *dev)
clk_enable(nmk_chip->clk);
nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI);
clk_disable(nmk_chip->clk);
- chip->of_node = np;
ret = gpiochip_add_data(chip, nmk_chip);
if (ret)
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 4828aa25e5c9..64d8a568b3db 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -1898,9 +1898,9 @@ static int npcm7xx_gpio_of(struct npcm7xx_pinctrl *pctrl)
}
ret = irq_of_parse_and_map(np, 0);
- if (ret < 0) {
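+ /* irq_of_parse_and_map() returns 0 on failure, hence the -EINVAL. */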
+ if (!ret) {
dev_err(dev, "No IRQ for GPIO bank %u\n", id);
- return ret;
+ return -EINVAL;
}
pctrl->gpio_bank[id].irq = ret;
pctrl->gpio_bank[id].irq_chip = npcmgpio_irqchip;
diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c
index 5e610849dfc3..2490384ef1b8 100644
--- a/drivers/pinctrl/pinctrl-apple-gpio.c
+++ b/drivers/pinctrl/pinctrl-apple-gpio.c
@@ -71,6 +71,7 @@ struct regmap_config regmap_config = {
.max_register = 512 * sizeof(u32),
.num_reg_defaults_raw = 512,
.use_relaxed_mmio = true,
+ .use_raw_spinlock = true,
};
/* No locking needed to mask/unmask IRQs as the interrupt mode is per pin-register. */
@@ -509,6 +510,7 @@ static const struct of_device_id apple_gpio_pinctrl_of_match[] = {
{ .compatible = "apple,pinctrl", },
{ }
};
+MODULE_DEVICE_TABLE(of, apple_gpio_pinctrl_of_match);
static struct platform_driver apple_gpio_pinctrl_driver = {
.driver = {
diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
index 3f0143087cc7..99cf24eb67ae 100644
--- a/drivers/pinctrl/pinctrl-equilibrium.c
+++ b/drivers/pinctrl/pinctrl-equilibrium.c
@@ -11,6 +11,7 @@
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include "core.h"
#include "pinconf.h"
@@ -167,11 +168,9 @@ static int gpiochip_setup(struct device *dev, struct eqbr_gpio_ctrl *gctrl)
gc = &gctrl->chip;
gc->label = gctrl->name;
-#if defined(CONFIG_OF_GPIO)
- gc->of_node = gctrl->node;
-#endif
+ gc->fwnode = gctrl->fwnode;
- if (!of_property_read_bool(gctrl->node, "interrupt-controller")) {
+ if (!fwnode_property_read_bool(gctrl->fwnode, "interrupt-controller")) {
dev_dbg(dev, "gc %s: doesn't act as interrupt controller!\n",
gctrl->name);
return 0;
@@ -209,7 +208,7 @@ static int gpiolib_reg(struct eqbr_pinctrl_drv_data *drvdata)
for (i = 0; i < drvdata->nr_gpio_ctrls; i++) {
gctrl = drvdata->gpio_ctrls + i;
- np = gctrl->node;
+ np = to_of_node(gctrl->fwnode);
gctrl->name = devm_kasprintf(dev, GFP_KERNEL, "gpiochip%d", i);
if (!gctrl->name)
@@ -895,7 +894,7 @@ static int pinbank_probe(struct eqbr_pinctrl_drv_data *drvdata)
pinbank_init(np_gpio, drvdata, banks + i, i);
- gctrls[i].node = np_gpio;
+ gctrls[i].fwnode = of_fwnode_handle(np_gpio);
gctrls[i].bank = banks + i;
i++;
}
diff --git a/drivers/pinctrl/pinctrl-equilibrium.h b/drivers/pinctrl/pinctrl-equilibrium.h
index 83cb7dafc657..0c635a5b79f0 100644
--- a/drivers/pinctrl/pinctrl-equilibrium.h
+++ b/drivers/pinctrl/pinctrl-equilibrium.h
@@ -95,22 +95,24 @@ struct eqbr_pin_bank {
u32 aval_pinmap;
};
+struct fwnode_handle;
+
/**
* struct eqbr_gpio_ctrl: represent a gpio controller.
- * @node: device node of gpio controller.
+ * @chip: gpio chip.
+ * @fwnode: firmware node of gpio controller.
* @bank: pointer to corresponding pin bank.
* @membase: base address of the gpio controller.
- * @chip: gpio chip.
* @ic: irq chip.
* @name: gpio chip name.
* @virq: irq number of the gpio chip to parent's irq domain.
* @lock: spin lock to protect gpio register write.
*/
struct eqbr_gpio_ctrl {
- struct device_node *node;
+ struct gpio_chip chip;
+ struct fwnode_handle *fwnode;
struct eqbr_pin_bank *bank;
void __iomem *membase;
- struct gpio_chip chip;
struct irq_chip ic;
const char *name;
unsigned int virq;
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index fa6becca1788..1ca11616db74 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -139,6 +139,30 @@ struct ingenic_gpio_chip {
unsigned int irq, reg_base;
};
+static const unsigned long enabled_socs =
+ IS_ENABLED(CONFIG_MACH_JZ4730) << ID_JZ4730 |
+ IS_ENABLED(CONFIG_MACH_JZ4740) << ID_JZ4740 |
+ IS_ENABLED(CONFIG_MACH_JZ4725B) << ID_JZ4725B |
+ IS_ENABLED(CONFIG_MACH_JZ4750) << ID_JZ4750 |
+ IS_ENABLED(CONFIG_MACH_JZ4755) << ID_JZ4755 |
+ IS_ENABLED(CONFIG_MACH_JZ4760) << ID_JZ4760 |
+ IS_ENABLED(CONFIG_MACH_JZ4770) << ID_JZ4770 |
+ IS_ENABLED(CONFIG_MACH_JZ4775) << ID_JZ4775 |
+ IS_ENABLED(CONFIG_MACH_JZ4780) << ID_JZ4780 |
+ IS_ENABLED(CONFIG_MACH_X1000) << ID_X1000 |
+ IS_ENABLED(CONFIG_MACH_X1500) << ID_X1500 |
+ IS_ENABLED(CONFIG_MACH_X1830) << ID_X1830 |
+ IS_ENABLED(CONFIG_MACH_X2000) << ID_X2000 |
+ IS_ENABLED(CONFIG_MACH_X2100) << ID_X2100;
+
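+/*
+ * Return true when the SoC being driven is @version or newer. The
+ * enabled_socs bitmask lets the compiler drop the runtime comparison
+ * whenever the enabled SoC Kconfig options already decide the answer.
+ */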
+static bool
+is_soc_or_above(const struct ingenic_pinctrl *jzpc, enum jz_version version)
+{
+ return (enabled_socs >> version) &&
+ (!(enabled_socs & GENMASK(version - 1, 0))
+ || jzpc->info->version >= version);
+}
+
static const u32 jz4730_pull_ups[4] = {
0x3fa3320f, 0xf200ffff, 0xffffffff, 0xffffffff,
};
@@ -3242,7 +3266,7 @@ static u32 ingenic_gpio_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
static void ingenic_gpio_set_bit(struct ingenic_gpio_chip *jzgc,
u8 reg, u8 offset, bool set)
{
- if (jzgc->jzpc->info->version == ID_JZ4730) {
+ if (!is_soc_or_above(jzgc->jzpc, ID_JZ4740)) {
regmap_update_bits(jzgc->jzpc->map, jzgc->reg_base + reg,
BIT(offset), set ? BIT(offset) : 0);
return;
@@ -3300,9 +3324,9 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
u8 offset, int value)
{
- if (jzgc->jzpc->info->version >= ID_JZ4770)
+ if (is_soc_or_above(jzgc->jzpc, ID_JZ4770))
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
- else if (jzgc->jzpc->info->version >= ID_JZ4740)
+ else if (is_soc_or_above(jzgc->jzpc, ID_JZ4740))
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
else
ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_DATA, offset, !!value);
@@ -3337,10 +3361,10 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
break;
}
- if (jzgc->jzpc->info->version >= ID_JZ4770) {
+ if (is_soc_or_above(jzgc->jzpc, ID_JZ4770)) {
reg1 = JZ4770_GPIO_PAT1;
reg2 = JZ4770_GPIO_PAT0;
- } else if (jzgc->jzpc->info->version >= ID_JZ4740) {
+ } else if (is_soc_or_above(jzgc->jzpc, ID_JZ4740)) {
reg1 = JZ4740_GPIO_TRIG;
reg2 = JZ4740_GPIO_DIR;
} else {
@@ -3350,12 +3374,12 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
return;
}
- if (jzgc->jzpc->info->version >= ID_X2000) {
+ if (is_soc_or_above(jzgc->jzpc, ID_X2000)) {
ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, val1);
ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, val2);
ingenic_gpio_shadow_set_bit_load(jzgc);
ingenic_gpio_set_bit(jzgc, X2000_GPIO_EDG, offset, val3);
- } else if (jzgc->jzpc->info->version >= ID_X1000) {
+ } else if (is_soc_or_above(jzgc->jzpc, ID_X1000)) {
ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, val1);
ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, val2);
ingenic_gpio_shadow_set_bit_load(jzgc);
@@ -3371,7 +3395,7 @@ static void ingenic_gpio_irq_mask(struct irq_data *irqd)
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
int irq = irqd->hwirq;
- if (jzgc->jzpc->info->version >= ID_JZ4740)
+ if (is_soc_or_above(jzgc->jzpc, ID_JZ4740))
ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, true);
else
ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIMR, irq, true);
@@ -3383,7 +3407,7 @@ static void ingenic_gpio_irq_unmask(struct irq_data *irqd)
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
int irq = irqd->hwirq;
- if (jzgc->jzpc->info->version >= ID_JZ4740)
+ if (is_soc_or_above(jzgc->jzpc, ID_JZ4740))
ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIMR, irq, false);
@@ -3395,9 +3419,9 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
int irq = irqd->hwirq;
- if (jzgc->jzpc->info->version >= ID_JZ4770)
+ if (is_soc_or_above(jzgc->jzpc, ID_JZ4770))
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
- else if (jzgc->jzpc->info->version >= ID_JZ4740)
+ else if (is_soc_or_above(jzgc->jzpc, ID_JZ4740))
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
else
ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIER, irq, true);
@@ -3413,9 +3437,9 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
ingenic_gpio_irq_mask(irqd);
- if (jzgc->jzpc->info->version >= ID_JZ4770)
+ if (is_soc_or_above(jzgc->jzpc, ID_JZ4770))
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
- else if (jzgc->jzpc->info->version >= ID_JZ4740)
+ else if (is_soc_or_above(jzgc->jzpc, ID_JZ4740))
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIER, irq, false);
@@ -3429,7 +3453,7 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
bool high;
if ((irqd_get_trigger_type(irqd) == IRQ_TYPE_EDGE_BOTH) &&
- (jzgc->jzpc->info->version < ID_X2000)) {
+ !is_soc_or_above(jzgc->jzpc, ID_X2000)) {
/*
* Switch to an interrupt for the opposite edge to the one that
* triggered the interrupt being ACKed.
@@ -3441,9 +3465,9 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH);
}
- if (jzgc->jzpc->info->version >= ID_JZ4770)
+ if (is_soc_or_above(jzgc->jzpc, ID_JZ4770))
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
- else if (jzgc->jzpc->info->version >= ID_JZ4740)
+ else if (is_soc_or_above(jzgc->jzpc, ID_JZ4740))
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
else
ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPFR, irq, false);
@@ -3468,7 +3492,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
irq_set_handler_locked(irqd, handle_bad_irq);
}
- if ((type == IRQ_TYPE_EDGE_BOTH) && (jzgc->jzpc->info->version < ID_X2000)) {
+ if ((type == IRQ_TYPE_EDGE_BOTH) && !is_soc_or_above(jzgc->jzpc, ID_X2000)) {
/*
* The hardware does not support interrupts on both edges. The
* best we can do is to set up a single-edge interrupt and then
@@ -3500,9 +3524,9 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(irq_chip, desc);
- if (jzgc->jzpc->info->version >= ID_JZ4770)
+ if (is_soc_or_above(jzgc->jzpc, ID_JZ4770))
flag = ingenic_gpio_read_reg(jzgc, JZ4770_GPIO_FLAG);
- else if (jzgc->jzpc->info->version >= ID_JZ4740)
+ else if (is_soc_or_above(jzgc->jzpc, ID_JZ4740))
flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
else
flag = ingenic_gpio_read_reg(jzgc, JZ4730_GPIO_GPFR);
@@ -3547,14 +3571,14 @@ static inline void ingenic_config_pin(struct ingenic_pinctrl *jzpc,
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
if (set) {
- if (jzpc->info->version >= ID_JZ4740)
+ if (is_soc_or_above(jzpc, ID_JZ4740))
regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
REG_SET(reg), BIT(idx));
else
regmap_set_bits(jzpc->map, offt * jzpc->info->reg_offset +
reg, BIT(idx));
} else {
- if (jzpc->info->version >= ID_JZ4740)
+ if (is_soc_or_above(jzpc, ID_JZ4740))
regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
REG_CLEAR(reg), BIT(idx));
else
@@ -3613,12 +3637,12 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
struct ingenic_pinctrl *jzpc = jzgc->jzpc;
unsigned int pin = gc->base + offset;
- if (jzpc->info->version >= ID_JZ4770) {
+ if (is_soc_or_above(jzpc, ID_JZ4770)) {
if (ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_INT) ||
ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PAT1))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
- } else if (jzpc->info->version == ID_JZ4730) {
+ } else if (!is_soc_or_above(jzpc, ID_JZ4740)) {
if (!ingenic_get_pin_config(jzpc, pin, JZ4730_GPIO_GPDIR))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
@@ -3669,18 +3693,18 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
dev_dbg(jzpc->dev, "set pin P%c%u to function %u\n",
'A' + offt, idx, func);
- if (jzpc->info->version >= ID_X1000) {
+ if (is_soc_or_above(jzpc, ID_X1000)) {
ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, false);
ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->info->version >= ID_JZ4770) {
+ } else if (is_soc_or_above(jzpc, ID_JZ4770)) {
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
- } else if (jzpc->info->version >= ID_JZ4740) {
+ } else if (is_soc_or_above(jzpc, ID_JZ4740)) {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func & 0x1);
@@ -3738,16 +3762,16 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
dev_dbg(pctldev->dev, "set pin P%c%u to %sput\n",
'A' + offt, idx, input ? "in" : "out");
- if (jzpc->info->version >= ID_X1000) {
+ if (is_soc_or_above(jzpc, ID_X1000)) {
ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true);
ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->info->version >= ID_JZ4770) {
+ } else if (is_soc_or_above(jzpc, ID_JZ4770)) {
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
- } else if (jzpc->info->version >= ID_JZ4740) {
+ } else if (is_soc_or_above(jzpc, ID_JZ4740)) {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false);
@@ -3779,7 +3803,7 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
unsigned int bias, reg;
bool pull, pullup, pulldown;
- if (jzpc->info->version >= ID_X2000) {
+ if (is_soc_or_above(jzpc, ID_X2000)) {
pullup = ingenic_get_pin_config(jzpc, pin, X2000_GPIO_PEPU) &&
!ingenic_get_pin_config(jzpc, pin, X2000_GPIO_PEPD) &&
(jzpc->info->pull_ups[offt] & BIT(idx));
@@ -3787,7 +3811,7 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
!ingenic_get_pin_config(jzpc, pin, X2000_GPIO_PEPU) &&
(jzpc->info->pull_downs[offt] & BIT(idx));
- } else if (jzpc->info->version >= ID_X1830) {
+ } else if (is_soc_or_above(jzpc, ID_X1830)) {
unsigned int half = PINS_PER_GPIO_CHIP / 2;
unsigned int idxh = (pin % half) * 2;
@@ -3804,9 +3828,9 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
pulldown = (bias == GPIO_PULL_DOWN) && (jzpc->info->pull_downs[offt] & BIT(idx));
} else {
- if (jzpc->info->version >= ID_JZ4770)
+ if (is_soc_or_above(jzpc, ID_JZ4770))
pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
- else if (jzpc->info->version >= ID_JZ4740)
+ else if (is_soc_or_above(jzpc, ID_JZ4740))
pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
else
pull = ingenic_get_pin_config(jzpc, pin, JZ4730_GPIO_GPPUR);
@@ -3835,9 +3859,9 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- if (jzpc->info->version >= ID_X2000)
+ if (is_soc_or_above(jzpc, ID_X2000))
reg = X2000_GPIO_SMT;
- else if (jzpc->info->version >= ID_X1830)
+ else if (is_soc_or_above(jzpc, ID_X1830))
reg = X1830_GPIO_SMT;
else
return -EINVAL;
@@ -3846,9 +3870,9 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_SLEW_RATE:
- if (jzpc->info->version >= ID_X2000)
+ if (is_soc_or_above(jzpc, ID_X2000))
reg = X2000_GPIO_SR;
- else if (jzpc->info->version >= ID_X1830)
+ else if (is_soc_or_above(jzpc, ID_X1830))
reg = X1830_GPIO_SR;
else
return -EINVAL;
@@ -3867,7 +3891,7 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
unsigned int pin, unsigned int bias)
{
- if (jzpc->info->version >= ID_X2000) {
+ if (is_soc_or_above(jzpc, ID_X2000)) {
switch (bias) {
case GPIO_PULL_UP:
ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, false);
@@ -3885,7 +3909,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, false);
}
- } else if (jzpc->info->version >= ID_X1830) {
+ } else if (is_soc_or_above(jzpc, ID_X1830)) {
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int half = PINS_PER_GPIO_CHIP / 2;
unsigned int idxh = (pin % half) * 2;
@@ -3903,9 +3927,9 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
REG_SET(X1830_GPIO_PEH), bias << idxh);
}
- } else if (jzpc->info->version >= ID_JZ4770) {
+ } else if (is_soc_or_above(jzpc, ID_JZ4770)) {
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PEN, !bias);
- } else if (jzpc->info->version >= ID_JZ4740) {
+ } else if (is_soc_or_above(jzpc, ID_JZ4740)) {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
} else {
ingenic_config_pin(jzpc, pin, JZ4730_GPIO_GPPUR, bias);
@@ -3915,7 +3939,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
static void ingenic_set_schmitt_trigger(struct ingenic_pinctrl *jzpc,
unsigned int pin, bool enable)
{
- if (jzpc->info->version >= ID_X2000)
+ if (is_soc_or_above(jzpc, ID_X2000))
ingenic_config_pin(jzpc, pin, X2000_GPIO_SMT, enable);
else
ingenic_config_pin(jzpc, pin, X1830_GPIO_SMT, enable);
@@ -3924,9 +3948,9 @@ static void ingenic_set_schmitt_trigger(struct ingenic_pinctrl *jzpc,
static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
unsigned int pin, bool high)
{
- if (jzpc->info->version >= ID_JZ4770)
+ if (is_soc_or_above(jzpc, ID_JZ4770))
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, high);
- else if (jzpc->info->version >= ID_JZ4740)
+ else if (is_soc_or_above(jzpc, ID_JZ4740))
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
else
ingenic_config_pin(jzpc, pin, JZ4730_GPIO_DATA, high);
@@ -3935,7 +3959,7 @@ static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
static void ingenic_set_slew_rate(struct ingenic_pinctrl *jzpc,
unsigned int pin, unsigned int slew)
{
- if (jzpc->info->version >= ID_X2000)
+ if (is_soc_or_above(jzpc, ID_X2000))
ingenic_config_pin(jzpc, pin, X2000_GPIO_SR, slew);
else
ingenic_config_pin(jzpc, pin, X1830_GPIO_SR, slew);
@@ -3991,7 +4015,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- if (jzpc->info->version < ID_X1830)
+ if (!is_soc_or_above(jzpc, ID_X1830))
return -EINVAL;
ingenic_set_schmitt_trigger(jzpc, pin, arg);
@@ -4006,7 +4030,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
case PIN_CONFIG_SLEW_RATE:
- if (jzpc->info->version < ID_X1830)
+ if (!is_soc_or_above(jzpc, ID_X1830))
return -EINVAL;
ingenic_set_slew_rate(jzpc, pin, arg);
diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
index 1ee94574f0af..ab723ab4ec1d 100644
--- a/drivers/pinctrl/pinctrl-max77620.c
+++ b/drivers/pinctrl/pinctrl-max77620.c
@@ -668,5 +668,4 @@ module_platform_driver(max77620_pinctrl_driver);
MODULE_DESCRIPTION("MAX77620/MAX20024 pin control driver");
MODULE_AUTHOR("Chaitanya Bandi<bandik@nvidia.com>");
MODULE_AUTHOR("Laxman Dewangan<ldewangan@nvidia.com>");
-MODULE_ALIAS("platform:max77620-pinctrl");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index 80a8939ad0c0..6f55bf7d5e05 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -688,11 +688,17 @@ static void microchip_sgpio_irq_setreg(struct irq_data *data,
static void microchip_sgpio_irq_mask(struct irq_data *data)
{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+
microchip_sgpio_irq_setreg(data, REG_INT_ENABLE, true);
+ gpiochip_disable_irq(chip, data->hwirq);
}
static void microchip_sgpio_irq_unmask(struct irq_data *data)
{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+
+ gpiochip_enable_irq(chip, data->hwirq);
microchip_sgpio_irq_setreg(data, REG_INT_ENABLE, false);
}
@@ -746,6 +752,8 @@ static const struct irq_chip microchip_sgpio_irqchip = {
.irq_ack = microchip_sgpio_irq_ack,
.irq_unmask = microchip_sgpio_irq_unmask,
.irq_set_type = microchip_sgpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static void sgpio_irq_handler(struct irq_desc *desc)
@@ -840,7 +848,7 @@ static int microchip_sgpio_register_bank(struct device *dev,
gc = &bank->gpio;
gc->label = pctl_desc->name;
gc->parent = dev;
- gc->of_node = to_of_node(fwnode);
+ gc->fwnode = fwnode;
gc->owner = THIS_MODULE;
gc->get_direction = microchip_sgpio_get_direction;
gc->direction_input = microchip_sgpio_direction_input;
@@ -861,11 +869,7 @@ static int microchip_sgpio_register_bank(struct device *dev,
if (irq) {
struct gpio_irq_chip *girq = &gc->irq;
- girq->chip = devm_kmemdup(dev, &microchip_sgpio_irqchip,
- sizeof(microchip_sgpio_irqchip),
- GFP_KERNEL);
- if (!girq->chip)
- return -ENOMEM;
+ gpio_irq_chip_set_chip(girq, &microchip_sgpio_irqchip);
girq->parent_handler = sgpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(dev, 1,
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 6a956ee94494..5f4a8c5c6650 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -19,6 +19,7 @@
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include "core.h"
@@ -60,6 +61,7 @@ enum {
FUNC_CAN0_a,
FUNC_CAN0_b,
FUNC_CAN1,
+ FUNC_CLKMON,
FUNC_NONE,
FUNC_FC0_a,
FUNC_FC0_b,
@@ -138,6 +140,8 @@ enum {
FUNC_PTPSYNC_6,
FUNC_PTPSYNC_7,
FUNC_PWM,
+ FUNC_PWM_a,
+ FUNC_PWM_b,
FUNC_QSPI1,
FUNC_QSPI2,
FUNC_R,
@@ -184,6 +188,7 @@ static const char *const ocelot_function_names[] = {
[FUNC_CAN0_a] = "can0_a",
[FUNC_CAN0_b] = "can0_b",
[FUNC_CAN1] = "can1",
+ [FUNC_CLKMON] = "clkmon",
[FUNC_NONE] = "none",
[FUNC_FC0_a] = "fc0_a",
[FUNC_FC0_b] = "fc0_b",
@@ -262,6 +267,8 @@ static const char *const ocelot_function_names[] = {
[FUNC_PTPSYNC_6] = "ptpsync_6",
[FUNC_PTPSYNC_7] = "ptpsync_7",
[FUNC_PWM] = "pwm",
+ [FUNC_PWM_a] = "pwm_a",
+ [FUNC_PWM_b] = "pwm_b",
[FUNC_QSPI1] = "qspi1",
[FUNC_QSPI2] = "qspi2",
[FUNC_R] = "reserved",
@@ -977,11 +984,11 @@ LAN966X_P(23, GPIO, NONE, NONE, NONE, OB_TRG_a, NONE, NON
LAN966X_P(24, GPIO, FC0_b, IB_TRG_a, USB_H_c, OB_TRG_a, IRQ_IN_c, TACHO_a, R);
LAN966X_P(25, GPIO, FC0_b, IB_TRG_a, USB_H_c, OB_TRG_a, IRQ_OUT_c, SFP_SD, R);
LAN966X_P(26, GPIO, FC0_b, IB_TRG_a, USB_S_c, OB_TRG_a, CAN0_a, SFP_SD, R);
-LAN966X_P(27, GPIO, NONE, NONE, NONE, OB_TRG_a, CAN0_a, NONE, R);
+LAN966X_P(27, GPIO, NONE, NONE, NONE, OB_TRG_a, CAN0_a, PWM_a, R);
LAN966X_P(28, GPIO, MIIM_a, NONE, NONE, OB_TRG_a, IRQ_OUT_c, SFP_SD, R);
LAN966X_P(29, GPIO, MIIM_a, NONE, NONE, OB_TRG_a, NONE, NONE, R);
-LAN966X_P(30, GPIO, FC3_c, CAN1, NONE, OB_TRG, RECO_b, NONE, R);
-LAN966X_P(31, GPIO, FC3_c, CAN1, NONE, OB_TRG, RECO_b, NONE, R);
+LAN966X_P(30, GPIO, FC3_c, CAN1, CLKMON, OB_TRG, RECO_b, NONE, R);
+LAN966X_P(31, GPIO, FC3_c, CAN1, CLKMON, OB_TRG, RECO_b, NONE, R);
LAN966X_P(32, GPIO, FC3_c, NONE, SGPIO_a, NONE, MIIM_Sa, NONE, R);
LAN966X_P(33, GPIO, FC1_b, NONE, SGPIO_a, NONE, MIIM_Sa, MIIM_b, R);
LAN966X_P(34, GPIO, FC1_b, NONE, SGPIO_a, NONE, MIIM_Sa, MIIM_b, R);
@@ -1001,7 +1008,7 @@ LAN966X_P(47, GPIO, FC1_c, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, FC_SHRD5, IRQ_IN
LAN966X_P(48, GPIO, FC1_c, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, FC_SHRD6, IRQ_IN_a, R);
LAN966X_P(49, GPIO, FC_SHRD7, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, TWI_SLC_GATE, IRQ_IN_a, R);
LAN966X_P(50, GPIO, FC_SHRD16, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, TWI_SLC_GATE, NONE, R);
-LAN966X_P(51, GPIO, FC3_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, NONE, IRQ_IN_b, R);
+LAN966X_P(51, GPIO, FC3_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, PWM_b, IRQ_IN_b, R);
LAN966X_P(52, GPIO, FC3_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, TACHO_b, IRQ_IN_b, R);
LAN966X_P(53, GPIO, FC3_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, NONE, IRQ_IN_b, R);
LAN966X_P(54, GPIO, FC_SHRD8, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, TWI_SLC_GATE, IRQ_IN_b, R);
@@ -1908,6 +1915,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ocelot_pinctrl *info;
+ struct reset_control *reset;
struct regmap *pincfg;
void __iomem *base;
int ret;
@@ -1923,6 +1931,12 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
info->desc = (struct pinctrl_desc *)device_get_match_data(dev);
+ reset = devm_reset_control_get_optional_shared(dev, "switch");
+ if (IS_ERR(reset))
+ return dev_err_probe(dev, PTR_ERR(reset),
+ "Failed to get reset\n");
+ reset_control_reset(reset);
+
base = devm_ioremap_resource(dev,
platform_get_resource(pdev, IORESOURCE_MEM, 0));
if (IS_ERR(base))
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 2cb79e649fcf..32e41395fc76 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -103,6 +103,25 @@
}, \
}
+#define PIN_BANK_IOMUX_FLAGS_PULL_FLAGS(id, pins, label, iom0, iom1, \
+ iom2, iom3, pull0, pull1, \
+ pull2, pull3) \
+ { \
+ .bank_num = id, \
+ .nr_pins = pins, \
+ .name = label, \
+ .iomux = { \
+ { .type = iom0, .offset = -1 }, \
+ { .type = iom1, .offset = -1 }, \
+ { .type = iom2, .offset = -1 }, \
+ { .type = iom3, .offset = -1 }, \
+ }, \
+ .pull_type[0] = pull0, \
+ .pull_type[1] = pull1, \
+ .pull_type[2] = pull2, \
+ .pull_type[3] = pull3, \
+ }
+
#define PIN_BANK_DRV_FLAGS_PULL_FLAGS(id, pins, label, drv0, drv1, \
drv2, drv3, pull0, pull1, \
pull2, pull3) \
@@ -197,6 +216,9 @@
#define RK_MUXROUTE_PMU(ID, PIN, FUNC, REG, VAL) \
PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, ROCKCHIP_ROUTE_PMU)
+#define RK3588_PIN_BANK_FLAGS(ID, PIN, LABEL, M, P) \
+ PIN_BANK_IOMUX_FLAGS_PULL_FLAGS(ID, PIN, LABEL, M, M, M, M, P, P, P, P)
+
static struct regmap_config rockchip_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@@ -837,6 +859,7 @@ static bool rockchip_get_mux_route(struct rockchip_pin_bank *bank, int pin,
static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin)
{
struct rockchip_pinctrl *info = bank->drvdata;
+ struct rockchip_pin_ctrl *ctrl = info->ctrl;
int iomux_num = (pin / 8);
struct regmap *regmap;
unsigned int val;
@@ -878,6 +901,27 @@ static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin)
if (bank->recalced_mask & BIT(pin))
rockchip_get_recalced_mux(bank, pin, &reg, &bit, &mask);
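+ /*
+ * On RK3588, GPIO0 B4..D7 can take their iomux from either the
+ * PMU2_IOC or the BUS_IOC block; bit 8 of the PMU2_IOC register says
+ * which one is in effect. Pins of the other banks always live in
+ * BUS_IOC.
+ */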
+ if (ctrl->type == RK3588) {
+ if (bank->bank_num == 0) {
+ if ((pin >= RK_PB4) && (pin <= RK_PD7)) {
+ u32 reg0 = 0;
+
+ reg0 = reg + 0x4000 - 0xC; /* PMU2_IOC_BASE */
+ ret = regmap_read(regmap, reg0, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & BIT(8)))
+ return ((val >> bit) & mask);
+
+ reg = reg + 0x8000; /* BUS_IOC_BASE */
+ regmap = info->regmap_base;
+ }
+ } else if (bank->bank_num > 0) {
+ reg += 0x8000; /* BUS_IOC_BASE */
+ }
+ }
+
ret = regmap_read(regmap, reg, &val);
if (ret)
return ret;
@@ -926,6 +970,7 @@ static int rockchip_verify_mux(struct rockchip_pin_bank *bank,
static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
{
struct rockchip_pinctrl *info = bank->drvdata;
+ struct rockchip_pin_ctrl *ctrl = info->ctrl;
struct device *dev = info->dev;
int iomux_num = (pin / 8);
struct regmap *regmap;
@@ -966,6 +1011,46 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
if (bank->recalced_mask & BIT(pin))
rockchip_get_recalced_mux(bank, pin, &reg, &bit, &mask);
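+ /*
+ * On RK3588, GPIO0 B4..D7 functions 0-7 are written to PMU2_IOC only;
+ * higher functions write 8 into PMU2_IOC (routing the pin to BUS_IOC)
+ * and the real function number into BUS_IOC.
+ */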
+ if (ctrl->type == RK3588) {
+ if (bank->bank_num == 0) {
+ if ((pin >= RK_PB4) && (pin <= RK_PD7)) {
+ if (mux < 8) {
+ reg += 0x4000 - 0xC; /* PMU2_IOC_BASE */
+ data = (mask << (bit + 16));
+ rmask = data | (data >> 16);
+ data |= (mux & mask) << bit;
+ ret = regmap_update_bits(regmap, reg, rmask, data);
+ } else {
+ u32 reg0 = 0;
+
+ reg0 = reg + 0x4000 - 0xC; /* PMU2_IOC_BASE */
+ data = (mask << (bit + 16));
+ rmask = data | (data >> 16);
+ data |= 8 << bit;
+ ret = regmap_update_bits(regmap, reg0, rmask, data);
+
+ reg0 = reg + 0x8000; /* BUS_IOC_BASE */
+ data = (mask << (bit + 16));
+ rmask = data | (data >> 16);
+ data |= mux << bit;
+ regmap = info->regmap_base;
+ ret |= regmap_update_bits(regmap, reg0, rmask, data);
+ }
+ } else {
+ data = (mask << (bit + 16));
+ rmask = data | (data >> 16);
+ data |= (mux & mask) << bit;
+ ret = regmap_update_bits(regmap, reg, rmask, data);
+ }
+ return ret;
+ } else if (bank->bank_num > 0) {
+ reg += 0x8000; /* BUS_IOC_BASE */
+ }
+ }
+
+ if (mux > mask)
+ return -EINVAL;
+
if (bank->route_mask & BIT(pin)) {
if (rockchip_get_mux_route(bank, pin, mux, &route_location,
&route_reg, &route_val)) {
@@ -1001,9 +1086,9 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
#define PX30_PULL_PINS_PER_REG 8
#define PX30_PULL_BANK_STRIDE 16
-static void px30_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int px30_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1023,6 +1108,8 @@ static void px30_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
*reg += ((pin_num / PX30_PULL_PINS_PER_REG) * 4);
*bit = (pin_num % PX30_PULL_PINS_PER_REG);
*bit *= PX30_PULL_BITS_PER_PIN;
+
+ return 0;
}
#define PX30_DRV_PMU_OFFSET 0x20
@@ -1031,9 +1118,9 @@ static void px30_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
#define PX30_DRV_PINS_PER_REG 8
#define PX30_DRV_BANK_STRIDE 16
-static void px30_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int px30_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1053,6 +1140,8 @@ static void px30_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
*reg += ((pin_num / PX30_DRV_PINS_PER_REG) * 4);
*bit = (pin_num % PX30_DRV_PINS_PER_REG);
*bit *= PX30_DRV_BITS_PER_PIN;
+
+ return 0;
}
#define PX30_SCHMITT_PMU_OFFSET 0x38
@@ -1092,9 +1181,9 @@ static int px30_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
#define RV1108_PULL_BITS_PER_PIN 2
#define RV1108_PULL_BANK_STRIDE 16
-static void rv1108_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rv1108_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1113,6 +1202,8 @@ static void rv1108_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
*reg += ((pin_num / RV1108_PULL_PINS_PER_REG) * 4);
*bit = (pin_num % RV1108_PULL_PINS_PER_REG);
*bit *= RV1108_PULL_BITS_PER_PIN;
+
+ return 0;
}
#define RV1108_DRV_PMU_OFFSET 0x20
@@ -1121,9 +1212,9 @@ static void rv1108_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
#define RV1108_DRV_PINS_PER_REG 8
#define RV1108_DRV_BANK_STRIDE 16
-static void rv1108_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rv1108_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1143,6 +1234,8 @@ static void rv1108_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
*reg += ((pin_num / RV1108_DRV_PINS_PER_REG) * 4);
*bit = pin_num % RV1108_DRV_PINS_PER_REG;
*bit *= RV1108_DRV_BITS_PER_PIN;
+
+ return 0;
}
#define RV1108_SCHMITT_PMU_OFFSET 0x30
@@ -1199,9 +1292,9 @@ static int rk3308_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
#define RK2928_PULL_PINS_PER_REG 16
#define RK2928_PULL_BANK_STRIDE 8
-static void rk2928_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rk2928_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1211,13 +1304,15 @@ static void rk2928_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
*reg += (pin_num / RK2928_PULL_PINS_PER_REG) * 4;
*bit = pin_num % RK2928_PULL_PINS_PER_REG;
+
+ return 0;
};
#define RK3128_PULL_OFFSET 0x118
-static void rk3128_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rk3128_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1227,6 +1322,8 @@ static void rk3128_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
*reg += ((pin_num / RK2928_PULL_PINS_PER_REG) * 4);
*bit = pin_num % RK2928_PULL_PINS_PER_REG;
+
+ return 0;
}
#define RK3188_PULL_OFFSET 0x164
@@ -1235,9 +1332,9 @@ static void rk3128_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
#define RK3188_PULL_BANK_STRIDE 16
#define RK3188_PULL_PMU_OFFSET 0x64
-static void rk3188_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rk3188_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1267,12 +1364,14 @@ static void rk3188_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
*bit = 7 - (pin_num % RK3188_PULL_PINS_PER_REG);
*bit *= RK3188_PULL_BITS_PER_PIN;
}
+
+ return 0;
}
#define RK3288_PULL_OFFSET 0x140
-static void rk3288_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rk3288_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1296,6 +1395,8 @@ static void rk3288_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
*bit = (pin_num % RK3188_PULL_PINS_PER_REG);
*bit *= RK3188_PULL_BITS_PER_PIN;
}
+
+ return 0;
}
#define RK3288_DRV_PMU_OFFSET 0x70
@@ -1304,9 +1405,9 @@ static void rk3288_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
#define RK3288_DRV_PINS_PER_REG 8
#define RK3288_DRV_BANK_STRIDE 16
-static void rk3288_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rk3288_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1330,13 +1431,15 @@ static void rk3288_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
*bit = (pin_num % RK3288_DRV_PINS_PER_REG);
*bit *= RK3288_DRV_BITS_PER_PIN;
}
+
+ return 0;
}
#define RK3228_PULL_OFFSET 0x100
-static void rk3228_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rk3228_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1347,13 +1450,15 @@ static void rk3228_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
*bit = (pin_num % RK3188_PULL_PINS_PER_REG);
*bit *= RK3188_PULL_BITS_PER_PIN;
+
+ return 0;
}
#define RK3228_DRV_GRF_OFFSET 0x200
-static void rk3228_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rk3228_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1364,13 +1469,15 @@ static void rk3228_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
*bit = (pin_num % RK3288_DRV_PINS_PER_REG);
*bit *= RK3288_DRV_BITS_PER_PIN;
+
+ return 0;
}
#define RK3308_PULL_OFFSET 0xa0
-static void rk3308_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rk3308_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1381,13 +1488,15 @@ static void rk3308_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
*bit = (pin_num % RK3188_PULL_PINS_PER_REG);
*bit *= RK3188_PULL_BITS_PER_PIN;
+
+ return 0;
}
#define RK3308_DRV_GRF_OFFSET 0x100
-static void rk3308_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rk3308_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1398,14 +1507,16 @@ static void rk3308_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
*bit = (pin_num % RK3288_DRV_PINS_PER_REG);
*bit *= RK3288_DRV_BITS_PER_PIN;
+
+ return 0;
}
#define RK3368_PULL_GRF_OFFSET 0x100
#define RK3368_PULL_PMU_OFFSET 0x10
-static void rk3368_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rk3368_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1429,14 +1540,16 @@ static void rk3368_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
*bit = (pin_num % RK3188_PULL_PINS_PER_REG);
*bit *= RK3188_PULL_BITS_PER_PIN;
}
+
+ return 0;
}
#define RK3368_DRV_PMU_OFFSET 0x20
#define RK3368_DRV_GRF_OFFSET 0x200
-static void rk3368_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rk3368_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1460,15 +1573,17 @@ static void rk3368_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
*bit = (pin_num % RK3288_DRV_PINS_PER_REG);
*bit *= RK3288_DRV_BITS_PER_PIN;
}
+
+ return 0;
}
#define RK3399_PULL_GRF_OFFSET 0xe040
#define RK3399_PULL_PMU_OFFSET 0x40
#define RK3399_DRV_3BITS_PER_PIN 3
-static void rk3399_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rk3399_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1494,11 +1609,13 @@ static void rk3399_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
*bit = (pin_num % RK3188_PULL_PINS_PER_REG);
*bit *= RK3188_PULL_BITS_PER_PIN;
}
+
+ return 0;
}
-static void rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
int drv_num = (pin_num / 8);
@@ -1515,6 +1632,8 @@ static void rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
*bit = (pin_num % 8) * 3;
else
*bit = (pin_num % 8) * 2;
+
+ return 0;
}
#define RK3568_PULL_PMU_OFFSET 0x20
@@ -1523,9 +1642,9 @@ static void rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
#define RK3568_PULL_PINS_PER_REG 8
#define RK3568_PULL_BANK_STRIDE 0x10
-static void rk3568_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rk3568_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1546,6 +1665,8 @@ static void rk3568_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
*bit = (pin_num % RK3568_PULL_PINS_PER_REG);
*bit *= RK3568_PULL_BITS_PER_PIN;
}
+
+ return 0;
}
#define RK3568_DRV_PMU_OFFSET 0x70
@@ -1554,9 +1675,9 @@ static void rk3568_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
#define RK3568_DRV_PINS_PER_REG 2
#define RK3568_DRV_BANK_STRIDE 0x40
-static void rk3568_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
- int pin_num, struct regmap **regmap,
- int *reg, u8 *bit)
+static int rk3568_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -1577,6 +1698,189 @@ static void rk3568_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
*bit = (pin_num % RK3568_DRV_PINS_PER_REG);
*bit *= RK3568_DRV_BITS_PER_PIN;
}
+
+ return 0;
+}
+
+#define RK3588_PMU1_IOC_REG (0x0000)
+#define RK3588_PMU2_IOC_REG (0x4000)
+#define RK3588_BUS_IOC_REG (0x8000)
+#define RK3588_VCCIO1_4_IOC_REG (0x9000)
+#define RK3588_VCCIO3_5_IOC_REG (0xA000)
+#define RK3588_VCCIO2_IOC_REG (0xB000)
+#define RK3588_VCCIO6_IOC_REG (0xC000)
+#define RK3588_EMMC_IOC_REG (0xD000)
+
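+/*
+ * Each entry in the rk3588_*_regs tables below is
+ * { first pin of the range, IOC register offset } for the drive-strength
+ * (ds), bias (p) and schmitt-trigger (smt) registers respectively.
+ */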
+static const u32 rk3588_ds_regs[][2] = {
+ {RK_GPIO0_A0, RK3588_PMU1_IOC_REG + 0x0010},
+ {RK_GPIO0_A4, RK3588_PMU1_IOC_REG + 0x0014},
+ {RK_GPIO0_B0, RK3588_PMU1_IOC_REG + 0x0018},
+ {RK_GPIO0_B4, RK3588_PMU2_IOC_REG + 0x0014},
+ {RK_GPIO0_C0, RK3588_PMU2_IOC_REG + 0x0018},
+ {RK_GPIO0_C4, RK3588_PMU2_IOC_REG + 0x001C},
+ {RK_GPIO0_D0, RK3588_PMU2_IOC_REG + 0x0020},
+ {RK_GPIO0_D4, RK3588_PMU2_IOC_REG + 0x0024},
+ {RK_GPIO1_A0, RK3588_VCCIO1_4_IOC_REG + 0x0020},
+ {RK_GPIO1_A4, RK3588_VCCIO1_4_IOC_REG + 0x0024},
+ {RK_GPIO1_B0, RK3588_VCCIO1_4_IOC_REG + 0x0028},
+ {RK_GPIO1_B4, RK3588_VCCIO1_4_IOC_REG + 0x002C},
+ {RK_GPIO1_C0, RK3588_VCCIO1_4_IOC_REG + 0x0030},
+ {RK_GPIO1_C4, RK3588_VCCIO1_4_IOC_REG + 0x0034},
+ {RK_GPIO1_D0, RK3588_VCCIO1_4_IOC_REG + 0x0038},
+ {RK_GPIO1_D4, RK3588_VCCIO1_4_IOC_REG + 0x003C},
+ {RK_GPIO2_A0, RK3588_EMMC_IOC_REG + 0x0040},
+ {RK_GPIO2_A4, RK3588_VCCIO3_5_IOC_REG + 0x0044},
+ {RK_GPIO2_B0, RK3588_VCCIO3_5_IOC_REG + 0x0048},
+ {RK_GPIO2_B4, RK3588_VCCIO3_5_IOC_REG + 0x004C},
+ {RK_GPIO2_C0, RK3588_VCCIO3_5_IOC_REG + 0x0050},
+ {RK_GPIO2_C4, RK3588_VCCIO3_5_IOC_REG + 0x0054},
+ {RK_GPIO2_D0, RK3588_EMMC_IOC_REG + 0x0058},
+ {RK_GPIO2_D4, RK3588_EMMC_IOC_REG + 0x005C},
+ {RK_GPIO3_A0, RK3588_VCCIO3_5_IOC_REG + 0x0060},
+ {RK_GPIO3_A4, RK3588_VCCIO3_5_IOC_REG + 0x0064},
+ {RK_GPIO3_B0, RK3588_VCCIO3_5_IOC_REG + 0x0068},
+ {RK_GPIO3_B4, RK3588_VCCIO3_5_IOC_REG + 0x006C},
+ {RK_GPIO3_C0, RK3588_VCCIO3_5_IOC_REG + 0x0070},
+ {RK_GPIO3_C4, RK3588_VCCIO3_5_IOC_REG + 0x0074},
+ {RK_GPIO3_D0, RK3588_VCCIO3_5_IOC_REG + 0x0078},
+ {RK_GPIO3_D4, RK3588_VCCIO3_5_IOC_REG + 0x007C},
+ {RK_GPIO4_A0, RK3588_VCCIO6_IOC_REG + 0x0080},
+ {RK_GPIO4_A4, RK3588_VCCIO6_IOC_REG + 0x0084},
+ {RK_GPIO4_B0, RK3588_VCCIO6_IOC_REG + 0x0088},
+ {RK_GPIO4_B4, RK3588_VCCIO6_IOC_REG + 0x008C},
+ {RK_GPIO4_C0, RK3588_VCCIO6_IOC_REG + 0x0090},
+ {RK_GPIO4_C2, RK3588_VCCIO3_5_IOC_REG + 0x0090},
+ {RK_GPIO4_C4, RK3588_VCCIO3_5_IOC_REG + 0x0094},
+ {RK_GPIO4_D0, RK3588_VCCIO2_IOC_REG + 0x0098},
+ {RK_GPIO4_D4, RK3588_VCCIO2_IOC_REG + 0x009C},
+};
+
+static const u32 rk3588_p_regs[][2] = {
+ {RK_GPIO0_A0, RK3588_PMU1_IOC_REG + 0x0020},
+ {RK_GPIO0_B0, RK3588_PMU1_IOC_REG + 0x0024},
+ {RK_GPIO0_B5, RK3588_PMU2_IOC_REG + 0x0028},
+ {RK_GPIO0_C0, RK3588_PMU2_IOC_REG + 0x002C},
+ {RK_GPIO0_D0, RK3588_PMU2_IOC_REG + 0x0030},
+ {RK_GPIO1_A0, RK3588_VCCIO1_4_IOC_REG + 0x0110},
+ {RK_GPIO1_B0, RK3588_VCCIO1_4_IOC_REG + 0x0114},
+ {RK_GPIO1_C0, RK3588_VCCIO1_4_IOC_REG + 0x0118},
+ {RK_GPIO1_D0, RK3588_VCCIO1_4_IOC_REG + 0x011C},
+ {RK_GPIO2_A0, RK3588_EMMC_IOC_REG + 0x0120},
+ {RK_GPIO2_A6, RK3588_VCCIO3_5_IOC_REG + 0x0120},
+ {RK_GPIO2_B0, RK3588_VCCIO3_5_IOC_REG + 0x0124},
+ {RK_GPIO2_C0, RK3588_VCCIO3_5_IOC_REG + 0x0128},
+ {RK_GPIO2_D0, RK3588_EMMC_IOC_REG + 0x012C},
+ {RK_GPIO3_A0, RK3588_VCCIO3_5_IOC_REG + 0x0130},
+ {RK_GPIO3_B0, RK3588_VCCIO3_5_IOC_REG + 0x0134},
+ {RK_GPIO3_C0, RK3588_VCCIO3_5_IOC_REG + 0x0138},
+ {RK_GPIO3_D0, RK3588_VCCIO3_5_IOC_REG + 0x013C},
+ {RK_GPIO4_A0, RK3588_VCCIO6_IOC_REG + 0x0140},
+ {RK_GPIO4_B0, RK3588_VCCIO6_IOC_REG + 0x0144},
+ {RK_GPIO4_C0, RK3588_VCCIO6_IOC_REG + 0x0148},
+ {RK_GPIO4_C2, RK3588_VCCIO3_5_IOC_REG + 0x0148},
+ {RK_GPIO4_D0, RK3588_VCCIO2_IOC_REG + 0x014C},
+};
+
+static const u32 rk3588_smt_regs[][2] = {
+ {RK_GPIO0_A0, RK3588_PMU1_IOC_REG + 0x0030},
+ {RK_GPIO0_B0, RK3588_PMU1_IOC_REG + 0x0034},
+ {RK_GPIO0_B5, RK3588_PMU2_IOC_REG + 0x0040},
+ {RK_GPIO0_C0, RK3588_PMU2_IOC_REG + 0x0044},
+ {RK_GPIO0_D0, RK3588_PMU2_IOC_REG + 0x0048},
+ {RK_GPIO1_A0, RK3588_VCCIO1_4_IOC_REG + 0x0210},
+ {RK_GPIO1_B0, RK3588_VCCIO1_4_IOC_REG + 0x0214},
+ {RK_GPIO1_C0, RK3588_VCCIO1_4_IOC_REG + 0x0218},
+ {RK_GPIO1_D0, RK3588_VCCIO1_4_IOC_REG + 0x021C},
+ {RK_GPIO2_A0, RK3588_EMMC_IOC_REG + 0x0220},
+ {RK_GPIO2_A6, RK3588_VCCIO3_5_IOC_REG + 0x0220},
+ {RK_GPIO2_B0, RK3588_VCCIO3_5_IOC_REG + 0x0224},
+ {RK_GPIO2_C0, RK3588_VCCIO3_5_IOC_REG + 0x0228},
+ {RK_GPIO2_D0, RK3588_EMMC_IOC_REG + 0x022C},
+ {RK_GPIO3_A0, RK3588_VCCIO3_5_IOC_REG + 0x0230},
+ {RK_GPIO3_B0, RK3588_VCCIO3_5_IOC_REG + 0x0234},
+ {RK_GPIO3_C0, RK3588_VCCIO3_5_IOC_REG + 0x0238},
+ {RK_GPIO3_D0, RK3588_VCCIO3_5_IOC_REG + 0x023C},
+ {RK_GPIO4_A0, RK3588_VCCIO6_IOC_REG + 0x0240},
+ {RK_GPIO4_B0, RK3588_VCCIO6_IOC_REG + 0x0244},
+ {RK_GPIO4_C0, RK3588_VCCIO6_IOC_REG + 0x0248},
+ {RK_GPIO4_C2, RK3588_VCCIO3_5_IOC_REG + 0x0248},
+ {RK_GPIO4_D0, RK3588_VCCIO2_IOC_REG + 0x024C},
+};
+
+#define RK3588_PULL_BITS_PER_PIN 2
+#define RK3588_PULL_PINS_PER_REG 8
+
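+/*
+ * Find the register for a pin by walking the range table backwards and
+ * taking the last entry whose first pin is not above the requested one.
+ */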
+static int rk3588_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+ u8 bank_num = bank->bank_num;
+ u32 pin = bank_num * 32 + pin_num;
+ int i;
+
+ for (i = ARRAY_SIZE(rk3588_p_regs) - 1; i >= 0; i--) {
+ if (pin >= rk3588_p_regs[i][0]) {
+ *reg = rk3588_p_regs[i][1];
+ *regmap = info->regmap_base;
+ *bit = pin_num % RK3588_PULL_PINS_PER_REG;
+ *bit *= RK3588_PULL_BITS_PER_PIN;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+#define RK3588_DRV_BITS_PER_PIN 4
+#define RK3588_DRV_PINS_PER_REG 4
+
+static int rk3588_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+ u8 bank_num = bank->bank_num;
+ u32 pin = bank_num * 32 + pin_num;
+ int i;
+
+ for (i = ARRAY_SIZE(rk3588_ds_regs) - 1; i >= 0; i--) {
+ if (pin >= rk3588_ds_regs[i][0]) {
+ *reg = rk3588_ds_regs[i][1];
+ *regmap = info->regmap_base;
+ *bit = pin_num % RK3588_DRV_PINS_PER_REG;
+ *bit *= RK3588_DRV_BITS_PER_PIN;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+#define RK3588_SMT_BITS_PER_PIN 1
+#define RK3588_SMT_PINS_PER_REG 8
+
+static int rk3588_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num,
+ struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+ u8 bank_num = bank->bank_num;
+ u32 pin = bank_num * 32 + pin_num;
+ int i;
+
+ for (i = ARRAY_SIZE(rk3588_smt_regs) - 1; i >= 0; i--) {
+ if (pin >= rk3588_smt_regs[i][0]) {
+ *reg = rk3588_smt_regs[i][1];
+ *regmap = info->regmap_base;
+ *bit = pin_num % RK3588_SMT_PINS_PER_REG;
+ *bit *= RK3588_SMT_BITS_PER_PIN;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
}
static int rockchip_perpin_drv_list[DRV_TYPE_MAX][8] = {
@@ -1599,7 +1903,9 @@ static int rockchip_get_drive_perpin(struct rockchip_pin_bank *bank,
u8 bit;
int drv_type = bank->drv[pin_num / 8].drv_type;
- ctrl->drv_calc_reg(bank, pin_num, &regmap, &reg, &bit);
+ ret = ctrl->drv_calc_reg(bank, pin_num, &regmap, &reg, &bit);
+ if (ret)
+ return ret;
switch (drv_type) {
case DRV_TYPE_IO_1V8_3V0_AUTO:
@@ -1679,8 +1985,14 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
dev_dbg(dev, "setting drive of GPIO%d-%d to %d\n",
bank->bank_num, pin_num, strength);
- ctrl->drv_calc_reg(bank, pin_num, &regmap, &reg, &bit);
- if (ctrl->type == RK3568) {
+ ret = ctrl->drv_calc_reg(bank, pin_num, &regmap, &reg, &bit);
+ if (ret)
+ return ret;
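+ /*
+ * RK3588 programs the requested drive strength value directly, while
+ * RK3568 below still encodes it as a bit mask of (strength + 1) ones.
+ */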
+ if (ctrl->type == RK3588) {
+ rmask_bits = RK3588_DRV_BITS_PER_PIN;
+ ret = strength;
+ goto config;
+ } else if (ctrl->type == RK3568) {
rmask_bits = RK3568_DRV_BITS_PER_PIN;
ret = (1 << (strength + 1)) - 1;
goto config;
@@ -1792,7 +2104,9 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
if (ctrl->type == RK3066B)
return PIN_CONFIG_BIAS_DISABLE;
- ctrl->pull_calc_reg(bank, pin_num, &regmap, &reg, &bit);
+ ret = ctrl->pull_calc_reg(bank, pin_num, &regmap, &reg, &bit);
+ if (ret)
+ return ret;
ret = regmap_read(regmap, reg, &data);
if (ret)
@@ -1811,6 +2125,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RK3308:
case RK3368:
case RK3399:
+ case RK3588:
pull_type = bank->pull_type[pin_num / 8];
data >>= bit;
data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1;
@@ -1839,7 +2154,9 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
if (ctrl->type == RK3066B)
return pull ? -EINVAL : 0;
- ctrl->pull_calc_reg(bank, pin_num, &regmap, &reg, &bit);
+ ret = ctrl->pull_calc_reg(bank, pin_num, &regmap, &reg, &bit);
+ if (ret)
+ return ret;
switch (ctrl->type) {
case RK2928:
@@ -1857,6 +2174,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RK3368:
case RK3399:
case RK3568:
+ case RK3588:
pull_type = bank->pull_type[pin_num / 8];
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(rockchip_pull_list[pull_type]);
@@ -2104,25 +2422,27 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RK3368:
case RK3399:
case RK3568:
+ case RK3588:
return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT);
}
return false;
}
-static int rockchip_pinconf_defer_output(struct rockchip_pin_bank *bank,
- unsigned int pin, u32 arg)
+static int rockchip_pinconf_defer_pin(struct rockchip_pin_bank *bank,
+ unsigned int pin, u32 param, u32 arg)
{
- struct rockchip_pin_output_deferred *cfg;
+ struct rockchip_pin_deferred *cfg;
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg)
return -ENOMEM;
cfg->pin = pin;
+ cfg->param = param;
cfg->arg = arg;
- list_add_tail(&cfg->head, &bank->deferred_output);
+ list_add_tail(&cfg->head, &bank->deferred_pins);
return 0;
}
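The entries queued by rockchip_pinconf_defer_pin() are meant to be replayed once the matching GPIO chip has probed; that consumer lives in the gpio-rockchip driver, which is not shown in this section. The following is only a minimal sketch of such a consumer, assuming the GPIO probe path drains bank->deferred_pins under deferred_lock (the function name and the direct use of the gpio_chip callbacks are illustrative, not taken from the patch):

static void rockchip_gpio_apply_deferred(struct rockchip_pin_bank *bank,
					 struct gpio_chip *gc)
{
	struct rockchip_pin_deferred *cfg, *tmp;

	mutex_lock(&bank->deferred_lock);
	list_for_each_entry_safe(cfg, tmp, &bank->deferred_pins, head) {
		switch (cfg->param) {
		case PIN_CONFIG_OUTPUT:
			/* replay the queued output level */
			gc->direction_output(gc, cfg->pin, cfg->arg);
			break;
		case PIN_CONFIG_INPUT_ENABLE:
			gc->direction_input(gc, cfg->pin);
			break;
		default:
			break;
		}
		list_del(&cfg->head);
		kfree(cfg);
	}
	mutex_unlock(&bank->deferred_lock);
}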
@@ -2143,6 +2463,25 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
+ if (param == PIN_CONFIG_OUTPUT || param == PIN_CONFIG_INPUT_ENABLE) {
+ /*
+ * Check for gpio driver not being probed yet.
+ * The lock makes sure that either gpio-probe has completed
+ * or the gpio driver hasn't probed yet.
+ */
+ mutex_lock(&bank->deferred_lock);
+ if (!gpio || !gpio->direction_output) {
+ rc = rockchip_pinconf_defer_pin(bank, pin - bank->pin_base, param,
+ arg);
+ mutex_unlock(&bank->deferred_lock);
+ if (rc)
+ return rc;
+
+ break;
+ }
+ mutex_unlock(&bank->deferred_lock);
+ }
+
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
rc = rockchip_set_pull(bank, pin - bank->pin_base,
@@ -2171,27 +2510,21 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (rc != RK_FUNC_GPIO)
return -EINVAL;
- /*
- * Check for gpio driver not being probed yet.
- * The lock makes sure that either gpio-probe has completed
- * or the gpio driver hasn't probed yet.
- */
- mutex_lock(&bank->deferred_lock);
- if (!gpio || !gpio->direction_output) {
- rc = rockchip_pinconf_defer_output(bank, pin - bank->pin_base, arg);
- mutex_unlock(&bank->deferred_lock);
- if (rc)
- return rc;
-
- break;
- }
- mutex_unlock(&bank->deferred_lock);
-
rc = gpio->direction_output(gpio, pin - bank->pin_base,
arg);
if (rc)
return rc;
break;
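+ /* Switch the pin to its GPIO function, then make it an input. */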
+ case PIN_CONFIG_INPUT_ENABLE:
+ rc = rockchip_set_mux(bank, pin - bank->pin_base,
+ RK_FUNC_GPIO);
+ if (rc != RK_FUNC_GPIO)
+ return -EINVAL;
+
+ rc = gpio->direction_input(gpio, pin - bank->pin_base);
+ if (rc)
+ return rc;
+ break;
case PIN_CONFIG_DRIVE_STRENGTH:
/* rk3288 is the first with per-pin drive-strength */
if (!info->ctrl->drv_calc_reg)
@@ -2500,7 +2833,7 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
pdesc++;
}
- INIT_LIST_HEAD(&pin_bank->deferred_output);
+ INIT_LIST_HEAD(&pin_bank->deferred_pins);
mutex_init(&pin_bank->deferred_lock);
}
@@ -2763,7 +3096,7 @@ static int rockchip_pinctrl_remove(struct platform_device *pdev)
{
struct rockchip_pinctrl *info = platform_get_drvdata(pdev);
struct rockchip_pin_bank *bank;
- struct rockchip_pin_output_deferred *cfg;
+ struct rockchip_pin_deferred *cfg;
int i;
of_platform_depopulate(&pdev->dev);
@@ -2772,9 +3105,9 @@ static int rockchip_pinctrl_remove(struct platform_device *pdev)
bank = &info->ctrl->pin_banks[i];
mutex_lock(&bank->deferred_lock);
- while (!list_empty(&bank->deferred_output)) {
- cfg = list_first_entry(&bank->deferred_output,
- struct rockchip_pin_output_deferred, head);
+ while (!list_empty(&bank->deferred_pins)) {
+ cfg = list_first_entry(&bank->deferred_pins,
+ struct rockchip_pin_deferred, head);
list_del(&cfg->head);
kfree(cfg);
}
@@ -3207,6 +3540,29 @@ static struct rockchip_pin_ctrl rk3568_pin_ctrl = {
.schmitt_calc_reg = rk3568_calc_schmitt_reg_and_bit,
};
+static struct rockchip_pin_bank rk3588_pin_banks[] = {
+ RK3588_PIN_BANK_FLAGS(0, 32, "gpio0",
+ IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY),
+ RK3588_PIN_BANK_FLAGS(1, 32, "gpio1",
+ IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY),
+ RK3588_PIN_BANK_FLAGS(2, 32, "gpio2",
+ IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY),
+ RK3588_PIN_BANK_FLAGS(3, 32, "gpio3",
+ IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY),
+ RK3588_PIN_BANK_FLAGS(4, 32, "gpio4",
+ IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY),
+};
+
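+/*
+ * Per-SoC descriptor: five 32-pin banks plus the RK3588-specific register
+ * lookup callbacks defined above.  The generic code now checks the return
+ * value of these callbacks, so a pin with no table entry propagates
+ * -EINVAL instead of touching a bogus register.
+ */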
+static struct rockchip_pin_ctrl rk3588_pin_ctrl = {
+ .pin_banks = rk3588_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3588_pin_banks),
+ .label = "RK3588-GPIO",
+ .type = RK3588,
+ .pull_calc_reg = rk3588_calc_pull_reg_and_bit,
+ .drv_calc_reg = rk3588_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = rk3588_calc_schmitt_reg_and_bit,
+};
+
static const struct of_device_id rockchip_pinctrl_dt_match[] = {
{ .compatible = "rockchip,px30-pinctrl",
.data = &px30_pin_ctrl },
@@ -3238,6 +3594,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = &rk3399_pin_ctrl },
{ .compatible = "rockchip,rk3568-pinctrl",
.data = &rk3568_pin_ctrl },
+ { .compatible = "rockchip,rk3588-pinctrl",
+ .data = &rk3588_pin_ctrl },
{},
};
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
index 91f10279d084..ec46f8815ac9 100644
--- a/drivers/pinctrl/pinctrl-rockchip.h
+++ b/drivers/pinctrl/pinctrl-rockchip.h
@@ -18,6 +18,171 @@
#ifndef _PINCTRL_ROCKCHIP_H
#define _PINCTRL_ROCKCHIP_H
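+/*
+ * Global pin numbers: RK_GPIO<bank>_<port><index> equals
+ * bank * 32 + port * 8 + index, with ports A..D numbered 0..3.  These
+ * constants are used as the start-pin column of the RK3588 register
+ * lookup tables.
+ */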
+#define RK_GPIO0_A0 0
+#define RK_GPIO0_A1 1
+#define RK_GPIO0_A2 2
+#define RK_GPIO0_A3 3
+#define RK_GPIO0_A4 4
+#define RK_GPIO0_A5 5
+#define RK_GPIO0_A6 6
+#define RK_GPIO0_A7 7
+#define RK_GPIO0_B0 8
+#define RK_GPIO0_B1 9
+#define RK_GPIO0_B2 10
+#define RK_GPIO0_B3 11
+#define RK_GPIO0_B4 12
+#define RK_GPIO0_B5 13
+#define RK_GPIO0_B6 14
+#define RK_GPIO0_B7 15
+#define RK_GPIO0_C0 16
+#define RK_GPIO0_C1 17
+#define RK_GPIO0_C2 18
+#define RK_GPIO0_C3 19
+#define RK_GPIO0_C4 20
+#define RK_GPIO0_C5 21
+#define RK_GPIO0_C6 22
+#define RK_GPIO0_C7 23
+#define RK_GPIO0_D0 24
+#define RK_GPIO0_D1 25
+#define RK_GPIO0_D2 26
+#define RK_GPIO0_D3 27
+#define RK_GPIO0_D4 28
+#define RK_GPIO0_D5 29
+#define RK_GPIO0_D6 30
+#define RK_GPIO0_D7 31
+
+#define RK_GPIO1_A0 32
+#define RK_GPIO1_A1 33
+#define RK_GPIO1_A2 34
+#define RK_GPIO1_A3 35
+#define RK_GPIO1_A4 36
+#define RK_GPIO1_A5 37
+#define RK_GPIO1_A6 38
+#define RK_GPIO1_A7 39
+#define RK_GPIO1_B0 40
+#define RK_GPIO1_B1 41
+#define RK_GPIO1_B2 42
+#define RK_GPIO1_B3 43
+#define RK_GPIO1_B4 44
+#define RK_GPIO1_B5 45
+#define RK_GPIO1_B6 46
+#define RK_GPIO1_B7 47
+#define RK_GPIO1_C0 48
+#define RK_GPIO1_C1 49
+#define RK_GPIO1_C2 50
+#define RK_GPIO1_C3 51
+#define RK_GPIO1_C4 52
+#define RK_GPIO1_C5 53
+#define RK_GPIO1_C6 54
+#define RK_GPIO1_C7 55
+#define RK_GPIO1_D0 56
+#define RK_GPIO1_D1 57
+#define RK_GPIO1_D2 58
+#define RK_GPIO1_D3 59
+#define RK_GPIO1_D4 60
+#define RK_GPIO1_D5 61
+#define RK_GPIO1_D6 62
+#define RK_GPIO1_D7 63
+
+#define RK_GPIO2_A0 64
+#define RK_GPIO2_A1 65
+#define RK_GPIO2_A2 66
+#define RK_GPIO2_A3 67
+#define RK_GPIO2_A4 68
+#define RK_GPIO2_A5 69
+#define RK_GPIO2_A6 70
+#define RK_GPIO2_A7 71
+#define RK_GPIO2_B0 72
+#define RK_GPIO2_B1 73
+#define RK_GPIO2_B2 74
+#define RK_GPIO2_B3 75
+#define RK_GPIO2_B4 76
+#define RK_GPIO2_B5 77
+#define RK_GPIO2_B6 78
+#define RK_GPIO2_B7 79
+#define RK_GPIO2_C0 80
+#define RK_GPIO2_C1 81
+#define RK_GPIO2_C2 82
+#define RK_GPIO2_C3 83
+#define RK_GPIO2_C4 84
+#define RK_GPIO2_C5 85
+#define RK_GPIO2_C6 86
+#define RK_GPIO2_C7 87
+#define RK_GPIO2_D0 88
+#define RK_GPIO2_D1 89
+#define RK_GPIO2_D2 90
+#define RK_GPIO2_D3 91
+#define RK_GPIO2_D4 92
+#define RK_GPIO2_D5 93
+#define RK_GPIO2_D6 94
+#define RK_GPIO2_D7 95
+
+#define RK_GPIO3_A0 96
+#define RK_GPIO3_A1 97
+#define RK_GPIO3_A2 98
+#define RK_GPIO3_A3 99
+#define RK_GPIO3_A4 100
+#define RK_GPIO3_A5 101
+#define RK_GPIO3_A6 102
+#define RK_GPIO3_A7 103
+#define RK_GPIO3_B0 104
+#define RK_GPIO3_B1 105
+#define RK_GPIO3_B2 106
+#define RK_GPIO3_B3 107
+#define RK_GPIO3_B4 108
+#define RK_GPIO3_B5 109
+#define RK_GPIO3_B6 110
+#define RK_GPIO3_B7 111
+#define RK_GPIO3_C0 112
+#define RK_GPIO3_C1 113
+#define RK_GPIO3_C2 114
+#define RK_GPIO3_C3 115
+#define RK_GPIO3_C4 116
+#define RK_GPIO3_C5 117
+#define RK_GPIO3_C6 118
+#define RK_GPIO3_C7 119
+#define RK_GPIO3_D0 120
+#define RK_GPIO3_D1 121
+#define RK_GPIO3_D2 122
+#define RK_GPIO3_D3 123
+#define RK_GPIO3_D4 124
+#define RK_GPIO3_D5 125
+#define RK_GPIO3_D6 126
+#define RK_GPIO3_D7 127
+
+#define RK_GPIO4_A0 128
+#define RK_GPIO4_A1 129
+#define RK_GPIO4_A2 130
+#define RK_GPIO4_A3 131
+#define RK_GPIO4_A4 132
+#define RK_GPIO4_A5 133
+#define RK_GPIO4_A6 134
+#define RK_GPIO4_A7 135
+#define RK_GPIO4_B0 136
+#define RK_GPIO4_B1 137
+#define RK_GPIO4_B2 138
+#define RK_GPIO4_B3 139
+#define RK_GPIO4_B4 140
+#define RK_GPIO4_B5 141
+#define RK_GPIO4_B6 142
+#define RK_GPIO4_B7 143
+#define RK_GPIO4_C0 144
+#define RK_GPIO4_C1 145
+#define RK_GPIO4_C2 146
+#define RK_GPIO4_C3 147
+#define RK_GPIO4_C4 148
+#define RK_GPIO4_C5 149
+#define RK_GPIO4_C6 150
+#define RK_GPIO4_C7 151
+#define RK_GPIO4_D0 152
+#define RK_GPIO4_D1 153
+#define RK_GPIO4_D2 154
+#define RK_GPIO4_D3 155
+#define RK_GPIO4_D4 156
+#define RK_GPIO4_D5 157
+#define RK_GPIO4_D6 158
+#define RK_GPIO4_D7 159
+
enum rockchip_pinctrl_type {
PX30,
RV1108,
@@ -30,6 +195,7 @@ enum rockchip_pinctrl_type {
RK3368,
RK3399,
RK3568,
+ RK3588,
};
/**
@@ -171,7 +337,7 @@ struct rockchip_pin_bank {
u32 toggle_edge_mode;
u32 recalced_mask;
u32 route_mask;
- struct list_head deferred_output;
+ struct list_head deferred_pins;
struct mutex deferred_lock;
};
@@ -230,10 +396,10 @@ struct rockchip_pin_ctrl {
struct rockchip_mux_route_data *iomux_routes;
u32 niomux_routes;
- void (*pull_calc_reg)(struct rockchip_pin_bank *bank,
+ int (*pull_calc_reg)(struct rockchip_pin_bank *bank,
int pin_num, struct regmap **regmap,
int *reg, u8 *bit);
- void (*drv_calc_reg)(struct rockchip_pin_bank *bank,
+ int (*drv_calc_reg)(struct rockchip_pin_bank *bank,
int pin_num, struct regmap **regmap,
int *reg, u8 *bit);
int (*schmitt_calc_reg)(struct rockchip_pin_bank *bank,
@@ -247,9 +413,12 @@ struct rockchip_pin_config {
unsigned int nconfigs;
};
-struct rockchip_pin_output_deferred {
+enum pin_config_param;
+
+struct rockchip_pin_deferred {
struct list_head head;
unsigned int pin;
+ enum pin_config_param param;
u32 arg;
};
diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/pinctrl-starfive.c
index c586cfd09fa8..2a86c1035cc8 100644
--- a/drivers/pinctrl/pinctrl-starfive.c
+++ b/drivers/pinctrl/pinctrl-starfive.c
@@ -1074,6 +1074,8 @@ static void starfive_irq_mask(struct irq_data *d)
value = readl_relaxed(ie) & ~mask;
writel_relaxed(value, ie);
raw_spin_unlock_irqrestore(&sfp->lock, flags);
+
+ gpiochip_disable_irq(&sfp->gc, d->hwirq);
}
static void starfive_irq_mask_ack(struct irq_data *d)
@@ -1102,6 +1104,8 @@ static void starfive_irq_unmask(struct irq_data *d)
unsigned long flags;
u32 value;
+ gpiochip_enable_irq(&sfp->gc, d->hwirq);
+
raw_spin_lock_irqsave(&sfp->lock, flags);
value = readl_relaxed(ie) | mask;
writel_relaxed(value, ie);
@@ -1163,14 +1167,15 @@ static int starfive_irq_set_type(struct irq_data *d, unsigned int trigger)
return 0;
}
-static struct irq_chip starfive_irq_chip = {
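+/*
+ * Immutable irqchip: gpiolib no longer copies or modifies this structure,
+ * so the mask/unmask callbacks above call gpiochip_disable_irq() and
+ * gpiochip_enable_irq() themselves, and GPIOCHIP_IRQ_RESOURCE_HELPERS
+ * provides the irq_request/release_resources hooks.
+ */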
+static const struct irq_chip starfive_irq_chip = {
.name = "StarFive GPIO",
.irq_ack = starfive_irq_ack,
.irq_mask = starfive_irq_mask,
.irq_mask_ack = starfive_irq_mask_ack,
.irq_unmask = starfive_irq_unmask,
.irq_set_type = starfive_irq_set_type,
- .flags = IRQCHIP_SET_TYPE_MASKED,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SET_TYPE_MASKED,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static void starfive_gpio_irq_handler(struct irq_desc *desc)
@@ -1308,7 +1313,7 @@ static int starfive_probe(struct platform_device *pdev)
sfp->gc.base = -1;
sfp->gc.ngpio = NR_GPIOS;
- sfp->gc.irq.chip = &starfive_irq_chip;
+ gpio_irq_chip_set_chip(&sfp->gc.irq, &starfive_irq_chip);
sfp->gc.irq.parent_handler = starfive_gpio_irq_handler;
sfp->gc.irq.num_parents = 1;
sfp->gc.irq.parents = devm_kcalloc(dev, sfp->gc.irq.num_parents,
diff --git a/drivers/pinctrl/pinctrl-thunderbay.c b/drivers/pinctrl/pinctrl-thunderbay.c
index 79d44bca039e..9328b17485cf 100644
--- a/drivers/pinctrl/pinctrl-thunderbay.c
+++ b/drivers/pinctrl/pinctrl-thunderbay.c
@@ -1229,7 +1229,6 @@ static int thunderbay_pinctrl_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
struct thunderbay_pinctrl *tpc;
- struct resource *iomem;
int ret;
of_id = of_match_node(thunderbay_pinctrl_match, pdev->dev.of_node);
@@ -1243,11 +1242,7 @@ static int thunderbay_pinctrl_probe(struct platform_device *pdev)
tpc->dev = dev;
tpc->soc = of_id->data;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem)
- return -ENXIO;
-
- tpc->base0 = devm_ioremap_resource(dev, iomem);
+ tpc->base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tpc->base0))
return PTR_ERR(tpc->base0);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index c51ef54a9f61..3daeb9772391 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -239,6 +239,15 @@ config PINCTRL_SC7280
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SC7280 platform.
+config PINCTRL_SC7280_LPASS_LPI
+ tristate "Qualcomm Technologies Inc SC7280 LPASS LPI pin controller driver"
+ depends on GPIOLIB
+ depends on PINCTRL_LPASS_LPI
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
+ (Low Power Island) found on the Qualcomm Technologies Inc SC7280 platform.
+
config PINCTRL_SC8180X
tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
depends on (OF || ACPI)
@@ -338,6 +347,15 @@ config PINCTRL_SM8250
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM8250 platform.
+config PINCTRL_SM8250_LPASS_LPI
+ tristate "Qualcomm Technologies Inc SM8250 LPASS LPI pin controller driver"
+ depends on GPIOLIB
+ depends on PINCTRL_LPASS_LPI
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
+ (Low Power Island) found on the Qualcomm Technologies Inc SM8250 platform.
+
config PINCTRL_SM8350
tristate "Qualcomm Technologies Inc SM8350 pin controller driver"
depends on PINCTRL_MSM
@@ -360,6 +378,7 @@ config PINCTRL_LPASS_LPI
select PINMUX
select PINCONF
select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
depends on GPIOLIB
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 5efbfd9f6248..4f0ee7597f81 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
obj-$(CONFIG_PINCTRL_SC7180) += pinctrl-sc7180.o
obj-$(CONFIG_PINCTRL_SC7280) += pinctrl-sc7280.o
+obj-$(CONFIG_PINCTRL_SC7280_LPASS_LPI) += pinctrl-sc7280-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SC8180X) += pinctrl-sc8180x.o
obj-$(CONFIG_PINCTRL_SC8280XP) += pinctrl-sc8280xp.o
obj-$(CONFIG_PINCTRL_SDM660) += pinctrl-sdm660.o
@@ -39,6 +40,7 @@ obj-$(CONFIG_PINCTRL_SM6350) += pinctrl-sm6350.o
obj-$(CONFIG_PINCTRL_SDX65) += pinctrl-sdx65.o
obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o
obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o
+obj-$(CONFIG_PINCTRL_SM8250_LPASS_LPI) += pinctrl-sm8250-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SM8350) += pinctrl-sm8350.o
obj-$(CONFIG_PINCTRL_SM8450) += pinctrl-sm8450.o
obj-$(CONFIG_PINCTRL_LPASS_LPI) += pinctrl-lpass-lpi.o
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 2f19ab4db720..74810ec4df44 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -4,93 +4,15 @@
* Copyright (c) 2020 Linaro Ltd.
*/
-#include <linux/bitops.h>
-#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/gpio/driver.h>
-#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
-#include <linux/of.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinmux.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include "../core.h"
#include "../pinctrl-utils.h"
-
-#define LPI_SLEW_RATE_CTL_REG 0xa000
-#define LPI_TLMM_REG_OFFSET 0x1000
-#define LPI_SLEW_RATE_MAX 0x03
-#define LPI_SLEW_BITS_SIZE 0x02
-#define LPI_SLEW_RATE_MASK GENMASK(1, 0)
-#define LPI_GPIO_CFG_REG 0x00
-#define LPI_GPIO_PULL_MASK GENMASK(1, 0)
-#define LPI_GPIO_FUNCTION_MASK GENMASK(5, 2)
-#define LPI_GPIO_OUT_STRENGTH_MASK GENMASK(8, 6)
-#define LPI_GPIO_OE_MASK BIT(9)
-#define LPI_GPIO_VALUE_REG 0x04
-#define LPI_GPIO_VALUE_IN_MASK BIT(0)
-#define LPI_GPIO_VALUE_OUT_MASK BIT(1)
-
-#define LPI_GPIO_BIAS_DISABLE 0x0
-#define LPI_GPIO_PULL_DOWN 0x1
-#define LPI_GPIO_KEEPER 0x2
-#define LPI_GPIO_PULL_UP 0x3
-#define LPI_GPIO_DS_TO_VAL(v) (v / 2 - 1)
-#define NO_SLEW -1
-
-#define LPI_FUNCTION(fname) \
- [LPI_MUX_##fname] = { \
- .name = #fname, \
- .groups = fname##_groups, \
- .ngroups = ARRAY_SIZE(fname##_groups), \
- }
-
-#define LPI_PINGROUP(id, soff, f1, f2, f3, f4) \
- { \
- .name = "gpio" #id, \
- .pins = gpio##id##_pins, \
- .pin = id, \
- .slew_offset = soff, \
- .npins = ARRAY_SIZE(gpio##id##_pins), \
- .funcs = (int[]){ \
- LPI_MUX_gpio, \
- LPI_MUX_##f1, \
- LPI_MUX_##f2, \
- LPI_MUX_##f3, \
- LPI_MUX_##f4, \
- }, \
- .nfuncs = 5, \
- }
-
-struct lpi_pingroup {
- const char *name;
- const unsigned int *pins;
- unsigned int npins;
- unsigned int pin;
- /* Bit offset in slew register for SoundWire pins only */
- int slew_offset;
- unsigned int *funcs;
- unsigned int nfuncs;
-};
-
-struct lpi_function {
- const char *name;
- const char * const *groups;
- unsigned int ngroups;
-};
-
-struct lpi_pinctrl_variant_data {
- const struct pinctrl_pin_desc *pins;
- int npins;
- const struct lpi_pingroup *groups;
- int ngroups;
- const struct lpi_function *functions;
- int nfunctions;
-};
+#include "pinctrl-lpass-lpi.h"
#define MAX_LPI_NUM_CLKS 2
@@ -106,136 +28,6 @@ struct lpi_pinctrl {
const struct lpi_pinctrl_variant_data *data;
};
-/* sm8250 variant specific data */
-static const struct pinctrl_pin_desc sm8250_lpi_pins[] = {
- PINCTRL_PIN(0, "gpio0"),
- PINCTRL_PIN(1, "gpio1"),
- PINCTRL_PIN(2, "gpio2"),
- PINCTRL_PIN(3, "gpio3"),
- PINCTRL_PIN(4, "gpio4"),
- PINCTRL_PIN(5, "gpio5"),
- PINCTRL_PIN(6, "gpio6"),
- PINCTRL_PIN(7, "gpio7"),
- PINCTRL_PIN(8, "gpio8"),
- PINCTRL_PIN(9, "gpio9"),
- PINCTRL_PIN(10, "gpio10"),
- PINCTRL_PIN(11, "gpio11"),
- PINCTRL_PIN(12, "gpio12"),
- PINCTRL_PIN(13, "gpio13"),
-};
-
-enum sm8250_lpi_functions {
- LPI_MUX_dmic1_clk,
- LPI_MUX_dmic1_data,
- LPI_MUX_dmic2_clk,
- LPI_MUX_dmic2_data,
- LPI_MUX_dmic3_clk,
- LPI_MUX_dmic3_data,
- LPI_MUX_i2s1_clk,
- LPI_MUX_i2s1_data,
- LPI_MUX_i2s1_ws,
- LPI_MUX_i2s2_clk,
- LPI_MUX_i2s2_data,
- LPI_MUX_i2s2_ws,
- LPI_MUX_qua_mi2s_data,
- LPI_MUX_qua_mi2s_sclk,
- LPI_MUX_qua_mi2s_ws,
- LPI_MUX_swr_rx_clk,
- LPI_MUX_swr_rx_data,
- LPI_MUX_swr_tx_clk,
- LPI_MUX_swr_tx_data,
- LPI_MUX_wsa_swr_clk,
- LPI_MUX_wsa_swr_data,
- LPI_MUX_gpio,
- LPI_MUX__,
-};
-
-static const unsigned int gpio0_pins[] = { 0 };
-static const unsigned int gpio1_pins[] = { 1 };
-static const unsigned int gpio2_pins[] = { 2 };
-static const unsigned int gpio3_pins[] = { 3 };
-static const unsigned int gpio4_pins[] = { 4 };
-static const unsigned int gpio5_pins[] = { 5 };
-static const unsigned int gpio6_pins[] = { 6 };
-static const unsigned int gpio7_pins[] = { 7 };
-static const unsigned int gpio8_pins[] = { 8 };
-static const unsigned int gpio9_pins[] = { 9 };
-static const unsigned int gpio10_pins[] = { 10 };
-static const unsigned int gpio11_pins[] = { 11 };
-static const unsigned int gpio12_pins[] = { 12 };
-static const unsigned int gpio13_pins[] = { 13 };
-static const char * const swr_tx_clk_groups[] = { "gpio0" };
-static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio5" };
-static const char * const swr_rx_clk_groups[] = { "gpio3" };
-static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" };
-static const char * const dmic1_clk_groups[] = { "gpio6" };
-static const char * const dmic1_data_groups[] = { "gpio7" };
-static const char * const dmic2_clk_groups[] = { "gpio8" };
-static const char * const dmic2_data_groups[] = { "gpio9" };
-static const char * const i2s2_clk_groups[] = { "gpio10" };
-static const char * const i2s2_ws_groups[] = { "gpio11" };
-static const char * const dmic3_clk_groups[] = { "gpio12" };
-static const char * const dmic3_data_groups[] = { "gpio13" };
-static const char * const qua_mi2s_sclk_groups[] = { "gpio0" };
-static const char * const qua_mi2s_ws_groups[] = { "gpio1" };
-static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4" };
-static const char * const i2s1_clk_groups[] = { "gpio6" };
-static const char * const i2s1_ws_groups[] = { "gpio7" };
-static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" };
-static const char * const wsa_swr_clk_groups[] = { "gpio10" };
-static const char * const wsa_swr_data_groups[] = { "gpio11" };
-static const char * const i2s2_data_groups[] = { "gpio12", "gpio12" };
-
-static const struct lpi_pingroup sm8250_groups[] = {
- LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _),
- LPI_PINGROUP(1, 2, swr_tx_data, qua_mi2s_ws, _, _),
- LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _),
- LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _),
- LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _),
- LPI_PINGROUP(5, 12, swr_tx_data, swr_rx_data, _, _),
- LPI_PINGROUP(6, NO_SLEW, dmic1_clk, i2s1_clk, _, _),
- LPI_PINGROUP(7, NO_SLEW, dmic1_data, i2s1_ws, _, _),
- LPI_PINGROUP(8, NO_SLEW, dmic2_clk, i2s1_data, _, _),
- LPI_PINGROUP(9, NO_SLEW, dmic2_data, i2s1_data, _, _),
- LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _),
- LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _),
- LPI_PINGROUP(12, NO_SLEW, dmic3_clk, i2s2_data, _, _),
- LPI_PINGROUP(13, NO_SLEW, dmic3_data, i2s2_data, _, _),
-};
-
-static const struct lpi_function sm8250_functions[] = {
- LPI_FUNCTION(dmic1_clk),
- LPI_FUNCTION(dmic1_data),
- LPI_FUNCTION(dmic2_clk),
- LPI_FUNCTION(dmic2_data),
- LPI_FUNCTION(dmic3_clk),
- LPI_FUNCTION(dmic3_data),
- LPI_FUNCTION(i2s1_clk),
- LPI_FUNCTION(i2s1_data),
- LPI_FUNCTION(i2s1_ws),
- LPI_FUNCTION(i2s2_clk),
- LPI_FUNCTION(i2s2_data),
- LPI_FUNCTION(i2s2_ws),
- LPI_FUNCTION(qua_mi2s_data),
- LPI_FUNCTION(qua_mi2s_sclk),
- LPI_FUNCTION(qua_mi2s_ws),
- LPI_FUNCTION(swr_rx_clk),
- LPI_FUNCTION(swr_rx_data),
- LPI_FUNCTION(swr_tx_clk),
- LPI_FUNCTION(swr_tx_data),
- LPI_FUNCTION(wsa_swr_clk),
- LPI_FUNCTION(wsa_swr_data),
-};
-
-static struct lpi_pinctrl_variant_data sm8250_lpi_data = {
- .pins = sm8250_lpi_pins,
- .npins = ARRAY_SIZE(sm8250_lpi_pins),
- .groups = sm8250_groups,
- .ngroups = ARRAY_SIZE(sm8250_groups),
- .functions = sm8250_functions,
- .nfunctions = ARRAY_SIZE(sm8250_functions),
-};
-
static int lpi_gpio_read(struct lpi_pinctrl *state, unsigned int pin,
unsigned int addr)
{
@@ -250,38 +42,10 @@ static int lpi_gpio_write(struct lpi_pinctrl *state, unsigned int pin,
return 0;
}
-static int lpi_gpio_get_groups_count(struct pinctrl_dev *pctldev)
-{
- struct lpi_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
-
- return pctrl->data->ngroups;
-}
-
-static const char *lpi_gpio_get_group_name(struct pinctrl_dev *pctldev,
- unsigned int group)
-{
- struct lpi_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
-
- return pctrl->data->groups[group].name;
-}
-
-static int lpi_gpio_get_group_pins(struct pinctrl_dev *pctldev,
- unsigned int group,
- const unsigned int **pins,
- unsigned int *num_pins)
-{
- struct lpi_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
-
- *pins = pctrl->data->groups[group].pins;
- *num_pins = pctrl->data->groups[group].npins;
-
- return 0;
-}
-
static const struct pinctrl_ops lpi_gpio_pinctrl_ops = {
- .get_groups_count = lpi_gpio_get_groups_count,
- .get_group_name = lpi_gpio_get_group_name,
- .get_group_pins = lpi_gpio_get_group_pins,
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_group,
.dt_free_map = pinctrl_utils_free_map,
};
@@ -435,7 +199,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
}
slew_offset = g->slew_offset;
- if (slew_offset == NO_SLEW)
+ if (slew_offset == LPI_NO_SLEW)
break;
mutex_lock(&pctrl->slew_access_lock);
@@ -582,7 +346,29 @@ static const struct gpio_chip lpi_gpio_template = {
.dbg_show = lpi_gpio_dbg_show,
};
-static int lpi_pinctrl_probe(struct platform_device *pdev)
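+/*
+ * Register one generic pinctrl group per pin so that the
+ * pinctrl_generic_get_group_* helpers now wired into lpi_gpio_pinctrl_ops
+ * can answer the group queries the driver previously implemented itself.
+ */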
+static int lpi_build_pin_desc_groups(struct lpi_pinctrl *pctrl)
+{
+ int i, ret;
+
+ for (i = 0; i < pctrl->data->npins; i++) {
+ const struct pinctrl_pin_desc *pin_info = pctrl->desc.pins + i;
+
+ ret = pinctrl_generic_add_group(pctrl->ctrl, pin_info->name,
+ (int *)&pin_info->number, 1, NULL);
+ if (ret < 0)
+ goto err_pinctrl;
+ }
+
+ return 0;
+
+err_pinctrl:
+ for (; i > 0; i--)
+ pinctrl_generic_remove_group(pctrl->ctrl, i - 1);
+
+ return ret;
+}
+
+int lpi_pinctrl_probe(struct platform_device *pdev)
{
const struct lpi_pinctrl_variant_data *data;
struct device *dev = &pdev->dev;
@@ -615,9 +401,13 @@ static int lpi_pinctrl_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(pctrl->slew_base),
"Slew resource not provided\n");
- ret = devm_clk_bulk_get(dev, MAX_LPI_NUM_CLKS, pctrl->clks);
+ if (data->is_clk_optional)
+ ret = devm_clk_bulk_get_optional(dev, MAX_LPI_NUM_CLKS, pctrl->clks);
+ else
+ ret = devm_clk_bulk_get(dev, MAX_LPI_NUM_CLKS, pctrl->clks);
+
if (ret)
- return dev_err_probe(dev, ret, "Can't get clocks\n");
+ return ret;
ret = clk_bulk_prepare_enable(MAX_LPI_NUM_CLKS, pctrl->clks);
if (ret)
@@ -647,6 +437,10 @@ static int lpi_pinctrl_probe(struct platform_device *pdev)
goto err_pinctrl;
}
+ ret = lpi_build_pin_desc_groups(pctrl);
+ if (ret)
+ goto err_pinctrl;
+
ret = devm_gpiochip_add_data(dev, &pctrl->chip, pctrl);
if (ret) {
dev_err(pctrl->dev, "can't add gpio chip\n");
@@ -661,35 +455,22 @@ err_pinctrl:
return ret;
}
+EXPORT_SYMBOL_GPL(lpi_pinctrl_probe);
-static int lpi_pinctrl_remove(struct platform_device *pdev)
+int lpi_pinctrl_remove(struct platform_device *pdev)
{
struct lpi_pinctrl *pctrl = platform_get_drvdata(pdev);
+ int i;
mutex_destroy(&pctrl->slew_access_lock);
clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks);
+ for (i = 0; i < pctrl->data->npins; i++)
+ pinctrl_generic_remove_group(pctrl->ctrl, i);
+
return 0;
}
+EXPORT_SYMBOL_GPL(lpi_pinctrl_remove);
-static const struct of_device_id lpi_pinctrl_of_match[] = {
- {
- .compatible = "qcom,sm8250-lpass-lpi-pinctrl",
- .data = &sm8250_lpi_data,
- },
- { }
-};
-MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match);
-
-static struct platform_driver lpi_pinctrl_driver = {
- .driver = {
- .name = "qcom-lpass-lpi-pinctrl",
- .of_match_table = lpi_pinctrl_of_match,
- },
- .probe = lpi_pinctrl_probe,
- .remove = lpi_pinctrl_remove,
-};
-
-module_platform_driver(lpi_pinctrl_driver);
MODULE_DESCRIPTION("QTI LPI GPIO pin control driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
new file mode 100644
index 000000000000..759d5d8da562
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020 Linaro Ltd.
+ */
+#ifndef __PINCTRL_LPASS_LPI_H__
+#define __PINCTRL_LPASS_LPI_H__
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include "../core.h"
+
+#define LPI_SLEW_RATE_CTL_REG 0xa000
+#define LPI_TLMM_REG_OFFSET 0x1000
+#define LPI_SLEW_RATE_MAX 0x03
+#define LPI_SLEW_BITS_SIZE 0x02
+#define LPI_SLEW_RATE_MASK GENMASK(1, 0)
+#define LPI_GPIO_CFG_REG 0x00
+#define LPI_GPIO_PULL_MASK GENMASK(1, 0)
+#define LPI_GPIO_FUNCTION_MASK GENMASK(5, 2)
+#define LPI_GPIO_OUT_STRENGTH_MASK GENMASK(8, 6)
+#define LPI_GPIO_OE_MASK BIT(9)
+#define LPI_GPIO_VALUE_REG 0x04
+#define LPI_GPIO_VALUE_IN_MASK BIT(0)
+#define LPI_GPIO_VALUE_OUT_MASK BIT(1)
+
+#define LPI_GPIO_BIAS_DISABLE 0x0
+#define LPI_GPIO_PULL_DOWN 0x1
+#define LPI_GPIO_KEEPER 0x2
+#define LPI_GPIO_PULL_UP 0x3
+#define LPI_GPIO_DS_TO_VAL(v) (v / 2 - 1)
+#define LPI_NO_SLEW -1
+
+#define LPI_FUNCTION(fname) \
+ [LPI_MUX_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define LPI_PINGROUP(id, soff, f1, f2, f3, f4) \
+ { \
+ .group.name = "gpio" #id, \
+ .group.pins = gpio##id##_pins, \
+ .pin = id, \
+ .slew_offset = soff, \
+ .group.num_pins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ LPI_MUX_gpio, \
+ LPI_MUX_##f1, \
+ LPI_MUX_##f2, \
+ LPI_MUX_##f3, \
+ LPI_MUX_##f4, \
+ }, \
+ .nfuncs = 5, \
+ }
+
+struct lpi_pingroup {
+ struct group_desc group;
+ unsigned int pin;
+ /* Bit offset in slew register for SoundWire pins only */
+ int slew_offset;
+ unsigned int *funcs;
+ unsigned int nfuncs;
+};
+
+struct lpi_function {
+ const char *name;
+ const char * const *groups;
+ unsigned int ngroups;
+};
+
+struct lpi_pinctrl_variant_data {
+ const struct pinctrl_pin_desc *pins;
+ int npins;
+ const struct lpi_pingroup *groups;
+ int ngroups;
+ const struct lpi_function *functions;
+ int nfunctions;
+ bool is_clk_optional;
+};
+
+int lpi_pinctrl_probe(struct platform_device *pdev);
+int lpi_pinctrl_remove(struct platform_device *pdev);
+
+#endif /*__PINCTRL_LPASS_LPI_H__*/
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
new file mode 100644
index 000000000000..2add9a4520c2
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * QTI SC7280 LPASS LPI (Low Power Island) pin controller driver
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-lpass-lpi.h"
+
+enum lpass_lpi_functions {
+ LPI_MUX_dmic1_clk,
+ LPI_MUX_dmic1_data,
+ LPI_MUX_dmic2_clk,
+ LPI_MUX_dmic2_data,
+ LPI_MUX_dmic3_clk,
+ LPI_MUX_dmic3_data,
+ LPI_MUX_i2s1_clk,
+ LPI_MUX_i2s1_data,
+ LPI_MUX_i2s1_ws,
+ LPI_MUX_i2s2_clk,
+ LPI_MUX_i2s2_data,
+ LPI_MUX_i2s2_ws,
+ LPI_MUX_qua_mi2s_data,
+ LPI_MUX_qua_mi2s_sclk,
+ LPI_MUX_qua_mi2s_ws,
+ LPI_MUX_swr_rx_clk,
+ LPI_MUX_swr_rx_data,
+ LPI_MUX_swr_tx_clk,
+ LPI_MUX_swr_tx_data,
+ LPI_MUX_wsa_swr_clk,
+ LPI_MUX_wsa_swr_data,
+ LPI_MUX_gpio,
+ LPI_MUX__,
+};
+
+static int gpio0_pins[] = { 0 };
+static int gpio1_pins[] = { 1 };
+static int gpio2_pins[] = { 2 };
+static int gpio3_pins[] = { 3 };
+static int gpio4_pins[] = { 4 };
+static int gpio5_pins[] = { 5 };
+static int gpio6_pins[] = { 6 };
+static int gpio7_pins[] = { 7 };
+static int gpio8_pins[] = { 8 };
+static int gpio9_pins[] = { 9 };
+static int gpio10_pins[] = { 10 };
+static int gpio11_pins[] = { 11 };
+static int gpio12_pins[] = { 12 };
+static int gpio13_pins[] = { 13 };
+static int gpio14_pins[] = { 14 };
+
+static const struct pinctrl_pin_desc sc7280_lpi_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+};
+
+static const char * const swr_tx_clk_groups[] = { "gpio0" };
+static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" };
+static const char * const swr_rx_clk_groups[] = { "gpio3" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" };
+static const char * const dmic1_clk_groups[] = { "gpio6" };
+static const char * const dmic1_data_groups[] = { "gpio7" };
+static const char * const dmic2_clk_groups[] = { "gpio8" };
+static const char * const dmic2_data_groups[] = { "gpio9" };
+static const char * const i2s2_clk_groups[] = { "gpio10" };
+static const char * const i2s2_ws_groups[] = { "gpio11" };
+static const char * const dmic3_clk_groups[] = { "gpio12" };
+static const char * const dmic3_data_groups[] = { "gpio13" };
+static const char * const qua_mi2s_sclk_groups[] = { "gpio0" };
+static const char * const qua_mi2s_ws_groups[] = { "gpio1" };
+static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4" };
+static const char * const i2s1_clk_groups[] = { "gpio6" };
+static const char * const i2s1_ws_groups[] = { "gpio7" };
+static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" };
+static const char * const wsa_swr_clk_groups[] = { "gpio10" };
+static const char * const wsa_swr_data_groups[] = { "gpio11" };
+static const char * const i2s2_data_groups[] = { "gpio12", "gpio13" };
+
+static const struct lpi_pingroup sc7280_groups[] = {
+ LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _),
+ LPI_PINGROUP(1, 2, swr_tx_data, qua_mi2s_ws, _, _),
+ LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _),
+ LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(5, 12, swr_rx_data, _, _, _),
+ LPI_PINGROUP(6, LPI_NO_SLEW, dmic1_clk, i2s1_clk, _, _),
+ LPI_PINGROUP(7, LPI_NO_SLEW, dmic1_data, i2s1_ws, _, _),
+ LPI_PINGROUP(8, LPI_NO_SLEW, dmic2_clk, i2s1_data, _, _),
+ LPI_PINGROUP(9, LPI_NO_SLEW, dmic2_data, i2s1_data, _, _),
+ LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _),
+ LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _),
+ LPI_PINGROUP(12, LPI_NO_SLEW, dmic3_clk, i2s2_data, _, _),
+ LPI_PINGROUP(13, LPI_NO_SLEW, dmic3_data, i2s2_data, _, _),
+ LPI_PINGROUP(14, 6, swr_tx_data, _, _, _),
+};
+
+static const struct lpi_function sc7280_functions[] = {
+ LPI_FUNCTION(dmic1_clk),
+ LPI_FUNCTION(dmic1_data),
+ LPI_FUNCTION(dmic2_clk),
+ LPI_FUNCTION(dmic2_data),
+ LPI_FUNCTION(dmic3_clk),
+ LPI_FUNCTION(dmic3_data),
+ LPI_FUNCTION(i2s1_clk),
+ LPI_FUNCTION(i2s1_data),
+ LPI_FUNCTION(i2s1_ws),
+ LPI_FUNCTION(i2s2_clk),
+ LPI_FUNCTION(i2s2_data),
+ LPI_FUNCTION(i2s2_ws),
+ LPI_FUNCTION(qua_mi2s_data),
+ LPI_FUNCTION(qua_mi2s_sclk),
+ LPI_FUNCTION(qua_mi2s_ws),
+ LPI_FUNCTION(swr_rx_clk),
+ LPI_FUNCTION(swr_rx_data),
+ LPI_FUNCTION(swr_tx_clk),
+ LPI_FUNCTION(swr_tx_data),
+ LPI_FUNCTION(wsa_swr_clk),
+ LPI_FUNCTION(wsa_swr_data),
+};
+
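+/*
+ * is_clk_optional makes lpi_pinctrl_probe() request the two bulk clocks
+ * with devm_clk_bulk_get_optional(), so probing does not fail when they
+ * are not described for this variant.
+ */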
+static const struct lpi_pinctrl_variant_data sc7280_lpi_data = {
+ .pins = sc7280_lpi_pins,
+ .npins = ARRAY_SIZE(sc7280_lpi_pins),
+ .groups = sc7280_groups,
+ .ngroups = ARRAY_SIZE(sc7280_groups),
+ .functions = sc7280_functions,
+ .nfunctions = ARRAY_SIZE(sc7280_functions),
+ .is_clk_optional = true,
+};
+
+static const struct of_device_id lpi_pinctrl_of_match[] = {
+ {
+ .compatible = "qcom,sc7280-lpass-lpi-pinctrl",
+ .data = &sc7280_lpi_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match);
+
+static struct platform_driver lpi_pinctrl_driver = {
+ .driver = {
+ .name = "qcom-sc7280-lpass-lpi-pinctrl",
+ .of_match_table = lpi_pinctrl_of_match,
+ },
+ .probe = lpi_pinctrl_probe,
+ .remove = lpi_pinctrl_remove,
+};
+
+module_platform_driver(lpi_pinctrl_driver);
+MODULE_DESCRIPTION("QTI SC7280 LPI GPIO pin control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8150.c b/drivers/pinctrl/qcom/pinctrl-sm8150.c
index 7359bae68c69..1cc622694553 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8150.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8150.c
@@ -1500,6 +1500,25 @@ static const struct msm_pingroup sm8150_groups[] = {
[178] = SDC_QDSD_PINGROUP(sdc2_data, 0xB2000, 9, 0),
};
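+/*
+ * TLMM GPIO to PDC wakeup-interrupt mapping: each entry pairs a GPIO
+ * number with the PDC pin that can wake the system on its behalf; a few
+ * GPIOs are routed to two PDC pins and therefore appear twice.
+ */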
+static const struct msm_gpio_wakeirq_map sm8150_pdc_map[] = {
+ { 3, 31 }, { 5, 32 }, { 8, 33 }, { 9, 34 }, { 10, 100 },
+ { 12, 104 }, { 24, 37 }, { 26, 38 }, { 27, 41 }, { 28, 42 },
+ { 30, 39 }, { 36, 43 }, { 37, 44 }, { 38, 30 }, { 39, 118 },
+ { 39, 125 }, { 41, 47 }, { 42, 48 }, { 46, 50 }, { 47, 49 },
+ { 48, 51 }, { 49, 53 }, { 50, 52 }, { 51, 116 }, { 51, 123 },
+ { 53, 54 }, { 54, 55 }, { 55, 56 }, { 56, 57 }, { 58, 58 },
+ { 60, 60 }, { 61, 61 }, { 68, 62 }, { 70, 63 }, { 76, 71 },
+ { 77, 66 }, { 81, 64 }, { 83, 65 }, { 86, 67 }, { 87, 84 },
+ { 88, 117 }, { 88, 124 }, { 90, 69 }, { 91, 70 }, { 93, 75 },
+ { 95, 72 }, { 96, 73 }, { 97, 74 }, { 101, 40 }, { 103, 77 },
+ { 104, 78 }, { 108, 79 }, { 112, 80 }, { 113, 81 }, { 114, 82 },
+ { 117, 85 }, { 118, 101 }, { 119, 87 }, { 120, 88 }, { 121, 89 },
+ { 122, 90 }, { 123, 91 }, { 124, 92 }, { 125, 93 }, { 129, 94 },
+ { 132, 105 }, { 133, 83 }, { 134, 36 }, { 136, 97 }, { 142, 103 },
+ { 144, 115 }, { 144, 122 }, { 147, 102 }, { 150, 107 },
+ { 152, 108 }, { 153, 109 }
+};
+
static const struct msm_pinctrl_soc_data sm8150_pinctrl = {
.pins = sm8150_pins,
.npins = ARRAY_SIZE(sm8150_pins),
@@ -1510,6 +1529,9 @@ static const struct msm_pinctrl_soc_data sm8150_pinctrl = {
.ngpios = 176,
.tiles = sm8150_tiles,
.ntiles = ARRAY_SIZE(sm8150_tiles),
+ .wakeirq_map = sm8150_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sm8150_pdc_map),
+ .wakeirq_dual_edge_errata = true,
};
static int sm8150_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c
new file mode 100644
index 000000000000..ddbc6317f2a7
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020 Linaro Ltd.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-lpass-lpi.h"
+
+enum lpass_lpi_functions {
+ LPI_MUX_dmic1_clk,
+ LPI_MUX_dmic1_data,
+ LPI_MUX_dmic2_clk,
+ LPI_MUX_dmic2_data,
+ LPI_MUX_dmic3_clk,
+ LPI_MUX_dmic3_data,
+ LPI_MUX_i2s1_clk,
+ LPI_MUX_i2s1_data,
+ LPI_MUX_i2s1_ws,
+ LPI_MUX_i2s2_clk,
+ LPI_MUX_i2s2_data,
+ LPI_MUX_i2s2_ws,
+ LPI_MUX_qua_mi2s_data,
+ LPI_MUX_qua_mi2s_sclk,
+ LPI_MUX_qua_mi2s_ws,
+ LPI_MUX_swr_rx_clk,
+ LPI_MUX_swr_rx_data,
+ LPI_MUX_swr_tx_clk,
+ LPI_MUX_swr_tx_data,
+ LPI_MUX_wsa_swr_clk,
+ LPI_MUX_wsa_swr_data,
+ LPI_MUX_gpio,
+ LPI_MUX__,
+};
+
+static int gpio0_pins[] = { 0 };
+static int gpio1_pins[] = { 1 };
+static int gpio2_pins[] = { 2 };
+static int gpio3_pins[] = { 3 };
+static int gpio4_pins[] = { 4 };
+static int gpio5_pins[] = { 5 };
+static int gpio6_pins[] = { 6 };
+static int gpio7_pins[] = { 7 };
+static int gpio8_pins[] = { 8 };
+static int gpio9_pins[] = { 9 };
+static int gpio10_pins[] = { 10 };
+static int gpio11_pins[] = { 11 };
+static int gpio12_pins[] = { 12 };
+static int gpio13_pins[] = { 13 };
+
+static const struct pinctrl_pin_desc sm8250_lpi_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+};
+
+static const char * const swr_tx_clk_groups[] = { "gpio0" };
+static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio5" };
+static const char * const swr_rx_clk_groups[] = { "gpio3" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" };
+static const char * const dmic1_clk_groups[] = { "gpio6" };
+static const char * const dmic1_data_groups[] = { "gpio7" };
+static const char * const dmic2_clk_groups[] = { "gpio8" };
+static const char * const dmic2_data_groups[] = { "gpio9" };
+static const char * const i2s2_clk_groups[] = { "gpio10" };
+static const char * const i2s2_ws_groups[] = { "gpio11" };
+static const char * const dmic3_clk_groups[] = { "gpio12" };
+static const char * const dmic3_data_groups[] = { "gpio13" };
+static const char * const qua_mi2s_sclk_groups[] = { "gpio0" };
+static const char * const qua_mi2s_ws_groups[] = { "gpio1" };
+static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4" };
+static const char * const i2s1_clk_groups[] = { "gpio6" };
+static const char * const i2s1_ws_groups[] = { "gpio7" };
+static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" };
+static const char * const wsa_swr_clk_groups[] = { "gpio10" };
+static const char * const wsa_swr_data_groups[] = { "gpio11" };
+static const char * const i2s2_data_groups[] = { "gpio12", "gpio12" };
+
+static const struct lpi_pingroup sm8250_groups[] = {
+ LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _),
+ LPI_PINGROUP(1, 2, swr_tx_data, qua_mi2s_ws, _, _),
+ LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _),
+ LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(5, 12, swr_tx_data, swr_rx_data, _, _),
+ LPI_PINGROUP(6, LPI_NO_SLEW, dmic1_clk, i2s1_clk, _, _),
+ LPI_PINGROUP(7, LPI_NO_SLEW, dmic1_data, i2s1_ws, _, _),
+ LPI_PINGROUP(8, LPI_NO_SLEW, dmic2_clk, i2s1_data, _, _),
+ LPI_PINGROUP(9, LPI_NO_SLEW, dmic2_data, i2s1_data, _, _),
+ LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _),
+ LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _),
+ LPI_PINGROUP(12, LPI_NO_SLEW, dmic3_clk, i2s2_data, _, _),
+ LPI_PINGROUP(13, LPI_NO_SLEW, dmic3_data, i2s2_data, _, _),
+};
+
+static const struct lpi_function sm8250_functions[] = {
+ LPI_FUNCTION(dmic1_clk),
+ LPI_FUNCTION(dmic1_data),
+ LPI_FUNCTION(dmic2_clk),
+ LPI_FUNCTION(dmic2_data),
+ LPI_FUNCTION(dmic3_clk),
+ LPI_FUNCTION(dmic3_data),
+ LPI_FUNCTION(i2s1_clk),
+ LPI_FUNCTION(i2s1_data),
+ LPI_FUNCTION(i2s1_ws),
+ LPI_FUNCTION(i2s2_clk),
+ LPI_FUNCTION(i2s2_data),
+ LPI_FUNCTION(i2s2_ws),
+ LPI_FUNCTION(qua_mi2s_data),
+ LPI_FUNCTION(qua_mi2s_sclk),
+ LPI_FUNCTION(qua_mi2s_ws),
+ LPI_FUNCTION(swr_rx_clk),
+ LPI_FUNCTION(swr_rx_data),
+ LPI_FUNCTION(swr_tx_clk),
+ LPI_FUNCTION(swr_tx_data),
+ LPI_FUNCTION(wsa_swr_clk),
+ LPI_FUNCTION(wsa_swr_data),
+};
+
+static const struct lpi_pinctrl_variant_data sm8250_lpi_data = {
+ .pins = sm8250_lpi_pins,
+ .npins = ARRAY_SIZE(sm8250_lpi_pins),
+ .groups = sm8250_groups,
+ .ngroups = ARRAY_SIZE(sm8250_groups),
+ .functions = sm8250_functions,
+ .nfunctions = ARRAY_SIZE(sm8250_functions),
+};
+
+static const struct of_device_id lpi_pinctrl_of_match[] = {
+ {
+ .compatible = "qcom,sm8250-lpass-lpi-pinctrl",
+ .data = &sm8250_lpi_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match);
+
+static struct platform_driver lpi_pinctrl_driver = {
+ .driver = {
+ .name = "qcom-sm8250-lpass-lpi-pinctrl",
+ .of_match_table = lpi_pinctrl_of_match,
+ },
+ .probe = lpi_pinctrl_probe,
+ .remove = lpi_pinctrl_remove,
+};
+
+module_platform_driver(lpi_pinctrl_driver);
+MODULE_DESCRIPTION("QTI SM8250 LPI GPIO pin control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 4fbf8d3938ef..fd5fff9adff0 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1146,6 +1146,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm660-gpio", .data = (void *) 13 },
/* pm660l has 12 GPIOs with holes on 1, 2, 10, 11 and 12 */
{ .compatible = "qcom,pm660l-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm6125-gpio", .data = (void *) 9 },
{ .compatible = "qcom,pm6150-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm6150l-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pm6350-gpio", .data = (void *) 9 },
@@ -1183,6 +1184,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pms405-gpio", .data = (void *) 12 },
/* pmx55 has 11 GPIOs with holes on 3, 7, 10, 11 */
{ .compatible = "qcom,pmx55-gpio", .data = (void *) 11 },
+ { .compatible = "qcom,pmx65-gpio", .data = (void *) 16 },
{ },
};
diff --git a/drivers/pinctrl/ralink/Kconfig b/drivers/pinctrl/ralink/Kconfig
index a76ee3deb8c3..1e4c5e43d69b 100644
--- a/drivers/pinctrl/ralink/Kconfig
+++ b/drivers/pinctrl/ralink/Kconfig
@@ -3,37 +3,33 @@ menu "Ralink pinctrl drivers"
depends on RALINK
config PINCTRL_RALINK
- bool "Ralink pin control support"
- default y if RALINK
-
-config PINCTRL_RT2880
- bool "RT2880 pinctrl driver for RALINK/Mediatek SOCs"
+ bool "Ralink pinctrl driver"
select PINMUX
select GENERIC_PINCONF
config PINCTRL_MT7620
- bool "mt7620 pinctrl driver for RALINK/Mediatek SOCs"
+ bool "MT7620 pinctrl subdriver"
depends on RALINK && SOC_MT7620
- select PINCTRL_RT2880
+ select PINCTRL_RALINK
config PINCTRL_MT7621
- bool "mt7621 pinctrl driver for RALINK/Mediatek SOCs"
+ bool "MT7621 pinctrl subdriver"
depends on RALINK && SOC_MT7621
- select PINCTRL_RT2880
+ select PINCTRL_RALINK
-config PINCTRL_RT288X
- bool "RT288X pinctrl driver for RALINK/Mediatek SOCs"
+config PINCTRL_RT2880
+ bool "RT2880 pinctrl subdriver"
depends on RALINK && SOC_RT288X
- select PINCTRL_RT2880
+ select PINCTRL_RALINK
config PINCTRL_RT305X
- bool "RT305X pinctrl driver for RALINK/Mediatek SOCs"
+ bool "RT305X pinctrl subdriver"
depends on RALINK && SOC_RT305X
- select PINCTRL_RT2880
+ select PINCTRL_RALINK
config PINCTRL_RT3883
- bool "RT3883 pinctrl driver for RALINK/Mediatek SOCs"
+ bool "RT3883 pinctrl subdriver"
depends on RALINK && SOC_RT3883
- select PINCTRL_RT2880
+ select PINCTRL_RALINK
endmenu
diff --git a/drivers/pinctrl/ralink/Makefile b/drivers/pinctrl/ralink/Makefile
index a15610206ced..0ebbe552526d 100644
--- a/drivers/pinctrl/ralink/Makefile
+++ b/drivers/pinctrl/ralink/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PINCTRL_RT2880) += pinctrl-rt2880.o
+obj-$(CONFIG_PINCTRL_RALINK) += pinctrl-ralink.o
obj-$(CONFIG_PINCTRL_MT7620) += pinctrl-mt7620.o
obj-$(CONFIG_PINCTRL_MT7621) += pinctrl-mt7621.o
-obj-$(CONFIG_PINCTRL_RT288X) += pinctrl-rt288x.o
+obj-$(CONFIG_PINCTRL_RT2880) += pinctrl-rt2880.o
obj-$(CONFIG_PINCTRL_RT305X) += pinctrl-rt305x.o
obj-$(CONFIG_PINCTRL_RT3883) += pinctrl-rt3883.o
diff --git a/drivers/pinctrl/ralink/pinctrl-mt7620.c b/drivers/pinctrl/ralink/pinctrl-mt7620.c
index 6853b5b8b0fe..22ff16eff02f 100644
--- a/drivers/pinctrl/ralink/pinctrl-mt7620.c
+++ b/drivers/pinctrl/ralink/pinctrl-mt7620.c
@@ -5,7 +5,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include "pinmux.h"
+#include "pinctrl-ralink.h"
#define MT7620_GPIO_MODE_UART0_SHIFT 2
#define MT7620_GPIO_MODE_UART0_MASK 0x7
@@ -54,20 +54,20 @@
#define MT7620_GPIO_MODE_EPHY 15
#define MT7620_GPIO_MODE_PA 20
-static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) };
-static struct rt2880_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) };
-static struct rt2880_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) };
-static struct rt2880_pmx_func mdio_grp[] = {
+static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
+static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
+static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
+static struct ralink_pmx_func mdio_func[] = {
FUNC("mdio", MT7620_GPIO_MODE_MDIO, 22, 2),
FUNC("refclk", MT7620_GPIO_MODE_MDIO_REFCLK, 22, 2),
};
-static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 24, 12) };
-static struct rt2880_pmx_func refclk_grp[] = { FUNC("spi refclk", 0, 37, 3) };
-static struct rt2880_pmx_func ephy_grp[] = { FUNC("ephy", 0, 40, 5) };
-static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 60, 12) };
-static struct rt2880_pmx_func wled_grp[] = { FUNC("wled", 0, 72, 1) };
-static struct rt2880_pmx_func pa_grp[] = { FUNC("pa", 0, 18, 4) };
-static struct rt2880_pmx_func uartf_grp[] = {
+static struct ralink_pmx_func rgmii1_func[] = { FUNC("rgmii1", 0, 24, 12) };
+static struct ralink_pmx_func refclk_func[] = { FUNC("spi refclk", 0, 37, 3) };
+static struct ralink_pmx_func ephy_func[] = { FUNC("ephy", 0, 40, 5) };
+static struct ralink_pmx_func rgmii2_func[] = { FUNC("rgmii2", 0, 60, 12) };
+static struct ralink_pmx_func wled_func[] = { FUNC("wled", 0, 72, 1) };
+static struct ralink_pmx_func pa_func[] = { FUNC("pa", 0, 18, 4) };
+static struct ralink_pmx_func uartf_func[] = {
FUNC("uartf", MT7620_GPIO_MODE_UARTF, 7, 8),
FUNC("pcm uartf", MT7620_GPIO_MODE_PCM_UARTF, 7, 8),
FUNC("pcm i2s", MT7620_GPIO_MODE_PCM_I2S, 7, 8),
@@ -76,316 +76,316 @@ static struct rt2880_pmx_func uartf_grp[] = {
FUNC("gpio uartf", MT7620_GPIO_MODE_GPIO_UARTF, 7, 4),
FUNC("gpio i2s", MT7620_GPIO_MODE_GPIO_I2S, 7, 4),
};
-static struct rt2880_pmx_func wdt_grp[] = {
+static struct ralink_pmx_func wdt_func[] = {
FUNC("wdt rst", 0, 17, 1),
FUNC("wdt refclk", 0, 17, 1),
};
-static struct rt2880_pmx_func pcie_rst_grp[] = {
+static struct ralink_pmx_func pcie_rst_func[] = {
FUNC("pcie rst", MT7620_GPIO_MODE_PCIE_RST, 36, 1),
FUNC("pcie refclk", MT7620_GPIO_MODE_PCIE_REF, 36, 1)
};
-static struct rt2880_pmx_func nd_sd_grp[] = {
+static struct ralink_pmx_func nd_sd_func[] = {
FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15),
FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13)
};
-static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
- GRP("i2c", i2c_grp, 1, MT7620_GPIO_MODE_I2C),
- GRP("uartf", uartf_grp, MT7620_GPIO_MODE_UART0_MASK,
+static struct ralink_pmx_group mt7620a_pinmux_data[] = {
+ GRP("i2c", i2c_func, 1, MT7620_GPIO_MODE_I2C),
+ GRP("uartf", uartf_func, MT7620_GPIO_MODE_UART0_MASK,
MT7620_GPIO_MODE_UART0_SHIFT),
- GRP("spi", spi_grp, 1, MT7620_GPIO_MODE_SPI),
- GRP("uartlite", uartlite_grp, 1, MT7620_GPIO_MODE_UART1),
- GRP_G("wdt", wdt_grp, MT7620_GPIO_MODE_WDT_MASK,
+ GRP("spi", spi_func, 1, MT7620_GPIO_MODE_SPI),
+ GRP("uartlite", uartlite_func, 1, MT7620_GPIO_MODE_UART1),
+ GRP_G("wdt", wdt_func, MT7620_GPIO_MODE_WDT_MASK,
MT7620_GPIO_MODE_WDT_GPIO, MT7620_GPIO_MODE_WDT_SHIFT),
- GRP_G("mdio", mdio_grp, MT7620_GPIO_MODE_MDIO_MASK,
+ GRP_G("mdio", mdio_func, MT7620_GPIO_MODE_MDIO_MASK,
MT7620_GPIO_MODE_MDIO_GPIO, MT7620_GPIO_MODE_MDIO_SHIFT),
- GRP("rgmii1", rgmii1_grp, 1, MT7620_GPIO_MODE_RGMII1),
- GRP("spi refclk", refclk_grp, 1, MT7620_GPIO_MODE_SPI_REF_CLK),
- GRP_G("pcie", pcie_rst_grp, MT7620_GPIO_MODE_PCIE_MASK,
+ GRP("rgmii1", rgmii1_func, 1, MT7620_GPIO_MODE_RGMII1),
+ GRP("spi refclk", refclk_func, 1, MT7620_GPIO_MODE_SPI_REF_CLK),
+ GRP_G("pcie", pcie_rst_func, MT7620_GPIO_MODE_PCIE_MASK,
MT7620_GPIO_MODE_PCIE_GPIO, MT7620_GPIO_MODE_PCIE_SHIFT),
- GRP_G("nd_sd", nd_sd_grp, MT7620_GPIO_MODE_ND_SD_MASK,
+ GRP_G("nd_sd", nd_sd_func, MT7620_GPIO_MODE_ND_SD_MASK,
MT7620_GPIO_MODE_ND_SD_GPIO, MT7620_GPIO_MODE_ND_SD_SHIFT),
- GRP("rgmii2", rgmii2_grp, 1, MT7620_GPIO_MODE_RGMII2),
- GRP("wled", wled_grp, 1, MT7620_GPIO_MODE_WLED),
- GRP("ephy", ephy_grp, 1, MT7620_GPIO_MODE_EPHY),
- GRP("pa", pa_grp, 1, MT7620_GPIO_MODE_PA),
+ GRP("rgmii2", rgmii2_func, 1, MT7620_GPIO_MODE_RGMII2),
+ GRP("wled", wled_func, 1, MT7620_GPIO_MODE_WLED),
+ GRP("ephy", ephy_func, 1, MT7620_GPIO_MODE_EPHY),
+ GRP("pa", pa_func, 1, MT7620_GPIO_MODE_PA),
{ 0 }
};
-static struct rt2880_pmx_func pwm1_grp_mt7628[] = {
+static struct ralink_pmx_func pwm1_func_mt76x8[] = {
FUNC("sdxc d6", 3, 19, 1),
FUNC("utif", 2, 19, 1),
FUNC("gpio", 1, 19, 1),
FUNC("pwm1", 0, 19, 1),
};
-static struct rt2880_pmx_func pwm0_grp_mt7628[] = {
+static struct ralink_pmx_func pwm0_func_mt76x8[] = {
FUNC("sdxc d7", 3, 18, 1),
FUNC("utif", 2, 18, 1),
FUNC("gpio", 1, 18, 1),
FUNC("pwm0", 0, 18, 1),
};
-static struct rt2880_pmx_func uart2_grp_mt7628[] = {
+static struct ralink_pmx_func uart2_func_mt76x8[] = {
FUNC("sdxc d5 d4", 3, 20, 2),
FUNC("pwm", 2, 20, 2),
FUNC("gpio", 1, 20, 2),
FUNC("uart2", 0, 20, 2),
};
-static struct rt2880_pmx_func uart1_grp_mt7628[] = {
+static struct ralink_pmx_func uart1_func_mt76x8[] = {
FUNC("sw_r", 3, 45, 2),
FUNC("pwm", 2, 45, 2),
FUNC("gpio", 1, 45, 2),
FUNC("uart1", 0, 45, 2),
};
-static struct rt2880_pmx_func i2c_grp_mt7628[] = {
+static struct ralink_pmx_func i2c_func_mt76x8[] = {
FUNC("-", 3, 4, 2),
FUNC("debug", 2, 4, 2),
FUNC("gpio", 1, 4, 2),
FUNC("i2c", 0, 4, 2),
};
-static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) };
-static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) };
-static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
-static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
+static struct ralink_pmx_func refclk_func_mt76x8[] = { FUNC("refclk", 0, 37, 1) };
+static struct ralink_pmx_func perst_func_mt76x8[] = { FUNC("perst", 0, 36, 1) };
+static struct ralink_pmx_func wdt_func_mt76x8[] = { FUNC("wdt", 0, 38, 1) };
+static struct ralink_pmx_func spi_func_mt76x8[] = { FUNC("spi", 0, 7, 4) };
-static struct rt2880_pmx_func sd_mode_grp_mt7628[] = {
+static struct ralink_pmx_func sd_mode_func_mt76x8[] = {
FUNC("jtag", 3, 22, 8),
FUNC("utif", 2, 22, 8),
FUNC("gpio", 1, 22, 8),
FUNC("sdxc", 0, 22, 8),
};
-static struct rt2880_pmx_func uart0_grp_mt7628[] = {
+static struct ralink_pmx_func uart0_func_mt76x8[] = {
FUNC("-", 3, 12, 2),
FUNC("-", 2, 12, 2),
FUNC("gpio", 1, 12, 2),
FUNC("uart0", 0, 12, 2),
};
-static struct rt2880_pmx_func i2s_grp_mt7628[] = {
+static struct ralink_pmx_func i2s_func_mt76x8[] = {
FUNC("antenna", 3, 0, 4),
FUNC("pcm", 2, 0, 4),
FUNC("gpio", 1, 0, 4),
FUNC("i2s", 0, 0, 4),
};
-static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
+static struct ralink_pmx_func spi_cs1_func_mt76x8[] = {
FUNC("-", 3, 6, 1),
FUNC("refclk", 2, 6, 1),
FUNC("gpio", 1, 6, 1),
FUNC("spi cs1", 0, 6, 1),
};
-static struct rt2880_pmx_func spis_grp_mt7628[] = {
+static struct ralink_pmx_func spis_func_mt76x8[] = {
FUNC("pwm_uart2", 3, 14, 4),
FUNC("utif", 2, 14, 4),
FUNC("gpio", 1, 14, 4),
FUNC("spis", 0, 14, 4),
};
-static struct rt2880_pmx_func gpio_grp_mt7628[] = {
+static struct ralink_pmx_func gpio_func_mt76x8[] = {
FUNC("pcie", 3, 11, 1),
FUNC("refclk", 2, 11, 1),
FUNC("gpio", 1, 11, 1),
FUNC("gpio", 0, 11, 1),
};
-static struct rt2880_pmx_func p4led_kn_grp_mt7628[] = {
+static struct ralink_pmx_func p4led_kn_func_mt76x8[] = {
FUNC("jtag", 3, 30, 1),
FUNC("utif", 2, 30, 1),
FUNC("gpio", 1, 30, 1),
FUNC("p4led_kn", 0, 30, 1),
};
-static struct rt2880_pmx_func p3led_kn_grp_mt7628[] = {
+static struct ralink_pmx_func p3led_kn_func_mt76x8[] = {
FUNC("jtag", 3, 31, 1),
FUNC("utif", 2, 31, 1),
FUNC("gpio", 1, 31, 1),
FUNC("p3led_kn", 0, 31, 1),
};
-static struct rt2880_pmx_func p2led_kn_grp_mt7628[] = {
+static struct ralink_pmx_func p2led_kn_func_mt76x8[] = {
FUNC("jtag", 3, 32, 1),
FUNC("utif", 2, 32, 1),
FUNC("gpio", 1, 32, 1),
FUNC("p2led_kn", 0, 32, 1),
};
-static struct rt2880_pmx_func p1led_kn_grp_mt7628[] = {
+static struct ralink_pmx_func p1led_kn_func_mt76x8[] = {
FUNC("jtag", 3, 33, 1),
FUNC("utif", 2, 33, 1),
FUNC("gpio", 1, 33, 1),
FUNC("p1led_kn", 0, 33, 1),
};
-static struct rt2880_pmx_func p0led_kn_grp_mt7628[] = {
+static struct ralink_pmx_func p0led_kn_func_mt76x8[] = {
FUNC("jtag", 3, 34, 1),
FUNC("rsvd", 2, 34, 1),
FUNC("gpio", 1, 34, 1),
FUNC("p0led_kn", 0, 34, 1),
};
-static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
+static struct ralink_pmx_func wled_kn_func_mt76x8[] = {
FUNC("rsvd", 3, 35, 1),
FUNC("rsvd", 2, 35, 1),
FUNC("gpio", 1, 35, 1),
FUNC("wled_kn", 0, 35, 1),
};
-static struct rt2880_pmx_func p4led_an_grp_mt7628[] = {
+static struct ralink_pmx_func p4led_an_func_mt76x8[] = {
FUNC("jtag", 3, 39, 1),
FUNC("utif", 2, 39, 1),
FUNC("gpio", 1, 39, 1),
FUNC("p4led_an", 0, 39, 1),
};
-static struct rt2880_pmx_func p3led_an_grp_mt7628[] = {
+static struct ralink_pmx_func p3led_an_func_mt76x8[] = {
FUNC("jtag", 3, 40, 1),
FUNC("utif", 2, 40, 1),
FUNC("gpio", 1, 40, 1),
FUNC("p3led_an", 0, 40, 1),
};
-static struct rt2880_pmx_func p2led_an_grp_mt7628[] = {
+static struct ralink_pmx_func p2led_an_func_mt76x8[] = {
FUNC("jtag", 3, 41, 1),
FUNC("utif", 2, 41, 1),
FUNC("gpio", 1, 41, 1),
FUNC("p2led_an", 0, 41, 1),
};
-static struct rt2880_pmx_func p1led_an_grp_mt7628[] = {
+static struct ralink_pmx_func p1led_an_func_mt76x8[] = {
FUNC("jtag", 3, 42, 1),
FUNC("utif", 2, 42, 1),
FUNC("gpio", 1, 42, 1),
FUNC("p1led_an", 0, 42, 1),
};
-static struct rt2880_pmx_func p0led_an_grp_mt7628[] = {
+static struct ralink_pmx_func p0led_an_func_mt76x8[] = {
FUNC("jtag", 3, 43, 1),
FUNC("rsvd", 2, 43, 1),
FUNC("gpio", 1, 43, 1),
FUNC("p0led_an", 0, 43, 1),
};
-static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
+static struct ralink_pmx_func wled_an_func_mt76x8[] = {
FUNC("rsvd", 3, 44, 1),
FUNC("rsvd", 2, 44, 1),
FUNC("gpio", 1, 44, 1),
FUNC("wled_an", 0, 44, 1),
};
-#define MT7628_GPIO_MODE_MASK 0x3
-
-#define MT7628_GPIO_MODE_P4LED_KN 58
-#define MT7628_GPIO_MODE_P3LED_KN 56
-#define MT7628_GPIO_MODE_P2LED_KN 54
-#define MT7628_GPIO_MODE_P1LED_KN 52
-#define MT7628_GPIO_MODE_P0LED_KN 50
-#define MT7628_GPIO_MODE_WLED_KN 48
-#define MT7628_GPIO_MODE_P4LED_AN 42
-#define MT7628_GPIO_MODE_P3LED_AN 40
-#define MT7628_GPIO_MODE_P2LED_AN 38
-#define MT7628_GPIO_MODE_P1LED_AN 36
-#define MT7628_GPIO_MODE_P0LED_AN 34
-#define MT7628_GPIO_MODE_WLED_AN 32
-#define MT7628_GPIO_MODE_PWM1 30
-#define MT7628_GPIO_MODE_PWM0 28
-#define MT7628_GPIO_MODE_UART2 26
-#define MT7628_GPIO_MODE_UART1 24
-#define MT7628_GPIO_MODE_I2C 20
-#define MT7628_GPIO_MODE_REFCLK 18
-#define MT7628_GPIO_MODE_PERST 16
-#define MT7628_GPIO_MODE_WDT 14
-#define MT7628_GPIO_MODE_SPI 12
-#define MT7628_GPIO_MODE_SDMODE 10
-#define MT7628_GPIO_MODE_UART0 8
-#define MT7628_GPIO_MODE_I2S 6
-#define MT7628_GPIO_MODE_CS1 4
-#define MT7628_GPIO_MODE_SPIS 2
-#define MT7628_GPIO_MODE_GPIO 0
-
-static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
- GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_PWM1),
- GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_PWM0),
- GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_UART2),
- GRP_G("uart1", uart1_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_UART1),
- GRP_G("i2c", i2c_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_I2C),
- GRP("refclk", refclk_grp_mt7628, 1, MT7628_GPIO_MODE_REFCLK),
- GRP("perst", perst_grp_mt7628, 1, MT7628_GPIO_MODE_PERST),
- GRP("wdt", wdt_grp_mt7628, 1, MT7628_GPIO_MODE_WDT),
- GRP("spi", spi_grp_mt7628, 1, MT7628_GPIO_MODE_SPI),
- GRP_G("sdmode", sd_mode_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_SDMODE),
- GRP_G("uart0", uart0_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_UART0),
- GRP_G("i2s", i2s_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_I2S),
- GRP_G("spi cs1", spi_cs1_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_CS1),
- GRP_G("spis", spis_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_SPIS),
- GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_GPIO),
- GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_WLED_AN),
- GRP_G("p0led_an", p0led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P0LED_AN),
- GRP_G("p1led_an", p1led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P1LED_AN),
- GRP_G("p2led_an", p2led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P2LED_AN),
- GRP_G("p3led_an", p3led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P3LED_AN),
- GRP_G("p4led_an", p4led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P4LED_AN),
- GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_WLED_KN),
- GRP_G("p0led_kn", p0led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P0LED_KN),
- GRP_G("p1led_kn", p1led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P1LED_KN),
- GRP_G("p2led_kn", p2led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P2LED_KN),
- GRP_G("p3led_kn", p3led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P3LED_KN),
- GRP_G("p4led_kn", p4led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P4LED_KN),
+#define MT76X8_GPIO_MODE_MASK 0x3
+
+#define MT76X8_GPIO_MODE_P4LED_KN 58
+#define MT76X8_GPIO_MODE_P3LED_KN 56
+#define MT76X8_GPIO_MODE_P2LED_KN 54
+#define MT76X8_GPIO_MODE_P1LED_KN 52
+#define MT76X8_GPIO_MODE_P0LED_KN 50
+#define MT76X8_GPIO_MODE_WLED_KN 48
+#define MT76X8_GPIO_MODE_P4LED_AN 42
+#define MT76X8_GPIO_MODE_P3LED_AN 40
+#define MT76X8_GPIO_MODE_P2LED_AN 38
+#define MT76X8_GPIO_MODE_P1LED_AN 36
+#define MT76X8_GPIO_MODE_P0LED_AN 34
+#define MT76X8_GPIO_MODE_WLED_AN 32
+#define MT76X8_GPIO_MODE_PWM1 30
+#define MT76X8_GPIO_MODE_PWM0 28
+#define MT76X8_GPIO_MODE_UART2 26
+#define MT76X8_GPIO_MODE_UART1 24
+#define MT76X8_GPIO_MODE_I2C 20
+#define MT76X8_GPIO_MODE_REFCLK 18
+#define MT76X8_GPIO_MODE_PERST 16
+#define MT76X8_GPIO_MODE_WDT 14
+#define MT76X8_GPIO_MODE_SPI 12
+#define MT76X8_GPIO_MODE_SDMODE 10
+#define MT76X8_GPIO_MODE_UART0 8
+#define MT76X8_GPIO_MODE_I2S 6
+#define MT76X8_GPIO_MODE_CS1 4
+#define MT76X8_GPIO_MODE_SPIS 2
+#define MT76X8_GPIO_MODE_GPIO 0
+
+static struct ralink_pmx_group mt76x8_pinmux_data[] = {
+ GRP_G("pwm1", pwm1_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_PWM1),
+ GRP_G("pwm0", pwm0_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_PWM0),
+ GRP_G("uart2", uart2_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_UART2),
+ GRP_G("uart1", uart1_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_UART1),
+ GRP_G("i2c", i2c_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_I2C),
+ GRP("refclk", refclk_func_mt76x8, 1, MT76X8_GPIO_MODE_REFCLK),
+ GRP("perst", perst_func_mt76x8, 1, MT76X8_GPIO_MODE_PERST),
+ GRP("wdt", wdt_func_mt76x8, 1, MT76X8_GPIO_MODE_WDT),
+ GRP("spi", spi_func_mt76x8, 1, MT76X8_GPIO_MODE_SPI),
+ GRP_G("sdmode", sd_mode_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_SDMODE),
+ GRP_G("uart0", uart0_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_UART0),
+ GRP_G("i2s", i2s_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_I2S),
+ GRP_G("spi cs1", spi_cs1_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_CS1),
+ GRP_G("spis", spis_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_SPIS),
+ GRP_G("gpio", gpio_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_GPIO),
+ GRP_G("wled_an", wled_an_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_WLED_AN),
+ GRP_G("p0led_an", p0led_an_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_P0LED_AN),
+ GRP_G("p1led_an", p1led_an_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_P1LED_AN),
+ GRP_G("p2led_an", p2led_an_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_P2LED_AN),
+ GRP_G("p3led_an", p3led_an_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_P3LED_AN),
+ GRP_G("p4led_an", p4led_an_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_P4LED_AN),
+ GRP_G("wled_kn", wled_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_WLED_KN),
+ GRP_G("p0led_kn", p0led_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_P0LED_KN),
+ GRP_G("p1led_kn", p1led_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_P1LED_KN),
+ GRP_G("p2led_kn", p2led_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_P2LED_KN),
+ GRP_G("p3led_kn", p3led_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_P3LED_KN),
+ GRP_G("p4led_kn", p4led_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ 1, MT76X8_GPIO_MODE_P4LED_KN),
{ 0 }
};
-static int mt7620_pinmux_probe(struct platform_device *pdev)
+static int mt7620_pinctrl_probe(struct platform_device *pdev)
{
if (is_mt76x8())
- return rt2880_pinmux_init(pdev, mt7628an_pinmux_data);
+ return ralink_pinctrl_init(pdev, mt76x8_pinmux_data);
else
- return rt2880_pinmux_init(pdev, mt7620a_pinmux_data);
+ return ralink_pinctrl_init(pdev, mt7620a_pinmux_data);
}
-static const struct of_device_id mt7620_pinmux_match[] = {
- { .compatible = "ralink,rt2880-pinmux" },
+static const struct of_device_id mt7620_pinctrl_match[] = {
+ { .compatible = "ralink,mt7620-pinctrl" },
{}
};
-MODULE_DEVICE_TABLE(of, mt7620_pinmux_match);
+MODULE_DEVICE_TABLE(of, mt7620_pinctrl_match);
-static struct platform_driver mt7620_pinmux_driver = {
- .probe = mt7620_pinmux_probe,
+static struct platform_driver mt7620_pinctrl_driver = {
+ .probe = mt7620_pinctrl_probe,
.driver = {
- .name = "rt2880-pinmux",
- .of_match_table = mt7620_pinmux_match,
+ .name = "mt7620-pinctrl",
+ .of_match_table = mt7620_pinctrl_match,
},
};
-static int __init mt7620_pinmux_init(void)
+static int __init mt7620_pinctrl_init(void)
{
- return platform_driver_register(&mt7620_pinmux_driver);
+ return platform_driver_register(&mt7620_pinctrl_driver);
}
-core_initcall_sync(mt7620_pinmux_init);
+core_initcall_sync(mt7620_pinctrl_init);
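
The MT76x8 groups above all share a 2-bit mode field (MT76X8_GPIO_MODE_MASK) at the shifts listed, and fields at shift 32 and above live in a second register. A minimal standalone C sketch, not part of the patch (the 0x60/0x64 offsets and the shift >= 32 rule come from the common pinctrl-ralink.c code added further down), printing where each GRP_G() group lands:

#include <stdio.h>

#define SYSC_REG_GPIO_MODE      0x60
#define SYSC_REG_GPIO_MODE2     0x64
#define MT76X8_GPIO_MODE_MASK   0x3

struct grp { const char *name; int shift; };

int main(void)
{
        /* a selection of the GRP_G() entries and their shift constants */
        static const struct grp groups[] = {
                { "gpio", 0 },     { "spis", 2 },     { "spi cs1", 4 },
                { "i2s", 6 },      { "uart0", 8 },    { "sdmode", 10 },
                { "i2c", 20 },     { "uart1", 24 },   { "pwm1", 30 },
                { "wled_an", 32 }, { "p0led_an", 34 }, { "p4led_kn", 58 },
        };
        unsigned int i;

        for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
                int shift = groups[i].shift;
                /* fields above bit 31 sit in the second mode register */
                int reg = shift >= 32 ? SYSC_REG_GPIO_MODE2 : SYSC_REG_GPIO_MODE;
                int lsb = shift & 31;

                printf("%-10s reg 0x%02x bits [%d:%d] mask 0x%x\n",
                       groups[i].name, reg, lsb + 1, lsb,
                       MT76X8_GPIO_MODE_MASK);
        }
        return 0;
}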
diff --git a/drivers/pinctrl/ralink/pinctrl-mt7621.c b/drivers/pinctrl/ralink/pinctrl-mt7621.c
index 7d96144c474e..b47968f40e0c 100644
--- a/drivers/pinctrl/ralink/pinctrl-mt7621.c
+++ b/drivers/pinctrl/ralink/pinctrl-mt7621.c
@@ -3,7 +3,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include "pinmux.h"
+#include "pinctrl-ralink.h"
#define MT7621_GPIO_MODE_UART1 1
#define MT7621_GPIO_MODE_I2C 2
@@ -34,83 +34,83 @@
#define MT7621_GPIO_MODE_SDHCI_SHIFT 18
#define MT7621_GPIO_MODE_SDHCI_GPIO 1
-static struct rt2880_pmx_func uart1_grp[] = { FUNC("uart1", 0, 1, 2) };
-static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 3, 2) };
-static struct rt2880_pmx_func uart3_grp[] = {
+static struct ralink_pmx_func uart1_func[] = { FUNC("uart1", 0, 1, 2) };
+static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 3, 2) };
+static struct ralink_pmx_func uart3_func[] = {
FUNC("uart3", 0, 5, 4),
FUNC("i2s", 2, 5, 4),
FUNC("spdif3", 3, 5, 4),
};
-static struct rt2880_pmx_func uart2_grp[] = {
+static struct ralink_pmx_func uart2_func[] = {
FUNC("uart2", 0, 9, 4),
FUNC("pcm", 2, 9, 4),
FUNC("spdif2", 3, 9, 4),
};
-static struct rt2880_pmx_func jtag_grp[] = { FUNC("jtag", 0, 13, 5) };
-static struct rt2880_pmx_func wdt_grp[] = {
+static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 13, 5) };
+static struct ralink_pmx_func wdt_func[] = {
FUNC("wdt rst", 0, 18, 1),
FUNC("wdt refclk", 2, 18, 1),
};
-static struct rt2880_pmx_func pcie_rst_grp[] = {
+static struct ralink_pmx_func pcie_rst_func[] = {
FUNC("pcie rst", MT7621_GPIO_MODE_PCIE_RST, 19, 1),
FUNC("pcie refclk", MT7621_GPIO_MODE_PCIE_REF, 19, 1)
};
-static struct rt2880_pmx_func mdio_grp[] = { FUNC("mdio", 0, 20, 2) };
-static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 22, 12) };
-static struct rt2880_pmx_func spi_grp[] = {
+static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 20, 2) };
+static struct ralink_pmx_func rgmii2_func[] = { FUNC("rgmii2", 0, 22, 12) };
+static struct ralink_pmx_func spi_func[] = {
FUNC("spi", 0, 34, 7),
FUNC("nand1", 2, 34, 7),
};
-static struct rt2880_pmx_func sdhci_grp[] = {
+static struct ralink_pmx_func sdhci_func[] = {
FUNC("sdhci", 0, 41, 8),
FUNC("nand2", 2, 41, 8),
};
-static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 49, 12) };
+static struct ralink_pmx_func rgmii1_func[] = { FUNC("rgmii1", 0, 49, 12) };
-static struct rt2880_pmx_group mt7621_pinmux_data[] = {
- GRP("uart1", uart1_grp, 1, MT7621_GPIO_MODE_UART1),
- GRP("i2c", i2c_grp, 1, MT7621_GPIO_MODE_I2C),
- GRP_G("uart3", uart3_grp, MT7621_GPIO_MODE_UART3_MASK,
+static struct ralink_pmx_group mt7621_pinmux_data[] = {
+ GRP("uart1", uart1_func, 1, MT7621_GPIO_MODE_UART1),
+ GRP("i2c", i2c_func, 1, MT7621_GPIO_MODE_I2C),
+ GRP_G("uart3", uart3_func, MT7621_GPIO_MODE_UART3_MASK,
MT7621_GPIO_MODE_UART3_GPIO, MT7621_GPIO_MODE_UART3_SHIFT),
- GRP_G("uart2", uart2_grp, MT7621_GPIO_MODE_UART2_MASK,
+ GRP_G("uart2", uart2_func, MT7621_GPIO_MODE_UART2_MASK,
MT7621_GPIO_MODE_UART2_GPIO, MT7621_GPIO_MODE_UART2_SHIFT),
- GRP("jtag", jtag_grp, 1, MT7621_GPIO_MODE_JTAG),
- GRP_G("wdt", wdt_grp, MT7621_GPIO_MODE_WDT_MASK,
+ GRP("jtag", jtag_func, 1, MT7621_GPIO_MODE_JTAG),
+ GRP_G("wdt", wdt_func, MT7621_GPIO_MODE_WDT_MASK,
MT7621_GPIO_MODE_WDT_GPIO, MT7621_GPIO_MODE_WDT_SHIFT),
- GRP_G("pcie", pcie_rst_grp, MT7621_GPIO_MODE_PCIE_MASK,
+ GRP_G("pcie", pcie_rst_func, MT7621_GPIO_MODE_PCIE_MASK,
MT7621_GPIO_MODE_PCIE_GPIO, MT7621_GPIO_MODE_PCIE_SHIFT),
- GRP_G("mdio", mdio_grp, MT7621_GPIO_MODE_MDIO_MASK,
+ GRP_G("mdio", mdio_func, MT7621_GPIO_MODE_MDIO_MASK,
MT7621_GPIO_MODE_MDIO_GPIO, MT7621_GPIO_MODE_MDIO_SHIFT),
- GRP("rgmii2", rgmii2_grp, 1, MT7621_GPIO_MODE_RGMII2),
- GRP_G("spi", spi_grp, MT7621_GPIO_MODE_SPI_MASK,
+ GRP("rgmii2", rgmii2_func, 1, MT7621_GPIO_MODE_RGMII2),
+ GRP_G("spi", spi_func, MT7621_GPIO_MODE_SPI_MASK,
MT7621_GPIO_MODE_SPI_GPIO, MT7621_GPIO_MODE_SPI_SHIFT),
- GRP_G("sdhci", sdhci_grp, MT7621_GPIO_MODE_SDHCI_MASK,
+ GRP_G("sdhci", sdhci_func, MT7621_GPIO_MODE_SDHCI_MASK,
MT7621_GPIO_MODE_SDHCI_GPIO, MT7621_GPIO_MODE_SDHCI_SHIFT),
- GRP("rgmii1", rgmii1_grp, 1, MT7621_GPIO_MODE_RGMII1),
+ GRP("rgmii1", rgmii1_func, 1, MT7621_GPIO_MODE_RGMII1),
{ 0 }
};
-static int mt7621_pinmux_probe(struct platform_device *pdev)
+static int mt7621_pinctrl_probe(struct platform_device *pdev)
{
- return rt2880_pinmux_init(pdev, mt7621_pinmux_data);
+ return ralink_pinctrl_init(pdev, mt7621_pinmux_data);
}
-static const struct of_device_id mt7621_pinmux_match[] = {
- { .compatible = "ralink,rt2880-pinmux" },
+static const struct of_device_id mt7621_pinctrl_match[] = {
+ { .compatible = "ralink,mt7621-pinctrl" },
{}
};
-MODULE_DEVICE_TABLE(of, mt7621_pinmux_match);
+MODULE_DEVICE_TABLE(of, mt7621_pinctrl_match);
-static struct platform_driver mt7621_pinmux_driver = {
- .probe = mt7621_pinmux_probe,
+static struct platform_driver mt7621_pinctrl_driver = {
+ .probe = mt7621_pinctrl_probe,
.driver = {
- .name = "rt2880-pinmux",
- .of_match_table = mt7621_pinmux_match,
+ .name = "mt7621-pinctrl",
+ .of_match_table = mt7621_pinctrl_match,
},
};
-static int __init mt7621_pinmux_init(void)
+static int __init mt7621_pinctrl_init(void)
{
- return platform_driver_register(&mt7621_pinmux_driver);
+ return platform_driver_register(&mt7621_pinctrl_driver);
}
-core_initcall_sync(mt7621_pinmux_init);
+core_initcall_sync(mt7621_pinctrl_init);
diff --git a/drivers/pinctrl/ralink/pinctrl-ralink.c b/drivers/pinctrl/ralink/pinctrl-ralink.c
new file mode 100644
index 000000000000..63429a287434
--- /dev/null
+++ b/drivers/pinctrl/ralink/pinctrl-ralink.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/mt7620.h>
+
+#include "pinctrl-ralink.h"
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+#define SYSC_REG_GPIO_MODE 0x60
+#define SYSC_REG_GPIO_MODE2 0x64
+
+struct ralink_priv {
+ struct device *dev;
+
+ struct pinctrl_pin_desc *pads;
+ struct pinctrl_desc *desc;
+
+ struct ralink_pmx_func **func;
+ int func_count;
+
+ struct ralink_pmx_group *groups;
+ const char **group_names;
+ int group_count;
+
+ u8 *gpio;
+ int max_pins;
+};
+
+static int ralink_get_group_count(struct pinctrl_dev *pctrldev)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+
+ return p->group_count;
+}
+
+static const char *ralink_get_group_name(struct pinctrl_dev *pctrldev,
+ unsigned int group)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+
+ return (group >= p->group_count) ? NULL : p->group_names[group];
+}
+
+static int ralink_get_group_pins(struct pinctrl_dev *pctrldev,
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+
+ if (group >= p->group_count)
+ return -EINVAL;
+
+ *pins = p->groups[group].func[0].pins;
+ *num_pins = p->groups[group].func[0].pin_count;
+
+ return 0;
+}
+
+static const struct pinctrl_ops ralink_pctrl_ops = {
+ .get_groups_count = ralink_get_group_count,
+ .get_group_name = ralink_get_group_name,
+ .get_group_pins = ralink_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int ralink_pmx_func_count(struct pinctrl_dev *pctrldev)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+
+ return p->func_count;
+}
+
+static const char *ralink_pmx_func_name(struct pinctrl_dev *pctrldev,
+ unsigned int func)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+
+ return p->func[func]->name;
+}
+
+static int ralink_pmx_group_get_groups(struct pinctrl_dev *pctrldev,
+ unsigned int func,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+
+ if (p->func[func]->group_count == 1)
+ *groups = &p->group_names[p->func[func]->groups[0]];
+ else
+ *groups = p->group_names;
+
+ *num_groups = p->func[func]->group_count;
+
+ return 0;
+}
+
+static int ralink_pmx_group_enable(struct pinctrl_dev *pctrldev,
+ unsigned int func, unsigned int group)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+ u32 mode = 0;
+ u32 reg = SYSC_REG_GPIO_MODE;
+ int i;
+ int shift;
+
+ /* don't allow double use */
+ if (p->groups[group].enabled) {
+ dev_err(p->dev, "%s is already enabled\n",
+ p->groups[group].name);
+ return 0;
+ }
+
+ p->groups[group].enabled = 1;
+ p->func[func]->enabled = 1;
+
+ shift = p->groups[group].shift;
+ if (shift >= 32) {
+ shift -= 32;
+ reg = SYSC_REG_GPIO_MODE2;
+ }
+ mode = rt_sysc_r32(reg);
+ mode &= ~(p->groups[group].mask << shift);
+
+ /* mark the pins as gpio */
+ for (i = 0; i < p->groups[group].func[0].pin_count; i++)
+ p->gpio[p->groups[group].func[0].pins[i]] = 1;
+
+ /* function 0 is gpio and needs special handling */
+ if (func == 0) {
+ mode |= p->groups[group].gpio << shift;
+ } else {
+ for (i = 0; i < p->func[func]->pin_count; i++)
+ p->gpio[p->func[func]->pins[i]] = 0;
+ mode |= p->func[func]->value << shift;
+ }
+ rt_sysc_w32(mode, reg);
+
+ return 0;
+}
+
+static int ralink_pmx_group_gpio_request_enable(struct pinctrl_dev *pctrldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+
+ if (!p->gpio[pin]) {
+ dev_err(p->dev, "pin %d is not set to gpio mux\n", pin);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops ralink_pmx_group_ops = {
+ .get_functions_count = ralink_pmx_func_count,
+ .get_function_name = ralink_pmx_func_name,
+ .get_function_groups = ralink_pmx_group_get_groups,
+ .set_mux = ralink_pmx_group_enable,
+ .gpio_request_enable = ralink_pmx_group_gpio_request_enable,
+};
+
+static struct pinctrl_desc ralink_pctrl_desc = {
+ .owner = THIS_MODULE,
+ .name = "ralink-pinctrl",
+ .pctlops = &ralink_pctrl_ops,
+ .pmxops = &ralink_pmx_group_ops,
+};
+
+static struct ralink_pmx_func gpio_func = {
+ .name = "gpio",
+};
+
+static int ralink_pinctrl_index(struct ralink_priv *p)
+{
+ struct ralink_pmx_group *mux = p->groups;
+ int i, j, c = 0;
+
+ /* count the mux functions */
+ while (mux->name) {
+ p->group_count++;
+ mux++;
+ }
+
+ /* allocate the group names array needed by the gpio function */
+ p->group_names = devm_kcalloc(p->dev, p->group_count,
+ sizeof(char *), GFP_KERNEL);
+ if (!p->group_names)
+ return -ENOMEM;
+
+ for (i = 0; i < p->group_count; i++) {
+ p->group_names[i] = p->groups[i].name;
+ p->func_count += p->groups[i].func_count;
+ }
+
+ /* we have a dummy function[0] for gpio */
+ p->func_count++;
+
+ /* allocate our function and group mapping index buffers */
+ p->func = devm_kcalloc(p->dev, p->func_count,
+ sizeof(*p->func), GFP_KERNEL);
+ gpio_func.groups = devm_kcalloc(p->dev, p->group_count, sizeof(int),
+ GFP_KERNEL);
+ if (!p->func || !gpio_func.groups)
+ return -ENOMEM;
+
+ /* add a backpointer to the function so it knows its group */
+ gpio_func.group_count = p->group_count;
+ for (i = 0; i < gpio_func.group_count; i++)
+ gpio_func.groups[i] = i;
+
+ p->func[c] = &gpio_func;
+ c++;
+
+ /* add remaining functions */
+ for (i = 0; i < p->group_count; i++) {
+ for (j = 0; j < p->groups[i].func_count; j++) {
+ p->func[c] = &p->groups[i].func[j];
+ p->func[c]->groups = devm_kzalloc(p->dev, sizeof(int),
+ GFP_KERNEL);
+ if (!p->func[c]->groups)
+ return -ENOMEM;
+ p->func[c]->groups[0] = i;
+ p->func[c]->group_count = 1;
+ c++;
+ }
+ }
+ return 0;
+}
+
+static int ralink_pinctrl_pins(struct ralink_priv *p)
+{
+ int i, j;
+
+ /*
+ * loop over the functions and initialize the pins array.
+ * also work out the highest pin used.
+ */
+ for (i = 0; i < p->func_count; i++) {
+ int pin;
+
+ if (!p->func[i]->pin_count)
+ continue;
+
+ p->func[i]->pins = devm_kcalloc(p->dev,
+ p->func[i]->pin_count,
+ sizeof(int),
+ GFP_KERNEL);
+ for (j = 0; j < p->func[i]->pin_count; j++)
+ p->func[i]->pins[j] = p->func[i]->pin_first + j;
+
+ pin = p->func[i]->pin_first + p->func[i]->pin_count;
+ if (pin > p->max_pins)
+ p->max_pins = pin;
+ }
+
+ /* the buffer that tells us which pins are gpio */
+ p->gpio = devm_kcalloc(p->dev, p->max_pins, sizeof(u8), GFP_KERNEL);
+ /* the pads needed to tell pinctrl about our pins */
+ p->pads = devm_kcalloc(p->dev, p->max_pins,
+ sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
+ if (!p->pads || !p->gpio)
+ return -ENOMEM;
+
+ memset(p->gpio, 1, sizeof(u8) * p->max_pins);
+ for (i = 0; i < p->func_count; i++) {
+ if (!p->func[i]->pin_count)
+ continue;
+
+ for (j = 0; j < p->func[i]->pin_count; j++)
+ p->gpio[p->func[i]->pins[j]] = 0;
+ }
+
+ /* pin 0 is always a gpio */
+ p->gpio[0] = 1;
+
+ /* set the pads */
+ for (i = 0; i < p->max_pins; i++) {
+ /* strlen("ioXY") + 1 = 5 */
+ char *name = devm_kzalloc(p->dev, 5, GFP_KERNEL);
+
+ if (!name)
+ return -ENOMEM;
+ snprintf(name, 5, "io%d", i);
+ p->pads[i].number = i;
+ p->pads[i].name = name;
+ }
+ p->desc->pins = p->pads;
+ p->desc->npins = p->max_pins;
+
+ return 0;
+}
+
+int ralink_pinctrl_init(struct platform_device *pdev,
+ struct ralink_pmx_group *data)
+{
+ struct ralink_priv *p;
+ struct pinctrl_dev *dev;
+ int err;
+
+ if (!data)
+ return -ENOTSUPP;
+
+ /* setup the private data */
+ p = devm_kzalloc(&pdev->dev, sizeof(struct ralink_priv), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ p->dev = &pdev->dev;
+ p->desc = &ralink_pctrl_desc;
+ p->groups = data;
+ platform_set_drvdata(pdev, p);
+
+ /* init the device */
+ err = ralink_pinctrl_index(p);
+ if (err) {
+ dev_err(&pdev->dev, "failed to load index\n");
+ return err;
+ }
+
+ err = ralink_pinctrl_pins(p);
+ if (err) {
+ dev_err(&pdev->dev, "failed to load pins\n");
+ return err;
+ }
+ dev = pinctrl_register(p->desc, &pdev->dev, p);
+
+ return PTR_ERR_OR_ZERO(dev);
+}
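
ralink_pmx_group_enable() above is a plain read-modify-write: clear mask << shift, OR in either the group's gpio value (function 0) or the chosen function's mux value, and switch to SYSC_REG_GPIO_MODE2 once the shift reaches 32. A minimal userspace model of that sequence, with the two registers replaced by plain variables and the example mask/shift/value numbers taken from the MT76x8 table:

#include <stdint.h>
#include <stdio.h>

static uint32_t gpio_mode[2];   /* [0] models offset 0x60, [1] models 0x64 */

static void set_group(unsigned int shift, uint32_t mask, uint32_t value)
{
        unsigned int idx = 0;

        if (shift >= 32) {      /* field lives in GPIO_MODE2 */
                shift -= 32;
                idx = 1;
        }
        gpio_mode[idx] &= ~(mask << shift);     /* clear the old setting */
        gpio_mode[idx] |= value << shift;       /* write the new one */
}

int main(void)
{
        /* MT76x8 "uart1" group: 2-bit field at shift 24, uart1 mux value 0 */
        set_group(24, 0x3, 0);
        /* MT76x8 "wled_an" group set to gpio: 2-bit field at shift 32, gpio value 1 */
        set_group(32, 0x3, 1);

        printf("GPIO_MODE = %08x, GPIO_MODE2 = %08x\n",
               gpio_mode[0], gpio_mode[1]);
        return 0;
}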
diff --git a/drivers/pinctrl/ralink/pinmux.h b/drivers/pinctrl/ralink/pinctrl-ralink.h
index 0046abe3bcc7..e6037be1e153 100644
--- a/drivers/pinctrl/ralink/pinmux.h
+++ b/drivers/pinctrl/ralink/pinctrl-ralink.h
@@ -3,8 +3,8 @@
* Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
-#ifndef _RT288X_PINMUX_H__
-#define _RT288X_PINMUX_H__
+#ifndef _PINCTRL_RALINK_H__
+#define _PINCTRL_RALINK_H__
#define FUNC(name, value, pin_first, pin_count) \
{ name, value, pin_first, pin_count }
@@ -19,9 +19,9 @@
.func = _func, .gpio = _gpio, \
.func_count = ARRAY_SIZE(_func) }
-struct rt2880_pmx_group;
+struct ralink_pmx_group;
-struct rt2880_pmx_func {
+struct ralink_pmx_func {
const char *name;
const char value;
@@ -35,7 +35,7 @@ struct rt2880_pmx_func {
int enabled;
};
-struct rt2880_pmx_group {
+struct ralink_pmx_group {
const char *name;
int enabled;
@@ -43,11 +43,11 @@ struct rt2880_pmx_group {
const char mask;
const char gpio;
- struct rt2880_pmx_func *func;
+ struct ralink_pmx_func *func;
int func_count;
};
-int rt2880_pinmux_init(struct platform_device *pdev,
- struct rt2880_pmx_group *data);
+int ralink_pinctrl_init(struct platform_device *pdev,
+ struct ralink_pmx_group *data);
#endif
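
FUNC() is a plain positional initializer: name, mux value, first pin, pin count. The common code then expands each such range into an explicit pins[] array (ralink_pinctrl_pins(): pins[j] = pin_first + j). A reduced standalone sketch of both steps, with the struct trimmed to the fields visible in this header:

#include <stdio.h>

struct pmx_func {
        const char *name;
        char value;             /* mux value written into the mode field */
        int pin_first;          /* first pin of the range */
        int pin_count;          /* number of consecutive pins */
};

#define FUNC(name, value, pin_first, pin_count) \
        { name, value, pin_first, pin_count }

int main(void)
{
        /* MT7621 "uart1" function: mux value 0, pins 1..2 */
        struct pmx_func uart1 = FUNC("uart1", 0, 1, 2);
        int pins[16];
        int j;

        /* same expansion as ralink_pinctrl_pins() */
        for (j = 0; j < uart1.pin_count; j++)
                pins[j] = uart1.pin_first + j;

        printf("%s uses pins", uart1.name);
        for (j = 0; j < uart1.pin_count; j++)
                printf(" %d", pins[j]);
        printf("\n");
        return 0;
}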
diff --git a/drivers/pinctrl/ralink/pinctrl-rt2880.c b/drivers/pinctrl/ralink/pinctrl-rt2880.c
index 96fc06d1b8b9..811e12df1133 100644
--- a/drivers/pinctrl/ralink/pinctrl-rt2880.c
+++ b/drivers/pinctrl/ralink/pinctrl-rt2880.c
@@ -1,349 +1,60 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
- */
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bitops.h>
#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/io.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinconf.h>
-#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/pinctrl/machine.h>
-
-#include <asm/mach-ralink/ralink_regs.h>
-#include <asm/mach-ralink/mt7620.h>
-
-#include "pinmux.h"
-#include "../core.h"
-#include "../pinctrl-utils.h"
-
-#define SYSC_REG_GPIO_MODE 0x60
-#define SYSC_REG_GPIO_MODE2 0x64
-
-struct rt2880_priv {
- struct device *dev;
-
- struct pinctrl_pin_desc *pads;
- struct pinctrl_desc *desc;
-
- struct rt2880_pmx_func **func;
- int func_count;
-
- struct rt2880_pmx_group *groups;
- const char **group_names;
- int group_count;
-
- u8 *gpio;
- int max_pins;
+#include "pinctrl-ralink.h"
+
+#define RT2880_GPIO_MODE_I2C BIT(0)
+#define RT2880_GPIO_MODE_UART0 BIT(1)
+#define RT2880_GPIO_MODE_SPI BIT(2)
+#define RT2880_GPIO_MODE_UART1 BIT(3)
+#define RT2880_GPIO_MODE_JTAG BIT(4)
+#define RT2880_GPIO_MODE_MDIO BIT(5)
+#define RT2880_GPIO_MODE_SDRAM BIT(6)
+#define RT2880_GPIO_MODE_PCI BIT(7)
+
+static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
+static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
+static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) };
+static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+static struct ralink_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
+static struct ralink_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) };
+
+static struct ralink_pmx_group rt2880_pinmux_data_act[] = {
+ GRP("i2c", i2c_func, 1, RT2880_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT2880_GPIO_MODE_SPI),
+ GRP("uartlite", uartlite_func, 1, RT2880_GPIO_MODE_UART0),
+ GRP("jtag", jtag_func, 1, RT2880_GPIO_MODE_JTAG),
+ GRP("mdio", mdio_func, 1, RT2880_GPIO_MODE_MDIO),
+ GRP("sdram", sdram_func, 1, RT2880_GPIO_MODE_SDRAM),
+ GRP("pci", pci_func, 1, RT2880_GPIO_MODE_PCI),
+ { 0 }
};
-static int rt2880_get_group_count(struct pinctrl_dev *pctrldev)
+static int rt2880_pinctrl_probe(struct platform_device *pdev)
{
- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
-
- return p->group_count;
+ return ralink_pinctrl_init(pdev, rt2880_pinmux_data_act);
}
-static const char *rt2880_get_group_name(struct pinctrl_dev *pctrldev,
- unsigned int group)
-{
- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
-
- return (group >= p->group_count) ? NULL : p->group_names[group];
-}
-
-static int rt2880_get_group_pins(struct pinctrl_dev *pctrldev,
- unsigned int group,
- const unsigned int **pins,
- unsigned int *num_pins)
-{
- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
-
- if (group >= p->group_count)
- return -EINVAL;
-
- *pins = p->groups[group].func[0].pins;
- *num_pins = p->groups[group].func[0].pin_count;
-
- return 0;
-}
-
-static const struct pinctrl_ops rt2880_pctrl_ops = {
- .get_groups_count = rt2880_get_group_count,
- .get_group_name = rt2880_get_group_name,
- .get_group_pins = rt2880_get_group_pins,
- .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
- .dt_free_map = pinconf_generic_dt_free_map,
+static const struct of_device_id rt2880_pinctrl_match[] = {
+ { .compatible = "ralink,rt2880-pinctrl" },
+ {}
};
-
-static int rt2880_pmx_func_count(struct pinctrl_dev *pctrldev)
-{
- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
-
- return p->func_count;
-}
-
-static const char *rt2880_pmx_func_name(struct pinctrl_dev *pctrldev,
- unsigned int func)
-{
- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
-
- return p->func[func]->name;
-}
-
-static int rt2880_pmx_group_get_groups(struct pinctrl_dev *pctrldev,
- unsigned int func,
- const char * const **groups,
- unsigned int * const num_groups)
-{
- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
-
- if (p->func[func]->group_count == 1)
- *groups = &p->group_names[p->func[func]->groups[0]];
- else
- *groups = p->group_names;
-
- *num_groups = p->func[func]->group_count;
-
- return 0;
-}
-
-static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
- unsigned int func, unsigned int group)
-{
- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
- u32 mode = 0;
- u32 reg = SYSC_REG_GPIO_MODE;
- int i;
- int shift;
-
- /* dont allow double use */
- if (p->groups[group].enabled) {
- dev_err(p->dev, "%s is already enabled\n",
- p->groups[group].name);
- return 0;
- }
-
- p->groups[group].enabled = 1;
- p->func[func]->enabled = 1;
-
- shift = p->groups[group].shift;
- if (shift >= 32) {
- shift -= 32;
- reg = SYSC_REG_GPIO_MODE2;
- }
- mode = rt_sysc_r32(reg);
- mode &= ~(p->groups[group].mask << shift);
-
- /* mark the pins as gpio */
- for (i = 0; i < p->groups[group].func[0].pin_count; i++)
- p->gpio[p->groups[group].func[0].pins[i]] = 1;
-
- /* function 0 is gpio and needs special handling */
- if (func == 0) {
- mode |= p->groups[group].gpio << shift;
- } else {
- for (i = 0; i < p->func[func]->pin_count; i++)
- p->gpio[p->func[func]->pins[i]] = 0;
- mode |= p->func[func]->value << shift;
- }
- rt_sysc_w32(mode, reg);
-
- return 0;
-}
-
-static int rt2880_pmx_group_gpio_request_enable(struct pinctrl_dev *pctrldev,
- struct pinctrl_gpio_range *range,
- unsigned int pin)
-{
- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
-
- if (!p->gpio[pin]) {
- dev_err(p->dev, "pin %d is not set to gpio mux\n", pin);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const struct pinmux_ops rt2880_pmx_group_ops = {
- .get_functions_count = rt2880_pmx_func_count,
- .get_function_name = rt2880_pmx_func_name,
- .get_function_groups = rt2880_pmx_group_get_groups,
- .set_mux = rt2880_pmx_group_enable,
- .gpio_request_enable = rt2880_pmx_group_gpio_request_enable,
+MODULE_DEVICE_TABLE(of, rt2880_pinctrl_match);
+
+static struct platform_driver rt2880_pinctrl_driver = {
+ .probe = rt2880_pinctrl_probe,
+ .driver = {
+ .name = "rt2880-pinctrl",
+ .of_match_table = rt2880_pinctrl_match,
+ },
};
-static struct pinctrl_desc rt2880_pctrl_desc = {
- .owner = THIS_MODULE,
- .name = "rt2880-pinmux",
- .pctlops = &rt2880_pctrl_ops,
- .pmxops = &rt2880_pmx_group_ops,
-};
-
-static struct rt2880_pmx_func gpio_func = {
- .name = "gpio",
-};
-
-static int rt2880_pinmux_index(struct rt2880_priv *p)
-{
- struct rt2880_pmx_group *mux = p->groups;
- int i, j, c = 0;
-
- /* count the mux functions */
- while (mux->name) {
- p->group_count++;
- mux++;
- }
-
- /* allocate the group names array needed by the gpio function */
- p->group_names = devm_kcalloc(p->dev, p->group_count,
- sizeof(char *), GFP_KERNEL);
- if (!p->group_names)
- return -ENOMEM;
-
- for (i = 0; i < p->group_count; i++) {
- p->group_names[i] = p->groups[i].name;
- p->func_count += p->groups[i].func_count;
- }
-
- /* we have a dummy function[0] for gpio */
- p->func_count++;
-
- /* allocate our function and group mapping index buffers */
- p->func = devm_kcalloc(p->dev, p->func_count,
- sizeof(*p->func), GFP_KERNEL);
- gpio_func.groups = devm_kcalloc(p->dev, p->group_count, sizeof(int),
- GFP_KERNEL);
- if (!p->func || !gpio_func.groups)
- return -ENOMEM;
-
- /* add a backpointer to the function so it knows its group */
- gpio_func.group_count = p->group_count;
- for (i = 0; i < gpio_func.group_count; i++)
- gpio_func.groups[i] = i;
-
- p->func[c] = &gpio_func;
- c++;
-
- /* add remaining functions */
- for (i = 0; i < p->group_count; i++) {
- for (j = 0; j < p->groups[i].func_count; j++) {
- p->func[c] = &p->groups[i].func[j];
- p->func[c]->groups = devm_kzalloc(p->dev, sizeof(int),
- GFP_KERNEL);
- if (!p->func[c]->groups)
- return -ENOMEM;
- p->func[c]->groups[0] = i;
- p->func[c]->group_count = 1;
- c++;
- }
- }
- return 0;
-}
-
-static int rt2880_pinmux_pins(struct rt2880_priv *p)
+static int __init rt2880_pinctrl_init(void)
{
- int i, j;
-
- /*
- * loop over the functions and initialize the pins array.
- * also work out the highest pin used.
- */
- for (i = 0; i < p->func_count; i++) {
- int pin;
-
- if (!p->func[i]->pin_count)
- continue;
-
- p->func[i]->pins = devm_kcalloc(p->dev,
- p->func[i]->pin_count,
- sizeof(int),
- GFP_KERNEL);
- for (j = 0; j < p->func[i]->pin_count; j++)
- p->func[i]->pins[j] = p->func[i]->pin_first + j;
-
- pin = p->func[i]->pin_first + p->func[i]->pin_count;
- if (pin > p->max_pins)
- p->max_pins = pin;
- }
-
- /* the buffer that tells us which pins are gpio */
- p->gpio = devm_kcalloc(p->dev, p->max_pins, sizeof(u8), GFP_KERNEL);
- /* the pads needed to tell pinctrl about our pins */
- p->pads = devm_kcalloc(p->dev, p->max_pins,
- sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
- if (!p->pads || !p->gpio)
- return -ENOMEM;
-
- memset(p->gpio, 1, sizeof(u8) * p->max_pins);
- for (i = 0; i < p->func_count; i++) {
- if (!p->func[i]->pin_count)
- continue;
-
- for (j = 0; j < p->func[i]->pin_count; j++)
- p->gpio[p->func[i]->pins[j]] = 0;
- }
-
- /* pin 0 is always a gpio */
- p->gpio[0] = 1;
-
- /* set the pads */
- for (i = 0; i < p->max_pins; i++) {
- /* strlen("ioXY") + 1 = 5 */
- char *name = devm_kzalloc(p->dev, 5, GFP_KERNEL);
-
- if (!name)
- return -ENOMEM;
- snprintf(name, 5, "io%d", i);
- p->pads[i].number = i;
- p->pads[i].name = name;
- }
- p->desc->pins = p->pads;
- p->desc->npins = p->max_pins;
-
- return 0;
-}
-
-int rt2880_pinmux_init(struct platform_device *pdev,
- struct rt2880_pmx_group *data)
-{
- struct rt2880_priv *p;
- struct pinctrl_dev *dev;
- int err;
-
- if (!data)
- return -ENOTSUPP;
-
- /* setup the private data */
- p = devm_kzalloc(&pdev->dev, sizeof(struct rt2880_priv), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- p->dev = &pdev->dev;
- p->desc = &rt2880_pctrl_desc;
- p->groups = data;
- platform_set_drvdata(pdev, p);
-
- /* init the device */
- err = rt2880_pinmux_index(p);
- if (err) {
- dev_err(&pdev->dev, "failed to load index\n");
- return err;
- }
-
- err = rt2880_pinmux_pins(p);
- if (err) {
- dev_err(&pdev->dev, "failed to load pins\n");
- return err;
- }
- dev = pinctrl_register(p->desc, &pdev->dev, p);
-
- return PTR_ERR_OR_ZERO(dev);
+ return platform_driver_register(&rt2880_pinctrl_driver);
}
+core_initcall_sync(rt2880_pinctrl_init);
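
The per-SoC tables handed to ralink_pinctrl_init() are sentinel terminated: the trailing { 0 } entry has a NULL name, and ralink_pinctrl_index() counts groups by walking until it hits it. A standalone sketch of that walk, using the group names from the rt2880 table above:

#include <stdio.h>

struct pmx_group { const char *name; };

static int count_groups(const struct pmx_group *grp)
{
        int n = 0;

        while (grp->name) {     /* the { 0 } sentinel has a NULL name */
                n++;
                grp++;
        }
        return n;
}

int main(void)
{
        static const struct pmx_group rt2880_groups[] = {
                { "i2c" }, { "spi" }, { "uartlite" }, { "jtag" },
                { "mdio" }, { "sdram" }, { "pci" },
                { 0 }
        };

        printf("%d groups\n", count_groups(rt2880_groups));
        return 0;
}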
diff --git a/drivers/pinctrl/ralink/pinctrl-rt288x.c b/drivers/pinctrl/ralink/pinctrl-rt288x.c
deleted file mode 100644
index 0744aebbace5..000000000000
--- a/drivers/pinctrl/ralink/pinctrl-rt288x.c
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/bitops.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include "pinmux.h"
-
-#define RT2880_GPIO_MODE_I2C BIT(0)
-#define RT2880_GPIO_MODE_UART0 BIT(1)
-#define RT2880_GPIO_MODE_SPI BIT(2)
-#define RT2880_GPIO_MODE_UART1 BIT(3)
-#define RT2880_GPIO_MODE_JTAG BIT(4)
-#define RT2880_GPIO_MODE_MDIO BIT(5)
-#define RT2880_GPIO_MODE_SDRAM BIT(6)
-#define RT2880_GPIO_MODE_PCI BIT(7)
-
-static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
-static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
-static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) };
-static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
-static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
-static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
-static struct rt2880_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) };
-
-static struct rt2880_pmx_group rt2880_pinmux_data_act[] = {
- GRP("i2c", i2c_func, 1, RT2880_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT2880_GPIO_MODE_SPI),
- GRP("uartlite", uartlite_func, 1, RT2880_GPIO_MODE_UART0),
- GRP("jtag", jtag_func, 1, RT2880_GPIO_MODE_JTAG),
- GRP("mdio", mdio_func, 1, RT2880_GPIO_MODE_MDIO),
- GRP("sdram", sdram_func, 1, RT2880_GPIO_MODE_SDRAM),
- GRP("pci", pci_func, 1, RT2880_GPIO_MODE_PCI),
- { 0 }
-};
-
-static int rt288x_pinmux_probe(struct platform_device *pdev)
-{
- return rt2880_pinmux_init(pdev, rt2880_pinmux_data_act);
-}
-
-static const struct of_device_id rt288x_pinmux_match[] = {
- { .compatible = "ralink,rt2880-pinmux" },
- {}
-};
-MODULE_DEVICE_TABLE(of, rt288x_pinmux_match);
-
-static struct platform_driver rt288x_pinmux_driver = {
- .probe = rt288x_pinmux_probe,
- .driver = {
- .name = "rt2880-pinmux",
- .of_match_table = rt288x_pinmux_match,
- },
-};
-
-static int __init rt288x_pinmux_init(void)
-{
- return platform_driver_register(&rt288x_pinmux_driver);
-}
-core_initcall_sync(rt288x_pinmux_init);
diff --git a/drivers/pinctrl/ralink/pinctrl-rt305x.c b/drivers/pinctrl/ralink/pinctrl-rt305x.c
index 5d8fa156c003..5b204b7ca1f3 100644
--- a/drivers/pinctrl/ralink/pinctrl-rt305x.c
+++ b/drivers/pinctrl/ralink/pinctrl-rt305x.c
@@ -5,7 +5,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include "pinmux.h"
+#include "pinctrl-ralink.h"
#define RT305X_GPIO_MODE_UART0_SHIFT 2
#define RT305X_GPIO_MODE_UART0_MASK 0x7
@@ -31,9 +31,9 @@
#define RT3352_GPIO_MODE_LNA 18
#define RT3352_GPIO_MODE_PA 20
-static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
-static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
-static struct rt2880_pmx_func uartf_func[] = {
+static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
+static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
+static struct ralink_pmx_func uartf_func[] = {
FUNC("uartf", RT305X_GPIO_MODE_UARTF, 7, 8),
FUNC("pcm uartf", RT305X_GPIO_MODE_PCM_UARTF, 7, 8),
FUNC("pcm i2s", RT305X_GPIO_MODE_PCM_I2S, 7, 8),
@@ -42,28 +42,28 @@ static struct rt2880_pmx_func uartf_func[] = {
FUNC("gpio uartf", RT305X_GPIO_MODE_GPIO_UARTF, 7, 4),
FUNC("gpio i2s", RT305X_GPIO_MODE_GPIO_I2S, 7, 4),
};
-static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
-static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
-static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
-static struct rt2880_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) };
-static struct rt2880_pmx_func rt5350_cs1_func[] = {
+static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
+static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+static struct ralink_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) };
+static struct ralink_pmx_func rt5350_cs1_func[] = {
FUNC("spi_cs1", 0, 27, 1),
FUNC("wdg_cs1", 1, 27, 1),
};
-static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
-static struct rt2880_pmx_func rt3352_rgmii_func[] = {
+static struct ralink_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
+static struct ralink_pmx_func rt3352_rgmii_func[] = {
FUNC("rgmii", 0, 24, 12)
};
-static struct rt2880_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) };
-static struct rt2880_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) };
-static struct rt2880_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) };
-static struct rt2880_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) };
-static struct rt2880_pmx_func rt3352_cs1_func[] = {
+static struct ralink_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) };
+static struct ralink_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) };
+static struct ralink_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) };
+static struct ralink_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) };
+static struct ralink_pmx_func rt3352_cs1_func[] = {
FUNC("spi_cs1", 0, 45, 1),
FUNC("wdg_cs1", 1, 45, 1),
};
-static struct rt2880_pmx_group rt3050_pinmux_data[] = {
+static struct ralink_pmx_group rt3050_pinmux_data[] = {
GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
@@ -76,7 +76,7 @@ static struct rt2880_pmx_group rt3050_pinmux_data[] = {
{ 0 }
};
-static struct rt2880_pmx_group rt3352_pinmux_data[] = {
+static struct ralink_pmx_group rt3352_pinmux_data[] = {
GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
@@ -92,7 +92,7 @@ static struct rt2880_pmx_group rt3352_pinmux_data[] = {
{ 0 }
};
-static struct rt2880_pmx_group rt5350_pinmux_data[] = {
+static struct ralink_pmx_group rt5350_pinmux_data[] = {
GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
@@ -104,34 +104,34 @@ static struct rt2880_pmx_group rt5350_pinmux_data[] = {
{ 0 }
};
-static int rt305x_pinmux_probe(struct platform_device *pdev)
+static int rt305x_pinctrl_probe(struct platform_device *pdev)
{
if (soc_is_rt5350())
- return rt2880_pinmux_init(pdev, rt5350_pinmux_data);
+ return ralink_pinctrl_init(pdev, rt5350_pinmux_data);
else if (soc_is_rt305x() || soc_is_rt3350())
- return rt2880_pinmux_init(pdev, rt3050_pinmux_data);
+ return ralink_pinctrl_init(pdev, rt3050_pinmux_data);
else if (soc_is_rt3352())
- return rt2880_pinmux_init(pdev, rt3352_pinmux_data);
+ return ralink_pinctrl_init(pdev, rt3352_pinmux_data);
else
return -EINVAL;
}
-static const struct of_device_id rt305x_pinmux_match[] = {
- { .compatible = "ralink,rt2880-pinmux" },
+static const struct of_device_id rt305x_pinctrl_match[] = {
+ { .compatible = "ralink,rt305x-pinctrl" },
{}
};
-MODULE_DEVICE_TABLE(of, rt305x_pinmux_match);
+MODULE_DEVICE_TABLE(of, rt305x_pinctrl_match);
-static struct platform_driver rt305x_pinmux_driver = {
- .probe = rt305x_pinmux_probe,
+static struct platform_driver rt305x_pinctrl_driver = {
+ .probe = rt305x_pinctrl_probe,
.driver = {
- .name = "rt2880-pinmux",
- .of_match_table = rt305x_pinmux_match,
+ .name = "rt305x-pinctrl",
+ .of_match_table = rt305x_pinctrl_match,
},
};
-static int __init rt305x_pinmux_init(void)
+static int __init rt305x_pinctrl_init(void)
{
- return platform_driver_register(&rt305x_pinmux_driver);
+ return platform_driver_register(&rt305x_pinctrl_driver);
}
-core_initcall_sync(rt305x_pinmux_init);
+core_initcall_sync(rt305x_pinctrl_init);
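
rt305x_pinctrl_probe() picks one of three pinmux tables at run time via the soc_is_*() helpers and falls back to -EINVAL. A reduced sketch of the same dispatch, with the kernel-internal SoC helpers replaced by a plain enum so it stays standalone:

#include <stdio.h>

enum soc { SOC_RT5350, SOC_RT305X, SOC_RT3350, SOC_RT3352, SOC_OTHER };

static const char *pick_table(enum soc soc)
{
        if (soc == SOC_RT5350)
                return "rt5350_pinmux_data";
        else if (soc == SOC_RT305X || soc == SOC_RT3350)
                return "rt3050_pinmux_data";
        else if (soc == SOC_RT3352)
                return "rt3352_pinmux_data";
        return NULL;            /* the probe returns -EINVAL here */
}

int main(void)
{
        printf("%s\n", pick_table(SOC_RT3352));
        return 0;
}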
diff --git a/drivers/pinctrl/ralink/pinctrl-rt3883.c b/drivers/pinctrl/ralink/pinctrl-rt3883.c
index 3e0e1b4caa64..44a66c3d2d2a 100644
--- a/drivers/pinctrl/ralink/pinctrl-rt3883.c
+++ b/drivers/pinctrl/ralink/pinctrl-rt3883.c
@@ -3,7 +3,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include "pinmux.h"
+#include "pinctrl-ralink.h"
#define RT3883_GPIO_MODE_UART0_SHIFT 2
#define RT3883_GPIO_MODE_UART0_MASK 0x7
@@ -39,9 +39,9 @@
#define RT3883_GPIO_MODE_LNA_G_GPIO 0x3
#define RT3883_GPIO_MODE_LNA_G _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK)
-static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
-static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
-static struct rt2880_pmx_func uartf_func[] = {
+static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
+static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
+static struct ralink_pmx_func uartf_func[] = {
FUNC("uartf", RT3883_GPIO_MODE_UARTF, 7, 8),
FUNC("pcm uartf", RT3883_GPIO_MODE_PCM_UARTF, 7, 8),
FUNC("pcm i2s", RT3883_GPIO_MODE_PCM_I2S, 7, 8),
@@ -50,21 +50,21 @@ static struct rt2880_pmx_func uartf_func[] = {
FUNC("gpio uartf", RT3883_GPIO_MODE_GPIO_UARTF, 7, 4),
FUNC("gpio i2s", RT3883_GPIO_MODE_GPIO_I2S, 7, 4),
};
-static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
-static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
-static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
-static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
-static struct rt2880_pmx_func pci_func[] = {
+static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
+static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+static struct ralink_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
+static struct ralink_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
+static struct ralink_pmx_func pci_func[] = {
FUNC("pci-dev", 0, 40, 32),
FUNC("pci-host2", 1, 40, 32),
FUNC("pci-host1", 2, 40, 32),
FUNC("pci-fnc", 3, 40, 32)
};
-static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
+static struct ralink_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
+static struct ralink_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
-static struct rt2880_pmx_group rt3883_pinmux_data[] = {
+static struct ralink_pmx_group rt3883_pinmux_data[] = {
GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
GRP("spi", spi_func, 1, RT3883_GPIO_MODE_SPI),
GRP("uartf", uartf_func, RT3883_GPIO_MODE_UART0_MASK,
@@ -81,27 +81,27 @@ static struct rt2880_pmx_group rt3883_pinmux_data[] = {
{ 0 }
};
-static int rt3883_pinmux_probe(struct platform_device *pdev)
+static int rt3883_pinctrl_probe(struct platform_device *pdev)
{
- return rt2880_pinmux_init(pdev, rt3883_pinmux_data);
+ return ralink_pinctrl_init(pdev, rt3883_pinmux_data);
}
-static const struct of_device_id rt3883_pinmux_match[] = {
- { .compatible = "ralink,rt2880-pinmux" },
+static const struct of_device_id rt3883_pinctrl_match[] = {
+ { .compatible = "ralink,rt3883-pinctrl" },
{}
};
-MODULE_DEVICE_TABLE(of, rt3883_pinmux_match);
+MODULE_DEVICE_TABLE(of, rt3883_pinctrl_match);
-static struct platform_driver rt3883_pinmux_driver = {
- .probe = rt3883_pinmux_probe,
+static struct platform_driver rt3883_pinctrl_driver = {
+ .probe = rt3883_pinctrl_probe,
.driver = {
- .name = "rt2880-pinmux",
- .of_match_table = rt3883_pinmux_match,
+ .name = "rt3883-pinctrl",
+ .of_match_table = rt3883_pinctrl_match,
},
};
-static int __init rt3883_pinmux_init(void)
+static int __init rt3883_pinctrl_init(void)
{
- return platform_driver_register(&rt3883_pinmux_driver);
+ return platform_driver_register(&rt3883_pinctrl_driver);
}
-core_initcall_sync(rt3883_pinmux_init);
+core_initcall_sync(rt3883_pinctrl_init);
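
Groups such as uartf or pci above carry several FUNC() entries over the same pin range, each with its own mux value; set_mux simply writes the chosen function's value into the group's field (see ralink_pmx_group_enable()). A standalone sketch of picking a function by name, using the values from pci_func[]:

#include <stdio.h>
#include <string.h>

struct pmx_func { const char *name; int value; };

static int func_value(const struct pmx_func *f, int n, const char *name)
{
        int i;

        for (i = 0; i < n; i++)
                if (!strcmp(f[i].name, name))
                        return f[i].value;
        return -1;
}

int main(void)
{
        /* the four functions offered by the rt3883 "pci" group */
        static const struct pmx_func pci_func[] = {
                { "pci-dev", 0 }, { "pci-host2", 1 },
                { "pci-host1", 2 }, { "pci-fnc", 3 },
        };

        printf("pci-host1 -> mux value %d\n",
               func_value(pci_func, 4, "pci-host1"));
        return 0;
}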
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index 6b38720c56e3..961007ce7b3a 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -38,8 +38,7 @@ config PINCTRL_RENESAS
select PINCTRL_PFC_R8A77995 if ARCH_R8A77995
select PINCTRL_PFC_R8A779A0 if ARCH_R8A779A0
select PINCTRL_PFC_R8A779F0 if ARCH_R8A779F0
- select PINCTRL_RZG2L if ARCH_R9A07G044
- select PINCTRL_RZG2L if ARCH_R9A07G054
+ select PINCTRL_RZG2L if ARCH_RZG2L
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
select PINCTRL_PFC_SH7269 if CPU_SUBTYPE_SH7269
@@ -184,14 +183,14 @@ config PINCTRL_RZA2
This selects GPIO and pinctrl driver for Renesas RZ/A2 platforms.
config PINCTRL_RZG2L
- bool "pin control support for RZ/{G2L,V2L}" if COMPILE_TEST
+ bool "pin control support for RZ/{G2L,G2UL,V2L}" if COMPILE_TEST
depends on OF
select GPIOLIB
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
help
- This selects GPIO and pinctrl driver for Renesas RZ/{G2L,V2L}
+ This selects GPIO and pinctrl driver for Renesas RZ/{G2L,G2UL,V2L}
platforms.
config PINCTRL_PFC_R8A77470
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index d0d4714731c1..8c14b2021bf0 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -13,10 +13,11 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
-#include <linux/init.h>
+#include <linux/math.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/machine.h>
@@ -71,12 +72,11 @@ static int sh_pfc_map_resources(struct sh_pfc *pfc,
/* Fill them. */
for (i = 0; i < num_windows; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- windows->phys = res->start;
- windows->size = resource_size(res);
- windows->virt = devm_ioremap_resource(pfc->dev, res);
+ windows->virt = devm_platform_get_and_ioremap_resource(pdev, i, &res);
if (IS_ERR(windows->virt))
return -ENOMEM;
+ windows->phys = res->start;
+ windows->size = resource_size(res);
windows++;
}
for (i = 0; i < num_irqs; i++)
@@ -214,7 +214,7 @@ static void sh_pfc_config_reg_helper(struct sh_pfc *pfc,
*maskp = (1 << crp->var_field_width[in_pos]) - 1;
*posp = crp->reg_width;
for (k = 0; k <= in_pos; k++)
- *posp -= crp->var_field_width[k];
+ *posp -= abs(crp->var_field_width[k]);
}
}
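
A negative var_field_width[] entry now marks a reserved field: it still consumes register bits, hence the abs() above, but describes no enum_ids. A standalone sketch of the resulting bit-position computation, using the CHG_PINSEL_LCD3 layout rewritten later in this patch, GROUP(-20, 2, 2, -6, 2), as the example:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* CHG_PINSEL_LCD3: GROUP(-20, 2, 2, -6, 2) on a 32-bit register */
        static const int var_field_width[] = { -20, 2, 2, -6, 2 };
        int reg_width = 32;
        unsigned int in_pos;

        for (in_pos = 0; in_pos < 5; in_pos++) {
                int pos = reg_width;
                unsigned int k;

                /* as in the helper above: subtract abs() of every width up
                 * to and including this field to find its LSB */
                for (k = 0; k <= in_pos; k++)
                        pos -= abs(var_field_width[k]);

                if (var_field_width[in_pos] < 0)
                        printf("field %u: reserved, %d bits\n",
                               in_pos, -var_field_width[in_pos]);
                else
                        printf("field %u: bits [%d:%d]\n", in_pos,
                               pos + var_field_width[in_pos] - 1, pos);
        }
        return 0;
}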
@@ -262,14 +262,17 @@ static int sh_pfc_get_config_reg(struct sh_pfc *pfc, u16 enum_id,
if (!r_width)
break;
- for (bit_pos = 0; bit_pos < r_width; bit_pos += curr_width) {
+ for (bit_pos = 0; bit_pos < r_width; bit_pos += curr_width, m++) {
u32 ncomb;
u32 n;
- if (f_width)
+ if (f_width) {
curr_width = f_width;
- else
- curr_width = config_reg->var_field_width[m];
+ } else {
+ curr_width = abs(config_reg->var_field_width[m]);
+ if (config_reg->var_field_width[m] < 0)
+ continue;
+ }
ncomb = 1 << curr_width;
for (n = 0; n < ncomb; n++) {
@@ -281,7 +284,6 @@ static int sh_pfc_get_config_reg(struct sh_pfc *pfc, u16 enum_id,
}
}
pos += ncomb;
- m++;
}
k++;
}
@@ -875,7 +877,8 @@ static const struct sh_pfc_pin __init *sh_pfc_find_pin(
static void __init sh_pfc_check_cfg_reg(const char *drvname,
const struct pinmux_cfg_reg *cfg_reg)
{
- unsigned int i, n, rw, fw;
+ unsigned int i, n, rw, r;
+ int fw;
sh_pfc_check_reg(drvname, cfg_reg->reg,
GENMASK(cfg_reg->reg_width - 1, 0));
@@ -883,16 +886,29 @@ static void __init sh_pfc_check_cfg_reg(const char *drvname,
if (cfg_reg->field_width) {
fw = cfg_reg->field_width;
n = (cfg_reg->reg_width / fw) << fw;
+ for (i = 0, r = 0; i < n; i += 1 << fw) {
+ if (is0s(&cfg_reg->enum_ids[i], 1 << fw))
+ r++;
+ }
+
+ if ((r << fw) * sizeof(u16) > cfg_reg->reg_width / fw)
+ sh_pfc_warn("reg 0x%x can be described with variable-width reserved fields\n",
+ cfg_reg->reg);
+
/* Skip field checks (done at build time) */
goto check_enum_ids;
}
for (i = 0, n = 0, rw = 0; (fw = cfg_reg->var_field_width[i]); i++) {
- if (fw > 3 && is0s(&cfg_reg->enum_ids[n], 1 << fw))
- sh_pfc_warn("reg 0x%x: reserved field [%u:%u] can be split to reduce table size\n",
- cfg_reg->reg, rw, rw + fw - 1);
- n += 1 << fw;
- rw += fw;
+ if (fw < 0) {
+ rw += -fw;
+ } else {
+ if (is0s(&cfg_reg->enum_ids[n], 1 << fw))
+ sh_pfc_warn("reg 0x%x: field [%u:%u] can be described as reserved\n",
+ cfg_reg->reg, rw, rw + fw - 1);
+ n += 1 << fw;
+ rw += fw;
+ }
}
if (rw != cfg_reg->reg_width)
@@ -1007,7 +1023,18 @@ static void __init sh_pfc_compare_groups(const char *drvname,
static void __init sh_pfc_check_info(const struct sh_pfc_soc_info *info)
{
const struct pinmux_drive_reg *drive_regs = info->drive_regs;
+#define drive_nfields ARRAY_SIZE(drive_regs->fields)
+#define drive_ofs(i) drive_regs[(i) / drive_nfields]
+#define drive_reg(i) drive_ofs(i).reg
+#define drive_bit(i) ((i) % drive_nfields)
+#define drive_field(i) drive_ofs(i).fields[drive_bit(i)]
const struct pinmux_bias_reg *bias_regs = info->bias_regs;
+#define bias_npins ARRAY_SIZE(bias_regs->pins)
+#define bias_ofs(i) bias_regs[(i) / bias_npins]
+#define bias_puen(i) bias_ofs(i).puen
+#define bias_pud(i) bias_ofs(i).pud
+#define bias_bit(i) ((i) % bias_npins)
+#define bias_pin(i) bias_ofs(i).pins[bias_bit(i)]
const char *drvname = info->name;
unsigned int *refcnts;
unsigned int i, j, k;
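
The drive_reg()/drive_bit() and bias_puen()/bias_pin() helpers above replace the open-coded [i / 8].fields[i % 8] and [i / 32].pins[i % 32] indexing, deriving the divisor from the array size instead of hard-coding it. A standalone sketch of that flat-index mapping (the register offsets and pin numbers below are made-up placeholders):

#include <stdio.h>

#define NFIELDS 8

struct drive_reg {
        unsigned int reg;
        unsigned int fields[NFIELDS];   /* one drive-strength field per pin */
};

int main(void)
{
        static const struct drive_reg regs[] = {
                { 0x300, { 1, 2, 3, 4, 5, 6, 7, 8 } },
                { 0x304, { 9, 10, 11, 12, 13, 14, 15, 16 } },
        };
        unsigned int i;

        /* a flat counter i selects register i / NFIELDS, field i % NFIELDS */
        for (i = 0; i < 2 * NFIELDS; i += 5) {
                const struct drive_reg *r = &regs[i / NFIELDS];

                printf("i=%2u -> reg 0x%x field %u (pin %u)\n",
                       i, r->reg, i % NFIELDS, r->fields[i % NFIELDS]);
        }
        return 0;
}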
@@ -1076,17 +1103,17 @@ static void __init sh_pfc_check_info(const struct sh_pfc_soc_info *info)
if (!drive_regs) {
sh_pfc_err_once(drive, "SH_PFC_PIN_CFG_DRIVE_STRENGTH flag set but drive_regs missing\n");
} else {
- for (j = 0; drive_regs[j / 8].reg; j++) {
- if (!drive_regs[j / 8].fields[j % 8].pin &&
- !drive_regs[j / 8].fields[j % 8].offset &&
- !drive_regs[j / 8].fields[j % 8].size)
+ for (j = 0; drive_reg(j); j++) {
+ if (!drive_field(j).pin &&
+ !drive_field(j).offset &&
+ !drive_field(j).size)
continue;
- if (drive_regs[j / 8].fields[j % 8].pin == pin->pin)
+ if (drive_field(j).pin == pin->pin)
break;
}
- if (!drive_regs[j / 8].reg)
+ if (!drive_reg(j))
sh_pfc_err("pin %s: SH_PFC_PIN_CFG_DRIVE_STRENGTH flag set but not in drive_regs\n",
pin->name);
}
@@ -1164,20 +1191,17 @@ static void __init sh_pfc_check_info(const struct sh_pfc_soc_info *info)
for (i = 0; drive_regs && drive_regs[i].reg; i++)
sh_pfc_check_drive_reg(info, &drive_regs[i]);
- for (i = 0; drive_regs && drive_regs[i / 8].reg; i++) {
- if (!drive_regs[i / 8].fields[i % 8].pin &&
- !drive_regs[i / 8].fields[i % 8].offset &&
- !drive_regs[i / 8].fields[i % 8].size)
+ for (i = 0; drive_regs && drive_reg(i); i++) {
+ if (!drive_field(i).pin && !drive_field(i).offset &&
+ !drive_field(i).size)
continue;
for (j = 0; j < i; j++) {
- if (drive_regs[i / 8].fields[i % 8].pin ==
- drive_regs[j / 8].fields[j % 8].pin &&
- drive_regs[j / 8].fields[j % 8].offset &&
- drive_regs[j / 8].fields[j % 8].size) {
- sh_pfc_err("drive_reg 0x%x:%u/0x%x:%u: pin conflict\n",
- drive_regs[i / 8].reg, i % 8,
- drive_regs[j / 8].reg, j % 8);
+ if (drive_field(i).pin == drive_field(j).pin &&
+ drive_field(j).offset && drive_field(j).size) {
+ sh_pfc_err("drive_reg 0x%x:%zu/0x%x:%zu: pin conflict\n",
+ drive_reg(i), drive_bit(i),
+ drive_reg(j), drive_bit(j));
}
}
}
@@ -1186,26 +1210,23 @@ static void __init sh_pfc_check_info(const struct sh_pfc_soc_info *info)
for (i = 0; bias_regs && (bias_regs[i].puen || bias_regs[i].pud); i++)
sh_pfc_check_bias_reg(info, &bias_regs[i]);
- for (i = 0; bias_regs &&
- (bias_regs[i / 32].puen || bias_regs[i / 32].pud); i++) {
- if (bias_regs[i / 32].pins[i % 32] == SH_PFC_PIN_NONE)
+ for (i = 0; bias_regs && (bias_puen(i) || bias_pud(i)); i++) {
+ if (bias_pin(i) == SH_PFC_PIN_NONE)
continue;
for (j = 0; j < i; j++) {
- if (bias_regs[i / 32].pins[i % 32] !=
- bias_regs[j / 32].pins[j % 32])
+ if (bias_pin(i) != bias_pin(j))
continue;
- if (bias_regs[i / 32].puen && bias_regs[j / 32].puen)
- sh_pfc_err("bias_reg 0x%x:%u/0x%x:%u: pin conflict\n",
- bias_regs[i / 32].puen, i % 32,
- bias_regs[j / 32].puen, j % 32);
- if (bias_regs[i / 32].pud && bias_regs[j / 32].pud)
- sh_pfc_err("bias_reg 0x%x:%u/0x%x:%u: pin conflict\n",
- bias_regs[i / 32].pud, i % 32,
- bias_regs[j / 32].pud, j % 32);
+ if (bias_puen(i) && bias_puen(j))
+ sh_pfc_err("bias_reg 0x%x:%zu/0x%x:%zu: pin conflict\n",
+ bias_puen(i), bias_bit(i),
+ bias_puen(j), bias_bit(j));
+ if (bias_pud(i) && bias_pud(j))
+ sh_pfc_err("bias_reg 0x%x:%zu/0x%x:%zu: pin conflict\n",
+ bias_pud(i), bias_bit(i),
+ bias_pud(j), bias_bit(j));
}
-
}
/* Check ioctrl registers */
diff --git a/drivers/pinctrl/renesas/gpio.c b/drivers/pinctrl/renesas/gpio.c
index ad06f5355d1e..ea3d38b4af8d 100644
--- a/drivers/pinctrl/renesas/gpio.c
+++ b/drivers/pinctrl/renesas/gpio.c
@@ -8,7 +8,6 @@
#include <linux/device.h>
#include <linux/gpio/driver.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/slab.h>
diff --git a/drivers/pinctrl/renesas/pfc-emev2.c b/drivers/pinctrl/renesas/pfc-emev2.c
index 2326d348447d..1d8b540110f2 100644
--- a/drivers/pinctrl/renesas/pfc-emev2.c
+++ b/drivers/pinctrl/renesas/pfc-emev2.c
@@ -4,7 +4,6 @@
*
* Copyright (C) 2015 Niklas Söderlund
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include "sh_pfc.h"
@@ -1570,61 +1569,39 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
))
},
{ PINMUX_CFG_REG_VAR("CHG_PINSEL_LCD3", 0xe0140284, 32,
- GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
- 2, 2),
+ GROUP(-20, 2, 2, -6, 2),
GROUP(
- /* 31 - 12 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 31 - 12 RESERVED */
/* 11 - 10 */
FN_SEL_LCD3_11_10_00, FN_SEL_LCD3_11_10_01,
FN_SEL_LCD3_11_10_10, 0,
/* 9 - 8 */
FN_SEL_LCD3_9_8_00, 0, FN_SEL_LCD3_9_8_10, 0,
- /* 7 - 2 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 7 - 2 RESERVED */
/* 1 - 0 */
FN_SEL_LCD3_1_0_00, FN_SEL_LCD3_1_0_01, 0, 0,
))
},
{ PINMUX_CFG_REG_VAR("CHG_PINSEL_UART", 0xe0140288, 32,
- GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 2),
+ GROUP(-30, 2),
GROUP(
- /* 31 - 2 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 31 - 2 RESERVED */
/* 1 - 0 */
FN_SEL_UART_1_0_00, FN_SEL_UART_1_0_01, 0, 0,
))
},
{ PINMUX_CFG_REG_VAR("CHG_PINSEL_IIC", 0xe014028c, 32,
- GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 2),
+ GROUP(-30, 2),
GROUP(
- /* 31 - 2 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 31 - 2 RESERVED */
/* 1 - 0 */
FN_SEL_IIC_1_0_00, FN_SEL_IIC_1_0_01, 0, 0,
))
},
{ PINMUX_CFG_REG_VAR("CHG_PINSEL_AB", 0xe0140294, 32,
- GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2),
+ GROUP(-18, 2, 2, 2, 2, 2, 2, 2),
GROUP(
- /* 31 - 14 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0,
+ /* 31 - 14 RESERVED */
/* 13 - 12 */
FN_SEL_AB_13_12_00, 0, FN_SEL_AB_13_12_10, 0,
/* 11 - 10 */
@@ -1644,14 +1621,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
))
},
{ PINMUX_CFG_REG_VAR("CHG_PINSEL_USI", 0xe0140298, 32,
- GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
- 2, 2, 2),
+ GROUP(-22, 2, 2, 2, 2, 2),
GROUP(
- /* 31 - 10 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 31 - 10 RESERVED */
/* 9 - 8 */
FN_SEL_USI_9_8_00, FN_SEL_USI_9_8_01, 0, 0,
/* 7 - 6 */
@@ -1665,15 +1637,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
))
},
{ PINMUX_CFG_REG_VAR("CHG_PINSEL_HSI", 0xe01402a8, 32,
- GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 2),
+ GROUP(-30, 2),
GROUP(
- /* 31 - 2 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 31 - 2 RESERVED */
/* 1 - 0 */
FN_SEL_HSI_1_0_00, FN_SEL_HSI_1_0_01, 0, 0,
))
diff --git a/drivers/pinctrl/renesas/pfc-r8a73a4.c b/drivers/pinctrl/renesas/pfc-r8a73a4.c
index ba3a1857f80a..dbfc46fe2f27 100644
--- a/drivers/pinctrl/renesas/pfc-r8a73a4.c
+++ b/drivers/pinctrl/renesas/pfc-r8a73a4.c
@@ -2270,15 +2270,17 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MSEL1CR_00_0, MSEL1CR_00_1,
))
},
- { PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1, GROUP(
+ { PINMUX_CFG_REG_VAR("MSEL3CR", 0xe6058020, 32,
+ GROUP(1, -2, 1, 1, 1, -2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, -2, 1, 1, 1, 1, -2, 1, -2, 1,
+ -1, 1, 1),
+ GROUP(
MSEL3CR_31_0, MSEL3CR_31_1,
- 0, 0,
- 0, 0,
+ /* RESERVED [2] */
MSEL3CR_28_0, MSEL3CR_28_1,
MSEL3CR_27_0, MSEL3CR_27_1,
MSEL3CR_26_0, MSEL3CR_26_1,
- 0, 0,
- 0, 0,
+ /* RESERVED [2] */
MSEL3CR_23_0, MSEL3CR_23_1,
MSEL3CR_22_0, MSEL3CR_22_1,
MSEL3CR_21_0, MSEL3CR_21_1,
@@ -2288,19 +2290,16 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MSEL3CR_17_0, MSEL3CR_17_1,
MSEL3CR_16_0, MSEL3CR_16_1,
MSEL3CR_15_0, MSEL3CR_15_1,
- 0, 0,
- 0, 0,
+ /* RESERVED [2] */
MSEL3CR_12_0, MSEL3CR_12_1,
MSEL3CR_11_0, MSEL3CR_11_1,
MSEL3CR_10_0, MSEL3CR_10_1,
MSEL3CR_09_0, MSEL3CR_09_1,
- 0, 0,
- 0, 0,
+ /* RESERVED [2] */
MSEL3CR_06_0, MSEL3CR_06_1,
- 0, 0,
- 0, 0,
+ /* RESERVED [2] */
MSEL3CR_03_0, MSEL3CR_03_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL3CR_01_0, MSEL3CR_01_1,
MSEL3CR_00_0, MSEL3CR_00_1,
))
@@ -2375,37 +2374,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
))
},
- { PINMUX_CFG_REG("MSEL8CR", 0xe6058034, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("MSEL8CR", 0xe6058034, 32,
+ GROUP(-15, 1, -14, 1, 1),
+ GROUP(
+ /* RESERVED [15] */
MSEL8CR_16_0, MSEL8CR_16_1,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* RESERVED [14] */
MSEL8CR_01_0, MSEL8CR_01_1,
MSEL8CR_00_0, MSEL8CR_00_1,
))
diff --git a/drivers/pinctrl/renesas/pfc-r8a7740.c b/drivers/pinctrl/renesas/pfc-r8a7740.c
index e8b9fb74a802..6dcd39918daf 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7740.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7740.c
@@ -3250,89 +3250,93 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PORTCR(210, 0xe60530d2), /* PORT210CR */
PORTCR(211, 0xe60530d3), /* PORT211CR */
- { PINMUX_CFG_REG("MSEL1CR", 0xe605800c, 32, 1, GROUP(
+ { PINMUX_CFG_REG_VAR("MSEL1CR", 0xe605800c, 32,
+ GROUP(1, 1, 1, 1, 1, 1, -9, 1, 1, 1, 1, 1,
+ -2, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1),
+ GROUP(
MSEL1CR_31_0, MSEL1CR_31_1,
MSEL1CR_30_0, MSEL1CR_30_1,
MSEL1CR_29_0, MSEL1CR_29_1,
MSEL1CR_28_0, MSEL1CR_28_1,
MSEL1CR_27_0, MSEL1CR_27_1,
MSEL1CR_26_0, MSEL1CR_26_1,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED [9] */
MSEL1CR_16_0, MSEL1CR_16_1,
MSEL1CR_15_0, MSEL1CR_15_1,
MSEL1CR_14_0, MSEL1CR_14_1,
MSEL1CR_13_0, MSEL1CR_13_1,
MSEL1CR_12_0, MSEL1CR_12_1,
- 0, 0, 0, 0,
+ /* RESERVED [2] */
MSEL1CR_9_0, MSEL1CR_9_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL1CR_7_0, MSEL1CR_7_1,
MSEL1CR_6_0, MSEL1CR_6_1,
MSEL1CR_5_0, MSEL1CR_5_1,
MSEL1CR_4_0, MSEL1CR_4_1,
MSEL1CR_3_0, MSEL1CR_3_1,
MSEL1CR_2_0, MSEL1CR_2_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL1CR_0_0, MSEL1CR_0_1,
))
},
- { PINMUX_CFG_REG("MSEL3CR", 0xE6058020, 32, 1, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("MSEL3CR", 0xE6058020, 32,
+ GROUP(-16, 1, -8, 1, -6),
+ GROUP(
+ /* RESERVED [16] */
MSEL3CR_15_0, MSEL3CR_15_1,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED [8] */
MSEL3CR_6_0, MSEL3CR_6_1,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0,
+ /* RESERVED [6] */
))
},
- { PINMUX_CFG_REG("MSEL4CR", 0xE6058024, 32, 1, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("MSEL4CR", 0xE6058024, 32,
+ GROUP(-12, 1, 1, -2, 1, -4, 1, -3, 1, -1, 1, -2,
+ 1, -1),
+ GROUP(
+ /* RESERVED [12] */
MSEL4CR_19_0, MSEL4CR_19_1,
MSEL4CR_18_0, MSEL4CR_18_1,
- 0, 0, 0, 0,
+ /* RESERVED [2] */
MSEL4CR_15_0, MSEL4CR_15_1,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED [4] */
MSEL4CR_10_0, MSEL4CR_10_1,
- 0, 0, 0, 0, 0, 0,
+ /* RESERVED [3] */
MSEL4CR_6_0, MSEL4CR_6_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL4CR_4_0, MSEL4CR_4_1,
- 0, 0, 0, 0,
+ /* RESERVED [2] */
MSEL4CR_1_0, MSEL4CR_1_1,
- 0, 0,
+ /* RESERVED [1] */
))
},
- { PINMUX_CFG_REG("MSEL5CR", 0xE6058028, 32, 1, GROUP(
+ { PINMUX_CFG_REG_VAR("MSEL5CR", 0xE6058028, 32,
+ GROUP(1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1,
+ -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1,
+ -1, 1, 1, 1, 1, 1, 1, 1, -1, 1),
+ GROUP(
MSEL5CR_31_0, MSEL5CR_31_1,
MSEL5CR_30_0, MSEL5CR_30_1,
MSEL5CR_29_0, MSEL5CR_29_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL5CR_27_0, MSEL5CR_27_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL5CR_25_0, MSEL5CR_25_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL5CR_23_0, MSEL5CR_23_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL5CR_21_0, MSEL5CR_21_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL5CR_19_0, MSEL5CR_19_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL5CR_17_0, MSEL5CR_17_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL5CR_15_0, MSEL5CR_15_1,
MSEL5CR_14_0, MSEL5CR_14_1,
MSEL5CR_13_0, MSEL5CR_13_1,
MSEL5CR_12_0, MSEL5CR_12_1,
MSEL5CR_11_0, MSEL5CR_11_1,
MSEL5CR_10_0, MSEL5CR_10_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL5CR_8_0, MSEL5CR_8_1,
MSEL5CR_7_0, MSEL5CR_7_1,
MSEL5CR_6_0, MSEL5CR_6_1,
@@ -3340,7 +3344,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MSEL5CR_4_0, MSEL5CR_4_1,
MSEL5CR_3_0, MSEL5CR_3_1,
MSEL5CR_2_0, MSEL5CR_2_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL5CR_0_0, MSEL5CR_0_1,
))
},
diff --git a/drivers/pinctrl/renesas/pfc-r8a77470.c b/drivers/pinctrl/renesas/pfc-r8a77470.c
index ee6e8fabab24..b5725c3ed2b6 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77470.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77470.c
@@ -2485,16 +2485,11 @@ static const struct sh_pfc_function pinmux_functions[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xE6060004, 32,
+ GROUP(-9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_23 RESERVED */
GP_0_22_FN, FN_MMC0_D7,
GP_0_21_FN, FN_MMC0_D6,
GP_0_20_FN, FN_IP1_7_4,
@@ -2519,16 +2514,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_1_FN, FN_USB0_OVC,
GP_0_0_FN, FN_USB0_PWEN, ))
},
- { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR1", 0xE6060008, 32,
+ GROUP(-9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP1_31_23 RESERVED */
GP_1_22_FN, FN_IP4_3_0,
GP_1_21_FN, FN_IP3_31_28,
GP_1_20_FN, FN_IP3_27_24,
@@ -2587,22 +2577,15 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_1_FN, FN_IP4_11_8,
GP_2_0_FN, FN_IP4_7_4, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1, GROUP(
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR3", 0xE6060010, 32,
+ GROUP(-2, 1, 1, 1, -10, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP3_31_30 RESERVED */
GP_3_29_FN, FN_IP10_19_16,
GP_3_28_FN, FN_IP10_15_12,
GP_3_27_FN, FN_IP10_11_8,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* GP3_26_17 RESERVED */
GP_3_16_FN, FN_IP10_7_4,
GP_3_15_FN, FN_IP10_3_0,
GP_3_14_FN, FN_IP9_31_28,
@@ -2689,9 +2672,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_1_FN, FN_IP14_3_0,
GP_5_0_FN, FN_IP13_31_28, ))
},
- { PINMUX_CFG_REG_VAR("IPSR0", 0xE6060040, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR0", 0xE6060040, 32, 4, GROUP(
/* IP0_31_28 [4] */
FN_SD0_WP, FN_IRQ7, FN_CAN0_TX_A, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2717,9 +2698,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SD0_CLK, 0, 0, FN_SSI_SCK1_C, FN_RX3_C, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR1", 0xE6060044, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR1", 0xE6060044, 32, 4, GROUP(
/* IP1_31_28 [4] */
FN_D5, FN_HRX2, FN_SCL1_B, FN_PWM2_C, FN_TCLK2_B, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2745,9 +2724,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_MMC0_D4, FN_SD1_CD, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR2", 0xE6060048, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR2", 0xE6060048, 32, 4, GROUP(
/* IP2_31_28 [4] */
FN_D13, FN_MSIOF2_SYNC_A, 0, FN_RX4_C, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
@@ -2773,9 +2750,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_D6, FN_HTX2, FN_SDA1_B, FN_PWM4_C, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR3", 0xE606004C, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR3", 0xE606004C, 32, 4, GROUP(
/* IP3_31_28 [4] */
FN_QSPI0_SSL, FN_WE1_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
@@ -2802,9 +2777,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, FN_AVB_AVTP_CAPTURE_A,
0, 0, 0, 0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR4", 0xE6060050, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR4", 0xE6060050, 32, 4, GROUP(
/* IP4_31_28 [4] */
FN_DU0_DR6, 0, FN_RX2_C, 0, 0, 0, FN_A6, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2830,9 +2803,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_EX_WAIT0, FN_CAN_CLK_B, FN_SCIF_CLK_A, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR5", 0xE6060054, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR5", 0xE6060054, 32, 4, GROUP(
/* IP5_31_28 [4] */
FN_DU0_DG6, 0, FN_HRX1_C, 0, 0, 0, FN_A14, 0, 0, 0,
0, 0, 0, 0, 0, 0,
@@ -2858,9 +2829,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU0_DR7, 0, FN_TX2_C, 0, FN_PWM2_B, 0, FN_A7, 0,
0, 0, 0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR6", 0xE6060058, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR6", 0xE6060058, 32, 4, GROUP(
/* IP6_31_28 [4] */
FN_DU0_DB6, 0, 0, 0, 0, 0, FN_A22, 0, 0,
0, 0, 0, 0, 0, 0, 0,
@@ -2886,9 +2855,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU0_DG7, 0, FN_HTX1_C, 0, FN_PWM6_B, 0, FN_A15,
0, 0, 0, 0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR7", 0xE606005C, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR7", 0xE606005C, 32, 4, GROUP(
/* IP7_31_28 [4] */
FN_DU0_DISP, 0, 0, 0, FN_CAN1_RX_C, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
@@ -2914,9 +2881,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU0_DB7, 0, 0, 0, 0, 0, FN_A23, 0, 0,
0, 0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR8", 0xE6060060, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR8", 0xE6060060, 32, 4, GROUP(
/* IP8_31_28 [4] */
FN_VI1_DATA5, 0, 0, 0, FN_AVB_RXD4, FN_ETH_LINK, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
@@ -2942,9 +2907,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU0_CDE, 0, 0, 0, FN_CAN1_TX_C, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR9", 0xE6060064, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR9", 0xE6060064, 32, 4, GROUP(
/* IP9_31_28 [4] */
FN_VI1_DATA9, 0, 0, FN_SDA2_B, FN_AVB_TXD0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
@@ -2970,9 +2933,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI1_DATA6, 0, 0, 0, FN_AVB_RXD5, FN_ETH_TXD1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR10", 0xE6060068, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR10", 0xE6060068, 32, 4, GROUP(
/* IP10_31_28 [4] */
FN_SCL1_A, FN_RX4_A, FN_PWM5_D, FN_DU1_DR0, 0, 0,
FN_SSI_SCK6_B, FN_VI0_G0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -2999,9 +2960,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI1_DATA10, 0, 0, FN_CAN0_RX_B, FN_AVB_TXD1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR11", 0xE606006C, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR11", 0xE606006C, 32, 4, GROUP(
/* IP11_31_28 [4] */
FN_HRX1_A, FN_SCL4_A, FN_PWM6_A, FN_DU1_DG0, FN_RX0_A, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3031,9 +2990,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SDA1_A, FN_TX4_A, 0, FN_DU1_DR1, 0, 0, FN_SSI_WS6_B,
FN_VI0_G1, 0, 0, 0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR12", 0xE6060070, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR12", 0xE6060070, 32, 4, GROUP(
/* IP12_31_28 [4] */
FN_SD2_DAT2, FN_RX2_A, 0, FN_DU1_DB0, FN_SSI_SDATA2_B, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3059,9 +3016,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_HTX1_A, FN_SDA4_A, 0, FN_DU1_DG1, FN_TX0_A, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR13", 0xE6060074, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR13", 0xE6060074, 32, 4, GROUP(
/* IP13_31_28 [4] */
FN_SSI_SCK5_A, 0, 0, FN_DU1_DOTCLKOUT1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
@@ -3088,9 +3043,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SD2_DAT3, FN_TX2_A, 0, FN_DU1_DB1, FN_SSI_WS9_B, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR14", 0xE6060078, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR14", 0xE6060078, 32, 4, GROUP(
/* IP14_31_28 [4] */
FN_SSI_SDATA7_A, 0, 0, FN_IRQ8, FN_AUDIO_CLKA_D, FN_CAN_CLK_D,
FN_VI0_G5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3116,9 +3069,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SSI_WS5_A, 0, FN_SCL3_C, FN_DU1_DOTCLKIN, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR15", 0xE606007C, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR15", 0xE606007C, 32, 4, GROUP(
/* IP15_31_28 [4] */
FN_SSI_WS4_A, 0, FN_AVB_PHY_INT, 0, 0, 0, FN_VI0_R5, 0, 0, 0,
0, 0, 0, 0, 0, 0,
@@ -3144,9 +3095,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SSI_SCK0129_A, FN_MSIOF1_RXD_A, FN_RX5_D, 0, 0, 0,
FN_VI0_G6, 0, 0, 0, 0, 0, 0, 0, 0, 0, ))
},
- { PINMUX_CFG_REG_VAR("IPSR16", 0xE6060080, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
- GROUP(
+ { PINMUX_CFG_REG("IPSR16", 0xE6060080, 32, 4, GROUP(
/* IP16_31_28 [4] */
FN_SSI_SDATA2_A, FN_HRTS1_N_B, 0, 0, 0, 0,
FN_VI0_DATA4_VI0_B4, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3174,10 +3123,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR17", 0xE6060084, 32,
- GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(-4, 4, 4, 4, 4, 4, 4, 4),
GROUP(
- /* IP17_31_28 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP17_31_28 [4] RESERVED */
/* IP17_27_24 [4] */
FN_AUDIO_CLKOUT_A, FN_SDA4_B, 0, 0, 0, 0,
FN_VI0_VSYNC_N, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3201,25 +3149,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI0_DATA5_VI0_B5, 0, 0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xE60600C0, 32,
- GROUP(1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 2, 1,
- 3, 3, 1, 2, 3, 3, 1),
+ GROUP(-5, 2, -2, 2, 2, 2, -1,
+ 3, 3, -1, 2, 3, 3, 1),
GROUP(
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
+ /* RESERVED [5] */
/* SEL_ADGA [2] */
FN_SEL_ADGA_0, FN_SEL_ADGA_1, FN_SEL_ADGA_2, FN_SEL_ADGA_3,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
+ /* RESERVED [2] */
/* SEL_CANCLK [2] */
FN_SEL_CANCLK_0, FN_SEL_CANCLK_1, FN_SEL_CANCLK_2,
FN_SEL_CANCLK_3,
@@ -3228,7 +3164,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_CAN0 [2] */
FN_SEL_CAN0_0, FN_SEL_CAN0_1, FN_SEL_CAN0_2, FN_SEL_CAN0_3,
/* RESERVED [1] */
- 0, 0,
/* SEL_I2C04 [3] */
FN_SEL_I2C04_0, FN_SEL_I2C04_1, FN_SEL_I2C04_2, FN_SEL_I2C04_3,
FN_SEL_I2C04_4, 0, 0, 0,
@@ -3236,7 +3171,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_I2C03_0, FN_SEL_I2C03_1, FN_SEL_I2C03_2, FN_SEL_I2C03_3,
FN_SEL_I2C03_4, 0, 0, 0,
/* RESERVED [1] */
- 0, 0,
/* SEL_I2C02 [2] */
FN_SEL_I2C02_0, FN_SEL_I2C02_1, FN_SEL_I2C02_2, FN_SEL_I2C02_3,
/* SEL_I2C01 [3] */
@@ -3249,8 +3183,8 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_AVB_0, FN_SEL_AVB_1, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xE60600C4, 32,
- GROUP(1, 3, 3, 2, 2, 1, 2, 2, 2, 1, 1, 1,
- 1, 1, 2, 1, 1, 2, 2, 1),
+ GROUP(1, 3, 3, 2, 2, 1, 2, 2, 2, -1, 1, -1,
+ 1, 1, -2, 1, 1, -2, 2, 1),
GROUP(
/* SEL_SCIFCLK [1] */
FN_SEL_SCIFCLK_0, FN_SEL_SCIFCLK_1,
@@ -3273,52 +3207,28 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_MSIOF2 [2] */
FN_SEL_MSIOF2_0, FN_SEL_MSIOF2_1, FN_SEL_MSIOF2_2, 0,
/* RESERVED [1] */
- 0, 0,
/* SEL_MSIOF1 [1] */
FN_SEL_MSIOF1_0, FN_SEL_MSIOF1_1,
/* RESERVED [1] */
- 0, 0,
/* SEL_MSIOF0 [1] */
FN_SEL_MSIOF0_0, FN_SEL_MSIOF0_1,
/* SEL_RCN [1] */
FN_SEL_RCN_0, FN_SEL_RCN_1,
/* RESERVED [2] */
- 0, 0, 0, 0,
/* SEL_TMU2 [1] */
FN_SEL_TMU2_0, FN_SEL_TMU2_1,
/* SEL_TMU1 [1] */
FN_SEL_TMU1_0, FN_SEL_TMU1_1,
/* RESERVED [2] */
- 0, 0, 0, 0,
/* SEL_HSCIF1 [2] */
FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1, FN_SEL_HSCIF1_2, 0,
/* SEL_HSCIF0 [1] */
FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE60600C8, 32,
- GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2),
+ GROUP(-10, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2),
GROUP(
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
+ /* RESERVED [10] */
/* SEL_ADGB [2] */
FN_SEL_ADGB_0, FN_SEL_ADGB_1, FN_SEL_ADGB_2, 0,
/* SEL_ADGC [2] */
diff --git a/drivers/pinctrl/renesas/pfc-r8a7778.c b/drivers/pinctrl/renesas/pfc-r8a7778.c
index a24672ca3c01..35bdb9af8160 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7778.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7778.c
@@ -2240,11 +2240,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
},
{ PINMUX_CFG_REG_VAR("IPSR0", 0xfffc0020, 32,
- GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ GROUP(-1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 3, 4, 3, 3, 2),
GROUP(
- /* IP0_31 [1] */
- 0, 0,
+ /* IP0_31 [1] RESERVED */
/* IP0_30 [1] */
FN_A19, 0,
/* IP0_29 [1] */
@@ -2296,13 +2295,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
))
},
{ PINMUX_CFG_REG_VAR("IPSR1", 0xfffc0024, 32,
- GROUP(1, 1, 2, 3, 1, 3, 3, 1, 2, 4, 3, 3,
+ GROUP(-2, 2, 3, 1, 3, 3, 1, 2, 4, 3, 3,
3, 1, 1),
GROUP(
- /* IP1_31 [1] */
- 0, 0,
- /* IP1_30 [1] */
- 0, 0,
+ /* IP1_31_30 [2] RESERVED */
/* IP1_29_28 [2] */
FN_EX_CS1, FN_MMC_D4, 0, 0,
/* IP1_27_25 [3] */
@@ -2437,11 +2433,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
))
},
{ PINMUX_CFG_REG_VAR("IPSR4", 0xfffc0030, 32,
- GROUP(1, 2, 2, 2, 4, 4, 2, 2, 2, 2, 1, 1,
+ GROUP(-1, 2, 2, 2, 4, 4, 2, 2, 2, 2, 1, 1,
3, 3, 1),
GROUP(
- /* IP4_31 [1] */
- 0, 0,
+ /* IP4_31 [1] RESERVED */
/* IP4_30_29 [2] */
FN_VI0_R4_B, FN_DU0_DB4, FN_LCDOUT20, 0,
/* IP4_28_27 [2] */
@@ -2481,12 +2476,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
))
},
{ PINMUX_CFG_REG_VAR("IPSR5", 0xfffc0034, 32,
- GROUP(1, 2, 3, 3, 2, 3, 3, 2, 1, 2, 2, 1,
+ GROUP(-1, 2, 3, 3, 2, 3, 3, 2, 1, 2, 2, 1,
1, 2, 2, 2),
GROUP(
- /* IP5_31 [1] */
- 0, 0,
+ /* IP5_31 [1] RESERVED */
/* IP5_30_29 [2] */
FN_SSI_SDATA7, FN_HSPI_TX0_B, FN_RX2_A, FN_CAN0_RX_B,
/* IP5_28_26 [3] */
@@ -2619,12 +2613,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
))
},
{ PINMUX_CFG_REG_VAR("IPSR8", 0xfffc0040, 32,
- GROUP(1, 1, 3, 3, 2, 3, 3, 2, 3, 2, 3, 3, 3),
+ GROUP(-2, 3, 3, 2, 3, 3, 2, 3, 2, 3, 3, 3),
GROUP(
- /* IP8_31 [1] */
- 0, 0,
- /* IP8_30 [1] */
- 0, 0,
+ /* IP8_31_30 [2] RESERVED */
/* IP8_29_27 [3] */
FN_VI0_G3, FN_SD2_CMD_B, FN_VI1_DATA5, FN_DU1_DR5,
0, FN_HRX1_B, 0, 0,
@@ -2660,12 +2651,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
))
},
{ PINMUX_CFG_REG_VAR("IPSR9", 0xfffc0044, 32,
- GROUP(1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(-2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
GROUP(
- /* IP9_31 [1] */
- 0, 0,
- /* IP9_30 [1] */
- 0, 0,
+ /* IP9_31_30 [2] RESERVED */
/* IP9_29_27 [3] */
FN_VI1_DATA11_A, FN_DU1_EXHSYNC_DU1_HSYNC,
FN_ETH_RXD1, FN_FMIN_C,
@@ -2703,24 +2691,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
))
},
{ PINMUX_CFG_REG_VAR("IPSR10", 0xfffc0048, 32,
- GROUP(1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 4,
- 3, 3, 3),
+ GROUP(-7, 3, 3, 3, 3, 4, 3, 3, 3),
GROUP(
- /* IP10_31 [1] */
- 0, 0,
- /* IP10_30 [1] */
- 0, 0,
- /* IP10_29 [1] */
- 0, 0,
- /* IP10_28 [1] */
- 0, 0,
- /* IP10_27 [1] */
- 0, 0,
- /* IP10_26 [1] */
- 0, 0,
- /* IP10_25 [1] */
- 0, 0,
+ /* IP10_31_25 [7] RESERVED */
/* IP10_24_22 [3] */
FN_SD2_WP_A, FN_VI1_DATA15, FN_EX_WAIT2_B, FN_DACK0_B,
FN_HSPI_TX2_B, FN_CAN_CLK_C, 0, 0,
@@ -2754,12 +2728,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xfffc0050, 32,
- GROUP(1, 1, 2, 2, 3, 2, 2, 1, 1, 1, 1, 2,
- 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(-1, 1, 2, 2, 3, 2, 2, -1, 1, 1, 1, 2,
+ -1, 1, 1, 1, 2, 1, -1, 1, 1, 1, 1, 1),
GROUP(
- /* SEL 31 [1] */
- 0, 0,
+ /* SEL_31 [1] RESERVED */
/* SEL_30 (SCIF5) [1] */
FN_SEL_SCIF5_A, FN_SEL_SCIF5_B,
/* SEL_29_28 (SCIF4) [2] */
@@ -2779,8 +2752,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_20_19 (SCIF0) [2] */
FN_SEL_SCIF0_A, FN_SEL_SCIF0_B,
FN_SEL_SCIF0_C, FN_SEL_SCIF0_D,
- /* SEL_18 [1] */
- 0, 0,
+ /* SEL_18 [1] RESERVED */
/* SEL_17 (SSI2) [1] */
FN_SEL_SSI2_A, FN_SEL_SSI2_B,
/* SEL_16 (SSI1) [1] */
@@ -2790,8 +2762,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_14_13 (VI0) [2] */
FN_SEL_VI0_A, FN_SEL_VI0_B,
FN_SEL_VI0_C, FN_SEL_VI0_D,
- /* SEL_12 [1] */
- 0, 0,
+ /* SEL_12 [1] RESERVED */
/* SEL_11 (SD2) [1] */
FN_SEL_SD2_A, FN_SEL_SD2_B,
/* SEL_10 (SD1) [1] */
@@ -2803,8 +2774,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_IRQ2_C, 0,
/* SEL_6 (IRQ1) [1] */
FN_SEL_IRQ1_A, FN_SEL_IRQ1_B,
- /* SEL_5 [1] */
- 0, 0,
+ /* SEL_5 [1] RESERVED */
/* SEL_4 (DREQ2) [1] */
FN_SEL_DREQ2_A, FN_SEL_DREQ2_B,
/* SEL_3 (DREQ1) [1] */
@@ -2818,18 +2788,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xfffc0054, 32,
- GROUP(1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 2, 2, 1),
+ GROUP(-4, 1, 1, 2, 1, 1, -7,
+ 2, 2, 2, 1, 1, 1, 1, 2, 2, 1),
GROUP(
- /* SEL_31 [1] */
- 0, 0,
- /* SEL_30 [1] */
- 0, 0,
- /* SEL_29 [1] */
- 0, 0,
- /* SEL_28 [1] */
- 0, 0,
+ /* SEL_31_28 [4] RESERVED */
/* SEL_27 (CAN1) [1] */
FN_SEL_CAN1_A, FN_SEL_CAN1_B,
/* SEL_26 (CAN0) [1] */
@@ -2841,20 +2804,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_HSCIF1_A, FN_SEL_HSCIF1_B,
/* SEL_22 (HSCIF0) [1] */
FN_SEL_HSCIF0_A, FN_SEL_HSCIF0_B,
- /* SEL_21 [1] */
- 0, 0,
- /* SEL_20 [1] */
- 0, 0,
- /* SEL_19 [1] */
- 0, 0,
- /* SEL_18 [1] */
- 0, 0,
- /* SEL_17 [1] */
- 0, 0,
- /* SEL_16 [1] */
- 0, 0,
- /* SEL_15 [1] */
- 0, 0,
+ /* SEL_21_15 [7] RESERVED */
/* SEL_14_13 (REMOCON) [2] */
FN_SEL_REMOCON_A, FN_SEL_REMOCON_B,
FN_SEL_REMOCON_C, 0,
diff --git a/drivers/pinctrl/renesas/pfc-r8a7779.c b/drivers/pinctrl/renesas/pfc-r8a7779.c
index 296b5fb0f349..fcc8ea48881f 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7779.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7779.c
@@ -3300,13 +3300,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_1_FN, FN_A2,
GP_5_0_FN, FN_A1 ))
},
- { PINMUX_CFG_REG("GPSR6", 0xfffc001c, 32, 1, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR6", 0xfffc001c, 32,
+ GROUP(-23, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP6_31_9 RESERVED */
GP_6_8_FN, FN_IP3_20,
GP_6_7_FN, FN_IP3_19,
GP_6_6_FN, FN_IP3_18,
@@ -3319,10 +3316,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
},
{ PINMUX_CFG_REG_VAR("IPSR0", 0xfffc0020, 32,
- GROUP(1, 3, 2, 1, 2, 4, 3, 2, 2, 2, 2, 2, 3, 3),
+ GROUP(-1, 3, 2, 1, 2, 4, 3, 2, 2, 2, 2, 2, 3, 3),
GROUP(
- /* IP0_31 [1] */
- 0, 0,
+ /* IP0_31 [1] RESERVED */
/* IP0_30_28 [3] */
FN_RD_WR, FN_FWE, FN_ATAG0, FN_VI1_R7,
FN_HRTS1, FN_RX4_C, 0, 0,
@@ -3358,10 +3354,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SCIF_CLK, FN_TCLK0_C, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR1", 0xfffc0024, 32,
- GROUP(3, 4, 2, 2, 2, 4, 4, 4, 3, 2, 2),
+ GROUP(-3, 4, 2, 2, 2, 4, 4, 4, 3, 2, 2),
GROUP(
- /* IP1_31_29 [3] */
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_31_29 [3] RESERVED */
/* IP1_28_25 [4] */
FN_HTX0, FN_TX1, FN_SDATA, FN_CTS0_C,
FN_SUB_TCK, FN_CC5_STATE2, FN_CC5_STATE10, FN_CC5_STATE18,
@@ -3397,10 +3392,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_EX_CS0, FN_RX3_C_IRDA_RX_C, FN_MMC0_D6, FN_FD6 ))
},
{ PINMUX_CFG_REG_VAR("IPSR2", 0xfffc0028, 32,
- GROUP(1, 3, 1, 1, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4),
+ GROUP(-1, 3, 1, 1, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4),
GROUP(
- /* IP2_31 [1] */
- 0, 0,
+ /* IP2_31 [1] RESERVED */
/* IP2_30_28 [3] */
FN_DU0_DG0, FN_LCDOUT8, FN_DREQ1, FN_SCL2,
FN_AUDATA2, 0, 0, 0,
@@ -3545,11 +3539,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU0_DISP, FN_QPOLA, FN_CAN_CLK_C, FN_SCK2_C ))
},
{ PINMUX_CFG_REG_VAR("IPSR5", 0xfffc0034, 32,
- GROUP(1, 2, 1, 4, 3, 4, 2, 2, 2, 2, 1, 1,
+ GROUP(-1, 2, 1, 4, 3, 4, 2, 2, 2, 2, 1, 1,
1, 1, 1, 1, 3),
GROUP(
- /* IP5_31 [1] */
- 0, 0,
+ /* IP5_31 [1] RESERVED */
/* IP5_30_29 [2] */
FN_AUDIO_CLKB, FN_USB_OVC2, FN_CAN_DEBUGOUT0, FN_MOUT0,
/* IP5_28 [1] */
@@ -3592,15 +3585,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_RX5, FN_RTS0_D_TANS_D, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR6", 0xfffc0038, 32,
- GROUP(1, 2, 2, 2, 2, 3, 2, 3, 3, 3, 1, 2,
+ GROUP(-1, 2, -2, 2, 2, 3, 2, 3, 3, 3, 1, 2,
2, 2, 2),
GROUP(
- /* IP6_31 [1] */
- 0, 0,
+ /* IP6_31 [1] RESERVED */
/* IP6_30_29 [2] */
FN_SSI_SCK6, FN_ADICHS0, FN_CAN0_TX, FN_IERX_B,
- /* IP_28_27 [2] */
- 0, 0, 0, 0,
+ /* IP6_28_27 [2] RESERVED */
/* IP6_26_25 [2] */
FN_SSI_SDATA5, FN_ADIDATA, FN_CAN_DEBUGOUT12, FN_RX3_IRDA_RX,
/* IP6_24_23 [2] */
@@ -3631,11 +3622,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SSI_SCK0129, FN_CAN_DEBUGOUT1, FN_MOUT1, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xfffc003c, 32,
- GROUP(1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3,
+ GROUP(-1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3,
3, 2, 2),
GROUP(
- /* IP7_31 [1] */
- 0, 0,
+ /* IP7_31 [1] RESERVED */
/* IP7_30_29 [2] */
FN_SD0_WP, FN_DACK2, FN_CTS1_B, 0,
/* IP7_28_27 [2] */
@@ -3669,10 +3659,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SSI_WS6, FN_ADICHS1, FN_CAN0_RX, FN_IETX_B ))
},
{ PINMUX_CFG_REG_VAR("IPSR8", 0xfffc0040, 32,
- GROUP(1, 3, 3, 2, 2, 1, 1, 1, 2, 4, 4, 4, 4),
+ GROUP(-1, 3, 3, 2, 2, 1, 1, 1, 2, 4, 4, 4, 4),
GROUP(
- /* IP8_31 [1] */
- 0, 0,
+ /* IP8_31 [1] RESERVED */
/* IP8_30_28 [3] */
FN_VI0_VSYNC, FN_VI0_DATA1_B_VI0_B1_B, FN_RTS1_C_TANS_C, FN_RX4_D,
FN_PWMFSW0_C, 0, 0, 0,
@@ -3713,11 +3702,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR9", 0xfffc0044, 32,
- GROUP(2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 1,
+ GROUP(-2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 1,
1, 1, 1, 2, 2),
GROUP(
- /* IP9_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP9_31_30 [2] RESERVED */
/* IP9_29_28 [2] */
FN_VI0_G7, FN_ETH_RXD1, FN_SD2_DAT3_B, FN_ARM_TRACEDATA_9,
/* IP9_27_26 [2] */
@@ -3790,10 +3778,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ARM_TRACEDATA_10, FN_DREQ0_C, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR11", 0xfffc004c, 32,
- GROUP(2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(-2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
GROUP(
- /* IP11_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP11_31_30 [2] RESERVED */
/* IP11_29_27 [3] */
FN_VI1_G1, FN_VI3_DATA1, FN_SSI_SCK1, FN_TS_SDEN1,
FN_DACK2_B, FN_RX2, FN_HRTS0_B, 0,
@@ -3826,19 +3813,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ADICLK_B, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR12", 0xfffc0050, 32,
- GROUP(4, 4, 4, 2, 3, 3, 3, 3, 3, 3),
+ GROUP(-14, 3, 3, 3, 3, 3, 3),
GROUP(
- /* IP12_31_28 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP12_27_24 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP12_23_20 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP12_19_18 [2] */
- 0, 0, 0, 0,
+ /* IP12_31_18 [14] RESERVED */
/* IP12_17_15 [3] */
FN_VI1_G7, FN_VI3_DATA7, FN_GPS_MAG, FN_FCE,
FN_SCK4_B, 0, 0, 0,
@@ -3904,7 +3881,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_EXBUS0_0, FN_SEL_EXBUS0_1, FN_SEL_EXBUS0_2, 0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xfffc0094, 32,
- GROUP(2, 2, 2, 2, 1, 1, 1, 3, 1, 2, 2, 2,
+ GROUP(2, 2, 2, 2, 1, 1, 1, 3, 1, -6,
2, 1, 1, 2, 1, 2, 2),
GROUP(
/* SEL_TMU1 [2] */
@@ -3926,12 +3903,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_PWMFSW_3, FN_SEL_PWMFSW_4, 0, 0, 0,
/* SEL_ADI [1] */
FN_SEL_ADI_0, FN_SEL_ADI_1,
- /* [2] */
- 0, 0, 0, 0,
- /* [2] */
- 0, 0, 0, 0,
- /* [2] */
- 0, 0, 0, 0,
+ /* [6] RESERVED */
/* SEL_GPS [2] */
FN_SEL_GPS_0, FN_SEL_GPS_1, FN_SEL_GPS_2, FN_SEL_GPS_3,
/* SEL_SIM [1] */
diff --git a/drivers/pinctrl/renesas/pfc-r8a7790.c b/drivers/pinctrl/renesas/pfc-r8a7790.c
index 9db9e61d96bc..ee21d650991b 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7790.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7790.c
@@ -5122,10 +5122,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_0_FN, FN_IP14_21_19 ))
},
{ PINMUX_CFG_REG_VAR("IPSR0", 0xE6060020, 32,
- GROUP(1, 4, 4, 3, 4, 4, 3, 3, 3, 3),
+ GROUP(-1, 4, 4, 3, 4, 4, 3, 3, 3, 3),
GROUP(
- /* IP0_31 [1] */
- 0, 0,
+ /* IP0_31 [1] RESERVED */
/* IP0_30_27 [4] */
FN_D8, FN_SCIFA1_SCK_C, FN_AVB_TXD0, 0,
FN_VI0_G0, FN_VI0_G0_B, FN_VI2_DATA0_VI2_B0,
@@ -5159,10 +5158,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR1", 0xE6060024, 32,
- GROUP(2, 2, 2, 4, 4, 3, 3, 4, 4, 4),
+ GROUP(-2, 2, 2, 4, 4, 3, 3, 4, 4, 4),
GROUP(
- /* IP1_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP1_31_30 [2] RESERVED */
/* IP1_29_28 [2] */
FN_A1, FN_PWM4, 0, 0,
/* IP1_27_26 [2] */
@@ -5197,10 +5195,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR2", 0xE6060028, 32,
- GROUP(3, 3, 4, 4, 3, 3, 3, 3, 3, 3),
+ GROUP(-3, 3, 4, 4, 3, 3, 3, 3, 3, 3),
GROUP(
- /* IP2_31_29 [3] */
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_31_29 [3] RESERVED */
/* IP2_28_26 [3] */
FN_A10, FN_SSI_SDATA5_B, FN_MSIOF2_SYNC, FN_VI0_R6,
FN_VI0_R6_B, FN_VI2_DATA2_VI2_B2_B, 0, 0,
@@ -5261,10 +5258,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR4", 0xE6060030, 32,
- GROUP(2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(-2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
GROUP(
- /* IP4_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP4_31_30 [2] RESERVED */
/* IP4_29_27 [3] */
FN_EX_CS2_N, FN_GPS_SIGN, FN_HRTS1_N_B,
FN_VI3_CLKENB, FN_VI1_G0, FN_VI1_G0_B, FN_VI2_R2, 0,
@@ -5295,10 +5291,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
))
},
{ PINMUX_CFG_REG_VAR("IPSR5", 0xE6060034, 32,
- GROUP(2, 3, 3, 3, 3, 3, 2, 3, 4, 3, 3),
+ GROUP(-2, 3, 3, 3, 3, 3, 2, 3, 4, 3, 3),
GROUP(
- /* IP5_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP5_31_30 [2] RESERVED */
/* IP5_29_27 [3] */
FN_DREQ0_N, FN_VI1_HSYNC_N, FN_VI1_HSYNC_N_B, FN_VI2_R7,
FN_SSI_SCK78_C, FN_SSI_WS78_B, 0, 0,
@@ -5368,10 +5363,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI1_VSYNC_N_B, FN_SSI_WS78_C, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xE606003C, 32,
- GROUP(1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 3, 3),
+ GROUP(-1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 3, 3),
GROUP(
- /* IP7_31 [1] */
- 0, 0,
+ /* IP7_31 [1] RESERVED */
/* IP7_30_29 [2] */
FN_VI0_DATA0_VI0_B0, FN_ATACS10_N, FN_AVB_RXD2, 0,
/* IP7_28_27 [2] */
@@ -5404,11 +5398,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SIM0_D_C, FN_HCTS0_N_F, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR8", 0xE6060040, 32,
- GROUP(1, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2,
+ GROUP(-1, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2),
GROUP(
- /* IP8_31 [1] */
- 0, 0,
+ /* IP8_31 [1] RESERVED */
/* IP8_30_29 [2] */
FN_SD0_CMD, FN_SCIFB1_SCK_B, FN_VI1_DATA1_VI1_B1_B, 0,
/* IP8_28 [1] */
@@ -5482,10 +5475,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SD0_DAT0, FN_SCIFB1_RXD_B, FN_VI1_DATA2_VI1_B2_B, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR10", 0xE6060048, 32,
- GROUP(2, 4, 3, 4, 4, 4, 4, 3, 4),
+ GROUP(-2, 4, 3, 4, 4, 4, 4, 3, 4),
GROUP(
- /* IP10_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP10_31_30 [2] RESERVED */
/* IP10_29_26 [4] */
FN_SD2_CD, FN_MMC0_D4, FN_TS_SDAT0_B, FN_USB2_EXTP, FN_GLO_I0,
FN_VI0_DATA6_VI0_B6_B, FN_HCTS0_N_D, FN_TS_SDAT1_B,
@@ -5558,10 +5550,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_TS_SCK1_B, FN_GLO_I1_B, FN_VI3_DATA7_B, 0, 0, 0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR12", 0xE6060050, 32,
- GROUP(1, 3, 3, 2, 3, 3, 3, 3, 3, 2, 2, 2, 2),
+ GROUP(-1, 3, 3, 2, 3, 3, 3, 3, 3, 2, 2, 2, 2),
GROUP(
- /* IP12_31 [1] */
- 0, 0,
+ /* IP12_31 [1] RESERVED */
/* IP12_30_28 [3] */
FN_SSI_WS5, FN_SCIFB1_RXD, FN_IECLK_B,
FN_DU2_EXVSYNC_DU2_VSYNC, FN_QSTB_QHE,
@@ -5598,10 +5589,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SSI_WS0129, FN_CAN0_TX_B, FN_MOUT1, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR13", 0xE6060054, 32,
- GROUP(1, 2, 3, 3, 4, 3, 3, 3, 3, 4, 3),
+ GROUP(-1, 2, 3, 3, 4, 3, 3, 3, 3, 4, 3),
GROUP(
- /* IP13_31 [1] */
- 0, 0,
+ /* IP13_31 [1] RESERVED */
/* IP13_30_29 [2] */
FN_AUDIO_CLKA, FN_SCIFB2_RTS_N, FN_CAN_DEBUGOUT14, 0,
/* IP13_28_26 [3] */
@@ -5635,10 +5625,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_LCDOUT2, FN_CAN_DEBUGOUT5, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR14", 0xE6060058, 32,
- GROUP(1, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3),
+ GROUP(-1, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3),
GROUP(
- /* IP14_30 [1] */
- 0, 0,
+ /* IP14_31 [1] RESERVED */
/* IP14_30_28 [3] */
FN_SCIFA1_RTS_N, FN_AD_NCS_N, FN_RTS1_N,
FN_MSIOF3_TXD, FN_DU1_DOTCLKOUT, FN_QSTVB_QVE,
@@ -5674,10 +5663,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_REMOCON, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR15", 0xE606005C, 32,
- GROUP(2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3),
+ GROUP(-2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3),
GROUP(
- /* IP15_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP15_31_30 [2] RESERVED */
/* IP15_29_28 [2] */
FN_MSIOF0_TXD, FN_ADICHS1, FN_DU2_DG6, FN_LCDOUT14,
/* IP15_27_26 [2] */
@@ -5710,26 +5698,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_LCDOUT15, FN_SCIF_CLK_B, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR16", 0xE6060160, 32,
- GROUP(4, 4, 4, 4, 4, 4, 1, 1, 3, 3),
+ GROUP(-24, 1, 1, 3, 3),
GROUP(
- /* IP16_31_28 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP16_27_24 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP16_23_20 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP16_19_16 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP16_15_12 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP16_11_8 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_31_8 [24] RESERVED */
/* IP16_7 [1] */
FN_USB1_OVC, FN_TCLK1_B,
/* IP16_6 [1] */
@@ -5743,7 +5714,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
},
{ PINMUX_CFG_REG_VAR("MOD_SEL", 0xE6060090, 32,
GROUP(3, 2, 2, 3, 2, 1, 1, 1, 2, 1, 2, 1,
- 1, 1, 1, 2, 1, 1, 2, 1, 1),
+ 1, 1, 1, 2, -1, 1, 2, 1, 1),
GROUP(
/* SEL_SCIF1 [3] */
FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2, FN_SEL_SCIF1_3,
@@ -5782,7 +5753,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_TSIF1 [2] */
FN_SEL_TSIF1_0, FN_SEL_TSIF1_1, FN_SEL_TSIF1_2, 0,
/* RESERVED [1] */
- 0, 0,
/* SEL_LBS [1] */
FN_SEL_LBS_0, FN_SEL_LBS_1,
/* SEL_TSIF0 [2] */
@@ -5793,11 +5763,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_SOF0_0, FN_SEL_SOF0_1, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE6060094, 32,
- GROUP(3, 1, 1, 1, 2, 1, 2, 1, 2, 1, 1, 1,
- 3, 3, 2, 3, 2, 2),
+ GROUP(-3, 1, 1, 1, 2, 1, 2, 1, -2, 1, 1, 1,
+ 3, 3, 2, -3, 2, 2),
GROUP(
/* RESERVED [3] */
- 0, 0, 0, 0, 0, 0, 0, 0,
/* SEL_TMU1 [1] */
FN_SEL_TMU1_0, FN_SEL_TMU1_1,
/* SEL_HSCIF1 [1] */
@@ -5813,7 +5782,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_CAN1 [1] */
FN_SEL_CAN1_0, FN_SEL_CAN1_1,
/* RESERVED [2] */
- 0, 0, 0, 0,
/* SEL_SCIF2 [1] */
FN_SEL_SCIF2_0, FN_SEL_SCIF2_1,
/* SEL_ADI [1] */
@@ -5829,36 +5797,22 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_GPS [2] */
FN_SEL_GPS_0, FN_SEL_GPS_1, FN_SEL_GPS_2, 0,
/* RESERVED [3] */
- 0, 0, 0, 0, 0, 0, 0, 0,
/* SEL_SIM [2] */
FN_SEL_SIM_0, FN_SEL_SIM_1, FN_SEL_SIM_2, 0,
/* SEL_SSI8 [2] */
FN_SEL_SSI8_0, FN_SEL_SSI8_1, FN_SEL_SSI8_2, 0, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL3", 0xE6060098, 32,
- GROUP(1, 1, 2, 4, 4, 2, 2, 4, 2, 3, 2, 3, 2),
+ GROUP(1, 1, -12, 2, -6, 3, 2, 3, 2),
GROUP(
/* SEL_IICDVFS [1] */
FN_SEL_IICDVFS_0, FN_SEL_IICDVFS_1,
/* SEL_IIC0 [1] */
FN_SEL_IIC0_0, FN_SEL_IIC0_1,
- /* RESERVED [2] */
- 0, 0, 0, 0,
- /* RESERVED [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED [2] */
- 0, 0, 0, 0,
+ /* RESERVED [12] */
/* SEL_IEB [2] */
FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0,
- /* RESERVED [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED [2] */
- 0, 0, 0, 0,
+ /* RESERVED [6] */
/* SEL_IIC2 [3] */
FN_SEL_IIC2_0, FN_SEL_IIC2_1, FN_SEL_IIC2_2, FN_SEL_IIC2_3,
FN_SEL_IIC2_4, 0, 0, 0,
diff --git a/drivers/pinctrl/renesas/pfc-r8a7791.c b/drivers/pinctrl/renesas/pfc-r8a7791.c
index 076a8b7d71de..d57458504117 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7791.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7791.c
@@ -5686,11 +5686,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_7_0_FN, FN_IP15_17_15 ))
},
{ PINMUX_CFG_REG_VAR("IPSR0", 0xE6060020, 32,
- GROUP(1, 2, 2, 2, 2, 2, 2, 3, 1, 1, 1, 1,
+ GROUP(-1, 2, 2, 2, 2, 2, 2, 3, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
GROUP(
- /* IP0_31 [1] */
- 0, 0,
+ /* IP0_31 [1] RESERVED */
/* IP0_30_29 [2] */
FN_A6, FN_MSIOF1_SCK,
0, 0,
@@ -5783,10 +5782,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR2", 0xE6060028, 32,
- GROUP(2, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3),
+ GROUP(-2, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3),
GROUP(
- /* IP2_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP2_31_30 [2] RESERVED */
/* IP2_29_27 [3] */
FN_EX_CS3_N, FN_ATADIR0_N, FN_MSIOF2_TXD,
FN_ATAG0_N, 0, FN_EX_WAIT1,
@@ -5820,10 +5818,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SCIFB1_TXD_C, 0, FN_SCIFB1_SCK_B, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR3", 0xE606002C, 32,
- GROUP(1, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 3, 3),
+ GROUP(-1, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 3, 3),
GROUP(
- /* IP3_31 [1] */
- 0, 0,
+ /* IP3_31 [1] RESERVED */
/* IP3_30_28 [3] */
FN_SSI_WS0129, FN_HTX0_C, FN_HTX2_C,
FN_SCIFB0_TXD_C, FN_SCIFB2_TXD_C,
@@ -5859,11 +5856,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR4", 0xE6060030, 32,
- GROUP(1, 3, 2, 2, 2, 1, 1, 1, 3, 3, 3, 2,
+ GROUP(-1, 3, 2, 2, 2, 1, 1, 1, 3, 3, 3, 2,
3, 3, 2),
GROUP(
- /* IP4_31 [1] */
- 0, 0,
+ /* IP4_31 [1] RESERVED */
/* IP4_30_28 [3] */
FN_SSI_SCK5, FN_MSIOF1_SCK_C, FN_TS_SDATA0, FN_GLO_I0,
FN_MSIOF2_SYNC_D, FN_VI1_R2_B,
@@ -5943,10 +5939,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR6", 0xE6060038, 32,
- GROUP(2, 3, 3, 3, 2, 3, 2, 2, 2, 2, 2, 3, 3),
+ GROUP(-2, 3, 3, 3, 2, 3, 2, 2, 2, 2, 2, 3, 3),
GROUP(
- /* IP6_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP6_31_30 [2] RESERVED */
/* IP6_29_27 [3] */
FN_IRQ8, FN_HRTS1_N_C, FN_MSIOF1_RXD_B,
FN_GPS_SIGN_C, FN_GPS_SIGN_D,
@@ -5984,10 +5979,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xE606003C, 32,
- GROUP(2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 3),
+ GROUP(-2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 3),
GROUP(
- /* IP7_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP7_31_30 [2] RESERVED */
/* IP7_29_27 [3] */
FN_DU1_DG2, FN_LCDOUT10, FN_VI1_DATA4_B, FN_SCIF1_SCK_B,
FN_SCIFA1_SCK, FN_SSI_SCK78_B,
@@ -6026,10 +6020,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR8", 0xE6060040, 32,
- GROUP(1, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(-1, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3),
GROUP(
- /* IP8_31 [1] */
- 0, 0,
+ /* IP8_31 [1] RESERVED */
/* IP8_30_28 [3] */
FN_DU1_DB5, FN_LCDOUT21, FN_TX3, FN_SCIFA3_TXD, FN_CAN1_TX,
0, 0, 0,
@@ -6201,10 +6194,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_I2C1_SDA_D, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR12", 0xE6060050, 32,
- GROUP(2, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2),
+ GROUP(-2, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2),
GROUP(
- /* IP12_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP12_31_30 [2] RESERVED */
/* IP12_29_27 [3] */
FN_STP_ISCLK_0, FN_AVB_TX_EN, FN_SCIFB2_RXD_D,
FN_ADICS_SAMP_B, FN_MSIOF0_SCK_C,
@@ -6243,11 +6235,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ETH_RX_ER, FN_AVB_CRS, FN_I2C3_SCL, FN_IIC0_SCL, ))
},
{ PINMUX_CFG_REG_VAR("IPSR13", 0xE6060054, 32,
- GROUP(1, 3, 1, 1, 1, 2, 1, 3, 3, 1, 1, 1,
+ GROUP(-1, 3, 1, 1, 1, 2, 1, 3, 3, 1, 1, 1,
1, 1, 1, 3, 2, 2, 3),
GROUP(
- /* IP13_31 [1] */
- 0, 0,
+ /* IP13_31 [1] RESERVED */
/* IP13_30_28 [3] */
FN_SD1_CD, FN_PWM0, FN_TPU_TO0, FN_I2C1_SCL_C,
0, 0, 0, 0,
@@ -6340,10 +6331,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SD1_WP, FN_PWM1_B, FN_I2C1_SDA_C, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR15", 0xE606005C, 32,
- GROUP(2, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2),
+ GROUP(-2, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2),
GROUP(
- /* IP15_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP15_31_30 [2] RESERVED */
/* IP15_29_27 [3] */
FN_HTX0, FN_SCIFB0_TXD, 0, FN_GLO_SCLK_C,
FN_CAN0_TX_B, FN_VI1_DATA5_C,
@@ -6382,23 +6372,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SIM0_RST, FN_IETX, FN_CAN1_TX_D, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR16", 0xE6060160, 32,
- GROUP(4, 4, 4, 4, 4, 2, 2, 2, 3, 3),
+ GROUP(-20, 2, 2, 2, 3, 3),
GROUP(
- /* IP16_31_28 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP16_27_24 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP16_23_20 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP16_19_16 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP16_15_12 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED [20] */
/* IP16_11_10 [2] */
FN_HRTS1_N, FN_SCIFB1_RTS_N, FN_MLB_DAT, FN_CAN1_RX_B,
/* IP16_9_8 [2] */
@@ -6415,11 +6391,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL", 0xE6060090, 32,
- GROUP(1, 2, 2, 2, 3, 2, 1, 1, 1, 1, 3, 2,
- 2, 2, 1, 2, 2, 2),
+ GROUP(-1, 2, 2, 2, 3, 2, 1, 1, 1, 1, 3, -2,
+ 2, -2, 1, 2, 2, 2),
GROUP(
/* RESERVED [1] */
- 0, 0,
/* SEL_SCIF1 [2] */
FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2, FN_SEL_SCIF1_3,
/* SEL_SCIFB [2] */
@@ -6446,11 +6421,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_HSCIF1_3, FN_SEL_HSCIF1_4,
0, 0, 0,
/* RESERVED [2] */
- 0, 0, 0, 0,
/* SEL_VI1 [2] */
FN_SEL_VI1_0, FN_SEL_VI1_1, FN_SEL_VI1_2, 0,
/* RESERVED [2] */
- 0, 0, 0, 0,
/* SEL_TMU [1] */
FN_SEL_TMU1_0, FN_SEL_TMU1_1,
/* SEL_LBS [2] */
@@ -6461,15 +6434,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_SOF0_0, FN_SEL_SOF0_1, FN_SEL_SOF0_2, 0, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE6060094, 32,
- GROUP(3, 1, 1, 3, 2, 1, 1, 2, 2, 1, 3, 2,
- 1, 2, 2, 2, 1, 1, 1),
+ GROUP(3, -1, 1, 3, 2, -1, 1, 2, -2, 1, 3, 2,
+ -1, 2, 2, 2, 1, -1, 1),
GROUP(
/* SEL_SCIF0 [3] */
FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2,
FN_SEL_SCIF0_3, FN_SEL_SCIF0_4,
0, 0, 0,
/* RESERVED [1] */
- 0, 0,
/* SEL_SCIF [1] */
FN_SEL_SCIF_0, FN_SEL_SCIF_1,
/* SEL_CAN0 [3] */
@@ -6479,13 +6451,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_CAN1 [2] */
FN_SEL_CAN1_0, FN_SEL_CAN1_1, FN_SEL_CAN1_2, FN_SEL_CAN1_3,
/* RESERVED [1] */
- 0, 0,
/* SEL_SCIFA2 [1] */
FN_SEL_SCIFA2_0, FN_SEL_SCIFA2_1,
/* SEL_SCIF4 [2] */
FN_SEL_SCIF4_0, FN_SEL_SCIF4_1, FN_SEL_SCIF4_2, 0,
/* RESERVED [2] */
- 0, 0, 0, 0,
/* SEL_ADG [1] */
FN_SEL_ADG_0, FN_SEL_ADG_1,
/* SEL_FM [3] */
@@ -6495,7 +6465,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_SCIFA5 [2] */
FN_SEL_SCIFA5_0, FN_SEL_SCIFA5_1, FN_SEL_SCIFA5_2, 0,
/* RESERVED [1] */
- 0, 0,
/* SEL_GPS [2] */
FN_SEL_GPS_0, FN_SEL_GPS_1, FN_SEL_GPS_2, FN_SEL_GPS_3,
/* SEL_SCIFA4 [2] */
@@ -6505,13 +6474,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_SIM [1] */
FN_SEL_SIM_0, FN_SEL_SIM_1,
/* RESERVED [1] */
- 0, 0,
/* SEL_SSI8 [1] */
FN_SEL_SSI8_0, FN_SEL_SSI8_1, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL3", 0xE6060098, 32,
- GROUP(2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2, 2,
- 3, 2, 2, 2, 1),
+ GROUP(2, 2, 2, 2, 2, 2, 2, 2, 1, 1, -2, 2,
+ 3, 2, -5),
GROUP(
/* SEL_HSCIF2 [2] */
FN_SEL_HSCIF2_0, FN_SEL_HSCIF2_1,
@@ -6536,7 +6504,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_SCIF5 [1] */
FN_SEL_SCIF5_0, FN_SEL_SCIF5_1,
/* RESERVED [2] */
- 0, 0, 0, 0,
/* SEL_I2C2 [2] */
FN_SEL_I2C2_0, FN_SEL_I2C2_1, FN_SEL_I2C2_2, FN_SEL_I2C2_3,
/* SEL_I2C1 [3] */
@@ -6545,16 +6512,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0,
/* SEL_I2C0 [2] */
FN_SEL_I2C0_0, FN_SEL_I2C0_1, FN_SEL_I2C0_2, 0,
- /* RESERVED [2] */
- 0, 0, 0, 0,
- /* RESERVED [2] */
- 0, 0, 0, 0,
- /* RESERVED [1] */
- 0, 0, ))
+ /* RESERVED [5] */ ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL4", 0xE606009C, 32,
- GROUP(3, 2, 2, 1, 1, 1, 1, 3, 2, 2, 3, 1,
- 1, 1, 2, 2, 2, 2),
+ GROUP(3, 2, 2, -1, 1, 1, 1, 3, -4, 3, -1,
+ 1, 1, 2, -6),
GROUP(
/* SEL_SOF1 [3] */
FN_SEL_SOF1_0, FN_SEL_SOF1_1, FN_SEL_SOF1_2, FN_SEL_SOF1_3,
@@ -6565,7 +6527,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_DIS [2] */
FN_SEL_DIS_0, FN_SEL_DIS_1, FN_SEL_DIS_2, 0,
/* RESERVED [1] */
- 0, 0,
/* SEL_RAD [1] */
FN_SEL_RAD_0, FN_SEL_RAD_1,
/* SEL_RCN [1] */
@@ -6577,27 +6538,19 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_SCIF2_3, FN_SEL_SCIF2_4,
0, 0, 0,
/* RESERVED [2] */
- 0, 0, 0, 0,
/* RESERVED [2] */
- 0, 0, 0, 0,
/* SEL_SOF2 [3] */
FN_SEL_SOF2_0, FN_SEL_SOF2_1, FN_SEL_SOF2_2,
FN_SEL_SOF2_3, FN_SEL_SOF2_4,
0, 0, 0,
/* RESERVED [1] */
- 0, 0,
/* SEL_SSI1 [1] */
FN_SEL_SSI1_0, FN_SEL_SSI1_1,
/* SEL_SSI0 [1] */
FN_SEL_SSI0_0, FN_SEL_SSI0_1,
/* SEL_SSP [2] */
FN_SEL_SSP_0, FN_SEL_SSP_1, FN_SEL_SSP_2, 0,
- /* RESERVED [2] */
- 0, 0, 0, 0,
- /* RESERVED [2] */
- 0, 0, 0, 0,
- /* RESERVED [2] */
- 0, 0, 0, 0, ))
+ /* RESERVED [6] */ ))
},
{ },
};
diff --git a/drivers/pinctrl/renesas/pfc-r8a7792.c b/drivers/pinctrl/renesas/pfc-r8a7792.c
index 3e101f630148..808a85d62415 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7792.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7792.c
@@ -1999,16 +1999,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_1_FN, FN_IP0_1,
GP_0_0_FN, FN_IP0_0 ))
},
- { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR1", 0xE6060008, 32,
+ GROUP(-9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP1_31_23 RESERVED */
GP_1_22_FN, FN_DU1_CDE,
GP_1_21_FN, FN_DU1_DISP,
GP_1_20_FN, FN_DU1_EXODDF_DU1_ODDF_DISP_CDE,
@@ -2101,22 +2096,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_1_FN, FN_A17,
GP_3_0_FN, FN_A16 ))
},
- { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR4", 0xE6060014, 32,
+ GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP4_31_17 RESERVED */
GP_4_16_FN, FN_VI0_FIELD,
GP_4_15_FN, FN_VI0_D11_G3_Y3,
GP_4_14_FN, FN_VI0_D10_G2_Y2,
@@ -2135,22 +2119,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_1_FN, FN_VI0_CLKENB,
GP_4_0_FN, FN_VI0_CLK ))
},
- { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR5", 0xE6060018, 32,
+ GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP5_31_17 RESERVED */
GP_5_16_FN, FN_VI1_FIELD,
GP_5_15_FN, FN_VI1_D11_G3_Y3,
GP_5_14_FN, FN_VI1_D10_G2_Y2,
@@ -2169,22 +2142,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_1_FN, FN_VI1_CLKENB,
GP_5_0_FN, FN_VI1_CLK ))
},
- { PINMUX_CFG_REG("GPSR6", 0xE606001C, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR6", 0xE606001C, 32,
+ GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP6_31_17 RESERVED */
GP_6_16_FN, FN_IP2_16,
GP_6_15_FN, FN_IP2_15,
GP_6_14_FN, FN_IP2_14,
@@ -2203,22 +2165,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_1_FN, FN_IP2_1,
GP_6_0_FN, FN_IP2_0 ))
},
- { PINMUX_CFG_REG("GPSR7", 0xE6060020, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR7", 0xE6060020, 32,
+ GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP7_31_17 RESERVED */
GP_7_16_FN, FN_VI3_FIELD,
GP_7_15_FN, FN_IP3_14,
GP_7_14_FN, FN_VI3_D10_Y2,
@@ -2237,22 +2188,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_7_1_FN, FN_IP3_1,
GP_7_0_FN, FN_IP3_0 ))
},
- { PINMUX_CFG_REG("GPSR8", 0xE6060024, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR8", 0xE6060024, 32,
+ GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP8_31_17 RESERVED */
GP_8_16_FN, FN_IP4_24,
GP_8_15_FN, FN_IP4_23,
GP_8_14_FN, FN_IP4_22,
@@ -2271,22 +2211,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_8_1_FN, FN_IP4_0,
GP_8_0_FN, FN_VI4_CLK ))
},
- { PINMUX_CFG_REG("GPSR9", 0xE6060028, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR9", 0xE6060028, 32,
+ GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP9_31_17 RESERVED */
GP_9_16_FN, FN_VI5_FIELD,
GP_9_15_FN, FN_VI5_D11_Y3,
GP_9_14_FN, FN_VI5_D10_Y2,
@@ -2374,15 +2303,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_11_0_FN, FN_IP7_1_0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR0", 0xE6060040, 32,
- GROUP(4, 4,
+ GROUP(-8,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1),
GROUP(
- /* IP0_31_28 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP0_27_24 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_31_24 [8] RESERVED */
/* IP0_23 [1] */
FN_DU0_DB7_C5, 0,
/* IP0_22 [1] */
@@ -2433,17 +2359,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU0_DR0_DATA0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR1", 0xE6060044, 32,
- GROUP(4, 4,
- 1, 1, 1, 1, 1, 1, 1, 1,
+ GROUP(-9, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1),
GROUP(
- /* IP1_31_28 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP1_27_24 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP1_23 [1] */
- 0, 0,
+ /* IP1_31_23 [9] RESERVED */
/* IP1_22 [1] */
FN_A25, FN_SSL,
/* IP1_21 [1] */
@@ -2492,19 +2412,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU0_EXHSYNC_DU0_HSYNC, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR2", 0xE6060048, 32,
- GROUP(4, 4,
- 4, 3, 1,
+ GROUP(-15, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1),
GROUP(
- /* IP2_31_28 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP2_27_24 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP2_23_20 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP2_19_17 [3] */
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_31_17 [15] RESERVED */
/* IP2_16 [1] */
FN_VI2_FIELD, FN_AVB_TXD2,
/* IP2_15 [1] */
@@ -2541,21 +2453,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI2_CLK, FN_AVB_RX_CLK ))
},
{ PINMUX_CFG_REG_VAR("IPSR3", 0xE606004C, 32,
- GROUP(4, 4,
- 4, 4,
- 1, 1, 1, 1, 1, 1, 1, 1,
+ GROUP(-17, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1),
GROUP(
- /* IP3_31_28 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP3_27_24 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP3_23_20 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP3_19_16 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP3_15 [1] */
- 0, 0,
+ /* IP3_31_15 [17] RESERVED */
/* IP3_14 [1] */
FN_VI3_D11_Y3, FN_AVB_AVTP_MATCH,
/* IP3_13 [1] */
@@ -2588,14 +2489,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI3_CLK, FN_AVB_TX_CLK ))
},
{ PINMUX_CFG_REG_VAR("IPSR4", 0xE6060050, 32,
- GROUP(4, 3, 1,
- 1, 1, 1, 2, 2, 2,
+ GROUP(-7, 1, 1, 1, 1, 2, 2, 2,
2, 2, 2, 2, 2, 1, 2, 1, 1),
GROUP(
- /* IP4_31_28 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP4_27_25 [3] */
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP4_31_25 [7] RESERVED */
/* IP4_24 [1] */
FN_VI4_FIELD, FN_VI3_D15_Y7,
/* IP4_23 [1] */
@@ -2630,21 +2527,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI4_CLKENB, FN_VI0_D12_G4_Y4 ))
},
{ PINMUX_CFG_REG_VAR("IPSR5", 0xE6060054, 32,
- GROUP(4, 4,
- 4, 4,
- 4, 1, 1, 1, 1,
+ GROUP(-20, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1),
GROUP(
- /* IP5_31_28 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP5_27_24 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP5_23_20 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP5_19_16 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP5_15_12 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP5_31_12 [20] RESERVED */
/* IP5_11 [1] */
FN_VI5_D8_Y0, FN_VI1_D23_R7,
/* IP5_10 [1] */
@@ -2671,19 +2557,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI5_CLKENB, FN_VI1_D12_G4_Y4_B ))
},
{ PINMUX_CFG_REG_VAR("IPSR6", 0xE6060058, 32,
- GROUP(4, 4,
- 4, 1, 2, 1,
- 2, 2, 2, 2,
+ GROUP(-13, 2, 1, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1, 1),
GROUP(
- /* IP6_31_28 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP6_27_24 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP6_23_20 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP6_19 [1] */
- 0, 0,
+ /* IP6_31_19 [13] RESERVED */
/* IP6_18_17 [2] */
FN_DREQ1_N, FN_RX3, 0, 0,
/* IP6_16 [1] */
@@ -2714,17 +2591,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_MSIOF0_SCK, FN_HSCK0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xE606005C, 32,
- GROUP(4, 4,
- 3, 1, 1, 1, 1, 1,
+ GROUP(-11, 1, 1, 1, 1, 1,
2, 2, 2, 2,
1, 1, 2, 2, 2),
GROUP(
- /* IP7_31_28 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP7_27_24 [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP7_23_21 [3] */
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP7_31_21 [11] RESERVED */
/* IP7_20 [1] */
FN_AUDIO_CLKB, 0,
/* IP7_19 [1] */
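The hunks above and the per-file conversions below all apply the same transformation: bit ranges that hold no functions used to be spelled out in PINMUX_CFG_REG as one "0, 0" enum pair per reserved bit, and are now expressed through PINMUX_CFG_REG_VAR, whose first GROUP() lists signed field widths (a negative width marks a reserved span), so the second GROUP() only has to enumerate real functions. A minimal standalone sketch of the bookkeeping this convention implies (a hypothetical helper, not the kernel's own parser): the absolute widths must still cover the 32-bit register, and each non-reserved field of width w needs 2^w enum IDs in the variable part.

#include <stdio.h>
#include <stdlib.h>

/*
 * Sketch of the GROUP(-15, 1, 1, ...) convention used above: a negative
 * width is a reserved span and contributes no enum IDs, a positive width
 * w contributes 2^w IDs.  The absolute widths must add up to the register
 * width (32 bits here).
 */
static int count_enum_ids(const int *widths, int n, int reg_width)
{
	int bits = 0, ids = 0, i;

	for (i = 0; i < n; i++) {
		int w = abs(widths[i]);

		bits += w;
		if (widths[i] > 0)
			ids += 1 << w;
	}

	return bits == reg_width ? ids : -1;
}

int main(void)
{
	/* GPSR6 above: bits 31..17 reserved, then 17 single-bit fields */
	const int gpsr6[] = { -15, 1, 1, 1, 1, 1, 1, 1, 1, 1,
			      1, 1, 1, 1, 1, 1, 1, 1 };

	/* prints 34: 17 fields x (GP_6_x_FN, FN_IP2_x) pairs */
	printf("%d\n", count_enum_ids(gpsr6, 18, 32));
	return 0;
}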
diff --git a/drivers/pinctrl/renesas/pfc-r8a7794.c b/drivers/pinctrl/renesas/pfc-r8a7794.c
index d1b0e6517382..668643553a70 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7794.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7794.c
@@ -4867,7 +4867,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
},
{ PINMUX_CFG_REG_VAR("IPSR0", 0xE6060020, 32,
GROUP(2, 2, 2, 1, 1, 2, 2, 2, 1, 1, 1, 1,
- 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1),
+ 1, 1, 1, 1, 2, -7, 1),
GROUP(
/* IP0_31_30 [2] */
FN_D5, FN_SCIF4_RXD_B, FN_I2C0_SCL_D, 0,
@@ -4903,25 +4903,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_MMC_CLK, FN_SD2_CLK,
/* IP0_9_8 [2] */
FN_SD1_WP, FN_IRQ7, FN_CAN0_TX, 0,
- /* IP0_7 [1] */
- 0, 0,
- /* IP0_6 [1] */
- 0, 0,
- /* IP0_5 [1] */
- 0, 0,
- /* IP0_4 [1] */
- 0, 0,
- /* IP0_3 [1] */
- 0, 0,
- /* IP0_2 [1] */
- 0, 0,
- /* IP0_1 [1] */
- 0, 0,
+ /* IP0_7_1 [7] RESERVED */
/* IP0_0 [1] */
FN_SD1_CD, FN_CAN0_RX, ))
},
{ PINMUX_CFG_REG_VAR("IPSR1", 0xE6060024, 32,
- GROUP(2, 2, 1, 1, 1, 1, 2, 2, 2, 3, 2, 2,
+ GROUP(2, 2, 1, 1, -1, 1, 2, 2, 2, 3, 2, 2,
3, 2, 2, 2, 2),
GROUP(
/* IP1_31_30 [2] */
@@ -4932,8 +4919,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_A4, FN_SCIFB0_TXD,
/* IP1_26 [1] */
FN_A3, FN_SCIFB0_SCK,
- /* IP1_25 [1] */
- 0, 0,
+ /* IP1_25 [1] RESERVED */
/* IP1_24 [1] */
FN_A1, FN_SCIFB1_TXD,
/* IP1_23_22 [2] */
@@ -5160,12 +5146,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU0_EXVSYNC_DU0_VSYNC, FN_QSTB_QHE, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xE606003C, 32,
- GROUP(1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(1, -1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
GROUP(
/* IP7_31 [1] */
FN_DREQ0_N, FN_SCIFB1_RXD,
- /* IP7_30 [1] */
- 0, 0,
+ /* IP7_30 [1] RESERVED */
/* IP7_29_27 [3] */
FN_ETH_TXD0, FN_VI0_R2, FN_SCIF3_RXD_B, FN_I2C4_SCL_E,
FN_AVB_GTX_CLK, FN_SSI_WS6_B, 0, 0,
@@ -5234,10 +5219,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_AVB_MDC, FN_SSI_SDATA6_B, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR9", 0xE6060044, 32,
- GROUP(1, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3),
+ GROUP(-1, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3),
GROUP(
- /* IP9_31 [1] */
- 0, 0,
+ /* IP9_31 [1] RESERVED */
/* IP9_30_28 [3] */
FN_SCIF1_SCK, FN_PWM3, FN_TCLK2, FN_DU1_DG5,
FN_SSI_SDATA1_B, 0, 0, 0,
@@ -5307,10 +5291,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR11", 0xE606004C, 32,
- GROUP(2, 3, 3, 3, 3, 2, 2, 3, 3, 2, 3, 3),
+ GROUP(-2, 3, 3, 3, 3, 2, 2, 3, 3, 2, 3, 3),
GROUP(
- /* IP11_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP11_31_30 [2] RESERVED */
/* IP11_29_27 [3] */
FN_SSI_SDATA0, FN_MSIOF1_SCK_B, FN_PWM0_B, FN_ADICLK_B,
0, 0, 0, 0,
@@ -5343,10 +5326,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR12", 0xE6060050, 32,
- GROUP(2, 3, 3, 3, 3, 3, 2, 2, 2, 3, 3, 3),
+ GROUP(-2, 3, 3, 3, 3, 3, 2, 2, 2, 3, 3, 3),
GROUP(
- /* IP12_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP12_31_30 [2] RESERVED */
/* IP12_29_27 [3] */
FN_SSI_SCK2, FN_HSCIF1_HTX_B, FN_VI1_DATA2, 0,
FN_ATAG0_N, FN_ETH_RXD1_B, 0, 0,
@@ -5379,18 +5361,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, FN_DREQ1_N_B, 0, 0, ))
},
{ PINMUX_CFG_REG_VAR("IPSR13", 0xE6060054, 32,
- GROUP(1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(-5, 3, 3, 3, 3, 3, 3, 3, 3, 3),
GROUP(
- /* IP13_31 [1] */
- 0, 0,
- /* IP13_30 [1] */
- 0, 0,
- /* IP13_29 [1] */
- 0, 0,
- /* IP13_28 [1] */
- 0, 0,
- /* IP13_27 [1] */
- 0, 0,
+ /* IP13_31_27 [5] RESERVED */
/* IP13_26_24 [3] */
FN_AUDIO_CLKOUT, FN_I2C4_SDA_B, FN_SCIFA5_TXD_D, FN_VI1_VSYNC_N,
FN_TS_SPSYNC_C, 0, FN_FMIN_E, 0,
@@ -5420,23 +5393,21 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, FN_ATACS00_N, FN_ETH_LINK_B, 0, ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL", 0xE6060090, 32,
- GROUP(2, 1, 2, 3, 4, 1, 1, 3, 3, 3, 3, 3, 2, 1),
+ GROUP(2, -1, 2, 3, -4, 1, -1,
+ 3, 3, 3, 3, 3, 2, -1),
GROUP(
/* SEL_ADG [2] */
FN_SEL_ADG_0, FN_SEL_ADG_1, FN_SEL_ADG_2, FN_SEL_ADG_3,
/* RESERVED [1] */
- 0, 0,
/* SEL_CAN [2] */
FN_SEL_CAN_0, FN_SEL_CAN_1, FN_SEL_CAN_2, FN_SEL_CAN_3,
/* SEL_DARC [3] */
FN_SEL_DARC_0, FN_SEL_DARC_1, FN_SEL_DARC_2, FN_SEL_DARC_3,
FN_SEL_DARC_4, 0, 0, 0,
/* RESERVED [4] */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* SEL_ETH [1] */
FN_SEL_ETH_0, FN_SEL_ETH_1,
/* RESERVED [1] */
- 0, 0,
/* SEL_IC200 [3] */
FN_SEL_I2C00_0, FN_SEL_I2C00_1, FN_SEL_I2C00_2, FN_SEL_I2C00_3,
FN_SEL_I2C00_4, 0, 0, 0,
@@ -5454,12 +5425,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_I2C04_4, 0, 0, 0,
/* SEL_I2C05 [2] */
FN_SEL_I2C05_0, FN_SEL_I2C05_1, FN_SEL_I2C05_2, FN_SEL_I2C05_3,
- /* RESERVED [1] */
- 0, 0, ))
+ /* RESERVED [1] */ ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE6060094, 32,
GROUP(2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1,
- 2, 2, 1, 1, 2, 2, 2, 1, 1, 2),
+ 2, 2, -1, 1, 2, 2, 2, 1, 1, -2),
GROUP(
/* SEL_IEB [2] */
FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0,
@@ -5493,7 +5463,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_SCIFA5_0, FN_SEL_SCIFA5_1, FN_SEL_SCIFA5_2,
FN_SEL_SCIFA5_3,
/* RESERVED [1] */
- 0, 0,
/* SEL_TMU [1] */
FN_SEL_TMU_0, FN_SEL_TMU_1,
/* SEL_TSIF0 [2] */
@@ -5506,12 +5475,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1,
/* SEL_HSCIF1 [1] */
FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1,
- /* RESERVED [2] */
- 0, 0, 0, 0, ))
+ /* RESERVED [2] */ ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL3", 0xE6060098, 32,
GROUP(2, 2, 2, 1, 3, 2, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ 1, 1, -12),
GROUP(
/* SEL_SCIF0 [2] */
FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, FN_SEL_SCIF0_3,
@@ -5542,30 +5510,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_SSI8_0, FN_SEL_SSI8_1,
/* SEL_SSI9 [1] */
FN_SEL_SSI9_0, FN_SEL_SSI9_1,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0,
- /* RESERVED [1] */
- 0, 0, ))
+ /* RESERVED [12] */ ))
},
{ },
};
diff --git a/drivers/pinctrl/renesas/pfc-r8a77950.c b/drivers/pinctrl/renesas/pfc-r8a77950.c
index 63c9f6d6468b..4c543ec3a863 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77950.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77950.c
@@ -4701,23 +4701,11 @@ static const struct sh_pfc_function pinmux_functions[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32,
+ GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_16 RESERVED */
GP_0_15_FN, GPSR0_15,
GP_0_14_FN, GPSR0_14,
GP_0_13_FN, GPSR0_13,
@@ -4769,24 +4757,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_1_FN, GPSR1_1,
GP_1_0_FN, GPSR1_0, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR2", 0xe6060108, 32,
+ GROUP(-17, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1),
+ GROUP(
+ /* GP2_31_15 RESERVED */
GP_2_14_FN, GPSR2_14,
GP_2_13_FN, GPSR2_13,
GP_2_12_FN, GPSR2_12,
@@ -4803,23 +4778,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_1_FN, GPSR2_1,
GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32,
+ GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP3_31_16 RESERVED */
GP_3_15_FN, GPSR3_15,
GP_3_14_FN, GPSR3_14,
GP_3_13_FN, GPSR3_13,
@@ -4837,21 +4800,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_1_FN, GPSR3_1,
GP_3_0_FN, GPSR3_0, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR4", 0xe6060110, 32,
+ GROUP(-14, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP4_31_18 RESERVED */
GP_4_17_FN, GPSR4_17,
GP_4_16_FN, GPSR4_16,
GP_4_15_FN, GPSR4_15,
@@ -4939,35 +4892,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_1_FN, GPSR6_1,
GP_6_0_FN, GPSR6_0, ))
},
- { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR7", 0xe606011c, 32,
+ GROUP(-28, 1, 1, 1, 1),
+ GROUP(
+ /* GP7_31_4 RESERVED */
GP_7_3_FN, GPSR7_3,
GP_7_2_FN, GPSR7_2,
GP_7_1_FN, GPSR7_1,
@@ -5148,13 +5076,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP16_7_4
IP16_3_0 ))
},
- { PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4, GROUP(
- /* IP17_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP17_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP17_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP17_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP17_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP17_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("IPSR17", 0xe6060244, 32,
+ GROUP(-24, 4, 4),
+ GROUP(
+ /* IP17_31_8 RESERVED */
IP17_7_4
IP17_3_0 ))
},
@@ -5164,10 +5089,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- GROUP(1, 2, 2, 3, 1, 1, 2, 1, 1, 1, 2, 1,
- 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 1),
+ GROUP(-1, 2, 2, 3, 1, 1, 2, 1, 1, 1, 2, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, -1),
GROUP(
- 0, 0, /* RESERVED 31 */
+ /* RESERVED 31 */
MOD_SEL0_30_29
MOD_SEL0_28_27
MOD_SEL0_26_25_24
@@ -5189,11 +5114,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_5_4
MOD_SEL0_3
MOD_SEL0_2_1
- 0, 0, /* RESERVED 0 */ ))
+ /* RESERVED 0 */ ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1,
- 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1),
+ 1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1),
GROUP(
MOD_SEL1_31_30
MOD_SEL1_29_28_27
@@ -5210,7 +5135,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL1_11
MOD_SEL1_10
MOD_SEL1_9
- 0, 0, 0, 0, /* RESERVED 8, 7 */
+ /* RESERVED 8, 7 */
MOD_SEL1_6
MOD_SEL1_5
MOD_SEL1_4
@@ -5220,35 +5145,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL1_0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32,
- GROUP(1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 1, 2, 1),
+ GROUP(1, 1, 1, -28, 1),
GROUP(
MOD_SEL2_31
MOD_SEL2_30
MOD_SEL2_29
- /* RESERVED 28 */
- 0, 0,
- /* RESERVED 27, 26, 25, 24 */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 23, 22, 21, 20 */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 19, 18, 17, 16 */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 15, 14, 13, 12 */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 11, 10, 9, 8 */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 7, 6, 5, 4 */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 3 */
- 0, 0,
- /* RESERVED 2, 1 */
- 0, 0, 0, 0,
+ /* RESERVED 28-1 */
MOD_SEL2_0 ))
},
{ },
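In the MOD_SEL rewrites of the file above (pfc-r8a77950.c) the same idea removes whole runs of placeholder entries: every field that carried only zeros gets a negative width, and adjacent reserved fields are folded into one span, so MOD_SEL2 goes from GROUP(1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 1, 2, 1) to GROUP(1, 1, 1, -28, 1) while the "/* RESERVED */" comments stay for readability. A rough sketch of that folding step, as a conversion script might perform it (illustrative only):

#include <stdio.h>

/*
 * Fold adjacent reserved (negative) widths into a single entry, e.g.
 * 1, 1, 1, -1, -4, -4, -4, -4, -4, -4, -1, -2, 1  ->  1, 1, 1, -28, 1.
 */
static int merge_reserved(int *widths, int n)
{
	int out = 0, i;

	for (i = 0; i < n; i++) {
		if (out > 0 && widths[i] < 0 && widths[out - 1] < 0)
			widths[out - 1] += widths[i];	/* extend the run */
		else
			widths[out++] = widths[i];
	}
	return out;	/* new number of entries */
}

int main(void)
{
	/* MOD_SEL2 of pfc-r8a77950.c with every reserved field marked */
	int w[] = { 1, 1, 1, -1, -4, -4, -4, -4, -4, -4, -1, -2, 1 };
	int i, n = merge_reserved(w, sizeof(w) / sizeof(w[0]));

	for (i = 0; i < n; i++)
		printf("%d ", w[i]);
	printf("\n");	/* prints: 1 1 1 -28 1 */
	return 0;
}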
diff --git a/drivers/pinctrl/renesas/pfc-r8a77951.c b/drivers/pinctrl/renesas/pfc-r8a77951.c
index 9d6eef4e9d18..d4d271dff055 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77951.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77951.c
@@ -5139,23 +5139,11 @@ static const struct {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32,
+ GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_16 RESERVED */
GP_0_15_FN, GPSR0_15,
GP_0_14_FN, GPSR0_14,
GP_0_13_FN, GPSR0_13,
@@ -5207,24 +5195,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_1_FN, GPSR1_1,
GP_1_0_FN, GPSR1_0, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR2", 0xe6060108, 32,
+ GROUP(-17, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1),
+ GROUP(
+ /* GP2_31_15 RESERVED */
GP_2_14_FN, GPSR2_14,
GP_2_13_FN, GPSR2_13,
GP_2_12_FN, GPSR2_12,
@@ -5241,23 +5216,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_1_FN, GPSR2_1,
GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32,
+ GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP3_31_16 RESERVED */
GP_3_15_FN, GPSR3_15,
GP_3_14_FN, GPSR3_14,
GP_3_13_FN, GPSR3_13,
@@ -5275,21 +5238,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_1_FN, GPSR3_1,
GP_3_0_FN, GPSR3_0, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR4", 0xe6060110, 32,
+ GROUP(-14, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP4_31_18 RESERVED */
GP_4_17_FN, GPSR4_17,
GP_4_16_FN, GPSR4_16,
GP_4_15_FN, GPSR4_15,
@@ -5377,35 +5330,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_1_FN, GPSR6_1,
GP_6_0_FN, GPSR6_0, ))
},
- { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR7", 0xe606011c, 32,
+ GROUP(-28, 1, 1, 1, 1),
+ GROUP(
+ /* GP7_31_4 RESERVED */
GP_7_3_FN, GPSR7_3,
GP_7_2_FN, GPSR7_2,
GP_7_1_FN, GPSR7_1,
@@ -5486,12 +5414,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP6_7_4
IP6_3_0 ))
},
- { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP(
+ { PINMUX_CFG_REG_VAR("IPSR7", 0xe606021c, 32,
+ GROUP(4, 4, 4, 4, -4, 4, 4, 4),
+ GROUP(
IP7_31_28
IP7_27_24
IP7_23_20
IP7_19_16
- /* IP7_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP7_15_12 RESERVED */
IP7_11_8
IP7_7_4
IP7_3_0 ))
@@ -5596,13 +5526,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP17_7_4
IP17_3_0 ))
},
- { PINMUX_CFG_REG("IPSR18", 0xe6060248, 32, 4, GROUP(
- /* IP18_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP18_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP18_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP18_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP18_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP18_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("IPSR18", 0xe6060248, 32,
+ GROUP(-24, 4, 4),
+ GROUP(
+ /* IP18_31_8 RESERVED */
IP18_7_4
IP18_3_0 ))
},
@@ -5612,8 +5539,8 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, 1, 2,
- 1, 1, 1, 2, 2, 1, 2, 3),
+ GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, -1, 2,
+ 1, 1, 1, 2, 2, 1, 2, -3),
GROUP(
MOD_SEL0_31_30_29
MOD_SEL0_28_27
@@ -5625,7 +5552,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_19
MOD_SEL0_18_17
MOD_SEL0_16
- 0, 0, /* RESERVED 15 */
+ /* RESERVED 15 */
MOD_SEL0_14_13
MOD_SEL0_12
MOD_SEL0_11
@@ -5634,12 +5561,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_7_6
MOD_SEL0_5
MOD_SEL0_4_3
- /* RESERVED 2, 1, 0 */
- 0, 0, 0, 0, 0, 0, 0, 0 ))
+ /* RESERVED 2, 1, 0 */ ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1,
- 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1),
+ 1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1),
GROUP(
MOD_SEL1_31_30
MOD_SEL1_29_28_27
@@ -5656,7 +5582,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL1_11
MOD_SEL1_10
MOD_SEL1_9
- 0, 0, 0, 0, /* RESERVED 8, 7 */
+ /* RESERVED 8, 7 */
MOD_SEL1_6
MOD_SEL1_5
MOD_SEL1_4
@@ -5666,8 +5592,8 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL1_0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32,
- GROUP(1, 1, 1, 2, 1, 3, 1, 1, 1, 1, 1, 1,
- 1, 4, 4, 4, 3, 1),
+ GROUP(1, 1, 1, 2, 1, 3, -1, 1, 1, 1, 1, 1,
+ -16, 1),
GROUP(
MOD_SEL2_31
MOD_SEL2_30
@@ -5676,25 +5602,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL2_26
MOD_SEL2_25_24_23
/* RESERVED 22 */
- 0, 0,
MOD_SEL2_21
MOD_SEL2_20
MOD_SEL2_19
MOD_SEL2_18
MOD_SEL2_17
- /* RESERVED 16 */
- 0, 0,
- /* RESERVED 15, 14, 13, 12 */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 11, 10, 9, 8 */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 7, 6, 5, 4 */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 3, 2, 1 */
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED 16-1 */
MOD_SEL2_0 ))
},
{ },
diff --git a/drivers/pinctrl/renesas/pfc-r8a7796.c b/drivers/pinctrl/renesas/pfc-r8a7796.c
index 75ea36829a70..a0096ef5e68d 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7796.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7796.c
@@ -5094,23 +5094,11 @@ static const struct {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32,
+ GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_16 RESERVED */
GP_0_15_FN, GPSR0_15,
GP_0_14_FN, GPSR0_14,
GP_0_13_FN, GPSR0_13,
@@ -5162,24 +5150,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_1_FN, GPSR1_1,
GP_1_0_FN, GPSR1_0, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR2", 0xe6060108, 32,
+ GROUP(-17, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1),
+ GROUP(
+ /* GP2_31_15 RESERVED */
GP_2_14_FN, GPSR2_14,
GP_2_13_FN, GPSR2_13,
GP_2_12_FN, GPSR2_12,
@@ -5196,23 +5171,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_1_FN, GPSR2_1,
GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32,
+ GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP3_31_16 RESERVED */
GP_3_15_FN, GPSR3_15,
GP_3_14_FN, GPSR3_14,
GP_3_13_FN, GPSR3_13,
@@ -5230,21 +5193,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_1_FN, GPSR3_1,
GP_3_0_FN, GPSR3_0, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR4", 0xe6060110, 32,
+ GROUP(-14, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP4_31_18 RESERVED */
GP_4_17_FN, GPSR4_17,
GP_4_16_FN, GPSR4_16,
GP_4_15_FN, GPSR4_15,
@@ -5332,35 +5285,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_1_FN, GPSR6_1,
GP_6_0_FN, GPSR6_0, ))
},
- { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR7", 0xe606011c, 32,
+ GROUP(-28, 1, 1, 1, 1),
+ GROUP(
+ /* GP7_31_4 RESERVED */
GP_7_3_FN, GPSR7_3,
GP_7_2_FN, GPSR7_2,
GP_7_1_FN, GPSR7_1,
@@ -5441,12 +5369,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP6_7_4
IP6_3_0 ))
},
- { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP(
+ { PINMUX_CFG_REG_VAR("IPSR7", 0xe606021c, 32,
+ GROUP(4, 4, 4, 4, -4, 4, 4, 4),
+ GROUP(
IP7_31_28
IP7_27_24
IP7_23_20
IP7_19_16
- /* IP7_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP7_15_12 RESERVED */
IP7_11_8
IP7_7_4
IP7_3_0 ))
@@ -5551,13 +5481,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP17_7_4
IP17_3_0 ))
},
- { PINMUX_CFG_REG("IPSR18", 0xe6060248, 32, 4, GROUP(
- /* IP18_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP18_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP18_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP18_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP18_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP18_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("IPSR18", 0xe6060248, 32,
+ GROUP(-24, 4, 4),
+ GROUP(
+ /* IP18_31_8 RESERVED */
IP18_7_4
IP18_3_0 ))
},
@@ -5567,8 +5494,8 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, 1, 2,
- 1, 1, 1, 2, 2, 1, 2, 3),
+ GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, -1, 2,
+ 1, 1, 1, 2, 2, 1, 2, -3),
GROUP(
MOD_SEL0_31_30_29
MOD_SEL0_28_27
@@ -5580,7 +5507,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_19
MOD_SEL0_18_17
MOD_SEL0_16
- 0, 0, /* RESERVED 15 */
+ /* RESERVED 15 */
MOD_SEL0_14_13
MOD_SEL0_12
MOD_SEL0_11
@@ -5589,12 +5516,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_7_6
MOD_SEL0_5
MOD_SEL0_4_3
- /* RESERVED 2, 1, 0 */
- 0, 0, 0, 0, 0, 0, 0, 0 ))
+ /* RESERVED 2, 1, 0 */ ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1,
- 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1),
+ 1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1),
GROUP(
MOD_SEL1_31_30
MOD_SEL1_29_28_27
@@ -5611,7 +5537,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL1_11
MOD_SEL1_10
MOD_SEL1_9
- 0, 0, 0, 0, /* RESERVED 8, 7 */
+ /* RESERVED 8, 7 */
MOD_SEL1_6
MOD_SEL1_5
MOD_SEL1_4
@@ -5622,7 +5548,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32,
GROUP(1, 1, 1, 2, 1, 3, 1, 1, 1, 1, 1, 1,
- 1, 4, 4, 4, 3, 1),
+ -16, 1),
GROUP(
MOD_SEL2_31
MOD_SEL2_30
@@ -5636,19 +5562,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL2_19
MOD_SEL2_18
MOD_SEL2_17
- /* RESERVED 16 */
- 0, 0,
- /* RESERVED 15, 14, 13, 12 */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 11, 10, 9, 8 */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 7, 6, 5, 4 */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 3, 2, 1 */
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED 16-1 */
MOD_SEL2_0 ))
},
{ },
diff --git a/drivers/pinctrl/renesas/pfc-r8a77965.c b/drivers/pinctrl/renesas/pfc-r8a77965.c
index 6bb7f7543c37..acd0bdf13018 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77965.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77965.c
@@ -5335,23 +5335,11 @@ static const struct {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32,
+ GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_16 RESERVED */
GP_0_15_FN, GPSR0_15,
GP_0_14_FN, GPSR0_14,
GP_0_13_FN, GPSR0_13,
@@ -5403,24 +5391,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_1_FN, GPSR1_1,
GP_1_0_FN, GPSR1_0, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR2", 0xe6060108, 32,
+ GROUP(-17, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1),
+ GROUP(
+ /* GP2_31_15 RESERVED */
GP_2_14_FN, GPSR2_14,
GP_2_13_FN, GPSR2_13,
GP_2_12_FN, GPSR2_12,
@@ -5437,23 +5412,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_1_FN, GPSR2_1,
GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32,
+ GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP3_31_16 RESERVED */
GP_3_15_FN, GPSR3_15,
GP_3_14_FN, GPSR3_14,
GP_3_13_FN, GPSR3_13,
@@ -5471,21 +5434,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_1_FN, GPSR3_1,
GP_3_0_FN, GPSR3_0, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR4", 0xe6060110, 32,
+ GROUP(-14, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP4_31_18 RESERVED */
GP_4_17_FN, GPSR4_17,
GP_4_16_FN, GPSR4_16,
GP_4_15_FN, GPSR4_15,
@@ -5573,35 +5526,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_1_FN, GPSR6_1,
GP_6_0_FN, GPSR6_0, ))
},
- { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR7", 0xe606011c, 32,
+ GROUP(-28, 1, 1, 1, 1),
+ GROUP(
+ /* GP7_31_4 RESERVED */
GP_7_3_FN, GPSR7_3,
GP_7_2_FN, GPSR7_2,
GP_7_1_FN, GPSR7_1,
@@ -5682,12 +5610,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP6_7_4
IP6_3_0 ))
},
- { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP(
+ { PINMUX_CFG_REG_VAR("IPSR7", 0xe606021c, 32,
+ GROUP(4, 4, 4, 4, -4, 4, 4, 4),
+ GROUP(
IP7_31_28
IP7_27_24
IP7_23_20
IP7_19_16
- /* IP7_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP7_15_12 RESERVED */
IP7_11_8
IP7_7_4
IP7_3_0 ))
@@ -5792,13 +5722,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP17_7_4
IP17_3_0 ))
},
- { PINMUX_CFG_REG("IPSR18", 0xe6060248, 32, 4, GROUP(
- /* IP18_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP18_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP18_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP18_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP18_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP18_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("IPSR18", 0xe6060248, 32,
+ GROUP(-24, 4, 4),
+ GROUP(
+ /* IP18_31_8 RESERVED */
IP18_7_4
IP18_3_0 ))
},
@@ -5808,8 +5735,8 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, 1, 2,
- 1, 1, 1, 2, 2, 1, 2, 3),
+ GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, -1, 2,
+ 1, 1, 1, 2, 2, 1, 2, -3),
GROUP(
MOD_SEL0_31_30_29
MOD_SEL0_28_27
@@ -5821,7 +5748,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_19
MOD_SEL0_18_17
MOD_SEL0_16
- 0, 0, /* RESERVED 15 */
+ /* RESERVED 15 */
MOD_SEL0_14_13
MOD_SEL0_12
MOD_SEL0_11
@@ -5830,12 +5757,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_7_6
MOD_SEL0_5
MOD_SEL0_4_3
- /* RESERVED 2, 1, 0 */
- 0, 0, 0, 0, 0, 0, 0, 0 ))
+ /* RESERVED 2, 1, 0 */ ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1,
- 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1),
+ 1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1),
GROUP(
MOD_SEL1_31_30
MOD_SEL1_29_28_27
@@ -5852,7 +5778,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL1_11
MOD_SEL1_10
MOD_SEL1_9
- 0, 0, 0, 0, /* RESERVED 8, 7 */
+ /* RESERVED 8, 7 */
MOD_SEL1_6
MOD_SEL1_5
MOD_SEL1_4
@@ -5863,7 +5789,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32,
GROUP(1, 1, 1, 2, 1, 3, 1, 1, 1, 1, 1, 1,
- 1, 4, 4, 4, 3, 1),
+ -16, 1),
GROUP(
MOD_SEL2_31
MOD_SEL2_30
@@ -5877,19 +5803,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL2_19
MOD_SEL2_18
MOD_SEL2_17
- /* RESERVED 16 */
- 0, 0,
- /* RESERVED 15, 14, 13, 12 */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 11, 10, 9, 8 */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 7, 6, 5, 4 */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 3, 2, 1 */
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED 16-1 */
MOD_SEL2_0 ))
},
{ },
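A reserved span can also sit in the middle of a register, as in the IPSR7 descriptors of the three files above (pfc-r8a77951.c, pfc-r8a7796.c, pfc-r8a77965.c), which now read GROUP(4, 4, 4, 4, -4, 4, 4, 4). Because fields are laid out from the most significant bit downward and a negative width still consumes its bits, IP7_11_8 and the fields below it keep their positions even though bits 15..12 list no functions. A small standalone illustration of that walk (not the kernel's code):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* IPSR7 at 0xe606021c after the conversion above */
	const int widths[] = { 4, 4, 4, 4, -4, 4, 4, 4 };
	int msb = 31;
	unsigned int i;

	for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++) {
		int w = abs(widths[i]);

		printf("bits %2d..%2d: %s\n", msb, msb - w + 1,
		       widths[i] < 0 ? "reserved" : "configurable");
		msb -= w;
	}
	return 0;
}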
diff --git a/drivers/pinctrl/renesas/pfc-r8a77970.c b/drivers/pinctrl/renesas/pfc-r8a77970.c
index 94f90c13989e..4a7803eaafaa 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77970.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77970.c
@@ -231,7 +231,6 @@
#define IP8_19_16 FM(CANFD_CLK_A) FM(CLK_EXTFXR) FM(PWM4_B) FM(SPEEDIN_B) FM(SCIF_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP8_23_20 FM(DIGRF_CLKIN) FM(DIGRF_CLKEN_IN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP8_27_24 FM(DIGRF_CLKOUT) FM(DIGRF_CLKEN_OUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define PINMUX_GPSR \
\
@@ -290,8 +289,7 @@ FM(IP8_11_8) IP8_11_8 \
FM(IP8_15_12) IP8_15_12 \
FM(IP8_19_16) IP8_19_16 \
FM(IP8_23_20) IP8_23_20 \
-FM(IP8_27_24) IP8_27_24 \
-FM(IP8_31_28) IP8_31_28
+FM(IP8_27_24) IP8_27_24
/* MOD_SEL0 */ /* 0 */ /* 1 */
#define MOD_SEL0_11 FM(SEL_I2C3_0) FM(SEL_I2C3_1)
@@ -2085,17 +2083,11 @@ static const struct sh_pfc_function pinmux_functions[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32,
+ GROUP(-10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_22 RESERVED */
GP_0_21_FN, GPSR0_21,
GP_0_20_FN, GPSR0_20,
GP_0_19_FN, GPSR0_19,
@@ -2153,22 +2145,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_1_FN, GPSR1_1,
GP_1_0_FN, GPSR1_0, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR2", 0xe6060108, 32,
+ GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP2_31_17 RESERVED */
GP_2_16_FN, GPSR2_16,
GP_2_15_FN, GPSR2_15,
GP_2_14_FN, GPSR2_14,
@@ -2187,22 +2168,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_1_FN, GPSR2_1,
GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32,
+ GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP3_31_17 RESERVED */
GP_3_16_FN, GPSR3_16,
GP_3_15_FN, GPSR3_15,
GP_3_14_FN, GPSR3_14,
@@ -2221,33 +2191,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_1_FN, GPSR3_1,
GP_3_0_FN, GPSR3_0, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR4", 0xe6060110, 32,
+ GROUP(-26, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP4_31_6 RESERVED */
GP_4_5_FN, GPSR4_5,
GP_4_4_FN, GPSR4_4,
GP_4_3_FN, GPSR4_3,
@@ -2255,24 +2202,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_1_FN, GPSR4_1,
GP_4_0_FN, GPSR4_0, ))
},
- { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR5", 0xe6060114, 32,
+ GROUP(-17, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1),
+ GROUP(
+ /* GP5_31_15 RESERVED */
GP_5_14_FN, GPSR5_14,
GP_5_13_FN, GPSR5_13,
GP_5_12_FN, GPSR5_12,
@@ -2374,8 +2308,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP7_7_4
IP7_3_0 ))
},
- { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4, GROUP(
- IP8_31_28
+ { PINMUX_CFG_REG_VAR("IPSR8", 0xe6060220, 32,
+ GROUP(-4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP8_31_28 RESERVED */
IP8_27_24
IP8_23_20
IP8_19_16
@@ -2390,19 +2326,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- GROUP(4, 4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1),
+ GROUP(-20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
GROUP(
- /* RESERVED 31, 30, 29, 28 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 27, 26, 25, 24 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 23, 22, 21, 20 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 19, 18, 17, 16 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 15, 14, 13, 12 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED 31-12 */
MOD_SEL0_11
MOD_SEL0_10
MOD_SEL0_9
diff --git a/drivers/pinctrl/renesas/pfc-r8a77980.c b/drivers/pinctrl/renesas/pfc-r8a77980.c
index c229a5d8fa57..ac03309c5c0c 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77980.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77980.c
@@ -278,9 +278,6 @@
#define IP10_11_8 FM(FSO_CFE_0_N) F_(0, 0) F_(0, 0) FM(VI0_DATA22) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP10_15_12 FM(FSO_CFE_1_N) F_(0, 0) F_(0, 0) FM(VI0_DATA23) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP10_19_16 FM(FSO_TOE_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_23_20 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_27_24 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define PINMUX_GPSR \
\
@@ -340,9 +337,9 @@ FM(IP8_7_4) IP8_7_4 FM(IP9_7_4) IP9_7_4 FM(IP10_7_4) IP10_7_4 \
FM(IP8_11_8) IP8_11_8 FM(IP9_11_8) IP9_11_8 FM(IP10_11_8) IP10_11_8 \
FM(IP8_15_12) IP8_15_12 FM(IP9_15_12) IP9_15_12 FM(IP10_15_12) IP10_15_12 \
FM(IP8_19_16) IP8_19_16 FM(IP9_19_16) IP9_19_16 FM(IP10_19_16) IP10_19_16 \
-FM(IP8_23_20) IP8_23_20 FM(IP9_23_20) IP9_23_20 FM(IP10_23_20) IP10_23_20 \
-FM(IP8_27_24) IP8_27_24 FM(IP9_27_24) IP9_27_24 FM(IP10_27_24) IP10_27_24 \
-FM(IP8_31_28) IP8_31_28 FM(IP9_31_28) IP9_31_28 FM(IP10_31_28) IP10_31_28
+FM(IP8_23_20) IP8_23_20 FM(IP9_23_20) IP9_23_20 \
+FM(IP8_27_24) IP8_27_24 FM(IP9_27_24) IP9_27_24 \
+FM(IP8_31_28) IP8_31_28 FM(IP9_31_28) IP9_31_28
/* MOD_SEL0 */ /* 0 */ /* 1 */
#define MOD_SEL0_11 FM(SEL_CANFD0_0) FM(SEL_CANFD0_1)
@@ -2507,17 +2504,11 @@ static const struct sh_pfc_function pinmux_functions[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32,
+ GROUP(-10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_22 RESERVED */
GP_0_21_FN, GPSR0_21,
GP_0_20_FN, GPSR0_20,
GP_0_19_FN, GPSR0_19,
@@ -2609,22 +2600,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_1_FN, GPSR2_1,
GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32,
+ GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP3_31_17 RESERVED */
GP_3_16_FN, GPSR3_16,
GP_3_15_FN, GPSR3_15,
GP_3_14_FN, GPSR3_14,
@@ -2643,14 +2623,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_1_FN, GPSR3_1,
GP_3_0_FN, GPSR3_0, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR4", 0xe6060110, 32,
+ GROUP(-7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1),
+ GROUP(
+ /* GP4_31_25 RESERVED */
GP_4_24_FN, GPSR4_24,
GP_4_23_FN, GPSR4_23,
GP_4_22_FN, GPSR4_22,
@@ -2677,24 +2655,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_1_FN, GPSR4_1,
GP_4_0_FN, GPSR4_0, ))
},
- { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR5", 0xe6060114, 32,
+ GROUP(-17, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1),
+ GROUP(
+ /* GP5_31_15 RESERVED */
GP_5_14_FN, GPSR5_14,
GP_5_13_FN, GPSR5_13,
GP_5_12_FN, GPSR5_12,
@@ -2816,10 +2781,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP9_7_4
IP9_3_0 ))
},
- { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4, GROUP(
- IP10_31_28
- IP10_27_24
- IP10_23_20
+ { PINMUX_CFG_REG_VAR("IPSR10", 0xe6060228, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP10_31_20 RESERVED */
IP10_19_16
IP10_15_12
IP10_11_8
@@ -2832,19 +2797,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- GROUP(4, 4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1),
+ GROUP(-20, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1),
GROUP(
- /* RESERVED 31, 30, 29, 28 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 27, 26, 25, 24 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 23, 22, 21, 20 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 19, 18, 17, 16 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 15, 14, 13, 12 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED 31-12 */
MOD_SEL0_11
MOD_SEL0_10
MOD_SEL0_9
@@ -2853,7 +2808,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_6
MOD_SEL0_5
MOD_SEL0_4
- 0, 0,
+ /* RESERVED 3 */
MOD_SEL0_2
MOD_SEL0_1
MOD_SEL0_0 ))
diff --git a/drivers/pinctrl/renesas/pfc-r8a77990.c b/drivers/pinctrl/renesas/pfc-r8a77990.c
index 6c4ba9e16058..b0936962fad7 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77990.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77990.c
@@ -22,12 +22,12 @@
PORT_GP_CFG_18(0, fn, sfx, CFG_FLAGS), \
PORT_GP_CFG_23(1, fn, sfx, CFG_FLAGS), \
PORT_GP_CFG_26(2, fn, sfx, CFG_FLAGS), \
- PORT_GP_CFG_12(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_12(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE | SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
PORT_GP_CFG_1(3, 12, fn, sfx, CFG_FLAGS), \
PORT_GP_CFG_1(3, 13, fn, sfx, CFG_FLAGS), \
PORT_GP_CFG_1(3, 14, fn, sfx, CFG_FLAGS), \
PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS), \
- PORT_GP_CFG_11(4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_11(4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE | SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
PORT_GP_CFG_20(5, fn, sfx, CFG_FLAGS), \
PORT_GP_CFG_9(6, fn, sfx, CFG_FLAGS), \
PORT_GP_CFG_1(6, 9, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
@@ -2827,16 +2827,6 @@ static const unsigned int qspi0_ctrl_pins[] = {
static const unsigned int qspi0_ctrl_mux[] = {
QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
};
-static const unsigned int qspi0_data_pins[] = {
- /* QSPI0_MOSI_IO0, QSPI0_MISO_IO1 */
- RCAR_GP_PIN(2, 1), RCAR_GP_PIN(2, 2),
- /* QSPI0_IO2, QSPI0_IO3 */
- RCAR_GP_PIN(2, 3), RCAR_GP_PIN(2, 4),
-};
-static const unsigned int qspi0_data_mux[] = {
- QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
- QSPI0_IO2_MARK, QSPI0_IO3_MARK,
-};
/* - QSPI1 ------------------------------------------------------------------ */
static const unsigned int qspi1_ctrl_pins[] = {
/* QSPI1_SPCLK, QSPI1_SSL */
@@ -2845,16 +2835,51 @@ static const unsigned int qspi1_ctrl_pins[] = {
static const unsigned int qspi1_ctrl_mux[] = {
QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
};
-static const unsigned int qspi1_data_pins[] = {
- /* QSPI1_MOSI_IO0, QSPI1_MISO_IO1 */
+
+/* - RPC -------------------------------------------------------------------- */
+static const unsigned int rpc_clk_pins[] = {
+ /* Octal-SPI flash: C/SCLK */
+ /* HyperFlash: CK, CK# */
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 6),
+};
+static const unsigned int rpc_clk_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI1_SPCLK_MARK,
+};
+static const unsigned int rpc_ctrl_pins[] = {
+ /* Octal-SPI flash: S#/CS, DQS */
+ /* HyperFlash: CS#, RDS */
+ RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 11),
+};
+static const unsigned int rpc_ctrl_mux[] = {
+ QSPI0_SSL_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int rpc_data_pins[] = {
+ /* DQ[0:7] */
+ RCAR_GP_PIN(2, 1), RCAR_GP_PIN(2, 2),
+ RCAR_GP_PIN(2, 3), RCAR_GP_PIN(2, 4),
RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
- /* QSPI1_IO2, QSPI1_IO3 */
RCAR_GP_PIN(2, 9), RCAR_GP_PIN(2, 10),
};
-static const unsigned int qspi1_data_mux[] = {
+static const unsigned int rpc_data_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+ QSPI0_IO2_MARK, QSPI0_IO3_MARK,
QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
QSPI1_IO2_MARK, QSPI1_IO3_MARK,
};
+static const unsigned int rpc_reset_pins[] = {
+ /* RPC_RESET# */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int rpc_reset_mux[] = {
+ RPC_RESET_N_MARK,
+};
+static const unsigned int rpc_int_pins[] = {
+ /* RPC_INT# */
+ RCAR_GP_PIN(2, 12),
+};
+static const unsigned int rpc_int_mux[] = {
+ RPC_INT_N_MARK,
+};
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_a_pins[] = {
@@ -3758,7 +3783,7 @@ static const unsigned int vin5_clk_b_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[255];
+ struct sh_pfc_pin_group common[261];
#ifdef CONFIG_PINCTRL_PFC_R8A77990
struct sh_pfc_pin_group automotive[22];
#endif
@@ -3907,11 +3932,17 @@ static const struct {
SH_PFC_PIN_GROUP(pwm6_a),
SH_PFC_PIN_GROUP(pwm6_b),
SH_PFC_PIN_GROUP(qspi0_ctrl),
- BUS_DATA_PIN_GROUP(qspi0_data, 2),
- BUS_DATA_PIN_GROUP(qspi0_data, 4),
+ SH_PFC_PIN_GROUP_SUBSET(qspi0_data2, rpc_data, 0, 2),
+ SH_PFC_PIN_GROUP_SUBSET(qspi0_data4, rpc_data, 0, 4),
SH_PFC_PIN_GROUP(qspi1_ctrl),
- BUS_DATA_PIN_GROUP(qspi1_data, 2),
- BUS_DATA_PIN_GROUP(qspi1_data, 4),
+ SH_PFC_PIN_GROUP_SUBSET(qspi1_data2, rpc_data, 4, 2),
+ SH_PFC_PIN_GROUP_SUBSET(qspi1_data4, rpc_data, 4, 4),
+ BUS_DATA_PIN_GROUP(rpc_clk, 1),
+ BUS_DATA_PIN_GROUP(rpc_clk, 2),
+ SH_PFC_PIN_GROUP(rpc_ctrl),
+ SH_PFC_PIN_GROUP(rpc_data),
+ SH_PFC_PIN_GROUP(rpc_reset),
+ SH_PFC_PIN_GROUP(rpc_int),
SH_PFC_PIN_GROUP(scif0_data_a),
SH_PFC_PIN_GROUP(scif0_clk_a),
SH_PFC_PIN_GROUP(scif0_ctrl_a),
@@ -4336,6 +4367,15 @@ static const char * const qspi1_groups[] = {
"qspi1_data4",
};
+static const char * const rpc_groups[] = {
+ "rpc_clk1",
+ "rpc_clk2",
+ "rpc_ctrl",
+ "rpc_data",
+ "rpc_reset",
+ "rpc_int",
+};
+
static const char * const scif0_groups[] = {
"scif0_data_a",
"scif0_clk_a",
@@ -4492,7 +4532,7 @@ static const char * const vin5_groups[] = {
};
static const struct {
- struct sh_pfc_function common[49];
+ struct sh_pfc_function common[50];
#ifdef CONFIG_PINCTRL_PFC_R8A77990
struct sh_pfc_function automotive[5];
#endif
@@ -4531,6 +4571,7 @@ static const struct {
SH_PFC_FUNCTION(pwm6),
SH_PFC_FUNCTION(qspi0),
SH_PFC_FUNCTION(qspi1),
+ SH_PFC_FUNCTION(rpc),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
@@ -4562,21 +4603,11 @@ static const struct {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32,
+ GROUP(-14, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_18 RESERVED */
GP_0_17_FN, GPSR0_17,
GP_0_16_FN, GPSR0_16,
GP_0_15_FN, GPSR0_15,
@@ -4596,16 +4627,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_1_FN, GPSR0_1,
GP_0_0_FN, GPSR0_0, ))
},
- { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR1", 0xe6060104, 32,
+ GROUP(-9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP1_31_23 RESERVED */
GP_1_22_FN, GPSR1_22,
GP_1_21_FN, GPSR1_21,
GP_1_20_FN, GPSR1_20,
@@ -4664,23 +4690,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_1_FN, GPSR2_1,
GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32,
+ GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP3_31_16 RESERVED */
GP_3_15_FN, GPSR3_15,
GP_3_14_FN, GPSR3_14,
GP_3_13_FN, GPSR3_13,
@@ -4698,28 +4712,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_3_1_FN, GPSR3_1,
GP_3_0_FN, GPSR3_0, ))
},
- { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR4", 0xe6060110, 32,
+ GROUP(-21, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP4_31_11 RESERVED */
GP_4_10_FN, GPSR4_10,
GP_4_9_FN, GPSR4_9,
GP_4_8_FN, GPSR4_8,
@@ -4732,19 +4728,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_1_FN, GPSR4_1,
GP_4_0_FN, GPSR4_0, ))
},
- { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR5", 0xe6060114, 32,
+ GROUP(-12, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP5_31_20 RESERVED */
GP_5_19_FN, GPSR5_19,
GP_5_18_FN, GPSR5_18,
GP_5_17_FN, GPSR5_17,
@@ -4766,21 +4754,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_1_FN, GPSR5_1,
GP_5_0_FN, GPSR5_0, ))
},
- { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR6", 0xe6060118, 32,
+ GROUP(-14, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP6_31_18 RESERVED */
GP_6_17_FN, GPSR6_17,
GP_6_16_FN, GPSR6_16,
GP_6_15_FN, GPSR6_15,
@@ -4971,11 +4949,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- GROUP(1, 2, 1, 2, 1, 1, 1, 1, 2, 3, 1, 1,
+ GROUP(-1, 2, 1, 2, 1, 1, 1, 1, 2, 3, 1, 1,
1, 2, 2, 1, 1, 1, 2, 1, 1, 1, 2),
GROUP(
/* RESERVED 31 */
- 0, 0,
MOD_SEL0_30_29
MOD_SEL0_28
MOD_SEL0_27_26
@@ -5000,15 +4977,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_1_0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
- GROUP(1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1,
- 1, 2, 2, 2, 1, 1, 2, 1, 4),
+ GROUP(1, 1, 1, 1, -1, 1, 1, 3, 3, 1, 1, 1,
+ 1, 2, 2, 2, 1, 1, 2, 1, -4),
GROUP(
MOD_SEL1_31
MOD_SEL1_30
MOD_SEL1_29
MOD_SEL1_28
/* RESERVED 27 */
- 0, 0,
MOD_SEL1_26
MOD_SEL1_25
MOD_SEL1_24_23_22
@@ -5024,12 +5000,44 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL1_7
MOD_SEL1_6_5
MOD_SEL1_4
- /* RESERVED 3, 2, 1, 0 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
+ /* RESERVED 3, 2, 1, 0 */ ))
},
{ },
};
+static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+ { PINMUX_DRIVE_REG("DRVCTRL8", 0xe6060320) {
+ { RCAR_GP_PIN(3, 0), 18, 2 }, /* SD0_CLK */
+ { RCAR_GP_PIN(3, 1), 15, 2 }, /* SD0_CMD */
+ { RCAR_GP_PIN(3, 2), 12, 2 }, /* SD0_DAT0 */
+ { RCAR_GP_PIN(3, 3), 9, 2 }, /* SD0_DAT1 */
+ { RCAR_GP_PIN(3, 4), 6, 2 }, /* SD0_DAT2 */
+ { RCAR_GP_PIN(3, 5), 3, 2 }, /* SD0_DAT3 */
+ { RCAR_GP_PIN(3, 6), 0, 2 }, /* SD1_CLK */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL9", 0xe6060324) {
+ { RCAR_GP_PIN(3, 7), 29, 2 }, /* SD1_CMD */
+ { RCAR_GP_PIN(3, 8), 26, 2 }, /* SD1_DAT0 */
+ { RCAR_GP_PIN(3, 9), 23, 2 }, /* SD1_DAT1 */
+ { RCAR_GP_PIN(3, 10), 20, 2 }, /* SD1_DAT2 */
+ { RCAR_GP_PIN(3, 11), 17, 2 }, /* SD1_DAT3 */
+ { RCAR_GP_PIN(4, 0), 14, 2 }, /* SD3_CLK */
+ { RCAR_GP_PIN(4, 1), 11, 2 }, /* SD3_CMD */
+ { RCAR_GP_PIN(4, 2), 8, 2 }, /* SD3_DAT0 */
+ { RCAR_GP_PIN(4, 3), 5, 2 }, /* SD3_DAT1 */
+ { RCAR_GP_PIN(4, 4), 2, 2 }, /* SD3_DAT2 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL10", 0xe6060328) {
+ { RCAR_GP_PIN(4, 5), 29, 2 }, /* SD3_DAT3 */
+ { RCAR_GP_PIN(4, 6), 26, 2 }, /* SD3_DAT4 */
+ { RCAR_GP_PIN(4, 7), 23, 2 }, /* SD3_DAT5 */
+ { RCAR_GP_PIN(4, 8), 20, 2 }, /* SD3_DAT6 */
+ { RCAR_GP_PIN(4, 9), 17, 2 }, /* SD3_DAT7 */
+ { RCAR_GP_PIN(4, 10), 14, 2 }, /* SD3_DS */
+ } },
+ { },
+};
+
enum ioctrl_regs {
POCCTRL0,
TDSELCTRL,
@@ -5286,6 +5294,7 @@ const struct sh_pfc_soc_info r8a774c0_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions.common),
.cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
.bias_regs = pinmux_bias_regs,
.ioctrl_regs = pinmux_ioctrl_regs,
@@ -5312,6 +5321,7 @@ const struct sh_pfc_soc_info r8a77990_pinmux_info = {
ARRAY_SIZE(pinmux_functions.automotive),
.cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
.bias_regs = pinmux_bias_regs,
.ioctrl_regs = pinmux_ioctrl_regs,
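Beyond the reserved-field cleanup, the pfc-r8a77990.c hunks above (and the pfc-r8a77995.c hunks that follow) rework the QSPI pin groups around the RPC (Octal-SPI flash / HyperFlash) interface: the per-controller qspiN_data arrays are dropped, a single rpc_data array covering DQ[0:7] is added, and the qspi0/qspi1 data groups become views into it via SH_PFC_PIN_GROUP_SUBSET(group, rpc_data, offset, count). A rough standalone sketch of that slicing, using an illustrative struct rather than the kernel's sh_pfc_pin_group:

#include <stdio.h>

/*
 * RPC owns the eight data pins DQ0..DQ7; the QSPI data groups are
 * (offset, count) views into that array, so each pin is listed once.
 * The pin indices are the GP2 pins from rpc_data_pins[] in
 * pfc-r8a77990.c; the struct is made up for the example.
 */
struct subset {
	const char *name;
	unsigned int offset;
	unsigned int count;
};

static const unsigned int rpc_data[] = { 1, 2, 3, 4, 7, 8, 9, 10 }; /* GP2_n */

static const struct subset groups[] = {
	{ "qspi0_data2", 0, 2 },
	{ "qspi0_data4", 0, 4 },
	{ "qspi1_data2", 4, 2 },
	{ "qspi1_data4", 4, 4 },
};

int main(void)
{
	unsigned int i, j;

	for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
		printf("%s: GP2_[", groups[i].name);
		for (j = 0; j < groups[i].count; j++)
			printf("%s%u", j ? " " : "",
			       rpc_data[groups[i].offset + j]);
		printf("]\n");
	}
	return 0;
}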
diff --git a/drivers/pinctrl/renesas/pfc-r8a77995.c b/drivers/pinctrl/renesas/pfc-r8a77995.c
index 445c903a121a..d949ae59c757 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77995.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77995.c
@@ -1682,6 +1682,68 @@ static const unsigned int pwm3_c_mux[] = {
PWM3_C_MARK,
};
+/* - QSPI0 ------------------------------------------------------------------ */
+static const unsigned int qspi0_ctrl_pins[] = {
+ /* QSPI0_SPCLK, QSPI0_SSL */
+ RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 5),
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+/* - QSPI1 ------------------------------------------------------------------ */
+static const unsigned int qspi1_ctrl_pins[] = {
+ /* QSPI1_SPCLK, QSPI1_SSL */
+ RCAR_GP_PIN(6, 6), RCAR_GP_PIN(6, 11),
+};
+static const unsigned int qspi1_ctrl_mux[] = {
+ QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
+};
+
+/* - RPC -------------------------------------------------------------------- */
+static const unsigned int rpc_clk_pins[] = {
+ /* Octal-SPI flash: C/SCLK */
+ /* HyperFlash: CK, CK# */
+ RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int rpc_clk_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI1_SPCLK_MARK,
+};
+static const unsigned int rpc_ctrl_pins[] = {
+ /* Octal-SPI flash: S#/CS, DQS */
+ /* HyperFlash: CS#, RDS */
+ RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 11),
+};
+static const unsigned int rpc_ctrl_mux[] = {
+ QSPI0_SSL_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int rpc_data_pins[] = {
+ /* DQ[0:7] */
+ RCAR_GP_PIN(6, 1), RCAR_GP_PIN(6, 2),
+ RCAR_GP_PIN(6, 3), RCAR_GP_PIN(6, 4),
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 8),
+ RCAR_GP_PIN(6, 9), RCAR_GP_PIN(6, 10),
+};
+static const unsigned int rpc_data_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+ QSPI0_IO2_MARK, QSPI0_IO3_MARK,
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+ QSPI1_IO2_MARK, QSPI1_IO3_MARK,
+};
+static const unsigned int rpc_reset_pins[] = {
+ /* RPC_RESET# */
+ RCAR_GP_PIN(6, 12),
+};
+static const unsigned int rpc_reset_mux[] = {
+ RPC_RESET_N_MARK,
+};
+static const unsigned int rpc_int_pins[] = {
+ /* RPC_INT# */
+ RCAR_GP_PIN(6, 13),
+};
+static const unsigned int rpc_int_mux[] = {
+ RPC_INT_N_MARK,
+};
+
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_a_pins[] = {
/* RX, TX */
@@ -2085,6 +2147,18 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(pwm3_a),
SH_PFC_PIN_GROUP(pwm3_b),
SH_PFC_PIN_GROUP(pwm3_c),
+ SH_PFC_PIN_GROUP(qspi0_ctrl),
+ SH_PFC_PIN_GROUP_SUBSET(qspi0_data2, rpc_data, 0, 2),
+ SH_PFC_PIN_GROUP_SUBSET(qspi0_data4, rpc_data, 0, 4),
+ SH_PFC_PIN_GROUP(qspi1_ctrl),
+ SH_PFC_PIN_GROUP_SUBSET(qspi1_data2, rpc_data, 4, 2),
+ SH_PFC_PIN_GROUP_SUBSET(qspi1_data4, rpc_data, 4, 4),
+ BUS_DATA_PIN_GROUP(rpc_clk, 1),
+ BUS_DATA_PIN_GROUP(rpc_clk, 2),
+ SH_PFC_PIN_GROUP(rpc_ctrl),
+ SH_PFC_PIN_GROUP(rpc_data),
+ SH_PFC_PIN_GROUP(rpc_reset),
+ SH_PFC_PIN_GROUP(rpc_int),
SH_PFC_PIN_GROUP(scif0_data_a),
SH_PFC_PIN_GROUP(scif0_clk_a),
SH_PFC_PIN_GROUP(scif0_data_b),
@@ -2277,6 +2351,27 @@ static const char * const pwm3_groups[] = {
"pwm3_c",
};
+static const char * const qspi0_groups[] = {
+ "qspi0_ctrl",
+ "qspi0_data2",
+ "qspi0_data4",
+};
+
+static const char * const qspi1_groups[] = {
+ "qspi1_ctrl",
+ "qspi1_data2",
+ "qspi1_data4",
+};
+
+static const char * const rpc_groups[] = {
+ "rpc_clk1",
+ "rpc_clk2",
+ "rpc_ctrl",
+ "rpc_data",
+ "rpc_reset",
+ "rpc_int",
+};
+
static const char * const scif0_groups[] = {
"scif0_data_a",
"scif0_clk_a",
@@ -2373,6 +2468,9 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(pwm1),
SH_PFC_FUNCTION(pwm2),
SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(qspi0),
+ SH_PFC_FUNCTION(qspi1),
+ SH_PFC_FUNCTION(rpc),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
@@ -2388,30 +2486,10 @@ static const struct sh_pfc_function pinmux_functions[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32,
+ GROUP(-23, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_9 RESERVED */
GP_0_8_FN, GPSR0_8,
GP_0_7_FN, GPSR0_7,
GP_0_6_FN, GPSR0_6,
@@ -2490,29 +2568,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_1_FN, GPSR2_1,
GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32,
+ GROUP(-22, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP3_31_10 RESERVED */
GP_3_9_FN, GPSR3_9,
GP_3_8_FN, GPSR3_8,
GP_3_7_FN, GPSR3_7,
@@ -2558,18 +2617,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_1_FN, GPSR4_1,
GP_4_0_FN, GPSR4_0, ))
},
- { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR5", 0xe6060114, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP5_31_21 RESERVED */
GP_5_20_FN, GPSR5_20,
GP_5_19_FN, GPSR5_19,
GP_5_18_FN, GPSR5_18,
@@ -2592,25 +2644,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_1_FN, GPSR5_1,
GP_5_0_FN, GPSR5_0, ))
},
- { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR6", 0xe6060118, 32,
+ GROUP(-18, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1),
+ GROUP(
+ /* GP6_31_14 RESERVED */
GP_6_13_FN, GPSR6_13,
GP_6_12_FN, GPSR6_12,
GP_6_11_FN, GPSR6_11,
@@ -2761,13 +2799,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP12_7_4
IP12_3_0 ))
},
- { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4, GROUP(
- /* IP13_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP13_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP13_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP13_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP13_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* IP13_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("IPSR13", 0xe6060234, 32,
+ GROUP(-24, 4, 4),
+ GROUP(
+ /* IP13_31_8 RESERVED */
IP13_7_4
IP13_3_0 ))
},
@@ -2777,11 +2812,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
- GROUP(1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1,
- 1, 1, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1),
+ GROUP(-1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, -1,
+ 1, 1, 1, 1, 1, 1, -4, 1, 1, 1, 1, 1, 1),
GROUP(
/* RESERVED 31 */
- 0, 0,
MOD_SEL0_30
MOD_SEL0_29
MOD_SEL0_28
@@ -2793,7 +2827,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_20_19
MOD_SEL0_18_17
/* RESERVED 16 */
- 0, 0,
MOD_SEL0_15
MOD_SEL0_14
MOD_SEL0_13
@@ -2801,7 +2834,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_11
MOD_SEL0_10
/* RESERVED 9, 8, 7, 6 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
MOD_SEL0_5
MOD_SEL0_4
MOD_SEL0_3
@@ -2810,7 +2842,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
- GROUP(1, 1, 1, 1, 1, 1, 2, 4, 4, 4, 4, 4, 4),
+ GROUP(1, 1, 1, 1, 1, 1, -26),
GROUP(
MOD_SEL1_31
MOD_SEL1_30
@@ -2818,20 +2850,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL1_28
MOD_SEL1_27
MOD_SEL1_26
- /* RESERVED 25, 24 */
- 0, 0, 0, 0,
- /* RESERVED 23, 22, 21, 20 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 19, 18, 17, 16 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 15, 14, 13, 12 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 11, 10, 9, 8 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 7, 6, 5, 4 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 3, 2, 1, 0 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
+ /* RESERVED 25-0 */ ))
},
{ },
};
diff --git a/drivers/pinctrl/renesas/pfc-r8a779a0.c b/drivers/pinctrl/renesas/pfc-r8a779a0.c
index 4a668a04b7ca..760c83a8740b 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779a0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779a0.c
@@ -389,7 +389,6 @@
#define IP3SR1_19_16 FM(GP1_28) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(D0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3SR1_23_20 FM(GP1_29) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(D1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3SR1_27_24 FM(GP1_30) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(D2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3SR1_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP0SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
#define IP0SR2_3_0 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) F_(0, 0) F_(0, 0) FM(DU_DOTCLKIN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -420,11 +419,8 @@
#define IP2SR2_31_28 FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(EX_WAIT0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP0SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
-#define IP0SR3_3_0 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR3_7_4 FM(CANFD0_TX) FM(FXR_TXDA_B) FM(TX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR3_11_8 FM(CANFD0_RX) FM(RXDA_EXTFXR_B) FM(RX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR3_15_12 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR3_19_16 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR3_23_20 FM(CANFD2_TX) FM(TPU0TO2) FM(PWM0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR3_27_24 FM(CANFD2_RX) FM(TPU0TO3) FM(PWM1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR3_31_28 FM(CANFD3_TX) F_(0, 0) FM(PWM2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -435,8 +431,6 @@
#define IP1SR3_15_12 FM(CANFD5_TX) F_(0, 0) F_(0, 0) FM(FXR_TXENA_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP1SR3_19_16 FM(CANFD5_RX) F_(0, 0) F_(0, 0) FM(FXR_TXENB_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP1SR3_23_20 FM(CANFD6_TX) F_(0, 0) F_(0, 0) FM(STPWT_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR3_27_24 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR3_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP0SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
#define IP0SR4_3_0 FM(AVB0_RX_CTL) FM(AVB0_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -457,14 +451,10 @@
#define IP1SR4_27_24 FM(AVB0_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP1SR4_31_28 FM(AVB0_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP2SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
-#define IP2SR4_3_0 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2SR4_7_4 FM(AVB0_LINK) FM(AVB0_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2SR4_11_8 FM(AVB0_AVTP_MATCH) FM(AVB0_MII_RX_ER) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2SR4_15_12 FM(AVB0_AVTP_CAPTURE) FM(AVB0_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2SR4_19_16 FM(AVB0_AVTP_PPS) FM(AVB0_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR4_23_20 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR4_27_24 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR4_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP0SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
#define IP0SR5_3_0 FM(AVB1_RX_CTL) FM(AVB1_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -485,14 +475,10 @@
#define IP1SR5_27_24 FM(AVB1_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP1SR5_31_28 FM(AVB1_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP2SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
-#define IP2SR5_3_0 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2SR5_7_4 FM(AVB1_LINK) FM(AVB1_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2SR5_11_8 FM(AVB1_AVTP_MATCH) FM(AVB1_MII_RX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2SR5_15_12 FM(AVB1_AVTP_CAPTURE) FM(AVB1_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2SR5_19_16 FM(AVB1_AVTP_PPS) FM(AVB1_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR5_23_20 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR5_27_24 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR5_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define PINMUX_GPSR \
\
@@ -537,7 +523,7 @@ FM(IP0SR1_15_12) IP0SR1_15_12 FM(IP1SR1_15_12) IP1SR1_15_12 FM(IP2SR1_15_12) IP2
FM(IP0SR1_19_16) IP0SR1_19_16 FM(IP1SR1_19_16) IP1SR1_19_16 FM(IP2SR1_19_16) IP2SR1_19_16 FM(IP3SR1_19_16) IP3SR1_19_16 \
FM(IP0SR1_23_20) IP0SR1_23_20 FM(IP1SR1_23_20) IP1SR1_23_20 FM(IP2SR1_23_20) IP2SR1_23_20 FM(IP3SR1_23_20) IP3SR1_23_20 \
FM(IP0SR1_27_24) IP0SR1_27_24 FM(IP1SR1_27_24) IP1SR1_27_24 FM(IP2SR1_27_24) IP2SR1_27_24 FM(IP3SR1_27_24) IP3SR1_27_24 \
-FM(IP0SR1_31_28) IP0SR1_31_28 FM(IP1SR1_31_28) IP1SR1_31_28 FM(IP2SR1_31_28) IP2SR1_31_28 FM(IP3SR1_31_28) IP3SR1_31_28 \
+FM(IP0SR1_31_28) IP0SR1_31_28 FM(IP1SR1_31_28) IP1SR1_31_28 FM(IP2SR1_31_28) IP2SR1_31_28 \
\
FM(IP0SR2_3_0) IP0SR2_3_0 FM(IP1SR2_3_0) IP1SR2_3_0 FM(IP2SR2_3_0) IP2SR2_3_0 \
FM(IP0SR2_7_4) IP0SR2_7_4 FM(IP1SR2_7_4) IP1SR2_7_4 FM(IP2SR2_7_4) IP2SR2_7_4 \
@@ -548,32 +534,32 @@ FM(IP0SR2_23_20) IP0SR2_23_20 FM(IP1SR2_23_20) IP1SR2_23_20 FM(IP2SR2_23_20) IP2
FM(IP0SR2_27_24) IP0SR2_27_24 FM(IP1SR2_27_24) IP1SR2_27_24 FM(IP2SR2_27_24) IP2SR2_27_24 \
FM(IP0SR2_31_28) IP0SR2_31_28 FM(IP1SR2_31_28) IP1SR2_31_28 FM(IP2SR2_31_28) IP2SR2_31_28 \
\
-FM(IP0SR3_3_0) IP0SR3_3_0 FM(IP1SR3_3_0) IP1SR3_3_0 \
+ FM(IP1SR3_3_0) IP1SR3_3_0 \
FM(IP0SR3_7_4) IP0SR3_7_4 FM(IP1SR3_7_4) IP1SR3_7_4 \
FM(IP0SR3_11_8) IP0SR3_11_8 FM(IP1SR3_11_8) IP1SR3_11_8 \
-FM(IP0SR3_15_12) IP0SR3_15_12 FM(IP1SR3_15_12) IP1SR3_15_12 \
-FM(IP0SR3_19_16) IP0SR3_19_16 FM(IP1SR3_19_16) IP1SR3_19_16 \
+ FM(IP1SR3_15_12) IP1SR3_15_12 \
+ FM(IP1SR3_19_16) IP1SR3_19_16 \
FM(IP0SR3_23_20) IP0SR3_23_20 FM(IP1SR3_23_20) IP1SR3_23_20 \
-FM(IP0SR3_27_24) IP0SR3_27_24 FM(IP1SR3_27_24) IP1SR3_27_24 \
-FM(IP0SR3_31_28) IP0SR3_31_28 FM(IP1SR3_31_28) IP1SR3_31_28 \
+FM(IP0SR3_27_24) IP0SR3_27_24 \
+FM(IP0SR3_31_28) IP0SR3_31_28 \
\
-FM(IP0SR4_3_0) IP0SR4_3_0 FM(IP1SR4_3_0) IP1SR4_3_0 FM(IP2SR4_3_0) IP2SR4_3_0 \
+FM(IP0SR4_3_0) IP0SR4_3_0 FM(IP1SR4_3_0) IP1SR4_3_0 \
FM(IP0SR4_7_4) IP0SR4_7_4 FM(IP1SR4_7_4) IP1SR4_7_4 FM(IP2SR4_7_4) IP2SR4_7_4 \
FM(IP0SR4_11_8) IP0SR4_11_8 FM(IP1SR4_11_8) IP1SR4_11_8 FM(IP2SR4_11_8) IP2SR4_11_8 \
FM(IP0SR4_15_12) IP0SR4_15_12 FM(IP1SR4_15_12) IP1SR4_15_12 FM(IP2SR4_15_12) IP2SR4_15_12 \
FM(IP0SR4_19_16) IP0SR4_19_16 FM(IP1SR4_19_16) IP1SR4_19_16 FM(IP2SR4_19_16) IP2SR4_19_16 \
-FM(IP0SR4_23_20) IP0SR4_23_20 FM(IP1SR4_23_20) IP1SR4_23_20 FM(IP2SR4_23_20) IP2SR4_23_20 \
-FM(IP0SR4_27_24) IP0SR4_27_24 FM(IP1SR4_27_24) IP1SR4_27_24 FM(IP2SR4_27_24) IP2SR4_27_24 \
-FM(IP0SR4_31_28) IP0SR4_31_28 FM(IP1SR4_31_28) IP1SR4_31_28 FM(IP2SR4_31_28) IP2SR4_31_28 \
+FM(IP0SR4_23_20) IP0SR4_23_20 FM(IP1SR4_23_20) IP1SR4_23_20 \
+FM(IP0SR4_27_24) IP0SR4_27_24 FM(IP1SR4_27_24) IP1SR4_27_24 \
+FM(IP0SR4_31_28) IP0SR4_31_28 FM(IP1SR4_31_28) IP1SR4_31_28 \
\
-FM(IP0SR5_3_0) IP0SR5_3_0 FM(IP1SR5_3_0) IP1SR5_3_0 FM(IP2SR5_3_0) IP2SR5_3_0 \
+FM(IP0SR5_3_0) IP0SR5_3_0 FM(IP1SR5_3_0) IP1SR5_3_0 \
FM(IP0SR5_7_4) IP0SR5_7_4 FM(IP1SR5_7_4) IP1SR5_7_4 FM(IP2SR5_7_4) IP2SR5_7_4 \
FM(IP0SR5_11_8) IP0SR5_11_8 FM(IP1SR5_11_8) IP1SR5_11_8 FM(IP2SR5_11_8) IP2SR5_11_8 \
FM(IP0SR5_15_12) IP0SR5_15_12 FM(IP1SR5_15_12) IP1SR5_15_12 FM(IP2SR5_15_12) IP2SR5_15_12 \
FM(IP0SR5_19_16) IP0SR5_19_16 FM(IP1SR5_19_16) IP1SR5_19_16 FM(IP2SR5_19_16) IP2SR5_19_16 \
-FM(IP0SR5_23_20) IP0SR5_23_20 FM(IP1SR5_23_20) IP1SR5_23_20 FM(IP2SR5_23_20) IP2SR5_23_20 \
-FM(IP0SR5_27_24) IP0SR5_27_24 FM(IP1SR5_27_24) IP1SR5_27_24 FM(IP2SR5_27_24) IP2SR5_27_24 \
-FM(IP0SR5_31_28) IP0SR5_31_28 FM(IP1SR5_31_28) IP1SR5_31_28 FM(IP2SR5_31_28) IP2SR5_31_28
+FM(IP0SR5_23_20) IP0SR5_23_20 FM(IP1SR5_23_20) IP1SR5_23_20 \
+FM(IP0SR5_27_24) IP0SR5_27_24 FM(IP1SR5_27_24) IP1SR5_27_24 \
+FM(IP0SR5_31_28) IP0SR5_31_28 FM(IP1SR5_31_28) IP1SR5_31_28
/* MOD_SEL2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */
#define MOD_SEL2_15_14 FM(SEL_I2C6_0) F_(0, 0) F_(0, 0) FM(SEL_I2C6_3)
@@ -629,7 +615,36 @@ enum {
};
static const u16 pinmux_data[] = {
+/* Using GP_2_[2-15] requires disabling I2C in MOD_SEL2 */
+#define GP_2_2_FN GP_2_2_FN, FN_SEL_I2C0_0
+#define GP_2_3_FN GP_2_3_FN, FN_SEL_I2C0_0
+#define GP_2_4_FN GP_2_4_FN, FN_SEL_I2C1_0
+#define GP_2_5_FN GP_2_5_FN, FN_SEL_I2C1_0
+#define GP_2_6_FN GP_2_6_FN, FN_SEL_I2C2_0
+#define GP_2_7_FN GP_2_7_FN, FN_SEL_I2C2_0
+#define GP_2_8_FN GP_2_8_FN, FN_SEL_I2C3_0
+#define GP_2_9_FN GP_2_9_FN, FN_SEL_I2C3_0
+#define GP_2_10_FN GP_2_10_FN, FN_SEL_I2C4_0
+#define GP_2_11_FN GP_2_11_FN, FN_SEL_I2C4_0
+#define GP_2_12_FN GP_2_12_FN, FN_SEL_I2C5_0
+#define GP_2_13_FN GP_2_13_FN, FN_SEL_I2C5_0
+#define GP_2_14_FN GP_2_14_FN, FN_SEL_I2C6_0
+#define GP_2_15_FN GP_2_15_FN, FN_SEL_I2C6_0
PINMUX_DATA_GP_ALL(),
+#undef GP_2_2_FN
+#undef GP_2_3_FN
+#undef GP_2_4_FN
+#undef GP_2_5_FN
+#undef GP_2_6_FN
+#undef GP_2_7_FN
+#undef GP_2_8_FN
+#undef GP_2_9_FN
+#undef GP_2_10_FN
+#undef GP_2_11_FN
+#undef GP_2_12_FN
+#undef GP_2_13_FN
+#undef GP_2_14_FN
+#undef GP_2_15_FN
PINMUX_SINGLE(MMC_D7),
PINMUX_SINGLE(MMC_D6),
@@ -3223,14 +3238,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_1_FN, GPSR1_1,
GP_1_0_FN, GPSR1_0, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe6050840, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR2", 0xe6050840, 32,
+ GROUP(-7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP2_31_25 RESERVED */
GP_2_24_FN, GPSR2_24,
GP_2_23_FN, GPSR2_23,
GP_2_22_FN, GPSR2_22,
@@ -3257,22 +3269,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_1_FN, GPSR2_1,
GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe6058840, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR3", 0xe6058840, 32,
+ GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP3_31_17 RESERVED */
GP_3_16_FN, GPSR3_16,
GP_3_15_FN, GPSR3_15,
GP_3_14_FN, GPSR3_14,
@@ -3325,18 +3326,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_1_FN, GPSR4_1,
GP_4_0_FN, GPSR4_0, ))
},
- { PINMUX_CFG_REG("GPSR5", 0xe6060840, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR5", 0xe6060840, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP5_31_21 RESERVED */
GP_5_20_FN, GPSR5_20,
GP_5_19_FN, GPSR5_19,
GP_5_18_FN, GPSR5_18,
@@ -3359,18 +3353,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_5_1_FN, GPSR5_1,
GP_5_0_FN, GPSR5_0, ))
},
- { PINMUX_CFG_REG("GPSR6", 0xe6068040, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR6", 0xe6068040, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP6_31_21 RESERVED */
GP_6_20_FN, GPSR6_20,
GP_6_19_FN, GPSR6_19,
GP_6_18_FN, GPSR6_18,
@@ -3393,18 +3380,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_1_FN, GPSR6_1,
GP_6_0_FN, GPSR6_0, ))
},
- { PINMUX_CFG_REG("GPSR7", 0xe6068840, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR7", 0xe6068840, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP7_31_21 RESERVED */
GP_7_20_FN, GPSR7_20,
GP_7_19_FN, GPSR7_19,
GP_7_18_FN, GPSR7_18,
@@ -3427,18 +3407,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_7_1_FN, GPSR7_1,
GP_7_0_FN, GPSR7_0, ))
},
- { PINMUX_CFG_REG("GPSR8", 0xe6069040, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR8", 0xe6069040, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP8_31_21 RESERVED */
GP_8_20_FN, GPSR8_20,
GP_8_19_FN, GPSR8_19,
GP_8_18_FN, GPSR8_18,
@@ -3461,18 +3434,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_8_1_FN, GPSR8_1,
GP_8_0_FN, GPSR8_0, ))
},
- { PINMUX_CFG_REG("GPSR9", 0xe6069840, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR9", 0xe6069840, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP9_31_21 RESERVED */
GP_9_20_FN, GPSR9_20,
GP_9_19_FN, GPSR9_19,
GP_9_18_FN, GPSR9_18,
@@ -3530,8 +3496,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP2SR1_7_4
IP2SR1_3_0))
},
- { PINMUX_CFG_REG("IP3SR1", 0xe605006c, 32, 4, GROUP(
- IP3SR1_31_28
+ { PINMUX_CFG_REG_VAR("IP3SR1", 0xe605006c, 32,
+ GROUP(-4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP3SR1_31_28 RESERVED */
IP3SR1_27_24
IP3SR1_23_20
IP3SR1_19_16
@@ -3570,19 +3538,21 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP2SR2_7_4
IP2SR2_3_0))
},
- { PINMUX_CFG_REG("IP0SR3", 0xe6058860, 32, 4, GROUP(
+ { PINMUX_CFG_REG_VAR("IP0SR3", 0xe6058860, 32,
+ GROUP(4, 4, 4, -8, 4, 4, -4),
+ GROUP(
IP0SR3_31_28
IP0SR3_27_24
IP0SR3_23_20
- IP0SR3_19_16
- IP0SR3_15_12
+ /* IP0SR3_19_12 RESERVED */
IP0SR3_11_8
IP0SR3_7_4
- IP0SR3_3_0))
+ /* IP0SR3_3_0 RESERVED */ ))
},
- { PINMUX_CFG_REG("IP1SR3", 0xe6058864, 32, 4, GROUP(
- IP1SR3_31_28
- IP1SR3_27_24
+ { PINMUX_CFG_REG_VAR("IP1SR3", 0xe6058864, 32,
+ GROUP(-8, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP1SR3_31_24 RESERVED */
IP1SR3_23_20
IP1SR3_19_16
IP1SR3_15_12
@@ -3610,15 +3580,15 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP1SR4_7_4
IP1SR4_3_0))
},
- { PINMUX_CFG_REG("IP2SR4", 0xe6060068, 32, 4, GROUP(
- IP2SR4_31_28
- IP2SR4_27_24
- IP2SR4_23_20
+ { PINMUX_CFG_REG_VAR("IP2SR4", 0xe6060068, 32,
+ GROUP(-12, 4, 4, 4, 4, -4),
+ GROUP(
+ /* IP2SR4_31_20 RESERVED */
IP2SR4_19_16
IP2SR4_15_12
IP2SR4_11_8
IP2SR4_7_4
- IP2SR4_3_0))
+ /* IP2SR4_3_0 RESERVED */ ))
},
{ PINMUX_CFG_REG("IP0SR5", 0xe6060860, 32, 4, GROUP(
IP0SR5_31_28
@@ -3640,15 +3610,15 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP1SR5_7_4
IP1SR5_3_0))
},
- { PINMUX_CFG_REG("IP2SR5", 0xe6060868, 32, 4, GROUP(
- IP2SR5_31_28
- IP2SR5_27_24
- IP2SR5_23_20
+ { PINMUX_CFG_REG_VAR("IP2SR5", 0xe6060868, 32,
+ GROUP(-12, 4, 4, 4, 4, -4),
+ GROUP(
+ /* IP2SR5_31_20 RESERVED */
IP2SR5_19_16
IP2SR5_15_12
IP2SR5_11_8
IP2SR5_7_4
- IP2SR5_3_0))
+ /* IP2SR5_3_0 RESERVED */ ))
},
#undef F_
#undef FM
@@ -3656,16 +3626,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6050900, 32,
- GROUP(4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 1, 1),
+ GROUP(-16, 2, 2, 2, 2, 2, 2, 2, -2),
GROUP(
- /* RESERVED 31, 30, 29, 28 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 27, 26, 25, 24 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 23, 22, 21, 20 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 19, 18, 17, 16 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED 31-16 */
MOD_SEL2_15_14
MOD_SEL2_13_12
MOD_SEL2_11_10
@@ -3673,8 +3636,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL2_7_6
MOD_SEL2_5_4
MOD_SEL2_3_2
- 0, 0,
- 0, 0, ))
+ /* RESERVED 1-0 */ ))
},
{ },
};
diff --git a/drivers/pinctrl/renesas/pfc-r8a779f0.c b/drivers/pinctrl/renesas/pfc-r8a779f0.c
index 91860608242c..aaca4ee2af55 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779f0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779f0.c
@@ -144,9 +144,6 @@
#define IP2SR0_11_8 FM(IRQ1) F_(0, 0) F_(0, 0) FM(MSIOF1_SS2) F_(0, 0) FM(TSN0_PHY_INT_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2SR0_15_12 FM(IRQ2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(TSN1_PHY_INT_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2SR0_19_16 FM(IRQ3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(TSN2_PHY_INT_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR0_23_20 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR0_27_24 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR0_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP0SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 - F */
#define IP0SR1_3_0 FM(GP1_00) FM(TCLK1) FM(HSCK2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -192,9 +189,9 @@ FM(IP0SR0_7_4) IP0SR0_7_4 FM(IP1SR0_7_4) IP1SR0_7_4 FM(IP2SR0_7_4) IP2SR0_7_4
FM(IP0SR0_11_8) IP0SR0_11_8 FM(IP1SR0_11_8) IP1SR0_11_8 FM(IP2SR0_11_8) IP2SR0_11_8 \
FM(IP0SR0_15_12) IP0SR0_15_12 FM(IP1SR0_15_12) IP1SR0_15_12 FM(IP2SR0_15_12) IP2SR0_15_12 \
FM(IP0SR0_19_16) IP0SR0_19_16 FM(IP1SR0_19_16) IP1SR0_19_16 FM(IP2SR0_19_16) IP2SR0_19_16 \
-FM(IP0SR0_23_20) IP0SR0_23_20 FM(IP1SR0_23_20) IP1SR0_23_20 FM(IP2SR0_23_20) IP2SR0_23_20 \
-FM(IP0SR0_27_24) IP0SR0_27_24 FM(IP1SR0_27_24) IP1SR0_27_24 FM(IP2SR0_27_24) IP2SR0_27_24 \
-FM(IP0SR0_31_28) IP0SR0_31_28 FM(IP1SR0_31_28) IP1SR0_31_28 FM(IP2SR0_31_28) IP2SR0_31_28 \
+FM(IP0SR0_23_20) IP0SR0_23_20 FM(IP1SR0_23_20) IP1SR0_23_20 \
+FM(IP0SR0_27_24) IP0SR0_27_24 FM(IP1SR0_27_24) IP1SR0_27_24 \
+FM(IP0SR0_31_28) IP0SR0_31_28 FM(IP1SR0_31_28) IP1SR0_31_28 \
\
FM(IP0SR1_3_0) IP0SR1_3_0 \
FM(IP0SR1_7_4) IP0SR1_7_4 \
@@ -257,7 +254,28 @@ enum {
};
static const u16 pinmux_data[] = {
+/* Using GP_1_[0-9] requires disabling I2C in MOD_SEL1 */
+#define GP_1_0_FN GP_1_0_FN, FN_SEL_I2C0_0
+#define GP_1_1_FN GP_1_1_FN, FN_SEL_I2C0_0
+#define GP_1_2_FN GP_1_2_FN, FN_SEL_I2C1_0
+#define GP_1_3_FN GP_1_3_FN, FN_SEL_I2C1_0
+#define GP_1_4_FN GP_1_4_FN, FN_SEL_I2C2_0
+#define GP_1_5_FN GP_1_5_FN, FN_SEL_I2C2_0
+#define GP_1_6_FN GP_1_6_FN, FN_SEL_I2C3_0
+#define GP_1_7_FN GP_1_7_FN, FN_SEL_I2C3_0
+#define GP_1_8_FN GP_1_8_FN, FN_SEL_I2C4_0
+#define GP_1_9_FN GP_1_9_FN, FN_SEL_I2C4_0
PINMUX_DATA_GP_ALL(),
+#undef GP_1_0_FN
+#undef GP_1_1_FN
+#undef GP_1_2_FN
+#undef GP_1_3_FN
+#undef GP_1_4_FN
+#undef GP_1_5_FN
+#undef GP_1_6_FN
+#undef GP_1_7_FN
+#undef GP_1_8_FN
+#undef GP_1_9_FN
PINMUX_SINGLE(SD_WP),
PINMUX_SINGLE(SD_CD),
@@ -1599,18 +1617,11 @@ static const struct sh_pfc_function pinmux_functions[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) FN_##y
#define FM(x) FN_##x
- { PINMUX_CFG_REG("GPSR0", 0xe6050040, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xe6050040, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_21 RESERVED */
GP_0_20_FN, GPSR0_20,
GP_0_19_FN, GPSR0_19,
GP_0_18_FN, GPSR0_18,
@@ -1633,14 +1644,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_0_1_FN, GPSR0_1,
GP_0_0_FN, GPSR0_0, ))
},
- { PINMUX_CFG_REG("GPSR1", 0xe6050840, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR1", 0xe6050840, 32,
+ GROUP(-7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP1_31_25 RESERVED */
GP_1_24_FN, GPSR1_24,
GP_1_23_FN, GPSR1_23,
GP_1_22_FN, GPSR1_22,
@@ -1667,22 +1675,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_1_1_FN, GPSR1_1,
GP_1_0_FN, GPSR1_0, ))
},
- { PINMUX_CFG_REG("GPSR2", 0xe6051040, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR2", 0xe6051040, 32,
+ GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP2_31_17 RESERVED */
GP_2_16_FN, GPSR2_16,
GP_2_15_FN, GPSR2_15,
GP_2_14_FN, GPSR2_14,
@@ -1701,20 +1698,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_2_1_FN, GPSR2_1,
GP_2_0_FN, GPSR2_0, ))
},
- { PINMUX_CFG_REG("GPSR3", 0xe6051840, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("GPSR3", 0xe6051840, 32,
+ GROUP(-13, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP3_31_19 RESERVED */
GP_3_18_FN, GPSR3_18,
GP_3_17_FN, GPSR3_17,
GP_3_16_FN, GPSR3_16,
@@ -1760,10 +1748,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP1SR0_7_4
IP1SR0_3_0))
},
- { PINMUX_CFG_REG("IP2SR0", 0xe6050068, 32, 4, GROUP(
- IP2SR0_31_28
- IP2SR0_27_24
- IP2SR0_23_20
+ { PINMUX_CFG_REG_VAR("IP2SR0", 0xe6050068, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR0_31_20 RESERVED */
IP2SR0_19_16
IP2SR0_15_12
IP2SR0_11_8
@@ -1786,18 +1774,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define F_(x, y) x,
#define FM(x) FN_##x,
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6050900, 32,
- GROUP(4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2),
+ GROUP(-20, 2, 2, 2, 2, 2, 2),
GROUP(
- /* RESERVED 31, 30, 29, 28 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 27, 26, 25, 24 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 23, 22, 21, 20 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 19, 18, 17, 16 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* RESERVED 15, 14, 13, 12 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED 31-12 */
MOD_SEL1_11_10
MOD_SEL1_9_8
MOD_SEL1_7_6
diff --git a/drivers/pinctrl/renesas/pfc-sh7203.c b/drivers/pinctrl/renesas/pfc-sh7203.c
index 3986802b448a..19735746b1bb 100644
--- a/drivers/pinctrl/renesas/pfc-sh7203.c
+++ b/drivers/pinctrl/renesas/pfc-sh7203.c
@@ -1072,31 +1072,20 @@ static const struct pinmux_func pinmux_func_gpios[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("PBIORL", 0xfffe3886, 16, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("PBIORL", 0xfffe3886, 16,
+ GROUP(-4, 1, 1, 1, 1, -8),
+ GROUP(
+ /* RESERVED [4] */
PB11_IN, PB11_OUT,
PB10_IN, PB10_OUT,
PB9_IN, PB9_OUT,
PB8_IN, PB8_OUT,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0 ))
+ /* RESERVED [8] */ ))
},
- { PINMUX_CFG_REG("PBCRL4", 0xfffe3890, 16, 4, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
+ { PINMUX_CFG_REG_VAR("PBCRL4", 0xfffe3890, 16,
+ GROUP(-12, 4),
+ GROUP(
+ /* RESERVED [12] */
PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
@@ -1139,13 +1128,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("IFCR", 0xfffe38a2, 16, 4, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
+ { PINMUX_CFG_REG_VAR("IFCR", 0xfffe38a2, 16,
+ GROUP(-12, 4),
+ GROUP(
+ /* RESERVED [12] */
PB12IRQ_00, PB12IRQ_01, PB12IRQ_10, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
},
@@ -1167,9 +1153,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PC1_IN, PC1_OUT,
PC0_IN, PC0_OUT ))
},
- { PINMUX_CFG_REG("PCCRL4", 0xfffe3910, 16, 4, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
+ { PINMUX_CFG_REG_VAR("PCCRL4", 0xfffe3910, 16,
+ GROUP(-4, 4, 4, 4),
+ GROUP(
+ /* RESERVED [4] */
PC14MD_0, PC14MD_1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1417,8 +1404,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF1_IN, PF1_OUT,
PF0_IN, PF0_OUT ))
},
- { PINMUX_CFG_REG("PFCRH4", 0xfffe3a88, 16, 4, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PFCRH4", 0xfffe3a88, 16,
+ GROUP(-4, 4, 4, 4),
+ GROUP(
+ /* RESERVED [4] */
PF30MD_0, PF30MD_1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
diff --git a/drivers/pinctrl/renesas/pfc-sh7264.c b/drivers/pinctrl/renesas/pfc-sh7264.c
index 7476b982101d..30096925a70c 100644
--- a/drivers/pinctrl/renesas/pfc-sh7264.c
+++ b/drivers/pinctrl/renesas/pfc-sh7264.c
@@ -1464,19 +1464,20 @@ static const struct pinmux_func pinmux_func_gpios[] = {
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
- { PINMUX_CFG_REG("PAIOR0", 0xfffe3812, 16, 1, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PAIOR0", 0xfffe3812, 16,
+ GROUP(-12, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED [12] */
PA3_IN, PA3_OUT,
PA2_IN, PA2_OUT,
PA1_IN, PA1_OUT,
PA0_IN, PA0_OUT ))
},
- { PINMUX_CFG_REG("PBCR5", 0xfffe3824, 16, 4, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PBCR5", 0xfffe3824, 16,
+ GROUP(-4, 4, 4, 4),
+ GROUP(
+ /* RESERVED [4] */
PB22MD_00, PB22MD_01, PB22MD_10, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PB21MD_0, PB21MD_1, 0, 0, 0, 0, 0, 0,
@@ -1525,21 +1526,22 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, PB4MD_01, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBCR0", 0xfffe382e, 16, 4, GROUP(
+ { PINMUX_CFG_REG_VAR("PBCR0", 0xfffe382e, 16,
+ GROUP(4, 4, 4, -4),
+ GROUP(
0, PB3MD_1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PB2MD_1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, PB1MD_1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 ))
+ /* RESERVED [4] */ ))
},
- { PINMUX_CFG_REG("PBIOR1", 0xfffe3830, 16, 1, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("PBIOR1", 0xfffe3830, 16,
+ GROUP(-9, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED [9] */
PB22_IN, PB22_OUT,
PB21_IN, PB21_OUT,
PB20_IN, PB20_OUT,
@@ -1568,9 +1570,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0 ))
},
- { PINMUX_CFG_REG("PCCR2", 0xfffe384a, 16, 4, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PCCR2", 0xfffe384a, 16,
+ GROUP(-4, 4, 4, 4),
+ GROUP(
+ /* RESERVED [4] */
PC10MD_0, PC10MD_1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PC9MD_0, PC9MD_1, 0, 0, 0, 0, 0, 0,
@@ -1599,8 +1602,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PCIOR0", 0xfffe3852, 16, 1, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PCIOR0", 0xfffe3852, 16,
+ GROUP(-5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED [5] */
PC10_IN, PC10_OUT,
PC9_IN, PC9_OUT,
PC8_IN, PC8_OUT,
@@ -1675,11 +1680,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PD0_IN, PD0_OUT ))
},
- { PINMUX_CFG_REG("PECR1", 0xfffe388c, 16, 4, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PECR1", 0xfffe388c, 16,
+ GROUP(-8, 4, 4),
+ GROUP(
+ /* RESERVED [8] */
PE5MD_00, PE5MD_01, 0, PE5MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PE4MD_00, PE4MD_01, 0, PE4MD_11, 0, 0, 0, 0,
@@ -1698,10 +1702,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PEIOR0", 0xfffe3892, 16, 1, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PEIOR0", 0xfffe3892, 16,
+ GROUP(-10, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED [10] */
PE5_IN, PE5_OUT,
PE4_IN, PE4_OUT,
PE3_IN, PE3_OUT,
@@ -1710,10 +1714,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PE0_IN, PE0_OUT ))
},
- { PINMUX_CFG_REG("PFCR3", 0xfffe38a8, 16, 4, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PFCR3", 0xfffe38a8, 16,
+ GROUP(-12, 4),
+ GROUP(
+ /* RESERVED [12] */
PF12MD_000, PF12MD_001, 0, PF12MD_011,
PF12MD_100, PF12MD_101, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 ))
@@ -1780,25 +1784,19 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF0_IN, PF0_OUT ))
},
- { PINMUX_CFG_REG("PGCR7", 0xfffe38c0, 16, 4, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PGCR7", 0xfffe38c0, 16,
+ GROUP(-12, 4),
+ GROUP(
+ /* RESERVED [12] */
PG0MD_000, PG0MD_001, PG0MD_010, PG0MD_011,
PG0MD_100, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR6", 0xfffe38c2, 16, 4, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PGCR6", 0xfffe38c2, 16,
+ GROUP(-12, 4),
+ GROUP(
+ /* RESERVED [12] */
PG24MD_00, PG24MD_01, PG24MD_10, PG24MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 ))
},
@@ -1869,19 +1867,21 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PG4MD_00, PG4MD_01, PG4MD_10, PG4MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR0", 0xfffe38ce, 16, 4, GROUP(
+ { PINMUX_CFG_REG_VAR("PGCR0", 0xfffe38ce, 16,
+ GROUP(4, 4, 4, -4),
+ GROUP(
PG3MD_00, PG3MD_01, PG3MD_10, PG3MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PG2MD_00, PG2MD_01, PG2MD_10, PG2MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
PG1MD_00, PG1MD_01, PG1MD_10, PG1MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 ))
+ /* RESERVED [4] */ ))
},
- { PINMUX_CFG_REG("PGIOR1", 0xfffe38d0, 16, 1, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PGIOR1", 0xfffe38d0, 16,
+ GROUP(-7, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED [7] */
PG24_IN, PG24_OUT,
PG23_IN, PG23_OUT,
PG22_IN, PG22_OUT,
diff --git a/drivers/pinctrl/renesas/pfc-sh7269.c b/drivers/pinctrl/renesas/pfc-sh7269.c
index 733a2c114ca2..f59f558d75ae 100644
--- a/drivers/pinctrl/renesas/pfc-sh7269.c
+++ b/drivers/pinctrl/renesas/pfc-sh7269.c
@@ -1966,15 +1966,18 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
 * mode registers and modes are described in ascending order [0..15]
*/
- { PINMUX_CFG_REG("PAIOR0", 0xfffe3812, 16, 1, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, PA1_IN, PA1_OUT,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, PA0_IN, PA0_OUT ))
+ { PINMUX_CFG_REG_VAR("PAIOR0", 0xfffe3812, 16,
+ GROUP(-7, 1, -7, 1),
+ GROUP(
+ /* RESERVED [7] */
+ PA1_IN, PA1_OUT,
+ /* RESERVED [7] */
+ PA0_IN, PA0_OUT ))
},
- { PINMUX_CFG_REG("PBCR5", 0xfffe3824, 16, 4, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
+ { PINMUX_CFG_REG_VAR("PBCR5", 0xfffe3824, 16,
+ GROUP(-4, 4, 4, 4),
+ GROUP(
+ /* RESERVED [4] */
PB22MD_000, PB22MD_001, PB22MD_010, PB22MD_011,
PB22MD_100, PB22MD_101, PB22MD_110, PB22MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2045,7 +2048,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PBCR0", 0xfffe382e, 16, 4, GROUP(
+ { PINMUX_CFG_REG_VAR("PBCR0", 0xfffe382e, 16,
+ GROUP(4, 4, 4, -4),
+ GROUP(
PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2055,13 +2060,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
+ /* RESERVED [4] */ ))
},
- { PINMUX_CFG_REG("PBIOR1", 0xfffe3830, 16, 1, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("PBIOR1", 0xfffe3830, 16,
+ GROUP(-9, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED [9] */
PB22_IN, PB22_OUT,
PB21_IN, PB21_OUT,
PB20_IN, PB20_OUT,
@@ -2089,13 +2094,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0 ))
},
- { PINMUX_CFG_REG("PCCR2", 0xfffe384a, 16, 4, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
+ { PINMUX_CFG_REG_VAR("PCCR2", 0xfffe384a, 16,
+ GROUP(-12, 4),
+ GROUP(
+ /* RESERVED [12] */
PC8MD_000, PC8MD_001, PC8MD_010, PC8MD_011,
PC8MD_100, PC8MD_101, PC8MD_110, PC8MD_111,
0, 0, 0, 0, 0, 0, 0, 0 ))
@@ -2130,8 +2132,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PCIOR0", 0xfffe3852, 16, 1, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PCIOR0", 0xfffe3852, 16,
+ GROUP(-7, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED [7] */
PC8_IN, PC8_OUT,
PC7_IN, PC7_OUT,
PC6_IN, PC6_OUT,
@@ -2244,9 +2248,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PE0MD_00, PE0MD_01, PE0MD_10, PE0MD_11, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PEIOR0", 0xfffe3892, 16, 1, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PEIOR0", 0xfffe3892, 16,
+ GROUP(-8, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED [8] */
PE7_IN, PE7_OUT,
PE6_IN, PE6_OUT,
PE5_IN, PE5_OUT,
@@ -2291,20 +2296,18 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF16MD_100, PF16MD_101, PF16MD_110, PF16MD_111,
0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCR4", 0xfffe38a6, 16, 4, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
+ { PINMUX_CFG_REG_VAR("PFCR4", 0xfffe38a6, 16,
+ GROUP(-12, 4),
+ GROUP(
+ /* RESERVED [12] */
PF15MD_000, PF15MD_001, PF15MD_010, PF15MD_011,
PF15MD_100, PF15MD_101, PF15MD_110, PF15MD_111,
0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFCR3", 0xfffe38a8, 16, 4, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
+ { PINMUX_CFG_REG_VAR("PFCR3", 0xfffe38a8, 16,
+ GROUP(-4, 4, 4, 4),
+ GROUP(
+ /* RESERVED [4] */
PF14MD_000, PF14MD_001, PF14MD_010, PF14MD_011,
PF14MD_100, PF14MD_101, PF14MD_110, PF14MD_111,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -2369,9 +2372,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0 ))
},
- { PINMUX_CFG_REG("PFIOR1", 0xfffe38b0, 16, 1, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PFIOR1", 0xfffe38b0, 16,
+ GROUP(-8, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED [8] */
PF23_IN, PF23_OUT,
PF22_IN, PF22_OUT,
PF21_IN, PF21_OUT,
diff --git a/drivers/pinctrl/renesas/pfc-sh73a0.c b/drivers/pinctrl/renesas/pfc-sh73a0.c
index 5d8a0179fd60..4f54dfd5a967 100644
--- a/drivers/pinctrl/renesas/pfc-sh73a0.c
+++ b/drivers/pinctrl/renesas/pfc-sh73a0.c
@@ -3798,24 +3798,16 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PORTCR(308, 0xe6052134), /* PORT308CR */
PORTCR(309, 0xe6052135), /* PORT309CR */
- { PINMUX_CFG_REG("MSEL2CR", 0xe605801c, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("MSEL2CR", 0xe605801c, 32,
+ GROUP(-12, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED [12] */
MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1,
MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1,
MSEL2CR_MSEL17_0, MSEL2CR_MSEL17_1,
MSEL2CR_MSEL16_0, MSEL2CR_MSEL16_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL2CR_MSEL14_0, MSEL2CR_MSEL14_1,
MSEL2CR_MSEL13_0, MSEL2CR_MSEL13_1,
MSEL2CR_MSEL12_0, MSEL2CR_MSEL12_1,
@@ -3833,60 +3825,43 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MSEL2CR_MSEL0_0, MSEL2CR_MSEL0_1,
))
},
- { PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("MSEL3CR", 0xe6058020, 32,
+ GROUP(-3, 1, -12, 1, -3, 1, -1, 1, -2, 1, -3, 1,
+ -2),
+ GROUP(
+ /* RESERVED [3] */
MSEL3CR_MSEL28_0, MSEL3CR_MSEL28_1,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* RESERVED [12] */
MSEL3CR_MSEL15_0, MSEL3CR_MSEL15_1,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* RESERVED [3] */
MSEL3CR_MSEL11_0, MSEL3CR_MSEL11_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL3CR_MSEL9_0, MSEL3CR_MSEL9_1,
- 0, 0,
- 0, 0,
+ /* RESERVED [2] */
MSEL3CR_MSEL6_0, MSEL3CR_MSEL6_1,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* RESERVED [3] */
MSEL3CR_MSEL2_0, MSEL3CR_MSEL2_1,
- 0, 0,
- 0, 0,
+ /* RESERVED [2] */
))
},
- { PINMUX_CFG_REG("MSEL4CR", 0xe6058024, 32, 1, GROUP(
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("MSEL4CR", 0xe6058024, 32,
+ GROUP(-2, 1, -1, 1, 1, -3, 1, 1, 1, 1, -3, 1,
+ -1, 1, 1, 1, 1, 1, 1, 1, -2, 1, -2, 1,
+ -1),
+ GROUP(
+ /* RESERVED [2] */
MSEL4CR_MSEL29_0, MSEL4CR_MSEL29_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL4CR_MSEL27_0, MSEL4CR_MSEL27_1,
MSEL4CR_MSEL26_0, MSEL4CR_MSEL26_1,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* RESERVED [3] */
MSEL4CR_MSEL22_0, MSEL4CR_MSEL22_1,
MSEL4CR_MSEL21_0, MSEL4CR_MSEL21_1,
MSEL4CR_MSEL20_0, MSEL4CR_MSEL20_1,
MSEL4CR_MSEL19_0, MSEL4CR_MSEL19_1,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* RESERVED [3] */
MSEL4CR_MSEL15_0, MSEL4CR_MSEL15_1,
- 0, 0,
+ /* RESERVED [1] */
MSEL4CR_MSEL13_0, MSEL4CR_MSEL13_1,
MSEL4CR_MSEL12_0, MSEL4CR_MSEL12_1,
MSEL4CR_MSEL11_0, MSEL4CR_MSEL11_1,
@@ -3894,13 +3869,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MSEL4CR_MSEL9_0, MSEL4CR_MSEL9_1,
MSEL4CR_MSEL8_0, MSEL4CR_MSEL8_1,
MSEL4CR_MSEL7_0, MSEL4CR_MSEL7_1,
- 0, 0,
- 0, 0,
+ /* RESERVED [2] */
MSEL4CR_MSEL4_0, MSEL4CR_MSEL4_1,
- 0, 0,
- 0, 0,
+ /* RESERVED [2] */
MSEL4CR_MSEL1_0, MSEL4CR_MSEL1_1,
- 0, 0,
+ /* RESERVED [1] */
))
},
{ },
diff --git a/drivers/pinctrl/renesas/pfc-sh7720.c b/drivers/pinctrl/renesas/pfc-sh7720.c
index 7071ef52449d..6eedcc5bbb4d 100644
--- a/drivers/pinctrl/renesas/pfc-sh7720.c
+++ b/drivers/pinctrl/renesas/pfc-sh7720.c
@@ -1014,25 +1014,24 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTJ1_FN, PTJ1_OUT, 0, PTJ1_IN,
PTJ0_FN, PTJ0_OUT, 0, PTJ0_IN ))
},
- { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PKCR", 0xa4050112, 16,
+ GROUP(-8, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [8] */
PTK3_FN, PTK3_OUT, 0, PTK3_IN,
PTK2_FN, PTK2_OUT, 0, PTK2_IN,
PTK1_FN, PTK1_OUT, 0, PTK1_IN,
PTK0_FN, PTK0_OUT, 0, PTK0_IN ))
},
- { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2, GROUP(
+ { PINMUX_CFG_REG_VAR("PLCR", 0xa4050114, 16,
+ GROUP(2, 2, 2, 2, 2, -6),
+ GROUP(
PTL7_FN, PTL7_OUT, 0, PTL7_IN,
PTL6_FN, PTL6_OUT, 0, PTL6_IN,
PTL5_FN, PTL5_OUT, 0, PTL5_IN,
PTL4_FN, PTL4_OUT, 0, PTL4_IN,
PTL3_FN, PTL3_OUT, 0, PTL3_IN,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0 ))
+ /* RESERVED [6] */ ))
},
{ PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2, GROUP(
PTM7_FN, PTM7_OUT, 0, PTM7_IN,
@@ -1044,10 +1043,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTM1_FN, PTM1_OUT, 0, PTM1_IN,
PTM0_FN, PTM0_OUT, 0, PTM0_IN ))
},
- { PINMUX_CFG_REG("PPCR", 0xa4050118, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PPCR", 0xa4050118, 16,
+ GROUP(-6, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [6] */
PTP4_FN, PTP4_OUT, 0, PTP4_IN,
PTP3_FN, PTP3_OUT, 0, PTP3_IN,
PTP2_FN, PTP2_OUT, 0, PTP2_IN,
@@ -1064,40 +1063,40 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTR1_FN, PTR1_OUT, 0, PTR1_IN,
PTR0_FN, PTR0_OUT, 0, PTR0_IN ))
},
- { PINMUX_CFG_REG("PSCR", 0xa405011c, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PSCR", 0xa405011c, 16,
+ GROUP(-6, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [6] */
PTS4_FN, PTS4_OUT, 0, PTS4_IN,
PTS3_FN, PTS3_OUT, 0, PTS3_IN,
PTS2_FN, PTS2_OUT, 0, PTS2_IN,
PTS1_FN, PTS1_OUT, 0, PTS1_IN,
PTS0_FN, PTS0_OUT, 0, PTS0_IN ))
},
- { PINMUX_CFG_REG("PTCR", 0xa405011e, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PTCR", 0xa405011e, 16,
+ GROUP(-6, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [6] */
PTT4_FN, PTT4_OUT, 0, PTT4_IN,
PTT3_FN, PTT3_OUT, 0, PTT3_IN,
PTT2_FN, PTT2_OUT, 0, PTT2_IN,
PTT1_FN, PTT1_OUT, 0, PTT1_IN,
PTT0_FN, PTT0_OUT, 0, PTT0_IN ))
},
- { PINMUX_CFG_REG("PUCR", 0xa4050120, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PUCR", 0xa4050120, 16,
+ GROUP(-6, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [6] */
PTU4_FN, PTU4_OUT, 0, PTU4_IN,
PTU3_FN, PTU3_OUT, 0, PTU3_IN,
PTU2_FN, PTU2_OUT, 0, PTU2_IN,
PTU1_FN, PTU1_OUT, 0, PTU1_IN,
PTU0_FN, PTU0_OUT, 0, PTU0_IN ))
},
- { PINMUX_CFG_REG("PVCR", 0xa4050122, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PVCR", 0xa4050122, 16,
+ GROUP(-6, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [6] */
PTV4_FN, PTV4_OUT, 0, PTV4_IN,
PTV3_FN, PTV3_OUT, 0, PTV3_IN,
PTV2_FN, PTV2_OUT, 0, PTV2_IN,
diff --git a/drivers/pinctrl/renesas/pfc-sh7722.c b/drivers/pinctrl/renesas/pfc-sh7722.c
index 13d9967dce59..4b82ac2c5e91 100644
--- a/drivers/pinctrl/renesas/pfc-sh7722.c
+++ b/drivers/pinctrl/renesas/pfc-sh7722.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/init.h>
#include <linux/kernel.h>
#include <cpu/sh7722.h>
@@ -1256,14 +1255,16 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
HPD49, PTB1_OUT, 0, PTB1_IN,
HPD48, PTB0_OUT, 0, PTB0_IN ))
},
- { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2, GROUP(
+ { PINMUX_CFG_REG_VAR("PCCR", 0xa4050104, 16,
+ GROUP(2, -2, 2, 2, 2, 2, -2, 2),
+ GROUP(
0, 0, 0, PTC7_IN,
- 0, 0, 0, 0,
+ /* RESERVED [2] */
IOIS16, 0, 0, PTC5_IN,
HPDQM7, PTC4_OUT, 0, PTC4_IN,
HPDQM6, PTC3_OUT, 0, PTC3_IN,
HPDQM5, PTC2_OUT, 0, PTC2_IN,
- 0, 0, 0, 0,
+ /* RESERVED [2] */
HPDQM4, PTC0_OUT, 0, PTC0_IN ))
},
{ PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2, GROUP(
@@ -1276,13 +1277,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
SDHICMD, PTD1_OUT, 0, PTD1_IN,
SDHICLK, PTD0_OUT, 0, 0 ))
},
- { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2, GROUP(
+ { PINMUX_CFG_REG_VAR("PECR", 0xa4050108, 16,
+ GROUP(2, 2, 2, 2, -4, 2, 2),
+ GROUP(
A25, PTE7_OUT, 0, PTE7_IN,
A24, PTE6_OUT, 0, PTE6_IN,
A23, PTE5_OUT, 0, PTE5_IN,
A22, PTE4_OUT, 0, PTE4_IN,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ /* RESERVED [4] */
IRQ5, PTE1_OUT, 0, PTE1_IN,
IRQ4_BS, PTE0_OUT, 0, PTE0_IN ))
},
@@ -1296,10 +1298,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
SIORXD_SIUBISLD, 0, 0, PTF1_IN,
SIOTXD_SIUBOSLD, PTF0_OUT, 0, 0 ))
},
- { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PGCR", 0xa405010c, 16,
+ GROUP(-6, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [6] */
AUDSYNC, PTG4_OUT, 0, 0,
AUDATA3, PTG3_OUT, 0, 0,
AUDATA2, PTG2_OUT, 0, 0,
@@ -1316,13 +1318,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
LCDD17_DV_HSYNC, PTH1_OUT, 0, PTH1_IN,
LCDD16_DV_VSYNC, PTH0_OUT, 0, PTH0_IN ))
},
- { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2, GROUP(
+ { PINMUX_CFG_REG_VAR("PJCR", 0xa4050110, 16,
+ GROUP(2, 2, 2, -6, 2, 2),
+ GROUP(
STATUS0, PTJ7_OUT, 0, 0,
0, PTJ6_OUT, 0, 0,
PDSTATUS, PTJ5_OUT, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ /* RESERVED [6] */
IRQ1, PTJ1_OUT, 0, PTJ1_IN,
IRQ0, PTJ0_OUT, 0, PTJ0_IN ))
},
@@ -1376,50 +1378,50 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTQ1, PTQ1_OUT, 0, 0,
PTQ0, PTQ0_OUT, 0, PTQ0_IN ))
},
- { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PRCR", 0xa405011c, 16,
+ GROUP(-6, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [6] */
LCDRD, PTR4_OUT, 0, 0,
CS6B_CE1B_LCDCS2, PTR3_OUT, 0, 0,
WAIT, 0, 0, PTR2_IN,
LCDDCK_LCDWR, PTR1_OUT, 0, 0,
LCDVEPWC_LCDVEPWC2, PTR0_OUT, 0, 0 ))
},
- { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PSCR", 0xa405011e, 16,
+ GROUP(-6, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [6] */
SCIF0_CTS_SIUAISPD, 0, 0, PTS4_IN,
SCIF0_RTS_SIUAOSPD, PTS3_OUT, 0, 0,
SCIF0_SCK_TPUTO, PTS2_OUT, 0, PTS2_IN,
SCIF0_RXD, 0, 0, PTS1_IN,
SCIF0_TXD, PTS0_OUT, 0, 0 ))
},
- { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PTCR", 0xa4050140, 16,
+ GROUP(-6, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [6] */
FOE_VIO_VD2, PTT4_OUT, 0, PTT4_IN,
FWE, PTT3_OUT, 0, PTT3_IN,
FSC, PTT2_OUT, 0, PTT2_IN,
DREQ0, 0, 0, PTT1_IN,
FCDE, PTT0_OUT, 0, 0 ))
},
- { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PUCR", 0xa4050142, 16,
+ GROUP(-6, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [6] */
NAF2_VIO_D10, PTU4_OUT, 0, PTU4_IN,
NAF1_VIO_D9, PTU3_OUT, 0, PTU3_IN,
NAF0_VIO_D8, PTU2_OUT, 0, PTU2_IN,
FRB_VIO_CLK2, 0, 0, PTU1_IN,
FCE_VIO_HD2, PTU0_OUT, 0, PTU0_IN ))
},
- { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PVCR", 0xa4050144, 16,
+ GROUP(-6, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [6] */
NAF7_VIO_D15, PTV4_OUT, 0, PTV4_IN,
NAF6_VIO_D14, PTV3_OUT, 0, PTV3_IN,
NAF5_VIO_D13, PTV2_OUT, 0, PTV2_IN,
@@ -1446,9 +1448,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
LCDD19_DV_CLKI, PTX1_OUT, 0, PTX1_IN,
LCDD18_DV_CLK, PTX0_OUT, 0, PTX0_IN ))
},
- { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PYCR", 0xa405014a, 16,
+ GROUP(-4, 2, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [4] */
KEYOUT5_IN5, PTY5_OUT, 0, PTY5_IN,
KEYOUT4_IN6, PTY4_OUT, 0, PTY4_IN,
KEYOUT3, PTY3_OUT, 0, PTY3_IN,
@@ -1456,33 +1459,27 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
KEYOUT1, PTY1_OUT, 0, 0,
KEYOUT0, PTY0_OUT, 0, PTY0_IN ))
},
- { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PZCR", 0xa405014c, 16,
+ GROUP(-4, 2, 2, 2, 2, 2, -2),
+ GROUP(
+ /* RESERVED [4] */
KEYIN4_IRQ7, 0, 0, PTZ5_IN,
KEYIN3, 0, 0, PTZ4_IN,
KEYIN2, 0, 0, PTZ3_IN,
KEYIN1, 0, 0, PTZ2_IN,
KEYIN0_IRQ6, 0, 0, PTZ1_IN,
- 0, 0, 0, 0 ))
+ /* RESERVED [2] */ ))
},
- { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1, GROUP(
+ { PINMUX_CFG_REG_VAR("PSELA", 0xa405014e, 16,
+ GROUP(1, 1, -4, 1, -4, 1, -4),
+ GROUP(
PSA15_KEYIN0, PSA15_IRQ6,
PSA14_KEYIN4, PSA14_IRQ7,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* RESERVED [4] */
PSA9_IRQ4, PSA9_BS,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* RESERVED [4] */
PSA4_IRQ2, PSA4_SDHID2,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0 ))
+ /* RESERVED [4] */ ))
},
{ PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1, GROUP(
PSB15_SIOTXD, PSB15_SIUBOSLD,
@@ -1502,22 +1499,15 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PSB1_SIUMCKA, PSB1_SIOF1_MCK,
PSB0_SIUAOSLD, PSB0_SIOF1_TXD ))
},
- { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1, GROUP(
+ { PINMUX_CFG_REG_VAR("PSELC", 0xa4050152, 16,
+ GROUP(1, 1, 1, 1, 1, -10, 1),
+ GROUP(
PSC15_SIUAISLD, PSC15_SIOF1_RXD,
PSC14_SIUAOBT, PSC14_SIOF1_SCK,
PSC13_SIUAOLR, PSC13_SIOF1_SYNC,
PSC12_SIUAIBT, PSC12_SIOF1_SS1,
PSC11_SIUAILR, PSC11_SIOF1_SS2,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* RESERVED [10] */
PSC0_NAF, PSC0_VIO ))
},
{ PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1, GROUP(
@@ -1538,61 +1528,45 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
PSD0_LCDD19_LCDD0, PSD0_DV ))
},
- { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1, GROUP(
+ { PINMUX_CFG_REG_VAR("PSELE", 0xa4050156, 16,
+ GROUP(1, 1, 1, 1, 1, -7, 1, 1, 1, 1),
+ GROUP(
PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D,
PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK,
PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT,
PSE12_LCDVSYN2, PSE12_DACK,
PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* RESERVED [7] */
PSE3_FLCTL, PSE3_VIO,
PSE2_NAF2, PSE2_VIO_D10,
PSE1_NAF1, PSE1_VIO_D9,
PSE0_NAF0, PSE0_VIO_D8 ))
},
- { PINMUX_CFG_REG("HIZCRA", 0xa4050158, 16, 1, GROUP(
- 0, 0,
+ { PINMUX_CFG_REG_VAR("HIZCRA", 0xa4050158, 16,
+ GROUP(-1, 1, -3, 1, 1, 1, 1, 1, -6),
+ GROUP(
+ /* RESERVED [1] */
HIZA14_KEYSC, HIZA14_HIZ,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* RESERVED [3] */
HIZA10_NAF, HIZA10_HIZ,
HIZA9_VIO, HIZA9_HIZ,
HIZA8_LCDC, HIZA8_HIZ,
HIZA7_LCDC, HIZA7_HIZ,
HIZA6_LCDC, HIZA6_HIZ,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0 ))
+ /* RESERVED [6] */ ))
},
- { PINMUX_CFG_REG("HIZCRB", 0xa405015a, 16, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("HIZCRB", 0xa405015a, 16,
+ GROUP(-11, 1, -2, 1, 1),
+ GROUP(
+ /* RESERVED [11] */
HIZB4_SIUA, HIZB4_HIZ,
- 0, 0,
- 0, 0,
+ /* RESERVED [2] */
HIZB1_VIO, HIZB1_HIZ,
HIZB0_VIO, HIZB0_HIZ ))
},
- { PINMUX_CFG_REG("HIZCRC", 0xa405015c, 16, 1, GROUP(
+ { PINMUX_CFG_REG_VAR("HIZCRC", 0xa405015c, 16,
+ GROUP(1, 1, 1, 1, 1, 1, 1, 1, -8),
+ GROUP(
HIZC15_IRQ7, HIZC15_HIZ,
HIZC14_IRQ6, HIZC14_HIZ,
HIZC13_IRQ5, HIZC13_HIZ,
@@ -1601,32 +1575,15 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
HIZC10_IRQ2, HIZC10_HIZ,
HIZC9_IRQ1, HIZC9_HIZ,
HIZC8_IRQ0, HIZC8_HIZ,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0 ))
+ /* RESERVED [8] */ ))
},
- { PINMUX_CFG_REG("MSELCRB", 0xa4050182, 16, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("MSELCRB", 0xa4050182, 16,
+ GROUP(-6, 1, 1, -8),
+ GROUP(
+ /* RESERVED [6] */
MSELB9_VIO, MSELB9_VIO2,
MSELB8_RGB, MSELB8_SYS,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0 ))
+ /* RESERVED [8] */ ))
},
{}
};
diff --git a/drivers/pinctrl/renesas/pfc-sh7723.c b/drivers/pinctrl/renesas/pfc-sh7723.c
index 6f08f527c010..95344281966e 100644
--- a/drivers/pinctrl/renesas/pfc-sh7723.c
+++ b/drivers/pinctrl/renesas/pfc-sh7723.c
@@ -5,7 +5,6 @@
* Copyright (C) 2008 Magnus Damm
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <cpu/sh7723.h>
@@ -1547,9 +1546,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTD1_FN, PTD1_OUT, 0, PTD1_IN,
PTD0_FN, PTD0_OUT, 0, PTD0_IN ))
},
- { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PECR", 0xa4050108, 16,
+ GROUP(-4, 2, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [4] */
PTE5_FN, PTE5_OUT, 0, PTE5_IN,
PTE4_FN, PTE4_OUT, 0, PTE4_IN,
PTE3_FN, PTE3_OUT, 0, PTE3_IN,
@@ -1567,9 +1567,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTF1_FN, PTF1_OUT, 0, PTF1_IN,
PTF0_FN, PTF0_OUT, 0, PTF0_IN ))
},
- { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PGCR", 0xa405010c, 16,
+ GROUP(-4, 2, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [4] */
PTG5_FN, PTG5_OUT, 0, 0,
PTG4_FN, PTG4_OUT, 0, 0,
PTG3_FN, PTG3_OUT, 0, 0,
@@ -1587,11 +1588,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTH1_FN, PTH1_OUT, 0, PTH1_IN,
PTH0_FN, PTH0_OUT, 0, PTH0_IN ))
},
- { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2, GROUP(
+ { PINMUX_CFG_REG_VAR("PJCR", 0xa4050110, 16,
+ GROUP(2, -2, 2, -2, 2, 2, 2, 2),
+ GROUP(
PTJ7_FN, PTJ7_OUT, 0, 0,
- 0, 0, 0, 0,
+ /* RESERVED [2] */
PTJ5_FN, PTJ5_OUT, 0, 0,
- 0, 0, 0, 0,
+ /* RESERVED [2] */
PTJ3_FN, PTJ3_OUT, 0, PTJ3_IN,
PTJ2_FN, PTJ2_OUT, 0, PTJ2_IN,
PTJ1_FN, PTJ1_OUT, 0, PTJ1_IN,
@@ -1637,11 +1640,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTN1_FN, PTN1_OUT, 0, PTN1_IN,
PTN0_FN, PTN0_OUT, 0, PTN0_IN ))
},
- { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PQCR", 0xa405011a, 16,
+ GROUP(-8, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [8] */
PTQ3_FN, 0, 0, PTQ3_IN,
PTQ2_FN, 0, 0, PTQ2_IN,
PTQ1_FN, 0, 0, PTQ1_IN,
@@ -1667,9 +1669,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTS1_FN, PTS1_OUT, 0, PTS1_IN,
PTS0_FN, PTS0_OUT, 0, PTS0_IN ))
},
- { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PTCR", 0xa4050140, 16,
+ GROUP(-4, 2, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [4] */
PTT5_FN, PTT5_OUT, 0, PTT5_IN,
PTT4_FN, PTT4_OUT, 0, PTT4_IN,
PTT3_FN, PTT3_OUT, 0, PTT3_IN,
@@ -1677,9 +1680,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTT1_FN, PTT1_OUT, 0, PTT1_IN,
PTT0_FN, PTT0_OUT, 0, PTT0_IN ))
},
- { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PUCR", 0xa4050142, 16,
+ GROUP(-4, 2, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [4] */
PTU5_FN, PTU5_OUT, 0, PTU5_IN,
PTU4_FN, PTU4_OUT, 0, PTU4_IN,
PTU3_FN, PTU3_OUT, 0, PTU3_IN,
@@ -1737,35 +1741,38 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTZ1_FN, PTZ1_OUT, 0, PTZ1_IN,
PTZ0_FN, PTZ0_OUT, 0, PTZ0_IN ))
},
- { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 2, GROUP(
+ { PINMUX_CFG_REG_VAR("PSELA", 0xa405014e, 16,
+ GROUP(2, 2, 2, -4, 2, 2, -2),
+ GROUP(
PSA15_PSA14_FN1, PSA15_PSA14_FN2, 0, 0,
PSA13_PSA12_FN1, PSA13_PSA12_FN2, 0, 0,
PSA11_PSA10_FN1, PSA11_PSA10_FN2, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ /* RESERVED [4] */
PSA5_PSA4_FN1, PSA5_PSA4_FN2, PSA5_PSA4_FN3, 0,
PSA3_PSA2_FN1, PSA3_PSA2_FN2, 0, 0,
- 0, 0, 0, 0 ))
+ /* RESERVED [2] */ ))
},
- { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 2, GROUP(
+ { PINMUX_CFG_REG_VAR("PSELB", 0xa4050150, 16,
+ GROUP(2, 2, -2, 2, 2, 2, 2, -2),
+ GROUP(
PSB15_PSB14_FN1, PSB15_PSB14_FN2, 0, 0,
PSB13_PSB12_LCDC_RGB, PSB13_PSB12_LCDC_SYS, 0, 0,
- 0, 0, 0, 0,
+ /* RESERVED [2] */
PSB9_PSB8_FN1, PSB9_PSB8_FN2, PSB9_PSB8_FN3, 0,
PSB7_PSB6_FN1, PSB7_PSB6_FN2, 0, 0,
PSB5_PSB4_FN1, PSB5_PSB4_FN2, 0, 0,
PSB3_PSB2_FN1, PSB3_PSB2_FN2, 0, 0,
- 0, 0, 0, 0 ))
+ /* RESERVED [2] */ ))
},
- { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 2, GROUP(
+ { PINMUX_CFG_REG_VAR("PSELC", 0xa4050152, 16,
+ GROUP(2, 2, 2, 2, 2, -6),
+ GROUP(
PSC15_PSC14_FN1, PSC15_PSC14_FN2, 0, 0,
PSC13_PSC12_FN1, PSC13_PSC12_FN2, 0, 0,
PSC11_PSC10_FN1, PSC11_PSC10_FN2, PSC11_PSC10_FN3, 0,
PSC9_PSC8_FN1, PSC9_PSC8_FN2, 0, 0,
PSC7_PSC6_FN1, PSC7_PSC6_FN2, PSC7_PSC6_FN3, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0 ))
+		/* RESERVED [6] */ ))
},
{ PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 2, GROUP(
PSD15_PSD14_FN1, PSD15_PSD14_FN2, 0, 0,
diff --git a/drivers/pinctrl/renesas/pfc-sh7724.c b/drivers/pinctrl/renesas/pfc-sh7724.c
index 7a18afecda2c..26517ad26a0f 100644
--- a/drivers/pinctrl/renesas/pfc-sh7724.c
+++ b/drivers/pinctrl/renesas/pfc-sh7724.c
@@ -10,7 +10,6 @@
* Copyright (C) 2008 Magnus Damm
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <cpu/sh7724.h>
@@ -1799,9 +1798,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTF1_FN, PTF1_OUT, 0, PTF1_IN,
PTF0_FN, PTF0_OUT, 0, PTF0_IN ))
},
- { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PGCR", 0xa405010c, 16,
+ GROUP(-4, 2, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [4] */
PTG5_FN, PTG5_OUT, 0, 0,
PTG4_FN, PTG4_OUT, 0, 0,
PTG3_FN, PTG3_OUT, 0, 0,
diff --git a/drivers/pinctrl/renesas/pfc-sh7734.c b/drivers/pinctrl/renesas/pfc-sh7734.c
index dbc36079c381..106a500ad13d 100644
--- a/drivers/pinctrl/renesas/pfc-sh7734.c
+++ b/drivers/pinctrl/renesas/pfc-sh7734.c
@@ -5,7 +5,6 @@
* Copyright (C) 2012 Renesas Solutions Corp.
* Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <cpu/sh7734.h>
@@ -1806,16 +1805,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_4_1_FN, FN_IP9_21_20,
GP_4_0_FN, FN_IP9_19_18 ))
},
- { PINMUX_CFG_REG("GPSR5", 0xFFFC0018, 32, 1, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0, /* 31 - 28 */
- 0, 0, 0, 0, 0, 0, 0, 0, /* 27 - 24 */
- 0, 0, 0, 0, 0, 0, 0, 0, /* 23 - 20 */
- 0, 0, 0, 0, 0, 0, 0, 0, /* 19 - 16 */
- 0, 0, 0, 0, 0, 0, 0, 0, /* 15 - 12 */
+ { PINMUX_CFG_REG_VAR("GPSR5", 0xFFFC0018, 32,
+ GROUP(-20, 1, 1, -6, 1, 1, 1, 1),
+ GROUP(
+ /* GP5_31_12 RESERVED */
GP_5_11_FN, FN_IP10_29_28,
GP_5_10_FN, FN_IP10_27_26,
- 0, 0, 0, 0, 0, 0, 0, 0, /* 9 - 6 */
- 0, 0, 0, 0, /* 5, 4 */
+ /* GP5_9_4 RESERVED */
GP_5_3_FN, FN_IRQ3_B,
GP_5_2_FN, FN_IRQ2_B,
GP_5_1_FN, FN_IP11_3,
@@ -1896,10 +1892,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_A16, FN_ST0_PWM, FN_LCD_DON_A, FN_TIOC4A_C ))
},
{ PINMUX_CFG_REG_VAR("IPSR2", 0xFFFC0024, 32,
- GROUP(1, 3, 3, 2, 3, 3, 3, 3, 3, 3, 2, 3),
+ GROUP(-1, 3, 3, 2, 3, 3, 3, 3, 3, 3, 2, 3),
GROUP(
- /* IP2_31 [1] */
- 0, 0,
+ /* IP2_31 [1] RESERVED */
/* IP2_30_28 [3] */
FN_D14, FN_TX2_B, 0, FN_FSE_A,
FN_ET0_TX_CLK_B, 0, 0, 0,
@@ -1933,10 +1928,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_FD4_A, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR3", 0xFFFC0028, 32,
- GROUP(2, 3, 3, 3, 1, 2, 3, 3, 3, 3, 3, 1, 2),
+ GROUP(-2, 3, 3, 3, 1, 2, 3, 3, 3, 3, 3, 1, 2),
GROUP(
- /* IP3_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP3_31_30 [2] RESERVED */
/* IP3_29_27 [3] */
FN_DRACK0, FN_SD1_DAT2_A, FN_ATAG, FN_TCLK1_A,
FN_ET0_ETXD7, 0, 0, 0,
@@ -2007,19 +2001,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ET0_ERXD7, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR5", 0xFFFC0030, 32,
- GROUP(1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3,
- 3, 3, 3),
+ GROUP(-5, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3),
GROUP(
- /* IP5_31 [1] */
- 0, 0,
- /* IP5_30 [1] */
- 0, 0,
- /* IP5_29 [1] */
- 0, 0,
- /* IP5_28 [1] */
- 0, 0,
- /* IP5_27 [1] */
- 0, 0,
+ /* IP5_31_27 [5] RESERVED */
/* IP5_26_25 [2] */
FN_REF50CK, FN_CTS1_E, FN_HCTS0_D, 0,
/* IP5_24_23 [2] */
@@ -2049,25 +2033,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ET0_RX_CLK_B, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR6", 0xFFFC0034, 32,
- GROUP(1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 2, 2,
- 2, 2, 2, 2, 3, 3),
+ GROUP(-8, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3),
GROUP(
- /* IP5_31 [1] */
- 0, 0,
- /* IP6_30 [1] */
- 0, 0,
- /* IP6_29 [1] */
- 0, 0,
- /* IP6_28 [1] */
- 0, 0,
- /* IP6_27 [1] */
- 0, 0,
- /* IP6_26 [1] */
- 0, 0,
- /* IP6_25 [1] */
- 0, 0,
- /* IP6_24 [1] */
- 0, 0,
+ /* IP5_31_24 [8] RESERVED */
/* IP6_23_21 [3] */
FN_DU0_DG1, FN_CTS1_C, FN_HRTS0_D, FN_TIOC1B_A,
FN_HIFD09, 0, 0, 0,
@@ -2094,10 +2062,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_TCLKA_A, FN_HIFD00, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xFFFC0038, 32,
- GROUP(1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3),
+ GROUP(-1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3),
GROUP(
- /* IP7_31 [1] */
- 0, 0,
+ /* IP7_31 [1] RESERVED */
/* IP7_30_29 [2] */
FN_DU0_DB4, 0, FN_HIFINT, 0,
/* IP7_28_27 [2] */
@@ -2131,11 +2098,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_HIFD10, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR8", 0xFFFC003C, 32,
- GROUP(2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2,
+ GROUP(-2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2),
GROUP(
- /* IP9_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP9_31_30 [2] RESERVED */
/* IP8_29_28 [2] */
FN_IRQ3_A, FN_RTS0_A, FN_HRTS0_B, FN_ET0_ERXD3_A,
/* IP8_27_26 [2] */
@@ -2169,11 +2135,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU0_DB5, 0, FN_HIFDREQ, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR9", 0xFFFC0040, 32,
- GROUP(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ GROUP(-2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2),
GROUP(
- /* IP9_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP9_31_30 [2] RESERVED */
/* IP9_29_28 [2] */
FN_SSI_SDATA1_A, FN_VI1_3_B, FN_LCD_DATA14_B, 0,
/* IP9_27_26 [2] */
@@ -2206,10 +2171,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI1_CLK_A, 0, FN_FD0_B, FN_LCD_DATA0_B ))
},
{ PINMUX_CFG_REG_VAR("IPSR10", 0xFFFC0044, 32,
- GROUP(2, 2, 2, 1, 2, 1, 3, 3, 1, 3, 3, 3, 3, 3),
+ GROUP(-2, 2, 2, 1, 2, 1, 3, 3, 1, 3, 3, 3, 3, 3),
GROUP(
- /* IP9_31_30 [2] */
- 0, 0, 0, 0,
+ /* IP9_31_30 [2] RESERVED */
/* IP10_29_28 [2] */
FN_CAN1_TX_A, FN_TX5_C, FN_MLB_DAT, 0,
/* IP10_27_26 [2] */
@@ -2245,11 +2209,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_LCD_DATA15_B, 0, 0, 0 ))
},
{ PINMUX_CFG_REG_VAR("IPSR11", 0xFFFC0048, 32,
- GROUP(3, 1, 2, 3, 2, 2, 3, 3, 1, 2, 3, 3,
+ GROUP(-3, 1, 2, 3, 2, 2, 3, 3, 1, 2, 3, 3,
1, 1, 1, 1),
GROUP(
- /* IP11_31_29 [3] */
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_31_29 [3] RESERVED */
/* IP11_28 [1] */
FN_PRESETOUT, FN_ST_CLKOUT,
/* IP11_27_26 [2] */
@@ -2287,11 +2250,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SCL1, FN_SCIF_CLK_C ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xFFFC004C, 32,
- GROUP(3, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2,
+ GROUP(-3, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2,
2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
GROUP(
- /* SEL1_31_29 [3] */
- 0, 0, 0, 0, 0, 0, 0, 0,
+ /* SEL1_31_29 [3] RESERVED */
/* SEL1_28 [1] */
FN_SEL_IEBUS_0, FN_SEL_IEBUS_1,
/* SEL1_27 [1] */
@@ -2344,25 +2306,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_INTC_0, FN_SEL_INTC_1 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xFFFC0050, 32,
- GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
- 2, 1, 2, 2, 3, 2, 3, 2, 2),
+ GROUP(-8, 1, 1, 1, 2, 2, 1, 2, 2, 3, 2, 3, 2, 2),
GROUP(
- /* SEL2_31 [1] */
- 0, 0,
- /* SEL2_30 [1] */
- 0, 0,
- /* SEL2_29 [1] */
- 0, 0,
- /* SEL2_28 [1] */
- 0, 0,
- /* SEL2_27 [1] */
- 0, 0,
- /* SEL2_26 [1] */
- 0, 0,
- /* SEL2_25 [1] */
- 0, 0,
- /* SEL2_24 [1] */
- 0, 0,
+ /* SEL2_31_24 [8] RESERVED */
/* SEL2_23 [1] */
FN_SEL_MTU2_CLK_0, FN_SEL_MTU2_CLK_1,
/* SEL2_22 [1] */
@@ -2403,10 +2349,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
},
{ PINMUX_CFG_REG("INOUTSEL4", 0xFFC44004, 32, 1, GROUP(GP_INOUTSEL(4)))
},
- { PINMUX_CFG_REG("INOUTSEL5", 0xffc45004, 32, 1, GROUP(
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 31 - 24 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 23 - 16 */
- 0, 0, 0, 0, 0, 0, 0, 0, /* 15 - 12 */
+ { PINMUX_CFG_REG_VAR("INOUTSEL5", 0xffc45004, 32,
+ GROUP(-20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP5_31_12 RESERVED */
GP_5_11_IN, GP_5_11_OUT,
GP_5_10_IN, GP_5_10_OUT,
GP_5_9_IN, GP_5_9_OUT,
diff --git a/drivers/pinctrl/renesas/pfc-sh7757.c b/drivers/pinctrl/renesas/pfc-sh7757.c
index 064e987b09cb..0d7857d7efef 100644
--- a/drivers/pinctrl/renesas/pfc-sh7757.c
+++ b/drivers/pinctrl/renesas/pfc-sh7757.c
@@ -10,7 +10,6 @@
* Copyright (C) 2008 Magnus Damm
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <cpu/sh7757.h>
@@ -1964,43 +1963,35 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
0, 0, ))
},
- { PINMUX_CFG_REG("PSEL1", 0xffec0072, 16, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("PSEL1", 0xffec0072, 16,
+ GROUP(-5, 1, 1, 1, -5, 1, -2),
+ GROUP(
+ /* RESERVED [5] */
PS1_10_FN1, PS1_10_FN2,
PS1_9_FN1, PS1_9_FN2,
PS1_8_FN1, PS1_8_FN2,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* RESERVED [5] */
PS1_2_FN1, PS1_2_FN2,
- 0, 0,
- 0, 0, ))
+ /* RESERVED [2] */ ))
},
- { PINMUX_CFG_REG("PSEL2", 0xffec0074, 16, 1, GROUP(
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("PSEL2", 0xffec0074, 16,
+ GROUP(-2, 1, 1, -4, 1, 1, 1, 1, -1, 1, -2),
+ GROUP(
+ /* RESERVED [2] */
PS2_13_FN1, PS2_13_FN2,
PS2_12_FN1, PS2_12_FN2,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* RESERVED [4] */
PS2_7_FN1, PS2_7_FN2,
PS2_6_FN1, PS2_6_FN2,
PS2_5_FN1, PS2_5_FN2,
PS2_4_FN1, PS2_4_FN2,
- 0, 0,
+ /* RESERVED [1] */
PS2_2_FN1, PS2_2_FN2,
- 0, 0,
- 0, 0, ))
+ /* RESERVED [2] */ ))
},
- { PINMUX_CFG_REG("PSEL3", 0xffec0076, 16, 1, GROUP(
+ { PINMUX_CFG_REG_VAR("PSEL3", 0xffec0076, 16,
+ GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, -4, 1, 1, -1),
+ GROUP(
PS3_15_FN1, PS3_15_FN2,
PS3_14_FN1, PS3_14_FN2,
PS3_13_FN1, PS3_13_FN2,
@@ -2010,38 +2001,35 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PS3_9_FN1, PS3_9_FN2,
PS3_8_FN1, PS3_8_FN2,
PS3_7_FN1, PS3_7_FN2,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* RESERVED [4] */
PS3_2_FN1, PS3_2_FN2,
PS3_1_FN1, PS3_1_FN2,
- 0, 0, ))
+ /* RESERVED [1] */ ))
},
- { PINMUX_CFG_REG("PSEL4", 0xffec0078, 16, 1, GROUP(
- 0, 0,
+ { PINMUX_CFG_REG_VAR("PSEL4", 0xffec0078, 16,
+ GROUP(-1, 1, 1, 1, -1, 1, 1, 1, -3, 1, 1, 1,
+ 1, 1),
+ GROUP(
+ /* RESERVED [1] */
PS4_14_FN1, PS4_14_FN2,
PS4_13_FN1, PS4_13_FN2,
PS4_12_FN1, PS4_12_FN2,
- 0, 0,
+ /* RESERVED [1] */
PS4_10_FN1, PS4_10_FN2,
PS4_9_FN1, PS4_9_FN2,
PS4_8_FN1, PS4_8_FN2,
- 0, 0,
- 0, 0,
- 0, 0,
+ /* RESERVED [3] */
PS4_4_FN1, PS4_4_FN2,
PS4_3_FN1, PS4_3_FN2,
PS4_2_FN1, PS4_2_FN2,
PS4_1_FN1, PS4_1_FN2,
PS4_0_FN1, PS4_0_FN2, ))
},
- { PINMUX_CFG_REG("PSEL5", 0xffec007a, 16, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("PSEL5", 0xffec007a, 16,
+ GROUP(-4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -2),
+ GROUP(
+ /* RESERVED [4] */
PS5_11_FN1, PS5_11_FN2,
PS5_10_FN1, PS5_10_FN2,
PS5_9_FN1, PS5_9_FN2,
@@ -2052,8 +2040,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PS5_4_FN1, PS5_4_FN2,
PS5_3_FN1, PS5_3_FN2,
PS5_2_FN1, PS5_2_FN2,
- 0, 0,
- 0, 0, ))
+ /* RESERVED [2] */ ))
},
{ PINMUX_CFG_REG("PSEL6", 0xffec007c, 16, 1, GROUP(
PS6_15_FN1, PS6_15_FN2,
@@ -2073,7 +2060,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PS6_1_FN1, PS6_1_FN2,
PS6_0_FN1, PS6_0_FN2, ))
},
- { PINMUX_CFG_REG("PSEL7", 0xffec0082, 16, 1, GROUP(
+ { PINMUX_CFG_REG_VAR("PSEL7", 0xffec0082, 16,
+ GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -5),
+ GROUP(
PS7_15_FN1, PS7_15_FN2,
PS7_14_FN1, PS7_14_FN2,
PS7_13_FN1, PS7_13_FN2,
@@ -2085,13 +2074,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PS7_7_FN1, PS7_7_FN2,
PS7_6_FN1, PS7_6_FN2,
PS7_5_FN1, PS7_5_FN2,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0, ))
+ /* RESERVED [5] */ ))
},
- { PINMUX_CFG_REG("PSEL8", 0xffec0084, 16, 1, GROUP(
+ { PINMUX_CFG_REG_VAR("PSEL8", 0xffec0084, 16,
+ GROUP(1, 1, 1, 1, 1, 1, 1, 1, -8),
+ GROUP(
PS8_15_FN1, PS8_15_FN2,
PS8_14_FN1, PS8_14_FN2,
PS8_13_FN1, PS8_13_FN2,
@@ -2100,14 +2087,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PS8_10_FN1, PS8_10_FN2,
PS8_9_FN1, PS8_9_FN2,
PS8_8_FN1, PS8_8_FN2,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0, ))
+ /* RESERVED [8] */ ))
},
{}
};
diff --git a/drivers/pinctrl/renesas/pfc-sh7785.c b/drivers/pinctrl/renesas/pfc-sh7785.c
index c4c1e288c53e..126b663bb6eb 100644
--- a/drivers/pinctrl/renesas/pfc-sh7785.c
+++ b/drivers/pinctrl/renesas/pfc-sh7785.c
@@ -5,7 +5,6 @@
* Copyright (C) 2008 Magnus Damm
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <cpu/sh7785.h>
@@ -1025,9 +1024,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PD1_FN, PD1_OUT, PD1_IN, 0,
PD0_FN, PD0_OUT, PD0_IN, 0 ))
},
- { PINMUX_CFG_REG("PECR", 0xffe70008, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PECR", 0xffe70008, 16,
+ GROUP(-4, 2, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [4] */
PE5_FN, PE5_OUT, PE5_IN, 0,
PE4_FN, PE4_OUT, PE4_IN, 0,
PE3_FN, PE3_OUT, PE3_IN, 0,
@@ -1095,13 +1095,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PL1_FN, PL1_OUT, PL1_IN, 0,
PL0_FN, PL0_OUT, PL0_IN, 0 ))
},
- { PINMUX_CFG_REG("PMCR", 0xffe70016, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PMCR", 0xffe70016, 16,
+ GROUP(-12, 2, 2),
+ GROUP(
+ /* RESERVED [12] */
PM1_FN, PM1_OUT, PM1_IN, 0,
PM0_FN, PM0_OUT, PM0_IN, 0 ))
},
@@ -1115,9 +1112,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PN1_FN, PN1_OUT, PN1_IN, 0,
PN0_FN, PN0_OUT, PN0_IN, 0 ))
},
- { PINMUX_CFG_REG("PPCR", 0xffe7001a, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PPCR", 0xffe7001a, 16,
+ GROUP(-4, 2, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [4] */
PP5_FN, PP5_OUT, PP5_IN, 0,
PP4_FN, PP4_OUT, PP4_IN, 0,
PP3_FN, PP3_OUT, PP3_IN, 0,
@@ -1125,21 +1123,20 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PP1_FN, PP1_OUT, PP1_IN, 0,
PP0_FN, PP0_OUT, PP0_IN, 0 ))
},
- { PINMUX_CFG_REG("PQCR", 0xffe7001c, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PQCR", 0xffe7001c, 16,
+ GROUP(-6, 2, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [6] */
PQ4_FN, PQ4_OUT, PQ4_IN, 0,
PQ3_FN, PQ3_OUT, PQ3_IN, 0,
PQ2_FN, PQ2_OUT, PQ2_IN, 0,
PQ1_FN, PQ1_OUT, PQ1_IN, 0,
PQ0_FN, PQ0_OUT, PQ0_IN, 0 ))
},
- { PINMUX_CFG_REG("PRCR", 0xffe7001e, 16, 2, GROUP(
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
+ { PINMUX_CFG_REG_VAR("PRCR", 0xffe7001e, 16,
+ GROUP(-8, 2, 2, 2, 2),
+ GROUP(
+ /* RESERVED [8] */
PR3_FN, PR3_OUT, PR3_IN, 0,
PR2_FN, PR2_OUT, PR2_IN, 0,
PR1_FN, PR1_OUT, PR1_IN, 0,
@@ -1163,20 +1160,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
P1MSEL1_0, P1MSEL1_1,
P1MSEL0_0, P1MSEL0_1 ))
},
- { PINMUX_CFG_REG("P2MSELR", 0xffe70082, 16, 1, GROUP(
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
- 0, 0,
+ { PINMUX_CFG_REG_VAR("P2MSELR", 0xffe70082, 16,
+ GROUP(-13, 1, 1, 1),
+ GROUP(
+ /* RESERVED [13] */
P2MSEL2_0, P2MSEL2_1,
P2MSEL1_0, P2MSEL1_1,
P2MSEL0_0, P2MSEL0_1 ))
diff --git a/drivers/pinctrl/renesas/pfc-sh7786.c b/drivers/pinctrl/renesas/pfc-sh7786.c
index b8a098cd7721..f09f4a769010 100644
--- a/drivers/pinctrl/renesas/pfc-sh7786.c
+++ b/drivers/pinctrl/renesas/pfc-sh7786.c
@@ -10,7 +10,6 @@
* Copyright (C) 2008 Magnus Damm
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <cpu/sh7786.h>
@@ -667,15 +666,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PD1_FN, PD1_OUT, PD1_IN, 0,
PD0_FN, PD0_OUT, PD0_IN, 0 ))
},
- { PINMUX_CFG_REG("PECR", 0xffcc0008, 16, 2, GROUP(
+ { PINMUX_CFG_REG_VAR("PECR", 0xffcc0008, 16,
+ GROUP(2, 2, -12),
+ GROUP(
PE7_FN, PE7_OUT, PE7_IN, 0,
PE6_FN, PE6_OUT, PE6_IN, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0, ))
+ /* RESERVED [12] */ ))
},
{ PINMUX_CFG_REG("PFCR", 0xffcc000a, 16, 2, GROUP(
PF7_FN, PF7_OUT, PF7_IN, 0,
@@ -687,15 +683,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PF1_FN, PF1_OUT, PF1_IN, 0,
PF0_FN, PF0_OUT, PF0_IN, 0 ))
},
- { PINMUX_CFG_REG("PGCR", 0xffcc000c, 16, 2, GROUP(
+ { PINMUX_CFG_REG_VAR("PGCR", 0xffcc000c, 16,
+ GROUP(2, 2, 2, -10),
+ GROUP(
PG7_FN, PG7_OUT, PG7_IN, 0,
PG6_FN, PG6_OUT, PG6_IN, 0,
PG5_FN, PG5_OUT, PG5_IN, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0, ))
+ /* RESERVED [10] */ ))
},
{ PINMUX_CFG_REG("PHCR", 0xffcc000e, 16, 2, GROUP(
PH7_FN, PH7_OUT, PH7_IN, 0,
diff --git a/drivers/pinctrl/renesas/pfc-shx3.c b/drivers/pinctrl/renesas/pfc-shx3.c
index 22e812850964..96a65d83774f 100644
--- a/drivers/pinctrl/renesas/pfc-shx3.c
+++ b/drivers/pinctrl/renesas/pfc-shx3.c
@@ -4,7 +4,6 @@
*
* Copyright (C) 2010 Paul Mundt
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <cpu/shx3.h>
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index cb805502fb0f..a48cac55152c 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -996,83 +996,112 @@ static const u32 rzg2l_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(5, 0x40, RZG2L_MPXED_PIN_FUNCS),
};
-static struct rzg2l_dedicated_configs rzg2l_dedicated_pins[] = {
- { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0,
- (PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL)) },
- { "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x2, 0,
- (PIN_CFG_SR | PIN_CFG_IOLH_A | PIN_CFG_IEN)) },
- { "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 0,
- (PIN_CFG_IOLH_A | PIN_CFG_SR | PIN_CFG_IEN)) },
- { "AUDIO_CLK1", RZG2L_SINGLE_PIN_PACK(0x4, 0, PIN_CFG_IEN) },
- { "AUDIO_CLK2", RZG2L_SINGLE_PIN_PACK(0x4, 1, PIN_CFG_IEN) },
- { "SD0_CLK", RZG2L_SINGLE_PIN_PACK(0x6, 0,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD0)) },
- { "SD0_CMD", RZG2L_SINGLE_PIN_PACK(0x6, 1,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
- { "SD0_RST#", RZG2L_SINGLE_PIN_PACK(0x6, 2,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD0)) },
- { "SD0_DATA0", RZG2L_SINGLE_PIN_PACK(0x7, 0,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
- { "SD0_DATA1", RZG2L_SINGLE_PIN_PACK(0x7, 1,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
- { "SD0_DATA2", RZG2L_SINGLE_PIN_PACK(0x7, 2,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
- { "SD0_DATA3", RZG2L_SINGLE_PIN_PACK(0x7, 3,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
- { "SD0_DATA4", RZG2L_SINGLE_PIN_PACK(0x7, 4,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
- { "SD0_DATA5", RZG2L_SINGLE_PIN_PACK(0x7, 5,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
- { "SD0_DATA6", RZG2L_SINGLE_PIN_PACK(0x7, 6,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
- { "SD0_DATA7", RZG2L_SINGLE_PIN_PACK(0x7, 7,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
- { "SD1_CLK", RZG2L_SINGLE_PIN_PACK(0x8, 0,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD1)) },
- { "SD1_CMD", RZG2L_SINGLE_PIN_PACK(0x8, 1,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
- { "SD1_DATA0", RZG2L_SINGLE_PIN_PACK(0x9, 0,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
- { "SD1_DATA1", RZG2L_SINGLE_PIN_PACK(0x9, 1,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
- { "SD1_DATA2", RZG2L_SINGLE_PIN_PACK(0x9, 2,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
- { "SD1_DATA3", RZG2L_SINGLE_PIN_PACK(0x9, 3,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
- { "QSPI0_SPCLK", RZG2L_SINGLE_PIN_PACK(0xa, 0,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
- { "QSPI0_IO0", RZG2L_SINGLE_PIN_PACK(0xa, 1,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
- { "QSPI0_IO1", RZG2L_SINGLE_PIN_PACK(0xa, 2,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
- { "QSPI0_IO2", RZG2L_SINGLE_PIN_PACK(0xa, 3,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
- { "QSPI0_IO3", RZG2L_SINGLE_PIN_PACK(0xa, 4,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
- { "QSPI0_SSL", RZG2L_SINGLE_PIN_PACK(0xa, 5,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
- { "QSPI1_SPCLK", RZG2L_SINGLE_PIN_PACK(0xb, 0,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
- { "QSPI1_IO0", RZG2L_SINGLE_PIN_PACK(0xb, 1,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
- { "QSPI1_IO1", RZG2L_SINGLE_PIN_PACK(0xb, 2,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
- { "QSPI1_IO2", RZG2L_SINGLE_PIN_PACK(0xb, 3,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
- { "QSPI1_IO3", RZG2L_SINGLE_PIN_PACK(0xb, 4,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
- { "QSPI1_SSL", RZG2L_SINGLE_PIN_PACK(0xb, 5,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
- { "QSPI_RESET#", RZG2L_SINGLE_PIN_PACK(0xc, 0,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
- { "QSPI_WP#", RZG2L_SINGLE_PIN_PACK(0xc, 1,
- (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
- { "QSPI_INT#", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
- { "WDTOVF_PERROUT#", RZG2L_SINGLE_PIN_PACK(0xd, 0, (PIN_CFG_IOLH_A | PIN_CFG_SR)) },
- { "RIIC0_SDA", RZG2L_SINGLE_PIN_PACK(0xe, 0, PIN_CFG_IEN) },
- { "RIIC0_SCL", RZG2L_SINGLE_PIN_PACK(0xe, 1, PIN_CFG_IEN) },
- { "RIIC1_SDA", RZG2L_SINGLE_PIN_PACK(0xe, 2, PIN_CFG_IEN) },
- { "RIIC1_SCL", RZG2L_SINGLE_PIN_PACK(0xe, 3, PIN_CFG_IEN) },
+static const u32 r9a07g043_gpio_configs[] = {
+ RZG2L_GPIO_PORT_PACK(4, 0x10, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(5, 0x11, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(4, 0x12, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(4, 0x13, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(6, 0x14, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(5, 0x15, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(5, 0x16, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(5, 0x17, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(5, 0x18, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(4, 0x19, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(5, 0x1a, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(4, 0x1b, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x1c, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(5, 0x1d, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(3, 0x1e, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(4, 0x1f, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(2, 0x20, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(4, 0x21, RZG2L_MPXED_PIN_FUNCS),
+ RZG2L_GPIO_PORT_PACK(6, 0x22, RZG2L_MPXED_PIN_FUNCS),
+};
+
+static struct {
+ struct rzg2l_dedicated_configs common[35];
+ struct rzg2l_dedicated_configs rzg2l_pins[7];
+} rzg2l_dedicated_pins = {
+ .common = {
+ { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0,
+ (PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL)) },
+ { "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x2, 0,
+ (PIN_CFG_IOLH_A | PIN_CFG_SR | PIN_CFG_IEN)) },
+ { "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 0,
+ (PIN_CFG_IOLH_A | PIN_CFG_SR | PIN_CFG_IEN)) },
+ { "AUDIO_CLK1", RZG2L_SINGLE_PIN_PACK(0x4, 0, PIN_CFG_IEN) },
+ { "AUDIO_CLK2", RZG2L_SINGLE_PIN_PACK(0x4, 1, PIN_CFG_IEN) },
+ { "SD0_CLK", RZG2L_SINGLE_PIN_PACK(0x6, 0,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_CMD", RZG2L_SINGLE_PIN_PACK(0x6, 1,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_RST#", RZG2L_SINGLE_PIN_PACK(0x6, 2,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA0", RZG2L_SINGLE_PIN_PACK(0x7, 0,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA1", RZG2L_SINGLE_PIN_PACK(0x7, 1,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA2", RZG2L_SINGLE_PIN_PACK(0x7, 2,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA3", RZG2L_SINGLE_PIN_PACK(0x7, 3,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA4", RZG2L_SINGLE_PIN_PACK(0x7, 4,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA5", RZG2L_SINGLE_PIN_PACK(0x7, 5,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA6", RZG2L_SINGLE_PIN_PACK(0x7, 6,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
+ { "SD0_DATA7", RZG2L_SINGLE_PIN_PACK(0x7, 7,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
+ { "SD1_CLK", RZG2L_SINGLE_PIN_PACK(0x8, 0,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD1)) },
+ { "SD1_CMD", RZG2L_SINGLE_PIN_PACK(0x8, 1,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
+ { "SD1_DATA0", RZG2L_SINGLE_PIN_PACK(0x9, 0,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
+ { "SD1_DATA1", RZG2L_SINGLE_PIN_PACK(0x9, 1,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
+ { "SD1_DATA2", RZG2L_SINGLE_PIN_PACK(0x9, 2,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
+ { "SD1_DATA3", RZG2L_SINGLE_PIN_PACK(0x9, 3,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
+ { "QSPI0_SPCLK", RZG2L_SINGLE_PIN_PACK(0xa, 0,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "QSPI0_IO0", RZG2L_SINGLE_PIN_PACK(0xa, 1,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "QSPI0_IO1", RZG2L_SINGLE_PIN_PACK(0xa, 2,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "QSPI0_IO2", RZG2L_SINGLE_PIN_PACK(0xa, 3,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "QSPI0_IO3", RZG2L_SINGLE_PIN_PACK(0xa, 4,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "QSPI0_SSL", RZG2L_SINGLE_PIN_PACK(0xa, 5,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "QSPI_RESET#", RZG2L_SINGLE_PIN_PACK(0xc, 0,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "QSPI_WP#", RZG2L_SINGLE_PIN_PACK(0xc, 1,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "WDTOVF_PERROUT#", RZG2L_SINGLE_PIN_PACK(0xd, 0, (PIN_CFG_IOLH_A | PIN_CFG_SR)) },
+ { "RIIC0_SDA", RZG2L_SINGLE_PIN_PACK(0xe, 0, PIN_CFG_IEN) },
+ { "RIIC0_SCL", RZG2L_SINGLE_PIN_PACK(0xe, 1, PIN_CFG_IEN) },
+ { "RIIC1_SDA", RZG2L_SINGLE_PIN_PACK(0xe, 2, PIN_CFG_IEN) },
+ { "RIIC1_SCL", RZG2L_SINGLE_PIN_PACK(0xe, 3, PIN_CFG_IEN) },
+ },
+ .rzg2l_pins = {
+ { "QSPI_INT#", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "QSPI1_SPCLK", RZG2L_SINGLE_PIN_PACK(0xb, 0,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "QSPI1_IO0", RZG2L_SINGLE_PIN_PACK(0xb, 1,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "QSPI1_IO1", RZG2L_SINGLE_PIN_PACK(0xb, 2,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "QSPI1_IO2", RZG2L_SINGLE_PIN_PACK(0xb, 3,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "QSPI1_IO3", RZG2L_SINGLE_PIN_PACK(0xb, 4,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "QSPI1_SSL", RZG2L_SINGLE_PIN_PACK(0xb, 5,
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ }
};
static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
@@ -1250,16 +1279,29 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
return 0;
}
+static struct rzg2l_pinctrl_data r9a07g043_data = {
+ .port_pins = rzg2l_gpio_names,
+ .port_pin_configs = r9a07g043_gpio_configs,
+ .dedicated_pins = rzg2l_dedicated_pins.common,
+ .n_port_pins = ARRAY_SIZE(r9a07g043_gpio_configs) * RZG2L_PINS_PER_PORT,
+ .n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins.common),
+};
+
static struct rzg2l_pinctrl_data r9a07g044_data = {
.port_pins = rzg2l_gpio_names,
.port_pin_configs = rzg2l_gpio_configs,
- .dedicated_pins = rzg2l_dedicated_pins,
+ .dedicated_pins = rzg2l_dedicated_pins.common,
.n_port_pins = ARRAY_SIZE(rzg2l_gpio_names),
- .n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins),
+ .n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins.common) +
+ ARRAY_SIZE(rzg2l_dedicated_pins.rzg2l_pins),
};
static const struct of_device_id rzg2l_pinctrl_of_table[] = {
{
+ .compatible = "renesas,r9a07g043-pinctrl",
+ .data = &r9a07g043_data,
+ },
+ {
.compatible = "renesas,r9a07g044-pinctrl",
.data = &r9a07g044_data,
},
diff --git a/drivers/pinctrl/renesas/pinctrl-rzn1.c b/drivers/pinctrl/renesas/pinctrl-rzn1.c
index ef5fb25b6016..849d091205d4 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzn1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzn1.c
@@ -865,17 +865,15 @@ static int rzn1_pinctrl_probe(struct platform_device *pdev)
ipctl->mdio_func[0] = -1;
ipctl->mdio_func[1] = -1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ipctl->lev1_protect_phys = (u32)res->start + 0x400;
- ipctl->lev1 = devm_ioremap_resource(&pdev->dev, res);
+ ipctl->lev1 = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ipctl->lev1))
return PTR_ERR(ipctl->lev1);
+ ipctl->lev1_protect_phys = (u32)res->start + 0x400;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ipctl->lev2_protect_phys = (u32)res->start + 0x400;
- ipctl->lev2 = devm_ioremap_resource(&pdev->dev, res);
+ ipctl->lev2 = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
if (IS_ERR(ipctl->lev2))
return PTR_ERR(ipctl->lev2);
+ ipctl->lev2_protect_phys = (u32)res->start + 0x400;
ipctl->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ipctl->clk))
diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
index 4c37aebc75b8..b438d24c13b5 100644
--- a/drivers/pinctrl/renesas/pinctrl.c
+++ b/drivers/pinctrl/renesas/pinctrl.c
@@ -9,7 +9,6 @@
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
diff --git a/drivers/pinctrl/renesas/sh_pfc.h b/drivers/pinctrl/renesas/sh_pfc.h
index 6b5836ea47de..12bc279f5733 100644
--- a/drivers/pinctrl/renesas/sh_pfc.h
+++ b/drivers/pinctrl/renesas/sh_pfc.h
@@ -112,7 +112,7 @@ struct pinmux_cfg_reg {
#define SET_NR_ENUM_IDS(n)
#endif
const u16 *enum_ids;
- const u8 *var_field_width;
+ const s8 *var_field_width;
};
#define GROUP(...) __VA_ARGS__
@@ -132,9 +132,8 @@ struct pinmux_cfg_reg {
.reg = r, .reg_width = r_width, \
.field_width = f_width + BUILD_BUG_ON_ZERO(r_width % f_width) + \
BUILD_BUG_ON_ZERO(sizeof((const u16 []) { ids }) / sizeof(u16) != \
- (r_width / f_width) * (1 << f_width)), \
- .enum_ids = (const u16 [(r_width / f_width) * (1 << f_width)]) \
- { ids }
+ (r_width / f_width) << f_width), \
+ .enum_ids = (const u16 [(r_width / f_width) << f_width]) { ids }
/*
* Describe a config register consisting of several fields of different widths
@@ -143,14 +142,15 @@ struct pinmux_cfg_reg {
* - r_width: Width of the register (in bits)
* - f_widths: List of widths of the register fields (in bits), from left
* to right (i.e. MSB to LSB), wrapped using the GROUP() macro.
- * - ids: For each register field (from left to right, i.e. MSB to LSB),
- * 2^f_widths[i] enum IDs must be specified, one for each possible
- * combination of the register field bit values, all wrapped using
- * the GROUP() macro.
+ * Reserved fields are indicated by negating the field width.
+ * - ids: For each non-reserved register field (from left to right, i.e. MSB
+ * to LSB), 2^f_widths[i] enum IDs must be specified, one for each
+ * possible combination of the register field bit values, all wrapped
+ * using the GROUP() macro.
*/
#define PINMUX_CFG_REG_VAR(name, r, r_width, f_widths, ids) \
.reg = r, .reg_width = r_width, \
- .var_field_width = (const u8 []) { f_widths, 0 }, \
+ .var_field_width = (const s8 []) { f_widths, 0 }, \
SET_NR_ENUM_IDS(sizeof((const u16 []) { ids }) / sizeof(u16)) \
.enum_ids = (const u16 []) { ids }
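
	/*
	 * Editor's note, for reference only: the negated-width convention
	 * described above means a reserved span contributes its (positive)
	 * width to the register layout but supplies no enum IDs. A minimal
	 * hypothetical sketch follows; the register name, address and the
	 * FOO_* / BAR_* enum IDs are made up for illustration and are not
	 * part of this patch.
	 */
	{ PINMUX_CFG_REG_VAR("EXAMPLECR", 0xa4050000, 8,
			     GROUP(-2, 2, -2, 2),
			     GROUP(
		/* RESERVED [2] */
		FOO_FN, FOO_OUT, 0, FOO_IN,
		/* RESERVED [2] */
		BAR_FN, BAR_OUT, 0, BAR_IN ))
	},
	/*
	 * Each non-reserved 2-bit field still lists 2^2 = 4 enum IDs, while
	 * the two reserved fields list none, so the ids array holds 8
	 * entries instead of the 16 a fully populated 8-bit register with
	 * 2-bit fields would need.
	 */
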
@@ -162,7 +162,7 @@ struct pinmux_drive_reg_field {
struct pinmux_drive_reg {
u32 reg;
- const struct pinmux_drive_reg_field fields[8];
+ const struct pinmux_drive_reg_field fields[10];
};
#define PINMUX_DRIVE_REG(name, r) \
@@ -739,14 +739,12 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
* PORTnCR helper macro for SH-Mobile/R-Mobile
*/
#define PORTCR(nr, reg) { \
- PINMUX_CFG_REG_VAR("PORT" nr "CR", reg, 8, GROUP(2, 2, 1, 3), \
+ PINMUX_CFG_REG_VAR("PORT" nr "CR", reg, 8, GROUP(-2, 2, -1, 3), \
GROUP( \
/* PULMD[1:0], handled by .set_bias() */ \
- 0, 0, 0, 0, \
/* IE and OE */ \
0, PORT##nr##_OUT, PORT##nr##_IN, 0, \
/* SEC, not supported */ \
- 0, 0, \
/* PTMD[2:0] */ \
PORT##nr##_FN0, PORT##nr##_FN1, \
PORT##nr##_FN2, PORT##nr##_FN3, \
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 7aecd0efde07..57a33fb0f2d7 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -44,6 +44,7 @@
#define STM32_GPIO_LCKR 0x1c
#define STM32_GPIO_AFRL 0x20
#define STM32_GPIO_AFRH 0x24
+#define STM32_GPIO_SECCFGR 0x30
/* custom bitfield to backup pin status */
#define STM32_GPIO_BKP_MODE_SHIFT 0
@@ -95,6 +96,7 @@ struct stm32_gpio_bank {
u32 bank_ioport_nr;
u32 pin_backup[STM32_GPIO_PINS_PER_BANK];
u8 irq_type[STM32_GPIO_PINS_PER_BANK];
+ bool secure_control;
};
struct stm32_pinctrl {
@@ -198,11 +200,7 @@ static inline void __stm32_gpio_set(struct stm32_gpio_bank *bank,
if (!value)
offset += STM32_GPIO_PINS_PER_BANK;
- clk_enable(bank->clk);
-
writel_relaxed(BIT(offset), bank->base + STM32_GPIO_BSRR);
-
- clk_disable(bank->clk);
}
static int stm32_gpio_request(struct gpio_chip *chip, unsigned offset)
@@ -226,25 +224,11 @@ static void stm32_gpio_free(struct gpio_chip *chip, unsigned offset)
pinctrl_gpio_free(chip->base + offset);
}
-static int stm32_gpio_get_noclk(struct gpio_chip *chip, unsigned int offset)
-{
- struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
-
- return !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
-}
-
static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
- int ret;
-
- clk_enable(bank->clk);
-
- ret = stm32_gpio_get_noclk(chip, offset);
- clk_disable(bank->clk);
-
- return ret;
+ return !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
}
static void stm32_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -302,6 +286,33 @@ static int stm32_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
return ret;
}
+static int stm32_gpio_init_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
+ struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
+ unsigned int i;
+ u32 sec;
+
+ /* All gpio are valid per default */
+ bitmap_fill(valid_mask, ngpios);
+
+ if (bank->secure_control) {
+ /* Tag secured pins as invalid */
+ sec = readl_relaxed(bank->base + STM32_GPIO_SECCFGR);
+
+ for (i = 0; i < ngpios; i++) {
+ if (sec & BIT(i)) {
+ clear_bit(i, valid_mask);
+ dev_dbg(pctl->dev, "No access to gpio %d - %d\n", bank->bank_nr, i);
+ }
+ }
+ }
+
+ return 0;
+}
+
static const struct gpio_chip stm32_gpio_template = {
.request = stm32_gpio_request,
.free = stm32_gpio_free,
@@ -312,6 +323,7 @@ static const struct gpio_chip stm32_gpio_template = {
.to_irq = stm32_gpio_to_irq,
.get_direction = stm32_gpio_get_direction,
.set_config = gpiochip_generic_config,
+ .init_valid_mask = stm32_gpio_init_valid_mask,
};
static void stm32_gpio_irq_trigger(struct irq_data *d)
@@ -324,7 +336,7 @@ static void stm32_gpio_irq_trigger(struct irq_data *d)
return;
/* If level interrupt type then retrig */
- level = stm32_gpio_get_noclk(&bank->gpio_chip, d->hwirq);
+ level = stm32_gpio_get(&bank->gpio_chip, d->hwirq);
if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) ||
(level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH))
irq_chip_retrigger_hierarchy(d);
@@ -366,7 +378,6 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
{
struct stm32_gpio_bank *bank = irq_data->domain->host_data;
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
- unsigned long flags;
int ret;
ret = stm32_gpio_direction_input(&bank->gpio_chip, irq_data->hwirq);
@@ -380,10 +391,6 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
return ret;
}
- flags = irqd_get_trigger_type(irq_data);
- if (flags & IRQ_TYPE_LEVEL_MASK)
- clk_enable(bank->clk);
-
return 0;
}
@@ -391,9 +398,6 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data)
{
struct stm32_gpio_bank *bank = irq_data->domain->host_data;
- if (bank->irq_type[irq_data->hwirq] & IRQ_TYPE_LEVEL_MASK)
- clk_disable(bank->clk);
-
gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
}
@@ -534,7 +538,7 @@ stm32_pctrl_find_group_by_pin(struct stm32_pinctrl *pctl, u32 pin)
static bool stm32_pctrl_is_function_valid(struct stm32_pinctrl *pctl,
u32 pin_num, u32 fnum)
{
- int i;
+ int i, k;
for (i = 0; i < pctl->npins; i++) {
const struct stm32_desc_pin *pin = pctl->pins + i;
@@ -543,7 +547,7 @@ static bool stm32_pctrl_is_function_valid(struct stm32_pinctrl *pctl,
if (pin->pin.number != pin_num)
continue;
- while (func && func->name) {
+ for (k = 0; k < STM32_CONFIG_NUM; k++) {
if (func->num == fnum)
return true;
func++;
@@ -770,7 +774,6 @@ static int stm32_pmx_set_mode(struct stm32_gpio_bank *bank,
unsigned long flags;
int err = 0;
- clk_enable(bank->clk);
spin_lock_irqsave(&bank->lock, flags);
if (pctl->hwlock) {
@@ -799,7 +802,6 @@ static int stm32_pmx_set_mode(struct stm32_gpio_bank *bank,
unlock:
spin_unlock_irqrestore(&bank->lock, flags);
- clk_disable(bank->clk);
return err;
}
@@ -812,7 +814,6 @@ void stm32_pmx_get_mode(struct stm32_gpio_bank *bank, int pin, u32 *mode,
int alt_offset = STM32_GPIO_AFRL + (pin / 8) * 4;
unsigned long flags;
- clk_enable(bank->clk);
spin_lock_irqsave(&bank->lock, flags);
val = readl_relaxed(bank->base + alt_offset);
@@ -824,7 +825,6 @@ void stm32_pmx_get_mode(struct stm32_gpio_bank *bank, int pin, u32 *mode,
*mode = val >> (pin * 2);
spin_unlock_irqrestore(&bank->lock, flags);
- clk_disable(bank->clk);
}
static int stm32_pmx_set_mux(struct pinctrl_dev *pctldev,
@@ -868,12 +868,32 @@ static int stm32_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
return stm32_pmx_set_mode(bank, pin, !input, 0);
}
+static int stm32_pmx_request(struct pinctrl_dev *pctldev, unsigned int gpio)
+{
+ struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct pinctrl_gpio_range *range;
+
+ range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, gpio);
+ if (!range) {
+ dev_err(pctl->dev, "No gpio range defined.\n");
+ return -EINVAL;
+ }
+
+ if (!gpiochip_line_is_valid(range->gc, stm32_gpio_pin(gpio))) {
+ dev_warn(pctl->dev, "Can't access gpio %d\n", gpio);
+ return -EACCES;
+ }
+
+ return 0;
+}
+
static const struct pinmux_ops stm32_pmx_ops = {
.get_functions_count = stm32_pmx_get_funcs_cnt,
.get_function_name = stm32_pmx_get_func_name,
.get_function_groups = stm32_pmx_get_func_groups,
.set_mux = stm32_pmx_set_mux,
.gpio_set_direction = stm32_pmx_gpio_set_direction,
+ .request = stm32_pmx_request,
.strict = true,
};
@@ -887,7 +907,6 @@ static int stm32_pconf_set_driving(struct stm32_gpio_bank *bank,
u32 val;
int err = 0;
- clk_enable(bank->clk);
spin_lock_irqsave(&bank->lock, flags);
if (pctl->hwlock) {
@@ -911,7 +930,6 @@ static int stm32_pconf_set_driving(struct stm32_gpio_bank *bank,
unlock:
spin_unlock_irqrestore(&bank->lock, flags);
- clk_disable(bank->clk);
return err;
}
@@ -922,14 +940,12 @@ static u32 stm32_pconf_get_driving(struct stm32_gpio_bank *bank,
unsigned long flags;
u32 val;
- clk_enable(bank->clk);
spin_lock_irqsave(&bank->lock, flags);
val = readl_relaxed(bank->base + STM32_GPIO_TYPER);
val &= BIT(offset);
spin_unlock_irqrestore(&bank->lock, flags);
- clk_disable(bank->clk);
return (val >> offset);
}
@@ -942,7 +958,6 @@ static int stm32_pconf_set_speed(struct stm32_gpio_bank *bank,
u32 val;
int err = 0;
- clk_enable(bank->clk);
spin_lock_irqsave(&bank->lock, flags);
if (pctl->hwlock) {
@@ -966,7 +981,6 @@ static int stm32_pconf_set_speed(struct stm32_gpio_bank *bank,
unlock:
spin_unlock_irqrestore(&bank->lock, flags);
- clk_disable(bank->clk);
return err;
}
@@ -977,14 +991,12 @@ static u32 stm32_pconf_get_speed(struct stm32_gpio_bank *bank,
unsigned long flags;
u32 val;
- clk_enable(bank->clk);
spin_lock_irqsave(&bank->lock, flags);
val = readl_relaxed(bank->base + STM32_GPIO_SPEEDR);
val &= GENMASK(offset * 2 + 1, offset * 2);
spin_unlock_irqrestore(&bank->lock, flags);
- clk_disable(bank->clk);
return (val >> (offset * 2));
}
@@ -997,7 +1009,6 @@ static int stm32_pconf_set_bias(struct stm32_gpio_bank *bank,
u32 val;
int err = 0;
- clk_enable(bank->clk);
spin_lock_irqsave(&bank->lock, flags);
if (pctl->hwlock) {
@@ -1021,7 +1032,6 @@ static int stm32_pconf_set_bias(struct stm32_gpio_bank *bank,
unlock:
spin_unlock_irqrestore(&bank->lock, flags);
- clk_disable(bank->clk);
return err;
}
@@ -1032,14 +1042,12 @@ static u32 stm32_pconf_get_bias(struct stm32_gpio_bank *bank,
unsigned long flags;
u32 val;
- clk_enable(bank->clk);
spin_lock_irqsave(&bank->lock, flags);
val = readl_relaxed(bank->base + STM32_GPIO_PUPDR);
val &= GENMASK(offset * 2 + 1, offset * 2);
spin_unlock_irqrestore(&bank->lock, flags);
- clk_disable(bank->clk);
return (val >> (offset * 2));
}
@@ -1050,7 +1058,6 @@ static bool stm32_pconf_get(struct stm32_gpio_bank *bank,
unsigned long flags;
u32 val;
- clk_enable(bank->clk);
spin_lock_irqsave(&bank->lock, flags);
if (dir)
@@ -1061,7 +1068,6 @@ static bool stm32_pconf_get(struct stm32_gpio_bank *bank,
BIT(offset));
spin_unlock_irqrestore(&bank->lock, flags);
- clk_disable(bank->clk);
return val;
}
@@ -1084,6 +1090,11 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
bank = gpiochip_get_data(range->gc);
offset = stm32_gpio_pin(pin);
+ if (!gpiochip_line_is_valid(range->gc, offset)) {
+ dev_warn(pctl->dev, "Can't access gpio %d\n", pin);
+ return -EACCES;
+ }
+
switch (param) {
case PIN_CONFIG_DRIVE_PUSH_PULL:
ret = stm32_pconf_set_driving(bank, offset, 0);
@@ -1163,10 +1174,27 @@ static int stm32_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
return 0;
}
+static struct stm32_desc_pin *
+stm32_pconf_get_pin_desc_by_pin_number(struct stm32_pinctrl *pctl,
+ unsigned int pin_number)
+{
+ struct stm32_desc_pin *pins = pctl->pins;
+ int i;
+
+ for (i = 0; i < pctl->npins; i++) {
+ if (pins->pin.number == pin_number)
+ return pins;
+ pins++;
+ }
+ return NULL;
+}
+
static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s,
unsigned int pin)
{
+ struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct stm32_desc_pin *pin_desc;
struct pinctrl_gpio_range *range;
struct stm32_gpio_bank *bank;
int offset;
@@ -1186,6 +1214,11 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
bank = gpiochip_get_data(range->gc);
offset = stm32_gpio_pin(pin);
+ if (!gpiochip_line_is_valid(range->gc, offset)) {
+ seq_puts(s, "NO ACCESS");
+ return;
+ }
+
stm32_pmx_get_mode(bank, offset, &mode, &alt);
bias = stm32_pconf_get_bias(bank, offset);
@@ -1216,7 +1249,12 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
case 2:
drive = stm32_pconf_get_driving(bank, offset);
speed = stm32_pconf_get_speed(bank, offset);
- seq_printf(s, "%d - %s - %s - %s %s", alt,
+ pin_desc = stm32_pconf_get_pin_desc_by_pin_number(pctl, pin);
+ if (!pin_desc)
+ return;
+
+ seq_printf(s, "%d (%s) - %s - %s - %s %s", alt,
+ pin_desc->functions[alt + 1].name,
drive ? "open drain" : "push pull",
biasing[bias],
speeds[speed], "speed");
@@ -1256,9 +1294,9 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
if (IS_ERR(bank->base))
return PTR_ERR(bank->base);
- err = clk_prepare(bank->clk);
+ err = clk_prepare_enable(bank->clk);
if (err) {
- dev_err(dev, "failed to prepare clk (%d)\n", err);
+ dev_err(dev, "failed to prepare_enable clk (%d)\n", err);
return err;
}
@@ -1297,6 +1335,7 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
bank->gpio_chip.parent = dev;
bank->bank_nr = bank_nr;
bank->bank_ioport_nr = bank_ioport_nr;
+ bank->secure_control = pctl->match_data->secure_control;
spin_lock_init(&bank->lock);
/* create irq hierarchical domain */
@@ -1306,21 +1345,28 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
STM32_GPIO_IRQ_LINE, bank->fwnode,
&stm32_gpio_domain_ops, bank);
- if (!bank->domain)
- return -ENODEV;
+ if (!bank->domain) {
+ err = -ENODEV;
+ goto err_clk;
+ }
err = gpiochip_add_data(&bank->gpio_chip, bank);
if (err) {
dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_nr);
- return err;
+ goto err_clk;
}
dev_info(dev, "%s bank added\n", bank->gpio_chip.label);
return 0;
+
+err_clk:
+ clk_disable_unprepare(bank->clk);
+ return err;
}
-static struct irq_domain *stm32_pctrl_get_irq_domain(struct device_node *np)
+static struct irq_domain *stm32_pctrl_get_irq_domain(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct device_node *parent;
struct irq_domain *domain;
@@ -1424,7 +1470,8 @@ static int stm32_pctrl_create_pins_tab(struct stm32_pinctrl *pctl,
if (pctl->pkg && !(pctl->pkg & p->pkg))
continue;
pins->pin = p->pin;
- pins->functions = p->functions;
+ memcpy((struct stm32_desc_pin *)pins->functions, p->functions,
+ STM32_CONFIG_NUM * sizeof(struct stm32_desc_function));
pins++;
nb_pins_available++;
}
@@ -1436,23 +1483,19 @@ static int stm32_pctrl_create_pins_tab(struct stm32_pinctrl *pctl,
int stm32_pctl_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ const struct stm32_pinctrl_match_data *match_data;
struct fwnode_handle *child;
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct stm32_pinctrl *pctl;
struct pinctrl_pin_desc *pins;
int i, ret, hwlock_id;
unsigned int banks;
- if (!np)
+ match_data = device_get_match_data(dev);
+ if (!match_data)
return -EINVAL;
- match = of_match_device(dev->driver->of_match_table, dev);
- if (!match || !match->data)
- return -EINVAL;
-
- if (!of_find_property(np, "pins-are-numbered", NULL)) {
+ if (!device_property_present(dev, "pins-are-numbered")) {
dev_err(dev, "only support pins-are-numbered format\n");
return -EINVAL;
}
@@ -1464,7 +1507,7 @@ int stm32_pctl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pctl);
/* check for IRQ controller (may require deferred probe) */
- pctl->domain = stm32_pctrl_get_irq_domain(np);
+ pctl->domain = stm32_pctrl_get_irq_domain(pdev);
if (IS_ERR(pctl->domain))
return PTR_ERR(pctl->domain);
@@ -1480,10 +1523,10 @@ int stm32_pctl_probe(struct platform_device *pdev)
spin_lock_init(&pctl->irqmux_lock);
pctl->dev = dev;
- pctl->match_data = match->data;
+ pctl->match_data = match_data;
/* get optional package information */
- if (!of_property_read_u32(np, "st,package", &pctl->pkg))
+ if (!device_property_read_u32(dev, "st,package", &pctl->pkg))
dev_dbg(pctl->dev, "package detected: %x\n", pctl->pkg);
pctl->pins = devm_kcalloc(pctl->dev, pctl->match_data->npins,
@@ -1568,6 +1611,10 @@ int stm32_pctl_probe(struct platform_device *pdev)
ret = stm32_gpiolib_register_bank(pctl, child);
if (ret) {
fwnode_handle_put(child);
+
+ for (i = 0; i < pctl->nbanks; i++)
+ clk_disable_unprepare(pctl->banks[i].clk);
+
return ret;
}
@@ -1593,6 +1640,9 @@ static int __maybe_unused stm32_pinctrl_restore_gpio_regs(
if (!range)
return 0;
+ if (!gpiochip_line_is_valid(range->gc, offset))
+ return 0;
+
pin_is_irq = gpiochip_line_is_irq(range->gc, offset);
if (!desc || (!pin_is_irq && !desc->gpio_owner))
@@ -1639,12 +1689,26 @@ static int __maybe_unused stm32_pinctrl_restore_gpio_regs(
return 0;
}
+int __maybe_unused stm32_pinctrl_suspend(struct device *dev)
+{
+ struct stm32_pinctrl *pctl = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < pctl->nbanks; i++)
+ clk_disable(pctl->banks[i].clk);
+
+ return 0;
+}
+
int __maybe_unused stm32_pinctrl_resume(struct device *dev)
{
struct stm32_pinctrl *pctl = dev_get_drvdata(dev);
struct stm32_pinctrl_group *g = pctl->groups;
int i;
+ for (i = 0; i < pctl->nbanks; i++)
+ clk_enable(pctl->banks[i].clk);
+
for (i = 0; i < pctl->ngroups; i++, g++)
stm32_pinctrl_restore_gpio_regs(pctl, g->pin);
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.h b/drivers/pinctrl/stm32/pinctrl-stm32.h
index b0882d120765..e0c31c4c8bca 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.h
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.h
@@ -17,6 +17,7 @@
#define STM32_PIN_GPIO 0
#define STM32_PIN_AF(x) ((x) + 1)
#define STM32_PIN_ANALOG (STM32_PIN_AF(15) + 1)
+#define STM32_CONFIG_NUM (STM32_PIN_ANALOG + 1)
/* package information */
#define STM32MP_PKG_AA BIT(0)
@@ -31,26 +32,26 @@ struct stm32_desc_function {
struct stm32_desc_pin {
struct pinctrl_pin_desc pin;
- const struct stm32_desc_function *functions;
+ const struct stm32_desc_function functions[STM32_CONFIG_NUM];
const unsigned int pkg;
};
#define STM32_PIN(_pin, ...) \
{ \
.pin = _pin, \
- .functions = (struct stm32_desc_function[]){ \
- __VA_ARGS__, { } }, \
+ .functions = { \
+ __VA_ARGS__}, \
}
#define STM32_PIN_PKG(_pin, _pkg, ...) \
{ \
.pin = _pin, \
.pkg = _pkg, \
- .functions = (struct stm32_desc_function[]){ \
- __VA_ARGS__, { } }, \
+ .functions = { \
+ __VA_ARGS__}, \
}
#define STM32_FUNCTION(_num, _name) \
- { \
+ [_num] = { \
.num = _num, \
.name = _name, \
}
@@ -58,6 +59,7 @@ struct stm32_desc_pin {
struct stm32_pinctrl_match_data {
const struct stm32_desc_pin *pins;
const unsigned int npins;
+ bool secure_control;
};
struct stm32_gpio_bank;
@@ -65,6 +67,7 @@ struct stm32_gpio_bank;
int stm32_pctl_probe(struct platform_device *pdev);
void stm32_pmx_get_mode(struct stm32_gpio_bank *bank,
int pin, u32 *mode, u32 *alt);
+int stm32_pinctrl_suspend(struct device *dev);
int stm32_pinctrl_resume(struct device *dev);
#endif /* __PINCTRL_STM32_H */
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32mp135.c b/drivers/pinctrl/stm32/pinctrl-stm32mp135.c
index 4ab03520c407..fde1df191c24 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32mp135.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32mp135.c
@@ -1649,6 +1649,7 @@ static const struct stm32_desc_pin stm32mp135_pins[] = {
static struct stm32_pinctrl_match_data stm32mp135_match_data = {
.pins = stm32mp135_pins,
.npins = ARRAY_SIZE(stm32mp135_pins),
+ .secure_control = true,
};
static const struct of_device_id stm32mp135_pctrl_match[] = {
@@ -1660,7 +1661,7 @@ static const struct of_device_id stm32mp135_pctrl_match[] = {
};
static const struct dev_pm_ops stm32_pinctrl_dev_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, stm32_pinctrl_resume)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(stm32_pinctrl_suspend, stm32_pinctrl_resume)
};
static struct platform_driver stm32mp135_pinctrl_driver = {
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32mp157.c b/drivers/pinctrl/stm32/pinctrl-stm32mp157.c
index 2ccb99d64df8..91b2fc8ddbdb 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32mp157.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32mp157.c
@@ -2343,7 +2343,7 @@ static const struct of_device_id stm32mp157_pctrl_match[] = {
};
static const struct dev_pm_ops stm32_pinctrl_dev_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, stm32_pinctrl_resume)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(stm32_pinctrl_suspend, stm32_pinctrl_resume)
};
static struct platform_driver stm32mp157_pinctrl_driver = {
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra194.c b/drivers/pinctrl/tegra/pinctrl-tegra194.c
index 5c1dfcb46749..f6c5d5e6dbb6 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra194.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra194.c
@@ -1110,24 +1110,15 @@ static const unsigned int sdmmc4_dat0_pins[] = {
static const unsigned int sdmmc1_comp_pins[] = {
TEGRA_PIN_SDMMC1_COMP,
};
-static const unsigned int sdmmc1_hv_trim_pins[] = {
- TEGRA_PIN_SDMMC1_HV_TRIM,
-};
static const unsigned int sdmmc3_comp_pins[] = {
TEGRA_PIN_SDMMC3_COMP,
};
-static const unsigned int sdmmc3_hv_trim_pins[] = {
- TEGRA_PIN_SDMMC3_HV_TRIM,
-};
static const unsigned int eqos_comp_pins[] = {
TEGRA_PIN_EQOS_COMP,
};
static const unsigned int qspi_comp_pins[] = {
TEGRA_PIN_QSPI_COMP,
};
-static const unsigned int sys_reset_n_pins[] = {
- TEGRA_PIN_SYS_RESET_N,
-};
static const unsigned int shutdown_n_pins[] = {
TEGRA_PIN_SHUTDOWN_N,
};
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
index 386389ffec41..d8c5f9195f85 100644
--- a/drivers/platform/mips/cpu_hwmon.c
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -55,55 +55,6 @@ out:
static int nr_packages;
static struct device *cpu_hwmon_dev;
-static SENSOR_DEVICE_ATTR(name, 0444, NULL, NULL, 0);
-
-static struct attribute *cpu_hwmon_attributes[] = {
- &sensor_dev_attr_name.dev_attr.attr,
- NULL
-};
-
-/* Hwmon device attribute group */
-static struct attribute_group cpu_hwmon_attribute_group = {
- .attrs = cpu_hwmon_attributes,
-};
-
-static ssize_t get_cpu_temp(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t cpu_temp_label(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp4_label, 0444, cpu_temp_label, NULL, 4);
-
-static const struct attribute *hwmon_cputemp[4][3] = {
- {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_label.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp2_label.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp3_label.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp4_label.dev_attr.attr,
- NULL
- }
-};
-
static ssize_t cpu_temp_label(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -121,24 +72,47 @@ static ssize_t get_cpu_temp(struct device *dev,
return sprintf(buf, "%d\n", value);
}
-static int create_sysfs_cputemp_files(struct kobject *kobj)
-{
- int i, ret = 0;
-
- for (i = 0; i < nr_packages; i++)
- ret = sysfs_create_files(kobj, hwmon_cputemp[i]);
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_label, 0444, cpu_temp_label, NULL, 4);
- return ret;
-}
+static struct attribute *cpu_hwmon_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_label.dev_attr.attr,
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_label.dev_attr.attr,
+ NULL
+};
-static void remove_sysfs_cputemp_files(struct kobject *kobj)
+static umode_t cpu_hwmon_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
{
- int i;
+ int id = i / 2;
- for (i = 0; i < nr_packages; i++)
- sysfs_remove_files(kobj, hwmon_cputemp[i]);
+ if (id < nr_packages)
+ return attr->mode;
+ return 0;
}
+static struct attribute_group cpu_hwmon_group = {
+ .attrs = cpu_hwmon_attributes,
+ .is_visible = cpu_hwmon_is_visible,
+};
+
+static const struct attribute_group *cpu_hwmon_groups[] = {
+ &cpu_hwmon_group,
+ NULL
+};
+
#define CPU_THERMAL_THRESHOLD 90000
static struct delayed_work thermal_work;
@@ -159,50 +133,31 @@ static void do_thermal_timer(struct work_struct *work)
static int __init loongson_hwmon_init(void)
{
- int ret;
-
pr_info("Loongson Hwmon Enter...\n");
if (cpu_has_csr())
csr_temp_enable = csr_readl(LOONGSON_CSR_FEATURES) &
LOONGSON_CSRF_TEMP;
- cpu_hwmon_dev = hwmon_device_register_with_info(NULL, "cpu_hwmon", NULL, NULL, NULL);
- if (IS_ERR(cpu_hwmon_dev)) {
- ret = PTR_ERR(cpu_hwmon_dev);
- pr_err("hwmon_device_register fail!\n");
- goto fail_hwmon_device_register;
- }
-
nr_packages = loongson_sysconf.nr_cpus /
loongson_sysconf.cores_per_package;
- ret = create_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
- if (ret) {
- pr_err("fail to create cpu temperature interface!\n");
- goto fail_create_sysfs_cputemp_files;
+ cpu_hwmon_dev = hwmon_device_register_with_groups(NULL, "cpu_hwmon",
+ NULL, cpu_hwmon_groups);
+ if (IS_ERR(cpu_hwmon_dev)) {
+ pr_err("hwmon_device_register fail!\n");
+ return PTR_ERR(cpu_hwmon_dev);
}
INIT_DEFERRABLE_WORK(&thermal_work, do_thermal_timer);
schedule_delayed_work(&thermal_work, msecs_to_jiffies(20000));
- return ret;
-
-fail_create_sysfs_cputemp_files:
- sysfs_remove_group(&cpu_hwmon_dev->kobj,
- &cpu_hwmon_attribute_group);
- hwmon_device_unregister(cpu_hwmon_dev);
-
-fail_hwmon_device_register:
- return ret;
+ return 0;
}
static void __exit loongson_hwmon_exit(void)
{
cancel_delayed_work_sync(&thermal_work);
- remove_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
- sysfs_remove_group(&cpu_hwmon_dev->kobj,
- &cpu_hwmon_attribute_group);
hwmon_device_unregister(cpu_hwmon_dev);
}
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 97ac588a9e9c..ec8a404d71b4 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -3037,13 +3037,6 @@ static int ab8500_fg_bind(struct device *dev, struct device *master,
{
struct ab8500_fg *di = dev_get_drvdata(dev);
- /* Create a work queue for running the FG algorithm */
- di->fg_wq = alloc_ordered_workqueue("ab8500_fg_wq", WQ_MEM_RECLAIM);
- if (di->fg_wq == NULL) {
- dev_err(dev, "failed to create work queue\n");
- return -ENOMEM;
- }
-
di->bat_cap.max_mah_design = di->bm->bi->charge_full_design_uah;
di->bat_cap.max_mah = di->bat_cap.max_mah_design;
di->vbat_nom_uv = di->bm->bi->voltage_max_design_uv;
@@ -3067,8 +3060,7 @@ static void ab8500_fg_unbind(struct device *dev, struct device *master,
if (ret)
dev_err(dev, "failed to disable coulomb counter\n");
- destroy_workqueue(di->fg_wq);
- flush_scheduled_work();
+ flush_workqueue(di->fg_wq);
}
static const struct component_ops ab8500_fg_component_ops = {
@@ -3117,6 +3109,13 @@ static int ab8500_fg_probe(struct platform_device *pdev)
ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INIT);
+ /* Create a work queue for running the FG algorithm */
+ di->fg_wq = alloc_ordered_workqueue("ab8500_fg_wq", WQ_MEM_RECLAIM);
+ if (di->fg_wq == NULL) {
+ dev_err(dev, "failed to create work queue\n");
+ return -ENOMEM;
+ }
+
/* Init work for running the fg algorithm instantly */
INIT_WORK(&di->fg_work, ab8500_fg_instant_work);
@@ -3227,6 +3226,8 @@ static int ab8500_fg_remove(struct platform_device *pdev)
{
struct ab8500_fg *di = platform_get_drvdata(pdev);
+ destroy_workqueue(di->fg_wq);
+ flush_scheduled_work();
component_del(&pdev->dev, &ab8500_fg_component_ops);
list_del(&di->node);
ab8500_fg_sysfs_exit(di);
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index 19746e658a6a..15219ed43ce9 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -865,17 +865,20 @@ static int axp288_charger_probe(struct platform_device *pdev)
info->regmap_irqc = axp20x->regmap_irqc;
info->cable.edev = extcon_get_extcon_dev(AXP288_EXTCON_DEV_NAME);
- if (info->cable.edev == NULL) {
- dev_dbg(dev, "%s is not ready, probe deferred\n",
- AXP288_EXTCON_DEV_NAME);
- return -EPROBE_DEFER;
+ if (IS_ERR(info->cable.edev)) {
+ dev_err_probe(dev, PTR_ERR(info->cable.edev),
+ "extcon_get_extcon_dev(%s) failed\n",
+ AXP288_EXTCON_DEV_NAME);
+ return PTR_ERR(info->cable.edev);
}
if (acpi_dev_present(USB_HOST_EXTCON_HID, NULL, -1)) {
info->otg.cable = extcon_get_extcon_dev(USB_HOST_EXTCON_NAME);
- if (info->otg.cable == NULL) {
- dev_dbg(dev, "EXTCON_USB_HOST is not ready, probe deferred\n");
- return -EPROBE_DEFER;
+ if (IS_ERR(info->otg.cable)) {
+ dev_err_probe(dev, PTR_ERR(info->otg.cable),
+ "extcon_get_extcon_dev(%s) failed\n",
+ USB_HOST_EXTCON_NAME);
+ return PTR_ERR(info->otg.cable);
}
dev_info(dev, "Using " USB_HOST_EXTCON_HID " extcon for usb-id\n");
}
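A minimal sketch (not part of the diff) of the consumer-side convention the axp288 change above switches to: extcon_get_extcon_dev() now hands back an ERR_PTR value (including -EPROBE_DEFER) rather than NULL, so callers only need IS_ERR() plus dev_err_probe(). The device name "example-extcon" and the helper are illustrative assumptions.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/extcon.h>

/* Illustrative helper, assuming an extcon device registered as "example-extcon". */
static struct extcon_dev *example_get_cable(struct device *dev)
{
	struct extcon_dev *edev = extcon_get_extcon_dev("example-extcon");

	if (IS_ERR(edev))
		/* -EPROBE_DEFER is now carried in the error pointer */
		dev_err_probe(dev, PTR_ERR(edev),
			      "extcon_get_extcon_dev(example-extcon) failed\n");

	return edev;
}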
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index e9f285dae489..8e6f8a655079 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -90,6 +90,8 @@
#define AXP288_REG_UPDATE_INTERVAL (60 * HZ)
#define AXP288_FG_INTR_NUM 6
+#define AXP288_QUIRK_NO_BATTERY BIT(0)
+
static bool no_current_sense_res;
module_param(no_current_sense_res, bool, 0444);
MODULE_PARM_DESC(no_current_sense_res, "No (or broken) current sense resistor");
@@ -524,7 +526,7 @@ static struct power_supply_desc fuel_gauge_desc = {
* detection reports one despite it not being there.
 * Please keep this list sorted alphabetically.
*/
-static const struct dmi_system_id axp288_no_battery_list[] = {
+static const struct dmi_system_id axp288_quirks[] = {
{
/* ACEPC T8 Cherry Trail Z8350 mini PC */
.matches = {
@@ -534,6 +536,7 @@ static const struct dmi_system_id axp288_no_battery_list[] = {
/* also match on somewhat unique bios-version */
DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
},
+ .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
},
{
/* ACEPC T11 Cherry Trail Z8350 mini PC */
@@ -544,6 +547,7 @@ static const struct dmi_system_id axp288_no_battery_list[] = {
/* also match on somewhat unique bios-version */
DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
},
+ .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
},
{
/* Intel Cherry Trail Compute Stick, Windows version */
@@ -551,6 +555,7 @@ static const struct dmi_system_id axp288_no_battery_list[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"),
},
+ .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
},
{
/* Intel Cherry Trail Compute Stick, version without an OS */
@@ -558,34 +563,54 @@ static const struct dmi_system_id axp288_no_battery_list[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"),
},
+ .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
},
{
/* Meegopad T02 */
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "MEEGOPAD T02"),
},
+ .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
},
{ /* Mele PCG03 Mini PC */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Mini PC"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Mini PC"),
},
+ .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
},
{
/* Minix Neo Z83-4 mini PC */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
- }
+ },
+ .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
},
{
- /* Various Ace PC/Meegopad/MinisForum/Wintel Mini-PCs/HDMI-sticks */
+ /*
+ * One Mix 1, this uses the "T3 MRD" boardname used by
+ * generic mini PCs, but it is a mini laptop so it does
+ * actually have a battery!
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_BIOS_DATE, "06/14/2018"),
+ },
+ .driver_data = NULL,
+ },
+ {
+ /*
+ * Various Ace PC/Meegopad/MinisForum/Wintel Mini-PCs/HDMI-sticks
+ * This entry must be last because it is generic; this allows
+ * adding more specific quirks overriding this generic entry.
+ */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
DMI_MATCH(DMI_CHASSIS_TYPE, "3"),
DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
- DMI_MATCH(DMI_BIOS_VERSION, "5.11"),
},
+ .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
},
{}
};
@@ -665,7 +690,9 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
[BAT_D_CURR] = "axp288-chrg-d-curr",
[BAT_VOLT] = "axp288-batt-volt",
};
+ const struct dmi_system_id *dmi_id;
struct device *dev = &pdev->dev;
+ unsigned long quirks = 0;
int i, pirq, ret;
/*
@@ -675,7 +702,11 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
if (!acpi_quirk_skip_acpi_ac_and_battery())
return -ENODEV;
- if (dmi_check_system(axp288_no_battery_list))
+ dmi_id = dmi_first_match(axp288_quirks);
+ if (dmi_id)
+ quirks = (unsigned long)dmi_id->driver_data;
+
+ if (quirks & AXP288_QUIRK_NO_BATTERY)
return -ENODEV;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
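A small illustrative sketch of the dmi_first_match() quirk pattern the probe path above adopts: one table can attach per-machine flags via .driver_data instead of serving only as a deny-list. The flag name, table, and board string below are invented for illustration.

#include <linux/bits.h>
#include <linux/dmi.h>

#define EXAMPLE_QUIRK_NO_BATTERY	BIT(0)	/* invented flag, for illustration */

static const struct dmi_system_id example_quirks[] = {
	{
		/* hypothetical machine entry */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "EXAMPLE BOARD"),
		},
		.driver_data = (void *)EXAMPLE_QUIRK_NO_BATTERY,
	},
	{}
};

static unsigned long example_get_quirks(void)
{
	const struct dmi_system_id *id = dmi_first_match(example_quirks);

	return id ? (unsigned long)id->driver_data : 0;
}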
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index aa1a589eb9f2..27f5c7648617 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -455,11 +455,9 @@ static ssize_t bq24190_sysfs_show(struct device *dev,
if (!info)
return -EINVAL;
- ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0)
return ret;
- }
ret = bq24190_read_mask(bdi, info->reg, info->mask, info->shift, &v);
if (ret)
@@ -490,11 +488,9 @@ static ssize_t bq24190_sysfs_store(struct device *dev,
if (ret < 0)
return ret;
- ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0)
return ret;
- }
ret = bq24190_write_mask(bdi, info->reg, info->mask, info->shift, v);
if (ret)
@@ -512,10 +508,9 @@ static int bq24190_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
union power_supply_propval val = { .intval = bdi->charge_type };
int ret;
- ret = pm_runtime_get_sync(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
if (ret < 0) {
dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret);
- pm_runtime_put_noidle(bdi->dev);
return ret;
}
@@ -551,10 +546,9 @@ static int bq24190_vbus_is_enabled(struct regulator_dev *dev)
int ret;
u8 val;
- ret = pm_runtime_get_sync(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
if (ret < 0) {
dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret);
- pm_runtime_put_noidle(bdi->dev);
return ret;
}
@@ -1128,11 +1122,9 @@ static int bq24190_charger_get_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
- ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0)
return ret;
- }
switch (psp) {
case POWER_SUPPLY_PROP_CHARGE_TYPE:
@@ -1204,11 +1196,9 @@ static int bq24190_charger_set_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
- ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0)
return ret;
- }
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
@@ -1477,11 +1467,9 @@ static int bq24190_battery_get_property(struct power_supply *psy,
dev_warn(bdi->dev, "warning: /sys/class/power_supply/bq24190-battery is deprecated\n");
dev_dbg(bdi->dev, "prop: %d\n", psp);
- ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0)
return ret;
- }
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -1525,11 +1513,9 @@ static int bq24190_battery_set_property(struct power_supply *psy,
dev_warn(bdi->dev, "warning: /sys/class/power_supply/bq24190-battery is deprecated\n");
dev_dbg(bdi->dev, "prop: %d\n", psp);
- ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0)
return ret;
- }
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
@@ -1683,10 +1669,9 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
int error;
bdi->irq_event = true;
- error = pm_runtime_get_sync(bdi->dev);
+ error = pm_runtime_resume_and_get(bdi->dev);
if (error < 0) {
dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
- pm_runtime_put_noidle(bdi->dev);
return IRQ_NONE;
}
bq24190_check_status(bdi);
@@ -1921,11 +1906,9 @@ static int bq24190_remove(struct i2c_client *client)
struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
int error;
- error = pm_runtime_get_sync(bdi->dev);
- if (error < 0) {
+ error = pm_runtime_resume_and_get(bdi->dev);
+ if (error < 0)
dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
- pm_runtime_put_noidle(bdi->dev);
- }
bq24190_register_reset(bdi);
if (bdi->battery)
@@ -1982,11 +1965,9 @@ static __maybe_unused int bq24190_pm_suspend(struct device *dev)
struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
int error;
- error = pm_runtime_get_sync(bdi->dev);
- if (error < 0) {
+ error = pm_runtime_resume_and_get(bdi->dev);
+ if (error < 0)
dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
- pm_runtime_put_noidle(bdi->dev);
- }
bq24190_register_reset(bdi);
@@ -2007,11 +1988,9 @@ static __maybe_unused int bq24190_pm_resume(struct device *dev)
bdi->f_reg = 0;
bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */
- error = pm_runtime_get_sync(bdi->dev);
- if (error < 0) {
+ error = pm_runtime_resume_and_get(bdi->dev);
+ if (error < 0)
dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
- pm_runtime_put_noidle(bdi->dev);
- }
bq24190_register_reset(bdi);
bq24190_set_config(bdi);
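As a reference for the many bq24190 conversions above, here is a minimal sketch (assuming runtime PM is already enabled for the device and autosuspend is in use) of why pm_runtime_resume_and_get() is preferred: it drops the usage count itself when the resume fails, so the pm_runtime_put_noidle() in every error branch goes away. The helper name is illustrative.

#include <linux/pm_runtime.h>

static int example_hw_access(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count already dropped, no put_noidle() needed */

	/* ... touch the hardware here while it is guaranteed to be powered ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}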
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 72e727cd31e8..35e6a394c0df 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1572,14 +1572,6 @@ static int bq27xxx_battery_read_charge(struct bq27xxx_device_info *di, u8 reg)
*/
static inline int bq27xxx_battery_read_nac(struct bq27xxx_device_info *di)
{
- int flags;
-
- if (di->opts & BQ27XXX_O_ZERO) {
- flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, true);
- if (flags >= 0 && (flags & BQ27000_FLAG_CI))
- return -ENODATA;
- }
-
return bq27xxx_battery_read_charge(di, BQ27XXX_REG_NAC);
}
@@ -1742,6 +1734,18 @@ static bool bq27xxx_battery_dead(struct bq27xxx_device_info *di, u16 flags)
return flags & (BQ27XXX_FLAG_SOC1 | BQ27XXX_FLAG_SOCF);
}
+/*
+ * Returns true if reported battery capacity is inaccurate
+ */
+static bool bq27xxx_battery_capacity_inaccurate(struct bq27xxx_device_info *di,
+ u16 flags)
+{
+ if (di->opts & BQ27XXX_O_HAS_CI)
+ return (flags & BQ27000_FLAG_CI);
+ else
+ return false;
+}
+
static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
{
/* Unlikely but important to return first */
@@ -1751,6 +1755,8 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
return POWER_SUPPLY_HEALTH_COLD;
if (unlikely(bq27xxx_battery_dead(di, di->cache.flags)))
return POWER_SUPPLY_HEALTH_DEAD;
+ if (unlikely(bq27xxx_battery_capacity_inaccurate(di, di->cache.flags)))
+ return POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED;
return POWER_SUPPLY_HEALTH_GOOD;
}
@@ -1758,7 +1764,6 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
void bq27xxx_battery_update(struct bq27xxx_device_info *di)
{
struct bq27xxx_reg_cache cache = {0, };
- bool has_ci_flag = di->opts & BQ27XXX_O_HAS_CI;
bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
@@ -1766,30 +1771,19 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
cache.flags = -1; /* read error */
if (cache.flags >= 0) {
cache.temperature = bq27xxx_battery_read_temperature(di);
- if (has_ci_flag && (cache.flags & BQ27000_FLAG_CI)) {
- dev_info_once(di->dev, "battery is not calibrated! ignoring capacity values\n");
- cache.capacity = -ENODATA;
- cache.energy = -ENODATA;
- cache.time_to_empty = -ENODATA;
- cache.time_to_empty_avg = -ENODATA;
- cache.time_to_full = -ENODATA;
- cache.charge_full = -ENODATA;
- cache.health = -ENODATA;
- } else {
- if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
- cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
- if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
- cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
- if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
- cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
-
- cache.charge_full = bq27xxx_battery_read_fcc(di);
- cache.capacity = bq27xxx_battery_read_soc(di);
- if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
- cache.energy = bq27xxx_battery_read_energy(di);
- di->cache.flags = cache.flags;
- cache.health = bq27xxx_battery_read_health(di);
- }
+ if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
+ cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
+ if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
+ cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
+ if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
+ cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
+
+ cache.charge_full = bq27xxx_battery_read_fcc(di);
+ cache.capacity = bq27xxx_battery_read_soc(di);
+ if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
+ cache.energy = bq27xxx_battery_read_energy(di);
+ di->cache.flags = cache.flags;
+ cache.health = bq27xxx_battery_read_health(di);
if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
cache.cycle_count = bq27xxx_battery_read_cyct(di);
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index d67edb760c94..92db79400a6a 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -985,13 +985,10 @@ static int charger_extcon_init(struct charger_manager *cm,
cable->nb.notifier_call = charger_extcon_notifier;
cable->extcon_dev = extcon_get_extcon_dev(cable->extcon_name);
- if (IS_ERR_OR_NULL(cable->extcon_dev)) {
+ if (IS_ERR(cable->extcon_dev)) {
pr_err("Cannot find extcon_dev for %s (cable: %s)\n",
cable->extcon_name, cable->name);
- if (cable->extcon_dev == NULL)
- return -EPROBE_DEFER;
- else
- return PTR_ERR(cable->extcon_dev);
+ return PTR_ERR(cable->extcon_dev);
}
for (i = 0; i < ARRAY_SIZE(extcon_mapping); i++) {
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index 127c73b0b3bd..1ec3535a257d 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -242,10 +242,10 @@ static int max8997_battery_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "couldn't get charger regulator\n");
}
charger->edev = extcon_get_extcon_dev("max8997-muic");
- if (IS_ERR_OR_NULL(charger->edev)) {
- if (!charger->edev)
- return -EPROBE_DEFER;
- dev_info(charger->dev, "couldn't get extcon device\n");
+ if (IS_ERR(charger->edev)) {
+ dev_err_probe(charger->dev, PTR_ERR(charger->edev),
+ "couldn't get extcon device: max8997-muic\n");
+ return PTR_ERR(charger->edev);
}
if (!IS_ERR(charger->reg) && !IS_ERR_OR_NULL(charger->edev)) {
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index d925cb137e12..fad5890c899e 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -616,7 +616,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
goto out_put_node;
}
- info = devm_kmalloc(&psy->dev, sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&psy->dev, sizeof(*info), GFP_KERNEL);
if (!info) {
err = -ENOMEM;
goto out_put_node;
diff --git a/drivers/power/supply/tosa_battery.c b/drivers/power/supply/tosa_battery.c
index 32cc31cd4761..73d4aca4c386 100644
--- a/drivers/power/supply/tosa_battery.c
+++ b/drivers/power/supply/tosa_battery.c
@@ -12,10 +12,9 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <asm/mach-types.h>
-#include <mach/tosa.h>
static DEFINE_MUTEX(bat_lock); /* protects gpio pins */
static struct work_struct bat_work;
@@ -28,22 +27,23 @@ struct tosa_bat {
struct mutex work_lock; /* protects data */
bool (*is_present)(struct tosa_bat *bat);
- int gpio_full;
- int gpio_charge_off;
+ struct gpio_desc *gpiod_full;
+ struct gpio_desc *gpiod_charge_off;
int technology;
- int gpio_bat;
+ struct gpio_desc *gpiod_bat;
int adc_bat;
int adc_bat_divider;
int bat_max;
int bat_min;
- int gpio_temp;
+ struct gpio_desc *gpiod_temp;
int adc_temp;
int adc_temp_divider;
};
+static struct gpio_desc *jacket_detect;
static struct tosa_bat tosa_bat_main;
static struct tosa_bat tosa_bat_jacket;
@@ -51,15 +51,15 @@ static unsigned long tosa_read_bat(struct tosa_bat *bat)
{
unsigned long value = 0;
- if (bat->gpio_bat < 0 || bat->adc_bat < 0)
+ if (!bat->gpiod_bat || bat->adc_bat < 0)
return 0;
mutex_lock(&bat_lock);
- gpio_set_value(bat->gpio_bat, 1);
+ gpiod_set_value(bat->gpiod_bat, 1);
msleep(5);
value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy->dev.parent),
bat->adc_bat);
- gpio_set_value(bat->gpio_bat, 0);
+ gpiod_set_value(bat->gpiod_bat, 0);
mutex_unlock(&bat_lock);
value = value * 1000000 / bat->adc_bat_divider;
@@ -71,15 +71,15 @@ static unsigned long tosa_read_temp(struct tosa_bat *bat)
{
unsigned long value = 0;
- if (bat->gpio_temp < 0 || bat->adc_temp < 0)
+ if (!bat->gpiod_temp || bat->adc_temp < 0)
return 0;
mutex_lock(&bat_lock);
- gpio_set_value(bat->gpio_temp, 1);
+ gpiod_set_value(bat->gpiod_temp, 1);
msleep(5);
value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy->dev.parent),
bat->adc_temp);
- gpio_set_value(bat->gpio_temp, 0);
+ gpiod_set_value(bat->gpiod_temp, 0);
mutex_unlock(&bat_lock);
value = value * 10000 / bat->adc_temp_divider;
@@ -136,7 +136,7 @@ static int tosa_bat_get_property(struct power_supply *psy,
static bool tosa_jacket_bat_is_present(struct tosa_bat *bat)
{
- return gpio_get_value(TOSA_GPIO_JACKET_DETECT) == 0;
+ return gpiod_get_value(jacket_detect) == 0;
}
static void tosa_bat_external_power_changed(struct power_supply *psy)
@@ -166,23 +166,23 @@ static void tosa_bat_update(struct tosa_bat *bat)
bat->full_chrg = -1;
} else if (power_supply_am_i_supplied(psy)) {
if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING) {
- gpio_set_value(bat->gpio_charge_off, 0);
+ gpiod_set_value(bat->gpiod_charge_off, 0);
mdelay(15);
}
- if (gpio_get_value(bat->gpio_full)) {
+ if (gpiod_get_value(bat->gpiod_full)) {
if (old == POWER_SUPPLY_STATUS_CHARGING ||
bat->full_chrg == -1)
bat->full_chrg = tosa_read_bat(bat);
- gpio_set_value(bat->gpio_charge_off, 1);
+ gpiod_set_value(bat->gpiod_charge_off, 1);
bat->status = POWER_SUPPLY_STATUS_FULL;
} else {
- gpio_set_value(bat->gpio_charge_off, 0);
+ gpiod_set_value(bat->gpiod_charge_off, 0);
bat->status = POWER_SUPPLY_STATUS_CHARGING;
}
} else {
- gpio_set_value(bat->gpio_charge_off, 1);
+ gpiod_set_value(bat->gpiod_charge_off, 1);
bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
}
@@ -251,18 +251,18 @@ static struct tosa_bat tosa_bat_main = {
.full_chrg = -1,
.psy = NULL,
- .gpio_full = TOSA_GPIO_BAT0_CRG,
- .gpio_charge_off = TOSA_GPIO_CHARGE_OFF,
+ .gpiod_full = NULL,
+ .gpiod_charge_off = NULL,
.technology = POWER_SUPPLY_TECHNOLOGY_LIPO,
- .gpio_bat = TOSA_GPIO_BAT0_V_ON,
+ .gpiod_bat = NULL,
.adc_bat = WM97XX_AUX_ID3,
.adc_bat_divider = 414,
.bat_max = 4310000,
.bat_min = 1551 * 1000000 / 414,
- .gpio_temp = TOSA_GPIO_BAT1_TH_ON,
+ .gpiod_temp = NULL,
.adc_temp = WM97XX_AUX_ID2,
.adc_temp_divider = 10000,
};
@@ -273,18 +273,18 @@ static struct tosa_bat tosa_bat_jacket = {
.psy = NULL,
.is_present = tosa_jacket_bat_is_present,
- .gpio_full = TOSA_GPIO_BAT1_CRG,
- .gpio_charge_off = TOSA_GPIO_CHARGE_OFF_JC,
+ .gpiod_full = NULL,
+ .gpiod_charge_off = NULL,
.technology = POWER_SUPPLY_TECHNOLOGY_LIPO,
- .gpio_bat = TOSA_GPIO_BAT1_V_ON,
+ .gpiod_bat = NULL,
.adc_bat = WM97XX_AUX_ID3,
.adc_bat_divider = 414,
.bat_max = 4310000,
.bat_min = 1551 * 1000000 / 414,
- .gpio_temp = TOSA_GPIO_BAT0_TH_ON,
+ .gpiod_temp = NULL,
.adc_temp = WM97XX_AUX_ID2,
.adc_temp_divider = 10000,
};
@@ -294,36 +294,20 @@ static struct tosa_bat tosa_bat_bu = {
.full_chrg = -1,
.psy = NULL,
- .gpio_full = -1,
- .gpio_charge_off = -1,
+ .gpiod_full = NULL,
+ .gpiod_charge_off = NULL,
.technology = POWER_SUPPLY_TECHNOLOGY_LiMn,
- .gpio_bat = TOSA_GPIO_BU_CHRG_ON,
+ .gpiod_bat = NULL,
.adc_bat = WM97XX_AUX_ID4,
.adc_bat_divider = 1266,
- .gpio_temp = -1,
+ .gpiod_temp = NULL,
.adc_temp = -1,
.adc_temp_divider = -1,
};
-static struct gpio tosa_bat_gpios[] = {
- { TOSA_GPIO_CHARGE_OFF, GPIOF_OUT_INIT_HIGH, "main charge off" },
- { TOSA_GPIO_CHARGE_OFF_JC, GPIOF_OUT_INIT_HIGH, "jacket charge off" },
- { TOSA_GPIO_BAT_SW_ON, GPIOF_OUT_INIT_LOW, "battery switch" },
- { TOSA_GPIO_BAT0_V_ON, GPIOF_OUT_INIT_LOW, "main battery" },
- { TOSA_GPIO_BAT1_V_ON, GPIOF_OUT_INIT_LOW, "jacket battery" },
- { TOSA_GPIO_BAT1_TH_ON, GPIOF_OUT_INIT_LOW, "main battery temp" },
- { TOSA_GPIO_BAT0_TH_ON, GPIOF_OUT_INIT_LOW, "jacket battery temp" },
- { TOSA_GPIO_BU_CHRG_ON, GPIOF_OUT_INIT_LOW, "backup battery" },
- { TOSA_GPIO_BAT0_CRG, GPIOF_IN, "main battery full" },
- { TOSA_GPIO_BAT1_CRG, GPIOF_IN, "jacket battery full" },
- { TOSA_GPIO_BAT0_LOW, GPIOF_IN, "main battery low" },
- { TOSA_GPIO_BAT1_LOW, GPIOF_IN, "jacket battery low" },
- { TOSA_GPIO_JACKET_DETECT, GPIOF_IN, "jacket detect" },
-};
-
#ifdef CONFIG_PM
static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state)
{
@@ -343,19 +327,83 @@ static int tosa_bat_resume(struct platform_device *dev)
#define tosa_bat_resume NULL
#endif
-static int tosa_bat_probe(struct platform_device *dev)
+static int tosa_bat_probe(struct platform_device *pdev)
{
int ret;
struct power_supply_config main_psy_cfg = {},
jacket_psy_cfg = {},
bu_psy_cfg = {};
+ struct device *dev = &pdev->dev;
+ struct gpio_desc *dummy;
if (!machine_is_tosa())
return -ENODEV;
- ret = gpio_request_array(tosa_bat_gpios, ARRAY_SIZE(tosa_bat_gpios));
- if (ret)
- return ret;
+ /* Main charging control GPIOs */
+ tosa_bat_main.gpiod_charge_off = devm_gpiod_get(dev, "main charge off", GPIOD_OUT_HIGH);
+ if (IS_ERR(tosa_bat_main.gpiod_charge_off))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_charge_off),
+ "no main charger GPIO\n");
+ tosa_bat_jacket.gpiod_charge_off = devm_gpiod_get(dev, "jacket charge off", GPIOD_OUT_HIGH);
+ if (IS_ERR(tosa_bat_jacket.gpiod_charge_off))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_charge_off),
+ "no jacket charger GPIO\n");
+
+ /* Per-battery output check (routes battery voltage to ADC) */
+ tosa_bat_main.gpiod_bat = devm_gpiod_get(dev, "main battery", GPIOD_OUT_LOW);
+ if (IS_ERR(tosa_bat_main.gpiod_bat))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_bat),
+ "no main battery GPIO\n");
+ tosa_bat_jacket.gpiod_bat = devm_gpiod_get(dev, "jacket battery", GPIOD_OUT_LOW);
+ if (IS_ERR(tosa_bat_jacket.gpiod_bat))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_bat),
+ "no jacket battery GPIO\n");
+ tosa_bat_bu.gpiod_bat = devm_gpiod_get(dev, "backup battery", GPIOD_OUT_LOW);
+ if (IS_ERR(tosa_bat_bu.gpiod_bat))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_bu.gpiod_bat),
+ "no backup battery GPIO\n");
+
+ /* Battery full detect GPIOs (using PXA SoC GPIOs) */
+ tosa_bat_main.gpiod_full = devm_gpiod_get(dev, "main battery full", GPIOD_IN);
+ if (IS_ERR(tosa_bat_main.gpiod_full))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_full),
+ "no main battery full GPIO\n");
+ tosa_bat_jacket.gpiod_full = devm_gpiod_get(dev, "jacket battery full", GPIOD_IN);
+ if (IS_ERR(tosa_bat_jacket.gpiod_full))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_full),
+ "no jacket battery full GPIO\n");
+
+ /* Battery temperature GPIOs (routes thermistor voltage to ADC) */
+ tosa_bat_main.gpiod_temp = devm_gpiod_get(dev, "main battery temp", GPIOD_OUT_LOW);
+ if (IS_ERR(tosa_bat_main.gpiod_temp))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_temp),
+ "no main battery temp GPIO\n");
+ tosa_bat_jacket.gpiod_temp = devm_gpiod_get(dev, "jacket battery temp", GPIOD_OUT_LOW);
+ if (IS_ERR(tosa_bat_jacket.gpiod_temp))
+ return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_temp),
+ "no jacket battery temp GPIO\n");
+
+ /* Jacket detect GPIO */
+ jacket_detect = devm_gpiod_get(dev, "jacket detect", GPIOD_IN);
+ if (IS_ERR(jacket_detect))
+ return dev_err_probe(dev, PTR_ERR(jacket_detect),
+ "no jacket detect GPIO\n");
+
+ /* Battery low indication GPIOs (not used, we just request them) */
+ dummy = devm_gpiod_get(dev, "main battery low", GPIOD_IN);
+ if (IS_ERR(dummy))
+ return dev_err_probe(dev, PTR_ERR(dummy),
+ "no main battery low GPIO\n");
+ dummy = devm_gpiod_get(dev, "jacket battery low", GPIOD_IN);
+ if (IS_ERR(dummy))
+ return dev_err_probe(dev, PTR_ERR(dummy),
+ "no jacket battery low GPIO\n");
+
+ /* Battery switch GPIO (not used, just requested) */
+ dummy = devm_gpiod_get(dev, "battery switch", GPIOD_OUT_LOW);
+ if (IS_ERR(dummy))
+ return dev_err_probe(dev, PTR_ERR(dummy),
+ "no battery switch GPIO\n");
mutex_init(&tosa_bat_main.work_lock);
mutex_init(&tosa_bat_jacket.work_lock);
@@ -363,7 +411,7 @@ static int tosa_bat_probe(struct platform_device *dev)
INIT_WORK(&bat_work, tosa_bat_work);
main_psy_cfg.drv_data = &tosa_bat_main;
- tosa_bat_main.psy = power_supply_register(&dev->dev,
+ tosa_bat_main.psy = power_supply_register(dev,
&tosa_bat_main_desc,
&main_psy_cfg);
if (IS_ERR(tosa_bat_main.psy)) {
@@ -372,7 +420,7 @@ static int tosa_bat_probe(struct platform_device *dev)
}
jacket_psy_cfg.drv_data = &tosa_bat_jacket;
- tosa_bat_jacket.psy = power_supply_register(&dev->dev,
+ tosa_bat_jacket.psy = power_supply_register(dev,
&tosa_bat_jacket_desc,
&jacket_psy_cfg);
if (IS_ERR(tosa_bat_jacket.psy)) {
@@ -381,28 +429,28 @@ static int tosa_bat_probe(struct platform_device *dev)
}
bu_psy_cfg.drv_data = &tosa_bat_bu;
- tosa_bat_bu.psy = power_supply_register(&dev->dev, &tosa_bat_bu_desc,
+ tosa_bat_bu.psy = power_supply_register(dev, &tosa_bat_bu_desc,
&bu_psy_cfg);
if (IS_ERR(tosa_bat_bu.psy)) {
ret = PTR_ERR(tosa_bat_bu.psy);
goto err_psy_reg_bu;
}
- ret = request_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG),
+ ret = request_irq(gpiod_to_irq(tosa_bat_main.gpiod_full),
tosa_bat_gpio_isr,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"main full", &tosa_bat_main);
if (ret)
goto err_req_main;
- ret = request_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG),
+ ret = request_irq(gpiod_to_irq(tosa_bat_jacket.gpiod_full),
tosa_bat_gpio_isr,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"jacket full", &tosa_bat_jacket);
if (ret)
goto err_req_jacket;
- ret = request_irq(gpio_to_irq(TOSA_GPIO_JACKET_DETECT),
+ ret = request_irq(gpiod_to_irq(jacket_detect),
tosa_bat_gpio_isr,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"jacket detect", &tosa_bat_jacket);
@@ -411,9 +459,9 @@ static int tosa_bat_probe(struct platform_device *dev)
return 0;
}
- free_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG), &tosa_bat_jacket);
+ free_irq(gpiod_to_irq(tosa_bat_jacket.gpiod_full), &tosa_bat_jacket);
err_req_jacket:
- free_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG), &tosa_bat_main);
+ free_irq(gpiod_to_irq(tosa_bat_main.gpiod_full), &tosa_bat_main);
err_req_main:
power_supply_unregister(tosa_bat_bu.psy);
err_psy_reg_bu:
@@ -425,15 +473,14 @@ err_psy_reg_main:
/* see comment in tosa_bat_remove */
cancel_work_sync(&bat_work);
- gpio_free_array(tosa_bat_gpios, ARRAY_SIZE(tosa_bat_gpios));
return ret;
}
static int tosa_bat_remove(struct platform_device *dev)
{
- free_irq(gpio_to_irq(TOSA_GPIO_JACKET_DETECT), &tosa_bat_jacket);
- free_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG), &tosa_bat_jacket);
- free_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG), &tosa_bat_main);
+ free_irq(gpiod_to_irq(jacket_detect), &tosa_bat_jacket);
+ free_irq(gpiod_to_irq(tosa_bat_jacket.gpiod_full), &tosa_bat_jacket);
+ free_irq(gpiod_to_irq(tosa_bat_main.gpiod_full), &tosa_bat_main);
power_supply_unregister(tosa_bat_bu.psy);
power_supply_unregister(tosa_bat_jacket.psy);
@@ -445,7 +492,6 @@ static int tosa_bat_remove(struct platform_device *dev)
* unregistered now.
*/
cancel_work_sync(&bat_work);
- gpio_free_array(tosa_bat_gpios, ARRAY_SIZE(tosa_bat_gpios));
return 0;
}
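A brief sketch (illustrative, not from the patch) of the descriptor-based GPIO consumer calls the tosa conversion relies on; the connection ID "example" is assumed to be mapped by a board-file gpiod lookup table, as on Tosa.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Illustrative only: returns the IRQ backing the "example" input line. */
static int example_request_full_gpio(struct device *dev)
{
	struct gpio_desc *full;
	int irq;

	full = devm_gpiod_get(dev, "example", GPIOD_IN);
	if (IS_ERR(full))
		return dev_err_probe(dev, PTR_ERR(full), "no example GPIO\n");

	irq = gpiod_to_irq(full);	/* descriptor-based replacement for gpio_to_irq() */
	if (irq < 0)
		return irq;

	return irq;
}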
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index cb258e1448d5..c9d451bf89e2 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -267,7 +267,7 @@ static int arm_tod_read_trig_sel_refclk(struct idtcm_channel *channel, u8 ref)
static bool is_single_shot(u8 mask)
{
/* Treat single bit ToD masks as continuous trigger */
- return mask <= 8 && is_power_of_2(mask);
+ return !(mask <= 8 && is_power_of_2(mask));
}
static int idtcm_extts_enable(struct idtcm_channel *channel,
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 21e3b05a5153..904de8d61828 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -572,6 +572,17 @@ config PWM_SUN4I
To compile this driver as a module, choose M here: the module
will be called pwm-sun4i.
+config PWM_SUNPLUS
+ tristate "Sunplus PWM support"
+ depends on ARCH_SUNPLUS || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ help
+ Generic PWM framework driver for the PWM controller on
+ Sunplus SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-sunplus.
+
config PWM_TEGRA
tristate "NVIDIA Tegra PWM support"
depends on ARCH_TEGRA || COMPILE_TEST
@@ -640,4 +651,18 @@ config PWM_VT8500
To compile this driver as a module, choose M here: the module
will be called pwm-vt8500.
+config PWM_XILINX
+ tristate "Xilinx AXI Timer PWM support"
+ depends on OF_ADDRESS
+ depends on COMMON_CLK
+ select REGMAP_MMIO
+ help
+ PWM driver for Xilinx LogiCORE IP AXI timers. This timer is
+ typically a soft core which may be present in Xilinx FPGAs.
+ This device may also be present in Microblaze soft processors.
+ If you don't have this IP in your design, choose N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-xilinx.
+
endif
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 708840b7fba8..5c08bdb817b4 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_PWM_STM32) += pwm-stm32.o
obj-$(CONFIG_PWM_STM32_LP) += pwm-stm32-lp.o
obj-$(CONFIG_PWM_STMPE) += pwm-stmpe.o
obj-$(CONFIG_PWM_SUN4I) += pwm-sun4i.o
+obj-$(CONFIG_PWM_SUNPLUS) += pwm-sunplus.o
obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o
obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o
obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o
@@ -60,3 +61,4 @@ obj-$(CONFIG_PWM_TWL) += pwm-twl.o
obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o
obj-$(CONFIG_PWM_VISCONTI) += pwm-visconti.o
obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o
+obj-$(CONFIG_PWM_XILINX) += pwm-xilinx.o
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 36f7ea381838..3977a0f9d132 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -61,7 +61,7 @@ struct atmel_tcb_pwm_chip {
struct atmel_tcb_channel bkup;
};
-const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128, 0, };
+static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128, 0, };
static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
{
@@ -72,7 +72,8 @@ static int atmel_tcb_pwm_set_polarity(struct pwm_chip *chip,
struct pwm_device *pwm,
enum pwm_polarity polarity)
{
- struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+ struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
tcbpwm->polarity = polarity;
@@ -97,7 +98,6 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
return ret;
}
- pwm_set_chip_data(pwm, tcbpwm);
tcbpwm->polarity = PWM_POLARITY_NORMAL;
tcbpwm->duty = 0;
tcbpwm->period = 0;
@@ -139,7 +139,7 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
static void atmel_tcb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+ struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
clk_disable_unprepare(tcbpwmc->clk);
tcbpwmc->pwms[pwm->hwpwm] = NULL;
@@ -149,7 +149,7 @@ static void atmel_tcb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+ struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
unsigned cmr;
enum pwm_polarity polarity = tcbpwm->polarity;
@@ -206,7 +206,7 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+ struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
u32 cmr;
enum pwm_polarity polarity = tcbpwm->polarity;
@@ -291,7 +291,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+ struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
struct atmel_tcb_pwm_device *atcbpwm = NULL;
int i = 0;
int slowclk = 0;
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index d7ad88685830..b0d91142da8d 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -23,29 +23,6 @@ static inline struct clps711x_chip *to_clps711x_chip(struct pwm_chip *chip)
return container_of(chip, struct clps711x_chip, chip);
}
-static void clps711x_pwm_update_val(struct clps711x_chip *priv, u32 n, u32 v)
-{
- /* PWM0 - bits 4..7, PWM1 - bits 8..11 */
- u32 shift = (n + 1) * 4;
- unsigned long flags;
- u32 tmp;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- tmp = readl(priv->pmpcon);
- tmp &= ~(0xf << shift);
- tmp |= v << shift;
- writel(tmp, priv->pmpcon);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static unsigned int clps711x_get_duty(struct pwm_device *pwm, unsigned int v)
-{
- /* Duty cycle 0..15 max */
- return DIV64_U64_ROUND_CLOSEST(v * 0xf, pwm->args.period);
-}
-
static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct clps711x_chip *priv = to_clps711x_chip(chip);
@@ -60,44 +37,41 @@ static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
return 0;
}
-static int clps711x_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int clps711x_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
{
struct clps711x_chip *priv = to_clps711x_chip(chip);
- unsigned int duty;
+ /* PWM0 - bits 4..7, PWM1 - bits 8..11 */
+ u32 shift = (pwm->hwpwm + 1) * 4;
+ unsigned long flags;
+ u32 pmpcon, val;
- if (period_ns != pwm->args.period)
+ if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
- duty = clps711x_get_duty(pwm, duty_ns);
- clps711x_pwm_update_val(priv, pwm->hwpwm, duty);
-
- return 0;
-}
+ if (state->period != pwm->args.period)
+ return -EINVAL;
-static int clps711x_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct clps711x_chip *priv = to_clps711x_chip(chip);
- unsigned int duty;
+ if (state->enabled)
+ val = mul_u64_u64_div_u64(state->duty_cycle, 0xf, state->period);
+ else
+ val = 0;
- duty = clps711x_get_duty(pwm, pwm_get_duty_cycle(pwm));
- clps711x_pwm_update_val(priv, pwm->hwpwm, duty);
+ spin_lock_irqsave(&priv->lock, flags);
- return 0;
-}
+ pmpcon = readl(priv->pmpcon);
+ pmpcon &= ~(0xf << shift);
+ pmpcon |= val << shift;
+ writel(pmpcon, priv->pmpcon);
-static void clps711x_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct clps711x_chip *priv = to_clps711x_chip(chip);
+ spin_unlock_irqrestore(&priv->lock, flags);
- clps711x_pwm_update_val(priv, pwm->hwpwm, 0);
+ return 0;
}
static const struct pwm_ops clps711x_pwm_ops = {
.request = clps711x_pwm_request,
- .config = clps711x_pwm_config,
- .enable = clps711x_pwm_enable,
- .disable = clps711x_pwm_disable,
+ .apply = clps711x_pwm_apply,
.owner = THIS_MODULE,
};
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 5e29d9c682c3..7f10f56c3eb6 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -12,17 +12,21 @@
#include <linux/pwm.h>
#include <linux/slab.h>
+#include <dt-bindings/mfd/cros_ec.h>
+
/**
* struct cros_ec_pwm_device - Driver data for EC PWM
*
* @dev: Device node
* @ec: Pointer to EC device
* @chip: PWM controller chip
+ * @use_pwm_type: Use PWM types instead of generic channels
*/
struct cros_ec_pwm_device {
struct device *dev;
struct cros_ec_device *ec;
struct pwm_chip chip;
+ bool use_pwm_type;
};
/**
@@ -58,14 +62,31 @@ static void cros_ec_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
kfree(channel);
}
-static int cros_ec_pwm_set_duty(struct cros_ec_device *ec, u8 index, u16 duty)
+static int cros_ec_dt_type_to_pwm_type(u8 dt_index, u8 *pwm_type)
{
+ switch (dt_index) {
+ case CROS_EC_PWM_DT_KB_LIGHT:
+ *pwm_type = EC_PWM_TYPE_KB_LIGHT;
+ return 0;
+ case CROS_EC_PWM_DT_DISPLAY_LIGHT:
+ *pwm_type = EC_PWM_TYPE_DISPLAY_LIGHT;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int cros_ec_pwm_set_duty(struct cros_ec_pwm_device *ec_pwm, u8 index,
+ u16 duty)
+{
+ struct cros_ec_device *ec = ec_pwm->ec;
struct {
struct cros_ec_command msg;
struct ec_params_pwm_set_duty params;
} __packed buf;
struct ec_params_pwm_set_duty *params = &buf.params;
struct cros_ec_command *msg = &buf.msg;
+ int ret;
memset(&buf, 0, sizeof(buf));
@@ -75,14 +96,25 @@ static int cros_ec_pwm_set_duty(struct cros_ec_device *ec, u8 index, u16 duty)
msg->outsize = sizeof(*params);
params->duty = duty;
- params->pwm_type = EC_PWM_TYPE_GENERIC;
- params->index = index;
+
+ if (ec_pwm->use_pwm_type) {
+ ret = cros_ec_dt_type_to_pwm_type(index, &params->pwm_type);
+ if (ret) {
+ dev_err(ec->dev, "Invalid PWM type index: %d\n", index);
+ return ret;
+ }
+ params->index = 0;
+ } else {
+ params->pwm_type = EC_PWM_TYPE_GENERIC;
+ params->index = index;
+ }
return cros_ec_cmd_xfer_status(ec, msg);
}
-static int cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index)
+static int cros_ec_pwm_get_duty(struct cros_ec_pwm_device *ec_pwm, u8 index)
{
+ struct cros_ec_device *ec = ec_pwm->ec;
struct {
struct cros_ec_command msg;
union {
@@ -102,8 +134,17 @@ static int cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index)
msg->insize = sizeof(*resp);
msg->outsize = sizeof(*params);
- params->pwm_type = EC_PWM_TYPE_GENERIC;
- params->index = index;
+ if (ec_pwm->use_pwm_type) {
+ ret = cros_ec_dt_type_to_pwm_type(index, &params->pwm_type);
+ if (ret) {
+ dev_err(ec->dev, "Invalid PWM type index: %d\n", index);
+ return ret;
+ }
+ params->index = 0;
+ } else {
+ params->pwm_type = EC_PWM_TYPE_GENERIC;
+ params->index = index;
+ }
ret = cros_ec_cmd_xfer_status(ec, msg);
if (ret < 0)
@@ -133,7 +174,7 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
*/
duty_cycle = state->enabled ? state->duty_cycle : 0;
- ret = cros_ec_pwm_set_duty(ec_pwm->ec, pwm->hwpwm, duty_cycle);
+ ret = cros_ec_pwm_set_duty(ec_pwm, pwm->hwpwm, duty_cycle);
if (ret < 0)
return ret;
@@ -149,7 +190,7 @@ static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
int ret;
- ret = cros_ec_pwm_get_duty(ec_pwm->ec, pwm->hwpwm);
+ ret = cros_ec_pwm_get_duty(ec_pwm, pwm->hwpwm);
if (ret < 0) {
dev_err(chip->dev, "error getting initial duty: %d\n", ret);
return;
@@ -204,13 +245,13 @@ static const struct pwm_ops cros_ec_pwm_ops = {
* of PWMs it supports directly, so we have to read the pwm duty cycle for
* subsequent channels until we get an error.
*/
-static int cros_ec_num_pwms(struct cros_ec_device *ec)
+static int cros_ec_num_pwms(struct cros_ec_pwm_device *ec_pwm)
{
int i, ret;
/* The index field is only 8 bits */
for (i = 0; i <= U8_MAX; i++) {
- ret = cros_ec_pwm_get_duty(ec, i);
+ ret = cros_ec_pwm_get_duty(ec_pwm, i);
/*
* We look for SUCCESS, INVALID_COMMAND, or INVALID_PARAM
* responses; everything else is treated as an error.
@@ -236,6 +277,7 @@ static int cros_ec_pwm_probe(struct platform_device *pdev)
{
struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
struct cros_ec_pwm_device *ec_pwm;
struct pwm_chip *chip;
int ret;
@@ -251,17 +293,26 @@ static int cros_ec_pwm_probe(struct platform_device *pdev)
chip = &ec_pwm->chip;
ec_pwm->ec = ec;
+ if (of_device_is_compatible(np, "google,cros-ec-pwm-type"))
+ ec_pwm->use_pwm_type = true;
+
/* PWM chip */
chip->dev = dev;
chip->ops = &cros_ec_pwm_ops;
chip->of_xlate = cros_ec_pwm_xlate;
chip->of_pwm_n_cells = 1;
- ret = cros_ec_num_pwms(ec);
- if (ret < 0) {
- dev_err(dev, "Couldn't find PWMs: %d\n", ret);
- return ret;
+
+ if (ec_pwm->use_pwm_type) {
+ chip->npwm = CROS_EC_PWM_DT_COUNT;
+ } else {
+ ret = cros_ec_num_pwms(ec_pwm);
+ if (ret < 0) {
+ dev_err(dev, "Couldn't find PWMs: %d\n", ret);
+ return ret;
+ }
+ chip->npwm = ret;
}
- chip->npwm = ret;
+
dev_dbg(dev, "Probed %u PWMs\n", chip->npwm);
ret = pwmchip_add(chip);
@@ -288,6 +339,7 @@ static int cros_ec_pwm_remove(struct platform_device *dev)
#ifdef CONFIG_OF
static const struct of_device_id cros_ec_pwm_of_match[] = {
{ .compatible = "google,cros-ec-pwm" },
+ { .compatible = "google,cros-ec-pwm-type" },
{},
};
MODULE_DEVICE_TABLE(of, cros_ec_pwm_of_match);
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index ea17d446a627..215ef9069114 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -93,7 +93,7 @@ static void lp3943_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
}
static int lp3943_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+ u64 duty_ns, u64 period_ns)
{
struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
struct lp3943 *lp3943 = lp3943_pwm->lp3943;
@@ -118,14 +118,20 @@ static int lp3943_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
reg_duty = LP3943_REG_PWM1;
}
- period_ns = clamp(period_ns, LP3943_MIN_PERIOD, LP3943_MAX_PERIOD);
- val = (u8)(period_ns / LP3943_MIN_PERIOD - 1);
+ /*
+ * Note that after this clamping, period_ns fits into an int. This is
+ * helpful because we can resort to integer division below instead of
+ * the (more expensive) 64 bit division.
+ */
+ period_ns = clamp(period_ns, (u64)LP3943_MIN_PERIOD, (u64)LP3943_MAX_PERIOD);
+ val = (u8)((int)period_ns / LP3943_MIN_PERIOD - 1);
err = lp3943_write_byte(lp3943, reg_prescale, val);
if (err)
return err;
- val = (u8)(duty_ns * LP3943_MAX_DUTY / period_ns);
+ duty_ns = min(duty_ns, period_ns);
+ val = (u8)((int)duty_ns * LP3943_MAX_DUTY / (int)period_ns);
return lp3943_write_byte(lp3943, reg_duty, val);
}
@@ -182,12 +188,34 @@ static void lp3943_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
lp3943_pwm_set_mode(lp3943_pwm, pwm_map, LP3943_GPIO_OUT_HIGH);
}
+static int lp3943_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ lp3943_pwm_disable(chip, pwm);
+ return 0;
+ }
+
+ err = lp3943_pwm_config(chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!pwm->state.enabled)
+ err = lp3943_pwm_enable(chip, pwm);
+
+ return err;
+}
+
static const struct pwm_ops lp3943_pwm_ops = {
.request = lp3943_pwm_request,
.free = lp3943_pwm_free,
- .config = lp3943_pwm_config,
- .enable = lp3943_pwm_enable,
- .disable = lp3943_pwm_disable,
+ .apply = lp3943_pwm_apply,
.owner = THIS_MODULE,
};
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index b909096dba2f..272e0b5d01b8 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -226,14 +226,7 @@ static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
-static int lpc18xx_pwm_set_polarity(struct pwm_chip *chip,
- struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- return 0;
-}
-
-static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm, enum pwm_polarity polarity)
{
struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
struct lpc18xx_pwm_data *lpc18xx_data = &lpc18xx_pwm->channeldata[pwm->hwpwm];
@@ -249,7 +242,7 @@ static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
LPC18XX_PWM_EVSTATEMSK(lpc18xx_data->duty_event),
LPC18XX_PWM_EVSTATEMSK_ALL);
- if (pwm_get_polarity(pwm) == PWM_POLARITY_NORMAL) {
+ if (polarity == PWM_POLARITY_NORMAL) {
set_event = lpc18xx_pwm->period_event;
clear_event = lpc18xx_data->duty_event;
res_action = LPC18XX_PWM_RES_SET;
@@ -308,11 +301,35 @@ static void lpc18xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
clear_bit(lpc18xx_data->duty_event, &lpc18xx_pwm->event_map);
}
+static int lpc18xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+ bool enabled = pwm->state.enabled;
+
+ if (state->polarity != pwm->state.polarity && pwm->state.enabled) {
+ lpc18xx_pwm_disable(chip, pwm);
+ enabled = false;
+ }
+
+ if (!state->enabled) {
+ if (enabled)
+ lpc18xx_pwm_disable(chip, pwm);
+
+ return 0;
+ }
+
+ err = lpc18xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!enabled)
+ err = lpc18xx_pwm_enable(chip, pwm, state->polarity);
+
+ return err;
+}
static const struct pwm_ops lpc18xx_pwm_ops = {
- .config = lpc18xx_pwm_config,
- .set_polarity = lpc18xx_pwm_set_polarity,
- .enable = lpc18xx_pwm_enable,
- .disable = lpc18xx_pwm_disable,
+ .apply = lpc18xx_pwm_apply,
.request = lpc18xx_pwm_request,
.free = lpc18xx_pwm_free,
.owner = THIS_MODULE,
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index ddeab5687cb8..86a0ea0f6955 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -88,10 +88,33 @@ static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
clk_disable_unprepare(lpc32xx->clk);
}
+static int lpc32xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ lpc32xx_pwm_disable(chip, pwm);
+
+ return 0;
+ }
+
+ err = lpc32xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!pwm->state.enabled)
+ err = lpc32xx_pwm_enable(chip, pwm);
+
+ return err;
+}
+
static const struct pwm_ops lpc32xx_pwm_ops = {
- .config = lpc32xx_pwm_config,
- .enable = lpc32xx_pwm_enable,
- .disable = lpc32xx_pwm_disable,
+ .apply = lpc32xx_pwm_apply,
.owner = THIS_MODULE,
};
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index 568b13a48717..d28c0874c7f2 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -198,10 +198,33 @@ static void pwm_mediatek_disable(struct pwm_chip *chip, struct pwm_device *pwm)
pwm_mediatek_clk_disable(chip, pwm);
}
+static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ pwm_mediatek_disable(chip, pwm);
+
+ return 0;
+ }
+
+ err = pwm_mediatek_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!pwm->state.enabled)
+ err = pwm_mediatek_enable(chip, pwm);
+
+ return err;
+}
+
static const struct pwm_ops pwm_mediatek_ops = {
- .config = pwm_mediatek_config,
- .enable = pwm_mediatek_enable,
- .disable = pwm_mediatek_disable,
+ .apply = pwm_mediatek_apply,
.owner = THIS_MODULE,
};
@@ -264,6 +287,12 @@ static const struct pwm_mediatek_of_data mt2712_pwm_data = {
.has_ck_26m_sel = false,
};
+static const struct pwm_mediatek_of_data mt6795_pwm_data = {
+ .num_pwms = 7,
+ .pwm45_fixup = false,
+ .has_ck_26m_sel = false,
+};
+
static const struct pwm_mediatek_of_data mt7622_pwm_data = {
.num_pwms = 6,
.pwm45_fixup = false,
@@ -302,6 +331,7 @@ static const struct pwm_mediatek_of_data mt8516_pwm_data = {
static const struct of_device_id pwm_mediatek_of_match[] = {
{ .compatible = "mediatek,mt2712-pwm", .data = &mt2712_pwm_data },
+ { .compatible = "mediatek,mt6795-pwm", .data = &mt6795_pwm_data },
{ .compatible = "mediatek,mt7622-pwm", .data = &mt7622_pwm_data },
{ .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data },
{ .compatible = "mediatek,mt7628-pwm", .data = &mt7628_pwm_data },
diff --git a/drivers/pwm/pwm-raspberrypi-poe.c b/drivers/pwm/pwm-raspberrypi-poe.c
index e52e29fc8231..6ff73029f367 100644
--- a/drivers/pwm/pwm-raspberrypi-poe.c
+++ b/drivers/pwm/pwm-raspberrypi-poe.c
@@ -66,7 +66,7 @@ static int raspberrypi_pwm_get_property(struct rpi_firmware *firmware,
u32 reg, u32 *val)
{
struct raspberrypi_pwm_prop msg = {
- .reg = reg
+ .reg = cpu_to_le32(reg),
};
int ret;
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index 4381df90a527..d7311614c846 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -89,71 +89,71 @@ struct tpu_device {
#define to_tpu_device(c) container_of(c, struct tpu_device, chip)
-static void tpu_pwm_write(struct tpu_pwm_device *pwm, int reg_nr, u16 value)
+static void tpu_pwm_write(struct tpu_pwm_device *tpd, int reg_nr, u16 value)
{
- void __iomem *base = pwm->tpu->base + TPU_CHANNEL_OFFSET
- + pwm->channel * TPU_CHANNEL_SIZE;
+ void __iomem *base = tpd->tpu->base + TPU_CHANNEL_OFFSET
+ + tpd->channel * TPU_CHANNEL_SIZE;
iowrite16(value, base + reg_nr);
}
-static void tpu_pwm_set_pin(struct tpu_pwm_device *pwm,
+static void tpu_pwm_set_pin(struct tpu_pwm_device *tpd,
enum tpu_pin_state state)
{
static const char * const states[] = { "inactive", "PWM", "active" };
- dev_dbg(&pwm->tpu->pdev->dev, "%u: configuring pin as %s\n",
- pwm->channel, states[state]);
+ dev_dbg(&tpd->tpu->pdev->dev, "%u: configuring pin as %s\n",
+ tpd->channel, states[state]);
switch (state) {
case TPU_PIN_INACTIVE:
- tpu_pwm_write(pwm, TPU_TIORn,
- pwm->polarity == PWM_POLARITY_INVERSED ?
+ tpu_pwm_write(tpd, TPU_TIORn,
+ tpd->polarity == PWM_POLARITY_INVERSED ?
TPU_TIOR_IOA_1 : TPU_TIOR_IOA_0);
break;
case TPU_PIN_PWM:
- tpu_pwm_write(pwm, TPU_TIORn,
- pwm->polarity == PWM_POLARITY_INVERSED ?
+ tpu_pwm_write(tpd, TPU_TIORn,
+ tpd->polarity == PWM_POLARITY_INVERSED ?
TPU_TIOR_IOA_0_SET : TPU_TIOR_IOA_1_CLR);
break;
case TPU_PIN_ACTIVE:
- tpu_pwm_write(pwm, TPU_TIORn,
- pwm->polarity == PWM_POLARITY_INVERSED ?
+ tpu_pwm_write(tpd, TPU_TIORn,
+ tpd->polarity == PWM_POLARITY_INVERSED ?
TPU_TIOR_IOA_0 : TPU_TIOR_IOA_1);
break;
}
}
-static void tpu_pwm_start_stop(struct tpu_pwm_device *pwm, int start)
+static void tpu_pwm_start_stop(struct tpu_pwm_device *tpd, int start)
{
unsigned long flags;
u16 value;
- spin_lock_irqsave(&pwm->tpu->lock, flags);
- value = ioread16(pwm->tpu->base + TPU_TSTR);
+ spin_lock_irqsave(&tpd->tpu->lock, flags);
+ value = ioread16(tpd->tpu->base + TPU_TSTR);
if (start)
- value |= 1 << pwm->channel;
+ value |= 1 << tpd->channel;
else
- value &= ~(1 << pwm->channel);
+ value &= ~(1 << tpd->channel);
- iowrite16(value, pwm->tpu->base + TPU_TSTR);
- spin_unlock_irqrestore(&pwm->tpu->lock, flags);
+ iowrite16(value, tpd->tpu->base + TPU_TSTR);
+ spin_unlock_irqrestore(&tpd->tpu->lock, flags);
}
-static int tpu_pwm_timer_start(struct tpu_pwm_device *pwm)
+static int tpu_pwm_timer_start(struct tpu_pwm_device *tpd)
{
int ret;
- if (!pwm->timer_on) {
+ if (!tpd->timer_on) {
/* Wake up device and enable clock. */
- pm_runtime_get_sync(&pwm->tpu->pdev->dev);
- ret = clk_prepare_enable(pwm->tpu->clk);
+ pm_runtime_get_sync(&tpd->tpu->pdev->dev);
+ ret = clk_prepare_enable(tpd->tpu->clk);
if (ret) {
- dev_err(&pwm->tpu->pdev->dev, "cannot enable clock\n");
+ dev_err(&tpd->tpu->pdev->dev, "cannot enable clock\n");
return ret;
}
- pwm->timer_on = true;
+ tpd->timer_on = true;
}
/*
@@ -161,8 +161,8 @@ static int tpu_pwm_timer_start(struct tpu_pwm_device *pwm)
* completely. First drive the pin to the inactive state to avoid
* glitches.
*/
- tpu_pwm_set_pin(pwm, TPU_PIN_INACTIVE);
- tpu_pwm_start_stop(pwm, false);
+ tpu_pwm_set_pin(tpd, TPU_PIN_INACTIVE);
+ tpu_pwm_start_stop(tpd, false);
/*
* - Clear TCNT on TGRB match
@@ -172,142 +172,168 @@ static int tpu_pwm_timer_start(struct tpu_pwm_device *pwm)
* - Output 1 until TGRA, output 0 until TGRB (active high polarity
* - PWM mode
*/
- tpu_pwm_write(pwm, TPU_TCRn, TPU_TCR_CCLR_TGRB | TPU_TCR_CKEG_RISING |
- pwm->prescaler);
- tpu_pwm_write(pwm, TPU_TMDRn, TPU_TMDR_MD_PWM);
- tpu_pwm_set_pin(pwm, TPU_PIN_PWM);
- tpu_pwm_write(pwm, TPU_TGRAn, pwm->duty);
- tpu_pwm_write(pwm, TPU_TGRBn, pwm->period);
+ tpu_pwm_write(tpd, TPU_TCRn, TPU_TCR_CCLR_TGRB | TPU_TCR_CKEG_RISING |
+ tpd->prescaler);
+ tpu_pwm_write(tpd, TPU_TMDRn, TPU_TMDR_MD_PWM);
+ tpu_pwm_set_pin(tpd, TPU_PIN_PWM);
+ tpu_pwm_write(tpd, TPU_TGRAn, tpd->duty);
+ tpu_pwm_write(tpd, TPU_TGRBn, tpd->period);
- dev_dbg(&pwm->tpu->pdev->dev, "%u: TGRA 0x%04x TGRB 0x%04x\n",
- pwm->channel, pwm->duty, pwm->period);
+ dev_dbg(&tpd->tpu->pdev->dev, "%u: TGRA 0x%04x TGRB 0x%04x\n",
+ tpd->channel, tpd->duty, tpd->period);
/* Start the channel. */
- tpu_pwm_start_stop(pwm, true);
+ tpu_pwm_start_stop(tpd, true);
return 0;
}
-static void tpu_pwm_timer_stop(struct tpu_pwm_device *pwm)
+static void tpu_pwm_timer_stop(struct tpu_pwm_device *tpd)
{
- if (!pwm->timer_on)
+ if (!tpd->timer_on)
return;
/* Disable channel. */
- tpu_pwm_start_stop(pwm, false);
+ tpu_pwm_start_stop(tpd, false);
/* Stop clock and mark device as idle. */
- clk_disable_unprepare(pwm->tpu->clk);
- pm_runtime_put(&pwm->tpu->pdev->dev);
+ clk_disable_unprepare(tpd->tpu->clk);
+ pm_runtime_put(&tpd->tpu->pdev->dev);
- pwm->timer_on = false;
+ tpd->timer_on = false;
}
/* -----------------------------------------------------------------------------
* PWM API
*/
-static int tpu_pwm_request(struct pwm_chip *chip, struct pwm_device *_pwm)
+static int tpu_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct tpu_device *tpu = to_tpu_device(chip);
- struct tpu_pwm_device *pwm;
+ struct tpu_pwm_device *tpd;
- if (_pwm->hwpwm >= TPU_CHANNEL_MAX)
+ if (pwm->hwpwm >= TPU_CHANNEL_MAX)
return -EINVAL;
- pwm = kzalloc(sizeof(*pwm), GFP_KERNEL);
- if (pwm == NULL)
+ tpd = kzalloc(sizeof(*tpd), GFP_KERNEL);
+ if (tpd == NULL)
return -ENOMEM;
- pwm->tpu = tpu;
- pwm->channel = _pwm->hwpwm;
- pwm->polarity = PWM_POLARITY_NORMAL;
- pwm->prescaler = 0;
- pwm->period = 0;
- pwm->duty = 0;
+ tpd->tpu = tpu;
+ tpd->channel = pwm->hwpwm;
+ tpd->polarity = PWM_POLARITY_NORMAL;
+ tpd->prescaler = 0;
+ tpd->period = 0;
+ tpd->duty = 0;
- pwm->timer_on = false;
+ tpd->timer_on = false;
- pwm_set_chip_data(_pwm, pwm);
+ pwm_set_chip_data(pwm, tpd);
return 0;
}
-static void tpu_pwm_free(struct pwm_chip *chip, struct pwm_device *_pwm)
+static void tpu_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct tpu_pwm_device *pwm = pwm_get_chip_data(_pwm);
+ struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
- tpu_pwm_timer_stop(pwm);
- kfree(pwm);
+ tpu_pwm_timer_stop(tpd);
+ kfree(tpd);
}
-static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *_pwm,
- int duty_ns, int period_ns)
+static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ u64 duty_ns, u64 period_ns, bool enabled)
{
- static const unsigned int prescalers[] = { 1, 4, 16, 64 };
- struct tpu_pwm_device *pwm = pwm_get_chip_data(_pwm);
+ struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
struct tpu_device *tpu = to_tpu_device(chip);
unsigned int prescaler;
bool duty_only = false;
u32 clk_rate;
- u32 period;
+ u64 period;
u32 duty;
int ret;
+ clk_rate = clk_get_rate(tpu->clk);
+ if (unlikely(clk_rate > NSEC_PER_SEC)) {
+ /*
+ * This won't happen in the near future, so this is only a
+ * safeguard to prevent the following calculation from
+ * overflowing. With this limit, clk_rate * period_ns / NSEC_PER_SEC
+ * is not greater than period_ns and so fits into a u64.
+ */
+ return -EINVAL;
+ }
+
+ period = mul_u64_u64_div_u64(clk_rate, period_ns, NSEC_PER_SEC);
+
/*
- * Pick a prescaler to avoid overflowing the counter.
- * TODO: Pick the highest acceptable prescaler.
+ * Find the minimal prescaler in [0..3] such that
+ *
+ * period >> (2 * prescaler) < 0x10000
+ *
+ * This could be calculated using something like:
+ *
+ * prescaler = max(ilog2(period) / 2, 7) - 7;
+ *
+ * but given that there are only four allowed results and that ilog2
+ * isn't cheap on all platforms, using a switch statement is more effective.
*/
- clk_rate = clk_get_rate(tpu->clk);
+ switch (period) {
+ case 1 ... 0xffff:
+ prescaler = 0;
+ break;
- for (prescaler = 0; prescaler < ARRAY_SIZE(prescalers); ++prescaler) {
- period = clk_rate / prescalers[prescaler]
- / (NSEC_PER_SEC / period_ns);
- if (period <= 0xffff)
- break;
- }
+ case 0x10000 ... 0x3ffff:
+ prescaler = 1;
+ break;
- if (prescaler == ARRAY_SIZE(prescalers) || period == 0) {
- dev_err(&tpu->pdev->dev, "clock rate mismatch\n");
- return -ENOTSUPP;
+ case 0x40000 ... 0xfffff:
+ prescaler = 2;
+ break;
+
+ case 0x100000 ... 0x3fffff:
+ prescaler = 3;
+ break;
+
+ default:
+ return -EINVAL;
}
- if (duty_ns) {
- duty = clk_rate / prescalers[prescaler]
- / (NSEC_PER_SEC / duty_ns);
- if (duty > period)
- return -EINVAL;
- } else {
+ period >>= 2 * prescaler;
+
+ if (duty_ns)
+ duty = mul_u64_u64_div_u64(clk_rate, duty_ns,
+ (u64)NSEC_PER_SEC << (2 * prescaler));
+ else
duty = 0;
- }
dev_dbg(&tpu->pdev->dev,
"rate %u, prescaler %u, period %u, duty %u\n",
- clk_rate, prescalers[prescaler], period, duty);
+ clk_rate, 1 << (2 * prescaler), (u32)period, duty);
- if (pwm->prescaler == prescaler && pwm->period == period)
+ if (tpd->prescaler == prescaler && tpd->period == period)
duty_only = true;
- pwm->prescaler = prescaler;
- pwm->period = period;
- pwm->duty = duty;
+ tpd->prescaler = prescaler;
+ tpd->period = period;
+ tpd->duty = duty;
/* If the channel is disabled we're done. */
- if (!pwm_is_enabled(_pwm))
+ if (!enabled)
return 0;
- if (duty_only && pwm->timer_on) {
+ if (duty_only && tpd->timer_on) {
/*
* If only the duty cycle changed and the timer is already
* running, there's no need to reconfigure it completely, just
* modify the duty cycle.
*/
- tpu_pwm_write(pwm, TPU_TGRAn, pwm->duty);
- dev_dbg(&tpu->pdev->dev, "%u: TGRA 0x%04x\n", pwm->channel,
- pwm->duty);
+ tpu_pwm_write(tpd, TPU_TGRAn, tpd->duty);
+ dev_dbg(&tpu->pdev->dev, "%u: TGRA 0x%04x\n", tpd->channel,
+ tpd->duty);
} else {
/* Otherwise perform a full reconfiguration. */
- ret = tpu_pwm_timer_start(pwm);
+ ret = tpu_pwm_timer_start(tpd);
if (ret < 0)
return ret;
}
@@ -317,29 +343,29 @@ static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *_pwm,
* To avoid running the timer when not strictly required, handle
* 0% and 100% duty cycles as fixed levels and stop the timer.
*/
- tpu_pwm_set_pin(pwm, duty ? TPU_PIN_ACTIVE : TPU_PIN_INACTIVE);
- tpu_pwm_timer_stop(pwm);
+ tpu_pwm_set_pin(tpd, duty ? TPU_PIN_ACTIVE : TPU_PIN_INACTIVE);
+ tpu_pwm_timer_stop(tpd);
}
return 0;
}
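For illustration, a minimal userspace sketch of the prescaler selection and counter scaling used above, assuming a 33 MHz clock, a 10 ms period and a 5 ms duty cycle (all example values, not taken from the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Example inputs; the driver reads these from the clock and the PWM state. */
		uint64_t clk_rate = 33000000, period_ns = 10000000, duty_ns = 5000000;

		/* Period in timer cycles, the same quantity as mul_u64_u64_div_u64() above. */
		uint64_t period = clk_rate * period_ns / 1000000000ULL;        /* 330000 */

		/* Smallest prescaler in [0..3] with period >> (2 * prescaler) < 0x10000. */
		unsigned int prescaler = period < 0x10000 ? 0 :
					 period < 0x40000 ? 1 :
					 period < 0x100000 ? 2 : 3;            /* -> 2 */

		period >>= 2 * prescaler;                                      /* -> 20625 */

		uint64_t duty = clk_rate * duty_ns /
				(1000000000ULL << (2 * prescaler));            /* -> 10312 */

		printf("prescaler=%u period=%llu duty=%llu\n", prescaler,
		       (unsigned long long)period, (unsigned long long)duty);
		return 0;
	}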
-static int tpu_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *_pwm,
+static int tpu_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
enum pwm_polarity polarity)
{
- struct tpu_pwm_device *pwm = pwm_get_chip_data(_pwm);
+ struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
- pwm->polarity = polarity;
+ tpd->polarity = polarity;
return 0;
}
-static int tpu_pwm_enable(struct pwm_chip *chip, struct pwm_device *_pwm)
+static int tpu_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct tpu_pwm_device *pwm = pwm_get_chip_data(_pwm);
+ struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
int ret;
- ret = tpu_pwm_timer_start(pwm);
+ ret = tpu_pwm_timer_start(tpd);
if (ret < 0)
return ret;
@@ -347,32 +373,64 @@ static int tpu_pwm_enable(struct pwm_chip *chip, struct pwm_device *_pwm)
* To avoid running the timer when not strictly required, handle 0% and
* 100% duty cycles as fixed levels and stop the timer.
*/
- if (pwm->duty == 0 || pwm->duty == pwm->period) {
- tpu_pwm_set_pin(pwm, pwm->duty ?
+ if (tpd->duty == 0 || tpd->duty == tpd->period) {
+ tpu_pwm_set_pin(tpd, tpd->duty ?
TPU_PIN_ACTIVE : TPU_PIN_INACTIVE);
- tpu_pwm_timer_stop(pwm);
+ tpu_pwm_timer_stop(tpd);
}
return 0;
}
-static void tpu_pwm_disable(struct pwm_chip *chip, struct pwm_device *_pwm)
+static void tpu_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct tpu_pwm_device *pwm = pwm_get_chip_data(_pwm);
+ struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
/* The timer must be running to modify the pin output configuration. */
- tpu_pwm_timer_start(pwm);
- tpu_pwm_set_pin(pwm, TPU_PIN_INACTIVE);
- tpu_pwm_timer_stop(pwm);
+ tpu_pwm_timer_start(tpd);
+ tpu_pwm_set_pin(tpd, TPU_PIN_INACTIVE);
+ tpu_pwm_timer_stop(tpd);
+}
+
+static int tpu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+ bool enabled = pwm->state.enabled;
+
+ if (state->polarity != pwm->state.polarity) {
+ if (enabled) {
+ tpu_pwm_disable(chip, pwm);
+ enabled = false;
+ }
+
+ err = tpu_pwm_set_polarity(chip, pwm, state->polarity);
+ if (err)
+ return err;
+ }
+
+ if (!state->enabled) {
+ if (enabled)
+ tpu_pwm_disable(chip, pwm);
+
+ return 0;
+ }
+
+ err = tpu_pwm_config(pwm->chip, pwm,
+ state->duty_cycle, state->period, enabled);
+ if (err)
+ return err;
+
+ if (!enabled)
+ err = tpu_pwm_enable(chip, pwm);
+
+ return err;
}
static const struct pwm_ops tpu_pwm_ops = {
.request = tpu_pwm_request,
.free = tpu_pwm_free,
- .config = tpu_pwm_config,
- .set_polarity = tpu_pwm_set_polarity,
- .enable = tpu_pwm_enable,
- .disable = tpu_pwm_disable,
+ .apply = tpu_pwm_apply,
.owner = THIS_MODULE,
};
@@ -398,10 +456,8 @@ static int tpu_probe(struct platform_device *pdev)
return PTR_ERR(tpu->base);
tpu->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(tpu->clk)) {
- dev_err(&pdev->dev, "cannot get clock\n");
- return PTR_ERR(tpu->clk);
- }
+ if (IS_ERR(tpu->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(tpu->clk), "Failed to get clock\n");
/* Initialize and register the device. */
platform_set_drvdata(pdev, tpu);
@@ -410,25 +466,13 @@ static int tpu_probe(struct platform_device *pdev)
tpu->chip.ops = &tpu_pwm_ops;
tpu->chip.npwm = TPU_CHANNEL_MAX;
- pm_runtime_enable(&pdev->dev);
-
- ret = pwmchip_add(&tpu->chip);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to register PWM chip\n");
- pm_runtime_disable(&pdev->dev);
- return ret;
- }
-
- return 0;
-}
-
-static int tpu_remove(struct platform_device *pdev)
-{
- struct tpu_device *tpu = platform_get_drvdata(pdev);
-
- pwmchip_remove(&tpu->chip);
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to enable runtime PM\n");
- pm_runtime_disable(&pdev->dev);
+ ret = devm_pwmchip_add(&pdev->dev, &tpu->chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register PWM chip\n");
return 0;
}
@@ -447,7 +491,6 @@ MODULE_DEVICE_TABLE(of, tpu_of_table);
static struct platform_driver tpu_driver = {
.probe = tpu_probe,
- .remove = tpu_remove,
.driver = {
.name = "renesas-tpu-pwm",
.of_match_table = of_match_ptr(tpu_of_table),
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index 0a4ff55fad04..9c5b4f515641 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -321,14 +321,6 @@ static int __pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
struct samsung_pwm_channel *chan = pwm_get_chip_data(pwm);
u32 tin_ns = chan->tin_ns, tcnt, tcmp, oldtcmp;
- /*
- * We currently avoid using 64bit arithmetic by using the
- * fact that anything faster than 1Hz is easily representable
- * by 32bits.
- */
- if (period_ns > NSEC_PER_SEC)
- return -ERANGE;
-
tcnt = readl(our_chip->base + REG_TCNTB(pwm->hwpwm));
oldtcmp = readl(our_chip->base + REG_TCMPB(pwm->hwpwm));
@@ -438,13 +430,51 @@ static int pwm_samsung_set_polarity(struct pwm_chip *chip,
return 0;
}
+static int pwm_samsung_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err, enabled = pwm->state.enabled;
+
+ if (state->polarity != pwm->state.polarity) {
+ if (enabled) {
+ pwm_samsung_disable(chip, pwm);
+ enabled = false;
+ }
+
+ err = pwm_samsung_set_polarity(chip, pwm, state->polarity);
+ if (err)
+ return err;
+ }
+
+ if (!state->enabled) {
+ if (enabled)
+ pwm_samsung_disable(chip, pwm);
+
+ return 0;
+ }
+
+ /*
+ * We currently avoid using 64bit arithmetic by using the
+ * fact that anything faster than 1Hz is easily representable
+ * by 32bits.
+ */
+ if (state->period > NSEC_PER_SEC)
+ return -ERANGE;
+
+ err = pwm_samsung_config(chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!pwm->state.enabled)
+ err = pwm_samsung_enable(chip, pwm);
+
+ return err;
+}
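As a quick check of the arithmetic behind the -ERANGE test above: NSEC_PER_SEC = 10^9 while U32_MAX = 2^32 - 1 ≈ 4.29 * 10^9, so any period of at most one second, expressed in nanoseconds, fits in a u32; rejecting longer periods is what lets __pwm_samsung_config() stay with 32-bit arithmetic.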
+
static const struct pwm_ops pwm_samsung_ops = {
.request = pwm_samsung_request,
.free = pwm_samsung_free,
- .enable = pwm_samsung_enable,
- .disable = pwm_samsung_disable,
- .config = pwm_samsung_config,
- .set_polarity = pwm_samsung_set_polarity,
+ .apply = pwm_samsung_apply,
.owner = THIS_MODULE,
};
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 253c4a17d255..e6d05a329002 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -138,10 +138,9 @@ static int pwm_sifive_enable(struct pwm_chip *chip, bool enable)
dev_err(ddata->chip.dev, "Enable clk failed\n");
return ret;
}
- }
-
- if (!enable)
+ } else {
clk_disable(ddata->clk);
+ }
return 0;
}
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index f491d56254d7..44b1f93256b3 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -391,11 +391,34 @@ out:
return ret;
}
+static int sti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ sti_pwm_disable(chip, pwm);
+
+ return 0;
+ }
+
+ err = sti_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!pwm->state.enabled)
+ err = sti_pwm_enable(chip, pwm);
+
+ return err;
+}
+
static const struct pwm_ops sti_pwm_ops = {
.capture = sti_pwm_capture,
- .config = sti_pwm_config,
- .enable = sti_pwm_enable,
- .disable = sti_pwm_disable,
+ .apply = sti_pwm_apply,
.free = sti_pwm_free,
.owner = THIS_MODULE,
};
diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c
index c4336d3bace3..5d4a4762ce0c 100644
--- a/drivers/pwm/pwm-stmpe.c
+++ b/drivers/pwm/pwm-stmpe.c
@@ -259,10 +259,33 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
+static int stmpe_24xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ stmpe_24xx_pwm_disable(chip, pwm);
+
+ return 0;
+ }
+
+ err = stmpe_24xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!pwm->state.enabled)
+ err = stmpe_24xx_pwm_enable(chip, pwm);
+
+ return err;
+}
+
static const struct pwm_ops stmpe_24xx_pwm_ops = {
- .config = stmpe_24xx_pwm_config,
- .enable = stmpe_24xx_pwm_enable,
- .disable = stmpe_24xx_pwm_disable,
+ .apply = stmpe_24xx_pwm_apply,
.owner = THIS_MODULE,
};
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 16d75f9aa36a..c8445b0a3339 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -89,7 +89,6 @@ struct sun4i_pwm_chip {
void __iomem *base;
spinlock_t ctrl_lock;
const struct sun4i_pwm_data *data;
- unsigned long next_period[2];
};
static inline struct sun4i_pwm_chip *to_sun4i_pwm_chip(struct pwm_chip *chip)
@@ -236,7 +235,6 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
u32 ctrl, duty = 0, period = 0, val;
int ret;
unsigned int delay_us, prescaler = 0;
- unsigned long now;
bool bypass;
pwm_get_state(pwm, &cstate);
@@ -284,8 +282,6 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
val = (duty & PWM_DTY_MASK) | PWM_PRD(period);
sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
- sun4i_pwm->next_period[pwm->hwpwm] = jiffies +
- nsecs_to_jiffies(cstate.period + 1000);
if (state->polarity != PWM_POLARITY_NORMAL)
ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
@@ -305,15 +301,11 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
/* We need a full period to elapse before disabling the channel. */
- now = jiffies;
- if (time_before(now, sun4i_pwm->next_period[pwm->hwpwm])) {
- delay_us = jiffies_to_usecs(sun4i_pwm->next_period[pwm->hwpwm] -
- now);
- if ((delay_us / 500) > MAX_UDELAY_MS)
- msleep(delay_us / 1000 + 1);
- else
- usleep_range(delay_us, delay_us * 2);
- }
+ delay_us = DIV_ROUND_UP_ULL(cstate.period, NSEC_PER_USEC);
+ if ((delay_us / 500) > MAX_UDELAY_MS)
+ msleep(delay_us / 1000 + 1);
+ else
+ usleep_range(delay_us, delay_us * 2);
spin_lock(&sun4i_pwm->ctrl_lock);
ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
diff --git a/drivers/pwm/pwm-sunplus.c b/drivers/pwm/pwm-sunplus.c
new file mode 100644
index 000000000000..e776fd16512d
--- /dev/null
+++ b/drivers/pwm/pwm-sunplus.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PWM device driver for SUNPLUS SP7021 SoC
+ *
+ * Links:
+ * Reference Manual:
+ * https://sunplus-tibbo.atlassian.net/wiki/spaces/doc/overview
+ *
+ * Reference Manual(PWM module):
+ * https://sunplus.atlassian.net/wiki/spaces/doc/pages/461144198/12.+Pulse+Width+Modulation+PWM
+ *
+ * Limitations:
+ * - Only supports normal polarity.
+ * - The output is low when the PWM channel is disabled.
+ * - When the parameters change, the currently running period is not
+ *   completed; the new settings take effect immediately.
+ * - In .apply() both the FREQ and DUTY registers must be written. Between the
+ *   write to FREQ and the write to DUTY there is a short window in which the
+ *   new FREQ is used together with the old DUTY.
+ *
+ * Author: Hammer Hsieh <hammerh0314@gmail.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+#define SP7021_PWM_MODE0 0x000
+#define SP7021_PWM_MODE0_PWMEN(ch) BIT(ch)
+#define SP7021_PWM_MODE0_BYPASS(ch) BIT(8 + (ch))
+#define SP7021_PWM_MODE1 0x004
+#define SP7021_PWM_MODE1_CNT_EN(ch) BIT(ch)
+#define SP7021_PWM_FREQ(ch) (0x008 + 4 * (ch))
+#define SP7021_PWM_FREQ_MAX GENMASK(15, 0)
+#define SP7021_PWM_DUTY(ch) (0x018 + 4 * (ch))
+#define SP7021_PWM_DUTY_DD_SEL(ch) FIELD_PREP(GENMASK(9, 8), ch)
+#define SP7021_PWM_DUTY_MAX GENMASK(7, 0)
+#define SP7021_PWM_DUTY_MASK SP7021_PWM_DUTY_MAX
+#define SP7021_PWM_FREQ_SCALER 256
+#define SP7021_PWM_NUM 4
+
+struct sunplus_pwm {
+ struct pwm_chip chip;
+ void __iomem *base;
+ struct clk *clk;
+};
+
+static inline struct sunplus_pwm *to_sunplus_pwm(struct pwm_chip *chip)
+{
+ return container_of(chip, struct sunplus_pwm, chip);
+}
+
+static int sunplus_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct sunplus_pwm *priv = to_sunplus_pwm(chip);
+ u32 dd_freq, duty, mode0, mode1;
+ u64 clk_rate;
+
+ if (state->polarity != pwm->state.polarity)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ /* disable pwm channel output */
+ mode0 = readl(priv->base + SP7021_PWM_MODE0);
+ mode0 &= ~SP7021_PWM_MODE0_PWMEN(pwm->hwpwm);
+ writel(mode0, priv->base + SP7021_PWM_MODE0);
+ /* disable pwm channel clk source */
+ mode1 = readl(priv->base + SP7021_PWM_MODE1);
+ mode1 &= ~SP7021_PWM_MODE1_CNT_EN(pwm->hwpwm);
+ writel(mode1, priv->base + SP7021_PWM_MODE1);
+ return 0;
+ }
+
+ clk_rate = clk_get_rate(priv->clk);
+
+ /*
+ * The following calculations might overflow if the clock rate is above
+ * 256 GHz. In practice it is 202.5 MHz, so this limitation is only
+ * theoretical.
+ */
+ if (clk_rate > (u64)SP7021_PWM_FREQ_SCALER * NSEC_PER_SEC)
+ return -EINVAL;
+
+ /*
+ * With clk_rate limited above we have dd_freq <= state->period,
+ * so this cannot overflow.
+ */
+ dd_freq = mul_u64_u64_div_u64(clk_rate, state->period, (u64)SP7021_PWM_FREQ_SCALER
+ * NSEC_PER_SEC);
+
+ if (dd_freq == 0)
+ return -EINVAL;
+
+ if (dd_freq > SP7021_PWM_FREQ_MAX)
+ dd_freq = SP7021_PWM_FREQ_MAX;
+
+ writel(dd_freq, priv->base + SP7021_PWM_FREQ(pwm->hwpwm));
+
+ /* calculate and set the PWM duty */
+ mode0 = readl(priv->base + SP7021_PWM_MODE0);
+ mode0 |= SP7021_PWM_MODE0_PWMEN(pwm->hwpwm);
+ mode1 = readl(priv->base + SP7021_PWM_MODE1);
+ mode1 |= SP7021_PWM_MODE1_CNT_EN(pwm->hwpwm);
+ if (state->duty_cycle == state->period) {
+ /* PWM channel output = high */
+ mode0 |= SP7021_PWM_MODE0_BYPASS(pwm->hwpwm);
+ duty = SP7021_PWM_DUTY_DD_SEL(pwm->hwpwm) | SP7021_PWM_DUTY_MAX;
+ } else {
+ mode0 &= ~SP7021_PWM_MODE0_BYPASS(pwm->hwpwm);
+ /*
+ * duty_ns <= period_ns (27 bits) and clk_rate (28 bits), so the
+ * multiplication won't overflow.
+ */
+ duty = mul_u64_u64_div_u64(state->duty_cycle, clk_rate,
+ (u64)dd_freq * NSEC_PER_SEC);
+ duty = SP7021_PWM_DUTY_DD_SEL(pwm->hwpwm) | duty;
+ }
+ writel(duty, priv->base + SP7021_PWM_DUTY(pwm->hwpwm));
+ writel(mode1, priv->base + SP7021_PWM_MODE1);
+ writel(mode0, priv->base + SP7021_PWM_MODE0);
+
+ return 0;
+}
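A minimal sketch of the FREQ/DUTY values computed above, assuming the 202.5 MHz clock mentioned in the comment, a 1 ms period and a 50% duty cycle (example values only; the DD_SEL channel-select bits are left out):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Example inputs; the driver takes these from the clock and the PWM state. */
		uint64_t clk_rate = 202500000, period_ns = 1000000, duty_ns = 500000;

		/* Same scaling as the dd_freq calculation above. */
		uint64_t dd_freq = clk_rate * period_ns / (256ULL * 1000000000); /* -> 791 */

		/* Duty relative to dd_freq; ~50% of the 256-step scaler. */
		uint64_t duty = duty_ns * clk_rate / (dd_freq * 1000000000ULL);  /* -> 128 */

		printf("freq count=%llu duty count=%llu\n",
		       (unsigned long long)dd_freq, (unsigned long long)duty);
		return 0;
	}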
+
+static void sunplus_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct sunplus_pwm *priv = to_sunplus_pwm(chip);
+ u32 mode0, dd_freq, duty;
+ u64 clk_rate;
+
+ mode0 = readl(priv->base + SP7021_PWM_MODE0);
+
+ if (mode0 & BIT(pwm->hwpwm)) {
+ clk_rate = clk_get_rate(priv->clk);
+ dd_freq = readl(priv->base + SP7021_PWM_FREQ(pwm->hwpwm));
+ duty = readl(priv->base + SP7021_PWM_DUTY(pwm->hwpwm));
+ duty = FIELD_GET(SP7021_PWM_DUTY_MASK, duty);
+ /*
+ * dd_freq 16 bits, SP7021_PWM_FREQ_SCALER 8 bits
+ * NSEC_PER_SEC 30 bits, won't overflow.
+ */
+ state->period = DIV64_U64_ROUND_UP((u64)dd_freq * (u64)SP7021_PWM_FREQ_SCALER
+ * NSEC_PER_SEC, clk_rate);
+ /*
+ * dd_freq 16 bits, duty 8 bits, NSEC_PER_SEC 30 bits, won't overflow.
+ */
+ state->duty_cycle = DIV64_U64_ROUND_UP((u64)dd_freq * (u64)duty * NSEC_PER_SEC,
+ clk_rate);
+ state->enabled = true;
+ } else {
+ state->enabled = false;
+ }
+
+ state->polarity = PWM_POLARITY_NORMAL;
+}
+
+static const struct pwm_ops sunplus_pwm_ops = {
+ .apply = sunplus_pwm_apply,
+ .get_state = sunplus_pwm_get_state,
+ .owner = THIS_MODULE,
+};
+
+static void sunplus_pwm_clk_release(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
+static int sunplus_pwm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sunplus_pwm *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk),
+ "get pwm clock failed\n");
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, sunplus_pwm_clk_release, priv->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to release clock: %d\n", ret);
+ return ret;
+ }
+
+ priv->chip.dev = dev;
+ priv->chip.ops = &sunplus_pwm_ops;
+ priv->chip.npwm = SP7021_PWM_NUM;
+
+ ret = devm_pwmchip_add(dev, &priv->chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot register sunplus PWM\n");
+
+ return 0;
+}
+
+static const struct of_device_id sunplus_pwm_of_match[] = {
+ { .compatible = "sunplus,sp7021-pwm", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sunplus_pwm_of_match);
+
+static struct platform_driver sunplus_pwm_driver = {
+ .probe = sunplus_pwm_probe,
+ .driver = {
+ .name = "sunplus-pwm",
+ .of_match_table = sunplus_pwm_of_match,
+ },
+};
+module_platform_driver(sunplus_pwm_driver);
+
+MODULE_DESCRIPTION("Sunplus SoC PWM Driver");
+MODULE_AUTHOR("Hammer Hsieh <hammerh0314@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index e5a9ffef4a71..dad9978c9186 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -99,7 +99,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
- unsigned long long c = duty_ns, hz;
+ unsigned long long c = duty_ns;
unsigned long rate, required_clk_rate;
u32 val = 0;
int err;
@@ -156,11 +156,9 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
pc->clk_rate = clk_get_rate(pc->clk);
}
- rate = pc->clk_rate >> PWM_DUTY_WIDTH;
-
/* Consider precision in PWM_SCALE_WIDTH rate calculation */
- hz = DIV_ROUND_CLOSEST_ULL(100ULL * NSEC_PER_SEC, period_ns);
- rate = DIV_ROUND_CLOSEST_ULL(100ULL * rate, hz);
+ rate = mul_u64_u64_div_u64(pc->clk_rate, period_ns,
+ (u64)NSEC_PER_SEC << PWM_DUTY_WIDTH);
/*
* Since the actual PWM divider is the register's frequency divider
@@ -169,6 +167,8 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
*/
if (rate > 0)
rate--;
+ else
+ return -EINVAL;
/*
* Make sure that the rate will fit in the register's frequency
@@ -230,10 +230,34 @@ static void tegra_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
pm_runtime_put_sync(pc->dev);
}
+static int tegra_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+ bool enabled = pwm->state.enabled;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (enabled)
+ tegra_pwm_disable(chip, pwm);
+
+ return 0;
+ }
+
+ err = tegra_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!enabled)
+ err = tegra_pwm_enable(chip, pwm);
+
+ return err;
+}
+
static const struct pwm_ops tegra_pwm_ops = {
- .config = tegra_pwm_config,
- .enable = tegra_pwm_enable,
- .disable = tegra_pwm_disable,
+ .apply = tegra_pwm_apply,
.owner = THIS_MODULE,
};
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index 49d9f7a78012..ed0b63dd38f1 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -137,6 +137,45 @@ out:
mutex_unlock(&twl->mutex);
}
+static int twl4030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int ret;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ twl4030_pwmled_disable(chip, pwm);
+
+ return 0;
+ }
+
+ /*
+ * We cannot skip calling ->config even if state->period ==
+ * pwm->state.period && state->duty_cycle == pwm->state.duty_cycle
+ * because the last call to pwm_apply_state() might have exited early
+ * due to !state->enabled, in which case the two values in pwm->state
+ * are not necessarily what is configured in hardware.
+ */
+ ret = twl4030_pwmled_config(pwm->chip, pwm,
+ state->duty_cycle, state->period);
+ if (ret)
+ return ret;
+
+ if (!pwm->state.enabled)
+ ret = twl4030_pwmled_enable(chip, pwm);
+
+ return ret;
+}
+
+static const struct pwm_ops twl4030_pwmled_ops = {
+ .apply = twl4030_pwmled_apply,
+ .owner = THIS_MODULE,
+};
+
static int twl6030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
@@ -206,6 +245,32 @@ out:
mutex_unlock(&twl->mutex);
}
+static int twl6030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+
+ if (state->polarity != pwm->state.polarity)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ twl6030_pwmled_disable(chip, pwm);
+
+ return 0;
+ }
+
+ err = twl6030_pwmled_config(pwm->chip, pwm,
+ state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!pwm->state.enabled)
+ err = twl6030_pwmled_enable(chip, pwm);
+
+ return err;
+}
+
static int twl6030_pwmled_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct twl_pwmled_chip *twl = to_twl(chip);
@@ -257,17 +322,8 @@ out:
mutex_unlock(&twl->mutex);
}
-static const struct pwm_ops twl4030_pwmled_ops = {
- .enable = twl4030_pwmled_enable,
- .disable = twl4030_pwmled_disable,
- .config = twl4030_pwmled_config,
- .owner = THIS_MODULE,
-};
-
static const struct pwm_ops twl6030_pwmled_ops = {
- .enable = twl6030_pwmled_enable,
- .disable = twl6030_pwmled_disable,
- .config = twl6030_pwmled_config,
+ .apply = twl6030_pwmled_apply,
.request = twl6030_pwmled_request,
.free = twl6030_pwmled_free,
.owner = THIS_MODULE,
diff --git a/drivers/pwm/pwm-xilinx.c b/drivers/pwm/pwm-xilinx.c
new file mode 100644
index 000000000000..4dab2b86c427
--- /dev/null
+++ b/drivers/pwm/pwm-xilinx.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2021 Sean Anderson <sean.anderson@seco.com>
+ *
+ * Limitations:
+ * - When changing both duty cycle and period, we may end up with one cycle
+ * with the old duty cycle and the new period. This is because the counters
+ * may only be reloaded by first stopping them, or by letting them be
+ * automatically reloaded at the end of a cycle. If this automatic reload
+ * happens after we set TLR0 but before we set TLR1 then we will have a
+ * bad cycle. This could probably be fixed by reading TCR0 just before
+ * reprogramming, but I think it would add complexity for little gain.
+ * - Cannot produce 100% duty cycle by configuring the TLRs. This might be
+ * possible by stopping the counters at an appropriate point in the cycle,
+ * but this is not (yet) implemented.
+ * - Only produces "normal" output.
+ * - Always produces low output if disabled.
+ */
+
+#include <clocksource/timer-xilinx.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+
+/*
+ * The following functions are "common" to drivers for this device, and may be
+ * exported at a future date.
+ */
+u32 xilinx_timer_tlr_cycles(struct xilinx_timer_priv *priv, u32 tcsr,
+ u64 cycles)
+{
+ WARN_ON(cycles < 2 || cycles - 2 > priv->max);
+
+ if (tcsr & TCSR_UDT)
+ return cycles - 2;
+ return priv->max - cycles + 2;
+}
+
+unsigned int xilinx_timer_get_period(struct xilinx_timer_priv *priv,
+ u32 tlr, u32 tcsr)
+{
+ u64 cycles;
+
+ if (tcsr & TCSR_UDT)
+ cycles = tlr + 2;
+ else
+ cycles = (u64)priv->max - tlr + 2;
+
+ /* cycles has a max of 2^32 + 2, so we can't overflow */
+ return DIV64_U64_ROUND_UP(cycles * NSEC_PER_SEC,
+ clk_get_rate(priv->clk));
+}
+
+/*
+ * The idea here is to capture whether the PWM is actually running (e.g.
+ * because we or the bootloader set it up) and we need to be careful to ensure
+ * we don't cause a glitch. According to the data sheet, to enable the PWM we
+ * need to
+ *
+ * - Set both timers to generate mode (MDT=1)
+ * - Set both timers to PWM mode (PWMA=1)
+ * - Enable the generate out signals (GENT=1)
+ *
+ * In addition,
+ *
+ * - The timer must be running (ENT=1)
+ * - The timer must auto-reload TLR into TCR (ARHT=1)
+ * - We must not be in the process of loading TLR into TCR (LOAD=0)
+ * - Cascade mode must be disabled (CASC=0)
+ *
+ * If any of these differ from usual, then the PWM is either disabled, or is
+ * running in a mode that this driver does not support.
+ */
+#define TCSR_PWM_SET (TCSR_GENT | TCSR_ARHT | TCSR_ENT | TCSR_PWMA)
+#define TCSR_PWM_CLEAR (TCSR_MDT | TCSR_LOAD)
+#define TCSR_PWM_MASK (TCSR_PWM_SET | TCSR_PWM_CLEAR)
+
+struct xilinx_pwm_device {
+ struct pwm_chip chip;
+ struct xilinx_timer_priv priv;
+};
+
+static inline struct xilinx_timer_priv
+*xilinx_pwm_chip_to_priv(struct pwm_chip *chip)
+{
+ return &container_of(chip, struct xilinx_pwm_device, chip)->priv;
+}
+
+static bool xilinx_timer_pwm_enabled(u32 tcsr0, u32 tcsr1)
+{
+ return ((TCSR_PWM_MASK | TCSR_CASC) & tcsr0) == TCSR_PWM_SET &&
+ (TCSR_PWM_MASK & tcsr1) == TCSR_PWM_SET;
+}
+
+static int xilinx_pwm_apply(struct pwm_chip *chip, struct pwm_device *unused,
+ const struct pwm_state *state)
+{
+ struct xilinx_timer_priv *priv = xilinx_pwm_chip_to_priv(chip);
+ u32 tlr0, tlr1, tcsr0, tcsr1;
+ u64 period_cycles, duty_cycles;
+ unsigned long rate;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ /*
+ * To be representable by TLR, cycles must be between 2 and
+ * priv->max + 2. To enforce this we can reduce the cycles, but we may
+ * not increase them. Caveat emptor: while this does result in more
+ * predictable rounding, it may also result in a completely different
+ * duty cycle (% high time) than what was requested.
+ */
+ rate = clk_get_rate(priv->clk);
+ /* Avoid overflow */
+ period_cycles = min_t(u64, state->period, U32_MAX * NSEC_PER_SEC);
+ period_cycles = mul_u64_u32_div(period_cycles, rate, NSEC_PER_SEC);
+ period_cycles = min_t(u64, period_cycles, priv->max + 2);
+ if (period_cycles < 2)
+ return -ERANGE;
+
+ /* Same thing for duty cycles */
+ duty_cycles = min_t(u64, state->duty_cycle, U32_MAX * NSEC_PER_SEC);
+ duty_cycles = mul_u64_u32_div(duty_cycles, rate, NSEC_PER_SEC);
+ duty_cycles = min_t(u64, duty_cycles, priv->max + 2);
+
+ /*
+ * If we specify 100% duty cycle, we will get 0% instead, so decrease
+ * the duty cycle count by one.
+ */
+ if (duty_cycles >= period_cycles)
+ duty_cycles = period_cycles - 1;
+
+ /* Round down to 0% duty cycle for unrepresentable duty cycles */
+ if (duty_cycles < 2)
+ duty_cycles = period_cycles;
+
+ regmap_read(priv->map, TCSR0, &tcsr0);
+ regmap_read(priv->map, TCSR1, &tcsr1);
+ tlr0 = xilinx_timer_tlr_cycles(priv, tcsr0, period_cycles);
+ tlr1 = xilinx_timer_tlr_cycles(priv, tcsr1, duty_cycles);
+ regmap_write(priv->map, TLR0, tlr0);
+ regmap_write(priv->map, TLR1, tlr1);
+
+ if (state->enabled) {
+ /*
+ * If the PWM is already running, then the counters will be
+ * reloaded at the end of the current cycle.
+ */
+ if (!xilinx_timer_pwm_enabled(tcsr0, tcsr1)) {
+ /* Load TLR into TCR */
+ regmap_write(priv->map, TCSR0, tcsr0 | TCSR_LOAD);
+ regmap_write(priv->map, TCSR1, tcsr1 | TCSR_LOAD);
+ /* Enable timers all at once with ENALL */
+ tcsr0 = (TCSR_PWM_SET & ~TCSR_ENT) | (tcsr0 & TCSR_UDT);
+ tcsr1 = TCSR_PWM_SET | TCSR_ENALL | (tcsr1 & TCSR_UDT);
+ regmap_write(priv->map, TCSR0, tcsr0);
+ regmap_write(priv->map, TCSR1, tcsr1);
+ }
+ } else {
+ regmap_write(priv->map, TCSR0, 0);
+ regmap_write(priv->map, TCSR1, 0);
+ }
+
+ return 0;
+}
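A rough userspace sketch of the ns-to-cycles conversion and clamping above, assuming a 100 MHz s_axi_aclk, a 32-bit counter, a 1 ms period and a 25% duty cycle (example numbers; the TLR/TCSR programming itself is left out):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Example inputs; rate comes from the s_axi_aclk clock in the driver. */
		uint64_t rate = 100000000, period_ns = 1000000, duty_ns = 250000;
		uint64_t max = 0xffffffffULL;                       /* 32-bit counter */

		uint64_t period_cycles = period_ns * rate / 1000000000ULL;  /* 100000 */
		uint64_t duty_cycles = duty_ns * rate / 1000000000ULL;      /*  25000 */

		if (period_cycles > max + 2)        /* may only be reduced, never raised */
			period_cycles = max + 2;
		if (duty_cycles >= period_cycles)   /* 100% is not representable */
			duty_cycles = period_cycles - 1;
		if (duty_cycles < 2)                /* unrepresentable: round down to 0% */
			duty_cycles = period_cycles;

		printf("period=%llu cycles, duty=%llu cycles\n",
		       (unsigned long long)period_cycles,
		       (unsigned long long)duty_cycles);
		return 0;
	}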
+
+static void xilinx_pwm_get_state(struct pwm_chip *chip,
+ struct pwm_device *unused,
+ struct pwm_state *state)
+{
+ struct xilinx_timer_priv *priv = xilinx_pwm_chip_to_priv(chip);
+ u32 tlr0, tlr1, tcsr0, tcsr1;
+
+ regmap_read(priv->map, TLR0, &tlr0);
+ regmap_read(priv->map, TLR1, &tlr1);
+ regmap_read(priv->map, TCSR0, &tcsr0);
+ regmap_read(priv->map, TCSR1, &tcsr1);
+ state->period = xilinx_timer_get_period(priv, tlr0, tcsr0);
+ state->duty_cycle = xilinx_timer_get_period(priv, tlr1, tcsr1);
+ state->enabled = xilinx_timer_pwm_enabled(tcsr0, tcsr1);
+ state->polarity = PWM_POLARITY_NORMAL;
+
+ /*
+ * 100% duty cycle results in constant low output. This may be (very)
+ * wrong if rate > 1 GHz, so fix this if you have such hardware :)
+ */
+ if (state->period == state->duty_cycle)
+ state->duty_cycle = 0;
+}
+
+static const struct pwm_ops xilinx_pwm_ops = {
+ .apply = xilinx_pwm_apply,
+ .get_state = xilinx_pwm_get_state,
+ .owner = THIS_MODULE,
+};
+
+static const struct regmap_config xilinx_pwm_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .max_register = TCR1,
+};
+
+static int xilinx_pwm_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct xilinx_timer_priv *priv;
+ struct xilinx_pwm_device *xilinx_pwm;
+ u32 pwm_cells, one_timer, width;
+ void __iomem *regs;
+
+ /* If there are no PWM cells, this binding is for a timer */
+ ret = of_property_read_u32(np, "#pwm-cells", &pwm_cells);
+ if (ret == -EINVAL)
+ return -ENODEV;
+ if (ret)
+ return dev_err_probe(dev, ret, "could not read #pwm-cells\n");
+
+ xilinx_pwm = devm_kzalloc(dev, sizeof(*xilinx_pwm), GFP_KERNEL);
+ if (!xilinx_pwm)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, xilinx_pwm);
+ priv = &xilinx_pwm->priv;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ priv->map = devm_regmap_init_mmio(dev, regs,
+ &xilinx_pwm_regmap_config);
+ if (IS_ERR(priv->map))
+ return dev_err_probe(dev, PTR_ERR(priv->map),
+ "Could not create regmap\n");
+
+ ret = of_property_read_u32(np, "xlnx,one-timer-only", &one_timer);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Could not read xlnx,one-timer-only\n");
+
+ if (one_timer)
+ return dev_err_probe(dev, -EINVAL,
+ "Two timers required for PWM mode\n");
+
+ ret = of_property_read_u32(np, "xlnx,count-width", &width);
+ if (ret == -EINVAL)
+ width = 32;
+ else if (ret)
+ return dev_err_probe(dev, ret,
+ "Could not read xlnx,count-width\n");
+
+ if (width != 8 && width != 16 && width != 32)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid counter width %d\n", width);
+ priv->max = BIT_ULL(width) - 1;
+
+ /*
+ * The polarity of the Generate Out signals must be active high for PWM
+ * mode to work. We could determine this from the device tree, but
+ * alas, such properties are not allowed to be used.
+ */
+
+ priv->clk = devm_clk_get(dev, "s_axi_aclk");
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk),
+ "Could not get clock\n");
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "Clock enable failed\n");
+ clk_rate_exclusive_get(priv->clk);
+
+ xilinx_pwm->chip.dev = dev;
+ xilinx_pwm->chip.ops = &xilinx_pwm_ops;
+ xilinx_pwm->chip.npwm = 1;
+ ret = pwmchip_add(&xilinx_pwm->chip);
+ if (ret) {
+ clk_rate_exclusive_put(priv->clk);
+ clk_disable_unprepare(priv->clk);
+ return dev_err_probe(dev, ret, "Could not register PWM chip\n");
+ }
+
+ return 0;
+}
+
+static int xilinx_pwm_remove(struct platform_device *pdev)
+{
+ struct xilinx_pwm_device *xilinx_pwm = platform_get_drvdata(pdev);
+
+ pwmchip_remove(&xilinx_pwm->chip);
+ clk_rate_exclusive_put(xilinx_pwm->priv.clk);
+ clk_disable_unprepare(xilinx_pwm->priv.clk);
+ return 0;
+}
+
+static const struct of_device_id xilinx_pwm_of_match[] = {
+ { .compatible = "xlnx,xps-timer-1.00.a", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xilinx_pwm_of_match);
+
+static struct platform_driver xilinx_pwm_driver = {
+ .probe = xilinx_pwm_probe,
+ .remove = xilinx_pwm_remove,
+ .driver = {
+ .name = "xilinx-pwm",
+ .of_match_table = of_match_ptr(xilinx_pwm_of_match),
+ },
+};
+module_platform_driver(xilinx_pwm_driver);
+
+MODULE_ALIAS("platform:xilinx-pwm");
+MODULE_DESCRIPTION("PWM driver for Xilinx LogiCORE IP AXI Timer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 7df466e22282..2cdc054e53a5 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -915,7 +915,7 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
goto err_req;
}
- if (xfer->length + xfer->offset > map->size) {
+ if (xfer->length + xfer->offset > req->map->size) {
ret = -EINVAL;
goto err_req;
}
@@ -927,7 +927,7 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
}
sg_set_buf(req->sgt.sgl,
- map->virt_addr + (baddr - map->phys_addr) +
+ req->map->virt_addr + (baddr - req->map->phys_addr) +
xfer->offset, xfer->length);
}
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index aa55cfca9e40..6b617024a67d 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -10,6 +10,7 @@
#include <linux/of_device.h>
#include <linux/regulator/of_regulator.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/pfuze100.h>
@@ -571,10 +572,10 @@ static inline struct device_node *match_of_node(int index)
return pfuze_matches[index].of_node;
}
-static struct pfuze_chip *syspm_pfuze_chip;
-
-static void pfuze_power_off_prepare(void)
+static int pfuze_power_off_prepare(struct sys_off_data *data)
{
+ struct pfuze_chip *syspm_pfuze_chip = data->cb_data;
+
dev_info(syspm_pfuze_chip->dev, "Configure standby mode for power off");
/* Switch from default mode: APS/APS to APS/Off */
@@ -609,28 +610,30 @@ static void pfuze_power_off_prepare(void)
regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_VGEN6VOL,
PFUZE100_VGENxLPWR | PFUZE100_VGENxSTBY,
PFUZE100_VGENxSTBY);
+
+ return NOTIFY_DONE;
}
static int pfuze_power_off_prepare_init(struct pfuze_chip *pfuze_chip)
{
+ int err;
+
if (pfuze_chip->chip_id != PFUZE100) {
dev_warn(pfuze_chip->dev, "Requested pm_power_off_prepare handler for not supported chip\n");
return -ENODEV;
}
- if (pm_power_off_prepare) {
- dev_warn(pfuze_chip->dev, "pm_power_off_prepare is already registered.\n");
- return -EBUSY;
+ err = devm_register_sys_off_handler(pfuze_chip->dev,
+ SYS_OFF_MODE_POWER_OFF_PREPARE,
+ SYS_OFF_PRIO_DEFAULT,
+ pfuze_power_off_prepare,
+ pfuze_chip);
+ if (err) {
+ dev_err(pfuze_chip->dev, "failed to register sys-off handler: %d\n",
+ err);
+ return err;
}
- if (syspm_pfuze_chip) {
- dev_warn(pfuze_chip->dev, "syspm_pfuze_chip is already set.\n");
- return -EBUSY;
- }
-
- syspm_pfuze_chip = pfuze_chip;
- pm_power_off_prepare = pfuze_power_off_prepare;
-
return 0;
}
@@ -839,23 +842,12 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
return 0;
}
-static int pfuze100_regulator_remove(struct i2c_client *client)
-{
- if (syspm_pfuze_chip) {
- syspm_pfuze_chip = NULL;
- pm_power_off_prepare = NULL;
- }
-
- return 0;
-}
-
static struct i2c_driver pfuze_driver = {
.driver = {
.name = "pfuze100-regulator",
.of_match_table = pfuze_dt_ids,
},
.probe = pfuze100_regulator_probe,
- .remove = pfuze100_regulator_remove,
};
module_i2c_driver(pfuze_driver);
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index 2abee78df96e..ca0817f8e41e 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -649,99 +649,6 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
return 0;
}
-/**
- * imx_dsp_rproc_elf_load_segments() - load firmware segments to memory
- * @rproc: remote processor which will be booted using these fw segments
- * @fw: the ELF firmware image
- *
- * This function specially checks if memsz is zero or not, otherwise it
- * is mostly same as rproc_elf_load_segments().
- */
-static int imx_dsp_rproc_elf_load_segments(struct rproc *rproc,
- const struct firmware *fw)
-{
- struct device *dev = &rproc->dev;
- u8 class = fw_elf_get_class(fw);
- u32 elf_phdr_get_size = elf_size_of_phdr(class);
- const u8 *elf_data = fw->data;
- const void *ehdr, *phdr;
- int i, ret = 0;
- u16 phnum;
-
- ehdr = elf_data;
- phnum = elf_hdr_get_e_phnum(class, ehdr);
- phdr = elf_data + elf_hdr_get_e_phoff(class, ehdr);
-
- /* go through the available ELF segments */
- for (i = 0; i < phnum; i++, phdr += elf_phdr_get_size) {
- u64 da = elf_phdr_get_p_paddr(class, phdr);
- u64 memsz = elf_phdr_get_p_memsz(class, phdr);
- u64 filesz = elf_phdr_get_p_filesz(class, phdr);
- u64 offset = elf_phdr_get_p_offset(class, phdr);
- u32 type = elf_phdr_get_p_type(class, phdr);
- void *ptr;
-
- /*
- * There is a case that with PT_LOAD type, the
- * filesz = memsz = 0. If memsz = 0, rproc_da_to_va
- * should return NULL ptr, then error is returned.
- * So this case should be skipped from the loop.
- * Add !memsz checking here.
- */
- if (type != PT_LOAD || !memsz)
- continue;
-
- dev_dbg(dev, "phdr: type %d da 0x%llx memsz 0x%llx filesz 0x%llx\n",
- type, da, memsz, filesz);
-
- if (filesz > memsz) {
- dev_err(dev, "bad phdr filesz 0x%llx memsz 0x%llx\n",
- filesz, memsz);
- ret = -EINVAL;
- break;
- }
-
- if (offset + filesz > fw->size) {
- dev_err(dev, "truncated fw: need 0x%llx avail 0x%zx\n",
- offset + filesz, fw->size);
- ret = -EINVAL;
- break;
- }
-
- if (!rproc_u64_fit_in_size_t(memsz)) {
- dev_err(dev, "size (%llx) does not fit in size_t type\n",
- memsz);
- ret = -EOVERFLOW;
- break;
- }
-
- /* grab the kernel address for this device address */
- ptr = rproc_da_to_va(rproc, da, memsz, NULL);
- if (!ptr) {
- dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da,
- memsz);
- ret = -EINVAL;
- break;
- }
-
- /* put the segment where the remote processor expects it */
- if (filesz)
- memcpy(ptr, elf_data + offset, filesz);
-
- /*
- * Zero out remaining memory for this segment.
- *
- * This isn't strictly required since dma_alloc_coherent already
- * did this for us. albeit harmless, we may consider removing
- * this.
- */
- if (memsz > filesz)
- memset(ptr + filesz, 0, memsz - filesz);
- }
-
- return ret;
-}
-
/* Prepare function for rproc_ops */
static int imx_dsp_rproc_prepare(struct rproc *rproc)
{
@@ -802,14 +709,22 @@ static void imx_dsp_rproc_kick(struct rproc *rproc, int vqid)
dev_err(dev, "%s: failed (%d, err:%d)\n", __func__, vqid, err);
}
+static int imx_dsp_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ if (rproc_elf_load_rsc_table(rproc, fw))
+ dev_warn(&rproc->dev, "no resource table found for this firmware\n");
+
+ return 0;
+}
+
static const struct rproc_ops imx_dsp_rproc_ops = {
.prepare = imx_dsp_rproc_prepare,
.unprepare = imx_dsp_rproc_unprepare,
.start = imx_dsp_rproc_start,
.stop = imx_dsp_rproc_stop,
.kick = imx_dsp_rproc_kick,
- .load = imx_dsp_rproc_elf_load_segments,
- .parse_fw = rproc_elf_load_rsc_table,
+ .load = rproc_elf_load_segments,
+ .parse_fw = imx_dsp_rproc_parse_fw,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 7a096f1891e6..4a3352821b1d 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -91,6 +91,32 @@ struct imx_rproc {
void __iomem *rsc_table;
};
+static const struct imx_rproc_att imx_rproc_att_imx93[] = {
+ /* dev addr , sys addr , size , flags */
+ /* TCM CODE NON-SECURE */
+ { 0x0FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x0FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+ /* TCM CODE SECURE */
+ { 0x1FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x1FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+ /* TCM SYS NON-SECURE*/
+ { 0x20000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x20020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+ /* TCM SYS SECURE*/
+ { 0x30000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x30020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+ /* DDR */
+ { 0x80000000, 0x80000000, 0x10000000, 0 },
+ { 0x90000000, 0x80000000, 0x10000000, 0 },
+
+ { 0xC0000000, 0xa0000000, 0x10000000, 0 },
+ { 0xD0000000, 0xa0000000, 0x10000000, 0 },
+};
+
static const struct imx_rproc_att imx_rproc_att_imx8mn[] = {
/* dev addr , sys addr , size , flags */
/* ITCM */
@@ -261,6 +287,12 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
.method = IMX_RPROC_MMIO,
};
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx93 = {
+ .att = imx_rproc_att_imx93,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx93),
+ .method = IMX_RPROC_SMC,
+};
+
static int imx_rproc_start(struct rproc *rproc)
{
struct imx_rproc *priv = rproc->priv;
@@ -423,6 +455,9 @@ static int imx_rproc_prepare(struct rproc *rproc)
if (!strcmp(it.node->name, "vdev0buffer"))
continue;
+ if (!strcmp(it.node->name, "rsc-table"))
+ continue;
+
rmem = of_reserved_mem_lookup(it.node);
if (!rmem) {
dev_err(priv->dev, "unable to acquire memory-region\n");
@@ -821,6 +856,7 @@ static const struct of_device_id imx_rproc_of_match[] = {
{ .compatible = "fsl,imx8mn-cm7", .data = &imx_rproc_cfg_imx8mn },
{ .compatible = "fsl,imx8mp-cm7", .data = &imx_rproc_cfg_imx8mn },
{ .compatible = "fsl,imx8ulp-cm33", .data = &imx_rproc_cfg_imx8ulp },
+ { .compatible = "fsl,imx93-cm33", .data = &imx_rproc_cfg_imx93 },
{},
};
MODULE_DEVICE_TABLE(of, imx_rproc_of_match);
diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
index 71ce4977cb0b..ea6fa1100a00 100644
--- a/drivers/remoteproc/mtk_common.h
+++ b/drivers/remoteproc/mtk_common.h
@@ -54,6 +54,8 @@
#define MT8192_CORE0_WDT_IRQ 0x10030
#define MT8192_CORE0_WDT_CFG 0x10034
+#define MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS GENMASK(7, 4)
+
#define SCP_FW_VER_LEN 32
#define SCP_SHARE_BUFFER_SIZE 288
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index 38609153bf64..47b2a40e1b4a 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -365,22 +365,22 @@ static int mt8183_scp_before_load(struct mtk_scp *scp)
return 0;
}
-static void mt8192_power_on_sram(void __iomem *addr)
+static void scp_sram_power_on(void __iomem *addr, u32 reserved_mask)
{
int i;
for (i = 31; i >= 0; i--)
- writel(GENMASK(i, 0), addr);
+ writel(GENMASK(i, 0) & ~reserved_mask, addr);
writel(0, addr);
}
-static void mt8192_power_off_sram(void __iomem *addr)
+static void scp_sram_power_off(void __iomem *addr, u32 reserved_mask)
{
int i;
writel(0, addr);
for (i = 0; i < 32; i++)
- writel(GENMASK(i, 0), addr);
+ writel(GENMASK(i, 0) & ~reserved_mask, addr);
}
static int mt8186_scp_before_load(struct mtk_scp *scp)
@@ -393,7 +393,7 @@ static int mt8186_scp_before_load(struct mtk_scp *scp)
writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
/* Turn on the power of SCP's SRAM before using it. Enable 1 block per time*/
- mt8192_power_on_sram(scp->reg_base + MT8183_SCP_SRAM_PDN);
+ scp_sram_power_on(scp->reg_base + MT8183_SCP_SRAM_PDN, 0);
/* Initialize TCM before loading FW. */
writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
@@ -412,11 +412,32 @@ static int mt8192_scp_before_load(struct mtk_scp *scp)
writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
/* enable SRAM clock */
- mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_0);
- mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_1);
- mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_2);
- mt8192_power_on_sram(scp->reg_base + MT8192_L1TCM_SRAM_PDN);
- mt8192_power_on_sram(scp->reg_base + MT8192_CPU0_SRAM_PD);
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+ /* enable MPU for all memory regions */
+ writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+
+ return 0;
+}
+
+static int mt8195_scp_before_load(struct mtk_scp *scp)
+{
+ /* clear SPM interrupt, SCP2SPM_IPC_CLR */
+ writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);
+
+ writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+
+ /* enable SRAM clock */
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
+ MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
+ scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
/* enable MPU for all memory regions */
writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
@@ -572,11 +593,25 @@ static void mt8183_scp_stop(struct mtk_scp *scp)
static void mt8192_scp_stop(struct mtk_scp *scp)
{
/* Disable SRAM clock */
- mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_0);
- mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_1);
- mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_2);
- mt8192_power_off_sram(scp->reg_base + MT8192_L1TCM_SRAM_PDN);
- mt8192_power_off_sram(scp->reg_base + MT8192_CPU0_SRAM_PD);
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+ /* Disable SCP watchdog */
+ writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
+}
+
+static void mt8195_scp_stop(struct mtk_scp *scp)
+{
+ /* Disable SRAM clock */
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
+ MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
+ scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
/* Disable SCP watchdog */
writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
@@ -774,9 +809,13 @@ static int scp_probe(struct platform_device *pdev)
struct mtk_scp *scp;
struct rproc *rproc;
struct resource *res;
- char *fw_name = "scp.img";
+ const char *fw_name = "scp.img";
int ret, i;
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
if (!rproc)
return dev_err_probe(dev, -ENOMEM, "unable to allocate remoteproc\n");
@@ -877,7 +916,6 @@ static int scp_remove(struct platform_device *pdev)
for (i = 0; i < SCP_IPI_MAX; i++)
mutex_destroy(&scp->ipi_desc[i].lock);
mutex_destroy(&scp->send_lock);
- rproc_free(scp->rproc);
return 0;
}
@@ -922,11 +960,11 @@ static const struct mtk_scp_of_data mt8192_of_data = {
static const struct mtk_scp_of_data mt8195_of_data = {
.scp_clk_get = mt8195_scp_clk_get,
- .scp_before_load = mt8192_scp_before_load,
+ .scp_before_load = mt8195_scp_before_load,
.scp_irq_handler = mt8192_scp_irq_handler,
.scp_reset_assert = mt8192_scp_reset_assert,
.scp_reset_deassert = mt8192_scp_reset_deassert,
- .scp_stop = mt8192_scp_stop,
+ .scp_stop = mt8195_scp_stop,
.scp_da_to_va = mt8192_scp_da_to_va,
.host_to_scp_reg = MT8192_GIPC_IN_SET,
.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
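The reworked SRAM helpers above power banks up and down one block per register write by walking GENMASK(i, 0), and the new reserved_mask argument keeps MT8195's reserved RSI bits out of every write. A minimal standalone sketch of the same masking sequence, using hypothetical mask values rather than the driver's real register layout:

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l) (((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))

/* Walk the power-down mask from all-ones down to zero; each write clears
 * one more bank bit, except bits covered by reserved_mask, which are
 * never set in the first place. */
static void sram_power_on_seq(uint32_t reserved_mask)
{
	int i;

	for (i = 31; i >= 0; i--)
		printf("write 0x%08x\n", GENMASK32(i, 0) & ~reserved_mask);
	printf("write 0x%08x\n", 0u); /* all non-reserved banks powered on */
}

int main(void)
{
	/* hypothetical: bits 7..4 reserved, everything else is a bank */
	sram_power_on_seq(GENMASK32(7, 4));
	return 0;
}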
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 1ae47cc153e5..6ae39c5653b1 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -704,6 +704,36 @@ static const struct adsp_data sm8250_cdsp_resource = {
.ssctl_id = 0x17,
};
+static const struct adsp_data sc8280xp_nsp0_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "nsp",
+ NULL
+ },
+ .ssr_name = "cdsp0",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sc8280xp_nsp1_resource = {
+ .crash_reason_smem = 633,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 30,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "nsp",
+ NULL
+ },
+ .ssr_name = "cdsp1",
+ .sysmon_name = "cdsp1",
+ .ssctl_id = 0x20,
+};
+
static const struct adsp_data sm8350_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
@@ -848,6 +878,7 @@ static const struct adsp_data sdx55_mpss_resource = {
};
static const struct of_device_id adsp_of_match[] = {
+ { .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8996-slpi-pil", .data = &slpi_resource_init},
@@ -861,6 +892,9 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sc8180x-adsp-pas", .data = &sm8150_adsp_resource},
{ .compatible = "qcom,sc8180x-cdsp-pas", .data = &sm8150_cdsp_resource},
{ .compatible = "qcom,sc8180x-mpss-pas", .data = &sc8180x_mpss_resource},
+ { .compatible = "qcom,sc8280xp-adsp-pas", .data = &sm8250_adsp_resource},
+ { .compatible = "qcom,sc8280xp-nsp0-pas", .data = &sc8280xp_nsp0_resource},
+ { .compatible = "qcom,sc8280xp-nsp1-pas", .data = &sc8280xp_nsp1_resource},
{ .compatible = "qcom,sdm660-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sdm845-adsp-pas", .data = &sdm845_adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init},
diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c
index 906ff3c4dfdd..687f205fd70a 100644
--- a/drivers/remoteproc/remoteproc_cdev.c
+++ b/drivers/remoteproc/remoteproc_cdev.c
@@ -32,21 +32,10 @@ static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_
return -EFAULT;
if (!strncmp(cmd, "start", len)) {
- if (rproc->state == RPROC_RUNNING ||
- rproc->state == RPROC_ATTACHED)
- return -EBUSY;
-
ret = rproc_boot(rproc);
} else if (!strncmp(cmd, "stop", len)) {
- if (rproc->state != RPROC_RUNNING &&
- rproc->state != RPROC_ATTACHED)
- return -EINVAL;
-
ret = rproc_shutdown(rproc);
} else if (!strncmp(cmd, "detach", len)) {
- if (rproc->state != RPROC_ATTACHED)
- return -EINVAL;
-
ret = rproc_detach(rproc);
} else {
dev_err(&rproc->dev, "Unrecognized option\n");
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index c510125769b9..02a04ab34a23 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -684,10 +684,6 @@ static int rproc_handle_trace(struct rproc *rproc, void *ptr,
/* create the debugfs entry */
trace->tfile = rproc_create_trace_file(name, rproc, trace);
- if (!trace->tfile) {
- kfree(trace);
- return -EINVAL;
- }
list_add_tail(&trace->node, &rproc->traces);
@@ -2075,6 +2071,12 @@ int rproc_shutdown(struct rproc *rproc)
return ret;
}
+ if (rproc->state != RPROC_RUNNING &&
+ rproc->state != RPROC_ATTACHED) {
+ ret = -EINVAL;
+ goto out;
+ }
+
/* if the remote proc is still needed, bail out */
if (!atomic_dec_and_test(&rproc->power))
goto out;
@@ -2134,6 +2136,11 @@ int rproc_detach(struct rproc *rproc)
return ret;
}
+ if (rproc->state != RPROC_ATTACHED) {
+ ret = -EINVAL;
+ goto out;
+ }
+
/* if the remote proc is still needed, bail out */
if (!atomic_dec_and_test(&rproc->power)) {
ret = 0;
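With the state checks folded into rproc_shutdown() and rproc_detach() themselves, callers such as the character-device and sysfs handlers no longer duplicate the RPROC_RUNNING/RPROC_ATTACHED tests and can simply propagate the core's return value. A sketch of the resulting caller pattern (illustrative only, not taken from the patch):

#include <linux/remoteproc.h>

static int example_stop(struct rproc *rproc)
{
	/* The core now returns -EINVAL itself when the rproc is neither
	 * running nor attached, so no pre-check is needed here. */
	int ret = rproc_shutdown(rproc);

	if (ret)
		dev_err(&rproc->dev, "stop failed: %d\n", ret);

	return ret;
}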
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 581930483ef8..b86c1d09c70c 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -386,16 +386,8 @@ void rproc_remove_trace_file(struct dentry *tfile)
struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
struct rproc_debug_trace *trace)
{
- struct dentry *tfile;
-
- tfile = debugfs_create_file(name, 0400, rproc->dbg_dir, trace,
+ return debugfs_create_file(name, 0400, rproc->dbg_dir, trace,
&trace_rproc_ops);
- if (!tfile) {
- dev_err(&rproc->dev, "failed to create debugfs trace entry\n");
- return NULL;
- }
-
- return tfile;
}
void rproc_delete_debug_dir(struct rproc *rproc)
@@ -411,8 +403,6 @@ void rproc_create_debug_dir(struct rproc *rproc)
return;
rproc->dbg_dir = debugfs_create_dir(dev_name(dev), rproc_dbg);
- if (!rproc->dbg_dir)
- return;
debugfs_create_file("name", 0400, rproc->dbg_dir,
rproc, &rproc_name_ops);
@@ -430,11 +420,8 @@ void rproc_create_debug_dir(struct rproc *rproc)
void __init rproc_init_debugfs(void)
{
- if (debugfs_initialized()) {
+ if (debugfs_initialized())
rproc_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!rproc_dbg)
- pr_err("can't create debugfs dir\n");
- }
}
void __exit rproc_exit_debugfs(void)
diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c
index d635d19a5aa8..5a412d7b6e0b 100644
--- a/drivers/remoteproc/remoteproc_elf_loader.c
+++ b/drivers/remoteproc/remoteproc_elf_loader.c
@@ -181,7 +181,7 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
bool is_iomem = false;
void *ptr;
- if (type != PT_LOAD)
+ if (type != PT_LOAD || !memsz)
continue;
dev_dbg(dev, "phdr: type %d da 0x%llx memsz 0x%llx filesz 0x%llx\n",
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index 51a04bc6ba7a..8c7ea8922638 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -194,23 +194,12 @@ static ssize_t state_store(struct device *dev,
int ret = 0;
if (sysfs_streq(buf, "start")) {
- if (rproc->state == RPROC_RUNNING ||
- rproc->state == RPROC_ATTACHED)
- return -EBUSY;
-
ret = rproc_boot(rproc);
if (ret)
dev_err(&rproc->dev, "Boot failed: %d\n", ret);
} else if (sysfs_streq(buf, "stop")) {
- if (rproc->state != RPROC_RUNNING &&
- rproc->state != RPROC_ATTACHED)
- return -EINVAL;
-
ret = rproc_shutdown(rproc);
} else if (sysfs_streq(buf, "detach")) {
- if (rproc->state != RPROC_ATTACHED)
- return -EINVAL;
-
ret = rproc_detach(rproc);
} else {
dev_err(&rproc->dev, "Unrecognised option: %s\n", buf);
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 764c980507be..1957b27c4cf3 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -1407,9 +1407,9 @@ static int qcom_smd_parse_edge(struct device *dev,
edge->name = node->name;
irq = irq_of_parse_and_map(node, 0);
- if (irq < 0) {
+ if (!irq) {
dev_err(dev, "required smd interrupt missing\n");
- ret = irq;
+ ret = -EINVAL;
goto put_node;
}
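The qcom_smd fix above hinges on the return convention of irq_of_parse_and_map(): it returns an unsigned virq number and 0 on failure, never a negative errno, so the old `if (irq < 0)` test could not fire. A short sketch of the correct pattern (generic usage assumed, not the driver itself):

#include <linux/of.h>
#include <linux/of_irq.h>

static int example_get_irq(struct device_node *node)
{
	unsigned int irq = irq_of_parse_and_map(node, 0);

	if (!irq)		/* 0 means the mapping failed */
		return -EINVAL;

	return irq;
}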
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 79368a957d89..290c1f02da10 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -400,7 +400,8 @@ field##_store(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t sz) \
{ \
struct rpmsg_device *rpdev = to_rpmsg_device(dev); \
- char *new, *old; \
+ const char *old; \
+ char *new; \
\
new = kstrndup(buf, sz, GFP_KERNEL); \
if (!new) \
@@ -592,24 +593,51 @@ static struct bus_type rpmsg_bus = {
.remove = rpmsg_dev_remove,
};
-int rpmsg_register_device(struct rpmsg_device *rpdev)
+/*
+ * A helper for registering an rpmsg device with a driver override and name.
+ * Drivers should not use this directly; use rpmsg_register_device() instead.
+ */
+int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+ const char *driver_override)
{
struct device *dev = &rpdev->dev;
int ret;
- dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent),
+ if (driver_override)
+ strcpy(rpdev->id.name, driver_override);
+
+ dev_set_name(dev, "%s.%s.%d.%d", dev_name(dev->parent),
rpdev->id.name, rpdev->src, rpdev->dst);
- rpdev->dev.bus = &rpmsg_bus;
+ dev->bus = &rpmsg_bus;
- ret = device_register(&rpdev->dev);
+ device_initialize(dev);
+ if (driver_override) {
+ ret = driver_set_override(dev, &rpdev->driver_override,
+ driver_override,
+ strlen(driver_override));
+ if (ret) {
+ dev_err(dev, "driver_set_override failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = device_add(dev);
if (ret) {
- dev_err(dev, "device_register failed: %d\n", ret);
- put_device(&rpdev->dev);
+ dev_err(dev, "device_add failed: %d\n", ret);
+ kfree(rpdev->driver_override);
+ rpdev->driver_override = NULL;
+ put_device(dev);
}
return ret;
}
+EXPORT_SYMBOL(rpmsg_register_device_override);
+
+int rpmsg_register_device(struct rpmsg_device *rpdev)
+{
+ return rpmsg_register_device_override(rpdev, NULL);
+}
EXPORT_SYMBOL(rpmsg_register_device);
/*
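rpmsg_register_device_override() centralizes the driver_override handling that rpmsg_ctrl and rpmsg_ns previously open-coded: the override name is copied into rpdev->id.name, driver_set_override() takes ownership of the override string, and the device is initialized and added in two steps so the override can be set in between. A hedged sketch of how a backend would register a channel bound to a specific driver (mirrors the ctrl/ns helpers updated below; the driver name is hypothetical):

#include <linux/rpmsg.h>

static int example_register_fixed_channel(struct rpmsg_device *rpdev)
{
	/* Force binding to the hypothetical "my_rpmsg_driver" driver. */
	return rpmsg_register_device_override(rpdev, "my_rpmsg_driver");
}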
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index d4b23fd019a8..a22cd4abe7d1 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -94,10 +94,7 @@ int rpmsg_release_channel(struct rpmsg_device *rpdev,
*/
static inline int rpmsg_ctrldev_register_device(struct rpmsg_device *rpdev)
{
- strcpy(rpdev->id.name, "rpmsg_ctrl");
- rpdev->driver_override = "rpmsg_ctrl";
-
- return rpmsg_register_device(rpdev);
+ return rpmsg_register_device_override(rpdev, "rpmsg_ctrl");
}
#endif
diff --git a/drivers/rpmsg/rpmsg_ns.c b/drivers/rpmsg/rpmsg_ns.c
index 762ff1ae279f..c70ad03ff2e9 100644
--- a/drivers/rpmsg/rpmsg_ns.c
+++ b/drivers/rpmsg/rpmsg_ns.c
@@ -20,12 +20,10 @@
*/
int rpmsg_ns_register_device(struct rpmsg_device *rpdev)
{
- strcpy(rpdev->id.name, "rpmsg_ns");
- rpdev->driver_override = "rpmsg_ns";
rpdev->src = RPMSG_NS_ADDR;
rpdev->dst = RPMSG_NS_ADDR;
- return rpmsg_register_device(rpdev);
+ return rpmsg_register_device_override(rpdev, "rpmsg_ns");
}
EXPORT_SYMBOL(rpmsg_ns_register_device);
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 3ede25b1f2e4..905ac7910c98 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -851,7 +851,7 @@ static struct rpmsg_device *rpmsg_virtio_add_ctrl_dev(struct virtio_device *vdev
err = rpmsg_ctrldev_register_device(rpdev_ctrl);
if (err) {
- kfree(vch);
+ /* vch will be freed in virtio_rpmsg_release_device() */
return ERR_PTR(err);
}
@@ -862,7 +862,7 @@ static void rpmsg_virtio_del_ctrl_dev(struct rpmsg_device *rpdev_ctrl)
{
if (!rpdev_ctrl)
return;
- kfree(to_virtio_rpmsg_channel(rpdev_ctrl));
+ device_unregister(&rpdev_ctrl->dev);
}
static int rpmsg_probe(struct virtio_device *vdev)
@@ -973,7 +973,8 @@ static int rpmsg_probe(struct virtio_device *vdev)
err = rpmsg_ns_register_device(rpdev_ns);
if (err)
- goto free_vch;
+ /* vch will be freed in virtio_rpmsg_release_device() */
+ goto free_ctrldev;
}
/*
@@ -997,8 +998,6 @@ static int rpmsg_probe(struct virtio_device *vdev)
return 0;
-free_vch:
- kfree(vch);
free_ctrldev:
rpmsg_virtio_del_ctrl_dev(rpdev_ctrl);
free_coherent:
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 41c65b4d2baf..a00f901b5c1d 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1548,6 +1548,13 @@ config RTC_DRV_RS5C313
help
If you say yes here you get support for the Ricoh RS5C313 RTC chips.
+config RTC_DRV_RZN1
+ tristate "Renesas RZ/N1 RTC"
+ depends on ARCH_RZN1 || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ help
+ If you say yes here you get support for the Renesas RZ/N1 RTC.
+
config RTC_DRV_GENERIC
tristate "Generic RTC support"
# Please consider writing a new RTC driver instead of using the generic
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 2d827d8261d5..fb04467b652d 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -151,6 +151,7 @@ obj-$(CONFIG_RTC_DRV_RX6110) += rtc-rx6110.o
obj-$(CONFIG_RTC_DRV_RX8010) += rtc-rx8010.o
obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o
obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o
+obj-$(CONFIG_RTC_DRV_RZN1) += rtc-rzn1.o
obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
obj-$(CONFIG_RTC_DRV_S5M) += rtc-s5m.o
diff --git a/drivers/rtc/rtc-ftrtc010.c b/drivers/rtc/rtc-ftrtc010.c
index 53bb08fe1cd4..25c6e7d9570f 100644
--- a/drivers/rtc/rtc-ftrtc010.c
+++ b/drivers/rtc/rtc-ftrtc010.c
@@ -137,26 +137,34 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev)
ret = clk_prepare_enable(rtc->extclk);
if (ret) {
dev_err(dev, "failed to enable EXTCLK\n");
- return ret;
+ goto err_disable_pclk;
}
}
rtc->rtc_irq = platform_get_irq(pdev, 0);
- if (rtc->rtc_irq < 0)
- return rtc->rtc_irq;
+ if (rtc->rtc_irq < 0) {
+ ret = rtc->rtc_irq;
+ goto err_disable_extclk;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
+ if (!res) {
+ ret = -ENODEV;
+ goto err_disable_extclk;
+ }
rtc->rtc_base = devm_ioremap(dev, res->start,
resource_size(res));
- if (!rtc->rtc_base)
- return -ENOMEM;
+ if (!rtc->rtc_base) {
+ ret = -ENOMEM;
+ goto err_disable_extclk;
+ }
rtc->rtc_dev = devm_rtc_allocate_device(dev);
- if (IS_ERR(rtc->rtc_dev))
- return PTR_ERR(rtc->rtc_dev);
+ if (IS_ERR(rtc->rtc_dev)) {
+ ret = PTR_ERR(rtc->rtc_dev);
+ goto err_disable_extclk;
+ }
rtc->rtc_dev->ops = &ftrtc010_rtc_ops;
@@ -172,9 +180,15 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, rtc->rtc_irq, ftrtc010_rtc_interrupt,
IRQF_SHARED, pdev->name, dev);
if (unlikely(ret))
- return ret;
+ goto err_disable_extclk;
return devm_rtc_register_device(rtc->rtc_dev);
+
+err_disable_extclk:
+ clk_disable_unprepare(rtc->extclk);
+err_disable_pclk:
+ clk_disable_unprepare(rtc->pclk);
+ return ret;
}
static int ftrtc010_rtc_remove(struct platform_device *pdev)
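The ftrtc010 probe changes above convert direct returns into a single unwind path so that clocks enabled early in probe are always disabled on failure. A generic sketch of that goto-based cleanup pattern (hypothetical helper names, not the driver's code):

#include <linux/clk.h>

static int example_setup(void)
{
	return 0;	/* placeholder for register/IRQ setup */
}

static int example_probe_pattern(struct clk *pclk, struct clk *extclk)
{
	int ret;

	ret = clk_prepare_enable(pclk);
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = clk_prepare_enable(extclk);
	if (ret)
		goto err_disable_pclk;

	ret = example_setup();		/* hypothetical later step */
	if (ret)
		goto err_disable_extclk;

	return 0;

err_disable_extclk:
	clk_disable_unprepare(extclk);
err_disable_pclk:
	clk_disable_unprepare(pclk);
	return ret;
}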
diff --git a/drivers/rtc/rtc-gamecube.c b/drivers/rtc/rtc-gamecube.c
index 18ca3b38b2d0..c2717bb52b2b 100644
--- a/drivers/rtc/rtc-gamecube.c
+++ b/drivers/rtc/rtc-gamecube.c
@@ -267,6 +267,7 @@ static int gamecube_rtc_read_offset_from_sram(struct priv *d)
ret = regmap_read(d->regmap, RTC_SRAM_BIAS, &d->rtc_bias);
if (ret) {
pr_err("failed to get the RTC bias\n");
+ iounmap(hw_srnprot);
return -1;
}
diff --git a/drivers/rtc/rtc-meson.c b/drivers/rtc/rtc-meson.c
index 44bdc8b4a90d..db1d626edca5 100644
--- a/drivers/rtc/rtc-meson.c
+++ b/drivers/rtc/rtc-meson.c
@@ -399,7 +399,7 @@ static struct platform_driver meson_rtc_driver = {
module_platform_driver(meson_rtc_driver);
MODULE_DESCRIPTION("Amlogic Meson RTC Driver");
-MODULE_AUTHOR("Ben Dooks <ben.doosk@codethink.co.uk>");
+MODULE_AUTHOR("Ben Dooks <ben.dooks@codethink.co.uk>");
MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:meson-rtc");
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 80dc479a6ff0..1d297af80f87 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -269,6 +269,8 @@ static int mtk_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
rtc->addr_base = res->start;
rtc->data = of_device_get_match_data(&pdev->dev);
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 0f08f22df869..53d4e253e81f 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -311,7 +311,7 @@ static int mxc_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- pdata->devtype = (enum imx_rtc_type)of_device_get_match_data(&pdev->dev);
+ pdata->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
pdata->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->ioaddr))
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 9760824ec199..095891999da1 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -650,6 +650,7 @@ static int pcf85063_probe(struct i2c_client *client)
}
static const struct i2c_device_id pcf85063_ids[] = {
+ { "pca85073a", PCF85063A },
{ "pcf85063", PCF85063 },
{ "pcf85063tp", PCF85063TP },
{ "pcf85063a", PCF85063A },
@@ -660,6 +661,7 @@ MODULE_DEVICE_TABLE(i2c, pcf85063_ids);
#ifdef CONFIG_OF
static const struct of_device_id pcf85063_of_match[] = {
+ { .compatible = "nxp,pca85073a", .data = &pcf85063_cfg[PCF85063A] },
{ .compatible = "nxp,pcf85063", .data = &pcf85063_cfg[PCF85063] },
{ .compatible = "nxp,pcf85063tp", .data = &pcf85063_cfg[PCF85063TP] },
{ .compatible = "nxp,pcf85063a", .data = &pcf85063_cfg[PCF85063A] },
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index cf8119b6d320..eeacf480cf36 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -16,8 +16,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
-#include <mach/hardware.h>
-
#include "rtc-sa1100.h"
#define RTC_DEF_DIVIDER (32768 - 1)
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index 5bfdd34a72ff..b32117ccd74b 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -436,7 +436,6 @@ static int rx8025_set_offset(struct device *dev, long offset)
{
struct i2c_client *client = to_i2c_client(dev);
u8 digoff;
- int err;
offset /= RX8025_ADJ_RESOLUTION;
if (offset > RX8025_ADJ_DATA_MAX)
@@ -449,11 +448,7 @@ static int rx8025_set_offset(struct device *dev, long offset)
offset += 128;
digoff = offset;
- err = rx8025_write_reg(client, RX8025_REG_DIGOFF, digoff);
- if (err)
- return err;
-
- return 0;
+ return rx8025_write_reg(client, RX8025_REG_DIGOFF, digoff);
}
static const struct rtc_class_ops rx8025_rtc_ops = {
diff --git a/drivers/rtc/rtc-rzn1.c b/drivers/rtc/rtc-rzn1.c
new file mode 100644
index 000000000000..ac788799c8e3
--- /dev/null
+++ b/drivers/rtc/rtc-rzn1.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Renesas RZ/N1 Real Time Clock interface for Linux
+ *
+ * Copyright:
+ * - 2014 Renesas Electronics Europe Limited
+ * - 2022 Schneider Electric
+ *
+ * Authors:
+ * - Michel Pollet <michel.pollet@bp.renesas.com>, <buserror@gmail.com>
+ * - Miquel Raynal <miquel.raynal@bootlin.com>
+ */
+
+#include <linux/bcd.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/rtc.h>
+
+#define RZN1_RTC_CTL0 0x00
+#define RZN1_RTC_CTL0_SLSB_SUBU 0
+#define RZN1_RTC_CTL0_SLSB_SCMP BIT(4)
+#define RZN1_RTC_CTL0_AMPM BIT(5)
+#define RZN1_RTC_CTL0_CE BIT(7)
+
+#define RZN1_RTC_CTL1 0x04
+#define RZN1_RTC_CTL1_ALME BIT(4)
+
+#define RZN1_RTC_CTL2 0x08
+#define RZN1_RTC_CTL2_WAIT BIT(0)
+#define RZN1_RTC_CTL2_WST BIT(1)
+#define RZN1_RTC_CTL2_WUST BIT(5)
+#define RZN1_RTC_CTL2_STOPPED (RZN1_RTC_CTL2_WAIT | RZN1_RTC_CTL2_WST)
+
+#define RZN1_RTC_SEC 0x14
+#define RZN1_RTC_MIN 0x18
+#define RZN1_RTC_HOUR 0x1c
+#define RZN1_RTC_WEEK 0x20
+#define RZN1_RTC_DAY 0x24
+#define RZN1_RTC_MONTH 0x28
+#define RZN1_RTC_YEAR 0x2c
+
+#define RZN1_RTC_SUBU 0x38
+#define RZN1_RTC_SUBU_DEV BIT(7)
+#define RZN1_RTC_SUBU_DECR BIT(6)
+
+#define RZN1_RTC_ALM 0x40
+#define RZN1_RTC_ALH 0x44
+#define RZN1_RTC_ALW 0x48
+
+#define RZN1_RTC_SECC 0x4c
+#define RZN1_RTC_MINC 0x50
+#define RZN1_RTC_HOURC 0x54
+#define RZN1_RTC_WEEKC 0x58
+#define RZN1_RTC_DAYC 0x5c
+#define RZN1_RTC_MONTHC 0x60
+#define RZN1_RTC_YEARC 0x64
+
+struct rzn1_rtc {
+ struct rtc_device *rtcdev;
+ void __iomem *base;
+};
+
+static void rzn1_rtc_get_time_snapshot(struct rzn1_rtc *rtc, struct rtc_time *tm)
+{
+ tm->tm_sec = readl(rtc->base + RZN1_RTC_SECC);
+ tm->tm_min = readl(rtc->base + RZN1_RTC_MINC);
+ tm->tm_hour = readl(rtc->base + RZN1_RTC_HOURC);
+ tm->tm_wday = readl(rtc->base + RZN1_RTC_WEEKC);
+ tm->tm_mday = readl(rtc->base + RZN1_RTC_DAYC);
+ tm->tm_mon = readl(rtc->base + RZN1_RTC_MONTHC);
+ tm->tm_year = readl(rtc->base + RZN1_RTC_YEARC);
+}
+
+static unsigned int rzn1_rtc_tm_to_wday(struct rtc_time *tm)
+{
+ time64_t time;
+ unsigned int days;
+ u32 secs;
+
+ time = rtc_tm_to_time64(tm);
+ days = div_s64_rem(time, 86400, &secs);
+
+ /* day of the week, 1970-01-01 was a Thursday */
+ return (days + 4) % 7;
+}
+
+static int rzn1_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rzn1_rtc *rtc = dev_get_drvdata(dev);
+ u32 val, secs;
+
+ /*
+ * If the RTC was never started or is currently stopped, it does not
+ * carry a valid time/date.
+ */
+ val = readl(rtc->base + RZN1_RTC_CTL2);
+ if (val & RZN1_RTC_CTL2_STOPPED)
+ return -EINVAL;
+
+ rzn1_rtc_get_time_snapshot(rtc, tm);
+ secs = readl(rtc->base + RZN1_RTC_SECC);
+ if (tm->tm_sec != secs)
+ rzn1_rtc_get_time_snapshot(rtc, tm);
+
+ tm->tm_sec = bcd2bin(tm->tm_sec);
+ tm->tm_min = bcd2bin(tm->tm_min);
+ tm->tm_hour = bcd2bin(tm->tm_hour);
+ tm->tm_wday = bcd2bin(tm->tm_wday);
+ tm->tm_mday = bcd2bin(tm->tm_mday);
+ tm->tm_mon = bcd2bin(tm->tm_mon);
+ tm->tm_year = bcd2bin(tm->tm_year);
+
+ return 0;
+}
+
+static int rzn1_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rzn1_rtc *rtc = dev_get_drvdata(dev);
+ u32 val;
+ int ret;
+
+ tm->tm_sec = bin2bcd(tm->tm_sec);
+ tm->tm_min = bin2bcd(tm->tm_min);
+ tm->tm_hour = bin2bcd(tm->tm_hour);
+ tm->tm_wday = bin2bcd(rzn1_rtc_tm_to_wday(tm));
+ tm->tm_mday = bin2bcd(tm->tm_mday);
+ tm->tm_mon = bin2bcd(tm->tm_mon);
+ tm->tm_year = bin2bcd(tm->tm_year);
+
+ val = readl(rtc->base + RZN1_RTC_CTL2);
+ if (!(val & RZN1_RTC_CTL2_STOPPED)) {
+ /* Hold the counter if it was counting up */
+ writel(RZN1_RTC_CTL2_WAIT, rtc->base + RZN1_RTC_CTL2);
+
+ /* Wait for the counter to stop: two 32k clock cycles */
+ usleep_range(61, 100);
+ ret = readl_poll_timeout(rtc->base + RZN1_RTC_CTL2, val,
+ val & RZN1_RTC_CTL2_WST, 0, 100);
+ if (ret)
+ return ret;
+ }
+
+ writel(tm->tm_sec, rtc->base + RZN1_RTC_SEC);
+ writel(tm->tm_min, rtc->base + RZN1_RTC_MIN);
+ writel(tm->tm_hour, rtc->base + RZN1_RTC_HOUR);
+ writel(tm->tm_wday, rtc->base + RZN1_RTC_WEEK);
+ writel(tm->tm_mday, rtc->base + RZN1_RTC_DAY);
+ writel(tm->tm_mon, rtc->base + RZN1_RTC_MONTH);
+ writel(tm->tm_year, rtc->base + RZN1_RTC_YEAR);
+ writel(0, rtc->base + RZN1_RTC_CTL2);
+
+ return 0;
+}
+
+static irqreturn_t rzn1_rtc_alarm_irq(int irq, void *dev_id)
+{
+ struct rzn1_rtc *rtc = dev_id;
+
+ rtc_update_irq(rtc->rtcdev, 1, RTC_AF | RTC_IRQF);
+
+ return IRQ_HANDLED;
+}
+
+static int rzn1_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
+{
+ struct rzn1_rtc *rtc = dev_get_drvdata(dev);
+ u32 ctl1 = readl(rtc->base + RZN1_RTC_CTL1);
+
+ if (enable)
+ ctl1 |= RZN1_RTC_CTL1_ALME;
+ else
+ ctl1 &= ~RZN1_RTC_CTL1_ALME;
+
+ writel(ctl1, rtc->base + RZN1_RTC_CTL1);
+
+ return 0;
+}
+
+static int rzn1_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rzn1_rtc *rtc = dev_get_drvdata(dev);
+ struct rtc_time *tm = &alrm->time;
+ unsigned int min, hour, wday, delta_days;
+ time64_t alarm;
+ u32 ctl1;
+ int ret;
+
+ ret = rzn1_rtc_read_time(dev, tm);
+ if (ret)
+ return ret;
+
+ min = readl(rtc->base + RZN1_RTC_ALM);
+ hour = readl(rtc->base + RZN1_RTC_ALH);
+ wday = readl(rtc->base + RZN1_RTC_ALW);
+
+ tm->tm_sec = 0;
+ tm->tm_min = bcd2bin(min);
+ tm->tm_hour = bcd2bin(hour);
+ delta_days = ((fls(wday) - 1) - tm->tm_wday + 7) % 7;
+ tm->tm_wday = fls(wday) - 1;
+
+ if (delta_days) {
+ alarm = rtc_tm_to_time64(tm) + (delta_days * 86400);
+ rtc_time64_to_tm(alarm, tm);
+ }
+
+ ctl1 = readl(rtc->base + RZN1_RTC_CTL1);
+ alrm->enabled = !!(ctl1 & RZN1_RTC_CTL1_ALME);
+
+ return 0;
+}
+
+static int rzn1_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rzn1_rtc *rtc = dev_get_drvdata(dev);
+ struct rtc_time *tm = &alrm->time, tm_now;
+ unsigned long alarm, farest;
+ unsigned int days_ahead, wday;
+ int ret;
+
+ ret = rzn1_rtc_read_time(dev, &tm_now);
+ if (ret)
+ return ret;
+
+ /* We cannot set alarms more than one week ahead */
+ farest = rtc_tm_to_time64(&tm_now) + (7 * 86400);
+ alarm = rtc_tm_to_time64(tm);
+ if (time_after(alarm, farest))
+ return -ERANGE;
+
+ /* Convert alarm day into week day */
+ days_ahead = tm->tm_mday - tm_now.tm_mday;
+ wday = (tm_now.tm_wday + days_ahead) % 7;
+
+ writel(bin2bcd(tm->tm_min), rtc->base + RZN1_RTC_ALM);
+ writel(bin2bcd(tm->tm_hour), rtc->base + RZN1_RTC_ALH);
+ writel(BIT(wday), rtc->base + RZN1_RTC_ALW);
+
+ rzn1_rtc_alarm_irq_enable(dev, alrm->enabled);
+
+ return 0;
+}
+
+static int rzn1_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct rzn1_rtc *rtc = dev_get_drvdata(dev);
+ unsigned int ppb_per_step;
+ bool subtract;
+ u32 val;
+
+ val = readl(rtc->base + RZN1_RTC_SUBU);
+ ppb_per_step = val & RZN1_RTC_SUBU_DEV ? 1017 : 3051;
+ subtract = val & RZN1_RTC_SUBU_DECR;
+ val &= 0x3F;
+
+ if (!val)
+ *offset = 0;
+ else if (subtract)
+ *offset = -(((~val) & 0x3F) + 1) * ppb_per_step;
+ else
+ *offset = (val - 1) * ppb_per_step;
+
+ return 0;
+}
+
+static int rzn1_rtc_set_offset(struct device *dev, long offset)
+{
+ struct rzn1_rtc *rtc = dev_get_drvdata(dev);
+ int stepsh, stepsl, steps;
+ u32 subu = 0, ctl2;
+ int ret;
+
+ /*
+ * Check which resolution mode (every 20 or 60s) can be used.
+ * Between 2 and 124 clock pulses can be added or subtracted.
+ *
+ * In 20s mode, the minimum resolution is 2 / (32768 * 20) which is
+ * close to 3051 ppb. In 60s mode, the resolution is closer to 1017.
+ */
+ stepsh = DIV_ROUND_CLOSEST(offset, 1017);
+ stepsl = DIV_ROUND_CLOSEST(offset, 3051);
+
+ if (stepsh >= -0x3E && stepsh <= 0x3E) {
+ /* 1017 ppb per step */
+ steps = stepsh;
+ subu |= RZN1_RTC_SUBU_DEV;
+ } else if (stepsl >= -0x3E && stepsl <= 0x3E) {
+ /* 3051 ppb per step */
+ steps = stepsl;
+ } else {
+ return -ERANGE;
+ }
+
+ if (!steps)
+ return 0;
+
+ if (steps > 0) {
+ subu |= steps + 1;
+ } else {
+ subu |= RZN1_RTC_SUBU_DECR;
+ subu |= (~(-steps - 1)) & 0x3F;
+ }
+
+ ret = readl_poll_timeout(rtc->base + RZN1_RTC_CTL2, ctl2,
+ !(ctl2 & RZN1_RTC_CTL2_WUST), 100, 2000000);
+ if (ret)
+ return ret;
+
+ writel(subu, rtc->base + RZN1_RTC_SUBU);
+
+ return 0;
+}
+
+static const struct rtc_class_ops rzn1_rtc_ops = {
+ .read_time = rzn1_rtc_read_time,
+ .set_time = rzn1_rtc_set_time,
+ .read_alarm = rzn1_rtc_read_alarm,
+ .set_alarm = rzn1_rtc_set_alarm,
+ .alarm_irq_enable = rzn1_rtc_alarm_irq_enable,
+ .read_offset = rzn1_rtc_read_offset,
+ .set_offset = rzn1_rtc_set_offset,
+};
+
+static int rzn1_rtc_probe(struct platform_device *pdev)
+{
+ struct rzn1_rtc *rtc;
+ int alarm_irq;
+ int ret;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rtc);
+
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtc->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rtc->base), "Missing reg\n");
+
+ alarm_irq = platform_get_irq(pdev, 0);
+ if (alarm_irq < 0)
+ return alarm_irq;
+
+ rtc->rtcdev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc->rtcdev))
+ return PTR_ERR(rtc->rtcdev);
+
+ rtc->rtcdev->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->rtcdev->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->rtcdev->ops = &rzn1_rtc_ops;
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtcdev->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtcdev->features);
+
+ devm_pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Ensure the clock counter is enabled.
+ * Set 24-hour mode and possible oscillator offset compensation in SUBU mode.
+ */
+ writel(RZN1_RTC_CTL0_CE | RZN1_RTC_CTL0_AMPM | RZN1_RTC_CTL0_SLSB_SUBU,
+ rtc->base + RZN1_RTC_CTL0);
+
+ /* Disable all interrupts */
+ writel(0, rtc->base + RZN1_RTC_CTL1);
+
+ ret = devm_request_irq(&pdev->dev, alarm_irq, rzn1_rtc_alarm_irq, 0,
+ dev_name(&pdev->dev), rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "RTC timer interrupt not available\n");
+ goto dis_runtime_pm;
+ }
+
+ ret = devm_rtc_register_device(rtc->rtcdev);
+ if (ret)
+ goto dis_runtime_pm;
+
+ return 0;
+
+dis_runtime_pm:
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static int rzn1_rtc_remove(struct platform_device *pdev)
+{
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id rzn1_rtc_of_match[] = {
+ { .compatible = "renesas,rzn1-rtc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rzn1_rtc_of_match);
+
+static struct platform_driver rzn1_rtc_driver = {
+ .probe = rzn1_rtc_probe,
+ .remove = rzn1_rtc_remove,
+ .driver = {
+ .name = "rzn1-rtc",
+ .of_match_table = rzn1_rtc_of_match,
+ },
+};
+module_platform_driver(rzn1_rtc_driver);
+
+MODULE_AUTHOR("Michel Pollet <Michel.Pollet@bp.renesas.com");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com");
+MODULE_DESCRIPTION("RZ/N1 RTC driver");
+MODULE_LICENSE("GPL");
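The SUBU correction in rzn1_rtc_set_offset() picks between two step sizes; the values 1017 and 3051 come straight from the resolution quoted in the driver comment: two pulses of the 32768 Hz oscillator applied per 60 s or per 20 s window. A small worked check of that arithmetic (standalone user-space sketch, not kernel code):

#include <stdio.h>

int main(void)
{
	/* 2 pulses out of 32768 Hz over a 20 s or 60 s correction window */
	double step_20s_ppb = 2.0 / (32768.0 * 20.0) * 1e9;
	double step_60s_ppb = 2.0 / (32768.0 * 60.0) * 1e9;

	printf("20 s mode: %.0f ppb per step\n", step_20s_ppb); /* ~3052; the driver rounds to 3051 */
	printf("60 s mode: %.0f ppb per step\n", step_60s_ppb); /* ~1017 */
	return 0;
}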
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 5252ce4cbda4..57540727ce1c 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -71,6 +71,10 @@
#define SUN6I_LOSC_OUT_GATING 0x0060
#define SUN6I_LOSC_OUT_GATING_EN_OFFSET 0
+/* General-purpose data */
+#define SUN6I_GP_DATA 0x0100
+#define SUN6I_GP_DATA_SIZE 0x20
+
/*
* Get date values
*/
@@ -679,6 +683,39 @@ static const struct rtc_class_ops sun6i_rtc_ops = {
.alarm_irq_enable = sun6i_rtc_alarm_irq_enable
};
+static int sun6i_rtc_nvmem_read(void *priv, unsigned int offset, void *_val, size_t bytes)
+{
+ struct sun6i_rtc_dev *chip = priv;
+ u32 *val = _val;
+ int i;
+
+ for (i = 0; i < bytes / 4; ++i)
+ val[i] = readl(chip->base + SUN6I_GP_DATA + offset + 4 * i);
+
+ return 0;
+}
+
+static int sun6i_rtc_nvmem_write(void *priv, unsigned int offset, void *_val, size_t bytes)
+{
+ struct sun6i_rtc_dev *chip = priv;
+ u32 *val = _val;
+ int i;
+
+ for (i = 0; i < bytes / 4; ++i)
+ writel(val[i], chip->base + SUN6I_GP_DATA + offset + 4 * i);
+
+ return 0;
+}
+
+static struct nvmem_config sun6i_rtc_nvmem_cfg = {
+ .type = NVMEM_TYPE_BATTERY_BACKED,
+ .reg_read = sun6i_rtc_nvmem_read,
+ .reg_write = sun6i_rtc_nvmem_write,
+ .size = SUN6I_GP_DATA_SIZE,
+ .word_size = 4,
+ .stride = 4,
+};
+
#ifdef CONFIG_PM_SLEEP
/* Enable IRQ wake on suspend, to wake up from RTC. */
static int sun6i_rtc_suspend(struct device *dev)
@@ -812,6 +849,11 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ sun6i_rtc_nvmem_cfg.priv = chip;
+ ret = devm_rtc_nvmem_register(chip->rtc, &sun6i_rtc_nvmem_cfg);
+ if (ret)
+ return ret;
+
dev_info(&pdev->dev, "RTC enabled\n");
return 0;
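The sun6i additions expose the RTC's battery-backed general-purpose registers as an nvmem device; with word_size and stride both set to 4, the callbacks simply loop over readl()/writel() on the SUN6I_GP_DATA window. A hedged sketch of the same provider pattern with a hypothetical register window (names are illustrative):

#include <linux/io.h>
#include <linux/nvmem-provider.h>

struct example_chip {
	void __iomem *base;	/* MMIO base of a battery-backed window */
};

static int example_nvmem_read(void *priv, unsigned int offset,
			      void *val, size_t bytes)
{
	struct example_chip *chip = priv;
	u32 *buf = val;
	size_t i;

	/* The config below advertises 4-byte words, so the callback reads
	 * one 32-bit register at a time and assumes aligned accesses. */
	for (i = 0; i < bytes / 4; i++)
		buf[i] = readl(chip->base + offset + 4 * i);

	return 0;
}

static struct nvmem_config example_nvmem_cfg = {
	.type		= NVMEM_TYPE_BATTERY_BACKED,
	.reg_read	= example_nvmem_read,
	.size		= 0x20,
	.word_size	= 4,
	.stride		= 4,
};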
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index d614843caf6c..8d0d0eaa3059 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -32,7 +32,8 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static void dcssblk_release(struct gendisk *disk, fmode_t mode);
static void dcssblk_submit_bio(struct bio *bio);
static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn);
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn);
static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
@@ -50,7 +51,8 @@ static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
long rc;
void *kaddr;
- rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
+ rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
+ &kaddr, NULL);
if (rc < 0)
return rc;
memset(kaddr, 0, nr_pages << PAGE_SHIFT);
@@ -927,7 +929,8 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
static long
dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev);
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 1cb9daf9c645..fa8df50bb49e 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -103,7 +103,11 @@ struct subchannel {
struct work_struct todo_work;
struct schib_config config;
u64 dma_mask;
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
} __attribute__ ((aligned(8)));
DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fa8293335077..913b6ddd040b 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -338,31 +338,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct subchannel *sch = to_subchannel(dev);
- char *driver_override, *old, *cp;
-
- /* We need to keep extra room for a newline */
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- device_lock(dev);
- old = sch->driver_override;
- if (strlen(driver_override)) {
- sch->driver_override = driver_override;
- } else {
- kfree(driver_override);
- sch->driver_override = NULL;
- }
- device_unlock(dev);
+ int ret;
- kfree(old);
+ ret = driver_set_override(dev, &sch->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
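The css driver_override_store() conversion relies on the common driver_set_override() helper, which handles the length check, newline stripping, device locking, and freeing of the previous string; the subchannel only keeps a const pointer it must not free directly. A minimal sketch of a bus using the helper (the example_dev structure is hypothetical):

#include <linux/device.h>

struct example_dev {
	struct device dev;
	const char *driver_override;	/* managed via driver_set_override() */
};

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct example_dev *edev = container_of(dev, struct example_dev, dev);
	int ret;

	ret = driver_set_override(dev, &edev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}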
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 8d1b2771c1aa..0c2be9421ab7 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -16,6 +16,7 @@
#include <asm/idals.h>
#include "vfio_ccw_cp.h"
+#include "vfio_ccw_private.h"
struct pfn_array {
/* Starting guest physical I/O address. */
@@ -98,17 +99,17 @@ static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
* If the pin request partially succeeds, or fails completely,
* all pages are left unpinned and a negative error value is returned.
*/
-static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
+static int pfn_array_pin(struct pfn_array *pa, struct vfio_device *vdev)
{
int ret = 0;
- ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
+ ret = vfio_pin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr,
IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
if (ret < 0) {
goto err_out;
} else if (ret > 0 && ret != pa->pa_nr) {
- vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
+ vfio_unpin_pages(vdev, pa->pa_iova_pfn, ret);
ret = -EINVAL;
goto err_out;
}
@@ -122,11 +123,11 @@ err_out:
}
/* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
+static void pfn_array_unpin_free(struct pfn_array *pa, struct vfio_device *vdev)
{
/* Only unpin if any pages were pinned to begin with */
if (pa->pa_nr)
- vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+ vfio_unpin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr);
pa->pa_nr = 0;
kfree(pa->pa_iova_pfn);
}
@@ -190,8 +191,7 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
* Within the domain (@mdev), copy @n bytes from a guest physical
* address (@iova) to a host physical address (@to).
*/
-static long copy_from_iova(struct device *mdev,
- void *to, u64 iova,
+static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
unsigned long n)
{
struct pfn_array pa = {0};
@@ -203,9 +203,9 @@ static long copy_from_iova(struct device *mdev,
if (ret < 0)
return ret;
- ret = pfn_array_pin(&pa, mdev);
+ ret = pfn_array_pin(&pa, vdev);
if (ret < 0) {
- pfn_array_unpin_free(&pa, mdev);
+ pfn_array_unpin_free(&pa, vdev);
return ret;
}
@@ -226,7 +226,7 @@ static long copy_from_iova(struct device *mdev,
break;
}
- pfn_array_unpin_free(&pa, mdev);
+ pfn_array_unpin_free(&pa, vdev);
return l;
}
@@ -423,11 +423,13 @@ static int ccwchain_loop_tic(struct ccwchain *chain,
static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
{
+ struct vfio_device *vdev =
+ &container_of(cp, struct vfio_ccw_private, cp)->vdev;
struct ccwchain *chain;
int len, ret;
/* Copy 2K (the most we support today) of possible CCWs */
- len = copy_from_iova(cp->mdev, cp->guest_cp, cda,
+ len = copy_from_iova(vdev, cp->guest_cp, cda,
CCWCHAIN_LEN_MAX * sizeof(struct ccw1));
if (len)
return len;
@@ -508,6 +510,8 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
int idx,
struct channel_program *cp)
{
+ struct vfio_device *vdev =
+ &container_of(cp, struct vfio_ccw_private, cp)->vdev;
struct ccw1 *ccw;
struct pfn_array *pa;
u64 iova;
@@ -526,7 +530,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
if (ccw_is_idal(ccw)) {
/* Read first IDAW to see if it's 4K-aligned or not. */
/* All subsequent IDAws will be 4K-aligned. */
- ret = copy_from_iova(cp->mdev, &iova, ccw->cda, sizeof(iova));
+ ret = copy_from_iova(vdev, &iova, ccw->cda, sizeof(iova));
if (ret)
return ret;
} else {
@@ -555,7 +559,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
if (ccw_is_idal(ccw)) {
/* Copy guest IDAL into host IDAL */
- ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idal_len);
+ ret = copy_from_iova(vdev, idaws, ccw->cda, idal_len);
if (ret)
goto out_unpin;
@@ -574,7 +578,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
}
if (ccw_does_data_transfer(ccw)) {
- ret = pfn_array_pin(pa, cp->mdev);
+ ret = pfn_array_pin(pa, vdev);
if (ret < 0)
goto out_unpin;
} else {
@@ -590,7 +594,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
return 0;
out_unpin:
- pfn_array_unpin_free(pa, cp->mdev);
+ pfn_array_unpin_free(pa, vdev);
out_free_idaws:
kfree(idaws);
out_init:
@@ -632,8 +636,10 @@ static int ccwchain_fetch_one(struct ccwchain *chain,
* Returns:
* %0 on success and a negative error value on failure.
*/
-int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
+int cp_init(struct channel_program *cp, union orb *orb)
{
+ struct vfio_device *vdev =
+ &container_of(cp, struct vfio_ccw_private, cp)->vdev;
/* custom ratelimit used to avoid flood during guest IPL */
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
int ret;
@@ -650,11 +656,12 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
* the problem if something does break.
*/
if (!orb->cmd.pfch && __ratelimit(&ratelimit_state))
- dev_warn(mdev, "Prefetching channel program even though prefetch not specified in ORB");
+ dev_warn(
+ vdev->dev,
+ "Prefetching channel program even though prefetch not specified in ORB");
INIT_LIST_HEAD(&cp->ccwchain_list);
memcpy(&cp->orb, orb, sizeof(*orb));
- cp->mdev = mdev;
/* Build a ccwchain for the first CCW segment */
ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);
@@ -682,6 +689,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
*/
void cp_free(struct channel_program *cp)
{
+ struct vfio_device *vdev =
+ &container_of(cp, struct vfio_ccw_private, cp)->vdev;
struct ccwchain *chain, *temp;
int i;
@@ -691,7 +700,7 @@ void cp_free(struct channel_program *cp)
cp->initialized = false;
list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++) {
- pfn_array_unpin_free(chain->ch_pa + i, cp->mdev);
+ pfn_array_unpin_free(chain->ch_pa + i, vdev);
ccwchain_cda_free(chain, i);
}
ccwchain_free(chain);
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index ba31240ce965..e4c436199b4c 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -37,13 +37,11 @@
struct channel_program {
struct list_head ccwchain_list;
union orb orb;
- struct device *mdev;
bool initialized;
struct ccw1 *guest_cp;
};
-extern int cp_init(struct channel_program *cp, struct device *mdev,
- union orb *orb);
+extern int cp_init(struct channel_program *cp, union orb *orb);
extern void cp_free(struct channel_program *cp);
extern int cp_prefetch(struct channel_program *cp);
extern union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index e435a9cd92da..8483a266051c 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -262,8 +262,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
errstr = "transport mode";
goto err_out;
}
- io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
- orb);
+ io_region->ret_code = cp_init(&private->cp, orb);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
"%pUl (%x.%x.%04x): cp_init=%d\n",
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index c4d60cdbf247..b49e2e9db2dc 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -183,7 +183,7 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
private->nb.notifier_call = vfio_ccw_mdev_notifier;
- ret = vfio_register_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
+ ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY,
&events, &private->nb);
if (ret)
return ret;
@@ -204,8 +204,7 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
out_unregister:
vfio_ccw_unregister_dev_regions(private);
- vfio_unregister_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
- &private->nb);
+ vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb);
return ret;
}
@@ -223,7 +222,7 @@ static void vfio_ccw_mdev_close_device(struct vfio_device *vdev)
cp_free(&private->cp);
vfio_ccw_unregister_dev_regions(private);
- vfio_unregister_notifier(vdev->dev, VFIO_IOMMU_NOTIFY, &private->nb);
+ vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb);
}
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index ee0a3bf8f476..a7d2a95796d3 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -124,8 +124,7 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
q->saved_isc = VFIO_AP_ISC_INVALID;
}
if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) {
- vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
- &q->saved_pfn, 1);
+ vfio_unpin_pages(&q->matrix_mdev->vdev, &q->saved_pfn, 1);
q->saved_pfn = 0;
}
}
@@ -258,7 +257,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
return status;
}
- ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1,
+ ret = vfio_pin_pages(&q->matrix_mdev->vdev, &g_pfn, 1,
IOMMU_READ | IOMMU_WRITE, &h_pfn);
switch (ret) {
case 1:
@@ -301,7 +300,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
break;
case AP_RESPONSE_OTHERWISE_CHANGED:
/* We could not modify IRQ settings: clear new configuration */
- vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1);
+ vfio_unpin_pages(&q->matrix_mdev->vdev, &g_pfn, 1);
kvm_s390_gisc_unregister(kvm, isc);
break;
default:
@@ -1250,7 +1249,7 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
struct vfio_iommu_type1_dma_unmap *unmap = data;
unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;
- vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1);
+ vfio_unpin_pages(&matrix_mdev->vdev, &g_pfn, 1);
return NOTIFY_OK;
}
@@ -1285,25 +1284,6 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
}
}
-static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- int notify_rc = NOTIFY_OK;
- struct ap_matrix_mdev *matrix_mdev;
-
- if (action != VFIO_GROUP_NOTIFY_SET_KVM)
- return NOTIFY_OK;
-
- matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
-
- if (!data)
- vfio_ap_mdev_unset_kvm(matrix_mdev);
- else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
- notify_rc = NOTIFY_DONE;
-
- return notify_rc;
-}
-
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
struct device *dev;
@@ -1403,25 +1383,23 @@ static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
unsigned long events;
int ret;
- matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
- events = VFIO_GROUP_NOTIFY_SET_KVM;
+ if (!vdev->kvm)
+ return -EINVAL;
- ret = vfio_register_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
- &events, &matrix_mdev->group_notifier);
+ ret = vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
if (ret)
return ret;
matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
- &events, &matrix_mdev->iommu_notifier);
+ ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY, &events,
+ &matrix_mdev->iommu_notifier);
if (ret)
- goto out_unregister_group;
+ goto err_kvm;
return 0;
-out_unregister_group:
- vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
- &matrix_mdev->group_notifier);
+err_kvm:
+ vfio_ap_mdev_unset_kvm(matrix_mdev);
return ret;
}
@@ -1430,10 +1408,8 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
struct ap_matrix_mdev *matrix_mdev =
container_of(vdev, struct ap_matrix_mdev, vdev);
- vfio_unregister_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
+ vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY,
&matrix_mdev->iommu_notifier);
- vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
- &matrix_mdev->group_notifier);
vfio_ap_mdev_unset_kvm(matrix_mdev);
}
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index 648fcaf8104a..a26efd804d0d 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -81,8 +81,6 @@ struct ap_matrix {
* @node: allows the ap_matrix_mdev struct to be added to a list
* @matrix: the adapters, usage domains and control domains assigned to the
* mediated matrix device.
- * @group_notifier: notifier block used for specifying callback function for
- * handling the VFIO_GROUP_NOTIFY_SET_KVM event
* @iommu_notifier: notifier block used for specifying callback function for
* handling the VFIO_IOMMU_NOTIFY_DMA_UNMAP event
* @kvm: the struct holding guest's state
@@ -94,7 +92,6 @@ struct ap_matrix_mdev {
struct vfio_device vdev;
struct list_head node;
struct ap_matrix matrix;
- struct notifier_block group_notifier;
struct notifier_block iommu_notifier;
struct kvm *kvm;
crypto_hook pqap_hook;
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index d35e7a3f7067..97e51c34e6cf 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -62,6 +62,7 @@ struct virtio_ccw_device {
unsigned int revision; /* Transport revision */
wait_queue_head_t wait_q;
spinlock_t lock;
+ rwlock_t irq_lock;
struct mutex io_lock; /* Serializes I/O requests */
struct list_head virtqueues;
bool is_thinint;
@@ -970,6 +971,10 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
ccw->flags = 0;
ccw->count = sizeof(status);
ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
+ /* We use ssch to set the status; ssch is a serializing instruction
+ * that guarantees the memory writes have completed before the
+ * instruction itself executes.
+ */
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
/* Write failed? We assume status is unchanged. */
if (ret)
@@ -984,6 +989,30 @@ static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
return dev_name(&vcdev->cdev->dev);
}
+static void virtio_ccw_synchronize_cbs(struct virtio_device *vdev)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ struct airq_info *info = vcdev->airq_info;
+
+ if (info) {
+ /*
+ * This device uses adapter interrupts: synchronize with
+ * vring_interrupt() called by virtio_airq_handler()
+ * via the indicator area lock.
+ */
+ write_lock_irq(&info->lock);
+ write_unlock_irq(&info->lock);
+ } else {
+ /* This device uses classic interrupts: synchronize
+ * with vring_interrupt() called by
+ * virtio_ccw_int_handler() via the per-device
+ * irq_lock
+ */
+ write_lock_irq(&vcdev->irq_lock);
+ write_unlock_irq(&vcdev->irq_lock);
+ }
+}
+
static const struct virtio_config_ops virtio_ccw_config_ops = {
.get_features = virtio_ccw_get_features,
.finalize_features = virtio_ccw_finalize_features,
@@ -995,6 +1024,7 @@ static const struct virtio_config_ops virtio_ccw_config_ops = {
.find_vqs = virtio_ccw_find_vqs,
.del_vqs = virtio_ccw_del_vqs,
.bus_name = virtio_ccw_bus_name,
+ .synchronize_cbs = virtio_ccw_synchronize_cbs,
};
@@ -1106,6 +1136,8 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
vcdev->err = -EIO;
}
virtio_ccw_check_activity(vcdev, activity);
+ /* Interrupts are disabled here */
+ read_lock(&vcdev->irq_lock);
for_each_set_bit(i, indicators(vcdev),
sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
/* The bit clear must happen before the vring kick. */
@@ -1114,6 +1146,7 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
vq = virtio_ccw_vq_by_ind(vcdev, i);
vring_interrupt(0, vq);
}
+ read_unlock(&vcdev->irq_lock);
if (test_bit(0, indicators2(vcdev))) {
virtio_config_changed(&vcdev->vdev);
clear_bit(0, indicators2(vcdev));
@@ -1284,6 +1317,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
init_waitqueue_head(&vcdev->wait_q);
INIT_LIST_HEAD(&vcdev->virtqueues);
spin_lock_init(&vcdev->lock);
+ rwlock_init(&vcdev->irq_lock);
mutex_init(&vcdev->io_lock);
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
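virtio_ccw_synchronize_cbs() uses an empty write-side critical section to drain callbacks: the interrupt paths take the corresponding lock for reading around vring_interrupt(), so acquiring and releasing the write lock cannot complete until every in-flight handler has left its read-side section. The same idiom in isolation (a sketch, not the driver code):

#include <linux/spinlock.h>

/* Wait for all readers currently holding @lock to drop it. */
static void example_synchronize_readers(rwlock_t *lock)
{
	write_lock_irq(lock);
	write_unlock_irq(lock);
}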
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 6e3a04107bb6..a9fe5152addd 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -500,7 +500,6 @@ source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/mpi3mr/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
-source "drivers/scsi/ufs/Kconfig"
config SCSI_HPTIOP
tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 19814c26c908..2ad3bc052531 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -101,7 +101,6 @@ obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/
obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr/
-obj-$(CONFIG_SCSI_UFSHCD) += ufs/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
obj-$(CONFIG_SCSI_INITIO) += initio.o
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index 429d64299fe9..f910e2553fbb 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -232,7 +232,7 @@ static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
*/
rq->req_stat = RS_PENDING;
if (test_bit(AF_DEGRADED_MODE, &a->flags))
- /* not suppported for now */;
+ /* not supported for now */;
else
build_flash_msg(a, rq);
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index ac17e3a35d2c..6370cdbfba08 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2182,7 +2182,7 @@ static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ire
case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
u16 len = sci_req_tx_bytes(ireq);
- /* likely non-error data underrrun, workaround missing
+ /* likely non-error data underrun, workaround missing
* d2h frame from the controller
*/
if (d2h->fis_type != FIS_REGD2H) {
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 092a971d066b..bbd1faf41e80 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -33,4 +33,4 @@ obj-$(CONFIG_SCSI_LPFC) := lpfc.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \
lpfc_hbadisc.o lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o \
lpfc_scsi.o lpfc_attr.o lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o \
- lpfc_nvme.o lpfc_nvmet.o
+ lpfc_nvme.o lpfc_nvmet.o lpfc_vmid.o
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index b0775be31d5c..b1be0dd0337a 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -671,6 +671,9 @@ int lpfc_vmid_cmd(struct lpfc_vport *vport,
int lpfc_vmid_hash_fn(const char *vmid, int len);
struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
uint32_t hash, uint8_t *buf);
+int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
+ enum dma_data_direction iodir,
+ union lpfc_vmid_io_tag *tag);
void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 748c53219986..7b8cf678abb5 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1736,6 +1736,28 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_TOMCAT 0x0714
#define PCI_DEVICE_ID_SKYHAWK 0x0724
#define PCI_DEVICE_ID_SKYHAWK_VF 0x072c
+#define PCI_VENDOR_ID_ATTO 0x117c
+#define PCI_DEVICE_ID_CLRY_16XE 0x0064
+#define PCI_DEVICE_ID_CLRY_161E 0x0063
+#define PCI_DEVICE_ID_CLRY_162E 0x0064
+#define PCI_DEVICE_ID_CLRY_164E 0x0065
+#define PCI_DEVICE_ID_CLRY_16XP 0x0094
+#define PCI_DEVICE_ID_CLRY_161P 0x00a0
+#define PCI_DEVICE_ID_CLRY_162P 0x0094
+#define PCI_DEVICE_ID_CLRY_164P 0x00a1
+#define PCI_DEVICE_ID_CLRY_32XE 0x0094
+#define PCI_DEVICE_ID_CLRY_321E 0x00a2
+#define PCI_DEVICE_ID_CLRY_322E 0x00a3
+#define PCI_DEVICE_ID_CLRY_324E 0x00ac
+#define PCI_DEVICE_ID_CLRY_32XP 0x00bb
+#define PCI_DEVICE_ID_CLRY_321P 0x00bc
+#define PCI_DEVICE_ID_CLRY_322P 0x00bd
+#define PCI_DEVICE_ID_CLRY_324P 0x00be
+#define PCI_DEVICE_ID_TLFC_2 0x0064
+#define PCI_DEVICE_ID_TLFC_2XX2 0x4064
+#define PCI_DEVICE_ID_TLFC_3 0x0094
+#define PCI_DEVICE_ID_TLFC_3162 0x40a6
+#define PCI_DEVICE_ID_TLFC_3322 0x40a7
#define JEDEC_ID_ADDRESS 0x0080001c
#define FIREFLY_JEDEC_ID 0x1ACC
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h
index 6a90e6e53d09..a1b9be245560 100644
--- a/drivers/scsi/lpfc/lpfc_ids.h
+++ b/drivers/scsi/lpfc/lpfc_ids.h
@@ -124,5 +124,35 @@ const struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_161E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_162E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_164E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_161P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_162P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_164P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_321E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_322E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_324E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_321P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_322P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_324P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_2,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_2XX2, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3162, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3322, },
{ 0 }
};
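
A note on the table entries above: unlike the existing Emulex rows, which use PCI_ANY_ID wildcards for the subsystem fields, the new ATTO rows pin both the subsystem vendor and subsystem device, so the driver binds only to boards carrying ATTO subsystem IDs. A hedged, generic sketch of the same pattern written with the PCI_DEVICE_SUB() helper; the numeric values simply mirror the defines added in lpfc_hw.h above:

#include <linux/module.h>
#include <linux/pci.h>

#define EXAMPLE_VENDOR_ID	0x117c	/* PCI_VENDOR_ID_ATTO above */
#define EXAMPLE_DEVICE_ID	0x0064	/* PCI_DEVICE_ID_CLRY_16XE above */
#define EXAMPLE_SUBDEVICE_ID	0x0063	/* PCI_DEVICE_ID_CLRY_161E above */

static const struct pci_device_id example_id_table[] = {
	/* Equivalent to the open-coded { vendor, device, subvendor,
	 * subdevice } rows above: the PCI core matches only when all four
	 * fields agree; PCI_ANY_ID fields act as wildcards.
	 */
	{ PCI_DEVICE_SUB(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID,
			 EXAMPLE_VENDOR_ID, EXAMPLE_SUBDEVICE_ID) },
	{ }
};
MODULE_DEVICE_TABLE(pci, example_id_table);
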
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2bffaa681fcc..93b94c64518d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2415,6 +2415,90 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
}
/**
+ * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
+ * @phba: pointer to lpfc hba data structure.
+ * @mdp: pointer to the data structure to hold the derived model name.
+ * @descp: pointer to the data structure to hold the derived description.
+ *
+ * This routine retrieves HBA's description based on its registered PCI device
+ * ID. The @descp passed into this function points to an array of 256 chars. It
+ * shall be returned with the model name, maximum speed, and the host bus type.
+ * The @mdp passed into this function points to an array of 80 chars. When the
+ * function returns, the @mdp will be filled with the model name.
+ **/
+static void
+lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
+{
+ uint16_t sub_dev_id = phba->pcidev->subsystem_device;
+ char *model = "<Unknown>";
+ int tbolt = 0;
+
+ switch (sub_dev_id) {
+ case PCI_DEVICE_ID_CLRY_161E:
+ model = "161E";
+ break;
+ case PCI_DEVICE_ID_CLRY_162E:
+ model = "162E";
+ break;
+ case PCI_DEVICE_ID_CLRY_164E:
+ model = "164E";
+ break;
+ case PCI_DEVICE_ID_CLRY_161P:
+ model = "161P";
+ break;
+ case PCI_DEVICE_ID_CLRY_162P:
+ model = "162P";
+ break;
+ case PCI_DEVICE_ID_CLRY_164P:
+ model = "164P";
+ break;
+ case PCI_DEVICE_ID_CLRY_321E:
+ model = "321E";
+ break;
+ case PCI_DEVICE_ID_CLRY_322E:
+ model = "322E";
+ break;
+ case PCI_DEVICE_ID_CLRY_324E:
+ model = "324E";
+ break;
+ case PCI_DEVICE_ID_CLRY_321P:
+ model = "321P";
+ break;
+ case PCI_DEVICE_ID_CLRY_322P:
+ model = "322P";
+ break;
+ case PCI_DEVICE_ID_CLRY_324P:
+ model = "324P";
+ break;
+ case PCI_DEVICE_ID_TLFC_2XX2:
+ model = "2XX2";
+ tbolt = 1;
+ break;
+ case PCI_DEVICE_ID_TLFC_3162:
+ model = "3162";
+ tbolt = 1;
+ break;
+ case PCI_DEVICE_ID_TLFC_3322:
+ model = "3322";
+ tbolt = 1;
+ break;
+ default:
+ model = "Unknown";
+ break;
+ }
+
+ if (mdp && mdp[0] == '\0')
+ snprintf(mdp, 79, "%s", model);
+
+ if (descp && descp[0] == '\0')
+ snprintf(descp, 255,
+ "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
+ (tbolt) ? "ThunderLink FC " : "Celerity FC-",
+ model,
+ phba->Port);
+}
+
+/**
* lpfc_get_hba_model_desc - Retrieve HBA device model name and description
* @phba: pointer to lpfc hba data structure.
* @mdp: pointer to the data structure to hold the derived model name.
@@ -2444,6 +2528,11 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
&& descp && descp[0] != '\0')
return;
+ if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
+ lpfc_get_atto_model_desc(phba, mdp, descp);
+ return;
+ }
+
if (phba->lmt & LMT_64Gb)
max_speed = 64;
else if (phba->lmt & LMT_32Gb)
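
As a quick sanity check of the string formatting introduced in lpfc_get_atto_model_desc() above, here is a standalone userspace rendering of the same snprintf format. The model/tbolt inputs correspond to one of the cases in the switch; the port name "A" is made up for the example:

#include <stdio.h>

int main(void)
{
	char descp[256] = "";
	const char *model = "321E";	/* e.g. PCI_DEVICE_ID_CLRY_321E */
	int tbolt = 0;			/* Celerity, not ThunderLink */

	snprintf(descp, 255,
		 "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
		 tbolt ? "ThunderLink FC " : "Celerity FC-",
		 model, "A" /* hypothetical phba->Port value */);

	/* Prints: ATTO Celerity FC-321E, Fibre Channel Adapter Initiator, Port A */
	puts(descp);
	return 0;
}
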
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 5385f4de5523..335e90633933 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1279,6 +1279,19 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Words 13 14 15 are for PBDE support */
+ /* add the VMID tags as per switch response */
+ if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) {
+ if (phba->pport->vmid_priority_tagging) {
+ bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+ lpfc_ncmd->cur_iocbq.vmid_tag.cs_ctl_vmid);
+ } else {
+ bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
+ wqe->words[31] = lpfc_ncmd->cur_iocbq.vmid_tag.app_id;
+ }
+ }
+
pwqeq->vport = vport;
return 0;
}
@@ -1504,6 +1517,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_fcpreq_priv *freqpriv;
struct nvme_common_command *sqe;
uint64_t start = 0;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ u8 *uuid = NULL;
+ int err;
+ enum dma_data_direction iodir;
+#endif
/* Validate pointers. LLDD fault handling with transport does
* have timing races.
@@ -1662,6 +1680,33 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_ncmd->ndlp = ndlp;
lpfc_ncmd->qidx = lpfc_queue_info->qidx;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ /* check the necessary and sufficient condition to support VMID */
+ if (lpfc_is_vmid_enabled(phba) &&
+ (ndlp->vmid_support ||
+ phba->pport->vmid_priority_tagging ==
+ LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
+ /* is the I/O generated by a VM, get the associated virtual */
+ /* entity id */
+ uuid = nvme_fc_io_getuuid(pnvme_fcreq);
+
+ if (uuid) {
+ if (pnvme_fcreq->io_dir == NVMEFC_FCP_WRITE)
+ iodir = DMA_TO_DEVICE;
+ else if (pnvme_fcreq->io_dir == NVMEFC_FCP_READ)
+ iodir = DMA_FROM_DEVICE;
+ else
+ iodir = DMA_NONE;
+
+ err = lpfc_vmid_get_appid(vport, uuid, iodir,
+ (union lpfc_vmid_io_tag *)
+ &lpfc_ncmd->cur_iocbq.vmid_tag);
+ if (!err)
+ lpfc_ncmd->cur_iocbq.cmd_flag |= LPFC_IO_VMID;
+ }
+ }
+#endif
+
/*
* Issue the IO on the WQ indicated by index in the hw_queue_handle.
* This identfier was create in our hardware queue create callback
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3b8afa9d3056..d43968203248 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -87,14 +87,6 @@ static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
-static void
-lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
- struct lpfc_vmid *vmp);
-static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
- *cmd, struct lpfc_vmid *vmp,
- union lpfc_vmid_io_tag *tag);
-static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
- struct lpfc_vmid *vmid);
/**
* lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
@@ -5271,254 +5263,6 @@ void lpfc_poll_timeout(struct timer_list *t)
}
/*
- * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
- * @vport: The virtual port for which this call is being executed.
- * @hash: calculated hash value
- * @buf: uuid associated with the VE
- * Return the VMID entry associated with the UUID
- * Make sure to acquire the appropriate lock before invoking this routine.
- */
-struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
- u32 hash, u8 *buf)
-{
- struct lpfc_vmid *vmp;
-
- hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
- if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
- return vmp;
- }
- return NULL;
-}
-
-/*
- * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
- * @vport: The virtual port for which this call is being executed.
- * @hash - calculated hash value
- * @vmp: Pointer to a VMID entry representing a VM sending I/O
- *
- * This routine will insert the newly acquired VMID entity in the hash table.
- * Make sure to acquire the appropriate lock before invoking this routine.
- */
-static void
-lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
- struct lpfc_vmid *vmp)
-{
- hash_add(vport->hash_table, &vmp->hnode, hash);
-}
-
-/*
- * lpfc_vmid_hash_fn - create a hash value of the UUID
- * @vmid: uuid associated with the VE
- * @len: length of the VMID string
- * Returns the calculated hash value
- */
-int lpfc_vmid_hash_fn(const char *vmid, int len)
-{
- int c;
- int hash = 0;
-
- if (len == 0)
- return 0;
- while (len--) {
- c = *vmid++;
- if (c >= 'A' && c <= 'Z')
- c += 'a' - 'A';
-
- hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
- (c >> LPFC_VMID_HASH_SHIFT)) * 19;
- }
-
- return hash & LPFC_VMID_HASH_MASK;
-}
-
-/*
- * lpfc_vmid_update_entry - update the vmid entry in the hash table
- * @vport: The virtual port for which this call is being executed.
- * @cmd: address of scsi cmd descriptor
- * @vmp: Pointer to a VMID entry representing a VM sending I/O
- * @tag: VMID tag
- */
-static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
- *cmd, struct lpfc_vmid *vmp,
- union lpfc_vmid_io_tag *tag)
-{
- u64 *lta;
-
- if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
- tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
- else if (vport->phba->cfg_vmid_app_header)
- tag->app_id = vmp->un.app_id;
-
- if (cmd->sc_data_direction == DMA_TO_DEVICE)
- vmp->io_wr_cnt++;
- else
- vmp->io_rd_cnt++;
-
- /* update the last access timestamp in the table */
- lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
- *lta = jiffies;
-}
-
-static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
- struct lpfc_vmid *vmid)
-{
- u32 hash;
- struct lpfc_vmid *pvmid;
-
- if (vport->port_type == LPFC_PHYSICAL_PORT) {
- vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
- } else {
- hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
- pvmid =
- lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
- vmid->host_vmid);
- if (pvmid)
- vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
- else
- vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
- }
-}
-
-/*
- * lpfc_vmid_get_appid - get the VMID associated with the UUID
- * @vport: The virtual port for which this call is being executed.
- * @uuid: UUID associated with the VE
- * @cmd: address of scsi_cmd descriptor
- * @tag: VMID tag
- * Returns status of the function
- */
-static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
- scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag)
-{
- struct lpfc_vmid *vmp = NULL;
- int hash, len, rc = -EPERM, i;
-
- /* check if QFPA is complete */
- if (lpfc_vmid_is_type_priority_tag(vport) &&
- !(vport->vmid_flag & LPFC_VMID_QFPA_CMPL) &&
- (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA)) {
- vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
- return -EAGAIN;
- }
-
- /* search if the UUID has already been mapped to the VMID */
- len = strlen(uuid);
- hash = lpfc_vmid_hash_fn(uuid, len);
-
- /* search for the VMID in the table */
- read_lock(&vport->vmid_lock);
- vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
-
- /* if found, check if its already registered */
- if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
- read_unlock(&vport->vmid_lock);
- lpfc_vmid_update_entry(vport, cmd, vmp, tag);
- rc = 0;
- } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
- vmp->flag & LPFC_VMID_DE_REGISTER)) {
- /* else if register or dereg request has already been sent */
- /* Hence VMID tag will not be added for this I/O */
- read_unlock(&vport->vmid_lock);
- rc = -EBUSY;
- } else {
- /* The VMID was not found in the hashtable. At this point, */
- /* drop the read lock first before proceeding further */
- read_unlock(&vport->vmid_lock);
- /* start the process to obtain one as per the */
- /* type of the VMID indicated */
- write_lock(&vport->vmid_lock);
- vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
-
- /* while the read lock was released, in case the entry was */
- /* added by other context or is in process of being added */
- if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
- lpfc_vmid_update_entry(vport, cmd, vmp, tag);
- write_unlock(&vport->vmid_lock);
- return 0;
- } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
- write_unlock(&vport->vmid_lock);
- return -EBUSY;
- }
-
- /* else search and allocate a free slot in the hash table */
- if (vport->cur_vmid_cnt < vport->max_vmid) {
- for (i = 0; i < vport->max_vmid; i++) {
- vmp = vport->vmid + i;
- if (vmp->flag == LPFC_VMID_SLOT_FREE)
- break;
- }
- if (i == vport->max_vmid)
- vmp = NULL;
- } else {
- vmp = NULL;
- }
-
- if (!vmp) {
- write_unlock(&vport->vmid_lock);
- return -ENOMEM;
- }
-
- /* Add the vmid and register */
- lpfc_put_vmid_in_hashtable(vport, hash, vmp);
- vmp->vmid_len = len;
- memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
- vmp->io_rd_cnt = 0;
- vmp->io_wr_cnt = 0;
- vmp->flag = LPFC_VMID_SLOT_USED;
-
- vmp->delete_inactive =
- vport->vmid_inactivity_timeout ? 1 : 0;
-
- /* if type priority tag, get next available VMID */
- if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
- lpfc_vmid_assign_cs_ctl(vport, vmp);
-
- /* allocate the per cpu variable for holding */
- /* the last access time stamp only if VMID is enabled */
- if (!vmp->last_io_time)
- vmp->last_io_time = __alloc_percpu(sizeof(u64),
- __alignof__(struct
- lpfc_vmid));
- if (!vmp->last_io_time) {
- hash_del(&vmp->hnode);
- vmp->flag = LPFC_VMID_SLOT_FREE;
- write_unlock(&vport->vmid_lock);
- return -EIO;
- }
-
- write_unlock(&vport->vmid_lock);
-
- /* complete transaction with switch */
- if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
- rc = lpfc_vmid_uvem(vport, vmp, true);
- else if (vport->phba->cfg_vmid_app_header)
- rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
- if (!rc) {
- write_lock(&vport->vmid_lock);
- vport->cur_vmid_cnt++;
- vmp->flag |= LPFC_VMID_REQ_REGISTER;
- write_unlock(&vport->vmid_lock);
- } else {
- write_lock(&vport->vmid_lock);
- hash_del(&vmp->hnode);
- vmp->flag = LPFC_VMID_SLOT_FREE;
- free_percpu(vmp->last_io_time);
- write_unlock(&vport->vmid_lock);
- return -EIO;
- }
-
- /* finally, enable the idle timer once */
- if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
- mod_timer(&vport->phba->inactive_vmid_poll,
- jiffies +
- msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
- vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
- }
- }
- return rc;
-}
-
-/*
* lpfc_is_command_vm_io - get the UUID from blk cgroup
* @cmd: Pointer to scsi_cmnd data structure
* Returns UUID if present, otherwise NULL
@@ -5704,9 +5448,10 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
uuid = lpfc_is_command_vm_io(cmnd);
if (uuid) {
- err = lpfc_vmid_get_appid(vport, uuid, cmnd,
- (union lpfc_vmid_io_tag *)
- &cur_iocbq->vmid_tag);
+ err = lpfc_vmid_get_appid(vport, uuid,
+ cmnd->sc_data_direction,
+ (union lpfc_vmid_io_tag *)
+ &cur_iocbq->vmid_tag);
if (!err)
cur_iocbq->cmd_flag |= LPFC_IO_VMID;
}
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
new file mode 100644
index 000000000000..f64ced04b912
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vmid.c
@@ -0,0 +1,288 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.broadcom.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/interrupt.h>
+#include <linux/dma-direction.h>
+
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+
+
+/*
+ * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash: calculated hash value
+ * @buf: uuid associated with the VE
+ * Return the VMID entry associated with the UUID
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
+ u32 hash, u8 *buf)
+{
+ struct lpfc_vmid *vmp;
+
+ hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
+ if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
+ return vmp;
+ }
+ return NULL;
+}
+
+/*
+ * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash - calculated hash value
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ *
+ * This routine will insert the newly acquired VMID entity in the hash table.
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+static void
+lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
+ struct lpfc_vmid *vmp)
+{
+ hash_add(vport->hash_table, &vmp->hnode, hash);
+}
+
+/*
+ * lpfc_vmid_hash_fn - create a hash value of the UUID
+ * @vmid: uuid associated with the VE
+ * @len: length of the VMID string
+ * Returns the calculated hash value
+ */
+int lpfc_vmid_hash_fn(const char *vmid, int len)
+{
+ int c;
+ int hash = 0;
+
+ if (len == 0)
+ return 0;
+ while (len--) {
+ c = *vmid++;
+ if (c >= 'A' && c <= 'Z')
+ c += 'a' - 'A';
+
+ hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
+ (c >> LPFC_VMID_HASH_SHIFT)) * 19;
+ }
+
+ return hash & LPFC_VMID_HASH_MASK;
+}
+
+/*
+ * lpfc_vmid_update_entry - update the vmid entry in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @iodir: io direction
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ * @tag: VMID tag
+ */
+static void lpfc_vmid_update_entry(struct lpfc_vport *vport,
+ enum dma_data_direction iodir,
+ struct lpfc_vmid *vmp,
+ union lpfc_vmid_io_tag *tag)
+{
+ u64 *lta;
+
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
+ else if (vport->phba->cfg_vmid_app_header)
+ tag->app_id = vmp->un.app_id;
+
+ if (iodir == DMA_TO_DEVICE)
+ vmp->io_wr_cnt++;
+ else if (iodir == DMA_FROM_DEVICE)
+ vmp->io_rd_cnt++;
+
+ /* update the last access timestamp in the table */
+ lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
+ *lta = jiffies;
+}
+
+static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
+ struct lpfc_vmid *vmid)
+{
+ u32 hash;
+ struct lpfc_vmid *pvmid;
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+ } else {
+ hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
+ pvmid =
+ lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
+ vmid->host_vmid);
+ if (pvmid)
+ vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
+ else
+ vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+ }
+}
+
+/*
+ * lpfc_vmid_get_appid - get the VMID associated with the UUID
+ * @vport: The virtual port for which this call is being executed.
+ * @uuid: UUID associated with the VE
+ * @cmd: address of scsi_cmd descriptor
+ * @iodir: io direction
+ * @tag: VMID tag
+ * Returns status of the function
+ */
+int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
+ enum dma_data_direction iodir,
+ union lpfc_vmid_io_tag *tag)
+{
+ struct lpfc_vmid *vmp = NULL;
+ int hash, len, rc = -EPERM, i;
+
+ /* check if QFPA is complete */
+ if (lpfc_vmid_is_type_priority_tag(vport) &&
+ !(vport->vmid_flag & LPFC_VMID_QFPA_CMPL) &&
+ (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA)) {
+ vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
+ return -EAGAIN;
+ }
+
+ /* search if the UUID has already been mapped to the VMID */
+ len = strlen(uuid);
+ hash = lpfc_vmid_hash_fn(uuid, len);
+
+ /* search for the VMID in the table */
+ read_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+ /* if found, check if its already registered */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ read_unlock(&vport->vmid_lock);
+ lpfc_vmid_update_entry(vport, iodir, vmp, tag);
+ rc = 0;
+ } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
+ vmp->flag & LPFC_VMID_DE_REGISTER)) {
+ /* else if register or dereg request has already been sent */
+ /* Hence VMID tag will not be added for this I/O */
+ read_unlock(&vport->vmid_lock);
+ rc = -EBUSY;
+ } else {
+ /* The VMID was not found in the hashtable. At this point, */
+ /* drop the read lock first before proceeding further */
+ read_unlock(&vport->vmid_lock);
+ /* start the process to obtain one as per the */
+ /* type of the VMID indicated */
+ write_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+ /* while the read lock was released, in case the entry was */
+ /* added by other context or is in process of being added */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ lpfc_vmid_update_entry(vport, iodir, vmp, tag);
+ write_unlock(&vport->vmid_lock);
+ return 0;
+ } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
+ write_unlock(&vport->vmid_lock);
+ return -EBUSY;
+ }
+
+ /* else search and allocate a free slot in the hash table */
+ if (vport->cur_vmid_cnt < vport->max_vmid) {
+ for (i = 0; i < vport->max_vmid; i++) {
+ vmp = vport->vmid + i;
+ if (vmp->flag == LPFC_VMID_SLOT_FREE)
+ break;
+ }
+ if (i == vport->max_vmid)
+ vmp = NULL;
+ } else {
+ vmp = NULL;
+ }
+
+ if (!vmp) {
+ write_unlock(&vport->vmid_lock);
+ return -ENOMEM;
+ }
+
+ /* Add the vmid and register */
+ lpfc_put_vmid_in_hashtable(vport, hash, vmp);
+ vmp->vmid_len = len;
+ memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
+ vmp->io_rd_cnt = 0;
+ vmp->io_wr_cnt = 0;
+ vmp->flag = LPFC_VMID_SLOT_USED;
+
+ vmp->delete_inactive =
+ vport->vmid_inactivity_timeout ? 1 : 0;
+
+ /* if type priority tag, get next available VMID */
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ lpfc_vmid_assign_cs_ctl(vport, vmp);
+
+ /* allocate the per cpu variable for holding */
+ /* the last access time stamp only if VMID is enabled */
+ if (!vmp->last_io_time)
+ vmp->last_io_time = __alloc_percpu(sizeof(u64),
+ __alignof__(struct
+ lpfc_vmid));
+ if (!vmp->last_io_time) {
+ hash_del(&vmp->hnode);
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ write_unlock(&vport->vmid_lock);
+ return -EIO;
+ }
+
+ write_unlock(&vport->vmid_lock);
+
+ /* complete transaction with switch */
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ rc = lpfc_vmid_uvem(vport, vmp, true);
+ else if (vport->phba->cfg_vmid_app_header)
+ rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
+ if (!rc) {
+ write_lock(&vport->vmid_lock);
+ vport->cur_vmid_cnt++;
+ vmp->flag |= LPFC_VMID_REQ_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ } else {
+ write_lock(&vport->vmid_lock);
+ hash_del(&vmp->hnode);
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ free_percpu(vmp->last_io_time);
+ write_unlock(&vport->vmid_lock);
+ return -EIO;
+ }
+
+ /* finally, enable the idle timer once */
+ if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
+ mod_timer(&vport->phba->inactive_vmid_poll,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
+ vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
+ }
+ }
+ return rc;
+}
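
The hash routine moved into lpfc_vmid.c above is simple enough to exercise outside the kernel. A standalone demo follows; LPFC_VMID_HASH_SHIFT and LPFC_VMID_HASH_MASK live in lpfc.h and are not shown in this diff, so the values below are assumed placeholders chosen only to make the demo compile:

#include <stdio.h>
#include <string.h>

/* Assumed placeholder values -- the real constants are defined in lpfc.h. */
#define LPFC_VMID_HASH_SHIFT	6
#define LPFC_VMID_HASH_MASK	0xff

/* Mirrors lpfc_vmid_hash_fn(): case-fold each byte, mix, then mask. */
static int vmid_hash(const char *vmid, int len)
{
	int c, hash = 0;

	if (len == 0)
		return 0;
	while (len--) {
		c = *vmid++;
		if (c >= 'A' && c <= 'Z')
			c += 'a' - 'A';
		hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
			(c >> LPFC_VMID_HASH_SHIFT)) * 19;
	}
	return hash & LPFC_VMID_HASH_MASK;
}

int main(void)
{
	const char *uuid = "123e4567-e89b-12d3-a456-426614174000"; /* example */

	printf("bucket = %d\n", vmid_hash(uuid, strlen(uuid)));
	return 0;
}
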
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 01cd01787b0f..0e1cb4aa4ca2 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -954,7 +954,7 @@ struct mpi3mr_ioc {
u16 active_poll_qcount;
u16 requested_poll_qcount;
- struct device *bsg_dev;
+ struct device bsg_dev;
struct request_queue *bsg_queue;
u8 stop_bsgs;
u8 *logdata_buf;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 9ab1762468ad..9baac224b213 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -1487,28 +1487,28 @@ static int mpi3mr_bsg_request(struct bsg_job *job)
*/
void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc)
{
+ struct device *bsg_dev = &mrioc->bsg_dev;
if (!mrioc->bsg_queue)
return;
bsg_remove_queue(mrioc->bsg_queue);
mrioc->bsg_queue = NULL;
- device_del(mrioc->bsg_dev);
- put_device(mrioc->bsg_dev);
- kfree(mrioc->bsg_dev);
+ device_del(bsg_dev);
+ put_device(bsg_dev);
}
/**
* mpi3mr_bsg_node_release -release bsg device node
* @dev: bsg device node
*
- * decrements bsg dev reference count
+ * decrements bsg dev parent reference count
*
* Return:Nothing
*/
static void mpi3mr_bsg_node_release(struct device *dev)
{
- put_device(dev);
+ put_device(dev->parent);
}
/**
@@ -1521,41 +1521,37 @@ static void mpi3mr_bsg_node_release(struct device *dev)
*/
void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
{
- mrioc->bsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!mrioc->bsg_dev) {
- ioc_err(mrioc, "bsg device mem allocation failed\n");
- return;
- }
+ struct device *bsg_dev = &mrioc->bsg_dev;
+ struct device *parent = &mrioc->shost->shost_gendev;
+
+ device_initialize(bsg_dev);
+
+ bsg_dev->parent = get_device(parent);
+ bsg_dev->release = mpi3mr_bsg_node_release;
- device_initialize(mrioc->bsg_dev);
- dev_set_name(mrioc->bsg_dev, "mpi3mrctl%u", mrioc->id);
+ dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id);
- if (device_add(mrioc->bsg_dev)) {
+ if (device_add(bsg_dev)) {
ioc_err(mrioc, "%s: bsg device add failed\n",
- dev_name(mrioc->bsg_dev));
- goto err_device_add;
+ dev_name(bsg_dev));
+ put_device(bsg_dev);
+ return;
}
- mrioc->bsg_dev->release = mpi3mr_bsg_node_release;
-
- mrioc->bsg_queue = bsg_setup_queue(mrioc->bsg_dev, dev_name(mrioc->bsg_dev),
+ mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev),
mpi3mr_bsg_request, NULL, 0);
if (IS_ERR(mrioc->bsg_queue)) {
ioc_err(mrioc, "%s: bsg registration failed\n",
- dev_name(mrioc->bsg_dev));
- goto err_setup_queue;
+ dev_name(bsg_dev));
+ device_del(bsg_dev);
+ put_device(bsg_dev);
+ return;
}
blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS);
blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS);
return;
-
-err_setup_queue:
- device_del(mrioc->bsg_dev);
- put_device(mrioc->bsg_dev);
-err_device_add:
- kfree(mrioc->bsg_dev);
}
/**
@@ -1693,7 +1689,7 @@ logging_level_store(struct device *dev,
static DEVICE_ATTR_RW(logging_level);
/**
- * adapter_state_show - SysFS callback for adapter state show
+ * adp_state_show() - SysFS callback for adapter state show
* @dev: class device
* @attr: Device attributes
* @buf: Buffer to copy
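
The mpi3mr changes above replace a kzalloc'd struct device with one embedded in the IOC structure, so the reference counting has to follow the usual driver-core rules rather than kfree(). A condensed sketch of that lifecycle (mrioc/shost fields as in the hunk; everything else is illustrative):

#include <linux/device.h>

/* Bring-up: initialize, take a reference on the parent, name, add. */
static int example_bsg_node_add(struct mpi3mr_ioc *mrioc)
{
	struct device *bsg_dev = &mrioc->bsg_dev;

	device_initialize(bsg_dev);
	bsg_dev->parent = get_device(&mrioc->shost->shost_gendev);
	bsg_dev->release = mpi3mr_bsg_node_release;	/* drops the parent ref */
	dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id);

	if (device_add(bsg_dev)) {
		/* Once device_initialize() has run, put_device() (never
		 * kfree) is the only valid way to dispose of the device.
		 */
		put_device(bsg_dev);
		return -ENODEV;
	}
	return 0;
}

/* Tear-down: unhook from the device hierarchy, then drop the reference. */
static void example_bsg_node_del(struct mpi3mr_ioc *mrioc)
{
	device_del(&mrioc->bsg_dev);
	put_device(&mrioc->bsg_dev);
}
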
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 71585528e8db..e885c1dbf61f 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1239,7 +1239,8 @@ static void myrb_cleanup(struct myrb_hba *cb)
myrb_unmap(cb);
if (cb->mmio_base) {
- cb->disable_intr(cb->io_base);
+ if (cb->disable_intr)
+ cb->disable_intr(cb->io_base);
iounmap(cb->mmio_base);
}
if (cb->irq)
@@ -3413,9 +3414,13 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
mutex_init(&cb->dcmd_mutex);
mutex_init(&cb->dma_mutex);
cb->pdev = pdev;
+ cb->host = shost;
- if (pci_enable_device(pdev))
- goto failure;
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ scsi_host_put(shost);
+ return NULL;
+ }
if (privdata->hw_init == DAC960_PD_hw_init ||
privdata->hw_init == DAC960_P_hw_init) {
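
The myrb_detect() fix above follows the usual early-error-path rule: before any MMIO/interrupt setup has happened, a pci_enable_device() failure must undo only the host allocation. A minimal sketch of that shape, with a hypothetical host template standing in for the driver's own:

#include <linux/pci.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template example_template = {
	.name = "example",	/* hypothetical template for illustration */
};

static struct Scsi_Host *example_detect(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = scsi_host_alloc(&example_template, 0);

	if (!shost)
		return NULL;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		scsi_host_put(shost);	/* undo only what was done so far */
		return NULL;
	}
	return shost;
}
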
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 3d5cd337a2a6..bfce60183a6e 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1434,7 +1434,7 @@ static int pmcraid_notify_aen(
return -EINVAL;
}
- /* send genetlink multicast message to notify appplications */
+ /* send genetlink multicast message to notify applications */
genlmsg_end(skb, msg_header);
result = genlmsg_multicast(&pmcraid_event_family, skb,
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index e57cc22453d0..4750ec5789a8 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -893,7 +893,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
return -EINVAL;
}
- /* Record LUN number for later use if we neeed them */
+ /* Record LUN number for later use if we need them */
io_req->lun = (int)sc_cmd->device->lun;
/* Obtain free SQE */
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 0ab595c0870a..1e7f4d138e06 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4037,7 +4037,6 @@ qla1280_setup(char *s)
{
char *cp, *ptr;
unsigned long val;
- int toke;
cp = s;
@@ -4052,7 +4051,7 @@ qla1280_setup(char *s)
} else
val = simple_strtoul(ptr, &ptr, 0);
- switch ((toke = qla1280_get_token(cp))) {
+ switch (qla1280_get_token(cp)) {
case TOKEN_NVRAM:
if (!val)
driver_setup.no_nvram = 1;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index e6b5c4ccce97..346d47b61c07 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -591,7 +591,6 @@ qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
}
kfree(req->outstanding_cmds);
kfree(req);
- req = NULL;
}
static void
@@ -617,7 +616,6 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
mutex_unlock(&ha->vport_lock);
}
kfree(rsp);
- rsp = NULL;
}
int
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index a02235a6a8e9..cb97f625970d 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -48,13 +48,6 @@ MODULE_PARM_DESC(qlini_mode,
"when ready "
"\"enabled\" (default) - initiator mode will always stay enabled.");
-static int ql_dm_tgt_ex_pct = 0;
-module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
- "For Dual Mode (qlini_mode=dual), this parameter determines "
- "the percentage of exchanges/cmds FW will allocate resources "
- "for Target mode.");
-
int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index cdaca13ac1f1..49ef864df581 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2039,12 +2039,13 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
scmd->cmnd[4] = SCSI_REMOVAL_PREVENT;
scmd->cmnd[5] = 0;
scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+ scmd->allowed = 5;
req->rq_flags |= RQF_QUIET;
req->timeout = 10 * HZ;
- scmd->allowed = 5;
+ req->end_io = eh_lock_door_done;
- blk_execute_rq_nowait(req, true, eh_lock_door_done);
+ blk_execute_rq_nowait(req, true);
}
/**
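
The scsi_error.c hunk above (and the sg/st hunks later in this series) reflects the block-layer change that moved the completion callback out of blk_execute_rq_nowait()'s argument list and onto the request itself. A minimal sketch of the new calling convention, with request allocation and command setup elided:

#include <linux/blk-mq.h>

/* New convention: stash end_io/end_io_data on the request, then submit. */
static void example_submit_async(struct request *rq, bool at_head,
				 rq_end_io_fn *done, void *private)
{
	rq->end_io = done;
	rq->end_io_data = private;
	blk_execute_rq_nowait(rq, at_head);
}
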
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e9db7da0c79c..6ffc9e4258a8 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -779,7 +779,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */
- blk_stat = BLK_STS_AGAIN;
+ blk_stat = BLK_STS_TRANSPORT;
fallthrough;
default:
action = ACTION_FAIL;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 546a9e3cfbec..43949798a2e4 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -573,7 +573,6 @@ struct bus_type scsi_bus_type = {
.pm = &scsi_bus_pm_ops,
#endif
};
-EXPORT_SYMBOL_GPL(scsi_bus_type);
int scsi_sysfs_register(void)
{
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 749316462075..895b56c8f25e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3521,7 +3521,7 @@ static int sd_probe(struct device *dev)
error = device_add_disk(dev, gd, NULL);
if (error) {
put_device(&sdkp->disk_dev);
- blk_cleanup_disk(gd);
+ put_disk(gd);
goto out;
}
@@ -3542,7 +3542,6 @@ static int sd_probe(struct device *dev)
out_put:
put_disk(gd);
out_free:
- sd_zbc_release_disk(sdkp);
kfree(sdkp);
out:
scsi_autopm_put_device(sdp);
@@ -3579,7 +3578,7 @@ static void scsi_disk_release(struct device *dev)
struct scsi_disk *sdkp = to_scsi_disk(dev);
ida_free(&sd_index_ida, sdkp->index);
- sd_zbc_release_disk(sdkp);
+ sd_zbc_free_zone_info(sdkp);
put_device(&sdkp->device->sdev_gendev);
free_opal_dev(sdkp->opal_dev);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 2abad54fd23f..5eea762f84d1 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -241,7 +241,7 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
#ifdef CONFIG_BLK_DEV_ZONED
-void sd_zbc_release_disk(struct scsi_disk *sdkp);
+void sd_zbc_free_zone_info(struct scsi_disk *sdkp);
int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]);
int sd_zbc_revalidate_zones(struct scsi_disk *sdkp);
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
@@ -256,7 +256,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
#else /* CONFIG_BLK_DEV_ZONED */
-static inline void sd_zbc_release_disk(struct scsi_disk *sdkp) {}
+static inline void sd_zbc_free_zone_info(struct scsi_disk *sdkp) {}
static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
{
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 5b9fad70aa88..6acc4f406eb8 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -786,8 +786,11 @@ static int sd_zbc_init_disk(struct scsi_disk *sdkp)
return 0;
}
-static void sd_zbc_clear_zone_info(struct scsi_disk *sdkp)
+void sd_zbc_free_zone_info(struct scsi_disk *sdkp)
{
+ if (!sdkp->zone_wp_update_buf)
+ return;
+
/* Serialize against revalidate zones */
mutex_lock(&sdkp->rev_mutex);
@@ -802,12 +805,6 @@ static void sd_zbc_clear_zone_info(struct scsi_disk *sdkp)
mutex_unlock(&sdkp->rev_mutex);
}
-void sd_zbc_release_disk(struct scsi_disk *sdkp)
-{
- if (sd_is_zoned(sdkp))
- sd_zbc_clear_zone_info(sdkp);
-}
-
static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
{
struct scsi_disk *sdkp = scsi_disk(disk);
@@ -914,12 +911,15 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
u32 zone_blocks = 0;
int ret;
- if (!sd_is_zoned(sdkp))
+ if (!sd_is_zoned(sdkp)) {
/*
- * Device managed or normal SCSI disk,
- * no special handling required
+ * Device managed or normal SCSI disk, no special handling
+ * required. Nevertheless, free the disk zone information in
+ * case the device type changed.
*/
+ sd_zbc_free_zone_info(sdkp);
return 0;
+ }
/* READ16/WRITE16 is mandatory for ZBC disks */
sdkp->device->use_16_for_rw = 1;
@@ -928,11 +928,11 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
if (!blk_queue_is_zoned(q)) {
/*
* This can happen for a host aware disk with partitions.
- * The block device zone information was already cleared
- * by blk_queue_set_zoned(). Only clear the scsi disk zone
+ * The block device zone model was already cleared by
+ * blk_queue_set_zoned(). Only free the scsi disk zone
* information and exit early.
*/
- sd_zbc_clear_zone_info(sdkp);
+ sd_zbc_free_zone_info(sdkp);
return 0;
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cbffa712b9f3..118c7b4a8af2 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -831,7 +831,8 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
- blk_execute_rq_nowait(srp->rq, at_head, sg_rq_end_io);
+ srp->rq->end_io = sg_rq_end_io;
+ blk_execute_rq_nowait(srp->rq, at_head);
return 0;
}
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index c4c48272d8ad..2e40320129c0 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1082,7 +1082,7 @@ struct pqi_stream_data {
};
struct pqi_scsi_dev {
- int devtype; /* as reported by INQUIRY commmand */
+ int devtype; /* as reported by INQUIRY command */
u8 device_type; /* as reported by */
/* BMIC_IDENTIFY_PHYSICAL_DEVICE */
/* only valid for devtype = TYPE_DISK */
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 56a093a90b92..850172a2b8f1 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -579,9 +579,10 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
memcpy(scmd->cmnd, cmd, scmd->cmd_len);
req->timeout = timeout;
scmd->allowed = retries;
+ req->end_io = st_scsi_execute_end;
req->end_io_data = SRpnt;
- blk_execute_rq_nowait(req, true, st_scsi_execute_end);
+ blk_execute_rq_nowait(req, true);
return 0;
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 9a0bba5a51a7..ca3530982e52 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -54,7 +54,6 @@
#define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
(((MINOR_) & 0xff)))
-
#define VMSTOR_PROTO_VERSION_WIN6 VMSTOR_PROTO_VERSION(2, 0)
#define VMSTOR_PROTO_VERSION_WIN7 VMSTOR_PROTO_VERSION(4, 2)
#define VMSTOR_PROTO_VERSION_WIN8 VMSTOR_PROTO_VERSION(5, 1)
@@ -136,21 +135,11 @@ struct hv_fc_wwn_packet {
*/
#define STORVSC_MAX_CMD_LEN 0x10
-#define POST_WIN7_STORVSC_SENSE_BUFFER_SIZE 0x14
-#define PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE 0x12
-
+/* Sense buffer size is the same for all versions since Windows 8 */
#define STORVSC_SENSE_BUFFER_SIZE 0x14
#define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14
/*
- * Sense buffer size changed in win8; have a run-time
- * variable to track the size we should use. This value will
- * likely change during protocol negotiation but it is valid
- * to start by assuming pre-Win8.
- */
-static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
-
-/*
* The storage protocol version is determined during the
* initial exchange with the host. It will indicate which
* storage functionality is available in the host.
@@ -177,18 +166,6 @@ do { \
dev_warn(&(dev)->device, fmt, ##__VA_ARGS__); \
} while (0)
-struct vmscsi_win8_extension {
- /*
- * The following were added in Windows 8
- */
- u16 reserve;
- u8 queue_tag;
- u8 queue_action;
- u32 srb_flags;
- u32 time_out_value;
- u32 queue_sort_ey;
-} __packed;
-
struct vmscsi_request {
u16 length;
u8 srb_status;
@@ -214,46 +191,23 @@ struct vmscsi_request {
/*
* The following was added in win8.
*/
- struct vmscsi_win8_extension win8_extension;
+ u16 reserve;
+ u8 queue_tag;
+ u8 queue_action;
+ u32 srb_flags;
+ u32 time_out_value;
+ u32 queue_sort_ey;
} __attribute((packed));
/*
- * The list of storage protocols in order of preference.
+ * The list of windows version in order of preference.
*/
-struct vmstor_protocol {
- int protocol_version;
- int sense_buffer_size;
- int vmscsi_size_delta;
-};
-
-static const struct vmstor_protocol vmstor_protocols[] = {
- {
+static const int protocol_version[] = {
VMSTOR_PROTO_VERSION_WIN10,
- POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
- 0
- },
- {
VMSTOR_PROTO_VERSION_WIN8_1,
- POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
- 0
- },
- {
VMSTOR_PROTO_VERSION_WIN8,
- POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
- 0
- },
- {
- VMSTOR_PROTO_VERSION_WIN7,
- PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
- sizeof(struct vmscsi_win8_extension),
- },
- {
- VMSTOR_PROTO_VERSION_WIN6,
- PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
- sizeof(struct vmscsi_win8_extension),
- }
};
@@ -409,9 +363,7 @@ static void storvsc_on_channel_callback(void *context);
#define STORVSC_IDE_MAX_CHANNELS 1
/*
- * Upper bound on the size of a storvsc packet. vmscsi_size_delta is not
- * included in the calculation because it is set after STORVSC_MAX_PKT_SIZE
- * is used in storvsc_connect_to_vsp
+ * Upper bound on the size of a storvsc packet.
*/
#define STORVSC_MAX_PKT_SIZE (sizeof(struct vmpacket_descriptor) +\
sizeof(struct vstor_packet))
@@ -453,17 +405,6 @@ struct storvsc_device {
unsigned char target_id;
/*
- * The size of the vmscsi_request has changed in win8. The
- * additional size is because of new elements added to the
- * structure. These elements are valid only when we are talking
- * to a win8 host.
- * Track the correction to size we need to apply. This value
- * will likely change during protocol negotiation but it is
- * valid to start by assuming pre-Win8.
- */
- int vmscsi_size_delta;
-
- /*
* Max I/O, the device can support.
*/
u32 max_transfer_bytes;
@@ -538,7 +479,7 @@ static void storvsc_host_scan(struct work_struct *work)
host = host_device->host;
/*
* Before scanning the host, first check to see if any of the
- * currrently known devices have been hot removed. We issue a
+ * currently known devices have been hot removed. We issue a
* "unit ready" command against all currently known devices.
* This I/O will result in an error for devices that have been
* removed. As part of handling the I/O error, we remove the device.
@@ -795,8 +736,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
vstor_packet->sub_channel_count = num_sc;
ret = vmbus_sendpacket(device->channel, vstor_packet,
- (sizeof(struct vstor_packet) -
- stor_device->vmscsi_size_delta),
+ sizeof(struct vstor_packet),
VMBUS_RQST_INIT,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -864,8 +804,7 @@ static int storvsc_execute_vstor_op(struct hv_device *device,
vstor_packet->flags = REQUEST_COMPLETION_FLAG;
ret = vmbus_sendpacket(device->channel, vstor_packet,
- (sizeof(struct vstor_packet) -
- stor_device->vmscsi_size_delta),
+ sizeof(struct vstor_packet),
VMBUS_RQST_INIT,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -915,14 +854,13 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
* Query host supported protocol version.
*/
- for (i = 0; i < ARRAY_SIZE(vmstor_protocols); i++) {
+ for (i = 0; i < ARRAY_SIZE(protocol_version); i++) {
/* reuse the packet for version range supported */
memset(vstor_packet, 0, sizeof(struct vstor_packet));
vstor_packet->operation =
VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
- vstor_packet->version.major_minor =
- vmstor_protocols[i].protocol_version;
+ vstor_packet->version.major_minor = protocol_version[i];
/*
* The revision number is only used in Windows; set it to 0.
@@ -936,21 +874,16 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
return -EINVAL;
if (vstor_packet->status == 0) {
- vmstor_proto_version =
- vmstor_protocols[i].protocol_version;
-
- sense_buffer_size =
- vmstor_protocols[i].sense_buffer_size;
-
- stor_device->vmscsi_size_delta =
- vmstor_protocols[i].vmscsi_size_delta;
+ vmstor_proto_version = protocol_version[i];
break;
}
}
- if (vstor_packet->status != 0)
+ if (vstor_packet->status != 0) {
+ dev_err(&device->device, "Obsolete Hyper-V version\n");
return -EINVAL;
+ }
memset(vstor_packet, 0, sizeof(struct vstor_packet));
@@ -986,11 +919,10 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
cpumask_set_cpu(device->channel->target_cpu,
&stor_device->alloced_cpus);
- if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) {
- if (vstor_packet->storage_channel_properties.flags &
- STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
- process_sub_channels = true;
- }
+ if (vstor_packet->storage_channel_properties.flags &
+ STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
+ process_sub_channels = true;
+
stor_device->max_transfer_bytes =
vstor_packet->storage_channel_properties.max_transfer_bytes;
@@ -1197,7 +1129,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
* Copy over the sense_info_length, but limit to the known max
* size if Hyper-V returns a bad value.
*/
- stor_pkt->vm_srb.sense_info_length = min_t(u8, sense_buffer_size,
+ stor_pkt->vm_srb.sense_info_length = min_t(u8, STORVSC_SENSE_BUFFER_SIZE,
vstor_packet->vm_srb.sense_info_length);
if (vstor_packet->vm_srb.scsi_status != 0 ||
@@ -1289,8 +1221,8 @@ static void storvsc_on_channel_callback(void *context)
struct storvsc_cmd_request *request = NULL;
u32 pktlen = hv_pkt_datalen(desc);
u64 rqst_id = desc->trans_id;
- u32 minlen = rqst_id ? sizeof(struct vstor_packet) -
- stor_device->vmscsi_size_delta : sizeof(enum vstor_packet_operation);
+ u32 minlen = rqst_id ? sizeof(struct vstor_packet) :
+ sizeof(enum vstor_packet_operation);
if (pktlen < minlen) {
dev_err(&device->device,
@@ -1346,7 +1278,7 @@ static void storvsc_on_channel_callback(void *context)
}
memcpy(&request->vstor_packet, packet,
- (sizeof(struct vstor_packet) - stor_device->vmscsi_size_delta));
+ sizeof(struct vstor_packet));
complete(&request->wait_event);
}
}
@@ -1557,11 +1489,10 @@ static int storvsc_do_io(struct hv_device *device,
found_channel:
vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
- vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) -
- stor_device->vmscsi_size_delta);
+ vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
- vstor_packet->vm_srb.sense_info_length = sense_buffer_size;
+ vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE;
vstor_packet->vm_srb.data_transfer_length =
@@ -1574,13 +1505,11 @@ found_channel:
ret = vmbus_sendpacket_mpb_desc(outgoing_channel,
request->payload, request->payload_sz,
vstor_packet,
- (sizeof(struct vstor_packet) -
- stor_device->vmscsi_size_delta),
+ sizeof(struct vstor_packet),
(unsigned long)request);
} else {
ret = vmbus_sendpacket(outgoing_channel, vstor_packet,
- (sizeof(struct vstor_packet) -
- stor_device->vmscsi_size_delta),
+ sizeof(struct vstor_packet),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -1684,8 +1613,7 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
vstor_packet->vm_srb.path_id = stor_device->path_id;
ret = vmbus_sendpacket(device->channel, vstor_packet,
- (sizeof(struct vstor_packet) -
- stor_device->vmscsi_size_delta),
+ sizeof(struct vstor_packet),
VMBUS_RQST_RESET,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -1778,31 +1706,31 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
memset(&cmd_request->vstor_packet, 0, sizeof(struct vstor_packet));
vm_srb = &cmd_request->vstor_packet.vm_srb;
- vm_srb->win8_extension.time_out_value = 60;
+ vm_srb->time_out_value = 60;
- vm_srb->win8_extension.srb_flags |=
+ vm_srb->srb_flags |=
SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
if (scmnd->device->tagged_supported) {
- vm_srb->win8_extension.srb_flags |=
+ vm_srb->srb_flags |=
(SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE);
- vm_srb->win8_extension.queue_tag = SP_UNTAGGED;
- vm_srb->win8_extension.queue_action = SRB_SIMPLE_TAG_REQUEST;
+ vm_srb->queue_tag = SP_UNTAGGED;
+ vm_srb->queue_action = SRB_SIMPLE_TAG_REQUEST;
}
/* Build the SRB */
switch (scmnd->sc_data_direction) {
case DMA_TO_DEVICE:
vm_srb->data_in = WRITE_TYPE;
- vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
+ vm_srb->srb_flags |= SRB_FLAGS_DATA_OUT;
break;
case DMA_FROM_DEVICE:
vm_srb->data_in = READ_TYPE;
- vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
+ vm_srb->srb_flags |= SRB_FLAGS_DATA_IN;
break;
case DMA_NONE:
vm_srb->data_in = UNKNOWN_TYPE;
- vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
+ vm_srb->srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
break;
default:
/*
@@ -1966,34 +1894,16 @@ static int storvsc_probe(struct hv_device *device,
bool is_fc = ((dev_id->driver_data == SFC_GUID) ? true : false);
int target = 0;
struct storvsc_device *stor_device;
- int max_luns_per_target;
- int max_targets;
- int max_channels;
int max_sub_channels = 0;
/*
- * Based on the windows host we are running on,
- * set state to properly communicate with the host.
+ * We support sub-channels for storage on SCSI and FC controllers.
+ * The number of sub-channels offered is based on the number of
+ * VCPUs in the guest.
*/
-
- if (vmbus_proto_version < VERSION_WIN8) {
- max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET;
- max_targets = STORVSC_IDE_MAX_TARGETS;
- max_channels = STORVSC_IDE_MAX_CHANNELS;
- } else {
- max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
- max_targets = STORVSC_MAX_TARGETS;
- max_channels = STORVSC_MAX_CHANNELS;
- /*
- * On Windows8 and above, we support sub-channels for storage
- * on SCSI and FC controllers.
- * The number of sub-channels offerred is based on the number of
- * VCPUs in the guest.
- */
- if (!dev_is_ide)
- max_sub_channels =
- (num_cpus - 1) / storvsc_vcpus_per_sub_channel;
- }
+ if (!dev_is_ide)
+ max_sub_channels =
+ (num_cpus - 1) / storvsc_vcpus_per_sub_channel;
scsi_driver.can_queue = max_outstanding_req_per_channel *
(max_sub_channels + 1) *
@@ -2022,7 +1932,6 @@ static int storvsc_probe(struct hv_device *device,
init_waitqueue_head(&stor_device->waiting_to_drain);
stor_device->device = device;
stor_device->host = host;
- stor_device->vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
spin_lock_init(&stor_device->lock);
hv_set_drvdata(device, stor_device);
dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);
@@ -2046,9 +1955,9 @@ static int storvsc_probe(struct hv_device *device,
break;
case SCSI_GUID:
- host->max_lun = max_luns_per_target;
- host->max_id = max_targets;
- host->max_channel = max_channels - 1;
+ host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
+ host->max_id = STORVSC_MAX_TARGETS;
+ host->max_channel = STORVSC_MAX_CHANNELS - 1;
break;
default:
@@ -2235,10 +2144,6 @@ static int __init storvsc_drv_init(void)
* than the ring buffer size since that page is reserved for
* the ring buffer indices) by the max request size (which is
* vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
- *
- * The computation underestimates max_outstanding_req_per_channel
- * for Win7 and older hosts because it does not take into account
- * the vmscsi_size_delta correction to the max request size.
*/
max_outstanding_req_per_channel =
((storvsc_ringbuffer_size - PAGE_SIZE) /
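
With pre-Win8 host support removed above, storvsc protocol negotiation reduces to walking a preference-ordered list of version constants and keeping the first one the host accepts. A hedged, stripped-down sketch of that loop shape; example_query_version() is a hypothetical stand-in for filling, sending, and checking a vstor_packet per iteration:

static const int protocol_version[] = {
	VMSTOR_PROTO_VERSION_WIN10,
	VMSTOR_PROTO_VERSION_WIN8_1,
	VMSTOR_PROTO_VERSION_WIN8,
};

static int example_negotiate(struct hv_device *device)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(protocol_version); i++) {
		/* hypothetical helper: send QUERY_PROTOCOL_VERSION packet */
		if (example_query_version(device, protocol_version[i]) == 0) {
			vmstor_proto_version = protocol_version[i];
			return 0;
		}
	}
	dev_err(&device->device, "Obsolete Hyper-V version\n");
	return -EINVAL;
}
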
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
index ec58091fc948..c0c4f895d76e 100644
--- a/drivers/slimbus/qcom-ctrl.c
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -510,10 +510,8 @@ static int qcom_slim_probe(struct platform_device *pdev)
}
ctrl->irq = platform_get_irq(pdev, 0);
- if (ctrl->irq < 0) {
- dev_err(&pdev->dev, "no slimbus IRQ\n");
+ if (ctrl->irq < 0)
return ctrl->irq;
- }
sctrl = &ctrl->ctrl;
sctrl->dev = &pdev->dev;
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 7040293c2ee8..0aa8408464ad 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1434,6 +1434,7 @@ static int of_qcom_slim_ngd_register(struct device *parent,
const struct of_device_id *match;
struct device_node *node;
u32 id;
+ int ret;
match = of_match_node(qcom_slim_ngd_dt_match, parent->of_node);
data = match->data;
@@ -1455,7 +1456,17 @@ static int of_qcom_slim_ngd_register(struct device *parent,
}
ngd->id = id;
ngd->pdev->dev.parent = parent;
- ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME;
+
+ ret = driver_set_override(&ngd->pdev->dev,
+ &ngd->pdev->driver_override,
+ QCOM_SLIM_NGD_DRV_NAME,
+ strlen(QCOM_SLIM_NGD_DRV_NAME));
+ if (ret) {
+ platform_device_put(ngd->pdev);
+ kfree(ngd);
+ of_node_put(node);
+ return ret;
+ }
ngd->pdev->dev.of_node = node;
ctrl->ngd = ngd;
@@ -1526,13 +1537,11 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->base))
return PTR_ERR(ctrl->base);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev, "no slimbus IRQ resource\n");
- return -ENODEV;
- }
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
- ret = devm_request_irq(dev, res->start, qcom_slim_ngd_interrupt,
+ ret = devm_request_irq(dev, ret, qcom_slim_ngd_interrupt,
IRQF_TRIGGER_HIGH, "slim-ngd", ctrl);
if (ret) {
dev_err(&pdev->dev, "request IRQ failed\n");
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index c5aae42673d3..86ccf5970bc1 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -14,6 +14,7 @@ source "drivers/soc/ixp4xx/Kconfig"
source "drivers/soc/litex/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/microchip/Kconfig"
+source "drivers/soc/pxa/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
source "drivers/soc/rockchip/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index e8228c4e5d18..919716e0e700 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_SOC_XWAY) += lantiq/
obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex/
obj-y += mediatek/
obj-y += microchip/
+obj-y += pxa/
obj-y += amlogic/
obj-y += qcom/
obj-y += renesas/
diff --git a/drivers/soc/ixp4xx/ixp4xx-qmgr.c b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
index 9154c7029b05..291086bb9313 100644
--- a/drivers/soc/ixp4xx/ixp4xx-qmgr.c
+++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
@@ -459,7 +459,7 @@ static const struct of_device_id ixp4xx_qmgr_of_match[] = {
static struct platform_driver ixp4xx_qmgr_driver = {
.driver = {
.name = "ixp4xx-qmgr",
- .of_match_table = of_match_ptr(ixp4xx_qmgr_of_match),
+ .of_match_table = ixp4xx_qmgr_of_match,
},
.probe = ixp4xx_qmgr_probe,
.remove = ixp4xx_qmgr_remove,
diff --git a/arch/arm/plat-pxa/Kconfig b/drivers/soc/pxa/Kconfig
index 6f7a0a39c2b9..c5c265aa4f07 100644
--- a/arch/arm/plat-pxa/Kconfig
+++ b/drivers/soc/pxa/Kconfig
@@ -1,9 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
-if PLAT_PXA
+config PLAT_PXA
+ bool
config PXA_SSP
tristate
help
Enable support for PXA2xx SSP ports
-
-endif
diff --git a/arch/arm/plat-pxa/Makefile b/drivers/soc/pxa/Makefile
index 349ea0af8450..413deceddbdd 100644
--- a/arch/arm/plat-pxa/Makefile
+++ b/drivers/soc/pxa/Makefile
@@ -1,8 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for code common across different PXA processor families
-#
-ccflags-$(CONFIG_ARCH_MMP) := -I$(srctree)/$(src)/include
obj-$(CONFIG_PXA3xx) += mfp.o
obj-$(CONFIG_ARCH_MMP) += mfp.o
diff --git a/arch/arm/plat-pxa/mfp.c b/drivers/soc/pxa/mfp.c
index 17fc4f33f35b..6220ba321cfc 100644
--- a/arch/arm/plat-pxa/mfp.c
+++ b/drivers/soc/pxa/mfp.c
@@ -15,7 +15,7 @@
#include <linux/init.h>
#include <linux/io.h>
-#include <plat/mfp.h>
+#include <linux/soc/pxa/mfp.h>
#define MFPR_SIZE (PAGE_SIZE)
diff --git a/arch/arm/plat-pxa/ssp.c b/drivers/soc/pxa/ssp.c
index 563440315acd..563440315acd 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/drivers/soc/pxa/ssp.c
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
index 384461b70684..15a3970e3509 100644
--- a/drivers/soc/rockchip/grf.c
+++ b/drivers/soc/rockchip/grf.c
@@ -165,12 +165,14 @@ static int __init rockchip_grf_init(void)
return -ENODEV;
if (!match || !match->data) {
pr_err("%s: missing grf data\n", __func__);
+ of_node_put(np);
return -EINVAL;
}
grf_info = match->data;
grf = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(grf)) {
pr_err("%s: could not get grf syscon\n", __func__);
return PTR_ERR(grf);
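
The grf.c fix above is the standard reference-balancing rule for of_find_matching_node_and_match(): the returned node carries a reference that must be dropped on every exit path once it is no longer needed, including the success path after the syscon lookup. A small sketch of the balanced pattern:

#include <linux/mfd/syscon.h>
#include <linux/of.h>

static struct regmap *example_get_grf(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct regmap *grf;

	np = of_find_matching_node_and_match(NULL, matches, &match);
	if (!np)
		return ERR_PTR(-ENODEV);
	if (!match || !match->data) {
		of_node_put(np);	/* error path: drop the node reference */
		return ERR_PTR(-EINVAL);
	}

	grf = syscon_node_to_regmap(np);
	of_node_put(np);		/* success path: reference no longer needed */
	return grf;
}
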
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index c77ecf61818b..5611d14d3ba2 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -39,6 +39,7 @@
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
+#include <linux/power_supply.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -108,6 +109,7 @@
#define PMC_USB_DEBOUNCE_DEL 0xec
#define PMC_USB_AO 0xf0
+#define PMC_SCRATCH37 0x130
#define PMC_SCRATCH41 0x140
#define PMC_WAKE2_MASK 0x160
@@ -1101,8 +1103,7 @@ static struct notifier_block tegra_pmc_reboot_notifier = {
.notifier_call = tegra_pmc_reboot_notify,
};
-static int tegra_pmc_restart_notify(struct notifier_block *this,
- unsigned long action, void *data)
+static void tegra_pmc_restart(void)
{
u32 value;
@@ -1110,14 +1111,31 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
value = tegra_pmc_readl(pmc, PMC_CNTRL);
value |= PMC_CNTRL_MAIN_RST;
tegra_pmc_writel(pmc, value, PMC_CNTRL);
+}
+
+static int tegra_pmc_restart_handler(struct sys_off_data *data)
+{
+ tegra_pmc_restart();
return NOTIFY_DONE;
}
-static struct notifier_block tegra_pmc_restart_handler = {
- .notifier_call = tegra_pmc_restart_notify,
- .priority = 128,
-};
+static int tegra_pmc_power_off_handler(struct sys_off_data *data)
+{
+ /*
+ * Reboot Nexus 7 into special bootloader mode if USB cable is
+ * connected in order to display battery status and power off.
+ */
+ if (of_machine_is_compatible("asus,grouper") &&
+ power_supply_is_system_supplied()) {
+ const u32 go_to_charger_mode = 0xa5a55a5a;
+
+ tegra_pmc_writel(pmc, go_to_charger_mode, PMC_SCRATCH37);
+ tegra_pmc_restart();
+ }
+
+ return NOTIFY_DONE;
+}
static int powergate_show(struct seq_file *s, void *data)
{
@@ -2880,6 +2898,42 @@ static int tegra_pmc_probe(struct platform_device *pdev)
}
/*
+ * PMC should be last resort for restarting since it soft-resets
+ * CPU without resetting everything else.
+ */
+ err = devm_register_reboot_notifier(&pdev->dev,
+ &tegra_pmc_reboot_notifier);
+ if (err) {
+ dev_err(&pdev->dev, "unable to register reboot notifier, %d\n",
+ err);
+ return err;
+ }
+
+ err = devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_RESTART,
+ SYS_OFF_PRIO_LOW,
+ tegra_pmc_restart_handler, NULL);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register sys-off handler: %d\n",
+ err);
+ return err;
+ }
+
+ /*
+ * PMC should be primary power-off method if it soft-resets CPU,
+ * asking bootloader to shutdown hardware.
+ */
+ err = devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE,
+ tegra_pmc_power_off_handler, NULL);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register sys-off handler: %d\n",
+ err);
+ return err;
+ }
+
+ /*
* PCLK clock rate can't be retrieved using CLK API because it
* causes lockup if CPU enters LP2 idle state from some other
* CLK notifier, hence we're caching the rate's value locally.
@@ -2910,28 +2964,13 @@ static int tegra_pmc_probe(struct platform_device *pdev)
goto cleanup_sysfs;
}
- err = devm_register_reboot_notifier(&pdev->dev,
- &tegra_pmc_reboot_notifier);
- if (err) {
- dev_err(&pdev->dev, "unable to register reboot notifier, %d\n",
- err);
- goto cleanup_debugfs;
- }
-
- err = register_restart_handler(&tegra_pmc_restart_handler);
- if (err) {
- dev_err(&pdev->dev, "unable to register restart handler, %d\n",
- err);
- goto cleanup_debugfs;
- }
-
err = tegra_pmc_pinctrl_init(pmc);
if (err)
- goto cleanup_restart_handler;
+ goto cleanup_debugfs;
err = tegra_pmc_regmap_init(pmc);
if (err < 0)
- goto cleanup_restart_handler;
+ goto cleanup_debugfs;
err = tegra_powergate_init(pmc, pdev->dev.of_node);
if (err < 0)
@@ -2954,8 +2993,6 @@ static int tegra_pmc_probe(struct platform_device *pdev)
cleanup_powergates:
tegra_powergate_remove_all(pdev->dev.of_node);
-cleanup_restart_handler:
- unregister_restart_handler(&tegra_pmc_restart_handler);
cleanup_debugfs:
debugfs_remove(pmc->debugfs);
cleanup_sysfs:
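The pmc.c change above moves restart handling from register_restart_handler() to the sys-off API and adds a power-off handler. A minimal sketch of how a driver registers such a handler, assuming only the devm_register_sys_off_handler() interface shown in the hunk; the callback body and the example_* names are placeholders:

#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>

static int example_restart_handler(struct sys_off_data *data)
{
	/* Placeholder: poke a reset register via data->cb_data here. */
	return NOTIFY_DONE;
}

static int example_probe(struct platform_device *pdev)
{
	/*
	 * Low priority: this handler is only used when no better restart
	 * method is available, matching the "last resort" comment above.
	 */
	return devm_register_sys_off_handler(&pdev->dev,
					     SYS_OFF_MODE_RESTART,
					     SYS_OFF_PRIO_LOW,
					     example_restart_handler, NULL);
}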
diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
index b27f8853508e..5dcb7665fe22 100644
--- a/drivers/soc/xilinx/xlnx_event_manager.c
+++ b/drivers/soc/xilinx/xlnx_event_manager.c
@@ -41,25 +41,37 @@ static int event_manager_availability = -EACCES;
static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
static int sgi_num = XLNX_EVENT_SGI_NUM;
+static bool is_need_to_unregister;
+
+/**
+ * struct agent_cb - Registered callback function and private data.
+ * @agent_data: Data passed back to handler function.
+ * @eve_cb: Registered callback function.
+ * @list: member used to link this callback into the per-event list.
+ */
+struct agent_cb {
+ void *agent_data;
+ event_cb_func_t eve_cb;
+ struct list_head list;
+};
+
/**
* struct registered_event_data - Registered Event Data.
* @key: key is the combine id(Node-Id | Event-Id) of type u64
* where upper u32 for Node-Id and lower u32 for Event-Id,
* And this used as key to index into hashmap.
- * @agent_data: Data passed back to handler function.
* @cb_type: Type of Api callback, like PM_NOTIFY_CB, etc.
- * @eve_cb: Function pointer to store the callback function.
- * @wake: If this flag set, firmware will wakeup processor if is
+ * @wake: If this flag is set, firmware will wake up the processor if it is
* in sleep or power down state.
+ * @cb_list_head: Head of the callback data list, which holds the registered
+ * handlers and their private data.
* @hentry: hlist_node that hooks this entry into hashtable.
*/
struct registered_event_data {
u64 key;
enum pm_api_cb_id cb_type;
- void *agent_data;
-
- event_cb_func_t eve_cb;
bool wake;
+ struct list_head cb_list_head;
struct hlist_node hentry;
};
@@ -78,29 +90,60 @@ static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, cons
event_cb_func_t cb_fun, void *data)
{
u64 key = 0;
+ bool present_in_hash = false;
struct registered_event_data *eve_data;
+ struct agent_cb *cb_data;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
key = ((u64)node_id << 32U) | (u64)event;
/* Check for existing entry in hash table for given key id */
hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
if (eve_data->key == key) {
- pr_err("Found as already registered\n");
- return -EINVAL;
+ present_in_hash = true;
+ break;
}
}
- /* Add new entry if not present */
- eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
- if (!eve_data)
- return -ENOMEM;
+ if (!present_in_hash) {
+ /* Add new entry if not present in HASH table */
+ eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
+ if (!eve_data)
+ return -ENOMEM;
+ eve_data->key = key;
+ eve_data->cb_type = PM_NOTIFY_CB;
+ eve_data->wake = wake;
+ INIT_LIST_HEAD(&eve_data->cb_list_head);
+
+ cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+ if (!cb_data)
+ return -ENOMEM;
+ cb_data->eve_cb = cb_fun;
+ cb_data->agent_data = data;
+
+ /* Add into callback list */
+ list_add(&cb_data->list, &eve_data->cb_list_head);
+
+ /* Add into HASH table */
+ hash_add(reg_driver_map, &eve_data->hentry, key);
+ } else {
+ /* Search for callback function and private data in list */
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ if (cb_pos->eve_cb == cb_fun &&
+ cb_pos->agent_data == data) {
+ return 0;
+ }
+ }
- eve_data->key = key;
- eve_data->cb_type = PM_NOTIFY_CB;
- eve_data->eve_cb = cb_fun;
- eve_data->wake = wake;
- eve_data->agent_data = data;
+ /* Add another handler and its private data to the existing list */
+ cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+ if (!cb_data)
+ return -ENOMEM;
+ cb_data->eve_cb = cb_fun;
+ cb_data->agent_data = data;
- hash_add(reg_driver_map, &eve_data->hentry, key);
+ list_add(&cb_data->list, &eve_data->cb_list_head);
+ }
return 0;
}
@@ -108,6 +151,7 @@ static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, cons
static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
{
struct registered_event_data *eve_data;
+ struct agent_cb *cb_data;
/* Check for existing entry in hash table for given cb_type */
hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
@@ -124,8 +168,16 @@ static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
eve_data->key = 0;
eve_data->cb_type = PM_INIT_SUSPEND_CB;
- eve_data->eve_cb = cb_fun;
- eve_data->agent_data = data;
+ INIT_LIST_HEAD(&eve_data->cb_list_head);
+
+ cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+ if (!cb_data)
+ return -ENOMEM;
+ cb_data->eve_cb = cb_fun;
+ cb_data->agent_data = data;
+
+ /* Add into callback list */
+ list_add(&cb_data->list, &eve_data->cb_list_head);
hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB);
@@ -136,15 +188,26 @@ static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
{
bool is_callback_found = false;
struct registered_event_data *eve_data;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
+
+ is_need_to_unregister = false;
/* Check for existing entry in hash table for given cb_type */
hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
- if (eve_data->cb_type == PM_INIT_SUSPEND_CB &&
- eve_data->eve_cb == cb_fun) {
- is_callback_found = true;
+ if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
+ /* Delete the matching callback from the list */
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ if (cb_pos->eve_cb == cb_fun) {
+ is_callback_found = true;
+ list_del_init(&cb_pos->list);
+ kfree(cb_pos);
+ }
+ }
/* remove an object from a hashtable */
hash_del(&eve_data->hentry);
kfree(eve_data);
+ is_need_to_unregister = true;
}
}
if (!is_callback_found) {
@@ -156,20 +219,36 @@ static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
}
static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event,
- event_cb_func_t cb_fun)
+ event_cb_func_t cb_fun, void *data)
{
bool is_callback_found = false;
struct registered_event_data *eve_data;
u64 key = ((u64)node_id << 32U) | (u64)event;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
+
+ is_need_to_unregister = false;
/* Check for existing entry in hash table for given key id */
hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
- if (eve_data->key == key &&
- eve_data->eve_cb == cb_fun) {
- is_callback_found = true;
- /* remove an object from a hashtable */
- hash_del(&eve_data->hentry);
- kfree(eve_data);
+ if (eve_data->key == key) {
+ /* Delete the matching callback from the list */
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ if (cb_pos->eve_cb == cb_fun &&
+ cb_pos->agent_data == data) {
+ is_callback_found = true;
+ list_del_init(&cb_pos->list);
+ kfree(cb_pos);
+ }
+ }
+
+ /* Remove the hash table entry if its callback list is empty */
+ if (list_empty(&eve_data->cb_list_head)) {
+ /* remove an object from a HASH table */
+ hash_del(&eve_data->hentry);
+ kfree(eve_data);
+ is_need_to_unregister = true;
+ }
}
}
if (!is_callback_found) {
@@ -241,7 +320,7 @@ int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, cons
eve = event & (1 << pos);
if (!eve)
continue;
- xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun);
+ xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
}
}
}
@@ -263,10 +342,10 @@ int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, cons
eve = event & (1 << pos);
if (!eve)
continue;
- xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun);
+ xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
}
} else {
- xlnx_remove_cb_for_notify_event(node_id, event, cb_fun);
+ xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
}
return ret;
}
@@ -284,15 +363,18 @@ EXPORT_SYMBOL_GPL(xlnx_register_event);
* @node_id: Node-Id related to event.
* @event: Event Mask for the Error Event.
* @cb_fun: Function pointer of callback function.
+ * @data: Pointer of agent's private data.
*
* Return: Returns 0 on successful unregistration else error code.
*/
int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
- event_cb_func_t cb_fun)
+ event_cb_func_t cb_fun, void *data)
{
- int ret;
+ int ret = 0;
u32 eve, pos;
+ is_need_to_unregister = false;
+
if (event_manager_availability)
return event_manager_availability;
@@ -309,23 +391,26 @@ int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, co
} else {
/* Remove Node-Id/Event from hash table */
if (!xlnx_is_error_event(node_id)) {
- xlnx_remove_cb_for_notify_event(node_id, event, cb_fun);
+ xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
} else {
for (pos = 0; pos < MAX_BITS; pos++) {
eve = event & (1 << pos);
if (!eve)
continue;
- xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun);
+ xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
}
}
- /* Un-register for Node-Id/Event combination */
- ret = zynqmp_pm_register_notifier(node_id, event, false, false);
- if (ret) {
- pr_err("%s() failed for 0x%x and 0x%x: %d\n",
- __func__, node_id, event, ret);
- return ret;
+ /* Un-register if list is empty */
+ if (is_need_to_unregister) {
+ /* Un-register for Node-Id/Event combination */
+ ret = zynqmp_pm_register_notifier(node_id, event, false, false);
+ if (ret) {
+ pr_err("%s() failed for 0x%x and 0x%x: %d\n",
+ __func__, node_id, event, ret);
+ return ret;
+ }
}
}
@@ -338,12 +423,16 @@ static void xlnx_call_suspend_cb_handler(const u32 *payload)
bool is_callback_found = false;
struct registered_event_data *eve_data;
u32 cb_type = payload[0];
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
/* Check for existing entry in hash table for given cb_type */
hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) {
if (eve_data->cb_type == cb_type) {
- eve_data->eve_cb(&payload[0], eve_data->agent_data);
- is_callback_found = true;
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
+ is_callback_found = true;
+ }
}
}
if (!is_callback_found)
@@ -356,12 +445,16 @@ static void xlnx_call_notify_cb_handler(const u32 *payload)
struct registered_event_data *eve_data;
u64 key = ((u64)payload[1] << 32U) | (u64)payload[2];
int ret;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
/* Check for existing entry in hash table for given key id */
hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
if (eve_data->key == key) {
- eve_data->eve_cb(&payload[0], eve_data->agent_data);
- is_callback_found = true;
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
+ is_callback_found = true;
+ }
/* re register with firmware to get future events */
ret = zynqmp_pm_register_notifier(payload[1], payload[2],
@@ -369,9 +462,13 @@ static void xlnx_call_notify_cb_handler(const u32 *payload)
if (ret) {
pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__,
payload[1], payload[2], ret);
- /* Remove already registered event from hash table */
- xlnx_remove_cb_for_notify_event(payload[1], payload[2],
- eve_data->eve_cb);
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head,
+ list) {
+ /* Remove already registered event from hash table */
+ xlnx_remove_cb_for_notify_event(payload[1], payload[2],
+ cb_pos->eve_cb,
+ cb_pos->agent_data);
+ }
}
}
}
@@ -572,8 +669,14 @@ static int xlnx_event_manager_remove(struct platform_device *pdev)
struct registered_event_data *eve_data;
struct hlist_node *tmp;
int ret;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) {
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ list_del_init(&cb_pos->list);
+ kfree(cb_pos);
+ }
hash_del(&eve_data->hentry);
kfree(eve_data);
}
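The xlnx_event_manager.c rework above replaces the single callback stored in each hash entry with a per-entry list of struct agent_cb, so several agents can subscribe to the same Node-Id/Event key. A small self-contained sketch of that hashtable-of-lists pattern, using hypothetical demo_* names rather than the driver's:

#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_cb {
	void (*fn)(void *arg);
	void *arg;
	struct list_head list;
};

struct demo_entry {
	u64 key;
	struct list_head cbs;
	struct hlist_node hentry;
};

static DEFINE_HASHTABLE(demo_map, 4);

/* Register a callback, creating the hash entry on first use. */
static int demo_add(u64 key, void (*fn)(void *arg), void *arg)
{
	struct demo_entry *e;
	struct demo_cb *cb;

	hash_for_each_possible(demo_map, e, hentry, key)
		if (e->key == key)
			goto have_entry;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;
	e->key = key;
	INIT_LIST_HEAD(&e->cbs);
	hash_add(demo_map, &e->hentry, key);

have_entry:
	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return -ENOMEM;
	cb->fn = fn;
	cb->arg = arg;
	list_add(&cb->list, &e->cbs);
	return 0;
}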
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index 859dd31b6eff..78a8a7545d1e 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -208,7 +208,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!zynqmp_pm_init_suspend_work) {
xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0,
- suspend_event_callback);
+ suspend_event_callback, NULL);
return -ENOMEM;
}
event_registered = true;
@@ -263,7 +263,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
if (ret) {
if (event_registered) {
- xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback);
+ xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback,
+ NULL);
event_registered = false;
}
dev_err(&pdev->dev, "unable to create sysfs interface\n");
@@ -277,7 +278,7 @@ static int zynqmp_pm_remove(struct platform_device *pdev)
{
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
if (event_registered)
- xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback);
+ xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback, NULL);
if (!rx_chan)
mbox_free_channel(rx_chan);
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 354d3f89366f..a2bfb0434a67 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -536,11 +536,9 @@ int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
int ret;
- ret = pm_runtime_get_sync(&slave->dev);
- if (ret < 0 && ret != -EACCES) {
- pm_runtime_put_noidle(&slave->dev);
+ ret = pm_runtime_resume_and_get(&slave->dev);
+ if (ret < 0 && ret != -EACCES)
return ret;
- }
ret = sdw_nread_no_pm(slave, addr, count, val);
@@ -562,11 +560,9 @@ int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
int ret;
- ret = pm_runtime_get_sync(&slave->dev);
- if (ret < 0 && ret != -EACCES) {
- pm_runtime_put_noidle(&slave->dev);
+ ret = pm_runtime_resume_and_get(&slave->dev);
+ if (ret < 0 && ret != -EACCES)
return ret;
- }
ret = sdw_nwrite_no_pm(slave, addr, count, val);
@@ -1506,10 +1502,9 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
- ret = pm_runtime_get_sync(&slave->dev);
+ ret = pm_runtime_resume_and_get(&slave->dev);
if (ret < 0 && ret != -EACCES) {
dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
- pm_runtime_put_noidle(&slave->dev);
return ret;
}
@@ -1838,6 +1833,18 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
__func__, slave->dev_num);
complete(&slave->initialization_complete);
+
+ /*
+ * If the manager became pm_runtime active, the peripherals will be
+ * restarted and attach, but their pm_runtime status may remain
+ * suspended. If the 'update_slave_status' callback initiates
+ * any sort of deferred processing, this processing would not be
+ * cancelled on pm_runtime suspend.
+ * To avoid such zombie states, we queue a request to resume.
+ * This would be a no-op in case the peripheral was being resumed
+ * by e.g. the ALSA/ASoC framework.
+ */
+ pm_request_resume(&slave->dev);
}
}
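The bus.c hunks above are the standard conversion from pm_runtime_get_sync() plus a manual pm_runtime_put_noidle() on failure to pm_runtime_resume_and_get(), which drops the usage count itself when resume fails. A minimal sketch of the resulting pattern; the -EACCES special case mirrors the SoundWire call sites, and the autosuspend put at the end is just one common way to release the reference:

#include <linux/pm_runtime.h>

static int example_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0 && ret != -EACCES)
		return ret;	/* usage count already dropped by the helper */

	/* ... register/transfer work with the device resumed ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}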
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 558390af44b6..4fbb19557f5e 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -386,12 +386,11 @@ static int cdns_parity_error_injection(void *data, u64 value)
* Resume Master device. If this results in a bus reset, the
* Slave devices will re-attach and be re-enumerated.
*/
- ret = pm_runtime_get_sync(bus->dev);
+ ret = pm_runtime_resume_and_get(bus->dev);
if (ret < 0 && ret != -EACCES) {
dev_err_ratelimited(cdns->dev,
- "pm_runtime_get_sync failed in %s, ret %d\n",
+ "pm_runtime_resume_and_get failed in %s, ret %d\n",
__func__, ret);
- pm_runtime_put_noidle(bus->dev);
return ret;
}
@@ -959,6 +958,8 @@ static void cdns_update_slave_status_work(struct work_struct *work)
container_of(work, struct sdw_cdns, work);
u32 slave0, slave1;
u64 slave_intstat;
+ u32 device0_status;
+ int retry_count = 0;
slave0 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
slave1 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
@@ -968,10 +969,45 @@ static void cdns_update_slave_status_work(struct work_struct *work)
dev_dbg_ratelimited(cdns->dev, "Slave status change: 0x%llx\n", slave_intstat);
+update_status:
cdns_update_slave_status(cdns, slave_intstat);
cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave0);
cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave1);
+ /*
+ * When there is more than one peripheral per link, it's
+ * possible that a deviceB becomes attached after we deal with
+ * the attachment of deviceA. Since the hardware does a
+ * logical AND, the attachment of the second device does not
+ * change the status seen by the driver.
+ *
+ * In that case, clearing the registers above would result in
+ * the deviceB never being detected - until a change of status
+ * is observed on the bus.
+ *
+ * To avoid this race condition, re-check if any device0 needs
+ * attention with PING commands. There is no need to check for
+ * ALERTS since they are not allowed until a non-zero
+ * device_number is assigned.
+ */
+
+ device0_status = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
+ device0_status &= 3;
+
+ if (device0_status == SDW_SLAVE_ATTACHED) {
+ if (retry_count++ < SDW_MAX_DEVICES) {
+ dev_dbg_ratelimited(cdns->dev,
+ "Device0 detected after clearing status, iteration %d\n",
+ retry_count);
+ slave_intstat = CDNS_MCP_SLAVE_INTSTAT_ATTACHED;
+ goto update_status;
+ } else {
+ dev_err_ratelimited(cdns->dev,
+ "Device0 detected after %d iterations\n",
+ retry_count);
+ }
+ }
+
/* clear and unmask Slave interrupt now */
cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK);
cdns_updatel(cdns, CDNS_MCP_INTMASK,
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 63101f1ba271..505c5ef061e3 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -799,12 +799,11 @@ static int intel_startup(struct snd_pcm_substream *substream,
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
int ret;
- ret = pm_runtime_get_sync(cdns->dev);
+ ret = pm_runtime_resume_and_get(cdns->dev);
if (ret < 0 && ret != -EACCES) {
dev_err_ratelimited(cdns->dev,
- "pm_runtime_get_sync failed in %s, ret %d\n",
+ "pm_runtime_resume_and_get failed in %s, ret %d\n",
__func__, ret);
- pm_runtime_put_noidle(cdns->dev);
return ret;
}
return 0;
@@ -1293,6 +1292,9 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
/* use generic bandwidth allocation algorithm */
sdw->cdns.bus.compute_params = sdw_compute_params;
+ /* avoid resuming from pm_runtime suspend if it's not required */
+ dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
+
ret = sdw_bus_master_add(bus, dev, dev->fwnode);
if (ret) {
dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
@@ -1828,6 +1830,9 @@ static int __maybe_unused intel_resume_runtime(struct device *dev)
return 0;
}
+ /* unconditionally disable WAKEEN interrupt */
+ intel_shim_wake(sdw, false);
+
link_flags = md_flags >> (bus->link_id * 8);
multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index da1ad7ebb1aa..22b706350ead 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -105,7 +105,7 @@
#define SWRM_SPECIAL_CMD_ID 0xF
#define MAX_FREQ_NUM 1
-#define TIMEOUT_MS (2 * HZ)
+#define TIMEOUT_MS 100
#define QCOM_SWRM_MAX_RD_LEN 0x1
#define QCOM_SDW_MAX_PORTS 14
#define DEFAULT_CLK_FREQ 9600000
@@ -510,12 +510,12 @@ static irqreturn_t qcom_swrm_wake_irq_handler(int irq, void *dev_id)
struct qcom_swrm_ctrl *swrm = dev_id;
int ret;
- ret = pm_runtime_get_sync(swrm->dev);
+ ret = pm_runtime_resume_and_get(swrm->dev);
if (ret < 0 && ret != -EACCES) {
dev_err_ratelimited(swrm->dev,
- "pm_runtime_get_sync failed in %s, ret %d\n",
+ "pm_runtime_resume_and_get failed in %s, ret %d\n",
__func__, ret);
- pm_runtime_put_noidle(swrm->dev);
+ return ret;
}
if (swrm->wake_irq > 0) {
@@ -1058,12 +1058,11 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai;
int ret, i;
- ret = pm_runtime_get_sync(ctrl->dev);
+ ret = pm_runtime_resume_and_get(ctrl->dev);
if (ret < 0 && ret != -EACCES) {
dev_err_ratelimited(ctrl->dev,
- "pm_runtime_get_sync failed in %s, ret %d\n",
+ "pm_runtime_resume_and_get failed in %s, ret %d\n",
__func__, ret);
- pm_runtime_put_noidle(ctrl->dev);
return ret;
}
@@ -1252,12 +1251,12 @@ static int swrm_reg_show(struct seq_file *s_file, void *data)
struct qcom_swrm_ctrl *swrm = s_file->private;
int reg, reg_val, ret;
- ret = pm_runtime_get_sync(swrm->dev);
+ ret = pm_runtime_resume_and_get(swrm->dev);
if (ret < 0 && ret != -EACCES) {
dev_err_ratelimited(swrm->dev,
- "pm_runtime_get_sync failed in %s, ret %d\n",
+ "pm_runtime_resume_and_get failed in %s, ret %d\n",
__func__, ret);
- pm_runtime_put_noidle(swrm->dev);
+ return ret;
}
for (reg = 0; reg <= SWR_MSTR_MAX_REG_ADDR; reg += 4) {
@@ -1452,7 +1451,7 @@ static bool swrm_wait_for_frame_gen_enabled(struct qcom_swrm_ctrl *swrm)
} while (retry--);
dev_err(swrm->dev, "%s: link status not %s\n", __func__,
- comp_sts && SWRM_FRM_GEN_ENABLED ? "connected" : "disconnected");
+ comp_sts & SWRM_FRM_GEN_ENABLED ? "connected" : "disconnected");
return false;
}
@@ -1549,6 +1548,7 @@ static const struct dev_pm_ops swrm_dev_pm_ops = {
static const struct of_device_id qcom_swrm_of_match[] = {
{ .compatible = "qcom,soundwire-v1.3.0", .data = &swrm_v1_3_data },
{ .compatible = "qcom,soundwire-v1.5.1", .data = &swrm_v1_5_data },
+ { .compatible = "qcom,soundwire-v1.6.0", .data = &swrm_v1_5_data },
{/* sentinel */},
};
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index f273459b2023..d34150559142 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -822,6 +822,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
} else if (multi_link) {
dev_err(bus->dev,
"Post bank switch ops not implemented\n");
+ ret = -EINVAL;
goto error;
}
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
index d403a7a3021d..72ab066ce552 100644
--- a/drivers/spi/spi-fsi.c
+++ b/drivers/spi/spi-fsi.c
@@ -319,12 +319,12 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS);
do {
+ if (time_after(jiffies, end))
+ return -ETIMEDOUT;
+
rc = fsi_spi_status(ctx, &status, "TX");
if (rc)
return rc;
-
- if (time_after(jiffies, end))
- return -ETIMEDOUT;
} while (status & SPI_FSI_STATUS_TDR_FULL);
sent += nb;
@@ -337,12 +337,12 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
while (transfer->len > recv) {
end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS);
do {
+ if (time_after(jiffies, end))
+ return -ETIMEDOUT;
+
rc = fsi_spi_status(ctx, &status, "RX");
if (rc)
return rc;
-
- if (time_after(jiffies, end))
- return -ETIMEDOUT;
} while (!(status & SPI_FSI_STATUS_RDR_FULL));
rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index fe252a8075a7..ea09d1b42bf6 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -71,29 +71,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct spi_device *spi = to_spi_device(dev);
- const char *end = memchr(buf, '\n', count);
- const size_t len = end ? end - buf : count;
- const char *driver_override, *old;
-
- /* We need to keep extra room for a newline when displaying value */
- if (len >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, len, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
+ int ret;
- device_lock(dev);
- old = spi->driver_override;
- if (len) {
- spi->driver_override = driver_override;
- } else {
- /* Empty string, disable driver override */
- spi->driver_override = NULL;
- kfree(driver_override);
- }
- device_unlock(dev);
- kfree(old);
+ ret = driver_set_override(dev, &spi->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
@@ -1672,7 +1654,8 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
ret = ctlr->transfer_one_message(ctlr, msg);
if (ret) {
dev_err(&ctlr->dev,
- "failed to transfer one message from queue\n");
+ "failed to transfer one message from queue: %d\n",
+ ret);
goto out;
}
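The spi.c change above replaces the open-coded override handling with driver_set_override(). A sketch of the same store callback on a hypothetical bus device (struct demo_device and to_demo_device() are placeholders standing in for struct spi_device):

#include <linux/device.h>
#include <linux/device/driver.h>

/* Hypothetical bus device, standing in for struct spi_device. */
struct demo_device {
	struct device dev;
	const char *driver_override;
};

#define to_demo_device(d) container_of(d, struct demo_device, dev)

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct demo_device *ddev = to_demo_device(dev);
	int ret;

	/*
	 * driver_set_override() copies the string, ignores a trailing
	 * newline and clears the override on an empty write - the
	 * behaviour the removed open-coded version implemented by hand.
	 */
	ret = driver_set_override(dev, &ddev->driver_override, buf, count);

	return ret ? ret : count;
}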
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index fc274737053d..0a993c47273e 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -64,8 +64,6 @@ source "drivers/staging/gdm724x/Kconfig"
source "drivers/staging/fwserial/Kconfig"
-source "drivers/staging/unisys/Kconfig"
-
source "drivers/staging/clocking-wizard/Kconfig"
source "drivers/staging/fbtft/Kconfig"
@@ -86,5 +84,6 @@ source "drivers/staging/fieldbus/Kconfig"
source "drivers/staging/qlge/Kconfig"
+source "drivers/staging/vme_user/Kconfig"
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 65e317922e3f..2800ab9b2d1d 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
obj-$(CONFIG_OCTEON_USB) += octeon-usb/
obj-$(CONFIG_VT6655) += vt6655/
obj-$(CONFIG_VT6656) += vt6656/
-obj-$(CONFIG_VME_BUS) += vme/
+obj-$(CONFIG_VME_BUS) += vme_user/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_FB_SM750) += sm750fb/
obj-$(CONFIG_USB_EMXX) += emxx_udc/
@@ -22,7 +22,6 @@ obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_STAGING_BOARD) += board/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
-obj-$(CONFIG_UNISYSSPAR) += unisys/
obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_MOST) += most/
diff --git a/drivers/staging/fieldbus/anybuss/host.c b/drivers/staging/fieldbus/anybuss/host.c
index a344410e48fe..cd86b9c9e345 100644
--- a/drivers/staging/fieldbus/anybuss/host.c
+++ b/drivers/staging/fieldbus/anybuss/host.c
@@ -1384,7 +1384,7 @@ anybuss_host_common_probe(struct device *dev,
goto err_device;
return cd;
err_device:
- device_unregister(&cd->client->dev);
+ put_device(&cd->client->dev);
err_kthread:
kthread_stop(cd->qthread);
err_reset:
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
index bbf3ba744fc4..45afa208d004 100644
--- a/drivers/staging/greybus/arche-apb-ctrl.c
+++ b/drivers/staging/greybus/arche-apb-ctrl.c
@@ -445,7 +445,7 @@ static int __maybe_unused arche_apb_ctrl_suspend(struct device *dev)
static int __maybe_unused arche_apb_ctrl_resume(struct device *dev)
{
/*
- * Atleast for ES2 we have to meet the delay requirement between
+ * At least for ES2 we have to meet the delay requirement between
* unipro switch and AP bridge init, depending on whether bridge is in
* OFF state or standby state.
*
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index e374dfc0c92f..fcbd5f71eff2 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -591,7 +591,7 @@ static __maybe_unused int arche_platform_suspend(struct device *dev)
static __maybe_unused int arche_platform_resume(struct device *dev)
{
/*
- * Atleast for ES2 we have to meet the delay requirement between
+ * At least for ES2 we have to meet the delay requirement between
* unipro switch and AP bridge init, depending on whether bridge is in
* OFF state or standby state.
*
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
index db0b600ee5d1..0ad8aeabccbf 100644
--- a/drivers/staging/greybus/audio_codec.c
+++ b/drivers/staging/greybus/audio_codec.c
@@ -497,7 +497,7 @@ static int gbcodec_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
int ret;
- struct gbaudio_module_info *module;
+ struct gbaudio_module_info *module = NULL, *iter;
struct gbaudio_data_connection *data;
struct gb_bundle *bundle;
struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
@@ -511,11 +511,13 @@ static int gbcodec_prepare(struct snd_pcm_substream *substream,
return -ENODEV;
}
- list_for_each_entry(module, &codec->module_list, list) {
+ list_for_each_entry(iter, &codec->module_list, list) {
/* find the dai */
- data = find_data(module, dai->id);
- if (data)
+ data = find_data(iter, dai->id);
+ if (data) {
+ module = iter;
break;
+ }
}
if (!data) {
dev_err(dai->dev, "DATA connection missing\n");
@@ -563,7 +565,7 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
{
int ret;
struct gbaudio_data_connection *data;
- struct gbaudio_module_info *module;
+ struct gbaudio_module_info *module = NULL, *iter;
struct gb_bundle *bundle;
struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
struct gbaudio_stream_params *params;
@@ -592,15 +594,17 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
return ret;
}
- list_for_each_entry(module, &codec->module_list, list) {
+ list_for_each_entry(iter, &codec->module_list, list) {
/* find the dai */
- data = find_data(module, dai->id);
- if (data)
+ data = find_data(iter, dai->id);
+ if (data) {
+ module = iter;
break;
+ }
}
if (!data) {
- dev_err(dai->dev, "%s:%s DATA connection missing\n",
- dai->name, module->name);
+ dev_err(dai->dev, "%s DATA connection missing\n",
+ dai->name);
mutex_unlock(&codec->lock);
return -ENODEV;
}
@@ -1027,12 +1031,6 @@ static int gbcodec_probe(struct snd_soc_component *comp)
return 0;
}
-static void gbcodec_remove(struct snd_soc_component *comp)
-{
- /* Empty function for now */
- return;
-}
-
static int gbcodec_write(struct snd_soc_component *comp, unsigned int reg,
unsigned int value)
{
@@ -1047,8 +1045,6 @@ static unsigned int gbcodec_read(struct snd_soc_component *comp,
static const struct snd_soc_component_driver soc_codec_dev_gbaudio = {
.probe = gbcodec_probe,
- .remove = gbcodec_remove,
-
.read = gbcodec_read,
.write = gbcodec_write,
};
diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c
index ad20ec24031e..3fda172239d2 100644
--- a/drivers/staging/greybus/pwm.c
+++ b/drivers/staging/greybus/pwm.c
@@ -297,7 +297,6 @@ static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
pwm->dev = &gbphy_dev->dev;
pwm->ops = &gb_pwm_ops;
- pwm->base = -1; /* Allocate base dynamically */
pwm->npwm = pwmc->pwm_max + 1;
ret = pwmchip_add(pwm);
diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c
index 867bf289df2e..4c42e393cd3d 100644
--- a/drivers/staging/greybus/tools/loopback_test.c
+++ b/drivers/staging/greybus/tools/loopback_test.c
@@ -533,7 +533,7 @@ static int log_results(struct loopback_test *t)
fd = open(file_name, O_WRONLY | O_CREAT | O_APPEND, 0644);
if (fd < 0) {
- fprintf(stderr, "unable to open %s for appendation\n", file_name);
+ fprintf(stderr, "unable to open %s for appending\n", file_name);
abort();
}
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 71c709771676..52b8957c19c9 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -290,7 +290,7 @@ static inline ssize_t ad7746_start_calib(struct device *dev,
int ret, timeout = 10;
bool doit;
- ret = strtobool(buf, &doit);
+ ret = kstrtobool(buf, &doit);
if (ret < 0)
return ret;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 793918e1c45f..f177b20f0f2d 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -749,7 +749,6 @@ static int ad5933_probe(struct i2c_client *client,
indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
- INDIO_BUFFER_SOFTWARE,
&ad5933_ring_setup_ops);
if (ret)
return ret;
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 74adb82f37c3..c0b2716d0511 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -499,7 +499,6 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
case IIO_ANGL_VEL:
- negative = st->rx[0] & 0x80;
vel = be16_to_cpup((__be16 *)st->rx);
vel >>= 16 - st->resolution;
if (vel & 0x8000) {
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 1c63d595313d..9429ee155910 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -84,10 +84,6 @@ static void ks_wlan_hw_wakeup_task(struct work_struct *work)
return;
}
}
-
- /* power save */
- if (atomic_read(&priv->sme_task.count) > 0)
- tasklet_enable(&priv->sme_task);
}
static void ks_wlan_do_power_save(struct ks_wlan_private *priv)
@@ -2200,10 +2196,11 @@ static void hostif_sme_execute(struct ks_wlan_private *priv, int event)
}
}
-static
-void hostif_sme_task(struct tasklet_struct *t)
+static void hostif_sme_work(struct work_struct *work)
{
- struct ks_wlan_private *priv = from_tasklet(priv, t, sme_task);
+ struct ks_wlan_private *priv;
+
+ priv = container_of(work, struct ks_wlan_private, sme_work);
if (priv->dev_state < DEVICE_STATE_BOOT)
return;
@@ -2214,7 +2211,7 @@ void hostif_sme_task(struct tasklet_struct *t)
hostif_sme_execute(priv, priv->sme_i.event_buff[priv->sme_i.qhead]);
inc_smeqhead(priv);
if (cnt_smeqbody(priv) > 0)
- tasklet_schedule(&priv->sme_task);
+ schedule_work(&priv->sme_work);
}
/* send to Station Management Entity module */
@@ -2229,7 +2226,7 @@ void hostif_sme_enqueue(struct ks_wlan_private *priv, u16 event)
netdev_err(priv->net_dev, "sme queue buffer overflow\n");
}
- tasklet_schedule(&priv->sme_task);
+ schedule_work(&priv->sme_work);
}
static inline void hostif_aplist_init(struct ks_wlan_private *priv)
@@ -2254,7 +2251,7 @@ static inline void hostif_sme_init(struct ks_wlan_private *priv)
priv->sme_i.qtail = 0;
spin_lock_init(&priv->sme_i.sme_spin);
priv->sme_i.sme_flag = 0;
- tasklet_setup(&priv->sme_task, hostif_sme_task);
+ INIT_WORK(&priv->sme_work, hostif_sme_work);
}
static inline void hostif_wpa_init(struct ks_wlan_private *priv)
@@ -2312,5 +2309,5 @@ int hostif_init(struct ks_wlan_private *priv)
void hostif_exit(struct ks_wlan_private *priv)
{
- tasklet_kill(&priv->sme_task);
+ cancel_work_sync(&priv->sme_work);
}
diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h
index 7aaf8d780939..3e9a91b5131c 100644
--- a/drivers/staging/ks7010/ks_wlan.h
+++ b/drivers/staging/ks7010/ks_wlan.h
@@ -449,7 +449,7 @@ struct ks_wlan_private {
struct sme_info sme_i;
u8 *rxp;
unsigned int rx_size;
- struct tasklet_struct sme_task;
+ struct work_struct sme_work;
struct work_struct wakeup_work;
int scan_ind_count;
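The ks7010 change above is the usual tasklet-to-workqueue conversion: the tasklet_struct becomes a work_struct, tasklet_schedule() becomes schedule_work() and tasklet_kill() becomes cancel_work_sync(). A minimal sketch of the same pattern with placeholder demo_* names:

#include <linux/workqueue.h>

struct demo_priv {
	struct work_struct sme_work;
	/* ... driver state ... */
};

static void demo_sme_work(struct work_struct *work)
{
	struct demo_priv *priv = container_of(work, struct demo_priv, sme_work);

	/* Runs in process context and may sleep, unlike the old tasklet. */
	(void)priv;
}

/*
 * init:     INIT_WORK(&priv->sme_work, demo_sme_work);
 * queue:    schedule_work(&priv->sme_work);
 * teardown: cancel_work_sync(&priv->sme_work);
 */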
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index 29f8ce2a47f5..97dff82b7a5f 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -45,9 +45,6 @@ MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a powe
static DEFINE_SPINLOCK(dim_lock);
-static void dim2_tasklet_fn(unsigned long data);
-static DECLARE_TASKLET_OLD(dim2_tasklet, dim2_tasklet_fn);
-
/**
* struct hdm_channel - private structure to keep channel specific data
* @name: channel name
@@ -361,15 +358,9 @@ static irqreturn_t dim2_mlb_isr(int irq, void *_dev)
return IRQ_HANDLED;
}
-/**
- * dim2_tasklet_fn - tasklet function
- * @data: private data
- *
- * Service each initialized channel, if needed
- */
-static void dim2_tasklet_fn(unsigned long data)
+static irqreturn_t dim2_task_irq(int irq, void *_dev)
{
- struct dim2_hdm *dev = (struct dim2_hdm *)data;
+ struct dim2_hdm *dev = _dev;
unsigned long flags;
int ch_idx;
@@ -385,6 +376,8 @@ static void dim2_tasklet_fn(unsigned long data)
while (!try_start_dim_transfer(dev->hch + ch_idx))
continue;
}
+
+ return IRQ_HANDLED;
}
/**
@@ -392,8 +385,8 @@ static void dim2_tasklet_fn(unsigned long data)
* @irq: irq number
* @_dev: private data
*
- * Acknowledge the interrupt and schedule a tasklet to service channels.
- * Return IRQ_HANDLED.
+ * Acknowledge the interrupt and service each initialized channel,
+ * if needed, in task context.
*/
static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
{
@@ -405,9 +398,7 @@ static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
dim_service_ahb_int_irq(get_active_channels(dev, buffer));
spin_unlock_irqrestore(&dim_lock, flags);
- dim2_tasklet.data = (unsigned long)dev;
- tasklet_schedule(&dim2_tasklet);
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
/**
@@ -654,14 +645,12 @@ static int poison_channel(struct most_interface *most_iface, int ch_idx)
if (!hdm_ch->is_initialized)
return -EPERM;
- tasklet_disable(&dim2_tasklet);
spin_lock_irqsave(&dim_lock, flags);
hal_ret = dim_destroy_channel(&hdm_ch->ch);
hdm_ch->is_initialized = false;
if (ch_idx == dev->atx_idx)
dev->atx_idx = -1;
spin_unlock_irqrestore(&dim_lock, flags);
- tasklet_enable(&dim2_tasklet);
if (hal_ret != DIM_NO_ERROR) {
pr_err("HAL Failed to close channel %s\n", hdm_ch->name);
ret = -EFAULT;
@@ -821,8 +810,8 @@ static int dim2_probe(struct platform_device *pdev)
goto err_shutdown_dim;
}
- ret = devm_request_irq(&pdev->dev, irq, dim2_ahb_isr, 0,
- "dim2_ahb0_int", dev);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, dim2_ahb_isr,
+ dim2_task_irq, 0, "dim2_ahb0_int", dev);
if (ret) {
dev_err(&pdev->dev, "failed to request ahb0_int irq %d\n", irq);
goto err_shutdown_dim;
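The dim2.c change above folds the old tasklet into a threaded interrupt: the hard handler acknowledges the hardware and returns IRQ_WAKE_THREAD, and the thread function services the channels in process context. A generic sketch of that split, with placeholder names; the request call mirrors the devm_request_threaded_irq() usage in the hunk:

#include <linux/interrupt.h>

static irqreturn_t demo_hard_isr(int irq, void *dev_id)
{
	/* Ack/clear the interrupt source; no sleeping allowed here. */
	return IRQ_WAKE_THREAD;		/* run demo_thread_fn() next */
}

static irqreturn_t demo_thread_fn(int irq, void *dev_id)
{
	/* Service the channels in process context; sleeping is allowed. */
	return IRQ_HANDLED;
}

/*
 * Registration, as in the hunk above:
 *
 *	ret = devm_request_threaded_irq(&pdev->dev, irq, demo_hard_isr,
 *					demo_thread_fn, 0, "demo_irq", dev);
 */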
diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
index 55e0ad759250..d0dd659834ee 100644
--- a/drivers/staging/qlge/qlge.h
+++ b/drivers/staging/qlge/qlge.h
@@ -2072,6 +2072,7 @@ struct qlge_adapter *netdev_to_qdev(struct net_device *ndev)
return ndev_priv->qdev;
}
+
/*
* The main Adapter structure definition.
* This structure has all fields relevant to the hardware.
diff --git a/drivers/staging/r8188eu/core/rtw_ap.c b/drivers/staging/r8188eu/core/rtw_ap.c
index 2ff78ed1faab..ac6effbecf6d 100644
--- a/drivers/staging/r8188eu/core/rtw_ap.c
+++ b/drivers/staging/r8188eu/core/rtw_ap.c
@@ -188,7 +188,6 @@ void expire_timeout_chk(struct adapter *padapter)
spin_lock_bh(&pstapriv->auth_list_lock);
}
}
-
}
spin_unlock_bh(&pstapriv->auth_list_lock);
@@ -381,7 +380,6 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
/* set ra_id, init_rate */
psta->raid = raid;
psta->init_rate = init_rate;
-
}
}
@@ -455,7 +453,6 @@ void update_bmc_sta(struct adapter *padapter)
spin_lock_bh(&psta->lock);
psta->state = _FW_LINKED;
spin_unlock_bh(&psta->lock);
-
}
}
diff --git a/drivers/staging/r8188eu/core/rtw_br_ext.c b/drivers/staging/r8188eu/core/rtw_br_ext.c
index f056204c0fdb..bca20fe5c983 100644
--- a/drivers/staging/r8188eu/core/rtw_br_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_br_ext.c
@@ -53,7 +53,8 @@ static unsigned char *__nat25_find_pppoe_tag(struct pppoe_hdr *ph, unsigned shor
unsigned char *cur_ptr, *start_ptr;
unsigned short tagLen, tagType;
- start_ptr = cur_ptr = (unsigned char *)ph->tag;
+ start_ptr = (unsigned char *)ph->tag;
+ cur_ptr = (unsigned char *)ph->tag;
while ((cur_ptr - start_ptr) < ntohs(ph->length)) {
/* prevent un-alignment access */
tagType = (unsigned short)((cur_ptr[0] << 8) + cur_ptr[1]);
@@ -87,19 +88,19 @@ static int skb_pull_and_merge(struct sk_buff *skb, unsigned char *src, int len)
int tail_len;
unsigned long end, tail;
- if ((src+len) > skb_tail_pointer(skb) || skb->len < len)
+ if ((src + len) > skb_tail_pointer(skb) || skb->len < len)
return -1;
tail = (unsigned long)skb_tail_pointer(skb);
- end = (unsigned long)src+len;
+ end = (unsigned long)src + len;
if (tail < end)
return -1;
- tail_len = (int)(tail-end);
+ tail_len = (int)(tail - end);
if (tail_len > 0)
- memmove(src, src+len, tail_len);
+ memmove(src, src + len, tail_len);
- skb_trim(skb, skb->len-len);
+ skb_trim(skb, skb->len - len);
return 0;
}
@@ -117,7 +118,7 @@ static void __nat25_generate_ipv4_network_addr(unsigned char *networkAddr,
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
networkAddr[0] = NAT25_IPV4;
- memcpy(networkAddr+7, (unsigned char *)ipAddr, 4);
+ memcpy(networkAddr + 7, (unsigned char *)ipAddr, 4);
}
static void __nat25_generate_pppoe_network_addr(unsigned char *networkAddr,
@@ -126,8 +127,8 @@ static void __nat25_generate_pppoe_network_addr(unsigned char *networkAddr,
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
networkAddr[0] = NAT25_PPPOE;
- memcpy(networkAddr+1, (unsigned char *)sid, 2);
- memcpy(networkAddr+3, (unsigned char *)ac_mac, 6);
+ memcpy(networkAddr + 1, (unsigned char *)sid, 2);
+ memcpy(networkAddr + 3, (unsigned char *)ac_mac, 6);
}
static void __nat25_generate_ipv6_network_addr(unsigned char *networkAddr,
@@ -136,17 +137,17 @@ static void __nat25_generate_ipv6_network_addr(unsigned char *networkAddr,
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
networkAddr[0] = NAT25_IPV6;
- memcpy(networkAddr+1, (unsigned char *)ipAddr, 16);
+ memcpy(networkAddr + 1, (unsigned char *)ipAddr, 16);
}
static unsigned char *scan_tlv(unsigned char *data, int len, unsigned char tag, unsigned char len8b)
{
while (len > 0) {
- if (*data == tag && *(data+1) == len8b && len >= len8b*8)
- return data+2;
+ if (*data == tag && *(data + 1) == len8b && len >= len8b * 8)
+ return data + 2;
- len -= (*(data+1))*8;
- data += (*(data+1))*8;
+ len -= (*(data + 1)) * 8;
+ data += (*(data + 1)) * 8;
}
return NULL;
}
@@ -158,7 +159,7 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char
if (icmphdr->icmp6_type == NDISC_ROUTER_SOLICITATION) {
if (len >= 8) {
- mac = scan_tlv(&data[8], len-8, 1, 1);
+ mac = scan_tlv(&data[8], len - 8, 1, 1);
if (mac) {
memcpy(mac, replace_mac, 6);
return 1;
@@ -166,7 +167,7 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char
}
} else if (icmphdr->icmp6_type == NDISC_ROUTER_ADVERTISEMENT) {
if (len >= 16) {
- mac = scan_tlv(&data[16], len-16, 1, 1);
+ mac = scan_tlv(&data[16], len - 16, 1, 1);
if (mac) {
memcpy(mac, replace_mac, 6);
return 1;
@@ -174,7 +175,7 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char
}
} else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
if (len >= 24) {
- mac = scan_tlv(&data[24], len-24, 1, 1);
+ mac = scan_tlv(&data[24], len - 24, 1, 1);
if (mac) {
memcpy(mac, replace_mac, 6);
return 1;
@@ -182,7 +183,7 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char
}
} else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
if (len >= 24) {
- mac = scan_tlv(&data[24], len-24, 2, 1);
+ mac = scan_tlv(&data[24], len - 24, 2, 1);
if (mac) {
memcpy(mac, replace_mac, 6);
return 1;
@@ -190,7 +191,7 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char
}
} else if (icmphdr->icmp6_type == NDISC_REDIRECT) {
if (len >= 40) {
- mac = scan_tlv(&data[40], len-40, 2, 1);
+ mac = scan_tlv(&data[40], len - 40, 2, 1);
if (mac) {
memcpy(mac, replace_mac, 6);
return 1;
@@ -313,6 +314,7 @@ void nat25_db_cleanup(struct adapter *priv)
for (i = 0; i < NAT25_HASH_SIZE; i++) {
struct nat25_network_db_entry *f;
+
f = priv->nethash[i];
while (f) {
struct nat25_network_db_entry *g;
@@ -339,12 +341,12 @@ void nat25_db_expire(struct adapter *priv)
for (i = 0; i < NAT25_HASH_SIZE; i++) {
struct nat25_network_db_entry *f;
- f = priv->nethash[i];
+ f = priv->nethash[i];
while (f) {
struct nat25_network_db_entry *g;
- g = f->next_hash;
+ g = f->next_hash;
if (__nat25_has_expired(f)) {
if (atomic_dec_and_test(&f->use_count)) {
if (priv->scdb_entry == f) {
@@ -396,7 +398,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
tmp = be32_to_cpu(iph->saddr);
__nat25_generate_ipv4_network_addr(networkAddr, &tmp);
/* record source IP address and , source mac address into db */
- __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+ __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr);
return 0;
default:
return -1;
@@ -421,7 +423,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
arp_ptr += arp->ar_hln;
sender = (unsigned int *)arp_ptr;
__nat25_generate_ipv4_network_addr(networkAddr, sender);
- __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+ __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr);
return 0;
default:
return -1;
@@ -432,7 +434,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
/* Handle PPPoE frame */
/*---------------------------------------------------*/
struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
- unsigned short *pMagic;
+ __be16 *pMagic;
switch (method) {
case NAT25_CHECK:
@@ -458,22 +460,22 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
sizeof(tag_buf))
return -1;
- memcpy(tag->tag_data+MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN,
+ memcpy(tag->tag_data + MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN,
pOldTag->tag_data, old_tag_len);
- if (skb_pull_and_merge(skb, (unsigned char *)pOldTag, TAG_HDR_LEN+old_tag_len) < 0)
+ if (skb_pull_and_merge(skb, (unsigned char *)pOldTag, TAG_HDR_LEN + old_tag_len) < 0)
return -1;
- ph->length = htons(ntohs(ph->length)-TAG_HDR_LEN-old_tag_len);
+ ph->length = htons(ntohs(ph->length) - TAG_HDR_LEN - old_tag_len);
}
tag->tag_type = PTT_RELAY_SID;
- tag->tag_len = htons(MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN+old_tag_len);
+ tag->tag_len = htons(MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN + old_tag_len);
/* insert the magic_code+client mac in relay tag */
- pMagic = (unsigned short *)tag->tag_data;
+ pMagic = (__be16 *)tag->tag_data;
*pMagic = htons(MAGIC_CODE);
- memcpy(tag->tag_data+MAGIC_CODE_LEN, skb->data+ETH_ALEN, ETH_ALEN);
+ memcpy(tag->tag_data + MAGIC_CODE_LEN, skb->data + ETH_ALEN, ETH_ALEN);
/* Add relay tag */
if (__nat25_add_pppoe_tag(skb, tag) < 0)
@@ -486,7 +488,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
return -2;
if (priv->pppoe_connection_in_progress == 0)
- memcpy(priv->pppoe_addr, skb->data+ETH_ALEN, ETH_ALEN);
+ memcpy(priv->pppoe_addr, skb->data + ETH_ALEN, ETH_ALEN);
priv->pppoe_connection_in_progress = WAIT_TIME_PPPOE;
}
@@ -496,11 +498,11 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
} else { /* session phase */
__nat25_generate_pppoe_network_addr(networkAddr, skb->data, &ph->sid);
- __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+ __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr);
if (!priv->ethBrExtInfo.addPPPoETag &&
priv->pppoe_connection_in_progress &&
- !memcmp(skb->data+ETH_ALEN, priv->pppoe_addr, ETH_ALEN))
+ !memcmp(skb->data + ETH_ALEN, priv->pppoe_addr, ETH_ALEN))
priv->pppoe_connection_in_progress = 0;
}
return 0;
@@ -548,7 +550,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
case NAT25_INSERT:
if (memcmp(&iph->saddr, "\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0", 16)) {
__nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->saddr);
- __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+ __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr);
if (iph->nexthdr == IPPROTO_ICMPV6 &&
skb->len > (ETH_HLEN + sizeof(*iph) + 4)) {
@@ -557,9 +559,11 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
struct icmp6hdr *hdr = (struct icmp6hdr *)(skb->data + ETH_HLEN + sizeof(*iph));
hdr->icmp6_cksum = 0;
hdr->icmp6_cksum = csum_ipv6_magic(&iph->saddr, &iph->daddr,
- iph->payload_len,
+ be16_to_cpu(iph->payload_len),
IPPROTO_ICMPV6,
- csum_partial((__u8 *)hdr, iph->payload_len, 0));
+ csum_partial((__u8 *)hdr,
+ be16_to_cpu(iph->payload_len),
+ 0));
}
}
}
diff --git a/drivers/staging/r8188eu/core/rtw_cmd.c b/drivers/staging/r8188eu/core/rtw_cmd.c
index 6eca30124ee8..06523d91939a 100644
--- a/drivers/staging/r8188eu/core/rtw_cmd.c
+++ b/drivers/staging/r8188eu/core/rtw_cmd.c
@@ -11,14 +11,54 @@
#include "../include/rtw_mlme_ext.h"
#include "../include/rtl8188e_dm.h"
-/*
-Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
-No irqsave is necessary.
-*/
+/* Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
+ * No irqsave is necessary.
+ */
-static int _rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
+static void c2h_wk_callback(struct work_struct *work);
+
+void rtw_free_evt_priv(struct evt_priv *pevtpriv)
{
- int res = _SUCCESS;
+ cancel_work_sync(&pevtpriv->c2h_wk);
+ while (pevtpriv->c2h_wk_alive)
+ msleep(10);
+
+ while (!rtw_cbuf_empty(pevtpriv->c2h_queue)) {
+ void *c2h = rtw_cbuf_pop(pevtpriv->c2h_queue);
+ if (c2h && c2h != (void *)pevtpriv)
+ kfree(c2h);
+ }
+}
+
+/* Calling Context:
+ *
+ * rtw_enqueue_cmd can only be called between kernel thread,
+ * since only spin_lock is used.
+ *
+ * ISR/Call-Back functions can't call this sub-function.
+ */
+
+static int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
+{
+ unsigned long flags;
+
+ if (!obj)
+ goto exit;
+
+ spin_lock_irqsave(&queue->lock, flags);
+
+ list_add_tail(&obj->list, &queue->queue);
+
+ spin_unlock_irqrestore(&queue->lock, flags);
+
+exit:
+
+ return _SUCCESS;
+}
+
+u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
+{
+ u32 res = _SUCCESS;
init_completion(&pcmdpriv->enqueue_cmd);
/* sema_init(&(pcmdpriv->cmd_done_sema), 0); */
@@ -57,11 +97,9 @@ exit:
return res;
}
-static void c2h_wk_callback(struct work_struct *work);
-
-static int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
+u32 rtw_init_evt_priv(struct evt_priv *pevtpriv)
{
- int res = _SUCCESS;
+ u32 res = _SUCCESS;
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
atomic_set(&pevtpriv->event_seq, 0);
@@ -69,24 +107,13 @@ static int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
INIT_WORK(&pevtpriv->c2h_wk, c2h_wk_callback);
pevtpriv->c2h_wk_alive = false;
pevtpriv->c2h_queue = rtw_cbuf_alloc(C2H_QUEUE_MAX_LEN + 1);
+ if (!pevtpriv->c2h_queue)
+ res = _FAIL;
return res;
}
-void rtw_free_evt_priv(struct evt_priv *pevtpriv)
-{
- cancel_work_sync(&pevtpriv->c2h_wk);
- while (pevtpriv->c2h_wk_alive)
- msleep(10);
-
- while (!rtw_cbuf_empty(pevtpriv->c2h_queue)) {
- void *c2h = rtw_cbuf_pop(pevtpriv->c2h_queue);
- if (c2h && c2h != (void *)pevtpriv)
- kfree(c2h);
- }
-}
-
-static void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
+void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
{
if (pcmdpriv) {
kfree(pcmdpriv->cmd_allocated_buf);
@@ -94,75 +121,6 @@ static void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
}
}
-/*
-Calling Context:
-
-rtw_enqueue_cmd can only be called between kernel thread,
-since only spin_lock is used.
-
-ISR/Call-Back functions can't call this sub-function.
-
-*/
-
-static int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
-{
- unsigned long flags;
-
- if (!obj)
- goto exit;
-
- spin_lock_irqsave(&queue->lock, flags);
-
- list_add_tail(&obj->list, &queue->queue);
-
- spin_unlock_irqrestore(&queue->lock, flags);
-
-exit:
-
- return _SUCCESS;
-}
-
-static struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue)
-{
- struct cmd_obj *obj;
- unsigned long flags;
-
- spin_lock_irqsave(&queue->lock, flags);
- if (list_empty(&queue->queue)) {
- obj = NULL;
- } else {
- obj = container_of((&queue->queue)->next, struct cmd_obj, list);
- list_del_init(&obj->list);
- }
-
- spin_unlock_irqrestore(&queue->lock, flags);
-
- return obj;
-}
-
-u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
-{
- u32 res;
-
- res = _rtw_init_cmd_priv(pcmdpriv);
-
- return res;
-}
-
-u32 rtw_init_evt_priv(struct evt_priv *pevtpriv)
-{
- int res;
-
- res = _rtw_init_evt_priv(pevtpriv);
-
- return res;
-}
-
-void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
-{
- _rtw_free_cmd_priv(pcmdpriv);
-}
-
static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
{
u8 bAllow = false; /* set to true to allow enqueuing cmd when hw_init_completed is false */
@@ -187,7 +145,7 @@ u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
cmd_obj->padapter = padapter;
res = rtw_cmd_filter(pcmdpriv, cmd_obj);
- if (_FAIL == res) {
+ if (res == _FAIL) {
rtw_free_cmd_obj(cmd_obj);
goto exit;
}
@@ -204,11 +162,21 @@ exit:
struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
{
- struct cmd_obj *cmd_obj;
+ struct cmd_obj *obj;
+ struct __queue *queue = &pcmdpriv->cmd_queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->lock, flags);
+ if (list_empty(&queue->queue)) {
+ obj = NULL;
+ } else {
+ obj = container_of((&queue->queue)->next, struct cmd_obj, list);
+ list_del_init(&obj->list);
+ }
- cmd_obj = _rtw_dequeue_cmd(&pcmdpriv->cmd_queue);
+ spin_unlock_irqrestore(&queue->lock, flags);
- return cmd_obj;
+ return obj;
}
void rtw_free_cmd_obj(struct cmd_obj *pcmd)
@@ -258,12 +226,12 @@ _next:
if (!pcmd)
continue;
- if (_FAIL == rtw_cmd_filter(pcmdpriv, pcmd)) {
+ if (rtw_cmd_filter(pcmdpriv, pcmd) == _FAIL) {
pcmd->res = H2C_DROPPED;
goto post_process;
}
- pcmd->cmdsz = _RND4((pcmd->cmdsz));/* _RND4 */
+ pcmd->cmdsz = round_up(pcmd->cmdsz, 4);
memcpy(pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);
@@ -291,7 +259,7 @@ post_process:
rtw_free_cmd_obj(pcmd);
else
/* todo: !!! fill rsp_buf to pcmd->rsp if (pcmd->rsp!= NULL) */
- pcmd_callback(pcmd->padapter, pcmd);/* need conider that free cmd_obj in rtw_cmd_callback */
+ pcmd_callback(pcmd->padapter, pcmd);/* need consider that free cmd_obj in rtw_cmd_callback */
} else {
rtw_free_cmd_obj(pcmd);
}
@@ -316,11 +284,10 @@ post_process:
return 0;
}
-/*
-rtw_sitesurvey_cmd(~)
- ### NOTE:#### (!!!!)
- MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
-*/
+/* rtw_sitesurvey_cmd(~)
+ * ### NOTE:#### (!!!!)
+ * MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
+ */
u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num,
struct rtw_ieee80211_channel *ch, int ch_num)
{
@@ -330,19 +297,17 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
- }
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
p2p_ps_wk_cmd(padapter, P2P_PS_SCAN, 1);
- }
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c)
return _FAIL;
- psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
+ psurveyPara = kzalloc(sizeof(*psurveyPara), GFP_ATOMIC);
if (!psurveyPara) {
kfree(ph2c);
return _FAIL;
@@ -403,13 +368,13 @@ u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pbsetdataratepara = kzalloc(sizeof(struct setdatarate_parm), GFP_ATOMIC);
+ pbsetdataratepara = kzalloc(sizeof(*pbsetdataratepara), GFP_ATOMIC);
if (!pbsetdataratepara) {
kfree(ph2c);
res = _FAIL;
@@ -442,7 +407,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
rtw_led_control(padapter, LED_CTL_START_TO_LINK);
- pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ pcmd = kzalloc(sizeof(*pcmd), GFP_ATOMIC);
if (!pcmd) {
res = _FAIL;
goto exit;
@@ -479,7 +444,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
rtw_led_control(padapter, LED_CTL_START_TO_LINK);
- pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ pcmd = kzalloc(sizeof(*pcmd), GFP_ATOMIC);
if (!pcmd) {
res = _FAIL;
goto exit;
@@ -516,15 +481,14 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->IELength;
- if (psecnetwork->IELength - 12 < 255) {
+ if (psecnetwork->IELength - 12 < 255)
memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], psecnetwork->IELength - 12);
- } else {
+ else
memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], 255);
- }
psecnetwork->IELength = 0;
/* Added by Albert 2009/02/18 */
- /* If the the driver wants to use the bssid to create the connection. */
+ /* If the driver wants to use the bssid to create the connection. */
/* If not, we have to copy the connecting AP's MAC address to it so that */
/* the driver just has the bssid information for PMKIDList searching. */
@@ -550,9 +514,9 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
phtpriv->ht_option = false;
if (pregistrypriv->ht_enable) {
- /* Added by Albert 2010/06/23 */
- /* For the WEP mode, we will use the bg mode to do the connection to avoid some IOT issue. */
- /* Especially for Realtek 8192u SoftAP. */
+ /* Added by Albert 2010/06/23 */
+ /* For the WEP mode, we will use the bg mode to do the connection to avoid some IOT issue. */
+ /* Especially for Realtek 8192u SoftAP. */
if ((padapter->securitypriv.dot11PrivacyAlgrthm != _WEP40_) &&
(padapter->securitypriv.dot11PrivacyAlgrthm != _WEP104_) &&
(padapter->securitypriv.dot11PrivacyAlgrthm != _TKIP_)) {
@@ -611,7 +575,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
res = rtw_enqueue_cmd(cmdpriv, cmdobj);
} else {
/* no need to enqueue, do the cmd hdl directly and free cmd parameter */
- if (H2C_SUCCESS != disconnect_hdl(padapter, (u8 *)param))
+ if (disconnect_hdl(padapter, (u8 *)param) != H2C_SUCCESS)
res = _FAIL;
kfree(param);
}
@@ -629,12 +593,12 @@ u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infra n
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_KERNEL);
if (!ph2c) {
res = false;
goto exit;
}
- psetop = kzalloc(sizeof(struct setopmode_parm), GFP_KERNEL);
+ psetop = kzalloc(sizeof(*psetop), GFP_KERNEL);
if (!psetop) {
kfree(ph2c);
@@ -664,20 +628,20 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key)
struct sta_info *sta = (struct sta_info *)psta;
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_KERNEL);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_KERNEL);
+ psetstakey_para = kzalloc(sizeof(*psetstakey_para), GFP_KERNEL);
if (!psetstakey_para) {
kfree(ph2c);
res = _FAIL;
goto exit;
}
- psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp), GFP_KERNEL);
+ psetstakey_rsp = kzalloc(sizeof(*psetstakey_rsp), GFP_KERNEL);
if (!psetstakey_rsp) {
kfree(ph2c);
kfree(psetstakey_para);
@@ -723,13 +687,13 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
if (!enqueue) {
clear_cam_entry(padapter, entry);
} else {
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- psetstakey_para = kzalloc(sizeof(struct set_stakey_parm),
+ psetstakey_para = kzalloc(sizeof(*psetstakey_para),
GFP_ATOMIC);
if (!psetstakey_para) {
kfree(ph2c);
@@ -737,7 +701,7 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
goto exit;
}
- psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp),
+ psetstakey_rsp = kzalloc(sizeof(*psetstakey_rsp),
GFP_ATOMIC);
if (!psetstakey_rsp) {
kfree(ph2c);
@@ -770,13 +734,13 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
struct addBaReq_parm *paddbareq_parm;
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_ATOMIC);
+ paddbareq_parm = kzalloc(sizeof(*paddbareq_parm), GFP_ATOMIC);
if (!paddbareq_parm) {
kfree(ph2c);
res = _FAIL;
@@ -803,13 +767,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
@@ -844,7 +808,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan)
}
/* prepare cmd parameter */
- setChannelPlan_param = kzalloc(sizeof(struct SetChannelPlan_param),
+ setChannelPlan_param = kzalloc(sizeof(*setChannelPlan_param),
GFP_KERNEL);
if (!setChannelPlan_param) {
res = _FAIL;
@@ -853,7 +817,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan)
setChannelPlan_param->channel_plan = chplan;
/* need enqueue, prepare cmd_obj and enqueue */
- pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ pcmdobj = kzalloc(sizeof(*pcmdobj), GFP_KERNEL);
if (!pcmdobj) {
kfree(setChannelPlan_param);
res = _FAIL;
@@ -983,12 +947,12 @@ static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
mstatus = 1;/* connect */
/* Reset LPS Setting */
padapter->pwrctrlpriv.LpsIdleCount = 0;
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
+ rtl8188e_set_FwJoinBssReport_cmd(padapter, mstatus);
break;
case LPS_CTRL_DISCONNECT:
mstatus = 0;/* disconnect */
LPS_Leave(padapter);
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
+ rtl8188e_set_FwJoinBssReport_cmd(padapter, mstatus);
break;
case LPS_CTRL_SPECIAL_PACKET:
pwrpriv->DelayLPSLastTimeStamp = jiffies;
@@ -1012,16 +976,16 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
u8 res = _SUCCESS;
/* if (!pwrctrlpriv->bLeisurePs) */
- /* return res; */
+ /* return res; */
if (enqueue) {
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm),
GFP_ATOMIC);
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
@@ -1047,7 +1011,10 @@ exit:
static void rpt_timer_setting_wk_hdl(struct adapter *padapter, u16 min_time)
{
- SetHwReg8188EU(padapter, HW_VAR_RPT_TIMER_SETTING, (u8 *)(&min_time));
+ struct hal_data_8188e *haldata = &padapter->haldata;
+ struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+
+ ODM_RA_Set_TxRPT_Time(odmpriv, min_time);
}
u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
@@ -1058,13 +1025,13 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm),
GFP_ATOMIC);
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
@@ -1084,7 +1051,20 @@ exit:
static void antenna_select_wk_hdl(struct adapter *padapter, u8 antenna)
{
- SetHwReg8188EU(padapter, HW_VAR_ANTENNA_DIVERSITY_SELECT, (u8 *)(&antenna));
+ struct hal_data_8188e *haldata = &padapter->haldata;
+
+ /* switch current antenna to optimum antenna */
+ if (haldata->CurAntenna != antenna) {
+ ODM_UpdateRxIdleAnt_88E(&haldata->odmpriv, antenna == 2 ? MAIN_ANT : AUX_ANT);
+ haldata->CurAntenna = antenna;
+ }
+}
+
+static bool rtw_antenna_diversity(struct adapter *adapter)
+{
+ struct hal_data_8188e *haldata = &adapter->haldata;
+
+ return haldata->AntDivCfg != 0;
}
u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
@@ -1092,21 +1072,19 @@ u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
struct cmd_obj *ph2c;
struct drvextra_cmd_parm *pdrvextra_cmd_parm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 support_ant_div;
u8 res = _SUCCESS;
- GetHalDefVar8188EUsb(padapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &support_ant_div);
- if (!support_ant_div)
+ if (!rtw_antenna_diversity(padapter))
return res;
if (enqueue) {
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_KERNEL);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm),
GFP_KERNEL);
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
@@ -1139,13 +1117,13 @@ u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType)
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
return res;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
@@ -1153,8 +1131,8 @@ u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType)
}
pdrvextra_cmd_parm->ec_id = P2P_PROTO_WK_CID;
- pdrvextra_cmd_parm->type_size = intCmdType; /* As the command tppe. */
- pdrvextra_cmd_parm->pbuf = NULL; /* Must be NULL here */
+ pdrvextra_cmd_parm->type_size = intCmdType; /* As the command type. */
+ pdrvextra_cmd_parm->pbuf = NULL; /* Must be NULL here */
init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
@@ -1173,13 +1151,13 @@ u8 rtw_ps_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
- ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ppscmd = kzalloc(sizeof(*ppscmd), GFP_ATOMIC);
if (!ppscmd) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
if (!pdrvextra_cmd_parm) {
kfree(ppscmd);
res = _FAIL;
@@ -1197,6 +1175,11 @@ exit:
return res;
}
+static bool rtw_is_hi_queue_empty(struct adapter *adapter)
+{
+ return (rtw_read32(adapter, REG_HGQ_INFORMATION) & 0x0000ff00) == 0;
+}
+
static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
{
int cnt = 0;
@@ -1208,12 +1191,7 @@ static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
return;
if (psta_bmc->sleepq_len == 0) {
- u8 val = 0;
-
- /* while ((rtw_read32(padapter, 0x414)&0x00ffff00)!= 0) */
- /* while ((rtw_read32(padapter, 0x414)&0x0000ff00)!= 0) */
-
- GetHwReg8188EU(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
+ bool val = rtw_is_hi_queue_empty(padapter);
while (!val) {
msleep(100);
@@ -1223,7 +1201,7 @@ static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
if (cnt > 10)
break;
- GetHwReg8188EU(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
+ val = rtw_is_hi_queue_empty(padapter);
}
if (cnt <= 10) {
@@ -1244,13 +1222,13 @@ u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
@@ -1275,13 +1253,13 @@ u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
@@ -1380,8 +1358,8 @@ u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
p2p_ps_wk_hdl(padapter, pdrvextra_cmd->type_size);
break;
case P2P_PROTO_WK_CID:
- /* Commented by Albert 2011/07/01 */
- /* I used the type_size as the type command */
+ /* Commented by Albert 2011/07/01 */
+ /* I used the type_size as the type command */
p2p_protocol_wk_hdl(padapter, pdrvextra_cmd->type_size);
break;
case CHECK_HIQ_WK_CID:
@@ -1404,11 +1382,8 @@ void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if (pcmd->res == H2C_DROPPED) {
+ if (pcmd->res != H2C_SUCCESS) {
/* TODO: cancel timer and do timeout handler directly... */
- /* need to make timeout handlerOS independent */
- _set_timer(&pmlmepriv->scan_to_timer, 1);
- } else if (pcmd->res != H2C_SUCCESS) {
_set_timer(&pmlmepriv->scan_to_timer, 1);
}
@@ -1416,6 +1391,7 @@ void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
rtw_free_cmd_obj(pcmd);
}
+
void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1426,8 +1402,10 @@ void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
spin_unlock_bh(&pmlmepriv->lock);
return;
- } else /* clear bridge database */
- nat25_db_cleanup(padapter);
+ }
+
+ /* clear bridge database */
+ nat25_db_cleanup(padapter);
/* free cmd */
rtw_free_cmd_obj(pcmd);
@@ -1437,11 +1415,8 @@ void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if (pcmd->res == H2C_DROPPED) {
+ if (pcmd->res != H2C_SUCCESS) {
/* TODO: cancel timer and do timeout handler directly... */
- /* need to make timeout handlerOS independent */
- _set_timer(&pmlmepriv->assoc_timer, 1);
- } else if (pcmd->res != H2C_SUCCESS) {
_set_timer(&pmlmepriv->assoc_timer, 1);
}
@@ -1474,7 +1449,7 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
rtw_indicate_connect(padapter);
} else {
- pwlan = _rtw_alloc_network(pmlmepriv);
+ pwlan = rtw_alloc_network(pmlmepriv);
spin_lock_bh(&pmlmepriv->scanned_queue.lock);
if (!pwlan) {
pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
diff --git a/drivers/staging/r8188eu/core/rtw_fw.c b/drivers/staging/r8188eu/core/rtw_fw.c
index 625d186c3647..0451e5177644 100644
--- a/drivers/staging/r8188eu/core/rtw_fw.c
+++ b/drivers/staging/r8188eu/core/rtw_fw.c
@@ -4,51 +4,43 @@
#include <linux/firmware.h>
#include "../include/rtw_fw.h"
-#define MAX_REG_BOLCK_SIZE 196
+#define MAX_REG_BLOCK_SIZE 196
#define FW_8188E_START_ADDRESS 0x1000
#define MAX_PAGE_SIZE 4096
#define IS_FW_HEADER_EXIST(_fwhdr) \
- ((le16_to_cpu(_fwhdr->Signature) & 0xFFF0) == 0x92C0 || \
- (le16_to_cpu(_fwhdr->Signature) & 0xFFF0) == 0x88C0 || \
- (le16_to_cpu(_fwhdr->Signature) & 0xFFF0) == 0x2300 || \
- (le16_to_cpu(_fwhdr->Signature) & 0xFFF0) == 0x88E0)
-
-/* This structure must be careful with byte-ordering */
+ ((le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x92C0 || \
+ (le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x88C0 || \
+ (le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x2300 || \
+ (le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x88E0)
struct rt_firmware_hdr {
- /* 8-byte alinment required */
- /* LONG WORD 0 ---- */
- __le16 Signature; /* 92C0: test chip; 92C,
- * 88C0: test chip; 88C1: MP A-cut;
- * 92C1: MP A-cut */
- u8 Category; /* AP/NIC and USB/PCI */
- u8 Function; /* Reserved for different FW function
- * indcation, for further use when
- * driver needs to download different
- * FW for different conditions */
- __le16 Version; /* FW Version */
- u8 Subversion; /* FW Subversion, default 0x00 */
- u16 Rsvd1;
-
- /* LONG WORD 1 ---- */
- u8 Month; /* Release time Month field */
- u8 Date; /* Release time Date field */
- u8 Hour; /* Release time Hour field */
- u8 Minute; /* Release time Minute field */
- __le16 RamCodeSize; /* The size of RAM code */
- u8 Foundry;
- u8 Rsvd2;
-
- /* LONG WORD 2 ---- */
- __le32 SvnIdx; /* The SVN entry index */
- u32 Rsvd3;
-
- /* LONG WORD 3 ---- */
- u32 Rsvd4;
- u32 Rsvd5;
+ __le16 signature; /* 92C0: test chip; 92C,
+ * 88C0: test chip; 88C1: MP A-cut;
+ * 92C1: MP A-cut */
+ u8 category; /* AP/NIC and USB/PCI */
+ u8 function; /* Reserved for different FW function
+ * indcation, for further use when
+ * driver needs to download different
+ * FW for different conditions */
+ __le16 version; /* FW Version */
+ u8 subversion; /* FW Subversion, default 0x00 */
+ u8 rsvd1;
+ u8 month; /* Release time Month field */
+ u8 date; /* Release time Date field */
+ u8 hour; /* Release time Hour field */
+ u8 minute; /* Release time Minute field */
+ __le16 ramcodesize; /* The size of RAM code */
+ u8 foundry;
+ u8 rsvd2;
+ __le32 svnidx; /* The SVN entry index */
+ __le32 rsvd3;
+ __le32 rsvd4;
+ __le32 rsvd5;
};
+static_assert(sizeof(struct rt_firmware_hdr) == 32);
+
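
With rsvd1 shrunk from u16 to u8, the lower-cased header packs to exactly 32 bytes (8 + 8 + 8 + 8 across the four original "long words"); the old u16 Rsvd1 attracted compiler padding and made sizeof() larger than the real 32-byte on-wire header, which is presumably why the download path below previously used a bare 32 instead of sizeof(). The static_assert turns any accidental layout change into a build failure. A minimal userspace sketch of the same guard, assuming a hypothetical header with the same field widths:

#include <assert.h>	/* static_assert, C11 */
#include <stdint.h>

struct demo_fw_hdr {
	uint16_t signature;
	uint8_t  category;
	uint8_t  function;
	uint16_t version;
	uint8_t  subversion;
	uint8_t  rsvd1;
	uint8_t  month, date, hour, minute;
	uint16_t ramcodesize;
	uint8_t  foundry;
	uint8_t  rsvd2;
	uint32_t svnidx;
	uint32_t rsvd3, rsvd4, rsvd5;
};

/* Compile-time check: no padding may sneak into the wire format. */
static_assert(sizeof(struct demo_fw_hdr) == 32, "firmware header must be 32 bytes");

int main(void) { return 0; }
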
static void fw_download_enable(struct adapter *padapter, bool enable)
{
u8 tmp;
@@ -71,53 +63,55 @@ static void fw_download_enable(struct adapter *padapter, bool enable)
}
}
-static int block_write(struct adapter *padapter, void *buffer, u32 buffSize)
+static int block_write(struct adapter *padapter, u8 *buffer, u32 size)
{
int ret = _SUCCESS;
- u32 blockSize_p1 = 4; /* (Default) Phase #1 : PCI muse use 4-byte write to download FW */
- u32 blockSize_p2 = 8; /* Phase #2 : Use 8-byte, if Phase#1 use big size to write FW. */
- u32 blockSize_p3 = 1; /* Phase #3 : Use 1-byte, the remnant of FW image. */
- u32 blockCount_p1 = 0, blockCount_p2 = 0, blockCount_p3 = 0;
- u32 remainSize_p1 = 0, remainSize_p2 = 0;
- u8 *bufferPtr = (u8 *)buffer;
- u32 i = 0, offset = 0;
-
- blockSize_p1 = MAX_REG_BOLCK_SIZE;
-
- /* 3 Phase #1 */
- blockCount_p1 = buffSize / blockSize_p1;
- remainSize_p1 = buffSize % blockSize_p1;
-
- for (i = 0; i < blockCount_p1; i++) {
- ret = rtw_writeN(padapter, (FW_8188E_START_ADDRESS + i * blockSize_p1), blockSize_p1, (bufferPtr + i * blockSize_p1));
+ u32 blocks, block_size, remain;
+ u32 i, offset, addr;
+ u8 *data;
+
+ block_size = MAX_REG_BLOCK_SIZE;
+
+ blocks = size / block_size;
+ remain = size % block_size;
+
+ for (i = 0; i < blocks; i++) {
+ addr = FW_8188E_START_ADDRESS + i * block_size;
+ data = buffer + i * block_size;
+
+ ret = rtw_writeN(padapter, addr, block_size, data);
if (ret == _FAIL)
goto exit;
}
- /* 3 Phase #2 */
- if (remainSize_p1) {
- offset = blockCount_p1 * blockSize_p1;
+ if (remain) {
+ offset = blocks * block_size;
+ block_size = 8;
- blockCount_p2 = remainSize_p1 / blockSize_p2;
- remainSize_p2 = remainSize_p1 % blockSize_p2;
+ blocks = remain / block_size;
+ remain = remain % block_size;
- for (i = 0; i < blockCount_p2; i++) {
- ret = rtw_writeN(padapter, (FW_8188E_START_ADDRESS + offset + i * blockSize_p2), blockSize_p2, (bufferPtr + offset + i * blockSize_p2));
+ for (i = 0; i < blocks; i++) {
+ addr = FW_8188E_START_ADDRESS + offset + i * block_size;
+ data = buffer + offset + i * block_size;
+ ret = rtw_writeN(padapter, addr, block_size, data);
if (ret == _FAIL)
goto exit;
}
}
- /* 3 Phase #3 */
- if (remainSize_p2) {
- offset = (blockCount_p1 * blockSize_p1) + (blockCount_p2 * blockSize_p2);
+ if (remain) {
+ offset += blocks * block_size;
- blockCount_p3 = remainSize_p2 / blockSize_p3;
+ /* block size 1 */
+ blocks = remain;
- for (i = 0; i < blockCount_p3; i++) {
- ret = rtw_write8(padapter, (FW_8188E_START_ADDRESS + offset + i), *(bufferPtr + offset + i));
+ for (i = 0; i < blocks; i++) {
+ addr = FW_8188E_START_ADDRESS + offset + i;
+ data = buffer + offset + i;
+ ret = rtw_write8(padapter, addr, *data);
if (ret == _FAIL)
goto exit;
}
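
The rewrite keeps the original three-pass download: MAX_REG_BLOCK_SIZE-byte (196) chunks first, then 8-byte chunks for the remainder, then single bytes, reusing the blocks/remain/offset variables between passes instead of carrying separate _p1/_p2/_p3 counters. A standalone sketch of that chunking logic (the write callback and sizes below are illustrative, not the driver's USB I/O):

#include <stdio.h>

/* Stand-in for rtw_writeN()/rtw_write8(); here it only logs the chunk. */
static void chunk_write(unsigned addr, unsigned len)
{
	printf("write %u bytes at 0x%x\n", len, addr);
}

static void phased_write(unsigned base, unsigned size)
{
	unsigned block = 196, offset = 0, i;
	unsigned blocks = size / block, remain = size % block;

	for (i = 0; i < blocks; i++)
		chunk_write(base + i * block, block);

	if (remain) {
		offset = blocks * block;
		block = 8;
		blocks = remain / block;
		remain %= block;

		for (i = 0; i < blocks; i++)
			chunk_write(base + offset + i * block, block);
	}

	if (remain) {
		offset += blocks * block;

		for (i = 0; i < remain; i++)
			chunk_write(base + offset + i, 1);
	}
}

int main(void)
{
	phased_write(0x1000, 1000);	/* 5 x 196 + 2 x 8 + 4 x 1 = 1000 */
	return 0;
}
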
@@ -127,7 +121,7 @@ exit:
return ret;
}
-static int page_write(struct adapter *padapter, u32 page, void *buffer, u32 size)
+static int page_write(struct adapter *padapter, u32 page, u8 *buffer, u32 size)
{
u8 value8;
u8 u8Page = (u8)(page & 0x07);
@@ -138,21 +132,20 @@ static int page_write(struct adapter *padapter, u32 page, void *buffer, u32 size
return block_write(padapter, buffer, size);
}
-static int write_fw(struct adapter *padapter, void *buffer, u32 size)
+static int write_fw(struct adapter *padapter, u8 *buffer, u32 size)
{
/* Since we need to dynamically decide the firmware download method, we call this function to get the chip version. */
/* We can remove _ReadChipVersion from ReadpadapterInfo8192C later. */
int ret = _SUCCESS;
u32 pageNums, remainSize;
u32 page, offset;
- u8 *bufferPtr = (u8 *)buffer;
pageNums = size / MAX_PAGE_SIZE;
remainSize = size % MAX_PAGE_SIZE;
for (page = 0; page < pageNums; page++) {
offset = page * MAX_PAGE_SIZE;
- ret = page_write(padapter, page, bufferPtr + offset, MAX_PAGE_SIZE);
+ ret = page_write(padapter, page, buffer + offset, MAX_PAGE_SIZE);
if (ret == _FAIL)
goto exit;
@@ -160,7 +153,7 @@ static int write_fw(struct adapter *padapter, void *buffer, u32 size)
if (remainSize) {
offset = pageNums * MAX_PAGE_SIZE;
page = pageNums;
- ret = page_write(padapter, page, bufferPtr + offset, remainSize);
+ ret = page_write(padapter, page, buffer + offset, remainSize);
if (ret == _FAIL)
goto exit;
@@ -247,14 +240,12 @@ int rtl8188e_firmware_download(struct adapter *padapter)
{
int ret = _SUCCESS;
u8 write_fw_retry = 0;
- u32 fwdl_start_time;
+ unsigned long fwdl_timeout;
struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
struct device *device = dvobj_to_dev(dvobj);
struct rt_firmware_hdr *fwhdr = NULL;
- u16 fw_version, fw_subversion, fw_signature;
u8 *fw_data;
u32 fw_size;
- static int log_version;
if (!dvobj->firmware.data)
ret = load_firmware(&dvobj->firmware, device);
@@ -265,21 +256,15 @@ int rtl8188e_firmware_download(struct adapter *padapter)
fw_data = dvobj->firmware.data;
fw_size = dvobj->firmware.size;
- /* To Check Fw header. Added by tynli. 2009.12.04. */
fwhdr = (struct rt_firmware_hdr *)dvobj->firmware.data;
- fw_version = le16_to_cpu(fwhdr->Version);
- fw_subversion = fwhdr->Subversion;
- fw_signature = le16_to_cpu(fwhdr->Signature);
-
- if (!log_version++)
- pr_info("%sFirmware Version %d, SubVersion %d, Signature 0x%x\n",
- DRIVER_PREFIX, fw_version, fw_subversion, fw_signature);
-
if (IS_FW_HEADER_EXIST(fwhdr)) {
- /* Shift 32 bytes for FW header */
- fw_data = fw_data + 32;
- fw_size = fw_size - 32;
+ pr_info_once("R8188EU: Firmware Version %d, SubVersion %d, Signature 0x%x\n",
+ le16_to_cpu(fwhdr->version), fwhdr->subversion,
+ le16_to_cpu(fwhdr->signature));
+
+ fw_data = fw_data + sizeof(struct rt_firmware_hdr);
+ fw_size = fw_size - sizeof(struct rt_firmware_hdr);
}
/* Suggested by Filen. If 8051 is running in RAM code, driver should inform Fw to reset by itself, */
@@ -290,7 +275,7 @@ int rtl8188e_firmware_download(struct adapter *padapter)
}
fw_download_enable(padapter, true);
- fwdl_start_time = jiffies;
+ fwdl_timeout = jiffies + msecs_to_jiffies(500);
while (1) {
/* reset the FWDL chksum */
rtw_write8(padapter, REG_MCUFWDL, rtw_read8(padapter, REG_MCUFWDL) | FWDL_CHKSUM_RPT);
@@ -298,7 +283,7 @@ int rtl8188e_firmware_download(struct adapter *padapter)
ret = write_fw(padapter, fw_data, fw_size);
if (ret == _SUCCESS ||
- (rtw_get_passing_time_ms(fwdl_start_time) > 500 && write_fw_retry++ >= 3))
+ (time_after(jiffies, fwdl_timeout) && write_fw_retry++ >= 3))
break;
}
fw_download_enable(padapter, false);
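
The retry loop now works against an absolute deadline (jiffies + msecs_to_jiffies(500)) checked with time_after(), rather than recomputing elapsed milliseconds from a saved start time. A rough userspace analog of the deadline pattern, with CLOCK_MONOTONIC standing in for jiffies and a stub in place of write_fw() (it busy-waits for ~500 ms, which is fine for a demo):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static bool try_download(void)
{
	return false;	/* pretend every attempt fails */
}

int main(void)
{
	long deadline = now_ms() + 500;	/* like jiffies + msecs_to_jiffies(500) */
	int retry = 0;

	while (1) {
		if (try_download() ||
		    (now_ms() > deadline && retry++ >= 3))
			break;
	}
	printf("stopped after %d post-deadline retries\n", retry);
	return 0;
}
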
diff --git a/drivers/staging/r8188eu/core/rtw_ieee80211.c b/drivers/staging/r8188eu/core/rtw_ieee80211.c
index 5a0e42ed4a47..385a9ed8eff7 100644
--- a/drivers/staging/r8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/r8188eu/core/rtw_ieee80211.c
@@ -97,16 +97,15 @@ bool rtw_is_cckratesonly_included(u8 *rate)
int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
{
- if (channel > 14) {
+ if (channel > 14)
return WIRELESS_INVALID;
- } else { /* could be pure B, pure G, or B/G */
- if (rtw_is_cckratesonly_included(rate))
- return WIRELESS_11B;
- else if (rtw_is_cckrates_included(rate))
- return WIRELESS_11BG;
- else
- return WIRELESS_11G;
- }
+ /* could be pure B, pure G, or B/G */
+ if (rtw_is_cckratesonly_included(rate))
+ return WIRELESS_11B;
+ else if (rtw_is_cckrates_included(rate))
+ return WIRELESS_11BG;
+ else
+ return WIRELESS_11G;
}
u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len, unsigned char *source,
@@ -160,11 +159,10 @@ u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit)
if (*p == index) {
*len = *(p + 1);
return p;
- } else {
- tmp = *(p + 1);
- p += (tmp + 2);
- i += (tmp + 2);
}
+ tmp = *(p + 1);
+ p += (tmp + 2);
+ i += (tmp + 2);
if (i >= limit)
break;
}
@@ -295,10 +293,9 @@ unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
goto check_next_ie;
*wpa_ie_len = *(pbuf + 1);
return pbuf;
- } else {
- *wpa_ie_len = 0;
- return NULL;
}
+ *wpa_ie_len = 0;
+ return NULL;
check_next_ie:
limit_new = limit - (pbuf - pie) - 2 - len;
@@ -558,9 +555,8 @@ u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
cnt += in_ie[cnt + 1] + 2;
break;
- } else {
- cnt += in_ie[cnt + 1] + 2; /* goto next */
}
+ cnt += in_ie[cnt + 1] + 2; /* goto next */
}
return wpsie_ptr;
}
@@ -604,9 +600,8 @@ u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_att
if (len_attr)
*len_attr = attr_len;
break;
- } else {
- attr_ptr += attr_len; /* goto next */
}
+ attr_ptr += attr_len; /* goto next */
}
return target_attr_ptr;
}
@@ -901,9 +896,8 @@ u8 *rtw_get_p2p_ie(u8 *in_ie, int in_len, u8 *p2p_ie, uint *p2p_ielen)
if (p2p_ielen)
*p2p_ielen = in_ie[cnt + 1] + 2;
return p2p_ie_ptr;
- } else {
- cnt += in_ie[cnt + 1] + 2; /* goto next */
}
+ cnt += in_ie[cnt + 1] + 2; /* goto next */
}
return NULL;
}
@@ -948,9 +942,8 @@ u8 *rtw_get_p2p_attr(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id, u8 *buf_attr
if (len_attr)
*len_attr = attr_len;
break;
- } else {
- attr_ptr += attr_len; /* goto next */
}
+ attr_ptr += attr_len; /* goto next */
}
return target_attr_ptr;
}
@@ -1058,7 +1051,7 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12);
if (pbuf && (wpa_ielen > 0)) {
- if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x)) {
+ if (rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x) == _SUCCESS) {
pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
pnetwork->BcnInfo.group_cipher = group_cipher;
pnetwork->BcnInfo.is_8021x = is8021x;
@@ -1068,7 +1061,7 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12);
if (pbuf && (wpa_ielen > 0)) {
- if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x)) {
+ if (rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x) == _SUCCESS) {
pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
pnetwork->BcnInfo.group_cipher = group_cipher;
pnetwork->BcnInfo.is_8021x = is8021x;
diff --git a/drivers/staging/r8188eu/core/rtw_ioctl_set.c b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
index 4b78e42d180d..7ba75f73e47e 100644
--- a/drivers/staging/r8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
@@ -44,7 +44,7 @@ u8 rtw_do_join(struct adapter *padapter)
pmlmepriv->to_roaming > 0) {
/* submit site_survey_cmd */
ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
- if (_SUCCESS != ret)
+ if (ret != _SUCCESS)
pmlmepriv->to_join = false;
} else {
pmlmepriv->to_join = false;
@@ -91,7 +91,7 @@ u8 rtw_do_join(struct adapter *padapter)
if (!pmlmepriv->LinkDetectInfo.bBusyTraffic ||
pmlmepriv->to_roaming > 0) {
ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
- if (_SUCCESS != ret)
+ if (ret != _SUCCESS)
pmlmepriv->to_join = false;
} else {
ret = _FAIL;
diff --git a/drivers/staging/r8188eu/core/rtw_iol.c b/drivers/staging/r8188eu/core/rtw_iol.c
index e14e3746efdd..af8e84a41b85 100644
--- a/drivers/staging/r8188eu/core/rtw_iol.c
+++ b/drivers/staging/r8188eu/core/rtw_iol.c
@@ -57,10 +57,10 @@ int rtw_IOL_append_cmds(struct xmit_frame *xmit_frame, u8 *IOL_cmds, u32 cmd_len
bool rtw_IOL_applied(struct adapter *adapter)
{
- if (1 == adapter->registrypriv.fw_iol)
+ if (adapter->registrypriv.fw_iol == 1)
return true;
- if ((2 == adapter->registrypriv.fw_iol) &&
+ if ((adapter->registrypriv.fw_iol == 2) &&
(adapter_to_dvobj(adapter)->pusbdev->speed != USB_SPEED_HIGH))
return true;
diff --git a/drivers/staging/r8188eu/core/rtw_led.c b/drivers/staging/r8188eu/core/rtw_led.c
index ccd43accb7dc..2f3000428af7 100644
--- a/drivers/staging/r8188eu/core/rtw_led.c
+++ b/drivers/staging/r8188eu/core/rtw_led.c
@@ -110,7 +110,7 @@ static void blink_work(struct work_struct *work)
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
- } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ } else {
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
@@ -131,7 +131,7 @@ static void blink_work(struct work_struct *work)
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
- } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ } else {
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
@@ -278,7 +278,7 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction)
else
pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
- }
+ }
break;
case LED_CTL_TX:
case LED_CTL_RX:
@@ -304,7 +304,7 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction)
}
break;
case LED_CTL_START_WPS: /* wait until xinpin finish */
- if (!pLed->bLedWPSBlinkInProgress) {
+ if (!pLed->bLedWPSBlinkInProgress) {
if (pLed->bLedNoLinkBlinkInProgress) {
cancel_delayed_work(&pLed->blink_work);
pLed->bLedNoLinkBlinkInProgress = false;
@@ -328,7 +328,7 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction)
else
pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
- }
+ }
break;
case LED_CTL_STOP_WPS:
if (pLed->bLedNoLinkBlinkInProgress) {
diff --git a/drivers/staging/r8188eu/core/rtw_mlme.c b/drivers/staging/r8188eu/core/rtw_mlme.c
index 6f0bff186477..5a815642c3f6 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme.c
@@ -16,7 +16,6 @@
#include "../include/usb_osintf.h"
#include "../include/rtl8188e_dm.h"
-extern unsigned char MCS_rate_2R[16];
extern unsigned char MCS_rate_1R[16];
void rtw_set_roaming(struct adapter *adapter, u8 to_roaming)
@@ -31,60 +30,6 @@ u8 rtw_to_roaming(struct adapter *adapter)
return adapter->mlmepriv.to_roaming;
}
-int _rtw_init_mlme_priv(struct adapter *padapter)
-{
- int i;
- u8 *pbuf;
- struct wlan_network *pnetwork;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int res = _SUCCESS;
-
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
-
- pmlmepriv->nic_hdl = (u8 *)padapter;
-
- pmlmepriv->pscanned = NULL;
- pmlmepriv->fw_state = 0;
- pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown;
- pmlmepriv->scan_mode = SCAN_ACTIVE;/* 1: active, 0: pasive. Maybe someday we should rename this varable to "active_mode" (Jeff) */
-
- spin_lock_init(&pmlmepriv->lock);
- rtw_init_queue(&pmlmepriv->free_bss_pool);
- rtw_init_queue(&pmlmepriv->scanned_queue);
-
- set_scanned_network_val(pmlmepriv, 0);
-
- memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
-
- pbuf = vzalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
-
- if (!pbuf) {
- res = _FAIL;
- goto exit;
- }
- pmlmepriv->free_bss_buf = pbuf;
-
- pnetwork = (struct wlan_network *)pbuf;
-
- for (i = 0; i < MAX_BSS_CNT; i++) {
- INIT_LIST_HEAD(&pnetwork->list);
-
- list_add_tail(&pnetwork->list, &pmlmepriv->free_bss_pool.queue);
-
- pnetwork++;
- }
-
- /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
-
- rtw_clear_scan_deny(padapter);
-
- rtw_init_mlme_timer(padapter);
-
-exit:
-
- return res;
-}
-
static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
{
kfree(*ppie);
@@ -95,7 +40,6 @@ static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
{
kfree(pmlmepriv->assoc_req);
- kfree(pmlmepriv->assoc_rsp);
rtw_free_mlme_ie_data(&pmlmepriv->wps_beacon_ie, &pmlmepriv->wps_beacon_ie_len);
rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_req_ie, &pmlmepriv->wps_probe_req_ie_len);
rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_resp_ie, &pmlmepriv->wps_probe_resp_ie_len);
@@ -108,49 +52,6 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
rtw_free_mlme_ie_data(&pmlmepriv->p2p_assoc_req_ie, &pmlmepriv->p2p_assoc_req_ie_len);
}
-void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
-{
-
- rtw_free_mlme_priv_ie_data(pmlmepriv);
-
- if (pmlmepriv) {
- vfree(pmlmepriv->free_bss_buf);
- }
-
-}
-
-struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *free_queue) */
-{
- struct wlan_network *pnetwork;
- struct __queue *free_queue = &pmlmepriv->free_bss_pool;
- struct list_head *plist = NULL;
-
- spin_lock_bh(&free_queue->lock);
-
- if (list_empty(&free_queue->queue)) {
- pnetwork = NULL;
- goto exit;
- }
- plist = (&free_queue->queue)->next;
-
- pnetwork = container_of(plist, struct wlan_network, list);
-
- list_del_init(&pnetwork->list);
-
- pnetwork->network_type = 0;
- pnetwork->fixed = false;
- pnetwork->last_scanned = jiffies;
- pnetwork->aid = 0;
- pnetwork->join_res = 0;
-
- pmlmepriv->num_of_scanned++;
-
-exit:
- spin_unlock_bh(&free_queue->lock);
-
- return pnetwork;
-}
-
void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork, u8 isfreeall)
{
u32 curr_time, delta_time;
@@ -194,7 +95,7 @@ void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *
/*
return the wlan_network with the matching addr
- Shall be calle under atomic context... to avoid possible racing condition...
+ Shall be called under atomic context... to avoid possible race conditions...
*/
struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr)
{
@@ -291,23 +192,92 @@ u8 *rtw_get_beacon_interval_from_ie(u8 *ie)
int rtw_init_mlme_priv(struct adapter *padapter)/* struct mlme_priv *pmlmepriv) */
{
- int res;
+ int i;
+ u8 *pbuf;
+ struct wlan_network *pnetwork;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int res = _SUCCESS;
- res = _rtw_init_mlme_priv(padapter);/* (pmlmepriv); */
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
+
+ pmlmepriv->nic_hdl = (u8 *)padapter;
+
+ pmlmepriv->pscanned = NULL;
+ pmlmepriv->fw_state = 0;
+ pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown;
+ pmlmepriv->scan_mode = SCAN_ACTIVE;/* 1: active, 0: passive. Maybe someday we should rename this variable to "active_mode" (Jeff) */
+
+ spin_lock_init(&pmlmepriv->lock);
+ rtw_init_queue(&pmlmepriv->free_bss_pool);
+ rtw_init_queue(&pmlmepriv->scanned_queue);
+
+ set_scanned_network_val(pmlmepriv, 0);
+
+ memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
+
+ pbuf = vzalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
+
+ if (!pbuf) {
+ res = _FAIL;
+ goto exit;
+ }
+ pmlmepriv->free_bss_buf = pbuf;
+
+ pnetwork = (struct wlan_network *)pbuf;
+
+ for (i = 0; i < MAX_BSS_CNT; i++) {
+ INIT_LIST_HEAD(&pnetwork->list);
+
+ list_add_tail(&pnetwork->list, &pmlmepriv->free_bss_pool.queue);
+
+ pnetwork++;
+ }
+
+ /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
+
+ rtw_clear_scan_deny(padapter);
+
+ rtw_init_mlme_timer(padapter);
+
+exit:
return res;
}
void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
{
- _rtw_free_mlme_priv(pmlmepriv);
+ rtw_free_mlme_priv_ie_data(pmlmepriv);
+ vfree(pmlmepriv->free_bss_buf);
}
-static struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
+struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
{
struct wlan_network *pnetwork;
+ struct __queue *free_queue = &pmlmepriv->free_bss_pool;
+ struct list_head *plist = NULL;
+
+ spin_lock_bh(&free_queue->lock);
+
+ if (list_empty(&free_queue->queue)) {
+ pnetwork = NULL;
+ goto exit;
+ }
+ plist = (&free_queue->queue)->next;
+
+ pnetwork = container_of(plist, struct wlan_network, list);
+
+ list_del_init(&pnetwork->list);
- pnetwork = _rtw_alloc_network(pmlmepriv);
+ pnetwork->network_type = 0;
+ pnetwork->fixed = false;
+ pnetwork->last_scanned = jiffies;
+ pnetwork->aid = 0;
+ pnetwork->join_res = 0;
+
+ pmlmepriv->num_of_scanned++;
+
+exit:
+ spin_unlock_bh(&free_queue->lock);
return pnetwork;
}
@@ -330,7 +300,7 @@ void rtw_free_network_queue(struct adapter *dev, u8 isfreeall)
/*
return the wlan_network with the matching addr
- Shall be calle under atomic context... to avoid possible racing condition...
+ Shall be called under atomic context... to avoid possible race conditions...
*/
struct wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr)
{
@@ -465,6 +435,13 @@ static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex
}
+u8 rtw_current_antenna(struct adapter *adapter)
+{
+ struct hal_data_8188e *haldata = &adapter->haldata;
+
+ return haldata->CurAntenna;
+}
+
/*
Caller must hold pmlmepriv->lock first.
*/
@@ -498,7 +475,8 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
/* If there are no more slots, expire the oldest */
pnetwork = oldest;
- GetHalDefVar8188EUsb(adapter, HAL_DEF_CURRENT_ANTENNA, &target->PhyInfo.Optimum_antenna);
+ target->PhyInfo.Optimum_antenna = rtw_current_antenna(adapter);
+
memcpy(&pnetwork->network, target, get_wlan_bssid_ex_sz(target));
/* variable initialize */
pnetwork->fixed = false;
@@ -521,7 +499,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
bssid_ex_sz = get_wlan_bssid_ex_sz(target);
target->Length = bssid_ex_sz;
- GetHalDefVar8188EUsb(adapter, HAL_DEF_CURRENT_ANTENNA, &target->PhyInfo.Optimum_antenna);
+ target->PhyInfo.Optimum_antenna = rtw_current_antenna(adapter);
memcpy(&pnetwork->network, target, bssid_ex_sz);
pnetwork->last_scanned = jiffies;
@@ -567,8 +545,8 @@ static void rtw_add_network(struct adapter *adapter,
/* select the desired network based on the capability of the (i)bss. */
/* check items: (1) security */
-/* (2) network_type */
-/* (3) WMM */
+/* (2) network_type */
+/* (3) WMM */
/* (4) HT */
/* (5) others */
static bool rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwork)
@@ -715,15 +693,12 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
pmlmepriv->to_join = false;
s_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
- if (_SUCCESS == s_ret) {
- _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
- } else if (s_ret == 2) { /* there is no need to wait for join */
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
- rtw_indicate_connect(adapter);
+ if (s_ret == _SUCCESS) {
+ _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
} else {
if (rtw_to_roaming(adapter) != 0) {
if (--pmlmepriv->to_roaming == 0 ||
- _SUCCESS != rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0)) {
+ rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0) != _SUCCESS) {
rtw_set_roaming(adapter, 0);
rtw_free_assoc_resources(adapter, 1);
rtw_indicate_disconnect(adapter);
@@ -748,14 +723,6 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
rtw_os_xmit_schedule(adapter);
}
-void rtw_dummy_event_callback(struct adapter *adapter, u8 *pbuf)
-{
-}
-
-void rtw_fwdbg_event_callback(struct adapter *adapter, u8 *pbuf)
-{
-}
-
static void free_scanqueue(struct mlme_priv *pmlmepriv)
{
struct __queue *free_queue = &pmlmepriv->free_bss_pool;
@@ -911,9 +878,8 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
memset((u8 *)&psta->dot11txpn, 0, sizeof(union pn48));
memset((u8 *)&psta->dot11rxpn, 0, sizeof(union pn48));
}
- /* Commented by Albert 2012/07/21 */
- /* When doing the WPS, the wps_ie_len won't equal to 0 */
- /* And the Wi-Fi driver shouldn't allow the data packet to be tramsmitted. */
+ /* When doing WPS, the wps_ie_len won't be 0 */
+ /* And the Wi-Fi driver shouldn't allow the data packet to be transmitted. */
if (padapter->securitypriv.wps_ie_len != 0) {
psta->ieee8021x_blocked = true;
padapter->securitypriv.wps_ie_len = 0;
@@ -1071,8 +1037,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
rtw_indicate_connect(adapter);
}
+ spin_unlock_bh(&pmlmepriv->lock);
/* s5. Cancel assoc_timer */
del_timer_sync(&pmlmepriv->assoc_timer);
+ spin_lock_bh(&pmlmepriv->lock);
} else {
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto ignore_joinbss_callback;
@@ -1105,6 +1073,11 @@ void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf)
}
+void rtw_set_max_rpt_macid(struct adapter *adapter, u8 macid)
+{
+ rtw_write8(adapter, REG_TX_RPT_CTRL + 1, macid + 1);
+}
+
static u8 search_max_mac_id(struct adapter *padapter)
{
u8 mac_id;
@@ -1141,7 +1114,8 @@ void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta,
return;
macid = search_max_mac_id(adapter);
- SetHwReg8188EU(adapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&macid);
+ rtw_set_max_rpt_macid(adapter, macid);
+
/* MACID|OPMODE:1 connect */
media_status_rpt = (u16)((psta->mac_id << 8) | mstatus);
SetHwReg8188EU(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status_rpt);
@@ -1299,7 +1273,7 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
}
/*
-* _rtw_join_timeout_handler - Timeout/faliure handler for CMD JoinBss
+* _rtw_join_timeout_handler - Timeout/failure handler for CMD JoinBss
* @adapter: pointer to struct adapter structure
*/
void _rtw_join_timeout_handler (struct adapter *adapter)
@@ -1310,7 +1284,7 @@ void _rtw_join_timeout_handler (struct adapter *adapter)
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
return;
- spin_lock_bh(&pmlmepriv->lock);
+ spin_lock_irq(&pmlmepriv->lock);
if (rtw_to_roaming(adapter) > 0) { /* join timeout caused by roaming */
while (1) {
@@ -1329,12 +1303,12 @@ void _rtw_join_timeout_handler (struct adapter *adapter)
rtw_indicate_disconnect(adapter);
free_scanqueue(pmlmepriv);/* */
}
- spin_unlock_bh(&pmlmepriv->lock);
+ spin_unlock_irq(&pmlmepriv->lock);
}
/*
-* rtw_scan_timeout_handler - Timeout/Faliure handler for CMD SiteSurvey
+* rtw_scan_timeout_handler - Timeout/Failure handler for CMD SiteSurvey
* @adapter: pointer to struct adapter structure
*/
void rtw_scan_timeout_handler (struct adapter *adapter)
@@ -1414,6 +1388,7 @@ static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
{
int updated = false;
struct adapter *adapter = container_of(pmlmepriv, struct adapter, mlmepriv);
+ unsigned long scan_res_expire;
/* check bssid, if needed */
if (pmlmepriv->assoc_by_bssid) {
@@ -1431,8 +1406,9 @@ static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
if (!rtw_is_desired_network(adapter, competitor))
goto exit;
+ scan_res_expire = competitor->last_scanned + msecs_to_jiffies(RTW_SCAN_RESULT_EXPIRE);
if (rtw_to_roaming(adapter) > 0) {
- if (rtw_get_passing_time_ms((u32)competitor->last_scanned) >= RTW_SCAN_RESULT_EXPIRE ||
+ if (time_after(jiffies, scan_res_expire) ||
!is_same_ess(&competitor->network, &pmlmepriv->cur_network.network))
goto exit;
}
@@ -1461,7 +1437,6 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
struct wlan_network *candidate = NULL;
- u8 supp_ant_div = false;
spin_lock_bh(&pmlmepriv->scanned_queue.lock);
phead = get_list_head(queue);
@@ -1488,12 +1463,6 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
rtw_free_assoc_resources(adapter, 0);
}
- GetHalDefVar8188EUsb(adapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &supp_ant_div);
- if (supp_ant_div) {
- u8 cur_ant;
- GetHalDefVar8188EUsb(adapter, HAL_DEF_CURRENT_ANTENNA, &cur_ant);
- }
-
ret = rtw_joinbss_cmd(adapter, candidate);
exit:
@@ -1509,13 +1478,13 @@ int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
int res = _SUCCESS;
- pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ pcmd = kzalloc(sizeof(*pcmd), GFP_KERNEL);
if (!pcmd) {
res = _FAIL; /* try again */
goto exit;
}
- psetauthparm = kzalloc(sizeof(struct setauth_parm), GFP_KERNEL);
+ psetauthparm = kzalloc(sizeof(*psetauthparm), GFP_KERNEL);
if (!psetauthparm) {
kfree(pcmd);
res = _FAIL;
@@ -1628,38 +1597,22 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
}
/* */
-/* Ported from 8185: IsInPreAuthKeyList(). (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.) */
-/* Added by Annie, 2006-05-07. */
-/* */
/* Search by BSSID, */
/* Return Value: */
-/* -1 :if there is no pre-auth key in the table */
-/* >= 0 :if there is pre-auth key, and return the entry id */
+/* -1 :if there is no pre-auth key in the table */
+/* >= 0 :if there is pre-auth key, and return the entry id */
/* */
/* */
static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
{
- struct security_priv *psecuritypriv = &Adapter->securitypriv;
- int i = 0;
-
- do {
- if ((psecuritypriv->PMKIDList[i].bUsed) &&
- (!memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN))) {
- break;
- } else {
- i++;
- /* continue; */
- }
-
- } while (i < NUM_PMKID_CACHE);
+ struct security_priv *p = &Adapter->securitypriv;
+ int i;
- if (i == NUM_PMKID_CACHE) {
- i = -1;/* Could not find. */
- } else {
- /* There is one Pre-Authentication Key for the specific BSSID. */
- }
- return i;
+ for (i = 0; i < NUM_PMKID_CACHE; i++)
+ if (p->PMKIDList[i].bUsed && !memcmp(p->PMKIDList[i].Bssid, bssid, ETH_ALEN))
+ return i;
+ return -1;
}
/* */
@@ -1796,10 +1749,23 @@ void rtw_update_registrypriv_dev_network(struct adapter *adapter)
}
+static void rtw_set_threshold(struct adapter *adapter)
+{
+ struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+ struct ht_priv *htpriv = &mlmepriv->htpriv;
+
+ if (htpriv->ht_option && adapter->registrypriv.wifi_spec != 1) {
+ /* validate usb rx aggregation, use init value. */
+ rtw_write8(adapter, REG_RXDMA_AGG_PG_TH, USB_RXAGG_PAGE_COUNT);
+ } else {
+ /* invalidate usb rx aggregation */
+ rtw_write8(adapter, REG_RXDMA_AGG_PG_TH, 1);
+ }
+}
+
/* the function is at passive_level */
void rtw_joinbss_reset(struct adapter *padapter)
{
- u8 threshold;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
@@ -1810,18 +1776,7 @@ void rtw_joinbss_reset(struct adapter *padapter)
phtpriv->ampdu_enable = false;/* reset to disabled */
- /* TH = 1 => means that invalidate usb rx aggregation */
- /* TH = 0 => means that validate usb rx aggregation, use init value. */
- if (phtpriv->ht_option) {
- if (padapter->registrypriv.wifi_spec == 1)
- threshold = 1;
- else
- threshold = 0;
- SetHwReg8188EU(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
- } else {
- threshold = 1;
- SetHwReg8188EU(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
- }
+ rtw_set_threshold(padapter);
}
/* the function is >= passive_level */
@@ -1984,7 +1939,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
issued = (phtpriv->agg_enable_bitmap >> priority) & 0x1;
issued |= (phtpriv->candidate_tid_bitmap >> priority) & 0x1;
- if (0 == issued) {
+ if (issued == 0) {
psta->htpriv.candidate_tid_bitmap |= BIT((u8)priority);
rtw_addbareq_cmd(padapter, (u8)priority, pattrib->ra);
}
@@ -2011,19 +1966,19 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
else
pnetwork = &pmlmepriv->cur_network;
- if (0 < rtw_to_roaming(padapter)) {
+ if (rtw_to_roaming(padapter) > 0) {
memcpy(&pmlmepriv->assoc_ssid, &pnetwork->network.Ssid, sizeof(struct ndis_802_11_ssid));
pmlmepriv->assoc_by_bssid = false;
while (1) {
do_join_r = rtw_do_join(padapter);
- if (_SUCCESS == do_join_r) {
+ if (do_join_r == _SUCCESS) {
break;
} else {
pmlmepriv->to_roaming--;
- if (0 < pmlmepriv->to_roaming) {
+ if (pmlmepriv->to_roaming > 0) {
continue;
} else {
rtw_indicate_disconnect(padapter);
diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
index 10d5f1222936..faf23fc950c5 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
@@ -14,39 +14,22 @@
#include "../include/rtl8188e_xmit.h"
#include "../include/rtl8188e_dm.h"
-static struct mlme_handler mlme_sta_tbl[] = {
- {WIFI_ASSOCREQ, "OnAssocReq", &OnAssocReq},
- {WIFI_ASSOCRSP, "OnAssocRsp", &OnAssocRsp},
- {WIFI_REASSOCREQ, "OnReAssocReq", &OnAssocReq},
- {WIFI_REASSOCRSP, "OnReAssocRsp", &OnAssocRsp},
- {WIFI_PROBEREQ, "OnProbeReq", &OnProbeReq},
- {WIFI_PROBERSP, "OnProbeRsp", &OnProbeRsp},
-
- /*----------------------------------------------------------
- below 2 are reserved
- -----------------------------------------------------------*/
- {0, "DoReserved", &DoReserved},
- {0, "DoReserved", &DoReserved},
- {WIFI_BEACON, "OnBeacon", &OnBeacon},
- {WIFI_ATIM, "OnATIM", &OnAtim},
- {WIFI_DISASSOC, "OnDisassoc", &OnDisassoc},
- {WIFI_AUTH, "OnAuth", &OnAuthClient},
- {WIFI_DEAUTH, "OnDeAuth", &OnDeAuth},
- {WIFI_ACTION, "OnAction", &OnAction},
-};
-
-static struct action_handler OnAction_tbl[] = {
- {RTW_WLAN_CATEGORY_SPECTRUM_MGMT, "ACTION_SPECTRUM_MGMT", on_action_spct},
- {RTW_WLAN_CATEGORY_QOS, "ACTION_QOS", &OnAction_qos},
- {RTW_WLAN_CATEGORY_DLS, "ACTION_DLS", &OnAction_dls},
- {RTW_WLAN_CATEGORY_BACK, "ACTION_BACK", &OnAction_back},
- {RTW_WLAN_CATEGORY_PUBLIC, "ACTION_PUBLIC", on_action_public},
- {RTW_WLAN_CATEGORY_RADIO_MEASUREMENT, "ACTION_RADIO_MEASUREMENT", &DoReserved},
- {RTW_WLAN_CATEGORY_FT, "ACTION_FT", &DoReserved},
- {RTW_WLAN_CATEGORY_HT, "ACTION_HT", &OnAction_ht},
- {RTW_WLAN_CATEGORY_SA_QUERY, "ACTION_SA_QUERY", &DoReserved},
- {RTW_WLAN_CATEGORY_WMM, "ACTION_WMM", &OnAction_wmm},
- {RTW_WLAN_CATEGORY_P2P, "ACTION_P2P", &OnAction_p2p},
+/* response function for each management frame subtype, do not reorder */
+static mlme_handler mlme_sta_tbl[] = {
+ OnAssocReq,
+ OnAssocRsp,
+ OnAssocReq,
+ OnAssocRsp,
+ OnProbeReq,
+ OnProbeRsp,
+ NULL,
+ NULL,
+ OnBeacon,
+ NULL,
+ OnDisassoc,
+ OnAuthClient,
+ OnDeAuth,
+ OnAction,
};
static u8 null_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
@@ -71,7 +54,6 @@ extern unsigned char REALTEK_96B_IE[];
/********************************************************
MCS rate definitions
*********************************************************/
-unsigned char MCS_rate_2R[16] = {0xff, 0xff, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
unsigned char MCS_rate_1R[16] = {0xff, 0x00, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
/********************************************************
@@ -287,11 +269,11 @@ static void init_channel_list(struct adapter *padapter, struct rt_channel_info *
continue;
}
- if ((0 == padapter->registrypriv.ht_enable) && (8 == o->inc))
+ if ((padapter->registrypriv.ht_enable == 0) && (o->inc == 8))
continue;
- if ((0 == (padapter->registrypriv.cbw40_enable & BIT(1))) &&
- ((BW40MINUS == o->bw) || (BW40PLUS == o->bw)))
+ if (((padapter->registrypriv.cbw40_enable & BIT(1)) == 0) &&
+ ((o->bw == BW40MINUS) || (o->bw == BW40PLUS)))
continue;
if (!reg) {
@@ -320,7 +302,7 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
if (padapter->registrypriv.wireless_mode & WIRELESS_11G) {
b2_4GBand = true;
- if (RT_CHANNEL_DOMAIN_REALTEK_DEFINE == ChannelPlan)
+ if (ChannelPlan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
Index2G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index2G;
else
Index2G = RTW_ChannelPlanMap[ChannelPlan].Index2G;
@@ -330,14 +312,14 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
for (index = 0; index < RTW_ChannelPlan2G[Index2G].Len; index++) {
channel_set[chanset_size].ChannelNum = RTW_ChannelPlan2G[Index2G].Channel[index];
- if ((RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN == ChannelPlan) ||/* Channel 1~11 is active, and 12~14 is passive */
- (RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G == ChannelPlan)) {
+ if ((ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN) ||/* Channel 1~11 is active, and 12~14 is passive */
+ (ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G)) {
if (channel_set[chanset_size].ChannelNum >= 1 && channel_set[chanset_size].ChannelNum <= 11)
channel_set[chanset_size].ScanType = SCAN_ACTIVE;
else if ((channel_set[chanset_size].ChannelNum >= 12 && channel_set[chanset_size].ChannelNum <= 14))
channel_set[chanset_size].ScanType = SCAN_PASSIVE;
- } else if (RT_CHANNEL_DOMAIN_WORLD_WIDE_13 == ChannelPlan ||
- RT_CHANNEL_DOMAIN_2G_WORLD == Index2G) {/* channel 12~13, passive scan */
+ } else if (ChannelPlan == RT_CHANNEL_DOMAIN_WORLD_WIDE_13 ||
+ Index2G == RT_CHANNEL_DOMAIN_2G_WORLD) {/* channel 12~13, passive scan */
if (channel_set[chanset_size].ChannelNum <= 11)
channel_set[chanset_size].ScanType = SCAN_ACTIVE;
else
@@ -352,9 +334,8 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
return chanset_size;
}
-int init_mlme_ext_priv(struct adapter *padapter)
+void init_mlme_ext_priv(struct adapter *padapter)
{
- int res = _SUCCESS;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -376,8 +357,6 @@ int init_mlme_ext_priv(struct adapter *padapter)
pmlmeext->mlmeext_init = true;
pmlmeext->active_keep_alive_check = true;
-
- return res;
}
void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
@@ -394,45 +373,29 @@ void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
}
}
-static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, struct recv_frame *precv_frame)
-{
- u8 *pframe = precv_frame->rx_data;
-
- if (ptable->func) {
- /* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
- if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
- !is_broadcast_ether_addr(GetAddr1Ptr(pframe)))
- return;
- ptable->func(padapter, precv_frame);
- }
-}
-
void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
{
int index;
- struct mlme_handler *ptable;
+ mlme_handler fct;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 *pframe = precv_frame->rx_data;
- struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(pframe));
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
+ struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, hdr->addr2);
- if (GetFrameType(pframe) != IEEE80211_FTYPE_MGMT)
+ if (!ieee80211_is_mgmt(hdr->frame_control))
return;
/* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
- if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
- !is_broadcast_ether_addr(GetAddr1Ptr(pframe)))
+ if (memcmp(hdr->addr1, myid(&padapter->eeprompriv), ETH_ALEN) &&
+ !is_broadcast_ether_addr(hdr->addr1))
return;
- ptable = mlme_sta_tbl;
-
- index = GetFrameSubType(pframe) >> 4;
-
- if (index > 13)
+ index = (le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE) >> 4;
+ if (index >= ARRAY_SIZE(mlme_sta_tbl))
return;
- ptable += index;
+ fct = mlme_sta_tbl[index];
if (psta) {
- if (GetRetry(pframe)) {
+ if (ieee80211_has_retry(hdr->frame_control)) {
if (precv_frame->attrib.seq_num == psta->RxMgmtFrameSeqNum)
/* drop the duplicate management frame */
return;
@@ -440,13 +403,15 @@ void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
psta->RxMgmtFrameSeqNum = precv_frame->attrib.seq_num;
}
- if (GetFrameSubType(pframe) == WIFI_AUTH) {
+ if (ieee80211_is_auth(hdr->frame_control)) {
if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- ptable->func = &OnAuth;
+ fct = OnAuth;
else
- ptable->func = &OnAuthClient;
+ fct = OnAuthClient;
}
- _mgt_dispatcher(padapter, ptable, precv_frame);
+
+ if (fct)
+ fct(padapter, precv_frame);
}
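
The new dispatcher derives the table index straight from the 802.11 frame_control field: management-frame subtypes live in bits 4-7, so masking with IEEE80211_FCTL_STYPE and shifting right by 4 yields 0-13, matching the fixed order of mlme_sta_tbl[]. A tiny userspace check of that indexing (the mask/subtype values below mirror the standard 802.11 encodings):

#include <stdint.h>
#include <stdio.h>

#define FCTL_STYPE	0x00f0	/* subtype field, bits 4-7 */
#define STYPE_BEACON	0x0080	/* management subtype 8    */

int main(void)
{
	uint16_t fc = STYPE_BEACON;		/* type 00 (mgmt), subtype 1000 */
	unsigned idx = (fc & FCTL_STYPE) >> 4;

	printf("table index %u\n", idx);	/* 8 -> the OnBeacon slot */
	return 0;
}
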
static u32 p2p_listen_state_process(struct adapter *padapter, unsigned char *da)
@@ -482,7 +447,6 @@ unsigned int OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame
u8 is_valid_p2p_probereq = false;
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 wifi_test_chk_rate = 1;
if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) &&
!rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE) &&
@@ -497,25 +461,18 @@ unsigned int OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame
/* Commented by Kurt 2012/10/16 */
/* IOT issue: Google Nexus7 use 1M rate to send p2p_probe_req after GO nego completed and Nexus7 is client */
- if (wifi_test_chk_rate == 1) {
- is_valid_p2p_probereq = process_probe_req_p2p_ie(pwdinfo, pframe, len);
- if (is_valid_p2p_probereq) {
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
- /* FIXME */
- report_survey_event(padapter, precv_frame);
- p2p_listen_state_process(padapter, get_sa(pframe));
-
- return _SUCCESS;
- }
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
- goto _continue;
+ is_valid_p2p_probereq = process_probe_req_p2p_ie(pwdinfo, pframe, len);
+ if (is_valid_p2p_probereq) {
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
+ /* FIXME */
+ report_survey_event(padapter, precv_frame);
+ p2p_listen_state_process(padapter, get_sa(pframe));
+
+ return _SUCCESS;
}
}
}
-_continue:
-
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
return _SUCCESS;
@@ -622,7 +579,7 @@ unsigned int OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame)
}
/* check the vendor of the assoc AP */
- pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe + sizeof(struct rtw_ieee80211_hdr_3addr), len - sizeof(struct rtw_ieee80211_hdr_3addr));
+ pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe + sizeof(struct ieee80211_hdr_3addr), len - sizeof(struct ieee80211_hdr_3addr));
/* update TSF Value */
update_TSF(pmlmeext, pframe, len);
@@ -988,7 +945,7 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
status = _STATS_FAILURE_;
}
- if (_STATS_SUCCESSFUL_ != status)
+ if (status != _STATS_SUCCESSFUL_)
goto OnAssocReqFail;
/* check if the supported rate is ok */
@@ -1077,7 +1034,7 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
wpa_ie_len = 0;
}
- if (_STATS_SUCCESSFUL_ != status)
+ if (status != _STATS_SUCCESSFUL_)
goto OnAssocReqFail;
pstat->flags &= ~(WLAN_STA_WPS | WLAN_STA_MAYBE_WPS);
@@ -1272,7 +1229,7 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
spin_unlock_bh(&pstapriv->asoc_list_lock);
/* now the station is qualified to join our BSS... */
- if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (_STATS_SUCCESSFUL_ == status)) {
+ if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (status == _STATS_SUCCESSFUL_)) {
/* 1 bss_cap_update & sta_info_update */
bss_cap_update_on_sta_join(padapter, pstat);
sta_info_update(padapter, pstat);
@@ -1315,7 +1272,6 @@ unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame
int res;
unsigned short status;
struct ndis_802_11_var_ie *pIE;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
/* struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network); */
@@ -1386,11 +1342,6 @@ unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame
UpdateBrateTbl(padapter, pmlmeinfo->network.SupportedRates);
report_assoc_result:
- if (res > 0)
- rtw_buf_update(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len, pframe, pkt_len);
- else
- kfree(pmlmepriv->assoc_rsp);
-
report_join_res(padapter, res);
return _SUCCESS;
@@ -1448,7 +1399,7 @@ unsigned int OnDeAuth(struct adapter *padapter, struct recv_frame *precv_frame)
(pmlmeinfo->state & WIFI_FW_ASSOC_STATE)) {
if (reason == WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA) {
ignore_received_deauth = 1;
- } else if (WLAN_REASON_PREV_AUTH_NOT_VALID == reason) {
+ } else if (reason == WLAN_REASON_PREV_AUTH_NOT_VALID) {
// TODO: 802.11r
ignore_received_deauth = 1;
}
@@ -1508,126 +1459,76 @@ unsigned int OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame
return _SUCCESS;
}
-unsigned int OnAtim(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
-unsigned int on_action_spct(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- unsigned int ret = _FAIL;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- u8 *pframe = precv_frame->rx_data;
- u8 *frame_body = (u8 *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
- u8 category;
- u8 action;
-
- psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
-
- if (!psta)
- goto exit;
-
- category = frame_body[0];
- if (category != RTW_WLAN_CATEGORY_SPECTRUM_MGMT)
- goto exit;
-
- action = frame_body[1];
- switch (action) {
- case RTW_WLAN_ACTION_SPCT_MSR_REQ:
- case RTW_WLAN_ACTION_SPCT_MSR_RPRT:
- case RTW_WLAN_ACTION_SPCT_TPC_REQ:
- case RTW_WLAN_ACTION_SPCT_TPC_RPRT:
- break;
- case RTW_WLAN_ACTION_SPCT_CHL_SWITCH:
- break;
- default:
- break;
- }
-
-exit:
- return ret;
-}
-
-unsigned int OnAction_qos(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
-unsigned int OnAction_dls(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
unsigned int OnAction_back(struct adapter *padapter, struct recv_frame *precv_frame)
{
- u8 *addr;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
struct sta_info *psta = NULL;
struct recv_reorder_ctrl *preorder_ctrl;
unsigned char *frame_body;
- unsigned char category, action;
- unsigned short tid, status;
+ unsigned short tid;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u8 *pframe = precv_frame->rx_data;
struct sta_priv *pstapriv = &padapter->stapriv;
/* check RA matches or not */
- if (memcmp(myid(&padapter->eeprompriv), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
+ if (memcmp(myid(&padapter->eeprompriv), mgmt->da, ETH_ALEN))/* for if1, sta/ap mode */
return _SUCCESS;
if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
return _SUCCESS;
- addr = GetAddr2Ptr(pframe);
- psta = rtw_get_stainfo(pstapriv, addr);
+ psta = rtw_get_stainfo(pstapriv, mgmt->sa);
if (!psta)
return _SUCCESS;
- frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
- category = frame_body[0];
- if (category == RTW_WLAN_CATEGORY_BACK) { /* representing Block Ack */
- if (!pmlmeinfo->HT_enable)
- return _SUCCESS;
- action = frame_body[1];
- switch (action) {
- case RTW_WLAN_ACTION_ADDBA_REQ: /* ADDBA request */
- memcpy(&pmlmeinfo->ADDBA_req, &frame_body[2], sizeof(struct ADDBA_request));
- process_addba_req(padapter, (u8 *)&pmlmeinfo->ADDBA_req, addr);
-
- if (pmlmeinfo->bAcceptAddbaReq)
- issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 0);
- else
- issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 37);/* reject ADDBA Req */
- break;
- case RTW_WLAN_ACTION_ADDBA_RESP: /* ADDBA response */
- status = get_unaligned_le16(&frame_body[3]);
- tid = ((frame_body[5] >> 2) & 0x7);
- if (status == 0) { /* successful */
- psta->htpriv.agg_enable_bitmap |= 1 << tid;
- psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
- } else {
- psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
- }
- break;
- case RTW_WLAN_ACTION_DELBA: /* DELBA */
- if ((frame_body[3] & BIT(3)) == 0) {
- psta->htpriv.agg_enable_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
- psta->htpriv.candidate_tid_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
- } else if ((frame_body[3] & BIT(3)) == BIT(3)) {
- tid = (frame_body[3] >> 4) & 0x0F;
- preorder_ctrl = &psta->recvreorder_ctrl[tid];
- preorder_ctrl->enable = false;
- preorder_ctrl->indicate_seq = 0xffff;
- }
- /* todo: how to notify the host while receiving DELETE BA */
- break;
- default:
- break;
+ if (!pmlmeinfo->HT_enable)
+ return _SUCCESS;
+ /* All union members start with an action code, it's ok to use addba_req. */
+ switch (mgmt->u.action.u.addba_req.action_code) {
+ case WLAN_ACTION_ADDBA_REQ:
+ memcpy(&pmlmeinfo->ADDBA_req, &frame_body[2], sizeof(struct ADDBA_request));
+ tid = u16_get_bits(le16_to_cpu(mgmt->u.action.u.addba_req.capab),
+ IEEE80211_ADDBA_PARAM_TID_MASK);
+ preorder_ctrl = &psta->recvreorder_ctrl[tid];
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->enable = pmlmeinfo->bAcceptAddbaReq;
+
+ issue_action_BA(padapter, mgmt->sa, WLAN_ACTION_ADDBA_RESP,
+ pmlmeinfo->bAcceptAddbaReq ?
+ WLAN_STATUS_SUCCESS : WLAN_STATUS_REQUEST_DECLINED);
+ break;
+ case WLAN_ACTION_ADDBA_RESP:
+ tid = u16_get_bits(le16_to_cpu(mgmt->u.action.u.addba_resp.capab),
+ IEEE80211_ADDBA_PARAM_TID_MASK);
+ if (mgmt->u.action.u.addba_resp.status == 0) { /* successful */
+ psta->htpriv.agg_enable_bitmap |= BIT(tid);
+ psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
+ } else {
+ psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
+ }
+ break;
+ case WLAN_ACTION_DELBA:
+ tid = u16_get_bits(le16_to_cpu(mgmt->u.action.u.delba.params),
+ IEEE80211_DELBA_PARAM_TID_MASK);
+ if (u16_get_bits(le16_to_cpu(mgmt->u.action.u.delba.params),
+ IEEE80211_DELBA_PARAM_INITIATOR_MASK) == WLAN_BACK_RECIPIENT) {
+ psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
+ psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
+ } else {
+ preorder_ctrl = &psta->recvreorder_ctrl[tid];
+ preorder_ctrl->enable = false;
+ preorder_ctrl->indicate_seq = 0xffff;
}
+ /* todo: how to notify the host while receiving DELETE BA */
+ break;
+ default:
+ break;
}
+
return _SUCCESS;
}
@@ -1645,7 +1546,7 @@ static int get_reg_classes_full_count(struct p2p_channels *channel_list)
void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
{
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
__be32 p2poui = cpu_to_be32(P2POUI);
u8 oui_subtype = P2P_GO_NEGO_REQ;
@@ -1655,7 +1556,7 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -1672,9 +1573,9 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -1685,8 +1586,8 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -1975,7 +1876,7 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame_body, uint len, u8 result)
{
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
__be32 p2poui = cpu_to_be32(P2POUI);
u8 oui_subtype = P2P_GO_NEGO_RESP;
@@ -1990,7 +1891,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -2007,9 +1908,9 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -2020,8 +1921,8 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -2337,7 +2238,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
{
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
__be32 p2poui = cpu_to_be32(P2POUI);
u8 oui_subtype = P2P_GO_NEGO_CONF;
@@ -2347,7 +2248,7 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -2364,9 +2265,9 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -2377,8 +2278,8 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -2498,7 +2399,7 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
{
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
__be32 p2poui = cpu_to_be32(P2POUI);
u8 oui_subtype = P2P_INVIT_REQ;
@@ -2509,7 +2410,7 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -2526,9 +2427,9 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -2539,8 +2440,8 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -2745,7 +2646,7 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialogToken, u8 status_code)
{
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
__be32 p2poui = cpu_to_be32(P2POUI);
u8 oui_subtype = P2P_INVIT_RESP;
@@ -2755,7 +2656,7 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -2772,9 +2673,9 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -2785,8 +2686,8 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -2935,7 +2836,7 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidlen, u8 *pdev_raddr)
{
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
u8 dialogToken = 1;
u8 oui_subtype = P2P_PROVISION_DISC_REQ;
@@ -2946,7 +2847,7 @@ void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidle
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -2963,9 +2864,9 @@ void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidle
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, pdev_raddr, ETH_ALEN);
@@ -2976,8 +2877,8 @@ void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidle
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -3045,7 +2946,7 @@ void issue_probersp_p2p(struct adapter *padapter, unsigned char *da)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned char *mac;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -3067,11 +2968,11 @@ void issue_probersp_p2p(struct adapter *padapter, unsigned char *da)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
mac = myid(&padapter->eeprompriv);
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, da, ETH_ALEN);
memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
@@ -3083,7 +2984,7 @@ void issue_probersp_p2p(struct adapter *padapter, unsigned char *da)
pmlmeext->mgnt_seq++;
SetFrameSubType(fctrl, WIFI_PROBERSP);
- pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = pattrib->hdrlen;
pframe += pattrib->hdrlen;
@@ -3291,7 +3192,7 @@ static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned char *mac;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -3312,11 +3213,11 @@ static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
mac = myid(&padapter->eeprompriv);
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
if (da) {
@@ -3339,8 +3240,8 @@ static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack)
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_PROBEREQ);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ))
pframe = rtw_set_ie(pframe, _SSID_IE_, pwdinfo->tx_prov_disc_info.ssid.SsidLength, pwdinfo->tx_prov_disc_info.ssid.Ssid, &pattrib->pktlen);
@@ -3614,7 +3515,7 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
u8 result = P2P_STATUS_SUCCESS;
u8 empty_addr[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
- frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
dialogToken = frame_body[7];
@@ -3626,7 +3527,7 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE))
return _SUCCESS;
- len -= sizeof(struct rtw_ieee80211_hdr_3addr);
+ len -= sizeof(struct ieee80211_hdr_3addr);
switch (frame_body[6]) { /* OUI Subtype */
case P2P_GO_NEGO_REQ:
@@ -3668,7 +3569,7 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
pwdinfo->nego_req_info.benable = false;
result = process_p2p_group_negotation_resp(pwdinfo, frame_body, len);
issue_p2p_GO_confirm(pwdinfo->padapter, GetAddr2Ptr(pframe), result);
- if (P2P_STATUS_SUCCESS == result) {
+ if (result == P2P_STATUS_SUCCESS) {
if (rtw_p2p_role(pwdinfo) == P2P_ROLE_CLIENT) {
pwdinfo->p2p_info.operation_ch[0] = pwdinfo->peer_operating_ch;
pwdinfo->p2p_info.scan_op_ch_only = 1;
@@ -3683,7 +3584,7 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
break;
case P2P_GO_NEGO_CONF:
result = process_p2p_group_negotation_confirm(pwdinfo, frame_body, len);
- if (P2P_STATUS_SUCCESS == result) {
+ if (result == P2P_STATUS_SUCCESS) {
if (rtw_p2p_role(pwdinfo) == P2P_ROLE_CLIENT) {
pwdinfo->p2p_info.operation_ch[0] = pwdinfo->peer_operating_ch;
pwdinfo->p2p_info.scan_op_ch_only = 1;
@@ -3867,7 +3768,7 @@ static unsigned int on_action_public_vendor(struct recv_frame *precv_frame)
{
unsigned int ret = _FAIL;
u8 *pframe = precv_frame->rx_data;
- u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
if (!memcmp(frame_body + 2, P2P_OUI, 4)) {
ret = on_action_public_p2p(precv_frame);
@@ -3880,7 +3781,7 @@ static unsigned int on_action_public_default(struct recv_frame *precv_frame)
{
unsigned int ret = _FAIL;
u8 *pframe = precv_frame->rx_data;
- u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
u8 token;
token = frame_body[2];
@@ -3898,7 +3799,7 @@ unsigned int on_action_public(struct adapter *padapter, struct recv_frame *precv
{
unsigned int ret = _FAIL;
u8 *pframe = precv_frame->rx_data;
- u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
u8 category, action;
/* check RA matches or not */
@@ -3906,7 +3807,7 @@ unsigned int on_action_public(struct adapter *padapter, struct recv_frame *precv
goto exit;
category = frame_body[0];
- if (category != RTW_WLAN_CATEGORY_PUBLIC)
+ if (category != WLAN_CATEGORY_PUBLIC)
goto exit;
action = frame_body[1];
@@ -3923,16 +3824,6 @@ exit:
return ret;
}
-unsigned int OnAction_ht(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
-unsigned int OnAction_wmm(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_frame)
{
u8 *frame_body;
@@ -3945,7 +3836,7 @@ unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_fra
if (memcmp(myid(&padapter->eeprompriv), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
return _SUCCESS;
- frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
category = frame_body[0];
if (category != RTW_WLAN_CATEGORY_P2P)
@@ -3954,7 +3845,7 @@ unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_fra
if (be32_to_cpu(*((__be32 *)(frame_body + 1))) != P2POUI)
return _SUCCESS;
- len -= sizeof(struct rtw_ieee80211_hdr_3addr);
+ len -= sizeof(struct ieee80211_hdr_3addr);
OUI_Subtype = frame_body[5];
switch (OUI_Subtype) {
@@ -3975,29 +3866,22 @@ unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_fra
unsigned int OnAction(struct adapter *padapter, struct recv_frame *precv_frame)
{
- int i;
- unsigned char category;
- struct action_handler *ptable;
- unsigned char *frame_body;
- u8 *pframe = precv_frame->rx_data;
-
- frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
- category = frame_body[0];
-
- for (i = 0; i < sizeof(OnAction_tbl) / sizeof(struct action_handler); i++) {
- ptable = &OnAction_tbl[i];
- if (category == ptable->num)
- ptable->func(padapter, precv_frame);
+ switch (mgmt->u.action.category) {
+ case WLAN_CATEGORY_BACK:
+ OnAction_back(padapter, precv_frame);
+ break;
+ case WLAN_CATEGORY_PUBLIC:
+ on_action_public(padapter, precv_frame);
+ break;
+ case RTW_WLAN_CATEGORY_P2P:
+ OnAction_p2p(padapter, precv_frame);
+ break;
}
return _SUCCESS;
}
-unsigned int DoReserved(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv)
{
struct xmit_frame *pmgntframe;
@@ -4154,7 +4038,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned int rate_len;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -4177,9 +4061,9 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
eth_broadcast_addr(pwlanhdr->addr1);
@@ -4190,8 +4074,8 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
/* pmlmeext->mgnt_seq++; */
SetFrameSubType(pframe, WIFI_BEACON);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
/* for P2P : Primary Device Type & Device Name */
@@ -4274,8 +4158,8 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
u8 *wps_ie;
uint wps_ielen;
u8 sr = 0;
- wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr + TXDESC_OFFSET + sizeof(struct rtw_ieee80211_hdr_3addr) + _BEACON_IE_OFFSET_,
- pattrib->pktlen - sizeof(struct rtw_ieee80211_hdr_3addr) - _BEACON_IE_OFFSET_, NULL, &wps_ielen);
+ wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr + TXDESC_OFFSET + sizeof(struct ieee80211_hdr_3addr) + _BEACON_IE_OFFSET_,
+ pattrib->pktlen - sizeof(struct ieee80211_hdr_3addr) - _BEACON_IE_OFFSET_, NULL, &wps_ielen);
if (wps_ie && wps_ielen > 0)
rtw_get_wps_attr_content(wps_ie, wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
if (sr != 0)
@@ -4362,7 +4246,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned char *mac, *bssid;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -4386,12 +4270,12 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
mac = myid(&padapter->eeprompriv);
bssid = cur_network->MacAddress;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, da, ETH_ALEN);
memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
@@ -4401,7 +4285,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
pmlmeext->mgnt_seq++;
SetFrameSubType(fctrl, WIFI_PROBERSP);
- pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = pattrib->hdrlen;
pframe += pattrib->hdrlen;
@@ -4511,7 +4395,7 @@ static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *ps
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned char *mac;
unsigned char bssrate[NumRates];
@@ -4531,11 +4415,11 @@ static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *ps
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
mac = myid(&padapter->eeprompriv);
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
if (da) {
@@ -4554,8 +4438,8 @@ static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *ps
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_PROBEREQ);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
if (pssid)
pframe = rtw_set_ie(pframe, _SSID_IE_, pssid->SsidLength, pssid->Ssid, &pattrib->pktlen);
@@ -4629,7 +4513,7 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned int val32;
u16 val16;
@@ -4650,17 +4534,17 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_AUTH);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
if (psta) {/* for AP mode */
memcpy(pwlanhdr->addr1, psta->hwaddr, ETH_ALEN);
@@ -4734,7 +4618,7 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
SetPrivacy(fctrl);
- pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->encrypt = _WEP40_;
@@ -4753,7 +4637,7 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_info *pstat, int pkt_type)
{
struct xmit_frame *pmgntframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
struct pkt_attrib *pattrib;
unsigned char *pbuf, *pframe;
unsigned short val;
@@ -4778,9 +4662,9 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy((void *)GetAddr1Ptr(pwlanhdr), pstat->hwaddr, ETH_ALEN);
@@ -4794,7 +4678,7 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
else
return;
- pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen += pattrib->hdrlen;
pframe += pattrib->hdrlen;
@@ -4884,7 +4768,7 @@ void issue_assocreq(struct adapter *padapter)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe, *p;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
__le16 le_tmp;
unsigned int i, j, ie_len, index = 0;
@@ -4910,9 +4794,9 @@ void issue_assocreq(struct adapter *padapter)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
@@ -4922,8 +4806,8 @@ void issue_assocreq(struct adapter *padapter)
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ASSOCREQ);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
/* caps */
@@ -5184,7 +5068,7 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv;
struct mlme_ext_priv *pmlmeext;
@@ -5209,9 +5093,9 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)
@@ -5230,8 +5114,8 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_DATA_NULL);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->last_txcmdsz = pattrib->pktlen;
@@ -5286,7 +5170,7 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned short *qc;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -5310,9 +5194,9 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)
@@ -5336,8 +5220,8 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr_qos);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr_qos);
+ pframe += sizeof(struct ieee80211_qos_hdr);
+ pattrib->pktlen = sizeof(struct ieee80211_qos_hdr);
pattrib->last_txcmdsz = pattrib->pktlen;
@@ -5390,7 +5274,7 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned s
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -5416,9 +5300,9 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned s
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, da, ETH_ALEN);
@@ -5429,8 +5313,8 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned s
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_DEAUTH);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
le_tmp = cpu_to_le16(reason);
pframe = rtw_set_fixed_ie(pframe, _RSON_CODE_, (unsigned char *)&le_tmp, &pattrib->pktlen);
@@ -5481,7 +5365,7 @@ exit:
void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned char action, unsigned short status)
{
- u8 category = RTW_WLAN_CATEGORY_BACK;
+ u8 category = WLAN_CATEGORY_BACK;
u16 start_seq;
u16 BA_para_set;
u16 reason_code;
@@ -5491,7 +5375,7 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
u8 *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -5511,9 +5395,9 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
/* memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN); */
@@ -5525,8 +5409,8 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &(category), &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &(action), &pattrib->pktlen);
@@ -5599,7 +5483,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct wlan_network *pnetwork = NULL;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -5615,7 +5499,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
if (pmlmeinfo->bwmode_updated)
return;
- category = RTW_WLAN_CATEGORY_PUBLIC;
+ category = WLAN_CATEGORY_PUBLIC;
action = ACT_PUBLIC_BSSCOEXIST;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
@@ -5629,9 +5513,9 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
@@ -5642,8 +5526,8 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -5759,32 +5643,38 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
unsigned int send_beacon(struct adapter *padapter)
{
- u8 bxmitok = false;
+ bool bxmitok = false;
int issue = 0;
int poll = 0;
- u32 start = jiffies;
+ clear_beacon_valid_bit(padapter);
- SetHwReg8188EU(padapter, HW_VAR_BCN_VALID, NULL);
do {
issue_beacon(padapter, 100);
issue++;
do {
yield();
- GetHwReg8188EU(padapter, HW_VAR_BCN_VALID, (u8 *)(&bxmitok));
+ bxmitok = get_beacon_valid_bit(padapter);
poll++;
} while ((poll % 10) != 0 && !bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
} while (!bxmitok && issue < 100 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
- return _FAIL;
- if (!bxmitok) {
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped || !bxmitok)
return _FAIL;
- } else {
- rtw_get_passing_time_ms(start);
- return _SUCCESS;
- }
+ return _SUCCESS;
+}
+
+bool get_beacon_valid_bit(struct adapter *adapter)
+{
+ /* BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2 */
+ return BIT(0) & rtw_read8(adapter, REG_TDECTRL + 2);
+}
+
+void clear_beacon_valid_bit(struct adapter *adapter)
+{
+ /* BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2, write 1 to clear, Clear by sw */
+ rtw_write8(adapter, REG_TDECTRL + 2, rtw_read8(adapter, REG_TDECTRL + 2) | BIT(0));
}
/****************************************************************************
@@ -5793,13 +5683,27 @@ Following are some utility functions for WiFi MLME
*****************************************************************************/
+static void rtw_set_initial_gain(struct adapter *adapter, u8 gain)
+{
+ struct hal_data_8188e *haldata = &adapter->haldata;
+ struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+ struct rtw_dig *digtable = &odmpriv->DM_DigTable;
+
+ if (gain == 0xff) {
+ /* restore rx gain */
+ ODM_Write_DIG(odmpriv, digtable->BackupIGValue);
+ } else {
+ digtable->BackupIGValue = digtable->CurIGValue;
+ ODM_Write_DIG(odmpriv, gain);
+ }
+}
+
void site_survey(struct adapter *padapter)
{
unsigned char survey_channel = 0, val8;
enum rt_scan_type ScanType = SCAN_PASSIVE;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u32 initialgain = 0;
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
if ((pwdinfo->rx_invitereq_info.scan_op_ch_only) || (pwdinfo->p2p_info.scan_op_ch_only)) {
@@ -5877,8 +5781,8 @@ void site_survey(struct adapter *padapter)
rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_LISTEN);
pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
- initialgain = 0xff; /* restore RX GAIN */
- SetHwReg8188EU(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+ /* restore RX GAIN */
+ rtw_set_initial_gain(padapter, 0xff);
/* turn on dynamic functions */
Restore_DM_Func_Flag(padapter);
/* Switch_DM_Func(padapter, DYNAMIC_FUNC_DIG|DYNAMIC_FUNC_HP|DYNAMIC_FUNC_SS, true); */
@@ -5911,8 +5815,8 @@ void site_survey(struct adapter *padapter)
/* config MSR */
Set_MSR(padapter, (pmlmeinfo->state & 0x3));
- initialgain = 0xff; /* restore RX GAIN */
- SetHwReg8188EU(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+ /* restore RX GAIN */
+ rtw_set_initial_gain(padapter, 0xff);
/* turn on dynamic functions */
Restore_DM_Func_Flag(padapter);
/* Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true); */
@@ -5950,7 +5854,7 @@ u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, st
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
__le32 le32_tmp;
- len = packet_len - sizeof(struct rtw_ieee80211_hdr_3addr);
+ len = packet_len - sizeof(struct ieee80211_hdr_3addr);
if (len > MAX_IE_SZ)
return _FAIL;
@@ -5980,13 +5884,13 @@ u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, st
/* below is to copy the information element */
bssid->IELength = len;
- memcpy(bssid->IEs, (pframe + sizeof(struct rtw_ieee80211_hdr_3addr)), bssid->IELength);
+ memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
/* get the signal strength */
bssid->Rssi = precv_frame->attrib.phy_info.recvpower; /* in dBM.raw data */
bssid->PhyInfo.SignalQuality = precv_frame->attrib.phy_info.SignalQuality;/* in percentage */
bssid->PhyInfo.SignalStrength = precv_frame->attrib.phy_info.SignalStrength;/* in percentage */
- GetHalDefVar8188EUsb(padapter, HAL_DEF_CURRENT_ANTENNA, &bssid->PhyInfo.Optimum_antenna);
+ bssid->PhyInfo.Optimum_antenna = rtw_current_antenna(padapter);
/* checking SSID */
p = rtw_get_ie(bssid->IEs + ie_offset, _SSID_IE_, &len, bssid->IELength - ie_offset);
@@ -6087,10 +5991,58 @@ u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, st
return _SUCCESS;
}
+static void rtw_set_bssid(struct adapter *adapter, u8 *bssid)
+{
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ rtw_write8(adapter, REG_BSSID + i, bssid[i]);
+}
+
+static void mlme_join(struct adapter *adapter, int type)
+{
+ struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+ u8 retry_limit = 0x30;
+
+ switch (type) {
+ case 0:
+ /* prepare to join */
+ /* enable to rx data frame, accept all data frame */
+ rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF);
+
+ rtw_write32(adapter, REG_RCR,
+ rtw_read32(adapter, REG_RCR) | RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+
+ if (check_fwstate(mlmepriv, WIFI_STATION_STATE)) {
+ retry_limit = 48;
+ } else {
+ /* ad-hoc mode */
+ retry_limit = 0x7;
+ }
+ break;
+ case 1:
+ /* joinbss_event call back when join res < 0 */
+ rtw_write16(adapter, REG_RXFLTMAP2, 0x00);
+ break;
+ case 2:
+ /* sta add event call back */
+ /* enable update TSF */
+ rtw_write8(adapter, REG_BCN_CTRL, rtw_read8(adapter, REG_BCN_CTRL) & (~BIT(4)));
+
+ if (check_fwstate(mlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))
+ retry_limit = 0x7;
+ break;
+ default:
+ break;
+ }
+
+ rtw_write16(adapter, REG_RL,
+ retry_limit << RETRY_LIMIT_SHORT_SHIFT | retry_limit << RETRY_LIMIT_LONG_SHIFT);
+}
+
void start_create_ibss(struct adapter *padapter)
{
unsigned short caps;
- u8 join_type;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&pmlmeinfo->network);
@@ -6121,9 +6073,8 @@ void start_create_ibss(struct adapter *padapter)
report_join_res(padapter, -1);
pmlmeinfo->state = WIFI_FW_NULL_STATE;
} else {
- SetHwReg8188EU(padapter, HW_VAR_BSSID, padapter->registrypriv.dev_network.MacAddress);
- join_type = 0;
- SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ rtw_set_bssid(padapter, padapter->registrypriv.dev_network.MacAddress);
+ mlme_join(padapter, 0);
report_join_res(padapter, 1);
pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
@@ -6421,7 +6372,7 @@ void report_survey_event(struct adapter *padapter, struct recv_frame *precv_fram
pmlmeext = &padapter->mlmeextpriv;
pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC);
if (!pcmd_obj)
return;
@@ -6471,7 +6422,7 @@ void report_surveydone_event(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_KERNEL);
if (!pcmd_obj)
return;
@@ -6513,7 +6464,7 @@ void report_join_res(struct adapter *padapter, int res)
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC);
if (!pcmd_obj)
return;
@@ -6610,7 +6561,7 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_KERNEL);
if (!pcmd_obj)
return;
@@ -6696,13 +6647,11 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
struct sta_priv *pstapriv = &padapter->stapriv;
- u8 join_type;
u16 media_status;
if (join_res < 0) {
- join_type = 1;
- SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
- SetHwReg8188EU(padapter, HW_VAR_BSSID, null_addr);
+ mlme_join(padapter, 1);
+ rtw_set_bssid(padapter, null_addr);
/* restore to initial setting. */
update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
@@ -6721,7 +6670,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
}
/* turn on dynamic functions */
- Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
+ SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_RESET, NULL);
/* update IOT-related issue */
update_IOT_info(padapter);
@@ -6750,13 +6699,13 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
/* set per sta rate after updating HT cap. */
set_sta_rate(padapter, psta);
- SetHwReg8188EU(padapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&psta->mac_id);
+ rtw_set_max_rpt_macid(padapter, psta->mac_id);
+
media_status = (psta->mac_id << 8) | 1; /* MACID|OPMODE: 1 means connect */
SetHwReg8188EU(padapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
}
- join_type = 2;
- SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ mlme_join(padapter, 2);
if ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) {
/* correcting TSF */
@@ -6769,7 +6718,6 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *p
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u8 join_type;
if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {/* adhoc master or sta_count>1 */
@@ -6786,9 +6734,7 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *p
}
pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
}
-
- join_type = 2;
- SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ mlme_join(padapter, 2);
}
pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
@@ -6800,14 +6746,27 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *p
update_sta_info(padapter, psta);
}
+static void mlme_disconnect(struct adapter *adapter)
+{
+ /* Set RCR to not to receive data frame when NO LINK state */
+ /* reject all data frames */
+ rtw_write16(adapter, REG_RXFLTMAP2, 0x00);
+
+ /* reset TSF */
+ rtw_write8(adapter, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+
+ /* disable update TSF */
+ rtw_write8(adapter, REG_BCN_CTRL, rtw_read8(adapter, REG_BCN_CTRL) | BIT(4));
+}
+
void mlmeext_sta_del_event_callback(struct adapter *padapter)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if (is_client_associated_to_ap(padapter) || is_IBSS_empty(padapter)) {
- SetHwReg8188EU(padapter, HW_VAR_MLME_DISCONNECT, NULL);
- SetHwReg8188EU(padapter, HW_VAR_BSSID, null_addr);
+ mlme_disconnect(padapter);
+ rtw_set_bssid(padapter, null_addr);
/* restore to initial setting. */
update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
@@ -6951,7 +6910,7 @@ void linked_status_chk(struct adapter *padapter)
if (pmlmeinfo->FW_sta_info[i].status == 1) {
psta = pmlmeinfo->FW_sta_info[i].psta;
- if (NULL == psta)
+ if (psta == NULL)
continue;
if (pmlmeinfo->FW_sta_info[i].rx_pkt == sta_rx_pkts(psta)) {
if (pmlmeinfo->FW_sta_info[i].retry < 3) {
@@ -6996,11 +6955,11 @@ void survey_timer_hdl(struct adapter *padapter)
pmlmeext->scan_abort = false;/* reset */
}
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c)
goto exit_survey_timer_hdl;
- psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
+ psurveyPara = kzalloc(sizeof(*psurveyPara), GFP_ATOMIC);
if (!psurveyPara) {
kfree(ph2c);
goto exit_survey_timer_hdl;
@@ -7122,7 +7081,7 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
/* disable dynamic functions, such as high power, DIG */
Save_DM_Func_Flag(padapter);
- Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
+ SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_CLR, NULL);
/* cancel link timer */
_cancel_timer_ex(&pmlmeext->link_timer);
@@ -7146,7 +7105,6 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
{
- u8 join_type;
struct ndis_802_11_var_ie *pIE;
struct registry_priv *pregpriv = &padapter->registrypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -7170,7 +7128,7 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
/* set MSR to nolink -> infra. mode */
Set_MSR(padapter, _HW_STATE_STATION_);
- SetHwReg8188EU(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ mlme_disconnect(padapter);
}
rtw_antenna_select_cmd(padapter, pparm->network.PhyInfo.Optimum_antenna, false);
@@ -7243,9 +7201,8 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
/* config the initial gain under linking, need to write the BB registers */
- SetHwReg8188EU(padapter, HW_VAR_BSSID, pmlmeinfo->network.MacAddress);
- join_type = 0;
- SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ rtw_set_bssid(padapter, pmlmeinfo->network.MacAddress);
+ mlme_join(padapter, 0);
/* cancel link timer */
_cancel_timer_ex(&pmlmeext->link_timer);
@@ -7266,8 +7223,8 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
if (is_client_associated_to_ap(padapter))
issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms / 100, 100);
- SetHwReg8188EU(padapter, HW_VAR_MLME_DISCONNECT, NULL);
- SetHwReg8188EU(padapter, HW_VAR_BSSID, null_addr);
+ mlme_disconnect(padapter);
+ rtw_set_bssid(padapter, null_addr);
/* restore to initial setting. */
update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
@@ -7346,7 +7303,6 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
struct sitesurvey_parm *pparm = (struct sitesurvey_parm *)pbuf;
u8 bdelayscan = false;
u8 val8;
- u32 initialgain;
u32 i;
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
@@ -7391,15 +7347,14 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
if ((pmlmeext->sitesurvey_res.state == SCAN_START) || (pmlmeext->sitesurvey_res.state == SCAN_TXNULL)) {
/* disable dynamic functions, such as high power, DIG */
Save_DM_Func_Flag(padapter);
- Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
+ SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_CLR, NULL);
/* config the initial gain under scanning, need to write the BB registers */
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
- initialgain = 0x1E;
+ rtw_set_initial_gain(padapter, 0x1e);
else
- initialgain = 0x28;
+ rtw_set_initial_gain(padapter, 0x28);
- SetHwReg8188EU(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
/* set MSR to no link state */
Set_MSR(padapter, _HW_STATE_NOLINK_);
@@ -7538,13 +7493,13 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
int len_diff = 0;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- ptxBeacon_parm = kzalloc(sizeof(struct Tx_Beacon_param), GFP_ATOMIC);
+ ptxBeacon_parm = kzalloc(sizeof(*ptxBeacon_parm), GFP_ATOMIC);
if (!ptxBeacon_parm) {
kfree(ph2c);
res = _FAIL;
diff --git a/drivers/staging/r8188eu/core/rtw_p2p.c b/drivers/staging/r8188eu/core/rtw_p2p.c
index 48500fb82250..beffe5b16f1e 100644
--- a/drivers/staging/r8188eu/core/rtw_p2p.c
+++ b/drivers/staging/r8188eu/core/rtw_p2p.c
@@ -111,7 +111,7 @@ static void issue_group_disc_req(struct wifidirect_info *pwdinfo, u8 *da)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct adapter *padapter = pwdinfo->padapter;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -132,9 +132,9 @@ static void issue_group_disc_req(struct wifidirect_info *pwdinfo, u8 *da)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, da, ETH_ALEN);
@@ -145,8 +145,8 @@ static void issue_group_disc_req(struct wifidirect_info *pwdinfo, u8 *da)
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
/* Build P2P action frame header */
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
@@ -166,12 +166,12 @@ static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 s
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct adapter *padapter = pwdinfo->padapter;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
__be32 p2poui = cpu_to_be32(P2POUI);
u8 oui_subtype = P2P_DEVDISC_RESP;
@@ -189,9 +189,9 @@ static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 s
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, da, ETH_ALEN);
@@ -202,8 +202,8 @@ static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 s
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
/* Build P2P public action frame header */
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
@@ -233,7 +233,7 @@ static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 s
static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr, u8 *frame_body, u16 config_method)
{
struct adapter *padapter = pwdinfo->padapter;
- unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ unsigned char category = WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
u8 dialogToken = frame_body[7]; /* The Dialog Token of provisioning discovery request frame. */
__be32 p2poui = cpu_to_be32(P2POUI);
@@ -243,7 +243,7 @@ static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr,
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -259,9 +259,9 @@ static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr,
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -272,8 +272,8 @@ static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr,
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -311,7 +311,7 @@ static void issue_p2p_presence_resp(struct wifidirect_info *pwdinfo, u8 *da, u8
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct adapter *padapter = pwdinfo->padapter;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -334,9 +334,9 @@ static void issue_p2p_presence_resp(struct wifidirect_info *pwdinfo, u8 *da, u8
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, da, ETH_ALEN);
@@ -347,8 +347,8 @@ static void issue_p2p_presence_resp(struct wifidirect_info *pwdinfo, u8 *da, u8
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
/* Build P2P action frame header */
pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
@@ -872,7 +872,7 @@ u32 process_assoc_req_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pframe, uint l
}
psta->dev_name_len = 0;
- if (WPS_ATTR_DEVICE_NAME == be16_to_cpu(*(__be16 *)pattr_content)) {
+ if (be16_to_cpu(*(__be16 *)pattr_content) == WPS_ATTR_DEVICE_NAME) {
dev_name_len = be16_to_cpu(*(__be16 *)(pattr_content + 2));
psta->dev_name_len = (sizeof(psta->dev_name) < dev_name_len) ? sizeof(psta->dev_name) : dev_name_len;
@@ -900,7 +900,7 @@ u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
u8 *p2p_ie;
u32 p2p_ielen = 0;
- frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
dialogToken = frame_body[7];
status = P2P_STATUS_FAIL_UNKNOWN_P2PGROUP;
@@ -951,7 +951,7 @@ u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
/* issue Device Discoverability Response */
issue_p2p_devdisc_resp(pwdinfo, GetAddr2Ptr(pframe), status, dialogToken);
- return (status == P2P_STATUS_SUCCESS) ? true : false;
+ return status == P2P_STATUS_SUCCESS;
}
u32 process_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
@@ -967,7 +967,7 @@ u8 process_p2p_provdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint l
u16 uconfig_method = 0;
__be16 be_tmp;
- frame_body = (pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ frame_body = (pframe + sizeof(struct ieee80211_hdr_3addr));
wpsie = rtw_get_wps_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &wps_ielen);
if (wpsie) {
@@ -1213,7 +1213,7 @@ u8 process_p2p_group_negotation_resp(struct wifidirect_info *pwdinfo, u8 *pframe
if (attr_content == P2P_STATUS_SUCCESS) {
/* Do nothing. */
} else {
- if (P2P_STATUS_FAIL_INFO_UNAVAILABLE == attr_content) {
+ if (attr_content == P2P_STATUS_FAIL_INFO_UNAVAILABLE) {
rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INFOR_NOREADY);
} else {
rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
@@ -1401,7 +1401,7 @@ u8 process_p2p_presence_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
u8 dialogToken = 0;
u8 status = P2P_STATUS_SUCCESS;
- frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
dialogToken = frame_body[6];
@@ -1602,7 +1602,7 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
case P2P_PS_DISABLE:
pwdinfo->p2p_ps_state = p2p_ps_state;
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+ rtl8188e_set_p2p_ps_offload_cmd(padapter, p2p_ps_state);
pwdinfo->noa_index = 0;
pwdinfo->ctwindow = 0;
@@ -1612,7 +1612,7 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
if (padapter->pwrctrlpriv.bFwCurrentInPSMode) {
if (pwrpriv->smart_ps == 0) {
pwrpriv->smart_ps = 2;
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&padapter->pwrctrlpriv.pwr_mode));
+ rtw_set_firmware_ps_mode(padapter, pwrpriv->pwr_mode);
}
}
break;
@@ -1623,10 +1623,10 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
if (pwdinfo->ctwindow > 0) {
if (pwrpriv->smart_ps != 0) {
pwrpriv->smart_ps = 0;
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&padapter->pwrctrlpriv.pwr_mode));
+ rtw_set_firmware_ps_mode(padapter, pwrpriv->pwr_mode);
}
}
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+ rtl8188e_set_p2p_ps_offload_cmd(padapter, p2p_ps_state);
}
break;
case P2P_PS_SCAN:
@@ -1634,7 +1634,7 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
case P2P_PS_ALLSTASLEEP:
if (pwdinfo->p2p_ps_mode > P2P_PS_NONE) {
pwdinfo->p2p_ps_state = p2p_ps_state;
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+ rtl8188e_set_p2p_ps_offload_cmd(padapter, p2p_ps_state);
}
break;
default:
@@ -1891,7 +1891,7 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
if (role == P2P_ROLE_DEVICE || role == P2P_ROLE_CLIENT || role == P2P_ROLE_GO) {
/* leave IPS/Autosuspend */
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (rtw_pwr_wakeup(padapter) == _FAIL) {
ret = _FAIL;
goto exit;
}
@@ -1905,7 +1905,7 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
init_wifidirect_info(padapter, role);
} else if (role == P2P_ROLE_DISABLE) {
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (rtw_pwr_wakeup(padapter) == _FAIL) {
ret = _FAIL;
goto exit;
}
diff --git a/drivers/staging/r8188eu/core/rtw_pwrctrl.c b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
index 7beabf82eb92..7b816b824947 100644
--- a/drivers/staging/r8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
@@ -59,7 +59,7 @@ int ips_leave(struct adapter *padapter)
pwrpriv->rf_pwrstate = rf_on;
}
- if ((_WEP40_ == psecuritypriv->dot11PrivacyAlgrthm) || (_WEP104_ == psecuritypriv->dot11PrivacyAlgrthm)) {
+ if ((psecuritypriv->dot11PrivacyAlgrthm == _WEP40_) || (psecuritypriv->dot11PrivacyAlgrthm == _WEP104_)) {
set_channel_bwmode(padapter, padapter->mlmeextpriv.cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
for (keyid = 0; keyid < 4; keyid++) {
if (pmlmepriv->key_mask & BIT(keyid)) {
@@ -133,9 +133,8 @@ void rtw_ps_processor(struct adapter *padapter)
if (!rtw_pwr_unassociated_idle(padapter))
goto exit;
- if ((pwrpriv->rf_pwrstate == rf_on) && ((pwrpriv->pwr_state_check_cnts % 4) == 0)) {
+ if (pwrpriv->rf_pwrstate == rf_on) {
pwrpriv->change_rfpwrstate = rf_off;
-
ips_enter(padapter);
}
exit:
@@ -177,6 +176,19 @@ static bool PS_RDY_CHECK(struct adapter *padapter)
return true;
}
+void rtw_set_firmware_ps_mode(struct adapter *adapter, u8 mode)
+{
+ struct hal_data_8188e *haldata = &adapter->haldata;
+ struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+
+ /* Force leave RF low power mode for 1T1R to prevent
+ * conflicting setting in firmware power saving sequence.
+ */
+ if (mode != PS_MODE_ACTIVE)
+ ODM_RF_Saving(odmpriv, true);
+ rtl8188e_set_FwPwrMode_cmd(adapter, mode);
+}
+
void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_ant_mode)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
@@ -186,7 +198,7 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
return;
if (pwrpriv->pwr_mode == ps_mode) {
- if (PS_MODE_ACTIVE == ps_mode)
+ if (ps_mode == PS_MODE_ACTIVE)
return;
if ((pwrpriv->smart_ps == smart_ps) &&
@@ -194,11 +206,10 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
return;
}
- /* if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) */
if (ps_mode == PS_MODE_ACTIVE) {
if (pwdinfo->opp_ps == 0) {
pwrpriv->pwr_mode = ps_mode;
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+ rtw_set_firmware_ps_mode(padapter, ps_mode);
pwrpriv->bFwCurrentInPSMode = false;
}
} else {
@@ -207,14 +218,28 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
pwrpriv->pwr_mode = ps_mode;
pwrpriv->smart_ps = smart_ps;
pwrpriv->bcn_ant_mode = bcn_ant_mode;
- SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+ rtw_set_firmware_ps_mode(padapter, ps_mode);
/* Set CTWindow after LPS */
if (pwdinfo->opp_ps == 1)
p2p_ps_wk_cmd(padapter, P2P_PS_ENABLE, 0);
}
}
+}
+static bool lps_rf_on(struct adapter *adapter)
+{
+ /* When we halt NIC, we should check if FW LPS is leave. */
+ if (adapter->pwrctrlpriv.rf_pwrstate == rf_off) {
+ /* If it is in HW/SW Radio OFF or IPS state, we do not check Fw LPS Leave, */
+ /* because Fw is unload. */
+ return true;
+ }
+
+ if (rtw_read32(adapter, REG_RCR) & 0x00070000)
+ return false;
+
+ return true;
}
/*
@@ -223,16 +248,13 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
* -1: Timeout
* -2: Other error
*/
-s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
+static s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
{
- u32 start_time;
- u8 bAwake = false;
+ unsigned long timeout = jiffies + msecs_to_jiffies(delay_ms);
s32 err = 0;
- start_time = jiffies;
while (1) {
- GetHwReg8188EU(padapter, HW_VAR_FWLPS_RF_ON, &bAwake);
- if (bAwake)
+ if (lps_rf_on(padapter))
break;
if (padapter->bSurpriseRemoved) {
@@ -240,7 +262,7 @@ s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
break;
}
- if (rtw_get_passing_time_ms(start_time) > delay_ms) {
+ if (time_after(jiffies, timeout)) {
err = -1;
break;
}
@@ -329,13 +351,12 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->ips_mode_req = padapter->registrypriv.ips_mode;
pwrctrlpriv->pwr_state_check_interval = RTW_PWR_STATE_CHK_INTERVAL;
- pwrctrlpriv->pwr_state_check_cnts = 0;
pwrctrlpriv->bInSuspend = false;
pwrctrlpriv->bkeepfwalive = false;
pwrctrlpriv->LpsIdleCount = 0;
pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
- pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
+ pwrctrlpriv->bLeisurePs = pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE;
pwrctrlpriv->bFwCurrentInPSMode = false;
@@ -346,58 +367,38 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
timer_setup(&pwrctrlpriv->pwr_state_check_timer, pwr_state_check_handler, 0);
}
-/*
-* rtw_pwr_wakeup - Wake the NIC up from: 1)IPS. 2)USB autosuspend
-* @adapter: pointer to struct adapter structure
-* @ips_deffer_ms: the ms wiil prevent from falling into IPS after wakeup
-* Return _SUCCESS or _FAIL
-*/
-
-int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *caller)
+/* Wake the NIC up from: 1)IPS 2)USB autosuspend */
+int rtw_pwr_wakeup(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(3000);
+ unsigned long deny_time;
int ret = _SUCCESS;
- u32 start = jiffies;
-
- if (pwrpriv->ips_deny_time < jiffies + rtw_ms_to_systime(ips_deffer_ms))
- pwrpriv->ips_deny_time = jiffies + rtw_ms_to_systime(ips_deffer_ms);
-
- if (pwrpriv->ps_processing) {
- while (pwrpriv->ps_processing && rtw_get_passing_time_ms(start) <= 3000)
- msleep(10);
- }
- /* System suspend is not allowed to wakeup */
- if (pwrpriv->bInSuspend) {
- while (pwrpriv->bInSuspend &&
- (rtw_get_passing_time_ms(start) <= 3000 ||
- (rtw_get_passing_time_ms(start) <= 500)))
- msleep(10);
- }
+ while (pwrpriv->ps_processing && time_before(jiffies, timeout))
+ msleep(10);
/* I think this should be check in IPS, LPS, autosuspend functions... */
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
ret = _SUCCESS;
goto exit;
}
- if (rf_off == pwrpriv->rf_pwrstate) {
- if (_FAIL == ips_leave(padapter)) {
- ret = _FAIL;
- goto exit;
- }
+
+ if (pwrpriv->rf_pwrstate == rf_off && ips_leave(padapter) == _FAIL) {
+ ret = _FAIL;
+ goto exit;
}
- /* TODO: the following checking need to be merged... */
- if (padapter->bDriverStopped || !padapter->bup ||
- !padapter->hw_init_completed) {
- ret = false;
+ if (padapter->bDriverStopped || !padapter->bup || !padapter->hw_init_completed) {
+ ret = _FAIL;
goto exit;
}
exit:
- if (pwrpriv->ips_deny_time < jiffies + rtw_ms_to_systime(ips_deffer_ms))
- pwrpriv->ips_deny_time = jiffies + rtw_ms_to_systime(ips_deffer_ms);
+ deny_time = jiffies + msecs_to_jiffies(RTW_PWR_STATE_CHK_INTERVAL);
+ if (time_before(pwrpriv->ips_deny_time, deny_time))
+ pwrpriv->ips_deny_time = deny_time;
return ret;
}
@@ -408,12 +409,12 @@ int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
if (mode < PS_MODE_NUM) {
if (pwrctrlpriv->power_mgnt != mode) {
- if (PS_MODE_ACTIVE == mode)
+ if (mode == PS_MODE_ACTIVE)
LeaveAllPowerSaveMode(padapter);
else
pwrctrlpriv->LpsIdleCount = 2;
pwrctrlpriv->power_mgnt = mode;
- pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
+ pwrctrlpriv->bLeisurePs = pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE;
}
} else {
ret = -EINVAL;
@@ -431,7 +432,7 @@ int rtw_pm_set_ips(struct adapter *padapter, u8 mode)
return 0;
} else if (mode == IPS_NONE) {
rtw_ips_mode_req(pwrctrlpriv, mode);
- if ((padapter->bSurpriseRemoved == 0) && (_FAIL == rtw_pwr_wakeup(padapter)))
+ if ((padapter->bSurpriseRemoved == 0) && (rtw_pwr_wakeup(padapter) == _FAIL))
return -EFAULT;
} else {
return -EINVAL;
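The rtw_pwrctrl.c hunks above drop the driver-private rtw_get_passing_time_ms() bookkeeping in favour of the standard jiffies timeout pattern (msecs_to_jiffies() plus time_after()). A minimal sketch of that pattern follows; the helper name and the polled flag are illustrative only, not part of the patch.

#include <linux/jiffies.h>
#include <linux/delay.h>

/* Poll @done until it becomes true or @delay_ms elapses.
 * Returns 0 on success, -1 on timeout.
 */
static int poll_with_timeout(const bool *done, u32 delay_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(delay_ms);

	while (!*done) {
		if (time_after(jiffies, timeout))
			return -1;
		msleep(10);
	}

	return 0;
}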
diff --git a/drivers/staging/r8188eu/core/rtw_recv.c b/drivers/staging/r8188eu/core/rtw_recv.c
index 8800ea4825ff..df518439aea2 100644
--- a/drivers/staging/r8188eu/core/rtw_recv.c
+++ b/drivers/staging/r8188eu/core/rtw_recv.c
@@ -71,7 +71,6 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
list_add_tail(&precvframe->list, &precvpriv->free_recv_queue.queue);
- precvframe->pkt_newalloc = NULL;
precvframe->pkt = NULL;
precvframe->len = 0;
@@ -81,8 +80,6 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
}
precvpriv->rx_pending_cnt = 1;
- sema_init(&precvpriv->allrxreturnevt, 0);
-
res = rtl8188eu_init_recv_priv(padapter);
timer_setup(&precvpriv->signal_stat_timer, rtw_signal_stat_timer_hdl, 0);
@@ -749,6 +746,7 @@ static int sta2ap_data_frame(struct adapter *adapter,
struct sta_priv *pstapriv = &adapter->stapriv;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
u8 *ptr = precv_frame->rx_data;
+ __le16 fc = *(__le16 *)ptr;
unsigned char *mybssid = get_bssid(pmlmepriv);
int ret = _SUCCESS;
@@ -769,9 +767,8 @@ static int sta2ap_data_frame(struct adapter *adapter,
process_pwrbit_data(adapter, precv_frame);
- if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) {
+ if (ieee80211_is_data_qos(fc))
process_wmmps_data(adapter, precv_frame);
- }
if (GetFrameSubType(ptr) & BIT(6)) {
/* No data, will not indicate to upper layer, temporily count it here */
@@ -795,143 +792,135 @@ exit:
return ret;
}
-static int validate_recv_ctrl_frame(struct adapter *padapter,
- struct recv_frame *precv_frame)
+static void validate_recv_ctrl_frame(struct adapter *padapter,
+ struct recv_frame *precv_frame)
{
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct sta_priv *pstapriv = &padapter->stapriv;
- u8 *pframe = precv_frame->rx_data;
- /* uint len = precv_frame->len; */
-
- if (GetFrameType(pframe) != WIFI_CTRL_TYPE)
- return _FAIL;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
+ struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)hdr;
+ u8 wmmps_ac;
+ struct sta_info *psta;
/* receive the frames that ra(a1) is my address */
- if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN))
- return _FAIL;
+ if (memcmp(hdr->addr1, myid(&padapter->eeprompriv), ETH_ALEN))
+ return;
/* only handle ps-poll */
- if (GetFrameSubType(pframe) == WIFI_PSPOLL) {
- u16 aid;
- u8 wmmps_ac = 0;
- struct sta_info *psta = NULL;
+ if (!ieee80211_is_pspoll(hdr->frame_control))
+ return;
- aid = GetAid(pframe);
- psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ psta = rtw_get_stainfo(pstapriv, hdr->addr2);
+ if (!psta || psta->aid != (le16_to_cpu(pspoll->aid) & 0x3FFF))
+ return;
- if (!psta || psta->aid != aid)
- return _FAIL;
+ /* for rx pkt statistics */
+ psta->sta_stats.rx_ctrl_pkts++;
- /* for rx pkt statistics */
- psta->sta_stats.rx_ctrl_pkts++;
+ switch (pattrib->priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk & BIT(0);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi & BIT(0);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo & BIT(0);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be & BIT(0);
+ break;
+ }
- switch (pattrib->priority) {
- case 1:
- case 2:
- wmmps_ac = psta->uapsd_bk & BIT(0);
- break;
- case 4:
- case 5:
- wmmps_ac = psta->uapsd_vi & BIT(0);
- break;
- case 6:
- case 7:
- wmmps_ac = psta->uapsd_vo & BIT(0);
- break;
- case 0:
- case 3:
- default:
- wmmps_ac = psta->uapsd_be & BIT(0);
- break;
- }
+ if (wmmps_ac)
+ return;
- if (wmmps_ac)
- return _FAIL;
+ if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
+ psta->expire_to = pstapriv->expire_to;
+ psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
+ }
- if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
- psta->expire_to = pstapriv->expire_to;
- psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
- }
+ if ((psta->state & WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap & BIT(psta->aid))) {
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- if ((psta->state & WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap & BIT(psta->aid))) {
- struct list_head *xmitframe_plist, *xmitframe_phead;
- struct xmit_frame *pxmitframe = NULL;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ spin_lock_bh(&pxmitpriv->lock);
- spin_lock_bh(&pxmitpriv->lock);
+ xmitframe_phead = get_list_head(&psta->sleep_q);
+ xmitframe_plist = xmitframe_phead->next;
- xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = xmitframe_phead->next;
+ if (xmitframe_phead != xmitframe_plist) {
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- if (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
+ xmitframe_plist = xmitframe_plist->next;
- xmitframe_plist = xmitframe_plist->next;
+ list_del_init(&pxmitframe->list);
- list_del_init(&pxmitframe->list);
+ psta->sleepq_len--;
- psta->sleepq_len--;
+ if (psta->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
- if (psta->sleepq_len > 0)
- pxmitframe->attrib.mdata = 1;
- else
- pxmitframe->attrib.mdata = 0;
+ pxmitframe->attrib.triggered = 1;
- pxmitframe->attrib.triggered = 1;
+ if (psta->sleepq_len == 0) {
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
- if (psta->sleepq_len == 0) {
- pstapriv->tim_bitmap &= ~BIT(psta->aid);
+ /* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+ }
+ } else {
+ if (pstapriv->tim_bitmap & BIT(psta->aid)) {
+ if (psta->sleepq_len == 0)
+ /* issue nulldata with More data bit = 0 to indicate we have no buffered packets */
+ issue_nulldata(padapter, psta->hwaddr, 0, 0, 0);
+ else
+ psta->sleepq_len = 0;
- /* upate BCN for TIM IE */
- /* update_BCNTIM(padapter); */
- update_beacon(padapter, _TIM_IE_, NULL, false);
- }
- } else {
- if (pstapriv->tim_bitmap & BIT(psta->aid)) {
- if (psta->sleepq_len == 0)
- /* issue nulldata with More data bit = 0 to indicate we have no buffered packets */
- issue_nulldata(padapter, psta->hwaddr, 0, 0, 0);
- else
- psta->sleepq_len = 0;
-
- pstapriv->tim_bitmap &= ~BIT(psta->aid);
-
- /* upate BCN for TIM IE */
- /* update_BCNTIM(padapter); */
- update_beacon(padapter, _TIM_IE_, NULL, false);
- }
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+ /* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_beacon(padapter, _TIM_IE_, NULL, false);
}
- spin_unlock_bh(&pxmitpriv->lock);
}
+ spin_unlock_bh(&pxmitpriv->lock);
}
-
- return _FAIL;
}
struct recv_frame *recvframe_chk_defrag(struct adapter *padapter, struct recv_frame *precv_frame);
-static int validate_recv_mgnt_frame(struct adapter *padapter,
- struct recv_frame *precv_frame)
+static void validate_recv_mgnt_frame(struct adapter *padapter,
+ struct recv_frame *precv_frame)
{
struct sta_info *psta;
+ struct ieee80211_hdr *hdr;
precv_frame = recvframe_chk_defrag(padapter, precv_frame);
if (!precv_frame)
- return _SUCCESS;
+ return;
- /* for rx pkt statistics */
- psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(precv_frame->rx_data));
+ hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
+ psta = rtw_get_stainfo(&padapter->stapriv, hdr->addr2);
if (psta) {
psta->sta_stats.rx_mgnt_pkts++;
- if (GetFrameSubType(precv_frame->rx_data) == WIFI_BEACON) {
+ if (ieee80211_is_beacon(hdr->frame_control))
psta->sta_stats.rx_beacon_pkts++;
- } else if (GetFrameSubType(precv_frame->rx_data) == WIFI_PROBEREQ) {
+ else if (ieee80211_is_probe_req(hdr->frame_control))
psta->sta_stats.rx_probereq_pkts++;
- } else if (GetFrameSubType(precv_frame->rx_data) == WIFI_PROBERSP) {
- if (!memcmp(padapter->eeprompriv.mac_addr, GetAddr1Ptr(precv_frame->rx_data), ETH_ALEN))
+ else if (ieee80211_is_probe_resp(hdr->frame_control)) {
+ if (!memcmp(padapter->eeprompriv.mac_addr, hdr->addr1, ETH_ALEN))
psta->sta_stats.rx_probersp_pkts++;
- else if (is_broadcast_mac_addr(GetAddr1Ptr(precv_frame->rx_data)) ||
- is_multicast_mac_addr(GetAddr1Ptr(precv_frame->rx_data)))
+ else if (is_broadcast_mac_addr(hdr->addr1) || is_multicast_mac_addr(hdr->addr1))
psta->sta_stats.rx_probersp_bm_pkts++;
else
psta->sta_stats.rx_probersp_uo_pkts++;
@@ -939,72 +928,44 @@ static int validate_recv_mgnt_frame(struct adapter *padapter,
}
mgt_dispatcher(padapter, precv_frame);
-
- return _SUCCESS;
}
static int validate_recv_data_frame(struct adapter *adapter,
struct recv_frame *precv_frame)
{
- u8 bretry;
- u8 *psa, *pda, *pbssid;
struct sta_info *psta = NULL;
u8 *ptr = precv_frame->rx_data;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct security_priv *psecuritypriv = &adapter->securitypriv;
- int ret = _SUCCESS;
-
- bretry = GetRetry(ptr);
- pda = get_da(ptr);
- psa = get_sa(ptr);
- pbssid = get_hdr_bssid(ptr);
+ int ret;
- if (!pbssid) {
- ret = _FAIL;
- goto exit;
- }
+ memcpy(pattrib->dst, ieee80211_get_DA(hdr), ETH_ALEN);
+ memcpy(pattrib->src, ieee80211_get_SA(hdr), ETH_ALEN);
- memcpy(pattrib->dst, pda, ETH_ALEN);
- memcpy(pattrib->src, psa, ETH_ALEN);
+ /* address4 is used only if both to_ds and from_ds are set */
+ if (ieee80211_has_a4(hdr->frame_control))
+ return _FAIL;
- memcpy(pattrib->bssid, pbssid, ETH_ALEN);
+ memcpy(pattrib->ra, hdr->addr1, ETH_ALEN);
+ memcpy(pattrib->ta, hdr->addr2, ETH_ALEN);
- switch (pattrib->to_fr_ds) {
- case 0:
- memcpy(pattrib->ra, pda, ETH_ALEN);
- memcpy(pattrib->ta, psa, ETH_ALEN);
- ret = sta2sta_data_frame(adapter, precv_frame, &psta);
- break;
- case 1:
- memcpy(pattrib->ra, pda, ETH_ALEN);
- memcpy(pattrib->ta, pbssid, ETH_ALEN);
+ if (ieee80211_has_fromds(hdr->frame_control)) {
+ memcpy(pattrib->bssid, hdr->addr2, ETH_ALEN);
ret = ap2sta_data_frame(adapter, precv_frame, &psta);
- break;
- case 2:
- memcpy(pattrib->ra, pbssid, ETH_ALEN);
- memcpy(pattrib->ta, psa, ETH_ALEN);
+ } else if (ieee80211_has_tods(hdr->frame_control)) {
+ memcpy(pattrib->bssid, hdr->addr1, ETH_ALEN);
ret = sta2ap_data_frame(adapter, precv_frame, &psta);
- break;
- case 3:
- memcpy(pattrib->ra, GetAddr1Ptr(ptr), ETH_ALEN);
- memcpy(pattrib->ta, GetAddr2Ptr(ptr), ETH_ALEN);
- ret = _FAIL;
- break;
- default:
- ret = _FAIL;
- break;
+ } else {
+ memcpy(pattrib->bssid, hdr->addr3, ETH_ALEN);
+ ret = sta2sta_data_frame(adapter, precv_frame, &psta);
}
- if (ret == _FAIL) {
- goto exit;
- } else if (ret == RTW_RX_HANDLED) {
- goto exit;
- }
+ if (ret == _FAIL || ret == RTW_RX_HANDLED)
+ return ret;
- if (!psta) {
- ret = _FAIL;
- goto exit;
- }
+ if (!psta)
+ return _FAIL;
/* psta->rssi = prxcmd->rssi; */
/* psta->signal_quality = prxcmd->sq; */
@@ -1014,16 +975,16 @@ static int validate_recv_data_frame(struct adapter *adapter,
pattrib->ack_policy = 0;
/* parsing QC field */
if (pattrib->qos) {
- pattrib->priority = GetPriority((ptr + 24));
+ pattrib->priority = ieee80211_get_tid(hdr);
pattrib->ack_policy = GetAckpolicy((ptr + 24));
pattrib->amsdu = GetAMsdu((ptr + 24));
- pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 32 : 26;
+ pattrib->hdrlen = 26;
if (pattrib->priority != 0 && pattrib->priority != 3)
adapter->recvpriv.bIsAnyNonBEPkts = true;
} else {
pattrib->priority = 0;
- pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 30 : 24;
+ pattrib->hdrlen = 24;
}
if (pattrib->order)/* HT-CTRL 11n */
@@ -1032,10 +993,9 @@ static int validate_recv_data_frame(struct adapter *adapter,
precv_frame->preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority];
/* decache, drop duplicate recv packets */
- if (recv_decache(precv_frame, bretry, &psta->sta_recvpriv.rxcache) == _FAIL) {
- ret = _FAIL;
- goto exit;
- }
+ if (recv_decache(precv_frame, ieee80211_has_retry(hdr->frame_control),
+ &psta->sta_recvpriv.rxcache) == _FAIL)
+ return _FAIL;
if (pattrib->privacy) {
GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, is_multicast_ether_addr(pattrib->ra));
@@ -1047,9 +1007,7 @@ static int validate_recv_data_frame(struct adapter *adapter,
pattrib->icv_len = 0;
}
-exit:
-
- return ret;
+ return _SUCCESS;
}
static int validate_recv_frame(struct adapter *adapter, struct recv_frame *precv_frame)
@@ -1059,11 +1017,8 @@ static int validate_recv_frame(struct adapter *adapter, struct recv_frame *precv
/* then call check if rx seq/frag. duplicated. */
int retval = _FAIL;
- u8 bDumpRxPkt;
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- u8 *ptr = precv_frame->rx_data;
- __le16 fc = *(__le16 *)ptr;
- u8 ver = (unsigned char)(*ptr) & 0x3;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
@@ -1072,32 +1027,26 @@ static int validate_recv_frame(struct adapter *adapter, struct recv_frame *precv
pmlmeext->channel_set[ch_set_idx].rx_count++;
}
- /* add version chk */
- if (ver != 0)
+ if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_VERS)) != 0)
return _FAIL;
- pattrib->to_fr_ds = get_tofr_ds(ptr);
-
- pattrib->frag_num = GetFragNum(ptr);
- pattrib->seq_num = GetSequence(ptr);
+ pattrib->frag_num = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+ pattrib->seq_num = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- pattrib->pw_save = GetPwrMgt(ptr);
- pattrib->mfrag = ieee80211_has_morefrags(fc);
- pattrib->mdata = ieee80211_has_moredata(fc);
- pattrib->privacy = ieee80211_has_protected(fc);
- pattrib->order = ieee80211_has_order(fc);
-
- /* Dump rx packets */
- GetHalDefVar8188EUsb(adapter, HAL_DEF_DBG_DUMP_RXPKT, &bDumpRxPkt);
+ pattrib->pw_save = ieee80211_has_pm(hdr->frame_control);
+ pattrib->mfrag = ieee80211_has_morefrags(hdr->frame_control);
+ pattrib->mdata = ieee80211_has_moredata(hdr->frame_control);
+ pattrib->privacy = ieee80211_has_protected(hdr->frame_control);
+ pattrib->order = ieee80211_has_order(hdr->frame_control);
/* We return _SUCCESS only for data frames. */
- if (ieee80211_is_mgmt(fc))
+ if (ieee80211_is_mgmt(hdr->frame_control))
validate_recv_mgnt_frame(adapter, precv_frame);
- else if (ieee80211_is_ctl(fc))
+ else if (ieee80211_is_ctl(hdr->frame_control))
validate_recv_ctrl_frame(adapter, precv_frame);
- else if (ieee80211_is_data(fc)) {
+ else if (ieee80211_is_data(hdr->frame_control)) {
rtw_led_control(adapter, LED_CTL_RX);
- pattrib->qos = ieee80211_is_data_qos(fc);
+ pattrib->qos = ieee80211_is_data_qos(hdr->frame_control);
retval = validate_recv_data_frame(adapter, precv_frame);
if (retval == _FAIL) {
struct recv_priv *precvpriv = &adapter->recvpriv;
@@ -1284,8 +1233,9 @@ struct recv_frame *recvframe_chk_defrag(struct adapter *padapter, struct recv_fr
psta_addr = pfhdr->attrib.ta;
psta = rtw_get_stainfo(pstapriv, psta_addr);
if (!psta) {
- u8 type = GetFrameType(pfhdr->rx_data);
- if (type != WIFI_DATA_TYPE) {
+ __le16 fc = *(__le16 *)pfhdr->rx_data;
+
+ if (ieee80211_is_data(fc)) {
psta = rtw_get_bcmc_stainfo(padapter);
pdefrag_q = &psta->sta_recvpriv.defrag_q;
} else {
@@ -1723,12 +1673,9 @@ static int recv_func_prehandle(struct adapter *padapter, struct recv_frame *rfra
/* check the frame crtl field and decache */
ret = validate_recv_frame(padapter, rframe);
- if (ret != _SUCCESS) {
+ if (ret != _SUCCESS)
rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
- goto exit;
- }
-exit:
return ret;
}
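The rtw_recv.c conversion above replaces the driver's private GetFrameType()/GetFrameSubType()/GetAddr*Ptr() accessors with struct ieee80211_hdr and the frame_control helpers from <linux/ieee80211.h>. A minimal sketch of that idiom is shown below; the function name is illustrative only.

#include <linux/ieee80211.h>

static void classify_rx_frame(const u8 *rx_data)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)rx_data;

	if (ieee80211_is_mgmt(hdr->frame_control)) {
		/* management: beacons, probe req/resp (ieee80211_is_beacon() etc.) */
	} else if (ieee80211_is_ctl(hdr->frame_control)) {
		/* control frames; PS-Poll is matched with ieee80211_is_pspoll() */
	} else if (ieee80211_is_data(hdr->frame_control)) {
		/* data frames; QoS data is matched with ieee80211_is_data_qos() */
	}
}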
diff --git a/drivers/staging/r8188eu/core/rtw_security.c b/drivers/staging/r8188eu/core/rtw_security.c
index 2cdcdfd5ca5c..5bba57d18b5f 100644
--- a/drivers/staging/r8188eu/core/rtw_security.c
+++ b/drivers/staging/r8188eu/core/rtw_security.c
@@ -63,7 +63,7 @@ void rtw_wep_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
arc4_crypt(ctx, payload + length, crc.f1, 4);
pframe += pxmitpriv->frag_len;
- pframe = (u8 *)RND4((size_t)(pframe));
+ pframe = PTR_ALIGN(pframe, 4);
}
}
}
@@ -504,7 +504,7 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
arc4_crypt(ctx, payload + length, crc.f1, 4);
pframe += pxmitpriv->frag_len;
- pframe = (u8 *)RND4((size_t)(pframe));
+ pframe = PTR_ALIGN(pframe, 4);
}
}
} else {
@@ -1133,7 +1133,7 @@ u32 rtw_aes_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
pframe += pxmitpriv->frag_len;
- pframe = (u8 *)RND4((size_t)(pframe));
+ pframe = PTR_ALIGN(pframe, 4);
}
}
} else {
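The rtw_security.c hunks above swap the driver-private RND4() rounding for the kernel's PTR_ALIGN() macro, which rounds a pointer up to the next multiple of a power-of-two alignment. A small illustration of the per-fragment stepping, under the assumption of a 4-byte alignment as used here; the helper name is hypothetical.

#include <linux/align.h>	/* PTR_ALIGN(); older trees pull it in via <linux/kernel.h> */

/* Advance past one fragment and round up to the next 4-byte boundary. */
static u8 *next_frag(u8 *pframe, u32 frag_len)
{
	return PTR_ALIGN(pframe + frag_len, 4);
}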
diff --git a/drivers/staging/r8188eu/core/rtw_sta_mgt.c b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
index 91ff82f24f1f..357f98e22d8a 100644
--- a/drivers/staging/r8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
@@ -470,9 +470,9 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
spin_unlock_bh(&pacl_node_q->lock);
if (pacl_list->mode == 1)/* accept unless in deny list */
- res = (match) ? false : true;
+ res = !match;
else if (pacl_list->mode == 2)/* deny unless in accept list */
- res = (match) ? true : false;
+ res = match;
else
res = true;
diff --git a/drivers/staging/r8188eu/core/rtw_wlan_util.c b/drivers/staging/r8188eu/core/rtw_wlan_util.c
index 665b077190bc..392a65783f32 100644
--- a/drivers/staging/r8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/r8188eu/core/rtw_wlan_util.c
@@ -276,14 +276,6 @@ void Restore_DM_Func_Flag(struct adapter *padapter)
SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&saveflag));
}
-void Switch_DM_Func(struct adapter *padapter, u32 mode, u8 enable)
-{
- if (enable)
- SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_SET, (u8 *)(&mode));
- else
- SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_CLR, (u8 *)(&mode));
-}
-
void Set_MSR(struct adapter *padapter, u8 type)
{
u8 val8;
@@ -511,6 +503,31 @@ int WMM_param_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
return true;
}
+static void set_acm_ctrl(struct adapter *adapter, u8 acm_mask)
+{
+ u8 acmctrl = rtw_read8(adapter, REG_ACMHWCTRL);
+
+ if (acm_mask > 1)
+ acmctrl = acmctrl | 0x1;
+
+ if (acm_mask & BIT(3))
+ acmctrl |= ACMHW_VOQEN;
+ else
+ acmctrl &= (~ACMHW_VOQEN);
+
+ if (acm_mask & BIT(2))
+ acmctrl |= ACMHW_VIQEN;
+ else
+ acmctrl &= (~ACMHW_VIQEN);
+
+ if (acm_mask & BIT(1))
+ acmctrl |= ACMHW_BEQEN;
+ else
+ acmctrl &= (~ACMHW_BEQEN);
+
+ rtw_write8(adapter, REG_ACMHWCTRL, acmctrl);
+}
+
void WMMOnAssocRsp(struct adapter *padapter)
{
u8 ACI, ACM, AIFS, ECWMin, ECWMax, aSifsTime;
@@ -522,6 +539,7 @@ void WMMOnAssocRsp(struct adapter *padapter)
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct hal_data_8188e *haldata = &padapter->haldata;
if (pmlmeinfo->WMM_enable == 0) {
padapter->mlmepriv.acm_mask = 0;
@@ -550,7 +568,8 @@ void WMMOnAssocRsp(struct adapter *padapter)
switch (ACI) {
case 0x0:
- SetHwReg8188EU(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acParm));
+ haldata->AcParam_BE = acParm;
+ rtw_write32(padapter, REG_EDCA_BE_PARAM, acParm);
acm_mask |= (ACM ? BIT(1) : 0);
edca[XMIT_BE_QUEUE] = acParm;
break;
@@ -572,7 +591,7 @@ void WMMOnAssocRsp(struct adapter *padapter)
}
if (padapter->registrypriv.acm_method == 1)
- SetHwReg8188EU(padapter, HW_VAR_ACM_CTRL, (u8 *)(&acm_mask));
+ set_acm_ctrl(padapter, acm_mask);
else
padapter->mlmepriv.acm_mask = acm_mask;
@@ -743,6 +762,35 @@ void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
memcpy(&pmlmeinfo->HT_info, pIE->data, pIE->Length);
}
+static void set_min_ampdu_spacing(struct adapter *adapter, u8 spacing)
+{
+ u8 sec_spacing;
+
+ if (spacing <= 7) {
+ switch (adapter->securitypriv.dot11PrivacyAlgrthm) {
+ case _NO_PRIVACY_:
+ case _AES_:
+ sec_spacing = 0;
+ break;
+ case _WEP40_:
+ case _WEP104_:
+ case _TKIP_:
+ case _TKIP_WTMIC_:
+ sec_spacing = 6;
+ break;
+ default:
+ sec_spacing = 7;
+ break;
+ }
+
+ if (spacing < sec_spacing)
+ spacing = sec_spacing;
+
+ rtw_write8(adapter, REG_AMPDU_MIN_SPACE,
+ (rtw_read8(adapter, REG_AMPDU_MIN_SPACE) & 0xf8) | spacing);
+ }
+}
+
void HTOnAssocRsp(struct adapter *padapter)
{
unsigned char max_AMPDU_len;
@@ -767,7 +815,7 @@ void HTOnAssocRsp(struct adapter *padapter)
min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;
- SetHwReg8188EU(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
+ set_min_ampdu_spacing(padapter, min_MPDU_spacing);
SetHwReg8188EU(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
}
@@ -846,7 +894,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
if (!is_client_associated_to_ap(Adapter))
return true;
- len = packet_len - sizeof(struct rtw_ieee80211_hdr_3addr);
+ len = packet_len - sizeof(struct ieee80211_hdr_3addr);
if (len > MAX_IE_SZ)
return _FAIL;
@@ -867,7 +915,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
/* below is to copy the information element */
bssid->IELength = len;
- memcpy(bssid->IEs, (pframe + sizeof(struct rtw_ieee80211_hdr_3addr)), bssid->IELength);
+ memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
/* check bw and channel offset */
/* parsing HT_CAP_IE */
@@ -916,7 +964,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
else
hidden_ssid = false;
- if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) {
+ if (p && (!hidden_ssid && (*(p + 1)))) {
memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
bssid->Ssid.SsidLength = *(p + 1);
} else {
@@ -1275,14 +1323,10 @@ void update_IOT_info(struct adapter *padapter)
case HT_IOT_PEER_RALINK:
pmlmeinfo->turboMode_cts2self = 0;
pmlmeinfo->turboMode_rtsen = 1;
- /* disable high power */
- Switch_DM_Func(padapter, (~DYNAMIC_BB_DYNAMIC_TXPWR), false);
break;
case HT_IOT_PEER_REALTEK:
/* rtw_write16(padapter, 0x4cc, 0xffff); */
/* rtw_write16(padapter, 0x546, 0x01c0); */
- /* disable high power */
- Switch_DM_Func(padapter, (~DYNAMIC_BB_DYNAMIC_TXPWR), false);
break;
default:
pmlmeinfo->turboMode_cts2self = 0;
@@ -1291,26 +1335,36 @@ void update_IOT_info(struct adapter *padapter)
}
}
+static void set_ack_preamble(struct adapter *adapter, bool short_preamble)
+{
+ struct hal_data_8188e *haldata = &adapter->haldata;
+ u8 val8;
+
+ /* Joseph marked out for Netgear 3500 TKIP channel 7 issue.(Temporarily) */
+ val8 = haldata->nCur40MhzPrimeSC << 5;
+ if (short_preamble)
+ val8 |= 0x80;
+
+ rtw_write8(adapter, REG_RRSR + 2, val8);
+};
+
void update_capinfo(struct adapter *Adapter, u16 updateCap)
{
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- bool ShortPreamble;
/* Check preamble mode, 2005.01.06, by rcnjko. */
/* Mark to update preamble value forever, 2008.03.18 by lanhsin */
if (updateCap & cShortPreamble) { /* Short Preamble */
if (pmlmeinfo->preamble_mode != PREAMBLE_SHORT) { /* PREAMBLE_LONG or PREAMBLE_AUTO */
- ShortPreamble = true;
pmlmeinfo->preamble_mode = PREAMBLE_SHORT;
- SetHwReg8188EU(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+ set_ack_preamble(Adapter, true);
}
} else { /* Long Preamble */
if (pmlmeinfo->preamble_mode != PREAMBLE_LONG) { /* PREAMBLE_SHORT or PREAMBLE_AUTO */
- ShortPreamble = false;
pmlmeinfo->preamble_mode = PREAMBLE_LONG;
- SetHwReg8188EU(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+ set_ack_preamble(Adapter, false);
}
}
@@ -1338,7 +1392,6 @@ void update_capinfo(struct adapter *Adapter, u16 updateCap)
void update_wireless_mode(struct adapter *padapter)
{
int ratelen, network_type = 0;
- u32 SIFS_Timer;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
@@ -1365,10 +1418,12 @@ void update_wireless_mode(struct adapter *padapter)
pmlmeext->cur_wireless_mode = network_type & padapter->registrypriv.wireless_mode;
- SIFS_Timer = 0x0a0a0808;/* 0x0808 -> for CCK, 0x0a0a -> for OFDM */
- /* change this value if having IOT issues. */
-
- SetHwReg8188EU(padapter, HW_VAR_RESP_SIFS, (u8 *)&SIFS_Timer);
+ /* RESP_SIFS for CCK */
+ rtw_write8(padapter, REG_R2T_SIFS, 0x08);
+ rtw_write8(padapter, REG_R2T_SIFS + 1, 0x08);
+ /* RESP_SIFS for OFDM */
+ rtw_write8(padapter, REG_T2T_SIFS, 0x0a);
+ rtw_write8(padapter, REG_T2T_SIFS + 1, 0x0a);
if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
update_mgnt_tx_rate(padapter, IEEE80211_CCK_RATE_1MB);
@@ -1411,34 +1466,12 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
return _SUCCESS;
}
-void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr)
-{
- struct sta_info *psta;
- u16 tid;
- u16 param;
- struct recv_reorder_ctrl *preorder_ctrl;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct ADDBA_request *preq = (struct ADDBA_request *)paddba_req;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- psta = rtw_get_stainfo(pstapriv, addr);
-
- if (psta) {
- param = le16_to_cpu(preq->BA_para_set);
- tid = (param >> 2) & 0x0f;
- preorder_ctrl = &psta->recvreorder_ctrl[tid];
- preorder_ctrl->indicate_seq = 0xffff;
- preorder_ctrl->enable = (pmlmeinfo->bAcceptAddbaReq) ? true : false;
- }
-}
-
void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
{
u8 *pIE;
__le32 *pbuf;
- pIE = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ pIE = pframe + sizeof(struct ieee80211_hdr_3addr);
pbuf = (__le32 *)pIE;
pmlmeext->TSFValue = le32_to_cpu(*(pbuf + 1));
diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c
index c2a550e7250e..3d8e9dea7651 100644
--- a/drivers/staging/r8188eu/core/rtw_xmit.c
+++ b/drivers/staging/r8188eu/core/rtw_xmit.c
@@ -52,8 +52,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
/*
- Please insert all the queue initializaiton using rtw_init_queue below
- */
+ * Please insert all the queue initialization using rtw_init_queue below
+ */
pxmitpriv->adapter = padapter;
@@ -66,10 +66,10 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
rtw_init_queue(&pxmitpriv->free_xmit_queue);
/*
- Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
- and initialize free_xmit_frame below.
- Please also apply free_txobj to link_up all the xmit_frames...
- */
+ * Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
+ * and initialize free_xmit_frame below.
+ * Please also apply free_txobj to link_up all the xmit_frames...
+ */
pxmitpriv->pallocated_frame_buf = vzalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
@@ -178,7 +178,12 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
- rtw_alloc_hwxmits(padapter);
+ res = rtw_alloc_hwxmits(padapter);
+ if (res) {
+ res = _FAIL;
+ goto exit;
+ }
+
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
for (i = 0; i < 4; i++)
@@ -399,7 +404,7 @@ static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
pattrib->priority = user_prio;
pattrib->hdrlen = WLAN_HDR_A3_QOS_LEN;
- pattrib->subtype = WIFI_QOS_DATA_TYPE;
+ pattrib->subtype = IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA;
}
static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct pkt_attrib *pattrib)
@@ -448,14 +453,12 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
_rtw_pktfile_read(&pktfile, &tmp[0], 24);
pattrib->dhcp_pkt = 0;
if (pktfile.pkt_len > 282) {/* MINIMUM_DHCP_PACKET_SIZE) { */
- if (ETH_P_IP == pattrib->ether_type) {/* IP header */
- if (((tmp[21] == 68) && (tmp[23] == 67)) ||
- ((tmp[21] == 67) && (tmp[23] == 68))) {
- /* 68 : UDP BOOTP client */
- /* 67 : UDP BOOTP server */
- /* Use low rate to send DHCP packet. */
- pattrib->dhcp_pkt = 1;
- }
+ if (((tmp[21] == 68) && (tmp[23] == 67)) ||
+ ((tmp[21] == 67) && (tmp[23] == 68))) {
+ /* 68 : UDP BOOTP client */
+ /* 67 : UDP BOOTP server */
+ /* Use low rate to send DHCP packet. */
+ pattrib->dhcp_pkt = 1;
}
}
}
@@ -497,7 +500,7 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
pattrib->pkt_hdrlen = ETH_HLEN;/* pattrib->ether_type == 0x8100) ? (14 + 4): 14; vlan tag */
pattrib->hdrlen = WLAN_HDR_A3_LEN;
- pattrib->subtype = WIFI_DATA_TYPE;
+ pattrib->subtype = IEEE80211_FTYPE_DATA;
pattrib->priority = 0;
if (check_fwstate(pmlmepriv, WIFI_AP_STATE | WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)) {
@@ -642,7 +645,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
payload = pframe;
for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
- payload = (u8 *)RND4((size_t)(payload));
+ payload = PTR_ALIGN(payload, 4);
payload = payload + pattrib->hdrlen + pattrib->iv_len;
if ((curfragnum + 1) == pattrib->nr_frags) {
@@ -696,13 +699,13 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
{
u16 *qc;
- struct rtw_ieee80211_hdr *pwlanhdr = (struct rtw_ieee80211_hdr *)hdr;
+ struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct qos_priv *pqospriv = &pmlmepriv->qospriv;
u8 qos_option = false;
int res = _SUCCESS;
- __le16 *fctrl = &pwlanhdr->frame_ctl;
+ __le16 *fctrl = &pwlanhdr->frame_control;
struct sta_info *psta;
@@ -717,7 +720,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
SetFrameSubType(fctrl, pattrib->subtype);
- if (pattrib->subtype & WIFI_DATA_TYPE) {
+ if (pattrib->subtype & IEEE80211_FTYPE_DATA) {
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
/* to_ds = 1, fr_ds = 0; */
/* Data transfer to AP */
@@ -853,22 +856,19 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, struct pkt_attrib *pat
}
/*
-
-This sub-routine will perform all the following:
-
-1. remove 802.3 header.
-2. create wlan_header, based on the info in pxmitframe
-3. append sta's iv/ext-iv
-4. append LLC
-5. move frag chunk from pframe to pxmitframe->mem
-6. apply sw-encrypt, if necessary.
-
-*/
+ * This sub-routine will perform all the following:
+ *
+ * 1. remove 802.3 header.
+ * 2. create wlan_header, based on the info in pxmitframe
+ * 3. append sta's iv/ext-iv
+ * 4. append LLC
+ * 5. move frag chunk from pframe to pxmitframe->mem
+ * 6. apply sw-encrypt, if necessary.
+ */
s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe)
{
struct pkt_file pktfile;
s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz;
- size_t addr;
u8 *pframe, *mem_start;
u8 hw_hdr_offset;
struct sta_info *psta;
@@ -985,9 +985,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
break;
}
- addr = (size_t)(pframe);
-
- mem_start = (unsigned char *)RND4(addr) + hw_hdr_offset;
+ mem_start = PTR_ALIGN(pframe, 4) + hw_hdr_offset;
memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
}
@@ -1210,24 +1208,22 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
}
/*
-Calling context:
-1. OS_TXENTRY
-2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
-
-If we turn on USE_RXTHREAD, then, no need for critical section.
-Otherwise, we must use _enter/_exit critical to protect free_xmit_queue...
-
-Must be very very cautious...
-
-*/
-
+ * Calling context:
+ * 1. OS_TXENTRY
+ * 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
+ *
+ * If we turn on USE_RXTHREAD, then, no need for critical section.
+ * Otherwise, we must use _enter/_exit critical to protect free_xmit_queue...
+ *
+ * Must be very very cautious...
+ */
struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pfree_xmit_queue) */
{
/*
- Please remember to use all the osdep_service api,
- and lock/unlock or _enter/_exit critical to protect
- pfree_xmit_queue
- */
+ * Please remember to use all the osdep_service api,
+ * and lock/unlock or _enter/_exit critical to protect
+ * pfree_xmit_queue
+ */
struct xmit_frame *pxframe = NULL;
struct list_head *plist, *phead;
@@ -1474,7 +1470,7 @@ exit:
return res;
}
-void rtw_alloc_hwxmits(struct adapter *padapter)
+int rtw_alloc_hwxmits(struct adapter *padapter)
{
struct hw_xmit *hwxmits;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1482,6 +1478,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
pxmitpriv->hwxmits = kzalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry, GFP_KERNEL);
+ if (!pxmitpriv->hwxmits)
+ return -ENOMEM;
hwxmits = pxmitpriv->hwxmits;
@@ -1498,6 +1496,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
} else {
}
+
+ return 0;
}
void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
index e7f834b02567..7901d0afa2e7 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
@@ -170,7 +170,7 @@ enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
{
u32 hex = 0;
u32 i = 0;
- u32 arraylen = sizeof(array_agc_tab_1t_8188e) / sizeof(u32);
+ u32 arraylen = ARRAY_SIZE(array_agc_tab_1t_8188e);
u32 *array = array_agc_tab_1t_8188e;
bool biol = false;
struct adapter *adapter = dm_odm->Adapter;
@@ -446,7 +446,7 @@ enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
{
u32 hex = 0;
u32 i = 0;
- u32 arraylen = sizeof(array_phy_reg_1t_8188e) / sizeof(u32);
+ u32 arraylen = ARRAY_SIZE(array_phy_reg_1t_8188e);
u32 *array = array_phy_reg_1t_8188e;
bool biol = false;
struct adapter *adapter = dm_odm->Adapter;
@@ -651,7 +651,7 @@ void ODM_ReadAndConfig_PHY_REG_PG_8188E(struct odm_dm_struct *dm_odm)
{
u32 hex;
u32 i = 0;
- u32 arraylen = sizeof(array_phy_reg_pg_8188e) / sizeof(u32);
+ u32 arraylen = ARRAY_SIZE(array_phy_reg_pg_8188e);
u32 *array = array_phy_reg_pg_8188e;
hex = ODM_ITRF_USB << 8;
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
index 20ce1571fc26..77b25885c63b 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
@@ -132,7 +132,7 @@ enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
u32 hex = 0;
u32 i;
- u32 array_len = sizeof(array_MAC_REG_8188E) / sizeof(u32);
+ u32 array_len = ARRAY_SIZE(array_MAC_REG_8188E);
u32 *array = array_MAC_REG_8188E;
bool biol = false;
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
index 9dc888a66d09..08cbfce3808d 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
@@ -138,7 +138,7 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
u32 hex = 0;
u32 i = 0;
- u32 ArrayLen = sizeof(Array_RadioA_1T_8188E) / sizeof(u32);
+ u32 ArrayLen = ARRAY_SIZE(Array_RadioA_1T_8188E);
u32 *Array = Array_RadioA_1T_8188E;
bool biol = false;
struct adapter *Adapter = pDM_Odm->Adapter;
diff --git a/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c b/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
index 5b91aec6a7e3..150ea380c39e 100644
--- a/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
+++ b/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
@@ -1,30 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-/*++
-
-Module Name:
- HalPwrSeqCmd.c
-
-Abstract:
- Implement HW Power sequence configuration CMD handling routine for Realtek devices.
-
-Major Change History:
- When Who What
- ---------- --------------- -------------------------------
- 2011-10-26 Lucas Modify to be compatible with SD4-CE driver.
- 2011-07-07 Roger Create.
-
---*/
-
#include "../include/HalPwrSeqCmd.h"
-/* Description: */
-/* This routine deals with the Power Configuration CMDs parsing
- * for RTL8723/RTL8188E Series IC.
- * Assumption:
- * We should follow specific format which was released from HW SD.
- */
u8 HalPwrSeqCmdParsing(struct adapter *padapter, struct wl_pwr_cfg pwrseqcmd[])
{
struct wl_pwr_cfg pwrcfgcmd = {0};
diff --git a/drivers/staging/r8188eu/hal/hal_com.c b/drivers/staging/r8188eu/hal/hal_com.c
index 06f2a9083056..910cc07f656c 100644
--- a/drivers/staging/r8188eu/hal/hal_com.c
+++ b/drivers/staging/r8188eu/hal/hal_com.c
@@ -44,7 +44,7 @@ void dump_chip_info(struct HAL_VERSION chip_vers)
cnt += sprintf((buf + cnt), "1T1R_");
- cnt += sprintf((buf + cnt), "RomVer(%d)\n", chip_vers.ROMVer);
+ cnt += sprintf((buf + cnt), "RomVer(%d)\n", 0);
pr_info("%s", buf);
}
@@ -267,7 +267,7 @@ static void three_out_pipe(struct adapter *adapter, bool wifi_cfg)
bool Hal_MappingOutPipe(struct adapter *adapter, u8 numoutpipe)
{
struct registry_priv *pregistrypriv = &adapter->registrypriv;
- bool wifi_cfg = (pregistrypriv->wifi_spec) ? true : false;
+ bool wifi_cfg = pregistrypriv->wifi_spec;
bool result = true;
switch (numoutpipe) {
diff --git a/drivers/staging/r8188eu/hal/odm_HWConfig.c b/drivers/staging/r8188eu/hal/odm_HWConfig.c
index 87e9a5270be0..54cc3d7789cd 100644
--- a/drivers/staging/r8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/r8188eu/hal/odm_HWConfig.c
@@ -65,13 +65,13 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
struct phy_status_rpt *pPhyStaRpt = (struct phy_status_rpt *)pPhyStatus;
- isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
+ isCCKrate = pPktinfo->Rate >= DESC92C_RATE1M && pPktinfo->Rate <= DESC92C_RATE11M;
if (isCCKrate) {
u8 cck_agc_rpt;
/* (1)Hardware does not provide RSSI for CCK */
- /* (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
cck_highpwr = dm_odm->bCckHighPower;
@@ -170,7 +170,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
/* Get Rx snr value in DB */
dm_odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i] / 2);
}
- /* (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f) - 110;
PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
@@ -234,7 +234,7 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
if ((!pPktinfo->bPacketMatchBSSID))
return;
- isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
+ isCCKrate = pPktinfo->Rate >= DESC92C_RATE1M && pPktinfo->Rate <= DESC92C_RATE11M;
/* Smart Antenna Debug Message------------------ */
if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
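For illustration only: a relational expression already evaluates to 0 or 1, so assigning it straight to a bool is equivalent to the removed "? true : false" form. A hypothetical helper, assuming the DESC92C_* rate constants used above:

/* Illustration only; demo_is_cck_rate() is not part of the driver. */
static bool demo_is_cck_rate(u8 rate)
{
	return rate >= DESC92C_RATE1M && rate <= DESC92C_RATE11M;
}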
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
index f1464e4ba429..475650dc7301 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
@@ -199,16 +199,16 @@ void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt)
static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
{
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
u32 rate_len, pktlen;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
eth_broadcast_addr(pwlanhdr->addr1);
@@ -218,8 +218,8 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
SetFrameSubType(pframe, WIFI_BEACON);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pktlen = sizeof(struct ieee80211_hdr_3addr);
/* timestamp will be inserted by hardware */
pframe += 8;
@@ -281,15 +281,15 @@ _ConstructBeacon:
static void ConstructPSPoll(struct adapter *adapt, u8 *pframe, u32 *pLength)
{
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
__le16 *fctrl;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
/* Frame control. */
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
SetPwrMgt(fctrl);
SetFrameSubType(pframe, WIFI_PSPOLL);
@@ -314,7 +314,7 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
u8 bEosp,
u8 bForcePowerSave)
{
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
u32 pktlen;
struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
@@ -322,9 +322,9 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
if (bForcePowerSave)
SetPwrMgt(fctrl);
@@ -353,19 +353,19 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
SetSeqNum(pwlanhdr, 0);
if (bQoS) {
- struct rtw_ieee80211_hdr_3addr_qos *pwlanqoshdr;
+ struct ieee80211_qos_hdr *pwlanqoshdr;
SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
- pwlanqoshdr = (struct rtw_ieee80211_hdr_3addr_qos *)pframe;
- SetPriority(&pwlanqoshdr->qc, AC);
- SetEOSP(&pwlanqoshdr->qc, bEosp);
+ pwlanqoshdr = (struct ieee80211_qos_hdr *)pframe;
+ SetPriority(&pwlanqoshdr->qos_ctrl, AC);
+ SetEOSP(&pwlanqoshdr->qos_ctrl, bEosp);
- pktlen = sizeof(struct rtw_ieee80211_hdr_3addr_qos);
+ pktlen = sizeof(struct ieee80211_qos_hdr);
} else {
SetFrameSubType(pframe, WIFI_DATA_NULL);
- pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pktlen = sizeof(struct ieee80211_qos_hdr);
}
*pLength = pktlen;
@@ -373,7 +373,7 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u8 *StaAddr, bool bHideSSID)
{
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
u8 *mac, *bssid;
u32 pktlen;
@@ -381,12 +381,12 @@ static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
mac = myid(&adapt->eeprompriv);
bssid = cur_network->MacAddress;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
@@ -395,7 +395,7 @@ static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u
SetSeqNum(pwlanhdr, 0);
SetFrameSubType(fctrl, WIFI_PROBERSP);
- pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe += pktlen;
if (cur_network->IELength > MAX_IE_SZ)
@@ -557,8 +557,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
rtw_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl & (~BIT(6))));
haldata->RegFwHwTxQCtrl &= (~BIT(6));
- /* Clear beacon valid check bit. */
- SetHwReg8188EU(adapt, HW_VAR_BCN_VALID, NULL);
+ clear_beacon_valid_bit(adapt);
DLBcnCount = 0;
poll = 0;
do {
@@ -569,7 +568,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
yield();
/* mdelay(10); */
/* check rsvd page download OK. */
- GetHwReg8188EU(adapt, HW_VAR_BCN_VALID, (u8 *)(&bcn_valid));
+ bcn_valid = get_beacon_valid_bit(adapt);
poll++;
} while (!bcn_valid && (poll % 10) != 0 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
} while (!bcn_valid && DLBcnCount <= 100 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
@@ -597,7 +596,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
/* Update RSVD page location H2C to Fw. */
if (bcn_valid)
- SetHwReg8188EU(adapt, HW_VAR_BCN_VALID, NULL);
+ clear_beacon_valid_bit(adapt);
/* Do not enable HW DMA BCN or it will cause Pcie interface hang by timing issue. 2011.11.24. by tynli. */
/* Clear CR[8] or beacon packet will not be send to TxBuf anymore. */
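For illustration only: the frame constructors above now use the 802.11 header layouts that <linux/ieee80211.h> already provides (frame_control is a __le16) instead of the driver-private rtw_ieee80211_hdr copies. A minimal sketch; demo_init_beacon_hdr() is a made-up helper:

#include <linux/etherdevice.h>
#include <linux/ieee80211.h>

/* Illustration only; demo_init_beacon_hdr() is hypothetical. */
static void demo_init_beacon_hdr(struct ieee80211_hdr *hdr, const u8 *bssid)
{
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					 IEEE80211_STYPE_BEACON);
	eth_broadcast_addr(hdr->addr1);		/* beacons go to the broadcast address */
	memcpy(hdr->addr2, bssid, ETH_ALEN);
	memcpy(hdr->addr3, bssid, ETH_ALEN);
}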
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
index 6811be95da9a..e17375a74f17 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
@@ -33,17 +33,16 @@ static s32 iol_execute(struct adapter *padapter, u8 control)
{
s32 status = _FAIL;
u8 reg_0x88 = 0;
- u32 start = 0, passing_time = 0;
+ unsigned long timeout;
control = control & 0x0f;
reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0);
rtw_write8(padapter, REG_HMEBOX_E0, reg_0x88 | control);
- start = jiffies;
+ timeout = jiffies + msecs_to_jiffies(1000);
while ((reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0)) & control &&
- (passing_time = rtw_get_passing_time_ms(start)) < 1000) {
+ time_before(jiffies, timeout))
;
- }
reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0);
status = (reg_0x88 & control) ? _FAIL : _SUCCESS;
@@ -187,8 +186,8 @@ static void efuse_read_phymap_from_txpktbuf(
u16 *size /* for efuse content: the max byte to read. will update to byte read */
)
{
+ unsigned long timeout;
u16 dbg_addr = 0;
- u32 start = 0, passing_time = 0;
__le32 lo32 = 0, hi32 = 0;
u16 len = 0, count = 0;
int i = 0;
@@ -207,9 +206,8 @@ static void efuse_read_phymap_from_txpktbuf(
rtw_write16(adapter, REG_PKTBUF_DBG_ADDR, dbg_addr + i);
rtw_write8(adapter, REG_TXPKTBUF_DBG, 0);
- start = jiffies;
- while (!rtw_read8(adapter, REG_TXPKTBUF_DBG) &&
- (passing_time = rtw_get_passing_time_ms(start)) < 1000)
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (!rtw_read8(adapter, REG_TXPKTBUF_DBG) && time_before(jiffies, timeout))
rtw_usleep_os(100);
/* data from EEPROM needs to be in LE */
@@ -505,7 +503,6 @@ void rtl8188e_read_chip_version(struct adapter *padapter)
ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK) >> CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
- ChipVersion.ROMVer = 0; /* ROM code version. */
dump_chip_info(ChipVersion);
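For illustration only: the polling loops above switch to the standard jiffies timeout idiom (msecs_to_jiffies() plus time_before()) instead of the removed rtw_get_passing_time_ms() helper. A minimal sketch; demo_poll_iol_ready() is a made-up name, while rtw_read8() and REG_HMEBOX_E0 are the driver symbols already used above:

#include <linux/delay.h>
#include <linux/jiffies.h>

/* Illustration only; demo_poll_iol_ready() is hypothetical. */
static bool demo_poll_iol_ready(struct adapter *padapter, u8 control)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	while (rtw_read8(padapter, REG_HMEBOX_E0) & control) {
		if (!time_before(jiffies, timeout))
			return false;		/* gave up after ~1 second */
		usleep_range(100, 200);
	}
	return true;
}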
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
index ea75ff11ad17..4864dafd887b 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
@@ -378,10 +378,10 @@ phy_InitBBRFRegisterDefinition(
/* Tx AGC Gain Stage (same for all path. Should we remove this?) */
pHalData->PHYRegDef.rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
- /* Tranceiver A~D HSSI Parameter-1 */
+ /* Transceiver A~D HSSI Parameter-1 */
pHalData->PHYRegDef.rfHSSIPara1 = rFPGA0_XA_HSSIParameter1; /* wire control parameter1 */
- /* Tranceiver A~D HSSI Parameter-2 */
+ /* Transceiver A~D HSSI Parameter-2 */
pHalData->PHYRegDef.rfHSSIPara2 = rFPGA0_XA_HSSIParameter2; /* wire control parameter2 */
/* RF switch Control */
@@ -405,10 +405,10 @@ phy_InitBBRFRegisterDefinition(
/* Tx AFE control 2 */
pHalData->PHYRegDef.rfTxAFE = rOFDM0_XATxAFE;
- /* Tranceiver LSSI Readback SI mode */
+ /* Transceiver LSSI Readback SI mode */
pHalData->PHYRegDef.rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
- /* Tranceiver LSSI Readback PI mode */
+ /* Transceiver LSSI Readback PI mode */
pHalData->PHYRegDef.rfLSSIReadBackPi = TransceiverA_HSPI_Readback;
}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
index 9bf7a9248026..dff0cba751df 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
@@ -113,12 +113,13 @@ void update_recvframe_phyinfo_88e(struct recv_frame *precvframe, struct phy_stat
struct hal_data_8188e *pHalData = &padapter->haldata;
struct phy_info *pPHYInfo = &pattrib->phy_info;
u8 *wlanhdr = precvframe->rx_data;
+ __le16 fc = *(__le16 *)wlanhdr;
struct odm_per_pkt_info pkt_info;
u8 *sa = NULL;
struct sta_priv *pstapriv;
struct sta_info *psta;
- pkt_info.bPacketMatchBSSID = ((!IsFrameTypeCtrl(wlanhdr)) &&
+ pkt_info.bPacketMatchBSSID = ((!ieee80211_is_ctl(fc)) &&
!pattrib->icv_err && !pattrib->crc_err &&
!memcmp(get_hdr_bssid(wlanhdr),
get_bssid(&padapter->mlmepriv), ETH_ALEN));
@@ -127,9 +128,7 @@ void update_recvframe_phyinfo_88e(struct recv_frame *precvframe, struct phy_stat
(!memcmp(get_da(wlanhdr),
myid(&padapter->eeprompriv), ETH_ALEN));
- pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID &&
- (GetFrameSubType(wlanhdr) == WIFI_BEACON);
-
+ pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID && ieee80211_is_beacon(fc);
if (pkt_info.bPacketBeacon) {
if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE))
sa = padapter->mlmepriv.cur_network.network.MacAddress;
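For illustration only: ieee80211_is_ctl() and ieee80211_is_beacon() operate directly on the little-endian frame_control field, which is the first __le16 of the header, so no driver-private subtype parsing is needed. A small sketch; demo_count_beacon() is a made-up name:

/* Illustration only; demo_count_beacon() is hypothetical. */
static void demo_count_beacon(const u8 *wlanhdr, u32 *beacon_cnt)
{
	__le16 fc = *(const __le16 *)wlanhdr;	/* frame_control is the first field */

	if (ieee80211_is_beacon(fc))
		(*beacon_cnt)++;
}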
diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
index 55032d7ae7e3..bdfa51949289 100644
--- a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
@@ -347,7 +347,7 @@ static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
mem_addr += w_sz;
- mem_addr = (u8 *)RND4(((size_t)(mem_addr)));
+ mem_addr = PTR_ALIGN(mem_addr, 4);
}
rtw_free_xmitframe(pxmitpriv, pxmitframe);
@@ -437,7 +437,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
pfirstframe = pxmitframe;
len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);
pbuf_tail = len;
- pbuf = _RND8(pbuf_tail);
+ pbuf = round_up(pbuf_tail, 8);
/* check pkt amount in one bulk */
desc_cnt = 0;
@@ -488,7 +488,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
- if (_RND8(pbuf + len) > MAX_XMITBUF_SZ) {
+ if (pbuf + len > MAX_XMITBUF_SZ) {
pxmitframe->agg_num = 1;
pxmitframe->pkt_offset = 1;
break;
@@ -511,7 +511,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
/* handle pointer and stop condition */
pbuf_tail = pbuf + len;
- pbuf = _RND8(pbuf_tail);
+ pbuf = round_up(pbuf_tail, 8);
pfirstframe->agg_num++;
if (MAX_TX_AGG_PACKET_NUMBER == pfirstframe->agg_num)
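For illustration only: round_up() and PTR_ALIGN() are the generic kernel replacements for the driver-private _RND8()/RND4() helpers removed later in this series; both expect a power-of-two alignment. A small sketch with a made-up helper:

#include <linux/kernel.h>

/* Illustration only; demo_advance() is hypothetical. */
static u8 *demo_advance(u8 *mem, u32 len)
{
	len = round_up(len, 8);		/* e.g. 13 -> 16, next multiple of 8 */
	return PTR_ALIGN(mem + len, 4);	/* then snap to a 4-byte boundary */
}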
diff --git a/drivers/staging/r8188eu/hal/usb_halinit.c b/drivers/staging/r8188eu/hal/usb_halinit.c
index a92774352d2d..a217272a07f8 100644
--- a/drivers/staging/r8188eu/hal/usb_halinit.c
+++ b/drivers/staging/r8188eu/hal/usb_halinit.c
@@ -123,7 +123,7 @@ static void _InitQueueReservedPage(struct adapter *Adapter)
if (haldata->OutEpQueueSel & TX_SELE_LQ)
numLQ = 0x1C;
- /* NOTE: This step shall be proceed before writting REG_RQPN. */
+ /* NOTE: This step shall be proceed before writing REG_RQPN. */
if (haldata->OutEpQueueSel & TX_SELE_NQ)
numNQ = 0x1C;
value8 = (u8)_NPQ(numNQ);
@@ -539,10 +539,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
/* Save target channel */
haldata->CurrentChannel = 6;/* default set to 6 */
- if (pwrctrlpriv->reg_rfoff) {
- pwrctrlpriv->rf_pwrstate = rf_off;
- }
-
/* 2010/08/09 MH We need to check if we need to turnon or off RF after detecting */
/* HW GPIO pin. Before PHY_RFConfig8192C. */
/* 2010/08/26 MH If Efuse does not support sective suspend then disable the function. */
@@ -942,17 +938,6 @@ static void hw_var_set_opmode(struct adapter *Adapter, u8 *val)
}
}
-static void hw_var_set_bssid(struct adapter *Adapter, u8 *val)
-{
- u8 idx = 0;
- u32 reg_bssid;
-
- reg_bssid = REG_BSSID;
-
- for (idx = 0; idx < 6; idx++)
- rtw_write8(Adapter, (reg_bssid + idx), val[idx]);
-}
-
void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
struct hal_data_8188e *haldata = &Adapter->haldata;
@@ -963,9 +948,6 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
case HW_VAR_SET_OPMODE:
hw_var_set_opmode(Adapter, val);
break;
- case HW_VAR_BSSID:
- hw_var_set_bssid(Adapter, val);
- break;
case HW_VAR_BASIC_RATE:
{
u16 BrateCfg = 0;
@@ -1024,17 +1006,6 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
ResumeTxBeacon(Adapter);
}
break;
- case HW_VAR_MLME_DISCONNECT:
- /* Set RCR to not to receive data frame when NO LINK state */
- /* reject all data frames */
- rtw_write16(Adapter, REG_RXFLTMAP2, 0x00);
-
- /* reset TSF */
- rtw_write8(Adapter, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
-
- /* disable update TSF */
- rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) | BIT(4));
- break;
case HW_VAR_MLME_SITESURVEY:
if (*((u8 *)val)) { /* under sitesurvey */
/* config RCR to receive different BSSID & not to receive data frame */
@@ -1065,36 +1036,6 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR) | RCR_CBSSID_BCN);
}
break;
- case HW_VAR_MLME_JOIN:
- {
- u8 RetryLimit = 0x30;
- u8 type = *((u8 *)val);
- struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
-
- if (type == 0) { /* prepare to join */
- /* enable to rx data frame.Accept all data frame */
- rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
-
- rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR) | RCR_CBSSID_DATA | RCR_CBSSID_BCN);
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- RetryLimit = 48;
- else /* Ad-hoc Mode */
- RetryLimit = 0x7;
- } else if (type == 1) {
- /* joinbss_event call back when join res < 0 */
- rtw_write16(Adapter, REG_RXFLTMAP2, 0x00);
- } else if (type == 2) {
- /* sta add event call back */
- /* enable update TSF */
- rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) & (~BIT(4)));
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))
- RetryLimit = 0x7;
- }
- rtw_write16(Adapter, REG_RL, RetryLimit << RETRY_LIMIT_SHORT_SHIFT | RetryLimit << RETRY_LIMIT_LONG_SHIFT);
- }
- break;
case HW_VAR_SLOT_TIME:
{
u8 u1bAIFS, aSifsTime;
@@ -1119,26 +1060,6 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
}
}
break;
- case HW_VAR_RESP_SIFS:
- /* RESP_SIFS for CCK */
- rtw_write8(Adapter, REG_R2T_SIFS, val[0]); /* SIFS_T2T_CCK (0x08) */
- rtw_write8(Adapter, REG_R2T_SIFS + 1, val[1]); /* SIFS_R2T_CCK(0x08) */
- /* RESP_SIFS for OFDM */
- rtw_write8(Adapter, REG_T2T_SIFS, val[2]); /* SIFS_T2T_OFDM (0x0a) */
- rtw_write8(Adapter, REG_T2T_SIFS + 1, val[3]); /* SIFS_R2T_OFDM(0x0a) */
- break;
- case HW_VAR_ACK_PREAMBLE:
- {
- u8 regTmp;
- u8 bShortPreamble = *((bool *)val);
- /* Joseph marked out for Netgear 3500 TKIP channel 7 issue.(Temporarily) */
- regTmp = (haldata->nCur40MhzPrimeSC) << 5;
- if (bShortPreamble)
- regTmp |= 0x80;
-
- rtw_write8(Adapter, REG_RRSR + 2, regTmp);
- }
- break;
case HW_VAR_DM_FLAG:
podmpriv->SupportAbility = *((u8 *)val);
break;
@@ -1148,73 +1069,11 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
else
podmpriv->SupportAbility = podmpriv->BK_SupportAbility;
break;
- case HW_VAR_DM_FUNC_SET:
- if (*((u32 *)val) == DYNAMIC_ALL_FUNC_ENABLE) {
- podmpriv->SupportAbility = pdmpriv->InitODMFlag;
- } else {
- podmpriv->SupportAbility |= *((u32 *)val);
- }
+ case HW_VAR_DM_FUNC_RESET:
+ podmpriv->SupportAbility = pdmpriv->InitODMFlag;
break;
case HW_VAR_DM_FUNC_CLR:
- podmpriv->SupportAbility &= *((u32 *)val);
- break;
- case HW_VAR_AC_PARAM_BE:
- haldata->AcParam_BE = ((u32 *)(val))[0];
- rtw_write32(Adapter, REG_EDCA_BE_PARAM, ((u32 *)(val))[0]);
- break;
- case HW_VAR_ACM_CTRL:
- {
- u8 acm_ctrl = *((u8 *)val);
- u8 AcmCtrl = rtw_read8(Adapter, REG_ACMHWCTRL);
-
- if (acm_ctrl > 1)
- AcmCtrl = AcmCtrl | 0x1;
-
- if (acm_ctrl & BIT(3))
- AcmCtrl |= AcmHw_VoqEn;
- else
- AcmCtrl &= (~AcmHw_VoqEn);
-
- if (acm_ctrl & BIT(2))
- AcmCtrl |= AcmHw_ViqEn;
- else
- AcmCtrl &= (~AcmHw_ViqEn);
-
- if (acm_ctrl & BIT(1))
- AcmCtrl |= AcmHw_BeqEn;
- else
- AcmCtrl &= (~AcmHw_BeqEn);
-
- rtw_write8(Adapter, REG_ACMHWCTRL, AcmCtrl);
- }
- break;
- case HW_VAR_AMPDU_MIN_SPACE:
- {
- u8 MinSpacingToSet;
- u8 SecMinSpace;
-
- MinSpacingToSet = *((u8 *)val);
- if (MinSpacingToSet <= 7) {
- switch (Adapter->securitypriv.dot11PrivacyAlgrthm) {
- case _NO_PRIVACY_:
- case _AES_:
- SecMinSpace = 0;
- break;
- case _WEP40_:
- case _WEP104_:
- case _TKIP_:
- case _TKIP_WTMIC_:
- SecMinSpace = 6;
- break;
- default:
- SecMinSpace = 7;
- break;
- }
- if (MinSpacingToSet < SecMinSpace)
- MinSpacingToSet = SecMinSpace;
- rtw_write8(Adapter, REG_AMPDU_MIN_SPACE, (rtw_read8(Adapter, REG_AMPDU_MIN_SPACE) & 0xf8) | MinSpacingToSet);
- }
- }
+ podmpriv->SupportAbility = 0;
break;
case HW_VAR_AMPDU_FACTOR:
{
@@ -1242,221 +1101,15 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
}
}
break;
- case HW_VAR_RXDMA_AGG_PG_TH:
- {
- u8 threshold = *((u8 *)val);
- if (threshold == 0)
- threshold = USB_RXAGG_PAGE_COUNT;
- rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH, threshold);
- }
- break;
- case HW_VAR_H2C_FW_PWRMODE:
- {
- u8 psmode = (*(u8 *)val);
-
- /* Forece leave RF low power mode for 1T1R to prevent conficting setting in Fw power */
- /* saving sequence. 2010.06.07. Added by tynli. Suggested by SD3 yschang. */
- if (psmode != PS_MODE_ACTIVE)
- ODM_RF_Saving(podmpriv, true);
- rtl8188e_set_FwPwrMode_cmd(Adapter, psmode);
- }
- break;
- case HW_VAR_H2C_FW_JOINBSSRPT:
- {
- u8 mstatus = (*(u8 *)val);
- rtl8188e_set_FwJoinBssReport_cmd(Adapter, mstatus);
- }
- break;
- case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
- {
- u8 p2p_ps_state = (*(u8 *)val);
- rtl8188e_set_p2p_ps_offload_cmd(Adapter, p2p_ps_state);
- }
- break;
- case HW_VAR_INITIAL_GAIN:
- {
- struct rtw_dig *pDigTable = &podmpriv->DM_DigTable;
- u32 rx_gain = ((u32 *)(val))[0];
-
- if (rx_gain == 0xff) {/* restore rx gain */
- ODM_Write_DIG(podmpriv, pDigTable->BackupIGValue);
- } else {
- pDigTable->BackupIGValue = pDigTable->CurIGValue;
- ODM_Write_DIG(podmpriv, rx_gain);
- }
- }
- break;
- case HW_VAR_RPT_TIMER_SETTING:
- {
- u16 min_rpt_time = (*(u16 *)val);
- ODM_RA_Set_TxRPT_Time(podmpriv, min_rpt_time);
- }
- break;
- case HW_VAR_ANTENNA_DIVERSITY_SELECT:
- {
- u8 Optimum_antenna = (*(u8 *)val);
- u8 Ant;
- /* switch antenna to Optimum_antenna */
- if (haldata->CurAntenna != Optimum_antenna) {
- Ant = (Optimum_antenna == 2) ? MAIN_ANT : AUX_ANT;
- ODM_UpdateRxIdleAnt_88E(&haldata->odmpriv, Ant);
-
- haldata->CurAntenna = Optimum_antenna;
- }
- }
- break;
- case HW_VAR_FIFO_CLEARN_UP:
- {
- struct pwrctrl_priv *pwrpriv = &Adapter->pwrctrlpriv;
- u8 trycnt = 100;
-
- /* pause tx */
- rtw_write8(Adapter, REG_TXPAUSE, 0xff);
-
- /* keep sn */
- Adapter->xmitpriv.nqos_ssn = rtw_read16(Adapter, REG_NQOS_SEQ);
-
- if (!pwrpriv->bkeepfwalive) {
- /* RX DMA stop */
- rtw_write32(Adapter, REG_RXPKT_NUM, (rtw_read32(Adapter, REG_RXPKT_NUM) | RW_RELEASE_EN));
- do {
- if (!(rtw_read32(Adapter, REG_RXPKT_NUM) & RXDMA_IDLE))
- break;
- } while (trycnt--);
-
- /* RQPN Load 0 */
- rtw_write16(Adapter, REG_RQPN_NPQ, 0x0);
- rtw_write32(Adapter, REG_RQPN, 0x80000000);
- mdelay(10);
- }
- }
- break;
- case HW_VAR_TX_RPT_MAX_MACID:
- {
- u8 maxMacid = *val;
- rtw_write8(Adapter, REG_TX_RPT_CTRL + 1, maxMacid + 1);
- }
- break;
case HW_VAR_H2C_MEDIA_STATUS_RPT:
rtl8188e_set_FwMediaStatus_cmd(Adapter, (*(__le16 *)val));
break;
- case HW_VAR_BCN_VALID:
- /* BCN_VALID, BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2, write 1 to clear, Clear by sw */
- rtw_write8(Adapter, REG_TDECTRL + 2, rtw_read8(Adapter, REG_TDECTRL + 2) | BIT(0));
- break;
- default:
- break;
- }
-
-}
-
-void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
-{
- struct hal_data_8188e *haldata = &Adapter->haldata;
- struct odm_dm_struct *podmpriv = &haldata->odmpriv;
-
- switch (variable) {
- case HW_VAR_BCN_VALID:
- /* BCN_VALID, BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2 */
- val[0] = (BIT(0) & rtw_read8(Adapter, REG_TDECTRL + 2)) ? true : false;
- break;
- case HW_VAR_DM_FLAG:
- val[0] = podmpriv->SupportAbility;
- break;
- case HW_VAR_FWLPS_RF_ON:
- {
- /* When we halt NIC, we should check if FW LPS is leave. */
- if (Adapter->pwrctrlpriv.rf_pwrstate == rf_off) {
- /* If it is in HW/SW Radio OFF or IPS state, we do not check Fw LPS Leave, */
- /* because Fw is unload. */
- val[0] = true;
- } else {
- u32 valRCR;
- valRCR = rtw_read32(Adapter, REG_RCR);
- valRCR &= 0x00070000;
- if (valRCR)
- val[0] = false;
- else
- val[0] = true;
- }
- }
- break;
- case HW_VAR_CHK_HI_QUEUE_EMPTY:
- *val = ((rtw_read32(Adapter, REG_HGQ_INFORMATION) & 0x0000ff00) == 0) ? true : false;
- break;
default:
break;
}
}
-/* Query setting of specified variable. */
-void GetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
-{
- struct hal_data_8188e *haldata = &Adapter->haldata;
-
- switch (eVariable) {
- case HAL_DEF_IS_SUPPORT_ANT_DIV:
- *((u8 *)pValue) = (haldata->AntDivCfg == 0) ? false : true;
- break;
- case HAL_DEF_CURRENT_ANTENNA:
- *((u8 *)pValue) = haldata->CurAntenna;
- break;
- case HAL_DEF_DBG_DM_FUNC:
- *((u32 *)pValue) = haldata->odmpriv.SupportAbility;
- break;
- case HAL_DEF_DBG_DUMP_RXPKT:
- *((u8 *)pValue) = haldata->bDumpRxPkt;
- break;
- case HAL_DEF_DBG_DUMP_TXPKT:
- *((u8 *)pValue) = haldata->bDumpTxPkt;
- break;
- default:
- break;
- }
-}
-
-/* Change default setting of specified variable. */
-void SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
-{
- struct hal_data_8188e *haldata = &Adapter->haldata;
-
- switch (eVariable) {
- case HAL_DEF_DBG_DM_FUNC:
- {
- u8 dm_func = *((u8 *)pValue);
- struct odm_dm_struct *podmpriv = &haldata->odmpriv;
-
- if (dm_func == 0) { /* disable all dynamic func */
- podmpriv->SupportAbility = DYNAMIC_FUNC_DISABLE;
- } else if (dm_func == 1) {/* disable DIG */
- podmpriv->SupportAbility &= (~DYNAMIC_BB_DIG);
- } else if (dm_func == 2) {/* disable High power */
- podmpriv->SupportAbility &= (~DYNAMIC_BB_DYNAMIC_TXPWR);
- } else if (dm_func == 3) {/* disable tx power tracking */
- podmpriv->SupportAbility &= (~DYNAMIC_RF_CALIBRATION);
- } else if (dm_func == 5) {/* disable antenna diversity */
- podmpriv->SupportAbility &= (~DYNAMIC_BB_ANT_DIV);
- } else if (dm_func == 6) {/* turn on all dynamic func */
- if (!(podmpriv->SupportAbility & DYNAMIC_BB_DIG)) {
- struct rtw_dig *pDigTable = &podmpriv->DM_DigTable;
- pDigTable->CurIGValue = rtw_read8(Adapter, 0xc50);
- }
- podmpriv->SupportAbility = DYNAMIC_ALL_FUNC_ENABLE;
- }
- }
- break;
- case HAL_DEF_DBG_DUMP_RXPKT:
- haldata->bDumpRxPkt = *((u8 *)pValue);
- break;
- case HAL_DEF_DBG_DUMP_TXPKT:
- haldata->bDumpTxPkt = *((u8 *)pValue);
- break;
- default:
- break;
- }
-}
-
void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
{
u8 init_rate = 0;
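A minimal sketch of what the new clear_beacon_valid_bit()/get_beacon_valid_bit() helpers (declared in rtw_mlme_ext.h further down) might look like, reconstructed from the HW_VAR_BCN_VALID cases removed above; the real implementation is not part of this diff:

/* Sketch only, based on the removed HW_VAR_BCN_VALID handling:
 * BCN_VALID is BIT(16) of REG_TDECTRL, i.e. BIT(0) of REG_TDECTRL + 2,
 * and is cleared by writing 1.
 */
void clear_beacon_valid_bit(struct adapter *adapter)
{
	rtw_write8(adapter, REG_TDECTRL + 2,
		   rtw_read8(adapter, REG_TDECTRL + 2) | BIT(0));
}

bool get_beacon_valid_bit(struct adapter *adapter)
{
	return rtw_read8(adapter, REG_TDECTRL + 2) & BIT(0);
}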
diff --git a/drivers/staging/r8188eu/hal/usb_ops_linux.c b/drivers/staging/r8188eu/hal/usb_ops_linux.c
index 673c30ed3cce..d5e674542a78 100644
--- a/drivers/staging/r8188eu/hal/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/hal/usb_ops_linux.c
@@ -16,7 +16,7 @@ static int usb_read(struct intf_hdl *intf, u16 value, void *data, u8 size)
int status;
u8 io_buf[4];
- if (adapt->bSurpriseRemoved || adapt->pwrctrlpriv.pnp_bstop_trx)
+ if (adapt->bSurpriseRemoved)
return -EPERM;
status = usb_control_msg_recv(udev, 0, REALTEK_USB_VENQT_CMD_REQ,
@@ -59,7 +59,7 @@ static int usb_write(struct intf_hdl *intf, u16 value, void *data, u8 size)
int status;
u8 io_buf[VENDOR_CMD_MAX_DATA_LEN];
- if (adapt->bSurpriseRemoved || adapt->pwrctrlpriv.pnp_bstop_trx)
+ if (adapt->bSurpriseRemoved)
return -EPERM;
memcpy(io_buf, data, size);
@@ -260,7 +260,6 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
pkt_copy = netdev_alloc_skb(adapt->pnetdev, alloc_sz);
if (pkt_copy) {
- pkt_copy->dev = adapt->pnetdev;
precvframe->pkt = pkt_copy;
precvframe->rx_head = pkt_copy->data;
precvframe->rx_end = pkt_copy->data + alloc_sz;
@@ -288,7 +287,7 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
recvframe_put(precvframe, skb_len);
- pkt_offset = (u16)_RND128(pkt_offset);
+ pkt_offset = (u16)round_up(pkt_offset, 128);
if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */
if (pattrib->physt)
@@ -415,8 +414,7 @@ u32 rtw_read_port(struct adapter *adapter, u8 *rmem)
size_t alignment = 0;
u32 ret = _SUCCESS;
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved ||
- adapter->pwrctrlpriv.pnp_bstop_trx)
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
return _FAIL;
if (!precvbuf)
diff --git a/drivers/staging/r8188eu/include/HalVerDef.h b/drivers/staging/r8188eu/include/HalVerDef.h
index 2bc18eabb55d..7a530c7d57eb 100644
--- a/drivers/staging/r8188eu/include/HalVerDef.h
+++ b/drivers/staging/r8188eu/include/HalVerDef.h
@@ -25,7 +25,6 @@ struct HAL_VERSION {
enum HAL_CHIP_TYPE ChipType;
enum HAL_CUT_VERSION CUTVersion;
enum HAL_VENDOR VendorType;
- u8 ROMVer;
};
/* Get element */
@@ -34,10 +33,10 @@ struct HAL_VERSION {
/* HAL_CHIP_TYPE_E */
#define IS_NORMAL_CHIP(version) \
- ((GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP) ? true : false)
+ (GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP)
/* HAL_VENDOR_E */
#define IS_CHIP_VENDOR_TSMC(version) \
- ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC) ? true : false)
+ (GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC)
#endif
diff --git a/drivers/staging/r8188eu/include/basic_types.h b/drivers/staging/r8188eu/include/basic_types.h
index d82b2171d584..ffb21170e898 100644
--- a/drivers/staging/r8188eu/include/basic_types.h
+++ b/drivers/staging/r8188eu/include/basic_types.h
@@ -4,9 +4,6 @@
#ifndef __BASIC_TYPES_H__
#define __BASIC_TYPES_H__
-#define SUCCESS 0
-#define FAIL (-1)
-
#include <linux/types.h>
#define NDIS_OID uint
@@ -14,9 +11,6 @@ typedef void (*proc_t)(void *);
#define FIELD_OFFSET(s, field) ((ssize_t)&((s *)(0))->field)
-#define MEM_ALIGNMENT_OFFSET (sizeof(size_t))
-#define MEM_ALIGNMENT_PADDING (sizeof(size_t) - 1)
-
/* port from fw */
/* TODO: Macros Below are Sync from SD7-Driver. It is necessary
* to check correctness */
@@ -31,86 +25,21 @@ typedef void (*proc_t)(void *);
/* Convert little data endian to host ordering */
#define EF1BYTE(_val) \
((u8)(_val))
-#define EF2BYTE(_val) \
- (le16_to_cpu(_val))
-#define EF4BYTE(_val) \
- (le32_to_cpu(_val))
-
-/* Read data from memory */
-#define READEF1BYTE(_ptr) \
- EF1BYTE(*((u8 *)(_ptr)))
-/* Read le16 data from memory and convert to host ordering */
-#define READEF2BYTE(_ptr) \
- EF2BYTE(*(_ptr))
-#define READEF4BYTE(_ptr) \
- EF4BYTE(*(_ptr))
-/* Write data to memory */
-#define WRITEEF1BYTE(_ptr, _val) \
- do { \
- (*((u8 *)(_ptr))) = EF1BYTE(_val) \
- } while (0)
-/* Write le data to memory in host ordering */
-#define WRITEEF2BYTE(_ptr, _val) \
- do { \
- (*((u16 *)(_ptr))) = EF2BYTE(_val) \
- } while (0)
-
-#define WRITEEF4BYTE(_ptr, _val) \
- do { \
- (*((u32 *)(_ptr))) = EF2BYTE(_val) \
- } while (0)
-
-/* Create a bit mask
- * Examples:
- * BIT_LEN_MASK_32(0) => 0x00000000
- * BIT_LEN_MASK_32(1) => 0x00000001
- * BIT_LEN_MASK_32(2) => 0x00000003
- * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
- */
-#define BIT_LEN_MASK_32(__bitlen) \
- (0xFFFFFFFF >> (32 - (__bitlen)))
-#define BIT_LEN_MASK_16(__bitlen) \
- (0xFFFF >> (16 - (__bitlen)))
+/* Create a bit mask */
#define BIT_LEN_MASK_8(__bitlen) \
(0xFF >> (8 - (__bitlen)))
-/* Create an offset bit mask
- * Examples:
- * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
- * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
- */
-#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
-#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_16(__bitlen) << (__bitoffset))
-#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
-
/*Description:
* Return 4-byte value in host byte ordering from
* 4-byte pointer in little-endian system.
*/
-#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
- (EF4BYTE(*((__le32 *)(__pstart))))
-#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
- (EF2BYTE(*((__le16 *)(__pstart))))
#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
(EF1BYTE(*((u8 *)(__pstart))))
/*Description:
Translate subfield (continuous bits in little-endian) of 4-byte
value to host byte ordering.*/
-#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_32(__bitlen) \
- )
-#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_16(__bitlen) \
- )
#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
( \
(LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
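For illustration only: the one helper chain kept above (BIT_LEN_MASK_8 via LE_P1BYTE_TO_HOST_1BYTE into LE_BITS_TO_1BYTE) extracts a bit field from a single byte. A worked example with a made-up value:

/* Illustration only. Extract bits 4..6 of 0xA5 (binary 1010 0101):
 * (0xA5 >> 4) & BIT_LEN_MASK_8(3) == 0x0A & 0x07 == 0x02
 */
u8 demo_byte = 0xA5;
u8 demo_field = LE_BITS_TO_1BYTE(&demo_byte, 4, 3);	/* 0x02 */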
diff --git a/drivers/staging/r8188eu/include/drv_types.h b/drivers/staging/r8188eu/include/drv_types.h
index 09fc27082f7c..bba88a0ede61 100644
--- a/drivers/staging/r8188eu/include/drv_types.h
+++ b/drivers/staging/r8188eu/include/drv_types.h
@@ -26,7 +26,6 @@
#include "rtw_eeprom.h"
#include "sta_info.h"
#include "rtw_mlme.h"
-#include "rtw_debug.h"
#include "rtw_rf.h"
#include "rtw_event.h"
#include "rtw_led.h"
@@ -35,6 +34,7 @@
#include "rtw_ap.h"
#include "rtw_br_ext.h"
#include "rtl8188e_hal.h"
+#include "rtw_fw.h"
#define DRIVERVERSION "v4.1.4_6773.20130222"
@@ -116,11 +116,6 @@ struct registry_priv {
#define MAX_CONTINUAL_URB_ERR 4
-struct rt_firmware {
- u8 *data;
- u32 size;
-};
-
struct dvobj_priv {
struct adapter *if1;
diff --git a/drivers/staging/r8188eu/include/hal_intf.h b/drivers/staging/r8188eu/include/hal_intf.h
index 3cededa4dcfc..a56f3d6ca399 100644
--- a/drivers/staging/r8188eu/include/hal_intf.h
+++ b/drivers/staging/r8188eu/include/hal_intf.h
@@ -10,44 +10,16 @@
enum hw_variables {
HW_VAR_SET_OPMODE,
- HW_VAR_BSSID,
HW_VAR_BASIC_RATE,
HW_VAR_CORRECT_TSF,
- HW_VAR_MLME_DISCONNECT,
HW_VAR_MLME_SITESURVEY,
- HW_VAR_MLME_JOIN,
HW_VAR_SLOT_TIME,
- HW_VAR_RESP_SIFS,
- HW_VAR_ACK_PREAMBLE,
- HW_VAR_BCN_VALID,
HW_VAR_DM_FLAG,
HW_VAR_DM_FUNC_OP,
- HW_VAR_DM_FUNC_SET,
+ HW_VAR_DM_FUNC_RESET,
HW_VAR_DM_FUNC_CLR,
- HW_VAR_AC_PARAM_BE,
- HW_VAR_ACM_CTRL,
- HW_VAR_AMPDU_MIN_SPACE,
HW_VAR_AMPDU_FACTOR,
- HW_VAR_RXDMA_AGG_PG_TH,
- HW_VAR_H2C_FW_PWRMODE,
- HW_VAR_H2C_FW_JOINBSSRPT,
- HW_VAR_FWLPS_RF_ON,
- HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
- HW_VAR_INITIAL_GAIN,
- HW_VAR_ANTENNA_DIVERSITY_SELECT,
- HW_VAR_FIFO_CLEARN_UP,
- HW_VAR_RPT_TIMER_SETTING,
- HW_VAR_TX_RPT_MAX_MACID,
HW_VAR_H2C_MEDIA_STATUS_RPT,
- HW_VAR_CHK_HI_QUEUE_EMPTY,
-};
-
-enum hal_def_variable {
- HAL_DEF_IS_SUPPORT_ANT_DIV,
- HAL_DEF_CURRENT_ANTENNA,
- HAL_DEF_DBG_DUMP_RXPKT,/* for dbg */
- HAL_DEF_DBG_DM_FUNC,/* for dbg */
- HAL_DEF_DBG_DUMP_TXPKT,
};
typedef s32 (*c2h_id_filter)(u8 id);
@@ -70,13 +42,9 @@ void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level);
int rtl8188e_IOL_exec_cmds_sync(struct adapter *adapter,
struct xmit_frame *xmit_frame, u32 max_wating_ms, u32 bndy_cnt);
-void SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue);
-void GetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue);
-
unsigned int rtl8188eu_inirp_init(struct adapter *Adapter);
void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val);
-void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val);
uint rtw_hal_init(struct adapter *padapter);
uint rtw_hal_deinit(struct adapter *padapter);
diff --git a/drivers/staging/r8188eu/include/ieee80211.h b/drivers/staging/r8188eu/include/ieee80211.h
index 8c20363cdd31..15636a808f52 100644
--- a/drivers/staging/r8188eu/include/ieee80211.h
+++ b/drivers/staging/r8188eu/include/ieee80211.h
@@ -123,24 +123,6 @@ enum NETWORK_TYPE {
WIRELESS_11BG_24N = (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N),
};
-#define SUPPORTED_24G_NETTYPE_MSK \
- (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N)
-
-#define IsSupported24G(NetType) \
- ((NetType) & SUPPORTED_24G_NETTYPE_MSK ? true : false)
-
-#define IsEnableHWCCK(NetType) \
- IsSupported24G(NetType)
-
-#define IsSupportedRxCCK(NetType) IsEnableHWCCK(NetType)
-
-#define IsSupportedTxCCK(NetType) \
- ((NetType) & (WIRELESS_11B) ? true : false)
-#define IsSupportedTxOFDM(NetType) \
- ((NetType) & (WIRELESS_11G) ? true : false)
-#define IsSupportedTxMCS(NetType) \
- ((NetType) & (WIRELESS_11_24N) ? true : false)
-
struct ieee_param {
u32 cmd;
u8 sta_addr[ETH_ALEN];
@@ -196,35 +178,6 @@ struct ieee_param {
/* this is stolen from ipw2200 driver */
#define IEEE_IBSS_MAC_HASH_SIZE 31
-struct rtw_ieee80211_hdr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- u16 seq_ctl;
- u8 addr4[ETH_ALEN];
-} __packed;
-
-struct rtw_ieee80211_hdr_3addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- u16 seq_ctl;
-} __packed;
-
-struct rtw_ieee80211_hdr_3addr_qos {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- u16 seq_ctl;
- u16 qc;
-} __packed;
-
#define IEEE80211_3ADDR_LEN 24
#define IEEE80211_4ADDR_LEN 30
#define IEEE80211_FCS_LEN 4
@@ -636,24 +589,8 @@ static inline int is_broadcast_mac_addr(const u8 *addr)
#define MAXTID 16
-#define IEEE_A (1<<0)
-#define IEEE_B (1<<1)
-#define IEEE_G (1<<2)
-#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
-
/* Action category code */
enum rtw_ieee80211_category {
- RTW_WLAN_CATEGORY_SPECTRUM_MGMT = 0,
- RTW_WLAN_CATEGORY_QOS = 1,
- RTW_WLAN_CATEGORY_DLS = 2,
- RTW_WLAN_CATEGORY_BACK = 3,
- RTW_WLAN_CATEGORY_PUBLIC = 4, /* IEEE 802.11 public action frames */
- RTW_WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
- RTW_WLAN_CATEGORY_FT = 6,
- RTW_WLAN_CATEGORY_HT = 7,
- RTW_WLAN_CATEGORY_SA_QUERY = 8,
- RTW_WLAN_CATEGORY_TDLS = 12,
- RTW_WLAN_CATEGORY_WMM = 17,
RTW_WLAN_CATEGORY_P2P = 0x7f,/* P2P action frames */
};
diff --git a/drivers/staging/r8188eu/include/odm.h b/drivers/staging/r8188eu/include/odm.h
index 1902aa48a255..f131e17167bf 100644
--- a/drivers/staging/r8188eu/include/odm.h
+++ b/drivers/staging/r8188eu/include/odm.h
@@ -98,22 +98,6 @@ struct odm_per_pkt_info {
bool bPacketBeacon;
};
-enum odm_ability {
- /* BB Team */
- ODM_DIG = 0x00000001,
- ODM_HIGH_POWER = 0x00000002,
- ODM_CCK_CCA_TH = 0x00000004,
- ODM_FA_STATISTICS = 0x00000008,
- ODM_RAMASK = 0x00000010,
- ODM_RSSI_MONITOR = 0x00000020,
- ODM_SW_ANTDIV = 0x00000040,
- ODM_HW_ANTDIV = 0x00000080,
- ODM_BB_PWRSV = 0x00000100,
- ODM_2TPATHDIV = 0x00000200,
- ODM_1TPATHDIV = 0x00000400,
- ODM_PSD2AFH = 0x00000800
-};
-
/* 2011/10/20 MH Define Common info enum for all team. */
enum odm_common_info_def {
@@ -137,19 +121,6 @@ enum odm_ability_def {
# define ODM_ITRF_USB 0x2
-/* ODM_CMNINFO_OP_MODE */
-enum odm_operation_mode {
- ODM_NO_LINK = BIT(0),
- ODM_LINK = BIT(1),
- ODM_SCAN = BIT(2),
- ODM_POWERSAVE = BIT(3),
- ODM_AP_MODE = BIT(4),
- ODM_CLIENT_MODE = BIT(5),
- ODM_AD_HOC = BIT(6),
- ODM_WIFI_DIRECT = BIT(7),
- ODM_WIFI_DISPLAY = BIT(8),
-};
-
/* ODM_CMNINFO_WM_MODE */
enum odm_wireless_mode {
ODM_WM_UNKNOW = 0x0,
diff --git a/drivers/staging/r8188eu/include/osdep_service.h b/drivers/staging/r8188eu/include/osdep_service.h
index fca8f3d116c2..f1a703643e74 100644
--- a/drivers/staging/r8188eu/include/osdep_service.h
+++ b/drivers/staging/r8188eu/include/osdep_service.h
@@ -77,10 +77,6 @@ void *rtw_malloc2d(int h, int w, int size);
spin_lock_init(&((q)->lock)); \
} while (0)
-u32 rtw_systime_to_ms(u32 systime);
-u32 rtw_ms_to_systime(u32 ms);
-s32 rtw_get_passing_time_ms(u32 start);
-
void rtw_usleep_os(int us);
static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer)
@@ -94,49 +90,6 @@ static inline void flush_signals_thread(void)
flush_signals(current);
}
-#define _RND(sz, r) ((((sz)+((r)-1))/(r))*(r))
-#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0: 1)) << 2)
-
-static inline u32 _RND4(u32 sz)
-{
- u32 val;
-
- val = ((sz >> 2) + ((sz & 3) ? 1: 0)) << 2;
- return val;
-}
-
-static inline u32 _RND8(u32 sz)
-{
- u32 val;
-
- val = ((sz >> 3) + ((sz & 7) ? 1: 0)) << 3;
- return val;
-}
-
-static inline u32 _RND128(u32 sz)
-{
- u32 val;
-
- val = ((sz >> 7) + ((sz & 127) ? 1: 0)) << 7;
- return val;
-}
-
-static inline u32 _RND256(u32 sz)
-{
- u32 val;
-
- val = ((sz >> 8) + ((sz & 255) ? 1: 0)) << 8;
- return val;
-}
-
-static inline u32 _RND512(u32 sz)
-{
- u32 val;
-
- val = ((sz >> 9) + ((sz & 511) ? 1: 0)) << 9;
- return val;
-}
-
struct rtw_netdev_priv_indicator {
void *priv;
u32 sizeof_priv;
diff --git a/drivers/staging/r8188eu/include/rtl8188e_hal.h b/drivers/staging/r8188eu/include/rtl8188e_hal.h
index 82cb4f7f4d3e..d2a069d4e1cc 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_hal.h
@@ -160,9 +160,6 @@ struct hal_data_8188e {
u8 AntDivCfg;
u8 TRxAntDivType;
- u8 bDumpRxPkt;/* for debug */
- u8 bDumpTxPkt;/* for debug */
-
u8 OutEpQueueSel;
u8 OutEpNumber;
diff --git a/drivers/staging/r8188eu/include/rtl8188e_spec.h b/drivers/staging/r8188eu/include/rtl8188e_spec.h
index edae053e350e..ef42c4b2f20c 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_spec.h
@@ -998,13 +998,9 @@ Current IOREG MAP
#define STOP_BCNQ BIT(6)
/* 2 ACMHWCTRL */
-#define AcmHw_HwEn BIT(0)
-#define AcmHw_BeqEn BIT(1)
-#define AcmHw_ViqEn BIT(2)
-#define AcmHw_VoqEn BIT(3)
-#define AcmHw_BeqStatus BIT(4)
-#define AcmHw_ViqStatus BIT(5)
-#define AcmHw_VoqStatus BIT(6)
+#define ACMHW_BEQEN BIT(1)
+#define ACMHW_VIQEN BIT(2)
+#define ACMHW_VOQEN BIT(3)
/* 0x0600h ~ 0x07FFh WMAC Configuration */
/* 2APSD_CTRL */
diff --git a/drivers/staging/r8188eu/include/rtw_debug.h b/drivers/staging/r8188eu/include/rtw_debug.h
deleted file mode 100644
index 01a7d987d6cc..000000000000
--- a/drivers/staging/r8188eu/include/rtw_debug.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTW_DEBUG_H__
-#define __RTW_DEBUG_H__
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-#define _drv_always_ 1
-#define _drv_emerg_ 2
-#define _drv_alert_ 3
-#define _drv_crit_ 4
-#define _drv_err_ 5
-#define _drv_warning_ 6
-#define _drv_notice_ 7
-#define _drv_info_ 8
-#define _drv_debug_ 9
-
-#define _module_rtl871x_xmit_c_ BIT(0)
-#define _module_xmit_osdep_c_ BIT(1)
-#define _module_rtl871x_recv_c_ BIT(2)
-#define _module_recv_osdep_c_ BIT(3)
-#define _module_rtl871x_mlme_c_ BIT(4)
-#define _module_mlme_osdep_c_ BIT(5)
-#define _module_rtl871x_sta_mgt_c_ BIT(6)
-#define _module_rtl871x_cmd_c_ BIT(7)
-#define _module_cmd_osdep_c_ BIT(8)
-#define _module_rtl871x_io_c_ BIT(9)
-#define _module_io_osdep_c_ BIT(10)
-#define _module_os_intfs_c_ BIT(11)
-#define _module_rtl871x_security_c_ BIT(12)
-#define _module_rtl871x_eeprom_c_ BIT(13)
-#define _module_hal_init_c_ BIT(14)
-#define _module_hci_hal_init_c_ BIT(15)
-#define _module_rtl871x_ioctl_c_ BIT(16)
-#define _module_rtl871x_ioctl_set_c_ BIT(17)
-#define _module_rtl871x_ioctl_query_c_ BIT(18)
-#define _module_rtl871x_pwrctrl_c_ BIT(19)
-#define _module_hci_intfs_c_ BIT(20)
-#define _module_hci_ops_c_ BIT(21)
-#define _module_osdep_service_c_ BIT(22)
-#define _module_mp_ BIT(23)
-#define _module_hci_ops_os_c_ BIT(24)
-#define _module_rtl871x_ioctl_os_c BIT(25)
-#define _module_rtl8712_cmd_c_ BIT(26)
-#define _module_rtl8192c_xmit_c_ BIT(27)
-#define _module_hal_xmit_c_ BIT(28)
-#define _module_efuse_ BIT(29)
-#define _module_rtl8712_recv_c_ BIT(30)
-#define _module_rtl8712_led_c_ BIT(31)
-
-#define DRIVER_PREFIX "R8188EU: "
-
-#endif /* __RTW_DEBUG_H__ */
diff --git a/drivers/staging/r8188eu/include/rtw_eeprom.h b/drivers/staging/r8188eu/include/rtw_eeprom.h
index 3e8d3bb48903..d8d48ace356c 100644
--- a/drivers/staging/r8188eu/include/rtw_eeprom.h
+++ b/drivers/staging/r8188eu/include/rtw_eeprom.h
@@ -11,10 +11,7 @@
struct eeprom_priv {
u8 bautoload_fail_flag;
- u8 bloadfile_fail_flag;
- u8 bloadmac_fail_flag;
u8 mac_addr[ETH_ALEN] __aligned(2); /* PermanentAddress */
- u16 channel_plan;
u8 EepromOrEfuse;
u8 efuse_eeprom_data[HWSET_MAX_SIZE_512] __aligned(4);
};
diff --git a/drivers/staging/r8188eu/include/rtw_fw.h b/drivers/staging/r8188eu/include/rtw_fw.h
index c4b1a8370b4a..8f74157ee9ac 100644
--- a/drivers/staging/r8188eu/include/rtw_fw.h
+++ b/drivers/staging/r8188eu/include/rtw_fw.h
@@ -4,6 +4,11 @@
#ifndef __RTW_FW_H__
#define __RTW_FW_H__
+struct rt_firmware {
+ u8 *data;
+ u32 size;
+};
+
#include "drv_types.h"
int rtl8188e_firmware_download(struct adapter *padapter);
diff --git a/drivers/staging/r8188eu/include/rtw_ioctl.h b/drivers/staging/r8188eu/include/rtw_ioctl.h
index a36bd7313755..c704f3040ac8 100644
--- a/drivers/staging/r8188eu/include/rtw_ioctl.h
+++ b/drivers/staging/r8188eu/include/rtw_ioctl.h
@@ -7,86 +7,7 @@
#include "osdep_service.h"
#include "drv_types.h"
-#ifndef OID_802_11_CAPABILITY
- #define OID_802_11_CAPABILITY 0x0d010122
-#endif
-
-#ifndef OID_802_11_PMKID
- #define OID_802_11_PMKID 0x0d010123
-#endif
-
-/* For DDK-defined OIDs */
-#define OID_NDIS_SEG1 0x00010100
-#define OID_NDIS_SEG2 0x00010200
-#define OID_NDIS_SEG3 0x00020100
-#define OID_NDIS_SEG4 0x01010100
-#define OID_NDIS_SEG5 0x01020100
-#define OID_NDIS_SEG6 0x01020200
-#define OID_NDIS_SEG7 0xFD010100
-#define OID_NDIS_SEG8 0x0D010100
-#define OID_NDIS_SEG9 0x0D010200
-#define OID_NDIS_SEG10 0x0D020200
-
-#define SZ_OID_NDIS_SEG1 23
-#define SZ_OID_NDIS_SEG2 3
-#define SZ_OID_NDIS_SEG3 6
-#define SZ_OID_NDIS_SEG4 6
-#define SZ_OID_NDIS_SEG5 4
-#define SZ_OID_NDIS_SEG6 8
-#define SZ_OID_NDIS_SEG7 7
-#define SZ_OID_NDIS_SEG8 36
-#define SZ_OID_NDIS_SEG9 24
-#define SZ_OID_NDIS_SEG10 19
-
-/* For Realtek-defined OIDs */
-#define OID_MP_SEG1 0xFF871100
-#define OID_MP_SEG2 0xFF818000
-
-#define OID_MP_SEG3 0xFF818700
-#define OID_MP_SEG4 0xFF011100
-
-enum oid_type {
- QUERY_OID,
- SET_OID
-};
-
-struct oid_funs_node {
- unsigned int oid_start; /* the starting number for OID */
- unsigned int oid_end; /* the ending number for OID */
- struct oid_obj_priv *node_array;
- unsigned int array_sz; /* the size of node_array */
- int query_counter; /* count the number of query hits for this segment */
- int set_counter; /* count the number of set hits for this segment */
-};
-
-struct oid_par_priv {
- void *adapter_context;
- NDIS_OID oid;
- void *information_buf;
- u32 information_buf_len;
- u32 *bytes_rw;
- u32 *bytes_needed;
- enum oid_type type_of_oid;
- u32 dbg;
-};
-
-struct oid_obj_priv {
- unsigned char dbg; /* 0: without OID debug message
- * 1: with OID debug message */
- int (*oidfuns)(struct oid_par_priv *poid_par_priv);
-};
-
extern struct iw_handler_def rtw_handlers_def;
-
-int drv_query_info(struct net_device *miniportadaptercontext, NDIS_OID oid,
- void *informationbuffer, u32 informationbufferlength,
- u32 *byteswritten, u32 *bytesneeded);
-
-int drv_set_info(struct net_device *MiniportAdapterContext,
- NDIS_OID oid, void *informationbuffer,
- u32 informationbufferlength, u32 *bytesread,
- u32 *bytesneeded);
-
extern int ui_pid[3];
#endif /* #ifndef __INC_CEINFO_ */
diff --git a/drivers/staging/r8188eu/include/rtw_mlme.h b/drivers/staging/r8188eu/include/rtw_mlme.h
index 42d850f9d777..d81668498e46 100644
--- a/drivers/staging/r8188eu/include/rtw_mlme.h
+++ b/drivers/staging/r8188eu/include/rtw_mlme.h
@@ -363,8 +363,6 @@ struct mlme_priv {
u8 *assoc_req;
u32 assoc_req_len;
- u8 *assoc_rsp;
- u32 assoc_rsp_len;
/* Number of associated Non-ERP stations (i.e., stations using 802.11b
* in 802.11g BSS) */
@@ -558,13 +556,9 @@ void rtw_scan_timeout_handler(struct adapter *adapter);
#define rtw_set_scan_deny_timer_hdl(adapter) do {} while (0)
#define rtw_set_scan_deny(adapter, ms) do {} while (0)
-int _rtw_init_mlme_priv(struct adapter *padapter);
-
void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv);
-void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv);
-
- struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv);
+struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv);
void _rtw_free_network(struct mlme_priv *pmlmepriv,
struct wlan_network *pnetwork, u8 isfreeall);
@@ -596,7 +590,10 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network);
void rtw_set_roaming(struct adapter *adapter, u8 to_roaming);
u8 rtw_to_roaming(struct adapter *adapter);
+void rtw_set_max_rpt_macid(struct adapter *adapter, u8 macid);
void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta,
u32 mstatus);
+u8 rtw_current_antenna(struct adapter *adapter);
+
#endif /* __RTL871X_MLME_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_mlme_ext.h b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
index 0c555ea6719b..573d65b175cc 100644
--- a/drivers/staging/r8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
@@ -24,36 +24,12 @@
#define REAUTH_LIMIT (4)
#define REASSOC_LIMIT (4)
-#define READDBA_LIMIT (2)
-
-#define ROAMING_LIMIT 8
#define DYNAMIC_FUNC_DISABLE (0x0)
/* ====== ODM_ABILITY_E ======== */
/* BB ODM section BIT 0-15 */
#define DYNAMIC_BB_DIG BIT(0)
-#define DYNAMIC_BB_RA_MASK BIT(1)
-#define DYNAMIC_BB_DYNAMIC_TXPWR BIT(2)
-#define DYNAMIC_BB_BB_FA_CNT BIT(3)
-
-#define DYNAMIC_BB_RSSI_MONITOR BIT(4)
-#define DYNAMIC_BB_CCK_PD BIT(5)
-#define DYNAMIC_BB_ANT_DIV BIT(6)
-#define DYNAMIC_BB_PWR_SAVE BIT(7)
-#define DYNAMIC_BB_PWR_TRA BIT(8)
-#define DYNAMIC_BB_RATE_ADAPTIVE BIT(9)
-#define DYNAMIC_BB_PATH_DIV BIT(10)
-#define DYNAMIC_BB_PSD BIT(11)
-
-/* MAC DM section BIT 16-23 */
-#define DYNAMIC_MAC_EDCA_TURBO BIT(16)
-#define DYNAMIC_MAC_EARLY_MODE BIT(17)
-
-/* RF ODM section BIT 24-31 */
-#define DYNAMIC_RF_TX_PWR_TRACK BIT(24)
-#define DYNAMIC_RF_RX_GAIN_TRACK BIT(25)
-#define DYNAMIC_RF_CALIBRATION BIT(26)
#define DYNAMIC_ALL_FUNC_ENABLE 0xFFFFFFF
@@ -208,17 +184,7 @@ enum SCAN_STATE {
SCAN_STATE_MAX,
};
-struct mlme_handler {
- unsigned int num;
- char *str;
- unsigned int (*func)(struct adapter *adapt, struct recv_frame *frame);
-};
-
-struct action_handler {
- unsigned int num;
- char* str;
- unsigned int (*func)(struct adapter *adapt, struct recv_frame *frame);
-};
+typedef unsigned int (*mlme_handler)(struct adapter *adapt, struct recv_frame *frame);
struct ss_res {
int state;
@@ -419,7 +385,7 @@ struct mlme_ext_priv {
u8 active_keep_alive_check;
};
-int init_mlme_ext_priv(struct adapter *adapter);
+void init_mlme_ext_priv(struct adapter *adapter);
int init_hw_mlme_ext(struct adapter *padapter);
void free_mlme_ext_priv (struct mlme_ext_priv *pmlmeext);
extern void init_mlme_ext_timer(struct adapter *padapter);
@@ -434,7 +400,6 @@ void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen);
void Save_DM_Func_Flag(struct adapter *padapter);
void Restore_DM_Func_Flag(struct adapter *padapter);
-void Switch_DM_Func(struct adapter *padapter, u32 mode, u8 enable);
void Set_MSR(struct adapter *padapter, u8 type);
@@ -563,6 +528,8 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr,
unsigned char action, unsigned short status);
unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr);
unsigned int send_beacon(struct adapter *padapter);
+bool get_beacon_valid_bit(struct adapter *adapter);
+void clear_beacon_valid_bit(struct adapter *adapter);
void start_clnt_assoc(struct adapter *padapter);
void start_clnt_auth(struct adapter *padapter);
@@ -594,20 +561,10 @@ unsigned int OnDeAuth(struct adapter *padapter,
unsigned int OnAction(struct adapter *padapter,
struct recv_frame *precv_frame);
-unsigned int on_action_spct(struct adapter *padapter,
- struct recv_frame *precv_frame);
-unsigned int OnAction_qos(struct adapter *padapter,
- struct recv_frame *precv_frame);
-unsigned int OnAction_dls(struct adapter *padapter,
- struct recv_frame *precv_frame);
unsigned int OnAction_back(struct adapter *padapter,
struct recv_frame *precv_frame);
unsigned int on_action_public(struct adapter *padapter,
struct recv_frame *precv_frame);
-unsigned int OnAction_ht(struct adapter *padapter,
- struct recv_frame *precv_frame);
-unsigned int OnAction_wmm(struct adapter *padapter,
- struct recv_frame *precv_frame);
unsigned int OnAction_p2p(struct adapter *padapter,
struct recv_frame *precv_frame);
@@ -635,8 +592,6 @@ void addba_timer_hdl(struct sta_info *psta);
bool cckrates_included(unsigned char *rate, int ratelen);
bool cckratesonly_included(unsigned char *rate, int ratelen);
-void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr);
-
void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len);
void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext);
@@ -769,9 +724,6 @@ struct C2HEvent_Header {
unsigned int rsvd;
};
-void rtw_dummy_event_callback(struct adapter *adapter, u8 *pbuf);
-void rtw_fwdbg_event_callback(struct adapter *adapter, u8 *pbuf);
-
enum rtw_c2h_event {
GEN_EVT_CODE(_Read_MACREG) = 0, /*0*/
GEN_EVT_CODE(_Read_BBREG),
@@ -806,7 +758,7 @@ enum rtw_c2h_event {
#ifdef _RTW_MLME_EXT_C_
static struct fwevent wlanevents[] = {
- {0, rtw_dummy_event_callback}, /*0*/
+ {0, NULL}, /*0*/
{0, NULL},
{0, NULL},
{0, NULL},
@@ -820,12 +772,12 @@ static struct fwevent wlanevents[] = {
{sizeof(struct stassoc_event), &rtw_stassoc_event_callback},
{sizeof(struct stadel_event), &rtw_stadel_event_callback},
{0, NULL},
- {0, rtw_dummy_event_callback},
+ {0, NULL},
{0, NULL}, /*15*/
{0, NULL},
{0, NULL},
{0, NULL},
- {0, rtw_fwdbg_event_callback},
+ {0, NULL},
{0, NULL}, /*20*/
{0, NULL},
{0, NULL},
diff --git a/drivers/staging/r8188eu/include/rtw_pwrctrl.h b/drivers/staging/r8188eu/include/rtw_pwrctrl.h
index 7c3cb895c3cd..6e9fdd66fad1 100644
--- a/drivers/staging/r8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/r8188eu/include/rtw_pwrctrl.h
@@ -47,16 +47,8 @@ struct pwrctrl_priv {
u8 smart_ps;
u8 bcn_ant_mode;
- u32 alives;
- struct work_struct cpwm_event;
bool bpower_saving;
- u8 reg_rfoff;
- u8 reg_pdnmode; /* powerdown mode */
-
- /* RF OFF Level */
- u32 cur_ps_level;
- u32 reg_rfps_level;
uint ips_enter_cnts;
uint ips_leave_cnts;
@@ -64,7 +56,7 @@ struct pwrctrl_priv {
u8 ips_mode_req; /* used to accept the mode setting request,
* will update to ipsmode later */
uint bips_processing;
- u32 ips_deny_time; /* will deny IPS when system time less than this */
+ unsigned long ips_deny_time; /* will deny IPS when system time less than this */
u8 ps_processing; /* temp used to mark whether in rtw_ps_processor */
u8 bLeisurePs;
@@ -72,21 +64,15 @@ struct pwrctrl_priv {
u8 power_mgnt;
u8 bFwCurrentInPSMode;
u32 DelayLPSLastTimeStamp;
- s32 pnp_current_pwr_state;
- u8 pnp_bstop_trx;
u8 bInSuspend;
u8 bSupportRemoteWakeup;
struct timer_list pwr_state_check_timer;
int pwr_state_check_interval;
- u8 pwr_state_check_cnts;
-
- int ps_flag;
enum rt_rf_power_state rf_pwrstate;/* cur power state */
enum rt_rf_power_state change_rfpwrstate;
- u8 wepkeymask;
u8 bkeepfwalive;
};
@@ -109,6 +95,7 @@ struct pwrctrl_priv {
void rtw_init_pwrctrl_priv(struct adapter *adapter);
+void rtw_set_firmware_ps_mode(struct adapter *adapter, u8 mode);
void rtw_set_ps_mode(struct adapter *adapter, u8 ps_mode, u8 smart_ps,
u8 bcn_ant_mode);
void LeaveAllPowerSaveMode(struct adapter *adapter);
@@ -117,14 +104,10 @@ int ips_leave(struct adapter *padapter);
void rtw_ps_processor(struct adapter *padapter);
-s32 LPS_RF_ON_check(struct adapter *adapter, u32 delay_ms);
void LPS_Enter(struct adapter *adapter);
void LPS_Leave(struct adapter *adapter);
-int _rtw_pwr_wakeup(struct adapter *adapter, u32 ips_defer_ms,
- const char *caller);
-#define rtw_pwr_wakeup(adapter) \
- _rtw_pwr_wakeup(adapter, RTW_PWR_STATE_CHK_INTERVAL, __func__)
+int rtw_pwr_wakeup(struct adapter *adapter);
int rtw_pm_set_ips(struct adapter *adapter, u8 mode);
int rtw_pm_set_lps(struct adapter *adapter, u8 mode);
diff --git a/drivers/staging/r8188eu/include/rtw_recv.h b/drivers/staging/r8188eu/include/rtw_recv.h
index 4ac4e6b3e177..66d240a7123d 100644
--- a/drivers/staging/r8188eu/include/rtw_recv.h
+++ b/drivers/staging/r8188eu/include/rtw_recv.h
@@ -80,7 +80,6 @@ struct rx_pkt_attrib {
u8 drvinfo_sz;
u8 shift_sz;
u8 hdrlen; /* the WLAN Header Len */
- u8 to_fr_ds;
u8 amsdu;
bool qos;
u8 priority;
@@ -167,7 +166,6 @@ struct recv_priv {
uint rx_largepacket_crcerr;
uint rx_smallpacket_crcerr;
uint rx_middlepacket_crcerr;
- struct semaphore allrxreturnevt;
u8 rx_pending_cnt;
struct tasklet_struct recv_tasklet;
@@ -230,7 +228,6 @@ struct recv_buf {
struct recv_frame {
struct list_head list;
struct sk_buff *pkt;
- struct sk_buff *pkt_newalloc;
struct adapter *adapter;
u8 fragcnt;
int frame_tag;
diff --git a/drivers/staging/r8188eu/include/rtw_xmit.h b/drivers/staging/r8188eu/include/rtw_xmit.h
index b2df1480d66b..034a9f8f51c9 100644
--- a/drivers/staging/r8188eu/include/rtw_xmit.h
+++ b/drivers/staging/r8188eu/include/rtw_xmit.h
@@ -198,7 +198,7 @@ struct xmit_buf {
u32 len;
struct submit_ctx *sctx;
u32 ff_hwaddr;
- struct urb *pxmit_urb[8];
+ struct urb *pxmit_urb;
dma_addr_t dma_transfer_addr; /* (in) dma addr for transfer_buffer */
u8 bpending[8];
int last[8];
@@ -341,7 +341,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits(struct adapter *padapter);
+int rtw_alloc_hwxmits(struct adapter *padapter);
void rtw_free_hwxmits(struct adapter *padapter);
s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
diff --git a/drivers/staging/r8188eu/include/sta_info.h b/drivers/staging/r8188eu/include/sta_info.h
index b7e6b1f319a2..4112c837bcef 100644
--- a/drivers/staging/r8188eu/include/sta_info.h
+++ b/drivers/staging/r8188eu/include/sta_info.h
@@ -48,7 +48,6 @@ struct stainfo_stats {
u64 rx_ctrl_pkts;
u64 rx_data_pkts;
- u64 last_rx_mgnt_pkts;
u64 last_rx_beacon_pkts;
u64 last_rx_probereq_pkts;
u64 last_rx_probersp_pkts;
@@ -230,7 +229,6 @@ struct sta_info {
#define sta_update_last_rx_pkts(sta) \
do { \
- sta->sta_stats.last_rx_mgnt_pkts = sta->sta_stats.rx_mgnt_pkts; \
sta->sta_stats.last_rx_beacon_pkts = sta->sta_stats.rx_beacon_pkts; \
sta->sta_stats.last_rx_probereq_pkts = sta->sta_stats.rx_probereq_pkts; \
sta->sta_stats.last_rx_probersp_pkts = sta->sta_stats.rx_probersp_pkts; \
diff --git a/drivers/staging/r8188eu/include/usb_ops.h b/drivers/staging/r8188eu/include/usb_ops.h
index 14526fcff4ae..ddc46cb44358 100644
--- a/drivers/staging/r8188eu/include/usb_ops.h
+++ b/drivers/staging/r8188eu/include/usb_ops.h
@@ -25,15 +25,14 @@
* @return true:
* @return false:
*/
-static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
+static inline bool rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
{
- int ret = false;
- int value;
- value = atomic_inc_return(&dvobj->continual_urb_error);
+ int value = atomic_inc_return(&dvobj->continual_urb_error);
+
if (value > MAX_CONTINUAL_URB_ERR)
- ret = true;
+ return true;
- return ret;
+ return false;
}
/*
@@ -47,19 +46,14 @@ static inline void rtw_reset_continual_urb_error(struct dvobj_priv *dvobj)
#define USB_HIGH_SPEED_BULK_SIZE 512
#define USB_FULL_SPEED_BULK_SIZE 64
-static inline u8 rtw_usb_bulk_size_boundary(struct adapter *padapter,
- int buf_len)
+static inline bool rtw_usb_bulk_size_boundary(struct adapter *padapter, int buf_len)
{
- u8 rst = true;
struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
if (pdvobjpriv->pusbdev->speed == USB_SPEED_HIGH)
- rst = (0 == (buf_len) % USB_HIGH_SPEED_BULK_SIZE) ?
- true : false;
+ return buf_len % USB_HIGH_SPEED_BULK_SIZE == 0;
else
- rst = (0 == (buf_len) % USB_FULL_SPEED_BULK_SIZE) ?
- true : false;
- return rst;
+ return buf_len % USB_FULL_SPEED_BULK_SIZE == 0;
}
#endif /* __USB_OPS_H_ */
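
The two inline helpers above are converted to return bool directly instead of routing the result through a local flag variable. A minimal sketch of the same pattern outside the driver, using a hypothetical structure and threshold (names are not from the driver):

#include <linux/atomic.h>
#include <linux/types.h>

#define MY_MAX_ERRORS 4 /* hypothetical threshold */

struct my_dev {
	atomic_t error_count;
};

/* Returns true once the consecutive error count exceeds the limit. */
static inline bool my_dev_error_limit_hit(struct my_dev *dev)
{
	return atomic_inc_return(&dev->error_count) > MY_MAX_ERRORS;
}

Returning the comparison result directly removes the intermediate ret variable and reads the same way the condition is stated in prose.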
diff --git a/drivers/staging/r8188eu/include/usb_osintf.h b/drivers/staging/r8188eu/include/usb_osintf.h
index 3e777ca52745..f271e93e9ab9 100644
--- a/drivers/staging/r8188eu/include/usb_osintf.h
+++ b/drivers/staging/r8188eu/include/usb_osintf.h
@@ -6,16 +6,12 @@
#include "osdep_service.h"
#include "drv_types.h"
-#include "usb_vendor_req.h"
extern char *rtw_initmac;
extern int rtw_mc2u_disable;
#define USBD_HALTED(Status) ((u32)(Status) >> 30 == 3)
-u8 usbvendorrequest(struct dvobj_priv *pdvobjpriv, enum bt_usb_request brequest,
- enum rt_usb_wvalue wvalue, u8 windex, void *data,
- u8 datalen, u8 isdirectionin);
void netdev_br_init(struct net_device *netdev);
void dhcp_flag_bcast(struct adapter *priv, struct sk_buff *skb);
void *scdb_findEntry(struct adapter *priv, unsigned char *ipAddr);
diff --git a/drivers/staging/r8188eu/include/usb_vendor_req.h b/drivers/staging/r8188eu/include/usb_vendor_req.h
deleted file mode 100644
index 7337b1b7419f..000000000000
--- a/drivers/staging/r8188eu/include/usb_vendor_req.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef _USB_VENDOR_REQUEST_H_
-#define _USB_VENDOR_REQUEST_H_
-
-/* 4 Set/Get Register related wIndex/Data */
-#define RT_USB_RESET_MASK_OFF 0
-#define RT_USB_RESET_MASK_ON 1
-#define RT_USB_SLEEP_MASK_OFF 0
-#define RT_USB_SLEEP_MASK_ON 1
-#define RT_USB_LDO_ON 1
-#define RT_USB_LDO_OFF 0
-
-/* 4 Set/Get SYSCLK related wValue or Data */
-#define RT_USB_SYSCLK_32KHZ 0
-#define RT_USB_SYSCLK_40MHZ 1
-#define RT_USB_SYSCLK_60MHZ 2
-
-enum bt_usb_request {
- RT_USB_SET_REGISTER = 1,
- RT_USB_SET_SYSCLK = 2,
- RT_USB_GET_SYSCLK = 3,
- RT_USB_GET_REGISTER = 4
-};
-
-enum rt_usb_wvalue {
- RT_USB_RESET_MASK = 1,
- RT_USB_SLEEP_MASK = 2,
- RT_USB_USB_HRCPWM = 3,
- RT_USB_LDO = 4,
- RT_USB_BOOT_TYPE = 5
-};
-
-#endif
diff --git a/drivers/staging/r8188eu/include/wifi.h b/drivers/staging/r8188eu/include/wifi.h
index c331be19ff83..0254310bdf44 100644
--- a/drivers/staging/r8188eu/include/wifi.h
+++ b/drivers/staging/r8188eu/include/wifi.h
@@ -4,25 +4,14 @@
#ifndef _WIFI_H_
#define _WIFI_H_
+#include <linux/bits.h>
#include <linux/ieee80211.h>
-#ifdef BIT
-/* error "BIT define occurred earlier elsewhere!\n" */
-#undef BIT
-#endif
-#define BIT(x) (1 << (x))
-
#define WLAN_ETHHDR_LEN 14
#define WLAN_HDR_A3_LEN 24
#define WLAN_HDR_A3_QOS_LEN 26
#define WLAN_SSID_MAXLEN 32
-enum WIFI_FRAME_TYPE {
- WIFI_CTRL_TYPE = (BIT(2)),
- WIFI_DATA_TYPE = (BIT(3)),
- WIFI_QOS_DATA_TYPE = (BIT(7)|BIT(3)), /* QoS Data */
-};
-
enum WIFI_FRAME_SUBTYPE {
/* below is for mgt frame */
WIFI_ASSOCREQ = (0 | IEEE80211_FTYPE_MGMT),
@@ -39,24 +28,15 @@ enum WIFI_FRAME_SUBTYPE {
WIFI_ACTION = (BIT(7) | BIT(6) | BIT(4) | IEEE80211_FTYPE_MGMT),
/* below is for control frame */
- WIFI_PSPOLL = (BIT(7) | BIT(5) | WIFI_CTRL_TYPE),
- WIFI_RTS = (BIT(7) | BIT(5) | BIT(4) | WIFI_CTRL_TYPE),
- WIFI_CTS = (BIT(7) | BIT(6) | WIFI_CTRL_TYPE),
- WIFI_ACK = (BIT(7) | BIT(6) | BIT(4) | WIFI_CTRL_TYPE),
- WIFI_CFEND = (BIT(7) | BIT(6) | BIT(5) | WIFI_CTRL_TYPE),
- WIFI_CFEND_CFACK = (BIT(7) | BIT(6) | BIT(5) | BIT(4) |
- WIFI_CTRL_TYPE),
+ WIFI_PSPOLL = (BIT(7) | BIT(5) | IEEE80211_FTYPE_CTL),
/* below is for data frame */
- WIFI_DATA = (0 | WIFI_DATA_TYPE),
- WIFI_DATA_CFACK = (BIT(4) | WIFI_DATA_TYPE),
- WIFI_DATA_CFPOLL = (BIT(5) | WIFI_DATA_TYPE),
- WIFI_DATA_CFACKPOLL = (BIT(5) | BIT(4) | WIFI_DATA_TYPE),
- WIFI_DATA_NULL = (BIT(6) | WIFI_DATA_TYPE),
- WIFI_CF_ACK = (BIT(6) | BIT(4) | WIFI_DATA_TYPE),
- WIFI_CF_POLL = (BIT(6) | BIT(5) | WIFI_DATA_TYPE),
- WIFI_CF_ACKPOLL = (BIT(6) | BIT(5) | BIT(4) | WIFI_DATA_TYPE),
- WIFI_QOS_DATA_NULL = (BIT(6) | WIFI_QOS_DATA_TYPE),
+ WIFI_DATA = (0 | IEEE80211_FTYPE_DATA),
+ WIFI_DATA_CFACK = (BIT(4) | IEEE80211_FTYPE_DATA),
+ WIFI_DATA_CFPOLL = (BIT(5) | IEEE80211_FTYPE_DATA),
+ WIFI_DATA_CFACKPOLL = (BIT(5) | BIT(4) | IEEE80211_FTYPE_DATA),
+ WIFI_DATA_NULL = (BIT(6) | IEEE80211_FTYPE_DATA),
+ WIFI_QOS_DATA_NULL = (BIT(6) | IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA),
};
enum WIFI_REASON_CODE {
@@ -172,8 +152,6 @@ enum WIFI_REG_DOMAIN {
#define GetFrDs(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_FROM_DS_)) != 0)
-#define get_tofr_ds(pframe) ((GetToDs(pframe) << 1) | GetFrDs(pframe))
-
#define SetMFrag(pbuf) \
*(__le16 *)(pbuf) |= cpu_to_le16(_MORE_FRAG_)
@@ -209,12 +187,6 @@ enum WIFI_REG_DOMAIN {
*(__le16 *)(pbuf) |= cpu_to_le16(type); \
} while (0)
-#define GetSequence(pbuf) \
- (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 22)) >> 4)
-
-#define GetFragNum(pbuf) \
- (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 22)) & 0x0f)
-
#define GetTupleCache(pbuf) \
(cpu_to_le16(*(unsigned short *)((size_t)(pbuf) + 22)))
@@ -239,8 +211,6 @@ enum WIFI_REG_DOMAIN {
#define SetPriority(pbuf, tid) \
*(__le16 *)(pbuf) |= cpu_to_le16(tid & 0xf)
-#define GetPriority(pbuf) ((le16_to_cpu(*(__le16 *)(pbuf))) & 0xf)
-
#define SetEOSP(pbuf, eosp) \
*(__le16 *)(pbuf) |= cpu_to_le16((eosp & 1) << 4)
@@ -254,8 +224,6 @@ enum WIFI_REG_DOMAIN {
#define SetAMsdu(pbuf, amsdu) \
*(__le16 *)(pbuf) |= cpu_to_le16((amsdu & 1) << 7)
-#define GetAid(pbuf) (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 2)) & 0x3fff)
-
#define GetTid(pbuf) (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + \
(((GetToDs(pbuf)<<1) | GetFrDs(pbuf)) == 3 ? \
30 : 24))) & 0x000f)
@@ -270,10 +238,7 @@ enum WIFI_REG_DOMAIN {
static inline bool IS_MCAST(unsigned char *da)
{
- if ((*da) & 0x01)
- return true;
- else
- return false;
+ return (*da) & 0x01;
}
static inline unsigned char *get_da(unsigned char *pframe)
@@ -345,13 +310,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
return sa;
}
-static inline bool IsFrameTypeCtrl(unsigned char *pframe)
-{
- if (WIFI_CTRL_TYPE == GetFrameType(pframe))
- return true;
- else
- return false;
-}
/*-----------------------------------------------------------------------------
Below is for the security related definition
------------------------------------------------------------------------------*/
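
With the local BIT() definition and the WIFI_CTRL_TYPE/WIFI_DATA_TYPE constants gone, the remaining subtypes are composed from the IEEE80211_FTYPE_* and IEEE80211_STYPE_* values provided by <linux/ieee80211.h>. A small, generic illustration of how such composed values relate to the frame-control field (identifiers below are examples, not driver code):

#include <linux/bits.h>
#include <linux/ieee80211.h>

/* QoS NULL data: data type + QoS-data subtype + the "null" subtype bit,
 * the same composition as WIFI_QOS_DATA_NULL above. */
#define EXAMPLE_QOS_NULL (BIT(6) | IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA)

static inline bool example_is_data_frame(u16 frame_control)
{
	return (frame_control & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA;
}

Reusing the ieee80211 constants keeps the driver's frame-type values consistent with the rest of the wireless stack.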
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
index eb9375b0c660..1b09462ca908 100644
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
@@ -4,7 +4,6 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
#include "../include/wlan_bssdef.h"
-#include "../include/rtw_debug.h"
#include "../include/wifi.h"
#include "../include/rtw_mlme.h"
#include "../include/rtw_mlme_ext.h"
@@ -1131,9 +1130,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
break;
}
sec_len = *(pos++); len -= 1;
- if (sec_len > 0 && sec_len <= len) {
+ if (sec_len > 0 &&
+ sec_len <= len &&
+ sec_len <= 32) {
ssid[ssid_index].SsidLength = sec_len;
- memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
+ memcpy(ssid[ssid_index].Ssid, pos, sec_len);
ssid_index++;
}
pos += sec_len;
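
The added sec_len <= 32 condition caps the copy at the size of the destination SSID buffer (WLAN_SSID_MAXLEN is 32 in this driver), closing a potential overflow when a scan request reports an oversized SSID length. A standalone sketch of the same bounds-checked copy, with hypothetical names:

#include <linux/string.h>
#include <linux/types.h>

#define EXAMPLE_SSID_MAXLEN 32 /* mirrors WLAN_SSID_MAXLEN */

struct example_ssid {
	u8 len;
	u8 data[EXAMPLE_SSID_MAXLEN];
};

/* Copy only if the reported length fits the remaining input and the buffer. */
static bool example_copy_ssid(struct example_ssid *dst, const u8 *src,
			      size_t sec_len, size_t remaining)
{
	if (sec_len == 0 || sec_len > remaining || sec_len > EXAMPLE_SSID_MAXLEN)
		return false;

	dst->len = sec_len;
	memcpy(dst->data, src, sec_len);
	return true;
}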
@@ -1886,88 +1887,6 @@ static int rtw_wx_get_nick(struct net_device *dev,
return 0;
}
-static int rtw_wx_read32(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter;
- struct iw_point *p;
- u16 len;
- u32 addr;
- u32 data32;
- u32 bytes;
- u8 *ptmp;
- int ret;
-
- padapter = (struct adapter *)rtw_netdev_priv(dev);
- p = &wrqu->data;
- len = p->length;
- ptmp = memdup_user(p->pointer, len);
- if (IS_ERR(ptmp))
- return PTR_ERR(ptmp);
-
- bytes = 0;
- addr = 0;
- sscanf(ptmp, "%d,%x", &bytes, &addr);
-
- switch (bytes) {
- case 1:
- data32 = rtw_read8(padapter, addr);
- sprintf(extra, "0x%02X", data32);
- break;
- case 2:
- data32 = rtw_read16(padapter, addr);
- sprintf(extra, "0x%04X", data32);
- break;
- case 4:
- data32 = rtw_read32(padapter, addr);
- sprintf(extra, "0x%08X", data32);
- break;
- default:
- ret = -EINVAL;
- goto err_free_ptmp;
- }
-
- kfree(ptmp);
- return 0;
-
-err_free_ptmp:
- kfree(ptmp);
- return ret;
-}
-
-static int rtw_wx_write32(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- u32 addr;
- u32 data32;
- u32 bytes;
-
- bytes = 0;
- addr = 0;
- data32 = 0;
- sscanf(extra, "%d,%x,%x", &bytes, &addr, &data32);
-
- switch (bytes) {
- case 1:
- rtw_write8(padapter, addr, (u8)data32);
- break;
- case 2:
- rtw_write16(padapter, addr, (u16)data32);
- break;
- case 4:
- rtw_write32(padapter, addr, data32);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int rtw_wx_read_rf(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -2363,114 +2282,6 @@ static void rtw_p2p_setDN(struct net_device *dev,
pwdinfo->device_name_len = wrqu->data.length - 1;
}
-static void rtw_p2p_get_status(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- /* Commented by Albert 2010/10/12 */
- /* Because of the output size limitation, I had removed the "Role" information. */
- /* About the "Role" information, we will use the new private IOCTL to get the "Role" information. */
- sprintf(extra, "\n\nStatus =%.2d\n", rtw_p2p_state(pwdinfo));
- wrqu->data.length = strlen(extra);
-}
-
-/* Commented by Albert 20110520 */
-/* This function will return the config method description */
-/* This config method description will show us which config method the remote P2P device is intended to use */
-/* by sending the provisioning discovery request frame. */
-
-static void rtw_p2p_get_req_cm(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- sprintf(extra, "\n\nCM =%s\n", pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req);
- wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_role(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- sprintf(extra, "\n\nRole =%.2d\n", rtw_p2p_role(pwdinfo));
- wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_peer_ifaddr(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- sprintf(extra, "\nMAC %pM",
- pwdinfo->p2p_peer_interface_addr);
- wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_peer_devaddr(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- sprintf(extra, "\n%pM",
- pwdinfo->rx_prov_disc_info.peerDevAddr);
- wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_peer_devaddr_by_invitation(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- sprintf(extra, "\nMAC %pM",
- pwdinfo->p2p_peer_device_addr);
- wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_groupid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- sprintf(extra, "\n%.2X:%.2X:%.2X:%.2X:%.2X:%.2X %s",
- pwdinfo->groupid_info.go_device_addr[0], pwdinfo->groupid_info.go_device_addr[1],
- pwdinfo->groupid_info.go_device_addr[2], pwdinfo->groupid_info.go_device_addr[3],
- pwdinfo->groupid_info.go_device_addr[4], pwdinfo->groupid_info.go_device_addr[5],
- pwdinfo->groupid_info.ssid);
- wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_op_ch(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- sprintf(extra, "\n\nOp_ch =%.2d\n", pwdinfo->operating_channel);
- wrqu->data.length = strlen(extra);
-}
-
static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -3229,32 +3040,6 @@ static int rtw_p2p_set(struct net_device *dev,
return ret;
}
-static int rtw_p2p_get(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- if (!memcmp(wrqu->data.pointer, "status", 6)) {
- rtw_p2p_get_status(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "role", 4)) {
- rtw_p2p_get_role(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "peer_ifa", 8)) {
- rtw_p2p_get_peer_ifaddr(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "req_cm", 6)) {
- rtw_p2p_get_req_cm(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "peer_deva", 9)) {
- /* Get the P2P device address when receiving the provision discovery request frame. */
- rtw_p2p_get_peer_devaddr(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "group_id", 8)) {
- rtw_p2p_get_groupid(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "peer_deva_inv", 9)) {
- /* Get the P2P device address when receiving the P2P Invitation request frame. */
- rtw_p2p_get_peer_devaddr_by_invitation(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "op_ch", 5)) {
- rtw_p2p_get_op_ch(dev, info, wrqu, extra);
- }
- return 0;
-}
-
static int rtw_p2p_get2(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -3389,6 +3174,34 @@ static void rf_reg_dump(struct adapter *padapter)
}
}
+static void rtw_set_dynamic_functions(struct adapter *adapter, u8 dm_func)
+{
+ struct hal_data_8188e *haldata = &adapter->haldata;
+ struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+
+ switch (dm_func) {
+ case 0:
+ /* disable all dynamic func */
+ odmpriv->SupportAbility = DYNAMIC_FUNC_DISABLE;
+ break;
+ case 1:
+ /* disable DIG */
+ odmpriv->SupportAbility &= (~DYNAMIC_BB_DIG);
+ break;
+ case 6:
+ /* turn on all dynamic func */
+ if (!(odmpriv->SupportAbility & DYNAMIC_BB_DIG)) {
+ struct rtw_dig *digtable = &odmpriv->DM_DigTable;
+
+ digtable->CurIGValue = rtw_read8(adapter, 0xc50);
+ }
+ odmpriv->SupportAbility = DYNAMIC_ALL_FUNC_ENABLE;
+ break;
+ default:
+ break;
+ }
+}
+
static int rtw_dbg_port(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -3620,9 +3433,7 @@ static int rtw_dbg_port(struct net_device *dev,
break;
case 0x06:
{
- u32 ODMFlag;
- GetHwReg8188EU(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
- ODMFlag = (u32)(0x0f & arg);
+ u32 ODMFlag = (u32)(0x0f & arg);
SetHwReg8188EU(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
}
break;
@@ -3632,13 +3443,6 @@ static int rtw_dbg_port(struct net_device *dev,
break;
case 0x09:
break;
- case 0x0c:/* dump rx/tx packet */
- if (arg == 0) {
- SetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DUMP_RXPKT, &extra_arg);
- } else if (arg == 1) {
- SetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DUMP_TXPKT, &extra_arg);
- }
- break;
case 0x15:
break;
case 0x10:/* driver version display */
@@ -3683,23 +3487,14 @@ static int rtw_dbg_port(struct net_device *dev,
rf_reg_dump(padapter);
break;
case 0xee:/* turn on/off dynamic funcs */
- {
- u32 odm_flag;
-
- if (0xf == extra_arg) {
- GetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
- } else {
- /* extra_arg = 0 - disable all dynamic func
- extra_arg = 1 - disable DIG
- extra_arg = 2 - disable tx power tracking
- extra_arg = 3 - turn on all dynamic func
- */
- SetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DM_FUNC, &extra_arg);
- GetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
- }
+ if (extra_arg != 0xf) {
+ /* extra_arg = 0 - disable all dynamic func
+ * extra_arg = 1 - disable DIG
+ * extra_arg = 6 - turn on all dynamic func
+ */
+ rtw_set_dynamic_functions(padapter, extra_arg);
}
break;
-
case 0xfd:
rtw_write8(padapter, 0xc50, arg);
rtw_write8(padapter, 0xc58, arg);
@@ -3895,8 +3690,8 @@ static const struct iw_priv_args rtw_private_args[] = {
};
static iw_handler rtw_private_handler[] = {
-rtw_wx_write32, /* 0x00 */
-rtw_wx_read32, /* 0x01 */
+ NULL, /* 0x00 */
+ NULL, /* 0x01 */
NULL, /* 0x02 */
NULL, /* 0x03 */
/* for MM DTV platform */
@@ -3919,7 +3714,7 @@ NULL, /* 0x03 */
NULL, /* 0x0F */
rtw_p2p_set, /* 0x10 */
- rtw_p2p_get, /* 0x11 */
+ NULL, /* 0x11 */
rtw_p2p_get2, /* 0x12 */
NULL, /* 0x13 */
@@ -3958,10 +3753,10 @@ static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
struct iw_handler_def rtw_handlers_def = {
.standard = rtw_handlers,
- .num_standard = sizeof(rtw_handlers) / sizeof(iw_handler),
+ .num_standard = ARRAY_SIZE(rtw_handlers),
.private = rtw_private_handler,
.private_args = (struct iw_priv_args *)rtw_private_args,
- .num_private = sizeof(rtw_private_handler) / sizeof(iw_handler),
- .num_private_args = sizeof(rtw_private_args) / sizeof(struct iw_priv_args),
+ .num_private = ARRAY_SIZE(rtw_private_handler),
+ .num_private_args = ARRAY_SIZE(rtw_private_args),
.get_wireless_stats = rtw_get_wireless_stats,
};
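
The handler table sizing now uses ARRAY_SIZE() instead of open-coded sizeof divisions, so the element type only has to be stated once. A tiny sketch of the idiom with generic names:

#include <linux/kernel.h> /* ARRAY_SIZE() */

static const int example_table[] = { 1, 2, 3, 5, 8 };

static size_t example_table_entries(void)
{
	/* Evaluates to 5 and stays correct if entries are added or removed. */
	return ARRAY_SIZE(example_table);
}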
diff --git a/drivers/staging/r8188eu/os_dep/mlme_linux.c b/drivers/staging/r8188eu/os_dep/mlme_linux.c
index 72ad9700130e..899d8e9c3834 100644
--- a/drivers/staging/r8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/r8188eu/os_dep/mlme_linux.c
@@ -66,7 +66,6 @@ void rtw_reset_securitypriv(struct adapter *adapter)
/* We have to backup the PMK information for WiFi PMK Caching test item. */
/* Backup the btkip_countermeasure information. */
/* When the countermeasure is trigger, the driver have to disconnect with AP for 60 seconds. */
- memset(&backup_pmkid[0], 0x00, sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
memcpy(&backup_pmkid[0], &adapter->securitypriv.PMKIDList[0], sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
backup_index = adapter->securitypriv.PMKIDIndex;
backup_counter = adapter->securitypriv.btkip_countermeasure;
diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c
index 550721eef681..891c85b088ca 100644
--- a/drivers/staging/r8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/r8188eu/os_dep/os_intfs.c
@@ -441,7 +441,6 @@ static void rtw_init_default_value(struct adapter *padapter)
u8 rtw_reset_drv_sw(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
/* hal_priv */
rtl8188eu_init_default_value(padapter);
@@ -457,8 +456,6 @@ u8 rtw_reset_drv_sw(struct adapter *padapter)
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING);
- pwrctrlpriv->pwr_state_check_cnts = 0;
-
/* mlmeextpriv */
padapter->mlmeextpriv.sitesurvey_res.state = SCAN_DISABLE;
@@ -490,10 +487,7 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
init_wifidirect_info(padapter, P2P_ROLE_DISABLE);
reset_global_wifidirect_info(padapter);
- if (init_mlme_ext_priv(padapter) == _FAIL) {
- dev_err(dvobj_to_dev(padapter->dvobj), "init_mlme_ext_priv failed\n");
- goto free_mlme_priv;
- }
+ init_mlme_ext_priv(padapter);
if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter) == _FAIL) {
dev_err(dvobj_to_dev(padapter->dvobj), "_rtw_init_xmit_priv failed\n");
@@ -534,7 +528,6 @@ free_xmit_priv:
free_mlme_ext:
free_mlme_ext_priv(&padapter->mlmeextpriv);
-free_mlme_priv:
rtw_free_mlme_priv(&padapter->mlmepriv);
free_evt_priv:
@@ -632,12 +625,6 @@ int _netdev_open(struct net_device *pnetdev)
{
uint status;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
- struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
-
- if (pwrctrlpriv->ps_flag) {
- padapter->net_closed = false;
- goto netdev_open_normal_process;
- }
if (!padapter->bup) {
padapter->bDriverStopped = false;
@@ -681,7 +668,6 @@ int _netdev_open(struct net_device *pnetdev)
netdev_br_init(pnetdev);
-netdev_open_normal_process:
return 0;
netdev_open_error:
@@ -750,9 +736,36 @@ void rtw_ips_pwr_down(struct adapter *padapter)
padapter->bCardDisableWOHSM = false;
}
+static void rtw_fifo_cleanup(struct adapter *adapter)
+{
+ struct pwrctrl_priv *pwrpriv = &adapter->pwrctrlpriv;
+ u8 trycnt = 100;
+
+ /* pause tx */
+ rtw_write8(adapter, REG_TXPAUSE, 0xff);
+
+ /* keep sn */
+ adapter->xmitpriv.nqos_ssn = rtw_read16(adapter, REG_NQOS_SEQ);
+
+ if (!pwrpriv->bkeepfwalive) {
+ /* RX DMA stop */
+ rtw_write32(adapter, REG_RXPKT_NUM,
+ (rtw_read32(adapter, REG_RXPKT_NUM) | RW_RELEASE_EN));
+ do {
+ if (!(rtw_read32(adapter, REG_RXPKT_NUM) & RXDMA_IDLE))
+ break;
+ } while (trycnt--);
+
+ /* RQPN Load 0 */
+ rtw_write16(adapter, REG_RQPN_NPQ, 0x0);
+ rtw_write32(adapter, REG_RQPN, 0x80000000);
+ mdelay(10);
+ }
+}
+
void rtw_ips_dev_unload(struct adapter *padapter)
{
- SetHwReg8188EU(padapter, HW_VAR_FIFO_CLEARN_UP, NULL);
+ rtw_fifo_cleanup(padapter);
if (padapter->intf_stop)
padapter->intf_stop(padapter);
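
The new rtw_fifo_cleanup() replaces the HW_VAR_FIFO_CLEARN_UP hal hook with an open-coded pause/poll sequence. Its bounded retry loop is a common pattern; a generic, hedged sketch with a hypothetical predicate callback:

#include <linux/delay.h>
#include <linux/types.h>

/* Poll a status predicate a limited number of times; sketch only. */
static bool example_poll_idle(bool (*is_idle)(void *ctx), void *ctx,
			      unsigned int tries)
{
	while (tries--) {
		if (is_idle(ctx))
			return true;
		udelay(10); /* small back-off between register reads */
	}
	return false;
}

Bounding the loop keeps the cleanup path from hanging if the hardware never reports idle.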
diff --git a/drivers/staging/r8188eu/os_dep/osdep_service.c b/drivers/staging/r8188eu/os_dep/osdep_service.c
index 7a6fcc96081a..812acd59be79 100644
--- a/drivers/staging/r8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/r8188eu/os_dep/osdep_service.c
@@ -42,22 +42,6 @@ Otherwise, there will be racing condition.
Caller must check if the list is empty before calling rtw_list_delete
*/
-inline u32 rtw_systime_to_ms(u32 systime)
-{
- return systime * 1000 / HZ;
-}
-
-inline u32 rtw_ms_to_systime(u32 ms)
-{
- return ms * HZ / 1000;
-}
-
-/* the input parameter start use the same unit as jiffies */
-inline s32 rtw_get_passing_time_ms(u32 start)
-{
- return rtw_systime_to_ms(jiffies - start);
-}
-
void rtw_usleep_os(int us)
{
if (1 < (us / 1000))
@@ -116,19 +100,10 @@ void rtw_free_netdev(struct net_device *netdev)
{
struct rtw_netdev_priv_indicator *pnpi;
- if (!netdev)
- goto RETURN;
-
pnpi = netdev_priv(netdev);
- if (!pnpi->priv)
- goto RETURN;
-
vfree(pnpi->priv);
free_netdev(netdev);
-
-RETURN:
- return;
}
int rtw_change_ifname(struct adapter *padapter, const char *ifname)
@@ -220,7 +195,7 @@ keep_ori:
*/
inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
{
- return (cbuf->write == cbuf->read) ? true : false;
+ return cbuf->write == cbuf->read;
}
/**
diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c
index ffd727fb32e3..68869c5daeff 100644
--- a/drivers/staging/r8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/r8188eu/os_dep/usb_intf.c
@@ -8,7 +8,6 @@
#include "../include/xmit_osdep.h"
#include "../include/hal_intf.h"
#include "../include/osdep_intf.h"
-#include "../include/usb_vendor_req.h"
#include "../include/usb_ops.h"
#include "../include/usb_osintf.h"
#include "../include/rtw_ioctl.h"
@@ -200,8 +199,6 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- int ret = 0;
-
if ((!padapter->bup) || (padapter->bDriverStopped) ||
(padapter->bSurpriseRemoved))
goto exit;
@@ -240,7 +237,7 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
rtw_indicate_disconnect(padapter);
exit:
- return ret;
+ return 0;
}
static int rtw_resume(struct usb_interface *pusb_intf)
diff --git a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
index c4b6dbc8d66d..0269e602b217 100644
--- a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
@@ -106,8 +106,7 @@ u32 rtw_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data;
struct usb_device *pusbd = pdvobj->pusbdev;
- if ((padapter->bDriverStopped) || (padapter->bSurpriseRemoved) ||
- (padapter->pwrctrlpriv.pnp_bstop_trx)) {
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved) {
rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_TX_DENY);
goto exit;
}
@@ -141,7 +140,7 @@ u32 rtw_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
spin_unlock_irqrestore(&pxmitpriv->lock, irqL);
- purb = pxmitbuf->pxmit_urb[0];
+ purb = pxmitbuf->pxmit_urb;
/* translate DMA FIFO addr to pipehandle */
pipe = ffaddr2pipehdl(pdvobj, addr);
@@ -179,25 +178,21 @@ exit:
void rtw_write_port_cancel(struct adapter *padapter)
{
- int i, j;
+ int i;
struct xmit_buf *pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmitbuf;
padapter->bWritePortCancel = true;
for (i = 0; i < NR_XMITBUFF; i++) {
- for (j = 0; j < 8; j++) {
- if (pxmitbuf->pxmit_urb[j])
- usb_kill_urb(pxmitbuf->pxmit_urb[j]);
- }
+ if (pxmitbuf->pxmit_urb)
+ usb_kill_urb(pxmitbuf->pxmit_urb);
pxmitbuf++;
}
pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmit_extbuf;
for (i = 0; i < NR_XMIT_EXTBUFF; i++) {
- for (j = 0; j < 8; j++) {
- if (pxmitbuf->pxmit_urb[j])
- usb_kill_urb(pxmitbuf->pxmit_urb[j]);
- }
+ if (pxmitbuf->pxmit_urb)
+ usb_kill_urb(pxmitbuf->pxmit_urb);
pxmitbuf++;
}
}
diff --git a/drivers/staging/r8188eu/os_dep/xmit_linux.c b/drivers/staging/r8188eu/os_dep/xmit_linux.c
index a6012cffd37e..e430c64e9068 100644
--- a/drivers/staging/r8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/r8188eu/os_dep/xmit_linux.c
@@ -67,8 +67,6 @@ bool rtw_endofpktfile(struct pkt_file *pfile)
int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz)
{
- int i;
-
pxmitbuf->pallocated_buf = kzalloc(alloc_sz, GFP_KERNEL);
if (!pxmitbuf->pallocated_buf)
return _FAIL;
@@ -76,21 +74,17 @@ int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitb
pxmitbuf->pbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
pxmitbuf->dma_transfer_addr = 0;
- for (i = 0; i < 8; i++) {
- pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
- if (!pxmitbuf->pxmit_urb[i])
- return _FAIL;
- }
+ pxmitbuf->pxmit_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!pxmitbuf->pxmit_urb)
+ return _FAIL;
+
return _SUCCESS;
}
void rtw_os_xmit_resource_free(struct adapter *padapter,
struct xmit_buf *pxmitbuf, u32 free_sz)
{
- int i;
-
- for (i = 0; i < 8; i++)
- usb_free_urb(pxmitbuf->pxmit_urb[i]);
+ usb_free_urb(pxmitbuf->pxmit_urb);
kfree(pxmitbuf->pallocated_buf);
}
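
With the URB array reduced to a single URB per xmit buffer, allocation and teardown collapse into one usb_alloc_urb()/usb_free_urb() pair, and the cancel path above kills just that one URB. A minimal sketch of the lifecycle with a hypothetical structure:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/usb.h>

struct example_xmit_buf {
	struct urb *urb;
};

static int example_xmit_buf_init(struct example_xmit_buf *buf)
{
	buf->urb = usb_alloc_urb(0, GFP_KERNEL);
	return buf->urb ? 0 : -ENOMEM;
}

static void example_xmit_buf_cancel(struct example_xmit_buf *buf)
{
	if (buf->urb)
		usb_kill_urb(buf->urb); /* waits for the URB to complete */
}

static void example_xmit_buf_free(struct example_xmit_buf *buf)
{
	usb_free_urb(buf->urb); /* NULL-safe */
	buf->urb = NULL;
}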
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
index 52eeb56c5c76..4abec7b42993 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
@@ -185,10 +185,10 @@ void rtl92e_set_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
for (index = 0; index < 6; index++) {
writeVal = (u32)(priv->MCSTxPowerLevelOriginalOffset[index] +
((index < 2) ? powerBase0 : powerBase1));
- byte0 = (u8)(writeVal & 0x7f);
- byte1 = (u8)((writeVal & 0x7f00)>>8);
- byte2 = (u8)((writeVal & 0x7f0000)>>16);
- byte3 = (u8)((writeVal & 0x7f000000)>>24);
+ byte0 = writeVal & 0x7f;
+ byte1 = (writeVal & 0x7f00) >> 8;
+ byte2 = (writeVal & 0x7f0000) >> 16;
+ byte3 = (writeVal & 0x7f000000) >> 24;
if (byte0 > 0x24)
byte0 = 0x24;
if (byte1 > 0x24)
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index c5e44bbe997c..cd8bbc358d01 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -58,7 +58,7 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
memset(pTxFwInfo, 0x12, 8);
} else {
- tcb_desc->txbuf_size = (u16)frag_length;
+ tcb_desc->txbuf_size = frag_length;
}
seg_ptr = skb_put(skb, frag_length);
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 7f9dee42a04d..4b9249195b5a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -221,7 +221,7 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
&priv->rtllib->current_network.qos_data.parameters;
u8 pAcParam = *val;
u32 eACI = pAcParam;
- union aci_aifsn *pAciAifsn = (union aci_aifsn *) &
+ union aci_aifsn *pAciAifsn = (union aci_aifsn *)&
(qos_parameters->aifs[0]);
u8 acm = pAciAifsn->f.acm;
u8 AcmCtrl = rtl92e_readb(dev, AcmHwCtrl);
@@ -320,8 +320,8 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->eeprom_did = rtl92e_eeprom_read(dev, EEPROM_DID >> 1);
usValue = rtl92e_eeprom_read(dev,
- (u16)(EEPROM_Customer_ID>>1)) >> 8;
- priv->eeprom_CustomerID = (u8)(usValue & 0xff);
+ (EEPROM_Customer_ID >> 1)) >> 8;
+ priv->eeprom_CustomerID = usValue & 0xff;
usValue = rtl92e_eeprom_read(dev,
EEPROM_ICVersion_ChannelPlan>>1);
priv->eeprom_ChannelPlan = usValue&0xff;
@@ -399,9 +399,9 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->EEPROMLegacyHTTxPowerDiff);
if (!priv->AutoloadFailFlag)
- priv->EEPROMThermalMeter = (u8)(((rtl92e_eeprom_read(dev,
+ priv->EEPROMThermalMeter = ((rtl92e_eeprom_read(dev,
(EEPROM_ThermalMeter>>1))) &
- 0xff00)>>8);
+ 0xff00) >> 8;
else
priv->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
RT_TRACE(COMP_INIT, "ThermalMeter = %d\n",
@@ -413,8 +413,8 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
usValue = rtl92e_eeprom_read(dev,
EEPROM_TxPwDiff_CrystalCap >> 1);
priv->EEPROMAntPwDiff = usValue & 0x0fff;
- priv->EEPROMCrystalCap = (u8)((usValue & 0xf000)
- >> 12);
+ priv->EEPROMCrystalCap = (usValue & 0xf000)
+ >> 12;
} else {
priv->EEPROMAntPwDiff =
EEPROM_Default_AntTxPowerDiff;
@@ -811,7 +811,7 @@ start:
rtl92e_config_mac(dev);
- if (priv->card_8192_version > (u8) VERSION_8190_BD) {
+ if (priv->card_8192_version > VERSION_8190_BD) {
rtl92e_get_tx_power(dev);
rtl92e_set_tx_power(dev, priv->chan);
}
@@ -894,9 +894,8 @@ start:
for (i = 0; i < TxBBGainTableLength; i++) {
if (tmpRegA == dm_tx_bb_gain[i]) {
- priv->rfa_txpowertrackingindex = (u8)i;
- priv->rfa_txpowertrackingindex_real =
- (u8)i;
+ priv->rfa_txpowertrackingindex = i;
+ priv->rfa_txpowertrackingindex_real = i;
priv->rfa_txpowertracking_default =
priv->rfa_txpowertrackingindex;
break;
@@ -908,7 +907,7 @@ start:
for (i = 0; i < CCKTxBBGainTableLength; i++) {
if (TempCCk == dm_cck_tx_bb_gain[i][0]) {
- priv->CCKPresentAttentuation_20Mdefault = (u8)i;
+ priv->CCKPresentAttentuation_20Mdefault = i;
break;
}
}
@@ -1176,7 +1175,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
pTxFwInfo->TxHT = (cb_desc->data_rate & 0x80) ? 1 : 0;
- pTxFwInfo->TxRate = _rtl92e_rate_mgn_to_hw((u8)cb_desc->data_rate);
+ pTxFwInfo->TxRate = _rtl92e_rate_mgn_to_hw(cb_desc->data_rate);
pTxFwInfo->EnableCPUDur = cb_desc->bTxEnableFwCalcDur;
pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
pTxFwInfo->TxRate, cb_desc);
@@ -1195,7 +1194,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
pTxFwInfo->CtsEnable = (cb_desc->bCTSEnable) ? 1 : 0;
pTxFwInfo->RtsSTBC = (cb_desc->bRTSSTBC) ? 1 : 0;
pTxFwInfo->RtsHT = (cb_desc->rts_rate&0x80) ? 1 : 0;
- pTxFwInfo->RtsRate = _rtl92e_rate_mgn_to_hw((u8)cb_desc->rts_rate);
+ pTxFwInfo->RtsRate = _rtl92e_rate_mgn_to_hw(cb_desc->rts_rate);
pTxFwInfo->RtsBandwidth = 0;
pTxFwInfo->RtsSubcarrier = cb_desc->RTSSC;
pTxFwInfo->RtsShort = (pTxFwInfo->RtsHT == 0) ?
@@ -1226,7 +1225,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
pdesc->LINIP = 0;
pdesc->CmdInit = 1;
pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
- pdesc->PktSize = (u16)skb->len-sizeof(struct tx_fwinfo_8190pci);
+ pdesc->PktSize = skb->len - sizeof(struct tx_fwinfo_8190pci);
pdesc->SecCAMID = 0;
pdesc->RATid = cb_desc->RATRIndex;
@@ -1299,11 +1298,10 @@ void rtl92e_fill_tx_cmd_desc(struct net_device *dev, struct tx_desc_cmd *entry,
entry_tmp->CmdInit = DESC_PACKET_TYPE_NORMAL;
entry_tmp->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
- entry_tmp->PktSize = (u16)(cb_desc->pkt_size +
- entry_tmp->Offset);
+ entry_tmp->PktSize = cb_desc->pkt_size + entry_tmp->Offset;
entry_tmp->QueueSelect = QSLT_CMD;
entry_tmp->TxFWInfoSize = 0x08;
- entry_tmp->RATid = (u8)DESC_PACKET_TYPE_INIT;
+ entry_tmp->RATid = DESC_PACKET_TYPE_INIT;
}
entry->TxBufferSize = skb->len;
entry->TxBuffAddr = mapping;
@@ -1613,9 +1611,8 @@ static void _rtl92e_query_rxphystatus(
total_rssi += RSSI;
if (bpacket_match_bssid) {
- pstats->RxMIMOSignalStrength[i] = (u8) RSSI;
- precord_stats->RxMIMOSignalStrength[i] =
- (u8) RSSI;
+ pstats->RxMIMOSignalStrength[i] = RSSI;
+ precord_stats->RxMIMOSignalStrength[i] = RSSI;
}
}
@@ -1661,14 +1658,14 @@ static void _rtl92e_query_rxphystatus(
if (is_cck_rate) {
pstats->SignalStrength = precord_stats->SignalStrength =
- (u8)(_rtl92e_signal_scale_mapping(priv,
- (long)pwdb_all));
+ _rtl92e_signal_scale_mapping(priv,
+ (long)pwdb_all);
} else {
if (rf_rx_num != 0)
pstats->SignalStrength = precord_stats->SignalStrength =
- (u8)(_rtl92e_signal_scale_mapping(priv,
- (long)(total_rssi /= rf_rx_num)));
+ _rtl92e_signal_scale_mapping(priv,
+ (long)(total_rssi /= rf_rx_num));
}
}
@@ -1709,8 +1706,7 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
slide_rssi_index = 0;
tmp_val = priv->stats.slide_rssi_total/slide_rssi_statistics;
- priv->stats.signal_strength = rtl92e_translate_to_dbm(priv,
- (u8)tmp_val);
+ priv->stats.signal_strength = rtl92e_translate_to_dbm(priv, tmp_val);
curr_st->rssi = priv->stats.signal_strength;
if (!prev_st->bPacketMatchBSSID) {
if (!prev_st->bToSelfBA)
@@ -2036,7 +2032,7 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
pDrvInfo = (struct rx_fwinfo *)(skb->data + stats->RxBufShift);
stats->rate = _rtl92e_rate_hw_to_mgn((bool)pDrvInfo->RxHT,
- (u8)pDrvInfo->RxRate);
+ pDrvInfo->RxRate);
stats->bShortPreamble = pDrvInfo->SPLCP;
_rtl92e_update_received_rate_histogram_stats(dev, stats);
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index 9b025b9fa7ab..38110fa4f36d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
@@ -34,8 +34,7 @@ static bool _rtl92e_fw_boot_cpu(struct net_device *dev)
netdev_dbg(dev, "Download Firmware: Put code ok!\n");
CPU_status = rtl92e_readl(dev, CPU_GEN);
- rtl92e_writeb(dev, CPU_GEN,
- (u8)((CPU_status|CPU_GEN_PWR_STB_CPU)&0xff));
+ rtl92e_writeb(dev, CPU_GEN, (CPU_status | CPU_GEN_PWR_STB_CPU) & 0xff);
mdelay(1);
if (!_rtl92e_wait_for_fw(dev, CPU_GEN_BOOT_RDY, 200)) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 411138102948..f92551094738 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -919,7 +919,7 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
continue;
switch (CurrentCmd->CmdID) {
case CmdID_SetTxPowerLevel:
- if (priv->IC_Cut > (u8)VERSION_8190_BD)
+ if (priv->IC_Cut > VERSION_8190_BD)
_rtl92e_set_tx_power_level(dev,
channel);
break;
@@ -929,11 +929,11 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
break;
case CmdID_WritePortUshort:
rtl92e_writew(dev, CurrentCmd->Para1,
- (u16)CurrentCmd->Para2);
+ CurrentCmd->Para2);
break;
case CmdID_WritePortUchar:
rtl92e_writeb(dev, CurrentCmd->Para1,
- (u8)CurrentCmd->Para2);
+ CurrentCmd->Para2);
break;
case CmdID_RF_WriteReg:
for (eRFPath = 0; eRFPath <
@@ -1299,17 +1299,17 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
DIG_ALGO_BY_FALSE_ALARM)
rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
priv->initgain_backup.xaagccore1 =
- (u8)rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1,
- BitMask);
+ rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1,
+ BitMask);
priv->initgain_backup.xbagccore1 =
- (u8)rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1,
- BitMask);
+ rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1,
+ BitMask);
priv->initgain_backup.xcagccore1 =
- (u8)rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1,
- BitMask);
+ rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1,
+ BitMask);
priv->initgain_backup.xdagccore1 =
- (u8)rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1,
- BitMask);
+ rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1,
+ BitMask);
BitMask = bMaskByte2;
priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev,
rCCK0_CCA, BitMask);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 756d8db51937..d58800d06e8f 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -633,7 +633,7 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
rtl92e_writeb(dev, FW_Busy_Flag, 0);
priv->rtllib->bdynamic_txpower_enable = false;
- powerlevelOFDM24G = (u8)(priv->Pwr_Track>>24);
+ powerlevelOFDM24G = priv->Pwr_Track >> 24;
RF_Type = priv->rf_type;
Value = (RF_Type<<8) | powerlevelOFDM24G;
@@ -833,7 +833,7 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
bMaskDWord);
for (i = 0; i < OFDM_Table_Length; i++) {
if (tmpRegA == OFDMSwingTable[i]) {
- priv->OFDM_index[0] = (u8)i;
+ priv->OFDM_index[0] = i;
RT_TRACE(COMP_POWER_TRACKING,
"Initial reg0x%x = 0x%x, OFDM_index = 0x%x\n",
rOFDM0_XATxIQImbalance, tmpRegA,
@@ -844,7 +844,7 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
TempCCk = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1, bMaskByte2);
for (i = 0; i < CCK_Table_length; i++) {
if (TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0]) {
- priv->CCK_index = (u8) i;
+ priv->CCK_index = i;
RT_TRACE(COMP_POWER_TRACKING,
"Initial reg0x%x = 0x%x, CCK_index = 0x%x\n",
rCCK0_TxFilter1, TempCCk,
@@ -1041,7 +1041,7 @@ static void _rtl92e_dm_cck_tx_power_adjust_tssi(struct net_device *dev,
{
u32 TempVal;
struct r8192_priv *priv = rtllib_priv(dev);
- u8 attenuation = (u8)priv->CCKPresentAttentuation;
+ u8 attenuation = priv->CCKPresentAttentuation;
TempVal = 0;
if (!bInCH14) {
@@ -1245,10 +1245,10 @@ void rtl92e_dm_backup_state(struct net_device *dev)
return;
rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
- priv->initgain_backup.xaagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1, bit_mask);
- priv->initgain_backup.xbagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1, bit_mask);
- priv->initgain_backup.xcagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1, bit_mask);
- priv->initgain_backup.xdagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask);
+ priv->initgain_backup.xaagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1, bit_mask);
+ priv->initgain_backup.xbagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1, bit_mask);
+ priv->initgain_backup.xcagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1, bit_mask);
+ priv->initgain_backup.xdagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask);
bit_mask = bMaskByte2;
priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev, rCCK0_CCA, bit_mask);
@@ -1535,7 +1535,7 @@ static void _rtl92e_dm_initial_gain(struct net_device *dev)
if ((dm_digtable.pre_ig_value != dm_digtable.cur_ig_value)
|| !initialized || force_write) {
- initial_gain = (u8)dm_digtable.cur_ig_value;
+ initial_gain = dm_digtable.cur_ig_value;
rtl92e_writeb(dev, rOFDM0_XAAGCCore1, initial_gain);
rtl92e_writeb(dev, rOFDM0_XBAGCCore1, initial_gain);
rtl92e_writeb(dev, rOFDM0_XCAGCCore1, initial_gain);
@@ -2513,5 +2513,5 @@ static void _rtl92e_dm_send_rssi_to_fw(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- rtl92e_writeb(dev, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb);
+ rtl92e_writeb(dev, DRIVER_RSSI, priv->undecorated_smoothed_pwdb);
}
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 97afea4c3511..7d04966afdd9 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -238,7 +238,7 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
skb->data, skb->len);
#endif
- req = (struct rtllib_hdr_3addr *) skb->data;
+ req = (struct rtllib_hdr_3addr *)skb->data;
tag = (u8 *)req;
dst = (u8 *)(&req->addr2[0]);
tag += sizeof(struct rtllib_hdr_3addr);
@@ -343,7 +343,6 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
goto OnADDBARsp_Reject;
}
-
if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
(u8)(pBaParamSet->field.tid), TX_DIR, false)) {
netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__);
@@ -355,7 +354,6 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
pPendingBA = &pTS->TxPendingBARecord;
pAdmittedBA = &pTS->TxAdmittedBARecord;
-
if (pAdmittedBA->b_valid) {
netdev_dbg(ieee->dev, "%s(): ADDBA response already admitted\n",
__func__);
@@ -374,7 +372,6 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
DeActivateBAEntry(ieee, pPendingBA);
}
-
if (*pStatusCode == ADDBA_STATUS_SUCCESS) {
if (pBaParamSet->field.ba_policy == BA_POLICY_DELAYED) {
pTS->bAddBaReqDelayed = true;
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index c985e4ebc545..0ecd81a81866 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -1585,7 +1585,7 @@ struct rtllib_device {
short sta_sleep;
int ps_timeout;
int ps_period;
- struct tasklet_struct ps_task;
+ struct work_struct ps_task;
u64 ps_time;
bool polling;
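
The power-save handler moves from a tasklet to a work item here; the matching schedule_work() call and the container_of()-based handler appear in the rtllib_rx.c and rtllib_softmac.c hunks further down. A minimal sketch of the tasklet-to-workqueue pattern with hypothetical names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_dev {
	struct work_struct ps_work;
};

static void example_ps_worker(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, ps_work);

	/* Runs in process context, so it may sleep, unlike a tasklet. */
	(void)dev;
}

static void example_dev_init(struct example_dev *dev)
{
	INIT_WORK(&dev->ps_work, example_ps_worker);
}

static void example_dev_kick(struct example_dev *dev)
{
	schedule_work(&dev->ps_work); /* queue on the system workqueue */
}

Work items tolerate sleeping callees, which is why this conversion is a common cleanup for handlers that do more than trivial bookkeeping.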
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
index ed968c01c7ff..a8d22da8bc9a 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
@@ -103,7 +103,7 @@ static int ccmp_init_iv_and_aad(struct rtllib_hdr_4addr *hdr,
if (a4_included)
aad_len += 6;
if (qc_included) {
- pos = (u8 *) &hdr->addr4;
+ pos = (u8 *)&hdr->addr4;
if (a4_included)
pos += 6;
qc = *pos & 0x0f;
@@ -130,13 +130,13 @@ static int ccmp_init_iv_and_aad(struct rtllib_hdr_4addr *hdr,
* A4 (if present)
* QC (if present)
*/
- pos = (u8 *) hdr;
+ pos = (u8 *)hdr;
aad[0] = pos[0] & 0x8f;
aad[1] = pos[1] & 0xc7;
memcpy(&aad[2], &hdr->addr1, ETH_ALEN);
memcpy(&aad[8], &hdr->addr2, ETH_ALEN);
memcpy(&aad[14], &hdr->addr3, ETH_ALEN);
- pos = (u8 *) &hdr->seq_ctl;
+ pos = (u8 *)&hdr->seq_ctl;
aad[20] = pos[0] & 0x0f;
aad[21] = 0; /* all bits masked */
memset(aad + 22, 0, 8);
@@ -186,7 +186,7 @@ static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = key->tx_pn[1];
*pos++ = key->tx_pn[0];
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
if (!tcb_desc->bHwSec) {
struct aead_request *req;
struct scatterlist sg[2];
@@ -235,7 +235,7 @@ static int rtllib_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return -1;
}
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
pos = skb->data + hdr_len;
keyidx = pos[3];
if (!(keyidx & (1 << 5))) {
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 4a760ecbc31e..8bc95651e384 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -136,7 +136,7 @@ static inline u16 Hi16(u32 val)
static inline u16 Mk16(u8 hi, u8 lo)
{
- return lo | (((u16) hi) << 8);
+ return lo | (hi << 8);
}
@@ -220,7 +220,7 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
/* Make temporary area overlap WEP seed so that the final copy can be
* avoided on little endian hosts.
*/
- u16 *PPK = (u16 *) &WEPSeed[4];
+ u16 *PPK = (u16 *)&WEPSeed[4];
/* Step 1 - make copy of TTAK and bring in TSC */
PPK[0] = TTAK[0];
@@ -231,15 +231,15 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
PPK[5] = TTAK[4] + IV16;
/* Step 2 - 96-bit bijective mixing using S-box */
- PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *) &TK[0]));
- PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *) &TK[2]));
- PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *) &TK[4]));
- PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *) &TK[6]));
- PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *) &TK[8]));
- PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *) &TK[10]));
-
- PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *) &TK[12]));
- PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *) &TK[14]));
+ PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *)&TK[0]));
+ PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *)&TK[2]));
+ PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *)&TK[4]));
+ PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *)&TK[6]));
+ PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *)&TK[8]));
+ PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *)&TK[10]));
+
+ PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *)&TK[12]));
+ PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *)&TK[14]));
PPK[2] += RotR1(PPK[1]);
PPK[3] += RotR1(PPK[2]);
PPK[4] += RotR1(PPK[3]);
@@ -251,7 +251,7 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
WEPSeed[0] = Hi8(IV16);
WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
WEPSeed[2] = Lo8(IV16);
- WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *) &TK[0])) >> 1);
+ WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *)&TK[0])) >> 1);
#ifdef __BIG_ENDIAN
{
@@ -280,7 +280,7 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb->len < hdr_len)
return -1;
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
if (!tcb_desc->bHwSec) {
if (!tkey->tx_phase1_done) {
@@ -357,7 +357,7 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
if (skb->len < hdr_len + 8 + 4)
return -1;
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
pos = skb->data + hdr_len;
keyidx = pos[3];
if (!(keyidx & (1 << 5))) {
@@ -485,7 +485,7 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
{
struct rtllib_hdr_4addr *hdr11;
- hdr11 = (struct rtllib_hdr_4addr *) skb->data;
+ hdr11 = (struct rtllib_hdr_4addr *)skb->data;
switch (le16_to_cpu(hdr11->frame_ctl) &
(RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS)) {
case RTLLIB_FCTL_TODS:
@@ -518,7 +518,7 @@ static int rtllib_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
u8 *pos;
struct rtllib_hdr_4addr *hdr;
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
netdev_dbg(skb->dev,
@@ -558,7 +558,7 @@ static void rtllib_michael_mic_failure(struct net_device *dev,
ether_addr_copy(ev.src_addr.sa_data, hdr->addr2);
memset(&wrqu, 0, sizeof(wrqu));
wrqu.data.length = sizeof(ev);
- wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev);
+ wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev);
}
static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
@@ -568,7 +568,7 @@ static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
u8 mic[8];
struct rtllib_hdr_4addr *hdr;
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
if (!tkey->key_set)
return -1;
@@ -584,7 +584,7 @@ static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
struct rtllib_hdr_4addr *hdr;
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
netdev_dbg(skb->dev,
"Michael MIC verification failed for MSDU from %pM keyidx=%d\n",
hdr->addr2, keyidx);
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index eb904b42f9c6..abe5c153f74e 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -250,7 +250,7 @@ static int rtllib_is_eapol_frame(struct rtllib_device *ieee,
if (skb->len < 24)
return 0;
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
/* check that the frame is unicast frame to us */
@@ -299,7 +299,7 @@ rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
tcb_desc->bHwSec = 0;
}
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
atomic_inc(&crypt->refcnt);
@@ -339,7 +339,7 @@ rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
tcb_desc->bHwSec = 0;
}
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
atomic_inc(&crypt->refcnt);
@@ -936,7 +936,7 @@ static int rtllib_rx_check_duplicate(struct rtllib_device *ieee,
} else {
struct rx_ts_record *pRxTS = NULL;
- if (GetTs(ieee, (struct ts_common_info **) &pRxTS, hdr->addr2,
+ if (GetTs(ieee, (struct ts_common_info **)&pRxTS, hdr->addr2,
(u8)Frame_QoSTID((u8 *)(skb->data)), RX_DIR, true)) {
if ((fc & (1<<11)) && (frag == pRxTS->rx_last_frag_num) &&
(WLAN_GET_SEQ_SEQ(sc) == pRxTS->rx_last_seq_num))
@@ -1100,7 +1100,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
return -1;
}
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
if ((frag != 0 || (fc & RTLLIB_FCTL_MOREFRAGS))) {
int flen;
struct sk_buff *frag_skb = rtllib_frag_cache_get(ieee, hdr);
@@ -1152,7 +1152,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
* delivered, so remove skb from fragment cache
*/
skb = frag_skb;
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
rtllib_frag_cache_invalidate(ieee, hdr);
}
@@ -1165,7 +1165,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
return -1;
}
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
if (crypt && !(fc & RTLLIB_FCTL_WEP) && !ieee->open_wep) {
if (/*ieee->ieee802_1x &&*/
rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
@@ -1397,13 +1397,13 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
goto rx_exit;
/* Get TS for Rx Reorder */
- hdr = (struct rtllib_hdr_4addr *) skb->data;
+ hdr = (struct rtllib_hdr_4addr *)skb->data;
if (ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
&& !is_multicast_ether_addr(hdr->addr1)
&& (!bToOtherSTA)) {
TID = Frame_QoSTID(skb->data);
SeqNum = WLAN_GET_SEQ_SEQ(sc);
- GetTs(ieee, (struct ts_common_info **) &pTS, hdr->addr2, TID,
+ GetTs(ieee, (struct ts_common_info **)&pTS, hdr->addr2, TID,
RX_DIR, true);
if (TID != 0 && TID != 3)
ieee->bis_any_nonbepkts = true;
@@ -2053,7 +2053,7 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
}
network->ssid_len = min(info_element->len,
- (u8) IW_ESSID_MAX_SIZE);
+ (u8)IW_ESSID_MAX_SIZE);
memcpy(network->ssid, info_element->data,
network->ssid_len);
if (network->ssid_len < IW_ESSID_MAX_SIZE)
@@ -2721,7 +2721,7 @@ static void rtllib_rx_mgt(struct rtllib_device *ieee,
if (ieee->sta_sleep || (ieee->ps != RTLLIB_PS_DISABLED &&
ieee->iw_mode == IW_MODE_INFRA &&
ieee->state == RTLLIB_LINKED))
- tasklet_schedule(&ieee->ps_task);
+ schedule_work(&ieee->ps_task);
break;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 4b6c2295a3cf..b5f4d35954a9 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -202,7 +202,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
unsigned long flags;
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
struct rtllib_hdr_3addr *header =
- (struct rtllib_hdr_3addr *) skb->data;
+ (struct rtllib_hdr_3addr *)skb->data;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);
@@ -279,7 +279,7 @@ softmac_ps_mgmt_xmit(struct sk_buff *skb,
{
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
struct rtllib_hdr_3addr *header =
- (struct rtllib_hdr_3addr *) skb->data;
+ (struct rtllib_hdr_3addr *)skb->data;
u16 fc, type, stype;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);
@@ -651,9 +651,9 @@ static void rtllib_beacons_stop(struct rtllib_device *ieee)
spin_lock_irqsave(&ieee->beacon_lock, flags);
ieee->beacon_txing = 0;
- del_timer_sync(&ieee->beacon_timer);
spin_unlock_irqrestore(&ieee->beacon_lock, flags);
+ del_timer_sync(&ieee->beacon_timer);
}
@@ -856,9 +856,9 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
encrypt = ieee->host_encrypt && crypt && crypt->ops &&
((strcmp(crypt->ops->name, "R-WEP") == 0 || wpa_ie_len));
if (ieee->pHTInfo->bCurrentHTSupport) {
- tmp_ht_cap_buf = (u8 *) &(ieee->pHTInfo->SelfHTCap);
+ tmp_ht_cap_buf = (u8 *)&(ieee->pHTInfo->SelfHTCap);
tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
- tmp_ht_info_buf = (u8 *) &(ieee->pHTInfo->SelfHTInfo);
+ tmp_ht_info_buf = (u8 *)&(ieee->pHTInfo->SelfHTInfo);
tmp_ht_info_len = sizeof(ieee->pHTInfo->SelfHTInfo);
HTConstructCapabilityElement(ieee, tmp_ht_cap_buf,
&tmp_ht_cap_len, encrypt, false);
@@ -912,7 +912,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
beacon_buf->info_element[0].id = MFIE_TYPE_SSID;
beacon_buf->info_element[0].len = ssid_len;
- tag = (u8 *) beacon_buf->info_element[0].data;
+ tag = (u8 *)beacon_buf->info_element[0].data;
memcpy(tag, ssid, ssid_len);
@@ -1303,7 +1303,7 @@ rtllib_association_req(struct rtllib_network *beacon,
0x00};
struct octet_string osCcxRmCap;
- osCcxRmCap.Octet = (u8 *) CcxRmCapBuf;
+ osCcxRmCap.Octet = (u8 *)CcxRmCapBuf;
osCcxRmCap.Length = sizeof(CcxRmCapBuf);
tag = skb_put(skb, ccxrm_ie_len);
*tag++ = MFIE_TYPE_GENERIC;
@@ -1764,7 +1764,7 @@ static void rtllib_softmac_check_all_nets(struct rtllib_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-static inline u16 auth_parse(struct net_device *dev, struct sk_buff *skb,
+static inline int auth_parse(struct net_device *dev, struct sk_buff *skb,
u8 **challenge, int *chlen)
{
struct rtllib_authentication *a;
@@ -1773,10 +1773,10 @@ static inline u16 auth_parse(struct net_device *dev, struct sk_buff *skb,
if (skb->len < (sizeof(struct rtllib_authentication) -
sizeof(struct rtllib_info_element))) {
netdev_dbg(dev, "invalid len in auth resp: %d\n", skb->len);
- return 0xcafe;
+ return -EINVAL;
}
*challenge = NULL;
- a = (struct rtllib_authentication *) skb->data;
+ a = (struct rtllib_authentication *)skb->data;
if (skb->len > (sizeof(struct rtllib_authentication) + 3)) {
t = skb->data + sizeof(struct rtllib_authentication);
@@ -1787,7 +1787,13 @@ static inline u16 auth_parse(struct net_device *dev, struct sk_buff *skb,
return -ENOMEM;
}
}
- return le16_to_cpu(a->status);
+
+ if (a->status) {
+ netdev_dbg(dev, "auth_parse() failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
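
auth_parse() now reports failure as a negative errno instead of the magic 0xcafe status value, so the caller's existing if (errcode) test still works while the value becomes meaningful. A tiny, generic illustration of the convention:

#include <linux/errno.h>

/* Return 0 on success or a negative errno on failure. */
static int example_parse(const unsigned char *buf, int len)
{
	if (len < 4)
		return -EINVAL;	/* malformed input */
	if (buf[2] || buf[3])
		return -EINVAL;	/* non-zero status field */
	return 0;
}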
static int auth_rq_parse(struct net_device *dev, struct sk_buff *skb, u8 *dest)
@@ -1799,7 +1805,7 @@ static int auth_rq_parse(struct net_device *dev, struct sk_buff *skb, u8 *dest)
netdev_dbg(dev, "invalid len in auth request: %d\n", skb->len);
return -1;
}
- a = (struct rtllib_authentication *) skb->data;
+ a = (struct rtllib_authentication *)skb->data;
ether_addr_copy(dest, a->header.addr2);
@@ -1817,7 +1823,7 @@ static short probe_rq_parse(struct rtllib_device *ieee, struct sk_buff *skb,
u8 *ssid = NULL;
u8 ssidlen = 0;
struct rtllib_hdr_3addr *header =
- (struct rtllib_hdr_3addr *) skb->data;
+ (struct rtllib_hdr_3addr *)skb->data;
bool bssid_match;
if (skb->len < sizeof(struct rtllib_hdr_3addr))
@@ -1865,7 +1871,7 @@ static int assoc_rq_parse(struct net_device *dev, struct sk_buff *skb, u8 *dest)
return -1;
}
- a = (struct rtllib_assoc_request_frame *) skb->data;
+ a = (struct rtllib_assoc_request_frame *)skb->data;
ether_addr_copy(dest, a->header.addr2);
@@ -1884,7 +1890,7 @@ static inline u16 assoc_parse(struct rtllib_device *ieee, struct sk_buff *skb,
return 0xcafe;
}
- response_head = (struct rtllib_assoc_response_frame *) skb->data;
+ response_head = (struct rtllib_assoc_response_frame *)skb->data;
*aid = le16_to_cpu(response_head->aid) & 0x3fff;
status_code = le16_to_cpu(response_head->status);
@@ -2042,13 +2048,15 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
}
-static inline void rtllib_sta_ps(struct tasklet_struct *t)
+static inline void rtllib_sta_ps(struct work_struct *work)
{
- struct rtllib_device *ieee = from_tasklet(ieee, t, ps_task);
+ struct rtllib_device *ieee;
u64 time;
short sleep;
unsigned long flags, flags2;
+ ieee = container_of(work, struct rtllib_device, ps_task);
+
spin_lock_irqsave(&ieee->lock, flags);
if ((ieee->ps == RTLLIB_PS_DISABLED ||
@@ -2167,7 +2175,7 @@ EXPORT_SYMBOL(rtllib_ps_tx_ack);
static void rtllib_process_action(struct rtllib_device *ieee,
struct sk_buff *skb)
{
- struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+ struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
u8 *act = rtllib_get_payload((struct rtllib_hdr *)header);
u8 category = 0;
@@ -2206,7 +2214,7 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
int aid;
u8 *ies;
struct rtllib_assoc_response_frame *assoc_resp;
- struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+ struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
u16 frame_ctl = le16_to_cpu(header->frame_ctl);
netdev_dbg(ieee->dev, "received [RE]ASSOCIATION RESPONSE (%d)\n",
@@ -2278,7 +2286,7 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
{
- u16 errcode;
+ int errcode;
u8 *challenge;
int chlen = 0;
bool bSupportNmode = true, bHalfSupportNmode = false;
@@ -2288,8 +2296,7 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
if (errcode) {
ieee->softmac_stats.rx_auth_rs_err++;
netdev_info(ieee->dev,
- "Authentication response status code 0x%x",
- errcode);
+ "Authentication response status code %d", errcode);
rtllib_associate_abort(ieee);
return;
}
@@ -2351,7 +2358,7 @@ rtllib_rx_auth(struct rtllib_device *ieee, struct sk_buff *skb,
static inline int
rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb)
{
- struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+ struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
u16 frame_ctl;
if (memcmp(header->addr3, ieee->current_network.bssid, ETH_ALEN) != 0)
@@ -2391,7 +2398,7 @@ inline int rtllib_rx_frame_softmac(struct rtllib_device *ieee,
struct rtllib_rx_stats *rx_stats, u16 type,
u16 stype)
{
- struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+ struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
u16 frame_ctl;
if (!ieee->proto_started)
@@ -2811,7 +2818,7 @@ static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
if (!skb)
return NULL;
- b = (struct rtllib_probe_response *) skb->data;
+ b = (struct rtllib_probe_response *)skb->data;
b->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_BEACON);
return skb;
@@ -2827,7 +2834,7 @@ struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee)
if (!skb)
return NULL;
- b = (struct rtllib_probe_response *) skb->data;
+ b = (struct rtllib_probe_response *)skb->data;
b->header.seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
@@ -3028,7 +3035,7 @@ int rtllib_softmac_init(struct rtllib_device *ieee)
spin_lock_init(&ieee->mgmt_tx_lock);
spin_lock_init(&ieee->beacon_lock);
- tasklet_setup(&ieee->ps_task, rtllib_sta_ps);
+ INIT_WORK(&ieee->ps_task, rtllib_sta_ps);
return 0;
}
@@ -3050,8 +3057,8 @@ void rtllib_softmac_free(struct rtllib_device *ieee)
cancel_work_sync(&ieee->associate_complete_wq);
cancel_work_sync(&ieee->ips_leave_wq);
cancel_work_sync(&ieee->wx_sync_scan_wq);
+ cancel_work_sync(&ieee->ps_task);
mutex_unlock(&ieee->wx_mutex);
- tasklet_kill(&ieee->ps_task);
}
static inline struct sk_buff *
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 57a6d1130b6a..70a62ca0f69a 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -41,8 +41,8 @@ int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
/* if setting by freq convert to channel */
if (fwrq->e == 1) {
- if ((fwrq->m >= (int) 2.412e8 &&
- fwrq->m <= (int) 2.487e8)) {
+ if ((fwrq->m >= (int)2.412e8 &&
+ fwrq->m <= (int)2.487e8)) {
int f = fwrq->m / 100000;
int c = 0;
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index 0d67d5880377..cf9a240924f2 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -660,7 +660,7 @@ int rtllib_wx_set_mlme(struct rtllib_device *ieee,
{
u8 i = 0;
bool deauth = false;
- struct iw_mlme *mlme = (struct iw_mlme *) extra;
+ struct iw_mlme *mlme = (struct iw_mlme *)extra;
if (ieee->state != RTLLIB_LINKED)
return -ENOLINK;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 68c0bf9a191a..b577f9c81f85 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -1790,7 +1790,7 @@ struct ieee80211_device {
short sta_sleep;
int ps_timeout;
int ps_period;
- struct tasklet_struct ps_task;
+ struct work_struct ps_task;
u32 ps_th;
u32 ps_tl;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
index 101c28265e91..f17d07dad56d 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
@@ -362,7 +362,7 @@ static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
struct ieee80211_ccmp_data *data = priv;
if (len < CCMP_TK_LEN)
- return -1;
+ return 0;
if (!data->key_set)
return 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 689d8843f538..7b120b8cb982 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -637,7 +637,7 @@ static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv)
struct ieee80211_tkip_data *tkey = priv;
if (len < TKIP_KEY_LEN)
- return -1;
+ return 0;
if (!tkey->key_set)
return 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index 8a51ea1dd6e5..a2cdf3bfd1a4 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -201,7 +201,7 @@ static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
struct prism2_wep_data *wep = priv;
if (len < wep->key_len)
- return -1;
+ return 0;
memcpy(key, wep->key, wep->key_len);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 1a43979939a8..92001cb36730 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -528,9 +528,9 @@ static void ieee80211_beacons_stop(struct ieee80211_device *ieee)
spin_lock_irqsave(&ieee->beacon_lock, flags);
ieee->beacon_txing = 0;
- del_timer_sync(&ieee->beacon_timer);
spin_unlock_irqrestore(&ieee->beacon_lock, flags);
+ del_timer_sync(&ieee->beacon_timer);
}
void ieee80211_stop_send_beacons(struct ieee80211_device *ieee)
@@ -1461,13 +1461,13 @@ void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
+static inline int auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
{
struct ieee80211_authentication *a;
u8 *t;
if (skb->len < (sizeof(struct ieee80211_authentication) - sizeof(struct ieee80211_info_element))) {
IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
- return 0xcafe;
+ return -EINVAL;
}
*challenge = NULL;
a = (struct ieee80211_authentication *)skb->data;
@@ -1482,7 +1482,12 @@ static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
}
}
- return le16_to_cpu(a->status);
+ if (a->status) {
+ IEEE80211_DEBUG_MGMT("auth_parse() failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
static int auth_rq_parse(struct sk_buff *skb, u8 *dest)
@@ -1687,14 +1692,15 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
return 1;
}
-static inline void ieee80211_sta_ps(struct tasklet_struct *t)
+static inline void ieee80211_sta_ps(struct work_struct *work)
{
- struct ieee80211_device *ieee = from_tasklet(ieee, t, ps_task);
+ struct ieee80211_device *ieee;
u32 th, tl;
short sleep;
-
unsigned long flags, flags2;
+ ieee = container_of(work, struct ieee80211_device, ps_task);
+
spin_lock_irqsave(&ieee->lock, flags);
if ((ieee->ps == IEEE80211_PS_DISABLED ||
@@ -1826,7 +1832,7 @@ static void ieee80211_check_auth_response(struct ieee80211_device *ieee,
{
/* default support N mode, disable halfNmode */
bool bSupportNmode = true, bHalfSupportNmode = false;
- u16 errcode;
+ int errcode;
u8 *challenge;
int chlen = 0;
u32 iotAction;
@@ -1875,7 +1881,7 @@ static void ieee80211_check_auth_response(struct ieee80211_device *ieee,
}
} else {
ieee->softmac_stats.rx_auth_rs_err++;
- IEEE80211_DEBUG_MGMT("Auth response status code 0x%x", errcode);
+ IEEE80211_DEBUG_MGMT("Auth response status code %d\n", errcode);
ieee80211_associate_abort(ieee);
}
}
@@ -1897,7 +1903,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
if (ieee->sta_sleep || (ieee->ps != IEEE80211_PS_DISABLED &&
ieee->iw_mode == IW_MODE_INFRA &&
ieee->state == IEEE80211_LINKED))
- tasklet_schedule(&ieee->ps_task);
+ schedule_work(&ieee->ps_task);
if (WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_PROBE_RESP &&
WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_BEACON)
@@ -2602,7 +2608,7 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
spin_lock_init(&ieee->mgmt_tx_lock);
spin_lock_init(&ieee->beacon_lock);
- tasklet_setup(&ieee->ps_task, ieee80211_sta_ps);
+ INIT_WORK(&ieee->ps_task, ieee80211_sta_ps);
}
void ieee80211_softmac_free(struct ieee80211_device *ieee)
@@ -2613,7 +2619,7 @@ void ieee80211_softmac_free(struct ieee80211_device *ieee)
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
-
+ cancel_work_sync(&ieee->ps_task);
mutex_unlock(&ieee->wx_mutex);
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index 78cc8f357bbc..d6829cf6f7e3 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -470,7 +470,9 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
return 0;
}
len = crypt->ops->get_key(keybuf, SCM_KEY_LEN, NULL, crypt->priv);
- erq->length = (len >= 0 ? len : 0);
+ if (len < 0)
+ len = 0;
+ erq->length = len;
erq->flags |= IW_ENCODE_ENABLED;
@@ -686,9 +688,9 @@ int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
} else {
if (strcmp(crypt->ops->name, "WEP") == 0)
ext->alg = IW_ENCODE_ALG_WEP;
- else if (strcmp(crypt->ops->name, "TKIP"))
+ else if (strcmp(crypt->ops->name, "TKIP") == 0)
ext->alg = IW_ENCODE_ALG_TKIP;
- else if (strcmp(crypt->ops->name, "CCMP"))
+ else if (strcmp(crypt->ops->name, "CCMP") == 0)
ext->alg = IW_ENCODE_ALG_CCMP;
else
return -EINVAL;
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index dba3f2db9f48..a93f09033d9d 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -480,7 +480,7 @@ void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u
}
memset(posHTCap, 0, *len);
if (pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC) {
- u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
+ static const u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap));
pCapELE = (struct ht_capability_ele *)&posHTCap[4];
@@ -940,10 +940,8 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
else
pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_64K;
} else {
- if (pPeerHTCap->MaxRxAMPDUFactor < HT_AGG_SIZE_32K)
- pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
- else
- pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_32K;
+ pHTInfo->CurrentAMPDUFactor = min_t(u32, pPeerHTCap->MaxRxAMPDUFactor,
+ HT_AGG_SIZE_32K);
}
}
@@ -951,10 +949,9 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
* <2> Set AMPDU Minimum MPDU Start Spacing
* 802.11n 3.0 section 9.7d.3
*/
- if (pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
- pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
- else
- pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity;
+ pHTInfo->CurrentMPDUDensity = max_t(u32, pHTInfo->MPDU_Density,
+ pPeerHTCap->MPDUDensity);
+
if (ieee->pairwise_key_type != KEY_TYPE_NA)
pHTInfo->CurrentMPDUDensity = 7; // 8us
// Force TX AMSDU
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index ce807c9d4219..2ca925f35830 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -2537,7 +2537,7 @@ static short rtl8192_init(struct net_device *dev)
}
#else
{
- const u8 queuetopipe[] = {3, 2, 1, 0, 4, 4, 0, 4, 4};
+ static const u8 queuetopipe[] = {3, 2, 1, 0, 4, 4, 0, 4, 4};
memcpy(priv->txqueue_to_outpipemap, queuetopipe, 9);
}
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index a44d04effc8b..76ac798642bd 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -157,12 +157,11 @@ struct _adapter {
struct iw_statistics iwstats;
int pid; /*process id from UI*/
struct work_struct wk_filter_rx_ff0;
- u8 blnEnableRxFF0Filter;
- spinlock_t lock_rx_ff0_filter;
const struct firmware *fw;
struct usb_interface *pusb_intf;
struct mutex mutex_start;
struct completion rtl8712_fw_ready;
+ struct completion rx_filter_ready;
};
static inline u8 *myid(struct eeprom_priv *peepriv)
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index f926809b1021..7d8f1a29d18a 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -162,13 +162,13 @@ int r8712_generate_ie(struct registry_priv *registrypriv)
uint sz = 0;
struct wlan_bssid_ex *dev_network = &registrypriv->dev_network;
u8 *ie = dev_network->IEs;
- u16 beaconPeriod = (u16)dev_network->Configuration.BeaconPeriod;
+ u16 beacon_period = (u16)dev_network->Configuration.BeaconPeriod;
/*timestamp will be inserted by hardware*/
sz += 8;
ie += sz;
/*beacon interval : 2bytes*/
- *(__le16 *)ie = cpu_to_le16(beaconPeriod);
+ *(__le16 *)ie = cpu_to_le16(beacon_period);
sz += 2;
ie += 2;
/*capability info*/
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index d15d52c0d1a7..003e97205124 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -332,7 +332,6 @@ void r8712_free_drv_sw(struct _adapter *padapter)
r8712_free_evt_priv(&padapter->evtpriv);
r8712_DeInitSwLeds(padapter);
r8712_free_mlme_priv(&padapter->mlmepriv);
- r8712_free_io_queue(padapter);
_free_xmit_priv(&padapter->xmitpriv);
_r8712_free_sta_priv(&padapter->stapriv);
_r8712_free_recv_priv(&padapter->recvpriv);
diff --git a/drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h b/drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h
index e125c7222ab5..68bdec07f51e 100644
--- a/drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h
@@ -91,6 +91,5 @@
#define _BCNSPACE_MSK 0x0FFF
#define _BCNSPACE_SHT 0
-
#endif /* __RTL8712_CMDCTRL_BITDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.h b/drivers/staging/rtl8712/rtl8712_efuse.h
index 4969d307e978..2e1ea9d7a295 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.h
+++ b/drivers/staging/rtl8712/rtl8712_efuse.h
@@ -15,8 +15,8 @@
#define GET_EFUSE_OFFSET(header) ((header & 0xF0) >> 4)
#define GET_EFUSE_WORD_EN(header) (header & 0x0F)
-#define MAKE_EFUSE_HEADER(offset, word_en) (((offset & 0x0F) << 4) | \
- (word_en & 0x0F))
+#define MAKE_EFUSE_HEADER(offset, word_en) ((((offset) & 0x0F) << 4) | \
+ ((word_en) & 0x0F))
/*--------------------------------------------------------------------------*/
struct PGPKT_STRUCT {
u8 offset;
diff --git a/drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h b/drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h
index 3d9f40fa8469..46d758d3f3a4 100644
--- a/drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h
@@ -7,7 +7,6 @@
#ifndef __RTL8712_MACSETTING_BITDEF_H__
#define __RTL8712_MACSETTING_BITDEF_H__
-
/*MACID*/
/*BSSID*/
@@ -28,7 +27,5 @@
/*BUILDUSER*/
-
-
#endif /* __RTL8712_MACSETTING_BITDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl8712_macsetting_regdef.h b/drivers/staging/rtl8712/rtl8712_macsetting_regdef.h
index e8cb2eee9294..64740d99c252 100644
--- a/drivers/staging/rtl8712/rtl8712_macsetting_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_macsetting_regdef.h
@@ -16,7 +16,5 @@
#define BUILDTIME (RTL8712_MACIDSETTING_ + 0x0024)
#define BUILDUSER (RTL8712_MACIDSETTING_ + 0x0028)
-
-
#endif /*__RTL8712_MACSETTING_REGDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h b/drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h
index a3eaee0e1b69..9ed5653f3f7f 100644
--- a/drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h
@@ -39,6 +39,5 @@
#define MCS_TXAGC7 (RTL8712_RATECTRL_ + 0x67)
#define CCK_TXAGC (RTL8712_RATECTRL_ + 0x68)
-
#endif /*__RTL8712_RATECTRL_REGDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 0ffb30f1af7e..7f1fdd058551 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -56,7 +56,7 @@ void r8712_init_recv_priv(struct recv_priv *precvpriv,
precvbuf->ref_cnt = 0;
precvbuf->adapter = padapter;
list_add_tail(&precvbuf->list,
- &(precvpriv->free_recv_buf_queue.queue));
+ &precvpriv->free_recv_buf_queue.queue);
precvbuf++;
}
precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
@@ -123,8 +123,8 @@ void r8712_free_recvframe(union recv_frame *precvframe,
precvframe->u.hdr.pkt = NULL;
}
spin_lock_irqsave(&pfree_recv_queue->lock, irqL);
- list_del_init(&(precvframe->u.hdr.list));
- list_add_tail(&(precvframe->u.hdr.list), &pfree_recv_queue->queue);
+ list_del_init(&precvframe->u.hdr.list);
+ list_add_tail(&precvframe->u.hdr.list, &pfree_recv_queue->queue);
if (padapter) {
if (pfree_recv_queue == &precvpriv->free_recv_queue)
precvpriv->free_recvframe_cnt++;
@@ -319,7 +319,7 @@ static void amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
struct rx_pkt_attrib *pattrib;
_pkt *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
struct recv_priv *precvpriv = &padapter->recvpriv;
- struct __queue *pfree_recv_queue = &(precvpriv->free_recv_queue);
+ struct __queue *pfree_recv_queue = &precvpriv->free_recv_queue;
nr_subframes = 0;
pattrib = &prframe->u.hdr.attrib;
@@ -485,8 +485,8 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
else
break;
}
- list_del_init(&(prframe->u.hdr.list));
- list_add_tail(&(prframe->u.hdr.list), plist);
+ list_del_init(&prframe->u.hdr.list);
+ list_add_tail(&prframe->u.hdr.list, plist);
return true;
}
@@ -520,7 +520,7 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
pattrib = &prframe->u.hdr.attrib;
if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
plist = plist->next;
- list_del_init(&(prframe->u.hdr.list));
+ list_del_init(&prframe->u.hdr.list);
if (SN_EQUAL(preorder_ctrl->indicate_seq,
pattrib->seq_num))
preorder_ctrl->indicate_seq =
@@ -980,7 +980,7 @@ static void recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
union recv_frame *precvframe = NULL;
struct recv_priv *precvpriv = &padapter->recvpriv;
- pfree_recv_queue = &(precvpriv->free_recv_queue);
+ pfree_recv_queue = &precvpriv->free_recv_queue;
pbuf = pskb->data;
prxstat = (struct recv_stat *)pbuf;
pkt_cnt = (le32_to_cpu(prxstat->rxdw2) >> 16) & 0xff;
diff --git a/drivers/staging/rtl8712/rtl8712_security_bitdef.h b/drivers/staging/rtl8712/rtl8712_security_bitdef.h
index 1c26a7eca64a..44275ef455a0 100644
--- a/drivers/staging/rtl8712/rtl8712_security_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_security_bitdef.h
@@ -30,6 +30,5 @@
#define _RXUSEDK BIT(1)
#define _TXUSEDK BIT(0)
-
#endif /*__RTL8712_SECURITY_BITDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl8712_spec.h b/drivers/staging/rtl8712/rtl8712_spec.h
index c0bab4c49ae9..613a410e5714 100644
--- a/drivers/staging/rtl8712/rtl8712_spec.h
+++ b/drivers/staging/rtl8712/rtl8712_spec.h
@@ -30,7 +30,6 @@
#define RTL8712_IOBASE_FF 0x10300000 /*IOBASE_FIFO 0x1031000~0x103AFFFF*/
-
/*IOREG Offset for 8712*/
#define RTL8712_SYSCFG_ RTL8712_IOBASE_IOREG
#define RTL8712_CMDCTRL_ (RTL8712_IOBASE_IOREG + 0x40)
@@ -47,7 +46,6 @@
#define RTL8712_DEBUGCTRL_ (RTL8712_IOBASE_IOREG + 0x310)
#define RTL8712_OFFLOAD_ (RTL8712_IOBASE_IOREG + 0x2D0)
-
/*FIFO for 8712*/
#define RTL8712_DMA_BCNQ (RTL8712_IOBASE_FF + 0x10000)
#define RTL8712_DMA_MGTQ (RTL8712_IOBASE_FF + 0x20000)
@@ -60,7 +58,6 @@
#define RTL8712_DMA_H2CCMD (RTL8712_IOBASE_FF + 0x90000)
#define RTL8712_DMA_C2HCMD (RTL8712_IOBASE_FF + 0xA0000)
-
/*------------------------------*/
/*BIT 16 15*/
diff --git a/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h b/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h
index a328ca9b340c..d92df3fbd2b1 100644
--- a/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h
@@ -117,20 +117,17 @@
* Block's Bandgap.
*/
-
/*--------------------------------------------------------------------------*/
/* SPS1_CTRL bits (Offset 0x18-1E, 56bits)*/
/*--------------------------------------------------------------------------*/
#define SPS1_SWEN BIT(1) /* Enable vsps18 SW Macro Block.*/
#define SPS1_LDEN BIT(0) /* Enable VSPS12 LDO Macro block.*/
-
/*----------------------------------------------------------------------------*/
/* LDOA15_CTRL bits (Offset 0x20, 8bits)*/
/*----------------------------------------------------------------------------*/
#define LDA15_EN BIT(0) /* Enable LDOA15 Macro Block*/
-
/*----------------------------------------------------------------------------*/
/* 8192S LDOV12D_CTRL bit (Offset 0x21, 8bits)*/
/*----------------------------------------------------------------------------*/
@@ -140,7 +137,6 @@
/*CLK_PS_CTRL*/
#define _CLK_GATE_EN BIT(0)
-
/* EFUSE_CTRL*/
#define EF_FLAG BIT(31) /* Access Flag, Write:1;
* Read:0
diff --git a/drivers/staging/rtl8712/rtl8712_syscfg_regdef.h b/drivers/staging/rtl8712/rtl8712_syscfg_regdef.h
index e95eb5832ec4..da5efcdedabe 100644
--- a/drivers/staging/rtl8712/rtl8712_syscfg_regdef.h
+++ b/drivers/staging/rtl8712/rtl8712_syscfg_regdef.h
@@ -14,7 +14,6 @@
#ifndef __RTL8712_SYSCFG_REGDEF_H__
#define __RTL8712_SYSCFG_REGDEF_H__
-
#define SYS_ISO_CTRL (RTL8712_SYSCFG_ + 0x0000)
#define SYS_FUNC_EN (RTL8712_SYSCFG_ + 0x0002)
#define PMC_FSM (RTL8712_SYSCFG_ + 0x0004)
@@ -39,6 +38,5 @@
#define RCLK_MON (RTL8712_SYSCFG_ + 0x003E)
#define EFUSE_CLK_CTRL (RTL8712_SYSCFG_ + 0x02F8)
-
#endif /*__RTL8712_SYSCFG_REGDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h b/drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h
index 1af5f1dd3c20..d7bc9dd5cecd 100644
--- a/drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h
@@ -45,6 +45,5 @@
/*BCNERRTH*/
/*MLT*/
-
#endif /* __RTL8712_TIMECTRL_BITDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl8712_wmac_bitdef.h b/drivers/staging/rtl8712/rtl8712_wmac_bitdef.h
index d3b45c6cd855..ea164e482347 100644
--- a/drivers/staging/rtl8712/rtl8712_wmac_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_wmac_bitdef.h
@@ -45,6 +45,5 @@
#define _RPT_CNT_MSK 0x000FFFFF
#define _RPT_CNT_SHT 0
-
#endif /*__RTL8712_WMAC_BITDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index acda930722b2..4be96df5a329 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -202,7 +202,7 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
mod_timer(&pmlmepriv->scan_to_timer,
jiffies + msecs_to_jiffies(SCANNING_TIMEOUT));
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_SITE_SURVEY);
- padapter->blnEnableRxFF0Filter = 0;
+ complete(&padapter->rx_filter_ready);
return _SUCCESS;
}
@@ -536,7 +536,7 @@ void r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key)
return;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
- ph2c->rsp = (u8 *) psetstakey_rsp;
+ ph2c->rsp = (u8 *)psetstakey_rsp;
ph2c->rspsz = sizeof(struct set_stakey_rsp);
ether_addr_copy(psetstakey_para->addr, sta->hwaddr);
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h
index 95e9ea5b2d98..8453d8de8248 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.h
+++ b/drivers/staging/rtl8712/rtl871x_cmd.h
@@ -66,7 +66,6 @@ struct evt_priv {
u8 *evt_buf; /*shall be non-paged, and 4 bytes aligned*/
u8 *evt_allocated_buf;
u32 evt_done_cnt;
- struct tasklet_struct event_tasklet;
};
#define init_h2fwcmd_w_parm_no_rsp(pcmd, pparm, code) \
@@ -316,7 +315,6 @@ enum _RT_CHANNEL_DOMAIN {
RT_CHANNEL_DOMAIN_MAX,
};
-
struct SetChannelPlan_param {
enum _RT_CHANNEL_DOMAIN ChannelPlan;
};
@@ -338,7 +336,6 @@ struct getdatarate_rsp {
u8 datarates[NumRates];
};
-
/*
* Caller Mode: Any
* AP: AP can use the info for the contents of beacon frame
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl.h b/drivers/staging/rtl8712/rtl871x_ioctl.h
index 634e67461712..d6332a8c7f4f 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl.h
+++ b/drivers/staging/rtl8712/rtl871x_ioctl.h
@@ -13,7 +13,6 @@
#define OID_802_11_PMKID 0x0d010123
#endif
-
/* For DDK-defined OIDs*/
#define OID_NDIS_SEG1 0x00010100
#define OID_NDIS_SEG2 0x00010200
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 3b6926613257..36f6904d25ab 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -82,9 +82,9 @@ static inline void handle_pairwise_key(struct sta_info *psta,
(param->u.crypt. key_len > 16 ? 16 : param->u.crypt.key_len));
if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
memcpy(psta->tkiptxmickey. skey,
- &(param->u.crypt.key[16]), 8);
+ &param->u.crypt.key[16], 8);
memcpy(psta->tkiprxmickey. skey,
- &(param->u.crypt.key[24]), 8);
+ &param->u.crypt.key[24], 8);
padapter->securitypriv. busetkipkey = false;
mod_timer(&padapter->securitypriv.tkip_timer,
jiffies + msecs_to_jiffies(50));
@@ -600,7 +600,7 @@ static int r8711_wx_get_name(struct net_device *dev,
u32 ht_ielen = 0;
char *p;
u8 ht_cap = false;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
u8 *prates;
@@ -659,8 +659,8 @@ static int r8711_wx_set_freq(struct net_device *dev,
/* If setting by frequency, convert to a channel */
if ((fwrq->e == 1) &&
- (fwrq->m >= (int) 2.412e8) &&
- (fwrq->m <= (int) 2.487e8)) {
+ (fwrq->m >= 241200000) &&
+ (fwrq->m <= 248700000)) {
int f = fwrq->m / 100000;
int c = 0;
@@ -1494,7 +1494,7 @@ static int r8711_wx_set_enc(struct net_device *dev,
u32 keyindex_provided;
struct NDIS_802_11_WEP wep;
enum NDIS_802_11_AUTHENTICATION_MODE authmode;
- struct iw_point *erq = &(wrqu->encoding);
+ struct iw_point *erq = &wrqu->encoding;
struct _adapter *padapter = netdev_priv(dev);
key = erq->flags & IW_ENCODE_INDEX;
@@ -1589,8 +1589,8 @@ static int r8711_wx_get_enc(struct net_device *dev,
{
uint key;
struct _adapter *padapter = netdev_priv(dev);
- struct iw_point *erq = &(wrqu->encoding);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct iw_point *erq = &wrqu->encoding;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
union Keytype *dk = padapter->securitypriv.DefKey;
if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
@@ -1670,7 +1670,7 @@ static int r871x_wx_set_auth(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = netdev_priv(dev);
- struct iw_param *param = (struct iw_param *)&(wrqu->param);
+ struct iw_param *param = (struct iw_param *)&wrqu->param;
int paramid;
int paramval;
int ret = 0;
@@ -1964,7 +1964,7 @@ static int r871x_get_ap_info(struct net_device *dev,
return -EINVAL;
data[32] = 0;
- spin_lock_irqsave(&(pmlmepriv->scanned_queue.lock), irqL);
+ spin_lock_irqsave(&pmlmepriv->scanned_queue.lock, irqL);
phead = &queue->queue;
plist = phead->next;
while (1) {
@@ -1974,7 +1974,7 @@ static int r871x_get_ap_info(struct net_device *dev,
if (!mac_pton(data, bssid)) {
netdev_info(dev, "r8712u: Invalid BSSID '%s'.\n",
(u8 *)data);
- spin_unlock_irqrestore(&(pmlmepriv->scanned_queue.lock),
+ spin_unlock_irqrestore(&pmlmepriv->scanned_queue.lock,
irqL);
return -EINVAL;
}
@@ -1996,7 +1996,7 @@ static int r871x_get_ap_info(struct net_device *dev,
}
plist = plist->next;
}
- spin_unlock_irqrestore(&(pmlmepriv->scanned_queue.lock), irqL);
+ spin_unlock_irqrestore(&pmlmepriv->scanned_queue.lock, irqL);
if (pdata->length >= 34) {
if (copy_to_user((u8 __user *)pdata->pointer + 32,
(u8 *)&pdata->flags, 1))
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
index b78101afc93d..2b539335206a 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
@@ -367,7 +367,6 @@ uint oid_rt_get_scan_in_progress_hdl(struct oid_par_priv *poid_par_priv)
return RNDIS_STATUS_SUCCESS;
}
-
uint oid_rt_forced_data_rate_hdl(struct oid_par_priv *poid_par_priv)
{
return RNDIS_STATUS_SUCCESS;
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index 6cdc6f1a6bc6..34c9a52b4c42 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -22,7 +22,6 @@
#include "usb_osintf.h"
#include "usb_ops.h"
-
static u8 validate_ssid(struct ndis_802_11_ssid *ssid)
{
u8 i;
@@ -76,7 +75,7 @@ static u8 do_join(struct _adapter *padapter)
* acquired by caller...
*/
struct wlan_bssid_ex *pdev_network =
- &(padapter->registrypriv.dev_network);
+ &padapter->registrypriv.dev_network;
pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
pibss = padapter->registrypriv.dev_network.MacAddress;
memcpy(&pdev_network->Ssid,
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 92b7c9c07df6..63e12b157001 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -431,8 +431,7 @@ static int is_desired_network(struct _adapter *adapter,
bselected = false;
if (check_fwstate(&adapter->mlmepriv, WIFI_ADHOC_STATE)) {
if (pnetwork->network.InfrastructureMode !=
- adapter->mlmepriv.cur_network.network.
- InfrastructureMode)
+ adapter->mlmepriv.cur_network.network.InfrastructureMode)
bselected = false;
}
return bselected;
@@ -539,8 +538,7 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf)
struct wlan_bssid_ex *pdev_network =
&(adapter->registrypriv.dev_network);
u8 *pibss =
- adapter->registrypriv.
- dev_network.MacAddress;
+ adapter->registrypriv.dev_network.MacAddress;
pmlmepriv->fw_state ^= _FW_UNDER_SURVEY;
memcpy(&pdev_network->Ssid,
&pmlmepriv->assoc_ssid,
@@ -688,11 +686,9 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
pnetwork->network.Configuration.DSConfig =
le32_to_cpu(pnetwork->network.Configuration.DSConfig);
pnetwork->network.Configuration.FHConfig.DwellTime =
- le32_to_cpu(pnetwork->network.Configuration.FHConfig.
- DwellTime);
+ le32_to_cpu(pnetwork->network.Configuration.FHConfig.DwellTime);
pnetwork->network.Configuration.FHConfig.HopPattern =
- le32_to_cpu(pnetwork->network.Configuration.
- FHConfig.HopPattern);
+ le32_to_cpu(pnetwork->network.Configuration.FHConfig.HopPattern);
pnetwork->network.Configuration.FHConfig.HopSet =
le32_to_cpu(pnetwork->network.Configuration.FHConfig.HopSet);
pnetwork->network.Configuration.FHConfig.Length =
@@ -717,36 +713,29 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
if (the_same_macaddr) {
ptarget_wlan =
- r8712_find_network(&pmlmepriv->
- scanned_queue,
+ r8712_find_network(&pmlmepriv->scanned_queue,
cur_network->network.MacAddress);
} else {
pcur_wlan =
- r8712_find_network(&pmlmepriv->
- scanned_queue,
+ r8712_find_network(&pmlmepriv->scanned_queue,
cur_network->network.MacAddress);
if (pcur_wlan)
pcur_wlan->fixed = false;
pcur_sta = r8712_get_stainfo(pstapriv,
cur_network->network.MacAddress);
- spin_lock_irqsave(&pstapriv->
- sta_hash_lock, irqL2);
+ spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL2);
r8712_free_stainfo(adapter, pcur_sta);
- spin_unlock_irqrestore(&(pstapriv->
- sta_hash_lock), irqL2);
+ spin_unlock_irqrestore(&(pstapriv->sta_hash_lock), irqL2);
ptarget_wlan =
- r8712_find_network(&pmlmepriv->
- scanned_queue,
- pnetwork->network.
- MacAddress);
+ r8712_find_network(&pmlmepriv->scanned_queue,
+ pnetwork->network.MacAddress);
if (ptarget_wlan)
ptarget_wlan->fixed = true;
}
} else {
- ptarget_wlan = r8712_find_network(&pmlmepriv->
- scanned_queue,
+ ptarget_wlan = r8712_find_network(&pmlmepriv->scanned_queue,
pnetwork->network.MacAddress);
if (ptarget_wlan)
ptarget_wlan->fixed = true;
@@ -779,39 +768,25 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
ptarget_sta->aid = pnetwork->join_res;
ptarget_sta->qos_option = 1;
ptarget_sta->mac_id = 5;
- if (adapter->securitypriv.
- AuthAlgrthm == 2) {
- adapter->securitypriv.
- binstallGrpkey =
- false;
- adapter->securitypriv.
- busetkipkey =
- false;
- adapter->securitypriv.
- bgrpkey_handshake =
- false;
- ptarget_sta->ieee8021x_blocked
- = true;
- ptarget_sta->XPrivacy =
- adapter->securitypriv.
- PrivacyAlgrthm;
- memset((u8 *)&ptarget_sta->
- x_UncstKey,
+ if (adapter->securitypriv.AuthAlgrthm == 2) {
+ adapter->securitypriv.binstallGrpkey = false;
+ adapter->securitypriv.busetkipkey = false;
+ adapter->securitypriv.bgrpkey_handshake = false;
+ ptarget_sta->ieee8021x_blocked = true;
+ ptarget_sta->XPrivacy = adapter->
+ securitypriv.PrivacyAlgrthm;
+ memset((u8 *)&ptarget_sta->x_UncstKey,
0,
sizeof(union Keytype));
- memset((u8 *)&ptarget_sta->
- tkiprxmickey,
+ memset((u8 *)&ptarget_sta->tkiprxmickey,
0,
sizeof(union Keytype));
- memset((u8 *)&ptarget_sta->
- tkiptxmickey,
+ memset((u8 *)&ptarget_sta->tkiptxmickey,
0,
sizeof(union Keytype));
- memset((u8 *)&ptarget_sta->
- txpn, 0,
+ memset((u8 *)&ptarget_sta->txpn, 0,
sizeof(union pn48));
- memset((u8 *)&ptarget_sta->
- rxpn, 0,
+ memset((u8 *)&ptarget_sta->rxpn, 0,
sizeof(union pn48));
}
} else {
@@ -942,8 +917,7 @@ void r8712_stadel_event_callback(struct _adapter *adapter, u8 *pbuf)
pdev_network = &(adapter->registrypriv.dev_network);
pibss = adapter->registrypriv.dev_network.MacAddress;
memcpy(pdev_network, &tgt_network->network,
- r8712_get_wlan_bssid_ex_sz(&tgt_network->
- network));
+ r8712_get_wlan_bssid_ex_sz(&tgt_network->network));
memcpy(&pdev_network->Ssid,
&pmlmepriv->assoc_ssid,
sizeof(struct ndis_802_11_ssid));
@@ -1092,8 +1066,7 @@ int r8712_select_and_join_from_scan(struct mlme_priv *pmlmepriv)
src_ssid = pmlmepriv->assoc_bssid;
if (!memcmp(dst_ssid, src_ssid, ETH_ALEN)) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- if (is_same_network(&pmlmepriv->
- cur_network.network,
+ if (is_same_network(&pmlmepriv->cur_network.network,
&pnetwork->network)) {
_clr_fwstate_(pmlmepriv,
_FW_UNDER_LINKING);
@@ -1284,26 +1257,13 @@ int r8712_restruct_wmm_ie(struct _adapter *adapter, u8 *in_ie, u8 *out_ie,
*/
static int SecIsInPMKIDList(struct _adapter *Adapter, u8 *bssid)
{
- struct security_priv *psecuritypriv = &Adapter->securitypriv;
- int i = 0;
-
- do {
- if (psecuritypriv->PMKIDList[i].bUsed &&
- (!memcmp(psecuritypriv->PMKIDList[i].Bssid,
- bssid, ETH_ALEN)))
- break;
- i++;
-
- } while (i < NUM_PMKID_CACHE);
+ struct security_priv *p = &Adapter->securitypriv;
+ int i;
- if (i == NUM_PMKID_CACHE) {
- i = -1; /* Could not find. */
- } else {
- ; /* There is one Pre-Authentication Key for the
- * specific BSSID.
- */
- }
- return i;
+ for (i = 0; i < NUM_PMKID_CACHE; i++)
+ if (p->PMKIDList[i].bUsed && !memcmp(p->PMKIDList[i].Bssid, bssid, ETH_ALEN))
+ return i;
+ return -1;
}
sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
index 98204493a04c..aa4d5ce471f2 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
@@ -148,7 +148,6 @@ extern struct oid_obj_priv oid_rtl_seg_87_12_00[32];
#endif /* _RTL871X_MP_IOCTL_C_ */
-
enum MP_MODE {
MP_START_MODE,
MP_STOP_MODE,
diff --git a/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h b/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
index ca5072e11e22..a08c5d2f59e3 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
+++ b/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
@@ -26,7 +26,6 @@
#ifndef __RTL871X_MP_PHY_REGDEF_H
#define __RTL871X_MP_PHY_REGDEF_H
-
/*--------------------------Define Parameters-------------------------------*/
/*============================================================
@@ -1008,7 +1007,6 @@
#define ANTENNA_C 0x4
#define ANTENNA_D 0x8
-
/* accept all physical address */
#define RCR_AAP BIT(0)
#define RCR_APM BIT(1) /* accept physical match */
@@ -1032,6 +1030,5 @@
/*--------------------------Define Parameters-------------------------------*/
-
#endif /*__INC_HAL8192SPHYREG_H */
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 66cc50f24e29..de9a568eaffa 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -455,7 +455,6 @@ static sint validate_recv_mgnt_frame(struct _adapter *adapter,
return _FAIL;
}
-
static sint validate_recv_data_frame(struct _adapter *adapter,
union recv_frame *precv_frame)
{
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index e0a1c30a8fe6..e46a5dbc7b65 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -381,7 +381,6 @@ void seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len, u8 *mic_code,
#define P1K_SIZE 10 /* 80-bit Phase1 key */
#define RC4_KEY_SIZE 16 /* 128-bit RC4KEY (104 bits unknown) */
-
/* 2-unsigned char by 2-unsigned char subset of the full AES S-box table */
static const unsigned short Sbox1[2][256] = {/* Sbox for hash (can be in ROM) */
{
diff --git a/drivers/staging/rtl8712/sta_info.h b/drivers/staging/rtl8712/sta_info.h
index 9b7e5ffa380d..6286c622475e 100644
--- a/drivers/staging/rtl8712/sta_info.h
+++ b/drivers/staging/rtl8712/sta_info.h
@@ -21,7 +21,6 @@
#define NUM_STA 32
#define NUM_ACL 64
-
/* if mode ==0, then the sta is allowed once the addr is hit.
* if mode ==1, then the sta is rejected once the addr is non-hit.
*/
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index ee4c61f85a07..37364d3101e2 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -265,6 +265,7 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter)
static void r8712_usb_dvobj_deinit(struct _adapter *padapter)
{
+ r8712_free_io_queue(padapter);
}
void rtl871x_intf_stop(struct _adapter *padapter)
@@ -302,9 +303,6 @@ void r871x_dev_unload(struct _adapter *padapter)
rtl8712_hal_deinit(padapter);
}
- /*s6.*/
- if (padapter->dvobj_deinit)
- padapter->dvobj_deinit(padapter);
padapter->bup = false;
}
}
@@ -538,13 +536,13 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
} else {
AutoloadFail = false;
}
- if (((mac[0] == 0xff) && (mac[1] == 0xff) &&
+ if ((!AutoloadFail) ||
+ ((mac[0] == 0xff) && (mac[1] == 0xff) &&
(mac[2] == 0xff) && (mac[3] == 0xff) &&
(mac[4] == 0xff) && (mac[5] == 0xff)) ||
((mac[0] == 0x00) && (mac[1] == 0x00) &&
(mac[2] == 0x00) && (mac[3] == 0x00) &&
- (mac[4] == 0x00) && (mac[5] == 0x00)) ||
- (!AutoloadFail)) {
+ (mac[4] == 0x00) && (mac[5] == 0x00))) {
mac[0] = 0x00;
mac[1] = 0xe0;
mac[2] = 0x4c;
@@ -568,7 +566,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
/* step 6. Load the firmware asynchronously */
if (rtl871x_load_fw(padapter))
goto deinit_drv_sw;
- spin_lock_init(&padapter->lock_rx_ff0_filter);
+ init_completion(&padapter->rx_filter_ready);
mutex_init(&padapter->mutex_start);
return 0;
@@ -607,6 +605,8 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
/* Stop driver mlme relation timer */
r8712_stop_drv_timers(padapter);
r871x_dev_unload(padapter);
+ if (padapter->dvobj_deinit)
+ padapter->dvobj_deinit(padapter);
r8712_free_drv_sw(padapter);
free_netdev(pnetdev);
diff --git a/drivers/staging/rtl8712/usb_ops.c b/drivers/staging/rtl8712/usb_ops.c
index e64845e6adf3..af9966d03979 100644
--- a/drivers/staging/rtl8712/usb_ops.c
+++ b/drivers/staging/rtl8712/usb_ops.c
@@ -29,7 +29,8 @@ static u8 usb_read8(struct intf_hdl *intfhdl, u32 addr)
u16 wvalue;
u16 index;
u16 len;
- __le32 data;
+ int status;
+ __le32 data = 0;
struct intf_priv *intfpriv = intfhdl->pintfpriv;
request = 0x05;
@@ -37,8 +38,10 @@ static u8 usb_read8(struct intf_hdl *intfhdl, u32 addr)
index = 0;
wvalue = (u16)(addr & 0x0000ffff);
len = 1;
- r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
- requesttype);
+ status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
+ &data, len, requesttype);
+ if (status < 0)
+ return 0;
return (u8)(le32_to_cpu(data) & 0x0ff);
}
@@ -49,7 +52,8 @@ static u16 usb_read16(struct intf_hdl *intfhdl, u32 addr)
u16 wvalue;
u16 index;
u16 len;
- __le32 data;
+ int status;
+ __le32 data = 0;
struct intf_priv *intfpriv = intfhdl->pintfpriv;
request = 0x05;
@@ -57,8 +61,10 @@ static u16 usb_read16(struct intf_hdl *intfhdl, u32 addr)
index = 0;
wvalue = (u16)(addr & 0x0000ffff);
len = 2;
- r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
- requesttype);
+ status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
+ &data, len, requesttype);
+ if (status < 0)
+ return 0;
return (u16)(le32_to_cpu(data) & 0xffff);
}
@@ -69,7 +75,8 @@ static u32 usb_read32(struct intf_hdl *intfhdl, u32 addr)
u16 wvalue;
u16 index;
u16 len;
- __le32 data;
+ int status;
+ __le32 data = 0;
struct intf_priv *intfpriv = intfhdl->pintfpriv;
request = 0x05;
@@ -77,8 +84,10 @@ static u32 usb_read32(struct intf_hdl *intfhdl, u32 addr)
index = 0;
wvalue = (u16)(addr & 0x0000ffff);
len = 4;
- r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
- requesttype);
+ status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
+ &data, len, requesttype);
+ if (status < 0)
+ return 0;
return le32_to_cpu(data);
}
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index f984a5ab2c6f..b2181e1e2d38 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -495,14 +495,21 @@ int r8712_usbctrl_vendorreq(struct intf_priv *pintfpriv, u8 request, u16 value,
}
status = usb_control_msg(udev, pipe, request, reqtype, value, index,
pIo_buf, len, 500);
- if (status > 0) { /* Success this control transfer. */
- if (requesttype == 0x01) {
- /* For Control read transfer, we have to copy the read
- * data from pIo_buf to pdata.
- */
- memcpy(pdata, pIo_buf, status);
- }
+ if (status < 0)
+ goto free;
+ if (status != len) {
+ status = -EREMOTEIO;
+ goto free;
+ }
+ /* Success this control transfer. */
+ if (requesttype == 0x01) {
+ /* For Control read transfer, we have to copy the read
+ * data from pIo_buf to pdata.
+ */
+ memcpy(pdata, pIo_buf, status);
}
+
+free:
kfree(palloc_buf);
return status;
}
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index b8acb9c7395d..498e6dec7e67 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -186,7 +186,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
#define _CAPABILITY_ 2
#define _TIMESTAMP_ 8
-
/*-----------------------------------------------------------------------------
* Below is the definition for WMM
*------------------------------------------------------------------------------
diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
index 90d34cf9d2ff..4a93839bf947 100644
--- a/drivers/staging/rtl8712/xmit_linux.c
+++ b/drivers/staging/rtl8712/xmit_linux.c
@@ -95,18 +95,12 @@ void r8712_SetFilter(struct work_struct *work)
struct _adapter *adapter = container_of(work, struct _adapter,
wk_filter_rx_ff0);
u8 oldvalue = 0x00, newvalue = 0x00;
- unsigned long irqL;
oldvalue = r8712_read8(adapter, 0x117);
newvalue = oldvalue & 0xfe;
r8712_write8(adapter, 0x117, newvalue);
- spin_lock_irqsave(&adapter->lock_rx_ff0_filter, irqL);
- adapter->blnEnableRxFF0Filter = 1;
- spin_unlock_irqrestore(&adapter->lock_rx_ff0_filter, irqL);
- do {
- msleep(100);
- } while (adapter->blnEnableRxFF0Filter == 1);
+ wait_for_completion(&adapter->rx_filter_ready);
r8712_write8(adapter, 0x117, oldvalue);
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 5478188be991..d30d6e6bcd07 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -520,12 +520,12 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
/* B0 Config LDPC Coding Capability */
if (TEST_FLAG(phtpriv_ap->ldpc_cap, LDPC_HT_ENABLE_TX) &&
- GET_HT_CAPABILITY_ELE_LDPC_CAP((u8 *)(&phtpriv_sta->ht_cap)))
+ GET_HT_CAPABILITY_ELE_LDPC_CAP((u8 *)(&phtpriv_sta->ht_cap)))
SET_FLAG(cur_ldpc_cap, (LDPC_HT_ENABLE_TX | LDPC_HT_CAP_TX));
/* B7 B8 B9 Config STBC setting */
if (TEST_FLAG(phtpriv_ap->stbc_cap, STBC_HT_ENABLE_TX) &&
- GET_HT_CAPABILITY_ELE_RX_STBC((u8 *)(&phtpriv_sta->ht_cap)))
+ GET_HT_CAPABILITY_ELE_RX_STBC((u8 *)(&phtpriv_sta->ht_cap)))
SET_FLAG(cur_stbc_cap, (STBC_HT_ENABLE_TX | STBC_HT_CAP_TX));
} else {
phtpriv_sta->ampdu_enable = false;
@@ -1065,10 +1065,12 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
);
if ((psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_CCMP) ||
- (psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP)) {
- pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY & (0x07 << 2));
+ (psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP)) {
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY &
+ (0x07 << 2));
} else {
- pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY & 0x00);
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY &
+ 0x00);
}
rtw_hal_get_def_var(
@@ -1116,7 +1118,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pmlmepriv->htpriv.ht_option = false;
if ((psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_TKIP) ||
- (psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_TKIP)) {
+ (psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_TKIP)) {
/* todo: */
/* ht_cap = false; */
}
@@ -1725,7 +1727,7 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
pmlmepriv->num_sta_no_short_preamble--;
if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_preamble == 0)) {
+ (pmlmepriv->num_sta_no_short_preamble == 0)) {
beacon_updated = true;
update_beacon(padapter, 0xFF, NULL, true);
}
@@ -1763,7 +1765,7 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
pmlmepriv->num_sta_no_short_slot_time++;
if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_slot_time == 1)) {
+ (pmlmepriv->num_sta_no_short_slot_time == 1)) {
beacon_updated = true;
update_beacon(padapter, 0xFF, NULL, true);
}
@@ -1775,7 +1777,7 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
pmlmepriv->num_sta_no_short_slot_time--;
if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_slot_time == 0)) {
+ (pmlmepriv->num_sta_no_short_slot_time == 0)) {
beacon_updated = true;
update_beacon(padapter, 0xFF, NULL, true);
}
@@ -2024,7 +2026,7 @@ void rtw_ap_restore_network(struct adapter *padapter)
start_bss_network(padapter);
if ((padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_) ||
- (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
+ (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
/* restore group key, WEP keys is restored in ips_leave() */
rtw_set_key(
padapter,
@@ -2062,7 +2064,7 @@ void rtw_ap_restore_network(struct adapter *padapter)
/* pairwise key */
/* per sta pairwise key and settings */
if ((psecuritypriv->dot11PrivacyAlgrthm == _TKIP_) ||
- (psecuritypriv->dot11PrivacyAlgrthm == _AES_)) {
+ (psecuritypriv->dot11PrivacyAlgrthm == _AES_)) {
rtw_setstakey_cmd(padapter, psta, true, false);
}
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index 14d37b369273..b4170f64d118 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -1238,7 +1238,7 @@ u8 traffic_status_watchdog(struct adapter *padapter, u8 from_timer)
/*&& !MgntInitAdapterInProgress(pMgntInfo)*/) {
/* if we raise bBusyTraffic in last watchdog, using lower threshold. */
if (pmlmepriv->LinkDetectInfo.bBusyTraffic)
- BusyThreshold = BusyThresholdLow;
+ BusyThreshold = BusyThresholdLow;
if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > BusyThreshold ||
pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > BusyThreshold) {
@@ -1885,11 +1885,8 @@ void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if (pcmd->res == H2C_DROPPED) {
+ if (pcmd->res != H2C_SUCCESS) {
/* TODO: cancel timer and do timeout handler directly... */
- /* need to make timeout handlerOS independent */
- _set_timer(&pmlmepriv->scan_to_timer, 1);
- } else if (pcmd->res != H2C_SUCCESS) {
_set_timer(&pmlmepriv->scan_to_timer, 1);
}
@@ -1916,11 +1913,8 @@ void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if (pcmd->res == H2C_DROPPED) {
+ if (pcmd->res != H2C_SUCCESS) {
/* TODO: cancel timer and do timeout handler directly... */
- /* need to make timeout handlerOS independent */
- _set_timer(&pmlmepriv->assoc_timer, 1);
- } else if (pcmd->res != H2C_SUCCESS) {
_set_timer(&pmlmepriv->assoc_timer, 1);
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c
index 3d3c77273026..06e727ce9cc2 100644
--- a/drivers/staging/rtl8723bs/core/rtw_efuse.c
+++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c
@@ -100,7 +100,7 @@ u8 PwrState)
u16
Efuse_GetCurrentSize(
struct adapter *padapter,
- u8 efuseType,
+ u8 efuseType,
bool bPseudoTest)
{
return padapter->HalFunc.EfuseGetCurrentSize(padapter, efuseType,
@@ -124,29 +124,29 @@ Efuse_CalculateWordCnts(u8 word_en)
}
/* */
-/* Description: */
-/* 1. Execute E-Fuse read byte operation according as map offset and */
-/* save to E-Fuse table. */
-/* 2. Referred from SD1 Richard. */
+/* Description: */
+/* 1. Execute E-Fuse read byte operation according as map offset and */
+/* save to E-Fuse table. */
+/* 2. Referred from SD1 Richard. */
/* */
-/* Assumption: */
-/* 1. Boot from E-Fuse and successfully auto-load. */
-/* 2. PASSIVE_LEVEL (USB interface) */
+/* Assumption: */
+/* 1. Boot from E-Fuse and successfully auto-load. */
+/* 2. PASSIVE_LEVEL (USB interface) */
/* */
-/* Created by Roger, 2008.10.21. */
+/* Created by Roger, 2008.10.21. */
/* */
-/* 2008/12/12 MH 1. Reorganize code flow and reserve bytes. and add description. */
-/* 2. Add efuse utilization collect. */
-/* 2008/12/22 MH Read Efuse must check if we write section 1 data again!!! Sec1 */
-/* write addr must be after sec5. */
+/* 2008/12/12 MH 1. Reorganize code flow and reserve bytes. and add description. */
+/* 2. Add efuse utilization collect. */
+/* 2008/12/22 MH Read Efuse must check if we write section 1 data again!!! Sec1 */
+/* write addr must be after sec5. */
/* */
void
efuse_ReadEFuse(
struct adapter *Adapter,
u8 efuseType,
- u16 _offset,
- u16 _size_byte,
+ u16 _offset,
+ u16 _size_byte,
u8 *pbuf,
bool bPseudoTest
);
@@ -154,8 +154,8 @@ void
efuse_ReadEFuse(
struct adapter *Adapter,
u8 efuseType,
- u16 _offset,
- u16 _size_byte,
+ u16 _offset,
+ u16 _size_byte,
u8 *pbuf,
bool bPseudoTest
)
@@ -168,7 +168,7 @@ EFUSE_GetEfuseDefinition(
struct adapter *padapter,
u8 efuseType,
u8 type,
- void *pOut,
+ void *pOut,
bool bPseudoTest
)
{
@@ -194,7 +194,7 @@ EFUSE_GetEfuseDefinition(
u8
EFUSE_Read1Byte(
struct adapter *Adapter,
-u16 Address)
+u16 Address)
{
u8 Bytetemp = {0x00};
u8 temp = {0x00};
@@ -235,8 +235,8 @@ u16 Address)
u8
efuse_OneByteRead(
struct adapter *padapter,
-u16 addr,
-u8 *data,
+u16 addr,
+u8 *data,
bool bPseudoTest)
{
u32 tmpidx = 0;
@@ -324,8 +324,8 @@ u8 efuse_OneByteWrite(struct adapter *padapter, u16 addr, u8 data, bool bPseudoT
int
Efuse_PgPacketRead(struct adapter *padapter,
- u8 offset,
- u8 *data,
+ u8 offset,
+ u8 *data,
bool bPseudoTest)
{
return padapter->HalFunc.Efuse_PgPacketRead(padapter, offset, data,
@@ -334,9 +334,9 @@ Efuse_PgPacketRead(struct adapter *padapter,
int
Efuse_PgPacketWrite(struct adapter *padapter,
- u8 offset,
- u8 word_en,
- u8 *data,
+ u8 offset,
+ u8 word_en,
+ u8 *data,
bool bPseudoTest)
{
return padapter->HalFunc.Efuse_PgPacketWrite(padapter, offset, word_en,
@@ -386,7 +386,7 @@ efuse_WordEnableDataRead(u8 word_en,
u8
Efuse_WordEnableDataWrite(struct adapter *padapter,
- u16 efuse_addr,
+ u16 efuse_addr,
u8 word_en,
u8 *data,
bool bPseudoTest)
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index b449be537376..68e41d99679d 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -94,16 +94,14 @@ bool rtw_is_cckratesonly_included(u8 *rate)
int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
{
- if (channel > 14) {
+ if (channel > 14)
return WIRELESS_INVALID;
- } else { /* could be pure B, pure G, or B/G */
- if (rtw_is_cckratesonly_included(rate))
- return WIRELESS_11B;
- else if (rtw_is_cckrates_included(rate))
- return WIRELESS_11BG;
- else
- return WIRELESS_11G;
- }
+ /* could be pure B, pure G, or B/G */
+ if (rtw_is_cckratesonly_included(rate))
+ return WIRELESS_11B;
+ if (rtw_is_cckrates_included(rate))
+ return WIRELESS_11BG;
+ return WIRELESS_11G;
}
u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len, unsigned char *source,
@@ -151,11 +149,10 @@ u8 *rtw_get_ie(u8 *pbuf, signed int index, signed int *len, signed int limit)
if (*p == index) {
*len = *(p + 1);
return p;
- } else {
- tmp = *(p + 1);
- p += (tmp + 2);
- i += (tmp + 2);
}
+ tmp = *(p + 1);
+ p += (tmp + 2);
+ i += (tmp + 2);
if (i >= limit)
break;
}
@@ -199,9 +196,8 @@ u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, u
*ielen = in_ie[cnt+1]+2;
break;
- } else {
- cnt += in_ie[cnt+1]+2; /* goto next */
}
+ cnt += in_ie[cnt+1]+2; /* goto next */
}
return target_ie;
@@ -339,9 +335,8 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv)
ie = rtw_set_ie(ie, WLAN_EID_IBSS_PARAMS, 2, (u8 *)&(pdev_network->configuration.atim_window), &sz);
- if (rateLen > 8) {
+ if (rateLen > 8)
ie = rtw_set_ie(ie, WLAN_EID_EXT_SUPP_RATES, (rateLen - 8), (pdev_network->supported_rates + 8), &sz);
- }
/* HT Cap. */
if ((pregistrypriv->wireless_mode & WIRELESS_11_24N) &&
@@ -370,9 +365,8 @@ unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
if (pbuf) {
/* check if oui matches... */
- if (memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type))) {
+ if (memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type)))
goto check_next_ie;
- }
/* check version... */
memcpy((u8 *)&le_tmp, (pbuf + 6), sizeof(val16));
@@ -497,9 +491,8 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
if (is_8021x) {
if (left >= 6) {
pos += 2;
- if (!memcmp(pos, SUITE_1X, 4)) {
+ if (!memcmp(pos, SUITE_1X, 4))
*is_8021x = 1;
- }
}
}
@@ -518,9 +511,8 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi
return _FAIL;
}
- if ((*rsn_ie != WLAN_EID_RSN) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2))) {
+ if ((*rsn_ie != WLAN_EID_RSN) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2)))
return _FAIL;
- }
pos = rsn_ie;
pos += 4;
@@ -697,9 +689,8 @@ u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
cnt += in_ie[cnt+1]+2;
break;
- } else {
- cnt += in_ie[cnt+1]+2; /* goto next */
}
+ cnt += in_ie[cnt+1]+2; /* goto next */
}
return wpsie_ptr;
@@ -748,9 +739,8 @@ u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_att
*len_attr = attr_len;
break;
- } else {
- attr_ptr += attr_len; /* goto next */
}
+ attr_ptr += attr_len; /* goto next */
}
return target_attr_ptr;
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index ed2d3b7d44d9..f2242cf2dfb4 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -751,7 +751,9 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
}
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
+ spin_unlock_bh(&pmlmepriv->lock);
del_timer_sync(&pmlmepriv->scan_to_timer);
+ spin_lock_bh(&pmlmepriv->lock);
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
}
@@ -792,7 +794,7 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
pmlmepriv->to_join = false;
s_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
- if (_SUCCESS == s_ret) {
+ if (s_ret == _SUCCESS) {
_set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
} else if (s_ret == 2) {/* there is no need to wait for join */
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
@@ -1238,8 +1240,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ spin_unlock_bh(&pmlmepriv->lock);
/* s5. Cancel assoc_timer */
del_timer_sync(&pmlmepriv->assoc_timer);
+ spin_lock_bh(&pmlmepriv->lock);
} else {
spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
}
@@ -1545,7 +1549,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
return;
- spin_lock_bh(&pmlmepriv->lock);
+ spin_lock_irq(&pmlmepriv->lock);
if (rtw_to_roam(adapter) > 0) { /* join timeout caused by roaming */
while (1) {
@@ -1554,7 +1558,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
int do_join_r;
do_join_r = rtw_do_join(adapter);
- if (_SUCCESS != do_join_r) {
+ if (do_join_r != _SUCCESS) {
continue;
}
break;
@@ -1573,7 +1577,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
}
- spin_unlock_bh(&pmlmepriv->lock);
+ spin_unlock_irq(&pmlmepriv->lock);
}
/*
@@ -1586,11 +1590,11 @@ void rtw_scan_timeout_handler(struct timer_list *t)
mlmepriv.scan_to_timer);
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- spin_lock_bh(&pmlmepriv->lock);
+ spin_lock_irq(&pmlmepriv->lock);
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
- spin_unlock_bh(&pmlmepriv->lock);
+ spin_unlock_irq(&pmlmepriv->lock);
rtw_indicate_scan_done(adapter, true);
}
@@ -2036,28 +2040,14 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
{
- struct security_priv *psecuritypriv = &Adapter->securitypriv;
- int i = 0;
-
- do {
- if ((psecuritypriv->PMKIDList[i].bUsed) &&
- (!memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN))) {
- break;
- } else {
- i++;
- /* continue; */
- }
-
- } while (i < NUM_PMKID_CACHE);
-
- if (i == NUM_PMKID_CACHE) {
- i = -1;/* Could not find. */
- } else {
- /* There is one Pre-Authentication Key for the specific BSSID. */
- }
-
- return i;
+ struct security_priv *p = &Adapter->securitypriv;
+ int i;
+ for (i = 0; i < NUM_PMKID_CACHE; i++)
+ if ((p->PMKIDList[i].bUsed) &&
+ (!memcmp(p->PMKIDList[i].Bssid, bssid, ETH_ALEN)))
+ return i;
+ return -1;
}
/* */
@@ -2558,7 +2548,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
issued = (phtpriv->agg_enable_bitmap>>priority)&0x1;
issued |= (phtpriv->candidate_tid_bitmap>>priority)&0x1;
- if (0 == issued) {
+ if (issued == 0) {
psta->htpriv.candidate_tid_bitmap |= BIT((u8)priority);
rtw_addbareq_cmd(padapter, (u8) priority, pattrib->ra);
}
@@ -2608,30 +2598,20 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *cur_network = &pmlmepriv->cur_network;
- int do_join_r;
- if (0 < rtw_to_roam(padapter)) {
+ if (rtw_to_roam(padapter) > 0) {
memcpy(&pmlmepriv->assoc_ssid, &cur_network->network.ssid, sizeof(struct ndis_802_11_ssid));
pmlmepriv->assoc_by_bssid = false;
- while (1) {
- do_join_r = rtw_do_join(padapter);
- if (_SUCCESS == do_join_r) {
+ while (rtw_do_join(padapter) != _SUCCESS) {
+ rtw_dec_to_roam(padapter);
+ if (rtw_to_roam(padapter) <= 0) {
+ rtw_indicate_disconnect(padapter);
break;
- } else {
- rtw_dec_to_roam(padapter);
-
- if (rtw_to_roam(padapter) > 0) {
- continue;
- } else {
- rtw_indicate_disconnect(padapter);
- break;
- }
}
}
}
-
}
signed int rtw_linked_check(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 49a3f45cb771..1bdbd0971f73 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -271,11 +271,9 @@ static int has_channel(struct rt_channel_info *channel_set,
{
int i;
- for (i = 0; i < chanset_size; i++) {
- if (channel_set[i].ChannelNum == chan) {
+ for (i = 0; i < chanset_size; i++)
+ if (channel_set[i].ChannelNum == chan)
return 1;
- }
- }
return 0;
}
@@ -311,11 +309,11 @@ static void init_channel_list(struct adapter *padapter, struct rt_channel_info *
if (!has_channel(channel_set, chanset_size, ch))
continue;
- if ((0 == padapter->registrypriv.ht_enable) && (8 == o->inc))
+ if ((padapter->registrypriv.ht_enable == 0) && (o->inc == 8))
continue;
if ((0 < (padapter->registrypriv.bw_mode & 0xf0)) &&
- ((BW40MINUS == o->bw) || (BW40PLUS == o->bw)))
+ ((o->bw == BW40MINUS) || (o->bw == BW40PLUS)))
continue;
if (!reg) {
@@ -345,7 +343,7 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
if (is_supported_24g(padapter->registrypriv.wireless_mode)) {
b2_4GBand = true;
- if (RT_CHANNEL_DOMAIN_REALTEK_DEFINE == ChannelPlan)
+ if (ChannelPlan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
Index2G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index2G;
else
Index2G = RTW_ChannelPlanMap[ChannelPlan].Index2G;
@@ -355,14 +353,14 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
for (index = 0; index < RTW_ChannelPlan2G[Index2G].Len; index++) {
channel_set[chanset_size].ChannelNum = RTW_ChannelPlan2G[Index2G].Channel[index];
- if ((RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN == ChannelPlan) ||/* Channel 1~11 is active, and 12~14 is passive */
- (RT_CHANNEL_DOMAIN_GLOBAL_NULL == ChannelPlan)) {
+ if ((ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN) ||/* Channel 1~11 is active, and 12~14 is passive */
+ (ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_NULL)) {
if (channel_set[chanset_size].ChannelNum >= 1 && channel_set[chanset_size].ChannelNum <= 11)
channel_set[chanset_size].ScanType = SCAN_ACTIVE;
else if ((channel_set[chanset_size].ChannelNum >= 12 && channel_set[chanset_size].ChannelNum <= 14))
channel_set[chanset_size].ScanType = SCAN_PASSIVE;
- } else if (RT_CHANNEL_DOMAIN_WORLD_WIDE_13 == ChannelPlan ||
- RT_CHANNEL_DOMAIN_2G_WORLD == Index2G) { /* channel 12~13, passive scan */
+ } else if (ChannelPlan == RT_CHANNEL_DOMAIN_WORLD_WIDE_13 ||
+ Index2G == RT_CHANNEL_DOMAIN_2G_WORLD) { /* channel 12~13, passive scan */
if (channel_set[chanset_size].ChannelNum <= 11)
channel_set[chanset_size].ScanType = SCAN_ACTIVE;
else
@@ -649,9 +647,8 @@ unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
if (psta) {
/* update WMM, ERP in the beacon */
/* todo: the timer is used instead of the number of the beacon received */
- if ((sta_rx_pkts(psta) & 0xf) == 0) {
+ if ((sta_rx_pkts(psta) & 0xf) == 0)
update_beacon_info(padapter, pframe, len, psta);
- }
} else {
/* allocate a new CAM entry for IBSS station */
cam_idx = allocate_fw_sta_entry(padapter);
@@ -911,16 +908,14 @@ unsigned int OnAuthClient(struct adapter *padapter, union recv_frame *precv_fram
set_link_timer(pmlmeext, REAUTH_TO);
return _SUCCESS;
- } else {
- /* open system */
- go2asoc = 1;
}
+ /* open system */
+ go2asoc = 1;
} else if (seq == 4) {
- if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) {
+ if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
go2asoc = 1;
- } else {
+ else
goto authclnt_fail;
- }
} else {
/* this is also illegal */
goto authclnt_fail;
@@ -1331,7 +1326,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
spin_unlock_bh(&pstapriv->asoc_list_lock);
/* now the station is qualified to join our BSS... */
- if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (WLAN_STATUS_SUCCESS == status)) {
+ if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (status == WLAN_STATUS_SUCCESS)) {
/* 1 bss_cap_update & sta_info_update */
bss_cap_update_on_sta_join(padapter, pstat);
sta_info_update(padapter, pstat);
@@ -1455,11 +1450,10 @@ unsigned int OnAssocRsp(struct adapter *padapter, union recv_frame *precv_frame)
UpdateBrateTbl(padapter, pmlmeinfo->network.supported_rates);
report_assoc_result:
- if (res > 0) {
+ if (res > 0)
rtw_buf_update(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len, pframe, pkt_len);
- } else {
+ else
rtw_buf_free(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len);
- }
report_join_res(padapter, res);
@@ -1473,6 +1467,7 @@ unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
u8 *pframe = precv_frame->u.hdr.rx_data;
+ int ignore_received_deauth = 0;
/* check A3 */
if (memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN))
@@ -1508,36 +1503,33 @@ unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
return _SUCCESS;
- } else {
- int ignore_received_deauth = 0;
-
- /* Commented by Albert 20130604 */
- /* Before sending the auth frame to start the STA/GC mode connection with AP/GO, */
- /* we will send the deauth first. */
- /* However, the Win8.1 with BRCM Wi-Fi will send the deauth with reason code 6 to us after receiving our deauth. */
- /* Added the following code to avoid this case. */
- if ((pmlmeinfo->state & WIFI_FW_AUTH_STATE) ||
- (pmlmeinfo->state & WIFI_FW_ASSOC_STATE)) {
- if (reason == WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA) {
- ignore_received_deauth = 1;
- } else if (WLAN_REASON_PREV_AUTH_NOT_VALID == reason) {
- /* TODO: 802.11r */
- ignore_received_deauth = 1;
- }
- }
-
- netdev_dbg(padapter->pnetdev,
- "sta recv deauth reason code(%d) sta:%pM, ignore = %d\n",
- reason, GetAddr3Ptr(pframe),
- ignore_received_deauth);
+ }
- if (0 == ignore_received_deauth) {
- receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+ /* Commented by Albert 20130604 */
+ /* Before sending the auth frame to start the STA/GC mode connection with AP/GO, */
+ /* we will send the deauth first. */
+ /* However, the Win8.1 with BRCM Wi-Fi will send the deauth with reason code 6 to us after receiving our deauth. */
+ /* Added the following code to avoid this case. */
+ if ((pmlmeinfo->state & WIFI_FW_AUTH_STATE) ||
+ (pmlmeinfo->state & WIFI_FW_ASSOC_STATE)) {
+ if (reason == WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA) {
+ ignore_received_deauth = 1;
+ } else if (reason == WLAN_REASON_PREV_AUTH_NOT_VALID) {
+ /* TODO: 802.11r */
+ ignore_received_deauth = 1;
}
}
+
+ netdev_dbg(padapter->pnetdev,
+ "sta recv deauth reason code(%d) sta:%pM, ignore = %d\n",
+ reason, GetAddr3Ptr(pframe),
+ ignore_received_deauth);
+
+ if (ignore_received_deauth == 0)
+ receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+
pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
return _SUCCESS;
-
}
unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
@@ -1581,13 +1573,13 @@ unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
}
return _SUCCESS;
- } else {
- netdev_dbg(padapter->pnetdev,
- "sta recv disassoc reason code(%d) sta:%pM\n",
- reason, GetAddr3Ptr(pframe));
-
- receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
}
+ netdev_dbg(padapter->pnetdev,
+ "sta recv disassoc reason code(%d) sta:%pM\n",
+ reason, GetAddr3Ptr(pframe));
+
+ receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+
pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
return _SUCCESS;
@@ -1674,11 +1666,10 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
/* process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), GetAddr3Ptr(pframe)); */
process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), addr);
- if (pmlmeinfo->accept_addba_req) {
+ if (pmlmeinfo->accept_addba_req)
issue_action_BA(padapter, addr, WLAN_ACTION_ADDBA_RESP, 0);
- } else {
+ else
issue_action_BA(padapter, addr, WLAN_ACTION_ADDBA_RESP, 37);/* reject ADDBA Req */
- }
break;
@@ -1774,9 +1765,8 @@ static unsigned int on_action_public_vendor(union recv_frame *precv_frame)
u8 *pframe = precv_frame->u.hdr.rx_data;
u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
- if (!memcmp(frame_body + 2, P2P_OUI, 4)) {
+ if (!memcmp(frame_body + 2, P2P_OUI, 4))
ret = on_action_public_p2p(precv_frame);
- }
return ret;
}
@@ -2187,9 +2177,8 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
pattrib->pktlen-sizeof(struct ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
- if (wps_ie && wps_ielen > 0) {
+ if (wps_ie && wps_ielen > 0)
rtw_get_wps_attr_content(wps_ie, wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
- }
if (sr != 0)
set_fwstate(pmlmepriv, WIFI_UNDER_WPS);
else
@@ -2245,9 +2234,8 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
/* EXTERNDED SUPPORTED RATE */
- if (rate_len > 8) {
+ if (rate_len > 8)
pframe = rtw_set_ie(pframe, WLAN_EID_EXT_SUPP_RATES, (rate_len - 8), (cur_network->supported_rates + 8), &pattrib->pktlen);
- }
/* todo:HT for adhoc */
@@ -2400,7 +2388,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
pframe += ssid_ielen_diff;
pattrib->pktlen += ssid_ielen_diff;
}
- kfree (buf);
+ kfree(buf);
}
} else {
/* timestamp will be inserted by hardware */
@@ -2447,9 +2435,8 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
/* EXTERNDED SUPPORTED RATE */
- if (rate_len > 8) {
+ if (rate_len > 8)
pframe = rtw_set_ie(pframe, WLAN_EID_EXT_SUPP_RATES, (rate_len - 8), (cur_network->supported_rates + 8), &pattrib->pktlen);
- }
/* todo:HT for adhoc */
@@ -2674,9 +2661,8 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
/* setting auth algo number */
val16 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) ? 1 : 0;/* 0:OPEN System, 1:Shared key */
- if (val16) {
+ if (val16)
use_shared_key = 1;
- }
le_tmp = cpu_to_le16(val16);
/* setting IV for auth seq #3 */
@@ -2831,16 +2817,14 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
break;
}
- if (!pbuf || ie_len == 0) {
+ if (!pbuf || ie_len == 0)
break;
- }
}
}
- if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK) {
+ if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
pframe = rtw_set_ie(pframe, WLAN_EID_VENDOR_SPECIFIC, 6, REALTEK_96B_IE, &(pattrib->pktlen));
- }
/* add WPS IE ie for wps 2.0 */
if (pmlmepriv->wps_assoc_resp_ie && pmlmepriv->wps_assoc_resp_ie_len > 0) {
@@ -3301,9 +3285,8 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da,
__le16 le_tmp;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe) {
+ if (!pmgntframe)
goto exit;
- }
/* update attribute */
pattrib = &pmgntframe->attrib;
@@ -3552,13 +3535,13 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
rtw_hal_get_def_var(padapter,
HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
- if (IEEE80211_HT_MAX_AMPDU_64K == max_rx_ampdu_factor)
+ if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_64K)
BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
- else if (IEEE80211_HT_MAX_AMPDU_32K == max_rx_ampdu_factor)
+ else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_32K)
BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0800); /* 32 buffer size */
- else if (IEEE80211_HT_MAX_AMPDU_16K == max_rx_ampdu_factor)
+ else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_16K)
BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0400); /* 16 buffer size */
- else if (IEEE80211_HT_MAX_AMPDU_8K == max_rx_ampdu_factor)
+ else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_8K)
BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0200); /* 8 buffer size */
else
BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
@@ -3627,9 +3610,8 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
action = ACT_PUBLIC_BSSCOEXIST;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe) {
+ if (!pmgntframe)
return;
- }
/* update attribute */
pattrib = &pmgntframe->attrib;
@@ -3802,10 +3784,8 @@ unsigned int send_beacon(struct adapter *padapter)
} while (false == bxmitok && issue < 100 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped) {
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
return _FAIL;
- }
-
if (!bxmitok)
return _FAIL;
@@ -4388,9 +4368,8 @@ static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid
}
/* skip AP 2.4G channel plan */
- while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14)) {
+ while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14))
j++;
- }
}
pmlmeext->update_channel_plan_by_ap_done = 1;
@@ -4402,9 +4381,8 @@ static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid
i = 0;
while ((i < MAX_CHANNEL_NUM) && (chplan_new[i].ChannelNum != 0)) {
if (chplan_new[i].ChannelNum == channel) {
- if (chplan_new[i].ScanType == SCAN_PASSIVE) {
+ if (chplan_new[i].ScanType == SCAN_PASSIVE)
chplan_new[i].ScanType = SCAN_ACTIVE;
- }
break;
}
i++;
@@ -4629,9 +4607,8 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
- if (!pcmd_obj) {
+ if (!pcmd_obj)
return;
- }
cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
pevtcmd = rtw_zmalloc(cmdsz);
@@ -5086,7 +5063,7 @@ void linked_status_chk(struct adapter *padapter)
if (pmlmeinfo->FW_sta_info[i].status == 1) {
psta = pmlmeinfo->FW_sta_info[i].psta;
- if (NULL == psta)
+ if (psta == NULL)
continue;
if (pmlmeinfo->FW_sta_info[i].rx_pkt == sta_rx_pkts(psta)) {
@@ -5124,9 +5101,8 @@ void survey_timer_hdl(struct timer_list *t)
/* issue rtw_sitesurvey_cmd */
if (pmlmeext->sitesurvey_res.state > SCAN_START) {
- if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS)
pmlmeext->sitesurvey_res.channel_idx++;
- }
if (pmlmeext->scan_abort) {
pmlmeext->sitesurvey_res.channel_idx = pmlmeext->sitesurvey_res.ch_num;
@@ -5135,24 +5111,18 @@ void survey_timer_hdl(struct timer_list *t)
}
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (!ph2c) {
- goto exit_survey_timer_hdl;
- }
+ if (!ph2c)
+ return;
psurveyPara = rtw_zmalloc(sizeof(struct sitesurvey_parm));
if (!psurveyPara) {
kfree(ph2c);
- goto exit_survey_timer_hdl;
+ return;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));
rtw_enqueue_cmd(pcmdpriv, ph2c);
}
-
-
-exit_survey_timer_hdl:
-
- return;
}
void link_timer_hdl(struct timer_list *t)
@@ -5173,17 +5143,9 @@ void link_timer_hdl(struct timer_list *t)
} else if (pmlmeinfo->state & WIFI_FW_AUTH_STATE) {
/* re-auth timer */
if (++pmlmeinfo->reauth_count > REAUTH_LIMIT) {
- /* if (pmlmeinfo->auth_algo != dot11AuthAlgrthm_Auto) */
- /* */
- pmlmeinfo->state = 0;
- report_join_res(padapter, -1);
- return;
- /* */
- /* else */
- /* */
- /* pmlmeinfo->auth_algo = dot11AuthAlgrthm_Shared; */
- /* pmlmeinfo->reauth_count = 0; */
- /* */
+ pmlmeinfo->state = 0;
+ report_join_res(padapter, -1);
+ return;
}
pmlmeinfo->auth_seq = 1;
@@ -5348,9 +5310,8 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
/* check already connecting to AP or not */
if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
- if (pmlmeinfo->state & WIFI_FW_STATION_STATE) {
+ if (pmlmeinfo->state & WIFI_FW_STATION_STATE)
issue_deauth_ex(padapter, pnetwork->mac_address, WLAN_REASON_DEAUTH_LEAVING, 1, 100);
- }
pmlmeinfo->state = WIFI_FW_NULL_STATE;
/* clear CAM */
@@ -5485,9 +5446,8 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
u8 val8;
- if (is_client_associated_to_ap(padapter)) {
+ if (is_client_associated_to_ap(padapter))
issue_deauth_ex(padapter, pnetwork->mac_address, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms/100, 100);
- }
if (((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)) {
/* Stop BCN */
@@ -6073,7 +6033,7 @@ u8 run_in_thread_hdl(struct adapter *padapter, u8 *pbuf)
struct RunInThread_param *p;
- if (NULL == pbuf)
+ if (pbuf == NULL)
return H2C_PARAMETERS_ERROR;
p = (struct RunInThread_param *)pbuf;
diff --git a/drivers/staging/rtl8723bs/core/rtw_rf.c b/drivers/staging/rtl8723bs/core/rtw_rf.c
index 96eb8ca38003..4f120c894998 100644
--- a/drivers/staging/rtl8723bs/core/rtw_rf.c
+++ b/drivers/staging/rtl8723bs/core/rtw_rf.c
@@ -8,47 +8,27 @@
#include <drv_types.h>
#include <linux/kernel.h>
-
-struct ch_freq {
- u32 channel;
- u32 frequency;
-};
-
-static struct ch_freq ch_freq_map[] = {
- {1, 2412}, {2, 2417}, {3, 2422}, {4, 2427}, {5, 2432},
- {6, 2437}, {7, 2442}, {8, 2447}, {9, 2452}, {10, 2457},
- {11, 2462}, {12, 2467}, {13, 2472}, {14, 2484},
- /* UNII */
- {36, 5180}, {40, 5200}, {44, 5220}, {48, 5240}, {52, 5260},
- {56, 5280}, {60, 5300}, {64, 5320}, {149, 5745}, {153, 5765},
- {157, 5785}, {161, 5805}, {165, 5825}, {167, 5835}, {169, 5845},
- {171, 5855}, {173, 5865},
- /* HiperLAN2 */
- {100, 5500}, {104, 5520}, {108, 5540}, {112, 5560}, {116, 5580},
- {120, 5600}, {124, 5620}, {128, 5640}, {132, 5660}, {136, 5680},
- {140, 5700},
- /* Japan MMAC */
- {34, 5170}, {38, 5190}, {42, 5210}, {46, 5230},
- /* Japan */
- {184, 4920}, {188, 4940}, {192, 4960}, {196, 4980},
- {208, 5040},/* Japan, means J08 */
- {212, 5060},/* Japan, means J12 */
- {216, 5080},/* Japan, means J16 */
+static const u32 ch_freq_map[] = {
+ 2412,
+ 2417,
+ 2422,
+ 2427,
+ 2432,
+ 2437,
+ 2442,
+ 2447,
+ 2452,
+ 2457,
+ 2462,
+ 2467,
+ 2472,
+ 2484
};
u32 rtw_ch2freq(u32 channel)
{
- u8 i;
- u32 freq = 0;
-
- for (i = 0; i < ARRAY_SIZE(ch_freq_map); i++) {
- if (channel == ch_freq_map[i].channel) {
- freq = ch_freq_map[i].frequency;
- break;
- }
- }
- if (i == ARRAY_SIZE(ch_freq_map))
- freq = 2412;
+ if (channel == 0 || channel > ARRAY_SIZE(ch_freq_map))
+ return 2412;
- return freq;
+ return ch_freq_map[channel - 1];
}
diff --git a/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
index af50674b2a65..9091f2f75fe1 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
+++ b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
@@ -68,16 +68,6 @@ enum btc_chip_interface {
BTC_INTF_MAX
};
-enum {
- BTC_CHIP_UNDEF = 0,
- BTC_CHIP_CSR_BC4 = 1,
- BTC_CHIP_CSR_BC8 = 2,
- BTC_CHIP_RTL8723A = 3,
- BTC_CHIP_RTL8821 = 4,
- BTC_CHIP_RTL8723B = 5,
- BTC_CHIP_MAX
-};
-
/* following is for wifi link status */
#define WIFI_STA_CONNECTED BIT0
#define WIFI_AP_CONNECTED BIT1
@@ -87,7 +77,6 @@ enum {
struct btc_board_info {
/* The following is some board information */
- u8 btChipType;
u8 pgAntNum; /* pg ant number */
u8 btdmAntNum; /* ant number for btdm */
u8 btdmAntPos; /* Bryant Add to indicate Antenna Position for (pgAntNum = 2) && (btdmAntNum = 1) (DPDT+1Ant case) */
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index f4b3e8b28712..9acd49323c7c 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -1113,11 +1113,6 @@ void EXhalbtcoutsrc_Periodical(struct btc_coexist *pBtCoexist)
/* halbtcoutsrc_NormalLowPower(pBtCoexist); */
}
-void EXhalbtcoutsrc_SetChipType(u8 chipType)
-{
- GLBtCoexist.boardInfo.btChipType = BTC_CHIP_RTL8723B;
-}
-
void EXhalbtcoutsrc_SetAntNum(u8 type, u8 antNum)
{
if (BT_COEX_ANT_TYPE_PG == type) {
@@ -1188,9 +1183,6 @@ void hal_btcoex_SetChipType(struct adapter *padapter, u8 chipType)
pHalData = GET_HAL_DATA(padapter);
- pHalData->bt_coexist.btChipType = chipType;
-
- EXhalbtcoutsrc_SetChipType(chipType);
}
void hal_btcoex_SetPgAntNum(struct adapter *padapter, u8 antNum)
diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c
index a545832a468e..107f427ee4aa 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_ops.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c
@@ -811,17 +811,14 @@ static struct recv_buf *sd_recv_rxfifo(struct adapter *adapter, u32 size)
SIZE_PTR alignment = 0;
recvbuf->pskb = rtw_skb_alloc(MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
-
- if (recvbuf->pskb) {
- recvbuf->pskb->dev = adapter->pnetdev;
-
- tmpaddr = (SIZE_PTR)recvbuf->pskb->data;
- alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
- skb_reserve(recvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
- }
-
if (!recvbuf->pskb)
return NULL;
+
+ recvbuf->pskb->dev = adapter->pnetdev;
+
+ tmpaddr = (SIZE_PTR)recvbuf->pskb->data;
+ alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
+ skb_reserve(recvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
}
/* 3 3. read data from rxfifo */
diff --git a/drivers/staging/rtl8723bs/include/HalVerDef.h b/drivers/staging/rtl8723bs/include/HalVerDef.h
index 8f654a49fb9d..d0ce21ccc1cc 100644
--- a/drivers/staging/rtl8723bs/include/HalVerDef.h
+++ b/drivers/staging/rtl8723bs/include/HalVerDef.h
@@ -9,16 +9,7 @@
/* hal_ic_type_e */
enum hal_ic_type_e { /* tag_HAL_IC_Type_Definition */
- CHIP_8192S = 0,
- CHIP_8188C = 1,
- CHIP_8192C = 2,
- CHIP_8192D = 3,
- CHIP_8723A = 4,
- CHIP_8188E = 5,
- CHIP_8812 = 6,
- CHIP_8821 = 7,
CHIP_8723B = 8,
- CHIP_8192E = 9,
};
/* hal_chip_type_e */
@@ -58,7 +49,6 @@ struct hal_version { /* tag_HAL_VERSION */
u8 ROMVer;
};
-/* VERSION_8192C VersionID; */
/* hal_version VersionID; */
/* Get element */
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 0ce08c2a0755..0bbbdebdf157 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -42,7 +42,6 @@
#include <rtw_mlme.h>
#include <mlme_osdep.h>
#include <rtw_io.h>
-#include <rtw_ioctl.h>
#include <rtw_ioctl_set.h>
#include <osdep_intf.h>
#include <rtw_eeprom.h>
diff --git a/drivers/staging/rtl8723bs/include/hal_com_reg.h b/drivers/staging/rtl8723bs/include/hal_com_reg.h
index 8213dcf48b34..d8d03752dc2e 100644
--- a/drivers/staging/rtl8723bs/include/hal_com_reg.h
+++ b/drivers/staging/rtl8723bs/include/hal_com_reg.h
@@ -72,13 +72,9 @@
#define REG_MULTI_FUNC_CTRL 0x0068 /* RTL8723 WIFI/BT/GPS Multi-Function control source. */
#define REG_GSSR 0x006c
#define REG_AFE_XTAL_CTRL_EXT 0x0078 /* RTL8188E */
-#define REG_XCK_OUT_CTRL 0x007c /* RTL8188E */
#define REG_MCUFWDL 0x0080
-#define REG_WOL_EVENT 0x0081 /* RTL8188E */
#define REG_MCUTSTCFG 0x0084
#define REG_FDHM0 0x0088
-#define REG_HOST_SUSP_CNT 0x00BC /* RTL8192C Host suspend counter on FPGA platform */
-#define REG_SYSTEM_ON_CTRL 0x00CC /* For 8723AE Reset after S3 */
#define REG_EFUSE_ACCESS 0x00CF /* Efuse access protection for RTL8723 */
#define REG_BIST_SCAN 0x00D0
#define REG_BIST_RPT 0x00D4
@@ -117,7 +113,6 @@
#define REG_FWIMR 0x0130
#define REG_FWISR 0x0134
#define REG_FTIMR 0x0138
-#define REG_FTISR 0x013C /* RTL8192C */
#define REG_PKTBUF_DBG_CTRL 0x0140
#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL+2)
#define REG_PKTBUF_DBG_DATA_L 0x0144
@@ -132,11 +127,9 @@
#define REG_MBIST_START 0x0174
#define REG_MBIST_DONE 0x0178
#define REG_MBIST_FAIL 0x017C
-#define REG_32K_CTRL 0x0194 /* RTL8188E */
#define REG_C2HEVT_MSG_NORMAL 0x01A0
#define REG_C2HEVT_CLEAR 0x01AF
#define REG_MCUTST_1 0x01c0
-#define REG_MCUTST_WOWLAN 0x01C7 /* Defined after 8188E series. */
#define REG_FMETHR 0x01C8
#define REG_HMETFR 0x01CC
#define REG_HMEBOX_0 0x01D0
@@ -526,44 +519,6 @@
#define MAX_MSS_DENSITY_1T 0x0A
/* */
-/* 8192C Cmd9346CR bits (Offset 0xA, 16bit) */
-/* */
-#define CmdEEPROM_En BIT5 /* EEPROM enable when set 1 */
-#define CmdEERPOMSEL BIT4 /* System EEPROM select, 0: boot from E-FUSE, 1: The EEPROM used is 9346 */
-#define Cmd9346CR_9356SEL BIT4
-
-/* */
-/* 8192C GPIO MUX Configuration Register (offset 0x40, 4 byte) */
-/* */
-#define GPIOSEL_GPIO 0
-#define GPIOSEL_ENBT BIT5
-
-/* */
-/* 8192C GPIO PIN Control Register (offset 0x44, 4 byte) */
-/* */
-#define GPIO_IN REG_GPIO_PIN_CTRL /* GPIO pins input value */
-#define GPIO_OUT (REG_GPIO_PIN_CTRL+1) /* GPIO pins output value */
-#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2) /* GPIO pins output enable when a bit is set to "1"; otherwise, input is configured. */
-#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)
-
-/* */
-/* 8811A GPIO PIN Control Register (offset 0x60, 4 byte) */
-/* */
-#define GPIO_IN_8811A REG_GPIO_PIN_CTRL_2 /* GPIO pins input value */
-#define GPIO_OUT_8811A (REG_GPIO_PIN_CTRL_2+1) /* GPIO pins output value */
-#define GPIO_IO_SEL_8811A (REG_GPIO_PIN_CTRL_2+2) /* GPIO pins output enable when a bit is set to "1"; otherwise, input is configured. */
-#define GPIO_MOD_8811A (REG_GPIO_PIN_CTRL_2+3)
-
-/* */
-/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */
-/* */
-#define HSIMR_GPIO12_0_INT_EN BIT0
-#define HSIMR_SPS_OCP_INT_EN BIT5
-#define HSIMR_RON_INT_EN BIT6
-#define HSIMR_PDN_INT_EN BIT7
-#define HSIMR_GPIO9_INT_EN BIT25
-
-/* */
/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) */
/* */
#define HSISR_GPIO12_0_INT BIT0
@@ -573,22 +528,6 @@
#define HSISR_GPIO9_INT BIT25
/* */
-/* 8192C (MSR) Media Status Register (Offset 0x4C, 8 bits) */
-/* */
-/*
-Network Type
-00: No link
-01: Link in ad hoc network
-10: Link in infrastructure network
-11: AP mode
-Default: 00b.
-*/
-#define MSR_NOLINK 0x00
-#define MSR_ADHOC 0x01
-#define MSR_INFRA 0x02
-#define MSR_AP 0x03
-
-/* */
/* USB INTR CONTENT */
/* */
#define USB_C2H_CMDID_OFFSET 0
@@ -787,206 +726,6 @@ Default: 00b.
#define IMR_WLANOFF BIT0
/* */
-/* 8723E series PCIE Host IMR/ISR bit */
-/* */
-/* IMR DW0 Bit 0-31 */
-#define PHIMR_TIMEOUT2 BIT31
-#define PHIMR_TIMEOUT1 BIT30
-#define PHIMR_PSTIMEOUT BIT29
-#define PHIMR_GTINT4 BIT28
-#define PHIMR_GTINT3 BIT27
-#define PHIMR_TXBCNERR BIT26
-#define PHIMR_TXBCNOK BIT25
-#define PHIMR_TSF_BIT32_TOGGLE BIT24
-#define PHIMR_BCNDMAINT3 BIT23
-#define PHIMR_BCNDMAINT2 BIT22
-#define PHIMR_BCNDMAINT1 BIT21
-#define PHIMR_BCNDMAINT0 BIT20
-#define PHIMR_BCNDOK3 BIT19
-#define PHIMR_BCNDOK2 BIT18
-#define PHIMR_BCNDOK1 BIT17
-#define PHIMR_BCNDOK0 BIT16
-#define PHIMR_HSISR_IND_ON BIT15
-#define PHIMR_BCNDMAINT_E BIT14
-#define PHIMR_ATIMEND_E BIT13
-#define PHIMR_ATIM_CTW_END BIT12
-#define PHIMR_HISRE_IND BIT11 /* RO. HISRE Indicator (HISRE & HIMRE is true, this bit is set to 1) */
-#define PHIMR_C2HCMD BIT10
-#define PHIMR_CPWM2 BIT9
-#define PHIMR_CPWM BIT8
-#define PHIMR_HIGHDOK BIT7 /* High Queue DMA OK Interrupt */
-#define PHIMR_MGNTDOK BIT6 /* Management Queue DMA OK Interrupt */
-#define PHIMR_BKDOK BIT5 /* AC_BK DMA OK Interrupt */
-#define PHIMR_BEDOK BIT4 /* AC_BE DMA OK Interrupt */
-#define PHIMR_VIDOK BIT3 /* AC_VI DMA OK Interrupt */
-#define PHIMR_VODOK BIT2 /* AC_VO DMA Interrupt */
-#define PHIMR_RDU BIT1 /* Receive Descriptor Unavailable */
-#define PHIMR_ROK BIT0 /* Receive DMA OK Interrupt */
-
-/* PCIE Host Interrupt Status Extension bit */
-#define PHIMR_BCNDMAINT7 BIT23
-#define PHIMR_BCNDMAINT6 BIT22
-#define PHIMR_BCNDMAINT5 BIT21
-#define PHIMR_BCNDMAINT4 BIT20
-#define PHIMR_BCNDOK7 BIT19
-#define PHIMR_BCNDOK6 BIT18
-#define PHIMR_BCNDOK5 BIT17
-#define PHIMR_BCNDOK4 BIT16
-/* bit12 15: RSVD */
-#define PHIMR_TXERR BIT11
-#define PHIMR_RXERR BIT10
-#define PHIMR_TXFOVW BIT9
-#define PHIMR_RXFOVW BIT8
-/* bit2-7: RSVD */
-#define PHIMR_OCPINT BIT1
-/* bit0: RSVD */
-
-#define UHIMR_TIMEOUT2 BIT31
-#define UHIMR_TIMEOUT1 BIT30
-#define UHIMR_PSTIMEOUT BIT29
-#define UHIMR_GTINT4 BIT28
-#define UHIMR_GTINT3 BIT27
-#define UHIMR_TXBCNERR BIT26
-#define UHIMR_TXBCNOK BIT25
-#define UHIMR_TSF_BIT32_TOGGLE BIT24
-#define UHIMR_BCNDMAINT3 BIT23
-#define UHIMR_BCNDMAINT2 BIT22
-#define UHIMR_BCNDMAINT1 BIT21
-#define UHIMR_BCNDMAINT0 BIT20
-#define UHIMR_BCNDOK3 BIT19
-#define UHIMR_BCNDOK2 BIT18
-#define UHIMR_BCNDOK1 BIT17
-#define UHIMR_BCNDOK0 BIT16
-#define UHIMR_HSISR_IND BIT15
-#define UHIMR_BCNDMAINT_E BIT14
-/* RSVD BIT13 */
-#define UHIMR_CTW_END BIT12
-/* RSVD BIT11 */
-#define UHIMR_C2HCMD BIT10
-#define UHIMR_CPWM2 BIT9
-#define UHIMR_CPWM BIT8
-#define UHIMR_HIGHDOK BIT7 /* High Queue DMA OK Interrupt */
-#define UHIMR_MGNTDOK BIT6 /* Management Queue DMA OK Interrupt */
-#define UHIMR_BKDOK BIT5 /* AC_BK DMA OK Interrupt */
-#define UHIMR_BEDOK BIT4 /* AC_BE DMA OK Interrupt */
-#define UHIMR_VIDOK BIT3 /* AC_VI DMA OK Interrupt */
-#define UHIMR_VODOK BIT2 /* AC_VO DMA Interrupt */
-#define UHIMR_RDU BIT1 /* Receive Descriptor Unavailable */
-#define UHIMR_ROK BIT0 /* Receive DMA OK Interrupt */
-
-/* USB Host Interrupt Status Extension bit */
-#define UHIMR_BCNDMAINT7 BIT23
-#define UHIMR_BCNDMAINT6 BIT22
-#define UHIMR_BCNDMAINT5 BIT21
-#define UHIMR_BCNDMAINT4 BIT20
-#define UHIMR_BCNDOK7 BIT19
-#define UHIMR_BCNDOK6 BIT18
-#define UHIMR_BCNDOK5 BIT17
-#define UHIMR_BCNDOK4 BIT16
-/* bit14-15: RSVD */
-#define UHIMR_ATIMEND_E BIT13
-#define UHIMR_ATIMEND BIT12
-#define UHIMR_TXERR BIT11
-#define UHIMR_RXERR BIT10
-#define UHIMR_TXFOVW BIT9
-#define UHIMR_RXFOVW BIT8
-/* bit2-7: RSVD */
-#define UHIMR_OCPINT BIT1
-/* bit0: RSVD */
-
-
-#define HAL_NIC_UNPLUG_ISR 0xFFFFFFFF /* The value when the NIC is unplugged for PCI. */
-#define HAL_NIC_UNPLUG_PCI_ISR 0xEAEAEAEA /* The value when the NIC is unplugged for PCI in PCI interrupt (page 3). */
-
-/* */
-/* 8188 IMR/ISR bits */
-/* */
-#define IMR_DISABLED_88E 0x0
-/* IMR DW0(0x0060-0063) Bit 0-31 */
-#define IMR_TXCCK_88E BIT30 /* TXRPT interrupt when CCX bit of the packet is set */
-#define IMR_PSTIMEOUT_88E BIT29 /* Power Save Time Out Interrupt */
-#define IMR_GTINT4_88E BIT28 /* When GTIMER4 expires, this bit is set to 1 */
-#define IMR_GTINT3_88E BIT27 /* When GTIMER3 expires, this bit is set to 1 */
-#define IMR_TBDER_88E BIT26 /* Transmit Beacon0 Error */
-#define IMR_TBDOK_88E BIT25 /* Transmit Beacon0 OK */
-#define IMR_TSF_BIT32_TOGGLE_88E BIT24 /* TSF Timer BIT32 toggle indication interrupt */
-#define IMR_BCNDMAINT0_88E BIT20 /* Beacon DMA Interrupt 0 */
-#define IMR_BCNDERR0_88E BIT16 /* Beacon Queue DMA Error 0 */
-#define IMR_HSISR_IND_ON_INT_88E BIT15 /* HSISR Indicator (HSIMR & HSISR is true, this bit is set to 1) */
-#define IMR_BCNDMAINT_E_88E BIT14 /* Beacon DMA Interrupt Extension for Win7 */
-#define IMR_ATIMEND_88E BIT12 /* CTWidnow End or ATIM Window End */
-#define IMR_HISR1_IND_INT_88E BIT11 /* HISR1 Indicator (HISR1 & HIMR1 is true, this bit is set to 1) */
-#define IMR_C2HCMD_88E BIT10 /* CPU to Host Command INT Status, Write 1 clear */
-#define IMR_CPWM2_88E BIT9 /* CPU power Mode exchange INT Status, Write 1 clear */
-#define IMR_CPWM_88E BIT8 /* CPU power Mode exchange INT Status, Write 1 clear */
-#define IMR_HIGHDOK_88E BIT7 /* High Queue DMA OK */
-#define IMR_MGNTDOK_88E BIT6 /* Management Queue DMA OK */
-#define IMR_BKDOK_88E BIT5 /* AC_BK DMA OK */
-#define IMR_BEDOK_88E BIT4 /* AC_BE DMA OK */
-#define IMR_VIDOK_88E BIT3 /* AC_VI DMA OK */
-#define IMR_VODOK_88E BIT2 /* AC_VO DMA OK */
-#define IMR_RDU_88E BIT1 /* Rx Descriptor Unavailable */
-#define IMR_ROK_88E BIT0 /* Receive DMA OK */
-
-/* IMR DW1(0x00B4-00B7) Bit 0-31 */
-#define IMR_BCNDMAINT7_88E BIT27 /* Beacon DMA Interrupt 7 */
-#define IMR_BCNDMAINT6_88E BIT26 /* Beacon DMA Interrupt 6 */
-#define IMR_BCNDMAINT5_88E BIT25 /* Beacon DMA Interrupt 5 */
-#define IMR_BCNDMAINT4_88E BIT24 /* Beacon DMA Interrupt 4 */
-#define IMR_BCNDMAINT3_88E BIT23 /* Beacon DMA Interrupt 3 */
-#define IMR_BCNDMAINT2_88E BIT22 /* Beacon DMA Interrupt 2 */
-#define IMR_BCNDMAINT1_88E BIT21 /* Beacon DMA Interrupt 1 */
-#define IMR_BCNDOK7_88E BIT20 /* Beacon Queue DMA OK Interrupt 7 */
-#define IMR_BCNDOK6_88E BIT19 /* Beacon Queue DMA OK Interrupt 6 */
-#define IMR_BCNDOK5_88E BIT18 /* Beacon Queue DMA OK Interrupt 5 */
-#define IMR_BCNDOK4_88E BIT17 /* Beacon Queue DMA OK Interrupt 4 */
-#define IMR_BCNDOK3_88E BIT16 /* Beacon Queue DMA OK Interrupt 3 */
-#define IMR_BCNDOK2_88E BIT15 /* Beacon Queue DMA OK Interrupt 2 */
-#define IMR_BCNDOK1_88E BIT14 /* Beacon Queue DMA OK Interrupt 1 */
-#define IMR_ATIMEND_E_88E BIT13 /* ATIM Window End Extension for Win7 */
-#define IMR_TXERR_88E BIT11 /* Tx Error Flag Interrupt Status, write 1 clear. */
-#define IMR_RXERR_88E BIT10 /* Rx Error Flag INT Status, Write 1 clear */
-#define IMR_TXFOVW_88E BIT9 /* Transmit FIFO Overflow */
-#define IMR_RXFOVW_88E BIT8 /* Receive FIFO Overflow */
-
-/*===================================================================
-=====================================================================
-Here the register defines are for 92C. When the define is as same with 92C,
-we will use the 92C's define for the consistency
-So the following defines for 92C is not entire!!!!!!
-=====================================================================
-=====================================================================*/
-/*
-Based on Datasheet V33---090401
-Register Summary
-Current IOREG MAP
-0x0000h ~ 0x00FFh System Configuration (256 Bytes)
-0x0100h ~ 0x01FFh MACTOP General Configuration (256 Bytes)
-0x0200h ~ 0x027Fh TXDMA Configuration (128 Bytes)
-0x0280h ~ 0x02FFh RXDMA Configuration (128 Bytes)
-0x0300h ~ 0x03FFh PCIE EMAC Reserved Region (256 Bytes)
-0x0400h ~ 0x04FFh Protocol Configuration (256 Bytes)
-0x0500h ~ 0x05FFh EDCA Configuration (256 Bytes)
-0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
-0x2000h ~ 0x3FFFh 8051 FW Download Region (8196 Bytes)
-*/
- /* */
- /* 8192C (TXPAUSE) transmission pause (Offset 0x522, 8 bits) */
- /* */
-/* Note: */
-/* The bits of stopping AC(VO/VI/BE/BK) queue in datasheet RTL8192S/RTL8192C are wrong, */
-/* the correct arrangement is VO - Bit0, VI - Bit1, BE - Bit2, and BK - Bit3. */
-/* 8723 and 88E may be not correct either in the earlier version. Confirmed with DD Tim. */
-/* By Bruce, 2011-09-22. */
-#define StopBecon BIT6
-#define StopHigh BIT5
-#define StopMgt BIT4
-#define StopBK BIT3
-#define StopBE BIT2
-#define StopVI BIT1
-#define StopVO BIT0
-
-/* */
/* 8192C (RCR) Receive Configuration Register (Offset 0x608, 32 bits) */
/* */
#define RCR_APPFCS BIT31 /* WMAC append FCS after pauload */
@@ -1557,10 +1296,6 @@ Current IOREG MAP
#define SDIO_HIMR_ATIMEND_E_MSK BIT26
#define SDIO_HIMR_CTWEND_MSK BIT27
-/* RTL8188E SDIO Specific */
-#define SDIO_HIMR_MCU_ERR_MSK BIT28
-#define SDIO_HIMR_TSF_BIT32_TOGGLE_MSK BIT29
-
/* SDIO Host Interrupt Service Routine */
#define SDIO_HISR_RX_REQUEST BIT0
#define SDIO_HISR_AVAL BIT1
@@ -1583,10 +1318,6 @@ Current IOREG MAP
#define SDIO_HISR_ATIMEND_E BIT26
#define SDIO_HISR_CTWEND BIT27
-/* RTL8188E SDIO Specific */
-#define SDIO_HISR_MCU_ERR BIT28
-#define SDIO_HISR_TSF_BIT32_TOGGLE BIT29
-
#define MASK_SDIO_HISR_CLEAR (SDIO_HISR_TXERR |\
SDIO_HISR_RXERR |\
SDIO_HISR_TXFOVW |\
@@ -1651,39 +1382,13 @@ Current IOREG MAP
#define GPS_HWPDN_SL BIT21 /* GPS HW PDn polarity control */
#define GPS_FUNC_EN BIT22 /* GPS function enable */
-/* 3 REG_LIFECTRL_CTRL */
-#define HAL92C_EN_PKT_LIFE_TIME_BK BIT3
-#define HAL92C_EN_PKT_LIFE_TIME_BE BIT2
-#define HAL92C_EN_PKT_LIFE_TIME_VI BIT1
-#define HAL92C_EN_PKT_LIFE_TIME_VO BIT0
-
-#define HAL92C_MSDU_LIFE_TIME_UNIT 128 /* in us, said by Tim. */
-
-/* 2 8192D PartNo. */
-#define PARTNO_92D_NIC (BIT7|BIT6)
-#define PARTNO_92D_NIC_REMARK (BIT5|BIT4)
-#define PARTNO_SINGLE_BAND_VS BIT3
-#define PARTNO_SINGLE_BAND_VS_REMARK BIT1
-#define PARTNO_CONCURRENT_BAND_VC (BIT3|BIT2)
-#define PARTNO_CONCURRENT_BAND_VC_REMARK (BIT1|BIT0)
-
/* */
/* General definitions */
/* */
-#define LAST_ENTRY_OF_TX_PKT_BUFFER_8188E 176
-#define LAST_ENTRY_OF_TX_PKT_BUFFER_8812 255
#define LAST_ENTRY_OF_TX_PKT_BUFFER_8723B 255
-#define LAST_ENTRY_OF_TX_PKT_BUFFER_8192C 255
-#define LAST_ENTRY_OF_TX_PKT_BUFFER_DUAL_MAC 127
#define POLLING_LLT_THRESHOLD 20
#define POLLING_READY_TIMEOUT_COUNT 1000
-
-/* GPIO BIT */
-#define HAL_8192C_HW_GPIO_WPS_BIT BIT2
-#define HAL_8192EU_HW_GPIO_WPS_BIT BIT7
-#define HAL_8188E_HW_GPIO_WPS_BIT BIT7
-
#endif /* __HAL_COMMON_H__ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_ioctl.h b/drivers/staging/rtl8723bs/include/rtw_ioctl.h
deleted file mode 100644
index 7179591cb01d..000000000000
--- a/drivers/staging/rtl8723bs/include/rtw_ioctl.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#ifndef _RTW_IOCTL_H_
-#define _RTW_IOCTL_H_
-
-/* 00 - Success */
-/* 11 - Error */
-#define STATUS_SUCCESS (0x00000000L)
-#define STATUS_PENDING (0x00000103L)
-
-#define STATUS_UNSUCCESSFUL (0xC0000001L)
-#define STATUS_INSUFFICIENT_RESOURCES (0xC000009AL)
-#define STATUS_NOT_SUPPORTED (0xC00000BBL)
-
-#define NDIS_STATUS_SUCCESS ((uint)STATUS_SUCCESS)
-#define NDIS_STATUS_PENDING ((uint)STATUS_PENDING)
-#define NDIS_STATUS_NOT_RECOGNIZED ((uint)0x00010001L)
-#define NDIS_STATUS_NOT_COPIED ((uint)0x00010002L)
-#define NDIS_STATUS_NOT_ACCEPTED ((uint)0x00010003L)
-#define NDIS_STATUS_CALL_ACTIVE ((uint)0x00010007L)
-
-#define NDIS_STATUS_FAILURE ((uint)STATUS_UNSUCCESSFUL)
-#define NDIS_STATUS_RESOURCES ((uint)STATUS_INSUFFICIENT_RESOURCES)
-#define NDIS_STATUS_CLOSING ((uint)0xC0010002L)
-#define NDIS_STATUS_BAD_VERSION ((uint)0xC0010004L)
-#define NDIS_STATUS_BAD_CHARACTERISTICS ((uint)0xC0010005L)
-#define NDIS_STATUS_ADAPTER_NOT_FOUND ((uint)0xC0010006L)
-#define NDIS_STATUS_OPEN_FAILED ((uint)0xC0010007L)
-#define NDIS_STATUS_DEVICE_FAILED ((uint)0xC0010008L)
-#define NDIS_STATUS_MULTICAST_FULL ((uint)0xC0010009L)
-#define NDIS_STATUS_MULTICAST_EXISTS ((uint)0xC001000AL)
-#define NDIS_STATUS_MULTICAST_NOT_FOUND ((uint)0xC001000BL)
-#define NDIS_STATUS_REQUEST_ABORTED ((uint)0xC001000CL)
-#define NDIS_STATUS_RESET_IN_PROGRESS ((uint)0xC001000DL)
-#define NDIS_STATUS_CLOSING_INDICATING ((uint)0xC001000EL)
-#define NDIS_STATUS_NOT_SUPPORTED ((uint)STATUS_NOT_SUPPORTED)
-#define NDIS_STATUS_INVALID_PACKET ((uint)0xC001000FL)
-#define NDIS_STATUS_OPEN_LIST_FULL ((uint)0xC0010010L)
-#define NDIS_STATUS_ADAPTER_NOT_READY ((uint)0xC0010011L)
-#define NDIS_STATUS_ADAPTER_NOT_OPEN ((uint)0xC0010012L)
-#define NDIS_STATUS_NOT_INDICATING ((uint)0xC0010013L)
-#define NDIS_STATUS_INVALID_LENGTH ((uint)0xC0010014L)
-#define NDIS_STATUS_INVALID_DATA ((uint)0xC0010015L)
-#define NDIS_STATUS_BUFFER_TOO_SHORT ((uint)0xC0010016L)
-#define NDIS_STATUS_INVALID_OID ((uint)0xC0010017L)
-#define NDIS_STATUS_ADAPTER_REMOVED ((uint)0xC0010018L)
-#define NDIS_STATUS_UNSUPPORTED_MEDIA ((uint)0xC0010019L)
-#define NDIS_STATUS_GROUP_ADDRESS_IN_USE ((uint)0xC001001AL)
-#define NDIS_STATUS_FILE_NOT_FOUND ((uint)0xC001001BL)
-#define NDIS_STATUS_ERROR_READING_FILE ((uint)0xC001001CL)
-#define NDIS_STATUS_ALREADY_MAPPED ((uint)0xC001001DL)
-#define NDIS_STATUS_RESOURCE_CONFLICT ((uint)0xC001001EL)
-#define NDIS_STATUS_NO_CABLE ((uint)0xC001001FL)
-
-#define NDIS_STATUS_INVALID_SAP ((uint)0xC0010020L)
-#define NDIS_STATUS_SAP_IN_USE ((uint)0xC0010021L)
-#define NDIS_STATUS_INVALID_ADDRESS ((uint)0xC0010022L)
-#define NDIS_STATUS_VC_NOT_ACTIVATED ((uint)0xC0010023L)
-#define NDIS_STATUS_DEST_OUT_OF_ORDER ((uint)0xC0010024L) /* cause 27 */
-#define NDIS_STATUS_VC_NOT_AVAILABLE ((uint)0xC0010025L) /* cause 35, 45 */
-#define NDIS_STATUS_CELLRATE_NOT_AVAILABLE ((uint)0xC0010026L) /* cause 37 */
-#define NDIS_STATUS_INCOMPATABLE_QOS ((uint)0xC0010027L) /* cause 49 */
-#define NDIS_STATUS_AAL_PARAMS_UNSUPPORTED ((uint)0xC0010028L) /* cause 93 */
-#define NDIS_STATUS_NO_ROUTE_TO_DESTINATION ((uint)0xC0010029L) /* cause 3 */
-
-extern struct iw_handler_def rtw_handlers_def;
-
-#endif /* #ifndef __INC_CEINFO_ */
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 757efeb49d08..380d8c9e1239 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -389,7 +389,7 @@ static int rtw_ndev_notifier_call(struct notifier_block *nb, unsigned long state
if (dev->netdev_ops->ndo_do_ioctl != rtw_ioctl)
return NOTIFY_DONE;
- netdev_info(dev, FUNC_NDEV_FMT " state:%lu\n", FUNC_NDEV_ARG(dev),
+ netdev_dbg(dev, FUNC_NDEV_FMT " state:%lu\n", FUNC_NDEV_ARG(dev),
state);
return NOTIFY_DONE;
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 805dc18fac0a..d5ad49de4c56 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -55,9 +55,9 @@ unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
*offset += cnt;
/*
- * Using scatter-gather. We have to go through the list one entry
- * at a time. Each s-g entry contains some number of pages, and
- * each page has to be kmap()'ed separately.
+ * Using scatter-gather. We have to go through the list one entry
+ * at a time. Each s-g entry contains some number of pages which
+ * have to be copied one at a time.
*/
} else {
struct scatterlist *sg =
@@ -92,13 +92,11 @@ unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
while (sglen > 0) {
unsigned int plen = min(sglen, (unsigned int)
PAGE_SIZE - poff);
- unsigned char *ptr = kmap(page);
if (dir == TO_XFER_BUF)
- memcpy(ptr + poff, buffer + cnt, plen);
+ memcpy_to_page(page, poff, buffer + cnt, plen);
else
- memcpy(buffer + cnt, ptr + poff, plen);
- kunmap(page);
+ memcpy_from_page(buffer + cnt, page, poff, plen);
/* Start at the beginning of the next page */
poff = 0;
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index a7c6eb07b62e..55cb00e8b0d1 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -81,6 +81,7 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
sm750_dev->pvMem =
ioremap_wc(sm750_dev->vidmem_start, sm750_dev->vidmem_size);
if (!sm750_dev->pvMem) {
+ iounmap(sm750_dev->pvReg);
pr_err("Map video memory failed\n");
ret = -EFAULT;
goto exit;
diff --git a/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset b/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
deleted file mode 100644
index c2359de17eaf..000000000000
--- a/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
+++ /dev/null
@@ -1,89 +0,0 @@
-This file describes sysfs entries beneath /devices/platform/visorchipset.
-
-What: install/error
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: used to send the ID of a string that should be displayed on
- s-Par's automatic installation progress screen when an error
- is encountered during installation. This field has no effect
- if not in installation mode.
-Users: sparmaintainer@unisys.com
-
-What: install/remainingsteps
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: used to set the value of the progress bar on the s-Par automatic
- installation progress screen. This field has no effect if not in
- installation mode.
-Users: sparmaintainer@unisys.com
-
-What: install/textid
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: used to send the ID of a string that should be displayed on
- s-Par's automatic installation progress screen. Setting this
- field when not in installation mode (boottotool was set on
- the previous guest boot) has no effect.
-Users: sparmaintainer@unisys.com
-
-What: install/boottotool
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: The boottotool flag controls s-Par behavior on the next boot of
- this guest. Setting the flag will cause the guest to boot from
- the utility and installation image, which will use the value in
- the toolaction field to determine what operation is being
- requested.
-Users: sparmaintainer@unisys.com
-
-What: install/toolaction
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: This field is used to tell s-Par which type of recovery tool
- action to perform on the next guest boot-up. The meaning of the
- value is dependent on the type of installation software used to
- commission the guest.
-Users: sparmaintainer@unisys.com
-
-What: parahotplug/deviceenabled
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: This entry is used by a Unisys support script installed on the
- guest, and triggered by a udev event. The support script is
- responsible for enabling and disabling SR-IOV devices when the
- PF device is being recovered in another guest.
-
- Some SR-IOV devices have problems when the PF is reset without
- first disabling all VFs attached to that PF. s-Par handles this
- situation by sending a message to guests using these VFs, and
- the script will disable the device. When the PF is recovered,
- another message is sent to the guests to re-enable the VFs.
-
- The parahotplug/deviceenabled interface is used to acknowledge
- the recovery message.
-Users: sparmaintainer@unisys.com
-
-What: parahotplug/devicedisabled
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: This entry is used by a Unisys support script installed on the
- guest, and triggered by a udev event. The support script is
- responsible for enabling and disabling SR-IOV devices when the
- PF device is being recovered in another guest.
-
- Some SR-IOV devices have problems when the PF is reset without
- first disabling all VFs attached to that PF. s-Par handles this
- situation by sending a message to guests using these VFs, and
- the script will disable the device. When the PF is recovered,
- another message is sent to the guests to re-enable the VFs.
-
- The parahotplug/devicedisaabled interface is used to acknowledge
- the initial recovery message.
-Users: sparmaintainer@unisys.com
diff --git a/drivers/staging/unisys/Documentation/overview.txt b/drivers/staging/unisys/Documentation/overview.txt
deleted file mode 100644
index cf29f884cbe0..000000000000
--- a/drivers/staging/unisys/Documentation/overview.txt
+++ /dev/null
@@ -1,337 +0,0 @@
-1. Overview
------------
-
-This document describes the driver set for Unisys Secure Partitioning
-(s-Par(R)).
-
-s-Par is firmware that provides hardware partitioning capabilities for
-splitting large-scale Intel x86 servers into multiple isolated
-partitions. s-Par provides a set of para-virtualized device drivers to
-allow guest partitions on the same server to share devices that would
-normally be unsharable, specifically:
-
-* visornic - network interface
-* visorhba - scsi disk adapter
-* visorinput - keyboard and mouse
-
-These drivers conform to the standard Linux bus/device model described
-within Documentation/driver-api/driver-model/, and utilize a driver named
-visorbus to present the virtual busses involved. Drivers in the 'visor*'
-driver set are commonly referred to as "guest drivers" or "client drivers".
-All drivers except visorbus expose a device of a specific usable class to the
-Linux guest environment (e.g., block, network, or input), and are collectively
-referred to as "function drivers".
-
-The back-end for each device is owned and managed by a small,
-single-purpose service partition in the s-Par firmware, which communicates
-with each guest partition sharing that device through an area of shared memory
-called a "channel". In s-Par nomenclature, the back-end is often referred to
-as the "service partition", "IO partition" (for virtual network and scsi disk
-devices), or "console partition" (for virtual keyboard and mouse devices).
-
-Each virtual device requires exactly 1 dedicated channel, which the guest
-driver and back-end use to communicate. The hypervisor need not intervene
-(other than normal interrupt handling) in the interactions that occur across
-this channel.
-
-NOT covered in this document:
-
-* s-Par also supports sharing physical PCI adapters via SR-IOV, but
- because this requires no specific support in the guest partitions, it will
- not be discussed in this document. Shared SR-IOV devices should be used
- wherever possible for highest performance.
-
-* Because the s-Par back-end provides a standard EFI framebuffer to each
- guest, the already-existing efifb Linux driver is used to provide guest
- video access. Thus, the only s-Par-unique support that is necessary to
- provide a guest graphics console are for keyboard and mouse (via visorinput).
-
-
-2. Driver Descriptions
-----------------------
-
-2.1. visorbus
--------------
-
-2.1.1. Overview
----------------
-
-The visorbus driver handles the virtual busses on which all of the virtual
-devices reside. It provides a registration function named
-visorbus_register_visor_driver() that is called by each of the function
-drivers at initialization time, which the function driver uses to tell
-visorbus about the device classes (via specifying a list of device type
-GUIDs) it wants to handle. For use by function drivers, visorbus provides
-implementation for struct visor_driver and struct visor_device, as well
-as utility functions for communicating with the back-end.
-
-visorbus is associated with ACPI id "PNP0A07" in modules.alias, so if built
-as a module it will typically be loaded automatically via standard udev or
-systemd (God help us) configurations.
-
-visorbus can similarly force auto-loading of function drivers for virtual
-devices it discovers, as it includes a MODALIAS environment variable of this
-form in the hotplug uevent environment when each virtual device is
-discovered:
-
- visorbus:<device type GUID>
-
-visorbus notifies each function driver when a device of its registered class
-arrives and departs, by calling the function driver's probe() and remove()
-methods.
-
-The actual struct device objects that correspond to each virtual bus and
-each virtual device are created and owned by visorbus. These device objects
-are created in response to messages from the s-Par back-end received on a
-special control channel called the "controlvm channel" (each guest partition
-has access to exactly 1 controlvm channel), and have a lifetime that is
-independent of the function drivers that control them.
-
-2.1.2. "struct visor device" Function Driver Interfaces
--------------------------------------------------------
-
-The interface between visorbus and its function drivers is defined in
-visorbus.h, and described below.
-
-When a visor function driver loads, it calls visorbus_register_visor_driver()
-to register itself with visorbus. The significant information passed in this
-exchange is as follows:
-
-* the GUID(s) of the channel type(s) that are handled by this driver, as
- well as a "friendly name" identifying each (this will be published under
- /sys/devices/visorbus<x>/dev<y>)
-
-* the addresses of callback functions to be called whenever a virtual
- device/channel with the appropriate channel-type GUID(s) appears or
- disappears
-
-* the address of a "channel_interrupt" function, which will be automatically
- called at specific intervals to enable the driver to poll the device
- channel for activity
-
-The following functions, implemented within each function driver, are called
-automatically by the visorbus driver at appropriate times (a minimal driver
-skeleton illustrating them follows this list):
-
-* The probe() function notifies about the creation of each new virtual
- device/channel instance.
-
-* The remove() function notifies about the destruction of a virtual
- device/channel instance.
-
-* The channel_interrupt() function is called at frequent intervals to
- give the function driver an opportunity to poll the virtual device channel
- for requests. Information is passed to this function to enable the
- function driver to use the visorchannel_signalinsert() and
- visorchannel_signalremove() functions to respond to and initiate activity
- over the channel. (Note that since it is the visorbus driver that
- determines when this is called, it is very easy to switch to
- interrupt-driven mechanisms when available for particular virtual device
- types.)
-
-* The pause() function is called should it ever be necessary to direct the
- function driver to temporarily stop accessing the device channel. An
- example of when this is needed is when the service partition implementing
- the back-end of the virtual device needs to be recovered. After a
- successful return of pause(), the function driver must not access the
- device channel until a subsequent resume() occurs.
-
-* The resume() function is the "book-end" to pause(): it informs the function
- driver that it may access the device channel again.
-
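For orientation only, a skeletal function driver that wires these hooks
together might look roughly like the sketch below. The hook names, the
registration call, and the visorbus/visorchannel helpers are the ones described
in this document; the "mydrv" identifiers and the zeroed GUID are purely
illustrative placeholders, not part of the visorbus API.

    #include <linux/module.h>
    #include <linux/visorbus.h>

    /* Placeholder GUID; a real driver uses the GUID published by its back-end. */
    #define MYDRV_CHANNEL_GUID \
            GUID_INIT(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)

    static struct visor_channeltype_descriptor mydrv_channel_types[] = {
            /* GUID, "friendly name", minimum channel size, version */
            { MYDRV_CHANNEL_GUID, "mydrv", sizeof(struct channel_header), 1 },
            {}
    };
    MODULE_DEVICE_TABLE(visorbus, mydrv_channel_types);

    static int mydrv_probe(struct visor_device *dev)
    {
            /* allocate per-device state here, then start channel polling */
            visorbus_enable_channel_interrupts(dev);
            return 0;
    }

    static void mydrv_remove(struct visor_device *dev)
    {
            visorbus_disable_channel_interrupts(dev);
            /* free per-device state here */
    }

    static void mydrv_channel_interrupt(struct visor_device *dev)
    {
            /* poll the device channel: visorchannel_signalremove() pulls
             * responses, visorchannel_signalinsert() queues new requests
             */
    }

    static int mydrv_pause(struct visor_device *dev,
                           visorbus_state_complete_func complete_func)
    {
            /* stop touching the device channel, then acknowledge */
            complete_func(dev, 0);
            return 0;
    }

    static int mydrv_resume(struct visor_device *dev,
                            visorbus_state_complete_func complete_func)
    {
            /* the device channel may be accessed again */
            complete_func(dev, 0);
            return 0;
    }

    static struct visor_driver mydrv_driver = {
            .name = "mydrv",
            .owner = THIS_MODULE,
            .channel_types = mydrv_channel_types,
            .probe = mydrv_probe,
            .remove = mydrv_remove,
            .pause = mydrv_pause,
            .resume = mydrv_resume,
            .channel_interrupt = mydrv_channel_interrupt,
    };

    module_driver(mydrv_driver, visorbus_register_visor_driver,
                  visorbus_unregister_visor_driver);

The probe()/remove() bodies above are stubs; a real driver (see visorhba and
visornic) allocates its device state, registers with the appropriate Linux
subsystem, and drives the device channel from channel_interrupt().
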
-2.1.3. sysfs Advertised Information
------------------------------------
-
-Because visorbus is a standard Linux bus driver in the model described in
-Documentation/driver-api/driver-model/, the hierarchy of s-Par virtual devices is
-published in the sysfs tree beneath /sys/bus/visorbus/, e.g.,
-/sys/bus/visorbus/devices/ might look like:
-
- vbus1:dev1 -> ../../../devices/visorbus1/vbus1:dev1
- vbus1:dev2 -> ../../../devices/visorbus1/vbus1:dev2
- vbus1:dev3 -> ../../../devices/visorbus1/vbus1:dev3
- vbus2:dev0 -> ../../../devices/visorbus2/vbus2:dev0
- vbus2:dev1 -> ../../../devices/visorbus2/vbus2:dev1
- vbus2:dev2 -> ../../../devices/visorbus2/vbus2:dev2
- visorbus1 -> ../../../devices/visorbus1
- visorbus2 -> ../../../devices/visorbus2
-
-visor_device notes:
-
-* Each visorbus<n> entry denotes the existence of a struct visor_device
- denoting virtual bus #<n>. A unique s-Par channel exists for each such
- virtual bus.
-
-* Virtual bus numbers uniquely identify s-Par back-end service partitions.
- In this example, bus 1 corresponds to the s-Par console partition
- (controls keyboard, video, and mouse), whereas bus 2 corresponds to the
- s-Par IO partition (controls network and disk).
-
-* Each vbus<x>:dev<y> entry denotes the existence of a struct visor_device
- denoting virtual device #<y> outboard of virtual bus #<x>. A unique s-Par
- channel exists for each such virtual device.
-
-* If a function driver has loaded and claimed a particular device, the
- bus/visorbus/devices/vbus<x>:dev<y>/driver symlink will indicate that
- function driver.
-
-Every active visorbus device will have a sysfs subtree under:
-
- /sys/devices/visorbus<x>/vbus<x>:dev<y>/
-
-The following files exist under /sys/devices/visorbus<x>/vbus<x>:dev<y>:
-
- subsystem link to sysfs tree that describes the
- visorbus bus type; e.g.:
- ../../../bus/visorbus
-
- driver link to sysfs tree that describes the
- function driver controlling this device;
- e.g.:
- ../../../bus/visorbus/drivers/visorhba
- Note that this "driver" link will not exist
- if the appropriate function driver has not
- been loaded yet.
-
- channel properties of the device channel (all in
- ASCII text format)
-
- clientpartition handle identifying the guest (client) side
- of this channel, e.g. 0x10000000.
-
- nbytes total size of this channel in bytes
-
- physaddr the guest physical address for the base of
- the channel
-
- typeguid a GUID identifying the channel type, in
- xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx notation
-
- typename a "friendly name" for this channel type, e.g.,
- "keyboard". Note that this name is provided by
- a particular function driver, so "typename"
- will return an empty string until AFTER the
- appropriate function driver controlling this
- channel type is loaded
-
- zoneguid a GUID identifying the channel zone, in
- xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx notation
-
-
-2.2. visorhba
--------------
-
-The visorhba driver registers with visorbus as the function driver to
-handle virtual scsi disk devices, specified using the
-VISOR_VHBA_CHANNEL_GUID type in the visorbus_register_visor_driver()
-call. visorhba uses scsi_add_host() to expose a Linux block device
-(e.g., /sys/block/) in the guest environment for each s-Par virtual device.
-
-visorhba provides access to a shared SCSI host bus adapter and one or more
-disk devices, by proxying SCSI commands between the guest and the service
-partition that owns the shared SCSI adapter, using a channel between the
-guest and the service partition. The disks that appear on the shared bus
-are defined by the s-Par configuration and enforced by the service partition,
-while the guest driver handles sending commands and processing responses. Each
-disk is exposed to a guest as a whole device. Sharing the bus adapter in this
-way provides resiliency: should the device encounter an error, only the
-service partition is rebooted and the device is reinitialized, which allows
-guests to continue running and to recover from the error.
-
-When compiled as a module, visorhba can be autoloaded by visorbus in
-standard udev/systemd environments, as it includes the modules.alias
-definition:
-
- "visorbus:"+VISOR_VHBA_CHANNEL_GUID_STR
-
-i.e.:
-
- alias visorbus:414815ed-c58c-11da-95a9-00e08161165f visorhba
-
-
-2.3. visornic
--------------
-
-The visornic driver registers with visorbus as the function driver to
-handle virtual network devices, specified using the
-VISOR_VNIC_CHANNEL_GUID type in the visorbus_register_visor_driver()
-call. visornic uses register_netdev() to expose a Linux device of class net
-(e.g., /sys/class/net/) in the guest environment for each s-Par virtual
-device.
-
-visornic provides a paravirtualized network interface to a
-guest by proxying buffer information between the guest and the service
-partition that owns the shared network interface, using a channel
-between the guest and the service partition. The connectivity of this
-interface with the shared interface and possibly other guest
-partitions is defined by the s-Par configuration and enforced by the
-service partition; the guest driver handles communication and link
-status.
-
-When compiled as a module, visornic can be autoloaded by visorbus in
-standard udev/systemd environments, as it includes the modules.alias
-definition:
-
- "visorbus:"+VISOR_VNIC_CHANNEL_GUID_STR
-
-i.e.:
-
- alias visorbus:8cd5994d-c58e-11da-95a9-00e08161165f visornic
-
-
-2.4. visorinput
----------------
-
-The visorinput driver registers with visorbus as the function driver to
-handle human input devices, specified using the
-VISOR_KEYBOARD_CHANNEL_GUID and VISOR_MOUSE_CHANNEL_GUID
-types in the visorbus_register_visor_driver() call. visorinput uses
-input_register_device() to expose devices of class input
-(e.g., /sys/class/input/) for virtual keyboard and virtual mouse devices.
-An s-Par virtual keyboard device maps 1-to-1 with a Linux input device
-named "visor Keyboard", while an s-Par virtual mouse device has 2 Linux input
-devices created for it: 1 named "visor Wheel", and 1 named "visor Mouse".
-
-Because these are registered as input-class devices, modern versions of X will
-automatically find and properly use s-Par virtual keyboard and mouse devices.
-As the s-Par back-end reports keyboard and mouse activity via events on the
-virtual device channel, the visorinput driver delivers the activity to the
-Linux environment by calling input_report_key() and input_report_abs().
-
-You can interact with the guest console using the usyscon Partition Desktop
-(a.k.a., "pd") application, provided as part of s-Par. After installing the
-usyscon Partition Desktop into a Linux environment via the
-usyscon_partitiondesktop-*.rpm, or into a Windows environment via
-PartitionDesktop.msi, you will be able to launch a console for your guest
-Linux environment by clicking the console icon in the s-Par web UI.
-
-When compiled as a module, visorinput can be autoloaded by visorbus in
-standard udev/systemd environments, as it includes the modules.alias
-definition:
-
- "visorbus:"+VISOR_MOUSE_CHANNEL_GUID_STR
- "visorbus:"+VISOR_KEYBOARD_CHANNEL_GUID_STR
-
-i.e.:
-
- alias visorbus:c73416d0-b0b8-44af-b304-9d2ae99f1b3d visorinput
- alias visorbus:addf07d4-94a9-46e2-81c3-61abcdbdbd87 visorinput
-
-
-3. Minimum Required Driver Set
-------------------------------
-
-visorbus is required for every Linux guest running under s-Par.
-
-visorhba is typically required for a Linux guest running under s-Par, because
-the guest boot disk is normally a virtual device provided by the s-Par
-back-end (the default configuration). However, for advanced configurations
-where the Linux guest boots via an SR-IOV-provided HBA or SAN disk, for
-example, visorhba is not technically required.
-
-visornic is typically required for a Linux guest running under s-Par, because
-the guest network interface is normally a virtual device provided by the
-s-Par back-end (the default configuration). However, for configurations where
-the Linux guest is provided with an SR-IOV NIC, for example, visornic is not
-technically required.
-
-visorinput is only required for a Linux guest running under s-Par if you
-require graphics-mode access to your guest console.
diff --git a/drivers/staging/unisys/Kconfig b/drivers/staging/unisys/Kconfig
deleted file mode 100644
index 43fe1ce538e1..000000000000
--- a/drivers/staging/unisys/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Unisys SPAR driver configuration
-#
-menuconfig UNISYSSPAR
- bool "Unisys SPAR driver support"
- help
- Support for the Unisys SPAR drivers
-
-if UNISYSSPAR
-
-source "drivers/staging/unisys/visornic/Kconfig"
-source "drivers/staging/unisys/visorinput/Kconfig"
-source "drivers/staging/unisys/visorhba/Kconfig"
-
-endif # UNISYSSPAR
diff --git a/drivers/staging/unisys/MAINTAINERS b/drivers/staging/unisys/MAINTAINERS
deleted file mode 100644
index aaddc619c329..000000000000
--- a/drivers/staging/unisys/MAINTAINERS
+++ /dev/null
@@ -1,5 +0,0 @@
-Unisys s-Par drivers
-M: David Kershner <sparmaintainer@unisys.com>
-S: Maintained
-F: drivers/staging/unisys/Documentation/overview.txt
-F: drivers/staging/unisys/
diff --git a/drivers/staging/unisys/Makefile b/drivers/staging/unisys/Makefile
deleted file mode 100644
index c0f76cc196a6..000000000000
--- a/drivers/staging/unisys/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Unisys SPAR drivers
-#
-obj-$(CONFIG_UNISYS_VISORNIC) += visornic/
-obj-$(CONFIG_UNISYS_VISORINPUT) += visorinput/
-obj-$(CONFIG_UNISYS_VISORHBA) += visorhba/
diff --git a/drivers/staging/unisys/TODO b/drivers/staging/unisys/TODO
deleted file mode 100644
index d863f266bf76..000000000000
--- a/drivers/staging/unisys/TODO
+++ /dev/null
@@ -1,16 +0,0 @@
-TODO:
- - enhance visornic to use channel_interrupt() hook instead of a
- kernel thread
- - enhance visorhba to use channel_interrupt() hook instead of a
- kernel thread
- - teach visorbus to handle virtual interrupts triggered by s-Par
- back-end, and call function driver's channel_interrupt() function
- when they occur
- - enhance debugfs interfaces (e.g., per device, etc.)
- - upgrade/remove deprecated workqueue operations
- - move individual drivers into proper driver subsystems
-
-Patches to:
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- Ken Cox <jkc@redhat.com>
- Unisys s-Par maintainer mailing list <sparmaintainer@unisys.com>
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
deleted file mode 100644
index 9ef812c0bc42..000000000000
--- a/drivers/staging/unisys/include/iochannel.h
+++ /dev/null
@@ -1,571 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (C) 2010 - 2016 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#ifndef __IOCHANNEL_H__
-#define __IOCHANNEL_H__
-
-/*
- * Everything needed for IOPart-GuestPart communication is defined in
- * this file. Note: Everything is OS-independent because this file is
- * used by Windows, Linux and possibly EFI drivers.
- *
- * Communication flow between the IOPart and GuestPart uses the channel header's
- * channel state. The following states are currently being used:
- * UNINIT(All Zeroes), CHANNEL_ATTACHING, CHANNEL_ATTACHED, CHANNEL_OPENED
- *
- * Additional states will be used later. No locking is needed to switch between
- * states due to the following rules:
- *
- * 1. IOPart is the only partition allowed to change from UNINIT
- * 2. IOPart is the only partition allowed to change from
- *    CHANNEL_ATTACHING
- * 3. GuestPart is the only partition allowed to change from
- *    CHANNEL_ATTACHED
- *
- * The state changes are the following: IOPart sees the channel is in UNINIT,
- * UNINIT -> CHANNEL_ATTACHING (performed only by IOPart)
- * CHANNEL_ATTACHING -> CHANNEL_ATTACHED (performed only by IOPart)
- * CHANNEL_ATTACHED -> CHANNEL_OPENED (performed only by GuestPart)
- */
-
-#include <linux/uuid.h>
-#include <linux/skbuff.h>
-#include <linux/visorbus.h>
-
-/*
- * Must increment these whenever you insert or delete fields within this channel
- * struct. Also increment whenever you change the meaning of fields within this
- * channel struct so as to break pre-existing software. Note that you can
- * usually add fields to the END of the channel struct without needing to
- * increment this.
- */
-#define VISOR_VHBA_CHANNEL_VERSIONID 2
-#define VISOR_VNIC_CHANNEL_VERSIONID 2
-
-/*
- * Everything necessary to handle SCSI & NIC traffic between Guest Partition and
- * IO Partition is defined below.
- */
-
-/*
- * Define the two queues per data channel between iopart and ioguestparts.
- * IOCHAN_TO_IOPART -- used by guest to 'insert' signals to iopart.
- * IOCHAN_FROM_IOPART -- used by guest to 'remove' signals from IO part.
- */
-#define IOCHAN_TO_IOPART 0
-#define IOCHAN_FROM_IOPART 1
-
-/* Size of cdb - i.e., SCSI cmnd */
-#define MAX_CMND_SIZE 16
-
-/* Unisys-specific DMA direction values */
-enum uis_dma_data_direction {
- UIS_DMA_BIDIRECTIONAL = 0,
- UIS_DMA_TO_DEVICE = 1,
- UIS_DMA_FROM_DEVICE = 2,
- UIS_DMA_NONE = 3
-};
-
-#define MAX_SENSE_SIZE 64
-#define MAX_PHYS_INFO 64
-
-/*
- * enum net_types - Various types of network packets that can be sent in cmdrsp.
- * @NET_RCV_POST: Submit buffer to hold receiving incoming packet.
- * @NET_RCV: uisnic -> visornic. Incoming packet received.
- * @NET_XMIT: visornic -> uisnic. For outgoing packet.
- * @NET_XMIT_DONE: uisnic -> visornic. Outgoing packet xmitted.
- * @NET_RCV_ENBDIS: visornic -> uisnic. Enable/Disable packet reception.
- * @NET_RCV_ENBDIS_ACK: uisnic -> visornic. Acknowledge enable/disable packet.
- * @NET_RCV_PROMISC: visornic -> uisnic. Enable/Disable promiscuous mode.
- * @NET_CONNECT_STATUS: uisnic -> visornic. Indicate the loss or restoration of
- * a network connection.
- * @NET_MACADDR: uisnic -> visornic. Indicates the client has requested
- * to update its MAC address.
- * @NET_MACADDR_ACK: MAC address acknowledge.
- */
-enum net_types {
- NET_RCV_POST = 0,
- NET_RCV,
- NET_XMIT,
- NET_XMIT_DONE,
- NET_RCV_ENBDIS,
- NET_RCV_ENBDIS_ACK,
- /* Reception */
- NET_RCV_PROMISC,
- NET_CONNECT_STATUS,
- NET_MACADDR,
- NET_MACADDR_ACK,
-};
-
-/* Minimum eth data size */
-#define ETH_MIN_DATA_SIZE 46
-#define ETH_MIN_PACKET_SIZE (ETH_HLEN + ETH_MIN_DATA_SIZE)
-
-/* Maximum data size */
-#define VISOR_ETH_MAX_MTU 16384
-
-#ifndef MAX_MACADDR_LEN
-/* Number of bytes in MAC address */
-#define MAX_MACADDR_LEN 6
-#endif
-
-/* Various types of scsi task mgmt commands. */
-enum task_mgmt_types {
- TASK_MGMT_ABORT_TASK = 1,
- TASK_MGMT_BUS_RESET,
- TASK_MGMT_LUN_RESET,
- TASK_MGMT_TARGET_RESET,
-};
-
-/* Various types of vdisk mgmt commands. */
-enum vdisk_mgmt_types {
- VDISK_MGMT_ACQUIRE = 1,
- VDISK_MGMT_RELEASE,
-};
-
-struct phys_info {
- u64 pi_pfn;
- u16 pi_off;
- u16 pi_len;
-} __packed;
-
-#define MIN_NUMSIGNALS 64
-
-/* Structs with pragma pack. */
-
-struct guest_phys_info {
- u64 address;
- u64 length;
-} __packed;
-
-/*
- * struct uisscsi_dest
- * @channel: Bus number.
- * @id: Target number.
- * @lun: Logical unit number.
- */
-struct uisscsi_dest {
- u32 channel;
- u32 id;
- u32 lun;
-} __packed;
-
-struct vhba_wwnn {
- u32 wwnn1;
- u32 wwnn2;
-} __packed;
-
-/*
- * struct vhba_config_max
- * @max_channel: Maximum channel for devices attached to this bus.
- * @max_id: Maximum SCSI ID for devices attached to bus.
- * @max_lun: Maximum SCSI LUN for devices attached to bus.
- * @cmd_per_lun: Maximum number of outstanding commands per LUN.
- * @max_io_size: Maximum io size for devices attached to this bus. Max io size
- * is often determined by the resources of the HBA,
- * e.g., max scatter-gather list length * page size / sector size.
- *
- * WARNING: Values stored in this structure must contain maximum counts (not
- * maximum values).
- *
- * 20 bytes
- */
-struct vhba_config_max {
- u32 max_channel;
- u32 max_id;
- u32 max_lun;
- u32 cmd_per_lun;
- u32 max_io_size;
-} __packed;
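As a worked example of how these maxima feed the guest SCSI stack (see
visorhba_probe() later in this patch): with an illustrative max_io_size of
262144 bytes and 4 KiB pages, the guest host template ends up with
max_sectors = 262144 >> 9 = 512 and sg_tablesize = 262144 / 4096 = 64, with
sg_tablesize subsequently clamped to MAX_PHYS_INFO if it would exceed it. The
262144 figure is illustrative only; the real value is whatever the back-end
publishes in the channel.
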
-
-/*
- * struct uiscmdrsp_scsi
- *
- * @handle: The handle to the cmd that was received. Send it back as
- * is in the rsp packet.
- * @cmnd: The cdb for the command.
- * @bufflen: Length of data to be transferred out or in.
- * @guest_phys_entries: Number of entries in scatter-gather list.
- * @struct gpi_list: Physical address information for each fragment.
- * @data_dir: Direction of the data, if any.
- * @struct vdest: Identifies the virtual hba, id, channel, lun to which
- * cmd was sent.
- * @linuxstat: Original Linux status used by Linux vdisk.
- * @scsistat: The scsi status.
- * @addlstat: Non-scsi status.
- * @sensebuf: Sense info in case cmd failed. sensebuf holds the
- * sense_data struct. See sense_data struct for more
- * details.
- * @*vdisk: Pointer to the vdisk to clean up when IO completes.
- * @no_disk_result: Used to return no disk inquiry result when
- * no_disk_result is set to 1
- * scsi.scsistat is SAM_STAT_GOOD
- * scsi.addlstat is 0
- * scsi.linuxstat is SAM_STAT_GOOD
- * That is, there is NO error.
- */
-struct uiscmdrsp_scsi {
- u64 handle;
- u8 cmnd[MAX_CMND_SIZE];
- u32 bufflen;
- u16 guest_phys_entries;
- struct guest_phys_info gpi_list[MAX_PHYS_INFO];
- u32 data_dir;
- struct uisscsi_dest vdest;
- /* Needed to queue the rsp back to cmd originator. */
- int linuxstat;
- u8 scsistat;
- u8 addlstat;
-#define ADDL_SEL_TIMEOUT 4
- /* The following fields are needed to determine the result of the command. */
- u8 sensebuf[MAX_SENSE_SIZE];
- void *vdisk;
- int no_disk_result;
-} __packed;
-
-/*
- * Defines to support sending correct inquiry result when no disk is
- * configured.
- *
- * From SCSI SPC2 -
- *
- * If the target is not capable of supporting a device on this logical unit, the
- * device server shall set this field to 7Fh (PERIPHERAL QUALIFIER set to 011b
- * and PERIPHERAL DEVICE TYPE set to 1Fh).
- *
- * The device server is capable of supporting the specified peripheral device
- * type on this logical unit. However, the physical device is not currently
- * connected to this logical unit.
- */
-
-/*
- * Peripheral qualifier of 0x3
- * Peripheral type of 0x1f
- * Specifies no device but target present
- */
-#define DEV_NOT_CAPABLE 0x7f
-/*
- * Peripheral qualifier of 0x1
- * Peripheral type of 0 - disk
- * Specifies device capable, but not present
- */
-#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20
-/* HiSup = 1; shows support for report luns must be returned for lun 0. */
-#define DEV_HISUPPORT 0x10
-
-/*
- * NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length
- * in buf[4] some Linux code accesses bytes beyond 5 to retrieve vendor, product
- * and revision. Yikes! So let us always send back 36 bytes, the minimum for
- * inquiry result.
- */
-#define NO_DISK_INQUIRY_RESULT_LEN 36
-/* 5 bytes minimum for inquiry result */
-#define MIN_INQUIRY_RESULT_LEN 5
-
-/* SCSI device version for no disk inquiry result */
-/* indicates SCSI SPC2 (SPC3 is 5) */
-#define SCSI_SPC2_VER 4
-
-/* Struct and Defines to support sense information. */
-
-/*
- * The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
- * initialized in exactly the manner that is recommended in Windows (hence the
- * odd values).
- * When set, these fields will have the following values:
- * ErrorCode = 0x70 indicates current error
- * Valid = 1 indicates sense info is valid
- * SenseKey contains sense key as defined by SCSI specs.
- * AdditionalSenseCode contains the additional sense code as defined by SCSI specs.
- * AdditionalSenseCodeQualifier contains the qualifier to the sense code as
- * defined by the SCSI docs.
- * AdditionalSenseLength will be sizeof(sense_data)-8=10.
- */
-struct sense_data {
- u8 errorcode:7;
- u8 valid:1;
- u8 segment_number;
- u8 sense_key:4;
- u8 reserved:1;
- u8 incorrect_length:1;
- u8 end_of_media:1;
- u8 file_mark:1;
- u8 information[4];
- u8 additional_sense_length;
- u8 command_specific_information[4];
- u8 additional_sense_code;
- u8 additional_sense_code_qualifier;
- u8 fru_code;
- u8 sense_key_specific[3];
-} __packed;
-
-/*
- * struct net_pkt_xmt
- * @len: Full length of data in the packet.
- * @num_frags: Number of fragments in frags containing data.
- * @struct phys_info frags: Physical page information.
- * @ethhdr: The ethernet header.
- * @struct lincsum: These are needed for csum at uisnic end.
- * @valid: 1 = struct is valid - else ignore.
- * @hrawoffv: 1 = hrawoff is valid.
- * @nhrawoffv: 1 = nhrawoff is valid.
- * @protocol: Specifies packet protocol.
- * @csum: Value used to set skb->csum at IOPart.
- * @hrawoff: Value used to set skb->h.raw at IOPart. hrawoff points to
- * the start of the TRANSPORT LAYER HEADER.
- * @nhrawoff: Value used to set skb->nh.raw at IOPart. nhrawoff points to
- * the start of the NETWORK LAYER HEADER.
- *
- * NOTE:
- * The full packet is described in frags but the ethernet header is separately
- * kept in ethhdr so that uisnic doesn't have to "MAP" the guest memory to get to
- * the header. uisnic needs ethhdr to determine how to route the packet.
- */
-struct net_pkt_xmt {
- int len;
- int num_frags;
- struct phys_info frags[MAX_PHYS_INFO];
- char ethhdr[ETH_HLEN];
- struct {
- u8 valid;
- u8 hrawoffv;
- u8 nhrawoffv;
- __be16 protocol;
- __wsum csum;
- u32 hrawoff;
- u32 nhrawoff;
- } lincsum;
-} __packed;
-
-struct net_pkt_xmtdone {
- /* Result of NET_XMIT */
- u32 xmt_done_result;
-} __packed;
-
-/*
- * RCVPOST_BUF_SIZE must be at most page_size(4096) - cache_line_size(64). The
- * reason is that dev_alloc_skb(), which is used to generate RCV_POST skbs in
- * visornic, requires that there is "overhead" in the buffer, and pads 16 bytes.
- * Use 1 full cache line size for "overhead" so that transfers are optimized.
- * IOVM requires that a buffer be represented by 1 phys_info structure
- * which can only cover page_size.
- */
-#define RCVPOST_BUF_SIZE 4032
-#define MAX_NET_RCV_CHAIN \
- ((VISOR_ETH_MAX_MTU + ETH_HLEN + RCVPOST_BUF_SIZE - 1) \
- / RCVPOST_BUF_SIZE)
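For concreteness: with VISOR_ETH_MAX_MTU = 16384, the standard 14-byte
ETH_HLEN and RCVPOST_BUF_SIZE = 4032, MAX_NET_RCV_CHAIN evaluates to
(16384 + 14 + 4031) / 4032 = 5, i.e. a maximum-size frame is described by at
most five chained receive buffers.
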
-
-/* rcv buf size must be large enough to include ethernet data len + ethernet
- * header len - we use RCVPOST_BUF_SIZE so that each buffer is guaranteed to be
- * describable by a single phys_info entry.
- */
-struct net_pkt_rcvpost {
- /* Physical page information for the single-fragment rcv buf */
- struct phys_info frag;
- /*
- * Ensures that receive posts are returned to the adapter which we sent
- * them from originally.
- */
- u64 unique_num;
-
-} __packed;
-
-/*
- * struct net_pkt_rcv
- * @rcv_done_len: Length of the received data.
- * @numrcvbufs: Contains the incoming data. Guest side MUST chain these
- * together.
- * @*rcvbuf: List of chained rcvbufs. Each entry is a receive buffer
- * provided by NET_RCV_POST. NOTE: First rcvbuf in the
- * chain will also be provided in net.buf.
- * @unique_num:
- * @rcvs_dropped_delta:
- *
- * The number of rcvbuf that can be chained is based on max mtu and size of each
- * rcvbuf.
- */
-struct net_pkt_rcv {
- u32 rcv_done_len;
- u8 numrcvbufs;
- void *rcvbuf[MAX_NET_RCV_CHAIN];
- u64 unique_num;
- u32 rcvs_dropped_delta;
-} __packed;
-
-struct net_pkt_enbdis {
- void *context;
- /* 1 = enable, 0 = disable */
- u16 enable;
-} __packed;
-
-struct net_pkt_macaddr {
- void *context;
- /* 6 bytes */
- u8 macaddr[MAX_MACADDR_LEN];
-} __packed;
-
-/*
- * struct uiscmdrsp_net - cmd rsp packet used for VNIC network traffic.
- * @enum type:
- * @*buf:
- * @union:
- * @struct xmt: Used for NET_XMIT.
- * @struct xmtdone: Used for NET_XMIT_DONE.
- * @struct rcvpost: Used for NET_RCV_POST.
- * @struct rcv: Used for NET_RCV.
- * @struct enbdis: Used for NET_RCV_ENBDIS, NET_RCV_ENBDIS_ACK,
- * NET_RCV_PROMISC, and NET_CONNECT_STATUS.
- * @struct macaddr:
- */
-struct uiscmdrsp_net {
- enum net_types type;
- void *buf;
- union {
- struct net_pkt_xmt xmt;
- struct net_pkt_xmtdone xmtdone;
- struct net_pkt_rcvpost rcvpost;
- struct net_pkt_rcv rcv;
- struct net_pkt_enbdis enbdis;
- struct net_pkt_macaddr macaddr;
- };
-} __packed;
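For orientation, the sketch below shows roughly how a guest-side NIC driver
could describe a single small linear skb as a NET_XMIT command using the
structures above (CMD_NET_TYPE is defined a little further down in this
header). It is illustrative only: example_fill_net_xmit is a hypothetical
helper, and the real visornic transmit path also fills the lincsum hints and
handles multi-fragment and oversized buffers.

    /*
     * Illustrative only: describe one small linear skb as a NET_XMIT command.
     * (example_fill_net_xmit is a hypothetical helper, not part of visornic.)
     */
    static void example_fill_net_xmit(struct uiscmdrsp *cmdrsp,
                                      struct sk_buff *skb)
    {
            struct net_pkt_xmt *xmt = &cmdrsp->net.xmt;

            cmdrsp->cmdtype = CMD_NET_TYPE;
            cmdrsp->net.type = NET_XMIT;
            cmdrsp->net.buf = skb;          /* handed back in NET_XMIT_DONE */

            xmt->len = skb->len;
            xmt->num_frags = 1;
            xmt->frags[0].pi_pfn = page_to_pfn(virt_to_page(skb->data));
            xmt->frags[0].pi_off = offset_in_page(skb->data);
            xmt->frags[0].pi_len = skb->len;

            /* IOPart routes on the Ethernet header without mapping guest memory */
            memcpy(xmt->ethhdr, skb->data, ETH_HLEN);
    }
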
-
-/*
- * struct uiscmdrsp_scsitaskmgmt
- * @enum tasktype: The type of task.
- * @struct vdest: The vdisk for which this task mgmt is generated.
- * @handle: This is a handle that the guest has saved off for its
- * own use. The handle value is preserved by iopart and
- * returned as is in the task mgmt rsp.
- * @notify_handle: For Linux guests, this is a pointer to wait_queue_head
- * that a thread is waiting on to see if the taskmgmt
- * command has completed. When the rsp is received by
- * guest, the thread receiving the response uses this to
- * notify the thread waiting for taskmgmt command
- * completion. Its value is preserved by iopart and
- * returned as is in the task mgmt rsp.
- * @notifyresult_handle: This is a handle to the location in the guest where
- * the result of the taskmgmt command (result field) is
- * saved to when the response is handled. Its value is
- * preserved by iopart and returned as is in the task mgmt
- * rsp.
- * @result: Result of taskmgmt command - set by IOPart.
- */
-struct uiscmdrsp_scsitaskmgmt {
- enum task_mgmt_types tasktype;
- struct uisscsi_dest vdest;
- u64 handle;
- u64 notify_handle;
- u64 notifyresult_handle;
- char result;
-
-#define TASK_MGMT_FAILED 0
-} __packed;
-
-/*
- * struct uiscmdrsp_disknotify - Used by uissd to send disk add/remove
- * notifications to Guest.
- * @add: 0-remove, 1-add.
- * @*v_hba: Channel info to route msg.
- * @channel: SCSI Path of Disk to be added or removed.
- * @id: SCSI Path of Disk to be added or removed.
- * @lun: SCSI Path of Disk to be added or removed.
- *
- * Note that the vHba pointer is not used by the Client/Guest side.
- */
-struct uiscmdrsp_disknotify {
- u8 add;
- void *v_hba;
- u32 channel, id, lun;
-} __packed;
-
-/* Keeping cmd and rsp info in one structure for now; this is the cmd/rsp packet. */
-struct uiscmdrsp {
- char cmdtype;
- /* Describes what type of information is in the struct */
-#define CMD_SCSI_TYPE 1
-#define CMD_NET_TYPE 2
-#define CMD_SCSITASKMGMT_TYPE 3
-#define CMD_NOTIFYGUEST_TYPE 4
- union {
- struct uiscmdrsp_scsi scsi;
- struct uiscmdrsp_net net;
- struct uiscmdrsp_scsitaskmgmt scsitaskmgmt;
- struct uiscmdrsp_disknotify disknotify;
- };
- /* Send the response when the cmd is done (scsi and scsitaskmgmt). */
- void *private_data;
- /* General Purpose Queue Link */
- struct uiscmdrsp *next;
- /* Pointer to the next active command */
- struct uiscmdrsp *activeQ_next;
- /* Pointer to the previous active command */
- struct uiscmdrsp *activeQ_prev;
-} __packed;
-
-/* total = 28 bytes */
-struct iochannel_vhba {
- /* 8 bytes */
- struct vhba_wwnn wwnn;
- /* 20 bytes */
- struct vhba_config_max max;
-} __packed;
-
-struct iochannel_vnic {
- /* 6 bytes */
- u8 macaddr[6];
- /* 4 bytes */
- u32 num_rcv_bufs;
- /* 4 bytes */
- u32 mtu;
- /* 16 bytes */
- guid_t zone_guid;
-} __packed;
-
-/*
- * This is just the header of the IO channel. It is assumed that directly after
- * this header there is a large region of memory which contains the command and
- * response queues as specified in cmd_q and rsp_q SIGNAL_QUEUE_HEADERS.
- */
-struct visor_io_channel {
- struct channel_header channel_header;
- struct signal_queue_header cmd_q;
- struct signal_queue_header rsp_q;
- union {
- struct iochannel_vhba vhba;
- struct iochannel_vnic vnic;
- } __packed;
-
-#define MAX_CLIENTSTRING_LEN 1024
- /* client_string is NUL-terminated so holds max-1 bytes */
- u8 client_string[MAX_CLIENTSTRING_LEN];
-} __packed;
-
-/* INLINE functions for initializing and accessing I/O data channels. */
-#define SIZEOF_CMDRSP (64 * DIV_ROUND_UP(sizeof(struct uiscmdrsp), 64))
-
-/* Use 4K page sizes when passing page info between Guest and IOPartition. */
-#define PI_PAGE_SIZE 0x1000
-#define PI_PAGE_MASK 0x0FFF
-
-/* __IOCHANNEL_H__ */
-#endif
diff --git a/drivers/staging/unisys/visorhba/Kconfig b/drivers/staging/unisys/visorhba/Kconfig
deleted file mode 100644
index ed59ac11c322..000000000000
--- a/drivers/staging/unisys/visorhba/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Unisys visorhba configuration
-#
-
-config UNISYS_VISORHBA
- tristate "Unisys visorhba driver"
- depends on UNISYSSPAR && UNISYS_VISORBUS && SCSI
- help
- The Unisys visorhba driver provides support for s-Par HBA
- devices exposed on the s-Par visorbus. When a message is sent
- to visorbus to create an HBA device, the probe function of
- visorhba is called to create the SCSI device.
- If you say Y here, you will enable the Unisys visorhba driver.
-
diff --git a/drivers/staging/unisys/visorhba/Makefile b/drivers/staging/unisys/visorhba/Makefile
deleted file mode 100644
index b613a7dcdae9..000000000000
--- a/drivers/staging/unisys/visorhba/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Unisys visorhba
-#
-
-obj-$(CONFIG_UNISYS_VISORHBA) += visorhba.o
-
-visorhba-y := visorhba_main.o
-
-ccflags-y += -I $(srctree)/$(src)/../include
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
deleted file mode 100644
index 48aa18f8b984..000000000000
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ /dev/null
@@ -1,1142 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (c) 2012 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#include <linux/debugfs.h>
-#include <linux/kthread.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/visorbus.h>
-#include <linux/xarray.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-
-#include "iochannel.h"
-
-/* The Send and Receive Buffers of the IO Queue may both be full */
-
-#define IOS_ERROR_THRESHOLD 1000
-#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
-#define VISORHBA_ERROR_COUNT 30
-
-static struct dentry *visorhba_debugfs_dir;
-
-/* GUIDs for HBA channel type supported by this driver */
-static struct visor_channeltype_descriptor visorhba_channel_types[] = {
- /* Note that the only channel type we expect to be reported by the
- * bus driver is the VISOR_VHBA channel.
- */
- { VISOR_VHBA_CHANNEL_GUID, "sparvhba", sizeof(struct channel_header),
- VISOR_VHBA_CHANNEL_VERSIONID },
- {}
-};
-
-MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
-MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
-
-struct visordisk_info {
- struct scsi_device *sdev;
- u32 valid;
- atomic_t ios_threshold;
- atomic_t error_count;
- struct visordisk_info *next;
-};
-
-struct scsipending {
- struct uiscmdrsp cmdrsp;
- /* The Data being tracked */
- void *sent;
- /* Type of pointer that is being stored */
- char cmdtype;
-};
-
-/* Each scsi_host has a host_data area that contains this struct. */
-struct visorhba_devdata {
- struct Scsi_Host *scsihost;
- struct visor_device *dev;
- struct list_head dev_info_list;
- /* Tracks the requests that have been forwarded to
- * the IOVM and haven't returned yet
- */
- struct scsipending pending[MAX_PENDING_REQUESTS];
- /* Start search for next pending free slot here */
- unsigned int nextinsert;
- /* lock to protect data in devdata */
- spinlock_t privlock;
- bool serverdown;
- bool serverchangingstate;
- unsigned long long acquire_failed_cnt;
- unsigned long long interrupts_rcvd;
- unsigned long long interrupts_notme;
- unsigned long long interrupts_disabled;
- u64 __iomem *flags_addr;
- struct visordisk_info head;
- unsigned int max_buff_len;
- int devnum;
- struct uiscmdrsp *cmdrsp;
- /*
- * allows us to pass int handles back-and-forth between us and
- * iovm, instead of raw pointers
- */
- struct xarray xa;
- struct dentry *debugfs_dir;
- struct dentry *debugfs_info;
-};
-
-struct visorhba_devices_open {
- struct visorhba_devdata *devdata;
-};
-
-/*
- * add_scsipending_entry - Save off io command that is pending in
- * Service Partition
- * @devdata: Pointer to devdata
- * @cmdtype: Specifies the type of command pending
- * @new: The command to be saved
- *
- * Saves off the io command that is being handled by the Service
- * Partition so that it can be handled when it completes. If new is
- * NULL it is assumed the entry refers only to the cmdrsp.
- *
- * Return: Insert_location where entry was added on success,
- * -EBUSY if the pending array is full
- */
-static int add_scsipending_entry(struct visorhba_devdata *devdata,
- char cmdtype, void *new)
-{
- unsigned long flags;
- struct scsipending *entry;
- int insert_location;
-
- spin_lock_irqsave(&devdata->privlock, flags);
- insert_location = devdata->nextinsert;
- while (devdata->pending[insert_location].sent) {
- insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
- if (insert_location == (int)devdata->nextinsert) {
- spin_unlock_irqrestore(&devdata->privlock, flags);
- return -EBUSY;
- }
- }
-
- entry = &devdata->pending[insert_location];
- memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
- entry->cmdtype = cmdtype;
- if (new)
- entry->sent = new;
- /* wants to send cmdrsp */
- else
- entry->sent = &entry->cmdrsp;
- devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
- spin_unlock_irqrestore(&devdata->privlock, flags);
-
- return insert_location;
-}
-
-/*
- * del_scsipending_ent - Removes an entry from the pending array
- * @devdata: Device holding the pending array
- * @del: Entry to remove
- *
- * Removes the entry pointed at by del and returns it.
- *
- * Return: The scsipending entry pointed to on success, NULL on failure
- */
-static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del)
-{
- unsigned long flags;
- void *sent;
-
- if (del >= MAX_PENDING_REQUESTS)
- return NULL;
-
- spin_lock_irqsave(&devdata->privlock, flags);
- sent = devdata->pending[del].sent;
- devdata->pending[del].cmdtype = 0;
- devdata->pending[del].sent = NULL;
- spin_unlock_irqrestore(&devdata->privlock, flags);
-
- return sent;
-}
-
-/*
- * get_scsipending_cmdrsp - Return the cmdrsp stored in a pending entry
- * @ddata: Device holding the pending array
- * @ent: Entry that stores the cmdrsp
- *
- * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
- * if the "sent" field is not NULL.
- *
- * Return: A pointer to the cmdrsp, NULL on failure
- */
-static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
- int ent)
-{
- if (ddata->pending[ent].sent)
- return &ddata->pending[ent].cmdrsp;
-
- return NULL;
-}
-
-/*
- * setup_scsitaskmgmt_handles - Stash the necessary handles so that the
- * completion processing logic for a taskmgmt
- * cmd will be able to find who to wake up
- * and where to stash the result
- * @xa: The data object maintaining the pointer<-->int mappings
- * @cmdrsp: Response from the IOVM
- * @event: The event handle to associate with an id
- * @result: The location where the result of the taskmgmt command is placed
- *
- * Return: 0 on success, a negative errno if an xarray id could not be allocated
- */
-static int setup_scsitaskmgmt_handles(struct xarray *xa, struct uiscmdrsp *cmdrsp,
- wait_queue_head_t *event, int *result)
-{
- int ret;
- u32 id;
-
- /* specify the event that has to be triggered when this cmd is complete */
- ret = xa_alloc_irq(xa, &id, event, xa_limit_32b, GFP_KERNEL);
- if (ret)
- return ret;
- cmdrsp->scsitaskmgmt.notify_handle = id;
- ret = xa_alloc_irq(xa, &id, result, xa_limit_32b, GFP_KERNEL);
- if (ret) {
- xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notify_handle);
- return ret;
- }
- cmdrsp->scsitaskmgmt.notifyresult_handle = id;
-
- return 0;
-}
-
-/*
- * cleanup_scsitaskmgmt_handles - Forget handles created by
- * setup_scsitaskmgmt_handles()
- * @xa: The data object maintaining the pointer<-->int mappings
- * @cmdrsp: Response from the IOVM
- */
-static void cleanup_scsitaskmgmt_handles(struct xarray *xa,
- struct uiscmdrsp *cmdrsp)
-{
- xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notify_handle);
- xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notifyresult_handle);
-}
-
-/*
- * forward_taskmgmt_command - Send taskmgmt command to the Service
- * Partition
- * @tasktype: Type of taskmgmt command
- * @scsidev: Scsidev that issued command
- *
- * Create a cmdrsp packet and send it to the Service Partition
- * that will service this request.
- *
- * Return: SUCCESS if the command was sent and completed, FAILED otherwise
- */
-static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
- struct scsi_device *scsidev)
-{
- struct uiscmdrsp *cmdrsp;
- struct visorhba_devdata *devdata =
- (struct visorhba_devdata *)scsidev->host->hostdata;
- int notifyresult = 0xffff;
- wait_queue_head_t notifyevent;
- int scsicmd_id;
- int ret;
-
- if (devdata->serverdown || devdata->serverchangingstate)
- return FAILED;
-
- scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
- NULL);
- if (scsicmd_id < 0)
- return FAILED;
-
- cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);
-
- init_waitqueue_head(&notifyevent);
-
- /* mark this as a SCSI taskmgmt command */
- cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
-
- ret = setup_scsitaskmgmt_handles(&devdata->xa, cmdrsp,
- &notifyevent, &notifyresult);
- if (ret) {
- dev_dbg(&scsidev->sdev_gendev,
- "visorhba: setup_scsitaskmgmt_handles returned %d\n", ret);
- return FAILED;
- }
-
- /* save destination */
- cmdrsp->scsitaskmgmt.tasktype = tasktype;
- cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
- cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
- cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
- cmdrsp->scsitaskmgmt.handle = scsicmd_id;
-
- dev_dbg(&scsidev->sdev_gendev,
- "visorhba: initiating type=%d taskmgmt command\n", tasktype);
- if (visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp))
- goto err_del_scsipending_ent;
-
- /* It can take the Service Partition up to 35 seconds to complete
- * an IO in some cases, so wait 45 seconds and error out
- */
- if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
- msecs_to_jiffies(45000)))
- goto err_del_scsipending_ent;
-
- dev_dbg(&scsidev->sdev_gendev,
- "visorhba: taskmgmt type=%d success; result=0x%x\n",
- tasktype, notifyresult);
- cleanup_scsitaskmgmt_handles(&devdata->xa, cmdrsp);
- return SUCCESS;
-
-err_del_scsipending_ent:
- dev_dbg(&scsidev->sdev_gendev,
- "visorhba: taskmgmt type=%d not executed\n", tasktype);
- del_scsipending_ent(devdata, scsicmd_id);
- cleanup_scsitaskmgmt_handles(&devdata->xa, cmdrsp);
- return FAILED;
-}
-
-/*
- * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
- * @scsicmd: The scsicmd that needs to be aborted
- *
- * Return: SUCCESS if inserted, FAILED otherwise
- */
-static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
-{
- /* issue TASK_MGMT_ABORT_TASK */
- struct scsi_device *scsidev;
- struct visordisk_info *vdisk;
- int rtn;
-
- scsidev = scsicmd->device;
- vdisk = scsidev->hostdata;
- if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
- atomic_inc(&vdisk->error_count);
- else
- atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
- rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
- if (rtn == SUCCESS) {
- scsicmd->result = DID_ABORT << 16;
- scsi_done(scsicmd);
- }
- return rtn;
-}
-
-/*
- * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
- * @scsicmd: The scsicmd that needs to be aborted
- *
- * Return: SUCCESS if inserted, FAILED otherwise
- */
-static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
-{
- /* issue TASK_MGMT_LUN_RESET */
- struct scsi_device *scsidev;
- struct visordisk_info *vdisk;
- int rtn;
-
- scsidev = scsicmd->device;
- vdisk = scsidev->hostdata;
- if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
- atomic_inc(&vdisk->error_count);
- else
- atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
- rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
- if (rtn == SUCCESS) {
- scsicmd->result = DID_RESET << 16;
- scsi_done(scsicmd);
- }
- return rtn;
-}
-
-/*
- * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
- * target on the bus
- * @scsicmd: The scsicmd that needs to be aborted
- *
- * Return: SUCCESS if inserted, FAILED otherwise
- */
-static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
-{
- struct scsi_device *scsidev;
- struct visordisk_info *vdisk;
- int rtn;
-
- scsidev = scsicmd->device;
- shost_for_each_device(scsidev, scsidev->host) {
- vdisk = scsidev->hostdata;
- if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
- atomic_inc(&vdisk->error_count);
- else
- atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
- }
- rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
- if (rtn == SUCCESS) {
- scsicmd->result = DID_RESET << 16;
- scsi_done(scsicmd);
- }
- return rtn;
-}
-
-/*
- * visorhba_host_reset_handler - Not supported
- * @scsicmd: The scsicmd that needs to be aborted
- *
- * Return: Not supported, return SUCCESS
- */
-static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
-{
- /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
- return SUCCESS;
-}
-
-/*
- * visorhba_get_info - Get information about SCSI device
- * @shp: Scsi host that is requesting information
- *
- * Return: String with visorhba information
- */
-static const char *visorhba_get_info(struct Scsi_Host *shp)
-{
- /* Return version string */
- return "visorhba";
-}
-
-/*
- * dma_data_dir_linux_to_spar - convert dma_data_direction value to
- * Unisys-specific equivalent
- * @d: dma direction value to convert
- *
- * Returns the Unisys-specific dma direction value corresponding to @d
- */
-static u32 dma_data_dir_linux_to_spar(enum dma_data_direction d)
-{
- switch (d) {
- case DMA_BIDIRECTIONAL:
- return UIS_DMA_BIDIRECTIONAL;
- case DMA_TO_DEVICE:
- return UIS_DMA_TO_DEVICE;
- case DMA_FROM_DEVICE:
- return UIS_DMA_FROM_DEVICE;
- case DMA_NONE:
- return UIS_DMA_NONE;
- default:
- return UIS_DMA_NONE;
- }
-}
-
-/*
- * visorhba_queue_command_lck - Queues command to the Service Partition
- * @scsicmd: Command to be queued
- * @visorhba_cmnd_done: Done function to call when scsicmd is returned
- *
- * Queues the scsicmd to the Service Partition after converting it to a
- * uiscmdrsp structure.
- *
- * Return: 0 if successfully queued to the Service Partition, otherwise
- * error code
- */
-static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd)
-{
- void (*visorhba_cmnd_done)(struct scsi_cmnd *) = scsi_done;
- struct uiscmdrsp *cmdrsp;
- struct scsi_device *scsidev = scsicmd->device;
- int insert_location;
- unsigned char *cdb = scsicmd->cmnd;
- struct Scsi_Host *scsihost = scsidev->host;
- unsigned int i;
- struct visorhba_devdata *devdata =
- (struct visorhba_devdata *)scsihost->hostdata;
- struct scatterlist *sg = NULL;
- struct scatterlist *sglist = NULL;
-
- if (devdata->serverdown || devdata->serverchangingstate)
- return SCSI_MLQUEUE_DEVICE_BUSY;
-
- insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
- (void *)scsicmd);
- if (insert_location < 0)
- return SCSI_MLQUEUE_DEVICE_BUSY;
-
- cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
- cmdrsp->cmdtype = CMD_SCSI_TYPE;
- /* save the pending insertion location. Deletion from pending
- * will return the scsicmd pointer for completion
- */
- cmdrsp->scsi.handle = insert_location;
-
- WARN_ON_ONCE(visorhba_cmnd_done != scsi_done);
- /* save destination */
- cmdrsp->scsi.vdest.channel = scsidev->channel;
- cmdrsp->scsi.vdest.id = scsidev->id;
- cmdrsp->scsi.vdest.lun = scsidev->lun;
- /* save datadir */
- cmdrsp->scsi.data_dir =
- dma_data_dir_linux_to_spar(scsicmd->sc_data_direction);
- memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
- cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
-
- /* keep track of the max buffer length so far. */
- if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
- devdata->max_buff_len = cmdrsp->scsi.bufflen;
-
- if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
- goto err_del_scsipending_ent;
-
- /* convert buffer to phys information */
- /* buffer is scatterlist - copy it out */
- sglist = scsi_sglist(scsicmd);
-
- for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
- cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
- cmdrsp->scsi.gpi_list[i].length = sg->length;
- }
- cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
-
- if (visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp))
- /* queue must be full and we aren't going to wait */
- goto err_del_scsipending_ent;
-
- return 0;
-
-err_del_scsipending_ent:
- del_scsipending_ent(devdata, insert_location);
- return SCSI_MLQUEUE_DEVICE_BUSY;
-}
-
-#ifdef DEF_SCSI_QCMD
-static DEF_SCSI_QCMD(visorhba_queue_command)
-#else
-#define visorhba_queue_command visorhba_queue_command_lck
-#endif
-
-/*
- * visorhba_slave_alloc - Called when new disk is discovered
- * @scsidev: New disk
- *
- * Create a new visordisk_info structure and add it to our
- * list of vdisks.
- *
- * Return: 0 on success, -ENOMEM on failure.
- */
-static int visorhba_slave_alloc(struct scsi_device *scsidev)
-{
- /* this is called by the midlayer before scan for new devices --
- * LLD can alloc any struct & do init if needed.
- */
- struct visordisk_info *vdisk;
- struct visorhba_devdata *devdata;
- struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
-
- /* already allocated return success */
- if (scsidev->hostdata)
- return 0;
-
- /* even though we errored, treat as success */
- devdata = (struct visorhba_devdata *)scsihost->hostdata;
- if (!devdata)
- return 0;
-
- vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC);
- if (!vdisk)
- return -ENOMEM;
-
- vdisk->sdev = scsidev;
- scsidev->hostdata = vdisk;
- return 0;
-}
-
-/*
- * visorhba_slave_destroy - Disk is going away, clean up resources.
- * @scsidev: Scsi device to destroy
- */
-static void visorhba_slave_destroy(struct scsi_device *scsidev)
-{
- /* midlevel calls this after device has been quiesced and
- * before it is to be deleted.
- */
- struct visordisk_info *vdisk;
-
- vdisk = scsidev->hostdata;
- scsidev->hostdata = NULL;
- kfree(vdisk);
-}
-
-static struct scsi_host_template visorhba_driver_template = {
- .name = "Unisys Visor HBA",
- .info = visorhba_get_info,
- .queuecommand = visorhba_queue_command,
- .eh_abort_handler = visorhba_abort_handler,
- .eh_device_reset_handler = visorhba_device_reset_handler,
- .eh_bus_reset_handler = visorhba_bus_reset_handler,
- .eh_host_reset_handler = visorhba_host_reset_handler,
-#define visorhba_MAX_CMNDS 128
- .can_queue = visorhba_MAX_CMNDS,
- .sg_tablesize = 64,
- .this_id = -1,
- .slave_alloc = visorhba_slave_alloc,
- .slave_destroy = visorhba_slave_destroy,
-};
-
-/*
- * info_debugfs_show - Debugfs interface to dump visorhba states
- * @seq: The sequence file to write information to
- * @v: Unused, but needed for use with seq file single_open invocation
- *
- * Presents a file in the debugfs tree named: /visorhba/vbus<x>:dev<y>/info.
- *
- * Return: Always 0
- */
-static int info_debugfs_show(struct seq_file *seq, void *v)
-{
- struct visorhba_devdata *devdata = seq->private;
-
- seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
- seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
- seq_printf(seq, "interrupts_disabled = %llu\n",
- devdata->interrupts_disabled);
- seq_printf(seq, "interrupts_notme = %llu\n",
- devdata->interrupts_notme);
- seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
- if (devdata->flags_addr) {
- u64 phys_flags_addr =
- virt_to_phys((__force void *)devdata->flags_addr);
- seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
- phys_flags_addr);
- seq_printf(seq, "FeatureFlags = %llu\n",
- (u64)readq(devdata->flags_addr));
- }
- seq_printf(seq, "acquire_failed_cnt = %llu\n",
- devdata->acquire_failed_cnt);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(info_debugfs);
-
-/*
- * complete_taskmgmt_command - Complete task management
- * @xa: The data object maintaining the pointer<-->int mappings
- * @cmdrsp: Response from the IOVM
- * @result: The result of the task management command
- *
- * Service Partition returned the result of the task management
- * command. Wake up anyone waiting for it.
- */
-static void complete_taskmgmt_command(struct xarray *xa,
- struct uiscmdrsp *cmdrsp, int result)
-{
- wait_queue_head_t *wq =
- xa_load(xa, cmdrsp->scsitaskmgmt.notify_handle);
- int *scsi_result_ptr =
- xa_load(xa, cmdrsp->scsitaskmgmt.notifyresult_handle);
- if (unlikely(!(wq && scsi_result_ptr))) {
- pr_err("visorhba: no completion context; cmd will time out\n");
- return;
- }
-
- /* copy the result of the taskmgmt and
- * wake up the error handler that is waiting for this
- */
- pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
- *scsi_result_ptr = result;
- wake_up_all(wq);
-}
-
-/*
- * visorhba_serverdown_complete - Called when we are done cleaning up
- * from serverdown
- * @devdata: Visorhba instance on which to complete serverdown
- *
- * Called when we are done cleaning up from serverdown, stop processing
- * queue, fail pending IOs.
- */
-static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
-{
- int i;
- struct scsipending *pendingdel = NULL;
- struct scsi_cmnd *scsicmd = NULL;
- struct uiscmdrsp *cmdrsp;
- unsigned long flags;
-
- /* Stop using the IOVM response queue (queue should be drained
- * by the end)
- */
- visorbus_disable_channel_interrupts(devdata->dev);
-
- /* Fail commands that weren't completed */
- spin_lock_irqsave(&devdata->privlock, flags);
- for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
- pendingdel = &devdata->pending[i];
- switch (pendingdel->cmdtype) {
- case CMD_SCSI_TYPE:
- scsicmd = pendingdel->sent;
- scsicmd->result = DID_RESET << 16;
- scsi_done(scsicmd);
- break;
- case CMD_SCSITASKMGMT_TYPE:
- cmdrsp = pendingdel->sent;
- complete_taskmgmt_command(&devdata->xa, cmdrsp,
- TASK_MGMT_FAILED);
- break;
- default:
- break;
- }
- pendingdel->cmdtype = 0;
- pendingdel->sent = NULL;
- }
- spin_unlock_irqrestore(&devdata->privlock, flags);
-
- devdata->serverdown = true;
- devdata->serverchangingstate = false;
-}
-
-/*
- * visorhba_serverdown - Got notified that the IOVM is down
- * @devdata: Visorhba that is being serviced by downed IOVM
- *
- * Something happened to the IOVM; mark the device as changing state and
- * clean up the pending commands.
- *
- * Return: 0 on success, -EINVAL on failure
- */
-static int visorhba_serverdown(struct visorhba_devdata *devdata)
-{
- if (!devdata->serverdown && !devdata->serverchangingstate) {
- devdata->serverchangingstate = true;
- visorhba_serverdown_complete(devdata);
- } else if (devdata->serverchangingstate) {
- return -EINVAL;
- }
- return 0;
-}
-
-/*
- * do_scsi_linuxstat - Scsi command returned linuxstat
- * @cmdrsp: Response from IOVM
- * @scsicmd: Command issued
- *
- * Don't log errors for disk-not-present inquiries.
- */
-static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
- struct scsi_cmnd *scsicmd)
-{
- struct visordisk_info *vdisk;
- struct scsi_device *scsidev;
-
- scsidev = scsicmd->device;
- memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
-
- /* Do not log errors for disk-not-present inquiries */
- if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
- (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
- cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT)
- return;
- /* Okay see what our error_count is here.... */
- vdisk = scsidev->hostdata;
- if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
- atomic_inc(&vdisk->error_count);
- atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
- }
-}
-
-static int set_no_disk_inquiry_result(unsigned char *buf, size_t len,
- bool is_lun0)
-{
- if (len < NO_DISK_INQUIRY_RESULT_LEN)
- return -EINVAL;
- memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
- buf[2] = SCSI_SPC2_VER;
- if (is_lun0) {
- buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
- buf[3] = DEV_HISUPPORT;
- } else {
- buf[0] = DEV_NOT_CAPABLE;
- }
- buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
- strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
- return 0;
-}
-
-/*
- * do_scsi_nolinuxstat - Scsi command didn't have linuxstat
- * @cmdrsp: Response from IOVM
- * @scsicmd: Command issued
- *
- * Handle response when no linuxstat was returned.
- */
-static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
- struct scsi_cmnd *scsicmd)
-{
- struct scsi_device *scsidev;
- unsigned char *buf;
- struct scatterlist *sg;
- unsigned int i;
- char *this_page;
- char *this_page_orig;
- int bufind = 0;
- struct visordisk_info *vdisk;
-
- scsidev = scsicmd->device;
- if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
- cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN) {
- if (cmdrsp->scsi.no_disk_result == 0)
- return;
-
- buf = kzalloc(36, GFP_KERNEL);
- if (!buf)
- return;
-
- /* Linux scsi code wants a device at Lun 0
- * to issue report luns, but we don't want
- * a disk there so we'll present a processor
- * there.
- */
- set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
- scsidev->lun == 0);
-
- if (scsi_sg_count(scsicmd) == 0) {
- memcpy(scsi_sglist(scsicmd), buf,
- cmdrsp->scsi.bufflen);
- kfree(buf);
- return;
- }
-
- scsi_for_each_sg(scsicmd, sg, scsi_sg_count(scsicmd), i) {
- this_page_orig = kmap_atomic(sg_page(sg));
- this_page = (void *)((unsigned long)this_page_orig |
- sg->offset);
- memcpy(this_page, buf + bufind, sg->length);
- kunmap_atomic(this_page_orig);
- }
- kfree(buf);
- } else {
- vdisk = scsidev->hostdata;
- if (atomic_read(&vdisk->ios_threshold) > 0) {
- atomic_dec(&vdisk->ios_threshold);
- if (atomic_read(&vdisk->ios_threshold) == 0)
- atomic_set(&vdisk->error_count, 0);
- }
- }
-}
-
-/*
- * complete_scsi_command - Complete a scsi command
- * @cmdrsp: Response from Service Partition
- * @scsicmd: The scsi command
- *
- * Response was returned by the Service Partition. Finish it and send
- * completion to the scsi midlayer.
- */
-static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
- struct scsi_cmnd *scsicmd)
-{
- /* take what we need out of cmdrsp and complete the scsicmd */
- scsicmd->result = cmdrsp->scsi.linuxstat;
- if (cmdrsp->scsi.linuxstat)
- do_scsi_linuxstat(cmdrsp, scsicmd);
- else
- do_scsi_nolinuxstat(cmdrsp, scsicmd);
-
- scsi_done(scsicmd);
-}
-
-/*
- * drain_queue - Pull responses out of iochannel
- * @cmdrsp: Response from the IOSP
- * @devdata: Device that owns this iochannel
- *
- * Pulls responses out of the iochannel and processes them.
- */
-static void drain_queue(struct uiscmdrsp *cmdrsp,
- struct visorhba_devdata *devdata)
-{
- struct scsi_cmnd *scsicmd;
-
- while (1) {
- /* queue empty */
- if (visorchannel_signalremove(devdata->dev->visorchannel,
- IOCHAN_FROM_IOPART,
- cmdrsp))
- break;
- if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
- /* scsicmd location is returned by the
- * deletion
- */
- scsicmd = del_scsipending_ent(devdata,
- cmdrsp->scsi.handle);
- if (!scsicmd)
- break;
- /* complete the orig cmd */
- complete_scsi_command(cmdrsp, scsicmd);
- } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
- if (!del_scsipending_ent(devdata,
- cmdrsp->scsitaskmgmt.handle))
- break;
- complete_taskmgmt_command(&devdata->xa, cmdrsp,
- cmdrsp->scsitaskmgmt.result);
- } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
- dev_err_once(&devdata->dev->device,
- "ignoring unsupported NOTIFYGUEST\n");
- /* cmdrsp is now available for re-use */
- }
-}
-
-/*
- * This is used only when this driver is active as an HBA driver in the
- * client guest partition. It is called periodically so we can obtain and
- * process command responses from the IO Service Partition.
- */
-static void visorhba_channel_interrupt(struct visor_device *dev)
-{
- struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
-
- if (!devdata)
- return;
-
- drain_queue(devdata->cmdrsp, devdata);
-}
-
-/*
- * visorhba_pause - Function to handle visorbus pause messages
- * @dev: Device that is pausing
- * @complete_func: Function to call when finished
- *
- * Something has happened to the IO Service Partition that is
- * handling this device. Quiet this device and reset commands
- * so that the Service Partition can be corrected.
- *
- * Return: SUCCESS
- */
-static int visorhba_pause(struct visor_device *dev,
- visorbus_state_complete_func complete_func)
-{
- struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
-
- visorhba_serverdown(devdata);
- complete_func(dev, 0);
- return 0;
-}
-
-/*
- * visorhba_resume - Function called when the IO Service Partition is back
- * @dev: Device that is resuming
- * @complete_func: Function to call when finished
- *
- * Yay! The IO Service Partition is back; the channel has been wiped, so
- * let's re-establish the connection and start processing responses.
- *
- * Return: 0 on success, -EINVAL on failure
- */
-static int visorhba_resume(struct visor_device *dev,
- visorbus_state_complete_func complete_func)
-{
- struct visorhba_devdata *devdata;
-
- devdata = dev_get_drvdata(&dev->device);
- if (!devdata)
- return -EINVAL;
-
- if (devdata->serverdown && !devdata->serverchangingstate)
- devdata->serverchangingstate = true;
-
- visorbus_enable_channel_interrupts(dev);
- devdata->serverdown = false;
- devdata->serverchangingstate = false;
-
- return 0;
-}
-
-/*
- * visorhba_probe - Device has been discovered; do acquire
- * @dev: visor_device that was discovered
- *
- * A new HBA was discovered; make the initial connections to it.
- *
- * Return: 0 on success, otherwise error code
- */
-static int visorhba_probe(struct visor_device *dev)
-{
- struct Scsi_Host *scsihost;
- struct vhba_config_max max;
- struct visorhba_devdata *devdata = NULL;
- int err, channel_offset;
- u64 features;
-
- scsihost = scsi_host_alloc(&visorhba_driver_template,
- sizeof(*devdata));
- if (!scsihost)
- return -ENODEV;
-
- channel_offset = offsetof(struct visor_io_channel, vhba.max);
- err = visorbus_read_channel(dev, channel_offset, &max,
- sizeof(struct vhba_config_max));
- if (err < 0)
- goto err_scsi_host_put;
-
- scsihost->max_id = (unsigned int)max.max_id;
- scsihost->max_lun = (unsigned int)max.max_lun;
- scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
- scsihost->max_sectors =
- (unsigned short)(max.max_io_size >> 9);
- scsihost->sg_tablesize =
- (unsigned short)(max.max_io_size / PAGE_SIZE);
- if (scsihost->sg_tablesize > MAX_PHYS_INFO)
- scsihost->sg_tablesize = MAX_PHYS_INFO;
- err = scsi_add_host(scsihost, &dev->device);
- if (err < 0)
- goto err_scsi_host_put;
-
- devdata = (struct visorhba_devdata *)scsihost->hostdata;
- devdata->dev = dev;
- dev_set_drvdata(&dev->device, devdata);
-
- devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
- visorhba_debugfs_dir);
- if (!devdata->debugfs_dir) {
- err = -ENOMEM;
- goto err_scsi_remove_host;
- }
- devdata->debugfs_info =
- debugfs_create_file("info", 0440,
- devdata->debugfs_dir, devdata,
- &info_debugfs_fops);
- if (!devdata->debugfs_info) {
- err = -ENOMEM;
- goto err_debugfs_dir;
- }
-
- spin_lock_init(&devdata->privlock);
- devdata->serverdown = false;
- devdata->serverchangingstate = false;
- devdata->scsihost = scsihost;
-
- channel_offset = offsetof(struct visor_io_channel,
- channel_header.features);
- err = visorbus_read_channel(dev, channel_offset, &features, 8);
- if (err)
- goto err_debugfs_info;
- features |= VISOR_CHANNEL_IS_POLLING;
- err = visorbus_write_channel(dev, channel_offset, &features, 8);
- if (err)
- goto err_debugfs_info;
-
- xa_init(&devdata->xa);
-
-	devdata->cmdrsp = kmalloc(sizeof(*devdata->cmdrsp), GFP_ATOMIC);
-	if (!devdata->cmdrsp) {
-		err = -ENOMEM;
-		goto err_debugfs_info;
-	}
-	visorbus_enable_channel_interrupts(dev);
-
- scsi_scan_host(scsihost);
-
- return 0;
-
-err_debugfs_info:
- debugfs_remove(devdata->debugfs_info);
-
-err_debugfs_dir:
- debugfs_remove_recursive(devdata->debugfs_dir);
-
-err_scsi_remove_host:
- scsi_remove_host(scsihost);
-
-err_scsi_host_put:
- scsi_host_put(scsihost);
- return err;
-}
-
-/*
- * visorhba_remove - Remove a visorhba device
- * @dev: Device to remove
- *
- * Removes the visorhba device.
- */
-static void visorhba_remove(struct visor_device *dev)
-{
- struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
- struct Scsi_Host *scsihost = NULL;
-
- if (!devdata)
- return;
-
- scsihost = devdata->scsihost;
- kfree(devdata->cmdrsp);
- visorbus_disable_channel_interrupts(dev);
- scsi_remove_host(scsihost);
- scsi_host_put(scsihost);
-
- dev_set_drvdata(&dev->device, NULL);
- debugfs_remove(devdata->debugfs_info);
- debugfs_remove_recursive(devdata->debugfs_dir);
-}
-
-/* This is used to tell the visorbus driver which types of visor devices
- * we support, and what functions to call when a visor device that we support
- * is attached or removed.
- */
-static struct visor_driver visorhba_driver = {
- .name = "visorhba",
- .owner = THIS_MODULE,
- .channel_types = visorhba_channel_types,
- .probe = visorhba_probe,
- .remove = visorhba_remove,
- .pause = visorhba_pause,
- .resume = visorhba_resume,
- .channel_interrupt = visorhba_channel_interrupt,
-};
-
-/*
- * visorhba_init - Driver init routine
- *
- * Initialize the visorhba driver and register it with visorbus
- * to handle s-Par virtual host bus adapters.
- *
- * Return: 0 on success, error code otherwise
- */
-static int visorhba_init(void)
-{
- int rc;
-
- visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
- if (!visorhba_debugfs_dir)
- return -ENOMEM;
-
- rc = visorbus_register_visor_driver(&visorhba_driver);
- if (rc)
- goto cleanup_debugfs;
-
- return 0;
-
-cleanup_debugfs:
- debugfs_remove_recursive(visorhba_debugfs_dir);
-
- return rc;
-}
-
-/*
- * visorhba_exit - Driver exit routine
- *
- * Unregister driver from the bus and free up memory.
- */
-static void visorhba_exit(void)
-{
- visorbus_unregister_visor_driver(&visorhba_driver);
- debugfs_remove_recursive(visorhba_debugfs_dir);
-}
-
-module_init(visorhba_init);
-module_exit(visorhba_exit);
-
-MODULE_AUTHOR("Unisys");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");
diff --git a/drivers/staging/unisys/visorinput/Kconfig b/drivers/staging/unisys/visorinput/Kconfig
deleted file mode 100644
index 5f036393aee9..000000000000
--- a/drivers/staging/unisys/visorinput/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Unisys visorinput configuration
-#
-
-config UNISYS_VISORINPUT
- tristate "Unisys visorinput driver"
- depends on UNISYSSPAR && UNISYS_VISORBUS && INPUT
- help
- The Unisys s-Par visorinput driver provides a virtualized system
- console (keyboard and mouse) that is accessible through the
- s-Par firmware's user interface. s-Par provides video using the EFI
-	  GOP protocol, so if this driver is not present, the Linux guest should
- still boot with visible output in the partition desktop, but keyboard
- and mouse interaction will not be available.
-
diff --git a/drivers/staging/unisys/visorinput/Makefile b/drivers/staging/unisys/visorinput/Makefile
deleted file mode 100644
index 68ced7c8a65f..000000000000
--- a/drivers/staging/unisys/visorinput/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Unisys visorinput
-#
-
-obj-$(CONFIG_UNISYS_VISORINPUT) += visorinput.o
-
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
deleted file mode 100644
index dffa71ac3cc5..000000000000
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ /dev/null
@@ -1,788 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2011 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-/*
- * This driver lives in a generic guest Linux partition, and registers to
- * receive keyboard and mouse channels from the visorbus driver. It reads
- * inputs from such channels and delivers them to the Linux OS in the
- * standard way Linux expects from input drivers.
- */
-
-#include <linux/fb.h>
-#include <linux/input.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/uuid.h>
-#include <linux/visorbus.h>
-
-/* These defines identify mouse and keyboard activity which is specified by the
- * firmware to the host using the cmsimpleinput protocol.
- */
-/* only motion; arg1=x, arg2=y */
-#define INPUTACTION_XY_MOTION 1
-
-/* arg1: 1=left,2=center,3=right */
-#define INPUTACTION_MOUSE_BUTTON_DOWN 2
-#define INPUTACTION_MOUSE_BUTTON_UP 3
-#define INPUTACTION_MOUSE_BUTTON_CLICK 4
-#define INPUTACTION_MOUSE_BUTTON_DCLICK 5
-
-/* arg1: wheel rotation away from/toward user */
-#define INPUTACTION_WHEEL_ROTATE_AWAY 6
-#define INPUTACTION_WHEEL_ROTATE_TOWARD 7
-
-/* arg1: scancode, as follows: If arg1 <= 0xff, it's a 1-byte scancode and arg1
- * is that scancode. If arg1 > 0xff, it's a 2-byte scancode, with the 1st
- * byte in the low 8 bits, and the 2nd byte in the high 8 bits.
- * E.g., the right ALT key would appear as x'38e0'.
- */
-#define INPUTACTION_KEY_DOWN 64
-#define INPUTACTION_KEY_UP 65
-#define INPUTACTION_KEY_DOWN_UP 67
-
-/* arg1: scancode (in same format as inputaction_keyDown); MUST refer to one of
- * the locking keys, like capslock, numlock, or scrolllock.
- * arg2: 1 iff locking key should be in the LOCKED position (e.g., light is ON)
- */
-#define INPUTACTION_SET_LOCKING_KEY_STATE 66
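-
-/* Illustrative sketch (editorial addition, not part of the original file):
- * a worked example of the scancode packing described above. Right ALT is
- * reported as arg1 = 0x38e0; the low byte 0xe0 marks it as an extended
- * scancode and the high byte 0x38 indexes the extended keycode table
- * (yielding KEY_RIGHTALT below). A plain 1-byte scancode is looked up
- * directly. The helper name is hypothetical.
- */
-static inline unsigned int example_scancode_table_index(unsigned int arg1)
-{
-	/* extended form carries the <xx> byte in bits 15..8 */
-	return (arg1 > 0xff) ? (arg1 >> 8) & 0xff : arg1;
-}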
-
-/* Keyboard channel {c73416d0-b0b8-44af-b304-9d2ae99f1b3d} */
-#define VISOR_KEYBOARD_CHANNEL_GUID \
- GUID_INIT(0xc73416d0, 0xb0b8, 0x44af, \
- 0xb3, 0x4, 0x9d, 0x2a, 0xe9, 0x9f, 0x1b, 0x3d)
-#define VISOR_KEYBOARD_CHANNEL_GUID_STR "c73416d0-b0b8-44af-b304-9d2ae99f1b3d"
-
-/* Mouse channel {addf07d4-94a9-46e2-81c3-61abcdbdbd87} */
-#define VISOR_MOUSE_CHANNEL_GUID \
- GUID_INIT(0xaddf07d4, 0x94a9, 0x46e2, \
- 0x81, 0xc3, 0x61, 0xab, 0xcd, 0xbd, 0xbd, 0x87)
-#define VISOR_MOUSE_CHANNEL_GUID_STR "addf07d4-94a9-46e2-81c3-61abcdbdbd87"
-
-#define PIXELS_ACROSS_DEFAULT 1024
-#define PIXELS_DOWN_DEFAULT 768
-#define KEYCODE_TABLE_BYTES 256
-
-struct visor_inputactivity {
- u16 action;
- u16 arg1;
- u16 arg2;
- u16 arg3;
-} __packed;
-
-struct visor_inputreport {
- u64 seq_no;
- struct visor_inputactivity activity;
-} __packed;
-
-/* header of keyboard/mouse channels */
-struct visor_input_channel_data {
- u32 n_input_reports;
- union {
- struct {
- u16 x_res;
- u16 y_res;
- } mouse;
- struct {
- u32 flags;
- } keyboard;
- };
-} __packed;
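-
-/* Editorial note (not part of the original file): this header sits in the
- * channel memory immediately after the generic struct channel_header, which
- * is why devdata_create() below reads it with
- * visorbus_read_channel(dev, sizeof(struct channel_header), &data, size).
- */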
-
-enum visorinput_dev_type {
- visorinput_keyboard,
- visorinput_mouse,
-};
-
-/*
- * This is the private data that we store for each device. A pointer to this
- * struct is maintained via dev_get_drvdata() / dev_set_drvdata() for each
- * struct device.
- */
-struct visorinput_devdata {
- struct visor_device *dev;
- /* lock for dev */
- struct mutex lock_visor_dev;
- struct input_dev *visorinput_dev;
- bool paused;
- bool interrupts_enabled;
- /* size of following array */
- unsigned int keycode_table_bytes;
- /* for keyboard devices: visorkbd_keycode[] + visorkbd_ext_keycode[] */
- unsigned char keycode_table[];
-};
-
-static const guid_t visor_keyboard_channel_guid = VISOR_KEYBOARD_CHANNEL_GUID;
-static const guid_t visor_mouse_channel_guid = VISOR_MOUSE_CHANNEL_GUID;
-
-/*
- * Borrowed from drivers/input/keyboard/atakbd.c
- * This maps 1-byte scancodes to keycodes.
- */
-static const unsigned char visorkbd_keycode[KEYCODE_TABLE_BYTES] = {
- /* American layout */
- [0] = KEY_GRAVE,
- [1] = KEY_ESC,
- [2] = KEY_1,
- [3] = KEY_2,
- [4] = KEY_3,
- [5] = KEY_4,
- [6] = KEY_5,
- [7] = KEY_6,
- [8] = KEY_7,
- [9] = KEY_8,
- [10] = KEY_9,
- [11] = KEY_0,
- [12] = KEY_MINUS,
- [13] = KEY_EQUAL,
- [14] = KEY_BACKSPACE,
- [15] = KEY_TAB,
- [16] = KEY_Q,
- [17] = KEY_W,
- [18] = KEY_E,
- [19] = KEY_R,
- [20] = KEY_T,
- [21] = KEY_Y,
- [22] = KEY_U,
- [23] = KEY_I,
- [24] = KEY_O,
- [25] = KEY_P,
- [26] = KEY_LEFTBRACE,
- [27] = KEY_RIGHTBRACE,
- [28] = KEY_ENTER,
- [29] = KEY_LEFTCTRL,
- [30] = KEY_A,
- [31] = KEY_S,
- [32] = KEY_D,
- [33] = KEY_F,
- [34] = KEY_G,
- [35] = KEY_H,
- [36] = KEY_J,
- [37] = KEY_K,
- [38] = KEY_L,
- [39] = KEY_SEMICOLON,
- [40] = KEY_APOSTROPHE,
- [41] = KEY_GRAVE,
- [42] = KEY_LEFTSHIFT,
- [43] = KEY_BACKSLASH,
- [44] = KEY_Z,
- [45] = KEY_X,
- [46] = KEY_C,
- [47] = KEY_V,
- [48] = KEY_B,
- [49] = KEY_N,
- [50] = KEY_M,
- [51] = KEY_COMMA,
- [52] = KEY_DOT,
- [53] = KEY_SLASH,
- [54] = KEY_RIGHTSHIFT,
- [55] = KEY_KPASTERISK,
- [56] = KEY_LEFTALT,
- [57] = KEY_SPACE,
- [58] = KEY_CAPSLOCK,
- [59] = KEY_F1,
- [60] = KEY_F2,
- [61] = KEY_F3,
- [62] = KEY_F4,
- [63] = KEY_F5,
- [64] = KEY_F6,
- [65] = KEY_F7,
- [66] = KEY_F8,
- [67] = KEY_F9,
- [68] = KEY_F10,
- [69] = KEY_NUMLOCK,
- [70] = KEY_SCROLLLOCK,
- [71] = KEY_KP7,
- [72] = KEY_KP8,
- [73] = KEY_KP9,
- [74] = KEY_KPMINUS,
- [75] = KEY_KP4,
- [76] = KEY_KP5,
- [77] = KEY_KP6,
- [78] = KEY_KPPLUS,
- [79] = KEY_KP1,
- [80] = KEY_KP2,
- [81] = KEY_KP3,
- [82] = KEY_KP0,
- [83] = KEY_KPDOT,
- /* enables UK backslash+pipe key and FR lessthan+greaterthan key */
- [86] = KEY_102ND,
- [87] = KEY_F11,
- [88] = KEY_F12,
- [90] = KEY_KPLEFTPAREN,
- [91] = KEY_KPRIGHTPAREN,
- [92] = KEY_KPASTERISK,
- [93] = KEY_KPASTERISK,
- [94] = KEY_KPPLUS,
- [95] = KEY_HELP,
- [96] = KEY_KPENTER,
- [97] = KEY_RIGHTCTRL,
- [98] = KEY_KPSLASH,
- [99] = KEY_KPLEFTPAREN,
- [100] = KEY_KPRIGHTPAREN,
- [101] = KEY_KPSLASH,
- [102] = KEY_HOME,
- [103] = KEY_UP,
- [104] = KEY_PAGEUP,
- [105] = KEY_LEFT,
- [106] = KEY_RIGHT,
- [107] = KEY_END,
- [108] = KEY_DOWN,
- [109] = KEY_PAGEDOWN,
- [110] = KEY_INSERT,
- [111] = KEY_DELETE,
- [112] = KEY_MACRO,
- [113] = KEY_MUTE
-};
-
-/*
- * This maps the <xx> in extended scancodes of the form "0xE0 <xx>" into
- * keycodes.
- */
-static const unsigned char visorkbd_ext_keycode[KEYCODE_TABLE_BYTES] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x00 */
- 0, 0, 0, 0, 0, 0, 0, 0, /* 0x10 */
- 0, 0, 0, 0, KEY_KPENTER, KEY_RIGHTCTRL, 0, 0, /* 0x18 */
- 0, 0, 0, 0, 0, 0, 0, 0, /* 0x20 */
- KEY_RIGHTALT, 0, 0, 0, 0, 0, 0, 0, /* 0x28 */
- 0, 0, 0, 0, 0, 0, 0, 0, /* 0x30 */
- KEY_RIGHTALT /* AltGr */, 0, 0, 0, 0, 0, 0, 0, /* 0x38 */
- 0, 0, 0, 0, 0, 0, 0, KEY_HOME, /* 0x40 */
- KEY_UP, KEY_PAGEUP, 0, KEY_LEFT, 0, KEY_RIGHT, 0, KEY_END, /* 0x48 */
- KEY_DOWN, KEY_PAGEDOWN, KEY_INSERT, KEY_DELETE, 0, 0, 0, 0, /* 0x50 */
- 0, 0, 0, 0, 0, 0, 0, 0, /* 0x58 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x60 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x70 */
-};
-
-static int visorinput_open(struct input_dev *visorinput_dev)
-{
- struct visorinput_devdata *devdata = input_get_drvdata(visorinput_dev);
-
- if (!devdata) {
- dev_err(&visorinput_dev->dev,
- "%s input_get_drvdata(%p) returned NULL\n",
- __func__, visorinput_dev);
- return -EINVAL;
- }
- dev_dbg(&visorinput_dev->dev, "%s opened\n", __func__);
-
- /*
- * If we're not paused, really enable interrupts. Regardless of whether
- * we are paused, set a flag indicating interrupts should be enabled so
- * when we resume, interrupts will really be enabled.
- */
- mutex_lock(&devdata->lock_visor_dev);
- devdata->interrupts_enabled = true;
- if (devdata->paused)
- goto out_unlock;
- visorbus_enable_channel_interrupts(devdata->dev);
-
-out_unlock:
- mutex_unlock(&devdata->lock_visor_dev);
- return 0;
-}
-
-static void visorinput_close(struct input_dev *visorinput_dev)
-{
- struct visorinput_devdata *devdata = input_get_drvdata(visorinput_dev);
-
- if (!devdata) {
- dev_err(&visorinput_dev->dev,
- "%s input_get_drvdata(%p) returned NULL\n",
- __func__, visorinput_dev);
- return;
- }
- dev_dbg(&visorinput_dev->dev, "%s closed\n", __func__);
-
- /*
- * If we're not paused, really disable interrupts. Regardless of
- * whether we are paused, set a flag indicating interrupts should be
- * disabled so when we resume we will not re-enable them.
- */
- mutex_lock(&devdata->lock_visor_dev);
- devdata->interrupts_enabled = false;
- if (devdata->paused)
- goto out_unlock;
- visorbus_disable_channel_interrupts(devdata->dev);
-
-out_unlock:
- mutex_unlock(&devdata->lock_visor_dev);
-}
-
-/*
- * setup_client_keyboard() initializes and returns a Linux input node that we
- * can use to deliver keyboard inputs to Linux. We of course do this when we
- * see keyboard inputs coming in on a keyboard channel.
- */
-static struct input_dev *setup_client_keyboard(void *devdata,
- unsigned char *keycode_table)
-
-{
- int i;
- struct input_dev *visorinput_dev = input_allocate_device();
-
- if (!visorinput_dev)
- return NULL;
-
- visorinput_dev->name = "visor Keyboard";
- visorinput_dev->phys = "visorkbd:input0";
- visorinput_dev->id.bustype = BUS_VIRTUAL;
- visorinput_dev->id.vendor = 0x0001;
- visorinput_dev->id.product = 0x0001;
- visorinput_dev->id.version = 0x0100;
-
- visorinput_dev->evbit[0] = BIT_MASK(EV_KEY) |
- BIT_MASK(EV_REP) |
- BIT_MASK(EV_LED);
- visorinput_dev->ledbit[0] = BIT_MASK(LED_CAPSL) |
- BIT_MASK(LED_SCROLLL) |
- BIT_MASK(LED_NUML);
- visorinput_dev->keycode = keycode_table;
- /* sizeof(unsigned char) */
- visorinput_dev->keycodesize = 1;
- visorinput_dev->keycodemax = KEYCODE_TABLE_BYTES;
-
- for (i = 1; i < visorinput_dev->keycodemax; i++)
- set_bit(keycode_table[i], visorinput_dev->keybit);
- for (i = 1; i < visorinput_dev->keycodemax; i++)
- set_bit(keycode_table[i + KEYCODE_TABLE_BYTES],
- visorinput_dev->keybit);
-
- visorinput_dev->open = visorinput_open;
- visorinput_dev->close = visorinput_close;
- /* pre input_register! */
- input_set_drvdata(visorinput_dev, devdata);
-
- return visorinput_dev;
-}
-
-static struct input_dev *setup_client_mouse(void *devdata, unsigned int xres,
- unsigned int yres)
-{
- struct input_dev *visorinput_dev = input_allocate_device();
-
- if (!visorinput_dev)
- return NULL;
-
- visorinput_dev->name = "visor Mouse";
- visorinput_dev->phys = "visormou:input0";
- visorinput_dev->id.bustype = BUS_VIRTUAL;
- visorinput_dev->id.vendor = 0x0001;
- visorinput_dev->id.product = 0x0002;
- visorinput_dev->id.version = 0x0100;
-
- visorinput_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- set_bit(BTN_LEFT, visorinput_dev->keybit);
- set_bit(BTN_RIGHT, visorinput_dev->keybit);
- set_bit(BTN_MIDDLE, visorinput_dev->keybit);
-
- if (xres == 0)
- xres = PIXELS_ACROSS_DEFAULT;
- if (yres == 0)
- yres = PIXELS_DOWN_DEFAULT;
- input_set_abs_params(visorinput_dev, ABS_X, 0, xres, 0, 0);
- input_set_abs_params(visorinput_dev, ABS_Y, 0, yres, 0, 0);
-
- visorinput_dev->open = visorinput_open;
- visorinput_dev->close = visorinput_close;
- /* pre input_register! */
- input_set_drvdata(visorinput_dev, devdata);
- input_set_capability(visorinput_dev, EV_REL, REL_WHEEL);
-
- return visorinput_dev;
-}
-
-static struct visorinput_devdata *devdata_create(struct visor_device *dev,
- enum visorinput_dev_type dtype)
-{
- struct visorinput_devdata *devdata = NULL;
- unsigned int extra_bytes = 0;
- unsigned int size, xres, yres, err;
- struct visor_input_channel_data data;
-
- if (dtype == visorinput_keyboard)
- /* allocate room for devdata->keycode_table, filled in below */
- extra_bytes = KEYCODE_TABLE_BYTES * 2;
- devdata = kzalloc(struct_size(devdata, keycode_table, extra_bytes),
- GFP_KERNEL);
- if (!devdata)
- return NULL;
- mutex_init(&devdata->lock_visor_dev);
- mutex_lock(&devdata->lock_visor_dev);
- devdata->dev = dev;
-
- /*
- * visorinput_open() can be called as soon as input_register_device()
- * happens, and that will enable channel interrupts. Setting paused
- * prevents us from getting into visorinput_channel_interrupt() prior
- * to the device structure being totally initialized.
- */
- devdata->paused = true;
-
- /*
- * This is an input device in a client guest partition, so we need to
- * create whatever input nodes are necessary to deliver our inputs to
- * the guest OS.
- */
- switch (dtype) {
- case visorinput_keyboard:
- devdata->keycode_table_bytes = extra_bytes;
- memcpy(devdata->keycode_table, visorkbd_keycode,
- KEYCODE_TABLE_BYTES);
- memcpy(devdata->keycode_table + KEYCODE_TABLE_BYTES,
- visorkbd_ext_keycode, KEYCODE_TABLE_BYTES);
- devdata->visorinput_dev = setup_client_keyboard
- (devdata, devdata->keycode_table);
- if (!devdata->visorinput_dev)
- goto cleanups_register;
- break;
- case visorinput_mouse:
- size = sizeof(struct visor_input_channel_data);
- err = visorbus_read_channel(dev, sizeof(struct channel_header),
- &data, size);
- if (err)
- goto cleanups_register;
- xres = data.mouse.x_res;
- yres = data.mouse.y_res;
- devdata->visorinput_dev = setup_client_mouse(devdata, xres,
- yres);
- if (!devdata->visorinput_dev)
- goto cleanups_register;
- break;
- default:
- /* No other input devices supported */
- break;
- }
-
- dev_set_drvdata(&dev->device, devdata);
- mutex_unlock(&devdata->lock_visor_dev);
-
- /*
- * Device struct is completely set up now, with the exception of
- * visorinput_dev being registered. We need to unlock before we
- * register the device, because this can cause an on-stack call of
- * visorinput_open(), which would deadlock if we had the lock.
- */
- if (input_register_device(devdata->visorinput_dev)) {
- input_free_device(devdata->visorinput_dev);
- goto err_kfree_devdata;
- }
-
- mutex_lock(&devdata->lock_visor_dev);
- /*
- * Establish calls to visorinput_channel_interrupt() if that is the
- * desired state that we've kept track of in interrupts_enabled while
- * the device was being created.
- */
- devdata->paused = false;
- if (devdata->interrupts_enabled)
- visorbus_enable_channel_interrupts(dev);
- mutex_unlock(&devdata->lock_visor_dev);
-
- return devdata;
-
-cleanups_register:
- mutex_unlock(&devdata->lock_visor_dev);
-err_kfree_devdata:
- kfree(devdata);
- return NULL;
-}
-
-static int visorinput_probe(struct visor_device *dev)
-{
- const guid_t *guid;
- enum visorinput_dev_type dtype;
-
- guid = visorchannel_get_guid(dev->visorchannel);
- if (guid_equal(guid, &visor_mouse_channel_guid))
- dtype = visorinput_mouse;
- else if (guid_equal(guid, &visor_keyboard_channel_guid))
- dtype = visorinput_keyboard;
- else
- return -ENODEV;
- visorbus_disable_channel_interrupts(dev);
- if (!devdata_create(dev, dtype))
- return -ENOMEM;
- return 0;
-}
-
-static void unregister_client_input(struct input_dev *visorinput_dev)
-{
- if (visorinput_dev)
- input_unregister_device(visorinput_dev);
-}
-
-static void visorinput_remove(struct visor_device *dev)
-{
- struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
-
- if (!devdata)
- return;
-
- mutex_lock(&devdata->lock_visor_dev);
- visorbus_disable_channel_interrupts(dev);
-
- /*
- * due to above, at this time no thread of execution will be in
- * visorinput_channel_interrupt()
- */
-
- dev_set_drvdata(&dev->device, NULL);
- mutex_unlock(&devdata->lock_visor_dev);
-
- unregister_client_input(devdata->visorinput_dev);
- kfree(devdata);
-}
-
-/*
- * Set the locking key indicated by <keycode> to the state given by
- * <desired_state> (1 = locked, 0 = unlocked).
- */
-static void handle_locking_key(struct input_dev *visorinput_dev, int keycode,
- int desired_state)
-{
- int led;
-
- switch (keycode) {
- case KEY_CAPSLOCK:
- led = LED_CAPSL;
- break;
- case KEY_SCROLLLOCK:
- led = LED_SCROLLL;
- break;
- case KEY_NUMLOCK:
- led = LED_NUML;
- break;
- default:
- return;
- }
- if (test_bit(led, visorinput_dev->led) != desired_state) {
- input_report_key(visorinput_dev, keycode, 1);
- input_sync(visorinput_dev);
- input_report_key(visorinput_dev, keycode, 0);
- input_sync(visorinput_dev);
- __change_bit(led, visorinput_dev->led);
- }
-}
-
-/*
- * <scancode> is either a 1-byte scancode, or an extended 16-bit scancode with
- * 0xE0 in the low byte and the extended scancode value in the next higher byte.
- */
-static int scancode_to_keycode(int scancode)
-{
- if (scancode > 0xff)
- return visorkbd_ext_keycode[(scancode >> 8) & 0xff];
-
- return visorkbd_keycode[scancode];
-}
-
-static int calc_button(int x)
-{
- switch (x) {
- case 1:
- return BTN_LEFT;
- case 2:
- return BTN_MIDDLE;
- case 3:
- return BTN_RIGHT;
- default:
- return -EINVAL;
- }
-}
-
-/*
- * This is used only when this driver is active as an input driver in the
- * client guest partition. It is called periodically so we can obtain inputs
- * from the channel, and deliver them to the guest OS.
- */
-static void visorinput_channel_interrupt(struct visor_device *dev)
-{
- struct visor_inputreport r;
- int scancode, keycode;
- struct input_dev *visorinput_dev;
- int xmotion, ymotion, button;
- int i;
- struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
-
- if (!devdata)
- return;
-
- visorinput_dev = devdata->visorinput_dev;
-
- while (!visorchannel_signalremove(dev->visorchannel, 0, &r)) {
- scancode = r.activity.arg1;
- keycode = scancode_to_keycode(scancode);
- switch (r.activity.action) {
- case INPUTACTION_KEY_DOWN:
- input_report_key(visorinput_dev, keycode, 1);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_KEY_UP:
- input_report_key(visorinput_dev, keycode, 0);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_KEY_DOWN_UP:
- input_report_key(visorinput_dev, keycode, 1);
- input_sync(visorinput_dev);
- input_report_key(visorinput_dev, keycode, 0);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_SET_LOCKING_KEY_STATE:
- handle_locking_key(visorinput_dev, keycode,
- r.activity.arg2);
- break;
- case INPUTACTION_XY_MOTION:
- xmotion = r.activity.arg1;
- ymotion = r.activity.arg2;
- input_report_abs(visorinput_dev, ABS_X, xmotion);
- input_report_abs(visorinput_dev, ABS_Y, ymotion);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_MOUSE_BUTTON_DOWN:
- button = calc_button(r.activity.arg1);
- if (button < 0)
- break;
- input_report_key(visorinput_dev, button, 1);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_MOUSE_BUTTON_UP:
- button = calc_button(r.activity.arg1);
- if (button < 0)
- break;
- input_report_key(visorinput_dev, button, 0);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_MOUSE_BUTTON_CLICK:
- button = calc_button(r.activity.arg1);
- if (button < 0)
- break;
- input_report_key(visorinput_dev, button, 1);
- input_sync(visorinput_dev);
- input_report_key(visorinput_dev, button, 0);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_MOUSE_BUTTON_DCLICK:
- button = calc_button(r.activity.arg1);
- if (button < 0)
- break;
- for (i = 0; i < 2; i++) {
- input_report_key(visorinput_dev, button, 1);
- input_sync(visorinput_dev);
- input_report_key(visorinput_dev, button, 0);
- input_sync(visorinput_dev);
- }
- break;
- case INPUTACTION_WHEEL_ROTATE_AWAY:
- input_report_rel(visorinput_dev, REL_WHEEL, 1);
- input_sync(visorinput_dev);
- break;
- case INPUTACTION_WHEEL_ROTATE_TOWARD:
- input_report_rel(visorinput_dev, REL_WHEEL, -1);
- input_sync(visorinput_dev);
- break;
- default:
- /* Unsupported input action */
- break;
- }
- }
-}
-
-static int visorinput_pause(struct visor_device *dev,
- visorbus_state_complete_func complete_func)
-{
- int rc;
- struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
-
- if (!devdata) {
- rc = -ENODEV;
- goto out;
- }
-
- mutex_lock(&devdata->lock_visor_dev);
- if (devdata->paused) {
- rc = -EBUSY;
- goto out_locked;
- }
- if (devdata->interrupts_enabled)
- visorbus_disable_channel_interrupts(dev);
-
- /*
- * due to above, at this time no thread of execution will be in
- * visorinput_channel_interrupt()
- */
- devdata->paused = true;
- complete_func(dev, 0);
- rc = 0;
-out_locked:
- mutex_unlock(&devdata->lock_visor_dev);
-out:
- return rc;
-}
-
-static int visorinput_resume(struct visor_device *dev,
- visorbus_state_complete_func complete_func)
-{
- int rc;
- struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
-
- if (!devdata) {
- rc = -ENODEV;
- goto out;
- }
- mutex_lock(&devdata->lock_visor_dev);
- if (!devdata->paused) {
- rc = -EBUSY;
- goto out_locked;
- }
- devdata->paused = false;
- complete_func(dev, 0);
-
- /*
- * Re-establish calls to visorinput_channel_interrupt() if that is the
- * desired state that we've kept track of in interrupts_enabled while
- * the device was paused.
- */
- if (devdata->interrupts_enabled)
- visorbus_enable_channel_interrupts(dev);
-
- rc = 0;
-out_locked:
- mutex_unlock(&devdata->lock_visor_dev);
-out:
- return rc;
-}
-
-/* GUIDs for all channel types supported by this driver. */
-static struct visor_channeltype_descriptor visorinput_channel_types[] = {
- { VISOR_KEYBOARD_CHANNEL_GUID, "keyboard",
- sizeof(struct channel_header), 0 },
- { VISOR_MOUSE_CHANNEL_GUID, "mouse", sizeof(struct channel_header), 0 },
- {}
-};
-
-static struct visor_driver visorinput_driver = {
- .name = "visorinput",
- .owner = THIS_MODULE,
- .channel_types = visorinput_channel_types,
- .probe = visorinput_probe,
- .remove = visorinput_remove,
- .channel_interrupt = visorinput_channel_interrupt,
- .pause = visorinput_pause,
- .resume = visorinput_resume,
-};
-
-module_driver(visorinput_driver, visorbus_register_visor_driver,
- visorbus_unregister_visor_driver);
-
-MODULE_DEVICE_TABLE(visorbus, visorinput_channel_types);
-
-MODULE_AUTHOR("Unisys");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("s-Par human input driver for virtual keyboard/mouse");
-
-MODULE_ALIAS("visorbus:" VISOR_MOUSE_CHANNEL_GUID_STR);
-MODULE_ALIAS("visorbus:" VISOR_KEYBOARD_CHANNEL_GUID_STR);
diff --git a/drivers/staging/unisys/visornic/Kconfig b/drivers/staging/unisys/visornic/Kconfig
deleted file mode 100644
index 3f8f5570821b..000000000000
--- a/drivers/staging/unisys/visornic/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Unisys visornic configuration
-#
-
-config UNISYS_VISORNIC
- tristate "Unisys visornic driver"
- depends on UNISYSSPAR && UNISYS_VISORBUS && NET
- help
- The Unisys Visornic driver provides support for s-Par network
- devices exposed on the s-Par visorbus. When a message is sent
- to visorbus to create a network device, the probe function of
- visornic is called to create the netdev device. Networking on
- s-Par switches will not work if this driver is not selected.
- If you say Y here, you will enable the Unisys visornic driver.
-
diff --git a/drivers/staging/unisys/visornic/Makefile b/drivers/staging/unisys/visornic/Makefile
deleted file mode 100644
index f2984880c340..000000000000
--- a/drivers/staging/unisys/visornic/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Unisys visornic
-#
-
-obj-$(CONFIG_UNISYS_VISORNIC) += visornic.o
-
-visornic-y := visornic_main.o
-
-ccflags-y += -I $(srctree)/$(src)/../include
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
deleted file mode 100644
index 643432458105..000000000000
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ /dev/null
@@ -1,2148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-/* This driver lives in an s-Par guest partition and registers to receive
- * Ethernet IO channels from the visorbus driver. It creates netdev devices,
- * forwards transmits to the IO channel, and accepts receives from the IO
- * Partition via the IO channel.
- */
-
-#include <linux/debugfs.h>
-#include <linux/etherdevice.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/kthread.h>
-#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
-#include <linux/visorbus.h>
-
-#include "iochannel.h"
-
-#define VISORNIC_INFINITE_RSP_WAIT 0
-
-/* MAX_BUF = 64 lines x 32 MAXVNIC x 80 characters
- * = 163840 bytes
- */
-#define MAX_BUF 163840
-#define NAPI_WEIGHT 64
-
-/* GUIDs for the channel type supported by this driver. */
-/* {8cd5994d-c58e-11da-95a9-00e08161165f} */
-#define VISOR_VNIC_CHANNEL_GUID \
- GUID_INIT(0x8cd5994d, 0xc58e, 0x11da, \
- 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-#define VISOR_VNIC_CHANNEL_GUID_STR \
- "8cd5994d-c58e-11da-95a9-00e08161165f"
-
-static struct visor_channeltype_descriptor visornic_channel_types[] = {
- /* Note that the only channel type we expect to be reported by the
- * bus driver is the VISOR_VNIC channel.
- */
- { VISOR_VNIC_CHANNEL_GUID, "ultravnic", sizeof(struct channel_header),
- VISOR_VNIC_CHANNEL_VERSIONID },
- {}
-};
-MODULE_DEVICE_TABLE(visorbus, visornic_channel_types);
-/* FIXME XXX: This next line of code must be fixed and removed before
- * acceptance into the 'normal' part of the kernel. It is only here as a
- * placeholder to get module autoloading functionality working for visorbus.
- * Code must be added to scripts/mod/file2alias.c, etc., to get this working
- * properly.
- */
-MODULE_ALIAS("visorbus:" VISOR_VNIC_CHANNEL_GUID_STR);
-
-struct chanstat {
- unsigned long got_rcv;
- unsigned long got_enbdisack;
- unsigned long got_xmit_done;
- unsigned long xmit_fail;
- unsigned long sent_enbdis;
- unsigned long sent_promisc;
- unsigned long sent_post;
- unsigned long sent_post_failed;
- unsigned long sent_xmit;
- unsigned long reject_count;
- unsigned long extra_rcvbufs_sent;
-};
-
-/* struct visornic_devdata
- * @enabled: 0 disabled 1 enabled to receive.
- * @enab_dis_acked: NET_RCV_ENABLE/DISABLE acked by IOPART.
- * @struct *dev:
- * @struct *netdev:
- * @struct net_stats:
- * @interrupt_rcvd:
- * @rsp_queue:
- * @struct **rcvbuf:
- * @incarnation_id: incarnation_id lets IOPART know about
- * re-birth.
- * @old_flags: flags as they were prior to
- * set_multicast_list.
- * @usage: count of users.
- * @num_rcv_bufs: number of rcv buffers the vnic will post.
- * @num_rcv_bufs_could_not_alloc:
- * @num_rcvbuf_in_iovm:
- * @alloc_failed_in_if_needed_cnt:
- * @alloc_failed_in_repost_rtn_cnt:
- * @max_outstanding_net_xmits: absolute max number of outstanding xmits
- * - should never hit this.
- * @upper_threshold_net_xmits: high water mark for calling
- * netif_stop_queue().
- * @lower_threshold_net_xmits: low water mark for calling
- * netif_wake_queue().
- * @struct xmitbufhead: xmitbufhead - head of the xmit buffer list
- * sent to the IOPART end.
- * @server_down_complete_func:
- * @struct timeout_reset:
- * @struct *cmdrsp_rcv: cmdrsp_rcv is used for posting/unposting rcv
- * buffers.
- * @struct *xmit_cmdrsp: xmit_cmdrsp - issues NET_XMIT - only one
- * active xmit at a time.
- * @server_down: IOPART is down.
- * @server_change_state: Processing SERVER_CHANGESTATE msg.
- * @going_away: device is being torn down.
- * @struct *eth_debugfs_dir:
- * @interrupts_rcvd:
- * @interrupts_notme:
- * @interrupts_disabled:
- * @busy_cnt:
- * @priv_lock: spinlock to access devdata structures.
- * @flow_control_upper_hits:
- * @flow_control_lower_hits:
- * @n_rcv0: # rcvs of 0 buffers.
- * @n_rcv1: # rcvs of 1 buffers.
- * @n_rcv2: # rcvs of 2 buffers.
- * @n_rcvx: # rcvs of >2 buffers.
- * @found_repost_rcvbuf_cnt: # repost_rcvbuf_cnt.
- * @repost_found_skb_cnt: # of times the skb was found.
- * @n_repost_deficit: # of lost rcv buffers.
- * @bad_rcv_buf: # of unknown rcv skb not freed.
- * @n_rcv_packets_not_accepted: # bogus rcv packets.
- * @queuefullmsg_logged:
- * @struct chstat:
- * @struct napi:
- * @struct cmdrsp:
- */
-struct visornic_devdata {
- unsigned short enabled;
- unsigned short enab_dis_acked;
-
- struct visor_device *dev;
- struct net_device *netdev;
- struct net_device_stats net_stats;
- atomic_t interrupt_rcvd;
- wait_queue_head_t rsp_queue;
- struct sk_buff **rcvbuf;
- u64 incarnation_id;
- unsigned short old_flags;
- atomic_t usage;
-
- int num_rcv_bufs;
- int num_rcv_bufs_could_not_alloc;
- atomic_t num_rcvbuf_in_iovm;
- unsigned long alloc_failed_in_if_needed_cnt;
- unsigned long alloc_failed_in_repost_rtn_cnt;
-
- unsigned long max_outstanding_net_xmits;
- unsigned long upper_threshold_net_xmits;
- unsigned long lower_threshold_net_xmits;
- struct sk_buff_head xmitbufhead;
-
- visorbus_state_complete_func server_down_complete_func;
- struct work_struct timeout_reset;
- struct uiscmdrsp *cmdrsp_rcv;
- struct uiscmdrsp *xmit_cmdrsp;
- bool server_down;
- bool server_change_state;
- bool going_away;
- struct dentry *eth_debugfs_dir;
- u64 interrupts_rcvd;
- u64 interrupts_notme;
- u64 interrupts_disabled;
- u64 busy_cnt;
- /* spinlock to access devdata structures. */
- spinlock_t priv_lock;
-
- /* flow control counter */
- u64 flow_control_upper_hits;
- u64 flow_control_lower_hits;
-
- /* debug counters */
- unsigned long n_rcv0;
- unsigned long n_rcv1;
- unsigned long n_rcv2;
- unsigned long n_rcvx;
- unsigned long found_repost_rcvbuf_cnt;
- unsigned long repost_found_skb_cnt;
- unsigned long n_repost_deficit;
- unsigned long bad_rcv_buf;
- unsigned long n_rcv_packets_not_accepted;
-
- int queuefullmsg_logged;
- struct chanstat chstat;
- struct napi_struct napi;
- struct uiscmdrsp cmdrsp[SIZEOF_CMDRSP];
-};
-
-/* Returns next non-zero index on success or 0 on failure (i.e. out of room). */
-static u16 add_physinfo_entries(u64 inp_pfn, u16 inp_off, u16 inp_len,
- u16 index, u16 max_pi_arr_entries,
- struct phys_info pi_arr[])
-{
- u16 i, len, firstlen;
-
- firstlen = PI_PAGE_SIZE - inp_off;
- if (inp_len <= firstlen) {
- /* The input entry spans only one page - add as is. */
- if (index >= max_pi_arr_entries)
- return 0;
- pi_arr[index].pi_pfn = inp_pfn;
- pi_arr[index].pi_off = (u16)inp_off;
- pi_arr[index].pi_len = (u16)inp_len;
- return index + 1;
- }
-
- /* This entry spans multiple pages. */
- for (len = inp_len, i = 0; len;
- len -= pi_arr[index + i].pi_len, i++) {
- if (index + i >= max_pi_arr_entries)
- return 0;
- pi_arr[index + i].pi_pfn = inp_pfn + i;
- if (i == 0) {
- pi_arr[index].pi_off = inp_off;
- pi_arr[index].pi_len = firstlen;
- } else {
- pi_arr[index + i].pi_off = 0;
- pi_arr[index + i].pi_len = min_t(u16, len,
- PI_PAGE_SIZE);
- }
- }
- return index + i;
-}
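-
-/* Worked example (editorial addition, not part of the original file),
- * assuming PI_PAGE_SIZE is 4096: an input entry of pfn = P, offset 3000,
- * length 5000 does not fit in the first page (firstlen = 4096 - 3000 = 1096),
- * so add_physinfo_entries() splits it into two phys_info entries,
- *   pi_arr[index]     = { P,     3000, 1096 }
- *   pi_arr[index + 1] = { P + 1,    0, 3904 }
- * and returns index + 2.
- */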
-
-/* visor_copy_fragsinfo_from_skb - copy fragment list in the SKB to a phys_info
- * array that the IOPART understands
- * @skb: Skbuff that we are pulling the frags from.
- * @firstfraglen: Length of first fragment in skb.
- * @frags_max: Max len of frags array.
- * @frags: Frags array filled in on output.
- *
- * Return: Positive integer indicating number of entries filled in frags on
- * success, negative integer on error.
- */
-static int visor_copy_fragsinfo_from_skb(struct sk_buff *skb,
- unsigned int firstfraglen,
- unsigned int frags_max,
- struct phys_info frags[])
-{
- unsigned int count = 0, frag, size, offset = 0, numfrags;
- unsigned int total_count;
-
- numfrags = skb_shinfo(skb)->nr_frags;
-
-	/* Compute the number of fragments this skb has, and if it's more than
-	 * the frag array can hold, linearize the skb.
- */
- total_count = numfrags + (firstfraglen / PI_PAGE_SIZE);
- if (firstfraglen % PI_PAGE_SIZE)
- total_count++;
-
- if (total_count > frags_max) {
- if (skb_linearize(skb))
- return -EINVAL;
- numfrags = skb_shinfo(skb)->nr_frags;
- firstfraglen = 0;
- }
-
- while (firstfraglen) {
- if (count == frags_max)
- return -EINVAL;
-
- frags[count].pi_pfn =
- page_to_pfn(virt_to_page(skb->data + offset));
- frags[count].pi_off =
- (unsigned long)(skb->data + offset) & PI_PAGE_MASK;
- size = min_t(unsigned int, firstfraglen,
- PI_PAGE_SIZE - frags[count].pi_off);
-
- /* can take smallest of firstfraglen (what's left) OR
- * bytes left in the page
- */
- frags[count].pi_len = size;
- firstfraglen -= size;
- offset += size;
- count++;
- }
- if (numfrags) {
- if ((count + numfrags) > frags_max)
- return -EINVAL;
-
- for (frag = 0; frag < numfrags; frag++) {
- count = add_physinfo_entries(page_to_pfn(
- skb_frag_page(&skb_shinfo(skb)->frags[frag])),
- skb_frag_off(&skb_shinfo(skb)->frags[frag]),
- skb_frag_size(&skb_shinfo(skb)->frags[frag]),
- count, frags_max, frags);
- /* add_physinfo_entries only returns
- * zero if the frags array is out of room
- * That should never happen because we
- * fail above, if count+numfrags > frags_max.
- */
- if (!count)
- return -EINVAL;
- }
- }
- if (skb_shinfo(skb)->frag_list) {
- struct sk_buff *skbinlist;
- int c;
-
- for (skbinlist = skb_shinfo(skb)->frag_list; skbinlist;
- skbinlist = skbinlist->next) {
- c = visor_copy_fragsinfo_from_skb(skbinlist,
- skbinlist->len -
- skbinlist->data_len,
- frags_max - count,
- &frags[count]);
- if (c < 0)
- return c;
- count += c;
- }
- }
- return count;
-}
-
-static ssize_t enable_ints_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *ppos)
-{
-	/* We don't want to break the ABI here by having a debugfs
-	 * file that no longer exists or is no longer writable, so
-	 * let's just make this a vestigial function.
- */
- return count;
-}
-
-static const struct file_operations debugfs_enable_ints_fops = {
- .write = enable_ints_write,
-};
-
-/* visornic_serverdown_complete - pause device following IOPART going down
- * @devdata: Device managed by IOPART.
- *
- * The IO partition has gone down, and we need to do some cleanup for when it
- * comes back. Treat the IO partition as the link being down.
- */
-static void visornic_serverdown_complete(struct visornic_devdata *devdata)
-{
- struct net_device *netdev = devdata->netdev;
-
- /* Stop polling for interrupts */
- visorbus_disable_channel_interrupts(devdata->dev);
-
- rtnl_lock();
- dev_close(netdev);
- rtnl_unlock();
-
- atomic_set(&devdata->num_rcvbuf_in_iovm, 0);
- devdata->chstat.sent_xmit = 0;
- devdata->chstat.got_xmit_done = 0;
-
- if (devdata->server_down_complete_func)
- (*devdata->server_down_complete_func)(devdata->dev, 0);
-
- devdata->server_down = true;
- devdata->server_change_state = false;
- devdata->server_down_complete_func = NULL;
-}
-
-/* visornic_serverdown - Command has notified us that IOPART is down
- * @devdata: Device managed by IOPART.
- * @complete_func: Function to call when finished.
- *
- * Schedule the work needed to handle the server down request. Make sure we
- * haven't already handled the server change state event.
- *
- * Return: 0 if we scheduled the work, negative integer on error.
- */
-static int visornic_serverdown(struct visornic_devdata *devdata,
- visorbus_state_complete_func complete_func)
-{
- unsigned long flags;
- int err;
-
- spin_lock_irqsave(&devdata->priv_lock, flags);
- if (devdata->server_change_state) {
- dev_dbg(&devdata->dev->device, "%s changing state\n",
- __func__);
- err = -EINVAL;
- goto err_unlock;
- }
- if (devdata->server_down) {
- dev_dbg(&devdata->dev->device, "%s already down\n",
- __func__);
- err = -EINVAL;
- goto err_unlock;
- }
- if (devdata->going_away) {
- dev_dbg(&devdata->dev->device,
- "%s aborting because device removal pending\n",
- __func__);
- err = -ENODEV;
- goto err_unlock;
- }
- devdata->server_change_state = true;
- devdata->server_down_complete_func = complete_func;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- visornic_serverdown_complete(devdata);
- return 0;
-
-err_unlock:
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- return err;
-}
-
-/* alloc_rcv_buf - alloc rcv buffer to be given to the IO Partition
- * @netdev: Network adapter the rcv bufs are attached to.
- *
- * Create an sk_buff (rcv_buf) that will be passed to the IO Partition
- * so that it can write rcv data into our memory space.
- *
- * Return: Pointer to sk_buff.
- */
-static struct sk_buff *alloc_rcv_buf(struct net_device *netdev)
-{
- struct sk_buff *skb;
-
- /* NOTE: the first fragment in each rcv buffer is pointed to by
- * rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE
- * in length, so the first frag is large enough to hold 1514.
- */
- skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
- if (!skb)
- return NULL;
- skb->dev = netdev;
- /* current value of mtu doesn't come into play here; large
- * packets will just end up using multiple rcv buffers all of
- * same size.
- */
- skb->len = RCVPOST_BUF_SIZE;
-	/* alloc_skb already zeroes data_len; set it again here only for clarity. */
- skb->data_len = 0;
- return skb;
-}
-
-/* post_skb - post a skb to the IO Partition
- * @cmdrsp: Cmdrsp packet to be sent to the IO Partition.
- * @devdata: visornic_devdata to post the skb to.
- * @skb: Skb to give to the IO partition.
- *
- * Return: 0 on success, negative integer on error.
- */
-static int post_skb(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
- struct sk_buff *skb)
-{
- int err;
-
- cmdrsp->net.buf = skb;
- cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data));
- cmdrsp->net.rcvpost.frag.pi_off =
- (unsigned long)skb->data & PI_PAGE_MASK;
- cmdrsp->net.rcvpost.frag.pi_len = skb->len;
- cmdrsp->net.rcvpost.unique_num = devdata->incarnation_id;
-
- if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) > PI_PAGE_SIZE)
- return -EINVAL;
-
- cmdrsp->net.type = NET_RCV_POST;
- cmdrsp->cmdtype = CMD_NET_TYPE;
- err = visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp);
- if (err) {
- devdata->chstat.sent_post_failed++;
- return err;
- }
-
- atomic_inc(&devdata->num_rcvbuf_in_iovm);
- devdata->chstat.sent_post++;
- return 0;
-}
-
-/* send_enbdis - Send NET_RCV_ENBDIS to IO Partition
- * @netdev: Netdevice we are enabling/disabling, used as context return value.
- * @state: Enable = 1/disable = 0.
- * @devdata: Visornic device we are enabling/disabling.
- *
- * Send the enable/disable message to the IO Partition.
- *
- * Return: 0 on success, negative integer on error.
- */
-static int send_enbdis(struct net_device *netdev, int state,
- struct visornic_devdata *devdata)
-{
- int err;
-
- devdata->cmdrsp_rcv->net.enbdis.enable = state;
- devdata->cmdrsp_rcv->net.enbdis.context = netdev;
- devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
- devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
- err = visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- devdata->cmdrsp_rcv);
- if (err)
- return err;
- devdata->chstat.sent_enbdis++;
- return 0;
-}
-
-/* visornic_disable_with_timeout - disable network adapter
- * @netdev: netdevice to disable.
- * @timeout: Timeout to wait for disable.
- *
- * Disable the network adapter and inform the IO Partition that we are disabled.
- * Reclaim memory from rcv bufs.
- *
- * Return: 0 on success, negative integer if the IO Partition fails to respond.
- */
-static int visornic_disable_with_timeout(struct net_device *netdev,
- const int timeout)
-{
- struct visornic_devdata *devdata = netdev_priv(netdev);
- int i;
- unsigned long flags;
- int wait = 0;
- int err;
-
- /* send a msg telling the other end we are stopping incoming pkts */
- spin_lock_irqsave(&devdata->priv_lock, flags);
- devdata->enabled = 0;
- /* must wait for ack */
- devdata->enab_dis_acked = 0;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- /* send disable and wait for ack -- don't hold lock when sending
- * disable because if the queue is full, insert might sleep.
- * If an error occurs, don't wait for the timeout.
- */
- err = send_enbdis(netdev, 0, devdata);
- if (err)
- return err;
-
- /* wait for ack to arrive before we try to free rcv buffers
- * NOTE: the other end automatically unposts the rcv buffers
- * when it gets a disable.
- */
- spin_lock_irqsave(&devdata->priv_lock, flags);
- while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
- (wait < timeout)) {
- if (devdata->enab_dis_acked)
- break;
- if (devdata->server_down || devdata->server_change_state) {
- dev_dbg(&netdev->dev, "%s server went away\n",
- __func__);
- break;
- }
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- wait += schedule_timeout(msecs_to_jiffies(10));
- spin_lock_irqsave(&devdata->priv_lock, flags);
- }
-
- /* Wait for usage to go to 1 (no other users) before freeing
- * rcv buffers
- */
- if (atomic_read(&devdata->usage) > 1) {
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- schedule_timeout(msecs_to_jiffies(10));
- spin_lock_irqsave(&devdata->priv_lock, flags);
-			if (atomic_read(&devdata->usage) == 1)
- break;
- }
- }
- /* we've set enabled to 0, so we can give up the lock. */
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- /* stop the transmit queue so nothing more can be transmitted */
- netif_stop_queue(netdev);
-
- napi_disable(&devdata->napi);
-
- skb_queue_purge(&devdata->xmitbufhead);
-
-	/* Free rcv buffers - other end has automatically unposted them on
- * disable
- */
- for (i = 0; i < devdata->num_rcv_bufs; i++) {
- if (devdata->rcvbuf[i]) {
- kfree_skb(devdata->rcvbuf[i]);
- devdata->rcvbuf[i] = NULL;
- }
- }
-
- return 0;
-}
-
-/* init_rcv_bufs - initialize receive buffers and send them to the IO Partition
- * @netdev: struct netdevice.
- * @devdata: visornic_devdata.
- *
- * Allocate rcv buffers and post them to the IO Partition.
- *
- * Return: 0 on success, negative integer on failure.
- */
-static int init_rcv_bufs(struct net_device *netdev,
- struct visornic_devdata *devdata)
-{
- int i, j, count, err;
-
-	/* Allocate a fixed number of receive buffers to post to uisnic;
-	 * post the receive buffers only after we've allocated the
-	 * required amount.
- */
- for (i = 0; i < devdata->num_rcv_bufs; i++) {
- devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
- /* if we failed to allocate one let us stop */
- if (!devdata->rcvbuf[i])
- break;
- }
- /* couldn't even allocate one -- bail out */
- if (i == 0)
- return -ENOMEM;
- count = i;
-
- /* Ensure we can alloc 2/3rd of the requested number of buffers.
- * 2/3 is an arbitrary choice; used also in ndis init.c
- */
- if (count < ((2 * devdata->num_rcv_bufs) / 3)) {
- /* free receive buffers we did alloc and then bail out */
- for (i = 0; i < count; i++) {
- kfree_skb(devdata->rcvbuf[i]);
- devdata->rcvbuf[i] = NULL;
- }
- return -ENOMEM;
- }
-
- /* post receive buffers to receive incoming input - without holding
- * lock - we've not enabled nor started the queue so there shouldn't
- * be any rcv or xmit activity
- */
- for (i = 0; i < count; i++) {
- err = post_skb(devdata->cmdrsp_rcv, devdata,
- devdata->rcvbuf[i]);
- if (!err)
- continue;
-
- /* Error handling -
- * If we posted at least one skb, we should return success,
- * but need to free the resources that we have not successfully
- * posted.
- */
- for (j = i; j < count; j++) {
- kfree_skb(devdata->rcvbuf[j]);
- devdata->rcvbuf[j] = NULL;
- }
- if (i == 0)
- return err;
- break;
- }
-
- return 0;
-}
-
-/* visornic_enable_with_timeout - send enable to IO Partition
- * @netdev: struct net_device.
- * @timeout: Time to wait for the ACK from the enable.
- *
- * Sends an enable to the IOVM, and initializes and posts receive buffers to
- * the IOVM. The timeout is defined in msecs (a timeout of 0 specifies an
- * infinite wait).
- *
- * Return: 0 on success, negative integer on failure.
- */
-static int visornic_enable_with_timeout(struct net_device *netdev,
- const int timeout)
-{
- int err = 0;
- struct visornic_devdata *devdata = netdev_priv(netdev);
- unsigned long flags;
- int wait = 0;
-
- napi_enable(&devdata->napi);
-
- /* NOTE: the other end automatically unposts the rcv buffers when it
- * gets a disable.
- */
- err = init_rcv_bufs(netdev, devdata);
- if (err < 0) {
- dev_err(&netdev->dev,
- "%s failed to init rcv bufs\n", __func__);
- return err;
- }
-
- spin_lock_irqsave(&devdata->priv_lock, flags);
- devdata->enabled = 1;
- devdata->enab_dis_acked = 0;
-
- /* now we're ready, let's send an ENB to uisnic but until we get
- * an ACK back from uisnic, we'll drop the packets
- */
- devdata->n_rcv_packets_not_accepted = 0;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- /* send enable and wait for ack -- don't hold lock when sending enable
- * because if the queue is full, insert might sleep. If an error
- * occurs error out.
- */
- err = send_enbdis(netdev, 1, devdata);
- if (err)
- return err;
-
- spin_lock_irqsave(&devdata->priv_lock, flags);
- while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
- (wait < timeout)) {
- if (devdata->enab_dis_acked)
- break;
- if (devdata->server_down || devdata->server_change_state) {
- dev_dbg(&netdev->dev, "%s server went away\n",
- __func__);
- break;
- }
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- wait += schedule_timeout(msecs_to_jiffies(10));
- spin_lock_irqsave(&devdata->priv_lock, flags);
- }
-
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- if (!devdata->enab_dis_acked) {
- dev_err(&netdev->dev, "%s missing ACK\n", __func__);
- return -EIO;
- }
-
- netif_start_queue(netdev);
- return 0;
-}
-
-/* visornic_timeout_reset - handle xmit timeout resets
- * @work: Work item that scheduled the work.
- *
- * Transmit timeouts are typically handled by resetting the device for our
- * virtual NIC; we will send a disable and enable to the IOVM. If it doesn't
- * respond, we will trigger a serverdown.
- */
-static void visornic_timeout_reset(struct work_struct *work)
-{
- struct visornic_devdata *devdata;
- struct net_device *netdev;
- int response = 0;
-
- devdata = container_of(work, struct visornic_devdata, timeout_reset);
- netdev = devdata->netdev;
-
- rtnl_lock();
- if (!netif_running(netdev)) {
- rtnl_unlock();
- return;
- }
-
- response = visornic_disable_with_timeout(netdev,
- VISORNIC_INFINITE_RSP_WAIT);
- if (response)
- goto call_serverdown;
-
- response = visornic_enable_with_timeout(netdev,
- VISORNIC_INFINITE_RSP_WAIT);
- if (response)
- goto call_serverdown;
-
- rtnl_unlock();
-
- return;
-
-call_serverdown:
- visornic_serverdown(devdata, NULL);
- rtnl_unlock();
-}
-
-/* visornic_open - enable the visornic device and mark the queue started
- * @netdev: netdevice to start.
- *
- * Enable the device and start the transmit queue.
- *
- * Return: 0 on success.
- */
-static int visornic_open(struct net_device *netdev)
-{
- visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
- return 0;
-}
-
-/* visornic_close - disables the visornic device and stops the queues
- * @netdev: netdevice to stop.
- *
- * Disable the device and stop the transmit queue.
- *
- * Return: 0 on success.
- */
-static int visornic_close(struct net_device *netdev)
-{
- visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
- return 0;
-}
-
-/* devdata_xmits_outstanding - compute outstanding xmits
- * @devdata: visornic_devdata for device
- *
- * Return: Long integer representing the number of outstanding xmits.
- */
-static unsigned long devdata_xmits_outstanding(struct visornic_devdata *devdata)
-{
- if (devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done)
- return devdata->chstat.sent_xmit -
- devdata->chstat.got_xmit_done;
- return (ULONG_MAX - devdata->chstat.got_xmit_done
- + devdata->chstat.sent_xmit + 1);
-}
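-
-/* Worked example (editorial addition, not part of the original file): both
- * counters are free-running, so the second branch covers the case where
- * sent_xmit has wrapped around while got_xmit_done has not. For instance,
- * with sent_xmit = 5 and got_xmit_done = ULONG_MAX - 2, the outstanding
- * count is (ULONG_MAX - (ULONG_MAX - 2)) + 5 + 1 = 8, i.e. eight xmits
- * sent but not yet acknowledged as done.
- */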
-
-/* vnic_hit_high_watermark
- * @devdata: Indicates visornic device we are checking.
- * @high_watermark: Max num of unacked xmits we will tolerate before we will
- * start throttling.
- *
- * Return: True iff the number of unacked xmits sent to the IO Partition is >=
- * high_watermark. False otherwise.
- */
-static bool vnic_hit_high_watermark(struct visornic_devdata *devdata,
- ulong high_watermark)
-{
- return (devdata_xmits_outstanding(devdata) >= high_watermark);
-}
-
-/* vnic_hit_low_watermark
- * @devdata: Indicates visornic device we are checking.
- * @low_watermark: We will wait until the num of unacked xmits drops to this
- * value or lower before we start transmitting again.
- *
- * Return: True iff the number of unacked xmits sent to the IO Partition is <=
- * low_watermark.
- */
-static bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
- ulong low_watermark)
-{
- return (devdata_xmits_outstanding(devdata) <= low_watermark);
-}
-
-/* visornic_xmit - send a packet to the IO Partition
- * @skb: Packet to be sent.
- * @netdev: Net device the packet is being sent from.
- *
- * Convert the skb to a cmdrsp so the IO Partition can understand it, and send
- * the XMIT command to the IO Partition for processing. This function is
- * protected from concurrent calls by a spinlock xmit_lock in the net_device
- * struct. As soon as the function returns, it can be called again.
- *
- * Return: NETDEV_TX_OK.
- */
-static netdev_tx_t visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
- struct visornic_devdata *devdata;
- int len, firstfraglen, padlen;
- struct uiscmdrsp *cmdrsp = NULL;
- unsigned long flags;
- int err;
-
- devdata = netdev_priv(netdev);
- spin_lock_irqsave(&devdata->priv_lock, flags);
-
- if (netif_queue_stopped(netdev) || devdata->server_down ||
- devdata->server_change_state) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- devdata->busy_cnt++;
- dev_dbg(&netdev->dev,
- "%s busy - queue stopped\n", __func__);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- /* sk_buff struct is used to host network data throughout all the
- * linux network subsystems
- */
- len = skb->len;
-
- /* skb->len is the FULL length of data (including fragmentary portion)
- * skb->data_len is the length of the fragment portion in frags
- * skb->len - skb->data_len is size of the 1st fragment in skb->data
- * calculate the length of the first fragment that skb->data is
- * pointing to
- */
- firstfraglen = skb->len - skb->data_len;
- if (firstfraglen < ETH_HLEN) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- devdata->busy_cnt++;
- dev_err(&netdev->dev,
- "%s busy - first frag too small (%d)\n",
- __func__, firstfraglen);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- if (len < ETH_MIN_PACKET_SIZE &&
- ((skb_end_pointer(skb) - skb->data) >= ETH_MIN_PACKET_SIZE)) {
- /* pad the packet out to minimum size */
- padlen = ETH_MIN_PACKET_SIZE - len;
- skb_put_zero(skb, padlen);
- len += padlen;
- firstfraglen += padlen;
- }
-
- cmdrsp = devdata->xmit_cmdrsp;
- /* clear cmdrsp */
- memset(cmdrsp, 0, SIZEOF_CMDRSP);
- cmdrsp->net.type = NET_XMIT;
- cmdrsp->cmdtype = CMD_NET_TYPE;
-
- /* save the pointer to skb -- we'll need it for completion */
- cmdrsp->net.buf = skb;
-
- if (vnic_hit_high_watermark(devdata,
- devdata->max_outstanding_net_xmits)) {
- /* extra NET_XMITs queued over to IOVM - need to wait */
- devdata->chstat.reject_count++;
- if (!devdata->queuefullmsg_logged &&
- ((devdata->chstat.reject_count & 0x3ff) == 1))
- devdata->queuefullmsg_logged = 1;
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- devdata->busy_cnt++;
- dev_dbg(&netdev->dev,
- "%s busy - waiting for iovm to catch up\n",
- __func__);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- if (devdata->queuefullmsg_logged)
- devdata->queuefullmsg_logged = 0;
-
- if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
- cmdrsp->net.xmt.lincsum.valid = 1;
- cmdrsp->net.xmt.lincsum.protocol = skb->protocol;
- if (skb_transport_header(skb) > skb->data) {
- cmdrsp->net.xmt.lincsum.hrawoff =
- skb_transport_header(skb) - skb->data;
- cmdrsp->net.xmt.lincsum.hrawoffv = 1;
- }
- if (skb_network_header(skb) > skb->data) {
- cmdrsp->net.xmt.lincsum.nhrawoff =
- skb_network_header(skb) - skb->data;
- cmdrsp->net.xmt.lincsum.nhrawoffv = 1;
- }
- cmdrsp->net.xmt.lincsum.csum = skb->csum;
- } else {
- cmdrsp->net.xmt.lincsum.valid = 0;
- }
-
- /* save off the length of the entire data packet */
- cmdrsp->net.xmt.len = len;
-
- /* copy the Ethernet header from the first frag into cmdrsp
- * - everything else will be passed in frags & DMA'ed
- */
- memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HLEN);
-
- /* copy frags info - from skb->data we need to only provide access
- * beyond eth header
- */
- cmdrsp->net.xmt.num_frags =
- visor_copy_fragsinfo_from_skb(skb, firstfraglen,
- MAX_PHYS_INFO,
- cmdrsp->net.xmt.frags);
- if (cmdrsp->net.xmt.num_frags < 0) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- devdata->busy_cnt++;
- dev_err(&netdev->dev,
- "%s busy - copy frags failed\n", __func__);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- err = visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART, cmdrsp);
- if (err) {
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- devdata->busy_cnt++;
- dev_dbg(&netdev->dev,
- "%s busy - signalinsert failed\n", __func__);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- /* Track the skbs that have been sent to the IOVM for XMIT */
- skb_queue_head(&devdata->xmitbufhead, skb);
-
- /* update xmt stats */
- devdata->net_stats.tx_packets++;
- devdata->net_stats.tx_bytes += skb->len;
- devdata->chstat.sent_xmit++;
-
- /* check if we have hit the high watermark for netif_stop_queue() */
- if (vnic_hit_high_watermark(devdata,
- devdata->upper_threshold_net_xmits)) {
- /* extra NET_XMITs queued over to IOVM - need to wait */
- /* stop queue - call netif_wake_queue() after lower threshold */
- netif_stop_queue(netdev);
- dev_dbg(&netdev->dev,
- "%s busy - invoking iovm flow control\n",
- __func__);
- devdata->flow_control_upper_hits++;
- }
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- /* skb will be freed when we get back NET_XMIT_DONE */
- return NETDEV_TX_OK;
-}
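One detail worth calling out in visornic_xmit() above is the short-frame handling: anything below the 60-byte Ethernet minimum is zero-padded in place, but only when the skb has enough tailroom. The same bookkeeping in stand-alone form (the buffer, sizes and pad_short_frame() helper are illustrative only; the driver itself uses skb_put_zero()):

#include <stdio.h>
#include <string.h>

#define ETH_MIN_PACKET_SIZE 60	/* illustrative; minimum Ethernet frame size excluding FCS */

/* pad a short frame with zeroes, as visornic_xmit() does with skb_put_zero() */
static unsigned int pad_short_frame(unsigned char *buf, unsigned int len,
				    unsigned int room)
{
	unsigned int padlen;

	if (len >= ETH_MIN_PACKET_SIZE || room < ETH_MIN_PACKET_SIZE)
		return len;	/* nothing to do, or no tailroom to pad into */

	padlen = ETH_MIN_PACKET_SIZE - len;
	memset(buf + len, 0, padlen);
	return len + padlen;
}

int main(void)
{
	unsigned char frame[64] = { 0xff };	/* pretend 42-byte ARP-sized frame */

	printf("padded length: %u\n", pad_short_frame(frame, 42, sizeof(frame)));
	return 0;
}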
-
-/* visornic_get_stats - returns net_stats of the visornic device
- * @netdev: netdevice.
- *
- * Return: Pointer to the net_device_stats struct for the device.
- */
-static struct net_device_stats *visornic_get_stats(struct net_device *netdev)
-{
- struct visornic_devdata *devdata = netdev_priv(netdev);
-
- return &devdata->net_stats;
-}
-
-/* visornic_change_mtu - changes mtu of device
- * @netdev: netdevice.
- * @new_mtu: Value of new mtu.
- *
- * The device's MTU cannot be changed by the system; it must be changed via a
- * CONTROLVM message. All vnics and pnics in a switch have to have the same MTU
- * for everything to work. Currently not supported.
- *
- * Return: -EINVAL.
- */
-static int visornic_change_mtu(struct net_device *netdev, int new_mtu)
-{
- return -EINVAL;
-}
-
-/* visornic_set_multi - set visornic device flags
- * @netdev: netdevice.
- *
- * The only flag we currently support is IFF_PROMISC.
- */
-static void visornic_set_multi(struct net_device *netdev)
-{
- struct uiscmdrsp *cmdrsp;
- struct visornic_devdata *devdata = netdev_priv(netdev);
- int err = 0;
-
- if (devdata->old_flags == netdev->flags)
- return;
-
- if ((netdev->flags & IFF_PROMISC) ==
- (devdata->old_flags & IFF_PROMISC))
- goto out_save_flags;
-
- cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
- if (!cmdrsp)
- return;
- cmdrsp->cmdtype = CMD_NET_TYPE;
- cmdrsp->net.type = NET_RCV_PROMISC;
- cmdrsp->net.enbdis.context = netdev;
- cmdrsp->net.enbdis.enable =
- netdev->flags & IFF_PROMISC;
- err = visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp);
- kfree(cmdrsp);
- if (err)
- return;
-
-out_save_flags:
- devdata->old_flags = netdev->flags;
-}
-
-/* visornic_xmit_timeout - request to timeout the xmit
- * @netdev: netdevice.
- *
- * Queue the work and return. Make sure we have not already been informed that
- * the IO Partition is gone; if so, we will have already timed-out the xmits.
- */
-static void visornic_xmit_timeout(struct net_device *netdev, unsigned int txqueue)
-{
- struct visornic_devdata *devdata = netdev_priv(netdev);
- unsigned long flags;
-
- spin_lock_irqsave(&devdata->priv_lock, flags);
- if (devdata->going_away) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- dev_dbg(&devdata->dev->device,
- "%s aborting because device removal pending\n",
- __func__);
- return;
- }
-
- /* Ensure that a ServerDown message hasn't been received */
- if (!devdata->enabled ||
- (devdata->server_down && !devdata->server_change_state)) {
- dev_dbg(&netdev->dev, "%s no processing\n",
- __func__);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- return;
- }
- schedule_work(&devdata->timeout_reset);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-}
-
-/* repost_return - repost rcv bufs that have come back
- * @cmdrsp: IO channel command struct to post.
- * @devdata: Visornic devdata for the device.
- * @skb: Socket buffer.
- * @netdev: netdevice.
- *
- * Repost rcv buffers that have been returned to us when we are finished
- * with them.
- *
- * Return: 0 for success, negative integer on error.
- */
-static int repost_return(struct uiscmdrsp *cmdrsp,
- struct visornic_devdata *devdata,
- struct sk_buff *skb, struct net_device *netdev)
-{
- struct net_pkt_rcv copy;
- int i = 0, cc, numreposted;
- int found_skb = 0;
- int status = 0;
-
- copy = cmdrsp->net.rcv;
- switch (copy.numrcvbufs) {
- case 0:
- devdata->n_rcv0++;
- break;
- case 1:
- devdata->n_rcv1++;
- break;
- case 2:
- devdata->n_rcv2++;
- break;
- default:
- devdata->n_rcvx++;
- break;
- }
- for (cc = 0, numreposted = 0; cc < copy.numrcvbufs; cc++) {
- for (i = 0; i < devdata->num_rcv_bufs; i++) {
- if (devdata->rcvbuf[i] != copy.rcvbuf[cc])
- continue;
-
- if ((skb) && devdata->rcvbuf[i] == skb) {
- devdata->found_repost_rcvbuf_cnt++;
- found_skb = 1;
- devdata->repost_found_skb_cnt++;
- }
- devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
- if (!devdata->rcvbuf[i]) {
- devdata->num_rcv_bufs_could_not_alloc++;
- devdata->alloc_failed_in_repost_rtn_cnt++;
- status = -ENOMEM;
- break;
- }
- status = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
- if (status) {
- kfree_skb(devdata->rcvbuf[i]);
- devdata->rcvbuf[i] = NULL;
- break;
- }
- numreposted++;
- break;
- }
- }
- if (numreposted != copy.numrcvbufs) {
- devdata->n_repost_deficit++;
- status = -EINVAL;
- }
- if (skb) {
- if (found_skb) {
- kfree_skb(skb);
- } else {
- status = -EINVAL;
- devdata->bad_rcv_buf++;
- }
- }
- return status;
-}
-
-/* visornic_rx - handle receive packets coming back from IO Partition
- * @cmdrsp: Receive packet returned from IO Partition.
- *
- * Got a receive packet back from the IO Partition; handle it and send it up
- * the stack.
- *
- * Return: 1 iff an skb was received, otherwise 0.
- */
-static int visornic_rx(struct uiscmdrsp *cmdrsp)
-{
- struct visornic_devdata *devdata;
- struct sk_buff *skb, *prev, *curr;
- struct net_device *netdev;
- int cc, currsize, off;
- struct ethhdr *eth;
- unsigned long flags;
-
- /* post new rcv buf to the other end using the cmdrsp we have at hand
- * post it without holding lock - but we'll use the signal lock to
- * synchronize the queue insert the cmdrsp that contains the net.rcv
- * is the one we are using to repost, so copy the info we need from it.
- */
- skb = cmdrsp->net.buf;
- netdev = skb->dev;
-
- devdata = netdev_priv(netdev);
-
- spin_lock_irqsave(&devdata->priv_lock, flags);
- atomic_dec(&devdata->num_rcvbuf_in_iovm);
-
- /* set length to how much was ACTUALLY received -
- * NOTE: rcv_done_len includes actual length of data rcvd
- * including ethhdr
- */
- skb->len = cmdrsp->net.rcv.rcv_done_len;
-
- /* update rcv stats - call it with priv_lock held */
- devdata->net_stats.rx_packets++;
- devdata->net_stats.rx_bytes += skb->len;
-
- /* test enabled while holding lock */
- if (!(devdata->enabled && devdata->enab_dis_acked)) {
- /* don't process it unless we're in enable mode and until
- * we've gotten an ACK saying the other end got our RCV enable
- */
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- repost_return(cmdrsp, devdata, skb, netdev);
- return 0;
- }
-
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- /* When the skb was allocated, skb->dev, skb->data, skb->len and
- * skb->data_len were set up, and the data has already been put into
- * the skb (both the first frag and the frags pages).
- * NOTE: firstfragslen is the amount of data in skb->data, i.e. what is
- * not in nr_frags or frag_list; this is now simply RCVPOST_BUF_SIZE.
- * Bump tail to show how much data is in the first frag, set data_len
- * to show the rest, and see if we have to chain frag_list.
- */
- /* do PRECAUTIONARY check */
- if (skb->len > RCVPOST_BUF_SIZE) {
- if (cmdrsp->net.rcv.numrcvbufs < 2) {
- if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
- dev_err(&devdata->netdev->dev,
- "repost_return failed");
- return 0;
- }
- /* length rcvd is greater than firstfrag in this skb rcv buf */
- /* amount in skb->data */
- skb->tail += RCVPOST_BUF_SIZE;
- /* amount that will be in frag_list */
- skb->data_len = skb->len - RCVPOST_BUF_SIZE;
- } else {
- /* data fits in this skb - no chaining - do
- * PRECAUTIONARY check
- */
- /* should be 1 */
- if (cmdrsp->net.rcv.numrcvbufs != 1) {
- if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
- dev_err(&devdata->netdev->dev,
- "repost_return failed");
- return 0;
- }
- skb->tail += skb->len;
- /* nothing rcvd in frag_list */
- skb->data_len = 0;
- }
- off = skb_tail_pointer(skb) - skb->data;
-
- /* off is the amount we bumped tail by in the head skb; it is used to
- * calculate the size of each chained skb below, and also to index
- * into bufline to continue the copy (for chansocktwopc).
- * If necessary, chain the rcv skbs together.
- * NOTE: cmdrsp->net.rcv.rcvbuf[0] is the skb we already have; we need
- * to chain the rest to that one.
- * - do PRECAUTIONARY check
- */
- if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
- if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
- dev_err(&devdata->netdev->dev, "repost_return failed");
- return 0;
- }
-
- if (cmdrsp->net.rcv.numrcvbufs > 1) {
- /* chain the various rcv buffers into the skb's frag_list. */
- /* Note: off was initialized above */
- for (cc = 1, prev = NULL;
- cc < cmdrsp->net.rcv.numrcvbufs; cc++) {
- curr = (struct sk_buff *)cmdrsp->net.rcv.rcvbuf[cc];
- curr->next = NULL;
- /* start of list- set head */
- if (!prev)
- skb_shinfo(skb)->frag_list = curr;
- else
- prev->next = curr;
- prev = curr;
-
- /* should we set skb->len and skb->data_len for each
- * buffer being chained??? can't hurt!
- */
- currsize = min(skb->len - off,
- (unsigned int)RCVPOST_BUF_SIZE);
- curr->len = currsize;
- curr->tail += currsize;
- curr->data_len = 0;
- off += currsize;
- }
- /* assert skb->len == off */
- if (skb->len != off) {
- netdev_err(devdata->netdev,
- "something wrong; skb->len:%d != off:%d\n",
- skb->len, off);
- }
- }
-
- /* set up packet's protocol type using ethernet header - this
- * sets up skb->pkt_type & it also PULLS out the eth header
- */
- skb->protocol = eth_type_trans(skb, netdev);
- eth = eth_hdr(skb);
- skb->csum = 0;
- skb->ip_summed = CHECKSUM_NONE;
-
- do {
- /* accept all packets */
- if (netdev->flags & IFF_PROMISC)
- break;
- if (skb->pkt_type == PACKET_BROADCAST) {
- /* accept all broadcast packets */
- if (netdev->flags & IFF_BROADCAST)
- break;
- } else if (skb->pkt_type == PACKET_MULTICAST) {
- if ((netdev->flags & IFF_MULTICAST) &&
- (netdev_mc_count(netdev))) {
- struct netdev_hw_addr *ha;
- int found_mc = 0;
-
- /* only accept multicast packets that we can
- * find in our multicast address list
- */
- netdev_for_each_mc_addr(ha, netdev) {
- if (ether_addr_equal(eth->h_dest,
- ha->addr)) {
- found_mc = 1;
- break;
- }
- }
- /* accept pkt, dest matches a multicast addr */
- if (found_mc)
- break;
- }
- /* accept packet, h_dest must match vnic mac address */
- } else if (skb->pkt_type == PACKET_HOST) {
- break;
- } else if (skb->pkt_type == PACKET_OTHERHOST) {
- /* something is not right */
- dev_err(&devdata->netdev->dev,
- "**** FAILED to deliver rcv packet to OS; name:%s Dest:%pM VNIC:%pM\n",
- netdev->name, eth->h_dest, netdev->dev_addr);
- }
- /* drop packet - don't forward it up to OS */
- devdata->n_rcv_packets_not_accepted++;
- repost_return(cmdrsp, devdata, skb, netdev);
- return 0;
- } while (0);
-
- netif_receive_skb(skb);
- /* netif_receive_skb() returns various values, but in practice most
- * drivers ignore the return value
- */
-
- skb = NULL;
- /* whether the packet got dropped or handled, the skb is freed by
- * kernel code, so we shouldn't free it. but we should repost a
- * new rcv buffer.
- */
- repost_return(cmdrsp, devdata, skb, netdev);
- return 1;
-}
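The chaining logic in visornic_rx() spreads a frame that is larger than one posted receive buffer across the head skb plus buffers linked onto frag_list, with off tracking how many bytes have been accounted for. That size bookkeeping can be sketched on its own (the RCVPOST_BUF_SIZE value and show_chain() helper below are illustrative, not the channel's real constants):

#include <stdio.h>

#define RCVPOST_BUF_SIZE 4096	/* illustrative; the real value comes from the IO channel headers */

/* mirror the off/currsize bookkeeping used when chaining rcv buffers */
static void show_chain(unsigned int skb_len, unsigned int numrcvbufs)
{
	unsigned int off, currsize, cc;

	/* head skb holds at most one posted buffer's worth of data */
	off = skb_len > RCVPOST_BUF_SIZE ? RCVPOST_BUF_SIZE : skb_len;
	printf("head: %u bytes\n", off);

	for (cc = 1; cc < numrcvbufs && off < skb_len; cc++) {
		currsize = skb_len - off;
		if (currsize > RCVPOST_BUF_SIZE)
			currsize = RCVPOST_BUF_SIZE;
		printf("chained[%u]: %u bytes\n", cc, currsize);
		off += currsize;
	}
	/* the driver sanity-checks that off == skb->len at this point */
	printf("total accounted: %u of %u\n", off, skb_len);
}

int main(void)
{
	show_chain(10000, 3);	/* 4096 + 4096 + 1808 */
	return 0;
}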
-
-/* devdata_initialize - initialize devdata structure
- * @devdata: visornic_devdata structure to initialize.
- * @dev: visorbus_device it belongs to.
- *
- * Setup initial values for the visornic, based on channel and default values.
- *
- * Return: A pointer to the devdata structure.
- */
-static struct visornic_devdata *devdata_initialize(
- struct visornic_devdata *devdata,
- struct visor_device *dev)
-{
- devdata->dev = dev;
- devdata->incarnation_id = get_jiffies_64();
- return devdata;
-}
-
-/* devdata_release - free up references in devdata
- * @devdata: Struct to clean up.
- */
-static void devdata_release(struct visornic_devdata *devdata)
-{
- kfree(devdata->rcvbuf);
- kfree(devdata->cmdrsp_rcv);
- kfree(devdata->xmit_cmdrsp);
-}
-
-static const struct net_device_ops visornic_dev_ops = {
- .ndo_open = visornic_open,
- .ndo_stop = visornic_close,
- .ndo_start_xmit = visornic_xmit,
- .ndo_get_stats = visornic_get_stats,
- .ndo_change_mtu = visornic_change_mtu,
- .ndo_tx_timeout = visornic_xmit_timeout,
- .ndo_set_rx_mode = visornic_set_multi,
-};
-
-/* DebugFS code */
-static ssize_t info_debugfs_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset)
-{
- ssize_t bytes_read = 0;
- int str_pos = 0;
- struct visornic_devdata *devdata;
- struct net_device *dev;
- char *vbuf;
-
- if (len > MAX_BUF)
- len = MAX_BUF;
- vbuf = kzalloc(len, GFP_KERNEL);
- if (!vbuf)
- return -ENOMEM;
-
- /* for each vnic channel dump out channel specific data */
- rcu_read_lock();
- for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
- /* Only consider netdevs that are visornic, and are open */
- if (dev->netdev_ops != &visornic_dev_ops ||
- (!netif_queue_stopped(dev)))
- continue;
-
- devdata = netdev_priv(dev);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- "netdev = %s (0x%p), MAC Addr %pM\n",
- dev->name,
- dev,
- dev->dev_addr);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- "VisorNic Dev Info = 0x%p\n", devdata);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " num_rcv_bufs = %d\n",
- devdata->num_rcv_bufs);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " max_outstanding_net_xmits = %lu\n",
- devdata->max_outstanding_net_xmits);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " upper_threshold_net_xmits = %lu\n",
- devdata->upper_threshold_net_xmits);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " lower_threshold_net_xmits = %lu\n",
- devdata->lower_threshold_net_xmits);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " queuefullmsg_logged = %d\n",
- devdata->queuefullmsg_logged);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.got_rcv = %lu\n",
- devdata->chstat.got_rcv);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.got_enbdisack = %lu\n",
- devdata->chstat.got_enbdisack);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.got_xmit_done = %lu\n",
- devdata->chstat.got_xmit_done);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.xmit_fail = %lu\n",
- devdata->chstat.xmit_fail);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.sent_enbdis = %lu\n",
- devdata->chstat.sent_enbdis);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.sent_promisc = %lu\n",
- devdata->chstat.sent_promisc);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.sent_post = %lu\n",
- devdata->chstat.sent_post);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.sent_post_failed = %lu\n",
- devdata->chstat.sent_post_failed);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.sent_xmit = %lu\n",
- devdata->chstat.sent_xmit);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.reject_count = %lu\n",
- devdata->chstat.reject_count);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.extra_rcvbufs_sent = %lu\n",
- devdata->chstat.extra_rcvbufs_sent);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_rcv0 = %lu\n", devdata->n_rcv0);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_rcv1 = %lu\n", devdata->n_rcv1);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_rcv2 = %lu\n", devdata->n_rcv2);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_rcvx = %lu\n", devdata->n_rcvx);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " num_rcvbuf_in_iovm = %d\n",
- atomic_read(&devdata->num_rcvbuf_in_iovm));
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " alloc_failed_in_if_needed_cnt = %lu\n",
- devdata->alloc_failed_in_if_needed_cnt);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " alloc_failed_in_repost_rtn_cnt = %lu\n",
- devdata->alloc_failed_in_repost_rtn_cnt);
- /* str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- * " inner_loop_limit_reached_cnt = %lu\n",
- * devdata->inner_loop_limit_reached_cnt);
- */
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " found_repost_rcvbuf_cnt = %lu\n",
- devdata->found_repost_rcvbuf_cnt);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " repost_found_skb_cnt = %lu\n",
- devdata->repost_found_skb_cnt);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_repost_deficit = %lu\n",
- devdata->n_repost_deficit);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " bad_rcv_buf = %lu\n",
- devdata->bad_rcv_buf);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_rcv_packets_not_accepted = %lu\n",
- devdata->n_rcv_packets_not_accepted);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " interrupts_rcvd = %llu\n",
- devdata->interrupts_rcvd);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " interrupts_notme = %llu\n",
- devdata->interrupts_notme);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " interrupts_disabled = %llu\n",
- devdata->interrupts_disabled);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " busy_cnt = %llu\n",
- devdata->busy_cnt);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " flow_control_upper_hits = %llu\n",
- devdata->flow_control_upper_hits);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " flow_control_lower_hits = %llu\n",
- devdata->flow_control_lower_hits);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " netif_queue = %s\n",
- netif_queue_stopped(devdata->netdev) ?
- "stopped" : "running");
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " xmits_outstanding = %lu\n",
- devdata_xmits_outstanding(devdata));
- }
- rcu_read_unlock();
- bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
- kfree(vbuf);
- return bytes_read;
-}
-
-static struct dentry *visornic_debugfs_dir;
-static const struct file_operations debugfs_info_fops = {
- .read = info_debugfs_read,
-};
-
-/* send_rcv_posts_if_needed - send receive buffers to the IO Partition.
- * @devdata: Visornic device.
- */
-static void send_rcv_posts_if_needed(struct visornic_devdata *devdata)
-{
- int i;
- struct net_device *netdev;
- struct uiscmdrsp *cmdrsp = devdata->cmdrsp_rcv;
- int cur_num_rcv_bufs_to_alloc, rcv_bufs_allocated;
- int err;
-
- /* don't do this until vnic is marked ready */
- if (!(devdata->enabled && devdata->enab_dis_acked))
- return;
-
- netdev = devdata->netdev;
- rcv_bufs_allocated = 0;
- /* this code tries to prevent getting stuck here forever, but still
- * retries if not all the buffers could be allocated this time.
- */
- cur_num_rcv_bufs_to_alloc = devdata->num_rcv_bufs_could_not_alloc;
- while (cur_num_rcv_bufs_to_alloc > 0) {
- cur_num_rcv_bufs_to_alloc--;
- for (i = 0; i < devdata->num_rcv_bufs; i++) {
- if (devdata->rcvbuf[i])
- continue;
- devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
- if (!devdata->rcvbuf[i]) {
- devdata->alloc_failed_in_if_needed_cnt++;
- break;
- }
- rcv_bufs_allocated++;
- err = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
- if (err) {
- kfree_skb(devdata->rcvbuf[i]);
- devdata->rcvbuf[i] = NULL;
- break;
- }
- devdata->chstat.extra_rcvbufs_sent++;
- }
- }
- devdata->num_rcv_bufs_could_not_alloc -= rcv_bufs_allocated;
-}
-
-/* drain_resp_queue - drains and ignores all messages from the resp queue
- * @cmdrsp: IO channel command response message.
- * @devdata: Visornic device to drain.
- */
-static void drain_resp_queue(struct uiscmdrsp *cmdrsp,
- struct visornic_devdata *devdata)
-{
- while (!visorchannel_signalremove(devdata->dev->visorchannel,
- IOCHAN_FROM_IOPART,
- cmdrsp))
- ;
-}
-
-/* service_resp_queue - drain the response queue
- * @cmdrsp: IO channel command response message.
- * @devdata: Visornic device to drain.
- * @rx_work_done: Incremented once for each receive packet processed.
- * @budget: NAPI budget; the maximum amount of receive work to process.
- *
- * Drain the response queue of any responses from the IO Partition. Process the
- * responses as we get them.
- */
-static void service_resp_queue(struct uiscmdrsp *cmdrsp,
- struct visornic_devdata *devdata,
- int *rx_work_done, int budget)
-{
- unsigned long flags;
- struct net_device *netdev;
-
- while (*rx_work_done < budget) {
- /* TODO: CLIENT ACQUIRE -- Don't really need this at the
- * moment
- */
- /* queue empty */
- if (visorchannel_signalremove(devdata->dev->visorchannel,
- IOCHAN_FROM_IOPART,
- cmdrsp))
- break;
-
- switch (cmdrsp->net.type) {
- case NET_RCV:
- devdata->chstat.got_rcv++;
- /* process incoming packet */
- *rx_work_done += visornic_rx(cmdrsp);
- break;
- case NET_XMIT_DONE:
- spin_lock_irqsave(&devdata->priv_lock, flags);
- devdata->chstat.got_xmit_done++;
- if (cmdrsp->net.xmtdone.xmt_done_result)
- devdata->chstat.xmit_fail++;
- /* only call queue wake if we stopped it */
- netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
- /* ASSERT netdev == vnicinfo->netdev; */
- if (netdev == devdata->netdev &&
- netif_queue_stopped(netdev)) {
- /* check if we have crossed the lower watermark
- * for netif_wake_queue()
- */
- if (vnic_hit_low_watermark
- (devdata,
- devdata->lower_threshold_net_xmits)) {
- /* enough NET_XMITs completed
- * so can restart netif queue
- */
- netif_wake_queue(netdev);
- devdata->flow_control_lower_hits++;
- }
- }
- skb_unlink(cmdrsp->net.buf, &devdata->xmitbufhead);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- kfree_skb(cmdrsp->net.buf);
- break;
- case NET_RCV_ENBDIS_ACK:
- devdata->chstat.got_enbdisack++;
- netdev = (struct net_device *)
- cmdrsp->net.enbdis.context;
- spin_lock_irqsave(&devdata->priv_lock, flags);
- devdata->enab_dis_acked = 1;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- if (devdata->server_down &&
- devdata->server_change_state) {
- /* Inform Linux that the link is up */
- devdata->server_down = false;
- devdata->server_change_state = false;
- netif_wake_queue(netdev);
- netif_carrier_on(netdev);
- }
- break;
- case NET_CONNECT_STATUS:
- netdev = devdata->netdev;
- if (cmdrsp->net.enbdis.enable == 1) {
- spin_lock_irqsave(&devdata->priv_lock, flags);
- devdata->enabled = cmdrsp->net.enbdis.enable;
- spin_unlock_irqrestore(&devdata->priv_lock,
- flags);
- netif_wake_queue(netdev);
- netif_carrier_on(netdev);
- } else {
- netif_stop_queue(netdev);
- netif_carrier_off(netdev);
- spin_lock_irqsave(&devdata->priv_lock, flags);
- devdata->enabled = cmdrsp->net.enbdis.enable;
- spin_unlock_irqrestore(&devdata->priv_lock,
- flags);
- }
- break;
- default:
- break;
- }
- /* cmdrsp is now available for reuse */
- }
-}
-
-static int visornic_poll(struct napi_struct *napi, int budget)
-{
- struct visornic_devdata *devdata = container_of(napi,
- struct visornic_devdata,
- napi);
- int rx_count = 0;
-
- send_rcv_posts_if_needed(devdata);
- service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);
-
- /* If there aren't any more packets to receive, stop the poll */
- if (rx_count < budget)
- napi_complete_done(napi, rx_count);
-
- return rx_count;
-}
-
-/* visornic_channel_interrupt - checks the status of the response queue
- * @dev: Visor device whose channel signalled activity.
- *
- * Check the response queue and, if it is not empty, schedule the NAPI poll
- * routine to drain it.
- */
-static void visornic_channel_interrupt(struct visor_device *dev)
-{
- struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
-
- if (!devdata)
- return;
-
- if (!visorchannel_signalempty(devdata->dev->visorchannel,
- IOCHAN_FROM_IOPART))
- napi_schedule(&devdata->napi);
-
- atomic_set(&devdata->interrupt_rcvd, 0);
-}
-
-/* visornic_probe - probe function for visornic devices
- * @dev: The visor device discovered.
- *
- * Called when visorbus discovers a visornic device on its bus. It creates a new
- * visornic ethernet adapter.
- *
- * Return: 0 on success, or negative integer on error.
- */
-static int visornic_probe(struct visor_device *dev)
-{
- struct visornic_devdata *devdata = NULL;
- struct net_device *netdev = NULL;
- int err;
- int channel_offset = 0;
- u8 addr[ETH_ALEN];
- u64 features;
-
- netdev = alloc_etherdev(sizeof(struct visornic_devdata));
- if (!netdev) {
- dev_err(&dev->device,
- "%s alloc_etherdev failed\n", __func__);
- return -ENOMEM;
- }
-
- netdev->netdev_ops = &visornic_dev_ops;
- netdev->watchdog_timeo = 5 * HZ;
- SET_NETDEV_DEV(netdev, &dev->device);
-
- /* Get MAC address from channel and read it into the device. */
- netdev->addr_len = ETH_ALEN;
- channel_offset = offsetof(struct visor_io_channel, vnic.macaddr);
- err = visorbus_read_channel(dev, channel_offset, addr, ETH_ALEN);
- if (err < 0) {
- dev_err(&dev->device,
- "%s failed to get mac addr from chan (%d)\n",
- __func__, err);
- goto cleanup_netdev;
- }
- eth_hw_addr_set(netdev, addr);
-
- devdata = devdata_initialize(netdev_priv(netdev), dev);
- if (!devdata) {
- dev_err(&dev->device,
- "%s devdata_initialize failed\n", __func__);
- err = -ENOMEM;
- goto cleanup_netdev;
- }
- /* don't trust messages lying around in the channel */
- drain_resp_queue(devdata->cmdrsp, devdata);
-
- devdata->netdev = netdev;
- dev_set_drvdata(&dev->device, devdata);
- init_waitqueue_head(&devdata->rsp_queue);
- spin_lock_init(&devdata->priv_lock);
- /* not yet */
- devdata->enabled = 0;
- atomic_set(&devdata->usage, 1);
-
- /* Setup rcv bufs */
- channel_offset = offsetof(struct visor_io_channel, vnic.num_rcv_bufs);
- err = visorbus_read_channel(dev, channel_offset,
- &devdata->num_rcv_bufs, 4);
- if (err) {
- dev_err(&dev->device,
- "%s failed to get #rcv bufs from chan (%d)\n",
- __func__, err);
- goto cleanup_netdev;
- }
-
- devdata->rcvbuf = kcalloc(devdata->num_rcv_bufs,
- sizeof(struct sk_buff *), GFP_KERNEL);
- if (!devdata->rcvbuf) {
- err = -ENOMEM;
- goto cleanup_netdev;
- }
-
- /* set the net_xmit outstanding threshold
- * always leave two slots open but you should have 3 at a minimum
- * note that max_outstanding_net_xmits must be > 0
- */
- devdata->max_outstanding_net_xmits =
- max_t(unsigned long, 3, ((devdata->num_rcv_bufs / 3) - 2));
- devdata->upper_threshold_net_xmits =
- max_t(unsigned long,
- 2, (devdata->max_outstanding_net_xmits - 1));
- devdata->lower_threshold_net_xmits =
- max_t(unsigned long,
- 1, (devdata->max_outstanding_net_xmits / 2));
-
- skb_queue_head_init(&devdata->xmitbufhead);
-
- /* create a cmdrsp we can use to post and unpost rcv buffers */
- devdata->cmdrsp_rcv = kmalloc(SIZEOF_CMDRSP, GFP_KERNEL);
- if (!devdata->cmdrsp_rcv) {
- err = -ENOMEM;
- goto cleanup_rcvbuf;
- }
- devdata->xmit_cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_KERNEL);
- if (!devdata->xmit_cmdrsp) {
- err = -ENOMEM;
- goto cleanup_cmdrsp_rcv;
- }
- INIT_WORK(&devdata->timeout_reset, visornic_timeout_reset);
- devdata->server_down = false;
- devdata->server_change_state = false;
-
- /* set the default mtu */
- channel_offset = offsetof(struct visor_io_channel, vnic.mtu);
- err = visorbus_read_channel(dev, channel_offset, &netdev->mtu, 4);
- if (err) {
- dev_err(&dev->device,
- "%s failed to get mtu from chan (%d)\n",
- __func__, err);
- goto cleanup_xmit_cmdrsp;
- }
-
- /* TODO: Setup Interrupt information */
- /* Let's start our threads to get responses */
- netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
-
- channel_offset = offsetof(struct visor_io_channel,
- channel_header.features);
- err = visorbus_read_channel(dev, channel_offset, &features, 8);
- if (err) {
- dev_err(&dev->device,
- "%s failed to get features from chan (%d)\n",
- __func__, err);
- goto cleanup_napi_add;
- }
-
- features |= VISOR_CHANNEL_IS_POLLING;
- features |= VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING;
- err = visorbus_write_channel(dev, channel_offset, &features, 8);
- if (err) {
- dev_err(&dev->device,
- "%s failed to set features in chan (%d)\n",
- __func__, err);
- goto cleanup_napi_add;
- }
-
- /* Note: Interrupts have to be enabled before the while
- * loop below because the napi routine is responsible for
- * setting enab_dis_acked
- */
- visorbus_enable_channel_interrupts(dev);
-
- err = register_netdev(netdev);
- if (err) {
- dev_err(&dev->device,
- "%s register_netdev failed (%d)\n", __func__, err);
- goto cleanup_napi_add;
- }
-
- /* create debug/sysfs directories */
- devdata->eth_debugfs_dir = debugfs_create_dir(netdev->name,
- visornic_debugfs_dir);
- if (!devdata->eth_debugfs_dir) {
- dev_err(&dev->device,
- "%s debugfs_create_dir %s failed\n",
- __func__, netdev->name);
- err = -ENOMEM;
- goto cleanup_register_netdev;
- }
-
- dev_info(&dev->device, "%s success netdev=%s\n",
- __func__, netdev->name);
- return 0;
-
-cleanup_register_netdev:
- unregister_netdev(netdev);
-
-cleanup_napi_add:
- visorbus_disable_channel_interrupts(dev);
- netif_napi_del(&devdata->napi);
-
-cleanup_xmit_cmdrsp:
- kfree(devdata->xmit_cmdrsp);
-
-cleanup_cmdrsp_rcv:
- kfree(devdata->cmdrsp_rcv);
-
-cleanup_rcvbuf:
- kfree(devdata->rcvbuf);
-
-cleanup_netdev:
- free_netdev(netdev);
- return err;
-}
-
-/* host_side_disappeared - IO Partition is gone
- * @devdata: Device object.
- *
- * IO partition servicing this device is gone; do cleanup.
- */
-static void host_side_disappeared(struct visornic_devdata *devdata)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&devdata->priv_lock, flags);
- /* indicate device destroyed */
- devdata->dev = NULL;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-}
-
-/* visornic_remove - called when visornic dev goes away
- * @dev: Visornic device that is being removed.
- *
- * Called when DEVICE_DESTROY gets called to remove device.
- */
-static void visornic_remove(struct visor_device *dev)
-{
- struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
- struct net_device *netdev;
- unsigned long flags;
-
- if (!devdata) {
- dev_err(&dev->device, "%s no devdata\n", __func__);
- return;
- }
- spin_lock_irqsave(&devdata->priv_lock, flags);
- if (devdata->going_away) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- dev_err(&dev->device, "%s already being removed\n", __func__);
- return;
- }
- devdata->going_away = true;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- netdev = devdata->netdev;
- if (!netdev) {
- dev_err(&dev->device, "%s not net device\n", __func__);
- return;
- }
-
- /* going_away prevents new items being added to the workqueues */
- cancel_work_sync(&devdata->timeout_reset);
-
- debugfs_remove_recursive(devdata->eth_debugfs_dir);
- /* this will call visornic_close() */
- unregister_netdev(netdev);
-
- visorbus_disable_channel_interrupts(devdata->dev);
- netif_napi_del(&devdata->napi);
-
- dev_set_drvdata(&dev->device, NULL);
- host_side_disappeared(devdata);
- devdata_release(devdata);
- free_netdev(netdev);
-}
-
-/* visornic_pause - called when IO Part disappears
- * @dev: Visornic device that is being serviced.
- * @complete_func: Call when finished.
- *
- * Called when the IO Partition has gone down. Need to free up resources and
- * wait for IO partition to come back. Mark link as down and don't attempt any
- * DMA. When we have freed memory, call the complete_func so that Command knows
- * we are done. If we don't call complete_func, the IO Partition will never
- * come back.
- *
- * Return: 0 on success.
- */
-static int visornic_pause(struct visor_device *dev,
- visorbus_state_complete_func complete_func)
-{
- struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
-
- visornic_serverdown(devdata, complete_func);
- return 0;
-}
-
-/* visornic_resume - called when IO Partition has recovered
- * @dev: Visornic device that is being serviced.
- * @complete_func: Call when finished.
- *
- * Called when the IO partition has recovered. Re-establish connection to the IO
- * Partition and set the link up. Okay to do DMA again.
- *
- * Return: 0 on success, negative integer on error.
- */
-static int visornic_resume(struct visor_device *dev,
- visorbus_state_complete_func complete_func)
-{
- struct visornic_devdata *devdata;
- struct net_device *netdev;
- unsigned long flags;
-
- devdata = dev_get_drvdata(&dev->device);
- if (!devdata) {
- dev_err(&dev->device, "%s no devdata\n", __func__);
- return -EINVAL;
- }
-
- netdev = devdata->netdev;
-
- spin_lock_irqsave(&devdata->priv_lock, flags);
- if (devdata->server_change_state) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- dev_err(&dev->device, "%s server already changing state\n",
- __func__);
- return -EINVAL;
- }
- if (!devdata->server_down) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- dev_err(&dev->device, "%s server not down\n", __func__);
- complete_func(dev, 0);
- return 0;
- }
- devdata->server_change_state = true;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- /* Must transition channel to ATTACHED state BEFORE
- * we can start using the device again.
- * TODO: State transitions
- */
- visorbus_enable_channel_interrupts(dev);
-
- rtnl_lock();
- dev_open(netdev, NULL);
- rtnl_unlock();
-
- complete_func(dev, 0);
- return 0;
-}
-
-/* This is used to tell the visorbus driver which types of visor devices
- * we support, and what functions to call when a visor device that we support
- * is attached or removed.
- */
-static struct visor_driver visornic_driver = {
- .name = "visornic",
- .owner = THIS_MODULE,
- .channel_types = visornic_channel_types,
- .probe = visornic_probe,
- .remove = visornic_remove,
- .pause = visornic_pause,
- .resume = visornic_resume,
- .channel_interrupt = visornic_channel_interrupt,
-};
-
-/* visornic_init - init function
- *
- * Init function for the visornic driver. Do initial driver setup and wait
- * for devices.
- *
- * Return: 0 on success, negative integer on error.
- */
-static int visornic_init(void)
-{
- int err;
-
- visornic_debugfs_dir = debugfs_create_dir("visornic", NULL);
-
- debugfs_create_file("info", 0400, visornic_debugfs_dir, NULL,
- &debugfs_info_fops);
- debugfs_create_file("enable_ints", 0200, visornic_debugfs_dir, NULL,
- &debugfs_enable_ints_fops);
-
- err = visorbus_register_visor_driver(&visornic_driver);
- if (err)
- debugfs_remove_recursive(visornic_debugfs_dir);
-
- return err;
-}
-
-/* visornic_cleanup - driver exit routine
- *
- * Unregister driver from the bus and free up memory.
- */
-static void visornic_cleanup(void)
-{
- visorbus_unregister_visor_driver(&visornic_driver);
- debugfs_remove_recursive(visornic_debugfs_dir);
-}
-
-module_init(visornic_init);
-module_exit(visornic_cleanup);
-
-MODULE_AUTHOR("Unisys");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("s-Par NIC driver for virtual network devices");
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig
index cb7c82403dbf..31e58c9d1a11 100644
--- a/drivers/staging/vc04_services/Kconfig
+++ b/drivers/staging/vc04_services/Kconfig
@@ -13,6 +13,7 @@ if BCM_VIDEOCORE
config BCM2835_VCHIQ
tristate "BCM2835 VCHIQ"
+ depends on HAS_DMA
imply VCHIQ_CDEV
help
Broadcom BCM2835 and similar SoCs have a VPU called VideoCore. This config
diff --git a/drivers/staging/vc04_services/bcm2835-audio/Kconfig b/drivers/staging/vc04_services/bcm2835-audio/Kconfig
index d32ea348e846..7f22f6c85067 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/Kconfig
+++ b/drivers/staging/vc04_services/bcm2835-audio/Kconfig
@@ -3,7 +3,9 @@ config SND_BCM2835
tristate "BCM2835 Audio"
depends on (ARCH_BCM2835 || COMPILE_TEST) && SND
select SND_PCM
- select BCM2835_VCHIQ
+ select BCM2835_VCHIQ if HAS_DMA
help
- Say Y or M if you want to support BCM2835 built in audio
-
+ Say Y or M if you want to support BCM2835 built-in audio.
+ This driver handles both 3.5mm and HDMI audio by leveraging
+ the VCHIQ messaging interface between the kernel and the firmware
+ running on VideoCore. \ No newline at end of file
diff --git a/drivers/staging/vc04_services/bcm2835-audio/TODO b/drivers/staging/vc04_services/bcm2835-audio/TODO
deleted file mode 100644
index b85451255db0..000000000000
--- a/drivers/staging/vc04_services/bcm2835-audio/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-*****************************************************************************
-* *
-* TODO: BCM2835-AUDIO *
-* *
-*****************************************************************************
-
-1) Revisit multi-cards options and PCM route mixer control (as per comment
-https://lore.kernel.org/lkml/s5hd0to5598.wl-tiwai@suse.de)
-
-2) Fix the remaining checkpatch.pl errors and warnings.
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
index 3703409715da..1c1f040122d7 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
@@ -117,15 +117,6 @@ static const struct snd_kcontrol_new snd_bcm2835_ctl[] = {
.get = snd_bcm2835_ctl_get,
.put = snd_bcm2835_ctl_put,
},
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Route",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .private_value = PCM_PLAYBACK_DEVICE,
- .info = snd_bcm2835_ctl_info,
- .get = snd_bcm2835_ctl_get,
- .put = snd_bcm2835_ctl_put,
- },
};
static int snd_bcm2835_spdif_default_info(struct snd_kcontrol *kcontrol,
@@ -220,7 +211,14 @@ static int create_ctls(struct bcm2835_chip *chip, size_t size,
return 0;
}
-int snd_bcm2835_new_ctl(struct bcm2835_chip *chip)
+int snd_bcm2835_new_headphones_ctl(struct bcm2835_chip *chip)
+{
+ strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
+ return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_ctl),
+ snd_bcm2835_ctl);
+}
+
+int snd_bcm2835_new_hdmi_ctl(struct bcm2835_chip *chip)
{
int err;
@@ -232,71 +230,3 @@ int snd_bcm2835_new_ctl(struct bcm2835_chip *chip)
snd_bcm2835_spdif);
}
-static const struct snd_kcontrol_new snd_bcm2835_headphones_ctl[] = {
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Headphone Playback Volume",
- .index = 0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
- SNDRV_CTL_ELEM_ACCESS_TLV_READ,
- .private_value = PCM_PLAYBACK_VOLUME,
- .info = snd_bcm2835_ctl_info,
- .get = snd_bcm2835_ctl_get,
- .put = snd_bcm2835_ctl_put,
- .count = 1,
- .tlv = {.p = snd_bcm2835_db_scale}
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Headphone Playback Switch",
- .index = 0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .private_value = PCM_PLAYBACK_MUTE,
- .info = snd_bcm2835_ctl_info,
- .get = snd_bcm2835_ctl_get,
- .put = snd_bcm2835_ctl_put,
- .count = 1,
- }
-};
-
-int snd_bcm2835_new_headphones_ctl(struct bcm2835_chip *chip)
-{
- strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
- return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_headphones_ctl),
- snd_bcm2835_headphones_ctl);
-}
-
-static const struct snd_kcontrol_new snd_bcm2835_hdmi[] = {
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "HDMI Playback Volume",
- .index = 0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
- SNDRV_CTL_ELEM_ACCESS_TLV_READ,
- .private_value = PCM_PLAYBACK_VOLUME,
- .info = snd_bcm2835_ctl_info,
- .get = snd_bcm2835_ctl_get,
- .put = snd_bcm2835_ctl_put,
- .count = 1,
- .tlv = {.p = snd_bcm2835_db_scale}
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "HDMI Playback Switch",
- .index = 0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .private_value = PCM_PLAYBACK_MUTE,
- .info = snd_bcm2835_ctl_info,
- .get = snd_bcm2835_ctl_get,
- .put = snd_bcm2835_ctl_put,
- .count = 1,
- }
-};
-
-int snd_bcm2835_new_hdmi_ctl(struct bcm2835_chip *chip)
-{
- strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
- return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_hdmi),
- snd_bcm2835_hdmi);
-}
-
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
index f2ef1d641e70..68e8d491a7ec 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
@@ -82,8 +82,7 @@ void bcm2835_playback_fifo(struct bcm2835_alsa_stream *alsa_stream,
}
/* open callback */
-static int snd_bcm2835_playback_open_generic(
- struct snd_pcm_substream *substream, int spdif)
+static int snd_bcm2835_playback_open_generic(struct snd_pcm_substream *substream, int spdif)
{
struct bcm2835_chip *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -237,7 +236,7 @@ static void snd_bcm2835_pcm_transfer(struct snd_pcm_substream *substream,
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
- void *src = (void *) (substream->runtime->dma_area + rec->sw_data);
+ void *src = (void *)(substream->runtime->dma_area + rec->sw_data);
bcm2835_audio_write(alsa_stream, bytes, src);
}
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index d567a2e3f70c..e429b33b4d39 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -11,7 +11,7 @@ struct bcm2835_audio_instance {
struct device *dev;
unsigned int service_handle;
struct completion msg_avail_comp;
- struct mutex vchi_mutex;
+ struct mutex vchi_mutex; /* Serialize vchiq access */
struct bcm2835_alsa_stream *alsa_stream;
int result;
unsigned int max_packet;
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
index 628732d7bf6a..00bc898b0189 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
@@ -10,17 +10,13 @@
#include "bcm2835.h"
static bool enable_hdmi;
-static bool enable_headphones;
-static bool enable_compat_alsa = true;
+static bool enable_headphones = true;
static int num_channels = MAX_SUBSTREAMS;
module_param(enable_hdmi, bool, 0444);
MODULE_PARM_DESC(enable_hdmi, "Enables HDMI virtual audio device");
module_param(enable_headphones, bool, 0444);
MODULE_PARM_DESC(enable_headphones, "Enables Headphones virtual audio device");
-module_param(enable_compat_alsa, bool, 0444);
-MODULE_PARM_DESC(enable_compat_alsa,
- "Enables ALSA compatibility virtual audio device");
module_param(num_channels, int, 0644);
MODULE_PARM_DESC(num_channels, "Number of audio channels (default: 8)");
@@ -63,19 +59,20 @@ struct bcm2835_audio_driver {
enum snd_bcm2835_route route;
};
-static int bcm2835_audio_alsa_newpcm(struct bcm2835_chip *chip,
+static int bcm2835_audio_dual_newpcm(struct bcm2835_chip *chip,
const char *name,
enum snd_bcm2835_route route,
u32 numchannels)
{
int err;
- err = snd_bcm2835_new_pcm(chip, "bcm2835 ALSA", 0, AUDIO_DEST_AUTO,
- numchannels - 1, false);
+ err = snd_bcm2835_new_pcm(chip, name, 0, route,
+ numchannels, false);
+
if (err)
return err;
- err = snd_bcm2835_new_pcm(chip, "bcm2835 IEC958/HDMI", 1, 0, 1, true);
+ err = snd_bcm2835_new_pcm(chip, "IEC958", 1, route, 1, true);
if (err)
return err;
@@ -90,18 +87,6 @@ static int bcm2835_audio_simple_newpcm(struct bcm2835_chip *chip,
return snd_bcm2835_new_pcm(chip, name, 0, route, numchannels, false);
}
-static struct bcm2835_audio_driver bcm2835_audio_alsa = {
- .driver = {
- .name = "bcm2835_alsa",
- .owner = THIS_MODULE,
- },
- .shortname = "bcm2835 ALSA",
- .longname = "bcm2835 ALSA",
- .minchannels = 2,
- .newpcm = bcm2835_audio_alsa_newpcm,
- .newctl = snd_bcm2835_new_ctl,
-};
-
static struct bcm2835_audio_driver bcm2835_audio_hdmi = {
.driver = {
.name = "bcm2835_hdmi",
@@ -110,7 +95,7 @@ static struct bcm2835_audio_driver bcm2835_audio_hdmi = {
.shortname = "bcm2835 HDMI",
.longname = "bcm2835 HDMI",
.minchannels = 1,
- .newpcm = bcm2835_audio_simple_newpcm,
+ .newpcm = bcm2835_audio_dual_newpcm,
.newctl = snd_bcm2835_new_hdmi_ctl,
.route = AUDIO_DEST_HDMI
};
@@ -135,10 +120,6 @@ struct bcm2835_audio_drivers {
static struct bcm2835_audio_drivers children_devices[] = {
{
- .audio_driver = &bcm2835_audio_alsa,
- .is_enabled = &enable_compat_alsa,
- },
- {
.audio_driver = &bcm2835_audio_hdmi,
.is_enabled = &enable_hdmi,
},
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
index 51066ac8eea5..38b7451d77b2 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
@@ -61,7 +61,7 @@ struct bcm2835_chip {
unsigned int opened;
unsigned int spdif_status;
- struct mutex audio_mutex;
+ struct mutex audio_mutex; /* Serialize chip data access */
struct bcm2835_vchi_ctx *vchi_ctx;
};
diff --git a/drivers/staging/vc04_services/bcm2835-camera/Kconfig b/drivers/staging/vc04_services/bcm2835-camera/Kconfig
index dcda565f9b38..870c9afb223a 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/Kconfig
+++ b/drivers/staging/vc04_services/bcm2835-camera/Kconfig
@@ -3,8 +3,8 @@ config VIDEO_BCM2835
tristate "BCM2835 Camera"
depends on MEDIA_SUPPORT
depends on VIDEO_DEV && (ARCH_BCM2835 || COMPILE_TEST)
- select BCM2835_VCHIQ
- select BCM2835_VCHIQ_MMAL
+ select BCM2835_VCHIQ if HAS_DMA
+ select BCM2835_VCHIQ_MMAL if HAS_DMA
select VIDEOBUF2_VMALLOC
select BTREE
help
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index 88b1878854e0..fd456d1f7061 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -1033,9 +1033,9 @@ static int mmal_setup_video_component(struct bcm2835_mmal_dev *dev,
preview_port->es.video.crop.y = 0;
preview_port->es.video.crop.width = f->fmt.pix.width;
preview_port->es.video.crop.height = f->fmt.pix.height;
- preview_port->es.video.frame_rate.num =
+ preview_port->es.video.frame_rate.numerator =
dev->capture.timeperframe.denominator;
- preview_port->es.video.frame_rate.den =
+ preview_port->es.video.frame_rate.denominator =
dev->capture.timeperframe.numerator;
ret = vchiq_mmal_port_set_format(dev->instance, preview_port);
@@ -1084,9 +1084,9 @@ static int mmal_setup_encode_component(struct bcm2835_mmal_dev *dev,
port->es.video.crop.y = 0;
port->es.video.crop.width = f->fmt.pix.width;
port->es.video.crop.height = f->fmt.pix.height;
- port->es.video.frame_rate.num =
+ port->es.video.frame_rate.numerator =
dev->capture.timeperframe.denominator;
- port->es.video.frame_rate.den =
+ port->es.video.frame_rate.denominator =
dev->capture.timeperframe.numerator;
port->format.encoding = mfmt->mmal;
@@ -1225,8 +1225,8 @@ static int mmal_setup_components(struct bcm2835_mmal_dev *dev,
camera_port->es.video.crop.y = 0;
camera_port->es.video.crop.width = f->fmt.pix.width;
camera_port->es.video.crop.height = f->fmt.pix.height;
- camera_port->es.video.frame_rate.num = 0;
- camera_port->es.video.frame_rate.den = 1;
+ camera_port->es.video.frame_rate.numerator = 0;
+ camera_port->es.video.frame_rate.denominator = 1;
camera_port->es.video.color_space = MMAL_COLOR_SPACE_JPEG_JFIF;
ret = vchiq_mmal_port_set_format(dev->instance, camera_port);
@@ -1629,8 +1629,8 @@ static int mmal_init(struct bcm2835_mmal_dev *dev)
format->es->video.crop.y = 0;
format->es->video.crop.width = 1024;
format->es->video.crop.height = 768;
- format->es->video.frame_rate.num = 0; /* Rely on fps_range */
- format->es->video.frame_rate.den = 1;
+ format->es->video.frame_rate.numerator = 0; /* Rely on fps_range */
+ format->es->video.frame_rate.denominator = 1;
format = &camera->output[CAM_PORT_VIDEO].format;
@@ -1643,8 +1643,8 @@ static int mmal_init(struct bcm2835_mmal_dev *dev)
format->es->video.crop.y = 0;
format->es->video.crop.width = 1024;
format->es->video.crop.height = 768;
- format->es->video.frame_rate.num = 0; /* Rely on fps_range */
- format->es->video.frame_rate.den = 1;
+ format->es->video.frame_rate.numerator = 0; /* Rely on fps_range */
+ format->es->video.frame_rate.denominator = 1;
format = &camera->output[CAM_PORT_CAPTURE].format;
@@ -1656,8 +1656,8 @@ static int mmal_init(struct bcm2835_mmal_dev *dev)
format->es->video.crop.y = 0;
format->es->video.crop.width = 2592;
format->es->video.crop.height = 1944;
- format->es->video.frame_rate.num = 0; /* Rely on fps_range */
- format->es->video.frame_rate.den = 1;
+ format->es->video.frame_rate.numerator = 0; /* Rely on fps_range */
+ format->es->video.frame_rate.denominator = 1;
dev->capture.width = format->es->video.width;
dev->capture.height = format->es->video.height;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
index eb722f16fb91..5644d1d457b9 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
@@ -154,13 +154,13 @@ static int ctrl_set_rational(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
- struct mmal_parameter_rational rational_value;
+ struct s32_fract rational_value;
struct vchiq_mmal_port *control;
control = &dev->component[COMP_CAMERA]->control;
- rational_value.num = ctrl->val;
- rational_value.den = 100;
+ rational_value.numerator = ctrl->val;
+ rational_value.denominator = 100;
return vchiq_mmal_port_parameter_set(dev->instance, control,
mmal_ctrl->mmal_id,
@@ -489,9 +489,10 @@ static int ctrl_set_awb_gains(struct bcm2835_mmal_dev *dev,
else if (ctrl->id == V4L2_CID_BLUE_BALANCE)
dev->blue_gain = ctrl->val;
- gains.r_gain.num = dev->red_gain;
- gains.b_gain.num = dev->blue_gain;
- gains.r_gain.den = gains.b_gain.den = 1000;
+ gains.r_gain.numerator = dev->red_gain;
+ gains.r_gain.denominator = 1000;
+ gains.b_gain.numerator = dev->blue_gain;
+ gains.b_gain.denominator = 1000;
return vchiq_mmal_port_parameter_set(dev->instance, control,
mmal_ctrl->mmal_id,
@@ -1271,26 +1272,26 @@ int set_framerate_params(struct bcm2835_mmal_dev *dev)
struct mmal_parameter_fps_range fps_range;
int ret;
- fps_range.fps_high.num = dev->capture.timeperframe.denominator;
- fps_range.fps_high.den = dev->capture.timeperframe.numerator;
+ fps_range.fps_high.numerator = dev->capture.timeperframe.denominator;
+ fps_range.fps_high.denominator = dev->capture.timeperframe.numerator;
if ((dev->exposure_mode_active != MMAL_PARAM_EXPOSUREMODE_OFF) &&
(dev->exp_auto_priority)) {
/* Variable FPS. Define min FPS as 1fps. */
- fps_range.fps_low.num = 1;
- fps_range.fps_low.den = 1;
+ fps_range.fps_low.numerator = 1;
+ fps_range.fps_low.denominator = 1;
} else {
/* Fixed FPS - set min and max to be the same */
- fps_range.fps_low.num = fps_range.fps_high.num;
- fps_range.fps_low.den = fps_range.fps_high.den;
+ fps_range.fps_low.numerator = fps_range.fps_high.numerator;
+ fps_range.fps_low.denominator = fps_range.fps_high.denominator;
}
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
"Set fps range to %d/%d to %d/%d\n",
- fps_range.fps_low.num,
- fps_range.fps_low.den,
- fps_range.fps_high.num,
- fps_range.fps_high.den);
+ fps_range.fps_low.numerator,
+ fps_range.fps_low.denominator,
+ fps_range.fps_high.numerator,
+ fps_range.fps_high.denominator);
ret = vchiq_mmal_port_parameter_set(dev->instance,
&dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW],
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index f0bfacfdea80..0596ac61e286 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -431,21 +431,18 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
if (head_bytes > actual)
head_bytes = actual;
- memcpy((char *)kmap(pages[0]) +
+ memcpy_to_page(pages[0],
pagelist->offset,
fragments,
head_bytes);
- kunmap(pages[0]);
}
if ((actual >= 0) && (head_bytes < actual) &&
- (tail_bytes != 0)) {
- memcpy((char *)kmap(pages[num_pages - 1]) +
- ((pagelist->offset + actual) &
- (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
+ (tail_bytes != 0))
+ memcpy_to_page(pages[num_pages - 1],
+ (pagelist->offset + actual) &
+ (PAGE_SIZE - 1) & ~(g_cache_line_size - 1),
fragments + g_cache_line_size,
tail_bytes);
- kunmap(pages[num_pages - 1]);
- }
down(&g_free_fragments_mutex);
*(char **)fragments = g_free_fragments;
@@ -918,8 +915,7 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
struct vchiq_instance *instance;
struct vchiq_service *service;
enum vchiq_status status;
- struct bulk_waiter_node *waiter = NULL;
- bool found = false;
+ struct bulk_waiter_node *waiter = NULL, *iter;
service = find_service_by_handle(handle);
if (!service)
@@ -930,16 +926,16 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
vchiq_service_put(service);
mutex_lock(&instance->bulk_waiter_list_mutex);
- list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
- if (waiter->pid == current->pid) {
- list_del(&waiter->list);
- found = true;
+ list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
+ if (iter->pid == current->pid) {
+ list_del(&iter->list);
+ waiter = iter;
break;
}
}
mutex_unlock(&instance->bulk_waiter_list_mutex);
- if (found) {
+ if (waiter) {
struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
if (bulk) {
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 82b7bd7b54b2..1ddc661642a9 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -79,7 +79,6 @@
#define BITSET_BIT(b) (1 << (b & 31))
#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
-#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
enum {
DEBUG_ENTRIES,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
index b41c2a267355..66bbfec332ba 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
@@ -289,8 +289,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
enum vchiq_bulk_mode __user *mode)
{
struct vchiq_service *service;
- struct bulk_waiter_node *waiter = NULL;
- bool found = false;
+ struct bulk_waiter_node *waiter = NULL, *iter;
void *userdata;
int status = 0;
int ret;
@@ -309,16 +308,16 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
userdata = &waiter->bulk_waiter;
} else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
mutex_lock(&instance->bulk_waiter_list_mutex);
- list_for_each_entry(waiter, &instance->bulk_waiter_list,
+ list_for_each_entry(iter, &instance->bulk_waiter_list,
list) {
- if (waiter->pid == current->pid) {
- list_del(&waiter->list);
- found = true;
+ if (iter->pid == current->pid) {
+ list_del(&iter->list);
+ waiter = iter;
break;
}
}
mutex_unlock(&instance->bulk_waiter_list_mutex);
- if (!found) {
+ if (!waiter) {
vchiq_log_error(vchiq_arm_log_level,
"no bulk_waiter found for pid %d", current->pid);
ret = -ESRCH;
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h
index d77e15f25dda..492d4c5dca08 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h
@@ -14,6 +14,8 @@
#ifndef MMAL_MSG_COMMON_H
#define MMAL_MSG_COMMON_H
+#include <linux/types.h>
+
enum mmal_msg_status {
MMAL_MSG_STATUS_SUCCESS = 0, /**< Success */
MMAL_MSG_STATUS_ENOMEM, /**< Out of memory */
@@ -40,9 +42,4 @@ struct mmal_rect {
s32 height; /**< height */
};
-struct mmal_rational {
- s32 num; /**< Numerator */
- s32 den; /**< Denominator */
-};
-
#endif /* MMAL_MSG_COMMON_H */
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h
index 1e996d8cd283..5569876d8c7d 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h
@@ -14,6 +14,8 @@
#ifndef MMAL_MSG_FORMAT_H
#define MMAL_MSG_FORMAT_H
+#include <linux/math.h>
+
#include "mmal-msg-common.h"
/* MMAL_ES_FORMAT_T */
@@ -30,8 +32,8 @@ struct mmal_video_format {
u32 width; /* Width of frame in pixels */
u32 height; /* Height of frame in rows of pixels */
struct mmal_rect crop; /* Visible region of the frame */
- struct mmal_rational frame_rate; /* Frame rate */
- struct mmal_rational par; /* Pixel aspect ratio */
+ struct s32_fract frame_rate; /* Frame rate */
+ struct s32_fract par; /* Pixel aspect ratio */
/*
* FourCC specifying the color space of the video stream. See the
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h
index 2277e05b1e31..a0cdd28101f2 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h
@@ -22,6 +22,8 @@
#ifndef MMAL_PARAMETERS_H
#define MMAL_PARAMETERS_H
+#include <linux/math.h>
+
/** Common parameter ID group, used with many types of component. */
#define MMAL_PARAMETER_GROUP_COMMON (0 << 16)
/** Camera-specific parameter ID group. */
@@ -223,11 +225,6 @@ enum mmal_parameter_camera_type {
MMAL_PARAMETER_CUSTOM_AWB_GAINS,
};
-struct mmal_parameter_rational {
- s32 num; /**< Numerator */
- s32 den; /**< Denominator */
-};
-
enum mmal_parameter_camera_config_timestamp_mode {
MMAL_PARAM_TIMESTAMP_MODE_ZERO = 0, /* Always timestamp frames as 0 */
MMAL_PARAM_TIMESTAMP_MODE_RAW_STC, /* Use the raw STC value
@@ -243,9 +240,9 @@ enum mmal_parameter_camera_config_timestamp_mode {
struct mmal_parameter_fps_range {
/**< Low end of the permitted framerate range */
- struct mmal_parameter_rational fps_low;
+ struct s32_fract fps_low;
/**< High end of the permitted framerate range */
- struct mmal_parameter_rational fps_high;
+ struct s32_fract fps_high;
};
/* camera configuration parameter */
@@ -350,8 +347,8 @@ enum MMAL_PARAM_FLICKERAVOID {
};
struct mmal_parameter_awbgains {
- struct mmal_parameter_rational r_gain; /**< Red gain */
- struct mmal_parameter_rational b_gain; /**< Blue gain */
+ struct s32_fract r_gain; /**< Red gain */
+ struct s32_fract b_gain; /**< Blue gain */
};
/** Manner of video rate control */
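The hunks above drop the driver's private rational types in favour of struct s32_fract from <linux/math.h>. A minimal sketch of the resulting usage, with a hypothetical helper name and only the numerator/denominator members shown in the hunks:

#include <linux/math.h>

static void set_fixed_fps_range(struct s32_fract *fps_low, struct s32_fract *fps_high,
				s32 num, s32 den)
{
	fps_high->numerator = num;
	fps_high->denominator = den;
	/* fixed FPS: low and high ends of the permitted range are identical */
	*fps_low = *fps_high;
}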
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
index 70c9d5544b56..845b20e4d05a 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
@@ -744,9 +744,9 @@ static void dump_port_info(struct vchiq_mmal_port *port)
port->es.video.crop.y,
port->es.video.crop.width, port->es.video.crop.height);
pr_debug(" : framerate %d/%d aspect %d/%d\n",
- port->es.video.frame_rate.num,
- port->es.video.frame_rate.den,
- port->es.video.par.num, port->es.video.par.den);
+ port->es.video.frame_rate.numerator,
+ port->es.video.frame_rate.denominator,
+ port->es.video.par.numerator, port->es.video.par.denominator);
}
}
@@ -1549,8 +1549,8 @@ int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
dst->es.video.crop.y = src->es.video.crop.y;
dst->es.video.crop.width = src->es.video.crop.width;
dst->es.video.crop.height = src->es.video.crop.height;
- dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
- dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
+ dst->es.video.frame_rate.numerator = src->es.video.frame_rate.numerator;
+ dst->es.video.frame_rate.denominator = src->es.video.frame_rate.denominator;
/* set new format */
ret = port_info_set(instance, dst);
@@ -1841,7 +1841,6 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
mutex_unlock(&instance->vchiq_mutex);
vchiq_shutdown(instance->vchiq_instance);
- flush_workqueue(instance->bulk_wq);
destroy_workqueue(instance->bulk_wq);
idr_destroy(&instance->context_map);
diff --git a/drivers/staging/vme/devices/Kconfig b/drivers/staging/vme_user/Kconfig
index 5651bb16b28b..e8b4461bf27f 100644
--- a/drivers/staging/vme/devices/Kconfig
+++ b/drivers/staging/vme_user/Kconfig
@@ -3,7 +3,7 @@ comment "VME Device Drivers"
config VME_USER
tristate "VME user space access driver"
- depends on STAGING
+ depends on STAGING && VME_BUS
help
If you say Y here you want to be able to access a limited number of
VME windows in a manner at least semi-compatible with the interface
diff --git a/drivers/staging/vme/devices/Makefile b/drivers/staging/vme_user/Makefile
index 5380115139b0..5380115139b0 100644
--- a/drivers/staging/vme/devices/Makefile
+++ b/drivers/staging/vme_user/Makefile
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme_user/vme_user.c
index e3fa38bd7f12..859af797630c 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme_user/vme_user.c
@@ -773,7 +773,7 @@ MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0000);
MODULE_DESCRIPTION("VME User Space Access Driver");
-MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
+MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");
module_init(vme_user_init);
diff --git a/drivers/staging/vme/devices/vme_user.h b/drivers/staging/vme_user/vme_user.h
index 19ecb05781cc..19ecb05781cc 100644
--- a/drivers/staging/vme/devices/vme_user.h
+++ b/drivers/staging/vme_user/vme_user.h
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index dfdb0ebf43b5..577a38fae369 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -29,7 +29,6 @@
*
*/
-#include "tmacro.h"
#include "mac.h"
#include "baseband.h"
#include "srom.h"
@@ -1910,19 +1909,19 @@ bool bb_read_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
unsigned char by_value;
/* BB reg offset */
- VNSvOutPortB(iobase + MAC_REG_BBREGADR, by_bb_addr);
+ iowrite8(by_bb_addr, iobase + MAC_REG_BBREGADR);
/* turn on REGR */
MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(iobase + MAC_REG_BBREGCTL, &by_value);
+ by_value = ioread8(iobase + MAC_REG_BBREGCTL);
if (by_value & BBREGCTL_DONE)
break;
}
/* get BB data */
- VNSvInPortB(iobase + MAC_REG_BBREGDATA, pby_data);
+ *pby_data = ioread8(iobase + MAC_REG_BBREGDATA);
if (ww == W_MAX_TIMEOUT) {
pr_debug(" DBG_PORT80(0x30)\n");
@@ -1953,15 +1952,15 @@ bool bb_write_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
unsigned char by_value;
/* BB reg offset */
- VNSvOutPortB(iobase + MAC_REG_BBREGADR, by_bb_addr);
+ iowrite8(by_bb_addr, iobase + MAC_REG_BBREGADR);
/* set BB data */
- VNSvOutPortB(iobase + MAC_REG_BBREGDATA, by_data);
+ iowrite8(by_data, iobase + MAC_REG_BBREGDATA);
/* turn on BBREGCTL_REGW */
MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGW);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(iobase + MAC_REG_BBREGCTL, &by_value);
+ by_value = ioread8(iobase + MAC_REG_BBREGCTL);
if (by_value & BBREGCTL_DONE)
break;
}
@@ -2054,7 +2053,7 @@ bool bb_vt3253_init(struct vnt_private *priv)
byVT3253B0_AGC[ii][0],
byVT3253B0_AGC[ii][1]);
- VNSvOutPortB(iobase + MAC_REG_ITRTMSET, 0x23);
+ iowrite8(0x23, iobase + MAC_REG_ITRTMSET);
MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0));
priv->abyBBVGA[0] = 0x14;
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 1110366fc415..2cde0082fc03 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -11,7 +11,7 @@
* CARDbAddBasicRate - Add to BasicRateSet
* CARDbIsOFDMinBasicRate - Check if any OFDM rate is in BasicRateSet
* CARDqGetTSFOffset - Calculate TSFOffset
- * CARDbGetCurrentTSF - Read Current NIC TSF counter
+ * vt6655_get_current_tsf - Read Current NIC TSF counter
* CARDqGetNextTBTT - Calculate Next Beacon TSF counter
* CARDvSetFirstNextTBTT - Set NIC Beacon time
* CARDvUpdateNextTBTT - Sync. NIC Beacon time
@@ -24,7 +24,6 @@
*
*/
-#include "tmacro.h"
#include "card.h"
#include "baseband.h"
#include "mac.h"
@@ -239,26 +238,25 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
if (priv->bySIFS != bySIFS) {
priv->bySIFS = bySIFS;
- VNSvOutPortB(priv->port_offset + MAC_REG_SIFS, priv->bySIFS);
+ iowrite8(priv->bySIFS, priv->port_offset + MAC_REG_SIFS);
}
if (priv->byDIFS != byDIFS) {
priv->byDIFS = byDIFS;
- VNSvOutPortB(priv->port_offset + MAC_REG_DIFS, priv->byDIFS);
+ iowrite8(priv->byDIFS, priv->port_offset + MAC_REG_DIFS);
}
if (priv->byEIFS != C_EIFS) {
priv->byEIFS = C_EIFS;
- VNSvOutPortB(priv->port_offset + MAC_REG_EIFS, priv->byEIFS);
+ iowrite8(priv->byEIFS, priv->port_offset + MAC_REG_EIFS);
}
if (priv->bySlot != bySlot) {
priv->bySlot = bySlot;
- VNSvOutPortB(priv->port_offset + MAC_REG_SLOT, priv->bySlot);
+ iowrite8(priv->bySlot, priv->port_offset + MAC_REG_SLOT);
bb_set_short_slot_time(priv);
}
if (priv->byCWMaxMin != byCWMaxMin) {
priv->byCWMaxMin = byCWMaxMin;
- VNSvOutPortB(priv->port_offset + MAC_REG_CWMAXMIN0,
- priv->byCWMaxMin);
+ iowrite8(priv->byCWMaxMin, priv->port_offset + MAC_REG_CWMAXMIN0);
}
priv->byPacketType = CARDbyGetPktType(priv);
@@ -289,7 +287,7 @@ bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
u64 local_tsf;
u64 qwTSFOffset = 0;
- CARDbGetCurrentTSF(priv, &local_tsf);
+ local_tsf = vt6655_get_current_tsf(priv);
if (qwBSSTimestamp != local_tsf) {
qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp,
@@ -321,9 +319,9 @@ bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
bool CARDbSetBeaconPeriod(struct vnt_private *priv,
unsigned short wBeaconInterval)
{
- u64 qwNextTBTT = 0;
+ u64 qwNextTBTT;
- CARDbGetCurrentTSF(priv, &qwNextTBTT); /* Get Local TSF counter */
+ qwNextTBTT = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
@@ -740,24 +738,24 @@ u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
*
* Return Value: true if success; otherwise false
*/
-bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF)
+u64 vt6655_get_current_tsf(struct vnt_private *priv)
{
void __iomem *iobase = priv->port_offset;
unsigned short ww;
unsigned char data;
+ u32 low, high;
MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(iobase + MAC_REG_TFTCTL, &data);
+ data = ioread8(iobase + MAC_REG_TFTCTL);
if (!(data & TFTCTL_TSFCNTRRD))
break;
}
if (ww == W_MAX_TIMEOUT)
- return false;
- VNSvInPortD(iobase + MAC_REG_TSFCNTR, (u32 *)pqwCurrTSF);
- VNSvInPortD(iobase + MAC_REG_TSFCNTR + 4, (u32 *)pqwCurrTSF + 1);
-
- return true;
+ return 0;
+ low = ioread32(iobase + MAC_REG_TSFCNTR);
+ high = ioread32(iobase + MAC_REG_TSFCNTR + 4);
+ return le64_to_cpu(low + ((u64)high << 32));
}
/*
@@ -804,9 +802,9 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv,
unsigned short wBeaconInterval)
{
void __iomem *iobase = priv->port_offset;
- u64 qwNextTBTT = 0;
+ u64 qwNextTBTT;
- CARDbGetCurrentTSF(priv, &qwNextTBTT); /* Get Local TSF counter */
+ qwNextTBTT = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
/* Set NextTBTT */
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 09e7f3f1cbed..22dc359a6565 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -46,7 +46,7 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv,
unsigned short wBeaconInterval);
void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
unsigned short wBeaconInterval);
-bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF);
+u64 vt6655_get_current_tsf(struct vnt_private *priv);
u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval);
u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2);
unsigned char CARDbyGetPktType(struct vnt_private *priv);
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index abe867814dc8..652dcaf61169 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -118,11 +118,9 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
/* set HW default power register */
MACvSelectPage1(priv->port_offset);
RFbSetPower(priv, RATE_1M, priv->byCurrentCh);
- VNSvOutPortB(priv->port_offset + MAC_REG_PWRCCK,
- priv->byCurPwr);
+ iowrite8(priv->byCurPwr, priv->port_offset + MAC_REG_PWRCCK);
RFbSetPower(priv, RATE_6M, priv->byCurrentCh);
- VNSvOutPortB(priv->port_offset + MAC_REG_PWROFDM,
- priv->byCurPwr);
+ iowrite8(priv->byCurPwr, priv->port_offset + MAC_REG_PWROFDM);
MACvSelectPage0(priv->port_offset);
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 897d70cf32b8..afaf331fe125 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -219,7 +219,7 @@ static void device_init_registers(struct vnt_private *priv)
MACvInitialize(priv);
/* Get Local ID */
- VNSvInPortB(priv->port_offset + MAC_REG_LOCALID, &priv->local_id);
+ priv->local_id = ioread8(priv->port_offset + MAC_REG_LOCALID);
spin_lock_irqsave(&priv->lock, flags);
@@ -334,8 +334,7 @@ static void device_init_registers(struct vnt_private *priv)
if (priv->local_id > REV_ID_VT3253_B1) {
MACvSelectPage1(priv->port_offset);
- VNSvOutPortB(priv->port_offset + MAC_REG_MSRCTL + 1,
- (MSRCTL1_TXPWR | MSRCTL1_CSAPAREN));
+ iowrite8(MSRCTL1_TXPWR | MSRCTL1_CSAPAREN, priv->port_offset + MAC_REG_MSRCTL + 1);
MACvSelectPage0(priv->port_offset);
}
@@ -349,9 +348,9 @@ static void device_init_registers(struct vnt_private *priv)
MACvSetLongRetryLimit(priv, priv->byLongRetryLimit);
/* reset TSF counter */
- VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
+ iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
/* enable TSF counter */
- VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
+ iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
/* initialize BBP registers */
bb_vt3253_init(priv);
@@ -377,7 +376,7 @@ static void device_init_registers(struct vnt_private *priv)
if (priv->byRadioCtl & EEP_RADIOCTL_ENABLE) {
/* Get GPIO */
- MACvGPIOIn(priv->port_offset, &priv->byGPIO);
+ priv->byGPIO = ioread8(priv->port_offset + MAC_REG_GPIOCTL1);
if (((priv->byGPIO & GPIO0_DATA) &&
!(priv->byRadioCtl & EEP_RADIOCTL_INV)) ||
@@ -406,7 +405,7 @@ static void device_init_registers(struct vnt_private *priv)
MACvReceive1(priv->port_offset);
/* start the adapter */
- MACvStart(priv->port_offset);
+ iowrite8(HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON, priv->port_offset + MAC_REG_HOSTCR);
}
static void device_print_info(struct vnt_private *priv)
@@ -1029,7 +1028,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
u32 isr;
unsigned long flags;
- MACvReadISR(priv->port_offset, &isr);
+ isr = ioread32(priv->port_offset + MAC_REG_ISR);
if (isr == 0)
return;
@@ -1042,7 +1041,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
spin_lock_irqsave(&priv->lock, flags);
/* Read low level stats */
- MACvReadMIBCounter(priv->port_offset, &mib_counter);
+ mib_counter = ioread32(priv->port_offset + MAC_REG_MIBCNTR);
low_stats->dot11RTSSuccessCount += mib_counter & 0xff;
low_stats->dot11RTSFailureCount += (mib_counter >> 8) & 0xff;
@@ -1060,7 +1059,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
if (isr & ISR_FETALERR) {
pr_debug(" ISR_FETALERR\n");
- VNSvOutPortB(priv->port_offset + MAC_REG_SOFTPWRCTL, 0);
+ iowrite8(0, priv->port_offset + MAC_REG_SOFTPWRCTL);
VNSvOutPortW(priv->port_offset +
MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPECTI);
device_error(priv, isr);
@@ -1116,7 +1115,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
ieee80211_queue_stopped(priv->hw, 0))
ieee80211_wake_queues(priv->hw);
- MACvReadISR(priv->port_offset, &isr);
+ isr = ioread32(priv->port_offset + MAC_REG_ISR);
MACvReceive0(priv->port_offset);
MACvReceive1(priv->port_offset);
@@ -1407,7 +1406,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
spin_lock_irqsave(&priv->lock, flags);
- MACvWriteBSSIDAddress(priv->port_offset, (u8 *)conf->bssid);
+ MACvWriteBSSIDAddress(priv->port_offset, conf->bssid);
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -1477,10 +1476,8 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
CARDvSetFirstNextTBTT(priv, conf->beacon_int);
} else {
- VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL,
- TFTCTL_TSFCNTRST);
- VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL,
- TFTCTL_TSFCNTREN);
+ iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
+ iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
}
}
}
@@ -1513,7 +1510,7 @@ static void vnt_configure(struct ieee80211_hw *hw,
*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
- VNSvInPortB(priv->port_offset + MAC_REG_RCR, &rx_mode);
+ rx_mode = ioread8(priv->port_offset + MAC_REG_RCR);
dev_dbg(&priv->pcid->dev, "rx mode in = %x\n", rx_mode);
@@ -1561,7 +1558,7 @@ static void vnt_configure(struct ieee80211_hw *hw,
rx_mode |= RCR_BSSID;
}
- VNSvOutPortB(priv->port_offset + MAC_REG_RCR, rx_mode);
+ iowrite8(rx_mode, priv->port_offset + MAC_REG_RCR);
dev_dbg(&priv->pcid->dev, "rx mode out= %x\n", rx_mode);
}
@@ -1603,7 +1600,7 @@ static u64 vnt_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct vnt_private *priv = hw->priv;
u64 tsf;
- CARDbGetCurrentTSF(priv, &tsf);
+ tsf = vt6655_get_current_tsf(priv);
return tsf;
}
@@ -1621,7 +1618,7 @@ static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct vnt_private *priv = hw->priv;
/* reset TSF counter */
- VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
+ iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
}
static const struct ieee80211_ops vnt_mac_ops = {
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index f843966a3ea4..1469015eb5b4 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -11,7 +11,6 @@
*
*/
-#include "tmacro.h"
#include "key.h"
#include "mac.h"
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 80cced7dfda8..88ddd0676463 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -36,7 +36,6 @@
*
*/
-#include "tmacro.h"
#include "mac.h"
/*
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 550dc4da80a9..57ae3bdbdb2d 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -18,7 +18,6 @@
#ifndef __MAC_H__
#define __MAC_H__
-#include "tmacro.h"
#include "upc.h"
/*--------------------- Export Definitions -------------------------*/
@@ -261,18 +260,18 @@
#define TFTCTL_TSFCNTREN 0x01
/* Bits in the EnhanceCFG register */
-#define EnCFG_BarkerPream 0x00020000
-#define EnCFG_NXTBTTCFPSTR 0x00010000
-#define EnCFG_BcnSusClr 0x00000200
-#define EnCFG_BcnSusInd 0x00000100
-#define EnCFG_CFP_ProtectEn 0x00000040
-#define EnCFG_ProtectMd 0x00000020
-#define EnCFG_HwParCFP 0x00000010
-#define EnCFG_CFNULRSP 0x00000004
-#define EnCFG_BBType_MASK 0x00000003
-#define EnCFG_BBType_g 0x00000002
-#define EnCFG_BBType_b 0x00000001
-#define EnCFG_BBType_a 0x00000000
+#define ENCFG_BARKERPREAM 0x00020000
+#define ENCFG_NXTBTTCFPSTR 0x00010000
+#define ENCFG_BCNSUSCLR 0x00000200
+#define ENCFG_BCNSUSIND 0x00000100
+#define ENCFG_CFP_PROTECTEN 0x00000040
+#define ENCFG_PROTECTMD 0x00000020
+#define ENCFG_HWPARCFP 0x00000010
+#define ENCFG_CFNULRSP 0x00000004
+#define ENCFG_BBTYPE_MASK 0x00000003
+#define ENCFG_BBTYPE_G 0x00000002
+#define ENCFG_BBTYPE_B 0x00000001
+#define ENCFG_BBTYPE_A 0x00000000
/* Bits in the Page1Sel register */
#define PAGE1_SEL 0x01
@@ -497,7 +496,7 @@
#define MAC_LB_INTERNAL 0x01
#define MAC_LB_NONE 0x00
-#define Default_BI 0x200
+#define DEFAULT_BI 0x200
/* MiscFIFO Offset */
#define MISCFIFO_KEYETRY0 32
@@ -541,77 +540,31 @@
#define MACvRegBitsOn(iobase, byRegOfs, byBits) \
do { \
unsigned char byData; \
- VNSvInPortB(iobase + byRegOfs, &byData); \
- VNSvOutPortB(iobase + byRegOfs, byData | (byBits)); \
+ byData = ioread8(iobase + byRegOfs); \
+ iowrite8(byData | (byBits), iobase + byRegOfs); \
} while (0)
#define MACvWordRegBitsOn(iobase, byRegOfs, wBits) \
do { \
unsigned short wData; \
- VNSvInPortW(iobase + byRegOfs, &wData); \
+ wData = ioread16(iobase + byRegOfs); \
VNSvOutPortW(iobase + byRegOfs, wData | (wBits)); \
} while (0)
-#define MACvDWordRegBitsOn(iobase, byRegOfs, dwBits) \
-do { \
- unsigned long dwData; \
- VNSvInPortD(iobase + byRegOfs, &dwData); \
- VNSvOutPortD(iobase + byRegOfs, dwData | (dwBits)); \
-} while (0)
-
-#define MACvRegBitsOnEx(iobase, byRegOfs, byMask, byBits) \
-do { \
- unsigned char byData; \
- VNSvInPortB(iobase + byRegOfs, &byData); \
- byData &= byMask; \
- VNSvOutPortB(iobase + byRegOfs, byData | (byBits)); \
-} while (0)
-
#define MACvRegBitsOff(iobase, byRegOfs, byBits) \
do { \
unsigned char byData; \
- VNSvInPortB(iobase + byRegOfs, &byData); \
- VNSvOutPortB(iobase + byRegOfs, byData & ~(byBits)); \
+ byData = ioread8(iobase + byRegOfs); \
+ iowrite8(byData & ~(byBits), iobase + byRegOfs); \
} while (0)
#define MACvWordRegBitsOff(iobase, byRegOfs, wBits) \
do { \
unsigned short wData; \
- VNSvInPortW(iobase + byRegOfs, &wData); \
+ wData = ioread16(iobase + byRegOfs); \
VNSvOutPortW(iobase + byRegOfs, wData & ~(wBits)); \
} while (0)
-#define MACvDWordRegBitsOff(iobase, byRegOfs, dwBits) \
-do { \
- unsigned long dwData; \
- VNSvInPortD(iobase + byRegOfs, &dwData); \
- VNSvOutPortD(iobase + byRegOfs, dwData & ~(dwBits)); \
-} while (0)
-
-#define MACvGetCurrRx0DescAddr(iobase, pdwCurrDescAddr) \
- VNSvInPortD(iobase + MAC_REG_RXDMAPTR0, \
- (unsigned long *)pdwCurrDescAddr)
-
-#define MACvGetCurrRx1DescAddr(iobase, pdwCurrDescAddr) \
- VNSvInPortD(iobase + MAC_REG_RXDMAPTR1, \
- (unsigned long *)pdwCurrDescAddr)
-
-#define MACvGetCurrTx0DescAddr(iobase, pdwCurrDescAddr) \
- VNSvInPortD(iobase + MAC_REG_TXDMAPTR0, \
- (unsigned long *)pdwCurrDescAddr)
-
-#define MACvGetCurrAC0DescAddr(iobase, pdwCurrDescAddr) \
- VNSvInPortD(iobase + MAC_REG_AC0DMAPTR, \
- (unsigned long *)pdwCurrDescAddr)
-
-#define MACvGetCurrSyncDescAddr(iobase, pdwCurrDescAddr) \
- VNSvInPortD(iobase + MAC_REG_SYNCDMAPTR, \
- (unsigned long *)pdwCurrDescAddr)
-
-#define MACvGetCurrATIMDescAddr(iobase, pdwCurrDescAddr) \
- VNSvInPortD(iobase + MAC_REG_ATIMDMAPTR, \
- (unsigned long *)pdwCurrDescAddr)
-
/* set the chip with current BCN tx descriptor address */
#define MACvSetCurrBCNTxDescAddr(iobase, dwCurrDescAddr) \
VNSvOutPortD(iobase + MAC_REG_BCNDMAPTR, \
@@ -622,104 +575,40 @@ do { \
VNSvOutPortW(iobase + MAC_REG_BCNDMACTL + 2, \
wCurrBCNLength)
-#define MACvReadBSSIDAddress(iobase, pbyEtherAddr) \
-do { \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
- VNSvInPortB(iobase + MAC_REG_BSSID0, \
- (unsigned char *)pbyEtherAddr); \
- VNSvInPortB(iobase + MAC_REG_BSSID0 + 1, \
- pbyEtherAddr + 1); \
- VNSvInPortB(iobase + MAC_REG_BSSID0 + 2, \
- pbyEtherAddr + 2); \
- VNSvInPortB(iobase + MAC_REG_BSSID0 + 3, \
- pbyEtherAddr + 3); \
- VNSvInPortB(iobase + MAC_REG_BSSID0 + 4, \
- pbyEtherAddr + 4); \
- VNSvInPortB(iobase + MAC_REG_BSSID0 + 5, \
- pbyEtherAddr + 5); \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
-} while (0)
-
#define MACvWriteBSSIDAddress(iobase, pbyEtherAddr) \
do { \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
- VNSvOutPortB(iobase + MAC_REG_BSSID0, \
- *(pbyEtherAddr)); \
- VNSvOutPortB(iobase + MAC_REG_BSSID0 + 1, \
- *(pbyEtherAddr + 1)); \
- VNSvOutPortB(iobase + MAC_REG_BSSID0 + 2, \
- *(pbyEtherAddr + 2)); \
- VNSvOutPortB(iobase + MAC_REG_BSSID0 + 3, \
- *(pbyEtherAddr + 3)); \
- VNSvOutPortB(iobase + MAC_REG_BSSID0 + 4, \
- *(pbyEtherAddr + 4)); \
- VNSvOutPortB(iobase + MAC_REG_BSSID0 + 5, \
- *(pbyEtherAddr + 5)); \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
+ iowrite8(1, iobase + MAC_REG_PAGE1SEL); \
+ iowrite8(pbyEtherAddr[0], iobase + MAC_REG_BSSID0); \
+ iowrite8(pbyEtherAddr[1], iobase + MAC_REG_BSSID0 + 1); \
+ iowrite8(pbyEtherAddr[2], iobase + MAC_REG_BSSID0 + 2); \
+ iowrite8(pbyEtherAddr[3], iobase + MAC_REG_BSSID0 + 3); \
+ iowrite8(pbyEtherAddr[4], iobase + MAC_REG_BSSID0 + 4); \
+ iowrite8(pbyEtherAddr[5], iobase + MAC_REG_BSSID0 + 5); \
+ iowrite8(0, iobase + MAC_REG_PAGE1SEL); \
} while (0)
#define MACvReadEtherAddress(iobase, pbyEtherAddr) \
do { \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
- VNSvInPortB(iobase + MAC_REG_PAR0, \
- (unsigned char *)pbyEtherAddr); \
- VNSvInPortB(iobase + MAC_REG_PAR0 + 1, \
- pbyEtherAddr + 1); \
- VNSvInPortB(iobase + MAC_REG_PAR0 + 2, \
- pbyEtherAddr + 2); \
- VNSvInPortB(iobase + MAC_REG_PAR0 + 3, \
- pbyEtherAddr + 3); \
- VNSvInPortB(iobase + MAC_REG_PAR0 + 4, \
- pbyEtherAddr + 4); \
- VNSvInPortB(iobase + MAC_REG_PAR0 + 5, \
- pbyEtherAddr + 5); \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
+ iowrite8(1, iobase + MAC_REG_PAGE1SEL); \
+ pbyEtherAddr[0] = ioread8(iobase + MAC_REG_PAR0); \
+ pbyEtherAddr[1] = ioread8(iobase + MAC_REG_PAR0 + 1); \
+ pbyEtherAddr[2] = ioread8(iobase + MAC_REG_PAR0 + 2); \
+ pbyEtherAddr[3] = ioread8(iobase + MAC_REG_PAR0 + 3); \
+ pbyEtherAddr[4] = ioread8(iobase + MAC_REG_PAR0 + 4); \
+ pbyEtherAddr[5] = ioread8(iobase + MAC_REG_PAR0 + 5); \
+ iowrite8(0, iobase + MAC_REG_PAGE1SEL); \
} while (0)
-#define MACvWriteEtherAddress(iobase, pbyEtherAddr) \
-do { \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
- VNSvOutPortB(iobase + MAC_REG_PAR0, \
- *pbyEtherAddr); \
- VNSvOutPortB(iobase + MAC_REG_PAR0 + 1, \
- *(pbyEtherAddr + 1)); \
- VNSvOutPortB(iobase + MAC_REG_PAR0 + 2, \
- *(pbyEtherAddr + 2)); \
- VNSvOutPortB(iobase + MAC_REG_PAR0 + 3, \
- *(pbyEtherAddr + 3)); \
- VNSvOutPortB(iobase + MAC_REG_PAR0 + 4, \
- *(pbyEtherAddr + 4)); \
- VNSvOutPortB(iobase + MAC_REG_PAR0 + 5, \
- *(pbyEtherAddr + 5)); \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
-} while (0)
-
-#define MACvClearISR(iobase) \
- VNSvOutPortD(iobase + MAC_REG_ISR, IMR_MASK_VALUE)
-
-#define MACvStart(iobase) \
- VNSvOutPortB(iobase + MAC_REG_HOSTCR, \
- (HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON))
-
#define MACvRx0PerPktMode(iobase) \
VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKT)
-#define MACvRx0BufferFillMode(iobase) \
- VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKTCLR)
-
#define MACvRx1PerPktMode(iobase) \
VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKT)
-#define MACvRx1BufferFillMode(iobase) \
- VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKTCLR)
-
-#define MACvRxOn(iobase) \
- MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_RXON)
-
#define MACvReceive0(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(iobase + MAC_REG_RXDMACTL0, &dwData); \
+ dwData = ioread32(iobase + MAC_REG_RXDMACTL0); \
if (dwData & DMACTL_RUN) \
VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_WAKE); \
else \
@@ -729,20 +618,17 @@ do { \
#define MACvReceive1(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(iobase + MAC_REG_RXDMACTL1, &dwData); \
+ dwData = ioread32(iobase + MAC_REG_RXDMACTL1); \
if (dwData & DMACTL_RUN) \
VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_WAKE); \
else \
VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_RUN); \
} while (0)
-#define MACvTxOn(iobase) \
- MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_TXON)
-
#define MACvTransmit0(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(iobase + MAC_REG_TXDMACTL0, &dwData); \
+ dwData = ioread32(iobase + MAC_REG_TXDMACTL0); \
if (dwData & DMACTL_RUN) \
VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_WAKE); \
else \
@@ -752,47 +638,21 @@ do { \
#define MACvTransmitAC0(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(iobase + MAC_REG_AC0DMACTL, &dwData); \
+ dwData = ioread32(iobase + MAC_REG_AC0DMACTL); \
if (dwData & DMACTL_RUN) \
VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_WAKE); \
else \
VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_RUN); \
} while (0)
-#define MACvTransmitSYNC(iobase) \
-do { \
- unsigned long dwData; \
- VNSvInPortD(iobase + MAC_REG_SYNCDMACTL, &dwData); \
- if (dwData & DMACTL_RUN) \
- VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_WAKE); \
- else \
- VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_RUN); \
-} while (0)
-
-#define MACvTransmitATIM(iobase) \
-do { \
- unsigned long dwData; \
- VNSvInPortD(iobase + MAC_REG_ATIMDMACTL, &dwData); \
- if (dwData & DMACTL_RUN) \
- VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_WAKE); \
- else \
- VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_RUN); \
-} while (0)
-
-#define MACvTransmitBCN(iobase) \
- VNSvOutPortB(iobase + MAC_REG_BCNDMACTL, BEACON_READY)
-
#define MACvClearStckDS(iobase) \
do { \
unsigned char byOrgValue; \
- VNSvInPortB(iobase + MAC_REG_STICKHW, &byOrgValue); \
+ byOrgValue = ioread8(iobase + MAC_REG_STICKHW); \
byOrgValue = byOrgValue & 0xFC; \
- VNSvOutPortB(iobase + MAC_REG_STICKHW, byOrgValue); \
+ iowrite8(byOrgValue, iobase + MAC_REG_STICKHW); \
} while (0)
-#define MACvReadISR(iobase, pdwValue) \
- VNSvInPortD(iobase + MAC_REG_ISR, pdwValue)
-
#define MACvWriteISR(iobase, dwValue) \
VNSvOutPortD(iobase + MAC_REG_ISR, dwValue)
@@ -803,77 +663,58 @@ do { \
VNSvOutPortD(iobase + MAC_REG_IMR, 0)
#define MACvSelectPage0(iobase) \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0)
+ iowrite8(0, iobase + MAC_REG_PAGE1SEL)
#define MACvSelectPage1(iobase) \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1)
-
-#define MACvReadMIBCounter(iobase, pdwCounter) \
- VNSvInPortD(iobase + MAC_REG_MIBCNTR, pdwCounter)
-
-#define MACvPwrEvntDisable(iobase) \
- VNSvOutPortW(iobase + MAC_REG_WAKEUPEN0, 0x0000)
+ iowrite8(1, iobase + MAC_REG_PAGE1SEL)
#define MACvEnableProtectMD(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
- dwOrgValue = dwOrgValue | EnCFG_ProtectMd; \
+ dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
+ dwOrgValue = dwOrgValue | ENCFG_PROTECTMD; \
VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
#define MACvDisableProtectMD(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
- dwOrgValue = dwOrgValue & ~EnCFG_ProtectMd; \
+ dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
+ dwOrgValue = dwOrgValue & ~ENCFG_PROTECTMD; \
VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
#define MACvEnableBarkerPreambleMd(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
- dwOrgValue = dwOrgValue | EnCFG_BarkerPream; \
+ dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
+ dwOrgValue = dwOrgValue | ENCFG_BARKERPREAM; \
VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
#define MACvDisableBarkerPreambleMd(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
- dwOrgValue = dwOrgValue & ~EnCFG_BarkerPream; \
+ dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
+ dwOrgValue = dwOrgValue & ~ENCFG_BARKERPREAM; \
VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
#define MACvSetBBType(iobase, byTyp) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
- dwOrgValue = dwOrgValue & ~EnCFG_BBType_MASK; \
+ dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
+ dwOrgValue = dwOrgValue & ~ENCFG_BBTYPE_MASK; \
dwOrgValue = dwOrgValue | (unsigned long)byTyp; \
VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
-#define MACvReadATIMW(iobase, pwCounter) \
- VNSvInPortW(iobase + MAC_REG_AIDATIM, pwCounter)
-
-#define MACvWriteATIMW(iobase, wCounter) \
- VNSvOutPortW(iobase + MAC_REG_AIDATIM, wCounter)
-
-#define MACvWriteCRC16_128(iobase, byRegOfs, wCRC) \
-do { \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
- VNSvOutPortW(iobase + byRegOfs, wCRC); \
- VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
-} while (0)
-
-#define MACvGPIOIn(iobase, pbyValue) \
- VNSvInPortB(iobase + MAC_REG_GPIOCTL1, pbyValue)
-
#define MACvSetRFLE_LatchBase(iobase) \
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
+#define MAKEWORD(lb, hb) \
+ ((unsigned short)(((unsigned char)(lb)) | (((unsigned short)((unsigned char)(hb))) << 8)))
+
bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs,
unsigned char byTestBits);
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 4498c9d400f2..ee5e2e0d9a8c 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -175,7 +175,7 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortD(iobase + MAC_REG_IFREGCTL, &dwValue);
+ dwValue = ioread32(iobase + MAC_REG_IFREGCTL);
if (dwValue & IFREGCTL_DONE)
break;
}
@@ -207,7 +207,7 @@ static bool RFbAL2230Init(struct vnt_private *priv)
ret = true;
/* 3-wire control for normal mode */
- VNSvOutPortB(iobase + MAC_REG_SOFTPWRCTL, 0);
+ iowrite8(0, iobase + MAC_REG_SOFTPWRCTL);
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
@@ -238,7 +238,7 @@ static bool RFbAL2230Init(struct vnt_private *priv)
SOFTPWRCTL_TXPEINV));
/* 3-wire control for power saving mode */
- VNSvOutPortB(iobase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
+ iowrite8(PSSIG_WPE3 | PSSIG_WPE2, iobase + MAC_REG_PSPWRSIG);
return ret;
}
@@ -254,10 +254,10 @@ static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byCha
ret &= IFRFbWriteEmbedded(priv, al2230_channel_table1[byChannel - 1]);
/* Set Channel[7] = 0 to tell H/W channel is changing now. */
- VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel & 0x7F));
+ iowrite8(byChannel & 0x7F, iobase + MAC_REG_CHANNEL);
MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL2230);
/* Set Channel[7] = 1 to tell H/W channel change is done. */
- VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel | 0x80));
+ iowrite8(byChannel | 0x80, iobase + MAC_REG_CHANNEL);
return ret;
}
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 53506e242a96..71cbfa607d96 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1426,7 +1426,7 @@ static int vnt_beacon_xmit(struct vnt_private *priv,
/* Set auto Transmit on */
MACvRegBitsOn(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
/* Poll Transmit the adapter */
- MACvTransmitBCN(priv->port_offset);
+ iowrite8(BEACON_READY, priv->port_offset + MAC_REG_BCNDMACTL);
return 0;
}
@@ -1450,9 +1450,9 @@ int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif)
int vnt_beacon_enable(struct vnt_private *priv, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf)
{
- VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
+ iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
- VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
+ iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
CARDvSetFirstNextTBTT(priv, conf->beacon_int);
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index 5cdbc24e8c45..722a2cc9a473 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -28,7 +28,6 @@
*/
#include "upc.h"
-#include "tmacro.h"
#include "mac.h"
#include "srom.h"
@@ -66,29 +65,29 @@ unsigned char SROMbyReadEmbedded(void __iomem *iobase,
unsigned char byOrg;
byData = 0xFF;
- VNSvInPortB(iobase + MAC_REG_I2MCFG, &byOrg);
+ byOrg = ioread8(iobase + MAC_REG_I2MCFG);
/* turn off hardware retry for getting NACK */
- VNSvOutPortB(iobase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY)));
+ iowrite8(byOrg & (~I2MCFG_NORETRY), iobase + MAC_REG_I2MCFG);
for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) {
- VNSvOutPortB(iobase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID);
- VNSvOutPortB(iobase + MAC_REG_I2MTGAD, byContntOffset);
+ iowrite8(EEP_I2C_DEV_ID, iobase + MAC_REG_I2MTGID);
+ iowrite8(byContntOffset, iobase + MAC_REG_I2MTGAD);
/* issue read command */
- VNSvOutPortB(iobase + MAC_REG_I2MCSR, I2MCSR_EEMR);
+ iowrite8(I2MCSR_EEMR, iobase + MAC_REG_I2MCSR);
/* wait DONE be set */
for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) {
- VNSvInPortB(iobase + MAC_REG_I2MCSR, &byWait);
+ byWait = ioread8(iobase + MAC_REG_I2MCSR);
if (byWait & (I2MCSR_DONE | I2MCSR_NACK))
break;
- PCAvDelayByIO(CB_DELAY_LOOP_WAIT);
+ udelay(CB_DELAY_LOOP_WAIT);
}
if ((wDelay < W_MAX_TIMEOUT) &&
(!(byWait & I2MCSR_NACK))) {
break;
}
}
- VNSvInPortB(iobase + MAC_REG_I2MDIPT, &byData);
- VNSvOutPortB(iobase + MAC_REG_I2MCFG, byOrg);
+ byData = ioread8(iobase + MAC_REG_I2MDIPT);
+ iowrite8(byOrg, iobase + MAC_REG_I2MCFG);
return byData;
}
diff --git a/drivers/staging/vt6655/tmacro.h b/drivers/staging/vt6655/tmacro.h
deleted file mode 100644
index 1582c03124c9..000000000000
--- a/drivers/staging/vt6655/tmacro.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * Purpose: define basic common types and macros
- *
- * Author: Tevin Chen
- *
- * Date: May 21, 1996
- *
- */
-
-#ifndef __TMACRO_H__
-#define __TMACRO_H__
-
-/****** Common helper macros ***********************************************/
-
-#if !defined(LOBYTE)
-#define LOBYTE(w) ((unsigned char)(w))
-#endif
-#if !defined(HIBYTE)
-#define HIBYTE(w) ((unsigned char)(((unsigned short)(w) >> 8) & 0xFF))
-#endif
-
-#if !defined(LOWORD)
-#define LOWORD(d) ((unsigned short)(d))
-#endif
-#if !defined(HIWORD)
-#define HIWORD(d) ((unsigned short)((((unsigned long)(d)) >> 16) & 0xFFFF))
-#endif
-
-#define LODWORD(q) ((q).u.dwLowDword)
-#define HIDWORD(q) ((q).u.dwHighDword)
-
-#if !defined(MAKEWORD)
-#define MAKEWORD(lb, hb) ((unsigned short)(((unsigned char)(lb)) | (((unsigned short)((unsigned char)(hb))) << 8)))
-#endif
-#if !defined(MAKEDWORD)
-#define MAKEDWORD(lw, hw) ((unsigned long)(((unsigned short)(lw)) | (((unsigned long)((unsigned short)(hw))) << 16)))
-#endif
-
-#endif /* __TMACRO_H__ */
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
index b374db5fca81..2a47f5782b71 100644
--- a/drivers/staging/vt6655/upc.h
+++ b/drivers/staging/vt6655/upc.h
@@ -20,37 +20,12 @@
/* For memory mapped IO */
-#define VNSvInPortB(dwIOAddress, pbyData) \
- (*(pbyData) = ioread8(dwIOAddress))
-
-#define VNSvInPortW(dwIOAddress, pwData) \
- (*(pwData) = ioread16(dwIOAddress))
-
-#define VNSvInPortD(dwIOAddress, pdwData) \
- (*(pdwData) = ioread32(dwIOAddress))
-
-#define VNSvOutPortB(dwIOAddress, byData) \
- iowrite8((u8)(byData), dwIOAddress)
-
#define VNSvOutPortW(dwIOAddress, wData) \
iowrite16((u16)(wData), dwIOAddress)
#define VNSvOutPortD(dwIOAddress, dwData) \
iowrite32((u32)(dwData), dwIOAddress)
-#define PCAvDelayByIO(uDelayUnit) \
-do { \
- unsigned char __maybe_unused byData; \
- unsigned long ii; \
- \
- if (uDelayUnit <= 50) { \
- udelay(uDelayUnit); \
- } else { \
- for (ii = 0; ii < (uDelayUnit); ii++) \
- byData = inb(0x61); \
- } \
-} while (0)
-
/*--------------------- Export Classes ----------------------------*/
/*--------------------- Export Variables --------------------------*/
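With the VNSvInPortB/VNSvOutPortB wrappers removed, register accesses above use ioread8()/iowrite8() directly. A minimal sketch of the read-modify-write pattern that MACvRegBitsOn() now expands to, with hypothetical names:

#include <linux/io.h>

static void set_reg_bits(void __iomem *iobase, unsigned long offset, u8 bits)
{
	u8 val;

	val = ioread8(iobase + offset);		/* was VNSvInPortB(addr, &val) */
	iowrite8(val | bits, iobase + offset);	/* was VNSvOutPortB(addr, val | bits) */
}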
diff --git a/drivers/staging/vt6656/channel.c b/drivers/staging/vt6656/channel.c
index aca003031995..413e2fc4a50d 100644
--- a/drivers/staging/vt6656/channel.c
+++ b/drivers/staging/vt6656/channel.c
@@ -55,7 +55,6 @@ static struct ieee80211_channel vnt_channels_2ghz[] = {
{ .center_freq = 2484, .hw_value = 14 }
};
-
static struct ieee80211_supported_band vnt_supported_2ghz_band = {
.channels = vnt_channels_2ghz,
.n_channels = ARRAY_SIZE(vnt_channels_2ghz),
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index acbbf8acdf1b..464602c74727 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -82,7 +82,6 @@ static u8 al2230_channel_table1[CB_MAX_CHANNEL_24G][3] = {
{0x06, 0x66, 0x61}
};
-
static u8 vt3226_init_table[CB_VT3226_INIT_SEQ][3] = {
{0x03, 0xff, 0x80},
{0x02, 0x82, 0xa1},
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 7951bd63816f..87379edce9a8 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -328,8 +328,7 @@ static int prism2_scan(struct wiphy *wiphy,
(i < request->n_channels) && i < ARRAY_SIZE(prism2_channels);
i++)
msg1.channellist.data.data[i] =
- ieee80211_frequency_to_channel(
- request->channels[i]->center_freq);
+ ieee80211_frequency_to_channel(request->channels[i]->center_freq);
msg1.channellist.data.len = request->n_channels;
msg1.maxchanneltime.data = 250;
@@ -476,14 +475,13 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
return -EINVAL;
result = prism2_domibset_uint32(wlandev,
- DIDMIB_DOT11SMT_PRIVACYTABLE_WEPDEFAULTKEYID,
+ DIDMIB_DOT11SMT_PRIVACYTABLE_WEPDEFAULTKEYID,
sme->key_idx);
if (result)
goto exit;
/* send key to driver */
- did = didmib_dot11smt_wepdefaultkeystable_key(
- sme->key_idx + 1);
+ did = didmib_dot11smt_wepdefaultkeystable_key(sme->key_idx + 1);
result = prism2_domibset_pstr32(wlandev,
did, sme->key_len,
(u8 *)sme->key);
@@ -589,7 +587,7 @@ static int prism2_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
data = MBM_TO_DBM(mbm);
result = prism2_domibset_uint32(wlandev,
- DIDMIB_DOT11PHY_TXPOWERTABLE_CURRENTTXPOWERLEVEL,
+ DIDMIB_DOT11PHY_TXPOWERTABLE_CURRENTTXPOWERLEVEL,
data);
if (result) {
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 98c154a8d8c1..0611e37df6ac 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1227,8 +1227,8 @@ struct hfa384x {
struct timer_list throttle;
- struct tasklet_struct reaper_bh;
- struct tasklet_struct completion_bh;
+ struct work_struct reaper_bh;
+ struct work_struct completion_bh;
struct work_struct usb_work;
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 938e11a1a0b6..33844526c797 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -191,9 +191,9 @@ static void hfa384x_usbctlx_resptimerfn(struct timer_list *t);
static void hfa384x_usb_throttlefn(struct timer_list *t);
-static void hfa384x_usbctlx_completion_task(struct tasklet_struct *t);
+static void hfa384x_usbctlx_completion_task(struct work_struct *work);
-static void hfa384x_usbctlx_reaper_task(struct tasklet_struct *t);
+static void hfa384x_usbctlx_reaper_task(struct work_struct *work);
static int hfa384x_usbctlx_submit(struct hfa384x *hw,
struct hfa384x_usbctlx *ctlx);
@@ -539,8 +539,8 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
/* Initialize the authentication queue */
skb_queue_head_init(&hw->authq);
- tasklet_setup(&hw->reaper_bh, hfa384x_usbctlx_reaper_task);
- tasklet_setup(&hw->completion_bh, hfa384x_usbctlx_completion_task);
+ INIT_WORK(&hw->reaper_bh, hfa384x_usbctlx_reaper_task);
+ INIT_WORK(&hw->completion_bh, hfa384x_usbctlx_completion_task);
INIT_WORK(&hw->link_bh, prism2sta_processing_defer);
INIT_WORK(&hw->usb_work, hfa384x_usb_defer);
@@ -2585,20 +2585,20 @@ void hfa384x_tx_timeout(struct wlandevice *wlandev)
/*----------------------------------------------------------------
* hfa384x_usbctlx_reaper_task
*
- * Tasklet to delete dead CTLX objects
+ * Deferred work callback to delete dead CTLX objects
*
* Arguments:
- * data ptr to a struct hfa384x
+ * work contains ptr to a struct hfa384x
*
* Returns:
*
* Call context:
- * Interrupt
+ * Task
*----------------------------------------------------------------
*/
-static void hfa384x_usbctlx_reaper_task(struct tasklet_struct *t)
+static void hfa384x_usbctlx_reaper_task(struct work_struct *work)
{
- struct hfa384x *hw = from_tasklet(hw, t, reaper_bh);
+ struct hfa384x *hw = container_of(work, struct hfa384x, reaper_bh);
struct hfa384x_usbctlx *ctlx, *temp;
unsigned long flags;
@@ -2618,21 +2618,21 @@ static void hfa384x_usbctlx_reaper_task(struct tasklet_struct *t)
/*----------------------------------------------------------------
* hfa384x_usbctlx_completion_task
*
- * Tasklet to call completion handlers for returned CTLXs
+ * Deferred work callback to call completion handlers for returned CTLXs
*
* Arguments:
- * data ptr to struct hfa384x
+ * work contains ptr to a struct hfa384x
*
* Returns:
* Nothing
*
* Call context:
- * Interrupt
+ * Task
*----------------------------------------------------------------
*/
-static void hfa384x_usbctlx_completion_task(struct tasklet_struct *t)
+static void hfa384x_usbctlx_completion_task(struct work_struct *work)
{
- struct hfa384x *hw = from_tasklet(hw, t, completion_bh);
+ struct hfa384x *hw = container_of(work, struct hfa384x, completion_bh);
struct hfa384x_usbctlx *ctlx, *temp;
unsigned long flags;
@@ -2686,7 +2686,7 @@ static void hfa384x_usbctlx_completion_task(struct tasklet_struct *t)
spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
if (reap)
- tasklet_schedule(&hw->reaper_bh);
+ schedule_work(&hw->reaper_bh);
}
/*----------------------------------------------------------------
@@ -2743,7 +2743,7 @@ static int unlocked_usbctlx_cancel_async(struct hfa384x *hw,
* aren't active and the timers should have been stopped.
*
* The CTLX is migrated to the "completing" queue, and the completing
- * tasklet is scheduled.
+ * work is scheduled.
*
* Arguments:
* hw ptr to a struct hfa384x structure
@@ -2766,7 +2766,7 @@ static void unlocked_usbctlx_complete(struct hfa384x *hw,
* queue.
*/
list_move_tail(&ctlx->list, &hw->ctlxq.completing);
- tasklet_schedule(&hw->completion_bh);
+ schedule_work(&hw->completion_bh);
switch (ctlx->state) {
case CTLX_COMPLETE:
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index dc0749b8eff7..e13da7fadfff 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -165,8 +165,8 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
/* There's no hardware to shutdown, but the driver
- * might have some tasks or tasklets that must be
- * stopped before we can tear everything down.
+ * might have some tasks that must be stopped before
+ * we can tear everything down.
*/
prism2sta_ifstate(wlandev, P80211ENUM_ifstate_disable);
@@ -181,8 +181,8 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
usb_kill_urb(&hw->tx_urb);
usb_kill_urb(&hw->ctlx_urb);
- tasklet_kill(&hw->completion_bh);
- tasklet_kill(&hw->reaper_bh);
+ cancel_work_sync(&hw->completion_bh);
+ cancel_work_sync(&hw->reaper_bh);
cancel_work_sync(&hw->link_bh);
cancel_work_sync(&hw->commsqual_bh);
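The wlan-ng hunks above convert softirq tasklets to regular workqueue items. A minimal sketch of the conversion pattern, with hypothetical names and assuming only the standard workqueue API:

#include <linux/container_of.h>
#include <linux/workqueue.h>

struct my_hw {
	struct work_struct completion_bh;
};

static void my_completion_task(struct work_struct *work)
{
	/* recover the containing structure; runs in process context */
	struct my_hw *hw = container_of(work, struct my_hw, completion_bh);

	(void)hw;
}

static void my_hw_setup(struct my_hw *hw)
{
	INIT_WORK(&hw->completion_bh, my_completion_task);	/* was tasklet_setup() */
}

static void my_hw_kick(struct my_hw *hw)
{
	schedule_work(&hw->completion_bh);	/* was tasklet_schedule() */
}

static void my_hw_teardown(struct my_hw *hw)
{
	cancel_work_sync(&hw->completion_bh);	/* was tasklet_kill() */
}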
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index bb3fb18b2316..e6a967ddc08c 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -972,8 +972,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
cmd->priv = scmd->cmnd;
- blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
- pscsi_req_done);
+ blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG);
return 0;
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index 6554e06e053e..28f87cd8b3ed 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -512,7 +512,7 @@ int optee_check_mem_type(unsigned long start, size_t num_pages)
* Allow kernel address to register with OP-TEE as kernel
* pages are configured as normal memory only.
*/
- if (virt_addr_valid(start) || is_vmalloc_addr((void *)start))
+ if (virt_addr_valid((void *)start) || is_vmalloc_addr((void *)start))
return 0;
mmap_read_lock(mm);
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 770d2b0299c3..80d4e0676083 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -663,6 +663,7 @@ static const struct acpi_device_id int3400_thermal_match[] = {
{"INT3400", 0},
{"INTC1040", 0},
{"INTC1041", 0},
+ {"INTC1042", 0},
{"INTC10A0", 0},
{}
};
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index 07e25321dfe3..71d084c4c456 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -285,6 +285,7 @@ static const struct acpi_device_id int3403_device_ids[] = {
{"INT3403", 0},
{"INTC1043", 0},
{"INTC1046", 0},
+ {"INTC1062", 0},
{"INTC10A1", 0},
{"", 0},
};
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
index 49932a68abac..7d52fcff4937 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
@@ -24,6 +24,7 @@
#define PCI_DEVICE_ID_INTEL_HSB_THERMAL 0x0A03
#define PCI_DEVICE_ID_INTEL_ICL_THERMAL 0x8a03
#define PCI_DEVICE_ID_INTEL_JSL_THERMAL 0x4E03
+#define PCI_DEVICE_ID_INTEL_MTLP_THERMAL 0x7D03
#define PCI_DEVICE_ID_INTEL_RPL_THERMAL 0xA71D
#define PCI_DEVICE_ID_INTEL_SKL_THERMAL 0x1903
#define PCI_DEVICE_ID_INTEL_TGL_THERMAL 0x9A03
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index ca40b0967cdd..c2dc4c158b9d 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -358,6 +358,7 @@ static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend,
static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
+ { PCI_DEVICE_DATA(INTEL, MTLP_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
{ PCI_DEVICE_DATA(INTEL, RPL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
{ },
};
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 4986edfbdf67..e92c658dba1c 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -158,21 +158,20 @@ static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
- struct tb_cfg_request *req;
- bool found = false;
+ struct tb_cfg_request *req = NULL, *iter;
mutex_lock(&pkg->ctl->request_queue_lock);
- list_for_each_entry(req, &pkg->ctl->request_queue, list) {
- tb_cfg_request_get(req);
- if (req->match(req, pkg)) {
- found = true;
+ list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
+ tb_cfg_request_get(iter);
+ if (iter->match(iter, pkg)) {
+ req = iter;
break;
}
- tb_cfg_request_put(req);
+ tb_cfg_request_put(iter);
}
mutex_unlock(&pkg->ctl->request_queue_lock);
- return found ? req : NULL;
+ return req;
}
/* utility functions */
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 7018d959f775..2889a214dadc 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -7,9 +7,7 @@
*/
#include <linux/device.h>
-#include <linux/dmar.h>
#include <linux/idr.h>
-#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -257,13 +255,9 @@ static ssize_t iommu_dma_protection_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- /*
- * Kernel DMA protection is a feature where Thunderbolt security is
- * handled natively using IOMMU. It is enabled when IOMMU is
- * enabled and ACPI DMAR table has DMAR_PLATFORM_OPT_IN set.
- */
- return sprintf(buf, "%d\n",
- iommu_present(&pci_bus_type) && dmar_platform_optin());
+ struct tb *tb = container_of(dev, struct tb, dev);
+
+ return sysfs_emit(buf, "%d\n", tb->nhi->iommu_dma_protection);
}
static DEVICE_ATTR_RO(iommu_dma_protection);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 4a582183f675..1333b158a95e 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -15,9 +15,11 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/property.h>
+#include <linux/string_helpers.h>
#include "nhi.h"
#include "nhi_regs.h"
@@ -1103,6 +1105,47 @@ static void nhi_check_quirks(struct tb_nhi *nhi)
nhi->quirks |= QUIRK_AUTO_CLEAR_INT;
}
+static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data)
+{
+ if (!pdev->external_facing ||
+ !device_iommu_capable(&pdev->dev, IOMMU_CAP_PRE_BOOT_PROTECTION))
+ return 0;
+ *(bool *)data = true;
+ return 1; /* Stop walking */
+}
+
+static void nhi_check_iommu(struct tb_nhi *nhi)
+{
+ struct pci_bus *bus = nhi->pdev->bus;
+ bool port_ok = false;
+
+ /*
+ * Ideally what we'd do here is grab every PCI device that
+ * represents a tunnelling adapter for this NHI and check their
+ * status directly, but unfortunately USB4 seems to make it
+ * obnoxiously difficult to reliably make any correlation.
+ *
+ * So for now we'll have to bodge it... Hoping that the system
+ * is at least sane enough that an adapter is in the same PCI
+ * segment as its NHI, if we can find *something* on that segment
+ * which meets the requirements for Kernel DMA Protection, we'll
+ * take that to imply that firmware is aware and has (hopefully)
+ * done the right thing in general. We need to know that the PCI
+ * layer has seen the ExternalFacingPort property which will then
+ * inform the IOMMU layer to enforce the complete "untrusted DMA"
+ * flow, but also that the IOMMU driver itself can be trusted not
+ * to have been subverted by a pre-boot DMA attack.
+ */
+ while (bus->parent)
+ bus = bus->parent;
+
+ pci_walk_bus(bus, nhi_check_iommu_pdev, &port_ok);
+
+ nhi->iommu_dma_protection = port_ok;
+ dev_dbg(&nhi->pdev->dev, "IOMMU DMA protection is %s\n",
+ str_enabled_disabled(port_ok));
+}
+
static int nhi_init_msi(struct tb_nhi *nhi)
{
struct pci_dev *pdev = nhi->pdev;
@@ -1207,7 +1250,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi->pdev = pdev;
nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
- /* cannot fail - table is allocated bin pcim_iomap_regions */
+ /* cannot fail - table is allocated in pcim_iomap_regions */
nhi->iobase = pcim_iomap_table(pdev)[0];
nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
@@ -1220,6 +1263,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
nhi_check_quirks(nhi);
+ nhi_check_iommu(nhi);
res = nhi_init_msi(nhi);
if (res) {
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index 299712accfe9..ee03fd75a472 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -166,6 +166,9 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
return NULL;
}
+ tb_dbg(path->tb, "discovering %s path starting from %llx:%u\n",
+ path->name, tb_route(src->sw), src->port);
+
p = src;
h = src_hopid;
@@ -198,10 +201,13 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
path->hops[i].out_port = out_port;
path->hops[i].next_hop_index = next_hop;
+ tb_dump_hop(&path->hops[i], &hop);
+
h = next_hop;
p = out_port->remote;
}
+ tb_dbg(path->tb, "path discovery complete\n");
return path;
err:
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index ac87e8b50e52..561e1d77240e 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -693,8 +693,14 @@ static int __tb_port_enable(struct tb_port *port, bool enable)
else
phy |= LANE_ADP_CS_1_LD;
- return tb_port_write(port, &phy, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
+
+ ret = tb_port_write(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ tb_port_dbg(port, "lane %sabled\n", enable ? "en" : "dis");
+ return 0;
}
/**
@@ -993,7 +999,17 @@ static bool tb_port_is_width_supported(struct tb_port *port, int width)
return !!(widths & width);
}
-static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
+/**
+ * tb_port_set_link_width() - Set target link width of the lane adapter
+ * @port: Lane adapter
+ * @width: Target link width (%1 or %2)
+ *
+ * Sets the target link width of the lane adapter to @width. Does not
+ * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_port_set_link_width(struct tb_port *port, unsigned int width)
{
u32 val;
int ret;
@@ -1020,13 +1036,59 @@ static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
return -EINVAL;
}
- val |= LANE_ADP_CS_1_LB;
-
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
/**
+ * tb_port_set_lane_bonding() - Enable/disable lane bonding
+ * @port: Lane adapter
+ * @bonding: enable/disable bonding
+ *
+ * Enables or disables lane bonding. This should be called after target
+ * link width has been set (tb_port_set_link_width()). Note in most
+ * cases one should use tb_port_lane_bonding_enable() instead to enable
+ * lane bonding.
+ *
+ * As a side effect sets @port->bonding accordingly (and does the same
+ * for lane 1 too).
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
+{
+ u32 val;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (bonding)
+ val |= LANE_ADP_CS_1_LB;
+ else
+ val &= ~LANE_ADP_CS_1_LB;
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * When lane 0 bonding is set it will affect lane 1 too so
+ * update both.
+ */
+ port->bonded = bonding;
+ port->dual_link_port->bonded = bonding;
+
+ return 0;
+}
+
+/**
* tb_port_lane_bonding_enable() - Enable bonding on port
* @port: port to enable
*
@@ -1050,22 +1112,27 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
if (ret == 1) {
ret = tb_port_set_link_width(port, 2);
if (ret)
- return ret;
+ goto err_lane0;
}
ret = tb_port_get_link_width(port->dual_link_port);
if (ret == 1) {
ret = tb_port_set_link_width(port->dual_link_port, 2);
- if (ret) {
- tb_port_set_link_width(port, 1);
- return ret;
- }
+ if (ret)
+ goto err_lane0;
}
- port->bonded = true;
- port->dual_link_port->bonded = true;
+ ret = tb_port_set_lane_bonding(port, true);
+ if (ret)
+ goto err_lane1;
return 0;
+
+err_lane1:
+ tb_port_set_link_width(port->dual_link_port, 1);
+err_lane0:
+ tb_port_set_link_width(port, 1);
+ return ret;
}
/**
@@ -1074,13 +1141,10 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
*
* Disable bonding by setting the link width of the port and the
* other port in case of dual link port.
- *
*/
void tb_port_lane_bonding_disable(struct tb_port *port)
{
- port->dual_link_port->bonded = false;
- port->bonded = false;
-
+ tb_port_set_lane_bonding(port, false);
tb_port_set_link_width(port->dual_link_port, 1);
tb_port_set_link_width(port, 1);
}
@@ -1104,10 +1168,17 @@ int tb_port_wait_for_link_width(struct tb_port *port, int width,
do {
ret = tb_port_get_link_width(port);
- if (ret < 0)
- return ret;
- else if (ret == width)
+ if (ret < 0) {
+ /*
+ * Sometimes we get port locked error when
+ * polling the lanes so we can ignore it and
+ * retry.
+ */
+ if (ret != -EACCES)
+ return ret;
+ } else if (ret == width) {
return 0;
+ }
usleep_range(1000, 2000);
} while (ktime_before(ktime_get(), timeout));
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 9beb47b31c75..9a3214fb5038 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -169,12 +169,6 @@ static void tb_discover_tunnels(struct tb *tb)
static int tb_port_configure_xdomain(struct tb_port *port)
{
- /*
- * XDomain paths currently only support single lane so we must
- * disable the other lane according to USB4 spec.
- */
- tb_port_disable(port->dual_link_port);
-
if (tb_switch_is_usb4(port->sw))
return usb4_port_configure_xdomain(port);
return tb_lc_configure_xdomain(port);
@@ -867,7 +861,7 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
static void tb_tunnel_dp(struct tb *tb)
{
- int available_up, available_down, ret;
+ int available_up, available_down, ret, link_nr;
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *port, *in, *out;
struct tb_tunnel *tunnel;
@@ -913,6 +907,20 @@ static void tb_tunnel_dp(struct tb *tb)
}
/*
+ * This is only applicable to links that are not bonded (so
+ * when Thunderbolt 1 hardware is involved somewhere in the
+ * topology). For these try to share the DP bandwidth between
+ * the two lanes.
+ */
+ link_nr = 1;
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (tb_tunnel_is_dp(tunnel)) {
+ link_nr = 0;
+ break;
+ }
+ }
+
+ /*
* DP stream needs the domain to be active so runtime resume
* both ends of the tunnel.
*
@@ -943,7 +951,8 @@ static void tb_tunnel_dp(struct tb *tb)
tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
available_up, available_down);
- tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
+ tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
+ available_down);
if (!tunnel) {
tb_port_dbg(out, "could not allocate DP tunnel\n");
goto err_reclaim;
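Illustrative aside (not part of the patch): the link_nr selection added above means that, on a non-bonded (Thunderbolt 1 era) link, the first DP tunnel is placed on lane adapter 1 and any subsequent DP tunnel on lane adapter 0, so two streams do not compete for the same lane. A minimal sketch of that decision, with a made-up helper name:

/* Illustrative only: mirrors the link_nr selection done in tb_tunnel_dp(). */
static int example_pick_dp_link_nr(bool other_dp_tunnel_exists)
{
	/* First DP tunnel goes to lane adapter 1, later ones to lane adapter 0. */
	return other_dp_tunnel_exists ? 0 : 1;
}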
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index b6fcd8d45324..4602c69913fa 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -674,7 +674,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
#define __TB_PORT_PRINT(level, _port, fmt, arg...) \
do { \
const struct tb_port *__port = (_port); \
- level(__port->sw->tb, "%llx:%x: " fmt, \
+ level(__port->sw->tb, "%llx:%u: " fmt, \
tb_route(__port->sw), __port->port, ## arg); \
} while (0)
#define tb_port_WARN(port, fmt, arg...) \
@@ -991,6 +991,7 @@ int tb_switch_pcie_l1_enable(struct tb_switch *sw);
int tb_switch_xhci_connect(struct tb_switch *sw);
void tb_switch_xhci_disconnect(struct tb_switch *sw);
+int tb_port_state(struct tb_port *port);
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
@@ -1023,7 +1024,8 @@ static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
int tb_port_get_link_speed(struct tb_port *port);
int tb_port_get_link_width(struct tb_port *port);
-int tb_port_state(struct tb_port *port);
+int tb_port_set_link_width(struct tb_port *port, unsigned int width);
+int tb_port_set_lane_bonding(struct tb_port *port, bool bonding);
int tb_port_lane_bonding_enable(struct tb_port *port);
void tb_port_lane_bonding_disable(struct tb_port *port);
int tb_port_wait_for_link_width(struct tb_port *port, int width,
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index fe1afa44c56d..33c4c7aed56d 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -527,6 +527,10 @@ enum tb_xdp_type {
PROPERTIES_CHANGED_RESPONSE,
ERROR_RESPONSE,
UUID_REQUEST = 12,
+ LINK_STATE_STATUS_REQUEST = 15,
+ LINK_STATE_STATUS_RESPONSE,
+ LINK_STATE_CHANGE_REQUEST,
+ LINK_STATE_CHANGE_RESPONSE,
};
struct tb_xdp_header {
@@ -540,6 +544,41 @@ struct tb_xdp_error_response {
u32 error;
};
+struct tb_xdp_link_state_status {
+ struct tb_xdp_header hdr;
+};
+
+struct tb_xdp_link_state_status_response {
+ union {
+ struct tb_xdp_error_response err;
+ struct {
+ struct tb_xdp_header hdr;
+ u32 status;
+ u8 slw;
+ u8 tlw;
+ u8 sls;
+ u8 tls;
+ };
+ };
+};
+
+struct tb_xdp_link_state_change {
+ struct tb_xdp_header hdr;
+ u8 tlw;
+ u8 tls;
+ u16 reserved;
+};
+
+struct tb_xdp_link_state_change_response {
+ union {
+ struct tb_xdp_error_response err;
+ struct {
+ struct tb_xdp_header hdr;
+ u32 status;
+ };
+ };
+};
+
struct tb_xdp_uuid {
struct tb_xdp_header hdr;
};
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index b301eeb0c89b..6a16f61a72a1 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -311,11 +311,16 @@ struct tb_regs_port_header {
/* Lane adapter registers */
#define LANE_ADP_CS_0 0x00
+#define LANE_ADP_CS_0_SUPPORTED_SPEED_MASK GENMASK(19, 16)
+#define LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT 16
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20)
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT 20
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL 0x2
#define LANE_ADP_CS_0_CL0S_SUPPORT BIT(26)
#define LANE_ADP_CS_0_CL1_SUPPORT BIT(27)
#define LANE_ADP_CS_1 0x01
+#define LANE_ADP_CS_1_TARGET_SPEED_MASK GENMASK(3, 0)
+#define LANE_ADP_CS_1_TARGET_SPEED_GEN3 0xc
#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4)
#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4
#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1
diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
index be9b1d7e63d2..ee37f8b58f50 100644
--- a/drivers/thunderbolt/test.c
+++ b/drivers/thunderbolt/test.c
@@ -341,6 +341,47 @@ static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
return sw;
}
+static struct tb_switch *alloc_dev_without_dp(struct kunit *test,
+ struct tb_switch *parent,
+ u64 route, bool bonded)
+{
+ struct tb_switch *sw;
+ int i;
+
+ sw = alloc_dev_default(test, parent, route, bonded);
+ if (!sw)
+ return NULL;
+ /*
+ * Device with:
+ * 2x USB4 Adapters (adapters 1,2 and 3,4),
+ * 1x PCIe Upstream (adapter 9),
+ * 1x PCIe Downstream (adapter 10),
+ * 1x USB3 Upstream (adapter 16),
+ * 1x USB3 Downstream (adapter 17)
+ */
+ for (i = 5; i <= 8; i++)
+ sw->ports[i].disabled = true;
+
+ for (i = 11; i <= 14; i++)
+ sw->ports[i].disabled = true;
+
+ sw->ports[13].cap_adap = 0;
+ sw->ports[14].cap_adap = 0;
+
+ for (i = 18; i <= 19; i++)
+ sw->ports[i].disabled = true;
+
+ sw->generation = 4;
+ sw->credit_allocation = true;
+ sw->max_usb3_credits = 109;
+ sw->min_dp_aux_credits = 0;
+ sw->min_dp_main_credits = 0;
+ sw->max_pcie_credits = 30;
+ sw->max_dma_credits = 1;
+
+ return sw;
+}
+
static struct tb_switch *alloc_dev_usb4(struct kunit *test,
struct tb_switch *parent,
u64 route, bool bonded)
@@ -1348,7 +1389,7 @@ static void tb_test_tunnel_dp(struct kunit *test)
in = &host->ports[5];
out = &dev->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1394,7 +1435,7 @@ static void tb_test_tunnel_dp_chain(struct kunit *test)
in = &host->ports[5];
out = &dev4->ports[14];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1444,7 +1485,7 @@ static void tb_test_tunnel_dp_tree(struct kunit *test)
in = &dev2->ports[13];
out = &dev5->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1509,7 +1550,7 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
in = &dev6->ports[13];
out = &dev12->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1627,7 +1668,7 @@ static void tb_test_tunnel_port_on_path(struct kunit *test)
in = &dev2->ports[13];
out = &dev5->ports[13];
- dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);
KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
@@ -1996,6 +2037,56 @@ static void tb_test_credit_alloc_pcie(struct kunit *test)
tb_tunnel_free(tunnel);
}
+static void tb_test_credit_alloc_without_dp(struct kunit *test)
+{
+ struct tb_switch *host, *dev;
+ struct tb_port *up, *down;
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ host = alloc_host_usb4(test);
+ dev = alloc_dev_without_dp(test, host, 0x1, true);
+
+ /*
+ * The device has no DP therefore baMinDPmain = baMinDPaux = 0
+ *
+ * Create PCIe path with buffers less than baMaxPCIe.
+ *
+ * For a device with buffers configurations:
+ * baMaxUSB3 = 109
+ * baMinDPaux = 0
+ * baMinDPmain = 0
+ * baMaxPCIe = 30
+ * baMaxHI = 1
+ * Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
+ * PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3))
+ * = Max(6, Min(30, 9)) = 9
+ */
+ down = &host->ports[8];
+ up = &dev->ports[9];
+ tunnel = tb_tunnel_alloc_pci(NULL, up, down);
+ KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
+
+ /* PCIe downstream path */
+ path = tunnel->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);
+
+ /* PCIe upstream path */
+ path = tunnel->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
+
+ tb_tunnel_free(tunnel);
+}
+
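Illustrative aside (not part of the patch): the credit formula in the comment above can be written out as plain C. The helper name and parameter list below are made up for the sketch and are not driver API:

#include <linux/minmax.h>

/* Worked example: 120 buffers, CP=2, DP=0, baMaxUSB3=109, baMaxPCIe=30 -> 9. */
static unsigned int example_pcie_credits(unsigned int total, unsigned int cp,
					 unsigned int dp, unsigned int max_usb3,
					 unsigned int max_pcie)
{
	unsigned int remaining = total - (cp + dp);	/* 120 - (2 + 0) = 118 */

	return max_t(unsigned int, 6,
		     min(max_pcie, remaining - max_usb3));	/* max(6, min(30, 9)) = 9 */
}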
static void tb_test_credit_alloc_dp(struct kunit *test)
{
struct tb_switch *host, *dev;
@@ -2009,7 +2100,7 @@ static void tb_test_credit_alloc_dp(struct kunit *test)
in = &host->ports[5];
out = &dev->ports[14];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
@@ -2245,7 +2336,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
in = &host->ports[5];
out = &dev->ports[13];
- dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
@@ -2282,7 +2373,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
in = &host->ports[6];
out = &dev->ports[14];
- dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
@@ -2709,6 +2800,7 @@ static struct kunit_case tb_test_cases[] = {
KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
KUNIT_CASE(tb_test_credit_alloc_pcie),
+ KUNIT_CASE(tb_test_credit_alloc_without_dp),
KUNIT_CASE(tb_test_credit_alloc_dp),
KUNIT_CASE(tb_test_credit_alloc_usb3),
KUNIT_CASE(tb_test_credit_alloc_dma),
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 118742ec93ed..2c3cf7fc3357 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -102,8 +102,11 @@ static unsigned int tb_available_credits(const struct tb_port *port,
* Maximum number of DP streams possible through the
* lane adapter.
*/
- ndp = (credits - (usb3 + pcie + spare)) /
- (sw->min_dp_aux_credits + sw->min_dp_main_credits);
+ if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
+ ndp = (credits - (usb3 + pcie + spare)) /
+ (sw->min_dp_aux_credits + sw->min_dp_main_credits);
+ else
+ ndp = 0;
} else {
ndp = 0;
}
@@ -858,6 +861,7 @@ err_free:
* @tb: Pointer to the domain structure
* @in: DP in adapter port
* @out: DP out adapter port
+ * @link_nr: Preferred lane adapter when the link is not bonded
* @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
* if not limited)
* @max_down: Maximum available downstream bandwidth for the DP tunnel
@@ -869,8 +873,8 @@ err_free:
* Return: Returns a tb_tunnel on success or NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
- struct tb_port *out, int max_up,
- int max_down)
+ struct tb_port *out, int link_nr,
+ int max_up, int max_down)
{
struct tb_tunnel *tunnel;
struct tb_path **paths;
@@ -894,21 +898,21 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
paths = tunnel->paths;
path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
- 1, "Video");
+ link_nr, "Video");
if (!path)
goto err_free;
tb_dp_init_video_path(path);
paths[TB_DP_VIDEO_PATH_OUT] = path;
path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
- TB_DP_AUX_TX_HOPID, 1, "AUX TX");
+ TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
if (!path)
goto err_free;
tb_dp_init_aux_path(path);
paths[TB_DP_AUX_PATH_OUT] = path;
path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
- TB_DP_AUX_RX_HOPID, 1, "AUX RX");
+ TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
if (!path)
goto err_free;
tb_dp_init_aux_path(path);
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index 03e56076b5bc..bb4d1f1d6d0b 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -71,8 +71,8 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
- struct tb_port *out, int max_up,
- int max_down);
+ struct tb_port *out, int link_nr,
+ int max_up, int max_down);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_path,
int transmit_ring, int receive_path,
diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c
index 29e2a4f9c9f5..6b02945624ee 100644
--- a/drivers/thunderbolt/usb4_port.c
+++ b/drivers/thunderbolt/usb4_port.c
@@ -7,9 +7,37 @@
*/
#include <linux/pm_runtime.h>
+#include <linux/component.h>
+#include <linux/property.h>
#include "tb.h"
+static int connector_bind(struct device *dev, struct device *connector, void *data)
+{
+ int ret;
+
+ ret = sysfs_create_link(&dev->kobj, &connector->kobj, "connector");
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_link(&connector->kobj, &dev->kobj, dev_name(dev));
+ if (ret)
+ sysfs_remove_link(&dev->kobj, "connector");
+
+ return ret;
+}
+
+static void connector_unbind(struct device *dev, struct device *connector, void *data)
+{
+ sysfs_remove_link(&connector->kobj, dev_name(dev));
+ sysfs_remove_link(&dev->kobj, "connector");
+}
+
+static const struct component_ops connector_ops = {
+ .bind = connector_bind,
+ .unbind = connector_unbind,
+};
+
static ssize_t link_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -246,6 +274,14 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port)
return ERR_PTR(ret);
}
+ if (dev_fwnode(&usb4->dev)) {
+ ret = component_add(&usb4->dev, &connector_ops);
+ if (ret) {
+ dev_err(&usb4->dev, "failed to add component\n");
+ device_unregister(&usb4->dev);
+ }
+ }
+
pm_runtime_no_callbacks(&usb4->dev);
pm_runtime_set_active(&usb4->dev);
pm_runtime_enable(&usb4->dev);
@@ -265,6 +301,8 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port)
*/
void usb4_port_device_remove(struct usb4_port *usb4)
{
+ if (dev_fwnode(&usb4->dev))
+ component_del(&usb4->dev, &connector_ops);
device_unregister(&usb4->dev);
}
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 01d6b724ca51..c31c0d94d8b3 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -19,13 +19,38 @@
#include "tb.h"
-#define XDOMAIN_DEFAULT_TIMEOUT 1000 /* ms */
-#define XDOMAIN_UUID_RETRIES 10
-#define XDOMAIN_PROPERTIES_RETRIES 10
-#define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10
-#define XDOMAIN_BONDING_WAIT 100 /* ms */
+#define XDOMAIN_SHORT_TIMEOUT 100 /* ms */
+#define XDOMAIN_DEFAULT_TIMEOUT 1000 /* ms */
+#define XDOMAIN_BONDING_TIMEOUT 10000 /* ms */
+#define XDOMAIN_RETRIES 10
#define XDOMAIN_DEFAULT_MAX_HOPID 15
+enum {
+ XDOMAIN_STATE_INIT,
+ XDOMAIN_STATE_UUID,
+ XDOMAIN_STATE_LINK_STATUS,
+ XDOMAIN_STATE_LINK_STATE_CHANGE,
+ XDOMAIN_STATE_LINK_STATUS2,
+ XDOMAIN_STATE_BONDING_UUID_LOW,
+ XDOMAIN_STATE_BONDING_UUID_HIGH,
+ XDOMAIN_STATE_PROPERTIES,
+ XDOMAIN_STATE_ENUMERATED,
+ XDOMAIN_STATE_ERROR,
+};
+
+static const char * const state_names[] = {
+ [XDOMAIN_STATE_INIT] = "INIT",
+ [XDOMAIN_STATE_UUID] = "UUID",
+ [XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS",
+ [XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE",
+ [XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2",
+ [XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW",
+ [XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH",
+ [XDOMAIN_STATE_PROPERTIES] = "PROPERTIES",
+ [XDOMAIN_STATE_ENUMERATED] = "ENUMERATED",
+ [XDOMAIN_STATE_ERROR] = "ERROR",
+};
+
struct xdomain_request_work {
struct work_struct work;
struct tb_xdp_header *pkg;
@@ -235,7 +260,7 @@ static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
}
static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
- uuid_t *uuid)
+ uuid_t *uuid, u64 *remote_route)
{
struct tb_xdp_uuid_response res;
struct tb_xdp_uuid req;
@@ -258,6 +283,8 @@ static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
return ret;
uuid_copy(uuid, &res.src_uuid);
+ *remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo;
+
return 0;
}
@@ -473,6 +500,112 @@ tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
TB_CFG_PKG_XDOMAIN_RESP);
}
+static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
+ u8 sequence, u8 *slw, u8 *tlw,
+ u8 *sls, u8 *tls)
+{
+ struct tb_xdp_link_state_status_response res;
+ struct tb_xdp_link_state_status req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST,
+ sizeof(req));
+
+ memset(&res, 0, sizeof(res));
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
+ &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = tb_xdp_handle_error(&res.err);
+ if (ret)
+ return ret;
+
+ if (res.status != 0)
+ return -EREMOTEIO;
+
+ *slw = res.slw;
+ *tlw = res.tlw;
+ *sls = res.sls;
+ *tls = res.tls;
+
+ return 0;
+}
+
+static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
+ struct tb_xdomain *xd, u8 sequence)
+{
+ struct tb_switch *sw = tb_to_switch(xd->dev.parent);
+ struct tb_xdp_link_state_status_response res;
+ struct tb_port *port = tb_port_at(xd->route, sw);
+ u32 val[2];
+ int ret;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, xd->route, sequence,
+ LINK_STATE_STATUS_RESPONSE, sizeof(res));
+
+ ret = tb_port_read(port, val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val));
+ if (ret)
+ return ret;
+
+ res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
+ LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
+ res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
+ LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
+ res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;
+ res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
+ LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route,
+ u8 sequence, u8 tlw, u8 tls)
+{
+ struct tb_xdp_link_state_change_response res;
+ struct tb_xdp_link_state_change req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST,
+ sizeof(req));
+ req.tlw = tlw;
+ req.tls = tls;
+
+ memset(&res, 0, sizeof(res));
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
+ &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = tb_xdp_handle_error(&res.err);
+ if (ret)
+ return ret;
+
+ return res.status != 0 ? -EREMOTEIO : 0;
+}
+
+static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
+ u8 sequence, u32 status)
+{
+ struct tb_xdp_link_state_change_response res;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE,
+ sizeof(res));
+
+ res.status = status;
+
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
/**
* tb_register_protocol_handler() - Register protocol handler
* @handler: Handler to register
@@ -600,14 +733,13 @@ static void tb_xdp_handle_request(struct work_struct *work)
goto out;
}
- tb_dbg(tb, "%llx: received XDomain request %#x\n", route, pkg->type);
-
xd = tb_xdomain_find_by_route_locked(tb, route);
if (xd)
update_property_block(xd);
switch (pkg->type) {
case PROPERTIES_REQUEST:
+ tb_dbg(tb, "%llx: received XDomain properties request\n", route);
if (xd) {
ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
(const struct tb_xdp_properties *)pkg);
@@ -615,6 +747,9 @@ static void tb_xdp_handle_request(struct work_struct *work)
break;
case PROPERTIES_CHANGED_REQUEST:
+ tb_dbg(tb, "%llx: received XDomain properties changed request\n",
+ route);
+
ret = tb_xdp_properties_changed_response(ctl, route, sequence);
/*
@@ -622,18 +757,51 @@ static void tb_xdp_handle_request(struct work_struct *work)
* the xdomain related to this connection as well in
* case there is a change in services it offers.
*/
- if (xd && device_is_registered(&xd->dev)) {
- queue_delayed_work(tb->wq, &xd->get_properties_work,
- msecs_to_jiffies(50));
- }
+ if (xd && device_is_registered(&xd->dev))
+ queue_delayed_work(tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
break;
case UUID_REQUEST_OLD:
case UUID_REQUEST:
+ tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
break;
+ case LINK_STATE_STATUS_REQUEST:
+ tb_dbg(tb, "%llx: received XDomain link state status request\n",
+ route);
+
+ if (xd) {
+ ret = tb_xdp_link_state_status_response(tb, ctl, xd,
+ sequence);
+ } else {
+ tb_xdp_error_response(ctl, route, sequence,
+ ERROR_NOT_READY);
+ }
+ break;
+
+ case LINK_STATE_CHANGE_REQUEST:
+ tb_dbg(tb, "%llx: received XDomain link state change request\n",
+ route);
+
+ if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) {
+ const struct tb_xdp_link_state_change *lsc =
+ (const struct tb_xdp_link_state_change *)pkg;
+
+ ret = tb_xdp_link_state_change_response(ctl, route,
+ sequence, 0);
+ xd->target_link_width = lsc->tlw;
+ queue_delayed_work(tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+ } else {
+ tb_xdp_error_response(ctl, route, sequence,
+ ERROR_NOT_READY);
+ }
+ break;
+
default:
+ tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type);
tb_xdp_error_response(ctl, route, sequence,
ERROR_NOT_SUPPORTED);
break;
@@ -1000,32 +1168,38 @@ static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
return 0;
}
-static void tb_xdomain_get_uuid(struct work_struct *work)
+static int tb_xdomain_get_uuid(struct tb_xdomain *xd)
{
- struct tb_xdomain *xd = container_of(work, typeof(*xd),
- get_uuid_work.work);
struct tb *tb = xd->tb;
uuid_t uuid;
+ u64 route;
int ret;
dev_dbg(&xd->dev, "requesting remote UUID\n");
- ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
+ ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid,
+ &route);
if (ret < 0) {
- if (xd->uuid_retries-- > 0) {
+ if (xd->state_retries-- > 0) {
dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
- queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
- msecs_to_jiffies(100));
+ return -EAGAIN;
} else {
dev_dbg(&xd->dev, "failed to read remote UUID\n");
}
- return;
+ return ret;
}
dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);
- if (uuid_equal(&uuid, xd->local_uuid))
- dev_dbg(&xd->dev, "intra-domain loop detected\n");
+ if (uuid_equal(&uuid, xd->local_uuid)) {
+ if (route == xd->route)
+ dev_dbg(&xd->dev, "loop back detected\n");
+ else
+ dev_dbg(&xd->dev, "intra-domain loop detected\n");
+
+ /* Don't bond lanes automatically for loops */
+ xd->bonding_possible = false;
+ }
/*
* If the UUID is different, there is another domain connected
@@ -1035,27 +1209,152 @@ static void tb_xdomain_get_uuid(struct work_struct *work)
if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
xd->is_unplugged = true;
- return;
+ return -ENODEV;
}
/* First time fill in the missing UUID */
if (!xd->remote_uuid) {
xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
if (!xd->remote_uuid)
- return;
+ return -ENOMEM;
}
- /* Now we can start the normal properties exchange */
- queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
- msecs_to_jiffies(100));
- queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
- msecs_to_jiffies(1000));
+ return 0;
}
-static void tb_xdomain_get_properties(struct work_struct *work)
+static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
+{
+ struct tb *tb = xd->tb;
+ u8 slw, tlw, sls, tls;
+ int ret;
+
+ dev_dbg(&xd->dev, "sending link state status request to %pUb\n",
+ xd->remote_uuid);
+
+ ret = tb_xdp_link_state_status_request(tb->ctl, xd->route,
+ xd->state_retries, &slw, &tlw, &sls,
+ &tls);
+ if (ret) {
+ if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
+ dev_dbg(&xd->dev,
+ "failed to request remote link status, retrying\n");
+ return -EAGAIN;
+ }
+ dev_dbg(&xd->dev, "failed to receive remote link status\n");
+ return ret;
+ }
+
+ dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls);
+
+ if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) {
+ dev_dbg(&xd->dev, "remote adapter is single lane only\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
+ unsigned int width)
+{
+ struct tb_switch *sw = tb_to_switch(xd->dev.parent);
+ struct tb_port *port = tb_port_at(xd->route, sw);
+ struct tb *tb = xd->tb;
+ u8 tlw, tls;
+ u32 val;
+ int ret;
+
+ if (width == 2)
+ tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL;
+ else if (width == 1)
+ tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE;
+ else
+ return -EINVAL;
+
+ /* Use the current target speed */
+ ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+ tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;
+
+ dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n",
+ tlw, tls);
+
+ ret = tb_xdp_link_state_change_request(tb->ctl, xd->route,
+ xd->state_retries, tlw, tls);
+ if (ret) {
+ if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
+ dev_dbg(&xd->dev,
+ "failed to change remote link state, retrying\n");
+ return -EAGAIN;
+ }
+ dev_err(&xd->dev, "failed request link state change, aborting\n");
+ return ret;
+ }
+
+ dev_dbg(&xd->dev, "received link state change response\n");
+ return 0;
+}
+
+static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
+{
+ struct tb_port *port;
+ int ret, width;
+
+ if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
+ width = 1;
+ } else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
+ width = 2;
+ } else {
+ if (xd->state_retries-- > 0) {
+ dev_dbg(&xd->dev,
+ "link state change request not received yet, retrying\n");
+ return -EAGAIN;
+ }
+ dev_dbg(&xd->dev, "timeout waiting for link change request\n");
+ return -ETIMEDOUT;
+ }
+
+ port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+
+ /*
+ * We can't use tb_xdomain_lane_bonding_enable() here because it
+ * is the other side that initiates lane bonding. So here we
+ * just set the width to both lane adapters and wait for the
+ * link to transition bonded.
+ */
+ ret = tb_port_set_link_width(port->dual_link_port, width);
+ if (ret) {
+ tb_port_warn(port->dual_link_port,
+ "failed to set link width to %d\n", width);
+ return ret;
+ }
+
+ ret = tb_port_set_link_width(port, width);
+ if (ret) {
+ tb_port_warn(port, "failed to set link width to %d\n", width);
+ return ret;
+ }
+
+ ret = tb_port_wait_for_link_width(port, width, XDOMAIN_BONDING_TIMEOUT);
+ if (ret) {
+ dev_warn(&xd->dev, "error waiting for link width to become %d\n",
+ width);
+ return ret;
+ }
+
+ port->bonded = width == 2;
+ port->dual_link_port->bonded = width == 2;
+
+ tb_port_update_credits(port);
+ tb_xdomain_update_link_attributes(xd);
+
+ dev_dbg(&xd->dev, "lane bonding %sabled\n", width == 2 ? "en" : "dis");
+ return 0;
+}
+
+static int tb_xdomain_get_properties(struct tb_xdomain *xd)
{
- struct tb_xdomain *xd = container_of(work, typeof(*xd),
- get_properties_work.work);
struct tb_property_dir *dir;
struct tb *tb = xd->tb;
bool update = false;
@@ -1066,34 +1365,35 @@ static void tb_xdomain_get_properties(struct work_struct *work)
dev_dbg(&xd->dev, "requesting remote properties\n");
ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
- xd->remote_uuid, xd->properties_retries,
+ xd->remote_uuid, xd->state_retries,
&block, &gen);
if (ret < 0) {
- if (xd->properties_retries-- > 0) {
+ if (xd->state_retries-- > 0) {
dev_dbg(&xd->dev,
"failed to request remote properties, retrying\n");
- queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
- msecs_to_jiffies(1000));
+ return -EAGAIN;
} else {
/* Give up now */
dev_err(&xd->dev,
"failed read XDomain properties from %pUb\n",
xd->remote_uuid);
}
- return;
- }
- xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
+ return ret;
+ }
mutex_lock(&xd->lock);
/* Only accept newer generation properties */
- if (xd->remote_properties && gen <= xd->remote_property_block_gen)
+ if (xd->remote_properties && gen <= xd->remote_property_block_gen) {
+ ret = 0;
goto err_free_block;
+ }
dir = tb_property_parse_dir(block, ret);
if (!dir) {
dev_err(&xd->dev, "failed to parse XDomain properties\n");
+ ret = -ENOMEM;
goto err_free_block;
}
@@ -1124,9 +1424,16 @@ static void tb_xdomain_get_properties(struct work_struct *work)
* registered, we notify the userspace that it has changed.
*/
if (!update) {
+ struct tb_port *port;
+
+ /* Now disable lane 1 if bonding was not enabled */
+ port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+ if (!port->bonded)
+ tb_port_disable(port->dual_link_port);
+
if (device_add(&xd->dev)) {
dev_err(&xd->dev, "failed to add XDomain device\n");
- return;
+ return -ENODEV;
}
dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
xd->vendor, xd->device);
@@ -1138,13 +1445,193 @@ static void tb_xdomain_get_properties(struct work_struct *work)
}
enumerate_services(xd);
- return;
+ return 0;
err_free_dir:
tb_property_free_dir(dir);
err_free_block:
kfree(block);
mutex_unlock(&xd->lock);
+
+ return ret;
+}
+
+static void tb_xdomain_queue_uuid(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_UUID;
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_link_status(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_LINK_STATUS;
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_LINK_STATUS2;
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_bonding(struct tb_xdomain *xd)
+{
+ if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) {
+ dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n");
+ xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH;
+ } else {
+ dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n");
+ xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE;
+ }
+
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_BONDING_UUID_LOW;
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_properties(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_PROPERTIES;
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
+{
+ xd->properties_changed_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+}
+
+static void tb_xdomain_state_work(struct work_struct *work)
+{
+ struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
+ int ret, state = xd->state;
+
+ if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT ||
+ state > XDOMAIN_STATE_ERROR))
+ return;
+
+ dev_dbg(&xd->dev, "running state %s\n", state_names[state]);
+
+ switch (state) {
+ case XDOMAIN_STATE_INIT:
+ if (xd->needs_uuid) {
+ tb_xdomain_queue_uuid(xd);
+ } else {
+ tb_xdomain_queue_properties_changed(xd);
+ tb_xdomain_queue_properties(xd);
+ }
+ break;
+
+ case XDOMAIN_STATE_UUID:
+ ret = tb_xdomain_get_uuid(xd);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto retry_state;
+ xd->state = XDOMAIN_STATE_ERROR;
+ } else {
+ tb_xdomain_queue_properties_changed(xd);
+ if (xd->bonding_possible)
+ tb_xdomain_queue_link_status(xd);
+ else
+ tb_xdomain_queue_properties(xd);
+ }
+ break;
+
+ case XDOMAIN_STATE_LINK_STATUS:
+ ret = tb_xdomain_get_link_status(xd);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto retry_state;
+
+ /*
+ * If any of the lane bonding states fail we skip
+ * bonding completely and try to continue from
+ * reading properties.
+ */
+ tb_xdomain_queue_properties(xd);
+ } else {
+ tb_xdomain_queue_bonding(xd);
+ }
+ break;
+
+ case XDOMAIN_STATE_LINK_STATE_CHANGE:
+ ret = tb_xdomain_link_state_change(xd, 2);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto retry_state;
+ tb_xdomain_queue_properties(xd);
+ } else {
+ tb_xdomain_queue_link_status2(xd);
+ }
+ break;
+
+ case XDOMAIN_STATE_LINK_STATUS2:
+ ret = tb_xdomain_get_link_status(xd);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto retry_state;
+ tb_xdomain_queue_properties(xd);
+ } else {
+ tb_xdomain_queue_bonding_uuid_low(xd);
+ }
+ break;
+
+ case XDOMAIN_STATE_BONDING_UUID_LOW:
+ tb_xdomain_lane_bonding_enable(xd);
+ tb_xdomain_queue_properties(xd);
+ break;
+
+ case XDOMAIN_STATE_BONDING_UUID_HIGH:
+ if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN)
+ goto retry_state;
+ tb_xdomain_queue_properties(xd);
+ break;
+
+ case XDOMAIN_STATE_PROPERTIES:
+ ret = tb_xdomain_get_properties(xd);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto retry_state;
+ xd->state = XDOMAIN_STATE_ERROR;
+ } else {
+ xd->state = XDOMAIN_STATE_ENUMERATED;
+ }
+ break;
+
+ case XDOMAIN_STATE_ENUMERATED:
+ tb_xdomain_queue_properties(xd);
+ break;
+
+ case XDOMAIN_STATE_ERROR:
+ break;
+
+ default:
+ dev_warn(&xd->dev, "unexpected state %d\n", state);
+ break;
+ }
+
+ return;
+
+retry_state:
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
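Illustrative aside (not part of the patch): a rough sketch of the happy-path progression the worker above walks when lane bonding is possible. The side whose UUID compares lower drives the LINK_STATE_CHANGE request; the higher side waits in BONDING_UUID_HIGH for the peer. The array is purely illustrative:

/* Lower-UUID side, no retries: INIT -> ... -> ENUMERATED. */
static const int example_lower_uuid_path[] = {
	XDOMAIN_STATE_INIT,
	XDOMAIN_STATE_UUID,
	XDOMAIN_STATE_LINK_STATUS,
	XDOMAIN_STATE_LINK_STATE_CHANGE,
	XDOMAIN_STATE_LINK_STATUS2,
	XDOMAIN_STATE_BONDING_UUID_LOW,
	XDOMAIN_STATE_PROPERTIES,
	XDOMAIN_STATE_ENUMERATED,
};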
static void tb_xdomain_properties_changed(struct work_struct *work)
@@ -1163,13 +1650,13 @@ static void tb_xdomain_properties_changed(struct work_struct *work)
"failed to send properties changed notification, retrying\n");
queue_delayed_work(xd->tb->wq,
&xd->properties_changed_work,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
dev_err(&xd->dev, "failed to send properties changed notification\n");
return;
}
- xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
+ xd->properties_changed_retries = XDOMAIN_RETRIES;
}
static ssize_t device_show(struct device *dev, struct device_attribute *attr,
@@ -1304,31 +1791,17 @@ static void tb_xdomain_release(struct device *dev)
static void start_handshake(struct tb_xdomain *xd)
{
- xd->uuid_retries = XDOMAIN_UUID_RETRIES;
- xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
- xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
-
- if (xd->needs_uuid) {
- queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
- msecs_to_jiffies(100));
- } else {
- /* Start exchanging properties with the other host */
- queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
- msecs_to_jiffies(100));
- queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
- msecs_to_jiffies(1000));
- }
+ xd->state = XDOMAIN_STATE_INIT;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}
static void stop_handshake(struct tb_xdomain *xd)
{
- xd->uuid_retries = 0;
- xd->properties_retries = 0;
- xd->properties_changed_retries = 0;
-
- cancel_delayed_work_sync(&xd->get_uuid_work);
- cancel_delayed_work_sync(&xd->get_properties_work);
cancel_delayed_work_sync(&xd->properties_changed_work);
+ cancel_delayed_work_sync(&xd->state_work);
+ xd->properties_changed_retries = 0;
+ xd->state_retries = 0;
}
static int __maybe_unused tb_xdomain_suspend(struct device *dev)
@@ -1389,8 +1862,7 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
ida_init(&xd->in_hopids);
ida_init(&xd->out_hopids);
mutex_init(&xd->lock);
- INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
- INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
+ INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work);
INIT_DELAYED_WORK(&xd->properties_changed_work,
tb_xdomain_properties_changed);
@@ -1405,6 +1877,7 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
goto err_free_local_uuid;
} else {
xd->needs_uuid = true;
+ xd->bonding_possible = !!down->dual_link_port;
}
device_initialize(&xd->dev);
@@ -1523,9 +1996,9 @@ int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
return ret;
}
- ret = tb_port_wait_for_link_width(port, 2, 100);
+ ret = tb_port_wait_for_link_width(port, 2, XDOMAIN_BONDING_TIMEOUT);
if (ret) {
- tb_port_warn(port, "timeout enabling lane bonding\n");
+ tb_port_warn(port, "failed to enable lane bonding\n");
return ret;
}
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 533d02b38e02..afb2d373dd47 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -588,10 +588,8 @@ static void change_speed(struct tty_struct *tty, struct serial_state *info,
}
if (!(cflag & PARODD))
cval |= UART_LCR_EPAR;
-#ifdef CMSPAR
if (cflag & CMSPAR)
cval |= UART_LCR_SPAR;
-#endif
/* Determine divisor based on baud rate */
baud = tty_get_baud_rate(tty);
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index 9e8ccb8ed6d6..c7968aecd870 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -405,6 +405,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
err_tty_register_device_failed:
free_irq(irq, qtty);
err_dec_line_count:
+ tty_port_destroy(&qtty->port);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();
@@ -426,6 +427,7 @@ static int goldfish_tty_remove(struct platform_device *pdev)
iounmap(qtty->base);
qtty->base = NULL;
free_irq(qtty->irq, pdev);
+ tty_port_destroy(&qtty->port);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 8d60e0ff67b4..4f9264d005c0 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -87,6 +87,25 @@ config HVC_DCC
driver. This console is used through a JTAG only on ARM. If you don't have
a JTAG then you probably don't want this option.
+config HVC_DCC_SERIALIZE_SMP
+ bool "Use DCC only on CPU core 0"
+ depends on SMP && HVC_DCC
+ help
+ This is a DEBUG option to serialize all console input and output to CPU 0.
+ Some external debuggers do not handle reads/writes from/to DCC on more
+ than one CPU core. Each core has its own DCC device registers, so when a
+ CPU core reads or writes from/to DCC, it only accesses its own DCC device.
+ Since kernel code can run on any CPU core, every time the kernel wants to
+ write to the console, it might write to a different DCC.
+
+ In SMP mode, external debuggers create multiple windows, and each window
+ shows the DCC output only from that core's DCC. The result is that
+ console output is either lost or scattered across windows.
+
+ Enable this option only if you are sure that you do not need features like
+ CPU hotplug to work. For example, during early chipset bringups without
+ debug serial console support. If unsure, say N.
+
config HVC_RISCV_SBI
bool "RISC-V SBI console support"
depends on RISCV_SBI_V01
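Illustrative aside (not part of the patch): a sketch of the pattern the option above enables, with assumed names; the actual implementation follows in hvc_dcc.c below. Output from secondary CPUs is buffered in a kfifo and drained by a work item pinned to CPU 0, so only core 0 ever touches its DCC registers.

#include <linux/kfifo.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static DEFINE_KFIFO(example_outbuf, unsigned char, 128);

static void example_drain(struct work_struct *work)
{
	unsigned char ch;

	while (kfifo_get(&example_outbuf, &ch))
		; /* write ch to the core 0 DCC here */
}
static DECLARE_WORK(example_drain_work, example_drain);

static void example_putchar(unsigned char ch)
{
	if (smp_processor_id()) {
		kfifo_put(&example_outbuf, ch);
		schedule_work_on(0, &example_drain_work);
	} else {
		/* already on core 0: write ch to the DCC directly */
	}
}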
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index bd61f9372d83..1751108cf763 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -1,10 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2010, 2014, 2022 The Linux Foundation. All rights reserved. */
#include <linux/console.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
#include <linux/init.h>
+#include <linux/kfifo.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
#include <asm/dcc.h>
#include <asm/processor.h>
@@ -15,6 +20,15 @@
#define DCC_STATUS_RX (1 << 30)
#define DCC_STATUS_TX (1 << 29)
+#define DCC_INBUF_SIZE 128
+#define DCC_OUTBUF_SIZE 1024
+
+/* Lock to serialize access to DCC fifo */
+static DEFINE_SPINLOCK(dcc_lock);
+
+static DEFINE_KFIFO(inbuf, unsigned char, DCC_INBUF_SIZE);
+static DEFINE_KFIFO(outbuf, unsigned char, DCC_OUTBUF_SIZE);
+
static void dcc_uart_console_putchar(struct uart_port *port, unsigned char ch)
{
while (__dcc_getstatus() & DCC_STATUS_TX)
@@ -67,24 +81,176 @@ static int hvc_dcc_get_chars(uint32_t vt, char *buf, int count)
return i;
}
+/*
+ * Check if the DCC is enabled. If CONFIG_HVC_DCC_SERIALIZE_SMP is enabled,
+ * then we assume that this function will be called first on core0. That way,
+ * dcc_core0_available will be true only if it's available on core0.
+ */
static bool hvc_dcc_check(void)
{
unsigned long time = jiffies + (HZ / 10);
+ static bool dcc_core0_available;
+
+ /*
+ * If we're not on core 0, but we previously confirmed that DCC is
+ * active, then just return true.
+ */
+ int cpu = get_cpu();
+
+ if (IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP) && cpu && dcc_core0_available) {
+ put_cpu();
+ return true;
+ }
+
+ put_cpu();
/* Write a test character to check if it is handled */
__dcc_putchar('\n');
while (time_is_after_jiffies(time)) {
- if (!(__dcc_getstatus() & DCC_STATUS_TX))
+ if (!(__dcc_getstatus() & DCC_STATUS_TX)) {
+ dcc_core0_available = true;
return true;
+ }
}
return false;
}
+/*
+ * Workqueue function that writes the output FIFO to the DCC on core 0.
+ */
+static void dcc_put_work(struct work_struct *work)
+{
+ unsigned char ch;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dcc_lock, irqflags);
+
+ /* While there's data in the output FIFO, write it to the DCC */
+ while (kfifo_get(&outbuf, &ch))
+ hvc_dcc_put_chars(0, &ch, 1);
+
+ /* While we're at it, check for any input characters */
+ while (!kfifo_is_full(&inbuf)) {
+ if (!hvc_dcc_get_chars(0, &ch, 1))
+ break;
+ kfifo_put(&inbuf, ch);
+ }
+
+ spin_unlock_irqrestore(&dcc_lock, irqflags);
+}
+
+static DECLARE_WORK(dcc_pwork, dcc_put_work);
+
+/*
+ * Workqueue function that reads characters from DCC and puts them into the
+ * input FIFO.
+ */
+static void dcc_get_work(struct work_struct *work)
+{
+ unsigned char ch;
+ unsigned long irqflags;
+
+ /*
+ * Read characters from DCC and put them into the input FIFO, as
+ * long as there is room and we have characters to read.
+ */
+ spin_lock_irqsave(&dcc_lock, irqflags);
+
+ while (!kfifo_is_full(&inbuf)) {
+ if (!hvc_dcc_get_chars(0, &ch, 1))
+ break;
+ kfifo_put(&inbuf, ch);
+ }
+ spin_unlock_irqrestore(&dcc_lock, irqflags);
+}
+
+static DECLARE_WORK(dcc_gwork, dcc_get_work);
+
+/*
+ * Write characters directly to the DCC if we're on core 0 and the FIFO
+ * is empty, or write them to the FIFO if we're not.
+ */
+static int hvc_dcc0_put_chars(u32 vt, const char *buf, int count)
+{
+ int len;
+ unsigned long irqflags;
+
+ if (!IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP))
+ return hvc_dcc_put_chars(vt, buf, count);
+
+ spin_lock_irqsave(&dcc_lock, irqflags);
+ if (smp_processor_id() || (!kfifo_is_empty(&outbuf))) {
+ len = kfifo_in(&outbuf, buf, count);
+ spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+ /*
+ * We just push data to the output FIFO, so schedule the
+ * workqueue that will actually write that data to DCC.
+ * CPU hotplug is disabled in dcc_init so CPU0 cannot be
+ * offlined after the cpu online check.
+ */
+ if (cpu_online(0))
+ schedule_work_on(0, &dcc_pwork);
+
+ return len;
+ }
+
+ /*
+ * If we're already on core 0, and the FIFO is empty, then just
+ * write the data to DCC.
+ */
+ len = hvc_dcc_put_chars(vt, buf, count);
+ spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+ return len;
+}
+
+/*
+ * Read characters directly from the DCC if we're on core 0 and the FIFO
+ * is empty, or read them from the FIFO if we're not.
+ */
+static int hvc_dcc0_get_chars(u32 vt, char *buf, int count)
+{
+ int len;
+ unsigned long irqflags;
+
+ if (!IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP))
+ return hvc_dcc_get_chars(vt, buf, count);
+
+ spin_lock_irqsave(&dcc_lock, irqflags);
+
+ if (smp_processor_id() || (!kfifo_is_empty(&inbuf))) {
+ len = kfifo_out(&inbuf, buf, count);
+ spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+ /*
+ * If the FIFO was empty, there may be characters in the DCC
+ * that we haven't read yet. Schedule a workqueue to fill
+ * the input FIFO, so that the next time this function is
+ * called, we'll have data. CPU hotplug is disabled in dcc_init
+ * so CPU0 cannot be offlined after the cpu online check.
+ */
+ if (!len && cpu_online(0))
+ schedule_work_on(0, &dcc_gwork);
+
+ return len;
+ }
+
+ /*
+ * If we're already on core 0, and the FIFO is empty, then just
+ * read the data from DCC.
+ */
+ len = hvc_dcc_get_chars(vt, buf, count);
+ spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+ return len;
+}
+
static const struct hv_ops hvc_dcc_get_put_ops = {
- .get_chars = hvc_dcc_get_chars,
- .put_chars = hvc_dcc_put_chars,
+ .get_chars = hvc_dcc0_get_chars,
+ .put_chars = hvc_dcc0_put_chars,
};
static int __init hvc_dcc_console_init(void)
@@ -108,6 +274,26 @@ static int __init hvc_dcc_init(void)
if (!hvc_dcc_check())
return -ENODEV;
+ if (IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP)) {
+ pr_warn("\n");
+ pr_warn("********************************************************************\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("** **\n");
+ pr_warn("** HVC_DCC_SERIALIZE_SMP SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
+ pr_warn("** **\n");
+ pr_warn("** This means that this is a DEBUG kernel and unsafe for **\n");
+ pr_warn("** production use and has important feature like CPU hotplug **\n");
+ pr_warn("** disabled. **\n");
+ pr_warn("** **\n");
+ pr_warn("** If you see this message and you are not debugging the **\n");
+ pr_warn("** kernel, report this immediately to your vendor! **\n");
+ pr_warn("** **\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("********************************************************************\n");
+
+ cpu_hotplug_disable();
+ }
+
p = hvc_alloc(0, 0, &hvc_dcc_get_put_ops, 128);
return PTR_ERR_OR_ZERO(p);
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 056ae21a5121..794c7b18aa06 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -13,12 +13,12 @@
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <asm/hvconsole.h>
-#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/hvsi.h>
#include <asm/udbg.h>
@@ -342,9 +342,9 @@ void __init hvc_opal_init_early(void)
* path, so we hard wire it
*/
opal = of_find_node_by_path("/ibm,opal/consoles");
- if (opal)
+ if (opal) {
pr_devel("hvc_opal: Found consoles in new location\n");
- if (!opal) {
+ } else {
opal = of_find_node_by_path("/ibm,opal");
if (opal)
pr_devel("hvc_opal: "
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index 72b11aa7e0a6..736b230f5ec0 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -28,10 +28,10 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/console.h>
+#include <linux/of.h>
#include <asm/hvconsole.h>
#include <asm/vio.h>
-#include <asm/prom.h>
#include <asm/hvsi.h>
#include <asm/udbg.h>
#include <asm/machdep.h>
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index ebaf7500f48f..7c23112dc923 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -253,7 +253,7 @@ static int xen_hvm_console_init(void)
if (r < 0 || v == 0)
goto err;
gfn = v;
- info->intf = xen_remap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE);
+ info->intf = memremap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB);
if (info->intf == NULL)
goto err;
info->vtermno = HVC_COOKIE;
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 245da1dfd818..9b7e8246a464 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -581,10 +581,9 @@ static int hvcs_io(struct hvcs_struct *hvcsd)
spin_unlock_irqrestore(&hvcsd->lock, flags);
/* This is synch -- FIXME :js: it is not! */
- if(got)
+ if (got)
tty_flip_buffer_push(&hvcsd->port);
-
- if (!got) {
+ else {
/* Do this _after_ the flip_buffer_push */
spin_lock_irqsave(&hvcsd->lock, flags);
vio_enable_interrupts(hvcsd->vdev);
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index aa81f4835fef..a200d01eceed 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -26,13 +26,13 @@
#include <linux/module.h>
#include <linux/major.h>
#include <linux/kernel.h>
+#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <asm/hvcall.h>
#include <asm/hvconsole.h>
-#include <asm/prom.h>
#include <linux/uaccess.h>
#include <asm/vio.h>
#include <asm/param.h>
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 6ebd3e4ed859..70b982b2c6b2 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -528,7 +528,6 @@ static int mxser_set_baud(struct tty_struct *tty, speed_t newspd)
outb(quot >> 8, info->ioaddr + UART_DLM); /* MS of divisor */
outb(cval, info->ioaddr + UART_LCR); /* reset DLAB */
-#ifdef BOTHER
if (C_BAUD(tty) == BOTHER) {
quot = MXSER_BAUD_BASE % newspd;
quot *= 8;
@@ -539,9 +538,9 @@ static int mxser_set_baud(struct tty_struct *tty, speed_t newspd)
quot /= newspd;
mxser_set_must_enum_value(info->ioaddr, quot);
- } else
-#endif
+ } else {
mxser_set_must_enum_value(info->ioaddr, 0);
+ }
return 0;
}
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index fd8b86dde525..137eebdcfda9 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -444,6 +444,25 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
return modembits;
}
+static void gsm_hex_dump_bytes(const char *fname, const u8 *data,
+ unsigned long len)
+{
+ char *prefix;
+
+ if (!fname) {
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, data, len,
+ true);
+ return;
+ }
+
+ prefix = kasprintf(GFP_KERNEL, "%s: ", fname);
+ if (!prefix)
+ return;
+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, data, len,
+ true);
+ kfree(prefix);
+}
+
/**
* gsm_print_packet - display a frame for debug
* @hdr: header to print before decode
@@ -508,7 +527,7 @@ static void gsm_print_packet(const char *hdr, int addr, int cr,
else
pr_cont("(F)");
- print_hex_dump_bytes("", DUMP_PREFIX_NONE, data, dlen);
+ gsm_hex_dump_bytes(NULL, data, dlen);
}
@@ -698,9 +717,7 @@ static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci)
}
if (debug & 4)
- print_hex_dump_bytes("gsm_data_kick: ",
- DUMP_PREFIX_OFFSET,
- gsm->txframe, len);
+ gsm_hex_dump_bytes(__func__, gsm->txframe, len);
if (gsmld_output(gsm, gsm->txframe, len) <= 0)
break;
/* FIXME: Can eliminate one SOF in many more cases */
@@ -749,7 +766,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
*--dp = msg->ctrl;
if (gsm->initiator)
- *--dp = (msg->addr << 2) | 2 | EA;
+ *--dp = (msg->addr << 2) | CR | EA;
else
*--dp = (msg->addr << 2) | EA;
*fcs = gsm_fcs_add_block(INIT_FCS, dp , msg->data - dp);
@@ -1907,10 +1924,6 @@ static void gsm_queue(struct gsm_mux *gsm)
case UI|PF:
case UIH:
case UIH|PF:
-#if 0
- if (cr)
- goto invalid;
-#endif
if (dlci == NULL || dlci->state != DLCI_OPEN) {
gsm_command(gsm, address, DM|PF);
return;
@@ -2448,8 +2461,7 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
return -ENOSPC;
}
if (debug & 4)
- print_hex_dump_bytes("gsmld_output: ", DUMP_PREFIX_OFFSET,
- data, len);
+ gsm_hex_dump_bytes(__func__, data, len);
return gsm->tty->ops->write(gsm->tty, data, len);
}
@@ -2525,8 +2537,7 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char flags = TTY_NORMAL;
if (debug & 4)
- print_hex_dump_bytes("gsmld_receive: ", DUMP_PREFIX_OFFSET,
- cp, count);
+ gsm_hex_dump_bytes(__func__, cp, count);
for (; count; count--, cp++) {
if (fp)
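
The __gsm_data_queue() hunk above now uses the CR define instead of the literal 2 when building the 3GPP TS 27.010 address octet (bit 0 is the EA bit, bit 1 the C/R bit, bits 2-7 the DLCI address). A hedged, stand-alone illustration of that octet layout, using stand-in EXAMPLE_* macros rather than the driver's own defines:

#include <linux/types.h>

#define EXAMPLE_EA	0x01	/* extended address bit, always set here */
#define EXAMPLE_CR	0x02	/* command/response bit */

/* Build the address octet for a DLCI; commands from the initiator set C/R. */
static u8 example_gsm_address(u8 dlci, bool initiator)
{
	u8 addr = (dlci << 2) | EXAMPLE_EA;

	if (initiator)
		addr |= EXAMPLE_CR;
	return addr;
}
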
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index efc72104c840..640c9e871044 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1220,21 +1220,34 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
process_echoes(tty);
}
+static bool n_tty_is_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
+{
+ return c == START_CHAR(tty) || c == STOP_CHAR(tty);
+}
+
+/* Returns true if c is consumed as flow-control character */
+static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
+{
+ if (!n_tty_is_char_flow_ctrl(tty, c))
+ return false;
+
+ if (c == START_CHAR(tty)) {
+ start_tty(tty);
+ process_echoes(tty);
+ return true;
+ }
+
+ /* STOP_CHAR */
+ stop_tty(tty);
+ return true;
+}
+
static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
{
struct n_tty_data *ldata = tty->disc_data;
- if (I_IXON(tty)) {
- if (c == START_CHAR(tty)) {
- start_tty(tty);
- process_echoes(tty);
- return;
- }
- if (c == STOP_CHAR(tty)) {
- stop_tty(tty);
- return;
- }
- }
+ if (I_IXON(tty) && n_tty_receive_char_flow_ctrl(tty, c))
+ return;
if (L_ISIG(tty)) {
if (c == INTR_CHAR(tty)) {
@@ -1975,6 +1988,35 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
return ldata->read_tail != canon_head;
}
+/*
+ * If we finished a read at the exact location of an
+ * EOF (special EOL character that's a __DISABLED_CHAR)
+ * in the stream, silently eat the EOF.
+ */
+static void canon_skip_eof(struct tty_struct *tty)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ size_t tail, canon_head;
+
+ canon_head = smp_load_acquire(&ldata->canon_head);
+ tail = ldata->read_tail;
+
+ // No data?
+ if (tail == canon_head)
+ return;
+
+ // See if the tail position is EOF in the circular buffer
+ tail &= (N_TTY_BUF_SIZE - 1);
+ if (!test_bit(tail, ldata->read_flags))
+ return;
+ if (read_buf(ldata, tail) != __DISABLED_CHAR)
+ return;
+
+ // Clear the EOL bit, skip the EOF char.
+ clear_bit(tail, ldata->read_flags);
+ smp_store_release(&ldata->read_tail, ldata->read_tail + 1);
+}
+
/**
* job_control - check job control
* @tty: tty
@@ -2045,7 +2087,14 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
*/
if (*cookie) {
if (ldata->icanon && !L_EXTPROC(tty)) {
- if (canon_copy_from_read_buf(tty, &kb, &nr))
+ /*
+ * If we have filled the user buffer, see
+ * if we should skip an EOF character before
+ * releasing the lock and returning done.
+ */
+ if (!nr)
+ canon_skip_eof(tty);
+ else if (canon_copy_from_read_buf(tty, &kb, &nr))
return kb - kbuf;
} else {
if (copy_from_read_buf(tty, &kb, &nr))
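
canon_skip_eof() above leans on the usual n_tty ring-buffer idiom: read_tail grows monotonically and is masked with N_TTY_BUF_SIZE - 1 (a power of two) to index the buffer, while read_flags marks which slots carry an EOL/EOF mark. A stand-alone sketch of that idiom with made-up names and a 16-slot ring, for illustration only:

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_BUF_SIZE 16	/* must be a power of two */

struct example_ring {
	unsigned long head;			/* ever-increasing producer index */
	unsigned long tail;			/* ever-increasing consumer index */
	unsigned char buf[EXAMPLE_BUF_SIZE];
	DECLARE_BITMAP(eof, EXAMPLE_BUF_SIZE);	/* one mark bit per slot */
};

/* Skip the slot at the tail if it is marked EOF; return true if skipped. */
static bool example_skip_eof(struct example_ring *r)
{
	size_t slot = r->tail & (EXAMPLE_BUF_SIZE - 1);

	if (r->tail == r->head)
		return false;		/* empty */
	if (!test_bit(slot, r->eof))
		return false;		/* ordinary data byte */

	clear_bit(slot, r->eof);
	r->tail++;			/* the real code uses smp_store_release() */
	return true;
}
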
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index a8830e15a22c..696030cfcb09 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -17,6 +17,8 @@
struct uart_8250_dma {
int (*tx_dma)(struct uart_8250_port *p);
int (*rx_dma)(struct uart_8250_port *p);
+ void (*prepare_tx_dma)(struct uart_8250_port *p);
+ void (*prepare_rx_dma)(struct uart_8250_port *p);
/* Filter function */
dma_filter_fn fn;
@@ -83,6 +85,7 @@ struct serial8250_config {
#define UART_CAP_MINI BIT(17) /* Mini UART on BCM283X family lacks:
* STOP PARITY EPAR SPAR WLEN5 WLEN6
*/
+#define UART_CAP_NOTEMT BIT(18) /* UART without interrupt on TEMT available */
#define UART_BUG_QUOT BIT(0) /* UART has buggy quot LSB */
#define UART_BUG_TXEN BIT(1) /* UART has buggy TX IIR status */
@@ -120,6 +123,28 @@ static inline void serial_out(struct uart_8250_port *up, int offset, int value)
up->port.serial_out(&up->port, offset, value);
}
+/*
+ * For the 16C950
+ */
+static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
+{
+ serial_out(up, UART_SCR, offset);
+ serial_out(up, UART_ICR, value);
+}
+
+static unsigned int __maybe_unused serial_icr_read(struct uart_8250_port *up,
+ int offset)
+{
+ unsigned int value;
+
+ serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
+ serial_out(up, UART_SCR, offset);
+ value = serial_in(up, UART_ICR);
+ serial_icr_write(up, UART_ACR, up->acr);
+
+ return value;
+}
+
void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p);
static inline int serial_dl_read(struct uart_8250_port *up)
@@ -302,6 +327,22 @@ extern int serial8250_rx_dma(struct uart_8250_port *);
extern void serial8250_rx_dma_flush(struct uart_8250_port *);
extern int serial8250_request_dma(struct uart_8250_port *);
extern void serial8250_release_dma(struct uart_8250_port *);
+
+static inline void serial8250_do_prepare_tx_dma(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+
+ if (dma->prepare_tx_dma)
+ dma->prepare_tx_dma(p);
+}
+
+static inline void serial8250_do_prepare_rx_dma(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+
+ if (dma->prepare_rx_dma)
+ dma->prepare_rx_dma(p);
+}
#else
static inline int serial8250_tx_dma(struct uart_8250_port *p)
{
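
The new prepare_tx_dma()/prepare_rx_dma() members of struct uart_8250_dma are optional hooks the core invokes just before arming a transfer (see the 8250_dma.c hunks below). A hedged sketch of how a vendor glue driver might install them, with hypothetical example_* callbacks rather than any real driver's code:

#include "8250.h"	/* struct uart_8250_port, struct uart_8250_dma */

/* Program vendor-specific DMA control registers before each TX transfer. */
static void example_prepare_tx_dma(struct uart_8250_port *p)
{
	/* e.g. derive a burst/block size from p->dma->tx_size and write it */
}

/* Re-arm the receive side, e.g. re-enable the per-channel request line. */
static void example_prepare_rx_dma(struct uart_8250_port *p)
{
}

static void example_install_dma_hooks(struct uart_8250_port *up)
{
	up->dma->prepare_tx_dma = example_prepare_tx_dma;
	up->dma->prepare_rx_dma = example_prepare_rx_dma;
}
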
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 93fe10c680fb..9d2a7856784f 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -429,6 +429,8 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
timer_setup(&vuart->unthrottle_timer, aspeed_vuart_unthrottle_exp, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
memset(&port, 0, sizeof(port));
port.port.private_data = vuart;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 01d30f6ed8fb..cfbd2de0ca6e 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -32,7 +32,6 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <linux/pm_runtime.h>
#include <linux/io.h>
#ifdef CONFIG_SPARC
#include <linux/sunserialcore.h>
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index b3c3f7e5851a..7133fceed35e 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -34,7 +34,7 @@ static void __dma_tx_complete(void *param)
uart_write_wakeup(&p->port);
ret = serial8250_tx_dma(p);
- if (ret)
+ if (ret || !dma->tx_running)
serial8250_set_THRI(p);
spin_unlock_irqrestore(&p->port.lock, flags);
@@ -80,12 +80,13 @@ int serial8250_tx_dma(struct uart_8250_port *p)
if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
/* We have been called from __dma_tx_complete() */
- serial8250_rpm_put_tx(p);
return 0;
}
dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ serial8250_do_prepare_tx_dma(p);
+
desc = dmaengine_prep_slave_single(dma->txchan,
dma->tx_addr + xmit->tail,
dma->tx_size, DMA_MEM_TO_DEV,
@@ -123,6 +124,8 @@ int serial8250_rx_dma(struct uart_8250_port *p)
if (dma->rx_running)
return 0;
+ serial8250_do_prepare_rx_dma(p);
+
desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
dma->rx_size, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 1769808031c5..f57bbd32ef11 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -12,13 +12,13 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/slab.h>
@@ -33,30 +33,28 @@
/* Offsets for the DesignWare specific registers */
#define DW_UART_USR 0x1f /* UART Status Register */
+#define DW_UART_DMASA 0xa8 /* DMA Software Ack */
+
+#define OCTEON_UART_USR 0x27 /* UART Status Register */
+
+#define RZN1_UART_TDMACR 0x10c /* DMA Control Register Transmit Mode */
+#define RZN1_UART_RDMACR 0x110 /* DMA Control Register Receive Mode */
/* DesignWare specific register fields */
#define DW_UART_MCR_SIRE BIT(6)
-struct dw8250_data {
- struct dw8250_port_data data;
+/* Renesas specific register fields */
+#define RZN1_UART_xDMACR_DMA_EN BIT(0)
+#define RZN1_UART_xDMACR_1_WORD_BURST (0 << 1)
+#define RZN1_UART_xDMACR_4_WORD_BURST (1 << 1)
+#define RZN1_UART_xDMACR_8_WORD_BURST (3 << 1)
+#define RZN1_UART_xDMACR_BLK_SZ(x) ((x) << 3)
- u8 usr_reg;
- int msr_mask_on;
- int msr_mask_off;
- struct clk *clk;
- struct clk *pclk;
- struct notifier_block clk_notifier;
- struct work_struct clk_work;
- struct reset_control *rst;
-
- unsigned int skip_autocfg:1;
- unsigned int uart_16550_compatible:1;
-};
-
-static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
-{
- return container_of(data, struct dw8250_data, data);
-}
+/* Quirks */
+#define DW_UART_QUIRK_OCTEON BIT(0)
+#define DW_UART_QUIRK_ARMADA_38X BIT(1)
+#define DW_UART_QUIRK_SKIP_SET_RATE BIT(2)
+#define DW_UART_QUIRK_IS_DMA_FC BIT(3)
static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb)
{
@@ -238,6 +236,8 @@ static int dw8250_handle_irq(struct uart_port *p)
struct uart_8250_port *up = up_to_u8250p(p);
struct dw8250_data *d = to_dw8250_data(p->private_data);
unsigned int iir = p->serial_in(p, UART_IIR);
+ bool rx_timeout = (iir & 0x3f) == UART_IIR_RX_TIMEOUT;
+ unsigned int quirks = d->pdata->quirks;
unsigned int status;
unsigned long flags;
@@ -251,7 +251,7 @@ static int dw8250_handle_irq(struct uart_port *p)
* This problem has only been observed so far when not in DMA mode
* so we limit the workaround only to non-DMA mode.
*/
- if (!up->dma && ((iir & 0x3f) == UART_IIR_RX_TIMEOUT)) {
+ if (!up->dma && rx_timeout) {
spin_lock_irqsave(&p->lock, flags);
status = p->serial_in(p, UART_LSR);
@@ -261,12 +261,21 @@ static int dw8250_handle_irq(struct uart_port *p)
spin_unlock_irqrestore(&p->lock, flags);
}
+ /* Manually stop the Rx DMA transfer when acting as flow controller */
+ if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) {
+ status = p->serial_in(p, UART_LSR);
+ if (status & (UART_LSR_DR | UART_LSR_BI)) {
+ dw8250_writel_ext(p, RZN1_UART_RDMACR, 0);
+ dw8250_writel_ext(p, DW_UART_DMASA, 1);
+ }
+ }
+
if (serial8250_handle_irq(p, iir))
return 1;
if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
/* Clear the USR */
- (void)p->serial_in(p, d->usr_reg);
+ (void)p->serial_in(p, d->pdata->usr_reg);
return 1;
}
@@ -384,11 +393,48 @@ static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
return param == chan->device->dev;
}
+static u32 dw8250_rzn1_get_dmacr_burst(int max_burst)
+{
+ if (max_burst >= 8)
+ return RZN1_UART_xDMACR_8_WORD_BURST;
+ else if (max_burst >= 4)
+ return RZN1_UART_xDMACR_4_WORD_BURST;
+ else
+ return RZN1_UART_xDMACR_1_WORD_BURST;
+}
+
+static void dw8250_prepare_tx_dma(struct uart_8250_port *p)
+{
+ struct uart_port *up = &p->port;
+ struct uart_8250_dma *dma = p->dma;
+ u32 val;
+
+ dw8250_writel_ext(up, RZN1_UART_TDMACR, 0);
+ val = dw8250_rzn1_get_dmacr_burst(dma->txconf.dst_maxburst) |
+ RZN1_UART_xDMACR_BLK_SZ(dma->tx_size) |
+ RZN1_UART_xDMACR_DMA_EN;
+ dw8250_writel_ext(up, RZN1_UART_TDMACR, val);
+}
+
+static void dw8250_prepare_rx_dma(struct uart_8250_port *p)
+{
+ struct uart_port *up = &p->port;
+ struct uart_8250_dma *dma = p->dma;
+ u32 val;
+
+ dw8250_writel_ext(up, RZN1_UART_RDMACR, 0);
+ val = dw8250_rzn1_get_dmacr_burst(dma->rxconf.src_maxburst) |
+ RZN1_UART_xDMACR_BLK_SZ(dma->rx_size) |
+ RZN1_UART_xDMACR_DMA_EN;
+ dw8250_writel_ext(up, RZN1_UART_RDMACR, val);
+}
+
static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
{
struct device_node *np = p->dev->of_node;
if (np) {
+ unsigned int quirks = data->pdata->quirks;
int id;
/* get index of serial line, if found in DT aliases */
@@ -396,12 +442,11 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
if (id >= 0)
p->line = id;
#ifdef CONFIG_64BIT
- if (of_device_is_compatible(np, "cavium,octeon-3860-uart")) {
+ if (quirks & DW_UART_QUIRK_OCTEON) {
p->serial_in = dw8250_serial_inq;
p->serial_out = dw8250_serial_outq;
p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
p->type = PORT_OCTEON;
- data->usr_reg = 0x27;
data->skip_autocfg = true;
}
#endif
@@ -412,10 +457,16 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
p->serial_out = dw8250_serial_out32be;
}
- if (of_device_is_compatible(np, "marvell,armada-38x-uart"))
+ if (quirks & DW_UART_QUIRK_ARMADA_38X)
p->serial_out = dw8250_serial_out38x;
- if (of_device_is_compatible(np, "starfive,jh7100-uart"))
+ if (quirks & DW_UART_QUIRK_SKIP_SET_RATE)
p->set_termios = dw8250_do_set_termios;
+ if (quirks & DW_UART_QUIRK_IS_DMA_FC) {
+ data->data.dma.txconf.device_fc = 1;
+ data->data.dma.rxconf.device_fc = 1;
+ data->data.dma.prepare_tx_dma = dw8250_prepare_tx_dma;
+ data->data.dma.prepare_rx_dma = dw8250_prepare_rx_dma;
+ }
} else if (acpi_dev_present("APMC0D08", NULL, -1)) {
p->iotype = UPIO_MEM32;
@@ -433,21 +484,30 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
}
}
+static void dw8250_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static void dw8250_reset_control_assert(void *data)
+{
+ reset_control_assert(data);
+}
+
static int dw8250_probe(struct platform_device *pdev)
{
struct uart_8250_port uart = {}, *up = &uart;
- struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct uart_port *p = &up->port;
struct device *dev = &pdev->dev;
struct dw8250_data *data;
+ struct resource *regs;
int irq;
int err;
u32 val;
- if (!regs) {
- dev_err(dev, "no registers defined\n");
- return -EINVAL;
- }
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return dev_err_probe(dev, -EINVAL, "no registers defined\n");
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -476,7 +536,7 @@ static int dw8250_probe(struct platform_device *pdev)
return -ENOMEM;
data->data.dma.fn = dw8250_fallback_dma_filter;
- data->usr_reg = DW_UART_USR;
+ data->pdata = device_get_match_data(p->dev);
p->private_data = &data->data;
data->uart_16550_compatible = device_property_read_bool(dev,
@@ -532,37 +592,41 @@ static int dw8250_probe(struct platform_device *pdev)
err = clk_prepare_enable(data->clk);
if (err)
- dev_warn(dev, "could not enable optional baudclk: %d\n", err);
+ return dev_err_probe(dev, err, "could not enable optional baudclk\n");
+
+ err = devm_add_action_or_reset(dev, dw8250_clk_disable_unprepare, data->clk);
+ if (err)
+ return err;
if (data->clk)
p->uartclk = clk_get_rate(data->clk);
/* If no clock rate is defined, fail. */
- if (!p->uartclk) {
- dev_err(dev, "clock rate not defined\n");
- err = -EINVAL;
- goto err_clk;
- }
+ if (!p->uartclk)
+ return dev_err_probe(dev, -EINVAL, "clock rate not defined\n");
data->pclk = devm_clk_get_optional(dev, "apb_pclk");
- if (IS_ERR(data->pclk)) {
- err = PTR_ERR(data->pclk);
- goto err_clk;
- }
+ if (IS_ERR(data->pclk))
+ return PTR_ERR(data->pclk);
err = clk_prepare_enable(data->pclk);
- if (err) {
- dev_err(dev, "could not enable apb_pclk\n");
- goto err_clk;
- }
+ if (err)
+ return dev_err_probe(dev, err, "could not enable apb_pclk\n");
+
+ err = devm_add_action_or_reset(dev, dw8250_clk_disable_unprepare, data->pclk);
+ if (err)
+ return err;
data->rst = devm_reset_control_get_optional_exclusive(dev, NULL);
- if (IS_ERR(data->rst)) {
- err = PTR_ERR(data->rst);
- goto err_pclk;
- }
+ if (IS_ERR(data->rst))
+ return PTR_ERR(data->rst);
+
reset_control_deassert(data->rst);
+ err = devm_add_action_or_reset(dev, dw8250_reset_control_assert, data->rst);
+ if (err)
+ return err;
+
dw8250_quirks(p, data);
/* If the Busy Functionality is not implemented, don't handle it */
@@ -580,10 +644,8 @@ static int dw8250_probe(struct platform_device *pdev)
}
data->data.line = serial8250_register_8250_port(up);
- if (data->data.line < 0) {
- err = data->data.line;
- goto err_reset;
- }
+ if (data->data.line < 0)
+ return data->data.line;
/*
* Some platforms may provide a reference clock shared between several
@@ -593,9 +655,8 @@ static int dw8250_probe(struct platform_device *pdev)
if (data->clk) {
err = clk_notifier_register(data->clk, &data->clk_notifier);
if (err)
- dev_warn(p->dev, "Failed to set the clock notifier\n");
- else
- queue_work(system_unbound_wq, &data->clk_work);
+ return dev_err_probe(dev, err, "Failed to set the clock notifier\n");
+ queue_work(system_unbound_wq, &data->clk_work);
}
platform_set_drvdata(pdev, data);
@@ -604,17 +665,6 @@ static int dw8250_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
return 0;
-
-err_reset:
- reset_control_assert(data->rst);
-
-err_pclk:
- clk_disable_unprepare(data->pclk);
-
-err_clk:
- clk_disable_unprepare(data->clk);
-
- return err;
}
static int dw8250_remove(struct platform_device *pdev)
@@ -632,12 +682,6 @@ static int dw8250_remove(struct platform_device *pdev)
serial8250_unregister_port(data->data.line);
- reset_control_assert(data->rst);
-
- clk_disable_unprepare(data->pclk);
-
- clk_disable_unprepare(data->clk);
-
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
@@ -693,12 +737,37 @@ static const struct dev_pm_ops dw8250_pm_ops = {
SET_RUNTIME_PM_OPS(dw8250_runtime_suspend, dw8250_runtime_resume, NULL)
};
+static const struct dw8250_platform_data dw8250_dw_apb = {
+ .usr_reg = DW_UART_USR,
+};
+
+static const struct dw8250_platform_data dw8250_octeon_3860_data = {
+ .usr_reg = OCTEON_UART_USR,
+ .quirks = DW_UART_QUIRK_OCTEON,
+};
+
+static const struct dw8250_platform_data dw8250_armada_38x_data = {
+ .usr_reg = DW_UART_USR,
+ .quirks = DW_UART_QUIRK_ARMADA_38X,
+};
+
+static const struct dw8250_platform_data dw8250_renesas_rzn1_data = {
+ .usr_reg = DW_UART_USR,
+ .cpr_val = 0x00012f32,
+ .quirks = DW_UART_QUIRK_IS_DMA_FC,
+};
+
+static const struct dw8250_platform_data dw8250_starfive_jh7100_data = {
+ .usr_reg = DW_UART_USR,
+ .quirks = DW_UART_QUIRK_SKIP_SET_RATE,
+};
+
static const struct of_device_id dw8250_of_match[] = {
- { .compatible = "snps,dw-apb-uart" },
- { .compatible = "cavium,octeon-3860-uart" },
- { .compatible = "marvell,armada-38x-uart" },
- { .compatible = "renesas,rzn1-uart" },
- { .compatible = "starfive,jh7100-uart" },
+ { .compatible = "snps,dw-apb-uart", .data = &dw8250_dw_apb },
+ { .compatible = "cavium,octeon-3860-uart", .data = &dw8250_octeon_3860_data },
+ { .compatible = "marvell,armada-38x-uart", .data = &dw8250_armada_38x_data },
+ { .compatible = "renesas,rzn1-uart", .data = &dw8250_renesas_rzn1_data },
+ { .compatible = "starfive,jh7100-uart", .data = &dw8250_starfive_jh7100_data },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dw8250_of_match);
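
The probe rework above replaces the manual err_clk/err_pclk/err_reset unwinding with devm_add_action_or_reset(), so each resource registers its own undo step and later failures can simply return. A minimal sketch of that pattern, assuming a hypothetical example_probe() and only the baud clock for brevity:

#include <linux/clk.h>
#include <linux/device.h>

static void example_clk_disable_unprepare(void *data)
{
	clk_disable_unprepare(data);
}

static int example_probe(struct device *dev, struct clk *clk)
{
	int err;

	err = clk_prepare_enable(clk);
	if (err)
		return dev_err_probe(dev, err, "could not enable clock\n");

	/* The undo action runs automatically when probe fails or the device is unbound. */
	err = devm_add_action_or_reset(dev, example_clk_disable_unprepare, clk);
	if (err)
		return err;

	/* ... later failures just return; no unwind labels are needed. */
	return 0;
}
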
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index 622d3b0d89e7..fbabfdd8c7b8 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -2,19 +2,32 @@
/* Synopsys DesignWare 8250 library. */
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/device.h>
-#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/property.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include "8250_dwlib.h"
/* Offsets for the DesignWare specific registers */
+#define DW_UART_TCR 0xac /* Transceiver Control Register (RS485) */
+#define DW_UART_DE_EN 0xb0 /* Driver Output Enable Register */
+#define DW_UART_RE_EN 0xb4 /* Receiver Output Enable Register */
#define DW_UART_DLF 0xc0 /* Divisor Latch Fraction Register */
#define DW_UART_CPR 0xf4 /* Component Parameter Register */
#define DW_UART_UCV 0xf8 /* UART Component Version */
+/* Transceiver Control Register bits */
+#define DW_UART_TCR_RS485_EN BIT(0)
+#define DW_UART_TCR_RE_POL BIT(1)
+#define DW_UART_TCR_DE_POL BIT(2)
+#define DW_UART_TCR_XFER_MODE GENMASK(4, 3)
+#define DW_UART_TCR_XFER_MODE_DE_DURING_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 0)
+#define DW_UART_TCR_XFER_MODE_SW_DE_OR_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 1)
+#define DW_UART_TCR_XFER_MODE_DE_OR_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 2)
+
/* Component Parameter Register bits */
#define DW_UART_CPR_ABP_DATA_WIDTH (3 << 0)
#define DW_UART_CPR_AFCE_MODE (1 << 4)
@@ -32,21 +45,6 @@
/* Helper for FIFO size calculation */
#define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16)
-static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
-{
- if (p->iotype == UPIO_MEM32BE)
- return ioread32be(p->membase + offset);
- return readl(p->membase + offset);
-}
-
-static inline void dw8250_writel_ext(struct uart_port *p, int offset, u32 reg)
-{
- if (p->iotype == UPIO_MEM32BE)
- iowrite32be(reg, p->membase + offset);
- else
- writel(reg, p->membase + offset);
-}
-
/*
* divisor = div(I) + div(F)
* "I" means integer, "F" means fractional
@@ -87,11 +85,87 @@ void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, struct
}
EXPORT_SYMBOL_GPL(dw8250_do_set_termios);
+static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485)
+{
+ u32 tcr;
+
+ tcr = dw8250_readl_ext(p, DW_UART_TCR);
+ tcr &= ~DW_UART_TCR_XFER_MODE;
+
+ if (rs485->flags & SER_RS485_ENABLED) {
+ /* Clear unsupported flags. */
+ rs485->flags &= SER_RS485_ENABLED | SER_RS485_RX_DURING_TX |
+ SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND;
+ tcr |= DW_UART_TCR_RS485_EN;
+
+ if (rs485->flags & SER_RS485_RX_DURING_TX) {
+ tcr |= DW_UART_TCR_XFER_MODE_DE_DURING_RE;
+ } else {
+ /* HW does not support same DE level for tx and rx */
+ if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
+ return -EINVAL;
+
+ tcr |= DW_UART_TCR_XFER_MODE_DE_OR_RE;
+ }
+ dw8250_writel_ext(p, DW_UART_DE_EN, 1);
+ dw8250_writel_ext(p, DW_UART_RE_EN, 1);
+ } else {
+ rs485->flags = 0;
+
+ tcr &= ~DW_UART_TCR_RS485_EN;
+ }
+
+ /* Reset to default polarity */
+ tcr |= DW_UART_TCR_DE_POL;
+ tcr &= ~DW_UART_TCR_RE_POL;
+
+ if (!(rs485->flags & SER_RS485_RTS_ON_SEND))
+ tcr &= ~DW_UART_TCR_DE_POL;
+ if (device_property_read_bool(p->dev, "rs485-rx-active-high"))
+ tcr |= DW_UART_TCR_RE_POL;
+
+ dw8250_writel_ext(p, DW_UART_TCR, tcr);
+
+ rs485->delay_rts_before_send = 0;
+ rs485->delay_rts_after_send = 0;
+
+ p->rs485 = *rs485;
+
+ return 0;
+}
+
+/*
+ * Tests if RE_EN register can have non-zero value to see if RS-485 HW support
+ * is present.
+ */
+static bool dw8250_detect_rs485_hw(struct uart_port *p)
+{
+ u32 reg;
+
+ dw8250_writel_ext(p, DW_UART_RE_EN, 1);
+ reg = dw8250_readl_ext(p, DW_UART_RE_EN);
+ dw8250_writel_ext(p, DW_UART_RE_EN, 0);
+ return reg;
+}
+
void dw8250_setup_port(struct uart_port *p)
{
+ struct dw8250_port_data *pd = p->private_data;
+ struct dw8250_data *data = to_dw8250_data(pd);
struct uart_8250_port *up = up_to_u8250p(p);
u32 reg;
+ pd->hw_rs485_support = dw8250_detect_rs485_hw(p);
+ if (pd->hw_rs485_support) {
+ p->rs485_config = dw8250_rs485_config;
+ } else {
+ p->rs485_config = serial8250_em485_config;
+ up->rs485_start_tx = serial8250_em485_start_tx;
+ up->rs485_stop_tx = serial8250_em485_stop_tx;
+ }
+ up->capabilities |= UART_CAP_NOTEMT;
+
/*
* If the Component Version Register returns zero, we know that
* ADDITIONAL_FEATURES are not enabled. No need to go any further.
@@ -108,14 +182,16 @@ void dw8250_setup_port(struct uart_port *p)
dw8250_writel_ext(p, DW_UART_DLF, 0);
if (reg) {
- struct dw8250_port_data *d = p->private_data;
-
- d->dlf_size = fls(reg);
+ pd->dlf_size = fls(reg);
p->get_divisor = dw8250_get_divisor;
p->set_divisor = dw8250_set_divisor;
}
reg = dw8250_readl_ext(p, DW_UART_CPR);
+ if (!reg) {
+ reg = data->pdata->cpr_val;
+ dev_dbg(p->dev, "CPR is not available, using 0x%08x instead\n", reg);
+ }
if (!reg)
return;
@@ -124,7 +200,7 @@ void dw8250_setup_port(struct uart_port *p)
p->type = PORT_16550A;
p->flags |= UPF_FIXED_TYPE;
p->fifosize = DW_UART_CPR_FIFO_SIZE(reg);
- up->capabilities = UART_CAP_FIFO;
+ up->capabilities = UART_CAP_FIFO | UART_CAP_NOTEMT;
}
if (reg & DW_UART_CPR_AFCE_MODE)
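
The new DW_UART_TCR field definitions above use GENMASK() and FIELD_PREP() from <linux/bitfield.h> instead of hand-shifted constants. A minimal illustration of that helper pair, with a stand-in EXAMPLE_XFER_MODE field occupying bits 4:3 like the real register:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_XFER_MODE		GENMASK(4, 3)
#define EXAMPLE_XFER_MODE_DE_OR_RE	FIELD_PREP(EXAMPLE_XFER_MODE, 2)

static u32 example_set_xfer_mode(u32 tcr)
{
	tcr &= ~EXAMPLE_XFER_MODE;		/* clear the two-bit field */
	tcr |= EXAMPLE_XFER_MODE_DE_OR_RE;	/* then select mode 2 */
	return tcr;
}
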
diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h
index 83d528e5cc21..055bfdc87985 100644
--- a/drivers/tty/serial/8250/8250_dwlib.h
+++ b/drivers/tty/serial/8250/8250_dwlib.h
@@ -1,10 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/* Synopsys DesignWare 8250 library header file. */
+#include <linux/io.h>
+#include <linux/notifier.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include "8250.h"
+struct clk;
+struct reset_control;
+
struct dw8250_port_data {
/* Port properties */
int line;
@@ -14,7 +20,52 @@ struct dw8250_port_data {
/* Hardware configuration */
u8 dlf_size;
+
+ /* RS485 variables */
+ bool hw_rs485_support;
+};
+
+struct dw8250_platform_data {
+ u8 usr_reg;
+ u32 cpr_val;
+ unsigned int quirks;
+};
+
+struct dw8250_data {
+ struct dw8250_port_data data;
+ const struct dw8250_platform_data *pdata;
+
+ int msr_mask_on;
+ int msr_mask_off;
+ struct clk *clk;
+ struct clk *pclk;
+ struct notifier_block clk_notifier;
+ struct work_struct clk_work;
+ struct reset_control *rst;
+
+ unsigned int skip_autocfg:1;
+ unsigned int uart_16550_compatible:1;
};
void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, struct ktermios *old);
void dw8250_setup_port(struct uart_port *p);
+
+static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
+{
+ return container_of(data, struct dw8250_data, data);
+}
+
+static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
+{
+ if (p->iotype == UPIO_MEM32BE)
+ return ioread32be(p->membase + offset);
+ return readl(p->membase + offset);
+}
+
+static inline void dw8250_writel_ext(struct uart_port *p, int offset, u32 reg)
+{
+ if (p->iotype == UPIO_MEM32BE)
+ iowrite32be(reg, p->membase + offset);
+ else
+ writel(reg, p->membase + offset);
+}
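
to_dw8250_data() above is the classic container_of() pattern: the core hands back a pointer to the embedded dw8250_port_data and the glue driver recovers the enclosing dw8250_data. A generic illustration with made-up structures, not the driver's own types:

#include <linux/kernel.h>

struct example_inner {
	int line;
};

struct example_outer {
	struct example_inner inner;	/* embedded by value, not a pointer */
	unsigned long quirks;
};

static struct example_outer *to_example_outer(struct example_inner *inner)
{
	return container_of(inner, struct example_outer, inner);
}
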
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 251f0018ae8c..dba5950b8d0e 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -200,12 +200,12 @@ static int fintek_8250_rs485_config(struct uart_port *port,
if (!pdata)
return -EINVAL;
- /* Hardware do not support same RTS level on send and receive */
- if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
- !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
- return -EINVAL;
if (rs485->flags & SER_RS485_ENABLED) {
+ /* Hardware does not support the same RTS level on send and receive */
+ if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
+ return -EINVAL;
memset(rs485->padding, 0, sizeof(rs485->padding));
config |= RS485_URA;
} else {
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 21053db93ff1..54051ec7b499 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -54,9 +54,6 @@
#define MTK_UART_TX_TRIGGER 1
#define MTK_UART_RX_TRIGGER MTK_UART_RX_SIZE
-#define MTK_UART_FEATURE_SEL 39 /* Feature Selection register */
-#define MTK_UART_FEAT_NEWRMAP BIT(0) /* Use new register map */
-
#define MTK_UART_XON1 40 /* I/O: Xon character 1 */
#define MTK_UART_XOFF1 42 /* I/O: Xoff character 1 */
@@ -575,10 +572,6 @@ static int mtk8250_probe(struct platform_device *pdev)
uart.dma = data->dma;
#endif
- /* Set AP UART new register map */
- writel(MTK_UART_FEAT_NEWRMAP, uart.port.membase +
- (MTK_UART_FEATURE_SEL << uart.port.regshift));
-
/* Disable Rate Fix function */
writel(0x0, uart.port.membase +
(MTK_UART_RATE_FIX << uart.port.regshift));
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index be8626234627..5a699a1aa79c 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -326,6 +326,8 @@ static const struct of_device_id of_platform_serial_table[] = {
.data = (void *)PORT_ALTR_16550_F64, },
{ .compatible = "altr,16550-FIFO128",
.data = (void *)PORT_ALTR_16550_F128, },
+ { .compatible = "fsl,16550-FIFO64",
+ .data = (void *)PORT_16550A_FSL64, },
{ .compatible = "mediatek,mtk-btif",
.data = (void *)PORT_MTK_BTIF, },
{ .compatible = "mrvl,mmp-uart",
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index a293e9f107d0..a17619db7939 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/tty.h>
@@ -994,41 +995,29 @@ static void pci_ite887x_exit(struct pci_dev *dev)
}
/*
- * EndRun Technologies.
- * Determine the number of ports available on the device.
+ * Oxford Semiconductor Inc.
+ * Check if an OxSemi device is part of the Tornado range of devices.
*/
#define PCI_VENDOR_ID_ENDRUN 0x7401
#define PCI_DEVICE_ID_ENDRUN_1588 0xe100
-static int pci_endrun_init(struct pci_dev *dev)
+static bool pci_oxsemi_tornado_p(struct pci_dev *dev)
{
- u8 __iomem *p;
- unsigned long deviceID;
- unsigned int number_uarts = 0;
+ /* OxSemi Tornado devices are all 0xCxxx */
+ if (dev->vendor == PCI_VENDOR_ID_OXSEMI &&
+ (dev->device & 0xf000) != 0xc000)
+ return false;
- /* EndRun device is all 0xexxx */
+ /* EndRun devices are all 0xExxx */
if (dev->vendor == PCI_VENDOR_ID_ENDRUN &&
- (dev->device & 0xf000) != 0xe000)
- return 0;
-
- p = pci_iomap(dev, 0, 5);
- if (p == NULL)
- return -ENOMEM;
+ (dev->device & 0xf000) != 0xe000)
+ return false;
- deviceID = ioread32(p);
- /* EndRun device */
- if (deviceID == 0x07000200) {
- number_uarts = ioread8(p + 4);
- pci_dbg(dev, "%d ports detected on EndRun PCI Express device\n", number_uarts);
- }
- pci_iounmap(dev, p);
- return number_uarts;
+ return true;
}
/*
- * Oxford Semiconductor Inc.
- * Check that device is part of the Tornado range of devices, then determine
- * the number of ports available on the device.
+ * Determine the number of ports available on a Tornado device.
*/
static int pci_oxsemi_tornado_init(struct pci_dev *dev)
{
@@ -1036,9 +1025,7 @@ static int pci_oxsemi_tornado_init(struct pci_dev *dev)
unsigned long deviceID;
unsigned int number_uarts = 0;
- /* OxSemi Tornado devices are all 0xCxxx */
- if (dev->vendor == PCI_VENDOR_ID_OXSEMI &&
- (dev->device & 0xF000) != 0xC000)
+ if (!pci_oxsemi_tornado_p(dev))
return 0;
p = pci_iomap(dev, 0, 5);
@@ -1049,12 +1036,217 @@ static int pci_oxsemi_tornado_init(struct pci_dev *dev)
/* Tornado device */
if (deviceID == 0x07000200) {
number_uarts = ioread8(p + 4);
- pci_dbg(dev, "%d ports detected on Oxford PCI Express device\n", number_uarts);
+ pci_dbg(dev, "%d ports detected on %s PCI Express device\n",
+ number_uarts,
+ dev->vendor == PCI_VENDOR_ID_ENDRUN ?
+ "EndRun" : "Oxford");
}
pci_iounmap(dev, p);
return number_uarts;
}
+/* Tornado-specific constants for the TCR and CPR registers; see below. */
+#define OXSEMI_TORNADO_TCR_MASK 0xf
+#define OXSEMI_TORNADO_CPR_MASK 0x1ff
+#define OXSEMI_TORNADO_CPR_MIN 0x008
+#define OXSEMI_TORNADO_CPR_DEF 0x10f
+
+/*
+ * Determine the oversampling rate, the clock prescaler, and the clock
+ * divisor for the requested baud rate. The clock rate is 62.5 MHz,
+ * which is four times the baud base, and the prescaler increments in
+ * steps of 1/8. Therefore to make calculations on integers we need
+ * to use a scaled clock rate, which is the baud base multiplied by 32
+ * (or our assumed UART clock rate multiplied by 2).
+ *
+ * The allowed oversampling rates are from 4 up to 16 inclusive (values
+ * from 0 to 3 inclusive map to 16). Likewise the clock prescaler allows
+ * values between 1.000 and 63.875 inclusive (operation for values from
+ * 0.000 to 0.875 has not been specified). The clock divisor is the usual
+ * unsigned 16-bit integer.
+ *
+ * For the most accurate baud rate we use a table of predetermined
+ * oversampling rates and clock prescalers that records all possible
+ * products of the two parameters in the range from 4 up to 255 inclusive,
+ * and additionally 335 for the 1500000bps rate, with the prescaler scaled
+ * by 8. The table is sorted by the decreasing value of the oversampling
+ * rate and ties are resolved by sorting by the decreasing value of the
+ * product. This way preference is given to higher oversampling rates.
+ *
+ * We iterate over the table and choose the product of an oversampling
+ * rate and a clock prescaler that gives the lowest integer division
+ * result deviation, or if an exact integer divider is found we stop
+ * looking for it right away. We do some fixup if the resulting clock
+ * divisor required would be out of its unsigned 16-bit integer range.
+ *
+ * Finally we abuse the supposed fractional part returned to encode the
+ * 4-bit value of the oversampling rate and the 9-bit value of the clock
+ * prescaler which will end up in the TCR and CPR/CPR2 registers.
+ */
+static unsigned int pci_oxsemi_tornado_get_divisor(struct uart_port *port,
+ unsigned int baud,
+ unsigned int *frac)
+{
+ static u8 p[][2] = {
+ { 16, 14, }, { 16, 13, }, { 16, 12, }, { 16, 11, },
+ { 16, 10, }, { 16, 9, }, { 16, 8, }, { 15, 17, },
+ { 15, 16, }, { 15, 15, }, { 15, 14, }, { 15, 13, },
+ { 15, 12, }, { 15, 11, }, { 15, 10, }, { 15, 9, },
+ { 15, 8, }, { 14, 18, }, { 14, 17, }, { 14, 14, },
+ { 14, 13, }, { 14, 12, }, { 14, 11, }, { 14, 10, },
+ { 14, 9, }, { 14, 8, }, { 13, 19, }, { 13, 18, },
+ { 13, 17, }, { 13, 13, }, { 13, 12, }, { 13, 11, },
+ { 13, 10, }, { 13, 9, }, { 13, 8, }, { 12, 19, },
+ { 12, 18, }, { 12, 17, }, { 12, 11, }, { 12, 9, },
+ { 12, 8, }, { 11, 23, }, { 11, 22, }, { 11, 21, },
+ { 11, 20, }, { 11, 19, }, { 11, 18, }, { 11, 17, },
+ { 11, 11, }, { 11, 10, }, { 11, 9, }, { 11, 8, },
+ { 10, 25, }, { 10, 23, }, { 10, 20, }, { 10, 19, },
+ { 10, 17, }, { 10, 10, }, { 10, 9, }, { 10, 8, },
+ { 9, 27, }, { 9, 23, }, { 9, 21, }, { 9, 19, },
+ { 9, 18, }, { 9, 17, }, { 9, 9, }, { 9, 8, },
+ { 8, 31, }, { 8, 29, }, { 8, 23, }, { 8, 19, },
+ { 8, 17, }, { 8, 8, }, { 7, 35, }, { 7, 31, },
+ { 7, 29, }, { 7, 25, }, { 7, 23, }, { 7, 21, },
+ { 7, 19, }, { 7, 17, }, { 7, 15, }, { 7, 14, },
+ { 7, 13, }, { 7, 12, }, { 7, 11, }, { 7, 10, },
+ { 7, 9, }, { 7, 8, }, { 6, 41, }, { 6, 37, },
+ { 6, 31, }, { 6, 29, }, { 6, 23, }, { 6, 19, },
+ { 6, 17, }, { 6, 13, }, { 6, 11, }, { 6, 10, },
+ { 6, 9, }, { 6, 8, }, { 5, 67, }, { 5, 47, },
+ { 5, 43, }, { 5, 41, }, { 5, 37, }, { 5, 31, },
+ { 5, 29, }, { 5, 25, }, { 5, 23, }, { 5, 19, },
+ { 5, 17, }, { 5, 15, }, { 5, 13, }, { 5, 11, },
+ { 5, 10, }, { 5, 9, }, { 5, 8, }, { 4, 61, },
+ { 4, 59, }, { 4, 53, }, { 4, 47, }, { 4, 43, },
+ { 4, 41, }, { 4, 37, }, { 4, 31, }, { 4, 29, },
+ { 4, 23, }, { 4, 19, }, { 4, 17, }, { 4, 13, },
+ { 4, 9, }, { 4, 8, },
+ };
+ /* Scale the quotient for comparison to get the fractional part. */
+ const unsigned int quot_scale = 65536;
+ unsigned int sclk = port->uartclk * 2;
+ unsigned int sdiv = DIV_ROUND_CLOSEST(sclk, baud);
+ unsigned int best_squot;
+ unsigned int squot;
+ unsigned int quot;
+ u16 cpr;
+ u8 tcr;
+ int i;
+
+ /* Old custom speed handling. */
+ if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST) {
+ unsigned int cust_div = port->custom_divisor;
+
+ quot = cust_div & UART_DIV_MAX;
+ tcr = (cust_div >> 16) & OXSEMI_TORNADO_TCR_MASK;
+ cpr = (cust_div >> 20) & OXSEMI_TORNADO_CPR_MASK;
+ if (cpr < OXSEMI_TORNADO_CPR_MIN)
+ cpr = OXSEMI_TORNADO_CPR_DEF;
+ } else {
+ best_squot = quot_scale;
+ for (i = 0; i < ARRAY_SIZE(p); i++) {
+ unsigned int spre;
+ unsigned int srem;
+ u8 cp;
+ u8 tc;
+
+ tc = p[i][0];
+ cp = p[i][1];
+ spre = tc * cp;
+
+ srem = sdiv % spre;
+ if (srem > spre / 2)
+ srem = spre - srem;
+ squot = DIV_ROUND_CLOSEST(srem * quot_scale, spre);
+
+ if (srem == 0) {
+ tcr = tc;
+ cpr = cp;
+ quot = sdiv / spre;
+ break;
+ } else if (squot < best_squot) {
+ best_squot = squot;
+ tcr = tc;
+ cpr = cp;
+ quot = DIV_ROUND_CLOSEST(sdiv, spre);
+ }
+ }
+ while (tcr <= (OXSEMI_TORNADO_TCR_MASK + 1) >> 1 &&
+ quot % 2 == 0) {
+ quot >>= 1;
+ tcr <<= 1;
+ }
+ while (quot > UART_DIV_MAX) {
+ if (tcr <= (OXSEMI_TORNADO_TCR_MASK + 1) >> 1) {
+ quot >>= 1;
+ tcr <<= 1;
+ } else if (cpr <= OXSEMI_TORNADO_CPR_MASK >> 1) {
+ quot >>= 1;
+ cpr <<= 1;
+ } else {
+ quot = quot * cpr / OXSEMI_TORNADO_CPR_MASK;
+ cpr = OXSEMI_TORNADO_CPR_MASK;
+ }
+ }
+ }
+
+ *frac = (cpr << 8) | (tcr & OXSEMI_TORNADO_TCR_MASK);
+ return quot;
+}
+
+/*
+ * Set the oversampling rate in the transmitter clock cycle register (TCR),
+ * the clock prescaler in the clock prescaler register (CPR and CPR2), and
+ * the clock divisor in the divisor latch (DLL and DLM). Note that for
+ * backwards compatibility any write to CPR clears CPR2 and therefore CPR
+ * has to be written first, followed by CPR2, which occupies the location
+ * of CKS used with earlier UART designs.
+ */
+static void pci_oxsemi_tornado_set_divisor(struct uart_port *port,
+ unsigned int baud,
+ unsigned int quot,
+ unsigned int quot_frac)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ u8 cpr2 = quot_frac >> 16;
+ u8 cpr = quot_frac >> 8;
+ u8 tcr = quot_frac;
+
+ serial_icr_write(up, UART_TCR, tcr);
+ serial_icr_write(up, UART_CPR, cpr);
+ serial_icr_write(up, UART_CKS, cpr2);
+ serial8250_do_set_divisor(port, baud, quot, 0);
+}
+
+/*
+ * For Tornado devices we force MCR[7] set for the Divide-by-M N/8 baud rate
+ * generator prescaler (CPR and CPR2). Otherwise no prescaler would be used.
+ */
+static void pci_oxsemi_tornado_set_mctrl(struct uart_port *port,
+ unsigned int mctrl)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ up->mcr |= UART_MCR_CLKSEL;
+ serial8250_do_set_mctrl(port, mctrl);
+}
+
+static int pci_oxsemi_tornado_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *up, int idx)
+{
+ struct pci_dev *dev = priv->dev;
+
+ if (pci_oxsemi_tornado_p(dev)) {
+ up->port.get_divisor = pci_oxsemi_tornado_get_divisor;
+ up->port.set_divisor = pci_oxsemi_tornado_set_divisor;
+ up->port.set_mctrl = pci_oxsemi_tornado_set_mctrl;
+ }
+
+ return pci_default_setup(priv, board, up, idx);
+}
+
static int pci_asix_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
@@ -2244,7 +2436,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .init = pci_endrun_init,
+ .init = pci_oxsemi_tornado_init,
.setup = pci_default_setup,
},
/*
@@ -2256,7 +2448,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
- .setup = pci_default_setup,
+ .setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_MAINPINE,
@@ -2264,7 +2456,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
- .setup = pci_default_setup,
+ .setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_DIGI,
@@ -2272,7 +2464,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.subvendor = PCI_SUBVENDOR_ID_IBM,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
- .setup = pci_default_setup,
+ .setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTEL,
@@ -2589,7 +2781,7 @@ enum pci_board_num_t {
pbn_b0_2_1843200,
pbn_b0_4_1843200,
- pbn_b0_1_3906250,
+ pbn_b0_1_15625000,
pbn_b0_bt_1_115200,
pbn_b0_bt_2_115200,
@@ -2667,12 +2859,11 @@ enum pci_board_num_t {
pbn_panacom2,
pbn_panacom4,
pbn_plx_romulus,
- pbn_endrun_2_3906250,
pbn_oxsemi,
- pbn_oxsemi_1_3906250,
- pbn_oxsemi_2_3906250,
- pbn_oxsemi_4_3906250,
- pbn_oxsemi_8_3906250,
+ pbn_oxsemi_1_15625000,
+ pbn_oxsemi_2_15625000,
+ pbn_oxsemi_4_15625000,
+ pbn_oxsemi_8_15625000,
pbn_intel_i960,
pbn_sgi_ioc3,
pbn_computone_4,
@@ -2815,10 +3006,10 @@ static struct pciserial_board pci_boards[] = {
.uart_offset = 8,
},
- [pbn_b0_1_3906250] = {
+ [pbn_b0_1_15625000] = {
.flags = FL_BASE0,
.num_ports = 1,
- .base_baud = 3906250,
+ .base_baud = 15625000,
.uart_offset = 8,
},
@@ -3190,20 +3381,6 @@ static struct pciserial_board pci_boards[] = {
},
/*
- * EndRun Technologies
- * Uses the size of PCI Base region 0 to
- * signal now many ports are available
- * 2 port 952 Uart support
- */
- [pbn_endrun_2_3906250] = {
- .flags = FL_BASE0,
- .num_ports = 2,
- .base_baud = 3906250,
- .uart_offset = 0x200,
- .first_offset = 0x1000,
- },
-
- /*
* This board uses the size of PCI Base region 0 to
* signal now many ports are available
*/
@@ -3213,31 +3390,31 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 115200,
.uart_offset = 8,
},
- [pbn_oxsemi_1_3906250] = {
+ [pbn_oxsemi_1_15625000] = {
.flags = FL_BASE0,
.num_ports = 1,
- .base_baud = 3906250,
+ .base_baud = 15625000,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
- [pbn_oxsemi_2_3906250] = {
+ [pbn_oxsemi_2_15625000] = {
.flags = FL_BASE0,
.num_ports = 2,
- .base_baud = 3906250,
+ .base_baud = 15625000,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
- [pbn_oxsemi_4_3906250] = {
+ [pbn_oxsemi_4_15625000] = {
.flags = FL_BASE0,
.num_ports = 4,
- .base_baud = 3906250,
+ .base_baud = 15625000,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
- [pbn_oxsemi_8_3906250] = {
+ [pbn_oxsemi_8_15625000] = {
.flags = FL_BASE0,
.num_ports = 8,
- .base_baud = 3906250,
+ .base_baud = 15625000,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
@@ -3518,6 +3695,12 @@ static struct pciserial_board pci_boards[] = {
},
};
+#define REPORT_CONFIG(option) \
+ (IS_ENABLED(CONFIG_##option) ? 0 : (kernel_ulong_t)&#option)
+#define REPORT_8250_CONFIG(option) \
+ (IS_ENABLED(CONFIG_SERIAL_8250_##option) ? \
+ 0 : (kernel_ulong_t)&"SERIAL_8250_"#option)
+
static const struct pci_device_id blacklist[] = {
/* softmodems */
{ PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */
@@ -3525,40 +3708,43 @@ static const struct pci_device_id blacklist[] = {
{ PCI_DEVICE(0x1543, 0x3052), }, /* Si3052-based modem, default IDs */
/* multi-io cards handled by parport_serial */
- { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
- { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
- { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
+ /* WCH CH353 2S1P */
+ { PCI_DEVICE(0x4348, 0x7053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
+ /* WCH CH353 1S1P */
+ { PCI_DEVICE(0x4348, 0x5053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
+ /* WCH CH382 2S1P */
+ { PCI_DEVICE(0x1c00, 0x3250), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
/* Intel platforms with MID UART */
- { PCI_VDEVICE(INTEL, 0x081b), },
- { PCI_VDEVICE(INTEL, 0x081c), },
- { PCI_VDEVICE(INTEL, 0x081d), },
- { PCI_VDEVICE(INTEL, 0x1191), },
- { PCI_VDEVICE(INTEL, 0x18d8), },
- { PCI_VDEVICE(INTEL, 0x19d8), },
+ { PCI_VDEVICE(INTEL, 0x081b), REPORT_8250_CONFIG(MID), },
+ { PCI_VDEVICE(INTEL, 0x081c), REPORT_8250_CONFIG(MID), },
+ { PCI_VDEVICE(INTEL, 0x081d), REPORT_8250_CONFIG(MID), },
+ { PCI_VDEVICE(INTEL, 0x1191), REPORT_8250_CONFIG(MID), },
+ { PCI_VDEVICE(INTEL, 0x18d8), REPORT_8250_CONFIG(MID), },
+ { PCI_VDEVICE(INTEL, 0x19d8), REPORT_8250_CONFIG(MID), },
/* Intel platforms with DesignWare UART */
- { PCI_VDEVICE(INTEL, 0x0936), },
- { PCI_VDEVICE(INTEL, 0x0f0a), },
- { PCI_VDEVICE(INTEL, 0x0f0c), },
- { PCI_VDEVICE(INTEL, 0x228a), },
- { PCI_VDEVICE(INTEL, 0x228c), },
- { PCI_VDEVICE(INTEL, 0x4b96), },
- { PCI_VDEVICE(INTEL, 0x4b97), },
- { PCI_VDEVICE(INTEL, 0x4b98), },
- { PCI_VDEVICE(INTEL, 0x4b99), },
- { PCI_VDEVICE(INTEL, 0x4b9a), },
- { PCI_VDEVICE(INTEL, 0x4b9b), },
- { PCI_VDEVICE(INTEL, 0x9ce3), },
- { PCI_VDEVICE(INTEL, 0x9ce4), },
+ { PCI_VDEVICE(INTEL, 0x0936), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x0f0a), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x0f0c), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x228a), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x228c), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x4b96), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x4b97), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x4b98), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x4b99), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x4b9a), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x4b9b), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x9ce3), REPORT_8250_CONFIG(LPSS), },
+ { PCI_VDEVICE(INTEL, 0x9ce4), REPORT_8250_CONFIG(LPSS), },
/* Exar devices */
- { PCI_VDEVICE(EXAR, PCI_ANY_ID), },
- { PCI_VDEVICE(COMMTECH, PCI_ANY_ID), },
+ { PCI_VDEVICE(EXAR, PCI_ANY_ID), REPORT_8250_CONFIG(EXAR), },
+ { PCI_VDEVICE(COMMTECH, PCI_ANY_ID), REPORT_8250_CONFIG(EXAR), },
/* Pericom devices */
- { PCI_VDEVICE(PERICOM, PCI_ANY_ID), },
- { PCI_VDEVICE(ACCESSIO, PCI_ANY_ID), },
+ { PCI_VDEVICE(PERICOM, PCI_ANY_ID), REPORT_8250_CONFIG(PERICOM), },
+ { PCI_VDEVICE(ACCESSIO, PCI_ANY_ID), REPORT_8250_CONFIG(PERICOM), },
/* End of the black list */
{ }
@@ -3840,8 +4026,12 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
board = &pci_boards[ent->driver_data];
exclude = pci_match_id(blacklist, dev);
- if (exclude)
+ if (exclude) {
+ if (exclude->driver_data)
+ pci_warn(dev, "ignoring port, enable %s to handle\n",
+ (const char *)exclude->driver_data);
return -ENODEV;
+ }
rc = pcim_enable_device(dev);
pci_save_state(dev);
@@ -4110,13 +4300,6 @@ static const struct pci_device_id serial_pci_tbl[] = {
0x10b5, 0x106a, 0, 0,
pbn_plx_romulus },
/*
- * EndRun Technologies. PCI express device range.
- * EndRun PTP/1588 has 2 Native UARTs.
- */
- { PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_endrun_2_3906250 },
- /*
* Quatech cards. These actually have configurable clocks but for
* now we just use the default.
*
@@ -4225,158 +4408,165 @@ static const struct pci_device_id serial_pci_tbl[] = {
*/
{ PCI_VENDOR_ID_OXSEMI, 0xc101, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc105, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc11b, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc11f, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc120, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc124, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc138, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc13d, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc140, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc141, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc144, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc145, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_3906250 },
+ pbn_b0_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc158, /* OXPCIe952 2 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_3906250 },
+ pbn_oxsemi_2_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc15d, /* OXPCIe952 2 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_3906250 },
+ pbn_oxsemi_2_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc208, /* OXPCIe954 4 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_4_3906250 },
+ pbn_oxsemi_4_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc20d, /* OXPCIe954 4 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_4_3906250 },
+ pbn_oxsemi_4_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc308, /* OXPCIe958 8 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_8_3906250 },
+ pbn_oxsemi_8_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc30d, /* OXPCIe958 8 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_8_3906250 },
+ pbn_oxsemi_8_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc40b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc40f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc41b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc41f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc42b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc42f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc43b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc43f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc44b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc44f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc45b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc45f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc46b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc46f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc47b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc47f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc48b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc48f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc49b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc49f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4ab, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4af, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4bb, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4bf, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4cb, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4cf, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
/*
* Mainpine Inc. IQ Express "Rev3" utilizing OxSemi Tornado
*/
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 1 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4001, 0, 0,
- pbn_oxsemi_1_3906250 },
+ pbn_oxsemi_1_15625000 },
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 2 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4002, 0, 0,
- pbn_oxsemi_2_3906250 },
+ pbn_oxsemi_2_15625000 },
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 4 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4004, 0, 0,
- pbn_oxsemi_4_3906250 },
+ pbn_oxsemi_4_15625000 },
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 8 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4008, 0, 0,
- pbn_oxsemi_8_3906250 },
+ pbn_oxsemi_8_15625000 },
/*
* Digi/IBM PCIe 2-port Async EIA-232 Adapter utilizing OxSemi Tornado
*/
{ PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_2_OX_IBM,
PCI_SUBVENDOR_ID_IBM, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_3906250 },
+ pbn_oxsemi_2_15625000 },
+ /*
+ * EndRun Technologies. PCI express device range.
+ * EndRun PTP/1588 has 2 Native UARTs utilizing OxSemi 952.
+ */
+ { PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_2_15625000 },
/*
* SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards,
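
As a back-of-the-envelope check of the pci_oxsemi_tornado_get_divisor() strategy above, the stand-alone sketch below (plain C, not driver code) searches a three-entry stand-in table for the (oversampling, prescaler * 8) product that divides the scaled divisor with the smallest error. With the Tornado's 500 MHz scaled clock (32 times the 15625000 baud base) and a 115200 baud target, the 14 * 10 entry happens to divide the scaled divisor 4340 exactly, giving divisor 31:

#include <stdio.h>

struct osr_cpr { unsigned int tcr, cpr; };	/* cpr is the prescaler * 8 */

int main(void)
{
	const struct osr_cpr tbl[] = { { 16, 8 }, { 15, 9 }, { 14, 10 } };
	const unsigned int sclk = 500000000;	/* 32 x 15625000 */
	const unsigned int baud = 115200;
	unsigned int sdiv = (sclk + baud / 2) / baud;	/* = 4340 */
	unsigned int best_rem = ~0u, quot = 0;
	struct osr_cpr best = tbl[0];

	for (unsigned int i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		unsigned int spre = tbl[i].tcr * tbl[i].cpr;
		unsigned int rem = sdiv % spre;

		if (rem > spre / 2)
			rem = spre - rem;	/* distance to nearest multiple */
		if (rem < best_rem) {
			best_rem = rem;
			best = tbl[i];
			quot = (sdiv + spre / 2) / spre;
		}
		if (rem == 0)
			break;			/* exact divider found */
	}

	/* 62.5 MHz / (14 * 1.25 * 31) is roughly 115207 baud, about 0.006% off */
	printf("osr=%u cpr/8=%u quot=%u rem=%u\n",
	       best.tcr, best.cpr, quot, best_rem);
	return 0;
}
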
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 1fbd5bf264be..78b6dedc43e6 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -263,7 +263,7 @@ static const struct serial8250_config uart_config[] = {
.tx_loadsz = 63,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
UART_FCR7_64BYTE,
- .flags = UART_CAP_FIFO,
+ .flags = UART_CAP_FIFO | UART_CAP_NOTEMT,
},
[PORT_RT2880] = {
.name = "Palmchip BK-3103",
@@ -538,27 +538,6 @@ serial_port_out_sync(struct uart_port *p, int offset, int value)
}
/*
- * For the 16C950
- */
-static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
-{
- serial_out(up, UART_SCR, offset);
- serial_out(up, UART_ICR, value);
-}
-
-static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
-{
- unsigned int value;
-
- serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
- serial_out(up, UART_SCR, offset);
- value = serial_in(up, UART_ICR);
- serial_icr_write(up, UART_ACR, up->acr);
-
- return value;
-}
-
-/*
* FIFO support.
*/
static void serial8250_clear_fifos(struct uart_8250_port *p)
@@ -1504,18 +1483,19 @@ static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec)
hrtimer_start(hrt, ms_to_ktime(msec), HRTIMER_MODE_REL);
}
-static void __stop_tx_rs485(struct uart_8250_port *p)
+static void __stop_tx_rs485(struct uart_8250_port *p, u64 stop_delay)
{
struct uart_8250_em485 *em485 = p->em485;
+ stop_delay += (u64)p->port.rs485.delay_rts_after_send * NSEC_PER_MSEC;
+
/*
* rs485_stop_tx() is going to set RTS according to config
* AND flush RX FIFO if required.
*/
- if (p->port.rs485.delay_rts_after_send > 0) {
+ if (stop_delay > 0) {
em485->active_timer = &em485->stop_tx_timer;
- start_hrtimer_ms(&em485->stop_tx_timer,
- p->port.rs485.delay_rts_after_send);
+ hrtimer_start(&em485->stop_tx_timer, ns_to_ktime(stop_delay), HRTIMER_MODE_REL);
} else {
p->rs485_stop_tx(p);
em485->active_timer = NULL;
@@ -1535,16 +1515,32 @@ static inline void __stop_tx(struct uart_8250_port *p)
if (em485) {
unsigned char lsr = serial_in(p, UART_LSR);
+ u64 stop_delay = 0;
+
+ if (!(lsr & UART_LSR_THRE))
+ return;
/*
 * To provide required timing and allow FIFO transfer,
* __stop_tx_rs485() must be called only when both FIFO and
- * shift register are empty. It is for device driver to enable
- * interrupt on TEMT.
+ * shift register are empty. The device driver should either
+ * enable the interrupt on TEMT or set UART_CAP_NOTEMT, which
+ * enlarges stop_tx_timer by the tx time of one frame to cover
+ * the emptying of the shift register.
*/
- if ((lsr & BOTH_EMPTY) != BOTH_EMPTY)
- return;
+ if (!(lsr & UART_LSR_TEMT)) {
+ if (!(p->capabilities & UART_CAP_NOTEMT))
+ return;
+ /*
+ * RTS might get deasserted too early with the normal
+ * frame timing formula; THRE seems to get asserted
+ * already during tx of the stop bit rather than after
+ * it is fully sent.
+ * Roughly estimate 1 extra bit here with / 7.
+ */
+ stop_delay = p->port.frame_time + DIV_ROUND_UP(p->port.frame_time, 7);
+ }
- __stop_tx_rs485(p);
+ __stop_tx_rs485(p, stop_delay);
}
__do_stop_tx(p);
}
@@ -1948,9 +1944,12 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
status = serial8250_rx_chars(up, status);
}
serial8250_modem_status(up);
- if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE) &&
- (up->ier & UART_IER_THRI))
- serial8250_tx_chars(up);
+ if ((status & UART_LSR_THRE) && (up->ier & UART_IER_THRI)) {
+ if (!up->dma || up->dma->tx_err)
+ serial8250_tx_chars(up);
+ else
+ __stop_tx(up);
+ }
uart_unlock_and_check_sysrq_irqrestore(port, flags);
@@ -2077,10 +2076,7 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
serial8250_rpm_put(up);
}
-/*
- * Wait for transmitter & holding register to empty
- */
-static void wait_for_xmitr(struct uart_8250_port *up, int bits)
+static void wait_for_lsr(struct uart_8250_port *up, int bits)
{
unsigned int status, tmout = 10000;
@@ -2097,6 +2093,16 @@ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
udelay(1);
touch_nmi_watchdog();
}
+}
+
+/*
+ * Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct uart_8250_port *up, int bits)
+{
+ unsigned int tmout;
+
+ wait_for_lsr(up, bits);
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
@@ -2614,10 +2620,8 @@ static unsigned char serial8250_compute_lcr(struct uart_8250_port *up,
}
if (!(c_cflag & PARODD))
cval |= UART_LCR_EPAR;
-#ifdef CMSPAR
if (c_cflag & CMSPAR)
cval |= UART_LCR_SPAR;
-#endif
return cval;
}
@@ -3333,6 +3337,35 @@ static void serial8250_console_restore(struct uart_8250_port *up)
}
/*
+ * Print a string to the serial port using the device FIFO
+ *
+ * It sends fifosize bytes and then waits for the FIFO
+ * to become empty.
+ */
+static void serial8250_console_fifo_write(struct uart_8250_port *up,
+ const char *s, unsigned int count)
+{
+ int i;
+ const char *end = s + count;
+ unsigned int fifosize = up->tx_loadsz;
+ bool cr_sent = false;
+
+ while (s != end) {
+ wait_for_lsr(up, UART_LSR_THRE);
+
+ for (i = 0; i < fifosize && s != end; ++i) {
+ if (*s == '\n' && !cr_sent) {
+ serial_out(up, UART_TX, '\r');
+ cr_sent = true;
+ } else {
+ serial_out(up, UART_TX, *s++);
+ cr_sent = false;
+ }
+ }
+ }
+}
+
+/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
*
@@ -3347,7 +3380,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
struct uart_8250_em485 *em485 = up->em485;
struct uart_port *port = &up->port;
unsigned long flags;
- unsigned int ier;
+ unsigned int ier, use_fifo;
int locked = 1;
touch_nmi_watchdog();
@@ -3379,7 +3412,30 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
mdelay(port->rs485.delay_rts_before_send);
}
- uart_console_write(port, s, count, serial8250_console_putchar);
+ use_fifo = (up->capabilities & UART_CAP_FIFO) &&
+ /*
+ * BCM283x requires checking the FIFO
+ * after each byte.
+ */
+ !(up->capabilities & UART_CAP_MINI) &&
+ /*
+ * tx_loadsz contains the transmit FIFO size
+ */
+ up->tx_loadsz > 1 &&
+ (up->fcr & UART_FCR_ENABLE_FIFO) &&
+ port->state &&
+ test_bit(TTY_PORT_INITIALIZED, &port->state->port.iflags) &&
+ /*
+ * After we put data in the FIFO, the controller will send
+ * it regardless of the CTS state. Therefore, only use the FIFO
+ * if we don't use flow control.
+ */
+ !(up->port.flags & UPF_CONS_FLOW);
+
+ if (likely(use_fifo))
+ serial8250_console_fifo_write(up, s, count);
+ else
+ uart_console_write(port, s, count, serial8250_console_putchar);
/*
* Finally, wait for transmitter to become empty
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index 33ca98bfa5b3..795e55142d4c 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -22,7 +22,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
-#include <linux/pm_runtime.h>
#include "8250.h"
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index cd93ea6eed65..fdb6c4188695 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -380,7 +380,7 @@ config SERIAL_8250_DW
config SERIAL_8250_EM
tristate "Support for Emma Mobile integrated serial port"
depends on SERIAL_8250 && HAVE_CLK
- depends on (ARM && ARCH_RENESAS) || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
help
Selecting this option will add support for the integrated serial
port hardware found on the Emma Mobile line of processors.
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index dbac90e2e209..a452748c69b2 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -782,7 +782,7 @@ config SERIAL_PMACZILOG_CONSOLE
config SERIAL_CPM
tristate "CPM SCC/SMC serial port support"
- depends on CPM2 || CPM1
+ depends on CPM2 || CPM1 || (PPC32 && COMPILE_TEST)
select SERIAL_CORE
help
This driver supports the SCC and SMC serial ports on Motorola
@@ -806,7 +806,7 @@ config SERIAL_CPM_CONSOLE
config SERIAL_PIC32
tristate "Microchip PIC32 serial support"
- depends on MACH_PIC32
+ depends on MACH_PIC32 || (MIPS && COMPILE_TEST)
select SERIAL_CORE
help
If you have a PIC32, this driver supports the serial ports.
@@ -817,7 +817,7 @@ config SERIAL_PIC32
config SERIAL_PIC32_CONSOLE
bool "PIC32 serial console support"
- depends on SERIAL_PIC32
+ depends on SERIAL_PIC32=y
select SERIAL_CORE_CONSOLE
help
 If you have a PIC32, this driver supports putting a console on one
@@ -1246,7 +1246,7 @@ config SERIAL_XILINX_PS_UART_CONSOLE
config SERIAL_AR933X
tristate "AR933X serial port support"
- depends on HAVE_CLK && ATH79
+ depends on (HAVE_CLK && ATH79) || (MIPS && COMPILE_TEST)
select SERIAL_CORE
select SERIAL_MCTRL_GPIO if GPIOLIB
help
@@ -1442,6 +1442,7 @@ config SERIAL_STM32_CONSOLE
bool "Support for console on STM32"
depends on SERIAL_STM32=y
select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
config SERIAL_MVEBU_UART
bool "Marvell EBU serial port support"
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 1c16345d0a1f..cb791c5149a3 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -168,10 +168,8 @@ static void altera_jtaguart_tx_chars(struct altera_jtaguart *pp)
}
}
- if (pending == 0) {
- pp->imr &= ~ALTERA_JTAGUART_CONTROL_WE_MSK;
- writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
- }
+ if (pending == 0)
+ altera_jtaguart_stop_tx(port);
}
static irqreturn_t altera_jtaguart_interrupt(int irq, void *data)
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 4d11a3e547f9..97ef41cb2721 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -42,8 +42,6 @@
#include <linux/io.h>
#include <linux/acpi.h>
-#include "amba-pl011.h"
-
#define UART_NR 14
#define SERIAL_AMBA_MAJOR 204
@@ -55,6 +53,36 @@
#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX (1 << 16)
+enum {
+ REG_DR,
+ REG_ST_DMAWM,
+ REG_ST_TIMEOUT,
+ REG_FR,
+ REG_LCRH_RX,
+ REG_LCRH_TX,
+ REG_IBRD,
+ REG_FBRD,
+ REG_CR,
+ REG_IFLS,
+ REG_IMSC,
+ REG_RIS,
+ REG_MIS,
+ REG_ICR,
+ REG_DMACR,
+ REG_ST_XFCR,
+ REG_ST_XON1,
+ REG_ST_XON2,
+ REG_ST_XOFF1,
+ REG_ST_XOFF2,
+ REG_ST_ITCR,
+ REG_ST_ITIP,
+ REG_ST_ABCR,
+ REG_ST_ABIMSC,
+
+ /* The size of the array - must be last */
+ REG_ARRAY_SIZE,
+};
+
static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
[REG_DR] = UART01x_DR,
[REG_FR] = UART01x_FR,
@@ -2175,25 +2203,11 @@ static int pl011_rs485_config(struct uart_port *port,
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
- /* pick sane settings if the user hasn't */
- if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
- !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
- rs485->flags |= SER_RS485_RTS_ON_SEND;
- rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
- }
- /* clamp the delays to [0, 100ms] */
- rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
- rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
- memset(rs485->padding, 0, sizeof(rs485->padding));
-
if (port->rs485.flags & SER_RS485_ENABLED)
pl011_rs485_tx_stop(uap);
- /* Set new configuration */
- port->rs485 = *rs485;
-
/* Make sure auto RTS is disabled */
- if (port->rs485.flags & SER_RS485_ENABLED) {
+ if (rs485->flags & SER_RS485_ENABLED) {
u32 cr = pl011_read(uap, REG_CR);
cr &= ~UART011_CR_RTSEN;
diff --git a/drivers/tty/serial/amba-pl011.h b/drivers/tty/serial/amba-pl011.h
deleted file mode 100644
index 077eb12a3472..000000000000
--- a/drivers/tty/serial/amba-pl011.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef AMBA_PL011_H
-#define AMBA_PL011_H
-
-enum {
- REG_DR,
- REG_ST_DMAWM,
- REG_ST_TIMEOUT,
- REG_FR,
- REG_LCRH_RX,
- REG_LCRH_TX,
- REG_IBRD,
- REG_FBRD,
- REG_CR,
- REG_IFLS,
- REG_IMSC,
- REG_RIS,
- REG_MIS,
- REG_ICR,
- REG_DMACR,
- REG_ST_XFCR,
- REG_ST_XON1,
- REG_ST_XON2,
- REG_ST_XOFF1,
- REG_ST_XOFF2,
- REG_ST_ITCR,
- REG_ST_ITIP,
- REG_ST_ABCR,
- REG_ST_ABIMSC,
-
- /* The size of the array - must be last */
- REG_ARRAY_SIZE,
-};
-
-#endif
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 3a45e4fc7993..dd1c7e4bd1c9 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -299,11 +299,9 @@ static int atmel_config_rs485(struct uart_port *port,
/* Resetting serial mode to RS232 (0x0) */
mode &= ~ATMEL_US_USMODE;
- port->rs485 = *rs485conf;
-
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
- if (port->rs485.flags & SER_RS485_RX_DURING_TX)
+ if (rs485conf->flags & SER_RS485_RX_DURING_TX)
atmel_port->tx_done_mask = ATMEL_US_TXRDY;
else
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
index 6113b953ce25..8c582779cf22 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart.h
@@ -19,6 +19,8 @@ struct gpio_desc;
#include "cpm_uart_cpm2.h"
#elif defined(CONFIG_CPM1)
#include "cpm_uart_cpm1.h"
+#elif defined(CONFIG_COMPILE_TEST)
+#include "cpm_uart_cpm2.h"
#endif
#define SERIAL_CPM_MAJOR 204
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index d6d3db9c3b1f..db07d6a5d764 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1247,7 +1247,7 @@ static int cpm_uart_init_port(struct device_node *np,
}
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
-#ifdef CONFIG_CONSOLE_POLL
+#if defined(CONFIG_CONSOLE_POLL) && defined(CONFIG_SERIAL_CPM_CONSOLE)
if (!udbg_port)
#endif
udbg_putc = NULL;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
index 6a1cd03bfe39..108af254e8f3 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
@@ -25,7 +25,6 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/fs_pd.h>
-#include <asm/prom.h>
#include <linux/serial_core.h>
#include <linux/kernel.h>
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index e37a917b9dbb..af951e6a2ef4 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -309,6 +309,8 @@ static void digicolor_uart_set_termios(struct uart_port *port,
case CS8:
default:
config |= UA_CONFIG_CHAR_LEN;
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
break;
}
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index be12fee94db5..0d6e62f6bb07 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -239,8 +239,6 @@
/* IMX lpuart has four extra unused regs located at the beginning */
#define IMX_REG_OFF 0x10
-static DEFINE_IDA(fsl_lpuart_ida);
-
enum lpuart_type {
VF610_LPUART,
LS1021A_LPUART,
@@ -276,7 +274,6 @@ struct lpuart_port {
int rx_dma_rng_buf_len;
unsigned int dma_tx_nents;
wait_queue_head_t dma_wait;
- bool id_allocated;
};
struct lpuart_soc_data {
@@ -1118,7 +1115,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
struct dma_chan *chan = sport->dma_rx_chan;
struct circ_buf *ring = &sport->rx_ring;
unsigned long flags;
- int count = 0, copied;
+ int count, copied;
if (lpuart_is_32(sport)) {
unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
@@ -1378,19 +1375,6 @@ static int lpuart_config_rs485(struct uart_port *port,
modem |= UARTMODEM_TXRTSE;
/*
- * RTS needs to be logic HIGH either during transfer _or_ after
- * transfer, other variants are not supported by the hardware.
- */
-
- if (!(rs485->flags & (SER_RS485_RTS_ON_SEND |
- SER_RS485_RTS_AFTER_SEND)))
- rs485->flags |= SER_RS485_RTS_ON_SEND;
-
- if (rs485->flags & SER_RS485_RTS_ON_SEND &&
- rs485->flags & SER_RS485_RTS_AFTER_SEND)
- rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
-
- /*
* The hardware defaults to RTS logic HIGH while transfer.
* Switch polarity in case RTS shall be logic HIGH
* after transfer.
@@ -1402,9 +1386,6 @@ static int lpuart_config_rs485(struct uart_port *port,
modem |= UARTMODEM_TXRTSPOL;
}
- /* Store the new configuration */
- sport->port.rs485 = *rs485;
-
writeb(modem, sport->port.membase + UARTMODEM);
return 0;
}
@@ -1429,19 +1410,6 @@ static int lpuart32_config_rs485(struct uart_port *port,
modem |= UARTMODEM_TXRTSE;
/*
- * RTS needs to be logic HIGH either during transfer _or_ after
- * transfer, other variants are not supported by the hardware.
- */
-
- if (!(rs485->flags & (SER_RS485_RTS_ON_SEND |
- SER_RS485_RTS_AFTER_SEND)))
- rs485->flags |= SER_RS485_RTS_ON_SEND;
-
- if (rs485->flags & SER_RS485_RTS_ON_SEND &&
- rs485->flags & SER_RS485_RTS_AFTER_SEND)
- rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
-
- /*
* The hardware defaults to RTS logic HIGH while transfer.
* Switch polarity in case RTS shall be logic HIGH
* after transfer.
@@ -1453,9 +1421,6 @@ static int lpuart32_config_rs485(struct uart_port *port,
modem |= UARTMODEM_TXRTSPOL;
}
- /* Store the new configuration */
- sport->port.rs485 = *rs485;
-
lpuart32_write(&sport->port, modem, UARTMODIR);
return 0;
}
@@ -2145,12 +2110,10 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
if (sport->port.rs485.flags & SER_RS485_ENABLED)
termios->c_cflag &= ~CRTSCTS;
- if (termios->c_cflag & CRTSCTS) {
- modem |= (UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
- } else {
- termios->c_cflag &= ~CRTSCTS;
+ if (termios->c_cflag & CRTSCTS)
+ modem |= UARTMODIR_RXRTSE | UARTMODIR_TXCTSE;
+ else
modem &= ~(UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
- }
if (termios->c_cflag & CSTOPB)
bd |= UARTBAUD_SBNS;
@@ -2717,23 +2680,18 @@ static int lpuart_probe(struct platform_device *pdev)
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
- ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
- if (ret < 0) {
- dev_err(&pdev->dev, "port line is full, add device failed\n");
- return ret;
- }
- sport->id_allocated = true;
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+ return ret;
}
if (ret >= ARRAY_SIZE(lpuart_ports)) {
dev_err(&pdev->dev, "serial%d out of range\n", ret);
- ret = -EINVAL;
- goto failed_out_of_range;
+ return -EINVAL;
}
sport->port.line = ret;
ret = lpuart_enable_clks(sport);
if (ret)
- goto failed_clock_enable;
+ return ret;
sport->port.uartclk = lpuart_get_baud_clk_rate(sport);
lpuart_ports[sport->port.line] = sport;
@@ -2781,10 +2739,6 @@ failed_reset:
uart_remove_one_port(&lpuart_reg, &sport->port);
failed_attach_port:
lpuart_disable_clks(sport);
-failed_clock_enable:
-failed_out_of_range:
- if (sport->id_allocated)
- ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
return ret;
}
@@ -2794,9 +2748,6 @@ static int lpuart_remove(struct platform_device *pdev)
uart_remove_one_port(&lpuart_reg, &sport->port);
- if (sport->id_allocated)
- ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
-
lpuart_disable_clks(sport);
if (sport->dma_tx_chan)
@@ -2926,7 +2877,6 @@ static int __init lpuart_serial_init(void)
static void __exit lpuart_serial_exit(void)
{
- ida_destroy(&fsl_lpuart_ida);
platform_driver_unregister(&lpuart_driver);
uart_unregister_driver(&lpuart_reg);
}
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 03a2fe9f4c9a..45df29947fe8 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -19,6 +19,7 @@
#include <linux/fs.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
+#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/major.h>
#include <linux/string.h>
@@ -41,15 +42,273 @@
#include <asm/irq.h>
#include <linux/uaccess.h>
-#include "icom.h"
-
/*#define ICOM_TRACE enable port trace capabilities */
#define ICOM_DRIVER_NAME "icom"
-#define ICOM_VERSION_STR "1.3.1"
#define NR_PORTS 128
-#define ICOM_PORT ((struct icom_port *)port)
-#define to_icom_adapter(d) container_of(d, struct icom_adapter, kref)
+
+static const unsigned int icom_acfg_baud[] = {
+ 300,
+ 600,
+ 900,
+ 1200,
+ 1800,
+ 2400,
+ 3600,
+ 4800,
+ 7200,
+ 9600,
+ 14400,
+ 19200,
+ 28800,
+ 38400,
+ 57600,
+ 76800,
+ 115200,
+ 153600,
+ 230400,
+ 307200,
+ 460800,
+};
+#define BAUD_TABLE_LIMIT (ARRAY_SIZE(icom_acfg_baud) - 1)
+
+struct icom_regs {
+ u32 control; /* Adapter Control Register */
+ u32 interrupt; /* Adapter Interrupt Register */
+ u32 int_mask; /* Adapter Interrupt Mask Reg */
+ u32 int_pri; /* Adapter Interrupt Priority r */
+ u32 int_reg_b; /* Adapter non-masked Interrupt */
+ u32 resvd01;
+ u32 resvd02;
+ u32 resvd03;
+ u32 control_2; /* Adapter Control Register 2 */
+ u32 interrupt_2; /* Adapter Interrupt Register 2 */
+ u32 int_mask_2; /* Adapter Interrupt Mask 2 */
+ u32 int_pri_2; /* Adapter Interrupt Prior 2 */
+ u32 int_reg_2b; /* Adapter non-masked 2 */
+};
+
+struct func_dram {
+ u32 reserved[108]; /* 0-1B0 reserved by personality code */
+ u32 RcvStatusAddr; /* 1B0-1B3 Status Address for Next rcv */
+ u8 RcvStnAddr; /* 1B4 Receive Station Addr */
+ u8 IdleState; /* 1B5 Idle State */
+ u8 IdleMonitor; /* 1B6 Idle Monitor */
+ u8 FlagFillIdleTimer; /* 1B7 Flag Fill Idle Timer */
+ u32 XmitStatusAddr; /* 1B8-1BB Transmit Status Address */
+ u8 StartXmitCmd; /* 1BC Start Xmit Command */
+ u8 HDLCConfigReg; /* 1BD Reserved */
+ u8 CauseCode; /* 1BE Cause code for fatal error */
+ u8 xchar; /* 1BF High priority send */
+ u32 reserved3; /* 1C0-1C3 Reserved */
+ u8 PrevCmdReg; /* 1C4 Reserved */
+ u8 CmdReg; /* 1C5 Command Register */
+ u8 async_config2; /* 1C6 Async Config Byte 2 */
+ u8 async_config3; /* 1C7 Async Config Byte 3 */
+ u8 dce_resvd[20]; /* 1C8-1DB DCE Rsvd */
+ u8 dce_resvd21; /* 1DC DCE Rsvd (21st byte */
+ u8 misc_flags; /* 1DD misc flags */
+#define V2_HARDWARE 0x40
+#define ICOM_HDW_ACTIVE 0x01
+ u8 call_length; /* 1DE Phone #/CFI buff ln */
+ u8 call_length2; /* 1DF Upper byte (unused) */
+ u32 call_addr; /* 1E0-1E3 Phn #/CFI buff addr */
+ u16 timer_value; /* 1E4-1E5 general timer value */
+ u8 timer_command; /* 1E6 general timer cmd */
+ u8 dce_command; /* 1E7 dce command reg */
+ u8 dce_cmd_status; /* 1E8 dce command stat */
+ u8 x21_r1_ioff; /* 1E9 dce ready counter */
+ u8 x21_r0_ioff; /* 1EA dce not ready ctr */
+ u8 x21_ralt_ioff; /* 1EB dce CNR counter */
+ u8 x21_r1_ion; /* 1EC dce ready I on ctr */
+ u8 rsvd_ier; /* 1ED Rsvd for IER (if ne */
+ u8 ier; /* 1EE Interrupt Enable */
+ u8 isr; /* 1EF Input Signal Reg */
+ u8 osr; /* 1F0 Output Signal Reg */
+ u8 reset; /* 1F1 Reset/Reload Reg */
+ u8 disable; /* 1F2 Disable Reg */
+ u8 sync; /* 1F3 Sync Reg */
+ u8 error_stat; /* 1F4 Error Status */
+ u8 cable_id; /* 1F5 Cable ID */
+ u8 cs_length; /* 1F6 CS Load Length */
+ u8 mac_length; /* 1F7 Mac Load Length */
+ u32 cs_load_addr; /* 1F8-1FB Call Load PCI Addr */
+ u32 mac_load_addr; /* 1FC-1FF Mac Load PCI Addr */
+};
+
+/*
+ * adapter defines and structures
+ */
+#define ICOM_CONTROL_START_A 0x00000008
+#define ICOM_CONTROL_STOP_A 0x00000004
+#define ICOM_CONTROL_START_B 0x00000002
+#define ICOM_CONTROL_STOP_B 0x00000001
+#define ICOM_CONTROL_START_C 0x00000008
+#define ICOM_CONTROL_STOP_C 0x00000004
+#define ICOM_CONTROL_START_D 0x00000002
+#define ICOM_CONTROL_STOP_D 0x00000001
+#define ICOM_IRAM_OFFSET 0x1000
+#define ICOM_IRAM_SIZE 0x0C00
+#define ICOM_DCE_IRAM_OFFSET 0x0A00
+#define ICOM_CABLE_ID_VALID 0x01
+#define ICOM_CABLE_ID_MASK 0xF0
+#define ICOM_DISABLE 0x80
+#define CMD_XMIT_RCV_ENABLE 0xC0
+#define CMD_XMIT_ENABLE 0x40
+#define CMD_RCV_DISABLE 0x00
+#define CMD_RCV_ENABLE 0x80
+#define CMD_RESTART 0x01
+#define CMD_HOLD_XMIT 0x02
+#define CMD_SND_BREAK 0x04
+#define RS232_CABLE 0x06
+#define V24_CABLE 0x0E
+#define V35_CABLE 0x0C
+#define V36_CABLE 0x02
+#define NO_CABLE 0x00
+#define START_DOWNLOAD 0x80
+#define ICOM_INT_MASK_PRC_A 0x00003FFF
+#define ICOM_INT_MASK_PRC_B 0x3FFF0000
+#define ICOM_INT_MASK_PRC_C 0x00003FFF
+#define ICOM_INT_MASK_PRC_D 0x3FFF0000
+#define INT_RCV_COMPLETED 0x1000
+#define INT_XMIT_COMPLETED 0x2000
+#define INT_IDLE_DETECT 0x0800
+#define INT_RCV_DISABLED 0x0400
+#define INT_XMIT_DISABLED 0x0200
+#define INT_RCV_XMIT_SHUTDOWN 0x0100
+#define INT_FATAL_ERROR 0x0080
+#define INT_CABLE_PULL 0x0020
+#define INT_SIGNAL_CHANGE 0x0010
+#define HDLC_PPP_PURE_ASYNC 0x02
+#define HDLC_FF_FILL 0x00
+#define HDLC_HDW_FLOW 0x01
+#define START_XMIT 0x80
+#define ICOM_ACFG_DRIVE1 0x20
+#define ICOM_ACFG_NO_PARITY 0x00
+#define ICOM_ACFG_PARITY_ENAB 0x02
+#define ICOM_ACFG_PARITY_ODD 0x01
+#define ICOM_ACFG_8BPC 0x00
+#define ICOM_ACFG_7BPC 0x04
+#define ICOM_ACFG_6BPC 0x08
+#define ICOM_ACFG_5BPC 0x0C
+#define ICOM_ACFG_1STOP_BIT 0x00
+#define ICOM_ACFG_2STOP_BIT 0x10
+#define ICOM_DTR 0x80
+#define ICOM_RTS 0x40
+#define ICOM_RI 0x08
+#define ICOM_DSR 0x80
+#define ICOM_DCD 0x20
+#define ICOM_CTS 0x40
+
+#define NUM_XBUFFS 1
+#define NUM_RBUFFS 2
+#define RCV_BUFF_SZ 0x0200
+#define XMIT_BUFF_SZ 0x1000
+struct statusArea {
+ /**********************************************/
+ /* Transmit Status Area */
+ /**********************************************/
+ struct xmit_status_area{
+ __le32 leNext; /* Next entry in Little Endian on Adapter */
+ __le32 leNextASD;
+ __le32 leBuffer; /* Buffer for entry in LE for Adapter */
+ __le16 leLengthASD;
+ __le16 leOffsetASD;
+ __le16 leLength; /* Length of data in segment */
+ __le16 flags;
+#define SA_FLAGS_DONE 0x0080 /* Done with Segment */
+#define SA_FLAGS_CONTINUED 0x8000 /* More Segments */
+#define SA_FLAGS_IDLE 0x4000 /* Mark IDLE after frm */
+#define SA_FLAGS_READY_TO_XMIT 0x0800
+#define SA_FLAGS_STAT_MASK 0x007F
+ } xmit[NUM_XBUFFS];
+
+ /**********************************************/
+ /* Receive Status Area */
+ /**********************************************/
+ struct {
+ __le32 leNext; /* Next entry in Little Endian on Adapter */
+ __le32 leNextASD;
+ __le32 leBuffer; /* Buffer for entry in LE for Adapter */
+ __le16 WorkingLength; /* size of segment */
+ __le16 reserv01;
+ __le16 leLength; /* Length of data in segment */
+ __le16 flags;
+#define SA_FL_RCV_DONE 0x0010 /* Data ready */
+#define SA_FLAGS_OVERRUN 0x0040
+#define SA_FLAGS_PARITY_ERROR 0x0080
+#define SA_FLAGS_FRAME_ERROR 0x0001
+#define SA_FLAGS_FRAME_TRUNC 0x0002
+#define SA_FLAGS_BREAK_DET 0x0004 /* set conditionally by device driver, not hardware */
+#define SA_FLAGS_RCV_MASK 0xFFE6
+ } rcv[NUM_RBUFFS];
+};
+
+struct icom_adapter;
+
+
+#define ICOM_MAJOR 243
+#define ICOM_MINOR_START 0
+
+struct icom_port {
+ struct uart_port uart_port;
+ unsigned char cable_id;
+ unsigned char read_status_mask;
+ unsigned char ignore_status_mask;
+ void __iomem * int_reg;
+ struct icom_regs __iomem *global_reg;
+ struct func_dram __iomem *dram;
+ int port;
+ struct statusArea *statStg;
+ dma_addr_t statStg_pci;
+ __le32 *xmitRestart;
+ dma_addr_t xmitRestart_pci;
+ unsigned char *xmit_buf;
+ dma_addr_t xmit_buf_pci;
+ unsigned char *recv_buf;
+ dma_addr_t recv_buf_pci;
+ int next_rcv;
+ int status;
+#define ICOM_PORT_ACTIVE 1 /* Port exists. */
+#define ICOM_PORT_OFF 0 /* Port does not exist. */
+ struct icom_adapter *adapter;
+};
+
+struct icom_adapter {
+ void __iomem * base_addr;
+ unsigned long base_addr_pci;
+ struct pci_dev *pci_dev;
+ struct icom_port port_info[4];
+ int index;
+ int version;
+#define ADAPTER_V1 0x0001
+#define ADAPTER_V2 0x0002
+ u32 subsystem_id;
+#define FOUR_PORT_MODEL 0x0252
+#define V2_TWO_PORTS_RVX 0x021A
+#define V2_ONE_PORT_RVX_ONE_PORT_IMBED_MDM 0x0251
+ int numb_ports;
+ struct list_head icom_adapter_entry;
+ struct kref kref;
+};
+
+/* prototype */
+extern void iCom_sercons_init(void);
+
+struct lookup_proc_table {
+ u32 __iomem *global_control_reg;
+ unsigned long processor_id;
+};
+
+struct lookup_int_table {
+ u32 __iomem *global_int_mask;
+ unsigned long processor_id;
+};
+
+static inline struct icom_port *to_icom_port(struct uart_port *port)
+{
+ return container_of(port, struct icom_port, uart_port);
+}
static const struct pci_device_id icom_pci_table[] = {
{
@@ -222,7 +481,7 @@ static int get_port_memory(struct icom_port *icom_port)
if (index < (NUM_XBUFFS - 1)) {
memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area));
icom_port->statStg->xmit[index].leLengthASD =
- (unsigned short int) cpu_to_le16(XMIT_BUFF_SZ);
+ cpu_to_le16(XMIT_BUFF_SZ);
trace(icom_port, "FOD_ADDR", stgAddr);
trace(icom_port, "FOD_XBUFF",
(unsigned long) icom_port->xmit_buf);
@@ -231,7 +490,7 @@ static int get_port_memory(struct icom_port *icom_port)
} else if (index == (NUM_XBUFFS - 1)) {
memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area));
icom_port->statStg->xmit[index].leLengthASD =
- (unsigned short int) cpu_to_le16(XMIT_BUFF_SZ);
+ cpu_to_le16(XMIT_BUFF_SZ);
trace(icom_port, "FOD_XBUFF",
(unsigned long) icom_port->xmit_buf);
icom_port->statStg->xmit[index].leBuffer =
@@ -249,7 +508,7 @@ static int get_port_memory(struct icom_port *icom_port)
stgAddr = stgAddr + sizeof(icom_port->statStg->rcv[0]);
icom_port->statStg->rcv[index].leLength = 0;
icom_port->statStg->rcv[index].WorkingLength =
- (unsigned short int) cpu_to_le16(RCV_BUFF_SZ);
+ cpu_to_le16(RCV_BUFF_SZ);
if (index < (NUM_RBUFFS - 1) ) {
offset = stgAddr - (unsigned long) icom_port->statStg;
icom_port->statStg->rcv[index].leNext =
@@ -617,16 +876,17 @@ unlock:
static int icom_write(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned long data_count;
unsigned char cmdReg;
unsigned long offset;
int temp_tail = port->state->xmit.tail;
- trace(ICOM_PORT, "WRITE", 0);
+ trace(icom_port, "WRITE", 0);
- if (cpu_to_le16(ICOM_PORT->statStg->xmit[0].flags) &
+ if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
SA_FLAGS_READY_TO_XMIT) {
- trace(ICOM_PORT, "WRITE_FULL", 0);
+ trace(icom_port, "WRITE_FULL", 0);
return 0;
}
@@ -634,7 +894,7 @@ static int icom_write(struct uart_port *port)
while ((port->state->xmit.head != temp_tail) &&
(data_count <= XMIT_BUFF_SZ)) {
- ICOM_PORT->xmit_buf[data_count++] =
+ icom_port->xmit_buf[data_count++] =
port->state->xmit.buf[temp_tail];
temp_tail++;
@@ -642,22 +902,22 @@ static int icom_write(struct uart_port *port)
}
if (data_count) {
- ICOM_PORT->statStg->xmit[0].flags =
+ icom_port->statStg->xmit[0].flags =
cpu_to_le16(SA_FLAGS_READY_TO_XMIT);
- ICOM_PORT->statStg->xmit[0].leLength =
+ icom_port->statStg->xmit[0].leLength =
cpu_to_le16(data_count);
offset =
- (unsigned long) &ICOM_PORT->statStg->xmit[0] -
- (unsigned long) ICOM_PORT->statStg;
- *ICOM_PORT->xmitRestart =
- cpu_to_le32(ICOM_PORT->statStg_pci + offset);
- cmdReg = readb(&ICOM_PORT->dram->CmdReg);
+ (unsigned long) &icom_port->statStg->xmit[0] -
+ (unsigned long) icom_port->statStg;
+ *icom_port->xmitRestart =
+ cpu_to_le32(icom_port->statStg_pci + offset);
+ cmdReg = readb(&icom_port->dram->CmdReg);
writeb(cmdReg | CMD_XMIT_RCV_ENABLE,
- &ICOM_PORT->dram->CmdReg);
- writeb(START_XMIT, &ICOM_PORT->dram->StartXmitCmd);
- trace(ICOM_PORT, "WRITE_START", data_count);
+ &icom_port->dram->CmdReg);
+ writeb(START_XMIT, &icom_port->dram->StartXmitCmd);
+ trace(icom_port, "WRITE_START", data_count);
/* write flush */
- readb(&ICOM_PORT->dram->StartXmitCmd);
+ readb(&icom_port->dram->StartXmitCmd);
}
return data_count;
@@ -696,8 +956,7 @@ static inline void check_modem_status(struct icom_port *icom_port)
static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
{
- unsigned short int count;
- int i;
+ u16 count, i;
if (port_int_reg & (INT_XMIT_COMPLETED)) {
trace(icom_port, "XMIT_COMPLETE", 0);
@@ -706,8 +965,7 @@ static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
icom_port->statStg->xmit[0].flags &=
cpu_to_le16(~SA_FLAGS_READY_TO_XMIT);
- count = (unsigned short int)
- cpu_to_le16(icom_port->statStg->xmit[0].leLength);
+ count = le16_to_cpu(icom_port->statStg->xmit[0].leLength);
icom_port->uart_port.icount.tx += count;
for (i=0; i<count &&
@@ -729,7 +987,7 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
{
short int count, rcv_buff;
struct tty_port *port = &icom_port->uart_port.state->port;
- unsigned short int status;
+ u16 status;
struct uart_icount *icount;
unsigned long offset;
unsigned char flag;
@@ -737,19 +995,18 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
trace(icom_port, "RCV_COMPLETE", 0);
rcv_buff = icom_port->next_rcv;
- status = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].flags);
+ status = le16_to_cpu(icom_port->statStg->rcv[rcv_buff].flags);
while (status & SA_FL_RCV_DONE) {
int first = -1;
trace(icom_port, "FID_STATUS", status);
- count = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].leLength);
+ count = le16_to_cpu(icom_port->statStg->rcv[rcv_buff].leLength);
trace(icom_port, "RCV_COUNT", count);
trace(icom_port, "REAL_COUNT", count);
- offset =
- cpu_to_le32(icom_port->statStg->rcv[rcv_buff].leBuffer) -
+ offset = le32_to_cpu(icom_port->statStg->rcv[rcv_buff].leBuffer) -
icom_port->recv_buf_pci;
/* Block copy all but the last byte as this may have status */
@@ -819,13 +1076,13 @@ ignore_char:
icom_port->statStg->rcv[rcv_buff].flags = 0;
icom_port->statStg->rcv[rcv_buff].leLength = 0;
icom_port->statStg->rcv[rcv_buff].WorkingLength =
- (unsigned short int) cpu_to_le16(RCV_BUFF_SZ);
+ cpu_to_le16(RCV_BUFF_SZ);
rcv_buff++;
if (rcv_buff == NUM_RBUFFS)
rcv_buff = 0;
- status = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].flags);
+ status = le16_to_cpu(icom_port->statStg->rcv[rcv_buff].flags);
}
icom_port->next_rcv = rcv_buff;
@@ -925,11 +1182,12 @@ static irqreturn_t icom_interrupt(int irq, void *dev_id)
*/
static unsigned int icom_tx_empty(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
int ret;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- if (cpu_to_le16(ICOM_PORT->statStg->xmit[0].flags) &
+ if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
SA_FLAGS_READY_TO_XMIT)
ret = TIOCSER_TEMT;
else
@@ -941,38 +1199,40 @@ static unsigned int icom_tx_empty(struct uart_port *port)
static void icom_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char local_osr;
- trace(ICOM_PORT, "SET_MODEM", 0);
- local_osr = readb(&ICOM_PORT->dram->osr);
+ trace(icom_port, "SET_MODEM", 0);
+ local_osr = readb(&icom_port->dram->osr);
if (mctrl & TIOCM_RTS) {
- trace(ICOM_PORT, "RAISE_RTS", 0);
+ trace(icom_port, "RAISE_RTS", 0);
local_osr |= ICOM_RTS;
} else {
- trace(ICOM_PORT, "LOWER_RTS", 0);
+ trace(icom_port, "LOWER_RTS", 0);
local_osr &= ~ICOM_RTS;
}
if (mctrl & TIOCM_DTR) {
- trace(ICOM_PORT, "RAISE_DTR", 0);
+ trace(icom_port, "RAISE_DTR", 0);
local_osr |= ICOM_DTR;
} else {
- trace(ICOM_PORT, "LOWER_DTR", 0);
+ trace(icom_port, "LOWER_DTR", 0);
local_osr &= ~ICOM_DTR;
}
- writeb(local_osr, &ICOM_PORT->dram->osr);
+ writeb(local_osr, &icom_port->dram->osr);
}
static unsigned int icom_get_mctrl(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char status;
unsigned int result;
- trace(ICOM_PORT, "GET_MODEM", 0);
+ trace(icom_port, "GET_MODEM", 0);
- status = readb(&ICOM_PORT->dram->isr);
+ status = readb(&icom_port->dram->isr);
result = ((status & ICOM_DCD) ? TIOCM_CAR : 0)
| ((status & ICOM_RI) ? TIOCM_RNG : 0)
@@ -983,44 +1243,47 @@ static unsigned int icom_get_mctrl(struct uart_port *port)
static void icom_stop_tx(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char cmdReg;
- trace(ICOM_PORT, "STOP", 0);
- cmdReg = readb(&ICOM_PORT->dram->CmdReg);
- writeb(cmdReg | CMD_HOLD_XMIT, &ICOM_PORT->dram->CmdReg);
+ trace(icom_port, "STOP", 0);
+ cmdReg = readb(&icom_port->dram->CmdReg);
+ writeb(cmdReg | CMD_HOLD_XMIT, &icom_port->dram->CmdReg);
}
static void icom_start_tx(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char cmdReg;
- trace(ICOM_PORT, "START", 0);
- cmdReg = readb(&ICOM_PORT->dram->CmdReg);
+ trace(icom_port, "START", 0);
+ cmdReg = readb(&icom_port->dram->CmdReg);
if ((cmdReg & CMD_HOLD_XMIT) == CMD_HOLD_XMIT)
writeb(cmdReg & ~CMD_HOLD_XMIT,
- &ICOM_PORT->dram->CmdReg);
+ &icom_port->dram->CmdReg);
icom_write(port);
}
static void icom_send_xchar(struct uart_port *port, char ch)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char xdata;
int index;
unsigned long flags;
- trace(ICOM_PORT, "SEND_XCHAR", ch);
+ trace(icom_port, "SEND_XCHAR", ch);
/* wait .1 sec to send char */
for (index = 0; index < 10; index++) {
spin_lock_irqsave(&port->lock, flags);
- xdata = readb(&ICOM_PORT->dram->xchar);
+ xdata = readb(&icom_port->dram->xchar);
if (xdata == 0x00) {
- trace(ICOM_PORT, "QUICK_WRITE", 0);
- writeb(ch, &ICOM_PORT->dram->xchar);
+ trace(icom_port, "QUICK_WRITE", 0);
+ writeb(ch, &icom_port->dram->xchar);
/* flush write operation */
- xdata = readb(&ICOM_PORT->dram->xchar);
+ xdata = readb(&icom_port->dram->xchar);
spin_unlock_irqrestore(&port->lock, flags);
break;
}
@@ -1031,38 +1294,41 @@ static void icom_send_xchar(struct uart_port *port, char ch)
static void icom_stop_rx(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char cmdReg;
- cmdReg = readb(&ICOM_PORT->dram->CmdReg);
- writeb(cmdReg & ~CMD_RCV_ENABLE, &ICOM_PORT->dram->CmdReg);
+ cmdReg = readb(&icom_port->dram->CmdReg);
+ writeb(cmdReg & ~CMD_RCV_ENABLE, &icom_port->dram->CmdReg);
}
static void icom_break(struct uart_port *port, int break_state)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char cmdReg;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- trace(ICOM_PORT, "BREAK", 0);
- cmdReg = readb(&ICOM_PORT->dram->CmdReg);
+ trace(icom_port, "BREAK", 0);
+ cmdReg = readb(&icom_port->dram->CmdReg);
if (break_state == -1) {
- writeb(cmdReg | CMD_SND_BREAK, &ICOM_PORT->dram->CmdReg);
+ writeb(cmdReg | CMD_SND_BREAK, &icom_port->dram->CmdReg);
} else {
- writeb(cmdReg & ~CMD_SND_BREAK, &ICOM_PORT->dram->CmdReg);
+ writeb(cmdReg & ~CMD_SND_BREAK, &icom_port->dram->CmdReg);
}
spin_unlock_irqrestore(&port->lock, flags);
}
static int icom_open(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
int retval;
- kref_get(&ICOM_PORT->adapter->kref);
- retval = startup(ICOM_PORT);
+ kref_get(&icom_port->adapter->kref);
+ retval = startup(icom_port);
if (retval) {
- kref_put(&ICOM_PORT->adapter->kref, icom_kref_release);
- trace(ICOM_PORT, "STARTUP_ERROR", 0);
+ kref_put(&icom_port->adapter->kref, icom_kref_release);
+ trace(icom_port, "STARTUP_ERROR", 0);
return retval;
}
@@ -1071,23 +1337,25 @@ static int icom_open(struct uart_port *port)
static void icom_close(struct uart_port *port)
{
+ struct icom_port *icom_port = to_icom_port(port);
unsigned char cmdReg;
- trace(ICOM_PORT, "CLOSE", 0);
+ trace(icom_port, "CLOSE", 0);
/* stop receiver */
- cmdReg = readb(&ICOM_PORT->dram->CmdReg);
- writeb(cmdReg & ~CMD_RCV_ENABLE, &ICOM_PORT->dram->CmdReg);
+ cmdReg = readb(&icom_port->dram->CmdReg);
+ writeb(cmdReg & ~CMD_RCV_ENABLE, &icom_port->dram->CmdReg);
- shutdown(ICOM_PORT);
+ shutdown(icom_port);
- kref_put(&ICOM_PORT->adapter->kref, icom_kref_release);
+ kref_put(&icom_port->adapter->kref, icom_kref_release);
}
static void icom_set_termios(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old_termios)
{
+ struct icom_port *icom_port = to_icom_port(port);
int baud;
unsigned cflag, iflag;
char new_config2;
@@ -1099,7 +1367,7 @@ static void icom_set_termios(struct uart_port *port,
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- trace(ICOM_PORT, "CHANGE_SPEED", 0);
+ trace(icom_port, "CHANGE_SPEED", 0);
cflag = termios->c_cflag;
iflag = termios->c_iflag;
@@ -1130,12 +1398,12 @@ static void icom_set_termios(struct uart_port *port,
if (cflag & PARENB) {
/* parity bit enabled */
new_config2 |= ICOM_ACFG_PARITY_ENAB;
- trace(ICOM_PORT, "PARENB", 0);
+ trace(icom_port, "PARENB", 0);
}
if (cflag & PARODD) {
/* odd parity */
new_config2 |= ICOM_ACFG_PARITY_ODD;
- trace(ICOM_PORT, "PARODD", 0);
+ trace(icom_port, "PARODD", 0);
}
/* Determine divisor based on baud rate */
@@ -1155,100 +1423,99 @@ static void icom_set_termios(struct uart_port *port,
uart_update_timeout(port, cflag, baud);
/* CTS flow control flag and modem status interrupts */
- tmp_byte = readb(&(ICOM_PORT->dram->HDLCConfigReg));
+ tmp_byte = readb(&(icom_port->dram->HDLCConfigReg));
if (cflag & CRTSCTS)
tmp_byte |= HDLC_HDW_FLOW;
else
tmp_byte &= ~HDLC_HDW_FLOW;
- writeb(tmp_byte, &(ICOM_PORT->dram->HDLCConfigReg));
+ writeb(tmp_byte, &(icom_port->dram->HDLCConfigReg));
/*
* Set up parity check flag
*/
- ICOM_PORT->read_status_mask = SA_FLAGS_OVERRUN | SA_FL_RCV_DONE;
+ icom_port->read_status_mask = SA_FLAGS_OVERRUN | SA_FL_RCV_DONE;
if (iflag & INPCK)
- ICOM_PORT->read_status_mask |=
+ icom_port->read_status_mask |=
SA_FLAGS_FRAME_ERROR | SA_FLAGS_PARITY_ERROR;
if ((iflag & BRKINT) || (iflag & PARMRK))
- ICOM_PORT->read_status_mask |= SA_FLAGS_BREAK_DET;
+ icom_port->read_status_mask |= SA_FLAGS_BREAK_DET;
/*
* Characters to ignore
*/
- ICOM_PORT->ignore_status_mask = 0;
+ icom_port->ignore_status_mask = 0;
if (iflag & IGNPAR)
- ICOM_PORT->ignore_status_mask |=
+ icom_port->ignore_status_mask |=
SA_FLAGS_PARITY_ERROR | SA_FLAGS_FRAME_ERROR;
if (iflag & IGNBRK) {
- ICOM_PORT->ignore_status_mask |= SA_FLAGS_BREAK_DET;
+ icom_port->ignore_status_mask |= SA_FLAGS_BREAK_DET;
/*
* If we're ignore parity and break indicators, ignore
* overruns too. (For real raw support).
*/
if (iflag & IGNPAR)
- ICOM_PORT->ignore_status_mask |= SA_FLAGS_OVERRUN;
+ icom_port->ignore_status_mask |= SA_FLAGS_OVERRUN;
}
/*
* !!! ignore all characters if CREAD is not set
*/
if ((cflag & CREAD) == 0)
- ICOM_PORT->ignore_status_mask |= SA_FL_RCV_DONE;
+ icom_port->ignore_status_mask |= SA_FL_RCV_DONE;
/* Turn off Receiver to prepare for reset */
- writeb(CMD_RCV_DISABLE, &ICOM_PORT->dram->CmdReg);
+ writeb(CMD_RCV_DISABLE, &icom_port->dram->CmdReg);
for (index = 0; index < 10; index++) {
- if (readb(&ICOM_PORT->dram->PrevCmdReg) == 0x00) {
+ if (readb(&icom_port->dram->PrevCmdReg) == 0x00) {
break;
}
}
/* clear all current buffers of data */
for (rcv_buff = 0; rcv_buff < NUM_RBUFFS; rcv_buff++) {
- ICOM_PORT->statStg->rcv[rcv_buff].flags = 0;
- ICOM_PORT->statStg->rcv[rcv_buff].leLength = 0;
- ICOM_PORT->statStg->rcv[rcv_buff].WorkingLength =
- (unsigned short int) cpu_to_le16(RCV_BUFF_SZ);
+ icom_port->statStg->rcv[rcv_buff].flags = 0;
+ icom_port->statStg->rcv[rcv_buff].leLength = 0;
+ icom_port->statStg->rcv[rcv_buff].WorkingLength =
+ cpu_to_le16(RCV_BUFF_SZ);
}
for (xmit_buff = 0; xmit_buff < NUM_XBUFFS; xmit_buff++) {
- ICOM_PORT->statStg->xmit[xmit_buff].flags = 0;
+ icom_port->statStg->xmit[xmit_buff].flags = 0;
}
/* activate changes and start xmit and receiver here */
/* Enable the receiver */
- writeb(new_config3, &(ICOM_PORT->dram->async_config3));
- writeb(new_config2, &(ICOM_PORT->dram->async_config2));
- tmp_byte = readb(&(ICOM_PORT->dram->HDLCConfigReg));
+ writeb(new_config3, &(icom_port->dram->async_config3));
+ writeb(new_config2, &(icom_port->dram->async_config2));
+ tmp_byte = readb(&(icom_port->dram->HDLCConfigReg));
tmp_byte |= HDLC_PPP_PURE_ASYNC | HDLC_FF_FILL;
- writeb(tmp_byte, &(ICOM_PORT->dram->HDLCConfigReg));
- writeb(0x04, &(ICOM_PORT->dram->FlagFillIdleTimer)); /* 0.5 seconds */
- writeb(0xFF, &(ICOM_PORT->dram->ier)); /* enable modem signal interrupts */
+ writeb(tmp_byte, &(icom_port->dram->HDLCConfigReg));
+ writeb(0x04, &(icom_port->dram->FlagFillIdleTimer)); /* 0.5 seconds */
+ writeb(0xFF, &(icom_port->dram->ier)); /* enable modem signal interrupts */
/* reset processor */
- writeb(CMD_RESTART, &ICOM_PORT->dram->CmdReg);
+ writeb(CMD_RESTART, &icom_port->dram->CmdReg);
for (index = 0; index < 10; index++) {
- if (readb(&ICOM_PORT->dram->CmdReg) == 0x00) {
+ if (readb(&icom_port->dram->CmdReg) == 0x00) {
break;
}
}
/* Enable Transmitter and Receiver */
offset =
- (unsigned long) &ICOM_PORT->statStg->rcv[0] -
- (unsigned long) ICOM_PORT->statStg;
- writel(ICOM_PORT->statStg_pci + offset,
- &ICOM_PORT->dram->RcvStatusAddr);
- ICOM_PORT->next_rcv = 0;
- ICOM_PORT->put_length = 0;
- *ICOM_PORT->xmitRestart = 0;
- writel(ICOM_PORT->xmitRestart_pci,
- &ICOM_PORT->dram->XmitStatusAddr);
- trace(ICOM_PORT, "XR_ENAB", 0);
- writeb(CMD_XMIT_RCV_ENABLE, &ICOM_PORT->dram->CmdReg);
+ (unsigned long) &icom_port->statStg->rcv[0] -
+ (unsigned long) icom_port->statStg;
+ writel(icom_port->statStg_pci + offset,
+ &icom_port->dram->RcvStatusAddr);
+ icom_port->next_rcv = 0;
+ *icom_port->xmitRestart = 0;
+ writel(icom_port->xmitRestart_pci,
+ &icom_port->dram->XmitStatusAddr);
+ trace(icom_port, "XR_ENAB", 0);
+ writeb(CMD_XMIT_RCV_ENABLE, &icom_port->dram->CmdReg);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -1258,15 +1525,6 @@ static const char *icom_type(struct uart_port *port)
return "icom";
}
-static void icom_release_port(struct uart_port *port)
-{
-}
-
-static int icom_request_port(struct uart_port *port)
-{
- return 0;
-}
-
static void icom_config_port(struct uart_port *port, int flags)
{
port->type = PORT_ICOM;
@@ -1285,8 +1543,6 @@ static const struct uart_ops icom_ops = {
.shutdown = icom_close,
.set_termios = icom_set_termios,
.type = icom_type,
- .release_port = icom_release_port,
- .request_port = icom_request_port,
.config_port = icom_config_port,
};
@@ -1315,7 +1571,6 @@ static int icom_init_ports(struct icom_adapter *icom_adapter)
icom_port = &icom_adapter->port_info[i];
icom_port->port = i;
icom_port->status = ICOM_PORT_ACTIVE;
- icom_port->imbed_modem = ICOM_UNKNOWN;
}
} else {
if (subsystem_id == PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL) {
@@ -1326,26 +1581,15 @@ static int icom_init_ports(struct icom_adapter *icom_adapter)
icom_port->port = i;
icom_port->status = ICOM_PORT_ACTIVE;
- icom_port->imbed_modem = ICOM_IMBED_MODEM;
}
} else {
icom_adapter->numb_ports = 4;
icom_adapter->port_info[0].port = 0;
icom_adapter->port_info[0].status = ICOM_PORT_ACTIVE;
-
- if (subsystem_id ==
- PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM) {
- icom_adapter->port_info[0].imbed_modem = ICOM_IMBED_MODEM;
- } else {
- icom_adapter->port_info[0].imbed_modem = ICOM_RVX;
- }
-
icom_adapter->port_info[1].status = ICOM_PORT_OFF;
-
icom_adapter->port_info[2].port = 2;
icom_adapter->port_info[2].status = ICOM_PORT_ACTIVE;
- icom_adapter->port_info[2].imbed_modem = ICOM_RVX;
icom_adapter->port_info[3].status = ICOM_PORT_OFF;
}
}
@@ -1401,7 +1645,6 @@ static int icom_alloc_adapter(struct icom_adapter
int adapter_count = 0;
struct icom_adapter *icom_adapter;
struct icom_adapter *cur_adapter_entry;
- struct list_head *tmp;
icom_adapter = kzalloc(sizeof(struct icom_adapter), GFP_KERNEL);
@@ -1409,10 +1652,8 @@ static int icom_alloc_adapter(struct icom_adapter
return -ENOMEM;
}
- list_for_each(tmp, &icom_adapter_head) {
- cur_adapter_entry =
- list_entry(tmp, struct icom_adapter,
- icom_adapter_entry);
+ list_for_each_entry(cur_adapter_entry, &icom_adapter_head,
+ icom_adapter_entry) {
if (cur_adapter_entry->index != adapter_count) {
break;
}
@@ -1420,7 +1661,8 @@ static int icom_alloc_adapter(struct icom_adapter
}
icom_adapter->index = adapter_count;
- list_add_tail(&icom_adapter->icom_adapter_entry, tmp);
+ list_add_tail(&icom_adapter->icom_adapter_entry,
+ &cur_adapter_entry->icom_adapter_entry);
*icom_adapter_ref = icom_adapter;
return 0;
@@ -1432,8 +1674,10 @@ static void icom_free_adapter(struct icom_adapter *icom_adapter)
kfree(icom_adapter);
}
-static void icom_remove_adapter(struct icom_adapter *icom_adapter)
+static void icom_kref_release(struct kref *kref)
{
+ struct icom_adapter *icom_adapter = container_of(kref,
+ struct icom_adapter, kref);
struct icom_port *icom_port;
int index;
@@ -1466,14 +1710,6 @@ static void icom_remove_adapter(struct icom_adapter *icom_adapter)
icom_free_adapter(icom_adapter);
}
-static void icom_kref_release(struct kref *kref)
-{
- struct icom_adapter *icom_adapter;
-
- icom_adapter = to_icom_adapter(kref);
- icom_remove_adapter(icom_adapter);
-}
-
static int icom_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
@@ -1501,7 +1737,7 @@ static int icom_probe(struct pci_dev *dev,
retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg);
if (retval) {
dev_err(&dev->dev, "PCI Config read FAILED\n");
- return retval;
+ goto probe_exit0;
}
pci_write_config_dword(dev, PCI_COMMAND,
@@ -1589,11 +1825,9 @@ probe_exit0:
static void icom_remove(struct pci_dev *dev)
{
struct icom_adapter *icom_adapter;
- struct list_head *tmp;
- list_for_each(tmp, &icom_adapter_head) {
- icom_adapter = list_entry(tmp, struct icom_adapter,
- icom_adapter_entry);
+ list_for_each_entry(icom_adapter, &icom_adapter_head,
+ icom_adapter_entry) {
if (icom_adapter->pci_dev == dev) {
kref_put(&icom_adapter->kref, icom_kref_release);
return;
diff --git a/drivers/tty/serial/icom.h b/drivers/tty/serial/icom.h
deleted file mode 100644
index 26e3aa7b01e2..000000000000
--- a/drivers/tty/serial/icom.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * icom.h
- *
- * Copyright (C) 2001 Michael Anderson, IBM Corporation
- *
- * Serial device driver include file.
- */
-
-#include <linux/serial_core.h>
-
-#define BAUD_TABLE_LIMIT ((sizeof(icom_acfg_baud)/sizeof(int)) - 1)
-static int icom_acfg_baud[] = {
- 300,
- 600,
- 900,
- 1200,
- 1800,
- 2400,
- 3600,
- 4800,
- 7200,
- 9600,
- 14400,
- 19200,
- 28800,
- 38400,
- 57600,
- 76800,
- 115200,
- 153600,
- 230400,
- 307200,
- 460800,
-};
-
-struct icom_regs {
- u32 control; /* Adapter Control Register */
- u32 interrupt; /* Adapter Interrupt Register */
- u32 int_mask; /* Adapter Interrupt Mask Reg */
- u32 int_pri; /* Adapter Interrupt Priority r */
- u32 int_reg_b; /* Adapter non-masked Interrupt */
- u32 resvd01;
- u32 resvd02;
- u32 resvd03;
- u32 control_2; /* Adapter Control Register 2 */
- u32 interrupt_2; /* Adapter Interrupt Register 2 */
- u32 int_mask_2; /* Adapter Interrupt Mask 2 */
- u32 int_pri_2; /* Adapter Interrupt Prior 2 */
- u32 int_reg_2b; /* Adapter non-masked 2 */
-};
-
-struct func_dram {
- u32 reserved[108]; /* 0-1B0 reserved by personality code */
- u32 RcvStatusAddr; /* 1B0-1B3 Status Address for Next rcv */
- u8 RcvStnAddr; /* 1B4 Receive Station Addr */
- u8 IdleState; /* 1B5 Idle State */
- u8 IdleMonitor; /* 1B6 Idle Monitor */
- u8 FlagFillIdleTimer; /* 1B7 Flag Fill Idle Timer */
- u32 XmitStatusAddr; /* 1B8-1BB Transmit Status Address */
- u8 StartXmitCmd; /* 1BC Start Xmit Command */
- u8 HDLCConfigReg; /* 1BD Reserved */
- u8 CauseCode; /* 1BE Cause code for fatal error */
- u8 xchar; /* 1BF High priority send */
- u32 reserved3; /* 1C0-1C3 Reserved */
- u8 PrevCmdReg; /* 1C4 Reserved */
- u8 CmdReg; /* 1C5 Command Register */
- u8 async_config2; /* 1C6 Async Config Byte 2 */
- u8 async_config3; /* 1C7 Async Config Byte 3 */
- u8 dce_resvd[20]; /* 1C8-1DB DCE Rsvd */
- u8 dce_resvd21; /* 1DC DCE Rsvd (21st byte */
- u8 misc_flags; /* 1DD misc flags */
-#define V2_HARDWARE 0x40
-#define ICOM_HDW_ACTIVE 0x01
- u8 call_length; /* 1DE Phone #/CFI buff ln */
- u8 call_length2; /* 1DF Upper byte (unused) */
- u32 call_addr; /* 1E0-1E3 Phn #/CFI buff addr */
- u16 timer_value; /* 1E4-1E5 general timer value */
- u8 timer_command; /* 1E6 general timer cmd */
- u8 dce_command; /* 1E7 dce command reg */
- u8 dce_cmd_status; /* 1E8 dce command stat */
- u8 x21_r1_ioff; /* 1E9 dce ready counter */
- u8 x21_r0_ioff; /* 1EA dce not ready ctr */
- u8 x21_ralt_ioff; /* 1EB dce CNR counter */
- u8 x21_r1_ion; /* 1EC dce ready I on ctr */
- u8 rsvd_ier; /* 1ED Rsvd for IER (if ne */
- u8 ier; /* 1EE Interrupt Enable */
- u8 isr; /* 1EF Input Signal Reg */
- u8 osr; /* 1F0 Output Signal Reg */
- u8 reset; /* 1F1 Reset/Reload Reg */
- u8 disable; /* 1F2 Disable Reg */
- u8 sync; /* 1F3 Sync Reg */
- u8 error_stat; /* 1F4 Error Status */
- u8 cable_id; /* 1F5 Cable ID */
- u8 cs_length; /* 1F6 CS Load Length */
- u8 mac_length; /* 1F7 Mac Load Length */
- u32 cs_load_addr; /* 1F8-1FB Call Load PCI Addr */
- u32 mac_load_addr; /* 1FC-1FF Mac Load PCI Addr */
-};
-
-/*
- * adapter defines and structures
- */
-#define ICOM_CONTROL_START_A 0x00000008
-#define ICOM_CONTROL_STOP_A 0x00000004
-#define ICOM_CONTROL_START_B 0x00000002
-#define ICOM_CONTROL_STOP_B 0x00000001
-#define ICOM_CONTROL_START_C 0x00000008
-#define ICOM_CONTROL_STOP_C 0x00000004
-#define ICOM_CONTROL_START_D 0x00000002
-#define ICOM_CONTROL_STOP_D 0x00000001
-#define ICOM_IRAM_OFFSET 0x1000
-#define ICOM_IRAM_SIZE 0x0C00
-#define ICOM_DCE_IRAM_OFFSET 0x0A00
-#define ICOM_CABLE_ID_VALID 0x01
-#define ICOM_CABLE_ID_MASK 0xF0
-#define ICOM_DISABLE 0x80
-#define CMD_XMIT_RCV_ENABLE 0xC0
-#define CMD_XMIT_ENABLE 0x40
-#define CMD_RCV_DISABLE 0x00
-#define CMD_RCV_ENABLE 0x80
-#define CMD_RESTART 0x01
-#define CMD_HOLD_XMIT 0x02
-#define CMD_SND_BREAK 0x04
-#define RS232_CABLE 0x06
-#define V24_CABLE 0x0E
-#define V35_CABLE 0x0C
-#define V36_CABLE 0x02
-#define NO_CABLE 0x00
-#define START_DOWNLOAD 0x80
-#define ICOM_INT_MASK_PRC_A 0x00003FFF
-#define ICOM_INT_MASK_PRC_B 0x3FFF0000
-#define ICOM_INT_MASK_PRC_C 0x00003FFF
-#define ICOM_INT_MASK_PRC_D 0x3FFF0000
-#define INT_RCV_COMPLETED 0x1000
-#define INT_XMIT_COMPLETED 0x2000
-#define INT_IDLE_DETECT 0x0800
-#define INT_RCV_DISABLED 0x0400
-#define INT_XMIT_DISABLED 0x0200
-#define INT_RCV_XMIT_SHUTDOWN 0x0100
-#define INT_FATAL_ERROR 0x0080
-#define INT_CABLE_PULL 0x0020
-#define INT_SIGNAL_CHANGE 0x0010
-#define HDLC_PPP_PURE_ASYNC 0x02
-#define HDLC_FF_FILL 0x00
-#define HDLC_HDW_FLOW 0x01
-#define START_XMIT 0x80
-#define ICOM_ACFG_DRIVE1 0x20
-#define ICOM_ACFG_NO_PARITY 0x00
-#define ICOM_ACFG_PARITY_ENAB 0x02
-#define ICOM_ACFG_PARITY_ODD 0x01
-#define ICOM_ACFG_8BPC 0x00
-#define ICOM_ACFG_7BPC 0x04
-#define ICOM_ACFG_6BPC 0x08
-#define ICOM_ACFG_5BPC 0x0C
-#define ICOM_ACFG_1STOP_BIT 0x00
-#define ICOM_ACFG_2STOP_BIT 0x10
-#define ICOM_DTR 0x80
-#define ICOM_RTS 0x40
-#define ICOM_RI 0x08
-#define ICOM_DSR 0x80
-#define ICOM_DCD 0x20
-#define ICOM_CTS 0x40
-
-#define NUM_XBUFFS 1
-#define NUM_RBUFFS 2
-#define RCV_BUFF_SZ 0x0200
-#define XMIT_BUFF_SZ 0x1000
-struct statusArea {
- /**********************************************/
- /* Transmit Status Area */
- /**********************************************/
- struct xmit_status_area{
- u32 leNext; /* Next entry in Little Endian on Adapter */
- u32 leNextASD;
- u32 leBuffer; /* Buffer for entry in LE for Adapter */
- u16 leLengthASD;
- u16 leOffsetASD;
- u16 leLength; /* Length of data in segment */
- u16 flags;
-#define SA_FLAGS_DONE 0x0080 /* Done with Segment */
-#define SA_FLAGS_CONTINUED 0x8000 /* More Segments */
-#define SA_FLAGS_IDLE 0x4000 /* Mark IDLE after frm */
-#define SA_FLAGS_READY_TO_XMIT 0x0800
-#define SA_FLAGS_STAT_MASK 0x007F
- } xmit[NUM_XBUFFS];
-
- /**********************************************/
- /* Receive Status Area */
- /**********************************************/
- struct {
- u32 leNext; /* Next entry in Little Endian on Adapter */
- u32 leNextASD;
- u32 leBuffer; /* Buffer for entry in LE for Adapter */
- u16 WorkingLength; /* size of segment */
- u16 reserv01;
- u16 leLength; /* Length of data in segment */
- u16 flags;
-#define SA_FL_RCV_DONE 0x0010 /* Data ready */
-#define SA_FLAGS_OVERRUN 0x0040
-#define SA_FLAGS_PARITY_ERROR 0x0080
-#define SA_FLAGS_FRAME_ERROR 0x0001
-#define SA_FLAGS_FRAME_TRUNC 0x0002
-#define SA_FLAGS_BREAK_DET 0x0004 /* set conditionally by device driver, not hardware */
-#define SA_FLAGS_RCV_MASK 0xFFE6
- } rcv[NUM_RBUFFS];
-};
-
-struct icom_adapter;
-
-
-#define ICOM_MAJOR 243
-#define ICOM_MINOR_START 0
-
-struct icom_port {
- struct uart_port uart_port;
- u8 imbed_modem;
-#define ICOM_UNKNOWN 1
-#define ICOM_RVX 2
-#define ICOM_IMBED_MODEM 3
- unsigned char cable_id;
- unsigned char read_status_mask;
- unsigned char ignore_status_mask;
- void __iomem * int_reg;
- struct icom_regs __iomem *global_reg;
- struct func_dram __iomem *dram;
- int port;
- struct statusArea *statStg;
- dma_addr_t statStg_pci;
- u32 *xmitRestart;
- dma_addr_t xmitRestart_pci;
- unsigned char *xmit_buf;
- dma_addr_t xmit_buf_pci;
- unsigned char *recv_buf;
- dma_addr_t recv_buf_pci;
- int next_rcv;
- int put_length;
- int status;
-#define ICOM_PORT_ACTIVE 1 /* Port exists. */
-#define ICOM_PORT_OFF 0 /* Port does not exist. */
- int load_in_progress;
- struct icom_adapter *adapter;
-};
-
-struct icom_adapter {
- void __iomem * base_addr;
- unsigned long base_addr_pci;
- struct pci_dev *pci_dev;
- struct icom_port port_info[4];
- int index;
- int version;
-#define ADAPTER_V1 0x0001
-#define ADAPTER_V2 0x0002
- u32 subsystem_id;
-#define FOUR_PORT_MODEL 0x0252
-#define V2_TWO_PORTS_RVX 0x021A
-#define V2_ONE_PORT_RVX_ONE_PORT_IMBED_MDM 0x0251
- int numb_ports;
- struct list_head icom_adapter_entry;
- struct kref kref;
-};
-
-/* prototype */
-extern void iCom_sercons_init(void);
-
-struct lookup_proc_table {
- u32 __iomem *global_control_reg;
- unsigned long processor_id;
-};
-
-struct lookup_int_table {
- u32 __iomem *global_int_mask;
- unsigned long processor_id;
-};
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index b1639b174292..30edb35a6a15 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1937,8 +1937,6 @@ static int imx_uart_rs485_config(struct uart_port *port,
rs485conf->flags & SER_RS485_RX_DURING_TX)
imx_uart_start_rx(port);
- port->rs485 = *rs485conf;
-
return 0;
}
diff --git a/drivers/tty/serial/jsm/jsm_cls.c b/drivers/tty/serial/jsm/jsm_cls.c
index 444f233ebd1f..3fd57ac3ad81 100644
--- a/drivers/tty/serial/jsm/jsm_cls.c
+++ b/drivers/tty/serial/jsm/jsm_cls.c
@@ -689,7 +689,7 @@ static void cls_param(struct jsm_channel *ch)
/*
* If baud rate is zero, flush queues, and set mval to drop DTR.
*/
- if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+ if ((ch->ch_c_cflag & CBAUD) == B0) {
ch->ch_r_head = 0;
ch->ch_r_tail = 0;
ch->ch_e_head = 0;
@@ -723,14 +723,8 @@ static void cls_param(struct jsm_channel *ch)
if (!(ch->ch_c_cflag & PARODD))
lcr |= UART_LCR_EPAR;
- /*
- * Not all platforms support mark/space parity,
- * so this will hide behind an ifdef.
- */
-#ifdef CMSPAR
if (ch->ch_c_cflag & CMSPAR)
lcr |= UART_LCR_SPAR;
-#endif
if (ch->ch_c_cflag & CSTOPB)
lcr |= UART_LCR_STOP;
diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
index 110696cdaa1d..0c78f66276cd 100644
--- a/drivers/tty/serial/jsm/jsm_neo.c
+++ b/drivers/tty/serial/jsm/jsm_neo.c
@@ -938,7 +938,7 @@ static void neo_param(struct jsm_channel *ch)
/*
* If baud rate is zero, flush queues, and set mval to drop DTR.
*/
- if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+ if ((ch->ch_c_cflag & CBAUD) == B0) {
ch->ch_r_head = ch->ch_r_tail = 0;
ch->ch_e_head = ch->ch_e_tail = 0;
@@ -997,14 +997,8 @@ static void neo_param(struct jsm_channel *ch)
if (!(ch->ch_c_cflag & PARODD))
lcr |= UART_LCR_EPAR;
- /*
- * Not all platforms support mark/space parity,
- * so this will hide behind an ifdef.
- */
-#ifdef CMSPAR
if (ch->ch_c_cflag & CMSPAR)
lcr |= UART_LCR_SPAR;
-#endif
if (ch->ch_c_cflag & CSTOPB)
lcr |= UART_LCR_STOP;
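
Side note, not part of the patch: the two jsm hunks above are behaviour-preserving. B0 is the zero baud code in termios, so masking c_cflag with CBAUD and comparing against B0 is the same test as comparing against 0, only more explicit about the "drop DTR" intent, and CMSPAR is always defined for kernel code, so the #ifdef around the mark/space parity handling was dead. A minimal user-space sketch of the resulting logic; the UART_LCR_* values mirror include/uapi/linux/serial_reg.h and are assumptions for the demo:

#define _DEFAULT_SOURCE 1
#include <stdio.h>
#include <termios.h>

#ifndef CMSPAR
#define CMSPAR		010000000000	/* assumed: mark/space parity flag */
#endif

#define UART_LCR_PARITY	0x08	/* assumed: parity enable */
#define UART_LCR_EPAR	0x10	/* assumed: even parity select */
#define UART_LCR_SPAR	0x20	/* assumed: stick (mark/space) parity */
#define UART_LCR_STOP	0x04	/* assumed: two stop bits */

/* Rebuilds the lcr computation from cls_param()/neo_param() after the patch. */
static unsigned char build_lcr(tcflag_t cflag)
{
	unsigned char lcr = 0;

	if (cflag & PARENB)
		lcr |= UART_LCR_PARITY;
	if (!(cflag & PARODD))
		lcr |= UART_LCR_EPAR;
	if (cflag & CMSPAR)		/* no #ifdef needed any more */
		lcr |= UART_LCR_SPAR;
	if (cflag & CSTOPB)
		lcr |= UART_LCR_STOP;
	return lcr;
}

int main(void)
{
	tcflag_t cflag = B0 | PARENB | CMSPAR;

	/* B0 is the zero baud code, so both spellings of the hang-up test agree. */
	printf("hangup old=%d new=%d lcr=%#x\n",
	       (cflag & CBAUD) == 0, (cflag & CBAUD) == B0,
	       (unsigned int)build_lcr(cflag));
	return 0;
}
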
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 3112b4a05448..a0b6ea52d133 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1037,7 +1037,6 @@ static int max310x_rs485_config(struct uart_port *port,
rs485->flags &= SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX |
SER_RS485_ENABLED;
- memset(rs485->padding, 0, sizeof(rs485->padding));
port->rs485 = *rs485;
schedule_work(&one->rs_work);
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index 9acae5f8fc32..12117b596e73 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -833,7 +833,6 @@ static int men_z135_probe(struct mcb_device *mdev,
uart->port.iotype = UPIO_MEM;
uart->port.ops = &men_z135_ops;
uart->port.irq = mcb_get_irq(mdev);
- uart->port.iotype = UPIO_MEM;
uart->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
uart->port.line = line++;
uart->port.dev = dev;
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 2bf1c57e0981..4869c0059c98 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -68,6 +68,7 @@
#define AML_UART_BAUD_MASK 0x7fffff
#define AML_UART_BAUD_USE BIT(23)
#define AML_UART_BAUD_XTAL BIT(24)
+#define AML_UART_BAUD_XTAL_DIV2 BIT(27)
#define AML_UART_PORT_NUM 12
#define AML_UART_PORT_OFFSET 6
@@ -80,6 +81,10 @@ static struct uart_driver meson_uart_driver;
static struct uart_port *meson_ports[AML_UART_PORT_NUM];
+struct meson_uart_data {
+ bool has_xtal_div2;
+};
+
static void meson_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
@@ -253,6 +258,14 @@ static const char *meson_uart_type(struct uart_port *port)
return (port->type == PORT_MESON) ? "meson_uart" : NULL;
}
+/*
+ * This function is called only from probe() using a temporary io mapping
+ * in order to perform a reset before setting up the device. Since the
+ * temporarily mapped region was successfully requested, there can be no
+ * console on this port at this time. Hence it is not necessary for this
+ * function to acquire the port->lock. (Since there is no console on this
+ * port at this time, the port->lock is not initialized yet.)
+ */
static void meson_uart_reset(struct uart_port *port)
{
u32 val;
@@ -267,9 +280,12 @@ static void meson_uart_reset(struct uart_port *port)
static int meson_uart_startup(struct uart_port *port)
{
+ unsigned long flags;
u32 val;
int ret = 0;
+ spin_lock_irqsave(&port->lock, flags);
+
val = readl(port->membase + AML_UART_CONTROL);
val |= AML_UART_CLEAR_ERR;
writel(val, port->membase + AML_UART_CONTROL);
@@ -285,6 +301,8 @@ static int meson_uart_startup(struct uart_port *port)
val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2));
writel(val, port->membase + AML_UART_MISC);
+ spin_unlock_irqrestore(&port->lock, flags);
+
ret = request_irq(port->irq, meson_uart_interrupt, 0,
port->name, port);
@@ -293,16 +311,23 @@ static int meson_uart_startup(struct uart_port *port)
static void meson_uart_change_speed(struct uart_port *port, unsigned long baud)
{
- u32 val;
+ const struct meson_uart_data *private_data = port->private_data;
+ u32 val = 0;
while (!meson_uart_tx_empty(port))
cpu_relax();
if (port->uartclk == 24000000) {
- val = ((port->uartclk / 3) / baud) - 1;
+ unsigned int xtal_div = 3;
+
+ if (private_data && private_data->has_xtal_div2) {
+ xtal_div = 2;
+ val |= AML_UART_BAUD_XTAL_DIV2;
+ }
+ val |= DIV_ROUND_CLOSEST(port->uartclk / xtal_div, baud) - 1;
val |= AML_UART_BAUD_XTAL;
} else {
- val = ((port->uartclk * 10 / (baud * 4) + 5) / 10) - 1;
+ val = DIV_ROUND_CLOSEST(port->uartclk / 4, baud) - 1;
}
val |= AML_UART_BAUD_USE;
writel(val, port->membase + AML_UART_REG5);
@@ -749,6 +774,7 @@ static int meson_uart_probe(struct platform_device *pdev)
port->x_char = 0;
port->ops = &meson_uart_ops;
port->fifosize = fifosize;
+ port->private_data = (void *)device_get_match_data(&pdev->dev);
meson_ports[pdev->id] = port;
platform_set_drvdata(pdev, port);
@@ -777,11 +803,19 @@ static int meson_uart_remove(struct platform_device *pdev)
return 0;
}
+static struct meson_uart_data s4_uart_data = {
+ .has_xtal_div2 = true,
+};
+
static const struct of_device_id meson_uart_dt_match[] = {
{ .compatible = "amlogic,meson6-uart" },
{ .compatible = "amlogic,meson8-uart" },
{ .compatible = "amlogic,meson8b-uart" },
{ .compatible = "amlogic,meson-gx-uart" },
+ {
+ .compatible = "amlogic,meson-s4-uart",
+ .data = (void *)&s4_uart_data,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, meson_uart_dt_match);
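
Purely as an illustration of the divisor arithmetic introduced above (not part of the patch): with the 24 MHz crystal the reference clock is divided by 3, or by 2 on SoCs whose match data sets has_xtal_div2, and DIV_ROUND_CLOSEST now picks the count nearest to the requested baud rate instead of truncating. The bit positions below are taken from the hunk; the helper is a user-space sketch of meson_uart_change_speed():

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define AML_UART_BAUD_USE	(1u << 23)
#define AML_UART_BAUD_XTAL	(1u << 24)
#define AML_UART_BAUD_XTAL_DIV2	(1u << 27)

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

/* Mirrors the baud register computation after the patch (sketch only). */
static uint32_t meson_baud_reg(unsigned long uartclk, unsigned long baud,
			       bool has_xtal_div2)
{
	uint32_t val = 0;

	if (uartclk == 24000000) {
		unsigned int xtal_div = 3;

		if (has_xtal_div2) {
			xtal_div = 2;
			val |= AML_UART_BAUD_XTAL_DIV2;
		}
		val |= DIV_ROUND_CLOSEST(uartclk / xtal_div, baud) - 1;
		val |= AML_UART_BAUD_XTAL;
	} else {
		val = DIV_ROUND_CLOSEST(uartclk / 4, baud) - 1;
	}
	return val | AML_UART_BAUD_USE;
}

int main(void)
{
	printf("S4 @115200: %#x\n", (unsigned int)meson_baud_reg(24000000, 115200, true));
	printf("GX @115200: %#x\n", (unsigned int)meson_baud_reg(24000000, 115200, false));
	return 0;
}
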
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 3acc0f185762..e50f069b5ebb 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -38,6 +38,8 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
@@ -754,9 +756,6 @@ static void mpc512x_psc_get_irq(struct uart_port *port, struct device_node *np)
port->irqflags = IRQF_SHARED;
port->irq = psc_fifoc_irq;
}
-#endif
-
-#ifdef CONFIG_PPC_MPC512x
#define PSC_5125(port) ((struct mpc5125_psc __iomem *)((port)->membase))
#define FIFO_5125(port) ((struct mpc512x_psc_fifo __iomem *)(PSC_5125(port)+1))
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 23c94b927776..e676ec761f18 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1599,6 +1599,7 @@ static inline struct uart_port *msm_get_port_from_line(unsigned int line)
static void __msm_console_write(struct uart_port *port, const char *s,
unsigned int count, bool is_uartdm)
{
+ unsigned long flags;
int i;
int num_newlines = 0;
bool replaced = false;
@@ -1616,6 +1617,8 @@ static void __msm_console_write(struct uart_port *port, const char *s,
num_newlines++;
count += num_newlines;
+ local_irq_save(flags);
+
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
@@ -1661,6 +1664,8 @@ static void __msm_console_write(struct uart_port *port, const char *s,
if (locked)
spin_unlock(&port->lock);
+
+ local_irq_restore(flags);
}
static void msm_console_write(struct console *co, const char *s,
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 8d5ffa196097..46f4d4cacb6e 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1336,18 +1336,11 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
up->ier = 0;
serial_out(up, UART_IER, 0);
- /* Clamp the delays to [0, 100ms] */
- rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
- rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
-
- /* store new config */
- port->rs485 = *rs485;
-
if (up->rts_gpiod) {
/* enable / disable rts */
- val = (port->rs485.flags & SER_RS485_ENABLED) ?
+ val = (rs485->flags & SER_RS485_ENABLED) ?
SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
- val = (port->rs485.flags & val) ? 1 : 0;
+ val = (rs485->flags & val) ? 1 : 0;
gpiod_set_value(up->rts_gpiod, val);
}
@@ -1358,7 +1351,7 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
/* If RS-485 is disabled, make sure the THR interrupt is fired when
* TX FIFO is below the trigger level.
*/
- if (!(port->rs485.flags & SER_RS485_ENABLED) &&
+ if (!(rs485->flags & SER_RS485_ENABLED) &&
(up->scr & OMAP_UART_SCR_TX_EMPTY)) {
up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
serial_out(up, UART_OMAP_SCR, up->scr);
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index 5250bd7d390a..44d20e5a7dd3 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -184,9 +184,6 @@ static void owl_uart_send_chars(struct uart_port *port)
struct circ_buf *xmit = &port->state->xmit;
unsigned int ch;
- if (uart_tx_stopped(port))
- return;
-
if (port->x_char) {
while (!(owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TFFU))
cpu_relax();
@@ -195,6 +192,9 @@ static void owl_uart_send_chars(struct uart_port *port)
port->x_char = 0;
}
+ if (uart_tx_stopped(port))
+ return;
+
while (!(owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TFFU)) {
if (uart_circ_empty(xmit))
break;
@@ -731,6 +731,7 @@ static int owl_uart_probe(struct platform_device *pdev)
owl_port->port.uartclk = clk_get_rate(owl_port->clk);
if (owl_port->port.uartclk == 0) {
dev_err(&pdev->dev, "clock rate is zero\n");
+ clk_disable_unprepare(owl_port->clk);
return -EINVAL;
}
owl_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_LOW_LATENCY;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index affe71f8b50c..3b26524d48e3 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -550,18 +550,6 @@ static u8 pch_uart_hal_get_modem(struct eg20t_port *priv)
return (u8)msr;
}
-static void pch_uart_hal_write(struct eg20t_port *priv,
- const unsigned char *buf, int tx_size)
-{
- int i;
- unsigned int thr;
-
- for (i = 0; i < tx_size;) {
- thr = buf[i++];
- iowrite8(thr, priv->membase + PCH_UART_THR);
- }
-}
-
static int pch_uart_hal_read(struct eg20t_port *priv, unsigned char *buf,
int rx_size)
{
@@ -624,22 +612,6 @@ static int push_rx(struct eg20t_port *priv, const unsigned char *buf,
return 0;
}
-static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
-{
- int ret = 0;
- struct uart_port *port = &priv->port;
-
- if (port->x_char) {
- dev_dbg(priv->port.dev, "%s:X character send %02x (%lu)\n",
- __func__, port->x_char, jiffies);
- buf[0] = port->x_char;
- port->x_char = 0;
- ret = 1;
- }
-
- return ret;
-}
-
static int dma_push_rx(struct eg20t_port *priv, int size)
{
int room;
@@ -785,31 +757,6 @@ static void pch_dma_tx_complete(void *arg)
pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT);
}
-static int pop_tx(struct eg20t_port *priv, int size)
-{
- int count = 0;
- struct uart_port *port = &priv->port;
- struct circ_buf *xmit = &port->state->xmit;
-
- if (uart_tx_stopped(port) || uart_circ_empty(xmit) || count >= size)
- goto pop_tx_end;
-
- do {
- int cnt_to_end =
- CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
- int sz = min(size - count, cnt_to_end);
- pch_uart_hal_write(priv, &xmit->buf[xmit->tail], sz);
- xmit->tail = (xmit->tail + sz) & (UART_XMIT_SIZE - 1);
- count += sz;
- } while (!uart_circ_empty(xmit) && count < size);
-
-pop_tx_end:
- dev_dbg(priv->port.dev, "%d characters. Remained %d characters.(%lu)\n",
- count, size - count, jiffies);
-
- return count;
-}
-
static int handle_rx_to(struct eg20t_port *priv)
{
struct pch_uart_buffer *buf;
@@ -875,8 +822,6 @@ static unsigned int handle_tx(struct eg20t_port *priv)
struct uart_port *port = &priv->port;
struct circ_buf *xmit = &port->state->xmit;
int fifo_size;
- int tx_size;
- int size;
int tx_empty;
if (!priv->start_tx) {
@@ -889,19 +834,19 @@ static unsigned int handle_tx(struct eg20t_port *priv)
fifo_size = max(priv->fifo_size, 1);
tx_empty = 1;
- if (pop_tx_x(priv, xmit->buf)) {
- pch_uart_hal_write(priv, xmit->buf, 1);
+ if (port->x_char) {
+ iowrite8(port->x_char, priv->membase + PCH_UART_THR);
port->icount.tx++;
+ port->x_char = 0;
tx_empty = 0;
fifo_size--;
}
- size = min(xmit->head - xmit->tail, fifo_size);
- if (size < 0)
- size = fifo_size;
- tx_size = pop_tx(priv, size);
- if (tx_size > 0) {
- port->icount.tx += tx_size;
+ while (!uart_tx_stopped(port) && !uart_circ_empty(xmit) && fifo_size) {
+ iowrite8(xmit->buf[xmit->tail], priv->membase + PCH_UART_THR);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ fifo_size--;
tx_empty = 0;
}
@@ -946,9 +891,11 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
}
fifo_size = max(priv->fifo_size, 1);
- if (pop_tx_x(priv, xmit->buf)) {
- pch_uart_hal_write(priv, xmit->buf, 1);
+
+ if (port->x_char) {
+ iowrite8(port->x_char, priv->membase + PCH_UART_THR);
port->icount.tx++;
+ port->x_char = 0;
fifo_size--;
}
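
For illustration only (not part of the patch): the rewritten handle_tx() drops the pop_tx()/pop_tx_x() indirection and drains the circular transmit buffer straight into the THR while FIFO space remains, using the usual power-of-two tail mask. A minimal user-space model of that drain loop, assuming a power-of-two buffer size like UART_XMIT_SIZE:

#include <stdio.h>
#include <string.h>

#define XMIT_SIZE 4096u			/* power of two, like UART_XMIT_SIZE */

struct circ { char buf[XMIT_SIZE]; unsigned int head, tail; };

/* Pretend THR write: collect into out[] so the demo output is observable. */
static void write_thr(char *out, unsigned int *n, char c) { out[(*n)++] = c; }

/* Mirrors the drain loop in the patched handle_tx() (sketch). */
static unsigned int drain_fifo(struct circ *x, unsigned int fifo_space,
			       char *out, unsigned int *n)
{
	unsigned int sent = 0;

	while (x->head != x->tail && fifo_space) {
		write_thr(out, n, x->buf[x->tail]);
		x->tail = (x->tail + 1) & (XMIT_SIZE - 1);
		fifo_space--;
		sent++;
	}
	return sent;
}

int main(void)
{
	struct circ x = { .head = 5, .tail = 0 };
	char out[64];
	unsigned int n = 0;

	memcpy(x.buf, "hello", 5);
	printf("sent %u bytes: %.*s\n", drain_fifo(&x, 16, out, &n), (int)n, out);
	return 0;
}
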
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index b7a3a1b959b1..b399aac530fe 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -25,38 +25,114 @@
#include <linux/delay.h>
#include <asm/mach-pic32/pic32.h>
-#include "pic32_uart.h"
/* UART name and device definitions */
#define PIC32_DEV_NAME "pic32-uart"
#define PIC32_MAX_UARTS 6
#define PIC32_SDEV_NAME "ttyPIC"
-/* pic32_sport pointer for console use */
-static struct pic32_sport *pic32_sports[PIC32_MAX_UARTS];
+#define PIC32_UART_DFLT_BRATE 9600
+#define PIC32_UART_TX_FIFO_DEPTH 8
+#define PIC32_UART_RX_FIFO_DEPTH 8
+
+#define PIC32_UART_MODE 0x00
+#define PIC32_UART_STA 0x10
+#define PIC32_UART_TX 0x20
+#define PIC32_UART_RX 0x30
+#define PIC32_UART_BRG 0x40
+
+/* struct pic32_sport - pic32 serial port descriptor
+ * @port: uart port descriptor
+ * @idx: port index
+ * @irq_fault: virtual fault interrupt number
+ * @irq_fault_name: irq fault name
+ * @irq_rx: virtual rx interrupt number
+ * @irq_rx_name: irq rx name
+ * @irq_tx: virtual tx interrupt number
+ * @irq_tx_name: irq tx name
+ * @cts_gpio: clear to send gpio
+ * @dev: device descriptor
+ **/
+struct pic32_sport {
+ struct uart_port port;
+ int idx;
+
+ int irq_fault;
+ const char *irq_fault_name;
+ int irq_rx;
+ const char *irq_rx_name;
+ int irq_tx;
+ const char *irq_tx_name;
+ bool enable_tx_irq;
+
+ bool hw_flow_ctrl;
+ int cts_gpio;
+
+ struct clk *clk;
+
+ struct device *dev;
+};
-static inline void pic32_wait_deplete_txbuf(struct pic32_sport *sport)
+static inline struct pic32_sport *to_pic32_sport(struct uart_port *port)
{
- /* wait for tx empty, otherwise chars will be lost or corrupted */
- while (!(pic32_uart_readl(sport, PIC32_UART_STA) & PIC32_UART_STA_TRMT))
- udelay(1);
+ return container_of(port, struct pic32_sport, port);
}
-static inline int pic32_enable_clock(struct pic32_sport *sport)
+static inline void pic32_uart_writel(struct pic32_sport *sport,
+ u32 reg, u32 val)
{
- int ret = clk_prepare_enable(sport->clk);
-
- if (ret)
- return ret;
+ __raw_writel(val, sport->port.membase + reg);
+}
- sport->ref_clk++;
- return 0;
+static inline u32 pic32_uart_readl(struct pic32_sport *sport, u32 reg)
+{
+ return __raw_readl(sport->port.membase + reg);
}
-static inline void pic32_disable_clock(struct pic32_sport *sport)
+/* pic32 uart mode register bits */
+#define PIC32_UART_MODE_ON BIT(15)
+#define PIC32_UART_MODE_FRZ BIT(14)
+#define PIC32_UART_MODE_SIDL BIT(13)
+#define PIC32_UART_MODE_IREN BIT(12)
+#define PIC32_UART_MODE_RTSMD BIT(11)
+#define PIC32_UART_MODE_RESV1 BIT(10)
+#define PIC32_UART_MODE_UEN1 BIT(9)
+#define PIC32_UART_MODE_UEN0 BIT(8)
+#define PIC32_UART_MODE_WAKE BIT(7)
+#define PIC32_UART_MODE_LPBK BIT(6)
+#define PIC32_UART_MODE_ABAUD BIT(5)
+#define PIC32_UART_MODE_RXINV BIT(4)
+#define PIC32_UART_MODE_BRGH BIT(3)
+#define PIC32_UART_MODE_PDSEL1 BIT(2)
+#define PIC32_UART_MODE_PDSEL0 BIT(1)
+#define PIC32_UART_MODE_STSEL BIT(0)
+
+/* pic32 uart status register bits */
+#define PIC32_UART_STA_UTXISEL1 BIT(15)
+#define PIC32_UART_STA_UTXISEL0 BIT(14)
+#define PIC32_UART_STA_UTXINV BIT(13)
+#define PIC32_UART_STA_URXEN BIT(12)
+#define PIC32_UART_STA_UTXBRK BIT(11)
+#define PIC32_UART_STA_UTXEN BIT(10)
+#define PIC32_UART_STA_UTXBF BIT(9)
+#define PIC32_UART_STA_TRMT BIT(8)
+#define PIC32_UART_STA_URXISEL1 BIT(7)
+#define PIC32_UART_STA_URXISEL0 BIT(6)
+#define PIC32_UART_STA_ADDEN BIT(5)
+#define PIC32_UART_STA_RIDLE BIT(4)
+#define PIC32_UART_STA_PERR BIT(3)
+#define PIC32_UART_STA_FERR BIT(2)
+#define PIC32_UART_STA_OERR BIT(1)
+#define PIC32_UART_STA_URXDA BIT(0)
+
+/* pic32_sport pointer for console use */
+static struct pic32_sport *pic32_sports[PIC32_MAX_UARTS];
+
+static inline void pic32_wait_deplete_txbuf(struct pic32_sport *sport)
{
- sport->ref_clk--;
- clk_disable_unprepare(sport->clk);
+ /* wait for tx empty, otherwise chars will be lost or corrupted */
+ while (!(pic32_uart_readl(sport, PIC32_UART_STA) & PIC32_UART_STA_TRMT))
+ udelay(1);
}
/* serial core request to check if uart tx buffer is empty */
@@ -117,16 +193,16 @@ static unsigned int pic32_uart_get_mctrl(struct uart_port *port)
*/
static inline void pic32_uart_irqtxen(struct pic32_sport *sport, u8 en)
{
- if (en && !tx_irq_enabled(sport)) {
+ if (en && !sport->enable_tx_irq) {
enable_irq(sport->irq_tx);
- tx_irq_enabled(sport) = 1;
- } else if (!en && tx_irq_enabled(sport)) {
+ sport->enable_tx_irq = true;
+ } else if (!en && sport->enable_tx_irq) {
/* use disable_irq_nosync() and not disable_irq() to avoid self
* imposed deadlock by not waiting for irq handler to end,
* since this callback is called from interrupt context.
*/
disable_irq_nosync(sport->irq_tx);
- tx_irq_enabled(sport) = 0;
+ sport->enable_tx_irq = false;
}
}
@@ -395,7 +471,7 @@ static int pic32_uart_startup(struct uart_port *port)
local_irq_save(flags);
- ret = pic32_enable_clock(sport);
+ ret = clk_prepare_enable(sport->clk);
if (ret) {
local_irq_restore(flags);
goto out_done;
@@ -419,7 +495,7 @@ static int pic32_uart_startup(struct uart_port *port)
* For each irq request_irq() is called with interrupt disabled.
* And the irq is enabled as soon as we are ready to handle them.
*/
- tx_irq_enabled(sport) = 0;
+ sport->enable_tx_irq = false;
sport->irq_fault_name = kasprintf(GFP_KERNEL, "%s%d-fault",
pic32_uart_type(port),
@@ -431,7 +507,7 @@ static int pic32_uart_startup(struct uart_port *port)
}
irq_set_status_flags(sport->irq_fault, IRQ_NOAUTOEN);
ret = request_irq(sport->irq_fault, pic32_uart_fault_interrupt,
- sport->irqflags_fault, sport->irq_fault_name, port);
+ IRQF_NO_THREAD, sport->irq_fault_name, port);
if (ret) {
dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n",
__func__, sport->irq_fault, ret,
@@ -449,7 +525,7 @@ static int pic32_uart_startup(struct uart_port *port)
}
irq_set_status_flags(sport->irq_rx, IRQ_NOAUTOEN);
ret = request_irq(sport->irq_rx, pic32_uart_rx_interrupt,
- sport->irqflags_rx, sport->irq_rx_name, port);
+ IRQF_NO_THREAD, sport->irq_rx_name, port);
if (ret) {
dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n",
__func__, sport->irq_rx, ret,
@@ -467,7 +543,7 @@ static int pic32_uart_startup(struct uart_port *port)
}
irq_set_status_flags(sport->irq_tx, IRQ_NOAUTOEN);
ret = request_irq(sport->irq_tx, pic32_uart_tx_interrupt,
- sport->irqflags_tx, sport->irq_tx_name, port);
+ IRQF_NO_THREAD, sport->irq_tx_name, port);
if (ret) {
dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n",
__func__, sport->irq_tx, ret,
@@ -488,19 +564,21 @@ static int pic32_uart_startup(struct uart_port *port)
/* enable all interrupts and eanable uart */
pic32_uart_en_and_unmask(port);
+ local_irq_restore(flags);
+
enable_irq(sport->irq_rx);
return 0;
out_t:
- kfree(sport->irq_tx_name);
free_irq(sport->irq_tx, port);
+ kfree(sport->irq_tx_name);
out_r:
- kfree(sport->irq_rx_name);
free_irq(sport->irq_rx, port);
+ kfree(sport->irq_rx_name);
out_f:
- kfree(sport->irq_fault_name);
free_irq(sport->irq_fault, port);
+ kfree(sport->irq_fault_name);
out_done:
return ret;
}
@@ -515,12 +593,15 @@ static void pic32_uart_shutdown(struct uart_port *port)
spin_lock_irqsave(&port->lock, flags);
pic32_uart_dsbl_and_mask(port);
spin_unlock_irqrestore(&port->lock, flags);
- pic32_disable_clock(sport);
+ clk_disable_unprepare(sport->clk);
/* free all 3 interrupts for this UART */
free_irq(sport->irq_fault, port);
+ kfree(sport->irq_fault_name);
free_irq(sport->irq_tx, port);
+ kfree(sport->irq_tx_name);
free_irq(sport->irq_rx, port);
+ kfree(sport->irq_rx_name);
}
/* serial core request to change current uart setting */
@@ -712,10 +793,9 @@ static void pic32_console_write(struct console *co, const char *s,
unsigned int count)
{
struct pic32_sport *sport = pic32_sports[co->index];
- struct uart_port *port = pic32_get_port(sport);
/* call uart helper to deal with \r\n */
- uart_console_write(port, s, count, pic32_console_putchar);
+ uart_console_write(&sport->port, s, count, pic32_console_putchar);
}
/* console core request to setup given console, find matching uart
@@ -724,7 +804,6 @@ static void pic32_console_write(struct console *co, const char *s,
static int pic32_console_setup(struct console *co, char *options)
{
struct pic32_sport *sport;
- struct uart_port *port = NULL;
int baud = 115200;
int bits = 8;
int parity = 'n';
@@ -737,16 +816,15 @@ static int pic32_console_setup(struct console *co, char *options)
sport = pic32_sports[co->index];
if (!sport)
return -ENODEV;
- port = pic32_get_port(sport);
- ret = pic32_enable_clock(sport);
+ ret = clk_prepare_enable(sport->clk);
if (ret)
return ret;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
- return uart_set_options(port, co, baud, parity, bits, flow);
+ return uart_set_options(&sport->port, co, baud, parity, bits, flow);
}
static struct uart_driver pic32_uart_driver;
@@ -816,13 +894,9 @@ static int pic32_uart_probe(struct platform_device *pdev)
sport->idx = uart_idx;
sport->irq_fault = irq_of_parse_and_map(np, 0);
- sport->irqflags_fault = IRQF_NO_THREAD;
sport->irq_rx = irq_of_parse_and_map(np, 1);
- sport->irqflags_rx = IRQF_NO_THREAD;
sport->irq_tx = irq_of_parse_and_map(np, 2);
- sport->irqflags_tx = IRQF_NO_THREAD;
sport->clk = devm_clk_get(&pdev->dev, NULL);
- sport->cts_gpio = -EINVAL;
sport->dev = &pdev->dev;
/* Hardware flow control: gpios
@@ -850,7 +924,6 @@ static int pic32_uart_probe(struct platform_device *pdev)
pic32_sports[uart_idx] = sport;
port = &sport->port;
- memset(port, 0, sizeof(*port));
port->iotype = UPIO_MEM;
port->mapbase = res_mem->start;
port->ops = &pic32_uart_ops;
@@ -872,7 +945,7 @@ static int pic32_uart_probe(struct platform_device *pdev)
/* The peripheral clock has been enabled by console_setup,
* so disable it till the port is used.
*/
- pic32_disable_clock(sport);
+ clk_disable_unprepare(sport->clk);
}
#endif
@@ -893,7 +966,7 @@ static int pic32_uart_remove(struct platform_device *pdev)
struct pic32_sport *sport = to_pic32_sport(port);
uart_remove_one_port(&pic32_uart_driver, port);
- pic32_disable_clock(sport);
+ clk_disable_unprepare(sport->clk);
platform_set_drvdata(pdev, NULL);
pic32_sports[sport->idx] = NULL;
diff --git a/drivers/tty/serial/pic32_uart.h b/drivers/tty/serial/pic32_uart.h
deleted file mode 100644
index b15639cc336b..000000000000
--- a/drivers/tty/serial/pic32_uart.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * PIC32 Integrated Serial Driver.
- *
- * Copyright (C) 2015 Microchip Technology, Inc.
- *
- * Authors:
- * Sorin-Andrei Pistirica <andrei.pistirica@microchip.com>
- */
-#ifndef __DT_PIC32_UART_H__
-#define __DT_PIC32_UART_H__
-
-#define PIC32_UART_DFLT_BRATE (9600)
-#define PIC32_UART_TX_FIFO_DEPTH (8)
-#define PIC32_UART_RX_FIFO_DEPTH (8)
-
-#define PIC32_UART_MODE 0x00
-#define PIC32_UART_STA 0x10
-#define PIC32_UART_TX 0x20
-#define PIC32_UART_RX 0x30
-#define PIC32_UART_BRG 0x40
-
-struct pic32_console_opt {
- int baud;
- int parity;
- int bits;
- int flow;
-};
-
-/* struct pic32_sport - pic32 serial port descriptor
- * @port: uart port descriptor
- * @idx: port index
- * @irq_fault: virtual fault interrupt number
- * @irqflags_fault: flags related to fault irq
- * @irq_fault_name: irq fault name
- * @irq_rx: virtual rx interrupt number
- * @irqflags_rx: flags related to rx irq
- * @irq_rx_name: irq rx name
- * @irq_tx: virtual tx interrupt number
- * @irqflags_tx: : flags related to tx irq
- * @irq_tx_name: irq tx name
- * @cts_gpio: clear to send gpio
- * @dev: device descriptor
- **/
-struct pic32_sport {
- struct uart_port port;
- struct pic32_console_opt opt;
- int idx;
-
- int irq_fault;
- int irqflags_fault;
- const char *irq_fault_name;
- int irq_rx;
- int irqflags_rx;
- const char *irq_rx_name;
- int irq_tx;
- int irqflags_tx;
- const char *irq_tx_name;
- u8 enable_tx_irq;
-
- bool hw_flow_ctrl;
- int cts_gpio;
-
- int ref_clk;
- struct clk *clk;
-
- struct device *dev;
-};
-#define to_pic32_sport(c) container_of(c, struct pic32_sport, port)
-#define pic32_get_port(sport) (&sport->port)
-#define pic32_get_opt(sport) (&sport->opt)
-#define tx_irq_enabled(sport) (sport->enable_tx_irq)
-
-static inline void pic32_uart_writel(struct pic32_sport *sport,
- u32 reg, u32 val)
-{
- struct uart_port *port = pic32_get_port(sport);
-
- __raw_writel(val, port->membase + reg);
-}
-
-static inline u32 pic32_uart_readl(struct pic32_sport *sport, u32 reg)
-{
- struct uart_port *port = pic32_get_port(sport);
-
- return __raw_readl(port->membase + reg);
-}
-
-/* pic32 uart mode register bits */
-#define PIC32_UART_MODE_ON BIT(15)
-#define PIC32_UART_MODE_FRZ BIT(14)
-#define PIC32_UART_MODE_SIDL BIT(13)
-#define PIC32_UART_MODE_IREN BIT(12)
-#define PIC32_UART_MODE_RTSMD BIT(11)
-#define PIC32_UART_MODE_RESV1 BIT(10)
-#define PIC32_UART_MODE_UEN1 BIT(9)
-#define PIC32_UART_MODE_UEN0 BIT(8)
-#define PIC32_UART_MODE_WAKE BIT(7)
-#define PIC32_UART_MODE_LPBK BIT(6)
-#define PIC32_UART_MODE_ABAUD BIT(5)
-#define PIC32_UART_MODE_RXINV BIT(4)
-#define PIC32_UART_MODE_BRGH BIT(3)
-#define PIC32_UART_MODE_PDSEL1 BIT(2)
-#define PIC32_UART_MODE_PDSEL0 BIT(1)
-#define PIC32_UART_MODE_STSEL BIT(0)
-
-/* pic32 uart status register bits */
-#define PIC32_UART_STA_UTXISEL1 BIT(15)
-#define PIC32_UART_STA_UTXISEL0 BIT(14)
-#define PIC32_UART_STA_UTXINV BIT(13)
-#define PIC32_UART_STA_URXEN BIT(12)
-#define PIC32_UART_STA_UTXBRK BIT(11)
-#define PIC32_UART_STA_UTXEN BIT(10)
-#define PIC32_UART_STA_UTXBF BIT(9)
-#define PIC32_UART_STA_TRMT BIT(8)
-#define PIC32_UART_STA_URXISEL1 BIT(7)
-#define PIC32_UART_STA_URXISEL0 BIT(6)
-#define PIC32_UART_STA_ADDEN BIT(5)
-#define PIC32_UART_STA_RIDLE BIT(4)
-#define PIC32_UART_STA_PERR BIT(3)
-#define PIC32_UART_STA_FERR BIT(2)
-#define PIC32_UART_STA_OERR BIT(1)
-#define PIC32_UART_STA_URXDA BIT(0)
-
-#endif /* __DT_PIC32_UART_H__ */
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 5d97c201ad88..3133446e806c 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -24,7 +24,6 @@
*/
#undef DEBUG
-#undef DEBUG_HARD
#undef USE_CTRL_O_SYSRQ
#include <linux/module.h>
@@ -51,7 +50,6 @@
#include <asm/irq.h>
#ifdef CONFIG_PPC_PMAC
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/dbdma.h>
@@ -66,10 +64,6 @@
#include "pmac_zilog.h"
-/* Not yet implemented */
-#undef HAS_DBDMA
-
-static char version[] __initdata = "pmac_zilog: 0.6 (Benjamin Herrenschmidt <benh@kernel.crashing.org>)";
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("Driver for the Mac and PowerMac serial ports.");
MODULE_LICENSE("GPL");
@@ -446,9 +440,6 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
spin_lock(&uap_a->port.lock);
r3 = read_zsreg(uap_a, R3);
-#ifdef DEBUG_HARD
- pmz_debug("irq, r3: %x\n", r3);
-#endif
/* Channel A */
push = false;
if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
@@ -613,8 +604,6 @@ static void pmz_start_tx(struct uart_port *port)
struct uart_pmac_port *uap = to_pmz(port);
unsigned char status;
- pmz_debug("pmz: start_tx()\n");
-
uap->flags |= PMACZILOG_FLAG_TX_ACTIVE;
uap->flags &= ~PMACZILOG_FLAG_TX_STOPPED;
@@ -636,7 +625,7 @@ static void pmz_start_tx(struct uart_port *port)
struct circ_buf *xmit = &port->state->xmit;
if (uart_circ_empty(xmit))
- goto out;
+ return;
write_zsdata(uap, xmit->buf[xmit->tail]);
zssync(uap);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
@@ -645,8 +634,6 @@ static void pmz_start_tx(struct uart_port *port)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
}
- out:
- pmz_debug("pmz: start_tx() done.\n");
}
/*
@@ -659,13 +646,9 @@ static void pmz_stop_rx(struct uart_port *port)
{
struct uart_pmac_port *uap = to_pmz(port);
- pmz_debug("pmz: stop_rx()()\n");
-
/* Disable all RX interrupts. */
uap->curregs[R1] &= ~RxINT_MASK;
pmz_maybe_update_regs(uap);
-
- pmz_debug("pmz: stop_rx() done.\n");
}
/*
@@ -910,8 +893,6 @@ static int pmz_startup(struct uart_port *port)
unsigned long flags;
int pwr_delay = 0;
- pmz_debug("pmz: startup()\n");
-
uap->flags |= PMACZILOG_FLAG_IS_OPEN;
/* A console is never powered down. Else, power up and
@@ -947,8 +928,6 @@ static int pmz_startup(struct uart_port *port)
pmz_interrupt_control(uap, 1);
spin_unlock_irqrestore(&port->lock, flags);
- pmz_debug("pmz: startup() done.\n");
-
return 0;
}
@@ -957,8 +936,6 @@ static void pmz_shutdown(struct uart_port *port)
struct uart_pmac_port *uap = to_pmz(port);
unsigned long flags;
- pmz_debug("pmz: shutdown()\n");
-
spin_lock_irqsave(&port->lock, flags);
/* Disable interrupt requests for the channel */
@@ -987,8 +964,6 @@ static void pmz_shutdown(struct uart_port *port)
pmz_set_scc_power(uap, 0); /* Shut the chip down */
spin_unlock_irqrestore(&port->lock, flags);
-
- pmz_debug("pmz: shutdown() done.\n");
}
/* Shared by TTY driver and serial console setup. The port lock is held
@@ -1233,10 +1208,6 @@ static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios,
struct uart_pmac_port *uap = to_pmz(port);
unsigned long baud;
- pmz_debug("pmz: set_termios()\n");
-
- memcpy(&uap->termios_cache, termios, sizeof(struct ktermios));
-
/* XXX Check which revs of machines actually allow 1 and 4Mb speeds
* on the IR dongle. Note that the IRTTY driver currently doesn't know
* about the FIR mode and high speed modes. So these are unused. For
@@ -1270,8 +1241,6 @@ static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios,
pmz_maybe_update_regs(uap);
}
uart_update_timeout(port, termios->c_cflag, baud);
-
- pmz_debug("pmz: set_termios() done.\n");
}
/* The port lock is not held. */
@@ -1400,7 +1369,7 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
char name[1];
} *slots;
int len;
- struct resource r_ports, r_rxdma, r_txdma;
+ struct resource r_ports;
/*
* Request & map chip registers
@@ -1412,35 +1381,6 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
uap->control_reg = uap->port.membase;
uap->data_reg = uap->control_reg + 0x10;
-
- /*
- * Request & map DBDMA registers
- */
-#ifdef HAS_DBDMA
- if (of_address_to_resource(np, 1, &r_txdma) == 0 &&
- of_address_to_resource(np, 2, &r_rxdma) == 0)
- uap->flags |= PMACZILOG_FLAG_HAS_DMA;
-#else
- memset(&r_txdma, 0, sizeof(struct resource));
- memset(&r_rxdma, 0, sizeof(struct resource));
-#endif
- if (ZS_HAS_DMA(uap)) {
- uap->tx_dma_regs = ioremap(r_txdma.start, 0x100);
- if (uap->tx_dma_regs == NULL) {
- uap->flags &= ~PMACZILOG_FLAG_HAS_DMA;
- goto no_dma;
- }
- uap->rx_dma_regs = ioremap(r_rxdma.start, 0x100);
- if (uap->rx_dma_regs == NULL) {
- iounmap(uap->tx_dma_regs);
- uap->tx_dma_regs = NULL;
- uap->flags &= ~PMACZILOG_FLAG_HAS_DMA;
- goto no_dma;
- }
- uap->tx_dma_irq = irq_of_parse_and_map(np, 1);
- uap->rx_dma_irq = irq_of_parse_and_map(np, 2);
- }
-no_dma:
/*
* Detect port type
@@ -1506,8 +1446,6 @@ no_dma:
of_device_is_compatible(np->parent->parent, "gatwick")) {
/* IRQs on gatwick are offset by 64 */
uap->port.irq = irq_create_mapping(NULL, 64 + 15);
- uap->tx_dma_irq = irq_create_mapping(NULL, 64 + 4);
- uap->rx_dma_irq = irq_create_mapping(NULL, 64 + 5);
}
/* Setup some valid baud rate information in the register
@@ -1527,8 +1465,6 @@ static void pmz_dispose_port(struct uart_pmac_port *uap)
struct device_node *np;
np = uap->node;
- iounmap(uap->rx_dma_regs);
- iounmap(uap->tx_dma_regs);
iounmap(uap->control_reg);
uap->node = NULL;
of_node_put(np);
@@ -1875,7 +1811,6 @@ static struct platform_driver pmz_driver = {
static int __init init_pmz(void)
{
int rc, i;
- printk(KERN_INFO "%s\n", version);
/*
* First, we need to do a direct OF-based probe pass. We
diff --git a/drivers/tty/serial/pmac_zilog.h b/drivers/tty/serial/pmac_zilog.h
index fa85b0de5c2f..837b97ca0a90 100644
--- a/drivers/tty/serial/pmac_zilog.h
+++ b/drivers/tty/serial/pmac_zilog.h
@@ -43,7 +43,6 @@ struct uart_pmac_port {
#define PMACZILOG_FLAG_TX_ACTIVE 0x00000040
#define PMACZILOG_FLAG_IS_IRDA 0x00000100
#define PMACZILOG_FLAG_IS_INTMODEM 0x00000200
-#define PMACZILOG_FLAG_HAS_DMA 0x00000400
#define PMACZILOG_FLAG_RSRC_REQUESTED 0x00000800
#define PMACZILOG_FLAG_IS_OPEN 0x00002000
#define PMACZILOG_FLAG_IS_EXTCLK 0x00008000
@@ -55,16 +54,7 @@ struct uart_pmac_port {
volatile u8 __iomem *control_reg;
volatile u8 __iomem *data_reg;
-#ifdef CONFIG_PPC_PMAC
- unsigned int tx_dma_irq;
- unsigned int rx_dma_irq;
- volatile struct dbdma_regs __iomem *tx_dma_regs;
- volatile struct dbdma_regs __iomem *rx_dma_regs;
-#endif
-
unsigned char irq_name[8];
-
- struct ktermios termios_cache;
};
#define to_pmz(p) ((struct uart_pmac_port *)(p))
@@ -377,7 +367,6 @@ static inline void zssync(struct uart_pmac_port *port)
#define ZS_WANTS_MODEM_STATUS(UP) ((UP)->flags & PMACZILOG_FLAG_MODEM_STATUS)
#define ZS_IS_IRDA(UP) ((UP)->flags & PMACZILOG_FLAG_IS_IRDA)
#define ZS_IS_INTMODEM(UP) ((UP)->flags & PMACZILOG_FLAG_IS_INTMODEM)
-#define ZS_HAS_DMA(UP) ((UP)->flags & PMACZILOG_FLAG_HAS_DMA)
#define ZS_IS_OPEN(UP) ((UP)->flags & PMACZILOG_FLAG_IS_OPEN)
#define ZS_IS_EXTCLK(UP) ((UP)->flags & PMACZILOG_FLAG_IS_EXTCLK)
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 1543a6028856..4733a233bd0c 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -149,12 +149,6 @@ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
static void qcom_geni_serial_stop_rx(struct uart_port *uport);
static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop);
-static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
- 32000000, 48000000, 51200000, 64000000,
- 80000000, 96000000, 100000000,
- 102400000, 112000000, 120000000,
- 128000000};
-
#define to_dev_port(ptr, member) \
container_of(ptr, struct qcom_geni_serial_port, member)
@@ -507,7 +501,7 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
*/
qcom_geni_serial_poll_tx_done(uport);
- if (uart_circ_chars_pending(&uport->state->xmit)) {
+ if (!uart_circ_empty(&uport->state->xmit)) {
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
writel(irq_en | M_TX_FIFO_WATERMARK_EN,
uport->membase + SE_GENI_M_IRQ_EN);
@@ -946,25 +940,43 @@ static int qcom_geni_serial_startup(struct uart_port *uport)
return 0;
}
-static unsigned long get_clk_cfg(unsigned long clk_freq)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(root_freq); i++) {
- if (!(root_freq[i] % clk_freq))
- return root_freq[i];
- }
- return 0;
-}
-
-static unsigned long get_clk_div_rate(unsigned int baud,
+static unsigned long get_clk_div_rate(struct clk *clk, unsigned int baud,
unsigned int sampling_rate, unsigned int *clk_div)
{
unsigned long ser_clk;
unsigned long desired_clk;
+ unsigned long freq, prev;
+ unsigned long div, maxdiv;
+ int64_t mult;
desired_clk = baud * sampling_rate;
- ser_clk = get_clk_cfg(desired_clk);
+ if (!desired_clk) {
+ pr_err("%s: Invalid frequency\n", __func__);
+ return 0;
+ }
+
+ maxdiv = CLK_DIV_MSK >> CLK_DIV_SHFT;
+ prev = 0;
+
+ for (div = 1; div <= maxdiv; div++) {
+ mult = div * desired_clk;
+ if (mult > ULONG_MAX)
+ break;
+
+ freq = clk_round_rate(clk, (unsigned long)mult);
+ if (!(freq % desired_clk)) {
+ ser_clk = freq;
+ break;
+ }
+
+ if (!prev)
+ ser_clk = freq;
+ else if (prev == freq)
+ break;
+
+ prev = freq;
+ }
+
if (!ser_clk) {
pr_err("%s: Can't find matching DFS entry for baud %d\n",
__func__, baud);
@@ -972,6 +984,9 @@ static unsigned long get_clk_div_rate(unsigned int baud,
}
*clk_div = ser_clk / desired_clk;
+ if (!(*clk_div))
+ *clk_div = 1;
+
return ser_clk;
}
@@ -1003,7 +1018,8 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
if (ver >= QUP_SE_VERSION_2_5)
sampling_rate /= 2;
- clk_rate = get_clk_div_rate(baud, sampling_rate, &clk_div);
+ clk_rate = get_clk_div_rate(port->se.clk, baud,
+ sampling_rate, &clk_div);
if (!clk_rate)
goto out_restart_rx;
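
Illustrative sketch, not part of the patch: instead of consulting a fixed root_freq[] table, the new get_clk_div_rate() walks divider values and asks clk_round_rate() whether the clock tree can supply an exact integer multiple of baud * sampling_rate, falling back to the first candidate once the rounded rate stops changing. The user-space model below uses an invented rate table in place of the real clock driver and omits the ULONG_MAX overflow check for brevity:

#include <stdio.h>

/* Stand-in for clk_round_rate(): snap up to the nearest "supported" rate.
 * The table is invented for the demo; real rates come from the clock driver. */
static unsigned long fake_round_rate(unsigned long req)
{
	static const unsigned long rates[] = { 19200000, 64000000, 96000000,
					       102400000, 128000000 };
	size_t i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		if (rates[i] >= req)
			return rates[i];
	return rates[i - 1];
}

/* Mirrors the search loop in the patched get_clk_div_rate() (sketch). */
static unsigned long find_ser_clk(unsigned long desired, unsigned long maxdiv,
				  unsigned int *clk_div)
{
	unsigned long ser_clk = 0, prev = 0, freq, div;

	for (div = 1; div <= maxdiv; div++) {
		freq = fake_round_rate(div * desired);
		if (!(freq % desired)) {	/* exact integer multiple found */
			ser_clk = freq;
			break;
		}
		if (!prev)
			ser_clk = freq;		/* remember the first candidate */
		else if (prev == freq)
			break;			/* the clock stopped moving, give up */
		prev = freq;
	}
	*clk_div = ser_clk / desired;
	if (!*clk_div)
		*clk_div = 1;
	return ser_clk;
}

int main(void)
{
	unsigned int div = 0;
	unsigned long desired = 115200UL * 16;	/* baud * sampling rate */
	unsigned long ser = find_ser_clk(desired, 4095, &div);

	printf("desired %lu -> ser_clk %lu, div %u\n", desired, ser, div);
	return 0;
}
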
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index e5f1fded423a..f556b4955f59 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -262,6 +262,8 @@ static void rda_uart_set_termios(struct uart_port *port,
fallthrough;
case CS7:
ctrl &= ~RDA_UART_DBITS_8;
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS7;
break;
default:
ctrl |= RDA_UART_DBITS_8;
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 5fe6cccfc1ae..e64e42a19d1a 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -446,6 +446,8 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
quot = uart_get_divisor(port, baud);
+ del_timer_sync(&sport->timer);
+
spin_lock_irqsave(&sport->port.lock, flags);
sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
@@ -476,8 +478,6 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
UTSR1_TO_SM(UTSR1_ROR);
}
- del_timer_sync(&sport->timer);
-
/*
* Update the per-port timeout.
*/
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index e1585fbae909..d5ca904def34 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -2480,12 +2480,24 @@ s3c24xx_serial_console_write(struct console *co, const char *s,
unsigned int count)
{
unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
+ unsigned long flags;
+ bool locked = true;
/* not possible to xmit on unconfigured port */
if (!s3c24xx_port_configured(ucon))
return;
+ if (cons_uart->sysrq)
+ locked = false;
+ else if (oops_in_progress)
+ locked = spin_trylock_irqsave(&cons_uart->lock, flags);
+ else
+ spin_lock_irqsave(&cons_uart->lock, flags);
+
uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
+
+ if (locked)
+ spin_unlock_irqrestore(&cons_uart->lock, flags);
}
/* Shouldn't be __init, as it can be instantiated from other module */
@@ -2814,6 +2826,7 @@ static const struct s3c24xx_serial_drv_data s5l_serial_drv_data = {
.num_clks = 1,
.clksel_mask = 0,
.clksel_shift = 0,
+ .ucon_mask = APPLE_S5L_UCON_MASK,
},
.def_cfg = {
.ucon = APPLE_S5L_UCON_DEFAULT,
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 5fb201c1b563..8472bf70477c 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1134,16 +1134,6 @@ static int sc16is7xx_config_rs485(struct uart_port *port,
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
if (rs485->flags & SER_RS485_ENABLED) {
- bool rts_during_rx, rts_during_tx;
-
- rts_during_rx = rs485->flags & SER_RS485_RTS_AFTER_SEND;
- rts_during_tx = rs485->flags & SER_RS485_RTS_ON_SEND;
-
- if (rts_during_rx == rts_during_tx)
- dev_err(port->dev,
- "unsupported RTS signalling on_send:%d after_send:%d - exactly one of RS485 RTS flags should be set\n",
- rts_during_tx, rts_during_rx);
-
/*
* RTS signal is handled by HW, it's timing can't be influenced.
* However, it's sometimes useful to delay TX even without RTS
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 6a8963caf954..9a85b41caa0a 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -24,6 +24,7 @@
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/math64.h>
#include <linux/security.h>
#include <linux/irq.h>
@@ -42,6 +43,11 @@ static struct lock_class_key port_lock_key;
#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
+/*
+ * Max time with active RTS before/after data is sent.
+ */
+#define RS485_MAX_RTS_DELAY 100 /* msecs */
+
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
struct ktermios *old_termios);
static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
@@ -333,15 +339,18 @@ void
uart_update_timeout(struct uart_port *port, unsigned int cflag,
unsigned int baud)
{
- unsigned int size;
+ unsigned int size = tty_get_frame_size(cflag);
+ u64 frame_time;
- size = tty_get_frame_size(cflag) * port->fifosize;
+ frame_time = (u64)size * NSEC_PER_SEC;
+ size *= port->fifosize;
/*
* Figure the timeout to send the above number of bits.
* Add .02 seconds of slop
*/
port->timeout = (HZ * size) / baud + HZ/50;
+ port->frame_time = DIV64_U64_ROUND_UP(frame_time, baud);
}
EXPORT_SYMBOL(uart_update_timeout);
@@ -1296,8 +1305,36 @@ static int uart_set_rs485_config(struct uart_port *port,
if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
return -EFAULT;
+ /* pick sane settings if the user hasn't */
+ if (!(rs485.flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485.flags & SER_RS485_RTS_AFTER_SEND)) {
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
+ port->name, port->line);
+ rs485.flags |= SER_RS485_RTS_ON_SEND;
+ rs485.flags &= ~SER_RS485_RTS_AFTER_SEND;
+ }
+
+ if (rs485.delay_rts_before_send > RS485_MAX_RTS_DELAY) {
+ rs485.delay_rts_before_send = RS485_MAX_RTS_DELAY;
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay before sending clamped to %u ms\n",
+ port->name, port->line, rs485.delay_rts_before_send);
+ }
+
+ if (rs485.delay_rts_after_send > RS485_MAX_RTS_DELAY) {
+ rs485.delay_rts_after_send = RS485_MAX_RTS_DELAY;
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay after sending clamped to %u ms\n",
+ port->name, port->line, rs485.delay_rts_after_send);
+ }
+ /* Return clean padding area to userspace */
+ memset(rs485.padding, 0, sizeof(rs485.padding));
+
spin_lock_irqsave(&port->lock, flags);
ret = port->rs485_config(port, &rs485);
+ if (!ret)
+ port->rs485 = rs485;
spin_unlock_irqrestore(&port->lock, flags);
if (ret)
return ret;
@@ -1610,24 +1647,24 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
* Note: we have to use pretty tight timings here to satisfy
* the NIST-PCTS.
*/
- char_time = (port->timeout - HZ/50) / port->fifosize;
- char_time = char_time / 5;
- if (char_time == 0)
- char_time = 1;
+ char_time = max(nsecs_to_jiffies(port->frame_time / 5), 1UL);
+
if (timeout && timeout < char_time)
char_time = timeout;
- /*
- * If the transmitter hasn't cleared in twice the approximate
- * amount of time to send the entire FIFO, it probably won't
- * ever clear. This assumes the UART isn't doing flow
- * control, which is currently the case. Hence, if it ever
- * takes longer than port->timeout, this is probably due to a
- * UART bug of some kind. So, we clamp the timeout parameter at
- * 2*port->timeout.
- */
- if (timeout == 0 || timeout > 2 * port->timeout)
- timeout = 2 * port->timeout;
+ if (!uart_cts_enabled(port)) {
+ /*
+ * If the transmitter hasn't cleared in twice the approximate
+ * amount of time to send the entire FIFO, it probably won't
+ * ever clear. This assumes the UART isn't doing flow
+ * control, which is currently the case. Hence, if it ever
+ * takes longer than port->timeout, this is probably due to a
+ * UART bug of some kind. So, we clamp the timeout parameter at
+ * 2*port->timeout.
+ */
+ if (timeout == 0 || timeout > 2 * port->timeout)
+ timeout = 2 * port->timeout;
+ }
expire = jiffies + timeout;
@@ -1643,7 +1680,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
- if (time_after(jiffies, expire))
+ if (timeout && time_after(jiffies, expire))
break;
}
uart_port_deref(port);
@@ -2174,15 +2211,23 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
}
put_device(tty_dev);
- /* Nothing to do if the console is not suspending */
- if (!console_suspend_enabled && uart_console(uport))
+ /*
+ * Nothing to do if the console is not suspending
+ * except stop_rx to prevent any asynchronous data
+ * over RX line. Re-start_rx, when required, is
+ * done by set_termios in resume sequence
+ */
+ if (!console_suspend_enabled && uart_console(uport)) {
+ uport->ops->stop_rx(uport);
goto unlock;
+ }
uport->suspended = 1;
if (tty_port_initialized(port)) {
const struct uart_ops *ops = uport->ops;
int tries;
+ unsigned int mctrl;
tty_port_set_suspended(port, 1);
tty_port_set_initialized(port, 0);
@@ -2190,6 +2235,9 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
spin_lock_irq(&uport->lock);
ops->stop_tx(uport);
ops->set_mctrl(uport, 0);
+ /* save mctrl so it can be restored on resume */
+ mctrl = uport->mctrl;
+ uport->mctrl = 0;
ops->stop_rx(uport);
spin_unlock_irq(&uport->lock);
@@ -2203,6 +2251,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
uport->name);
ops->shutdown(uport);
+ uport->mctrl = mctrl;
}
/*
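
Side note on the serial_core hunks above (illustration, not part of the patch): uart_set_rs485_config() now forces exactly one of RTS_ON_SEND/RTS_AFTER_SEND and clamps both RTS delays to 100 ms before handing the structure to the driver, and uart_update_timeout() additionally records the duration of one character frame in nanoseconds for later use by uart_wait_until_sent(). A user-space sketch of both calculations; the SER_RS485_* values mirror include/uapi/linux/serial.h and the struct fields are simplified stand-ins:

#include <stdio.h>
#include <stdint.h>

#define SER_RS485_RTS_ON_SEND		(1 << 1)	/* assumed uapi values */
#define SER_RS485_RTS_AFTER_SEND	(1 << 2)
#define RS485_MAX_RTS_DELAY		100		/* msecs, as in the hunk */
#define NSEC_PER_SEC			1000000000ULL

struct rs485 { unsigned int flags; unsigned int delay_before, delay_after; };

/* Mirrors the "pick sane settings" block in uart_set_rs485_config(). */
static void sanitize_rs485(struct rs485 *r)
{
	if (!(r->flags & SER_RS485_RTS_ON_SEND) ==
	    !(r->flags & SER_RS485_RTS_AFTER_SEND)) {
		r->flags |= SER_RS485_RTS_ON_SEND;
		r->flags &= ~SER_RS485_RTS_AFTER_SEND;
	}
	if (r->delay_before > RS485_MAX_RTS_DELAY)
		r->delay_before = RS485_MAX_RTS_DELAY;
	if (r->delay_after > RS485_MAX_RTS_DELAY)
		r->delay_after = RS485_MAX_RTS_DELAY;
}

/* frame_time as uart_update_timeout() now stores it: the bits of one frame
 * (start + data + parity + stop) over the baud rate, rounded up, in ns. */
static uint64_t frame_time_ns(unsigned int frame_bits, unsigned int baud)
{
	return ((uint64_t)frame_bits * NSEC_PER_SEC + baud - 1) / baud;
}

int main(void)
{
	struct rs485 r = { .flags = 0, .delay_before = 500, .delay_after = 7 };

	sanitize_rs485(&r);
	printf("flags=%#x before=%u after=%u\n", r.flags, r.delay_before, r.delay_after);
	printf("10-bit frame @115200 = %llu ns\n",
	       (unsigned long long)frame_time_ns(10, 115200));
	return 0;
}
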
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 2213e6b841d3..228e380db080 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -618,6 +618,8 @@ serial_txx9_set_termios(struct uart_port *up, struct ktermios *termios,
case CS6: /* not supported */
case CS8:
cval |= TXX9_SILCR_UMODE_8BIT;
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
break;
}
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 0f9b8bd23500..0075a1420005 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2379,8 +2379,12 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
int best_clk = -1;
unsigned long flags;
- if ((termios->c_cflag & CSIZE) == CS7)
+ if ((termios->c_cflag & CSIZE) == CS7) {
smr_val |= SCSMR_CHR;
+ } else {
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
+ }
if (termios->c_cflag & PARENB)
smr_val |= SCSMR_PE;
if (termios->c_cflag & PARODD)
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index f5ac14c384c4..c0869b080cc3 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -148,7 +148,6 @@
* @port: struct uart_port embedded in this struct
* @dev: struct device *
* @ier: shadowed copy of the interrupt enable register
- * @clkin_rate: input clock to the UART IP block.
* @baud_rate: UART serial line rate (e.g., 115200 baud)
* @clk: reference to this device's clock
* @clk_notifier: clock rate change notifier for upstream clock changes
@@ -159,7 +158,6 @@ struct sifive_serial_port {
struct uart_port port;
struct device *dev;
unsigned char ier;
- unsigned long clkin_rate;
unsigned long baud_rate;
struct clk *clk;
struct notifier_block clk_notifier;
@@ -463,7 +461,7 @@ static void __ssp_update_div(struct sifive_serial_port *ssp)
{
u16 div;
- div = DIV_ROUND_UP(ssp->clkin_rate, ssp->baud_rate) - 1;
+ div = DIV_ROUND_UP(ssp->port.uartclk, ssp->baud_rate) - 1;
__ssp_writel(div, SIFIVE_SERIAL_DIV_OFFS, ssp);
}
@@ -648,8 +646,8 @@ static int sifive_serial_clk_notifier(struct notifier_block *nb,
udelay(DIV_ROUND_UP(12 * 1000 * 1000, ssp->baud_rate));
}
- if (event == POST_RATE_CHANGE && ssp->clkin_rate != cnd->new_rate) {
- ssp->clkin_rate = cnd->new_rate;
+ if (event == POST_RATE_CHANGE && ssp->port.uartclk != cnd->new_rate) {
+ ssp->port.uartclk = cnd->new_rate;
__ssp_update_div(ssp);
}
@@ -666,19 +664,24 @@ static void sifive_serial_set_termios(struct uart_port *port,
int rate;
char nstop;
- if ((termios->c_cflag & CSIZE) != CS8)
+ if ((termios->c_cflag & CSIZE) != CS8) {
dev_err_once(ssp->port.dev, "only 8-bit words supported\n");
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
+ }
if (termios->c_iflag & (INPCK | PARMRK))
dev_err_once(ssp->port.dev, "parity checking not supported\n");
if (termios->c_iflag & BRKINT)
dev_err_once(ssp->port.dev, "BREAK detection not supported\n");
+ termios->c_iflag &= ~(INPCK|PARMRK|BRKINT);
/* Set number of stop bits */
nstop = (termios->c_cflag & CSTOPB) ? 2 : 1;
__ssp_set_stop_bits(ssp, nstop);
/* Set line rate */
- rate = uart_get_baud_rate(port, termios, old, 0, ssp->clkin_rate / 16);
+ rate = uart_get_baud_rate(port, termios, old, 0,
+ ssp->port.uartclk / 16);
__ssp_update_baud_rate(ssp, rate);
spin_lock_irqsave(&ssp->port.lock, flags);
@@ -996,9 +999,8 @@ static int sifive_serial_probe(struct platform_device *pdev)
}
/* Set up clock divider */
- ssp->clkin_rate = clk_get_rate(ssp->clk);
+ ssp->port.uartclk = clk_get_rate(ssp->clk);
ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE;
- ssp->port.uartclk = ssp->baud_rate * 16;
__ssp_update_div(ssp);
platform_set_drvdata(pdev, ssp);
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index d7fd692286cf..1b0da603ab54 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -535,10 +535,14 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
/* set character length */
if ((cflag & CSIZE) == CS7) {
ctrl_val |= ASC_CTL_MODE_7BIT_PAR;
+ cflag |= PARENB;
} else {
ctrl_val |= (cflag & PARENB) ? ASC_CTL_MODE_8BIT_PAR :
ASC_CTL_MODE_8BIT;
+ cflag &= ~CSIZE;
+ cflag |= CS8;
}
+ termios->c_cflag = cflag;
/* set stop bit */
ctrl_val |= (cflag & CSTOPB) ? ASC_CTL_STOP_2BIT : ASC_CTL_STOP_1BIT;
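
Several of the hunks above (rda-uart, serial_txx9, sh-sci, sifive, st-asc, and the stm32 change that follows) share one theme: when the controller cannot honour the requested CSIZE, the driver now writes the word length it actually programmed back into termios->c_cflag so user space sees the real line settings. A tiny illustrative helper for that pattern, not taken from any of the drivers, assuming a CS8-only UART:

#include <stdio.h>
#include <termios.h>

/* Clamp the requested character size to what the hardware supports and
 * report the result back through c_cflag, as the drivers above now do. */
static tcflag_t fixup_csize(tcflag_t cflag, tcflag_t hw_supported)
{
	if ((cflag & CSIZE) != hw_supported) {
		cflag &= ~CSIZE;
		cflag |= hw_supported;	/* e.g. CS8-only UARTs force CS8 */
	}
	return cflag;
}

int main(void)
{
	tcflag_t req = CS5 | PARENB;
	tcflag_t res = fixup_csize(req, CS8);

	printf("requested CSIZE=%#lx -> programmed CSIZE=%#lx\n",
	       (unsigned long)(req & CSIZE), (unsigned long)(res & CSIZE));
	return 0;
}
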
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 87b5cd4c9743..b7b44f4050d4 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -37,6 +37,7 @@
static void stm32_usart_stop_tx(struct uart_port *port);
static void stm32_usart_transmit_chars(struct uart_port *port);
+static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
static inline struct stm32_port *to_stm32_port(struct uart_port *port)
{
@@ -107,8 +108,6 @@ static int stm32_usart_config_rs485(struct uart_port *port,
stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
- port->rs485 = *rs485conf;
-
rs485conf->flags |= SER_RS485_RX_DURING_TX;
if (rs485conf->flags & SER_RS485_ENABLED) {
@@ -128,13 +127,10 @@ static int stm32_usart_config_rs485(struct uart_port *port,
rs485conf->delay_rts_after_send,
baud);
- if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+ if (rs485conf->flags & SER_RS485_RTS_ON_SEND)
cr3 &= ~USART_CR3_DEP;
- rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
- } else {
+ else
cr3 |= USART_CR3_DEP;
- rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
- }
writel_relaxed(cr3, port->membase + ofs->cr3);
writel_relaxed(cr1, port->membase + ofs->cr1);
@@ -421,6 +417,14 @@ static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
}
+static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
+}
+
static void stm32_usart_rx_dma_complete(void *arg)
{
struct uart_port *port = arg;
@@ -446,6 +450,50 @@ static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
}
+static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
+}
+
+static void stm32_usart_rs485_rts_enable(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct serial_rs485 *rs485conf = &port->rs485;
+
+ if (stm32_port->hw_flow_control ||
+ !(rs485conf->flags & SER_RS485_ENABLED))
+ return;
+
+ if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl | TIOCM_RTS);
+ } else {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl & ~TIOCM_RTS);
+ }
+}
+
+static void stm32_usart_rs485_rts_disable(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct serial_rs485 *rs485conf = &port->rs485;
+
+ if (stm32_port->hw_flow_control ||
+ !(rs485conf->flags & SER_RS485_ENABLED))
+ return;
+
+ if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl & ~TIOCM_RTS);
+ } else {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl | TIOCM_RTS);
+ }
+}
+
static void stm32_usart_transmit_chars_pio(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -553,6 +601,13 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
u32 isr;
int ret;
+ if (!stm32_port->hw_flow_control &&
+ port->rs485.flags & SER_RS485_ENABLED) {
+ stm32_port->txdone = false;
+ stm32_usart_tc_interrupt_disable(port);
+ stm32_usart_rs485_rts_enable(port);
+ }
+
if (port->x_char) {
if (stm32_usart_tx_dma_started(stm32_port) &&
stm32_usart_tx_dma_enabled(stm32_port))
@@ -593,8 +648,14 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (uart_circ_empty(xmit))
+ if (uart_circ_empty(xmit)) {
stm32_usart_tx_interrupt_disable(port);
+ if (!stm32_port->hw_flow_control &&
+ port->rs485.flags & SER_RS485_ENABLED) {
+ stm32_port->txdone = true;
+ stm32_usart_tc_interrupt_enable(port);
+ }
+ }
}
static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
@@ -608,6 +669,13 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
sr = readl_relaxed(port->membase + ofs->isr);
+ if (!stm32_port->hw_flow_control &&
+ port->rs485.flags & SER_RS485_ENABLED &&
+ (sr & USART_SR_TC)) {
+ stm32_usart_tc_interrupt_disable(port);
+ stm32_usart_rs485_rts_disable(port);
+ }
+
if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
writel_relaxed(USART_ICR_RTOCF,
port->membase + ofs->icr);
@@ -717,44 +785,27 @@ static void stm32_usart_disable_ms(struct uart_port *port)
static void stm32_usart_stop_tx(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- struct serial_rs485 *rs485conf = &port->rs485;
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
stm32_usart_tx_interrupt_disable(port);
if (stm32_usart_tx_dma_started(stm32_port) && stm32_usart_tx_dma_enabled(stm32_port))
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
- if (rs485conf->flags & SER_RS485_ENABLED) {
- if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl & ~TIOCM_RTS);
- } else {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl | TIOCM_RTS);
- }
- }
+ stm32_usart_rs485_rts_disable(port);
}
/* There are probably characters waiting to be transmitted. */
static void stm32_usart_start_tx(struct uart_port *port)
{
- struct stm32_port *stm32_port = to_stm32_port(port);
- struct serial_rs485 *rs485conf = &port->rs485;
struct circ_buf *xmit = &port->state->xmit;
- if (uart_circ_empty(xmit) && !port->x_char)
+ if (uart_circ_empty(xmit) && !port->x_char) {
+ stm32_usart_rs485_rts_disable(port);
return;
-
- if (rs485conf->flags & SER_RS485_ENABLED) {
- if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl | TIOCM_RTS);
- } else {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl & ~TIOCM_RTS);
- }
}
+ stm32_usart_rs485_rts_enable(port);
+
stm32_usart_transmit_chars(port);
}
@@ -1037,13 +1088,22 @@ static void stm32_usart_set_termios(struct uart_port *port,
* CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
* M0 and M1 already cleared by cr1 initialization.
*/
- if (bits == 9)
+ if (bits == 9) {
cr1 |= USART_CR1_M0;
- else if ((bits == 7) && cfg->has_7bits_data)
+ } else if ((bits == 7) && cfg->has_7bits_data) {
cr1 |= USART_CR1_M1;
- else if (bits != 8)
+ } else if (bits != 8) {
dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
, bits);
+ cflag &= ~CSIZE;
+ cflag |= CS8;
+ termios->c_cflag = cflag;
+ bits = 8;
+ if (cflag & PARENB) {
+ bits++;
+ cr1 |= USART_CR1_M0;
+ }
+ }
if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
(stm32_port->fifoen &&
@@ -1222,6 +1282,33 @@ static void stm32_usart_pm(struct uart_port *port, unsigned int state,
}
}
+#if defined(CONFIG_CONSOLE_POLL)
+
+ /* Callbacks for characters polling in debug context (i.e. KGDB). */
+static int stm32_usart_poll_init(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+
+ return clk_prepare_enable(stm32_port->clk);
+}
+
+static int stm32_usart_poll_get_char(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE))
+ return NO_POLL_CHAR;
+
+ return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask;
+}
+
+static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch)
+{
+ stm32_usart_console_putchar(port, ch);
+}
+#endif /* CONFIG_CONSOLE_POLL */
+
static const struct uart_ops stm32_uart_ops = {
.tx_empty = stm32_usart_tx_empty,
.set_mctrl = stm32_usart_set_mctrl,
@@ -1243,6 +1330,11 @@ static const struct uart_ops stm32_uart_ops = {
.request_port = stm32_usart_request_port,
.config_port = stm32_usart_config_port,
.verify_port = stm32_usart_verify_port,
+#if defined(CONFIG_CONSOLE_POLL)
+ .poll_init = stm32_usart_poll_init,
+ .poll_get_char = stm32_usart_poll_get_char,
+ .poll_put_char = stm32_usart_poll_put_char,
+#endif /* CONFIG_CONSOLE_POLL */
};
/*
@@ -1640,18 +1732,24 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_SERIAL_STM32_CONSOLE
-static void stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
+static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ u32 isr;
+ int ret;
- while (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
- cpu_relax();
-
+ ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr,
+ (isr & USART_SR_TXE), 100,
+ STM32_USART_TIMEOUT_USEC);
+ if (ret != 0) {
+ dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret);
+ return;
+ }
writel_relaxed(ch, port->membase + ofs->tdr);
}
+#ifdef CONFIG_SERIAL_STM32_CONSOLE
static void stm32_usart_console_write(struct console *co, const char *s,
unsigned int cnt)
{
@@ -1727,6 +1825,57 @@ static struct console stm32_console = {
#define STM32_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_STM32_CONSOLE */
+#ifdef CONFIG_SERIAL_EARLYCON
+static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
+{
+ struct stm32_usart_info *info = port->private_data;
+
+ while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
+ cpu_relax();
+
+ writel_relaxed(ch, port->membase + info->ofs.tdr);
+}
+
+static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
+{
+ struct earlycon_device *device = console->data;
+ struct uart_port *port = &device->port;
+
+ uart_console_write(port, s, count, early_stm32_usart_console_putchar);
+}
+
+static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
+{
+ if (!(device->port.membase || device->port.iobase))
+ return -ENODEV;
+ device->port.private_data = &stm32h7_info;
+ device->con->write = early_stm32_serial_write;
+ return 0;
+}
+
+static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
+{
+ if (!(device->port.membase || device->port.iobase))
+ return -ENODEV;
+ device->port.private_data = &stm32f7_info;
+ device->con->write = early_stm32_serial_write;
+ return 0;
+}
+
+static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
+{
+ if (!(device->port.membase || device->port.iobase))
+ return -ENODEV;
+ device->port.private_data = &stm32f4_info;
+ device->con->write = early_stm32_serial_write;
+ return 0;
+}
+
+OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
+OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
+OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
+#endif /* CONFIG_SERIAL_EARLYCON */
+
static struct uart_driver stm32_usart_driver = {
.driver_name = DRIVER_NAME,
.dev_name = STM32_SERIAL_NAME,
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index feab952aec16..ee69c203b926 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -251,6 +251,8 @@ struct stm32_usart_info stm32h7_info = {
#define RX_BUF_P (RX_BUF_L / 2) /* dma rx buffer period */
#define TX_BUF_L RX_BUF_L /* dma tx buffer length */
+#define STM32_USART_TIMEOUT_USEC USEC_PER_SEC /* 1s timeout in µs */
+
struct stm32_port {
struct uart_port port;
struct clk *clk;
@@ -269,6 +271,7 @@ struct stm32_port {
bool hw_flow_control;
bool swap; /* swap RX & TX pins */
bool fifoen;
+ bool txdone;
int rxftcfg; /* RX FIFO threshold CFG */
int txftcfg; /* TX FIFO threshold CFG */
bool wakeup_src;
diff --git a/drivers/tty/serial/sunplus-uart.c b/drivers/tty/serial/sunplus-uart.c
index 9f15922e681b..60c73662f955 100644
--- a/drivers/tty/serial/sunplus-uart.c
+++ b/drivers/tty/serial/sunplus-uart.c
@@ -498,7 +498,7 @@ static const struct uart_ops sunplus_uart_ops = {
};
#ifdef CONFIG_SERIAL_SUNPLUS_CONSOLE
-struct sunplus_uart_port *sunplus_console_ports[SUP_UART_NR];
+static struct sunplus_uart_port *sunplus_console_ports[SUP_UART_NR];
static void sunplus_uart_console_putchar(struct uart_port *port,
unsigned char ch)
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index c31389114b86..fff50b5b82eb 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -798,10 +798,8 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
cval |= UART_LCR_PARITY;
if (!(cflag & PARODD))
cval |= UART_LCR_EPAR;
-#ifdef CMSPAR
if (cflag & CMSPAR)
cval |= UART_LCR_SPAR;
-#endif
/*
* Work around a bug in the Oxford Semiconductor 952 rev B
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 007db67292a2..880e2afbb97b 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -321,7 +321,8 @@ static void ulite_set_termios(struct uart_port *port, struct ktermios *termios,
struct uartlite_data *pdata = port->private_data;
/* Set termios to what the hardware supports */
- termios->c_cflag &= ~(BRKINT | CSTOPB | PARENB | PARODD | CSIZE);
+ termios->c_iflag &= ~BRKINT;
+ termios->c_cflag &= ~(CSTOPB | PARENB | PARODD | CSIZE);
termios->c_cflag |= pdata->cflags & (PARENB | PARODD | CSIZE);
tty_termios_encode_baud_rate(termios, pdata->baud, pdata->baud);
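/*
 * Note, not part of the patch: BRKINT is an input-mode flag and belongs in
 * c_iflag, while CSTOPB/PARENB/PARODD/CSIZE are control-mode flags in
 * c_cflag; the old code was clearing BRKINT from the wrong termios field.
 */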
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 250a1d888eeb..9e01fe6c0ab8 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -313,41 +313,27 @@ static void cdns_uart_handle_rx(void *dev_id, unsigned int isrstatus)
static void cdns_uart_handle_tx(void *dev_id)
{
struct uart_port *port = (struct uart_port *)dev_id;
+ struct circ_buf *xmit = &port->state->xmit;
unsigned int numbytes;
- if (uart_circ_empty(&port->state->xmit)) {
+ if (uart_circ_empty(xmit)) {
writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IDR);
- } else {
- numbytes = port->fifosize;
- while (numbytes && !uart_circ_empty(&port->state->xmit) &&
- !(readl(port->membase + CDNS_UART_SR) &
- CDNS_UART_SR_TXFULL)) {
- /*
- * Get the data from the UART circular buffer
- * and write it to the cdns_uart's TX_FIFO
- * register.
- */
- writel(
- port->state->xmit.buf[port->state->xmit.tail],
- port->membase + CDNS_UART_FIFO);
-
- port->icount.tx++;
-
- /*
- * Adjust the tail of the UART buffer and wrap
- * the buffer if it reaches limit.
- */
- port->state->xmit.tail =
- (port->state->xmit.tail + 1) &
- (UART_XMIT_SIZE - 1);
-
- numbytes--;
- }
+ return;
+ }
- if (uart_circ_chars_pending(
- &port->state->xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
+ numbytes = port->fifosize;
+ while (numbytes && !uart_circ_empty(xmit) &&
+ !(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)) {
+
+ writel(xmit->buf[xmit->tail], port->membase + CDNS_UART_FIFO);
+
+ port->icount.tx++;
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ numbytes--;
}
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
}
/**
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 70969bf9d82c..5bc58591665a 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -981,7 +981,7 @@ static const char *zs_type(struct uart_port *uport)
static void zs_release_port(struct uart_port *uport)
{
iounmap(uport->membase);
- uport->membase = 0;
+ uport->membase = NULL;
release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE);
}
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 25c558e65ece..9bc2a9265277 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1746,6 +1746,8 @@ static int hdlcdev_init(struct slgt_info *info)
*/
static void hdlcdev_exit(struct slgt_info *info)
{
+ if (!info->netdev)
+ return;
unregister_hdlc_device(info->netdev);
free_netdev(info->netdev);
info->netdev = NULL;
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 2884cd638d64..18e623325887 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -232,8 +232,10 @@ static void showacpu(void *dummy)
unsigned long flags;
/* Idle CPUs have no interesting backtrace. */
- if (idle_cpu(smp_processor_id()))
+ if (idle_cpu(smp_processor_id())) {
+ pr_info("CPU%d: backtrace skipped as idling\n", smp_processor_id());
return;
+ }
raw_spin_lock_irqsave(&show_lock, flags);
pr_info("CPU%d:\n", smp_processor_id());
@@ -260,10 +262,13 @@ static void sysrq_handle_showallcpus(int key)
if (in_hardirq())
regs = get_irq_regs();
- if (regs) {
- pr_info("CPU%d:\n", smp_processor_id());
+
+ pr_info("CPU%d:\n", smp_processor_id());
+ if (regs)
show_regs(regs);
- }
+ else
+ show_stack(NULL, NULL, KERN_INFO);
+
schedule_work(&sysrq_showallcpus);
}
}
@@ -274,6 +279,8 @@ static const struct sysrq_key_op sysrq_showallcpus_op = {
.action_msg = "Show backtrace of all active CPUs",
.enable_mask = SYSRQ_ENABLE_DUMP,
};
+#else
+#define sysrq_showallcpus_op (*(const struct sysrq_key_op *)NULL)
#endif
static void sysrq_handle_showregs(int key)
@@ -405,6 +412,7 @@ static const struct sysrq_key_op sysrq_moom_op = {
.enable_mask = SYSRQ_ENABLE_SIGNAL,
};
+#ifdef CONFIG_BLOCK
static void sysrq_handle_thaw(int key)
{
emergency_thaw_all();
@@ -415,6 +423,9 @@ static const struct sysrq_key_op sysrq_thaw_op = {
.action_msg = "Emergency Thaw of all frozen filesystems",
.enable_mask = SYSRQ_ENABLE_SIGNAL,
};
+#else
+#define sysrq_thaw_op (*(const struct sysrq_key_op *)NULL)
+#endif
static void sysrq_handle_kill(int key)
{
@@ -468,17 +479,9 @@ static const struct sysrq_key_op *sysrq_key_table[62] = {
NULL, /* g */
NULL, /* h - reserved for help */
&sysrq_kill_op, /* i */
-#ifdef CONFIG_BLOCK
&sysrq_thaw_op, /* j */
-#else
- NULL, /* j */
-#endif
&sysrq_SAK_op, /* k */
-#ifdef CONFIG_SMP
&sysrq_showallcpus_op, /* l */
-#else
- NULL, /* l */
-#endif
&sysrq_showmem_op, /* m */
&sysrq_unrt_op, /* n */
/* o: This will often be registered as 'Off' at init time */
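/*
 * Illustrative sketch, not part of the patch: with CONFIG_SMP or CONFIG_BLOCK
 * disabled, a table entry such as &sysrq_showallcpus_op now expands to
 * &(*(const struct sysrq_key_op *)NULL).  C defines &*E as plain E with
 * neither operator evaluated, so the entry is still a NULL pointer -- the
 * same value the removed explicit NULL initializers provided, only without
 * #ifdefs inside the table itself.
 */
static bool sysrq_example_key_registered(const struct sysrq_key_op *op)
{
	return op != NULL;	/* false for the stubbed-out entries */
}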
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index d903e111dbcb..3cd99ed7c710 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -61,11 +61,10 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
cbaud = termios->c_cflag & CBAUD;
-#ifdef BOTHER
/* Magic token for arbitrary speed via c_ispeed/c_ospeed */
if (cbaud == BOTHER)
return termios->c_ospeed;
-#endif
+
if (cbaud & CBAUDEX) {
cbaud &= ~CBAUDEX;
@@ -92,16 +91,15 @@ EXPORT_SYMBOL(tty_termios_baud_rate);
speed_t tty_termios_input_baud_rate(struct ktermios *termios)
{
-#ifdef IBSHIFT
unsigned int cbaud = (termios->c_cflag >> IBSHIFT) & CBAUD;
if (cbaud == B0)
return tty_termios_baud_rate(termios);
-#ifdef BOTHER
+
/* Magic token for arbitrary speed via c_ispeed*/
if (cbaud == BOTHER)
return termios->c_ispeed;
-#endif
+
if (cbaud & CBAUDEX) {
cbaud &= ~CBAUDEX;
@@ -111,9 +109,6 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
cbaud += 15;
}
return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
-#else /* IBSHIFT */
- return tty_termios_baud_rate(termios);
-#endif /* IBSHIFT */
}
EXPORT_SYMBOL(tty_termios_input_baud_rate);
@@ -153,11 +148,9 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
termios->c_ispeed = ibaud;
termios->c_ospeed = obaud;
-#ifdef IBSHIFT
if (((termios->c_cflag >> IBSHIFT) & CBAUD) != B0)
ibinput = 1; /* An input speed was specified */
-#endif
-#ifdef BOTHER
+
/* If the user asked for a precise weird speed give a precise weird
* answer. If they asked for a Bfoo speed they may have problems
* digesting non-exact replies so fuzz a bit.
@@ -170,11 +163,9 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
}
if (((termios->c_cflag >> IBSHIFT) & CBAUD) == BOTHER)
iclose = 0;
-#endif
+
termios->c_cflag &= ~CBAUD;
-#ifdef IBSHIFT
termios->c_cflag &= ~(CBAUD << IBSHIFT);
-#endif
/*
* Our goal is to find a close match to the standard baud rate
@@ -194,22 +185,16 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
/* For the case input == output don't set IBAUD bits
* if the user didn't do so.
*/
- if (ofound == i && !ibinput)
+ if (ofound == i && !ibinput) {
ifound = i;
-#ifdef IBSHIFT
- else {
+ } else {
ifound = i;
termios->c_cflag |= (baud_bits[i] << IBSHIFT);
}
-#endif
}
} while (++i < n_baud_table);
- /*
- * If we found no match then use BOTHER if provided or warn
- * the user their platform maintainer needs to wake up if not.
- */
-#ifdef BOTHER
+ /* If we found no match then use BOTHER. */
if (ofound == -1)
termios->c_cflag |= BOTHER;
/* Set exact input bits only if the input and output differ or the
@@ -217,10 +202,6 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
*/
if (ifound == -1 && (ibaud != obaud || ibinput))
termios->c_cflag |= (BOTHER << IBSHIFT);
-#else
- if (ifound == -1 || ofound == -1)
- pr_warn_once("tty: Unable to return correct speed data as your architecture needs updating.\n");
-#endif
}
EXPORT_SYMBOL_GPL(tty_termios_encode_baud_rate);
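/*
 * Minimal user-space sketch, not part of the patch, of the BOTHER path that
 * the now-unconditional code above handles: ask for an arbitrary line speed
 * by putting BOTHER into CBAUD and the numeric rate into c_ospeed/c_ispeed
 * through the termios2 ioctls.  Assumes a Linux toolchain whose uapi headers
 * expose struct termios2 and TCGETS2/TCSETS2.
 */
#include <asm/termbits.h>	/* struct termios2, BOTHER, CBAUD, IBSHIFT */
#include <asm/ioctls.h>		/* TCGETS2, TCSETS2 */
#include <sys/ioctl.h>

static int set_arbitrary_baud(int fd, unsigned int baud)
{
	struct termios2 tio;

	if (ioctl(fd, TCGETS2, &tio) < 0)
		return -1;
	tio.c_cflag &= ~(CBAUD | (CBAUD << IBSHIFT));
	tio.c_cflag |= BOTHER | (BOTHER << IBSHIFT);
	tio.c_ospeed = baud;
	tio.c_ispeed = baud;
	return ioctl(fd, TCSETS2, &tio);
}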
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 646510476c30..bfa431a8e690 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -175,7 +175,8 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
*/
if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
return NULL;
- p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
+ p = kmalloc(sizeof(struct tty_buffer) + 2 * size,
+ GFP_ATOMIC | __GFP_NOWARN);
if (p == NULL)
return NULL;
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 63181925ec1a..adae687f654b 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -562,10 +562,8 @@ static int set_sgttyb(struct tty_struct *tty, struct sgttyb __user *sgttyb)
termios.c_cc[VKILL] = tmp.sg_kill;
set_sgflags(&termios, tmp.sg_flags);
/* Try and encode into Bfoo format */
-#ifdef BOTHER
tty_termios_encode_baud_rate(&termios, termios.c_ispeed,
termios.c_ospeed);
-#endif
up_write(&tty->termios_rwsem);
tty_set_termios(tty, &termios);
return 0;
diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
index 80b86a7992b5..0d04287da098 100644
--- a/drivers/tty/tty_jobctrl.c
+++ b/drivers/tty/tty_jobctrl.c
@@ -215,8 +215,8 @@ int tty_signal_session_leader(struct tty_struct *tty, int exit_session)
spin_unlock_irq(&p->sighand->siglock);
continue;
}
- __group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p);
- __group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p);
+ send_signal_locked(SIGHUP, SEND_SIG_PRIV, p, PIDTYPE_TGID);
+ send_signal_locked(SIGCONT, SEND_SIG_PRIV, p, PIDTYPE_TGID);
put_pid(p->signal->tty_old_pgrp); /* A noop */
spin_lock(&tty->ctrl.lock);
tty_pgrp = get_pid(tty->ctrl.pgrp);
diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig
new file mode 100644
index 000000000000..90226f72c158
--- /dev/null
+++ b/drivers/ufs/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# UFS subsystem configuration
+#
+
+menuconfig SCSI_UFSHCD
+ tristate "Universal Flash Storage Controller"
+ depends on SCSI && SCSI_DMA
+ select PM_DEVFREQ
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select NLS
+ help
+ Enables support for UFS (Universal Flash Storage) host controllers.
+ A UFS host controller is an electronic component that is able to
+ communicate with a UFS card. UFS host controllers occur in
+ smartphones, laptops, digital cameras and also in cars.
+ The kernel module will be called ufshcd.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/ufs.rst>.
+ However, do not compile this as a module if your root file system
+ (the one containing the directory /) is located on a UFS device.
+
+if SCSI_UFSHCD
+
+source "drivers/ufs/core/Kconfig"
+
+source "drivers/ufs/host/Kconfig"
+
+endif
diff --git a/drivers/ufs/Makefile b/drivers/ufs/Makefile
new file mode 100644
index 000000000000..5a199ef18d4c
--- /dev/null
+++ b/drivers/ufs/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# The link order is important here. ufshcd-core must initialize
+# before vendor drivers.
+obj-$(CONFIG_SCSI_UFSHCD) += core/ host/
diff --git a/drivers/ufs/core/Kconfig b/drivers/ufs/core/Kconfig
new file mode 100644
index 000000000000..e11978171403
--- /dev/null
+++ b/drivers/ufs/core/Kconfig
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Kernel configuration file for the UFS Host Controller core.
+#
+# Copyright (C) 2011-2013 Samsung India Software Operations
+#
+# Authors:
+# Santosh Yaraganavi <santosh.sy@samsung.com>
+# Vinayak Holikatti <h.vinayak@samsung.com>
+
+config SCSI_UFS_BSG
+ bool "Universal Flash Storage BSG device node"
+ select BLK_DEV_BSGLIB
+ help
+ Universal Flash Storage (UFS) is a SCSI transport specification for
+ accessing flash storage on digital cameras, mobile phones and
+ consumer electronic devices.
+ A UFS controller communicates with a UFS device by exchanging
+ UFS Protocol Information Units (UPIUs).
+ UPIUs can not only be used as a transport layer for the SCSI protocol
+ but are also used by the UFS native command set.
+ This transport driver supports exchanging UFS protocol information units
+ with a UFS device. See also the ufshcd driver, which is a SCSI driver
+ that supports UFS devices.
+
+ Select this if you need a bsg device node for your UFS controller.
+ If unsure, say N.
+
+config SCSI_UFS_CRYPTO
+ bool "UFS Crypto Engine Support"
+ depends on BLK_INLINE_ENCRYPTION
+ help
+ Enable Crypto Engine Support in UFS.
+ Enabling this makes it possible for the kernel to use the crypto
+ capabilities of the UFS device (if present) to perform crypto
+ operations on data being transferred to/from the device.
+
+config SCSI_UFS_HPB
+ bool "Support UFS Host Performance Booster"
+ help
+ The UFS HPB feature improves random read performance. It caches
+ L2P (logical to physical) map of UFS to host DRAM. The driver uses HPB
+ read command by piggybacking physical page number for bypassing FTL (flash
+ translation layer)'s L2P address translation.
+
+config SCSI_UFS_FAULT_INJECTION
+ bool "UFS Fault Injection Support"
+ depends on FAULT_INJECTION
+ help
+ Enable fault injection support in the UFS driver. This makes it easier
+ to test the UFS error handler and abort handler.
+
+config SCSI_UFS_HWMON
+ bool "UFS Temperature Notification"
+ depends on SCSI_UFSHCD=HWMON || HWMON=y
+ help
+ This provides support for UFS hardware monitoring. If enabled,
+ a hardware monitoring device will be created for the UFS device.
+
+ If unsure, say N.
diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile
new file mode 100644
index 000000000000..62f38c5bf857
--- /dev/null
+++ b/drivers/ufs/core/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
+ufshcd-core-y += ufshcd.o ufs-sysfs.o
+ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
+ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
+ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
+ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o
+ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o
+ufshcd-core-$(CONFIG_SCSI_UFS_HWMON) += ufs-hwmon.o
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/ufs/core/ufs-debugfs.c
index c10a8f09682b..e3baed6c70bd 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/ufs/core/ufs-debugfs.c
@@ -4,7 +4,7 @@
#include <linux/debugfs.h>
#include "ufs-debugfs.h"
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-priv.h"
static struct dentry *ufs_debugfs_root;
diff --git a/drivers/scsi/ufs/ufs-debugfs.h b/drivers/ufs/core/ufs-debugfs.h
index 97548a3f90eb..97548a3f90eb 100644
--- a/drivers/scsi/ufs/ufs-debugfs.h
+++ b/drivers/ufs/core/ufs-debugfs.h
diff --git a/drivers/scsi/ufs/ufs-fault-injection.c b/drivers/ufs/core/ufs-fault-injection.c
index 7ac7c4e7ff83..7ac7c4e7ff83 100644
--- a/drivers/scsi/ufs/ufs-fault-injection.c
+++ b/drivers/ufs/core/ufs-fault-injection.c
diff --git a/drivers/scsi/ufs/ufs-fault-injection.h b/drivers/ufs/core/ufs-fault-injection.h
index 6d0cd8e10c87..6d0cd8e10c87 100644
--- a/drivers/scsi/ufs/ufs-fault-injection.h
+++ b/drivers/ufs/core/ufs-fault-injection.h
diff --git a/drivers/scsi/ufs/ufs-hwmon.c b/drivers/ufs/core/ufs-hwmon.c
index c38d9d98a86d..4c6a872b7a7c 100644
--- a/drivers/scsi/ufs/ufs-hwmon.c
+++ b/drivers/ufs/core/ufs-hwmon.c
@@ -7,7 +7,7 @@
#include <linux/hwmon.h>
#include <linux/units.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-priv.h"
struct ufs_hwmon_data {
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 8a3c6442f291..0a088b47d557 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -6,7 +6,7 @@
#include <linux/bitfield.h>
#include <asm/unaligned.h>
-#include "ufs.h"
+#include <ufs/ufs.h>
#include "ufs-sysfs.h"
#include "ufshcd-priv.h"
diff --git a/drivers/scsi/ufs/ufs-sysfs.h b/drivers/ufs/core/ufs-sysfs.h
index 8d94af3b8077..8d94af3b8077 100644
--- a/drivers/scsi/ufs/ufs-sysfs.h
+++ b/drivers/ufs/core/ufs-sysfs.h
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 9e9b93867cab..b99e3f3dc4ef 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -9,7 +9,7 @@
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include "ufs_bsg.h"
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-priv.h"
static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
diff --git a/drivers/scsi/ufs/ufs_bsg.h b/drivers/ufs/core/ufs_bsg.h
index 57712d2656d2..57712d2656d2 100644
--- a/drivers/scsi/ufs/ufs_bsg.h
+++ b/drivers/ufs/core/ufs_bsg.h
diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c
index 67402baf6fae..198360fe5e8e 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.c
+++ b/drivers/ufs/core/ufshcd-crypto.c
@@ -3,7 +3,7 @@
* Copyright 2019 Google LLC
*/
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-crypto.h"
/* Blk-crypto modes supported by UFS crypto */
diff --git a/drivers/scsi/ufs/ufshcd-crypto.h b/drivers/ufs/core/ufshcd-crypto.h
index 9f98f18f9646..504cc841540b 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.h
+++ b/drivers/ufs/core/ufshcd-crypto.h
@@ -7,9 +7,9 @@
#define _UFSHCD_CRYPTO_H
#include <scsi/scsi_cmnd.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-priv.h"
-#include "ufshci.h"
+#include <ufs/ufshci.h>
#ifdef CONFIG_SCSI_UFS_CRYPTO
diff --git a/drivers/scsi/ufs/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 38bc77d3dbbd..ffb01fc6de75 100644
--- a/drivers/scsi/ufs/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -4,7 +4,7 @@
#define _UFSHCD_PRIV_H_
#include <linux/pm_runtime.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
{
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 1fb3a8b9b03e..01fb4bad86be 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -26,8 +26,8 @@
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include "ufshcd-priv.h"
-#include "ufs_quirks.h"
-#include "unipro.h"
+#include <ufs/ufs_quirks.h>
+#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
#include "ufs-fault-injection.h"
@@ -8445,10 +8445,7 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
{
struct ufs_vreg_info *info = &hba->vreg_info;
- if (info)
- return ufshcd_get_vreg(hba->dev, info->vdd_hba);
-
- return 0;
+ return ufshcd_get_vreg(hba->dev, info->vdd_hba);
}
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/ufs/core/ufshpb.c
index 8882b47f76d3..de2bb8401bc4 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/ufs/core/ufshpb.c
@@ -17,7 +17,7 @@
#include "ufshcd-priv.h"
#include "ufshpb.h"
-#include "../sd.h"
+#include "../../scsi/sd.h"
#define ACTIVATION_THRESHOLD 8 /* 8 IOs */
#define READ_TO_MS 1000
@@ -671,11 +671,12 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
req->timeout = 0;
req->end_io_data = umap_req;
+ req->end_io = ufshpb_umap_req_compl_fn;
ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
- blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
+ blk_execute_rq_nowait(req, true);
hpb->stats.umap_req_cnt++;
}
@@ -707,6 +708,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
blk_rq_append_bio(req, map_req->bio);
req->end_io_data = map_req;
+ req->end_io = ufshpb_map_req_compl_fn;
if (unlikely(last))
mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
@@ -716,7 +718,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
map_req->rb.srgn_idx, mem_size);
scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
- blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
+ blk_execute_rq_nowait(req, true);
hpb->stats.map_req_cnt++;
return 0;
diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/ufs/core/ufshpb.h
index 0d6e6004d783..0d6e6004d783 100644
--- a/drivers/scsi/ufs/ufshpb.h
+++ b/drivers/ufs/core/ufshpb.h
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/ufs/host/Kconfig
index 393b9a01da36..82590224da13 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/ufs/host/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0+
#
-# Kernel configuration file for the UFS Host Controller
+# Kernel configuration file for the UFS host controller drivers.
#
# Copyright (C) 2011-2013 Samsung India Software Operations
#
@@ -8,26 +8,6 @@
# Santosh Yaraganavi <santosh.sy@samsung.com>
# Vinayak Holikatti <h.vinayak@samsung.com>
-config SCSI_UFSHCD
- tristate "Universal Flash Storage Controller Driver Core"
- depends on SCSI && SCSI_DMA
- select PM_DEVFREQ
- select DEVFREQ_GOV_SIMPLE_ONDEMAND
- select NLS
- help
- This selects the support for UFS devices in Linux, say Y and make
- sure that you know the name of your UFS host adapter (the card
- inside your computer that "speaks" the UFS protocol, also
- called UFS Host Controller), because you will be asked for it.
- The module will be called ufshcd.
-
- To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/ufs.rst>.
- However, do not compile this as a module if your root file system
- (the one containing the directory /) is located on a UFS device.
-
-if SCSI_UFSHCD
-
config SCSI_UFSHCD_PCI
tristate "PCI bus based UFS Controller support"
depends on PCI
@@ -122,24 +102,6 @@ config SCSI_UFS_TI_J721E
Selects this if you have TI platform with UFS controller.
If unsure, say N.
-config SCSI_UFS_BSG
- bool "Universal Flash Storage BSG device node"
- select BLK_DEV_BSGLIB
- help
- Universal Flash Storage (UFS) is SCSI transport specification for
- accessing flash storage on digital cameras, mobile phones and
- consumer electronic devices.
- A UFS controller communicates with a UFS device by exchanging
- UFS Protocol Information Units (UPIUs).
- UPIUs can not only be used as a transport layer for the SCSI protocol
- but are also used by the UFS native command set.
- This transport driver supports exchanging UFS protocol information units
- with a UFS device. See also the ufshcd driver, which is a SCSI driver
- that supports UFS devices.
-
- Select this if you need a bsg device node for your UFS controller.
- If unsure, say N.
-
config SCSI_UFS_EXYNOS
tristate "Exynos specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && (ARCH_EXYNOS || COMPILE_TEST)
@@ -150,38 +112,3 @@ config SCSI_UFS_EXYNOS
Select this if you have UFS host controller on Samsung Exynos SoC.
If unsure, say N.
-
-config SCSI_UFS_CRYPTO
- bool "UFS Crypto Engine Support"
- depends on BLK_INLINE_ENCRYPTION
- help
- Enable Crypto Engine Support in UFS.
- Enabling this makes it possible for the kernel to use the crypto
- capabilities of the UFS device (if present) to perform crypto
- operations on data being transferred to/from the device.
-
-config SCSI_UFS_HPB
- bool "Support UFS Host Performance Booster"
- help
- The UFS HPB feature improves random read performance. It caches
- L2P (logical to physical) map of UFS to host DRAM. The driver uses HPB
- read command by piggybacking physical page number for bypassing FTL (flash
- translation layer)'s L2P address translation.
-
-config SCSI_UFS_FAULT_INJECTION
- bool "UFS Fault Injection Support"
- depends on FAULT_INJECTION
- help
- Enable fault injection support in the UFS driver. This makes it easier
- to test the UFS error handler and abort handler.
-
-config SCSI_UFS_HWMON
- bool "UFS Temperature Notification"
- depends on SCSI_UFSHCD=HWMON || HWMON=y
- help
- This provides support for UFS hardware monitoring. If enabled,
- a hardware monitoring device will be created for the UFS device.
-
- If unsure, say N.
-
-endif
diff --git a/drivers/scsi/ufs/Makefile b/drivers/ufs/host/Makefile
index 966048875b50..e4be54273c98 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/ufs/host/Makefile
@@ -1,16 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-# UFSHCD makefile
-
-# The link order is important here. ufshcd-core must initialize
-# before vendor drivers.
-obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-y += ufshcd.o ufs-sysfs.o
-ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
-ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
-ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
-ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o
-ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o
-ufshcd-core-$(CONFIG_SCSI_UFS_HWMON) += ufs-hwmon.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c
index e05c0ae64eea..e05c0ae64eea 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/ufs/host/cdns-pltfrm.c
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/ufs/host/tc-dwc-g210-pci.c
index e635c211c783..92b8ad4b58fe 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pci.c
+++ b/drivers/ufs/host/tc-dwc-g210-pci.c
@@ -7,7 +7,7 @@
* Authors: Joao Pinto <jpinto@synopsys.com>
*/
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-dwc.h"
#include "tc-dwc-g210.h"
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c b/drivers/ufs/host/tc-dwc-g210-pltfrm.c
index f15a84d0c176..f15a84d0c176 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
+++ b/drivers/ufs/host/tc-dwc-g210-pltfrm.c
diff --git a/drivers/scsi/ufs/tc-dwc-g210.c b/drivers/ufs/host/tc-dwc-g210.c
index 7ef67c9fc5b8..deb93dbd83a4 100644
--- a/drivers/scsi/ufs/tc-dwc-g210.c
+++ b/drivers/ufs/host/tc-dwc-g210.c
@@ -9,8 +9,8 @@
#include <linux/module.h>
-#include "ufshcd.h"
-#include "unipro.h"
+#include <ufs/ufshcd.h>
+#include <ufs/unipro.h>
#include "ufshcd-dwc.h"
#include "ufshci-dwc.h"
diff --git a/drivers/scsi/ufs/tc-dwc-g210.h b/drivers/ufs/host/tc-dwc-g210.h
index f7154012f5c7..f7154012f5c7 100644
--- a/drivers/scsi/ufs/tc-dwc-g210.h
+++ b/drivers/ufs/host/tc-dwc-g210.h
diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/ufs/host/ti-j721e-ufs.c
index 122d650d0810..122d650d0810 100644
--- a/drivers/scsi/ufs/ti-j721e-ufs.c
+++ b/drivers/ufs/host/ti-j721e-ufs.c
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index ddb2d42605c5..a81d8cbd542f 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -18,10 +18,10 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
-#include "ufshci.h"
-#include "unipro.h"
+#include <ufs/ufshci.h>
+#include <ufs/unipro.h>
#include "ufs-exynos.h"
diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
index 0b0a3d530ca6..0b0a3d530ca6 100644
--- a/drivers/scsi/ufs/ufs-exynos.h
+++ b/drivers/ufs/host/ufs-exynos.h
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
index 7046143063ee..2eed13bc82ca 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/ufs/host/ufs-hisi.c
@@ -15,12 +15,12 @@
#include <linux/platform_device.h>
#include <linux/reset.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
-#include "unipro.h"
+#include <ufs/unipro.h>
#include "ufs-hisi.h"
-#include "ufshci.h"
-#include "ufs_quirks.h"
+#include <ufs/ufshci.h>
+#include <ufs/ufs_quirks.h>
static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
{
diff --git a/drivers/scsi/ufs/ufs-hisi.h b/drivers/ufs/host/ufs-hisi.h
index 5a90c0f4e90c..5a90c0f4e90c 100644
--- a/drivers/scsi/ufs/ufs-hisi.h
+++ b/drivers/ufs/host/ufs-hisi.h
diff --git a/drivers/scsi/ufs/ufs-mediatek-trace.h b/drivers/ufs/host/ufs-mediatek-trace.h
index 895e82ea6ece..7e010848dc99 100644
--- a/drivers/scsi/ufs/ufs-mediatek-trace.h
+++ b/drivers/ufs/host/ufs-mediatek-trace.h
@@ -31,6 +31,6 @@ TRACE_EVENT(ufs_mtk_event,
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH ../../drivers/scsi/ufs/
+#define TRACE_INCLUDE_PATH ../../drivers/ufs/host
#define TRACE_INCLUDE_FILE ufs-mediatek-trace
#include <trace/define_trace.h>
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 083d6bd4d561..beabc3ccd30b 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -21,10 +21,10 @@
#include <linux/sched/clock.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
-#include "ufs_quirks.h"
-#include "unipro.h"
+#include <ufs/ufs_quirks.h>
+#include <ufs/unipro.h>
#include "ufs-mediatek.h"
#define CREATE_TRACE_POINTS
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index 414dca86c09f..414dca86c09f 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/ufs/host/ufs-qcom-ice.c
index 745e48ec598f..745e48ec598f 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/ufs/host/ufs-qcom-ice.c
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 4dcb232facaa..f10d4668814c 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -15,12 +15,12 @@
#include <linux/reset-controller.h>
#include <linux/devfreq.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
-#include "unipro.h"
+#include <ufs/unipro.h>
#include "ufs-qcom.h"
-#include "ufshci.h"
-#include "ufs_quirks.h"
+#include <ufs/ufshci.h>
+#include <ufs/ufs_quirks.h>
#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 771bc95d02c7..44466a395bb5 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -7,7 +7,7 @@
#include <linux/reset-controller.h>
#include <linux/reset.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#define MAX_UFS_QCOM_HOSTS 1
#define MAX_U32 (~(u32)0)
diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/ufs/host/ufshcd-dwc.c
index a57973c8d2a1..e28a67e1e314 100644
--- a/drivers/scsi/ufs/ufshcd-dwc.c
+++ b/drivers/ufs/host/ufshcd-dwc.c
@@ -9,8 +9,8 @@
#include <linux/module.h>
-#include "ufshcd.h"
-#include "unipro.h"
+#include <ufs/ufshcd.h>
+#include <ufs/unipro.h>
#include "ufshcd-dwc.h"
#include "ufshci-dwc.h"
diff --git a/drivers/scsi/ufs/ufshcd-dwc.h b/drivers/ufs/host/ufshcd-dwc.h
index 43b70794e24f..ad91ea56662c 100644
--- a/drivers/scsi/ufs/ufshcd-dwc.h
+++ b/drivers/ufs/host/ufshcd-dwc.h
@@ -10,7 +10,7 @@
#ifndef _UFSHCD_DWC_H
#define _UFSHCD_DWC_H
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
struct ufshcd_dme_attr_val {
u32 attr_sel;
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 20af2fbc3af1..04166bda41da 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -9,7 +9,7 @@
* Vinayak Holikatti <h.vinayak@samsung.com>
*/
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index f5313f407617..e7332cc65b1f 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -13,9 +13,9 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
-#include "unipro.h"
+#include <ufs/unipro.h>
#define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h
index c33e28ac6ef6..43c2e412bd99 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.h
+++ b/drivers/ufs/host/ufshcd-pltfrm.h
@@ -5,7 +5,7 @@
#ifndef UFSHCD_PLTFRM_H_
#define UFSHCD_PLTFRM_H_
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
#define UFS_PWM_MODE 1
#define UFS_HS_MODE 2
diff --git a/drivers/scsi/ufs/ufshci-dwc.h b/drivers/ufs/host/ufshci-dwc.h
index 6c290e272106..6c290e272106 100644
--- a/drivers/scsi/ufs/ufshci-dwc.h
+++ b/drivers/ufs/host/ufshci-dwc.h
diff --git a/drivers/uio/uio_dfl.c b/drivers/uio/uio_dfl.c
index 89c0fc7b0cbc..8f39cc8bb034 100644
--- a/drivers/uio/uio_dfl.c
+++ b/drivers/uio/uio_dfl.c
@@ -45,9 +45,11 @@ static int uio_dfl_probe(struct dfl_device *ddev)
}
#define FME_FEATURE_ID_ETH_GROUP 0x10
+#define FME_FEATURE_ID_HSSI_SUBSYS 0x15
static const struct dfl_device_id uio_dfl_ids[] = {
{ FME_ID, FME_FEATURE_ID_ETH_GROUP },
+ { FME_ID, FME_FEATURE_ID_HSSI_SUBSYS },
{ }
};
MODULE_DEVICE_TABLE(dfl, uio_dfl_ids);
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index e3a49d837609..362217189ef3 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1091,7 +1091,7 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
snd_buf_bytes - (snd_buf_bytes % instance->tx_channel.stride));
/* rx buffer size must be a positive multiple of the endpoint maxpacket */
- maxpacket = usb_maxpacket(usb_dev, instance->rx_channel.endpoint, 0);
+ maxpacket = usb_maxpacket(usb_dev, instance->rx_channel.endpoint);
if ((maxpacket < 1) || (maxpacket > UDSL_MAX_BUF_SIZE)) {
dev_err(dev, "%s: invalid endpoint %02x!\n", __func__,
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index 53838e7d4eef..6db5cb1b2dbb 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -189,14 +189,12 @@ static int c67x00_drv_remove(struct platform_device *pdev)
c67x00_ll_release(c67x00);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res)
- free_irq(res->start, c67x00);
+ free_irq(res->start, c67x00);
iounmap(c67x00->hpi.base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- release_mem_region(res->start, resource_size(res));
+ release_mem_region(res->start, resource_size(res));
kfree(c67x00);
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
index c7d3e907be81..a09fa68a6ce7 100644
--- a/drivers/usb/c67x00/c67x00-sched.c
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -655,7 +655,7 @@ static int c67x00_add_data_urb(struct c67x00_hcd *c67x00, struct urb *urb)
usb_pipeout(urb->pipe));
remaining = urb->transfer_buffer_length - urb->actual_length;
- maxps = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+ maxps = usb_maxpacket(urb->dev, urb->pipe);
need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
usb_pipeout(urb->pipe) && !(remaining % maxps);
@@ -866,7 +866,7 @@ static inline int c67x00_end_of_data(struct c67x00_td *td)
if (unlikely(!act_bytes))
return 1; /* This was an empty packet */
- maxps = usb_maxpacket(td_udev(td), td->pipe, usb_pipeout(td->pipe));
+ maxps = usb_maxpacket(td_udev(td), td->pipe);
if (unlikely(act_bytes < maxps))
return 1; /* Smaller then full packet */
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index d6d515d598dc..5c15c48952a6 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -2038,7 +2038,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
u8 mult = 0;
int ret;
- buffering = CDNS3_EP_BUF_SIZE - 1;
+ buffering = priv_dev->ep_buf_size - 1;
cdns3_configure_dmult(priv_dev, priv_ep);
@@ -2057,7 +2057,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
break;
default:
ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
- mult = CDNS3_EP_ISO_HS_MULT - 1;
+ mult = priv_dev->ep_iso_burst - 1;
buffering = mult + 1;
}
@@ -2073,14 +2073,14 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
mult = 0;
max_packet_size = 1024;
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
- maxburst = CDNS3_EP_ISO_SS_BURST - 1;
+ maxburst = priv_dev->ep_iso_burst - 1;
buffering = (mult + 1) *
(maxburst + 1);
if (priv_ep->interval > 1)
buffering++;
} else {
- maxburst = CDNS3_EP_BUF_SIZE - 1;
+ maxburst = priv_dev->ep_buf_size - 1;
}
break;
default:
@@ -2095,6 +2095,10 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
else
priv_ep->trb_burst_size = 16;
+ mult = min_t(u8, mult, EP_CFG_MULT_MAX);
+ buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
+ maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
+
/* onchip buffer is only allocated before configuration */
if (!priv_dev->hw_configured_flag) {
ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
@@ -2961,6 +2965,40 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
return 0;
}
+/**
+ * cdns3_gadget_check_config - ensure cdns3 can support the USB configuration
+ * @gadget: pointer to the USB gadget
+ *
+ * Used to record the maximum number of endpoints being used in a USB composite
+ * device (across all configurations). This is used in the calculation of the
+ * TXFIFO sizes when resizing internal memory for individual endpoints. It
+ * helps ensure that the resizing logic reserves enough space for at least
+ * one max packet.
+ */
+static int cdns3_gadget_check_config(struct usb_gadget *gadget)
+{
+ struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+ struct usb_ep *ep;
+ int n_in = 0;
+ int total;
+
+ list_for_each_entry(ep, &gadget->ep_list, ep_list) {
+ if (ep->claimed && (ep->address & USB_DIR_IN))
+ n_in++;
+ }
+
+ /* 2KB are reserved for EP0, 1KB for out */
+ total = 2 + n_in + 1;
+
+ if (total > priv_dev->onchip_buffers)
+ return -ENOMEM;
+
+ priv_dev->ep_buf_size = priv_dev->ep_iso_burst =
+ (priv_dev->onchip_buffers - 2) / (n_in + 1);
+
+ return 0;
+}
+
static const struct usb_gadget_ops cdns3_gadget_ops = {
.get_frame = cdns3_gadget_get_frame,
.wakeup = cdns3_gadget_wakeup,
@@ -2969,6 +3007,7 @@ static const struct usb_gadget_ops cdns3_gadget_ops = {
.udc_start = cdns3_gadget_udc_start,
.udc_stop = cdns3_gadget_udc_stop,
.match_ep = cdns3_gadget_match_ep,
+ .check_config = cdns3_gadget_check_config,
};
static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
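/*
 * Worked example of the sizing rule in cdns3_gadget_check_config() above
 * (illustrative only, the numbers are made up; only the formula comes from
 * the driver): with 16 KB of on-chip buffer (priv_dev->onchip_buffers == 16)
 * and a composite gadget claiming three IN endpoints, 2 KB stay reserved for
 * EP0 and the remaining 14 KB are split over the three IN endpoints plus one
 * OUT share, giving ep_buf_size = (16 - 2) / (3 + 1) = 3 KB each.
 */
static unsigned int cdns3_example_ep_buf_size(unsigned int onchip_kb, int n_in)
{
	if (2 + n_in + 1 > onchip_kb)
		return 0;	/* does not fit; the driver returns -ENOMEM */
	return (onchip_kb - 2) / (n_in + 1);
}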
diff --git a/drivers/usb/cdns3/cdns3-gadget.h b/drivers/usb/cdns3/cdns3-gadget.h
index c5660f2c4293..fbe4a8e3aa89 100644
--- a/drivers/usb/cdns3/cdns3-gadget.h
+++ b/drivers/usb/cdns3/cdns3-gadget.h
@@ -562,15 +562,18 @@ struct cdns3_usb_regs {
/* Max burst size (used only in SS mode). */
#define EP_CFG_MAXBURST_MASK GENMASK(11, 8)
#define EP_CFG_MAXBURST(p) (((p) << 8) & EP_CFG_MAXBURST_MASK)
+#define EP_CFG_MAXBURST_MAX 15
/* ISO max burst. */
#define EP_CFG_MULT_MASK GENMASK(15, 14)
#define EP_CFG_MULT(p) (((p) << 14) & EP_CFG_MULT_MASK)
+#define EP_CFG_MULT_MAX 2
/* ISO max burst. */
#define EP_CFG_MAXPKTSIZE_MASK GENMASK(26, 16)
#define EP_CFG_MAXPKTSIZE(p) (((p) << 16) & EP_CFG_MAXPKTSIZE_MASK)
/* Max number of buffered packets. */
#define EP_CFG_BUFFERING_MASK GENMASK(31, 27)
#define EP_CFG_BUFFERING(p) (((p) << 27) & EP_CFG_BUFFERING_MASK)
+#define EP_CFG_BUFFERING_MAX 15
/* EP_CMD - bitmasks */
/* Endpoint reset. */
@@ -1094,9 +1097,6 @@ struct cdns3_trb {
#define CDNS3_ENDPOINTS_MAX_COUNT 32
#define CDNS3_EP_ZLP_BUF_SIZE 1024
-#define CDNS3_EP_BUF_SIZE 4 /* KB */
-#define CDNS3_EP_ISO_HS_MULT 3
-#define CDNS3_EP_ISO_SS_BURST 3
#define CDNS3_MAX_NUM_DESCMISS_BUF 32
#define CDNS3_DESCMIS_BUF_SIZE 2048 /* Bytes */
#define CDNS3_WA2_NUM_BUFFERS 128
@@ -1333,6 +1333,9 @@ struct cdns3_device {
/*in KB */
u16 onchip_buffers;
u16 onchip_used_size;
+
+ u16 ep_buf_size;
+ u16 ep_iso_burst;
};
void cdns3_set_register_bit(void __iomem *ptr, u32 mask);
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 3aa7f0a3ad71..d26ecd15be60 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -8,14 +8,6 @@
*/
/*
- * CMSPAR, some architectures can't have space and mark parity.
- */
-
-#ifndef CMSPAR
-#define CMSPAR 0
-#endif
-
-/*
* Major and minor numbers.
*/
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index d8b0041de612..2c14a9636056 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -228,8 +228,6 @@ static char *usb_dump_interface(int speed, char *start, char *end,
start = usb_dump_interface_descriptor(start, end, intfc, iface, setno);
for (i = 0; i < desc->desc.bNumEndpoints; i++) {
- if (start > end)
- return start;
start = usb_dump_endpoint_descriptor(speed,
start, end, &desc->endpoint[i].desc);
}
@@ -302,8 +300,6 @@ static char *usb_dump_config(int speed, char *start, char *end,
intfc = config->intf_cache[i];
interface = config->interface[i];
for (j = 0; j < intfc->num_altsetting; j++) {
- if (start > end)
- return start;
start = usb_dump_interface(speed,
start, end, intfc, interface, j);
}
@@ -369,19 +365,11 @@ static char *usb_dump_desc(char *start, char *end, struct usb_device *dev)
{
int i;
- if (start > end)
- return start;
-
start = usb_dump_device_descriptor(start, end, &dev->descriptor);
- if (start > end)
- return start;
-
start = usb_dump_device_strings(start, end, dev);
for (i = 0; i < dev->descriptor.bNumConfigurations; i++) {
- if (start > end)
- return start;
start = usb_dump_config(dev->speed,
start, end, dev->config + i,
/* active ? */
@@ -390,41 +378,6 @@ static char *usb_dump_desc(char *start, char *end, struct usb_device *dev)
return start;
}
-
-#ifdef PROC_EXTRA /* TBD: may want to add this code later */
-
-static char *usb_dump_hub_descriptor(char *start, char *end,
- const struct usb_hub_descriptor *desc)
-{
- int leng = USB_DT_HUB_NONVAR_SIZE;
- unsigned char *ptr = (unsigned char *)desc;
-
- if (start > end)
- return start;
- start += sprintf(start, "Interface:");
- while (leng && start <= end) {
- start += sprintf(start, " %02x", *ptr);
- ptr++; leng--;
- }
- *start++ = '\n';
- return start;
-}
-
-static char *usb_dump_string(char *start, char *end,
- const struct usb_device *dev, char *id, int index)
-{
- if (start > end)
- return start;
- start += sprintf(start, "Interface:");
- if (index <= dev->maxstring && dev->stringindex &&
- dev->stringindex[index])
- start += sprintf(start, "%s: %.100s ", id,
- dev->stringindex[index]);
- return start;
-}
-
-#endif /* PROC_EXTRA */
-
/*****************************************************************/
/* This is a recursive function. Parameters:
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 355ed33a2179..b87452e22835 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1533,22 +1533,23 @@ static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
{
int w;
- /* Remote wakeup is needed only when we actually go to sleep.
- * For things like FREEZE and QUIESCE, if the device is already
- * autosuspended then its current wakeup setting is okay.
+ /*
+ * For FREEZE/QUIESCE, disable remote wakeups so no interrupts get
+ * generated.
*/
if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) {
- if (udev->state != USB_STATE_SUSPENDED)
- udev->do_remote_wakeup = 0;
- return;
- }
+ w = 0;
- /* Enable remote wakeup if it is allowed, even if no interface drivers
- * actually want it.
- */
- w = device_may_wakeup(&udev->dev);
+ } else {
+ /*
+ * Enable remote wakeup if it is allowed, even if no interface
+ * drivers actually want it.
+ */
+ w = device_may_wakeup(&udev->dev);
+ }
- /* If the device is autosuspended with the wrong wakeup setting,
+ /*
+ * If the device is autosuspended with the wrong wakeup setting,
* autoresume now so the setting can be changed.
*/
if (udev->state == USB_STATE_SUSPENDED && w != udev->do_remote_wakeup)
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 8176bc81a635..482dae72ef1c 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -15,7 +15,6 @@
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-#include <asm/prom.h>
#endif
#include "usb.h"
@@ -616,10 +615,10 @@ const struct dev_pm_ops usb_hcd_pci_pm_ops = {
.suspend_noirq = hcd_pci_suspend_noirq,
.resume_noirq = hcd_pci_resume_noirq,
.resume = hcd_pci_resume,
- .freeze = check_root_hub_suspended,
+ .freeze = hcd_pci_suspend,
.freeze_noirq = check_root_hub_suspended,
.thaw_noirq = NULL,
- .thaw = NULL,
+ .thaw = hcd_pci_resume,
.poweroff = hcd_pci_suspend,
.poweroff_noirq = hcd_pci_suspend_noirq,
.restore_noirq = hcd_pci_resume_noirq,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index d9712c2602af..06eea8848ccc 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2816,6 +2816,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
{
int retval;
struct usb_device *rhdev;
+ struct usb_hcd *shared_hcd;
if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
@@ -2976,13 +2977,26 @@ int usb_add_hcd(struct usb_hcd *hcd,
goto err_hcd_driver_start;
}
+ /* starting here, usbcore will pay attention to the shared HCD roothub */
+ shared_hcd = hcd->shared_hcd;
+ if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
+ retval = register_root_hub(shared_hcd);
+ if (retval != 0)
+ goto err_register_root_hub;
+
+ if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
+ usb_hcd_poll_rh_status(shared_hcd);
+ }
+
/* starting here, usbcore will pay attention to this root hub */
- retval = register_root_hub(hcd);
- if (retval != 0)
- goto err_register_root_hub;
+ if (!HCD_DEFER_RH_REGISTER(hcd)) {
+ retval = register_root_hub(hcd);
+ if (retval != 0)
+ goto err_register_root_hub;
- if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
- usb_hcd_poll_rh_status(hcd);
+ if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
+ usb_hcd_poll_rh_status(hcd);
+ }
return retval;
@@ -3020,6 +3034,7 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
void usb_remove_hcd(struct usb_hcd *hcd)
{
struct usb_device *rhdev = hcd->self.root_hub;
+ bool rh_registered;
dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
@@ -3030,6 +3045,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
spin_lock_irq (&hcd_root_hub_lock);
+ rh_registered = hcd->rh_registered;
hcd->rh_registered = 0;
spin_unlock_irq (&hcd_root_hub_lock);
@@ -3039,7 +3055,8 @@ void usb_remove_hcd(struct usb_hcd *hcd)
cancel_work_sync(&hcd->died_work);
mutex_lock(&usb_bus_idr_lock);
- usb_disconnect(&rhdev); /* Sets rhdev to NULL */
+ if (rh_registered)
+ usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_idr_lock);
/*
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1460857026e0..68e9121c1878 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1635,7 +1635,7 @@ static int hub_configure(struct usb_hub *hub,
* maxpktsize is defined in hcd.c's fake endpoint descriptors
* to be big enough for at least USB_MAXCHILDREN ports. */
pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(hdev, pipe);
if (maxp > sizeof(*hub->buffer))
maxp = sizeof(*hub->buffer);
@@ -5511,7 +5511,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
/* Handle notifying userspace about hub over-current events */
static void port_over_current_notify(struct usb_port *port_dev)
{
- char *envp[3];
+ char *envp[3] = { NULL, NULL, NULL };
struct device *hub_dev;
char *port_dev_path;
@@ -5528,20 +5528,18 @@ static void port_over_current_notify(struct usb_port *port_dev)
envp[0] = kasprintf(GFP_KERNEL, "OVER_CURRENT_PORT=%s", port_dev_path);
if (!envp[0])
- goto exit_path;
+ goto exit;
envp[1] = kasprintf(GFP_KERNEL, "OVER_CURRENT_COUNT=%u",
port_dev->over_current_count);
if (!envp[1])
goto exit;
- envp[2] = NULL;
kobject_uevent_env(&hub_dev->kobj, KOBJ_CHANGE, envp);
- kfree(envp[1]);
exit:
+ kfree(envp[1]);
kfree(envp[0]);
-exit_path:
kfree(port_dev_path);
}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 97b44a68668a..f99a65a64588 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -510,6 +510,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ /* DELL USB GEN2 */
+ { USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
+
/* VCOM device */
{ USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index bb1da35eb891..d4dcaefd0ea4 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -205,8 +205,11 @@ usb_acpi_find_companion_for_device(struct usb_device *udev)
struct usb_hub *hub;
if (!udev->parent) {
- /* root hub is only child (_ADR=0) under its parent, the HC */
- adev = ACPI_COMPANION(udev->dev.parent);
+ /*
+ * root hub is only child (_ADR=0) under its parent, the HC.
+ * sysdev pointer is the HC as seen from firmware.
+ */
+ adev = ACPI_COMPANION(udev->bus->sysdev);
return acpi_find_child_device(adev, 0, false);
}
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index cf0bcd0dc320..dc4fc72ab1b6 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -1153,6 +1153,7 @@ static void dwc2_set_turnaround_time(struct dwc2_hsotg *hsotg)
int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
u32 usbcfg;
+ u32 otgctl;
int retval = 0;
if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
@@ -1187,6 +1188,14 @@ int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
dwc2_writel(hsotg, usbcfg, GUSBCFG);
}
+ if (!hsotg->params.activate_ingenic_overcurrent_detection) {
+ if (dwc2_is_host_mode(hsotg)) {
+ otgctl = readl(hsotg->regs + GOTGCTL);
+ otgctl |= GOTGCTL_VBVALOEN | GOTGCTL_VBVALOVAL;
+ writel(otgctl, hsotg->regs + GOTGCTL);
+ }
+ }
+
return retval;
}
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 88c337bf564f..0683852e47e4 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -426,6 +426,10 @@ enum dwc2_ep0_state {
* detection using GGPIO register.
* 0 - Deactivate the external level detection (default)
* 1 - Activate the external level detection
+ * @activate_ingenic_overcurrent_detection: Activate Ingenic overcurrent
+ * detection.
+ * 0 - Deactivate the overcurrent detection
+ * 1 - Activate the overcurrent detection (default)
* @g_dma: Enables gadget dma usage (default: autodetect).
* @g_dma_desc: Enables gadget descriptor DMA (default: autodetect).
* @g_rx_fifo_size: The periodic rx fifo size for the device, in
@@ -494,6 +498,7 @@ struct dwc2_core_params {
u8 hird_threshold;
bool activate_stm_fs_transceiver;
bool activate_stm_id_vb_detection;
+ bool activate_ingenic_overcurrent_detection;
bool ipg_isoc_en;
u16 max_packet_count;
u32 max_transfer_size;
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index eee3504397e6..fe2a58c75861 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -4544,7 +4544,6 @@ static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
WARN_ON(hsotg->driver);
- driver->driver.bus = NULL;
hsotg->driver = driver;
hsotg->gadget.dev.of_node = hsotg->dev->of_node;
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 1306f4ec788d..fdb8a42fff86 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -73,6 +73,47 @@ static void dwc2_set_his_params(struct dwc2_hsotg *hsotg)
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
}
+static void dwc2_set_jz4775_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->speed = DWC2_SPEED_PARAM_HIGH;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+ p->phy_utmi_width = 16;
+ p->activate_ingenic_overcurrent_detection =
+ !device_property_read_bool(hsotg->dev, "disable-over-current");
+}
+
+static void dwc2_set_x1600_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->speed = DWC2_SPEED_PARAM_HIGH;
+ p->host_channels = 16;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+ p->phy_utmi_width = 16;
+ p->activate_ingenic_overcurrent_detection =
+ !device_property_read_bool(hsotg->dev, "disable-over-current");
+}
+
+static void dwc2_set_x2000_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->speed = DWC2_SPEED_PARAM_HIGH;
+ p->host_rx_fifo_size = 1024;
+ p->host_nperio_tx_fifo_size = 1024;
+ p->host_perio_tx_fifo_size = 1024;
+ p->host_channels = 16;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+ p->phy_utmi_width = 16;
+ p->activate_ingenic_overcurrent_detection =
+ !device_property_read_bool(hsotg->dev, "disable-over-current");
+}
+
static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
@@ -221,7 +262,14 @@ static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg)
const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "brcm,bcm2835-usb", .data = dwc2_set_bcm_params },
- { .compatible = "hisilicon,hi6220-usb", .data = dwc2_set_his_params },
+ { .compatible = "hisilicon,hi6220-usb", .data = dwc2_set_his_params },
+ { .compatible = "ingenic,jz4775-otg", .data = dwc2_set_jz4775_params },
+ { .compatible = "ingenic,jz4780-otg", .data = dwc2_set_jz4775_params },
+ { .compatible = "ingenic,x1000-otg", .data = dwc2_set_jz4775_params },
+ { .compatible = "ingenic,x1600-otg", .data = dwc2_set_x1600_params },
+ { .compatible = "ingenic,x1700-otg", .data = dwc2_set_x1600_params },
+ { .compatible = "ingenic,x1830-otg", .data = dwc2_set_x1600_params },
+ { .compatible = "ingenic,x2000-otg", .data = dwc2_set_x2000_params },
{ .compatible = "rockchip,rk3066-usb", .data = dwc2_set_rk_params },
{ .compatible = "lantiq,arx100-usb", .data = dwc2_set_ltq_params },
{ .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params },
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index c483f28b695d..cd9a734522a7 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -159,4 +159,13 @@ config USB_DWC3_XILINX
This driver handles both ZynqMP and Versal SoC operations.
Say 'Y' or 'M' if you have one such device.
+config USB_DWC3_AM62
+ tristate "Texas Instruments AM62 Platforms"
+ depends on ARCH_K3 || COMPILE_TEST
+ default USB_DWC3
+ help
+ Support TI's AM62 platforms with DesignWare Core USB3 IP.
+ The DesignWare Core USB3 IP is programmed to operate in
+ USB 2.0 mode only.
+ Say 'Y' or 'M' here if you have one such device.
endif
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 2d499de6f66a..9f66bd82b639 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -42,6 +42,7 @@ endif
# and allyesconfig builds.
##
+obj-$(CONFIG_USB_DWC3_AM62) += dwc3-am62.o
obj-$(CONFIG_USB_DWC3_OMAP) += dwc3-omap.o
obj-$(CONFIG_USB_DWC3_EXYNOS) += dwc3-exynos.o
obj-$(CONFIG_USB_DWC3_PCI) += dwc3-pci.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index d28cd1a6709b..e027c0420dc3 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -23,6 +23,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
@@ -85,7 +86,7 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
* mode. If the controller supports DRD but the dr_mode is not
* specified or set to OTG, then set the mode to peripheral.
*/
- if (mode == USB_DR_MODE_OTG &&
+ if (mode == USB_DR_MODE_OTG && !dwc->edev &&
(!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
!device_property_read_bool(dwc->dev, "usb-role-switch")) &&
!DWC3_VER_IS_PRIOR(DWC3, 330A))
@@ -297,6 +298,7 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
udelay(1);
} while (--retries);
+ dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n");
return -ETIMEDOUT;
done:
@@ -342,7 +344,6 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
* from the default, this will set clock period in DWC3_GUCTL
* register.
* @dwc: Pointer to our controller context structure
- * @ref_clk_per: reference clock period in ns
*/
static void dwc3_ref_clk_period(struct dwc3 *dwc)
{
@@ -964,10 +965,8 @@ static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
return;
vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
- if (!vals) {
- dev_err(dev, "Error to get memory\n");
+ if (!vals)
return;
- }
/* Get INCR burst type, and parse it */
ret = device_property_read_u32_array(dev,
@@ -1268,40 +1267,36 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
if (IS_ERR(dwc->usb2_phy)) {
ret = PTR_ERR(dwc->usb2_phy);
- if (ret == -ENXIO || ret == -ENODEV) {
+ if (ret == -ENXIO || ret == -ENODEV)
dwc->usb2_phy = NULL;
- } else {
+ else
return dev_err_probe(dev, ret, "no usb2 phy configured\n");
- }
}
if (IS_ERR(dwc->usb3_phy)) {
ret = PTR_ERR(dwc->usb3_phy);
- if (ret == -ENXIO || ret == -ENODEV) {
+ if (ret == -ENXIO || ret == -ENODEV)
dwc->usb3_phy = NULL;
- } else {
+ else
return dev_err_probe(dev, ret, "no usb3 phy configured\n");
- }
}
dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
if (IS_ERR(dwc->usb2_generic_phy)) {
ret = PTR_ERR(dwc->usb2_generic_phy);
- if (ret == -ENOSYS || ret == -ENODEV) {
+ if (ret == -ENOSYS || ret == -ENODEV)
dwc->usb2_generic_phy = NULL;
- } else {
+ else
return dev_err_probe(dev, ret, "no usb2 phy configured\n");
- }
}
dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
if (IS_ERR(dwc->usb3_generic_phy)) {
ret = PTR_ERR(dwc->usb3_generic_phy);
- if (ret == -ENOSYS || ret == -ENODEV) {
+ if (ret == -ENOSYS || ret == -ENODEV)
dwc->usb3_generic_phy = NULL;
- } else {
+ else
return dev_err_probe(dev, ret, "no usb3 phy configured\n");
- }
}
return 0;
@@ -1633,6 +1628,51 @@ static void dwc3_check_params(struct dwc3 *dwc)
}
}
+static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
+{
+ struct device *dev = dwc->dev;
+ struct device_node *np_phy;
+ struct extcon_dev *edev = NULL;
+ const char *name;
+
+ if (device_property_read_bool(dev, "extcon"))
+ return extcon_get_edev_by_phandle(dev, 0);
+
+ /*
+ * Device tree platforms should get extcon via phandle.
+ * On ACPI platforms, we get the name from a device property.
+ * This device property is for kernel internal use only and
+ * is expected to be set by the glue code.
+ */
+ if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
+ edev = extcon_get_extcon_dev(name);
+ if (!edev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return edev;
+ }
+
+ /*
+ * Try to get an extcon device from the USB PHY controller's "port"
+ * node. Check if it has the "port" node first, to avoid printing the
+ * error message from underlying code, as it's a valid case: extcon
+ * device (and "port" node) may be missing in case of "usb-role-switch"
+ * or OTG mode.
+ */
+ np_phy = of_parse_phandle(dev->of_node, "phys", 0);
+ if (of_graph_is_present(np_phy)) {
+ struct device_node *np_conn;
+
+ np_conn = of_graph_get_remote_node(np_phy, -1, -1);
+ if (np_conn)
+ edev = extcon_find_edev_by_node(np_conn);
+ of_node_put(np_conn);
+ }
+ of_node_put(np_phy);
+
+ return edev;
+}
+
static int dwc3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1768,6 +1808,13 @@ static int dwc3_probe(struct platform_device *pdev)
goto err2;
}
+ dwc->edev = dwc3_get_extcon(dwc);
+ if (IS_ERR(dwc->edev)) {
+ ret = PTR_ERR(dwc->edev);
+ dev_err_probe(dwc->dev, ret, "failed to get extcon\n");
+ goto err3;
+ }
+
ret = dwc3_get_dr_mode(dwc);
if (ret)
goto err3;
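A minimal sketch (not part of the patch; the property array, child device pointer and extcon name are illustrative assumptions) of how a glue driver on an ACPI platform could supply the internal-only "linux,extcon-name" property that dwc3_get_extcon() resolves via extcon_get_extcon_dev():

	/* Requires <linux/property.h>; names below are hypothetical. */
	static const struct property_entry example_glue_properties[] = {
		PROPERTY_ENTRY_STRING("linux,extcon-name", "example.mux.extcon"),
		{ }
	};

	/* In the glue driver's probe, before registering the dwc3 child: */
	ret = device_create_managed_software_node(&dwc3_child->dev,
						   example_glue_properties, NULL);
	if (ret)
		return ret;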
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 5c9d467195a6..81c486b3941c 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1046,6 +1046,7 @@ struct dwc3_scratchpad_array {
* @tx_thr_num_pkt_prd: periodic ESS transmit packet count
* @tx_max_burst_prd: max periodic ESS transmit burst size
* @tx_fifo_resize_max_num: max number of fifos allocated during txfifo resize
+ * @clear_stall_protocol: endpoint number that requires a delayed status phase
* @hsphy_interface: "utmi" or "ulpi"
* @connected: true when we're connected to a host, false otherwise
* @softconnect: true when gadget connect is called, false when disconnect runs
@@ -1266,6 +1267,7 @@ struct dwc3 {
u8 tx_thr_num_pkt_prd;
u8 tx_max_burst_prd;
u8 tx_fifo_resize_max_num;
+ u8 clear_stall_protocol;
const char *hsphy_interface;
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 8cad9e7d3368..039bf241769a 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -8,7 +8,6 @@
*/
#include <linux/extcon.h>
-#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -439,51 +438,6 @@ static int dwc3_drd_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
-{
- struct device *dev = dwc->dev;
- struct device_node *np_phy;
- struct extcon_dev *edev = NULL;
- const char *name;
-
- if (device_property_read_bool(dev, "extcon"))
- return extcon_get_edev_by_phandle(dev, 0);
-
- /*
- * Device tree platforms should get extcon via phandle.
- * On ACPI platforms, we get the name from a device property.
- * This device property is for kernel internal use only and
- * is expected to be set by the glue code.
- */
- if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
- edev = extcon_get_extcon_dev(name);
- if (!edev)
- return ERR_PTR(-EPROBE_DEFER);
-
- return edev;
- }
-
- /*
- * Try to get an extcon device from the USB PHY controller's "port"
- * node. Check if it has the "port" node first, to avoid printing the
- * error message from underlying code, as it's a valid case: extcon
- * device (and "port" node) may be missing in case of "usb-role-switch"
- * or OTG mode.
- */
- np_phy = of_parse_phandle(dev->of_node, "phys", 0);
- if (of_graph_is_present(np_phy)) {
- struct device_node *np_conn;
-
- np_conn = of_graph_get_remote_node(np_phy, -1, -1);
- if (np_conn)
- edev = extcon_find_edev_by_node(np_conn);
- of_node_put(np_conn);
- }
- of_node_put(np_phy);
-
- return edev;
-}
-
#if IS_ENABLED(CONFIG_USB_ROLE_SWITCH)
#define ROLE_SWITCH 1
static int dwc3_usb_role_switch_set(struct usb_role_switch *sw,
@@ -588,10 +542,6 @@ int dwc3_drd_init(struct dwc3 *dwc)
device_property_read_bool(dwc->dev, "usb-role-switch"))
return dwc3_setup_role_switch(dwc);
- dwc->edev = dwc3_get_extcon(dwc);
- if (IS_ERR(dwc->edev))
- return PTR_ERR(dwc->edev);
-
if (dwc->edev) {
dwc->edev_nb.notifier_call = dwc3_drd_notifier;
ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
new file mode 100644
index 000000000000..fea7aca35dc8
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwc3-am62.c - TI specific Glue layer for AM62 DWC3 USB Controller
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/pinctrl/consumer.h>
+
+/* USB WRAPPER register offsets */
+#define USBSS_PID 0x0
+#define USBSS_OVERCURRENT_CTRL 0x4
+#define USBSS_PHY_CONFIG 0x8
+#define USBSS_PHY_TEST 0xc
+#define USBSS_CORE_STAT 0x14
+#define USBSS_HOST_VBUS_CTRL 0x18
+#define USBSS_MODE_CONTROL 0x1c
+#define USBSS_WAKEUP_CONFIG 0x30
+#define USBSS_WAKEUP_STAT 0x34
+#define USBSS_OVERRIDE_CONFIG 0x38
+#define USBSS_IRQ_MISC_STATUS_RAW 0x430
+#define USBSS_IRQ_MISC_STATUS 0x434
+#define USBSS_IRQ_MISC_ENABLE_SET 0x438
+#define USBSS_IRQ_MISC_ENABLE_CLR 0x43c
+#define USBSS_IRQ_MISC_EOI 0x440
+#define USBSS_INTR_TEST 0x490
+#define USBSS_VBUS_FILTER 0x614
+#define USBSS_VBUS_STAT 0x618
+#define USBSS_DEBUG_CFG 0x708
+#define USBSS_DEBUG_DATA 0x70c
+#define USBSS_HOST_HUB_CTRL 0x714
+
+/* PHY CONFIG register bits */
+#define USBSS_PHY_VBUS_SEL_MASK GENMASK(2, 1)
+#define USBSS_PHY_VBUS_SEL_SHIFT 1
+#define USBSS_PHY_LANE_REVERSE BIT(0)
+
+/* MODE CONTROL register bits */
+#define USBSS_MODE_VALID BIT(0)
+
+/* WAKEUP CONFIG register bits */
+#define USBSS_WAKEUP_CFG_OVERCURRENT_EN BIT(3)
+#define USBSS_WAKEUP_CFG_LINESTATE_EN BIT(2)
+#define USBSS_WAKEUP_CFG_SESSVALID_EN BIT(1)
+#define USBSS_WAKEUP_CFG_VBUSVALID_EN BIT(0)
+
+/* WAKEUP STAT register bits */
+#define USBSS_WAKEUP_STAT_OVERCURRENT BIT(4)
+#define USBSS_WAKEUP_STAT_LINESTATE BIT(3)
+#define USBSS_WAKEUP_STAT_SESSVALID BIT(2)
+#define USBSS_WAKEUP_STAT_VBUSVALID BIT(1)
+#define USBSS_WAKEUP_STAT_CLR BIT(0)
+
+/* IRQ_MISC_STATUS_RAW register bits */
+#define USBSS_IRQ_MISC_RAW_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_RAW_SESSVALID BIT(20)
+
+/* IRQ_MISC_STATUS register bits */
+#define USBSS_IRQ_MISC_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_SESSVALID BIT(20)
+
+/* IRQ_MISC_ENABLE_SET register bits */
+#define USBSS_IRQ_MISC_ENABLE_SET_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_ENABLE_SET_SESSVALID BIT(20)
+
+/* IRQ_MISC_ENABLE_CLR register bits */
+#define USBSS_IRQ_MISC_ENABLE_CLR_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_ENABLE_CLR_SESSVALID BIT(20)
+
+/* IRQ_MISC_EOI register bits */
+#define USBSS_IRQ_MISC_EOI_VECTOR BIT(0)
+
+/* VBUS_STAT register bits */
+#define USBSS_VBUS_STAT_SESSVALID BIT(2)
+#define USBSS_VBUS_STAT_VBUSVALID BIT(0)
+
+/* Mask for PHY PLL REFCLK */
+#define PHY_PLL_REFCLK_MASK GENMASK(3, 0)
+
+#define DWC3_AM62_AUTOSUSPEND_DELAY 100
+
+struct dwc3_data {
+ struct device *dev;
+ void __iomem *usbss;
+ struct clk *usb2_refclk;
+ int rate_code;
+ struct regmap *syscon;
+ unsigned int offset;
+ unsigned int vbus_divider;
+};
+
+static const int dwc3_ti_rate_table[] = { /* in KHZ */
+ 9600,
+ 10000,
+ 12000,
+ 19200,
+ 20000,
+ 24000,
+ 25000,
+ 26000,
+ 38400,
+ 40000,
+ 58000,
+ 50000,
+ 52000,
+};
+
+static inline u32 dwc3_ti_readl(struct dwc3_data *data, u32 offset)
+{
+ return readl((data->usbss) + offset);
+}
+
+static inline void dwc3_ti_writel(struct dwc3_data *data, u32 offset, u32 value)
+{
+ writel(value, (data->usbss) + offset);
+}
+
+static int phy_syscon_pll_refclk(struct dwc3_data *data)
+{
+ struct device *dev = data->dev;
+ struct device_node *node = dev->of_node;
+ struct of_phandle_args args;
+ struct regmap *syscon;
+ int ret;
+
+ syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-phy-pll-refclk");
+ if (IS_ERR(syscon)) {
+ dev_err(dev, "unable to get ti,syscon-phy-pll-refclk regmap\n");
+ return PTR_ERR(syscon);
+ }
+
+ data->syscon = syscon;
+
+ ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-phy-pll-refclk", 1,
+ 0, &args);
+ if (ret)
+ return ret;
+
+ data->offset = args.args[0];
+
+ ret = regmap_update_bits(data->syscon, data->offset, PHY_PLL_REFCLK_MASK, data->rate_code);
+ if (ret) {
+ dev_err(dev, "failed to set phy pll reference clock rate\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dwc3_ti_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct dwc3_data *data;
+ int i, ret;
+ unsigned long rate;
+ u32 reg;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev;
+ platform_set_drvdata(pdev, data);
+
+ data->usbss = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->usbss)) {
+ dev_err(dev, "can't map IOMEM resource\n");
+ return PTR_ERR(data->usbss);
+ }
+
+ data->usb2_refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(data->usb2_refclk)) {
+ dev_err(dev, "can't get usb2_refclk\n");
+ return PTR_ERR(data->usb2_refclk);
+ }
+
+ /* Calculate the rate code */
+ rate = clk_get_rate(data->usb2_refclk);
+ rate /= 1000; // To KHz
+ for (i = 0; i < ARRAY_SIZE(dwc3_ti_rate_table); i++) {
+ if (dwc3_ti_rate_table[i] == rate)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(dwc3_ti_rate_table)) {
+ dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate);
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ data->rate_code = i;
+
+ /* Read the syscon property and set the rate code */
+ ret = phy_syscon_pll_refclk(data);
+ if (ret)
+ goto err_clk_disable;
+
+ /* VBUS divider select */
+ data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
+ reg = dwc3_ti_readl(data, USBSS_PHY_CONFIG);
+ if (data->vbus_divider)
+ reg |= 1 << USBSS_PHY_VBUS_SEL_SHIFT;
+
+ dwc3_ti_writel(data, USBSS_PHY_CONFIG, reg);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Don't ignore runtime PM dependencies on its children
+ */
+ pm_suspend_ignore_children(dev, false);
+ clk_prepare_enable(data->usb2_refclk);
+ pm_runtime_get_noresume(dev);
+
+ ret = of_platform_populate(node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to create dwc3 core: %d\n", ret);
+ goto err_pm_disable;
+ }
+
+ /* Set mode valid bit to indicate role is valid */
+ reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL);
+ reg |= USBSS_MODE_VALID;
+ dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg);
+
+ /* Setting up autosuspend */
+ pm_runtime_set_autosuspend_delay(dev, DWC3_AM62_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return 0;
+
+err_pm_disable:
+ clk_disable_unprepare(data->usb2_refclk);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+err_clk_disable:
+ clk_put(data->usb2_refclk);
+ return ret;
+}
+
+static int dwc3_ti_remove_core(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+ return 0;
+}
+
+static int dwc3_ti_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dwc3_data *data = platform_get_drvdata(pdev);
+ u32 reg;
+
+ device_for_each_child(dev, NULL, dwc3_ti_remove_core);
+
+ /* Clear mode valid bit */
+ reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL);
+ reg &= ~USBSS_MODE_VALID;
+ dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg);
+
+ pm_runtime_put_sync(dev);
+ clk_disable_unprepare(data->usb2_refclk);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+
+ clk_put(data->usb2_refclk);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int dwc3_ti_suspend_common(struct device *dev)
+{
+ struct dwc3_data *data = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(data->usb2_refclk);
+
+ return 0;
+}
+
+static int dwc3_ti_resume_common(struct device *dev)
+{
+ struct dwc3_data *data = dev_get_drvdata(dev);
+
+ clk_prepare_enable(data->usb2_refclk);
+
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(dwc3_ti_pm_ops, dwc3_ti_suspend_common,
+ dwc3_ti_resume_common, NULL);
+
+#define DEV_PM_OPS (&dwc3_ti_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static const struct of_device_id dwc3_ti_of_match[] = {
+ { .compatible = "ti,am62-usb"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, dwc3_ti_of_match);
+
+static struct platform_driver dwc3_ti_driver = {
+ .probe = dwc3_ti_probe,
+ .remove = dwc3_ti_remove,
+ .driver = {
+ .name = "dwc3-am62",
+ .pm = DEV_PM_OPS,
+ .of_match_table = dwc3_ti_of_match,
+ },
+};
+
+module_platform_driver(dwc3_ti_driver);
+
+MODULE_ALIAS("platform:dwc3-am62");
+MODULE_AUTHOR("Aswath Govindraju <a-govindraju@ti.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DesignWare USB3 TI Glue Layer");
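As a worked example of the rate-code lookup in dwc3_ti_probe() above (clock value assumed for illustration): a 19.2 MHz "ref" clock makes clk_get_rate() return 19200000, which becomes 19200 after the divide by 1000; that matches index 3 of dwc3_ti_rate_table, so rate_code 3 is written into the PHY_PLL_REFCLK_MASK field of the syscon register by phy_syscon_pll_refclk().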
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 2e19e0e4ea53..ba51de7dd760 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -288,7 +288,7 @@ static void dwc3_pci_resume_work(struct work_struct *work)
int ret;
ret = pm_runtime_get_sync(&dwc3->dev);
- if (ret) {
+ if (ret < 0) {
pm_runtime_put_sync_autosuspend(&dwc3->dev);
return;
}
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index a6f3a9b38789..67b237c7a76a 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -98,6 +99,7 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
{
struct device *dev = priv_data->dev;
struct reset_control *crst, *hibrst, *apbrst;
+ struct gpio_desc *reset_gpio;
struct phy *usb3_phy;
int ret = 0;
u32 reg;
@@ -201,6 +203,21 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
}
skip_usb3_phy:
+ /* ulpi reset via gpio-modepin or gpio-framework driver */
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio)) {
+ return dev_err_probe(dev, PTR_ERR(reset_gpio),
+ "Failed to request reset GPIO\n");
+ }
+
+ if (reset_gpio) {
+ /* Toggle ulpi to reset the phy. */
+ gpiod_set_value_cansleep(reset_gpio, 1);
+ usleep_range(5000, 10000);
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ usleep_range(5000, 10000);
+ }
+
/*
* This routes the USB DMA traffic to go through FPD path instead
* of reaching DDR directly. This traffic routing is needed to
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 1064be5518f6..5d642660fd15 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -218,7 +218,7 @@ out:
return ret;
}
-static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
+void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
{
struct dwc3_ep *dep;
@@ -813,7 +813,7 @@ static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
int ret = -EINVAL;
u32 len;
- if (!dwc->gadget_driver)
+ if (!dwc->gadget_driver || !dwc->connected)
goto out;
trace_dwc3_ctrl_req(ctrl);
@@ -1080,6 +1080,7 @@ void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
unsigned int direction = !dwc->ep0_expect_in;
dwc->delayed_status = false;
+ dwc->clear_stall_protocol = 0;
if (dwc->ep0state != EP0_STATUS_PHASE)
return;
@@ -1087,13 +1088,18 @@ void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
}
-static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
+void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
int ret;
- if (!dep->resource_index)
+ /*
+ * For the status/DATA OUT stage, the TRB is queued on the ep0 OUT
+ * endpoint, whose resource index is zero. Hence allow queuing the
+ * ENDXFER command for the ep0 OUT endpoint.
+ */
+ if (!dep->resource_index && dep->number)
return;
cmd = DWC3_DEPCMD_ENDTRANSFER;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0b9c2493844a..00427d108ab9 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -657,7 +657,6 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
/**
* dwc3_gadget_calc_tx_fifo_size - calculates the txfifo size value
* @dwc: pointer to the DWC3 context
- * @nfifos: number of fifos to calculate for
*
* Calculates the size value based on the equation below:
*
@@ -690,7 +689,7 @@ static int dwc3_gadget_calc_tx_fifo_size(struct dwc3 *dwc, int mult)
}
/**
- * dwc3_gadget_clear_tx_fifo_size - Clears txfifo allocation
+ * dwc3_gadget_clear_tx_fifos - Clears txfifo allocation
* @dwc: pointer to the DWC3 context
*
* Iterates through all the endpoint registers and clears the previous txfifo
@@ -783,7 +782,8 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
num_fifos = 3;
if (dep->endpoint.maxburst > 6 &&
- usb_endpoint_xfer_bulk(dep->endpoint.desc) && DWC3_IP_IS(DWC31))
+ (usb_endpoint_xfer_bulk(dep->endpoint.desc) ||
+ usb_endpoint_xfer_isoc(dep->endpoint.desc)) && DWC3_IP_IS(DWC31))
num_fifos = dwc->tx_fifo_resize_max_num;
/* FIFO size for a single buffer */
@@ -882,12 +882,13 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
reg |= DWC3_DALEPENA_EP(dep->number);
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+ dep->trb_dequeue = 0;
+ dep->trb_enqueue = 0;
+
if (usb_endpoint_xfer_control(desc))
goto out;
/* Initialize the TRB ring */
- dep->trb_dequeue = 0;
- dep->trb_enqueue = 0;
memset(dep->trb_pool, 0,
sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
@@ -2001,10 +2002,10 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r
static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
{
struct dwc3_request *req;
- struct dwc3_request *tmp;
struct dwc3 *dwc = dep->dwc;
- list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) {
+ while (!list_empty(&dep->cancelled_list)) {
+ req = next_request(&dep->cancelled_list);
dwc3_gadget_ep_skip_trbs(dep, req);
switch (req->status) {
case DWC3_REQUEST_STATUS_DISCONNECTED:
@@ -2021,6 +2022,12 @@ static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
dwc3_gadget_giveback(dep, req, -ECONNRESET);
break;
}
+ /*
+ * The endpoint is disabled; let dwc3_remove_requests()
+ * handle the cleanup.
+ */
+ if (!dep->endpoint.desc)
+ break;
}
}
@@ -2056,16 +2063,6 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
if (r == req) {
struct dwc3_request *t;
- /*
- * If a Setup packet is received but yet to DMA out, the controller will
- * not process the End Transfer command of any endpoint. Polling of its
- * DEPCMD.CmdAct may block setting up TRB for Setup packet, causing a
- * timeout. Delay issuing the End Transfer command until the Setup TRB is
- * prepared.
- */
- if (dwc->ep0state != EP0_SETUP_PHASE && !dwc->delayed_status)
- dep->flags |= DWC3_EP_DELAY_STOP;
-
/* wait until it is processed */
dwc3_stop_active_transfer(dep, true, true);
@@ -2152,6 +2149,9 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
if (dep->flags & DWC3_EP_END_TRANSFER_PENDING ||
(dep->flags & DWC3_EP_DELAY_STOP)) {
dep->flags |= DWC3_EP_PENDING_CLEAR_STALL;
+ if (protocol)
+ dwc->clear_stall_protocol = dep->number;
+
return 0;
}
@@ -2498,28 +2498,64 @@ static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
static void __dwc3_gadget_stop(struct dwc3 *dwc);
static int __dwc3_gadget_start(struct dwc3 *dwc);
-static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
{
- struct dwc3 *dwc = gadget_to_dwc(g);
- unsigned long flags;
- int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->connected = false;
- is_on = !!is_on;
- dwc->softconnect = is_on;
/*
* Per databook, when we want to stop the gadget, if a control transfer
* is still in process, complete it and get the core into setup phase.
*/
- if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
+ if (dwc->ep0state != EP0_SETUP_PHASE) {
+ int ret;
+
reinit_completion(&dwc->ep0_in_setup);
+ spin_unlock_irqrestore(&dwc->lock, flags);
ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
+ spin_lock_irqsave(&dwc->lock, flags);
if (ret == 0)
dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
}
/*
+ * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
+ * Section 4.1.8 Table 4-7, it states that for a device-initiated
+ * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
+ * command for any active transfers" before clearing the RunStop
+ * bit.
+ */
+ dwc3_stop_active_transfers(dwc);
+ __dwc3_gadget_stop(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ /*
+ * Note: if the GEVNTCOUNT indicates events in the event buffer, the
+ * driver needs to acknowledge them before the controller can halt.
+ * Simply let the interrupt handler acknowledge and handle the
+ * remaining events generated by the controller while polling for
+ * DSTS.DEVCTLHLT.
+ */
+ return dwc3_gadget_run_stop(dwc, false, false);
+}
+
+static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ int ret;
+
+ is_on = !!is_on;
+
+ if (dwc->pullups_connected == is_on)
+ return 0;
+
+ dwc->softconnect = is_on;
+
+ /*
* Avoid issuing a runtime resume if the device is already in the
* suspended state during gadget disconnect. DWC3 gadget was already
* halted/stopped during runtime suspend.
@@ -2541,42 +2577,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
return 0;
}
- /*
- * Synchronize and disable any further event handling while controller
- * is being enabled/disabled.
- */
- disable_irq(dwc->irq_gadget);
-
- spin_lock_irqsave(&dwc->lock, flags);
-
if (!is_on) {
- u32 count;
-
- dwc->connected = false;
- /*
- * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
- * Section 4.1.8 Table 4-7, it states that for a device-initiated
- * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
- * command for any active transfers" before clearing the RunStop
- * bit.
- */
- dwc3_stop_active_transfers(dwc);
- __dwc3_gadget_stop(dwc);
-
- /*
- * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
- * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the
- * "software needs to acknowledge the events that are generated
- * (by writing to GEVNTCOUNTn) while it is waiting for this bit
- * to be set to '1'."
- */
- count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
- count &= DWC3_GEVNTCOUNT_MASK;
- if (count > 0) {
- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
- dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
- dwc->ev_buf->length;
- }
+ ret = dwc3_gadget_soft_disconnect(dwc);
} else {
/*
* In the Synopsys DWC_usb31 1.90a programming guide section
@@ -2584,18 +2586,13 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
* device-initiated disconnect requires a core soft reset
* (DCTL.CSftRst) before enabling the run/stop bit.
*/
- spin_unlock_irqrestore(&dwc->lock, flags);
dwc3_core_soft_reset(dwc);
- spin_lock_irqsave(&dwc->lock, flags);
dwc3_event_buffers_setup(dwc);
__dwc3_gadget_start(dwc);
+ ret = dwc3_gadget_run_stop(dwc, true, false);
}
- ret = dwc3_gadget_run_stop(dwc, is_on, false);
- spin_unlock_irqrestore(&dwc->lock, flags);
- enable_irq(dwc->irq_gadget);
-
pm_runtime_put(dwc->dev);
return ret;
@@ -2745,6 +2742,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
/* begin to receive SETUP packets */
dwc->ep0state = EP0_SETUP_PHASE;
+ dwc->ep0_bounced = false;
dwc->link_state = DWC3_LINK_STATE_SS_DIS;
dwc->delayed_status = false;
dwc3_ep0_out_start(dwc);
@@ -3333,15 +3331,21 @@ static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event, int status)
{
struct dwc3_request *req;
- struct dwc3_request *tmp;
- list_for_each_entry_safe(req, tmp, &dep->started_list, list) {
+ while (!list_empty(&dep->started_list)) {
int ret;
+ req = next_request(&dep->started_list);
ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
req, status);
if (ret)
break;
+ /*
+ * The endpoint is disabled; let dwc3_remove_requests()
+ * handle the cleanup.
+ */
+ if (!dep->endpoint.desc)
+ break;
}
}
@@ -3380,14 +3384,14 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
struct dwc3 *dwc = dep->dwc;
bool no_started_trb = true;
- if (!dep->endpoint.desc)
- return no_started_trb;
-
dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
goto out;
+ if (!dep->endpoint.desc)
+ return no_started_trb;
+
if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
list_empty(&dep->started_list) &&
(list_empty(&dep->pending_list) || status == -EXDEV))
@@ -3512,7 +3516,7 @@ static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
}
dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
- if (dwc->delayed_status)
+ if (dwc->clear_stall_protocol == dep->number)
dwc3_ep0_send_delayed_status(dwc);
}
@@ -3673,12 +3677,35 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
bool interrupt)
{
+ struct dwc3 *dwc = dep->dwc;
+
+ /*
+ * Only issue End Transfer command to the control endpoint of a started
+ * Data Phase. Typically we should only do so in error cases such as
+ * invalid/unexpected direction as described in the control transfer
+ * flow of the programming guide.
+ */
+ if (dep->number <= 1 && dwc->ep0state != EP0_DATA_PHASE)
+ return;
+
if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
(dep->flags & DWC3_EP_DELAY_STOP) ||
(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
return;
/*
+ * If a Setup packet is received but yet to DMA out, the controller will
+ * not process the End Transfer command of any endpoint. Polling of its
+ * DEPCMD.CmdAct may block setting up TRB for Setup packet, causing a
+ * timeout. Delay issuing the End Transfer command until the Setup TRB is
+ * prepared.
+ */
+ if (dwc->ep0state != EP0_SETUP_PHASE && !dwc->delayed_status) {
+ dep->flags |= DWC3_EP_DELAY_STOP;
+ return;
+ }
+
+ /*
* NOTICE: We are violating what the Databook says about the
* EndTransfer command. Ideally we would _always_ wait for the
* EndTransfer Command Completion IRQ, but that's causing too
@@ -3795,6 +3822,27 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
}
dwc3_reset_gadget(dwc);
+
+ /*
+ * Per SNPS databook section 8.1.2, EP0 should be in the setup
+ * phase after a reset. If it is not, force it back to the setup
+ * phase by issuing a stall and restart.
+ */
+ if (dwc->ep0state != EP0_SETUP_PHASE) {
+ unsigned int dir;
+
+ dir = !!dwc->ep0_expect_in;
+ if (dwc->ep0state == EP0_DATA_PHASE)
+ dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
+ else
+ dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
+
+ dwc->eps[0]->trb_enqueue = 0;
+ dwc->eps[1]->trb_enqueue = 0;
+
+ dwc3_ep0_stall_and_restart(dwc);
+ }
+
/*
* In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
* Section 4.1.2 Table 4-2, it states that during a USB reset, the SW
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index f763380e672e..55a56cf67d73 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -110,6 +110,8 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
void dwc3_ep0_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event);
void dwc3_ep0_out_start(struct dwc3 *dwc);
+void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep);
+void dwc3_ep0_stall_and_restart(struct dwc3 *dwc);
int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index eda871973d6c..f56c30cf151e 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -7,7 +7,6 @@
* Authors: Felipe Balbi <balbi@ti.com>,
*/
-#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -83,7 +82,6 @@ int dwc3_host_init(struct dwc3 *dwc)
}
xhci->dev.parent = dwc->dev;
- ACPI_COMPANION_SET(&xhci->dev, ACPI_COMPANION(dwc->dev));
dwc->xhci = xhci;
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 2eaeaae96759..403563c06477 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2505,7 +2505,7 @@ int usb_composite_probe(struct usb_composite_driver *driver)
gadget_driver->driver.name = driver->name;
gadget_driver->max_speed = driver->max_speed;
- return usb_gadget_probe_driver(gadget_driver);
+ return usb_gadget_register_driver(gadget_driver);
}
EXPORT_SYMBOL_GPL(usb_composite_probe);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 84b73cb03f87..3a6b4926193e 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -284,7 +284,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
goto err;
}
gi->composite.gadget_driver.udc_name = name;
- ret = usb_gadget_probe_driver(&gi->composite.gadget_driver);
+ ret = usb_gadget_register_driver(&gi->composite.gadget_driver);
if (ret) {
gi->composite.gadget_driver.udc_name = NULL;
goto err;
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 349945e064bb..411eb489e0ff 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -333,6 +333,8 @@ static void acm_complete_set_line_coding(struct usb_ep *ep,
}
}
+static int acm_send_break(struct gserial *port, int duration);
+
static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
struct f_acm *acm = func_to_acm(f);
@@ -391,6 +393,14 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
acm->port_handshake_bits = w_value;
break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SEND_BREAK:
+ if (w_index != acm->ctrl_id)
+ goto invalid;
+
+ acm_send_break(&acm->port, w_value);
+ break;
+
default:
invalid:
dev_vdbg(&cdev->gadget->dev,
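A hedged host-side sketch of exercising the new SEND_BREAK handling from userspace (libusb-based; the interface number, timeout and an already-claimed CDC ACM control interface are assumed, and this is not part of the patch):

	/* 0x23 is USB_CDC_REQ_SEND_BREAK; wValue carries the break duration. */
	#include <libusb-1.0/libusb.h>

	static int send_break(libusb_device_handle *handle, uint16_t ctrl_iface,
			      uint16_t duration_ms)
	{
		return libusb_control_transfer(handle,
				LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_CLASS |
				LIBUSB_RECIPIENT_INTERFACE,
				0x23 /* SEND_BREAK */, duration_ms, ctrl_iface,
				NULL, 0, 1000 /* ms timeout */);
	}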
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index d37965867b23..d3feeeb50841 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -24,7 +24,6 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
-#include "u_uvc.h"
#include "uvc.h"
#include "uvc_configfs.h"
#include "uvc_v4l2.h"
@@ -44,7 +43,7 @@ MODULE_PARM_DESC(trace, "Trace level bitmask");
#define UVC_STRING_STREAMING_IDX 1
static struct usb_string uvc_en_us_strings[] = {
- [UVC_STRING_CONTROL_IDX].s = "UVC Camera",
+ /* [UVC_STRING_CONTROL_IDX].s = DYNAMIC, */
[UVC_STRING_STREAMING_IDX].s = "Video Streaming",
{ }
};
@@ -676,6 +675,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address;
+ uvc_en_us_strings[UVC_STRING_CONTROL_IDX].s = opts->function_name;
us = usb_gstrings_attach(cdev, uvc_function_strings,
ARRAY_SIZE(uvc_en_us_strings));
if (IS_ERR(us)) {
@@ -866,6 +866,7 @@ static struct usb_function_instance *uvc_alloc_inst(void)
opts->streaming_interval = 1;
opts->streaming_maxpacket = 1024;
+ snprintf(opts->function_name, sizeof(opts->function_name), "UVC Camera");
ret = uvcg_attach_configfs(opts);
if (ret < 0) {
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 2bb569895a90..c1f62e91b012 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -1179,8 +1179,8 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
if (c_chmask) {
struct uac_rtd_params *prm = &uac->c_prm;
- spin_lock_init(&prm->lock);
- uac->c_prm.uac = uac;
+ spin_lock_init(&prm->lock);
+ uac->c_prm.uac = uac;
prm->max_psize = g_audio->out_ep_maxpsize;
prm->srate = params->c_srates[0];
diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
index 9a01a7d4f17f..24b8681b0d6f 100644
--- a/drivers/usb/gadget/function/u_uvc.h
+++ b/drivers/usb/gadget/function/u_uvc.h
@@ -27,6 +27,7 @@ struct f_uvc_opts {
unsigned int control_interface;
unsigned int streaming_interface;
+ char function_name[32];
/*
* Control descriptors array pointers for full-/high-speed and
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 886103a1fe9b..58e383afdd44 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -80,6 +80,7 @@ struct uvc_request {
struct uvc_video *video;
struct sg_table sgt;
u8 header[UVCG_REQUEST_HEADER_LEN];
+ struct uvc_buffer *last_buf;
};
struct uvc_video {
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 77d64031aa9c..e5a6b6e36b3d 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -10,17 +10,14 @@
* Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
-#include <linux/sort.h>
-
-#include "u_uvc.h"
#include "uvc_configfs.h"
+#include <linux/sort.h>
+
/* -----------------------------------------------------------------------------
* Global Utility Structures and Macros
*/
-#define UVCG_STREAMING_CONTROL_SIZE 1
-
#define UVC_ATTR(prefix, cname, aname) \
static struct configfs_attribute prefix##attr_##cname = { \
.ca_name = __stringify(aname), \
@@ -49,12 +46,6 @@ static int uvcg_config_compare_u32(const void *l, const void *r)
return li < ri ? -1 : li == ri ? 0 : 1;
}
-static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item)
-{
- return container_of(to_config_group(item), struct f_uvc_opts,
- func_inst.group);
-}
-
struct uvcg_config_group_type {
struct config_item_type type;
const char *name;
@@ -125,19 +116,6 @@ static void uvcg_config_remove_children(struct config_group *group)
* control/header
*/
-DECLARE_UVC_HEADER_DESCRIPTOR(1);
-
-struct uvcg_control_header {
- struct config_item item;
- struct UVC_HEADER_DESCRIPTOR(1) desc;
- unsigned linked;
-};
-
-static struct uvcg_control_header *to_uvcg_control_header(struct config_item *item)
-{
- return container_of(item, struct uvcg_control_header, item);
-}
-
#define UVCG_CTRL_HDR_ATTR(cname, aname, bits, limit) \
static ssize_t uvcg_control_header_##cname##_show( \
struct config_item *item, char *page) \
@@ -769,24 +747,6 @@ static const char * const uvcg_format_names[] = {
"mjpeg",
};
-enum uvcg_format_type {
- UVCG_UNCOMPRESSED = 0,
- UVCG_MJPEG,
-};
-
-struct uvcg_format {
- struct config_group group;
- enum uvcg_format_type type;
- unsigned linked;
- unsigned num_frames;
- __u8 bmaControls[UVCG_STREAMING_CONTROL_SIZE];
-};
-
-static struct uvcg_format *to_uvcg_format(struct config_item *item)
-{
- return container_of(to_config_group(item), struct uvcg_format, group);
-}
-
static ssize_t uvcg_format_bma_controls_show(struct uvcg_format *f, char *page)
{
struct f_uvc_opts *opts;
@@ -845,29 +805,11 @@ end:
return ret;
}
-struct uvcg_format_ptr {
- struct uvcg_format *fmt;
- struct list_head entry;
-};
-
/* -----------------------------------------------------------------------------
* streaming/header/<NAME>
* streaming/header
*/
-struct uvcg_streaming_header {
- struct config_item item;
- struct uvc_input_header_descriptor desc;
- unsigned linked;
- struct list_head formats;
- unsigned num_fmt;
-};
-
-static struct uvcg_streaming_header *to_uvcg_streaming_header(struct config_item *item)
-{
- return container_of(item, struct uvcg_streaming_header, item);
-}
-
static void uvcg_format_set_indices(struct config_group *fmt);
static int uvcg_streaming_header_allow_link(struct config_item *src,
@@ -1059,31 +1001,6 @@ static const struct uvcg_config_group_type uvcg_streaming_header_grp_type = {
* streaming/<mode>/<format>/<NAME>
*/
-struct uvcg_frame {
- struct config_item item;
- enum uvcg_format_type fmt_type;
- struct {
- u8 b_length;
- u8 b_descriptor_type;
- u8 b_descriptor_subtype;
- u8 b_frame_index;
- u8 bm_capabilities;
- u16 w_width;
- u16 w_height;
- u32 dw_min_bit_rate;
- u32 dw_max_bit_rate;
- u32 dw_max_video_frame_buffer_size;
- u32 dw_default_frame_interval;
- u8 b_frame_interval_type;
- } __attribute__((packed)) frame;
- u32 *dw_frame_interval;
-};
-
-static struct uvcg_frame *to_uvcg_frame(struct config_item *item)
-{
- return container_of(item, struct uvcg_frame, item);
-}
-
#define UVCG_FRAME_ATTR(cname, aname, bits) \
static ssize_t uvcg_frame_##cname##_show(struct config_item *item, char *page)\
{ \
@@ -1345,6 +1262,7 @@ static struct config_item *uvcg_frame_make(struct config_group *group,
struct uvcg_format *fmt;
struct f_uvc_opts *opts;
struct config_item *opts_item;
+ struct uvcg_frame_ptr *frame_ptr;
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
@@ -1375,6 +1293,16 @@ static struct config_item *uvcg_frame_make(struct config_group *group,
kfree(h);
return ERR_PTR(-EINVAL);
}
+
+ frame_ptr = kzalloc(sizeof(*frame_ptr), GFP_KERNEL);
+ if (!frame_ptr) {
+ mutex_unlock(&opts->lock);
+ kfree(h);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ frame_ptr->frm = h;
+ list_add_tail(&frame_ptr->entry, &fmt->frames);
++fmt->num_frames;
mutex_unlock(&opts->lock);
@@ -1388,13 +1316,23 @@ static void uvcg_frame_drop(struct config_group *group, struct config_item *item
struct uvcg_format *fmt;
struct f_uvc_opts *opts;
struct config_item *opts_item;
+ struct uvcg_frame *target_frm = NULL;
+ struct uvcg_frame_ptr *frame_ptr, *tmp;
opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
opts = to_f_uvc_opts(opts_item);
mutex_lock(&opts->lock);
+ target_frm = container_of(item, struct uvcg_frame, item);
fmt = to_uvcg_format(&group->cg_item);
- --fmt->num_frames;
+
+ list_for_each_entry_safe(frame_ptr, tmp, &fmt->frames, entry)
+ if (frame_ptr->frm == target_frm) {
+ list_del(&frame_ptr->entry);
+ kfree(frame_ptr);
+ --fmt->num_frames;
+ break;
+ }
mutex_unlock(&opts->lock);
config_item_put(item);
@@ -1420,18 +1358,6 @@ static void uvcg_format_set_indices(struct config_group *fmt)
* streaming/uncompressed/<NAME>
*/
-struct uvcg_uncompressed {
- struct uvcg_format fmt;
- struct uvc_format_uncompressed desc;
-};
-
-static struct uvcg_uncompressed *to_uvcg_uncompressed(struct config_item *item)
-{
- return container_of(
- container_of(to_config_group(item), struct uvcg_format, group),
- struct uvcg_uncompressed, fmt);
-}
-
static struct configfs_group_operations uvcg_uncompressed_group_ops = {
.make_item = uvcg_frame_make,
.drop_item = uvcg_frame_drop,
@@ -1565,6 +1491,12 @@ uvcg_uncompressed_##cname##_store(struct config_item *item, \
if (ret) \
goto end; \
\
+ /* index values in uvc are never 0 */ \
+ if (!num) { \
+ ret = -EINVAL; \
+ goto end; \
+ } \
+ \
u->desc.aname = num; \
ret = len; \
end: \
@@ -1645,6 +1577,7 @@ static struct config_group *uvcg_uncompressed_make(struct config_group *group,
h->desc.bmInterfaceFlags = 0;
h->desc.bCopyProtect = 0;
+ INIT_LIST_HEAD(&h->fmt.frames);
h->fmt.type = UVCG_UNCOMPRESSED;
config_group_init_type_name(&h->fmt.group, name,
&uvcg_uncompressed_type);
@@ -1669,18 +1602,6 @@ static const struct uvcg_config_group_type uvcg_uncompressed_grp_type = {
* streaming/mjpeg/<NAME>
*/
-struct uvcg_mjpeg {
- struct uvcg_format fmt;
- struct uvc_format_mjpeg desc;
-};
-
-static struct uvcg_mjpeg *to_uvcg_mjpeg(struct config_item *item)
-{
- return container_of(
- container_of(to_config_group(item), struct uvcg_format, group),
- struct uvcg_mjpeg, fmt);
-}
-
static struct configfs_group_operations uvcg_mjpeg_group_ops = {
.make_item = uvcg_frame_make,
.drop_item = uvcg_frame_drop,
@@ -1758,6 +1679,12 @@ uvcg_mjpeg_##cname##_store(struct config_item *item, \
if (ret) \
goto end; \
\
+ /* index values in uvc are never 0 */ \
+ if (!num) { \
+ ret = -EINVAL; \
+ goto end; \
+ } \
+ \
u->desc.aname = num; \
ret = len; \
end: \
@@ -1831,6 +1758,7 @@ static struct config_group *uvcg_mjpeg_make(struct config_group *group,
h->desc.bmInterfaceFlags = 0;
h->desc.bCopyProtect = 0;
+ INIT_LIST_HEAD(&h->fmt.frames);
h->fmt.type = UVCG_MJPEG;
config_group_init_type_name(&h->fmt.group, name,
&uvcg_mjpeg_type);
@@ -2425,10 +2353,51 @@ UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, 15);
#undef UVCG_OPTS_ATTR
+#define UVCG_OPTS_STRING_ATTR(cname, aname) \
+static ssize_t f_uvc_opts_string_##cname##_show(struct config_item *item,\
+ char *page) \
+{ \
+ struct f_uvc_opts *opts = to_f_uvc_opts(item); \
+ int result; \
+ \
+ mutex_lock(&opts->lock); \
+ result = snprintf(page, sizeof(opts->aname), "%s", opts->aname);\
+ mutex_unlock(&opts->lock); \
+ \
+ return result; \
+} \
+ \
+static ssize_t f_uvc_opts_string_##cname##_store(struct config_item *item,\
+ const char *page, size_t len) \
+{ \
+ struct f_uvc_opts *opts = to_f_uvc_opts(item); \
+ int ret = 0; \
+ \
+ mutex_lock(&opts->lock); \
+ if (opts->refcnt) { \
+ ret = -EBUSY; \
+ goto end; \
+ } \
+ \
+ ret = snprintf(opts->aname, min(sizeof(opts->aname), len), \
+ "%s", page); \
+ \
+end: \
+ mutex_unlock(&opts->lock); \
+ return ret; \
+} \
+ \
+UVC_ATTR(f_uvc_opts_string_, cname, aname)
+
+UVCG_OPTS_STRING_ATTR(function_name, function_name);
+
+#undef UVCG_OPTS_STRING_ATTR
+
static struct configfs_attribute *uvc_attrs[] = {
&f_uvc_opts_attr_streaming_interval,
&f_uvc_opts_attr_streaming_maxpacket,
&f_uvc_opts_attr_streaming_maxburst,
+ &f_uvc_opts_string_attr_function_name,
NULL,
};
diff --git a/drivers/usb/gadget/function/uvc_configfs.h b/drivers/usb/gadget/function/uvc_configfs.h
index 7e1d7ca29bf2..ad2ec8c4c78c 100644
--- a/drivers/usb/gadget/function/uvc_configfs.h
+++ b/drivers/usb/gadget/function/uvc_configfs.h
@@ -12,7 +12,125 @@
#ifndef UVC_CONFIGFS_H
#define UVC_CONFIGFS_H
-struct f_uvc_opts;
+#include <linux/configfs.h>
+
+#include "u_uvc.h"
+
+static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_uvc_opts,
+ func_inst.group);
+}
+
+#define UVCG_STREAMING_CONTROL_SIZE 1
+
+DECLARE_UVC_HEADER_DESCRIPTOR(1);
+
+struct uvcg_control_header {
+ struct config_item item;
+ struct UVC_HEADER_DESCRIPTOR(1) desc;
+ unsigned linked;
+};
+
+static inline struct uvcg_control_header *to_uvcg_control_header(struct config_item *item)
+{
+ return container_of(item, struct uvcg_control_header, item);
+}
+
+enum uvcg_format_type {
+ UVCG_UNCOMPRESSED = 0,
+ UVCG_MJPEG,
+};
+
+struct uvcg_format {
+ struct config_group group;
+ enum uvcg_format_type type;
+ unsigned linked;
+ struct list_head frames;
+ unsigned num_frames;
+ __u8 bmaControls[UVCG_STREAMING_CONTROL_SIZE];
+};
+
+struct uvcg_format_ptr {
+ struct uvcg_format *fmt;
+ struct list_head entry;
+};
+
+static inline struct uvcg_format *to_uvcg_format(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct uvcg_format, group);
+}
+
+struct uvcg_streaming_header {
+ struct config_item item;
+ struct uvc_input_header_descriptor desc;
+ unsigned linked;
+ struct list_head formats;
+ unsigned num_fmt;
+};
+
+static inline struct uvcg_streaming_header *to_uvcg_streaming_header(struct config_item *item)
+{
+ return container_of(item, struct uvcg_streaming_header, item);
+}
+
+struct uvcg_frame_ptr {
+ struct uvcg_frame *frm;
+ struct list_head entry;
+};
+
+struct uvcg_frame {
+ struct config_item item;
+ enum uvcg_format_type fmt_type;
+ struct {
+ u8 b_length;
+ u8 b_descriptor_type;
+ u8 b_descriptor_subtype;
+ u8 b_frame_index;
+ u8 bm_capabilities;
+ u16 w_width;
+ u16 w_height;
+ u32 dw_min_bit_rate;
+ u32 dw_max_bit_rate;
+ u32 dw_max_video_frame_buffer_size;
+ u32 dw_default_frame_interval;
+ u8 b_frame_interval_type;
+ } __attribute__((packed)) frame;
+ u32 *dw_frame_interval;
+};
+
+static inline struct uvcg_frame *to_uvcg_frame(struct config_item *item)
+{
+ return container_of(item, struct uvcg_frame, item);
+}
+
+/* -----------------------------------------------------------------------------
+ * streaming/uncompressed/<NAME>
+ */
+
+struct uvcg_uncompressed {
+ struct uvcg_format fmt;
+ struct uvc_format_uncompressed desc;
+};
+
+static inline struct uvcg_uncompressed *to_uvcg_uncompressed(struct config_item *item)
+{
+ return container_of(to_uvcg_format(item), struct uvcg_uncompressed, fmt);
+}
+
+/* -----------------------------------------------------------------------------
+ * streaming/mjpeg/<NAME>
+ */
+
+struct uvcg_mjpeg {
+ struct uvcg_format fmt;
+ struct uvc_format_mjpeg desc;
+};
+
+static inline struct uvcg_mjpeg *to_uvcg_mjpeg(struct config_item *item)
+{
+ return container_of(to_uvcg_format(item), struct uvcg_mjpeg, fmt);
+}
int uvcg_attach_configfs(struct f_uvc_opts *opts);
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 2cda982f3765..d25edc3d2174 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -185,18 +185,7 @@ int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
- unsigned long flags;
- int ret;
-
- ret = vb2_qbuf(&queue->queue, NULL, buf);
- if (ret < 0)
- return ret;
-
- spin_lock_irqsave(&queue->irqlock, flags);
- ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
- queue->flags &= ~UVC_QUEUE_PAUSED;
- spin_unlock_irqrestore(&queue->irqlock, flags);
- return ret;
+ return vb2_qbuf(&queue->queue, NULL, buf);
}
/*
@@ -328,33 +317,22 @@ int uvcg_queue_enable(struct uvc_video_queue *queue, int enable)
}
/* called with &queue_irqlock held.. */
-struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
+void uvcg_complete_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *buf)
{
- struct uvc_buffer *nextbuf;
-
if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
buf->length != buf->bytesused) {
buf->state = UVC_BUF_STATE_QUEUED;
vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
- return buf;
+ return;
}
- list_del(&buf->queue);
- if (!list_empty(&queue->irqqueue))
- nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
- queue);
- else
- nextbuf = NULL;
-
buf->buf.field = V4L2_FIELD_NONE;
buf->buf.sequence = queue->sequence++;
buf->buf.vb2_buf.timestamp = ktime_get_ns();
vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
-
- return nextbuf;
}
struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue)
@@ -364,8 +342,6 @@ struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue)
if (!list_empty(&queue->irqqueue))
buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
queue);
- else
- queue->flags |= UVC_QUEUE_PAUSED;
return buf;
}
diff --git a/drivers/usb/gadget/function/uvc_queue.h b/drivers/usb/gadget/function/uvc_queue.h
index 05360a0767f6..41f87b917f6b 100644
--- a/drivers/usb/gadget/function/uvc_queue.h
+++ b/drivers/usb/gadget/function/uvc_queue.h
@@ -43,7 +43,6 @@ struct uvc_buffer {
#define UVC_QUEUE_DISCONNECTED (1 << 0)
#define UVC_QUEUE_DROP_INCOMPLETE (1 << 1)
-#define UVC_QUEUE_PAUSED (1 << 2)
struct uvc_video_queue {
struct vb2_queue queue;
@@ -93,7 +92,7 @@ void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect);
int uvcg_queue_enable(struct uvc_video_queue *queue, int enable);
-struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
+void uvcg_complete_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *buf);
struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue);
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 7f59a0c47402..a9bb4553db84 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -112,7 +112,8 @@ uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
if (buf->bytesused == video->queue.buf_used) {
video->queue.buf_used = 0;
buf->state = UVC_BUF_STATE_DONE;
- uvcg_queue_next_buffer(&video->queue, buf);
+ list_del(&buf->queue);
+ uvcg_complete_buffer(&video->queue, buf);
video->fid ^= UVC_STREAM_FID;
video->payload_size = 0;
@@ -154,7 +155,7 @@ uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
sg = sg_next(sg);
for_each_sg(sg, iter, ureq->sgt.nents - 1, i) {
- if (!len || !buf->sg)
+ if (!len || !buf->sg || !sg_dma_len(buf->sg))
break;
sg_left = sg_dma_len(buf->sg) - buf->offset;
@@ -183,8 +184,9 @@ uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
video->queue.buf_used = 0;
buf->state = UVC_BUF_STATE_DONE;
buf->offset = 0;
- uvcg_queue_next_buffer(&video->queue, buf);
+ list_del(&buf->queue);
video->fid ^= UVC_STREAM_FID;
+ ureq->last_buf = buf;
}
}
@@ -210,7 +212,8 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
if (buf->bytesused == video->queue.buf_used) {
video->queue.buf_used = 0;
buf->state = UVC_BUF_STATE_DONE;
- uvcg_queue_next_buffer(&video->queue, buf);
+ list_del(&buf->queue);
+ uvcg_complete_buffer(&video->queue, buf);
video->fid ^= UVC_STREAM_FID;
}
}
@@ -264,6 +267,11 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
uvcg_queue_cancel(queue, 0);
}
+ if (ureq->last_buf) {
+ uvcg_complete_buffer(&video->queue, ureq->last_buf);
+ ureq->last_buf = NULL;
+ }
+
spin_lock_irqsave(&video->req_lock, flags);
list_add_tail(&req->list, &video->req_free);
spin_unlock_irqrestore(&video->req_lock, flags);
@@ -332,6 +340,7 @@ uvc_video_alloc_requests(struct uvc_video *video)
video->ureq[i].req->complete = uvc_video_complete;
video->ureq[i].req->context = &video->ureq[i];
video->ureq[i].video = video;
+ video->ureq[i].last_buf = NULL;
list_add_tail(&video->ureq[i].req->list, &video->req_free);
/* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
index 6bcbad382580..b62e45235e8e 100644
--- a/drivers/usb/gadget/legacy/dbgp.c
+++ b/drivers/usb/gadget/legacy/dbgp.c
@@ -422,7 +422,7 @@ static struct usb_gadget_driver dbgp_driver = {
static int __init dbgp_init(void)
{
- return usb_gadget_probe_driver(&dbgp_driver);
+ return usb_gadget_register_driver(&dbgp_driver);
}
static void __exit dbgp_exit(void)
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 0c01e749f9ea..79990597c39f 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1873,7 +1873,7 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
else
gadgetfs_driver.max_speed = USB_SPEED_FULL;
- value = usb_gadget_probe_driver(&gadgetfs_driver);
+ value = usb_gadget_register_driver(&gadgetfs_driver);
if (value != 0) {
spin_lock_irq(&dev->lock);
goto fail;
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index e9440f7bf019..241740024c50 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -512,12 +512,12 @@ static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
dev->state = STATE_DEV_REGISTERING;
spin_unlock_irqrestore(&dev->lock, flags);
- ret = usb_gadget_probe_driver(&dev->driver);
+ ret = usb_gadget_register_driver(&dev->driver);
spin_lock_irqsave(&dev->lock, flags);
if (ret) {
dev_err(dev->dev,
- "fail, usb_gadget_probe_driver returned %d\n", ret);
+ "fail, usb_gadget_register_driver returned %d\n", ret);
dev->state = STATE_DEV_FAILED;
goto out_unlock;
}
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 85b194011a16..7886497253cc 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/list.h>
+#include <linux/idr.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/sched/task_stack.h>
@@ -23,6 +24,10 @@
#include "trace.h"
+static DEFINE_IDA(gadget_id_numbers);
+
+static struct bus_type gadget_bus_type;
+
/**
* struct usb_udc - describes one usb device controller
* @driver: the gadget driver pointer. For use by the class code
@@ -47,11 +52,9 @@ struct usb_udc {
static struct class *udc_class;
static LIST_HEAD(udc_list);
-static LIST_HEAD(gadget_driver_pending_list);
-static DEFINE_MUTEX(udc_lock);
-static int udc_bind_to_driver(struct usb_udc *udc,
- struct usb_gadget_driver *driver);
+/* Protects udc_list, udc->driver, driver->is_bound, and related calls */
+static DEFINE_MUTEX(udc_lock);
/* ------------------------------------------------------------------------- */
@@ -1238,38 +1241,16 @@ static void usb_udc_nop_release(struct device *dev)
dev_vdbg(dev, "%s\n", __func__);
}
-/* should be called with udc_lock held */
-static int check_pending_gadget_drivers(struct usb_udc *udc)
-{
- struct usb_gadget_driver *driver;
- int ret = 0;
-
- list_for_each_entry(driver, &gadget_driver_pending_list, pending)
- if (!driver->udc_name || strcmp(driver->udc_name,
- dev_name(&udc->dev)) == 0) {
- ret = udc_bind_to_driver(udc, driver);
- if (ret != -EPROBE_DEFER)
- list_del_init(&driver->pending);
- break;
- }
-
- return ret;
-}
-
/**
* usb_initialize_gadget - initialize a gadget and its embedded struct device
* @parent: the parent device to this udc. Usually the controller driver's
* device.
* @gadget: the gadget to be initialized.
* @release: a gadget release function.
- *
- * Returns zero on success, negative errno otherwise.
- * Calls the gadget release function in the latter case.
*/
void usb_initialize_gadget(struct device *parent, struct usb_gadget *gadget,
void (*release)(struct device *dev))
{
- dev_set_name(&gadget->dev, "gadget");
INIT_WORK(&gadget->work, usb_gadget_state_work);
gadget->dev.parent = parent;
@@ -1279,6 +1260,7 @@ void usb_initialize_gadget(struct device *parent, struct usb_gadget *gadget,
gadget->dev.release = usb_udc_nop_release;
device_initialize(&gadget->dev);
+ gadget->dev.bus = &gadget_bus_type;
}
EXPORT_SYMBOL_GPL(usb_initialize_gadget);
@@ -1308,10 +1290,6 @@ int usb_add_gadget(struct usb_gadget *gadget)
if (ret)
goto err_put_udc;
- ret = device_add(&gadget->dev);
- if (ret)
- goto err_put_udc;
-
udc->gadget = gadget;
gadget->udc = udc;
@@ -1319,6 +1297,7 @@ int usb_add_gadget(struct usb_gadget *gadget)
mutex_lock(&udc_lock);
list_add_tail(&udc->list, &udc_list);
+ mutex_unlock(&udc_lock);
ret = device_add(&udc->dev);
if (ret)
@@ -1327,25 +1306,30 @@ int usb_add_gadget(struct usb_gadget *gadget)
usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
udc->vbus = true;
- /* pick up one of pending gadget drivers */
- ret = check_pending_gadget_drivers(udc);
- if (ret)
+ ret = ida_alloc(&gadget_id_numbers, GFP_KERNEL);
+ if (ret < 0)
goto err_del_udc;
+ gadget->id_number = ret;
+ dev_set_name(&gadget->dev, "gadget.%d", ret);
- mutex_unlock(&udc_lock);
+ ret = device_add(&gadget->dev);
+ if (ret)
+ goto err_free_id;
return 0;
+ err_free_id:
+ ida_free(&gadget_id_numbers, gadget->id_number);
+
err_del_udc:
flush_work(&gadget->work);
device_del(&udc->dev);
err_unlist_udc:
+ mutex_lock(&udc_lock);
list_del(&udc->list);
mutex_unlock(&udc_lock);
- device_del(&gadget->dev);
-
err_put_udc:
put_device(&udc->dev);
@@ -1421,30 +1405,11 @@ int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget)
}
EXPORT_SYMBOL_GPL(usb_add_gadget_udc);
-static void usb_gadget_remove_driver(struct usb_udc *udc)
-{
- dev_dbg(&udc->dev, "unregistering UDC driver [%s]\n",
- udc->driver->function);
-
- kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
-
- usb_gadget_disconnect(udc->gadget);
- usb_gadget_disable_async_callbacks(udc);
- if (udc->gadget->irq)
- synchronize_irq(udc->gadget->irq);
- udc->driver->unbind(udc->gadget);
- usb_gadget_udc_stop(udc);
-
- udc->driver = NULL;
- udc->gadget->dev.driver = NULL;
-}
-
/**
- * usb_del_gadget - deletes @udc from udc_list
- * @gadget: the gadget to be removed.
+ * usb_del_gadget - deletes a gadget and unregisters its udc
+ * @gadget: the gadget to be deleted.
*
- * This will call usb_gadget_unregister_driver() if
- * the @udc is still busy.
+ * This will unbind @gadget, if it is bound.
* It will not do a final usb_put_gadget().
*/
void usb_del_gadget(struct usb_gadget *gadget)
@@ -1458,25 +1423,19 @@ void usb_del_gadget(struct usb_gadget *gadget)
mutex_lock(&udc_lock);
list_del(&udc->list);
-
- if (udc->driver) {
- struct usb_gadget_driver *driver = udc->driver;
-
- usb_gadget_remove_driver(udc);
- list_add(&driver->pending, &gadget_driver_pending_list);
- }
mutex_unlock(&udc_lock);
kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
flush_work(&gadget->work);
- device_unregister(&udc->dev);
device_del(&gadget->dev);
+ ida_free(&gadget_id_numbers, gadget->id_number);
+ device_unregister(&udc->dev);
}
EXPORT_SYMBOL_GPL(usb_del_gadget);
/**
- * usb_del_gadget_udc - deletes @udc from udc_list
- * @gadget: the gadget to be removed.
+ * usb_del_gadget_udc - unregisters a gadget
+ * @gadget: the gadget to be unregistered.
*
* Calls usb_del_gadget() and does a final usb_put_gadget().
*/
@@ -1489,123 +1448,147 @@ EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
/* ------------------------------------------------------------------------- */
-static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *driver)
+static int gadget_match_driver(struct device *dev, struct device_driver *drv)
{
- int ret;
+ struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+ struct usb_udc *udc = gadget->udc;
+ struct usb_gadget_driver *driver = container_of(drv,
+ struct usb_gadget_driver, driver);
- dev_dbg(&udc->dev, "registering UDC driver [%s]\n",
- driver->function);
+ /* If the driver specifies a udc_name, it must match the UDC's name */
+ if (driver->udc_name &&
+ strcmp(driver->udc_name, dev_name(&udc->dev)) != 0)
+ return 0;
+
+ /* If the driver is already bound to a gadget, it doesn't match */
+ if (driver->is_bound)
+ return 0;
+
+ /* Otherwise any gadget driver matches any UDC */
+ return 1;
+}
+static int gadget_bind_driver(struct device *dev)
+{
+ struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+ struct usb_udc *udc = gadget->udc;
+ struct usb_gadget_driver *driver = container_of(dev->driver,
+ struct usb_gadget_driver, driver);
+ int ret = 0;
+
+ mutex_lock(&udc_lock);
+ if (driver->is_bound) {
+ mutex_unlock(&udc_lock);
+ return -ENXIO; /* Driver binds to only one gadget */
+ }
+ driver->is_bound = true;
udc->driver = driver;
- udc->gadget->dev.driver = &driver->driver;
+ mutex_unlock(&udc_lock);
+
+ dev_dbg(&udc->dev, "binding gadget driver [%s]\n", driver->function);
usb_gadget_udc_set_speed(udc, driver->max_speed);
+ mutex_lock(&udc_lock);
ret = driver->bind(udc->gadget, driver);
if (ret)
- goto err1;
+ goto err_bind;
+
ret = usb_gadget_udc_start(udc);
- if (ret) {
- driver->unbind(udc->gadget);
- goto err1;
- }
+ if (ret)
+ goto err_start;
usb_gadget_enable_async_callbacks(udc);
usb_udc_connect_control(udc);
+ mutex_unlock(&udc_lock);
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
return 0;
-err1:
+
+ err_start:
+ driver->unbind(udc->gadget);
+
+ err_bind:
if (ret != -EISNAM)
dev_err(&udc->dev, "failed to start %s: %d\n",
- udc->driver->function, ret);
+ driver->function, ret);
+
udc->driver = NULL;
- udc->gadget->dev.driver = NULL;
+ driver->is_bound = false;
+ mutex_unlock(&udc_lock);
+
return ret;
}
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
+static void gadget_unbind_driver(struct device *dev)
{
- struct usb_udc *udc = NULL, *iter;
- int ret = -ENODEV;
+ struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+ struct usb_udc *udc = gadget->udc;
+ struct usb_gadget_driver *driver = udc->driver;
+
+ dev_dbg(&udc->dev, "unbinding gadget driver [%s]\n", driver->function);
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+
+ mutex_lock(&udc_lock);
+ usb_gadget_disconnect(gadget);
+ usb_gadget_disable_async_callbacks(udc);
+ if (gadget->irq)
+ synchronize_irq(gadget->irq);
+ udc->driver->unbind(gadget);
+ usb_gadget_udc_stop(udc);
+
+ driver->is_bound = false;
+ udc->driver = NULL;
+ mutex_unlock(&udc_lock);
+}
+
+/* ------------------------------------------------------------------------- */
+
+int usb_gadget_register_driver_owner(struct usb_gadget_driver *driver,
+ struct module *owner, const char *mod_name)
+{
+ int ret;
if (!driver || !driver->bind || !driver->setup)
return -EINVAL;
+ driver->driver.bus = &gadget_bus_type;
+ driver->driver.owner = owner;
+ driver->driver.mod_name = mod_name;
+ ret = driver_register(&driver->driver);
+ if (ret) {
+ pr_warn("%s: driver registration failed: %d\n",
+ driver->function, ret);
+ return ret;
+ }
+
mutex_lock(&udc_lock);
- if (driver->udc_name) {
- list_for_each_entry(iter, &udc_list, list) {
- ret = strcmp(driver->udc_name, dev_name(&iter->dev));
- if (ret)
- continue;
- udc = iter;
- break;
- }
- if (ret)
- ret = -ENODEV;
- else if (udc->driver)
+ if (!driver->is_bound) {
+ if (driver->match_existing_only) {
+ pr_warn("%s: couldn't find an available UDC or it's busy\n",
+ driver->function);
ret = -EBUSY;
- else
- goto found;
- } else {
- list_for_each_entry(iter, &udc_list, list) {
- /* For now we take the first one */
- if (iter->driver)
- continue;
- udc = iter;
- goto found;
+ } else {
+ pr_info("%s: couldn't find an available UDC\n",
+ driver->function);
+ ret = 0;
}
}
-
- if (!driver->match_existing_only) {
- list_add_tail(&driver->pending, &gadget_driver_pending_list);
- pr_info("couldn't find an available UDC - added [%s] to list of pending drivers\n",
- driver->function);
- ret = 0;
- }
-
mutex_unlock(&udc_lock);
+
if (ret)
- pr_warn("couldn't find an available UDC or it's busy: %d\n", ret);
- return ret;
-found:
- ret = udc_bind_to_driver(udc, driver);
- mutex_unlock(&udc_lock);
+ driver_unregister(&driver->driver);
return ret;
}
-EXPORT_SYMBOL_GPL(usb_gadget_probe_driver);
+EXPORT_SYMBOL_GPL(usb_gadget_register_driver_owner);
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
- struct usb_udc *udc = NULL;
- int ret = -ENODEV;
-
if (!driver || !driver->unbind)
return -EINVAL;
- mutex_lock(&udc_lock);
- list_for_each_entry(udc, &udc_list, list) {
- if (udc->driver == driver) {
- usb_gadget_remove_driver(udc);
- usb_gadget_set_state(udc->gadget,
- USB_STATE_NOTATTACHED);
-
- /* Maybe there is someone waiting for this UDC? */
- check_pending_gadget_drivers(udc);
- /*
- * For now we ignore bind errors as probably it's
- * not a valid reason to fail other's gadget unbind
- */
- ret = 0;
- break;
- }
- }
-
- if (ret) {
- list_del(&driver->pending);
- ret = 0;
- }
- mutex_unlock(&udc_lock);
- return ret;
+ driver_unregister(&driver->driver);
+ return 0;
}
EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
@@ -1757,8 +1740,17 @@ static int usb_udc_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static struct bus_type gadget_bus_type = {
+ .name = "gadget",
+ .probe = gadget_bind_driver,
+ .remove = gadget_unbind_driver,
+ .match = gadget_match_driver,
+};
+
static int __init usb_udc_init(void)
{
+ int rc;
+
udc_class = class_create(THIS_MODULE, "udc");
if (IS_ERR(udc_class)) {
pr_err("failed to create udc class --> %ld\n",
@@ -1767,12 +1759,17 @@ static int __init usb_udc_init(void)
}
udc_class->dev_uevent = usb_udc_uevent;
- return 0;
+
+ rc = bus_register(&gadget_bus_type);
+ if (rc)
+ class_destroy(udc_class);
+ return rc;
}
subsys_initcall(usb_udc_init);
static void __exit usb_udc_exit(void)
{
+ bus_unregister(&gadget_bus_type);
class_destroy(udc_class);
}
module_exit(usb_udc_exit);
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 6a8884632273..c97cd4bc817c 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -71,7 +71,7 @@ static ushort dma_ep = 1;
module_param(dma_ep, ushort, 0644);
/*
- * dma_mode: net2272 dma mode setting (see LOCCTL1 definiton):
+ * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
* mode 0 == Slow DREQ mode
* mode 1 == Fast DREQ mode
* mode 2 == Burst mode
@@ -97,7 +97,7 @@ module_param(fifo_mode, ushort, 0644);
/*
* enable_suspend: When enabled, the driver will respond to
* USB suspend requests by powering down the NET2272. Otherwise,
- * USB suspend requests will be ignored. This is acceptible for
+ * USB suspend requests will be ignored. This is acceptable for
* self-powered devices. For bus powered devices set this to 1.
*/
static ushort enable_suspend = 0;
@@ -288,7 +288,7 @@ static void net2272_ep_reset(struct net2272_ep *ep)
| (1 << LOCAL_OUT_ZLP)
| (1 << BUFFER_FLUSH));
- /* fifo size is handled seperately */
+ /* fifo size is handled separately */
}
static int net2272_disable(struct usb_ep *_ep)
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 051d024b369e..d6a68631354a 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -932,19 +932,11 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
static inline void
queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
{
- struct net2280_dma *end;
- dma_addr_t tmp;
-
/* swap new dummy for old, link; fill and maybe activate */
- end = ep->dummy;
- ep->dummy = req->td;
- req->td = end;
-
- tmp = ep->td_dma;
- ep->td_dma = req->td_dma;
- req->td_dma = tmp;
+ swap(ep->dummy, req->td);
+ swap(ep->td_dma, req->td_dma);
- end->dmadesc = cpu_to_le32 (ep->td_dma);
+ req->td->dmadesc = cpu_to_le32 (ep->td_dma);
fill_dma_desc(ep, req, valid);
}
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 5096d24915ce..61cabb9de6ae 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -1470,7 +1470,7 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
if (!udc->ep0_in) {
stat = 0;
/* read next OUT packet of request, maybe
- * reactiviting the fifo; stall on errors.
+ * reactivating the fifo; stall on errors.
*/
stat = read_fifo(ep0, req);
if (!req || stat < 0) {
@@ -2609,6 +2609,8 @@ static void omap_udc_release(struct device *dev)
if (udc->dc_clk) {
if (udc->clk_requested)
omap_udc_enable_clock(0);
+ clk_unprepare(udc->hhc_clk);
+ clk_unprepare(udc->dc_clk);
clk_put(udc->hhc_clk);
clk_put(udc->dc_clk);
}
@@ -2773,8 +2775,8 @@ static int omap_udc_probe(struct platform_device *pdev)
hhc_clk = clk_get(&pdev->dev, "usb_hhc_ck");
BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
/* can't use omap_udc_enable_clock yet */
- clk_enable(dc_clk);
- clk_enable(hhc_clk);
+ clk_prepare_enable(dc_clk);
+ clk_prepare_enable(hhc_clk);
udelay(100);
}
@@ -2783,8 +2785,8 @@ static int omap_udc_probe(struct platform_device *pdev)
hhc_clk = clk_get(&pdev->dev, "l3_ocpi_ck");
BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
/* can't use omap_udc_enable_clock yet */
- clk_enable(dc_clk);
- clk_enable(hhc_clk);
+ clk_prepare_enable(dc_clk);
+ clk_prepare_enable(hhc_clk);
udelay(100);
}
@@ -2932,8 +2934,8 @@ cleanup0:
usb_put_phy(xceiv);
if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
- clk_disable(hhc_clk);
- clk_disable(dc_clk);
+ clk_disable_unprepare(hhc_clk);
+ clk_disable_unprepare(dc_clk);
clk_put(hhc_clk);
clk_put(dc_clk);
}
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index 6c414c99d01c..c593fc383481 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -44,10 +44,6 @@
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
-#ifdef CONFIG_ARCH_LUBBOCK
-#include <mach/lubbock.h>
-#endif
-
#define UDCCR 0x0000 /* UDC Control Register */
#define UDC_RES1 0x0004 /* UDC Undocumented - Reserved1 */
#define UDC_RES2 0x0008 /* UDC Undocumented - Reserved2 */
@@ -1578,18 +1574,15 @@ lubbock_vbus_irq(int irq, void *_dev)
int vbus;
dev->stats.irqs++;
- switch (irq) {
- case LUBBOCK_USB_IRQ:
+ if (irq == dev->usb_irq) {
vbus = 1;
- disable_irq(LUBBOCK_USB_IRQ);
- enable_irq(LUBBOCK_USB_DISC_IRQ);
- break;
- case LUBBOCK_USB_DISC_IRQ:
+ disable_irq(dev->usb_irq);
+ enable_irq(dev->usb_disc_irq);
+ } else if (irq == dev->usb_disc_irq) {
vbus = 0;
- disable_irq(LUBBOCK_USB_DISC_IRQ);
- enable_irq(LUBBOCK_USB_IRQ);
- break;
- default:
+ disable_irq(dev->usb_disc_irq);
+ enable_irq(dev->usb_irq);
+ } else {
return IRQ_NONE;
}
@@ -2422,20 +2415,28 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_LUBBOCK
if (machine_is_lubbock()) {
- retval = devm_request_irq(&pdev->dev, LUBBOCK_USB_DISC_IRQ,
+ dev->usb_irq = platform_get_irq(pdev, 1);
+ if (dev->usb_irq < 0)
+ return dev->usb_irq;
+
+ dev->usb_disc_irq = platform_get_irq(pdev, 2);
+ if (dev->usb_disc_irq < 0)
+ return dev->usb_disc_irq;
+
+ retval = devm_request_irq(&pdev->dev, dev->usb_disc_irq,
lubbock_vbus_irq, 0, driver_name,
dev);
if (retval != 0) {
pr_err("%s: can't get irq %i, err %d\n",
- driver_name, LUBBOCK_USB_DISC_IRQ, retval);
+ driver_name, dev->usb_disc_irq, retval);
goto err;
}
- retval = devm_request_irq(&pdev->dev, LUBBOCK_USB_IRQ,
+ retval = devm_request_irq(&pdev->dev, dev->usb_irq,
lubbock_vbus_irq, 0, driver_name,
dev);
if (retval != 0) {
pr_err("%s: can't get irq %i, err %d\n",
- driver_name, LUBBOCK_USB_IRQ, retval);
+ driver_name, dev->usb_irq, retval);
goto err;
}
} else
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.h b/drivers/usb/gadget/udc/pxa25x_udc.h
index aa4b68fd9fc0..6ab6047edc83 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.h
+++ b/drivers/usb/gadget/udc/pxa25x_udc.h
@@ -117,16 +117,13 @@ struct pxa25x_udc {
u64 dma_mask;
struct pxa25x_ep ep [PXA_UDC_NUM_ENDPOINTS];
void __iomem *regs;
+ int usb_irq;
+ int usb_disc_irq;
};
#define to_pxa25x(g) (container_of((g), struct pxa25x_udc, gadget))
/*-------------------------------------------------------------------------*/
-#ifdef CONFIG_ARCH_LUBBOCK
-#include <mach/lubbock.h>
-/* lubbock can also report usb connect/disconnect irqs */
-#endif
-
static struct pxa25x_udc *the_controller;
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.h b/drivers/usb/gadget/udc/pxa27x_udc.h
index 0a6bc18a1264..31bf79ce931c 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.h
+++ b/drivers/usb/gadget/udc/pxa27x_udc.h
@@ -326,7 +326,7 @@ struct udc_usb_ep {
* @addr: usb endpoint number
* @config: configuration in which this endpoint is active
* @interface: interface in which this endpoint is active
- * @alternate: altsetting in which this endpoitn is active
+ * @alternate: altsetting in which this endpoint is active
* @fifo_size: max packet size in the endpoint fifo
* @type: endpoint type (bulk, iso, int, ...)
* @udccsr_value: save register of UDCCSR0 for suspend/resume
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
index bf803e013458..4b7eb7701470 100644
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -126,7 +126,7 @@ struct s3c_hsudc_req {
/**
* struct s3c_hsudc - Driver's abstraction of the device controller.
* @gadget: Instance of usb_gadget which is referenced by gadget driver.
- * @driver: Reference to currenty active gadget driver.
+ * @driver: Reference to currently active gadget driver.
* @dev: The device reference used by probe function.
* @lock: Lock to synchronize the usage of Endpoints (EP's are indexed).
* @regs: Remapped base address of controller's register space.
@@ -633,7 +633,7 @@ static void s3c_hsudc_process_setup(struct s3c_hsudc *hsudc)
}
/** s3c_hsudc_handle_ep0_intr - Handle endpoint 0 interrupt.
- * @hsudc: Device controller on which endpoint 0 interrupt has occured.
+ * @hsudc: Device controller on which endpoint 0 interrupt has occurred.
*
* Handle endpoint 0 interrupt when it occurs. EP0 interrupt could occur
* when a stall handshake is sent to host or data is sent/received on
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index d9c406bdb680..6d31ccf6aee5 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -1434,7 +1434,7 @@ __tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
return 0;
}
- /* Halt DMA for this endpiont. */
+ /* Halt DMA for this endpoint. */
if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
ep_pause(xudc, ep->index);
ep_wait_for_inactive(xudc, ep->index);
@@ -3423,7 +3423,7 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
}
/*
- * Compliacne suite appears to be violating polling LFPS tBurst max
+ * Compliance suite appears to be violating polling LFPS tBurst max
* of 1.4us. Send 1.45us instead.
*/
val = xudc_readl(xudc, SSPX_CORE_CNT32);
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 428c755cf2e1..4827e3cd3834 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -632,7 +632,7 @@ top:
dev_dbg(udc->dev, "read %s, %d bytes%s req %p %d/%d\n",
ep->ep_usb.name, count, is_short ? "/S" : "", req,
req->usb_req.actual, req->usb_req.length);
- bufferspace -= count;
+
/* Completion */
if ((req->usb_req.actual == req->usb_req.length) || is_short) {
if (udc->dma_enabled && req->usb_req.length)
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 7f4a03e8647a..8c45bc17a580 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -61,11 +61,6 @@ static inline void ehci_write(void __iomem *base, u32 reg, u32 val)
__raw_writel(val, base + reg);
}
-static inline u32 ehci_read(void __iomem *base, u32 reg)
-{
- return __raw_readl(base + reg);
-}
-
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 1115431a255d..f343967443e2 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -518,6 +518,7 @@ static struct platform_driver ehci_platform_driver = {
.pm = pm_ptr(&ehci_platform_pm_ops),
.of_match_table = vt8500_ehci_ids,
.acpi_match_table = ACPI_PTR(ehci_acpi_match),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
}
};
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index a2a5c2996350..1163af6fad77 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -645,7 +645,7 @@ qh_urb_transaction (
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
- maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe);
/*
* buffer gets wrapped in one or more qtds;
@@ -1218,7 +1218,7 @@ static int ehci_submit_single_step_set_feature(
token |= (1 /* "in" */ << 8); /*This is IN stage*/
- maxpacket = usb_maxpacket(urb->dev, urb->pipe, 0);
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe);
qtd_fill(ehci, qtd, buf, len, token, maxpacket);
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 67a6ee8cb5d8..3d7893747835 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -32,6 +32,8 @@
* There are cases when the host controller fails to enable the port due to,
* for example, insufficient power that can be supplied to the device from
* the USB bus. In those cases, the messages printed here are not helpful.
+ *
+ * Return: Always returns 0
*/
static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
{
@@ -46,11 +48,9 @@ static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
dev_warn(hcd->self.controller,
"Maybe your device is not a high speed device?\n");
dev_warn(hcd->self.controller,
- "The USB host controller does not support full speed "
- "nor low speed devices\n");
+ "The USB host controller does not support full speed nor low speed devices\n");
dev_warn(hcd->self.controller,
- "You can reconfigure the host controller to have "
- "full speed support\n");
+ "You can reconfigure the host controller to have full speed support\n");
}
return 0;
@@ -112,6 +112,8 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
* host controller. Because the Xilinx USB host controller can be configured
* as HS only or HS/FS only, it checks the configuration in the device tree
* entry, and sets an appropriate value for hcd->has_tt.
+ *
+ * Return: zero on success, negative error code otherwise
*/
static int ehci_hcd_xilinx_of_probe(struct platform_device *op)
{
@@ -196,6 +198,8 @@ err_irq:
*
* Remove the hcd structure, and release resources that has been requested
* during probe.
+ *
+ * Return: Always returns 0
*/
static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
{
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index a8e1048278d0..2ba09c3fbc2f 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -408,8 +408,7 @@ static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
size++;
else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
&& (urb->transfer_buffer_length
- % usb_maxpacket(urb->dev, pipe,
- usb_pipeout(pipe))) != 0)
+ % usb_maxpacket(urb->dev, pipe)) != 0)
size++;
break;
case PIPE_ISOCHRONOUS:
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index c3fd375b4778..f8c111e08a0d 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -2596,7 +2596,7 @@ static struct list_head *qh_urb_transaction(struct fotg210_hcd *fotg210,
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
- maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe);
/*
* buffer gets wrapped in one or more qtds;
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 8835f6bd528e..4f564d71bb0b 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -726,7 +726,7 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
INIT_LIST_HEAD(&ep->schedule);
ep->udev = udev;
ep->epnum = epnum;
- ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
+ ep->maxpacket = usb_maxpacket(udev, urb->pipe);
usb_settoggle(udev, epnum, is_out, 0);
if (type == PIPE_CONTROL) {
@@ -757,8 +757,7 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
ep->load = usb_calc_bus_time(udev->speed,
!is_out,
(type == PIPE_ISOCHRONOUS),
- usb_maxpacket(udev, pipe,
- is_out)) /
+ usb_maxpacket(udev, pipe)) /
1000;
}
hep->hcpriv = ep;
@@ -1541,10 +1540,12 @@ static int isp116x_remove(struct platform_device *pdev)
iounmap(isp116x->data_reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- release_mem_region(res->start, 2);
+ if (res)
+ release_mem_region(res->start, 2);
iounmap(isp116x->addr_reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, 2);
+ if (res)
+ release_mem_region(res->start, 2);
usb_put_hcd(hcd);
return 0;
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index d8610ce8f2ec..0e14d1d07709 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -1279,7 +1279,7 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd,
ep->udev = usb_get_dev(udev);
ep->hep = hep;
ep->epnum = epnum;
- ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
+ ep->maxpacket = usb_maxpacket(udev, urb->pipe);
ep->ptd_offset = -EINVAL;
ep->ptd_index = -EINVAL;
usb_settoggle(udev, epnum, is_out, 0);
@@ -1299,8 +1299,8 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd,
ep->interval = urb->interval;
ep->branch = PERIODIC_SIZE;
ep->load = usb_calc_bus_time(udev->speed, !is_out,
- (type == PIPE_ISOCHRONOUS),
- usb_maxpacket(udev, pipe, is_out)) / 1000;
+ type == PIPE_ISOCHRONOUS,
+ usb_maxpacket(udev, pipe)) / 1000;
break;
}
hep->hcpriv = ep;
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 99a5523a79fb..502a3ac5e35b 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -546,7 +546,7 @@ max3421_transfer_out(struct usb_hcd *hcd, struct urb *urb, int fast_retransmit)
return MAX3421_HXFR_BULK_OUT(epnum);
}
- max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
+ max_packet = usb_maxpacket(urb->dev, urb->pipe);
if (max_packet > MAX3421_FIFO_SIZE) {
/*
@@ -952,7 +952,7 @@ max3421_transfer_in_done(struct usb_hcd *hcd, struct urb *urb)
* USB 2.0 Section 5.3.2 Pipes: packets must be full size
* except for last one.
*/
- max_packet = usb_maxpacket(urb->dev, urb->pipe, 0);
+ max_packet = usb_maxpacket(urb->dev, urb->pipe);
if (max_packet > MAX3421_FIFO_SIZE) {
/*
* We do not support isochronous transfers at this
@@ -998,7 +998,7 @@ max3421_transfer_out_done(struct usb_hcd *hcd, struct urb *urb)
* max_packet as an indicator that the end of the
* packet has been reached).
*/
- u32 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
+ u32 max_packet = usb_maxpacket(urb->dev, urb->pipe);
if (max3421_hcd->curr_len == max_packet)
return 0;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 666b1c665188..c4c821c2288c 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -181,8 +181,7 @@ static int ohci_urb_enqueue (
size++;
else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
&& (urb->transfer_buffer_length
- % usb_maxpacket (urb->dev, pipe,
- usb_pipeout (pipe))) == 0)
+ % usb_maxpacket(urb->dev, pipe)) == 0)
size++;
break;
case PIPE_ISOCHRONOUS: /* number of packets from URB */
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 069791d25abb..f5bc9c8bdc9a 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -259,6 +259,10 @@ static int ohci_hcd_omap_probe(struct platform_device *pdev)
goto err_put_hcd;
}
+ retval = clk_prepare(priv->usb_host_ck);
+ if (retval)
+ goto err_put_host_ck;
+
if (!cpu_is_omap15xx())
priv->usb_dc_ck = clk_get(&pdev->dev, "usb_dc_ck");
else
@@ -266,13 +270,17 @@ static int ohci_hcd_omap_probe(struct platform_device *pdev)
if (IS_ERR(priv->usb_dc_ck)) {
retval = PTR_ERR(priv->usb_dc_ck);
- goto err_put_host_ck;
+ goto err_unprepare_host_ck;
}
+ retval = clk_prepare(priv->usb_dc_ck);
+ if (retval)
+ goto err_put_dc_ck;
+
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dev_dbg(&pdev->dev, "request_mem_region failed\n");
retval = -EBUSY;
- goto err_put_dc_ck;
+ goto err_unprepare_dc_ck;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
@@ -297,8 +305,12 @@ err3:
iounmap(hcd->regs);
err2:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err_unprepare_dc_ck:
+ clk_unprepare(priv->usb_dc_ck);
err_put_dc_ck:
clk_put(priv->usb_dc_ck);
+err_unprepare_host_ck:
+ clk_unprepare(priv->usb_host_ck);
err_put_host_ck:
clk_put(priv->usb_host_ck);
err_put_hcd:
@@ -333,7 +345,9 @@ static int ohci_hcd_omap_remove(struct platform_device *pdev)
}
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ clk_unprepare(priv->usb_dc_ck);
clk_put(priv->usb_dc_ck);
+ clk_unprepare(priv->usb_host_ck);
clk_put(priv->usb_host_ck);
usb_put_hcd(hcd);
return 0;
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 4a8456f12a73..47dfbfe9e519 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -334,6 +334,7 @@ static struct platform_driver ohci_platform_driver = {
.name = "ohci-platform",
.pm = &ohci_platform_pm_ops,
.of_match_table = ohci_platform_ids,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
}
};
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 45f7cceb6df3..1960b8dfdba5 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -19,9 +19,6 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <asm/prom.h>
-
-
static int
ohci_ppc_of_start(struct usb_hcd *hcd)
{
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 54aa5c77e549..ab4f610a0140 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -36,8 +36,7 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/otg.h>
-
-#include <mach/hardware.h>
+#include <linux/soc/pxa/cpu.h>
#include "ohci.h"
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index b741670525e3..3a441310c713 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -1685,7 +1685,7 @@ static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
- maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe);
/*
* buffer gets wrapped in one or more qtds;
@@ -1796,7 +1796,7 @@ static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
is_input = usb_pipein(urb->pipe);
type = usb_pipetype(urb->pipe);
- maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+ maxp = usb_maxpacket(urb->dev, urb->pipe);
/* Compute interrupt scheduling parameters just once, and save.
* - allowing for high bandwidth, how many nsec/uframe are used?
@@ -3909,8 +3909,10 @@ static int oxu_bus_suspend(struct usb_hcd *hcd)
}
}
+ spin_unlock_irq(&oxu->lock);
/* turn off now-idle HC */
del_timer_sync(&oxu->watchdog);
+ spin_lock_irq(&oxu->lock);
ehci_halt(oxu);
hcd->state = HC_STATE_SUSPENDED;
@@ -4223,13 +4225,9 @@ static int oxu_drv_probe(struct platform_device *pdev)
/*
* Get the platform resources
*/
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev,
- "no IRQ! Check %s setup!\n", dev_name(&pdev->dev));
- return -ENODEV;
- }
- irq = res->start;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 63719cdf6a4e..abb88dd40d4e 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1867,8 +1867,7 @@ static struct r8a66597_td *r8a66597_make_td(struct r8a66597 *r8a66597,
td->pipe = hep->hcpriv;
td->urb = urb;
td->address = get_urb_to_r8a66597_addr(r8a66597, urb);
- td->maxpacket = usb_maxpacket(urb->dev, urb->pipe,
- !usb_pipein(urb->pipe));
+ td->maxpacket = usb_maxpacket(urb->dev, urb->pipe);
if (usb_pipecontrol(urb->pipe))
td->type = USB_PID_SETUP;
else if (usb_pipein(urb->pipe))
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 85623731a516..d206bd95c7bb 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -842,7 +842,7 @@ static int sl811h_urb_enqueue(
INIT_LIST_HEAD(&ep->schedule);
ep->udev = udev;
ep->epnum = epnum;
- ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
+ ep->maxpacket = usb_maxpacket(udev, urb->pipe);
ep->defctrl = SL11H_HCTLMASK_ARM | SL11H_HCTLMASK_ENABLE;
usb_settoggle(udev, epnum, is_out, 0);
@@ -878,8 +878,8 @@ static int sl811h_urb_enqueue(
if (type == PIPE_ISOCHRONOUS)
ep->defctrl |= SL11H_HCTLMASK_ISOCH;
ep->load = usb_calc_bus_time(udev->speed, !is_out,
- (type == PIPE_ISOCHRONOUS),
- usb_maxpacket(udev, pipe, is_out))
+ type == PIPE_ISOCHRONOUS,
+ usb_maxpacket(udev, pipe))
/ 1000;
break;
}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index f65f1ba2b592..c54f2bc23d3f 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -707,6 +707,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
u16 test_mode, u16 wIndex, unsigned long *flags)
__must_hold(&xhci->lock)
{
+ struct usb_hcd *usb3_hcd = xhci_get_usb3_hcd(xhci);
int i, retval;
/* Disable all Device Slots */
@@ -727,7 +728,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Disable all port (PP = 0)\n");
/* Power off USB3 ports*/
for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
- xhci_set_port_power(xhci, xhci->shared_hcd, i, false, flags);
+ xhci_set_port_power(xhci, usb3_hcd, i, false, flags);
/* Power off USB2 ports*/
for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
xhci_set_port_power(xhci, xhci->main_hcd, i, false, flags);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bbb27ee2c6a3..8c19e151a945 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -782,14 +782,6 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
/***************** Device context manipulation *************************/
-static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
- struct xhci_virt_ep *ep)
-{
- timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
- 0);
- ep->xhci = xhci;
-}
-
static void xhci_free_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int slot_id)
@@ -994,11 +986,11 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
(unsigned long long)dev->in_ctx->dma);
- /* Initialize the cancellation list and watchdog timers for each ep */
+ /* Initialize the cancellation and bandwidth list for each ep */
for (i = 0; i < 31; i++) {
dev->eps[i].ep_index = i;
dev->eps[i].vdev = dev;
- xhci_init_endpoint_timer(xhci, &dev->eps[i]);
+ dev->eps[i].xhci = xhci;
INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
}
@@ -1072,7 +1064,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
struct usb_hcd *hcd;
if (udev->speed >= USB_SPEED_SUPER)
- hcd = xhci->shared_hcd;
+ hcd = xhci_get_usb3_hcd(xhci);
else
hcd = xhci->main_hcd;
@@ -2362,10 +2354,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
}
- /*
- * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
- * Not sure how the USB core will handle a hub with no ports...
- */
+ if (!xhci->usb2_rhub.num_ports)
+ xhci_info(xhci, "USB2 root hub has no ports\n");
+
+ if (!xhci->usb3_rhub.num_ports)
+ xhci_info(xhci, "USB3 root hub has no ports\n");
xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index d7e0e6ebf080..fac9492a8bda 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -59,6 +59,7 @@
#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI 0x464e
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
#define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639
@@ -129,8 +130,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->revision == 0x0) {
xhci->quirks |= XHCI_RESET_EP_QUIRK;
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "QUIRK: Fresco Logic xHC needs configure"
- " endpoint cmd after reset endpoint");
+ "XHCI_RESET_EP_QUIRK for this evaluation HW is deprecated");
}
if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
pdev->revision == 0x4) {
@@ -268,6 +268,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 649ffd861b44..044855818cb1 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -180,7 +180,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
struct device *sysdev, *tmpdev;
struct xhci_hcd *xhci;
struct resource *res;
- struct usb_hcd *hcd;
+ struct usb_hcd *hcd, *usb3_hcd;
int ret;
int irq;
struct xhci_plat_priv *priv = NULL;
@@ -245,6 +245,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
xhci = hcd_to_xhci(hcd);
+ xhci->allow_single_roothub = 1;
+
/*
* Not all platforms have clks so it is not an error if the
* clock do not exist.
@@ -283,12 +285,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, true);
xhci->main_hcd = hcd;
- xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
- dev_name(&pdev->dev), hcd);
- if (!xhci->shared_hcd) {
- ret = -ENOMEM;
- goto disable_clk;
- }
/* imod_interval is the interrupt moderation value in nanoseconds. */
xhci->imod_interval = 40000;
@@ -313,16 +309,16 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (IS_ERR(hcd->usb_phy)) {
ret = PTR_ERR(hcd->usb_phy);
if (ret == -EPROBE_DEFER)
- goto put_usb3_hcd;
+ goto disable_clk;
hcd->usb_phy = NULL;
} else {
ret = usb_phy_init(hcd->usb_phy);
if (ret)
- goto put_usb3_hcd;
+ goto disable_clk;
}
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
- xhci->shared_hcd->tpl_support = hcd->tpl_support;
+
if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
hcd->skip_phy_initialization = 1;
@@ -333,12 +329,26 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (ret)
goto disable_usb_phy;
- if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
- xhci->shared_hcd->can_do_streams = 1;
+ if (!xhci_has_one_roothub(xhci)) {
+ xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
+ dev_name(&pdev->dev), hcd);
+ if (!xhci->shared_hcd) {
+ ret = -ENOMEM;
+ goto dealloc_usb2_hcd;
+ }
- ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
- if (ret)
- goto dealloc_usb2_hcd;
+ xhci->shared_hcd->tpl_support = hcd->tpl_support;
+ }
+
+ usb3_hcd = xhci_get_usb3_hcd(xhci);
+ if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ usb3_hcd->can_do_streams = 1;
+
+ if (xhci->shared_hcd) {
+ ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
+ if (ret)
+ goto put_usb3_hcd;
+ }
device_enable_async_suspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
@@ -352,15 +362,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
return 0;
+put_usb3_hcd:
+ usb_put_hcd(xhci->shared_hcd);
+
dealloc_usb2_hcd:
usb_remove_hcd(hcd);
disable_usb_phy:
usb_phy_shutdown(hcd->usb_phy);
-put_usb3_hcd:
- usb_put_hcd(xhci->shared_hcd);
-
disable_clk:
clk_disable_unprepare(xhci->clk);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f9707997969d..46d0b9ad6f74 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -740,14 +740,6 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
}
-static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
- struct xhci_virt_ep *ep)
-{
- ep->ep_state &= ~EP_STOP_CMD_PENDING;
- /* Can't del_timer_sync in interrupt */
- del_timer(&ep->stop_cmd_timer);
-}
-
/*
* Must be called with xhci->lock held in interrupt context,
* releases and re-acquires xhci->lock
@@ -1122,18 +1114,17 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
reset_type);
if (err)
break;
- xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ ep->ep_state &= ~EP_STOP_CMD_PENDING;
return;
case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
- if (!command)
- xhci_stop_watchdog_timer_in_irq(xhci, ep);
-
- mod_timer(&ep->stop_cmd_timer,
- jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ);
+ if (!command) {
+ ep->ep_state &= ~EP_STOP_CMD_PENDING;
+ return;
+ }
xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
xhci_ring_cmd_db(xhci);
@@ -1142,9 +1133,10 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
break;
}
}
+
/* will queue a set TR deq if stopped on a cancelled, uncleared TD */
xhci_invalidate_cancelled_tds(ep);
- xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ ep->ep_state &= ~EP_STOP_CMD_PENDING;
/* Otherwise ring the doorbell(s) to restart queued transfers */
xhci_giveback_invalidated_tds(ep);
@@ -1248,61 +1240,6 @@ void xhci_hc_died(struct xhci_hcd *xhci)
usb_hc_died(xhci_to_hcd(xhci));
}
-/* Watchdog timer function for when a stop endpoint command fails to complete.
- * In this case, we assume the host controller is broken or dying or dead. The
- * host may still be completing some other events, so we have to be careful to
- * let the event ring handler and the URB dequeueing/enqueueing functions know
- * through xhci->state.
- *
- * The timer may also fire if the host takes a very long time to respond to the
- * command, and the stop endpoint command completion handler cannot delete the
- * timer before the timer function is called. Another endpoint cancellation may
- * sneak in before the timer function can grab the lock, and that may queue
- * another stop endpoint command and add the timer back. So we cannot use a
- * simple flag to say whether there is a pending stop endpoint command for a
- * particular endpoint.
- *
- * Instead we use a combination of that flag and checking if a new timer is
- * pending.
- */
-void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
-{
- struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer);
- struct xhci_hcd *xhci = ep->xhci;
- unsigned long flags;
- u32 usbsts;
- char str[XHCI_MSG_MAX];
-
- spin_lock_irqsave(&xhci->lock, flags);
-
- /* bail out if cmd completed but raced with stop ep watchdog timer.*/
- if (!(ep->ep_state & EP_STOP_CMD_PENDING) ||
- timer_pending(&ep->stop_cmd_timer)) {
- spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit");
- return;
- }
- usbsts = readl(&xhci->op_regs->status);
-
- xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
- xhci_warn(xhci, "USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));
-
- ep->ep_state &= ~EP_STOP_CMD_PENDING;
-
- xhci_halt(xhci);
-
- /*
- * handle a stop endpoint cmd timeout as if host died (-ENODEV).
- * In the future we could distinguish between -ENODEV and -ETIMEDOUT
- * and try to recover a -ETIMEDOUT with a host controller reset
- */
- xhci_hc_died(xhci);
-
- spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "xHCI host controller is dead.");
-}
-
static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
struct xhci_virt_device *dev,
struct xhci_ring *ep_ring,
@@ -1489,8 +1426,6 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
/* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
xhci_invalidate_cancelled_tds(ep);
- if (xhci->quirks & XHCI_RESET_EP_QUIRK)
- xhci_dbg(xhci, "Note: Removed workaround to queue config ep for this hw");
/* Clear our internal halted state */
ep->ep_state &= ~EP_HALTED;
@@ -1534,17 +1469,13 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_ep_ctx *ep_ctx;
unsigned int ep_index;
- unsigned int ep_state;
- u32 add_flags, drop_flags;
+ u32 add_flags;
/*
- * Configure endpoint commands can come from the USB core
- * configuration or alt setting changes, or because the HW
- * needed an extra configure endpoint command after a reset
- * endpoint command or streams were being configured.
- * If the command was for a halted endpoint, the xHCI driver
- * is not waiting on the configure endpoint command.
+ * Configure endpoint commands can come from the USB core configuration
+ * or alt setting changes, or when streams were being configured.
*/
+
virt_dev = xhci->devs[slot_id];
if (!virt_dev)
return;
@@ -1555,34 +1486,13 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
}
add_flags = le32_to_cpu(ctrl_ctx->add_flags);
- drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
+
/* Input ctx add_flags are the endpoint index plus one */
ep_index = xhci_last_valid_endpoint(add_flags) - 1;
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
trace_xhci_handle_cmd_config_ep(ep_ctx);
- /* A usb_set_interface() call directly after clearing a halted
- * condition may race on this quirky hardware. Not worth
- * worrying about, since this is prototype hardware. Not sure
- * if this will work for streams, but streams support was
- * untested on this prototype.
- */
- if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
- ep_index != (unsigned int) -1 &&
- add_flags - SLOT_FLAG == drop_flags) {
- ep_state = virt_dev->eps[ep_index].ep_state;
- if (!(ep_state & EP_HALTED))
- return;
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "Completed config ep cmd - "
- "last ep index = %d, state = %d",
- ep_index, ep_state);
- /* Clear internal halted state and restart ring(s) */
- virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
- return;
- }
return;
}
@@ -1650,9 +1560,12 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
void xhci_handle_command_timeout(struct work_struct *work)
{
- struct xhci_hcd *xhci;
- unsigned long flags;
- u64 hw_ring_state;
+ struct xhci_hcd *xhci;
+ unsigned long flags;
+ char str[XHCI_MSG_MAX];
+ u64 hw_ring_state;
+ u32 cmd_field3;
+ u32 usbsts;
xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
@@ -1666,6 +1579,27 @@ void xhci_handle_command_timeout(struct work_struct *work)
spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
+
+ cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]);
+ usbsts = readl(&xhci->op_regs->status);
+ xhci_dbg(xhci, "Command timeout, USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));
+
+ /* Bail out and tear down xhci if a stop endpoint command failed */
+ if (TRB_FIELD_TO_TYPE(cmd_field3) == TRB_STOP_RING) {
+ struct xhci_virt_ep *ep;
+
+ xhci_warn(xhci, "xHCI host not responding to stop endpoint command\n");
+
+ ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3),
+ TRB_TO_EP_INDEX(cmd_field3));
+ if (ep)
+ ep->ep_state &= ~EP_STOP_CMD_PENDING;
+
+ xhci_halt(xhci);
+ xhci_hc_died(xhci);
+ goto time_out_completed;
+ }
+
/* mark this command to be cancelled */
xhci->current_cmd->status = COMP_COMMAND_ABORTED;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 25b87e99b4dd..f0ab63138016 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -486,6 +486,10 @@ static void compliance_mode_recovery(struct timer_list *t)
xhci = from_timer(xhci, t, comp_mode_recovery_timer);
rhub = &xhci->usb3_rhub;
+ hcd = rhub->hcd;
+
+ if (!hcd)
+ return;
for (i = 0; i < rhub->num_ports; i++) {
temp = readl(rhub->ports[i]->addr);
@@ -499,7 +503,6 @@ static void compliance_mode_recovery(struct timer_list *t)
i + 1);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Attempting compliance mode recovery");
- hcd = xhci->shared_hcd;
if (hcd->state == HC_STATE_SUSPENDED)
usb_hcd_resume_root_hub(hcd);
@@ -612,14 +615,11 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
xhci_halt(xhci);
return -ENODEV;
}
- xhci->shared_hcd->state = HC_STATE_RUNNING;
xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
if (xhci->quirks & XHCI_NEC_HOST)
xhci_ring_cmd_db(xhci);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Finished xhci_run for USB3 roothub");
return 0;
}
@@ -694,12 +694,17 @@ int xhci_run(struct usb_hcd *hcd)
xhci_free_command(xhci, command);
}
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Finished xhci_run for USB2 roothub");
+ "Finished %s for main hcd", __func__);
xhci_create_dbc_dev(xhci);
xhci_debugfs_init(xhci);
+ if (xhci_has_one_roothub(xhci))
+ return xhci_run_finished(xhci);
+
+ set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
+
return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);
@@ -992,7 +997,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
return 0;
if (hcd->state != HC_STATE_SUSPENDED ||
- xhci->shared_hcd->state != HC_STATE_SUSPENDED)
+ (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
return -EINVAL;
/* Clear root port wake on bits if wakeup not allowed. */
@@ -1009,15 +1014,18 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
__func__, hcd->self.busnum);
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
del_timer_sync(&hcd->rh_timer);
- clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
- del_timer_sync(&xhci->shared_hcd->rh_timer);
+ if (xhci->shared_hcd) {
+ clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+ del_timer_sync(&xhci->shared_hcd->rh_timer);
+ }
if (xhci->quirks & XHCI_SUSPEND_DELAY)
usleep_range(1000, 1500);
spin_lock_irq(&xhci->lock);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+ if (xhci->shared_hcd)
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
/* step 1: stop endpoint */
/* skipped assuming that port suspend has done */
@@ -1117,7 +1125,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
msleep(100);
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+ if (xhci->shared_hcd)
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
spin_lock_irq(&xhci->lock);
@@ -1177,7 +1186,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Let the USB core know _both_ roothubs lost power. */
usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
- usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
+ if (xhci->shared_hcd)
+ usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
xhci_dbg(xhci, "Stop HCD\n");
xhci_halt(xhci);
@@ -1217,12 +1227,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci_dbg(xhci, "Start the primary HCD\n");
retval = xhci_run(hcd->primary_hcd);
- if (!retval) {
+ if (!retval && secondary_hcd) {
xhci_dbg(xhci, "Start the secondary HCD\n");
retval = xhci_run(secondary_hcd);
}
hcd->state = HC_STATE_SUSPENDED;
- xhci->shared_hcd->state = HC_STATE_SUSPENDED;
+ if (xhci->shared_hcd)
+ xhci->shared_hcd->state = HC_STATE_SUSPENDED;
goto done;
}
@@ -1260,7 +1271,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
}
if (pending_portevent) {
- usb_hcd_resume_root_hub(xhci->shared_hcd);
+ if (xhci->shared_hcd)
+ usb_hcd_resume_root_hub(xhci->shared_hcd);
usb_hcd_resume_root_hub(hcd);
}
}
@@ -1279,8 +1291,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
__func__, hcd->self.busnum);
- set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
- usb_hcd_poll_rh_status(xhci->shared_hcd);
+ if (xhci->shared_hcd) {
+ set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+ usb_hcd_poll_rh_status(xhci->shared_hcd);
+ }
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
usb_hcd_poll_rh_status(hcd);
@@ -1860,9 +1874,6 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done;
}
ep->ep_state |= EP_STOP_CMD_PENDING;
- ep->stop_cmd_timer.expires = jiffies +
- XHCI_STOP_EP_CMD_TIMEOUT * HZ;
- add_timer(&ep->stop_cmd_timer);
xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
ep_index, 0);
xhci_ring_cmd_db(xhci);
@@ -3972,10 +3983,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
trace_xhci_free_dev(slot_ctx);
/* Stop any wayward timer functions (which may grab the lock) */
- for (i = 0; i < 31; i++) {
+ for (i = 0; i < 31; i++)
virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
- del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
- }
virt_dev->udev = NULL;
xhci_disable_slot(xhci, udev->slot_id);
xhci_free_virt_device(xhci, udev->slot_id);
@@ -4879,9 +4888,6 @@ static int xhci_check_intel_tier_policy(struct usb_device *udev,
struct usb_device *parent;
unsigned int num_hubs;
- if (state == USB3_LPM_U2)
- return 0;
-
/* Don't enable U1 if the device is on a 2nd tier hub or lower. */
for (parent = udev->parent, num_hubs = 0; parent->parent;
parent = parent->parent)
@@ -4890,7 +4896,7 @@ static int xhci_check_intel_tier_policy(struct usb_device *udev,
if (num_hubs < 2)
return 0;
- dev_dbg(&udev->dev, "Disabling U1 link state for device"
+ dev_dbg(&udev->dev, "Disabling U1/U2 link state for device"
" below second-tier hub.\n");
dev_dbg(&udev->dev, "Plug device into first-tier hub "
"to decrease power consumption.\n");
@@ -4931,9 +4937,6 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
return timeout;
}
- if (xhci_check_tier_policy(xhci, udev, state) < 0)
- return timeout;
-
/* Gather some information about the currently installed configuration
* and alternate interface settings.
*/
@@ -5040,6 +5043,9 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
!xhci->devs[udev->slot_id])
return USB3_LPM_DISABLED;
+ if (xhci_check_tier_policy(xhci, udev, state) < 0)
+ return USB3_LPM_DISABLED;
+
hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
if (mel < 0) {
@@ -5207,6 +5213,57 @@ static int xhci_get_frame(struct usb_hcd *hcd)
return readl(&xhci->run_regs->microframe_index) >> 3;
}
+static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
+{
+ xhci->usb2_rhub.hcd = hcd;
+ hcd->speed = HCD_USB2;
+ hcd->self.root_hub->speed = USB_SPEED_HIGH;
+ /*
+ * USB 2.0 roothub under xHCI has an integrated TT,
+ * (rate matching hub) as opposed to having an OHCI/UHCI
+ * companion controller.
+ */
+ hcd->has_tt = 1;
+}
+
+static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
+{
+ unsigned int minor_rev;
+
+ /*
+ * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
+ * should return 0x31 for sbrn, or that the minor revision
+	 * is a two-digit BCD containing minor and sub-minor numbers.
+ * This was later clarified in xHCI 1.2.
+ *
+ * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
+ * minor revision set to 0x1 instead of 0x10.
+ */
+ if (xhci->usb3_rhub.min_rev == 0x1)
+ minor_rev = 1;
+ else
+ minor_rev = xhci->usb3_rhub.min_rev / 0x10;
+
+ switch (minor_rev) {
+ case 2:
+ hcd->speed = HCD_USB32;
+ hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
+ hcd->self.root_hub->rx_lanes = 2;
+ hcd->self.root_hub->tx_lanes = 2;
+ hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
+ break;
+ case 1:
+ hcd->speed = HCD_USB31;
+ hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
+ hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
+ break;
+ }
+ xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
+ minor_rev, minor_rev ? "Enhanced " : "");
+
+ xhci->usb3_rhub.hcd = hcd;
+}
+
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
struct xhci_hcd *xhci;
@@ -5215,7 +5272,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
* quirks
*/
struct device *dev = hcd->self.sysdev;
- unsigned int minor_rev;
int retval;
/* Accept arbitrarily long scatter-gather lists */
@@ -5229,61 +5285,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
xhci = hcd_to_xhci(hcd);
- if (usb_hcd_is_primary_hcd(hcd)) {
- xhci->main_hcd = hcd;
- xhci->usb2_rhub.hcd = hcd;
- /* Mark the first roothub as being USB 2.0.
- * The xHCI driver will register the USB 3.0 roothub.
- */
- hcd->speed = HCD_USB2;
- hcd->self.root_hub->speed = USB_SPEED_HIGH;
- /*
- * USB 2.0 roothub under xHCI has an integrated TT,
- * (rate matching hub) as opposed to having an OHCI/UHCI
- * companion controller.
- */
- hcd->has_tt = 1;
- } else {
- /*
- * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
- * should return 0x31 for sbrn, or that the minor revision
- * is a two digit BCD containig minor and sub-minor numbers.
- * This was later clarified in xHCI 1.2.
- *
- * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
- * minor revision set to 0x1 instead of 0x10.
- */
- if (xhci->usb3_rhub.min_rev == 0x1)
- minor_rev = 1;
- else
- minor_rev = xhci->usb3_rhub.min_rev / 0x10;
-
- switch (minor_rev) {
- case 2:
- hcd->speed = HCD_USB32;
- hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
- hcd->self.root_hub->rx_lanes = 2;
- hcd->self.root_hub->tx_lanes = 2;
- hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
- break;
- case 1:
- hcd->speed = HCD_USB31;
- hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
- hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
- break;
- }
- xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
- minor_rev,
- minor_rev ? "Enhanced " : "");
-
- xhci->usb3_rhub.hcd = hcd;
- /* xHCI private pointer was set in xhci_pci_probe for the second
- * registered roothub.
- */
+ if (!usb_hcd_is_primary_hcd(hcd)) {
+ xhci_hcd_init_usb3_data(xhci, hcd);
return 0;
}
mutex_init(&xhci->mutex);
+ xhci->main_hcd = hcd;
xhci->cap_regs = hcd->regs;
xhci->op_regs = hcd->regs +
HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
@@ -5358,6 +5366,11 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
return retval;
xhci_dbg(xhci, "Called HCD init\n");
+ if (xhci_hcd_is_usb3(hcd))
+ xhci_hcd_init_usb3_data(xhci, hcd);
+ else
+ xhci_hcd_init_usb2_data(xhci, hcd);
+
xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
xhci->hcc_params, xhci->hci_version, xhci->quirks);
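As a quick illustration of the BCD decoding done by xhci_hcd_init_usb3_data() above, here is a minimal userspace sketch (not part of the patch; the helper name and sample values are made up) showing how usb3_rhub.min_rev maps to a USB 3.x minor revision:

#include <stdio.h>

/* Mirrors the workaround above: some hosts report 0x1 instead of BCD 0x10 */
static unsigned int decode_minor_rev(unsigned int min_rev)
{
	if (min_rev == 0x1)
		return 1;
	return min_rev / 0x10;
}

int main(void)
{
	const unsigned int revs[] = { 0x00, 0x01, 0x10, 0x20 };

	for (int i = 0; i < 4; i++)
		printf("min_rev 0x%02x -> USB 3.%u\n", revs[i], decode_minor_rev(revs[i]));

	return 0;	/* prints 3.0, 3.1, 3.1 and 3.2 */
}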
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 473a33ce299e..0bd76c94a4b1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -948,8 +948,6 @@ struct xhci_virt_ep {
#define EP_CLEARING_TT (1 << 8)
/* ---- Related to URB cancellation ---- */
struct list_head cancelled_td_list;
- /* Watchdog timer for stop endpoint command to cancel URBs */
- struct timer_list stop_cmd_timer;
struct xhci_hcd *xhci;
/* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue
* command. We'll need to update the ring's dequeue segment and dequeue
@@ -1848,7 +1846,7 @@ struct xhci_hcd {
#define XHCI_STATE_REMOVING (1 << 2)
unsigned long long quirks;
#define XHCI_LINK_TRB_QUIRK BIT_ULL(0)
-#define XHCI_RESET_EP_QUIRK BIT_ULL(1)
+#define XHCI_RESET_EP_QUIRK BIT_ULL(1) /* Deprecated */
#define XHCI_NEC_HOST BIT_ULL(2)
#define XHCI_AMD_PLL_FIX BIT_ULL(3)
#define XHCI_SPURIOUS_SUCCESS BIT_ULL(4)
@@ -1911,6 +1909,8 @@ struct xhci_hcd {
unsigned hw_lpm_support:1;
/* Broken Suspend flag for SNPS Suspend resume issue */
unsigned broken_suspend:1;
+ /* Indicates that omitting hcd is supported if root hub has no ports */
+ unsigned allow_single_roothub:1;
/* cached usb2 extened protocol capabilites */
u32 *ext_caps;
unsigned int num_ext_caps;
@@ -1966,6 +1966,30 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
return xhci->main_hcd;
}
+static inline struct usb_hcd *xhci_get_usb3_hcd(struct xhci_hcd *xhci)
+{
+ if (xhci->shared_hcd)
+ return xhci->shared_hcd;
+
+ if (!xhci->usb2_rhub.num_ports)
+ return xhci->main_hcd;
+
+ return NULL;
+}
+
+static inline bool xhci_hcd_is_usb3(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ return hcd == xhci_get_usb3_hcd(xhci);
+}
+
+static inline bool xhci_has_one_roothub(struct xhci_hcd *xhci)
+{
+ return xhci->allow_single_roothub &&
+ (!xhci->usb2_rhub.num_ports || !xhci->usb3_rhub.num_ports);
+}
+
#define xhci_dbg(xhci, fmt, args...) \
dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
#define xhci_err(xhci, fmt, args...) \
diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c
index d1d9a7d5da17..af88f4fe00d2 100644
--- a/drivers/usb/isp1760/isp1760-core.c
+++ b/drivers/usb/isp1760/isp1760-core.c
@@ -251,6 +251,8 @@ static const struct reg_field isp1760_hc_reg_fields[] = {
[HW_DM_PULLDOWN] = REG_FIELD(ISP176x_HC_OTG_CTRL, 2, 2),
[HW_DP_PULLDOWN] = REG_FIELD(ISP176x_HC_OTG_CTRL, 1, 1),
[HW_DP_PULLUP] = REG_FIELD(ISP176x_HC_OTG_CTRL, 0, 0),
+ /* Make sure the array is sized properly during compilation */
+ [HC_FIELD_MAX] = {},
};
static const struct reg_field isp1763_hc_reg_fields[] = {
@@ -321,6 +323,8 @@ static const struct reg_field isp1763_hc_reg_fields[] = {
[HW_DM_PULLDOWN_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 2, 2),
[HW_DP_PULLDOWN_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 1, 1),
[HW_DP_PULLUP_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 0, 0),
+ /* Make sure the array is sized properly during compilation */
+ [HC_FIELD_MAX] = {},
};
static const struct regmap_range isp1763_hc_volatile_ranges[] = {
@@ -405,6 +409,8 @@ static const struct reg_field isp1761_dc_reg_fields[] = {
[DC_CHIP_ID_HIGH] = REG_FIELD(ISP176x_DC_CHIPID, 16, 31),
[DC_CHIP_ID_LOW] = REG_FIELD(ISP176x_DC_CHIPID, 0, 15),
[DC_SCRATCH] = REG_FIELD(ISP176x_DC_SCRATCH, 0, 15),
+ /* Make sure the array is sized properly during compilation */
+ [DC_FIELD_MAX] = {},
};
static const struct regmap_range isp1763_dc_volatile_ranges[] = {
@@ -458,6 +464,8 @@ static const struct reg_field isp1763_dc_reg_fields[] = {
[DC_CHIP_ID_HIGH] = REG_FIELD(ISP1763_DC_CHIPID_HIGH, 0, 15),
[DC_CHIP_ID_LOW] = REG_FIELD(ISP1763_DC_CHIPID_LOW, 0, 15),
[DC_SCRATCH] = REG_FIELD(ISP1763_DC_SCRATCH, 0, 15),
+ /* Make sure the array is sized properly during compilation */
+ [DC_FIELD_MAX] = {},
};
static const struct regmap_config isp1763_dc_regmap_conf = {
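The [HC_FIELD_MAX] = {} and [DC_FIELD_MAX] = {} entries added above rely on a designated-initializer trick worth spelling out: initializing the _MAX enumerator forces the array to hold a slot for every enum member, so a lookup indexed by the enum can never run past the end even if a field is added to the enum but forgotten in the table. A standalone sketch (made-up names, userspace C):

#include <stdio.h>

enum demo_field { FIELD_A, FIELD_B, FIELD_C, FIELD_MAX };

struct demo_reg { int reg, lsb, msb; };

static const struct demo_reg demo_fields[] = {
	[FIELD_A] = { 0x00, 0, 7 },
	[FIELD_B] = { 0x04, 0, 3 },
	/* FIELD_C forgotten on purpose: it becomes a zeroed entry instead of */
	/* an out-of-bounds read, because the next line sizes the array to   */
	/* FIELD_MAX + 1 at compile time.                                     */
	[FIELD_MAX] = {},
};

int main(void)
{
	printf("entries: %zu\n", sizeof(demo_fields) / sizeof(demo_fields[0]));
	return 0;	/* prints 4, not 2 */
}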
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index 893becb077d3..76862ba40f35 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -825,8 +825,7 @@ static void create_ptd_atl(struct isp1760_qh *qh,
memset(ptd, 0, sizeof(*ptd));
/* according to 3.6.2, max packet len can not be > 0x400 */
- maxpacket = usb_maxpacket(qtd->urb->dev, qtd->urb->pipe,
- usb_pipeout(qtd->urb->pipe));
+ maxpacket = usb_maxpacket(qtd->urb->dev, qtd->urb->pipe);
multi = 1 + ((maxpacket >> 11) & 0x3);
maxpacket &= 0x7ff;
@@ -1808,8 +1807,7 @@ static void packetize_urb(struct usb_hcd *hcd,
packet_type = IN_PID;
}
- maxpacketsize = usb_maxpacket(urb->dev, urb->pipe,
- usb_pipeout(urb->pipe));
+ maxpacketsize = usb_maxpacket(urb->dev, urb->pipe);
/*
* buffer gets wrapped in one or more qtds;
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 6c38c62d29b2..b2f980409d0b 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -1449,8 +1449,7 @@ wait:if (ftdi->disconnected > 0) {
command->length = 0x8007;
command->address = (toggle_bits << 6) | (ep_number << 2)
| (address << 0);
- command->width = usb_maxpacket(urb->dev, urb->pipe,
- usb_pipeout(urb->pipe));
+ command->width = usb_maxpacket(urb->dev, urb->pipe);
command->follows = 8;
command->value = 0;
command->buffer = urb->setup_packet;
@@ -1514,8 +1513,7 @@ wait:if (ftdi->disconnected > 0) {
1);
command->address = (toggle_bits << 6) | (ep_number << 2)
| (address << 0);
- command->width = usb_maxpacket(urb->dev, urb->pipe,
- usb_pipeout(urb->pipe));
+ command->width = usb_maxpacket(urb->dev, urb->pipe);
command->follows = 0;
command->value = 0;
command->buffer = NULL;
@@ -1571,8 +1569,7 @@ wait:if (ftdi->disconnected > 0) {
command->length = 0x0000;
command->address = (toggle_bits << 6) | (ep_number << 2)
| (address << 0);
- command->width = usb_maxpacket(urb->dev, urb->pipe,
- usb_pipeout(urb->pipe));
+ command->width = usb_maxpacket(urb->dev, urb->pipe);
command->follows = 0;
command->value = 0;
command->buffer = NULL;
@@ -1634,8 +1631,7 @@ wait:if (ftdi->disconnected > 0) {
command->header = 0x81 | (ed << 5);
command->address = (toggle_bits << 6) | (ep_number << 2)
| (address << 0);
- command->width = usb_maxpacket(urb->dev, urb->pipe,
- usb_pipeout(urb->pipe));
+ command->width = usb_maxpacket(urb->dev, urb->pipe);
command->follows = min_t(u32, 1024,
urb->transfer_buffer_length -
urb->actual_length);
@@ -1715,8 +1711,7 @@ wait:if (ftdi->disconnected > 0) {
1);
command->address = (toggle_bits << 6) | (ep_number << 2)
| (address << 0);
- command->width = usb_maxpacket(urb->dev, urb->pipe,
- usb_pipeout(urb->pipe));
+ command->width = usb_maxpacket(urb->dev, urb->pipe);
command->follows = 0;
command->value = 0;
command->buffer = NULL;
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index f8686139d6f3..25ec5666a75e 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -437,7 +437,7 @@ static int lvs_rh_probe(struct usb_interface *intf,
INIT_WORK(&lvs->rh_work, lvs_rh_work);
pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(hdev, pipe);
usb_fill_int_urb(lvs->urb, hdev, pipe, &lvs->buffer[0], maxp,
lvs_rh_irq, lvs, endpoint->bInterval);
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index 1aeb34dbe24f..cad991380b0c 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -36,6 +36,8 @@
#define DMA_INTR_STATUS_MSK GENMASK(7, 0)
#define DMA_INTR_UNMASK_SET_MSK GENMASK(31, 24)
+#define MTK_MUSB_CLKS_NUM 3
+
struct mtk_glue {
struct device *dev;
struct musb *musb;
@@ -44,9 +46,7 @@ struct mtk_glue {
struct phy *phy;
struct usb_phy *xceiv;
enum phy_mode phy_mode;
- struct clk *main;
- struct clk *mcu;
- struct clk *univpll;
+ struct clk_bulk_data clks[MTK_MUSB_CLKS_NUM];
enum usb_role role;
struct usb_role_switch *role_sw;
};
@@ -55,64 +55,11 @@ static int mtk_musb_clks_get(struct mtk_glue *glue)
{
struct device *dev = glue->dev;
- glue->main = devm_clk_get(dev, "main");
- if (IS_ERR(glue->main)) {
- dev_err(dev, "fail to get main clock\n");
- return PTR_ERR(glue->main);
- }
-
- glue->mcu = devm_clk_get(dev, "mcu");
- if (IS_ERR(glue->mcu)) {
- dev_err(dev, "fail to get mcu clock\n");
- return PTR_ERR(glue->mcu);
- }
-
- glue->univpll = devm_clk_get(dev, "univpll");
- if (IS_ERR(glue->univpll)) {
- dev_err(dev, "fail to get univpll clock\n");
- return PTR_ERR(glue->univpll);
- }
-
- return 0;
-}
+ glue->clks[0].id = "main";
+ glue->clks[1].id = "mcu";
+ glue->clks[2].id = "univpll";
-static int mtk_musb_clks_enable(struct mtk_glue *glue)
-{
- int ret;
-
- ret = clk_prepare_enable(glue->main);
- if (ret) {
- dev_err(glue->dev, "failed to enable main clock\n");
- goto err_main_clk;
- }
-
- ret = clk_prepare_enable(glue->mcu);
- if (ret) {
- dev_err(glue->dev, "failed to enable mcu clock\n");
- goto err_mcu_clk;
- }
-
- ret = clk_prepare_enable(glue->univpll);
- if (ret) {
- dev_err(glue->dev, "failed to enable univpll clock\n");
- goto err_univpll_clk;
- }
-
- return 0;
-
-err_univpll_clk:
- clk_disable_unprepare(glue->mcu);
-err_mcu_clk:
- clk_disable_unprepare(glue->main);
-err_main_clk:
- return ret;
-}
-
-static void mtk_musb_clks_disable(struct mtk_glue *glue)
-{
- clk_disable_unprepare(glue->univpll);
- clk_disable_unprepare(glue->mcu);
- clk_disable_unprepare(glue->main);
+ return devm_clk_bulk_get(dev, MTK_MUSB_CLKS_NUM, glue->clks);
}
static int mtk_otg_switch_set(struct mtk_glue *glue, enum usb_role role)
@@ -390,7 +337,7 @@ static int mtk_musb_exit(struct musb *musb)
mtk_otg_switch_exit(glue);
phy_power_off(glue->phy);
phy_exit(glue->phy);
- mtk_musb_clks_disable(glue);
+ clk_bulk_disable_unprepare(MTK_MUSB_CLKS_NUM, glue->clks);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
@@ -528,7 +475,7 @@ static int mtk_musb_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- ret = mtk_musb_clks_enable(glue);
+ ret = clk_bulk_prepare_enable(MTK_MUSB_CLKS_NUM, glue->clks);
if (ret)
goto err_enable_clk;
@@ -551,7 +498,7 @@ static int mtk_musb_probe(struct platform_device *pdev)
return 0;
err_device_register:
- mtk_musb_clks_disable(glue);
+ clk_bulk_disable_unprepare(MTK_MUSB_CLKS_NUM, glue->clks);
err_enable_clk:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
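The MediaTek glue conversion above is the standard clk_bulk pattern; a minimal sketch of it (hypothetical driver, clock names copied from the hunk purely for illustration) looks roughly like this:

#include <linux/clk.h>
#include <linux/device.h>

#define DEMO_NUM_CLKS	3

static struct clk_bulk_data demo_clks[DEMO_NUM_CLKS] = {
	{ .id = "main" }, { .id = "mcu" }, { .id = "univpll" },
};

static int demo_clks_init(struct device *dev)
{
	int ret;

	/* One call replaces the three devm_clk_get() lookups */
	ret = devm_clk_bulk_get(dev, DEMO_NUM_CLKS, demo_clks);
	if (ret)
		return ret;

	/* One call replaces the hand-rolled enable/unwind ladder */
	return clk_bulk_prepare_enable(DEMO_NUM_CLKS, demo_clks);
}

static void demo_clks_exit(void)
{
	clk_bulk_disable_unprepare(DEMO_NUM_CLKS, demo_clks);
}

The bulk helpers also keep error unwinding correct by construction: clk_bulk_prepare_enable() disables whatever it already enabled before returning an error.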
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index d2b7e613eb34..f571a65ae6ee 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -362,6 +362,7 @@ static int omap2430_probe(struct platform_device *pdev)
control_node = of_parse_phandle(np, "ctrl-module", 0);
if (control_node) {
control_pdev = of_find_device_by_node(control_node);
+ of_node_put(control_node);
if (!control_pdev) {
dev_err(&pdev->dev, "Failed to get control device\n");
ret = -EINVAL;
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index ee0863c6553e..6e6ef8c0bc7e 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -95,8 +95,8 @@ static int omap_otg_probe(struct platform_device *pdev)
return -ENODEV;
extcon = extcon_get_extcon_dev(config->extcon);
- if (!extcon)
- return -EPROBE_DEFER;
+ if (IS_ERR(extcon))
+ return PTR_ERR(extcon);
otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL);
if (!otg_dev)
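This hunk (and the matching fusb302 change further down) follows an extcon API change: extcon_get_extcon_dev() now returns an ERR_PTR-encoded error, typically -EPROBE_DEFER, instead of NULL when the provider is not ready. A consumer-side sketch (hypothetical helper name):

#include <linux/err.h>
#include <linux/extcon.h>

static int demo_get_extcon(const char *name, struct extcon_dev **out)
{
	struct extcon_dev *edev;

	edev = extcon_get_extcon_dev(name);
	if (IS_ERR(edev))
		return PTR_ERR(edev);	/* usually -EPROBE_DEFER */

	*out = edev;
	return 0;
}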
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index c0e4df87ff22..39eaa7b97c40 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -208,10 +208,9 @@ static void ark3116_set_termios(struct tty_struct *tty,
lcr |= UART_LCR_PARITY;
if (!(cflag & PARODD))
lcr |= UART_LCR_EPAR;
-#ifdef CMSPAR
if (cflag & CMSPAR)
lcr |= UART_LCR_SPAR;
-#endif
+
/* handshake control */
hcr = (cflag & CRTSCTS) ? 0x03 : 0x00;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 49c08f07c969..b440d338a895 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1671,7 +1671,7 @@ static ssize_t latency_timer_show(struct device *dev,
if (priv->flags & ASYNC_LOW_LATENCY)
return sprintf(buf, "1\n");
else
- return sprintf(buf, "%i\n", priv->latency);
+ return sprintf(buf, "%u\n", priv->latency);
}
/* Write a new value of the latency timer, in units of milliseconds. */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 152ad882657d..e60425bbf537 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1137,6 +1137,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */
+ .driver_info = RSVD(3) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 1d878d05a658..3506c47e1eef 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -421,6 +421,9 @@ static int pl2303_detect_type(struct usb_serial *serial)
bcdUSB = le16_to_cpu(desc->bcdUSB);
switch (bcdUSB) {
+ case 0x101:
+ /* USB 1.0.1? Let's assume they meant 1.1... */
+ fallthrough;
case 0x110:
switch (bcdDevice) {
case 0x300:
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 06aad0d727dd..332fb92ae575 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -30,10 +30,6 @@
#include <linux/usb/ezusb.h>
#include "whiteheat.h" /* WhiteHEAT specific commands */
-#ifndef CMSPAR
-#define CMSPAR 0
-#endif
-
/*
* Version Information
*/
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 20b857e97e60..747be69e5e69 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -1104,7 +1104,7 @@ static int init_alauda(struct us_data *us)
us->extra = kzalloc(sizeof(struct alauda_info), GFP_NOIO);
if (!us->extra)
- return USB_STOR_TRANSPORT_ERROR;
+ return -ENOMEM;
info = (struct alauda_info *) us->extra;
us->extra_destructor = alauda_info_destructor;
@@ -1113,7 +1113,7 @@ static int init_alauda(struct us_data *us)
altsetting->endpoint[0].desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
- return USB_STOR_TRANSPORT_GOOD;
+ return 0;
}
static int alauda_transport(struct scsi_cmnd *srb, struct us_data *us)
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 05429f1f69f9..4e0eef1440b7 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1449,7 +1449,7 @@ static void isd200_free_info_ptrs(void *info_)
* Allocates (if necessary) and initializes the driver structure.
*
* RETURNS:
- * ISD status code
+ * error status code
*/
static int isd200_init_info(struct us_data *us)
{
@@ -1457,7 +1457,7 @@ static int isd200_init_info(struct us_data *us)
info = kzalloc(sizeof(struct isd200_info), GFP_KERNEL);
if (!info)
- return ISD200_ERROR;
+ return -ENOMEM;
info->id = kzalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
info->RegsBuf = kmalloc(sizeof(info->ATARegs), GFP_KERNEL);
@@ -1466,13 +1466,13 @@ static int isd200_init_info(struct us_data *us)
if (!info->id || !info->RegsBuf || !info->srb.sense_buffer) {
isd200_free_info_ptrs(info);
kfree(info);
- return ISD200_ERROR;
+ return -ENOMEM;
}
us->extra = info;
us->extra_destructor = isd200_free_info_ptrs;
- return ISD200_GOOD;
+ return 0;
}
/**************************************************************************
diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
index 05cec81dcd3f..38ddfedef629 100644
--- a/drivers/usb/storage/karma.c
+++ b/drivers/usb/storage/karma.c
@@ -174,24 +174,25 @@ static void rio_karma_destructor(void *extra)
static int rio_karma_init(struct us_data *us)
{
- int ret = 0;
struct karma_data *data = kzalloc(sizeof(struct karma_data), GFP_NOIO);
if (!data)
- goto out;
+ return -ENOMEM;
data->recv = kmalloc(RIO_RECV_LEN, GFP_NOIO);
if (!data->recv) {
kfree(data);
- goto out;
+ return -ENOMEM;
}
us->extra = data;
us->extra_destructor = rio_karma_destructor;
- ret = rio_karma_send_command(RIO_ENTER_STORAGE, us);
- data->in_storage = (ret == 0);
-out:
- return ret;
+ if (rio_karma_send_command(RIO_ENTER_STORAGE, us))
+ return -EIO;
+
+ data->in_storage = 1;
+
+ return 0;
}
static struct scsi_host_template karma_host_template;
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index a989fe930e21..1db2eefeea22 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -180,7 +180,7 @@ static int onetouch_connect_input(struct us_data *ss)
return -ENODEV;
pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(udev, pipe);
maxp = min(maxp, ONETOUCH_PKT_LEN);
onetouch = kzalloc(sizeof(struct usb_onetouch), GFP_KERNEL);
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index 54aa1392c9ca..f0d0ca37163d 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1456,7 +1456,7 @@ static int init_usbat(struct us_data *us, int devicetype)
us->extra = kzalloc(sizeof(struct usbat_info), GFP_NOIO);
if (!us->extra)
- return 1;
+ return -ENOMEM;
info = (struct usbat_info *) (us->extra);
@@ -1465,7 +1465,7 @@ static int init_usbat(struct us_data *us, int devicetype)
USBAT_UIO_OE1 | USBAT_UIO_OE0,
USBAT_UIO_EPAD | USBAT_UIO_1);
if (rc != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
usb_stor_dbg(us, "INIT 1\n");
@@ -1473,42 +1473,42 @@ static int init_usbat(struct us_data *us, int devicetype)
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_TRANSPORT_GOOD)
- return rc;
+ return -EIO;
usb_stor_dbg(us, "INIT 2\n");
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
usb_stor_dbg(us, "INIT 3\n");
rc = usbat_select_and_test_registers(us);
if (rc != USB_STOR_TRANSPORT_GOOD)
- return rc;
+ return -EIO;
usb_stor_dbg(us, "INIT 4\n");
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
usb_stor_dbg(us, "INIT 5\n");
/* Enable peripheral control signals and card detect */
rc = usbat_device_enable_cdt(us);
if (rc != USB_STOR_TRANSPORT_GOOD)
- return rc;
+ return -EIO;
usb_stor_dbg(us, "INIT 6\n");
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
usb_stor_dbg(us, "INIT 7\n");
@@ -1516,19 +1516,19 @@ static int init_usbat(struct us_data *us, int devicetype)
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
usb_stor_dbg(us, "INIT 8\n");
rc = usbat_select_and_test_registers(us);
if (rc != USB_STOR_TRANSPORT_GOOD)
- return rc;
+ return -EIO;
usb_stor_dbg(us, "INIT 9\n");
/* At this point, we need to detect which device we are using */
if (usbat_set_transport(us, info, devicetype))
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
usb_stor_dbg(us, "INIT 10\n");
@@ -1539,11 +1539,11 @@ static int init_usbat(struct us_data *us, int devicetype)
rc = usbat_set_shuttle_features(us, (USBAT_FEAT_ETEN | USBAT_FEAT_ET2 | USBAT_FEAT_ET1),
0x00, 0x88, 0x08, subcountH, subcountL);
if (rc != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
+ return -EIO;
usb_stor_dbg(us, "INIT 11\n");
- return USB_STOR_TRANSPORT_GOOD;
+ return 0;
}
/*
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 1928b3918242..64d96d210e02 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -363,7 +363,7 @@ static int usb_stor_intr_transfer(struct us_data *us, void *buf,
usb_stor_dbg(us, "xfer %u bytes\n", length);
/* calculate the max packet size */
- maxp = usb_maxpacket(us->pusb_dev, pipe, usb_pipeout(pipe));
+ maxp = usb_maxpacket(us->pusb_dev, pipe);
if (maxp > length)
maxp = length;
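The usb_maxpacket() call sites touched throughout this series all reflect the same core API simplification: the function now derives the direction from the pipe itself, so the old usb_pipeout() third argument is gone. A small sketch (hypothetical helper, assuming a bulk-in endpoint):

#include <linux/usb.h>

static unsigned int demo_bulk_in_maxp(struct usb_device *udev, unsigned int ep)
{
	unsigned int pipe = usb_rcvbulkpipe(udev, ep);

	/* Previously: usb_maxpacket(udev, pipe, usb_pipeout(pipe)) */
	return usb_maxpacket(udev, pipe);
}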
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index 78e0e78954f2..26ea2fdec17d 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -24,7 +24,7 @@ typec_altmode_set_mux(struct altmode *alt, unsigned long conf, void *data)
state.mode = conf;
state.data = data;
- return alt->mux->set(alt->mux, &state);
+ return typec_mux_set(alt->mux, &state);
}
static int typec_altmode_set_state(struct typec_altmode *adev,
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index c8340de0ed49..fd55c2c516a5 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -17,9 +17,16 @@
#include "class.h"
#include "mux.h"
+#define TYPEC_MUX_MAX_DEVS 3
+
+struct typec_switch {
+ struct typec_switch_dev *sw_devs[TYPEC_MUX_MAX_DEVS];
+ unsigned int num_sw_devs;
+};
+
static int switch_fwnode_match(struct device *dev, const void *fwnode)
{
- if (!is_typec_switch(dev))
+ if (!is_typec_switch_dev(dev))
return 0;
return dev_fwnode(dev) == fwnode;
@@ -49,7 +56,7 @@ static void *typec_switch_match(struct fwnode_handle *fwnode, const char *id,
dev = class_find_device(&typec_mux_class, NULL, fwnode,
switch_fwnode_match);
- return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+ return dev ? to_typec_switch_dev(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**
@@ -63,14 +70,50 @@ static void *typec_switch_match(struct fwnode_handle *fwnode, const char *id,
*/
struct typec_switch *fwnode_typec_switch_get(struct fwnode_handle *fwnode)
{
+ struct typec_switch_dev *sw_devs[TYPEC_MUX_MAX_DEVS];
struct typec_switch *sw;
+ int count;
+ int err;
+ int i;
+
+ sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+ if (!sw)
+ return ERR_PTR(-ENOMEM);
+
+ count = fwnode_connection_find_matches(fwnode, "orientation-switch", NULL,
+ typec_switch_match,
+ (void **)sw_devs,
+ ARRAY_SIZE(sw_devs));
+ if (count <= 0) {
+ kfree(sw);
+ return NULL;
+ }
- sw = fwnode_connection_find_match(fwnode, "orientation-switch", NULL,
- typec_switch_match);
- if (!IS_ERR_OR_NULL(sw))
- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+ for (i = 0; i < count; i++) {
+ if (IS_ERR(sw_devs[i])) {
+ err = PTR_ERR(sw_devs[i]);
+ goto put_sw_devs;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ WARN_ON(!try_module_get(sw_devs[i]->dev.parent->driver->owner));
+ sw->sw_devs[i] = sw_devs[i];
+ }
+
+ sw->num_sw_devs = count;
return sw;
+
+put_sw_devs:
+ for (i = 0; i < count; i++) {
+ if (!IS_ERR(sw_devs[i]))
+ put_device(&sw_devs[i]->dev);
+ }
+
+ kfree(sw);
+
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fwnode_typec_switch_get);
@@ -82,16 +125,25 @@ EXPORT_SYMBOL_GPL(fwnode_typec_switch_get);
*/
void typec_switch_put(struct typec_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw)) {
- module_put(sw->dev.parent->driver->owner);
- put_device(&sw->dev);
+ struct typec_switch_dev *sw_dev;
+ unsigned int i;
+
+ if (IS_ERR_OR_NULL(sw))
+ return;
+
+ for (i = 0; i < sw->num_sw_devs; i++) {
+ sw_dev = sw->sw_devs[i];
+
+ module_put(sw_dev->dev.parent->driver->owner);
+ put_device(&sw_dev->dev);
}
+ kfree(sw);
}
EXPORT_SYMBOL_GPL(typec_switch_put);
static void typec_switch_release(struct device *dev)
{
- kfree(to_typec_switch(dev));
+ kfree(to_typec_switch_dev(dev));
}
const struct device_type typec_switch_dev_type = {
@@ -109,82 +161,102 @@ const struct device_type typec_switch_dev_type = {
* connector to the USB controllers. USB Type-C plugs can be inserted
* right-side-up or upside-down.
*/
-struct typec_switch *
+struct typec_switch_dev *
typec_switch_register(struct device *parent,
const struct typec_switch_desc *desc)
{
- struct typec_switch *sw;
+ struct typec_switch_dev *sw_dev;
int ret;
if (!desc || !desc->set)
return ERR_PTR(-EINVAL);
- sw = kzalloc(sizeof(*sw), GFP_KERNEL);
- if (!sw)
+ sw_dev = kzalloc(sizeof(*sw_dev), GFP_KERNEL);
+ if (!sw_dev)
return ERR_PTR(-ENOMEM);
- sw->set = desc->set;
+ sw_dev->set = desc->set;
- device_initialize(&sw->dev);
- sw->dev.parent = parent;
- sw->dev.fwnode = desc->fwnode;
- sw->dev.class = &typec_mux_class;
- sw->dev.type = &typec_switch_dev_type;
- sw->dev.driver_data = desc->drvdata;
- dev_set_name(&sw->dev, "%s-switch",
- desc->name ? desc->name : dev_name(parent));
+ device_initialize(&sw_dev->dev);
+ sw_dev->dev.parent = parent;
+ sw_dev->dev.fwnode = desc->fwnode;
+ sw_dev->dev.class = &typec_mux_class;
+ sw_dev->dev.type = &typec_switch_dev_type;
+ sw_dev->dev.driver_data = desc->drvdata;
+ ret = dev_set_name(&sw_dev->dev, "%s-switch", desc->name ? desc->name : dev_name(parent));
+ if (ret) {
+ put_device(&sw_dev->dev);
+ return ERR_PTR(ret);
+ }
- ret = device_add(&sw->dev);
+ ret = device_add(&sw_dev->dev);
if (ret) {
dev_err(parent, "failed to register switch (%d)\n", ret);
- put_device(&sw->dev);
+ put_device(&sw_dev->dev);
return ERR_PTR(ret);
}
- return sw;
+ return sw_dev;
}
EXPORT_SYMBOL_GPL(typec_switch_register);
int typec_switch_set(struct typec_switch *sw,
enum typec_orientation orientation)
{
+ struct typec_switch_dev *sw_dev;
+ unsigned int i;
+ int ret;
+
if (IS_ERR_OR_NULL(sw))
return 0;
- return sw->set(sw, orientation);
+ for (i = 0; i < sw->num_sw_devs; i++) {
+ sw_dev = sw->sw_devs[i];
+
+ ret = sw_dev->set(sw_dev, orientation);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(typec_switch_set);
/**
* typec_switch_unregister - Unregister USB Type-C orientation switch
- * @sw: USB Type-C orientation switch
+ * @sw_dev: USB Type-C orientation switch
*
* Unregister switch that was registered with typec_switch_register().
*/
-void typec_switch_unregister(struct typec_switch *sw)
+void typec_switch_unregister(struct typec_switch_dev *sw_dev)
{
- if (!IS_ERR_OR_NULL(sw))
- device_unregister(&sw->dev);
+ if (!IS_ERR_OR_NULL(sw_dev))
+ device_unregister(&sw_dev->dev);
}
EXPORT_SYMBOL_GPL(typec_switch_unregister);
-void typec_switch_set_drvdata(struct typec_switch *sw, void *data)
+void typec_switch_set_drvdata(struct typec_switch_dev *sw_dev, void *data)
{
- dev_set_drvdata(&sw->dev, data);
+ dev_set_drvdata(&sw_dev->dev, data);
}
EXPORT_SYMBOL_GPL(typec_switch_set_drvdata);
-void *typec_switch_get_drvdata(struct typec_switch *sw)
+void *typec_switch_get_drvdata(struct typec_switch_dev *sw_dev)
{
- return dev_get_drvdata(&sw->dev);
+ return dev_get_drvdata(&sw_dev->dev);
}
EXPORT_SYMBOL_GPL(typec_switch_get_drvdata);
/* ------------------------------------------------------------------------- */
+struct typec_mux {
+ struct typec_mux_dev *mux_devs[TYPEC_MUX_MAX_DEVS];
+ unsigned int num_mux_devs;
+};
+
static int mux_fwnode_match(struct device *dev, const void *fwnode)
{
- if (!is_typec_mux(dev))
+ if (!is_typec_mux_dev(dev))
return 0;
return dev_fwnode(dev) == fwnode;
@@ -246,7 +318,7 @@ find_mux:
dev = class_find_device(&typec_mux_class, NULL, fwnode,
mux_fwnode_match);
- return dev ? to_typec_mux(dev) : ERR_PTR(-EPROBE_DEFER);
+ return dev ? to_typec_mux_dev(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**
@@ -262,14 +334,50 @@ find_mux:
struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode,
const struct typec_altmode_desc *desc)
{
+ struct typec_mux_dev *mux_devs[TYPEC_MUX_MAX_DEVS];
struct typec_mux *mux;
+ int count;
+ int err;
+ int i;
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ count = fwnode_connection_find_matches(fwnode, "mode-switch",
+ (void *)desc, typec_mux_match,
+ (void **)mux_devs,
+ ARRAY_SIZE(mux_devs));
+ if (count <= 0) {
+ kfree(mux);
+ return NULL;
+ }
- mux = fwnode_connection_find_match(fwnode, "mode-switch", (void *)desc,
- typec_mux_match);
- if (!IS_ERR_OR_NULL(mux))
- WARN_ON(!try_module_get(mux->dev.parent->driver->owner));
+ for (i = 0; i < count; i++) {
+ if (IS_ERR(mux_devs[i])) {
+ err = PTR_ERR(mux_devs[i]);
+ goto put_mux_devs;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ WARN_ON(!try_module_get(mux_devs[i]->dev.parent->driver->owner));
+ mux->mux_devs[i] = mux_devs[i];
+ }
+
+ mux->num_mux_devs = count;
return mux;
+
+put_mux_devs:
+ for (i = 0; i < count; i++) {
+ if (!IS_ERR(mux_devs[i]))
+ put_device(&mux_devs[i]->dev);
+ }
+
+ kfree(mux);
+
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fwnode_typec_mux_get);
@@ -281,25 +389,45 @@ EXPORT_SYMBOL_GPL(fwnode_typec_mux_get);
*/
void typec_mux_put(struct typec_mux *mux)
{
- if (!IS_ERR_OR_NULL(mux)) {
- module_put(mux->dev.parent->driver->owner);
- put_device(&mux->dev);
+ struct typec_mux_dev *mux_dev;
+ unsigned int i;
+
+ if (IS_ERR_OR_NULL(mux))
+ return;
+
+ for (i = 0; i < mux->num_mux_devs; i++) {
+ mux_dev = mux->mux_devs[i];
+ module_put(mux_dev->dev.parent->driver->owner);
+ put_device(&mux_dev->dev);
}
+ kfree(mux);
}
EXPORT_SYMBOL_GPL(typec_mux_put);
int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
{
+ struct typec_mux_dev *mux_dev;
+ unsigned int i;
+ int ret;
+
if (IS_ERR_OR_NULL(mux))
return 0;
- return mux->set(mux, state);
+ for (i = 0; i < mux->num_mux_devs; i++) {
+ mux_dev = mux->mux_devs[i];
+
+ ret = mux_dev->set(mux_dev, state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(typec_mux_set);
static void typec_mux_release(struct device *dev)
{
- kfree(to_typec_mux(dev));
+ kfree(to_typec_mux_dev(dev));
}
const struct device_type typec_mux_dev_type = {
@@ -317,63 +445,66 @@ const struct device_type typec_mux_dev_type = {
* the pins on the connector need to be reconfigured. This function registers
* multiplexer switches routing the pins on the connector.
*/
-struct typec_mux *
+struct typec_mux_dev *
typec_mux_register(struct device *parent, const struct typec_mux_desc *desc)
{
- struct typec_mux *mux;
+ struct typec_mux_dev *mux_dev;
int ret;
if (!desc || !desc->set)
return ERR_PTR(-EINVAL);
- mux = kzalloc(sizeof(*mux), GFP_KERNEL);
- if (!mux)
+ mux_dev = kzalloc(sizeof(*mux_dev), GFP_KERNEL);
+ if (!mux_dev)
return ERR_PTR(-ENOMEM);
- mux->set = desc->set;
+ mux_dev->set = desc->set;
- device_initialize(&mux->dev);
- mux->dev.parent = parent;
- mux->dev.fwnode = desc->fwnode;
- mux->dev.class = &typec_mux_class;
- mux->dev.type = &typec_mux_dev_type;
- mux->dev.driver_data = desc->drvdata;
- dev_set_name(&mux->dev, "%s-mux",
- desc->name ? desc->name : dev_name(parent));
+ device_initialize(&mux_dev->dev);
+ mux_dev->dev.parent = parent;
+ mux_dev->dev.fwnode = desc->fwnode;
+ mux_dev->dev.class = &typec_mux_class;
+ mux_dev->dev.type = &typec_mux_dev_type;
+ mux_dev->dev.driver_data = desc->drvdata;
+ ret = dev_set_name(&mux_dev->dev, "%s-mux", desc->name ? desc->name : dev_name(parent));
+ if (ret) {
+ put_device(&mux_dev->dev);
+ return ERR_PTR(ret);
+ }
- ret = device_add(&mux->dev);
+ ret = device_add(&mux_dev->dev);
if (ret) {
dev_err(parent, "failed to register mux (%d)\n", ret);
- put_device(&mux->dev);
+ put_device(&mux_dev->dev);
return ERR_PTR(ret);
}
- return mux;
+ return mux_dev;
}
EXPORT_SYMBOL_GPL(typec_mux_register);
/**
* typec_mux_unregister - Unregister Multiplexer Switch
- * @mux: USB Type-C Connector Multiplexer/DeMultiplexer
+ * @mux_dev: USB Type-C Connector Multiplexer/DeMultiplexer
*
* Unregister mux that was registered with typec_mux_register().
*/
-void typec_mux_unregister(struct typec_mux *mux)
+void typec_mux_unregister(struct typec_mux_dev *mux_dev)
{
- if (!IS_ERR_OR_NULL(mux))
- device_unregister(&mux->dev);
+ if (!IS_ERR_OR_NULL(mux_dev))
+ device_unregister(&mux_dev->dev);
}
EXPORT_SYMBOL_GPL(typec_mux_unregister);
-void typec_mux_set_drvdata(struct typec_mux *mux, void *data)
+void typec_mux_set_drvdata(struct typec_mux_dev *mux_dev, void *data)
{
- dev_set_drvdata(&mux->dev, data);
+ dev_set_drvdata(&mux_dev->dev, data);
}
EXPORT_SYMBOL_GPL(typec_mux_set_drvdata);
-void *typec_mux_get_drvdata(struct typec_mux *mux)
+void *typec_mux_get_drvdata(struct typec_mux_dev *mux_dev)
{
- return dev_get_drvdata(&mux->dev);
+ return dev_get_drvdata(&mux_dev->dev);
}
EXPORT_SYMBOL_GPL(typec_mux_get_drvdata);
diff --git a/drivers/usb/typec/mux.h b/drivers/usb/typec/mux.h
index b1d6e837cb74..58f0f28b6dc8 100644
--- a/drivers/usb/typec/mux.h
+++ b/drivers/usb/typec/mux.h
@@ -5,23 +5,23 @@
#include <linux/usb/typec_mux.h>
-struct typec_switch {
+struct typec_switch_dev {
struct device dev;
typec_switch_set_fn_t set;
};
-struct typec_mux {
+struct typec_mux_dev {
struct device dev;
typec_mux_set_fn_t set;
};
-#define to_typec_switch(_dev_) container_of(_dev_, struct typec_switch, dev)
-#define to_typec_mux(_dev_) container_of(_dev_, struct typec_mux, dev)
+#define to_typec_switch_dev(_dev_) container_of(_dev_, struct typec_switch_dev, dev)
+#define to_typec_mux_dev(_dev_) container_of(_dev_, struct typec_mux_dev, dev)
extern const struct device_type typec_switch_dev_type;
extern const struct device_type typec_mux_dev_type;
-#define is_typec_switch(dev) ((dev)->type == &typec_switch_dev_type)
-#define is_typec_mux(dev) ((dev)->type == &typec_mux_dev_type)
+#define is_typec_switch_dev(dev) ((dev)->type == &typec_switch_dev_type)
+#define is_typec_mux_dev(dev) ((dev)->type == &typec_mux_dev_type)
#endif /* __USB_TYPEC_MUX__ */
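After the rename above, providers deal in typec_switch_dev / typec_mux_dev while consumers keep the opaque typec_switch / typec_mux handles, which may now fan out to several devices behind one fwnode. A consumer-side sketch (hypothetical port driver, names made up) using only the calls shown in the mux.c diff:

#include <linux/err.h>
#include <linux/usb/typec.h>
#include <linux/usb/typec_mux.h>

static int demo_set_orientation(struct fwnode_handle *fwnode,
				enum typec_orientation orientation)
{
	struct typec_switch *sw;
	int ret;

	sw = fwnode_typec_switch_get(fwnode);
	if (IS_ERR(sw))
		return PTR_ERR(sw);	/* e.g. -EPROBE_DEFER */

	/* Returns 0 if sw is NULL (no switch described in firmware) */
	ret = typec_switch_set(sw, orientation);
	typec_switch_put(sw);

	return ret;
}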
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index edead555835e..5eb2c17d72c1 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -2,6 +2,16 @@
menu "USB Type-C Multiplexer/DeMultiplexer Switch support"
+config TYPEC_MUX_FSA4480
+ tristate "ON Semi FSA4480 Analog Audio Switch driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Driver for the ON Semiconductor FSA4480 Analog Audio Switch, which
+ provides support for muxing analog audio and sideband signals on a
+ common USB Type-C connector.
+ If compiled as a module, the module will be named fsa4480.
+
config TYPEC_MUX_PI3USB30532
tristate "Pericom PI3USB30532 Type-C cross switch driver"
depends on I2C
diff --git a/drivers/usb/typec/mux/Makefile b/drivers/usb/typec/mux/Makefile
index 280a6f553115..e52a56c16bfb 100644
--- a/drivers/usb/typec/mux/Makefile
+++ b/drivers/usb/typec/mux/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_TYPEC_MUX_FSA4480) += fsa4480.o
obj-$(CONFIG_TYPEC_MUX_PI3USB30532) += pi3usb30532.o
obj-$(CONFIG_TYPEC_MUX_INTEL_PMC) += intel_pmc_mux.o
diff --git a/drivers/usb/typec/mux/fsa4480.c b/drivers/usb/typec/mux/fsa4480.c
new file mode 100644
index 000000000000..6184f5367190
--- /dev/null
+++ b/drivers/usb/typec/mux/fsa4480.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021-2022 Linaro Ltd.
+ * Copyright (C) 2018-2020 The Linux Foundation
+ */
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+
+#define FSA4480_SWITCH_ENABLE 0x04
+#define FSA4480_SWITCH_SELECT 0x05
+#define FSA4480_SWITCH_STATUS1 0x07
+#define FSA4480_SLOW_L 0x08
+#define FSA4480_SLOW_R 0x09
+#define FSA4480_SLOW_MIC 0x0a
+#define FSA4480_SLOW_SENSE 0x0b
+#define FSA4480_SLOW_GND 0x0c
+#define FSA4480_DELAY_L_R 0x0d
+#define FSA4480_DELAY_L_MIC 0x0e
+#define FSA4480_DELAY_L_SENSE 0x0f
+#define FSA4480_DELAY_L_AGND 0x10
+#define FSA4480_RESET 0x1e
+#define FSA4480_MAX_REGISTER 0x1f
+
+#define FSA4480_ENABLE_DEVICE BIT(7)
+#define FSA4480_ENABLE_SBU GENMASK(6, 5)
+#define FSA4480_ENABLE_USB GENMASK(4, 3)
+
+#define FSA4480_SEL_SBU_REVERSE GENMASK(6, 5)
+#define FSA4480_SEL_USB GENMASK(4, 3)
+
+struct fsa4480 {
+ struct i2c_client *client;
+
+ /* used to serialize concurrent change requests */
+ struct mutex lock;
+
+ struct typec_switch_dev *sw;
+ struct typec_mux_dev *mux;
+
+ struct regmap *regmap;
+
+ u8 cur_enable;
+ u8 cur_select;
+};
+
+static const struct regmap_config fsa4480_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = FSA4480_MAX_REGISTER,
+ /* Accesses only done under fsa4480->lock */
+ .disable_locking = true,
+};
+
+static int fsa4480_switch_set(struct typec_switch_dev *sw,
+ enum typec_orientation orientation)
+{
+ struct fsa4480 *fsa = typec_switch_get_drvdata(sw);
+ u8 new_sel;
+
+ mutex_lock(&fsa->lock);
+ new_sel = FSA4480_SEL_USB;
+ if (orientation == TYPEC_ORIENTATION_REVERSE)
+ new_sel |= FSA4480_SEL_SBU_REVERSE;
+
+ if (new_sel == fsa->cur_select)
+ goto out_unlock;
+
+ if (fsa->cur_enable & FSA4480_ENABLE_SBU) {
+ /* Disable SBU output while re-configuring the switch */
+ regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE,
+ fsa->cur_enable & ~FSA4480_ENABLE_SBU);
+
+ /* 35us to allow the SBU switch to turn off */
+ usleep_range(35, 1000);
+ }
+
+ regmap_write(fsa->regmap, FSA4480_SWITCH_SELECT, new_sel);
+ fsa->cur_select = new_sel;
+
+ if (fsa->cur_enable & FSA4480_ENABLE_SBU) {
+ regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, fsa->cur_enable);
+
+ /* 15us to allow the SBU switch to turn on again */
+ usleep_range(15, 1000);
+ }
+
+out_unlock:
+ mutex_unlock(&fsa->lock);
+
+ return 0;
+}
+
+static int fsa4480_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
+{
+ struct fsa4480 *fsa = typec_mux_get_drvdata(mux);
+ u8 new_enable;
+
+ mutex_lock(&fsa->lock);
+
+ new_enable = FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB;
+ if (state->mode >= TYPEC_DP_STATE_A)
+ new_enable |= FSA4480_ENABLE_SBU;
+
+ if (new_enable == fsa->cur_enable)
+ goto out_unlock;
+
+ regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, new_enable);
+ fsa->cur_enable = new_enable;
+
+ if (new_enable & FSA4480_ENABLE_SBU) {
+		/* 15us to allow the SBU switch to turn on */
+ usleep_range(15, 1000);
+ }
+
+out_unlock:
+ mutex_unlock(&fsa->lock);
+
+ return 0;
+}
+
+static int fsa4480_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct typec_switch_desc sw_desc = { };
+ struct typec_mux_desc mux_desc = { };
+ struct fsa4480 *fsa;
+
+ fsa = devm_kzalloc(dev, sizeof(*fsa), GFP_KERNEL);
+ if (!fsa)
+ return -ENOMEM;
+
+ fsa->client = client;
+ mutex_init(&fsa->lock);
+
+ fsa->regmap = devm_regmap_init_i2c(client, &fsa4480_regmap_config);
+ if (IS_ERR(fsa->regmap))
+ return dev_err_probe(dev, PTR_ERR(fsa->regmap), "failed to initialize regmap\n");
+
+ fsa->cur_enable = FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB;
+ fsa->cur_select = FSA4480_SEL_USB;
+
+ /* set default settings */
+ regmap_write(fsa->regmap, FSA4480_SLOW_L, 0x00);
+ regmap_write(fsa->regmap, FSA4480_SLOW_R, 0x00);
+ regmap_write(fsa->regmap, FSA4480_SLOW_MIC, 0x00);
+ regmap_write(fsa->regmap, FSA4480_SLOW_SENSE, 0x00);
+ regmap_write(fsa->regmap, FSA4480_SLOW_GND, 0x00);
+ regmap_write(fsa->regmap, FSA4480_DELAY_L_R, 0x00);
+ regmap_write(fsa->regmap, FSA4480_DELAY_L_MIC, 0x00);
+ regmap_write(fsa->regmap, FSA4480_DELAY_L_SENSE, 0x00);
+ regmap_write(fsa->regmap, FSA4480_DELAY_L_AGND, 0x09);
+ regmap_write(fsa->regmap, FSA4480_SWITCH_SELECT, fsa->cur_select);
+ regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, fsa->cur_enable);
+
+ sw_desc.drvdata = fsa;
+ sw_desc.fwnode = dev_fwnode(dev);
+ sw_desc.set = fsa4480_switch_set;
+
+ fsa->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(fsa->sw))
+ return dev_err_probe(dev, PTR_ERR(fsa->sw), "failed to register typec switch\n");
+
+ mux_desc.drvdata = fsa;
+ mux_desc.fwnode = dev_fwnode(dev);
+ mux_desc.set = fsa4480_mux_set;
+
+ fsa->mux = typec_mux_register(dev, &mux_desc);
+ if (IS_ERR(fsa->mux)) {
+ typec_switch_unregister(fsa->sw);
+ return dev_err_probe(dev, PTR_ERR(fsa->mux), "failed to register typec mux\n");
+ }
+
+ i2c_set_clientdata(client, fsa);
+ return 0;
+}
+
+static int fsa4480_remove(struct i2c_client *client)
+{
+ struct fsa4480 *fsa = i2c_get_clientdata(client);
+
+ typec_mux_unregister(fsa->mux);
+ typec_switch_unregister(fsa->sw);
+
+ return 0;
+}
+
+static const struct i2c_device_id fsa4480_table[] = {
+ { "fsa4480" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, fsa4480_table);
+
+static const struct of_device_id fsa4480_of_table[] = {
+ { .compatible = "fcs,fsa4480" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, fsa4480_of_table);
+
+static struct i2c_driver fsa4480_driver = {
+ .driver = {
+ .name = "fsa4480",
+ .of_match_table = fsa4480_of_table,
+ },
+ .probe_new = fsa4480_probe,
+ .remove = fsa4480_remove,
+ .id_table = fsa4480_table,
+};
+module_i2c_driver(fsa4480_driver);
+
+MODULE_DESCRIPTION("ON Semiconductor FSA4480 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 2cdd22130834..47b733f78fb0 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -121,8 +121,8 @@ struct pmc_usb_port {
int num;
u32 iom_status;
struct pmc_usb *pmc;
- struct typec_mux *typec_mux;
- struct typec_switch *typec_sw;
+ struct typec_mux_dev *typec_mux;
+ struct typec_switch_dev *typec_sw;
struct usb_role_switch *usb_sw;
enum typec_orientation orientation;
@@ -173,7 +173,7 @@ static int hsl_orientation(struct pmc_usb_port *port)
return port->orientation - 1;
}
-static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
+static int pmc_usb_send_command(struct intel_scu_ipc_dev *ipc, u8 *msg, u32 len)
{
u8 response[4];
u8 status_res;
@@ -184,7 +184,7 @@ static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
* Status can be checked from the response message if the
* function intel_scu_ipc_dev_command succeeds.
*/
- ret = intel_scu_ipc_dev_command(port->pmc->ipc, PMC_USBC_CMD, 0, msg,
+ ret = intel_scu_ipc_dev_command(ipc, PMC_USBC_CMD, 0, msg,
len, response, sizeof(response));
if (ret)
@@ -203,6 +203,23 @@ static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
return 0;
}
+static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
+{
+ int retry_count = 3;
+ int ret;
+
+ /*
+	 * If the PMC is busy, retry the command a few times
+ */
+ while (retry_count--) {
+ ret = pmc_usb_send_command(port->pmc->ipc, msg, len);
+ if (ret != -EBUSY)
+ break;
+ }
+
+ return ret;
+}
+
static int
pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp)
{
@@ -416,7 +433,7 @@ static int pmc_usb_connect(struct pmc_usb_port *port, enum usb_role role)
}
static int
-pmc_usb_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
+pmc_usb_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
{
struct pmc_usb_port *port = typec_mux_get_drvdata(mux);
@@ -452,7 +469,7 @@ pmc_usb_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
return -EOPNOTSUPP;
}
-static int pmc_usb_set_orientation(struct typec_switch *sw,
+static int pmc_usb_set_orientation(struct typec_switch_dev *sw,
enum typec_orientation orientation)
{
struct pmc_usb_port *port = typec_switch_get_drvdata(sw);
diff --git a/drivers/usb/typec/mux/pi3usb30532.c b/drivers/usb/typec/mux/pi3usb30532.c
index 7afe275b17d0..6ce9f282594e 100644
--- a/drivers/usb/typec/mux/pi3usb30532.c
+++ b/drivers/usb/typec/mux/pi3usb30532.c
@@ -23,8 +23,8 @@
struct pi3usb30532 {
struct i2c_client *client;
struct mutex lock; /* protects the cached conf register */
- struct typec_switch *sw;
- struct typec_mux *mux;
+ struct typec_switch_dev *sw;
+ struct typec_mux_dev *mux;
u8 conf;
};
@@ -45,7 +45,7 @@ static int pi3usb30532_set_conf(struct pi3usb30532 *pi, u8 new_conf)
return 0;
}
-static int pi3usb30532_sw_set(struct typec_switch *sw,
+static int pi3usb30532_sw_set(struct typec_switch_dev *sw,
enum typec_orientation orientation)
{
struct pi3usb30532 *pi = typec_switch_get_drvdata(sw);
@@ -74,7 +74,7 @@ static int pi3usb30532_sw_set(struct typec_switch *sw,
}
static int
-pi3usb30532_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
+pi3usb30532_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
{
struct pi3usb30532 *pi = typec_mux_get_drvdata(mux);
u8 new_conf;
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index 72f9001b0792..96c55eaf3f80 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -1708,8 +1708,8 @@ static int fusb302_probe(struct i2c_client *client,
*/
if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
chip->extcon = extcon_get_extcon_dev(name);
- if (!chip->extcon)
- return -EPROBE_DEFER;
+ if (IS_ERR(chip->extcon))
+ return PTR_ERR(chip->extcon);
}
chip->vbus = devm_regulator_get(chip->dev, "vbus");
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 16b4560216ba..dfbba5ae9487 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -93,6 +93,8 @@ struct tps6598x {
struct power_supply *psy;
struct power_supply_desc psy_desc;
enum power_supply_usb_type usb_type;
+
+ u16 pwr_status;
};
static enum power_supply_property tps6598x_psy_props[] = {
@@ -230,17 +232,12 @@ static int tps6598x_connect(struct tps6598x *tps, u32 status)
{
struct typec_partner_desc desc;
enum typec_pwr_opmode mode;
- u16 pwr_status;
int ret;
if (tps->partner)
return 0;
- ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &pwr_status);
- if (ret < 0)
- return ret;
-
- mode = TPS_POWER_STATUS_PWROPMODE(pwr_status);
+ mode = TPS_POWER_STATUS_PWROPMODE(tps->pwr_status);
desc.usb_pd = mode == TYPEC_PWR_MODE_PD;
desc.accessory = TYPEC_ACCESSORY_NONE; /* XXX: handle accessories */
@@ -455,6 +452,7 @@ static bool tps6598x_read_power_status(struct tps6598x *tps)
dev_err(tps->dev, "failed to read power status: %d\n", ret);
return false;
}
+ tps->pwr_status = pwr_status;
trace_tps6598x_power_status(pwr_status);
return true;
@@ -601,15 +599,8 @@ static const struct regmap_config tps6598x_regmap_config = {
static int tps6598x_psy_get_online(struct tps6598x *tps,
union power_supply_propval *val)
{
- int ret;
- u16 pwr_status;
-
- ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &pwr_status);
- if (ret < 0)
- return ret;
-
- if (TPS_POWER_STATUS_CONNECTION(pwr_status) &&
- TPS_POWER_STATUS_SOURCESINK(pwr_status)) {
+ if (TPS_POWER_STATUS_CONNECTION(tps->pwr_status) &&
+ TPS_POWER_STATUS_SOURCESINK(tps->pwr_status)) {
val->intval = 1;
} else {
val->intval = 0;
@@ -622,15 +613,11 @@ static int tps6598x_psy_get_prop(struct power_supply *psy,
union power_supply_propval *val)
{
struct tps6598x *tps = power_supply_get_drvdata(psy);
- u16 pwr_status;
int ret = 0;
switch (psp) {
case POWER_SUPPLY_PROP_USB_TYPE:
- ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &pwr_status);
- if (ret < 0)
- return ret;
- if (TPS_POWER_STATUS_PWROPMODE(pwr_status) == TYPEC_PWR_MODE_PD)
+ if (TPS_POWER_STATUS_PWROPMODE(tps->pwr_status) == TYPEC_PWR_MODE_PD)
val->intval = POWER_SUPPLY_USB_TYPE_PD;
else
val->intval = POWER_SUPPLY_USB_TYPE_C;
@@ -837,6 +824,11 @@ static int tps6598x_probe(struct i2c_client *client)
fwnode_handle_put(fwnode);
if (status & TPS_STATUS_PLUG_PRESENT) {
+ ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &tps->pwr_status);
+ if (ret < 0) {
+ dev_err(tps->dev, "failed to read power status: %d\n", ret);
+ goto err_role_put;
+ }
ret = tps6598x_connect(tps, status);
if (ret)
dev_err(&client->dev, "failed to register partner\n");
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index a6045aef0d04..cbd862f9f2a1 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1063,6 +1063,14 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
con->num = index + 1;
con->ucsi = ucsi;
+ cap->fwnode = ucsi_find_fwnode(con);
+ con->usb_role_sw = fwnode_usb_role_switch_get(cap->fwnode);
+ if (IS_ERR(con->usb_role_sw)) {
+ dev_err(ucsi->dev, "con%d: failed to get usb role switch\n",
+ con->num);
+ return PTR_ERR(con->usb_role_sw);
+ }
+
/* Delay other interactions with the con until registration is complete */
mutex_lock(&con->lock);
@@ -1098,7 +1106,6 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DEBUG_ACCESSORY)
*accessory = TYPEC_ACCESSORY_DEBUG;
- cap->fwnode = ucsi_find_fwnode(con);
cap->driver_data = con;
cap->ops = &ucsi_ops;
@@ -1156,13 +1163,6 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
ucsi_port_psy_changed(con);
}
- con->usb_role_sw = fwnode_usb_role_switch_get(cap->fwnode);
- if (IS_ERR(con->usb_role_sw)) {
- dev_err(ucsi->dev, "con%d: failed to get usb role switch\n",
- con->num);
- con->usb_role_sw = NULL;
- }
-
/* Only notify USB controller if partner supports USB data */
if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) & UCSI_CONSTAT_PARTNER_FLAG_USB))
u_role = USB_ROLE_NONE;
@@ -1196,6 +1196,32 @@ out_unlock:
return ret;
}
+static void ucsi_unregister_connectors(struct ucsi *ucsi)
+{
+ struct ucsi_connector *con;
+ int i;
+
+ if (!ucsi->connector)
+ return;
+
+ for (i = 0; i < ucsi->cap.num_connectors; i++) {
+ con = &ucsi->connector[i];
+
+ if (!con->wq)
+ break;
+
+ cancel_work_sync(&con->work);
+ ucsi_unregister_partner(con);
+ ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(con);
+ destroy_workqueue(con->wq);
+ typec_unregister_port(con->port);
+ }
+
+ kfree(ucsi->connector);
+ ucsi->connector = NULL;
+}
+
/**
* ucsi_init - Initialize UCSI interface
* @ucsi: UCSI to be initialized
@@ -1204,7 +1230,6 @@ out_unlock:
*/
static int ucsi_init(struct ucsi *ucsi)
{
- struct ucsi_connector *con;
u64 command;
int ret;
int i;
@@ -1235,7 +1260,7 @@ static int ucsi_init(struct ucsi *ucsi)
}
/* Allocate the connectors. Released in ucsi_unregister() */
- ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
+ ucsi->connector = kcalloc(ucsi->cap.num_connectors,
sizeof(*ucsi->connector), GFP_KERNEL);
if (!ucsi->connector) {
ret = -ENOMEM;
@@ -1259,15 +1284,7 @@ static int ucsi_init(struct ucsi *ucsi)
return 0;
err_unregister:
- for (con = ucsi->connector; con->port; con++) {
- ucsi_unregister_partner(con);
- ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
- ucsi_unregister_port_psy(con);
- if (con->wq)
- destroy_workqueue(con->wq);
- typec_unregister_port(con->port);
- con->port = NULL;
- }
+ ucsi_unregister_connectors(ucsi);
err_reset:
memset(&ucsi->cap, 0, sizeof(ucsi->cap));
@@ -1278,12 +1295,20 @@ err:
static void ucsi_init_work(struct work_struct *work)
{
- struct ucsi *ucsi = container_of(work, struct ucsi, work);
+ struct ucsi *ucsi = container_of(work, struct ucsi, work.work);
int ret;
ret = ucsi_init(ucsi);
if (ret)
dev_err(ucsi->dev, "PPM init failed (%d)\n", ret);
+
+ if (ret == -EPROBE_DEFER) {
+ if (ucsi->work_count++ > UCSI_ROLE_SWITCH_WAIT_COUNT)
+ return;
+
+ queue_delayed_work(system_long_wq, &ucsi->work,
+ UCSI_ROLE_SWITCH_INTERVAL);
+ }
}
/**
@@ -1323,7 +1348,7 @@ struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
if (!ucsi)
return ERR_PTR(-ENOMEM);
- INIT_WORK(&ucsi->work, ucsi_init_work);
+ INIT_DELAYED_WORK(&ucsi->work, ucsi_init_work);
mutex_init(&ucsi->ppm_lock);
ucsi->dev = dev;
ucsi->ops = ops;
@@ -1358,7 +1383,7 @@ int ucsi_register(struct ucsi *ucsi)
if (!ucsi->version)
return -ENODEV;
- queue_work(system_long_wq, &ucsi->work);
+ queue_delayed_work(system_long_wq, &ucsi->work, 0);
return 0;
}
@@ -1373,26 +1398,14 @@ EXPORT_SYMBOL_GPL(ucsi_register);
void ucsi_unregister(struct ucsi *ucsi)
{
u64 cmd = UCSI_SET_NOTIFICATION_ENABLE;
- int i;
/* Make sure that we are not in the middle of driver initialization */
- cancel_work_sync(&ucsi->work);
+ cancel_delayed_work_sync(&ucsi->work);
/* Disable notifications */
ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
- for (i = 0; i < ucsi->cap.num_connectors; i++) {
- cancel_work_sync(&ucsi->connector[i].work);
- ucsi_unregister_partner(&ucsi->connector[i]);
- ucsi_unregister_altmodes(&ucsi->connector[i],
- UCSI_RECIPIENT_CON);
- ucsi_unregister_port_psy(&ucsi->connector[i]);
- if (ucsi->connector[i].wq)
- destroy_workqueue(ucsi->connector[i].wq);
- typec_unregister_port(ucsi->connector[i].port);
- }
-
- kfree(ucsi->connector);
+ ucsi_unregister_connectors(ucsi);
}
EXPORT_SYMBOL_GPL(ucsi_unregister);
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 280f1e1bda2c..8eb391e3e592 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -287,7 +287,11 @@ struct ucsi {
struct ucsi_capability cap;
struct ucsi_connector *connector;
- struct work_struct work;
+ struct delayed_work work;
+ int work_count;
+#define UCSI_ROLE_SWITCH_RETRY_PER_HZ 10
+#define UCSI_ROLE_SWITCH_INTERVAL (HZ / UCSI_ROLE_SWITCH_RETRY_PER_HZ)
+#define UCSI_ROLE_SWITCH_WAIT_COUNT (10 * UCSI_ROLE_SWITCH_RETRY_PER_HZ)
/* PPM Communication lock */
struct mutex ppm_lock;
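
The delayed-work conversion above gives the PPM init path a bounded retry budget: when ucsi_init() fails with -EPROBE_DEFER (typically because the USB role switch driver has not probed yet), the work is re-queued every UCSI_ROLE_SWITCH_INTERVAL, i.e. HZ/10 or 100 ms, and gives up after UCSI_ROLE_SWITCH_WAIT_COUNT = 10 * 10 = 100 attempts, roughly a ten-second window. A minimal userspace sketch of that budget follows; fake_init() and the "succeeds on attempt 25" threshold are purely illustrative stand-ins for ucsi_init() returning -EPROBE_DEFER.

#include <stdio.h>

#define RETRY_PER_SEC     10                      /* mirrors UCSI_ROLE_SWITCH_RETRY_PER_HZ */
#define RETRY_INTERVAL_MS (1000 / RETRY_PER_SEC)  /* mirrors HZ / UCSI_ROLE_SWITCH_RETRY_PER_HZ */
#define WAIT_COUNT        (10 * RETRY_PER_SEC)    /* mirrors UCSI_ROLE_SWITCH_WAIT_COUNT */

/* Stand-in for ucsi_init(): pretend the role switch shows up on attempt 25. */
static int fake_init(int attempt)
{
	return attempt < 25 ? -1 /* would be -EPROBE_DEFER */ : 0;
}

int main(void)
{
	int attempt;

	for (attempt = 0; attempt <= WAIT_COUNT; attempt++) {
		if (fake_init(attempt) == 0) {
			printf("initialized after %d ms\n", attempt * RETRY_INTERVAL_MS);
			return 0;
		}
	}
	printf("gave up after ~%d ms\n", WAIT_COUNT * RETRY_INTERVAL_MS);
	return 1;
}
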
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 6771f05e32c2..8873c1644a29 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -19,7 +19,7 @@
struct ucsi_acpi {
struct device *dev;
struct ucsi *ucsi;
- void __iomem *base;
+ void *base;
struct completion complete;
unsigned long flags;
guid_t guid;
@@ -51,7 +51,7 @@ static int ucsi_acpi_read(struct ucsi *ucsi, unsigned int offset,
if (ret)
return ret;
- memcpy(val, (const void __force *)(ua->base + offset), val_len);
+ memcpy(val, ua->base + offset, val_len);
return 0;
}
@@ -61,7 +61,7 @@ static int ucsi_acpi_async_write(struct ucsi *ucsi, unsigned int offset,
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- memcpy((void __force *)(ua->base + offset), val, val_len);
+ memcpy(ua->base + offset, val, val_len);
return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_WRITE);
}
@@ -132,20 +132,9 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
return -ENODEV;
}
- /* This will make sure we can use ioremap() */
- status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
-
- /*
- * NOTE: The memory region for the data structures is used also in an
- * operation region, which means ACPI has already reserved it. Therefore
- * it can not be requested here, and we can not use
- * devm_ioremap_resource().
- */
- ua->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!ua->base)
- return -ENOMEM;
+ ua->base = devm_memremap(&pdev->dev, res->start, resource_size(res), MEMREMAP_WB);
+ if (IS_ERR(ua->base))
+ return PTR_ERR(ua->base);
ret = guid_parse(UCSI_DSM_UUID, &ua->guid);
if (ret)
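
Switching ucsi_acpi from devm_ioremap() to devm_memremap() also changes the error convention the probe path has to check: ioremap-style helpers return NULL on failure, while memremap-style helpers return an ERR_PTR()-encoded errno, hence the move from "!ua->base" to IS_ERR()/PTR_ERR(). The self-contained userspace snippet below mimics that encoding; the helpers are simplified re-implementations for illustration, not the kernel's include/linux/err.h, and fake_memremap() is a made-up stand-in.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for devm_memremap(): fails with an ERR_PTR instead of NULL. */
static void *fake_memremap(int should_fail)
{
	static char mapping[64];

	return should_fail ? ERR_PTR(-ENOMEM) : (void *)mapping;
}

int main(void)
{
	void *base = fake_memremap(1);

	if (IS_ERR(base))
		printf("mapping failed: %ld\n", PTR_ERR(base));	/* prints -12 */
	else
		printf("mapped at %p\n", base);
	return 0;
}
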
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index d8d3892e5a69..3c6d452e3bf4 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -393,7 +393,6 @@ static int stub_probe(struct usb_device *udev)
err_port:
dev_set_drvdata(&udev->dev, NULL);
- usb_put_dev(udev);
/* we already have busid_priv, just lock busid_lock */
spin_lock(&busid_priv->busid_lock);
@@ -408,6 +407,7 @@ call_put_busid_priv:
put_busid_priv(busid_priv);
sdev_free:
+ usb_put_dev(udev);
stub_device_free(sdev);
return rc;
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 325c22008e53..5dd41e8215e0 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -138,7 +138,9 @@ static int tweak_set_configuration_cmd(struct urb *urb)
req = (struct usb_ctrlrequest *) urb->setup_packet;
config = le16_to_cpu(req->wValue);
+ usb_lock_device(sdev->udev);
err = usb_set_configuration(sdev->udev, config);
+ usb_unlock_device(sdev->udev);
if (err && err != -ENODEV)
dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
config, err);
diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c
index f480d54f308c..5a09a09cca70 100644
--- a/drivers/vdpa/alibaba/eni_vdpa.c
+++ b/drivers/vdpa/alibaba/eni_vdpa.c
@@ -470,7 +470,7 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
eni_vdpa = vdpa_alloc_device(struct eni_vdpa, vdpa,
- dev, &eni_vdpa_ops, NULL, false);
+ dev, &eni_vdpa_ops, 1, 1, NULL, false);
if (IS_ERR(eni_vdpa)) {
ENI_ERR(pdev, "failed to allocate vDPA structure\n");
return PTR_ERR(eni_vdpa);
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 4366320fb68d..0a5670729412 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -290,16 +290,16 @@ static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
struct ifcvf_hw *vf = &adapter->vf;
int config_vector, ret;
- if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
- return 0;
-
if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
- /* vector 0 ~ vf->nr_vring for vqs, num vf->nr_vring vector for config interrupt */
config_vector = vf->nr_vring;
-
- if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
+ else if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
/* vector 0 for vqs and 1 for config interrupt */
config_vector = 1;
+ else if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
+ /* re-use the vqs vector */
+ return 0;
+ else
+ return -EINVAL;
snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
pci_name(pdev));
@@ -626,6 +626,11 @@ static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
return vf->config_size;
}
+static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+{
+ return 0;
+}
+
static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
unsigned int offset,
void *buf, unsigned int len)
@@ -704,6 +709,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.get_device_id = ifcvf_vdpa_get_device_id,
.get_vendor_id = ifcvf_vdpa_get_vendor_id,
.get_vq_align = ifcvf_vdpa_get_vq_align,
+ .get_vq_group = ifcvf_vdpa_get_vq_group,
.get_config_size = ifcvf_vdpa_get_config_size,
.get_config = ifcvf_vdpa_get_config,
.set_config = ifcvf_vdpa_set_config,
@@ -758,14 +764,13 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
pdev = ifcvf_mgmt_dev->pdev;
dev = &pdev->dev;
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
- dev, &ifc_vdpa_ops, name, false);
+ dev, &ifc_vdpa_ops, 1, 1, name, false);
if (IS_ERR(adapter)) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
return PTR_ERR(adapter);
}
ifcvf_mgmt_dev->adapter = adapter;
- pci_set_drvdata(pdev, ifcvf_mgmt_dev);
vf = &adapter->vf;
vf->dev_type = get_dev_type(pdev);
@@ -880,6 +885,8 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
+ pci_set_drvdata(pdev, ifcvf_mgmt_dev);
+
return 0;
err:
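
The reshuffled ifcvf_request_config_irq() now reads as a single decision table: a dedicated vector per VQ plus config means the config interrupt takes vector nr_vring, one vector shared by all VQs plus a separate config vector means vector 1, a fully shared device vector means there is nothing extra to request, and any unexpected status is rejected with -EINVAL rather than leaving config_vector unset. A small standalone C rendering of that table follows; the enum names are shortened and the sentinel return values are illustrative only.

#include <stdio.h>

enum msix_status { PER_VQ_AND_CONFIG, SHARED_VQ_AND_CONFIG, DEV_SHARED, BOGUS };

/* Returns the config vector to request, -1 for "reuse the shared vector,
 * nothing to request", -2 for "invalid status" (stands in for -EINVAL). */
static int config_vector(enum msix_status status, int nr_vring)
{
	switch (status) {
	case PER_VQ_AND_CONFIG:
		return nr_vring;   /* vectors 0..nr_vring-1 for VQs, nr_vring for config */
	case SHARED_VQ_AND_CONFIG:
		return 1;          /* vector 0 for all VQs, vector 1 for config */
	case DEV_SHARED:
		return -1;         /* everything shares vector 0 */
	default:
		return -2;
	}
}

int main(void)
{
	printf("%d %d %d %d\n",
	       config_vector(PER_VQ_AND_CONFIG, 4),
	       config_vector(SHARED_VQ_AND_CONFIG, 4),
	       config_vector(DEV_SHARED, 4),
	       config_vector(BOGUS, 4));
	return 0;
}
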
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index daaf7b503677..44104093163b 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -61,6 +61,8 @@ struct mlx5_control_vq {
struct vringh_kiov riov;
struct vringh_kiov wiov;
unsigned short head;
+ unsigned int received_desc;
+ unsigned int completed_desc;
};
struct mlx5_vdpa_wq_ent {
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index e0de44000d92..b7a955479156 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -48,6 +48,8 @@ MODULE_LICENSE("Dual BSD/GPL");
#define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
+#define MLX5V_UNTAGGED 0x1000
+
struct mlx5_vdpa_net_resources {
u32 tisn;
u32 tdn;
@@ -119,6 +121,7 @@ struct mlx5_vdpa_virtqueue {
struct mlx5_vdpa_umem umem2;
struct mlx5_vdpa_umem umem3;
+ u32 counter_set_id;
bool initialized;
int index;
u32 virtq_id;
@@ -143,6 +146,8 @@ static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
return idx <= mvdev->max_idx;
}
+#define MLX5V_MACVLAN_SIZE 256
+
struct mlx5_vdpa_net {
struct mlx5_vdpa_dev mvdev;
struct mlx5_vdpa_net_resources res;
@@ -154,17 +159,22 @@ struct mlx5_vdpa_net {
* since memory map might change and we need to destroy and create
* resources while the driver is operational.
*/
- struct mutex reslock;
+ struct rw_semaphore reslock;
struct mlx5_flow_table *rxft;
- struct mlx5_fc *rx_counter;
- struct mlx5_flow_handle *rx_rule_ucast;
- struct mlx5_flow_handle *rx_rule_mcast;
bool setup;
u32 cur_num_vqs;
u32 rqt_size;
struct notifier_block nb;
struct vdpa_callback config_cb;
struct mlx5_vdpa_wq_ent cvq_ent;
+ struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
+};
+
+struct macvlan_node {
+ struct hlist_node hlist;
+ struct mlx5_flow_handle *ucast_rule;
+ struct mlx5_flow_handle *mcast_rule;
+ u64 macvlan;
};
static void free_resources(struct mlx5_vdpa_net *ndev);
@@ -818,6 +828,12 @@ static u16 get_features_12_3(u64 features)
(!!(features & BIT_ULL(VIRTIO_NET_F_GUEST_CSUM)) << 6);
}
+static bool counters_supported(const struct mlx5_vdpa_dev *mvdev)
+{
+ return MLX5_CAP_GEN_64(mvdev->mdev, general_obj_types) &
+ BIT_ULL(MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+}
+
static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
@@ -872,6 +888,8 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
+ if (counters_supported(&ndev->mvdev))
+ MLX5_SET(virtio_q, vq_ctx, counter_set_id, mvq->counter_set_id);
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
if (err)
@@ -1135,6 +1153,47 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
return err;
}
+static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ u32 in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(create_virtio_q_counters_out)] = {};
+ void *cmd_hdr;
+ int err;
+
+ if (!counters_supported(&ndev->mvdev))
+ return 0;
+
+ cmd_hdr = MLX5_ADDR_OF(create_virtio_q_counters_in, in, hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ mvq->counter_set_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return 0;
+}
+
+static void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_virtio_q_counters_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_virtio_q_counters_out)] = {};
+
+ if (!counters_supported(&ndev->mvdev))
+ return;
+
+ MLX5_SET(destroy_virtio_q_counters_in, in, hdr.opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_id, mvq->counter_set_id);
+ MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid);
+ MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+ if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
+ mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id);
+}
+
static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
u16 idx = mvq->index;
@@ -1162,6 +1221,10 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
if (err)
goto err_connect;
+ err = counter_set_alloc(ndev, mvq);
+ if (err)
+ goto err_counter;
+
err = create_virtqueue(ndev, mvq);
if (err)
goto err_connect;
@@ -1179,6 +1242,8 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
return 0;
err_connect:
+ counter_set_dealloc(ndev, mvq);
+err_counter:
qp_destroy(ndev, &mvq->vqqp);
err_vqqp:
qp_destroy(ndev, &mvq->fwqp);
@@ -1223,6 +1288,7 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
suspend_vq(ndev, mvq);
destroy_virtqueue(ndev, mvq);
+ counter_set_dealloc(ndev, mvq);
qp_destroy(ndev, &mvq->vqqp);
qp_destroy(ndev, &mvq->fwqp);
cq_destroy(ndev, mvq->index);
@@ -1347,12 +1413,17 @@ static void destroy_tir(struct mlx5_vdpa_net *ndev)
mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn);
}
-static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
+#define MAX_STEERING_ENT 0x8000
+#define MAX_STEERING_GROUPS 2
+
+static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
+ u16 vid, bool tagged,
+ struct mlx5_flow_handle **ucast,
+ struct mlx5_flow_handle **mcast)
{
- struct mlx5_flow_destination dest[2] = {};
- struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
void *headers_c;
void *headers_v;
@@ -1365,85 +1436,178 @@ static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
return -ENOMEM;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- ft_attr.max_fte = 2;
- ft_attr.autogroup.max_num_groups = 2;
+ headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+ dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
+ dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
+ memset(dmac_c, 0xff, ETH_ALEN);
+ ether_addr_copy(dmac_v, mac);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+ if (tagged) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, vid);
+ }
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = ndev->res.tirn;
+ rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
- ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
- if (!ns) {
- mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
- err = -EOPNOTSUPP;
- goto err_ns;
+ *ucast = rule;
+
+ memset(dmac_c, 0, ETH_ALEN);
+ memset(dmac_v, 0, ETH_ALEN);
+ dmac_c[0] = 1;
+ dmac_v[0] = 1;
+ rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1);
+ kvfree(spec);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_mcast;
}
- ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
- if (IS_ERR(ndev->rxft)) {
- err = PTR_ERR(ndev->rxft);
- goto err_ns;
+ *mcast = rule;
+ return 0;
+
+err_mcast:
+ mlx5_del_flow_rules(*ucast);
+ return err;
+}
+
+static void mlx5_vdpa_del_mac_vlan_rules(struct mlx5_vdpa_net *ndev,
+ struct mlx5_flow_handle *ucast,
+ struct mlx5_flow_handle *mcast)
+{
+ mlx5_del_flow_rules(ucast);
+ mlx5_del_flow_rules(mcast);
+}
+
+static u64 search_val(u8 *mac, u16 vlan, bool tagged)
+{
+ u64 val;
+
+ if (!tagged)
+ vlan = MLX5V_UNTAGGED;
+
+ val = (u64)vlan << 48 |
+ (u64)mac[0] << 40 |
+ (u64)mac[1] << 32 |
+ (u64)mac[2] << 24 |
+ (u64)mac[3] << 16 |
+ (u64)mac[4] << 8 |
+ (u64)mac[5];
+
+ return val;
+}
+
+static struct macvlan_node *mac_vlan_lookup(struct mlx5_vdpa_net *ndev, u64 value)
+{
+ struct macvlan_node *pos;
+ u32 idx;
+
+ idx = hash_64(value, 8); /* 8-bit hash -> MLX5V_MACVLAN_SIZE (256) buckets */
+ hlist_for_each_entry(pos, &ndev->macvlan_hash[idx], hlist) {
+ if (pos->macvlan == value)
+ return pos;
}
+ return NULL;
+}
+
+static int mac_vlan_add(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged) // vlan -> vid
+{
+ struct macvlan_node *ptr;
+ u64 val;
+ u32 idx;
+ int err;
+
+ val = search_val(mac, vlan, tagged);
+ if (mac_vlan_lookup(ndev, val))
+ return -EEXIST;
+
+ ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ err = mlx5_vdpa_add_mac_vlan_rules(ndev, ndev->config.mac, vlan, tagged,
+ &ptr->ucast_rule, &ptr->mcast_rule);
+ if (err)
+ goto err_add;
+
+ ptr->macvlan = val;
+ idx = hash_64(val, 8);
+ hlist_add_head(&ptr->hlist, &ndev->macvlan_hash[idx]);
+ return 0;
+
+err_add:
+ kfree(ptr);
+ return err;
+}
+
+static void mac_vlan_del(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged)
+{
+ struct macvlan_node *ptr;
+
+ ptr = mac_vlan_lookup(ndev, search_val(mac, vlan, tagged));
+ if (!ptr)
+ return;
+
+ hlist_del(&ptr->hlist);
+ mlx5_vdpa_del_mac_vlan_rules(ndev, ptr->ucast_rule, ptr->mcast_rule);
+ kfree(ptr);
+}
+
+static void clear_mac_vlan_table(struct mlx5_vdpa_net *ndev)
+{
+ struct macvlan_node *pos;
+ struct hlist_node *n;
+ int i;
- ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false);
- if (IS_ERR(ndev->rx_counter)) {
- err = PTR_ERR(ndev->rx_counter);
- goto err_fc;
+ for (i = 0; i < MLX5V_MACVLAN_SIZE; i++) {
+ hlist_for_each_entry_safe(pos, n, &ndev->macvlan_hash[i], hlist) {
+ hlist_del(&pos->hlist);
+ mlx5_vdpa_del_mac_vlan_rules(ndev, pos->ucast_rule, pos->mcast_rule);
+ kfree(pos);
+ }
}
+}
- headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
- dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
- memset(dmac_c, 0xff, ETH_ALEN);
- headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
- dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
- ether_addr_copy(dmac_v, ndev->config.mac);
+static int setup_steering(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ int err;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT;
- dest[0].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dest[0].tir_num = ndev->res.tirn;
- dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(ndev->rx_counter);
- ndev->rx_rule_ucast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dest, 2);
+ ft_attr.max_fte = MAX_STEERING_ENT;
+ ft_attr.autogroup.max_num_groups = MAX_STEERING_GROUPS;
- if (IS_ERR(ndev->rx_rule_ucast)) {
- err = PTR_ERR(ndev->rx_rule_ucast);
- ndev->rx_rule_ucast = NULL;
- goto err_rule_ucast;
+ ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
+ if (!ns) {
+ mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
+ return -EOPNOTSUPP;
}
- memset(dmac_c, 0, ETH_ALEN);
- memset(dmac_v, 0, ETH_ALEN);
- dmac_c[0] = 1;
- dmac_v[0] = 1;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- ndev->rx_rule_mcast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dest, 1);
- if (IS_ERR(ndev->rx_rule_mcast)) {
- err = PTR_ERR(ndev->rx_rule_mcast);
- ndev->rx_rule_mcast = NULL;
- goto err_rule_mcast;
+ ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ndev->rxft)) {
+ mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n");
+ return PTR_ERR(ndev->rxft);
}
- kvfree(spec);
+ err = mac_vlan_add(ndev, ndev->config.mac, 0, false);
+ if (err)
+ goto err_add;
+
return 0;
-err_rule_mcast:
- mlx5_del_flow_rules(ndev->rx_rule_ucast);
- ndev->rx_rule_ucast = NULL;
-err_rule_ucast:
- mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
-err_fc:
+err_add:
mlx5_destroy_flow_table(ndev->rxft);
-err_ns:
- kvfree(spec);
return err;
}
-static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
+static void teardown_steering(struct mlx5_vdpa_net *ndev)
{
- if (!ndev->rx_rule_ucast)
- return;
-
- mlx5_del_flow_rules(ndev->rx_rule_mcast);
- ndev->rx_rule_mcast = NULL;
- mlx5_del_flow_rules(ndev->rx_rule_ucast);
- ndev->rx_rule_ucast = NULL;
- mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
+ clear_mac_vlan_table(ndev);
mlx5_destroy_flow_table(ndev->rxft);
}
@@ -1494,9 +1658,9 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
/* Need to recreate the flow table entry, so that the packet can be forwarded back
*/
- remove_fwd_to_tir(ndev);
+ mac_vlan_del(ndev, ndev->config.mac, 0, false);
- if (add_fwd_to_tir(ndev)) {
+ if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) {
mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
/* Although it hardly runs here, we still need to double check */
@@ -1520,7 +1684,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
memcpy(ndev->config.mac, mac_back, ETH_ALEN);
- if (add_fwd_to_tir(ndev))
+ if (mac_vlan_add(ndev, ndev->config.mac, 0, false))
mlx5_vdpa_warn(mvdev, "restore forward rules failed: insert forward rules failed\n");
break;
@@ -1622,6 +1786,42 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
return status;
}
+static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
+{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+ __virtio16 vlan;
+ size_t read;
+ u16 id;
+
+ switch (cmd) {
+ case VIRTIO_NET_CTRL_VLAN_ADD:
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
+ if (read != sizeof(vlan))
+ break;
+
+ id = mlx5vdpa16_to_cpu(mvdev, vlan);
+ if (mac_vlan_add(ndev, ndev->config.mac, id, true))
+ break;
+
+ status = VIRTIO_NET_OK;
+ break;
+ case VIRTIO_NET_CTRL_VLAN_DEL:
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
+ if (read != sizeof(vlan))
+ break;
+
+ id = mlx5vdpa16_to_cpu(mvdev, vlan);
+ mac_vlan_del(ndev, ndev->config.mac, id, true);
+ break;
+ default:
+ break;
+}
+
+return status;
+}
+
static void mlx5_cvq_kick_handler(struct work_struct *work)
{
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
@@ -1638,7 +1838,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
ndev = to_mlx5_vdpa_ndev(mvdev);
cvq = &mvdev->cvq;
- mutex_lock(&ndev->reslock);
+ down_write(&ndev->reslock);
if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto out;
@@ -1659,6 +1859,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
if (read != sizeof(ctrl))
break;
+ cvq->received_desc++;
switch (ctrl.class) {
case VIRTIO_NET_CTRL_MAC:
status = handle_ctrl_mac(mvdev, ctrl.cmd);
@@ -1666,7 +1867,9 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
case VIRTIO_NET_CTRL_MQ:
status = handle_ctrl_mq(mvdev, ctrl.cmd);
break;
-
+ case VIRTIO_NET_CTRL_VLAN:
+ status = handle_ctrl_vlan(mvdev, ctrl.cmd);
+ break;
default:
break;
}
@@ -1682,12 +1885,13 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
if (vringh_need_notify_iotlb(&cvq->vring))
vringh_notify(&cvq->vring);
+ cvq->completed_desc++;
queue_work(mvdev->wq, &wqent->work);
break;
}
out:
- mutex_unlock(&ndev->reslock);
+ up_write(&ndev->reslock);
}
static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
@@ -1888,6 +2092,11 @@ static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
return PAGE_SIZE;
}
+static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+{
+ return 0;
+}
+
enum { MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
MLX5_VIRTIO_NET_F_CSUM = 1 << 10,
MLX5_VIRTIO_NET_F_HOST_TSO6 = 1 << 11,
@@ -1925,6 +2134,7 @@ static u64 get_supported_features(struct mlx5_core_dev *mdev)
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MQ);
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MTU);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VLAN);
return mlx_vdpa_features;
}
@@ -2185,7 +2395,7 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
- WARN_ON(!mutex_is_locked(&ndev->reslock));
+ WARN_ON(!rwsem_is_locked(&ndev->reslock));
if (ndev->setup) {
mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
@@ -2210,9 +2420,9 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
goto err_tir;
}
- err = add_fwd_to_tir(ndev);
+ err = setup_steering(ndev);
if (err) {
- mlx5_vdpa_warn(mvdev, "add_fwd_to_tir\n");
+ mlx5_vdpa_warn(mvdev, "setup_steering\n");
goto err_fwd;
}
ndev->setup = true;
@@ -2233,12 +2443,12 @@ out:
static void teardown_driver(struct mlx5_vdpa_net *ndev)
{
- WARN_ON(!mutex_is_locked(&ndev->reslock));
+ WARN_ON(!rwsem_is_locked(&ndev->reslock));
if (!ndev->setup)
return;
- remove_fwd_to_tir(ndev);
+ teardown_steering(ndev);
destroy_tir(ndev);
destroy_rqt(ndev);
teardown_virtqueues(ndev);
@@ -2263,7 +2473,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
print_status(mvdev, status, true);
- mutex_lock(&ndev->reslock);
+ down_write(&ndev->reslock);
if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
@@ -2279,14 +2489,14 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
}
ndev->mvdev.status = status;
- mutex_unlock(&ndev->reslock);
+ up_write(&ndev->reslock);
return;
err_setup:
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
err_clear:
- mutex_unlock(&ndev->reslock);
+ up_write(&ndev->reslock);
}
static int mlx5_vdpa_reset(struct vdpa_device *vdev)
@@ -2297,12 +2507,14 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
print_status(mvdev, 0, true);
mlx5_vdpa_info(mvdev, "performing device reset\n");
- mutex_lock(&ndev->reslock);
+ down_write(&ndev->reslock);
teardown_driver(ndev);
clear_vqs_ready(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->cur_num_vqs = 0;
+ ndev->mvdev.cvq.received_desc = 0;
+ ndev->mvdev.cvq.completed_desc = 0;
memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
ndev->mvdev.actual_features = 0;
++mvdev->generation;
@@ -2310,7 +2522,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
if (mlx5_vdpa_create_mr(mvdev, NULL))
mlx5_vdpa_warn(mvdev, "create MR failed\n");
}
- mutex_unlock(&ndev->reslock);
+ up_write(&ndev->reslock);
return 0;
}
@@ -2343,14 +2555,15 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
return mvdev->generation;
}
-static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
+static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
+ struct vhost_iotlb *iotlb)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
bool change_map;
int err;
- mutex_lock(&ndev->reslock);
+ down_write(&ndev->reslock);
err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
if (err) {
@@ -2362,7 +2575,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
err = mlx5_vdpa_change_map(mvdev, iotlb);
err:
- mutex_unlock(&ndev->reslock);
+ up_write(&ndev->reslock);
return err;
}
@@ -2381,7 +2594,6 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
}
mlx5_vdpa_free_resources(&ndev->mvdev);
- mutex_destroy(&ndev->reslock);
kfree(ndev->event_cbs);
kfree(ndev->vqs);
}
@@ -2422,6 +2634,93 @@ static u64 mlx5_vdpa_get_driver_features(struct vdpa_device *vdev)
return mvdev->actual_features;
}
+static int counter_set_query(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
+ u64 *received_desc, u64 *completed_desc)
+{
+ u32 in[MLX5_ST_SZ_DW(query_virtio_q_counters_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(query_virtio_q_counters_out)] = {};
+ void *cmd_hdr;
+ void *ctx;
+ int err;
+
+ if (!counters_supported(&ndev->mvdev))
+ return -EOPNOTSUPP;
+
+ if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
+ return -EAGAIN;
+
+ cmd_hdr = MLX5_ADDR_OF(query_virtio_q_counters_in, in, hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->counter_set_id);
+
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ ctx = MLX5_ADDR_OF(query_virtio_q_counters_out, out, counters);
+ *received_desc = MLX5_GET64(virtio_q_counters, ctx, received_desc);
+ *completed_desc = MLX5_GET64(virtio_q_counters, ctx, completed_desc);
+ return 0;
+}
+
+static int mlx5_vdpa_get_vendor_vq_stats(struct vdpa_device *vdev, u16 idx,
+ struct sk_buff *msg,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq;
+ struct mlx5_control_vq *cvq;
+ u64 received_desc;
+ u64 completed_desc;
+ int err = 0;
+
+ down_read(&ndev->reslock);
+ if (!is_index_valid(mvdev, idx)) {
+ NL_SET_ERR_MSG_MOD(extack, "virtqueue index is not valid");
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ if (idx == ctrl_vq_idx(mvdev)) {
+ cvq = &mvdev->cvq;
+ received_desc = cvq->received_desc;
+ completed_desc = cvq->completed_desc;
+ goto out;
+ }
+
+ mvq = &ndev->vqs[idx];
+ err = counter_set_query(ndev, mvq, &received_desc, &completed_desc);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "failed to query hardware");
+ goto out_err;
+ }
+
+out:
+ err = -EMSGSIZE;
+ if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "received_desc"))
+ goto out_err;
+
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, received_desc,
+ VDPA_ATTR_PAD))
+ goto out_err;
+
+ if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "completed_desc"))
+ goto out_err;
+
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, completed_desc,
+ VDPA_ATTR_PAD))
+ goto out_err;
+
+ err = 0;
+out_err:
+ up_read(&ndev->reslock);
+ return err;
+}
+
static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_vq_address = mlx5_vdpa_set_vq_address,
.set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2431,9 +2730,11 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.get_vq_ready = mlx5_vdpa_get_vq_ready,
.set_vq_state = mlx5_vdpa_set_vq_state,
.get_vq_state = mlx5_vdpa_get_vq_state,
+ .get_vendor_vq_stats = mlx5_vdpa_get_vendor_vq_stats,
.get_vq_notification = mlx5_get_vq_notification,
.get_vq_irq = mlx5_get_vq_irq,
.get_vq_align = mlx5_vdpa_get_vq_align,
+ .get_vq_group = mlx5_vdpa_get_vq_group,
.get_device_features = mlx5_vdpa_get_device_features,
.set_driver_features = mlx5_vdpa_set_driver_features,
.get_driver_features = mlx5_vdpa_get_driver_features,
@@ -2669,7 +2970,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
}
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
- name, false);
+ 1, 1, name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
@@ -2686,18 +2987,18 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
}
init_mvqs(ndev);
- mutex_init(&ndev->reslock);
+ init_rwsem(&ndev->reslock);
config = &ndev->config;
if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)) {
err = config_func_mtu(mdev, add_config->net.mtu);
if (err)
- goto err_mtu;
+ goto err_alloc;
}
err = query_mtu(mdev, &mtu);
if (err)
- goto err_mtu;
+ goto err_alloc;
ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu);
@@ -2711,14 +3012,14 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
} else {
err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
if (err)
- goto err_mtu;
+ goto err_alloc;
}
if (!is_zero_ether_addr(config->mac)) {
pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
err = mlx5_mpfs_add_mac(pfmdev, config->mac);
if (err)
- goto err_mtu;
+ goto err_alloc;
ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC);
}
@@ -2768,8 +3069,6 @@ err_res:
err_mpfs:
if (!is_zero_ether_addr(config->mac))
mlx5_mpfs_del_mac(pfmdev, config->mac);
-err_mtu:
- mutex_destroy(&ndev->reslock);
err_alloc:
put_device(&mvdev->vdev.dev);
return err;
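
The steering rework above replaces the single fixed unicast/multicast rule pair with a hash table of per-(MAC, VLAN) rule pairs. search_val() builds the lookup key by packing the 48-bit MAC into the low bits and the VLAN ID into bits 48 and up, with MLX5V_UNTAGGED (0x1000, one above the largest valid 12-bit VID) standing in for "no VLAN tag" so untagged entries can never collide with tagged ones; hash_64(key, 8) then selects one of the MLX5V_MACVLAN_SIZE (256) buckets. A userspace sketch of that key construction follows; bucket() uses a multiplicative hash in the spirit of the kernel's hash_64(), but treat the exact constant as illustrative.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define UNTAGGED   0x1000   /* mirrors MLX5V_UNTAGGED: one above the max 12-bit VID */
#define TABLE_BITS 8        /* 2^8 = 256 buckets, mirrors MLX5V_MACVLAN_SIZE */

static uint64_t key(const uint8_t mac[6], uint16_t vid, bool tagged)
{
	if (!tagged)
		vid = UNTAGGED;

	return (uint64_t)vid << 48 |
	       (uint64_t)mac[0] << 40 | (uint64_t)mac[1] << 32 |
	       (uint64_t)mac[2] << 24 | (uint64_t)mac[3] << 16 |
	       (uint64_t)mac[4] << 8  | (uint64_t)mac[5];
}

/* Multiplicative hash folded down to TABLE_BITS bits. */
static uint32_t bucket(uint64_t val)
{
	return (uint32_t)((val * 0x61C8864680B583EBull) >> (64 - TABLE_BITS));
}

int main(void)
{
	const uint8_t mac[6] = { 0x52, 0x54, 0x00, 0xab, 0xcd, 0xef };
	uint64_t untagged = key(mac, 0, false);
	uint64_t vlan100  = key(mac, 100, true);

	printf("untagged key %016llx -> bucket %u\n",
	       (unsigned long long)untagged, bucket(untagged));
	printf("vid 100  key %016llx -> bucket %u\n",
	       (unsigned long long)vlan100, bucket(vlan100));
	return 0;
}
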
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 2b75c00b1005..ebf2f363fbe7 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -18,14 +18,14 @@
static LIST_HEAD(mdev_head);
/* A global mutex that protects vdpa management device and device level operations. */
-static DEFINE_MUTEX(vdpa_dev_mutex);
+static DECLARE_RWSEM(vdpa_dev_lock);
static DEFINE_IDA(vdpa_index_ida);
void vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
- mutex_lock(&vdev->cf_mutex);
+ down_write(&vdev->cf_lock);
vdev->config->set_status(vdev, status);
- mutex_unlock(&vdev->cf_mutex);
+ up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL(vdpa_set_status);
@@ -77,32 +77,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct vdpa_device *vdev = dev_to_vdpa(dev);
- const char *driver_override, *old;
- char *cp;
+ int ret;
- /* We need to keep extra room for a newline */
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- device_lock(dev);
- old = vdev->driver_override;
- if (strlen(driver_override)) {
- vdev->driver_override = driver_override;
- } else {
- kfree(driver_override);
- vdev->driver_override = NULL;
- }
- device_unlock(dev);
-
- kfree(old);
+ ret = driver_set_override(dev, &vdev->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
@@ -148,7 +127,6 @@ static void vdpa_release_dev(struct device *d)
ops->free(vdev);
ida_simple_remove(&vdpa_index_ida, vdev->index);
- mutex_destroy(&vdev->cf_mutex);
kfree(vdev->driver_override);
kfree(vdev);
}
@@ -159,6 +137,8 @@ static void vdpa_release_dev(struct device *d)
* initialized but before registered.
* @parent: the parent device
* @config: the bus operations that is supported by this device
+ * @ngroups: number of groups supported by this device
+ * @nas: number of address spaces supported by this device
* @size: size of the parent structure that contains private data
* @name: name of the vdpa device; optional.
* @use_va: indicate whether virtual address must be used by this device
@@ -171,6 +151,7 @@ static void vdpa_release_dev(struct device *d)
*/
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
+ unsigned int ngroups, unsigned int nas,
size_t size, const char *name,
bool use_va)
{
@@ -203,6 +184,8 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
vdev->config = config;
vdev->features_valid = false;
vdev->use_va = use_va;
+ vdev->ngroups = ngroups;
+ vdev->nas = nas;
if (name)
err = dev_set_name(&vdev->dev, "%s", name);
@@ -211,7 +194,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
if (err)
goto err_name;
- mutex_init(&vdev->cf_mutex);
+ init_rwsem(&vdev->cf_lock);
device_initialize(&vdev->dev);
return vdev;
@@ -238,7 +221,7 @@ static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
vdev->nvqs = nvqs;
- lockdep_assert_held(&vdpa_dev_mutex);
+ lockdep_assert_held(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
if (dev) {
put_device(dev);
@@ -278,9 +261,9 @@ int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
int err;
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
err = __vdpa_register_device(vdev, nvqs);
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);
@@ -293,7 +276,7 @@ EXPORT_SYMBOL_GPL(vdpa_register_device);
*/
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
- lockdep_assert_held(&vdpa_dev_mutex);
+ lockdep_assert_held(&vdpa_dev_lock);
WARN_ON(!vdev->mdev);
device_unregister(&vdev->dev);
}
@@ -305,9 +288,9 @@ EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
*/
void vdpa_unregister_device(struct vdpa_device *vdev)
{
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
device_unregister(&vdev->dev);
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);
@@ -352,9 +335,9 @@ int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
return -EINVAL;
INIT_LIST_HEAD(&mdev->list);
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
list_add_tail(&mdev->list, &mdev_head);
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
@@ -371,14 +354,14 @@ static int vdpa_match_remove(struct device *dev, void *data)
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
list_del(&mdev->list);
/* Filter out all the entries belonging to this management device and delete them. */
bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
@@ -407,9 +390,9 @@ static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len)
{
- mutex_lock(&vdev->cf_mutex);
+ down_read(&vdev->cf_lock);
vdpa_get_config_unlocked(vdev, offset, buf, len);
- mutex_unlock(&vdev->cf_mutex);
+ up_read(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);
@@ -423,9 +406,9 @@ EXPORT_SYMBOL_GPL(vdpa_get_config);
void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
const void *buf, unsigned int length)
{
- mutex_lock(&vdev->cf_mutex);
+ down_write(&vdev->cf_lock);
vdev->config->set_config(vdev, offset, buf, length);
- mutex_unlock(&vdev->cf_mutex);
+ up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_set_config);
@@ -532,17 +515,17 @@ static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *i
if (!msg)
return -ENOMEM;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
if (IS_ERR(mdev)) {
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
err = PTR_ERR(mdev);
goto out;
}
err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
if (err)
goto out;
err = genlmsg_reply(msg, info);
@@ -561,7 +544,7 @@ vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
int idx = 0;
int err;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
list_for_each_entry(mdev, &mdev_head, list) {
if (idx < start) {
idx++;
@@ -574,7 +557,7 @@ vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
idx++;
}
out:
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
cb->args[0] = idx;
return msg->len;
}
@@ -627,7 +610,7 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
if (IS_ERR(mdev)) {
NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
@@ -643,7 +626,7 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
err = mdev->ops->dev_add(mdev, name, &config);
err:
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
return err;
}
@@ -659,7 +642,7 @@ static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *i
return -EINVAL;
name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
if (!dev) {
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
@@ -677,7 +660,7 @@ static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *i
mdev_err:
put_device(dev);
dev_err:
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
return err;
}
@@ -743,7 +726,7 @@ static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
if (!msg)
return -ENOMEM;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
if (!dev) {
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
@@ -756,14 +739,19 @@ static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
goto mdev_err;
}
err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
- if (!err)
- err = genlmsg_reply(msg, info);
+ if (err)
+ goto mdev_err;
+
+ err = genlmsg_reply(msg, info);
+ put_device(dev);
+ up_read(&vdpa_dev_lock);
+ return err;
+
mdev_err:
put_device(dev);
err:
- mutex_unlock(&vdpa_dev_mutex);
- if (err)
- nlmsg_free(msg);
+ up_read(&vdpa_dev_lock);
+ nlmsg_free(msg);
return err;
}
@@ -804,9 +792,9 @@ static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callba
info.start_idx = cb->args[0];
info.idx = 0;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
cb->args[0] = info.idx;
return msg->len;
}
@@ -861,7 +849,7 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
u8 status;
int err;
- mutex_lock(&vdev->cf_mutex);
+ down_read(&vdev->cf_lock);
status = vdev->config->get_status(vdev);
if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
@@ -898,14 +886,116 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
if (err)
goto msg_err;
- mutex_unlock(&vdev->cf_mutex);
+ up_read(&vdev->cf_lock);
genlmsg_end(msg, hdr);
return 0;
msg_err:
genlmsg_cancel(msg, hdr);
out:
- mutex_unlock(&vdev->cf_mutex);
+ up_read(&vdev->cf_lock);
+ return err;
+}
+
+static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
+ struct genl_info *info, u32 index)
+{
+ struct virtio_net_config config = {};
+ u64 features;
+ u16 max_vqp;
+ u8 status;
+ int err;
+
+ status = vdev->config->get_status(vdev);
+ if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
+ NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
+ return -EAGAIN;
+ }
+ vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
+
+ max_vqp = le16_to_cpu(config.max_virtqueue_pairs);
+ if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
+ return -EMSGSIZE;
+
+ features = vdev->config->get_driver_features(vdev);
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
+ features, VDPA_ATTR_PAD))
+ return -EMSGSIZE;
+
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
+ return -EMSGSIZE;
+
+ err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
+ struct genl_info *info, u32 index)
+{
+ int err;
+
+ down_read(&vdev->cf_lock);
+ if (!vdev->config->get_vendor_vq_stats) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = vdpa_fill_stats_rec(vdev, msg, info, index);
+out:
+ up_read(&vdev->cf_lock);
+ return err;
+}
+
+static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
+ struct sk_buff *msg,
+ struct genl_info *info, u32 index)
+{
+ u32 device_id;
+ void *hdr;
+ int err;
+ u32 portid = info->snd_portid;
+ u32 seq = info->snd_seq;
+ u32 flags = 0;
+
+ hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
+ VDPA_CMD_DEV_VSTATS_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
+ err = -EMSGSIZE;
+ goto undo_msg;
+ }
+
+ device_id = vdev->config->get_device_id(vdev);
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
+ err = -EMSGSIZE;
+ goto undo_msg;
+ }
+
+ switch (device_id) {
+ case VIRTIO_ID_NET:
+ if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
+ NL_SET_ERR_MSG_MOD(info->extack, "queue index exceeds max value");
+ err = -ERANGE;
+ break;
+ }
+
+ err = vendor_stats_fill(vdev, msg, info, index);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ genlmsg_end(msg, hdr);
+
+ return err;
+
+undo_msg:
+ genlmsg_cancel(msg, hdr);
return err;
}
@@ -924,7 +1014,7 @@ static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info
if (!msg)
return -ENOMEM;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
if (!dev) {
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
@@ -945,7 +1035,7 @@ static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info
mdev_err:
put_device(dev);
dev_err:
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
if (err)
nlmsg_free(msg);
return err;
@@ -983,13 +1073,67 @@ vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *
info.start_idx = cb->args[0];
info.idx = 0;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
cb->args[0] = info.idx;
return msg->len;
}
+static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct vdpa_device *vdev;
+ struct sk_buff *msg;
+ const char *devname;
+ struct device *dev;
+ u32 index;
+ int err;
+
+ if (!info->attrs[VDPA_ATTR_DEV_NAME])
+ return -EINVAL;
+
+ if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
+ return -EINVAL;
+
+ devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
+ down_read(&vdpa_dev_lock);
+ dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
+ if (!dev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "device not found");
+ err = -ENODEV;
+ goto dev_err;
+ }
+ vdev = container_of(dev, struct vdpa_device, dev);
+ if (!vdev->mdev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
+ err = -EINVAL;
+ goto mdev_err;
+ }
+ err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
+ if (err)
+ goto mdev_err;
+
+ err = genlmsg_reply(msg, info);
+
+ put_device(dev);
+ up_read(&vdpa_dev_lock);
+
+ return err;
+
+mdev_err:
+ put_device(dev);
+dev_err:
+ nlmsg_free(msg);
+ up_read(&vdpa_dev_lock);
+ return err;
+}
+
static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
@@ -1030,6 +1174,12 @@ static const struct genl_ops vdpa_nl_ops[] = {
.doit = vdpa_nl_cmd_dev_config_get_doit,
.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
},
+ {
+ .cmd = VDPA_CMD_DEV_VSTATS_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_stats_get_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family vdpa_nl_family __ro_after_init = {
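
The vdpa core locking change splits callers of the old vdpa_dev_mutex and cf_mutex into readers and writers: the netlink GET/DUMP handlers and vdpa_get_config() now take the semaphores for reading, while device and management-device registration, removal, set_status() and set_config() take them for writing, so concurrent queries (including the new vendor-stats request) no longer serialize against each other. The same split, expressed with a plain pthread rwlock purely for illustration (none of these names come from the kernel):

#include <stdio.h>
#include <pthread.h>

static pthread_rwlock_t dev_lock = PTHREAD_RWLOCK_INITIALIZER;
static int registered_devices;

/* GET/DUMP path: many of these may run concurrently. */
static void *query(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&dev_lock);
	printf("query sees %d device(s)\n", registered_devices);
	pthread_rwlock_unlock(&dev_lock);
	return NULL;
}

/* Register/unregister path: exclusive. */
static void add_device(void)
{
	pthread_rwlock_wrlock(&dev_lock);
	registered_devices++;
	pthread_rwlock_unlock(&dev_lock);
}

int main(void)
{
	pthread_t a, b;

	add_device();
	pthread_create(&a, NULL, query, NULL);
	pthread_create(&b, NULL, query, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
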
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index ddbe142af09a..0f2865899647 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -96,11 +96,17 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
int i;
- for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
+ spin_lock(&vdpasim->iommu_lock);
+
+ for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
+ vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
+ &vdpasim->iommu_lock);
+ }
+
+ for (i = 0; i < vdpasim->dev_attr.nas; i++)
+ vhost_iotlb_reset(&vdpasim->iommu[i]);
- spin_lock(&vdpasim->iommu_lock);
- vhost_iotlb_reset(vdpasim->iommu);
spin_unlock(&vdpasim->iommu_lock);
vdpasim->features = 0;
@@ -145,7 +151,7 @@ static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
dma_addr = iova_dma_addr(&vdpasim->iova, iova);
spin_lock(&vdpasim->iommu_lock);
- ret = vhost_iotlb_add_range(vdpasim->iommu, (u64)dma_addr,
+ ret = vhost_iotlb_add_range(&vdpasim->iommu[0], (u64)dma_addr,
(u64)dma_addr + size - 1, (u64)paddr, perm);
spin_unlock(&vdpasim->iommu_lock);
@@ -161,7 +167,7 @@ static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
size_t size)
{
spin_lock(&vdpasim->iommu_lock);
- vhost_iotlb_del_range(vdpasim->iommu, (u64)dma_addr,
+ vhost_iotlb_del_range(&vdpasim->iommu[0], (u64)dma_addr,
(u64)dma_addr + size - 1);
spin_unlock(&vdpasim->iommu_lock);
@@ -251,6 +257,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
ops = &vdpasim_config_ops;
vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
+ dev_attr->ngroups, dev_attr->nas,
dev_attr->name, false);
if (IS_ERR(vdpasim)) {
ret = PTR_ERR(vdpasim);
@@ -278,16 +285,20 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
if (!vdpasim->vqs)
goto err_iommu;
- vdpasim->iommu = vhost_iotlb_alloc(max_iotlb_entries, 0);
+ vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
+ sizeof(*vdpasim->iommu), GFP_KERNEL);
if (!vdpasim->iommu)
goto err_iommu;
+ for (i = 0; i < vdpasim->dev_attr.nas; i++)
+ vhost_iotlb_init(&vdpasim->iommu[i], 0, 0);
+
vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
if (!vdpasim->buffer)
goto err_iommu;
for (i = 0; i < dev_attr->nvqs; i++)
- vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu,
+ vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
&vdpasim->iommu_lock);
ret = iova_cache_get();
@@ -353,11 +364,14 @@ static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+ bool old_ready;
spin_lock(&vdpasim->lock);
+ old_ready = vq->ready;
vq->ready = ready;
- if (vq->ready)
+ if (vq->ready && !old_ready) {
vdpasim_queue_ready(vdpasim, idx);
+ }
spin_unlock(&vdpasim->lock);
}
@@ -399,6 +413,15 @@ static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
return VDPASIM_QUEUE_ALIGN;
}
+static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+{
+ /* RX and TX belong to group 0, CVQ belongs to group 1 */
+ if (idx == 2)
+ return 1;
+ else
+ return 0;
+}
+
static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -534,20 +557,53 @@ static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
return range;
}
-static int vdpasim_set_map(struct vdpa_device *vdpa,
+static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
+ unsigned int asid)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vhost_iotlb *iommu;
+ int i;
+
+ if (group > vdpasim->dev_attr.ngroups)
+ return -EINVAL;
+
+ if (asid >= vdpasim->dev_attr.nas)
+ return -EINVAL;
+
+ iommu = &vdpasim->iommu[asid];
+
+ spin_lock(&vdpasim->lock);
+
+ for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
+ if (vdpasim_get_vq_group(vdpa, i) == group)
+ vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
+ &vdpasim->iommu_lock);
+
+ spin_unlock(&vdpasim->lock);
+
+ return 0;
+}
+
+static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
struct vhost_iotlb *iotlb)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
struct vhost_iotlb_map *map;
+ struct vhost_iotlb *iommu;
u64 start = 0ULL, last = 0ULL - 1;
int ret;
+ if (asid >= vdpasim->dev_attr.nas)
+ return -EINVAL;
+
spin_lock(&vdpasim->iommu_lock);
- vhost_iotlb_reset(vdpasim->iommu);
+
+ iommu = &vdpasim->iommu[asid];
+ vhost_iotlb_reset(iommu);
for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
map = vhost_iotlb_itree_next(map, start, last)) {
- ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
+ ret = vhost_iotlb_add_range(iommu, map->start,
map->last, map->addr, map->perm);
if (ret)
goto err;
@@ -556,31 +612,39 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
return 0;
err:
- vhost_iotlb_reset(vdpasim->iommu);
+ vhost_iotlb_reset(iommu);
spin_unlock(&vdpasim->iommu_lock);
return ret;
}
-static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
+static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
+ u64 iova, u64 size,
u64 pa, u32 perm, void *opaque)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
int ret;
+ if (asid >= vdpasim->dev_attr.nas)
+ return -EINVAL;
+
spin_lock(&vdpasim->iommu_lock);
- ret = vhost_iotlb_add_range_ctx(vdpasim->iommu, iova, iova + size - 1,
- pa, perm, opaque);
+ ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
+ iova + size - 1, pa, perm, opaque);
spin_unlock(&vdpasim->iommu_lock);
return ret;
}
-static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
+static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
+ u64 iova, u64 size)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ if (asid >= vdpasim->dev_attr.nas)
+ return -EINVAL;
+
spin_lock(&vdpasim->iommu_lock);
- vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
+ vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
spin_unlock(&vdpasim->iommu_lock);
return 0;
@@ -604,8 +668,7 @@ static void vdpasim_free(struct vdpa_device *vdpa)
}
kvfree(vdpasim->buffer);
- if (vdpasim->iommu)
- vhost_iotlb_free(vdpasim->iommu);
+ vhost_iotlb_free(vdpasim->iommu);
kfree(vdpasim->vqs);
kfree(vdpasim->config);
}
@@ -620,6 +683,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.set_vq_state = vdpasim_set_vq_state,
.get_vq_state = vdpasim_get_vq_state,
.get_vq_align = vdpasim_get_vq_align,
+ .get_vq_group = vdpasim_get_vq_group,
.get_device_features = vdpasim_get_device_features,
.set_driver_features = vdpasim_set_driver_features,
.get_driver_features = vdpasim_get_driver_features,
@@ -635,6 +699,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.set_config = vdpasim_set_config,
.get_generation = vdpasim_get_generation,
.get_iova_range = vdpasim_get_iova_range,
+ .set_group_asid = vdpasim_set_group_asid,
.dma_map = vdpasim_dma_map,
.dma_unmap = vdpasim_dma_unmap,
.free = vdpasim_free,
@@ -650,6 +715,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.set_vq_state = vdpasim_set_vq_state,
.get_vq_state = vdpasim_get_vq_state,
.get_vq_align = vdpasim_get_vq_align,
+ .get_vq_group = vdpasim_get_vq_group,
.get_device_features = vdpasim_get_device_features,
.set_driver_features = vdpasim_set_driver_features,
.get_driver_features = vdpasim_get_driver_features,
@@ -665,6 +731,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.set_config = vdpasim_set_config,
.get_generation = vdpasim_get_generation,
.get_iova_range = vdpasim_get_iova_range,
+ .set_group_asid = vdpasim_set_group_asid,
.set_map = vdpasim_set_map,
.free = vdpasim_free,
};
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
index cd58e888bcf3..622782e92239 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -41,6 +41,8 @@ struct vdpasim_dev_attr {
size_t buffer_size;
int nvqs;
u32 id;
+ u32 ngroups;
+ u32 nas;
work_func_t work_fn;
void (*get_config)(struct vdpasim *vdpasim, void *config);
@@ -63,6 +65,7 @@ struct vdpasim {
u32 status;
u32 generation;
u64 features;
+ u32 groups;
/* spinlock to synchronize iommu table */
spinlock_t iommu_lock;
};
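
On the simulator side, each device now owns dev_attr.nas vhost_iotlb instances instead of one, and vdpasim_set_group_asid() re-points every virtqueue belonging to a group at the chosen address space's table; vdpa_sim_net advertises two groups so the control queue (group 1) can live in a different address space than the RX/TX queues (group 0). A compact userspace model of that vq -> group -> asid indirection follows; the structure and names are purely illustrative, not the driver's.

#include <stdio.h>

#define NVQS    3
#define NGROUPS 2
#define NAS     2

struct sim {
	int group_of_vq[NVQS];      /* fixed: RX/TX -> 0, CVQ -> 1 */
	int asid_of_group[NGROUPS];
	int table_of_vq[NVQS];      /* which IOTLB each vq currently uses */
};

static int set_group_asid(struct sim *s, int group, int asid)
{
	int i;

	if (group >= NGROUPS || asid >= NAS)
		return -1;

	s->asid_of_group[group] = asid;
	for (i = 0; i < NVQS; i++)
		if (s->group_of_vq[i] == group)
			s->table_of_vq[i] = asid;   /* mirrors vringh_set_iotlb() */
	return 0;
}

int main(void)
{
	struct sim s = { .group_of_vq = { 0, 0, 1 } };
	int i;

	set_group_asid(&s, 1, 1);   /* move the control VQ to address space 1 */
	for (i = 0; i < NVQS; i++)
		printf("vq%d -> group %d -> iotlb[%d]\n",
		       i, s.group_of_vq[i], s.table_of_vq[i]);
	return 0;
}
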
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index d5324f6fd8c7..5125976a4df8 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -26,9 +26,122 @@
#define DRV_LICENSE "GPL v2"
#define VDPASIM_NET_FEATURES (VDPASIM_FEATURES | \
- (1ULL << VIRTIO_NET_F_MAC))
+ (1ULL << VIRTIO_NET_F_MAC) | \
+ (1ULL << VIRTIO_NET_F_MTU) | \
+ (1ULL << VIRTIO_NET_F_CTRL_VQ) | \
+ (1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR))
-#define VDPASIM_NET_VQ_NUM 2
+/* 3 virtqueues, 2 address spaces, 2 virtqueue groups */
+#define VDPASIM_NET_VQ_NUM 3
+#define VDPASIM_NET_AS_NUM 2
+#define VDPASIM_NET_GROUP_NUM 2
+
+static void vdpasim_net_complete(struct vdpasim_virtqueue *vq, size_t len)
+{
+ /* Make sure data is written before advancing index */
+ smp_wmb();
+
+ vringh_complete_iotlb(&vq->vring, vq->head, len);
+
+ /* Make sure used is visible before raising the interrupt. */
+ smp_wmb();
+
+ local_bh_disable();
+ if (vringh_need_notify_iotlb(&vq->vring) > 0)
+ vringh_notify(&vq->vring);
+ local_bh_enable();
+}
+
+static bool receive_filter(struct vdpasim *vdpasim, size_t len)
+{
+ bool modern = vdpasim->features & (1ULL << VIRTIO_F_VERSION_1);
+ size_t hdr_len = modern ? sizeof(struct virtio_net_hdr_v1) :
+ sizeof(struct virtio_net_hdr);
+ struct virtio_net_config *vio_config = vdpasim->config;
+
+ if (len < ETH_ALEN + hdr_len)
+ return false;
+
+ if (!strncmp(vdpasim->buffer + hdr_len, vio_config->mac, ETH_ALEN))
+ return true;
+
+ return false;
+}
+
+static virtio_net_ctrl_ack vdpasim_handle_ctrl_mac(struct vdpasim *vdpasim,
+ u8 cmd)
+{
+ struct virtio_net_config *vio_config = vdpasim->config;
+ struct vdpasim_virtqueue *cvq = &vdpasim->vqs[2];
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ size_t read;
+
+ switch (cmd) {
+ case VIRTIO_NET_CTRL_MAC_ADDR_SET:
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov,
+ vio_config->mac, ETH_ALEN);
+ if (read == ETH_ALEN)
+ status = VIRTIO_NET_OK;
+ break;
+ default:
+ break;
+ }
+
+ return status;
+}
+
+static void vdpasim_handle_cvq(struct vdpasim *vdpasim)
+{
+ struct vdpasim_virtqueue *cvq = &vdpasim->vqs[2];
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ struct virtio_net_ctrl_hdr ctrl;
+ size_t read, write;
+ int err;
+
+ if (!(vdpasim->features & (1ULL << VIRTIO_NET_F_CTRL_VQ)))
+ return;
+
+ if (!cvq->ready)
+ return;
+
+ while (true) {
+ err = vringh_getdesc_iotlb(&cvq->vring, &cvq->in_iov,
+ &cvq->out_iov,
+ &cvq->head, GFP_ATOMIC);
+ if (err <= 0)
+ break;
+
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov, &ctrl,
+ sizeof(ctrl));
+ if (read != sizeof(ctrl))
+ break;
+
+ switch (ctrl.class) {
+ case VIRTIO_NET_CTRL_MAC:
+ status = vdpasim_handle_ctrl_mac(vdpasim, ctrl.cmd);
+ break;
+ default:
+ break;
+ }
+
+ /* Make sure data is written before advancing index */
+ smp_wmb();
+
+ write = vringh_iov_push_iotlb(&cvq->vring, &cvq->out_iov,
+ &status, sizeof(status));
+ vringh_complete_iotlb(&cvq->vring, cvq->head, write);
+ vringh_kiov_cleanup(&cvq->in_iov);
+ vringh_kiov_cleanup(&cvq->out_iov);
+
+ /* Make sure used is visible before raising the interrupt. */
+ smp_wmb();
+
+ local_bh_disable();
+ if (cvq->cb)
+ cvq->cb(cvq->private);
+ local_bh_enable();
+ }
+}
static void vdpasim_net_work(struct work_struct *work)
{
@@ -36,7 +149,6 @@ static void vdpasim_net_work(struct work_struct *work)
struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
ssize_t read, write;
- size_t total_write;
int pkts = 0;
int err;
@@ -45,53 +157,40 @@ static void vdpasim_net_work(struct work_struct *work)
if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto out;
+ vdpasim_handle_cvq(vdpasim);
+
if (!txq->ready || !rxq->ready)
goto out;
while (true) {
- total_write = 0;
err = vringh_getdesc_iotlb(&txq->vring, &txq->out_iov, NULL,
&txq->head, GFP_ATOMIC);
if (err <= 0)
break;
+ read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
+ vdpasim->buffer,
+ PAGE_SIZE);
+
+ if (!receive_filter(vdpasim, read)) {
+ vdpasim_net_complete(txq, 0);
+ continue;
+ }
+
err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->in_iov,
&rxq->head, GFP_ATOMIC);
if (err <= 0) {
- vringh_complete_iotlb(&txq->vring, txq->head, 0);
+ vdpasim_net_complete(txq, 0);
break;
}
- while (true) {
- read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
- vdpasim->buffer,
- PAGE_SIZE);
- if (read <= 0)
- break;
-
- write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
- vdpasim->buffer, read);
- if (write <= 0)
- break;
-
- total_write += write;
- }
-
- /* Make sure data is wrote before advancing index */
- smp_wmb();
-
- vringh_complete_iotlb(&txq->vring, txq->head, 0);
- vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);
-
- /* Make sure used is visible before rasing the interrupt. */
- smp_wmb();
+ write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
+ vdpasim->buffer, read);
+ if (write <= 0)
+ break;
- local_bh_disable();
- if (vringh_need_notify_iotlb(&txq->vring) > 0)
- vringh_notify(&txq->vring);
- if (vringh_need_notify_iotlb(&rxq->vring) > 0)
- vringh_notify(&rxq->vring);
- local_bh_enable();
+ vdpasim_net_complete(txq, 0);
+ vdpasim_net_complete(rxq, write);
if (++pkts > 4) {
schedule_work(&vdpasim->work);
@@ -145,6 +244,8 @@ static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
dev_attr.id = VIRTIO_ID_NET;
dev_attr.supported_features = VDPASIM_NET_FEATURES;
dev_attr.nvqs = VDPASIM_NET_VQ_NUM;
+ dev_attr.ngroups = VDPASIM_NET_GROUP_NUM;
+ dev_attr.nas = VDPASIM_NET_AS_NUM;
dev_attr.config_size = sizeof(struct virtio_net_config);
dev_attr.get_config = vdpasim_net_get_config;
dev_attr.work_fn = vdpasim_net_work;
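
The control-virtqueue handler above follows the virtio-net layout: a two-byte class/command header in device-readable buffers, command data (a 6-byte MAC for VIRTIO_NET_CTRL_MAC_ADDR_SET), and a one-byte ack the device writes back. The following small runnable sketch shows how a driver would assemble such a request; descriptor placement and the vring plumbing are omitted, and the constants are the virtio-net spec values.

#include <stdint.h>
#include <stdio.h>

/* Values from the virtio-net specification */
#define VIRTIO_NET_CTRL_MAC          1
#define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
#define VIRTIO_NET_OK                0

struct virtio_net_ctrl_hdr {
	uint8_t class;
	uint8_t cmd;
};

int main(void)
{
	struct virtio_net_ctrl_hdr hdr = {
		.class = VIRTIO_NET_CTRL_MAC,
		.cmd   = VIRTIO_NET_CTRL_MAC_ADDR_SET,
	};
	uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };
	uint8_t ack;	/* device-writable status byte */

	/*
	 * The driver places hdr and mac in device-readable descriptors and
	 * ack in a device-writable one; the device (here the simulator)
	 * copies the MAC into its config space and then writes the ack.
	 */
	ack = VIRTIO_NET_OK;	/* what vdpasim reports on success */
	printf("class=%u cmd=%u mac[0]=%02x ack=%u\n",
	       hdr.class, hdr.cmd, mac[0], ack);
	return 0;
}
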
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index f85d1a08ed87..d503848b3b6e 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -693,6 +693,7 @@ static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa)
}
static int vduse_vdpa_set_map(struct vdpa_device *vdpa,
+ unsigned int asid,
struct vhost_iotlb *iotlb)
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
@@ -1495,7 +1496,7 @@ static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
return -EEXIST;
vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
- &vduse_vdpa_config_ops, name, true);
+ &vduse_vdpa_config_ops, 1, 1, name, true);
if (IS_ERR(vdev))
return PTR_ERR(vdev);
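
VDUSE keeps advertising a single group and a single address space, so the new asid argument to set_map() is effectively fixed at 0 for it. Below is a toy sketch of the convention a single-AS parent can follow, with made-up types standing in for the real vdpa structures.

#include <errno.h>
#include <stdio.h>

struct toy_dev {
	unsigned int nas;	/* number of address spaces advertised */
};

/* Illustrative only: a single-AS parent simply rejects any other asid. */
static int toy_set_map(struct toy_dev *dev, unsigned int asid, void *iotlb)
{
	if (asid >= dev->nas)
		return -EINVAL;
	/* ... update the mapping for this address space ... */
	(void)iotlb;
	return 0;
}

int main(void)
{
	struct toy_dev dev = { .nas = 1 };

	printf("asid 0 -> %d, asid 1 -> %d\n",
	       toy_set_map(&dev, 0, NULL), toy_set_map(&dev, 1, NULL));
	return 0;
}
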
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index cce101e6a940..04522077735b 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -32,7 +32,7 @@ struct vp_vring {
struct vp_vdpa {
struct vdpa_device vdpa;
- struct virtio_pci_modern_device mdev;
+ struct virtio_pci_modern_device *mdev;
struct vp_vring *vring;
struct vdpa_callback config_cb;
char msix_name[VP_VDPA_NAME_SIZE];
@@ -41,6 +41,12 @@ struct vp_vdpa {
int vectors;
};
+struct vp_vdpa_mgmtdev {
+ struct vdpa_mgmt_dev mgtdev;
+ struct virtio_pci_modern_device *mdev;
+ struct vp_vdpa *vp_vdpa;
+};
+
static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
{
return container_of(vdpa, struct vp_vdpa, vdpa);
@@ -50,7 +56,12 @@ static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
- return &vp_vdpa->mdev;
+ return vp_vdpa->mdev;
+}
+
+static struct virtio_pci_modern_device *vp_vdpa_to_mdev(struct vp_vdpa *vp_vdpa)
+{
+ return vp_vdpa->mdev;
}
static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
@@ -96,7 +107,7 @@ static int vp_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
- struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
struct pci_dev *pdev = mdev->pci_dev;
int i;
@@ -143,7 +154,7 @@ static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
{
- struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
struct pci_dev *pdev = mdev->pci_dev;
int i, ret, irq;
int queues = vp_vdpa->queues;
@@ -198,7 +209,7 @@ err:
static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
- struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
u8 s = vp_vdpa_get_status(vdpa);
if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
@@ -212,7 +223,7 @@ static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
static int vp_vdpa_reset(struct vdpa_device *vdpa)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
- struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
u8 s = vp_vdpa_get_status(vdpa);
vp_modern_set_status(mdev, 0);
@@ -372,7 +383,7 @@ static void vp_vdpa_get_config(struct vdpa_device *vdpa,
void *buf, unsigned int len)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
- struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
u8 old, new;
u8 *p;
int i;
@@ -392,7 +403,7 @@ static void vp_vdpa_set_config(struct vdpa_device *vdpa,
unsigned int len)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
- struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
const u8 *p = buf;
int i;
@@ -412,7 +423,7 @@ static struct vdpa_notification_area
vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
- struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
struct vdpa_notification_area notify;
notify.addr = vp_vdpa->vring[qid].notify_pa;
@@ -454,38 +465,31 @@ static void vp_vdpa_free_irq_vectors(void *data)
pci_free_irq_vectors(data);
}
-static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+ const struct vdpa_dev_set_config *add_config)
{
- struct virtio_pci_modern_device *mdev;
+ struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
+ container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);
+
+ struct virtio_pci_modern_device *mdev = vp_vdpa_mgtdev->mdev;
+ struct pci_dev *pdev = mdev->pci_dev;
struct device *dev = &pdev->dev;
- struct vp_vdpa *vp_vdpa;
+ struct vp_vdpa *vp_vdpa = NULL;
int ret, i;
- ret = pcim_enable_device(pdev);
- if (ret)
- return ret;
-
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
- dev, &vp_vdpa_ops, NULL, false);
+ dev, &vp_vdpa_ops, 1, 1, name, false);
+
if (IS_ERR(vp_vdpa)) {
dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
return PTR_ERR(vp_vdpa);
}
- mdev = &vp_vdpa->mdev;
- mdev->pci_dev = pdev;
-
- ret = vp_modern_probe(mdev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
- goto err;
- }
-
- pci_set_master(pdev);
- pci_set_drvdata(pdev, vp_vdpa);
+ vp_vdpa_mgtdev->vp_vdpa = vp_vdpa;
vp_vdpa->vdpa.dma_dev = &pdev->dev;
vp_vdpa->queues = vp_modern_get_num_queues(mdev);
+ vp_vdpa->mdev = mdev;
ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
if (ret) {
@@ -516,7 +520,8 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
- ret = vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
+ vp_vdpa->vdpa.mdev = &vp_vdpa_mgtdev->mgtdev;
+ ret = _vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
if (ret) {
dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
goto err;
@@ -529,12 +534,104 @@ err:
return ret;
}
+static void vp_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev,
+ struct vdpa_device *dev)
+{
+ struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
+ container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);
+
+ struct vp_vdpa *vp_vdpa = vp_vdpa_mgtdev->vp_vdpa;
+
+ _vdpa_unregister_device(&vp_vdpa->vdpa);
+ vp_vdpa_mgtdev->vp_vdpa = NULL;
+}
+
+static const struct vdpa_mgmtdev_ops vp_vdpa_mdev_ops = {
+ .dev_add = vp_vdpa_dev_add,
+ .dev_del = vp_vdpa_dev_del,
+};
+
+static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = NULL;
+ struct vdpa_mgmt_dev *mgtdev;
+ struct device *dev = &pdev->dev;
+ struct virtio_pci_modern_device *mdev = NULL;
+ struct virtio_device_id *mdev_id = NULL;
+ int err;
+
+ vp_vdpa_mgtdev = kzalloc(sizeof(*vp_vdpa_mgtdev), GFP_KERNEL);
+ if (!vp_vdpa_mgtdev)
+ return -ENOMEM;
+
+ mgtdev = &vp_vdpa_mgtdev->mgtdev;
+ mgtdev->ops = &vp_vdpa_mdev_ops;
+ mgtdev->device = dev;
+
+ mdev = kzalloc(sizeof(struct virtio_pci_modern_device), GFP_KERNEL);
+ if (!mdev) {
+ err = -ENOMEM;
+ goto mdev_err;
+ }
+
+ mdev_id = kzalloc(sizeof(struct virtio_device_id), GFP_KERNEL);
+ if (!mdev_id) {
+ err = -ENOMEM;
+ goto mdev_id_err;
+ }
+
+ vp_vdpa_mgtdev->mdev = mdev;
+ mdev->pci_dev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ goto probe_err;
+ }
+
+ err = vp_modern_probe(mdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
+ goto probe_err;
+ }
+
+ mdev_id->device = mdev->id.device;
+ mdev_id->vendor = mdev->id.vendor;
+ mgtdev->id_table = mdev_id;
+ mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev);
+ mgtdev->supported_features = vp_modern_get_features(mdev);
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, vp_vdpa_mgtdev);
+
+ err = vdpa_mgmtdev_register(mgtdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register vdpa mgmtdev device\n");
+ goto register_err;
+ }
+
+ return 0;
+
+register_err:
+ vp_modern_remove(vp_vdpa_mgtdev->mdev);
+probe_err:
+ kfree(mdev_id);
+mdev_id_err:
+ kfree(mdev);
+mdev_err:
+ kfree(vp_vdpa_mgtdev);
+ return err;
+}
+
static void vp_vdpa_remove(struct pci_dev *pdev)
{
- struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);
+ struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = pci_get_drvdata(pdev);
+ struct virtio_pci_modern_device *mdev = NULL;
- vp_modern_remove(&vp_vdpa->mdev);
- vdpa_unregister_device(&vp_vdpa->vdpa);
+ mdev = vp_vdpa_mgtdev->mdev;
+ vp_modern_remove(mdev);
+ vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
+ kfree(vp_vdpa_mgtdev->mgtdev.id_table);
+ kfree(mdev);
+ kfree(vp_vdpa_mgtdev);
}
static struct pci_driver vp_vdpa_driver = {
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
index 6e2e62c6f47a..3feff729f3ce 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
@@ -588,6 +588,7 @@ static struct fsl_mc_driver vfio_fsl_mc_driver = {
.name = "vfio-fsl-mc",
.owner = THIS_MODULE,
},
+ .driver_managed_dma = true,
};
static int __init vfio_fsl_mc_driver_init(void)
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 767b5d47631a..4def43f5f7b6 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -337,6 +337,14 @@ static int vf_qm_cache_wb(struct hisi_qm *qm)
return 0;
}
+static struct hisi_acc_vf_core_device *hssi_acc_drvdata(struct pci_dev *pdev)
+{
+ struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
+
+ return container_of(core_device, struct hisi_acc_vf_core_device,
+ core_device);
+}
+
static void vf_qm_fun_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev,
struct hisi_qm *qm)
{
@@ -962,7 +970,7 @@ hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
- struct hisi_acc_vf_core_device *hisi_acc_vdev = dev_get_drvdata(&pdev->dev);
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = hssi_acc_drvdata(pdev);
if (hisi_acc_vdev->core_device.vdev.migration_flags !=
VFIO_MIGRATION_STOP_COPY)
@@ -1274,11 +1282,10 @@ static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device
&hisi_acc_vfio_pci_ops);
}
+ dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
if (ret)
goto out_free;
-
- dev_set_drvdata(&pdev->dev, hisi_acc_vdev);
return 0;
out_free:
@@ -1289,7 +1296,7 @@ out_free:
static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
{
- struct hisi_acc_vf_core_device *hisi_acc_vdev = dev_get_drvdata(&pdev->dev);
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = hssi_acc_drvdata(pdev);
vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
vfio_pci_core_uninit_device(&hisi_acc_vdev->core_device);
@@ -1316,6 +1323,7 @@ static struct pci_driver hisi_acc_vfio_pci_driver = {
.probe = hisi_acc_vfio_pci_probe,
.remove = hisi_acc_vfio_pci_remove,
.err_handler = &hisi_acc_vf_err_handlers,
+ .driver_managed_dma = true,
};
module_pci_driver(hisi_acc_vfio_pci_driver);
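
hssi_acc_drvdata() above, like mlx5vf_drvdata() and the drvdata check added to vfio-pci core below, relies on storing the embedded core device as drvdata and recovering the driver-private wrapper with container_of(). The following self-contained, runnable illustration shows that recovery using toy types in place of the vfio structures.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct core_device { int id; };

struct driver_device {			/* stand-in for e.g. hisi_acc_vf_core_device */
	int private_state;
	struct core_device core;	/* embedded core device */
};

int main(void)
{
	struct driver_device drv = { .private_state = 42, .core = { .id = 7 } };
	struct core_device *core = &drv.core;	/* what dev_get_drvdata() returns */

	/* Recover the wrapper from the embedded member, as the drvdata helpers do. */
	struct driver_device *back = container_of(core, struct driver_device, core);

	printf("private_state=%d id=%d\n", back->private_state, core->id);
	return 0;
}
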
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 5c9f9218cc1d..9b9f33ca270a 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -5,89 +5,157 @@
#include "cmd.h"
-int mlx5vf_cmd_suspend_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod)
+static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
+ u16 *vhca_id);
+
+int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
{
- struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
u32 out[MLX5_ST_SZ_DW(suspend_vhca_out)] = {};
u32 in[MLX5_ST_SZ_DW(suspend_vhca_in)] = {};
- int ret;
- if (!mdev)
+ lockdep_assert_held(&mvdev->state_mutex);
+ if (mvdev->mdev_detach)
return -ENOTCONN;
MLX5_SET(suspend_vhca_in, in, opcode, MLX5_CMD_OP_SUSPEND_VHCA);
- MLX5_SET(suspend_vhca_in, in, vhca_id, vhca_id);
+ MLX5_SET(suspend_vhca_in, in, vhca_id, mvdev->vhca_id);
MLX5_SET(suspend_vhca_in, in, op_mod, op_mod);
- ret = mlx5_cmd_exec_inout(mdev, suspend_vhca, in, out);
- mlx5_vf_put_core_dev(mdev);
- return ret;
+ return mlx5_cmd_exec_inout(mvdev->mdev, suspend_vhca, in, out);
}
-int mlx5vf_cmd_resume_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod)
+int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
{
- struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
u32 out[MLX5_ST_SZ_DW(resume_vhca_out)] = {};
u32 in[MLX5_ST_SZ_DW(resume_vhca_in)] = {};
- int ret;
- if (!mdev)
+ lockdep_assert_held(&mvdev->state_mutex);
+ if (mvdev->mdev_detach)
return -ENOTCONN;
MLX5_SET(resume_vhca_in, in, opcode, MLX5_CMD_OP_RESUME_VHCA);
- MLX5_SET(resume_vhca_in, in, vhca_id, vhca_id);
+ MLX5_SET(resume_vhca_in, in, vhca_id, mvdev->vhca_id);
MLX5_SET(resume_vhca_in, in, op_mod, op_mod);
- ret = mlx5_cmd_exec_inout(mdev, resume_vhca, in, out);
- mlx5_vf_put_core_dev(mdev);
- return ret;
+ return mlx5_cmd_exec_inout(mvdev->mdev, resume_vhca, in, out);
}
-int mlx5vf_cmd_query_vhca_migration_state(struct pci_dev *pdev, u16 vhca_id,
+int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
size_t *state_size)
{
- struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
u32 out[MLX5_ST_SZ_DW(query_vhca_migration_state_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_vhca_migration_state_in)] = {};
int ret;
- if (!mdev)
+ lockdep_assert_held(&mvdev->state_mutex);
+ if (mvdev->mdev_detach)
return -ENOTCONN;
MLX5_SET(query_vhca_migration_state_in, in, opcode,
MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE);
- MLX5_SET(query_vhca_migration_state_in, in, vhca_id, vhca_id);
+ MLX5_SET(query_vhca_migration_state_in, in, vhca_id, mvdev->vhca_id);
MLX5_SET(query_vhca_migration_state_in, in, op_mod, 0);
- ret = mlx5_cmd_exec_inout(mdev, query_vhca_migration_state, in, out);
+ ret = mlx5_cmd_exec_inout(mvdev->mdev, query_vhca_migration_state, in,
+ out);
if (ret)
- goto end;
+ return ret;
*state_size = MLX5_GET(query_vhca_migration_state_out, out,
required_umem_size);
+ return 0;
+}
+
+static int mlx5fv_vf_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct mlx5vf_pci_core_device *mvdev =
+ container_of(nb, struct mlx5vf_pci_core_device, nb);
+
+ mutex_lock(&mvdev->state_mutex);
+ switch (event) {
+ case MLX5_PF_NOTIFY_ENABLE_VF:
+ mvdev->mdev_detach = false;
+ break;
+ case MLX5_PF_NOTIFY_DISABLE_VF:
+ mlx5vf_disable_fds(mvdev);
+ mvdev->mdev_detach = true;
+ break;
+ default:
+ break;
+ }
+ mlx5vf_state_mutex_unlock(mvdev);
+ return 0;
+}
+
+void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
+{
+ if (!mvdev->migrate_cap)
+ return;
+
+ mlx5_sriov_blocking_notifier_unregister(mvdev->mdev, mvdev->vf_id,
+ &mvdev->nb);
+ destroy_workqueue(mvdev->cb_wq);
+}
+
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev)
+{
+ struct pci_dev *pdev = mvdev->core_device.pdev;
+ int ret;
+
+ if (!pdev->is_virtfn)
+ return;
+
+ mvdev->mdev = mlx5_vf_get_core_dev(pdev);
+ if (!mvdev->mdev)
+ return;
+
+ if (!MLX5_CAP_GEN(mvdev->mdev, migration))
+ goto end;
+
+ mvdev->vf_id = pci_iov_vf_id(pdev);
+ if (mvdev->vf_id < 0)
+ goto end;
+
+ if (mlx5vf_cmd_get_vhca_id(mvdev->mdev, mvdev->vf_id + 1,
+ &mvdev->vhca_id))
+ goto end;
+
+ mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0);
+ if (!mvdev->cb_wq)
+ goto end;
+
+ mutex_init(&mvdev->state_mutex);
+ spin_lock_init(&mvdev->reset_lock);
+ mvdev->nb.notifier_call = mlx5fv_vf_event;
+ ret = mlx5_sriov_blocking_notifier_register(mvdev->mdev, mvdev->vf_id,
+ &mvdev->nb);
+ if (ret) {
+ destroy_workqueue(mvdev->cb_wq);
+ goto end;
+ }
+
+ mvdev->migrate_cap = 1;
+ mvdev->core_device.vdev.migration_flags =
+ VFIO_MIGRATION_STOP_COPY |
+ VFIO_MIGRATION_P2P;
end:
- mlx5_vf_put_core_dev(mdev);
- return ret;
+ mlx5_vf_put_core_dev(mvdev->mdev);
}
-int mlx5vf_cmd_get_vhca_id(struct pci_dev *pdev, u16 function_id, u16 *vhca_id)
+static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
+ u16 *vhca_id)
{
- struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
int out_size;
void *out;
int ret;
- if (!mdev)
- return -ENOTCONN;
-
out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
out = kzalloc(out_size, GFP_KERNEL);
- if (!out) {
- ret = -ENOMEM;
- goto end;
- }
+ if (!out)
+ return -ENOMEM;
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, other_function, 1);
@@ -105,8 +173,6 @@ int mlx5vf_cmd_get_vhca_id(struct pci_dev *pdev, u16 function_id, u16 *vhca_id)
err_exec:
kfree(out);
-end:
- mlx5_vf_put_core_dev(mdev);
return ret;
}
@@ -151,21 +217,68 @@ static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn,
return err;
}
-int mlx5vf_cmd_save_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work)
+{
+ struct mlx5vf_async_data *async_data = container_of(_work,
+ struct mlx5vf_async_data, work);
+ struct mlx5_vf_migration_file *migf = container_of(async_data,
+ struct mlx5_vf_migration_file, async_data);
+ struct mlx5_core_dev *mdev = migf->mvdev->mdev;
+
+ mutex_lock(&migf->lock);
+ if (async_data->status) {
+ migf->is_err = true;
+ wake_up_interruptible(&migf->poll_wait);
+ }
+ mutex_unlock(&migf->lock);
+
+ mlx5_core_destroy_mkey(mdev, async_data->mkey);
+ dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
+ mlx5_core_dealloc_pd(mdev, async_data->pdn);
+ kvfree(async_data->out);
+ fput(migf->filp);
+}
+
+static void mlx5vf_save_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5vf_async_data *async_data = container_of(context,
+ struct mlx5vf_async_data, cb_work);
+ struct mlx5_vf_migration_file *migf = container_of(async_data,
+ struct mlx5_vf_migration_file, async_data);
+
+ if (!status) {
+ WRITE_ONCE(migf->total_length,
+ MLX5_GET(save_vhca_state_out, async_data->out,
+ actual_image_size));
+ wake_up_interruptible(&migf->poll_wait);
+ }
+
+ /*
+ * The error and the cleanup flows can't run from an
+ * interrupt context
+ */
+ async_data->status = status;
+ queue_work(migf->mvdev->cb_wq, &async_data->work);
+}
+
+int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
struct mlx5_vf_migration_file *migf)
{
- struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
- u32 out[MLX5_ST_SZ_DW(save_vhca_state_out)] = {};
+ u32 out_size = MLX5_ST_SZ_BYTES(save_vhca_state_out);
u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};
+ struct mlx5vf_async_data *async_data;
+ struct mlx5_core_dev *mdev;
u32 pdn, mkey;
int err;
- if (!mdev)
+ lockdep_assert_held(&mvdev->state_mutex);
+ if (mvdev->mdev_detach)
return -ENOTCONN;
+ mdev = mvdev->mdev;
err = mlx5_core_alloc_pd(mdev, &pdn);
if (err)
- goto end;
+ return err;
err = dma_map_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE,
0);
@@ -179,45 +292,54 @@ int mlx5vf_cmd_save_vhca_state(struct pci_dev *pdev, u16 vhca_id,
MLX5_SET(save_vhca_state_in, in, opcode,
MLX5_CMD_OP_SAVE_VHCA_STATE);
MLX5_SET(save_vhca_state_in, in, op_mod, 0);
- MLX5_SET(save_vhca_state_in, in, vhca_id, vhca_id);
+ MLX5_SET(save_vhca_state_in, in, vhca_id, mvdev->vhca_id);
MLX5_SET(save_vhca_state_in, in, mkey, mkey);
MLX5_SET(save_vhca_state_in, in, size, migf->total_length);
- err = mlx5_cmd_exec_inout(mdev, save_vhca_state, in, out);
+ async_data = &migf->async_data;
+ async_data->out = kvzalloc(out_size, GFP_KERNEL);
+ if (!async_data->out) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ /* no data exists until the callback completes */
+ migf->total_length = 0;
+ get_file(migf->filp);
+ async_data->mkey = mkey;
+ async_data->pdn = pdn;
+ err = mlx5_cmd_exec_cb(&migf->async_ctx, in, sizeof(in),
+ async_data->out,
+ out_size, mlx5vf_save_callback,
+ &async_data->cb_work);
if (err)
goto err_exec;
- migf->total_length =
- MLX5_GET(save_vhca_state_out, out, actual_image_size);
-
- mlx5_core_destroy_mkey(mdev, mkey);
- mlx5_core_dealloc_pd(mdev, pdn);
- dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
- mlx5_vf_put_core_dev(mdev);
-
return 0;
err_exec:
+ fput(migf->filp);
+ kvfree(async_data->out);
+err_out:
mlx5_core_destroy_mkey(mdev, mkey);
err_create_mkey:
dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
err_dma_map:
mlx5_core_dealloc_pd(mdev, pdn);
-end:
- mlx5_vf_put_core_dev(mdev);
return err;
}
-int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
struct mlx5_vf_migration_file *migf)
{
- struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
+ struct mlx5_core_dev *mdev;
u32 out[MLX5_ST_SZ_DW(save_vhca_state_out)] = {};
u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};
u32 pdn, mkey;
int err;
- if (!mdev)
+ lockdep_assert_held(&mvdev->state_mutex);
+ if (mvdev->mdev_detach)
return -ENOTCONN;
mutex_lock(&migf->lock);
@@ -226,6 +348,7 @@ int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id,
goto end;
}
+ mdev = mvdev->mdev;
err = mlx5_core_alloc_pd(mdev, &pdn);
if (err)
goto end;
@@ -241,7 +364,7 @@ int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id,
MLX5_SET(load_vhca_state_in, in, opcode,
MLX5_CMD_OP_LOAD_VHCA_STATE);
MLX5_SET(load_vhca_state_in, in, op_mod, 0);
- MLX5_SET(load_vhca_state_in, in, vhca_id, vhca_id);
+ MLX5_SET(load_vhca_state_in, in, vhca_id, mvdev->vhca_id);
MLX5_SET(load_vhca_state_in, in, mkey, mkey);
MLX5_SET(load_vhca_state_in, in, size, migf->total_length);
@@ -253,7 +376,6 @@ err_mkey:
err_reg:
mlx5_core_dealloc_pd(mdev, pdn);
end:
- mlx5_vf_put_core_dev(mdev);
mutex_unlock(&migf->lock);
return err;
}
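
mlx5vf_save_callback() runs in the command-completion (atomic) context, so it only records the status and defers the sleeping cleanup (mkey destroy, DMA unmap, fput) to a workqueue. Below is a hedged kernel-style sketch of that deferral pattern in isolation; the toy names are illustrative, and only the workqueue API calls themselves are real.

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

struct toy_async_data {
	struct work_struct work;
	int status;			/* filled in by the completion callback */
};

/* Process context: safe to sleep, unmap DMA, free memory, drop file refs. */
static void toy_cleanup_work(struct work_struct *w)
{
	struct toy_async_data *data =
		container_of(w, struct toy_async_data, work);

	if (data->status)
		pr_warn("async command failed: %d\n", data->status);
	/* ... destroy the mkey, dma_unmap_sgtable(), fput() ... */
}

/* Atomic (completion) context: just record the status and defer the rest. */
static void toy_completion(struct toy_async_data *data, int status)
{
	data->status = status;
	queue_work(system_wq, &data->work);
}

/* Before issuing the command: INIT_WORK(&data->work, toy_cleanup_work); */
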
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index 1392a11a9cc0..6c3112fdd8b1 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -7,12 +7,23 @@
#define MLX5_VFIO_CMD_H
#include <linux/kernel.h>
+#include <linux/vfio_pci_core.h>
#include <linux/mlx5/driver.h>
+struct mlx5vf_async_data {
+ struct mlx5_async_work cb_work;
+ struct work_struct work;
+ int status;
+ u32 pdn;
+ u32 mkey;
+ void *out;
+};
+
struct mlx5_vf_migration_file {
struct file *filp;
struct mutex lock;
- bool disabled;
+ u8 disabled:1;
+ u8 is_err:1;
struct sg_append_table table;
size_t total_length;
@@ -22,15 +33,42 @@ struct mlx5_vf_migration_file {
struct scatterlist *last_offset_sg;
unsigned int sg_last_entry;
unsigned long last_offset;
+ struct mlx5vf_pci_core_device *mvdev;
+ wait_queue_head_t poll_wait;
+ struct mlx5_async_ctx async_ctx;
+ struct mlx5vf_async_data async_data;
+};
+
+struct mlx5vf_pci_core_device {
+ struct vfio_pci_core_device core_device;
+ int vf_id;
+ u16 vhca_id;
+ u8 migrate_cap:1;
+ u8 deferred_reset:1;
+ u8 mdev_detach:1;
+ /* protect migration state */
+ struct mutex state_mutex;
+ enum vfio_device_mig_state mig_state;
+ /* protect the reset_done flow */
+ spinlock_t reset_lock;
+ struct mlx5_vf_migration_file *resuming_migf;
+ struct mlx5_vf_migration_file *saving_migf;
+ struct workqueue_struct *cb_wq;
+ struct notifier_block nb;
+ struct mlx5_core_dev *mdev;
};
-int mlx5vf_cmd_suspend_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod);
-int mlx5vf_cmd_resume_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod);
-int mlx5vf_cmd_query_vhca_migration_state(struct pci_dev *pdev, u16 vhca_id,
+int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
+int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
+int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
size_t *state_size);
-int mlx5vf_cmd_get_vhca_id(struct pci_dev *pdev, u16 function_id, u16 *vhca_id);
-int mlx5vf_cmd_save_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev);
+int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
struct mlx5_vf_migration_file *migf);
-int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
struct mlx5_vf_migration_file *migf);
+void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work);
#endif /* MLX5_VFIO_CMD_H */
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index bbec5d288fee..0558d0649ddb 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -17,7 +17,6 @@
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/sched/mm.h>
-#include <linux/vfio_pci_core.h>
#include <linux/anon_inodes.h>
#include "cmd.h"
@@ -25,19 +24,13 @@
/* Arbitrary to prevent userspace from consuming endless memory */
#define MAX_MIGRATION_SIZE (512*1024*1024)
-struct mlx5vf_pci_core_device {
- struct vfio_pci_core_device core_device;
- u16 vhca_id;
- u8 migrate_cap:1;
- u8 deferred_reset:1;
- /* protect migration state */
- struct mutex state_mutex;
- enum vfio_device_mig_state mig_state;
- /* protect the reset_done flow */
- spinlock_t reset_lock;
- struct mlx5_vf_migration_file *resuming_migf;
- struct mlx5_vf_migration_file *saving_migf;
-};
+static struct mlx5vf_pci_core_device *mlx5vf_drvdata(struct pci_dev *pdev)
+{
+ struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
+
+ return container_of(core_device, struct mlx5vf_pci_core_device,
+ core_device);
+}
static struct page *
mlx5vf_get_migration_page(struct mlx5_vf_migration_file *migf,
@@ -149,12 +142,22 @@ static ssize_t mlx5vf_save_read(struct file *filp, char __user *buf, size_t len,
return -ESPIPE;
pos = &filp->f_pos;
+ if (!(filp->f_flags & O_NONBLOCK)) {
+ if (wait_event_interruptible(migf->poll_wait,
+ READ_ONCE(migf->total_length) || migf->is_err))
+ return -ERESTARTSYS;
+ }
+
mutex_lock(&migf->lock);
+ if ((filp->f_flags & O_NONBLOCK) && !READ_ONCE(migf->total_length)) {
+ done = -EAGAIN;
+ goto out_unlock;
+ }
if (*pos > migf->total_length) {
done = -EINVAL;
goto out_unlock;
}
- if (migf->disabled) {
+ if (migf->disabled || migf->is_err) {
done = -ENODEV;
goto out_unlock;
}
@@ -194,9 +197,28 @@ out_unlock:
return done;
}
+static __poll_t mlx5vf_save_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct mlx5_vf_migration_file *migf = filp->private_data;
+ __poll_t pollflags = 0;
+
+ poll_wait(filp, &migf->poll_wait, wait);
+
+ mutex_lock(&migf->lock);
+ if (migf->disabled || migf->is_err)
+ pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+ else if (READ_ONCE(migf->total_length))
+ pollflags = EPOLLIN | EPOLLRDNORM;
+ mutex_unlock(&migf->lock);
+
+ return pollflags;
+}
+
static const struct file_operations mlx5vf_save_fops = {
.owner = THIS_MODULE,
.read = mlx5vf_save_read,
+ .poll = mlx5vf_save_poll,
.release = mlx5vf_release_file,
.llseek = no_llseek,
};
@@ -222,9 +244,11 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev)
stream_open(migf->filp->f_inode, migf->filp);
mutex_init(&migf->lock);
-
- ret = mlx5vf_cmd_query_vhca_migration_state(
- mvdev->core_device.pdev, mvdev->vhca_id, &migf->total_length);
+ init_waitqueue_head(&migf->poll_wait);
+ mlx5_cmd_init_async_ctx(mvdev->mdev, &migf->async_ctx);
+ INIT_WORK(&migf->async_data.work, mlx5vf_mig_file_cleanup_cb);
+ ret = mlx5vf_cmd_query_vhca_migration_state(mvdev,
+ &migf->total_length);
if (ret)
goto out_free;
@@ -233,8 +257,8 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev)
if (ret)
goto out_free;
- ret = mlx5vf_cmd_save_vhca_state(mvdev->core_device.pdev,
- mvdev->vhca_id, migf);
+ migf->mvdev = mvdev;
+ ret = mlx5vf_cmd_save_vhca_state(mvdev, migf);
if (ret)
goto out_free;
return migf;
@@ -339,7 +363,7 @@ mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev)
return migf;
}
-static void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
+void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
{
if (mvdev->resuming_migf) {
mlx5vf_disable_fd(mvdev->resuming_migf);
@@ -347,6 +371,8 @@ static void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
mvdev->resuming_migf = NULL;
}
if (mvdev->saving_migf) {
+ mlx5_cmd_cleanup_async_ctx(&mvdev->saving_migf->async_ctx);
+ cancel_work_sync(&mvdev->saving_migf->async_data.work);
mlx5vf_disable_fd(mvdev->saving_migf);
fput(mvdev->saving_migf->filp);
mvdev->saving_migf = NULL;
@@ -361,8 +387,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
int ret;
if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) {
- ret = mlx5vf_cmd_suspend_vhca(
- mvdev->core_device.pdev, mvdev->vhca_id,
+ ret = mlx5vf_cmd_suspend_vhca(mvdev,
MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER);
if (ret)
return ERR_PTR(ret);
@@ -370,8 +395,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
}
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
- ret = mlx5vf_cmd_resume_vhca(
- mvdev->core_device.pdev, mvdev->vhca_id,
+ ret = mlx5vf_cmd_resume_vhca(mvdev,
MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER);
if (ret)
return ERR_PTR(ret);
@@ -379,8 +403,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
}
if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
- ret = mlx5vf_cmd_suspend_vhca(
- mvdev->core_device.pdev, mvdev->vhca_id,
+ ret = mlx5vf_cmd_suspend_vhca(mvdev,
MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR);
if (ret)
return ERR_PTR(ret);
@@ -388,8 +411,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
}
if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) {
- ret = mlx5vf_cmd_resume_vhca(
- mvdev->core_device.pdev, mvdev->vhca_id,
+ ret = mlx5vf_cmd_resume_vhca(mvdev,
MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR);
if (ret)
return ERR_PTR(ret);
@@ -424,8 +446,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
}
if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
- ret = mlx5vf_cmd_load_vhca_state(mvdev->core_device.pdev,
- mvdev->vhca_id,
+ ret = mlx5vf_cmd_load_vhca_state(mvdev,
mvdev->resuming_migf);
if (ret)
return ERR_PTR(ret);
@@ -444,7 +465,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
* This function is called in all state_mutex unlock cases to
* handle a 'deferred_reset' if exists.
*/
-static void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev)
+void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev)
{
again:
spin_lock(&mvdev->reset_lock);
@@ -505,7 +526,7 @@ static int mlx5vf_pci_get_device_state(struct vfio_device *vdev,
static void mlx5vf_pci_aer_reset_done(struct pci_dev *pdev)
{
- struct mlx5vf_pci_core_device *mvdev = dev_get_drvdata(&pdev->dev);
+ struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev);
if (!mvdev->migrate_cap)
return;
@@ -532,34 +553,16 @@ static int mlx5vf_pci_open_device(struct vfio_device *core_vdev)
struct mlx5vf_pci_core_device *mvdev = container_of(
core_vdev, struct mlx5vf_pci_core_device, core_device.vdev);
struct vfio_pci_core_device *vdev = &mvdev->core_device;
- int vf_id;
int ret;
ret = vfio_pci_core_enable(vdev);
if (ret)
return ret;
- if (!mvdev->migrate_cap) {
- vfio_pci_core_finish_enable(vdev);
- return 0;
- }
-
- vf_id = pci_iov_vf_id(vdev->pdev);
- if (vf_id < 0) {
- ret = vf_id;
- goto out_disable;
- }
-
- ret = mlx5vf_cmd_get_vhca_id(vdev->pdev, vf_id + 1, &mvdev->vhca_id);
- if (ret)
- goto out_disable;
-
- mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+ if (mvdev->migrate_cap)
+ mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
vfio_pci_core_finish_enable(vdev);
return 0;
-out_disable:
- vfio_pci_core_disable(vdev);
- return ret;
}
static void mlx5vf_pci_close_device(struct vfio_device *core_vdev)
@@ -596,32 +599,15 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev,
if (!mvdev)
return -ENOMEM;
vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops);
-
- if (pdev->is_virtfn) {
- struct mlx5_core_dev *mdev =
- mlx5_vf_get_core_dev(pdev);
-
- if (mdev) {
- if (MLX5_CAP_GEN(mdev, migration)) {
- mvdev->migrate_cap = 1;
- mvdev->core_device.vdev.migration_flags =
- VFIO_MIGRATION_STOP_COPY |
- VFIO_MIGRATION_P2P;
- mutex_init(&mvdev->state_mutex);
- spin_lock_init(&mvdev->reset_lock);
- }
- mlx5_vf_put_core_dev(mdev);
- }
- }
-
+ mlx5vf_cmd_set_migratable(mvdev);
+ dev_set_drvdata(&pdev->dev, &mvdev->core_device);
ret = vfio_pci_core_register_device(&mvdev->core_device);
if (ret)
goto out_free;
-
- dev_set_drvdata(&pdev->dev, mvdev);
return 0;
out_free:
+ mlx5vf_cmd_remove_migratable(mvdev);
vfio_pci_core_uninit_device(&mvdev->core_device);
kfree(mvdev);
return ret;
@@ -629,9 +615,10 @@ out_free:
static void mlx5vf_pci_remove(struct pci_dev *pdev)
{
- struct mlx5vf_pci_core_device *mvdev = dev_get_drvdata(&pdev->dev);
+ struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev);
vfio_pci_core_unregister_device(&mvdev->core_device);
+ mlx5vf_cmd_remove_migratable(mvdev);
vfio_pci_core_uninit_device(&mvdev->core_device);
kfree(mvdev);
}
@@ -654,6 +641,7 @@ static struct pci_driver mlx5vf_pci_driver = {
.probe = mlx5vf_pci_probe,
.remove = mlx5vf_pci_remove,
.err_handler = &mlx5vf_err_handlers,
+ .driver_managed_dma = true,
};
static void __exit mlx5vf_pci_cleanup(void)
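
With the asynchronous save above, the migration file becomes a stream: blocking readers sleep on poll_wait until the callback publishes total_length (or flags an error), non-blocking readers get -EAGAIN, and poll() reports EPOLLIN once data exists. The sketch below is a simplified kernel-style rendering of that read/poll logic with the mlx5 bookkeeping stripped out; only the wait-queue and poll primitives are real kernel API, the toy_* names are not.

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct toy_migf {
	wait_queue_head_t poll_wait;
	size_t total_length;	/* written by the async completion */
	bool is_err;
};

static ssize_t toy_read(struct file *filp, char __user *buf, size_t len,
			loff_t *pos)
{
	struct toy_migf *migf = filp->private_data;

	if (!(filp->f_flags & O_NONBLOCK)) {
		if (wait_event_interruptible(migf->poll_wait,
				READ_ONCE(migf->total_length) || migf->is_err))
			return -ERESTARTSYS;
	} else if (!READ_ONCE(migf->total_length)) {
		return -EAGAIN;		/* nothing produced yet */
	}
	if (migf->is_err)
		return -ENODEV;
	/* ... copy up to total_length bytes out to userspace ... */
	return 0;
}

static __poll_t toy_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct toy_migf *migf = filp->private_data;

	poll_wait(filp, &migf->poll_wait, wait);
	return READ_ONCE(migf->total_length) ? EPOLLIN | EPOLLRDNORM : 0;
}
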
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 2b047469e02f..4d1a97415a27 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -151,10 +151,10 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
vfio_pci_core_init_device(vdev, pdev, &vfio_pci_ops);
+ dev_set_drvdata(&pdev->dev, vdev);
ret = vfio_pci_core_register_device(vdev);
if (ret)
goto out_free;
- dev_set_drvdata(&pdev->dev, vdev);
return 0;
out_free:
@@ -174,10 +174,12 @@ static void vfio_pci_remove(struct pci_dev *pdev)
static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
{
+ struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);
+
if (!enable_sriov)
return -ENOENT;
- return vfio_pci_core_sriov_configure(pdev, nr_virtfn);
+ return vfio_pci_core_sriov_configure(vdev, nr_virtfn);
}
static const struct pci_device_id vfio_pci_table[] = {
@@ -194,6 +196,7 @@ static struct pci_driver vfio_pci_driver = {
.remove = vfio_pci_remove,
.sriov_configure = vfio_pci_sriov_configure,
.err_handler = &vfio_pci_core_err_handlers,
+ .driver_managed_dma = true,
};
static void __init vfio_pci_fill_ids(void)
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 6e58b4bf7a60..9343f597182d 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -402,11 +402,14 @@ bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev)
u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
/*
+ * Memory region cannot be accessed if device power state is D3.
+ *
* SR-IOV VF memory enable is handled by the MSE bit in the
* PF SR-IOV capability, there's therefore no need to trigger
* faults based on the virtual value.
*/
- return pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY);
+ return pdev->current_state < PCI_D3hot &&
+ (pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY));
}
/*
@@ -692,6 +695,22 @@ static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
return 0;
}
+/*
+ * It takes all the required locks to protect the access of power related
+ * variables and then invokes vfio_pci_set_power_state().
+ */
+static void vfio_lock_and_set_power_state(struct vfio_pci_core_device *vdev,
+ pci_power_t state)
+{
+ if (state >= PCI_D3hot)
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ else
+ down_write(&vdev->memory_lock);
+
+ vfio_pci_set_power_state(vdev, state);
+ up_write(&vdev->memory_lock);
+}
+
static int vfio_pm_config_write(struct vfio_pci_core_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 val)
@@ -718,7 +737,7 @@ static int vfio_pm_config_write(struct vfio_pci_core_device *vdev, int pos,
break;
}
- vfio_pci_set_power_state(vdev, state);
+ vfio_lock_and_set_power_state(vdev, state);
}
return count;
@@ -739,11 +758,28 @@ static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
/*
+ * The guest can't process PME events. If a PME event is generated,
+ * it will mostly be handled in the host and the host will clear
+ * PME_STATUS. So virtualize the PME_Support bits.
+ * The vconfig bits will be cleared during device capability
+ * initialization.
+ */
+ p_setw(perm, PCI_PM_PMC, PCI_PM_CAP_PME_MASK, NO_WRITE);
+
+ /*
* Power management is defined *per function*, so we can let
* the user change power state, but we trap and initiate the
* change ourselves, so the state bits are read-only.
+ *
+ * The guest can't process PME from D3cold so virtualize PME_Status
+ * and PME_En bits. The vconfig bits will be cleared during device
+ * capability initialization.
*/
- p_setd(perm, PCI_PM_CTRL, NO_VIRT, ~PCI_PM_CTRL_STATE_MASK);
+ p_setd(perm, PCI_PM_CTRL,
+ PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS,
+ ~(PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS |
+ PCI_PM_CTRL_STATE_MASK));
+
return 0;
}
@@ -1412,6 +1448,17 @@ static int vfio_ext_cap_len(struct vfio_pci_core_device *vdev, u16 ecap, u16 epo
return 0;
}
+static void vfio_update_pm_vconfig_bytes(struct vfio_pci_core_device *vdev,
+ int offset)
+{
+ __le16 *pmc = (__le16 *)&vdev->vconfig[offset + PCI_PM_PMC];
+ __le16 *ctrl = (__le16 *)&vdev->vconfig[offset + PCI_PM_CTRL];
+
+ /* Clear vconfig PME_Support, PME_Status, and PME_En bits */
+ *pmc &= ~cpu_to_le16(PCI_PM_CAP_PME_MASK);
+ *ctrl &= ~cpu_to_le16(PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS);
+}
+
static int vfio_fill_vconfig_bytes(struct vfio_pci_core_device *vdev,
int offset, int size)
{
@@ -1535,6 +1582,9 @@ static int vfio_cap_init(struct vfio_pci_core_device *vdev)
if (ret)
return ret;
+ if (cap == PCI_CAP_ID_PM)
+ vfio_update_pm_vconfig_bytes(vdev, pos);
+
prev = &vdev->vconfig[pos + PCI_CAP_LIST_NEXT];
pos = next;
caps++;
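
The net effect of the PM-capability changes above is that the PME_Support field of PMC and the PME_Status/PME_En bits of PMCSR read back as zero in the virtual config space and ignore guest writes. Below is a small runnable demonstration of the masking itself on sample register values; the mask constants match include/uapi/linux/pci_regs.h.

#include <stdint.h>
#include <stdio.h>

#define PCI_PM_CAP_PME_MASK      0xF800	/* PME_Support, PMC bits 11-15 */
#define PCI_PM_CTRL_PME_ENABLE   0x0100	/* PME_En */
#define PCI_PM_CTRL_PME_STATUS   0x8000	/* PME_Status */

int main(void)
{
	uint16_t pmc  = 0xFFC3;	/* sample PMC: PME supported in all states */
	uint16_t ctrl = 0x8103;	/* sample PMCSR: PME_Status | PME_En | D3hot */

	/* What vfio_update_pm_vconfig_bytes() does to the virtual config space */
	pmc  &= (uint16_t)~PCI_PM_CAP_PME_MASK;
	ctrl &= (uint16_t)~(PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS);

	printf("virtual PMC=0x%04x PMCSR=0x%04x\n", pmc, ctrl);	/* 0x07c3, 0x0003 */
	return 0;
}
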
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 06b6f3594a13..a0d69ddaf90d 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -156,7 +156,7 @@ no_mmap:
}
struct vfio_pci_group_info;
-static bool vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set);
+static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set);
static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
struct vfio_pci_group_info *groups);
@@ -217,6 +217,10 @@ int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t stat
bool needs_restore = false, needs_save = false;
int ret;
+ /* Prevent changing power state for PFs with VFs enabled */
+ if (pci_num_vf(pdev) && state > PCI_D0)
+ return -EBUSY;
+
if (vdev->needs_pm_restore) {
if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
pci_save_state(pdev);
@@ -255,6 +259,17 @@ int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t stat
return ret;
}
+/*
+ * dev_pm_ops needs to be provided for pci-driver runtime PM to work,
+ * so use a structure without any callbacks.
+ *
+ * The pci-driver core runtime PM routines always save the device state
+ * before going into the suspended state. If the device goes into a low
+ * power state only through the runtime PM ops, then no explicit handling
+ * is needed for devices which have NoSoftRst-.
+ */
+static const struct dev_pm_ops vfio_pci_core_pm_ops = { };
+
int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
@@ -262,21 +277,23 @@ int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
u16 cmd;
u8 msix_pos;
- vfio_pci_set_power_state(vdev, PCI_D0);
+ if (!disable_idle_d3) {
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ return ret;
+ }
/* Don't allow our initial saved state to include busmaster */
pci_clear_master(pdev);
ret = pci_enable_device(pdev);
if (ret)
- return ret;
+ goto out_power;
/* If reset fails because of the device lock, fail this path entirely */
ret = pci_try_reset_function(pdev);
- if (ret == -EAGAIN) {
- pci_disable_device(pdev);
- return ret;
- }
+ if (ret == -EAGAIN)
+ goto out_disable_device;
vdev->reset_works = !ret;
pci_save_state(pdev);
@@ -300,12 +317,8 @@ int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
}
ret = vfio_config_init(vdev);
- if (ret) {
- kfree(vdev->pci_saved_state);
- vdev->pci_saved_state = NULL;
- pci_disable_device(pdev);
- return ret;
- }
+ if (ret)
+ goto out_free_state;
msix_pos = pdev->msix_cap;
if (msix_pos) {
@@ -326,6 +339,16 @@ int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
return 0;
+
+out_free_state:
+ kfree(vdev->pci_saved_state);
+ vdev->pci_saved_state = NULL;
+out_disable_device:
+ pci_disable_device(pdev);
+out_power:
+ if (!disable_idle_d3)
+ pm_runtime_put(&pdev->dev);
+ return ret;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_enable);
@@ -433,8 +456,11 @@ void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
out:
pci_disable_device(pdev);
- if (!vfio_pci_dev_set_try_reset(vdev->vdev.dev_set) && !disable_idle_d3)
- vfio_pci_set_power_state(vdev, PCI_D3hot);
+ vfio_pci_dev_set_try_reset(vdev->vdev.dev_set);
+
+ /* Put the pm-runtime usage counter acquired during enable */
+ if (!disable_idle_d3)
+ pm_runtime_put(&pdev->dev);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_disable);
@@ -556,7 +582,7 @@ static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
struct vfio_pci_group_info {
int count;
- struct vfio_group **groups;
+ struct file **files;
};
static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
@@ -1018,10 +1044,10 @@ reset_info_exit:
} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
struct vfio_pci_hot_reset hdr;
int32_t *group_fds;
- struct vfio_group **groups;
+ struct file **files;
struct vfio_pci_group_info info;
bool slot = false;
- int group_idx, count = 0, ret = 0;
+ int file_idx, count = 0, ret = 0;
minsz = offsetofend(struct vfio_pci_hot_reset, count);
@@ -1054,17 +1080,17 @@ reset_info_exit:
return -EINVAL;
group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
- groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
- if (!group_fds || !groups) {
+ files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL);
+ if (!group_fds || !files) {
kfree(group_fds);
- kfree(groups);
+ kfree(files);
return -ENOMEM;
}
if (copy_from_user(group_fds, (void __user *)(arg + minsz),
hdr.count * sizeof(*group_fds))) {
kfree(group_fds);
- kfree(groups);
+ kfree(files);
return -EFAULT;
}
@@ -1073,22 +1099,22 @@ reset_info_exit:
* user interface and store the group and iommu ID. This
* ensures the group is held across the reset.
*/
- for (group_idx = 0; group_idx < hdr.count; group_idx++) {
- struct vfio_group *group;
- struct fd f = fdget(group_fds[group_idx]);
- if (!f.file) {
+ for (file_idx = 0; file_idx < hdr.count; file_idx++) {
+ struct file *file = fget(group_fds[file_idx]);
+
+ if (!file) {
ret = -EBADF;
break;
}
- group = vfio_group_get_external_user(f.file);
- fdput(f);
- if (IS_ERR(group)) {
- ret = PTR_ERR(group);
+ /* Ensure the FD is a vfio group FD. */
+ if (!vfio_file_iommu_group(file)) {
+ fput(file);
+ ret = -EINVAL;
break;
}
- groups[group_idx] = group;
+ files[file_idx] = file;
}
kfree(group_fds);
@@ -1098,15 +1124,15 @@ reset_info_exit:
goto hot_reset_release;
info.count = hdr.count;
- info.groups = groups;
+ info.files = files;
ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);
hot_reset_release:
- for (group_idx--; group_idx >= 0; group_idx--)
- vfio_group_put_external_user(groups[group_idx]);
+ for (file_idx--; file_idx >= 0; file_idx--)
+ fput(files[file_idx]);
- kfree(groups);
+ kfree(files);
return ret;
} else if (cmd == VFIO_DEVICE_IOEVENTFD) {
struct vfio_device_ioeventfd ioeventfd;
@@ -1819,8 +1845,13 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_uninit_device);
int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
+ struct device *dev = &pdev->dev;
int ret;
+ /* Drivers must set the vfio_pci_core_device to their drvdata */
+ if (WARN_ON(vdev != dev_get_drvdata(dev)))
+ return -EINVAL;
+
if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
return -EINVAL;
@@ -1860,19 +1891,21 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
vfio_pci_probe_power_state(vdev);
- if (!disable_idle_d3) {
- /*
- * pci-core sets the device power state to an unknown value at
- * bootup and after being removed from a driver. The only
- * transition it allows from this unknown state is to D0, which
- * typically happens when a driver calls pci_enable_device().
- * We're not ready to enable the device yet, but we do want to
- * be able to get to D3. Therefore first do a D0 transition
- * before going to D3.
- */
- vfio_pci_set_power_state(vdev, PCI_D0);
- vfio_pci_set_power_state(vdev, PCI_D3hot);
- }
+ /*
+ * pci-core sets the device power state to an unknown value at
+ * bootup and after being removed from a driver. The only
+ * transition it allows from this unknown state is to D0, which
+ * typically happens when a driver calls pci_enable_device().
+ * We're not ready to enable the device yet, but we do want to
+ * be able to get to D3. Therefore first do a D0 transition
+ * before enabling runtime PM.
+ */
+ vfio_pci_set_power_state(vdev, PCI_D0);
+
+ dev->driver->pm = &vfio_pci_core_pm_ops;
+ pm_runtime_allow(dev);
+ if (!disable_idle_d3)
+ pm_runtime_put(dev);
ret = vfio_register_group_dev(&vdev->vdev);
if (ret)
@@ -1881,7 +1914,9 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
out_power:
if (!disable_idle_d3)
- vfio_pci_set_power_state(vdev, PCI_D0);
+ pm_runtime_get_noresume(dev);
+
+ pm_runtime_forbid(dev);
out_vf:
vfio_pci_vf_uninit(vdev);
return ret;
@@ -1890,9 +1925,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_register_device);
void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
{
- struct pci_dev *pdev = vdev->pdev;
-
- vfio_pci_core_sriov_configure(pdev, 0);
+ vfio_pci_core_sriov_configure(vdev, 0);
vfio_unregister_group_dev(&vdev->vdev);
@@ -1900,21 +1933,16 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
vfio_pci_vga_uninit(vdev);
if (!disable_idle_d3)
- vfio_pci_set_power_state(vdev, PCI_D0);
+ pm_runtime_get_noresume(&vdev->pdev->dev);
+
+ pm_runtime_forbid(&vdev->pdev->dev);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_unregister_device);
pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- struct vfio_pci_core_device *vdev;
- struct vfio_device *device;
-
- device = vfio_device_get_from_dev(&pdev->dev);
- if (device == NULL)
- return PCI_ERS_RESULT_DISCONNECT;
-
- vdev = container_of(device, struct vfio_pci_core_device, vdev);
+ struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);
mutex_lock(&vdev->igate);
@@ -1923,26 +1951,18 @@ pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
mutex_unlock(&vdev->igate);
- vfio_device_put(device);
-
return PCI_ERS_RESULT_CAN_RECOVER;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);
-int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
+int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
+ int nr_virtfn)
{
- struct vfio_pci_core_device *vdev;
- struct vfio_device *device;
+ struct pci_dev *pdev = vdev->pdev;
int ret = 0;
device_lock_assert(&pdev->dev);
- device = vfio_device_get_from_dev(&pdev->dev);
- if (!device)
- return -ENODEV;
-
- vdev = container_of(device, struct vfio_pci_core_device, vdev);
-
if (nr_virtfn) {
mutex_lock(&vfio_pci_sriov_pfs_mutex);
/*
@@ -1957,22 +1977,42 @@ int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
}
list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs);
mutex_unlock(&vfio_pci_sriov_pfs_mutex);
- ret = pci_enable_sriov(pdev, nr_virtfn);
+
+ /*
+ * The PF power state should always be higher than the VF power
+ * state. The PF can be in low power state either with runtime
+ * power management (when there is no user) or PCI_PM_CTRL
+ * register write by the user. If PF is in the low power state,
+ * then change the power state to D0 first before enabling
+ * SR-IOV. Also, this function can be called at any time, and
+ * userspace PCI_PM_CTRL write can race against this code path,
+ * so protect this path with 'memory_lock'.
+ */
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret)
goto out_del;
- ret = nr_virtfn;
- goto out_put;
+
+ down_write(&vdev->memory_lock);
+ vfio_pci_set_power_state(vdev, PCI_D0);
+ ret = pci_enable_sriov(pdev, nr_virtfn);
+ up_write(&vdev->memory_lock);
+ if (ret) {
+ pm_runtime_put(&pdev->dev);
+ goto out_del;
+ }
+ return nr_virtfn;
}
- pci_disable_sriov(pdev);
+ if (pci_num_vf(pdev)) {
+ pci_disable_sriov(pdev);
+ pm_runtime_put(&pdev->dev);
+ }
out_del:
mutex_lock(&vfio_pci_sriov_pfs_mutex);
list_del_init(&vdev->sriov_pfs_item);
out_unlock:
mutex_unlock(&vfio_pci_sriov_pfs_mutex);
-out_put:
- vfio_device_put(device);
return ret;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
@@ -1988,7 +2028,7 @@ static bool vfio_dev_in_groups(struct vfio_pci_core_device *vdev,
unsigned int i;
for (i = 0; i < groups->count; i++)
- if (groups->groups[i] == vdev->vdev.group)
+ if (vfio_file_has_dev(groups->files[i], &vdev->vdev))
return true;
return false;
}
@@ -2041,6 +2081,27 @@ vfio_pci_dev_set_resettable(struct vfio_device_set *dev_set)
return pdev;
}
+static int vfio_pci_dev_set_pm_runtime_get(struct vfio_device_set *dev_set)
+{
+ struct vfio_pci_core_device *cur;
+ int ret;
+
+ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
+ ret = pm_runtime_resume_and_get(&cur->pdev->dev);
+ if (ret)
+ goto unwind;
+ }
+
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(cur, &dev_set->device_list,
+ vdev.dev_set_list)
+ pm_runtime_put(&cur->pdev->dev);
+
+ return ret;
+}
+
/*
* We need to get memory_lock for each device, but devices can share mmap_lock,
* therefore we need to zap and hold the vma_lock for each device, and only then
@@ -2147,43 +2208,38 @@ static bool vfio_pci_dev_set_needs_reset(struct vfio_device_set *dev_set)
* - At least one of the affected devices is marked dirty via
* needs_reset (such as by lack of FLR support)
* Then attempt to perform that bus or slot reset.
- * Returns true if the dev_set was reset.
*/
-static bool vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set)
+static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set)
{
struct vfio_pci_core_device *cur;
struct pci_dev *pdev;
- int ret;
+ bool reset_done = false;
if (!vfio_pci_dev_set_needs_reset(dev_set))
- return false;
+ return;
pdev = vfio_pci_dev_set_resettable(dev_set);
if (!pdev)
- return false;
+ return;
/*
- * The pci_reset_bus() will reset all the devices in the bus.
- * The power state can be non-D0 for some of the devices in the bus.
- * For these devices, the pci_reset_bus() will internally set
- * the power state to D0 without vfio driver involvement.
- * For the devices which have NoSoftRst-, the reset function can
- * cause the PCI config space reset without restoring the original
- * state (saved locally in 'vdev->pm_save').
+ * Some of the devices in the bus can be in the runtime suspended
+ * state. Increment the usage count for all the devices in the dev_set
+ * before the reset and decrement it again afterwards.
*/
- list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
- vfio_pci_set_power_state(cur, PCI_D0);
+ if (!disable_idle_d3 && vfio_pci_dev_set_pm_runtime_get(dev_set))
+ return;
- ret = pci_reset_bus(pdev);
- if (ret)
- return false;
+ if (!pci_reset_bus(pdev))
+ reset_done = true;
list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
- cur->needs_reset = false;
+ if (reset_done)
+ cur->needs_reset = false;
+
if (!disable_idle_d3)
- vfio_pci_set_power_state(cur, PCI_D3hot);
+ pm_runtime_put(&cur->pdev->dev);
}
- return true;
}
void vfio_pci_core_set_params(bool is_nointxmask, bool is_disable_vga,
diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c
index badfffea14fb..1aaa4f721bd2 100644
--- a/drivers/vfio/platform/vfio_amba.c
+++ b/drivers/vfio/platform/vfio_amba.c
@@ -95,6 +95,7 @@ static struct amba_driver vfio_amba_driver = {
.name = "vfio-amba",
.owner = THIS_MODULE,
},
+ .driver_managed_dma = true,
};
module_amba_driver(vfio_amba_driver);
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
index 68a1c87066d7..04f40c5acfd6 100644
--- a/drivers/vfio/platform/vfio_platform.c
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -76,6 +76,7 @@ static struct platform_driver vfio_platform_driver = {
.driver = {
.name = "vfio-platform",
},
+ .driver_managed_dma = true,
};
module_platform_driver(vfio_platform_driver);
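The one-line additions to vfio-amba and vfio-platform above opt these bus drivers into the new driver-core DMA-ownership scheme: a driver that sets driver_managed_dma declares that it claims DMA ownership itself (VFIO does so through iommu_group_claim_dma_owner() when a container is attached, see vfio.c below), so the core must not do it at bind time. A hedged sketch of the pattern; my_vfio_like_driver and my_probe are invented names.

#include <linux/module.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	/* no DMA set up here; ownership is claimed later through the IOMMU core */
	return 0;
}

static struct platform_driver my_vfio_like_driver = {
	.probe = my_probe,
	.driver = {
		.name = "my-vfio-like",
	},
	/* this driver manages DMA ownership itself, skip core handling */
	.driver_managed_dma = true,
};
module_platform_driver(my_vfio_like_driver);

MODULE_LICENSE("GPL");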
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index a4555014bd1e..61e71c1154be 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -62,30 +62,22 @@ struct vfio_container {
bool noiommu;
};
-struct vfio_unbound_dev {
- struct device *dev;
- struct list_head unbound_next;
-};
-
struct vfio_group {
struct device dev;
struct cdev cdev;
refcount_t users;
- atomic_t container_users;
+ unsigned int container_users;
struct iommu_group *iommu_group;
struct vfio_container *container;
struct list_head device_list;
struct mutex device_lock;
- struct notifier_block nb;
struct list_head vfio_next;
struct list_head container_next;
- struct list_head unbound_list;
- struct mutex unbound_lock;
- atomic_t opened;
- wait_queue_head_t container_q;
enum vfio_group_type type;
unsigned int dev_counter;
+ struct rw_semaphore group_rwsem;
struct kvm *kvm;
+ struct file *opened_file;
struct blocking_notifier_head notifier;
};
@@ -281,8 +273,6 @@ void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
-static int vfio_iommu_group_notifier(struct notifier_block *nb,
- unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);
/*
@@ -340,16 +330,8 @@ vfio_group_get_from_iommu(struct iommu_group *iommu_group)
static void vfio_group_release(struct device *dev)
{
struct vfio_group *group = container_of(dev, struct vfio_group, dev);
- struct vfio_unbound_dev *unbound, *tmp;
-
- list_for_each_entry_safe(unbound, tmp,
- &group->unbound_list, unbound_next) {
- list_del(&unbound->unbound_next);
- kfree(unbound);
- }
mutex_destroy(&group->device_lock);
- mutex_destroy(&group->unbound_lock);
iommu_group_put(group->iommu_group);
ida_free(&vfio.group_ida, MINOR(group->dev.devt));
kfree(group);
@@ -379,11 +361,9 @@ static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group,
group->cdev.owner = THIS_MODULE;
refcount_set(&group->users, 1);
+ init_rwsem(&group->group_rwsem);
INIT_LIST_HEAD(&group->device_list);
mutex_init(&group->device_lock);
- INIT_LIST_HEAD(&group->unbound_list);
- mutex_init(&group->unbound_lock);
- init_waitqueue_head(&group->container_q);
group->iommu_group = iommu_group;
/* put in vfio_group_release() */
iommu_group_ref_get(iommu_group);
@@ -412,13 +392,6 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
goto err_put;
}
- group->nb.notifier_call = vfio_iommu_group_notifier;
- err = iommu_group_register_notifier(iommu_group, &group->nb);
- if (err) {
- ret = ERR_PTR(err);
- goto err_put;
- }
-
mutex_lock(&vfio.group_lock);
/* Did we race creating this group? */
@@ -439,7 +412,6 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
err_unlock:
mutex_unlock(&vfio.group_lock);
- iommu_group_unregister_notifier(group->iommu_group, &group->nb);
err_put:
put_device(&group->dev);
return ret;
@@ -457,14 +429,13 @@ static void vfio_group_put(struct vfio_group *group)
* properly hold the group reference.
*/
WARN_ON(!list_empty(&group->device_list));
- WARN_ON(atomic_read(&group->container_users));
+ WARN_ON(group->container || group->container_users);
WARN_ON(group->notifier.head);
list_del(&group->vfio_next);
cdev_device_del(&group->cdev, &group->dev);
mutex_unlock(&vfio.group_lock);
- iommu_group_unregister_notifier(group->iommu_group, &group->nb);
put_device(&group->dev);
}
@@ -473,31 +444,15 @@ static void vfio_group_get(struct vfio_group *group)
refcount_inc(&group->users);
}
-static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
-{
- struct iommu_group *iommu_group;
- struct vfio_group *group;
-
- iommu_group = iommu_group_get(dev);
- if (!iommu_group)
- return NULL;
-
- group = vfio_group_get_from_iommu(iommu_group);
- iommu_group_put(iommu_group);
-
- return group;
-}
-
/*
* Device objects - create, release, get, put, search
*/
/* Device reference always implies a group reference */
-void vfio_device_put(struct vfio_device *device)
+static void vfio_device_put(struct vfio_device *device)
{
if (refcount_dec_and_test(&device->refcount))
complete(&device->comp);
}
-EXPORT_SYMBOL_GPL(vfio_device_put);
static bool vfio_device_try_get(struct vfio_device *device)
{
@@ -521,175 +476,6 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
}
/*
- * Some drivers, like pci-stub, are only used to prevent other drivers from
- * claiming a device and are therefore perfectly legitimate for a user owned
- * group. The pci-stub driver has no dependencies on DMA or the IOVA mapping
- * of the device, but it does prevent the user from having direct access to
- * the device, which is useful in some circumstances.
- *
- * We also assume that we can include PCI interconnect devices, ie. bridges.
- * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge
- * then all of the downstream devices will be part of the same IOMMU group as
- * the bridge. Thus, if placing the bridge into the user owned IOVA space
- * breaks anything, it only does so for user owned devices downstream. Note
- * that error notification via MSI can be affected for platforms that handle
- * MSI within the same IOVA space as DMA.
- */
-static const char * const vfio_driver_allowed[] = { "pci-stub" };
-
-static bool vfio_dev_driver_allowed(struct device *dev,
- struct device_driver *drv)
-{
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
- return true;
- }
-
- return match_string(vfio_driver_allowed,
- ARRAY_SIZE(vfio_driver_allowed),
- drv->name) >= 0;
-}
-
-/*
- * A vfio group is viable for use by userspace if all devices are in
- * one of the following states:
- * - driver-less
- * - bound to a vfio driver
- * - bound to an otherwise allowed driver
- * - a PCI interconnect device
- *
- * We use two methods to determine whether a device is bound to a vfio
- * driver. The first is to test whether the device exists in the vfio
- * group. The second is to test if the device exists on the group
- * unbound_list, indicating it's in the middle of transitioning from
- * a vfio driver to driver-less.
- */
-static int vfio_dev_viable(struct device *dev, void *data)
-{
- struct vfio_group *group = data;
- struct vfio_device *device;
- struct device_driver *drv = READ_ONCE(dev->driver);
- struct vfio_unbound_dev *unbound;
- int ret = -EINVAL;
-
- mutex_lock(&group->unbound_lock);
- list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
- if (dev == unbound->dev) {
- ret = 0;
- break;
- }
- }
- mutex_unlock(&group->unbound_lock);
-
- if (!ret || !drv || vfio_dev_driver_allowed(dev, drv))
- return 0;
-
- device = vfio_group_get_device(group, dev);
- if (device) {
- vfio_device_put(device);
- return 0;
- }
-
- return ret;
-}
-
-/*
- * Async device support
- */
-static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
-{
- struct vfio_device *device;
-
- /* Do we already know about it? We shouldn't */
- device = vfio_group_get_device(group, dev);
- if (WARN_ON_ONCE(device)) {
- vfio_device_put(device);
- return 0;
- }
-
- /* Nothing to do for idle groups */
- if (!atomic_read(&group->container_users))
- return 0;
-
- /* TODO Prevent device auto probing */
- dev_WARN(dev, "Device added to live group %d!\n",
- iommu_group_id(group->iommu_group));
-
- return 0;
-}
-
-static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
-{
- /* We don't care what happens when the group isn't in use */
- if (!atomic_read(&group->container_users))
- return 0;
-
- return vfio_dev_viable(dev, group);
-}
-
-static int vfio_iommu_group_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct vfio_group *group = container_of(nb, struct vfio_group, nb);
- struct device *dev = data;
- struct vfio_unbound_dev *unbound;
-
- switch (action) {
- case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
- vfio_group_nb_add_dev(group, dev);
- break;
- case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
- /*
- * Nothing to do here. If the device is in use, then the
- * vfio sub-driver should block the remove callback until
- * it is unused. If the device is unused or attached to a
- * stub driver, then it should be released and we don't
- * care that it will be going away.
- */
- break;
- case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
- dev_dbg(dev, "%s: group %d binding to driver\n", __func__,
- iommu_group_id(group->iommu_group));
- break;
- case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
- dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__,
- iommu_group_id(group->iommu_group), dev->driver->name);
- BUG_ON(vfio_group_nb_verify(group, dev));
- break;
- case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
- dev_dbg(dev, "%s: group %d unbinding from driver %s\n",
- __func__, iommu_group_id(group->iommu_group),
- dev->driver->name);
- break;
- case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
- dev_dbg(dev, "%s: group %d unbound from driver\n", __func__,
- iommu_group_id(group->iommu_group));
- /*
- * XXX An unbound device in a live group is ok, but we'd
- * really like to avoid the above BUG_ON by preventing other
- * drivers from binding to it. Once that occurs, we have to
- * stop the system to maintain isolation. At a minimum, we'd
- * want a toggle to disable driver auto probe for this device.
- */
-
- mutex_lock(&group->unbound_lock);
- list_for_each_entry(unbound,
- &group->unbound_list, unbound_next) {
- if (dev == unbound->dev) {
- list_del(&unbound->unbound_next);
- kfree(unbound);
- break;
- }
- }
- mutex_unlock(&group->unbound_lock);
- break;
- }
- return NOTIFY_OK;
-}
-
-/*
* VFIO driver API
*/
void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
@@ -745,11 +531,11 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
iommu_group = iommu_group_get(dev);
#ifdef CONFIG_VFIO_NOIOMMU
- if (!iommu_group && noiommu && !iommu_present(dev->bus)) {
+ if (!iommu_group && noiommu) {
/*
* With noiommu enabled, create an IOMMU group for devices that
- * don't already have one and don't have an iommu_ops on their
- * bus. Taint the kernel because we're about to give a DMA
+ * don't already have one, implying no IOMMU hardware/driver
+ * exists. Taint the kernel because we're about to give a DMA
* capable device to a user without IOMMU protection.
*/
group = vfio_noiommu_group_alloc(dev, VFIO_NO_IOMMU);
@@ -815,6 +601,13 @@ static int __vfio_register_dev(struct vfio_device *device,
int vfio_register_group_dev(struct vfio_device *device)
{
+ /*
+ * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
+ * restore cache coherency.
+ */
+ if (!iommu_capable(device->dev->bus, IOMMU_CAP_CACHE_COHERENCY))
+ return -EINVAL;
+
return __vfio_register_dev(device,
vfio_group_find_or_alloc(device->dev));
}
@@ -831,29 +624,6 @@ int vfio_register_emulated_iommu_dev(struct vfio_device *device)
}
EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev);
-/*
- * Get a reference to the vfio_device for a device. Even if the
- * caller thinks they own the device, they could be racing with a
- * release call path, so we can't trust drvdata for the shortcut.
- * Go the long way around, from the iommu_group to the vfio_group
- * to the vfio_device.
- */
-struct vfio_device *vfio_device_get_from_dev(struct device *dev)
-{
- struct vfio_group *group;
- struct vfio_device *device;
-
- group = vfio_group_get_from_dev(dev);
- if (!group)
- return NULL;
-
- device = vfio_group_get_device(group, dev);
- vfio_group_put(group);
-
- return device;
-}
-EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
-
static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
char *buf)
{
@@ -889,29 +659,10 @@ static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
void vfio_unregister_group_dev(struct vfio_device *device)
{
struct vfio_group *group = device->group;
- struct vfio_unbound_dev *unbound;
unsigned int i = 0;
bool interrupted = false;
long rc;
- /*
- * When the device is removed from the group, the group suddenly
- * becomes non-viable; the device has a driver (until the unbind
- * completes), but it's not present in the group. This is bad news
- * for any external users that need to re-acquire a group reference
- * in order to match and release their existing reference. To
- * solve this, we track such devices on the unbound_list to bridge
- * the gap until they're fully unbound.
- */
- unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
- if (unbound) {
- unbound->dev = device->dev;
- mutex_lock(&group->unbound_lock);
- list_add(&unbound->unbound_next, &group->unbound_list);
- mutex_unlock(&group->unbound_lock);
- }
- WARN_ON(!unbound);
-
vfio_device_put(device);
rc = try_wait_for_completion(&device->comp);
while (rc <= 0) {
@@ -940,23 +691,6 @@ void vfio_unregister_group_dev(struct vfio_device *device)
group->dev_counter--;
mutex_unlock(&group->device_lock);
- /*
- * In order to support multiple devices per group, devices can be
- * plucked from the group while other devices in the group are still
- * in use. The container persists with this group and those remaining
- * devices still attached. If the user creates an isolation violation
- * by binding this device to another driver while the group is still in
- * use, that's their fault. However, in the case of removing the last,
- * or potentially the only, device in the group there can be no other
- * in-use devices in the group. The user has done their due diligence
- * and we should lay no claims to those devices. In order to do that,
- * we need to make sure the group is detached from the container.
- * Without this stall, we're potentially racing with a user process
- * that may attempt to immediately bind this device to another driver.
- */
- if (list_empty(&group->device_list))
- wait_event(group->container_q, !group->container);
-
if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU)
iommu_group_remove_device(device->dev);
@@ -1191,6 +925,8 @@ static void __vfio_group_unset_container(struct vfio_group *group)
struct vfio_container *container = group->container;
struct vfio_iommu_driver *driver;
+ lockdep_assert_held_write(&group->group_rwsem);
+
down_write(&container->group_lock);
driver = container->iommu_driver;
@@ -1198,8 +934,11 @@ static void __vfio_group_unset_container(struct vfio_group *group)
driver->ops->detach_group(container->iommu_data,
group->iommu_group);
+ if (group->type == VFIO_IOMMU)
+ iommu_group_release_dma_owner(group->iommu_group);
+
group->container = NULL;
- wake_up(&group->container_q);
+ group->container_users = 0;
list_del(&group->container_next);
/* Detaching the last group deprivileges a container, remove iommu */
@@ -1223,30 +962,16 @@ static void __vfio_group_unset_container(struct vfio_group *group)
*/
static int vfio_group_unset_container(struct vfio_group *group)
{
- int users = atomic_cmpxchg(&group->container_users, 1, 0);
+ lockdep_assert_held_write(&group->group_rwsem);
- if (!users)
+ if (!group->container)
return -EINVAL;
- if (users != 1)
+ if (group->container_users != 1)
return -EBUSY;
-
__vfio_group_unset_container(group);
-
return 0;
}
-/*
- * When removing container users, anything that removes the last user
- * implicitly removes the group from the container. That is, if the
- * group file descriptor is closed, as well as any device file descriptors,
- * the group is free.
- */
-static void vfio_group_try_dissolve_container(struct vfio_group *group)
-{
- if (0 == atomic_dec_if_positive(&group->container_users))
- __vfio_group_unset_container(group);
-}
-
static int vfio_group_set_container(struct vfio_group *group, int container_fd)
{
struct fd f;
@@ -1254,7 +979,9 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
struct vfio_iommu_driver *driver;
int ret = 0;
- if (atomic_read(&group->container_users))
+ lockdep_assert_held_write(&group->group_rwsem);
+
+ if (group->container || WARN_ON(group->container_users))
return -EINVAL;
if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
@@ -1282,22 +1009,32 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
goto unlock_out;
}
+ if (group->type == VFIO_IOMMU) {
+ ret = iommu_group_claim_dma_owner(group->iommu_group, f.file);
+ if (ret)
+ goto unlock_out;
+ }
+
driver = container->iommu_driver;
if (driver) {
ret = driver->ops->attach_group(container->iommu_data,
group->iommu_group,
group->type);
- if (ret)
+ if (ret) {
+ if (group->type == VFIO_IOMMU)
+ iommu_group_release_dma_owner(
+ group->iommu_group);
goto unlock_out;
+ }
}
group->container = container;
+ group->container_users = 1;
container->noiommu = (group->type == VFIO_NO_IOMMU);
list_add(&group->container_next, &container->group_list);
/* Get a reference on the container and mark a user within the group */
vfio_container_get(container);
- atomic_inc(&group->container_users);
unlock_out:
up_write(&container->group_lock);
@@ -1305,60 +1042,74 @@ unlock_out:
return ret;
}
-static bool vfio_group_viable(struct vfio_group *group)
+static const struct file_operations vfio_device_fops;
+
+/* true if the vfio_device has open_device() called but not close_device() */
+static bool vfio_assert_device_open(struct vfio_device *device)
{
- return (iommu_group_for_each_dev(group->iommu_group,
- group, vfio_dev_viable) == 0);
+ return !WARN_ON_ONCE(!READ_ONCE(device->open_count));
}
-static int vfio_group_add_container_user(struct vfio_group *group)
+static int vfio_device_assign_container(struct vfio_device *device)
{
- if (!atomic_inc_not_zero(&group->container_users))
+ struct vfio_group *group = device->group;
+
+ lockdep_assert_held_write(&group->group_rwsem);
+
+ if (!group->container || !group->container->iommu_driver ||
+ WARN_ON(!group->container_users))
return -EINVAL;
- if (group->type == VFIO_NO_IOMMU) {
- atomic_dec(&group->container_users);
+ if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
return -EPERM;
- }
- if (!group->container->iommu_driver || !vfio_group_viable(group)) {
- atomic_dec(&group->container_users);
- return -EINVAL;
- }
+ get_file(group->opened_file);
+ group->container_users++;
return 0;
}
-static const struct file_operations vfio_device_fops;
+static void vfio_device_unassign_container(struct vfio_device *device)
+{
+ down_write(&device->group->group_rwsem);
+ WARN_ON(device->group->container_users <= 1);
+ device->group->container_users--;
+ fput(device->group->opened_file);
+ up_write(&device->group->group_rwsem);
+}
-static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
+static struct file *vfio_device_open(struct vfio_device *device)
{
- struct vfio_device *device;
struct file *filep;
- int fdno;
- int ret = 0;
-
- if (0 == atomic_read(&group->container_users) ||
- !group->container->iommu_driver || !vfio_group_viable(group))
- return -EINVAL;
-
- if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
- return -EPERM;
+ int ret;
- device = vfio_device_get_from_name(group, buf);
- if (IS_ERR(device))
- return PTR_ERR(device);
+ down_write(&device->group->group_rwsem);
+ ret = vfio_device_assign_container(device);
+ up_write(&device->group->group_rwsem);
+ if (ret)
+ return ERR_PTR(ret);
if (!try_module_get(device->dev->driver->owner)) {
ret = -ENODEV;
- goto err_device_put;
+ goto err_unassign_container;
}
mutex_lock(&device->dev_set->lock);
device->open_count++;
- if (device->open_count == 1 && device->ops->open_device) {
- ret = device->ops->open_device(device);
- if (ret)
- goto err_undo_count;
+ if (device->open_count == 1) {
+ /*
+ * Here we pass the KVM pointer with the group under the read
+ * lock. If the device driver will use it, it must obtain a
+ * reference and release it during close_device.
+ */
+ down_read(&device->group->group_rwsem);
+ device->kvm = device->group->kvm;
+
+ if (device->ops->open_device) {
+ ret = device->ops->open_device(device);
+ if (ret)
+ goto err_undo_count;
+ }
+ up_read(&device->group->group_rwsem);
}
mutex_unlock(&device->dev_set->lock);
@@ -1366,15 +1117,11 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
* We can't use anon_inode_getfd() because we need to modify
* the f_mode flags directly to allow more than just ioctls
*/
- fdno = ret = get_unused_fd_flags(O_CLOEXEC);
- if (ret < 0)
- goto err_close_device;
-
filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
device, O_RDWR);
if (IS_ERR(filep)) {
ret = PTR_ERR(filep);
- goto err_fd;
+ goto err_close_device;
}
/*
@@ -1384,26 +1131,61 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
*/
filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
- atomic_inc(&group->container_users);
-
- fd_install(fdno, filep);
-
- if (group->type == VFIO_NO_IOMMU)
+ if (device->group->type == VFIO_NO_IOMMU)
dev_warn(device->dev, "vfio-noiommu device opened by user "
"(%s:%d)\n", current->comm, task_pid_nr(current));
- return fdno;
+ /*
+ * On success the device reference is moved to the file and put in
+ * vfio_device_fops_release().
+ */
+ return filep;
-err_fd:
- put_unused_fd(fdno);
err_close_device:
mutex_lock(&device->dev_set->lock);
+ down_read(&device->group->group_rwsem);
if (device->open_count == 1 && device->ops->close_device)
device->ops->close_device(device);
err_undo_count:
device->open_count--;
+ if (device->open_count == 0 && device->kvm)
+ device->kvm = NULL;
+ up_read(&device->group->group_rwsem);
mutex_unlock(&device->dev_set->lock);
module_put(device->dev->driver->owner);
-err_device_put:
+err_unassign_container:
+ vfio_device_unassign_container(device);
+ return ERR_PTR(ret);
+}
+
+static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
+{
+ struct vfio_device *device;
+ struct file *filep;
+ int fdno;
+ int ret;
+
+ device = vfio_device_get_from_name(group, buf);
+ if (IS_ERR(device))
+ return PTR_ERR(device);
+
+ fdno = get_unused_fd_flags(O_CLOEXEC);
+ if (fdno < 0) {
+ ret = fdno;
+ goto err_put_device;
+ }
+
+ filep = vfio_device_open(device);
+ if (IS_ERR(filep)) {
+ ret = PTR_ERR(filep);
+ goto err_put_fdno;
+ }
+
+ fd_install(fdno, filep);
+ return fdno;
+
+err_put_fdno:
+ put_unused_fd(fdno);
+err_put_device:
vfio_device_put(device);
return ret;
}
@@ -1430,11 +1212,13 @@ static long vfio_group_fops_unl_ioctl(struct file *filep,
status.flags = 0;
- if (vfio_group_viable(group))
- status.flags |= VFIO_GROUP_FLAGS_VIABLE;
-
+ down_read(&group->group_rwsem);
if (group->container)
- status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;
+ status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET |
+ VFIO_GROUP_FLAGS_VIABLE;
+ else if (!iommu_group_dma_owner_claimed(group->iommu_group))
+ status.flags |= VFIO_GROUP_FLAGS_VIABLE;
+ up_read(&group->group_rwsem);
if (copy_to_user((void __user *)arg, &status, minsz))
return -EFAULT;
@@ -1452,11 +1236,15 @@ static long vfio_group_fops_unl_ioctl(struct file *filep,
if (fd < 0)
return -EINVAL;
+ down_write(&group->group_rwsem);
ret = vfio_group_set_container(group, fd);
+ up_write(&group->group_rwsem);
break;
}
case VFIO_GROUP_UNSET_CONTAINER:
+ down_write(&group->group_rwsem);
ret = vfio_group_unset_container(group);
+ up_write(&group->group_rwsem);
break;
case VFIO_GROUP_GET_DEVICE_FD:
{
@@ -1479,38 +1267,38 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
struct vfio_group *group =
container_of(inode->i_cdev, struct vfio_group, cdev);
- int opened;
+ int ret;
- /* users can be zero if this races with vfio_group_put() */
- if (!refcount_inc_not_zero(&group->users))
- return -ENODEV;
+ down_write(&group->group_rwsem);
- if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) {
- vfio_group_put(group);
- return -EPERM;
+ /* users can be zero if this races with vfio_group_put() */
+ if (!refcount_inc_not_zero(&group->users)) {
+ ret = -ENODEV;
+ goto err_unlock;
}
- /* Do we need multiple instances of the group open? Seems not. */
- opened = atomic_cmpxchg(&group->opened, 0, 1);
- if (opened) {
- vfio_group_put(group);
- return -EBUSY;
+ if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) {
+ ret = -EPERM;
+ goto err_put;
}
- /* Is something still in use from a previous open? */
- if (group->container) {
- atomic_dec(&group->opened);
- vfio_group_put(group);
- return -EBUSY;
+ /*
+ * Do we need multiple instances of the group open? Seems not.
+ */
+ if (group->opened_file) {
+ ret = -EBUSY;
+ goto err_put;
}
-
- /* Warn if previous user didn't cleanup and re-init to drop them */
- if (WARN_ON(group->notifier.head))
- BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
-
+ group->opened_file = filep;
filep->private_data = group;
+ up_write(&group->group_rwsem);
return 0;
+err_put:
+ vfio_group_put(group);
+err_unlock:
+ up_write(&group->group_rwsem);
+ return ret;
}
static int vfio_group_fops_release(struct inode *inode, struct file *filep)
@@ -1519,9 +1307,18 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep)
filep->private_data = NULL;
- vfio_group_try_dissolve_container(group);
-
- atomic_dec(&group->opened);
+ down_write(&group->group_rwsem);
+ /*
+ * Device FDs hold a group file reference, therefore the group release
+ * is only called when there are no open devices.
+ */
+ WARN_ON(group->notifier.head);
+ if (group->container) {
+ WARN_ON(group->container_users != 1);
+ __vfio_group_unset_container(group);
+ }
+ group->opened_file = NULL;
+ up_write(&group->group_rwsem);
vfio_group_put(group);
@@ -1544,13 +1341,19 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep)
struct vfio_device *device = filep->private_data;
mutex_lock(&device->dev_set->lock);
- if (!--device->open_count && device->ops->close_device)
+ vfio_assert_device_open(device);
+ down_read(&device->group->group_rwsem);
+ if (device->open_count == 1 && device->ops->close_device)
device->ops->close_device(device);
+ up_read(&device->group->group_rwsem);
+ device->open_count--;
+ if (device->open_count == 0)
+ device->kvm = NULL;
mutex_unlock(&device->dev_set->lock);
module_put(device->dev->driver->owner);
- vfio_group_try_dissolve_container(device->group);
+ vfio_device_unassign_container(device);
vfio_device_put(device);
@@ -1899,119 +1702,94 @@ static const struct file_operations vfio_device_fops = {
.mmap = vfio_device_fops_mmap,
};
-/*
- * External user API, exported by symbols to be linked dynamically.
- *
- * The protocol includes:
- * 1. do normal VFIO init operation:
- * - opening a new container;
- * - attaching group(s) to it;
- * - setting an IOMMU driver for a container.
- * When IOMMU is set for a container, all groups in it are
- * considered ready to use by an external user.
- *
- * 2. User space passes a group fd to an external user.
- * The external user calls vfio_group_get_external_user()
- * to verify that:
- * - the group is initialized;
- * - IOMMU is set for it.
- * If both checks passed, vfio_group_get_external_user()
- * increments the container user counter to prevent
- * the VFIO group from disposal before KVM exits.
+/**
+ * vfio_file_iommu_group - Return the struct iommu_group for the vfio group file
+ * @file: VFIO group file
*
- * 3. The external user calls vfio_external_user_iommu_id()
- * to know an IOMMU ID.
- *
- * 4. When the external KVM finishes, it calls
- * vfio_group_put_external_user() to release the VFIO group.
- * This call decrements the container user counter.
+ * The returned iommu_group is valid as long as a ref is held on the file.
*/
-struct vfio_group *vfio_group_get_external_user(struct file *filep)
+struct iommu_group *vfio_file_iommu_group(struct file *file)
{
- struct vfio_group *group = filep->private_data;
- int ret;
+ struct vfio_group *group = file->private_data;
- if (filep->f_op != &vfio_group_fops)
- return ERR_PTR(-EINVAL);
-
- ret = vfio_group_add_container_user(group);
- if (ret)
- return ERR_PTR(ret);
-
- /*
- * Since the caller holds the fget on the file group->users must be >= 1
- */
- vfio_group_get(group);
-
- return group;
+ if (file->f_op != &vfio_group_fops)
+ return NULL;
+ return group->iommu_group;
}
-EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
+EXPORT_SYMBOL_GPL(vfio_file_iommu_group);
-/*
- * External user API, exported by symbols to be linked dynamically.
- * The external user passes in a device pointer
- * to verify that:
- * - A VFIO group is assiciated with the device;
- * - IOMMU is set for the group.
- * If both checks passed, vfio_group_get_external_user_from_dev()
- * increments the container user counter to prevent the VFIO group
- * from disposal before external user exits and returns the pointer
- * to the VFIO group.
- *
- * When the external user finishes using the VFIO group, it calls
- * vfio_group_put_external_user() to release the VFIO group and
- * decrement the container user counter.
+/**
+ * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
+ * is always CPU cache coherent
+ * @file: VFIO group file
*
- * @dev [in] : device
- * Return error PTR or pointer to VFIO group.
+ * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
+ * bit in DMA transactions. A return of false indicates that the user has
+ * rights to access additional instructions such as wbinvd on x86.
*/
-
-struct vfio_group *vfio_group_get_external_user_from_dev(struct device *dev)
+bool vfio_file_enforced_coherent(struct file *file)
{
- struct vfio_group *group;
- int ret;
+ struct vfio_group *group = file->private_data;
+ bool ret;
- group = vfio_group_get_from_dev(dev);
- if (!group)
- return ERR_PTR(-ENODEV);
+ if (file->f_op != &vfio_group_fops)
+ return true;
- ret = vfio_group_add_container_user(group);
- if (ret) {
- vfio_group_put(group);
- return ERR_PTR(ret);
+ down_read(&group->group_rwsem);
+ if (group->container) {
+ ret = vfio_ioctl_check_extension(group->container,
+ VFIO_DMA_CC_IOMMU);
+ } else {
+ /*
+ * Since the coherency state is determined only once a container
+ * is attached, the user must attach one before they can prove they
+ * have permission.
+ */
+ ret = true;
}
-
- return group;
+ up_read(&group->group_rwsem);
+ return ret;
}
-EXPORT_SYMBOL_GPL(vfio_group_get_external_user_from_dev);
+EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
-void vfio_group_put_external_user(struct vfio_group *group)
+/**
+ * vfio_file_set_kvm - Link a kvm with VFIO drivers
+ * @file: VFIO group file
+ * @kvm: KVM to link
+ *
+ * When a VFIO device is first opened the KVM will be available in
+ * device->kvm if one was associated with the group.
+ */
+void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
- vfio_group_try_dissolve_container(group);
- vfio_group_put(group);
-}
-EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
+ struct vfio_group *group = file->private_data;
-bool vfio_external_group_match_file(struct vfio_group *test_group,
- struct file *filep)
-{
- struct vfio_group *group = filep->private_data;
+ if (file->f_op != &vfio_group_fops)
+ return;
- return (filep->f_op == &vfio_group_fops) && (group == test_group);
+ down_write(&group->group_rwsem);
+ group->kvm = kvm;
+ up_write(&group->group_rwsem);
}
-EXPORT_SYMBOL_GPL(vfio_external_group_match_file);
+EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
-int vfio_external_user_iommu_id(struct vfio_group *group)
+/**
+ * vfio_file_has_dev - True if the VFIO file is a handle for the device
+ * @file: VFIO file to check
+ * @device: Device that must be part of the file
+ *
+ * Returns true if the given file has permission to manipulate the given device.
+ */
+bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
{
- return iommu_group_id(group->iommu_group);
-}
-EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);
+ struct vfio_group *group = file->private_data;
-long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
-{
- return vfio_ioctl_check_extension(group->container, arg);
+ if (file->f_op != &vfio_group_fops)
+ return false;
+
+ return group == device->group;
}
-EXPORT_SYMBOL_GPL(vfio_external_check_extension);
+EXPORT_SYMBOL_GPL(vfio_file_has_dev);
/*
* Sub-module support
@@ -2134,7 +1912,7 @@ EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
/*
* Pin a set of guest PFNs and return their associated host PFNs for local
* domain only.
- * @dev [in] : device
+ * @device [in] : device
* @user_pfn [in]: array of user/guest PFNs to be pinned.
* @npage [in] : count of elements in user_pfn array. This count should not
* be greater VFIO_PIN_PAGES_MAX_ENTRIES.
@@ -2142,33 +1920,25 @@ EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
* @phys_pfn[out]: array of host PFNs
* Return error or number of pages pinned.
*/
-int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
- int prot, unsigned long *phys_pfn)
+int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
+ int npage, int prot, unsigned long *phys_pfn)
{
struct vfio_container *container;
- struct vfio_group *group;
+ struct vfio_group *group = device->group;
struct vfio_iommu_driver *driver;
int ret;
- if (!dev || !user_pfn || !phys_pfn || !npage)
+ if (!user_pfn || !phys_pfn || !npage ||
+ !vfio_assert_device_open(device))
return -EINVAL;
if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
return -E2BIG;
- group = vfio_group_get_from_dev(dev);
- if (!group)
- return -ENODEV;
-
- if (group->dev_counter > 1) {
- ret = -EINVAL;
- goto err_pin_pages;
- }
-
- ret = vfio_group_add_container_user(group);
- if (ret)
- goto err_pin_pages;
+ if (group->dev_counter > 1)
+ return -EINVAL;
+ /* group->container cannot change while a vfio device is open */
container = group->container;
driver = container->iommu_driver;
if (likely(driver && driver->ops->pin_pages))
@@ -2178,45 +1948,34 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
else
ret = -ENOTTY;
- vfio_group_try_dissolve_container(group);
-
-err_pin_pages:
- vfio_group_put(group);
return ret;
}
EXPORT_SYMBOL(vfio_pin_pages);
/*
* Unpin set of host PFNs for local domain only.
- * @dev [in] : device
+ * @device [in] : device
* @user_pfn [in]: array of user/guest PFNs to be unpinned. Number of user/guest
* PFNs should not be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
* @npage [in] : count of elements in user_pfn array. This count should not
* be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
* Return error or number of pages unpinned.
*/
-int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
+int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
+ int npage)
{
struct vfio_container *container;
- struct vfio_group *group;
struct vfio_iommu_driver *driver;
int ret;
- if (!dev || !user_pfn || !npage)
+ if (!user_pfn || !npage || !vfio_assert_device_open(device))
return -EINVAL;
if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
return -E2BIG;
- group = vfio_group_get_from_dev(dev);
- if (!group)
- return -ENODEV;
-
- ret = vfio_group_add_container_user(group);
- if (ret)
- goto err_unpin_pages;
-
- container = group->container;
+ /* group->container cannot change while a vfio device is open */
+ container = device->group->container;
driver = container->iommu_driver;
if (likely(driver && driver->ops->unpin_pages))
ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
@@ -2224,110 +1983,11 @@ int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
else
ret = -ENOTTY;
- vfio_group_try_dissolve_container(group);
-
-err_unpin_pages:
- vfio_group_put(group);
return ret;
}
EXPORT_SYMBOL(vfio_unpin_pages);
/*
- * Pin a set of guest IOVA PFNs and return their associated host PFNs for a
- * VFIO group.
- *
- * The caller needs to call vfio_group_get_external_user() or
- * vfio_group_get_external_user_from_dev() prior to calling this interface,
- * so as to prevent the VFIO group from disposal in the middle of the call.
- * But it can keep the reference to the VFIO group for several calls into
- * this interface.
- * After finishing using of the VFIO group, the caller needs to release the
- * VFIO group by calling vfio_group_put_external_user().
- *
- * @group [in] : VFIO group
- * @user_iova_pfn [in] : array of user/guest IOVA PFNs to be pinned.
- * @npage [in] : count of elements in user_iova_pfn array.
- * This count should not be greater
- * VFIO_PIN_PAGES_MAX_ENTRIES.
- * @prot [in] : protection flags
- * @phys_pfn [out] : array of host PFNs
- * Return error or number of pages pinned.
- */
-int vfio_group_pin_pages(struct vfio_group *group,
- unsigned long *user_iova_pfn, int npage,
- int prot, unsigned long *phys_pfn)
-{
- struct vfio_container *container;
- struct vfio_iommu_driver *driver;
- int ret;
-
- if (!group || !user_iova_pfn || !phys_pfn || !npage)
- return -EINVAL;
-
- if (group->dev_counter > 1)
- return -EINVAL;
-
- if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
- return -E2BIG;
-
- container = group->container;
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->pin_pages))
- ret = driver->ops->pin_pages(container->iommu_data,
- group->iommu_group, user_iova_pfn,
- npage, prot, phys_pfn);
- else
- ret = -ENOTTY;
-
- return ret;
-}
-EXPORT_SYMBOL(vfio_group_pin_pages);
-
-/*
- * Unpin a set of guest IOVA PFNs for a VFIO group.
- *
- * The caller needs to call vfio_group_get_external_user() or
- * vfio_group_get_external_user_from_dev() prior to calling this interface,
- * so as to prevent the VFIO group from disposal in the middle of the call.
- * But it can keep the reference to the VFIO group for several calls into
- * this interface.
- * After finishing using of the VFIO group, the caller needs to release the
- * VFIO group by calling vfio_group_put_external_user().
- *
- * @group [in] : vfio group
- * @user_iova_pfn [in] : array of user/guest IOVA PFNs to be unpinned.
- * @npage [in] : count of elements in user_iova_pfn array.
- * This count should not be greater than
- * VFIO_PIN_PAGES_MAX_ENTRIES.
- * Return error or number of pages unpinned.
- */
-int vfio_group_unpin_pages(struct vfio_group *group,
- unsigned long *user_iova_pfn, int npage)
-{
- struct vfio_container *container;
- struct vfio_iommu_driver *driver;
- int ret;
-
- if (!group || !user_iova_pfn || !npage)
- return -EINVAL;
-
- if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
- return -E2BIG;
-
- container = group->container;
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->unpin_pages))
- ret = driver->ops->unpin_pages(container->iommu_data,
- user_iova_pfn, npage);
- else
- ret = -ENOTTY;
-
- return ret;
-}
-EXPORT_SYMBOL(vfio_group_unpin_pages);
-
-
-/*
* This interface allows the CPUs to perform some sort of virtual DMA on
* behalf of the device.
*
@@ -2337,32 +1997,25 @@ EXPORT_SYMBOL(vfio_group_unpin_pages);
* As the read/write of user space memory is conducted via the CPUs and is
* not a real device DMA, it is not necessary to pin the user space memory.
*
- * The caller needs to call vfio_group_get_external_user() or
- * vfio_group_get_external_user_from_dev() prior to calling this interface,
- * so as to prevent the VFIO group from disposal in the middle of the call.
- * But it can keep the reference to the VFIO group for several calls into
- * this interface.
- * After finishing using of the VFIO group, the caller needs to release the
- * VFIO group by calling vfio_group_put_external_user().
- *
- * @group [in] : VFIO group
+ * @device [in] : VFIO device
* @user_iova [in] : base IOVA of a user space buffer
* @data [in] : pointer to kernel buffer
* @len [in] : kernel buffer length
* @write : indicate read or write
* Return error code on failure or 0 on success.
*/
-int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
- void *data, size_t len, bool write)
+int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova, void *data,
+ size_t len, bool write)
{
struct vfio_container *container;
struct vfio_iommu_driver *driver;
int ret = 0;
- if (!group || !data || len <= 0)
+ if (!data || len <= 0 || !vfio_assert_device_open(device))
return -EINVAL;
- container = group->container;
+ /* group->container cannot change while a vfio device is open */
+ container = device->group->container;
driver = container->iommu_driver;
if (likely(driver && driver->ops->dma_rw))
@@ -2370,7 +2023,6 @@ int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
user_iova, data, len, write);
else
ret = -ENOTTY;
-
return ret;
}
EXPORT_SYMBOL(vfio_dma_rw);
@@ -2383,9 +2035,7 @@ static int vfio_register_iommu_notifier(struct vfio_group *group,
struct vfio_iommu_driver *driver;
int ret;
- ret = vfio_group_add_container_user(group);
- if (ret)
- return -EINVAL;
+ lockdep_assert_held_read(&group->group_rwsem);
container = group->container;
driver = container->iommu_driver;
@@ -2395,8 +2045,6 @@ static int vfio_register_iommu_notifier(struct vfio_group *group,
else
ret = -ENOTTY;
- vfio_group_try_dissolve_container(group);
-
return ret;
}
@@ -2407,9 +2055,7 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group,
struct vfio_iommu_driver *driver;
int ret;
- ret = vfio_group_add_container_user(group);
- if (ret)
- return -EINVAL;
+ lockdep_assert_held_read(&group->group_rwsem);
container = group->container;
driver = container->iommu_driver;
@@ -2419,147 +2065,52 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group,
else
ret = -ENOTTY;
- vfio_group_try_dissolve_container(group);
-
- return ret;
-}
-
-void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
-{
- group->kvm = kvm;
- blocking_notifier_call_chain(&group->notifier,
- VFIO_GROUP_NOTIFY_SET_KVM, kvm);
-}
-EXPORT_SYMBOL_GPL(vfio_group_set_kvm);
-
-static int vfio_register_group_notifier(struct vfio_group *group,
- unsigned long *events,
- struct notifier_block *nb)
-{
- int ret;
- bool set_kvm = false;
-
- if (*events & VFIO_GROUP_NOTIFY_SET_KVM)
- set_kvm = true;
-
- /* clear known events */
- *events &= ~VFIO_GROUP_NOTIFY_SET_KVM;
-
- /* refuse to continue if still events remaining */
- if (*events)
- return -EINVAL;
-
- ret = vfio_group_add_container_user(group);
- if (ret)
- return -EINVAL;
-
- ret = blocking_notifier_chain_register(&group->notifier, nb);
-
- /*
- * The attaching of kvm and vfio_group might already happen, so
- * here we replay once upon registration.
- */
- if (!ret && set_kvm && group->kvm)
- blocking_notifier_call_chain(&group->notifier,
- VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);
-
- vfio_group_try_dissolve_container(group);
-
- return ret;
-}
-
-static int vfio_unregister_group_notifier(struct vfio_group *group,
- struct notifier_block *nb)
-{
- int ret;
-
- ret = vfio_group_add_container_user(group);
- if (ret)
- return -EINVAL;
-
- ret = blocking_notifier_chain_unregister(&group->notifier, nb);
-
- vfio_group_try_dissolve_container(group);
-
return ret;
}
-int vfio_register_notifier(struct device *dev, enum vfio_notify_type type,
- unsigned long *events, struct notifier_block *nb)
+int vfio_register_notifier(struct vfio_device *device,
+ enum vfio_notify_type type, unsigned long *events,
+ struct notifier_block *nb)
{
- struct vfio_group *group;
+ struct vfio_group *group = device->group;
int ret;
- if (!dev || !nb || !events || (*events == 0))
+ if (!nb || !events || (*events == 0) ||
+ !vfio_assert_device_open(device))
return -EINVAL;
- group = vfio_group_get_from_dev(dev);
- if (!group)
- return -ENODEV;
-
switch (type) {
case VFIO_IOMMU_NOTIFY:
ret = vfio_register_iommu_notifier(group, events, nb);
break;
- case VFIO_GROUP_NOTIFY:
- ret = vfio_register_group_notifier(group, events, nb);
- break;
default:
ret = -EINVAL;
}
-
- vfio_group_put(group);
return ret;
}
EXPORT_SYMBOL(vfio_register_notifier);
-int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type,
+int vfio_unregister_notifier(struct vfio_device *device,
+ enum vfio_notify_type type,
struct notifier_block *nb)
{
- struct vfio_group *group;
+ struct vfio_group *group = device->group;
int ret;
- if (!dev || !nb)
+ if (!nb || !vfio_assert_device_open(device))
return -EINVAL;
- group = vfio_group_get_from_dev(dev);
- if (!group)
- return -ENODEV;
-
switch (type) {
case VFIO_IOMMU_NOTIFY:
ret = vfio_unregister_iommu_notifier(group, nb);
break;
- case VFIO_GROUP_NOTIFY:
- ret = vfio_unregister_group_notifier(group, nb);
- break;
default:
ret = -EINVAL;
}
-
- vfio_group_put(group);
return ret;
}
EXPORT_SYMBOL(vfio_unregister_notifier);
-struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group)
-{
- struct vfio_container *container;
- struct vfio_iommu_driver *driver;
-
- if (!group)
- return ERR_PTR(-EINVAL);
-
- container = group->container;
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->group_iommu_domain))
- return driver->ops->group_iommu_domain(container->iommu_data,
- group->iommu_group);
-
- return ERR_PTR(-ENOTTY);
-}
-EXPORT_SYMBOL_GPL(vfio_group_iommu_domain);
-
/*
* Module/class support
*/
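With the external-user API removed above, out-of-tree and KVM-side consumers are expected to use the new file-based helpers instead: vfio_file_iommu_group(), vfio_file_enforced_coherent(), vfio_file_set_kvm() and vfio_file_has_dev(). The sketch below shows one plausible caller that holds a reference on a group file; kvm_attach_vfio_group_file() is an invented name and not part of this patch.

#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/printk.h>
#include <linux/vfio.h>

static int kvm_attach_vfio_group_file(struct kvm *kvm, struct file *group_file)
{
	struct iommu_group *grp;

	/* NULL means the file is not a VFIO group file at all */
	grp = vfio_file_iommu_group(group_file);
	if (!grp)
		return -EINVAL;

	/* make the KVM visible as device->kvm when devices are opened */
	vfio_file_set_kvm(group_file, kvm);

	/* decide whether the guest needs real no-snoop/wbinvd handling */
	if (!vfio_file_enforced_coherent(group_file))
		pr_debug("iommu group %d may see non-coherent DMA\n",
			 iommu_group_id(grp));

	return 0;
}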
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 9394aa9444c1..c13b9290e357 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -84,8 +84,8 @@ struct vfio_domain {
struct iommu_domain *domain;
struct list_head next;
struct list_head group_list;
- int prot; /* IOMMU_CACHE */
- bool fgsp; /* Fine-grained super pages */
+ bool fgsp : 1; /* Fine-grained super pages */
+ bool enforce_cache_coherency : 1;
};
struct vfio_dma {
@@ -1461,7 +1461,7 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
list_for_each_entry(d, &iommu->domain_list, next) {
ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
- npage << PAGE_SHIFT, prot | d->prot);
+ npage << PAGE_SHIFT, prot | IOMMU_CACHE);
if (ret)
goto unwind;
@@ -1771,7 +1771,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
}
ret = iommu_map(domain->domain, iova, phys,
- size, dma->prot | domain->prot);
+ size, dma->prot | IOMMU_CACHE);
if (ret) {
if (!dma->iommu_mapped) {
vfio_unpin_pages_remote(dma, iova,
@@ -1859,7 +1859,7 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain)
return;
ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
- IOMMU_READ | IOMMU_WRITE | domain->prot);
+ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
if (!ret) {
size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
@@ -2267,8 +2267,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
goto out_detach;
}
- if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
- domain->prot |= IOMMU_CACHE;
+ /*
+ * If the IOMMU can block non-coherent operations (i.e. PCIe TLPs with
+ * no-snoop set) then VFIO always turns this feature on because on Intel
+ * platforms it optimizes KVM to disable wbinvd emulation.
+ */
+ if (domain->domain->ops->enforce_cache_coherency)
+ domain->enforce_cache_coherency =
+ domain->domain->ops->enforce_cache_coherency(
+ domain->domain);
/*
* Try to match an existing compatible domain. We don't want to
@@ -2279,7 +2286,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
*/
list_for_each_entry(d, &iommu->domain_list, next) {
if (d->domain->ops == domain->domain->ops &&
- d->prot == domain->prot) {
+ d->enforce_cache_coherency ==
+ domain->enforce_cache_coherency) {
iommu_detach_group(domain->domain, group->iommu_group);
if (!iommu_attach_group(d->domain,
group->iommu_group)) {
@@ -2611,14 +2619,14 @@ static void vfio_iommu_type1_release(void *iommu_data)
kfree(iommu);
}
-static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
+static int vfio_domains_have_enforce_cache_coherency(struct vfio_iommu *iommu)
{
struct vfio_domain *domain;
int ret = 1;
mutex_lock(&iommu->lock);
list_for_each_entry(domain, &iommu->domain_list, next) {
- if (!(domain->prot & IOMMU_CACHE)) {
+ if (!(domain->enforce_cache_coherency)) {
ret = 0;
break;
}
@@ -2641,7 +2649,7 @@ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
case VFIO_DMA_CC_IOMMU:
if (!iommu)
return 0;
- return vfio_domains_have_iommu_cache(iommu);
+ return vfio_domains_have_enforce_cache_coherency(iommu);
default:
return 0;
}
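The per-domain enforce_cache_coherency result is what userspace ultimately observes through VFIO_CHECK_EXTENSION(VFIO_DMA_CC_IOMMU) on the container fd. A small userspace sketch, with group attach and VFIO_SET_IOMMU elided and error handling omitted for brevity:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

int main(void)
{
	int container = open("/dev/vfio/vfio", O_RDWR);

	/* ... VFIO_GROUP_SET_CONTAINER and VFIO_SET_IOMMU happen here ... */

	if (ioctl(container, VFIO_CHECK_EXTENSION, VFIO_DMA_CC_IOMMU) > 0)
		printf("all IOMMU domains enforce cache-coherent DMA\n");
	else
		printf("non-coherent (no-snoop) DMA is possible\n");

	return 0;
}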
diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c
index 5829cf2d0552..ea61330a3431 100644
--- a/drivers/vhost/iotlb.c
+++ b/drivers/vhost/iotlb.c
@@ -126,6 +126,23 @@ void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last)
EXPORT_SYMBOL_GPL(vhost_iotlb_del_range);
/**
+ * vhost_iotlb_init - initialize a vhost IOTLB
+ * @iotlb: the IOTLB that needs to be initialized
+ * @limit: maximum number of IOTLB entries
+ * @flags: VHOST_IOTLB_FLAG_XXX
+ */
+void vhost_iotlb_init(struct vhost_iotlb *iotlb, unsigned int limit,
+ unsigned int flags)
+{
+ iotlb->root = RB_ROOT_CACHED;
+ iotlb->limit = limit;
+ iotlb->nmaps = 0;
+ iotlb->flags = flags;
+ INIT_LIST_HEAD(&iotlb->list);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_init);
+
+/**
* vhost_iotlb_alloc - add a new vhost IOTLB
* @limit: maximum number of IOTLB entries
* @flags: VHOST_IOTLB_FLAG_XXX
@@ -139,11 +156,7 @@ struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags)
if (!iotlb)
return NULL;
- iotlb->root = RB_ROOT_CACHED;
- iotlb->limit = limit;
- iotlb->nmaps = 0;
- iotlb->flags = flags;
- INIT_LIST_HEAD(&iotlb->list);
+ vhost_iotlb_init(iotlb, limit, flags);
return iotlb;
}
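vhost_iotlb_init() factors the field setup out of vhost_iotlb_alloc() so an IOTLB can be embedded inside a larger object instead of being heap-allocated on its own, which is exactly how the vhost-vdpa address-space support further down uses it. A minimal sketch under that assumption; struct my_address_space and my_as_alloc() are invented names.

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vhost_iotlb.h>

struct my_address_space {
	struct vhost_iotlb iotlb;	/* embedded, not separately allocated */
	u32 id;
};

static struct my_address_space *my_as_alloc(u32 id)
{
	struct my_address_space *as = kmalloc(sizeof(*as), GFP_KERNEL);

	if (!as)
		return NULL;

	/* limit 0 (no cap) and flags 0, matching the vhost-vdpa usage below */
	vhost_iotlb_init(&as->iotlb, 0, 0);
	as->id = id;
	return as;
}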
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 297b5db47454..68e4ecd1cc0e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1374,16 +1374,9 @@ static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}
-static void vhost_net_flush_vq(struct vhost_net *n, int index)
-{
- vhost_poll_flush(n->poll + index);
- vhost_poll_flush(&n->vqs[index].vq.poll);
-}
-
static void vhost_net_flush(struct vhost_net *n)
{
- vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
- vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
+ vhost_dev_flush(&n->dev);
if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
n->tx_flush = true;
@@ -1572,7 +1565,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
}
if (oldsock) {
- vhost_net_flush_vq(n, index);
+ vhost_dev_flush(&n->dev);
sockfd_put(oldsock);
}
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 532e204f2b1b..ffd9e6c2ffc1 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1436,7 +1436,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
/* Flush both the vhost poll and vhost work */
- vhost_work_dev_flush(&vs->dev);
+ vhost_dev_flush(&vs->dev);
/* Wait for all reqs issued before the flush to be finished */
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
@@ -1827,8 +1827,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
vhost_scsi_clear_endpoint(vs, &t);
vhost_dev_stop(&vs->dev);
vhost_dev_cleanup(&vs->dev);
- /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
- vhost_scsi_flush(vs);
kfree(vs->dev.vqs);
kvfree(vs);
return 0;
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 05740cba1cd8..bc8e7fb1e635 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -144,14 +144,9 @@ static void vhost_test_stop(struct vhost_test *n, void **privatep)
*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}
-static void vhost_test_flush_vq(struct vhost_test *n, int index)
-{
- vhost_poll_flush(&n->vqs[index].poll);
-}
-
static void vhost_test_flush(struct vhost_test *n)
{
- vhost_test_flush_vq(n, VHOST_TEST_VQ);
+ vhost_dev_flush(&n->dev);
}
static int vhost_test_release(struct inode *inode, struct file *f)
@@ -163,9 +158,6 @@ static int vhost_test_release(struct inode *inode, struct file *f)
vhost_test_flush(n);
vhost_dev_stop(&n->dev);
vhost_dev_cleanup(&n->dev);
- /* We do an extra flush before freeing memory,
- * since jobs can re-queue themselves. */
- vhost_test_flush(n);
kfree(n->dev.vqs);
kfree(n);
return 0;
@@ -210,7 +202,7 @@ static long vhost_test_run(struct vhost_test *n, int test)
goto err;
if (oldpriv) {
- vhost_test_flush_vq(n, index);
+ vhost_test_flush(n);
}
}
@@ -303,7 +295,7 @@ static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
mutex_unlock(&vq->mutex);
if (enable) {
- vhost_test_flush_vq(n, index);
+ vhost_test_flush(n);
}
mutex_unlock(&n->dev.mutex);
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 4c2f0bd06285..935a1d0ddb97 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -28,17 +28,27 @@
enum {
VHOST_VDPA_BACKEND_FEATURES =
(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
- (1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
+ (1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
+ (1ULL << VHOST_BACKEND_F_IOTLB_ASID),
};
#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
+#define VHOST_VDPA_IOTLB_BUCKETS 16
+
+struct vhost_vdpa_as {
+ struct hlist_node hash_link;
+ struct vhost_iotlb iotlb;
+ u32 id;
+};
+
struct vhost_vdpa {
struct vhost_dev vdev;
struct iommu_domain *domain;
struct vhost_virtqueue *vqs;
struct completion completion;
struct vdpa_device *vdpa;
+ struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
struct device dev;
struct cdev cdev;
atomic_t opened;
@@ -48,12 +58,89 @@ struct vhost_vdpa {
struct eventfd_ctx *config_ctx;
int in_batch;
struct vdpa_iova_range range;
+ u32 batch_asid;
};
static DEFINE_IDA(vhost_vdpa_ida);
static dev_t vhost_vdpa_major;
+static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
+{
+	struct vhost_vdpa_as *as = container_of(iotlb, struct vhost_vdpa_as, iotlb);
+ return as->id;
+}
+
+static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
+{
+ struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
+ struct vhost_vdpa_as *as;
+
+ hlist_for_each_entry(as, head, hash_link)
+ if (as->id == asid)
+ return as;
+
+ return NULL;
+}
+
+static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
+{
+ struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+ if (!as)
+ return NULL;
+
+ return &as->iotlb;
+}
+
+static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
+{
+ struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
+ struct vhost_vdpa_as *as;
+
+ if (asid_to_as(v, asid))
+ return NULL;
+
+ if (asid >= v->vdpa->nas)
+ return NULL;
+
+ as = kmalloc(sizeof(*as), GFP_KERNEL);
+ if (!as)
+ return NULL;
+
+ vhost_iotlb_init(&as->iotlb, 0, 0);
+ as->id = asid;
+ hlist_add_head(&as->hash_link, head);
+
+ return as;
+}
+
+static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
+ u32 asid)
+{
+ struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+ if (as)
+ return as;
+
+ return vhost_vdpa_alloc_as(v, asid);
+}
+
+static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
+{
+ struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+ if (!as)
+ return -EINVAL;
+
+ hlist_del(&as->hash_link);
+ vhost_iotlb_reset(&as->iotlb);
+ kfree(as);
+
+ return 0;
+}
+
static void handle_vq_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -411,6 +498,22 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
return -EFAULT;
ops->set_vq_ready(vdpa, idx, s.num);
return 0;
+ case VHOST_VDPA_GET_VRING_GROUP:
+ s.index = idx;
+ s.num = ops->get_vq_group(vdpa, idx);
+ if (s.num >= vdpa->ngroups)
+ return -EIO;
+ else if (copy_to_user(argp, &s, sizeof(s)))
+ return -EFAULT;
+ return 0;
+ case VHOST_VDPA_SET_GROUP_ASID:
+ if (copy_from_user(&s, argp, sizeof(s)))
+ return -EFAULT;
+ if (s.num >= vdpa->nas)
+ return -EINVAL;
+ if (!ops->set_group_asid)
+ return -EOPNOTSUPP;
+ return ops->set_group_asid(vdpa, idx, s.num);
case VHOST_GET_VRING_BASE:
r = ops->get_vq_state(v->vdpa, idx, &vq_state);
if (r)
@@ -505,6 +608,15 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
case VHOST_VDPA_GET_VRING_NUM:
r = vhost_vdpa_get_vring_num(v, argp);
break;
+ case VHOST_VDPA_GET_GROUP_NUM:
+ if (copy_to_user(argp, &v->vdpa->ngroups,
+ sizeof(v->vdpa->ngroups)))
+ r = -EFAULT;
+ break;
+ case VHOST_VDPA_GET_AS_NUM:
+ if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
+ r = -EFAULT;
+ break;
case VHOST_SET_LOG_BASE:
case VHOST_SET_LOG_FD:
r = -ENOIOCTLCMD;
@@ -537,10 +649,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
return r;
}
-static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ u64 start, u64 last)
{
struct vhost_dev *dev = &v->vdev;
- struct vhost_iotlb *iotlb = dev->iotlb;
struct vhost_iotlb_map *map;
struct page *page;
unsigned long pfn, pinned;
@@ -559,10 +672,10 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
}
}
-static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ u64 start, u64 last)
{
- struct vhost_dev *dev = &v->vdev;
- struct vhost_iotlb *iotlb = dev->iotlb;
struct vhost_iotlb_map *map;
struct vdpa_map_file *map_file;
@@ -574,23 +687,16 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
}
}
-static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ u64 start, u64 last)
{
struct vdpa_device *vdpa = v->vdpa;
if (vdpa->use_va)
- return vhost_vdpa_va_unmap(v, start, last);
-
- return vhost_vdpa_pa_unmap(v, start, last);
-}
-
-static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
-{
- struct vhost_dev *dev = &v->vdev;
+ return vhost_vdpa_va_unmap(v, iotlb, start, last);
- vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
- kfree(dev->iotlb);
- dev->iotlb = NULL;
+ return vhost_vdpa_pa_unmap(v, iotlb, start, last);
}
static int perm_to_iommu_flags(u32 perm)
@@ -615,30 +721,31 @@ static int perm_to_iommu_flags(u32 perm)
return flags | IOMMU_CACHE;
}
-static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
- u64 size, u64 pa, u32 perm, void *opaque)
+static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+ u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
{
struct vhost_dev *dev = &v->vdev;
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
+ u32 asid = iotlb_to_asid(iotlb);
int r = 0;
- r = vhost_iotlb_add_range_ctx(dev->iotlb, iova, iova + size - 1,
+ r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
pa, perm, opaque);
if (r)
return r;
if (ops->dma_map) {
- r = ops->dma_map(vdpa, iova, size, pa, perm, opaque);
+ r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
} else if (ops->set_map) {
if (!v->in_batch)
- r = ops->set_map(vdpa, dev->iotlb);
+ r = ops->set_map(vdpa, asid, iotlb);
} else {
r = iommu_map(v->domain, iova, pa, size,
perm_to_iommu_flags(perm));
}
if (r) {
- vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
+ vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
return r;
}
@@ -648,25 +755,34 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
return 0;
}
-static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
+static void vhost_vdpa_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ u64 iova, u64 size)
{
- struct vhost_dev *dev = &v->vdev;
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
+ u32 asid = iotlb_to_asid(iotlb);
- vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);
+ vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
if (ops->dma_map) {
- ops->dma_unmap(vdpa, iova, size);
+ ops->dma_unmap(vdpa, asid, iova, size);
} else if (ops->set_map) {
if (!v->in_batch)
- ops->set_map(vdpa, dev->iotlb);
+ ops->set_map(vdpa, asid, iotlb);
} else {
iommu_unmap(v->domain, iova, size);
}
+
+ /* If we are in the middle of batch processing, delay the free
+ * of AS until BATCH_END.
+ */
+ if (!v->in_batch && !iotlb->nmaps)
+ vhost_vdpa_remove_as(v, asid);
}
static int vhost_vdpa_va_map(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
u64 iova, u64 size, u64 uaddr, u32 perm)
{
struct vhost_dev *dev = &v->vdev;
@@ -696,7 +812,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
map_file->offset = offset;
map_file->file = get_file(vma->vm_file);
- ret = vhost_vdpa_map(v, map_iova, map_size, uaddr,
+ ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
perm, map_file);
if (ret) {
fput(map_file->file);
@@ -709,7 +825,7 @@ next:
map_iova += map_size;
}
if (ret)
- vhost_vdpa_unmap(v, iova, map_iova - iova);
+ vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);
mmap_read_unlock(dev->mm);
@@ -717,6 +833,7 @@ next:
}
static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
u64 iova, u64 size, u64 uaddr, u32 perm)
{
struct vhost_dev *dev = &v->vdev;
@@ -780,7 +897,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
if (last_pfn && (this_pfn != last_pfn + 1)) {
/* Pin a contiguous chunk of memory */
csize = PFN_PHYS(last_pfn - map_pfn + 1);
- ret = vhost_vdpa_map(v, iova, csize,
+ ret = vhost_vdpa_map(v, iotlb, iova, csize,
PFN_PHYS(map_pfn),
perm, NULL);
if (ret) {
@@ -810,7 +927,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
}
/* Pin the rest chunk */
- ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+ ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
PFN_PHYS(map_pfn), perm, NULL);
out:
if (ret) {
@@ -830,7 +947,7 @@ out:
for (pfn = map_pfn; pfn <= last_pfn; pfn++)
unpin_user_page(pfn_to_page(pfn));
}
- vhost_vdpa_unmap(v, start, size);
+ vhost_vdpa_unmap(v, iotlb, start, size);
}
unlock:
mmap_read_unlock(dev->mm);
@@ -841,11 +958,10 @@ free:
}
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
struct vhost_iotlb_msg *msg)
{
- struct vhost_dev *dev = &v->vdev;
struct vdpa_device *vdpa = v->vdpa;
- struct vhost_iotlb *iotlb = dev->iotlb;
if (msg->iova < v->range.first || !msg->size ||
msg->iova > U64_MAX - msg->size + 1 ||
@@ -857,19 +973,21 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
return -EEXIST;
if (vdpa->use_va)
- return vhost_vdpa_va_map(v, msg->iova, msg->size,
+ return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
msg->uaddr, msg->perm);
- return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
+ return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
msg->perm);
}
-static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
+static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg)
{
struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_iotlb *iotlb = NULL;
+ struct vhost_vdpa_as *as = NULL;
int r = 0;
mutex_lock(&dev->mutex);
@@ -878,20 +996,47 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
if (r)
goto unlock;
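+ /* UPDATE and BATCH_BEGIN may create the address space on demand;
+ * other message types require it to exist already.
+ */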
+ if (msg->type == VHOST_IOTLB_UPDATE ||
+ msg->type == VHOST_IOTLB_BATCH_BEGIN) {
+ as = vhost_vdpa_find_alloc_as(v, asid);
+ if (!as) {
+ dev_err(&v->dev, "can't find and alloc asid %d\n",
+ asid);
+ r = -EINVAL;
+ goto unlock;
+ }
+ iotlb = &as->iotlb;
+ } else
+ iotlb = asid_to_iotlb(v, asid);
+
+ if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
+ if (v->in_batch && v->batch_asid != asid) {
+ dev_info(&v->dev, "batch id %d asid %d\n",
+ v->batch_asid, asid);
+ }
+ if (!iotlb)
+ dev_err(&v->dev, "no iotlb for asid %d\n", asid);
+ r = -EINVAL;
+ goto unlock;
+ }
+
switch (msg->type) {
case VHOST_IOTLB_UPDATE:
- r = vhost_vdpa_process_iotlb_update(v, msg);
+ r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
break;
case VHOST_IOTLB_INVALIDATE:
- vhost_vdpa_unmap(v, msg->iova, msg->size);
+ vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
break;
case VHOST_IOTLB_BATCH_BEGIN:
+ v->batch_asid = asid;
v->in_batch = true;
break;
case VHOST_IOTLB_BATCH_END:
if (v->in_batch && ops->set_map)
- ops->set_map(vdpa, dev->iotlb);
+ ops->set_map(vdpa, asid, iotlb);
v->in_batch = false;
+ if (!iotlb->nmaps)
+ vhost_vdpa_remove_as(v, asid);
break;
default:
r = -EINVAL;
@@ -977,6 +1122,21 @@ static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
}
}
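+/*
+ * Release the vhost device state, its virtqueue array and any
+ * remaining address spaces.
+ */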
+static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
+{
+ struct vhost_vdpa_as *as;
+ u32 asid;
+
+ vhost_dev_cleanup(&v->vdev);
+ kfree(v->vdev.vqs);
+
+ for (asid = 0; asid < v->vdpa->nas; asid++) {
+ as = asid_to_as(v, asid);
+ if (as)
+ vhost_vdpa_remove_as(v, asid);
+ }
+}
+
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
struct vhost_vdpa *v;
@@ -1010,15 +1170,9 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
vhost_vdpa_process_iotlb_msg);
- dev->iotlb = vhost_iotlb_alloc(0, 0);
- if (!dev->iotlb) {
- r = -ENOMEM;
- goto err_init_iotlb;
- }
-
r = vhost_vdpa_alloc_domain(v);
if (r)
- goto err_init_iotlb;
+ goto err_alloc_domain;
vhost_vdpa_set_iova_range(v);
@@ -1026,9 +1180,8 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
return 0;
-err_init_iotlb:
- vhost_dev_cleanup(&v->vdev);
- kfree(vqs);
+err_alloc_domain:
+ vhost_vdpa_cleanup(v);
err:
atomic_dec(&v->opened);
return r;
@@ -1052,11 +1205,9 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
vhost_vdpa_clean_irq(v);
vhost_vdpa_reset(v);
vhost_dev_stop(&v->vdev);
- vhost_vdpa_iotlb_free(v);
vhost_vdpa_free_domain(v);
vhost_vdpa_config_put(v);
vhost_dev_cleanup(&v->vdev);
- kfree(v->vdev.vqs);
mutex_unlock(&d->mutex);
atomic_dec(&v->opened);
@@ -1152,7 +1303,14 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
const struct vdpa_config_ops *ops = vdpa->config;
struct vhost_vdpa *v;
int minor;
- int r;
+ int i, r;
+
+ /* We can't support platform IOMMU devices with more than 1
+ * group or address space
+ */
+ if (!ops->set_map && !ops->dma_map &&
+ (vdpa->ngroups > 1 || vdpa->nas > 1))
+ return -EOPNOTSUPP;
v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!v)
@@ -1196,6 +1354,9 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
init_completion(&v->completion);
vdpa_set_drvdata(vdpa, v);
+ for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
+ INIT_HLIST_HEAD(&v->as[i]);
+
return 0;
err:
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index d02173fb290c..40097826cff0 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -231,7 +231,7 @@ void vhost_poll_stop(struct vhost_poll *poll)
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
-void vhost_work_dev_flush(struct vhost_dev *dev)
+void vhost_dev_flush(struct vhost_dev *dev)
{
struct vhost_flush_struct flush;
@@ -243,15 +243,7 @@ void vhost_work_dev_flush(struct vhost_dev *dev)
wait_for_completion(&flush.wait_event);
}
}
-EXPORT_SYMBOL_GPL(vhost_work_dev_flush);
-
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
-{
- vhost_work_dev_flush(poll->dev);
-}
-EXPORT_SYMBOL_GPL(vhost_poll_flush);
+EXPORT_SYMBOL_GPL(vhost_dev_flush);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
@@ -468,7 +460,7 @@ void vhost_dev_init(struct vhost_dev *dev,
struct vhost_virtqueue **vqs, int nvqs,
int iov_limit, int weight, int byte_weight,
bool use_worker,
- int (*msg_handler)(struct vhost_dev *dev,
+ int (*msg_handler)(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg))
{
struct vhost_virtqueue *vq;
@@ -538,7 +530,7 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
attach.owner = current;
vhost_work_init(&attach.work, vhost_attach_cgroups_work);
vhost_work_queue(dev, &attach.work);
- vhost_work_dev_flush(dev);
+ vhost_dev_flush(dev);
return attach.ret;
}
@@ -661,11 +653,11 @@ void vhost_dev_stop(struct vhost_dev *dev)
int i;
for (i = 0; i < dev->nvqs; ++i) {
- if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
+ if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick)
vhost_poll_stop(&dev->vqs[i]->poll);
- vhost_poll_flush(&dev->vqs[i]->poll);
- }
}
+
+ vhost_dev_flush(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
@@ -1090,11 +1082,14 @@ static bool umem_access_ok(u64 uaddr, u64 size, int access)
return true;
}
-static int vhost_process_iotlb_msg(struct vhost_dev *dev,
+static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg)
{
int ret = 0;
+ if (asid != 0)
+ return -EINVAL;
+
mutex_lock(&dev->mutex);
vhost_dev_lock_vqs(dev);
switch (msg->type) {
@@ -1141,6 +1136,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
struct vhost_iotlb_msg msg;
size_t offset;
int type, ret;
+ u32 asid = 0;
ret = copy_from_iter(&type, sizeof(type), from);
if (ret != sizeof(type)) {
@@ -1156,7 +1152,16 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
break;
case VHOST_IOTLB_MSG_V2:
- offset = sizeof(__u32);
+ if (vhost_backend_has_feature(dev->vqs[0],
+ VHOST_BACKEND_F_IOTLB_ASID)) {
+ ret = copy_from_iter(&asid, sizeof(asid), from);
+ if (ret != sizeof(asid)) {
+ ret = -EINVAL;
+ goto done;
+ }
+ offset = 0;
+ } else
+ offset = sizeof(__u32);
break;
default:
ret = -EINVAL;
@@ -1178,9 +1183,9 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
}
if (dev->msg_handler)
- ret = dev->msg_handler(dev, &msg);
+ ret = dev->msg_handler(dev, asid, &msg);
else
- ret = vhost_process_iotlb_msg(dev, &msg);
+ ret = vhost_process_iotlb_msg(dev, asid, &msg);
if (ret) {
ret = -EFAULT;
goto done;
@@ -1719,7 +1724,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
mutex_unlock(&vq->mutex);
if (pollstop && vq->handle_kick)
- vhost_poll_flush(&vq->poll);
+ vhost_dev_flush(vq->poll.dev);
return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 638bb640d6b4..d9109107af08 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -44,9 +44,8 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
__poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
-void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
-void vhost_work_dev_flush(struct vhost_dev *dev);
+void vhost_dev_flush(struct vhost_dev *dev);
struct vhost_log {
u64 addr;
@@ -161,7 +160,7 @@ struct vhost_dev {
int byte_weight;
u64 kcov_handle;
bool use_worker;
- int (*msg_handler)(struct vhost_dev *dev,
+ int (*msg_handler)(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg);
};
@@ -169,7 +168,7 @@ bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
int nvqs, int iov_limit, int weight, int byte_weight,
bool use_worker,
- int (*msg_handler)(struct vhost_dev *dev,
+ int (*msg_handler)(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg));
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index e6c9d41db1de..368330417bde 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -705,12 +705,7 @@ out:
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
- if (vsock->vqs[i].handle_kick)
- vhost_poll_flush(&vsock->vqs[i].poll);
- vhost_work_dev_flush(&vsock->dev);
+ vhost_dev_flush(&vsock->dev);
}
static void vhost_vsock_reset_orphans(struct sock *sk)
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index 40496e9e9b43..f304163e87e9 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -46,6 +46,7 @@
#include <linux/slab.h>
#include <linux/font.h>
#include <linux/crc32.h>
+#include <linux/fb.h>
#include <asm/io.h>
@@ -392,7 +393,9 @@ static int __init sticonsole_init(void)
for (i = 0; i < MAX_NR_CONSOLES; i++)
font_data[i] = STI_DEF_FONT;
- pr_info("sticon: Initializing STI text console.\n");
+ pr_info("sticon: Initializing STI text console on %s at [%s]\n",
+ sticon_sti->sti_data->inq_outptr.dev_name,
+ sticon_sti->pa_path);
console_lock();
err = do_take_over_console(&sti_con, 0, MAX_NR_CONSOLES - 1,
PAGE0->mem_cons.cl_class != CL_DUPLEX);
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index f869b723494f..fa23bf0247b0 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -30,10 +30,11 @@
#include <asm/pdc.h>
#include <asm/cacheflush.h>
#include <asm/grfioctl.h>
+#include <asm/fb.h>
#include "../fbdev/sticore.h"
-#define STI_DRIVERVERSION "Version 0.9b"
+#define STI_DRIVERVERSION "Version 0.9c"
static struct sti_struct *default_sti __read_mostly;
@@ -502,7 +503,7 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
if (!fbfont)
return NULL;
- pr_info("STI selected %ux%u framebuffer font %s for sticon\n",
+ pr_info(" using %ux%u framebuffer font %s\n",
fbfont->width, fbfont->height, fbfont->name);
bpc = ((fbfont->width+7)/8) * fbfont->height;
@@ -549,6 +550,26 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
}
#endif
+static void sti_dump_font(struct sti_cooked_font *font)
+{
+#ifdef STI_DUMP_FONT
+ unsigned char *p = (unsigned char *)font->raw;
+ int n;
+
+ p += sizeof(struct sti_rom_font);
+ pr_debug(" w %d h %d bpc %d\n", font->width, font->height,
+ font->raw->bytes_per_char);
+
+ for (n = 0; n < 256 * font->raw->bytes_per_char; n += 16, p += 16) {
+ pr_debug(" 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x,"
+ " 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x,"
+ " 0x%02x, 0x%02x, 0x%02x, 0x%02x,\n",
+ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8],
+ p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
+ }
+#endif
+}
+
static int sti_search_font(struct sti_cooked_rom *rom, int height, int width)
{
struct sti_cooked_font *font;
@@ -796,6 +817,7 @@ static int sti_read_rom(int wordmode, struct sti_struct *sti,
sti->font->width = sti->font->raw->width;
sti->font->height = sti->font->raw->height;
sti_font_convert_bytemode(sti, sti->font);
+ sti_dump_font(sti->font);
sti->sti_mem_request = raw->sti_mem_req;
sti->graphics_id[0] = raw->graphics_id[0];
@@ -946,6 +968,7 @@ out_err:
static void sticore_check_for_default_sti(struct sti_struct *sti, char *path)
{
+ pr_info(" located at [%s]\n", sti->pa_path);
if (strcmp (path, default_sti_path) == 0)
default_sti = sti;
}
@@ -957,7 +980,6 @@ static void sticore_check_for_default_sti(struct sti_struct *sti, char *path)
*/
static int __init sticore_pa_init(struct parisc_device *dev)
{
- char pa_path[21];
struct sti_struct *sti = NULL;
int hpa = dev->hpa.start;
@@ -970,8 +992,8 @@ static int __init sticore_pa_init(struct parisc_device *dev)
if (!sti)
return 1;
- print_pa_hwpath(dev, pa_path);
- sticore_check_for_default_sti(sti, pa_path);
+ print_pa_hwpath(dev, sti->pa_path);
+ sticore_check_for_default_sti(sti, sti->pa_path);
return 0;
}
@@ -1007,9 +1029,8 @@ static int sticore_pci_init(struct pci_dev *pd, const struct pci_device_id *ent)
sti = sti_try_rom_generic(rom_base, fb_base, pd);
if (sti) {
- char pa_path[30];
- print_pci_hwpath(pd, pa_path);
- sticore_check_for_default_sti(sti, pa_path);
+ print_pci_hwpath(pd, sti->pa_path);
+ sticore_check_for_default_sti(sti, sti->pa_path);
}
if (!sti) {
@@ -1127,6 +1148,22 @@ int sti_call(const struct sti_struct *sti, unsigned long func,
return ret;
}
+/* check if given fb_info is the primary device */
+int fb_is_primary_device(struct fb_info *info)
+{
+ struct sti_struct *sti;
+
+ sti = sti_get_rom(0);
+
+ /* if no built-in graphics card found, allow any fb driver as default */
+ if (!sti)
+ return true;
+
+ /* return true if it's the default built-in framebuffer driver */
+ return (sti->info == info);
+}
+EXPORT_SYMBOL(fb_is_primary_device);
+
MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer");
MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 9ec969e136bf..8080116aea84 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -758,12 +758,15 @@ static int clcdfb_of_vram_setup(struct clcd_fb *fb)
return -ENODEV;
fb->fb.screen_base = of_iomap(memory, 0);
- if (!fb->fb.screen_base)
+ if (!fb->fb.screen_base) {
+ of_node_put(memory);
return -ENOMEM;
+ }
fb->fb.fix.smem_start = of_translate_address(memory,
of_get_address(memory, 0, &size, NULL));
fb->fb.fix.smem_len = size;
+ of_node_put(memory);
return 0;
}
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 8359a513b600..886c564787f1 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -63,6 +63,7 @@
#define MAX_VMBUS_PKT_SIZE 0x4000
#define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
+/* Support for VERSION_WIN7 is removed. #define is retained for reference. */
#define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0)
#define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2)
#define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5)
@@ -70,13 +71,7 @@
#define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff)
#define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16)
-#define SYNTHVID_DEPTH_WIN7 16
#define SYNTHVID_DEPTH_WIN8 32
-
-#define SYNTHVID_FB_SIZE_WIN7 (4 * 1024 * 1024)
-#define SYNTHVID_WIDTH_MAX_WIN7 1600
-#define SYNTHVID_HEIGHT_MAX_WIN7 1200
-
#define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
#define PCI_VENDOR_ID_MICROSOFT 0x1414
@@ -643,12 +638,6 @@ static int synthvid_connect_vsp(struct hv_device *hdev)
case VERSION_WIN8:
case VERSION_WIN8_1:
ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN8);
- if (!ret)
- break;
- fallthrough;
- case VERSION_WS2008:
- case VERSION_WIN7:
- ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN7);
break;
default:
ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
@@ -660,11 +649,7 @@ static int synthvid_connect_vsp(struct hv_device *hdev)
goto error;
}
- if (par->synthvid_version == SYNTHVID_VERSION_WIN7)
- screen_depth = SYNTHVID_DEPTH_WIN7;
- else
- screen_depth = SYNTHVID_DEPTH_WIN8;
-
+ screen_depth = SYNTHVID_DEPTH_WIN8;
if (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10)) {
ret = synthvid_get_supported_resolution(hdev);
if (ret)
@@ -933,9 +918,7 @@ static void hvfb_get_option(struct fb_info *info)
(synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) &&
(x * y * screen_depth / 8 > screen_fb_size)) ||
(par->synthvid_version == SYNTHVID_VERSION_WIN8 &&
- x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) ||
- (par->synthvid_version == SYNTHVID_VERSION_WIN7 &&
- (x > SYNTHVID_WIDTH_MAX_WIN7 || y > SYNTHVID_HEIGHT_MAX_WIN7))) {
+ x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8)) {
pr_err("Screen resolution option is out of range: skipped\n");
return;
}
@@ -1009,7 +992,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
struct pci_dev *pdev = NULL;
void __iomem *fb_virt;
int gen2vm = efi_enabled(EFI_BOOT);
- resource_size_t pot_start, pot_end;
phys_addr_t paddr;
int ret;
@@ -1060,23 +1042,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
dio_fb_size =
screen_width * screen_height * screen_depth / 8;
- if (gen2vm) {
- pot_start = 0;
- pot_end = -1;
- } else {
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
- pci_resource_len(pdev, 0) < screen_fb_size) {
- pr_err("Resource not available or (0x%lx < 0x%lx)\n",
- (unsigned long) pci_resource_len(pdev, 0),
- (unsigned long) screen_fb_size);
- goto err1;
- }
-
- pot_end = pci_resource_end(pdev, 0);
- pot_start = pot_end - screen_fb_size + 1;
- }
-
- ret = vmbus_allocate_mmio(&par->mem, hdev, pot_start, pot_end,
+ ret = vmbus_allocate_mmio(&par->mem, hdev, 0, -1,
screen_fb_size, 0x100000, true);
if (ret != 0) {
pr_err("Unable to allocate framebuffer memory\n");
diff --git a/drivers/video/fbdev/omap/omapfb.h b/drivers/video/fbdev/omap/omapfb.h
index 313a051fe7a4..beb841ccb99c 100644
--- a/drivers/video/fbdev/omap/omapfb.h
+++ b/drivers/video/fbdev/omap/omapfb.h
@@ -231,5 +231,9 @@ extern int omapfb_update_window_async(struct fb_info *fbi,
struct omapfb_update_window *win,
void (*callback)(void *),
void *callback_data);
+extern int hwa742_update_window_async(struct fb_info *fbi,
+ struct omapfb_update_window *win,
+ void (*callback)(void *),
+ void *callback_data);
#endif /* __OMAPFB_H */
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
index be9910ff6e62..b407173e27b1 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
@@ -117,16 +117,11 @@ static int nec_8048_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
if (omapdss_device_is_connected(dssdev))
return 0;
- r = in->ops.dpi->connect(in, dssdev);
- if (r)
- return r;
-
- return 0;
+ return in->ops.dpi->connect(in, dssdev);
}
static void nec_8048_disconnect(struct omap_dss_device *dssdev)
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
index c5f89129dcdd..531b36d2232b 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
@@ -173,7 +173,6 @@ static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data
{
struct dss_pll *pll = &hpll->pll;
struct clk *clk;
- int r;
clk = devm_clk_get(&pdev->dev, "sys_clk");
if (IS_ERR(clk)) {
@@ -203,12 +202,7 @@ static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data
}
pll->ops = &dsi_pll_ops;
-
- r = dss_pll_register(pll);
- if (r)
- return r;
-
- return 0;
+ return dss_pll_register(pll);
}
int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 350b3139c863..043cc8f9ef1c 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -646,6 +646,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
for (i = 0; i < 8; i++) {
ret = pxa3xx_gcu_add_buffer(dev, priv);
if (ret) {
+ pxa3xx_gcu_free_buffers(dev, priv);
dev_err(dev, "failed to allocate DMA memory\n");
goto err_disable_clk;
}
@@ -662,15 +663,15 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
SHARED_SIZE, irq);
return 0;
-err_free_dma:
- dma_free_coherent(dev, SHARED_SIZE,
- priv->shared, priv->shared_phys);
+err_disable_clk:
+ clk_disable_unprepare(priv->clk);
err_misc_deregister:
misc_deregister(&priv->misc_dev);
-err_disable_clk:
- clk_disable_unprepare(priv->clk);
+err_free_dma:
+ dma_free_coherent(dev, SHARED_SIZE,
+ priv->shared, priv->shared_phys);
return ret;
}
@@ -683,6 +684,7 @@ static int pxa3xx_gcu_remove(struct platform_device *pdev)
pxa3xx_gcu_wait_idle(priv);
misc_deregister(&priv->misc_dev);
dma_free_coherent(dev, SHARED_SIZE, priv->shared, priv->shared_phys);
+ clk_disable_unprepare(priv->clk);
pxa3xx_gcu_free_buffers(dev, priv);
return 0;
diff --git a/arch/arm/mach-pxa/include/mach/regs-lcd.h b/drivers/video/fbdev/pxa3xx-regs.h
index e2b6e3d1f625..6a96610ef9b5 100644
--- a/arch/arm/mach-pxa/include/mach/regs-lcd.h
+++ b/drivers/video/fbdev/pxa3xx-regs.h
@@ -2,8 +2,6 @@
#ifndef __ASM_ARCH_REGS_LCD_H
#define __ASM_ARCH_REGS_LCD_H
-#include <mach/bitfield.h>
-
/*
* LCD Controller Registers and Bits Definitions
*/
@@ -86,6 +84,9 @@
#define LCCR0_OUC (1 << 25) /* Overlay Underlay control bit */
#define LCCR0_LDDALT (1 << 26) /* LDD alternate mapping control */
+#define Fld(Size, Shft) (((Size) << 16) + (Shft))
+#define FShft(Field) ((Field) & 0x0000FFFF)
+
#define LCCR1_PPL Fld (10, 0) /* Pixels Per Line - 1 */
#define LCCR1_DisWdth(Pixel) (((Pixel) - 1) << FShft (LCCR1_PPL))
@@ -176,23 +177,4 @@
#define PRSR_ST_OK (1 << 9) /* Status OK */
#define PRSR_CON_NT (1 << 10) /* Continue to Next Command */
-#define SMART_CMD_A0 (0x1 << 8)
-#define SMART_CMD_READ_STATUS_REG (0x0 << 9)
-#define SMART_CMD_READ_FRAME_BUFFER ((0x0 << 9) | SMART_CMD_A0)
-#define SMART_CMD_WRITE_COMMAND (0x1 << 9)
-#define SMART_CMD_WRITE_DATA ((0x1 << 9) | SMART_CMD_A0)
-#define SMART_CMD_WRITE_FRAME ((0x2 << 9) | SMART_CMD_A0)
-#define SMART_CMD_WAIT_FOR_VSYNC (0x3 << 9)
-#define SMART_CMD_NOOP (0x4 << 9)
-#define SMART_CMD_INTERRUPT (0x5 << 9)
-
-#define SMART_CMD(x) (SMART_CMD_WRITE_COMMAND | ((x) & 0xff))
-#define SMART_DAT(x) (SMART_CMD_WRITE_DATA | ((x) & 0xff))
-
-/* SMART_DELAY() is introduced for software controlled delay primitive which
- * can be inserted between command sequences, unused command 0x6 is used here
- * and delay ranges from 0ms ~ 255ms
- */
-#define SMART_CMD_DELAY (0x6 << 9)
-#define SMART_DELAY(ms) (SMART_CMD_DELAY | ((ms) & 0xff))
#endif /* __ASM_ARCH_REGS_LCD_H */
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 8ad91c251fe6..66cfc3e9d3cf 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -57,14 +57,13 @@
#include <linux/console.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
+#include <linux/soc/pxa/cpu.h>
#include <video/of_display_timing.h>
#include <video/videomode.h>
-#include <mach/hardware.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/div64.h>
-#include <mach/bitfield.h>
#include <linux/platform_data/video-pxafb.h>
/*
@@ -73,6 +72,7 @@
#define DEBUG_VAR 1
#include "pxafb.h"
+#include "pxa3xx-regs.h"
/* Bits which should not be set in machine configuration structures */
#define LCCR0_INVALID_CONFIG_MASK (LCCR0_OUM | LCCR0_BM | LCCR0_QDM |\
diff --git a/drivers/video/fbdev/sticore.h b/drivers/video/fbdev/sticore.h
index c338f7848ae2..0ebdd28a0b81 100644
--- a/drivers/video/fbdev/sticore.h
+++ b/drivers/video/fbdev/sticore.h
@@ -370,6 +370,9 @@ struct sti_struct {
/* pointer to all internal data */
struct sti_all_data *sti_data;
+
+ /* pa_path of this device */
+ char pa_path[24];
};
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index bebb2eea6448..38a861e22c33 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1358,11 +1358,11 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
goto out_err3;
}
+ /* save for primary gfx device detection & unregister_framebuffer() */
+ sti->info = info;
if (register_framebuffer(&fb->info) < 0)
goto out_err4;
- sti->info = info; /* save for unregister_framebuffer() */
-
fb_info(&fb->info, "%s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n",
fix->id,
var->xres,
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index e25e8de5ff67..929d4775cb4b 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -490,11 +490,12 @@ static int vesafb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
- /* vesafb_destroy takes care of info cleanup */
- unregister_framebuffer(info);
if (((struct vesafb_par *)(info->par))->region)
release_region(0x3c0, 32);
+ /* vesafb_destroy takes care of info cleanup */
+ unregister_framebuffer(info);
+
return 0;
}
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 3bed357a9870..4d2694d904aa 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -223,7 +223,6 @@ static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
red = CNVT_TOHW(red, info->var.red.length);
green = CNVT_TOHW(green, info->var.green.length);
blue = CNVT_TOHW(blue, info->var.blue.length);
- transp = CNVT_TOHW(transp, info->var.transp.length);
#undef CNVT_TOHW
v = (red << info->var.red.offset) |
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index e49bec8bc8a4..07035249a5e1 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -659,7 +659,6 @@ static int fsl_hv_open(struct inode *inode, struct file *filp)
{
struct doorbell_queue *dbq;
unsigned long flags;
- int ret = 0;
dbq = kzalloc(sizeof(struct doorbell_queue), GFP_KERNEL);
if (!dbq) {
@@ -676,7 +675,7 @@ static int fsl_hv_open(struct inode *inode, struct file *filp)
filp->private_data = dbq;
- return ret;
+ return 0;
}
/*
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 22f15f444f75..ef04a96942bf 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -169,7 +169,7 @@ EXPORT_SYMBOL_GPL(virtio_add_status);
/* Do some validation, then set FEATURES_OK */
static int virtio_features_ok(struct virtio_device *dev)
{
- unsigned status;
+ unsigned int status;
int ret;
might_sleep();
@@ -220,6 +220,15 @@ static int virtio_features_ok(struct virtio_device *dev)
* */
void virtio_reset_device(struct virtio_device *dev)
{
+ /*
+ * The below virtio_synchronize_cbs() guarantees that any
+ * interrupt for this line arriving after
+ * virtio_synchronize_cbs() has completed will see
+ * vq->broken as true.
+ */
+ virtio_break_device(dev);
+ virtio_synchronize_cbs(dev);
+
dev->config->reset(dev);
}
EXPORT_SYMBOL_GPL(virtio_reset_device);
@@ -413,7 +422,7 @@ int register_virtio_device(struct virtio_device *dev)
device_initialize(&dev->dev);
/* Assign a unique device index and hence name. */
- err = ida_simple_get(&virtio_index_ida, 0, 0, GFP_KERNEL);
+ err = ida_alloc(&virtio_index_ida, GFP_KERNEL);
if (err < 0)
goto out;
@@ -428,16 +437,16 @@ int register_virtio_device(struct virtio_device *dev)
dev->config_enabled = false;
dev->config_change_pending = false;
+ INIT_LIST_HEAD(&dev->vqs);
+ spin_lock_init(&dev->vqs_list_lock);
+
/* We always start by resetting the device, in case a previous
* driver messed it up. This also tests that code path a little. */
- dev->config->reset(dev);
+ virtio_reset_device(dev);
/* Acknowledge that we've seen the device. */
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
- INIT_LIST_HEAD(&dev->vqs);
- spin_lock_init(&dev->vqs_list_lock);
-
/*
* device_add() causes the bus infrastructure to look for a matching
* driver.
@@ -451,7 +460,7 @@ int register_virtio_device(struct virtio_device *dev)
out_of_node_put:
of_node_put(dev->dev.of_node);
out_ida_remove:
- ida_simple_remove(&virtio_index_ida, dev->index);
+ ida_free(&virtio_index_ida, dev->index);
out:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return err;
@@ -469,7 +478,7 @@ void unregister_virtio_device(struct virtio_device *dev)
int index = dev->index; /* save for after device release */
device_unregister(&dev->dev);
- ida_simple_remove(&virtio_index_ida, index);
+ ida_free(&virtio_index_ida, index);
}
EXPORT_SYMBOL_GPL(unregister_virtio_device);
@@ -496,7 +505,7 @@ int virtio_device_restore(struct virtio_device *dev)
/* We always start by resetting the device, in case a previous
* driver messed it up. */
- dev->config->reset(dev);
+ virtio_reset_device(dev);
/* Acknowledge that we've seen the device. */
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
@@ -526,8 +535,9 @@ int virtio_device_restore(struct virtio_device *dev)
goto err;
}
- /* Finally, tell the device we're all set */
- virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
+ /* If restore didn't do it, mark device DRIVER_OK ourselves. */
+ if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
+ virtio_device_ready(dev);
virtio_config_enable(dev);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index f4c34a2a6b8e..b9737da6c4dd 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -27,7 +27,7 @@
* multiple balloon pages. All memory counters in this driver are in balloon
* page units.
*/
-#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
+#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned int)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
/* Maximum number of (4k) pages to deflate on OOM notifications. */
#define VIRTIO_BALLOON_OOM_NR_PAGES 256
@@ -208,10 +208,10 @@ static void set_page_pfns(struct virtio_balloon *vb,
page_to_balloon_pfn(page) + i);
}
-static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
+static unsigned int fill_balloon(struct virtio_balloon *vb, size_t num)
{
- unsigned num_allocated_pages;
- unsigned num_pfns;
+ unsigned int num_allocated_pages;
+ unsigned int num_pfns;
struct page *page;
LIST_HEAD(pages);
@@ -272,9 +272,9 @@ static void release_pages_balloon(struct virtio_balloon *vb,
}
}
-static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
+static unsigned int leak_balloon(struct virtio_balloon *vb, size_t num)
{
- unsigned num_freed_pages;
+ unsigned int num_freed_pages;
struct page *page;
struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
LIST_HEAD(pages);
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 56128b9c46eb..f9a36bc7ac27 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -144,8 +144,8 @@ static int vm_finalize_features(struct virtio_device *vdev)
return 0;
}
-static void vm_get(struct virtio_device *vdev, unsigned offset,
- void *buf, unsigned len)
+static void vm_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned int len)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
@@ -186,8 +186,8 @@ static void vm_get(struct virtio_device *vdev, unsigned offset,
}
}
-static void vm_set(struct virtio_device *vdev, unsigned offset,
- const void *buf, unsigned len)
+static void vm_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned int len)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
@@ -253,6 +253,11 @@ static void vm_set_status(struct virtio_device *vdev, u8 status)
/* We should never be setting status to 0. */
BUG_ON(status == 0);
+ /*
+ * Per memory-barriers.txt, wmb() is not needed to guarantee
+ * that the cache coherent memory writes have completed
+ * before writing to the MMIO region.
+ */
writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
}
@@ -345,7 +350,14 @@ static void vm_del_vqs(struct virtio_device *vdev)
free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}
-static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
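+/* Wait for any in-flight interrupt handler for this device's IRQ to finish. */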
+static void vm_synchronize_cbs(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+ synchronize_irq(platform_get_irq(vm_dev->pdev, 0));
+}
+
+static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name, bool ctx)
{
@@ -455,7 +467,7 @@ error_available:
return ERR_PTR(err);
}
-static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
@@ -541,6 +553,7 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
.finalize_features = vm_finalize_features,
.bus_name = vm_bus_name,
.get_shm_region = vm_get_shm_region,
+ .synchronize_cbs = vm_synchronize_cbs,
};
@@ -657,7 +670,7 @@ static int vm_cmdline_set(const char *device,
int err;
struct resource resources[2] = {};
char *str;
- long long int base, size;
+ long long base, size;
unsigned int irq;
int processed, consumed = 0;
struct platform_device *pdev;
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index d724f676608b..ca51fcc9daab 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -104,8 +104,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
const char *name = dev_name(&vp_dev->vdev.dev);
- unsigned flags = PCI_IRQ_MSIX;
- unsigned i, v;
+ unsigned int flags = PCI_IRQ_MSIX;
+ unsigned int i, v;
int err = -ENOMEM;
vp_dev->msix_vectors = nvectors;
@@ -171,7 +171,7 @@ error:
return err;
}
-static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
+static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
bool ctx,
@@ -254,8 +254,7 @@ void vp_del_vqs(struct virtio_device *vdev)
if (vp_dev->msix_affinity_masks) {
for (i = 0; i < vp_dev->msix_vectors; i++)
- if (vp_dev->msix_affinity_masks[i])
- free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+ free_cpumask_var(vp_dev->msix_affinity_masks[i]);
}
if (vp_dev->msix_enabled) {
@@ -276,7 +275,7 @@ void vp_del_vqs(struct virtio_device *vdev)
vp_dev->vqs = NULL;
}
-static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
+static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
const char * const names[], bool per_vq_vectors,
const bool *ctx,
@@ -350,7 +349,7 @@ error_find:
return err;
}
-static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
+static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
const char * const names[], const bool *ctx)
{
@@ -389,7 +388,7 @@ out_del_vqs:
}
/* the config->find_vqs() implementation */
-int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
const char * const names[], const bool *ctx,
struct irq_affinity *desc)
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index eb17a29fc7ef..23112d84218f 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -38,7 +38,7 @@ struct virtio_pci_vq_info {
struct list_head node;
/* MSI-X vector (or none) */
- unsigned msix_vector;
+ unsigned int msix_vector;
};
/* Our device structure */
@@ -68,16 +68,16 @@ struct virtio_pci_device {
* and I'm too lazy to allocate each name separately. */
char (*msix_names)[256];
/* Number of available vectors */
- unsigned msix_vectors;
+ unsigned int msix_vectors;
/* Vectors allocated, excluding per-vq vectors if any */
- unsigned msix_used_vectors;
+ unsigned int msix_used_vectors;
/* Whether we have vector per vq */
bool per_vq_vectors;
struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
struct virtio_pci_vq_info *info,
- unsigned idx,
+ unsigned int idx,
void (*callback)(struct virtqueue *vq),
const char *name,
bool ctx,
@@ -108,7 +108,7 @@ bool vp_notify(struct virtqueue *vq);
/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev);
/* the config->find_vqs() implementation */
-int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
const char * const names[], const bool *ctx,
struct irq_affinity *desc);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 6f4e34ce96b8..a5e5721145c7 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -45,8 +45,8 @@ static int vp_finalize_features(struct virtio_device *vdev)
}
/* virtio config->get() implementation */
-static void vp_get(struct virtio_device *vdev, unsigned offset,
- void *buf, unsigned len)
+static void vp_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned int len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
void __iomem *ioaddr = vp_dev->ldev.ioaddr +
@@ -61,8 +61,8 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
/* the config->set() implementation. it's symmetric to the config->get()
* implementation */
-static void vp_set(struct virtio_device *vdev, unsigned offset,
- const void *buf, unsigned len)
+static void vp_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned int len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
void __iomem *ioaddr = vp_dev->ldev.ioaddr +
@@ -109,7 +109,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtio_pci_vq_info *info,
- unsigned index,
+ unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
bool ctx,
@@ -192,6 +192,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.reset = vp_reset,
.find_vqs = vp_find_vqs,
.del_vqs = vp_del_vqs,
+ .synchronize_cbs = vp_synchronize_vectors,
.get_features = vp_get_features,
.finalize_features = vp_finalize_features,
.bus_name = vp_bus_name,
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index a2671a20ef77..623906b4996c 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -60,8 +60,8 @@ static int vp_finalize_features(struct virtio_device *vdev)
}
/* virtio config->get() implementation */
-static void vp_get(struct virtio_device *vdev, unsigned offset,
- void *buf, unsigned len)
+static void vp_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned int len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
@@ -98,8 +98,8 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
/* the config->set() implementation. it's symmetric to the config->get()
* implementation */
-static void vp_set(struct virtio_device *vdev, unsigned offset,
- const void *buf, unsigned len)
+static void vp_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned int len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
@@ -183,7 +183,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtio_pci_vq_info *info,
- unsigned index,
+ unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
bool ctx,
@@ -248,7 +248,7 @@ err_map_notify:
return ERR_PTR(err);
}
-static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[], const bool *ctx,
@@ -394,6 +394,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.reset = vp_reset,
.find_vqs = vp_modern_find_vqs,
.del_vqs = vp_del_vqs,
+ .synchronize_cbs = vp_synchronize_vectors,
.get_features = vp_get_features,
.finalize_features = vp_finalize_features,
.bus_name = vp_bus_name,
@@ -411,6 +412,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.reset = vp_reset,
.find_vqs = vp_modern_find_vqs,
.del_vqs = vp_del_vqs,
+ .synchronize_cbs = vp_synchronize_vectors,
.get_features = vp_get_features,
.finalize_features = vp_finalize_features,
.bus_name = vp_bus_name,
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index 591738ad3d56..a0fa14f28a7f 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -347,6 +347,7 @@ err_map_notify:
err_map_isr:
pci_iounmap(pci_dev, mdev->common);
err_map_common:
+ pci_release_selected_regions(pci_dev, mdev->modern_bars);
return err;
}
EXPORT_SYMBOL_GPL(vp_modern_probe);
@@ -466,6 +467,11 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
{
struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+ /*
+ * Per memory-barriers.txt, wmb() is not needed to guarantee
+ * that the cache coherent memory writes have completed
+ * before writing to the MMIO region.
+ */
vp_iowrite8(status, &cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_set_status);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index cfb028ca238e..13a7348cedff 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -205,11 +205,9 @@ struct vring_virtqueue {
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
-static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
+static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
unsigned int total_sg)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
/*
* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold
@@ -499,7 +497,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
head = vq->free_head;
- if (virtqueue_use_indirect(_vq, total_sg))
+ if (virtqueue_use_indirect(vq, total_sg))
desc = alloc_indirect_split(_vq, total_sg, gfp);
else {
desc = NULL;
@@ -519,7 +517,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
descs_used = total_sg;
}
- if (vq->vq.num_free < descs_used) {
+ if (unlikely(vq->vq.num_free < descs_used)) {
pr_debug("Can't add buf len %i - avail = %i\n",
descs_used, vq->vq.num_free);
/* FIXME: for historical reasons, we force a notify here if
@@ -811,7 +809,7 @@ static void virtqueue_disable_cb_split(struct virtqueue *_vq)
}
}
-static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
+static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 last_used_idx;
@@ -836,7 +834,7 @@ static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
return last_used_idx;
}
-static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
+static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -1178,7 +1176,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
BUG_ON(total_sg == 0);
- if (virtqueue_use_indirect(_vq, total_sg)) {
+ if (virtqueue_use_indirect(vq, total_sg)) {
err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
in_sgs, data, gfp);
if (err != -ENOMEM) {
@@ -1488,7 +1486,7 @@ static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
}
}
-static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
+static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -1690,7 +1688,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->we_own_ring = true;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
- vq->broken = false;
+ vq->broken = true;
vq->last_used_idx = 0;
vq->event_triggered = false;
vq->num_added = 0;
@@ -2027,7 +2025,7 @@ EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
-unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
+unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -2048,7 +2046,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
*
* This does not need to be serialized.
*/
-bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -2074,7 +2072,7 @@ EXPORT_SYMBOL_GPL(virtqueue_poll);
*/
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
- unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
+ unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);
return !virtqueue_poll(_vq, last_used_idx);
}
@@ -2136,8 +2134,11 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
return IRQ_NONE;
}
- if (unlikely(vq->broken))
- return IRQ_HANDLED;
+ if (unlikely(vq->broken)) {
+ dev_warn_once(&vq->vq.vdev->dev,
+ "virtio vring IRQ raised before DRIVER_OK");
+ return IRQ_NONE;
+ }
/* Just a hint for performance: so it's ok that this can be racy! */
if (vq->event)
@@ -2179,7 +2180,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
vq->we_own_ring = false;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
- vq->broken = false;
+ vq->broken = true;
vq->last_used_idx = 0;
vq->event_triggered = false;
vq->num_added = 0;
@@ -2397,6 +2398,28 @@ void virtio_break_device(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(virtio_break_device);
+/*
+ * This should allow the device to be used by the driver. You may
+ * need to grab appropriate locks to flush the write to
+ * vq->broken. This should only be used in specific cases, e.g.
+ * probing and restoring. This function should only be called by the
+ * core, not directly by the driver.
+ */
+void __virtio_unbreak_device(struct virtio_device *dev)
+{
+ struct virtqueue *_vq;
+
+ spin_lock(&dev->vqs_list_lock);
+ list_for_each_entry(_vq, &dev->vqs, list) {
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, false);
+ }
+ spin_unlock(&dev->vqs_list_lock);
+}
+EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
+
dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 76504559bc25..c40f7deb6b5a 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -53,16 +53,16 @@ static struct vdpa_device *vd_get_vdpa(struct virtio_device *vdev)
return to_virtio_vdpa_device(vdev)->vdpa;
}
-static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset,
- void *buf, unsigned len)
+static void virtio_vdpa_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned int len)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
vdpa_get_config(vdpa, offset, buf, len);
}
-static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset,
- const void *buf, unsigned len)
+static void virtio_vdpa_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned int len)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
@@ -184,7 +184,7 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
}
/* Setup virtqueue callback */
- cb.callback = virtio_vdpa_virtqueue_cb;
+ cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
cb.private = info;
ops->set_vq_cb(vdpa, index, &cb);
ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));
@@ -263,7 +263,7 @@ static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
virtio_vdpa_del_vq(vq);
}
-static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
diff --git a/drivers/visorbus/Kconfig b/drivers/visorbus/Kconfig
deleted file mode 100644
index fa947a79b5cd..000000000000
--- a/drivers/visorbus/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Unisys visorbus configuration
-#
-
-config UNISYS_VISORBUS
- tristate "Unisys visorbus driver"
- depends on X86_64 && ACPI
- help
- The visorbus driver is a virtualized bus for the Unisys s-Par firmware.
- Virtualized devices allow Linux guests on a system to share disks and
- network cards that do not have SR-IOV support, and to be accessed using
- the partition desktop application. The visorbus driver is required to
- discover devices on an s-Par guest, and must be present for any other
- s-Par guest driver to function correctly.
diff --git a/drivers/visorbus/Makefile b/drivers/visorbus/Makefile
deleted file mode 100644
index e8df59d1301f..000000000000
--- a/drivers/visorbus/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Unisys visorbus
-#
-
-obj-$(CONFIG_UNISYS_VISORBUS) += visorbus.o
-
-visorbus-y := visorbus_main.o
-visorbus-y += visorchannel.o
-visorbus-y += visorchipset.o
diff --git a/drivers/visorbus/controlvmchannel.h b/drivers/visorbus/controlvmchannel.h
deleted file mode 100644
index c87213554427..000000000000
--- a/drivers/visorbus/controlvmchannel.h
+++ /dev/null
@@ -1,650 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#ifndef __CONTROLVMCHANNEL_H__
-#define __CONTROLVMCHANNEL_H__
-
-#include <linux/uuid.h>
-#include <linux/visorbus.h>
-
-/* {2B3C2D10-7EF5-4ad8-B966-3448B7386B3D} */
-#define VISOR_CONTROLVM_CHANNEL_GUID \
- GUID_INIT(0x2b3c2d10, 0x7ef5, 0x4ad8, \
- 0xb9, 0x66, 0x34, 0x48, 0xb7, 0x38, 0x6b, 0x3d)
-
-#define CONTROLVM_MESSAGE_MAX 64
-
-/*
- * Must increment this whenever you insert or delete fields within this channel
- * struct. Also increment whenever you change the meaning of fields within this
- * channel struct so as to break pre-existing software. Note that you can
- * usually add fields to the END of the channel struct withOUT needing to
- * increment this.
- */
-#define VISOR_CONTROLVM_CHANNEL_VERSIONID 1
-
-/* Defines for various channel queues */
-#define CONTROLVM_QUEUE_REQUEST 0
-#define CONTROLVM_QUEUE_RESPONSE 1
-#define CONTROLVM_QUEUE_EVENT 2
-#define CONTROLVM_QUEUE_ACK 3
-
-/* Max num of messages stored during IOVM creation to be reused after crash */
-#define CONTROLVM_CRASHMSG_MAX 2
-
-/*
- * struct visor_segment_state
- * @enabled: May enter other states.
- * @active: Assigned to active partition.
- * @alive: Configure message sent to service/server.
- * @revoked: Similar to partition state ShuttingDown.
- * @allocated: Memory (device/port number) has been selected by Command.
- * @known: Has been introduced to the service/guest partition.
- * @ready: Service/Guest partition has responded to introduction.
- * @operating: Resource is configured and operating.
- * @reserved: Natural alignment.
- *
- * Note: Don't use high bit unless we need to switch to ushort which is
- * non-compliant.
- */
-struct visor_segment_state {
- u16 enabled:1;
- u16 active:1;
- u16 alive:1;
- u16 revoked:1;
- u16 allocated:1;
- u16 known:1;
- u16 ready:1;
- u16 operating:1;
- u16 reserved:8;
-} __packed;
-
-static const struct visor_segment_state segment_state_running = {
- 1, 1, 1, 0, 1, 1, 1, 1
-};
-
-static const struct visor_segment_state segment_state_paused = {
- 1, 1, 1, 0, 1, 1, 1, 0
-};
-
-static const struct visor_segment_state segment_state_standby = {
- 1, 1, 0, 0, 1, 1, 1, 0
-};
-
-/*
- * enum controlvm_id
- * @CONTROLVM_INVALID:
- * @CONTROLVM_BUS_CREATE: CP --> SP, GP.
- * @CONTROLVM_BUS_DESTROY: CP --> SP, GP.
- * @CONTROLVM_BUS_CONFIGURE: CP --> SP.
- * @CONTROLVM_BUS_CHANGESTATE: CP --> SP, GP.
- * @CONTROLVM_BUS_CHANGESTATE_EVENT: SP, GP --> CP.
- * @CONTROLVM_DEVICE_CREATE: CP --> SP, GP.
- * @CONTROLVM_DEVICE_DESTROY: CP --> SP, GP.
- * @CONTROLVM_DEVICE_CONFIGURE: CP --> SP.
- * @CONTROLVM_DEVICE_CHANGESTATE: CP --> SP, GP.
- * @CONTROLVM_DEVICE_CHANGESTATE_EVENT: SP, GP --> CP.
- * @CONTROLVM_DEVICE_RECONFIGURE: CP --> Boot.
- * @CONTROLVM_CHIPSET_INIT: CP --> SP, GP.
- * @CONTROLVM_CHIPSET_STOP: CP --> SP, GP.
- * @CONTROLVM_CHIPSET_READY: CP --> SP.
- * @CONTROLVM_CHIPSET_SELFTEST: CP --> SP.
- *
- * Ids for commands that may appear in either queue of a ControlVm channel.
- *
- * Commands that are initiated by the command partition (CP), by an IO or
- * console service partition (SP), or by a guest partition (GP) are:
- * - issued on the RequestQueue queue (q #0) in the ControlVm channel
- * - responded to on the ResponseQueue queue (q #1) in the ControlVm channel
- *
- * Events that are initiated by an IO or console service partition (SP) or
- * by a guest partition (GP) are:
- * - issued on the EventQueue queue (q #2) in the ControlVm channel
- * - responded to on the EventAckQueue queue (q #3) in the ControlVm channel
- */
-enum controlvm_id {
- CONTROLVM_INVALID = 0,
- /*
- * SWITCH commands required Parameter: SwitchNumber.
- * BUS commands required Parameter: BusNumber
- */
- CONTROLVM_BUS_CREATE = 0x101,
- CONTROLVM_BUS_DESTROY = 0x102,
- CONTROLVM_BUS_CONFIGURE = 0x104,
- CONTROLVM_BUS_CHANGESTATE = 0x105,
- CONTROLVM_BUS_CHANGESTATE_EVENT = 0x106,
- /* DEVICE commands required Parameter: BusNumber, DeviceNumber */
- CONTROLVM_DEVICE_CREATE = 0x201,
- CONTROLVM_DEVICE_DESTROY = 0x202,
- CONTROLVM_DEVICE_CONFIGURE = 0x203,
- CONTROLVM_DEVICE_CHANGESTATE = 0x204,
- CONTROLVM_DEVICE_CHANGESTATE_EVENT = 0x205,
- CONTROLVM_DEVICE_RECONFIGURE = 0x206,
- /* CHIPSET commands */
- CONTROLVM_CHIPSET_INIT = 0x301,
- CONTROLVM_CHIPSET_STOP = 0x302,
- CONTROLVM_CHIPSET_READY = 0x304,
- CONTROLVM_CHIPSET_SELFTEST = 0x305,
-};
-
-/*
- * struct irq_info
- * @reserved1: Natural alignment purposes
- * @recv_irq_handle: Specifies interrupt handle. It is used to retrieve the
- * corresponding interrupt pin from Monitor; and the interrupt
- * pin is used to connect to the corresponding interrupt.
- * Used by IOPart-GP only.
- * @recv_irq_vector: Specifies interrupt vector. It, interrupt pin, and shared
- * are used to connect to the corresponding interrupt.
- * Used by IOPart-GP only.
- * @recv_irq_shared: Specifies if the recvInterrupt is shared (0 = not
- * shared; 1 = shared). It, the interrupt pin, and the vector are used to
- * connect to the corresponding interrupt.
- * Used by IOPart-GP only.
- * @reserved: Natural alignment purposes
- */
-struct irq_info {
- u64 reserved1;
- u64 recv_irq_handle;
- u32 recv_irq_vector;
- u8 recv_irq_shared;
- u8 reserved[3];
-} __packed;
-
-/*
- * struct efi_visor_indication
- * @boot_to_fw_ui: Stop in UEFI UI
- * @clear_nvram: Clear NVRAM
- * @clear_cmos: Clear CMOS
- * @boot_to_tool: Run install tool
- * @reserved: Natural alignment
- */
-struct efi_visor_indication {
- u64 boot_to_fw_ui:1;
- u64 clear_nvram:1;
- u64 clear_cmos:1;
- u64 boot_to_tool:1;
- /* Remaining bits are available */
- u64 reserved:60;
-} __packed;
-
-enum visor_chipset_feature {
- VISOR_CHIPSET_FEATURE_REPLY = 0x00000001,
- VISOR_CHIPSET_FEATURE_PARA_HOTPLUG = 0x00000002,
-};
-
-/*
- * struct controlvm_message_header
- * @id: See CONTROLVM_ID.
- * @message_size: Includes size of this struct + size of message.
- * @segment_index: Index of segment containing Vm message/information.
- * @completion_status: Error status code or result of message completion.
- * @struct flags:
- * @failed: =1 in a response to signify failure.
- * @response_expected: =1 in all messages that expect a response.
- * @server: =1 in all bus & device-related messages where the
- * message receiver is to act as the bus or device
- * server.
- * @test_message: =1 for testing use only (Control and Command
- * ignore this).
- * @partial_completion: =1 if there are forthcoming responses/acks
- * associated with this message.
- * @preserve: =1 this is to let us know to preserve channel
- * contents.
- * @writer_in_diag: =1 the DiagWriter is active in the Diagnostic
- * Partition.
- * @reserve: Natural alignment.
- * @reserved: Natural alignment.
- * @message_handle: Identifies the particular message instance.
- * @payload_vm_offset: Offset of payload area from start of this instance.
- * @payload_max_bytes: Maximum bytes allocated in payload area of ControlVm
- * segment.
- * @payload_bytes: Actual number of bytes of payload area to copy between
- * IO/Command. If non-zero, there is a payload to copy.
- *
- * This is the common structure that is at the beginning of every
- * ControlVm message (both commands and responses) in any ControlVm
- * queue. Commands are easily distinguished from responses by
- * looking at the flags.response field.
- */
-struct controlvm_message_header {
- u32 id;
- /*
- * For requests, indicates the message type. For responses, indicates
- * the type of message we are responding to.
- */
- u32 message_size;
- u32 segment_index;
- u32 completion_status;
- struct {
- u32 failed:1;
- u32 response_expected:1;
- u32 server:1;
- u32 test_message:1;
- u32 partial_completion:1;
- u32 preserve:1;
- u32 writer_in_diag:1;
- u32 reserve:25;
- } __packed flags;
- u32 reserved;
- u64 message_handle;
- u64 payload_vm_offset;
- u32 payload_max_bytes;
- u32 payload_bytes;
-} __packed;
-
-/*
- * struct controlvm_packet_device_create - For CONTROLVM_DEVICE_CREATE
- * @bus_no: Bus # (0..n-1) from the msg receiver's end.
- * @dev_no: Bus-relative (0..n-1) device number.
- * @channel_addr: Guest physical address of the channel, which can be
- * dereferenced by the receiver of this ControlVm command.
- * @channel_bytes: Specifies size of the channel in bytes.
- * @data_type_uuid: Specifies format of data in channel.
- * @dev_inst_uuid: Instance guid for the device.
- * @irq_info intr: Specifies interrupt information.
- */
-struct controlvm_packet_device_create {
- u32 bus_no;
- u32 dev_no;
- u64 channel_addr;
- u64 channel_bytes;
- guid_t data_type_guid;
- guid_t dev_inst_guid;
- struct irq_info intr;
-} __packed;
-
-/*
- * struct controlvm_packet_device_configure - For CONTROLVM_DEVICE_CONFIGURE
- * @bus_no: Bus number (0..n-1) from the msg receiver's perspective.
- * @dev_no: Bus-relative (0..n-1) device number.
- */
-struct controlvm_packet_device_configure {
- u32 bus_no;
- u32 dev_no;
-} __packed;
-
-/* Total 128 bytes */
-struct controlvm_message_device_create {
- struct controlvm_message_header header;
- struct controlvm_packet_device_create packet;
-} __packed;
-
-/* Total 56 bytes */
-struct controlvm_message_device_configure {
- struct controlvm_message_header header;
- struct controlvm_packet_device_configure packet;
-} __packed;
-
-/*
- * struct controlvm_message_packet - This is the format for a message in any
- * ControlVm queue.
- * @struct create_bus: For CONTROLVM_BUS_CREATE.
- * @bus_no: Bus # (0..n-1) from the msg receiver's perspective.
- * @dev_count: Indicates the max number of devices on this bus.
- * @channel_addr: Guest physical address of the channel, which can be
- * dereferenced by the receiver of this ControlVM
- * command.
- * @channel_bytes: Size of the channel.
- * @bus_data_type_uuid: Indicates format of data in bus channel.
- * @bus_inst_uuid: Instance uuid for the bus.
- *
- * @struct destroy_bus: For CONTROLVM_BUS_DESTROY.
- * @bus_no: Bus # (0..n-1) from the msg receiver's perspective.
- * @reserved: Natural alignment purposes.
- *
- * @struct configure_bus: For CONTROLVM_BUS_CONFIGURE.
- * @bus_no: Bus # (0..n-1) from the receiver's perspective.
- * @reserved1: For alignment purposes.
- * @guest_handle: This is used to convert guest physical address to
- * physical address.
- * @recv_bus_irq_handle: Specifies interrupt info. It is used by SP to
- * register to receive interrupts from the CP. This
- * interrupt is used for bus level notifications.
- * The corresponding sendBusInterruptHandle is kept
- * in CP.
- *
- * @struct create_device: For CONTROLVM_DEVICE_CREATE.
- *
- * @struct destroy_device: For CONTROLVM_DEVICE_DESTROY.
- * @bus_no: Bus # (0..n-1) from the msg receiver's perspective.
- * @dev_no: Bus-relative (0..n-1) device number.
- *
- * @struct configure_device: For CONTROLVM_DEVICE_CONFIGURE.
- *
- * @struct reconfigure_device: For CONTROLVM_DEVICE_RECONFIGURE.
- * @bus_no: Bus # (0..n-1) from the msg receiver's perspective.
- * @dev_no: Bus-relative (0..n-1) device number.
- *
- * @struct bus_change_state: For CONTROLVM_BUS_CHANGESTATE.
- * @bus_no:
- * @struct state:
- * @reserved: Natural alignment purposes.
- *
- * @struct device_change_state: For CONTROLVM_DEVICE_CHANGESTATE.
- * @bus_no:
- * @dev_no:
- * @struct state:
- * @struct flags:
- * @phys_device: =1 if message is for a physical device.
- * @reserved: Natural alignment.
- * @reserved1: Natural alignment.
- * @reserved: Natural alignment purposes.
- *
- * @struct device_change_state_event: For CONTROLVM_DEVICE_CHANGESTATE_EVENT.
- * @bus_no:
- * @dev_no:
- * @struct state:
- * @reserved: Natural alignment purposes.
- *
- * @struct init_chipset: For CONTROLVM_CHIPSET_INIT.
- * @bus_count: Indicates the max number of busses.
- * @switch_count: Indicates the max number of switches.
- * @enum features:
- * @platform_number:
- *
- * @struct chipset_selftest: For CONTROLVM_CHIPSET_SELFTEST.
- * @options: Reserved.
- * @test: Bit 0 set to run embedded selftest.
- *
- * @addr: A physical address of something, that can be dereferenced by the
- * receiver of this ControlVm command.
- *
- * @handle: A handle of something (depends on command id).
- */
-struct controlvm_message_packet {
- union {
- struct {
- u32 bus_no;
- u32 dev_count;
- u64 channel_addr;
- u64 channel_bytes;
- guid_t bus_data_type_guid;
- guid_t bus_inst_guid;
- } __packed create_bus;
- struct {
- u32 bus_no;
- u32 reserved;
- } __packed destroy_bus;
- struct {
- u32 bus_no;
- u32 reserved1;
- u64 guest_handle;
- u64 recv_bus_irq_handle;
- } __packed configure_bus;
- struct controlvm_packet_device_create create_device;
- struct {
- u32 bus_no;
- u32 dev_no;
- } __packed destroy_device;
- struct controlvm_packet_device_configure configure_device;
- struct {
- u32 bus_no;
- u32 dev_no;
- } __packed reconfigure_device;
- struct {
- u32 bus_no;
- struct visor_segment_state state;
- u8 reserved[2];
- } __packed bus_change_state;
- struct {
- u32 bus_no;
- u32 dev_no;
- struct visor_segment_state state;
- struct {
- u32 phys_device:1;
- u32 reserved:31;
- u32 reserved1;
- } __packed flags;
- u8 reserved[2];
- } __packed device_change_state;
- struct {
- u32 bus_no;
- u32 dev_no;
- struct visor_segment_state state;
- u8 reserved[6];
- } __packed device_change_state_event;
- struct {
- u32 bus_count;
- u32 switch_count;
- enum visor_chipset_feature features;
- u32 platform_number;
- } __packed init_chipset;
- struct {
- u32 options;
- u32 test;
- } __packed chipset_selftest;
- u64 addr;
- u64 handle;
- };
-} __packed;
-
-/* All messages in any ControlVm queue have this layout. */
-struct controlvm_message {
- struct controlvm_message_header hdr;
- struct controlvm_message_packet cmd;
-} __packed;
-
-/*
- * struct visor_controlvm_channel
- * @struct header:
- * @gp_controlvm: Guest phys addr of this channel.
- * @gp_partition_tables: Guest phys addr of partition tables.
- * @gp_diag_guest: Guest phys addr of diagnostic channel.
- * @gp_boot_romdisk: Guest phys addr of (read-only) Boot
- * ROM disk.
- * @gp_boot_ramdisk: Guest phys addr of writable Boot RAM
- * disk.
- * @gp_acpi_table: Guest phys addr of acpi table.
- * @gp_control_channel: Guest phys addr of control channel.
- * @gp_diag_romdisk: Guest phys addr of diagnostic ROM disk.
- * @gp_nvram: Guest phys addr of NVRAM channel.
- * @request_payload_offset: Offset to request payload area.
- * @event_payload_offset: Offset to event payload area.
- * @request_payload_bytes: Bytes available in request payload area.
- * @event_payload_bytes: Bytes available in event payload area.
- * @control_channel_bytes:
- * @nvram_channel_bytes: Bytes in PartitionNvram segment.
- * @message_bytes: sizeof(CONTROLVM_MESSAGE).
- * @message_count: CONTROLVM_MESSAGE_MAX.
- * @gp_smbios_table: Guest phys addr of SMBIOS tables.
- * @gp_physical_smbios_table: Guest phys addr of SMBIOS table.
- * @gp_reserved: VISOR_MAX_GUESTS_PER_SERVICE.
- * @virtual_guest_firmware_image_base: Guest physical address of EFI firmware
- * image base.
- * @virtual_guest_firmware_entry_point: Guest physical address of EFI firmware
- * entry point.
- * @virtual_guest_firmware_image_size: Guest EFI firmware image size.
- * @virtual_guest_firmware_boot_base: GPA = 1MB where EFI firmware image is
- * copied to.
- * @virtual_guest_image_base:
- * @virtual_guest_image_size:
- * @prototype_control_channel_offset:
- * @virtual_guest_partition_handle:
- * @restore_action: Restore Action field to restore the
- * guest partition.
- * @dump_action: For Windows guests it shows if the
- * visordisk is in dump mode.
- * @nvram_fail_count:
- * @saved_crash_message_count: = CONTROLVM_CRASHMSG_MAX.
- * @saved_crash_message_offset: Offset to request payload area needed
- * for crash dump.
- * @installation_error: Type of error encountered during
- * installation.
- * @installation_text_id: Id of string to display.
- * @installation_remaining_steps: Number of remaining installation steps
- * (for progress bars).
- * @tool_action: VISOR_TOOL_ACTIONS Installation Action
- * field.
- * @reserved: Alignment.
- * @struct efi_visor_ind:
- * @sp_reserved:
- * @reserved2: Force signals to begin on 128-byte
- * cache line.
- * @struct request_queue: Guest partition uses this queue to send
- * requests to Control.
- * @struct response_queue: Control uses this queue to respond to
- * service or guest partition request.
- * @struct event_queue: Control uses this queue to send events
- * to guest partition.
- * @struct event_ack_queue: Service or guest partition uses this
- * queue to ack Control events.
- * @struct request_msg: Request fixed-size message pool -
- * does not include payload.
- * @struct response_msg: Response fixed-size message pool -
- * does not include payload.
- * @struct event_msg: Event fixed-size message pool -
- * does not include payload.
- * @struct event_ack_msg: Ack fixed-size message pool -
- * does not include payload.
- * @struct saved_crash_msg: Message stored during IOVM creation to
- * be reused after crash.
- */
-struct visor_controlvm_channel {
- struct channel_header header;
- u64 gp_controlvm;
- u64 gp_partition_tables;
- u64 gp_diag_guest;
- u64 gp_boot_romdisk;
- u64 gp_boot_ramdisk;
- u64 gp_acpi_table;
- u64 gp_control_channel;
- u64 gp_diag_romdisk;
- u64 gp_nvram;
- u64 request_payload_offset;
- u64 event_payload_offset;
- u32 request_payload_bytes;
- u32 event_payload_bytes;
- u32 control_channel_bytes;
- u32 nvram_channel_bytes;
- u32 message_bytes;
- u32 message_count;
- u64 gp_smbios_table;
- u64 gp_physical_smbios_table;
- char gp_reserved[2688];
- u64 virtual_guest_firmware_image_base;
- u64 virtual_guest_firmware_entry_point;
- u64 virtual_guest_firmware_image_size;
- u64 virtual_guest_firmware_boot_base;
- u64 virtual_guest_image_base;
- u64 virtual_guest_image_size;
- u64 prototype_control_channel_offset;
- u64 virtual_guest_partition_handle;
- u16 restore_action;
- u16 dump_action;
- u16 nvram_fail_count;
- u16 saved_crash_message_count;
- u32 saved_crash_message_offset;
- u32 installation_error;
- u32 installation_text_id;
- u16 installation_remaining_steps;
- u8 tool_action;
- u8 reserved;
- struct efi_visor_indication efi_visor_ind;
- u32 sp_reserved;
- u8 reserved2[28];
- struct signal_queue_header request_queue;
- struct signal_queue_header response_queue;
- struct signal_queue_header event_queue;
- struct signal_queue_header event_ack_queue;
- struct controlvm_message request_msg[CONTROLVM_MESSAGE_MAX];
- struct controlvm_message response_msg[CONTROLVM_MESSAGE_MAX];
- struct controlvm_message event_msg[CONTROLVM_MESSAGE_MAX];
- struct controlvm_message event_ack_msg[CONTROLVM_MESSAGE_MAX];
- struct controlvm_message saved_crash_msg[CONTROLVM_CRASHMSG_MAX];
-} __packed;
-
-/*
- * struct visor_controlvm_parameters_header
- *
- * The following header will be located at the beginning of PayloadVmOffset for
- * various ControlVm commands. The receiver of a ControlVm command with a
- * PayloadVmOffset will dereference this address and then use connection_offset,
- * initiator_offset, and target_offset to get the location of UTF-8 formatted
- * strings that can be parsed to obtain command-specific information. The value
- * of total_length should equal PayloadBytes. The format of the strings at
- * PayloadVmOffset will take different forms depending on the message.
- */
-struct visor_controlvm_parameters_header {
- u32 total_length;
- u32 header_length;
- u32 connection_offset;
- u32 connection_length;
- u32 initiator_offset;
- u32 initiator_length;
- u32 target_offset;
- u32 target_length;
- u32 client_offset;
- u32 client_length;
- u32 name_offset;
- u32 name_length;
- guid_t id;
- u32 revision;
- /* Natural alignment */
- u32 reserved;
-} __packed;
-
-/* General Errors------------------------------------------------------[0-99] */
-#define CONTROLVM_RESP_SUCCESS 0
-#define CONTROLVM_RESP_ALREADY_DONE 1
-#define CONTROLVM_RESP_IOREMAP_FAILED 2
-#define CONTROLVM_RESP_KMALLOC_FAILED 3
-#define CONTROLVM_RESP_ID_UNKNOWN 4
-#define CONTROLVM_RESP_ID_INVALID_FOR_CLIENT 5
-/* CONTROLVM_INIT_CHIPSET-------------------------------------------[100-199] */
-#define CONTROLVM_RESP_CLIENT_SWITCHCOUNT_NONZERO 100
-#define CONTROLVM_RESP_EXPECTED_CHIPSET_INIT 101
-/* Maximum Limit----------------------------------------------------[200-299] */
-/* BUS_CREATE */
-#define CONTROLVM_RESP_ERROR_MAX_BUSES 201
-/* DEVICE_CREATE */
-#define CONTROLVM_RESP_ERROR_MAX_DEVICES 202
-/* Payload and Parameter Related------------------------------------[400-499] */
-/* SWITCH_ATTACHEXTPORT, DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_PAYLOAD_INVALID 400
-/* Multiple */
-#define CONTROLVM_RESP_INITIATOR_PARAMETER_INVALID 401
-/* DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_TARGET_PARAMETER_INVALID 402
-/* DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_CLIENT_PARAMETER_INVALID 403
-/* Specified[Packet Structure] Value--------------------------------[500-599] */
-/* SWITCH_ATTACHINTPORT */
-/* BUS_CONFIGURE, DEVICE_CREATE, DEVICE_CONFIG, DEVICE_DESTROY */
-#define CONTROLVM_RESP_BUS_INVALID 500
-/* SWITCH_ATTACHINTPORT*/
-/* DEVICE_CREATE, DEVICE_CONFIGURE, DEVICE_DESTROY */
-#define CONTROLVM_RESP_DEVICE_INVALID 501
-/* DEVICE_CREATE, DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_CHANNEL_INVALID 502
-/* Partition Driver Callback Interface------------------------------[600-699] */
-/* BUS_CREATE, BUS_DESTROY, DEVICE_CREATE, DEVICE_DESTROY */
-#define CONTROLVM_RESP_VIRTPCI_DRIVER_FAILURE 604
-/* Unable to invoke VIRTPCI callback. VIRTPCI Callback returned error. */
-/* BUS_CREATE, BUS_DESTROY, DEVICE_CREATE, DEVICE_DESTROY */
-#define CONTROLVM_RESP_VIRTPCI_DRIVER_CALLBACK_ERROR 605
-/* Generic device callback returned error. */
-/* SWITCH_ATTACHEXTPORT, SWITCH_DETACHEXTPORT, DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_GENERIC_DRIVER_CALLBACK_ERROR 606
-/* Bus Related------------------------------------------------------[700-799] */
-/* BUS_DESTROY */
-#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED 700
-/* Channel Related--------------------------------------------------[800-899] */
-/* GET_CHANNELINFO, DEVICE_DESTROY */
-#define CONTROLVM_RESP_CHANNEL_TYPE_UNKNOWN 800
-/* DEVICE_CREATE */
-#define CONTROLVM_RESP_CHANNEL_SIZE_TOO_SMALL 801
-/* Chipset Shutdown Related---------------------------------------[1000-1099] */
-#define CONTROLVM_RESP_CHIPSET_SHUTDOWN_FAILED 1000
-#define CONTROLVM_RESP_CHIPSET_SHUTDOWN_ALREADY_ACTIVE 1001
-/* Chipset Stop Related-------------------------------------------[1100-1199] */
-#define CONTROLVM_RESP_CHIPSET_STOP_FAILED_BUS 1100
-#define CONTROLVM_RESP_CHIPSET_STOP_FAILED_SWITCH 1101
-/* Device Related-------------------------------------------------[1400-1499] */
-#define CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT 1400
-
-/* __CONTROLVMCHANNEL_H__ */
-#endif
diff --git a/drivers/visorbus/vbuschannel.h b/drivers/visorbus/vbuschannel.h
deleted file mode 100644
index 98711fb6d66e..000000000000
--- a/drivers/visorbus/vbuschannel.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#ifndef __VBUSCHANNEL_H__
-#define __VBUSCHANNEL_H__
-
-/*
- * The vbus channel is the channel area provided via the BUS_CREATE controlvm
- * message for each virtual bus. This channel area is provided to both server
- * and client ends of the bus. The channel header area is initialized by
- * the server, and the remaining information is filled in by the client.
- * We currently use this for the client to provide various information about
- * the client devices and client drivers for the server end to see.
- */
-
-#include <linux/uuid.h>
-#include <linux/visorbus.h>
-
-/* {193b331b-c58f-11da-95a9-00e08161165f} */
-#define VISOR_VBUS_CHANNEL_GUID \
- GUID_INIT(0x193b331b, 0xc58f, 0x11da, \
- 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-
-/*
- * Must increment this whenever you insert or delete fields within this channel
- * struct. Also increment whenever you change the meaning of fields within this
- * channel struct so as to break pre-existing software. Note that you can
- * usually add fields to the END of the channel struct withOUT needing to
- * increment this.
- */
-#define VISOR_VBUS_CHANNEL_VERSIONID 1
-
-/*
- * struct visor_vbus_deviceinfo
- * @devtype: Short string identifying the device type.
- * @drvname: Driver .sys file name.
- * @infostrs: Kernel version.
- * @reserved: Pad size to 256 bytes.
- *
- * An array of this struct is present in the channel area for each vbus. It is
- * filled in by the client side to provide info about the device and driver from
- * the client's perspective.
- */
-struct visor_vbus_deviceinfo {
- u8 devtype[16];
- u8 drvname[16];
- u8 infostrs[96];
- u8 reserved[128];
-} __packed;
-
-/*
- * struct visor_vbus_headerinfo
- * @struct_bytes: Size of this struct in bytes.
- * @device_info_struct_bytes: Size of VISOR_VBUS_DEVICEINFO.
- * @dev_info_count: Num of items in DevInfo member. This is the
- * allocated size.
- * @chp_info_offset: Byte offset from beginning of this struct to the
- * ChpInfo struct.
- * @bus_info_offset: Byte offset from beginning of this struct to the
- * BusInfo struct.
- * @dev_info_offset: Byte offset from beginning of this struct to the
- * DevInfo array.
- * @reserved: Natural alignment.
- */
-struct visor_vbus_headerinfo {
- u32 struct_bytes;
- u32 device_info_struct_bytes;
- u32 dev_info_count;
- u32 chp_info_offset;
- u32 bus_info_offset;
- u32 dev_info_offset;
- u8 reserved[104];
-} __packed;
-
-/*
- * struct visor_vbus_channel
- * @channel_header: Initialized by server.
- * @hdr_info: Initialized by server.
- * @chp_info: Describes client chipset device and driver.
- * @bus_info: Describes client bus device and driver.
- * @dev_info: Describes client device and driver for each device on the
- * bus.
- */
-struct visor_vbus_channel {
- struct channel_header channel_header;
- struct visor_vbus_headerinfo hdr_info;
- struct visor_vbus_deviceinfo chp_info;
- struct visor_vbus_deviceinfo bus_info;
- struct visor_vbus_deviceinfo dev_info[];
-} __packed;
-
-#endif
diff --git a/drivers/visorbus/visorbus_main.c b/drivers/visorbus/visorbus_main.c
deleted file mode 100644
index 152fd29f04f2..000000000000
--- a/drivers/visorbus/visorbus_main.c
+++ /dev/null
@@ -1,1234 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#include <linux/ctype.h>
-#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/visorbus.h>
-#include <linux/uuid.h>
-
-#include "visorbus_private.h"
-
-static const guid_t visor_vbus_channel_guid = VISOR_VBUS_CHANNEL_GUID;
-
-/* Display string that is guaranteed to be no longer than 99 characters */
-#define LINESIZE 99
-#define POLLJIFFIES_NORMALCHANNEL 10
-
-/* stores whether bus_registration was successful */
-static bool initialized;
-static struct dentry *visorbus_debugfs_dir;
-
-/*
- * DEVICE type attributes
- *
- * The modalias file will contain the guid of the device.
- */
-static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct visor_device *vdev;
- const guid_t *guid;
-
- vdev = to_visor_device(dev);
- guid = visorchannel_get_guid(vdev->visorchannel);
- return sprintf(buf, "visorbus:%pUl\n", guid);
-}
-static DEVICE_ATTR_RO(modalias);
-
-static struct attribute *visorbus_dev_attrs[] = {
- &dev_attr_modalias.attr,
- NULL,
-};
-
-ATTRIBUTE_GROUPS(visorbus_dev);
-
-/* filled in with info about parent chipset driver when we register with it */
-static struct visor_vbus_deviceinfo chipset_driverinfo;
-/* filled in with info about this driver, wrt it servicing client busses */
-static struct visor_vbus_deviceinfo clientbus_driverinfo;
-
-/* list of visor_device structs, linked via .list_all */
-static LIST_HEAD(list_all_bus_instances);
-/* list of visor_device structs, linked via .list_all */
-static LIST_HEAD(list_all_device_instances);
-
-/*
- * Generic function useful for validating any type of channel when it is
- * received by the client that will be accessing the channel.
- * Note that <logCtx> is only needed for callers in the EFI environment, and
- * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
- */
-int visor_check_channel(struct channel_header *ch, struct device *dev,
- const guid_t *expected_guid, char *chname,
- u64 expected_min_bytes, u32 expected_version,
- u64 expected_signature)
-{
- if (!guid_is_null(expected_guid)) {
- /* caller wants us to verify type GUID */
- if (!guid_equal(&ch->chtype, expected_guid)) {
- dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=type expected=%pUL actual=%pUL\n",
- chname, expected_guid, expected_guid,
- &ch->chtype);
- return 0;
- }
- }
- /* verify channel size */
- if (expected_min_bytes > 0) {
- if (ch->size < expected_min_bytes) {
- dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
- chname, expected_guid,
- (unsigned long long)expected_min_bytes,
- ch->size);
- return 0;
- }
- }
- /* verify channel version */
- if (expected_version > 0) {
- if (ch->version_id != expected_version) {
- dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=version expected=0x%-8.8lx actual=0x%-8.8x\n",
- chname, expected_guid,
- (unsigned long)expected_version,
- ch->version_id);
- return 0;
- }
- }
- /* verify channel signature */
- if (expected_signature > 0) {
- if (ch->signature != expected_signature) {
- dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=signature expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
- chname, expected_guid, expected_signature,
- ch->signature);
- return 0;
- }
- }
- return 1;
-}
-
-static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env)
-{
- struct visor_device *dev;
- const guid_t *guid;
-
- dev = to_visor_device(xdev);
- guid = visorchannel_get_guid(dev->visorchannel);
- return add_uevent_var(env, "MODALIAS=visorbus:%pUl", guid);
-}
-
-/*
- * visorbus_match() - called automatically upon adding a visor_device
- * (device_add), or adding a visor_driver
- * (visorbus_register_visor_driver)
- * @xdev: struct device for the device being matched
- * @xdrv: struct device_driver for driver to match device against
- *
- * Return: 1 iff the provided driver can control the specified device
- */
-static int visorbus_match(struct device *xdev, struct device_driver *xdrv)
-{
- const guid_t *channel_type;
- int i;
- struct visor_device *dev;
- struct visor_driver *drv;
- struct visorchannel *chan;
-
- dev = to_visor_device(xdev);
- channel_type = visorchannel_get_guid(dev->visorchannel);
- drv = to_visor_driver(xdrv);
- chan = dev->visorchannel;
- if (!drv->channel_types)
- return 0;
- for (i = 0; !guid_is_null(&drv->channel_types[i].guid); i++)
- if (guid_equal(&drv->channel_types[i].guid, channel_type) &&
- visor_check_channel(visorchannel_get_header(chan),
- xdev,
- &drv->channel_types[i].guid,
- (char *)drv->channel_types[i].name,
- drv->channel_types[i].min_bytes,
- drv->channel_types[i].version,
- VISOR_CHANNEL_SIGNATURE))
- return i + 1;
- return 0;
-}
-
-/*
- * This describes the TYPE of bus.
- * (Don't confuse this with an INSTANCE of the bus.)
- */
-static struct bus_type visorbus_type = {
- .name = "visorbus",
- .match = visorbus_match,
- .uevent = visorbus_uevent,
- .dev_groups = visorbus_dev_groups,
-};
-
-struct visor_busdev {
- u32 bus_no;
- u32 dev_no;
-};
-
-static int match_visorbus_dev_by_id(struct device *dev, const void *data)
-{
- struct visor_device *vdev = to_visor_device(dev);
- const struct visor_busdev *id = data;
-
- if (vdev->chipset_bus_no == id->bus_no &&
- vdev->chipset_dev_no == id->dev_no)
- return 1;
- return 0;
-}
-
-struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
- struct visor_device *from)
-{
- struct device *dev;
- struct device *dev_start = NULL;
- struct visor_busdev id = {
- .bus_no = bus_no,
- .dev_no = dev_no
- };
-
- if (from)
- dev_start = &from->device;
- dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
- match_visorbus_dev_by_id);
- if (!dev)
- return NULL;
- return to_visor_device(dev);
-}
-
-/*
- * visorbus_release_busdevice() - called when device_unregister() is called for
- * the bus device instance, after all other tasks
- * involved with destroying the dev are complete
- * @xdev: struct device for the bus being released
- */
-static void visorbus_release_busdevice(struct device *xdev)
-{
- struct visor_device *dev = dev_get_drvdata(xdev);
-
- debugfs_remove(dev->debugfs_bus_info);
- debugfs_remove_recursive(dev->debugfs_dir);
- visorchannel_destroy(dev->visorchannel);
- kfree(dev);
-}
-
-/*
- * visorbus_release_device() - called when device_unregister() is called for
- * each child device instance
- * @xdev: struct device for the visor device being released
- */
-static void visorbus_release_device(struct device *xdev)
-{
- struct visor_device *dev = to_visor_device(xdev);
-
- visorchannel_destroy(dev->visorchannel);
- kfree(dev);
-}
-
-/*
- * BUS specific channel attributes to appear under
- * /sys/bus/visorbus<x>/dev<y>/channel
- */
-
-static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
-
- return sprintf(buf, "0x%llx\n",
- visorchannel_get_physaddr(vdev->visorchannel));
-}
-static DEVICE_ATTR_RO(physaddr);
-
-static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
-
- return sprintf(buf, "0x%lx\n",
- visorchannel_get_nbytes(vdev->visorchannel));
-}
-static DEVICE_ATTR_RO(nbytes);
-
-static ssize_t clientpartition_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
-
- return sprintf(buf, "0x%llx\n",
- visorchannel_get_clientpartition(vdev->visorchannel));
-}
-static DEVICE_ATTR_RO(clientpartition);
-
-static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
- char typeid[LINESIZE];
-
- return sprintf(buf, "%s\n",
- visorchannel_id(vdev->visorchannel, typeid));
-}
-static DEVICE_ATTR_RO(typeguid);
-
-static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
- char zoneid[LINESIZE];
-
- return sprintf(buf, "%s\n",
- visorchannel_zoneid(vdev->visorchannel, zoneid));
-}
-static DEVICE_ATTR_RO(zoneguid);
-
-static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- int i = 0;
- struct bus_type *xbus = dev->bus;
- struct device_driver *xdrv = dev->driver;
- struct visor_driver *drv = NULL;
-
- if (!xdrv)
- return 0;
- i = xbus->match(dev, xdrv);
- if (!i)
- return 0;
- drv = to_visor_driver(xdrv);
- return sprintf(buf, "%s\n", drv->channel_types[i - 1].name);
-}
-static DEVICE_ATTR_RO(typename);
-
-static struct attribute *channel_attrs[] = {
- &dev_attr_physaddr.attr,
- &dev_attr_nbytes.attr,
- &dev_attr_clientpartition.attr,
- &dev_attr_typeguid.attr,
- &dev_attr_zoneguid.attr,
- &dev_attr_typename.attr,
- NULL
-};
-
-ATTRIBUTE_GROUPS(channel);
-
-/*
- * BUS instance attributes
- *
- * define & implement display of bus attributes under
- * /sys/bus/visorbus/devices/visorbus<n>.
- */
-static ssize_t partition_handle_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
- u64 handle = visorchannel_get_clientpartition(vdev->visorchannel);
-
- return sprintf(buf, "0x%llx\n", handle);
-}
-static DEVICE_ATTR_RO(partition_handle);
-
-static ssize_t partition_guid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
-
- return sprintf(buf, "{%pUb}\n", &vdev->partition_guid);
-}
-static DEVICE_ATTR_RO(partition_guid);
-
-static ssize_t partition_name_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
-
- return sprintf(buf, "%s\n", vdev->name);
-}
-static DEVICE_ATTR_RO(partition_name);
-
-static ssize_t channel_addr_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
- u64 addr = visorchannel_get_physaddr(vdev->visorchannel);
-
- return sprintf(buf, "0x%llx\n", addr);
-}
-static DEVICE_ATTR_RO(channel_addr);
-
-static ssize_t channel_bytes_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
- u64 nbytes = visorchannel_get_nbytes(vdev->visorchannel);
-
- return sprintf(buf, "0x%llx\n", nbytes);
-}
-static DEVICE_ATTR_RO(channel_bytes);
-
-static ssize_t channel_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct visor_device *vdev = to_visor_device(dev);
- int len = 0;
-
- visorchannel_id(vdev->visorchannel, buf);
- len = strlen(buf);
- buf[len++] = '\n';
- return len;
-}
-static DEVICE_ATTR_RO(channel_id);
-
-static struct attribute *visorbus_attrs[] = {
- &dev_attr_partition_handle.attr,
- &dev_attr_partition_guid.attr,
- &dev_attr_partition_name.attr,
- &dev_attr_channel_addr.attr,
- &dev_attr_channel_bytes.attr,
- &dev_attr_channel_id.attr,
- NULL
-};
-
-ATTRIBUTE_GROUPS(visorbus);
-
-/*
- * BUS debugfs entries
- *
- * define & implement display of debugfs attributes under
- * /sys/kernel/debug/visorbus/visorbus<n>.
- */
-
-/*
- * vbuschannel_print_devinfo() - format a struct visor_vbus_deviceinfo
- * and write it to a seq_file
- * @devinfo: the struct visor_vbus_deviceinfo to format
- * @seq: seq_file to write to
- * @devix: the device index to be included in the output data, or -1 if no
- * device index is to be included
- *
- * Reads @devinfo and writes it in human-readable notation to @seq.
- */
-static void vbuschannel_print_devinfo(struct visor_vbus_deviceinfo *devinfo,
- struct seq_file *seq, int devix)
-{
- /* uninitialized vbus device entry */
- if (!isprint(devinfo->devtype[0]))
- return;
- if (devix >= 0)
- seq_printf(seq, "[%d]", devix);
- else
- /* vbus device entry is for bus or chipset */
- seq_puts(seq, " ");
- /*
- * Note: because the s-Par back-end is free to scribble in this area,
- * we never assume '\0'-termination.
- */
- seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->devtype),
- (int)sizeof(devinfo->devtype), devinfo->devtype);
- seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->drvname),
- (int)sizeof(devinfo->drvname), devinfo->drvname);
- seq_printf(seq, "%.*s\n", (int)sizeof(devinfo->infostrs),
- devinfo->infostrs);
-}
-
-static int bus_info_debugfs_show(struct seq_file *seq, void *v)
-{
- int i = 0;
- unsigned long off;
- struct visor_vbus_deviceinfo dev_info;
- struct visor_device *vdev = seq->private;
- struct visorchannel *channel = vdev->visorchannel;
-
- if (!channel)
- return 0;
-
- seq_printf(seq,
- "Client device/driver info for %s partition (vbus #%u):\n",
- ((vdev->name) ? (char *)(vdev->name) : ""),
- vdev->chipset_bus_no);
- if (visorchannel_read(channel,
- offsetof(struct visor_vbus_channel, chp_info),
- &dev_info, sizeof(dev_info)) >= 0)
- vbuschannel_print_devinfo(&dev_info, seq, -1);
- if (visorchannel_read(channel,
- offsetof(struct visor_vbus_channel, bus_info),
- &dev_info, sizeof(dev_info)) >= 0)
- vbuschannel_print_devinfo(&dev_info, seq, -1);
-
- off = offsetof(struct visor_vbus_channel, dev_info);
- while (off + sizeof(dev_info) <= visorchannel_get_nbytes(channel)) {
- if (visorchannel_read(channel, off, &dev_info,
- sizeof(dev_info)) >= 0)
- vbuschannel_print_devinfo(&dev_info, seq, i);
- off += sizeof(dev_info);
- i++;
- }
- return 0;
-}
-
-static int bus_info_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, bus_info_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations bus_info_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = bus_info_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void dev_periodic_work(struct timer_list *t)
-{
- struct visor_device *dev = from_timer(dev, t, timer);
- struct visor_driver *drv = to_visor_driver(dev->device.driver);
-
- drv->channel_interrupt(dev);
- mod_timer(&dev->timer, jiffies + POLLJIFFIES_NORMALCHANNEL);
-}
-
-static int dev_start_periodic_work(struct visor_device *dev)
-{
- if (dev->being_removed || dev->timer_active)
- return -EINVAL;
-
- /* now up by at least 2 */
- get_device(&dev->device);
- dev->timer.expires = jiffies + POLLJIFFIES_NORMALCHANNEL;
- add_timer(&dev->timer);
- dev->timer_active = true;
- return 0;
-}
-
-static void dev_stop_periodic_work(struct visor_device *dev)
-{
- if (!dev->timer_active)
- return;
-
- del_timer_sync(&dev->timer);
- dev->timer_active = false;
- put_device(&dev->device);
-}
-
-/*
- * visordriver_remove_device() - handle visor device going away
- * @xdev: struct device for the visor device being removed
- *
- * This is called when device_unregister() is called for each child device
- * instance, to notify the appropriate visorbus function driver that the device
- * is going away, and to decrease the reference count of the device.
- *
- * Return: 0 iff successful
- */
-static int visordriver_remove_device(struct device *xdev)
-{
- struct visor_device *dev = to_visor_device(xdev);
- struct visor_driver *drv = to_visor_driver(xdev->driver);
-
- mutex_lock(&dev->visordriver_callback_lock);
- dev->being_removed = true;
- drv->remove(dev);
- mutex_unlock(&dev->visordriver_callback_lock);
- dev_stop_periodic_work(dev);
- put_device(&dev->device);
- return 0;
-}
-
-/*
- * visorbus_unregister_visor_driver() - unregisters the provided driver
- * @drv: the driver to unregister
- *
- * A visor function driver calls this function to unregister the driver,
- * i.e., within its module_exit function.
- */
-void visorbus_unregister_visor_driver(struct visor_driver *drv)
-{
- driver_unregister(&drv->driver);
-}
-EXPORT_SYMBOL_GPL(visorbus_unregister_visor_driver);
-
-/*
- * visorbus_read_channel() - reads from the designated channel into
- * the provided buffer
- * @dev: the device whose channel is read from
- * @offset: the offset into the channel at which reading starts
- * @dest: the destination buffer that is written into from the channel
- * @nbytes: the number of bytes to read from the channel
- *
- * If receiving a message, use the visorchannel_signalremove() function instead.
- *
- * Return: integer indicating success (zero) or failure (non-zero)
- */
-int visorbus_read_channel(struct visor_device *dev, unsigned long offset,
- void *dest, unsigned long nbytes)
-{
- return visorchannel_read(dev->visorchannel, offset, dest, nbytes);
-}
-EXPORT_SYMBOL_GPL(visorbus_read_channel);
-
-/*
- * visorbus_write_channel() - writes the provided buffer into the designated
- * channel
- * @dev: the device whose channel is written to
- * @offset: the offset into the channel at which writing starts
- * @src: the source buffer that is written into the channel
- * @nbytes: the number of bytes to write into the channel
- *
- * If sending a message, use the visorchannel_signalinsert() function instead.
- *
- * Return: integer indicating success (zero) or failure (non-zero)
- */
-int visorbus_write_channel(struct visor_device *dev, unsigned long offset,
- void *src, unsigned long nbytes)
-{
- return visorchannel_write(dev->visorchannel, offset, src, nbytes);
-}
-EXPORT_SYMBOL_GPL(visorbus_write_channel);
-
-/*
- * visorbus_enable_channel_interrupts() - enables interrupts on the
- * designated device
- * @dev: the device on which to enable interrupts
- *
- * Currently we don't yet have a real interrupt, so for now we just call the
- * interrupt function periodically via a timer.
- */
-int visorbus_enable_channel_interrupts(struct visor_device *dev)
-{
- struct visor_driver *drv = to_visor_driver(dev->device.driver);
-
- if (!drv->channel_interrupt) {
- dev_err(&dev->device, "%s no interrupt function!\n", __func__);
- return -ENOENT;
- }
-
- return dev_start_periodic_work(dev);
-}
-EXPORT_SYMBOL_GPL(visorbus_enable_channel_interrupts);
-
-/*
- * visorbus_disable_channel_interrupts() - disables interrupts on the
- * designated device
- * @dev: the device on which to disable interrupts
- */
-void visorbus_disable_channel_interrupts(struct visor_device *dev)
-{
- dev_stop_periodic_work(dev);
-}
-EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts);
-
-/*
- * create_visor_device() - create visor device as a result of receiving the
- * controlvm device_create message for a new device
- * @dev: a freshly-zeroed struct visor_device, containing only filled-in values
- * for chipset_bus_no and chipset_dev_no, that will be initialized
- *
- * This is how everything starts from the device end.
- * This function is called when a channel first appears via a ControlVM
- * message. In response, this function allocates a visor_device to correspond
- * to the new channel, and attempts to connect it to the appropriate driver.
- * If the appropriate driver is found, the visor_driver.probe() function for
- * that driver will be called, and will be passed the new visor_device that
- * we just created.
- *
- * It's ok if the appropriate driver is not yet loaded, because in that case
- * the new device struct will just stick around in the bus' list of devices.
- * When the appropriate driver calls visorbus_register_visor_driver(), the
- * visor_driver.probe() for the new driver will be called with the new device.
- *
- * Return: 0 if successful, otherwise the negative value returned by
- * device_add() indicating the reason for failure
- */
-int create_visor_device(struct visor_device *dev)
-{
- int err;
- u32 chipset_bus_no = dev->chipset_bus_no;
- u32 chipset_dev_no = dev->chipset_dev_no;
-
- mutex_init(&dev->visordriver_callback_lock);
- dev->device.bus = &visorbus_type;
- dev->device.groups = channel_groups;
- device_initialize(&dev->device);
- dev->device.release = visorbus_release_device;
- /* keep a reference just for us (now 2) */
- get_device(&dev->device);
- timer_setup(&dev->timer, dev_periodic_work, 0);
- /*
- * bus_id must be a unique name with respect to this bus TYPE (NOT bus
- * instance). That's why we need to include the bus number within the
- * name.
- */
- err = dev_set_name(&dev->device, "vbus%u:dev%u",
- chipset_bus_no, chipset_dev_no);
- if (err)
- goto err_put;
- /*
- * device_add does this:
- * bus_add_device(dev)
- * ->device_attach(dev)
- * ->for each driver drv registered on the bus that dev is on
- * if (dev.drv) ** device already has a driver **
- * ** not sure we could ever get here... **
- * else
- * if (bus.match(dev,drv)) [visorbus_match]
- * dev.drv = drv
- * if (!drv.probe(dev)) [visordriver_probe_device]
- * dev.drv = NULL
- *
- * Note that device_add() does NOT fail even if no driver claims the
- * device. The device will be linked onto bus_type.klist_devices
- * regardless (use bus_for_each_dev).
- */
- err = device_add(&dev->device);
- if (err < 0)
- goto err_put;
- list_add_tail(&dev->list_all, &list_all_device_instances);
- dev->state.created = 1;
- visorbus_response(dev, err, CONTROLVM_DEVICE_CREATE);
- /* success: reference kept via unmatched get_device() */
- return 0;
-
-err_put:
- put_device(&dev->device);
- dev_err(&dev->device, "Creating visor device failed. %d\n", err);
- return err;
-}
-
-void remove_visor_device(struct visor_device *dev)
-{
- list_del(&dev->list_all);
- put_device(&dev->device);
- if (dev->pending_msg_hdr)
- visorbus_response(dev, 0, CONTROLVM_DEVICE_DESTROY);
- device_unregister(&dev->device);
-}
-
-static int get_vbus_header_info(struct visorchannel *chan,
- struct device *dev,
- struct visor_vbus_headerinfo *hdr_info)
-{
- int err;
-
- if (!visor_check_channel(visorchannel_get_header(chan),
- dev,
- &visor_vbus_channel_guid,
- "vbus",
- sizeof(struct visor_vbus_channel),
- VISOR_VBUS_CHANNEL_VERSIONID,
- VISOR_CHANNEL_SIGNATURE))
- return -EINVAL;
-
- err = visorchannel_read(chan, sizeof(struct channel_header), hdr_info,
- sizeof(*hdr_info));
- if (err < 0)
- return err;
- if (hdr_info->struct_bytes < sizeof(struct visor_vbus_headerinfo))
- return -EINVAL;
- if (hdr_info->device_info_struct_bytes <
- sizeof(struct visor_vbus_deviceinfo))
- return -EINVAL;
- return 0;
-}
-
-/*
- * write_vbus_chp_info() - write the contents of <info> to the struct
- * visor_vbus_channel.chp_info
- * @chan: identifies the s-Par channel that will be updated
- * @hdr_info: used to find appropriate channel offset to write data
- * @info: contains the information to write
- *
- * Writes chipset info into the channel memory to be used for diagnostic
- * purposes.
- *
- * Returns no value since this is debug information and not needed for
- * device functionality.
- */
-static void write_vbus_chp_info(struct visorchannel *chan,
- struct visor_vbus_headerinfo *hdr_info,
- struct visor_vbus_deviceinfo *info)
-{
- int off;
-
- if (hdr_info->chp_info_offset == 0)
- return;
-
- off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
- visorchannel_write(chan, off, info, sizeof(*info));
-}
-
-/*
- * write_vbus_bus_info() - write the contents of <info> to the struct
- * visor_vbus_channel.bus_info
- * @chan: identifies the s-Par channel that will be updated
- * @hdr_info: used to find appropriate channel offset to write data
- * @info: contains the information to write
- *
- * Writes bus info into the channel memory to be used for diagnostic
- * purposes.
- *
- * Returns no value since this is debug information and not needed for
- * device functionality.
- */
-static void write_vbus_bus_info(struct visorchannel *chan,
- struct visor_vbus_headerinfo *hdr_info,
- struct visor_vbus_deviceinfo *info)
-{
- int off;
-
- if (hdr_info->bus_info_offset == 0)
- return;
-
- off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
- visorchannel_write(chan, off, info, sizeof(*info));
-}
-
-/*
- * write_vbus_dev_info() - write the contents of <info> to the struct
- * visor_vbus_channel.dev_info[<devix>]
- * @chan: identifies the s-Par channel that will be updated
- * @hdr_info: used to find appropriate channel offset to write data
- * @info: contains the information to write
- * @devix: the relative device number (0..n-1) of the device on the bus
- *
- * Writes device info into the channel memory to be used for diagnostic
- * purposes.
- *
- * Returns no value since this is debug information and not needed for
- * device functionality.
- */
-static void write_vbus_dev_info(struct visorchannel *chan,
- struct visor_vbus_headerinfo *hdr_info,
- struct visor_vbus_deviceinfo *info,
- unsigned int devix)
-{
- int off;
-
- if (hdr_info->dev_info_offset == 0)
- return;
- off = (sizeof(struct channel_header) + hdr_info->dev_info_offset) +
- (hdr_info->device_info_struct_bytes * devix);
- visorchannel_write(chan, off, info, sizeof(*info));
-}
-
-static void bus_device_info_init(
- struct visor_vbus_deviceinfo *bus_device_info_ptr,
- const char *dev_type, const char *drv_name)
-{
- memset(bus_device_info_ptr, 0, sizeof(struct visor_vbus_deviceinfo));
- snprintf(bus_device_info_ptr->devtype,
- sizeof(bus_device_info_ptr->devtype),
- "%s", (dev_type) ? dev_type : "unknownType");
- snprintf(bus_device_info_ptr->drvname,
- sizeof(bus_device_info_ptr->drvname),
- "%s", (drv_name) ? drv_name : "unknownDriver");
- snprintf(bus_device_info_ptr->infostrs,
- sizeof(bus_device_info_ptr->infostrs), "kernel ver. %s",
- utsname()->release);
-}
-
-/*
- * publish_vbus_dev_info() - for a child device just created on a client bus,
- * fill in information about the driver that is
- * controlling this device into the appropriate slot
- * within the vbus channel of the bus instance
- * @visordev: struct visor_device for the desired device
- */
-static void publish_vbus_dev_info(struct visor_device *visordev)
-{
- int i;
- struct visor_device *bdev;
- struct visor_driver *visordrv;
- u32 bus_no = visordev->chipset_bus_no;
- u32 dev_no = visordev->chipset_dev_no;
- struct visor_vbus_deviceinfo dev_info;
- const char *chan_type_name = NULL;
- struct visor_vbus_headerinfo *hdr_info;
-
- if (!visordev->device.driver)
- return;
- bdev = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
- if (!bdev)
- return;
- hdr_info = (struct visor_vbus_headerinfo *)bdev->vbus_hdr_info;
- if (!hdr_info)
- return;
- visordrv = to_visor_driver(visordev->device.driver);
-
- /*
- * Within the list of device types (by GUID) that the driver
- * says it supports, find out which one of those types matches
- * the type of this device, so that we can include the device
- * type name
- */
- for (i = 0; visordrv->channel_types[i].name; i++) {
- if (guid_equal(&visordrv->channel_types[i].guid,
- &visordev->channel_type_guid)) {
- chan_type_name = visordrv->channel_types[i].name;
- break;
- }
- }
- bus_device_info_init(&dev_info, chan_type_name, visordrv->name);
- write_vbus_dev_info(bdev->visorchannel, hdr_info, &dev_info, dev_no);
- write_vbus_chp_info(bdev->visorchannel, hdr_info, &chipset_driverinfo);
- write_vbus_bus_info(bdev->visorchannel, hdr_info,
- &clientbus_driverinfo);
-}
-
-/*
- * visordriver_probe_device() - handle new visor device coming online
- * @xdev: struct device for the visor device being probed
- *
- * This is called automatically upon adding a visor_device (device_add), or
- * adding a visor_driver (visorbus_register_visor_driver), but only after
- * visorbus_match() has returned 1 to indicate a successful match between
- * driver and device.
- *
- * If successful, a reference to the device will be held onto via get_device().
- *
- * Return: 0 if successful, meaning the function driver's probe() function
- * was successful with this device, otherwise a negative errno
- * value indicating failure reason
- */
-static int visordriver_probe_device(struct device *xdev)
-{
- int err;
- struct visor_driver *drv = to_visor_driver(xdev->driver);
- struct visor_device *dev = to_visor_device(xdev);
-
- mutex_lock(&dev->visordriver_callback_lock);
- dev->being_removed = false;
- err = drv->probe(dev);
- if (err) {
- mutex_unlock(&dev->visordriver_callback_lock);
- return err;
- }
- /* success: reference kept via unmatched get_device() */
- get_device(&dev->device);
- publish_vbus_dev_info(dev);
- mutex_unlock(&dev->visordriver_callback_lock);
- return 0;
-}
-
-/*
- * visorbus_register_visor_driver() - registers the provided visor driver for
- * handling one or more visor device
- * types (channel_types)
- * @drv: the driver to register
- *
- * A visor function driver calls this function to register the driver. The
- * caller MUST fill in the following fields within the #drv structure:
- * name, version, owner, channel_types, probe, remove
- *
- * Here's how the whole Linux bus / driver / device model works.
- *
- * At system start-up, the visorbus kernel module is loaded, which registers
- * visorbus_type as a bus type, using bus_register().
- *
- * All kernel modules that support particular device types on a
- * visorbus bus are loaded. Each of these kernel modules calls
- * visorbus_register_visor_driver() in their init functions, passing a
- * visor_driver struct. visorbus_register_visor_driver() in turn calls
- * register_driver(&visor_driver.driver). This .driver member is
- * initialized with generic methods (like probe), whose sole responsibility
- * is to act as a broker for the real methods, which are within the
- * visor_driver struct. (This is the way the subclass behavior is
- * implemented, since visor_driver is essentially a subclass of the
- * generic driver.) Whenever a driver_register() happens, core bus code in
- * the kernel does (see device_attach() in drivers/base/dd.c):
- *
- * for each dev associated with the bus (the bus that driver is on) that
- * does not yet have a driver
- * if bus.match(dev,newdriver) == yes_matched ** .match specified
- * ** during bus_register().
- * newdriver.probe(dev) ** for visor drivers, this will call
- * ** the generic driver.probe implemented in visorbus.c,
- * ** which in turn calls the probe specified within the
- * ** struct visor_driver (which was specified by the
- * ** actual device driver as part of
- * ** visorbus_register_visor_driver()).
- *
- * The above dance also happens when a new device appears.
- * So the question is, how are devices created within the system?
- * Basically, just call device_add(dev). See pci_bus_add_devices().
- * pci_scan_device() shows an example of how to build a device struct. It
- * returns the newly-created struct to pci_scan_single_device(), who adds it
- * to the list of devices at PCIBUS.devices. That list of devices is what
- * is traversed by pci_bus_add_devices().
- *
- * Return: integer indicating success (zero) or failure (non-zero)
- */
-int visorbus_register_visor_driver(struct visor_driver *drv)
-{
- /* can't register on a nonexistent bus */
- if (!initialized)
- return -ENODEV;
- if (!drv->probe)
- return -EINVAL;
- if (!drv->remove)
- return -EINVAL;
- if (!drv->pause)
- return -EINVAL;
- if (!drv->resume)
- return -EINVAL;
-
- drv->driver.name = drv->name;
- drv->driver.bus = &visorbus_type;
- drv->driver.probe = visordriver_probe_device;
- drv->driver.remove = visordriver_remove_device;
- drv->driver.owner = drv->owner;
- /*
- * driver_register does this:
- * bus_add_driver(drv)
- * ->if (drv.bus) ** (bus_type) **
- * driver_attach(drv)
- * for each dev with bus type of drv.bus
- * if (!dev.drv) ** no driver assigned yet **
- * if (bus.match(dev,drv)) [visorbus_match]
- * dev.drv = drv
- * if (!drv.probe(dev)) [visordriver_probe_device]
- * dev.drv = NULL
- */
- return driver_register(&drv->driver);
-}
-EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
-
-/*
- * visorbus_create_instance() - create a device instance for the visorbus itself
- * @dev: struct visor_device indicating the bus instance
- *
- * Return: 0 for success, otherwise negative errno value indicating reason for
- * failure
- */
-int visorbus_create_instance(struct visor_device *dev)
-{
- int id = dev->chipset_bus_no;
- int err;
- struct visor_vbus_headerinfo *hdr_info;
-
- hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL);
- if (!hdr_info)
- return -ENOMEM;
- dev_set_name(&dev->device, "visorbus%d", id);
- dev->device.bus = &visorbus_type;
- dev->device.groups = visorbus_groups;
- dev->device.release = visorbus_release_busdevice;
- dev->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
- visorbus_debugfs_dir);
- dev->debugfs_bus_info = debugfs_create_file("client_bus_info", 0440,
- dev->debugfs_dir, dev,
- &bus_info_debugfs_fops);
- dev_set_drvdata(&dev->device, dev);
- err = get_vbus_header_info(dev->visorchannel, &dev->device, hdr_info);
- if (err < 0)
- goto err_debugfs_dir;
- err = device_register(&dev->device);
- if (err < 0)
- goto err_debugfs_dir;
- list_add_tail(&dev->list_all, &list_all_bus_instances);
- dev->state.created = 1;
- dev->vbus_hdr_info = (void *)hdr_info;
- write_vbus_chp_info(dev->visorchannel, hdr_info, &chipset_driverinfo);
- write_vbus_bus_info(dev->visorchannel, hdr_info, &clientbus_driverinfo);
- visorbus_response(dev, err, CONTROLVM_BUS_CREATE);
- return 0;
-
-err_debugfs_dir:
- debugfs_remove_recursive(dev->debugfs_dir);
- kfree(hdr_info);
- dev_err(&dev->device, "%s failed: %d\n", __func__, err);
- return err;
-}
-
-/*
- * visorbus_remove_instance() - remove a device instance for the visorbus itself
- * @dev: struct visor_device identifying the bus to remove
- */
-void visorbus_remove_instance(struct visor_device *dev)
-{
- /*
- * Note that this results in the release method for dev->device,
- * visorbus_release_busdevice(), being called: device_unregister()
- * drops the last reference via put_device(), and the driver core
- * invokes release() once that refcount reaches zero.
- */
- kfree(dev->vbus_hdr_info);
- list_del(&dev->list_all);
- if (dev->pending_msg_hdr)
- visorbus_response(dev, 0, CONTROLVM_BUS_DESTROY);
- device_unregister(&dev->device);
-}
-
-/*
- * remove_all_visor_devices() - remove all child visorbus device instances
- */
-static void remove_all_visor_devices(void)
-{
- struct list_head *listentry, *listtmp;
-
- list_for_each_safe(listentry, listtmp, &list_all_device_instances) {
- struct visor_device *dev;
-
- dev = list_entry(listentry, struct visor_device, list_all);
- remove_visor_device(dev);
- }
-}
-
-/*
- * pause_state_change_complete() - the callback function to be called by a
- * visorbus function driver when a
- * pending "pause device" operation has
- * completed
- * @dev: struct visor_device identifying the paused device
- * @status: 0 iff the pause state change completed successfully, otherwise
- * a negative errno value indicating the reason for failure
- */
-static void pause_state_change_complete(struct visor_device *dev, int status)
-{
- if (!dev->pausing)
- return;
-
- dev->pausing = false;
- visorbus_device_changestate_response(dev, status,
- segment_state_standby);
-}
-
-/*
- * resume_state_change_complete() - the callback function to be called by a
- * visorbus function driver when a
- * pending "resume device" operation has
- * completed
- * @dev: struct visor_device identifying the resumed device
- * @status: 0 iff the resume state change completed successfully, otherwise
- * a negative errno value indicating the reason for failure
- */
-static void resume_state_change_complete(struct visor_device *dev, int status)
-{
- if (!dev->resuming)
- return;
-
- dev->resuming = false;
- /*
- * Notify the chipset driver that the resume is complete,
- * which will presumably want to send some sort of response to
- * the initiator.
- */
- visorbus_device_changestate_response(dev, status,
- segment_state_running);
-}
-
-/*
- * visorchipset_initiate_device_pause_resume() - start a pause or resume
- * operation for a visor device
- * @dev: struct visor_device identifying the device being paused or resumed
- * @is_pause: true to indicate pause operation, false to indicate resume
- *
- * Tell the subordinate function driver for a specific device to pause
- * or resume that device. Success/failure result is returned asynchronously
- * via a callback function; see pause_state_change_complete() and
- * resume_state_change_complete().
- */
-static int visorchipset_initiate_device_pause_resume(struct visor_device *dev,
- bool is_pause)
-{
- int err;
- struct visor_driver *drv;
-
- /* No driver associated with the device; nothing to pause/resume */
- if (!dev->device.driver)
- return 0;
- if (dev->pausing || dev->resuming)
- return -EBUSY;
-
- drv = to_visor_driver(dev->device.driver);
- if (is_pause) {
- dev->pausing = true;
- err = drv->pause(dev, pause_state_change_complete);
- } else {
- /*
- * The vbus_dev_info structure in the channel may have been cleared,
- * so republish it to make sure it is valid.
- */
- publish_vbus_dev_info(dev);
- dev->resuming = true;
- err = drv->resume(dev, resume_state_change_complete);
- }
- return err;
-}
-
-/*
- * visorchipset_device_pause() - start a pause operation for a visor device
- * @dev_info: struct visor_device identifying the device being paused
- *
- * Tell the subordinate function driver for a specific device to pause
- * that device. Success/failure result is returned asynchronously
- * via a callback function; see pause_state_change_complete().
- */
-int visorchipset_device_pause(struct visor_device *dev_info)
-{
- int err;
-
- err = visorchipset_initiate_device_pause_resume(dev_info, true);
- if (err < 0) {
- dev_info->pausing = false;
- return err;
- }
- return 0;
-}
-
-/*
- * visorchipset_device_resume() - start a resume operation for a visor device
- * @dev_info: struct visor_device identifying the device being resumed
- *
- * Tell the subordinate function driver for a specific device to resume
- * that device. Success/failure result is returned asynchronously
- * via a callback function; see resume_state_change_complete().
- */
-int visorchipset_device_resume(struct visor_device *dev_info)
-{
- int err;
-
- err = visorchipset_initiate_device_pause_resume(dev_info, false);
- if (err < 0) {
- dev_info->resuming = false;
- return err;
- }
- return 0;
-}
-
-int visorbus_init(void)
-{
- int err;
-
- visorbus_debugfs_dir = debugfs_create_dir("visorbus", NULL);
- bus_device_info_init(&clientbus_driverinfo, "clientbus", "visorbus");
- err = bus_register(&visorbus_type);
- if (err < 0)
- return err;
- initialized = true;
- bus_device_info_init(&chipset_driverinfo, "chipset", "visorchipset");
- return 0;
-}
-
-void visorbus_exit(void)
-{
- struct list_head *listentry, *listtmp;
-
- remove_all_visor_devices();
- list_for_each_safe(listentry, listtmp, &list_all_bus_instances) {
- struct visor_device *dev;
-
- dev = list_entry(listentry, struct visor_device, list_all);
- visorbus_remove_instance(dev);
- }
- bus_unregister(&visorbus_type);
- initialized = false;
- debugfs_remove_recursive(visorbus_debugfs_dir);
-}
diff --git a/drivers/visorbus/visorbus_private.h b/drivers/visorbus/visorbus_private.h
deleted file mode 100644
index 6956de605827..000000000000
--- a/drivers/visorbus/visorbus_private.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#ifndef __VISORBUS_PRIVATE_H__
-#define __VISORBUS_PRIVATE_H__
-
-#include <linux/uuid.h>
-#include <linux/utsname.h>
-#include <linux/visorbus.h>
-
-#include "controlvmchannel.h"
-#include "vbuschannel.h"
-
-struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
- struct visor_device *from);
-int visorbus_create_instance(struct visor_device *dev);
-void visorbus_remove_instance(struct visor_device *bus_info);
-int create_visor_device(struct visor_device *dev_info);
-void remove_visor_device(struct visor_device *dev_info);
-int visorchipset_device_pause(struct visor_device *dev_info);
-int visorchipset_device_resume(struct visor_device *dev_info);
-void visorbus_response(struct visor_device *p, int response, int controlvm_id);
-void visorbus_device_changestate_response(struct visor_device *p, int response,
- struct visor_segment_state state);
-int visorbus_init(void);
-void visorbus_exit(void);
-
-/* visorchannel access functions */
-struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
- const guid_t *guid, bool needs_lock);
-void visorchannel_destroy(struct visorchannel *channel);
-int visorchannel_read(struct visorchannel *channel, ulong offset,
- void *dest, ulong nbytes);
-int visorchannel_write(struct visorchannel *channel, ulong offset,
- void *dest, ulong nbytes);
-u64 visorchannel_get_physaddr(struct visorchannel *channel);
-ulong visorchannel_get_nbytes(struct visorchannel *channel);
-char *visorchannel_id(struct visorchannel *channel, char *s);
-char *visorchannel_zoneid(struct visorchannel *channel, char *s);
-u64 visorchannel_get_clientpartition(struct visorchannel *channel);
-int visorchannel_set_clientpartition(struct visorchannel *channel,
- u64 partition_handle);
-char *visorchannel_guid_id(const guid_t *guid, char *s);
-void *visorchannel_get_header(struct visorchannel *channel);
-#endif
diff --git a/drivers/visorbus/visorchannel.c b/drivers/visorbus/visorchannel.c
deleted file mode 100644
index bd890e0f456b..000000000000
--- a/drivers/visorbus/visorchannel.c
+++ /dev/null
@@ -1,434 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-/*
- * This provides s-Par channel communication primitives, which are
- * independent of the mechanism used to access the channel data.
- */
-
-#include <linux/uuid.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/visorbus.h>
-
-#include "visorbus_private.h"
-#include "controlvmchannel.h"
-
-#define VISOR_DRV_NAME "visorchannel"
-
-#define VISOR_CONSOLEVIDEO_CHANNEL_GUID \
- GUID_INIT(0x3cd6e705, 0xd6a2, 0x4aa5, \
- 0xad, 0x5c, 0x7b, 0x8, 0x88, 0x9d, 0xff, 0xe2)
-
-static const guid_t visor_video_guid = VISOR_CONSOLEVIDEO_CHANNEL_GUID;
-
-struct visorchannel {
- u64 physaddr;
- ulong nbytes;
- void *mapped;
- bool requested;
- struct channel_header chan_hdr;
- guid_t guid;
- /*
- * channel creator knows if more than one thread will be inserting or
- * removing
- */
- bool needs_lock;
- /* protect head writes in chan_hdr */
- spinlock_t insert_lock;
- /* protect tail writes in chan_hdr */
- spinlock_t remove_lock;
- guid_t type;
- guid_t inst;
-};
-
-void visorchannel_destroy(struct visorchannel *channel)
-{
- if (!channel)
- return;
-
- if (channel->mapped) {
- memunmap(channel->mapped);
- if (channel->requested)
- release_mem_region(channel->physaddr, channel->nbytes);
- }
- kfree(channel);
-}
-
-u64 visorchannel_get_physaddr(struct visorchannel *channel)
-{
- return channel->physaddr;
-}
-
-ulong visorchannel_get_nbytes(struct visorchannel *channel)
-{
- return channel->nbytes;
-}
-
-char *visorchannel_guid_id(const guid_t *guid, char *s)
-{
- sprintf(s, "%pUL", guid);
- return s;
-}
-
-char *visorchannel_id(struct visorchannel *channel, char *s)
-{
- return visorchannel_guid_id(&channel->guid, s);
-}
-
-char *visorchannel_zoneid(struct visorchannel *channel, char *s)
-{
- return visorchannel_guid_id(&channel->chan_hdr.zone_guid, s);
-}
-
-u64 visorchannel_get_clientpartition(struct visorchannel *channel)
-{
- return channel->chan_hdr.partition_handle;
-}
-
-int visorchannel_set_clientpartition(struct visorchannel *channel,
- u64 partition_handle)
-{
- channel->chan_hdr.partition_handle = partition_handle;
- return 0;
-}
-
-/**
- * visorchannel_get_guid() - queries the GUID of the designated channel
- * @channel: the channel to query
- *
- * Return: the GUID of the provided channel
- */
-const guid_t *visorchannel_get_guid(struct visorchannel *channel)
-{
- return &channel->guid;
-}
-EXPORT_SYMBOL_GPL(visorchannel_get_guid);
-
-int visorchannel_read(struct visorchannel *channel, ulong offset, void *dest,
- ulong nbytes)
-{
- if (offset + nbytes > channel->nbytes)
- return -EIO;
-
- memcpy(dest, channel->mapped + offset, nbytes);
- return 0;
-}
-
-int visorchannel_write(struct visorchannel *channel, ulong offset, void *dest,
- ulong nbytes)
-{
- size_t chdr_size = sizeof(struct channel_header);
- size_t copy_size;
-
- if (offset + nbytes > channel->nbytes)
- return -EIO;
-
- if (offset < chdr_size) {
- copy_size = min(chdr_size - offset, nbytes);
- memcpy(((char *)(&channel->chan_hdr)) + offset,
- dest, copy_size);
- }
- memcpy(channel->mapped + offset, dest, nbytes);
- return 0;
-}
-
-void *visorchannel_get_header(struct visorchannel *channel)
-{
- return &channel->chan_hdr;
-}
-
-/*
- * Return offset of a specific SIGNAL_QUEUE_HEADER from the beginning of a
- * channel header
- */
-static int sig_queue_offset(struct channel_header *chan_hdr, int q)
-{
- return ((chan_hdr)->ch_space_offset +
- ((q) * sizeof(struct signal_queue_header)));
-}
-
-/*
- * Return offset of a specific queue entry (data) from the beginning of a
- * channel header
- */
-static int sig_data_offset(struct channel_header *chan_hdr, int q,
- struct signal_queue_header *sig_hdr, int slot)
-{
- return (sig_queue_offset(chan_hdr, q) + sig_hdr->sig_base_offset +
- (slot * sig_hdr->signal_size));
-}
-
-/*
- * Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back into
- * host memory
- */
-#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
- visorchannel_write(channel, \
- sig_queue_offset(&channel->chan_hdr, queue) + \
- offsetof(struct signal_queue_header, FIELD), \
- &((sig_hdr)->FIELD), \
- sizeof((sig_hdr)->FIELD))
-
-static int sig_read_header(struct visorchannel *channel, u32 queue,
- struct signal_queue_header *sig_hdr)
-{
- if (channel->chan_hdr.ch_space_offset < sizeof(struct channel_header))
- return -EINVAL;
-
- /* Read the appropriate SIGNAL_QUEUE_HEADER into local memory. */
- return visorchannel_read(channel,
- sig_queue_offset(&channel->chan_hdr, queue),
- sig_hdr, sizeof(struct signal_queue_header));
-}
-
-static int sig_read_data(struct visorchannel *channel, u32 queue,
- struct signal_queue_header *sig_hdr, u32 slot,
- void *data)
-{
- int signal_data_offset = sig_data_offset(&channel->chan_hdr, queue,
- sig_hdr, slot);
-
- return visorchannel_read(channel, signal_data_offset,
- data, sig_hdr->signal_size);
-}
-
-static int sig_write_data(struct visorchannel *channel, u32 queue,
- struct signal_queue_header *sig_hdr, u32 slot,
- void *data)
-{
- int signal_data_offset = sig_data_offset(&channel->chan_hdr, queue,
- sig_hdr, slot);
-
- return visorchannel_write(channel, signal_data_offset,
- data, sig_hdr->signal_size);
-}
-
-static int signalremove_inner(struct visorchannel *channel, u32 queue,
- void *msg)
-{
- struct signal_queue_header sig_hdr;
- int error;
-
- error = sig_read_header(channel, queue, &sig_hdr);
- if (error)
- return error;
- /* No signals to remove; have caller try again. */
- if (sig_hdr.head == sig_hdr.tail)
- return -EAGAIN;
- sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
- error = sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg);
- if (error)
- return error;
- sig_hdr.num_received++;
- /*
- * For each data field in SIGNAL_QUEUE_HEADER that was modified, update
- * host memory. Required for channel sync.
- */
- mb();
- error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail);
- if (error)
- return error;
- error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received);
- if (error)
- return error;
- return 0;
-}
-
-/**
- * visorchannel_signalremove() - removes a message from the designated
- * channel/queue
- * @channel: the channel the message will be removed from
- * @queue: the queue the message will be removed from
- * @msg: the message to remove
- *
- * Return: integer error code indicating the status of the removal
- */
-int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
- void *msg)
-{
- int rc;
- unsigned long flags;
-
- if (channel->needs_lock) {
- spin_lock_irqsave(&channel->remove_lock, flags);
- rc = signalremove_inner(channel, queue, msg);
- spin_unlock_irqrestore(&channel->remove_lock, flags);
- } else {
- rc = signalremove_inner(channel, queue, msg);
- }
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(visorchannel_signalremove);
-
-static bool queue_empty(struct visorchannel *channel, u32 queue)
-{
- struct signal_queue_header sig_hdr;
-
- if (sig_read_header(channel, queue, &sig_hdr))
- return true;
- return (sig_hdr.head == sig_hdr.tail);
-}
-
-/**
- * visorchannel_signalempty() - checks if the designated channel/queue contains
- * any messages
- * @channel: the channel to query
- * @queue: the queue in the channel to query
- *
- * Return: boolean indicating whether any messages in the designated
- * channel/queue are present
- */
-bool visorchannel_signalempty(struct visorchannel *channel, u32 queue)
-{
- bool rc;
- unsigned long flags;
-
- if (!channel->needs_lock)
- return queue_empty(channel, queue);
- spin_lock_irqsave(&channel->remove_lock, flags);
- rc = queue_empty(channel, queue);
- spin_unlock_irqrestore(&channel->remove_lock, flags);
- return rc;
-}
-EXPORT_SYMBOL_GPL(visorchannel_signalempty);
-
-static int signalinsert_inner(struct visorchannel *channel, u32 queue,
- void *msg)
-{
- struct signal_queue_header sig_hdr;
- int err;
-
- err = sig_read_header(channel, queue, &sig_hdr);
- if (err)
- return err;
- sig_hdr.head = (sig_hdr.head + 1) % sig_hdr.max_slots;
- if (sig_hdr.head == sig_hdr.tail) {
- sig_hdr.num_overflows++;
- err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_overflows);
- if (err)
- return err;
- return -EIO;
- }
- err = sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg);
- if (err)
- return err;
- sig_hdr.num_sent++;
- /*
- * For each data field in SIGNAL_QUEUE_HEADER that was modified, update
- * host memory. Required for channel sync.
- */
- mb();
- err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, head);
- if (err)
- return err;
- err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent);
- if (err)
- return err;
- return 0;
-}
-
-/*
- * visorchannel_create() - creates the struct visorchannel abstraction for a
- * data area in memory, but does NOT modify this data
- * area
- * @physaddr: physical address of start of channel
- * @gfp: gfp_t to use when allocating memory for the data struct
- * @guid: GUID that identifies channel type;
- * @needs_lock: must specify true if you have multiple threads of execution
- * that will be calling visorchannel methods of this
- * visorchannel at the same time
- *
- * Return: pointer to visorchannel that was created if successful,
- * otherwise NULL
- */
-struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
- const guid_t *guid, bool needs_lock)
-{
- struct visorchannel *channel;
- int err;
- size_t size = sizeof(struct channel_header);
-
- if (physaddr == 0)
- return NULL;
-
- channel = kzalloc(sizeof(*channel), gfp);
- if (!channel)
- return NULL;
- channel->needs_lock = needs_lock;
- spin_lock_init(&channel->insert_lock);
- spin_lock_init(&channel->remove_lock);
- /*
- * The video driver contains the EFI framebuffer, so requesting its
- * full memory region reports a resource conflict. Since we only use
- * the EFI framebuffer for video we can ignore this, but remember that
- * the region was not requested so we do not try to release it later.
- */
- channel->requested = request_mem_region(physaddr, size, VISOR_DRV_NAME);
- if (!channel->requested && !guid_equal(guid, &visor_video_guid))
- /* we only care about errors if this is not the video channel */
- goto err_destroy_channel;
- channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
- if (!channel->mapped) {
- release_mem_region(physaddr, size);
- goto err_destroy_channel;
- }
- channel->physaddr = physaddr;
- channel->nbytes = size;
- err = visorchannel_read(channel, 0, &channel->chan_hdr, size);
- if (err)
- goto err_destroy_channel;
- size = (ulong)channel->chan_hdr.size;
- memunmap(channel->mapped);
- if (channel->requested)
- release_mem_region(channel->physaddr, channel->nbytes);
- channel->mapped = NULL;
- channel->requested = request_mem_region(channel->physaddr, size,
- VISOR_DRV_NAME);
- if (!channel->requested && !guid_equal(guid, &visor_video_guid))
- /* we only care about errors if this is not the video channel */
- goto err_destroy_channel;
- channel->mapped = memremap(channel->physaddr, size, MEMREMAP_WB);
- if (!channel->mapped) {
- release_mem_region(channel->physaddr, size);
- goto err_destroy_channel;
- }
- channel->nbytes = size;
- guid_copy(&channel->guid, guid);
- return channel;
-
-err_destroy_channel:
- visorchannel_destroy(channel);
- return NULL;
-}
-
-/**
- * visorchannel_signalinsert() - inserts a message into the designated
- * channel/queue
- * @channel: the channel the message will be added to
- * @queue: the queue the message will be added to
- * @msg: the message to insert
- *
- * Return: integer error code indicating the status of the insertion
- */
-int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
- void *msg)
-{
- int rc;
- unsigned long flags;
-
- if (channel->needs_lock) {
- spin_lock_irqsave(&channel->insert_lock, flags);
- rc = signalinsert_inner(channel, queue, msg);
- spin_unlock_irqrestore(&channel->insert_lock, flags);
- } else {
- rc = signalinsert_inner(channel, queue, msg);
- }
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(visorchannel_signalinsert);
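The head/tail arithmetic used by signalinsert_inner() and signalremove_inner() above is a conventional one-slot-sacrificed ring. The following standalone sketch, not from the deleted file, models just that index math: the queue is empty when head == tail, and an insert that would advance head onto tail is treated as overflow (mirroring the num_overflows++ path above).

#include <linux/errno.h>

struct example_ring {
	unsigned int head;	/* last slot written by the producer */
	unsigned int tail;	/* last slot consumed by the consumer */
	unsigned int max_slots;
};

static int example_ring_insert(struct example_ring *r)
{
	unsigned int next = (r->head + 1) % r->max_slots;

	if (next == r->tail)
		return -EIO;	/* overflow, as in signalinsert_inner() */
	r->head = next;		/* slot "next" now holds the new signal */
	return 0;
}

static int example_ring_remove(struct example_ring *r)
{
	if (r->head == r->tail)
		return -EAGAIN;	/* nothing queued; caller retries */
	r->tail = (r->tail + 1) % r->max_slots;	/* consume slot "tail" */
	return 0;
}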
diff --git a/drivers/visorbus/visorchipset.c b/drivers/visorbus/visorchipset.c
deleted file mode 100644
index 5668cad86e37..000000000000
--- a/drivers/visorbus/visorchipset.c
+++ /dev/null
@@ -1,1691 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#include <linux/acpi.h>
-#include <linux/crash_dump.h>
-#include <linux/visorbus.h>
-
-#include "visorbus_private.h"
-
-/* {72120008-4AAB-11DC-8530-444553544200} */
-#define VISOR_SIOVM_GUID GUID_INIT(0x72120008, 0x4AAB, 0x11DC, 0x85, 0x30, \
- 0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
-
-static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
-static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
-static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;
-
-#define POLLJIFFIES_CONTROLVM_FAST 1
-#define POLLJIFFIES_CONTROLVM_SLOW 100
-
-#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
-
-#define UNISYS_VISOR_LEAF_ID 0x40000000
-
-/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
-#define UNISYS_VISOR_ID_EBX 0x73696e55
-#define UNISYS_VISOR_ID_ECX 0x70537379
-#define UNISYS_VISOR_ID_EDX 0x34367261
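The three ID words above decode to the ASCII string "UnisysSpar64" in little-endian byte order: 0x73696e55 is "Unis", 0x70537379 is "ysSp", and 0x34367261 is "ar64". As an illustrative sketch, not code from this file, a presence check against that signature could look like the following; the helper name is hypothetical and the driver's real detection routine lives elsewhere in this file.

static bool example_running_on_spar(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* query the s-Par hypervisor leaf and compare the signature words */
	cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
	return ebx == UNISYS_VISOR_ID_EBX &&
	       ecx == UNISYS_VISOR_ID_ECX &&
	       edx == UNISYS_VISOR_ID_EDX;
}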
-
-/*
- * When the controlvm channel is idle for at least MIN_IDLE_SECONDS, we switch
- * to slow polling mode. As soon as we get a controlvm message, we switch back
- * to fast polling mode.
- */
-#define MIN_IDLE_SECONDS 10
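A sketch, not taken from this file, of the interval selection the comment above describes: if no controlvm message has arrived for MIN_IDLE_SECONDS, fall back to the slow interval, otherwise keep polling fast. The helper name is hypothetical, it assumes the usual <linux/jiffies.h> helpers, and the real switching happens in the driver's periodic controlvm work function.

static unsigned long example_pick_poll_jiffies(unsigned long last_msg_jiffies)
{
	/* idle for MIN_IDLE_SECONDS or more: back off to slow polling */
	if (time_after(jiffies, last_msg_jiffies + HZ * MIN_IDLE_SECONDS))
		return POLLJIFFIES_CONTROLVM_SLOW;
	/* a message arrived recently: keep polling fast */
	return POLLJIFFIES_CONTROLVM_FAST;
}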
-
-struct parser_context {
- unsigned long allocbytes;
- unsigned long param_bytes;
- u8 *curr;
- unsigned long bytes_remaining;
- bool byte_stream;
- struct visor_controlvm_parameters_header data;
-};
-
-/* VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO. */
-#define VMCALL_CONTROLVM_ADDR 0x0501
-
-enum vmcall_result {
- VMCALL_RESULT_SUCCESS = 0,
- VMCALL_RESULT_INVALID_PARAM = 1,
- VMCALL_RESULT_DATA_UNAVAILABLE = 2,
- VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
- VMCALL_RESULT_DEVICE_ERROR = 4,
- VMCALL_RESULT_DEVICE_NOT_READY = 5
-};
-
-/*
- * struct vmcall_io_controlvm_addr_params - Structure for IO VMCALLS. Has
- * parameters to VMCALL_CONTROLVM_ADDR
- * interface.
- * @address: The Guest-relative physical address of the ControlVm channel.
- * This VMCall fills this in with the appropriate address.
- * Contents provided by this VMCALL (OUT).
- * @channel_bytes: The size of the ControlVm channel in bytes. This VMCall
- * fills this in with the appropriate size. Contents provided
- * by this VMCALL (OUT).
- * @unused: Unused Bytes in the 64-Bit Aligned Struct.
- */
-struct vmcall_io_controlvm_addr_params {
- u64 address;
- u32 channel_bytes;
- u8 unused[4];
-} __packed;
-
-struct visorchipset_device {
- struct acpi_device *acpi_device;
- unsigned long poll_jiffies;
- /* when we got our last controlvm message */
- unsigned long most_recent_message_jiffies;
- struct delayed_work periodic_controlvm_work;
- struct visorchannel *controlvm_channel;
- unsigned long controlvm_payload_bytes_buffered;
- /*
- * The following variables are used to handle the scenario where we are
- * unable to offload the payload from a controlvm message due to memory
- * requirements. In this scenario, we simply stash the controlvm
- * message, then attempt to process it again the next time
- * controlvm_periodic_work() runs.
- */
- struct controlvm_message controlvm_pending_msg;
- bool controlvm_pending_msg_valid;
- struct vmcall_io_controlvm_addr_params controlvm_params;
-};
-
-static struct visorchipset_device *chipset_dev;
-
-struct parahotplug_request {
- struct list_head list;
- int id;
- unsigned long expiration;
- struct controlvm_message msg;
-};
-
-/* sysfs attribute show/store handlers */
-static ssize_t toolaction_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- u8 tool_action = 0;
- int err;
-
- err = visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- tool_action),
- &tool_action, sizeof(u8));
- if (err)
- return err;
- return sprintf(buf, "%u\n", tool_action);
-}
-
-static ssize_t toolaction_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- u8 tool_action;
- int err;
-
- if (kstrtou8(buf, 10, &tool_action))
- return -EINVAL;
- err = visorchannel_write(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- tool_action),
- &tool_action, sizeof(u8));
- if (err)
- return err;
- return count;
-}
-static DEVICE_ATTR_RW(toolaction);
-
-static ssize_t boottotool_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct efi_visor_indication efi_visor_indication;
- int err;
-
- err = visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- efi_visor_ind),
- &efi_visor_indication,
- sizeof(struct efi_visor_indication));
- if (err)
- return err;
- return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
-}
-
-static ssize_t boottotool_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int val, err;
- struct efi_visor_indication efi_visor_indication;
-
- if (kstrtoint(buf, 10, &val))
- return -EINVAL;
- efi_visor_indication.boot_to_tool = val;
- err = visorchannel_write(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- efi_visor_ind),
- &(efi_visor_indication),
- sizeof(struct efi_visor_indication));
- if (err)
- return err;
- return count;
-}
-static DEVICE_ATTR_RW(boottotool);
-
-static ssize_t error_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- u32 error = 0;
- int err;
-
- err = visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- installation_error),
- &error, sizeof(u32));
- if (err)
- return err;
- return sprintf(buf, "%u\n", error);
-}
-
-static ssize_t error_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- u32 error;
- int err;
-
- if (kstrtou32(buf, 10, &error))
- return -EINVAL;
- err = visorchannel_write(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- installation_error),
- &error, sizeof(u32));
- if (err)
- return err;
- return count;
-}
-static DEVICE_ATTR_RW(error);
-
-static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- u32 text_id = 0;
- int err;
-
- err = visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- installation_text_id),
- &text_id, sizeof(u32));
- if (err)
- return err;
- return sprintf(buf, "%u\n", text_id);
-}
-
-static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- u32 text_id;
- int err;
-
- if (kstrtou32(buf, 10, &text_id))
- return -EINVAL;
- err = visorchannel_write(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- installation_text_id),
- &text_id, sizeof(u32));
- if (err)
- return err;
- return count;
-}
-static DEVICE_ATTR_RW(textid);
-
-static ssize_t remaining_steps_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u16 remaining_steps = 0;
- int err;
-
- err = visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- installation_remaining_steps),
- &remaining_steps, sizeof(u16));
- if (err)
- return err;
- return sprintf(buf, "%hu\n", remaining_steps);
-}
-
-static ssize_t remaining_steps_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- u16 remaining_steps;
- int err;
-
- if (kstrtou16(buf, 10, &remaining_steps))
- return -EINVAL;
- err = visorchannel_write(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- installation_remaining_steps),
- &remaining_steps, sizeof(u16));
- if (err)
- return err;
- return count;
-}
-static DEVICE_ATTR_RW(remaining_steps);
-
-static void controlvm_init_response(struct controlvm_message *msg,
- struct controlvm_message_header *msg_hdr,
- int response)
-{
- memset(msg, 0, sizeof(struct controlvm_message));
- memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
- msg->hdr.payload_bytes = 0;
- msg->hdr.payload_vm_offset = 0;
- msg->hdr.payload_max_bytes = 0;
- if (response < 0) {
- msg->hdr.flags.failed = 1;
- msg->hdr.completion_status = (u32)(-response);
- }
-}
-
-static int controlvm_respond_chipset_init(
- struct controlvm_message_header *msg_hdr,
- int response,
- enum visor_chipset_feature features)
-{
- struct controlvm_message outmsg;
-
- controlvm_init_response(&outmsg, msg_hdr, response);
- outmsg.cmd.init_chipset.features = features;
- return visorchannel_signalinsert(chipset_dev->controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg);
-}
-
-static int chipset_init(struct controlvm_message *inmsg)
-{
- static int chipset_inited;
- enum visor_chipset_feature features = 0;
- int rc = CONTROLVM_RESP_SUCCESS;
- int res = 0;
-
- if (chipset_inited) {
- rc = -CONTROLVM_RESP_ALREADY_DONE;
- res = -EIO;
- goto out_respond;
- }
- chipset_inited = 1;
- /*
- * Set features to indicate we support parahotplug (if Command also
- * supports it). Set the "reply" bit so Command knows this is a
- * features-aware driver.
- */
- features = inmsg->cmd.init_chipset.features &
- VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
- features |= VISOR_CHIPSET_FEATURE_REPLY;
-
-out_respond:
- if (inmsg->hdr.flags.response_expected)
- res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
-
- return res;
-}
-
-static int controlvm_respond(struct controlvm_message_header *msg_hdr,
- int response, struct visor_segment_state *state)
-{
- struct controlvm_message outmsg;
-
- controlvm_init_response(&outmsg, msg_hdr, response);
- if (outmsg.hdr.flags.test_message == 1)
- return -EINVAL;
- if (state) {
- outmsg.cmd.device_change_state.state = *state;
- outmsg.cmd.device_change_state.flags.phys_device = 1;
- }
- return visorchannel_signalinsert(chipset_dev->controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg);
-}
-
-enum crash_obj_type {
- CRASH_DEV,
- CRASH_BUS,
-};
-
-static int save_crash_message(struct controlvm_message *msg,
- enum crash_obj_type cr_type)
-{
- u32 local_crash_msg_offset;
- u16 local_crash_msg_count;
- int err;
-
- err = visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- saved_crash_message_count),
- &local_crash_msg_count, sizeof(u16));
- if (err) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to read message count\n");
- return err;
- }
- if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
- dev_err(&chipset_dev->acpi_device->dev,
- "invalid number of messages\n");
- return -EIO;
- }
- err = visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- saved_crash_message_offset),
- &local_crash_msg_offset, sizeof(u32));
- if (err) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to read offset\n");
- return err;
- }
- switch (cr_type) {
- case CRASH_DEV:
- local_crash_msg_offset += sizeof(struct controlvm_message);
- err = visorchannel_write(chipset_dev->controlvm_channel,
- local_crash_msg_offset, msg,
- sizeof(struct controlvm_message));
- if (err) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to write dev msg\n");
- return err;
- }
- break;
- case CRASH_BUS:
- err = visorchannel_write(chipset_dev->controlvm_channel,
- local_crash_msg_offset, msg,
- sizeof(struct controlvm_message));
- if (err) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to write bus msg\n");
- return err;
- }
- break;
- default:
- dev_err(&chipset_dev->acpi_device->dev,
- "Invalid crash_obj_type\n");
- break;
- }
- return 0;
-}
-
-static int controlvm_responder(enum controlvm_id cmd_id,
- struct controlvm_message_header *pending_msg_hdr,
- int response)
-{
- if (pending_msg_hdr->id != (u32)cmd_id)
- return -EINVAL;
-
- return controlvm_respond(pending_msg_hdr, response, NULL);
-}
-
-static int device_changestate_responder(enum controlvm_id cmd_id,
- struct visor_device *p, int response,
- struct visor_segment_state state)
-{
- struct controlvm_message outmsg;
-
- if (p->pending_msg_hdr->id != cmd_id)
- return -EINVAL;
-
- controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
- outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
- outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
- outmsg.cmd.device_change_state.state = state;
- return visorchannel_signalinsert(chipset_dev->controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg);
-}
-
-static int visorbus_create(struct controlvm_message *inmsg)
-{
- struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr;
- u32 bus_no = cmd->create_bus.bus_no;
- struct visor_device *bus_info;
- struct visorchannel *visorchannel;
- int err;
-
- bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
- if (bus_info && bus_info->state.created == 1) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed %s: already exists\n", __func__);
- err = -EEXIST;
- goto err_respond;
- }
- bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
- if (!bus_info) {
- err = -ENOMEM;
- goto err_respond;
- }
- INIT_LIST_HEAD(&bus_info->list_all);
- bus_info->chipset_bus_no = bus_no;
- bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
- if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
- err = save_crash_message(inmsg, CRASH_BUS);
- if (err)
- goto err_free_bus_info;
- }
- if (inmsg->hdr.flags.response_expected == 1) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
- if (!pmsg_hdr) {
- err = -ENOMEM;
- goto err_free_bus_info;
- }
- memcpy(pmsg_hdr, &inmsg->hdr,
- sizeof(struct controlvm_message_header));
- bus_info->pending_msg_hdr = pmsg_hdr;
- }
- visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
- GFP_KERNEL,
- &cmd->create_bus.bus_data_type_guid,
- false);
- if (!visorchannel) {
- err = -ENOMEM;
- goto err_free_pending_msg;
- }
- bus_info->visorchannel = visorchannel;
- /* Response will be handled by visorbus_create_instance on success */
- err = visorbus_create_instance(bus_info);
- if (err)
- goto err_destroy_channel;
- return 0;
-
-err_destroy_channel:
- visorchannel_destroy(visorchannel);
-
-err_free_pending_msg:
- kfree(bus_info->pending_msg_hdr);
-
-err_free_bus_info:
- kfree(bus_info);
-
-err_respond:
- if (inmsg->hdr.flags.response_expected == 1)
- controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
- return err;
-}
-
-static int visorbus_destroy(struct controlvm_message *inmsg)
-{
- struct controlvm_message_header *pmsg_hdr;
- u32 bus_no = inmsg->cmd.destroy_bus.bus_no;
- struct visor_device *bus_info;
- int err;
-
- bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
- if (!bus_info) {
- err = -ENODEV;
- goto err_respond;
- }
- if (bus_info->state.created == 0) {
- err = -ENOENT;
- goto err_respond;
- }
- if (bus_info->pending_msg_hdr) {
- /* only non-NULL if dev is still waiting on a response */
- err = -EEXIST;
- goto err_respond;
- }
- if (inmsg->hdr.flags.response_expected == 1) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
- if (!pmsg_hdr) {
- err = -ENOMEM;
- goto err_respond;
- }
- memcpy(pmsg_hdr, &inmsg->hdr,
- sizeof(struct controlvm_message_header));
- bus_info->pending_msg_hdr = pmsg_hdr;
- }
- /* Response will be handled by visorbus_remove_instance */
- visorbus_remove_instance(bus_info);
- return 0;
-
-err_respond:
- if (inmsg->hdr.flags.response_expected == 1)
- controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
- return err;
-}
-
-static const guid_t *parser_id_get(struct parser_context *ctx)
-{
- return &ctx->data.id;
-}
-
-static void *parser_string_get(u8 *pscan, int nscan)
-{
- int value_length;
- void *value;
-
- if (nscan == 0)
- return NULL;
-
- value_length = strnlen(pscan, nscan);
- value = kzalloc(value_length + 1, GFP_KERNEL);
- if (!value)
- return NULL;
- if (value_length > 0)
- memcpy(value, pscan, value_length);
- return value;
-}
-
-static void *parser_name_get(struct parser_context *ctx)
-{
- struct visor_controlvm_parameters_header *phdr;
-
- phdr = &ctx->data;
- if ((unsigned long)phdr->name_offset +
- (unsigned long)phdr->name_length > ctx->param_bytes)
- return NULL;
- ctx->curr = (u8 *)phdr + phdr->name_offset;
- ctx->bytes_remaining = phdr->name_length;
- return parser_string_get(ctx->curr, phdr->name_length);
-}
-
-static int visorbus_configure(struct controlvm_message *inmsg,
- struct parser_context *parser_ctx)
-{
- struct controlvm_message_packet *cmd = &inmsg->cmd;
- u32 bus_no;
- struct visor_device *bus_info;
- int err = 0;
-
- bus_no = cmd->configure_bus.bus_no;
- bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
- if (!bus_info) {
- err = -EINVAL;
- goto err_respond;
- }
- if (bus_info->state.created == 0) {
- err = -EINVAL;
- goto err_respond;
- }
- if (bus_info->pending_msg_hdr) {
- err = -EIO;
- goto err_respond;
- }
- err = visorchannel_set_clientpartition(bus_info->visorchannel,
- cmd->configure_bus.guest_handle);
- if (err)
- goto err_respond;
- if (parser_ctx) {
- const guid_t *partition_guid = parser_id_get(parser_ctx);
-
- guid_copy(&bus_info->partition_guid, partition_guid);
- bus_info->name = parser_name_get(parser_ctx);
- }
- if (inmsg->hdr.flags.response_expected == 1)
- controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
- return 0;
-
-err_respond:
- dev_err(&chipset_dev->acpi_device->dev,
- "%s exited with err: %d\n", __func__, err);
- if (inmsg->hdr.flags.response_expected == 1)
- controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
- return err;
-}
-
-static int visorbus_device_create(struct controlvm_message *inmsg)
-{
- struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr;
- u32 bus_no = cmd->create_device.bus_no;
- u32 dev_no = cmd->create_device.dev_no;
- struct visor_device *dev_info;
- struct visor_device *bus_info;
- struct visorchannel *visorchannel;
- int err;
-
- bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
- if (!bus_info) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to get bus by id: %d\n", bus_no);
- err = -ENODEV;
- goto err_respond;
- }
- if (bus_info->state.created == 0) {
- dev_err(&chipset_dev->acpi_device->dev,
- "bus not created, id: %d\n", bus_no);
- err = -EINVAL;
- goto err_respond;
- }
- dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
- if (dev_info && dev_info->state.created == 1) {
- dev_err(&chipset_dev->acpi_device->dev,
- "device already exists: %d/%d\n", bus_no, dev_no);
- err = -EEXIST;
- goto err_respond;
- }
-
- dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
- if (!dev_info) {
- err = -ENOMEM;
- goto err_respond;
- }
- dev_info->chipset_bus_no = bus_no;
- dev_info->chipset_dev_no = dev_no;
- guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);
- dev_info->device.parent = &bus_info->device;
- visorchannel = visorchannel_create(cmd->create_device.channel_addr,
- GFP_KERNEL,
- &cmd->create_device.data_type_guid,
- true);
- if (!visorchannel) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to create visorchannel: %d/%d\n",
- bus_no, dev_no);
- err = -ENOMEM;
- goto err_free_dev_info;
- }
- dev_info->visorchannel = visorchannel;
- guid_copy(&dev_info->channel_type_guid,
- &cmd->create_device.data_type_guid);
- if (guid_equal(&cmd->create_device.data_type_guid,
- &visor_vhba_channel_guid)) {
- err = save_crash_message(inmsg, CRASH_DEV);
- if (err)
- goto err_destroy_visorchannel;
- }
- if (inmsg->hdr.flags.response_expected == 1) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
- if (!pmsg_hdr) {
- err = -ENOMEM;
- goto err_destroy_visorchannel;
- }
- memcpy(pmsg_hdr, &inmsg->hdr,
- sizeof(struct controlvm_message_header));
- dev_info->pending_msg_hdr = pmsg_hdr;
- }
- /* create_visor_device will send response */
- err = create_visor_device(dev_info);
- if (err)
- goto err_destroy_visorchannel;
-
- return 0;
-
-err_destroy_visorchannel:
- visorchannel_destroy(visorchannel);
-
-err_free_dev_info:
- kfree(dev_info);
-
-err_respond:
- if (inmsg->hdr.flags.response_expected == 1)
- controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
- return err;
-}
-
-static int visorbus_device_changestate(struct controlvm_message *inmsg)
-{
- struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr;
- u32 bus_no = cmd->device_change_state.bus_no;
- u32 dev_no = cmd->device_change_state.dev_no;
- struct visor_segment_state state = cmd->device_change_state.state;
- struct visor_device *dev_info;
- int err = 0;
-
- dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
- if (!dev_info) {
- err = -ENODEV;
- goto err_respond;
- }
- if (dev_info->state.created == 0) {
- err = -EINVAL;
- goto err_respond;
- }
- if (dev_info->pending_msg_hdr) {
- /* only non-NULL if dev is still waiting on a response */
- err = -EIO;
- goto err_respond;
- }
-
- if (inmsg->hdr.flags.response_expected == 1) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
- if (!pmsg_hdr) {
- err = -ENOMEM;
- goto err_respond;
- }
- memcpy(pmsg_hdr, &inmsg->hdr,
- sizeof(struct controlvm_message_header));
- dev_info->pending_msg_hdr = pmsg_hdr;
- }
- if (state.alive == segment_state_running.alive &&
- state.operating == segment_state_running.operating)
- /* Response will be sent from visorchipset_device_resume */
- err = visorchipset_device_resume(dev_info);
- /* ServerNotReady / ServerLost / SegmentStateStandby */
- else if (state.alive == segment_state_standby.alive &&
- state.operating == segment_state_standby.operating)
- /*
- * Technically this is the standby case, where the server is lost.
- * Response will be sent from visorchipset_device_pause.
- */
- err = visorchipset_device_pause(dev_info);
- if (err)
- goto err_respond;
- return 0;
-
-err_respond:
- dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err);
- if (inmsg->hdr.flags.response_expected == 1)
- controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
- return err;
-}
-
-static int visorbus_device_destroy(struct controlvm_message *inmsg)
-{
- struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr;
- u32 bus_no = cmd->destroy_device.bus_no;
- u32 dev_no = cmd->destroy_device.dev_no;
- struct visor_device *dev_info;
- int err;
-
- dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
- if (!dev_info) {
- err = -ENODEV;
- goto err_respond;
- }
- if (dev_info->state.created == 0) {
- err = -EINVAL;
- goto err_respond;
- }
- if (dev_info->pending_msg_hdr) {
- /* only non-NULL if dev is still waiting on a response */
- err = -EIO;
- goto err_respond;
- }
- if (inmsg->hdr.flags.response_expected == 1) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
- if (!pmsg_hdr) {
- err = -ENOMEM;
- goto err_respond;
- }
-
- memcpy(pmsg_hdr, &inmsg->hdr,
- sizeof(struct controlvm_message_header));
- dev_info->pending_msg_hdr = pmsg_hdr;
- }
- kfree(dev_info->name);
- remove_visor_device(dev_info);
- return 0;
-
-err_respond:
- if (inmsg->hdr.flags.response_expected == 1)
- controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
- return err;
-}
-
-/*
- * The general parahotplug flow works as follows. The visorchipset receives
- * a DEVICE_CHANGESTATE message from Command specifying a physical device
- * to enable or disable. The CONTROLVM message handler calls
- * parahotplug_process_message, which then adds the message to a global list
- * and kicks off a udev event which causes a user level script to enable or
- * disable the specified device. The udev script then writes to
- * /sys/devices/platform/visorchipset/parahotplug, which causes the
- * parahotplug store functions to get called, at which point the
- * appropriate CONTROLVM message is retrieved from the list and responded to.
- */
-
-#define PARAHOTPLUG_TIMEOUT_MS 2000
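To make the round trip above concrete, here is a sketch, not part of the kernel sources, of the user-space acknowledgement step: after the script has disabled the device, it writes the VISOR_PARAHOTPLUG_ID value it received from the uevent back into the devicedisabled attribute, which is handled by devicedisabled_store() further down in this file. The sysfs path and error handling are simplified assumptions.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *id = getenv("VISOR_PARAHOTPLUG_ID");
	FILE *f;

	if (!id)
		return 1;
	f = fopen("/sys/devices/platform/visorchipset/parahotplug/devicedisabled",
		  "w");
	if (!f)
		return 1;
	fprintf(f, "%s\n", id);	/* completes the pending CONTROLVM message */
	fclose(f);
	return 0;
}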
-
-/*
- * parahotplug_next_id() - generate unique int to match an outstanding
- * CONTROLVM message with a udev script /sys
- * response
- *
- * Return: a unique integer value
- */
-static int parahotplug_next_id(void)
-{
- static atomic_t id = ATOMIC_INIT(0);
-
- return atomic_inc_return(&id);
-}
-
-/*
- * parahotplug_next_expiration() - returns the time (in jiffies) when a
- * CONTROLVM message on the list should expire
- * -- PARAHOTPLUG_TIMEOUT_MS in the future
- *
- * Return: expected expiration time (in jiffies)
- */
-static unsigned long parahotplug_next_expiration(void)
-{
- return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
-}
-
-/*
- * parahotplug_request_create() - create a parahotplug_request, which is
- * basically a wrapper for a CONTROLVM_MESSAGE
- * that we can stick on a list
- * @msg: the message to insert in the request
- *
- * Return: the request containing the provided message
- */
-static struct parahotplug_request *parahotplug_request_create(
- struct controlvm_message *msg)
-{
- struct parahotplug_request *req;
-
- req = kmalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return NULL;
- req->id = parahotplug_next_id();
- req->expiration = parahotplug_next_expiration();
- req->msg = *msg;
- return req;
-}
-
-/*
- * parahotplug_request_destroy() - free a parahotplug_request
- * @req: the request to deallocate
- */
-static void parahotplug_request_destroy(struct parahotplug_request *req)
-{
- kfree(req);
-}
-
-static LIST_HEAD(parahotplug_request_list);
-/* lock for above */
-static DEFINE_SPINLOCK(parahotplug_request_list_lock);
-
-/*
- * parahotplug_request_complete() - mark request as complete
- * @id: the id of the request
- * @active: indicates whether the request is assigned to active partition
- *
- * Called from the /sys handler, which means the user script has
- * finished the enable/disable. Find the matching identifier, and
- * respond to the CONTROLVM message with success.
- *
- * Return: 0 on success or -EINVAL on failure
- */
-static int parahotplug_request_complete(int id, u16 active)
-{
- struct list_head *pos;
- struct list_head *tmp;
- struct parahotplug_request *req;
-
- spin_lock(&parahotplug_request_list_lock);
- /* Look for a request matching "id". */
- list_for_each_safe(pos, tmp, &parahotplug_request_list) {
- req = list_entry(pos, struct parahotplug_request, list);
- if (req->id == id) {
- /*
- * Found a match. Remove it from the list and
- * respond.
- */
- list_del(pos);
- spin_unlock(&parahotplug_request_list_lock);
- req->msg.cmd.device_change_state.state.active = active;
- if (req->msg.hdr.flags.response_expected)
- controlvm_respond(
- &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
- &req->msg.cmd.device_change_state.state);
- parahotplug_request_destroy(req);
- return 0;
- }
- }
- spin_unlock(&parahotplug_request_list_lock);
- return -EINVAL;
-}
-
-/*
- * devicedisabled_store() - disables the hotplug device
- * @dev: sysfs interface variable not utilized in this function
- * @attr: sysfs interface variable not utilized in this function
- * @buf: buffer containing the device id
- * @count: the size of the buffer
- *
- * The parahotplug/devicedisabled interface gets called by our support script
- * when an SR-IOV device has been shut down. The ID is passed to the script
- * and then passed back when the device has been removed.
- *
- * Return: the size of the buffer for success or negative for error
- */
-static ssize_t devicedisabled_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int id;
- int err;
-
- if (kstrtouint(buf, 10, &id))
- return -EINVAL;
- err = parahotplug_request_complete(id, 0);
- if (err < 0)
- return err;
- return count;
-}
-static DEVICE_ATTR_WO(devicedisabled);
-
-/*
- * deviceenabled_store() - enables the hotplug device
- * @dev: sysfs interface variable not utilized in this function
- * @attr: sysfs interface variable not utilized in this function
- * @buf: buffer containing the device id
- * @count: the size of the buffer
- *
- * The parahotplug/deviceenabled interface gets called by our support script
- * when an SR-IOV device has been recovered. The ID is passed to the script
- * and then passed back when the device has been brought back up.
- *
- * Return: the size of the buffer for success or negative for error
- */
-static ssize_t deviceenabled_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int id;
-
- if (kstrtouint(buf, 10, &id))
- return -EINVAL;
- parahotplug_request_complete(id, 1);
- return count;
-}
-static DEVICE_ATTR_WO(deviceenabled);
-
-static struct attribute *visorchipset_install_attrs[] = {
- &dev_attr_toolaction.attr,
- &dev_attr_boottotool.attr,
- &dev_attr_error.attr,
- &dev_attr_textid.attr,
- &dev_attr_remaining_steps.attr,
- NULL
-};
-
-static const struct attribute_group visorchipset_install_group = {
- .name = "install",
- .attrs = visorchipset_install_attrs
-};
-
-static struct attribute *visorchipset_parahotplug_attrs[] = {
- &dev_attr_devicedisabled.attr,
- &dev_attr_deviceenabled.attr,
- NULL
-};
-
-static const struct attribute_group visorchipset_parahotplug_group = {
- .name = "parahotplug",
- .attrs = visorchipset_parahotplug_attrs
-};
-
-static const struct attribute_group *visorchipset_dev_groups[] = {
- &visorchipset_install_group,
- &visorchipset_parahotplug_group,
- NULL
-};
-
-/*
- * parahotplug_request_kickoff() - initiate parahotplug request
- * @req: the request to initiate
- *
- * Cause uevent to run the user level script to do the disable/enable specified
- * in the parahotplug_request.
- */
-static int parahotplug_request_kickoff(struct parahotplug_request *req)
-{
- struct controlvm_message_packet *cmd = &req->msg.cmd;
- char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
- env_func[40];
- char *envp[] = { env_cmd, env_id, env_state, env_bus, env_dev,
- env_func, NULL
- };
-
- sprintf(env_cmd, "VISOR_PARAHOTPLUG=1");
- sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id);
- sprintf(env_state, "VISOR_PARAHOTPLUG_STATE=%d",
- cmd->device_change_state.state.active);
- sprintf(env_bus, "VISOR_PARAHOTPLUG_BUS=%d",
- cmd->device_change_state.bus_no);
- sprintf(env_dev, "VISOR_PARAHOTPLUG_DEVICE=%d",
- cmd->device_change_state.dev_no >> 3);
- sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d",
- cmd->device_change_state.dev_no & 0x7);
- return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
- KOBJ_CHANGE, envp);
-}
-
-/*
- * parahotplug_process_message() - enables or disables a PCI device by kicking
- * off a udev script
- * @inmsg: the message indicating whether to enable or disable
- */
-static int parahotplug_process_message(struct controlvm_message *inmsg)
-{
- struct parahotplug_request *req;
- int err;
-
- req = parahotplug_request_create(inmsg);
- if (!req)
- return -ENOMEM;
- /*
- * For enable messages, just respond with success right away; we don't
- * need to wait to see whether the enable was successful.
- */
- if (inmsg->cmd.device_change_state.state.active) {
- err = parahotplug_request_kickoff(req);
- if (err)
- goto err_respond;
- controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
- &inmsg->cmd.device_change_state.state);
- parahotplug_request_destroy(req);
- return 0;
- }
- /*
- * For disable messages, add the request to the request list before
- * kicking off the udev script. It won't get responded to until the
- * script has indicated it's done.
- */
- spin_lock(&parahotplug_request_list_lock);
- list_add_tail(&req->list, &parahotplug_request_list);
- spin_unlock(&parahotplug_request_list_lock);
- err = parahotplug_request_kickoff(req);
- if (err)
- goto err_respond;
- return 0;
-
-err_respond:
- controlvm_respond(&inmsg->hdr, err,
- &inmsg->cmd.device_change_state.state);
- return err;
-}
-
-/*
- * chipset_ready_uevent() - sends chipset_ready action
- *
- * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
- *
- * Return: 0 on success, negative on failure
- */
-static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
-{
- int res;
-
- res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj, KOBJ_ONLINE);
- if (msg_hdr->flags.response_expected)
- controlvm_respond(msg_hdr, res, NULL);
- return res;
-}
-
-/*
- * chipset_selftest_uevent() - sends chipset_selftest action
- *
- * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
- *
- * Return: 0 on success, negative on failure
- */
-static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
-{
- char env_selftest[20];
- char *envp[] = { env_selftest, NULL };
- int res;
-
- sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
- res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
- KOBJ_CHANGE, envp);
- if (msg_hdr->flags.response_expected)
- controlvm_respond(msg_hdr, res, NULL);
- return res;
-}
-
-/*
- * chipset_notready_uevent() - sends chipset_notready action
- *
- * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
- *
- * Return: 0 on success, negative on failure
- */
-static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
-{
- int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
- KOBJ_OFFLINE);
-
- if (msg_hdr->flags.response_expected)
- controlvm_respond(msg_hdr, res, NULL);
- return res;
-}
-
-static int unisys_vmcall(unsigned long tuple, unsigned long param)
-{
- int result = 0;
- unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
- unsigned long reg_ebx;
- unsigned long reg_ecx;
-
- reg_ebx = param & 0xFFFFFFFF;
- reg_ecx = param >> 32;
- cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
- if (!(cpuid_ecx & 0x80000000))
- return -EPERM;
- __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
- "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
- if (result)
- goto error;
- return 0;
-
-/* Need to convert from VMCALL error codes to Linux */
-error:
- switch (result) {
- case VMCALL_RESULT_INVALID_PARAM:
- return -EINVAL;
- case VMCALL_RESULT_DATA_UNAVAILABLE:
- return -ENODEV;
- default:
- return -EFAULT;
- }
-}
-
-static int controlvm_channel_create(struct visorchipset_device *dev)
-{
- struct visorchannel *chan;
- u64 addr;
- int err;
-
- err = unisys_vmcall(VMCALL_CONTROLVM_ADDR,
- virt_to_phys(&dev->controlvm_params));
- if (err)
- return err;
- addr = dev->controlvm_params.address;
- chan = visorchannel_create(addr, GFP_KERNEL,
- &visor_controlvm_channel_guid, true);
- if (!chan)
- return -ENOMEM;
- dev->controlvm_channel = chan;
- return 0;
-}
-
-static void setup_crash_devices_work_queue(struct work_struct *work)
-{
- struct controlvm_message local_crash_bus_msg;
- struct controlvm_message local_crash_dev_msg;
- struct controlvm_message msg = {
- .hdr.id = CONTROLVM_CHIPSET_INIT,
- .cmd.init_chipset = {
- .bus_count = 23,
- .switch_count = 0,
- },
- };
- u32 local_crash_msg_offset;
- u16 local_crash_msg_count;
-
- /* send init chipset msg */
- chipset_init(&msg);
- /* get saved message count */
- if (visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- saved_crash_message_count),
- &local_crash_msg_count, sizeof(u16)) < 0) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to read channel\n");
- return;
- }
- if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
- dev_err(&chipset_dev->acpi_device->dev, "invalid count\n");
- return;
- }
- /* get saved crash message offset */
- if (visorchannel_read(chipset_dev->controlvm_channel,
- offsetof(struct visor_controlvm_channel,
- saved_crash_message_offset),
- &local_crash_msg_offset, sizeof(u32)) < 0) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to read channel\n");
- return;
- }
- /* read create device message for storage bus offset */
- if (visorchannel_read(chipset_dev->controlvm_channel,
- local_crash_msg_offset,
- &local_crash_bus_msg,
- sizeof(struct controlvm_message)) < 0) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to read channel\n");
- return;
- }
- /* read create device message for storage device */
- if (visorchannel_read(chipset_dev->controlvm_channel,
- local_crash_msg_offset +
- sizeof(struct controlvm_message),
- &local_crash_dev_msg,
- sizeof(struct controlvm_message)) < 0) {
- dev_err(&chipset_dev->acpi_device->dev,
- "failed to read channel\n");
- return;
- }
- /* reuse IOVM create bus message */
- if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
- dev_err(&chipset_dev->acpi_device->dev,
- "no valid create_bus message\n");
- return;
- }
- visorbus_create(&local_crash_bus_msg);
- /* reuse create device message for storage device */
- if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
- dev_err(&chipset_dev->acpi_device->dev,
- "no valid create_device message\n");
- return;
- }
- visorbus_device_create(&local_crash_dev_msg);
-}
-
-void visorbus_response(struct visor_device *bus_info, int response,
- int controlvm_id)
-{
- if (!bus_info->pending_msg_hdr)
- return;
-
- controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
- kfree(bus_info->pending_msg_hdr);
- bus_info->pending_msg_hdr = NULL;
-}
-
-void visorbus_device_changestate_response(struct visor_device *dev_info,
- int response,
- struct visor_segment_state state)
-{
- if (!dev_info->pending_msg_hdr)
- return;
-
- device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, dev_info,
- response, state);
- kfree(dev_info->pending_msg_hdr);
- dev_info->pending_msg_hdr = NULL;
-}
-
-static void parser_done(struct parser_context *ctx)
-{
- chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
- kfree(ctx);
-}
-
-static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
- bool *retry)
-{
- unsigned long allocbytes;
- struct parser_context *ctx;
- void *mapping;
-
- *retry = false;
- /* alloc an extra byte to ensure payload is \0 terminated */
- allocbytes = (unsigned long)bytes + 1 + (sizeof(struct parser_context) -
- sizeof(struct visor_controlvm_parameters_header));
- if ((chipset_dev->controlvm_payload_bytes_buffered + bytes) >
- MAX_CONTROLVM_PAYLOAD_BYTES) {
- *retry = true;
- return NULL;
- }
- ctx = kzalloc(allocbytes, GFP_KERNEL);
- if (!ctx) {
- *retry = true;
- return NULL;
- }
- ctx->allocbytes = allocbytes;
- ctx->param_bytes = bytes;
- mapping = memremap(addr, bytes, MEMREMAP_WB);
- if (!mapping)
- goto err_finish_ctx;
- memcpy(&ctx->data, mapping, bytes);
- memunmap(mapping);
- ctx->byte_stream = true;
- chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
- return ctx;
-
-err_finish_ctx:
- kfree(ctx);
- return NULL;
-}
-
-/*
- * handle_command() - process a controlvm message
- * @inmsg: the message to process
- * @channel_addr: address of the controlvm channel
- *
- * Return:
- * 0 - Successfully processed the message
- * -EAGAIN - ControlVM message was not processed and should be retried
- * reading the next controlvm message; a scenario where this can
- * occur is when we need to throttle the allocation of memory in
- * which to copy out controlvm payload data.
- * < 0 - error: ControlVM message was processed but an error occurred.
- */
-static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
-{
- struct controlvm_message_packet *cmd = &inmsg.cmd;
- u64 parm_addr;
- u32 parm_bytes;
- struct parser_context *parser_ctx = NULL;
- struct controlvm_message ackmsg;
- int err = 0;
-
- /* create parsing context if necessary */
- parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
- parm_bytes = inmsg.hdr.payload_bytes;
- /*
- * Parameter and channel addresses within test messages actually lie
- * within our OS-controlled memory. We need to know that, because it
- * makes a difference in how we compute the virtual address.
- */
- if (parm_bytes) {
- bool retry;
-
- parser_ctx = parser_init_stream(parm_addr, parm_bytes, &retry);
- if (!parser_ctx && retry)
- return -EAGAIN;
- }
- controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS);
- err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
- CONTROLVM_QUEUE_ACK, &ackmsg);
- if (err)
- return err;
- switch (inmsg.hdr.id) {
- case CONTROLVM_CHIPSET_INIT:
- err = chipset_init(&inmsg);
- break;
- case CONTROLVM_BUS_CREATE:
- err = visorbus_create(&inmsg);
- break;
- case CONTROLVM_BUS_DESTROY:
- err = visorbus_destroy(&inmsg);
- break;
- case CONTROLVM_BUS_CONFIGURE:
- err = visorbus_configure(&inmsg, parser_ctx);
- break;
- case CONTROLVM_DEVICE_CREATE:
- err = visorbus_device_create(&inmsg);
- break;
- case CONTROLVM_DEVICE_CHANGESTATE:
- if (cmd->device_change_state.flags.phys_device) {
- err = parahotplug_process_message(&inmsg);
- } else {
- /*
- * save the hdr and cmd structures for later use when
- * sending back the response to Command
- */
- err = visorbus_device_changestate(&inmsg);
- break;
- }
- break;
- case CONTROLVM_DEVICE_DESTROY:
- err = visorbus_device_destroy(&inmsg);
- break;
- case CONTROLVM_DEVICE_CONFIGURE:
- /* no op just send a respond that we passed */
- if (inmsg.hdr.flags.response_expected)
- controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
- NULL);
- break;
- case CONTROLVM_CHIPSET_READY:
- err = chipset_ready_uevent(&inmsg.hdr);
- break;
- case CONTROLVM_CHIPSET_SELFTEST:
- err = chipset_selftest_uevent(&inmsg.hdr);
- break;
- case CONTROLVM_CHIPSET_STOP:
- err = chipset_notready_uevent(&inmsg.hdr);
- break;
- default:
- err = -ENOMSG;
- if (inmsg.hdr.flags.response_expected)
- controlvm_respond(&inmsg.hdr,
- -CONTROLVM_RESP_ID_UNKNOWN, NULL);
- break;
- }
- if (parser_ctx) {
- parser_done(parser_ctx);
- parser_ctx = NULL;
- }
- return err;
-}
-
-/*
- * read_controlvm_event() - retreives the next message from the
- * CONTROLVM_QUEUE_EVENT queue in the controlvm
- * channel
- * @msg: pointer to the retrieved message
- *
- * Return: 0 if valid message was retrieved or -error
- */
-static int read_controlvm_event(struct controlvm_message *msg)
-{
- int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
- CONTROLVM_QUEUE_EVENT, msg);
-
- if (err)
- return err;
- /* got a message */
- if (msg->hdr.flags.test_message == 1)
- return -EINVAL;
- return 0;
-}
-
-/*
- * parahotplug_process_list() - remove any request from the list that's been on
- * there too long and respond with an error
- */
-static void parahotplug_process_list(void)
-{
- struct list_head *pos;
- struct list_head *tmp;
-
- spin_lock(&parahotplug_request_list_lock);
- list_for_each_safe(pos, tmp, &parahotplug_request_list) {
- struct parahotplug_request *req =
- list_entry(pos, struct parahotplug_request, list);
-
- if (!time_after_eq(jiffies, req->expiration))
- continue;
- list_del(pos);
- if (req->msg.hdr.flags.response_expected)
- controlvm_respond(
- &req->msg.hdr,
- CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
- &req->msg.cmd.device_change_state.state);
- parahotplug_request_destroy(req);
- }
- spin_unlock(&parahotplug_request_list_lock);
-}
-
-static void controlvm_periodic_work(struct work_struct *work)
-{
- struct controlvm_message inmsg;
- int count = 0;
- int err;
-
- /* Drain the RESPONSE queue make it empty */
- do {
- err = visorchannel_signalremove(chipset_dev->controlvm_channel,
- CONTROLVM_QUEUE_RESPONSE,
- &inmsg);
- } while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));
- if (err != -EAGAIN)
- goto schedule_out;
- if (chipset_dev->controlvm_pending_msg_valid) {
- /*
- * we throttled processing of a prior msg, so try to process
- * it again rather than reading a new one
- */
- inmsg = chipset_dev->controlvm_pending_msg;
- chipset_dev->controlvm_pending_msg_valid = false;
- err = 0;
- } else {
- err = read_controlvm_event(&inmsg);
- }
- while (!err) {
- chipset_dev->most_recent_message_jiffies = jiffies;
- err = handle_command(inmsg,
- visorchannel_get_physaddr
- (chipset_dev->controlvm_channel));
- if (err == -EAGAIN) {
- chipset_dev->controlvm_pending_msg = inmsg;
- chipset_dev->controlvm_pending_msg_valid = true;
- break;
- }
-
- err = read_controlvm_event(&inmsg);
- }
- /* parahotplug_worker */
- parahotplug_process_list();
-
-/*
- * The controlvm messages are sent in a bulk. If we start receiving messages, we
- * want the polling to be fast. If we do not receive any message for
- * MIN_IDLE_SECONDS, we can slow down the polling.
- */
-schedule_out:
- if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
- (HZ * MIN_IDLE_SECONDS))) {
- /*
- * it's been longer than MIN_IDLE_SECONDS since we processed
- * our last controlvm message; slow down the polling
- */
- if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_SLOW)
- chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_SLOW;
- } else {
- if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_FAST)
- chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
- }
- schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
- chipset_dev->poll_jiffies);
-}
-
-static int visorchipset_init(struct acpi_device *acpi_device)
-{
- int err = -ENOMEM;
- struct visorchannel *controlvm_channel;
-
- chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
- if (!chipset_dev)
- goto error;
- err = controlvm_channel_create(chipset_dev);
- if (err)
- goto error_free_chipset_dev;
- acpi_device->driver_data = chipset_dev;
- chipset_dev->acpi_device = acpi_device;
- chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
- err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
- visorchipset_dev_groups);
- if (err < 0)
- goto error_destroy_channel;
- controlvm_channel = chipset_dev->controlvm_channel;
- if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
- &chipset_dev->acpi_device->dev,
- &visor_controlvm_channel_guid,
- "controlvm",
- sizeof(struct visor_controlvm_channel),
- VISOR_CONTROLVM_CHANNEL_VERSIONID,
- VISOR_CHANNEL_SIGNATURE)) {
- err = -ENODEV;
- goto error_delete_groups;
- }
- /* if booting in a crash kernel */
- if (is_kdump_kernel())
- INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
- setup_crash_devices_work_queue);
- else
- INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
- controlvm_periodic_work);
- chipset_dev->most_recent_message_jiffies = jiffies;
- chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
- schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
- chipset_dev->poll_jiffies);
- err = visorbus_init();
- if (err < 0)
- goto error_cancel_work;
- return 0;
-
-error_cancel_work:
- cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
-
-error_delete_groups:
- sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
- visorchipset_dev_groups);
-
-error_destroy_channel:
- visorchannel_destroy(chipset_dev->controlvm_channel);
-
-error_free_chipset_dev:
- kfree(chipset_dev);
-
-error:
- dev_err(&acpi_device->dev, "failed with error %d\n", err);
- return err;
-}
-
-static int visorchipset_exit(struct acpi_device *acpi_device)
-{
- visorbus_exit();
- cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
- sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
- visorchipset_dev_groups);
- visorchannel_destroy(chipset_dev->controlvm_channel);
- kfree(chipset_dev);
- return 0;
-}
-
-static const struct acpi_device_id unisys_device_ids[] = {
- {"PNP0A07", 0},
- {"", 0},
-};
-
-static struct acpi_driver unisys_acpi_driver = {
- .name = "unisys_acpi",
- .class = "unisys_acpi_class",
- .owner = THIS_MODULE,
- .ids = unisys_device_ids,
- .ops = {
- .add = visorchipset_init,
- .remove = visorchipset_exit,
- },
-};
-
-MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
-
-static __init int visorutil_spar_detect(void)
-{
- unsigned int eax, ebx, ecx, edx;
-
- if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
- /* check the ID */
- cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
- return (ebx == UNISYS_VISOR_ID_EBX) &&
- (ecx == UNISYS_VISOR_ID_ECX) &&
- (edx == UNISYS_VISOR_ID_EDX);
- }
- return 0;
-}
-
-static int __init init_unisys(void)
-{
- int result;
-
- if (!visorutil_spar_detect())
- return -ENODEV;
- result = acpi_bus_register_driver(&unisys_acpi_driver);
- if (result)
- return -ENODEV;
- pr_info("Unisys Visorchipset Driver Loaded.\n");
- return 0;
-};
-
-static void __exit exit_unisys(void)
-{
- acpi_bus_unregister_driver(&unisys_acpi_driver);
-}
-
-module_init(init_unisys);
-module_exit(exit_unisys);
-
-MODULE_AUTHOR("Unisys");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");
diff --git a/drivers/vme/Kconfig b/drivers/vme/Kconfig
index 936392ca3c8c..c13dd9d2a604 100644
--- a/drivers/vme/Kconfig
+++ b/drivers/vme/Kconfig
@@ -15,6 +15,4 @@ source "drivers/vme/bridges/Kconfig"
source "drivers/vme/boards/Kconfig"
-source "drivers/staging/vme/devices/Kconfig"
-
endif # VME
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index f6664fc9596a..0eb560fc0153 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -172,8 +172,9 @@ static int ds_send_control_cmd(struct ds_device *dev, u16 value, u16 index)
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
CONTROL_CMD, VENDOR, value, index, NULL, 0, 1000);
if (err < 0) {
- pr_err("Failed to send command control message %x.%x: err=%d.\n",
- value, index, err);
+ dev_err(&dev->udev->dev,
+ "Failed to send command control message %x.%x: err=%d.\n",
+ value, index, err);
return err;
}
@@ -187,8 +188,9 @@ static int ds_send_control_mode(struct ds_device *dev, u16 value, u16 index)
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
MODE_CMD, VENDOR, value, index, NULL, 0, 1000);
if (err < 0) {
- pr_err("Failed to send mode control message %x.%x: err=%d.\n",
- value, index, err);
+ dev_err(&dev->udev->dev,
+ "Failed to send mode control message %x.%x: err=%d.\n",
+ value, index, err);
return err;
}
@@ -202,72 +204,68 @@ static int ds_send_control(struct ds_device *dev, u16 value, u16 index)
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
COMM_CMD, VENDOR, value, index, NULL, 0, 1000);
if (err < 0) {
- pr_err("Failed to send control message %x.%x: err=%d.\n",
- value, index, err);
+ dev_err(&dev->udev->dev,
+ "Failed to send control message %x.%x: err=%d.\n",
+ value, index, err);
return err;
}
return err;
}
-static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off)
-{
- pr_info("%45s: %8x\n", str, buf[off]);
-}
-
-static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count)
+static void ds_dump_status(struct ds_device *ds_dev, unsigned char *buf, int count)
{
+ struct device *dev = &ds_dev->udev->dev;
int i;
- dev_info(&dev->udev->dev, "ep_status=0x%x, count=%d, status=%*phC",
- dev->ep[EP_STATUS], count, count, buf);
+ dev_info(dev, "ep_status=0x%x, count=%d, status=%*phC",
+ ds_dev->ep[EP_STATUS], count, count, buf);
if (count >= 16) {
- ds_print_msg(buf, "enable flag", 0);
- ds_print_msg(buf, "1-wire speed", 1);
- ds_print_msg(buf, "strong pullup duration", 2);
- ds_print_msg(buf, "programming pulse duration", 3);
- ds_print_msg(buf, "pulldown slew rate control", 4);
- ds_print_msg(buf, "write-1 low time", 5);
- ds_print_msg(buf, "data sample offset/write-0 recovery time",
- 6);
- ds_print_msg(buf, "reserved (test register)", 7);
- ds_print_msg(buf, "device status flags", 8);
- ds_print_msg(buf, "communication command byte 1", 9);
- ds_print_msg(buf, "communication command byte 2", 10);
- ds_print_msg(buf, "communication command buffer status", 11);
- ds_print_msg(buf, "1-wire data output buffer status", 12);
- ds_print_msg(buf, "1-wire data input buffer status", 13);
- ds_print_msg(buf, "reserved", 14);
- ds_print_msg(buf, "reserved", 15);
+ dev_dbg(dev, "enable flag: 0x%02x", buf[0]);
+ dev_dbg(dev, "1-wire speed: 0x%02x", buf[1]);
+ dev_dbg(dev, "strong pullup duration: 0x%02x", buf[2]);
+ dev_dbg(dev, "programming pulse duration: 0x%02x", buf[3]);
+ dev_dbg(dev, "pulldown slew rate control: 0x%02x", buf[4]);
+ dev_dbg(dev, "write-1 low time: 0x%02x", buf[5]);
+ dev_dbg(dev, "data sample offset/write-0 recovery time: 0x%02x", buf[6]);
+ dev_dbg(dev, "reserved (test register): 0x%02x", buf[7]);
+ dev_dbg(dev, "device status flags: 0x%02x", buf[8]);
+ dev_dbg(dev, "communication command byte 1: 0x%02x", buf[9]);
+ dev_dbg(dev, "communication command byte 2: 0x%02x", buf[10]);
+ dev_dbg(dev, "communication command buffer status: 0x%02x", buf[11]);
+ dev_dbg(dev, "1-wire data output buffer status: 0x%02x", buf[12]);
+ dev_dbg(dev, "1-wire data input buffer status: 0x%02x", buf[13]);
+ dev_dbg(dev, "reserved: 0x%02x", buf[14]);
+ dev_dbg(dev, "reserved: 0x%02x", buf[15]);
}
+
for (i = 16; i < count; ++i) {
if (buf[i] == RR_DETECT) {
- ds_print_msg(buf, "new device detect", i);
+ dev_dbg(dev, "New device detect.\n");
continue;
}
- ds_print_msg(buf, "Result Register Value: ", i);
+ dev_dbg(dev, "Result Register Value: 0x%02x", buf[i]);
if (buf[i] & RR_NRS)
- pr_info("NRS: Reset no presence or ...\n");
+ dev_dbg(dev, "NRS: Reset no presence or ...\n");
if (buf[i] & RR_SH)
- pr_info("SH: short on reset or set path\n");
+ dev_dbg(dev, "SH: short on reset or set path\n");
if (buf[i] & RR_APP)
- pr_info("APP: alarming presence on reset\n");
+ dev_dbg(dev, "APP: alarming presence on reset\n");
if (buf[i] & RR_VPP)
- pr_info("VPP: 12V expected not seen\n");
+ dev_dbg(dev, "VPP: 12V expected not seen\n");
if (buf[i] & RR_CMP)
- pr_info("CMP: compare error\n");
+ dev_dbg(dev, "CMP: compare error\n");
if (buf[i] & RR_CRC)
- pr_info("CRC: CRC error detected\n");
+ dev_dbg(dev, "CRC: CRC error detected\n");
if (buf[i] & RR_RDP)
- pr_info("RDP: redirected page\n");
+ dev_dbg(dev, "RDP: redirected page\n");
if (buf[i] & RR_EOS)
- pr_info("EOS: end of search error\n");
+ dev_dbg(dev, "EOS: end of search error\n");
}
}
-static int ds_recv_status(struct ds_device *dev, struct ds_status *st,
- bool dump)
+static int ds_recv_status(struct ds_device *dev, struct ds_status *st)
{
int count, err;
@@ -281,14 +279,12 @@ static int ds_recv_status(struct ds_device *dev, struct ds_status *st,
dev->st_buf, sizeof(dev->st_buf),
&count, 1000);
if (err < 0) {
- pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n",
- dev->ep[EP_STATUS], err);
+ dev_err(&dev->udev->dev,
+ "Failed to read 1-wire data from 0x%x: err=%d.\n",
+ dev->ep[EP_STATUS], err);
return err;
}
- if (dump)
- ds_dump_status(dev, dev->st_buf, count);
-
if (st && count >= sizeof(*st))
memcpy(st, dev->st_buf, sizeof(*st));
@@ -302,13 +298,15 @@ static void ds_reset_device(struct ds_device *dev)
* the strong pullup.
*/
if (ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_SPUE))
- pr_err("ds_reset_device: Error allowing strong pullup\n");
+ dev_err(&dev->udev->dev,
+ "%s: Error allowing strong pullup\n", __func__);
/* Chip strong pullup time was cleared. */
if (dev->spu_sleep) {
/* lower 4 bits are 0, see ds_set_pullup */
u8 del = dev->spu_sleep>>4;
if (ds_send_control(dev, COMM_SET_DURATION | COMM_IM, del))
- pr_err("ds_reset_device: Error setting duration\n");
+ dev_err(&dev->udev->dev,
+ "%s: Error setting duration\n", __func__);
}
}
@@ -329,9 +327,16 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]),
buf, size, &count, 1000);
if (err < 0) {
+ int recv_len;
+
dev_info(&dev->udev->dev, "Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]));
- ds_recv_status(dev, NULL, true);
+
+	/* The status might tell us why the endpoint is stuck. */
+ recv_len = ds_recv_status(dev, NULL);
+ if (recv_len >= 0)
+ ds_dump_status(dev, dev->st_buf, recv_len);
+
return err;
}
@@ -355,7 +360,7 @@ static int ds_send_data(struct ds_device *dev, unsigned char *buf, int len)
count = 0;
err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->ep[EP_DATA_OUT]), buf, len, &count, 1000);
if (err < 0) {
- pr_err("Failed to write 1-wire data to ep0x%x: "
+ dev_err(&dev->udev->dev, "Failed to write 1-wire data to ep0x%x: "
"err=%d.\n", dev->ep[EP_DATA_OUT], err);
return err;
}
@@ -377,7 +382,7 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
err = ds_send_control(dev, CTL_RESUME_EXE, 0);
if (err)
break;
- err = ds_recv_status(dev, &st, false);
+ err = ds_recv_status(dev, &st);
if (err)
break;
@@ -424,7 +429,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
do {
st->status = 0;
- err = ds_recv_status(dev, st, false);
+ err = ds_recv_status(dev, st);
#if 0
if (err >= 0) {
int i;
@@ -437,7 +442,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
} while (!(st->status & ST_IDLE) && !(err < 0) && ++count < 100);
if (err >= 16 && st->status & ST_EPOF) {
- pr_info("Resetting device after ST_EPOF.\n");
+ dev_info(&dev->udev->dev, "Resetting device after ST_EPOF.\n");
ds_reset_device(dev);
/* Always dump the device status. */
count = 101;
@@ -721,7 +726,7 @@ static void ds9490r_search(void *data, struct w1_master *master,
do {
schedule_timeout(jtime);
- err = ds_recv_status(dev, &st, false);
+ err = ds_recv_status(dev, &st);
if (err < 0 || err < sizeof(st))
break;
@@ -992,10 +997,9 @@ static int ds_probe(struct usb_interface *intf,
int i, err, alt;
dev = kzalloc(sizeof(struct ds_device), GFP_KERNEL);
- if (!dev) {
- pr_info("Failed to allocate new DS9490R structure.\n");
+ if (!dev)
return -ENOMEM;
- }
+
dev->udev = usb_get_dev(udev);
if (!dev->udev) {
err = -ENOMEM;
@@ -1025,7 +1029,7 @@ static int ds_probe(struct usb_interface *intf,
iface_desc = intf->cur_altsetting;
if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
- pr_info("Num endpoints=%d. It is not DS9490R.\n",
+ dev_err(&dev->udev->dev, "Num endpoints=%d. It is not DS9490R.\n",
iface_desc->desc.bNumEndpoints);
err = -EINVAL;
goto err_out_clear;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index c4e82a8d863f..32fd37698932 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -883,6 +883,14 @@ config RENESAS_RZAWDT
This driver adds watchdog support for the integrated watchdogs in the
Renesas RZ/A SoCs. These watchdogs can be used to reset a system.
+config RENESAS_RZN1WDT
+ tristate "Renesas RZ/N1 watchdog"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ This driver adds watchdog support for the integrated watchdogs in the
+ Renesas RZ/N1 SoCs. These watchdogs can be used to reset a system.
+
config RENESAS_RZG2LWDT
tristate "Renesas RZ/G2L WDT Watchdog"
depends on ARCH_RENESAS || COMPILE_TEST
@@ -1011,6 +1019,17 @@ config APPLE_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called apple_wdt.
+config SUNPLUS_WATCHDOG
+ tristate "Sunplus watchdog support"
+ depends on ARCH_SUNPLUS || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the watchdog timer
+ in Sunplus SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sunplus_wdt.
+
# X86 (i386 + ia64 + x86_64) Architecture
config ACQUIRE_WDT
@@ -1820,6 +1839,17 @@ config RALINK_WDT
help
Hardware driver for the Ralink SoC Watchdog Timer.
+config GXP_WATCHDOG
+ tristate "HPE GXP watchdog support"
+ depends on ARCH_HPE_GXP
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the watchdog timer
+ in HPE GXP SoCs.
+
+ To compile this driver as a module, choose M here.
+ The module will be called gxp-wdt.
+
config MT7621_WDT
tristate "Mediatek SoC watchdog"
select WATCHDOG_CORE
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index f7da867e8782..c324e9d820e9 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o
obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o
obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o
obj-$(CONFIG_RENESAS_RZAWDT) += rza_wdt.o
+obj-$(CONFIG_RENESAS_RZN1WDT) += rzn1_wdt.o
obj-$(CONFIG_RENESAS_RZG2LWDT) += rzg2l_wdt.o
obj-$(CONFIG_ASPEED_WATCHDOG) += aspeed_wdt.o
obj-$(CONFIG_STM32_WATCHDOG) += stm32_iwdg.o
@@ -92,9 +93,11 @@ obj-$(CONFIG_RTD119X_WATCHDOG) += rtd119x_wdt.o
obj-$(CONFIG_SPRD_WATCHDOG) += sprd_wdt.o
obj-$(CONFIG_PM8916_WATCHDOG) += pm8916_wdt.o
obj-$(CONFIG_ARM_SMC_WATCHDOG) += arm_smc_wdt.o
+obj-$(CONFIG_GXP_WATCHDOG) += gxp-wdt.o
obj-$(CONFIG_VISCONTI_WATCHDOG) += visconti_wdt.o
obj-$(CONFIG_MSC313E_WATCHDOG) += msc313e_wdt.o
obj-$(CONFIG_APPLE_WATCHDOG) += apple_wdt.o
+obj-$(CONFIG_SUNPLUS_WATCHDOG) += sunplus_wdt.o
# X86 (i386 + ia64 + x86_64) Architecture
obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index 8656a137e9a4..1ffcf6aca6ae 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -218,6 +218,7 @@ static SIMPLE_DEV_PM_OPS(bcm7038_wdt_pm_ops, bcm7038_wdt_suspend,
bcm7038_wdt_resume);
static const struct of_device_id bcm7038_wdt_match[] = {
+ { .compatible = "brcm,bcm6345-wdt" },
{ .compatible = "brcm,bcm7038-wdt" },
{},
};
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index 9adad1862bbd..09a4af4c58fc 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/mfd/da9063/registers.h>
#include <linux/mfd/da9063/core.h>
+#include <linux/property.h>
#include <linux/regmap.h>
/*
@@ -26,6 +27,8 @@
* others: timeout = 2048 ms * 2^(TWDSCALE-1).
*/
static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
+static bool use_sw_pm;
+
#define DA9063_TWDSCALE_DISABLE 0
#define DA9063_TWDSCALE_MIN 1
#define DA9063_TWDSCALE_MAX (ARRAY_SIZE(wdt_timeout) - 1)
@@ -218,6 +221,8 @@ static int da9063_wdt_probe(struct platform_device *pdev)
if (!wdd)
return -ENOMEM;
+ use_sw_pm = device_property_present(dev, "dlg,use-sw-pm");
+
wdd->info = &da9063_watchdog_info;
wdd->ops = &da9063_watchdog_ops;
wdd->min_timeout = DA9063_WDT_MIN_TIMEOUT;
@@ -228,6 +233,7 @@ static int da9063_wdt_probe(struct platform_device *pdev)
watchdog_set_restart_priority(wdd, 128);
watchdog_set_drvdata(wdd, da9063);
+ dev_set_drvdata(dev, wdd);
wdd->timeout = DA9063_WDG_TIMEOUT;
@@ -249,10 +255,40 @@ static int da9063_wdt_probe(struct platform_device *pdev)
return devm_watchdog_register_device(dev, wdd);
}
+static int __maybe_unused da9063_wdt_suspend(struct device *dev)
+{
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+ if (!use_sw_pm)
+ return 0;
+
+ if (watchdog_active(wdd))
+ return da9063_wdt_stop(wdd);
+
+ return 0;
+}
+
+static int __maybe_unused da9063_wdt_resume(struct device *dev)
+{
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+ if (!use_sw_pm)
+ return 0;
+
+ if (watchdog_active(wdd))
+ return da9063_wdt_start(wdd);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(da9063_wdt_pm_ops,
+ da9063_wdt_suspend, da9063_wdt_resume);
+
static struct platform_driver da9063_wdt_driver = {
.probe = da9063_wdt_probe,
.driver = {
.name = DA9063_DRVNAME_WATCHDOG,
+ .pm = &da9063_wdt_pm_ops,
},
};
module_platform_driver(da9063_wdt_driver);
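
For reference on the da9063 hunk above: the driver comment gives the hardware timeout as 2048 ms * 2^(TWDSCALE-1), and the wdt_timeout[] table simply stores those values truncated to whole seconds. A minimal userspace sketch (not kernel code; the values are taken only from the comment and table quoted in this diff):

#include <stdio.h>

int main(void)
{
	/* TWDSCALE = 1..7; 0 disables the watchdog */
	for (unsigned int scale = 1; scale <= 7; scale++) {
		unsigned int ms = 2048U << (scale - 1);

		/* prints 2, 4, 8, 16, 32, 65, 131 - the wdt_timeout[] entries */
		printf("TWDSCALE=%u: %u ms (~%u s)\n", scale, ms, ms / 1000);
	}
	return 0;
}
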
diff --git a/drivers/watchdog/gxp-wdt.c b/drivers/watchdog/gxp-wdt.c
new file mode 100644
index 000000000000..b0b2d7a6fdde
--- /dev/null
+++ b/drivers/watchdog/gxp-wdt.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+#define MASK_WDGCS_ENABLE 0x01
+#define MASK_WDGCS_RELOAD 0x04
+#define MASK_WDGCS_NMIEN 0x08
+#define MASK_WDGCS_WARN 0x80
+
+#define WDT_MAX_TIMEOUT_MS 655350
+#define WDT_DEFAULT_TIMEOUT 30
+#define SECS_TO_WDOG_TICKS(x) ((x) * 100)
+#define WDOG_TICKS_TO_SECS(x) ((x) / 100)
+
+#define GXP_WDT_CNT_OFS 0x10
+#define GXP_WDT_CTRL_OFS 0x16
+
+struct gxp_wdt {
+ void __iomem *base;
+ struct watchdog_device wdd;
+};
+
+static void gxp_wdt_enable_reload(struct gxp_wdt *drvdata)
+{
+ u8 val;
+
+ val = readb(drvdata->base + GXP_WDT_CTRL_OFS);
+ val |= (MASK_WDGCS_ENABLE | MASK_WDGCS_RELOAD);
+ writeb(val, drvdata->base + GXP_WDT_CTRL_OFS);
+}
+
+static int gxp_wdt_start(struct watchdog_device *wdd)
+{
+ struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+
+ writew(SECS_TO_WDOG_TICKS(wdd->timeout), drvdata->base + GXP_WDT_CNT_OFS);
+ gxp_wdt_enable_reload(drvdata);
+ return 0;
+}
+
+static int gxp_wdt_stop(struct watchdog_device *wdd)
+{
+ struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+ u8 val;
+
+ val = readb_relaxed(drvdata->base + GXP_WDT_CTRL_OFS);
+ val &= ~MASK_WDGCS_ENABLE;
+ writeb(val, drvdata->base + GXP_WDT_CTRL_OFS);
+ return 0;
+}
+
+static int gxp_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+ u32 actual;
+
+ wdd->timeout = timeout;
+ actual = min(timeout * 100, wdd->max_hw_heartbeat_ms / 10);
+ writew(actual, drvdata->base + GXP_WDT_CNT_OFS);
+
+ return 0;
+}
+
+static unsigned int gxp_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+ u32 val = readw(drvdata->base + GXP_WDT_CNT_OFS);
+
+ return WDOG_TICKS_TO_SECS(val);
+}
+
+static int gxp_wdt_ping(struct watchdog_device *wdd)
+{
+ struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+
+ gxp_wdt_enable_reload(drvdata);
+ return 0;
+}
+
+static int gxp_restart(struct watchdog_device *wdd, unsigned long action,
+ void *data)
+{
+ struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+
+ writew(1, drvdata->base + GXP_WDT_CNT_OFS);
+ gxp_wdt_enable_reload(drvdata);
+ mdelay(100);
+ return 0;
+}
+
+static const struct watchdog_ops gxp_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = gxp_wdt_start,
+ .stop = gxp_wdt_stop,
+ .ping = gxp_wdt_ping,
+ .set_timeout = gxp_wdt_set_timeout,
+ .get_timeleft = gxp_wdt_get_timeleft,
+ .restart = gxp_restart,
+};
+
+static const struct watchdog_info gxp_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
+ .identity = "HPE GXP Watchdog timer",
+};
+
+static int gxp_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gxp_wdt *drvdata;
+ int err;
+ u8 val;
+
+ drvdata = devm_kzalloc(dev, sizeof(struct gxp_wdt), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ /*
+ * The register area where the timer and watchdog reside is disarranged.
+ * Hence mapping individual register blocks for the timer and watchdog
+ * is not recommended as they would have access to each other's
+ * registers. Based on feedback, the watchdog is no longer part of the
+ * device tree file and the timer driver now creates the watchdog as a
+ * child device. During the watchdog's creation, the timer driver passes
+ * the base address to the watchdog over the private interface.
+ */
+
+ drvdata->base = (void __iomem *)dev->platform_data;
+
+ drvdata->wdd.info = &gxp_wdt_info;
+ drvdata->wdd.ops = &gxp_wdt_ops;
+ drvdata->wdd.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT_MS;
+ drvdata->wdd.parent = dev;
+ drvdata->wdd.timeout = WDT_DEFAULT_TIMEOUT;
+
+ watchdog_set_drvdata(&drvdata->wdd, drvdata);
+ watchdog_set_nowayout(&drvdata->wdd, WATCHDOG_NOWAYOUT);
+
+ val = readb(drvdata->base + GXP_WDT_CTRL_OFS);
+
+ if (val & MASK_WDGCS_ENABLE)
+ set_bit(WDOG_HW_RUNNING, &drvdata->wdd.status);
+
+ watchdog_set_restart_priority(&drvdata->wdd, 128);
+
+ watchdog_stop_on_reboot(&drvdata->wdd);
+ err = devm_watchdog_register_device(dev, &drvdata->wdd);
+ if (err) {
+ dev_err(dev, "Failed to register watchdog device");
+ return err;
+ }
+
+ dev_info(dev, "HPE GXP watchdog timer");
+
+ return 0;
+}
+
+static struct platform_driver gxp_wdt_driver = {
+ .probe = gxp_wdt_probe,
+ .driver = {
+ .name = "gxp-wdt",
+ },
+};
+module_platform_driver(gxp_wdt_driver);
+
+MODULE_AUTHOR("Nick Hawkins <nick.hawkins@hpe.com>");
+MODULE_AUTHOR("Jean-Marie Verdun <verdun@hpe.com>");
+MODULE_DESCRIPTION("Driver for GXP watchdog timer");
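
A quick sketch of the tick arithmetic used by the new gxp-wdt driver above: the count register holds 10 ms ticks (SECS_TO_WDOG_TICKS multiplies by 100), so a 16-bit count gives WDT_MAX_TIMEOUT_MS = 65535 * 10 ms = 655350 ms, and gxp_wdt_set_timeout() clamps to that many ticks. Plain userspace C, illustrative only:

#include <stdio.h>

#define WDT_MAX_TIMEOUT_MS	655350U
#define SECS_TO_WDOG_TICKS(x)	((x) * 100U)

/* mirrors the clamp in gxp_wdt_set_timeout(): min(timeout * 100, max_ms / 10) */
static unsigned int gxp_ticks(unsigned int timeout_s)
{
	unsigned int ticks = SECS_TO_WDOG_TICKS(timeout_s);
	unsigned int max_ticks = WDT_MAX_TIMEOUT_MS / 10;

	return ticks < max_ticks ? ticks : max_ticks;
}

int main(void)
{
	printf("30 s   -> %u ticks\n", gxp_ticks(30));		/* 3000 */
	printf("9999 s -> %u ticks\n", gxp_ticks(9999));	/* clamped to 65535 */
	return 0;
}
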
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 3f2f4343644f..34693f11385f 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -596,7 +596,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
/*
* Suspend-to-idle requires this, because it stops the ticks and timekeeping, so
* the watchdog cannot be pinged while in that state. In ACPI sleep states the
@@ -604,15 +603,15 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
*/
#ifdef CONFIG_ACPI
-static inline bool need_suspend(void)
+static inline bool __maybe_unused need_suspend(void)
{
return acpi_target_system_state() == ACPI_STATE_S0;
}
#else
-static inline bool need_suspend(void) { return true; }
+static inline bool __maybe_unused need_suspend(void) { return true; }
#endif
-static int iTCO_wdt_suspend_noirq(struct device *dev)
+static int __maybe_unused iTCO_wdt_suspend_noirq(struct device *dev)
{
struct iTCO_wdt_private *p = dev_get_drvdata(dev);
int ret = 0;
@@ -626,7 +625,7 @@ static int iTCO_wdt_suspend_noirq(struct device *dev)
return ret;
}
-static int iTCO_wdt_resume_noirq(struct device *dev)
+static int __maybe_unused iTCO_wdt_resume_noirq(struct device *dev)
{
struct iTCO_wdt_private *p = dev_get_drvdata(dev);
@@ -637,20 +636,15 @@ static int iTCO_wdt_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops iTCO_wdt_pm = {
- .suspend_noirq = iTCO_wdt_suspend_noirq,
- .resume_noirq = iTCO_wdt_resume_noirq,
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(iTCO_wdt_suspend_noirq,
+ iTCO_wdt_resume_noirq)
};
-#define ITCO_WDT_PM_OPS (&iTCO_wdt_pm)
-#else
-#define ITCO_WDT_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
-
static struct platform_driver iTCO_wdt_driver = {
.probe = iTCO_wdt_probe,
.driver = {
.name = DRV_NAME,
- .pm = ITCO_WDT_PM_OPS,
+ .pm = &iTCO_wdt_pm,
},
};
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 4577a76dd464..f0d4e3cc7459 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -10,7 +10,9 @@
*/
#include <dt-bindings/reset/mt2712-resets.h>
+#include <dt-bindings/reset/mt7986-resets.h>
#include <dt-bindings/reset/mt8183-resets.h>
+#include <dt-bindings/reset/mt8186-resets.h>
#include <dt-bindings/reset/mt8192-resets.h>
#include <dt-bindings/reset/mt8195-resets.h>
#include <linux/delay.h>
@@ -76,10 +78,18 @@ static const struct mtk_wdt_data mt2712_data = {
.toprgu_sw_rst_num = MT2712_TOPRGU_SW_RST_NUM,
};
+static const struct mtk_wdt_data mt7986_data = {
+ .toprgu_sw_rst_num = MT7986_TOPRGU_SW_RST_NUM,
+};
+
static const struct mtk_wdt_data mt8183_data = {
.toprgu_sw_rst_num = MT8183_TOPRGU_SW_RST_NUM,
};
+static const struct mtk_wdt_data mt8186_data = {
+ .toprgu_sw_rst_num = MT8186_TOPRGU_SW_RST_NUM,
+};
+
static const struct mtk_wdt_data mt8192_data = {
.toprgu_sw_rst_num = MT8192_TOPRGU_SW_RST_NUM,
};
@@ -418,7 +428,9 @@ static int mtk_wdt_resume(struct device *dev)
static const struct of_device_id mtk_wdt_dt_ids[] = {
{ .compatible = "mediatek,mt2712-wdt", .data = &mt2712_data },
{ .compatible = "mediatek,mt6589-wdt" },
+ { .compatible = "mediatek,mt7986-wdt", .data = &mt7986_data },
{ .compatible = "mediatek,mt8183-wdt", .data = &mt8183_data },
+ { .compatible = "mediatek,mt8186-wdt", .data = &mt8186_data },
{ .compatible = "mediatek,mt8192-wdt", .data = &mt8192_data },
{ .compatible = "mediatek,mt8195-wdt", .data = &mt8195_data },
{ /* sentinel */ }
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index db843f825860..053ef3bde12d 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -226,7 +226,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
- if (ret) {
+ if (ret < 0) {
pm_runtime_put_noidle(dev);
pm_runtime_disable(&pdev->dev);
return dev_err_probe(dev, ret, "runtime pm failed\n");
@@ -253,6 +253,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
}
if (readl(wdt->base + RTIDWDCTRL) == WDENABLE_KEY) {
+ int preset_heartbeat;
u32 time_left_ms;
u64 heartbeat_ms;
u32 wsize;
@@ -263,11 +264,12 @@ static int rti_wdt_probe(struct platform_device *pdev)
heartbeat_ms <<= WDT_PRELOAD_SHIFT;
heartbeat_ms *= 1000;
do_div(heartbeat_ms, wdt->freq);
- if (heartbeat_ms != heartbeat * 1000)
+ preset_heartbeat = heartbeat_ms + 500;
+ preset_heartbeat /= 1000;
+ if (preset_heartbeat != heartbeat)
dev_warn(dev, "watchdog already running, ignoring heartbeat config!\n");
- heartbeat = heartbeat_ms;
- heartbeat /= 1000;
+ heartbeat = preset_heartbeat;
wsize = readl(wdt->base + RTIWWDSIZECTRL);
ret = rti_wdt_setup_hw_hb(wdd, wsize);
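
The rti_wdt change above rounds the preset heartbeat read back from the already-running watchdog to whole seconds before comparing it with the configured heartbeat, instead of comparing raw milliseconds. A small illustrative sketch of that rounding (userspace C, not driver code):

#include <stdio.h>

static unsigned int preset_heartbeat(unsigned long long heartbeat_ms)
{
	/* add 500 ms then integer-divide, as in the patched probe path */
	return (unsigned int)((heartbeat_ms + 500) / 1000);
}

int main(void)
{
	printf("%u\n", preset_heartbeat(59999));	/* 60: no spurious warning */
	printf("%u\n", preset_heartbeat(60400));	/* 60 */
	printf("%u\n", preset_heartbeat(61000));	/* 61 */
	return 0;
}
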
diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
index 6b426df34fd6..6eea0ee4af49 100644
--- a/drivers/watchdog/rzg2l_wdt.c
+++ b/drivers/watchdog/rzg2l_wdt.c
@@ -21,8 +21,11 @@
#define WDTSET 0x04
#define WDTTIM 0x08
#define WDTINT 0x0C
+#define PECR 0x10
+#define PEEN 0x14
#define WDTCNT_WDTEN BIT(0)
#define WDTINT_INTDISP BIT(0)
+#define PEEN_FORCE BIT(0)
#define WDT_DEFAULT_TIMEOUT 60U
@@ -43,6 +46,8 @@ struct rzg2l_wdt_priv {
struct reset_control *rstc;
unsigned long osc_clk_rate;
unsigned long delay;
+ struct clk *pclk;
+ struct clk *osc_clk;
};
static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv)
@@ -53,7 +58,7 @@ static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv)
static u32 rzg2l_wdt_get_cycle_usec(unsigned long cycle, u32 wdttime)
{
- u64 timer_cycle_us = 1024 * 1024 * (wdttime + 1) * MICRO;
+ u64 timer_cycle_us = 1024 * 1024ULL * (wdttime + 1) * MICRO;
return div64_ul(timer_cycle_us, cycle);
}
@@ -86,7 +91,6 @@ static int rzg2l_wdt_start(struct watchdog_device *wdev)
{
struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
- reset_control_deassert(priv->rstc);
pm_runtime_get_sync(wdev->parent);
/* Initialize time out */
@@ -106,7 +110,26 @@ static int rzg2l_wdt_stop(struct watchdog_device *wdev)
struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
pm_runtime_put(wdev->parent);
- reset_control_assert(priv->rstc);
+ reset_control_reset(priv->rstc);
+
+ return 0;
+}
+
+static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int timeout)
+{
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ wdev->timeout = timeout;
+
+ /*
+ * If the watchdog is active, reset the module for updating the WDTSET
+ * register so that it is updated with new timeout values.
+ */
+ if (watchdog_active(wdev)) {
+ pm_runtime_put(wdev->parent);
+ reset_control_reset(priv->rstc);
+ rzg2l_wdt_start(wdev);
+ }
return 0;
}
@@ -116,15 +139,14 @@ static int rzg2l_wdt_restart(struct watchdog_device *wdev,
{
struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
- /* Reset the module before we modify any register */
- reset_control_reset(priv->rstc);
- pm_runtime_get_sync(wdev->parent);
+ clk_prepare_enable(priv->pclk);
+ clk_prepare_enable(priv->osc_clk);
- /* smallest counter value to reboot soon */
- rzg2l_wdt_write(priv, WDTSET_COUNTER_VAL(1), WDTSET);
+ /* Generate Reset (WDTRSTB) Signal on parity error */
+ rzg2l_wdt_write(priv, 0, PECR);
- /* Enable watchdog timer*/
- rzg2l_wdt_write(priv, WDTCNT_WDTEN, WDTCNT);
+ /* Force parity error */
+ rzg2l_wdt_write(priv, PEEN_FORCE, PEEN);
return 0;
}
@@ -148,15 +170,15 @@ static const struct watchdog_ops rzg2l_wdt_ops = {
.start = rzg2l_wdt_start,
.stop = rzg2l_wdt_stop,
.ping = rzg2l_wdt_ping,
+ .set_timeout = rzg2l_wdt_set_timeout,
.restart = rzg2l_wdt_restart,
};
-static void rzg2l_wdt_reset_assert_pm_disable_put(void *data)
+static void rzg2l_wdt_reset_assert_pm_disable(void *data)
{
struct watchdog_device *wdev = data;
struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
- pm_runtime_put(wdev->parent);
pm_runtime_disable(wdev->parent);
reset_control_assert(priv->rstc);
}
@@ -166,7 +188,6 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rzg2l_wdt_priv *priv;
unsigned long pclk_rate;
- struct clk *wdt_clk;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -178,22 +199,20 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
/* Get watchdog main clock */
- wdt_clk = clk_get(&pdev->dev, "oscclk");
- if (IS_ERR(wdt_clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(wdt_clk), "no oscclk");
+ priv->osc_clk = devm_clk_get(&pdev->dev, "oscclk");
+ if (IS_ERR(priv->osc_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->osc_clk), "no oscclk");
- priv->osc_clk_rate = clk_get_rate(wdt_clk);
- clk_put(wdt_clk);
+ priv->osc_clk_rate = clk_get_rate(priv->osc_clk);
if (!priv->osc_clk_rate)
return dev_err_probe(&pdev->dev, -EINVAL, "oscclk rate is 0");
/* Get Peripheral clock */
- wdt_clk = clk_get(&pdev->dev, "pclk");
- if (IS_ERR(wdt_clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(wdt_clk), "no pclk");
+ priv->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(priv->pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->pclk), "no pclk");
- pclk_rate = clk_get_rate(wdt_clk);
- clk_put(wdt_clk);
+ pclk_rate = clk_get_rate(priv->pclk);
if (!pclk_rate)
return dev_err_probe(&pdev->dev, -EINVAL, "pclk rate is 0");
@@ -204,13 +223,11 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(priv->rstc),
"failed to get cpg reset");
- reset_control_deassert(priv->rstc);
+ ret = reset_control_deassert(priv->rstc);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to deassert");
+
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0) {
- dev_err(dev, "pm_runtime_resume_and_get failed ret=%pe", ERR_PTR(ret));
- goto out_pm_get;
- }
priv->wdev.info = &rzg2l_wdt_ident;
priv->wdev.ops = &rzg2l_wdt_ops;
@@ -222,7 +239,7 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(&priv->wdev, priv);
ret = devm_add_action_or_reset(&pdev->dev,
- rzg2l_wdt_reset_assert_pm_disable_put,
+ rzg2l_wdt_reset_assert_pm_disable,
&priv->wdev);
if (ret < 0)
return ret;
@@ -235,12 +252,6 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
dev_warn(dev, "Specified timeout invalid, using default");
return devm_watchdog_register_device(&pdev->dev, &priv->wdev);
-
-out_pm_get:
- pm_runtime_disable(dev);
- reset_control_assert(priv->rstc);
-
- return ret;
}
static const struct of_device_id rzg2l_wdt_ids[] = {
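
One detail worth spelling out from the rzg2l hunk above: the 1024 * 1024ULL change in rzg2l_wdt_get_cycle_usec() forces the product into 64-bit arithmetic. Without it, on builds where the whole expression is evaluated in 32 bits (for instance when MICRO is a 32-bit unsigned long), the intermediate value wraps before the division. Illustrative userspace sketch, with a 32-bit stand-in for MICRO assumed only for demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t micro32 = 1000000;	/* stand-in for MICRO on a 32-bit build */
	unsigned int wdttime = 0;

	/* all-32-bit product: 1048576 * 1000000 wraps modulo 2^32 */
	uint32_t wrong = 1024 * 1024 * (wdttime + 1) * micro32;

	/* one 64-bit operand (the ULL suffix) keeps the full value */
	uint64_t right = 1024 * 1024ULL * (wdttime + 1) * micro32;

	printf("32-bit: %u\n", wrong);				/* 603979776 */
	printf("64-bit: %llu\n", (unsigned long long)right);	/* 1048576000000 */
	return 0;
}
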
diff --git a/drivers/watchdog/rzn1_wdt.c b/drivers/watchdog/rzn1_wdt.c
new file mode 100644
index 000000000000..55ab384b9965
--- /dev/null
+++ b/drivers/watchdog/rzn1_wdt.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/N1 Watchdog timer.
+ * The timer is a 12-bit counter driven by a (62.5/16384) MHz clock, so it
+ * cannot even cope with a 2 second timeout.
+ *
+ * Copyright 2018 Renesas Electronics Europe Ltd.
+ *
+ * Derived from Ralink RT288x watchdog timer.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+#define DEFAULT_TIMEOUT 60
+
+#define RZN1_WDT_RETRIGGER 0x0
+#define RZN1_WDT_RETRIGGER_RELOAD_VAL 0
+#define RZN1_WDT_RETRIGGER_RELOAD_VAL_MASK 0xfff
+#define RZN1_WDT_RETRIGGER_PRESCALE BIT(12)
+#define RZN1_WDT_RETRIGGER_ENABLE BIT(13)
+#define RZN1_WDT_RETRIGGER_WDSI (0x2 << 14)
+
+#define RZN1_WDT_PRESCALER 16384
+#define RZN1_WDT_MAX 4095
+
+struct rzn1_watchdog {
+ struct watchdog_device wdtdev;
+ void __iomem *base;
+ unsigned long clk_rate_khz;
+};
+
+static inline uint32_t max_heart_beat_ms(unsigned long clk_rate_khz)
+{
+ return (RZN1_WDT_MAX * RZN1_WDT_PRESCALER) / clk_rate_khz;
+}
+
+static inline uint32_t compute_reload_value(uint32_t tick_ms,
+ unsigned long clk_rate_khz)
+{
+ return (tick_ms * clk_rate_khz) / RZN1_WDT_PRESCALER;
+}
+
+static int rzn1_wdt_ping(struct watchdog_device *w)
+{
+ struct rzn1_watchdog *wdt = watchdog_get_drvdata(w);
+
+	/* Any value retriggers the watchdog */
+ writel(0, wdt->base + RZN1_WDT_RETRIGGER);
+
+ return 0;
+}
+
+static int rzn1_wdt_start(struct watchdog_device *w)
+{
+ struct rzn1_watchdog *wdt = watchdog_get_drvdata(w);
+ u32 val;
+
+ /*
+ * The hardware allows you to write to this reg only once.
+ * Since this includes the reload value, there is no way to change the
+ * timeout once started. Also note that the WDT clock is half the bus
+ * fabric clock rate, so if the bus fabric clock rate is changed after
+ * the WDT is started, the WDT interval will be wrong.
+ */
+ val = RZN1_WDT_RETRIGGER_WDSI;
+ val |= RZN1_WDT_RETRIGGER_ENABLE;
+ val |= RZN1_WDT_RETRIGGER_PRESCALE;
+ val |= compute_reload_value(w->max_hw_heartbeat_ms, wdt->clk_rate_khz);
+ writel(val, wdt->base + RZN1_WDT_RETRIGGER);
+
+ return 0;
+}
+
+static irqreturn_t rzn1_wdt_irq(int irq, void *_wdt)
+{
+ pr_crit("RZN1 Watchdog. Initiating system reboot\n");
+ emergency_restart();
+
+ return IRQ_HANDLED;
+}
+
+static struct watchdog_info rzn1_wdt_info = {
+ .identity = "RZ/N1 Watchdog",
+ .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+};
+
+static const struct watchdog_ops rzn1_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = rzn1_wdt_start,
+ .ping = rzn1_wdt_ping,
+};
+
+static void rzn1_wdt_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int rzn1_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rzn1_watchdog *wdt;
+ struct device_node *np = dev->of_node;
+ struct clk *clk;
+ unsigned long clk_rate;
+ int ret;
+ int irq;
+
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ wdt->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(wdt->base))
+ return PTR_ERR(wdt->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, rzn1_wdt_irq, 0,
+ np->name, wdt);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d\n", irq);
+ return ret;
+ }
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get the clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare/enable the clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, rzn1_wdt_clk_disable_unprepare,
+ clk);
+ if (ret)
+ return ret;
+
+ clk_rate = clk_get_rate(clk);
+ if (!clk_rate) {
+ dev_err(dev, "failed to get the clock rate\n");
+ return -EINVAL;
+ }
+
+ wdt->clk_rate_khz = clk_rate / 1000;
+ wdt->wdtdev.info = &rzn1_wdt_info,
+ wdt->wdtdev.ops = &rzn1_wdt_ops,
+ wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS,
+ wdt->wdtdev.parent = dev;
+ /*
+ * The period of the watchdog cannot be changed once set
+ * and is limited to a very short period.
+ * Configure it for a 1s period once and for all, and
+	 * rely on the heartbeat provided by the watchdog core
+	 * to make this usable from user space.
+ */
+ wdt->wdtdev.max_hw_heartbeat_ms = max_heart_beat_ms(wdt->clk_rate_khz);
+ if (wdt->wdtdev.max_hw_heartbeat_ms > 1000)
+ wdt->wdtdev.max_hw_heartbeat_ms = 1000;
+
+ wdt->wdtdev.timeout = DEFAULT_TIMEOUT;
+ ret = watchdog_init_timeout(&wdt->wdtdev, 0, dev);
+ if (ret)
+ return ret;
+
+ watchdog_set_drvdata(&wdt->wdtdev, wdt);
+
+ return devm_watchdog_register_device(dev, &wdt->wdtdev);
+}
+
+
+static const struct of_device_id rzn1_wdt_match[] = {
+ { .compatible = "renesas,rzn1-wdt" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rzn1_wdt_match);
+
+static struct platform_driver rzn1_wdt_driver = {
+ .probe = rzn1_wdt_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = rzn1_wdt_match,
+ },
+};
+
+module_platform_driver(rzn1_wdt_driver);
+
+MODULE_DESCRIPTION("Renesas RZ/N1 hardware watchdog");
+MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
+MODULE_LICENSE("GPL");
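
Putting numbers on the new rzn1_wdt driver above: with the 62.5 MHz clock mentioned in its header comment, the 12-bit counter and /16384 prescaler give a hardware ceiling of roughly one second, which is why the probe caps max_hw_heartbeat_ms at 1000 and relies on the watchdog core for longer user-visible timeouts. Illustrative userspace sketch reusing the driver's constants:

#include <stdio.h>

#define RZN1_WDT_PRESCALER	16384
#define RZN1_WDT_MAX		4095

static unsigned int max_heart_beat_ms(unsigned long clk_rate_khz)
{
	return (RZN1_WDT_MAX * RZN1_WDT_PRESCALER) / clk_rate_khz;
}

static unsigned int compute_reload_value(unsigned int tick_ms,
					 unsigned long clk_rate_khz)
{
	return (tick_ms * clk_rate_khz) / RZN1_WDT_PRESCALER;
}

int main(void)
{
	unsigned long clk_rate_khz = 62500;	/* 62.5 MHz, per the file header */

	printf("max hw heartbeat: %u ms\n", max_heart_beat_ms(clk_rate_khz));		/* 1073 */
	printf("reload for 1000 ms: %u\n", compute_reload_value(1000, clk_rate_khz));	/* 3814 */
	return 0;
}
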
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index 27846c6bdfb0..2d0a06a158a8 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
+#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
@@ -30,17 +31,42 @@
#include <linux/uaccess.h>
#include <linux/timex.h>
-#ifdef CONFIG_ARCH_PXA
-#include <mach/regs-ost.h>
-#endif
+#define REG_OSMR0 0x0000 /* OS timer Match Reg. 0 */
+#define REG_OSMR1 0x0004 /* OS timer Match Reg. 1 */
+#define REG_OSMR2 0x0008 /* OS timer Match Reg. 2 */
+#define REG_OSMR3 0x000c /* OS timer Match Reg. 3 */
+#define REG_OSCR 0x0010 /* OS timer Counter Reg. */
+#define REG_OSSR 0x0014 /* OS timer Status Reg. */
+#define REG_OWER 0x0018 /* OS timer Watch-dog Enable Reg. */
+#define REG_OIER 0x001C /* OS timer Interrupt Enable Reg. */
-#include <mach/reset.h>
-#include <mach/hardware.h>
+#define OSSR_M3 (1 << 3) /* Match status channel 3 */
+#define OSSR_M2 (1 << 2) /* Match status channel 2 */
+#define OSSR_M1 (1 << 1) /* Match status channel 1 */
+#define OSSR_M0 (1 << 0) /* Match status channel 0 */
+
+#define OWER_WME (1 << 0) /* Watchdog Match Enable */
+
+#define OIER_E3 (1 << 3) /* Interrupt enable channel 3 */
+#define OIER_E2 (1 << 2) /* Interrupt enable channel 2 */
+#define OIER_E1 (1 << 1) /* Interrupt enable channel 1 */
+#define OIER_E0 (1 << 0) /* Interrupt enable channel 0 */
static unsigned long oscr_freq;
static unsigned long sa1100wdt_users;
static unsigned int pre_margin;
static int boot_status;
+static void __iomem *reg_base;
+
+static inline void sa1100_wr(u32 val, u32 offset)
+{
+ writel_relaxed(val, reg_base + offset);
+}
+
+static inline u32 sa1100_rd(u32 offset)
+{
+ return readl_relaxed(reg_base + offset);
+}
/*
* Allow only one person to hold it open
@@ -51,10 +77,10 @@ static int sa1100dog_open(struct inode *inode, struct file *file)
return -EBUSY;
/* Activate SA1100 Watchdog timer */
- writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
- writel_relaxed(OSSR_M3, OSSR);
- writel_relaxed(OWER_WME, OWER);
- writel_relaxed(readl_relaxed(OIER) | OIER_E3, OIER);
+ sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
+ sa1100_wr(OSSR_M3, REG_OSSR);
+ sa1100_wr(OWER_WME, REG_OWER);
+ sa1100_wr(sa1100_rd(REG_OIER) | OIER_E3, REG_OIER);
return stream_open(inode, file);
}
@@ -62,7 +88,7 @@ static int sa1100dog_open(struct inode *inode, struct file *file)
* The watchdog cannot be disabled.
*
* Previous comments suggested that turning off the interrupt by
- * clearing OIER[E3] would prevent the watchdog timing out but this
+ * clearing REG_OIER[E3] would prevent the watchdog timing out but this
* does not appear to be true (at least on the PXA255).
*/
static int sa1100dog_release(struct inode *inode, struct file *file)
@@ -77,7 +103,7 @@ static ssize_t sa1100dog_write(struct file *file, const char __user *data,
{
if (len)
/* Refresh OSMR3 timer. */
- writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
+ sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
return len;
}
@@ -111,7 +137,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
break;
case WDIOC_KEEPALIVE:
- writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
+ sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
ret = 0;
break;
@@ -126,7 +152,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
}
pre_margin = oscr_freq * time;
- writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
+ sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
fallthrough;
case WDIOC_GETTIMEOUT:
@@ -152,12 +178,22 @@ static struct miscdevice sa1100dog_miscdev = {
.fops = &sa1100dog_fops,
};
-static int margin __initdata = 60; /* (secs) Default is 1 minute */
+static int margin = 60; /* (secs) Default is 1 minute */
static struct clk *clk;
-static int __init sa1100dog_init(void)
+static int sa1100dog_probe(struct platform_device *pdev)
{
int ret;
+ int *platform_data;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+ reg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ ret = PTR_ERR_OR_ZERO(reg_base);
+ if (ret)
+ return ret;
clk = clk_get(NULL, "OSTIMER0");
if (IS_ERR(clk)) {
@@ -175,13 +211,9 @@ static int __init sa1100dog_init(void)
oscr_freq = clk_get_rate(clk);
- /*
- * Read the reset status, and save it for later. If
- * we suspend, RCSR will be cleared, and the watchdog
- * reset reason will be lost.
- */
- boot_status = (reset_status & RESET_STATUS_WATCHDOG) ?
- WDIOF_CARDRESET : 0;
+ platform_data = pdev->dev.platform_data;
+ if (platform_data && *platform_data)
+ boot_status = WDIOF_CARDRESET;
pre_margin = oscr_freq * margin;
ret = misc_register(&sa1100dog_miscdev);
@@ -197,15 +229,21 @@ err:
return ret;
}
-static void __exit sa1100dog_exit(void)
+static int sa1100dog_remove(struct platform_device *pdev)
{
misc_deregister(&sa1100dog_miscdev);
clk_disable_unprepare(clk);
clk_put(clk);
+
+ return 0;
}
-module_init(sa1100dog_init);
-module_exit(sa1100dog_exit);
+struct platform_driver sa1100dog_driver = {
+ .driver.name = "sa1100_wdt",
+ .probe = sa1100dog_probe,
+ .remove = sa1100dog_remove,
+};
+module_platform_driver(sa1100dog_driver);
MODULE_AUTHOR("Oleg Drokin <green@crimea.edu>");
MODULE_DESCRIPTION("SA1100/PXA2xx Watchdog");
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index dbeb2146c968..f9479a3fe2a6 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -272,6 +272,7 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
watchdog_set_nowayout(&wdt->wdd, nowayout);
watchdog_set_drvdata(&wdt->wdd, wdt);
watchdog_set_restart_priority(&wdt->wdd, 128);
+ watchdog_stop_on_unregister(&wdt->wdd);
/*
* If 'timeout-sec' devicetree property is specified, use that.
diff --git a/drivers/watchdog/sunplus_wdt.c b/drivers/watchdog/sunplus_wdt.c
new file mode 100644
index 000000000000..e2d8c532bcb1
--- /dev/null
+++ b/drivers/watchdog/sunplus_wdt.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Sunplus Watchdog Driver
+ *
+ * Copyright (C) 2021 Sunplus Technology Co., Ltd.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/watchdog.h>
+
+#define WDT_CTRL 0x00
+#define WDT_CNT 0x04
+
+#define WDT_STOP 0x3877
+#define WDT_RESUME 0x4A4B
+#define WDT_CLRIRQ 0x7482
+#define WDT_UNLOCK 0xAB00
+#define WDT_LOCK 0xAB01
+#define WDT_CONMAX 0xDEAF
+
+/* TIMEOUT_MAX = 0xffff0 / 90 kHz = 11.65 s, so longer than 11 seconds will time out. */
+#define SP_WDT_MAX_TIMEOUT 11U
+#define SP_WDT_DEFAULT_TIMEOUT 10
+
+#define STC_CLK 90000
+
+#define DEVICE_NAME "sunplus-wdt"
+
+static unsigned int timeout;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+struct sp_wdt_priv {
+ struct watchdog_device wdev;
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rstc;
+};
+
+static int sp_wdt_restart(struct watchdog_device *wdev,
+ unsigned long action, void *data)
+{
+ struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ void __iomem *base = priv->base;
+
+ writel(WDT_STOP, base + WDT_CTRL);
+ writel(WDT_UNLOCK, base + WDT_CTRL);
+ writel(0x0001, base + WDT_CNT);
+ writel(WDT_LOCK, base + WDT_CTRL);
+ writel(WDT_RESUME, base + WDT_CTRL);
+
+ return 0;
+}
+
+static int sp_wdt_ping(struct watchdog_device *wdev)
+{
+ struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ void __iomem *base = priv->base;
+ u32 count;
+
+ if (wdev->timeout > SP_WDT_MAX_TIMEOUT) {
+ /* WDT_CONMAX sets the count to the maximum (down-counting). */
+ writel(WDT_CONMAX, base + WDT_CTRL);
+ } else {
+ writel(WDT_UNLOCK, base + WDT_CTRL);
+ /*
+		 * The watchdog timer is a 20-bit down-counter clocked by STC_CLK.
+		 * Bits [16:0] of this register come from bits [19:4] of the
+		 * watchdog timer counter.
+ */
+ count = (wdev->timeout * STC_CLK) >> 4;
+ writel(count, base + WDT_CNT);
+ writel(WDT_LOCK, base + WDT_CTRL);
+ }
+
+ return 0;
+}
+
+static int sp_wdt_stop(struct watchdog_device *wdev)
+{
+ struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ void __iomem *base = priv->base;
+
+ writel(WDT_STOP, base + WDT_CTRL);
+
+ return 0;
+}
+
+static int sp_wdt_start(struct watchdog_device *wdev)
+{
+ struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ void __iomem *base = priv->base;
+
+ writel(WDT_RESUME, base + WDT_CTRL);
+
+ return 0;
+}
+
+static unsigned int sp_wdt_get_timeleft(struct watchdog_device *wdev)
+{
+ struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ void __iomem *base = priv->base;
+ u32 val;
+
+ val = readl(base + WDT_CNT);
+ val &= 0xffff;
+ val = val << 4;
+
+ return val;
+}
+
+static const struct watchdog_info sp_wdt_info = {
+ .identity = DEVICE_NAME,
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+};
+
+static const struct watchdog_ops sp_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = sp_wdt_start,
+ .stop = sp_wdt_stop,
+ .ping = sp_wdt_ping,
+ .get_timeleft = sp_wdt_get_timeleft,
+ .restart = sp_wdt_restart,
+};
+
+static void sp_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static void sp_reset_control_assert(void *data)
+{
+ reset_control_assert(data);
+}
+
+static int sp_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_wdt_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to get clock\n");
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable clock\n");
+
+ ret = devm_add_action_or_reset(dev, sp_clk_disable_unprepare, priv->clk);
+ if (ret)
+ return ret;
+
+ /* The timer and watchdog share the STC reset */
+ priv->rstc = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(priv->rstc))
+ return dev_err_probe(dev, PTR_ERR(priv->rstc), "Failed to get reset\n");
+
+ reset_control_deassert(priv->rstc);
+
+ ret = devm_add_action_or_reset(dev, sp_reset_control_assert, priv->rstc);
+ if (ret)
+ return ret;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->wdev.info = &sp_wdt_info;
+ priv->wdev.ops = &sp_wdt_ops;
+ priv->wdev.timeout = SP_WDT_DEFAULT_TIMEOUT;
+ priv->wdev.max_hw_heartbeat_ms = SP_WDT_MAX_TIMEOUT * 1000;
+ priv->wdev.min_timeout = 1;
+ priv->wdev.parent = dev;
+
+ watchdog_set_drvdata(&priv->wdev, priv);
+ watchdog_init_timeout(&priv->wdev, timeout, dev);
+ watchdog_set_nowayout(&priv->wdev, nowayout);
+ watchdog_stop_on_reboot(&priv->wdev);
+ watchdog_set_restart_priority(&priv->wdev, 128);
+
+ return devm_watchdog_register_device(dev, &priv->wdev);
+}
+
+static const struct of_device_id sp_wdt_of_match[] = {
+ {.compatible = "sunplus,sp7021-wdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sp_wdt_of_match);
+
+static struct platform_driver sp_wdt_driver = {
+ .probe = sp_wdt_probe,
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = sp_wdt_of_match,
+ },
+};
+
+module_platform_driver(sp_wdt_driver);
+
+MODULE_AUTHOR("Xiantao Hu <xt.hu@cqplus1.com>");
+MODULE_DESCRIPTION("Sunplus Watchdog Timer Driver");
+MODULE_LICENSE("GPL");
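
For reference, the timeout handling in sp_wdt_ping() above boils down to shifting the tick count right by 4. The following standalone C sketch (not part of the patch; timeout_to_count() is a made-up name for illustration) reproduces that arithmetic so the 11-second ceiling is easy to verify.

/* Standalone sketch, not part of the patch: the sunplus_wdt count arithmetic. */
#include <stdio.h>

#define STC_CLK			90000	/* Hz */
#define SP_WDT_MAX_TIMEOUT	11U	/* 0xffff0 / 90 kHz = 11.65 s */

/*
 * sp_wdt_ping() writes bits [19:4] of the 20-bit tick count to WDT_CNT,
 * i.e. (timeout * STC_CLK) >> 4.
 */
static unsigned int timeout_to_count(unsigned int timeout_s)
{
	return (timeout_s * STC_CLK) >> 4;
}

int main(void)
{
	unsigned int t;

	for (t = 1; t <= SP_WDT_MAX_TIMEOUT; t++)
		printf("timeout %2us -> WDT_CNT 0x%04x (%u ticks)\n",
		       t, timeout_to_count(t), timeout_to_count(t) << 4);
	return 0;
}
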
diff --git a/drivers/watchdog/ts4800_wdt.c b/drivers/watchdog/ts4800_wdt.c
index c137ad2bd5c3..0ea554c7cda5 100644
--- a/drivers/watchdog/ts4800_wdt.c
+++ b/drivers/watchdog/ts4800_wdt.c
@@ -125,13 +125,16 @@ static int ts4800_wdt_probe(struct platform_device *pdev)
ret = of_property_read_u32_index(np, "syscon", 1, &reg);
if (ret < 0) {
dev_err(dev, "no offset in syscon\n");
+ of_node_put(syscon_np);
return ret;
}
/* allocate memory for watchdog struct */
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
- if (!wdt)
+ if (!wdt) {
+ of_node_put(syscon_np);
return -ENOMEM;
+ }
/* set regmap and offset to know where to write */
wdt->feed_offset = reg;
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index 195c8c004b69..e6f95e99156d 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -344,6 +344,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
wdat->period = tbl->timer_period;
wdat->wdd.min_hw_heartbeat_ms = wdat->period * tbl->min_count;
wdat->wdd.max_hw_heartbeat_ms = wdat->period * tbl->max_count;
+ wdat->wdd.min_timeout = 1;
wdat->stopped_in_sleep = tbl->flags & ACPI_WDAT_STOPPED;
wdat->wdd.info = &wdat_wdt_info;
wdat->wdd.ops = &wdat_wdt_ops;
@@ -450,8 +451,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
* watchdog properly after it has opened the device. In some cases
* the BIOS default is too short and causes immediate reboot.
*/
- if (timeout * 1000 < wdat->wdd.min_hw_heartbeat_ms ||
- timeout * 1000 > wdat->wdd.max_hw_heartbeat_ms) {
+ if (watchdog_timeout_invalid(&wdat->wdd, timeout)) {
dev_warn(dev, "Invalid timeout %d given, using %d\n",
timeout, WDAT_DEFAULT_TIMEOUT);
timeout = WDAT_DEFAULT_TIMEOUT;
@@ -462,6 +462,8 @@ static int wdat_wdt_probe(struct platform_device *pdev)
return ret;
watchdog_set_nowayout(&wdat->wdd, nowayout);
+ watchdog_stop_on_reboot(&wdat->wdd);
+ watchdog_stop_on_unregister(&wdat->wdd);
return devm_watchdog_register_device(dev, &wdat->wdd);
}
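
The hunk above replaces an open-coded range check with watchdog_timeout_invalid() and sets min_timeout accordingly. As a rough approximation of what that helper enforces (the authoritative logic lives in include/linux/watchdog.h; this userspace sketch simplifies it, and the field names only mirror struct watchdog_device), a timeout below min_timeout is rejected, while devices that advertise max_hw_heartbeat_ms accept longer timeouts and rely on the core to ping the hardware early.

/* Rough userspace approximation of watchdog_timeout_invalid(); sketch only. */
#include <stdbool.h>
#include <stdio.h>

struct wd {
	unsigned int min_timeout;		/* seconds */
	unsigned int max_timeout;		/* seconds, 0 = not set */
	unsigned int max_hw_heartbeat_ms;	/* milliseconds, 0 = not set */
};

static bool timeout_invalid(const struct wd *wd, unsigned int t)
{
	/* Too small, or above max_timeout on devices without a HW heartbeat cap. */
	return t < wd->min_timeout ||
	       (!wd->max_hw_heartbeat_ms && wd->max_timeout && t > wd->max_timeout);
}

int main(void)
{
	/* Roughly what the WDAT hunk sets up: min_timeout = 1, HW heartbeat capped. */
	struct wd wd = { .min_timeout = 1, .max_hw_heartbeat_ms = 30000 };

	printf("0s  -> %s\n", timeout_invalid(&wd, 0) ? "invalid" : "ok");
	printf("60s -> %s\n", timeout_invalid(&wd, 60) ? "invalid" : "ok");
	return 0;
}
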
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 55acb32842a3..a15729beb9d1 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -175,8 +175,6 @@ undo:
static void __del_gref(struct gntalloc_gref *gref)
{
- unsigned long addr;
-
if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
uint8_t *tmp = kmap_local_page(gref->page);
tmp[gref->notify.pgoff] = 0;
@@ -190,10 +188,9 @@ static void __del_gref(struct gntalloc_gref *gref)
gref->notify.flags = 0;
if (gref->gref_id) {
- if (gref->page) {
- addr = (unsigned long)page_to_virt(gref->page);
- gnttab_end_foreign_access(gref->gref_id, addr);
- } else
+ if (gref->page)
+ gnttab_end_foreign_access(gref->gref_id, gref->page);
+ else
gnttab_free_grant_reference(gref->gref_id);
}
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index 91073b4e4a20..940e5e9e8a54 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -524,7 +524,7 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
for (i = 0; i < count; i++)
if (refs[i] != INVALID_GRANT_REF)
- gnttab_end_foreign_access(refs[i], 0UL);
+ gnttab_end_foreign_access(refs[i], NULL);
}
static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 1a1aec0a88a1..7a18292540bc 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -430,13 +430,13 @@ int gnttab_try_end_foreign_access(grant_ref_t ref)
}
EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
-void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page)
+void gnttab_end_foreign_access(grant_ref_t ref, struct page *page)
{
if (gnttab_try_end_foreign_access(ref)) {
- if (page != 0)
- put_page(virt_to_page(page));
+ if (page)
+ put_page(page);
} else
- gnttab_add_deferred(ref, page ? virt_to_page(page) : NULL);
+ gnttab_add_deferred(ref, page);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
@@ -632,7 +632,7 @@ int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
if (xen_auto_xlat_grant_frames.count)
return -EINVAL;
- vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
+ vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
if (vaddr == NULL) {
pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
&addr);
@@ -640,7 +640,7 @@ int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
}
pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
if (!pfn) {
- xen_unmap(vaddr);
+ memunmap(vaddr);
return -ENOMEM;
}
for (i = 0; i < max_nr_gframes; i++)
@@ -659,7 +659,7 @@ void gnttab_free_auto_xlat_frames(void)
if (!xen_auto_xlat_grant_frames.count)
return;
kfree(xen_auto_xlat_grant_frames.pfn);
- xen_unmap(xen_auto_xlat_grant_frames.vaddr);
+ memunmap(xen_auto_xlat_grant_frames.vaddr);
xen_auto_xlat_grant_frames.pfn = NULL;
xen_auto_xlat_grant_frames.count = 0;
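
The grant-table change above switches gnttab_end_foreign_access() from taking a virtual address to taking the backing struct page (or NULL). A hedged caller-side sketch follows; example_release_grants() is a hypothetical helper, and NULL page entries simply mean there is no page to put.

/* Hedged sketch, not part of the patch: releasing grants under the new API. */
#include <linux/mm.h>
#include <xen/grant_table.h>

static void example_release_grants(grant_ref_t *refs, struct page **pages,
				   unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (refs[i] == INVALID_GRANT_REF)
			continue;
		/*
		 * New prototype: the backing struct page (or NULL) is passed
		 * directly; the helper no longer calls virt_to_page() on a
		 * caller-supplied virtual address.
		 */
		gnttab_end_foreign_access(refs[i], pages ? pages[i] : NULL);
		refs[i] = INVALID_GRANT_REF;
	}
}
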
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index e254ed19488f..1826e8e67125 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -238,8 +238,8 @@ static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
spin_unlock(&bedata->socket_lock);
for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
- gnttab_end_foreign_access(map->active.ring->ref[i], 0);
- gnttab_end_foreign_access(map->active.ref, 0);
+ gnttab_end_foreign_access(map->active.ring->ref[i], NULL);
+ gnttab_end_foreign_access(map->active.ref, NULL);
free_page((unsigned long)map->active.ring);
kfree(map);
@@ -1117,7 +1117,7 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
}
}
if (bedata->ref != -1)
- gnttab_end_foreign_access(bedata->ref, 0);
+ gnttab_end_foreign_access(bedata->ref, NULL);
kfree(bedata->ring.sring);
kfree(bedata);
xenbus_switch_state(dev, XenbusStateClosed);
diff --git a/drivers/xen/xen-front-pgdir-shbuf.c b/drivers/xen/xen-front-pgdir-shbuf.c
index b6433761d42c..bef8d72a6ca6 100644
--- a/drivers/xen/xen-front-pgdir-shbuf.c
+++ b/drivers/xen/xen-front-pgdir-shbuf.c
@@ -135,7 +135,7 @@ void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
for (i = 0; i < buf->num_grefs; i++)
if (buf->grefs[i] != INVALID_GRANT_REF)
- gnttab_end_foreign_access(buf->grefs[i], 0UL);
+ gnttab_end_foreign_access(buf->grefs[i], NULL);
}
kfree(buf->grefs);
kfree(buf->directory);
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index d6fdd2d209d3..d5f3f763717e 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -439,7 +439,7 @@ void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
for (i = 0; i < nr_pages; i++) {
if (grefs[i] != INVALID_GRANT_REF) {
- gnttab_end_foreign_access(grefs[i], 0);
+ gnttab_end_foreign_access(grefs[i], NULL);
grefs[i] = INVALID_GRANT_REF;
}
}
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index d367f2bd2b93..58b732dcbfb8 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -752,8 +752,8 @@ static void xenbus_probe(void)
xenstored_ready = 1;
if (!xen_store_interface) {
- xen_store_interface = xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
- XEN_PAGE_SIZE);
+ xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+ XEN_PAGE_SIZE, MEMREMAP_WB);
/*
* Now it is safe to free the IRQ used for xenstore late
* initialization. No need to unbind: it is about to be
@@ -1009,8 +1009,8 @@ static int __init xenbus_init(void)
#endif
xen_store_gfn = (unsigned long)v;
xen_store_interface =
- xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
- XEN_PAGE_SIZE);
+ memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+ XEN_PAGE_SIZE, MEMREMAP_WB);
if (xen_store_interface->connection != XENSTORE_CONNECTED)
wait = true;
}
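
The Xen-specific xen_remap()/xen_unmap() wrappers are replaced here by the generic memremap()/memunmap() with write-back caching. A minimal sketch of that pattern, with the hypothetical example_touch_shared() standing in for the real callers; memremap() returns NULL on failure, as the error paths above assume.

/* Minimal sketch, not part of the patch: the memremap()/memunmap() pattern. */
#include <linux/errno.h>
#include <linux/io.h>

static int example_touch_shared(phys_addr_t addr, size_t size)
{
	void *vaddr = memremap(addr, size, MEMREMAP_WB);

	if (!vaddr)
		return -ENOMEM;	/* memremap() returns NULL on failure */

	/* ... access the shared frames through vaddr ... */

	memunmap(vaddr);
	return 0;
}
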
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 32dff7ba3dda..21e154516bf2 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -58,7 +58,7 @@ config ARCH_USE_GNU_PROPERTY
config BINFMT_ELF_FDPIC
bool "Kernel support for FDPIC ELF binaries"
default y if !BINFMT_ELF
- depends on (ARM || (SUPERH && !MMU))
+ depends on ARM || ((M68K || SUPERH) && !MMU)
select ELFCORE
help
ELF FDPIC binaries are based on ELF, but allow the individual load
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 94aa7356248e..79f6b74336d2 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -463,8 +463,11 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
}
/* skip if starts before the current position */
- if (offset < curr)
+ if (offset < curr) {
+ if (next > curr)
+ ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
continue;
+ }
/* found the next entry */
if (!dir_emit(ctx, dire->u.name, nlen,
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 7584aa6e5025..e5221be6eb55 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -256,6 +256,7 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
struct iov_iter iter;
ssize_t err = 0;
size_t len;
+ int mode;
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
@@ -264,7 +265,8 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
goto out;
/* We need to fetch the inline data. */
- req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
+ mode = ceph_try_to_choose_auth_mds(inode, CEPH_STAT_CAP_INLINE_DATA);
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out;
@@ -604,8 +606,10 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
ceph_wbc.truncate_seq, ceph_wbc.truncate_size,
true);
- if (IS_ERR(req))
+ if (IS_ERR(req)) {
+ redirty_page_for_writepage(wbc, page);
return PTR_ERR(req);
+ }
set_page_writeback(page);
if (caching)
@@ -1644,7 +1648,7 @@ int ceph_uninline_data(struct file *file)
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- struct ceph_osd_request *req;
+ struct ceph_osd_request *req = NULL;
struct ceph_cap_flush *prealloc_cf;
struct folio *folio = NULL;
u64 inline_version = CEPH_INLINE_NONE;
@@ -1652,10 +1656,23 @@ int ceph_uninline_data(struct file *file)
int err = 0;
u64 len;
+ spin_lock(&ci->i_ceph_lock);
+ inline_version = ci->i_inline_version;
+ spin_unlock(&ci->i_ceph_lock);
+
+ dout("uninline_data %p %llx.%llx inline_version %llu\n",
+ inode, ceph_vinop(inode), inline_version);
+
+ if (inline_version == CEPH_INLINE_NONE)
+ return 0;
+
prealloc_cf = ceph_alloc_cap_flush();
if (!prealloc_cf)
return -ENOMEM;
+ if (inline_version == 1) /* initial version, no data */
+ goto out_uninline;
+
folio = read_mapping_folio(inode->i_mapping, 0, file);
if (IS_ERR(folio)) {
err = PTR_ERR(folio);
@@ -1664,17 +1681,6 @@ int ceph_uninline_data(struct file *file)
folio_lock(folio);
- spin_lock(&ci->i_ceph_lock);
- inline_version = ci->i_inline_version;
- spin_unlock(&ci->i_ceph_lock);
-
- dout("uninline_data %p %llx.%llx inline_version %llu\n",
- inode, ceph_vinop(inode), inline_version);
-
- if (inline_version == 1 || /* initial version, no data */
- inline_version == CEPH_INLINE_NONE)
- goto out_unlock;
-
len = i_size_read(inode);
if (len > folio_size(folio))
len = folio_size(folio);
@@ -1739,6 +1745,7 @@ int ceph_uninline_data(struct file *file)
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
req->r_end_latency, len, err);
+out_uninline:
if (!err) {
int dirty;
@@ -1757,8 +1764,10 @@ out_put_req:
if (err == -ECANCELED)
err = 0;
out_unlock:
- folio_unlock(folio);
- folio_put(folio);
+ if (folio) {
+ folio_unlock(folio);
+ folio_put(folio);
+ }
out:
ceph_free_cap_flush(prealloc_cf);
dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
@@ -1777,7 +1786,6 @@ int ceph_mmap(struct file *file, struct vm_area_struct *vma)
if (!mapping->a_ops->read_folio)
return -ENOEXEC;
- file_accessed(file);
vma->vm_ops = &ceph_vmops;
return 0;
}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 5c14ef04e474..bf2e94005598 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1577,7 +1577,7 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
while (first_tid <= last_tid) {
struct ceph_cap *cap = ci->i_auth_cap;
- struct ceph_cap_flush *cf;
+ struct ceph_cap_flush *cf = NULL, *iter;
int ret;
if (!(cap && cap->session == session)) {
@@ -1587,8 +1587,9 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
}
ret = -ENOENT;
- list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
- if (cf->tid >= first_tid) {
+ list_for_each_entry(iter, &ci->i_cap_flush_list, i_list) {
+ if (iter->tid >= first_tid) {
+ cf = iter;
ret = 0;
break;
}
@@ -1910,6 +1911,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct rb_node *p;
bool queue_invalidate = false;
bool tried_invalidate = false;
+ bool queue_writeback = false;
if (session)
ceph_get_mds_session(session);
@@ -2062,10 +2064,27 @@ retry:
}
/* completed revocation? going down and there are no caps? */
- if (revoking && (revoking & cap_used) == 0) {
- dout("completed revocation of %s\n",
- ceph_cap_string(cap->implemented & ~cap->issued));
- goto ack;
+ if (revoking) {
+ if ((revoking & cap_used) == 0) {
+ dout("completed revocation of %s\n",
+ ceph_cap_string(cap->implemented & ~cap->issued));
+ goto ack;
+ }
+
+ /*
+ * If "i_wrbuffer_ref" was increased by mmap or a generic cache
+ * write just before ceph_check_caps() is called, revoking the Fb
+ * capability will fail this time. We would then have to wait for
+ * the BDI's delayed work to flush the dirty pages and release
+ * "i_wrbuffer_ref", which can take up to 5 seconds. That means the
+ * MDS may need up to 5 seconds to finish the Fb capability
+ * revocation.
+ *
+ * Let's queue a writeback for it.
+ */
+ if (S_ISREG(inode->i_mode) && ci->i_wrbuffer_ref &&
+ (revoking & CEPH_CAP_FILE_BUFFER))
+ queue_writeback = true;
}
/* want more caps from mds? */
@@ -2135,6 +2154,8 @@ ack:
spin_unlock(&ci->i_ceph_lock);
ceph_put_mds_session(session);
+ if (queue_writeback)
+ ceph_queue_writeback(inode);
if (queue_invalidate)
ceph_queue_invalidate(inode);
}
@@ -2218,9 +2239,9 @@ static int caps_are_flushed(struct inode *inode, u64 flush_tid)
}
/*
- * wait for any unsafe requests to complete.
+ * flush the mdlog and wait for any unsafe requests to complete.
*/
-static int unsafe_request_wait(struct inode *inode)
+static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode)
{
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
@@ -2336,7 +2357,7 @@ retry:
kfree(sessions);
}
- dout("unsafe_request_wait %p wait on tid %llu %llu\n",
+ dout("%s %p wait on tid %llu %llu\n", __func__,
inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL);
if (req1) {
ret = !wait_for_completion_timeout(&req1->r_safe_completion,
@@ -2380,7 +2401,7 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
dirty = try_flush_caps(inode, &flush_tid);
dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
- err = unsafe_request_wait(inode);
+ err = flush_mdlog_and_wait_inode_unsafe_requests(inode);
/*
* only wait on non-file metadata writeback (the mds
@@ -3182,10 +3203,9 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
struct ceph_snap_context *snapc)
{
struct inode *inode = &ci->vfs_inode;
- struct ceph_cap_snap *capsnap = NULL;
+ struct ceph_cap_snap *capsnap = NULL, *iter;
int put = 0;
bool last = false;
- bool found = false;
bool flush_snaps = false;
bool complete_capsnap = false;
@@ -3212,14 +3232,14 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
last ? " LAST" : "");
} else {
- list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
- if (capsnap->context == snapc) {
- found = true;
+ list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
+ if (iter->context == snapc) {
+ capsnap = iter;
break;
}
}
- if (!found) {
+ if (!capsnap) {
/*
* The capsnap should already be removed when removing
* auth cap in the case of a forced unmount.
@@ -3769,8 +3789,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
u64 follows = le64_to_cpu(m->snap_follows);
- struct ceph_cap_snap *capsnap;
- bool flushed = false;
+ struct ceph_cap_snap *capsnap = NULL, *iter;
bool wake_ci = false;
bool wake_mdsc = false;
@@ -3778,26 +3797,26 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
inode, ci, session->s_mds, follows);
spin_lock(&ci->i_ceph_lock);
- list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
- if (capsnap->follows == follows) {
- if (capsnap->cap_flush.tid != flush_tid) {
+ list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
+ if (iter->follows == follows) {
+ if (iter->cap_flush.tid != flush_tid) {
dout(" cap_snap %p follows %lld tid %lld !="
- " %lld\n", capsnap, follows,
- flush_tid, capsnap->cap_flush.tid);
+ " %lld\n", iter, follows,
+ flush_tid, iter->cap_flush.tid);
break;
}
- flushed = true;
+ capsnap = iter;
break;
} else {
dout(" skipping cap_snap %p follows %lld\n",
- capsnap, capsnap->follows);
+ iter, iter->follows);
}
}
- if (flushed)
+ if (capsnap)
ceph_remove_capsnap(inode, capsnap, &wake_ci, &wake_mdsc);
spin_unlock(&ci->i_ceph_lock);
- if (flushed) {
+ if (capsnap) {
ceph_put_snap_context(capsnap->context);
ceph_put_cap_snap(capsnap);
if (wake_ci)
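
Several of the caps.c hunks above rewrite list searches so that the list_for_each_entry() cursor is never used after the loop, publishing the match through a separate NULL-initialised pointer instead. A minimal sketch of that idiom follows; struct item and find_item() are made up purely for illustration.

/* Hedged sketch, not part of the patch: the list-iterator idiom used above. */
#include <linux/list.h>

struct item {
	struct list_head node;
	int key;
};

static struct item *find_item(struct list_head *head, int key)
{
	struct item *found = NULL, *iter;

	list_for_each_entry(iter, head, node) {
		if (iter->key == key) {
			found = iter;
			break;
		}
	}
	/* Only 'found' may be used here; 'iter' is not a real entry on a miss. */
	return found;
}
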
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 63113e2a4890..b7e9cac3aeef 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -578,7 +578,7 @@ void ceph_evict_inode(struct inode *inode)
__ceph_remove_caps(ci);
- if (__ceph_has_any_quota(ci))
+ if (__ceph_has_quota(ci, QUOTA_GET_ANY))
ceph_adjust_quota_realms_count(inode, false);
/*
@@ -1466,10 +1466,12 @@ retry_lookup:
} else if (have_lease) {
if (d_unhashed(dn))
d_add(dn, NULL);
+ }
+
+ if (!d_unhashed(dn) && have_lease)
update_dentry_lease(dir, dn,
rinfo->dlease, session,
req->r_request_started);
- }
goto done;
}
@@ -1884,7 +1886,6 @@ static void ceph_do_invalidate_pages(struct inode *inode)
orig_gen = ci->i_rdcache_gen;
spin_unlock(&ci->i_ceph_lock);
- ceph_fscache_invalidate(inode, false);
if (invalidate_inode_pages2(inode->i_mapping) < 0) {
pr_err("invalidate_inode_pages2 %llx.%llx failed\n",
ceph_vinop(inode));
@@ -2258,6 +2259,30 @@ int ceph_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
return err;
}
+int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
+{
+ int issued = ceph_caps_issued(ceph_inode(inode));
+
+ /*
+ * If any 'x' caps are issued we can just choose the auth MDS
+ * instead of a random replica MDS, because the loner client can
+ * only be granted 'x' caps while the Locker is in the LOCK_EXEC
+ * state. If we sent the getattr request to a replica MDS instead,
+ * it would have to auth-pin and try to rdlock from the auth MDS,
+ * forcing the auth MDS to transition the Locker state to LOCK_SYNC
+ * and back again afterwards.
+ *
+ * This Locker state transition is expensive and usually requires
+ * revoking caps from clients.
+ */
+ if (((mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL))
+ || (mask & CEPH_STAT_RSTAT))
+ return USE_AUTH_MDS;
+ else
+ return USE_ANY_MDS;
+}
+
/*
* Verify that we have a lease on the given mask. If not,
* do a getattr against an mds.
@@ -2281,7 +2306,7 @@ int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
return 0;
- mode = (mask & CEPH_STAT_RSTAT) ? USE_AUTH_MDS : USE_ANY_MDS;
+ mode = ceph_try_to_choose_auth_mds(inode, mask);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -2423,7 +2448,7 @@ int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
return -ESTALE;
/* Skip the getattr altogether if we're asked not to sync */
- if (!(flags & AT_STATX_DONT_SYNC)) {
+ if ((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) {
err = ceph_do_getattr(inode,
statx_to_caps(request_mask, inode->i_mode),
flags & AT_STATX_FORCE_SYNC);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 00c3de177dd6..f5d110d90b77 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -437,7 +437,7 @@ static int ceph_parse_deleg_inos(void **p, void *end,
ceph_decode_32_safe(p, end, sets, bad);
dout("got %u sets of delegated inodes\n", sets);
while (sets--) {
- u64 start, len, ino;
+ u64 start, len;
ceph_decode_64_safe(p, end, start, bad);
ceph_decode_64_safe(p, end, len, bad);
@@ -449,7 +449,7 @@ static int ceph_parse_deleg_inos(void **p, void *end,
continue;
}
while (len--) {
- int err = xa_insert(&s->s_delegated_inos, ino = start++,
+ int err = xa_insert(&s->s_delegated_inos, start++,
DELEGATED_INO_AVAILABLE,
GFP_KERNEL);
if (!err) {
@@ -2651,7 +2651,28 @@ static int __prepare_send_request(struct ceph_mds_session *session,
struct ceph_mds_client *mdsc = session->s_mdsc;
struct ceph_mds_request_head_old *rhead;
struct ceph_msg *msg;
- int flags = 0;
+ int flags = 0, max_retry;
+
+ /*
+ * In the kernel's 'ceph_mds_request' the type of 'r_attempts' is
+ * 'int', while in 'ceph_mds_request_head' the type of 'num_retry'
+ * is '__u8'. So if the request is retried more than 256 times, the
+ * MDS will receive an incorrect retry seq.
+ *
+ * In that case it is usually a bug in the MDS, and continuing to
+ * retry the request makes no sense.
+ *
+ * This may be fixed in ceph in the future, so avoid hard-coding
+ * the limit here.
+ */
+ max_retry = sizeof_field(struct ceph_mds_request_head, num_retry);
+ max_retry = 1 << (max_retry * BITS_PER_BYTE);
+ if (req->r_attempts >= max_retry) {
+ pr_warn_ratelimited("%s request tid %llu seq overflow\n",
+ __func__, req->r_tid);
+ return -EMULTIHOP;
+ }
req->r_attempts++;
if (req->r_inode) {
@@ -2663,7 +2684,7 @@ static int __prepare_send_request(struct ceph_mds_session *session,
else
req->r_sent_on_mseq = -1;
}
- dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
+ dout("%s %p tid %lld %s (attempt %d)\n", __func__, req,
req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
@@ -3265,6 +3286,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
int err = -EINVAL;
void *p = msg->front.iov_base;
void *end = p + msg->front.iov_len;
+ bool aborted = false;
ceph_decode_need(&p, end, 2*sizeof(u32), bad);
next_mds = ceph_decode_32(&p);
@@ -3273,16 +3295,41 @@ static void handle_forward(struct ceph_mds_client *mdsc,
mutex_lock(&mdsc->mutex);
req = lookup_get_request(mdsc, tid);
if (!req) {
+ mutex_unlock(&mdsc->mutex);
dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
- goto out; /* dup reply? */
+ return; /* dup reply? */
}
if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
dout("forward tid %llu aborted, unregistering\n", tid);
__unregister_request(mdsc, req);
} else if (fwd_seq <= req->r_num_fwd) {
- dout("forward tid %llu to mds%d - old seq %d <= %d\n",
- tid, next_mds, req->r_num_fwd, fwd_seq);
+ /*
+ * The type of 'num_fwd' in ceph's 'MClientRequestForward' is
+ * 'int32_t', while in 'ceph_mds_request_head' the type is '__u8'.
+ * So if the request bounces between MDSes more than 256 times, the
+ * client will get stuck.
+ *
+ * In that case it is usually a bug in the MDS, and continuing to
+ * bounce the request makes no sense.
+ *
+ * This may be fixed in ceph in the future, so avoid hard-coding
+ * the limit here.
+ */
+ int max = sizeof_field(struct ceph_mds_request_head, num_fwd);
+ max = 1 << (max * BITS_PER_BYTE);
+ if (req->r_num_fwd >= max) {
+ mutex_lock(&req->r_fill_mutex);
+ req->r_err = -EMULTIHOP;
+ set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
+ mutex_unlock(&req->r_fill_mutex);
+ aborted = true;
+ pr_warn_ratelimited("forward tid %llu seq overflow\n",
+ tid);
+ } else {
+ dout("forward tid %llu to mds%d - old seq %d <= %d\n",
+ tid, next_mds, req->r_num_fwd, fwd_seq);
+ }
} else {
/* resend. forward race not possible; mds would drop */
dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
@@ -3294,9 +3341,12 @@ static void handle_forward(struct ceph_mds_client *mdsc,
put_request_session(req);
__do_request(mdsc, req);
}
- ceph_mdsc_put_request(req);
-out:
mutex_unlock(&mdsc->mutex);
+
+ /* kick calling process */
+ if (aborted)
+ complete_request(mdsc, req);
+ ceph_mdsc_put_request(req);
return;
bad:
@@ -3375,13 +3425,17 @@ static void handle_session(struct ceph_mds_session *session,
}
if (msg_version >= 5) {
- u32 flags;
- /* version >= 4, struct_v, struct_cv, len, metric_spec */
- ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 2, bad);
+ u32 flags, len;
+
+ /* version >= 4 */
+ ceph_decode_skip_16(&p, end, bad); /* struct_v, struct_cv */
+ ceph_decode_32_safe(&p, end, len, bad); /* len */
+ ceph_decode_skip_n(&p, end, len, bad); /* metric_spec */
+
/* version >= 5, flags */
- ceph_decode_32_safe(&p, end, flags, bad);
+ ceph_decode_32_safe(&p, end, flags, bad);
if (flags & CEPH_SESSION_BLOCKLISTED) {
- pr_warn("mds%d session blocklisted\n", session->s_mds);
+ pr_warn("mds%d session blocklisted\n", session->s_mds);
blocklisted = true;
}
}
@@ -4396,12 +4450,6 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
memcpy((void *)(lease + 1) + 4,
dentry->d_name.name, dentry->d_name.len);
spin_unlock(&dentry->d_lock);
- /*
- * if this is a preemptive lease RELEASE, no need to
- * flush request stream, since the actual request will
- * soon follow.
- */
- msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
ceph_con_send(&session->s_con, msg);
}
@@ -4696,15 +4744,17 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
}
/*
- * wait for all write mds requests to flush.
+ * flush the mdlog and wait for all write mds requests to flush.
*/
-static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
+static void flush_mdlog_and_wait_mdsc_unsafe_requests(struct ceph_mds_client *mdsc,
+ u64 want_tid)
{
struct ceph_mds_request *req = NULL, *nextreq;
+ struct ceph_mds_session *last_session = NULL;
struct rb_node *n;
mutex_lock(&mdsc->mutex);
- dout("wait_unsafe_requests want %lld\n", want_tid);
+ dout("%s want %lld\n", __func__, want_tid);
restart:
req = __get_oldest_req(mdsc);
while (req && req->r_tid <= want_tid) {
@@ -4716,14 +4766,32 @@ restart:
nextreq = NULL;
if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
(req->r_op & CEPH_MDS_OP_WRITE)) {
+ struct ceph_mds_session *s = req->r_session;
+
+ if (!s) {
+ req = nextreq;
+ continue;
+ }
+
/* write op */
ceph_mdsc_get_request(req);
if (nextreq)
ceph_mdsc_get_request(nextreq);
+ s = ceph_get_mds_session(s);
mutex_unlock(&mdsc->mutex);
- dout("wait_unsafe_requests wait on %llu (want %llu)\n",
+
+ /* send flush mdlog request to MDS */
+ if (last_session != s) {
+ send_flush_mdlog(s);
+ ceph_put_mds_session(last_session);
+ last_session = s;
+ } else {
+ ceph_put_mds_session(s);
+ }
+ dout("%s wait on %llu (want %llu)\n", __func__,
req->r_tid, want_tid);
wait_for_completion(&req->r_safe_completion);
+
mutex_lock(&mdsc->mutex);
ceph_mdsc_put_request(req);
if (!nextreq)
@@ -4738,7 +4806,8 @@ restart:
req = nextreq;
}
mutex_unlock(&mdsc->mutex);
- dout("wait_unsafe_requests done\n");
+ ceph_put_mds_session(last_session);
+ dout("%s done\n", __func__);
}
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
@@ -4767,7 +4836,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
dout("sync want tid %lld flush_seq %lld\n",
want_tid, want_flush);
- wait_unsafe_requests(mdsc, want_tid);
+ flush_mdlog_and_wait_mdsc_unsafe_requests(mdsc, want_tid);
wait_caps_flush(mdsc, want_flush);
}
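
Both overflow guards above derive the 256 limit from the width of the on-wire field rather than hard-coding it. This standalone sketch shows the same computation; sizeof_field() and BITS_PER_BYTE are userspace stand-ins for the kernel macros, and the struct is illustrative only, not the real ceph_mds_request_head.

/* Standalone sketch, not part of the patch: deriving the retry/forward limit. */
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's sizeof_field() and BITS_PER_BYTE. */
#define sizeof_field(TYPE, MEMBER)	sizeof(((TYPE *)0)->MEMBER)
#define BITS_PER_BYTE			8

/* Illustrative only; not the real struct ceph_mds_request_head layout. */
struct mds_request_head_example {
	uint8_t num_retry;
	uint8_t num_fwd;
};

int main(void)
{
	unsigned int max_retry;

	max_retry = 1u << (sizeof_field(struct mds_request_head_example, num_retry) *
			   BITS_PER_BYTE);
	printf("max_retry = %u\n", max_retry);	/* prints 256 */
	return 0;
}
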
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 33497846e47e..1140aecd82ce 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -579,7 +579,7 @@ static inline int ceph_wait_on_async_create(struct inode *inode)
struct ceph_inode_info *ci = ceph_inode(inode);
return wait_on_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT,
- TASK_INTERRUPTIBLE);
+ TASK_KILLABLE);
}
extern u64 ceph_get_deleg_ino(struct ceph_mds_session *session);
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index a338a3ec0dc4..64592adfe48f 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -195,9 +195,9 @@ void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc)
/*
* This function walks through the snaprealm for an inode and returns the
- * ceph_snap_realm for the first snaprealm that has quotas set (either max_files
- * or max_bytes). If the root is reached, return the root ceph_snap_realm
- * instead.
+ * ceph_snap_realm for the first snaprealm that has quotas set (max_files,
+ * max_bytes, or any, depending on the 'which_quota' argument). If the root is
+ * reached, return the root ceph_snap_realm instead.
*
* Note that the caller is responsible for calling ceph_put_snap_realm() on the
* returned realm.
@@ -209,7 +209,9 @@ void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc)
* will be restarted.
*/
static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
- struct inode *inode, bool retry)
+ struct inode *inode,
+ enum quota_get_realm which_quota,
+ bool retry)
{
struct ceph_inode_info *ci = NULL;
struct ceph_snap_realm *realm, *next;
@@ -248,7 +250,7 @@ restart:
}
ci = ceph_inode(in);
- has_quota = __ceph_has_any_quota(ci);
+ has_quota = __ceph_has_quota(ci, which_quota);
iput(in);
next = realm->parent;
@@ -279,8 +281,8 @@ restart:
* dropped and we can then restart the whole operation.
*/
down_read(&mdsc->snap_rwsem);
- old_realm = get_quota_realm(mdsc, old, true);
- new_realm = get_quota_realm(mdsc, new, false);
+ old_realm = get_quota_realm(mdsc, old, QUOTA_GET_ANY, true);
+ new_realm = get_quota_realm(mdsc, new, QUOTA_GET_ANY, false);
if (PTR_ERR(new_realm) == -EAGAIN) {
up_read(&mdsc->snap_rwsem);
if (old_realm)
@@ -483,7 +485,8 @@ bool ceph_quota_update_statfs(struct ceph_fs_client *fsc, struct kstatfs *buf)
bool is_updated = false;
down_read(&mdsc->snap_rwsem);
- realm = get_quota_realm(mdsc, d_inode(fsc->sb->s_root), true);
+ realm = get_quota_realm(mdsc, d_inode(fsc->sb->s_root),
+ QUOTA_GET_MAX_BYTES, true);
up_read(&mdsc->snap_rwsem);
if (!realm)
return false;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index e6987d295079..b73b4f75462c 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -1119,6 +1119,7 @@ static int ceph_set_super(struct super_block *s, struct fs_context *fc)
s->s_time_gran = 1;
s->s_time_min = 0;
s->s_time_max = U32_MAX;
+ s->s_flags |= SB_NODIRATIME | SB_NOATIME;
ret = set_anon_super_fc(s, fc);
if (ret != 0)
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 20ceab74e871..dd7dac0f984a 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -1022,6 +1022,7 @@ static inline void ceph_queue_flush_snaps(struct inode *inode)
ceph_queue_inode_work(inode, CEPH_I_WORK_FLUSH_SNAPS);
}
+extern int ceph_try_to_choose_auth_mds(struct inode *inode, int mask);
extern int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
int mask, bool force);
static inline int ceph_do_getattr(struct inode *inode, int mask, bool force)
@@ -1278,9 +1279,29 @@ extern void ceph_fs_debugfs_init(struct ceph_fs_client *client);
extern void ceph_fs_debugfs_cleanup(struct ceph_fs_client *client);
/* quota.c */
-static inline bool __ceph_has_any_quota(struct ceph_inode_info *ci)
+
+enum quota_get_realm {
+ QUOTA_GET_MAX_FILES,
+ QUOTA_GET_MAX_BYTES,
+ QUOTA_GET_ANY
+};
+
+static inline bool __ceph_has_quota(struct ceph_inode_info *ci,
+ enum quota_get_realm which)
{
- return ci->i_max_files || ci->i_max_bytes;
+ bool has_quota = false;
+
+ switch (which) {
+ case QUOTA_GET_MAX_BYTES:
+ has_quota = !!ci->i_max_bytes;
+ break;
+ case QUOTA_GET_MAX_FILES:
+ has_quota = !!ci->i_max_files;
+ break;
+ default:
+ has_quota = !!(ci->i_max_files || ci->i_max_bytes);
+ }
+ return has_quota;
}
extern void ceph_adjust_quota_realms_count(struct inode *inode, bool inc);
@@ -1289,10 +1310,10 @@ static inline void __ceph_update_quota(struct ceph_inode_info *ci,
u64 max_bytes, u64 max_files)
{
bool had_quota, has_quota;
- had_quota = __ceph_has_any_quota(ci);
+ had_quota = __ceph_has_quota(ci, QUOTA_GET_ANY);
ci->i_max_bytes = max_bytes;
ci->i_max_files = max_files;
- has_quota = __ceph_has_any_quota(ci);
+ has_quota = __ceph_has_quota(ci, QUOTA_GET_ANY);
if (had_quota != has_quota)
ceph_adjust_quota_realms_count(&ci->vfs_inode, has_quota);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index afec84088471..8c2dc2c762a4 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -366,6 +366,14 @@ static ssize_t ceph_vxattrcb_auth_mds(struct ceph_inode_info *ci,
}
#define XATTR_RSTAT_FIELD(_type, _name) \
XATTR_NAME_CEPH(_type, _name, VXATTR_FLAG_RSTAT)
+#define XATTR_RSTAT_FIELD_UPDATABLE(_type, _name) \
+ { \
+ .name = CEPH_XATTR_NAME(_type, _name), \
+ .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
+ .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
+ .exists_cb = NULL, \
+ .flags = VXATTR_FLAG_RSTAT, \
+ }
#define XATTR_LAYOUT_FIELD(_type, _name, _field) \
{ \
.name = CEPH_XATTR_NAME2(_type, _name, _field), \
@@ -404,7 +412,7 @@ static struct ceph_vxattr ceph_dir_vxattrs[] = {
XATTR_RSTAT_FIELD(dir, rsubdirs),
XATTR_RSTAT_FIELD(dir, rsnaps),
XATTR_RSTAT_FIELD(dir, rbytes),
- XATTR_RSTAT_FIELD(dir, rctime),
+ XATTR_RSTAT_FIELD_UPDATABLE(dir, rctime),
{
.name = "ceph.dir.pin",
.name_size = sizeof("ceph.dir.pin"),
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index cc8fdcb35b71..8c9f2c00be72 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_CIFS) += cifs.o
cifs-y := trace.o cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o \
inode.o link.o misc.o netmisc.o smbencrypt.o transport.o \
cifs_unicode.o nterr.o cifsencrypt.o \
- readdir.o ioctl.o sess.o export.o smb1ops.o unc.o winucase.o \
+ readdir.o ioctl.o sess.o export.o unc.o winucase.o \
smb2ops.o smb2maperror.o smb2transport.o \
smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o \
dns_resolve.o cifs_spnego_negtokeninit.asn1.o asn1.o
@@ -30,3 +30,5 @@ cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o
cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o
cifs-$(CONFIG_CIFS_ROOT) += cifsroot.o
+
+cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 9d334816eac0..1dd995efd5b8 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -116,7 +116,8 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
tcon->ses->server->ops->dump_share_caps(m, tcon);
if (tcon->use_witness)
seq_puts(m, " Witness");
-
+ if (tcon->broken_sparse_sup)
+ seq_puts(m, " nosparse");
if (tcon->need_reconnect)
seq_puts(m, "\tDISCONNECTED ");
seq_putc(m, '\n');
@@ -386,7 +387,7 @@ skip_rdma:
(ses->serverNOS == NULL)) {
seq_printf(m, "\n\t%d) Address: %s Uses: %d Capability: 0x%x\tSession Status: %d ",
i, ses->ip_addr, ses->ses_count,
- ses->capabilities, ses->status);
+ ses->capabilities, ses->ses_status);
if (ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
seq_printf(m, "Guest ");
else if (ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
@@ -398,7 +399,7 @@ skip_rdma:
"\n\tSMB session status: %d ",
i, ses->ip_addr, ses->serverDomain,
ses->ses_count, ses->serverOS, ses->serverNOS,
- ses->capabilities, ses->status);
+ ses->capabilities, ses->ses_status);
}
seq_printf(m, "\n\tSecurity type: %s ",
@@ -418,6 +419,8 @@ skip_rdma:
spin_lock(&ses->chan_lock);
if (CIFS_CHAN_NEEDS_RECONNECT(ses, 0))
seq_puts(m, "\tPrimary channel: DISCONNECTED ");
+ if (CIFS_CHAN_IN_RECONNECT(ses, 0))
+ seq_puts(m, "\t[RECONNECTING] ");
if (ses->chan_count > 1) {
seq_printf(m, "\n\n\tExtra Channels: %zu ",
@@ -426,6 +429,8 @@ skip_rdma:
cifs_dump_channel(m, j, &ses->chans[j]);
if (CIFS_CHAN_NEEDS_RECONNECT(ses, j))
seq_puts(m, "\tDISCONNECTED ");
+ if (CIFS_CHAN_IN_RECONNECT(ses, j))
+ seq_puts(m, "\t[RECONNECTING] ");
}
}
spin_unlock(&ses->chan_lock);
diff --git a/fs/cifs/cifs_swn.c b/fs/cifs/cifs_swn.c
index 180c234c2f46..1e4c7cc5287f 100644
--- a/fs/cifs/cifs_swn.c
+++ b/fs/cifs/cifs_swn.c
@@ -465,7 +465,7 @@ static int cifs_swn_reconnect(struct cifs_tcon *tcon, struct sockaddr_storage *a
int ret = 0;
/* Store the reconnect address */
- mutex_lock(&tcon->ses->server->srv_mutex);
+ cifs_server_lock(tcon->ses->server);
if (cifs_sockaddr_equal(&tcon->ses->server->dstaddr, addr))
goto unlock;
@@ -501,7 +501,7 @@ static int cifs_swn_reconnect(struct cifs_tcon *tcon, struct sockaddr_storage *a
cifs_signal_cifsd_for_reconnect(tcon->ses->server, false);
unlock:
- mutex_unlock(&tcon->ses->server->srv_mutex);
+ cifs_server_unlock(tcon->ses->server);
return ret;
}
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 0912d8bbbac1..663cb9db4908 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -236,9 +236,9 @@ int cifs_verify_signature(struct smb_rqst *rqst,
cpu_to_le32(expected_sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
rc = cifs_calc_signature(rqst, server, what_we_think_sig_should_be);
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
if (rc)
return rc;
@@ -626,7 +626,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
memcpy(ses->auth_key.response + baselen, tiblob, tilen);
- mutex_lock(&ses->server->srv_mutex);
+ cifs_server_lock(ses->server);
rc = cifs_alloc_hash("hmac(md5)",
&ses->server->secmech.hmacmd5,
@@ -678,7 +678,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
unlock:
- mutex_unlock(&ses->server->srv_mutex);
+ cifs_server_unlock(ses->server);
setup_ntlmv2_rsp_ret:
kfree(tiblob);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 2b1a1c029c75..12c872800326 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -582,6 +582,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_puts(s, ",nocase");
if (tcon->nodelete)
seq_puts(s, ",nodelete");
+ if (cifs_sb->ctx->no_sparse)
+ seq_puts(s, ",nosparse");
if (tcon->local_lease)
seq_puts(s, ",locallease");
if (tcon->retry)
@@ -836,7 +838,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
int flags, struct smb3_fs_context *old_ctx)
{
int rc;
- struct super_block *sb;
+ struct super_block *sb = NULL;
struct cifs_sb_info *cifs_sb = NULL;
struct cifs_mnt_data mnt_data;
struct dentry *root;
@@ -932,9 +934,11 @@ out_super:
return root;
out:
if (cifs_sb) {
- kfree(cifs_sb->prepath);
- smb3_cleanup_fs_context(cifs_sb->ctx);
- kfree(cifs_sb);
+ if (!sb || IS_ERR(sb)) { /* otherwise kill_sb will handle */
+ kfree(cifs_sb->prepath);
+ smb3_cleanup_fs_context(cifs_sb->ctx);
+ kfree(cifs_sb);
+ }
}
return root;
}
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index c0542bdcd06b..dd7e070ca243 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -152,6 +152,7 @@ extern struct dentry *cifs_smb3_do_mount(struct file_system_type *fs_type,
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define SMB3_PRODUCT_BUILD 35
-#define CIFS_VERSION "2.36"
+/* when changing internal version - update following two lines at same time */
+#define SMB3_PRODUCT_BUILD 37
+#define CIFS_VERSION "2.37"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 8de977c359b1..f873379066c7 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -16,6 +16,7 @@
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/utsname.h>
+#include <linux/sched/mm.h>
#include <linux/netfs.h>
#include "cifs_fs_sb.h"
#include "cifsacl.h"
@@ -106,7 +107,7 @@
* CIFS vfs client Status information (based on what we know.)
*/
-/* associated with each tcp and smb session */
+/* associated with each connection */
enum statusEnum {
CifsNew = 0,
CifsGood,
@@ -114,8 +115,15 @@ enum statusEnum {
CifsNeedReconnect,
CifsNeedNegotiate,
CifsInNegotiate,
- CifsNeedSessSetup,
- CifsInSessSetup,
+};
+
+/* associated with each smb session */
+enum ses_status_enum {
+ SES_NEW = 0,
+ SES_GOOD,
+ SES_EXITING,
+ SES_NEED_RECON,
+ SES_IN_SETUP
};
/* associated with each tree connection to the server */
@@ -621,7 +629,8 @@ struct TCP_Server_Info {
unsigned int in_flight; /* number of requests on the wire to server */
unsigned int max_in_flight; /* max number of requests that were on wire */
spinlock_t req_lock; /* protect the two values above */
- struct mutex srv_mutex;
+ struct mutex _srv_mutex;
+ unsigned int nofs_flag;
struct task_struct *tsk;
char server_GUID[16];
__u16 sec_mode;
@@ -736,6 +745,22 @@ struct TCP_Server_Info {
#endif
};
+static inline void cifs_server_lock(struct TCP_Server_Info *server)
+{
+ unsigned int nofs_flag = memalloc_nofs_save();
+
+ mutex_lock(&server->_srv_mutex);
+ server->nofs_flag = nofs_flag;
+}
+
+static inline void cifs_server_unlock(struct TCP_Server_Info *server)
+{
+ unsigned int nofs_flag = server->nofs_flag;
+
+ mutex_unlock(&server->_srv_mutex);
+ memalloc_nofs_restore(nofs_flag);
+}
+
struct cifs_credits {
unsigned int value;
unsigned int instance;
@@ -915,6 +940,7 @@ struct cifs_server_iface {
};
struct cifs_chan {
+ unsigned int in_reconnect : 1; /* if session setup in progress for this channel */
struct TCP_Server_Info *server;
__u8 signkey[SMB3_SIGN_KEY_SIZE];
};
@@ -930,7 +956,7 @@ struct cifs_ses {
struct mutex session_mutex;
struct TCP_Server_Info *server; /* pointer to server info */
int ses_count; /* reference counter */
- enum statusEnum status; /* updates protected by cifs_tcp_ses_lock */
+ enum ses_status_enum ses_status; /* updates protected by cifs_tcp_ses_lock */
unsigned overrideSecFlg; /* if non-zero override global sec flags */
char *serverOS; /* name of operating system underlying server */
char *serverNOS; /* name of network operating system of server */
@@ -944,7 +970,7 @@ struct cifs_ses {
and after mount option parsing we fill it */
char *domainName;
char *password;
- char *workstation_name;
+ char workstation_name[CIFS_MAX_WORKSTATION_LEN];
struct session_key auth_key;
struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */
enum securityEnum sectype; /* what security flavor was specified? */
@@ -977,12 +1003,16 @@ struct cifs_ses {
#define CIFS_MAX_CHANNELS 16
#define CIFS_ALL_CHANNELS_SET(ses) \
((1UL << (ses)->chan_count) - 1)
+#define CIFS_ALL_CHANS_GOOD(ses) \
+ (!(ses)->chans_need_reconnect)
#define CIFS_ALL_CHANS_NEED_RECONNECT(ses) \
((ses)->chans_need_reconnect == CIFS_ALL_CHANNELS_SET(ses))
#define CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses) \
((ses)->chans_need_reconnect = CIFS_ALL_CHANNELS_SET(ses))
#define CIFS_CHAN_NEEDS_RECONNECT(ses, index) \
test_bit((index), &(ses)->chans_need_reconnect)
+#define CIFS_CHAN_IN_RECONNECT(ses, index) \
+ ((ses)->chans[(index)].in_reconnect)
struct cifs_chan chans[CIFS_MAX_CHANNELS];
size_t chan_count;
@@ -1009,6 +1039,58 @@ cap_unix(struct cifs_ses *ses)
return ses->server->vals->cap_unix & ses->capabilities;
}
+/*
+ * common struct for holding inode info when searching for or updating an
+ * inode with new info
+ */
+
+#define CIFS_FATTR_DFS_REFERRAL 0x1
+#define CIFS_FATTR_DELETE_PENDING 0x2
+#define CIFS_FATTR_NEED_REVAL 0x4
+#define CIFS_FATTR_INO_COLLISION 0x8
+#define CIFS_FATTR_UNKNOWN_NLINK 0x10
+#define CIFS_FATTR_FAKE_ROOT_INO 0x20
+
+struct cifs_fattr {
+ u32 cf_flags;
+ u32 cf_cifsattrs;
+ u64 cf_uniqueid;
+ u64 cf_eof;
+ u64 cf_bytes;
+ u64 cf_createtime;
+ kuid_t cf_uid;
+ kgid_t cf_gid;
+ umode_t cf_mode;
+ dev_t cf_rdev;
+ unsigned int cf_nlink;
+ unsigned int cf_dtype;
+ struct timespec64 cf_atime;
+ struct timespec64 cf_mtime;
+ struct timespec64 cf_ctime;
+ u32 cf_cifstag;
+};
+
+struct cached_dirent {
+ struct list_head entry;
+ char *name;
+ int namelen;
+ loff_t pos;
+
+ struct cifs_fattr fattr;
+};
+
+struct cached_dirents {
+ bool is_valid:1;
+ bool is_failed:1;
+ struct dir_context *ctx; /*
+ * Only used to make sure we only take entries
+ * from a single context. Never dereferenced.
+ */
+ struct mutex de_mutex;
+ int pos; /* Expected ctx->pos */
+ struct list_head entries;
+};
+
struct cached_fid {
bool is_valid:1; /* Do we have a useable root fid */
bool file_all_info_is_valid:1;
@@ -1021,6 +1103,7 @@ struct cached_fid {
struct dentry *dentry;
struct work_struct lease_break;
struct smb2_file_all_info file_all_info;
+ struct cached_dirents dirents;
};
/*
@@ -1641,37 +1724,6 @@ struct file_list {
struct cifsFileInfo *cfile;
};
-/*
- * common struct for holding inode info when searching for or updating an
- * inode with new info
- */
-
-#define CIFS_FATTR_DFS_REFERRAL 0x1
-#define CIFS_FATTR_DELETE_PENDING 0x2
-#define CIFS_FATTR_NEED_REVAL 0x4
-#define CIFS_FATTR_INO_COLLISION 0x8
-#define CIFS_FATTR_UNKNOWN_NLINK 0x10
-#define CIFS_FATTR_FAKE_ROOT_INO 0x20
-
-struct cifs_fattr {
- u32 cf_flags;
- u32 cf_cifsattrs;
- u64 cf_uniqueid;
- u64 cf_eof;
- u64 cf_bytes;
- u64 cf_createtime;
- kuid_t cf_uid;
- kgid_t cf_gid;
- umode_t cf_mode;
- dev_t cf_rdev;
- unsigned int cf_nlink;
- unsigned int cf_dtype;
- struct timespec64 cf_atime;
- struct timespec64 cf_mtime;
- struct timespec64 cf_ctime;
- u32 cf_cifstag;
-};
-
static inline void free_dfs_info_param(struct dfs_info3_param *param)
{
if (param) {
@@ -1911,11 +1963,13 @@ extern mempool_t *cifs_mid_poolp;
/* Operations for different SMB versions */
#define SMB1_VERSION_STRING "1.0"
+#define SMB20_VERSION_STRING "2.0"
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
extern struct smb_version_operations smb1_operations;
extern struct smb_version_values smb1_values;
-#define SMB20_VERSION_STRING "2.0"
extern struct smb_version_operations smb20_operations;
extern struct smb_version_values smb20_values;
+#endif /* CIFS_ALLOW_INSECURE_LEGACY */
#define SMB21_VERSION_STRING "2.1"
extern struct smb_version_operations smb21_operations;
extern struct smb_version_values smb21_values;
@@ -1979,4 +2033,22 @@ static inline bool cifs_is_referral_server(struct cifs_tcon *tcon,
return is_tcon_dfs(tcon) || (ref && (ref->flags & DFSREF_REFERRAL_SERVER));
}
+static inline u64 cifs_flock_len(struct file_lock *fl)
+{
+ return fl->fl_end == OFFSET_MAX ? 0 : fl->fl_end - fl->fl_start + 1;
+}
+
+static inline size_t ntlmssp_workstation_name_size(const struct cifs_ses *ses)
+{
+ if (WARN_ON_ONCE(!ses || !ses->server))
+ return 0;
+ /*
+ * Limit the workstation name to at most 15 chars when using insecure
+ * dialects, as some legacy servers require it during NTLMSSP.
+ */
+ if (ses->server->dialect <= SMB20_PROT_ID)
+ return min_t(size_t, sizeof(ses->workstation_name), RFC1001_NAME_LEN_WITH_NULL);
+ return sizeof(ses->workstation_name);
+}
+
#endif /* _CIFS_GLOB_H */
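
The new cifs_server_lock()/cifs_server_unlock() helpers above pair the srv_mutex with a memalloc_nofs scope, so allocations performed while the mutex is held cannot recurse into filesystem reclaim during reconnect work. A minimal sketch of the underlying memalloc_nofs_save()/restore() pattern follows; alloc_under_nofs_scope() is a made-up name used only for illustration.

/* Minimal sketch, not part of the patch: a memalloc_nofs allocation scope. */
#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_under_nofs_scope(size_t len)
{
	unsigned int nofs_flag = memalloc_nofs_save();
	void *buf;

	/* Inside the scope, GFP_KERNEL allocations behave like GFP_NOFS. */
	buf = kmalloc(len, GFP_KERNEL);

	memalloc_nofs_restore(nofs_flag);
	return buf;
}
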
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 0df3b24a0bf4..3b7366ec03c7 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -619,6 +619,15 @@ unsigned int
cifs_ses_get_chan_index(struct cifs_ses *ses,
struct TCP_Server_Info *server);
void
+cifs_chan_set_in_reconnect(struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
+void
+cifs_chan_clear_in_reconnect(struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
+bool
+cifs_chan_in_reconnect(struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
+void
cifs_chan_set_need_reconnect(struct cifs_ses *ses,
struct TCP_Server_Info *server);
void
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 47e927c4ff8d..6371b9eebdad 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -75,7 +75,7 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
/* only send once per connect */
spin_lock(&cifs_tcp_ses_lock);
- if ((tcon->ses->status != CifsGood) || (tcon->status != TID_NEED_RECON)) {
+ if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) {
spin_unlock(&cifs_tcp_ses_lock);
return;
}
@@ -2558,7 +2558,8 @@ CIFSSMBPosixLock(const unsigned int xid, struct cifs_tcon *tcon,
pLockData->fl_start = le64_to_cpu(parm_data->start);
pLockData->fl_end = pLockData->fl_start +
- le64_to_cpu(parm_data->length) - 1;
+ (le64_to_cpu(parm_data->length) ?
+ le64_to_cpu(parm_data->length) - 1 : 0);
pLockData->fl_pid = -le32_to_cpu(parm_data->pid);
}
}
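
cifs_flock_len() in cifsglob.h and the CIFSSMBPosixLock() fix above encode the same convention: an fl_end of OFFSET_MAX means "lock to end of file", which SMB represents as a length of 0, and a zero length coming back must not underflow fl_end. A standalone sketch of that mapping follows; the helper names and the local OFFSET_MAX definition are illustrative, not the kernel's.

/* Standalone sketch, not part of the patch: the flock <-> SMB length mapping. */
#include <stdint.h>
#include <stdio.h>

#define OFFSET_MAX	INT64_MAX	/* in the kernel this comes from <linux/fs.h> */

/* Mirrors cifs_flock_len(): a whole-file lock is sent as length 0. */
static uint64_t flock_to_smb_len(int64_t fl_start, int64_t fl_end)
{
	return fl_end == OFFSET_MAX ? 0 : (uint64_t)(fl_end - fl_start) + 1;
}

/* Mirrors the CIFSSMBPosixLock() fix: a zero length must not underflow. */
static int64_t smb_len_to_fl_end(int64_t fl_start, uint64_t len)
{
	return len ? fl_start + (int64_t)len - 1 : fl_start;
}

int main(void)
{
	printf("whole-file lock    -> len %llu\n",
	       (unsigned long long)flock_to_smb_len(0, OFFSET_MAX));
	printf("len 0 at start 100 -> fl_end %lld\n",
	       (long long)smb_len_to_fl_end(100, 0));
	return 0;
}
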
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 42e14f408856..d46702f5a663 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -148,7 +148,7 @@ static void cifs_resolve_server(struct work_struct *work)
struct TCP_Server_Info *server = container_of(work,
struct TCP_Server_Info, resolve.work);
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
/*
* Resolve the hostname again to make sure that IP address is up-to-date.
@@ -159,7 +159,7 @@ static void cifs_resolve_server(struct work_struct *work)
__func__, rc);
}
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
}
/*
@@ -241,7 +241,7 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses))
goto next_session;
- ses->status = CifsNeedReconnect;
+ ses->ses_status = SES_NEED_RECON;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
tcon->need_reconnect = true;
@@ -267,7 +267,7 @@ cifs_abort_connection(struct TCP_Server_Info *server)
/* do not want to be sending data on a socket we are freeing */
cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
if (server->ssocket) {
cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
server->ssocket->flags);
@@ -296,7 +296,7 @@ cifs_abort_connection(struct TCP_Server_Info *server)
mid->mid_flags |= MID_DELETED;
}
spin_unlock(&GlobalMid_Lock);
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
@@ -306,9 +306,9 @@ cifs_abort_connection(struct TCP_Server_Info *server)
}
if (cifs_rdma_enabled(server)) {
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
smbd_destroy(server);
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
}
}
@@ -359,7 +359,7 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
do {
try_to_freeze();
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
if (!cifs_swn_set_server_dstaddr(server)) {
/* resolve the hostname again to make sure that IP address is up-to-date */
@@ -372,7 +372,7 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
else
rc = generic_ip_connect(server);
if (rc) {
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
msleep(3000);
} else {
@@ -383,7 +383,7 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
server->tcpStatus = CifsNeedNegotiate;
spin_unlock(&cifs_tcp_ses_lock);
cifs_swn_reset_server_dstaddr(server);
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
}
} while (server->tcpStatus == CifsNeedReconnect);
@@ -488,12 +488,12 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
do {
try_to_freeze();
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
rc = reconnect_target_unlocked(server, &tl, &target_hint);
if (rc) {
/* Failed to reconnect socket */
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
msleep(3000);
continue;
@@ -510,7 +510,7 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
server->tcpStatus = CifsNeedNegotiate;
spin_unlock(&cifs_tcp_ses_lock);
cifs_swn_reset_server_dstaddr(server);
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
} while (server->tcpStatus == CifsNeedReconnect);
@@ -1565,7 +1565,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
init_waitqueue_head(&tcp_ses->response_q);
init_waitqueue_head(&tcp_ses->request_q);
INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
- mutex_init(&tcp_ses->srv_mutex);
+ mutex_init(&tcp_ses->_srv_mutex);
memcpy(tcp_ses->workstation_RFC1001_name,
ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
memcpy(tcp_ses->server_RFC1001_name,
@@ -1789,7 +1789,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
goto out;
}
- cifs_dbg(FYI, "IPC tcon rc = %d ipc tid = %d\n", rc, tcon->tid);
+ cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid);
ses->tcon_ipc = tcon;
out:
@@ -1828,7 +1828,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
- if (ses->status == CifsExiting)
+ if (ses->ses_status == SES_EXITING)
continue;
if (!match_session(ses, ctx))
continue;
@@ -1845,10 +1845,9 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
unsigned int rc, xid;
unsigned int chan_count;
struct TCP_Server_Info *server = ses->server;
- cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
spin_lock(&cifs_tcp_ses_lock);
- if (ses->status == CifsExiting) {
+ if (ses->ses_status == SES_EXITING) {
spin_unlock(&cifs_tcp_ses_lock);
return;
}
@@ -1864,13 +1863,13 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
/* ses_count can never go negative */
WARN_ON(ses->ses_count < 0);
- if (ses->status == CifsGood)
- ses->status = CifsExiting;
+ if (ses->ses_status == SES_GOOD)
+ ses->ses_status = SES_EXITING;
spin_unlock(&cifs_tcp_ses_lock);
cifs_free_ipc(ses);
- if (ses->status == CifsExiting && server->ops->logoff) {
+ if (ses->ses_status == SES_EXITING && server->ops->logoff) {
xid = get_xid();
rc = server->ops->logoff(xid, ses);
if (rc)
@@ -2037,18 +2036,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
}
}
- ctx->workstation_name = kstrdup(ses->workstation_name, GFP_KERNEL);
- if (!ctx->workstation_name) {
- cifs_dbg(FYI, "Unable to allocate memory for workstation_name\n");
- rc = -ENOMEM;
- kfree(ctx->username);
- ctx->username = NULL;
- kfree_sensitive(ctx->password);
- ctx->password = NULL;
- kfree(ctx->domainname);
- ctx->domainname = NULL;
- goto out_key_put;
- }
+ strscpy(ctx->workstation_name, ses->workstation_name, sizeof(ctx->workstation_name));
out_key_put:
up_read(&key->sem);
@@ -2090,7 +2078,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
ses = cifs_find_smb_ses(server, ctx);
if (ses) {
cifs_dbg(FYI, "Existing smb sess found (status=%d)\n",
- ses->status);
+ ses->ses_status);
spin_lock(&ses->chan_lock);
if (cifs_chan_needs_reconnect(ses, server)) {
@@ -2157,12 +2145,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
if (!ses->domainName)
goto get_ses_fail;
}
- if (ctx->workstation_name) {
- ses->workstation_name = kstrdup(ctx->workstation_name,
- GFP_KERNEL);
- if (!ses->workstation_name)
- goto get_ses_fail;
- }
+
+ strscpy(ses->workstation_name, ctx->workstation_name, sizeof(ses->workstation_name));
+
if (ctx->domainauto)
ses->domainAuto = ctx->domainauto;
ses->cred_uid = ctx->cred_uid;
@@ -2509,6 +2494,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
*/
tcon->retry = ctx->retry;
tcon->nocase = ctx->nocase;
+ tcon->broken_sparse_sup = ctx->no_sparse;
if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
tcon->nohandlecache = ctx->nohandlecache;
else
@@ -3420,8 +3406,9 @@ cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
}
/*
- * Check if path is remote (e.g. a DFS share). Return -EREMOTE if it is,
- * otherwise 0.
+ * Check if path is remote (i.e. a DFS share).
+ *
+ * Return -EREMOTE if it is, otherwise 0 or -errno.
*/
static int is_path_remote(struct mount_ctx *mnt_ctx)
{
@@ -3432,6 +3419,9 @@ static int is_path_remote(struct mount_ctx *mnt_ctx)
struct cifs_tcon *tcon = mnt_ctx->tcon;
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
char *full_path;
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ bool nodfs = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS;
+#endif
if (!server->ops->is_path_accessible)
return -EOPNOTSUPP;
@@ -3449,14 +3439,20 @@ static int is_path_remote(struct mount_ctx *mnt_ctx)
rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
full_path);
#ifdef CONFIG_CIFS_DFS_UPCALL
+ if (nodfs) {
+ if (rc == -EREMOTE)
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* path *might* exist with non-ASCII characters in DFS root
+  * try again with full path (only if nodfs is not set) */
if (rc == -ENOENT && is_tcon_dfs(tcon))
rc = cifs_dfs_query_info_nonascii_quirk(xid, tcon, cifs_sb,
full_path);
#endif
- if (rc != 0 && rc != -EREMOTE) {
- kfree(full_path);
- return rc;
- }
+ if (rc != 0 && rc != -EREMOTE)
+ goto out;
if (rc != -EREMOTE) {
rc = cifs_are_all_path_components_accessible(server, xid, tcon,
@@ -3468,6 +3464,7 @@ static int is_path_remote(struct mount_ctx *mnt_ctx)
}
}
+out:
kfree(full_path);
return rc;
}
@@ -3703,6 +3700,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
if (!isdfs)
goto out;
+ /* proceed as DFS mount */
uuid_gen(&mnt_ctx.mount_id);
rc = connect_dfs_root(&mnt_ctx, &tl);
dfs_cache_free_tgts(&tl);
@@ -3960,7 +3958,7 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
if (rc == 0) {
spin_lock(&cifs_tcp_ses_lock);
if (server->tcpStatus == CifsInNegotiate)
- server->tcpStatus = CifsNeedSessSetup;
+ server->tcpStatus = CifsGood;
else
rc = -EHOSTDOWN;
spin_unlock(&cifs_tcp_ses_lock);
@@ -3982,20 +3980,31 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
int rc = -ENOSYS;
bool is_binding = false;
- /* only send once per connect */
+
spin_lock(&cifs_tcp_ses_lock);
- if ((server->tcpStatus != CifsNeedSessSetup) &&
- (ses->status == CifsGood)) {
+ if (ses->ses_status != SES_GOOD &&
+ ses->ses_status != SES_NEW &&
+ ses->ses_status != SES_NEED_RECON) {
spin_unlock(&cifs_tcp_ses_lock);
return 0;
}
- server->tcpStatus = CifsInSessSetup;
- spin_unlock(&cifs_tcp_ses_lock);
+ /* only send once per connect */
spin_lock(&ses->chan_lock);
+ if (CIFS_ALL_CHANS_GOOD(ses) ||
+ cifs_chan_in_reconnect(ses, server)) {
+ spin_unlock(&ses->chan_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return 0;
+ }
is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+ cifs_chan_set_in_reconnect(ses, server);
spin_unlock(&ses->chan_lock);
+ if (!is_binding)
+ ses->ses_status = SES_IN_SETUP;
+ spin_unlock(&cifs_tcp_ses_lock);
+
if (!is_binding) {
ses->capabilities = server->capabilities;
if (!linuxExtEnabled)
@@ -4019,20 +4028,21 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
if (rc) {
cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
spin_lock(&cifs_tcp_ses_lock);
- if (server->tcpStatus == CifsInSessSetup)
- server->tcpStatus = CifsNeedSessSetup;
+ if (ses->ses_status == SES_IN_SETUP)
+ ses->ses_status = SES_NEED_RECON;
+ spin_lock(&ses->chan_lock);
+ cifs_chan_clear_in_reconnect(ses, server);
+ spin_unlock(&ses->chan_lock);
spin_unlock(&cifs_tcp_ses_lock);
} else {
spin_lock(&cifs_tcp_ses_lock);
- if (server->tcpStatus == CifsInSessSetup)
- server->tcpStatus = CifsGood;
- /* Even if one channel is active, session is in good state */
- ses->status = CifsGood;
- spin_unlock(&cifs_tcp_ses_lock);
-
+ if (ses->ses_status == SES_IN_SETUP)
+ ses->ses_status = SES_GOOD;
spin_lock(&ses->chan_lock);
+ cifs_chan_clear_in_reconnect(ses, server);
cifs_chan_clear_need_reconnect(ses, server);
spin_unlock(&ses->chan_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
}
return rc;
@@ -4497,7 +4507,7 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
/* only send once per connect */
spin_lock(&cifs_tcp_ses_lock);
- if (tcon->ses->status != CifsGood ||
+ if (tcon->ses->ses_status != SES_GOOD ||
(tcon->status != TID_NEW &&
tcon->status != TID_NEED_TCON)) {
spin_unlock(&cifs_tcp_ses_lock);
@@ -4565,7 +4575,7 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
/* only send once per connect */
spin_lock(&cifs_tcp_ses_lock);
- if (tcon->ses->status != CifsGood ||
+ if (tcon->ses->ses_status != SES_GOOD ||
(tcon->status != TID_NEW &&
tcon->status != TID_NEED_TCON)) {
spin_unlock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 956f8e5cf3e7..34a8f3baed5e 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -654,7 +654,7 @@ static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int h
return ce;
}
}
- return ERR_PTR(-EEXIST);
+ return ERR_PTR(-ENOENT);
}
/*
@@ -662,7 +662,7 @@ static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int h
*
* Use whole path components in the match. Must be called with htable_rw_lock held.
*
- * Return ERR_PTR(-EEXIST) if the entry is not found.
+ * Return ERR_PTR(-ENOENT) if the entry is not found.
*/
static struct cache_entry *lookup_cache_entry(const char *path)
{
@@ -710,7 +710,7 @@ static struct cache_entry *lookup_cache_entry(const char *path)
while (e > s && *e != sep)
e--;
}
- return ERR_PTR(-EEXIST);
+ return ERR_PTR(-ENOENT);
}
/**
@@ -1229,6 +1229,30 @@ void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
kref_put(&mg->refcount, mount_group_release);
}
+/* Extract share from DFS target and return a pointer to prefix path or ERR_PTR on error */
+static const char *parse_target_share(const char *target, char **share)
+{
+ const char *s, *seps = "/\\";
+ size_t len;
+
+ s = strpbrk(target + 1, seps);
+ if (!s)
+ return ERR_PTR(-EINVAL);
+
+ len = strcspn(s + 1, seps);
+ if (!len)
+ return ERR_PTR(-EINVAL);
+ s += len;
+
+ len = s - target + 1;
+ *share = kstrndup(target, len, GFP_KERNEL);
+ if (!*share)
+ return ERR_PTR(-ENOMEM);
+
+ s = target + len;
+ return s + strspn(s, seps);
+}
+
/**
* dfs_cache_get_tgt_share - parse a DFS target
*
@@ -1242,56 +1266,46 @@ void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
char **prefix)
{
- char *s, sep, *p;
- size_t len;
- size_t plen1, plen2;
+ char sep;
+ char *target_share;
+ char *ppath = NULL;
+ const char *target_ppath, *dfsref_ppath;
+ size_t target_pplen, dfsref_pplen;
+ size_t len, c;
if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
return -EINVAL;
- *share = NULL;
- *prefix = NULL;
-
sep = it->it_name[0];
if (sep != '\\' && sep != '/')
return -EINVAL;
- s = strchr(it->it_name + 1, sep);
- if (!s)
- return -EINVAL;
+ target_ppath = parse_target_share(it->it_name, &target_share);
+ if (IS_ERR(target_ppath))
+ return PTR_ERR(target_ppath);
- /* point to prefix in target node */
- s = strchrnul(s + 1, sep);
+ /* point to prefix in DFS referral path */
+ dfsref_ppath = path + it->it_path_consumed;
+ dfsref_ppath += strspn(dfsref_ppath, "/\\");
- /* extract target share */
- *share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL);
- if (!*share)
- return -ENOMEM;
+ target_pplen = strlen(target_ppath);
+ dfsref_pplen = strlen(dfsref_ppath);
- /* skip separator */
- if (*s)
- s++;
- /* point to prefix in DFS path */
- p = path + it->it_path_consumed;
- if (*p == sep)
- p++;
-
- /* merge prefix paths from DFS path and target node */
- plen1 = it->it_name + strlen(it->it_name) - s;
- plen2 = path + strlen(path) - p;
- if (plen1 || plen2) {
- len = plen1 + plen2 + 2;
- *prefix = kmalloc(len, GFP_KERNEL);
- if (!*prefix) {
- kfree(*share);
- *share = NULL;
+ /* merge prefix paths from DFS referral path and target node */
+ if (target_pplen || dfsref_pplen) {
+ len = target_pplen + dfsref_pplen + 2;
+ ppath = kzalloc(len, GFP_KERNEL);
+ if (!ppath) {
+ kfree(target_share);
return -ENOMEM;
}
- if (plen1)
- scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s, sep, (int)plen2, p);
- else
- strscpy(*prefix, p, len);
+ c = strscpy(ppath, target_ppath, len);
+ if (c && dfsref_pplen)
+ ppath[c] = sep;
+ strlcat(ppath, dfsref_ppath, len);
}
+ *share = target_share;
+ *prefix = ppath;
return 0;
}
@@ -1327,9 +1341,9 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
cifs_dbg(VFS, "%s: failed to convert address \'%s\'. skip address matching.\n",
__func__, ip);
} else {
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, &sa);
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
}
kfree(ip);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 580a847aa8b5..1618e0537d58 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1395,7 +1395,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
cifs_dbg(VFS, "Can't push all brlocks!\n");
break;
}
- length = 1 + flock->fl_end - flock->fl_start;
+ length = cifs_flock_len(flock);
if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
type = CIFS_RDLCK;
else
@@ -1511,7 +1511,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
bool wait_flag, bool posix_lck, unsigned int xid)
{
int rc = 0;
- __u64 length = 1 + flock->fl_end - flock->fl_start;
+ __u64 length = cifs_flock_len(flock);
struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
@@ -1609,7 +1609,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct cifsLockInfo *li, *tmp;
- __u64 length = 1 + flock->fl_end - flock->fl_start;
+ __u64 length = cifs_flock_len(flock);
struct list_head tmp_llist;
INIT_LIST_HEAD(&tmp_llist);
@@ -1713,7 +1713,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
unsigned int xid)
{
int rc = 0;
- __u64 length = 1 + flock->fl_end - flock->fl_start;
+ __u64 length = cifs_flock_len(flock);
struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
@@ -2777,8 +2777,11 @@ int cifs_flush(struct file *file, fl_owner_t id)
rc = filemap_write_and_wait(inode->i_mapping);
cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
- if (rc)
+ if (rc) {
+ /* get more nuanced writeback errors */
+ rc = filemap_check_wb_err(file->f_mapping, 0);
trace_cifs_flush_err(inode->i_ino, rc);
+ }
return rc;
}
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index a92e9eec521f..8dc0d923ef6a 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -119,6 +119,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
fsparam_flag_no("persistenthandles", Opt_persistent),
fsparam_flag_no("resilienthandles", Opt_resilient),
fsparam_flag_no("tcpnodelay", Opt_tcp_nodelay),
+ fsparam_flag("nosparse", Opt_nosparse),
fsparam_flag("domainauto", Opt_domainauto),
fsparam_flag("rdma", Opt_rdma),
fsparam_flag("modesid", Opt_modesid),
@@ -312,7 +313,6 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
new_ctx->password = NULL;
new_ctx->server_hostname = NULL;
new_ctx->domainname = NULL;
- new_ctx->workstation_name = NULL;
new_ctx->UNC = NULL;
new_ctx->source = NULL;
new_ctx->iocharset = NULL;
@@ -327,7 +327,6 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
DUP_CTX_STR(UNC);
DUP_CTX_STR(source);
DUP_CTX_STR(domainname);
- DUP_CTX_STR(workstation_name);
DUP_CTX_STR(nodename);
DUP_CTX_STR(iocharset);
@@ -766,8 +765,7 @@ static int smb3_verify_reconfigure_ctx(struct fs_context *fc,
cifs_errorf(fc, "can not change domainname during remount\n");
return -EINVAL;
}
- if (new_ctx->workstation_name &&
- (!old_ctx->workstation_name || strcmp(new_ctx->workstation_name, old_ctx->workstation_name))) {
+ if (strcmp(new_ctx->workstation_name, old_ctx->workstation_name)) {
cifs_errorf(fc, "can not change workstation_name during remount\n");
return -EINVAL;
}
@@ -814,7 +812,6 @@ static int smb3_reconfigure(struct fs_context *fc)
STEAL_STRING(cifs_sb, ctx, username);
STEAL_STRING(cifs_sb, ctx, password);
STEAL_STRING(cifs_sb, ctx, domainname);
- STEAL_STRING(cifs_sb, ctx, workstation_name);
STEAL_STRING(cifs_sb, ctx, nodename);
STEAL_STRING(cifs_sb, ctx, iocharset);
@@ -943,6 +940,9 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
case Opt_nolease:
ctx->no_lease = 1;
break;
+ case Opt_nosparse:
+ ctx->no_sparse = 1;
+ break;
case Opt_nodelete:
ctx->nodelete = 1;
break;
@@ -1467,22 +1467,15 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
int smb3_init_fs_context(struct fs_context *fc)
{
- int rc;
struct smb3_fs_context *ctx;
char *nodename = utsname()->nodename;
int i;
ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
- if (unlikely(!ctx)) {
- rc = -ENOMEM;
- goto err_exit;
- }
+ if (unlikely(!ctx))
+ return -ENOMEM;
- ctx->workstation_name = kstrdup(nodename, GFP_KERNEL);
- if (unlikely(!ctx->workstation_name)) {
- rc = -ENOMEM;
- goto err_exit;
- }
+ strscpy(ctx->workstation_name, nodename, sizeof(ctx->workstation_name));
/*
* does not have to be perfect mapping since field is
@@ -1555,14 +1548,6 @@ int smb3_init_fs_context(struct fs_context *fc)
fc->fs_private = ctx;
fc->ops = &smb3_fs_context_ops;
return 0;
-
-err_exit:
- if (ctx) {
- kfree(ctx->workstation_name);
- kfree(ctx);
- }
-
- return rc;
}
void
@@ -1588,8 +1573,6 @@ smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx)
ctx->source = NULL;
kfree(ctx->domainname);
ctx->domainname = NULL;
- kfree(ctx->workstation_name);
- ctx->workstation_name = NULL;
kfree(ctx->nodename);
ctx->nodename = NULL;
kfree(ctx->iocharset);
diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h
index e54090d9ef36..5f093cb7e9b9 100644
--- a/fs/cifs/fs_context.h
+++ b/fs/cifs/fs_context.h
@@ -62,6 +62,7 @@ enum cifs_param {
Opt_noblocksend,
Opt_noautotune,
Opt_nolease,
+ Opt_nosparse,
Opt_hard,
Opt_soft,
Opt_perm,
@@ -170,7 +171,7 @@ struct smb3_fs_context {
char *server_hostname;
char *UNC;
char *nodename;
- char *workstation_name;
+ char workstation_name[CIFS_MAX_WORKSTATION_LEN];
char *iocharset; /* local code page for mapping to and from Unicode */
char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */
char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */
@@ -222,6 +223,7 @@ struct smb3_fs_context {
bool noautotune:1;
bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
bool no_lease:1; /* disable requesting leases */
+ bool no_sparse:1; /* do not attempt to set files sparse */
bool fsc:1; /* enable fscache */
bool mfsymlinks:1; /* use Minshall+French Symlinks */
bool multiuser:1;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index afaf59c22193..35962a1a23b9 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -69,7 +69,7 @@ sesInfoAlloc(void)
ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
if (ret_buf) {
atomic_inc(&sesInfoAllocCount);
- ret_buf->status = CifsNew;
+ ret_buf->ses_status = SES_NEW;
++ret_buf->ses_count;
INIT_LIST_HEAD(&ret_buf->smb_ses_list);
INIT_LIST_HEAD(&ret_buf->tcon_list);
@@ -95,7 +95,6 @@ sesInfoFree(struct cifs_ses *buf_to_free)
kfree_sensitive(buf_to_free->password);
kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName);
- kfree(buf_to_free->workstation_name);
kfree_sensitive(buf_to_free->auth_key.response);
kfree(buf_to_free->iface_list);
kfree_sensitive(buf_to_free);
@@ -114,6 +113,8 @@ tconInfoAlloc(void)
kfree(ret_buf);
return NULL;
}
+ INIT_LIST_HEAD(&ret_buf->crfid.dirents.entries);
+ mutex_init(&ret_buf->crfid.dirents.de_mutex);
atomic_inc(&tconInfoAllocCount);
ret_buf->status = TID_NEW;
@@ -1309,7 +1310,7 @@ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
* for "\<server>\<dfsname>\<linkpath>" DFS reference,
* where <dfsname> contains non-ASCII unicode symbols.
*
- * Check such DFS reference and emulate -ENOENT if it is actual.
+ * Check such DFS reference.
*/
int cifs_dfs_query_info_nonascii_quirk(const unsigned int xid,
struct cifs_tcon *tcon,
@@ -1341,10 +1342,6 @@ int cifs_dfs_query_info_nonascii_quirk(const unsigned int xid,
cifs_dbg(FYI, "DFS ref '%s' is found, emulate -EREMOTE\n",
dfspath);
rc = -EREMOTE;
- } else if (rc == -EEXIST) {
- cifs_dbg(FYI, "DFS ref '%s' is not found, emulate -ENOENT\n",
- dfspath);
- rc = -ENOENT;
} else {
cifs_dbg(FYI, "%s: dfs_cache_find returned %d\n", __func__, rc);
}
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 1929e80c09ee..384cabdf47ca 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -840,9 +840,109 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
return rc;
}
+static bool emit_cached_dirents(struct cached_dirents *cde,
+ struct dir_context *ctx)
+{
+ struct cached_dirent *dirent;
+ int rc;
+
+ list_for_each_entry(dirent, &cde->entries, entry) {
+ if (ctx->pos >= dirent->pos)
+ continue;
+ ctx->pos = dirent->pos;
+ rc = dir_emit(ctx, dirent->name, dirent->namelen,
+ dirent->fattr.cf_uniqueid,
+ dirent->fattr.cf_dtype);
+ if (!rc)
+ return rc;
+ }
+ return true;
+}
+
+static void update_cached_dirents_count(struct cached_dirents *cde,
+ struct dir_context *ctx)
+{
+ if (cde->ctx != ctx)
+ return;
+ if (cde->is_valid || cde->is_failed)
+ return;
+
+ cde->pos++;
+}
+
+static void finished_cached_dirents_count(struct cached_dirents *cde,
+ struct dir_context *ctx)
+{
+ if (cde->ctx != ctx)
+ return;
+ if (cde->is_valid || cde->is_failed)
+ return;
+ if (ctx->pos != cde->pos)
+ return;
+
+ cde->is_valid = 1;
+}
+
+static void add_cached_dirent(struct cached_dirents *cde,
+ struct dir_context *ctx,
+ const char *name, int namelen,
+ struct cifs_fattr *fattr)
+{
+ struct cached_dirent *de;
+
+ if (cde->ctx != ctx)
+ return;
+ if (cde->is_valid || cde->is_failed)
+ return;
+ if (ctx->pos != cde->pos) {
+ cde->is_failed = 1;
+ return;
+ }
+ de = kzalloc(sizeof(*de), GFP_ATOMIC);
+ if (de == NULL) {
+ cde->is_failed = 1;
+ return;
+ }
+ de->namelen = namelen;
+ de->name = kstrndup(name, namelen, GFP_ATOMIC);
+ if (de->name == NULL) {
+ kfree(de);
+ cde->is_failed = 1;
+ return;
+ }
+ de->pos = ctx->pos;
+
+ memcpy(&de->fattr, fattr, sizeof(struct cifs_fattr));
+
+ list_add_tail(&de->entry, &cde->entries);
+}
+
+static bool cifs_dir_emit(struct dir_context *ctx,
+ const char *name, int namelen,
+ struct cifs_fattr *fattr,
+ struct cached_fid *cfid)
+{
+ bool rc;
+ ino_t ino = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
+
+ rc = dir_emit(ctx, name, namelen, ino, fattr->cf_dtype);
+ if (!rc)
+ return rc;
+
+ if (cfid) {
+ mutex_lock(&cfid->dirents.de_mutex);
+ add_cached_dirent(&cfid->dirents, ctx, name, namelen,
+ fattr);
+ mutex_unlock(&cfid->dirents.de_mutex);
+ }
+
+ return rc;
+}
+
static int cifs_filldir(char *find_entry, struct file *file,
- struct dir_context *ctx,
- char *scratch_buf, unsigned int max_len)
+ struct dir_context *ctx,
+ char *scratch_buf, unsigned int max_len,
+ struct cached_fid *cfid)
{
struct cifsFileInfo *file_info = file->private_data;
struct super_block *sb = file_inode(file)->i_sb;
@@ -851,7 +951,6 @@ static int cifs_filldir(char *find_entry, struct file *file,
struct cifs_fattr fattr;
struct qstr name;
int rc = 0;
- ino_t ino;
rc = cifs_fill_dirent(&de, find_entry, file_info->srch_inf.info_level,
file_info->srch_inf.unicode);
@@ -931,8 +1030,8 @@ static int cifs_filldir(char *find_entry, struct file *file,
cifs_prime_dcache(file_dentry(file), &name, &fattr);
- ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
- return !dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype);
+ return !cifs_dir_emit(ctx, name.name, name.len,
+ &fattr, cfid);
}
@@ -941,8 +1040,9 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
int rc = 0;
unsigned int xid;
int i;
+ struct tcon_link *tlink = NULL;
struct cifs_tcon *tcon;
- struct cifsFileInfo *cifsFile = NULL;
+ struct cifsFileInfo *cifsFile;
char *current_entry;
int num_to_fill = 0;
char *tmp_buf = NULL;
@@ -950,6 +1050,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
unsigned int max_len;
const char *full_path;
void *page = alloc_dentry_path();
+ struct cached_fid *cfid = NULL;
+ struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
xid = get_xid();
@@ -959,6 +1061,54 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
goto rddir2_exit;
}
+ if (file->private_data == NULL) {
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ goto cache_not_found;
+ tcon = tlink_tcon(tlink);
+ } else {
+ cifsFile = file->private_data;
+ tcon = tlink_tcon(cifsFile->tlink);
+ }
+
+ rc = open_cached_dir(xid, tcon, full_path, cifs_sb, &cfid);
+ cifs_put_tlink(tlink);
+ if (rc)
+ goto cache_not_found;
+
+ mutex_lock(&cfid->dirents.de_mutex);
+ /*
+ * If this was reading from the start of the directory
+ * we need to initialize scanning and storing the
+ * directory content.
+ */
+ if (ctx->pos == 0 && cfid->dirents.ctx == NULL) {
+ cfid->dirents.ctx = ctx;
+ cfid->dirents.pos = 2;
+ }
+ /*
+ * If we already have the entire directory cached then
+ * we can just serve the cache.
+ */
+ if (cfid->dirents.is_valid) {
+ if (!dir_emit_dots(file, ctx)) {
+ mutex_unlock(&cfid->dirents.de_mutex);
+ goto rddir2_exit;
+ }
+ emit_cached_dirents(&cfid->dirents, ctx);
+ mutex_unlock(&cfid->dirents.de_mutex);
+ goto rddir2_exit;
+ }
+ mutex_unlock(&cfid->dirents.de_mutex);
+
+ /* Drop the cache while calling initiate_cifs_search and
+ * find_cifs_entry in case there will be reconnects during
+ * query_directory.
+ */
+ close_cached_dir(cfid);
+ cfid = NULL;
+
+ cache_not_found:
/*
* Ensure FindFirst doesn't fail before doing filldir() for '.' and
* '..'. Otherwise we won't be able to notify VFS in case of failure.
@@ -977,7 +1127,6 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
is in current search buffer?
if it is before then restart search
if after then keep searching till we find it */
-
cifsFile = file->private_data;
if (cifsFile->srch_inf.endOfSearch) {
if (cifsFile->srch_inf.emptyDir) {
@@ -993,12 +1142,18 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
tcon = tlink_tcon(cifsFile->tlink);
rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path,
&current_entry, &num_to_fill);
+ open_cached_dir(xid, tcon, full_path, cifs_sb, &cfid);
if (rc) {
cifs_dbg(FYI, "fce error %d\n", rc);
goto rddir2_exit;
} else if (current_entry != NULL) {
cifs_dbg(FYI, "entry %lld found\n", ctx->pos);
} else {
+ if (cfid) {
+ mutex_lock(&cfid->dirents.de_mutex);
+ finished_cached_dirents_count(&cfid->dirents, ctx);
+ mutex_unlock(&cfid->dirents.de_mutex);
+ }
cifs_dbg(FYI, "Could not find entry\n");
goto rddir2_exit;
}
@@ -1028,7 +1183,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
*/
*tmp_buf = 0;
rc = cifs_filldir(current_entry, file, ctx,
- tmp_buf, max_len);
+ tmp_buf, max_len, cfid);
if (rc) {
if (rc > 0)
rc = 0;
@@ -1036,6 +1191,12 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
}
ctx->pos++;
+ if (cfid) {
+ mutex_lock(&cfid->dirents.de_mutex);
+ update_cached_dirents_count(&cfid->dirents, ctx);
+ mutex_unlock(&cfid->dirents.de_mutex);
+ }
+
if (ctx->pos ==
cifsFile->srch_inf.index_of_last_entry) {
cifs_dbg(FYI, "last entry in buf at pos %lld %s\n",
@@ -1050,6 +1211,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
kfree(tmp_buf);
rddir2_exit:
+ if (cfid)
+ close_cached_dir(cfid);
free_dentry_path(page);
free_xid(xid);
return rc;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 32f478c7a66d..3b7915af1f62 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -86,6 +86,33 @@ cifs_ses_get_chan_index(struct cifs_ses *ses,
}
void
+cifs_chan_set_in_reconnect(struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
+{
+ unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+
+ ses->chans[chan_index].in_reconnect = true;
+}
+
+void
+cifs_chan_clear_in_reconnect(struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
+{
+ unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+
+ ses->chans[chan_index].in_reconnect = false;
+}
+
+bool
+cifs_chan_in_reconnect(struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
+{
+ unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+
+ return CIFS_CHAN_IN_RECONNECT(ses, chan_index);
+}
+
+void
cifs_chan_set_need_reconnect(struct cifs_ses *ses,
struct TCP_Server_Info *server)
{
@@ -714,9 +741,9 @@ static int size_of_ntlmssp_blob(struct cifs_ses *ses, int base_size)
else
sz += sizeof(__le16);
- if (ses->workstation_name)
+ if (ses->workstation_name[0])
sz += sizeof(__le16) * strnlen(ses->workstation_name,
- CIFS_MAX_WORKSTATION_LEN);
+ ntlmssp_workstation_name_size(ses));
else
sz += sizeof(__le16);
@@ -960,7 +987,7 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
cifs_security_buffer_from_str(&sec_blob->WorkstationName,
ses->workstation_name,
- CIFS_MAX_WORKSTATION_LEN,
+ ntlmssp_workstation_name_size(ses),
*pbuffer, &tmp,
nls_cp);
@@ -1093,14 +1120,14 @@ sess_establish_session(struct sess_data *sess_data)
struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
if (!server->session_estab) {
if (server->sign) {
server->session_key.response =
kmemdup(ses->auth_key.response,
ses->auth_key.len, GFP_KERNEL);
if (!server->session_key.response) {
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
return -ENOMEM;
}
server->session_key.len =
@@ -1109,7 +1136,7 @@ sess_establish_session(struct sess_data *sess_data)
server->sequence_number = 0x2;
server->session_estab = true;
}
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
cifs_dbg(FYI, "CIFS session established successfully\n");
return 0;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index c71c9a44bef4..2e20ee4dab7b 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -38,10 +38,10 @@ send_nt_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
in_buf->WordCount = 0;
put_bcc(0, in_buf);
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
if (rc) {
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
return rc;
}
@@ -55,7 +55,7 @@ send_nt_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
if (rc < 0)
server->sequence_number--;
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
cifs_dbg(FYI, "issued NT_CANCEL for mid %u, rc = %d\n",
get_mid(in_buf), rc);
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index fe5bfa245fa7..8571a459c710 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -362,8 +362,6 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
num_rqst++;
if (cfile) {
- cifsFileInfo_put(cfile);
- cfile = NULL;
rc = compound_send_recv(xid, ses, server,
flags, num_rqst - 2,
&rqst[1], &resp_buftype[1],
@@ -514,8 +512,11 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
if (smb2_data == NULL)
return -ENOMEM;
+ if (strcmp(full_path, ""))
+ rc = -ENOENT;
+ else
+ rc = open_cached_dir(xid, tcon, full_path, cifs_sb, &cfid);
/* If it is a root and its handle is cached then use it */
- rc = open_cached_dir(xid, tcon, full_path, cifs_sb, &cfid);
if (!rc) {
if (tcon->crfid.file_all_info_is_valid) {
move_smb2_info_to_cifs(data,
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 3fe47a88f47d..17813c3d0c6e 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -656,6 +656,12 @@ smb2_is_valid_lease_break(char *buffer)
}
spin_unlock(&cifs_tcp_ses_lock);
cifs_dbg(FYI, "Can not process lease break - no lease matched\n");
+ trace_smb3_lease_not_found(le32_to_cpu(rsp->CurrentLeaseState),
+ le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
+ le64_to_cpu(rsp->hdr.SessionId),
+ *((u64 *)rsp->LeaseKey),
+ *((u64 *)&rsp->LeaseKey[8]));
+
return false;
}
@@ -726,6 +732,10 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
}
spin_unlock(&cifs_tcp_ses_lock);
cifs_dbg(FYI, "No file id matched, oplock break ignored\n");
+ trace_smb3_oplock_not_found(0 /* no xid */, rsp->PersistentFid,
+ le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
+ le64_to_cpu(rsp->hdr.SessionId));
+
return true;
}
@@ -798,7 +808,7 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
if (tcon->ses)
server = tcon->ses->server;
- cifs_server_dbg(FYI, "tid=%u: tcon is closing, skipping async close retry of fid %llu %llu\n",
+ cifs_server_dbg(FYI, "tid=0x%x: tcon is closing, skipping async close retry of fid %llu %llu\n",
tcon->tid, persistent_fid, volatile_fid);
return 0;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index d6aaeff4a30a..98a76fa791c0 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -699,6 +699,7 @@ smb2_close_cached_fid(struct kref *ref)
{
struct cached_fid *cfid = container_of(ref, struct cached_fid,
refcount);
+ struct cached_dirent *dirent, *q;
if (cfid->is_valid) {
cifs_dbg(FYI, "clear cached root file handle\n");
@@ -718,6 +719,21 @@ smb2_close_cached_fid(struct kref *ref)
dput(cfid->dentry);
cfid->dentry = NULL;
}
+ /*
+ * Delete all cached dirent names
+ */
+ mutex_lock(&cfid->dirents.de_mutex);
+ list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
+ list_del(&dirent->entry);
+ kfree(dirent->name);
+ kfree(dirent);
+ }
+ cfid->dirents.is_valid = 0;
+ cfid->dirents.is_failed = 0;
+ cfid->dirents.ctx = NULL;
+ cfid->dirents.pos = 0;
+ mutex_unlock(&cfid->dirents.de_mutex);
+
}
void close_cached_dir(struct cached_fid *cfid)
@@ -754,14 +770,15 @@ smb2_cached_lease_break(struct work_struct *work)
/*
* Open and cache a directory handle.
* Only supported for the root handle.
+ * On error, *cfid is not initialized.
*/
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
const char *path,
struct cifs_sb_info *cifs_sb,
struct cached_fid **cfid)
{
- struct cifs_ses *ses = tcon->ses;
- struct TCP_Server_Info *server = ses->server;
+ struct cifs_ses *ses;
+ struct TCP_Server_Info *server;
struct cifs_open_parms oparms;
struct smb2_create_rsp *o_rsp = NULL;
struct smb2_query_info_rsp *qi_rsp = NULL;
@@ -776,9 +793,13 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid *pfid;
struct dentry *dentry;
- if (tcon->nohandlecache)
+ if (tcon == NULL || tcon->nohandlecache ||
+ is_smb1_server(tcon->ses->server))
return -ENOTSUPP;
+ ses = tcon->ses;
+ server = ses->server;
+
if (cifs_sb->root == NULL)
return -ENOENT;
@@ -824,7 +845,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
oparms.tcon = tcon;
- oparms.create_options = cifs_create_options(cifs_sb, 0);
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE);
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
oparms.fid = pfid;
@@ -2695,7 +2716,8 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
memset(rsp_iov, 0, sizeof(rsp_iov));
- rc = open_cached_dir(xid, tcon, path, cifs_sb, &cfid);
+ if (!strcmp(path, ""))
+ open_cached_dir(xid, tcon, path, cifs_sb, &cfid); /* cfid null if open dir failed */
memset(&open_iov, 0, sizeof(open_iov));
rqst[0].rq_iov = open_iov;
@@ -3837,7 +3859,7 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
if (rc)
goto out;
- if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0)
+ if (cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)
smb2_set_sparse(xid, tcon, cfile, inode, false);
eof = cpu_to_le64(off + len);
@@ -4323,11 +4345,13 @@ smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
}
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static bool
smb2_is_read_op(__u32 oplock)
{
return oplock == SMB2_OPLOCK_LEVEL_II;
}
+#endif /* CIFS_ALLOW_INSECURE_LEGACY */
static bool
smb21_is_read_op(__u32 oplock)
@@ -5426,7 +5450,7 @@ out:
return rc;
}
-
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
struct smb_version_operations smb20_operations = {
.compare_fids = smb2_compare_fids,
.setup_request = smb2_setup_request,
@@ -5525,6 +5549,7 @@ struct smb_version_operations smb20_operations = {
.is_status_io_timeout = smb2_is_status_io_timeout,
.is_network_name_deleted = smb2_is_network_name_deleted,
};
+#endif /* CIFS_ALLOW_INSECURE_LEGACY */
struct smb_version_operations smb21_operations = {
.compare_fids = smb2_compare_fids,
@@ -5856,6 +5881,7 @@ struct smb_version_operations smb311_operations = {
.is_network_name_deleted = smb2_is_network_name_deleted,
};
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
struct smb_version_values smb20_values = {
.version_string = SMB20_VERSION_STRING,
.protocol_id = SMB20_PROT_ID,
@@ -5876,6 +5902,7 @@ struct smb_version_values smb20_values = {
.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
.create_lease_size = sizeof(struct create_lease),
};
+#endif /* ALLOW_INSECURE_LEGACY */
struct smb_version_values smb21_values = {
.version_string = SMB21_VERSION_STRING,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 1b7ad0c09566..0e8c85249579 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -179,7 +179,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
}
}
spin_unlock(&cifs_tcp_ses_lock);
- if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
+ if ((!tcon->ses) || (tcon->ses->ses_status == SES_EXITING) ||
(!tcon->ses->server) || !server)
return -EIO;
@@ -1369,13 +1369,13 @@ SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
if (server->ops->generate_signingkey) {
rc = server->ops->generate_signingkey(ses, server);
if (rc) {
cifs_dbg(FYI,
"SMB3 session key generation failed\n");
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
return rc;
}
}
@@ -1383,7 +1383,7 @@ SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
server->sequence_number = 0x2;
server->session_estab = true;
}
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
cifs_dbg(FYI, "SMB2/3 session established successfully\n");
return rc;
@@ -3899,7 +3899,8 @@ SMB2_echo(struct TCP_Server_Info *server)
cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id);
spin_lock(&cifs_tcp_ses_lock);
- if (server->tcpStatus == CifsNeedNegotiate) {
+ if (server->ops->need_neg &&
+ server->ops->need_neg(server)) {
spin_unlock(&cifs_tcp_ses_lock);
/* No need to send echo on newly established connections */
mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index d8c4388b190d..f57881b8464f 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -260,28 +260,6 @@ struct get_retrieval_pointers_refcount_rsp {
struct smb3_extents extents[];
} __packed;
-struct fsctl_set_integrity_information_req {
- __le16 ChecksumAlgorithm;
- __le16 Reserved;
- __le32 Flags;
-} __packed;
-
-struct fsctl_get_integrity_information_rsp {
- __le16 ChecksumAlgorithm;
- __le16 Reserved;
- __le32 Flags;
- __le32 ChecksumChunkSizeInBytes;
- __le32 ClusterSizeInBytes;
-} __packed;
-
-/* Integrity ChecksumAlgorithm choices for above */
-#define CHECKSUM_TYPE_NONE 0x0000
-#define CHECKSUM_TYPE_CRC64 0x0002
-#define CHECKSUM_TYPE_UNCHANGED 0xFFFF /* set only */
-
-/* Integrity flags for above */
-#define FSCTL_INTEGRITY_FLAG_CHECKSUM_ENFORCEMENT_OFF 0x00000001
-
/* See MS-DFSC 2.2.2 */
struct fsctl_get_dfs_referral_req {
__le16 MaxReferralLevel;
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 2af79093b78b..55e79f6ee78d 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -641,7 +641,8 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
if (!is_signed)
return 0;
spin_lock(&cifs_tcp_ses_lock);
- if (server->tcpStatus == CifsNeedNegotiate) {
+ if (server->ops->need_neg &&
+ server->ops->need_neg(server)) {
spin_unlock(&cifs_tcp_ses_lock);
return 0;
}
@@ -779,7 +780,7 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
return -EAGAIN;
}
- if (ses->status == CifsNew) {
+ if (ses->ses_status == SES_NEW) {
if ((shdr->Command != SMB2_SESSION_SETUP) &&
(shdr->Command != SMB2_NEGOTIATE)) {
spin_unlock(&cifs_tcp_ses_lock);
@@ -788,7 +789,7 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
/* else ok - we are setting up session */
}
- if (ses->status == CifsExiting) {
+ if (ses->ses_status == SES_EXITING) {
if (shdr->Command != SMB2_LOGOFF) {
spin_unlock(&cifs_tcp_ses_lock);
return -EAGAIN;
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index b3a1265711cc..5fbbec22bcc8 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -1350,7 +1350,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
wait_event(info->wait_send_pending,
atomic_read(&info->send_pending) == 0);
- /* It's not posssible for upper layer to get to reassembly */
+ /* It's not possible for upper layer to get to reassembly */
log_rdma_event(INFO, "drain the reassembly queue\n");
do {
spin_lock_irqsave(&info->reassembly_queue_lock, flags);
@@ -1382,9 +1382,9 @@ void smbd_destroy(struct TCP_Server_Info *server)
log_rdma_event(INFO, "freeing mr list\n");
wake_up_interruptible_all(&info->wait_mr);
while (atomic_read(&info->mr_used_count)) {
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
msleep(1000);
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
}
destroy_mr_list(info);
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index bc279616c513..2be5e0c8564d 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -158,6 +158,7 @@ DEFINE_SMB3_FD_EVENT(flush_enter);
DEFINE_SMB3_FD_EVENT(flush_done);
DEFINE_SMB3_FD_EVENT(close_enter);
DEFINE_SMB3_FD_EVENT(close_done);
+DEFINE_SMB3_FD_EVENT(oplock_not_found);
DECLARE_EVENT_CLASS(smb3_fd_err_class,
TP_PROTO(unsigned int xid,
@@ -814,6 +815,7 @@ DEFINE_EVENT(smb3_lease_done_class, smb3_##name, \
TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high))
DEFINE_SMB3_LEASE_DONE_EVENT(lease_done);
+DEFINE_SMB3_LEASE_DONE_EVENT(lease_not_found);
DECLARE_EVENT_CLASS(smb3_lease_err_class,
TP_PROTO(__u32 lease_state,
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index c667e6ddfe2f..bfc9bd55870a 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -726,7 +726,7 @@ static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
struct mid_q_entry **ppmidQ)
{
spin_lock(&cifs_tcp_ses_lock);
- if (ses->status == CifsNew) {
+ if (ses->ses_status == SES_NEW) {
if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
(in_buf->Command != SMB_COM_NEGOTIATE)) {
spin_unlock(&cifs_tcp_ses_lock);
@@ -735,7 +735,7 @@ static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
/* else ok - we are setting up session */
}
- if (ses->status == CifsExiting) {
+ if (ses->ses_status == SES_EXITING) {
/* check if SMB session is bad because we are setting it up */
if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
spin_unlock(&cifs_tcp_ses_lock);
@@ -822,7 +822,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
} else
instance = exist_credits->instance;
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
/*
* We can't use credits obtained from the previous session to send this
@@ -830,14 +830,14 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
* return -EAGAIN in such cases to let callers handle it.
*/
if (instance != server->reconnect_instance) {
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
add_credits_and_wake_if(server, &credits, optype);
return -EAGAIN;
}
mid = server->ops->setup_async_request(server, rqst);
if (IS_ERR(mid)) {
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
add_credits_and_wake_if(server, &credits, optype);
return PTR_ERR(mid);
}
@@ -868,7 +868,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
cifs_delete_mid(mid);
}
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
if (rc == 0)
return 0;
@@ -1109,7 +1109,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
* of smb data.
*/
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
/*
* All the parts of the compound chain belong obtained credits from the
@@ -1119,7 +1119,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
* handle it.
*/
if (instance != server->reconnect_instance) {
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
for (j = 0; j < num_rqst; j++)
add_credits(server, &credits[j], optype);
return -EAGAIN;
@@ -1131,7 +1131,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
revert_current_mid(server, i);
for (j = 0; j < i; j++)
cifs_delete_mid(midQ[j]);
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
/* Update # of requests on wire to server */
for (j = 0; j < num_rqst; j++)
@@ -1163,7 +1163,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
server->sequence_number -= 2;
}
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
/*
* If sending failed for some reason or it is an oplock break that we
@@ -1187,12 +1187,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
* Compounding is never used during session establish.
*/
spin_lock(&cifs_tcp_ses_lock);
- if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
+ if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
spin_unlock(&cifs_tcp_ses_lock);
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
spin_lock(&cifs_tcp_ses_lock);
}
@@ -1260,15 +1260,15 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
* Compounding is never used during session establish.
*/
spin_lock(&cifs_tcp_ses_lock);
- if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
+ if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
struct kvec iov = {
.iov_base = resp_iov[0].iov_base,
.iov_len = resp_iov[0].iov_len
};
spin_unlock(&cifs_tcp_ses_lock);
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
smb311_update_preauth_hash(ses, server, &iov, 1);
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
spin_lock(&cifs_tcp_ses_lock);
}
spin_unlock(&cifs_tcp_ses_lock);
@@ -1385,11 +1385,11 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
and avoid races inside tcp sendmsg code that could cause corruption
of smb data */
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
rc = allocate_mid(ses, in_buf, &midQ);
if (rc) {
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
/* Update # of requests on wire to server */
add_credits(server, &credits, 0);
return rc;
@@ -1397,7 +1397,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
if (rc) {
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
goto out;
}
@@ -1411,7 +1411,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
if (rc < 0)
server->sequence_number -= 2;
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
if (rc < 0)
goto out;
@@ -1530,18 +1530,18 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
and avoid races inside tcp sendmsg code that could cause corruption
of smb data */
- mutex_lock(&server->srv_mutex);
+ cifs_server_lock(server);
rc = allocate_mid(ses, in_buf, &midQ);
if (rc) {
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
return rc;
}
rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
if (rc) {
cifs_delete_mid(midQ);
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
return rc;
}
@@ -1554,7 +1554,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
if (rc < 0)
server->sequence_number -= 2;
- mutex_unlock(&server->srv_mutex);
+ cifs_server_unlock(server);
if (rc < 0) {
cifs_delete_mid(midQ);
diff --git a/fs/dax.c b/fs/dax.c
index 1ac12e877f4f..4155a6107fa1 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -722,7 +722,8 @@ static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter
int id;
id = dax_read_lock();
- rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, &kaddr, NULL);
+ rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
+ &kaddr, NULL);
if (rc < 0) {
dax_read_unlock(id);
return rc;
@@ -939,7 +940,7 @@ static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
id = dax_read_lock();
length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
- NULL, pfnp);
+ DAX_ACCESS, NULL, pfnp);
if (length < 0) {
rc = length;
goto out;
@@ -1048,7 +1049,7 @@ static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
void *kaddr;
long ret;
- ret = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
+ ret = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, NULL);
if (ret > 0) {
memset(kaddr + offset, 0, size);
dax_flush(dax_dev, kaddr + offset, size);
@@ -1165,6 +1166,7 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
const size_t size = ALIGN(length + offset, PAGE_SIZE);
pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
ssize_t map_len;
+ bool recovery = false;
void *kaddr;
if (fatal_signal_pending(current)) {
@@ -1173,7 +1175,14 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
}
map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
- &kaddr, NULL);
+ DAX_ACCESS, &kaddr, NULL);
+ if (map_len == -EIO && iov_iter_rw(iter) == WRITE) {
+ map_len = dax_direct_access(dax_dev, pgoff,
+ PHYS_PFN(size), DAX_RECOVERY_WRITE,
+ &kaddr, NULL);
+ if (map_len > 0)
+ recovery = true;
+ }
if (map_len < 0) {
ret = map_len;
break;
@@ -1185,7 +1194,10 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
if (map_len > end - pos)
map_len = end - pos;
- if (iov_iter_rw(iter) == WRITE)
+ if (recovery)
+ xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
+ map_len, iter);
+ else if (iov_iter_rw(iter) == WRITE)
xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
map_len, iter);
else
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index a5cc4ed2cd0d..8e01d89c3319 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -17,6 +17,7 @@ static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space
rreq->start = start;
rreq->len = len;
rreq->mapping = mapping;
+ rreq->inode = mapping->host;
INIT_LIST_HEAD(&rreq->subrequests);
refcount_set(&rreq->ref, 1);
return rreq;
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index bcc8335b46b3..95a403720e8c 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -288,7 +288,10 @@ static int erofs_fill_inode(struct inode *inode, int isdir)
}
if (erofs_inode_is_data_compressed(vi->datalayout)) {
- err = z_erofs_fill_inode(inode);
+ if (!erofs_is_fscache_mode(inode->i_sb))
+ err = z_erofs_fill_inode(inode);
+ else
+ err = -EOPNOTSUPP;
goto out_unlock;
}
inode->i_mapping->a_ops = &erofs_raw_access_aops;
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 95efc127b2ba..724bb57075f6 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -199,7 +199,6 @@ struct z_erofs_decompress_frontend {
struct z_erofs_pagevec_ctor vector;
struct z_erofs_pcluster *pcl, *tailpcl;
- struct z_erofs_collection *cl;
/* a pointer used to pick up inplace I/O pages */
struct page **icpage_ptr;
z_erofs_next_pcluster_t owned_head;
@@ -214,7 +213,7 @@ struct z_erofs_decompress_frontend {
#define DECOMPRESS_FRONTEND_INIT(__i) { \
.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
- .mode = COLLECT_PRIMARY_FOLLOWED }
+ .mode = COLLECT_PRIMARY_FOLLOWED, .backmost = true }
static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
static DEFINE_MUTEX(z_pagemap_global_lock);
@@ -357,7 +356,7 @@ static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
return false;
}
-/* callers must be with collection lock held */
+/* callers must be with pcluster lock held */
static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
struct page *page, enum z_erofs_page_type type,
bool pvec_safereuse)
@@ -372,7 +371,7 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
ret = z_erofs_pagevec_enqueue(&fe->vector, page, type,
pvec_safereuse);
- fe->cl->vcnt += (unsigned int)ret;
+ fe->pcl->vcnt += (unsigned int)ret;
return ret ? 0 : -EAGAIN;
}
@@ -405,12 +404,11 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
f->mode = COLLECT_PRIMARY;
}
-static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe,
- struct inode *inode,
- struct erofs_map_blocks *map)
+static int z_erofs_lookup_pcluster(struct z_erofs_decompress_frontend *fe,
+ struct inode *inode,
+ struct erofs_map_blocks *map)
{
struct z_erofs_pcluster *pcl = fe->pcl;
- struct z_erofs_collection *cl;
unsigned int length;
/* to avoid unexpected loop formed by corrupted images */
@@ -419,8 +417,7 @@ static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe,
return -EFSCORRUPTED;
}
- cl = z_erofs_primarycollection(pcl);
- if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
+ if (pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
@@ -443,23 +440,21 @@ static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe,
length = READ_ONCE(pcl->length);
}
}
- mutex_lock(&cl->lock);
+ mutex_lock(&pcl->lock);
/* used to check tail merging loop due to corrupted images */
if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
fe->tailpcl = pcl;
z_erofs_try_to_claim_pcluster(fe);
- fe->cl = cl;
return 0;
}
-static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe,
- struct inode *inode,
- struct erofs_map_blocks *map)
+static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe,
+ struct inode *inode,
+ struct erofs_map_blocks *map)
{
bool ztailpacking = map->m_flags & EROFS_MAP_META;
struct z_erofs_pcluster *pcl;
- struct z_erofs_collection *cl;
struct erofs_workgroup *grp;
int err;
@@ -482,17 +477,15 @@ static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe,
/* new pclusters should be claimed as type 1, primary and followed */
pcl->next = fe->owned_head;
+ pcl->pageofs_out = map->m_la & ~PAGE_MASK;
fe->mode = COLLECT_PRIMARY_FOLLOWED;
- cl = z_erofs_primarycollection(pcl);
- cl->pageofs = map->m_la & ~PAGE_MASK;
-
/*
* lock all primary followed works before visible to others
* and mutex_trylock *never* fails for a new pcluster.
*/
- mutex_init(&cl->lock);
- DBG_BUGON(!mutex_trylock(&cl->lock));
+ mutex_init(&pcl->lock);
+ DBG_BUGON(!mutex_trylock(&pcl->lock));
if (ztailpacking) {
pcl->obj.index = 0; /* which indicates ztailpacking */
@@ -519,11 +512,10 @@ static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe,
fe->tailpcl = pcl;
fe->owned_head = &pcl->next;
fe->pcl = pcl;
- fe->cl = cl;
return 0;
err_out:
- mutex_unlock(&cl->lock);
+ mutex_unlock(&pcl->lock);
z_erofs_free_pcluster(pcl);
return err;
}
@@ -535,9 +527,9 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe,
struct erofs_workgroup *grp;
int ret;
- DBG_BUGON(fe->cl);
+ DBG_BUGON(fe->pcl);
- /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */
+ /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
@@ -554,14 +546,14 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe,
fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
} else {
tailpacking:
- ret = z_erofs_register_collection(fe, inode, map);
+ ret = z_erofs_register_pcluster(fe, inode, map);
if (!ret)
goto out;
if (ret != -EEXIST)
return ret;
}
- ret = z_erofs_lookup_collection(fe, inode, map);
+ ret = z_erofs_lookup_pcluster(fe, inode, map);
if (ret) {
erofs_workgroup_put(&fe->pcl->obj);
return ret;
@@ -569,7 +561,7 @@ tailpacking:
out:
z_erofs_pagevec_ctor_init(&fe->vector, Z_EROFS_NR_INLINE_PAGEVECS,
- fe->cl->pagevec, fe->cl->vcnt);
+ fe->pcl->pagevec, fe->pcl->vcnt);
/* since file-backed online pages are traversed in reverse order */
fe->icpage_ptr = fe->pcl->compressed_pages +
z_erofs_pclusterpages(fe->pcl);
@@ -582,48 +574,36 @@ out:
*/
static void z_erofs_rcu_callback(struct rcu_head *head)
{
- struct z_erofs_collection *const cl =
- container_of(head, struct z_erofs_collection, rcu);
-
- z_erofs_free_pcluster(container_of(cl, struct z_erofs_pcluster,
- primary_collection));
+ z_erofs_free_pcluster(container_of(head,
+ struct z_erofs_pcluster, rcu));
}
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
struct z_erofs_pcluster *const pcl =
container_of(grp, struct z_erofs_pcluster, obj);
- struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);
-
- call_rcu(&cl->rcu, z_erofs_rcu_callback);
-}
-static void z_erofs_collection_put(struct z_erofs_collection *cl)
-{
- struct z_erofs_pcluster *const pcl =
- container_of(cl, struct z_erofs_pcluster, primary_collection);
-
- erofs_workgroup_put(&pcl->obj);
+ call_rcu(&pcl->rcu, z_erofs_rcu_callback);
}
static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
{
- struct z_erofs_collection *cl = fe->cl;
+ struct z_erofs_pcluster *pcl = fe->pcl;
- if (!cl)
+ if (!pcl)
return false;
z_erofs_pagevec_ctor_exit(&fe->vector, false);
- mutex_unlock(&cl->lock);
+ mutex_unlock(&pcl->lock);
/*
* if all pending pages are added, don't hold its reference
* any longer if the pcluster isn't hosted by ourselves.
*/
if (fe->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
- z_erofs_collection_put(cl);
+ erofs_workgroup_put(&pcl->obj);
- fe->cl = NULL;
+ fe->pcl = NULL;
return true;
}
@@ -663,28 +643,23 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
repeat:
cur = end - 1;
- /* lucky, within the range of the current map_blocks */
- if (offset + cur >= map->m_la &&
- offset + cur < map->m_la + map->m_llen) {
- /* didn't get a valid collection previously (very rare) */
- if (!fe->cl)
- goto restart_now;
- goto hitted;
- }
-
- /* go ahead the next map_blocks */
- erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);
-
- if (z_erofs_collector_end(fe))
- fe->backmost = false;
+ if (offset + cur < map->m_la ||
+ offset + cur >= map->m_la + map->m_llen) {
+ erofs_dbg("out-of-range map @ pos %llu", offset + cur);
- map->m_la = offset + cur;
- map->m_llen = 0;
- err = z_erofs_map_blocks_iter(inode, map, 0);
- if (err)
- goto err_out;
+ if (z_erofs_collector_end(fe))
+ fe->backmost = false;
+ map->m_la = offset + cur;
+ map->m_llen = 0;
+ err = z_erofs_map_blocks_iter(inode, map, 0);
+ if (err)
+ goto err_out;
+ } else {
+ if (fe->pcl)
+ goto hitted;
+ /* didn't get a valid pcluster previously (very rare) */
+ }
-restart_now:
if (!(map->m_flags & EROFS_MAP_MAPPED))
goto hitted;
@@ -766,7 +741,7 @@ retry:
/* bump up the number of spiltted parts of a page */
++spiltted;
/* also update nr_pages */
- fe->cl->nr_pages = max_t(pgoff_t, fe->cl->nr_pages, index + 1);
+ fe->pcl->nr_pages = max_t(pgoff_t, fe->pcl->nr_pages, index + 1);
next_part:
/* can be used for verification */
map->m_llen = offset + cur - map->m_la;
@@ -821,15 +796,13 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
enum z_erofs_page_type page_type;
bool overlapped, partial;
- struct z_erofs_collection *cl;
int err;
might_sleep();
- cl = z_erofs_primarycollection(pcl);
- DBG_BUGON(!READ_ONCE(cl->nr_pages));
+ DBG_BUGON(!READ_ONCE(pcl->nr_pages));
- mutex_lock(&cl->lock);
- nr_pages = cl->nr_pages;
+ mutex_lock(&pcl->lock);
+ nr_pages = pcl->nr_pages;
if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES) {
pages = pages_onstack;
@@ -857,9 +830,9 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
err = 0;
z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
- cl->pagevec, 0);
+ pcl->pagevec, 0);
- for (i = 0; i < cl->vcnt; ++i) {
+ for (i = 0; i < pcl->vcnt; ++i) {
unsigned int pagenr;
page = z_erofs_pagevec_dequeue(&ctor, &page_type);
@@ -945,11 +918,11 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
goto out;
llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
- if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
+ if (nr_pages << PAGE_SHIFT >= pcl->pageofs_out + llen) {
outputsize = llen;
partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
} else {
- outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;
+ outputsize = (nr_pages << PAGE_SHIFT) - pcl->pageofs_out;
partial = true;
}
@@ -963,7 +936,7 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
.in = compressed_pages,
.out = pages,
.pageofs_in = pcl->pageofs_in,
- .pageofs_out = cl->pageofs,
+ .pageofs_out = pcl->pageofs_out,
.inputsize = inputsize,
.outputsize = outputsize,
.alg = pcl->algorithmformat,
@@ -1012,16 +985,12 @@ out:
else if (pages != pages_onstack)
kvfree(pages);
- cl->nr_pages = 0;
- cl->vcnt = 0;
+ pcl->nr_pages = 0;
+ pcl->vcnt = 0;
- /* all cl locks MUST be taken before the following line */
+ /* pcluster lock MUST be taken before the following line */
WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
-
- /* all cl locks SHOULD be released right now */
- mutex_unlock(&cl->lock);
-
- z_erofs_collection_put(cl);
+ mutex_unlock(&pcl->lock);
return err;
}
@@ -1043,6 +1012,7 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
owned = READ_ONCE(pcl->next);
z_erofs_decompress_pcluster(io->sb, pcl, pagepool);
+ erofs_workgroup_put(&pcl->obj);
}
}
@@ -1466,22 +1436,19 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
struct page *page;
page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
- if (!page)
- goto skip;
-
- if (PageUptodate(page)) {
- unlock_page(page);
+ if (page) {
+ if (PageUptodate(page)) {
+ unlock_page(page);
+ } else {
+ err = z_erofs_do_read_page(f, page, pagepool);
+ if (err)
+ erofs_err(inode->i_sb,
+ "readmore error at page %lu @ nid %llu",
+ index, EROFS_I(inode)->nid);
+ }
put_page(page);
- goto skip;
}
- err = z_erofs_do_read_page(f, page, pagepool);
- if (err)
- erofs_err(inode->i_sb,
- "readmore error at page %lu @ nid %llu",
- index, EROFS_I(inode)->nid);
- put_page(page);
-skip:
if (cur < PAGE_SIZE)
break;
cur = (index << PAGE_SHIFT) - 1;
diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
index 800b11c53f57..58053bb5066f 100644
--- a/fs/erofs/zdata.h
+++ b/fs/erofs/zdata.h
@@ -12,21 +12,40 @@
#define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_NR_INLINE_PAGEVECS 3
+#define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001
+#define Z_EROFS_PCLUSTER_LENGTH_BIT 1
+
+/*
+ * let's leave a type here in case of introducing
+ * another tagged pointer later.
+ */
+typedef void *z_erofs_next_pcluster_t;
+
/*
* Structure fields follow one of the following exclusion rules.
*
* I: Modifiable by initialization/destruction paths and read-only
* for everyone else;
*
- * L: Field should be protected by pageset lock;
+ * L: Field should be protected by the pcluster lock;
*
* A: Field should be accessed / updated in atomic for parallelized code.
*/
-struct z_erofs_collection {
+struct z_erofs_pcluster {
+ struct erofs_workgroup obj;
struct mutex lock;
+ /* A: point to next chained pcluster or TAILs */
+ z_erofs_next_pcluster_t next;
+
+ /* A: lower limit of decompressed length and if full length or not */
+ unsigned int length;
+
/* I: page offset of start position of decompression */
- unsigned short pageofs;
+ unsigned short pageofs_out;
+
+ /* I: page offset of inline compressed data */
+ unsigned short pageofs_in;
/* L: maximum relative page index in pagevec[] */
unsigned short nr_pages;
@@ -41,29 +60,6 @@ struct z_erofs_collection {
/* I: can be used to free the pcluster by RCU. */
struct rcu_head rcu;
};
-};
-
-#define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001
-#define Z_EROFS_PCLUSTER_LENGTH_BIT 1
-
-/*
- * let's leave a type here in case of introducing
- * another tagged pointer later.
- */
-typedef void *z_erofs_next_pcluster_t;
-
-struct z_erofs_pcluster {
- struct erofs_workgroup obj;
- struct z_erofs_collection primary_collection;
-
- /* A: point to next chained pcluster or TAILs */
- z_erofs_next_pcluster_t next;
-
- /* A: lower limit of decompressed length and if full length or not */
- unsigned int length;
-
- /* I: page offset of inline compressed data */
- unsigned short pageofs_in;
union {
/* I: physical cluster size in pages */
@@ -80,8 +76,6 @@ struct z_erofs_pcluster {
struct page *compressed_pages[];
};
-#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)
-
/* let's avoid the valid 32-bit kernel addresses */
/* the chained workgroup has't submitted io (still open) */
diff --git a/fs/exec.c b/fs/exec.c
index 14b4b3755580..0989fb8472a1 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1312,9 +1312,7 @@ int begin_new_exec(struct linux_binprm * bprm)
if (retval)
goto out_unlock;
- if (me->flags & PF_KTHREAD)
- free_kthread_struct(me);
- me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
+ me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC |
PF_NOFREEZE | PF_NO_SETAFFINITY);
flush_thread();
me->personality &= ~bprm->per_clear;
@@ -1959,6 +1957,10 @@ int kernel_execve(const char *kernel_filename,
int fd = AT_FDCWD;
int retval;
+ /* It makes no sense for kernel threads to call execve */

+ if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
+ return -EINVAL;
+
filename = getname_kernel(kernel_filename);
if (IS_ERR(filename))
return PTR_ERR(filename);
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 0106eba46d5a..3ef80d000e13 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -145,7 +145,7 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
if (err)
goto out_err;
dprintk("%s: found name: %s\n", __func__, nbuf);
- tmp = lookup_one_len_unlocked(nbuf, parent, strlen(nbuf));
+ tmp = lookup_one_unlocked(mnt_user_ns(mnt), nbuf, parent, strlen(nbuf));
if (IS_ERR(tmp)) {
dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp));
err = PTR_ERR(tmp);
@@ -525,7 +525,8 @@ exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len,
}
inode_lock(target_dir->d_inode);
- nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
+ nresult = lookup_one(mnt_user_ns(mnt), nbuf,
+ target_dir, strlen(nbuf));
if (!IS_ERR(nresult)) {
if (unlikely(nresult->d_inode != result->d_inode)) {
dput(nresult);
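
The two exportfs hunks above move from lookup_one_len()/lookup_one_len_unlocked() to the user-namespace-aware variants; a small illustrative helper, assuming only the calling convention visible in the hunks (namespace, name, parent dentry, name length), could look like this:

/* illustrative wrapper, not exportfs code */
static struct dentry *lookup_child(struct vfsmount *mnt, struct dentry *parent,
				   const char *name, bool parent_locked)
{
	struct user_namespace *userns = mnt_user_ns(mnt);

	if (parent_locked)
		/* lookup_one() expects the caller to hold inode_lock(parent->d_inode) */
		return lookup_one(userns, name, parent, strlen(name));

	/* lookup_one_unlocked() places no i_rwsem requirement on the caller */
	return lookup_one_unlocked(userns, name, parent, strlen(name));
}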
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 456c1e89386a..6d8b2bf14de0 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -98,13 +98,7 @@ repeat:
}
if (unlikely(!PageUptodate(page))) {
- if (page->index == sbi->metapage_eio_ofs) {
- if (sbi->metapage_eio_cnt++ == MAX_RETRY_META_PAGE_EIO)
- set_ckpt_flags(sbi, CP_ERROR_FLAG);
- } else {
- sbi->metapage_eio_ofs = page->index;
- sbi->metapage_eio_cnt = 0;
- }
+ f2fs_handle_page_eio(sbi, page->index, META);
f2fs_put_page(page, 1);
return ERR_PTR(-EIO);
}
@@ -158,7 +152,7 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
blkaddr, exist);
set_sbi_flag(sbi, SBI_NEED_FSCK);
- WARN_ON(1);
+ dump_stack();
}
return exist;
}
@@ -196,7 +190,7 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
f2fs_warn(sbi, "access invalid blkaddr:%u",
blkaddr);
set_sbi_flag(sbi, SBI_NEED_FSCK);
- WARN_ON(1);
+ dump_stack();
return false;
} else {
return __is_bitmap_valid(sbi, blkaddr, type);
@@ -1010,9 +1004,7 @@ static void __add_dirty_inode(struct inode *inode, enum inode_type type)
return;
set_inode_flag(inode, flag);
- if (!f2fs_is_volatile_file(inode))
- list_add_tail(&F2FS_I(inode)->dirty_list,
- &sbi->inode_list[type]);
+ list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
stat_inc_dirty_inode(sbi, type);
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 8f38c26bb16c..7fcbcf979737 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -69,8 +69,7 @@ static bool __is_cp_guaranteed(struct page *page)
if (f2fs_is_compressed_page(page))
return false;
- if ((S_ISREG(inode->i_mode) &&
- (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
+ if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
page_private_gcing(page))
return true;
return false;
@@ -585,6 +584,34 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
return false;
}
+int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi)
+{
+ int i;
+
+ for (i = 0; i < NR_PAGE_TYPE; i++) {
+ int n = (i == META) ? 1 : NR_TEMP_TYPE;
+ int j;
+
+ sbi->write_io[i] = f2fs_kmalloc(sbi,
+ array_size(n, sizeof(struct f2fs_bio_info)),
+ GFP_KERNEL);
+ if (!sbi->write_io[i])
+ return -ENOMEM;
+
+ for (j = HOT; j < n; j++) {
+ init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
+ sbi->write_io[i][j].sbi = sbi;
+ sbi->write_io[i][j].bio = NULL;
+ spin_lock_init(&sbi->write_io[i][j].io_lock);
+ INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
+ INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
+ init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
+ }
+ }
+
+ return 0;
+}
+
static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
enum page_type type, enum temp_type temp)
{
@@ -2564,7 +2591,12 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
bool ipu_force = false;
int err = 0;
- set_new_dnode(&dn, inode, NULL, NULL, 0);
+ /* Use COW inode to make dnode_of_data for atomic write */
+ if (f2fs_is_atomic_file(inode))
+ set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
+ else
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+
if (need_inplace_update(fio) &&
f2fs_lookup_extent_cache(inode, page->index, &ei)) {
fio->old_blkaddr = ei.blk + page->index - ei.fofs;
@@ -2601,6 +2633,7 @@ got_it:
err = -EFSCORRUPTED;
goto out_writepage;
}
+
/*
* If current allocation needs SSR,
* it had better in-place writes for updated data.
@@ -2737,11 +2770,6 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
write:
if (f2fs_is_drop_cache(inode))
goto out;
- /* we should not write 0'th page having journal header */
- if (f2fs_is_volatile_file(inode) && (!page->index ||
- (!wbc->for_reclaim &&
- f2fs_available_free_memory(sbi, BASE_CHECK))))
- goto redirty_out;
/* Dentry/quota blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
@@ -3314,6 +3342,100 @@ unlock_out:
return err;
}
+static int __find_data_block(struct inode *inode, pgoff_t index,
+ block_t *blk_addr)
+{
+ struct dnode_of_data dn;
+ struct page *ipage;
+ struct extent_info ei = {0, };
+ int err = 0;
+
+ ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
+ if (IS_ERR(ipage))
+ return PTR_ERR(ipage);
+
+ set_new_dnode(&dn, inode, ipage, ipage, 0);
+
+ if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ } else {
+ /* hole case */
+ err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
+ if (err) {
+ dn.data_blkaddr = NULL_ADDR;
+ err = 0;
+ }
+ }
+ *blk_addr = dn.data_blkaddr;
+ f2fs_put_dnode(&dn);
+ return err;
+}
+
+static int __reserve_data_block(struct inode *inode, pgoff_t index,
+ block_t *blk_addr, bool *node_changed)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct dnode_of_data dn;
+ struct page *ipage;
+ int err = 0;
+
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+
+ ipage = f2fs_get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(ipage)) {
+ err = PTR_ERR(ipage);
+ goto unlock_out;
+ }
+ set_new_dnode(&dn, inode, ipage, ipage, 0);
+
+ err = f2fs_get_block(&dn, index);
+
+ *blk_addr = dn.data_blkaddr;
+ *node_changed = dn.node_changed;
+ f2fs_put_dnode(&dn);
+
+unlock_out:
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+ return err;
+}
+
+static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
+ struct page *page, loff_t pos, unsigned int len,
+ block_t *blk_addr, bool *node_changed)
+{
+ struct inode *inode = page->mapping->host;
+ struct inode *cow_inode = F2FS_I(inode)->cow_inode;
+ pgoff_t index = page->index;
+ int err = 0;
+ block_t ori_blk_addr;
+
+ /* If pos is beyond the end of file, reserve a new block in COW inode */
+ if ((pos & PAGE_MASK) >= i_size_read(inode))
+ return __reserve_data_block(cow_inode, index, blk_addr,
+ node_changed);
+
+ /* Look for the block in COW inode first */
+ err = __find_data_block(cow_inode, index, blk_addr);
+ if (err)
+ return err;
+ else if (*blk_addr != NULL_ADDR)
+ return 0;
+
+ /* Look for the block in the original inode */
+ err = __find_data_block(inode, index, &ori_blk_addr);
+ if (err)
+ return err;
+
+ /* Finally, we should reserve a new block in COW inode for the update */
+ err = __reserve_data_block(cow_inode, index, blk_addr, node_changed);
+ if (err)
+ return err;
+
+ if (ori_blk_addr != NULL_ADDR)
+ *blk_addr = ori_blk_addr;
+ return 0;
+}
+
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
@@ -3321,7 +3443,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *page = NULL;
pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
- bool need_balance = false, drop_atomic = false;
+ bool need_balance = false;
block_t blkaddr = NULL_ADDR;
int err = 0;
@@ -3332,14 +3454,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
goto fail;
}
- if ((f2fs_is_atomic_file(inode) &&
- !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
- is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
- err = -ENOMEM;
- drop_atomic = true;
- goto fail;
- }
-
/*
* We should check this at this moment to avoid deadlock on inode page
* and #0 page. The locking rule for inline_data conversion should be:
@@ -3387,7 +3501,11 @@ repeat:
*pagep = page;
- err = prepare_write_begin(sbi, page, pos, len,
+ if (f2fs_is_atomic_file(inode))
+ err = prepare_atomic_write_begin(sbi, page, pos, len,
+ &blkaddr, &need_balance);
+ else
+ err = prepare_write_begin(sbi, page, pos, len,
&blkaddr, &need_balance);
if (err)
goto fail;
@@ -3443,8 +3561,6 @@ repeat:
fail:
f2fs_put_page(page, 1);
f2fs_write_failed(inode, pos + len);
- if (drop_atomic)
- f2fs_drop_inmem_pages_all(sbi, false);
return err;
}
@@ -3488,8 +3604,12 @@ static int f2fs_write_end(struct file *file,
set_page_dirty(page);
if (pos + copied > i_size_read(inode) &&
- !f2fs_verity_in_progress(inode))
+ !f2fs_verity_in_progress(inode)) {
f2fs_i_size_write(inode, pos + copied);
+ if (f2fs_is_atomic_file(inode))
+ f2fs_i_size_write(F2FS_I(inode)->cow_inode,
+ pos + copied);
+ }
unlock_out:
f2fs_put_page(page, 1);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
@@ -3522,9 +3642,6 @@ void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
inode->i_ino == F2FS_COMPRESS_INO(sbi))
clear_page_private_data(&folio->page);
- if (page_private_atomic(&folio->page))
- return f2fs_drop_inmem_page(inode, &folio->page);
-
folio_detach_private(folio);
}
@@ -3536,10 +3653,6 @@ bool f2fs_release_folio(struct folio *folio, gfp_t wait)
if (folio_test_dirty(folio))
return false;
- /* This is atomic written page, keep Private */
- if (page_private_atomic(&folio->page))
- return false;
-
sbi = F2FS_M_SB(folio->mapping);
if (test_opt(sbi, COMPRESS_CACHE)) {
struct inode *inode = folio->mapping->host;
@@ -3565,18 +3678,6 @@ static bool f2fs_dirty_data_folio(struct address_space *mapping,
folio_mark_uptodate(folio);
BUG_ON(folio_test_swapcache(folio));
- if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
- if (!page_private_atomic(&folio->page)) {
- f2fs_register_inmem_page(inode, &folio->page);
- return true;
- }
- /*
- * Previously, this page has been registered, we just
- * return here.
- */
- return false;
- }
-
if (!folio_test_dirty(folio)) {
filemap_dirty_folio(mapping, folio);
f2fs_update_dirty_folio(inode, folio);
@@ -3656,42 +3757,14 @@ out:
int f2fs_migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
- int rc, extra_count;
- struct f2fs_inode_info *fi = F2FS_I(mapping->host);
- bool atomic_written = page_private_atomic(page);
+ int rc, extra_count = 0;
BUG_ON(PageWriteback(page));
- /* migrating an atomic written page is safe with the inmem_lock hold */
- if (atomic_written) {
- if (mode != MIGRATE_SYNC)
- return -EBUSY;
- if (!mutex_trylock(&fi->inmem_lock))
- return -EAGAIN;
- }
-
- /* one extra reference was held for atomic_write page */
- extra_count = atomic_written ? 1 : 0;
rc = migrate_page_move_mapping(mapping, newpage,
page, extra_count);
- if (rc != MIGRATEPAGE_SUCCESS) {
- if (atomic_written)
- mutex_unlock(&fi->inmem_lock);
+ if (rc != MIGRATEPAGE_SUCCESS)
return rc;
- }
-
- if (atomic_written) {
- struct inmem_pages *cur;
-
- list_for_each_entry(cur, &fi->inmem_pages, list)
- if (cur->page == page) {
- cur->page = newpage;
- break;
- }
- mutex_unlock(&fi->inmem_lock);
- put_page(page);
- get_page(newpage);
- }
/* guarantee to start from no stale private field */
set_page_private(newpage, 0);
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index fcdf253cd211..c92625ef16d0 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -91,11 +91,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
si->nquota_files = sbi->nquota_files;
si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
- si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
si->aw_cnt = sbi->atomic_files;
- si->vw_cnt = atomic_read(&sbi->vw_cnt);
si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);
- si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);
si->nr_dio_read = get_pages(sbi, F2FS_DIO_READ);
si->nr_dio_write = get_pages(sbi, F2FS_DIO_WRITE);
si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
@@ -167,8 +164,6 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID];
si->io_skip_bggc = sbi->io_skip_bggc;
si->other_skip_bggc = sbi->other_skip_bggc;
- si->skipped_atomic_files[BG_GC] = sbi->skipped_atomic_files[BG_GC];
- si->skipped_atomic_files[FG_GC] = sbi->skipped_atomic_files[FG_GC];
si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
/ 2;
@@ -296,7 +291,6 @@ get_cache:
sizeof(struct nat_entry);
si->cache_mem += NM_I(sbi)->nat_cnt[DIRTY_NAT] *
sizeof(struct nat_entry_set);
- si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
for (i = 0; i < MAX_INO_ENTRY; i++)
si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
si->cache_mem += atomic_read(&sbi->total_ext_tree) *
@@ -491,10 +485,6 @@ static int stat_show(struct seq_file *s, void *v)
si->bg_data_blks);
seq_printf(s, " - node blocks : %d (%d)\n", si->node_blks,
si->bg_node_blks);
- seq_printf(s, "Skipped : atomic write %llu (%llu)\n",
- si->skipped_atomic_files[BG_GC] +
- si->skipped_atomic_files[FG_GC],
- si->skipped_atomic_files[BG_GC]);
seq_printf(s, "BG skip : IO: %u, Other: %u\n",
si->io_skip_bggc, si->other_skip_bggc);
seq_puts(s, "\nExtent Cache:\n");
@@ -519,10 +509,8 @@ static int stat_show(struct seq_file *s, void *v)
si->flush_list_empty,
si->nr_discarding, si->nr_discarded,
si->nr_discard_cmd, si->undiscard_blks);
- seq_printf(s, " - inmem: %4d, atomic IO: %4d (Max. %4d), "
- "volatile IO: %4d (Max. %4d)\n",
- si->inmem_pages, si->aw_cnt, si->max_aw_cnt,
- si->vw_cnt, si->max_vw_cnt);
+ seq_printf(s, " - atomic IO: %4d (Max. %4d)\n",
+ si->aw_cnt, si->max_aw_cnt);
seq_printf(s, " - compress: %4d, hit:%8d\n", si->compress_pages, si->compress_page_hit);
seq_printf(s, " - nodes: %4d in %4d\n",
si->ndirty_node, si->node_pages);
@@ -623,9 +611,7 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
for (i = META_CP; i < META_MAX; i++)
atomic_set(&sbi->meta_count[i], 0);
- atomic_set(&sbi->vw_cnt, 0);
atomic_set(&sbi->max_aw_cnt, 0);
- atomic_set(&sbi->max_vw_cnt, 0);
raw_spin_lock_irqsave(&f2fs_stat_lock, flags);
list_add_tail(&si->stat_list, &f2fs_stat_list);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index a0e51937d92e..d5bd7932fb64 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -82,7 +82,8 @@ int f2fs_init_casefolded_name(const struct inode *dir,
#if IS_ENABLED(CONFIG_UNICODE)
struct super_block *sb = dir->i_sb;
- if (IS_CASEFOLDED(dir)) {
+ if (IS_CASEFOLDED(dir) &&
+ !is_dot_dotdot(fname->usr_fname->name, fname->usr_fname->len)) {
fname->cf_name.name = f2fs_kmem_cache_alloc(f2fs_cf_name_slab,
GFP_NOFS, false, F2FS_SB(sb));
if (!fname->cf_name.name)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 10d1f138d14f..d9bbecd008d2 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -509,11 +509,11 @@ struct f2fs_filename {
#if IS_ENABLED(CONFIG_UNICODE)
/*
* For casefolded directories: the casefolded name, but it's left NULL
- * if the original name is not valid Unicode, if the directory is both
- * casefolded and encrypted and its encryption key is unavailable, or if
- * the filesystem is doing an internal operation where usr_fname is also
- * NULL. In all these cases we fall back to treating the name as an
- * opaque byte sequence.
+ * if the original name is not valid Unicode, if the original name is
+ * "." or "..", if the directory is both casefolded and encrypted and
+ * its encryption key is unavailable, or if the filesystem is doing an
+ * internal operation where usr_fname is also NULL. In all these cases
+ * we fall back to treating the name as an opaque byte sequence.
*/
struct fscrypt_str cf_name;
#endif
@@ -579,8 +579,8 @@ enum {
/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT 8
-/* maximum retry of EIO'ed meta page */
-#define MAX_RETRY_META_PAGE_EIO 100
+/* maximum retry of EIO'ed page */
+#define MAX_RETRY_PAGE_EIO 100
#define F2FS_LINK_MAX 0xffffffff /* maximum link count per file */
@@ -717,7 +717,6 @@ enum {
enum {
GC_FAILURE_PIN,
- GC_FAILURE_ATOMIC,
MAX_GC_FAILURE
};
@@ -739,8 +738,6 @@ enum {
FI_UPDATE_WRITE, /* inode has in-place-update data */
FI_NEED_IPU, /* used for ipu per file */
FI_ATOMIC_FILE, /* indicate atomic file */
- FI_ATOMIC_COMMIT, /* indicate the state of atomical committing */
- FI_VOLATILE_FILE, /* indicate volatile file */
FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */
FI_DROP_CACHE, /* drop dirty page cache */
FI_DATA_EXIST, /* indicate data exists */
@@ -753,7 +750,6 @@ enum {
FI_EXTRA_ATTR, /* indicate file has extra attribute */
FI_PROJ_INHERIT, /* indicate file inherits projectid */
FI_PIN_FILE, /* indicate file should not be gced */
- FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */
FI_COMPRESSED_FILE, /* indicate file's data can be compressed */
FI_COMPRESS_CORRUPT, /* indicate compressed cluster is corrupted */
@@ -795,11 +791,9 @@ struct f2fs_inode_info {
#endif
struct list_head dirty_list; /* dirty list for dirs and files */
struct list_head gdirty_list; /* linked in global dirty list */
- struct list_head inmem_ilist; /* list for inmem inodes */
- struct list_head inmem_pages; /* inmemory pages managed by f2fs */
- struct task_struct *inmem_task; /* store inmemory task */
- struct mutex inmem_lock; /* lock for inmemory pages */
+ struct task_struct *atomic_write_task; /* store atomic write task */
struct extent_tree *extent_tree; /* cached extent_tree entry */
+ struct inode *cow_inode; /* copy-on-write inode for atomic write */
/* avoid racing between foreground op and gc */
struct f2fs_rwsem i_gc_rwsem[2];
@@ -1093,7 +1087,6 @@ enum count_type {
F2FS_DIRTY_QDATA,
F2FS_DIRTY_NODES,
F2FS_DIRTY_META,
- F2FS_INMEM_PAGES,
F2FS_DIRTY_IMETA,
F2FS_WB_CP_DATA,
F2FS_WB_DATA,
@@ -1118,16 +1111,12 @@ enum count_type {
*/
#define PAGE_TYPE_OF_BIO(type) ((type) > META ? META : (type))
enum page_type {
- DATA,
- NODE,
+ DATA = 0,
+ NODE = 1, /* should not change this */
META,
NR_PAGE_TYPE,
META_FLUSH,
- INMEM, /* the below types are used by tracepoints only. */
- INMEM_DROP,
- INMEM_INVALIDATE,
- INMEM_REVOKE,
- IPU,
+ IPU, /* the below types are used by tracepoints only. */
OPU,
};
@@ -1277,6 +1266,15 @@ struct atgc_management {
unsigned long long age_threshold; /* age threshold */
};
+struct f2fs_gc_control {
+ unsigned int victim_segno; /* target victim segment number */
+ int init_gc_type; /* FG_GC or BG_GC */
+ bool no_bg_gc; /* check the space and stop bg_gc */
+ bool should_migrate_blocks; /* should migrate blocks */
+ bool err_gc_skipped; /* return EAGAIN if GC skipped */
+ unsigned int nr_free_secs; /* # of free sections to do GC */
+};
+
/* For s_flag in struct f2fs_sb_info */
enum {
SBI_IS_DIRTY, /* dirty flag for checkpoint */
@@ -1615,8 +1613,8 @@ struct f2fs_sb_info {
/* keep migration IO order for LFS mode */
struct f2fs_rwsem io_order_lock;
mempool_t *write_io_dummy; /* Dummy pages */
- pgoff_t metapage_eio_ofs; /* EIO page offset */
- int metapage_eio_cnt; /* EIO count */
+ pgoff_t page_eio_ofs[NR_PAGE_TYPE]; /* EIO page offset */
+ int page_eio_cnt[NR_PAGE_TYPE]; /* EIO count */
/* for checkpoint */
struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
@@ -1719,7 +1717,6 @@ struct f2fs_sb_info {
/* for skip statistic */
unsigned int atomic_files; /* # of opened atomic file */
- unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */
unsigned long long skipped_gc_rwsem; /* FG_GC only */
/* threshold for gc trials on pinned files */
@@ -1750,9 +1747,7 @@ struct f2fs_sb_info {
atomic_t inline_dir; /* # of inline_dentry inodes */
atomic_t compr_inode; /* # of compressed inodes */
atomic64_t compr_blocks; /* # of compressed blocks */
- atomic_t vw_cnt; /* # of volatile writes */
atomic_t max_aw_cnt; /* max # of atomic writes */
- atomic_t max_vw_cnt; /* max # of volatile writes */
unsigned int io_skip_bggc; /* skip background gc for in-flight IO */
unsigned int other_skip_bggc; /* skip background gc for other reasons */
unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
@@ -1763,7 +1758,7 @@ struct f2fs_sb_info {
unsigned int data_io_flag;
unsigned int node_io_flag;
- /* For sysfs suppport */
+ /* For sysfs support */
struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */
struct completion s_kobj_unregister;
@@ -2606,11 +2601,17 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
{
spin_lock(&sbi->stat_lock);
- f2fs_bug_on(sbi, !sbi->total_valid_block_count);
- f2fs_bug_on(sbi, !sbi->total_valid_node_count);
+ if (unlikely(!sbi->total_valid_block_count ||
+ !sbi->total_valid_node_count)) {
+ f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u",
+ sbi->total_valid_block_count,
+ sbi->total_valid_node_count);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ } else {
+ sbi->total_valid_block_count--;
+ sbi->total_valid_node_count--;
+ }
- sbi->total_valid_node_count--;
- sbi->total_valid_block_count--;
if (sbi->reserved_blocks &&
sbi->current_reserved_blocks < sbi->reserved_blocks)
sbi->current_reserved_blocks++;
@@ -3173,6 +3174,10 @@ static inline int inline_xattr_size(struct inode *inode)
return 0;
}
+/*
+ * Notice: checking the inline_data flag without holding the inode page lock
+ * is unsafe; it can change at any time via f2fs_convert_inline_page().
+ */
static inline int f2fs_has_inline_data(struct inode *inode)
{
return is_inode_flag_set(inode, FI_INLINE_DATA);
@@ -3203,16 +3208,6 @@ static inline bool f2fs_is_atomic_file(struct inode *inode)
return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}
-static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
-{
- return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
-}
-
-static inline bool f2fs_is_volatile_file(struct inode *inode)
-{
- return is_inode_flag_set(inode, FI_VOLATILE_FILE);
-}
-
static inline bool f2fs_is_first_block_written(struct inode *inode)
{
return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
@@ -3445,6 +3440,8 @@ void f2fs_handle_failed_inode(struct inode *inode);
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);
+int f2fs_get_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+ struct inode **new_inode);
/*
* dir.c
@@ -3580,11 +3577,8 @@ void f2fs_destroy_node_manager_caches(void);
* segment.c
*/
bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
-void f2fs_register_inmem_page(struct inode *inode, struct page *page);
-void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
-void f2fs_drop_inmem_pages(struct inode *inode);
-void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
-int f2fs_commit_inmem_pages(struct inode *inode);
+int f2fs_commit_atomic_write(struct inode *inode);
+void f2fs_abort_atomic_write(struct inode *inode, bool clean);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
@@ -3726,6 +3720,7 @@ int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
struct bio *bio, enum page_type type);
+int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
struct inode *inode, struct page *page,
@@ -3787,8 +3782,7 @@ extern const struct iomap_ops f2fs_iomap_ops;
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
-int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
- unsigned int segno);
+int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
@@ -3816,7 +3810,6 @@ struct f2fs_stat_info {
int ext_tree, zombie_tree, ext_node;
int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
int ndirty_data, ndirty_qdata;
- int inmem_pages;
unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
int nats, dirty_nats, sits, dirty_sits;
int free_nids, avail_nids, alloc_nids;
@@ -3834,7 +3827,7 @@ struct f2fs_stat_info {
int inline_xattr, inline_inode, inline_dir, append, update, orphans;
int compr_inode;
unsigned long long compr_blocks;
- int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
+ int aw_cnt, max_aw_cnt;
unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
unsigned int bimodal, avg_vblocks;
int util_free, util_valid, util_invalid;
@@ -3846,7 +3839,6 @@ struct f2fs_stat_info {
int bg_node_segs, bg_data_segs;
int tot_blks, data_blks, node_blks;
int bg_data_blks, bg_node_blks;
- unsigned long long skipped_atomic_files[2];
int curseg[NR_CURSEG_TYPE];
int cursec[NR_CURSEG_TYPE];
int curzone[NR_CURSEG_TYPE];
@@ -3946,17 +3938,6 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
if (cur > max) \
atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
} while (0)
-#define stat_inc_volatile_write(inode) \
- (atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
-#define stat_dec_volatile_write(inode) \
- (atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
-#define stat_update_max_volatile_write(inode) \
- do { \
- int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt); \
- int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt); \
- if (cur > max) \
- atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur); \
- } while (0)
#define stat_inc_seg_count(sbi, type, gc_type) \
do { \
struct f2fs_stat_info *si = F2FS_STAT(sbi); \
@@ -4018,9 +3999,6 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#define stat_add_compr_blocks(inode, blocks) do { } while (0)
#define stat_sub_compr_blocks(inode, blocks) do { } while (0)
#define stat_update_max_atomic_write(inode) do { } while (0)
-#define stat_inc_volatile_write(inode) do { } while (0)
-#define stat_dec_volatile_write(inode) do { } while (0)
-#define stat_update_max_volatile_write(inode) do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr) do { } while (0)
#define stat_inc_seg_type(sbi, curseg) do { } while (0)
#define stat_inc_block_count(sbi, curseg) do { } while (0)
@@ -4053,6 +4031,7 @@ extern struct kmem_cache *f2fs_inode_entry_slab;
* inline.c
*/
bool f2fs_may_inline_data(struct inode *inode);
+bool f2fs_sanity_check_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
@@ -4422,8 +4401,7 @@ static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
static inline bool f2fs_may_compress(struct inode *inode)
{
if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
- f2fs_is_atomic_file(inode) ||
- f2fs_is_volatile_file(inode))
+ f2fs_is_atomic_file(inode))
return false;
return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}
@@ -4431,8 +4409,8 @@ static inline bool f2fs_may_compress(struct inode *inode)
static inline void f2fs_i_compr_blocks_update(struct inode *inode,
u64 blocks, bool add)
{
- int diff = F2FS_I(inode)->i_cluster_size - blocks;
struct f2fs_inode_info *fi = F2FS_I(inode);
+ int diff = fi->i_cluster_size - blocks;
/* don't update i_compr_blocks if saved blocks were released */
if (!add && !atomic_read(&fi->i_compr_blocks))
@@ -4540,6 +4518,21 @@ static inline void f2fs_io_schedule_timeout(long timeout)
io_schedule_timeout(timeout);
}
+static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi, pgoff_t ofs,
+ enum page_type type)
+{
+ if (unlikely(f2fs_cp_error(sbi)))
+ return;
+
+ if (ofs == sbi->page_eio_ofs[type]) {
+ if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO)
+ set_ckpt_flags(sbi, CP_ERROR_FLAG);
+ } else {
+ sbi->page_eio_ofs[type] = ofs;
+ sbi->page_eio_cnt[type] = 0;
+ }
+}
+
#define EFSBADCRC EBADMSG /* Bad CRC detected */
#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
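
The f2fs.h hunks above replace f2fs_gc()'s four boolean/segno arguments with a single struct f2fs_gc_control; a hedged caller sketch, assuming (as the call sites in this patch suggest) that f2fs_gc() still drops sbi->gc_lock itself before returning:

/* illustrative caller, not part of the patch */
static int force_one_free_section(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,	/* let victim selection pick a segment */
		.init_gc_type = FG_GC,		/* start as foreground GC */
		.no_bg_gc = false,
		.should_migrate_blocks = false,
		.err_gc_skipped = true,		/* report -EAGAIN if nothing was freed */
		.nr_free_secs = 1,		/* keep going until one section is free */
	};

	f2fs_down_write(&sbi->gc_lock);
	return f2fs_gc(sbi, &gc_control);
}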
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 100637b1adb3..bd14cef1b08f 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -372,7 +372,8 @@ sync_nodes:
f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
- if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
+ if ((!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ||
+ (atomic && !test_opt(sbi, NOBARRIER) && f2fs_sb_has_blkzoned(sbi)))
ret = f2fs_issue_flush(sbi, inode->i_ino);
if (!ret) {
f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
@@ -1437,11 +1438,19 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
ret = -ENOSPC;
break;
}
- if (dn->data_blkaddr != NEW_ADDR) {
- f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
- dn->data_blkaddr = NEW_ADDR;
- f2fs_set_data_blkaddr(dn);
+
+ if (dn->data_blkaddr == NEW_ADDR)
+ continue;
+
+ if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
+ DATA_GENERIC_ENHANCE)) {
+ ret = -EFSCORRUPTED;
+ break;
}
+
+ f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
+ dn->data_blkaddr = NEW_ADDR;
+ f2fs_set_data_blkaddr(dn);
}
f2fs_update_extent_cache_range(dn, start, 0, index - start);
@@ -1638,6 +1647,11 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
.m_may_create = true };
+ struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
+ .init_gc_type = FG_GC,
+ .should_migrate_blocks = false,
+ .err_gc_skipped = true,
+ .nr_free_secs = 0 };
pgoff_t pg_start, pg_end;
loff_t new_size = i_size_read(inode);
loff_t off_end;
@@ -1675,8 +1689,8 @@ next_alloc:
if (has_not_enough_free_secs(sbi, 0,
GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
f2fs_down_write(&sbi->gc_lock);
- err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
- if (err && err != -ENODATA && err != -EAGAIN)
+ err = f2fs_gc(sbi, &gc_control);
+ if (err && err != -ENODATA)
goto out_err;
}
@@ -1766,6 +1780,10 @@ static long f2fs_fallocate(struct file *file, int mode,
inode_lock(inode);
+ ret = file_modified(file);
+ if (ret)
+ goto out;
+
if (mode & FALLOC_FL_PUNCH_HOLE) {
if (offset >= inode->i_size)
goto out;
@@ -1804,16 +1822,8 @@ static int f2fs_release_file(struct inode *inode, struct file *filp)
atomic_read(&inode->i_writecount) != 1)
return 0;
- /* some remained atomic pages should discarded */
if (f2fs_is_atomic_file(inode))
- f2fs_drop_inmem_pages(inode);
- if (f2fs_is_volatile_file(inode)) {
- set_inode_flag(inode, FI_DROP_CACHE);
- filemap_fdatawrite(inode->i_mapping);
- clear_inode_flag(inode, FI_DROP_CACHE);
- clear_inode_flag(inode, FI_VOLATILE_FILE);
- stat_dec_volatile_write(inode);
- }
+ f2fs_abort_atomic_write(inode, true);
return 0;
}
@@ -1828,8 +1838,8 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
* before dropping file lock, it needs to do in ->flush.
*/
if (f2fs_is_atomic_file(inode) &&
- F2FS_I(inode)->inmem_task == current)
- f2fs_drop_inmem_pages(inode);
+ F2FS_I(inode)->atomic_write_task == current)
+ f2fs_abort_atomic_write(inode, true);
return 0;
}
@@ -1992,6 +2002,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct inode *pinode;
int ret;
if (!inode_owner_or_capable(mnt_userns, inode))
@@ -2014,44 +2025,55 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
goto out;
}
- if (f2fs_is_atomic_file(inode)) {
- if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
- ret = -EINVAL;
+ if (f2fs_is_atomic_file(inode))
goto out;
- }
ret = f2fs_convert_inline_inode(inode);
if (ret)
goto out;
- f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
/*
* Should wait end_io to count F2FS_WB_CP_DATA correctly by
* f2fs_is_atomic_file.
*/
if (get_dirty_pages(inode))
- f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
+ f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u",
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
if (ret) {
- f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+ goto out;
+ }
+
+ /* Create a COW inode for atomic write */
+ pinode = f2fs_iget(inode->i_sb, fi->i_pino);
+ if (IS_ERR(pinode)) {
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+ ret = PTR_ERR(pinode);
+ goto out;
+ }
+
+ ret = f2fs_get_tmpfile(mnt_userns, pinode, &fi->cow_inode);
+ iput(pinode);
+ if (ret) {
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
goto out;
}
+ f2fs_i_size_write(fi->cow_inode, i_size_read(inode));
spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
- if (list_empty(&fi->inmem_ilist))
- list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
sbi->atomic_files++;
spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
- /* add inode in inmem_list first and set atomic_file */
set_inode_flag(inode, FI_ATOMIC_FILE);
- clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
- f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ set_inode_flag(fi->cow_inode, FI_ATOMIC_FILE);
+ clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
- F2FS_I(inode)->inmem_task = current;
+ f2fs_update_time(sbi, REQ_TIME);
+ fi->atomic_write_task = current;
stat_update_max_atomic_write(inode);
out:
inode_unlock(inode);
@@ -2076,127 +2098,20 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
inode_lock(inode);
- if (f2fs_is_volatile_file(inode)) {
- ret = -EINVAL;
- goto err_out;
- }
-
if (f2fs_is_atomic_file(inode)) {
- ret = f2fs_commit_inmem_pages(inode);
+ ret = f2fs_commit_atomic_write(inode);
if (ret)
- goto err_out;
+ goto unlock_out;
ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
if (!ret)
- f2fs_drop_inmem_pages(inode);
+ f2fs_abort_atomic_write(inode, false);
} else {
ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
}
-err_out:
- if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
- clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
- ret = -EINVAL;
- }
- inode_unlock(inode);
- mnt_drop_write_file(filp);
- return ret;
-}
-
-static int f2fs_ioc_start_volatile_write(struct file *filp)
-{
- struct inode *inode = file_inode(filp);
- struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
- int ret;
-
- if (!inode_owner_or_capable(mnt_userns, inode))
- return -EACCES;
-
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
-
- ret = mnt_want_write_file(filp);
- if (ret)
- return ret;
-
- inode_lock(inode);
-
- if (f2fs_is_volatile_file(inode))
- goto out;
-
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- goto out;
-
- stat_inc_volatile_write(inode);
- stat_update_max_volatile_write(inode);
-
- set_inode_flag(inode, FI_VOLATILE_FILE);
- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
-out:
- inode_unlock(inode);
- mnt_drop_write_file(filp);
- return ret;
-}
-
-static int f2fs_ioc_release_volatile_write(struct file *filp)
-{
- struct inode *inode = file_inode(filp);
- struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
- int ret;
-
- if (!inode_owner_or_capable(mnt_userns, inode))
- return -EACCES;
-
- ret = mnt_want_write_file(filp);
- if (ret)
- return ret;
-
- inode_lock(inode);
-
- if (!f2fs_is_volatile_file(inode))
- goto out;
-
- if (!f2fs_is_first_block_written(inode)) {
- ret = truncate_partial_data_page(inode, 0, true);
- goto out;
- }
-
- ret = punch_hole(inode, 0, F2FS_BLKSIZE);
-out:
- inode_unlock(inode);
- mnt_drop_write_file(filp);
- return ret;
-}
-
-static int f2fs_ioc_abort_volatile_write(struct file *filp)
-{
- struct inode *inode = file_inode(filp);
- struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
- int ret;
-
- if (!inode_owner_or_capable(mnt_userns, inode))
- return -EACCES;
-
- ret = mnt_want_write_file(filp);
- if (ret)
- return ret;
-
- inode_lock(inode);
-
- if (f2fs_is_atomic_file(inode))
- f2fs_drop_inmem_pages(inode);
- if (f2fs_is_volatile_file(inode)) {
- clear_inode_flag(inode, FI_VOLATILE_FILE);
- stat_dec_volatile_write(inode);
- ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
- }
-
- clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
-
+unlock_out:
inode_unlock(inode);
-
mnt_drop_write_file(filp);
- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return ret;
}
@@ -2437,6 +2352,10 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
+ .no_bg_gc = false,
+ .should_migrate_blocks = false,
+ .nr_free_secs = 0 };
__u32 sync;
int ret;
@@ -2462,7 +2381,9 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
f2fs_down_write(&sbi->gc_lock);
}
- ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
+ gc_control.init_gc_type = sync ? FG_GC : BG_GC;
+ gc_control.err_gc_skipped = sync;
+ ret = f2fs_gc(sbi, &gc_control);
out:
mnt_drop_write_file(filp);
return ret;
@@ -2471,6 +2392,12 @@ out:
static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
+ struct f2fs_gc_control gc_control = {
+ .init_gc_type = range->sync ? FG_GC : BG_GC,
+ .no_bg_gc = false,
+ .should_migrate_blocks = false,
+ .err_gc_skipped = range->sync,
+ .nr_free_secs = 0 };
u64 end;
int ret;
@@ -2498,8 +2425,8 @@ do_more:
f2fs_down_write(&sbi->gc_lock);
}
- ret = f2fs_gc(sbi, range->sync, true, false,
- GET_SEGNO(sbi, range->start));
+ gc_control.victim_segno = GET_SEGNO(sbi, range->start);
+ ret = f2fs_gc(sbi, &gc_control);
if (ret) {
if (ret == -EBUSY)
ret = -EAGAIN;
@@ -2674,6 +2601,7 @@ do_map:
}
set_page_dirty(page);
+ set_page_private_gcing(page);
f2fs_put_page(page, 1);
idx++;
@@ -2913,6 +2841,11 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
unsigned int start_segno = 0, end_segno = 0;
unsigned int dev_start_segno = 0, dev_end_segno = 0;
struct f2fs_flush_device range;
+ struct f2fs_gc_control gc_control = {
+ .init_gc_type = FG_GC,
+ .should_migrate_blocks = true,
+ .err_gc_skipped = true,
+ .nr_free_secs = 0 };
int ret;
if (!capable(CAP_SYS_ADMIN))
@@ -2956,7 +2889,9 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
sm->last_victim[GC_CB] = end_segno + 1;
sm->last_victim[GC_GREEDY] = end_segno + 1;
sm->last_victim[ALLOC_NEXT] = end_segno + 1;
- ret = f2fs_gc(sbi, true, true, true, start_segno);
+
+ gc_control.victim_segno = start_segno;
+ ret = f2fs_gc(sbi, &gc_control);
if (ret == -EAGAIN)
ret = 0;
else if (ret < 0)
@@ -3017,7 +2952,7 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
- if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
+ if (projid_eq(kprojid, fi->i_projid))
return 0;
err = -EPERM;
@@ -3037,7 +2972,7 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
if (err)
goto out_unlock;
- F2FS_I(inode)->i_projid = kprojid;
+ fi->i_projid = kprojid;
inode->i_ctime = current_time(inode);
f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
@@ -3987,7 +3922,7 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
struct f2fs_inode_info *fi = F2FS_I(inode);
pgoff_t page_idx = 0, last_idx;
unsigned int blk_per_seg = sbi->blocks_per_seg;
- int cluster_size = F2FS_I(inode)->i_cluster_size;
+ int cluster_size = fi->i_cluster_size;
int count, ret;
if (!f2fs_sb_has_compression(sbi) ||
@@ -4010,11 +3945,6 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
goto out;
}
- if (f2fs_is_mmap_file(inode)) {
- ret = -EBUSY;
- goto out;
- }
-
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
if (ret)
goto out;
@@ -4082,11 +4012,6 @@ static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
goto out;
}
- if (f2fs_is_mmap_file(inode)) {
- ret = -EBUSY;
- goto out;
- }
-
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
if (ret)
goto out;
@@ -4136,11 +4061,9 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case F2FS_IOC_COMMIT_ATOMIC_WRITE:
return f2fs_ioc_commit_atomic_write(filp);
case F2FS_IOC_START_VOLATILE_WRITE:
- return f2fs_ioc_start_volatile_write(filp);
case F2FS_IOC_RELEASE_VOLATILE_WRITE:
- return f2fs_ioc_release_volatile_write(filp);
case F2FS_IOC_ABORT_VOLATILE_WRITE:
- return f2fs_ioc_abort_volatile_write(filp);
+ return -EOPNOTSUPP;
case F2FS_IOC_SHUTDOWN:
return f2fs_ioc_shutdown(filp, arg);
case FITRIM:
@@ -4328,17 +4251,39 @@ out:
static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct inode *inode = file_inode(iocb->ki_filp);
+ const loff_t pos = iocb->ki_pos;
ssize_t ret;
if (!f2fs_is_compress_backend_ready(inode))
return -EOPNOTSUPP;
- if (f2fs_should_use_dio(inode, iocb, to))
- return f2fs_dio_read_iter(iocb, to);
+ if (trace_f2fs_dataread_start_enabled()) {
+ char *p = f2fs_kmalloc(F2FS_I_SB(inode), PATH_MAX, GFP_KERNEL);
+ char *path;
+
+ if (!p)
+ goto skip_read_trace;
+
+ path = dentry_path_raw(file_dentry(iocb->ki_filp), p, PATH_MAX);
+ if (IS_ERR(path)) {
+ kfree(p);
+ goto skip_read_trace;
+ }
- ret = filemap_read(iocb, to, 0);
- if (ret > 0)
- f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_READ_IO, ret);
+ trace_f2fs_dataread_start(inode, pos, iov_iter_count(to),
+ current->pid, path, current->comm);
+ kfree(p);
+ }
+skip_read_trace:
+ if (f2fs_should_use_dio(inode, iocb, to)) {
+ ret = f2fs_dio_read_iter(iocb, to);
+ } else {
+ ret = filemap_read(iocb, to, 0);
+ if (ret > 0)
+ f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_READ_IO, ret);
+ }
+ if (trace_f2fs_dataread_end_enabled())
+ trace_f2fs_dataread_end(inode, pos, ret);
return ret;
}
@@ -4630,14 +4575,36 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
/* Possibly preallocate the blocks for the write. */
target_size = iocb->ki_pos + iov_iter_count(from);
preallocated = f2fs_preallocate_blocks(iocb, from, dio);
- if (preallocated < 0)
+ if (preallocated < 0) {
ret = preallocated;
- else
+ } else {
+ if (trace_f2fs_datawrite_start_enabled()) {
+ char *p = f2fs_kmalloc(F2FS_I_SB(inode),
+ PATH_MAX, GFP_KERNEL);
+ char *path;
+
+ if (!p)
+ goto skip_write_trace;
+ path = dentry_path_raw(file_dentry(iocb->ki_filp),
+ p, PATH_MAX);
+ if (IS_ERR(path)) {
+ kfree(p);
+ goto skip_write_trace;
+ }
+ trace_f2fs_datawrite_start(inode, orig_pos, orig_count,
+ current->pid, path, current->comm);
+ kfree(p);
+ }
+skip_write_trace:
/* Do the actual write. */
ret = dio ?
f2fs_dio_write_iter(iocb, from, &may_need_sync):
f2fs_buffered_write_iter(iocb, from);
+ if (trace_f2fs_datawrite_end_enabled())
+ trace_f2fs_datawrite_end(inode, orig_pos, ret);
+ }
+
/* Don't leave any preallocated blocks around past i_size. */
if (preallocated && i_size_read(inode) < target_size) {
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
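
With the volatile-write ioctls above now returning -EOPNOTSUPP, atomic updates go through the START/COMMIT pair backed by the new COW inode; a hypothetical user-space caller, assuming the F2FS_IOC_* numbers come from the uapi <linux/f2fs.h> header, might look like this:

/* hypothetical example, error handling kept minimal */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int atomic_update(const char *path, const void *buf, size_t len, off_t off)
{
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;

	/* sets FI_ATOMIC_FILE and creates the per-inode COW inode */
	if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) ||
	    pwrite(fd, buf, len, off) != (ssize_t)len ||
	    /* commits the COW data and syncs the file */
	    ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE)) {
		close(fd);
		return -1;
	}
	return close(fd);
}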
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index ea5b93b689cd..d5fb426e0747 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -35,6 +35,10 @@ static int gc_thread_func(void *data)
wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
unsigned int wait_ms;
+ struct f2fs_gc_control gc_control = {
+ .victim_segno = NULL_SEGNO,
+ .should_migrate_blocks = false,
+ .err_gc_skipped = false };
wait_ms = gc_th->min_sleep_time;
@@ -141,8 +145,12 @@ do_gc:
if (foreground)
sync_mode = false;
+ gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
+ gc_control.no_bg_gc = foreground;
+ gc_control.nr_free_secs = foreground ? 1 : 0;
+
/* if return value is not zero, no victim was selected */
- if (f2fs_gc(sbi, sync_mode, !foreground, false, NULL_SEGNO))
+ if (f2fs_gc(sbi, &gc_control))
wait_ms = gc_th->no_gc_sleep_time;
if (foreground)
@@ -646,6 +654,54 @@ static void release_victim_entry(struct f2fs_sb_info *sbi)
f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}
+static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+
+ if (!dirty_i->enable_pin_section)
+ return false;
+ if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
+ dirty_i->pinned_secmap_cnt++;
+ return true;
+}
+
+static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
+{
+ return dirty_i->pinned_secmap_cnt;
+}
+
+static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
+ unsigned int secno)
+{
+ return dirty_i->enable_pin_section &&
+ f2fs_pinned_section_exists(dirty_i) &&
+ test_bit(secno, dirty_i->pinned_secmap);
+}
+
+static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
+{
+ unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
+
+ if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
+ memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
+ DIRTY_I(sbi)->pinned_secmap_cnt = 0;
+ }
+ DIRTY_I(sbi)->enable_pin_section = enable;
+}
+
+static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
+ unsigned int segno)
+{
+ if (!f2fs_is_pinned_file(inode))
+ return 0;
+ if (gc_type != FG_GC)
+ return -EBUSY;
+ if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
+ f2fs_pin_file_control(inode, true);
+ return -EAGAIN;
+}
+
/*
* This function is called from two paths.
* One is garbage collection and the other is SSR segment selection.
@@ -787,6 +843,9 @@ retry:
if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
goto next;
+ if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
+ goto next;
+
if (is_atgc) {
add_victim_entry(sbi, &p, segno);
goto next;
@@ -1194,18 +1253,9 @@ static int move_data_block(struct inode *inode, block_t bidx,
goto out;
}
- if (f2fs_is_atomic_file(inode)) {
- F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
- F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
- err = -EAGAIN;
- goto out;
- }
-
- if (f2fs_is_pinned_file(inode)) {
- f2fs_pin_file_control(inode, true);
- err = -EAGAIN;
+ err = f2fs_gc_pinned_control(inode, gc_type, segno);
+ if (err)
goto out;
- }
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
@@ -1344,18 +1394,9 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
goto out;
}
- if (f2fs_is_atomic_file(inode)) {
- F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
- F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
- err = -EAGAIN;
- goto out;
- }
- if (f2fs_is_pinned_file(inode)) {
- if (gc_type == FG_GC)
- f2fs_pin_file_control(inode, true);
- err = -EAGAIN;
+ err = f2fs_gc_pinned_control(inode, gc_type, segno);
+ if (err)
goto out;
- }
if (gc_type == BG_GC) {
if (PageWriteback(page)) {
@@ -1475,11 +1516,19 @@ next_step:
ofs_in_node = le16_to_cpu(entry->ofs_in_node);
if (phase == 3) {
+ int err;
+
inode = f2fs_iget(sb, dni.ino);
if (IS_ERR(inode) || is_bad_inode(inode) ||
special_file(inode->i_mode))
continue;
+ err = f2fs_gc_pinned_control(inode, gc_type, segno);
+ if (err == -EAGAIN) {
+ iput(inode);
+ return submitted;
+ }
+
if (!f2fs_down_write_trylock(
&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
iput(inode);
@@ -1699,23 +1748,21 @@ skip:
return seg_freed;
}
-int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
- bool background, bool force, unsigned int segno)
+int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
{
- int gc_type = sync ? FG_GC : BG_GC;
+ int gc_type = gc_control->init_gc_type;
+ unsigned int segno = gc_control->victim_segno;
int sec_freed = 0, seg_freed = 0, total_freed = 0;
int ret = 0;
struct cp_control cpc;
- unsigned int init_segno = segno;
struct gc_inode_list gc_list = {
.ilist = LIST_HEAD_INIT(gc_list.ilist),
.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
};
- unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
- unsigned long long first_skipped;
unsigned int skipped_round = 0, round = 0;
- trace_f2fs_gc_begin(sbi->sb, sync, background,
+ trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
+ gc_control->nr_free_secs,
get_pages(sbi, F2FS_DIRTY_NODES),
get_pages(sbi, F2FS_DIRTY_DENTS),
get_pages(sbi, F2FS_DIRTY_IMETA),
@@ -1726,7 +1773,6 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
cpc.reason = __get_cp_reason(sbi);
sbi->skipped_gc_rwsem = 0;
- first_skipped = last_skipped;
gc_more:
if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
ret = -EINVAL;
@@ -1743,8 +1789,7 @@ gc_more:
* threshold, we can make them free by checkpoint. Then, we
* secure free segments which doesn't need fggc any more.
*/
- if (prefree_segments(sbi) &&
- !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
+ if (prefree_segments(sbi)) {
ret = f2fs_write_checkpoint(sbi, &cpc);
if (ret)
goto stop;
@@ -1754,54 +1799,69 @@ gc_more:
}
/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
- if (gc_type == BG_GC && !background) {
+ if (gc_type == BG_GC && gc_control->no_bg_gc) {
ret = -EINVAL;
goto stop;
}
+retry:
ret = __get_victim(sbi, &segno, gc_type);
- if (ret)
+ if (ret) {
+ /* allow searching for a victim in sections that have pinned data */
+ if (ret == -ENODATA && gc_type == FG_GC &&
+ f2fs_pinned_section_exists(DIRTY_I(sbi))) {
+ f2fs_unpin_all_sections(sbi, false);
+ goto retry;
+ }
goto stop;
+ }
- seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
- if (gc_type == FG_GC &&
- seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
- sec_freed++;
+ seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
+ gc_control->should_migrate_blocks);
total_freed += seg_freed;
- if (gc_type == FG_GC) {
- if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
- sbi->skipped_gc_rwsem)
- skipped_round++;
- last_skipped = sbi->skipped_atomic_files[FG_GC];
- round++;
- }
+ if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
+ sec_freed++;
if (gc_type == FG_GC)
sbi->cur_victim_sec = NULL_SEGNO;
- if (sync)
+ if (gc_control->init_gc_type == FG_GC ||
+ !has_not_enough_free_secs(sbi,
+ (gc_type == FG_GC) ? sec_freed : 0, 0)) {
+ if (gc_type == FG_GC && sec_freed < gc_control->nr_free_secs)
+ goto go_gc_more;
goto stop;
+ }
- if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
- if (skipped_round <= MAX_SKIP_GC_COUNT ||
- skipped_round * 2 < round) {
- segno = NULL_SEGNO;
- goto gc_more;
+ /* FG_GC stops once too many rounds in a row have been skipped */
+ if (gc_type == FG_GC) {
+ if (sbi->skipped_gc_rwsem)
+ skipped_round++;
+ round++;
+ if (skipped_round > MAX_SKIP_GC_COUNT &&
+ skipped_round * 2 >= round) {
+ ret = f2fs_write_checkpoint(sbi, &cpc);
+ goto stop;
}
+ }
- if (first_skipped < last_skipped &&
- (last_skipped - first_skipped) >
- sbi->skipped_gc_rwsem) {
- f2fs_drop_inmem_pages_all(sbi, true);
- segno = NULL_SEGNO;
- goto gc_more;
- }
- if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
- ret = f2fs_write_checkpoint(sbi, &cpc);
+ /* Write checkpoint to reclaim prefree segments */
+ if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE &&
+ prefree_segments(sbi)) {
+ ret = f2fs_write_checkpoint(sbi, &cpc);
+ if (ret)
+ goto stop;
}
+go_gc_more:
+ segno = NULL_SEGNO;
+ goto gc_more;
+
stop:
SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
- SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
+ SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;
+
+ if (gc_type == FG_GC)
+ f2fs_unpin_all_sections(sbi, true);
trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
get_pages(sbi, F2FS_DIRTY_NODES),
@@ -1816,7 +1876,7 @@ stop:
put_gc_inode(&gc_list);
- if (sync && !ret)
+ if (gc_control->err_gc_skipped && !ret)
ret = sec_freed ? 0 : -EAGAIN;
return ret;
}
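
For orientation, a minimal sketch of how a caller drives the reworked entry point through the new f2fs_gc_control parameter block. The field meanings are inferred from the converted call sites later in this patch; gc_lock is assumed to be released inside f2fs_gc(), since those call sites take it without unlocking afterwards, and the example function name is illustrative only.

static int example_run_fg_gc(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,	/* let GC pick its own victim */
		.init_gc_type = FG_GC,		/* start as foreground GC */
		.no_bg_gc = false,
		.should_migrate_blocks = false,
		.err_gc_skipped = true,		/* map "nothing freed" to -EAGAIN */
		.nr_free_secs = 1,		/* stop once one section has been freed */
	};

	f2fs_down_write(&sbi->gc_lock);
	return f2fs_gc(sbi, &gc_control);
}
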
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index 3cb1e7a24740..049ce50cec9b 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -91,7 +91,7 @@ static u32 TEA_hash_name(const u8 *p, size_t len)
/*
* Compute @fname->hash. For all directories, @fname->disk_name must be set.
* For casefolded directories, @fname->usr_fname must be set, and also
- * @fname->cf_name if the filename is valid Unicode.
+ * @fname->cf_name if the filename is valid Unicode and is not "." or "..".
*/
void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname)
{
@@ -110,10 +110,11 @@ void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname)
/*
* If the casefolded name is provided, hash it instead of the
* on-disk name. If the casefolded name is *not* provided, that
- * should only be because the name wasn't valid Unicode, so fall
- * back to treating the name as an opaque byte sequence. Note
- * that to handle encrypted directories, the fallback must use
- * usr_fname (plaintext) rather than disk_name (ciphertext).
+ * should only be because the name wasn't valid Unicode or was
+ * "." or "..", so fall back to treating the name as an opaque
+ * byte sequence. Note that to handle encrypted directories,
+ * the fallback must use usr_fname (plaintext) rather than
+ * disk_name (ciphertext).
*/
WARN_ON_ONCE(!fname->usr_fname->name);
if (fname->cf_name.name) {
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index a578bf83b803..bf46a7dfbea2 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -14,21 +14,40 @@
#include "node.h"
#include <trace/events/f2fs.h>
-bool f2fs_may_inline_data(struct inode *inode)
+static bool support_inline_data(struct inode *inode)
{
if (f2fs_is_atomic_file(inode))
return false;
-
if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
return false;
-
if (i_size_read(inode) > MAX_INLINE_DATA(inode))
return false;
+ return true;
+}
- if (f2fs_post_read_required(inode))
+bool f2fs_may_inline_data(struct inode *inode)
+{
+ if (!support_inline_data(inode))
return false;
- return true;
+ return !f2fs_post_read_required(inode);
+}
+
+bool f2fs_sanity_check_inline_data(struct inode *inode)
+{
+ if (!f2fs_has_inline_data(inode))
+ return false;
+
+ if (!support_inline_data(inode))
+ return true;
+
+ /*
+ * used by sanity_check_inode(), when disk layout fields have not
+ * been synchronized to in-memory fields.
+ */
+ return (S_ISREG(inode->i_mode) &&
+ (file_is_encrypt(inode) || file_is_verity(inode) ||
+ (F2FS_I(inode)->i_flags & F2FS_COMPR_FL)));
}
bool f2fs_may_inline_dentry(struct inode *inode)
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 83639238a1fe..fc55f5bd1fcc 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -260,8 +260,8 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
return false;
}
- if (F2FS_I(inode)->extent_tree) {
- struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
+ if (fi->extent_tree) {
+ struct extent_info *ei = &fi->extent_tree->largest;
if (ei->len &&
(!f2fs_is_valid_blkaddr(sbi, ei->blk,
@@ -276,8 +276,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
}
}
- if (f2fs_has_inline_data(inode) &&
- (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
+ if (f2fs_sanity_check_inline_data(inode)) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
__func__, inode->i_ino, inode->i_mode);
@@ -466,10 +465,10 @@ static int do_read_inode(struct inode *inode)
}
}
- F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
- F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
- F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
- F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
+ fi->i_disk_time[0] = inode->i_atime;
+ fi->i_disk_time[1] = inode->i_ctime;
+ fi->i_disk_time[2] = inode->i_mtime;
+ fi->i_disk_time[3] = fi->i_crtime;
f2fs_put_page(node_page, 1);
stat_inc_inline_xattr(inode);
@@ -745,9 +744,8 @@ void f2fs_evict_inode(struct inode *inode)
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
int err = 0;
- /* some remained atomic pages should discarded */
if (f2fs_is_atomic_file(inode))
- f2fs_drop_inmem_pages(inode);
+ f2fs_abort_atomic_write(inode, true);
trace_f2fs_evict_inode(inode);
truncate_inode_pages_final(&inode->i_data);
@@ -796,8 +794,22 @@ retry:
f2fs_lock_op(sbi);
err = f2fs_remove_inode_page(inode);
f2fs_unlock_op(sbi);
- if (err == -ENOENT)
+ if (err == -ENOENT) {
err = 0;
+
+ /*
+ * in fuzzed image, another node may has the same
+ * block address as inode's, if it was truncated
+ * previously, truncation of inode node will fail.
+ */
+ if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
+ f2fs_warn(F2FS_I_SB(inode),
+ "f2fs_evict_inode: inconsistent node id, ino:%lu",
+ inode->i_ino);
+ f2fs_inode_synced(inode);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ }
+ }
}
/* give more chances, if ENOMEM case */
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 5ed79b29999f..c549acb52ac4 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -37,13 +37,10 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
if (!inode)
return ERR_PTR(-ENOMEM);
- f2fs_lock_op(sbi);
if (!f2fs_alloc_nid(sbi, &ino)) {
- f2fs_unlock_op(sbi);
err = -ENOSPC;
goto fail;
}
- f2fs_unlock_op(sbi);
nid_free = true;
@@ -461,6 +458,13 @@ static int __recover_dot_dentries(struct inode *dir, nid_t pino)
return 0;
}
+ if (!S_ISDIR(dir->i_mode)) {
+ f2fs_err(sbi, "inconsistent inode status, skip recovering inline_dots inode (ino:%lu, i_mode:%u, pino:%u)",
+ dir->i_ino, dir->i_mode, pino);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ return -ENOTDIR;
+ }
+
err = f2fs_dquot_initialize(dir);
if (err)
return err;
@@ -836,8 +840,8 @@ out:
}
static int __f2fs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode,
- struct inode **whiteout)
+ struct dentry *dentry, umode_t mode, bool is_whiteout,
+ struct inode **new_inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode;
@@ -851,7 +855,7 @@ static int __f2fs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
if (IS_ERR(inode))
return PTR_ERR(inode);
- if (whiteout) {
+ if (is_whiteout) {
init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
inode->i_op = &f2fs_special_inode_operations;
} else {
@@ -876,21 +880,25 @@ static int __f2fs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
f2fs_add_orphan_inode(inode);
f2fs_alloc_nid_done(sbi, inode->i_ino);
- if (whiteout) {
+ if (is_whiteout) {
f2fs_i_links_write(inode, false);
spin_lock(&inode->i_lock);
inode->i_state |= I_LINKABLE;
spin_unlock(&inode->i_lock);
-
- *whiteout = inode;
} else {
- d_tmpfile(dentry, inode);
+ if (dentry)
+ d_tmpfile(dentry, inode);
+ else
+ f2fs_i_links_write(inode, false);
}
/* link_count was changed by d_tmpfile as well. */
f2fs_unlock_op(sbi);
unlock_new_inode(inode);
+ if (new_inode)
+ *new_inode = inode;
+
f2fs_balance_fs(sbi, true);
return 0;
@@ -911,7 +919,7 @@ static int f2fs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
if (!f2fs_is_checkpoint_ready(sbi))
return -ENOSPC;
- return __f2fs_tmpfile(mnt_userns, dir, dentry, mode, NULL);
+ return __f2fs_tmpfile(mnt_userns, dir, dentry, mode, false, NULL);
}
static int f2fs_create_whiteout(struct user_namespace *mnt_userns,
@@ -921,7 +929,13 @@ static int f2fs_create_whiteout(struct user_namespace *mnt_userns,
return -EIO;
return __f2fs_tmpfile(mnt_userns, dir, NULL,
- S_IFCHR | WHITEOUT_MODE, whiteout);
+ S_IFCHR | WHITEOUT_MODE, true, whiteout);
+}
+
+int f2fs_get_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+ struct inode **new_inode)
+{
+ return __f2fs_tmpfile(mnt_userns, dir, NULL, S_IFREG, false, new_inode);
}
static int f2fs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 8ccff18560ff..836c79a20afc 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -90,10 +90,6 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
atomic_read(&sbi->total_ext_node) *
sizeof(struct extent_node)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
- } else if (type == INMEM_PAGES) {
- /* it allows 20% / total_ram for inmemory pages */
- mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
- res = mem_size < (val.totalram / 5);
} else if (type == DISCARD_CACHE) {
mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
sizeof(struct discard_cmd)) >> PAGE_SHIFT;
@@ -1416,8 +1412,7 @@ repeat:
err = read_node_page(page, 0);
if (err < 0) {
- f2fs_put_page(page, 1);
- return ERR_PTR(err);
+ goto out_put_err;
} else if (err == LOCKED_PAGE) {
err = 0;
goto page_hit;
@@ -1443,19 +1438,21 @@ repeat:
goto out_err;
}
page_hit:
- if (unlikely(nid != nid_of_node(page))) {
- f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
+ if (likely(nid == nid_of_node(page)))
+ return page;
+
+ f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
nid, nid_of_node(page), ino_of_node(page),
ofs_of_node(page), cpver_of_node(page),
next_blkaddr_of_node(page));
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- err = -EINVAL;
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ err = -EINVAL;
out_err:
- ClearPageUptodate(page);
- f2fs_put_page(page, 1);
- return ERR_PTR(err);
- }
- return page;
+ ClearPageUptodate(page);
+out_put_err:
+ f2fs_handle_page_eio(sbi, page->index, NODE);
+ f2fs_put_page(page, 1);
+ return ERR_PTR(err);
}
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
@@ -1631,7 +1628,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
goto redirty_out;
}
- if (atomic && !test_opt(sbi, NOBARRIER))
+ if (atomic && !test_opt(sbi, NOBARRIER) && !f2fs_sb_has_blkzoned(sbi))
fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
/* should add to global list before clearing PAGECACHE status */
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 4c1d34bfea78..3c09cae058b0 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -147,7 +147,6 @@ enum mem_type {
DIRTY_DENTS, /* indicates dirty dentry pages */
INO_ENTRIES, /* indicates inode entries */
EXTENT_CACHE, /* indicates extent cache */
- INMEM_PAGES, /* indicates inmemory pages */
DISCARD_CACHE, /* indicates memory of cached discard cmds */
COMPRESS_PAGE, /* indicates memory of cached compressed pages */
BASE_CHECK, /* check kernel status */
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 7225ce09f3ab..874c1b9c41a2 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -30,7 +30,7 @@
static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
-static struct kmem_cache *inmem_entry_slab;
+static struct kmem_cache *revoke_entry_slab;
static unsigned long __reverse_ulong(unsigned char *str)
{
@@ -185,301 +185,175 @@ bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}
-void f2fs_register_inmem_page(struct inode *inode, struct page *page)
+void f2fs_abort_atomic_write(struct inode *inode, bool clean)
{
- struct inmem_pages *new;
-
- set_page_private_atomic(page);
-
- new = f2fs_kmem_cache_alloc(inmem_entry_slab,
- GFP_NOFS, true, NULL);
-
- /* add atomic page indices to the list */
- new->page = page;
- INIT_LIST_HEAD(&new->list);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
- /* increase reference count with clean state */
- get_page(page);
- mutex_lock(&F2FS_I(inode)->inmem_lock);
- list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages);
- inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
- mutex_unlock(&F2FS_I(inode)->inmem_lock);
+ if (f2fs_is_atomic_file(inode)) {
+ if (clean)
+ truncate_inode_pages_final(inode->i_mapping);
+ clear_inode_flag(fi->cow_inode, FI_ATOMIC_FILE);
+ iput(fi->cow_inode);
+ fi->cow_inode = NULL;
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
- trace_f2fs_register_inmem_page(page, INMEM);
+ spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+ sbi->atomic_files--;
+ spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+ }
}
-static int __revoke_inmem_pages(struct inode *inode,
- struct list_head *head, bool drop, bool recover,
- bool trylock)
+static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
+ block_t new_addr, block_t *old_addr, bool recover)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct inmem_pages *cur, *tmp;
- int err = 0;
-
- list_for_each_entry_safe(cur, tmp, head, list) {
- struct page *page = cur->page;
-
- if (drop)
- trace_f2fs_commit_inmem_page(page, INMEM_DROP);
-
- if (trylock) {
- /*
- * to avoid deadlock in between page lock and
- * inmem_lock.
- */
- if (!trylock_page(page))
- continue;
- } else {
- lock_page(page);
- }
-
- f2fs_wait_on_page_writeback(page, DATA, true, true);
-
- if (recover) {
- struct dnode_of_data dn;
- struct node_info ni;
+ struct dnode_of_data dn;
+ struct node_info ni;
+ int err;
- trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
retry:
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = f2fs_get_dnode_of_data(&dn, page->index,
- LOOKUP_NODE);
- if (err) {
- if (err == -ENOMEM) {
- memalloc_retry_wait(GFP_NOFS);
- goto retry;
- }
- err = -EAGAIN;
- goto next;
- }
-
- err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
- if (err) {
- f2fs_put_dnode(&dn);
- return err;
- }
-
- if (cur->old_addr == NEW_ADDR) {
- f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
- f2fs_update_data_blkaddr(&dn, NEW_ADDR);
- } else
- f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
- cur->old_addr, ni.version, true, true);
- f2fs_put_dnode(&dn);
- }
-next:
- /* we don't need to invalidate this in the sccessful status */
- if (drop || recover) {
- ClearPageUptodate(page);
- clear_page_private_gcing(page);
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE_RA);
+ if (err) {
+ if (err == -ENOMEM) {
+ f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+ goto retry;
}
- detach_page_private(page);
- set_page_private(page, 0);
- f2fs_put_page(page, 1);
-
- list_del(&cur->list);
- kmem_cache_free(inmem_entry_slab, cur);
- dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
+ return err;
}
- return err;
-}
-void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
-{
- struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
- struct inode *inode;
- struct f2fs_inode_info *fi;
- unsigned int count = sbi->atomic_files;
- unsigned int looped = 0;
-next:
- spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
- if (list_empty(head)) {
- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
- return;
+ err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
+ if (err) {
+ f2fs_put_dnode(&dn);
+ return err;
}
- fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
- inode = igrab(&fi->vfs_inode);
- if (inode)
- list_move_tail(&fi->inmem_ilist, head);
- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
- if (inode) {
- if (gc_failure) {
- if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
- goto skip;
+ if (recover) {
+ /* dn.data_blkaddr is always valid */
+ if (!__is_valid_data_blkaddr(new_addr)) {
+ if (new_addr == NULL_ADDR)
+ dec_valid_block_count(sbi, inode, 1);
+ f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
+ f2fs_update_data_blkaddr(&dn, new_addr);
+ } else {
+ f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
+ new_addr, ni.version, true, true);
}
- set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
- f2fs_drop_inmem_pages(inode);
-skip:
- iput(inode);
- }
- f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
- if (gc_failure) {
- if (++looped >= count)
- return;
- }
- goto next;
-}
-
-void f2fs_drop_inmem_pages(struct inode *inode)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_inode_info *fi = F2FS_I(inode);
+ } else {
+ blkcnt_t count = 1;
- do {
- mutex_lock(&fi->inmem_lock);
- if (list_empty(&fi->inmem_pages)) {
- fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
-
- spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
- if (!list_empty(&fi->inmem_ilist))
- list_del_init(&fi->inmem_ilist);
- if (f2fs_is_atomic_file(inode)) {
- clear_inode_flag(inode, FI_ATOMIC_FILE);
- sbi->atomic_files--;
- }
- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+ *old_addr = dn.data_blkaddr;
+ f2fs_truncate_data_blocks_range(&dn, 1);
+ dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);
+ inc_valid_block_count(sbi, inode, &count);
+ f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
+ ni.version, true, false);
+ }
- mutex_unlock(&fi->inmem_lock);
- break;
- }
- __revoke_inmem_pages(inode, &fi->inmem_pages,
- true, false, true);
- mutex_unlock(&fi->inmem_lock);
- } while (1);
+ f2fs_put_dnode(&dn);
+ return 0;
}
-void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
+static void __complete_revoke_list(struct inode *inode, struct list_head *head,
+ bool revoke)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct list_head *head = &fi->inmem_pages;
- struct inmem_pages *cur = NULL;
-
- f2fs_bug_on(sbi, !page_private_atomic(page));
+ struct revoke_entry *cur, *tmp;
- mutex_lock(&fi->inmem_lock);
- list_for_each_entry(cur, head, list) {
- if (cur->page == page)
- break;
+ list_for_each_entry_safe(cur, tmp, head, list) {
+ if (revoke)
+ __replace_atomic_write_block(inode, cur->index,
+ cur->old_addr, NULL, true);
+ list_del(&cur->list);
+ kmem_cache_free(revoke_entry_slab, cur);
}
-
- f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
- list_del(&cur->list);
- mutex_unlock(&fi->inmem_lock);
-
- dec_page_count(sbi, F2FS_INMEM_PAGES);
- kmem_cache_free(inmem_entry_slab, cur);
-
- ClearPageUptodate(page);
- clear_page_private_atomic(page);
- f2fs_put_page(page, 0);
-
- detach_page_private(page);
- set_page_private(page, 0);
-
- trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
}
-static int __f2fs_commit_inmem_pages(struct inode *inode)
+static int __f2fs_commit_atomic_write(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
- struct inmem_pages *cur, *tmp;
- struct f2fs_io_info fio = {
- .sbi = sbi,
- .ino = inode->i_ino,
- .type = DATA,
- .op = REQ_OP_WRITE,
- .op_flags = REQ_SYNC | REQ_PRIO,
- .io_type = FS_DATA_IO,
- };
+ struct inode *cow_inode = fi->cow_inode;
+ struct revoke_entry *new;
struct list_head revoke_list;
- bool submit_bio = false;
- int err = 0;
+ block_t blkaddr;
+ struct dnode_of_data dn;
+ pgoff_t len = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ pgoff_t off = 0, blen, index;
+ int ret = 0, i;
INIT_LIST_HEAD(&revoke_list);
- list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
- struct page *page = cur->page;
+ while (len) {
+ blen = min_t(pgoff_t, ADDRS_PER_BLOCK(cow_inode), len);
- lock_page(page);
- if (page->mapping == inode->i_mapping) {
- trace_f2fs_commit_inmem_page(page, INMEM);
+ set_new_dnode(&dn, cow_inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
+ if (ret && ret != -ENOENT) {
+ goto out;
+ } else if (ret == -ENOENT) {
+ ret = 0;
+ if (dn.max_level == 0)
+ goto out;
+ goto next;
+ }
- f2fs_wait_on_page_writeback(page, DATA, true, true);
+ blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, cow_inode),
+ len);
+ index = off;
+ for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) {
+ blkaddr = f2fs_data_blkaddr(&dn);
- set_page_dirty(page);
- if (clear_page_dirty_for_io(page)) {
- inode_dec_dirty_pages(inode);
- f2fs_remove_dirty_inode(inode);
- }
-retry:
- fio.page = page;
- fio.old_blkaddr = NULL_ADDR;
- fio.encrypted_page = NULL;
- fio.need_lock = LOCK_DONE;
- err = f2fs_do_write_data_page(&fio);
- if (err) {
- if (err == -ENOMEM) {
- memalloc_retry_wait(GFP_NOFS);
- goto retry;
- }
- unlock_page(page);
- break;
+ if (!__is_valid_data_blkaddr(blkaddr)) {
+ continue;
+ } else if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
+ DATA_GENERIC_ENHANCE)) {
+ f2fs_put_dnode(&dn);
+ ret = -EFSCORRUPTED;
+ goto out;
}
- /* record old blkaddr for revoking */
- cur->old_addr = fio.old_blkaddr;
- submit_bio = true;
- }
- unlock_page(page);
- list_move_tail(&cur->list, &revoke_list);
- }
- if (submit_bio)
- f2fs_submit_merged_write_cond(sbi, inode, NULL, 0, DATA);
+ new = f2fs_kmem_cache_alloc(revoke_entry_slab, GFP_NOFS,
+ true, NULL);
- if (err) {
- /*
- * try to revoke all committed pages, but still we could fail
- * due to no memory or other reason, if that happened, EAGAIN
- * will be returned, which means in such case, transaction is
- * already not integrity, caller should use journal to do the
- * recovery or rewrite & commit last transaction. For other
- * error number, revoking was done by filesystem itself.
- */
- err = __revoke_inmem_pages(inode, &revoke_list,
- false, true, false);
+ ret = __replace_atomic_write_block(inode, index, blkaddr,
+ &new->old_addr, false);
+ if (ret) {
+ f2fs_put_dnode(&dn);
+ kmem_cache_free(revoke_entry_slab, new);
+ goto out;
+ }
- /* drop all uncommitted pages */
- __revoke_inmem_pages(inode, &fi->inmem_pages,
- true, false, false);
- } else {
- __revoke_inmem_pages(inode, &revoke_list,
- false, false, false);
+ f2fs_update_data_blkaddr(&dn, NULL_ADDR);
+ new->index = index;
+ list_add_tail(&new->list, &revoke_list);
+ }
+ f2fs_put_dnode(&dn);
+next:
+ off += blen;
+ len -= blen;
}
- return err;
+out:
+ __complete_revoke_list(inode, &revoke_list, ret ? true : false);
+
+ return ret;
}
-int f2fs_commit_inmem_pages(struct inode *inode)
+int f2fs_commit_atomic_write(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
int err;
- f2fs_balance_fs(sbi, true);
+ err = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
+ if (err)
+ return err;
f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
-
f2fs_lock_op(sbi);
- set_inode_flag(inode, FI_ATOMIC_COMMIT);
-
- mutex_lock(&fi->inmem_lock);
- err = __f2fs_commit_inmem_pages(inode);
- mutex_unlock(&fi->inmem_lock);
- clear_inode_flag(inode, FI_ATOMIC_COMMIT);
+ err = __f2fs_commit_atomic_write(inode);
f2fs_unlock_op(sbi);
f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
@@ -520,8 +394,15 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
io_schedule();
finish_wait(&sbi->gc_thread->fggc_wq, &wait);
} else {
+ struct f2fs_gc_control gc_control = {
+ .victim_segno = NULL_SEGNO,
+ .init_gc_type = BG_GC,
+ .no_bg_gc = true,
+ .should_migrate_blocks = false,
+ .err_gc_skipped = false,
+ .nr_free_secs = 1 };
f2fs_down_write(&sbi->gc_lock);
- f2fs_gc(sbi, false, false, false, NULL_SEGNO);
+ f2fs_gc(sbi, &gc_control);
}
}
}
@@ -1664,33 +1545,32 @@ static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
&(dcc->fstrim_list) : &(dcc->wait_list);
- struct discard_cmd *dc, *tmp;
- bool need_wait;
+ struct discard_cmd *dc = NULL, *iter, *tmp;
unsigned int trimmed = 0;
next:
- need_wait = false;
+ dc = NULL;
mutex_lock(&dcc->cmd_lock);
- list_for_each_entry_safe(dc, tmp, wait_list, list) {
- if (dc->lstart + dc->len <= start || end <= dc->lstart)
+ list_for_each_entry_safe(iter, tmp, wait_list, list) {
+ if (iter->lstart + iter->len <= start || end <= iter->lstart)
continue;
- if (dc->len < dpolicy->granularity)
+ if (iter->len < dpolicy->granularity)
continue;
- if (dc->state == D_DONE && !dc->ref) {
- wait_for_completion_io(&dc->wait);
- if (!dc->error)
- trimmed += dc->len;
- __remove_discard_cmd(sbi, dc);
+ if (iter->state == D_DONE && !iter->ref) {
+ wait_for_completion_io(&iter->wait);
+ if (!iter->error)
+ trimmed += iter->len;
+ __remove_discard_cmd(sbi, iter);
} else {
- dc->ref++;
- need_wait = true;
+ iter->ref++;
+ dc = iter;
break;
}
}
mutex_unlock(&dcc->cmd_lock);
- if (need_wait) {
+ if (dc) {
trimmed += __wait_one_discard_bio(sbi, dc);
goto next;
}
@@ -3286,8 +3166,7 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
return CURSEG_COLD_DATA;
if (file_is_hot(inode) ||
is_inode_flag_set(inode, FI_HOT_DATA) ||
- f2fs_is_atomic_file(inode) ||
- f2fs_is_volatile_file(inode))
+ f2fs_is_atomic_file(inode))
return CURSEG_HOT_DATA;
return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
} else {
@@ -4084,10 +3963,12 @@ static void adjust_sit_entry_set(struct sit_entry_set *ses,
return;
list_for_each_entry_continue(next, head, set_list)
- if (ses->entry_cnt <= next->entry_cnt)
- break;
+ if (ses->entry_cnt <= next->entry_cnt) {
+ list_move_tail(&ses->set_list, &next->set_list);
+ return;
+ }
- list_move_tail(&ses->set_list, &next->set_list);
+ list_move_tail(&ses->set_list, head);
}
static void add_sit_entry(unsigned int segno, struct list_head *head)
@@ -4455,7 +4336,7 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
unsigned int i, start, end;
unsigned int readed, start_blk = 0;
int err = 0;
- block_t total_node_blocks = 0;
+ block_t sit_valid_blocks[2] = {0, 0};
do {
readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
@@ -4480,8 +4361,8 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
if (err)
return err;
seg_info_from_raw_sit(se, &sit);
- if (IS_NODESEG(se->type))
- total_node_blocks += se->valid_blocks;
+
+ sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
if (f2fs_block_unit_discard(sbi)) {
/* build discard map only one time */
@@ -4521,15 +4402,15 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
sit = sit_in_journal(journal, i);
old_valid_blocks = se->valid_blocks;
- if (IS_NODESEG(se->type))
- total_node_blocks -= old_valid_blocks;
+
+ sit_valid_blocks[SE_PAGETYPE(se)] -= old_valid_blocks;
err = check_block_count(sbi, start, &sit);
if (err)
break;
seg_info_from_raw_sit(se, &sit);
- if (IS_NODESEG(se->type))
- total_node_blocks += se->valid_blocks;
+
+ sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
if (f2fs_block_unit_discard(sbi)) {
if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
@@ -4551,13 +4432,24 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
}
up_read(&curseg->journal_rwsem);
- if (!err && total_node_blocks != valid_node_count(sbi)) {
+ if (err)
+ return err;
+
+ if (sit_valid_blocks[NODE] != valid_node_count(sbi)) {
f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
- total_node_blocks, valid_node_count(sbi));
- err = -EFSCORRUPTED;
+ sit_valid_blocks[NODE], valid_node_count(sbi));
+ return -EFSCORRUPTED;
}
- return err;
+ if (sit_valid_blocks[DATA] + sit_valid_blocks[NODE] >
+ valid_user_blocks(sbi)) {
+ f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u",
+ sit_valid_blocks[DATA], sit_valid_blocks[NODE],
+ valid_user_blocks(sbi));
+ return -EFSCORRUPTED;
+ }
+
+ return 0;
}
static void init_free_segmap(struct f2fs_sb_info *sbi)
@@ -4637,6 +4529,13 @@ static int init_victim_secmap(struct f2fs_sb_info *sbi)
dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
if (!dirty_i->victim_secmap)
return -ENOMEM;
+
+ dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
+ if (!dirty_i->pinned_secmap)
+ return -ENOMEM;
+
+ dirty_i->pinned_secmap_cnt = 0;
+ dirty_i->enable_pin_section = true;
return 0;
}
@@ -5225,6 +5124,7 @@ static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ kvfree(dirty_i->pinned_secmap);
kvfree(dirty_i->victim_secmap);
}
@@ -5335,9 +5235,9 @@ int __init f2fs_create_segment_manager_caches(void)
if (!sit_entry_set_slab)
goto destroy_discard_cmd;
- inmem_entry_slab = f2fs_kmem_cache_create("f2fs_inmem_page_entry",
- sizeof(struct inmem_pages));
- if (!inmem_entry_slab)
+ revoke_entry_slab = f2fs_kmem_cache_create("f2fs_revoke_entry",
+ sizeof(struct revoke_entry));
+ if (!revoke_entry_slab)
goto destroy_sit_entry_set;
return 0;
@@ -5356,5 +5256,5 @@ void f2fs_destroy_segment_manager_caches(void)
kmem_cache_destroy(sit_entry_set_slab);
kmem_cache_destroy(discard_cmd_slab);
kmem_cache_destroy(discard_entry_slab);
- kmem_cache_destroy(inmem_entry_slab);
+ kmem_cache_destroy(revoke_entry_slab);
}
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 5c94caf0c0a1..3f277dfcb131 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -24,6 +24,7 @@
#define IS_DATASEG(t) ((t) <= CURSEG_COLD_DATA)
#define IS_NODESEG(t) ((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE)
+#define SE_PAGETYPE(se) ((IS_NODESEG((se)->type) ? NODE : DATA))
static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
unsigned short seg_type)
@@ -224,10 +225,10 @@ struct segment_allocation {
#define MAX_SKIP_GC_COUNT 16
-struct inmem_pages {
+struct revoke_entry {
struct list_head list;
- struct page *page;
block_t old_addr; /* for revoking when fail to commit */
+ pgoff_t index;
};
struct sit_info {
@@ -294,6 +295,9 @@ struct dirty_seglist_info {
struct mutex seglist_lock; /* lock for segment bitmaps */
int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */
unsigned long *victim_secmap; /* background GC victims */
+ unsigned long *pinned_secmap; /* pinned victims from foreground GC */
+ unsigned int pinned_secmap_cnt; /* count of victims which has pinned data */
+ bool enable_pin_section; /* enable pinning section */
};
/* victim selection function for cleaning and SSR */
@@ -572,11 +576,10 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi)
return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi));
}
-static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
+static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
+ unsigned int node_blocks, unsigned int dent_blocks)
{
- unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
- get_pages(sbi, F2FS_DIRTY_DENTS);
- unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
+
unsigned int segno, left_blocks;
int i;
@@ -602,19 +605,28 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
int freed, int needed)
{
- int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
- int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
- int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
+ unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
+ get_pages(sbi, F2FS_DIRTY_DENTS) +
+ get_pages(sbi, F2FS_DIRTY_IMETA);
+ unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
+ unsigned int node_secs = total_node_blocks / BLKS_PER_SEC(sbi);
+ unsigned int dent_secs = total_dent_blocks / BLKS_PER_SEC(sbi);
+ unsigned int node_blocks = total_node_blocks % BLKS_PER_SEC(sbi);
+ unsigned int dent_blocks = total_dent_blocks % BLKS_PER_SEC(sbi);
+ unsigned int free, need_lower, need_upper;
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return false;
- if (free_sections(sbi) + freed == reserved_sections(sbi) + needed &&
- has_curseg_enough_space(sbi))
+ free = free_sections(sbi) + freed;
+ need_lower = node_secs + dent_secs + reserved_sections(sbi) + needed;
+ need_upper = need_lower + (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0);
+
+ if (free > need_upper)
return false;
- return (free_sections(sbi) + freed) <=
- (node_secs + 2 * dent_secs + imeta_secs +
- reserved_sections(sbi) + needed);
+ else if (free <= need_lower)
+ return true;
+ return !has_curseg_enough_space(sbi, node_blocks, dent_blocks);
}
static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
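
The rework above splits the dirty node and dentry block counts into whole sections plus a partial-section remainder, and only falls back to inspecting the current segments in the borderline band between need_lower and need_upper. A standalone sketch of the same arithmetic, with the freed/needed adjustments folded out and made-up numbers (plain C, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define BLKS_PER_SEC 512	/* made up for the example */

static bool not_enough_free_secs(unsigned int free_secs, unsigned int reserved_secs,
				 unsigned int dirty_node_blks, unsigned int dirty_dent_blks,
				 bool curseg_has_room)
{
	unsigned int node_secs = dirty_node_blks / BLKS_PER_SEC;
	unsigned int dent_secs = dirty_dent_blks / BLKS_PER_SEC;
	unsigned int node_rem = dirty_node_blks % BLKS_PER_SEC;
	unsigned int dent_rem = dirty_dent_blks % BLKS_PER_SEC;
	unsigned int need_lower = node_secs + dent_secs + reserved_secs;
	unsigned int need_upper = need_lower + (node_rem ? 1 : 0) + (dent_rem ? 1 : 0);

	if (free_secs > need_upper)
		return false;		/* clearly enough room */
	if (free_secs <= need_lower)
		return true;		/* clearly not enough */
	/* borderline: the partial sections may still fit in the current segments */
	return !curseg_has_room;
}

int main(void)
{
	/* 3 free sections, 2 reserved, 600 dirty node blocks, 100 dirty dentry blocks */
	printf("%d\n", not_enough_free_secs(3, 2, 600, 100, true));
	return 0;
}
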
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index ed3e8b7a8260..37221e94e5ef 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -525,10 +525,11 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
return -EINVAL;
}
f2fs_warn(sbi, "Test dummy encryption mode enabled");
+ return 0;
#else
- f2fs_warn(sbi, "Test dummy encryption mount option ignored");
+ f2fs_warn(sbi, "test_dummy_encryption option not supported");
+ return -EINVAL;
#endif
- return 0;
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -1339,9 +1340,6 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
spin_lock_init(&fi->i_size_lock);
INIT_LIST_HEAD(&fi->dirty_list);
INIT_LIST_HEAD(&fi->gdirty_list);
- INIT_LIST_HEAD(&fi->inmem_ilist);
- INIT_LIST_HEAD(&fi->inmem_pages);
- mutex_init(&fi->inmem_lock);
init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
init_f2fs_rwsem(&fi->i_xattr_sem);
@@ -1382,9 +1380,8 @@ static int f2fs_drop_inode(struct inode *inode)
atomic_inc(&inode->i_count);
spin_unlock(&inode->i_lock);
- /* some remained atomic pages should discarded */
if (f2fs_is_atomic_file(inode))
- f2fs_drop_inmem_pages(inode);
+ f2fs_abort_atomic_write(inode, true);
/* should remain fi->extent_tree for writepage */
f2fs_destroy_extent_node(inode);
@@ -1707,18 +1704,23 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
block_t total_count, user_block_count, start_count;
u64 avail_node_count;
+ unsigned int total_valid_node_count;
total_count = le64_to_cpu(sbi->raw_super->block_count);
- user_block_count = sbi->user_block_count;
start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
buf->f_type = F2FS_SUPER_MAGIC;
buf->f_bsize = sbi->blocksize;
buf->f_blocks = total_count - start_count;
+
+ spin_lock(&sbi->stat_lock);
+
+ user_block_count = sbi->user_block_count;
+ total_valid_node_count = valid_node_count(sbi);
+ avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
sbi->current_reserved_blocks;
- spin_lock(&sbi->stat_lock);
if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
buf->f_bfree = 0;
else
@@ -1731,14 +1733,12 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
else
buf->f_bavail = 0;
- avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
-
if (avail_node_count > user_block_count) {
buf->f_files = user_block_count;
buf->f_ffree = buf->f_bavail;
} else {
buf->f_files = avail_node_count;
- buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
+ buf->f_ffree = min(avail_node_count - total_valid_node_count,
buf->f_bavail);
}
@@ -2055,7 +2055,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
{
unsigned int s_flags = sbi->sb->s_flags;
struct cp_control cpc;
- unsigned int gc_mode;
+ unsigned int gc_mode = sbi->gc_mode;
int err = 0;
int ret;
block_t unusable;
@@ -2066,14 +2066,25 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
}
sbi->sb->s_flags |= SB_ACTIVE;
+ /* check if we need more GC first */
+ unusable = f2fs_get_unusable_blocks(sbi);
+ if (!f2fs_disable_cp_again(sbi, unusable))
+ goto skip_gc;
+
f2fs_update_time(sbi, DISABLE_TIME);
- gc_mode = sbi->gc_mode;
sbi->gc_mode = GC_URGENT_HIGH;
while (!f2fs_time_over(sbi, DISABLE_TIME)) {
+ struct f2fs_gc_control gc_control = {
+ .victim_segno = NULL_SEGNO,
+ .init_gc_type = FG_GC,
+ .should_migrate_blocks = false,
+ .err_gc_skipped = true,
+ .nr_free_secs = 1 };
+
f2fs_down_write(&sbi->gc_lock);
- err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
+ err = f2fs_gc(sbi, &gc_control);
if (err == -ENODATA) {
err = 0;
break;
@@ -2094,6 +2105,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
goto restore_flag;
}
+skip_gc:
f2fs_down_write(&sbi->gc_lock);
cpc.reason = CP_PAUSE;
set_sbi_flag(sbi, SBI_CP_DISABLED);
@@ -2684,7 +2696,8 @@ int f2fs_quota_sync(struct super_block *sb, int type)
if (!sb_has_quota_active(sb, cnt))
continue;
- inode_lock(dqopt->files[cnt]);
+ if (!f2fs_sb_has_quota_ino(sbi))
+ inode_lock(dqopt->files[cnt]);
/*
* do_quotactl
@@ -2703,7 +2716,8 @@ int f2fs_quota_sync(struct super_block *sb, int type)
f2fs_up_read(&sbi->quota_sem);
f2fs_unlock_op(sbi);
- inode_unlock(dqopt->files[cnt]);
+ if (!f2fs_sb_has_quota_ino(sbi))
+ inode_unlock(dqopt->files[cnt]);
if (ret)
break;
@@ -3648,22 +3662,29 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
struct block_device *bdev = FDEV(devi).bdev;
sector_t nr_sectors = bdev_nr_sectors(bdev);
struct f2fs_report_zones_args rep_zone_arg;
+ u64 zone_sectors;
int ret;
if (!f2fs_sb_has_blkzoned(sbi))
return 0;
+ zone_sectors = bdev_zone_sectors(bdev);
+ if (!is_power_of_2(zone_sectors)) {
+ f2fs_err(sbi, "F2FS does not support non power of 2 zone sizes\n");
+ return -EINVAL;
+ }
+
if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
- SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
+ SECTOR_TO_BLOCK(zone_sectors))
return -EINVAL;
- sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
+ sbi->blocks_per_blkz = SECTOR_TO_BLOCK(zone_sectors);
if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
__ilog2_u32(sbi->blocks_per_blkz))
return -EINVAL;
sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
sbi->log_blocks_per_blkz;
- if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
+ if (nr_sectors & (zone_sectors - 1))
FDEV(devi).nr_blkz++;
FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
@@ -4070,30 +4091,9 @@ try_onemore:
set_sbi_flag(sbi, SBI_POR_DOING);
spin_lock_init(&sbi->stat_lock);
- for (i = 0; i < NR_PAGE_TYPE; i++) {
- int n = (i == META) ? 1 : NR_TEMP_TYPE;
- int j;
-
- sbi->write_io[i] =
- f2fs_kmalloc(sbi,
- array_size(n,
- sizeof(struct f2fs_bio_info)),
- GFP_KERNEL);
- if (!sbi->write_io[i]) {
- err = -ENOMEM;
- goto free_bio_info;
- }
-
- for (j = HOT; j < n; j++) {
- init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
- sbi->write_io[i][j].sbi = sbi;
- sbi->write_io[i][j].bio = NULL;
- spin_lock_init(&sbi->write_io[i][j].io_lock);
- INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
- INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
- init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
- }
- }
+ err = f2fs_init_write_merge_io(sbi);
+ if (err)
+ goto free_bio_info;
init_f2fs_rwsem(&sbi->cp_rwsem);
init_f2fs_rwsem(&sbi->quota_sem);
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
index 65395ae188aa..7b8f2b41c29b 100644
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -129,7 +129,7 @@ static int f2fs_begin_enable_verity(struct file *filp)
if (f2fs_verity_in_progress(inode))
return -EBUSY;
- if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
+ if (f2fs_is_atomic_file(inode))
return -EOPNOTSUPP;
/*
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 02d4d4234956..a415c02ede39 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -126,6 +126,7 @@ struct msdos_inode_info {
struct hlist_node i_fat_hash; /* hash by i_location */
struct hlist_node i_dir_hash; /* hash by i_logstart */
struct rw_semaphore truncate_lock; /* protect bmap against truncate */
+ struct timespec64 i_crtime; /* File creation (birth) time */
struct inode vfs_inode;
};
@@ -433,8 +434,15 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...);
__fat_fs_error(sb, 1, fmt , ## args)
#define fat_fs_error_ratelimit(sb, fmt, args...) \
__fat_fs_error(sb, __ratelimit(&MSDOS_SB(sb)->ratelimit), fmt , ## args)
+
+#define FAT_PRINTK_PREFIX "%sFAT-fs (%s): "
+#define fat_msg(sb, level, fmt, args...) \
+do { \
+ printk_index_subsys_emit(FAT_PRINTK_PREFIX, level, fmt, ##args);\
+ _fat_msg(sb, level, fmt, ##args); \
+} while (0)
__printf(3, 4) __cold
-void fat_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+void _fat_msg(struct super_block *sb, const char *level, const char *fmt, ...);
#define fat_msg_ratelimit(sb, level, fmt, args...) \
do { \
if (__ratelimit(&MSDOS_SB(sb)->ratelimit)) \
@@ -446,6 +454,10 @@ extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 __time, __le16 __date, u8 time_cs);
extern void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 *time, __le16 *date, u8 *time_cs);
+extern struct timespec64 fat_truncate_atime(const struct msdos_sb_info *sbi,
+ const struct timespec64 *ts);
+extern struct timespec64 fat_truncate_mtime(const struct msdos_sb_info *sbi,
+ const struct timespec64 *ts);
extern int fat_truncate_time(struct inode *inode, struct timespec64 *now,
int flags);
extern int fat_update_time(struct inode *inode, struct timespec64 *now,
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 978ac6751aeb..1db348f8f887 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -94,7 +94,8 @@ static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
err_brelse:
brelse(bhs[0]);
err:
- fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
+ fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
+ (llu)blocknr);
return -EIO;
}
@@ -107,8 +108,8 @@ static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
fatent->bhs[0] = sb_bread(sb, blocknr);
if (!fatent->bhs[0]) {
- fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
- (llu)blocknr);
+ fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
+ (llu)blocknr);
return -EIO;
}
fatent->nr_bhs = 1;
diff --git a/fs/fat/file.c b/fs/fat/file.c
index bf91f977debe..3dae3ed60f3a 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -398,13 +398,21 @@ int fat_getattr(struct user_namespace *mnt_userns, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct inode *inode = d_inode(path->dentry);
+ struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+
generic_fillattr(mnt_userns, inode, stat);
- stat->blksize = MSDOS_SB(inode->i_sb)->cluster_size;
+ stat->blksize = sbi->cluster_size;
- if (MSDOS_SB(inode->i_sb)->options.nfs == FAT_NFS_NOSTALE_RO) {
+ if (sbi->options.nfs == FAT_NFS_NOSTALE_RO) {
/* Use i_pos for ino. This is used as fileid of nfs. */
- stat->ino = fat_i_pos_read(MSDOS_SB(inode->i_sb), inode);
+ stat->ino = fat_i_pos_read(sbi, inode);
}
+
+ if (sbi->options.isvfat && request_mask & STATX_BTIME) {
+ stat->result_mask |= STATX_BTIME;
+ stat->btime = MSDOS_I(inode)->i_crtime;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(fat_getattr);
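
With this change a vfat inode's creation time is reported as the statx birth time. A userspace sketch of reading it (assumes glibc's statx() wrapper, glibc 2.28 or later; the program is illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2)
		return 1;
	if (statx(AT_FDCWD, argv[1], 0, STATX_BTIME, &stx) != 0) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_BTIME)
		printf("btime: %lld.%09u\n",
		       (long long)stx.stx_btime.tv_sec, stx.stx_btime.tv_nsec);
	else
		printf("btime not reported for this file\n");
	return 0;
}
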
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 69b4d4ae64d7..a38238d75c08 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -567,12 +567,13 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
& ~((loff_t)sbi->cluster_size - 1)) >> 9;
fat_time_fat2unix(sbi, &inode->i_mtime, de->time, de->date, 0);
+ inode->i_ctime = inode->i_mtime;
if (sbi->options.isvfat) {
- fat_time_fat2unix(sbi, &inode->i_ctime, de->ctime,
- de->cdate, de->ctime_cs);
fat_time_fat2unix(sbi, &inode->i_atime, 0, de->adate, 0);
+ fat_time_fat2unix(sbi, &MSDOS_I(inode)->i_crtime, de->ctime,
+ de->cdate, de->ctime_cs);
} else
- fat_truncate_time(inode, &inode->i_mtime, S_ATIME|S_CTIME);
+ inode->i_atime = fat_truncate_atime(sbi, &inode->i_mtime);
return 0;
}
@@ -757,6 +758,8 @@ static struct inode *fat_alloc_inode(struct super_block *sb)
ei->i_logstart = 0;
ei->i_attrs = 0;
ei->i_pos = 0;
+ ei->i_crtime.tv_sec = 0;
+ ei->i_crtime.tv_nsec = 0;
return &ei->vfs_inode;
}
@@ -888,10 +891,10 @@ retry:
&raw_entry->date, NULL);
if (sbi->options.isvfat) {
__le16 atime;
- fat_time_unix2fat(sbi, &inode->i_ctime, &raw_entry->ctime,
- &raw_entry->cdate, &raw_entry->ctime_cs);
fat_time_unix2fat(sbi, &inode->i_atime, &atime,
&raw_entry->adate, NULL);
+ fat_time_unix2fat(sbi, &MSDOS_I(inode)->i_crtime, &raw_entry->ctime,
+ &raw_entry->cdate, &raw_entry->ctime_cs);
}
spin_unlock(&sbi->inode_hash_lock);
mark_buffer_dirty(bh);
@@ -1885,10 +1888,8 @@ out_invalid:
fat_msg(sb, KERN_INFO, "Can't find a valid FAT filesystem");
out_fail:
- if (fsinfo_inode)
- iput(fsinfo_inode);
- if (fat_inode)
- iput(fat_inode);
+ iput(fsinfo_inode);
+ iput(fat_inode);
unload_nls(sbi->nls_io);
unload_nls(sbi->nls_disk);
fat_reset_iocharset(&sbi->options);
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 91ca3c304211..7e5d6ae305f2 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -42,10 +42,16 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
EXPORT_SYMBOL_GPL(__fat_fs_error);
/**
- * fat_msg() - print preformated FAT specific messages. Every thing what is
- * not fat_fs_error() should be fat_msg().
+ * _fat_msg() - Print a preformatted FAT message based on a superblock.
+ * @sb: A pointer to a &struct super_block
+ * @level: A Kernel printk level constant
+ * @fmt: The printf-style format string to print.
+ *
+ * Everything that is not fat_fs_error() should be fat_msg().
+ *
+ * fat_msg() wraps _fat_msg() for printk indexing.
*/
-void fat_msg(struct super_block *sb, const char *level, const char *fmt, ...)
+void _fat_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
@@ -53,7 +59,7 @@ void fat_msg(struct super_block *sb, const char *level, const char *fmt, ...)
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- printk("%sFAT-fs (%s): %pV\n", level, sb->s_id, &vaf);
+ _printk(FAT_PRINTK_PREFIX "%pV\n", level, sb->s_id, &vaf);
va_end(args);
}
@@ -187,7 +193,7 @@ static long days_in_year[] = {
0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0,
};
-static inline int fat_tz_offset(struct msdos_sb_info *sbi)
+static inline int fat_tz_offset(const struct msdos_sb_info *sbi)
{
return (sbi->options.tz_set ?
-sbi->options.time_offset :
@@ -275,23 +281,35 @@ static inline struct timespec64 fat_timespec64_trunc_2secs(struct timespec64 ts)
return (struct timespec64){ ts.tv_sec & ~1ULL, 0 };
}
-static inline struct timespec64 fat_timespec64_trunc_10ms(struct timespec64 ts)
+/*
+ * truncate atime to 24 hour granularity (00:00:00 in local timezone)
+ */
+struct timespec64 fat_truncate_atime(const struct msdos_sb_info *sbi,
+ const struct timespec64 *ts)
+{
+ /* to localtime */
+ time64_t seconds = ts->tv_sec - fat_tz_offset(sbi);
+ s32 remainder;
+
+ div_s64_rem(seconds, SECS_PER_DAY, &remainder);
+ /* to day boundary, and back to unix time */
+ seconds = seconds + fat_tz_offset(sbi) - remainder;
+
+ return (struct timespec64){ seconds, 0 };
+}
+
+/*
+ * truncate mtime to 2 second granularity
+ */
+struct timespec64 fat_truncate_mtime(const struct msdos_sb_info *sbi,
+ const struct timespec64 *ts)
{
- if (ts.tv_nsec)
- ts.tv_nsec -= ts.tv_nsec % 10000000UL;
- return ts;
+ return fat_timespec64_trunc_2secs(*ts);
}
/*
* truncate the various times with appropriate granularity:
- * root inode:
- * all times always 0
- * all other inodes:
- * mtime - 2 seconds
- * ctime
- * msdos - 2 seconds
- * vfat - 10 milliseconds
- * atime - 24 hours (00:00:00 in local timezone)
+ * all times in the root inode are always 0
*/
int fat_truncate_time(struct inode *inode, struct timespec64 *now, int flags)
{
@@ -306,25 +324,15 @@ int fat_truncate_time(struct inode *inode, struct timespec64 *now, int flags)
ts = current_time(inode);
}
- if (flags & S_ATIME) {
- /* to localtime */
- time64_t seconds = now->tv_sec - fat_tz_offset(sbi);
- s32 remainder;
-
- div_s64_rem(seconds, SECS_PER_DAY, &remainder);
- /* to day boundary, and back to unix time */
- seconds = seconds + fat_tz_offset(sbi) - remainder;
-
- inode->i_atime = (struct timespec64){ seconds, 0 };
- }
- if (flags & S_CTIME) {
- if (sbi->options.isvfat)
- inode->i_ctime = fat_timespec64_trunc_10ms(*now);
- else
- inode->i_ctime = fat_timespec64_trunc_2secs(*now);
- }
+ if (flags & S_ATIME)
+ inode->i_atime = fat_truncate_atime(sbi, now);
+ /*
+ * ctime and mtime share the same on-disk field, and should be
+ * identical in memory. all mtime updates will be applied to ctime,
+ * but ctime updates are ignored.
+ */
if (flags & S_MTIME)
- inode->i_mtime = fat_timespec64_trunc_2secs(*now);
+ inode->i_mtime = inode->i_ctime = fat_truncate_mtime(sbi, now);
return 0;
}
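
fat_truncate_atime() rounds the access time down to 00:00:00 in the volume's configured timezone by removing the time-of-day remainder computed in local time. A standalone sketch of the arithmetic (plain C; tz_offset_sec stands in for fat_tz_offset() and the sample timestamp is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define SECS_PER_DAY (24 * 60 * 60)

static int64_t truncate_atime(int64_t unix_sec, int tz_offset_sec)
{
	int64_t local = unix_sec - tz_offset_sec;	/* shift into local time */
	int64_t remainder = local % SECS_PER_DAY;	/* seconds past local midnight */

	return unix_sec - remainder;			/* back to UTC, at local 00:00:00 */
}

int main(void)
{
	/* 2022-05-24 15:30:00 UTC on a UTC volume -> 2022-05-24 00:00:00 UTC */
	printf("%lld\n", (long long)truncate_atime(1653406200, 0));
	return 0;
}
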
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 5369d82e0bfb..c573314806cf 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -780,8 +780,6 @@ static int vfat_create(struct user_namespace *mnt_userns, struct inode *dir,
goto out;
}
inode_inc_iversion(inode);
- fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
- /* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
out:
@@ -878,8 +876,6 @@ static int vfat_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
}
inode_inc_iversion(inode);
set_nlink(inode, 2);
- fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
- /* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
diff --git a/fs/file.c b/fs/file.c
index ee9317346702..dd6692048f4f 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -630,32 +630,23 @@ EXPORT_SYMBOL(fd_install);
* @files: file struct to retrieve file from
* @fd: file descriptor to retrieve file for
*
- * If this functions returns an EINVAL error pointer the fd was beyond the
- * current maximum number of file descriptors for that fdtable.
+ * Context: files_lock must be held.
*
- * Returns: The file associated with @fd, on error returns an error pointer.
+ * Returns: The file associated with @fd (NULL if @fd is not open)
*/
static struct file *pick_file(struct files_struct *files, unsigned fd)
{
+ struct fdtable *fdt = files_fdtable(files);
struct file *file;
- struct fdtable *fdt;
- spin_lock(&files->file_lock);
- fdt = files_fdtable(files);
- if (fd >= fdt->max_fds) {
- file = ERR_PTR(-EINVAL);
- goto out_unlock;
- }
+ if (fd >= fdt->max_fds)
+ return NULL;
+
file = fdt->fd[fd];
- if (!file) {
- file = ERR_PTR(-EBADF);
- goto out_unlock;
+ if (file) {
+ rcu_assign_pointer(fdt->fd[fd], NULL);
+ __put_unused_fd(files, fd);
}
- rcu_assign_pointer(fdt->fd[fd], NULL);
- __put_unused_fd(files, fd);
-
-out_unlock:
- spin_unlock(&files->file_lock);
return file;
}
@@ -664,8 +655,10 @@ int close_fd(unsigned fd)
struct files_struct *files = current->files;
struct file *file;
+ spin_lock(&files->file_lock);
file = pick_file(files, fd);
- if (IS_ERR(file))
+ spin_unlock(&files->file_lock);
+ if (!file)
return -EBADF;
return filp_close(file, files);
@@ -702,20 +695,25 @@ static inline void __range_cloexec(struct files_struct *cur_fds,
static inline void __range_close(struct files_struct *cur_fds, unsigned int fd,
unsigned int max_fd)
{
+ unsigned n;
+
+ rcu_read_lock();
+ n = last_fd(files_fdtable(cur_fds));
+ rcu_read_unlock();
+ max_fd = min(max_fd, n);
+
while (fd <= max_fd) {
struct file *file;
+ spin_lock(&cur_fds->file_lock);
file = pick_file(cur_fds, fd++);
- if (!IS_ERR(file)) {
+ spin_unlock(&cur_fds->file_lock);
+
+ if (file) {
/* found a valid file to close */
filp_close(file, cur_fds);
cond_resched();
- continue;
}
-
- /* beyond the last fd in that table */
- if (PTR_ERR(file) == -EINVAL)
- return;
}
}
@@ -795,26 +793,9 @@ int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
* See close_fd_get_file() below, this variant assumes current->files->file_lock
* is held.
*/
-int __close_fd_get_file(unsigned int fd, struct file **res)
+struct file *__close_fd_get_file(unsigned int fd)
{
- struct files_struct *files = current->files;
- struct file *file;
- struct fdtable *fdt;
-
- fdt = files_fdtable(files);
- if (fd >= fdt->max_fds)
- goto out_err;
- file = fdt->fd[fd];
- if (!file)
- goto out_err;
- rcu_assign_pointer(fdt->fd[fd], NULL);
- __put_unused_fd(files, fd);
- get_file(file);
- *res = file;
- return 0;
-out_err:
- *res = NULL;
- return -ENOENT;
+ return pick_file(current->files, fd);
}
/*
@@ -822,16 +803,16 @@ out_err:
* The caller must ensure that filp_close() called on the file, and then
* an fput().
*/
-int close_fd_get_file(unsigned int fd, struct file **res)
+struct file *close_fd_get_file(unsigned int fd)
{
struct files_struct *files = current->files;
- int ret;
+ struct file *file;
spin_lock(&files->file_lock);
- ret = __close_fd_get_file(fd, res);
+ file = pick_file(files, fd);
spin_unlock(&files->file_lock);
- return ret;
+ return file;
}
void do_close_on_exec(struct files_struct *files)
@@ -871,7 +852,7 @@ void do_close_on_exec(struct files_struct *files)
}
static inline struct file *__fget_files_rcu(struct files_struct *files,
- unsigned int fd, fmode_t mask, unsigned int refs)
+ unsigned int fd, fmode_t mask)
{
for (;;) {
struct file *file;
@@ -897,10 +878,9 @@ static inline struct file *__fget_files_rcu(struct files_struct *files,
* Such a race can take two forms:
*
* (a) the file ref already went down to zero,
- * and get_file_rcu_many() fails. Just try
- * again:
+ * and get_file_rcu() fails. Just try again:
*/
- if (unlikely(!get_file_rcu_many(file, refs)))
+ if (unlikely(!get_file_rcu(file)))
continue;
/*
@@ -909,11 +889,11 @@ static inline struct file *__fget_files_rcu(struct files_struct *files,
* pointer having changed, because it always goes
* hand-in-hand with 'fdt'.
*
- * If so, we need to put our refs and try again.
+ * If so, we need to put our ref and try again.
*/
if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
unlikely(rcu_dereference_raw(*fdentry) != file)) {
- fput_many(file, refs);
+ fput(file);
continue;
}
@@ -926,37 +906,31 @@ static inline struct file *__fget_files_rcu(struct files_struct *files,
}
static struct file *__fget_files(struct files_struct *files, unsigned int fd,
- fmode_t mask, unsigned int refs)
+ fmode_t mask)
{
struct file *file;
rcu_read_lock();
- file = __fget_files_rcu(files, fd, mask, refs);
+ file = __fget_files_rcu(files, fd, mask);
rcu_read_unlock();
return file;
}
-static inline struct file *__fget(unsigned int fd, fmode_t mask,
- unsigned int refs)
-{
- return __fget_files(current->files, fd, mask, refs);
-}
-
-struct file *fget_many(unsigned int fd, unsigned int refs)
+static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
- return __fget(fd, FMODE_PATH, refs);
+ return __fget_files(current->files, fd, mask);
}
struct file *fget(unsigned int fd)
{
- return __fget(fd, FMODE_PATH, 1);
+ return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);
struct file *fget_raw(unsigned int fd)
{
- return __fget(fd, 0, 1);
+ return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);
@@ -966,7 +940,7 @@ struct file *fget_task(struct task_struct *task, unsigned int fd)
task_lock(task);
if (task->files)
- file = __fget_files(task->files, fd, 0, 1);
+ file = __fget_files(task->files, fd, 0);
task_unlock(task);
return file;
@@ -1035,7 +1009,7 @@ static unsigned long __fget_light(unsigned int fd, fmode_t mask)
return 0;
return (unsigned long)file;
} else {
- file = __fget(fd, mask, 1);
+ file = __fget(fd, mask);
if (!file)
return 0;
return FDPUT_FPUT | (unsigned long)file;
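
With fget_many() removed above and fput_many() removed in fs/file_table.c below, file references are always taken and dropped one at a time. A minimal sketch of the remaining pattern; the helper name is illustrative, and fget() returns NULL both for an unopened descriptor and for an O_PATH file:

static long example_peek_pos(unsigned int fd)
{
	struct file *file = fget(fd);
	long pos;

	if (!file)
		return -EBADF;
	pos = (long)file->f_pos;	/* use the file while we hold a reference */
	fput(file);
	return pos;
}
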
diff --git a/fs/file_table.c b/fs/file_table.c
index ada8fe814db9..5424e3a8df5f 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -368,9 +368,9 @@ EXPORT_SYMBOL_GPL(flush_delayed_fput);
static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
-void fput_many(struct file *file, unsigned int refs)
+void fput(struct file *file)
{
- if (atomic_long_sub_and_test(refs, &file->f_count)) {
+ if (atomic_long_dec_and_test(&file->f_count)) {
struct task_struct *task = current;
if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
@@ -389,11 +389,6 @@ void fput_many(struct file *file, unsigned int refs)
}
}
-void fput(struct file *file)
-{
- fput_many(file, 1);
-}
-
/*
* synchronous analog of fput(); for kernel threads that might be needed
* in some umount() (and thus can't use flush_delayed_fput() without
diff --git a/fs/freevxfs/vxfs.h b/fs/freevxfs/vxfs.h
index a41ea0ba6943..bffd156d6434 100644
--- a/fs/freevxfs/vxfs.h
+++ b/fs/freevxfs/vxfs.h
@@ -1,32 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
* Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
*/
#ifndef _VXFS_SUPER_H_
#define _VXFS_SUPER_H_
diff --git a/fs/freevxfs/vxfs_bmap.c b/fs/freevxfs/vxfs_bmap.c
index 1fd41cf98b9f..de2a5bccb930 100644
--- a/fs/freevxfs/vxfs_bmap.c
+++ b/fs/freevxfs/vxfs_bmap.c
@@ -1,30 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
diff --git a/fs/freevxfs/vxfs_dir.h b/fs/freevxfs/vxfs_dir.h
index acc5477b3f23..fbcd603365ad 100644
--- a/fs/freevxfs/vxfs_dir.h
+++ b/fs/freevxfs/vxfs_dir.h
@@ -1,31 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
*/
#ifndef _VXFS_DIR_H_
#define _VXFS_DIR_H_
diff --git a/fs/freevxfs/vxfs_extern.h b/fs/freevxfs/vxfs_extern.h
index f5c428e21024..3a2180c5e208 100644
--- a/fs/freevxfs/vxfs_extern.h
+++ b/fs/freevxfs/vxfs_extern.h
@@ -1,31 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
*/
#ifndef _VXFS_EXTERN_H_
#define _VXFS_EXTERN_H_
diff --git a/fs/freevxfs/vxfs_fshead.c b/fs/freevxfs/vxfs_fshead.c
index a4610a77649e..c1174a3f8990 100644
--- a/fs/freevxfs/vxfs_fshead.c
+++ b/fs/freevxfs/vxfs_fshead.c
@@ -1,31 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
* Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
diff --git a/fs/freevxfs/vxfs_fshead.h b/fs/freevxfs/vxfs_fshead.h
index e026f0c49159..dfd2147599c4 100644
--- a/fs/freevxfs/vxfs_fshead.h
+++ b/fs/freevxfs/vxfs_fshead.h
@@ -1,32 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
* Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
*/
#ifndef _VXFS_FSHEAD_H_
#define _VXFS_FSHEAD_H_
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
index a37431e443d3..c2ef9f0debbd 100644
--- a/fs/freevxfs/vxfs_immed.c
+++ b/fs/freevxfs/vxfs_immed.c
@@ -1,30 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index 1f41b25ef38b..ceb6a12649ba 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -1,31 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
* Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
diff --git a/fs/freevxfs/vxfs_inode.h b/fs/freevxfs/vxfs_inode.h
index f012abed125d..1e9e138d2b33 100644
--- a/fs/freevxfs/vxfs_inode.h
+++ b/fs/freevxfs/vxfs_inode.h
@@ -1,32 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
* Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
*/
#ifndef _VXFS_INODE_H_
#define _VXFS_INODE_H_
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index a51425634f65..f04ba2ed1e1a 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -1,31 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
* Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
diff --git a/fs/freevxfs/vxfs_olt.c b/fs/freevxfs/vxfs_olt.c
index 813da6685151..23f35187c289 100644
--- a/fs/freevxfs/vxfs_olt.c
+++ b/fs/freevxfs/vxfs_olt.c
@@ -1,30 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
diff --git a/fs/freevxfs/vxfs_olt.h b/fs/freevxfs/vxfs_olt.h
index 0c0b0c9fa557..53afba08d617 100644
--- a/fs/freevxfs/vxfs_olt.h
+++ b/fs/freevxfs/vxfs_olt.h
@@ -1,31 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
*/
#ifndef _VXFS_OLT_H_
#define _VXFS_OLT_H_
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 6143ebab940d..0e633d2bfc7d 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -1,30 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index 22eed5a73ac2..c3b82f716f9a 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -1,31 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
* Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
diff --git a/fs/fsopen.c b/fs/fsopen.c
index 27a890aa493a..fc9d2d9fd234 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -119,7 +119,7 @@ SYSCALL_DEFINE2(fsopen, const char __user *, _fs_name, unsigned int, flags)
const char *fs_name;
int ret;
- if (!ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN))
+ if (!may_mount())
return -EPERM;
if (flags & ~FSOPEN_CLOEXEC)
@@ -162,7 +162,7 @@ SYSCALL_DEFINE3(fspick, int, dfd, const char __user *, path, unsigned int, flags
unsigned int lookup_flags;
int ret;
- if (!ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN))
+ if (!may_mount())
return -EPERM;
if ((flags & ~(FSPICK_CLOEXEC |
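
Both fsopen() and fspick() now call may_mount() instead of open-coding the
capability check. A sketch of what the helper is expected to wrap, consistent
with the lines it replaces above (the actual definition lives in
fs/namespace.c and is only assumed here):

bool may_mount(void)
{
	/* same check the open-coded callers performed */
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}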
diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
index d7d3a7f06862..10eb50cbf398 100644
--- a/fs/fuse/dax.c
+++ b/fs/fuse/dax.c
@@ -1241,8 +1241,8 @@ static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);
id = dax_read_lock();
- nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), NULL,
- NULL);
+ nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size),
+ DAX_ACCESS, NULL, NULL);
dax_read_unlock(id);
if (nr_pages < 0) {
pr_debug("dax_direct_access() returned %ld\n", nr_pages);
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 86b7dbb6a0d4..8db53fa67359 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -752,7 +752,8 @@ static void virtio_fs_cleanup_vqs(struct virtio_device *vdev,
* offset.
*/
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode,
+ void **kaddr, pfn_t *pfn)
{
struct virtio_fs *fs = dax_get_private(dax_dev);
phys_addr_t offset = PFN_PHYS(pgoff);
@@ -772,7 +773,8 @@ static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
long rc;
void *kaddr;
- rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
+ rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS, &kaddr,
+ NULL);
if (rc < 0)
return rc;
memset(kaddr, 0, nr_pages << PAGE_SHIFT);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 22d2b94339ed..62408047e8d7 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -195,7 +195,6 @@ out:
* Called under mmap_write_lock(mm).
*/
-#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
@@ -206,7 +205,7 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
info.flags = 0;
info.length = len;
info.low_limit = current->mm->mmap_base;
- info.high_limit = arch_get_mmap_end(addr);
+ info.high_limit = arch_get_mmap_end(addr, len, flags);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
return vm_unmapped_area(&info);
@@ -237,21 +236,22 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = current->mm->mmap_base;
- info.high_limit = arch_get_mmap_end(addr);
+ info.high_limit = arch_get_mmap_end(addr, len, flags);
addr = vm_unmapped_area(&info);
}
return addr;
}
-static unsigned long
-hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+unsigned long
+generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct hstate *h = hstate_file(file);
- const unsigned long mmap_end = arch_get_mmap_end(addr);
+ const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
if (len & ~huge_page_mask(h))
return -EINVAL;
@@ -283,6 +283,15 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return hugetlb_get_unmapped_area_bottomup(file, addr, len,
pgoff, flags);
}
+
+#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+static unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
+}
#endif
static size_t
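
The hunk above moves the shared policy into generic_hugetlb_get_unmapped_area()
and keeps the local hugetlb_get_unmapped_area() only when the architecture does
not provide its own. A sketch of how an architecture defining
HAVE_ARCH_HUGETLB_UNMAPPED_AREA might now reuse the generic helper;
arch_needs_special_hugetlb_area() and arch_hugetlb_special_area() are
hypothetical placeholders, not existing functions:

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	if (arch_needs_special_hugetlb_area(addr, len, flags))
		return arch_hugetlb_special_area(file, addr, len, pgoff, flags);

	/* common case: fall back to the shared implementation */
	return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
}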
diff --git a/fs/internal.h b/fs/internal.h
index 9a6c233ee7f1..87e96b9024ce 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -84,6 +84,7 @@ extern int __mnt_want_write_file(struct file *);
extern void __mnt_drop_write_file(struct file *);
extern void dissolve_on_fput(struct vfsmount *);
+extern bool may_mount(void);
int path_mount(const char *dev_name, struct path *path,
const char *type_page, unsigned long flags, void *data_page);
@@ -125,7 +126,7 @@ extern struct file *do_file_open_root(const struct path *,
const char *, const struct open_flags *);
extern struct open_how build_open_how(int flags, umode_t mode);
extern int build_open_flags(const struct open_how *how, struct open_flags *op);
-extern int __close_fd_get_file(unsigned int fd, struct file **res);
+extern struct file *__close_fd_get_file(unsigned int fd);
long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
int chmod_common(const struct path *path, umode_t mode);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9f1c682d7caf..86f9df56526b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -112,7 +112,8 @@
IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
- REQ_F_POLLED | REQ_F_CREDS | REQ_F_ASYNC_DATA)
+ REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
+ REQ_F_ASYNC_DATA)
#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
IO_REQ_CLEAN_FLAGS)
@@ -540,6 +541,7 @@ struct io_uring_task {
const struct io_ring_ctx *last;
struct io_wq *io_wq;
struct percpu_counter inflight;
+ atomic_t inflight_tracked;
atomic_t in_idle;
spinlock_t task_lock;
@@ -574,6 +576,7 @@ struct io_close {
struct file *file;
int fd;
u32 file_slot;
+ u32 flags;
};
struct io_timeout_data {
@@ -1355,8 +1358,6 @@ static void io_clean_op(struct io_kiocb *req);
static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
unsigned issue_flags);
static struct file *io_file_get_normal(struct io_kiocb *req, int fd);
-static void io_drop_inflight_file(struct io_kiocb *req);
-static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags);
static void io_queue_sqe(struct io_kiocb *req);
static void io_rsrc_put_work(struct work_struct *work);
@@ -1366,7 +1367,9 @@ static int io_req_prep_async(struct io_kiocb *req);
static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
unsigned int issue_flags, u32 slot_index);
-static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
+static int __io_close_fixed(struct io_kiocb *req, unsigned int issue_flags,
+ unsigned int offset);
+static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
static void io_eventfd_signal(struct io_ring_ctx *ctx);
@@ -1757,9 +1760,29 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
bool cancel_all)
__must_hold(&req->ctx->timeout_lock)
{
+ struct io_kiocb *req;
+
if (task && head->task != task)
return false;
- return cancel_all;
+ if (cancel_all)
+ return true;
+
+ io_for_each_link(req, head) {
+ if (req->flags & REQ_F_INFLIGHT)
+ return true;
+ }
+ return false;
+}
+
+static bool io_match_linked(struct io_kiocb *head)
+{
+ struct io_kiocb *req;
+
+ io_for_each_link(req, head) {
+ if (req->flags & REQ_F_INFLIGHT)
+ return true;
+ }
+ return false;
}
/*
@@ -1769,9 +1792,24 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
bool cancel_all)
{
+ bool matched;
+
if (task && head->task != task)
return false;
- return cancel_all;
+ if (cancel_all)
+ return true;
+
+ if (head->flags & REQ_F_LINK_TIMEOUT) {
+ struct io_ring_ctx *ctx = head->ctx;
+
+ /* protect against races with linked timeouts */
+ spin_lock_irq(&ctx->timeout_lock);
+ matched = io_match_linked(head);
+ spin_unlock_irq(&ctx->timeout_lock);
+ } else {
+ matched = io_match_linked(head);
+ }
+ return matched;
}
static inline bool req_has_async_data(struct io_kiocb *req)
@@ -1927,6 +1965,14 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
return req->flags & REQ_F_FIXED_FILE;
}
+static inline void io_req_track_inflight(struct io_kiocb *req)
+{
+ if (!(req->flags & REQ_F_INFLIGHT)) {
+ req->flags |= REQ_F_INFLIGHT;
+ atomic_inc(&current->io_uring->inflight_tracked);
+ }
+}
+
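
io_req_track_inflight() above is the usual "flag once, count globally"
pattern: mark the request at most once and keep a per-task atomic count so
cancellation can see whether anything tracked is still in flight. The same
pattern in generic form (all names below are illustrative, not io_uring APIs):

#include <linux/atomic.h>

#define EXAMPLE_F_TRACKED	(1U << 0)

struct example_req {
	unsigned int flags;
};

static void example_track(struct example_req *req, atomic_t *tracked)
{
	if (!(req->flags & EXAMPLE_F_TRACKED)) {
		req->flags |= EXAMPLE_F_TRACKED;
		atomic_inc(tracked);	/* paired with atomic_dec() when the flag is cleared */
	}
}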
static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
{
if (WARN_ON_ONCE(!req->link))
@@ -2988,8 +3034,6 @@ static void __io_req_task_work_add(struct io_kiocb *req,
unsigned long flags;
bool running;
- io_drop_inflight_file(req);
-
spin_lock_irqsave(&tctx->task_lock, flags);
wq_list_add_tail(&req->io_task_work.node, list);
running = tctx->task_running;
@@ -4176,6 +4220,16 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
return 0;
}
+static int io_readv_prep_async(struct io_kiocb *req)
+{
+ return io_rw_prep_async(req, READ);
+}
+
+static int io_writev_prep_async(struct io_kiocb *req)
+{
+ return io_rw_prep_async(req, WRITE);
+}
+
/*
* This is our waitqueue callback handler, registered through __folio_lock_async()
* when we initially tried to do the IO with the iocb armed our waitqueue.
@@ -5103,42 +5157,6 @@ static int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
return 0;
}
-static int io_shutdown_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
-#if defined(CONFIG_NET)
- if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
- sqe->buf_index || sqe->splice_fd_in))
- return -EINVAL;
-
- req->shutdown.how = READ_ONCE(sqe->len);
- return 0;
-#else
- return -EOPNOTSUPP;
-#endif
-}
-
-static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
-{
-#if defined(CONFIG_NET)
- struct socket *sock;
- int ret;
-
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
- sock = sock_from_file(req->file);
- if (unlikely(!sock))
- return -ENOTSOCK;
-
- ret = __sys_shutdown_sock(sock, req->shutdown.how);
- io_req_complete(req, ret);
- return 0;
-#else
- return -EOPNOTSUPP;
-#endif
-}
-
static int __io_splice_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
@@ -5445,15 +5463,11 @@ static int io_file_bitmap_get(struct io_ring_ctx *ctx)
unsigned long nr = ctx->nr_user_files;
int ret;
- if (table->alloc_hint >= nr)
- table->alloc_hint = 0;
-
do {
ret = find_next_zero_bit(table->bitmap, nr, table->alloc_hint);
- if (ret != nr) {
- table->alloc_hint = ret + 1;
+ if (ret != nr)
return ret;
- }
+
if (!table->alloc_hint)
break;
@@ -5464,6 +5478,10 @@ static int io_file_bitmap_get(struct io_ring_ctx *ctx)
return -ENFILE;
}
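
io_file_bitmap_get() now searches from the stored hint and wraps around to
bit 0 at most once before reporting the table full. The same search as a
self-contained sketch (example_bitmap_get() is an illustrative name):

#include <linux/bitmap.h>
#include <linux/errno.h>

static int example_bitmap_get(unsigned long *bitmap, unsigned long nr,
			      unsigned long hint)
{
	unsigned long bit;

	do {
		bit = find_next_zero_bit(bitmap, nr, hint);
		if (bit != nr)
			return bit;	/* caller sets the bit and advances the hint */
		if (!hint)
			break;		/* already scanned from 0: nothing free */
		hint = 0;		/* wrap around and scan the low bits once */
	} while (1);

	return -ENFILE;
}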
+/*
+ * Note when io_fixed_fd_install() returns error value, it will ensure
+ * fput() is called correspondingly.
+ */
static int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
struct file *file, unsigned int file_slot)
{
@@ -5471,26 +5489,24 @@ static int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
struct io_ring_ctx *ctx = req->ctx;
int ret;
+ io_ring_submit_lock(ctx, issue_flags);
+
if (alloc_slot) {
- io_ring_submit_lock(ctx, issue_flags);
ret = io_file_bitmap_get(ctx);
- if (unlikely(ret < 0)) {
- io_ring_submit_unlock(ctx, issue_flags);
- return ret;
- }
-
+ if (unlikely(ret < 0))
+ goto err;
file_slot = ret;
} else {
file_slot--;
}
ret = io_install_fixed_file(req, file, issue_flags, file_slot);
- if (alloc_slot) {
- io_ring_submit_unlock(ctx, issue_flags);
- if (!ret)
- return file_slot;
- }
-
+ if (!ret && alloc_slot)
+ ret = file_slot;
+err:
+ io_ring_submit_unlock(ctx, issue_flags);
+ if (unlikely(ret < 0))
+ fput(file);
return ret;
}
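
The comment added before io_fixed_fd_install() defines its error contract:
on failure the helper has already dropped the file reference. A caller-side
sketch of that contract (the wrapper name is illustrative; the behaviour is
what the hunk above documents):

static int example_install_fd(struct io_kiocb *req, unsigned int issue_flags,
			      int fd, unsigned int slot)
{
	struct file *file = fget(fd);

	if (!file)
		return -EBADF;

	/* on error the callee calls fput(file) itself, so no second drop here */
	return io_fixed_fd_install(req, issue_flags, file, slot);
}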
@@ -5972,14 +5988,18 @@ static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
+ if (sqe->off || sqe->addr || sqe->len || sqe->buf_index)
return -EINVAL;
if (req->flags & REQ_F_FIXED_FILE)
return -EBADF;
req->close.fd = READ_ONCE(sqe->fd);
req->close.file_slot = READ_ONCE(sqe->file_index);
- if (req->close.file_slot && req->close.fd)
+ req->close.flags = READ_ONCE(sqe->close_flags);
+ if (req->close.flags & ~IORING_CLOSE_FD_AND_FILE_SLOT)
+ return -EINVAL;
+ if (!(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT) &&
+ req->close.file_slot && req->close.fd)
return -EINVAL;
return 0;
@@ -5995,7 +6015,8 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags)
if (req->close.file_slot) {
ret = io_close_fixed(req, issue_flags);
- goto err;
+ if (ret || !(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT))
+ goto err;
}
spin_lock(&files->file_lock);
@@ -6018,13 +6039,10 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags)
return -EAGAIN;
}
- ret = __close_fd_get_file(close->fd, &file);
+ file = __close_fd_get_file(close->fd);
spin_unlock(&files->file_lock);
- if (ret < 0) {
- if (ret == -ENOENT)
- ret = -EBADF;
+ if (!file)
goto err;
- }
/* No ->flush() or already async, safely close from here */
ret = filp_close(file, current->files);
@@ -6063,6 +6081,34 @@ static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
}
#if defined(CONFIG_NET)
+static int io_shutdown_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+{
+ if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
+ sqe->buf_index || sqe->splice_fd_in))
+ return -EINVAL;
+
+ req->shutdown.how = READ_ONCE(sqe->len);
+ return 0;
+}
+
+static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct socket *sock;
+ int ret;
+
+ if (issue_flags & IO_URING_F_NONBLOCK)
+ return -EAGAIN;
+
+ sock = sock_from_file(req->file);
+ if (unlikely(!sock))
+ return -ENOTSOCK;
+
+ ret = __sys_shutdown_sock(sock, req->shutdown.how);
+ io_req_complete(req, ret);
+ return 0;
+}
+
static bool io_net_retry(struct socket *sock, int flags)
{
if (!(flags & MSG_WAITALL))
@@ -6674,8 +6720,8 @@ static int io_socket(struct io_kiocb *req, unsigned int issue_flags)
fd_install(fd, file);
ret = fd;
} else {
- ret = io_install_fixed_file(req, file, issue_flags,
- sock->file_slot - 1);
+ ret = io_fixed_fd_install(req, issue_flags, file,
+ sock->file_slot);
}
__io_req_complete(req, issue_flags, ret, 0);
return 0;
@@ -6767,6 +6813,7 @@ IO_NETOP_PREP_ASYNC(recvmsg);
IO_NETOP_PREP_ASYNC(connect);
IO_NETOP_PREP(accept);
IO_NETOP_PREP(socket);
+IO_NETOP_PREP(shutdown);
IO_NETOP_FN(send);
IO_NETOP_FN(recv);
#endif /* CONFIG_NET */
@@ -6905,10 +6952,6 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
if (!req->cqe.res) {
struct poll_table_struct pt = { ._key = req->apoll_events };
- unsigned flags = locked ? 0 : IO_URING_F_UNLOCKED;
-
- if (unlikely(!io_assign_file(req, flags)))
- return -EBADF;
req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
}
@@ -7390,7 +7433,7 @@ static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
}
-static int io_poll_update_prep(struct io_kiocb *req,
+static int io_poll_remove_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
struct io_poll_update *upd = &req->poll_update;
@@ -7454,7 +7497,7 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
return 0;
}
-static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
+static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_cancel_data cd = { .data = req->poll_update.old_user_data, };
struct io_ring_ctx *ctx = req->ctx;
@@ -7698,8 +7741,9 @@ static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
return 0;
}
-static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool is_timeout_link)
+static int __io_timeout_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe,
+ bool is_timeout_link)
{
struct io_timeout_data *data;
unsigned flags;
@@ -7754,6 +7798,18 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return 0;
}
+static int io_timeout_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+{
+ return __io_timeout_prep(req, sqe, false);
+}
+
+static int io_link_timeout_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+{
+ return __io_timeout_prep(req, sqe, true);
+}
+
static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -7970,7 +8026,7 @@ done:
return 0;
}
-static int io_rsrc_update_prep(struct io_kiocb *req,
+static int io_files_update_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
@@ -7986,6 +8042,41 @@ static int io_rsrc_update_prep(struct io_kiocb *req,
return 0;
}
+static int io_files_update_with_index_alloc(struct io_kiocb *req,
+ unsigned int issue_flags)
+{
+ __s32 __user *fds = u64_to_user_ptr(req->rsrc_update.arg);
+ unsigned int done;
+ struct file *file;
+ int ret, fd;
+
+ for (done = 0; done < req->rsrc_update.nr_args; done++) {
+ if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ file = fget(fd);
+ if (!file) {
+ ret = -EBADF;
+ break;
+ }
+ ret = io_fixed_fd_install(req, issue_flags, file,
+ IORING_FILE_INDEX_ALLOC);
+ if (ret < 0)
+ break;
+ if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
+ ret = -EFAULT;
+ __io_close_fixed(req, issue_flags, ret);
+ break;
+ }
+ }
+
+ if (done)
+ return done;
+ return ret;
+}
+
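
The done/ret handling at the end of io_files_update_with_index_alloc() is the
usual partial-success convention: once at least one slot was updated, report
how many, and let the caller retry the remainder; report the error only when
nothing succeeded. A generic sketch (example_do_one() is a placeholder):

static int example_do_many(unsigned int nr)
{
	unsigned int done;
	int ret = 0;

	for (done = 0; done < nr; done++) {
		ret = example_do_one(done);
		if (ret < 0)
			break;
	}
	/* partial progress wins over the error code */
	return done ? done : ret;
}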
static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -7999,10 +8090,14 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
up.resv = 0;
up.resv2 = 0;
- io_ring_submit_lock(ctx, issue_flags);
- ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
- &up, req->rsrc_update.nr_args);
- io_ring_submit_unlock(ctx, issue_flags);
+ if (req->rsrc_update.offset == IORING_FILE_INDEX_ALLOC) {
+ ret = io_files_update_with_index_alloc(req, issue_flags);
+ } else {
+ io_ring_submit_lock(ctx, issue_flags);
+ ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
+ &up, req->rsrc_update.nr_args);
+ io_ring_submit_unlock(ctx, issue_flags);
+ }
if (ret < 0)
req_set_fail(req);
@@ -8025,7 +8120,7 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
case IORING_OP_POLL_ADD:
return io_poll_add_prep(req, sqe);
case IORING_OP_POLL_REMOVE:
- return io_poll_update_prep(req, sqe);
+ return io_poll_remove_prep(req, sqe);
case IORING_OP_FSYNC:
return io_fsync_prep(req, sqe);
case IORING_OP_SYNC_FILE_RANGE:
@@ -8039,13 +8134,13 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
case IORING_OP_CONNECT:
return io_connect_prep(req, sqe);
case IORING_OP_TIMEOUT:
- return io_timeout_prep(req, sqe, false);
+ return io_timeout_prep(req, sqe);
case IORING_OP_TIMEOUT_REMOVE:
return io_timeout_remove_prep(req, sqe);
case IORING_OP_ASYNC_CANCEL:
return io_async_cancel_prep(req, sqe);
case IORING_OP_LINK_TIMEOUT:
- return io_timeout_prep(req, sqe, true);
+ return io_link_timeout_prep(req, sqe);
case IORING_OP_ACCEPT:
return io_accept_prep(req, sqe);
case IORING_OP_FALLOCATE:
@@ -8055,7 +8150,7 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
case IORING_OP_CLOSE:
return io_close_prep(req, sqe);
case IORING_OP_FILES_UPDATE:
- return io_rsrc_update_prep(req, sqe);
+ return io_files_update_prep(req, sqe);
case IORING_OP_STATX:
return io_statx_prep(req, sqe);
case IORING_OP_FADVISE:
@@ -8123,9 +8218,9 @@ static int io_req_prep_async(struct io_kiocb *req)
switch (req->opcode) {
case IORING_OP_READV:
- return io_rw_prep_async(req, READ);
+ return io_readv_prep_async(req);
case IORING_OP_WRITEV:
- return io_rw_prep_async(req, WRITE);
+ return io_writev_prep_async(req);
case IORING_OP_SENDMSG:
return io_sendmsg_prep_async(req);
case IORING_OP_RECVMSG:
@@ -8264,6 +8359,11 @@ static void io_clean_op(struct io_kiocb *req)
kfree(req->apoll);
req->apoll = NULL;
}
+ if (req->flags & REQ_F_INFLIGHT) {
+ struct io_uring_task *tctx = req->task->io_uring;
+
+ atomic_dec(&tctx->inflight_tracked);
+ }
if (req->flags & REQ_F_CREDS)
put_cred(req->creds);
if (req->flags & REQ_F_ASYNC_DATA) {
@@ -8288,6 +8388,7 @@ static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
{
+ const struct io_op_def *def = &io_op_defs[req->opcode];
const struct cred *creds = NULL;
int ret;
@@ -8297,7 +8398,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
creds = override_creds(req->creds);
- if (!io_op_defs[req->opcode].audit_skip)
+ if (!def->audit_skip)
audit_uring_entry(req->opcode);
switch (req->opcode) {
@@ -8321,7 +8422,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
ret = io_poll_add(req, issue_flags);
break;
case IORING_OP_POLL_REMOVE:
- ret = io_poll_update(req, issue_flags);
+ ret = io_poll_remove(req, issue_flags);
break;
case IORING_OP_SYNC_FILE_RANGE:
ret = io_sync_file_range(req, issue_flags);
@@ -8436,7 +8537,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
break;
}
- if (!io_op_defs[req->opcode].audit_skip)
+ if (!def->audit_skip)
audit_uring_exit(!ret, ret);
if (creds)
@@ -8569,19 +8670,6 @@ out:
return file;
}
-/*
- * Drop the file for requeue operations. Only used of req->file is the
- * io_uring descriptor itself.
- */
-static void io_drop_inflight_file(struct io_kiocb *req)
-{
- if (unlikely(req->flags & REQ_F_INFLIGHT)) {
- fput(req->file);
- req->file = NULL;
- req->flags &= ~REQ_F_INFLIGHT;
- }
-}
-
static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
{
struct file *file = fget(fd);
@@ -8590,7 +8678,7 @@ static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
/* we don't allow fixed io_uring files */
if (file && file->f_op == &io_uring_fops)
- req->flags |= REQ_F_INFLIGHT;
+ io_req_track_inflight(req);
return file;
}
@@ -8788,6 +8876,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
const struct io_uring_sqe *sqe)
__must_hold(&ctx->uring_lock)
{
+ const struct io_op_def *def;
unsigned int sqe_flags;
int personality;
u8 opcode;
@@ -8805,12 +8894,13 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
req->opcode = 0;
return -EINVAL;
}
+ def = &io_op_defs[opcode];
if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
/* enforce forwards compatibility on users */
if (sqe_flags & ~SQE_VALID_FLAGS)
return -EINVAL;
if (sqe_flags & IOSQE_BUFFER_SELECT) {
- if (!io_op_defs[opcode].buffer_select)
+ if (!def->buffer_select)
return -EOPNOTSUPP;
req->buf_index = READ_ONCE(sqe->buf_group);
}
@@ -8836,12 +8926,12 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
}
}
- if (!io_op_defs[opcode].ioprio && sqe->ioprio)
+ if (!def->ioprio && sqe->ioprio)
return -EINVAL;
- if (!io_op_defs[opcode].iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
+ if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- if (io_op_defs[opcode].needs_file) {
+ if (def->needs_file) {
struct io_submit_state *state = &ctx->submit_state;
req->cqe.fd = READ_ONCE(sqe->fd);
@@ -8850,7 +8940,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
* Plug now if we have more than 2 IO left after this, and the
* target is potentially a read/write to block based storage.
*/
- if (state->need_plug && io_op_defs[opcode].plug) {
+ if (state->need_plug && def->plug) {
state->plug_started = true;
state->need_plug = false;
blk_start_plug_nr_ios(&state->plug, state->submit_nr);
@@ -9658,8 +9748,7 @@ static inline void io_file_bitmap_set(struct io_file_table *table, int bit)
{
WARN_ON_ONCE(test_bit(bit, table->bitmap));
__set_bit(bit, table->bitmap);
- if (bit == table->alloc_hint)
- table->alloc_hint++;
+ table->alloc_hint = bit + 1;
}
static inline void io_file_bitmap_clear(struct io_file_table *table, int bit)
@@ -10113,21 +10202,19 @@ static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
unsigned int issue_flags, u32 slot_index)
+ __must_hold(&req->ctx->uring_lock)
{
struct io_ring_ctx *ctx = req->ctx;
bool needs_switch = false;
struct io_fixed_file *file_slot;
- int ret = -EBADF;
+ int ret;
- io_ring_submit_lock(ctx, issue_flags);
if (file->f_op == &io_uring_fops)
- goto err;
- ret = -ENXIO;
+ return -EBADF;
if (!ctx->file_data)
- goto err;
- ret = -EINVAL;
+ return -ENXIO;
if (slot_index >= ctx->nr_user_files)
- goto err;
+ return -EINVAL;
slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
@@ -10158,15 +10245,14 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
err:
if (needs_switch)
io_rsrc_node_switch(ctx, ctx->file_data);
- io_ring_submit_unlock(ctx, issue_flags);
if (ret)
fput(file);
return ret;
}
-static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
+static int __io_close_fixed(struct io_kiocb *req, unsigned int issue_flags,
+ unsigned int offset)
{
- unsigned int offset = req->close.file_slot - 1;
struct io_ring_ctx *ctx = req->ctx;
struct io_fixed_file *file_slot;
struct file *file;
@@ -10203,6 +10289,11 @@ out:
return ret;
}
+static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
+{
+ return __io_close_fixed(req, issue_flags, req->close.file_slot - 1);
+}
+
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
struct io_uring_rsrc_update2 *up,
unsigned nr_args)
@@ -10351,6 +10442,7 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task,
xa_init(&tctx->xa);
init_waitqueue_head(&tctx->wait);
atomic_set(&tctx->in_idle, 0);
+ atomic_set(&tctx->inflight_tracked, 0);
task->io_uring = tctx;
spin_lock_init(&tctx->task_lock);
INIT_WQ_LIST(&tctx->task_list);
@@ -11046,6 +11138,7 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
xa_for_each(&ctx->io_bl_xa, index, bl) {
xa_erase(&ctx->io_bl_xa, bl->bgid);
__io_remove_buffers(ctx, bl, -1U);
+ kfree(bl);
}
while (!list_empty(&ctx->io_buffers_pages)) {
@@ -11581,7 +11674,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
{
if (tracked)
- return 0;
+ return atomic_read(&tctx->inflight_tracked);
return percpu_counter_sum(&tctx->inflight);
}
@@ -11957,14 +12050,14 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
return -EINVAL;
fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
f.file = tctx->registered_rings[fd];
- if (unlikely(!f.file))
- return -EBADF;
+ f.flags = 0;
} else {
f = fdget(fd);
- if (unlikely(!f.file))
- return -EBADF;
}
+ if (unlikely(!f.file))
+ return -EBADF;
+
ret = -EOPNOTSUPP;
if (unlikely(f.file->f_op != &io_uring_fops))
goto out_fput;
@@ -12062,8 +12155,7 @@ iopoll_locked:
out:
percpu_ref_put(&ctx->refs);
out_fput:
- if (!(flags & IORING_ENTER_REGISTERED_RING))
- fdput(f);
+ fdput(f);
return ret;
}
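
The io_uring_enter() change relies on fdput() being a no-op when no reference
was taken: the registered-ring path clears f.flags, while fdget() sets
FDPUT_FPUT for a real lookup. A sketch consistent with the usual struct fd
helpers, shown only to illustrate why the now-unconditional fdput() is safe:

static inline void example_fdput(struct fd f)
{
	if (f.flags & FDPUT_FPUT)	/* set by fdget(), never by the ring path */
		fput(f.file);
}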
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 7e9abdb89712..acd32f05b519 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -43,9 +43,9 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n",
__func__,
jeb->offset, jeb->offset, jeb->offset + c->sector_size);
- instr = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
+ instr = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
if (!instr) {
- pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
+ pr_warn("kzalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
@@ -57,8 +57,6 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
return;
}
- memset(instr, 0, sizeof(*instr));
-
instr->addr = jeb->offset;
instr->len = c->sector_size;
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 00a110f40e10..39cec28096a7 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -604,6 +604,7 @@ out_root:
jffs2_free_raw_node_refs(c);
kvfree(c->blocks);
jffs2_clear_xattr_subsystem(c);
+ jffs2_sum_exit(c);
out_inohash:
kfree(c->inocache_list);
out_wbuf:
diff --git a/fs/jfs/Makefile b/fs/jfs/Makefile
index 285ec189ed5c..7156d2c218c7 100644
--- a/fs/jfs/Makefile
+++ b/fs/jfs/Makefile
@@ -13,5 +13,3 @@ jfs-y := super.o file.o inode.o namei.o jfs_mount.o jfs_umount.o \
resize.o xattr.o ioctl.o
jfs-$(CONFIG_JFS_POSIX_ACL) += acl.o
-
-ccflags-y := -D_JFS_4K
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index a5dd7e53754a..259326556ada 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -224,18 +224,9 @@ int jfs_get_block(struct inode *ip, sector_t lblock,
* this as a hole
*/
goto unlock;
-#ifdef _JFS_4K
XADoffset(&xad, lblock64);
XADlength(&xad, xlen);
XADaddress(&xad, xaddr);
-#else /* _JFS_4K */
- /*
- * As long as block size = 4K, this isn't a problem.
- * We should mark the whole page not ABNR, but how
- * will we know to mark the other blocks BH_New?
- */
- BUG();
-#endif /* _JFS_4K */
rc = extRecord(ip, &xad);
if (rc)
goto unlock;
@@ -252,7 +243,6 @@ int jfs_get_block(struct inode *ip, sector_t lblock,
/*
* Allocate a new block
*/
-#ifdef _JFS_4K
if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad)))
goto unlock;
rc = extAlloc(ip, xlen, lblock64, &xad, false);
@@ -263,14 +253,6 @@ int jfs_get_block(struct inode *ip, sector_t lblock,
map_bh(bh_result, ip->i_sb, addressXAD(&xad));
bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;
-#else /* _JFS_4K */
- /*
- * We need to do whatever it takes to keep all but the last buffers
- * in 4K pages - see jfs_write.c
- */
- BUG();
-#endif /* _JFS_4K */
-
unlock:
/*
* Release lock on inode
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index d8502f4989d9..6b838d3ae7c2 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -385,7 +385,8 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
}
/* write the last buffer. */
- write_metapage(mp);
+ if (mp)
+ write_metapage(mp);
IREAD_UNLOCK(ipbmap);
@@ -868,74 +869,6 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
return (rc);
}
-#ifdef _NOTYET
-/*
- * NAME: dbAllocExact()
- *
- * FUNCTION: try to allocate the requested extent;
- *
- * PARAMETERS:
- * ip - pointer to in-core inode;
- * blkno - extent address;
- * nblocks - extent length;
- *
- * RETURN VALUES:
- * 0 - success
- * -ENOSPC - insufficient disk resources
- * -EIO - i/o error
- */
-int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
-{
- int rc;
- struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
- struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
- struct dmap *dp;
- s64 lblkno;
- struct metapage *mp;
-
- IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
-
- /*
- * validate extent request:
- *
- * note: defragfs policy:
- * max 64 blocks will be moved.
- * allocation request size must be satisfied from a single dmap.
- */
- if (nblocks <= 0 || nblocks > BPERDMAP || blkno >= bmp->db_mapsize) {
- IREAD_UNLOCK(ipbmap);
- return -EINVAL;
- }
-
- if (nblocks > ((s64) 1 << bmp->db_maxfreebud)) {
- /* the free space is no longer available */
- IREAD_UNLOCK(ipbmap);
- return -ENOSPC;
- }
-
- /* read in the dmap covering the extent */
- lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
- mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
- if (mp == NULL) {
- IREAD_UNLOCK(ipbmap);
- return -EIO;
- }
- dp = (struct dmap *) mp->data;
-
- /* try to allocate the requested extent */
- rc = dbAllocNext(bmp, dp, blkno, nblocks);
-
- IREAD_UNLOCK(ipbmap);
-
- if (rc == 0)
- mark_metapage_dirty(mp);
-
- release_metapage(mp);
-
- return (rc);
-}
-#endif /* _NOTYET */
-
/*
* NAME: dbReAlloc()
*
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 837d42f61464..92b7c533407c 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -2423,304 +2423,6 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
return 0;
}
-#ifdef _NOTYET
-/*
- * NAME: dtRelocate()
- *
- * FUNCTION: relocate dtpage (internal or leaf) of directory;
- * This function is mainly used by defragfs utility.
- */
-int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
- s64 nxaddr)
-{
- int rc = 0;
- struct metapage *mp, *pmp, *lmp, *rmp;
- dtpage_t *p, *pp, *rp = 0, *lp= 0;
- s64 bn;
- int index;
- struct btstack btstack;
- pxd_t *pxd;
- s64 oxaddr, nextbn, prevbn;
- int xlen, xsize;
- struct tlock *tlck;
- struct dt_lock *dtlck;
- struct pxd_lock *pxdlock;
- s8 *stbl;
- struct lv *lv;
-
- oxaddr = addressPXD(opxd);
- xlen = lengthPXD(opxd);
-
- jfs_info("dtRelocate: lmxaddr:%Ld xaddr:%Ld:%Ld xlen:%d",
- (long long)lmxaddr, (long long)oxaddr, (long long)nxaddr,
- xlen);
-
- /*
- * 1. get the internal parent dtpage covering
- * router entry for the tartget page to be relocated;
- */
- rc = dtSearchNode(ip, lmxaddr, opxd, &btstack);
- if (rc)
- return rc;
-
- /* retrieve search result */
- DT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
- jfs_info("dtRelocate: parent router entry validated.");
-
- /*
- * 2. relocate the target dtpage
- */
- /* read in the target page from src extent */
- DT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc);
- if (rc) {
- /* release the pinned parent page */
- DT_PUTPAGE(pmp);
- return rc;
- }
-
- /*
- * read in sibling pages if any to update sibling pointers;
- */
- rmp = NULL;
- if (p->header.next) {
- nextbn = le64_to_cpu(p->header.next);
- DT_GETPAGE(ip, nextbn, rmp, PSIZE, rp, rc);
- if (rc) {
- DT_PUTPAGE(mp);
- DT_PUTPAGE(pmp);
- return (rc);
- }
- }
-
- lmp = NULL;
- if (p->header.prev) {
- prevbn = le64_to_cpu(p->header.prev);
- DT_GETPAGE(ip, prevbn, lmp, PSIZE, lp, rc);
- if (rc) {
- DT_PUTPAGE(mp);
- DT_PUTPAGE(pmp);
- if (rmp)
- DT_PUTPAGE(rmp);
- return (rc);
- }
- }
-
- /* at this point, all xtpages to be updated are in memory */
-
- /*
- * update sibling pointers of sibling dtpages if any;
- */
- if (lmp) {
- tlck = txLock(tid, ip, lmp, tlckDTREE | tlckRELINK);
- dtlck = (struct dt_lock *) & tlck->lock;
- /* linelock header */
- ASSERT(dtlck->index == 0);
- lv = & dtlck->lv[0];
- lv->offset = 0;
- lv->length = 1;
- dtlck->index++;
-
- lp->header.next = cpu_to_le64(nxaddr);
- DT_PUTPAGE(lmp);
- }
-
- if (rmp) {
- tlck = txLock(tid, ip, rmp, tlckDTREE | tlckRELINK);
- dtlck = (struct dt_lock *) & tlck->lock;
- /* linelock header */
- ASSERT(dtlck->index == 0);
- lv = & dtlck->lv[0];
- lv->offset = 0;
- lv->length = 1;
- dtlck->index++;
-
- rp->header.prev = cpu_to_le64(nxaddr);
- DT_PUTPAGE(rmp);
- }
-
- /*
- * update the target dtpage to be relocated
- *
- * write LOG_REDOPAGE of LOG_NEW type for dst page
- * for the whole target page (logredo() will apply
- * after image and update bmap for allocation of the
- * dst extent), and update bmap for allocation of
- * the dst extent;
- */
- tlck = txLock(tid, ip, mp, tlckDTREE | tlckNEW);
- dtlck = (struct dt_lock *) & tlck->lock;
- /* linelock header */
- ASSERT(dtlck->index == 0);
- lv = & dtlck->lv[0];
-
- /* update the self address in the dtpage header */
- pxd = &p->header.self;
- PXDaddress(pxd, nxaddr);
-
- /* the dst page is the same as the src page, i.e.,
- * linelock for afterimage of the whole page;
- */
- lv->offset = 0;
- lv->length = p->header.maxslot;
- dtlck->index++;
-
- /* update the buffer extent descriptor of the dtpage */
- xsize = xlen << JFS_SBI(ip->i_sb)->l2bsize;
-
- /* unpin the relocated page */
- DT_PUTPAGE(mp);
- jfs_info("dtRelocate: target dtpage relocated.");
-
- /* the moved extent is a dtpage, so a LOG_NOREDOPAGE log rec
- * needs to be written (in logredo(), the LOG_NOREDOPAGE log rec
- * will also force a bmap update ).
- */
-
- /*
- * 3. acquire maplock for the source extent to be freed;
- */
- /* for dtpage relocation, write a LOG_NOREDOPAGE record
- * for the source dtpage (logredo() will init NoRedoPage
- * filter and will also update bmap for free of the source
- * dtpage), and update bmap for free of the source dtpage;
- */
- tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE);
- pxdlock = (struct pxd_lock *) & tlck->lock;
- pxdlock->flag = mlckFREEPXD;
- PXDaddress(&pxdlock->pxd, oxaddr);
- PXDlength(&pxdlock->pxd, xlen);
- pxdlock->index = 1;
-
- /*
- * 4. update the parent router entry for relocation;
- *
- * acquire tlck for the parent entry covering the target dtpage;
- * write LOG_REDOPAGE to apply after image only;
- */
- jfs_info("dtRelocate: update parent router entry.");
- tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY);
- dtlck = (struct dt_lock *) & tlck->lock;
- lv = & dtlck->lv[dtlck->index];
-
- /* update the PXD with the new address */
- stbl = DT_GETSTBL(pp);
- pxd = (pxd_t *) & pp->slot[stbl[index]];
- PXDaddress(pxd, nxaddr);
- lv->offset = stbl[index];
- lv->length = 1;
- dtlck->index++;
-
- /* unpin the parent dtpage */
- DT_PUTPAGE(pmp);
-
- return rc;
-}
-
-/*
- * NAME: dtSearchNode()
- *
- * FUNCTION: Search for a dtpage containing a specified address
- * This function is mainly used by defragfs utility.
- *
- * NOTE: Search result on stack, the found page is pinned at exit.
- * The result page must be an internal dtpage.
- * lmxaddr gives the address of the leftmost page of the
- * dtree level, in which the required dtpage resides.
- */
-static int dtSearchNode(struct inode *ip, s64 lmxaddr, pxd_t * kpxd,
- struct btstack * btstack)
-{
- int rc = 0;
- s64 bn;
- struct metapage *mp;
- dtpage_t *p;
- int psize = 288; /* initial in-line directory */
- s8 *stbl;
- int i;
- pxd_t *pxd;
- struct btframe *btsp;
-
- BT_CLR(btstack); /* reset stack */
-
- /*
- * descend tree to the level with specified leftmost page
- *
- * by convention, root bn = 0.
- */
- for (bn = 0;;) {
- /* get/pin the page to search */
- DT_GETPAGE(ip, bn, mp, psize, p, rc);
- if (rc)
- return rc;
-
- /* does the xaddr of the leftmost page of the level
- * match the level search key ?
- */
- if (p->header.flag & BT_ROOT) {
- if (lmxaddr == 0)
- break;
- } else if (addressPXD(&p->header.self) == lmxaddr)
- break;
-
- /*
- * descend down to leftmost child page
- */
- if (p->header.flag & BT_LEAF) {
- DT_PUTPAGE(mp);
- return -ESTALE;
- }
-
- /* get the leftmost entry */
- stbl = DT_GETSTBL(p);
- pxd = (pxd_t *) & p->slot[stbl[0]];
-
- /* get the child page block address */
- bn = addressPXD(pxd);
- psize = lengthPXD(pxd) << JFS_SBI(ip->i_sb)->l2bsize;
- /* unpin the parent page */
- DT_PUTPAGE(mp);
- }
-
- /*
- * search each page at the current level
- */
- loop:
- stbl = DT_GETSTBL(p);
- for (i = 0; i < p->header.nextindex; i++) {
- pxd = (pxd_t *) & p->slot[stbl[i]];
-
- /* found the specified router entry */
- if (addressPXD(pxd) == addressPXD(kpxd) &&
- lengthPXD(pxd) == lengthPXD(kpxd)) {
- btsp = btstack->top;
- btsp->bn = bn;
- btsp->index = i;
- btsp->mp = mp;
-
- return 0;
- }
- }
-
- /* get the right sibling page if any */
- if (p->header.next)
- bn = le64_to_cpu(p->header.next);
- else {
- DT_PUTPAGE(mp);
- return -ESTALE;
- }
-
- /* unpin current page */
- DT_PUTPAGE(mp);
-
- /* get the right sibling page */
- DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
-
- goto loop;
-}
-#endif /* _NOTYET */
-
/*
* dtRelink()
*
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index bb4a342a193d..ae99a7e232ee 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -16,9 +16,6 @@
* forward references
*/
static int extBalloc(struct inode *, s64, s64 *, s64 *);
-#ifdef _NOTYET
-static int extBrealloc(struct inode *, s64, s64, s64 *, s64 *);
-#endif
static s64 extRoundDown(s64 nb);
#define DPD(a) (printk("(a): %d\n",(a)))
@@ -177,162 +174,6 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
return (0);
}
-
-#ifdef _NOTYET
-/*
- * NAME: extRealloc()
- *
- * FUNCTION: extend the allocation of a file extent containing a
- * partially backed last page.
- *
- * PARAMETERS:
- * ip - the inode of the file.
- * cp - cbuf for the partially backed last page.
- * xlen - request size of the resulting extent.
- * xp - pointer to an xad. on successful exit, the xad
- * describes the newly allocated extent.
- * abnr - bool indicating whether the newly allocated extent
- * should be marked as allocated but not recorded.
- *
- * RETURN VALUES:
- * 0 - success
- * -EIO - i/o error.
- * -ENOSPC - insufficient disk resources.
- */
-int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
-{
- struct super_block *sb = ip->i_sb;
- s64 xaddr, xlen, nxaddr, delta, xoff;
- s64 ntail, nextend, ninsert;
- int rc, nbperpage = JFS_SBI(sb)->nbperpage;
- int xflag;
-
- /* This blocks if we are low on resources */
- txBeginAnon(ip->i_sb);
-
- mutex_lock(&JFS_IP(ip)->commit_mutex);
- /* validate extent length */
- if (nxlen > MAXXLEN)
- nxlen = MAXXLEN;
-
- /* get the extend (partial) page's disk block address and
- * number of blocks.
- */
- xaddr = addressXAD(xp);
- xlen = lengthXAD(xp);
- xoff = offsetXAD(xp);
-
- /* if the extend page is abnr and if the request is for
- * the extent to be allocated and recorded,
- * make the page allocated and recorded.
- */
- if ((xp->flag & XAD_NOTRECORDED) && !abnr) {
- xp->flag = 0;
- if ((rc = xtUpdate(0, ip, xp)))
- goto exit;
- }
-
- /* try to allocate the requested number of blocks for the
- * extent. dbReAlloc() first tries to satisfy the request
- * by extending the allocation in place. otherwise, it will
- * try to allocate a new set of blocks large enough for the
- * request. in satisfying a request, dbReAlloc() may allocate
- * less than what was requested but will always allocate enough
- * space to satisfy the extend page.
- */
- if ((rc = extBrealloc(ip, xaddr, xlen, &nxlen, &nxaddr)))
- goto exit;
-
- /* Allocate blocks to quota. */
- rc = dquot_alloc_block(ip, nxlen);
- if (rc) {
- dbFree(ip, nxaddr, (s64) nxlen);
- mutex_unlock(&JFS_IP(ip)->commit_mutex);
- return rc;
- }
-
- delta = nxlen - xlen;
-
- /* check if the extend page is not abnr but the request is abnr
- * and the allocated disk space is for more than one page. if this
- * is the case, there is a mismatch of abnr between the extend page
- * and the one or more pages following the extend page. as a result,
- * two extents will have to be manipulated. the first will be that
- * of the extent of the extend page and will be manipulated thru
- * an xtExtend() or an xtTailgate(), depending upon whether the
- * disk allocation occurred as an inplace extension. the second
- * extent will be manipulated (created) through an xtInsert() and
- * will be for the pages following the extend page.
- */
- if (abnr && (!(xp->flag & XAD_NOTRECORDED)) && (nxlen > nbperpage)) {
- ntail = nbperpage;
- nextend = ntail - xlen;
- ninsert = nxlen - nbperpage;
-
- xflag = XAD_NOTRECORDED;
- } else {
- ntail = nxlen;
- nextend = delta;
- ninsert = 0;
-
- xflag = xp->flag;
- }
-
- /* if we were able to extend the disk allocation in place,
- * extend the extent. otherwise, move the extent to a
- * new disk location.
- */
- if (xaddr == nxaddr) {
- /* extend the extent */
- if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
- dbFree(ip, xaddr + xlen, delta);
- dquot_free_block(ip, nxlen);
- goto exit;
- }
- } else {
- /*
- * move the extent to a new location:
- *
- * xtTailgate() accounts for relocated tail extent;
- */
- if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
- dbFree(ip, nxaddr, nxlen);
- dquot_free_block(ip, nxlen);
- goto exit;
- }
- }
-
-
- /* check if we need to also insert a new extent */
- if (ninsert) {
- /* perform the insert. if it fails, free the blocks
- * to be inserted and make it appear that we only did
- * the xtExtend() or xtTailgate() above.
- */
- xaddr = nxaddr + ntail;
- if (xtInsert (0, ip, xflag, xoff + ntail, (int) ninsert,
- &xaddr, 0)) {
- dbFree(ip, xaddr, (s64) ninsert);
- delta = nextend;
- nxlen = ntail;
- xflag = 0;
- }
- }
-
- /* set the return results */
- XADaddress(xp, nxaddr);
- XADlength(xp, nxlen);
- XADoffset(xp, xoff);
- xp->flag = xflag;
-
- mark_inode_dirty(ip);
-exit:
- mutex_unlock(&JFS_IP(ip)->commit_mutex);
- return (rc);
-}
-#endif /* _NOTYET */
-
-
/*
* NAME: extHint()
*
@@ -423,44 +264,6 @@ int extRecord(struct inode *ip, xad_t * xp)
return rc;
}
-
-#ifdef _NOTYET
-/*
- * NAME: extFill()
- *
- * FUNCTION: allocate disk space for a file page that represents
- * a file hole.
- *
- * PARAMETERS:
- * ip - the inode of the file.
- * cp - cbuf of the file page representing the hole.
- *
- * RETURN VALUES:
- * 0 - success
- * -EIO - i/o error.
- * -ENOSPC - insufficient disk resources.
- */
-int extFill(struct inode *ip, xad_t * xp)
-{
- int rc, nbperpage = JFS_SBI(ip->i_sb)->nbperpage;
- s64 blkno = offsetXAD(xp) >> ip->i_blkbits;
-
-// assert(ISSPARSE(ip));
-
- /* initialize the extent allocation hint */
- XADaddress(xp, 0);
-
- /* allocate an extent to fill the hole */
- if ((rc = extAlloc(ip, nbperpage, blkno, xp, false)))
- return (rc);
-
- assert(lengthPXD(xp) == nbperpage);
-
- return (0);
-}
-#endif /* _NOTYET */
-
-
/*
* NAME: extBalloc()
*
@@ -550,64 +353,6 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
return (0);
}
-
-#ifdef _NOTYET
-/*
- * NAME: extBrealloc()
- *
- * FUNCTION: attempt to extend an extent's allocation.
- *
- * Initially, we will try to extend the extent's allocation
- * in place. If this fails, we'll try to move the extent
- * to a new set of blocks. If moving the extent, we initially
- * will try to allocate disk blocks for the requested size
- * (newnblks). if this fails (new contiguous free blocks not
- * available), we'll try to allocate a smaller number of
- * blocks (producing a smaller extent), with this smaller
- * number of blocks consisting of the requested number of
- * blocks rounded down to the next smaller power of 2
- * number (i.e. 16 -> 8). We'll continue to round down and
- * retry the allocation until the number of blocks to allocate
- * is smaller than the number of blocks per page.
- *
- * PARAMETERS:
- * ip - the inode of the file.
- * blkno - starting block number of the extent's current allocation.
- * nblks - number of blocks within the extent's current allocation.
- * newnblks - pointer to a s64 value. on entry, this value is the
- * new desired extent size (number of blocks). on
- * successful exit, this value is set to the extent's actual
- * new size (new number of blocks).
- * newblkno - the starting block number of the extent's new allocation.
- *
- * RETURN VALUES:
- * 0 - success
- * -EIO - i/o error.
- * -ENOSPC - insufficient disk resources.
- */
-static int
-extBrealloc(struct inode *ip,
- s64 blkno, s64 nblks, s64 * newnblks, s64 * newblkno)
-{
- int rc;
-
- /* try to extend in place */
- if ((rc = dbExtend(ip, blkno, nblks, *newnblks - nblks)) == 0) {
- *newblkno = blkno;
- return (0);
- } else {
- if (rc != -ENOSPC)
- return (rc);
- }
-
- /* in place extension not possible.
- * try to move the extent to a new set of blocks.
- */
- return (extBalloc(ip, blkno, newnblks, newblkno));
-}
-#endif /* _NOTYET */
-
-
/*
* NAME: extRoundDown()
*
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 997c81fcea34..695415cbfe98 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -388,14 +388,6 @@ lmWriteRecord(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
p = (caddr_t) &JFS_IP(tlck->ip)->i_xtroot;
linelock = (struct linelock *) & tlck->lock;
}
-#ifdef _JFS_WIP
- else if (tlck->flag & tlckINLINELOCK) {
-
- inlinelock = (struct inlinelock *) & tlck;
- p = (caddr_t) & inlinelock->pxd;
- linelock = (struct linelock *) & tlck;
- }
-#endif /* _JFS_WIP */
else {
jfs_err("lmWriteRecord: UFO tlck:0x%p", tlck);
return 0; /* Probably should trap */
diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
index aa4ff7bcaff2..48d1f70f786c 100644
--- a/fs/jfs/jfs_mount.c
+++ b/fs/jfs/jfs_mount.c
@@ -307,13 +307,11 @@ static int chkSuper(struct super_block *sb)
}
bsize = le32_to_cpu(j_sb->s_bsize);
-#ifdef _JFS_4K
if (bsize != PSIZE) {
- jfs_err("Currently only 4K block size supported!");
+ jfs_err("Only 4K block size supported!");
rc = -EINVAL;
goto out;
}
-#endif /* _JFS_4K */
jfs_info("superblock: flag:0x%08x state:0x%08x size:0x%Lx",
le32_to_cpu(j_sb->s_flag), le32_to_cpu(j_sb->s_state),
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 042bbe6d8ac2..ffd4feece078 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -1490,40 +1490,6 @@ static void diLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
tlck->flag |= tlckWRITEPAGE;
} else
jfs_err("diLog: UFO type tlck:0x%p", tlck);
-#ifdef _JFS_WIP
- /*
- * alloc/free external EA extent
- *
- * a maplock for txUpdateMap() to update bPWMAP for alloc/free
- * of the extent has been formatted at txLock() time;
- */
- else {
- assert(tlck->type & tlckEA);
-
- /* log LOG_UPDATEMAP for logredo() to update bmap for
- * alloc of new (and free of old) external EA extent;
- */
- lrd->type = cpu_to_le16(LOG_UPDATEMAP);
- pxdlock = (struct pxd_lock *) & tlck->lock;
- nlock = pxdlock->index;
- for (i = 0; i < nlock; i++, pxdlock++) {
- if (pxdlock->flag & mlckALLOCPXD)
- lrd->log.updatemap.type =
- cpu_to_le16(LOG_ALLOCPXD);
- else
- lrd->log.updatemap.type =
- cpu_to_le16(LOG_FREEPXD);
- lrd->log.updatemap.nxd = cpu_to_le16(1);
- lrd->log.updatemap.pxd = pxdlock->pxd;
- lrd->backchain =
- cpu_to_le32(lmLog(log, tblk, lrd, NULL));
- }
-
- /* update bmap */
- tlck->flag |= tlckUPDATEMAP;
- }
-#endif /* _JFS_WIP */
-
return;
}
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index 3148e9b35f3b..2d304cee884c 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -114,17 +114,6 @@ static int xtSplitPage(tid_t tid, struct inode *ip, struct xtsplit * split,
static int xtSplitRoot(tid_t tid, struct inode *ip,
struct xtsplit * split, struct metapage ** rmpp);
-#ifdef _STILL_TO_PORT
-static int xtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp,
- xtpage_t * fp, struct btstack * btstack);
-
-static int xtSearchNode(struct inode *ip,
- xad_t * xad,
- int *cmpp, struct btstack * btstack, int flag);
-
-static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * fp);
-#endif /* _STILL_TO_PORT */
-
/*
* xtLookup()
*
@@ -1493,189 +1482,6 @@ int xtExtend(tid_t tid, /* transaction id */
return rc;
}
-#ifdef _NOTYET
-/*
- * xtTailgate()
- *
- * function: split existing 'tail' extent
- * (split offset >= start offset of tail extent), and
- * relocate and extend the split tail half;
- *
- * note: existing extent may or may not have been committed.
- * caller is responsible for pager buffer cache update, and
- * working block allocation map update;
- * update pmap: free old split tail extent, alloc new extent;
- */
-int xtTailgate(tid_t tid, /* transaction id */
- struct inode *ip, s64 xoff, /* split/new extent offset */
- s32 xlen, /* new extent length */
- s64 xaddr, /* new extent address */
- int flag)
-{
- int rc = 0;
- int cmp;
- struct metapage *mp; /* meta-page buffer */
- xtpage_t *p; /* base B+-tree index page */
- s64 bn;
- int index, nextindex, llen, rlen;
- struct btstack btstack; /* traverse stack */
- struct xtsplit split; /* split information */
- xad_t *xad;
- struct tlock *tlck;
- struct xtlock *xtlck = 0;
- struct tlock *mtlck;
- struct maplock *pxdlock;
-
-/*
-printf("xtTailgate: nxoff:0x%lx nxlen:0x%x nxaddr:0x%lx\n",
- (ulong)xoff, xlen, (ulong)xaddr);
-*/
-
- /* there must exist extent to be tailgated */
- if ((rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, XT_INSERT)))
- return rc;
-
- /* retrieve search result */
- XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
-
- if (cmp != 0) {
- XT_PUTPAGE(mp);
- jfs_error(ip->i_sb, "couldn't find extent\n");
- return -EIO;
- }
-
- /* entry found must be last entry */
- nextindex = le16_to_cpu(p->header.nextindex);
- if (index != nextindex - 1) {
- XT_PUTPAGE(mp);
- jfs_error(ip->i_sb, "the entry found is not the last entry\n");
- return -EIO;
- }
-
- BT_MARK_DIRTY(mp, ip);
- /*
- * acquire tlock of the leaf page containing original entry
- */
- if (!test_cflag(COMMIT_Nolink, ip)) {
- tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
- xtlck = (struct xtlock *) & tlck->lock;
- }
-
- /* completely replace extent ? */
- xad = &p->xad[index];
-/*
-printf("xtTailgate: xoff:0x%lx xlen:0x%x xaddr:0x%lx\n",
- (ulong)offsetXAD(xad), lengthXAD(xad), (ulong)addressXAD(xad));
-*/
- if ((llen = xoff - offsetXAD(xad)) == 0)
- goto updateOld;
-
- /*
- * partially replace extent: insert entry for new extent
- */
-//insertNew:
- /*
- * if the leaf page is full, insert the new entry and
- * propagate up the router entry for the new page from split
- *
- * The xtSplitUp() will insert the entry and unpin the leaf page.
- */
- if (nextindex == le16_to_cpu(p->header.maxentry)) {
- /* xtSplitUp() unpins leaf pages */
- split.mp = mp;
- split.index = index + 1;
- split.flag = XAD_NEW;
- split.off = xoff; /* split offset */
- split.len = xlen;
- split.addr = xaddr;
- split.pxdlist = NULL;
- if ((rc = xtSplitUp(tid, ip, &split, &btstack)))
- return rc;
-
- /* get back old page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
- /*
- * if leaf root has been split, original root has been
- * copied to new child page, i.e., original entry now
- * resides on the new child page;
- */
- if (p->header.flag & BT_INTERNAL) {
- ASSERT(p->header.nextindex ==
- cpu_to_le16(XTENTRYSTART + 1));
- xad = &p->xad[XTENTRYSTART];
- bn = addressXAD(xad);
- XT_PUTPAGE(mp);
-
- /* get new child page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
-
- BT_MARK_DIRTY(mp, ip);
- if (!test_cflag(COMMIT_Nolink, ip)) {
- tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
- xtlck = (struct xtlock *) & tlck->lock;
- }
- }
- }
- /*
- * insert the new entry into the leaf page
- */
- else {
- /* insert the new entry: mark the entry NEW */
- xad = &p->xad[index + 1];
- XT_PUTENTRY(xad, XAD_NEW, xoff, xlen, xaddr);
-
- /* advance next available entry index */
- le16_add_cpu(&p->header.nextindex, 1);
- }
-
- /* get back old XAD */
- xad = &p->xad[index];
-
- /*
- * truncate/relocate old extent at split offset
- */
- updateOld:
- /* update dmap for old/committed/truncated extent */
- rlen = lengthXAD(xad) - llen;
- if (!(xad->flag & XAD_NEW)) {
- /* free from PWMAP at commit */
- if (!test_cflag(COMMIT_Nolink, ip)) {
- mtlck = txMaplock(tid, ip, tlckMAP);
- pxdlock = (struct maplock *) & mtlck->lock;
- pxdlock->flag = mlckFREEPXD;
- PXDaddress(&pxdlock->pxd, addressXAD(xad) + llen);
- PXDlength(&pxdlock->pxd, rlen);
- pxdlock->index = 1;
- }
- } else
- /* free from WMAP */
- dbFree(ip, addressXAD(xad) + llen, (s64) rlen);
-
- if (llen)
- /* truncate */
- XADlength(xad, llen);
- else
- /* replace */
- XT_PUTENTRY(xad, XAD_NEW, xoff, xlen, xaddr);
-
- if (!test_cflag(COMMIT_Nolink, ip)) {
- xtlck->lwm.offset = (xtlck->lwm.offset) ?
- min(index, (int)xtlck->lwm.offset) : index;
- xtlck->lwm.length = le16_to_cpu(p->header.nextindex) -
- xtlck->lwm.offset;
- }
-
- /* unpin the leaf page */
- XT_PUTPAGE(mp);
-
- return rc;
-}
-#endif /* _NOTYET */
-
/*
* xtUpdate()
*
@@ -1753,32 +1559,12 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
newindex = index + 1;
nextindex = le16_to_cpu(p->header.nextindex);
-#ifdef _JFS_WIP_NOCOALESCE
- if (xoff < nxoff)
- goto updateRight;
-
- /*
- * replace XAD with nXAD
- */
- replace: /* (nxoff == xoff) */
- if (nxlen == xlen) {
- /* replace XAD with nXAD:recorded */
- *xad = *nxad;
- xad->flag = xflag & ~XAD_NOTRECORDED;
-
- goto out;
- } else /* (nxlen < xlen) */
- goto updateLeft;
-#endif /* _JFS_WIP_NOCOALESCE */
-
-/* #ifdef _JFS_WIP_COALESCE */
if (xoff < nxoff)
goto coalesceRight;
/*
* coalesce with left XAD
*/
-//coalesceLeft: /* (xoff == nxoff) */
/* is XAD first entry of page ? */
if (index == XTENTRYSTART)
goto replace;
@@ -1897,7 +1683,6 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
jfs_error(ip->i_sb, "xoff >= nxoff\n");
return -EIO;
}
-/* #endif _JFS_WIP_COALESCE */
/*
* split XAD into (lXAD, nXAD):
@@ -2305,752 +2090,6 @@ int xtAppend(tid_t tid, /* transaction id */
return rc;
}
-#ifdef _STILL_TO_PORT
-
-/* - TBD for defragmentation/reorganization -
- *
- * xtDelete()
- *
- * function:
- * delete the entry with the specified key.
- *
- * N.B.: whole extent of the entry is assumed to be deleted.
- *
- * parameter:
- *
- * return:
- * ENOENT: if the entry is not found.
- *
- * exception:
- */
-int xtDelete(tid_t tid, struct inode *ip, s64 xoff, s32 xlen, int flag)
-{
- int rc = 0;
- struct btstack btstack;
- int cmp;
- s64 bn;
- struct metapage *mp;
- xtpage_t *p;
- int index, nextindex;
- struct tlock *tlck;
- struct xtlock *xtlck;
-
- /*
- * find the matching entry; xtSearch() pins the page
- */
- if ((rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, 0)))
- return rc;
-
- XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
- if (cmp) {
- /* unpin the leaf page */
- XT_PUTPAGE(mp);
- return -ENOENT;
- }
-
- /*
- * delete the entry from the leaf page
- */
- nextindex = le16_to_cpu(p->header.nextindex);
- le16_add_cpu(&p->header.nextindex, -1);
-
- /*
- * if the leaf page becomes empty, free the page
- */
- if (p->header.nextindex == cpu_to_le16(XTENTRYSTART))
- return (xtDeleteUp(tid, ip, mp, p, &btstack));
-
- BT_MARK_DIRTY(mp, ip);
- /*
- * acquire a transaction lock on the leaf page;
- *
- * action:xad deletion;
- */
- tlck = txLock(tid, ip, mp, tlckXTREE);
- xtlck = (struct xtlock *) & tlck->lock;
- xtlck->lwm.offset =
- (xtlck->lwm.offset) ? min(index, xtlck->lwm.offset) : index;
-
- /* if delete from middle, shift left/compact the remaining entries */
- if (index < nextindex - 1)
- memmove(&p->xad[index], &p->xad[index + 1],
- (nextindex - index - 1) * sizeof(xad_t));
-
- XT_PUTPAGE(mp);
-
- return 0;
-}
-
-
-/* - TBD for defragmentation/reorganization -
- *
- * xtDeleteUp()
- *
- * function:
- * free empty pages as propagating deletion up the tree
- *
- * parameter:
- *
- * return:
- */
-static int
-xtDeleteUp(tid_t tid, struct inode *ip,
- struct metapage * fmp, xtpage_t * fp, struct btstack * btstack)
-{
- int rc = 0;
- struct metapage *mp;
- xtpage_t *p;
- int index, nextindex;
- s64 xaddr;
- int xlen;
- struct btframe *parent;
- struct tlock *tlck;
- struct xtlock *xtlck;
-
- /*
- * keep root leaf page which has become empty
- */
- if (fp->header.flag & BT_ROOT) {
- /* keep the root page */
- fp->header.flag &= ~BT_INTERNAL;
- fp->header.flag |= BT_LEAF;
- fp->header.nextindex = cpu_to_le16(XTENTRYSTART);
-
- /* XT_PUTPAGE(fmp); */
-
- return 0;
- }
-
- /*
- * free non-root leaf page
- */
- if ((rc = xtRelink(tid, ip, fp))) {
- XT_PUTPAGE(fmp);
- return rc;
- }
-
- xaddr = addressPXD(&fp->header.self);
- xlen = lengthPXD(&fp->header.self);
- /* free the page extent */
- dbFree(ip, xaddr, (s64) xlen);
-
- /* free the buffer page */
- discard_metapage(fmp);
-
- /*
- * propagate page deletion up the index tree
- *
- * If the delete from the parent page makes it empty,
- * continue all the way up the tree.
- * stop if the root page is reached (which is never deleted) or
- * if the entry deletion does not empty the page.
- */
- while ((parent = BT_POP(btstack)) != NULL) {
- /* get/pin the parent page <sp> */
- XT_GETPAGE(ip, parent->bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
-
- index = parent->index;
-
- /* delete the entry for the freed child page from parent.
- */
- nextindex = le16_to_cpu(p->header.nextindex);
-
- /*
- * the parent has the single entry being deleted:
- * free the parent page which has become empty.
- */
- if (nextindex == 1) {
- if (p->header.flag & BT_ROOT) {
- /* keep the root page */
- p->header.flag &= ~BT_INTERNAL;
- p->header.flag |= BT_LEAF;
- p->header.nextindex =
- cpu_to_le16(XTENTRYSTART);
-
- /* XT_PUTPAGE(mp); */
-
- break;
- } else {
- /* free the parent page */
- if ((rc = xtRelink(tid, ip, p)))
- return rc;
-
- xaddr = addressPXD(&p->header.self);
- /* free the page extent */
- dbFree(ip, xaddr,
- (s64) JFS_SBI(ip->i_sb)->nbperpage);
-
- /* unpin/free the buffer page */
- discard_metapage(mp);
-
- /* propagate up */
- continue;
- }
- }
- /*
- * the parent has other entries remaining:
- * delete the router entry from the parent page.
- */
- else {
- BT_MARK_DIRTY(mp, ip);
- /*
- * acquire a transaction lock on the leaf page;
- *
- * action:xad deletion;
- */
- tlck = txLock(tid, ip, mp, tlckXTREE);
- xtlck = (struct xtlock *) & tlck->lock;
- xtlck->lwm.offset =
- (xtlck->lwm.offset) ? min(index,
- xtlck->lwm.
- offset) : index;
-
- /* if delete from middle,
- * shift left/compact the remaining entries in the page
- */
- if (index < nextindex - 1)
- memmove(&p->xad[index], &p->xad[index + 1],
- (nextindex - index -
- 1) << L2XTSLOTSIZE);
-
- le16_add_cpu(&p->header.nextindex, -1);
- jfs_info("xtDeleteUp(entry): 0x%lx[%d]",
- (ulong) parent->bn, index);
- }
-
- /* unpin the parent page */
- XT_PUTPAGE(mp);
-
- /* exit propagation up */
- break;
- }
-
- return 0;
-}
-
-
-/*
- * NAME: xtRelocate()
- *
- * FUNCTION: relocate xtpage or data extent of regular file;
- * This function is mainly used by defragfs utility.
- *
- * NOTE: This routine does not have the logic to handle
- * uncommitted allocated extent. The caller should call
- * txCommit() to commit all the allocation before call
- * this routine.
- */
-int
-xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
- s64 nxaddr, /* new xaddr */
- int xtype)
-{ /* extent type: XTPAGE or DATAEXT */
- int rc = 0;
- struct tblock *tblk;
- struct tlock *tlck;
- struct xtlock *xtlck;
- struct metapage *mp, *pmp, *lmp, *rmp; /* meta-page buffer */
- xtpage_t *p, *pp, *rp, *lp; /* base B+-tree index page */
- xad_t *xad;
- pxd_t *pxd;
- s64 xoff, xsize;
- int xlen;
- s64 oxaddr, sxaddr, dxaddr, nextbn, prevbn;
- cbuf_t *cp;
- s64 offset, nbytes, nbrd, pno;
- int nb, npages, nblks;
- s64 bn;
- int cmp;
- int index;
- struct pxd_lock *pxdlock;
- struct btstack btstack; /* traverse stack */
-
- xtype = xtype & EXTENT_TYPE;
-
- xoff = offsetXAD(oxad);
- oxaddr = addressXAD(oxad);
- xlen = lengthXAD(oxad);
-
- /* validate extent offset */
- offset = xoff << JFS_SBI(ip->i_sb)->l2bsize;
- if (offset >= ip->i_size)
- return -ESTALE; /* stale extent */
-
- jfs_info("xtRelocate: xtype:%d xoff:0x%lx xlen:0x%x xaddr:0x%lx:0x%lx",
- xtype, (ulong) xoff, xlen, (ulong) oxaddr, (ulong) nxaddr);
-
- /*
- * 1. get and validate the parent xtpage/xad entry
- * covering the source extent to be relocated;
- */
- if (xtype == DATAEXT) {
- /* search in leaf entry */
- rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, 0);
- if (rc)
- return rc;
-
- /* retrieve search result */
- XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
-
- if (cmp) {
- XT_PUTPAGE(pmp);
- return -ESTALE;
- }
-
- /* validate for exact match with a single entry */
- xad = &pp->xad[index];
- if (addressXAD(xad) != oxaddr || lengthXAD(xad) != xlen) {
- XT_PUTPAGE(pmp);
- return -ESTALE;
- }
- } else { /* (xtype == XTPAGE) */
-
- /* search in internal entry */
- rc = xtSearchNode(ip, oxad, &cmp, &btstack, 0);
- if (rc)
- return rc;
-
- /* retrieve search result */
- XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
-
- if (cmp) {
- XT_PUTPAGE(pmp);
- return -ESTALE;
- }
-
- /* xtSearchNode() validated for exact match with a single entry
- */
- xad = &pp->xad[index];
- }
- jfs_info("xtRelocate: parent xad entry validated.");
-
- /*
- * 2. relocate the extent
- */
- if (xtype == DATAEXT) {
- /* if the extent is allocated-but-not-recorded
- * there is no real data to be moved in this extent,
- */
- if (xad->flag & XAD_NOTRECORDED)
- goto out;
- else
- /* release xtpage for cmRead()/xtLookup() */
- XT_PUTPAGE(pmp);
-
- /*
- * cmRelocate()
- *
- * copy target data pages to be relocated;
- *
- * data extent must start at page boundary and
- * multiple of page size (except the last data extent);
- * read in each page of the source data extent into cbuf,
- * update the cbuf extent descriptor of the page to be
- * homeward bound to new dst data extent
- * copy the data from the old extent to new extent.
- * copy is essential for compressed files to avoid problems
- * that can arise if there was a change in compression
- * algorithms.
- * it is a good strategy because it may disrupt cache
- * policy to keep the pages in memory afterwards.
- */
- offset = xoff << JFS_SBI(ip->i_sb)->l2bsize;
- assert((offset & CM_OFFSET) == 0);
- nbytes = xlen << JFS_SBI(ip->i_sb)->l2bsize;
- pno = offset >> CM_L2BSIZE;
- npages = (nbytes + (CM_BSIZE - 1)) >> CM_L2BSIZE;
-/*
- npages = ((offset + nbytes - 1) >> CM_L2BSIZE) -
- (offset >> CM_L2BSIZE) + 1;
-*/
- sxaddr = oxaddr;
- dxaddr = nxaddr;
-
- /* process the request one cache buffer at a time */
- for (nbrd = 0; nbrd < nbytes; nbrd += nb,
- offset += nb, pno++, npages--) {
- /* compute page size */
- nb = min(nbytes - nbrd, CM_BSIZE);
-
- /* get the cache buffer of the page */
- if (rc = cmRead(ip, offset, npages, &cp))
- break;
-
- assert(addressPXD(&cp->cm_pxd) == sxaddr);
- assert(!cp->cm_modified);
-
- /* bind buffer with the new extent address */
- nblks = nb >> JFS_IP(ip->i_sb)->l2bsize;
- cmSetXD(ip, cp, pno, dxaddr, nblks);
-
- /* release the cbuf, mark it as modified */
- cmPut(cp, true);
-
- dxaddr += nblks;
- sxaddr += nblks;
- }
-
- /* get back parent page */
- if ((rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, 0)))
- return rc;
-
- XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
- jfs_info("xtRelocate: target data extent relocated.");
- } else { /* (xtype == XTPAGE) */
-
- /*
- * read in the target xtpage from the source extent;
- */
- XT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc);
- if (rc) {
- XT_PUTPAGE(pmp);
- return rc;
- }
-
- /*
- * read in sibling pages if any to update sibling pointers;
- */
- rmp = NULL;
- if (p->header.next) {
- nextbn = le64_to_cpu(p->header.next);
- XT_GETPAGE(ip, nextbn, rmp, PSIZE, rp, rc);
- if (rc) {
- XT_PUTPAGE(pmp);
- XT_PUTPAGE(mp);
- return (rc);
- }
- }
-
- lmp = NULL;
- if (p->header.prev) {
- prevbn = le64_to_cpu(p->header.prev);
- XT_GETPAGE(ip, prevbn, lmp, PSIZE, lp, rc);
- if (rc) {
- XT_PUTPAGE(pmp);
- XT_PUTPAGE(mp);
- if (rmp)
- XT_PUTPAGE(rmp);
- return (rc);
- }
- }
-
- /* at this point, all xtpages to be updated are in memory */
-
- /*
- * update sibling pointers of sibling xtpages if any;
- */
- if (lmp) {
- BT_MARK_DIRTY(lmp, ip);
- tlck = txLock(tid, ip, lmp, tlckXTREE | tlckRELINK);
- lp->header.next = cpu_to_le64(nxaddr);
- XT_PUTPAGE(lmp);
- }
-
- if (rmp) {
- BT_MARK_DIRTY(rmp, ip);
- tlck = txLock(tid, ip, rmp, tlckXTREE | tlckRELINK);
- rp->header.prev = cpu_to_le64(nxaddr);
- XT_PUTPAGE(rmp);
- }
-
- /*
- * update the target xtpage to be relocated
- *
- * update the self address of the target page
- * and write to destination extent;
- * redo image covers the whole xtpage since it is new page
- * to the destination extent;
- * update of bmap for the free of source extent
- * of the target xtpage itself:
- * update of bmap for the allocation of destination extent
- * of the target xtpage itself:
- * update of bmap for the extents covered by xad entries in
- * the target xtpage is not necessary since they are not
- * updated;
- * if not committed before this relocation,
- * target page may contain XAD_NEW entries which must
- * be scanned for bmap update (logredo() always
- * scan xtpage REDOPAGE image for bmap update);
- * if committed before this relocation (tlckRELOCATE),
- * scan may be skipped by commit() and logredo();
- */
- BT_MARK_DIRTY(mp, ip);
- /* tlckNEW init xtlck->lwm.offset = XTENTRYSTART; */
- tlck = txLock(tid, ip, mp, tlckXTREE | tlckNEW);
- xtlck = (struct xtlock *) & tlck->lock;
-
- /* update the self address in the xtpage header */
- pxd = &p->header.self;
- PXDaddress(pxd, nxaddr);
-
- /* linelock for the after image of the whole page */
- xtlck->lwm.length =
- le16_to_cpu(p->header.nextindex) - xtlck->lwm.offset;
-
- /* update the buffer extent descriptor of target xtpage */
- xsize = xlen << JFS_SBI(ip->i_sb)->l2bsize;
- bmSetXD(mp, nxaddr, xsize);
-
- /* unpin the target page to new homeward bound */
- XT_PUTPAGE(mp);
- jfs_info("xtRelocate: target xtpage relocated.");
- }
-
- /*
- * 3. acquire maplock for the source extent to be freed;
- *
- * acquire a maplock saving the src relocated extent address;
- * to free of the extent at commit time;
- */
- out:
- /* if DATAEXT relocation, write a LOG_UPDATEMAP record for
- * free PXD of the source data extent (logredo() will update
- * bmap for free of source data extent), and update bmap for
- * free of the source data extent;
- */
- if (xtype == DATAEXT)
- tlck = txMaplock(tid, ip, tlckMAP);
- /* if XTPAGE relocation, write a LOG_NOREDOPAGE record
- * for the source xtpage (logredo() will init NoRedoPage
- * filter and will also update bmap for free of the source
- * xtpage), and update bmap for free of the source xtpage;
- * N.B. We use tlckMAP instead of tlckXTREE because there
- * is no buffer associated with this lock since the buffer
- * has been redirected to the target location.
- */
- else /* (xtype == XTPAGE) */
- tlck = txMaplock(tid, ip, tlckMAP | tlckRELOCATE);
-
- pxdlock = (struct pxd_lock *) & tlck->lock;
- pxdlock->flag = mlckFREEPXD;
- PXDaddress(&pxdlock->pxd, oxaddr);
- PXDlength(&pxdlock->pxd, xlen);
- pxdlock->index = 1;
-
- /*
- * 4. update the parent xad entry for relocation;
- *
- * acquire tlck for the parent entry with XAD_NEW as entry
- * update which will write LOG_REDOPAGE and update bmap for
- * allocation of XAD_NEW destination extent;
- */
- jfs_info("xtRelocate: update parent xad entry.");
- BT_MARK_DIRTY(pmp, ip);
- tlck = txLock(tid, ip, pmp, tlckXTREE | tlckGROW);
- xtlck = (struct xtlock *) & tlck->lock;
-
- /* update the XAD with the new destination extent; */
- xad = &pp->xad[index];
- xad->flag |= XAD_NEW;
- XADaddress(xad, nxaddr);
-
- xtlck->lwm.offset = min(index, xtlck->lwm.offset);
- xtlck->lwm.length = le16_to_cpu(pp->header.nextindex) -
- xtlck->lwm.offset;
-
- /* unpin the parent xtpage */
- XT_PUTPAGE(pmp);
-
- return rc;
-}
-
-
-/*
- * xtSearchNode()
- *
- * function: search for the internal xad entry covering specified extent.
- * This function is mainly used by defragfs utility.
- *
- * parameters:
- * ip - file object;
- * xad - extent to find;
- * cmpp - comparison result:
- * btstack - traverse stack;
- * flag - search process flag;
- *
- * returns:
- * btstack contains (bn, index) of search path traversed to the entry.
- * *cmpp is set to result of comparison with the entry returned.
- * the page containing the entry is pinned at exit.
- */
-static int xtSearchNode(struct inode *ip, xad_t * xad, /* required XAD entry */
- int *cmpp, struct btstack * btstack, int flag)
-{
- int rc = 0;
- s64 xoff, xaddr;
- int xlen;
- int cmp = 1; /* init for empty page */
- s64 bn; /* block number */
- struct metapage *mp; /* meta-page buffer */
- xtpage_t *p; /* page */
- int base, index, lim;
- struct btframe *btsp;
- s64 t64;
-
- BT_CLR(btstack);
-
- xoff = offsetXAD(xad);
- xlen = lengthXAD(xad);
- xaddr = addressXAD(xad);
-
- /*
- * search down tree from root:
- *
- * between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
- * internal page, child page Pi contains entry with k, Ki <= K < Kj.
- *
- * if entry with search key K is not found
- * internal page search find the entry with largest key Ki
- * less than K which point to the child page to search;
- * leaf page search find the entry with smallest key Kj
- * greater than K so that the returned index is the position of
- * the entry to be shifted right for insertion of new entry.
- * for empty tree, search key is greater than any key of the tree.
- *
- * by convention, root bn = 0.
- */
- for (bn = 0;;) {
- /* get/pin the page to search */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
- if (p->header.flag & BT_LEAF) {
- XT_PUTPAGE(mp);
- return -ESTALE;
- }
-
- lim = le16_to_cpu(p->header.nextindex) - XTENTRYSTART;
-
- /*
- * binary search with search key K on the current page
- */
- for (base = XTENTRYSTART; lim; lim >>= 1) {
- index = base + (lim >> 1);
-
- XT_CMP(cmp, xoff, &p->xad[index], t64);
- if (cmp == 0) {
- /*
- * search hit
- *
- * verify for exact match;
- */
- if (xaddr == addressXAD(&p->xad[index]) &&
- xoff == offsetXAD(&p->xad[index])) {
- *cmpp = cmp;
-
- /* save search result */
- btsp = btstack->top;
- btsp->bn = bn;
- btsp->index = index;
- btsp->mp = mp;
-
- return 0;
- }
-
- /* descend/search its child page */
- goto next;
- }
-
- if (cmp > 0) {
- base = index + 1;
- --lim;
- }
- }
-
- /*
- * search miss - non-leaf page:
- *
- * base is the smallest index with key (Kj) greater than
- * search key (K) and may be zero or maxentry index.
- * if base is non-zero, decrement base by one to get the parent
- * entry of the child page to search.
- */
- index = base ? base - 1 : base;
-
- /*
- * go down to child page
- */
- next:
- /* get the child page block number */
- bn = addressXAD(&p->xad[index]);
-
- /* unpin the parent page */
- XT_PUTPAGE(mp);
- }
-}
-
-
-/*
- * xtRelink()
- *
- * function:
- * link around a freed page.
- *
- * Parameter:
- * int tid,
- * struct inode *ip,
- * xtpage_t *p)
- *
- * returns:
- */
-static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * p)
-{
- int rc = 0;
- struct metapage *mp;
- s64 nextbn, prevbn;
- struct tlock *tlck;
-
- nextbn = le64_to_cpu(p->header.next);
- prevbn = le64_to_cpu(p->header.prev);
-
- /* update prev pointer of the next page */
- if (nextbn != 0) {
- XT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
-
- /*
- * acquire a transaction lock on the page;
- *
- * action: update prev pointer;
- */
- BT_MARK_DIRTY(mp, ip);
- tlck = txLock(tid, ip, mp, tlckXTREE | tlckRELINK);
-
- /* the page may already have been tlock'd */
-
- p->header.prev = cpu_to_le64(prevbn);
-
- XT_PUTPAGE(mp);
- }
-
- /* update next pointer of the previous page */
- if (prevbn != 0) {
- XT_GETPAGE(ip, prevbn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
-
- /*
- * acquire a transaction lock on the page;
- *
- * action: update next pointer;
- */
- BT_MARK_DIRTY(mp, ip);
- tlck = txLock(tid, ip, mp, tlckXTREE | tlckRELINK);
-
- /* the page may already have been tlock'd */
-
- p->header.next = le64_to_cpu(nextbn);
-
- XT_PUTPAGE(mp);
- }
-
- return 0;
-}
-#endif /* _STILL_TO_PORT */
-
/*
* xtInitRoot()
diff --git a/fs/jfs/jfs_xtree.h b/fs/jfs/jfs_xtree.h
index 5f51be8596b3..142caafc73b1 100644
--- a/fs/jfs/jfs_xtree.h
+++ b/fs/jfs/jfs_xtree.h
@@ -95,10 +95,6 @@ extern int xtInsert(tid_t tid, struct inode *ip,
int xflag, s64 xoff, int xlen, s64 * xaddrp, int flag);
extern int xtExtend(tid_t tid, struct inode *ip, s64 xoff, int xlen,
int flag);
-#ifdef _NOTYET
-extern int xtTailgate(tid_t tid, struct inode *ip,
- s64 xoff, int xlen, s64 xaddr, int flag);
-#endif
extern int xtUpdate(tid_t tid, struct inode *ip, struct xad *nxad);
extern int xtDelete(tid_t tid, struct inode *ip, s64 xoff, int xlen,
int flag);
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index e205fde7163a..6eca72cfa1f2 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -18,7 +18,15 @@
#include "kernfs-internal.h"
static DEFINE_SPINLOCK(kernfs_rename_lock); /* kn->parent and ->name */
-static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by rename_lock */
+/*
+ * Don't use rename_lock to piggyback on pr_cont_buf. We don't want to
+ * call pr_cont() while holding rename_lock, because pr_cont() can
+ * perform wakeups when releasing console_sem. Holding rename_lock
+ * would introduce a deadlock if the scheduler reads the kernfs_name in
+ * the wakeup path.
+ */
+static DEFINE_SPINLOCK(kernfs_pr_cont_lock);
+static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by pr_cont_lock */
static DEFINE_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */
#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
@@ -229,12 +237,12 @@ void pr_cont_kernfs_name(struct kernfs_node *kn)
{
unsigned long flags;
- spin_lock_irqsave(&kernfs_rename_lock, flags);
+ spin_lock_irqsave(&kernfs_pr_cont_lock, flags);
- kernfs_name_locked(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
+ kernfs_name(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
pr_cont("%s", kernfs_pr_cont_buf);
- spin_unlock_irqrestore(&kernfs_rename_lock, flags);
+ spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
}
/**
@@ -248,10 +256,10 @@ void pr_cont_kernfs_path(struct kernfs_node *kn)
unsigned long flags;
int sz;
- spin_lock_irqsave(&kernfs_rename_lock, flags);
+ spin_lock_irqsave(&kernfs_pr_cont_lock, flags);
- sz = kernfs_path_from_node_locked(kn, NULL, kernfs_pr_cont_buf,
- sizeof(kernfs_pr_cont_buf));
+ sz = kernfs_path_from_node(kn, NULL, kernfs_pr_cont_buf,
+ sizeof(kernfs_pr_cont_buf));
if (sz < 0) {
pr_cont("(error)");
goto out;
@@ -265,7 +273,7 @@ void pr_cont_kernfs_path(struct kernfs_node *kn)
pr_cont("%s", kernfs_pr_cont_buf);
out:
- spin_unlock_irqrestore(&kernfs_rename_lock, flags);
+ spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
}
/**
@@ -823,13 +831,12 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
lockdep_assert_held_read(&kernfs_root(parent)->kernfs_rwsem);
- /* grab kernfs_rename_lock to piggy back on kernfs_pr_cont_buf */
- spin_lock_irq(&kernfs_rename_lock);
+ spin_lock_irq(&kernfs_pr_cont_lock);
len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));
if (len >= sizeof(kernfs_pr_cont_buf)) {
- spin_unlock_irq(&kernfs_rename_lock);
+ spin_unlock_irq(&kernfs_pr_cont_lock);
return NULL;
}
@@ -841,7 +848,7 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
parent = kernfs_find_ns(parent, name, ns);
}
- spin_unlock_irq(&kernfs_rename_lock);
+ spin_unlock_irq(&kernfs_pr_cont_lock);
return parent;
}
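The dir.c hunks above give the shared pr_cont scratch buffer its own spinlock so that pr_cont(), which may perform wakeups when console_sem is released, is never called with kernfs_rename_lock held. Condensed from the hunks above (error handling elided), the printing helper now reads roughly as follows:

    static DEFINE_SPINLOCK(kernfs_pr_cont_lock);
    static char kernfs_pr_cont_buf[PATH_MAX];   /* protected by kernfs_pr_cont_lock */

    void pr_cont_kernfs_name(struct kernfs_node *kn)
    {
            unsigned long flags;

            /* serialize use of the shared buffer without holding rename_lock */
            spin_lock_irqsave(&kernfs_pr_cont_lock, flags);
            /* kernfs_name() takes and drops kernfs_rename_lock internally, so
             * the wakeups done by pr_cont() can no longer deadlock against it */
            kernfs_name(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
            pr_cont("%s", kernfs_pr_cont_buf);
            spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
    }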
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 88423069407c..e3abfa843879 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -33,7 +33,6 @@ static DEFINE_SPINLOCK(kernfs_open_node_lock);
static DEFINE_MUTEX(kernfs_open_file_mutex);
struct kernfs_open_node {
- atomic_t refcnt;
atomic_t event;
wait_queue_head_t poll;
struct list_head files; /* goes through kernfs_open_file.list */
@@ -530,10 +529,8 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
}
on = kn->attr.open;
- if (on) {
- atomic_inc(&on->refcnt);
+ if (on)
list_add_tail(&of->list, &on->files);
- }
spin_unlock_irq(&kernfs_open_node_lock);
mutex_unlock(&kernfs_open_file_mutex);
@@ -548,7 +545,6 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
if (!new_on)
return -ENOMEM;
- atomic_set(&new_on->refcnt, 0);
atomic_set(&new_on->event, 1);
init_waitqueue_head(&new_on->poll);
INIT_LIST_HEAD(&new_on->files);
@@ -556,17 +552,19 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
}
/**
- * kernfs_put_open_node - put kernfs_open_node
- * @kn: target kernfs_nodet
+ * kernfs_unlink_open_file - Unlink @of from @kn.
+ *
+ * @kn: target kernfs_node
* @of: associated kernfs_open_file
*
- * Put @kn->attr.open and unlink @of from the files list. If
- * reference count reaches zero, disassociate and free it.
+ * Unlink @of from list of @kn's associated open files. If list of
+ * associated open files becomes empty, disassociate and free
+ * kernfs_open_node.
*
* LOCKING:
* None.
*/
-static void kernfs_put_open_node(struct kernfs_node *kn,
+static void kernfs_unlink_open_file(struct kernfs_node *kn,
struct kernfs_open_file *of)
{
struct kernfs_open_node *on = kn->attr.open;
@@ -578,7 +576,7 @@ static void kernfs_put_open_node(struct kernfs_node *kn,
if (of)
list_del(&of->list);
- if (atomic_dec_and_test(&on->refcnt))
+ if (list_empty(&on->files))
kn->attr.open = NULL;
else
on = NULL;
@@ -706,7 +704,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
return 0;
err_put_node:
- kernfs_put_open_node(kn, of);
+ kernfs_unlink_open_file(kn, of);
err_seq_release:
seq_release(inode, file);
err_free:
@@ -752,7 +750,7 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
mutex_unlock(&kernfs_open_file_mutex);
}
- kernfs_put_open_node(kn, of);
+ kernfs_unlink_open_file(kn, of);
seq_release(inode, filp);
kfree(of->prealloc_buf);
kfree(of);
@@ -768,15 +766,24 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
return;
- spin_lock_irq(&kernfs_open_node_lock);
- on = kn->attr.open;
- if (on)
- atomic_inc(&on->refcnt);
- spin_unlock_irq(&kernfs_open_node_lock);
- if (!on)
+ /*
+ * A lockless opportunistic check is safe below because no one is adding
+ * to ->attr.open at this point. This check allows an early bail out if
+ * ->attr.open is already NULL. kernfs_unlink_open_file() sets
+ * ->attr.open to NULL only while holding kernfs_open_file_mutex, so the
+ * check below under kernfs_open_file_mutex ensures we bail out if
+ * ->attr.open became NULL while waiting for the mutex.
+ */
+ if (!kn->attr.open)
return;
mutex_lock(&kernfs_open_file_mutex);
+ if (!kn->attr.open) {
+ mutex_unlock(&kernfs_open_file_mutex);
+ return;
+ }
+
+ on = kn->attr.open;
list_for_each_entry(of, &on->files, list) {
struct inode *inode = file_inode(of->file);
@@ -789,8 +796,6 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
}
mutex_unlock(&kernfs_open_file_mutex);
-
- kernfs_put_open_node(kn, NULL);
}
/*
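The file.c hunks above drop the kernfs_open_node refcount: the node now lives exactly as long as its ->files list is non-empty, and kernfs_drain_open_files() replaces the old get/put pair with a lockless check that is repeated under kernfs_open_file_mutex. Condensed from the hunks above (the per-file unmap/release loop body is elided), the drain path now reads roughly like this:

    void kernfs_drain_open_files(struct kernfs_node *kn)
    {
            struct kernfs_open_node *on;
            struct kernfs_open_file *of;

            if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
                    return;

            /* opportunistic lockless check: nothing can attach a new
             * kernfs_open_node here, and ->attr.open only becomes NULL
             * under kernfs_open_file_mutex */
            if (!kn->attr.open)
                    return;

            mutex_lock(&kernfs_open_file_mutex);
            /* re-check under the mutex in case the last opener went away
             * while we were waiting for it */
            if (!kn->attr.open) {
                    mutex_unlock(&kernfs_open_file_mutex);
                    return;
            }

            on = kn->attr.open;
            list_for_each_entry(of, &on->files, list) {
                    /* ... unmap and release each open file as before ... */
            }
            mutex_unlock(&kernfs_open_file_mutex);
    }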
diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
index 208d2cff7bd3..e8f476c5f189 100644
--- a/fs/ksmbd/connection.c
+++ b/fs/ksmbd/connection.c
@@ -62,7 +62,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
atomic_set(&conn->req_running, 0);
atomic_set(&conn->r_count, 0);
conn->total_credits = 1;
- conn->outstanding_credits = 1;
+ conn->outstanding_credits = 0;
init_waitqueue_head(&conn->req_running_q);
INIT_LIST_HEAD(&conn->conns_list);
@@ -205,31 +205,31 @@ int ksmbd_conn_write(struct ksmbd_work *work)
return 0;
}
-int ksmbd_conn_rdma_read(struct ksmbd_conn *conn, void *buf,
- unsigned int buflen, u32 remote_key, u64 remote_offset,
- u32 remote_len)
+int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
+ void *buf, unsigned int buflen,
+ struct smb2_buffer_desc_v1 *desc,
+ unsigned int desc_len)
{
int ret = -EINVAL;
if (conn->transport->ops->rdma_read)
ret = conn->transport->ops->rdma_read(conn->transport,
buf, buflen,
- remote_key, remote_offset,
- remote_len);
+ desc, desc_len);
return ret;
}
-int ksmbd_conn_rdma_write(struct ksmbd_conn *conn, void *buf,
- unsigned int buflen, u32 remote_key,
- u64 remote_offset, u32 remote_len)
+int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
+ void *buf, unsigned int buflen,
+ struct smb2_buffer_desc_v1 *desc,
+ unsigned int desc_len)
{
int ret = -EINVAL;
if (conn->transport->ops->rdma_write)
ret = conn->transport->ops->rdma_write(conn->transport,
buf, buflen,
- remote_key, remote_offset,
- remote_len);
+ desc, desc_len);
return ret;
}
diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
index 7a59aacb5daa..98c1cbe45ec9 100644
--- a/fs/ksmbd/connection.h
+++ b/fs/ksmbd/connection.h
@@ -122,11 +122,14 @@ struct ksmbd_transport_ops {
int (*writev)(struct ksmbd_transport *t, struct kvec *iovs, int niov,
int size, bool need_invalidate_rkey,
unsigned int remote_key);
- int (*rdma_read)(struct ksmbd_transport *t, void *buf, unsigned int len,
- u32 remote_key, u64 remote_offset, u32 remote_len);
- int (*rdma_write)(struct ksmbd_transport *t, void *buf,
- unsigned int len, u32 remote_key, u64 remote_offset,
- u32 remote_len);
+ int (*rdma_read)(struct ksmbd_transport *t,
+ void *buf, unsigned int len,
+ struct smb2_buffer_desc_v1 *desc,
+ unsigned int desc_len);
+ int (*rdma_write)(struct ksmbd_transport *t,
+ void *buf, unsigned int len,
+ struct smb2_buffer_desc_v1 *desc,
+ unsigned int desc_len);
};
struct ksmbd_transport {
@@ -148,12 +151,14 @@ struct ksmbd_conn *ksmbd_conn_alloc(void);
void ksmbd_conn_free(struct ksmbd_conn *conn);
bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
int ksmbd_conn_write(struct ksmbd_work *work);
-int ksmbd_conn_rdma_read(struct ksmbd_conn *conn, void *buf,
- unsigned int buflen, u32 remote_key, u64 remote_offset,
- u32 remote_len);
-int ksmbd_conn_rdma_write(struct ksmbd_conn *conn, void *buf,
- unsigned int buflen, u32 remote_key, u64 remote_offset,
- u32 remote_len);
+int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
+ void *buf, unsigned int buflen,
+ struct smb2_buffer_desc_v1 *desc,
+ unsigned int desc_len);
+int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
+ void *buf, unsigned int buflen,
+ struct smb2_buffer_desc_v1 *desc,
+ unsigned int desc_len);
void ksmbd_conn_enqueue_request(struct ksmbd_work *work);
int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops);
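The connection.c and connection.h hunks above change the RDMA read/write helpers to take the client-supplied channel-info descriptor array (a struct smb2_buffer_desc_v1 pointer plus its byte length) instead of a single pre-parsed remote key/offset/length triple, so the transport layer can walk multiple descriptors itself. Callers in smb2pdu.c (see the hunks further down) now simply forward the raw descriptors from the request, roughly:

    /* hand the descriptor array embedded in the request to the transport;
     * offset validation against the request length is done by the caller */
    struct smb2_buffer_desc_v1 *desc =
            (struct smb2_buffer_desc_v1 *)
            ((char *)req + le16_to_cpu(req->ReadChannelInfoOffset));

    err = ksmbd_conn_rdma_write(work->conn, data_buf, length,
                                desc, le16_to_cpu(req->ReadChannelInfoLength));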
diff --git a/fs/ksmbd/ksmbd_netlink.h b/fs/ksmbd/ksmbd_netlink.h
index ebe6ca08467a..52aa0adeb951 100644
--- a/fs/ksmbd/ksmbd_netlink.h
+++ b/fs/ksmbd/ksmbd_netlink.h
@@ -104,7 +104,8 @@ struct ksmbd_startup_request {
*/
__u32 sub_auth[3]; /* Subauth value for Security ID */
__u32 smb2_max_credits; /* MAX credits */
- __u32 reserved[128]; /* Reserved room */
+ __u32 smbd_max_io_size; /* smbd read write size */
+ __u32 reserved[127]; /* Reserved room */
__u32 ifc_list_sz; /* interfaces list size */
__s8 ____payload[];
};
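The ksmbd_netlink.h hunk above carves the new smbd_max_io_size field out of the existing reserved[] padding (128 slots become 127), so the size and layout of struct ksmbd_startup_request stay unchanged for existing userspace. A small standalone model of that invariant, using hypothetical stand-in structs rather than the real ksmbd definitions:

    #include <assert.h>
    #include <stdint.h>

    /* stand-ins for the tail of the request struct before and after the patch */
    struct old_tail { uint32_t smb2_max_credits; uint32_t reserved[128]; };
    struct new_tail { uint32_t smb2_max_credits; uint32_t smbd_max_io_size;
                      uint32_t reserved[127]; };

    /* trading one padding slot for the new field must not change the size */
    static_assert(sizeof(struct old_tail) == sizeof(struct new_tail),
                  "netlink ABI size must stay stable");

    int main(void) { return 0; }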
diff --git a/fs/ksmbd/misc.c b/fs/ksmbd/misc.c
index 1e2076a53bed..df991107ad2c 100644
--- a/fs/ksmbd/misc.c
+++ b/fs/ksmbd/misc.c
@@ -20,7 +20,7 @@
* wildcard '*' and '?'
* TODO : implement consideration about DOS_DOT, DOS_QM and DOS_STAR
*
- * @string: string to compare with a pattern
+ * @str: string to compare with a pattern
* @len: string length
* @pattern: pattern string which might include wildcard '*' and '?'
*
@@ -152,8 +152,8 @@ out:
/**
* convert_to_nt_pathname() - extract and return windows path string
* whose share directory prefix was removed from file path
- * @filename : unix filename
- * @sharepath: share path string
+ * @share: ksmbd_share_config pointer
+ * @path: path to report
*
* Return : windows path string or error
*/
@@ -250,8 +250,8 @@ char *ksmbd_extract_sharename(char *treename)
/**
* convert_to_unix_name() - convert windows name to unix format
- * @path: name to be converted
- * @tid: tree id of mathing share
+ * @share: ksmbd_share_config pointer
+ * @name: file name that is relative to share
*
* Return: converted name on success, otherwise NULL
*/
diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c
index 4a9460153b59..f8f456377a51 100644
--- a/fs/ksmbd/smb2misc.c
+++ b/fs/ksmbd/smb2misc.c
@@ -338,7 +338,7 @@ static int smb2_validate_credit_charge(struct ksmbd_conn *conn,
ret = 1;
}
- if ((u64)conn->outstanding_credits + credit_charge > conn->vals->max_credits) {
+ if ((u64)conn->outstanding_credits + credit_charge > conn->total_credits) {
ksmbd_debug(SMB, "Limits exceeding the maximum allowable outstanding requests, given : %u, pending : %u\n",
credit_charge, conn->outstanding_credits);
ret = 1;
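The smb2misc.c hunk above tightens the credit-charge check: an incoming request is now bounded by conn->total_credits, the credits this connection has actually been granted, rather than by the protocol-wide vals->max_credits ceiling, and it pairs with outstanding_credits starting at 0 in ksmbd_conn_alloc(). A standalone model of the accounting rule (hypothetical names, not ksmbd code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* per-connection credit state modelled on the check above */
    struct conn_credits {
            uint32_t total_credits;        /* credits granted to the client */
            uint32_t outstanding_credits;  /* credits held by in-flight requests */
    };

    /* a request is allowed only if its charge fits in the granted budget */
    static bool credit_charge_ok(const struct conn_credits *c, uint32_t charge)
    {
            return (uint64_t)c->outstanding_credits + charge <= c->total_credits;
    }

    int main(void)
    {
            struct conn_credits c = { .total_credits = 1, .outstanding_credits = 0 };

            printf("charge 1 allowed: %d\n", credit_charge_ok(&c, 1));  /* 1 */
            printf("charge 2 allowed: %d\n", credit_charge_ok(&c, 2));  /* 0 */
            return 0;
    }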
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 16c803a9d996..e6f4ccc12f49 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -3938,6 +3938,12 @@ int smb2_query_dir(struct ksmbd_work *work)
set_ctx_actor(&dir_fp->readdir_data.ctx, __query_dir);
rc = iterate_dir(dir_fp->filp, &dir_fp->readdir_data.ctx);
+ /*
+ * If req->OutputBufferLength is too small to contain even one entry,
+ * immediately return OutputBufferLength 0 to the client.
+ */
+ if (!d_info.out_buf_len && !d_info.num_entry)
+ goto no_buf_len;
if (rc == 0)
restart_ctx(&dir_fp->readdir_data.ctx);
if (rc == -ENOSPC)
@@ -3964,10 +3970,12 @@ int smb2_query_dir(struct ksmbd_work *work)
rsp->Buffer[0] = 0;
inc_rfc1001_len(work->response_buf, 9);
} else {
+no_buf_len:
((struct file_directory_info *)
((char *)rsp->Buffer + d_info.last_entry_offset))
->NextEntryOffset = 0;
- d_info.data_count -= d_info.last_entry_off_align;
+ if (d_info.data_count >= d_info.last_entry_off_align)
+ d_info.data_count -= d_info.last_entry_off_align;
rsp->StructureSize = cpu_to_le16(9);
rsp->OutputBufferOffset = cpu_to_le16(72);
@@ -6116,7 +6124,6 @@ out:
static int smb2_set_remote_key_for_rdma(struct ksmbd_work *work,
struct smb2_buffer_desc_v1 *desc,
__le32 Channel,
- __le16 ChannelInfoOffset,
__le16 ChannelInfoLength)
{
unsigned int i, ch_count;
@@ -6134,15 +6141,13 @@ static int smb2_set_remote_key_for_rdma(struct ksmbd_work *work,
le32_to_cpu(desc[i].length));
}
}
- if (ch_count != 1) {
- ksmbd_debug(RDMA, "RDMA multiple buffer descriptors %d are not supported yet\n",
- ch_count);
+ if (!ch_count)
return -EINVAL;
- }
work->need_invalidate_rkey =
(Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE);
- work->remote_key = le32_to_cpu(desc->token);
+ if (Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE)
+ work->remote_key = le32_to_cpu(desc->token);
return 0;
}
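With the hunk above, smb2_set_remote_key_for_rdma() no longer rejects requests that carry more than one buffer descriptor; only an empty descriptor list is an error, matching the array-based RDMA helpers introduced earlier in this patch. The remote key is recorded only when the client chose the invalidating channel. Condensed, the tail of the function now reads:

    if (!ch_count)
            return -EINVAL;

    work->need_invalidate_rkey =
            (Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE);
    /* only an invalidating channel supplies a key worth remembering */
    if (Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE)
            work->remote_key = le32_to_cpu(desc->token);

    return 0;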
@@ -6150,14 +6155,12 @@ static ssize_t smb2_read_rdma_channel(struct ksmbd_work *work,
struct smb2_read_req *req, void *data_buf,
size_t length)
{
- struct smb2_buffer_desc_v1 *desc =
- (struct smb2_buffer_desc_v1 *)&req->Buffer[0];
int err;
err = ksmbd_conn_rdma_write(work->conn, data_buf, length,
- le32_to_cpu(desc->token),
- le64_to_cpu(desc->offset),
- le32_to_cpu(desc->length));
+ (struct smb2_buffer_desc_v1 *)
+ ((char *)req + le16_to_cpu(req->ReadChannelInfoOffset)),
+ le16_to_cpu(req->ReadChannelInfoLength));
if (err)
return err;
@@ -6180,6 +6183,8 @@ int smb2_read(struct ksmbd_work *work)
size_t length, mincount;
ssize_t nbytes = 0, remain_bytes = 0;
int err = 0;
+ bool is_rdma_channel = false;
+ unsigned int max_read_size = conn->vals->max_read_size;
WORK_BUFFERS(work, req, rsp);
@@ -6191,6 +6196,11 @@ int smb2_read(struct ksmbd_work *work)
if (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE ||
req->Channel == SMB2_CHANNEL_RDMA_V1) {
+ is_rdma_channel = true;
+ max_read_size = get_smbd_max_read_write_size();
+ }
+
+ if (is_rdma_channel == true) {
unsigned int ch_offset = le16_to_cpu(req->ReadChannelInfoOffset);
if (ch_offset < offsetof(struct smb2_read_req, Buffer)) {
@@ -6201,7 +6211,6 @@ int smb2_read(struct ksmbd_work *work)
(struct smb2_buffer_desc_v1 *)
((char *)req + ch_offset),
req->Channel,
- req->ReadChannelInfoOffset,
req->ReadChannelInfoLength);
if (err)
goto out;
@@ -6223,9 +6232,9 @@ int smb2_read(struct ksmbd_work *work)
length = le32_to_cpu(req->Length);
mincount = le32_to_cpu(req->MinimumCount);
- if (length > conn->vals->max_read_size) {
+ if (length > max_read_size) {
ksmbd_debug(SMB, "limiting read size to max size(%u)\n",
- conn->vals->max_read_size);
+ max_read_size);
err = -EINVAL;
goto out;
}
@@ -6257,8 +6266,7 @@ int smb2_read(struct ksmbd_work *work)
ksmbd_debug(SMB, "nbytes %zu, offset %lld mincount %zu\n",
nbytes, offset, mincount);
- if (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE ||
- req->Channel == SMB2_CHANNEL_RDMA_V1) {
+ if (is_rdma_channel == true) {
/* write data to the client using rdma channel */
remain_bytes = smb2_read_rdma_channel(work, req,
work->aux_payload_buf,
@@ -6328,23 +6336,18 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work)
length = le32_to_cpu(req->Length);
id = req->VolatileFileId;
- if (le16_to_cpu(req->DataOffset) ==
- offsetof(struct smb2_write_req, Buffer)) {
- data_buf = (char *)&req->Buffer[0];
- } else {
- if ((u64)le16_to_cpu(req->DataOffset) + length >
- get_rfc1002_len(work->request_buf)) {
- pr_err("invalid write data offset %u, smb_len %u\n",
- le16_to_cpu(req->DataOffset),
- get_rfc1002_len(work->request_buf));
- err = -EINVAL;
- goto out;
- }
-
- data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
- le16_to_cpu(req->DataOffset));
+ if ((u64)le16_to_cpu(req->DataOffset) + length >
+ get_rfc1002_len(work->request_buf)) {
+ pr_err("invalid write data offset %u, smb_len %u\n",
+ le16_to_cpu(req->DataOffset),
+ get_rfc1002_len(work->request_buf));
+ err = -EINVAL;
+ goto out;
}
+ data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
+ le16_to_cpu(req->DataOffset));
+
rpc_resp = ksmbd_rpc_write(work->sess, id, data_buf, length);
if (rpc_resp) {
if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
@@ -6384,21 +6387,18 @@ static ssize_t smb2_write_rdma_channel(struct ksmbd_work *work,
struct ksmbd_file *fp,
loff_t offset, size_t length, bool sync)
{
- struct smb2_buffer_desc_v1 *desc;
char *data_buf;
int ret;
ssize_t nbytes;
- desc = (struct smb2_buffer_desc_v1 *)&req->Buffer[0];
-
data_buf = kvmalloc(length, GFP_KERNEL | __GFP_ZERO);
if (!data_buf)
return -ENOMEM;
ret = ksmbd_conn_rdma_read(work->conn, data_buf, length,
- le32_to_cpu(desc->token),
- le64_to_cpu(desc->offset),
- le32_to_cpu(desc->length));
+ (struct smb2_buffer_desc_v1 *)
+ ((char *)req + le16_to_cpu(req->WriteChannelInfoOffset)),
+ le16_to_cpu(req->WriteChannelInfoLength));
if (ret < 0) {
kvfree(data_buf);
return ret;
@@ -6427,8 +6427,9 @@ int smb2_write(struct ksmbd_work *work)
size_t length;
ssize_t nbytes;
char *data_buf;
- bool writethrough = false;
+ bool writethrough = false, is_rdma_channel = false;
int err = 0;
+ unsigned int max_write_size = work->conn->vals->max_write_size;
WORK_BUFFERS(work, req, rsp);
@@ -6437,8 +6438,17 @@ int smb2_write(struct ksmbd_work *work)
return smb2_write_pipe(work);
}
+ offset = le64_to_cpu(req->Offset);
+ length = le32_to_cpu(req->Length);
+
if (req->Channel == SMB2_CHANNEL_RDMA_V1 ||
req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE) {
+ is_rdma_channel = true;
+ max_write_size = get_smbd_max_read_write_size();
+ length = le32_to_cpu(req->RemainingBytes);
+ }
+
+ if (is_rdma_channel == true) {
unsigned int ch_offset = le16_to_cpu(req->WriteChannelInfoOffset);
if (req->Length != 0 || req->DataOffset != 0 ||
@@ -6450,7 +6460,6 @@ int smb2_write(struct ksmbd_work *work)
(struct smb2_buffer_desc_v1 *)
((char *)req + ch_offset),
req->Channel,
- req->WriteChannelInfoOffset,
req->WriteChannelInfoLength);
if (err)
goto out;
@@ -6474,12 +6483,9 @@ int smb2_write(struct ksmbd_work *work)
goto out;
}
- offset = le64_to_cpu(req->Offset);
- length = le32_to_cpu(req->Length);
-
- if (length > work->conn->vals->max_write_size) {
+ if (length > max_write_size) {
ksmbd_debug(SMB, "limiting write size to max size(%u)\n",
- work->conn->vals->max_write_size);
+ max_write_size);
err = -EINVAL;
goto out;
}
@@ -6487,24 +6493,17 @@ int smb2_write(struct ksmbd_work *work)
if (le32_to_cpu(req->Flags) & SMB2_WRITEFLAG_WRITE_THROUGH)
writethrough = true;
- if (req->Channel != SMB2_CHANNEL_RDMA_V1 &&
- req->Channel != SMB2_CHANNEL_RDMA_V1_INVALIDATE) {
- if (le16_to_cpu(req->DataOffset) ==
- offsetof(struct smb2_write_req, Buffer)) {
- data_buf = (char *)&req->Buffer[0];
- } else {
- if ((u64)le16_to_cpu(req->DataOffset) + length >
- get_rfc1002_len(work->request_buf)) {
- pr_err("invalid write data offset %u, smb_len %u\n",
- le16_to_cpu(req->DataOffset),
- get_rfc1002_len(work->request_buf));
- err = -EINVAL;
- goto out;
- }
-
- data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
- le16_to_cpu(req->DataOffset));
+ if (is_rdma_channel == false) {
+ if ((u64)le16_to_cpu(req->DataOffset) + length >
+ get_rfc1002_len(work->request_buf)) {
+ pr_err("invalid write data offset %u, smb_len %u\n",
+ le16_to_cpu(req->DataOffset),
+ get_rfc1002_len(work->request_buf));
+ err = -EINVAL;
+ goto out;
}
+ data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
+ le16_to_cpu(req->DataOffset));
ksmbd_debug(SMB, "flags %u\n", le32_to_cpu(req->Flags));
if (le32_to_cpu(req->Flags) & SMB2_WRITEFLAG_WRITE_THROUGH)
@@ -6520,8 +6519,7 @@ int smb2_write(struct ksmbd_work *work)
/* read data from the client using rdma channel, and
* write the data.
*/
- nbytes = smb2_write_rdma_channel(work, req, fp, offset,
- le32_to_cpu(req->RemainingBytes),
+ nbytes = smb2_write_rdma_channel(work, req, fp, offset, length,
writethrough);
if (nbytes < 0) {
err = (int)nbytes;
diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
index 9a7e211dbf4f..7f8ab14fb8ec 100644
--- a/fs/ksmbd/smb_common.c
+++ b/fs/ksmbd/smb_common.c
@@ -140,8 +140,10 @@ int ksmbd_verify_smb_message(struct ksmbd_work *work)
hdr = work->request_buf;
if (*(__le32 *)hdr->Protocol == SMB1_PROTO_NUMBER &&
- hdr->Command == SMB_COM_NEGOTIATE)
+ hdr->Command == SMB_COM_NEGOTIATE) {
+ work->conn->outstanding_credits++;
return 0;
+ }
return -EINVAL;
}
diff --git a/fs/ksmbd/smbacl.c b/fs/ksmbd/smbacl.c
index 6ecf55ea1fed..38f23bf981ac 100644
--- a/fs/ksmbd/smbacl.c
+++ b/fs/ksmbd/smbacl.c
@@ -1261,6 +1261,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
if (!access_bits)
access_bits =
SET_MINIMUM_RIGHTS;
+ posix_acl_release(posix_acls);
goto check_access_bits;
}
}
diff --git a/fs/ksmbd/transport_ipc.c b/fs/ksmbd/transport_ipc.c
index 3ad6881e0f7e..7cb0eeb07c80 100644
--- a/fs/ksmbd/transport_ipc.c
+++ b/fs/ksmbd/transport_ipc.c
@@ -26,6 +26,7 @@
#include "mgmt/ksmbd_ida.h"
#include "connection.h"
#include "transport_tcp.h"
+#include "transport_rdma.h"
#define IPC_WAIT_TIMEOUT (2 * HZ)
@@ -303,6 +304,8 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
init_smb2_max_trans_size(req->smb2_max_trans);
if (req->smb2_max_credits)
init_smb2_max_credits(req->smb2_max_credits);
+ if (req->smbd_max_io_size)
+ init_smbd_max_io_size(req->smbd_max_io_size);
ret = ksmbd_set_netbios_name(req->netbios_name);
ret |= ksmbd_set_server_string(req->server_string);
diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
index e646d79554b8..d035e060c2f0 100644
--- a/fs/ksmbd/transport_rdma.c
+++ b/fs/ksmbd/transport_rdma.c
@@ -80,9 +80,7 @@ static int smb_direct_max_fragmented_recv_size = 1024 * 1024;
/* The maximum single-message size which can be received */
static int smb_direct_max_receive_size = 8192;
-static int smb_direct_max_read_write_size = 524224;
-
-static int smb_direct_max_outstanding_rw_ops = 8;
+static int smb_direct_max_read_write_size = SMBD_DEFAULT_IOSIZE;
static LIST_HEAD(smb_direct_device_list);
static DEFINE_RWLOCK(smb_direct_device_lock);
@@ -147,18 +145,18 @@ struct smb_direct_transport {
atomic_t send_credits;
spinlock_t lock_new_recv_credits;
int new_recv_credits;
- atomic_t rw_avail_ops;
+ int max_rw_credits;
+ int pages_per_rw_credit;
+ atomic_t rw_credits;
wait_queue_head_t wait_send_credits;
- wait_queue_head_t wait_rw_avail_ops;
+ wait_queue_head_t wait_rw_credits;
mempool_t *sendmsg_mempool;
struct kmem_cache *sendmsg_cache;
mempool_t *recvmsg_mempool;
struct kmem_cache *recvmsg_cache;
- wait_queue_head_t wait_send_payload_pending;
- atomic_t send_payload_pending;
wait_queue_head_t wait_send_pending;
atomic_t send_pending;
@@ -208,12 +206,25 @@ struct smb_direct_recvmsg {
struct smb_direct_rdma_rw_msg {
struct smb_direct_transport *t;
struct ib_cqe cqe;
+ int status;
struct completion *completion;
+ struct list_head list;
struct rdma_rw_ctx rw_ctx;
struct sg_table sgt;
struct scatterlist sg_list[];
};
+void init_smbd_max_io_size(unsigned int sz)
+{
+ sz = clamp_val(sz, SMBD_MIN_IOSIZE, SMBD_MAX_IOSIZE);
+ smb_direct_max_read_write_size = sz;
+}
+
+unsigned int get_smbd_max_read_write_size(void)
+{
+ return smb_direct_max_read_write_size;
+}
+
static inline int get_buf_page_count(void *buf, int size)
{
return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
@@ -377,7 +388,7 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
t->reassembly_queue_length = 0;
init_waitqueue_head(&t->wait_reassembly_queue);
init_waitqueue_head(&t->wait_send_credits);
- init_waitqueue_head(&t->wait_rw_avail_ops);
+ init_waitqueue_head(&t->wait_rw_credits);
spin_lock_init(&t->receive_credit_lock);
spin_lock_init(&t->recvmsg_queue_lock);
@@ -386,8 +397,6 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
spin_lock_init(&t->empty_recvmsg_queue_lock);
INIT_LIST_HEAD(&t->empty_recvmsg_queue);
- init_waitqueue_head(&t->wait_send_payload_pending);
- atomic_set(&t->send_payload_pending, 0);
init_waitqueue_head(&t->wait_send_pending);
atomic_set(&t->send_pending, 0);
@@ -417,8 +426,6 @@ static void free_transport(struct smb_direct_transport *t)
wake_up_interruptible(&t->wait_send_credits);
ksmbd_debug(RDMA, "wait for all send posted to IB to finish\n");
- wait_event(t->wait_send_payload_pending,
- atomic_read(&t->send_payload_pending) == 0);
wait_event(t->wait_send_pending,
atomic_read(&t->send_pending) == 0);
@@ -569,6 +576,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
}
t->negotiation_requested = true;
t->full_packet_received = true;
+ t->status = SMB_DIRECT_CS_CONNECTED;
enqueue_reassembly(t, recvmsg, 0);
wake_up_interruptible(&t->wait_status);
break;
@@ -873,13 +881,8 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
smb_direct_disconnect_rdma_connection(t);
}
- if (sendmsg->num_sge > 1) {
- if (atomic_dec_and_test(&t->send_payload_pending))
- wake_up(&t->wait_send_payload_pending);
- } else {
- if (atomic_dec_and_test(&t->send_pending))
- wake_up(&t->wait_send_pending);
- }
+ if (atomic_dec_and_test(&t->send_pending))
+ wake_up(&t->wait_send_pending);
/* iterate and free the list of messages in reverse. the list's head
* is invalid.
@@ -911,21 +914,12 @@ static int smb_direct_post_send(struct smb_direct_transport *t,
{
int ret;
- if (wr->num_sge > 1)
- atomic_inc(&t->send_payload_pending);
- else
- atomic_inc(&t->send_pending);
-
+ atomic_inc(&t->send_pending);
ret = ib_post_send(t->qp, wr, NULL);
if (ret) {
pr_err("failed to post send: %d\n", ret);
- if (wr->num_sge > 1) {
- if (atomic_dec_and_test(&t->send_payload_pending))
- wake_up(&t->wait_send_payload_pending);
- } else {
- if (atomic_dec_and_test(&t->send_pending))
- wake_up(&t->wait_send_pending);
- }
+ if (atomic_dec_and_test(&t->send_pending))
+ wake_up(&t->wait_send_pending);
smb_direct_disconnect_rdma_connection(t);
}
return ret;
@@ -983,18 +977,19 @@ static int smb_direct_flush_send_list(struct smb_direct_transport *t,
}
static int wait_for_credits(struct smb_direct_transport *t,
- wait_queue_head_t *waitq, atomic_t *credits)
+ wait_queue_head_t *waitq, atomic_t *total_credits,
+ int needed)
{
int ret;
do {
- if (atomic_dec_return(credits) >= 0)
+ if (atomic_sub_return(needed, total_credits) >= 0)
return 0;
- atomic_inc(credits);
+ atomic_add(needed, total_credits);
ret = wait_event_interruptible(*waitq,
- atomic_read(credits) > 0 ||
- t->status != SMB_DIRECT_CS_CONNECTED);
+ atomic_read(total_credits) >= needed ||
+ t->status != SMB_DIRECT_CS_CONNECTED);
if (t->status != SMB_DIRECT_CS_CONNECTED)
return -ENOTCONN;
@@ -1015,7 +1010,19 @@ static int wait_for_send_credits(struct smb_direct_transport *t,
return ret;
}
- return wait_for_credits(t, &t->wait_send_credits, &t->send_credits);
+ return wait_for_credits(t, &t->wait_send_credits, &t->send_credits, 1);
+}
+
+static int wait_for_rw_credits(struct smb_direct_transport *t, int credits)
+{
+ return wait_for_credits(t, &t->wait_rw_credits, &t->rw_credits, credits);
+}
+
+static int calc_rw_credits(struct smb_direct_transport *t,
+ char *buf, unsigned int len)
+{
+ return DIV_ROUND_UP(get_buf_page_count(buf, len),
+ t->pages_per_rw_credit);
}
static int smb_direct_create_header(struct smb_direct_transport *t,
@@ -1086,7 +1093,7 @@ static int get_sg_list(void *buf, int size, struct scatterlist *sg_list, int nen
int offset, len;
int i = 0;
- if (nentries < get_buf_page_count(buf, size))
+ if (size <= 0 || nentries < get_buf_page_count(buf, size))
return -EINVAL;
offset = offset_in_page(buf);
@@ -1118,7 +1125,7 @@ static int get_mapped_sg_list(struct ib_device *device, void *buf, int size,
int npages;
npages = get_sg_list(buf, size, sg_list, nentries);
- if (npages <= 0)
+ if (npages < 0)
return -EINVAL;
return ib_dma_map_sg(device, sg_list, npages, dir);
}
@@ -1313,11 +1320,21 @@ done:
* that means all the I/Os have been out and we are good to return
*/
- wait_event(st->wait_send_payload_pending,
- atomic_read(&st->send_payload_pending) == 0);
+ wait_event(st->wait_send_pending,
+ atomic_read(&st->send_pending) == 0);
return ret;
}
+static void smb_direct_free_rdma_rw_msg(struct smb_direct_transport *t,
+ struct smb_direct_rdma_rw_msg *msg,
+ enum dma_data_direction dir)
+{
+ rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
+ msg->sgt.sgl, msg->sgt.nents, dir);
+ sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
+ kfree(msg);
+}
+
static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
enum dma_data_direction dir)
{
@@ -1326,19 +1343,14 @@ static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
struct smb_direct_transport *t = msg->t;
if (wc->status != IB_WC_SUCCESS) {
+ msg->status = -EIO;
pr_err("read/write error. opcode = %d, status = %s(%d)\n",
wc->opcode, ib_wc_status_msg(wc->status), wc->status);
- smb_direct_disconnect_rdma_connection(t);
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ smb_direct_disconnect_rdma_connection(t);
}
- if (atomic_inc_return(&t->rw_avail_ops) > 0)
- wake_up(&t->wait_rw_avail_ops);
-
- rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
- msg->sg_list, msg->sgt.nents, dir);
- sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
complete(msg->completion);
- kfree(msg);
}
static void read_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1351,94 +1363,141 @@ static void write_done(struct ib_cq *cq, struct ib_wc *wc)
read_write_done(cq, wc, DMA_TO_DEVICE);
}
-static int smb_direct_rdma_xmit(struct smb_direct_transport *t, void *buf,
- int buf_len, u32 remote_key, u64 remote_offset,
- u32 remote_len, bool is_read)
+static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
+ void *buf, int buf_len,
+ struct smb2_buffer_desc_v1 *desc,
+ unsigned int desc_len,
+ bool is_read)
{
- struct smb_direct_rdma_rw_msg *msg;
- int ret;
+ struct smb_direct_rdma_rw_msg *msg, *next_msg;
+ int i, ret;
DECLARE_COMPLETION_ONSTACK(completion);
- struct ib_send_wr *first_wr = NULL;
+ struct ib_send_wr *first_wr;
+ LIST_HEAD(msg_list);
+ char *desc_buf;
+ int credits_needed;
+ unsigned int desc_buf_len;
+ size_t total_length = 0;
+
+ if (t->status != SMB_DIRECT_CS_CONNECTED)
+ return -ENOTCONN;
- ret = wait_for_credits(t, &t->wait_rw_avail_ops, &t->rw_avail_ops);
+ /* calculate needed credits */
+ credits_needed = 0;
+ desc_buf = buf;
+ for (i = 0; i < desc_len / sizeof(*desc); i++) {
+ desc_buf_len = le32_to_cpu(desc[i].length);
+
+ credits_needed += calc_rw_credits(t, desc_buf, desc_buf_len);
+ desc_buf += desc_buf_len;
+ total_length += desc_buf_len;
+ if (desc_buf_len == 0 || total_length > buf_len ||
+ total_length > t->max_rdma_rw_size)
+ return -EINVAL;
+ }
+
+ ksmbd_debug(RDMA, "RDMA %s, len %#x, needed credits %#x\n",
+ is_read ? "read" : "write", buf_len, credits_needed);
+
+ ret = wait_for_rw_credits(t, credits_needed);
if (ret < 0)
return ret;
- /* TODO: mempool */
- msg = kmalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
- sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
- if (!msg) {
- atomic_inc(&t->rw_avail_ops);
- return -ENOMEM;
- }
+ /* build rdma_rw_ctx for each descriptor */
+ desc_buf = buf;
+ for (i = 0; i < desc_len / sizeof(*desc); i++) {
+ msg = kzalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
+ sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
- msg->sgt.sgl = &msg->sg_list[0];
- ret = sg_alloc_table_chained(&msg->sgt,
- get_buf_page_count(buf, buf_len),
- msg->sg_list, SG_CHUNK_SIZE);
- if (ret) {
- atomic_inc(&t->rw_avail_ops);
- kfree(msg);
- return -ENOMEM;
- }
+ desc_buf_len = le32_to_cpu(desc[i].length);
- ret = get_sg_list(buf, buf_len, msg->sgt.sgl, msg->sgt.orig_nents);
- if (ret <= 0) {
- pr_err("failed to get pages\n");
- goto err;
- }
+ msg->t = t;
+ msg->cqe.done = is_read ? read_done : write_done;
+ msg->completion = &completion;
- ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
- msg->sg_list, get_buf_page_count(buf, buf_len),
- 0, remote_offset, remote_key,
- is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
- if (ret < 0) {
- pr_err("failed to init rdma_rw_ctx: %d\n", ret);
- goto err;
+ msg->sgt.sgl = &msg->sg_list[0];
+ ret = sg_alloc_table_chained(&msg->sgt,
+ get_buf_page_count(desc_buf, desc_buf_len),
+ msg->sg_list, SG_CHUNK_SIZE);
+ if (ret) {
+ kfree(msg);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = get_sg_list(desc_buf, desc_buf_len,
+ msg->sgt.sgl, msg->sgt.orig_nents);
+ if (ret < 0) {
+ sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
+ kfree(msg);
+ goto out;
+ }
+
+ ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
+ msg->sgt.sgl,
+ get_buf_page_count(desc_buf, desc_buf_len),
+ 0,
+ le64_to_cpu(desc[i].offset),
+ le32_to_cpu(desc[i].token),
+ is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ if (ret < 0) {
+ pr_err("failed to init rdma_rw_ctx: %d\n", ret);
+ sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
+ kfree(msg);
+ goto out;
+ }
+
+ list_add_tail(&msg->list, &msg_list);
+ desc_buf += desc_buf_len;
}
- msg->t = t;
- msg->cqe.done = is_read ? read_done : write_done;
- msg->completion = &completion;
- first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
- &msg->cqe, NULL);
+ /* concatenate work requests of rdma_rw_ctxs */
+ first_wr = NULL;
+ list_for_each_entry_reverse(msg, &msg_list, list) {
+ first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
+ &msg->cqe, first_wr);
+ }
ret = ib_post_send(t->qp, first_wr, NULL);
if (ret) {
- pr_err("failed to post send wr: %d\n", ret);
- goto err;
+ pr_err("failed to post send wr for RDMA R/W: %d\n", ret);
+ goto out;
}
+ msg = list_last_entry(&msg_list, struct smb_direct_rdma_rw_msg, list);
wait_for_completion(&completion);
- return 0;
-
-err:
- atomic_inc(&t->rw_avail_ops);
- if (first_wr)
- rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
- msg->sg_list, msg->sgt.nents,
- is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
- sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
- kfree(msg);
+ ret = msg->status;
+out:
+ list_for_each_entry_safe(msg, next_msg, &msg_list, list) {
+ list_del(&msg->list);
+ smb_direct_free_rdma_rw_msg(t, msg,
+ is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+ atomic_add(credits_needed, &t->rw_credits);
+ wake_up(&t->wait_rw_credits);
return ret;
}
-static int smb_direct_rdma_write(struct ksmbd_transport *t, void *buf,
- unsigned int buflen, u32 remote_key,
- u64 remote_offset, u32 remote_len)
+static int smb_direct_rdma_write(struct ksmbd_transport *t,
+ void *buf, unsigned int buflen,
+ struct smb2_buffer_desc_v1 *desc,
+ unsigned int desc_len)
{
return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
- remote_key, remote_offset,
- remote_len, false);
+ desc, desc_len, false);
}
-static int smb_direct_rdma_read(struct ksmbd_transport *t, void *buf,
- unsigned int buflen, u32 remote_key,
- u64 remote_offset, u32 remote_len)
+static int smb_direct_rdma_read(struct ksmbd_transport *t,
+ void *buf, unsigned int buflen,
+ struct smb2_buffer_desc_v1 *desc,
+ unsigned int desc_len)
{
return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
- remote_key, remote_offset,
- remote_len, true);
+ desc, desc_len, true);
}
static void smb_direct_disconnect(struct ksmbd_transport *t)
@@ -1638,41 +1697,57 @@ out_err:
return ret;
}
+static unsigned int smb_direct_get_max_fr_pages(struct smb_direct_transport *t)
+{
+ return min_t(unsigned int,
+ t->cm_id->device->attrs.max_fast_reg_page_list_len,
+ 256);
+}
+
static int smb_direct_init_params(struct smb_direct_transport *t,
struct ib_qp_cap *cap)
{
struct ib_device *device = t->cm_id->device;
- int max_send_sges, max_pages, max_rw_wrs, max_send_wrs;
+ int max_send_sges, max_rw_wrs, max_send_wrs;
+ unsigned int max_sge_per_wr, wrs_per_credit;
- /* need 2 more sge. because a SMB_DIRECT header will be mapped,
- * and maybe a send buffer could be not page aligned.
+ /* need 3 more sges, because a SMB_DIRECT header, an SMB2 header,
+ * and an SMB2 response could be mapped.
*/
t->max_send_size = smb_direct_max_send_size;
- max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 2;
+ max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 3;
if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) {
pr_err("max_send_size %d is too large\n", t->max_send_size);
return -EINVAL;
}
- /*
- * allow smb_direct_max_outstanding_rw_ops of in-flight RDMA
- * read/writes. HCA guarantees at least max_send_sge of sges for
- * a RDMA read/write work request, and if memory registration is used,
- * we need reg_mr, local_inv wrs for each read/write.
+ /* Calculate the number of work requests for RDMA R/W.
+ * The maximum number of pages which can be registered
+ * with one Memory region can be transferred with one
+ * R/W credit. And at least 4 work requests for each credit
+ * are needed for MR registration, RDMA R/W, local & remote
+ * MR invalidation.
*/
t->max_rdma_rw_size = smb_direct_max_read_write_size;
- max_pages = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
- max_rw_wrs = DIV_ROUND_UP(max_pages, SMB_DIRECT_MAX_SEND_SGES);
- max_rw_wrs += rdma_rw_mr_factor(device, t->cm_id->port_num,
- max_pages) * 2;
- max_rw_wrs *= smb_direct_max_outstanding_rw_ops;
+ t->pages_per_rw_credit = smb_direct_get_max_fr_pages(t);
+ t->max_rw_credits = DIV_ROUND_UP(t->max_rdma_rw_size,
+ (t->pages_per_rw_credit - 1) *
+ PAGE_SIZE);
+
+ max_sge_per_wr = min_t(unsigned int, device->attrs.max_send_sge,
+ device->attrs.max_sge_rd);
+ max_sge_per_wr = max_t(unsigned int, max_sge_per_wr,
+ max_send_sges);
+ wrs_per_credit = max_t(unsigned int, 4,
+ DIV_ROUND_UP(t->pages_per_rw_credit,
+ max_sge_per_wr) + 1);
+ max_rw_wrs = t->max_rw_credits * wrs_per_credit;
max_send_wrs = smb_direct_send_credit_target + max_rw_wrs;
if (max_send_wrs > device->attrs.max_cqe ||
max_send_wrs > device->attrs.max_qp_wr) {
- pr_err("consider lowering send_credit_target = %d, or max_outstanding_rw_ops = %d\n",
- smb_direct_send_credit_target,
- smb_direct_max_outstanding_rw_ops);
+ pr_err("consider lowering send_credit_target = %d\n",
+ smb_direct_send_credit_target);
pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
device->attrs.max_cqe, device->attrs.max_qp_wr);
return -EINVAL;
@@ -1687,11 +1762,6 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
return -EINVAL;
}
- if (device->attrs.max_send_sge < SMB_DIRECT_MAX_SEND_SGES) {
- pr_err("warning: device max_send_sge = %d too small\n",
- device->attrs.max_send_sge);
- return -EINVAL;
- }
if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) {
pr_err("warning: device max_recv_sge = %d too small\n",
device->attrs.max_recv_sge);
@@ -1707,7 +1777,7 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
t->send_credit_target = smb_direct_send_credit_target;
atomic_set(&t->send_credits, 0);
- atomic_set(&t->rw_avail_ops, smb_direct_max_outstanding_rw_ops);
+ atomic_set(&t->rw_credits, t->max_rw_credits);
t->max_send_size = smb_direct_max_send_size;
t->max_recv_size = smb_direct_max_receive_size;
@@ -1715,12 +1785,10 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
cap->max_send_wr = max_send_wrs;
cap->max_recv_wr = t->recv_credit_max;
- cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
+ cap->max_send_sge = max_sge_per_wr;
cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
cap->max_inline_data = 0;
- cap->max_rdma_ctxs =
- rdma_rw_mr_factor(device, t->cm_id->port_num, max_pages) *
- smb_direct_max_outstanding_rw_ops;
+ cap->max_rdma_ctxs = t->max_rw_credits;
return 0;
}
@@ -1813,7 +1881,8 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
}
t->send_cq = ib_alloc_cq(t->cm_id->device, t,
- t->send_credit_target, 0, IB_POLL_WORKQUEUE);
+ smb_direct_send_credit_target + cap->max_rdma_ctxs,
+ 0, IB_POLL_WORKQUEUE);
if (IS_ERR(t->send_cq)) {
pr_err("Can't create RDMA send CQ\n");
ret = PTR_ERR(t->send_cq);
@@ -1822,8 +1891,7 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
}
t->recv_cq = ib_alloc_cq(t->cm_id->device, t,
- cap->max_send_wr + cap->max_rdma_ctxs,
- 0, IB_POLL_WORKQUEUE);
+ t->recv_credit_max, 0, IB_POLL_WORKQUEUE);
if (IS_ERR(t->recv_cq)) {
pr_err("Can't create RDMA recv CQ\n");
ret = PTR_ERR(t->recv_cq);
@@ -1852,17 +1920,12 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) {
- int pages_per_mr, mr_count;
-
- pages_per_mr = min_t(int, pages_per_rw,
- t->cm_id->device->attrs.max_fast_reg_page_list_len);
- mr_count = DIV_ROUND_UP(pages_per_rw, pages_per_mr) *
- atomic_read(&t->rw_avail_ops);
- ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs, mr_count,
- IB_MR_TYPE_MEM_REG, pages_per_mr, 0);
+ ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs,
+ t->max_rw_credits, IB_MR_TYPE_MEM_REG,
+ t->pages_per_rw_credit, 0);
if (ret) {
pr_err("failed to init mr pool count %d pages %d\n",
- mr_count, pages_per_mr);
+ t->max_rw_credits, t->pages_per_rw_credit);
goto err;
}
}
diff --git a/fs/ksmbd/transport_rdma.h b/fs/ksmbd/transport_rdma.h
index 5567d93a6f96..77aee4e5c9dc 100644
--- a/fs/ksmbd/transport_rdma.h
+++ b/fs/ksmbd/transport_rdma.h
@@ -7,6 +7,10 @@
#ifndef __KSMBD_TRANSPORT_RDMA_H__
#define __KSMBD_TRANSPORT_RDMA_H__
+#define SMBD_DEFAULT_IOSIZE (8 * 1024 * 1024)
+#define SMBD_MIN_IOSIZE (512 * 1024)
+#define SMBD_MAX_IOSIZE (16 * 1024 * 1024)
+
/* SMB DIRECT negotiation request packet [MS-SMBD] 2.2.1 */
struct smb_direct_negotiate_req {
__le16 min_version;
@@ -52,10 +56,14 @@ struct smb_direct_data_transfer {
int ksmbd_rdma_init(void);
void ksmbd_rdma_destroy(void);
bool ksmbd_rdma_capable_netdev(struct net_device *netdev);
+void init_smbd_max_io_size(unsigned int sz);
+unsigned int get_smbd_max_read_write_size(void);
#else
static inline int ksmbd_rdma_init(void) { return 0; }
static inline int ksmbd_rdma_destroy(void) { return 0; }
static inline bool ksmbd_rdma_capable_netdev(struct net_device *netdev) { return false; }
+static inline void init_smbd_max_io_size(unsigned int sz) { }
+static inline unsigned int get_smbd_max_read_write_size(void) { return 0; }
#endif
#endif /* __KSMBD_TRANSPORT_RDMA_H__ */
diff --git a/fs/namei.c b/fs/namei.c
index 896ade8b7400..1f28d3f463c3 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -730,13 +730,6 @@ static bool legitimize_links(struct nameidata *nd)
static bool legitimize_root(struct nameidata *nd)
{
- /*
- * For scoped-lookups (where nd->root has been zeroed), we need to
- * restart the whole lookup from scratch -- because set_root() is wrong
- * for these lookups (nd->dfd is the root, not the filesystem root).
- */
- if (!nd->root.mnt && (nd->flags & LOOKUP_IS_SCOPED))
- return false;
/* Nothing to do if nd->root is zero or is managed by the VFS user. */
if (!nd->root.mnt || (nd->state & ND_ROOT_PRESET))
return true;
@@ -798,7 +791,7 @@ out:
* @seq: seq number to check @dentry against
* Returns: true on success, false on failure
*
- * Similar to to try_to_unlazy(), but here we have the next dentry already
+ * Similar to try_to_unlazy(), but here we have the next dentry already
* picked by rcu-walk and want to legitimize that in addition to the current
* nd->path and nd->root for ref-walk mode. Must be called from rcu-walk context.
* Nothing should touch nameidata between try_to_unlazy_next() failure and
@@ -1032,7 +1025,7 @@ static struct ctl_table namei_sysctls[] = {
.procname = "protected_symlinks",
.data = &sysctl_protected_symlinks,
.maxlen = sizeof(int),
- .mode = 0600,
+ .mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
@@ -1041,7 +1034,7 @@ static struct ctl_table namei_sysctls[] = {
.procname = "protected_hardlinks",
.data = &sysctl_protected_hardlinks,
.maxlen = sizeof(int),
- .mode = 0600,
+ .mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
@@ -1050,7 +1043,7 @@ static struct ctl_table namei_sysctls[] = {
.procname = "protected_fifos",
.data = &sysctl_protected_fifos,
.maxlen = sizeof(int),
- .mode = 0600,
+ .mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
@@ -1059,7 +1052,7 @@ static struct ctl_table namei_sysctls[] = {
.procname = "protected_regular",
.data = &sysctl_protected_regular,
.maxlen = sizeof(int),
- .mode = 0600,
+ .mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
@@ -1755,7 +1748,7 @@ static int reserve_stack(struct nameidata *nd, struct path *link, unsigned seq)
// unlazy even if we fail to grab the link - cleanup needs it
bool grabbed_link = legitimize_path(nd, link, seq);
- if (!try_to_unlazy(nd) != 0 || !grabbed_link)
+ if (!try_to_unlazy(nd) || !grabbed_link)
return -ECHILD;
if (nd_alloc_stack(nd))
@@ -2769,7 +2762,8 @@ struct dentry *lookup_one(struct user_namespace *mnt_userns, const char *name,
EXPORT_SYMBOL(lookup_one);
/**
- * lookup_one_len_unlocked - filesystem helper to lookup single pathname component
+ * lookup_one_unlocked - filesystem helper to lookup single pathname component
+ * @mnt_userns: idmapping of the mount the lookup is performed from
* @name: pathname component to lookup
* @base: base directory to lookup from
* @len: maximum length @len should be interpreted to
@@ -2780,14 +2774,15 @@ EXPORT_SYMBOL(lookup_one);
* Unlike lookup_one_len, it should be called without the parent
* i_mutex held, and will take the i_mutex itself if necessary.
*/
-struct dentry *lookup_one_len_unlocked(const char *name,
- struct dentry *base, int len)
+struct dentry *lookup_one_unlocked(struct user_namespace *mnt_userns,
+ const char *name, struct dentry *base,
+ int len)
{
struct qstr this;
int err;
struct dentry *ret;
- err = lookup_one_common(&init_user_ns, name, base, len, &this);
+ err = lookup_one_common(mnt_userns, name, base, len, &this);
if (err)
return ERR_PTR(err);
@@ -2796,6 +2791,59 @@ struct dentry *lookup_one_len_unlocked(const char *name,
ret = lookup_slow(&this, base, 0);
return ret;
}
+EXPORT_SYMBOL(lookup_one_unlocked);
+
+/**
+ * lookup_one_positive_unlocked - filesystem helper to lookup single
+ * pathname component
+ * @mnt_userns: idmapping of the mount the lookup is performed from
+ * @name: pathname component to lookup
+ * @base: base directory to lookup from
+ * @len: maximum length @len should be interpreted to
+ *
+ * This helper will yield ERR_PTR(-ENOENT) on negatives. The helper returns
+ * known positive or ERR_PTR(). This is what most of the users want.
+ *
+ * Note that pinned negative with unlocked parent _can_ become positive at any
+ * time, so callers of lookup_one_unlocked() need to be very careful; pinned
+ * positives have ->d_inode stable, so this one avoids such problems.
+ *
+ * Note that this routine is purely a helper for filesystem usage and should
+ * not be called by generic code.
+ *
+ * The helper should be called without i_mutex held.
+ */
+struct dentry *lookup_one_positive_unlocked(struct user_namespace *mnt_userns,
+ const char *name,
+ struct dentry *base, int len)
+{
+ struct dentry *ret = lookup_one_unlocked(mnt_userns, name, base, len);
+
+ if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
+ dput(ret);
+ ret = ERR_PTR(-ENOENT);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(lookup_one_positive_unlocked);
+
+/**
+ * lookup_one_len_unlocked - filesystem helper to lookup single pathname component
+ * @name: pathname component to lookup
+ * @base: base directory to lookup from
+ * @len: maximum length @len should be interpreted to
+ *
+ * Note that this routine is purely a helper for filesystem usage and should
+ * not be called by generic code.
+ *
+ * Unlike lookup_one_len, it should be called without the parent
+ * i_mutex held, and will take the i_mutex itself if necessary.
+ */
+struct dentry *lookup_one_len_unlocked(const char *name,
+ struct dentry *base, int len)
+{
+ return lookup_one_unlocked(&init_user_ns, name, base, len);
+}
EXPORT_SYMBOL(lookup_one_len_unlocked);
/*
@@ -2809,12 +2857,7 @@ EXPORT_SYMBOL(lookup_one_len_unlocked);
struct dentry *lookup_positive_unlocked(const char *name,
struct dentry *base, int len)
{
- struct dentry *ret = lookup_one_len_unlocked(name, base, len);
- if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
- dput(ret);
- ret = ERR_PTR(-ENOENT);
- }
- return ret;
+ return lookup_one_positive_unlocked(&init_user_ns, name, base, len);
}
EXPORT_SYMBOL(lookup_positive_unlocked);
diff --git a/fs/namespace.c b/fs/namespace.c
index 41461f55c039..e6a7e769d25d 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1760,7 +1760,7 @@ out_unlock:
/*
* Is the caller allowed to modify his namespace?
*/
-static inline bool may_mount(void)
+bool may_mount(void)
{
return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 6f5425e89ca6..2d72b1b7ed74 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -206,15 +206,16 @@ static int
nfs_file_fsync_commit(struct file *file, int datasync)
{
struct inode *inode = file_inode(file);
- int ret;
+ int ret, ret2;
dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync);
nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
ret = nfs_commit_inode(inode, FLUSH_SYNC);
- if (ret < 0)
- return ret;
- return file_check_and_advance_wb_err(file);
+ ret2 = file_check_and_advance_wb_err(file);
+ if (ret2 < 0)
+ return ret2;
+ return ret;
}
int
@@ -387,11 +388,8 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
return status;
NFS_I(mapping->host)->write_io += copied;
- if (nfs_ctx_key_to_expire(ctx, mapping->host)) {
- status = nfs_wb_all(mapping->host);
- if (status < 0)
- return status;
- }
+ if (nfs_ctx_key_to_expire(ctx, mapping->host))
+ nfs_wb_all(mapping->host);
return copied;
}
@@ -606,18 +604,6 @@ static const struct vm_operations_struct nfs_file_vm_ops = {
.page_mkwrite = nfs_vm_page_mkwrite,
};
-static int nfs_need_check_write(struct file *filp, struct inode *inode,
- int error)
-{
- struct nfs_open_context *ctx;
-
- ctx = nfs_file_open_context(filp);
- if (nfs_error_is_fatal_on_server(error) ||
- nfs_ctx_key_to_expire(ctx, inode))
- return 1;
- return 0;
-}
-
ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
@@ -645,7 +631,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
if (iocb->ki_flags & IOCB_APPEND || iocb->ki_pos > i_size_read(inode)) {
result = nfs_revalidate_file_size(inode, file);
if (result)
- goto out;
+ return result;
}
nfs_clear_invalid_mapping(file->f_mapping);
@@ -664,6 +650,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
written = result;
iocb->ki_pos += written;
+ nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
if (mntflags & NFS_MOUNT_WRITE_EAGER) {
result = filemap_fdatawrite_range(file->f_mapping,
@@ -681,17 +668,22 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
}
result = generic_write_sync(iocb, written);
if (result < 0)
- goto out;
+ return result;
+out:
/* Return error values */
error = filemap_check_wb_err(file->f_mapping, since);
- if (nfs_need_check_write(file, inode, error)) {
- int err = nfs_wb_all(inode);
- if (err < 0)
- result = err;
+ switch (error) {
+ default:
+ break;
+ case -EDQUOT:
+ case -EFBIG:
+ case -ENOSPC:
+ nfs_wb_all(inode);
+ error = file_check_and_advance_wb_err(file);
+ if (error < 0)
+ result = error;
}
- nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
-out:
return result;
out_swapfile:
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 76deddab0a8f..2b2661582bbe 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -839,7 +839,12 @@ fl_pnfs_update_layout(struct inode *ino,
lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode,
gfp_flags);
- if (IS_ERR_OR_NULL(lseg))
+ if (IS_ERR(lseg)) {
+ /* Fall back to MDS on recoverable errors */
+ if (!nfs_error_is_fatal_on_server(PTR_ERR(lseg)))
+ lseg = NULL;
+ goto out;
+ } else if (!lseg)
goto out;
lo = NFS_I(ino)->layout;
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index f73c09a9cf0a..e861d7bae305 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -231,11 +231,10 @@ void nfs_fscache_release_file(struct inode *inode, struct file *filp)
{
struct nfs_fscache_inode_auxdata auxdata;
struct fscache_cookie *cookie = nfs_i_fscache(inode);
+ loff_t i_size = i_size_read(inode);
- if (fscache_cookie_valid(cookie)) {
- nfs_fscache_update_auxdata(&auxdata, inode);
- fscache_unuse_cookie(cookie, &auxdata, NULL);
- }
+ nfs_fscache_update_auxdata(&auxdata, inode);
+ fscache_unuse_cookie(cookie, &auxdata, &i_size);
}
/*
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 7eefa16ed381..8f8cd6e2d4db 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -841,6 +841,7 @@ static inline bool nfs_error_is_fatal_on_server(int err)
case 0:
case -ERESTARTSYS:
case -EINTR:
+ case -ENOMEM:
return false;
}
return nfs_error_is_fatal(err);
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 7b861e4f0533..03d3a270eff4 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -328,7 +328,7 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
char *read_name = NULL;
int len, status = 0;
- server = NFS_SERVER(ss_mnt->mnt_root->d_inode);
+ server = NFS_SB(ss_mnt->mnt_sb);
if (!fattr)
return ERR_PTR(-ENOMEM);
@@ -346,7 +346,7 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
goto out;
snprintf(read_name, len, SSC_READ_NAME_BODY, read_name_gen++);
- r_ino = nfs_fhget(ss_mnt->mnt_root->d_inode->i_sb, src_fh, fattr);
+ r_ino = nfs_fhget(ss_mnt->mnt_sb, src_fh, fattr);
if (IS_ERR(r_ino)) {
res = ERR_CAST(r_ino);
goto out_free_name;
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 3680c8da510c..f2dbf904c598 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -417,6 +417,9 @@ static int nfs_do_refmount(struct fs_context *fc, struct rpc_clnt *client)
fs_locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
if (!fs_locations)
goto out_free;
+ fs_locations->fattr = nfs_alloc_fattr();
+ if (!fs_locations->fattr)
+ goto out_free_2;
/* Get locations */
dentry = ctx->clone_data.dentry;
@@ -427,14 +430,16 @@ static int nfs_do_refmount(struct fs_context *fc, struct rpc_clnt *client)
err = nfs4_proc_fs_locations(client, d_inode(parent), &dentry->d_name, fs_locations, page);
dput(parent);
if (err != 0)
- goto out_free_2;
+ goto out_free_3;
err = -ENOENT;
if (fs_locations->nlocations <= 0 ||
fs_locations->fs_path.ncomponents <= 0)
- goto out_free_2;
+ goto out_free_3;
err = nfs_follow_referral(fc, fs_locations);
+out_free_3:
+ kfree(fs_locations->fattr);
out_free_2:
kfree(fs_locations);
out_free:
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a79f66432bd3..c0fdcf8c0032 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1162,7 +1162,7 @@ static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
{
unsigned short task_flags = 0;
- if (server->nfs_client->cl_minorversion)
+ if (server->caps & NFS_CAP_MOVEABLE)
task_flags = RPC_TASK_MOVEABLE;
return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
}
@@ -2568,7 +2568,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data,
};
int status;
- if (server->nfs_client->cl_minorversion)
+ if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
task_setup_data.flags |= RPC_TASK_MOVEABLE;
kref_get(&data->kref);
@@ -3098,6 +3098,10 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
}
out:
+ if (opendata->lgp) {
+ nfs4_lgopen_release(opendata->lgp);
+ opendata->lgp = NULL;
+ }
if (!opendata->cancelled)
nfs4_sequence_free_slot(&opendata->o_res.seq_res);
return ret;
@@ -3733,7 +3737,7 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
};
int status = -ENOMEM;
- if (server->nfs_client->cl_minorversion)
+ if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
task_setup_data.flags |= RPC_TASK_MOVEABLE;
nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
@@ -4243,6 +4247,8 @@ static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
if (locations == NULL)
goto out;
+ locations->fattr = fattr;
+
status = nfs4_proc_fs_locations(client, dir, name, locations, page);
if (status != 0)
goto out;
@@ -4252,17 +4258,14 @@ static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
* referral. Cause us to drop into the exception handler, which
* will kick off migration recovery.
*/
- if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
+ if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) {
dprintk("%s: server did not return a different fsid for"
" a referral at %s\n", __func__, name->name);
status = -NFS4ERR_MOVED;
goto out;
}
/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
- nfs_fixup_referral_attributes(&locations->fattr);
-
- /* replace the lookup nfs_fattr with the locations nfs_fattr */
- memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
+ nfs_fixup_referral_attributes(fattr);
memset(fhandle, 0, sizeof(struct nfs_fh));
out:
if (page)
@@ -4404,7 +4407,7 @@ static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
};
unsigned short task_flags = 0;
- if (server->nfs_client->cl_minorversion)
+ if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
task_flags = RPC_TASK_MOVEABLE;
/* Is this is an attribute revalidation, subject to softreval? */
@@ -5768,9 +5771,17 @@ static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
return 0;
}
-static inline int nfs4_server_supports_acls(struct nfs_server *server)
+static bool nfs4_server_supports_acls(const struct nfs_server *server,
+ enum nfs4_acl_type type)
{
- return server->caps & NFS_CAP_ACLS;
+ switch (type) {
+ default:
+ return server->attr_bitmask[0] & FATTR4_WORD0_ACL;
+ case NFS4ACL_DACL:
+ return server->attr_bitmask[1] & FATTR4_WORD1_DACL;
+ case NFS4ACL_SACL:
+ return server->attr_bitmask[1] & FATTR4_WORD1_SACL;
+ }
}
/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
@@ -5809,6 +5820,7 @@ unwind:
}
struct nfs4_cached_acl {
+ enum nfs4_acl_type type;
int cached;
size_t len;
char data[];
@@ -5829,7 +5841,8 @@ static void nfs4_zap_acl_attr(struct inode *inode)
nfs4_set_cached_acl(inode, NULL);
}
-static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
+static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf,
+ size_t buflen, enum nfs4_acl_type type)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs4_cached_acl *acl;
@@ -5839,6 +5852,8 @@ static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_
acl = nfsi->nfs4_acl;
if (acl == NULL)
goto out;
+ if (acl->type != type)
+ goto out;
if (buf == NULL) /* user is just asking for length */
goto out_len;
if (acl->cached == 0)
@@ -5854,7 +5869,9 @@ out:
return ret;
}
-static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
+static void nfs4_write_cached_acl(struct inode *inode, struct page **pages,
+ size_t pgbase, size_t acl_len,
+ enum nfs4_acl_type type)
{
struct nfs4_cached_acl *acl;
size_t buflen = sizeof(*acl) + acl_len;
@@ -5871,6 +5888,7 @@ static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size
goto out;
acl->cached = 0;
}
+ acl->type = type;
acl->len = acl_len;
out:
nfs4_set_cached_acl(inode, acl);
@@ -5886,14 +5904,17 @@ out:
* length. The next getxattr call will then produce another round trip to
* the server, this time with the input buf of the required size.
*/
-static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf,
+ size_t buflen, enum nfs4_acl_type type)
{
struct page **pages;
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
+ .acl_type = type,
.acl_len = buflen,
};
struct nfs_getaclres res = {
+ .acl_type = type,
.acl_len = buflen,
};
struct rpc_message msg = {
@@ -5943,7 +5964,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
ret = -ERANGE;
goto out_free;
}
- nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
+ nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len,
+ type);
if (buf) {
if (res.acl_len > buflen) {
ret = -ERANGE;
@@ -5963,14 +5985,15 @@ out_free:
return ret;
}
-static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf,
+ size_t buflen, enum nfs4_acl_type type)
{
struct nfs4_exception exception = {
.interruptible = true,
};
ssize_t ret;
do {
- ret = __nfs4_get_acl_uncached(inode, buf, buflen);
+ ret = __nfs4_get_acl_uncached(inode, buf, buflen, type);
trace_nfs4_get_acl(inode, ret);
if (ret >= 0)
break;
@@ -5979,34 +6002,37 @@ static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bufl
return ret;
}
-static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
+static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen,
+ enum nfs4_acl_type type)
{
struct nfs_server *server = NFS_SERVER(inode);
int ret;
- if (!nfs4_server_supports_acls(server))
+ if (!nfs4_server_supports_acls(server, type))
return -EOPNOTSUPP;
ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
if (ret < 0)
return ret;
if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
nfs_zap_acl_cache(inode);
- ret = nfs4_read_cached_acl(inode, buf, buflen);
+ ret = nfs4_read_cached_acl(inode, buf, buflen, type);
if (ret != -ENOENT)
/* -ENOENT is returned if there is no ACL or if there is an ACL
* but no cached acl data, just the acl length */
return ret;
- return nfs4_get_acl_uncached(inode, buf, buflen);
+ return nfs4_get_acl_uncached(inode, buf, buflen, type);
}
-static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
+static int __nfs4_proc_set_acl(struct inode *inode, const void *buf,
+ size_t buflen, enum nfs4_acl_type type)
{
struct nfs_server *server = NFS_SERVER(inode);
struct page *pages[NFS4ACL_MAXPAGES];
struct nfs_setaclargs arg = {
- .fh = NFS_FH(inode),
- .acl_pages = pages,
- .acl_len = buflen,
+ .fh = NFS_FH(inode),
+ .acl_type = type,
+ .acl_len = buflen,
+ .acl_pages = pages,
};
struct nfs_setaclres res;
struct rpc_message msg = {
@@ -6020,7 +6046,7 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
/* You can't remove system.nfs4_acl: */
if (buflen == 0)
return -EINVAL;
- if (!nfs4_server_supports_acls(server))
+ if (!nfs4_server_supports_acls(server, type))
return -EOPNOTSUPP;
if (npages > ARRAY_SIZE(pages))
return -ERANGE;
@@ -6051,12 +6077,13 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
return ret;
}
-static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
+static int nfs4_proc_set_acl(struct inode *inode, const void *buf,
+ size_t buflen, enum nfs4_acl_type type)
{
struct nfs4_exception exception = { };
int err;
do {
- err = __nfs4_proc_set_acl(inode, buf, buflen);
+ err = __nfs4_proc_set_acl(inode, buf, buflen, type);
trace_nfs4_set_acl(inode, err);
if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
/*
@@ -6612,10 +6639,13 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_delegreturn_ops,
- .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
};
int status = 0;
+ if (nfs_server_capable(inode, NFS_CAP_MOVEABLE))
+ task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -6929,10 +6959,8 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
- struct nfs_client *client =
- NFS_SERVER(lsp->ls_state->inode)->nfs_client;
- if (client->cl_minorversion)
+ if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE))
task_setup_data.flags |= RPC_TASK_MOVEABLE;
nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
@@ -7203,9 +7231,8 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
};
int ret;
- struct nfs_client *client = NFS_SERVER(state->inode)->nfs_client;
- if (client->cl_minorversion)
+ if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
task_setup_data.flags |= RPC_TASK_MOVEABLE;
data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
@@ -7655,21 +7682,70 @@ static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
const char *key, const void *buf,
size_t buflen, int flags)
{
- return nfs4_proc_set_acl(inode, buf, buflen);
+ return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL);
}
static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *key, void *buf, size_t buflen)
{
- return nfs4_proc_get_acl(inode, buf, buflen);
+ return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL);
}
static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
{
- return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)));
+ return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+#define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl"
+
+static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
+ struct dentry *unused, struct inode *inode,
+ const char *key, const void *buf,
+ size_t buflen, int flags)
+{
+ return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL);
+}
+
+static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler,
+ struct dentry *unused, struct inode *inode,
+ const char *key, void *buf, size_t buflen)
+{
+ return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL);
+}
+
+static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry)
+{
+ return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL);
+}
+
+#define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl"
+
+static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
+ struct dentry *unused, struct inode *inode,
+ const char *key, const void *buf,
+ size_t buflen, int flags)
+{
+ return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL);
+}
+
+static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler,
+ struct dentry *unused, struct inode *inode,
+ const char *key, void *buf, size_t buflen)
+{
+ return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL);
}
+static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry)
+{
+ return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL);
+}
+
+#endif
+
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
@@ -7902,7 +7978,7 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
else
bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
- nfs_fattr_init(&fs_locations->fattr);
+ nfs_fattr_init(fs_locations->fattr);
fs_locations->server = server;
fs_locations->nlocations = 0;
status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
@@ -7967,7 +8043,7 @@ static int _nfs40_proc_get_locations(struct nfs_server *server,
unsigned long now = jiffies;
int status;
- nfs_fattr_init(&locations->fattr);
+ nfs_fattr_init(locations->fattr);
locations->server = server;
locations->nlocations = 0;
@@ -8032,7 +8108,7 @@ static int _nfs41_proc_get_locations(struct nfs_server *server,
};
int status;
- nfs_fattr_init(&locations->fattr);
+ nfs_fattr_init(locations->fattr);
locations->server = server;
locations->nlocations = 0;
@@ -10391,7 +10467,8 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
| NFS_CAP_POSIX_LOCK
| NFS_CAP_STATEID_NFSV41
| NFS_CAP_ATOMIC_OPEN_V1
- | NFS_CAP_LGOPEN,
+ | NFS_CAP_LGOPEN
+ | NFS_CAP_MOVEABLE,
.init_client = nfs41_init_client,
.shutdown_client = nfs41_shutdown_client,
.match_stateid = nfs41_match_stateid,
@@ -10426,7 +10503,8 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
| NFS_CAP_LAYOUTSTATS
| NFS_CAP_CLONE
| NFS_CAP_LAYOUTERROR
- | NFS_CAP_READ_PLUS,
+ | NFS_CAP_READ_PLUS
+ | NFS_CAP_MOVEABLE,
.init_client = nfs41_init_client,
.shutdown_client = nfs41_shutdown_client,
.match_stateid = nfs41_match_stateid,
@@ -10587,6 +10665,22 @@ static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
.set = nfs4_xattr_set_nfs4_acl,
};
+#if defined(CONFIG_NFS_V4_1)
+static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = {
+ .name = XATTR_NAME_NFSV4_DACL,
+ .list = nfs4_xattr_list_nfs4_dacl,
+ .get = nfs4_xattr_get_nfs4_dacl,
+ .set = nfs4_xattr_set_nfs4_dacl,
+};
+
+static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = {
+ .name = XATTR_NAME_NFSV4_SACL,
+ .list = nfs4_xattr_list_nfs4_sacl,
+ .get = nfs4_xattr_get_nfs4_sacl,
+ .set = nfs4_xattr_set_nfs4_sacl,
+};
+#endif
+
#ifdef CONFIG_NFS_V4_2
static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
.prefix = XATTR_USER_PREFIX,
@@ -10597,6 +10691,10 @@ static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
const struct xattr_handler *nfs4_xattr_handlers[] = {
&nfs4_xattr_nfs4_acl_handler,
+#if defined(CONFIG_NFS_V4_1)
+ &nfs4_xattr_nfs4_dacl_handler,
+ &nfs4_xattr_nfs4_sacl_handler,
+#endif
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
&nfs4_xattr_nfs4_label_handler,
#endif
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 9e1c987c81e7..2540b35ec187 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1602,7 +1602,8 @@ static inline void nfs42_complete_copies(struct nfs4_state_owner *sp,
#endif /* CONFIG_NFS_V4_2 */
static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_state *state,
- const struct nfs4_state_recovery_ops *ops)
+ const struct nfs4_state_recovery_ops *ops,
+ int *lost_locks)
{
struct nfs4_lock_state *lock;
int status;
@@ -1620,7 +1621,7 @@ static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_st
list_for_each_entry(lock, &state->lock_states, ls_locks) {
trace_nfs4_state_lock_reclaim(state, lock);
if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags))
- pr_warn_ratelimited("NFS: %s: Lock reclaim failed!\n", __func__);
+ *lost_locks += 1;
}
spin_unlock(&state->state_lock);
}
@@ -1630,7 +1631,9 @@ static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_st
return status;
}
-static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
+static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp,
+ const struct nfs4_state_recovery_ops *ops,
+ int *lost_locks)
{
struct nfs4_state *state;
unsigned int loop = 0;
@@ -1666,7 +1669,7 @@ restart:
#endif /* CONFIG_NFS_V4_2 */
refcount_inc(&state->count);
spin_unlock(&sp->so_lock);
- status = __nfs4_reclaim_open_state(sp, state, ops);
+ status = __nfs4_reclaim_open_state(sp, state, ops, lost_locks);
switch (status) {
default:
@@ -1909,6 +1912,7 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
struct rb_node *pos;
LIST_HEAD(freeme);
int status = 0;
+ int lost_locks = 0;
restart:
rcu_read_lock();
@@ -1928,8 +1932,11 @@ restart:
spin_unlock(&clp->cl_lock);
rcu_read_unlock();
- status = nfs4_reclaim_open_state(sp, ops);
+ status = nfs4_reclaim_open_state(sp, ops, &lost_locks);
if (status < 0) {
+ if (lost_locks)
+ pr_warn("NFS: %s: lost %d locks\n",
+ clp->cl_hostname, lost_locks);
set_bit(ops->owner_flag_bit, &sp->so_flags);
nfs4_put_state_owner(sp);
status = nfs4_recovery_handle_error(clp, status);
@@ -1943,6 +1950,9 @@ restart:
}
rcu_read_unlock();
nfs4_free_state_owners(&freeme);
+ if (lost_locks)
+ pr_warn("NFS: %s: lost %d locks\n",
+ clp->cl_hostname, lost_locks);
return 0;
}
@@ -2106,6 +2116,11 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
dprintk("<-- %s: no memory\n", __func__);
goto out;
}
+ locations->fattr = nfs_alloc_fattr();
+ if (locations->fattr == NULL) {
+ dprintk("<-- %s: no memory\n", __func__);
+ goto out;
+ }
inode = d_inode(server->super->s_root);
result = nfs4_proc_get_locations(server, NFS_FH(inode), locations,
@@ -2120,7 +2135,7 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
if (!locations->nlocations)
goto out;
- if (!(locations->fattr.valid & NFS_ATTR_FATTR_V4_LOCATIONS)) {
+ if (!(locations->fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)) {
dprintk("<-- %s: No fs_locations data, migration skipped\n",
__func__);
goto out;
@@ -2145,6 +2160,8 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
out:
if (page != NULL)
__free_page(page);
+ if (locations != NULL)
+ kfree(locations->fattr);
kfree(locations);
if (result) {
pr_err("NFS: migration recovery failed (server %s)\n",
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 86a5f6516928..acfe5f4bda48 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1680,19 +1680,35 @@ encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
encode_op_hdr(xdr, OP_RESTOREFH, decode_restorefh_maxsz, hdr);
}
-static void
-encode_setacl(struct xdr_stream *xdr, const struct nfs_setaclargs *arg,
- struct compound_hdr *hdr)
+static void nfs4_acltype_to_bitmap(enum nfs4_acl_type type, __u32 bitmap[2])
{
- __be32 *p;
+ switch (type) {
+ default:
+ bitmap[0] = FATTR4_WORD0_ACL;
+ bitmap[1] = 0;
+ break;
+ case NFS4ACL_DACL:
+ bitmap[0] = 0;
+ bitmap[1] = FATTR4_WORD1_DACL;
+ break;
+ case NFS4ACL_SACL:
+ bitmap[0] = 0;
+ bitmap[1] = FATTR4_WORD1_SACL;
+ }
+}
+
+static void encode_setacl(struct xdr_stream *xdr,
+ const struct nfs_setaclargs *arg,
+ struct compound_hdr *hdr)
+{
+ __u32 bitmap[2];
+
+ nfs4_acltype_to_bitmap(arg->acl_type, bitmap);
encode_op_hdr(xdr, OP_SETATTR, decode_setacl_maxsz, hdr);
encode_nfs4_stateid(xdr, &zero_stateid);
- p = reserve_space(xdr, 2*4);
- *p++ = cpu_to_be32(1);
- *p = cpu_to_be32(FATTR4_WORD0_ACL);
- p = reserve_space(xdr, 4);
- *p = cpu_to_be32(arg->acl_len);
+ xdr_encode_bitmap4(xdr, bitmap, ARRAY_SIZE(bitmap));
+ encode_uint32(xdr, arg->acl_len);
xdr_write_pages(xdr, arg->acl_pages, 0, arg->acl_len);
}
@@ -2587,11 +2603,11 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- const __u32 nfs4_acl_bitmap[1] = {
- [0] = FATTR4_WORD0_ACL,
- };
+ __u32 nfs4_acl_bitmap[2];
uint32_t replen;
+ nfs4_acltype_to_bitmap(args->acl_type, nfs4_acl_bitmap);
+
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
@@ -5386,7 +5402,7 @@ decode_restorefh(struct xdr_stream *xdr)
}
static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
- struct nfs_getaclres *res)
+ struct nfs_getaclres *res, enum nfs4_acl_type type)
{
unsigned int savep;
uint32_t attrlen,
@@ -5404,26 +5420,39 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
goto out;
- if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U)))
- return -EIO;
- if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
-
- /* The bitmap (xdr len + bitmaps) and the attr xdr len words
- * are stored with the acl data to handle the problem of
- * variable length bitmaps.*/
- res->acl_data_offset = xdr_page_pos(xdr);
- res->acl_len = attrlen;
-
- /* Check for receive buffer overflow */
- if (res->acl_len > xdr_stream_remaining(xdr) ||
- res->acl_len + res->acl_data_offset > xdr->buf->page_len) {
- res->acl_flags |= NFS4_ACL_TRUNC;
- dprintk("NFS: acl reply: attrlen %u > page_len %zu\n",
- attrlen, xdr_stream_remaining(xdr));
- }
- } else
- status = -EOPNOTSUPP;
+ switch (type) {
+ default:
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U)))
+ return -EIO;
+ if (!(bitmap[0] & FATTR4_WORD0_ACL))
+ return -EOPNOTSUPP;
+ break;
+ case NFS4ACL_DACL:
+ if (unlikely(bitmap[0] || bitmap[1] & (FATTR4_WORD1_DACL - 1U)))
+ return -EIO;
+ if (!(bitmap[1] & FATTR4_WORD1_DACL))
+ return -EOPNOTSUPP;
+ break;
+ case NFS4ACL_SACL:
+ if (unlikely(bitmap[0] || bitmap[1] & (FATTR4_WORD1_SACL - 1U)))
+ return -EIO;
+ if (!(bitmap[1] & FATTR4_WORD1_SACL))
+ return -EOPNOTSUPP;
+ }
+ /* The bitmap (xdr len + bitmaps) and the attr xdr len words
+ * are stored with the acl data to handle the problem of
+ * variable length bitmaps.*/
+ res->acl_data_offset = xdr_page_pos(xdr);
+ res->acl_len = attrlen;
+
+ /* Check for receive buffer overflow */
+ if (res->acl_len > xdr_stream_remaining(xdr) ||
+ res->acl_len + res->acl_data_offset > xdr->buf->page_len) {
+ res->acl_flags |= NFS4_ACL_TRUNC;
+ dprintk("NFS: acl reply: attrlen %u > page_len %zu\n",
+ attrlen, xdr_stream_remaining(xdr));
+ }
out:
return status;
}
@@ -6486,7 +6515,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_getacl(xdr, rqstp, res);
+ status = decode_getacl(xdr, rqstp, res, res->acl_type);
out:
return status;
@@ -7051,7 +7080,7 @@ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
if (res->migration) {
xdr_enter_page(xdr, PAGE_SIZE);
status = decode_getfattr_generic(xdr,
- &res->fs_locations->fattr,
+ res->fs_locations->fattr,
NULL, res->fs_locations,
res->fs_locations->server);
if (status)
@@ -7064,7 +7093,7 @@ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
goto out;
xdr_enter_page(xdr, PAGE_SIZE);
status = decode_getfattr_generic(xdr,
- &res->fs_locations->fattr,
+ res->fs_locations->fattr,
NULL, res->fs_locations,
res->fs_locations->server);
}
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 9157dd19b8b4..317cedfa52bf 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -767,6 +767,9 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
.flags = RPC_TASK_ASYNC | flags,
};
+ if (nfs_server_capable(hdr->inode, NFS_CAP_MOVEABLE))
+ task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);
dprintk("NFS: initiated pgio call "
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 856c962273c7..68a87be3e6f9 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -2000,6 +2000,7 @@ lookup_again:
lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
if (lo == NULL) {
spin_unlock(&ino->i_lock);
+ lseg = ERR_PTR(-ENOMEM);
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_NOMEM);
goto out;
@@ -2128,6 +2129,7 @@ lookup_again:
lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &stateid, &arg, gfp_flags);
if (!lgp) {
+ lseg = ERR_PTR(-ENOMEM);
trace_pnfs_update_layout(ino, pos, count, iomode, lo, NULL,
PNFS_UPDATE_LAYOUT_NOMEM);
nfs_layoutget_end(lo);
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 6f325e10056c..9697cd5d2561 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -102,6 +102,10 @@ static void nfs_do_call_unlink(struct inode *inode, struct nfs_unlinkdata *data)
};
struct rpc_task *task;
struct inode *dir = d_inode(data->dentry->d_parent);
+
+ if (nfs_server_capable(inode, NFS_CAP_MOVEABLE))
+ task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
nfs_sb_active(dir->i_sb);
data->args.fh = NFS_FH(dir);
nfs_fattr_init(data->res.dir_attr);
@@ -344,6 +348,10 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
};
+ if (nfs_server_capable(old_dir, NFS_CAP_MOVEABLE) &&
+ nfs_server_capable(new_dir, NFS_CAP_MOVEABLE))
+ task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data == NULL)
return ERR_PTR(-ENOMEM);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index f00d45cf80ef..1c706465d090 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -603,8 +603,9 @@ static void nfs_write_error(struct nfs_page *req, int error)
* Find an associated nfs write request, and prepare to flush it out
* May return an error if the user signalled nfs_wait_on_request().
*/
-static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
- struct page *page)
+static int nfs_page_async_flush(struct page *page,
+ struct writeback_control *wbc,
+ struct nfs_pageio_descriptor *pgio)
{
struct nfs_page *req;
int ret = 0;
@@ -630,11 +631,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
/*
* Remove the problematic req upon fatal errors on the server
*/
- if (nfs_error_is_fatal(ret)) {
- if (nfs_error_is_fatal_on_server(ret))
- goto out_launder;
- } else
- ret = -EAGAIN;
+ if (nfs_error_is_fatal_on_server(ret))
+ goto out_launder;
+ if (wbc->sync_mode == WB_SYNC_NONE)
+ ret = AOP_WRITEPAGE_ACTIVATE;
+ redirty_page_for_writepage(wbc, page);
nfs_redirty_request(req);
pgio->pg_error = 0;
} else
@@ -650,15 +651,8 @@ out_launder:
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
struct nfs_pageio_descriptor *pgio)
{
- int ret;
-
nfs_pageio_cond_complete(pgio, page_index(page));
- ret = nfs_page_async_flush(pgio, page);
- if (ret == -EAGAIN) {
- redirty_page_for_writepage(wbc, page);
- ret = AOP_WRITEPAGE_ACTIVATE;
- }
- return ret;
+ return nfs_page_async_flush(page, wbc, pgio);
}
/*
@@ -681,11 +675,7 @@ static int nfs_writepage_locked(struct page *page,
err = nfs_do_writepage(page, wbc, &pgio);
pgio.pg_error = 0;
nfs_pageio_complete(&pgio);
- if (err < 0)
- return err;
- if (nfs_error_is_fatal(pgio.pg_error))
- return pgio.pg_error;
- return 0;
+ return err;
}
int nfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -737,19 +727,19 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
priority = wb_priority(wbc);
}
- nfs_pageio_init_write(&pgio, inode, priority, false,
- &nfs_async_write_completion_ops);
- pgio.pg_io_completion = ioc;
- err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
- pgio.pg_error = 0;
- nfs_pageio_complete(&pgio);
+ do {
+ nfs_pageio_init_write(&pgio, inode, priority, false,
+ &nfs_async_write_completion_ops);
+ pgio.pg_io_completion = ioc;
+ err = write_cache_pages(mapping, wbc, nfs_writepages_callback,
+ &pgio);
+ pgio.pg_error = 0;
+ nfs_pageio_complete(&pgio);
+ } while (err < 0 && !nfs_error_is_fatal(err));
nfs_io_completion_put(ioc);
if (err < 0)
goto out_err;
- err = pgio.pg_error;
- if (nfs_error_is_fatal(err))
- goto out_err;
return 0;
out_err:
return err;
@@ -1444,7 +1434,7 @@ static void nfs_async_write_error(struct list_head *head, int error)
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- if (nfs_error_is_fatal(error))
+ if (nfs_error_is_fatal_on_server(error))
nfs_write_error(req, error);
else
nfs_redirty_request(req);
@@ -1719,6 +1709,10 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
.flags = RPC_TASK_ASYNC | flags,
.priority = priority,
};
+
+ if (nfs_server_capable(data->inode, NFS_CAP_MOVEABLE))
+ task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
/* Set up the initial task struct. */
nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client);
trace_nfs_initiate_commit(data);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index e1392a9b8ceb..a8abe2296514 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1772,11 +1772,11 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
last_vcn = -1;
do {
VCN vcn;
- pgoff_t idx, start_idx;
+ pgoff_t start_idx;
unsigned ofs, do_pages, u;
size_t copied;
- start_idx = idx = pos >> PAGE_SHIFT;
+ start_idx = pos >> PAGE_SHIFT;
ofs = pos & ~PAGE_MASK;
bytes = PAGE_SIZE - ofs;
do_pages = 1;
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index a4fcdc7927ca..8e9d2b35175f 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -492,7 +492,7 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
down_write(&ni->file.run_lock);
err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
- &new_valid, true, NULL);
+ &new_valid, ni->mi.sbi->options->prealloc, NULL);
up_write(&ni->file.run_lock);
if (new_valid < ni->i_valid)
@@ -659,7 +659,13 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
/*
* Normal file: Allocate clusters, do not change 'valid' size.
*/
- err = ntfs_set_size(inode, max(end, i_size));
+ loff_t new_size = max(end, i_size);
+
+ err = inode_newsize_ok(inode, new_size);
+ if (err)
+ goto out;
+
+ err = ntfs_set_size(inode, new_size);
if (err)
goto out;
@@ -759,7 +765,7 @@ int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
}
inode_dio_wait(inode);
- if (attr->ia_size < oldsize)
+ if (attr->ia_size <= oldsize)
err = ntfs_truncate(inode, attr->ia_size);
else if (attr->ia_size > oldsize)
err = ntfs_extend(inode, attr->ia_size, 0, NULL);
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 6f47a9c17f89..18842998c8fa 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -1964,10 +1964,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
vcn += clen;
- if (vbo + bytes >= end) {
+ if (vbo + bytes >= end)
bytes = end - vbo;
- flags |= FIEMAP_EXTENT_LAST;
- }
if (vbo + bytes <= valid) {
;
@@ -1977,6 +1975,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
/* vbo < valid && valid < vbo + bytes */
u64 dlen = valid - vbo;
+ if (vbo + dlen >= end)
+ flags |= FIEMAP_EXTENT_LAST;
+
err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen,
flags);
if (err < 0)
@@ -1995,6 +1996,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
flags |= FIEMAP_EXTENT_UNWRITTEN;
}
+ if (vbo + bytes >= end)
+ flags |= FIEMAP_EXTENT_LAST;
+
err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags);
if (err < 0)
break;
diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
index 06492f088d60..49b7df616778 100644
--- a/fs/ntfs3/fslog.c
+++ b/fs/ntfs3/fslog.c
@@ -1185,8 +1185,6 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
if (!r_page)
return -ENOMEM;
- memset(info, 0, sizeof(struct restart_info));
-
/* Determine which restart area we are looking for. */
if (first) {
vbo = 0;
@@ -3791,10 +3789,11 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
if (!log)
return -ENOMEM;
+ memset(&rst_info, 0, sizeof(struct restart_info));
+
log->ni = ni;
log->l_size = l_size;
log->one_page_buf = kmalloc(page_size, GFP_NOFS);
-
if (!log->one_page_buf) {
err = -ENOMEM;
goto out;
@@ -3842,6 +3841,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
if (rst_info.vbo)
goto check_restart_area;
+ memset(&rst_info2, 0, sizeof(struct restart_info));
err = log_read_rst(log, l_size, false, &rst_info2);
/* Determine which restart area to use. */
@@ -4085,8 +4085,10 @@ process_log:
if (client == LFS_NO_CLIENT_LE) {
/* Insert "NTFS" client LogFile. */
client = ra->client_idx[0];
- if (client == LFS_NO_CLIENT_LE)
- return -EINVAL;
+ if (client == LFS_NO_CLIENT_LE) {
+ err = -EINVAL;
+ goto out;
+ }
t16 = le16_to_cpu(client);
cr = ca + t16;
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index 74f60c457f28..be4ebdd8048b 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -758,6 +758,7 @@ static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
loff_t vbo = iocb->ki_pos;
loff_t end;
int wr = iov_iter_rw(iter) & WRITE;
+ size_t iter_count = iov_iter_count(iter);
loff_t valid;
ssize_t ret;
@@ -771,10 +772,13 @@ static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
wr ? ntfs_get_block_direct_IO_W
: ntfs_get_block_direct_IO_R);
- if (ret <= 0)
+ if (ret > 0)
+ end = vbo + ret;
+ else if (wr && ret == -EIOCBQUEUED)
+ end = vbo + iter_count;
+ else
goto out;
- end = vbo + ret;
valid = ni->i_valid;
if (wr) {
if (end > valid && !S_ISBLK(inode->i_mode)) {
@@ -1950,6 +1954,7 @@ const struct address_space_operations ntfs_aops = {
.direct_IO = ntfs_direct_IO,
.bmap = ntfs_bmap,
.dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
};
const struct address_space_operations ntfs_aops_cmpr = {
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 5781b9e8e3d8..0c6de6287737 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -668,9 +668,11 @@ static u32 format_size_gb(const u64 bytes, u32 *mb)
static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
{
- return boot->sectors_per_clusters <= 0x80
- ? boot->sectors_per_clusters
- : (1u << (0 - boot->sectors_per_clusters));
+ if (boot->sectors_per_clusters <= 0x80)
+ return boot->sectors_per_clusters;
+ if (boot->sectors_per_clusters >= 0xf4) /* limit shift to 2MB max */
+ return 1U << (0 - boot->sectors_per_clusters);
+ return -EINVAL;
}
/*
@@ -713,6 +715,8 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
/* cluster size: 512, 1K, 2K, 4K, ... 2M */
sct_per_clst = true_sectors_per_clst(boot);
+ if ((int)sct_per_clst < 0)
+ goto out;
if (!is_power_of_2(sct_per_clst))
goto out;
diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
index afd0ddad826f..5e0e0280e70d 100644
--- a/fs/ntfs3/xattr.c
+++ b/fs/ntfs3/xattr.c
@@ -112,7 +112,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
return -ENOMEM;
if (!size) {
- ;
+ /* EA info persists, but the xattr is empty. Looks like an EA problem. */
} else if (attr_ea->non_res) {
struct runs_tree run;
@@ -259,7 +259,7 @@ out:
static noinline int ntfs_set_ea(struct inode *inode, const char *name,
size_t name_len, const void *value,
- size_t val_size, int flags)
+ size_t val_size, int flags, bool locked)
{
struct ntfs_inode *ni = ntfs_i(inode);
struct ntfs_sb_info *sbi = ni->mi.sbi;
@@ -278,7 +278,8 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
u64 new_sz;
void *p;
- ni_lock(ni);
+ if (!locked)
+ ni_lock(ni);
run_init(&ea_run);
@@ -467,7 +468,8 @@ update_ea:
mark_inode_dirty(&ni->vfs_inode);
out:
- ni_unlock(ni);
+ if (!locked)
+ ni_unlock(ni);
run_close(&ea_run);
kfree(ea_all);
@@ -541,7 +543,7 @@ struct posix_acl *ntfs_get_acl(struct inode *inode, int type, bool rcu)
static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
struct inode *inode, struct posix_acl *acl,
- int type)
+ int type, bool init_acl)
{
const char *name;
size_t size, name_len;
@@ -554,8 +556,9 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
switch (type) {
case ACL_TYPE_ACCESS:
- if (acl) {
- umode_t mode = inode->i_mode;
+ /* Do not change i_mode if we are in init_acl */
+ if (acl && !init_acl) {
+ umode_t mode;
err = posix_acl_update_mode(mnt_userns, inode, &mode,
&acl);
@@ -598,7 +601,7 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
flags = 0;
}
- err = ntfs_set_ea(inode, name, name_len, value, size, flags);
+ err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
if (err == -ENODATA && !size)
err = 0; /* Removing non existed xattr. */
if (!err)
@@ -616,7 +619,68 @@ out:
int ntfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
struct posix_acl *acl, int type)
{
- return ntfs_set_acl_ex(mnt_userns, inode, acl, type);
+ return ntfs_set_acl_ex(mnt_userns, inode, acl, type, false);
+}
+
+static int ntfs_xattr_get_acl(struct user_namespace *mnt_userns,
+ struct inode *inode, int type, void *buffer,
+ size_t size)
+{
+ struct posix_acl *acl;
+ int err;
+
+ if (!(inode->i_sb->s_flags & SB_POSIXACL)) {
+ ntfs_inode_warn(inode, "add mount option \"acl\" to use acl");
+ return -EOPNOTSUPP;
+ }
+
+ acl = ntfs_get_acl(inode, type, false);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+
+ if (!acl)
+ return -ENODATA;
+
+ err = posix_acl_to_xattr(mnt_userns, acl, buffer, size);
+ posix_acl_release(acl);
+
+ return err;
+}
+
+static int ntfs_xattr_set_acl(struct user_namespace *mnt_userns,
+ struct inode *inode, int type, const void *value,
+ size_t size)
+{
+ struct posix_acl *acl;
+ int err;
+
+ if (!(inode->i_sb->s_flags & SB_POSIXACL)) {
+ ntfs_inode_warn(inode, "add mount option \"acl\" to use acl");
+ return -EOPNOTSUPP;
+ }
+
+ if (!inode_owner_or_capable(mnt_userns, inode))
+ return -EPERM;
+
+ if (!value) {
+ acl = NULL;
+ } else {
+ acl = posix_acl_from_xattr(mnt_userns, value, size);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+
+ if (acl) {
+ err = posix_acl_valid(mnt_userns, acl);
+ if (err)
+ goto release_and_out;
+ }
+ }
+
+ err = ntfs_set_acl(mnt_userns, inode, acl, type);
+
+release_and_out:
+ posix_acl_release(acl);
+ return err;
}
/*
@@ -636,7 +700,7 @@ int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
if (default_acl) {
err = ntfs_set_acl_ex(mnt_userns, inode, default_acl,
- ACL_TYPE_DEFAULT);
+ ACL_TYPE_DEFAULT, true);
posix_acl_release(default_acl);
} else {
inode->i_default_acl = NULL;
@@ -647,7 +711,7 @@ int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
else {
if (!err)
err = ntfs_set_acl_ex(mnt_userns, inode, acl,
- ACL_TYPE_ACCESS);
+ ACL_TYPE_ACCESS, true);
posix_acl_release(acl);
}
@@ -785,6 +849,23 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
goto out;
}
+#ifdef CONFIG_NTFS3_FS_POSIX_ACL
+ if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
+ !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
+ sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
+ (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
+ !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
+ sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
+ /* TODO: init_user_ns? */
+ err = ntfs_xattr_get_acl(
+ &init_user_ns, inode,
+ name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
+ ? ACL_TYPE_ACCESS
+ : ACL_TYPE_DEFAULT,
+ buffer, size);
+ goto out;
+ }
+#endif
/* Deal with NTFS extended attribute. */
err = ntfs_get_ea(inode, name, name_len, buffer, size, NULL);
@@ -897,10 +978,29 @@ set_new_fa:
goto out;
}
+#ifdef CONFIG_NTFS3_FS_POSIX_ACL
+ if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
+ !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
+ sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
+ (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
+ !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
+ sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
+ err = ntfs_xattr_set_acl(
+ mnt_userns, inode,
+ name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
+ ? ACL_TYPE_ACCESS
+ : ACL_TYPE_DEFAULT,
+ value, size);
+ goto out;
+ }
+#endif
/* Deal with NTFS extended attribute. */
- err = ntfs_set_ea(inode, name, name_len, value, size, flags);
+ err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
out:
+ inode->i_ctime = current_time(inode);
+ mark_inode_dirty(inode);
+
return err;
}
@@ -913,35 +1013,37 @@ int ntfs_save_wsl_perm(struct inode *inode)
{
int err;
__le32 value;
+ struct ntfs_inode *ni = ntfs_i(inode);
- /* TODO: refactor this, so we don't lock 4 times in ntfs_set_ea */
+ ni_lock(ni);
value = cpu_to_le32(i_uid_read(inode));
err = ntfs_set_ea(inode, "$LXUID", sizeof("$LXUID") - 1, &value,
- sizeof(value), 0);
+ sizeof(value), 0, true); /* true == already locked. */
if (err)
goto out;
value = cpu_to_le32(i_gid_read(inode));
err = ntfs_set_ea(inode, "$LXGID", sizeof("$LXGID") - 1, &value,
- sizeof(value), 0);
+ sizeof(value), 0, true);
if (err)
goto out;
value = cpu_to_le32(inode->i_mode);
err = ntfs_set_ea(inode, "$LXMOD", sizeof("$LXMOD") - 1, &value,
- sizeof(value), 0);
+ sizeof(value), 0, true);
if (err)
goto out;
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
value = cpu_to_le32(inode->i_rdev);
err = ntfs_set_ea(inode, "$LXDEV", sizeof("$LXDEV") - 1, &value,
- sizeof(value), 0);
+ sizeof(value), 0, true);
if (err)
goto out;
}
out:
+ ni_unlock(ni);
/* In case of error should we delete all WSL xattr? */
return err;
}
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index d442cf5dda8a..be5e9ed7da8d 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -541,7 +541,7 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
struct debug_lockres *dl = m->private;
struct dlm_ctxt *dlm = dl->dl_ctxt;
struct dlm_lock_resource *oldres = dl->dl_res;
- struct dlm_lock_resource *res = NULL;
+ struct dlm_lock_resource *res = NULL, *iter;
struct list_head *track_list;
spin_lock(&dlm->track_lock);
@@ -556,11 +556,11 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
}
}
- list_for_each_entry(res, track_list, tracking) {
- if (&res->tracking == &dlm->tracking_list)
- res = NULL;
- else
- dlm_lockres_get(res);
+ list_for_each_entry(iter, track_list, tracking) {
+ if (&iter->tracking != &dlm->tracking_list) {
+ dlm_lockres_get(iter);
+ res = iter;
+ }
break;
}
spin_unlock(&dlm->track_lock);
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 61103b2d69fb..7318e4794ef9 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -392,9 +392,9 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
struct dlm_ctxt *dlm = data;
struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
struct dlm_lock_resource *res = NULL;
- struct dlm_lock *lock = NULL;
+ struct dlm_lock *lock = NULL, *iter;
enum dlm_status status = DLM_NORMAL;
- int found = 0, i;
+ int i;
struct dlm_lockstatus *lksb = NULL;
int ignore;
u32 flags;
@@ -437,7 +437,6 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
}
queue=&res->granted;
- found = 0;
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
spin_unlock(&res->spinlock);
@@ -461,21 +460,21 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
}
for (i=0; i<3; i++) {
- list_for_each_entry(lock, queue, list) {
- if (lock->ml.cookie == unlock->cookie &&
- lock->ml.node == unlock->node_idx) {
- dlm_lock_get(lock);
- found = 1;
+ list_for_each_entry(iter, queue, list) {
+ if (iter->ml.cookie == unlock->cookie &&
+ iter->ml.node == unlock->node_idx) {
+ dlm_lock_get(iter);
+ lock = iter;
break;
}
}
- if (found)
+ if (lock)
break;
/* scan granted -> converting -> blocked queues */
queue++;
}
spin_unlock(&res->spinlock);
- if (!found) {
+ if (!lock) {
status = DLM_IVLOCKID;
goto not_found;
}
@@ -505,7 +504,7 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
dlm_kick_thread(dlm, res);
not_found:
- if (!found)
+ if (!lock)
mlog(ML_ERROR, "failed to find lock to unlock! "
"cookie=%u:%llu\n",
dlm_get_lock_cookie_node(be64_to_cpu(unlock->cookie)),
diff --git a/fs/ocfs2/dlmfs/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c
index 29f183a15798..617c92e7b925 100644
--- a/fs/ocfs2/dlmfs/userdlm.c
+++ b/fs/ocfs2/dlmfs/userdlm.c
@@ -433,6 +433,11 @@ again:
}
spin_lock(&lockres->l_lock);
+ if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
+ spin_unlock(&lockres->l_lock);
+ status = -EAGAIN;
+ goto bail;
+ }
/* We only compare against the currently granted level
* here. If the lock is blocked waiting on a downconvert,
@@ -595,7 +600,7 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres)
spin_lock(&lockres->l_lock);
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
spin_unlock(&lockres->l_lock);
- return 0;
+ goto bail;
}
lockres->l_flags |= USER_LOCK_IN_TEARDOWN;
@@ -609,22 +614,30 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres)
}
if (lockres->l_ro_holders || lockres->l_ex_holders) {
+ lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN;
spin_unlock(&lockres->l_lock);
goto bail;
}
status = 0;
if (!(lockres->l_flags & USER_LOCK_ATTACHED)) {
+ /*
+ * The lock was never requested; leave USER_LOCK_IN_TEARDOWN set
+ * to keep new lock requests from coming in.
+ */
spin_unlock(&lockres->l_lock);
goto bail;
}
- lockres->l_flags &= ~USER_LOCK_ATTACHED;
lockres->l_flags |= USER_LOCK_BUSY;
spin_unlock(&lockres->l_lock);
status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK);
if (status) {
+ spin_lock(&lockres->l_lock);
+ lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN;
+ lockres->l_flags &= ~USER_LOCK_BUSY;
+ spin_unlock(&lockres->l_lock);
user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
goto bail;
}
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 5739dc301569..bb116c39b581 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -125,6 +125,7 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
struct inode *inode = NULL;
struct super_block *sb = osb->sb;
struct ocfs2_find_inode_args args;
+ journal_t *journal = osb->journal->j_journal;
trace_ocfs2_iget_begin((unsigned long long)blkno, flags,
sysfile_type);
@@ -171,11 +172,10 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
* part of the transaction - the inode could have been reclaimed and
* now it is reread from disk.
*/
- if (osb->journal) {
+ if (journal) {
transaction_t *transaction;
tid_t tid;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
- journal_t *journal = osb->journal->j_journal;
read_lock(&journal->j_state_lock);
if (journal->j_running_transaction)
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 1887a2708709..fa87d89cf754 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -810,22 +810,20 @@ void ocfs2_set_journal_params(struct ocfs2_super *osb)
write_unlock(&journal->j_state_lock);
}
-int ocfs2_journal_init(struct ocfs2_super *osb, int *dirty)
+/*
+ * Allocate and initialize a skeleton for the journal structure.
+ * ocfs2_journal_init() will make the fs journal-capable.
+ */
+int ocfs2_journal_alloc(struct ocfs2_super *osb)
{
- int status = -1;
- struct inode *inode = NULL; /* the journal inode */
- journal_t *j_journal = NULL;
- struct ocfs2_journal *journal = NULL;
- struct ocfs2_dinode *di = NULL;
- struct buffer_head *bh = NULL;
- int inode_lock = 0;
+ int status = 0;
+ struct ocfs2_journal *journal;
- /* initialize our journal structure */
journal = kzalloc(sizeof(struct ocfs2_journal), GFP_KERNEL);
if (!journal) {
mlog(ML_ERROR, "unable to alloc journal\n");
status = -ENOMEM;
- goto done;
+ goto bail;
}
osb->journal = journal;
journal->j_osb = osb;
@@ -839,6 +837,21 @@ int ocfs2_journal_init(struct ocfs2_super *osb, int *dirty)
INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
journal->j_state = OCFS2_JOURNAL_FREE;
+bail:
+ return status;
+}
+
+int ocfs2_journal_init(struct ocfs2_super *osb, int *dirty)
+{
+ int status = -1;
+ struct inode *inode = NULL; /* the journal inode */
+ journal_t *j_journal = NULL;
+ struct ocfs2_journal *journal = osb->journal;
+ struct ocfs2_dinode *di = NULL;
+ struct buffer_head *bh = NULL;
+ int inode_lock = 0;
+
+ BUG_ON(!journal);
/* already have the inode for our journal */
inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
osb->slot_num);
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 8dcb2f2cadbc..969d0aa28718 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -154,6 +154,7 @@ int ocfs2_compute_replay_slots(struct ocfs2_super *osb);
* Journal Control:
* Initialize, Load, Shutdown, Wipe a journal.
*
+ * ocfs2_journal_alloc - Allocate and initialize the skeleton journal structure.
* ocfs2_journal_init - Initialize journal structures in the OSB.
* ocfs2_journal_load - Load the given journal off disk. Replay it if
* there's transactions still in there.
@@ -167,6 +168,7 @@ int ocfs2_compute_replay_slots(struct ocfs2_super *osb);
* ocfs2_start_checkpoint - Kick the commit thread to do a checkpoint.
*/
void ocfs2_set_journal_params(struct ocfs2_super *osb);
+int ocfs2_journal_alloc(struct ocfs2_super *osb);
int ocfs2_journal_init(struct ocfs2_super *osb, int *dirty);
void ocfs2_journal_shutdown(struct ocfs2_super *osb);
int ocfs2_journal_wipe(struct ocfs2_journal *journal,
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index b1a8b046f4c2..5022b3e9bfcd 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -921,19 +921,19 @@ static struct ocfs2_quota_chunk *ocfs2_find_free_entry(struct super_block *sb,
{
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
- struct ocfs2_quota_chunk *chunk;
+ struct ocfs2_quota_chunk *chunk = NULL, *iter;
struct ocfs2_local_disk_chunk *dchunk;
int found = 0, len;
- list_for_each_entry(chunk, &oinfo->dqi_chunk, qc_chunk) {
+ list_for_each_entry(iter, &oinfo->dqi_chunk, qc_chunk) {
dchunk = (struct ocfs2_local_disk_chunk *)
- chunk->qc_headerbh->b_data;
+ iter->qc_headerbh->b_data;
if (le32_to_cpu(dchunk->dqc_free) > 0) {
- found = 1;
+ chunk = iter;
break;
}
}
- if (!found)
+ if (!chunk)
return NULL;
if (chunk->qc_num < oinfo->dqi_chunks - 1) {
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
index 769e466887b0..a9d1296d736d 100644
--- a/fs/ocfs2/reservations.c
+++ b/fs/ocfs2/reservations.c
@@ -198,7 +198,7 @@ void ocfs2_resv_set_type(struct ocfs2_alloc_reservation *resv,
resv->r_flags |= flags;
}
-int ocfs2_resmap_init(struct ocfs2_super *osb,
+void ocfs2_resmap_init(struct ocfs2_super *osb,
struct ocfs2_reservation_map *resmap)
{
memset(resmap, 0, sizeof(*resmap));
@@ -207,8 +207,6 @@ int ocfs2_resmap_init(struct ocfs2_super *osb,
resmap->m_reservations = RB_ROOT;
/* m_bitmap_len is initialized to zero by the above memset. */
INIT_LIST_HEAD(&resmap->m_lru);
-
- return 0;
}
static void ocfs2_resv_mark_lru(struct ocfs2_reservation_map *resmap,
diff --git a/fs/ocfs2/reservations.h b/fs/ocfs2/reservations.h
index 677c50663595..ec8101ef5717 100644
--- a/fs/ocfs2/reservations.h
+++ b/fs/ocfs2/reservations.h
@@ -73,15 +73,10 @@ void ocfs2_resv_discard(struct ocfs2_reservation_map *resmap,
/**
* ocfs2_resmap_init() - Initialize fields of a reservations bitmap
+ * @osb: struct ocfs2_super to be saved in resmap
* @resmap: struct ocfs2_reservation_map to initialize
- * @obj: unused for now
- * @ops: unused for now
- * @max_bitmap_bytes: Maximum size of the bitmap (typically blocksize)
- *
- * Only possible return value other than '0' is -ENOMEM for failure to
- * allocation mirror bitmap.
*/
-int ocfs2_resmap_init(struct ocfs2_super *osb,
+void ocfs2_resmap_init(struct ocfs2_super *osb,
struct ocfs2_reservation_map *resmap);
/**
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 477cdf94122e..f7298816d8d9 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -989,28 +989,27 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) {
status = -EINVAL;
- goto read_super_error;
+ goto out;
}
/* probe for superblock */
status = ocfs2_sb_probe(sb, &bh, &sector_size, &stats);
if (status < 0) {
mlog(ML_ERROR, "superblock probe failed!\n");
- goto read_super_error;
+ goto out;
}
status = ocfs2_initialize_super(sb, bh, sector_size, &stats);
- osb = OCFS2_SB(sb);
- if (status < 0) {
- mlog_errno(status);
- goto read_super_error;
- }
brelse(bh);
bh = NULL;
+ if (status < 0)
+ goto out;
+
+ osb = OCFS2_SB(sb);
if (!ocfs2_check_set_options(sb, &parsed_options)) {
status = -EINVAL;
- goto read_super_error;
+ goto out_super;
}
osb->s_mount_opt = parsed_options.mount_opt;
osb->s_atime_quantum = parsed_options.atime_quantum;
@@ -1027,7 +1026,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
status = ocfs2_verify_userspace_stack(osb, &parsed_options);
if (status)
- goto read_super_error;
+ goto out_super;
sb->s_magic = OCFS2_SUPER_MAGIC;
@@ -1041,7 +1040,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
status = -EACCES;
mlog(ML_ERROR, "Readonly device detected but readonly "
"mount was not specified.\n");
- goto read_super_error;
+ goto out_super;
}
/* You should not be able to start a local heartbeat
@@ -1050,7 +1049,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
status = -EROFS;
mlog(ML_ERROR, "Local heartbeat specified on readonly "
"device.\n");
- goto read_super_error;
+ goto out_super;
}
status = ocfs2_check_journals_nolocks(osb);
@@ -1059,9 +1058,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
mlog(ML_ERROR, "Recovery required on readonly "
"file system, but write access is "
"unavailable.\n");
- else
- mlog_errno(status);
- goto read_super_error;
+ goto out_super;
}
ocfs2_set_ro_flag(osb, 1);
@@ -1077,10 +1074,8 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
}
status = ocfs2_verify_heartbeat(osb);
- if (status < 0) {
- mlog_errno(status);
- goto read_super_error;
- }
+ if (status < 0)
+ goto out_super;
osb->osb_debug_root = debugfs_create_dir(osb->uuid_str,
ocfs2_debugfs_root);
@@ -1094,15 +1089,14 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
status = ocfs2_mount_volume(sb);
if (status < 0)
- goto read_super_error;
+ goto out_debugfs;
if (osb->root_inode)
inode = igrab(osb->root_inode);
if (!inode) {
status = -EIO;
- mlog_errno(status);
- goto read_super_error;
+ goto out_dismount;
}
osb->osb_dev_kset = kset_create_and_add(sb->s_id, NULL,
@@ -1110,7 +1104,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
if (!osb->osb_dev_kset) {
status = -ENOMEM;
mlog(ML_ERROR, "Unable to create device kset %s.\n", sb->s_id);
- goto read_super_error;
+ goto out_dismount;
}
/* Create filecheck sysfs related directories/files at
@@ -1119,14 +1113,13 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
status = -ENOMEM;
mlog(ML_ERROR, "Unable to create filecheck sysfs directory at "
"/sys/fs/ocfs2/%s/filecheck.\n", sb->s_id);
- goto read_super_error;
+ goto out_dismount;
}
root = d_make_root(inode);
if (!root) {
status = -ENOMEM;
- mlog_errno(status);
- goto read_super_error;
+ goto out_dismount;
}
sb->s_root = root;
@@ -1178,17 +1171,21 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
return status;
-read_super_error:
- brelse(bh);
-
- if (status)
- mlog_errno(status);
+out_dismount:
+ atomic_set(&osb->vol_state, VOLUME_DISABLED);
+ wake_up(&osb->osb_mount_event);
+ ocfs2_dismount_volume(sb, 1);
+ goto out;
- if (osb) {
- atomic_set(&osb->vol_state, VOLUME_DISABLED);
- wake_up(&osb->osb_mount_event);
- ocfs2_dismount_volume(sb, 1);
- }
+out_debugfs:
+ debugfs_remove_recursive(osb->osb_debug_root);
+out_super:
+ ocfs2_release_system_inodes(osb);
+ kfree(osb->recovery_map);
+ ocfs2_delete_osb(osb);
+ kfree(osb);
+out:
+ mlog_errno(status);
return status;
}
@@ -1803,11 +1800,10 @@ static int ocfs2_get_sector(struct super_block *sb,
static int ocfs2_mount_volume(struct super_block *sb)
{
int status = 0;
- int unlock_super = 0;
struct ocfs2_super *osb = OCFS2_SB(sb);
if (ocfs2_is_hard_readonly(osb))
- goto leave;
+ goto out;
mutex_init(&osb->obs_trim_fs_mutex);
@@ -1817,44 +1813,56 @@ static int ocfs2_mount_volume(struct super_block *sb)
if (status == -EBADR && ocfs2_userspace_stack(osb))
mlog(ML_ERROR, "couldn't mount because cluster name on"
" disk does not match the running cluster name.\n");
- goto leave;
+ goto out;
}
status = ocfs2_super_lock(osb, 1);
if (status < 0) {
mlog_errno(status);
- goto leave;
+ goto out_dlm;
}
- unlock_super = 1;
/* This will load up the node map and add ourselves to it. */
status = ocfs2_find_slot(osb);
if (status < 0) {
mlog_errno(status);
- goto leave;
+ goto out_super_lock;
}
/* load all node-local system inodes */
status = ocfs2_init_local_system_inodes(osb);
if (status < 0) {
mlog_errno(status);
- goto leave;
+ goto out_super_lock;
}
status = ocfs2_check_volume(osb);
if (status < 0) {
mlog_errno(status);
- goto leave;
+ goto out_system_inodes;
}
status = ocfs2_truncate_log_init(osb);
- if (status < 0)
+ if (status < 0) {
mlog_errno(status);
+ goto out_system_inodes;
+ }
-leave:
- if (unlock_super)
- ocfs2_super_unlock(osb, 1);
+ ocfs2_super_unlock(osb, 1);
+ return 0;
+out_system_inodes:
+ if (osb->local_alloc_state == OCFS2_LA_ENABLED)
+ ocfs2_shutdown_local_alloc(osb);
+ ocfs2_release_system_inodes(osb);
+ /* before journal shutdown, we should release slot_info */
+ ocfs2_free_slot_info(osb);
+ ocfs2_journal_shutdown(osb);
+out_super_lock:
+ ocfs2_super_unlock(osb, 1);
+out_dlm:
+ ocfs2_dlm_shutdown(osb, 0);
+out:
return status;
}
@@ -2022,7 +2030,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
if (!osb) {
status = -ENOMEM;
mlog_errno(status);
- goto bail;
+ goto out;
}
sb->s_fs_info = osb;
@@ -2083,7 +2091,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
mlog(ML_ERROR, "Invalid number of node slots (%u)\n",
osb->max_slots);
status = -EINVAL;
- goto bail;
+ goto out;
}
ocfs2_orphan_scan_init(osb);
@@ -2092,7 +2100,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
if (status) {
mlog(ML_ERROR, "Unable to initialize recovery state\n");
mlog_errno(status);
- goto bail;
+ goto out;
}
init_waitqueue_head(&osb->checkpoint_event);
@@ -2110,17 +2118,13 @@ static int ocfs2_initialize_super(struct super_block *sb,
init_waitqueue_head(&osb->osb_mount_event);
- status = ocfs2_resmap_init(osb, &osb->osb_la_resmap);
- if (status) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_resmap_init(osb, &osb->osb_la_resmap);
osb->vol_label = kmalloc(OCFS2_MAX_VOL_LABEL_LEN, GFP_KERNEL);
if (!osb->vol_label) {
mlog(ML_ERROR, "unable to alloc vol label\n");
status = -ENOMEM;
- goto bail;
+ goto out_recovery_map;
}
osb->slot_recovery_generations =
@@ -2129,7 +2133,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
if (!osb->slot_recovery_generations) {
status = -ENOMEM;
mlog_errno(status);
- goto bail;
+ goto out_vol_label;
}
init_waitqueue_head(&osb->osb_wipe_event);
@@ -2139,7 +2143,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
if (!osb->osb_orphan_wipes) {
status = -ENOMEM;
mlog_errno(status);
- goto bail;
+ goto out_slot_recovery_gen;
}
osb->osb_rf_lock_tree = RB_ROOT;
@@ -2155,13 +2159,13 @@ static int ocfs2_initialize_super(struct super_block *sb,
mlog(ML_ERROR, "couldn't mount because of unsupported "
"optional features (%x).\n", i);
status = -EINVAL;
- goto bail;
+ goto out_orphan_wipes;
}
if (!sb_rdonly(osb->sb) && (i = OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb, ~OCFS2_FEATURE_RO_COMPAT_SUPP))) {
mlog(ML_ERROR, "couldn't mount RDWR because of "
"unsupported optional features (%x).\n", i);
status = -EINVAL;
- goto bail;
+ goto out_orphan_wipes;
}
if (ocfs2_clusterinfo_valid(osb)) {
@@ -2182,7 +2186,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
"cluster stack label (%s) \n",
osb->osb_cluster_stack);
status = -EINVAL;
- goto bail;
+ goto out_orphan_wipes;
}
memcpy(osb->osb_cluster_name,
OCFS2_RAW_SB(di)->s_cluster_info.ci_cluster,
@@ -2195,6 +2199,15 @@ static int ocfs2_initialize_super(struct super_block *sb,
get_random_bytes(&osb->s_next_generation, sizeof(u32));
+ /*
+ * FIXME
+ * This should be done in ocfs2_journal_init(), but any inode
+ * writeback operation will cause the filesystem to crash.
+ */
+ status = ocfs2_journal_alloc(osb);
+ if (status < 0)
+ goto out_orphan_wipes;
+
INIT_WORK(&osb->dquot_drop_work, ocfs2_drop_dquot_refs);
init_llist_head(&osb->dquot_drop_list);
@@ -2208,7 +2221,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
mlog(ML_ERROR, "Volume has invalid cluster size (%d)\n",
osb->s_clustersize);
status = -EINVAL;
- goto bail;
+ goto out_journal;
}
total_blocks = ocfs2_clusters_to_blocks(osb->sb,
@@ -2220,14 +2233,14 @@ static int ocfs2_initialize_super(struct super_block *sb,
mlog(ML_ERROR, "Volume too large "
"to mount safely on this system");
status = -EFBIG;
- goto bail;
+ goto out_journal;
}
if (ocfs2_setup_osb_uuid(osb, di->id2.i_super.s_uuid,
sizeof(di->id2.i_super.s_uuid))) {
mlog(ML_ERROR, "Out of memory trying to setup our uuid.\n");
status = -ENOMEM;
- goto bail;
+ goto out_journal;
}
strlcpy(osb->vol_label, di->id2.i_super.s_label,
@@ -2247,7 +2260,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
if (!osb->osb_dlm_debug) {
status = -ENOMEM;
mlog_errno(status);
- goto bail;
+ goto out_uuid_str;
}
atomic_set(&osb->vol_state, VOLUME_INIT);
@@ -2256,7 +2269,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
status = ocfs2_init_global_system_inodes(osb);
if (status < 0) {
mlog_errno(status);
- goto bail;
+ goto out_dlm_out;
}
/*
@@ -2267,7 +2280,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
if (!inode) {
status = -EINVAL;
mlog_errno(status);
- goto bail;
+ goto out_system_inodes;
}
osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno;
@@ -2280,16 +2293,39 @@ static int ocfs2_initialize_super(struct super_block *sb,
status = ocfs2_init_slot_info(osb);
if (status < 0) {
mlog_errno(status);
- goto bail;
+ goto out_system_inodes;
}
osb->ocfs2_wq = alloc_ordered_workqueue("ocfs2_wq", WQ_MEM_RECLAIM);
if (!osb->ocfs2_wq) {
status = -ENOMEM;
mlog_errno(status);
+ goto out_slot_info;
}
-bail:
+ return status;
+
+out_slot_info:
+ ocfs2_free_slot_info(osb);
+out_system_inodes:
+ ocfs2_release_system_inodes(osb);
+out_dlm_out:
+ ocfs2_put_dlm_debug(osb->osb_dlm_debug);
+out_uuid_str:
+ kfree(osb->uuid_str);
+out_journal:
+ kfree(osb->journal);
+out_orphan_wipes:
+ kfree(osb->osb_orphan_wipes);
+out_slot_recovery_gen:
+ kfree(osb->slot_recovery_generations);
+out_vol_label:
+ kfree(osb->vol_label);
+out_recovery_map:
+ kfree(osb->recovery_map);
+out:
+ kfree(osb);
+ sb->s_fs_info = NULL;
return status;
}
@@ -2483,6 +2519,12 @@ static void ocfs2_delete_osb(struct ocfs2_super *osb)
kfree(osb->osb_orphan_wipes);
kfree(osb->slot_recovery_generations);
+ /* FIXME
+ * This belongs in journal shutdown, but because we have to
+ * allocate osb->journal in the middle of ocfs2_initialize_super(),
+ * we free it here.
+ */
+ kfree(osb->journal);
kfree(osb->local_alloc_copy);
kfree(osb->uuid_str);
kfree(osb->vol_label);
diff --git a/fs/open.c b/fs/open.c
index be849dcca032..1d57fbde2feb 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -224,6 +224,21 @@ SYSCALL_DEFINE2(ftruncate64, unsigned int, fd, loff_t, length)
}
#endif /* BITS_PER_LONG == 32 */
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_TRUNCATE64)
+COMPAT_SYSCALL_DEFINE3(truncate64, const char __user *, pathname,
+ compat_arg_u64_dual(length))
+{
+ return ksys_truncate(pathname, compat_arg_u64_glue(length));
+}
+#endif
+
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FTRUNCATE64)
+COMPAT_SYSCALL_DEFINE3(ftruncate64, unsigned int, fd,
+ compat_arg_u64_dual(length))
+{
+ return ksys_ftruncate(fd, compat_arg_u64_glue(length));
+}
+#endif
int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
@@ -339,6 +354,15 @@ SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len)
return ksys_fallocate(fd, mode, offset, len);
}
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FALLOCATE)
+COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode, compat_arg_u64_dual(offset),
+ compat_arg_u64_dual(len))
+{
+ return ksys_fallocate(fd, mode, compat_arg_u64_glue(offset),
+ compat_arg_u64_glue(len));
+}
+#endif
+
/*
* access() needs to use the real uid/gid, not the effective uid/gid.
* We do this by temporarily clearing all FS-related capabilities and
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index e040970408d4..714ec569d25b 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -44,9 +44,9 @@ static bool ovl_must_copy_xattr(const char *name)
!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN);
}
-int ovl_copy_xattr(struct super_block *sb, struct dentry *old,
- struct dentry *new)
+int ovl_copy_xattr(struct super_block *sb, struct path *oldpath, struct dentry *new)
{
+ struct dentry *old = oldpath->dentry;
ssize_t list_size, size, value_size = 0;
char *buf, *name, *value = NULL;
int error = 0;
@@ -94,9 +94,9 @@ int ovl_copy_xattr(struct super_block *sb, struct dentry *old,
continue; /* Discard */
}
retry:
- size = vfs_getxattr(&init_user_ns, old, name, value, value_size);
+ size = ovl_do_getxattr(oldpath, name, value, value_size);
if (size == -ERANGE)
- size = vfs_getxattr(&init_user_ns, old, name, NULL, 0);
+ size = ovl_do_getxattr(oldpath, name, NULL, 0);
if (size < 0) {
error = size;
@@ -117,7 +117,7 @@ retry:
goto retry;
}
- error = vfs_setxattr(&init_user_ns, new, name, value, size, 0);
+ error = ovl_do_setxattr(OVL_FS(sb), new, name, value, size, 0);
if (error) {
if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name))
break;
@@ -292,17 +292,19 @@ out_fput:
return error;
}
-static int ovl_set_size(struct dentry *upperdentry, struct kstat *stat)
+static int ovl_set_size(struct ovl_fs *ofs,
+ struct dentry *upperdentry, struct kstat *stat)
{
struct iattr attr = {
.ia_valid = ATTR_SIZE,
.ia_size = stat->size,
};
- return notify_change(&init_user_ns, upperdentry, &attr, NULL);
+ return ovl_do_notify_change(ofs, upperdentry, &attr);
}
-static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
+static int ovl_set_timestamps(struct ovl_fs *ofs, struct dentry *upperdentry,
+ struct kstat *stat)
{
struct iattr attr = {
.ia_valid =
@@ -311,10 +313,11 @@ static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
.ia_mtime = stat->mtime,
};
- return notify_change(&init_user_ns, upperdentry, &attr, NULL);
+ return ovl_do_notify_change(ofs, upperdentry, &attr);
}
-int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
+int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upperdentry,
+ struct kstat *stat)
{
int err = 0;
@@ -323,7 +326,7 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
.ia_valid = ATTR_MODE,
.ia_mode = stat->mode,
};
- err = notify_change(&init_user_ns, upperdentry, &attr, NULL);
+ err = ovl_do_notify_change(ofs, upperdentry, &attr);
}
if (!err) {
struct iattr attr = {
@@ -331,10 +334,10 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
.ia_uid = stat->uid,
.ia_gid = stat->gid,
};
- err = notify_change(&init_user_ns, upperdentry, &attr, NULL);
+ err = ovl_do_notify_change(ofs, upperdentry, &attr);
}
if (!err)
- ovl_set_timestamps(upperdentry, stat);
+ ovl_set_timestamps(ofs, upperdentry, stat);
return err;
}
@@ -433,7 +436,7 @@ static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper,
if (IS_ERR(fh))
return PTR_ERR(fh);
- err = ovl_do_setxattr(ofs, index, OVL_XATTR_UPPER, fh->buf, fh->fb.len);
+ err = ovl_setxattr(ofs, index, OVL_XATTR_UPPER, fh->buf, fh->fb.len);
kfree(fh);
return err;
@@ -474,7 +477,7 @@ static int ovl_create_index(struct dentry *dentry, struct dentry *origin,
if (err)
return err;
- temp = ovl_create_temp(indexdir, OVL_CATTR(S_IFDIR | 0));
+ temp = ovl_create_temp(ofs, indexdir, OVL_CATTR(S_IFDIR | 0));
err = PTR_ERR(temp);
if (IS_ERR(temp))
goto free_name;
@@ -483,16 +486,16 @@ static int ovl_create_index(struct dentry *dentry, struct dentry *origin,
if (err)
goto out;
- index = lookup_one_len(name.name, indexdir, name.len);
+ index = ovl_lookup_upper(ofs, name.name, indexdir, name.len);
if (IS_ERR(index)) {
err = PTR_ERR(index);
} else {
- err = ovl_do_rename(dir, temp, dir, index, 0);
+ err = ovl_do_rename(ofs, dir, temp, dir, index, 0);
dput(index);
}
out:
if (err)
- ovl_cleanup(dir, temp);
+ ovl_cleanup(ofs, dir, temp);
dput(temp);
free_name:
kfree(name.name);
@@ -519,6 +522,7 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
int err;
struct dentry *upper;
struct dentry *upperdir = ovl_dentry_upper(c->parent);
+ struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
struct inode *udir = d_inode(upperdir);
/* Mark parent "impure" because it may now contain non-pure upper */
@@ -531,16 +535,16 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
return err;
inode_lock_nested(udir, I_MUTEX_PARENT);
- upper = lookup_one_len(c->dentry->d_name.name, upperdir,
- c->dentry->d_name.len);
+ upper = ovl_lookup_upper(ofs, c->dentry->d_name.name, upperdir,
+ c->dentry->d_name.len);
err = PTR_ERR(upper);
if (!IS_ERR(upper)) {
- err = ovl_do_link(ovl_dentry_upper(c->dentry), udir, upper);
+ err = ovl_do_link(ofs, ovl_dentry_upper(c->dentry), udir, upper);
dput(upper);
if (!err) {
/* Restore timestamps on parent (best effort) */
- ovl_set_timestamps(upperdir, &c->pstat);
+ ovl_set_timestamps(ofs, upperdir, &c->pstat);
ovl_dentry_set_upper_alias(c->dentry);
}
}
@@ -578,7 +582,7 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
return err;
}
- err = ovl_copy_xattr(c->dentry->d_sb, c->lowerpath.dentry, temp);
+ err = ovl_copy_xattr(c->dentry->d_sb, &c->lowerpath, temp);
if (err)
return err;
@@ -614,9 +618,9 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
inode_lock(temp->d_inode);
if (S_ISREG(c->stat.mode))
- err = ovl_set_size(temp, &c->stat);
+ err = ovl_set_size(ofs, temp, &c->stat);
if (!err)
- err = ovl_set_attr(temp, &c->stat);
+ err = ovl_set_attr(ofs, temp, &c->stat);
inode_unlock(temp->d_inode);
return err;
@@ -656,6 +660,7 @@ static void ovl_revert_cu_creds(struct ovl_cu_creds *cc)
*/
static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
{
+ struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
struct inode *inode;
struct inode *udir = d_inode(c->destdir), *wdir = d_inode(c->workdir);
struct dentry *temp, *upper;
@@ -677,7 +682,7 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
if (err)
goto unlock;
- temp = ovl_create_temp(c->workdir, &cattr);
+ temp = ovl_create_temp(ofs, c->workdir, &cattr);
ovl_revert_cu_creds(&cc);
err = PTR_ERR(temp);
@@ -694,12 +699,13 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
goto cleanup;
}
- upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len);
+ upper = ovl_lookup_upper(ofs, c->destname.name, c->destdir,
+ c->destname.len);
err = PTR_ERR(upper);
if (IS_ERR(upper))
goto cleanup;
- err = ovl_do_rename(wdir, temp, udir, upper, 0);
+ err = ovl_do_rename(ofs, wdir, temp, udir, upper, 0);
dput(upper);
if (err)
goto cleanup;
@@ -716,7 +722,7 @@ unlock:
return err;
cleanup:
- ovl_cleanup(wdir, temp);
+ ovl_cleanup(ofs, wdir, temp);
dput(temp);
goto unlock;
}
@@ -724,6 +730,7 @@ cleanup:
/* Copyup using O_TMPFILE which does not require cross dir locking */
static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
{
+ struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
struct inode *udir = d_inode(c->destdir);
struct dentry *temp, *upper;
struct ovl_cu_creds cc;
@@ -733,7 +740,7 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
if (err)
return err;
- temp = ovl_do_tmpfile(c->workdir, c->stat.mode);
+ temp = ovl_do_tmpfile(ofs, c->workdir, c->stat.mode);
ovl_revert_cu_creds(&cc);
if (IS_ERR(temp))
@@ -745,10 +752,11 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
inode_lock_nested(udir, I_MUTEX_PARENT);
- upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len);
+ upper = ovl_lookup_upper(ofs, c->destname.name, c->destdir,
+ c->destname.len);
err = PTR_ERR(upper);
if (!IS_ERR(upper)) {
- err = ovl_do_link(temp, udir, upper);
+ err = ovl_do_link(ofs, temp, udir, upper);
dput(upper);
}
inode_unlock(udir);
@@ -836,7 +844,7 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
/* Restore timestamps on parent (best effort) */
inode_lock(udir);
- ovl_set_timestamps(c->destdir, &c->pstat);
+ ovl_set_timestamps(ofs, c->destdir, &c->pstat);
inode_unlock(udir);
ovl_dentry_set_upper_alias(c->dentry);
@@ -865,12 +873,12 @@ static bool ovl_need_meta_copy_up(struct dentry *dentry, umode_t mode,
return true;
}
-static ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value)
+static ssize_t ovl_getxattr_value(struct path *path, char *name, char **value)
{
ssize_t res;
char *buf;
- res = vfs_getxattr(&init_user_ns, dentry, name, NULL, 0);
+ res = ovl_do_getxattr(path, name, NULL, 0);
if (res == -ENODATA || res == -EOPNOTSUPP)
res = 0;
@@ -879,7 +887,7 @@ static ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value)
if (!buf)
return -ENOMEM;
- res = vfs_getxattr(&init_user_ns, dentry, name, buf, res);
+ res = ovl_do_getxattr(path, name, buf, res);
if (res < 0)
kfree(buf);
else
@@ -906,8 +914,8 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
return -EIO;
if (c->stat.size) {
- err = cap_size = ovl_getxattr(upperpath.dentry, XATTR_NAME_CAPS,
- &capability);
+ err = cap_size = ovl_getxattr_value(&upperpath, XATTR_NAME_CAPS,
+ &capability);
if (cap_size < 0)
goto out;
}
@@ -921,14 +929,14 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
* don't want that to happen for normal copy-up operation.
*/
if (capability) {
- err = vfs_setxattr(&init_user_ns, upperpath.dentry,
- XATTR_NAME_CAPS, capability, cap_size, 0);
+ err = ovl_do_setxattr(ofs, upperpath.dentry, XATTR_NAME_CAPS,
+ capability, cap_size, 0);
if (err)
goto out_free;
}
- err = ovl_do_removexattr(ofs, upperpath.dentry, OVL_XATTR_METACOPY);
+ err = ovl_removexattr(ofs, upperpath.dentry, OVL_XATTR_METACOPY);
if (err)
goto out_free;
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index f18490813170..6b03457f72bb 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -23,15 +23,15 @@ MODULE_PARM_DESC(redirect_max,
static int ovl_set_redirect(struct dentry *dentry, bool samedir);
-int ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
+int ovl_cleanup(struct ovl_fs *ofs, struct inode *wdir, struct dentry *wdentry)
{
int err;
dget(wdentry);
if (d_is_dir(wdentry))
- err = ovl_do_rmdir(wdir, wdentry);
+ err = ovl_do_rmdir(ofs, wdir, wdentry);
else
- err = ovl_do_unlink(wdir, wdentry);
+ err = ovl_do_unlink(ofs, wdir, wdentry);
dput(wdentry);
if (err) {
@@ -42,7 +42,7 @@ int ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
return err;
}
-struct dentry *ovl_lookup_temp(struct dentry *workdir)
+struct dentry *ovl_lookup_temp(struct ovl_fs *ofs, struct dentry *workdir)
{
struct dentry *temp;
char name[20];
@@ -51,7 +51,7 @@ struct dentry *ovl_lookup_temp(struct dentry *workdir)
/* counter is allowed to wrap, since temp dentries are ephemeral */
snprintf(name, sizeof(name), "#%x", atomic_inc_return(&temp_id));
- temp = lookup_one_len(name, workdir, strlen(name));
+ temp = ovl_lookup_upper(ofs, name, workdir, strlen(name));
if (!IS_ERR(temp) && temp->d_inode) {
pr_err("workdir/%s already exists\n", name);
dput(temp);
@@ -70,11 +70,11 @@ static struct dentry *ovl_whiteout(struct ovl_fs *ofs)
struct inode *wdir = workdir->d_inode;
if (!ofs->whiteout) {
- whiteout = ovl_lookup_temp(workdir);
+ whiteout = ovl_lookup_temp(ofs, workdir);
if (IS_ERR(whiteout))
goto out;
- err = ovl_do_whiteout(wdir, whiteout);
+ err = ovl_do_whiteout(ofs, wdir, whiteout);
if (err) {
dput(whiteout);
whiteout = ERR_PTR(err);
@@ -84,11 +84,11 @@ static struct dentry *ovl_whiteout(struct ovl_fs *ofs)
}
if (ofs->share_whiteout) {
- whiteout = ovl_lookup_temp(workdir);
+ whiteout = ovl_lookup_temp(ofs, workdir);
if (IS_ERR(whiteout))
goto out;
- err = ovl_do_link(ofs->whiteout, wdir, whiteout);
+ err = ovl_do_link(ofs, ofs->whiteout, wdir, whiteout);
if (!err)
goto out;
@@ -122,27 +122,28 @@ int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct inode *dir,
if (d_is_dir(dentry))
flags = RENAME_EXCHANGE;
- err = ovl_do_rename(wdir, whiteout, dir, dentry, flags);
+ err = ovl_do_rename(ofs, wdir, whiteout, dir, dentry, flags);
if (err)
goto kill_whiteout;
if (flags)
- ovl_cleanup(wdir, dentry);
+ ovl_cleanup(ofs, wdir, dentry);
out:
dput(whiteout);
return err;
kill_whiteout:
- ovl_cleanup(wdir, whiteout);
+ ovl_cleanup(ofs, wdir, whiteout);
goto out;
}
-int ovl_mkdir_real(struct inode *dir, struct dentry **newdentry, umode_t mode)
+int ovl_mkdir_real(struct ovl_fs *ofs, struct inode *dir,
+ struct dentry **newdentry, umode_t mode)
{
int err;
struct dentry *d, *dentry = *newdentry;
- err = ovl_do_mkdir(dir, dentry, mode);
+ err = ovl_do_mkdir(ofs, dir, dentry, mode);
if (err)
return err;
@@ -154,8 +155,8 @@ int ovl_mkdir_real(struct inode *dir, struct dentry **newdentry, umode_t mode)
* to it unhashed and negative. If that happens, try to
* lookup a new hashed and positive dentry.
*/
- d = lookup_one_len(dentry->d_name.name, dentry->d_parent,
- dentry->d_name.len);
+ d = ovl_lookup_upper(ofs, dentry->d_name.name, dentry->d_parent,
+ dentry->d_name.len);
if (IS_ERR(d)) {
pr_warn("failed lookup after mkdir (%pd2, err=%i).\n",
dentry, err);
@@ -167,8 +168,8 @@ int ovl_mkdir_real(struct inode *dir, struct dentry **newdentry, umode_t mode)
return 0;
}
-struct dentry *ovl_create_real(struct inode *dir, struct dentry *newdentry,
- struct ovl_cattr *attr)
+struct dentry *ovl_create_real(struct ovl_fs *ofs, struct inode *dir,
+ struct dentry *newdentry, struct ovl_cattr *attr)
{
int err;
@@ -180,28 +181,28 @@ struct dentry *ovl_create_real(struct inode *dir, struct dentry *newdentry,
goto out;
if (attr->hardlink) {
- err = ovl_do_link(attr->hardlink, dir, newdentry);
+ err = ovl_do_link(ofs, attr->hardlink, dir, newdentry);
} else {
switch (attr->mode & S_IFMT) {
case S_IFREG:
- err = ovl_do_create(dir, newdentry, attr->mode);
+ err = ovl_do_create(ofs, dir, newdentry, attr->mode);
break;
case S_IFDIR:
/* mkdir is special... */
- err = ovl_mkdir_real(dir, &newdentry, attr->mode);
+ err = ovl_mkdir_real(ofs, dir, &newdentry, attr->mode);
break;
case S_IFCHR:
case S_IFBLK:
case S_IFIFO:
case S_IFSOCK:
- err = ovl_do_mknod(dir, newdentry, attr->mode,
+ err = ovl_do_mknod(ofs, dir, newdentry, attr->mode,
attr->rdev);
break;
case S_IFLNK:
- err = ovl_do_symlink(dir, newdentry, attr->link);
+ err = ovl_do_symlink(ofs, dir, newdentry, attr->link);
break;
default:
@@ -223,10 +224,11 @@ out:
return newdentry;
}
-struct dentry *ovl_create_temp(struct dentry *workdir, struct ovl_cattr *attr)
+struct dentry *ovl_create_temp(struct ovl_fs *ofs, struct dentry *workdir,
+ struct ovl_cattr *attr)
{
- return ovl_create_real(d_inode(workdir), ovl_lookup_temp(workdir),
- attr);
+ return ovl_create_real(ofs, d_inode(workdir),
+ ovl_lookup_temp(ofs, workdir), attr);
}
static int ovl_set_opaque_xerr(struct dentry *dentry, struct dentry *upper,
@@ -330,10 +332,9 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
attr->mode &= ~current_umask();
inode_lock_nested(udir, I_MUTEX_PARENT);
- newdentry = ovl_create_real(udir,
- lookup_one_len(dentry->d_name.name,
- upperdir,
- dentry->d_name.len),
+ newdentry = ovl_create_real(ofs, udir,
+ ovl_lookup_upper(ofs, dentry->d_name.name,
+ upperdir, dentry->d_name.len),
attr);
err = PTR_ERR(newdentry);
if (IS_ERR(newdentry))
@@ -353,7 +354,7 @@ out_unlock:
return err;
out_cleanup:
- ovl_cleanup(udir, newdentry);
+ ovl_cleanup(ofs, udir, newdentry);
dput(newdentry);
goto out_unlock;
}
@@ -361,6 +362,7 @@ out_cleanup:
static struct dentry *ovl_clear_empty(struct dentry *dentry,
struct list_head *list)
{
+ struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
struct dentry *workdir = ovl_workdir(dentry);
struct inode *wdir = workdir->d_inode;
struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
@@ -391,12 +393,12 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
if (upper->d_parent->d_inode != udir)
goto out_unlock;
- opaquedir = ovl_create_temp(workdir, OVL_CATTR(stat.mode));
+ opaquedir = ovl_create_temp(ofs, workdir, OVL_CATTR(stat.mode));
err = PTR_ERR(opaquedir);
if (IS_ERR(opaquedir))
goto out_unlock;
- err = ovl_copy_xattr(dentry->d_sb, upper, opaquedir);
+ err = ovl_copy_xattr(dentry->d_sb, &upperpath, opaquedir);
if (err)
goto out_cleanup;
@@ -405,17 +407,17 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
goto out_cleanup;
inode_lock(opaquedir->d_inode);
- err = ovl_set_attr(opaquedir, &stat);
+ err = ovl_set_attr(ofs, opaquedir, &stat);
inode_unlock(opaquedir->d_inode);
if (err)
goto out_cleanup;
- err = ovl_do_rename(wdir, opaquedir, udir, upper, RENAME_EXCHANGE);
+ err = ovl_do_rename(ofs, wdir, opaquedir, udir, upper, RENAME_EXCHANGE);
if (err)
goto out_cleanup;
- ovl_cleanup_whiteouts(upper, list);
- ovl_cleanup(wdir, upper);
+ ovl_cleanup_whiteouts(ofs, upper, list);
+ ovl_cleanup(ofs, wdir, upper);
unlock_rename(workdir, upperdir);
/* dentry's upper doesn't match now, get rid of it */
@@ -424,7 +426,7 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
return opaquedir;
out_cleanup:
- ovl_cleanup(wdir, opaquedir);
+ ovl_cleanup(ofs, wdir, opaquedir);
dput(opaquedir);
out_unlock:
unlock_rename(workdir, upperdir);
@@ -432,8 +434,8 @@ out:
return ERR_PTR(err);
}
-static int ovl_set_upper_acl(struct dentry *upperdentry, const char *name,
- const struct posix_acl *acl)
+static int ovl_set_upper_acl(struct ovl_fs *ofs, struct dentry *upperdentry,
+ const char *name, const struct posix_acl *acl)
{
void *buffer;
size_t size;
@@ -451,7 +453,7 @@ static int ovl_set_upper_acl(struct dentry *upperdentry, const char *name,
if (err < 0)
goto out_free;
- err = vfs_setxattr(&init_user_ns, upperdentry, name, buffer, size, XATTR_CREATE);
+ err = ovl_do_setxattr(ofs, upperdentry, name, buffer, size, XATTR_CREATE);
out_free:
kfree(buffer);
return err;
@@ -460,6 +462,7 @@ out_free:
static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
struct ovl_cattr *cattr)
{
+ struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
struct dentry *workdir = ovl_workdir(dentry);
struct inode *wdir = workdir->d_inode;
struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
@@ -484,8 +487,8 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
if (err)
goto out;
- upper = lookup_one_len(dentry->d_name.name, upperdir,
- dentry->d_name.len);
+ upper = ovl_lookup_upper(ofs, dentry->d_name.name, upperdir,
+ dentry->d_name.len);
err = PTR_ERR(upper);
if (IS_ERR(upper))
goto out_unlock;
@@ -494,7 +497,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
if (d_is_negative(upper) || !IS_WHITEOUT(d_inode(upper)))
goto out_dput;
- newdentry = ovl_create_temp(workdir, cattr);
+ newdentry = ovl_create_temp(ofs, workdir, cattr);
err = PTR_ERR(newdentry);
if (IS_ERR(newdentry))
goto out_dput;
@@ -510,19 +513,19 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
.ia_mode = cattr->mode,
};
inode_lock(newdentry->d_inode);
- err = notify_change(&init_user_ns, newdentry, &attr, NULL);
+ err = ovl_do_notify_change(ofs, newdentry, &attr);
inode_unlock(newdentry->d_inode);
if (err)
goto out_cleanup;
}
if (!hardlink) {
- err = ovl_set_upper_acl(newdentry, XATTR_NAME_POSIX_ACL_ACCESS,
- acl);
+ err = ovl_set_upper_acl(ofs, newdentry,
+ XATTR_NAME_POSIX_ACL_ACCESS, acl);
if (err)
goto out_cleanup;
- err = ovl_set_upper_acl(newdentry, XATTR_NAME_POSIX_ACL_DEFAULT,
- default_acl);
+ err = ovl_set_upper_acl(ofs, newdentry,
+ XATTR_NAME_POSIX_ACL_DEFAULT, default_acl);
if (err)
goto out_cleanup;
}
@@ -532,20 +535,20 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
if (err)
goto out_cleanup;
- err = ovl_do_rename(wdir, newdentry, udir, upper,
+ err = ovl_do_rename(ofs, wdir, newdentry, udir, upper,
RENAME_EXCHANGE);
if (err)
goto out_cleanup;
- ovl_cleanup(wdir, upper);
+ ovl_cleanup(ofs, wdir, upper);
} else {
- err = ovl_do_rename(wdir, newdentry, udir, upper, 0);
+ err = ovl_do_rename(ofs, wdir, newdentry, udir, upper, 0);
if (err)
goto out_cleanup;
}
err = ovl_instantiate(dentry, inode, newdentry, hardlink);
if (err) {
- ovl_cleanup(udir, newdentry);
+ ovl_cleanup(ofs, udir, newdentry);
dput(newdentry);
}
out_dput:
@@ -560,7 +563,7 @@ out:
return err;
out_cleanup:
- ovl_cleanup(wdir, newdentry);
+ ovl_cleanup(ofs, wdir, newdentry);
dput(newdentry);
goto out_dput;
}
@@ -767,8 +770,8 @@ static int ovl_remove_and_whiteout(struct dentry *dentry,
if (err)
goto out_dput;
- upper = lookup_one_len(dentry->d_name.name, upperdir,
- dentry->d_name.len);
+ upper = ovl_lookup_upper(ofs, dentry->d_name.name, upperdir,
+ dentry->d_name.len);
err = PTR_ERR(upper);
if (IS_ERR(upper))
goto out_unlock;
@@ -800,6 +803,7 @@ out:
static int ovl_remove_upper(struct dentry *dentry, bool is_dir,
struct list_head *list)
{
+ struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
struct inode *dir = upperdir->d_inode;
struct dentry *upper;
@@ -814,8 +818,8 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir,
}
inode_lock_nested(dir, I_MUTEX_PARENT);
- upper = lookup_one_len(dentry->d_name.name, upperdir,
- dentry->d_name.len);
+ upper = ovl_lookup_upper(ofs, dentry->d_name.name, upperdir,
+ dentry->d_name.len);
err = PTR_ERR(upper);
if (IS_ERR(upper))
goto out_unlock;
@@ -826,9 +830,9 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir,
goto out_dput_upper;
if (is_dir)
- err = vfs_rmdir(&init_user_ns, dir, upper);
+ err = ovl_do_rmdir(ofs, dir, upper);
else
- err = vfs_unlink(&init_user_ns, dir, upper, NULL);
+ err = ovl_do_unlink(ofs, dir, upper);
ovl_dir_modified(dentry->d_parent, ovl_type_origin(dentry));
/*
@@ -880,7 +884,6 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
{
int err;
const struct cred *old_cred;
- struct dentry *upperdentry;
bool lower_positive = ovl_lower_positive(dentry);
LIST_HEAD(list);
@@ -923,9 +926,8 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
* Note: we fail to update ctime if there was no copy-up, only a
* whiteout
*/
- upperdentry = ovl_dentry_upper(dentry);
- if (upperdentry)
- ovl_copyattr(d_inode(upperdentry), d_inode(dentry));
+ if (ovl_dentry_upper(dentry))
+ ovl_copyattr(d_inode(dentry));
out_drop_write:
ovl_drop_write(dentry);
@@ -1095,6 +1097,7 @@ static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir,
bool samedir = olddir == newdir;
struct dentry *opaquedir = NULL;
const struct cred *old_cred = NULL;
+ struct ovl_fs *ofs = OVL_FS(old->d_sb);
LIST_HEAD(list);
err = -EINVAL;
@@ -1189,8 +1192,8 @@ static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir,
trap = lock_rename(new_upperdir, old_upperdir);
- olddentry = lookup_one_len(old->d_name.name, old_upperdir,
- old->d_name.len);
+ olddentry = ovl_lookup_upper(ofs, old->d_name.name, old_upperdir,
+ old->d_name.len);
err = PTR_ERR(olddentry);
if (IS_ERR(olddentry))
goto out_unlock;
@@ -1199,8 +1202,8 @@ static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir,
if (!ovl_matches_upper(old, olddentry))
goto out_dput_old;
- newdentry = lookup_one_len(new->d_name.name, new_upperdir,
- new->d_name.len);
+ newdentry = ovl_lookup_upper(ofs, new->d_name.name, new_upperdir,
+ new->d_name.len);
err = PTR_ERR(newdentry);
if (IS_ERR(newdentry))
goto out_dput_old;
@@ -1251,13 +1254,13 @@ static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir,
if (err)
goto out_dput;
- err = ovl_do_rename(old_upperdir->d_inode, olddentry,
+ err = ovl_do_rename(ofs, old_upperdir->d_inode, olddentry,
new_upperdir->d_inode, newdentry, flags);
if (err)
goto out_dput;
if (cleanup_whiteout)
- ovl_cleanup(old_upperdir->d_inode, newdentry);
+ ovl_cleanup(ofs, old_upperdir->d_inode, newdentry);
if (overwrite && d_inode(new)) {
if (new_is_dir)
@@ -1272,9 +1275,9 @@ static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir,
(d_inode(new) && ovl_type_origin(new)));
/* copy ctime: */
- ovl_copyattr(d_inode(olddentry), d_inode(old));
+ ovl_copyattr(d_inode(old));
if (d_inode(new) && ovl_dentry_upper(new))
- ovl_copyattr(d_inode(newdentry), d_inode(new));
+ ovl_copyattr(d_inode(new));
out_dput:
dput(newdentry);
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index ebde05c9cf62..2eada97bbd23 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -391,6 +391,11 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
* pointer because we hold no lock on the real dentry.
*/
take_dentry_name_snapshot(&name, real);
+ /*
+ * No mnt_userns handling here: it's an internal lookup. Could skip
+ * permission checking altogether, but for now just use non-mnt_userns
+ * transformed ids.
+ */
this = lookup_one_len(name.name.name, connected, name.name.len);
release_dentry_name_snapshot(&name);
err = PTR_ERR(this);
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 9d69b4dbb8c4..daff601b5c41 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -38,9 +38,11 @@ static char ovl_whatisit(struct inode *inode, struct inode *realinode)
#define OVL_OPEN_FLAGS (O_NOATIME | FMODE_NONOTIFY)
static struct file *ovl_open_realfile(const struct file *file,
- struct inode *realinode)
+ struct path *realpath)
{
+ struct inode *realinode = d_inode(realpath->dentry);
struct inode *inode = file_inode(file);
+ struct user_namespace *real_mnt_userns;
struct file *realfile;
const struct cred *old_cred;
int flags = file->f_flags | OVL_OPEN_FLAGS;
@@ -51,11 +53,12 @@ static struct file *ovl_open_realfile(const struct file *file,
acc_mode |= MAY_APPEND;
old_cred = ovl_override_creds(inode->i_sb);
- err = inode_permission(&init_user_ns, realinode, MAY_OPEN | acc_mode);
+ real_mnt_userns = mnt_user_ns(realpath->mnt);
+ err = inode_permission(real_mnt_userns, realinode, MAY_OPEN | acc_mode);
if (err) {
realfile = ERR_PTR(err);
} else {
- if (!inode_owner_or_capable(&init_user_ns, realinode))
+ if (!inode_owner_or_capable(real_mnt_userns, realinode))
flags &= ~O_NOATIME;
realfile = open_with_fake_path(&file->f_path, flags, realinode,
@@ -101,21 +104,21 @@ static int ovl_change_flags(struct file *file, unsigned int flags)
static int ovl_real_fdget_meta(const struct file *file, struct fd *real,
bool allow_meta)
{
- struct inode *inode = file_inode(file);
- struct inode *realinode;
+ struct dentry *dentry = file_dentry(file);
+ struct path realpath;
real->flags = 0;
real->file = file->private_data;
if (allow_meta)
- realinode = ovl_inode_real(inode);
+ ovl_path_real(dentry, &realpath);
else
- realinode = ovl_inode_realdata(inode);
+ ovl_path_realdata(dentry, &realpath);
/* Has it been copied up since we'd opened it? */
- if (unlikely(file_inode(real->file) != realinode)) {
+ if (unlikely(file_inode(real->file) != d_inode(realpath.dentry))) {
real->flags = FDPUT_FPUT;
- real->file = ovl_open_realfile(file, realinode);
+ real->file = ovl_open_realfile(file, &realpath);
return PTR_ERR_OR_ZERO(real->file);
}
@@ -141,17 +144,20 @@ static int ovl_real_fdget(const struct file *file, struct fd *real)
static int ovl_open(struct inode *inode, struct file *file)
{
+ struct dentry *dentry = file_dentry(file);
struct file *realfile;
+ struct path realpath;
int err;
- err = ovl_maybe_copy_up(file_dentry(file), file->f_flags);
+ err = ovl_maybe_copy_up(dentry, file->f_flags);
if (err)
return err;
/* No longer need these flags, so don't pass them on to underlying fs */
file->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
- realfile = ovl_open_realfile(file, ovl_inode_realdata(inode));
+ ovl_path_realdata(dentry, &realpath);
+ realfile = ovl_open_realfile(file, &realpath);
if (IS_ERR(realfile))
return PTR_ERR(realfile);
@@ -270,7 +276,7 @@ static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
__sb_writers_acquired(file_inode(iocb->ki_filp)->i_sb,
SB_FREEZE_WRITE);
file_end_write(iocb->ki_filp);
- ovl_copyattr(ovl_inode_real(inode), inode);
+ ovl_copyattr(inode);
}
orig_iocb->ki_pos = iocb->ki_pos;
@@ -352,7 +358,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
inode_lock(inode);
/* Update mode */
- ovl_copyattr(ovl_inode_real(inode), inode);
+ ovl_copyattr(inode);
ret = file_remove_privs(file);
if (ret)
goto out_unlock;
@@ -376,7 +382,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
ovl_iocb_to_rwf(ifl));
file_end_write(real.file);
/* Update size */
- ovl_copyattr(ovl_inode_real(inode), inode);
+ ovl_copyattr(inode);
} else {
struct ovl_aio_req *aio_req;
@@ -426,12 +432,11 @@ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
struct fd real;
const struct cred *old_cred;
struct inode *inode = file_inode(out);
- struct inode *realinode = ovl_inode_real(inode);
ssize_t ret;
inode_lock(inode);
/* Update mode */
- ovl_copyattr(realinode, inode);
+ ovl_copyattr(inode);
ret = file_remove_privs(out);
if (ret)
goto out_unlock;
@@ -447,7 +452,7 @@ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
file_end_write(real.file);
/* Update size */
- ovl_copyattr(realinode, inode);
+ ovl_copyattr(inode);
revert_creds(old_cred);
fdput(real);
@@ -521,7 +526,7 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
revert_creds(old_cred);
/* Update size */
- ovl_copyattr(ovl_inode_real(inode), inode);
+ ovl_copyattr(inode);
fdput(real);
@@ -593,7 +598,7 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in,
revert_creds(old_cred);
/* Update size */
- ovl_copyattr(ovl_inode_real(inode_out), inode_out);
+ ovl_copyattr(inode_out);
fdput(real_in);
fdput(real_out);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 1f36158c7dbe..492eddeb481f 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -21,6 +21,7 @@ int ovl_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
struct iattr *attr)
{
int err;
+ struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
bool full_copy_up = false;
struct dentry *upperdentry;
const struct cred *old_cred;
@@ -77,10 +78,10 @@ int ovl_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
inode_lock(upperdentry->d_inode);
old_cred = ovl_override_creds(dentry->d_sb);
- err = notify_change(&init_user_ns, upperdentry, attr, NULL);
+ err = ovl_do_notify_change(ofs, upperdentry, attr);
revert_creds(old_cred);
if (!err)
- ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
+ ovl_copyattr(dentry->d_inode);
inode_unlock(upperdentry->d_inode);
if (winode)
@@ -279,12 +280,14 @@ int ovl_permission(struct user_namespace *mnt_userns,
struct inode *inode, int mask)
{
struct inode *upperinode = ovl_inode_upper(inode);
- struct inode *realinode = upperinode ?: ovl_inode_lower(inode);
+ struct inode *realinode;
+ struct path realpath;
const struct cred *old_cred;
int err;
/* Careful in RCU walk mode */
- if (!realinode) {
+ ovl_i_path_real(inode, &realpath);
+ if (!realpath.dentry) {
WARN_ON(!(mask & MAY_NOT_BLOCK));
return -ECHILD;
}
@@ -297,6 +300,7 @@ int ovl_permission(struct user_namespace *mnt_userns,
if (err)
return err;
+ realinode = d_inode(realpath.dentry);
old_cred = ovl_override_creds(inode->i_sb);
if (!upperinode &&
!special_file(realinode->i_mode) && mask & MAY_WRITE) {
@@ -304,7 +308,7 @@ int ovl_permission(struct user_namespace *mnt_userns,
/* Make sure mounter can read file for copy up later */
mask |= MAY_READ;
}
- err = inode_permission(&init_user_ns, realinode, mask);
+ err = inode_permission(mnt_user_ns(realpath.mnt), realinode, mask);
revert_creds(old_cred);
return err;
@@ -342,8 +346,10 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
int err;
+ struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
struct dentry *upperdentry = ovl_i_dentry_upper(inode);
struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry);
+ struct path realpath;
const struct cred *old_cred;
err = ovl_want_write(dentry);
@@ -351,8 +357,9 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
goto out;
if (!value && !upperdentry) {
+ ovl_path_lower(dentry, &realpath);
old_cred = ovl_override_creds(dentry->d_sb);
- err = vfs_getxattr(&init_user_ns, realdentry, name, NULL, 0);
+ err = vfs_getxattr(mnt_user_ns(realpath.mnt), realdentry, name, NULL, 0);
revert_creds(old_cred);
if (err < 0)
goto out_drop_write;
@@ -367,17 +374,17 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
}
old_cred = ovl_override_creds(dentry->d_sb);
- if (value)
- err = vfs_setxattr(&init_user_ns, realdentry, name, value, size,
- flags);
- else {
+ if (value) {
+ err = ovl_do_setxattr(ofs, realdentry, name, value, size,
+ flags);
+ } else {
WARN_ON(flags != XATTR_REPLACE);
- err = vfs_removexattr(&init_user_ns, realdentry, name);
+ err = ovl_do_removexattr(ofs, realdentry, name);
}
revert_creds(old_cred);
/* copy c/mtime */
- ovl_copyattr(d_inode(realdentry), inode);
+ ovl_copyattr(inode);
out_drop_write:
ovl_drop_write(dentry);
@@ -390,11 +397,11 @@ int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
{
ssize_t res;
const struct cred *old_cred;
- struct dentry *realdentry =
- ovl_i_dentry_upper(inode) ?: ovl_dentry_lower(dentry);
+ struct path realpath;
+ ovl_i_path_real(inode, &realpath);
old_cred = ovl_override_creds(dentry->d_sb);
- res = vfs_getxattr(&init_user_ns, realdentry, name, value, size);
+ res = vfs_getxattr(mnt_user_ns(realpath.mnt), realpath.dentry, name, value, size);
revert_creds(old_cred);
return res;
}
@@ -535,7 +542,7 @@ int ovl_real_fileattr_set(struct path *realpath, struct fileattr *fa)
if (err)
return err;
- return vfs_fileattr_set(&init_user_ns, realpath->dentry, fa);
+ return vfs_fileattr_set(mnt_user_ns(realpath->mnt), realpath->dentry, fa);
}
int ovl_fileattr_set(struct user_namespace *mnt_userns,
@@ -579,7 +586,7 @@ int ovl_fileattr_set(struct user_namespace *mnt_userns,
inode_set_flags(inode, flags, OVL_COPY_I_FLAGS_MASK);
/* Update ctime */
- ovl_copyattr(ovl_inode_real(inode), inode);
+ ovl_copyattr(inode);
}
ovl_drop_write(dentry);
out:
@@ -777,16 +784,19 @@ void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip,
unsigned long ino, int fsid)
{
struct inode *realinode;
+ struct ovl_inode *oi = OVL_I(inode);
if (oip->upperdentry)
- OVL_I(inode)->__upperdentry = oip->upperdentry;
- if (oip->lowerpath && oip->lowerpath->dentry)
- OVL_I(inode)->lower = igrab(d_inode(oip->lowerpath->dentry));
+ oi->__upperdentry = oip->upperdentry;
+ if (oip->lowerpath && oip->lowerpath->dentry) {
+ oi->lowerpath.dentry = dget(oip->lowerpath->dentry);
+ oi->lowerpath.layer = oip->lowerpath->layer;
+ }
if (oip->lowerdata)
- OVL_I(inode)->lowerdata = igrab(d_inode(oip->lowerdata));
+ oi->lowerdata = igrab(d_inode(oip->lowerdata));
realinode = ovl_inode_real(inode);
- ovl_copyattr(realinode, inode);
+ ovl_copyattr(inode);
ovl_copyflags(realinode, inode);
ovl_map_ino(inode, ino, fsid);
}
@@ -871,8 +881,8 @@ static int ovl_set_nlink_common(struct dentry *dentry,
if (WARN_ON(len >= sizeof(buf)))
return -EIO;
- return ovl_do_setxattr(OVL_FS(inode->i_sb), ovl_dentry_upper(dentry),
- OVL_XATTR_NLINK, buf, len);
+ return ovl_setxattr(OVL_FS(inode->i_sb), ovl_dentry_upper(dentry),
+ OVL_XATTR_NLINK, buf, len);
}
int ovl_set_nlink_upper(struct dentry *dentry)
@@ -897,8 +907,8 @@ unsigned int ovl_get_nlink(struct ovl_fs *ofs, struct dentry *lowerdentry,
if (!lowerdentry || !upperdentry || d_inode(lowerdentry)->i_nlink == 1)
return fallback;
- err = ovl_do_getxattr(ofs, upperdentry, OVL_XATTR_NLINK,
- &buf, sizeof(buf) - 1);
+ err = ovl_getxattr_upper(ofs, upperdentry, OVL_XATTR_NLINK,
+ &buf, sizeof(buf) - 1);
if (err < 0)
goto fail;
@@ -1102,6 +1112,10 @@ struct inode *ovl_get_inode(struct super_block *sb,
struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
struct inode *inode;
struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL;
+ struct path realpath = {
+ .dentry = upperdentry ?: lowerdentry,
+ .mnt = upperdentry ? ovl_upper_mnt(ofs) : lowerpath->layer->mnt,
+ };
bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry,
oip->index);
int fsid = bylower ? lowerpath->layer->fsid : 0;
@@ -1175,7 +1189,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
/* Check for non-merge dir that may have whiteouts */
if (is_dir) {
if (((upperdentry && lowerdentry) || oip->numlower > 1) ||
- ovl_check_origin_xattr(ofs, upperdentry ?: lowerdentry)) {
+ ovl_path_check_origin_xattr(ofs, &realpath)) {
ovl_set_flag(OVL_WHITEOUTS, inode);
}
}
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 1a9b515fc45d..65c4346a5b43 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -16,6 +16,7 @@
struct ovl_lookup_data {
struct super_block *sb;
+ struct vfsmount *mnt;
struct qstr name;
bool is_dir;
bool opaque;
@@ -25,14 +26,14 @@ struct ovl_lookup_data {
bool metacopy;
};
-static int ovl_check_redirect(struct dentry *dentry, struct ovl_lookup_data *d,
+static int ovl_check_redirect(struct path *path, struct ovl_lookup_data *d,
size_t prelen, const char *post)
{
int res;
char *buf;
struct ovl_fs *ofs = OVL_FS(d->sb);
- buf = ovl_get_redirect_xattr(ofs, dentry, prelen + strlen(post));
+ buf = ovl_get_redirect_xattr(ofs, path, prelen + strlen(post));
if (IS_ERR_OR_NULL(buf))
return PTR_ERR(buf);
@@ -105,13 +106,13 @@ int ovl_check_fb_len(struct ovl_fb *fb, int fb_len)
return 0;
}
-static struct ovl_fh *ovl_get_fh(struct ovl_fs *ofs, struct dentry *dentry,
+static struct ovl_fh *ovl_get_fh(struct ovl_fs *ofs, struct dentry *upperdentry,
enum ovl_xattr ox)
{
int res, err;
struct ovl_fh *fh = NULL;
- res = ovl_do_getxattr(ofs, dentry, ox, NULL, 0);
+ res = ovl_getxattr_upper(ofs, upperdentry, ox, NULL, 0);
if (res < 0) {
if (res == -ENODATA || res == -EOPNOTSUPP)
return NULL;
@@ -125,7 +126,7 @@ static struct ovl_fh *ovl_get_fh(struct ovl_fs *ofs, struct dentry *dentry,
if (!fh)
return ERR_PTR(-ENOMEM);
- res = ovl_do_getxattr(ofs, dentry, ox, fh->buf, res);
+ res = ovl_getxattr_upper(ofs, upperdentry, ox, fh->buf, res);
if (res < 0)
goto fail;
@@ -193,16 +194,17 @@ struct dentry *ovl_decode_real_fh(struct ovl_fs *ofs, struct ovl_fh *fh,
return real;
}
-static bool ovl_is_opaquedir(struct super_block *sb, struct dentry *dentry)
+static bool ovl_is_opaquedir(struct ovl_fs *ofs, struct path *path)
{
- return ovl_check_dir_xattr(sb, dentry, OVL_XATTR_OPAQUE);
+ return ovl_path_check_dir_xattr(ofs, path, OVL_XATTR_OPAQUE);
}
-static struct dentry *ovl_lookup_positive_unlocked(const char *name,
+static struct dentry *ovl_lookup_positive_unlocked(struct ovl_lookup_data *d,
+ const char *name,
struct dentry *base, int len,
bool drop_negative)
{
- struct dentry *ret = lookup_one_len_unlocked(name, base, len);
+ struct dentry *ret = lookup_one_unlocked(mnt_user_ns(d->mnt), name, base, len);
if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
if (drop_negative && ret->d_lockref.count == 1) {
@@ -224,10 +226,11 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
struct dentry **ret, bool drop_negative)
{
struct dentry *this;
+ struct path path;
int err;
bool last_element = !post[0];
- this = ovl_lookup_positive_unlocked(name, base, namelen, drop_negative);
+ this = ovl_lookup_positive_unlocked(d, name, base, namelen, drop_negative);
if (IS_ERR(this)) {
err = PTR_ERR(this);
this = NULL;
@@ -253,12 +256,15 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
d->stop = true;
goto put_and_out;
}
+
+ path.dentry = this;
+ path.mnt = d->mnt;
if (!d_can_lookup(this)) {
if (d->is_dir || !last_element) {
d->stop = true;
goto put_and_out;
}
- err = ovl_check_metacopy_xattr(OVL_FS(d->sb), this);
+ err = ovl_check_metacopy_xattr(OVL_FS(d->sb), &path);
if (err < 0)
goto out_err;
@@ -278,14 +284,14 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
if (d->last)
goto out;
- if (ovl_is_opaquedir(d->sb, this)) {
+ if (ovl_is_opaquedir(OVL_FS(d->sb), &path)) {
d->stop = true;
if (last_element)
d->opaque = true;
goto out;
}
}
- err = ovl_check_redirect(this, d, prelen, post);
+ err = ovl_check_redirect(&path, d, prelen, post);
if (err)
goto out_err;
out:
@@ -464,7 +470,7 @@ int ovl_verify_set_fh(struct ovl_fs *ofs, struct dentry *dentry,
err = ovl_verify_fh(ofs, dentry, ox, fh);
if (set && err == -ENODATA)
- err = ovl_do_setxattr(ofs, dentry, ox, fh->buf, fh->fb.len);
+ err = ovl_setxattr(ofs, dentry, ox, fh->buf, fh->fb.len);
if (err)
goto fail;
@@ -704,7 +710,8 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
if (err)
return ERR_PTR(err);
- index = lookup_positive_unlocked(name.name, ofs->indexdir, name.len);
+ index = lookup_one_positive_unlocked(ovl_upper_mnt_userns(ofs), name.name,
+ ofs->indexdir, name.len);
if (IS_ERR(index)) {
err = PTR_ERR(index);
if (err == -ENOENT) {
@@ -856,6 +863,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
old_cred = ovl_override_creds(dentry->d_sb);
upperdir = ovl_dentry_upper(dentry->d_parent);
if (upperdir) {
+ d.mnt = ovl_upper_mnt(ofs);
err = ovl_lookup_layer(upperdir, &d, &upperdentry, true);
if (err)
goto out;
@@ -911,6 +919,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
else
d.last = lower.layer->idx == roe->numlower;
+ d.mnt = lower.layer->mnt;
err = ovl_lookup_layer(lower.dentry, &d, &this, false);
if (err)
goto out_put;
@@ -1071,14 +1080,18 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
if (upperdentry)
ovl_dentry_set_upper_alias(dentry);
else if (index) {
- upperdentry = dget(index);
- upperredirect = ovl_get_redirect_xattr(ofs, upperdentry, 0);
+ struct path upperpath = {
+ .dentry = upperdentry = dget(index),
+ .mnt = ovl_upper_mnt(ofs),
+ };
+
+ upperredirect = ovl_get_redirect_xattr(ofs, &upperpath, 0);
if (IS_ERR(upperredirect)) {
err = PTR_ERR(upperredirect);
upperredirect = NULL;
goto out_free_oe;
}
- err = ovl_check_metacopy_xattr(ofs, upperdentry);
+ err = ovl_check_metacopy_xattr(ofs, &upperpath);
if (err < 0)
goto out_free_oe;
uppermetacopy = err;
@@ -1163,8 +1176,8 @@ bool ovl_lower_positive(struct dentry *dentry)
struct dentry *this;
struct dentry *lowerdir = poe->lowerstack[i].dentry;
- this = lookup_positive_unlocked(name->name, lowerdir,
- name->len);
+ this = lookup_one_positive_unlocked(mnt_user_ns(poe->lowerstack[i].layer->mnt),
+ name->name, lowerdir, name->len);
if (IS_ERR(this)) {
switch (PTR_ERR(this)) {
case -ENOENT:
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 2cd5741c873b..4f34b7e02eee 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/uuid.h>
#include <linux/fs.h>
+#include <linux/namei.h>
#include "ovl_entry.h"
#undef pr_fmt
@@ -122,109 +123,180 @@ static inline const char *ovl_xattr(struct ovl_fs *ofs, enum ovl_xattr ox)
return ovl_xattr_table[ox][ofs->config.userxattr];
}
-static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
+/*
+ * When changing ownership of an upper object, map the intended ownership
+ * according to the upper layer's idmapping. When an upper mount idmaps files
+ * that are stored on-disk as owned by id 1001 to id 1000, stat on such an
+ * object via the upper mount will report it as being owned by id 1000.
+ * In order to change ownership of an object so that stat reports id 1000 when
+ * called on an idmapped upper mount, the value written to disk - i.e., the
+ * value stored in ia_*id - must be 1001. The mount mapping helper will thus take
+ * care to map 1000 to 1001.
+ * The mnt idmapping helpers are nops if the upper layer isn't idmapped.
+ */
+static inline int ovl_do_notify_change(struct ovl_fs *ofs,
+ struct dentry *upperdentry,
+ struct iattr *attr)
+{
+ struct user_namespace *upper_mnt_userns = ovl_upper_mnt_userns(ofs);
+ struct user_namespace *fs_userns = i_user_ns(d_inode(upperdentry));
+
+ if (attr->ia_valid & ATTR_UID)
+ attr->ia_uid = mapped_kuid_user(upper_mnt_userns,
+ fs_userns, attr->ia_uid);
+ if (attr->ia_valid & ATTR_GID)
+ attr->ia_gid = mapped_kgid_user(upper_mnt_userns,
+ fs_userns, attr->ia_gid);
+
+ return notify_change(upper_mnt_userns, upperdentry, attr, NULL);
+}
+
+static inline int ovl_do_rmdir(struct ovl_fs *ofs,
+ struct inode *dir, struct dentry *dentry)
{
- int err = vfs_rmdir(&init_user_ns, dir, dentry);
+ int err = vfs_rmdir(ovl_upper_mnt_userns(ofs), dir, dentry);
pr_debug("rmdir(%pd2) = %i\n", dentry, err);
return err;
}
-static inline int ovl_do_unlink(struct inode *dir, struct dentry *dentry)
+static inline int ovl_do_unlink(struct ovl_fs *ofs, struct inode *dir,
+ struct dentry *dentry)
{
- int err = vfs_unlink(&init_user_ns, dir, dentry, NULL);
+ int err = vfs_unlink(ovl_upper_mnt_userns(ofs), dir, dentry, NULL);
pr_debug("unlink(%pd2) = %i\n", dentry, err);
return err;
}
-static inline int ovl_do_link(struct dentry *old_dentry, struct inode *dir,
- struct dentry *new_dentry)
+static inline int ovl_do_link(struct ovl_fs *ofs, struct dentry *old_dentry,
+ struct inode *dir, struct dentry *new_dentry)
{
- int err = vfs_link(old_dentry, &init_user_ns, dir, new_dentry, NULL);
+ int err = vfs_link(old_dentry, ovl_upper_mnt_userns(ofs), dir, new_dentry, NULL);
pr_debug("link(%pd2, %pd2) = %i\n", old_dentry, new_dentry, err);
return err;
}
-static inline int ovl_do_create(struct inode *dir, struct dentry *dentry,
+static inline int ovl_do_create(struct ovl_fs *ofs,
+ struct inode *dir, struct dentry *dentry,
umode_t mode)
{
- int err = vfs_create(&init_user_ns, dir, dentry, mode, true);
+ int err = vfs_create(ovl_upper_mnt_userns(ofs), dir, dentry, mode, true);
pr_debug("create(%pd2, 0%o) = %i\n", dentry, mode, err);
return err;
}
-static inline int ovl_do_mkdir(struct inode *dir, struct dentry *dentry,
+static inline int ovl_do_mkdir(struct ovl_fs *ofs,
+ struct inode *dir, struct dentry *dentry,
umode_t mode)
{
- int err = vfs_mkdir(&init_user_ns, dir, dentry, mode);
+ int err = vfs_mkdir(ovl_upper_mnt_userns(ofs), dir, dentry, mode);
pr_debug("mkdir(%pd2, 0%o) = %i\n", dentry, mode, err);
return err;
}
-static inline int ovl_do_mknod(struct inode *dir, struct dentry *dentry,
+static inline int ovl_do_mknod(struct ovl_fs *ofs,
+ struct inode *dir, struct dentry *dentry,
umode_t mode, dev_t dev)
{
- int err = vfs_mknod(&init_user_ns, dir, dentry, mode, dev);
+ int err = vfs_mknod(ovl_upper_mnt_userns(ofs), dir, dentry, mode, dev);
pr_debug("mknod(%pd2, 0%o, 0%o) = %i\n", dentry, mode, dev, err);
return err;
}
-static inline int ovl_do_symlink(struct inode *dir, struct dentry *dentry,
+static inline int ovl_do_symlink(struct ovl_fs *ofs,
+ struct inode *dir, struct dentry *dentry,
const char *oldname)
{
- int err = vfs_symlink(&init_user_ns, dir, dentry, oldname);
+ int err = vfs_symlink(ovl_upper_mnt_userns(ofs), dir, dentry, oldname);
pr_debug("symlink(\"%s\", %pd2) = %i\n", oldname, dentry, err);
return err;
}
-static inline ssize_t ovl_do_getxattr(struct ovl_fs *ofs, struct dentry *dentry,
- enum ovl_xattr ox, void *value,
- size_t size)
+static inline ssize_t ovl_do_getxattr(struct path *path, const char *name,
+ void *value, size_t size)
{
- const char *name = ovl_xattr(ofs, ox);
- int err = vfs_getxattr(&init_user_ns, dentry, name, value, size);
- int len = (value && err > 0) ? err : 0;
+ int err, len;
+
+ WARN_ON(path->dentry->d_sb != path->mnt->mnt_sb);
+
+ err = vfs_getxattr(mnt_user_ns(path->mnt), path->dentry,
+ name, value, size);
+ len = (value && err > 0) ? err : 0;
pr_debug("getxattr(%pd2, \"%s\", \"%*pE\", %zu, 0) = %i\n",
- dentry, name, min(len, 48), value, size, err);
+ path->dentry, name, min(len, 48), value, size, err);
return err;
}
+static inline ssize_t ovl_getxattr_upper(struct ovl_fs *ofs,
+ struct dentry *upperdentry,
+ enum ovl_xattr ox, void *value,
+ size_t size)
+{
+ struct path upperpath = {
+ .dentry = upperdentry,
+ .mnt = ovl_upper_mnt(ofs),
+ };
+
+ return ovl_do_getxattr(&upperpath, ovl_xattr(ofs, ox), value, size);
+}
+
+static inline ssize_t ovl_path_getxattr(struct ovl_fs *ofs,
+ struct path *path,
+ enum ovl_xattr ox, void *value,
+ size_t size)
+{
+ return ovl_do_getxattr(path, ovl_xattr(ofs, ox), value, size);
+}
+
static inline int ovl_do_setxattr(struct ovl_fs *ofs, struct dentry *dentry,
- enum ovl_xattr ox, const void *value,
- size_t size)
+ const char *name, const void *value,
+ size_t size, int flags)
{
- const char *name = ovl_xattr(ofs, ox);
- int err = vfs_setxattr(&init_user_ns, dentry, name, value, size, 0);
- pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, 0) = %i\n",
- dentry, name, min((int)size, 48), value, size, err);
+ int err = vfs_setxattr(ovl_upper_mnt_userns(ofs), dentry, name, value, size, flags);
+
+ pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, %d) = %i\n",
+ dentry, name, min((int)size, 48), value, size, flags, err);
return err;
}
+static inline int ovl_setxattr(struct ovl_fs *ofs, struct dentry *dentry,
+ enum ovl_xattr ox, const void *value,
+ size_t size)
+{
+ return ovl_do_setxattr(ofs, dentry, ovl_xattr(ofs, ox), value, size, 0);
+}
+
static inline int ovl_do_removexattr(struct ovl_fs *ofs, struct dentry *dentry,
- enum ovl_xattr ox)
+ const char *name)
{
- const char *name = ovl_xattr(ofs, ox);
- int err = vfs_removexattr(&init_user_ns, dentry, name);
+ int err = vfs_removexattr(ovl_upper_mnt_userns(ofs), dentry, name);
pr_debug("removexattr(%pd2, \"%s\") = %i\n", dentry, name, err);
return err;
}
-static inline int ovl_do_rename(struct inode *olddir, struct dentry *olddentry,
- struct inode *newdir, struct dentry *newdentry,
- unsigned int flags)
+static inline int ovl_removexattr(struct ovl_fs *ofs, struct dentry *dentry,
+ enum ovl_xattr ox)
+{
+ return ovl_do_removexattr(ofs, dentry, ovl_xattr(ofs, ox));
+}
+
+static inline int ovl_do_rename(struct ovl_fs *ofs, struct inode *olddir,
+ struct dentry *olddentry, struct inode *newdir,
+ struct dentry *newdentry, unsigned int flags)
{
int err;
struct renamedata rd = {
- .old_mnt_userns = &init_user_ns,
+ .old_mnt_userns = ovl_upper_mnt_userns(ofs),
.old_dir = olddir,
.old_dentry = olddentry,
- .new_mnt_userns = &init_user_ns,
+ .new_mnt_userns = ovl_upper_mnt_userns(ofs),
.new_dir = newdir,
.new_dentry = newdentry,
.flags = flags,
@@ -239,22 +311,31 @@ static inline int ovl_do_rename(struct inode *olddir, struct dentry *olddentry,
return err;
}
-static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry)
+static inline int ovl_do_whiteout(struct ovl_fs *ofs,
+ struct inode *dir, struct dentry *dentry)
{
- int err = vfs_whiteout(&init_user_ns, dir, dentry);
+ int err = vfs_whiteout(ovl_upper_mnt_userns(ofs), dir, dentry);
pr_debug("whiteout(%pd2) = %i\n", dentry, err);
return err;
}
-static inline struct dentry *ovl_do_tmpfile(struct dentry *dentry, umode_t mode)
+static inline struct dentry *ovl_do_tmpfile(struct ovl_fs *ofs,
+ struct dentry *dentry, umode_t mode)
{
- struct dentry *ret = vfs_tmpfile(&init_user_ns, dentry, mode, 0);
+ struct dentry *ret = vfs_tmpfile(ovl_upper_mnt_userns(ofs), dentry, mode, 0);
int err = PTR_ERR_OR_ZERO(ret);
pr_debug("tmpfile(%pd2, 0%o) = %i\n", dentry, mode, err);
return ret;
}
+static inline struct dentry *ovl_lookup_upper(struct ovl_fs *ofs,
+ const char *name,
+ struct dentry *base, int len)
+{
+ return lookup_one(ovl_upper_mnt_userns(ofs), name, base, len);
+}
+
static inline bool ovl_open_flags_need_copy_up(int flags)
{
if (!flags)
@@ -293,10 +374,13 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry);
void ovl_path_upper(struct dentry *dentry, struct path *path);
void ovl_path_lower(struct dentry *dentry, struct path *path);
void ovl_path_lowerdata(struct dentry *dentry, struct path *path);
+void ovl_i_path_real(struct inode *inode, struct path *path);
enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
+enum ovl_path_type ovl_path_realdata(struct dentry *dentry, struct path *path);
struct dentry *ovl_dentry_upper(struct dentry *dentry);
struct dentry *ovl_dentry_lower(struct dentry *dentry);
struct dentry *ovl_dentry_lowerdata(struct dentry *dentry);
+const struct ovl_layer *ovl_i_layer_lower(struct inode *inode);
const struct ovl_layer *ovl_layer_lower(struct dentry *dentry);
struct dentry *ovl_dentry_real(struct dentry *dentry);
struct dentry *ovl_i_dentry_upper(struct inode *inode);
@@ -330,9 +414,20 @@ struct file *ovl_path_open(struct path *path, int flags);
int ovl_copy_up_start(struct dentry *dentry, int flags);
void ovl_copy_up_end(struct dentry *dentry);
bool ovl_already_copied_up(struct dentry *dentry, int flags);
-bool ovl_check_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry);
-bool ovl_check_dir_xattr(struct super_block *sb, struct dentry *dentry,
- enum ovl_xattr ox);
+bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, struct path *path,
+ enum ovl_xattr ox);
+bool ovl_path_check_origin_xattr(struct ovl_fs *ofs, struct path *path);
+
+static inline bool ovl_check_origin_xattr(struct ovl_fs *ofs,
+ struct dentry *upperdentry)
+{
+ struct path upperpath = {
+ .dentry = upperdentry,
+ .mnt = ovl_upper_mnt(ofs),
+ };
+ return ovl_path_check_origin_xattr(ofs, &upperpath);
+}
+
int ovl_check_setxattr(struct ovl_fs *ofs, struct dentry *upperdentry,
enum ovl_xattr ox, const void *value, size_t size,
int xerr);
@@ -344,10 +439,9 @@ bool ovl_need_index(struct dentry *dentry);
int ovl_nlink_start(struct dentry *dentry);
void ovl_nlink_end(struct dentry *dentry);
int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
-int ovl_check_metacopy_xattr(struct ovl_fs *ofs, struct dentry *dentry);
+int ovl_check_metacopy_xattr(struct ovl_fs *ofs, struct path *path);
bool ovl_is_metacopy_dentry(struct dentry *dentry);
-char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
- int padding);
+char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct path *path, int padding);
int ovl_sync_status(struct ovl_fs *ofs);
static inline void ovl_set_flag(unsigned long flag, struct inode *inode)
@@ -366,9 +460,15 @@ static inline bool ovl_test_flag(unsigned long flag, struct inode *inode)
}
static inline bool ovl_is_impuredir(struct super_block *sb,
- struct dentry *dentry)
+ struct dentry *upperdentry)
{
- return ovl_check_dir_xattr(sb, dentry, OVL_XATTR_IMPURE);
+ struct ovl_fs *ofs = OVL_FS(sb);
+ struct path upperpath = {
+ .dentry = upperdentry,
+ .mnt = ovl_upper_mnt(ofs),
+ };
+
+ return ovl_path_check_dir_xattr(ofs, &upperpath, OVL_XATTR_IMPURE);
}
/*
@@ -461,12 +561,13 @@ static inline int ovl_verify_upper(struct ovl_fs *ofs, struct dentry *index,
extern const struct file_operations ovl_dir_operations;
struct file *ovl_dir_real_file(const struct file *file, bool want_upper);
int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list);
-void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list);
+void ovl_cleanup_whiteouts(struct ovl_fs *ofs, struct dentry *upper,
+ struct list_head *list);
void ovl_cache_free(struct list_head *list);
void ovl_dir_cache_free(struct inode *inode);
int ovl_check_d_type_supported(struct path *realpath);
-int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
- struct dentry *dentry, int level);
+int ovl_workdir_cleanup(struct ovl_fs *ofs, struct inode *dir,
+ struct vfsmount *mnt, struct dentry *dentry, int level);
int ovl_indexdir_cleanup(struct ovl_fs *ofs);
/*
@@ -520,16 +621,7 @@ bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir);
struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir);
struct inode *ovl_get_inode(struct super_block *sb,
struct ovl_inode_params *oip);
-static inline void ovl_copyattr(struct inode *from, struct inode *to)
-{
- to->i_uid = from->i_uid;
- to->i_gid = from->i_gid;
- to->i_mode = from->i_mode;
- to->i_atime = from->i_atime;
- to->i_mtime = from->i_mtime;
- to->i_ctime = from->i_ctime;
- i_size_write(to, i_size_read(from));
-}
+void ovl_copyattr(struct inode *to);
/* vfs inode flags copied from real to ovl inode */
#define OVL_COPY_I_FLAGS_MASK (S_SYNC | S_NOATIME | S_APPEND | S_IMMUTABLE)
@@ -570,12 +662,15 @@ struct ovl_cattr {
#define OVL_CATTR(m) (&(struct ovl_cattr) { .mode = (m) })
-int ovl_mkdir_real(struct inode *dir, struct dentry **newdentry, umode_t mode);
-struct dentry *ovl_create_real(struct inode *dir, struct dentry *newdentry,
+int ovl_mkdir_real(struct ovl_fs *ofs, struct inode *dir,
+ struct dentry **newdentry, umode_t mode);
+struct dentry *ovl_create_real(struct ovl_fs *ofs,
+ struct inode *dir, struct dentry *newdentry,
+ struct ovl_cattr *attr);
+int ovl_cleanup(struct ovl_fs *ofs, struct inode *dir, struct dentry *dentry);
+struct dentry *ovl_lookup_temp(struct ovl_fs *ofs, struct dentry *workdir);
+struct dentry *ovl_create_temp(struct ovl_fs *ofs, struct dentry *workdir,
struct ovl_cattr *attr);
-int ovl_cleanup(struct inode *dir, struct dentry *dentry);
-struct dentry *ovl_lookup_temp(struct dentry *workdir);
-struct dentry *ovl_create_temp(struct dentry *workdir, struct ovl_cattr *attr);
/* file.c */
extern const struct file_operations ovl_file_operations;
@@ -591,9 +686,8 @@ int ovl_fileattr_set(struct user_namespace *mnt_userns,
int ovl_copy_up(struct dentry *dentry);
int ovl_copy_up_with_data(struct dentry *dentry);
int ovl_maybe_copy_up(struct dentry *dentry, int flags);
-int ovl_copy_xattr(struct super_block *sb, struct dentry *old,
- struct dentry *new);
-int ovl_set_attr(struct dentry *upper, struct kstat *stat);
+int ovl_copy_xattr(struct super_block *sb, struct path *path, struct dentry *new);
+int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upper, struct kstat *stat);
struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
bool is_upper);
int ovl_set_origin(struct ovl_fs *ofs, struct dentry *lower,
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 63efee554f69..e1af8f660698 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -90,6 +90,11 @@ static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs)
return ofs->layers[0].mnt;
}
+static inline struct user_namespace *ovl_upper_mnt_userns(struct ovl_fs *ofs)
+{
+ return mnt_user_ns(ovl_upper_mnt(ofs));
+}
+
static inline struct ovl_fs *OVL_FS(struct super_block *sb)
{
return (struct ovl_fs *)sb->s_fs_info;
@@ -129,7 +134,7 @@ struct ovl_inode {
unsigned long flags;
struct inode vfs_inode;
struct dentry *__upperdentry;
- struct inode *lower;
+ struct ovl_path lowerpath;
/* synchronize copy up and more */
struct mutex lock;
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 150fdf3bc68d..78f62cc1797b 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -264,11 +264,11 @@ static int ovl_fill_merge(struct dir_context *ctx, const char *name,
return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
}
-static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
+static int ovl_check_whiteouts(struct path *path, struct ovl_readdir_data *rdd)
{
int err;
struct ovl_cache_entry *p;
- struct dentry *dentry;
+ struct dentry *dentry, *dir = path->dentry;
const struct cred *old_cred;
old_cred = ovl_override_creds(rdd->dentry->d_sb);
@@ -278,7 +278,7 @@ static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
while (rdd->first_maybe_whiteout) {
p = rdd->first_maybe_whiteout;
rdd->first_maybe_whiteout = p->next_maybe_whiteout;
- dentry = lookup_one_len(p->name, dir, p->len);
+ dentry = lookup_one(mnt_user_ns(path->mnt), p->name, dir, p->len);
if (!IS_ERR(dentry)) {
p->is_whiteout = ovl_is_whiteout(dentry);
dput(dentry);
@@ -312,7 +312,7 @@ static inline int ovl_dir_read(struct path *realpath,
} while (!err && rdd->count);
if (!err && rdd->first_maybe_whiteout && rdd->dentry)
- err = ovl_check_whiteouts(realpath->dentry, rdd);
+ err = ovl_check_whiteouts(realpath, rdd);
fput(realfile);
@@ -479,7 +479,7 @@ static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
goto get;
}
}
- this = lookup_one_len(p->name, dir, p->len);
+ this = lookup_one(mnt_user_ns(path->mnt), p->name, dir, p->len);
if (IS_ERR_OR_NULL(this) || !this->d_inode) {
/* Mark a stale entry */
p->is_whiteout = true;
@@ -623,8 +623,8 @@ static struct ovl_dir_cache *ovl_cache_get_impure(struct path *path)
* Removing the "impure" xattr is best effort.
*/
if (!ovl_want_write(dentry)) {
- ovl_do_removexattr(ofs, ovl_dentry_upper(dentry),
- OVL_XATTR_IMPURE);
+ ovl_removexattr(ofs, ovl_dentry_upper(dentry),
+ OVL_XATTR_IMPURE);
ovl_drop_write(dentry);
}
ovl_clear_flag(OVL_IMPURE, d_inode(dentry));
@@ -1001,7 +1001,8 @@ del_entry:
return err;
}
-void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
+void ovl_cleanup_whiteouts(struct ovl_fs *ofs, struct dentry *upper,
+ struct list_head *list)
{
struct ovl_cache_entry *p;
@@ -1012,7 +1013,7 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
if (WARN_ON(!p->is_whiteout || !p->is_upper))
continue;
- dentry = lookup_one_len(p->name, upper, p->len);
+ dentry = ovl_lookup_upper(ofs, p->name, upper, p->len);
if (IS_ERR(dentry)) {
pr_err("lookup '%s/%.*s' failed (%i)\n",
upper->d_name.name, p->len, p->name,
@@ -1020,7 +1021,7 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
continue;
}
if (dentry->d_inode)
- ovl_cleanup(upper->d_inode, dentry);
+ ovl_cleanup(ofs, upper->d_inode, dentry);
dput(dentry);
}
inode_unlock(upper->d_inode);
@@ -1064,7 +1065,8 @@ int ovl_check_d_type_supported(struct path *realpath)
#define OVL_INCOMPATDIR_NAME "incompat"
-static int ovl_workdir_cleanup_recurse(struct path *path, int level)
+static int ovl_workdir_cleanup_recurse(struct ovl_fs *ofs, struct path *path,
+ int level)
{
int err;
struct inode *dir = path->dentry->d_inode;
@@ -1111,11 +1113,11 @@ static int ovl_workdir_cleanup_recurse(struct path *path, int level)
err = -EINVAL;
break;
}
- dentry = lookup_one_len(p->name, path->dentry, p->len);
+ dentry = ovl_lookup_upper(ofs, p->name, path->dentry, p->len);
if (IS_ERR(dentry))
continue;
if (dentry->d_inode)
- err = ovl_workdir_cleanup(dir, path->mnt, dentry, level);
+ err = ovl_workdir_cleanup(ofs, dir, path->mnt, dentry, level);
dput(dentry);
if (err)
break;
@@ -1126,24 +1128,24 @@ out:
return err;
}
-int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
- struct dentry *dentry, int level)
+int ovl_workdir_cleanup(struct ovl_fs *ofs, struct inode *dir,
+ struct vfsmount *mnt, struct dentry *dentry, int level)
{
int err;
if (!d_is_dir(dentry) || level > 1) {
- return ovl_cleanup(dir, dentry);
+ return ovl_cleanup(ofs, dir, dentry);
}
- err = ovl_do_rmdir(dir, dentry);
+ err = ovl_do_rmdir(ofs, dir, dentry);
if (err) {
struct path path = { .mnt = mnt, .dentry = dentry };
inode_unlock(dir);
- err = ovl_workdir_cleanup_recurse(&path, level + 1);
+ err = ovl_workdir_cleanup_recurse(ofs, &path, level + 1);
inode_lock_nested(dir, I_MUTEX_PARENT);
if (!err)
- err = ovl_cleanup(dir, dentry);
+ err = ovl_cleanup(ofs, dir, dentry);
}
return err;
@@ -1179,7 +1181,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs)
if (p->len == 2 && p->name[1] == '.')
continue;
}
- index = lookup_one_len(p->name, indexdir, p->len);
+ index = ovl_lookup_upper(ofs, p->name, indexdir, p->len);
if (IS_ERR(index)) {
err = PTR_ERR(index);
index = NULL;
@@ -1187,7 +1189,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs)
}
/* Cleanup leftover from index create/cleanup attempt */
if (index->d_name.name[0] == '#') {
- err = ovl_workdir_cleanup(dir, path.mnt, index, 1);
+ err = ovl_workdir_cleanup(ofs, dir, path.mnt, index, 1);
if (err)
break;
goto next;
@@ -1197,7 +1199,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs)
goto next;
} else if (err == -ESTALE) {
/* Cleanup stale index entries */
- err = ovl_cleanup(dir, index);
+ err = ovl_cleanup(ofs, dir, index);
} else if (err != -ENOENT) {
/*
* Abort mount to avoid corrupting the index if
@@ -1213,7 +1215,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs)
err = ovl_cleanup_and_whiteout(ofs, dir, index);
} else {
/* Cleanup orphan index entries */
- err = ovl_cleanup(dir, index);
+ err = ovl_cleanup(ofs, dir, index);
}
if (err)
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 001cdbb8f015..e0a2e0468ee7 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -184,7 +184,8 @@ static struct inode *ovl_alloc_inode(struct super_block *sb)
oi->version = 0;
oi->flags = 0;
oi->__upperdentry = NULL;
- oi->lower = NULL;
+ oi->lowerpath.dentry = NULL;
+ oi->lowerpath.layer = NULL;
oi->lowerdata = NULL;
mutex_init(&oi->lock);
@@ -205,7 +206,7 @@ static void ovl_destroy_inode(struct inode *inode)
struct ovl_inode *oi = OVL_I(inode);
dput(oi->__upperdentry);
- iput(oi->lower);
+ dput(oi->lowerpath.dentry);
if (S_ISDIR(inode->i_mode))
ovl_dir_cache_free(inode);
else
@@ -761,7 +762,7 @@ static struct dentry *ovl_workdir_create(struct ovl_fs *ofs,
inode_lock_nested(dir, I_MUTEX_PARENT);
retry:
- work = lookup_one_len(name, ofs->workbasedir, strlen(name));
+ work = ovl_lookup_upper(ofs, name, ofs->workbasedir, strlen(name));
if (!IS_ERR(work)) {
struct iattr attr = {
@@ -778,7 +779,7 @@ retry:
goto out_unlock;
retried = true;
- err = ovl_workdir_cleanup(dir, mnt, work, 0);
+ err = ovl_workdir_cleanup(ofs, dir, mnt, work, 0);
dput(work);
if (err == -EINVAL) {
work = ERR_PTR(err);
@@ -787,7 +788,7 @@ retry:
goto retry;
}
- err = ovl_mkdir_real(dir, &work, attr.ia_mode);
+ err = ovl_mkdir_real(ofs, dir, &work, attr.ia_mode);
if (err)
goto out_dput;
@@ -809,19 +810,19 @@ retry:
* allowed as upper are limited to "normal" ones, where checking
* for the above two errors is sufficient.
*/
- err = vfs_removexattr(&init_user_ns, work,
- XATTR_NAME_POSIX_ACL_DEFAULT);
+ err = ovl_do_removexattr(ofs, work,
+ XATTR_NAME_POSIX_ACL_DEFAULT);
if (err && err != -ENODATA && err != -EOPNOTSUPP)
goto out_dput;
- err = vfs_removexattr(&init_user_ns, work,
- XATTR_NAME_POSIX_ACL_ACCESS);
+ err = ovl_do_removexattr(ofs, work,
+ XATTR_NAME_POSIX_ACL_ACCESS);
if (err && err != -ENODATA && err != -EOPNOTSUPP)
goto out_dput;
/* Clear any inherited mode bits */
inode_lock(work->d_inode);
- err = notify_change(&init_user_ns, work, &attr, NULL);
+ err = ovl_do_notify_change(ofs, work, &attr);
inode_unlock(work->d_inode);
if (err)
goto out_dput;
@@ -873,10 +874,6 @@ static int ovl_mount_dir_noesc(const char *name, struct path *path)
pr_err("filesystem on '%s' not supported\n", name);
goto out_put;
}
- if (is_idmapped_mnt(path->mnt)) {
- pr_err("idmapped layers are currently not supported\n");
- goto out_put;
- }
if (!d_is_dir(path->dentry)) {
pr_err("'%s' not a directory\n", name);
goto out_put;
@@ -1256,8 +1253,9 @@ out:
* Returns 1 if RENAME_WHITEOUT is supported, 0 if not supported and
* negative values if error is encountered.
*/
-static int ovl_check_rename_whiteout(struct dentry *workdir)
+static int ovl_check_rename_whiteout(struct ovl_fs *ofs)
{
+ struct dentry *workdir = ofs->workdir;
struct inode *dir = d_inode(workdir);
struct dentry *temp;
struct dentry *dest;
@@ -1267,12 +1265,12 @@ static int ovl_check_rename_whiteout(struct dentry *workdir)
inode_lock_nested(dir, I_MUTEX_PARENT);
- temp = ovl_create_temp(workdir, OVL_CATTR(S_IFREG | 0));
+ temp = ovl_create_temp(ofs, workdir, OVL_CATTR(S_IFREG | 0));
err = PTR_ERR(temp);
if (IS_ERR(temp))
goto out_unlock;
- dest = ovl_lookup_temp(workdir);
+ dest = ovl_lookup_temp(ofs, workdir);
err = PTR_ERR(dest);
if (IS_ERR(dest)) {
dput(temp);
@@ -1281,14 +1279,14 @@ static int ovl_check_rename_whiteout(struct dentry *workdir)
/* Name is inline and stable - using snapshot as a copy helper */
take_dentry_name_snapshot(&name, temp);
- err = ovl_do_rename(dir, temp, dir, dest, RENAME_WHITEOUT);
+ err = ovl_do_rename(ofs, dir, temp, dir, dest, RENAME_WHITEOUT);
if (err) {
if (err == -EINVAL)
err = 0;
goto cleanup_temp;
}
- whiteout = lookup_one_len(name.name.name, workdir, name.name.len);
+ whiteout = ovl_lookup_upper(ofs, name.name.name, workdir, name.name.len);
err = PTR_ERR(whiteout);
if (IS_ERR(whiteout))
goto cleanup_temp;
@@ -1297,11 +1295,11 @@ static int ovl_check_rename_whiteout(struct dentry *workdir)
/* Best effort cleanup of whiteout and temp file */
if (err)
- ovl_cleanup(dir, whiteout);
+ ovl_cleanup(ofs, dir, whiteout);
dput(whiteout);
cleanup_temp:
- ovl_cleanup(dir, temp);
+ ovl_cleanup(ofs, dir, temp);
release_dentry_name_snapshot(&name);
dput(temp);
dput(dest);
@@ -1312,16 +1310,17 @@ out_unlock:
return err;
}
-static struct dentry *ovl_lookup_or_create(struct dentry *parent,
+static struct dentry *ovl_lookup_or_create(struct ovl_fs *ofs,
+ struct dentry *parent,
const char *name, umode_t mode)
{
size_t len = strlen(name);
struct dentry *child;
inode_lock_nested(parent->d_inode, I_MUTEX_PARENT);
- child = lookup_one_len(name, parent, len);
+ child = ovl_lookup_upper(ofs, name, parent, len);
if (!IS_ERR(child) && !child->d_inode)
- child = ovl_create_real(parent->d_inode, child,
+ child = ovl_create_real(ofs, parent->d_inode, child,
OVL_CATTR(mode));
inode_unlock(parent->d_inode);
dput(parent);
@@ -1343,7 +1342,7 @@ static int ovl_create_volatile_dirty(struct ovl_fs *ofs)
const char *const *name = volatile_path;
for (ctr = ARRAY_SIZE(volatile_path); ctr; ctr--, name++) {
- d = ovl_lookup_or_create(d, *name, ctr > 1 ? S_IFDIR : S_IFREG);
+ d = ovl_lookup_or_create(ofs, d, *name, ctr > 1 ? S_IFDIR : S_IFREG);
if (IS_ERR(d))
return PTR_ERR(d);
}
@@ -1391,7 +1390,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
pr_warn("upper fs needs to support d_type.\n");
/* Check if upper/work fs supports O_TMPFILE */
- temp = ovl_do_tmpfile(ofs->workdir, S_IFREG | 0);
+ temp = ovl_do_tmpfile(ofs, ofs->workdir, S_IFREG | 0);
ofs->tmpfile = !IS_ERR(temp);
if (ofs->tmpfile)
dput(temp);
@@ -1400,7 +1399,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
/* Check if upper/work fs supports RENAME_WHITEOUT */
- err = ovl_check_rename_whiteout(ofs->workdir);
+ err = ovl_check_rename_whiteout(ofs);
if (err < 0)
goto out;
@@ -1411,7 +1410,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
/*
* Check if upper/work fs supports (trusted|user).overlay.* xattr
*/
- err = ovl_do_setxattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE, "0", 1);
+ err = ovl_setxattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE, "0", 1);
if (err) {
ofs->noxattr = true;
if (ofs->config.index || ofs->config.metacopy) {
@@ -1429,7 +1428,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
}
err = 0;
} else {
- ovl_do_removexattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE);
+ ovl_removexattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE);
}
/*
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index f48284a2a896..87f811c089e4 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -194,6 +194,20 @@ enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
return type;
}
+enum ovl_path_type ovl_path_realdata(struct dentry *dentry, struct path *path)
+{
+ enum ovl_path_type type = ovl_path_type(dentry);
+
+ WARN_ON_ONCE(d_is_dir(dentry));
+
+ if (!OVL_TYPE_UPPER(type) || OVL_TYPE_MERGE(type))
+ ovl_path_lowerdata(dentry, path);
+ else
+ ovl_path_upper(dentry, path);
+
+ return type;
+}
+
struct dentry *ovl_dentry_upper(struct dentry *dentry)
{
return ovl_upperdentry_dereference(OVL_I(d_inode(dentry)));
@@ -236,6 +250,17 @@ struct dentry *ovl_i_dentry_upper(struct inode *inode)
return ovl_upperdentry_dereference(OVL_I(inode));
}
+void ovl_i_path_real(struct inode *inode, struct path *path)
+{
+ path->dentry = ovl_i_dentry_upper(inode);
+ if (!path->dentry) {
+ path->dentry = OVL_I(inode)->lowerpath.dentry;
+ path->mnt = OVL_I(inode)->lowerpath.layer->mnt;
+ } else {
+ path->mnt = ovl_upper_mnt(OVL_FS(inode->i_sb));
+ }
+}
+
struct inode *ovl_inode_upper(struct inode *inode)
{
struct dentry *upperdentry = ovl_i_dentry_upper(inode);
@@ -245,7 +270,9 @@ struct inode *ovl_inode_upper(struct inode *inode)
struct inode *ovl_inode_lower(struct inode *inode)
{
- return OVL_I(inode)->lower;
+ struct dentry *lowerdentry = OVL_I(inode)->lowerpath.dentry;
+
+ return lowerdentry ? d_inode(lowerdentry) : NULL;
}
struct inode *ovl_inode_real(struct inode *inode)
@@ -443,7 +470,7 @@ static void ovl_dir_version_inc(struct dentry *dentry, bool impurity)
void ovl_dir_modified(struct dentry *dentry, bool impurity)
{
/* Copy mtime/ctime */
- ovl_copyattr(d_inode(ovl_dentry_upper(dentry)), d_inode(dentry));
+ ovl_copyattr(d_inode(dentry));
ovl_dir_version_inc(dentry, impurity);
}
@@ -466,6 +493,7 @@ bool ovl_is_whiteout(struct dentry *dentry)
struct file *ovl_path_open(struct path *path, int flags)
{
struct inode *inode = d_inode(path->dentry);
+ struct user_namespace *real_mnt_userns = mnt_user_ns(path->mnt);
int err, acc_mode;
if (flags & ~(O_ACCMODE | O_LARGEFILE))
@@ -482,12 +510,12 @@ struct file *ovl_path_open(struct path *path, int flags)
BUG();
}
- err = inode_permission(&init_user_ns, inode, acc_mode | MAY_OPEN);
+ err = inode_permission(real_mnt_userns, inode, acc_mode | MAY_OPEN);
if (err)
return ERR_PTR(err);
/* O_NOATIME is an optimization, don't fail if not permitted */
- if (inode_owner_or_capable(&init_user_ns, inode))
+ if (inode_owner_or_capable(real_mnt_userns, inode))
flags |= O_NOATIME;
return dentry_open(path, flags, current_cred());
@@ -550,11 +578,11 @@ void ovl_copy_up_end(struct dentry *dentry)
ovl_inode_unlock(d_inode(dentry));
}
-bool ovl_check_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry)
+bool ovl_path_check_origin_xattr(struct ovl_fs *ofs, struct path *path)
{
int res;
- res = ovl_do_getxattr(ofs, dentry, OVL_XATTR_ORIGIN, NULL, 0);
+ res = ovl_path_getxattr(ofs, path, OVL_XATTR_ORIGIN, NULL, 0);
/* Zero size value means "copied up but origin unknown" */
if (res >= 0)
@@ -563,16 +591,16 @@ bool ovl_check_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry)
return false;
}
-bool ovl_check_dir_xattr(struct super_block *sb, struct dentry *dentry,
- enum ovl_xattr ox)
+bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, struct path *path,
+ enum ovl_xattr ox)
{
int res;
char val;
- if (!d_is_dir(dentry))
+ if (!d_is_dir(path->dentry))
return false;
- res = ovl_do_getxattr(OVL_FS(sb), dentry, ox, &val, 1);
+ res = ovl_path_getxattr(ofs, path, ox, &val, 1);
if (res == 1 && val == 'y')
return true;
@@ -612,7 +640,7 @@ int ovl_check_setxattr(struct ovl_fs *ofs, struct dentry *upperdentry,
if (ofs->noxattr)
return xerr;
- err = ovl_do_setxattr(ofs, upperdentry, ox, value, size);
+ err = ovl_setxattr(ofs, upperdentry, ox, value, size);
if (err == -EOPNOTSUPP) {
pr_warn("cannot set %s xattr on upper\n", ovl_xattr(ofs, ox));
@@ -652,8 +680,8 @@ void ovl_check_protattr(struct inode *inode, struct dentry *upper)
char buf[OVL_PROTATTR_MAX+1];
int res, n;
- res = ovl_do_getxattr(ofs, upper, OVL_XATTR_PROTATTR, buf,
- OVL_PROTATTR_MAX);
+ res = ovl_getxattr_upper(ofs, upper, OVL_XATTR_PROTATTR, buf,
+ OVL_PROTATTR_MAX);
if (res < 0)
return;
@@ -708,7 +736,7 @@ int ovl_set_protattr(struct inode *inode, struct dentry *upper,
err = ovl_check_setxattr(ofs, upper, OVL_XATTR_PROTATTR,
buf, len, -EPERM);
} else if (inode->i_flags & OVL_PROT_I_FLAGS_MASK) {
- err = ovl_do_removexattr(ofs, upper, OVL_XATTR_PROTATTR);
+ err = ovl_removexattr(ofs, upper, OVL_XATTR_PROTATTR);
if (err == -EOPNOTSUPP || err == -ENODATA)
err = 0;
}
@@ -824,7 +852,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
}
inode_lock_nested(dir, I_MUTEX_PARENT);
- index = lookup_one_len(name.name, indexdir, name.len);
+ index = ovl_lookup_upper(ofs, name.name, indexdir, name.len);
err = PTR_ERR(index);
if (IS_ERR(index)) {
index = NULL;
@@ -834,7 +862,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
dir, index);
} else {
/* Cleanup orphan index entries */
- err = ovl_cleanup(dir, index);
+ err = ovl_cleanup(ofs, dir, index);
}
inode_unlock(dir);
@@ -943,15 +971,15 @@ err:
}
/* err < 0, 0 if no metacopy xattr, 1 if metacopy xattr found */
-int ovl_check_metacopy_xattr(struct ovl_fs *ofs, struct dentry *dentry)
+int ovl_check_metacopy_xattr(struct ovl_fs *ofs, struct path *path)
{
int res;
/* Only regular files can have metacopy xattr */
- if (!S_ISREG(d_inode(dentry)->i_mode))
+ if (!S_ISREG(d_inode(path->dentry)->i_mode))
return 0;
- res = ovl_do_getxattr(ofs, dentry, OVL_XATTR_METACOPY, NULL, 0);
+ res = ovl_path_getxattr(ofs, path, OVL_XATTR_METACOPY, NULL, 0);
if (res < 0) {
if (res == -ENODATA || res == -EOPNOTSUPP)
return 0;
@@ -987,13 +1015,12 @@ bool ovl_is_metacopy_dentry(struct dentry *dentry)
return (oe->numlower > 1);
}
-char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
- int padding)
+char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct path *path, int padding)
{
int res;
char *s, *next, *buf = NULL;
- res = ovl_do_getxattr(ofs, dentry, OVL_XATTR_REDIRECT, NULL, 0);
+ res = ovl_path_getxattr(ofs, path, OVL_XATTR_REDIRECT, NULL, 0);
if (res == -ENODATA || res == -EOPNOTSUPP)
return NULL;
if (res < 0)
@@ -1005,7 +1032,7 @@ char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
if (!buf)
return ERR_PTR(-ENOMEM);
- res = ovl_do_getxattr(ofs, dentry, OVL_XATTR_REDIRECT, buf, res);
+ res = ovl_path_getxattr(ofs, path, OVL_XATTR_REDIRECT, buf, res);
if (res < 0)
goto fail;
if (res == 0)
@@ -1060,3 +1087,33 @@ int ovl_sync_status(struct ovl_fs *ofs)
return errseq_check(&mnt->mnt_sb->s_wb_err, ofs->errseq);
}
+
+/*
+ * ovl_copyattr() - copy inode attributes from layer to ovl inode
+ *
+ * When overlay copies inode information from an upper or lower layer to the
+ * relevant overlay inode, it will apply the idmapping of the upper or lower
+ * layer when doing so, ensuring that the ovl inode ownership will correctly
+ * reflect the ownership of the idmapped upper or lower layer. For example, an
+ * idmapped upper or lower layer mapping id 1001 to id 1000 will take care to
+ * map any lower or upper inode owned by id 1001 to id 1000. These mapping
+ * helpers are nops when the relevant layer isn't idmapped.
+ */
+void ovl_copyattr(struct inode *inode)
+{
+ struct path realpath;
+ struct inode *realinode;
+ struct user_namespace *real_mnt_userns;
+
+ ovl_i_path_real(inode, &realpath);
+ realinode = d_inode(realpath.dentry);
+ real_mnt_userns = mnt_user_ns(realpath.mnt);
+
+ inode->i_uid = i_uid_into_mnt(real_mnt_userns, realinode);
+ inode->i_gid = i_gid_into_mnt(real_mnt_userns, realinode);
+ inode->i_mode = realinode->i_mode;
+ inode->i_atime = realinode->i_atime;
+ inode->i_mtime = realinode->i_mtime;
+ inode->i_ctime = realinode->i_ctime;
+ i_size_write(inode, i_size_read(realinode));
+}
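
The ovl_copyattr() comment above describes the layer idmapping with a concrete "1001 -> 1000" example. Below is a minimal userspace C sketch of that single-range translation only; the names (struct idmap_range, map_id) are hypothetical and stand in for the kernel's i_uid_into_mnt()/struct user_namespace machinery, which this diff relies on but does not show.

#include <stdio.h>

struct idmap_range {
	unsigned int first;	/* first id owned by the layer, e.g. 1001 */
	unsigned int lower;	/* id it maps to on the mount, e.g. 1000 */
	unsigned int count;	/* number of ids in the range */
};

/* ids outside the range pass through unchanged (the "nop" case) */
static unsigned int map_id(const struct idmap_range *r, unsigned int id)
{
	if (id < r->first || id >= r->first + r->count)
		return id;
	return r->lower + (id - r->first);
}

int main(void)
{
	struct idmap_range r = { .first = 1001, .lower = 1000, .count = 1 };

	printf("on-disk uid 1001 -> ovl uid %u\n", map_id(&r, 1001));	/* 1000 */
	printf("on-disk uid 1500 -> ovl uid %u\n", map_id(&r, 1500));	/* 1500 */
	return 0;
}

Ids outside the configured range fall through unchanged, mirroring the "helpers are nops when the relevant layer isn't idmapped" behaviour noted in the comment.
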
diff --git a/fs/pipe.c b/fs/pipe.c
index e140ea150bbb..74ae9fafd25a 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -653,7 +653,7 @@ pipe_poll(struct file *filp, poll_table *wait)
unsigned int head, tail;
/* Epoll has some historical nasty semantics, this enables them */
- pipe->poll_usage = 1;
+ WRITE_ONCE(pipe->poll_usage, true);
/*
* Reading pipe state only -- no need for acquiring the semaphore.
@@ -1245,30 +1245,33 @@ unsigned int round_pipe_size(unsigned long size)
/*
* Resize the pipe ring to a number of slots.
+ *
+ * Note the pipe can be reduced in capacity, but only if the current
+ * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
+ * returned instead.
*/
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
struct pipe_buffer *bufs;
unsigned int head, tail, mask, n;
- /*
- * We can shrink the pipe, if arg is greater than the ring occupancy.
- * Since we don't expect a lot of shrink+grow operations, just free and
- * allocate again like we would do for growing. If the pipe currently
- * contains more buffers than arg, then return busy.
- */
- mask = pipe->ring_size - 1;
- head = pipe->head;
- tail = pipe->tail;
- n = pipe_occupancy(pipe->head, pipe->tail);
- if (nr_slots < n)
- return -EBUSY;
-
bufs = kcalloc(nr_slots, sizeof(*bufs),
GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (unlikely(!bufs))
return -ENOMEM;
+ spin_lock_irq(&pipe->rd_wait.lock);
+ mask = pipe->ring_size - 1;
+ head = pipe->head;
+ tail = pipe->tail;
+
+ n = pipe_occupancy(head, tail);
+ if (nr_slots < n) {
+ spin_unlock_irq(&pipe->rd_wait.lock);
+ kfree(bufs);
+ return -EBUSY;
+ }
+
/*
* The pipe array wraps around, so just start the new one at zero
* and adjust the indices.
@@ -1300,6 +1303,8 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
pipe->tail = tail;
pipe->head = head;
+ spin_unlock_irq(&pipe->rd_wait.lock);
+
/* This might have made more room for writers */
wake_up_interruptible(&pipe->wr_wait);
return 0;
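
As a rough illustration of the shrink rule in the pipe_resize_ring() comment above: occupancy is effectively head - tail with unsigned wrap-around, and a resize below the current occupancy is refused with -EBUSY. The names below are local to this sketch, not kernel API.

#include <stdio.h>

/* occupancy of the ring: head - tail, relying on unsigned wrap-around */
static unsigned int occupancy(unsigned int head, unsigned int tail)
{
	return head - tail;
}

int main(void)
{
	unsigned int head = 20, tail = 6;	/* 14 buffers currently queued */
	unsigned int n = occupancy(head, tail);

	printf("occupancy = %u\n", n);
	printf("resize to 8 slots:  %s\n", 8 < n ? "-EBUSY" : "ok");
	printf("resize to 16 slots: %s\n", 16 < n ? "-EBUSY" : "ok");
	return 0;
}

With 14 slots in use, shrinking to 8 reports -EBUSY while 16 succeeds, matching the nr_slots < n test now done under pipe->rd_wait.lock in the hunk above.
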
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index f2132407e133..587b91d9d998 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -448,6 +448,9 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
proc_set_user(ent, (*parent)->uid, (*parent)->gid);
ent->proc_dops = &proc_misc_dentry_ops;
+ /* Revalidate everything under /proc/${pid}/net */
+ if ((*parent)->proc_dops == &proc_net_dentry_ops)
+ pde_force_lookup(ent);
out:
return ent;
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 982e694aae77..dff921f7ca33 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -479,10 +479,15 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
* the previous entry, search for a matching entry.
*/
if (!m || start < m->addr || start >= m->addr + m->size) {
- list_for_each_entry(m, &kclist_head, list) {
- if (start >= m->addr &&
- start < m->addr + m->size)
+ struct kcore_list *iter;
+
+ m = NULL;
+ list_for_each_entry(iter, &kclist_head, list) {
+ if (start >= iter->addr &&
+ start < iter->addr + iter->size) {
+ m = iter;
break;
+ }
}
}
@@ -492,12 +497,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
page_offline_freeze();
}
- if (&m->list == &kclist_head) {
+ if (!m) {
if (clear_user(buffer, tsz)) {
ret = -EFAULT;
goto out;
}
- m = NULL; /* skip the list anchor */
goto skip;
}
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index e1cfeda397f3..913e5acefbb6 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -376,6 +376,9 @@ static __net_init int proc_net_ns_init(struct net *net)
proc_set_user(netd, uid, gid);
+ /* Seed dentry revalidation for /proc/${pid}/net */
+ pde_force_lookup(netd);
+
err = -EEXIST;
net_statd = proc_net_mkdir(net, "stat", netd);
if (!net_statd)
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 6f1b8ddc6f7a..4eaeb645e759 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -26,6 +26,7 @@
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
+#include <linux/uio.h>
#include <linux/cc_platform.h>
#include <asm/io.h>
#include "internal.h"
@@ -128,9 +129,8 @@ static int open_vmcore(struct inode *inode, struct file *file)
}
/* Reads a page from the oldmem device from given offset. */
-ssize_t read_from_oldmem(char *buf, size_t count,
- u64 *ppos, int userbuf,
- bool encrypted)
+ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
+ u64 *ppos, bool encrypted)
{
unsigned long pfn, offset;
size_t nr_bytes;
@@ -152,29 +152,23 @@ ssize_t read_from_oldmem(char *buf, size_t count,
/* If pfn is not ram, return zeros for sparse dump files */
if (!pfn_is_ram(pfn)) {
- tmp = 0;
- if (!userbuf)
- memset(buf, 0, nr_bytes);
- else if (clear_user(buf, nr_bytes))
- tmp = -EFAULT;
+ tmp = iov_iter_zero(nr_bytes, iter);
} else {
if (encrypted)
- tmp = copy_oldmem_page_encrypted(pfn, buf,
+ tmp = copy_oldmem_page_encrypted(iter, pfn,
nr_bytes,
- offset,
- userbuf);
+ offset);
else
- tmp = copy_oldmem_page(pfn, buf, nr_bytes,
- offset, userbuf);
+ tmp = copy_oldmem_page(iter, pfn, nr_bytes,
+ offset);
}
- if (tmp < 0) {
+ if (tmp < nr_bytes) {
srcu_read_unlock(&vmcore_cb_srcu, idx);
- return tmp;
+ return -EFAULT;
}
*ppos += nr_bytes;
count -= nr_bytes;
- buf += nr_bytes;
read += nr_bytes;
++pfn;
offset = 0;
@@ -203,7 +197,12 @@ void __weak elfcorehdr_free(unsigned long long addr)
*/
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
- return read_from_oldmem(buf, count, ppos, 0, false);
+ struct kvec kvec = { .iov_base = buf, .iov_len = count };
+ struct iov_iter iter;
+
+ iov_iter_kvec(&iter, READ, &kvec, 1, count);
+
+ return read_from_oldmem(&iter, count, ppos, false);
}
/*
@@ -211,7 +210,13 @@ ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
*/
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
- return read_from_oldmem(buf, count, ppos, 0, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
+ struct kvec kvec = { .iov_base = buf, .iov_len = count };
+ struct iov_iter iter;
+
+ iov_iter_kvec(&iter, READ, &kvec, 1, count);
+
+ return read_from_oldmem(&iter, count, ppos,
+ cc_platform_has(CC_ATTR_MEM_ENCRYPT));
}
/*
@@ -228,29 +233,14 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
/*
* Architectures which support memory encryption override this.
*/
-ssize_t __weak
-copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
- unsigned long offset, int userbuf)
+ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
+ unsigned long pfn, size_t csize, unsigned long offset)
{
- return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
-}
-
-/*
- * Copy to either kernel or user space
- */
-static int copy_to(void *target, void *src, size_t size, int userbuf)
-{
- if (userbuf) {
- if (copy_to_user((char __user *) target, src, size))
- return -EFAULT;
- } else {
- memcpy(target, src, size);
- }
- return 0;
+ return copy_oldmem_page(iter, pfn, csize, offset);
}
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
-static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
+static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
{
struct vmcoredd_node *dump;
u64 offset = 0;
@@ -263,14 +253,13 @@ static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
if (start < offset + dump->size) {
tsz = min(offset + (u64)dump->size - start, (u64)size);
buf = dump->buf + start - offset;
- if (copy_to(dst, buf, tsz, userbuf)) {
+ if (copy_to_iter(buf, tsz, iter) < tsz) {
ret = -EFAULT;
goto out_unlock;
}
size -= tsz;
start += tsz;
- dst += tsz;
/* Leave now if buffer filled already */
if (!size)
@@ -326,33 +315,28 @@ out_unlock:
/* Read from the ELF header and then the crash dump. On error, negative value is
* returned otherwise number of bytes read are returned.
*/
-static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
- int userbuf)
+static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
{
ssize_t acc = 0, tmp;
size_t tsz;
u64 start;
struct vmcore *m = NULL;
- if (buflen == 0 || *fpos >= vmcore_size)
+ if (!iov_iter_count(iter) || *fpos >= vmcore_size)
return 0;
- /* trim buflen to not go beyond EOF */
- if (buflen > vmcore_size - *fpos)
- buflen = vmcore_size - *fpos;
+ iov_iter_truncate(iter, vmcore_size - *fpos);
/* Read ELF core header */
if (*fpos < elfcorebuf_sz) {
- tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
- if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
+ tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));
+ if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)
return -EFAULT;
- buflen -= tsz;
*fpos += tsz;
- buffer += tsz;
acc += tsz;
/* leave now if filled buffer already */
- if (buflen == 0)
+ if (!iov_iter_count(iter))
return acc;
}
@@ -373,35 +357,32 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
/* Read device dumps */
if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
- (size_t)*fpos, buflen);
+ (size_t)*fpos, iov_iter_count(iter));
start = *fpos - elfcorebuf_sz;
- if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
+ if (vmcoredd_copy_dumps(iter, start, tsz))
return -EFAULT;
- buflen -= tsz;
*fpos += tsz;
- buffer += tsz;
acc += tsz;
/* leave now if filled buffer already */
- if (!buflen)
+ if (!iov_iter_count(iter))
return acc;
}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
/* Read remaining elf notes */
- tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
+ tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos,
+ iov_iter_count(iter));
kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
- if (copy_to(buffer, kaddr, tsz, userbuf))
+ if (copy_to_iter(kaddr, tsz, iter) < tsz)
return -EFAULT;
- buflen -= tsz;
*fpos += tsz;
- buffer += tsz;
acc += tsz;
/* leave now if filled buffer already */
- if (buflen == 0)
+ if (!iov_iter_count(iter))
return acc;
}
@@ -409,19 +390,17 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
if (*fpos < m->offset + m->size) {
tsz = (size_t)min_t(unsigned long long,
m->offset + m->size - *fpos,
- buflen);
+ iov_iter_count(iter));
start = m->paddr + *fpos - m->offset;
- tmp = read_from_oldmem(buffer, tsz, &start,
- userbuf, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
+ tmp = read_from_oldmem(iter, tsz, &start,
+ cc_platform_has(CC_ATTR_MEM_ENCRYPT));
if (tmp < 0)
return tmp;
- buflen -= tsz;
*fpos += tsz;
- buffer += tsz;
acc += tsz;
/* leave now if filled buffer already */
- if (buflen == 0)
+ if (!iov_iter_count(iter))
return acc;
}
}
@@ -429,15 +408,14 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
return acc;
}
-static ssize_t read_vmcore(struct file *file, char __user *buffer,
- size_t buflen, loff_t *fpos)
+static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
{
- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
+ return __read_vmcore(iter, &iocb->ki_pos);
}
/*
* The vmcore fault handler uses the page cache and fills data using the
- * standard __vmcore_read() function.
+ * standard __read_vmcore() function.
*
* On s390 the fault handler is used for memory regions that can't be mapped
* directly with remap_pfn_range().
@@ -447,9 +425,10 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
#ifdef CONFIG_S390
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
pgoff_t index = vmf->pgoff;
+ struct iov_iter iter;
+ struct kvec kvec;
struct page *page;
loff_t offset;
- char *buf;
int rc;
page = find_or_create_page(mapping, index, GFP_KERNEL);
@@ -457,8 +436,11 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
return VM_FAULT_OOM;
if (!PageUptodate(page)) {
offset = (loff_t) index << PAGE_SHIFT;
- buf = __va((page_to_pfn(page) << PAGE_SHIFT));
- rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
+ kvec.iov_base = page_address(page);
+ kvec.iov_len = PAGE_SIZE;
+ iov_iter_kvec(&iter, READ, &kvec, 1, PAGE_SIZE);
+
+ rc = __read_vmcore(&iter, &offset);
if (rc < 0) {
unlock_page(page);
put_page(page);
@@ -708,7 +690,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
static const struct proc_ops vmcore_proc_ops = {
.proc_open = open_vmcore,
- .proc_read = read_vmcore,
+ .proc_read_iter = read_vmcore,
.proc_lseek = default_llseek,
.proc_mmap = mmap_vmcore,
};
diff --git a/fs/read_write.c b/fs/read_write.c
index e643aec2b0ef..b1b1cdfee9d3 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -682,6 +682,14 @@ SYSCALL_DEFINE4(pread64, unsigned int, fd, char __user *, buf,
return ksys_pread64(fd, buf, count, pos);
}
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_PREAD64)
+COMPAT_SYSCALL_DEFINE5(pread64, unsigned int, fd, char __user *, buf,
+ size_t, count, compat_arg_u64_dual(pos))
+{
+ return ksys_pread64(fd, buf, count, compat_arg_u64_glue(pos));
+}
+#endif
+
ssize_t ksys_pwrite64(unsigned int fd, const char __user *buf,
size_t count, loff_t pos)
{
@@ -708,6 +716,14 @@ SYSCALL_DEFINE4(pwrite64, unsigned int, fd, const char __user *, buf,
return ksys_pwrite64(fd, buf, count, pos);
}
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_PWRITE64)
+COMPAT_SYSCALL_DEFINE5(pwrite64, unsigned int, fd, const char __user *, buf,
+ size_t, count, compat_arg_u64_dual(pos))
+{
+ return ksys_pwrite64(fd, buf, count, compat_arg_u64_glue(pos));
+}
+#endif
+
static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
loff_t *ppos, int type, rwf_t flags)
{
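
The compat pread64/pwrite64 wrappers above (and the sync_file_range wrapper further down) rebuild a 64-bit value from two 32-bit syscall arguments via the compat_arg_u64_dual()/compat_arg_u64_glue() macros, whose definitions are not part of this diff. The sketch below shows only the split-and-glue arithmetic, with hypothetical helper names, and ignores the architecture-dependent ordering and register-pair alignment the real macros handle.

#include <stdint.h>
#include <stdio.h>

/* hypothetical split/glue helpers; not the kernel macros */
static void split_u64(uint64_t v, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(v >> 32);
	*lo = (uint32_t)v;
}

static uint64_t glue_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint32_t hi, lo;
	uint64_t pos = 0x12345678abcdef00ULL;

	split_u64(pos, &hi, &lo);	/* what a 32-bit caller passes */
	printf("glued offset: 0x%llx\n",
	       (unsigned long long)glue_u64(hi, lo));	/* kernel side */
	return 0;
}

Which half arrives in which register is ABI specific, which is exactly the detail the macros exist to hide.
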
diff --git a/fs/smbfs_common/smb2pdu.h b/fs/smbfs_common/smb2pdu.h
index 0507aecfc669..2cab413fffee 100644
--- a/fs/smbfs_common/smb2pdu.h
+++ b/fs/smbfs_common/smb2pdu.h
@@ -1244,6 +1244,106 @@ struct file_zero_data_information {
__le64 BeyondFinalZero;
} __packed;
+/* See MS-FSCC 2.3.7 */
+struct duplicate_extents_to_file {
+ __u64 PersistentFileHandle; /* source file handle, opaque endianness */
+ __u64 VolatileFileHandle;
+ __le64 SourceFileOffset;
+ __le64 TargetFileOffset;
+ __le64 ByteCount; /* Bytes to be copied */
+} __packed;
+
+/* See MS-FSCC 2.3.8 */
+#define DUPLICATE_EXTENTS_DATA_EX_SOURCE_ATOMIC 0x00000001
+struct duplicate_extents_to_file_ex {
+ __u64 PersistentFileHandle; /* source file handle, opaque endianness */
+ __u64 VolatileFileHandle;
+ __le64 SourceFileOffset;
+ __le64 TargetFileOffset;
+ __le64 ByteCount; /* Bytes to be copied */
+ __le32 Flags;
+ __le32 Reserved;
+} __packed;
+
+
+/* See MS-FSCC 2.3.20 */
+struct fsctl_get_integrity_information_rsp {
+ __le16 ChecksumAlgorithm;
+ __le16 Reserved;
+ __le32 Flags;
+ __le32 ChecksumChunkSizeInBytes;
+ __le32 ClusterSizeInBytes;
+} __packed;
+
+/* See MS-FSCC 2.3.55 */
+struct fsctl_query_file_regions_req {
+ __le64 FileOffset;
+ __le64 Length;
+ __le32 DesiredUsage;
+ __le32 Reserved;
+} __packed;
+
+/* DesiredUsage flags see MS-FSCC 2.3.56.1 */
+#define FILE_USAGE_INVALID_RANGE 0x00000000
+#define FILE_USAGE_VALID_CACHED_DATA 0x00000001
+#define FILE_USAGE_NONCACHED_DATA 0x00000002
+
+struct file_region_info {
+ __le64 FileOffset;
+ __le64 Length;
+ __le32 DesiredUsage;
+ __le32 Reserved;
+} __packed;
+
+/* See MS-FSCC 2.3.56 */
+struct fsctl_query_file_region_rsp {
+ __le32 Flags;
+ __le32 TotalRegionEntryCount;
+ __le32 RegionEntryCount;
+ __u32 Reserved;
+ struct file_region_info Regions[];
+} __packed;
+
+/* See MS-FSCC 2.3.58 */
+struct fsctl_query_on_disk_vol_info_rsp {
+ __le64 DirectoryCount;
+ __le64 FileCount;
+ __le16 FsFormatMajVersion;
+ __le16 FsFormatMinVersion;
+ __u8 FsFormatName[24];
+ __le64 FormatTime;
+ __le64 LastUpdateTime;
+ __u8 CopyrightInfo[68];
+ __u8 AbstractInfo[68];
+ __u8 FormatImplInfo[68];
+ __u8 LastModifyImplInfo[68];
+} __packed;
+
+/* See MS-FSCC 2.3.73 */
+struct fsctl_set_integrity_information_req {
+ __le16 ChecksumAlgorithm;
+ __le16 Reserved;
+ __le32 Flags;
+} __packed;
+
+/* See MS-FSCC 2.3.75 */
+struct fsctl_set_integrity_info_ex_req {
+ __u8 EnableIntegrity;
+ __u8 KeepState;
+ __u16 Reserved;
+ __le32 Flags;
+ __u8 Version;
+ __u8 Reserved2[7];
+} __packed;
+
+/* Integrity ChecksumAlgorithm choices for above */
+#define CHECKSUM_TYPE_NONE 0x0000
+#define CHECKSUM_TYPE_CRC64 0x0002
+#define CHECKSUM_TYPE_UNCHANGED 0xFFFF /* set only */
+
+/* Integrity flags for above */
+#define FSCTL_INTEGRITY_FLAG_CHECKSUM_ENFORCEMENT_OFF 0x00000001
+
/* Reparse structures - see MS-FSCC 2.1.2 */
/* struct fsctl_reparse_info_req is empty, only response structs (see below) */
@@ -1304,13 +1404,6 @@ struct validate_negotiate_info_rsp {
__le16 Dialect; /* Dialect in use for the connection */
} __packed;
-struct duplicate_extents_to_file {
- __u64 PersistentFileHandle; /* source file handle, opaque endianness */
- __u64 VolatileFileHandle;
- __le64 SourceFileOffset;
- __le64 TargetFileOffset;
- __le64 ByteCount; /* Bytes to be copied */
-} __packed;
/* Possible InfoType values */
#define SMB2_O_INFO_FILE 0x01
@@ -1419,6 +1512,7 @@ struct smb2_query_info_rsp {
* PDU query infolevel structure definitions
*/
+/* See MS-FSCC 2.3.52 */
struct file_allocated_range_buffer {
__le64 file_offset;
__le64 length;
diff --git a/fs/smbfs_common/smbfsctl.h b/fs/smbfs_common/smbfsctl.h
index d51939c43ad7..edd7fc2a7921 100644
--- a/fs/smbfs_common/smbfsctl.h
+++ b/fs/smbfs_common/smbfsctl.h
@@ -88,21 +88,27 @@
#define FSCTL_READ_RAW_ENCRYPTED 0x000900E3 /* BB add struct */
#define FSCTL_READ_FILE_USN_DATA 0x000900EB /* BB add struct */
#define FSCTL_WRITE_USN_CLOSE_RECORD 0x000900EF /* BB add struct */
+#define FSCTL_MARK_HANDLE 0x000900FC /* BB add struct */
#define FSCTL_SIS_COPYFILE 0x00090100 /* BB add struct */
#define FSCTL_RECALL_FILE 0x00090117 /* BB add struct */
#define FSCTL_QUERY_SPARING_INFO 0x00090138 /* BB add struct */
+#define FSCTL_QUERY_ON_DISK_VOLUME_INFO 0x0009013C
#define FSCTL_SET_ZERO_ON_DEALLOC 0x00090194 /* BB add struct */
#define FSCTL_SET_SHORT_NAME_BEHAVIOR 0x000901B4 /* BB add struct */
#define FSCTL_GET_INTEGRITY_INFORMATION 0x0009027C
+#define FSCTL_QUERY_FILE_REGIONS 0x00090284
#define FSCTL_GET_REFS_VOLUME_DATA 0x000902D8 /* See MS-FSCC 2.3.24 */
#define FSCTL_SET_INTEGRITY_INFORMATION_EXT 0x00090380
#define FSCTL_GET_RETRIEVAL_POINTERS_AND_REFCOUNT 0x000903d3
#define FSCTL_GET_RETRIEVAL_POINTER_COUNT 0x0009042b
#define FSCTL_REFS_STREAM_SNAPSHOT_MANAGEMENT 0x00090440
#define FSCTL_QUERY_ALLOCATED_RANGES 0x000940CF
+#define FSCTL_OFFLOAD_READ 0x00094264 /* BB add struct */
+#define FSCTL_OFFLOAD_WRITE 0x00098268 /* BB add struct */
#define FSCTL_SET_DEFECT_MANAGEMENT 0x00098134 /* BB add struct */
#define FSCTL_FILE_LEVEL_TRIM 0x00098208 /* BB add struct */
#define FSCTL_DUPLICATE_EXTENTS_TO_FILE 0x00098344
+#define FSCTL_DUPLICATE_EXTENTS_TO_FILE_EX 0x000983E8
#define FSCTL_SIS_LINK_FILES 0x0009C104
#define FSCTL_SET_INTEGRITY_INFORMATION 0x0009C280
#define FSCTL_PIPE_PEEK 0x0011400C /* BB add struct */
diff --git a/fs/stat.c b/fs/stat.c
index 5c2c94464e8b..9ced8860e0f3 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -659,7 +659,7 @@ SYSCALL_DEFINE5(statx,
return ret;
}
-#ifdef CONFIG_COMPAT
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
struct compat_stat tmp;
diff --git a/fs/sync.c b/fs/sync.c
index c7690016453e..dc725914e1ed 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -373,6 +373,15 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
return ksys_sync_file_range(fd, offset, nbytes, flags);
}
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_SYNC_FILE_RANGE)
+COMPAT_SYSCALL_DEFINE6(sync_file_range, int, fd, compat_arg_u64_dual(offset),
+ compat_arg_u64_dual(nbytes), unsigned int, flags)
+{
+ return ksys_sync_file_range(fd, compat_arg_u64_glue(offset),
+ compat_arg_u64_glue(nbytes), flags);
+}
+#endif
+
/* It would be nice if people remember that not all the world's an i386
when they introduce new system calls */
SYSCALL_DEFINE4(sync_file_range2, int, fd, unsigned int, flags,
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index d1def0771a40..3365a30dc1e0 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -312,7 +312,9 @@ static int complete_read_super(struct super_block *sb, int silent, int size)
sbi->s_firstinodezone = 2;
flavour_setup[sbi->s_type](sbi, &sb->s_max_links);
-
+ if (sbi->s_firstdatazone < sbi->s_firstinodezone)
+ return 0;
+
sbi->s_ndatazones = sbi->s_nzones - sbi->s_firstdatazone;
sbi->s_inodes_per_block = bsize >> 6;
sbi->s_inodes_per_block_1 = (bsize >> 6)-1;
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index c0b84e960b20..e8b9b756f0ac 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -65,7 +65,7 @@ static void shrink_liability(struct ubifs_info *c, int nr_to_write)
*/
static int run_gc(struct ubifs_info *c)
{
- int err, lnum;
+ int lnum;
/* Make some free space by garbage-collecting dirty space */
down_read(&c->commit_sem);
@@ -76,10 +76,7 @@ static int run_gc(struct ubifs_info *c)
/* GC freed one LEB, return it to lprops */
dbg_budg("GC freed LEB %d", lnum);
- err = ubifs_return_leb(c, lnum);
- if (err)
- return err;
- return 0;
+ return ubifs_return_leb(c, lnum);
}
/**
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index e4f193eae4b2..e4c4761aff7f 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -677,7 +677,7 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode,
int err;
err = security_inode_init_security(inode, dentry, qstr,
- &init_xattrs, 0);
+ &init_xattrs, NULL);
if (err) {
struct ubifs_info *c = dentry->i_sb->s_fs_info;
ubifs_err(c, "cannot initialize security for inode %lu, error %d",
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 1e4ee042d52f..3e920cf1b454 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -173,7 +173,6 @@ __xfs_free_perag(
struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
- ASSERT(atomic_read(&pag->pag_ref) == 0);
kmem_free(pag);
}
@@ -192,7 +191,7 @@ xfs_free_perag(
pag = radix_tree_delete(&mp->m_perag_tree, agno);
spin_unlock(&mp->m_perag_lock);
ASSERT(pag);
- ASSERT(atomic_read(&pag->pag_ref) == 0);
+ XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0);
cancel_delayed_work_sync(&pag->pag_blockgc_work);
xfs_iunlink_destroy(pag);
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 14ae0826bc15..836ab1b8ed7b 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -25,10 +25,9 @@
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_attr_item.h"
-#include "xfs_log.h"
+#include "xfs_xattr.h"
-struct kmem_cache *xfs_attri_cache;
-struct kmem_cache *xfs_attrd_cache;
+struct kmem_cache *xfs_attr_intent_cache;
/*
* xfs_attr.c
@@ -58,11 +57,11 @@ STATIC int xfs_attr_leaf_try_add(struct xfs_da_args *args, struct xfs_buf *bp);
*/
STATIC int xfs_attr_node_get(xfs_da_args_t *args);
STATIC void xfs_attr_restore_rmt_blk(struct xfs_da_args *args);
-static int xfs_attr_node_try_addname(struct xfs_attr_item *attr);
-STATIC int xfs_attr_node_addname_find_attr(struct xfs_attr_item *attr);
-STATIC int xfs_attr_node_remove_attr(struct xfs_attr_item *attr);
-STATIC int xfs_attr_node_hasname(xfs_da_args_t *args,
- struct xfs_da_state **state);
+static int xfs_attr_node_try_addname(struct xfs_attr_intent *attr);
+STATIC int xfs_attr_node_addname_find_attr(struct xfs_attr_intent *attr);
+STATIC int xfs_attr_node_remove_attr(struct xfs_attr_intent *attr);
+STATIC int xfs_attr_node_lookup(struct xfs_da_args *args,
+ struct xfs_da_state *state);
int
xfs_inode_hasattr(
@@ -377,7 +376,7 @@ xfs_attr_try_sf_addname(
static int
xfs_attr_sf_addname(
- struct xfs_attr_item *attr)
+ struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
struct xfs_inode *dp = args->dp;
@@ -423,7 +422,7 @@ out:
*/
static enum xfs_delattr_state
xfs_attr_complete_op(
- struct xfs_attr_item *attr,
+ struct xfs_attr_intent *attr,
enum xfs_delattr_state replace_state)
{
struct xfs_da_args *args = attr->xattri_da_args;
@@ -439,7 +438,7 @@ xfs_attr_complete_op(
static int
xfs_attr_leaf_addname(
- struct xfs_attr_item *attr)
+ struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
int error;
@@ -493,7 +492,7 @@ out:
*/
static int
xfs_attr_node_addname(
- struct xfs_attr_item *attr)
+ struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
int error;
@@ -530,7 +529,7 @@ out:
static int
xfs_attr_rmtval_alloc(
- struct xfs_attr_item *attr)
+ struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
int error = 0;
@@ -594,6 +593,19 @@ xfs_attr_leaf_mark_incomplete(
return xfs_attr3_leaf_setflag(args);
}
+/* Ensure the da state of an xattr deferred work item is ready to go. */
+static inline void
+xfs_attr_item_init_da_state(
+ struct xfs_attr_intent *attr)
+{
+ struct xfs_da_args *args = attr->xattri_da_args;
+
+ if (!attr->xattri_da_state)
+ attr->xattri_da_state = xfs_da_state_alloc(args);
+ else
+ xfs_da_state_reset(attr->xattri_da_state, args);
+}
+
/*
* Initial setup for xfs_attr_node_removename. Make sure the attr is there and
* the blocks are valid. Attr keys with remote blocks will be marked
@@ -601,29 +613,33 @@ xfs_attr_leaf_mark_incomplete(
*/
static
int xfs_attr_node_removename_setup(
- struct xfs_attr_item *attr)
+ struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
- struct xfs_da_state **state = &attr->xattri_da_state;
+ struct xfs_da_state *state;
int error;
- error = xfs_attr_node_hasname(args, state);
+ xfs_attr_item_init_da_state(attr);
+ error = xfs_attr_node_lookup(args, attr->xattri_da_state);
if (error != -EEXIST)
goto out;
error = 0;
- ASSERT((*state)->path.blk[(*state)->path.active - 1].bp != NULL);
- ASSERT((*state)->path.blk[(*state)->path.active - 1].magic ==
+ state = attr->xattri_da_state;
+ ASSERT(state->path.blk[state->path.active - 1].bp != NULL);
+ ASSERT(state->path.blk[state->path.active - 1].magic ==
XFS_ATTR_LEAF_MAGIC);
- error = xfs_attr_leaf_mark_incomplete(args, *state);
+ error = xfs_attr_leaf_mark_incomplete(args, state);
if (error)
goto out;
if (args->rmtblkno > 0)
error = xfs_attr_rmtval_invalidate(args);
out:
- if (error)
- xfs_da_state_free(*state);
+ if (error) {
+ xfs_da_state_free(attr->xattri_da_state);
+ attr->xattri_da_state = NULL;
+ }
return error;
}
@@ -635,7 +651,7 @@ out:
*/
static int
xfs_attr_leaf_remove_attr(
- struct xfs_attr_item *attr)
+ struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
struct xfs_inode *dp = args->dp;
@@ -700,7 +716,7 @@ xfs_attr_leaf_shrink(
*/
int
xfs_attr_set_iter(
- struct xfs_attr_item *attr)
+ struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
int error = 0;
@@ -852,6 +868,7 @@ xfs_attr_lookup(
{
struct xfs_inode *dp = args->dp;
struct xfs_buf *bp = NULL;
+ struct xfs_da_state *state;
int error;
if (!xfs_inode_hasattr(dp))
@@ -869,19 +886,22 @@ xfs_attr_lookup(
return error;
}
- return xfs_attr_node_hasname(args, NULL);
+ state = xfs_da_state_alloc(args);
+ error = xfs_attr_node_lookup(args, state);
+ xfs_da_state_free(state);
+ return error;
}
static int
-xfs_attr_item_init(
+xfs_attr_intent_init(
struct xfs_da_args *args,
unsigned int op_flags, /* op flag (set or remove) */
- struct xfs_attr_item **attr) /* new xfs_attr_item */
+ struct xfs_attr_intent **attr) /* new xfs_attr_intent */
{
- struct xfs_attr_item *new;
+ struct xfs_attr_intent *new;
- new = kmem_zalloc(sizeof(struct xfs_attr_item), KM_NOFS);
+ new = kmem_cache_zalloc(xfs_attr_intent_cache, GFP_NOFS | __GFP_NOFAIL);
new->xattri_op_flags = op_flags;
new->xattri_da_args = args;
@@ -894,10 +914,10 @@ static int
xfs_attr_defer_add(
struct xfs_da_args *args)
{
- struct xfs_attr_item *new;
+ struct xfs_attr_intent *new;
int error = 0;
- error = xfs_attr_item_init(args, XFS_ATTR_OP_FLAGS_SET, &new);
+ error = xfs_attr_intent_init(args, XFS_ATTRI_OP_FLAGS_SET, &new);
if (error)
return error;
@@ -913,10 +933,10 @@ static int
xfs_attr_defer_replace(
struct xfs_da_args *args)
{
- struct xfs_attr_item *new;
+ struct xfs_attr_intent *new;
int error = 0;
- error = xfs_attr_item_init(args, XFS_ATTR_OP_FLAGS_REPLACE, &new);
+ error = xfs_attr_intent_init(args, XFS_ATTRI_OP_FLAGS_REPLACE, &new);
if (error)
return error;
@@ -933,10 +953,10 @@ xfs_attr_defer_remove(
struct xfs_da_args *args)
{
- struct xfs_attr_item *new;
+ struct xfs_attr_intent *new;
int error;
- error = xfs_attr_item_init(args, XFS_ATTR_OP_FLAGS_REMOVE, &new);
+ error = xfs_attr_intent_init(args, XFS_ATTRI_OP_FLAGS_REMOVE, &new);
if (error)
return error;
@@ -962,7 +982,6 @@ xfs_attr_set(
int error, local;
int rmt_blks = 0;
unsigned int total;
- int delayed = xfs_has_larp(mp);
if (xfs_is_shutdown(dp->i_mount))
return -EIO;
@@ -1007,12 +1026,6 @@ xfs_attr_set(
rmt_blks = xfs_attr3_rmt_blocks(mp, XFS_XATTR_SIZE_MAX);
}
- if (delayed) {
- error = xfs_attr_use_log_assist(mp);
- if (error)
- return error;
- }
-
/*
* Root fork attributes can use reserved data blocks for this
* operation if necessary
@@ -1020,7 +1033,7 @@ xfs_attr_set(
xfs_init_attr_trans(args, &tres, &total);
error = xfs_trans_alloc_inode(dp, &tres, total, 0, rsvd, &args->trans);
if (error)
- goto drop_incompat;
+ return error;
if (args->value || xfs_inode_hasattr(dp)) {
error = xfs_iext_count_may_overflow(dp, XFS_ATTR_FORK,
@@ -1080,9 +1093,6 @@ xfs_attr_set(
error = xfs_trans_commit(args->trans);
out_unlock:
xfs_iunlock(dp, XFS_ILOCK_EXCL);
-drop_incompat:
- if (delayed)
- xlog_drop_incompat_feat(mp->m_log);
return error;
out_trans_cancel:
@@ -1091,40 +1101,6 @@ out_trans_cancel:
goto out_unlock;
}
-int __init
-xfs_attri_init_cache(void)
-{
- xfs_attri_cache = kmem_cache_create("xfs_attri",
- sizeof(struct xfs_attri_log_item),
- 0, 0, NULL);
-
- return xfs_attri_cache != NULL ? 0 : -ENOMEM;
-}
-
-void
-xfs_attri_destroy_cache(void)
-{
- kmem_cache_destroy(xfs_attri_cache);
- xfs_attri_cache = NULL;
-}
-
-int __init
-xfs_attrd_init_cache(void)
-{
- xfs_attrd_cache = kmem_cache_create("xfs_attrd",
- sizeof(struct xfs_attrd_log_item),
- 0, 0, NULL);
-
- return xfs_attrd_cache != NULL ? 0 : -ENOMEM;
-}
-
-void
-xfs_attrd_destroy_cache(void)
-{
- kmem_cache_destroy(xfs_attrd_cache);
- xfs_attrd_cache = NULL;
-}
-
/*========================================================================
* External routines when attribute list is inside the inode
*========================================================================*/
@@ -1384,32 +1360,20 @@ xfs_attr_leaf_get(xfs_da_args_t *args)
return error;
}
-/*
- * Return EEXIST if attr is found, or ENOATTR if not
- * statep: If not null is set to point at the found state. Caller will
- * be responsible for freeing the state in this case.
- */
+/* Return EEXIST if attr is found, or ENOATTR if not. */
STATIC int
-xfs_attr_node_hasname(
+xfs_attr_node_lookup(
struct xfs_da_args *args,
- struct xfs_da_state **statep)
+ struct xfs_da_state *state)
{
- struct xfs_da_state *state;
int retval, error;
- state = xfs_da_state_alloc(args);
- if (statep != NULL)
- *statep = state;
-
/*
* Search to see if name exists, and get back a pointer to it.
*/
error = xfs_da3_node_lookup_int(state, &retval);
if (error)
- retval = error;
-
- if (!statep)
- xfs_da_state_free(state);
+ return error;
return retval;
}
@@ -1420,7 +1384,7 @@ xfs_attr_node_hasname(
STATIC int
xfs_attr_node_addname_find_attr(
- struct xfs_attr_item *attr)
+ struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
int error;
@@ -1429,7 +1393,8 @@ xfs_attr_node_addname_find_attr(
* Search to see if name already exists, and get back a pointer
* to where it should go.
*/
- error = xfs_attr_node_hasname(args, &attr->xattri_da_state);
+ xfs_attr_item_init_da_state(attr);
+ error = xfs_attr_node_lookup(args, attr->xattri_da_state);
switch (error) {
case -ENOATTR:
if (args->op_flags & XFS_DA_OP_REPLACE)
@@ -1456,8 +1421,10 @@ xfs_attr_node_addname_find_attr(
return 0;
error:
- if (attr->xattri_da_state)
+ if (attr->xattri_da_state) {
xfs_da_state_free(attr->xattri_da_state);
+ attr->xattri_da_state = NULL;
+ }
return error;
}
@@ -1470,7 +1437,7 @@ error:
*/
static int
xfs_attr_node_try_addname(
- struct xfs_attr_item *attr)
+ struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
struct xfs_da_state *state = attr->xattri_da_state;
@@ -1511,6 +1478,7 @@ xfs_attr_node_try_addname(
out:
xfs_da_state_free(state);
+ attr->xattri_da_state = NULL;
return error;
}
@@ -1535,10 +1503,10 @@ xfs_attr_node_removename(
static int
xfs_attr_node_remove_attr(
- struct xfs_attr_item *attr)
+ struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
- struct xfs_da_state *state = NULL;
+ struct xfs_da_state *state = xfs_da_state_alloc(args);
int retval = 0;
int error = 0;
@@ -1548,8 +1516,6 @@ xfs_attr_node_remove_attr(
* attribute entry after any split ops.
*/
args->attr_filter |= XFS_ATTR_INCOMPLETE;
- state = xfs_da_state_alloc(args);
- state->inleaf = 0;
error = xfs_da3_node_lookup_int(state, &retval);
if (error)
goto out;
@@ -1567,8 +1533,7 @@ xfs_attr_node_remove_attr(
retval = error = 0;
out:
- if (state)
- xfs_da_state_free(state);
+ xfs_da_state_free(state);
if (error)
return error;
return retval;
@@ -1597,7 +1562,8 @@ xfs_attr_node_get(
/*
* Search to see if name exists, and get back a pointer to it.
*/
- error = xfs_attr_node_hasname(args, &state);
+ state = xfs_da_state_alloc(args);
+ error = xfs_attr_node_lookup(args, state);
if (error != -EEXIST)
goto out_release;
@@ -1616,8 +1582,7 @@ out_release:
state->path.blk[i].bp = NULL;
}
- if (state)
- xfs_da_state_free(state);
+ xfs_da_state_free(state);
return error;
}
@@ -1637,3 +1602,20 @@ xfs_attr_namecheck(
/* There shouldn't be any nulls here */
return !memchr(name, 0, length);
}
+
+int __init
+xfs_attr_intent_init_cache(void)
+{
+ xfs_attr_intent_cache = kmem_cache_create("xfs_attr_intent",
+ sizeof(struct xfs_attr_intent),
+ 0, 0, NULL);
+
+ return xfs_attr_intent_cache != NULL ? 0 : -ENOMEM;
+}
+
+void
+xfs_attr_intent_destroy_cache(void)
+{
+ kmem_cache_destroy(xfs_attr_intent_cache);
+ xfs_attr_intent_cache = NULL;
+}

diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
index 1af7abe29eef..e329da3e7afa 100644
--- a/fs/xfs/libxfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -31,7 +31,8 @@ struct xfs_attr_list_context;
static inline bool xfs_has_larp(struct xfs_mount *mp)
{
#ifdef DEBUG
- return xfs_globals.larp;
+ /* Logged xattrs require a V5 super for log_incompat */
+ return xfs_has_crc(mp) && xfs_globals.larp;
#else
return false;
#endif
@@ -434,7 +435,7 @@ struct xfs_attr_list_context {
*/
/*
- * Enum values for xfs_attr_item.xattri_da_state
+ * Enum values for xfs_attr_intent.xattri_da_state
*
* These values are used by delayed attribute operations to keep track of where
* they were before they returned -EAGAIN. A return code of -EAGAIN signals the
@@ -501,44 +502,46 @@ enum xfs_delattr_state {
{ XFS_DAS_NODE_REMOVE_ATTR, "XFS_DAS_NODE_REMOVE_ATTR" }, \
{ XFS_DAS_DONE, "XFS_DAS_DONE" }
-/*
- * Defines for xfs_attr_item.xattri_flags
- */
-#define XFS_DAC_LEAF_ADDNAME_INIT 0x01 /* xfs_attr_leaf_addname init*/
+struct xfs_attri_log_nameval;
/*
* Context used for keeping track of delayed attribute operations
*/
-struct xfs_attr_item {
+struct xfs_attr_intent {
+ /*
+ * used to log this item to an intent containing a list of attrs to
+ * commit later
+ */
+ struct list_head xattri_list;
+
+ /* Used in xfs_attr_node_removename to roll through removing blocks */
+ struct xfs_da_state *xattri_da_state;
+
struct xfs_da_args *xattri_da_args;
/*
+ * Shared buffer containing the attr name and value so that the logging
+ * code can share large memory buffers between log items.
+ */
+ struct xfs_attri_log_nameval *xattri_nameval;
+
+ /*
* Used by xfs_attr_set to hold a leaf buffer across a transaction roll
*/
struct xfs_buf *xattri_leaf_bp;
- /* Used in xfs_attr_rmtval_set_blk to roll through allocating blocks */
- struct xfs_bmbt_irec xattri_map;
- xfs_dablk_t xattri_lblkno;
- int xattri_blkcnt;
-
- /* Used in xfs_attr_node_removename to roll through removing blocks */
- struct xfs_da_state *xattri_da_state;
-
/* Used to keep track of current state of delayed operation */
- unsigned int xattri_flags;
enum xfs_delattr_state xattri_dela_state;
/*
- * Attr operation being performed - XFS_ATTR_OP_FLAGS_*
+ * Attr operation being performed - XFS_ATTRI_OP_FLAGS_*
*/
unsigned int xattri_op_flags;
- /*
- * used to log this item to an intent containing a list of attrs to
- * commit later
- */
- struct list_head xattri_list;
+ /* Used in xfs_attr_rmtval_set_blk to roll through allocating blocks */
+ xfs_dablk_t xattri_lblkno;
+ int xattri_blkcnt;
+ struct xfs_bmbt_irec xattri_map;
};
@@ -557,21 +560,13 @@ bool xfs_attr_is_leaf(struct xfs_inode *ip);
int xfs_attr_get_ilocked(struct xfs_da_args *args);
int xfs_attr_get(struct xfs_da_args *args);
int xfs_attr_set(struct xfs_da_args *args);
-int xfs_attr_set_iter(struct xfs_attr_item *attr);
-int xfs_attr_remove_iter(struct xfs_attr_item *attr);
+int xfs_attr_set_iter(struct xfs_attr_intent *attr);
+int xfs_attr_remove_iter(struct xfs_attr_intent *attr);
bool xfs_attr_namecheck(const void *name, size_t length);
int xfs_attr_calc_size(struct xfs_da_args *args, int *local);
void xfs_init_attr_trans(struct xfs_da_args *args, struct xfs_trans_res *tres,
unsigned int *total);
-extern struct kmem_cache *xfs_attri_cache;
-extern struct kmem_cache *xfs_attrd_cache;
-
-int __init xfs_attri_init_cache(void);
-void xfs_attri_destroy_cache(void);
-int __init xfs_attrd_init_cache(void);
-void xfs_attrd_destroy_cache(void);
-
/*
* Check to see if the attr should be upgraded from non-existent or shortform to
* single-leaf-block attribute list.
@@ -634,4 +629,8 @@ xfs_attr_init_replace_state(struct xfs_da_args *args)
return xfs_attr_init_add_state(args);
}
+extern struct kmem_cache *xfs_attr_intent_cache;
+int __init xfs_attr_intent_init_cache(void);
+void xfs_attr_intent_destroy_cache(void);
+
#endif /* __XFS_ATTR_H__ */
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 4250159ecced..7298c148f848 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -568,7 +568,7 @@ xfs_attr_rmtval_stale(
*/
int
xfs_attr_rmtval_find_space(
- struct xfs_attr_item *attr)
+ struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
struct xfs_bmbt_irec *map = &attr->xattri_map;
@@ -598,7 +598,7 @@ xfs_attr_rmtval_find_space(
*/
int
xfs_attr_rmtval_set_blk(
- struct xfs_attr_item *attr)
+ struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
struct xfs_inode *dp = args->dp;
@@ -674,7 +674,7 @@ xfs_attr_rmtval_invalidate(
*/
int
xfs_attr_rmtval_remove(
- struct xfs_attr_item *attr)
+ struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
int error, done;
diff --git a/fs/xfs/libxfs/xfs_attr_remote.h b/fs/xfs/libxfs/xfs_attr_remote.h
index 62b398edec3f..d097ec6c4dc3 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.h
+++ b/fs/xfs/libxfs/xfs_attr_remote.h
@@ -12,9 +12,9 @@ int xfs_attr_rmtval_get(struct xfs_da_args *args);
int xfs_attr_rmtval_stale(struct xfs_inode *ip, struct xfs_bmbt_irec *map,
xfs_buf_flags_t incore_flags);
int xfs_attr_rmtval_invalidate(struct xfs_da_args *args);
-int xfs_attr_rmtval_remove(struct xfs_attr_item *attr);
+int xfs_attr_rmtval_remove(struct xfs_attr_intent *attr);
int xfs_attr_rmt_find_hole(struct xfs_da_args *args);
int xfs_attr_rmtval_set_value(struct xfs_da_args *args);
-int xfs_attr_rmtval_set_blk(struct xfs_attr_item *attr);
-int xfs_attr_rmtval_find_space(struct xfs_attr_item *attr);
+int xfs_attr_rmtval_set_blk(struct xfs_attr_intent *attr);
+int xfs_attr_rmtval_find_space(struct xfs_attr_intent *attr);
#endif /* __XFS_ATTR_REMOTE_H__ */
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 2aa300f7461f..2eecc49fc1b2 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -51,16 +51,31 @@ xfs_btree_magic(
return magic;
}
-static xfs_failaddr_t
+/*
+ * These sibling pointer checks are optimised for null sibling pointers. This
+ * happens a lot, and we don't need to byte swap at runtime if the sibling
+ * pointer is NULL.
+ *
+ * These are explicitly marked as inline because the cost of calling them as
+ * functions instead of inlining them is about 36 bytes extra code per call site
+ * on x86-64. Yes, gcc-11 fails to inline them, and explicit inlining of these
+ * two sibling check functions reduces the compiled code size by over 300
+ * bytes.
+ */
+static inline xfs_failaddr_t
xfs_btree_check_lblock_siblings(
struct xfs_mount *mp,
struct xfs_btree_cur *cur,
int level,
xfs_fsblock_t fsb,
- xfs_fsblock_t sibling)
+ __be64 dsibling)
{
- if (sibling == NULLFSBLOCK)
+ xfs_fsblock_t sibling;
+
+ if (dsibling == cpu_to_be64(NULLFSBLOCK))
return NULL;
+
+ sibling = be64_to_cpu(dsibling);
if (sibling == fsb)
return __this_address;
if (level >= 0) {
@@ -74,17 +89,21 @@ xfs_btree_check_lblock_siblings(
return NULL;
}
-static xfs_failaddr_t
+static inline xfs_failaddr_t
xfs_btree_check_sblock_siblings(
struct xfs_mount *mp,
struct xfs_btree_cur *cur,
int level,
xfs_agnumber_t agno,
xfs_agblock_t agbno,
- xfs_agblock_t sibling)
+ __be32 dsibling)
{
- if (sibling == NULLAGBLOCK)
+ xfs_agblock_t sibling;
+
+ if (dsibling == cpu_to_be32(NULLAGBLOCK))
return NULL;
+
+ sibling = be32_to_cpu(dsibling);
if (sibling == agbno)
return __this_address;
if (level >= 0) {
@@ -136,10 +155,10 @@ __xfs_btree_check_lblock(
fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb,
- be64_to_cpu(block->bb_u.l.bb_leftsib));
+ block->bb_u.l.bb_leftsib);
if (!fa)
fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb,
- be64_to_cpu(block->bb_u.l.bb_rightsib));
+ block->bb_u.l.bb_rightsib);
return fa;
}
@@ -204,10 +223,10 @@ __xfs_btree_check_sblock(
}
fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno, agbno,
- be32_to_cpu(block->bb_u.s.bb_leftsib));
+ block->bb_u.s.bb_leftsib);
if (!fa)
fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno,
- agbno, be32_to_cpu(block->bb_u.s.bb_rightsib));
+ agbno, block->bb_u.s.bb_rightsib);
return fa;
}
@@ -426,8 +445,14 @@ xfs_btree_del_cursor(
break;
}
+ /*
+ * If we are doing a BMBT update, the number of unaccounted blocks
+ * allocated during this cursor lifetime should be zero. If it's not
+ * zero, then we should be shut down or on our way to shutdown due to
+ * cancelling a dirty transaction on error.
+ */
ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_ino.allocated == 0 ||
- xfs_is_shutdown(cur->bc_mp));
+ xfs_is_shutdown(cur->bc_mp) || error != 0);
if (unlikely(cur->bc_flags & XFS_BTREE_STAGING))
kmem_free(cur->bc_ops);
if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS) && cur->bc_ag.pag)
@@ -3247,7 +3272,7 @@ xfs_btree_insrec(
struct xfs_btree_block *block; /* btree block */
struct xfs_buf *bp; /* buffer for block */
union xfs_btree_ptr nptr; /* new block ptr */
- struct xfs_btree_cur *ncur; /* new btree cursor */
+ struct xfs_btree_cur *ncur = NULL; /* new btree cursor */
union xfs_btree_key nkey; /* new block key */
union xfs_btree_key *lkey;
int optr; /* old key/record index */
@@ -3327,7 +3352,7 @@ xfs_btree_insrec(
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, level, bp);
if (error)
- return error;
+ goto error0;
#endif
/*
@@ -3347,7 +3372,7 @@ xfs_btree_insrec(
for (i = numrecs - ptr; i >= 0; i--) {
error = xfs_btree_debug_check_ptr(cur, pp, i, level);
if (error)
- return error;
+ goto error0;
}
xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
@@ -3432,6 +3457,8 @@ xfs_btree_insrec(
return 0;
error0:
+ if (ncur)
+ xfs_btree_del_cursor(ncur, error);
return error;
}
@@ -4523,10 +4550,10 @@ xfs_btree_lblock_verify(
/* sibling pointer verification */
fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb,
- be64_to_cpu(block->bb_u.l.bb_leftsib));
+ block->bb_u.l.bb_leftsib);
if (!fa)
fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb,
- be64_to_cpu(block->bb_u.l.bb_rightsib));
+ block->bb_u.l.bb_rightsib);
return fa;
}
@@ -4580,10 +4607,10 @@ xfs_btree_sblock_verify(
agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp));
agbno = xfs_daddr_to_agbno(mp, xfs_buf_daddr(bp));
fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno,
- be32_to_cpu(block->bb_u.s.bb_leftsib));
+ block->bb_u.s.bb_leftsib);
if (!fa)
fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno,
- be32_to_cpu(block->bb_u.s.bb_rightsib));
+ block->bb_u.s.bb_rightsib);
return fa;
}
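
The sibling-check comment in the xfs_btree.c hunk above motivates comparing the on-disk big-endian value against a pre-swapped NULL constant so the common "no sibling" case costs no runtime byte swap. A small sketch of that pattern, assuming a little-endian host and using local names (be64, check_sibling) rather than the kernel helpers:

#include <stdint.h>
#include <stdio.h>

#define NULLFSBLOCK	((uint64_t)-1)

/* sketch only: assumes a little-endian host, so "to big-endian" is a swap */
static uint64_t be64(uint64_t x)
{
	return __builtin_bswap64(x);
}

static const char *check_sibling(uint64_t dsibling)	/* on-disk, big-endian */
{
	/* common case: compare without converting dsibling to CPU order */
	if (dsibling == be64(NULLFSBLOCK))
		return "no sibling, no runtime swap";
	/* only the uncommon non-NULL case pays for the conversion */
	return be64(dsibling) == 42 ? "sibling is block 42" : "some other sibling";
}

int main(void)
{
	printf("%s\n", check_sibling(be64(NULLFSBLOCK)));
	printf("%s\n", check_sibling(be64(42)));
	return 0;
}

In the kernel the NULL constant is swapped at compile time (cpu_to_be64(NULLFSBLOCK)), so only the rare non-NULL path performs a conversion, which is the saving the comment quantifies.
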
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index aa74f3fdb571..e7201dc68f43 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -117,6 +117,17 @@ xfs_da_state_free(xfs_da_state_t *state)
kmem_cache_free(xfs_da_state_cache, state);
}
+void
+xfs_da_state_reset(
+ struct xfs_da_state *state,
+ struct xfs_da_args *args)
+{
+ xfs_da_state_kill_altpath(state);
+ memset(state, 0, sizeof(struct xfs_da_state));
+ state->args = args;
+ state->mp = state->args->dp->i_mount;
+}
+
static inline int xfs_dabuf_nfsb(struct xfs_mount *mp, int whichfork)
{
if (whichfork == XFS_DATA_FORK)
diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h
index ed2303e4d46a..d33b7686a0b3 100644
--- a/fs/xfs/libxfs/xfs_da_btree.h
+++ b/fs/xfs/libxfs/xfs_da_btree.h
@@ -225,6 +225,7 @@ enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
struct xfs_da_state *xfs_da_state_alloc(struct xfs_da_args *args);
void xfs_da_state_free(xfs_da_state_t *state);
+void xfs_da_state_reset(struct xfs_da_state *state, struct xfs_da_args *args);
void xfs_da3_node_hdr_from_disk(struct xfs_mount *mp,
struct xfs_da3_icnode_hdr *to, struct xfs_da_intnode *from);
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index ceb222b4f261..5a321b783398 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -191,35 +191,56 @@ static const struct xfs_defer_op_type *defer_op_types[] = {
[XFS_DEFER_OPS_TYPE_ATTR] = &xfs_attr_defer_type,
};
-static bool
+/*
+ * Ensure there's a log intent item associated with this deferred work item if
+ * the operation must be restarted on crash. Returns 1 if there's a log item;
+ * 0 if there isn't; or a negative errno.
+ */
+static int
xfs_defer_create_intent(
struct xfs_trans *tp,
struct xfs_defer_pending *dfp,
bool sort)
{
const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type];
+ struct xfs_log_item *lip;
+
+ if (dfp->dfp_intent)
+ return 1;
- if (!dfp->dfp_intent)
- dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work,
- dfp->dfp_count, sort);
- return dfp->dfp_intent != NULL;
+ lip = ops->create_intent(tp, &dfp->dfp_work, dfp->dfp_count, sort);
+ if (!lip)
+ return 0;
+ if (IS_ERR(lip))
+ return PTR_ERR(lip);
+
+ dfp->dfp_intent = lip;
+ return 1;
}
/*
* For each pending item in the intake list, log its intent item and the
* associated extents, then add the entire intake list to the end of
* the pending list.
+ *
+ * Returns 1 if at least one log item was associated with the deferred work;
+ * 0 if there are no log items; or a negative errno.
*/
-static bool
+static int
xfs_defer_create_intents(
struct xfs_trans *tp)
{
struct xfs_defer_pending *dfp;
- bool ret = false;
+ int ret = 0;
list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
+ int ret2;
+
trace_xfs_defer_create_intent(tp->t_mountp, dfp);
- ret |= xfs_defer_create_intent(tp, dfp, true);
+ ret2 = xfs_defer_create_intent(tp, dfp, true);
+ if (ret2 < 0)
+ return ret2;
+ ret |= ret2;
}
return ret;
}
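xfs_defer_create_intents() now returns a tristate rather than a boolean, so callers must check for a negative errno before deciding whether a transaction roll is required. A minimal caller sketch of that convention (illustrative only; it mirrors the xfs_defer_finish_noroll() change below):

	int has_intents = xfs_defer_create_intents(tp);

	if (has_intents < 0)
		return has_intents;	/* e.g. -ENOMEM from ->create_intent */
	if (has_intents > 0) {
		/* At least one intent was logged; roll so it reaches the log. */
		error = xfs_defer_trans_roll(&tp);
		if (error)
			return error;
	}
	/* has_intents == 0: nothing was logged, no roll is needed. */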
@@ -457,6 +478,8 @@ xfs_defer_finish_one(
dfp->dfp_count--;
error = ops->finish_item(tp, dfp->dfp_done, li, &state);
if (error == -EAGAIN) {
+ int ret;
+
/*
* Caller wants a fresh transaction; put the work item
* back on the list and log a new log intent item to
@@ -467,7 +490,9 @@ xfs_defer_finish_one(
dfp->dfp_count++;
dfp->dfp_done = NULL;
dfp->dfp_intent = NULL;
- xfs_defer_create_intent(tp, dfp, false);
+ ret = xfs_defer_create_intent(tp, dfp, false);
+ if (ret < 0)
+ error = ret;
}
if (error)
@@ -514,10 +539,14 @@ xfs_defer_finish_noroll(
* of time that any one intent item can stick around in memory,
* pinning the log tail.
*/
- bool has_intents = xfs_defer_create_intents(*tp);
+ int has_intents = xfs_defer_create_intents(*tp);
list_splice_init(&(*tp)->t_dfops, &dop_pending);
+ if (has_intents < 0) {
+ error = has_intents;
+ goto out_shutdown;
+ }
if (has_intents || dfp) {
error = xfs_defer_trans_roll(tp);
if (error)
@@ -676,13 +705,15 @@ xfs_defer_ops_capture(
if (list_empty(&tp->t_dfops))
return NULL;
+ error = xfs_defer_create_intents(tp);
+ if (error < 0)
+ return ERR_PTR(error);
+
/* Create an object to capture the defer ops. */
dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS);
INIT_LIST_HEAD(&dfc->dfc_list);
INIT_LIST_HEAD(&dfc->dfc_dfops);
- xfs_defer_create_intents(tp);
-
/* Move the dfops chain and transaction state to the capture struct. */
list_splice_init(&tp->t_dfops, &dfc->dfc_dfops);
dfc->dfc_tpflags = tp->t_flags & XFS_TRANS_LOWMODE;
@@ -759,6 +790,10 @@ xfs_defer_ops_capture_and_commit(
/* If we don't capture anything, commit transaction and exit. */
dfc = xfs_defer_ops_capture(tp);
+ if (IS_ERR(dfc)) {
+ xfs_trans_cancel(tp);
+ return PTR_ERR(dfc);
+ }
if (!dfc)
return xfs_trans_commit(tp);
@@ -873,10 +908,7 @@ xfs_defer_init_item_caches(void)
error = xfs_extfree_intent_init_cache();
if (error)
goto err;
- error = xfs_attri_init_cache();
- if (error)
- goto err;
- error = xfs_attrd_init_cache();
+ error = xfs_attr_intent_init_cache();
if (error)
goto err;
return 0;
@@ -889,8 +921,7 @@ err:
void
xfs_defer_destroy_item_caches(void)
{
- xfs_attri_destroy_cache();
- xfs_attrd_destroy_cache();
+ xfs_attr_intent_destroy_cache();
xfs_extfree_intent_destroy_cache();
xfs_bmap_intent_destroy_cache();
xfs_refcount_intent_destroy_cache();
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index f7edd1ecf6d9..b351b9dc6561 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -906,10 +906,18 @@ struct xfs_icreate_log {
* Flags for deferred attribute operations.
* Upper bits are flags, lower byte is type code
*/
-#define XFS_ATTR_OP_FLAGS_SET 1 /* Set the attribute */
-#define XFS_ATTR_OP_FLAGS_REMOVE 2 /* Remove the attribute */
-#define XFS_ATTR_OP_FLAGS_REPLACE 3 /* Replace the attribute */
-#define XFS_ATTR_OP_FLAGS_TYPE_MASK 0xFF /* Flags type mask */
+#define XFS_ATTRI_OP_FLAGS_SET 1 /* Set the attribute */
+#define XFS_ATTRI_OP_FLAGS_REMOVE 2 /* Remove the attribute */
+#define XFS_ATTRI_OP_FLAGS_REPLACE 3 /* Replace the attribute */
+#define XFS_ATTRI_OP_FLAGS_TYPE_MASK 0xFF /* Flags type mask */
+
+/*
+ * alfi_attr_filter captures the state of xfs_da_args.attr_filter, so it should
+ * never have any other bits set.
+ */
+#define XFS_ATTRI_FILTER_MASK (XFS_ATTR_ROOT | \
+ XFS_ATTR_SECURE | \
+ XFS_ATTR_INCOMPLETE)
/*
* This is the structure used to lay out an attr log item in the
@@ -924,7 +932,7 @@ struct xfs_attri_log_format {
uint32_t alfi_op_flags; /* marks the op as a set or remove */
uint32_t alfi_name_len; /* attr name length */
uint32_t alfi_value_len; /* attr value length */
- uint32_t alfi_attr_flags;/* attr flags */
+ uint32_t alfi_attr_filter;/* attr filter flags */
};
struct xfs_attrd_log_format {
diff --git a/fs/xfs/libxfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h
index 32e216255cb0..2420865f3007 100644
--- a/fs/xfs/libxfs/xfs_log_recover.h
+++ b/fs/xfs/libxfs/xfs_log_recover.h
@@ -110,12 +110,6 @@ struct xlog_recover {
#define ITEM_TYPE(i) (*(unsigned short *)(i)->ri_buf[0].i_addr)
-/*
- * This is the number of entries in the l_buf_cancel_table used during
- * recovery.
- */
-#define XLOG_BC_TABLE_SIZE 64
-
#define XLOG_RECOVER_CRCPASS 0
#define XLOG_RECOVER_PASS1 1
#define XLOG_RECOVER_PASS2 2
@@ -128,5 +122,13 @@ int xlog_recover_iget(struct xfs_mount *mp, xfs_ino_t ino,
struct xfs_inode **ipp);
void xlog_recover_release_intent(struct xlog *log, unsigned short intent_type,
uint64_t intent_id);
+int xlog_alloc_buf_cancel_table(struct xlog *log);
+void xlog_free_buf_cancel_table(struct xlog *log);
+
+#ifdef DEBUG
+void xlog_check_buf_cancel_table(struct xlog *log);
+#else
+#define xlog_check_buf_cancel_table(log) do { } while (0)
+#endif
#endif /* __XFS_LOG_RECOVER_H__ */
diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
index f0b38f4aba80..8b9bd178a487 100644
--- a/fs/xfs/libxfs/xfs_symlink_remote.c
+++ b/fs/xfs/libxfs/xfs_symlink_remote.c
@@ -213,7 +213,7 @@ xfs_symlink_shortform_verify(
/*
* Zero length symlinks should never occur in memory as they are
- * never alllowed to exist on disk.
+ * never allowed to exist on disk.
*/
if (!size)
return __this_address;
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index b11870d07c56..2e8e400f10a9 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -340,20 +340,6 @@ static const struct xchk_meta_ops meta_scrub_ops[] = {
},
};
-/* This isn't a stable feature, warn once per day. */
-static inline void
-xchk_experimental_warning(
- struct xfs_mount *mp)
-{
- static struct ratelimit_state scrub_warning = RATELIMIT_STATE_INIT(
- "xchk_warning", 86400 * HZ, 1);
- ratelimit_set_flags(&scrub_warning, RATELIMIT_MSG_ON_RELEASE);
-
- if (__ratelimit(&scrub_warning))
- xfs_alert(mp,
-"EXPERIMENTAL online scrub feature in use. Use at your own risk!");
-}
-
static int
xchk_validate_inputs(
struct xfs_mount *mp,
@@ -478,7 +464,8 @@ xfs_scrub_metadata(
if (error)
goto out;
- xchk_experimental_warning(mp);
+ xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SCRUB,
+ "EXPERIMENTAL online scrub feature in use. Use at your own risk!");
sc = kmem_zalloc(sizeof(struct xfs_scrub), KM_NOFS | KM_MAYFAIL);
if (!sc) {
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 3df9c1782ead..b744c62052b6 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -17,6 +17,7 @@
#include "xfs_error.h"
#include "xfs_acl.h"
#include "xfs_trans.h"
+#include "xfs_xattr.h"
#include <linux/posix_acl_xattr.h>
@@ -202,7 +203,7 @@ __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
xfs_acl_to_disk(args.value, acl);
}
- error = xfs_attr_set(&args);
+ error = xfs_attr_change(&args);
kmem_free(args.value);
/*
diff --git a/fs/xfs/xfs_attr_item.c b/fs/xfs/xfs_attr_item.c
index e8ac88d9fd14..4a28c2d77070 100644
--- a/fs/xfs/xfs_attr_item.c
+++ b/fs/xfs/xfs_attr_item.c
@@ -22,13 +22,15 @@
#include "xfs_attr.h"
#include "xfs_attr_item.h"
#include "xfs_trace.h"
-#include "xfs_inode.h"
#include "xfs_trans_space.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
+struct kmem_cache *xfs_attri_cache;
+struct kmem_cache *xfs_attrd_cache;
+
static const struct xfs_item_ops xfs_attri_item_ops;
static const struct xfs_item_ops xfs_attrd_item_ops;
static struct xfs_attrd_log_item *xfs_trans_get_attrd(struct xfs_trans *tp,
@@ -39,12 +41,80 @@ static inline struct xfs_attri_log_item *ATTRI_ITEM(struct xfs_log_item *lip)
return container_of(lip, struct xfs_attri_log_item, attri_item);
}
+/*
+ * Shared xattr name/value buffers for logged extended attribute operations
+ *
+ * When logging updates to extended attributes, we can create quite a few
+ * attribute log intent items for a single xattr update. To avoid cycling the
+ * memory allocator and memcpy overhead, the name (and value, for setxattr)
+ * are kept in a refcounted object that is shared across all related log items
+ * and the upper-level deferred work state structure. The shared buffer has
+ * a control structure, followed by the name, and then the value.
+ */
+
+static inline struct xfs_attri_log_nameval *
+xfs_attri_log_nameval_get(
+ struct xfs_attri_log_nameval *nv)
+{
+ if (!refcount_inc_not_zero(&nv->refcount))
+ return NULL;
+ return nv;
+}
+
+static inline void
+xfs_attri_log_nameval_put(
+ struct xfs_attri_log_nameval *nv)
+{
+ if (!nv)
+ return;
+ if (refcount_dec_and_test(&nv->refcount))
+ kvfree(nv);
+}
+
+static inline struct xfs_attri_log_nameval *
+xfs_attri_log_nameval_alloc(
+ const void *name,
+ unsigned int name_len,
+ const void *value,
+ unsigned int value_len)
+{
+ struct xfs_attri_log_nameval *nv;
+
+ /*
+ * This could be over 64kB in length, so we have to use kvmalloc() for
+ * this. But kvmalloc() utterly sucks, so we use our own version.
+ */
+ nv = xlog_kvmalloc(sizeof(struct xfs_attri_log_nameval) +
+ name_len + value_len);
+ if (!nv)
+ return nv;
+
+ nv->name.i_addr = nv + 1;
+ nv->name.i_len = name_len;
+ nv->name.i_type = XLOG_REG_TYPE_ATTR_NAME;
+ memcpy(nv->name.i_addr, name, name_len);
+
+ if (value_len) {
+ nv->value.i_addr = nv->name.i_addr + name_len;
+ nv->value.i_len = value_len;
+ memcpy(nv->value.i_addr, value, value_len);
+ } else {
+ nv->value.i_addr = NULL;
+ nv->value.i_len = 0;
+ }
+ nv->value.i_type = XLOG_REG_TYPE_ATTR_VALUE;
+
+ refcount_set(&nv->refcount, 1);
+ return nv;
+}
+
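The refcounting contract for the shared buffer, sketched end to end (illustrative; the name/value pointers and lengths are placeholders):

	struct xfs_attri_log_nameval	*nv;

	/* One allocation holds the control struct, the name, then the value. */
	nv = xfs_attri_log_nameval_alloc(name, name_len, value, value_len);
	if (!nv)
		return -ENOMEM;

	/* Each consumer (log item, deferred work state) takes its own reference... */
	attrip->attri_nameval = xfs_attri_log_nameval_get(nv);

	/* ...and drops it when done; the last put frees the whole buffer. */
	xfs_attri_log_nameval_put(attrip->attri_nameval);
	xfs_attri_log_nameval_put(nv);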
STATIC void
xfs_attri_item_free(
struct xfs_attri_log_item *attrip)
{
kmem_free(attrip->attri_item.li_lv_shadow);
- kvfree(attrip);
+ xfs_attri_log_nameval_put(attrip->attri_nameval);
+ kmem_cache_free(xfs_attri_cache, attrip);
}
/*
@@ -73,16 +143,17 @@ xfs_attri_item_size(
int *nbytes)
{
struct xfs_attri_log_item *attrip = ATTRI_ITEM(lip);
+ struct xfs_attri_log_nameval *nv = attrip->attri_nameval;
*nvecs += 2;
*nbytes += sizeof(struct xfs_attri_log_format) +
- xlog_calc_iovec_len(attrip->attri_name_len);
+ xlog_calc_iovec_len(nv->name.i_len);
- if (!attrip->attri_value_len)
+ if (!nv->value.i_len)
return;
*nvecs += 1;
- *nbytes += xlog_calc_iovec_len(attrip->attri_value_len);
+ *nbytes += xlog_calc_iovec_len(nv->value.i_len);
}
/*
@@ -97,6 +168,7 @@ xfs_attri_item_format(
{
struct xfs_attri_log_item *attrip = ATTRI_ITEM(lip);
struct xfs_log_iovec *vecp = NULL;
+ struct xfs_attri_log_nameval *nv = attrip->attri_nameval;
attrip->attri_format.alfi_type = XFS_LI_ATTRI;
attrip->attri_format.alfi_size = 1;
@@ -108,22 +180,18 @@ xfs_attri_item_format(
* the log recovery.
*/
- ASSERT(attrip->attri_name_len > 0);
+ ASSERT(nv->name.i_len > 0);
attrip->attri_format.alfi_size++;
- if (attrip->attri_value_len > 0)
+ if (nv->value.i_len > 0)
attrip->attri_format.alfi_size++;
xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ATTRI_FORMAT,
&attrip->attri_format,
sizeof(struct xfs_attri_log_format));
- xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ATTR_NAME,
- attrip->attri_name,
- attrip->attri_name_len);
- if (attrip->attri_value_len > 0)
- xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ATTR_VALUE,
- attrip->attri_value,
- attrip->attri_value_len);
+ xlog_copy_from_iovec(lv, &vecp, &nv->name);
+ if (nv->value.i_len > 0)
+ xlog_copy_from_iovec(lv, &vecp, &nv->value);
}
/*
@@ -158,41 +226,18 @@ xfs_attri_item_release(
STATIC struct xfs_attri_log_item *
xfs_attri_init(
struct xfs_mount *mp,
- uint32_t name_len,
- uint32_t value_len)
-
+ struct xfs_attri_log_nameval *nv)
{
struct xfs_attri_log_item *attrip;
- uint32_t buffer_size = name_len + value_len;
- if (buffer_size) {
- /*
- * This could be over 64kB in length, so we have to use
- * kvmalloc() for this. But kvmalloc() utterly sucks, so we
- * use own version.
- */
- attrip = xlog_kvmalloc(sizeof(struct xfs_attri_log_item) +
- buffer_size);
- } else {
- attrip = kmem_cache_alloc(xfs_attri_cache,
- GFP_NOFS | __GFP_NOFAIL);
- }
- memset(attrip, 0, sizeof(struct xfs_attri_log_item));
+ attrip = kmem_cache_zalloc(xfs_attri_cache, GFP_NOFS | __GFP_NOFAIL);
- attrip->attri_name_len = name_len;
- if (name_len)
- attrip->attri_name = ((char *)attrip) +
- sizeof(struct xfs_attri_log_item);
- else
- attrip->attri_name = NULL;
-
- attrip->attri_value_len = value_len;
- if (value_len)
- attrip->attri_value = ((char *)attrip) +
- sizeof(struct xfs_attri_log_item) +
- name_len;
- else
- attrip->attri_value = NULL;
+ /*
+ * Grab an extra reference to the name/value buffer for this log item.
+ * The caller retains its own reference!
+ */
+ attrip->attri_nameval = xfs_attri_log_nameval_get(nv);
+ ASSERT(attrip->attri_nameval);
xfs_log_item_init(mp, &attrip->attri_item, XFS_LI_ATTRI,
&xfs_attri_item_ops);
@@ -233,7 +278,7 @@ STATIC void
xfs_attrd_item_free(struct xfs_attrd_log_item *attrdp)
{
kmem_free(attrdp->attrd_item.li_lv_shadow);
- kmem_free(attrdp);
+ kmem_cache_free(xfs_attrd_cache, attrdp);
}
STATIC void
@@ -297,7 +342,7 @@ xfs_attrd_item_intent(
*/
STATIC int
xfs_xattri_finish_update(
- struct xfs_attr_item *attr,
+ struct xfs_attr_intent *attr,
struct xfs_attrd_log_item *attrdp)
{
struct xfs_da_args *args = attr->xattri_da_args;
@@ -335,7 +380,7 @@ STATIC void
xfs_attr_log_item(
struct xfs_trans *tp,
struct xfs_attri_log_item *attrip,
- struct xfs_attr_item *attr)
+ const struct xfs_attr_intent *attr)
{
struct xfs_attri_log_format *attrp;
@@ -343,23 +388,18 @@ xfs_attr_log_item(
set_bit(XFS_LI_DIRTY, &attrip->attri_item.li_flags);
/*
- * At this point the xfs_attr_item has been constructed, and we've
+ * At this point the xfs_attr_intent has been constructed, and we've
* created the log intent. Fill in the attri log item and log format
- * structure with fields from this xfs_attr_item
+ * structure with fields from this xfs_attr_intent
*/
attrp = &attrip->attri_format;
attrp->alfi_ino = attr->xattri_da_args->dp->i_ino;
+ ASSERT(!(attr->xattri_op_flags & ~XFS_ATTRI_OP_FLAGS_TYPE_MASK));
attrp->alfi_op_flags = attr->xattri_op_flags;
- attrp->alfi_value_len = attr->xattri_da_args->valuelen;
- attrp->alfi_name_len = attr->xattri_da_args->namelen;
- attrp->alfi_attr_flags = attr->xattri_da_args->attr_filter;
-
- memcpy(attrip->attri_name, attr->xattri_da_args->name,
- attr->xattri_da_args->namelen);
- memcpy(attrip->attri_value, attr->xattri_da_args->value,
- attr->xattri_da_args->valuelen);
- attrip->attri_name_len = attr->xattri_da_args->namelen;
- attrip->attri_value_len = attr->xattri_da_args->valuelen;
+ attrp->alfi_value_len = attr->xattri_nameval->value.i_len;
+ attrp->alfi_name_len = attr->xattri_nameval->name.i_len;
+ ASSERT(!(attr->xattri_da_args->attr_filter & ~XFS_ATTRI_FILTER_MASK));
+ attrp->alfi_attr_filter = attr->xattri_da_args->attr_filter;
}
/* Get an ATTRI. */
@@ -372,7 +412,7 @@ xfs_attr_create_intent(
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_attri_log_item *attrip;
- struct xfs_attr_item *attr;
+ struct xfs_attr_intent *attr;
ASSERT(count == 1);
@@ -383,19 +423,47 @@ xfs_attr_create_intent(
* Each attr item only performs one attribute operation at a time, so
* this is a list of one
*/
- list_for_each_entry(attr, items, xattri_list) {
- attrip = xfs_attri_init(mp, attr->xattri_da_args->namelen,
- attr->xattri_da_args->valuelen);
- if (attrip == NULL)
- return NULL;
-
- xfs_trans_add_item(tp, &attrip->attri_item);
- xfs_attr_log_item(tp, attrip, attr);
+ attr = list_first_entry_or_null(items, struct xfs_attr_intent,
+ xattri_list);
+
+ /*
+ * Create a buffer to store the attribute name and value. This buffer
+ * will be shared between the higher level deferred xattr work state
+ * and the lower level xattr log items.
+ */
+ if (!attr->xattri_nameval) {
+ struct xfs_da_args *args = attr->xattri_da_args;
+
+ /*
+ * Transfer our reference to the name/value buffer to the
+ * deferred work state structure.
+ */
+ attr->xattri_nameval = xfs_attri_log_nameval_alloc(args->name,
+ args->namelen, args->value, args->valuelen);
}
+ if (!attr->xattri_nameval)
+ return ERR_PTR(-ENOMEM);
+
+ attrip = xfs_attri_init(mp, attr->xattri_nameval);
+ xfs_trans_add_item(tp, &attrip->attri_item);
+ xfs_attr_log_item(tp, attrip, attr);
return &attrip->attri_item;
}
+static inline void
+xfs_attr_free_item(
+ struct xfs_attr_intent *attr)
+{
+ if (attr->xattri_da_state)
+ xfs_da_state_free(attr->xattri_da_state);
+ xfs_attri_log_nameval_put(attr->xattri_nameval);
+ if (attr->xattri_da_args->op_flags & XFS_DA_OP_RECOVERY)
+ kmem_free(attr);
+ else
+ kmem_cache_free(xfs_attr_intent_cache, attr);
+}
+
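The conditional free pairs with two different allocation strategies; a condensed sketch of both, assuming the normal runtime path allocates from xfs_attr_intent_cache (the recovery-path allocation appears verbatim in xfs_attri_item_recover() below):

	/* Normal runtime path: one object from the dedicated cache. */
	attr = kmem_cache_zalloc(xfs_attr_intent_cache, GFP_NOFS | __GFP_NOFAIL);

	/* Recovery path: intent and da_args allocated as one heap block. */
	attr = kmem_zalloc(sizeof(struct xfs_attr_intent) +
			   sizeof(struct xfs_da_args), KM_NOFS);
	attr->xattri_da_args = (struct xfs_da_args *)(attr + 1);
	attr->xattri_da_args->op_flags |= XFS_DA_OP_RECOVERY;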
/* Process an attr. */
STATIC int
xfs_attr_finish_item(
@@ -404,11 +472,11 @@ xfs_attr_finish_item(
struct list_head *item,
struct xfs_btree_cur **state)
{
- struct xfs_attr_item *attr;
+ struct xfs_attr_intent *attr;
struct xfs_attrd_log_item *done_item = NULL;
int error;
- attr = container_of(item, struct xfs_attr_item, xattri_list);
+ attr = container_of(item, struct xfs_attr_intent, xattri_list);
if (done)
done_item = ATTRD_ITEM(done);
@@ -420,7 +488,7 @@ xfs_attr_finish_item(
error = xfs_xattri_finish_update(attr, done_item);
if (error != -EAGAIN)
- kmem_free(attr);
+ xfs_attr_free_item(attr);
return error;
}
@@ -438,33 +506,10 @@ STATIC void
xfs_attr_cancel_item(
struct list_head *item)
{
- struct xfs_attr_item *attr;
-
- attr = container_of(item, struct xfs_attr_item, xattri_list);
- kmem_free(attr);
-}
-
-STATIC xfs_lsn_t
-xfs_attri_item_committed(
- struct xfs_log_item *lip,
- xfs_lsn_t lsn)
-{
- struct xfs_attri_log_item *attrip = ATTRI_ITEM(lip);
-
- /*
- * The attrip refers to xfs_attr_item memory to log the name and value
- * with the intent item. This already occurred when the intent was
- * committed so these fields are no longer accessed. Clear them out of
- * caution since we're about to free the xfs_attr_item.
- */
- attrip->attri_name = NULL;
- attrip->attri_value = NULL;
+ struct xfs_attr_intent *attr;
- /*
- * The ATTRI is logged only once and cannot be moved in the log, so
- * simply return the lsn at which it's been logged.
- */
- return lsn;
+ attr = container_of(item, struct xfs_attr_intent, xattri_list);
+ xfs_attr_free_item(attr);
}
STATIC bool
@@ -482,16 +527,22 @@ xfs_attri_validate(
struct xfs_attri_log_format *attrp)
{
unsigned int op = attrp->alfi_op_flags &
- XFS_ATTR_OP_FLAGS_TYPE_MASK;
+ XFS_ATTRI_OP_FLAGS_TYPE_MASK;
if (attrp->__pad != 0)
return false;
+ if (attrp->alfi_op_flags & ~XFS_ATTRI_OP_FLAGS_TYPE_MASK)
+ return false;
+
+ if (attrp->alfi_attr_filter & ~XFS_ATTRI_FILTER_MASK)
+ return false;
+
/* alfi_op_flags should be either a set or remove */
switch (op) {
- case XFS_ATTR_OP_FLAGS_SET:
- case XFS_ATTR_OP_FLAGS_REPLACE:
- case XFS_ATTR_OP_FLAGS_REMOVE:
+ case XFS_ATTRI_OP_FLAGS_SET:
+ case XFS_ATTRI_OP_FLAGS_REPLACE:
+ case XFS_ATTRI_OP_FLAGS_REMOVE:
break;
default:
return false;
@@ -517,13 +568,14 @@ xfs_attri_item_recover(
struct list_head *capture_list)
{
struct xfs_attri_log_item *attrip = ATTRI_ITEM(lip);
- struct xfs_attr_item *attr;
+ struct xfs_attr_intent *attr;
struct xfs_mount *mp = lip->li_log->l_mp;
struct xfs_inode *ip;
struct xfs_da_args *args;
struct xfs_trans *tp;
struct xfs_trans_res tres;
struct xfs_attri_log_format *attrp;
+ struct xfs_attri_log_nameval *nv = attrip->attri_nameval;
int error, ret = 0;
int total;
int local;
@@ -535,41 +587,50 @@ xfs_attri_item_recover(
*/
attrp = &attrip->attri_format;
if (!xfs_attri_validate(mp, attrp) ||
- !xfs_attr_namecheck(attrip->attri_name, attrip->attri_name_len))
+ !xfs_attr_namecheck(nv->name.i_addr, nv->name.i_len))
return -EFSCORRUPTED;
error = xlog_recover_iget(mp, attrp->alfi_ino, &ip);
if (error)
return error;
- attr = kmem_zalloc(sizeof(struct xfs_attr_item) +
+ attr = kmem_zalloc(sizeof(struct xfs_attr_intent) +
sizeof(struct xfs_da_args), KM_NOFS);
args = (struct xfs_da_args *)(attr + 1);
attr->xattri_da_args = args;
- attr->xattri_op_flags = attrp->alfi_op_flags;
+ attr->xattri_op_flags = attrp->alfi_op_flags &
+ XFS_ATTRI_OP_FLAGS_TYPE_MASK;
+
+ /*
+ * We're reconstructing the deferred work state structure from the
+ * recovered log item. Grab a reference to the name/value buffer and
+ * attach it to the new work state.
+ */
+ attr->xattri_nameval = xfs_attri_log_nameval_get(nv);
+ ASSERT(attr->xattri_nameval);
args->dp = ip;
args->geo = mp->m_attr_geo;
args->whichfork = XFS_ATTR_FORK;
- args->name = attrip->attri_name;
- args->namelen = attrp->alfi_name_len;
+ args->name = nv->name.i_addr;
+ args->namelen = nv->name.i_len;
args->hashval = xfs_da_hashname(args->name, args->namelen);
- args->attr_filter = attrp->alfi_attr_flags;
+ args->attr_filter = attrp->alfi_attr_filter & XFS_ATTRI_FILTER_MASK;
args->op_flags = XFS_DA_OP_RECOVERY | XFS_DA_OP_OKNOENT;
- switch (attrp->alfi_op_flags & XFS_ATTR_OP_FLAGS_TYPE_MASK) {
- case XFS_ATTR_OP_FLAGS_SET:
- case XFS_ATTR_OP_FLAGS_REPLACE:
- args->value = attrip->attri_value;
- args->valuelen = attrp->alfi_value_len;
+ switch (attr->xattri_op_flags) {
+ case XFS_ATTRI_OP_FLAGS_SET:
+ case XFS_ATTRI_OP_FLAGS_REPLACE:
+ args->value = nv->value.i_addr;
+ args->valuelen = nv->value.i_len;
args->total = xfs_attr_calc_size(args, &local);
if (xfs_inode_hasattr(args->dp))
attr->xattri_dela_state = xfs_attr_init_replace_state(args);
else
attr->xattri_dela_state = xfs_attr_init_add_state(args);
break;
- case XFS_ATTR_OP_FLAGS_REMOVE:
+ case XFS_ATTRI_OP_FLAGS_REMOVE:
if (!xfs_inode_hasattr(args->dp))
goto out;
attr->xattri_dela_state = xfs_attr_init_remove_state(args);
@@ -613,7 +674,7 @@ out_unlock:
xfs_irele(ip);
out:
if (ret != -EAGAIN)
- kmem_free(attr);
+ xfs_attr_free_item(attr);
return error;
}
@@ -636,22 +697,18 @@ xfs_attri_item_relog(
attrdp = xfs_trans_get_attrd(tp, old_attrip);
set_bit(XFS_LI_DIRTY, &attrdp->attrd_item.li_flags);
- new_attrip = xfs_attri_init(tp->t_mountp, old_attrp->alfi_name_len,
- old_attrp->alfi_value_len);
+ /*
+ * Create a new log item that shares the same name/value buffer as the
+ * old log item.
+ */
+ new_attrip = xfs_attri_init(tp->t_mountp, old_attrip->attri_nameval);
new_attrp = &new_attrip->attri_format;
new_attrp->alfi_ino = old_attrp->alfi_ino;
new_attrp->alfi_op_flags = old_attrp->alfi_op_flags;
new_attrp->alfi_value_len = old_attrp->alfi_value_len;
new_attrp->alfi_name_len = old_attrp->alfi_name_len;
- new_attrp->alfi_attr_flags = old_attrp->alfi_attr_flags;
-
- memcpy(new_attrip->attri_name, old_attrip->attri_name,
- new_attrip->attri_name_len);
-
- if (new_attrip->attri_value_len > 0)
- memcpy(new_attrip->attri_value, old_attrip->attri_value,
- new_attrip->attri_value_len);
+ new_attrp->alfi_attr_filter = old_attrp->alfi_attr_filter;
xfs_trans_add_item(tp, &new_attrip->attri_item);
set_bit(XFS_LI_DIRTY, &new_attrip->attri_item.li_flags);
@@ -666,46 +723,46 @@ xlog_recover_attri_commit_pass2(
struct xlog_recover_item *item,
xfs_lsn_t lsn)
{
- int error;
struct xfs_mount *mp = log->l_mp;
struct xfs_attri_log_item *attrip;
struct xfs_attri_log_format *attri_formatp;
- int region = 0;
+ struct xfs_attri_log_nameval *nv;
+ const void *attr_value = NULL;
+ const void *attr_name;
+ int error;
- attri_formatp = item->ri_buf[region].i_addr;
+ attri_formatp = item->ri_buf[0].i_addr;
+ attr_name = item->ri_buf[1].i_addr;
- /* Validate xfs_attri_log_format */
+ /* Validate xfs_attri_log_format before the large memory allocation */
if (!xfs_attri_validate(mp, attri_formatp)) {
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
}
- /* memory alloc failure will cause replay to abort */
- attrip = xfs_attri_init(mp, attri_formatp->alfi_name_len,
- attri_formatp->alfi_value_len);
- if (attrip == NULL)
- return -ENOMEM;
+ if (!xfs_attr_namecheck(attr_name, attri_formatp->alfi_name_len)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+ return -EFSCORRUPTED;
+ }
- error = xfs_attri_copy_format(&item->ri_buf[region],
- &attrip->attri_format);
- if (error)
- goto out;
+ if (attri_formatp->alfi_value_len)
+ attr_value = item->ri_buf[2].i_addr;
- region++;
- memcpy(attrip->attri_name, item->ri_buf[region].i_addr,
- attrip->attri_name_len);
+ /*
+ * Memory alloc failure will cause replay to abort. We attach the
+ * name/value buffer to the recovered incore log item and drop our
+ * reference.
+ */
+ nv = xfs_attri_log_nameval_alloc(attr_name,
+ attri_formatp->alfi_name_len, attr_value,
+ attri_formatp->alfi_value_len);
+ if (!nv)
+ return -ENOMEM;
- if (!xfs_attr_namecheck(attrip->attri_name, attrip->attri_name_len)) {
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
- error = -EFSCORRUPTED;
+ attrip = xfs_attri_init(mp, nv);
+ error = xfs_attri_copy_format(&item->ri_buf[0], &attrip->attri_format);
+ if (error)
goto out;
- }
-
- if (attrip->attri_value_len > 0) {
- region++;
- memcpy(attrip->attri_value, item->ri_buf[region].i_addr,
- attrip->attri_value_len);
- }
/*
* The ATTRI has two references. One for the ATTRD and one for ATTRI to
@@ -715,9 +772,11 @@ xlog_recover_attri_commit_pass2(
*/
xfs_trans_ail_insert(log->l_ailp, &attrip->attri_item, lsn);
xfs_attri_release(attrip);
+ xfs_attri_log_nameval_put(nv);
return 0;
out:
xfs_attri_item_free(attrip);
+ xfs_attri_log_nameval_put(nv);
return error;
}
@@ -797,7 +856,6 @@ static const struct xfs_item_ops xfs_attri_item_ops = {
.iop_size = xfs_attri_item_size,
.iop_format = xfs_attri_item_format,
.iop_unpin = xfs_attri_item_unpin,
- .iop_committed = xfs_attri_item_committed,
.iop_release = xfs_attri_item_release,
.iop_recover = xfs_attri_item_recover,
.iop_match = xfs_attri_item_match,
diff --git a/fs/xfs/xfs_attr_item.h b/fs/xfs/xfs_attr_item.h
index c3b779f82adb..3280a7930287 100644
--- a/fs/xfs/xfs_attr_item.h
+++ b/fs/xfs/xfs_attr_item.h
@@ -11,25 +11,30 @@
struct xfs_mount;
struct kmem_zone;
+struct xfs_attri_log_nameval {
+ struct xfs_log_iovec name;
+ struct xfs_log_iovec value;
+ refcount_t refcount;
+
+ /* name and value follow the end of this struct */
+};
+
/*
* This is the "attr intention" log item. It is used to log the fact that some
* extended attribute operations need to be processed. An operation is
* currently either a set or remove. Set or remove operations are described by
- * the xfs_attr_item which may be logged to this intent.
+ * the xfs_attr_intent which may be logged to this intent.
*
* During a normal attr operation, name and value point to the name and value
* fields of the caller's xfs_da_args structure. During a recovery, the name
* and value buffers are copied from the log, and stored in a trailing buffer
- * attached to the xfs_attr_item until they are committed. They are freed when
- * the xfs_attr_item itself is freed when the work is done.
+ * attached to the xfs_attr_intent until they are committed. They are freed
+ * along with the xfs_attr_intent itself when the work is done.
*/
struct xfs_attri_log_item {
struct xfs_log_item attri_item;
atomic_t attri_refcount;
- int attri_name_len;
- int attri_value_len;
- void *attri_name;
- void *attri_value;
+ struct xfs_attri_log_nameval *attri_nameval;
struct xfs_attri_log_format attri_format;
};
@@ -43,4 +48,7 @@ struct xfs_attrd_log_item {
struct xfs_attrd_log_format attrd_format;
};
+extern struct kmem_cache *xfs_attri_cache;
+extern struct kmem_cache *xfs_attrd_cache;
+
#endif /* __XFS_ATTR_ITEM_H__ */
diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
index e484251dc9c8..ffa94102094d 100644
--- a/fs/xfs/xfs_buf_item_recover.c
+++ b/fs/xfs/xfs_buf_item_recover.c
@@ -24,6 +24,15 @@
#include "xfs_quota.h"
/*
+ * This is the number of entries in the l_buf_cancel_table used during
+ * recovery.
+ */
+#define XLOG_BC_TABLE_SIZE 64
+
+#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
+ ((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE))
+
+/*
* This structure is used during recovery to record the buf log items which
* have been canceled and should not be replayed.
*/
@@ -993,3 +1002,60 @@ const struct xlog_recover_item_ops xlog_buf_item_ops = {
.commit_pass1 = xlog_recover_buf_commit_pass1,
.commit_pass2 = xlog_recover_buf_commit_pass2,
};
+
+#ifdef DEBUG
+void
+xlog_check_buf_cancel_table(
+ struct xlog *log)
+{
+ int i;
+
+ for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
+ ASSERT(list_empty(&log->l_buf_cancel_table[i]));
+}
+#endif
+
+int
+xlog_alloc_buf_cancel_table(
+ struct xlog *log)
+{
+ void *p;
+ int i;
+
+ ASSERT(log->l_buf_cancel_table == NULL);
+
+ p = kmalloc_array(XLOG_BC_TABLE_SIZE, sizeof(struct list_head),
+ GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ log->l_buf_cancel_table = p;
+ for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
+ INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
+
+ return 0;
+}
+
+void
+xlog_free_buf_cancel_table(
+ struct xlog *log)
+{
+ int i;
+
+ if (!log->l_buf_cancel_table)
+ return;
+
+ for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) {
+ struct xfs_buf_cancel *bc;
+
+ while ((bc = list_first_entry_or_null(
+ &log->l_buf_cancel_table[i],
+ struct xfs_buf_cancel, bc_list))) {
+ list_del(&bc->bc_list);
+ kmem_free(bc);
+ }
+ }
+
+ kmem_free(log->l_buf_cancel_table);
+ log->l_buf_cancel_table = NULL;
+}
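The table is a fixed array of 64 list heads hashed by block number through the relocated XLOG_BUF_CANCEL_BUCKET() macro. A lookup sketch, paraphrasing the existing pass-2 search (illustrative only, not part of this patch):

	static struct xfs_buf_cancel *
	example_find_cancel(struct xlog *log, xfs_daddr_t blkno, uint len)
	{
		struct list_head	*bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
		struct xfs_buf_cancel	*bcp;

		/* Walk the bucket; entries match on block number and length. */
		list_for_each_entry(bcp, bucket, bc_list) {
			if (bcp->bc_blkno == blkno && bcp->bc_len == len)
				return bcp;
		}
		return NULL;
	}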
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index a60632ecc3f0..5a171c0b244b 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -576,9 +576,9 @@ xfs_file_dio_write_unaligned(
* don't even bother trying the fast path in this case.
*/
if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
-retry_exclusive:
if (iocb->ki_flags & IOCB_NOWAIT)
return -EAGAIN;
+retry_exclusive:
iolock = XFS_IOLOCK_EXCL;
flags = IOMAP_DIO_FORCE_WAIT;
}
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 888839e75d11..d4a77c53f94b 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -149,12 +149,7 @@ xfs_growfs_data_private(
error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
delta, &lastag_extended);
} else {
- static struct ratelimit_state shrink_warning = \
- RATELIMIT_STATE_INIT("shrink_warning", 86400 * HZ, 1);
- ratelimit_set_flags(&shrink_warning, RATELIMIT_MSG_ON_RELEASE);
-
- if (__ratelimit(&shrink_warning))
- xfs_alert(mp,
+ xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SHRINK,
"EXPERIMENTAL online shrink feature in use. Use at your own risk!");
error = xfs_ag_shrink_space(mp, &tp, nagcount - 1, -delta);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b2879870a17e..52d6f2c7d58b 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2622,7 +2622,7 @@ xfs_ifree(
*/
error = xfs_difree(tp, pag, ip->i_ino, &xic);
if (error)
- return error;
+ goto out;
error = xfs_iunlink_remove(tp, pag, ip);
if (error)
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 0e5cb7936206..5a364a7d58fd 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -37,6 +37,7 @@
#include "xfs_health.h"
#include "xfs_reflink.h"
#include "xfs_ioctl.h"
+#include "xfs_xattr.h"
#include <linux/mount.h>
#include <linux/namei.h>
@@ -524,7 +525,7 @@ xfs_attrmulti_attr_set(
args.valuelen = len;
}
- error = xfs_attr_set(&args);
+ error = xfs_attr_change(&args);
if (!error && (flags & XFS_IOC_ATTR_ROOT))
xfs_forget_acl(inode, name);
kfree(args.value);
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index e912b7fee714..29f5b8b8aca6 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -24,6 +24,7 @@
#include "xfs_iomap.h"
#include "xfs_error.h"
#include "xfs_ioctl.h"
+#include "xfs_xattr.h"
#include <linux/posix_acl.h>
#include <linux/security.h>
@@ -61,7 +62,7 @@ xfs_initxattrs(
.value = xattr->value,
.valuelen = xattr->value_len,
};
- error = xfs_attr_set(&args);
+ error = xfs_attr_change(&args);
if (error < 0)
break;
}
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 9dc748abdf33..1e972f884a81 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -3877,44 +3877,3 @@ xlog_drop_incompat_feat(
{
up_read(&log->l_incompat_users);
}
-
-/*
- * Get permission to use log-assisted atomic exchange of file extents.
- *
- * Callers must not be running any transactions or hold any inode locks, and
- * they must release the permission by calling xlog_drop_incompat_feat
- * when they're done.
- */
-int
-xfs_attr_use_log_assist(
- struct xfs_mount *mp)
-{
- int error = 0;
-
- /*
- * Protect ourselves from an idle log clearing the logged xattrs log
- * incompat feature bit.
- */
- xlog_use_incompat_feat(mp->m_log);
-
- /*
- * If log-assisted xattrs are already enabled, the caller can use the
- * log assisted swap functions with the log-incompat reference we got.
- */
- if (xfs_sb_version_haslogxattrs(&mp->m_sb))
- return 0;
-
- /* Enable log-assisted xattrs. */
- error = xfs_add_incompat_log_feature(mp,
- XFS_SB_FEAT_INCOMPAT_LOG_XATTRS);
- if (error)
- goto drop_incompat;
-
- xfs_warn_once(mp,
-"EXPERIMENTAL logged extended attributes feature added. Use at your own risk!");
-
- return 0;
-drop_incompat:
- xlog_drop_incompat_feat(mp->m_log);
- return error;
-}
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 252b098cde1f..f3ce046a7d45 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -86,6 +86,13 @@ xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
return buf;
}
+static inline void *
+xlog_copy_from_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
+ const struct xfs_log_iovec *src)
+{
+ return xlog_copy_iovec(lv, vecp, src->i_type, src->i_addr, src->i_len);
+}
+
/*
* By comparing each component, we don't have to worry about extra
* endian issues in treating two 32 bit numbers as one 64 bit number
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 67fd9789e69a..686c01eb3661 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -428,9 +428,6 @@ struct xlog {
struct rw_semaphore l_incompat_users;
};
-#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
- ((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE))
-
/*
* Bits for operational state
*/
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 97b941c07957..5f7e4e6e33ce 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -39,13 +39,6 @@ STATIC int
xlog_clear_stale_blocks(
struct xlog *,
xfs_lsn_t);
-#if defined(DEBUG)
-STATIC void
-xlog_recover_check_summary(
- struct xlog *);
-#else
-#define xlog_recover_check_summary(log)
-#endif
STATIC int
xlog_do_recovery_pass(
struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
@@ -3230,7 +3223,7 @@ xlog_do_log_recovery(
xfs_daddr_t head_blk,
xfs_daddr_t tail_blk)
{
- int error, i;
+ int error;
ASSERT(head_blk != tail_blk);
@@ -3238,37 +3231,25 @@ xlog_do_log_recovery(
* First do a pass to find all of the cancelled buf log items.
* Store them in the buf_cancel_table for use in the second pass.
*/
- log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
- sizeof(struct list_head),
- 0);
- for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
- INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
+ error = xlog_alloc_buf_cancel_table(log);
+ if (error)
+ return error;
error = xlog_do_recovery_pass(log, head_blk, tail_blk,
XLOG_RECOVER_PASS1, NULL);
- if (error != 0) {
- kmem_free(log->l_buf_cancel_table);
- log->l_buf_cancel_table = NULL;
- return error;
- }
+ if (error != 0)
+ goto out_cancel;
+
/*
* Then do a second pass to actually recover the items in the log.
* When it is complete free the table of buf cancel items.
*/
error = xlog_do_recovery_pass(log, head_blk, tail_blk,
XLOG_RECOVER_PASS2, NULL);
-#ifdef DEBUG
- if (!error) {
- int i;
-
- for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
- ASSERT(list_empty(&log->l_buf_cancel_table[i]));
- }
-#endif /* DEBUG */
-
- kmem_free(log->l_buf_cancel_table);
- log->l_buf_cancel_table = NULL;
-
+ if (!error)
+ xlog_check_buf_cancel_table(log);
+out_cancel:
+ xlog_free_buf_cancel_table(log);
return error;
}
@@ -3339,8 +3320,6 @@ xlog_do_recover(
}
mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
- xlog_recover_check_summary(log);
-
/* Normal transactions can now occur */
clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
return 0;
@@ -3483,7 +3462,6 @@ xlog_recover_finish(
}
xlog_recover_process_iunlinks(log);
- xlog_recover_check_summary(log);
/*
* Recover any CoW staging blocks that are still referenced by the
@@ -3517,52 +3495,3 @@ xlog_recover_cancel(
xlog_recover_cancel_intents(log);
}
-#if defined(DEBUG)
-/*
- * Read all of the agf and agi counters and check that they
- * are consistent with the superblock counters.
- */
-STATIC void
-xlog_recover_check_summary(
- struct xlog *log)
-{
- struct xfs_mount *mp = log->l_mp;
- struct xfs_perag *pag;
- struct xfs_buf *agfbp;
- struct xfs_buf *agibp;
- xfs_agnumber_t agno;
- uint64_t freeblks;
- uint64_t itotal;
- uint64_t ifree;
- int error;
-
- freeblks = 0LL;
- itotal = 0LL;
- ifree = 0LL;
- for_each_perag(mp, agno, pag) {
- error = xfs_read_agf(mp, NULL, pag->pag_agno, 0, &agfbp);
- if (error) {
- xfs_alert(mp, "%s agf read failed agno %d error %d",
- __func__, pag->pag_agno, error);
- } else {
- struct xfs_agf *agfp = agfbp->b_addr;
-
- freeblks += be32_to_cpu(agfp->agf_freeblks) +
- be32_to_cpu(agfp->agf_flcount);
- xfs_buf_relse(agfbp);
- }
-
- error = xfs_read_agi(mp, NULL, pag->pag_agno, &agibp);
- if (error) {
- xfs_alert(mp, "%s agi read failed agno %d error %d",
- __func__, pag->pag_agno, error);
- } else {
- struct xfs_agi *agi = agibp->b_addr;
-
- itotal += be32_to_cpu(agi->agi_count);
- ifree += be32_to_cpu(agi->agi_freecount);
- xfs_buf_relse(agibp);
- }
- }
-}
-#endif /* DEBUG */
diff --git a/fs/xfs/xfs_message.h b/fs/xfs/xfs_message.h
index 55ee464ab59f..cc323775a12c 100644
--- a/fs/xfs/xfs_message.h
+++ b/fs/xfs/xfs_message.h
@@ -75,6 +75,12 @@ do { \
#define xfs_debug_ratelimited(dev, fmt, ...) \
xfs_printk_ratelimited(xfs_debug, dev, fmt, ##__VA_ARGS__)
+#define xfs_warn_mount(mp, warntag, fmt, ...) \
+do { \
+ if (xfs_should_warn((mp), (warntag))) \
+ xfs_warn((mp), (fmt), ##__VA_ARGS__); \
+} while (0)
+
#define xfs_warn_once(dev, fmt, ...) \
xfs_printk_once(xfs_warn, dev, fmt, ##__VA_ARGS__)
#define xfs_notice_once(dev, fmt, ...) \
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 0c0bcbd4949d..daa8d29c46b4 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1356,7 +1356,6 @@ xfs_clear_incompat_log_features(
if (xfs_sb_has_incompat_log_feature(&mp->m_sb,
XFS_SB_FEAT_INCOMPAT_LOG_ALL)) {
- xfs_info(mp, "Clearing log incompat feature flags.");
xfs_sb_remove_incompat_log_features(&mp->m_sb);
ret = true;
}
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 8c42786e4942..ba5d42abf66e 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -391,6 +391,13 @@ __XFS_HAS_FEAT(nouuid, NOUUID)
*/
#define XFS_OPSTATE_BLOCKGC_ENABLED 6
+/* Kernel has logged a warning about online fsck being used on this fs. */
+#define XFS_OPSTATE_WARNED_SCRUB 7
+/* Kernel has logged a warning about shrink being used on this fs. */
+#define XFS_OPSTATE_WARNED_SHRINK 8
+/* Kernel has logged a warning about logged xattr updates being used. */
+#define XFS_OPSTATE_WARNED_LARP 9
+
#define __XFS_IS_OPSTATE(name, NAME) \
static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
{ \
@@ -413,6 +420,12 @@ __XFS_IS_OPSTATE(readonly, READONLY)
__XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)
__XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
+static inline bool
+xfs_should_warn(struct xfs_mount *mp, long nr)
+{
+ return !test_and_set_bit(nr, &mp->m_opstate);
+}
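Because test_and_set_bit() returns the previous bit value, only the first caller per mount sees xfs_should_warn() return true; a usage sketch (illustrative):

	/* First call logs the alert and latches the opstate bit... */
	xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SHRINK,
	"EXPERIMENTAL online shrink feature in use. Use at your own risk!");

	/* ...repeat calls find the bit already set and stay silent. */
	xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SHRINK,
	"EXPERIMENTAL online shrink feature in use. Use at your own risk!");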
+
#define XFS_OPSTATE_STRINGS \
{ (1UL << XFS_OPSTATE_UNMOUNTING), "unmounting" }, \
{ (1UL << XFS_OPSTATE_CLEAN), "clean" }, \
@@ -420,7 +433,10 @@ __XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
{ (1UL << XFS_OPSTATE_INODE32), "inode32" }, \
{ (1UL << XFS_OPSTATE_READONLY), "read_only" }, \
{ (1UL << XFS_OPSTATE_INODEGC_ENABLED), "inodegc" }, \
- { (1UL << XFS_OPSTATE_BLOCKGC_ENABLED), "blockgc" }
+ { (1UL << XFS_OPSTATE_BLOCKGC_ENABLED), "blockgc" }, \
+ { (1UL << XFS_OPSTATE_WARNED_SCRUB), "wscrub" }, \
+ { (1UL << XFS_OPSTATE_WARNED_SHRINK), "wshrink" }, \
+ { (1UL << XFS_OPSTATE_WARNED_LARP), "wlarp" }
/*
* Max and min values for mount-option defined I/O
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 8fc813cb6011..abf08bbf34a9 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1308,8 +1308,15 @@ xfs_qm_quotacheck(
error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
NULL);
- if (error)
+ if (error) {
+ /*
+ * The inode walk may have partially populated the dquot
+ * caches. We must purge them before disabling quota and
+ * tearing down the quotainfo, or else the dquots will leak.
+ */
+ xfs_qm_dqpurge_all(mp);
goto error_return;
+ }
/*
* We've made all the changes that we need to make incore. Flush them
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 8495ef076ffc..ed18160e6181 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -38,6 +38,8 @@
#include "xfs_pwork.h"
#include "xfs_ag.h"
#include "xfs_defer.h"
+#include "xfs_attr_item.h"
+#include "xfs_xattr.h"
#include <linux/magic.h>
#include <linux/fs_context.h>
@@ -2079,8 +2081,24 @@ xfs_init_caches(void)
if (!xfs_bui_cache)
goto out_destroy_bud_cache;
+ xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
+ sizeof(struct xfs_attrd_log_item),
+ 0, 0, NULL);
+ if (!xfs_attrd_cache)
+ goto out_destroy_bui_cache;
+
+ xfs_attri_cache = kmem_cache_create("xfs_attri_item",
+ sizeof(struct xfs_attri_log_item),
+ 0, 0, NULL);
+ if (!xfs_attri_cache)
+ goto out_destroy_attrd_cache;
+
return 0;
+ out_destroy_attrd_cache:
+ kmem_cache_destroy(xfs_attrd_cache);
+ out_destroy_bui_cache:
+ kmem_cache_destroy(xfs_bui_cache);
out_destroy_bud_cache:
kmem_cache_destroy(xfs_bud_cache);
out_destroy_cui_cache:
@@ -2127,6 +2145,8 @@ xfs_destroy_caches(void)
* destroy caches.
*/
rcu_barrier();
+ kmem_cache_destroy(xfs_attri_cache);
+ kmem_cache_destroy(xfs_attrd_cache);
kmem_cache_destroy(xfs_bui_cache);
kmem_cache_destroy(xfs_bud_cache);
kmem_cache_destroy(xfs_cui_cache);
diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h
index 167d23f92ffe..3cd5a51bace1 100644
--- a/fs/xfs/xfs_super.h
+++ b/fs/xfs/xfs_super.h
@@ -91,7 +91,6 @@ extern xfs_agnumber_t xfs_set_inode_alloc(struct xfs_mount *,
xfs_agnumber_t agcount);
extern const struct export_operations xfs_export_operations;
-extern const struct xattr_handler *xfs_xattr_handlers[];
extern const struct quotactl_ops xfs_quotactl_operations;
extern void xfs_reinit_percpu_counters(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 7a044afd4c46..35e13e125ec6 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -15,9 +15,86 @@
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_acl.h"
+#include "xfs_log.h"
+#include "xfs_xattr.h"
#include <linux/posix_acl_xattr.h>
+/*
+ * Get permission to use log-assisted atomic exchange of file extents.
+ *
+ * Callers must not be running any transactions or hold any inode locks, and
+ * they must release the permission by calling xlog_drop_incompat_feat
+ * when they're done.
+ */
+static inline int
+xfs_attr_grab_log_assist(
+ struct xfs_mount *mp)
+{
+ int error = 0;
+
+ /*
+ * Protect ourselves from an idle log clearing the logged xattrs log
+ * incompat feature bit.
+ */
+ xlog_use_incompat_feat(mp->m_log);
+
+ /*
+ * If log-assisted xattrs are already enabled, the caller can use the
+ * log assisted swap functions with the log-incompat reference we got.
+ */
+ if (xfs_sb_version_haslogxattrs(&mp->m_sb))
+ return 0;
+
+ /* Enable log-assisted xattrs. */
+ error = xfs_add_incompat_log_feature(mp,
+ XFS_SB_FEAT_INCOMPAT_LOG_XATTRS);
+ if (error)
+ goto drop_incompat;
+
+ xfs_warn_mount(mp, XFS_OPSTATE_WARNED_LARP,
+ "EXPERIMENTAL logged extended attributes feature in use. Use at your own risk!");
+
+ return 0;
+drop_incompat:
+ xlog_drop_incompat_feat(mp->m_log);
+ return error;
+}
+
+static inline void
+xfs_attr_rele_log_assist(
+ struct xfs_mount *mp)
+{
+ xlog_drop_incompat_feat(mp->m_log);
+}
+
+/*
+ * Set or remove an xattr, having grabbed the appropriate logging resources
+ * prior to calling libxfs.
+ */
+int
+xfs_attr_change(
+ struct xfs_da_args *args)
+{
+ struct xfs_mount *mp = args->dp->i_mount;
+ bool use_logging = false;
+ int error;
+
+ if (xfs_has_larp(mp)) {
+ error = xfs_attr_grab_log_assist(mp);
+ if (error)
+ return error;
+
+ use_logging = true;
+ }
+
+ error = xfs_attr_set(args);
+
+ if (use_logging)
+ xfs_attr_rele_log_assist(mp);
+ return error;
+}
+
static int
xfs_xattr_get(const struct xattr_handler *handler, struct dentry *unused,
@@ -56,7 +133,7 @@ xfs_xattr_set(const struct xattr_handler *handler,
};
int error;
- error = xfs_attr_set(&args);
+ error = xfs_attr_change(&args);
if (!error && (handler->flags & XFS_ATTR_ROOT))
xfs_forget_acl(inode, name);
return error;
diff --git a/fs/xfs/xfs_xattr.h b/fs/xfs/xfs_xattr.h
new file mode 100644
index 000000000000..2b09133b1b9b
--- /dev/null
+++ b/fs/xfs/xfs_xattr.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ */
+#ifndef __XFS_XATTR_H__
+#define __XFS_XATTR_H__
+
+int xfs_attr_change(struct xfs_da_args *args);
+
+extern const struct xattr_handler *xfs_xattr_handlers[];
+
+#endif /* __XFS_XATTR_H__ */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 772590e2eddb..0dc1ea0b52f5 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -585,14 +585,22 @@ int unregister_acpi_bus_type(struct acpi_bus_type *);
int acpi_bind_one(struct device *dev, struct acpi_device *adev);
int acpi_unbind_one(struct device *dev);
+enum acpi_bridge_type {
+ ACPI_BRIDGE_TYPE_PCIE = 1,
+ ACPI_BRIDGE_TYPE_CXL,
+};
+
struct acpi_pci_root {
struct acpi_device * device;
struct pci_bus *bus;
u16 segment;
+ int bridge_type;
struct resource secondary; /* downstream bus range */
- u32 osc_support_set; /* _OSC state of support bits */
- u32 osc_control_set; /* _OSC state of control bits */
+ u32 osc_support_set; /* _OSC state of support bits */
+ u32 osc_control_set; /* _OSC state of control bits */
+ u32 osc_ext_support_set; /* _OSC state of extended support bits */
+ u32 osc_ext_control_set; /* _OSC state of extended control bits */
phys_addr_t mcfg_addr;
};
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
index d46c0201cc34..d06308a2a7a8 100644
--- a/include/asm-generic/compat.h
+++ b/include/asm-generic/compat.h
@@ -2,6 +2,25 @@
#ifndef __ASM_GENERIC_COMPAT_H
#define __ASM_GENERIC_COMPAT_H
+#ifndef COMPAT_USER_HZ
+#define COMPAT_USER_HZ 100
+#endif
+
+#ifndef COMPAT_RLIM_INFINITY
+#define COMPAT_RLIM_INFINITY 0xffffffff
+#endif
+
+#ifndef COMPAT_OFF_T_MAX
+#define COMPAT_OFF_T_MAX 0x7fffffff
+#endif
+
+#if !defined(compat_arg_u64) && !defined(CONFIG_CPU_BIG_ENDIAN)
+#define compat_arg_u64(name) u32 name##_lo, u32 name##_hi
+#define compat_arg_u64_dual(name) u32, name##_lo, u32, name##_hi
+#define compat_arg_u64_glue(name) (((u64)name##_lo & 0xffffffffUL) | \
+ ((u64)name##_hi << 32))
+#endif
+
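These helpers split a 64-bit syscall argument into a register pair on 32-bit ABIs and glue it back together in the wrapper. A hedged sketch of the intended pattern (example_seek and ksys_example_seek are hypothetical names; note compat_arg_u64_dual() expands to two parameters, hence DEFINE4):

	/* Hypothetical compat wrapper for a syscall taking a 64-bit offset. */
	COMPAT_SYSCALL_DEFINE4(example_seek, unsigned int, fd,
			       compat_arg_u64_dual(offset), unsigned int, whence)
	{
		return ksys_example_seek(fd, compat_arg_u64_glue(offset), whence);
	}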
/* These types are common across all compat ABIs */
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
@@ -24,6 +43,11 @@ typedef u32 compat_caddr_t;
typedef u32 compat_aio_context_t;
typedef u32 compat_old_sigset_t;
+#ifndef __compat_uid_t
+typedef u32 __compat_uid_t;
+typedef u32 __compat_gid_t;
+#endif
+
#ifndef __compat_uid32_t
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
@@ -47,4 +71,93 @@ typedef u32 compat_sigset_word;
#define _COMPAT_NSIG_BPW 32
#endif
+#ifndef compat_dev_t
+typedef u32 compat_dev_t;
+#endif
+
+#ifndef compat_ipc_pid_t
+typedef s32 compat_ipc_pid_t;
+#endif
+
+#ifndef compat_fsid_t
+typedef __kernel_fsid_t compat_fsid_t;
+#endif
+
+#ifndef compat_statfs
+struct compat_statfs {
+ compat_int_t f_type;
+ compat_int_t f_bsize;
+ compat_int_t f_blocks;
+ compat_int_t f_bfree;
+ compat_int_t f_bavail;
+ compat_int_t f_files;
+ compat_int_t f_ffree;
+ compat_fsid_t f_fsid;
+ compat_int_t f_namelen;
+ compat_int_t f_frsize;
+ compat_int_t f_flags;
+ compat_int_t f_spare[4];
+};
+#endif
+
+#ifndef compat_ipc64_perm
+struct compat_ipc64_perm {
+ compat_key_t key;
+ __compat_uid32_t uid;
+ __compat_gid32_t gid;
+ __compat_uid32_t cuid;
+ __compat_gid32_t cgid;
+ compat_mode_t mode;
+ unsigned char __pad1[4 - sizeof(compat_mode_t)];
+ compat_ushort_t seq;
+ compat_ushort_t __pad2;
+ compat_ulong_t unused1;
+ compat_ulong_t unused2;
+};
+
+struct compat_semid64_ds {
+ struct compat_ipc64_perm sem_perm;
+ compat_ulong_t sem_otime;
+ compat_ulong_t sem_otime_high;
+ compat_ulong_t sem_ctime;
+ compat_ulong_t sem_ctime_high;
+ compat_ulong_t sem_nsems;
+ compat_ulong_t __unused3;
+ compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+ struct compat_ipc64_perm msg_perm;
+ compat_ulong_t msg_stime;
+ compat_ulong_t msg_stime_high;
+ compat_ulong_t msg_rtime;
+ compat_ulong_t msg_rtime_high;
+ compat_ulong_t msg_ctime;
+ compat_ulong_t msg_ctime_high;
+ compat_ulong_t msg_cbytes;
+ compat_ulong_t msg_qnum;
+ compat_ulong_t msg_qbytes;
+ compat_pid_t msg_lspid;
+ compat_pid_t msg_lrpid;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+ struct compat_ipc64_perm shm_perm;
+ compat_size_t shm_segsz;
+ compat_ulong_t shm_atime;
+ compat_ulong_t shm_atime_high;
+ compat_ulong_t shm_dtime;
+ compat_ulong_t shm_dtime_high;
+ compat_ulong_t shm_ctime;
+ compat_ulong_t shm_ctime_high;
+ compat_pid_t shm_cpid;
+ compat_pid_t shm_lpid;
+ compat_ulong_t shm_nattch;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+#endif
+
#endif
diff --git a/include/clocksource/timer-xilinx.h b/include/clocksource/timer-xilinx.h
new file mode 100644
index 000000000000..c0f56fe6d22a
--- /dev/null
+++ b/include/clocksource/timer-xilinx.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2021 Sean Anderson <sean.anderson@seco.com>
+ */
+
+#ifndef XILINX_TIMER_H
+#define XILINX_TIMER_H
+
+#include <linux/compiler.h>
+
+#define TCSR0 0x00
+#define TLR0 0x04
+#define TCR0 0x08
+#define TCSR1 0x10
+#define TLR1 0x14
+#define TCR1 0x18
+
+#define TCSR_MDT BIT(0)
+#define TCSR_UDT BIT(1)
+#define TCSR_GENT BIT(2)
+#define TCSR_CAPT BIT(3)
+#define TCSR_ARHT BIT(4)
+#define TCSR_LOAD BIT(5)
+#define TCSR_ENIT BIT(6)
+#define TCSR_ENT BIT(7)
+#define TCSR_TINT BIT(8)
+#define TCSR_PWMA BIT(9)
+#define TCSR_ENALL BIT(10)
+#define TCSR_CASC BIT(11)
+
+struct clk;
+struct device_node;
+struct regmap;
+
+/**
+ * struct xilinx_timer_priv - Private data for Xilinx AXI timer drivers
+ * @map: Regmap of the device, possibly with an offset
+ * @clk: Parent clock
+ * @max: Maximum value of the counters
+ */
+struct xilinx_timer_priv {
+ struct regmap *map;
+ struct clk *clk;
+ u32 max;
+};
+
+/**
+ * xilinx_timer_tlr_cycles() - Calculate the TLR for a period specified
+ * in clock cycles
+ * @priv: The timer's private data
+ * @tcsr: The value of the TCSR register for this counter
+ * @cycles: The number of cycles in this period
+ *
+ * Callers of this function MUST ensure that @cycles is representable as
+ * a TLR.
+ *
+ * Return: The calculated value for TLR
+ */
+u32 xilinx_timer_tlr_cycles(struct xilinx_timer_priv *priv, u32 tcsr,
+ u64 cycles);
+
+/**
+ * xilinx_timer_get_period() - Get the current period of a counter
+ * @priv: The timer's private data
+ * @tlr: The value of TLR for this counter
+ * @tcsr: The value of TCSR for this counter
+ *
+ * Return: The period, in ns
+ */
+unsigned int xilinx_timer_get_period(struct xilinx_timer_priv *priv,
+ u32 tlr, u32 tcsr);
+
+#endif /* XILINX_TIMER_H */
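A minimal consumer sketch for the helpers declared above (illustrative; example_set_period() and its range policy are assumptions, and the regmap plumbing follows the struct comment):

	/* Program one counter's period from a cycle count. */
	static int example_set_period(struct xilinx_timer_priv *priv, u64 cycles)
	{
		u32 tcsr;
		int ret;

		if (cycles < 2 || cycles > priv->max)
			return -ERANGE;	/* @cycles must be representable as a TLR */

		ret = regmap_read(priv->map, TCSR0, &tcsr);
		if (ret)
			return ret;

		return regmap_write(priv->map, TLR0,
				    xilinx_timer_tlr_cycles(priv, tcsr, cycles));
	}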
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
index 709f286e7b25..9656a9a40326 100644
--- a/include/crypto/sm4.h
+++ b/include/crypto/sm4.h
@@ -21,6 +21,10 @@ struct sm4_ctx {
u32 rkey_dec[SM4_RKEY_WORDS];
};
+extern const u32 crypto_sm4_fk[];
+extern const u32 crypto_sm4_ck[];
+extern const u8 crypto_sm4_sbox[];
+
/**
* sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016
* @ctx: The location where the computed key will be stored.
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index 22deb216b59c..08e0e3ffad13 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -67,6 +67,14 @@ static inline bool drm_arch_can_wc_memory(void)
* optimization entirely for ARM and arm64.
*/
return false;
+#elif defined(CONFIG_LOONGARCH)
+ /*
+ * LoongArch maintains cache coherency in hardware, but its WUC attribute
+ * (Weak-ordered UnCached, which is similar to WC) is out of the scope of
+ * the cache coherency mechanism. This means WUC can only be used for write-only
+ * memory regions.
+ */
+ return false;
#else
return true;
#endif
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index c3204a58fb09..b2756753370b 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -121,7 +121,7 @@ struct detailed_data_monitor_range {
u8 supported_scalings;
u8 preferred_refresh;
} __attribute__((packed)) cvt;
- } formula;
+ } __attribute__((packed)) formula;
} __attribute__((packed));
struct detailed_data_wpindex {
@@ -154,7 +154,7 @@ struct detailed_non_pixel {
struct detailed_data_wpindex color;
struct std_timing timings[6];
struct cvt_timing cvt[4];
- } data;
+ } __attribute__((packed)) data;
} __attribute__((packed));
#define EDID_DETAIL_EST_TIMINGS 0xf7
@@ -172,7 +172,7 @@ struct detailed_timing {
union {
struct detailed_pixel_timing pixel_data;
struct detailed_non_pixel other_data;
- } data;
+ } __attribute__((packed)) data;
} __attribute__((packed));
#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
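The added packing on the nested unions is what keeps the descriptor layout byte-exact; a hedged compile-time sketch of the invariant (not part of the patch; 18 bytes is the EDID detailed-descriptor size):

	_Static_assert(sizeof(struct detailed_timing) == 18,
		       "EDID detailed timing descriptor must stay 18 bytes");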
diff --git a/include/dt-bindings/clock/en7523-clk.h b/include/dt-bindings/clock/en7523-clk.h
new file mode 100644
index 000000000000..717d23a5e5ae
--- /dev/null
+++ b/include/dt-bindings/clock/en7523-clk.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_
+#define _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_
+
+#define EN7523_CLK_GSW 0
+#define EN7523_CLK_EMI 1
+#define EN7523_CLK_BUS 2
+#define EN7523_CLK_SLIC 3
+#define EN7523_CLK_SPI 4
+#define EN7523_CLK_NPU 5
+#define EN7523_CLK_CRYPTO 6
+#define EN7523_CLK_PCIE 7
+
+#define EN7523_NUM_CLOCKS 8
+
+#endif /* _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_ */
diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h
index 01e8bab1d767..07b8a282c268 100644
--- a/include/dt-bindings/clock/imx8mn-clock.h
+++ b/include/dt-bindings/clock/imx8mn-clock.h
@@ -243,6 +243,20 @@
#define IMX8MN_CLK_M7_CORE 221
-#define IMX8MN_CLK_END 222
+#define IMX8MN_CLK_GPT_3M 222
+#define IMX8MN_CLK_GPT1 223
+#define IMX8MN_CLK_GPT1_ROOT 224
+#define IMX8MN_CLK_GPT2 225
+#define IMX8MN_CLK_GPT2_ROOT 226
+#define IMX8MN_CLK_GPT3 227
+#define IMX8MN_CLK_GPT3_ROOT 228
+#define IMX8MN_CLK_GPT4 229
+#define IMX8MN_CLK_GPT4_ROOT 230
+#define IMX8MN_CLK_GPT5 231
+#define IMX8MN_CLK_GPT5_ROOT 232
+#define IMX8MN_CLK_GPT6 233
+#define IMX8MN_CLK_GPT6_ROOT 234
+
+#define IMX8MN_CLK_END 235
#endif
diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
index 235c7a00d379..9d5cc2ddde89 100644
--- a/include/dt-bindings/clock/imx8mp-clock.h
+++ b/include/dt-bindings/clock/imx8mp-clock.h
@@ -317,8 +317,15 @@
#define IMX8MP_CLK_AUDIO_AXI 310
#define IMX8MP_CLK_HSIO_AXI 311
#define IMX8MP_CLK_MEDIA_ISP 312
+#define IMX8MP_CLK_MEDIA_DISP2_PIX 313
+#define IMX8MP_CLK_CLKOUT1_SEL 314
+#define IMX8MP_CLK_CLKOUT1_DIV 315
+#define IMX8MP_CLK_CLKOUT1 316
+#define IMX8MP_CLK_CLKOUT2_SEL 317
+#define IMX8MP_CLK_CLKOUT2_DIV 318
+#define IMX8MP_CLK_CLKOUT2 319
-#define IMX8MP_CLK_END 313
+#define IMX8MP_CLK_END 320
#define IMX8MP_CLK_AUDIOMIX_SAI1_IPG 0
#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1 1
diff --git a/include/dt-bindings/clock/mt8186-clk.h b/include/dt-bindings/clock/mt8186-clk.h
new file mode 100644
index 000000000000..a70bf67af47d
--- /dev/null
+++ b/include/dt-bindings/clock/mt8186-clk.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8186_H
+#define _DT_BINDINGS_CLK_MT8186_H
+
+/* MCUSYS */
+
+#define CLK_MCU_ARMPLL_LL_SEL 0
+#define CLK_MCU_ARMPLL_BL_SEL 1
+#define CLK_MCU_ARMPLL_BUS_SEL 2
+#define CLK_MCU_NR_CLK 3
+
+/* TOPCKGEN */
+
+#define CLK_TOP_AXI 0
+#define CLK_TOP_SCP 1
+#define CLK_TOP_MFG 2
+#define CLK_TOP_CAMTG 3
+#define CLK_TOP_CAMTG1 4
+#define CLK_TOP_CAMTG2 5
+#define CLK_TOP_CAMTG3 6
+#define CLK_TOP_CAMTG4 7
+#define CLK_TOP_CAMTG5 8
+#define CLK_TOP_CAMTG6 9
+#define CLK_TOP_UART 10
+#define CLK_TOP_SPI 11
+#define CLK_TOP_MSDC50_0_HCLK 12
+#define CLK_TOP_MSDC50_0 13
+#define CLK_TOP_MSDC30_1 14
+#define CLK_TOP_AUDIO 15
+#define CLK_TOP_AUD_INTBUS 16
+#define CLK_TOP_AUD_1 17
+#define CLK_TOP_AUD_2 18
+#define CLK_TOP_AUD_ENGEN1 19
+#define CLK_TOP_AUD_ENGEN2 20
+#define CLK_TOP_DISP_PWM 21
+#define CLK_TOP_SSPM 22
+#define CLK_TOP_DXCC 23
+#define CLK_TOP_USB_TOP 24
+#define CLK_TOP_SRCK 25
+#define CLK_TOP_SPM 26
+#define CLK_TOP_I2C 27
+#define CLK_TOP_PWM 28
+#define CLK_TOP_SENINF 29
+#define CLK_TOP_SENINF1 30
+#define CLK_TOP_SENINF2 31
+#define CLK_TOP_SENINF3 32
+#define CLK_TOP_AES_MSDCFDE 33
+#define CLK_TOP_PWRAP_ULPOSC 34
+#define CLK_TOP_CAMTM 35
+#define CLK_TOP_VENC 36
+#define CLK_TOP_CAM 37
+#define CLK_TOP_IMG1 38
+#define CLK_TOP_IPE 39
+#define CLK_TOP_DPMAIF 40
+#define CLK_TOP_VDEC 41
+#define CLK_TOP_DISP 42
+#define CLK_TOP_MDP 43
+#define CLK_TOP_AUDIO_H 44
+#define CLK_TOP_UFS 45
+#define CLK_TOP_AES_FDE 46
+#define CLK_TOP_AUDIODSP 47
+#define CLK_TOP_DVFSRC 48
+#define CLK_TOP_DSI_OCC 49
+#define CLK_TOP_SPMI_MST 50
+#define CLK_TOP_SPINOR 51
+#define CLK_TOP_NNA 52
+#define CLK_TOP_NNA1 53
+#define CLK_TOP_NNA2 54
+#define CLK_TOP_SSUSB_XHCI 55
+#define CLK_TOP_SSUSB_TOP_1P 56
+#define CLK_TOP_SSUSB_XHCI_1P 57
+#define CLK_TOP_WPE 58
+#define CLK_TOP_DPI 59
+#define CLK_TOP_U3_OCC_250M 60
+#define CLK_TOP_U3_OCC_500M 61
+#define CLK_TOP_ADSP_BUS 62
+#define CLK_TOP_APLL_I2S0_MCK_SEL 63
+#define CLK_TOP_APLL_I2S1_MCK_SEL 64
+#define CLK_TOP_APLL_I2S2_MCK_SEL 65
+#define CLK_TOP_APLL_I2S4_MCK_SEL 66
+#define CLK_TOP_APLL_TDMOUT_MCK_SEL 67
+#define CLK_TOP_MAINPLL_D2 68
+#define CLK_TOP_MAINPLL_D2_D2 69
+#define CLK_TOP_MAINPLL_D2_D4 70
+#define CLK_TOP_MAINPLL_D2_D16 71
+#define CLK_TOP_MAINPLL_D3 72
+#define CLK_TOP_MAINPLL_D3_D2 73
+#define CLK_TOP_MAINPLL_D3_D4 74
+#define CLK_TOP_MAINPLL_D5 75
+#define CLK_TOP_MAINPLL_D5_D2 76
+#define CLK_TOP_MAINPLL_D5_D4 77
+#define CLK_TOP_MAINPLL_D7 78
+#define CLK_TOP_MAINPLL_D7_D2 79
+#define CLK_TOP_MAINPLL_D7_D4 80
+#define CLK_TOP_UNIVPLL 81
+#define CLK_TOP_UNIVPLL_D2 82
+#define CLK_TOP_UNIVPLL_D2_D2 83
+#define CLK_TOP_UNIVPLL_D2_D4 84
+#define CLK_TOP_UNIVPLL_D3 85
+#define CLK_TOP_UNIVPLL_D3_D2 86
+#define CLK_TOP_UNIVPLL_D3_D4 87
+#define CLK_TOP_UNIVPLL_D3_D8 88
+#define CLK_TOP_UNIVPLL_D3_D32 89
+#define CLK_TOP_UNIVPLL_D5 90
+#define CLK_TOP_UNIVPLL_D5_D2 91
+#define CLK_TOP_UNIVPLL_D5_D4 92
+#define CLK_TOP_UNIVPLL_D7 93
+#define CLK_TOP_UNIVPLL_192M 94
+#define CLK_TOP_UNIVPLL_192M_D4 95
+#define CLK_TOP_UNIVPLL_192M_D8 96
+#define CLK_TOP_UNIVPLL_192M_D16 97
+#define CLK_TOP_UNIVPLL_192M_D32 98
+#define CLK_TOP_APLL1_D2 99
+#define CLK_TOP_APLL1_D4 100
+#define CLK_TOP_APLL1_D8 101
+#define CLK_TOP_APLL2_D2 102
+#define CLK_TOP_APLL2_D4 103
+#define CLK_TOP_APLL2_D8 104
+#define CLK_TOP_MMPLL_D2 105
+#define CLK_TOP_TVDPLL_D2 106
+#define CLK_TOP_TVDPLL_D4 107
+#define CLK_TOP_TVDPLL_D8 108
+#define CLK_TOP_TVDPLL_D16 109
+#define CLK_TOP_TVDPLL_D32 110
+#define CLK_TOP_MSDCPLL_D2 111
+#define CLK_TOP_ULPOSC1 112
+#define CLK_TOP_ULPOSC1_D2 113
+#define CLK_TOP_ULPOSC1_D4 114
+#define CLK_TOP_ULPOSC1_D8 115
+#define CLK_TOP_ULPOSC1_D10 116
+#define CLK_TOP_ULPOSC1_D16 117
+#define CLK_TOP_ULPOSC1_D32 118
+#define CLK_TOP_ADSPPLL_D2 119
+#define CLK_TOP_ADSPPLL_D4 120
+#define CLK_TOP_ADSPPLL_D8 121
+#define CLK_TOP_NNAPLL_D2 122
+#define CLK_TOP_NNAPLL_D4 123
+#define CLK_TOP_NNAPLL_D8 124
+#define CLK_TOP_NNA2PLL_D2 125
+#define CLK_TOP_NNA2PLL_D4 126
+#define CLK_TOP_NNA2PLL_D8 127
+#define CLK_TOP_F_BIST2FPC 128
+#define CLK_TOP_466M_FMEM 129
+#define CLK_TOP_MPLL 130
+#define CLK_TOP_APLL12_CK_DIV0 131
+#define CLK_TOP_APLL12_CK_DIV1 132
+#define CLK_TOP_APLL12_CK_DIV2 133
+#define CLK_TOP_APLL12_CK_DIV4 134
+#define CLK_TOP_APLL12_CK_DIV_TDMOUT_M 135
+#define CLK_TOP_NR_CLK 136
+
+/* INFRACFG_AO */
+
+#define CLK_INFRA_AO_PMIC_TMR 0
+#define CLK_INFRA_AO_PMIC_AP 1
+#define CLK_INFRA_AO_PMIC_MD 2
+#define CLK_INFRA_AO_PMIC_CONN 3
+#define CLK_INFRA_AO_SCP_CORE 4
+#define CLK_INFRA_AO_SEJ 5
+#define CLK_INFRA_AO_APXGPT 6
+#define CLK_INFRA_AO_ICUSB 7
+#define CLK_INFRA_AO_GCE 8
+#define CLK_INFRA_AO_THERM 9
+#define CLK_INFRA_AO_I2C_AP 10
+#define CLK_INFRA_AO_I2C_CCU 11
+#define CLK_INFRA_AO_I2C_SSPM 12
+#define CLK_INFRA_AO_I2C_RSV 13
+#define CLK_INFRA_AO_PWM_HCLK 14
+#define CLK_INFRA_AO_PWM1 15
+#define CLK_INFRA_AO_PWM2 16
+#define CLK_INFRA_AO_PWM3 17
+#define CLK_INFRA_AO_PWM4 18
+#define CLK_INFRA_AO_PWM5 19
+#define CLK_INFRA_AO_PWM 20
+#define CLK_INFRA_AO_UART0 21
+#define CLK_INFRA_AO_UART1 22
+#define CLK_INFRA_AO_UART2 23
+#define CLK_INFRA_AO_GCE_26M 24
+#define CLK_INFRA_AO_CQ_DMA_FPC 25
+#define CLK_INFRA_AO_BTIF 26
+#define CLK_INFRA_AO_SPI0 27
+#define CLK_INFRA_AO_MSDC0 28
+#define CLK_INFRA_AO_MSDCFDE 29
+#define CLK_INFRA_AO_MSDC1 30
+#define CLK_INFRA_AO_DVFSRC 31
+#define CLK_INFRA_AO_GCPU 32
+#define CLK_INFRA_AO_TRNG 33
+#define CLK_INFRA_AO_AUXADC 34
+#define CLK_INFRA_AO_CPUM 35
+#define CLK_INFRA_AO_CCIF1_AP 36
+#define CLK_INFRA_AO_CCIF1_MD 37
+#define CLK_INFRA_AO_AUXADC_MD 38
+#define CLK_INFRA_AO_AP_DMA 39
+#define CLK_INFRA_AO_XIU 40
+#define CLK_INFRA_AO_DEVICE_APC 41
+#define CLK_INFRA_AO_CCIF_AP 42
+#define CLK_INFRA_AO_DEBUGTOP 43
+#define CLK_INFRA_AO_AUDIO 44
+#define CLK_INFRA_AO_CCIF_MD 45
+#define CLK_INFRA_AO_DXCC_SEC_CORE 46
+#define CLK_INFRA_AO_DXCC_AO 47
+#define CLK_INFRA_AO_IMP_IIC 48
+#define CLK_INFRA_AO_DRAMC_F26M 49
+#define CLK_INFRA_AO_RG_PWM_FBCLK6 50
+#define CLK_INFRA_AO_SSUSB_TOP_HCLK 51
+#define CLK_INFRA_AO_DISP_PWM 52
+#define CLK_INFRA_AO_CLDMA_BCLK 53
+#define CLK_INFRA_AO_AUDIO_26M_BCLK 54
+#define CLK_INFRA_AO_SSUSB_TOP_P1_HCLK 55
+#define CLK_INFRA_AO_SPI1 56
+#define CLK_INFRA_AO_I2C4 57
+#define CLK_INFRA_AO_MODEM_TEMP_SHARE 58
+#define CLK_INFRA_AO_SPI2 59
+#define CLK_INFRA_AO_SPI3 60
+#define CLK_INFRA_AO_SSUSB_TOP_REF 61
+#define CLK_INFRA_AO_SSUSB_TOP_XHCI 62
+#define CLK_INFRA_AO_SSUSB_TOP_P1_REF 63
+#define CLK_INFRA_AO_SSUSB_TOP_P1_XHCI 64
+#define CLK_INFRA_AO_SSPM 65
+#define CLK_INFRA_AO_SSUSB_TOP_P1_SYS 66
+#define CLK_INFRA_AO_I2C5 67
+#define CLK_INFRA_AO_I2C5_ARBITER 68
+#define CLK_INFRA_AO_I2C5_IMM 69
+#define CLK_INFRA_AO_I2C1_ARBITER 70
+#define CLK_INFRA_AO_I2C1_IMM 71
+#define CLK_INFRA_AO_I2C2_ARBITER 72
+#define CLK_INFRA_AO_I2C2_IMM 73
+#define CLK_INFRA_AO_SPI4 74
+#define CLK_INFRA_AO_SPI5 75
+#define CLK_INFRA_AO_CQ_DMA 76
+#define CLK_INFRA_AO_BIST2FPC 77
+#define CLK_INFRA_AO_MSDC0_SELF 78
+#define CLK_INFRA_AO_SPINOR 79
+#define CLK_INFRA_AO_SSPM_26M_SELF 80
+#define CLK_INFRA_AO_SSPM_32K_SELF 81
+#define CLK_INFRA_AO_I2C6 82
+#define CLK_INFRA_AO_AP_MSDC0 83
+#define CLK_INFRA_AO_MD_MSDC0 84
+#define CLK_INFRA_AO_MSDC0_SRC 85
+#define CLK_INFRA_AO_MSDC1_SRC 86
+#define CLK_INFRA_AO_SEJ_F13M 87
+#define CLK_INFRA_AO_AES_TOP0_BCLK 88
+#define CLK_INFRA_AO_MCU_PM_BCLK 89
+#define CLK_INFRA_AO_CCIF2_AP 90
+#define CLK_INFRA_AO_CCIF2_MD 91
+#define CLK_INFRA_AO_CCIF3_AP 92
+#define CLK_INFRA_AO_CCIF3_MD 93
+#define CLK_INFRA_AO_FADSP_26M 94
+#define CLK_INFRA_AO_FADSP_32K 95
+#define CLK_INFRA_AO_CCIF4_AP 96
+#define CLK_INFRA_AO_CCIF4_MD 97
+#define CLK_INFRA_AO_FADSP 98
+#define CLK_INFRA_AO_FLASHIF_133M 99
+#define CLK_INFRA_AO_FLASHIF_66M 100
+#define CLK_INFRA_AO_NR_CLK 101
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_ARMPLL_LL 0
+#define CLK_APMIXED_ARMPLL_BL 1
+#define CLK_APMIXED_CCIPLL 2
+#define CLK_APMIXED_MAINPLL 3
+#define CLK_APMIXED_UNIV2PLL 4
+#define CLK_APMIXED_MSDCPLL 5
+#define CLK_APMIXED_MMPLL 6
+#define CLK_APMIXED_NNAPLL 7
+#define CLK_APMIXED_NNA2PLL 8
+#define CLK_APMIXED_ADSPPLL 9
+#define CLK_APMIXED_MFGPLL 10
+#define CLK_APMIXED_TVDPLL 11
+#define CLK_APMIXED_APLL1 12
+#define CLK_APMIXED_APLL2 13
+#define CLK_APMIXED_NR_CLK 14
+
+/* IMP_IIC_WRAP */
+
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C0 0
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C1 1
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C2 2
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C3 3
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C4 4
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C5 5
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C6 6
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C7 7
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C8 8
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C9 9
+#define CLK_IMP_IIC_WRAP_NR_CLK 10
+
+/* MFGCFG */
+
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_NR_CLK 1
+
+/* MMSYS */
+
+#define CLK_MM_DISP_MUTEX0 0
+#define CLK_MM_APB_MM_BUS 1
+#define CLK_MM_DISP_OVL0 2
+#define CLK_MM_DISP_RDMA0 3
+#define CLK_MM_DISP_OVL0_2L 4
+#define CLK_MM_DISP_WDMA0 5
+#define CLK_MM_DISP_RSZ0 6
+#define CLK_MM_DISP_AAL0 7
+#define CLK_MM_DISP_CCORR0 8
+#define CLK_MM_DISP_COLOR0 9
+#define CLK_MM_SMI_INFRA 10
+#define CLK_MM_DISP_DSC_WRAP0 11
+#define CLK_MM_DISP_GAMMA0 12
+#define CLK_MM_DISP_POSTMASK0 13
+#define CLK_MM_DISP_DITHER0 14
+#define CLK_MM_SMI_COMMON 15
+#define CLK_MM_DSI0 16
+#define CLK_MM_DISP_FAKE_ENG0 17
+#define CLK_MM_DISP_FAKE_ENG1 18
+#define CLK_MM_SMI_GALS 19
+#define CLK_MM_SMI_IOMMU 20
+#define CLK_MM_DISP_RDMA1 21
+#define CLK_MM_DISP_DPI 22
+#define CLK_MM_DSI0_DSI_CK_DOMAIN 23
+#define CLK_MM_DISP_26M 24
+#define CLK_MM_NR_CLK 25
+
+/* WPESYS */
+
+#define CLK_WPE_CK_EN 0
+#define CLK_WPE_SMI_LARB8_CK_EN 1
+#define CLK_WPE_SYS_EVENT_TX_CK_EN 2
+#define CLK_WPE_SMI_LARB8_PCLK_EN 3
+#define CLK_WPE_NR_CLK 4
+
+/* IMGSYS1 */
+
+#define CLK_IMG1_LARB9_IMG1 0
+#define CLK_IMG1_LARB10_IMG1 1
+#define CLK_IMG1_DIP 2
+#define CLK_IMG1_GALS_IMG1 3
+#define CLK_IMG1_NR_CLK 4
+
+/* IMGSYS2 */
+
+#define CLK_IMG2_LARB9_IMG2 0
+#define CLK_IMG2_LARB10_IMG2 1
+#define CLK_IMG2_MFB 2
+#define CLK_IMG2_WPE 3
+#define CLK_IMG2_MSS 4
+#define CLK_IMG2_GALS_IMG2 5
+#define CLK_IMG2_NR_CLK 6
+
+/* VDECSYS */
+
+#define CLK_VDEC_LARB1_CKEN 0
+#define CLK_VDEC_LAT_CKEN 1
+#define CLK_VDEC_LAT_ACTIVE 2
+#define CLK_VDEC_LAT_CKEN_ENG 3
+#define CLK_VDEC_MINI_MDP_CKEN_CFG_RG 4
+#define CLK_VDEC_CKEN 5
+#define CLK_VDEC_ACTIVE 6
+#define CLK_VDEC_CKEN_ENG 7
+#define CLK_VDEC_NR_CLK 8
+
+/* VENCSYS */
+
+#define CLK_VENC_CKE0_LARB 0
+#define CLK_VENC_CKE1_VENC 1
+#define CLK_VENC_CKE2_JPGENC 2
+#define CLK_VENC_CKE5_GALS 3
+#define CLK_VENC_NR_CLK 4
+
+/* CAMSYS */
+
+#define CLK_CAM_LARB13 0
+#define CLK_CAM_DFP_VAD 1
+#define CLK_CAM_LARB14 2
+#define CLK_CAM 3
+#define CLK_CAMTG 4
+#define CLK_CAM_SENINF 5
+#define CLK_CAMSV1 6
+#define CLK_CAMSV2 7
+#define CLK_CAMSV3 8
+#define CLK_CAM_CCU0 9
+#define CLK_CAM_CCU1 10
+#define CLK_CAM_MRAW0 11
+#define CLK_CAM_FAKE_ENG 12
+#define CLK_CAM_CCU_GALS 13
+#define CLK_CAM2MM_GALS 14
+#define CLK_CAM_NR_CLK 15
+
+/* CAMSYS_RAWA */
+
+#define CLK_CAM_RAWA_LARBX_RAWA 0
+#define CLK_CAM_RAWA 1
+#define CLK_CAM_RAWA_CAMTG_RAWA 2
+#define CLK_CAM_RAWA_NR_CLK 3
+
+/* CAMSYS_RAWB */
+
+#define CLK_CAM_RAWB_LARBX_RAWB 0
+#define CLK_CAM_RAWB 1
+#define CLK_CAM_RAWB_CAMTG_RAWB 2
+#define CLK_CAM_RAWB_NR_CLK 3
+
+/* MDPSYS */
+
+#define CLK_MDP_RDMA0 0
+#define CLK_MDP_TDSHP0 1
+#define CLK_MDP_IMG_DL_ASYNC0 2
+#define CLK_MDP_IMG_DL_ASYNC1 3
+#define CLK_MDP_DISP_RDMA 4
+#define CLK_MDP_HMS 5
+#define CLK_MDP_SMI0 6
+#define CLK_MDP_APB_BUS 7
+#define CLK_MDP_WROT0 8
+#define CLK_MDP_RSZ0 9
+#define CLK_MDP_HDR0 10
+#define CLK_MDP_MUTEX0 11
+#define CLK_MDP_WROT1 12
+#define CLK_MDP_RSZ1 13
+#define CLK_MDP_FAKE_ENG0 14
+#define CLK_MDP_AAL0 15
+#define CLK_MDP_DISP_WDMA 16
+#define CLK_MDP_COLOR 17
+#define CLK_MDP_IMG_DL_ASYNC2 18
+#define CLK_MDP_IMG_DL_RELAY0_ASYNC0 19
+#define CLK_MDP_IMG_DL_RELAY1_ASYNC1 20
+#define CLK_MDP_IMG_DL_RELAY2_ASYNC2 21
+#define CLK_MDP_NR_CLK 22
+
+/* IPESYS */
+
+#define CLK_IPE_LARB19 0
+#define CLK_IPE_LARB20 1
+#define CLK_IPE_SMI_SUBCOM 2
+#define CLK_IPE_FD 3
+#define CLK_IPE_FE 4
+#define CLK_IPE_RSC 5
+#define CLK_IPE_DPE 6
+#define CLK_IPE_GALS_IPE 7
+#define CLK_IPE_NR_CLK 8
+
+#endif /* _DT_BINDINGS_CLK_MT8186_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8976.h b/include/dt-bindings/clock/qcom,gcc-msm8976.h
index 51955fd49426..5351f48b2068 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8976.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8976.h
@@ -224,6 +224,7 @@
#define RST_CAMSS_CSI_VFE1_BCR 7
#define RST_CAMSS_VFE1_BCR 8
#define RST_CAMSS_CPP_BCR 9
+#define RST_MSS_BCR 10
/* GDSCs */
#define VENUS_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,gcc-sc8280xp.h b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
new file mode 100644
index 000000000000..cb2fb638825c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
@@ -0,0 +1,496 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_DIREWOLF_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_DIREWOLF_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL2 2
+#define GCC_GPLL4 3
+#define GCC_GPLL7 4
+#define GCC_GPLL8 5
+#define GCC_GPLL9 6
+#define GCC_AGGRE_NOC_PCIE0_TUNNEL_AXI_CLK 7
+#define GCC_AGGRE_NOC_PCIE1_TUNNEL_AXI_CLK 8
+#define GCC_AGGRE_NOC_PCIE_4_AXI_CLK 9
+#define GCC_AGGRE_NOC_PCIE_SOUTH_SF_AXI_CLK 10
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 11
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 12
+#define GCC_AGGRE_USB3_MP_AXI_CLK 13
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 14
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 15
+#define GCC_AGGRE_USB4_1_AXI_CLK 16
+#define GCC_AGGRE_USB4_AXI_CLK 17
+#define GCC_AGGRE_USB_NOC_AXI_CLK 18
+#define GCC_AGGRE_USB_NOC_NORTH_AXI_CLK 19
+#define GCC_AGGRE_USB_NOC_SOUTH_AXI_CLK 20
+#define GCC_AHB2PHY0_CLK 21
+#define GCC_AHB2PHY2_CLK 22
+#define GCC_BOOT_ROM_AHB_CLK 23
+#define GCC_CAMERA_AHB_CLK 24
+#define GCC_CAMERA_HF_AXI_CLK 25
+#define GCC_CAMERA_SF_AXI_CLK 26
+#define GCC_CAMERA_THROTTLE_NRT_AXI_CLK 27
+#define GCC_CAMERA_THROTTLE_RT_AXI_CLK 28
+#define GCC_CAMERA_THROTTLE_XO_CLK 29
+#define GCC_CAMERA_XO_CLK 30
+#define GCC_CFG_NOC_USB3_MP_AXI_CLK 31
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 32
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 33
+#define GCC_CNOC_PCIE0_TUNNEL_CLK 34
+#define GCC_CNOC_PCIE1_TUNNEL_CLK 35
+#define GCC_CNOC_PCIE4_QX_CLK 36
+#define GCC_DDRSS_GPU_AXI_CLK 37
+#define GCC_DDRSS_PCIE_SF_TBU_CLK 38
+#define GCC_DISP1_AHB_CLK 39
+#define GCC_DISP1_HF_AXI_CLK 40
+#define GCC_DISP1_SF_AXI_CLK 41
+#define GCC_DISP1_THROTTLE_NRT_AXI_CLK 42
+#define GCC_DISP1_THROTTLE_RT_AXI_CLK 43
+#define GCC_DISP1_XO_CLK 44
+#define GCC_DISP_AHB_CLK 45
+#define GCC_DISP_HF_AXI_CLK 46
+#define GCC_DISP_SF_AXI_CLK 47
+#define GCC_DISP_THROTTLE_NRT_AXI_CLK 48
+#define GCC_DISP_THROTTLE_RT_AXI_CLK 49
+#define GCC_DISP_XO_CLK 50
+#define GCC_EMAC0_AXI_CLK 51
+#define GCC_EMAC0_PTP_CLK 52
+#define GCC_EMAC0_PTP_CLK_SRC 53
+#define GCC_EMAC0_RGMII_CLK 54
+#define GCC_EMAC0_RGMII_CLK_SRC 55
+#define GCC_EMAC0_SLV_AHB_CLK 56
+#define GCC_EMAC1_AXI_CLK 57
+#define GCC_EMAC1_PTP_CLK 58
+#define GCC_EMAC1_PTP_CLK_SRC 59
+#define GCC_EMAC1_RGMII_CLK 60
+#define GCC_EMAC1_RGMII_CLK_SRC 61
+#define GCC_EMAC1_SLV_AHB_CLK 62
+#define GCC_GP1_CLK 63
+#define GCC_GP1_CLK_SRC 64
+#define GCC_GP2_CLK 65
+#define GCC_GP2_CLK_SRC 66
+#define GCC_GP3_CLK 67
+#define GCC_GP3_CLK_SRC 68
+#define GCC_GP4_CLK 69
+#define GCC_GP4_CLK_SRC 70
+#define GCC_GP5_CLK 71
+#define GCC_GP5_CLK_SRC 72
+#define GCC_GPU_CFG_AHB_CLK 73
+#define GCC_GPU_GPLL0_CLK_SRC 74
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 75
+#define GCC_GPU_IREF_EN 76
+#define GCC_GPU_MEMNOC_GFX_CLK 77
+#define GCC_GPU_SNOC_DVM_GFX_CLK 78
+#define GCC_GPU_TCU_THROTTLE_AHB_CLK 79
+#define GCC_GPU_TCU_THROTTLE_CLK 80
+#define GCC_PCIE0_PHY_RCHNG_CLK 81
+#define GCC_PCIE1_PHY_RCHNG_CLK 82
+#define GCC_PCIE2A_PHY_RCHNG_CLK 83
+#define GCC_PCIE2B_PHY_RCHNG_CLK 84
+#define GCC_PCIE3A_PHY_RCHNG_CLK 85
+#define GCC_PCIE3B_PHY_RCHNG_CLK 86
+#define GCC_PCIE4_PHY_RCHNG_CLK 87
+#define GCC_PCIE_0_AUX_CLK 88
+#define GCC_PCIE_0_AUX_CLK_SRC 89
+#define GCC_PCIE_0_CFG_AHB_CLK 90
+#define GCC_PCIE_0_MSTR_AXI_CLK 91
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 92
+#define GCC_PCIE_0_PIPE_CLK 93
+#define GCC_PCIE_0_SLV_AXI_CLK 94
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 95
+#define GCC_PCIE_1_AUX_CLK 96
+#define GCC_PCIE_1_AUX_CLK_SRC 97
+#define GCC_PCIE_1_CFG_AHB_CLK 98
+#define GCC_PCIE_1_MSTR_AXI_CLK 99
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 100
+#define GCC_PCIE_1_PIPE_CLK 101
+#define GCC_PCIE_1_SLV_AXI_CLK 102
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 103
+#define GCC_PCIE_2A2B_CLKREF_CLK 104
+#define GCC_PCIE_2A_AUX_CLK 105
+#define GCC_PCIE_2A_AUX_CLK_SRC 106
+#define GCC_PCIE_2A_CFG_AHB_CLK 107
+#define GCC_PCIE_2A_MSTR_AXI_CLK 108
+#define GCC_PCIE_2A_PHY_RCHNG_CLK_SRC 109
+#define GCC_PCIE_2A_PIPE_CLK 110
+#define GCC_PCIE_2A_PIPE_CLK_SRC 111
+#define GCC_PCIE_2A_PIPE_DIV_CLK_SRC 112
+#define GCC_PCIE_2A_PIPEDIV2_CLK 113
+#define GCC_PCIE_2A_SLV_AXI_CLK 114
+#define GCC_PCIE_2A_SLV_Q2A_AXI_CLK 115
+#define GCC_PCIE_2B_AUX_CLK 116
+#define GCC_PCIE_2B_AUX_CLK_SRC 117
+#define GCC_PCIE_2B_CFG_AHB_CLK 118
+#define GCC_PCIE_2B_MSTR_AXI_CLK 119
+#define GCC_PCIE_2B_PHY_RCHNG_CLK_SRC 120
+#define GCC_PCIE_2B_PIPE_CLK 121
+#define GCC_PCIE_2B_PIPE_CLK_SRC 122
+#define GCC_PCIE_2B_PIPE_DIV_CLK_SRC 123
+#define GCC_PCIE_2B_PIPEDIV2_CLK 124
+#define GCC_PCIE_2B_SLV_AXI_CLK 125
+#define GCC_PCIE_2B_SLV_Q2A_AXI_CLK 126
+#define GCC_PCIE_3A3B_CLKREF_CLK 127
+#define GCC_PCIE_3A_AUX_CLK 128
+#define GCC_PCIE_3A_AUX_CLK_SRC 129
+#define GCC_PCIE_3A_CFG_AHB_CLK 130
+#define GCC_PCIE_3A_MSTR_AXI_CLK 131
+#define GCC_PCIE_3A_PHY_RCHNG_CLK_SRC 132
+#define GCC_PCIE_3A_PIPE_CLK 133
+#define GCC_PCIE_3A_PIPE_CLK_SRC 134
+#define GCC_PCIE_3A_PIPE_DIV_CLK_SRC 135
+#define GCC_PCIE_3A_PIPEDIV2_CLK 136
+#define GCC_PCIE_3A_SLV_AXI_CLK 137
+#define GCC_PCIE_3A_SLV_Q2A_AXI_CLK 138
+#define GCC_PCIE_3B_AUX_CLK 139
+#define GCC_PCIE_3B_AUX_CLK_SRC 140
+#define GCC_PCIE_3B_CFG_AHB_CLK 141
+#define GCC_PCIE_3B_MSTR_AXI_CLK 142
+#define GCC_PCIE_3B_PHY_RCHNG_CLK_SRC 143
+#define GCC_PCIE_3B_PIPE_CLK 144
+#define GCC_PCIE_3B_PIPE_CLK_SRC 145
+#define GCC_PCIE_3B_PIPE_DIV_CLK_SRC 146
+#define GCC_PCIE_3B_PIPEDIV2_CLK 147
+#define GCC_PCIE_3B_SLV_AXI_CLK 148
+#define GCC_PCIE_3B_SLV_Q2A_AXI_CLK 149
+#define GCC_PCIE_4_AUX_CLK 150
+#define GCC_PCIE_4_AUX_CLK_SRC 151
+#define GCC_PCIE_4_CFG_AHB_CLK 152
+#define GCC_PCIE_4_CLKREF_CLK 153
+#define GCC_PCIE_4_MSTR_AXI_CLK 154
+#define GCC_PCIE_4_PHY_RCHNG_CLK_SRC 155
+#define GCC_PCIE_4_PIPE_CLK 156
+#define GCC_PCIE_4_PIPE_CLK_SRC 157
+#define GCC_PCIE_4_PIPE_DIV_CLK_SRC 158
+#define GCC_PCIE_4_PIPEDIV2_CLK 159
+#define GCC_PCIE_4_SLV_AXI_CLK 160
+#define GCC_PCIE_4_SLV_Q2A_AXI_CLK 161
+#define GCC_PCIE_RSCC_AHB_CLK 162
+#define GCC_PCIE_RSCC_XO_CLK 163
+#define GCC_PCIE_RSCC_XO_CLK_SRC 164
+#define GCC_PCIE_THROTTLE_CFG_CLK 165
+#define GCC_PDM2_CLK 166
+#define GCC_PDM2_CLK_SRC 167
+#define GCC_PDM_AHB_CLK 168
+#define GCC_PDM_XO4_CLK 169
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 170
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 171
+#define GCC_QMIP_DISP1_AHB_CLK 172
+#define GCC_QMIP_DISP1_ROT_AHB_CLK 173
+#define GCC_QMIP_DISP_AHB_CLK 174
+#define GCC_QMIP_DISP_ROT_AHB_CLK 175
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 176
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 177
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 178
+#define GCC_QUPV3_WRAP0_CORE_CLK 179
+#define GCC_QUPV3_WRAP0_QSPI0_CLK 180
+#define GCC_QUPV3_WRAP0_S0_CLK 181
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 182
+#define GCC_QUPV3_WRAP0_S1_CLK 183
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 184
+#define GCC_QUPV3_WRAP0_S2_CLK 185
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 186
+#define GCC_QUPV3_WRAP0_S3_CLK 187
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 188
+#define GCC_QUPV3_WRAP0_S4_CLK 189
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 190
+#define GCC_QUPV3_WRAP0_S4_DIV_CLK_SRC 191
+#define GCC_QUPV3_WRAP0_S5_CLK 192
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 193
+#define GCC_QUPV3_WRAP0_S6_CLK 194
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 195
+#define GCC_QUPV3_WRAP0_S7_CLK 196
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 197
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 198
+#define GCC_QUPV3_WRAP1_CORE_CLK 199
+#define GCC_QUPV3_WRAP1_QSPI0_CLK 200
+#define GCC_QUPV3_WRAP1_S0_CLK 201
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 202
+#define GCC_QUPV3_WRAP1_S1_CLK 203
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 204
+#define GCC_QUPV3_WRAP1_S2_CLK 205
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 206
+#define GCC_QUPV3_WRAP1_S3_CLK 207
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 208
+#define GCC_QUPV3_WRAP1_S4_CLK 209
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 210
+#define GCC_QUPV3_WRAP1_S4_DIV_CLK_SRC 211
+#define GCC_QUPV3_WRAP1_S5_CLK 212
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 213
+#define GCC_QUPV3_WRAP1_S6_CLK 214
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 215
+#define GCC_QUPV3_WRAP1_S7_CLK 216
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 217
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 218
+#define GCC_QUPV3_WRAP2_CORE_CLK 219
+#define GCC_QUPV3_WRAP2_QSPI0_CLK 220
+#define GCC_QUPV3_WRAP2_S0_CLK 221
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 222
+#define GCC_QUPV3_WRAP2_S1_CLK 223
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 224
+#define GCC_QUPV3_WRAP2_S2_CLK 225
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 226
+#define GCC_QUPV3_WRAP2_S3_CLK 227
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 228
+#define GCC_QUPV3_WRAP2_S4_CLK 229
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 230
+#define GCC_QUPV3_WRAP2_S4_DIV_CLK_SRC 231
+#define GCC_QUPV3_WRAP2_S5_CLK 232
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 233
+#define GCC_QUPV3_WRAP2_S6_CLK 234
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 235
+#define GCC_QUPV3_WRAP2_S7_CLK 236
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 237
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 238
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 239
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 240
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 241
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 242
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 243
+#define GCC_SDCC2_AHB_CLK 244
+#define GCC_SDCC2_APPS_CLK 245
+#define GCC_SDCC2_APPS_CLK_SRC 246
+#define GCC_SDCC4_AHB_CLK 247
+#define GCC_SDCC4_APPS_CLK 248
+#define GCC_SDCC4_APPS_CLK_SRC 249
+#define GCC_SYS_NOC_USB_AXI_CLK 250
+#define GCC_UFS_1_CARD_CLKREF_CLK 251
+#define GCC_UFS_CARD_AHB_CLK 252
+#define GCC_UFS_CARD_AXI_CLK 253
+#define GCC_UFS_CARD_AXI_CLK_SRC 254
+#define GCC_UFS_CARD_CLKREF_CLK 255
+#define GCC_UFS_CARD_ICE_CORE_CLK 256
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 257
+#define GCC_UFS_CARD_PHY_AUX_CLK 258
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 259
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 260
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC 261
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 262
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC 263
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 264
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC 265
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 266
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 267
+#define GCC_UFS_PHY_AHB_CLK 268
+#define GCC_UFS_PHY_AXI_CLK 269
+#define GCC_UFS_PHY_AXI_CLK_SRC 270
+#define GCC_UFS_PHY_ICE_CORE_CLK 271
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 272
+#define GCC_UFS_PHY_PHY_AUX_CLK 273
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 274
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 275
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 276
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 277
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 278
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 279
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 280
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 281
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 282
+#define GCC_UFS_REF_CLKREF_CLK 283
+#define GCC_USB2_HS0_CLKREF_CLK 284
+#define GCC_USB2_HS1_CLKREF_CLK 285
+#define GCC_USB2_HS2_CLKREF_CLK 286
+#define GCC_USB2_HS3_CLKREF_CLK 287
+#define GCC_USB30_MP_MASTER_CLK 288
+#define GCC_USB30_MP_MASTER_CLK_SRC 289
+#define GCC_USB30_MP_MOCK_UTMI_CLK 290
+#define GCC_USB30_MP_MOCK_UTMI_CLK_SRC 291
+#define GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC 292
+#define GCC_USB30_MP_SLEEP_CLK 293
+#define GCC_USB30_PRIM_MASTER_CLK 294
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 295
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 296
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 297
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 298
+#define GCC_USB30_PRIM_SLEEP_CLK 299
+#define GCC_USB30_SEC_MASTER_CLK 300
+#define GCC_USB30_SEC_MASTER_CLK_SRC 301
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 302
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 303
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 304
+#define GCC_USB30_SEC_SLEEP_CLK 305
+#define GCC_USB34_PRIM_PHY_PIPE_CLK_SRC 306
+#define GCC_USB34_SEC_PHY_PIPE_CLK_SRC 307
+#define GCC_USB3_MP0_CLKREF_CLK 308
+#define GCC_USB3_MP1_CLKREF_CLK 309
+#define GCC_USB3_MP_PHY_AUX_CLK 310
+#define GCC_USB3_MP_PHY_AUX_CLK_SRC 311
+#define GCC_USB3_MP_PHY_COM_AUX_CLK 312
+#define GCC_USB3_MP_PHY_PIPE_0_CLK 313
+#define GCC_USB3_MP_PHY_PIPE_0_CLK_SRC 314
+#define GCC_USB3_MP_PHY_PIPE_1_CLK 315
+#define GCC_USB3_MP_PHY_PIPE_1_CLK_SRC 316
+#define GCC_USB3_PRIM_PHY_AUX_CLK 317
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 318
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 319
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 320
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 321
+#define GCC_USB3_SEC_PHY_AUX_CLK 322
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 323
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 324
+#define GCC_USB3_SEC_PHY_PIPE_CLK 325
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 326
+#define GCC_USB4_1_CFG_AHB_CLK 327
+#define GCC_USB4_1_DP_CLK 328
+#define GCC_USB4_1_MASTER_CLK 329
+#define GCC_USB4_1_MASTER_CLK_SRC 330
+#define GCC_USB4_1_PHY_DP_CLK_SRC 331
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK 332
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC 333
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK 334
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC 335
+#define GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC 336
+#define GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC 337
+#define GCC_USB4_1_PHY_RX0_CLK 338
+#define GCC_USB4_1_PHY_RX0_CLK_SRC 339
+#define GCC_USB4_1_PHY_RX1_CLK 340
+#define GCC_USB4_1_PHY_RX1_CLK_SRC 341
+#define GCC_USB4_1_PHY_SYS_CLK_SRC 342
+#define GCC_USB4_1_PHY_USB_PIPE_CLK 343
+#define GCC_USB4_1_SB_IF_CLK 344
+#define GCC_USB4_1_SB_IF_CLK_SRC 345
+#define GCC_USB4_1_SYS_CLK 346
+#define GCC_USB4_1_TMU_CLK 347
+#define GCC_USB4_1_TMU_CLK_SRC 348
+#define GCC_USB4_CFG_AHB_CLK 349
+#define GCC_USB4_CLKREF_CLK 350
+#define GCC_USB4_DP_CLK 351
+#define GCC_USB4_EUD_CLKREF_CLK 352
+#define GCC_USB4_MASTER_CLK 353
+#define GCC_USB4_MASTER_CLK_SRC 354
+#define GCC_USB4_PHY_DP_CLK_SRC 355
+#define GCC_USB4_PHY_P2RR2P_PIPE_CLK 356
+#define GCC_USB4_PHY_P2RR2P_PIPE_CLK_SRC 357
+#define GCC_USB4_PHY_PCIE_PIPE_CLK 358
+#define GCC_USB4_PHY_PCIE_PIPE_CLK_SRC 359
+#define GCC_USB4_PHY_PCIE_PIPE_MUX_CLK_SRC 360
+#define GCC_USB4_PHY_PCIE_PIPEGMUX_CLK_SRC 361
+#define GCC_USB4_PHY_RX0_CLK 362
+#define GCC_USB4_PHY_RX0_CLK_SRC 363
+#define GCC_USB4_PHY_RX1_CLK 364
+#define GCC_USB4_PHY_RX1_CLK_SRC 365
+#define GCC_USB4_PHY_SYS_CLK_SRC 366
+#define GCC_USB4_PHY_USB_PIPE_CLK 367
+#define GCC_USB4_SB_IF_CLK 368
+#define GCC_USB4_SB_IF_CLK_SRC 369
+#define GCC_USB4_SYS_CLK 370
+#define GCC_USB4_TMU_CLK 371
+#define GCC_USB4_TMU_CLK_SRC 372
+#define GCC_VIDEO_AHB_CLK 373
+#define GCC_VIDEO_AXI0_CLK 374
+#define GCC_VIDEO_AXI1_CLK 375
+#define GCC_VIDEO_CVP_THROTTLE_CLK 376
+#define GCC_VIDEO_VCODEC_THROTTLE_CLK 377
+#define GCC_VIDEO_XO_CLK 378
+#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 379
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 380
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK 381
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 382
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 383
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 384
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 385
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 386
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 387
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 388
+
+/* GCC resets */
+#define GCC_EMAC0_BCR 0
+#define GCC_EMAC1_BCR 1
+#define GCC_PCIE_0_LINK_DOWN_BCR 2
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 3
+#define GCC_PCIE_0_PHY_BCR 4
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_TUNNEL_BCR 6
+#define GCC_PCIE_1_LINK_DOWN_BCR 7
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_1_PHY_BCR 9
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_TUNNEL_BCR 11
+#define GCC_PCIE_2A_BCR 12
+#define GCC_PCIE_2A_LINK_DOWN_BCR 13
+#define GCC_PCIE_2A_NOCSR_COM_PHY_BCR 14
+#define GCC_PCIE_2A_PHY_BCR 15
+#define GCC_PCIE_2A_PHY_NOCSR_COM_PHY_BCR 16
+#define GCC_PCIE_2B_BCR 17
+#define GCC_PCIE_2B_LINK_DOWN_BCR 18
+#define GCC_PCIE_2B_NOCSR_COM_PHY_BCR 19
+#define GCC_PCIE_2B_PHY_BCR 20
+#define GCC_PCIE_2B_PHY_NOCSR_COM_PHY_BCR 21
+#define GCC_PCIE_3A_BCR 22
+#define GCC_PCIE_3A_LINK_DOWN_BCR 23
+#define GCC_PCIE_3A_NOCSR_COM_PHY_BCR 24
+#define GCC_PCIE_3A_PHY_BCR 25
+#define GCC_PCIE_3A_PHY_NOCSR_COM_PHY_BCR 26
+#define GCC_PCIE_3B_BCR 27
+#define GCC_PCIE_3B_LINK_DOWN_BCR 28
+#define GCC_PCIE_3B_NOCSR_COM_PHY_BCR 29
+#define GCC_PCIE_3B_PHY_BCR 30
+#define GCC_PCIE_3B_PHY_NOCSR_COM_PHY_BCR 31
+#define GCC_PCIE_4_BCR 32
+#define GCC_PCIE_4_LINK_DOWN_BCR 33
+#define GCC_PCIE_4_NOCSR_COM_PHY_BCR 34
+#define GCC_PCIE_4_PHY_BCR 35
+#define GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR 36
+#define GCC_PCIE_PHY_CFG_AHB_BCR 37
+#define GCC_PCIE_PHY_COM_BCR 38
+#define GCC_PCIE_RSCC_BCR 39
+#define GCC_QUSB2PHY_HS0_MP_BCR 40
+#define GCC_QUSB2PHY_HS1_MP_BCR 41
+#define GCC_QUSB2PHY_HS2_MP_BCR 42
+#define GCC_QUSB2PHY_HS3_MP_BCR 43
+#define GCC_QUSB2PHY_PRIM_BCR 44
+#define GCC_QUSB2PHY_SEC_BCR 45
+#define GCC_SDCC2_BCR 46
+#define GCC_SDCC4_BCR 47
+#define GCC_UFS_CARD_BCR 48
+#define GCC_UFS_PHY_BCR 49
+#define GCC_USB2_PHY_PRIM_BCR 50
+#define GCC_USB2_PHY_SEC_BCR 51
+#define GCC_USB30_MP_BCR 52
+#define GCC_USB30_PRIM_BCR 53
+#define GCC_USB30_SEC_BCR 54
+#define GCC_USB3_DP_PHY_PRIM_BCR 55
+#define GCC_USB3_DP_PHY_SEC_BCR 56
+#define GCC_USB3_PHY_PRIM_BCR 57
+#define GCC_USB3_PHY_SEC_BCR 58
+#define GCC_USB3_UNIPHY_MP0_BCR 59
+#define GCC_USB3_UNIPHY_MP1_BCR 60
+#define GCC_USB3PHY_PHY_PRIM_BCR 61
+#define GCC_USB3PHY_PHY_SEC_BCR 62
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 63
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 64
+#define GCC_USB4_1_BCR 65
+#define GCC_USB4_1_DP_PHY_PRIM_BCR 66
+#define GCC_USB4_1_DPPHY_AUX_BCR 67
+#define GCC_USB4_1_PHY_PRIM_BCR 68
+#define GCC_USB4_BCR 69
+#define GCC_USB4_DP_PHY_PRIM_BCR 70
+#define GCC_USB4_DPPHY_AUX_BCR 71
+#define GCC_USB4_PHY_PRIM_BCR 72
+#define GCC_USB4PHY_1_PHY_PRIM_BCR 73
+#define GCC_USB4PHY_PHY_PRIM_BCR 74
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 75
+#define GCC_VIDEO_BCR 76
+#define GCC_VIDEO_AXI0_CLK_ARES 77
+#define GCC_VIDEO_AXI1_CLK_ARES 78
+
+/* GCC GDSCs */
+#define PCIE_0_TUNNEL_GDSC 0
+#define PCIE_1_TUNNEL_GDSC 1
+#define PCIE_2A_GDSC 2
+#define PCIE_2B_GDSC 3
+#define PCIE_3A_GDSC 4
+#define PCIE_3B_GDSC 5
+#define PCIE_4_GDSC 6
+#define UFS_CARD_GDSC 7
+#define UFS_PHY_GDSC 8
+#define USB30_MP_GDSC 9
+#define USB30_PRIM_GDSC 10
+#define USB30_SEC_GDSC 11
+
+#endif
diff --git a/include/dt-bindings/clock/samsung,exynosautov9.h b/include/dt-bindings/clock/samsung,exynosautov9.h
index 71ec0a955364..ea9f91b4eb1a 100644
--- a/include/dt-bindings/clock/samsung,exynosautov9.h
+++ b/include/dt-bindings/clock/samsung,exynosautov9.h
@@ -166,7 +166,7 @@
#define GOUT_CLKCMU_PERIC1_IP 248
#define GOUT_CLKCMU_PERIS_BUS 249
-#define TOP_NR_CLK 249
+#define TOP_NR_CLK 250
/* CMU_BUSMC */
#define CLK_MOUT_BUSMC_BUS_USER 1
@@ -174,7 +174,7 @@
#define CLK_GOUT_BUSMC_PDMA0_PCLK 3
#define CLK_GOUT_BUSMC_SPDMA_PCLK 4
-#define BUSMC_NR_CLK 4
+#define BUSMC_NR_CLK 5
/* CMU_CORE */
#define CLK_MOUT_CORE_BUS_USER 1
@@ -183,7 +183,7 @@
#define CLK_GOUT_CORE_CCI_PCLK 4
#define CLK_GOUT_CORE_CMU_CORE_PCLK 5
-#define CORE_NR_CLK 5
+#define CORE_NR_CLK 6
/* CMU_FSYS2 */
#define CLK_MOUT_FSYS2_BUS_USER 1
@@ -194,7 +194,7 @@
#define CLK_GOUT_FSYS2_UFS_EMBD1_ACLK 6
#define CLK_GOUT_FSYS2_UFS_EMBD1_UNIPRO 7
-#define FSYS2_NR_CLK 7
+#define FSYS2_NR_CLK 8
/* CMU_PERIC0 */
#define CLK_MOUT_PERIC0_BUS_USER 1
@@ -240,7 +240,7 @@
#define CLK_GOUT_PERIC0_PCLK_10 41
#define CLK_GOUT_PERIC0_PCLK_11 42
-#define PERIC0_NR_CLK 42
+#define PERIC0_NR_CLK 43
/* CMU_PERIC1 */
#define CLK_MOUT_PERIC1_BUS_USER 1
@@ -286,7 +286,7 @@
#define CLK_GOUT_PERIC1_PCLK_10 41
#define CLK_GOUT_PERIC1_PCLK_11 42
-#define PERIC1_NR_CLK 42
+#define PERIC1_NR_CLK 43
/* CMU_PERIS */
#define CLK_MOUT_PERIS_BUS_USER 1
@@ -294,6 +294,6 @@
#define CLK_GOUT_WDT_CLUSTER0 3
#define CLK_GOUT_WDT_CLUSTER1 4
-#define PERIS_NR_CLK 4
+#define PERIS_NR_CLK 5
#endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV9_H */
diff --git a/include/dt-bindings/clock/ste-db8500-clkout.h b/include/dt-bindings/clock/ste-db8500-clkout.h
new file mode 100644
index 000000000000..ca07cb2bd1bc
--- /dev/null
+++ b/include/dt-bindings/clock/ste-db8500-clkout.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __STE_CLK_DB8500_CLKOUT_H__
+#define __STE_CLK_DB8500_CLKOUT_H__
+
+#define DB8500_CLKOUT_1 0
+#define DB8500_CLKOUT_2 1
+
+#define DB8500_CLKOUT_SRC_CLK38M 0
+#define DB8500_CLKOUT_SRC_ACLK 1
+#define DB8500_CLKOUT_SRC_SYSCLK 2
+#define DB8500_CLKOUT_SRC_LCDCLK 3
+#define DB8500_CLKOUT_SRC_SDMMCCLK 4
+#define DB8500_CLKOUT_SRC_TVCLK 5
+#define DB8500_CLKOUT_SRC_TIMCLK 6
+#define DB8500_CLKOUT_SRC_CLK009 7
+
+#endif
diff --git a/include/dt-bindings/clock/stm32mp13-clks.h b/include/dt-bindings/clock/stm32mp13-clks.h
new file mode 100644
index 000000000000..02befd25edce
--- /dev/null
+++ b/include/dt-bindings/clock/stm32mp13-clks.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0+ or BSD-3-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2020 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
+ */
+
+#ifndef _DT_BINDINGS_STM32MP13_CLKS_H_
+#define _DT_BINDINGS_STM32MP13_CLKS_H_
+
+/* OSCILLATOR clocks */
+#define CK_HSE 0
+#define CK_CSI 1
+#define CK_LSI 2
+#define CK_LSE 3
+#define CK_HSI 4
+#define CK_HSE_DIV2 5
+
+/* PLL */
+#define PLL1 6
+#define PLL2 7
+#define PLL3 8
+#define PLL4 9
+
+/* ODF */
+#define PLL1_P 10
+#define PLL1_Q 11
+#define PLL1_R 12
+#define PLL2_P 13
+#define PLL2_Q 14
+#define PLL2_R 15
+#define PLL3_P 16
+#define PLL3_Q 17
+#define PLL3_R 18
+#define PLL4_P 19
+#define PLL4_Q 20
+#define PLL4_R 21
+
+#define PCLK1 22
+#define PCLK2 23
+#define PCLK3 24
+#define PCLK4 25
+#define PCLK5 26
+#define PCLK6 27
+
+/* SYSTEM CLOCK */
+#define CK_PER 28
+#define CK_MPU 29
+#define CK_AXI 30
+#define CK_MLAHB 31
+
+/* BASE TIMER */
+#define CK_TIMG1 32
+#define CK_TIMG2 33
+#define CK_TIMG3 34
+
+/* AUX */
+#define RTC 35
+
+/* TRACE & DEBUG clocks */
+#define CK_DBG 36
+#define CK_TRACE 37
+
+/* MCO clocks */
+#define CK_MCO1 38
+#define CK_MCO2 39
+
+/* IP clocks */
+#define SYSCFG 40
+#define VREF 41
+#define DTS 42
+#define PMBCTRL 43
+#define HDP 44
+#define IWDG2 45
+#define STGENRO 46
+#define USART1 47
+#define RTCAPB 48
+#define TZC 49
+#define TZPC 50
+#define IWDG1 51
+#define BSEC 52
+#define DMA1 53
+#define DMA2 54
+#define DMAMUX1 55
+#define DMAMUX2 56
+#define GPIOA 57
+#define GPIOB 58
+#define GPIOC 59
+#define GPIOD 60
+#define GPIOE 61
+#define GPIOF 62
+#define GPIOG 63
+#define GPIOH 64
+#define GPIOI 65
+#define CRYP1 66
+#define HASH1 67
+#define BKPSRAM 68
+#define MDMA 69
+#define CRC1 70
+#define USBH 71
+#define DMA3 72
+#define TSC 73
+#define PKA 74
+#define AXIMC 75
+#define MCE 76
+#define ETH1TX 77
+#define ETH2TX 78
+#define ETH1RX 79
+#define ETH2RX 80
+#define ETH1MAC 81
+#define ETH2MAC 82
+#define ETH1STP 83
+#define ETH2STP 84
+
+/* IP clocks with parents */
+#define SDMMC1_K 85
+#define SDMMC2_K 86
+#define ADC1_K 87
+#define ADC2_K 88
+#define FMC_K 89
+#define QSPI_K 90
+#define RNG1_K 91
+#define USBPHY_K 92
+#define STGEN_K 93
+#define SPDIF_K 94
+#define SPI1_K 95
+#define SPI2_K 96
+#define SPI3_K 97
+#define SPI4_K 98
+#define SPI5_K 99
+#define I2C1_K 100
+#define I2C2_K 101
+#define I2C3_K 102
+#define I2C4_K 103
+#define I2C5_K 104
+#define TIM2_K 105
+#define TIM3_K 106
+#define TIM4_K 107
+#define TIM5_K 108
+#define TIM6_K 109
+#define TIM7_K 110
+#define TIM12_K 111
+#define TIM13_K 112
+#define TIM14_K 113
+#define TIM1_K 114
+#define TIM8_K 115
+#define TIM15_K 116
+#define TIM16_K 117
+#define TIM17_K 118
+#define LPTIM1_K 119
+#define LPTIM2_K 120
+#define LPTIM3_K 121
+#define LPTIM4_K 122
+#define LPTIM5_K 123
+#define USART1_K 124
+#define USART2_K 125
+#define USART3_K 126
+#define UART4_K 127
+#define UART5_K 128
+#define USART6_K 129
+#define UART7_K 130
+#define UART8_K 131
+#define DFSDM_K 132
+#define FDCAN_K 133
+#define SAI1_K 134
+#define SAI2_K 135
+#define ADFSDM_K 136
+#define USBO_K 137
+#define LTDC_PX 138
+#define ETH1CK_K 139
+#define ETH1PTP_K 140
+#define ETH2CK_K 141
+#define ETH2PTP_K 142
+#define DCMIPP_K 143
+#define SAES_K 144
+#define DTS_K 145
+
+/* DDR */
+#define DDRC1 146
+#define DDRC1LP 147
+#define DDRC2 148
+#define DDRC2LP 149
+#define DDRPHYC 150
+#define DDRPHYCLP 151
+#define DDRCAPB 152
+#define DDRCAPBLP 153
+#define AXIDCG 154
+#define DDRPHYCAPB 155
+#define DDRPHYCAPBLP 156
+#define DDRPERFM 157
+
+#define ADC1 158
+#define ADC2 159
+#define SAI1 160
+#define SAI2 161
+
+#define STM32MP1_LAST_CLK 162
+
+/* SCMI clock identifiers */
+#define CK_SCMI_HSE 0
+#define CK_SCMI_HSI 1
+#define CK_SCMI_CSI 2
+#define CK_SCMI_LSE 3
+#define CK_SCMI_LSI 4
+#define CK_SCMI_HSE_DIV2 5
+#define CK_SCMI_PLL2_Q 6
+#define CK_SCMI_PLL2_R 7
+#define CK_SCMI_PLL3_P 8
+#define CK_SCMI_PLL3_Q 9
+#define CK_SCMI_PLL3_R 10
+#define CK_SCMI_PLL4_P 11
+#define CK_SCMI_PLL4_Q 12
+#define CK_SCMI_PLL4_R 13
+#define CK_SCMI_MPU 14
+#define CK_SCMI_AXI 15
+#define CK_SCMI_MLAHB 16
+#define CK_SCMI_CKPER 17
+#define CK_SCMI_PCLK1 18
+#define CK_SCMI_PCLK2 19
+#define CK_SCMI_PCLK3 20
+#define CK_SCMI_PCLK4 21
+#define CK_SCMI_PCLK5 22
+#define CK_SCMI_PCLK6 23
+#define CK_SCMI_CKTIMG1 24
+#define CK_SCMI_CKTIMG2 25
+#define CK_SCMI_CKTIMG3 26
+#define CK_SCMI_RTC 27
+#define CK_SCMI_RTCAPB 28
+
+#endif /* _DT_BINDINGS_STM32MP13_CLKS_H_ */
diff --git a/include/dt-bindings/clock/sun50i-h6-r-ccu.h b/include/dt-bindings/clock/sun50i-h6-r-ccu.h
index 890368d252c4..a96087abc86f 100644
--- a/include/dt-bindings/clock/sun50i-h6-r-ccu.h
+++ b/include/dt-bindings/clock/sun50i-h6-r-ccu.h
@@ -22,5 +22,6 @@
#define CLK_W1 12
#define CLK_R_APB2_RSB 13
+#define CLK_R_APB1_RTC 14
#endif /* _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun50i-h616-ccu.h b/include/dt-bindings/clock/sun50i-h616-ccu.h
index 4fc08b0df2f3..1191aca53ac6 100644
--- a/include/dt-bindings/clock/sun50i-h616-ccu.h
+++ b/include/dt-bindings/clock/sun50i-h616-ccu.h
@@ -111,5 +111,6 @@
#define CLK_BUS_TVE0 125
#define CLK_HDCP 126
#define CLK_BUS_HDCP 127
+#define CLK_PLL_SYSTEM_32K 128
#endif /* _DT_BINDINGS_CLK_SUN50I_H616_H_ */
diff --git a/include/dt-bindings/interconnect/qcom,sc8180x.h b/include/dt-bindings/interconnect/qcom,sc8180x.h
index 235b525d2803..e84cfec5afdd 100644
--- a/include/dt-bindings/interconnect/qcom,sc8180x.h
+++ b/include/dt-bindings/interconnect/qcom,sc8180x.h
@@ -182,4 +182,11 @@
#define SLAVE_MNOC_SF_MEM_NOC_DISPLAY 3
#define SLAVE_MNOC_HF_MEM_NOC_DISPLAY 4
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
#endif
diff --git a/include/dt-bindings/interconnect/qcom,sc8280xp.h b/include/dt-bindings/interconnect/qcom,sc8280xp.h
new file mode 100644
index 000000000000..a3e5fda7c127
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sc8280xp.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC8280XP_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SC8280XP_H
+
+/* aggre1_noc */
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_QUP_2 2
+#define MASTER_A1NOC_CFG 3
+#define MASTER_IPA 4
+#define MASTER_EMAC_1 5
+#define MASTER_SDCC_4 6
+#define MASTER_UFS_MEM 7
+#define MASTER_USB3_0 8
+#define MASTER_USB3_1 9
+#define MASTER_USB3_MP 10
+#define MASTER_USB4_0 11
+#define MASTER_USB4_1 12
+#define SLAVE_A1NOC_SNOC 13
+#define SLAVE_USB_NOC_SNOC 14
+#define SLAVE_SERVICE_A1NOC 15
+
+/* aggre2_noc */
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_0 1
+#define MASTER_A2NOC_CFG 2
+#define MASTER_CRYPTO 3
+#define MASTER_SENSORS_PROC 4
+#define MASTER_SP 5
+#define MASTER_EMAC 6
+#define MASTER_PCIE_0 7
+#define MASTER_PCIE_1 8
+#define MASTER_PCIE_2A 9
+#define MASTER_PCIE_2B 10
+#define MASTER_PCIE_3A 11
+#define MASTER_PCIE_3B 12
+#define MASTER_PCIE_4 13
+#define MASTER_QDSS_ETR 14
+#define MASTER_SDCC_2 15
+#define MASTER_UFS_CARD 16
+#define SLAVE_A2NOC_SNOC 17
+#define SLAVE_ANOC_PCIE_GEM_NOC 18
+#define SLAVE_SERVICE_A2NOC 19
+
+/* clk_virt */
+#define MASTER_IPA_CORE 0
+#define MASTER_QUP_CORE_0 1
+#define MASTER_QUP_CORE_1 2
+#define MASTER_QUP_CORE_2 3
+#define SLAVE_IPA_CORE 4
+#define SLAVE_QUP_CORE_0 5
+#define SLAVE_QUP_CORE_1 6
+#define SLAVE_QUP_CORE_2 7
+
+/* config_noc */
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AHB2PHY_0 2
+#define SLAVE_AHB2PHY_1 3
+#define SLAVE_AHB2PHY_2 4
+#define SLAVE_AOSS 5
+#define SLAVE_APPSS 6
+#define SLAVE_CAMERA_CFG 7
+#define SLAVE_CLK_CTL 8
+#define SLAVE_CDSP_CFG 9
+#define SLAVE_CDSP1_CFG 10
+#define SLAVE_RBCPR_CX_CFG 11
+#define SLAVE_RBCPR_MMCX_CFG 12
+#define SLAVE_RBCPR_MX_CFG 13
+#define SLAVE_CPR_NSPCX 14
+#define SLAVE_CRYPTO_0_CFG 15
+#define SLAVE_CX_RDPM 16
+#define SLAVE_DCC_CFG 17
+#define SLAVE_DISPLAY_CFG 18
+#define SLAVE_DISPLAY1_CFG 19
+#define SLAVE_EMAC_CFG 20
+#define SLAVE_EMAC1_CFG 21
+#define SLAVE_GFX3D_CFG 22
+#define SLAVE_HWKM 23
+#define SLAVE_IMEM_CFG 24
+#define SLAVE_IPA_CFG 25
+#define SLAVE_IPC_ROUTER_CFG 26
+#define SLAVE_LPASS 27
+#define SLAVE_MX_RDPM 28
+#define SLAVE_MXC_RDPM 29
+#define SLAVE_PCIE_0_CFG 30
+#define SLAVE_PCIE_1_CFG 31
+#define SLAVE_PCIE_2A_CFG 32
+#define SLAVE_PCIE_2B_CFG 33
+#define SLAVE_PCIE_3A_CFG 34
+#define SLAVE_PCIE_3B_CFG 35
+#define SLAVE_PCIE_4_CFG 36
+#define SLAVE_PCIE_RSC_CFG 37
+#define SLAVE_PDM 38
+#define SLAVE_PIMEM_CFG 39
+#define SLAVE_PKA_WRAPPER_CFG 40
+#define SLAVE_PMU_WRAPPER_CFG 41
+#define SLAVE_QDSS_CFG 42
+#define SLAVE_QSPI_0 43
+#define SLAVE_QUP_0 44
+#define SLAVE_QUP_1 45
+#define SLAVE_QUP_2 46
+#define SLAVE_SDCC_2 47
+#define SLAVE_SDCC_4 48
+#define SLAVE_SECURITY 49
+#define SLAVE_SMMUV3_CFG 50
+#define SLAVE_SMSS_CFG 51
+#define SLAVE_SPSS_CFG 52
+#define SLAVE_TCSR 53
+#define SLAVE_TLMM 54
+#define SLAVE_UFS_CARD_CFG 55
+#define SLAVE_UFS_MEM_CFG 56
+#define SLAVE_USB3_0 57
+#define SLAVE_USB3_1 58
+#define SLAVE_USB3_MP 59
+#define SLAVE_USB4_0 60
+#define SLAVE_USB4_1 61
+#define SLAVE_VENUS_CFG 62
+#define SLAVE_VSENSE_CTRL_CFG 63
+#define SLAVE_VSENSE_CTRL_R_CFG 64
+#define SLAVE_A1NOC_CFG 65
+#define SLAVE_A2NOC_CFG 66
+#define SLAVE_ANOC_PCIE_BRIDGE_CFG 67
+#define SLAVE_DDRSS_CFG 68
+#define SLAVE_CNOC_MNOC_CFG 69
+#define SLAVE_SNOC_CFG 70
+#define SLAVE_SNOC_SF_BRIDGE_CFG 71
+#define SLAVE_IMEM 72
+#define SLAVE_PIMEM 73
+#define SLAVE_SERVICE_CNOC 74
+#define SLAVE_PCIE_0 75
+#define SLAVE_PCIE_1 76
+#define SLAVE_PCIE_2A 77
+#define SLAVE_PCIE_2B 78
+#define SLAVE_PCIE_3A 79
+#define SLAVE_PCIE_3B 80
+#define SLAVE_PCIE_4 81
+#define SLAVE_QDSS_STM 82
+#define SLAVE_SMSS 83
+#define SLAVE_TCU 84
+
+/* dc_noc */
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+/* gem_noc */
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_COMPUTE_NOC_1 5
+#define MASTER_GEM_NOC_CFG 6
+#define MASTER_GFX3D 7
+#define MASTER_MNOC_HF_MEM_NOC 8
+#define MASTER_MNOC_SF_MEM_NOC 9
+#define MASTER_ANOC_PCIE_GEM_NOC 10
+#define MASTER_SNOC_GC_MEM_NOC 11
+#define MASTER_SNOC_SF_MEM_NOC 12
+#define SLAVE_GEM_NOC_CNOC 13
+#define SLAVE_LLCC 14
+#define SLAVE_GEM_NOC_PCIE_CNOC 15
+#define SLAVE_SERVICE_GEM_NOC_1 16
+#define SLAVE_SERVICE_GEM_NOC_2 17
+#define SLAVE_SERVICE_GEM_NOC 18
+
+/* lpass_ag_noc */
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define MASTER_LPASS_PROC 1
+#define SLAVE_LPASS_CORE_CFG 2
+#define SLAVE_LPASS_LPI_CFG 3
+#define SLAVE_LPASS_MPU_CFG 4
+#define SLAVE_LPASS_TOP_CFG 5
+#define SLAVE_LPASS_SNOC 6
+#define SLAVE_SERVICES_LPASS_AML_NOC 7
+#define SLAVE_SERVICE_LPASS_AG_NOC 8
+
+/* mc_virt */
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+/* mmss_noc */
+#define MASTER_CAMNOC_HF 0
+#define MASTER_MDP0 1
+#define MASTER_MDP1 2
+#define MASTER_MDP_CORE1_0 3
+#define MASTER_MDP_CORE1_1 4
+#define MASTER_CNOC_MNOC_CFG 5
+#define MASTER_ROTATOR 6
+#define MASTER_ROTATOR_1 7
+#define MASTER_VIDEO_P0 8
+#define MASTER_VIDEO_P1 9
+#define MASTER_VIDEO_PROC 10
+#define MASTER_CAMNOC_ICP 11
+#define MASTER_CAMNOC_SF 12
+#define SLAVE_MNOC_HF_MEM_NOC 13
+#define SLAVE_MNOC_SF_MEM_NOC 14
+#define SLAVE_SERVICE_MNOC 15
+
+/* nspa_noc */
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_NSP_XFR 3
+#define SLAVE_SERVICE_NSP_NOC 4
+
+/* nspb_noc */
+#define MASTER_CDSPB_NOC_CFG 0
+#define MASTER_CDSP_PROC_B 1
+#define SLAVE_CDSPB_MEM_NOC 2
+#define SLAVE_NSPB_XFR 3
+#define SLAVE_SERVICE_NSPB_NOC 4
+
+/* system_noc */
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_USB_NOC_SNOC 2
+#define MASTER_LPASS_ANOC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_SNOC_GEM_NOC_GC 7
+#define SLAVE_SNOC_GEM_NOC_SF 8
+#define SLAVE_SERVICE_SNOC 9
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdx65.h b/include/dt-bindings/interconnect/qcom,sdx65.h
new file mode 100644
index 000000000000..b25288aa7d74
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdx65.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX65_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX65_H
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_TCU_0 0
+#define MASTER_SNOC_GC_MEM_NOC 1
+#define MASTER_APPSS_PROC 2
+#define SLAVE_LLCC 3
+#define SLAVE_MEM_NOC_SNOC 4
+#define SLAVE_MEM_NOC_PCIE_SNOC 5
+
+#define MASTER_AUDIO 0
+#define MASTER_BLSP_1 1
+#define MASTER_QDSS_BAM 2
+#define MASTER_QPIC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_SPMI_FETCHER 5
+#define MASTER_ANOC_SNOC 6
+#define MASTER_IPA 7
+#define MASTER_MEM_NOC_SNOC 8
+#define MASTER_MEM_NOC_PCIE_SNOC 9
+#define MASTER_CRYPTO 10
+#define MASTER_IPA_PCIE 11
+#define MASTER_PCIE_0 12
+#define MASTER_QDSS_ETR 13
+#define MASTER_SDCC_1 14
+#define MASTER_USB3 15
+#define SLAVE_AOSS 16
+#define SLAVE_APPSS 17
+#define SLAVE_AUDIO 18
+#define SLAVE_BLSP_1 19
+#define SLAVE_CLK_CTL 20
+#define SLAVE_CRYPTO_0_CFG 21
+#define SLAVE_CNOC_DDRSS 22
+#define SLAVE_ECC_CFG 23
+#define SLAVE_IMEM_CFG 24
+#define SLAVE_IPA_CFG 25
+#define SLAVE_CNOC_MSS 26
+#define SLAVE_PCIE_PARF 27
+#define SLAVE_PDM 28
+#define SLAVE_PRNG 29
+#define SLAVE_QDSS_CFG 30
+#define SLAVE_QPIC 31
+#define SLAVE_SDCC_1 32
+#define SLAVE_SNOC_CFG 33
+#define SLAVE_SPMI_FETCHER 34
+#define SLAVE_SPMI_VGI_COEX 35
+#define SLAVE_TCSR 36
+#define SLAVE_TLMM 37
+#define SLAVE_USB3 38
+#define SLAVE_USB3_PHY_CFG 39
+#define SLAVE_ANOC_SNOC 40
+#define SLAVE_SNOC_MEM_NOC_GC 41
+#define SLAVE_IMEM 42
+#define SLAVE_SERVICE_SNOC 43
+#define SLAVE_PCIE_0 44
+#define SLAVE_QDSS_STM 45
+#define SLAVE_TCU 46
+
+#endif
diff --git a/include/dt-bindings/memory/mt8186-memory-port.h b/include/dt-bindings/memory/mt8186-memory-port.h
new file mode 100644
index 000000000000..2bc6e4433048
--- /dev/null
+++ b/include/dt-bindings/memory/mt8186-memory-port.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ *
+ * Author: Anan Sun <anan.sun@mediatek.com>
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8186_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8186_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB dma address space. We split it into four
+ * ranges: 0 ~ 4G; 4G ~ 8G; 8G ~ 12G; 12G ~ 16G. These masters may be
+ * placed in any of these regions, BUT:
+ * a) Make sure all the ports inside a larb are in one range.
+ * b) The iova of any master can NOT cross the 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping in this SoC:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/1/2
+ * vcodec 4G ~ 8G larb4/7
+ * cam/mdp 8G ~ 12G the other larbs.
+ * N/A 12G ~ 16G
+ * CCU0 0x24000_0000 ~ 0x243ff_ffff larb13: port 9/10
+ * CCU1 0x24400_0000 ~ 0x247ff_ffff larb14: port 4/5
+ */
+
+/* MM IOMMU ports */
+/* LARB 0 -- MMSYS */
+#define IOMMU_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(0, 0)
+#define IOMMU_PORT_L0_REVERSED MTK_M4U_ID(0, 1)
+#define IOMMU_PORT_L0_OVL_RDMA0 MTK_M4U_ID(0, 2)
+#define IOMMU_PORT_L0_DISP_FAKE0 MTK_M4U_ID(0, 3)
+
+/* LARB 1 -- MMSYS */
+#define IOMMU_PORT_L1_DISP_RDMA1 MTK_M4U_ID(1, 0)
+#define IOMMU_PORT_L1_OVL_2L_RDMA0 MTK_M4U_ID(1, 1)
+#define IOMMU_PORT_L1_DISP_RDMA0 MTK_M4U_ID(1, 2)
+#define IOMMU_PORT_L1_DISP_WDMA0 MTK_M4U_ID(1, 3)
+#define IOMMU_PORT_L1_DISP_FAKE1 MTK_M4U_ID(1, 4)
+
+/* LARB 2 -- MMSYS */
+#define IOMMU_PORT_L2_MDP_RDMA0 MTK_M4U_ID(2, 0)
+#define IOMMU_PORT_L2_MDP_RDMA1 MTK_M4U_ID(2, 1)
+#define IOMMU_PORT_L2_MDP_WROT0 MTK_M4U_ID(2, 2)
+#define IOMMU_PORT_L2_MDP_WROT1 MTK_M4U_ID(2, 3)
+#define IOMMU_PORT_L2_DISP_FAKE0 MTK_M4U_ID(2, 4)
+
+/* LARB 4 -- VDEC */
+#define IOMMU_PORT_L4_HW_VDEC_MC_EXT MTK_M4U_ID(4, 0)
+#define IOMMU_PORT_L4_HW_VDEC_UFO_EXT MTK_M4U_ID(4, 1)
+#define IOMMU_PORT_L4_HW_VDEC_PP_EXT MTK_M4U_ID(4, 2)
+#define IOMMU_PORT_L4_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(4, 3)
+#define IOMMU_PORT_L4_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(4, 4)
+#define IOMMU_PORT_L4_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(4, 5)
+#define IOMMU_PORT_L4_HW_VDEC_TILE_EXT MTK_M4U_ID(4, 6)
+#define IOMMU_PORT_L4_HW_VDEC_VLD_EXT MTK_M4U_ID(4, 7)
+#define IOMMU_PORT_L4_HW_VDEC_VLD2_EXT MTK_M4U_ID(4, 8)
+#define IOMMU_PORT_L4_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(4, 9)
+#define IOMMU_PORT_L4_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(4, 10)
+#define IOMMU_PORT_L4_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(4, 11)
+#define IOMMU_PORT_L4_HW_MINI_MDP_R0_EXT MTK_M4U_ID(4, 12)
+#define IOMMU_PORT_L4_HW_MINI_MDP_W0_EXT MTK_M4U_ID(4, 13)
+
+/* LARB 7 -- VENC */
+#define IOMMU_PORT_L7_VENC_RCPU MTK_M4U_ID(7, 0)
+#define IOMMU_PORT_L7_VENC_REC MTK_M4U_ID(7, 1)
+#define IOMMU_PORT_L7_VENC_BSDMA MTK_M4U_ID(7, 2)
+#define IOMMU_PORT_L7_VENC_SV_COMV MTK_M4U_ID(7, 3)
+#define IOMMU_PORT_L7_VENC_RD_COMV MTK_M4U_ID(7, 4)
+#define IOMMU_PORT_L7_VENC_CUR_LUMA MTK_M4U_ID(7, 5)
+#define IOMMU_PORT_L7_VENC_CUR_CHROMA MTK_M4U_ID(7, 6)
+#define IOMMU_PORT_L7_VENC_REF_LUMA MTK_M4U_ID(7, 7)
+#define IOMMU_PORT_L7_VENC_REF_CHROMA MTK_M4U_ID(7, 8)
+#define IOMMU_PORT_L7_JPGENC_Y_RDMA MTK_M4U_ID(7, 9)
+#define IOMMU_PORT_L7_JPGENC_C_RDMA MTK_M4U_ID(7, 10)
+#define IOMMU_PORT_L7_JPGENC_Q_TABLE MTK_M4U_ID(7, 11)
+#define IOMMU_PORT_L7_JPGENC_BSDMA MTK_M4U_ID(7, 12)
+
+/* LARB 8 -- WPE */
+#define IOMMU_PORT_L8_WPE_RDMA_0 MTK_M4U_ID(8, 0)
+#define IOMMU_PORT_L8_WPE_RDMA_1 MTK_M4U_ID(8, 1)
+#define IOMMU_PORT_L8_WPE_WDMA_0 MTK_M4U_ID(8, 2)
+
+/* LARB 9 -- IMG-1 */
+#define IOMMU_PORT_L9_IMG_IMGI_D1 MTK_M4U_ID(9, 0)
+#define IOMMU_PORT_L9_IMG_IMGBI_D1 MTK_M4U_ID(9, 1)
+#define IOMMU_PORT_L9_IMG_DMGI_D1 MTK_M4U_ID(9, 2)
+#define IOMMU_PORT_L9_IMG_DEPI_D1 MTK_M4U_ID(9, 3)
+#define IOMMU_PORT_L9_IMG_LCE_D1 MTK_M4U_ID(9, 4)
+#define IOMMU_PORT_L9_IMG_SMTI_D1 MTK_M4U_ID(9, 5)
+#define IOMMU_PORT_L9_IMG_SMTO_D2 MTK_M4U_ID(9, 6)
+#define IOMMU_PORT_L9_IMG_SMTO_D1 MTK_M4U_ID(9, 7)
+#define IOMMU_PORT_L9_IMG_CRZO_D1 MTK_M4U_ID(9, 8)
+#define IOMMU_PORT_L9_IMG_IMG3O_D1 MTK_M4U_ID(9, 9)
+#define IOMMU_PORT_L9_IMG_VIPI_D1 MTK_M4U_ID(9, 10)
+#define IOMMU_PORT_L9_IMG_SMTI_D5 MTK_M4U_ID(9, 11)
+#define IOMMU_PORT_L9_IMG_TIMGO_D1 MTK_M4U_ID(9, 12)
+#define IOMMU_PORT_L9_IMG_UFBC_W0 MTK_M4U_ID(9, 13)
+#define IOMMU_PORT_L9_IMG_UFBC_R0 MTK_M4U_ID(9, 14)
+#define IOMMU_PORT_L9_IMG_WPE_RDMA1 MTK_M4U_ID(9, 15)
+#define IOMMU_PORT_L9_IMG_WPE_RDMA0 MTK_M4U_ID(9, 16)
+#define IOMMU_PORT_L9_IMG_WPE_WDMA MTK_M4U_ID(9, 17)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA0 MTK_M4U_ID(9, 18)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA1 MTK_M4U_ID(9, 19)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA2 MTK_M4U_ID(9, 20)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA3 MTK_M4U_ID(9, 21)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA4 MTK_M4U_ID(9, 22)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA5 MTK_M4U_ID(9, 23)
+#define IOMMU_PORT_L9_IMG_MFB_WDMA0 MTK_M4U_ID(9, 24)
+#define IOMMU_PORT_L9_IMG_MFB_WDMA1 MTK_M4U_ID(9, 25)
+#define IOMMU_PORT_L9_IMG_RESERVE6 MTK_M4U_ID(9, 26)
+#define IOMMU_PORT_L9_IMG_RESERVE7 MTK_M4U_ID(9, 27)
+#define IOMMU_PORT_L9_IMG_RESERVE8 MTK_M4U_ID(9, 28)
+
+/* LARB 11 -- IMG-2 */
+#define IOMMU_PORT_L11_IMG_IMGI_D1 MTK_M4U_ID(11, 0)
+#define IOMMU_PORT_L11_IMG_IMGBI_D1 MTK_M4U_ID(11, 1)
+#define IOMMU_PORT_L11_IMG_DMGI_D1 MTK_M4U_ID(11, 2)
+#define IOMMU_PORT_L11_IMG_DEPI_D1 MTK_M4U_ID(11, 3)
+#define IOMMU_PORT_L11_IMG_LCE_D1 MTK_M4U_ID(11, 4)
+#define IOMMU_PORT_L11_IMG_SMTI_D1 MTK_M4U_ID(11, 5)
+#define IOMMU_PORT_L11_IMG_SMTO_D2 MTK_M4U_ID(11, 6)
+#define IOMMU_PORT_L11_IMG_SMTO_D1 MTK_M4U_ID(11, 7)
+#define IOMMU_PORT_L11_IMG_CRZO_D1 MTK_M4U_ID(11, 8)
+#define IOMMU_PORT_L11_IMG_IMG3O_D1 MTK_M4U_ID(11, 9)
+#define IOMMU_PORT_L11_IMG_VIPI_D1 MTK_M4U_ID(11, 10)
+#define IOMMU_PORT_L11_IMG_SMTI_D5 MTK_M4U_ID(11, 11)
+#define IOMMU_PORT_L11_IMG_TIMGO_D1 MTK_M4U_ID(11, 12)
+#define IOMMU_PORT_L11_IMG_UFBC_W0 MTK_M4U_ID(11, 13)
+#define IOMMU_PORT_L11_IMG_UFBC_R0 MTK_M4U_ID(11, 14)
+#define IOMMU_PORT_L11_IMG_WPE_RDMA1 MTK_M4U_ID(11, 15)
+#define IOMMU_PORT_L11_IMG_WPE_RDMA0 MTK_M4U_ID(11, 16)
+#define IOMMU_PORT_L11_IMG_WPE_WDMA MTK_M4U_ID(11, 17)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA0 MTK_M4U_ID(11, 18)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA1 MTK_M4U_ID(11, 19)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA2 MTK_M4U_ID(11, 20)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA3 MTK_M4U_ID(11, 21)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA4 MTK_M4U_ID(11, 22)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA5 MTK_M4U_ID(11, 23)
+#define IOMMU_PORT_L11_IMG_MFB_WDMA0 MTK_M4U_ID(11, 24)
+#define IOMMU_PORT_L11_IMG_MFB_WDMA1 MTK_M4U_ID(11, 25)
+#define IOMMU_PORT_L11_IMG_RESERVE6 MTK_M4U_ID(11, 26)
+#define IOMMU_PORT_L11_IMG_RESERVE7 MTK_M4U_ID(11, 27)
+#define IOMMU_PORT_L11_IMG_RESERVE8 MTK_M4U_ID(11, 28)
+
+/* LARB 13 -- CAM */
+#define IOMMU_PORT_L13_CAM_MRAWI MTK_M4U_ID(13, 0)
+#define IOMMU_PORT_L13_CAM_MRAWO_0 MTK_M4U_ID(13, 1)
+#define IOMMU_PORT_L13_CAM_MRAWO_1 MTK_M4U_ID(13, 2)
+#define IOMMU_PORT_L13_CAM_CAMSV_4 MTK_M4U_ID(13, 6)
+#define IOMMU_PORT_L13_CAM_CAMSV_5 MTK_M4U_ID(13, 7)
+#define IOMMU_PORT_L13_CAM_CAMSV_6 MTK_M4U_ID(13, 8)
+#define IOMMU_PORT_L13_CAM_CCUI MTK_M4U_ID(13, 9)
+#define IOMMU_PORT_L13_CAM_CCUO MTK_M4U_ID(13, 10)
+#define IOMMU_PORT_L13_CAM_FAKE MTK_M4U_ID(13, 11)
+
+/* LARB 14 -- CAM */
+#define IOMMU_PORT_L14_CAM_CCUI MTK_M4U_ID(14, 4)
+#define IOMMU_PORT_L14_CAM_CCUO MTK_M4U_ID(14, 5)
+
+/* LARB 16 -- RAW-A */
+#define IOMMU_PORT_L16_CAM_IMGO_R1_A MTK_M4U_ID(16, 0)
+#define IOMMU_PORT_L16_CAM_RRZO_R1_A MTK_M4U_ID(16, 1)
+#define IOMMU_PORT_L16_CAM_CQI_R1_A MTK_M4U_ID(16, 2)
+#define IOMMU_PORT_L16_CAM_BPCI_R1_A MTK_M4U_ID(16, 3)
+#define IOMMU_PORT_L16_CAM_YUVO_R1_A MTK_M4U_ID(16, 4)
+#define IOMMU_PORT_L16_CAM_UFDI_R2_A MTK_M4U_ID(16, 5)
+#define IOMMU_PORT_L16_CAM_RAWI_R2_A MTK_M4U_ID(16, 6)
+#define IOMMU_PORT_L16_CAM_RAWI_R3_A MTK_M4U_ID(16, 7)
+#define IOMMU_PORT_L16_CAM_AAO_R1_A MTK_M4U_ID(16, 8)
+#define IOMMU_PORT_L16_CAM_AFO_R1_A MTK_M4U_ID(16, 9)
+#define IOMMU_PORT_L16_CAM_FLKO_R1_A MTK_M4U_ID(16, 10)
+#define IOMMU_PORT_L16_CAM_LCESO_R1_A MTK_M4U_ID(16, 11)
+#define IOMMU_PORT_L16_CAM_CRZO_R1_A MTK_M4U_ID(16, 12)
+#define IOMMU_PORT_L16_CAM_LTMSO_R1_A MTK_M4U_ID(16, 13)
+#define IOMMU_PORT_L16_CAM_RSSO_R1_A MTK_M4U_ID(16, 14)
+#define IOMMU_PORT_L16_CAM_AAHO_R1_A MTK_M4U_ID(16, 15)
+#define IOMMU_PORT_L16_CAM_LSCI_R1_A MTK_M4U_ID(16, 16)
+
+/* LARB 17 -- RAW-B */
+#define IOMMU_PORT_L17_CAM_IMGO_R1_B MTK_M4U_ID(17, 0)
+#define IOMMU_PORT_L17_CAM_RRZO_R1_B MTK_M4U_ID(17, 1)
+#define IOMMU_PORT_L17_CAM_CQI_R1_B MTK_M4U_ID(17, 2)
+#define IOMMU_PORT_L17_CAM_BPCI_R1_B MTK_M4U_ID(17, 3)
+#define IOMMU_PORT_L17_CAM_YUVO_R1_B MTK_M4U_ID(17, 4)
+#define IOMMU_PORT_L17_CAM_UFDI_R2_B MTK_M4U_ID(17, 5)
+#define IOMMU_PORT_L17_CAM_RAWI_R2_B MTK_M4U_ID(17, 6)
+#define IOMMU_PORT_L17_CAM_RAWI_R3_B MTK_M4U_ID(17, 7)
+#define IOMMU_PORT_L17_CAM_AAO_R1_B MTK_M4U_ID(17, 8)
+#define IOMMU_PORT_L17_CAM_AFO_R1_B MTK_M4U_ID(17, 9)
+#define IOMMU_PORT_L17_CAM_FLKO_R1_B MTK_M4U_ID(17, 10)
+#define IOMMU_PORT_L17_CAM_LCESO_R1_B MTK_M4U_ID(17, 11)
+#define IOMMU_PORT_L17_CAM_CRZO_R1_B MTK_M4U_ID(17, 12)
+#define IOMMU_PORT_L17_CAM_LTMSO_R1_B MTK_M4U_ID(17, 13)
+#define IOMMU_PORT_L17_CAM_RSSO_R1_B MTK_M4U_ID(17, 14)
+#define IOMMU_PORT_L17_CAM_AAHO_R1_B MTK_M4U_ID(17, 15)
+#define IOMMU_PORT_L17_CAM_LSCI_R1_B MTK_M4U_ID(17, 16)
+
+/* LARB 19 -- IPE */
+#define IOMMU_PORT_L19_IPE_DVS_RDMA MTK_M4U_ID(19, 0)
+#define IOMMU_PORT_L19_IPE_DVS_WDMA MTK_M4U_ID(19, 1)
+#define IOMMU_PORT_L19_IPE_DVP_RDMA MTK_M4U_ID(19, 2)
+#define IOMMU_PORT_L19_IPE_DVP_WDMA MTK_M4U_ID(19, 3)
+
+/* LARB 20 -- IPE */
+#define IOMMU_PORT_L20_IPE_FDVT_RDA MTK_M4U_ID(20, 0)
+#define IOMMU_PORT_L20_IPE_FDVT_RDB MTK_M4U_ID(20, 1)
+#define IOMMU_PORT_L20_IPE_FDVT_WRA MTK_M4U_ID(20, 2)
+#define IOMMU_PORT_L20_IPE_FDVT_WRB MTK_M4U_ID(20, 3)
+#define IOMMU_PORT_L20_IPE_RSC_RDMA0 MTK_M4U_ID(20, 4)
+#define IOMMU_PORT_L20_IPE_RSC_WDMA MTK_M4U_ID(20, 5)
+
+#endif
diff --git a/include/dt-bindings/memory/mt8195-memory-port.h b/include/dt-bindings/memory/mt8195-memory-port.h
new file mode 100644
index 000000000000..70ba9f498eeb
--- /dev/null
+++ b/include/dt-bindings/memory/mt8195-memory-port.h
@@ -0,0 +1,408 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8195_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8195_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB dma address space. It is split into four
+ * ranges: 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G. Each master can be
+ * placed in any of these regions, BUT:
+ * a) All the ports inside one larb must be in the same range.
+ * b) The iova of any master can NOT cross a 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping for this SoC:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/1/2/3
+ * vcodec 4G ~ 8G larb19/20/21/22/23/24
+ * cam/mdp 8G ~ 12G the other larbs
+ * N/A 12G ~ 16G
+ * CCU0 0x24000_0000 ~ 0x243ff_ffff larb18: port 0/1
+ * CCU1 0x24400_0000 ~ 0x247ff_ffff larb18: port 2/3
+ *
+ * This SoC has two IOMMU HWs; the larbs are connected as follows:
+ * iommu-vdo: larb0/2/5/7/9/10/11/13/17/19/21/24/25/28
+ * iommu-vpp: larb1/3/4/6/8/12/14/16/18/20/22/23/26/27
+ */
+
+/* MM IOMMU ports */
+/* larb0 */
+#define M4U_PORT_L0_DISP_RDMA0 MTK_M4U_ID(0, 0)
+#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(0, 1)
+#define M4U_PORT_L0_DISP_OVL0_RDMA0 MTK_M4U_ID(0, 2)
+#define M4U_PORT_L0_DISP_OVL0_RDMA1 MTK_M4U_ID(0, 3)
+#define M4U_PORT_L0_DISP_OVL0_HDR MTK_M4U_ID(0, 4)
+#define M4U_PORT_L0_DISP_FAKE0 MTK_M4U_ID(0, 5)
+
+/* larb1 */
+#define M4U_PORT_L1_DISP_RDMA0 MTK_M4U_ID(1, 0)
+#define M4U_PORT_L1_DISP_WDMA0 MTK_M4U_ID(1, 1)
+#define M4U_PORT_L1_DISP_OVL0_RDMA0 MTK_M4U_ID(1, 2)
+#define M4U_PORT_L1_DISP_OVL0_RDMA1 MTK_M4U_ID(1, 3)
+#define M4U_PORT_L1_DISP_OVL0_HDR MTK_M4U_ID(1, 4)
+#define M4U_PORT_L1_DISP_FAKE0 MTK_M4U_ID(1, 5)
+
+/* larb2 */
+#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(2, 0)
+#define M4U_PORT_L2_MDP_RDMA2 MTK_M4U_ID(2, 1)
+#define M4U_PORT_L2_MDP_RDMA4 MTK_M4U_ID(2, 2)
+#define M4U_PORT_L2_MDP_RDMA6 MTK_M4U_ID(2, 3)
+#define M4U_PORT_L2_DISP_FAKE1 MTK_M4U_ID(2, 4)
+
+/* larb3 */
+#define M4U_PORT_L3_MDP_RDMA1 MTK_M4U_ID(3, 0)
+#define M4U_PORT_L3_MDP_RDMA3 MTK_M4U_ID(3, 1)
+#define M4U_PORT_L3_MDP_RDMA5 MTK_M4U_ID(3, 2)
+#define M4U_PORT_L3_MDP_RDMA7 MTK_M4U_ID(3, 3)
+#define M4U_PORT_L3_HDR_DS MTK_M4U_ID(3, 4)
+#define M4U_PORT_L3_HDR_ADL MTK_M4U_ID(3, 5)
+#define M4U_PORT_L3_DISP_FAKE1 MTK_M4U_ID(3, 6)
+
+/* larb4 */
+#define M4U_PORT_L4_MDP_RDMA MTK_M4U_ID(4, 0)
+#define M4U_PORT_L4_MDP_FG MTK_M4U_ID(4, 1)
+#define M4U_PORT_L4_MDP_OVL MTK_M4U_ID(4, 2)
+#define M4U_PORT_L4_MDP_WROT MTK_M4U_ID(4, 3)
+#define M4U_PORT_L4_FAKE MTK_M4U_ID(4, 4)
+
+/* larb5 */
+#define M4U_PORT_L5_SVPP1_MDP_RDMA MTK_M4U_ID(5, 0)
+#define M4U_PORT_L5_SVPP1_MDP_FG MTK_M4U_ID(5, 1)
+#define M4U_PORT_L5_SVPP1_MDP_OVL MTK_M4U_ID(5, 2)
+#define M4U_PORT_L5_SVPP1_MDP_WROT MTK_M4U_ID(5, 3)
+#define M4U_PORT_L5_SVPP2_MDP_RDMA MTK_M4U_ID(5, 4)
+#define M4U_PORT_L5_SVPP2_MDP_FG MTK_M4U_ID(5, 5)
+#define M4U_PORT_L5_SVPP2_MDP_WROT MTK_M4U_ID(5, 6)
+#define M4U_PORT_L5_FAKE MTK_M4U_ID(5, 7)
+
+/* larb6 */
+#define M4U_PORT_L6_SVPP3_MDP_RDMA MTK_M4U_ID(6, 0)
+#define M4U_PORT_L6_SVPP3_MDP_FG MTK_M4U_ID(6, 1)
+#define M4U_PORT_L6_SVPP3_MDP_WROT MTK_M4U_ID(6, 2)
+#define M4U_PORT_L6_FAKE MTK_M4U_ID(6, 3)
+
+/* larb7 */
+#define M4U_PORT_L7_IMG_WPE_RDMA0 MTK_M4U_ID(7, 0)
+#define M4U_PORT_L7_IMG_WPE_RDMA1 MTK_M4U_ID(7, 1)
+#define M4U_PORT_L7_IMG_WPE_WDMA0 MTK_M4U_ID(7, 2)
+
+/* larb8 */
+#define M4U_PORT_L8_IMG_WPE_RDMA0 MTK_M4U_ID(8, 0)
+#define M4U_PORT_L8_IMG_WPE_RDMA1 MTK_M4U_ID(8, 1)
+#define M4U_PORT_L8_IMG_WPE_WDMA0 MTK_M4U_ID(8, 2)
+
+/* larb9 */
+#define M4U_PORT_L9_IMG_IMGI_T1_A MTK_M4U_ID(9, 0)
+#define M4U_PORT_L9_IMG_IMGBI_T1_A MTK_M4U_ID(9, 1)
+#define M4U_PORT_L9_IMG_IMGCI_T1_A MTK_M4U_ID(9, 2)
+#define M4U_PORT_L9_IMG_SMTI_T1_A MTK_M4U_ID(9, 3)
+#define M4U_PORT_L9_IMG_TNCSTI_T1_A MTK_M4U_ID(9, 4)
+#define M4U_PORT_L9_IMG_TNCSTI_T4_A MTK_M4U_ID(9, 5)
+#define M4U_PORT_L9_IMG_YUVO_T1_A MTK_M4U_ID(9, 6)
+#define M4U_PORT_L9_IMG_TIMGO_T1_A MTK_M4U_ID(9, 7)
+#define M4U_PORT_L9_IMG_YUVO_T2_A MTK_M4U_ID(9, 8)
+#define M4U_PORT_L9_IMG_IMGI_T1_B MTK_M4U_ID(9, 9)
+#define M4U_PORT_L9_IMG_IMGBI_T1_B MTK_M4U_ID(9, 10)
+#define M4U_PORT_L9_IMG_IMGCI_T1_B MTK_M4U_ID(9, 11)
+#define M4U_PORT_L9_IMG_YUVO_T5_A MTK_M4U_ID(9, 12)
+#define M4U_PORT_L9_IMG_SMTI_T1_B MTK_M4U_ID(9, 13)
+#define M4U_PORT_L9_IMG_TNCSO_T1_A MTK_M4U_ID(9, 14)
+#define M4U_PORT_L9_IMG_SMTO_T1_A MTK_M4U_ID(9, 15)
+#define M4U_PORT_L9_IMG_TNCSTO_T1_A MTK_M4U_ID(9, 16)
+#define M4U_PORT_L9_IMG_YUVO_T2_B MTK_M4U_ID(9, 17)
+#define M4U_PORT_L9_IMG_YUVO_T5_B MTK_M4U_ID(9, 18)
+#define M4U_PORT_L9_IMG_SMTO_T1_B MTK_M4U_ID(9, 19)
+
+/* larb10 */
+#define M4U_PORT_L10_IMG_IMGI_D1_A MTK_M4U_ID(10, 0)
+#define M4U_PORT_L10_IMG_IMGCI_D1_A MTK_M4U_ID(10, 1)
+#define M4U_PORT_L10_IMG_DEPI_D1_A MTK_M4U_ID(10, 2)
+#define M4U_PORT_L10_IMG_DMGI_D1_A MTK_M4U_ID(10, 3)
+#define M4U_PORT_L10_IMG_VIPI_D1_A MTK_M4U_ID(10, 4)
+#define M4U_PORT_L10_IMG_TNRWI_D1_A MTK_M4U_ID(10, 5)
+#define M4U_PORT_L10_IMG_RECI_D1_A MTK_M4U_ID(10, 6)
+#define M4U_PORT_L10_IMG_SMTI_D1_A MTK_M4U_ID(10, 7)
+#define M4U_PORT_L10_IMG_SMTI_D6_A MTK_M4U_ID(10, 8)
+#define M4U_PORT_L10_IMG_PIMGI_P1_A MTK_M4U_ID(10, 9)
+#define M4U_PORT_L10_IMG_PIMGBI_P1_A MTK_M4U_ID(10, 10)
+#define M4U_PORT_L10_IMG_PIMGCI_P1_A MTK_M4U_ID(10, 11)
+#define M4U_PORT_L10_IMG_PIMGI_P1_B MTK_M4U_ID(10, 12)
+#define M4U_PORT_L10_IMG_PIMGBI_P1_B MTK_M4U_ID(10, 13)
+#define M4U_PORT_L10_IMG_PIMGCI_P1_B MTK_M4U_ID(10, 14)
+#define M4U_PORT_L10_IMG_IMG3O_D1_A MTK_M4U_ID(10, 15)
+#define M4U_PORT_L10_IMG_IMG4O_D1_A MTK_M4U_ID(10, 16)
+#define M4U_PORT_L10_IMG_IMG3CO_D1_A MTK_M4U_ID(10, 17)
+#define M4U_PORT_L10_IMG_FEO_D1_A MTK_M4U_ID(10, 18)
+#define M4U_PORT_L10_IMG_IMG2O_D1_A MTK_M4U_ID(10, 19)
+#define M4U_PORT_L10_IMG_TNRWO_D1_A MTK_M4U_ID(10, 20)
+#define M4U_PORT_L10_IMG_SMTO_D1_A MTK_M4U_ID(10, 21)
+#define M4U_PORT_L10_IMG_WROT_P1_A MTK_M4U_ID(10, 22)
+#define M4U_PORT_L10_IMG_WROT_P1_B MTK_M4U_ID(10, 23)
+
+/* larb11 */
+#define M4U_PORT_L11_IMG_WPE_EIS_RDMA0_A MTK_M4U_ID(11, 0)
+#define M4U_PORT_L11_IMG_WPE_EIS_RDMA1_A MTK_M4U_ID(11, 1)
+#define M4U_PORT_L11_IMG_WPE_EIS_WDMA0_A MTK_M4U_ID(11, 2)
+#define M4U_PORT_L11_IMG_WPE_TNR_RDMA0_A MTK_M4U_ID(11, 3)
+#define M4U_PORT_L11_IMG_WPE_TNR_RDMA1_A MTK_M4U_ID(11, 4)
+#define M4U_PORT_L11_IMG_WPE_TNR_WDMA0_A MTK_M4U_ID(11, 5)
+#define M4U_PORT_L11_IMG_WPE_EIS_CQ0_A MTK_M4U_ID(11, 6)
+#define M4U_PORT_L11_IMG_WPE_EIS_CQ1_A MTK_M4U_ID(11, 7)
+#define M4U_PORT_L11_IMG_WPE_TNR_CQ0_A MTK_M4U_ID(11, 8)
+#define M4U_PORT_L11_IMG_WPE_TNR_CQ1_A MTK_M4U_ID(11, 9)
+
+/* larb12 */
+#define M4U_PORT_L12_IMG_FDVT_RDA MTK_M4U_ID(12, 0)
+#define M4U_PORT_L12_IMG_FDVT_RDB MTK_M4U_ID(12, 1)
+#define M4U_PORT_L12_IMG_FDVT_WRA MTK_M4U_ID(12, 2)
+#define M4U_PORT_L12_IMG_FDVT_WRB MTK_M4U_ID(12, 3)
+#define M4U_PORT_L12_IMG_ME_RDMA MTK_M4U_ID(12, 4)
+#define M4U_PORT_L12_IMG_ME_WDMA MTK_M4U_ID(12, 5)
+#define M4U_PORT_L12_IMG_DVS_RDMA MTK_M4U_ID(12, 6)
+#define M4U_PORT_L12_IMG_DVS_WDMA MTK_M4U_ID(12, 7)
+#define M4U_PORT_L12_IMG_DVP_RDMA MTK_M4U_ID(12, 8)
+#define M4U_PORT_L12_IMG_DVP_WDMA MTK_M4U_ID(12, 9)
+
+/* larb13 */
+#define M4U_PORT_L13_CAM_CAMSV_CQI_E1 MTK_M4U_ID(13, 0)
+#define M4U_PORT_L13_CAM_CAMSV_CQI_E2 MTK_M4U_ID(13, 1)
+#define M4U_PORT_L13_CAM_GCAMSV_A_IMGO_0 MTK_M4U_ID(13, 2)
+#define M4U_PORT_L13_CAM_SCAMSV_A_IMGO_0 MTK_M4U_ID(13, 3)
+#define M4U_PORT_L13_CAM_GCAMSV_B_IMGO_0 MTK_M4U_ID(13, 4)
+#define M4U_PORT_L13_CAM_GCAMSV_B_IMGO_1 MTK_M4U_ID(13, 5)
+#define M4U_PORT_L13_CAM_GCAMSV_A_UFEO_0 MTK_M4U_ID(13, 6)
+#define M4U_PORT_L13_CAM_GCAMSV_B_UFEO_0 MTK_M4U_ID(13, 7)
+#define M4U_PORT_L13_CAM_PDAI_0 MTK_M4U_ID(13, 8)
+#define M4U_PORT_L13_CAM_FAKE MTK_M4U_ID(13, 9)
+
+/* larb14 */
+#define M4U_PORT_L14_CAM_GCAMSV_A_IMGO_1 MTK_M4U_ID(14, 0)
+#define M4U_PORT_L14_CAM_SCAMSV_A_IMGO_1 MTK_M4U_ID(14, 1)
+#define M4U_PORT_L14_CAM_GCAMSV_B_IMGO_0 MTK_M4U_ID(14, 2)
+#define M4U_PORT_L14_CAM_GCAMSV_B_IMGO_1 MTK_M4U_ID(14, 3)
+#define M4U_PORT_L14_CAM_SCAMSV_B_IMGO_0 MTK_M4U_ID(14, 4)
+#define M4U_PORT_L14_CAM_SCAMSV_B_IMGO_1 MTK_M4U_ID(14, 5)
+#define M4U_PORT_L14_CAM_IPUI MTK_M4U_ID(14, 6)
+#define M4U_PORT_L14_CAM_IPU2I MTK_M4U_ID(14, 7)
+#define M4U_PORT_L14_CAM_IPUO MTK_M4U_ID(14, 8)
+#define M4U_PORT_L14_CAM_IPU2O MTK_M4U_ID(14, 9)
+#define M4U_PORT_L14_CAM_IPU3O MTK_M4U_ID(14, 10)
+#define M4U_PORT_L14_CAM_GCAMSV_A_UFEO_1 MTK_M4U_ID(14, 11)
+#define M4U_PORT_L14_CAM_GCAMSV_B_UFEO_1 MTK_M4U_ID(14, 12)
+#define M4U_PORT_L14_CAM_PDAI_1 MTK_M4U_ID(14, 13)
+#define M4U_PORT_L14_CAM_PDAO MTK_M4U_ID(14, 14)
+
+/* larb15: null */
+
+/* larb16 */
+#define M4U_PORT_L16_CAM_IMGO_R1 MTK_M4U_ID(16, 0)
+#define M4U_PORT_L16_CAM_CQI_R1 MTK_M4U_ID(16, 1)
+#define M4U_PORT_L16_CAM_CQI_R2 MTK_M4U_ID(16, 2)
+#define M4U_PORT_L16_CAM_BPCI_R1 MTK_M4U_ID(16, 3)
+#define M4U_PORT_L16_CAM_LSCI_R1 MTK_M4U_ID(16, 4)
+#define M4U_PORT_L16_CAM_RAWI_R2 MTK_M4U_ID(16, 5)
+#define M4U_PORT_L16_CAM_RAWI_R3 MTK_M4U_ID(16, 6)
+#define M4U_PORT_L16_CAM_UFDI_R2 MTK_M4U_ID(16, 7)
+#define M4U_PORT_L16_CAM_UFDI_R3 MTK_M4U_ID(16, 8)
+#define M4U_PORT_L16_CAM_RAWI_R4 MTK_M4U_ID(16, 9)
+#define M4U_PORT_L16_CAM_RAWI_R5 MTK_M4U_ID(16, 10)
+#define M4U_PORT_L16_CAM_AAI_R1 MTK_M4U_ID(16, 11)
+#define M4U_PORT_L16_CAM_FHO_R1 MTK_M4U_ID(16, 12)
+#define M4U_PORT_L16_CAM_AAO_R1 MTK_M4U_ID(16, 13)
+#define M4U_PORT_L16_CAM_TSFSO_R1 MTK_M4U_ID(16, 14)
+#define M4U_PORT_L16_CAM_FLKO_R1 MTK_M4U_ID(16, 15)
+
+/* larb17 */
+#define M4U_PORT_L17_CAM_YUVO_R1 MTK_M4U_ID(17, 0)
+#define M4U_PORT_L17_CAM_YUVO_R3 MTK_M4U_ID(17, 1)
+#define M4U_PORT_L17_CAM_YUVCO_R1 MTK_M4U_ID(17, 2)
+#define M4U_PORT_L17_CAM_YUVO_R2 MTK_M4U_ID(17, 3)
+#define M4U_PORT_L17_CAM_RZH1N2TO_R1 MTK_M4U_ID(17, 4)
+#define M4U_PORT_L17_CAM_DRZS4NO_R1 MTK_M4U_ID(17, 5)
+#define M4U_PORT_L17_CAM_TNCSO_R1 MTK_M4U_ID(17, 6)
+
+/* larb18 */
+#define M4U_PORT_L18_CAM_CCUI MTK_M4U_ID(18, 0)
+#define M4U_PORT_L18_CAM_CCUO MTK_M4U_ID(18, 1)
+#define M4U_PORT_L18_CAM_CCUI2 MTK_M4U_ID(18, 2)
+#define M4U_PORT_L18_CAM_CCUO2 MTK_M4U_ID(18, 3)
+
+/* larb19 */
+#define M4U_PORT_L19_VENC_RCPU MTK_M4U_ID(19, 0)
+#define M4U_PORT_L19_VENC_REC MTK_M4U_ID(19, 1)
+#define M4U_PORT_L19_VENC_BSDMA MTK_M4U_ID(19, 2)
+#define M4U_PORT_L19_VENC_SV_COMV MTK_M4U_ID(19, 3)
+#define M4U_PORT_L19_VENC_RD_COMV MTK_M4U_ID(19, 4)
+#define M4U_PORT_L19_VENC_NBM_RDMA MTK_M4U_ID(19, 5)
+#define M4U_PORT_L19_VENC_NBM_RDMA_LITE MTK_M4U_ID(19, 6)
+#define M4U_PORT_L19_JPGENC_Y_RDMA MTK_M4U_ID(19, 7)
+#define M4U_PORT_L19_JPGENC_C_RDMA MTK_M4U_ID(19, 8)
+#define M4U_PORT_L19_JPGENC_Q_TABLE MTK_M4U_ID(19, 9)
+#define M4U_PORT_L19_VENC_SUB_W_LUMA MTK_M4U_ID(19, 10)
+#define M4U_PORT_L19_VENC_FCS_NBM_RDMA MTK_M4U_ID(19, 11)
+#define M4U_PORT_L19_JPGENC_BSDMA MTK_M4U_ID(19, 12)
+#define M4U_PORT_L19_JPGDEC_WDMA0 MTK_M4U_ID(19, 13)
+#define M4U_PORT_L19_JPGDEC_BSDMA0 MTK_M4U_ID(19, 14)
+#define M4U_PORT_L19_VENC_NBM_WDMA MTK_M4U_ID(19, 15)
+#define M4U_PORT_L19_VENC_NBM_WDMA_LITE MTK_M4U_ID(19, 16)
+#define M4U_PORT_L19_VENC_FCS_NBM_WDMA MTK_M4U_ID(19, 17)
+#define M4U_PORT_L19_JPGDEC_WDMA1 MTK_M4U_ID(19, 18)
+#define M4U_PORT_L19_JPGDEC_BSDMA1 MTK_M4U_ID(19, 19)
+#define M4U_PORT_L19_JPGDEC_BUFF_OFFSET1 MTK_M4U_ID(19, 20)
+#define M4U_PORT_L19_JPGDEC_BUFF_OFFSET0 MTK_M4U_ID(19, 21)
+#define M4U_PORT_L19_VENC_CUR_LUMA MTK_M4U_ID(19, 22)
+#define M4U_PORT_L19_VENC_CUR_CHROMA MTK_M4U_ID(19, 23)
+#define M4U_PORT_L19_VENC_REF_LUMA MTK_M4U_ID(19, 24)
+#define M4U_PORT_L19_VENC_REF_CHROMA MTK_M4U_ID(19, 25)
+#define M4U_PORT_L19_VENC_SUB_R_CHROMA MTK_M4U_ID(19, 26)
+
+/* larb20 */
+#define M4U_PORT_L20_VENC_RCPU MTK_M4U_ID(20, 0)
+#define M4U_PORT_L20_VENC_REC MTK_M4U_ID(20, 1)
+#define M4U_PORT_L20_VENC_BSDMA MTK_M4U_ID(20, 2)
+#define M4U_PORT_L20_VENC_SV_COMV MTK_M4U_ID(20, 3)
+#define M4U_PORT_L20_VENC_RD_COMV MTK_M4U_ID(20, 4)
+#define M4U_PORT_L20_VENC_NBM_RDMA MTK_M4U_ID(20, 5)
+#define M4U_PORT_L20_VENC_NBM_RDMA_LITE MTK_M4U_ID(20, 6)
+#define M4U_PORT_L20_JPGENC_Y_RDMA MTK_M4U_ID(20, 7)
+#define M4U_PORT_L20_JPGENC_C_RDMA MTK_M4U_ID(20, 8)
+#define M4U_PORT_L20_JPGENC_Q_TABLE MTK_M4U_ID(20, 9)
+#define M4U_PORT_L20_VENC_SUB_W_LUMA MTK_M4U_ID(20, 10)
+#define M4U_PORT_L20_VENC_FCS_NBM_RDMA MTK_M4U_ID(20, 11)
+#define M4U_PORT_L20_JPGENC_BSDMA MTK_M4U_ID(20, 12)
+#define M4U_PORT_L20_JPGDEC_WDMA0 MTK_M4U_ID(20, 13)
+#define M4U_PORT_L20_JPGDEC_BSDMA0 MTK_M4U_ID(20, 14)
+#define M4U_PORT_L20_VENC_NBM_WDMA MTK_M4U_ID(20, 15)
+#define M4U_PORT_L20_VENC_NBM_WDMA_LITE MTK_M4U_ID(20, 16)
+#define M4U_PORT_L20_VENC_FCS_NBM_WDMA MTK_M4U_ID(20, 17)
+#define M4U_PORT_L20_JPGDEC_WDMA1 MTK_M4U_ID(20, 18)
+#define M4U_PORT_L20_JPGDEC_BSDMA1 MTK_M4U_ID(20, 19)
+#define M4U_PORT_L20_JPGDEC_BUFF_OFFSET1 MTK_M4U_ID(20, 20)
+#define M4U_PORT_L20_JPGDEC_BUFF_OFFSET0 MTK_M4U_ID(20, 21)
+#define M4U_PORT_L20_VENC_CUR_LUMA MTK_M4U_ID(20, 22)
+#define M4U_PORT_L20_VENC_CUR_CHROMA MTK_M4U_ID(20, 23)
+#define M4U_PORT_L20_VENC_REF_LUMA MTK_M4U_ID(20, 24)
+#define M4U_PORT_L20_VENC_REF_CHROMA MTK_M4U_ID(20, 25)
+#define M4U_PORT_L20_VENC_SUB_R_CHROMA MTK_M4U_ID(20, 26)
+
+/* larb21 */
+#define M4U_PORT_L21_VDEC_MC_EXT MTK_M4U_ID(21, 0)
+#define M4U_PORT_L21_VDEC_UFO_EXT MTK_M4U_ID(21, 1)
+#define M4U_PORT_L21_VDEC_PP_EXT MTK_M4U_ID(21, 2)
+#define M4U_PORT_L21_VDEC_PRED_RD_EXT MTK_M4U_ID(21, 3)
+#define M4U_PORT_L21_VDEC_PRED_WR_EXT MTK_M4U_ID(21, 4)
+#define M4U_PORT_L21_VDEC_PPWRAP_EXT MTK_M4U_ID(21, 5)
+#define M4U_PORT_L21_VDEC_TILE_EXT MTK_M4U_ID(21, 6)
+#define M4U_PORT_L21_VDEC_VLD_EXT MTK_M4U_ID(21, 7)
+#define M4U_PORT_L21_VDEC_VLD2_EXT MTK_M4U_ID(21, 8)
+#define M4U_PORT_L21_VDEC_AVC_MV_EXT MTK_M4U_ID(21, 9)
+
+/* larb22 */
+#define M4U_PORT_L22_VDEC_MC_EXT MTK_M4U_ID(22, 0)
+#define M4U_PORT_L22_VDEC_UFO_EXT MTK_M4U_ID(22, 1)
+#define M4U_PORT_L22_VDEC_PP_EXT MTK_M4U_ID(22, 2)
+#define M4U_PORT_L22_VDEC_PRED_RD_EXT MTK_M4U_ID(22, 3)
+#define M4U_PORT_L22_VDEC_PRED_WR_EXT MTK_M4U_ID(22, 4)
+#define M4U_PORT_L22_VDEC_PPWRAP_EXT MTK_M4U_ID(22, 5)
+#define M4U_PORT_L22_VDEC_TILE_EXT MTK_M4U_ID(22, 6)
+#define M4U_PORT_L22_VDEC_VLD_EXT MTK_M4U_ID(22, 7)
+#define M4U_PORT_L22_VDEC_VLD2_EXT MTK_M4U_ID(22, 8)
+#define M4U_PORT_L22_VDEC_AVC_MV_EXT MTK_M4U_ID(22, 9)
+
+/* larb23 */
+#define M4U_PORT_L23_VDEC_UFO_ENC_EXT MTK_M4U_ID(23, 0)
+#define M4U_PORT_L23_VDEC_RDMA_EXT MTK_M4U_ID(23, 1)
+
+/* larb24 */
+#define M4U_PORT_L24_VDEC_LAT0_VLD_EXT MTK_M4U_ID(24, 0)
+#define M4U_PORT_L24_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(24, 1)
+#define M4U_PORT_L24_VDEC_LAT0_AVC_MC_EXT MTK_M4U_ID(24, 2)
+#define M4U_PORT_L24_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(24, 3)
+#define M4U_PORT_L24_VDEC_LAT0_TILE_EXT MTK_M4U_ID(24, 4)
+#define M4U_PORT_L24_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(24, 5)
+#define M4U_PORT_L24_VDEC_LAT1_VLD_EXT MTK_M4U_ID(24, 6)
+#define M4U_PORT_L24_VDEC_LAT1_VLD2_EXT MTK_M4U_ID(24, 7)
+#define M4U_PORT_L24_VDEC_LAT1_AVC_MC_EXT MTK_M4U_ID(24, 8)
+#define M4U_PORT_L24_VDEC_LAT1_PRED_RD_EXT MTK_M4U_ID(24, 9)
+#define M4U_PORT_L24_VDEC_LAT1_TILE_EXT MTK_M4U_ID(24, 10)
+#define M4U_PORT_L24_VDEC_LAT1_WDMA_EXT MTK_M4U_ID(24, 11)
+
+/* larb25 */
+#define M4U_PORT_L25_CAM_MRAW0_LSCI_M1 MTK_M4U_ID(25, 0)
+#define M4U_PORT_L25_CAM_MRAW0_CQI_M1 MTK_M4U_ID(25, 1)
+#define M4U_PORT_L25_CAM_MRAW0_CQI_M2 MTK_M4U_ID(25, 2)
+#define M4U_PORT_L25_CAM_MRAW0_IMGO_M1 MTK_M4U_ID(25, 3)
+#define M4U_PORT_L25_CAM_MRAW0_IMGBO_M1 MTK_M4U_ID(25, 4)
+#define M4U_PORT_L25_CAM_MRAW2_LSCI_M1 MTK_M4U_ID(25, 5)
+#define M4U_PORT_L25_CAM_MRAW2_CQI_M1 MTK_M4U_ID(25, 6)
+#define M4U_PORT_L25_CAM_MRAW2_CQI_M2 MTK_M4U_ID(25, 7)
+#define M4U_PORT_L25_CAM_MRAW2_IMGO_M1 MTK_M4U_ID(25, 8)
+#define M4U_PORT_L25_CAM_MRAW2_IMGBO_M1 MTK_M4U_ID(25, 9)
+#define M4U_PORT_L25_CAM_MRAW0_AFO_M1 MTK_M4U_ID(25, 10)
+#define M4U_PORT_L25_CAM_MRAW2_AFO_M1 MTK_M4U_ID(25, 11)
+
+/* larb26 */
+#define M4U_PORT_L26_CAM_MRAW1_LSCI_M1 MTK_M4U_ID(26, 0)
+#define M4U_PORT_L26_CAM_MRAW1_CQI_M1 MTK_M4U_ID(26, 1)
+#define M4U_PORT_L26_CAM_MRAW1_CQI_M2 MTK_M4U_ID(26, 2)
+#define M4U_PORT_L26_CAM_MRAW1_IMGO_M1 MTK_M4U_ID(26, 3)
+#define M4U_PORT_L26_CAM_MRAW1_IMGBO_M1 MTK_M4U_ID(26, 4)
+#define M4U_PORT_L26_CAM_MRAW3_LSCI_M1 MTK_M4U_ID(26, 5)
+#define M4U_PORT_L26_CAM_MRAW3_CQI_M1 MTK_M4U_ID(26, 6)
+#define M4U_PORT_L26_CAM_MRAW3_CQI_M2 MTK_M4U_ID(26, 7)
+#define M4U_PORT_L26_CAM_MRAW3_IMGO_M1 MTK_M4U_ID(26, 8)
+#define M4U_PORT_L26_CAM_MRAW3_IMGBO_M1 MTK_M4U_ID(26, 9)
+#define M4U_PORT_L26_CAM_MRAW1_AFO_M1 MTK_M4U_ID(26, 10)
+#define M4U_PORT_L26_CAM_MRAW3_AFO_M1 MTK_M4U_ID(26, 11)
+
+/* larb27 */
+#define M4U_PORT_L27_CAM_IMGO_R1 MTK_M4U_ID(27, 0)
+#define M4U_PORT_L27_CAM_CQI_R1 MTK_M4U_ID(27, 1)
+#define M4U_PORT_L27_CAM_CQI_R2 MTK_M4U_ID(27, 2)
+#define M4U_PORT_L27_CAM_BPCI_R1 MTK_M4U_ID(27, 3)
+#define M4U_PORT_L27_CAM_LSCI_R1 MTK_M4U_ID(27, 4)
+#define M4U_PORT_L27_CAM_RAWI_R2 MTK_M4U_ID(27, 5)
+#define M4U_PORT_L27_CAM_RAWI_R3 MTK_M4U_ID(27, 6)
+#define M4U_PORT_L27_CAM_UFDI_R2 MTK_M4U_ID(27, 7)
+#define M4U_PORT_L27_CAM_UFDI_R3 MTK_M4U_ID(27, 8)
+#define M4U_PORT_L27_CAM_RAWI_R4 MTK_M4U_ID(27, 9)
+#define M4U_PORT_L27_CAM_RAWI_R5 MTK_M4U_ID(27, 10)
+#define M4U_PORT_L27_CAM_AAI_R1 MTK_M4U_ID(27, 11)
+#define M4U_PORT_L27_CAM_FHO_R1 MTK_M4U_ID(27, 12)
+#define M4U_PORT_L27_CAM_AAO_R1 MTK_M4U_ID(27, 13)
+#define M4U_PORT_L27_CAM_TSFSO_R1 MTK_M4U_ID(27, 14)
+#define M4U_PORT_L27_CAM_FLKO_R1 MTK_M4U_ID(27, 15)
+
+/* larb28 */
+#define M4U_PORT_L28_CAM_YUVO_R1 MTK_M4U_ID(28, 0)
+#define M4U_PORT_L28_CAM_YUVO_R3 MTK_M4U_ID(28, 1)
+#define M4U_PORT_L28_CAM_YUVCO_R1 MTK_M4U_ID(28, 2)
+#define M4U_PORT_L28_CAM_YUVO_R2 MTK_M4U_ID(28, 3)
+#define M4U_PORT_L28_CAM_RZH1N2TO_R1 MTK_M4U_ID(28, 4)
+#define M4U_PORT_L28_CAM_DRZS4NO_R1 MTK_M4U_ID(28, 5)
+#define M4U_PORT_L28_CAM_TNCSO_R1 MTK_M4U_ID(28, 6)
+
+/* Infra iommu ports */
+/* PCIe1: read: BIT16; write BIT17. */
+#define IOMMU_PORT_INFRA_PCIE1 MTK_IFAIOMMU_PERI_ID(16)
+/* PCIe0: read: BIT18; write BIT19. */
+#define IOMMU_PORT_INFRA_PCIE0 MTK_IFAIOMMU_PERI_ID(18)
+#define IOMMU_PORT_INFRA_SSUSB_P3_R MTK_IFAIOMMU_PERI_ID(20)
+#define IOMMU_PORT_INFRA_SSUSB_P3_W MTK_IFAIOMMU_PERI_ID(21)
+#define IOMMU_PORT_INFRA_SSUSB_P2_R MTK_IFAIOMMU_PERI_ID(22)
+#define IOMMU_PORT_INFRA_SSUSB_P2_W MTK_IFAIOMMU_PERI_ID(23)
+#define IOMMU_PORT_INFRA_SSUSB_P1_1_R MTK_IFAIOMMU_PERI_ID(24)
+#define IOMMU_PORT_INFRA_SSUSB_P1_1_W MTK_IFAIOMMU_PERI_ID(25)
+#define IOMMU_PORT_INFRA_SSUSB_P1_0_R MTK_IFAIOMMU_PERI_ID(26)
+#define IOMMU_PORT_INFRA_SSUSB_P1_0_W MTK_IFAIOMMU_PERI_ID(27)
+#define IOMMU_PORT_INFRA_SSUSB2_R MTK_IFAIOMMU_PERI_ID(28)
+#define IOMMU_PORT_INFRA_SSUSB2_W MTK_IFAIOMMU_PERI_ID(29)
+#define IOMMU_PORT_INFRA_SSUSB_R MTK_IFAIOMMU_PERI_ID(30)
+#define IOMMU_PORT_INFRA_SSUSB_W MTK_IFAIOMMU_PERI_ID(31)
+
+#endif
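
The comment at the top of this header describes a 16GB IOVA space split into four 4GB regions, with the rule that all ports of a larb share one region and that no mapping may cross a 4G/8G/12G boundary. The C sketch below is illustrative only and is not taken from the kernel sources; the region numbering 0..3 is an assumption that follows the comment's 0~4G / 4G~8G / 8G~12G / 12G~16G ordering.

#include <stdbool.h>
#include <stdint.h>

#define MTK_IOVA_REGION_SZ	(4ULL << 30)	/* each region is 4GB */

/* region: 0 = 0~4G (disp), 1 = 4G~8G (vcodec), 2 = 8G~12G (cam/mdp), 3 = unused */
static bool mtk_iova_fits_region(uint64_t iova, uint64_t size, unsigned int region)
{
	uint64_t start = (uint64_t)region * MTK_IOVA_REGION_SZ;
	uint64_t end = start + MTK_IOVA_REGION_SZ;

	/* rule b): the mapping must stay inside one region, never crossing a boundary */
	return region < 4 && iova >= start && iova < end && size <= end - iova;
}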
diff --git a/include/dt-bindings/memory/mtk-memory-port.h b/include/dt-bindings/memory/mtk-memory-port.h
index 7d64103209af..2f68a0511a25 100644
--- a/include/dt-bindings/memory/mtk-memory-port.h
+++ b/include/dt-bindings/memory/mtk-memory-port.h
@@ -12,4 +12,6 @@
#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0x1f)
#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
+#define MTK_IFAIOMMU_PERI_ID(port) MTK_M4U_ID(0, port)
+
#endif
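
The hunk above shows how a MediaTek M4U port ID is taken apart: the larb index sits in bits [9:5], the port index in bits [4:0], and the new MTK_IFAIOMMU_PERI_ID() reuses larb 0 for the infra IOMMU ports. The small standalone C example below only illustrates that encoding; the MTK_M4U_ID() definition is repeated here as an assumption consistent with the decode macros shown, not copied from the kernel header.

#include <stdio.h>

#define MTK_M4U_ID(larb, port)		(((larb) << 5) | (port))	/* assumed encode form */
#define MTK_M4U_TO_LARB(id)		(((id) >> 5) & 0x1f)
#define MTK_M4U_TO_PORT(id)		((id) & 0x1f)
#define MTK_IFAIOMMU_PERI_ID(port)	MTK_M4U_ID(0, port)

int main(void)
{
	unsigned int id = MTK_M4U_ID(19, 2);	/* e.g. M4U_PORT_L19_VENC_BSDMA */

	printf("id %u -> larb %u, port %u\n",
	       id, MTK_M4U_TO_LARB(id), MTK_M4U_TO_PORT(id));
	printf("IOMMU_PORT_INFRA_PCIE1 = %u\n", MTK_IFAIOMMU_PERI_ID(16));
	return 0;
}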
diff --git a/include/dt-bindings/mfd/cros_ec.h b/include/dt-bindings/mfd/cros_ec.h
new file mode 100644
index 000000000000..3b29cd049578
--- /dev/null
+++ b/include/dt-bindings/mfd/cros_ec.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * DTS binding definitions used for the Chromium OS Embedded Controller.
+ *
+ * Copyright (c) 2022 The Chromium OS Authors. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_MFD_CROS_EC_H
+#define _DT_BINDINGS_MFD_CROS_EC_H
+
+/* Typed channel for keyboard backlight. */
+#define CROS_EC_PWM_DT_KB_LIGHT 0
+/* Typed channel for display backlight. */
+#define CROS_EC_PWM_DT_DISPLAY_LIGHT 1
+/* Number of typed channels. */
+#define CROS_EC_PWM_DT_COUNT 2
+
+#endif
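
The defines above give the typed PWM channels a stable index that device trees can pass as a cell. As a purely hypothetical consumer-side illustration (cros_ec_pwm_dt_name() is not a real kernel or binding function), the snippet below validates such an index against CROS_EC_PWM_DT_COUNT and maps it to a readable name.

#include <dt-bindings/mfd/cros_ec.h>

/* Hypothetical helper, not part of the binding or the kernel driver. */
static const char *cros_ec_pwm_dt_name(unsigned int channel)
{
	static const char *const names[CROS_EC_PWM_DT_COUNT] = {
		[CROS_EC_PWM_DT_KB_LIGHT]	= "keyboard backlight",
		[CROS_EC_PWM_DT_DISPLAY_LIGHT]	= "display backlight",
	};

	return channel < CROS_EC_PWM_DT_COUNT ? names[channel] : "unknown";
}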
diff --git a/include/dt-bindings/pinctrl/mt6795-pinfunc.h b/include/dt-bindings/pinctrl/mt6795-pinfunc.h
new file mode 100644
index 000000000000..bd1c5a9fad06
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt6795-pinfunc.h
@@ -0,0 +1,908 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __DTS_MT6795_PINFUNC_H
+#define __DTS_MT6795_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_IRDA_PDN (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_I2S1_WS (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_TDD_TMS (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_UTXD0 (MTK_PIN_NO(0) | 5)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_IRDA_RXD (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_I2S1_BCK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_SDA4 (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_TDD_TCK (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_URXD0 (MTK_PIN_NO(1) | 5)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_IRDA_TXD (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_I2S1_MCK (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_SCL4 (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_TDD_TDI (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_UTXD3 (MTK_PIN_NO(2) | 5)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_DSI1_TE (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_I2S1_DO_1 (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_SDA3 (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_TDD_TDO (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_URXD3 (MTK_PIN_NO(3) | 5)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_DISP_PWM1 (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S1_DO_2 (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_SCL3 (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_TDD_TRSTN (MTK_PIN_NO(4) | 4)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_PCM1_CLK (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I2S2_WS (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_SPI_CK_3 (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_LTE_MD32_JTAG_TMS (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_AP_MD32_JTAG_TMS (MTK_PIN_NO(5) | 5)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_PCM1_SYNC (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I2S2_BCK (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_SPI_MI_3 (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_LTE_MD32_JTAG_TCK (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_AP_MD32_JTAG_TCK (MTK_PIN_NO(6) | 5)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_PCM1_DI (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_I2S2_DI_1 (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_SPI_MO_3 (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_LTE_MD32_JTAG_TDI (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_AP_MD32_JTAG_TDI (MTK_PIN_NO(7) | 5)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_PCM1_DO (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_I2S2_DI_2 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_SPI_CS_3 (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_LTE_MD32_JTAG_TDO (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_AP_MD32_JTAG_TDO (MTK_PIN_NO(8) | 5)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_USB_DRVVBUS (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_I2S2_MCK (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_LTE_MD32_JTAG_TRST (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_AP_MD32_JTAG_TRST (MTK_PIN_NO(9) | 5)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_I2S0_WS (MTK_PIN_NO(10) | 2)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_I2S0_BCK (MTK_PIN_NO(11) | 2)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_I2S0_MCK (MTK_PIN_NO(12) | 2)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_I2S0_DO (MTK_PIN_NO(13) | 2)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_I2S0_DI (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_DISP_PWM1 (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_PWM4 (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_IRDA_RXD (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_I2S1_BCK (MTK_PIN_NO(14) | 6)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_DSI1_TE (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_USB_DRVVBUS (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_PWM5 (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_IRDA_TXD (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_I2S1_MCK (MTK_PIN_NO(15) | 6)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_IDDIG (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_FLASH (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_PWM5 (MTK_PIN_NO(16) | 4)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_SIM1_SCLK (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_SIM2_SCLK (MTK_PIN_NO(17) | 2)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_SIM1_SRST (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_SIM2_SRST (MTK_PIN_NO(18) | 2)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_SIM1_SDAT (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SIM2_SDAT (MTK_PIN_NO(19) | 2)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_SIM2_SCLK (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SIM1_SCLK (MTK_PIN_NO(20) | 2)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_SIM2_SRST (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_SIM1_SRST (MTK_PIN_NO(21) | 2)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_SIM2_SDAT (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_SIM1_SDAT (MTK_PIN_NO(22) | 2)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_MSDC3_DAT0 (MTK_PIN_NO(23) | 1)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_MSDC3_DAT1 (MTK_PIN_NO(24) | 1)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_MSDC3_DAT2 (MTK_PIN_NO(25) | 1)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_MSDC3_DAT3 (MTK_PIN_NO(26) | 1)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_MSDC3_CLK (MTK_PIN_NO(27) | 1)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_MSDC3_CMD (MTK_PIN_NO(28) | 1)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_PTA_RXD (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_UCTS2 (MTK_PIN_NO(29) | 2)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_PTA_TXD (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_URTS2 (MTK_PIN_NO(30) | 2)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_URXD2 (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_UTXD2 (MTK_PIN_NO(31) | 2)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_UTXD2 (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_URXD2 (MTK_PIN_NO(32) | 2)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_MRG_CLK (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_PCM0_CLK (MTK_PIN_NO(33) | 2)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_MRG_DI (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_PCM0_DI (MTK_PIN_NO(34) | 2)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_MRG_DO (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_PCM0_DO (MTK_PIN_NO(35) | 2)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_MRG_SYNC (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_PCM0_SYNC (MTK_PIN_NO(36) | 2)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_GPS_SYNC (MTK_PIN_NO(37) | 1)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_DAIRSTB (MTK_PIN_NO(38) | 1)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_CM2MCLK (MTK_PIN_NO(39) | 1)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_CM3MCLK (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_IRDA_PDN (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_PWM6 (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_I2S1_WS (MTK_PIN_NO(40) | 4)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_CMPCLK (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_CMCSK (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_FLASH (MTK_PIN_NO(41) | 3)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_CMMCLK (MTK_PIN_NO(42) | 1)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_SDA2 (MTK_PIN_NO(43) | 1)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_SCL2 (MTK_PIN_NO(44) | 1)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_SDA0 (MTK_PIN_NO(45) | 1)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_SCL0 (MTK_PIN_NO(46) | 1)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_BPI_BUS0 (MTK_PIN_NO(47) | 1)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_BPI_BUS1 (MTK_PIN_NO(48) | 1)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_BPI_BUS2 (MTK_PIN_NO(49) | 1)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_BPI_BUS3 (MTK_PIN_NO(50) | 1)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_BPI_BUS4 (MTK_PIN_NO(51) | 1)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_BPI_BUS5 (MTK_PIN_NO(52) | 1)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_BPI_BUS6 (MTK_PIN_NO(53) | 1)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_BPI_BUS7 (MTK_PIN_NO(54) | 1)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_BPI_BUS8 (MTK_PIN_NO(55) | 1)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_BPI_BUS9 (MTK_PIN_NO(56) | 1)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_BPI_BUS10 (MTK_PIN_NO(57) | 1)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_BPI_BUS11 (MTK_PIN_NO(58) | 1)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_BPI_BUS12 (MTK_PIN_NO(59) | 1)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_BPI_BUS13 (MTK_PIN_NO(60) | 1)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_BPI_BUS14 (MTK_PIN_NO(61) | 1)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_RFIC1_BSI_CK (MTK_PIN_NO(62) | 1)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_RFIC1_BSI_D0 (MTK_PIN_NO(63) | 1)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_RFIC1_BSI_D1 (MTK_PIN_NO(64) | 1)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_RFIC1_BSI_D2 (MTK_PIN_NO(65) | 1)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_RFIC1_BSI_CS (MTK_PIN_NO(66) | 1)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_TD_TXBPI (MTK_PIN_NO(67) | 1)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(68) | 1)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(69) | 1)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(70) | 1)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(71) | 1)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_RFIC0_BSI_CS (MTK_PIN_NO(72) | 1)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_MISC_BSI_DO (MTK_PIN_NO(73) | 1)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_MISC_BSI_CK (MTK_PIN_NO(74) | 1)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_MISC_BSI_CS0B (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_MIPI1_SCLK (MTK_PIN_NO(75) | 2)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_MISC_BSI_CS1B (MTK_PIN_NO(76) | 1)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_MISC_BSI_DI (MTK_PIN_NO(77) | 1)
+#define PINMUX_GPIO77__FUNC_MIPI1_SDATA (MTK_PIN_NO(77) | 2)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_LTE_TXBPI (MTK_PIN_NO(78) | 1)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_BPI_BUS15 (MTK_PIN_NO(79) | 1)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_BPI_BUS16 (MTK_PIN_NO(80) | 1)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_BPI_BUS17 (MTK_PIN_NO(81) | 1)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_BPI_BUS18 (MTK_PIN_NO(82) | 1)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_BPI_BUS19 (MTK_PIN_NO(83) | 1)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_BPI_BUS20 (MTK_PIN_NO(84) | 1)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_BPI_BUS21 (MTK_PIN_NO(85) | 1)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_BPI_BUS22 (MTK_PIN_NO(86) | 1)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_BPI_BUS23 (MTK_PIN_NO(87) | 1)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_BPI_BUS24 (MTK_PIN_NO(88) | 1)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_BPI_BUS25 (MTK_PIN_NO(89) | 1)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_BPI_BUS26 (MTK_PIN_NO(90) | 1)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_BPI_BUS27 (MTK_PIN_NO(91) | 1)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_PCM1_CLK (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_I2S0_BCK (MTK_PIN_NO(92) | 2)
+#define PINMUX_GPIO92__FUNC_NLD6 (MTK_PIN_NO(92) | 3)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_PCM1_SYNC (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_I2S0_WS (MTK_PIN_NO(93) | 2)
+#define PINMUX_GPIO93__FUNC_NLD7 (MTK_PIN_NO(93) | 3)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_PCM1_DI (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_I2S0_DI (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_NREB (MTK_PIN_NO(94) | 3)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_PCM1_DO (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_I2S0_DO (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_NRNB0 (MTK_PIN_NO(95) | 3)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_URXD1 (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_UTXD1 (MTK_PIN_NO(96) | 2)
+#define PINMUX_GPIO96__FUNC_NWEB (MTK_PIN_NO(96) | 3)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_UTXD1 (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_URXD1 (MTK_PIN_NO(97) | 2)
+#define PINMUX_GPIO97__FUNC_NCEB0 (MTK_PIN_NO(97) | 3)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_URTS1 (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_UCTS1 (MTK_PIN_NO(98) | 2)
+#define PINMUX_GPIO98__FUNC_NALE (MTK_PIN_NO(98) | 3)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_UCTS1 (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_URTS1 (MTK_PIN_NO(99) | 2)
+#define PINMUX_GPIO99__FUNC_NCLE (MTK_PIN_NO(99) | 3)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_MSDC2_DAT0 (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_URXD1 (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_USB_DRVVBUS (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_SDA4 (MTK_PIN_NO(100) | 4)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_MSDC2_DAT1 (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_UTXD1 (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_SCL4 (MTK_PIN_NO(101) | 4)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_MSDC2_DAT2 (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_URTS1 (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_UTXD0 (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_PWM0 (MTK_PIN_NO(102) | 5)
+#define PINMUX_GPIO102__FUNC_SPI_CK_1 (MTK_PIN_NO(102) | 6)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_MSDC2_DAT3 (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_UCTS1 (MTK_PIN_NO(103) | 2)
+#define PINMUX_GPIO103__FUNC_URXD0 (MTK_PIN_NO(103) | 3)
+#define PINMUX_GPIO103__FUNC_PWM1 (MTK_PIN_NO(103) | 5)
+#define PINMUX_GPIO103__FUNC_SPI_MI_1 (MTK_PIN_NO(103) | 6)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_MSDC2_CLK (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_NLD4 (MTK_PIN_NO(104) | 2)
+#define PINMUX_GPIO104__FUNC_UTXD3 (MTK_PIN_NO(104) | 3)
+#define PINMUX_GPIO104__FUNC_SDA3 (MTK_PIN_NO(104) | 4)
+#define PINMUX_GPIO104__FUNC_PWM2 (MTK_PIN_NO(104) | 5)
+#define PINMUX_GPIO104__FUNC_SPI_MO_1 (MTK_PIN_NO(104) | 6)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_MSDC2_CMD (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_NLD5 (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_URXD3 (MTK_PIN_NO(105) | 3)
+#define PINMUX_GPIO105__FUNC_SCL3 (MTK_PIN_NO(105) | 4)
+#define PINMUX_GPIO105__FUNC_PWM3 (MTK_PIN_NO(105) | 5)
+#define PINMUX_GPIO105__FUNC_SPI_CS_1 (MTK_PIN_NO(105) | 6)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_LCM_RST (MTK_PIN_NO(106) | 1)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_DSI_TE (MTK_PIN_NO(107) | 1)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_JTMS (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_MFG_JTAG_TMS (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_TDD_TMS (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_LTE_MD32_JTAG_TMS (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_AP_MD32_JTAG_TMS (MTK_PIN_NO(108) | 5)
+#define PINMUX_GPIO108__FUNC_DFD_TMS (MTK_PIN_NO(108) | 6)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_JTCK (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_MFG_JTAG_TCK (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_TDD_TCK (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_LTE_MD32_JTAG_TCK (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_AP_MD32_JTAG_TCK (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_DFD_TCK (MTK_PIN_NO(109) | 6)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_JTDI (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_MFG_JTAG_TDI (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_TDD_TDI (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_LTE_MD32_JTAG_TDI (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_AP_MD32_JTAG_TDI (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_DFD_TDI (MTK_PIN_NO(110) | 6)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_JTDO (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_MFG_JTAG_TDO (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_TDD_TDO (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_LTE_MD32_JTAG_TDO (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_AP_MD32_JTAG_TDO (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_DFD_TDO (MTK_PIN_NO(111) | 6)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_JTRST_B (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_MFG_JTAG_TRSTN (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_TDD_TRSTN (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_LTE_MD32_JTAG_TRST (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_AP_MD32_JTAG_TRST (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_DFD_NTRST (MTK_PIN_NO(112) | 6)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_URXD0 (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_UTXD0 (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_MD_URXD (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_LTE_URXD (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_TDD_TXD (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_I2S2_WS (MTK_PIN_NO(113) | 6)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_UTXD0 (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_URXD0 (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_MD_UTXD (MTK_PIN_NO(114) | 3)
+#define PINMUX_GPIO114__FUNC_LTE_UTXD (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_TDD_TXD (MTK_PIN_NO(114) | 5)
+#define PINMUX_GPIO114__FUNC_I2S2_BCK (MTK_PIN_NO(114) | 6)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_URTS0 (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_UCTS0 (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_MD_URXD (MTK_PIN_NO(115) | 3)
+#define PINMUX_GPIO115__FUNC_LTE_URXD (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_TDD_TXD (MTK_PIN_NO(115) | 5)
+#define PINMUX_GPIO115__FUNC_I2S2_MCK (MTK_PIN_NO(115) | 6)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_UCTS0 (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_URTS0 (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_MD_UTXD (MTK_PIN_NO(116) | 3)
+#define PINMUX_GPIO116__FUNC_LTE_UTXD (MTK_PIN_NO(116) | 4)
+#define PINMUX_GPIO116__FUNC_TDD_TXD (MTK_PIN_NO(116) | 5)
+#define PINMUX_GPIO116__FUNC_I2S2_DI_1 (MTK_PIN_NO(116) | 6)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_URXD3 (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_UTXD3 (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_MD_URXD (MTK_PIN_NO(117) | 3)
+#define PINMUX_GPIO117__FUNC_LTE_URXD (MTK_PIN_NO(117) | 4)
+#define PINMUX_GPIO117__FUNC_TDD_TXD (MTK_PIN_NO(117) | 5)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_UTXD3 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_URXD3 (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_MD_UTXD (MTK_PIN_NO(118) | 3)
+#define PINMUX_GPIO118__FUNC_LTE_UTXD (MTK_PIN_NO(118) | 4)
+#define PINMUX_GPIO118__FUNC_TDD_TXD (MTK_PIN_NO(118) | 5)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_KROW0 (MTK_PIN_NO(119) | 1)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_KROW1 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_PWM6 (MTK_PIN_NO(120) | 3)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_KROW2 (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_IRDA_PDN (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_I2S1_DO_1 (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_USB_DRVVBUS (MTK_PIN_NO(121) | 4)
+#define PINMUX_GPIO121__FUNC_SPI_CK_2 (MTK_PIN_NO(121) | 5)
+#define PINMUX_GPIO121__FUNC_PWM4 (MTK_PIN_NO(121) | 6)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_KCOL0 (MTK_PIN_NO(122) | 1)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_KCOL1 (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_IRDA_RXD (MTK_PIN_NO(123) | 2)
+#define PINMUX_GPIO123__FUNC_I2S2_DI_2 (MTK_PIN_NO(123) | 3)
+#define PINMUX_GPIO123__FUNC_PWM5 (MTK_PIN_NO(123) | 4)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_KCOL2 (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_IRDA_TXD (MTK_PIN_NO(124) | 2)
+#define PINMUX_GPIO124__FUNC_I2S1_DO_2 (MTK_PIN_NO(124) | 3)
+#define PINMUX_GPIO124__FUNC_USB_DRVVBUS (MTK_PIN_NO(124) | 4)
+#define PINMUX_GPIO124__FUNC_SPI_MI_2 (MTK_PIN_NO(124) | 5)
+#define PINMUX_GPIO124__FUNC_PWM3 (MTK_PIN_NO(124) | 6)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_SDA1 (MTK_PIN_NO(125) | 1)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_SCL1 (MTK_PIN_NO(126) | 1)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_MD_EINT1 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_DISP_PWM1 (MTK_PIN_NO(127) | 2)
+#define PINMUX_GPIO127__FUNC_SPI_MO_2 (MTK_PIN_NO(127) | 3)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_MD_EINT2 (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_DSI1_TE (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_SPI_CS_2 (MTK_PIN_NO(128) | 3)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_I2S3_WS (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_I2S2_WS (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_PWM0 (MTK_PIN_NO(129) | 3)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_I2S3_BCK (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_I2S2_BCK (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_PWM1 (MTK_PIN_NO(130) | 3)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_I2S3_MCK (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_I2S2_MCK (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_PWM2 (MTK_PIN_NO(131) | 3)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_I2S3_DO_1 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_I2S2_DI_1 (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_PWM3 (MTK_PIN_NO(132) | 3)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_I2S3_DO_2 (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_I2S2_DI_2 (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_PWM4 (MTK_PIN_NO(133) | 3)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_I2S3_DO_3 (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_DISP_PWM1 (MTK_PIN_NO(134) | 2)
+#define PINMUX_GPIO134__FUNC_I2S1_DO_1 (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_PWM5 (MTK_PIN_NO(134) | 4)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_I2S3_DO_4 (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_DSI1_TE (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_I2S1_DO_2 (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_PWM6 (MTK_PIN_NO(135) | 4)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_SDA3 (MTK_PIN_NO(136) | 1)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_SCL3 (MTK_PIN_NO(137) | 1)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_DPI_CK (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_NLD6 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_UTXD0 (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_USB_DRVVBUS (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_IRDA_PDN (MTK_PIN_NO(138) | 5)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_DPI_DE (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_NLD7 (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_URXD0 (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_MD_UTXD (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_IRDA_RXD (MTK_PIN_NO(139) | 5)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_DPI_D0 (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_NREB (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_UCTS0 (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_MD_URXD (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_IRDA_TXD (MTK_PIN_NO(140) | 5)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_DPI_D1 (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_NRNB0 (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_URTS0 (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_LTE_UTXD (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_I2S2_WS (MTK_PIN_NO(141) | 5)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_DPI_D2 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_NWEB (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_UTXD1 (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_LTE_URXD (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_I2S2_BCK (MTK_PIN_NO(142) | 5)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_DPI_D3 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_NCEB0 (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_URXD1 (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_TDD_TXD (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_I2S2_MCK (MTK_PIN_NO(143) | 5)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_DPI_D4 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_NALE (MTK_PIN_NO(144) | 2)
+#define PINMUX_GPIO144__FUNC_UCTS1 (MTK_PIN_NO(144) | 3)
+#define PINMUX_GPIO144__FUNC_TDD_TMS (MTK_PIN_NO(144) | 4)
+#define PINMUX_GPIO144__FUNC_I2S2_DI_1 (MTK_PIN_NO(144) | 5)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_DPI_D5 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_NCLE (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_URTS1 (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_TDD_TCK (MTK_PIN_NO(145) | 4)
+#define PINMUX_GPIO145__FUNC_I2S2_DI_2 (MTK_PIN_NO(145) | 5)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_DPI_D6 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_NLD8 (MTK_PIN_NO(146) | 2)
+#define PINMUX_GPIO146__FUNC_UTXD2 (MTK_PIN_NO(146) | 3)
+#define PINMUX_GPIO146__FUNC_TDD_TDI (MTK_PIN_NO(146) | 4)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_DPI_D7 (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_NLD9 (MTK_PIN_NO(147) | 2)
+#define PINMUX_GPIO147__FUNC_URXD2 (MTK_PIN_NO(147) | 3)
+#define PINMUX_GPIO147__FUNC_TDD_TDO (MTK_PIN_NO(147) | 4)
+#define PINMUX_GPIO147__FUNC_I2S1_WS (MTK_PIN_NO(147) | 5)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_DPI_D8 (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_NLD10 (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_UCTS2 (MTK_PIN_NO(148) | 3)
+#define PINMUX_GPIO148__FUNC_TDD_TRSTN (MTK_PIN_NO(148) | 4)
+#define PINMUX_GPIO148__FUNC_I2S1_BCK (MTK_PIN_NO(148) | 5)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_DPI_D9 (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_NLD11 (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_URTS2 (MTK_PIN_NO(149) | 3)
+#define PINMUX_GPIO149__FUNC_LTE_MD32_JTAG_TMS (MTK_PIN_NO(149) | 4)
+#define PINMUX_GPIO149__FUNC_I2S1_MCK (MTK_PIN_NO(149) | 5)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_DPI_D10 (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_NLD12 (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_UTXD3 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_LTE_MD32_JTAG_TCK (MTK_PIN_NO(150) | 4)
+#define PINMUX_GPIO150__FUNC_I2S1_DO_1 (MTK_PIN_NO(150) | 5)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_DPI_D11 (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_NLD13 (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_URXD3 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_LTE_MD32_JTAG_TDI (MTK_PIN_NO(151) | 4)
+#define PINMUX_GPIO151__FUNC_I2S1_DO_2 (MTK_PIN_NO(151) | 5)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_DPI_HSYNC (MTK_PIN_NO(152) | 1)
+#define PINMUX_GPIO152__FUNC_NLD14 (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_UCTS3 (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_LTE_MD32_JTAG_TDO (MTK_PIN_NO(152) | 4)
+#define PINMUX_GPIO152__FUNC_DSI1_TE (MTK_PIN_NO(152) | 5)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_DPI_VSYNC (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_NLD15 (MTK_PIN_NO(153) | 2)
+#define PINMUX_GPIO153__FUNC_URTS3 (MTK_PIN_NO(153) | 3)
+#define PINMUX_GPIO153__FUNC_LTE_MD32_JTAG_TRST (MTK_PIN_NO(153) | 4)
+#define PINMUX_GPIO153__FUNC_DISP_PWM1 (MTK_PIN_NO(153) | 5)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_MSDC0_DAT0 (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_NLD8 (MTK_PIN_NO(154) | 2)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_MSDC0_DAT1 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_NLD9 (MTK_PIN_NO(155) | 2)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_MSDC0_DAT2 (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_NLD10 (MTK_PIN_NO(156) | 2)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_MSDC0_DAT3 (MTK_PIN_NO(157) | 1)
+#define PINMUX_GPIO157__FUNC_NLD11 (MTK_PIN_NO(157) | 2)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_MSDC0_DAT4 (MTK_PIN_NO(158) | 1)
+#define PINMUX_GPIO158__FUNC_NLD12 (MTK_PIN_NO(158) | 2)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_MSDC0_DAT5 (MTK_PIN_NO(159) | 1)
+#define PINMUX_GPIO159__FUNC_NLD13 (MTK_PIN_NO(159) | 2)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_MSDC0_DAT6 (MTK_PIN_NO(160) | 1)
+#define PINMUX_GPIO160__FUNC_NLD14 (MTK_PIN_NO(160) | 2)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_MSDC0_DAT7 (MTK_PIN_NO(161) | 1)
+#define PINMUX_GPIO161__FUNC_NLD15 (MTK_PIN_NO(161) | 2)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_MSDC0_CMD (MTK_PIN_NO(162) | 1)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_MSDC0_CLK (MTK_PIN_NO(163) | 1)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_MSDC0_DSL (MTK_PIN_NO(164) | 1)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_MSDC0_RSTB (MTK_PIN_NO(165) | 1)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_SPI_CK_0 (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_PWM0 (MTK_PIN_NO(166) | 3)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_SPI_MI_0 (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_PWM1 (MTK_PIN_NO(167) | 3)
+#define PINMUX_GPIO167__FUNC_SPI_MO_0 (MTK_PIN_NO(167) | 4)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_SPI_MO_0 (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_MD_EINT3 (MTK_PIN_NO(168) | 2)
+#define PINMUX_GPIO168__FUNC_PWM2 (MTK_PIN_NO(168) | 3)
+#define PINMUX_GPIO168__FUNC_SPI_MI_0 (MTK_PIN_NO(168) | 4)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_SPI_CS_0 (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_MD_EINT4 (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_PWM3 (MTK_PIN_NO(169) | 3)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_MSDC1_CMD (MTK_PIN_NO(170) | 1)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_MSDC1_DAT0 (MTK_PIN_NO(171) | 1)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_MSDC1_DAT1 (MTK_PIN_NO(172) | 1)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_MSDC1_DAT2 (MTK_PIN_NO(173) | 1)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_MSDC1_DAT3 (MTK_PIN_NO(174) | 1)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_MSDC1_CLK (MTK_PIN_NO(175) | 1)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_PWRAP_SPIMI (MTK_PIN_NO(176) | 1)
+#define PINMUX_GPIO176__FUNC_PWRAP_SPIMO (MTK_PIN_NO(176) | 2)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define PINMUX_GPIO177__FUNC_PWRAP_SPIMO (MTK_PIN_NO(177) | 1)
+#define PINMUX_GPIO177__FUNC_PWRAP_SPIMI (MTK_PIN_NO(177) | 2)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define PINMUX_GPIO178__FUNC_PWRAP_SPICK (MTK_PIN_NO(178) | 1)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define PINMUX_GPIO179__FUNC_PWRAP_SPICS (MTK_PIN_NO(179) | 1)
+
+#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define PINMUX_GPIO180__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(180) | 1)
+#define PINMUX_GPIO180__FUNC_I2S1_WS (MTK_PIN_NO(180) | 2)
+#define PINMUX_GPIO180__FUNC_I2S2_WS (MTK_PIN_NO(180) | 3)
+#define PINMUX_GPIO180__FUNC_I2S0_WS (MTK_PIN_NO(180) | 4)
+
+#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define PINMUX_GPIO181__FUNC_AUD_DAT_MISO_1 (MTK_PIN_NO(181) | 1)
+#define PINMUX_GPIO181__FUNC_I2S1_BCK (MTK_PIN_NO(181) | 2)
+#define PINMUX_GPIO181__FUNC_I2S2_BCK (MTK_PIN_NO(181) | 3)
+#define PINMUX_GPIO181__FUNC_I2S0_BCK (MTK_PIN_NO(181) | 4)
+
+#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define PINMUX_GPIO182__FUNC_AUD_DAT_MOSI_1 (MTK_PIN_NO(182) | 1)
+#define PINMUX_GPIO182__FUNC_I2S1_MCK (MTK_PIN_NO(182) | 2)
+#define PINMUX_GPIO182__FUNC_I2S2_MCK (MTK_PIN_NO(182) | 3)
+#define PINMUX_GPIO182__FUNC_I2S0_MCK (MTK_PIN_NO(182) | 4)
+
+#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define PINMUX_GPIO183__FUNC_AUD_DAT_MISO_2 (MTK_PIN_NO(183) | 1)
+#define PINMUX_GPIO183__FUNC_I2S1_DO_1 (MTK_PIN_NO(183) | 2)
+#define PINMUX_GPIO183__FUNC_I2S2_DI_1 (MTK_PIN_NO(183) | 3)
+#define PINMUX_GPIO183__FUNC_I2S0_DO (MTK_PIN_NO(183) | 4)
+
+#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define PINMUX_GPIO184__FUNC_AUD_DAT_MOSI_2 (MTK_PIN_NO(184) | 1)
+#define PINMUX_GPIO184__FUNC_I2S1_DO_2 (MTK_PIN_NO(184) | 2)
+#define PINMUX_GPIO184__FUNC_I2S2_DI_2 (MTK_PIN_NO(184) | 3)
+#define PINMUX_GPIO184__FUNC_I2S0_DI (MTK_PIN_NO(184) | 4)
+
+#define PINMUX_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0)
+#define PINMUX_GPIO185__FUNC_RTC32K_CK (MTK_PIN_NO(185) | 1)
+
+#define PINMUX_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0)
+#define PINMUX_GPIO186__FUNC_DISP_PWM0 (MTK_PIN_NO(186) | 1)
+#define PINMUX_GPIO186__FUNC_DISP_PWM1 (MTK_PIN_NO(186) | 2)
+
+#define PINMUX_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0)
+#define PINMUX_GPIO187__FUNC_SRCLKENAI (MTK_PIN_NO(187) | 1)
+
+#define PINMUX_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0)
+#define PINMUX_GPIO188__FUNC_SRCLKENAI2 (MTK_PIN_NO(188) | 1)
+
+#define PINMUX_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0)
+#define PINMUX_GPIO189__FUNC_SRCLKENA0 (MTK_PIN_NO(189) | 1)
+
+#define PINMUX_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0)
+#define PINMUX_GPIO190__FUNC_SRCLKENA1 (MTK_PIN_NO(190) | 1)
+
+#define PINMUX_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0)
+#define PINMUX_GPIO191__FUNC_WATCHDOG_AO (MTK_PIN_NO(191) | 1)
+
+#define PINMUX_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0)
+#define PINMUX_GPIO192__FUNC_I2S0_WS (MTK_PIN_NO(192) | 1)
+#define PINMUX_GPIO192__FUNC_I2S1_WS (MTK_PIN_NO(192) | 2)
+#define PINMUX_GPIO192__FUNC_I2S2_WS (MTK_PIN_NO(192) | 3)
+#define PINMUX_GPIO192__FUNC_NCEB1 (MTK_PIN_NO(192) | 4)
+
+#define PINMUX_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0)
+#define PINMUX_GPIO193__FUNC_I2S0_BCK (MTK_PIN_NO(193) | 1)
+#define PINMUX_GPIO193__FUNC_I2S1_BCK (MTK_PIN_NO(193) | 2)
+#define PINMUX_GPIO193__FUNC_I2S2_BCK (MTK_PIN_NO(193) | 3)
+#define PINMUX_GPIO193__FUNC_NRNB1 (MTK_PIN_NO(193) | 4)
+
+#define PINMUX_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0)
+#define PINMUX_GPIO194__FUNC_I2S0_MCK (MTK_PIN_NO(194) | 1)
+#define PINMUX_GPIO194__FUNC_I2S1_MCK (MTK_PIN_NO(194) | 2)
+#define PINMUX_GPIO194__FUNC_I2S2_MCK (MTK_PIN_NO(194) | 3)
+
+#define PINMUX_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0)
+#define PINMUX_GPIO195__FUNC_I2S0_DO (MTK_PIN_NO(195) | 1)
+#define PINMUX_GPIO195__FUNC_I2S1_DO_1 (MTK_PIN_NO(195) | 2)
+#define PINMUX_GPIO195__FUNC_I2S2_DI_1 (MTK_PIN_NO(195) | 3)
+
+#define PINMUX_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0)
+#define PINMUX_GPIO196__FUNC_I2S0_DI (MTK_PIN_NO(196) | 1)
+#define PINMUX_GPIO196__FUNC_I2S1_DO_2 (MTK_PIN_NO(196) | 2)
+#define PINMUX_GPIO196__FUNC_I2S2_DI_2 (MTK_PIN_NO(196) | 3)
+
+
+#endif
diff --git a/include/dt-bindings/reset/mt7986-resets.h b/include/dt-bindings/reset/mt7986-resets.h
new file mode 100644
index 000000000000..af3d16c81192
--- /dev/null
+++ b/include/dt-bindings/reset/mt7986-resets.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT7986
+#define _DT_BINDINGS_RESET_CONTROLLER_MT7986
+
+/* INFRACFG resets */
+#define MT7986_INFRACFG_PEXTP_MAC_SW_RST 6
+#define MT7986_INFRACFG_SSUSB_SW_RST 7
+#define MT7986_INFRACFG_EIP97_SW_RST 8
+#define MT7986_INFRACFG_AUDIO_SW_RST 13
+#define MT7986_INFRACFG_CQ_DMA_SW_RST 14
+
+#define MT7986_INFRACFG_TRNG_SW_RST 17
+#define MT7986_INFRACFG_AP_DMA_SW_RST 32
+#define MT7986_INFRACFG_I2C_SW_RST 33
+#define MT7986_INFRACFG_NFI_SW_RST 34
+#define MT7986_INFRACFG_SPI0_SW_RST 35
+#define MT7986_INFRACFG_SPI1_SW_RST 36
+#define MT7986_INFRACFG_UART0_SW_RST 37
+#define MT7986_INFRACFG_UART1_SW_RST 38
+#define MT7986_INFRACFG_UART2_SW_RST 39
+#define MT7986_INFRACFG_AUXADC_SW_RST 43
+
+#define MT7986_INFRACFG_APXGPT_SW_RST 66
+#define MT7986_INFRACFG_PWM_SW_RST 68
+
+#define MT7986_INFRACFG_SW_RST_NUM 69
+
+/* TOPRGU resets */
+#define MT7986_TOPRGU_APMIXEDSYS_SW_RST 0
+#define MT7986_TOPRGU_SGMII0_SW_RST 1
+#define MT7986_TOPRGU_SGMII1_SW_RST 2
+#define MT7986_TOPRGU_INFRA_SW_RST 3
+#define MT7986_TOPRGU_U2PHY_SW_RST 5
+#define MT7986_TOPRGU_PCIE_SW_RST 6
+#define MT7986_TOPRGU_SSUSB_SW_RST 7
+#define MT7986_TOPRGU_ETHDMA_SW_RST 20
+#define MT7986_TOPRGU_CONSYS_SW_RST 23
+
+#define MT7986_TOPRGU_SW_RST_NUM 24
+
+/* ETHSYS Subsystem resets */
+#define MT7986_ETHSYS_FE_SW_RST 6
+#define MT7986_ETHSYS_PMTR_SW_RST 8
+#define MT7986_ETHSYS_GMAC_SW_RST 23
+#define MT7986_ETHSYS_PPE0_SW_RST 30
+#define MT7986_ETHSYS_PPE1_SW_RST 31
+
+#define MT7986_ETHSYS_SW_RST_NUM 32
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT7986 */
diff --git a/include/dt-bindings/reset/mt8186-resets.h b/include/dt-bindings/reset/mt8186-resets.h
new file mode 100644
index 000000000000..5f850370c42c
--- /dev/null
+++ b/include/dt-bindings/reset/mt8186-resets.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Runyang Chen <runyang.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8186
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8186
+
+#define MT8186_TOPRGU_INFRA_SW_RST 0
+#define MT8186_TOPRGU_MM_SW_RST 1
+#define MT8186_TOPRGU_MFG_SW_RST 2
+#define MT8186_TOPRGU_VENC_SW_RST 3
+#define MT8186_TOPRGU_VDEC_SW_RST 4
+#define MT8186_TOPRGU_IMG_SW_RST 5
+#define MT8186_TOPRGU_DDR_SW_RST 6
+#define MT8186_TOPRGU_INFRA_AO_SW_RST 8
+#define MT8186_TOPRGU_CONNSYS_SW_RST 9
+#define MT8186_TOPRGU_APMIXED_SW_RST 10
+#define MT8186_TOPRGU_PWRAP_SW_RST 11
+#define MT8186_TOPRGU_CONN_MCU_SW_RST 12
+#define MT8186_TOPRGU_IPNNA_SW_RST 13
+#define MT8186_TOPRGU_WPE_SW_RST 14
+#define MT8186_TOPRGU_ADSP_SW_RST 15
+#define MT8186_TOPRGU_AUDIO_SW_RST 17
+#define MT8186_TOPRGU_CAM_MAIN_SW_RST 18
+#define MT8186_TOPRGU_CAM_RAWA_SW_RST 19
+#define MT8186_TOPRGU_CAM_RAWB_SW_RST 20
+#define MT8186_TOPRGU_IPE_SW_RST 21
+#define MT8186_TOPRGU_IMG2_SW_RST 22
+#define MT8186_TOPRGU_SW_RST_NUM 23
+
+/* MMSYS resets */
+#define MT8186_MMSYS_SW0_RST_B_DISP_DSI0 19
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8186 */
diff --git a/include/dt-bindings/reset/stm32mp13-resets.h b/include/dt-bindings/reset/stm32mp13-resets.h
new file mode 100644
index 000000000000..934864e90da6
--- /dev/null
+++ b/include/dt-bindings/reset/stm32mp13-resets.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
+ */
+
+#ifndef _DT_BINDINGS_STM32MP13_RESET_H_
+#define _DT_BINDINGS_STM32MP13_RESET_H_
+
+#define TIM2_R 13568
+#define TIM3_R 13569
+#define TIM4_R 13570
+#define TIM5_R 13571
+#define TIM6_R 13572
+#define TIM7_R 13573
+#define LPTIM1_R 13577
+#define SPI2_R 13579
+#define SPI3_R 13580
+#define USART3_R 13583
+#define UART4_R 13584
+#define UART5_R 13585
+#define UART7_R 13586
+#define UART8_R 13587
+#define I2C1_R 13589
+#define I2C2_R 13590
+#define SPDIF_R 13594
+#define TIM1_R 13632
+#define TIM8_R 13633
+#define SPI1_R 13640
+#define USART6_R 13645
+#define SAI1_R 13648
+#define SAI2_R 13649
+#define DFSDM_R 13652
+#define FDCAN_R 13656
+#define LPTIM2_R 13696
+#define LPTIM3_R 13697
+#define LPTIM4_R 13698
+#define LPTIM5_R 13699
+#define SYSCFG_R 13707
+#define VREF_R 13709
+#define DTS_R 13712
+#define PMBCTRL_R 13713
+#define LTDC_R 13760
+#define DCMIPP_R 13761
+#define DDRPERFM_R 13768
+#define USBPHY_R 13776
+#define STGEN_R 13844
+#define USART1_R 13888
+#define USART2_R 13889
+#define SPI4_R 13890
+#define SPI5_R 13891
+#define I2C3_R 13892
+#define I2C4_R 13893
+#define I2C5_R 13894
+#define TIM12_R 13895
+#define TIM13_R 13896
+#define TIM14_R 13897
+#define TIM15_R 13898
+#define TIM16_R 13899
+#define TIM17_R 13900
+#define DMA1_R 13952
+#define DMA2_R 13953
+#define DMAMUX1_R 13954
+#define DMA3_R 13955
+#define DMAMUX2_R 13956
+#define ADC1_R 13957
+#define ADC2_R 13958
+#define USBO_R 13960
+#define GPIOA_R 14080
+#define GPIOB_R 14081
+#define GPIOC_R 14082
+#define GPIOD_R 14083
+#define GPIOE_R 14084
+#define GPIOF_R 14085
+#define GPIOG_R 14086
+#define GPIOH_R 14087
+#define GPIOI_R 14088
+#define TSC_R 14095
+#define PKA_R 14146
+#define SAES_R 14147
+#define CRYP1_R 14148
+#define HASH1_R 14149
+#define RNG1_R 14150
+#define AXIMC_R 14160
+#define MDMA_R 14208
+#define MCE_R 14209
+#define ETH1MAC_R 14218
+#define FMC_R 14220
+#define QSPI_R 14222
+#define SDMMC1_R 14224
+#define SDMMC2_R 14225
+#define CRC1_R 14228
+#define USBH_R 14232
+#define ETH2MAC_R 14238
+
+/* SCMI reset domain identifiers */
+#define RST_SCMI_LTDC 0
+#define RST_SCMI_MDMA 1
+
+#endif /* _DT_BINDINGS_STM32MP13_RESET_H_ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 03465db16b68..4f82a5bc6d98 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -520,9 +520,6 @@ int acpi_check_resource_conflict(const struct resource *res);
int acpi_check_region(resource_size_t start, resource_size_t n,
const char *name);
-acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
- u32 level);
-
int acpi_resources_are_enforced(void);
#ifdef CONFIG_HIBERNATION
@@ -550,10 +547,16 @@ struct acpi_osc_context {
acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
-/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
+/* Number of _OSC capability DWORDS depends on bridge type */
+#define OSC_PCI_CAPABILITY_DWORDS 3
+#define OSC_CXL_CAPABILITY_DWORDS 5
+
+/* Indexes into _OSC Capabilities Buffer (DWORDs 2 to 5 are device-specific) */
#define OSC_QUERY_DWORD 0 /* DWORD 1 */
#define OSC_SUPPORT_DWORD 1 /* DWORD 2 */
#define OSC_CONTROL_DWORD 2 /* DWORD 3 */
+#define OSC_EXT_SUPPORT_DWORD 3 /* DWORD 4 */
+#define OSC_EXT_CONTROL_DWORD 4 /* DWORD 5 */
/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */
#define OSC_QUERY_ENABLE 0x00000001 /* input */
@@ -610,6 +613,29 @@ extern u32 osc_sb_native_usb4_control;
#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020
#define OSC_PCI_EXPRESS_DPC_CONTROL 0x00000080
+/* CXL _OSC: Capabilities DWORD 4: Support Field */
+#define OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT 0x00000001
+#define OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT 0x00000002
+#define OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT 0x00000004
+#define OSC_CXL_NATIVE_HP_SUPPORT 0x00000008
+
+/* CXL _OSC: Capabilities DWORD 5: Control Field */
+#define OSC_CXL_ERROR_REPORTING_CONTROL 0x00000001
+
+static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context)
+{
+ u32 *ret = context->ret.pointer;
+
+ return ret[OSC_CONTROL_DWORD];
+}
+
+static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context)
+{
+ u32 *ret = context->ret.pointer;
+
+ return ret[OSC_EXT_CONTROL_DWORD];
+}
+
#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004
#define ACPI_GSB_ACCESS_ATTRIB_BYTE 0x00000006
@@ -1006,6 +1032,17 @@ static inline int acpi_register_wakeup_handler(int wake_irq,
static inline void acpi_unregister_wakeup_handler(
bool (*wakeup)(void *context), void *context) { }
+struct acpi_osc_context;
+static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context)
+{
+ return 0;
+}
+
+static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context)
+{
+ return 0;
+}
+
#endif /* !CONFIG_ACPI */
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
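A minimal sketch of how a host-bridge driver might consume the new _OSC accessors once acpi_run_osc() has filled context->ret; the helper name and the particular control bits tested are illustrative assumptions, not part of this patch.

#include <linux/acpi.h>

/* Hypothetical check: were DPC and CXL error-reporting controls granted? */
static bool example_osc_error_controls_granted(struct acpi_osc_context *context)
{
	u32 pci_ctrl = acpi_osc_ctx_get_pci_control(context);
	u32 cxl_ctrl = acpi_osc_ctx_get_cxl_control(context);

	return (pci_ctrl & OSC_PCI_EXPRESS_DPC_CONTROL) &&
	       (cxl_ctrl & OSC_CXL_ERROR_REPORTING_CONTROL);
}
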
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 6562f543c3e0..e94cdf235f1d 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -70,7 +70,11 @@ struct amba_device {
unsigned int cid;
struct amba_cs_uci_id uci;
unsigned int irq[AMBA_NR_IRQS];
- char *driver_override;
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
};
struct amba_driver {
@@ -79,6 +83,14 @@ struct amba_driver {
void (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
const struct amba_id *id_table;
+ /*
+ * For most device drivers, no need to care about this flag as long as
+ * all DMAs are handled through the kernel DMA API. For some special
+ * ones, for example VFIO drivers, they know how to manage the DMA
+ * themselves and set this flag so that the IOMMU layer will allow them
+ * to setup and manage their own I/O address space.
+ */
+ bool driver_managed_dma;
};
/*
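A hedged sketch of an AMBA driver opting out of kernel DMA API management via the new driver_managed_dma flag; every example_* identifier and the ID values are made up for illustration.

#include <linux/amba/bus.h>
#include <linux/module.h>

static int example_probe(struct amba_device *adev, const struct amba_id *id)
{
	/* set up the device; DMA mappings are handled by the driver itself */
	return 0;
}

static void example_remove(struct amba_device *adev)
{
}

static const struct amba_id example_ids[] = {
	{ .id = 0x000bb000, .mask = 0x000fffff },
	{ 0, 0 },
};

static struct amba_driver example_amba_driver = {
	.drv		= { .name = "example-amba" },
	.probe		= example_probe,
	.remove		= example_remove,
	.id_table	= example_ids,
	/* this driver manages its own I/O address space (VFIO-style) */
	.driver_managed_dma = true,
};
module_amba_driver(example_amba_driver);
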
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 7dba0847510c..2e6cd5681040 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -72,6 +72,8 @@ struct device;
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
* bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst
* bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst
+ * bitmap_from_arr64(dst, buf, nbits) Copy nbits from u64[] buf to dst
+ * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst
* bitmap_get_value8(map, start) Get 8bit value from map at start
* bitmap_set_value8(map, value, start) Set 8bit value to map at start
*
@@ -132,8 +134,8 @@ unsigned long *devm_bitmap_zalloc(struct device *dev,
* lib/bitmap.c provides these functions:
*/
-int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
bool __pure __bitmap_or_equal(const unsigned long *src1,
const unsigned long *src2,
const unsigned long *src3,
@@ -157,10 +159,10 @@ int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
void __bitmap_replace(unsigned long *dst,
const unsigned long *old, const unsigned long *new,
const unsigned long *mask, unsigned int nbits);
-int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-int __bitmap_subset(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_subset(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
void __bitmap_set(unsigned long *map, unsigned int start, int len);
void __bitmap_clear(unsigned long *map, unsigned int start, int len);
@@ -264,8 +266,12 @@ static inline void bitmap_copy_clear_tail(unsigned long *dst,
}
/*
- * On 32-bit systems bitmaps are represented as u32 arrays internally, and
- * therefore conversion is not needed when copying data from/to arrays of u32.
+ * On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64
+ * machines the order of hi and lo parts of numbers match the bitmap structure.
+ * In both cases conversion is not needed when copying data from/to arrays of
+ * u32. But in LE64 case, typecast in bitmap_copy_clear_tail() may lead
+ * to out-of-bound access. To avoid that, both LE and BE variants of 64-bit
+ * architectures are not using bitmap_copy_clear_tail().
*/
#if BITS_PER_LONG == 64
void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
@@ -281,6 +287,22 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
(const unsigned long *) (bitmap), (nbits))
#endif
+/*
+ * On 64-bit systems bitmaps are represented as u64 arrays internally. On LE32
+ * machines the order of hi and lo parts of numbers match the bitmap structure.
+ * In both cases conversion is not needed when copying data from/to arrays of
+ * u64.
+ */
+#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
+void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits);
+void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
+#else
+#define bitmap_from_arr64(bitmap, buf, nbits) \
+ bitmap_copy_clear_tail((unsigned long *)(bitmap), (const unsigned long *)(buf), (nbits))
+#define bitmap_to_arr64(buf, bitmap, nbits) \
+ bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
+#endif
+
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
@@ -331,8 +353,8 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr
#endif
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
-static inline int bitmap_equal(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_equal(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@@ -362,8 +384,9 @@ static inline bool bitmap_or_equal(const unsigned long *src1,
return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
}
-static inline int bitmap_intersects(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_intersects(const unsigned long *src1,
+ const unsigned long *src2,
+ unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
@@ -371,8 +394,8 @@ static inline int bitmap_intersects(const unsigned long *src1,
return __bitmap_intersects(src1, src2, nbits);
}
-static inline int bitmap_subset(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_subset(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
@@ -514,10 +537,7 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
*/
static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
{
- dst[0] = mask & ULONG_MAX;
-
- if (sizeof(mask) > sizeof(unsigned long))
- dst[1] = mask >> 32;
+ bitmap_from_arr64(dst, &mask, 64);
}
/**
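A small, self-contained sketch of the new u64-array conversions added above (the 128-bit size and the values are arbitrary); note that bitmap_from_u64() now routes through bitmap_from_arr64().

#include <linux/bitmap.h>
#include <linux/types.h>

static void example_arr64_roundtrip(void)
{
	DECLARE_BITMAP(map, 128);
	u64 in[2] = { 0xdeadbeefdeadbeefULL, 0x1ULL };
	u64 out[2];

	bitmap_from_arr64(map, in, 128);	/* u64[] -> bitmap, any endianness */
	bitmap_to_arr64(out, map, 128);		/* bitmap -> u64[] */
}
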
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 9f07061418db..e2d9daf7e8dd 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -969,8 +969,7 @@ int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
-void blk_execute_rq_nowait(struct request *rq, bool at_head,
- rq_end_io_fn *end_io);
+void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
struct req_iterator {
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index c007d58d2703..a24d4078fb21 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -105,6 +105,10 @@ typedef u16 blk_short_t;
/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE ((__force blk_status_t)11)
+/*
+ * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
+ * and the bio would block (cf bio_wouldblock_error())
+ */
#define BLK_STS_AGAIN ((__force blk_status_t)12)
/*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1b24c1fb3bb1..608d577734c2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -147,6 +147,7 @@ struct gendisk {
#define GD_DEAD 2
#define GD_NATIVE_CAPACITY 3
#define GD_ADDED 4
+#define GD_SUPPRESS_PART_SCAN 5
struct mutex open_mutex; /* open/close mutex */
unsigned open_partitions; /* number of open partitions */
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index a4665c7ab07c..1611f9db878e 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -289,4 +289,14 @@ int __init xbc_get_info(int *node_size, size_t *data_size);
/* XBC cleanup data structures */
void __init xbc_exit(void);
+/* XBC embedded bootconfig data in kernel */
+#ifdef CONFIG_BOOT_CONFIG_EMBED
+const char * __init xbc_get_embedded_bootconfig(size_t *size);
+#else
+static inline const char *xbc_get_embedded_bootconfig(size_t *size)
+{
+ return NULL;
+}
+#endif
+
#endif
diff --git a/include/linux/clk/pxa.h b/include/linux/clk/pxa.h
new file mode 100644
index 000000000000..736b8bb91bd7
--- /dev/null
+++ b/include/linux/clk/pxa.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern int pxa25x_clocks_init(void __iomem *regs);
+extern int pxa27x_clocks_init(void __iomem *regs);
+extern int pxa3xx_clocks_init(void __iomem *regs, void __iomem *oscc_reg);
+
+#ifdef CONFIG_PXA3xx
+extern unsigned pxa3xx_get_clk_frequency_khz(int);
+extern void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask);
+#else
+#define pxa3xx_get_clk_frequency_khz(x) (0)
+#define pxa3xx_clk_update_accr(disable, enable, xclkcfg, mask) do { } while (0)
+#endif
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 01fddf72a81f..594357881b0b 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -259,6 +259,37 @@ struct compat_rlimit {
compat_ulong_t rlim_max;
};
+#ifdef __ARCH_NEED_COMPAT_FLOCK64_PACKED
+#define __ARCH_COMPAT_FLOCK64_PACK __attribute__((packed))
+#else
+#define __ARCH_COMPAT_FLOCK64_PACK
+#endif
+
+struct compat_flock {
+ short l_type;
+ short l_whence;
+ compat_off_t l_start;
+ compat_off_t l_len;
+#ifdef __ARCH_COMPAT_FLOCK_EXTRA_SYSID
+ __ARCH_COMPAT_FLOCK_EXTRA_SYSID
+#endif
+ compat_pid_t l_pid;
+#ifdef __ARCH_COMPAT_FLOCK_PAD
+ __ARCH_COMPAT_FLOCK_PAD
+#endif
+};
+
+struct compat_flock64 {
+ short l_type;
+ short l_whence;
+ compat_loff_t l_start;
+ compat_loff_t l_len;
+ compat_pid_t l_pid;
+#ifdef __ARCH_COMPAT_FLOCK64_PAD
+ __ARCH_COMPAT_FLOCK64_PAD
+#endif
+} __ARCH_COMPAT_FLOCK64_PACK;
+
struct compat_rusage {
struct old_timeval32 ru_utime;
struct old_timeval32 ru_stime;
@@ -896,6 +927,43 @@ asmlinkage long compat_sys_sigaction(int sig,
/* obsolete: net/socket.c */
asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
+#ifdef __ARCH_WANT_COMPAT_TRUNCATE64
+asmlinkage long compat_sys_truncate64(const char __user *pathname, compat_arg_u64(len));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_FTRUNCATE64
+asmlinkage long compat_sys_ftruncate64(unsigned int fd, compat_arg_u64(len));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_FALLOCATE
+asmlinkage long compat_sys_fallocate(int fd, int mode, compat_arg_u64(offset),
+ compat_arg_u64(len));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_PREAD64
+asmlinkage long compat_sys_pread64(unsigned int fd, char __user *buf, size_t count,
+ compat_arg_u64(pos));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_PWRITE64
+asmlinkage long compat_sys_pwrite64(unsigned int fd, const char __user *buf, size_t count,
+ compat_arg_u64(pos));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYNC_FILE_RANGE
+asmlinkage long compat_sys_sync_file_range(int fd, compat_arg_u64(pos),
+ compat_arg_u64(nbytes), unsigned int flags);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_FADVISE64_64
+asmlinkage long compat_sys_fadvise64_64(int fd, compat_arg_u64(pos),
+ compat_arg_u64(len), int advice);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_READAHEAD
+asmlinkage long compat_sys_readahead(int fd, compat_arg_u64(offset), size_t count);
+#endif
+
#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
/**
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 65a60d3313b0..ae1e63e26947 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -46,10 +46,10 @@ static __always_inline bool context_tracking_in_user(void)
return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
}
#else
-static inline bool context_tracking_in_user(void) { return false; }
-static inline bool context_tracking_enabled(void) { return false; }
-static inline bool context_tracking_enabled_cpu(int cpu) { return false; }
-static inline bool context_tracking_enabled_this_cpu(void) { return false; }
+static __always_inline bool context_tracking_in_user(void) { return false; }
+static __always_inline bool context_tracking_enabled(void) { return false; }
+static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
+static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; }
#endif /* CONFIG_CONTEXT_TRACKING */
#endif
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index b66c5f389159..19f0dbfdd7fe 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -130,6 +130,7 @@ enum cpuhp_state {
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
+ CPUHP_LOONGARCH_SOC_PREPARE,
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
CPUHP_BRINGUP_CPU,
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 620821549b23..0f3a656293b0 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -24,11 +24,10 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
unsigned long from, unsigned long pfn,
unsigned long size, pgprot_t prot);
-extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
- unsigned long, int);
-extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset,
- int userbuf);
+ssize_t copy_oldmem_page(struct iov_iter *i, unsigned long pfn, size_t csize,
+ unsigned long offset);
+ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset);
void vmcore_cleanup(void);
@@ -135,13 +134,11 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
#ifdef CONFIG_PROC_VMCORE
-ssize_t read_from_oldmem(char *buf, size_t count,
- u64 *ppos, int userbuf,
- bool encrypted);
+ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
+ u64 *ppos, bool encrypted);
#else
-static inline ssize_t read_from_oldmem(char *buf, size_t count,
- u64 *ppos, int userbuf,
- bool encrypted)
+static inline ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
+ u64 *ppos, bool encrypted)
{
return -EOPNOTSUPP;
}
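A hedged sketch of what an architecture's copy_oldmem_page() can look like after the iov_iter conversion above; it follows the simple highmem-based pattern and is not any particular architecture's implementation.

#include <linux/crash_dump.h>
#include <linux/highmem.h>
#include <linux/uio.h>

ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
			 size_t csize, unsigned long offset)
{
	void *vaddr;
	size_t copied;

	if (!csize)
		return 0;

	vaddr = kmap_local_pfn(pfn);		/* map the old-kernel page */
	copied = copy_to_iter(vaddr + offset, csize, iter);
	kunmap_local(vaddr);

	return copied;
}
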
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 9fc5f99a0ae2..e7b81634c52a 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -14,6 +14,11 @@ struct iomap_ops;
struct iomap_iter;
struct iomap;
+enum dax_access_mode {
+ DAX_ACCESS,
+ DAX_RECOVERY_WRITE,
+};
+
struct dax_operations {
/*
* direct_access: translate a device-relative
@@ -21,7 +26,7 @@ struct dax_operations {
* number of pages available for DAX at that pfn.
*/
long (*direct_access)(struct dax_device *, pgoff_t, long,
- void **, pfn_t *);
+ enum dax_access_mode, void **, pfn_t *);
/*
* Validate whether this device is usable as an fsdax backing
* device.
@@ -30,6 +35,12 @@ struct dax_operations {
sector_t, sector_t);
/* zero_page_range: required operation. Zero page range */
int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
+ /*
+ * recovery_write: recover a poisoned range by DAX device driver
+ * capable of clearing poison.
+ */
+ size_t (*recovery_write)(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *iter);
};
#if IS_ENABLED(CONFIG_DAX)
@@ -40,6 +51,8 @@ void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool dax_synchronous(struct dax_device *dax_dev);
void set_dax_synchronous(struct dax_device *dax_dev);
+size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i);
/*
* Check if given mapping is supported by the file / underlying device.
*/
@@ -87,6 +100,11 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
{
return !(vma->vm_flags & VM_SYNC);
}
+static inline size_t dax_recovery_write(struct dax_device *dax_dev,
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+{
+ return 0;
+}
#endif
void set_dax_nocache(struct dax_device *dax_dev);
@@ -178,7 +196,7 @@ static inline void dax_read_unlock(int id)
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
- void **kaddr, pfn_t *pfn);
+ enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
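A simplified, hypothetical write path showing how the new access mode and recovery_write hook fit together; real callers live in the fsdax iterator and handle partial copies, offsets and retries that are omitted here.

#include <linux/dax.h>
#include <linux/uio.h>

static ssize_t example_dax_write(struct dax_device *dax_dev, pgoff_t pgoff,
				 size_t bytes, struct iov_iter *iter)
{
	void *kaddr;
	long map_len;

	map_len = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, NULL);
	if (map_len == -EIO) {
		/* poisoned media: remap for recovery and let the driver clear it */
		map_len = dax_direct_access(dax_dev, pgoff, 1, DAX_RECOVERY_WRITE,
					    &kaddr, NULL);
		if (map_len <= 0)
			return map_len ? map_len : -EIO;
		return dax_recovery_write(dax_dev, pgoff, kaddr, bytes, iter);
	}
	if (map_len <= 0)
		return map_len ? map_len : -EIO;

	return dax_copy_from_iter(dax_dev, pgoff, kaddr, bytes, iter);
}
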
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index c2a3758c4aaa..47a01c7cffdf 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -20,6 +20,7 @@ struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
+enum dax_access_mode;
/*
* Type of table, mapped_device's mempool and request_queue
@@ -146,10 +147,19 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
* >= 0 : the number of bytes accessible at the address
*/
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn);
+ long nr_pages, enum dax_access_mode node, void **kaddr,
+ pfn_t *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages);
+/*
+ * Returns:
+ * != 0 : number of bytes transferred
+ * 0 : recovery write failed
+ */
+typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i);
+
void dm_error(const char *message);
struct dm_dev {
@@ -199,6 +209,7 @@ struct target_type {
dm_io_hints_fn io_hints;
dm_dax_direct_access_fn direct_access;
dm_dax_zero_page_range_fn dax_zero_page_range;
+ dm_dax_recovery_write_fn dax_recovery_write;
/* For internal device-mapper use. */
struct list_head list;
diff --git a/include/linux/device.h b/include/linux/device.h
index 93459724dcde..dc941997795c 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -387,6 +387,75 @@ struct dev_msi_info {
};
/**
+ * enum device_physical_location_panel - Describes which panel surface of the
+ * system's housing the device connection point resides on.
+ * @DEVICE_PANEL_TOP: Device connection point is on the top panel.
+ * @DEVICE_PANEL_BOTTOM: Device connection point is on the bottom panel.
+ * @DEVICE_PANEL_LEFT: Device connection point is on the left panel.
+ * @DEVICE_PANEL_RIGHT: Device connection point is on the right panel.
+ * @DEVICE_PANEL_FRONT: Device connection point is on the front panel.
+ * @DEVICE_PANEL_BACK: Device connection point is on the back panel.
+ * @DEVICE_PANEL_UNKNOWN: The panel with device connection point is unknown.
+ */
+enum device_physical_location_panel {
+ DEVICE_PANEL_TOP,
+ DEVICE_PANEL_BOTTOM,
+ DEVICE_PANEL_LEFT,
+ DEVICE_PANEL_RIGHT,
+ DEVICE_PANEL_FRONT,
+ DEVICE_PANEL_BACK,
+ DEVICE_PANEL_UNKNOWN,
+};
+
+/**
+ * enum device_physical_location_vertical_position - Describes vertical
+ * position of the device connection point on the panel surface.
+ * @DEVICE_VERT_POS_UPPER: Device connection point is at upper part of panel.
+ * @DEVICE_VERT_POS_CENTER: Device connection point is at center part of panel.
+ * @DEVICE_VERT_POS_LOWER: Device connection point is at lower part of panel.
+ */
+enum device_physical_location_vertical_position {
+ DEVICE_VERT_POS_UPPER,
+ DEVICE_VERT_POS_CENTER,
+ DEVICE_VERT_POS_LOWER,
+};
+
+/**
+ * enum device_physical_location_horizontal_position - Describes horizontal
+ * position of the device connection point on the panel surface.
+ * @DEVICE_HORI_POS_LEFT: Device connection point is at left part of panel.
+ * @DEVICE_HORI_POS_CENTER: Device connection point is at center part of panel.
+ * @DEVICE_HORI_POS_RIGHT: Device connection point is at right part of panel.
+ */
+enum device_physical_location_horizontal_position {
+ DEVICE_HORI_POS_LEFT,
+ DEVICE_HORI_POS_CENTER,
+ DEVICE_HORI_POS_RIGHT,
+};
+
+/**
+ * struct device_physical_location - Device data related to physical location
+ * of the device connection point.
+ * @panel: Panel surface of the system's housing that the device connection
+ * point resides on.
+ * @vertical_position: Vertical position of the device connection point within
+ * the panel.
+ * @horizontal_position: Horizontal position of the device connection point
+ * within the panel.
+ * @dock: Set if the device connection point resides in a docking station or
+ * port replicator.
+ * @lid: Set if this device connection point resides on the lid of laptop
+ * system.
+ */
+struct device_physical_location {
+ enum device_physical_location_panel panel;
+ enum device_physical_location_vertical_position vertical_position;
+ enum device_physical_location_horizontal_position horizontal_position;
+ bool dock;
+ bool lid;
+};
+
+/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
* In most cases, a parent device is some sort of bus or host
@@ -400,8 +469,6 @@ struct dev_msi_info {
* This identifies the device type and carries type-specific
* information.
* @mutex: Mutex to synchronize calls to its driver.
- * @lockdep_mutex: An optional debug lock that a subsystem can use as a
- * peer lock to gain localized lockdep coverage of the device_lock.
* @bus: Type of bus device is on.
* @driver: Which driver has allocated this
* @platform_data: Platform data specific to the device.
@@ -453,6 +520,8 @@ struct dev_msi_info {
* device (i.e. the bus driver that discovered the device).
* @iommu_group: IOMMU group the device belongs to.
* @iommu: Per device generic IOMMU runtime data
+ * @physical_location: Describes physical location of the device connection
+ * point in the system housing.
* @removable: Whether the device can be removed from the system. This
* should be set by the subsystem / bus driver that discovered
* the device.
@@ -499,9 +568,6 @@ struct device {
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set_drvdata/dev_get_drvdata */
-#ifdef CONFIG_PROVE_LOCKING
- struct mutex lockdep_mutex;
-#endif
struct mutex mutex; /* mutex to synchronize calls to
* its driver.
*/
@@ -567,6 +633,8 @@ struct device {
struct iommu_group *iommu_group;
struct dev_iommu *iommu;
+ struct device_physical_location *physical_location;
+
enum device_removable removable;
bool offline_disabled:1;
@@ -850,6 +918,49 @@ static inline bool device_supports_offline(struct device *dev)
return dev->bus && dev->bus->offline && dev->bus->online;
}
+#define __device_lock_set_class(dev, name, key) \
+do { \
+ struct device *__d2 __maybe_unused = dev; \
+ lock_set_class(&__d2->mutex.dep_map, name, key, 0, _THIS_IP_); \
+} while (0)
+
+/**
+ * device_lock_set_class - Specify a temporary lock class while a device
+ * is attached to a driver
+ * @dev: device to modify
+ * @key: lock class key data
+ *
+ * This must be called with the device_lock() already held, for example
+ * from driver ->probe(). Take care to only override the default
+ * lockdep_no_validate class.
+ */
+#ifdef CONFIG_LOCKDEP
+#define device_lock_set_class(dev, key) \
+do { \
+ struct device *__d = dev; \
+ dev_WARN_ONCE(__d, !lockdep_match_class(&__d->mutex, \
+ &__lockdep_no_validate__), \
+ "overriding existing custom lock class\n"); \
+ __device_lock_set_class(__d, #key, key); \
+} while (0)
+#else
+#define device_lock_set_class(dev, key) __device_lock_set_class(dev, #key, key)
+#endif
+
+/**
+ * device_lock_reset_class - Return a device to the default lockdep novalidate state
+ * @dev: device to modify
+ *
+ * This must be called with the device_lock() already held, for example
+ * from driver ->remove().
+ */
+#define device_lock_reset_class(dev) \
+do { \
+ struct device *__d __maybe_unused = dev; \
+ lock_set_novalidate_class(&__d->mutex.dep_map, "&dev->mutex", \
+ _THIS_IP_); \
+} while (0)
+
void lock_device_hotplug(void);
void unlock_device_hotplug(void);
int lock_device_hotplug_sysfs(void);
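A minimal sketch of the intended pairing for the new lock-class helpers, based on the kernel-doc above; the key and the probe/remove names are illustrative.

#include <linux/device.h>

static struct lock_class_key example_dev_lock_key;

/* ->probe() runs with device_lock() held, as the kernel-doc requires */
static int example_probe(struct device *dev)
{
	device_lock_set_class(dev, &example_dev_lock_key);
	return 0;
}

static void example_remove(struct device *dev)
{
	device_lock_reset_class(dev);	/* back to the novalidate default */
}
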
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index a039ab809753..d8b29ccd07e5 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -59,6 +59,8 @@ struct fwnode_handle;
* bus supports.
* @dma_configure: Called to setup DMA configuration on a device on
* this bus.
+ * @dma_cleanup: Called to cleanup DMA configuration on a device on
+ * this bus.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
@@ -103,6 +105,7 @@ struct bus_type {
int (*num_vf)(struct device *dev);
int (*dma_configure)(struct device *dev);
+ void (*dma_cleanup)(struct device *dev);
const struct dev_pm_ops *pm;
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index 15e7c5e15d62..700453017e1c 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -151,6 +151,8 @@ extern int __must_check driver_create_file(struct device_driver *driver,
extern void driver_remove_file(struct device_driver *driver,
const struct driver_attribute *attr);
+int driver_set_override(struct device *dev, const char **override,
+ const char *s, size_t len);
extern int __must_check driver_for_each_device(struct device_driver *drv,
struct device *start,
void *data,
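A hedged sketch of how a bus's driver_override sysfs store method is expected to use the new driver_set_override() helper instead of open-coding kstrndup()/kfree(); struct example_device and to_example_device() are assumptions for illustration.

#include <linux/device.h>
#include <linux/device/driver.h>

struct example_device {
	struct device dev;
	const char *driver_override;	/* allocated and freed via driver_set_override() */
};
#define to_example_device(d) container_of(d, struct example_device, dev)

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct example_device *edev = to_example_device(dev);
	int ret;

	ret = driver_set_override(dev, &edev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}
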
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 842d4f7ca752..b46b88e6aa0d 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -870,7 +870,6 @@ struct dma_device {
struct device *dev;
struct module *owner;
struct ida chan_ida;
- struct mutex chan_mutex; /* to protect chan_ida */
u32 src_addr_widths;
u32 dst_addr_widths;
@@ -1031,6 +1030,14 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
+/**
+ * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor.
+ * @chan: The channel to be used for this descriptor
+ * @dest: Address of buffer to be set
+ * @value: Treated as a single byte value that fills the destination buffer
+ * @len: The total size of dest
+ * @flags: DMA engine flags
+ */
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags)
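A short, hypothetical helper exercising the newly documented dmaengine_prep_dma_memset(); channel setup, completion handling and teardown are left out.

#include <linux/dmaengine.h>

static int example_dma_fill(struct dma_chan *chan, dma_addr_t dst,
			    int value, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_dma_memset(chan, dst, value, len,
					 DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);	/* kick the engine */
	return 0;
}
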
diff --git a/include/linux/export.h b/include/linux/export.h
index 565c5ffcb26f..3f31ced0d977 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H
+#include <linux/stringify.h>
+
/*
* Export symbols from the kernel to modules. Forked from module.h
* to reduce the amount of pointless cruft we feed to gcc when only
@@ -140,7 +142,6 @@ struct kernel_symbol {
#endif /* CONFIG_MODULES */
#ifdef DEFAULT_SYMBOL_NAMESPACE
-#include <linux/stringify.h>
#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, __stringify(DEFAULT_SYMBOL_NAMESPACE))
#else
#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, "")
@@ -148,8 +149,8 @@ struct kernel_symbol {
#define EXPORT_SYMBOL(sym) _EXPORT_SYMBOL(sym, "")
#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_gpl")
-#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", #ns)
-#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "_gpl", #ns)
+#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", __stringify(ns))
+#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "_gpl", __stringify(ns))
#endif /* !__ASSEMBLY__ */
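With __stringify() the namespace argument is macro-expanded before it becomes a string, so a construct like the following hypothetical export now lands in the expanded namespace rather than the literal token.

#include <linux/export.h>

#define EXAMPLE_NS	EXAMPLE_SUBSYS	/* expands before stringification */

int example_fn(void)
{
	return 0;
}
/* exported into namespace "EXAMPLE_SUBSYS", not "EXAMPLE_NS" */
EXPORT_SYMBOL_NS_GPL(example_fn, EXAMPLE_NS);
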
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 0c19010da77f..685401d94d39 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -296,7 +296,7 @@ static inline void devm_extcon_unregister_notifier_all(struct device *dev,
static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
- return ERR_PTR(-ENODEV);
+ return NULL;
}
static inline struct extcon_dev *extcon_find_edev_by_node(struct device_node *node)
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index d0e78174874a..e066816f3519 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -125,7 +125,7 @@ int iterate_fd(struct files_struct *, unsigned,
extern int close_fd(unsigned int fd);
extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
-extern int close_fd_get_file(unsigned int fd, struct file **res);
+extern struct file *close_fd_get_file(unsigned int fd);
extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
struct files_struct **new_fdp);
diff --git a/include/linux/file.h b/include/linux/file.h
index 51e830b4fe3a..39704eae83e2 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -14,7 +14,6 @@
struct file;
extern void fput(struct file *);
-extern void fput_many(struct file *, unsigned int);
struct file_operations;
struct task_struct;
@@ -47,7 +46,6 @@ static inline void fdput(struct fd fd)
}
extern struct file *fget(unsigned int fd);
-extern struct file *fget_many(unsigned int fd, unsigned int refs);
extern struct file *fget_raw(unsigned int fd);
extern struct file *fget_task(struct task_struct *task, unsigned int fd);
extern unsigned long __fdget(unsigned int fd);
diff --git a/include/linux/find.h b/include/linux/find.h
index 5bb6db213bcb..424ef67d4a42 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -21,8 +21,8 @@ extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long siz
/**
* find_next_bit - find the next set bit in a memory region
* @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
*
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
@@ -50,8 +50,8 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
* find_next_and_bit - find the next set bit in both memory regions
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
- * @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
*
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
@@ -79,8 +79,8 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
/**
* find_next_zero_bit - find the next cleared bit in a memory region
* @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
*
* Returns the bit number of the next zero bit
* If no bits are zero, returns @size.
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index ec2ccfebef65..de7fea3bca51 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -17,6 +17,64 @@ struct firmware {
void *priv;
};
+/**
+ * enum fw_upload_err - firmware upload error codes
+ * @FW_UPLOAD_ERR_NONE: returned to indicate success
+ * @FW_UPLOAD_ERR_HW_ERROR: error signalled by hardware, see kernel log
+ * @FW_UPLOAD_ERR_TIMEOUT: SW timed out on handshake with HW/firmware
+ * @FW_UPLOAD_ERR_CANCELED: upload was cancelled by the user
+ * @FW_UPLOAD_ERR_BUSY: there is an upload operation already in progress
+ * @FW_UPLOAD_ERR_INVALID_SIZE: invalid firmware image size
+ * @FW_UPLOAD_ERR_RW_ERROR: read or write to HW failed, see kernel log
+ * @FW_UPLOAD_ERR_WEAROUT: FLASH device is approaching wear-out, wait & retry
+ * @FW_UPLOAD_ERR_MAX: Maximum error code marker
+ */
+enum fw_upload_err {
+ FW_UPLOAD_ERR_NONE,
+ FW_UPLOAD_ERR_HW_ERROR,
+ FW_UPLOAD_ERR_TIMEOUT,
+ FW_UPLOAD_ERR_CANCELED,
+ FW_UPLOAD_ERR_BUSY,
+ FW_UPLOAD_ERR_INVALID_SIZE,
+ FW_UPLOAD_ERR_RW_ERROR,
+ FW_UPLOAD_ERR_WEAROUT,
+ FW_UPLOAD_ERR_MAX
+};
+
+struct fw_upload {
+ void *dd_handle; /* reference to parent driver */
+ void *priv; /* firmware loader private fields */
+};
+
+/**
+ * struct fw_upload_ops - device specific operations to support firmware upload
+ * @prepare: Required: Prepare secure update
+ * @write: Required: The write() op receives the remaining
+ * size to be written and must return the actual
+ * size written or a negative error code. The write()
+ * op will be called repeatedly until all data is
+ * written.
+ * @poll_complete: Required: Check for the completion of the
+ * HW authentication/programming process.
+ * @cancel: Required: Request cancellation of update. This op
+ * is called from the context of a different kernel
+ * thread, so race conditions need to be considered.
+ * @cleanup: Optional: Complements the prepare()
+ * function and is called at the completion
+ * of the update, on success or failure, if the
+ * prepare function succeeded.
+ */
+struct fw_upload_ops {
+ enum fw_upload_err (*prepare)(struct fw_upload *fw_upload,
+ const u8 *data, u32 size);
+ enum fw_upload_err (*write)(struct fw_upload *fw_upload,
+ const u8 *data, u32 offset,
+ u32 size, u32 *written);
+ enum fw_upload_err (*poll_complete)(struct fw_upload *fw_upload);
+ void (*cancel)(struct fw_upload *fw_upload);
+ void (*cleanup)(struct fw_upload *fw_upload);
+};
+
struct module;
struct device;
@@ -112,6 +170,30 @@ static inline int request_partial_firmware_into_buf
#endif
+#ifdef CONFIG_FW_UPLOAD
+
+struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle);
+void firmware_upload_unregister(struct fw_upload *fw_upload);
+
+#else
+
+static inline struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void firmware_upload_unregister(struct fw_upload *fw_upload)
+{
+}
+
+#endif
+
int firmware_request_cache(struct device *device, const char *name);
#endif
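A hedged sketch of a driver wiring up the new firmware-upload interface; the ops bodies are placeholders and every example_* name is an assumption, not a real driver.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/module.h>

static enum fw_upload_err example_prepare(struct fw_upload *fwl,
					  const u8 *data, u32 size)
{
	return FW_UPLOAD_ERR_NONE;	/* e.g. put the device into update mode */
}

static enum fw_upload_err example_write(struct fw_upload *fwl, const u8 *data,
					u32 offset, u32 size, u32 *written)
{
	*written = size;		/* pretend the whole chunk was flashed */
	return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err example_poll_complete(struct fw_upload *fwl)
{
	return FW_UPLOAD_ERR_NONE;
}

static void example_cancel(struct fw_upload *fwl)
{
}

static const struct fw_upload_ops example_fw_ops = {
	.prepare	= example_prepare,
	.write		= example_write,
	.poll_complete	= example_poll_complete,
	.cancel		= example_cancel,
};

static int example_register_upload(struct device *parent, void *drvdata)
{
	struct fw_upload *fwl;

	fwl = firmware_upload_register(THIS_MODULE, parent, "example-fw",
				       &example_fw_ops, drvdata);
	if (IS_ERR(fwl))
		return PTR_ERR(fwl);

	/* keep fwl around and call firmware_upload_unregister() on teardown */
	return 0;
}
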
diff --git a/include/linux/firmware/xlnx-event-manager.h b/include/linux/firmware/xlnx-event-manager.h
index 3f87c4929d21..82e8254b0f80 100644
--- a/include/linux/firmware/xlnx-event-manager.h
+++ b/include/linux/firmware/xlnx-event-manager.h
@@ -17,7 +17,7 @@ int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
event_cb_func_t cb_fun, void *data);
int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
- const u32 event, event_cb_func_t cb_fun);
+ const u32 event, event_cb_func_t cb_fun, void *data);
#else
static inline int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
const u32 event, const bool wake,
@@ -27,7 +27,7 @@ static inline int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32
}
static inline int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
- const u32 event, event_cb_func_t cb_fun)
+ const u32 event, event_cb_func_t cb_fun, void *data)
{
return -ENODEV;
}
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 14f00a7672d1..1ec73d5352c3 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -29,6 +29,11 @@
/* SMC SIP service Call Function Identifier Prefix */
#define PM_SIP_SVC 0xC2000000
+
+/* PM API versions */
+#define PM_API_VERSION_2 2
+
+/* ATF only commands */
#define PM_GET_TRUSTZONE_VERSION 0xa03
#define PM_SET_SUSPEND_MODE 0xa02
#define GET_CALLBACK_DATA 0xa01
@@ -460,6 +465,7 @@ int zynqmp_pm_load_pdi(const u32 src, const u64 address);
int zynqmp_pm_register_notifier(const u32 node, const u32 event,
const u32 wake, const u32 enable);
int zynqmp_pm_feature(const u32 api_id);
+int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id);
int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value);
int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload);
#else
@@ -678,6 +684,11 @@ static inline int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
return -ENODEV;
}
+static inline int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id)
{
return -ENODEV;
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
index 3b87f232425c..9d4d32909340 100644
--- a/include/linux/fpga/fpga-region.h
+++ b/include/linux/fpga/fpga-region.h
@@ -52,9 +52,9 @@ struct fpga_region {
#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
-struct fpga_region *fpga_region_class_find(
- struct device *start, const void *data,
- int (*match)(struct device *, const void *));
+struct fpga_region *
+fpga_region_class_find(struct device *start, const void *data,
+ int (*match)(struct device *, const void *));
int fpga_region_program_fpga(struct fpga_region *region);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 01403e637271..9ad5e3520fae 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -974,9 +974,7 @@ static inline struct file *get_file(struct file *f)
atomic_long_inc(&f->f_count);
return f;
}
-#define get_file_rcu_many(x, cnt) \
- atomic_long_add_unless(&(x)->f_count, (cnt), 0)
-#define get_file_rcu(x) get_file_rcu_many((x), 1)
+#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
#define file_count(x) atomic_long_read(&(x)->f_count)
#define MAX_NON_LFS ((1UL<<31) - 1)
@@ -2471,22 +2469,11 @@ struct super_block *sget(struct file_system_type *type,
extern int register_filesystem(struct file_system_type *);
extern int unregister_filesystem(struct file_system_type *);
-extern struct vfsmount *kern_mount(struct file_system_type *);
-extern void kern_unmount(struct vfsmount *mnt);
-extern int may_umount_tree(struct vfsmount *);
-extern int may_umount(struct vfsmount *);
-extern long do_mount(const char *, const char __user *,
- const char *, unsigned long, void *);
-extern struct vfsmount *collect_mounts(const struct path *);
-extern void drop_collected_mounts(struct vfsmount *);
-extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
- struct vfsmount *);
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
-extern bool our_mnt(struct vfsmount *mnt);
extern __printf(2, 3)
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
extern int super_setup_bdi(struct super_block *sb);
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 7b6c42bfb660..a86115bc799c 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -32,6 +32,13 @@ struct fsl_mc_io;
* @shutdown: Function called at shutdown time to quiesce the device
* @suspend: Function called when a device is stopped
* @resume: Function called when a device is resumed
+ * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
+ * For most device drivers, no need to care about this flag
+ * as long as all DMAs are handled through the kernel DMA API.
+ * For some special ones, for example VFIO drivers, they know
+ * how to manage the DMA themselves and set this flag so that
+ * the IOMMU layer will allow them to setup and manage their
+ * own I/O address space.
*
* Generic DPAA device driver object for device drivers that are registered
* with a DPRC bus. This structure is to be embedded in each device-specific
@@ -45,6 +52,7 @@ struct fsl_mc_driver {
void (*shutdown)(struct fsl_mc_device *dev);
int (*suspend)(struct fsl_mc_device *dev, pm_message_t state);
int (*resume)(struct fsl_mc_device *dev);
+ bool driver_managed_dma;
};
#define to_fsl_mc_driver(_drv) \
@@ -170,7 +178,9 @@ struct fsl_mc_obj_desc {
* @regions: pointer to array of MMIO region entries
* @irqs: pointer to array of pointers to interrupts allocated to this device
* @resource: generic resource associated with this MC object device, if any.
- * @driver_override: driver name to force a match
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
*
* Generic device object for MC object devices that are "attached" to a
* MC bus.
@@ -204,7 +214,7 @@ struct fsl_mc_device {
struct fsl_mc_device_irq **irqs;
struct fsl_mc_resource *resource;
struct device_link *consumer_link;
- char *driver_override;
+ const char *driver_override;
};
#define to_fsl_mc_device(_dev) \
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 3eb03d88320d..979f6bfa2c25 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -452,8 +452,8 @@ static inline void stack_tracer_enable(void) { }
#ifdef CONFIG_DYNAMIC_FTRACE
-int ftrace_arch_code_modify_prepare(void);
-int ftrace_arch_code_modify_post_process(void);
+void ftrace_arch_code_modify_prepare(void);
+void ftrace_arch_code_modify_post_process(void);
enum ftrace_bug_type {
FTRACE_BUG_UNKNOWN,
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index e71f6e1bfafe..fe0f460d9a3b 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -109,6 +109,8 @@ int gpiod_get_direction(struct gpio_desc *desc);
int gpiod_direction_input(struct gpio_desc *desc);
int gpiod_direction_output(struct gpio_desc *desc, int value);
int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
+int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
+int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
@@ -350,8 +352,18 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
WARN_ON(desc);
return -ENOSYS;
}
-
-
+static inline int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc,
+ unsigned long flags)
+{
+ WARN_ON(desc);
+ return -ENOSYS;
+}
+static inline int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc,
+ unsigned long flags)
+{
+ WARN_ON(desc);
+ return -ENOSYS;
+}
static inline int gpiod_get_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
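
Editor's note: the two new consumer calls wrap the chip-level en_hw_timestamp/dis_hw_timestamp hooks added in the driver.h hunk below, and the !GPIOLIB stubs keep callers compiling. A minimal, hedged sketch; the con_id "snapshot" and the meaning of flags are illustrative, the flags are ultimately interpreted by the provider.

static struct gpio_desc *foo_enable_snapshot(struct device *dev,
					     unsigned long flags)
{
	struct gpio_desc *d;
	int ret;

	/* "snapshot" is a made-up con_id for this sketch */
	d = gpiod_get(dev, "snapshot", GPIOD_IN);
	if (IS_ERR(d))
		return d;

	/* flags are provider specific, e.g. which edges to stamp */
	ret = gpiod_enable_hw_timestamp_ns(d, flags);
	if (ret) {
		gpiod_put(d);
		return ERR_PTR(ret);
	}

	/* later: gpiod_disable_hw_timestamp_ns(d, flags); gpiod_put(d); */
	return d;
}
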
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 09792529cff2..b1e0f1f8ee2e 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -333,6 +333,10 @@ struct gpio_irq_chip {
* @add_pin_ranges: optional routine to initialize pin ranges, to be used when
* requires special mapping of the pins that provides GPIO functionality.
* It is called after adding GPIO chip and before adding IRQ chip.
+ * @en_hw_timestamp: Dependent on GPIO chip, an optional routine to
+ * enable hardware timestamp.
+ * @dis_hw_timestamp: Dependent on GPIO chip, an optional routine to
+ * disable hardware timestamp.
* @base: identifies the first GPIO number handled by this chip;
* or, if negative during registration, requests dynamic ID allocation.
* DEPRECATION: providing anything non-negative and nailing the base
@@ -429,6 +433,12 @@ struct gpio_chip {
int (*add_pin_ranges)(struct gpio_chip *gc);
+ int (*en_hw_timestamp)(struct gpio_chip *gc,
+ u32 offset,
+ unsigned long flags);
+ int (*dis_hw_timestamp)(struct gpio_chip *gc,
+ u32 offset,
+ unsigned long flags);
int base;
u16 ngpio;
u16 offset;
@@ -502,6 +512,18 @@ struct gpio_chip {
*/
int (*of_xlate)(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec, u32 *flags);
+
+ /**
+ * @of_gpio_ranges_fallback:
+ *
+ * Optional hook for the case that no gpio-ranges property is defined
+ * within the device tree node "np" (usually DT before introduction
+ * of gpio-ranges). So this callback is helpful to provide the
+ * necessary backward compatibility for the pin ranges.
+ */
+ int (*of_gpio_ranges_fallback)(struct gpio_chip *gc,
+ struct device_node *np);
+
#endif /* CONFIG_OF_GPIO */
};
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 2647dd10b541..4d55da28e664 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -64,6 +64,18 @@ struct gpiod_hog {
};
/*
+ * Helper for lookup tables with just one single lookup for a device.
+ */
+#define GPIO_LOOKUP_SINGLE(_name, _dev_id, _key, _chip_hwnum, _con_id, _flags) \
+static struct gpiod_lookup_table _name = { \
+ .dev_id = _dev_id, \
+ .table = { \
+ GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags), \
+ {}, \
+ }, \
+}
+
+/*
* Simple definition of a single GPIO under a con_id
*/
#define GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags) \
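
Editor's note: GPIO_LOOKUP_SINGLE() collapses the common one-entry lookup table into a single definition. A short usage sketch; the device id, chip label, hwnum and con_id below are only examples.

/* one-entry lookup table, previously several lines of open-coded struct */
GPIO_LOOKUP_SINGLE(foo_reset_gpio_table, "foo-device",
		   "gpio-bank0", 7, "reset", GPIO_ACTIVE_LOW);

static int __init foo_board_init(void)
{
	gpiod_add_lookup_table(&foo_reset_gpio_table);
	return 0;
}
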
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 177f7b7cd414..6cabafffd0dd 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -168,6 +168,12 @@ enum qm_vf_state {
QM_NOT_READY,
};
+struct dfx_diff_registers {
+ u32 *regs;
+ u32 reg_offset;
+ u32 reg_len;
+};
+
struct qm_dfx {
atomic64_t err_irq_cnt;
atomic64_t aeq_irq_cnt;
@@ -190,6 +196,11 @@ struct qm_debug {
struct dentry *debug_root;
struct dentry *qm_d;
struct debugfs_file files[DEBUG_FILE_NUM];
+ unsigned int *qm_last_words;
+ /* ACC engines recording last regs */
+ unsigned int *last_words;
+ struct dfx_diff_registers *qm_diff_regs;
+ struct dfx_diff_registers *acc_diff_regs;
};
struct qm_shaper_factor {
@@ -243,6 +254,7 @@ struct hisi_qm_err_ini {
void (*open_sva_prefetch)(struct hisi_qm *qm);
void (*close_sva_prefetch)(struct hisi_qm *qm);
void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
+ void (*show_last_dfx_regs)(struct hisi_qm *qm);
void (*err_info_init)(struct hisi_qm *qm);
};
@@ -433,21 +445,22 @@ int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
-struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type);
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
int hisi_qm_stop_qp(struct hisi_qp *qp);
-void hisi_qm_release_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
-int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
-int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
void hisi_qm_debug_init(struct hisi_qm *qm);
-enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
+int hisi_qm_diff_regs_init(struct hisi_qm *qm,
+ struct dfx_diff_registers *dregs, int reg_len);
+void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len);
+void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
+ struct dfx_diff_registers *dregs, int regs_len);
+
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
pci_channel_state_t state);
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
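
Editor's note: the diff-register helpers let the accelerator drivers snapshot a set of registers and later dump only the deltas through debugfs. A rough, hedged sketch of how a driver might wire them up; the offsets are placeholders and the assumption that hisi_qm_diff_regs_init() populates qm->debug.acc_diff_regs is the editor's, not stated by this hunk.

static struct dfx_diff_registers foo_diff_regs[] = {
	{ .reg_offset = 0x1000, .reg_len = 16 },	/* placeholder offsets */
	{ .reg_offset = 0x2000, .reg_len = 4 },
};

static int foo_debugfs_init(struct hisi_qm *qm)
{
	/* snapshot the listed registers so later dumps show only deltas */
	return hisi_qm_diff_regs_init(qm, foo_diff_regs,
				      ARRAY_SIZE(foo_diff_regs));
}

static int foo_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
				   ARRAY_SIZE(foo_diff_regs));
	return 0;
}
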
diff --git a/include/linux/host1x_context_bus.h b/include/linux/host1x_context_bus.h
new file mode 100644
index 000000000000..72462737a6db
--- /dev/null
+++ b/include/linux/host1x_context_bus.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+ */
+
+#ifndef __LINUX_HOST1X_CONTEXT_BUS_H
+#define __LINUX_HOST1X_CONTEXT_BUS_H
+
+#include <linux/device.h>
+
+#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
+extern struct bus_type host1x_context_device_bus_type;
+#endif
+
+#endif
diff --git a/include/linux/hte.h b/include/linux/hte.h
new file mode 100644
index 000000000000..8289055061ab
--- /dev/null
+++ b/include/linux/hte.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_HTE_H
+#define __LINUX_HTE_H
+
+#include <linux/errno.h>
+
+struct hte_chip;
+struct hte_device;
+struct of_phandle_args;
+
+/**
+ * enum hte_edge - HTE line edge flags.
+ *
+ * @HTE_EDGE_NO_SETUP: No edge setup. In this case the consumer sets up the
+ * edges itself, for example when requesting the IRQ.
+ * @HTE_RISING_EDGE_TS: Rising edge.
+ * @HTE_FALLING_EDGE_TS: Falling edge.
+ *
+ */
+enum hte_edge {
+ HTE_EDGE_NO_SETUP = 1U << 0,
+ HTE_RISING_EDGE_TS = 1U << 1,
+ HTE_FALLING_EDGE_TS = 1U << 2,
+};
+
+/**
+ * enum hte_return - HTE subsystem return values used during callback.
+ *
+ * @HTE_CB_HANDLED: The consumer handled the data.
+ * @HTE_RUN_SECOND_CB: The consumer needs further processing, in that case
+ * HTE subsystem calls secondary callback provided by the consumer where it
+ * is allowed to sleep.
+ */
+enum hte_return {
+ HTE_CB_HANDLED,
+ HTE_RUN_SECOND_CB,
+};
+
+/**
+ * struct hte_ts_data - HTE timestamp data.
+ *
+ * @tsc: Timestamp value.
+ * @seq: Sequence counter of the timestamps.
+ * @raw_level: Level of the line at the timestamp if provider supports it,
+ * -1 otherwise.
+ */
+struct hte_ts_data {
+ u64 tsc;
+ u64 seq;
+ int raw_level;
+};
+
+/**
+ * struct hte_clk_info - Clock source info that HTE provider uses to timestamp.
+ *
+ * @hz: Supported clock rate in HZ, for example 1KHz clock = 1000.
+ * @type: Supported clock type.
+ */
+struct hte_clk_info {
+ u64 hz;
+ clockid_t type;
+};
+
+/**
+ * typedef hte_ts_cb_t - HTE timestamp data processing primary callback.
+ *
+ * The callback is used to push timestamp data to the client and it is
+ * not allowed to sleep.
+ *
+ * @ts: HW timestamp data.
+ * @data: Client supplied data.
+ */
+typedef enum hte_return (*hte_ts_cb_t)(struct hte_ts_data *ts, void *data);
+
+/**
+ * typedef hte_ts_sec_cb_t - HTE timestamp data processing secondary callback.
+ *
+ * This is used when the client needs further processing where it is
+ * allowed to sleep.
+ *
+ * @data: Client supplied data.
+ *
+ */
+typedef enum hte_return (*hte_ts_sec_cb_t)(void *data);
+
+/**
+ * struct hte_line_attr - Line attributes.
+ *
+ * @line_id: The logical ID understood by the consumers and providers.
+ * @line_data: Line data related to line_id.
+ * @edge_flags: Edge setup flags.
+ * @name: Descriptive name of the entity that is being monitored for the
+ * hardware timestamping. If null, HTE core will construct the name.
+ *
+ */
+struct hte_line_attr {
+ u32 line_id;
+ void *line_data;
+ unsigned long edge_flags;
+ const char *name;
+};
+
+/**
+ * struct hte_ts_desc - HTE timestamp descriptor.
+ *
+ * This structure is a communication token between consumers to subsystem
+ * and subsystem to providers.
+ *
+ * @attr: The line attributes.
+ * @hte_data: Subsystem's private data, set by HTE subsystem.
+ */
+struct hte_ts_desc {
+ struct hte_line_attr attr;
+ void *hte_data;
+};
+
+/**
+ * struct hte_ops - HTE operations set by providers.
+ *
+ * @request: Hook for requesting a HTE timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @release: Hook for releasing a HTE timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @enable: Hook to enable the specified timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @disable: Hook to disable specified timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @get_clk_src_info: Hook to get the clock information the provider uses
+ * to timestamp. Returns 0 for success and negative error code for failure. On
+ * success HTE subsystem fills up provided struct hte_clk_info.
+ *
+ * xlated_id parameter is used to communicate between HTE subsystem and the
+ * providers and is translated by the provider.
+ */
+struct hte_ops {
+ int (*request)(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 xlated_id);
+ int (*release)(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 xlated_id);
+ int (*enable)(struct hte_chip *chip, u32 xlated_id);
+ int (*disable)(struct hte_chip *chip, u32 xlated_id);
+ int (*get_clk_src_info)(struct hte_chip *chip,
+ struct hte_clk_info *ci);
+};
+
+/**
+ * struct hte_chip - Abstract HTE chip.
+ *
+ * @name: functional name of the HTE IP block.
+ * @dev: device providing the HTE.
+ * @ops: callbacks for this HTE.
+ * @nlines: number of lines/signals supported by this chip.
+ * @xlate_of: Callback which translates consumer supplied logical ids to
+ * physical ids; returns 0 on success and a negative value on failure.
+ * On success it stores the translated id (between 0 and @nlines) in the
+ * xlated_id parameter.
+ * @xlate_plat: Same as above but for the consumers with no DT node.
+ * @match_from_linedata: Match HTE device using the line_data.
+ * @of_hte_n_cells: Number of cells used to form the HTE specifier.
+ * @gdev: HTE subsystem abstract device, internal to the HTE subsystem.
+ * @data: chip specific private data.
+ */
+struct hte_chip {
+ const char *name;
+ struct device *dev;
+ const struct hte_ops *ops;
+ u32 nlines;
+ int (*xlate_of)(struct hte_chip *gc,
+ const struct of_phandle_args *args,
+ struct hte_ts_desc *desc, u32 *xlated_id);
+ int (*xlate_plat)(struct hte_chip *gc, struct hte_ts_desc *desc,
+ u32 *xlated_id);
+ bool (*match_from_linedata)(const struct hte_chip *chip,
+ const struct hte_ts_desc *hdesc);
+ u8 of_hte_n_cells;
+
+ struct hte_device *gdev;
+ void *data;
+};
+
+#if IS_ENABLED(CONFIG_HTE)
+/* HTE APIs for the providers */
+int devm_hte_register_chip(struct hte_chip *chip);
+int hte_push_ts_ns(const struct hte_chip *chip, u32 xlated_id,
+ struct hte_ts_data *data);
+
+/* HTE APIs for the consumers */
+int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+ unsigned long edge_flags, const char *name,
+ void *data);
+int hte_ts_get(struct device *dev, struct hte_ts_desc *desc, int index);
+int hte_ts_put(struct hte_ts_desc *desc);
+int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data);
+int devm_hte_request_ts_ns(struct device *dev, struct hte_ts_desc *desc,
+ hte_ts_cb_t cb, hte_ts_sec_cb_t tcb, void *data);
+int of_hte_req_count(struct device *dev);
+int hte_enable_ts(struct hte_ts_desc *desc);
+int hte_disable_ts(struct hte_ts_desc *desc);
+int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+ struct hte_clk_info *ci);
+
+#else /* !CONFIG_HTE */
+static inline int devm_hte_register_chip(struct hte_chip *chip)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_push_ts_ns(const struct hte_chip *chip,
+ u32 xlated_id,
+ const struct hte_ts_data *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+ unsigned long edge_flags,
+ const char *name, void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_ts_get(struct device *dev, struct hte_ts_desc *desc,
+ int index)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_ts_put(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int devm_hte_request_ts_ns(struct device *dev,
+ struct hte_ts_desc *desc,
+ hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb,
+ void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int of_hte_req_count(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_enable_ts(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_disable_ts(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+ struct hte_clk_info *ci)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* !CONFIG_HTE */
+
+#endif
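
Editor's note: pulling the consumer half of this new API together, a hedged sketch of a driver requesting one hardware-timestamped line without a device-tree mapping; the line id 42, the names and struct foo_priv are arbitrary. The primary callback must not sleep, so it only records the sample.

struct foo_priv {
	struct hte_ts_desc desc;
	u64 last_ts;
};

static enum hte_return foo_ts_cb(struct hte_ts_data *ts, void *data)
{
	struct foo_priv *priv = data;

	/* primary callback: non-sleeping context, just record the timestamp */
	priv->last_ts = ts->tsc;
	return HTE_CB_HANDLED;
}

static int foo_setup_hte(struct foo_priv *priv)
{
	int ret;

	/* line id 42 and the name are arbitrary for this sketch */
	ret = hte_init_line_attr(&priv->desc, 42, HTE_RISING_EDGE_TS,
				 "foo-line", NULL);
	if (ret)
		return ret;

	/* no secondary (sleepable) callback needed here */
	return hte_request_ts_ns(&priv->desc, foo_ts_cb, NULL, priv);
}

Consumers with a device-tree mapping would instead resolve the line with hte_ts_get() before requesting timestamps, and hte_enable_ts()/hte_disable_ts() gate delivery without releasing the line.
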
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0f2894d01333..e4cff27d1198 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -528,6 +528,11 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
+unsigned long
+generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
/*
 * hugetlb page specific state flags. These flags are located in page.private
* of the hugetlb head page. Functions created via the below macros should be
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index fe2e0179ed51..3b42264333ef 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -230,15 +230,19 @@ static inline u32 hv_get_avail_to_write_percent(
* two 16 bit quantities: major_number. minor_number.
*
* 0 . 13 (Windows Server 2008)
- * 1 . 1 (Windows 7)
- * 2 . 4 (Windows 8)
- * 3 . 0 (Windows 8 R2)
+ * 1 . 1 (Windows 7, WS2008 R2)
+ * 2 . 4 (Windows 8, WS2012)
+ * 3 . 0 (Windows 8.1, WS2012 R2)
* 4 . 0 (Windows 10)
* 4 . 1 (Windows 10 RS3)
* 5 . 0 (Newer Windows 10)
* 5 . 1 (Windows 10 RS4)
* 5 . 2 (Windows Server 2019, RS5)
* 5 . 3 (Windows Server 2022)
+ *
+ * The WS2008 and WIN7 versions are listed here for
+ * completeness but are no longer supported in the
+ * Linux kernel.
*/
#define VERSION_WS2008 ((0 << 16) | (13))
@@ -788,6 +792,7 @@ struct vmbus_requestor {
#define VMBUS_NO_RQSTOR U64_MAX
#define VMBUS_RQST_ERROR (U64_MAX - 1)
+#define VMBUS_RQST_ADDR_ANY U64_MAX
/* NetVSC-specific */
#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
/* StorVSC-specific */
@@ -1041,13 +1046,36 @@ struct vmbus_channel {
u32 max_pkt_size;
};
+#define lock_requestor(channel, flags) \
+do { \
+ struct vmbus_requestor *rqstor = &(channel)->requestor; \
+ \
+ spin_lock_irqsave(&rqstor->req_lock, flags); \
+} while (0)
+
+static __always_inline void unlock_requestor(struct vmbus_channel *channel,
+ unsigned long flags)
+{
+ struct vmbus_requestor *rqstor = &channel->requestor;
+
+ spin_unlock_irqrestore(&rqstor->req_lock, flags);
+}
+
u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr);
+u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr);
+u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr);
u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);
+static inline bool is_hvsock_offer(const struct vmbus_channel_offer_channel *o)
+{
+ return !!(o->offer.chn_flags & VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
+}
+
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
- return !!(c->offermsg.offer.chn_flags &
- VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
+ return is_hvsock_offer(&c->offermsg);
}
static inline bool is_sub_channel(const struct vmbus_channel *c)
@@ -1161,6 +1189,13 @@ extern int vmbus_open(struct vmbus_channel *channel,
extern void vmbus_close(struct vmbus_channel *channel);
+extern int vmbus_sendpacket_getid(struct vmbus_channel *channel,
+ void *buffer,
+ u32 bufferLen,
+ u64 requestid,
+ u64 *trans_id,
+ enum vmbus_packet_type type,
+ u32 flags);
extern int vmbus_sendpacket(struct vmbus_channel *channel,
void *buffer,
u32 bufferLen,
@@ -1257,7 +1292,11 @@ struct hv_device {
u16 device_id;
struct device device;
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
struct vmbus_channel *channel;
struct kset *channels_kset;
@@ -1451,12 +1490,14 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
/*
- * Linux doesn't support the 3 devices: the first two are for
- * Automatic Virtual Machine Activation, and the third is for
- * Remote Desktop Virtualization.
+ * Linux doesn't support these 4 devices: the first two are for
+ * Automatic Virtual Machine Activation, the third is for
+ * Remote Desktop Virtualization, and the fourth is Initial
+ * Machine Configuration (IMC) used only by Windows guests.
* {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
* {3375baf4-9e15-4b30-b765-67acb10d607b}
* {276aacf4-ac15-426c-98dd-7521ad3f01fe}
+ * {c376c1c3-d276-48d2-90a9-c04748072c60}
*/
#define HV_AVMA1_GUID \
@@ -1471,6 +1512,10 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
+#define HV_IMC_GUID \
+ .guid = GUID_INIT(0xc376c1c3, 0xd276, 0x48d2, 0x90, 0xa9, \
+ 0xc0, 0x47, 0x48, 0x07, 0x2c, 0x60)
+
/*
* Common header for Hyper-V ICs
*/
@@ -1663,56 +1708,34 @@ static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
return (desc->len8 << 3) - (desc->offset8 << 3);
}
-
-struct vmpacket_descriptor *
-hv_pkt_iter_first_raw(struct vmbus_channel *channel);
+/* Get packet length associated with descriptor */
+static inline u32 hv_pkt_len(const struct vmpacket_descriptor *desc)
+{
+ return desc->len8 << 3;
+}
struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
- const struct vmpacket_descriptor *pkt,
- bool copy);
+ const struct vmpacket_descriptor *pkt);
void hv_pkt_iter_close(struct vmbus_channel *channel);
static inline struct vmpacket_descriptor *
-hv_pkt_iter_next_pkt(struct vmbus_channel *channel,
- const struct vmpacket_descriptor *pkt,
- bool copy)
+hv_pkt_iter_next(struct vmbus_channel *channel,
+ const struct vmpacket_descriptor *pkt)
{
struct vmpacket_descriptor *nxt;
- nxt = __hv_pkt_iter_next(channel, pkt, copy);
+ nxt = __hv_pkt_iter_next(channel, pkt);
if (!nxt)
hv_pkt_iter_close(channel);
return nxt;
}
-/*
- * Get next packet descriptor without copying it out of the ring buffer
- * If at end of list, return NULL and update host.
- */
-static inline struct vmpacket_descriptor *
-hv_pkt_iter_next_raw(struct vmbus_channel *channel,
- const struct vmpacket_descriptor *pkt)
-{
- return hv_pkt_iter_next_pkt(channel, pkt, false);
-}
-
-/*
- * Get next packet descriptor from iterator
- * If at end of list, return NULL and update host.
- */
-static inline struct vmpacket_descriptor *
-hv_pkt_iter_next(struct vmbus_channel *channel,
- const struct vmpacket_descriptor *pkt)
-{
- return hv_pkt_iter_next_pkt(channel, pkt, true);
-}
-
#define foreach_vmbus_pkt(pkt, channel) \
for (pkt = hv_pkt_iter_first(channel); pkt; \
pkt = hv_pkt_iter_next(channel, pkt))
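
Editor's note: with the raw/copy split removed, every in-ring reader now goes through the same iterator. A hedged sketch of a channel callback draining its ring; the function name is a placeholder and the payload is only logged here.

static void foo_onchannel_cb(void *context)
{
	struct vmbus_channel *chan = context;
	const struct vmpacket_descriptor *pkt;

	foreach_vmbus_pkt(pkt, chan) {
		/* hv_pkt_datalen() sizes the payload; hv_pkt_len() the whole descriptor */
		pr_debug("vmbus packet with %u payload bytes\n",
			 hv_pkt_datalen(pkt));
	}
}
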
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index c525fd51652f..7852f6c9a714 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -32,26 +32,34 @@ struct iio_dev;
/**
* struct ad_sigma_delta_info - Sigma Delta driver specific callbacks and options
* @set_channel: Will be called to select the current channel, may be NULL.
+ * @append_status: Will be called to enable status append at the end of the sample, may be NULL.
* @set_mode: Will be called to select the current mode, may be NULL.
+ * @disable_all: Will be called to disable all channels, may be NULL.
* @postprocess_sample: Is called for each sampled data word, can be used to
 * modify or drop the sample data; may be NULL.
* @has_registers: true if the device has writable and readable registers, false
* if there is just one read-only sample data shift register.
* @addr_shift: Shift of the register address in the communications register.
* @read_mask: Mask for the communications register having the read bit set.
+ * @status_ch_mask: Mask for the channel number stored in status register.
* @data_reg: Address of the data register, if 0 the default address of 0x3 will
* be used.
* @irq_flags: flags for the interrupt used by the triggered buffer
+ * @num_slots: Number of sequencer slots
*/
struct ad_sigma_delta_info {
int (*set_channel)(struct ad_sigma_delta *, unsigned int channel);
+ int (*append_status)(struct ad_sigma_delta *, bool append);
int (*set_mode)(struct ad_sigma_delta *, enum ad_sigma_delta_mode mode);
+ int (*disable_all)(struct ad_sigma_delta *);
int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample);
bool has_registers;
unsigned int addr_shift;
unsigned int read_mask;
+ unsigned int status_ch_mask;
unsigned int data_reg;
unsigned long irq_flags;
+ unsigned int num_slots;
};
/**
@@ -76,6 +84,13 @@ struct ad_sigma_delta {
uint8_t comm;
const struct ad_sigma_delta_info *info;
+ unsigned int active_slots;
+ unsigned int current_slot;
+ unsigned int num_slots;
+ bool status_appended;
+ /* map slots to channels in order to know what to expect from devices */
+ unsigned int *slots;
+ uint8_t *samples_buf;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -97,6 +112,29 @@ static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
return 0;
}
+static inline int ad_sigma_delta_append_status(struct ad_sigma_delta *sd, bool append)
+{
+ int ret;
+
+ if (sd->info->append_status) {
+ ret = sd->info->append_status(sd, append);
+ if (ret < 0)
+ return ret;
+
+ sd->status_appended = append;
+ }
+
+ return 0;
+}
+
+static inline int ad_sigma_delta_disable_all(struct ad_sigma_delta *sd)
+{
+ if (sd->info->disable_all)
+ return sd->info->disable_all(sd);
+
+ return 0;
+}
+
static inline int ad_sigma_delta_set_mode(struct ad_sigma_delta *sd,
unsigned int mode)
{
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 22f67845cdd3..db4a1b260348 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -237,6 +237,7 @@ struct st_sensor_settings {
* @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
* @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
* @buffer_data: Data used by buffer part.
+ * @odr_lock: Local lock for preventing concurrent ODR accesses/changes
*/
struct st_sensor_data {
struct iio_trigger *trig;
@@ -261,6 +262,8 @@ struct st_sensor_data {
s64 hw_timestamp;
char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned;
+
+ struct mutex odr_lock;
};
#ifdef CONFIG_IIO_BUFFER
diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
index 2be12b7b5dc5..6b3586b3f952 100644
--- a/include/linux/iio/iio-opaque.h
+++ b/include/linux/iio/iio-opaque.h
@@ -7,6 +7,9 @@
* struct iio_dev_opaque - industrial I/O device opaque information
* @indio_dev: public industrial I/O device information
* @id: used to identify device internally
+ * @currentmode: operating mode currently in use, may be eventually
+ * checked by device drivers but should be considered
+ * read-only as this is a core internal bit
* @driver_module: used to make it harder to undercut users
* @info_exist_lock: lock to prevent use during removal
* @trig_readonly: mark the current trigger immutable
@@ -36,6 +39,7 @@
*/
struct iio_dev_opaque {
struct iio_dev indio_dev;
+ int currentmode;
int id;
struct module *driver_module;
struct mutex info_exist_lock;
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index faf00f2c0be6..233d2e6b7721 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -315,7 +315,54 @@ static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
s64 iio_get_time_ns(const struct iio_dev *indio_dev);
unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
-/* Device operating modes */
+/*
+ * Device operating modes
+ * @INDIO_DIRECT_MODE: There is an access to either:
+ * a) The last single value available for devices that do not provide
+ * on-demand reads.
+ * b) A new value after performing an on-demand read otherwise.
+ * On most devices, this is a single-shot read. On some devices with data
+ * streams without an 'on-demand' function, this might also be the 'last value'
+ * feature. Above all, this mode internally means that we are not in any of the
+ * other modes, and sysfs reads should work.
+ * Device drivers should inform the core if they support this mode.
+ * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers.
+ * It indicates that an explicit trigger is required. This requests the core to
+ * attach a poll function when enabling the buffer, which is indicated by the
+ * _TRIGGERED suffix.
+ * The core will ensure this mode is set when registering a triggered buffer
+ * with iio_triggered_buffer_setup().
+ * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not event triggered.
+ * No poll function can be attached because there is no triggered infrastructure
+ * we can use to cause capture. There is a kfifo that the driver will fill, but
+ * not "only one scan at a time". Typically, hardware will have a buffer that
+ * can hold multiple scans. Software may read one or more scans at a single time
+ * and push the available data to a Kfifo. This means the core will not attach
+ * any poll function when enabling the buffer.
+ * The core will ensure this mode is set when registering a simple kfifo buffer
+ * with devm_iio_kfifo_buffer_setup().
+ * @INDIO_BUFFER_HARDWARE: For specific hardware, if unsure do not use this mode.
+ * Same as above but this time the buffer is not a kfifo where we have direct
+ * access to the data. Instead, the consumer driver must access the data through
+ * non software visible channels (or DMA when there is no demux possible in
+ * software)
+ * The core will ensure this mode is set when registering a dmaengine buffer
+ * with devm_iio_dmaengine_buffer_setup().
+ * @INDIO_EVENT_TRIGGERED: Very unusual mode.
+ * Triggers usually refer to an external event which will start data capture.
+ * Here it is kind of the opposite: a particular state of the data might
+ * itself produce an event. We don't necessarily
+ * have access to the data itself, but to the event produced. For example, this
+ * can be a threshold detector. The internal path of this mode is very close to
+ * the INDIO_BUFFER_TRIGGERED mode.
+ * The core will ensure this mode is set when registering a triggered event.
+ * @INDIO_HARDWARE_TRIGGERED: Very unusual mode.
+ * Here, triggers can result in data capture and can be routed to multiple
+ * hardware components, which make them close to regular triggers in the way
+ * they must be managed by the core, but without the entire interrupts/poll
+ * functions burden. Interrupts are irrelevant as the data flow is hardware
+ * mediated and distributed.
+ */
#define INDIO_DIRECT_MODE 0x01
#define INDIO_BUFFER_TRIGGERED 0x02
#define INDIO_BUFFER_SOFTWARE 0x04
@@ -488,8 +535,12 @@ struct iio_buffer_setup_ops {
/**
* struct iio_dev - industrial I/O device
- * @modes: [DRIVER] operating modes supported by device
- * @currentmode: [INTERN] current operating mode
+ * @modes: [DRIVER] bitmask listing all the operating modes
+ * supported by the IIO device. This list should be
+ * initialized before registering the IIO device. It can
+ * also be filled in by the IIO core, as a result of
+ * enabling particular features in the driver
+ * (see iio_triggered_event_setup()).
* @dev: [DRIVER] device structure, should be assigned a parent
* and owner
* @buffer: [DRIVER] any buffer present
@@ -516,7 +567,6 @@ struct iio_buffer_setup_ops {
*/
struct iio_dev {
int modes;
- int currentmode;
struct device dev;
struct iio_buffer *buffer;
@@ -543,6 +593,8 @@ struct iio_dev {
};
int iio_device_id(struct iio_dev *indio_dev);
+int iio_device_get_current_mode(struct iio_dev *indio_dev);
+bool iio_buffer_enabled(struct iio_dev *indio_dev);
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
@@ -672,16 +724,6 @@ struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);
__printf(2, 3)
struct iio_trigger *devm_iio_trigger_alloc(struct device *parent,
const char *fmt, ...);
-/**
- * iio_buffer_enabled() - helper function to test if the buffer is enabled
- * @indio_dev: IIO device structure for device
- **/
-static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
-{
- return indio_dev->currentmode
- & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
- INDIO_BUFFER_SOFTWARE);
-}
/**
* iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h
index ccd2ceae7b25..8a83fb58232d 100644
--- a/include/linux/iio/kfifo_buf.h
+++ b/include/linux/iio/kfifo_buf.h
@@ -12,11 +12,10 @@ void iio_kfifo_free(struct iio_buffer *r);
int devm_iio_kfifo_buffer_setup_ext(struct device *dev,
struct iio_dev *indio_dev,
- int mode_flags,
const struct iio_buffer_setup_ops *setup_ops,
const struct attribute **buffer_attrs);
-#define devm_iio_kfifo_buffer_setup(dev, indio_dev, mode_flags, setup_ops) \
- devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (mode_flags), (setup_ops), NULL)
+#define devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops) \
+ devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (setup_ops), NULL)
#endif
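
Editor's note: since the core now derives INDIO_BUFFER_SOFTWARE itself, callers drop the mode_flags argument; likewise, drivers that used to read indio_dev->currentmode switch to iio_buffer_enabled()/iio_device_get_current_mode(). A hedged before/after fragment; foo_buffer_ops and foo_start_capture are placeholders.

/* before this change: the operating mode had to be passed explicitly */
ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
				  INDIO_BUFFER_SOFTWARE, &foo_buffer_ops);

/* after: the kfifo helper implies INDIO_BUFFER_SOFTWARE by itself */
ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, &foo_buffer_ops);
if (ret)
	return ret;

/* drivers also stop peeking at indio_dev->currentmode directly */
if (iio_buffer_enabled(indio_dev))
	foo_start_capture(indio_dev);
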
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 2f9891cb3d00..4f29139bbfc3 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -539,7 +539,8 @@ struct dmar_domain {
u8 has_iotlb_device: 1;
u8 iommu_coherency: 1; /* indicate coherency of iommu access */
- u8 iommu_snooping: 1; /* indicate snooping control feature */
+ u8 force_snooping : 1; /* Create IOPTEs with snoop control */
+ u8 set_pte_snp:1;
struct list_head devices; /* all devices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
index b3b125b332aa..207ef06ba3e1 100644
--- a/include/linux/intel-svm.h
+++ b/include/linux/intel-svm.h
@@ -9,7 +9,7 @@
#define __INTEL_SVM_H__
/* Page Request Queue depth */
-#define PRQ_ORDER 2
+#define PRQ_ORDER 4
#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5)
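
Editor's note on the arithmetic: with PRQ_ORDER raised from 2 to 4, the page request queue grows from 4 pages (0x1000 << 2 = 16 KiB) to 16 pages (64 KiB); at 32 bytes per descriptor, PRQ_DEPTH rises from 512 to 2048 outstanding page requests and PRQ_RING_MASK becomes 0xffe0.
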
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 9208eca4b0d1..5e1afe169549 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -103,10 +103,11 @@ static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
}
enum iommu_cap {
- IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
- transactions */
+ IOMMU_CAP_CACHE_COHERENCY, /* IOMMU_CACHE is supported */
IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
+ IOMMU_CAP_PRE_BOOT_PROTECTION, /* Firmware says it used the IOMMU for
+ DMA protection and we should too */
};
/* These are the possible reserved region types */
@@ -272,6 +273,9 @@ struct iommu_ops {
* @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
* queue
* @iova_to_phys: translate iova to physical address
+ * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
+ * including no-snoop TLPs on PCIe or other platform
+ * specific mechanisms.
* @enable_nesting: Enable nesting
* @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
* @free: Release the domain after use.
@@ -300,6 +304,7 @@ struct iommu_domain_ops {
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
dma_addr_t iova);
+ bool (*enforce_cache_coherency)(struct iommu_domain *domain);
int (*enable_nesting)(struct iommu_domain *domain);
int (*set_pgtable_quirks)(struct iommu_domain *domain,
unsigned long quirks);
@@ -407,16 +412,10 @@ static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
return dev->iommu->iommu_dev->ops;
}
-#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
-#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
-#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
-#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4 /* Post Driver bind */
-#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */
-#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
-
extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
+extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
@@ -478,10 +477,6 @@ extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
-extern int iommu_group_register_notifier(struct iommu_group *group,
- struct notifier_block *nb);
-extern int iommu_group_unregister_notifier(struct iommu_group *group,
- struct notifier_block *nb);
extern int iommu_register_device_fault_handler(struct device *dev,
iommu_dev_fault_handler_t handler,
void *data);
@@ -675,6 +670,13 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev,
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
+int iommu_device_use_default_domain(struct device *dev);
+void iommu_device_unuse_default_domain(struct device *dev);
+
+int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
+void iommu_group_release_dma_owner(struct iommu_group *group);
+bool iommu_group_dma_owner_claimed(struct iommu_group *group);
+
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
@@ -689,6 +691,11 @@ static inline bool iommu_present(struct bus_type *bus)
return false;
}
+static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
+{
+ return false;
+}
+
static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
return false;
@@ -871,18 +878,6 @@ static inline void iommu_group_put(struct iommu_group *group)
{
}
-static inline int iommu_group_register_notifier(struct iommu_group *group,
- struct notifier_block *nb)
-{
- return -ENODEV;
-}
-
-static inline int iommu_group_unregister_notifier(struct iommu_group *group,
- struct notifier_block *nb)
-{
- return 0;
-}
-
static inline
int iommu_register_device_fault_handler(struct device *dev,
iommu_dev_fault_handler_t handler,
@@ -1031,6 +1026,30 @@ static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
return NULL;
}
+
+static inline int iommu_device_use_default_domain(struct device *dev)
+{
+ return 0;
+}
+
+static inline void iommu_device_unuse_default_domain(struct device *dev)
+{
+}
+
+static inline int
+iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_group_release_dma_owner(struct iommu_group *group)
+{
+}
+
+static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
+{
+ return false;
+}
#endif /* CONFIG_IOMMU_API */
/**
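
Editor's note: the notifier-based group tracking is replaced by explicit DMA ownership claims plus a per-device capability query. A hedged sketch of how a user-space DMA driver (VFIO-like; the names and the owner cookie are illustrative) would use the new calls:

static int foo_attach_device(struct device *dev, struct iommu_group *group,
			     void *owner_cookie)
{
	/* per-device capability query introduced alongside iommu_capable() */
	if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
		return -EINVAL;

	/* fails if kernel drivers still own DMA for devices in the group */
	return iommu_group_claim_dma_owner(group, owner_cookie);
}

static void foo_detach_device(struct iommu_group *group)
{
	iommu_group_release_dma_owner(group);
}
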
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index b75395ec8d52..e3e8c8662b49 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -10,6 +10,7 @@
#include <linux/ns_common.h>
#include <linux/refcount.h>
#include <linux/rhashtable-types.h>
+#include <linux/sysctl.h>
struct user_namespace;
@@ -63,6 +64,12 @@ struct ipc_namespace {
unsigned int mq_msg_default;
unsigned int mq_msgsize_default;
+ struct ctl_table_set mq_set;
+ struct ctl_table_header *mq_sysctls;
+
+ struct ctl_table_set ipc_set;
+ struct ctl_table_header *ipc_sysctls;
+
/* user_ns which owns the ipc ns */
struct user_namespace *user_ns;
struct ucounts *ucounts;
@@ -169,15 +176,37 @@ static inline void put_ipc_ns(struct ipc_namespace *ns)
#ifdef CONFIG_POSIX_MQUEUE_SYSCTL
-struct ctl_table_header;
-extern struct ctl_table_header *mq_register_sysctl_table(void);
+void retire_mq_sysctls(struct ipc_namespace *ns);
+bool setup_mq_sysctls(struct ipc_namespace *ns);
#else /* CONFIG_POSIX_MQUEUE_SYSCTL */
-static inline struct ctl_table_header *mq_register_sysctl_table(void)
+static inline void retire_mq_sysctls(struct ipc_namespace *ns)
{
- return NULL;
+}
+
+static inline bool setup_mq_sysctls(struct ipc_namespace *ns)
+{
+ return true;
}
#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */
+
+#ifdef CONFIG_SYSVIPC_SYSCTL
+
+bool setup_ipc_sysctls(struct ipc_namespace *ns);
+void retire_ipc_sysctls(struct ipc_namespace *ns);
+
+#else /* CONFIG_SYSVIPC_SYSCTL */
+
+static inline void retire_ipc_sysctls(struct ipc_namespace *ns)
+{
+}
+
+static inline bool setup_ipc_sysctls(struct ipc_namespace *ns)
+{
+ return true;
+}
+
+#endif /* CONFIG_SYSVIPC_SYSCTL */
#endif
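
Editor's note: both sets of helpers follow the same create/retire pairing, so a namespace setup path can be sketched as below (error handling abbreviated; the function name is illustrative). Teardown mirrors it with retire_ipc_sysctls() followed by retire_mq_sysctls().

static int foo_setup_ipc_ns(struct ipc_namespace *ns)
{
	if (!setup_mq_sysctls(ns))
		return -ENOMEM;

	if (!setup_ipc_sysctls(ns)) {
		retire_mq_sysctls(ns);
		return -ENOMEM;
	}

	return 0;
}
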
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 38c8203d52cb..37dfdcfcdd54 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -61,7 +61,7 @@ struct ipv6_devconf {
__s32 suppress_frag_ndisc;
__s32 accept_ra_mtu;
__s32 drop_unsolicited_na;
- __s32 accept_unsolicited_na;
+ __s32 accept_untracked_na;
struct ipv6_stable_secret {
bool initialized;
struct in6_addr secret;
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 107751cc047b..bf1eef337a07 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -256,9 +256,9 @@ extern void static_key_disable_cpuslocked(struct static_key *key);
#include <linux/atomic.h>
#include <linux/bug.h>
-static inline int static_key_count(struct static_key *key)
+static __always_inline int static_key_count(struct static_key *key)
{
- return atomic_read(&key->enabled);
+ return arch_atomic_read(&key->enabled);
}
static __always_inline void jump_label_init(void)
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 58d1b58a971e..ce6536f1d269 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -193,14 +193,6 @@ void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
unsigned long buf_len);
void *arch_kexec_kernel_image_load(struct kimage *image);
-int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
- Elf_Shdr *section,
- const Elf_Shdr *relsec,
- const Elf_Shdr *symtab);
-int arch_kexec_apply_relocations(struct purgatory_info *pi,
- Elf_Shdr *section,
- const Elf_Shdr *relsec,
- const Elf_Shdr *symtab);
int arch_kimage_file_post_load_cleanup(struct kimage *image);
#ifdef CONFIG_KEXEC_SIG
int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
@@ -227,8 +219,46 @@ struct crash_mem {
extern int crash_exclude_mem_range(struct crash_mem *mem,
unsigned long long mstart,
unsigned long long mend);
-extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
+extern int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
void **addr, unsigned long *sz);
+
+#ifndef arch_kexec_apply_relocations_add
+/*
+ * arch_kexec_apply_relocations_add - apply relocations of type RELA
+ * @pi: Purgatory to be relocated.
+ * @section: Section relocations applying to.
+ * @relsec: Section containing RELAs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
+ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+ pr_err("RELA relocation unsupported.\n");
+ return -ENOEXEC;
+}
+#endif
+
+#ifndef arch_kexec_apply_relocations
+/*
+ * arch_kexec_apply_relocations - apply relocations of type REL
+ * @pi: Purgatory to be relocated.
+ * @section: Section relocations applying to.
+ * @relsec: Section containing RELs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
+ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+ pr_err("REL relocation unsupported.\n");
+ return -ENOEXEC;
+}
+#endif
#endif /* CONFIG_KEXEC_FILE */
#ifdef CONFIG_KEXEC_ELF
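
Editor's note: because the defaults are now static inlines guarded by #ifndef, an architecture that supports purgatory relocation overrides them by defining the matching macro next to its declaration. A sketch of the usual convention; placing it in asm/kexec.h is the common pattern, not something mandated by this hunk.

/* arch/foo/include/asm/kexec.h */
struct purgatory_info;

int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
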
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 157168769fc2..55041d2f884d 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -424,7 +424,7 @@ void unregister_kretprobe(struct kretprobe *rp);
int register_kretprobes(struct kretprobe **rps, int num);
void unregister_kretprobes(struct kretprobe **rps, int num);
-#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+#if defined(CONFIG_KRETPROBE_ON_RETHOOK) || !defined(CONFIG_KRETPROBES)
#define kprobe_flush_task(tk) do {} while (0)
#else
void kprobe_flush_task(struct task_struct *tk);
diff --git a/include/linux/list.h b/include/linux/list.h
index 57e8b559cdf6..61762054b4be 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -35,7 +35,7 @@
static inline void INIT_LIST_HEAD(struct list_head *list)
{
WRITE_ONCE(list->next, list);
- list->prev = list;
+ WRITE_ONCE(list->prev, list);
}
#ifdef CONFIG_DEBUG_LIST
@@ -306,7 +306,7 @@ static inline int list_empty(const struct list_head *head)
static inline void list_del_init_careful(struct list_head *entry)
{
__list_del_entry(entry);
- entry->prev = entry;
+ WRITE_ONCE(entry->prev, entry);
smp_store_release(&entry->next, entry);
}
@@ -326,7 +326,7 @@ static inline void list_del_init_careful(struct list_head *entry)
static inline int list_empty_careful(const struct list_head *head)
{
struct list_head *next = smp_load_acquire(&head->next);
- return list_is_head(next, head) && (next == head->prev);
+ return list_is_head(next, head) && (next == READ_ONCE(head->prev));
}
/**
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 2614247a9781..293e29960c6e 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -16,8 +16,6 @@
#if IS_ENABLED(CONFIG_LIVEPATCH)
-#include <asm/livepatch.h>
-
/* task patch states */
#define KLP_UNDEFINED -1
#define KLP_UNPATCHED 0
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 37951c17908e..b6829b970093 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -286,6 +286,9 @@ extern void lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
unsigned long ip);
+#define lock_set_novalidate_class(l, n, i) \
+ lock_set_class(l, n, &__lockdep_no_validate__, 0, i)
+
static inline void lock_set_subclass(struct lockdep_map *lock,
unsigned int subclass, unsigned long ip)
{
@@ -353,7 +356,8 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, i) do { } while (0)
# define lock_downgrade(l, i) do { } while (0)
-# define lock_set_class(l, n, k, s, i) do { } while (0)
+# define lock_set_class(l, n, key, s, i) do { (void)(key); } while (0)
+# define lock_set_novalidate_class(l, n, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_init() do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
index af5d97239c0d..6a012784dd1b 100644
--- a/include/linux/mfd/hi655x-pmic.h
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -12,6 +12,8 @@
#ifndef __HI655X_PMIC_H
#define __HI655X_PMIC_H
+#include <linux/gpio/consumer.h>
+
/* Hi655x registers are mapped to memory bus in 4 bytes stride */
#define HI655X_STRIDE 4
#define HI655X_BUS_ADDR(x) ((x) << 2)
@@ -53,7 +55,7 @@ struct hi655x_pmic {
struct resource *res;
struct device *dev;
struct regmap *regmap;
- int gpio;
+ struct gpio_desc *gpio;
unsigned int ver;
struct regmap_irq_chip_data *irq_data;
};
diff --git a/include/linux/mfd/mt6359/registers.h b/include/linux/mfd/mt6359/registers.h
index 2135c9695918..2a4394a27b1c 100644
--- a/include/linux/mfd/mt6359/registers.h
+++ b/include/linux/mfd/mt6359/registers.h
@@ -8,6 +8,8 @@
/* PMIC Registers */
#define MT6359_SWCID 0xa
+#define MT6359_TOPSTATUS 0x2a
+#define MT6359_TOP_RST_MISC 0x14c
#define MT6359_MISC_TOP_INT_CON0 0x188
#define MT6359_MISC_TOP_INT_STATUS0 0x194
#define MT6359_TOP_INT_STATUS0 0x19e
diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h
index fcc8e74f0e8d..d336c541b7df 100644
--- a/include/linux/mfd/tc6393xb.h
+++ b/include/linux/mfd/tc6393xb.h
@@ -27,9 +27,6 @@ struct tc6393xb_platform_data {
int (*resume)(struct platform_device *dev);
int irq_base; /* base for subdevice irqs */
- int gpio_base;
- int (*setup)(struct platform_device *dev);
- void (*teardown)(struct platform_device *dev);
struct tmio_nand_data *nand_data;
struct tmio_fb_data *fb_data;
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
index f4ca367e3473..122e24ddbd4b 100644
--- a/include/linux/mfd/tps65218.h
+++ b/include/linux/mfd/tps65218.h
@@ -1,7 +1,7 @@
/*
* linux/mfd/tps65218.h
*
- * Functions to access TPS65219 power management chip.
+ * Functions to access TPS65218 power management chip.
*
* Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
*
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
new file mode 100644
index 000000000000..478aece17046
--- /dev/null
+++ b/include/linux/mhi_ep.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ *
+ */
+#ifndef _MHI_EP_H_
+#define _MHI_EP_H_
+
+#include <linux/dma-direction.h>
+#include <linux/mhi.h>
+
+#define MHI_EP_DEFAULT_MTU 0x8000
+
+/**
+ * struct mhi_ep_channel_config - Channel configuration structure for controller
+ * @name: The name of this channel
+ * @num: The number assigned to this channel
+ * @num_elements: The number of elements that can be queued to this channel
+ * @dir: Direction that data may flow on this channel
+ */
+struct mhi_ep_channel_config {
+ char *name;
+ u32 num;
+ u32 num_elements;
+ enum dma_data_direction dir;
+};
+
+/**
+ * struct mhi_ep_cntrl_config - MHI Endpoint controller configuration
+ * @mhi_version: MHI spec version supported by the controller
+ * @max_channels: Maximum number of channels supported
+ * @num_channels: Number of channels defined in @ch_cfg
+ * @ch_cfg: Array of defined channels
+ */
+struct mhi_ep_cntrl_config {
+ u32 mhi_version;
+ u32 max_channels;
+ u32 num_channels;
+ const struct mhi_ep_channel_config *ch_cfg;
+};
+
+/**
+ * struct mhi_ep_db_info - MHI Endpoint doorbell info
+ * @mask: Mask of the doorbell interrupt
+ * @status: Status of the doorbell interrupt
+ */
+struct mhi_ep_db_info {
+ u32 mask;
+ u32 status;
+};
+
+/**
+ * struct mhi_ep_cntrl - MHI Endpoint controller structure
+ * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
+ * Endpoint controller
+ * @mhi_dev: MHI Endpoint device instance for the controller
+ * @mmio: MMIO region containing the MHI registers
+ * @mhi_chan: Points to the channel configuration table
+ * @mhi_event: Points to the event ring configurations table
+ * @mhi_cmd: Points to the command ring configurations table
+ * @sm: MHI Endpoint state machine
+ * @ch_ctx_cache: Cache of host channel context data structure
+ * @ev_ctx_cache: Cache of host event context data structure
+ * @cmd_ctx_cache: Cache of host command context data structure
+ * @ch_ctx_host_pa: Physical address of host channel context data structure
+ * @ev_ctx_host_pa: Physical address of host event context data structure
+ * @cmd_ctx_host_pa: Physical address of host command context data structure
+ * @ch_ctx_cache_phys: Physical address of the host channel context cache
+ * @ev_ctx_cache_phys: Physical address of the host event context cache
+ * @cmd_ctx_cache_phys: Physical address of the host command context cache
+ * @chdb: Array of channel doorbell interrupt info
+ * @event_lock: Lock for protecting event rings
+ * @list_lock: Lock for protecting state transition and channel doorbell lists
+ * @state_lock: Lock for protecting state transitions
+ * @st_transition_list: List of state transitions
+ * @ch_db_list: List of queued channel doorbells
+ * @wq: Dedicated workqueue for handling rings and state changes
+ * @state_work: State transition worker
+ * @reset_work: Worker for MHI Endpoint reset
+ * @cmd_ring_work: Worker for processing command rings
+ * @ch_ring_work: Worker for processing channel rings
+ * @raise_irq: CB function for raising IRQ to the host
+ * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it
+ * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
+ * @read_from_host: CB function for reading from host memory from endpoint
+ * @write_to_host: CB function for writing to host memory from endpoint
+ * @mhi_state: MHI Endpoint state
+ * @max_chan: Maximum channels supported by the endpoint controller
+ * @mru: MRU (Maximum Receive Unit) value of the endpoint controller
+ * @event_rings: Number of event rings supported by the endpoint controller
+ * @hw_event_rings: Number of hardware event rings supported by the endpoint controller
+ * @chdb_offset: Channel doorbell offset set by the host
+ * @erdb_offset: Event ring doorbell offset set by the host
+ * @index: MHI Endpoint controller index
+ * @irq: IRQ used by the endpoint controller
+ * @enabled: Check if the endpoint controller is enabled or not
+ */
+struct mhi_ep_cntrl {
+ struct device *cntrl_dev;
+ struct mhi_ep_device *mhi_dev;
+ void __iomem *mmio;
+
+ struct mhi_ep_chan *mhi_chan;
+ struct mhi_ep_event *mhi_event;
+ struct mhi_ep_cmd *mhi_cmd;
+ struct mhi_ep_sm *sm;
+
+ struct mhi_chan_ctxt *ch_ctx_cache;
+ struct mhi_event_ctxt *ev_ctx_cache;
+ struct mhi_cmd_ctxt *cmd_ctx_cache;
+ u64 ch_ctx_host_pa;
+ u64 ev_ctx_host_pa;
+ u64 cmd_ctx_host_pa;
+ phys_addr_t ch_ctx_cache_phys;
+ phys_addr_t ev_ctx_cache_phys;
+ phys_addr_t cmd_ctx_cache_phys;
+
+ struct mhi_ep_db_info chdb[4];
+ struct mutex event_lock;
+ spinlock_t list_lock;
+ spinlock_t state_lock;
+
+ struct list_head st_transition_list;
+ struct list_head ch_db_list;
+
+ struct workqueue_struct *wq;
+ struct work_struct state_work;
+ struct work_struct reset_work;
+ struct work_struct cmd_ring_work;
+ struct work_struct ch_ring_work;
+
+ void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector);
+ int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr,
+ void __iomem **virt, size_t size);
+ void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
+ void __iomem *virt, size_t size);
+ int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size);
+ int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size);
+
+ enum mhi_state mhi_state;
+
+ u32 max_chan;
+ u32 mru;
+ u32 event_rings;
+ u32 hw_event_rings;
+ u32 chdb_offset;
+ u32 erdb_offset;
+ u32 index;
+ int irq;
+ bool enabled;
+};
+
+/**
+ * struct mhi_ep_device - Structure representing an MHI Endpoint device that binds
+ * to channels or is associated with controllers
+ * @dev: Driver model device node for the MHI Endpoint device
+ * @mhi_cntrl: Controller the device belongs to
+ * @id: Pointer to MHI Endpoint device ID struct
+ * @name: Name of the associated MHI Endpoint device
+ * @ul_chan: UL (from host to endpoint) channel for the device
+ * @dl_chan: DL (from endpoint to host) channel for the device
+ * @dev_type: MHI device type
+ */
+struct mhi_ep_device {
+ struct device dev;
+ struct mhi_ep_cntrl *mhi_cntrl;
+ const struct mhi_device_id *id;
+ const char *name;
+ struct mhi_ep_chan *ul_chan;
+ struct mhi_ep_chan *dl_chan;
+ enum mhi_device_type dev_type;
+};
+
+/**
+ * struct mhi_ep_driver - Structure representing a MHI Endpoint client driver
+ * @id_table: Pointer to MHI Endpoint device ID table
+ * @driver: Device driver model driver
+ * @probe: CB function for client driver probe function
+ * @remove: CB function for client driver remove function
+ * @ul_xfer_cb: CB function for UL (from host to endpoint) data transfer
+ * @dl_xfer_cb: CB function for DL (from endpoint to host) data transfer
+ */
+struct mhi_ep_driver {
+ const struct mhi_device_id *id_table;
+ struct device_driver driver;
+ int (*probe)(struct mhi_ep_device *mhi_ep,
+ const struct mhi_device_id *id);
+ void (*remove)(struct mhi_ep_device *mhi_ep);
+ void (*ul_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+ void (*dl_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+};
+
+#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev)
+#define to_mhi_ep_driver(drv) container_of(drv, struct mhi_ep_driver, driver)
+
+/*
+ * module_mhi_ep_driver() - Helper macro for drivers that don't do
+ * anything special other than using default mhi_ep_driver_register() and
+ * mhi_ep_driver_unregister(). This eliminates a lot of boilerplate.
+ * Each module may only use this macro once.
+ */
+#define module_mhi_ep_driver(mhi_drv) \
+ module_driver(mhi_drv, mhi_ep_driver_register, \
+ mhi_ep_driver_unregister)
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define mhi_ep_driver_register(mhi_drv) \
+ __mhi_ep_driver_register(mhi_drv, THIS_MODULE)
+
+/**
+ * __mhi_ep_driver_register - Register a driver with MHI Endpoint bus
+ * @mhi_drv: Driver to be associated with the device
+ * @owner: The module owner
+ *
+ * Return: 0 if driver registrations succeeds, a negative error code otherwise.
+ */
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner);
+
+/**
+ * mhi_ep_driver_unregister - Unregister a driver from MHI Endpoint bus
+ * @mhi_drv: Driver associated with the device
+ */
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv);
+
+/**
+ * mhi_ep_register_controller - Register MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to register
+ * @config: Configuration to use for the controller
+ *
+ * Return: 0 if controller registrations succeeds, a negative error code otherwise.
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config);
+
+/**
+ * mhi_ep_unregister_controller - Unregister MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to unregister
+ */
+void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_up - Power up the MHI endpoint stack
+ * @mhi_cntrl: MHI Endpoint controller
+ *
+ * Return: 0 if power up succeeds, a negative error code otherwise.
+ */
+int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_down - Power down the MHI endpoint stack
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_queue_is_empty - Determine whether the transfer queue is empty
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ *
+ * Return: true if the queue is empty, false otherwise.
+ */
+bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir);
+
+/**
+ * mhi_ep_queue_skb - Send an SKB to the host over MHI Endpoint
+ * @mhi_dev: Device associated with the DL channel
+ * @skb: SKB to be queued
+ *
+ * Return: 0 if the SKB has been sent successfully, a negative error code otherwise.
+ */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb);
+
+#endif
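
A minimal sketch of an MHI Endpoint client driver built on the interface above; the channel name "IP_SW0", the callback bodies and the driver name are illustrative assumptions, not taken from the header.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mhi_ep.h>

static int sample_ep_probe(struct mhi_ep_device *mhi_dev,
			   const struct mhi_device_id *id)
{
	dev_info(&mhi_dev->dev, "bound to channel %s\n", mhi_dev->name);
	return 0;
}

static void sample_ep_remove(struct mhi_ep_device *mhi_dev)
{
}

/* Called when the host transfers data on the UL (host to endpoint) channel */
static void sample_ep_ul_xfer_cb(struct mhi_ep_device *mhi_dev,
				 struct mhi_result *result)
{
	/* result describes the received buffer */
}

/* Completion callback for buffers queued towards the host on the DL channel */
static void sample_ep_dl_xfer_cb(struct mhi_ep_device *mhi_dev,
				 struct mhi_result *result)
{
}

static const struct mhi_device_id sample_ep_id_table[] = {
	{ .chan = "IP_SW0" },	/* hypothetical channel name */
	{},
};

static struct mhi_ep_driver sample_ep_driver = {
	.id_table	= sample_ep_id_table,
	.probe		= sample_ep_probe,
	.remove		= sample_ep_remove,
	.ul_xfer_cb	= sample_ep_ul_xfer_cb,
	.dl_xfer_cb	= sample_ep_dl_xfer_cb,
	.driver		= {
		.name	= "sample_mhi_ep_client",
	},
};
module_mhi_ep_driver(sample_ep_driver);
MODULE_LICENSE("GPL");
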
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index b064bc278f52..5040cd774c5a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -447,6 +447,11 @@ struct mlx5_qp_table {
struct radix_tree_root tree;
};
+enum {
+ MLX5_PF_NOTIFY_DISABLE_VF,
+ MLX5_PF_NOTIFY_ENABLE_VF,
+};
+
struct mlx5_vf_context {
int enabled;
u64 port_guid;
@@ -457,6 +462,7 @@ struct mlx5_vf_context {
u8 port_guid_valid:1;
u8 node_guid_valid:1;
enum port_state_policy policy;
+ struct blocking_notifier_head notifier;
};
struct mlx5_core_sriov {
@@ -1162,6 +1168,12 @@ int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type
struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev);
void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev);
+int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb);
+void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb);
#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
struct ib_device *ibdev,
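
As a hedged illustration of the new per-VF notifier, a consumer (for example a VFIO variant driver) might subscribe like this; the callback body and vf_id handling are illustrative, and error handling is omitted.

#include <linux/notifier.h>
#include <linux/mlx5/driver.h>

static int my_vf_event(struct notifier_block *nb, unsigned long event, void *data)
{
	switch (event) {
	case MLX5_PF_NOTIFY_DISABLE_VF:
		/* PF is about to disable the VF: quiesce use of the device */
		break;
	case MLX5_PF_NOTIFY_ENABLE_VF:
		/* VF has been (re)enabled by the PF */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_vf_nb = { .notifier_call = my_vf_event };

static void my_subscribe(struct mlx5_core_dev *mdev, int vf_id)
{
	mlx5_sriov_blocking_notifier_register(mdev, vf_id, &my_vf_nb);
}

static void my_unsubscribe(struct mlx5_core_dev *mdev, int vf_id)
{
	mlx5_sriov_blocking_notifier_unregister(mdev, vf_id, &my_vf_nb);
}
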
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 78b3d3465dd7..fd7d083a34d3 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -87,6 +87,7 @@ enum {
enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
+ MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
@@ -5176,12 +5177,11 @@ struct mlx5_ifc_query_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x20];
- u8 ece[0x20];
+ u8 reserved_at_40[0x40];
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h
index 1a9c9d94cb59..4414ed5b6ed2 100644
--- a/include/linux/mlx5/mlx5_ifc_vdpa.h
+++ b/include/linux/mlx5/mlx5_ifc_vdpa.h
@@ -165,4 +165,43 @@ struct mlx5_ifc_modify_virtio_net_q_out_bits {
struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
};
+struct mlx5_ifc_virtio_q_counters_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x40];
+ u8 received_desc[0x40];
+ u8 completed_desc[0x40];
+ u8 error_cqes[0x20];
+ u8 bad_desc_errors[0x20];
+ u8 exceed_max_chain[0x20];
+ u8 invalid_buffer[0x20];
+ u8 reserved_at_180[0x280];
+};
+
+struct mlx5_ifc_create_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_create_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits counters;
+};
+
#endif /* __MLX5_IFC_VDPA_H_ */
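
A sketch of how the new VIRTIO_Q_COUNTERS object could be created through the existing general-object command interface; MLX5_ST_SZ_DW(), MLX5_SET()/MLX5_GET(), MLX5_CMD_OP_CREATE_GENERAL_OBJECT and mlx5_cmd_exec() are assumed from the mlx5 core API, and the exact usage in the vDPA driver may differ.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>

static int create_vq_counters(struct mlx5_core_dev *mdev, u32 *counters_obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {};
	u32 out[MLX5_ST_SZ_DW(create_virtio_q_counters_out)] = {};
	int err;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	/* The object id is later passed to the query/destroy commands */
	*counters_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return 0;
}
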
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f85e5a4c070b..bc8f326be0ce 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1594,8 +1594,13 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
#ifdef CONFIG_MIGRATION
static inline bool is_pinnable_page(struct page *page)
{
- return !(is_zone_movable_page(page) || is_migrate_cma_page(page)) ||
- is_zero_pfn(page_to_pfn(page));
+#ifdef CONFIG_CMA
+ int mt = get_pageblock_migratetype(page);
+
+ if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
+ return false;
+#endif
+ return !(is_zone_movable_page(page) || is_zero_pfn(page_to_pfn(page)));
}
#else
static inline bool is_pinnable_page(struct page *page)
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 5da5d990ff58..549590e9c644 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -835,6 +835,8 @@ struct wmi_device_id {
#define MHI_DEVICE_MODALIAS_FMT "mhi:%s"
#define MHI_NAME_SIZE 32
+#define MHI_EP_DEVICE_MODALIAS_FMT "mhi_ep:%s"
+
/**
* struct mhi_device_id - MHI device identification
* @chan: MHI channel name
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 7f18a7555dff..55a4abaf6715 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -11,17 +11,15 @@
#define _LINUX_MOUNT_H
#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/nodemask.h>
-#include <linux/spinlock.h>
-#include <linux/seqlock.h>
-#include <linux/atomic.h>
+#include <asm/barrier.h>
struct super_block;
-struct vfsmount;
struct dentry;
-struct mnt_namespace;
+struct user_namespace;
+struct file_system_type;
struct fs_context;
+struct file;
+struct path;
#define MNT_NOSUID 0x01
#define MNT_NODEV 0x02
@@ -81,9 +79,6 @@ static inline struct user_namespace *mnt_user_ns(const struct vfsmount *mnt)
return smp_load_acquire(&mnt->mnt_userns);
}
-struct file; /* forward dec */
-struct path;
-
extern int mnt_want_write(struct vfsmount *mnt);
extern int mnt_want_write_file(struct file *file);
extern void mnt_drop_write(struct vfsmount *mnt);
@@ -94,12 +89,10 @@ extern struct vfsmount *mnt_clone_internal(const struct path *path);
extern bool __mnt_is_readonly(struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
-struct path;
extern struct vfsmount *clone_private_mount(const struct path *path);
extern int __mnt_want_write(struct vfsmount *);
extern void __mnt_drop_write(struct vfsmount *);
-struct file_system_type;
extern struct vfsmount *fc_mount(struct fs_context *fc);
extern struct vfsmount *vfs_create_mount(struct fs_context *fc);
extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
@@ -115,6 +108,18 @@ extern void mark_mounts_for_expiry(struct list_head *mounts);
extern dev_t name_to_dev_t(const char *name);
extern bool path_is_mountpoint(const struct path *path);
+extern bool our_mnt(struct vfsmount *mnt);
+
+extern struct vfsmount *kern_mount(struct file_system_type *);
+extern void kern_unmount(struct vfsmount *mnt);
+extern int may_umount_tree(struct vfsmount *);
+extern int may_umount(struct vfsmount *);
+extern long do_mount(const char *, const char __user *,
+ const char *, unsigned long, void *);
+extern struct vfsmount *collect_mounts(const struct path *);
+extern void drop_collected_mounts(struct vfsmount *);
+extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
+ struct vfsmount *);
extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/namei.h b/include/linux/namei.h
index e89329bb3134..caeb08a98536 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -69,6 +69,12 @@ extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int);
struct dentry *lookup_one(struct user_namespace *, const char *, struct dentry *, int);
+struct dentry *lookup_one_unlocked(struct user_namespace *mnt_userns,
+ const char *name, struct dentry *base,
+ int len);
+struct dentry *lookup_one_positive_unlocked(struct user_namespace *mnt_userns,
+ const char *name,
+ struct dentry *base, int len);
extern int follow_down_one(struct path *);
extern int follow_down(struct path *);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 5662d8be04eb..8d04b6a5964c 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -451,6 +451,8 @@ enum lock_type4 {
#define FATTR4_WORD1_TIME_MODIFY (1UL << 21)
#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22)
#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
+#define FATTR4_WORD1_DACL (1UL << 26)
+#define FATTR4_WORD1_SACL (1UL << 27)
#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0)
#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 157d2bd6b241..ea2f7e6b1b0b 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -287,4 +287,5 @@ struct nfs_server {
#define NFS_CAP_XATTR (1U << 28)
#define NFS_CAP_READ_PLUS (1U << 29)
#define NFS_CAP_FS_LOCATIONS (1U << 30)
+#define NFS_CAP_MOVEABLE (1U << 31)
#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 2863e5a69c6a..0e3aa0f5f324 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -800,9 +800,17 @@ struct nfs_setattrargs {
const struct nfs4_label *label;
};
+enum nfs4_acl_type {
+ NFS4ACL_NONE = 0,
+ NFS4ACL_ACL,
+ NFS4ACL_DACL,
+ NFS4ACL_SACL,
+};
+
struct nfs_setaclargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
+ enum nfs4_acl_type acl_type;
size_t acl_len;
struct page ** acl_pages;
};
@@ -814,6 +822,7 @@ struct nfs_setaclres {
struct nfs_getaclargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
+ enum nfs4_acl_type acl_type;
size_t acl_len;
struct page ** acl_pages;
};
@@ -822,6 +831,7 @@ struct nfs_getaclargs {
#define NFS4_ACL_TRUNC 0x0001 /* ACL was truncated */
struct nfs_getaclres {
struct nfs4_sequence_res seq_res;
+ enum nfs4_acl_type acl_type;
size_t acl_len;
size_t acl_data_offset;
int acl_flags;
@@ -1212,7 +1222,7 @@ struct nfs4_fs_location {
#define NFS4_FS_LOCATIONS_MAXENTRIES 10
struct nfs4_fs_locations {
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr;
const struct nfs_server *server;
struct nfs4_pathname fs_path;
int nlocations;
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index c6199dbe2591..0f233b76c9ce 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -42,11 +42,11 @@
* void nodes_shift_right(dst, src, n) Shift right
* void nodes_shift_left(dst, src, n) Shift left
*
- * int first_node(mask) Number lowest set bit, or MAX_NUMNODES
- * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
- * int next_node_in(node, mask) Next node past 'node', or wrap to first,
+ * unsigned int first_node(mask) Number lowest set bit, or MAX_NUMNODES
+ * unsigned int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
+ * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first,
* or MAX_NUMNODES
- * int first_unset_node(mask) First node not set in mask, or
+ * unsigned int first_unset_node(mask) First node not set in mask, or
* MAX_NUMNODES
*
* nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set
@@ -153,7 +153,7 @@ static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
#define node_test_and_set(node, nodemask) \
__node_test_and_set((node), &(nodemask))
-static inline int __node_test_and_set(int node, nodemask_t *addr)
+static inline bool __node_test_and_set(int node, nodemask_t *addr)
{
return test_and_set_bit(node, addr->bits);
}
@@ -200,7 +200,7 @@ static inline void __nodes_complement(nodemask_t *dstp,
#define nodes_equal(src1, src2) \
__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_equal(const nodemask_t *src1p,
+static inline bool __nodes_equal(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
@@ -208,7 +208,7 @@ static inline int __nodes_equal(const nodemask_t *src1p,
#define nodes_intersects(src1, src2) \
__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_intersects(const nodemask_t *src1p,
+static inline bool __nodes_intersects(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
@@ -216,20 +216,20 @@ static inline int __nodes_intersects(const nodemask_t *src1p,
#define nodes_subset(src1, src2) \
__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_subset(const nodemask_t *src1p,
+static inline bool __nodes_subset(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
-static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
+static inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
+static inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
@@ -260,15 +260,15 @@ static inline void __nodes_shift_left(nodemask_t *dstp,
> MAX_NUMNODES, then the silly min_ts could be dropped. */
#define first_node(src) __first_node(&(src))
-static inline int __first_node(const nodemask_t *srcp)
+static inline unsigned int __first_node(const nodemask_t *srcp)
{
- return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
+ return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}
#define next_node(n, src) __next_node((n), &(src))
-static inline int __next_node(int n, const nodemask_t *srcp)
+static inline unsigned int __next_node(int n, const nodemask_t *srcp)
{
- return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
+ return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
/*
@@ -276,7 +276,7 @@ static inline int __next_node(int n, const nodemask_t *srcp)
* the first node in src if needed. Returns MAX_NUMNODES if src is empty.
*/
#define next_node_in(n, src) __next_node_in((n), &(src))
-int __next_node_in(int node, const nodemask_t *srcp);
+unsigned int __next_node_in(int node, const nodemask_t *srcp);
static inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
@@ -296,9 +296,9 @@ static inline void init_nodemask_of_node(nodemask_t *mask, int node)
})
#define first_unset_node(mask) __first_unset_node(&(mask))
-static inline int __first_unset_node(const nodemask_t *maskp)
+static inline unsigned int __first_unset_node(const nodemask_t *maskp)
{
- return min_t(int,MAX_NUMNODES,
+ return min_t(unsigned int, MAX_NUMNODES,
find_first_zero_bit(maskp->bits, MAX_NUMNODES));
}
@@ -435,11 +435,11 @@ static inline int num_node_state(enum node_states state)
#define first_online_node first_node(node_states[N_ONLINE])
#define first_memory_node first_node(node_states[N_MEMORY])
-static inline int next_online_node(int nid)
+static inline unsigned int next_online_node(int nid)
{
return next_node(nid, node_states[N_ONLINE]);
}
-static inline int next_memory_node(int nid)
+static inline unsigned int next_memory_node(int nid)
{
return next_node(nid, node_states[N_MEMORY]);
}
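
A small sketch of the return-value convention documented above: the iteration primitives report MAX_NUMNODES when no further bit is set, which is what terminates a walk over a nodemask.

#include <linux/nodemask.h>

static unsigned int count_set_nodes(const nodemask_t *mask)
{
	unsigned int nid, count = 0;

	for (nid = first_node(*mask); nid < MAX_NUMNODES;
	     nid = next_node(nid, *mask))
		count++;

	return count;
}
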
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 87069b8459af..aef88c2d1173 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -150,6 +150,11 @@ extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
struct notifier_block *nb);
+extern int atomic_notifier_chain_register_unique_prio(
+ struct atomic_notifier_head *nh, struct notifier_block *nb);
+extern int blocking_notifier_chain_register_unique_prio(
+ struct blocking_notifier_head *nh, struct notifier_block *nb);
+
extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
struct notifier_block *nb);
extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
@@ -173,6 +178,8 @@ extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh
extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v);
+extern bool atomic_notifier_call_chain_is_empty(struct atomic_notifier_head *nh);
+
#define NOTIFY_DONE 0x0000 /* Don't care */
#define NOTIFY_OK 0x0001 /* Suits me */
#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
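
A hedged sketch of the unique-priority registration variant: as used by the sys-off core, it is expected to refuse a second notifier with the same priority on one chain, so the priority can double as an identity. The chain and priority value here are illustrative.

#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(my_chain);

static int my_cb(struct notifier_block *nb, unsigned long action, void *data)
{
	return NOTIFY_DONE;
}

static struct notifier_block my_nb = {
	.notifier_call	= my_cb,
	.priority	= 128,
};

static int my_register(void)
{
	/* Expected to fail if priority 128 is already taken on my_chain */
	return blocking_notifier_chain_register_unique_prio(&my_chain, &my_nb);
}
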
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 5358a5facdee..fa092b9be2fd 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -564,6 +564,15 @@ int nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *remoteport,
void *lsreqbuf, u32 lsreqbuf_len);
+/*
+ * Routine called by the LLDD to get the appid field associated with a request
+ *
+ * If the return value is NULL: the user/libvirt has not set an appid for the VM
+ * If the return value is non-NULL: it is the appid associated with the VM
+ *
+ * @req: IO request from nvme fc to driver
+ */
+char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req);
/*
* *************** LLDD FC-NVME Target/Subsystem API ***************
@@ -1048,5 +1057,10 @@ int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport,
void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);
+/*
+ * Add a define, visible to the compiler, that indicates support
+ * for the feature. This allows for conditional compilation in LLDDs.
+ */
+#define NVME_FC_FEAT_UUID 0x0001
#endif /* _NVME_FC_DRIVER_H */
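
A sketch of how an FC LLDD might consume the helper above, using the NVME_FC_FEAT_UUID define for conditional compilation as the comment suggests; the tagging hook itself is illustrative.

#include <linux/nvme-fc-driver.h>

static void lldd_tag_request(struct nvmefc_fcp_req *fcpreq)
{
#ifdef NVME_FC_FEAT_UUID
	char *appid = nvme_fc_io_getuuid(fcpreq);

	if (appid) {
		/* associate the I/O with the VM identified by appid */
	}
#endif
}
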
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index c0c0cefc3b92..980f9c9ac0bc 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -25,6 +25,7 @@ struct nvmem_cell_info {
unsigned int bytes;
unsigned int bit_offset;
unsigned int nbits;
+ struct device_node *np;
};
/**
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index aaf219bd0354..83fccd0c9bba 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -20,12 +20,12 @@ typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
extern unsigned int of_irq_workarounds;
extern struct device_node *of_irq_dflt_pic;
-extern int of_irq_parse_oldworld(struct device_node *device, int index,
- struct of_phandle_args *out_irq);
+int of_irq_parse_oldworld(const struct device_node *device, int index,
+ struct of_phandle_args *out_irq);
#else /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
#define of_irq_workarounds (0)
#define of_irq_dflt_pic (NULL)
-static inline int of_irq_parse_oldworld(struct device_node *device, int index,
+static inline int of_irq_parse_oldworld(const struct device_node *device, int index,
struct of_phandle_args *out_irq)
{
return -EINVAL;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 60adf42460ab..81a57b498f22 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -379,10 +379,6 @@ struct pci_dev {
unsigned int mmio_always_on:1; /* Disallow turning off io/mem
decoding during BAR sizing */
unsigned int wakeup_prepared:1;
- unsigned int runtime_d3cold:1; /* Whether go through runtime
- D3cold, not set for devices
- powered on/off by the
- corresponding bridge */
unsigned int skip_bus_pm:1; /* Internal: Skip bus-level PM */
unsigned int ignore_hotplug:1; /* Ignore hotplug events */
unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
@@ -516,7 +512,11 @@ struct pci_dev {
u16 acs_cap; /* ACS Capability offset */
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
unsigned long priv_flags; /* Private flags for the PCI driver */
@@ -895,6 +895,13 @@ struct module;
* created once it is bound to the driver.
* @driver: Driver model structure.
* @dynids: List of dynamically added device IDs.
+ * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
+ * For most device drivers, no need to care about this flag
+ * as long as all DMAs are handled through the kernel DMA API.
+ * For some special ones, for example VFIO drivers, they know
+ * how to manage the DMA themselves and set this flag so that
+ * the IOMMU layer will allow them to setup and manage their
+ * own I/O address space.
*/
struct pci_driver {
struct list_head node;
@@ -913,6 +920,7 @@ struct pci_driver {
const struct attribute_group **dev_groups;
struct device_driver driver;
struct pci_dynids dynids;
+ bool driver_managed_dma;
};
static inline struct pci_driver *to_pci_driver(struct device_driver *drv)
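
A sketch of a VFIO-style PCI driver opting out of kernel-DMA-API ownership via the new flag; the device IDs and callbacks are illustrative.

#include <linux/module.h>
#include <linux/pci.h>

static int my_vfio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return 0;
}

static void my_vfio_remove(struct pci_dev *pdev)
{
}

static const struct pci_device_id my_vfio_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device */
	{ 0, }
};

static struct pci_driver my_vfio_driver = {
	.name			= "my-vfio-pci",
	.id_table		= my_vfio_ids,
	.probe			= my_vfio_probe,
	.remove			= my_vfio_remove,
	/* this driver sets up and manages its own I/O address space */
	.driver_managed_dma	= true,
};
module_pci_driver(my_vfio_driver);
MODULE_LICENSE("GPL");
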
diff --git a/include/linux/phy/phy-lvds.h b/include/linux/phy/phy-lvds.h
new file mode 100644
index 000000000000..09931d080a6d
--- /dev/null
+++ b/include/linux/phy/phy-lvds.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020,2022 NXP
+ */
+
+#ifndef __PHY_LVDS_H_
+#define __PHY_LVDS_H_
+
+/**
+ * struct phy_configure_opts_lvds - LVDS configuration set
+ * @bits_per_lane_and_dclk_cycle: Number of bits per lane per differential
+ * clock cycle.
+ * @differential_clk_rate: Clock rate, in Hertz, of the LVDS
+ * differential clock.
+ * @lanes: Number of active, consecutive,
+ * data lanes, starting from lane 0,
+ * used for the transmissions.
+ * @is_slave: Boolean, true if the phy is a slave
+ * which works together with a master
+ * phy to support dual link transmission,
+ * otherwise a regular phy or a master phy.
+ *
+ * This structure is used to represent the configuration state of an LVDS phy.
+ */
+struct phy_configure_opts_lvds {
+ unsigned int bits_per_lane_and_dclk_cycle;
+ unsigned long differential_clk_rate;
+ unsigned int lanes;
+ bool is_slave;
+};
+
+#endif /* __PHY_LVDS_H_ */
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index f3286f4cd306..b1413757fcc3 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <linux/phy/phy-dp.h>
+#include <linux/phy/phy-lvds.h>
#include <linux/phy/phy-mipi-dphy.h>
struct phy;
@@ -57,10 +58,13 @@ enum phy_media {
* the MIPI_DPHY phy mode.
* @dp: Configuration set applicable for phys supporting
* the DisplayPort protocol.
+ * @lvds: Configuration set applicable for phys supporting
+ * the LVDS phy mode.
*/
union phy_configure_opts {
struct phy_configure_opts_mipi_dphy mipi_dphy;
struct phy_configure_opts_dp dp;
+ struct phy_configure_opts_lvds lvds;
};
/**
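
A sketch of a consumer filling the new LVDS member of union phy_configure_opts and handing it to phy_configure(); the lane count and clock rate are illustrative values, not requirements of the API.

#include <linux/phy/phy.h>

static int my_lvds_configure(struct phy *phy, unsigned long pixel_clock_hz)
{
	union phy_configure_opts opts = {
		.lvds = {
			.bits_per_lane_and_dclk_cycle	= 7,
			.differential_clk_rate		= pixel_clock_hz,
			.lanes				= 4,
			.is_slave			= false,
		},
	};

	return phy_configure(phy, &opts);
}
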
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index c00c618ef290..cb0fd633a610 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -71,7 +71,7 @@ struct pipe_inode_info {
unsigned int files;
unsigned int r_counter;
unsigned int w_counter;
- unsigned int poll_usage;
+ bool poll_usage;
struct page *tmp_page;
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
diff --git a/include/linux/platform_data/asoc-poodle.h b/include/linux/platform_data/asoc-poodle.h
new file mode 100644
index 000000000000..2052fad55c5c
--- /dev/null
+++ b/include/linux/platform_data/asoc-poodle.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_PLATFORM_DATA_POODLE_AUDIO
+#define __LINUX_PLATFORM_DATA_POODLE_AUDIO
+
+/* locomo is not a proper gpio driver, and uses its own api */
+struct poodle_audio_platform_data {
+ struct device *locomo_dev;
+
+ int gpio_amp_on;
+ int gpio_mute_l;
+ int gpio_mute_r;
+ int gpio_232vcc_on;
+ int gpio_jk_b;
+};
+
+#endif
diff --git a/arch/arm/mach-pxa/include/mach/audio.h b/include/linux/platform_data/asoc-pxa.h
index 7beebf7297b5..327454cd8246 100644
--- a/arch/arm/mach-pxa/include/mach/audio.h
+++ b/include/linux/platform_data/asoc-pxa.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_AUDIO_H__
-#define __ASM_ARCH_AUDIO_H__
+#ifndef __SOC_PXA_AUDIO_H__
+#define __SOC_PXA_AUDIO_H__
#include <sound/core.h>
#include <sound/pcm.h>
diff --git a/include/linux/platform_data/video-pxafb.h b/include/linux/platform_data/video-pxafb.h
index b3d574778326..6333bac166a5 100644
--- a/include/linux/platform_data/video-pxafb.h
+++ b/include/linux/platform_data/video-pxafb.h
@@ -8,7 +8,6 @@
*/
#include <linux/fb.h>
-#include <mach/regs-lcd.h>
/*
* Supported LCD connections
@@ -153,6 +152,27 @@ struct pxafb_mach_info {
void pxa_set_fb_info(struct device *, struct pxafb_mach_info *);
unsigned long pxafb_get_hsync_time(struct device *dev);
+/* smartpanel related */
+#define SMART_CMD_A0 (0x1 << 8)
+#define SMART_CMD_READ_STATUS_REG (0x0 << 9)
+#define SMART_CMD_READ_FRAME_BUFFER ((0x0 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WRITE_COMMAND (0x1 << 9)
+#define SMART_CMD_WRITE_DATA ((0x1 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WRITE_FRAME ((0x2 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WAIT_FOR_VSYNC (0x3 << 9)
+#define SMART_CMD_NOOP (0x4 << 9)
+#define SMART_CMD_INTERRUPT (0x5 << 9)
+
+#define SMART_CMD(x) (SMART_CMD_WRITE_COMMAND | ((x) & 0xff))
+#define SMART_DAT(x) (SMART_CMD_WRITE_DATA | ((x) & 0xff))
+
+/* SMART_DELAY() provides a software-controlled delay primitive that can be
+ * inserted between command sequences. The unused command 0x6 is used here,
+ * and the delay ranges from 0 ms to 255 ms.
+ */
+#define SMART_CMD_DELAY (0x6 << 9)
+#define SMART_DELAY(ms) (SMART_CMD_DELAY | ((ms) & 0xff))
+
#ifdef CONFIG_FB_PXA_SMARTPANEL
extern int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int);
extern int pxafb_smart_flush(struct fb_info *info);
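
A sketch of queueing a short command sequence with the macros above (only meaningful with CONFIG_FB_PXA_SMARTPANEL); the register and data values are illustrative, not taken from any real panel.

#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/platform_data/video-pxafb.h>

static void panel_init_sequence(struct fb_info *info)
{
	uint16_t cmds[] = {
		SMART_CMD(0x01),	/* hypothetical "reset" command */
		SMART_DELAY(120),	/* let the panel settle for 120 ms */
		SMART_CMD(0x36),	/* hypothetical "set orientation" command */
		SMART_DAT(0x48),	/* ... with one data byte */
		SMART_CMD_WAIT_FOR_VSYNC,
	};

	pxafb_smart_queue(info, cmds, ARRAY_SIZE(cmds));
	pxafb_smart_flush(info);
}
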
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 7c96f169d274..b0d5a253156e 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -31,7 +31,11 @@ struct platform_device {
struct resource *resource;
const struct platform_device_id *id_entry;
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
/* MFD cell pointer */
struct mfd_cell *mfd_cell;
@@ -210,6 +214,14 @@ struct platform_driver {
struct device_driver driver;
const struct platform_device_id *id_table;
bool prevent_deferred_probe;
+ /*
+ * For most device drivers, no need to care about this flag as long as
+ * all DMAs are handled through the kernel DMA API. For some special
+ * ones, for example VFIO drivers, they know how to manage the DMA
+ * themselves and set this flag so that the IOMMU layer will allow them
+ * to setup and manage their own I/O address space.
+ */
+ bool driver_managed_dma;
};
#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
@@ -328,8 +340,6 @@ extern int platform_pm_restore(struct device *dev);
#define platform_pm_restore NULL
#endif
-extern int platform_dma_configure(struct device *dev);
-
#ifdef CONFIG_PM_SLEEP
#define USE_PLATFORM_PM_SLEEP_OPS \
.suspend = platform_pm_suspend, \
diff --git a/include/linux/pm.h b/include/linux/pm.h
index ffe941958501..871c9c49ec9d 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -21,7 +21,6 @@
* Callbacks for platform drivers to implement.
*/
extern void (*pm_power_off)(void);
-extern void (*pm_power_off_prepare)(void);
struct device; /* we have a circular dep with device.h */
#ifdef CONFIG_VT_CONSOLE_SLEEP
@@ -36,6 +35,15 @@ static inline void pm_vt_switch_unregister(struct device *dev)
}
#endif /* CONFIG_VT_CONSOLE_SLEEP */
+#ifdef CONFIG_CXL_SUSPEND
+bool cxl_mem_active(void);
+#else
+static inline bool cxl_mem_active(void)
+{
+ return false;
+}
+#endif
+
/*
* Device power management
*/
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 0d85a63a1f78..6708b4ec244d 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -117,18 +117,25 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
bool available);
-struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
- unsigned int level);
-struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
- unsigned int *level);
-
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq);
struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
unsigned long u_volt);
+struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+ unsigned int level);
+struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level);
+
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq);
+
+struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
+ unsigned int *bw, int index);
+
+struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+ unsigned int *bw, int index);
+
void dev_pm_opp_put(struct dev_pm_opp *opp);
int dev_pm_opp_add(struct device *dev, unsigned long freq,
@@ -243,12 +250,6 @@ static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
return 0;
}
-static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
- unsigned long freq, bool available)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
unsigned int level)
{
@@ -261,6 +262,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+ unsigned long freq, bool available)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq)
{
@@ -279,6 +286,18 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
+ unsigned int *bw, int index)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+ unsigned int *bw, int index)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
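
A sketch of the new bandwidth-based lookup, written by analogy with the freq/level helpers: find the lowest OPP whose bandwidth on interconnect path 0 satisfies *bw, then drop the reference.

#include <linux/err.h>
#include <linux/pm_opp.h>

static int pick_bw_opp(struct device *dev, unsigned int *bw)
{
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_bw_ceil(dev, bw, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* use the OPP (by analogy with the freq helpers, *bw should now hold the matched value) */
	dev_pm_opp_put(opp);
	return 0;
}
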
diff --git a/include/linux/property.h b/include/linux/property.h
index fc24d45632eb..a5b429d623f6 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -451,6 +451,11 @@ static inline void *device_connection_find_match(struct device *dev,
return fwnode_connection_find_match(dev_fwnode(dev), con_id, data, match);
}
+int fwnode_connection_find_matches(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches, unsigned int matches_len);
+
/* -------------------------------------------------------------------------- */
/* Software fwnode support - when HW description is incomplete or missing */
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 15b3d176b6b4..c952c5ba8fab 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -30,7 +30,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
#define PT_PTRACED 0x00000001
-#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
#define PT_OPT_FLAG_SHIFT 3
/* PT_TRACE_* event enable flags */
@@ -47,12 +46,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
#define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
-/* single stepping state bits (used on ARM and PA-RISC) */
-#define PT_SINGLESTEP_BIT 31
-#define PT_SINGLESTEP (1<<PT_SINGLESTEP_BIT)
-#define PT_BLOCKSTEP_BIT 30
-#define PT_BLOCKSTEP (1<<PT_BLOCKSTEP_BIT)
-
extern long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index a2429648d831..e5d9ef886179 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -7,6 +7,7 @@
#include <uapi/linux/reboot.h>
struct device;
+struct sys_off_handler;
#define SYS_DOWN 0x0001 /* Notify of system down */
#define SYS_RESTART SYS_DOWN
@@ -62,6 +63,95 @@ extern void machine_shutdown(void);
struct pt_regs;
extern void machine_crash_shutdown(struct pt_regs *);
+void do_kernel_power_off(void);
+
+/*
+ * sys-off handler API.
+ */
+
+/*
+ * Standard sys-off priority levels. Users are expected to set priorities
+ * relative to the standard levels.
+ *
+ * SYS_OFF_PRIO_PLATFORM: Use this for platform-level handlers.
+ *
+ * SYS_OFF_PRIO_LOW: Use this for a handler of last resort.
+ *
+ * SYS_OFF_PRIO_DEFAULT: Use this for normal handlers.
+ *
+ * SYS_OFF_PRIO_HIGH: Use this for higher priority handlers.
+ *
+ * SYS_OFF_PRIO_FIRMWARE: Use this if the handler uses a firmware call.
+ */
+#define SYS_OFF_PRIO_PLATFORM -256
+#define SYS_OFF_PRIO_LOW -128
+#define SYS_OFF_PRIO_DEFAULT 0
+#define SYS_OFF_PRIO_HIGH 192
+#define SYS_OFF_PRIO_FIRMWARE 224
+
+enum sys_off_mode {
+ /**
+ * @SYS_OFF_MODE_POWER_OFF_PREPARE:
+ *
+ * Handlers prepare system to be powered off. Handlers are
+ * allowed to sleep.
+ */
+ SYS_OFF_MODE_POWER_OFF_PREPARE,
+
+ /**
+ * @SYS_OFF_MODE_POWER_OFF:
+ *
+ * Handlers power-off system. Handlers are disallowed to sleep.
+ */
+ SYS_OFF_MODE_POWER_OFF,
+
+ /**
+ * @SYS_OFF_MODE_RESTART:
+ *
+ * Handlers restart system. Handlers are disallowed to sleep.
+ */
+ SYS_OFF_MODE_RESTART,
+};
+
+/**
+ * struct sys_off_data - sys-off callback argument
+ *
+ * @mode: Mode ID. Currently used only by the sys-off restart mode,
+ * see enum reboot_mode for the available modes.
+ * @cb_data: User's callback data.
+ * @cmd: Command string. Currently used only by the sys-off restart mode,
+ * NULL otherwise.
+ */
+struct sys_off_data {
+ int mode;
+ void *cb_data;
+ const char *cmd;
+};
+
+struct sys_off_handler *
+register_sys_off_handler(enum sys_off_mode mode,
+ int priority,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+void unregister_sys_off_handler(struct sys_off_handler *handler);
+
+int devm_register_sys_off_handler(struct device *dev,
+ enum sys_off_mode mode,
+ int priority,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+
+int devm_register_power_off_handler(struct device *dev,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+
+int devm_register_restart_handler(struct device *dev,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+
+int register_platform_power_off(void (*power_off)(void));
+void unregister_platform_power_off(void (*power_off)(void));
+
/*
 * Architecture-independent implementations of sys_reboot commands.
*/
@@ -70,6 +160,7 @@ extern void kernel_restart_prepare(char *cmd);
extern void kernel_restart(char *cmd);
extern void kernel_halt(void);
extern void kernel_power_off(void);
+extern bool kernel_can_power_off(void);
void ctrl_alt_del(void);
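
A sketch of a driver hooking into the sys-off handler API above through the devm variant; the register write is hypothetical, and the NOTIFY_DONE return mirrors how existing power-off handlers report "handled, keep going".

#include <linux/device.h>
#include <linux/io.h>
#include <linux/reboot.h>

static int my_power_off(struct sys_off_data *data)
{
	void __iomem *base = data->cb_data;

	/* hypothetical register poke that cuts system power */
	writel(0x1, base);

	return NOTIFY_DONE;
}

static int my_register_poweroff(struct device *dev, void __iomem *base)
{
	return devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF,
					     SYS_OFF_PRIO_DEFAULT,
					     my_power_off, base);
}
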
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 02fa9116cd60..523c98b96cb4 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -41,7 +41,9 @@ struct rpmsg_channel_info {
* rpmsg_device - device that belong to the rpmsg bus
* @dev: the device struct
* @id: device id (used to match between rpmsg drivers and devices)
- * @driver_override: driver name to force a match
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
* @src: local address
* @dst: destination address
* @ept: the rpmsg endpoint of this channel
@@ -51,7 +53,7 @@ struct rpmsg_channel_info {
struct rpmsg_device {
struct device dev;
struct rpmsg_device_id id;
- char *driver_override;
+ const char *driver_override;
u32 src;
u32 dst;
struct rpmsg_endpoint *ept;
@@ -163,6 +165,8 @@ static inline __rpmsg64 cpu_to_rpmsg64(struct rpmsg_device *rpdev, u64 val)
#if IS_ENABLED(CONFIG_RPMSG)
+int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+ const char *driver_override);
int rpmsg_register_device(struct rpmsg_device *rpdev);
int rpmsg_unregister_device(struct device *parent,
struct rpmsg_channel_info *chinfo);
@@ -190,6 +194,12 @@ ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept);
#else
+static inline int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+ const char *driver_override)
+{
+ return -ENXIO;
+}
+
static inline int rpmsg_register_device(struct rpmsg_device *rpdev)
{
return -ENXIO;
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 3d780b44e678..534038d962e4 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -1067,6 +1067,9 @@
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
#define PCR_SETTING_REG3 0x747
+#define PCR_SETTING_REG4 0x818
+#define PCR_SETTING_REG5 0x81C
+
#define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b89c8571187b..c46f3a63b758 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -103,7 +103,7 @@ struct task_group;
/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
-#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
+#define TASK_TRACED __TASK_TRACED
#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
@@ -118,11 +118,9 @@ struct task_group;
#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
-#define task_is_traced(task) ((READ_ONCE(task->__state) & __TASK_TRACED) != 0)
-
-#define task_is_stopped(task) ((READ_ONCE(task->__state) & __TASK_STOPPED) != 0)
-
-#define task_is_stopped_or_traced(task) ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+#define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
+#define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
+#define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
/*
* Special states are those that do not use the normal wait-loop pattern. See
diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h
index fa067de9f1a9..68876d0a7ef9 100644
--- a/include/linux/sched/jobctl.h
+++ b/include/linux/sched/jobctl.h
@@ -19,6 +19,10 @@ struct task_struct;
#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
#define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */
+#define JOBCTL_PTRACE_FROZEN_BIT 24 /* frozen for ptrace */
+
+#define JOBCTL_STOPPED_BIT 26 /* do_signal_stop() */
+#define JOBCTL_TRACED_BIT 27 /* ptrace_stop() */
#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
@@ -28,6 +32,10 @@ struct task_struct;
#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
#define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT)
+#define JOBCTL_PTRACE_FROZEN (1UL << JOBCTL_PTRACE_FROZEN_BIT)
+
+#define JOBCTL_STOPPED (1UL << JOBCTL_STOPPED_BIT)
+#define JOBCTL_TRACED (1UL << JOBCTL_TRACED_BIT)
#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 1ad1f4bfa025..8cd975a8bfeb 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -137,7 +137,7 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
#ifdef CONFIG_MMU
#ifndef arch_get_mmap_end
-#define arch_get_mmap_end(addr) (TASK_SIZE)
+#define arch_get_mmap_end(addr, len, flags) (TASK_SIZE)
#endif
#ifndef arch_get_mmap_base
@@ -153,6 +153,15 @@ extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags);
+
+unsigned long
+generic_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+unsigned long
+generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
struct rlimit *rlim_stack) {}
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 20ed5ba2bde4..cafbe03eed01 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -294,8 +294,10 @@ static inline int kernel_dequeue_signal(void)
static inline void kernel_signal_stop(void)
{
spin_lock_irq(&current->sighand->siglock);
- if (current->jobctl & JOBCTL_STOP_DEQUEUED)
+ if (current->jobctl & JOBCTL_STOP_DEQUEUED) {
+ current->jobctl |= JOBCTL_STOPPED;
set_special_state(TASK_STOPPED);
+ }
spin_unlock_irq(&current->sighand->siglock);
schedule();
@@ -444,13 +446,23 @@ extern void calculate_sigpending(void);
extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
-static inline void signal_wake_up(struct task_struct *t, bool resume)
+static inline void signal_wake_up(struct task_struct *t, bool fatal)
{
- signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
+ unsigned int state = 0;
+ if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) {
+ t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED);
+ state = TASK_WAKEKILL | __TASK_TRACED;
+ }
+ signal_wake_up_state(t, state);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
- signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
+ unsigned int state = 0;
+ if (resume) {
+ t->jobctl &= ~JOBCTL_TRACED;
+ state = __TASK_TRACED;
+ }
+ signal_wake_up_state(t, state);
}
void task_join_group_stop(struct task_struct *task);
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 719c9a6cac8d..505aaf9fe477 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -32,6 +32,10 @@ struct kernel_clone_args {
size_t set_tid_size;
int cgroup;
int io_thread;
+ int kthread;
+ int idle;
+ int (*fn)(void *);
+ void *fn_arg;
struct cgroup *cgrp;
struct css_set *cset;
};
@@ -67,8 +71,7 @@ extern void fork_init(void);
extern void release_task(struct task_struct * p);
-extern int copy_thread(unsigned long, unsigned long, unsigned long,
- struct task_struct *, unsigned long);
+extern int copy_thread(struct task_struct *, const struct kernel_clone_args *);
extern void flush_thread(void);
@@ -89,6 +92,7 @@ struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
struct task_struct *fork_idle(int);
struct mm_struct *copy_init_mm(void);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
int kernel_wait(pid_t pid, int *stat);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index d4828e69087a..cbd5070bc87f 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -232,6 +232,7 @@ struct uart_port {
int hw_stopped; /* sw-assisted CTS flow state */
unsigned int mctrl; /* current modem ctrl settings */
unsigned int timeout; /* character-based timeout */
+ unsigned int frame_time; /* frame timing in ns */
unsigned int type; /* port type */
const struct uart_ops *ops;
unsigned int custom_divisor;
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index f6c3323fc4c5..dec15f5b3dec 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -256,6 +256,9 @@
#define APPLE_S5L_UCON_DEFAULT (S3C2410_UCON_TXIRQMODE | \
S3C2410_UCON_RXIRQMODE | \
S3C2410_UCON_RXFIFO_TOI)
+#define APPLE_S5L_UCON_MASK (APPLE_S5L_UCON_RXTO_ENA_MSK | \
+ APPLE_S5L_UCON_RXTHRESH_ENA_MSK | \
+ APPLE_S5L_UCON_TXTHRESH_ENA_MSK)
#define APPLE_S5L_UTRSTAT_RXTHRESH (1<<4)
#define APPLE_S5L_UTRSTAT_TXTHRESH (1<<5)
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index f36be5166c19..369769ce7399 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -42,14 +42,14 @@ static inline bool can_set_direct_map(void)
#endif
#endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
-#ifndef set_mce_nospec
-static inline int set_mce_nospec(unsigned long pfn, bool unmap)
+#ifdef CONFIG_X86_64
+int set_mce_nospec(unsigned long pfn);
+int clear_mce_nospec(unsigned long pfn);
+#else
+static inline int set_mce_nospec(unsigned long pfn)
{
return 0;
}
-#endif
-
-#ifndef clear_mce_nospec
static inline int clear_mce_nospec(unsigned long pfn)
{
return 0;
diff --git a/include/linux/signal.h b/include/linux/signal.h
index a6db6f2ae113..3b98e7a28538 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -282,7 +282,8 @@ extern int do_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type);
extern int group_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type);
-extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
+extern int send_signal_locked(int sig, struct kernel_siginfo *info,
+ struct task_struct *p, enum pid_type type);
extern int sigprocmask(int, sigset_t *, sigset_t *);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
index 3af1428da559..9153e77382e1 100644
--- a/include/linux/siphash.h
+++ b/include/linux/siphash.h
@@ -1,6 +1,5 @@
-/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * This file is provided under a dual BSD/GPLv2 license.
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* SipHash: a fast short-input PRF
* https://131002.net/siphash/
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index da96f0d3e753..d3d10556f0fa 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2696,7 +2696,14 @@ void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
skb->len -= len;
- BUG_ON(skb->len < skb->data_len);
+ if (unlikely(skb->len < skb->data_len)) {
+#if defined(CONFIG_DEBUG_NET)
+ skb->len += len;
+ pr_err("__skb_pull(len=%u)\n", len);
+ skb_dump(KERN_ERR, skb, false);
+#endif
+ BUG();
+ }
return skb->data += len;
}
diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/include/linux/soc/pxa/cpu.h
index ee7eab16135f..5782450ee45c 100644
--- a/arch/arm/mach-pxa/include/mach/hardware.h
+++ b/include/linux/soc/pxa/cpu.h
@@ -1,61 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * arch/arm/mach-pxa/include/mach/hardware.h
- *
* Author: Nicolas Pitre
* Created: Jun 15, 2001
* Copyright: MontaVista Software Inc.
*/
-#ifndef __ASM_ARCH_HARDWARE_H
-#define __ASM_ARCH_HARDWARE_H
-
-#include <mach/addr-map.h>
-
-/*
- * Workarounds for at least 2 errata so far require this.
- * The mapping is set in mach-pxa/generic.c.
- */
-#define UNCACHED_PHYS_0 0xfe000000
-#define UNCACHED_PHYS_0_SIZE 0x00100000
-
-/*
- * Intel PXA2xx internal register mapping:
- *
- * 0x40000000 - 0x41ffffff <--> 0xf2000000 - 0xf3ffffff
- * 0x44000000 - 0x45ffffff <--> 0xf4000000 - 0xf5ffffff
- * 0x48000000 - 0x49ffffff <--> 0xf6000000 - 0xf7ffffff
- * 0x4c000000 - 0x4dffffff <--> 0xf8000000 - 0xf9ffffff
- * 0x50000000 - 0x51ffffff <--> 0xfa000000 - 0xfbffffff
- * 0x54000000 - 0x55ffffff <--> 0xfc000000 - 0xfdffffff
- * 0x58000000 - 0x59ffffff <--> 0xfe000000 - 0xffffffff
- *
- * Note that not all PXA2xx chips implement all those addresses, and the
- * kernel only maps the minimum needed range of this mapping.
- */
-#define io_v2p(x) (0x3c000000 + ((x) & 0x01ffffff) + (((x) & 0x0e000000) << 1))
-#define io_p2v(x) IOMEM(0xf2000000 + ((x) & 0x01ffffff) + (((x) & 0x1c000000) >> 1))
-
-#ifndef __ASSEMBLY__
-# define __REG(x) (*((volatile u32 __iomem *)io_p2v(x)))
-
-/* With indexed regs we don't want to feed the index through io_p2v()
- especially if it is a variable, otherwise horrible code will result. */
-# define __REG2(x,y) \
- (*(volatile u32 __iomem*)((u32)&__REG(x) + (y)))
-
-# define __PREG(x) (io_v2p((u32)&(x)))
-
-#else
-
-# define __REG(x) io_p2v(x)
-# define __PREG(x) io_v2p(x)
-
-#endif
-
-#ifndef __ASSEMBLY__
+#ifndef __SOC_PXA_CPU_H
+#define __SOC_PXA_CPU_H
+#ifdef CONFIG_ARM
#include <asm/cputype.h>
+#endif
/*
* CPU Stepping CPU_ID JTAG_ID
@@ -294,12 +249,4 @@
__cpu_is_pxa93x(read_cpuid_id()); \
})
-
-/*
- * return current memory and LCD clock frequency in units of 10kHz
- */
-extern unsigned int get_memclk_frequency_10khz(void);
-
#endif
-
-#endif /* _ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/plat-pxa/include/plat/mfp.h b/include/linux/soc/pxa/mfp.h
index 3accaa9ee781..39779cbed0c0 100644
--- a/arch/arm/plat-pxa/include/plat/mfp.h
+++ b/include/linux/soc/pxa/mfp.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * arch/arm/plat-pxa/include/plat/mfp.h
- *
* Common Multi-Function Pin Definitions
*
* Copyright (C) 2007 Marvell International Ltd.
@@ -453,8 +451,8 @@ struct mfp_addr_map {
#define MFP_ADDR_END { MFP_PIN_INVALID, 0 }
-void __init mfp_init_base(void __iomem *mfpr_base);
-void __init mfp_init_addr(struct mfp_addr_map *map);
+void mfp_init_base(void __iomem *mfpr_base);
+void mfp_init_addr(struct mfp_addr_map *map);
/*
* mfp_{read, write}() - for direct read/write access to the MFPR register
diff --git a/include/linux/soc/pxa/smemc.h b/include/linux/soc/pxa/smemc.h
new file mode 100644
index 000000000000..f1ffea236c15
--- /dev/null
+++ b/include/linux/soc/pxa/smemc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __PXA_REGS_H
+#define __PXA_REGS_H
+
+#include <linux/types.h>
+
+void pxa_smemc_set_pcmcia_timing(int sock, u32 mcmem, u32 mcatt, u32 mcio);
+void pxa_smemc_set_pcmcia_socket(int nr);
+int pxa2xx_smemc_get_sdram_rows(void);
+unsigned int pxa3xx_smemc_get_memclkdiv(void);
+void __iomem *pxa_smemc_get_mdrefr(void);
+
+#endif
diff --git a/include/linux/soc/renesas/r9a06g032-sysctrl.h b/include/linux/soc/renesas/r9a06g032-sysctrl.h
new file mode 100644
index 000000000000..066dfb15cbdd
--- /dev/null
+++ b/include/linux/soc/renesas/r9a06g032-sysctrl.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__
+#define __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__
+
+#ifdef CONFIG_CLK_R9A06G032
+int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val);
+#else
+static inline int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val) { return -ENODEV; }
+#endif
+
+#endif /* __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__ */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index df70eb1a671e..d361ba26203b 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -138,6 +138,8 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
* for driver coldplugging, and in uevents used for hotplugging
* @driver_override: If the name of a driver is written to this attribute, then
* the device will bind to the named driver and only the named driver.
+ * Do not set directly, because core frees it; use driver_set_override() to
+ * set or clear it.
* @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
* not using a GPIO line)
* @word_delay: delay to be inserted between consecutive
diff --git a/include/linux/swap.h b/include/linux/swap.h
index f3ae17b43f20..0c0fed1b348f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -55,6 +55,10 @@ static inline int current_is_kswapd(void)
* actions on faults.
*/
+#define SWP_SWAPIN_ERROR_NUM 1
+#define SWP_SWAPIN_ERROR (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
+ SWP_MIGRATION_NUM + SWP_DEVICE_NUM + \
+ SWP_PTE_MARKER_NUM)
/*
* PTE markers are used to persist information onto PTEs that are mapped with
* file-backed memories. As its name "PTE" hints, it should only be applied to
@@ -120,7 +124,8 @@ static inline int current_is_kswapd(void)
#define MAX_SWAPFILES \
((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
- SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - SWP_PTE_MARKER_NUM)
+ SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
+ SWP_PTE_MARKER_NUM - SWP_SWAPIN_ERROR_NUM)
/*
* Magic header for a swap area. The first part of the union is
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index fe220df499f1..f24775b41880 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -108,6 +108,16 @@ static inline void *swp_to_radix_entry(swp_entry_t entry)
return xa_mk_value(entry.val);
}
+static inline swp_entry_t make_swapin_error_entry(struct page *page)
+{
+ return swp_entry(SWP_SWAPIN_ERROR, page_to_pfn(page));
+}
+
+static inline int is_swapin_error_entry(swp_entry_t entry)
+{
+ return swp_type(entry) == SWP_SWAPIN_ERROR;
+}
+
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index 124e13cb1469..9f442d73f3df 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -198,15 +198,15 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
* @local_property_block_len: Length of the @local_property_block in dwords
* @remote_properties: Properties exported by the remote domain
* @remote_property_block_gen: Generation of @remote_properties
- * @get_uuid_work: Work used to retrieve @remote_uuid
- * @uuid_retries: Number of times left @remote_uuid is requested before
- * giving up
- * @get_properties_work: Work used to get remote domain properties
- * @properties_retries: Number of times left to read properties
+ * @state: Next XDomain discovery state to run
+ * @state_work: Work used to run the next state
+ * @state_retries: Number of retries remaining for the state
* @properties_changed_work: Work used to notify the remote domain that
* our properties have changed
* @properties_changed_retries: Number of times left to send properties
* changed notification
+ * @bonding_possible: True if lane bonding is possible on local side
+ * @target_link_width: Target link width from the remote host
* @link: Root switch link the remote domain is connected (ICM only)
* @depth: Depth in the chain the remote domain is connected (ICM only)
*
@@ -244,12 +244,13 @@ struct tb_xdomain {
u32 local_property_block_len;
struct tb_property_dir *remote_properties;
u32 remote_property_block_gen;
- struct delayed_work get_uuid_work;
- int uuid_retries;
- struct delayed_work get_properties_work;
- int properties_retries;
+ int state;
+ struct delayed_work state_work;
+ int state_retries;
struct delayed_work properties_changed_work;
int properties_changed_retries;
+ bool bonding_possible;
+ u8 target_link_width;
u8 link;
u8 depth;
};
@@ -465,6 +466,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
* @msix_ida: Used to allocate MSI-X vectors for rings
* @going_away: The host controller device is about to disappear so when
* this flag is set, avoid touching the hardware anymore.
+ * @iommu_dma_protection: An IOMMU will isolate external-facing ports.
* @interrupt_work: Work scheduled to handle ring interrupt when no
* MSI-X is used.
* @hop_count: Number of rings (end point hops) supported by NHI.
@@ -479,6 +481,7 @@ struct tb_nhi {
struct tb_ring **rx_rings;
struct ida msix_ida;
bool going_away;
+ bool iommu_dma_protection;
struct work_struct interrupt_work;
u32 hop_count;
unsigned long quirks;
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 200b7b79acb5..60bee864d897 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1969,21 +1969,10 @@ usb_pipe_endpoint(struct usb_device *dev, unsigned int pipe)
return eps[usb_pipeendpoint(pipe)];
}
-/*-------------------------------------------------------------------------*/
-
-static inline __u16
-usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
+static inline u16 usb_maxpacket(struct usb_device *udev, int pipe)
{
- struct usb_host_endpoint *ep;
- unsigned epnum = usb_pipeendpoint(pipe);
+ struct usb_host_endpoint *ep = usb_pipe_endpoint(udev, pipe);
- if (is_out) {
- WARN_ON(usb_pipein(pipe));
- ep = udev->ep_out[epnum];
- } else {
- WARN_ON(usb_pipeout(pipe));
- ep = udev->ep_in[epnum];
- }
if (!ep)
return 0;
@@ -1991,8 +1980,6 @@ usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
return usb_endpoint_maxp(&ep->desc);
}
-/* ----------------------------------------------------------------------- */
-
/* translate USB error codes to codes user space understands */
static inline int usb_translate_errors(int error_code)
{
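For illustration only (not part of the patch): the usb_maxpacket() hunk above drops the is_out argument because the endpoint is now resolved from the pipe via usb_pipe_endpoint(). A minimal before/after sketch of a caller, with a hypothetical bulk OUT endpoint number:

	unsigned int pipe = usb_sndbulkpipe(udev, 2);	/* endpoint 2 is an assumption */
	u16 maxp;

	/* old signature: callers passed the expected direction explicitly */
	maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));

	/* new signature: the direction is implied by the pipe itself */
	maxp = usb_maxpacket(udev, pipe);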
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 10fe57cf40be..3ad58b7a0824 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -386,6 +386,7 @@ struct usb_gadget_ops {
* @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag
* indicates that it supports LPM as per the LPM ECN & errata.
* @irq: the interrupt number for device controller.
+ * @id_number: a unique ID number for ensuring that gadget names are distinct
*
* Gadgets have a mostly-portable "gadget driver" implementing device
* functions, handling all usb configurations and interfaces. Gadget
@@ -446,6 +447,7 @@ struct usb_gadget {
unsigned connected:1;
unsigned lpm_capable:1;
int irq;
+ int id_number;
};
#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
@@ -664,9 +666,9 @@ static inline int usb_gadget_check_config(struct usb_gadget *gadget)
* @driver: Driver model state for this driver.
* @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL,
* this driver will be bound to any available UDC.
- * @pending: UDC core private data used for deferred probe of this driver.
- * @match_existing_only: If udc is not found, return an error and don't add this
- * gadget driver to list of pending driver
+ * @match_existing_only: If udc is not found, return an error and fail
+ * the driver registration
+ * @is_bound: Allow a driver to be bound to only one gadget
*
* Devices are disabled till a gadget driver successfully bind()s, which
* means the driver will handle setup() requests needed to enumerate (and
@@ -729,8 +731,8 @@ struct usb_gadget_driver {
struct device_driver driver;
char *udc_name;
- struct list_head pending;
unsigned match_existing_only:1;
+ bool is_bound:1;
};
@@ -740,22 +742,30 @@ struct usb_gadget_driver {
/* driver modules register and unregister, as usual.
* these calls must be made in a context that can sleep.
*
- * these will usually be implemented directly by the hardware-dependent
- * usb bus interface driver, which will only support a single driver.
+ * A gadget driver can be bound to only one gadget at a time.
*/
/**
- * usb_gadget_probe_driver - probe a gadget driver
+ * usb_gadget_register_driver_owner - register a gadget driver
* @driver: the driver being registered
+ * @owner: the driver module
+ * @mod_name: the driver module's build name
* Context: can sleep
*
* Call this in your gadget driver's module initialization function,
- * to tell the underlying usb controller driver about your driver.
+ * to tell the underlying UDC controller driver about your driver.
* The @bind() function will be called to bind it to a gadget before this
* registration call returns. It's expected that the @bind() function will
* be in init sections.
+ *
+ * Use the macro defined below instead of calling this directly.
*/
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver);
+int usb_gadget_register_driver_owner(struct usb_gadget_driver *driver,
+ struct module *owner, const char *mod_name);
+
+/* use a define to avoid include chaining to get THIS_MODULE & friends */
+#define usb_gadget_register_driver(driver) \
+ usb_gadget_register_driver_owner(driver, THIS_MODULE, KBUILD_MODNAME)
/**
* usb_gadget_unregister_driver - unregister a gadget driver
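For illustration only (not part of the patch): a sketch of how a gadget driver module would use the new registration macro, which forwards THIS_MODULE and KBUILD_MODNAME to usb_gadget_register_driver_owner(). The driver below is hypothetical and elides the bind/unbind/setup callbacks.

	static struct usb_gadget_driver my_gadget_driver = {
		.function	= "my-function",
		.max_speed	= USB_SPEED_HIGH,
		/* .bind, .unbind, .setup, .disconnect elided */
		.driver		= { .name = "my_gadget" },
	};

	static int __init my_gadget_init(void)
	{
		/* expands to usb_gadget_register_driver_owner(&my_gadget_driver,
		 * THIS_MODULE, KBUILD_MODNAME) */
		return usb_gadget_register_driver(&my_gadget_driver);
	}
	module_init(my_gadget_init);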
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 548a028f2dab..2c1fc9212cf2 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -124,6 +124,7 @@ struct usb_hcd {
#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
#define HCD_FLAG_DEAD 6 /* controller has died? */
#define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */
+#define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */
/* The flags can be tested using these macros; they are likely to
* be slightly faster than test_bit().
@@ -134,6 +135,7 @@ struct usb_hcd {
#define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD))
+#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER))
/*
* Specifies if interfaces are authorized by default
diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h
index a9d9957933dc..ee57781dcf28 100644
--- a/include/linux/usb/typec_mux.h
+++ b/include/linux/usb/typec_mux.h
@@ -8,11 +8,13 @@
struct device;
struct typec_mux;
+struct typec_mux_dev;
struct typec_switch;
+struct typec_switch_dev;
struct typec_altmode;
struct fwnode_handle;
-typedef int (*typec_switch_set_fn_t)(struct typec_switch *sw,
+typedef int (*typec_switch_set_fn_t)(struct typec_switch_dev *sw,
enum typec_orientation orientation);
struct typec_switch_desc {
@@ -32,13 +34,13 @@ static inline struct typec_switch *typec_switch_get(struct device *dev)
return fwnode_typec_switch_get(dev_fwnode(dev));
}
-struct typec_switch *
+struct typec_switch_dev *
typec_switch_register(struct device *parent,
const struct typec_switch_desc *desc);
-void typec_switch_unregister(struct typec_switch *sw);
+void typec_switch_unregister(struct typec_switch_dev *sw);
-void typec_switch_set_drvdata(struct typec_switch *sw, void *data);
-void *typec_switch_get_drvdata(struct typec_switch *sw);
+void typec_switch_set_drvdata(struct typec_switch_dev *sw, void *data);
+void *typec_switch_get_drvdata(struct typec_switch_dev *sw);
struct typec_mux_state {
struct typec_altmode *alt;
@@ -46,7 +48,7 @@ struct typec_mux_state {
void *data;
};
-typedef int (*typec_mux_set_fn_t)(struct typec_mux *mux,
+typedef int (*typec_mux_set_fn_t)(struct typec_mux_dev *mux,
struct typec_mux_state *state);
struct typec_mux_desc {
@@ -67,11 +69,11 @@ typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc)
return fwnode_typec_mux_get(dev_fwnode(dev), desc);
}
-struct typec_mux *
+struct typec_mux_dev *
typec_mux_register(struct device *parent, const struct typec_mux_desc *desc);
-void typec_mux_unregister(struct typec_mux *mux);
+void typec_mux_unregister(struct typec_mux_dev *mux);
-void typec_mux_set_drvdata(struct typec_mux *mux, void *data);
-void *typec_mux_get_drvdata(struct typec_mux *mux);
+void typec_mux_set_drvdata(struct typec_mux_dev *mux, void *data);
+void *typec_mux_get_drvdata(struct typec_mux_dev *mux);
#endif /* __USB_TYPEC_MUX */
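For illustration only (not part of the patch): providers now register and receive struct typec_switch_dev, while consumers keep using struct typec_switch. A rough sketch of an orientation-switch provider under the renamed types; the my_* names are hypothetical.

	static int my_sw_set(struct typec_switch_dev *sw,
			     enum typec_orientation orientation)
	{
		struct my_chip *chip = typec_switch_get_drvdata(sw);

		/* program the lane crosspoint according to orientation */
		return my_chip_set_orientation(chip, orientation);
	}

	static int my_register_switch(struct device *dev, struct my_chip *chip)
	{
		struct typec_switch_desc desc = {
			.fwnode		= dev_fwnode(dev),
			.set		= my_sw_set,
			.drvdata	= chip,
		};
		struct typec_switch_dev *sw;

		sw = typec_switch_register(dev, &desc);
		return PTR_ERR_OR_ZERO(sw);
	}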
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 8943a209202e..4700a88a28f6 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -64,11 +64,15 @@ struct vdpa_mgmt_dev;
* struct vdpa_device - representation of a vDPA device
* @dev: underlying device
* @dma_dev: the actual device that is performing DMA
- * @driver_override: driver name to force a match
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
* @config: the configuration ops for this device.
- * @cf_mutex: Protects get and set access to configuration layout.
+ * @cf_lock: Protects get and set access to configuration layout.
* @index: device index
* @features_valid: were features initialized? for legacy guests
+ * @ngroups: the number of virtqueue groups
+ * @nas: the number of address spaces
* @use_va: indicate whether virtual address must be used by this device
* @nvqs: maximum number of supported virtqueues
* @mdev: management device pointer; caller must setup when registering device as part
@@ -79,12 +83,14 @@ struct vdpa_device {
struct device *dma_dev;
const char *driver_override;
const struct vdpa_config_ops *config;
- struct mutex cf_mutex; /* Protects get/set config */
+ struct rw_semaphore cf_lock; /* Protects get/set config */
unsigned int index;
bool features_valid;
bool use_va;
u32 nvqs;
struct vdpa_mgmt_dev *mdev;
+ unsigned int ngroups;
+ unsigned int nas;
};
/**
@@ -172,6 +178,10 @@ struct vdpa_map_file {
* for the device
* @vdev: vdpa device
 * Returns virtqueue align requirement
+ * @get_vq_group: Get the group id for a specific virtqueue
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns u32: group id for this virtqueue
* @get_device_features: Get virtio features supported by the device
* @vdev: vdpa device
* Returns the virtio features support by the
@@ -232,10 +242,17 @@ struct vdpa_map_file {
* @vdev: vdpa device
* Returns the iova range supported by
* the device.
+ * @set_group_asid: Set address space identifier for a
+ * virtqueue group
+ * @vdev: vdpa device
+ * @group: virtqueue group
+ * @asid: address space id for this group
+ * Returns integer: success (0) or error (< 0)
* @set_map: Set device memory mapping (optional)
* Needed for device that using device
* specific DMA translation (on-chip IOMMU)
* @vdev: vdpa device
+ * @asid: address space identifier
* @iotlb: vhost memory mapping to be
* used by the vDPA
* Returns integer: success (0) or error (< 0)
@@ -244,6 +261,7 @@ struct vdpa_map_file {
* specific DMA translation (on-chip IOMMU)
* and preferring incremental map.
* @vdev: vdpa device
+ * @asid: address space identifier
* @iova: iova to be mapped
* @size: size of the area
* @pa: physical address for the map
@@ -255,6 +273,7 @@ struct vdpa_map_file {
* specific DMA translation (on-chip IOMMU)
* and preferring incremental unmap.
* @vdev: vdpa device
+ * @asid: address space identifier
* @iova: iova to be unmapped
* @size: size of the area
* Returns integer: success (0) or error (< 0)
@@ -276,6 +295,9 @@ struct vdpa_config_ops {
const struct vdpa_vq_state *state);
int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
struct vdpa_vq_state *state);
+ int (*get_vendor_vq_stats)(struct vdpa_device *vdev, u16 idx,
+ struct sk_buff *msg,
+ struct netlink_ext_ack *extack);
struct vdpa_notification_area
(*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
/* vq irq is not expected to be changed once DRIVER_OK is set */
@@ -283,6 +305,7 @@ struct vdpa_config_ops {
/* Device ops */
u32 (*get_vq_align)(struct vdpa_device *vdev);
+ u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
u64 (*get_device_features)(struct vdpa_device *vdev);
int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
u64 (*get_driver_features)(struct vdpa_device *vdev);
@@ -304,10 +327,14 @@ struct vdpa_config_ops {
struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev);
/* DMA ops */
- int (*set_map)(struct vdpa_device *vdev, struct vhost_iotlb *iotlb);
- int (*dma_map)(struct vdpa_device *vdev, u64 iova, u64 size,
- u64 pa, u32 perm, void *opaque);
- int (*dma_unmap)(struct vdpa_device *vdev, u64 iova, u64 size);
+ int (*set_map)(struct vdpa_device *vdev, unsigned int asid,
+ struct vhost_iotlb *iotlb);
+ int (*dma_map)(struct vdpa_device *vdev, unsigned int asid,
+ u64 iova, u64 size, u64 pa, u32 perm, void *opaque);
+ int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
+ u64 iova, u64 size);
+ int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
+ unsigned int asid);
/* Free device resources */
void (*free)(struct vdpa_device *vdev);
@@ -315,6 +342,7 @@ struct vdpa_config_ops {
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
+ unsigned int ngroups, unsigned int nas,
size_t size, const char *name,
bool use_va);
@@ -325,17 +353,20 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
* @member: the name of struct vdpa_device within the @dev_struct
* @parent: the parent device
* @config: the bus operations that is supported by this device
+ * @ngroups: the number of virtqueue groups supported by this device
+ * @nas: the number of address spaces
* @name: name of the vdpa device
* @use_va: indicate whether virtual address must be used by this device
*
* Return allocated data structure or ERR_PTR upon error
*/
-#define vdpa_alloc_device(dev_struct, member, parent, config, name, use_va) \
- container_of(__vdpa_alloc_device( \
- parent, config, \
- sizeof(dev_struct) + \
+#define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, nas, \
+ name, use_va) \
+ container_of((__vdpa_alloc_device( \
+ parent, config, ngroups, nas, \
+ (sizeof(dev_struct) + \
BUILD_BUG_ON_ZERO(offsetof( \
- dev_struct, member)), name, use_va), \
+ dev_struct, member))), name, use_va)), \
dev_struct, member)
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
@@ -395,10 +426,10 @@ static inline int vdpa_reset(struct vdpa_device *vdev)
const struct vdpa_config_ops *ops = vdev->config;
int ret;
- mutex_lock(&vdev->cf_mutex);
+ down_write(&vdev->cf_lock);
vdev->features_valid = false;
ret = ops->reset(vdev);
- mutex_unlock(&vdev->cf_mutex);
+ up_write(&vdev->cf_lock);
return ret;
}
@@ -417,9 +448,9 @@ static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
{
int ret;
- mutex_lock(&vdev->cf_mutex);
+ down_write(&vdev->cf_lock);
ret = vdpa_set_features_unlocked(vdev, features);
- mutex_unlock(&vdev->cf_mutex);
+ up_write(&vdev->cf_lock);
return ret;
}
@@ -463,7 +494,7 @@ struct vdpa_mgmtdev_ops {
struct vdpa_mgmt_dev {
struct device *device;
const struct vdpa_mgmtdev_ops *ops;
- const struct virtio_device_id *id_table;
+ struct virtio_device_id *id_table;
u64 config_attr_mask;
struct list_head list;
u64 supported_features;
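For illustration only (not part of the patch): vdpa_alloc_device() now also takes the number of virtqueue groups and address spaces. A sketch of an allocation call site under the new signature; the structure, parent device and counts are hypothetical.

	struct my_vdpa {
		struct vdpa_device vdpa;	/* must be the first member */
		/* device-private state ... */
	};

	struct my_vdpa *mv;

	/* one virtqueue group, one address space, physical addressing */
	mv = vdpa_alloc_device(struct my_vdpa, vdpa, parent_dev,
			       &my_vdpa_config_ops, 1, 1, NULL, false);
	if (IS_ERR(mv))
		return PTR_ERR(mv);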
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 66dda06ec42d..aa888cc51757 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -15,6 +15,8 @@
#include <linux/poll.h>
#include <uapi/linux/vfio.h>
+struct kvm;
+
/*
* VFIO devices can be placed in a set, this allows all devices to share this
* structure and the VFIO core will provide a lock that is held around
@@ -34,6 +36,8 @@ struct vfio_device {
struct vfio_device_set *dev_set;
struct list_head dev_set_list;
unsigned int migration_flags;
+ /* Driver must reference the kvm during open_device or never touch it */
+ struct kvm *kvm;
/* Members below here are private, not for driver use */
refcount_t refcount;
@@ -125,8 +129,6 @@ void vfio_uninit_group_dev(struct vfio_device *device);
int vfio_register_group_dev(struct vfio_device *device);
int vfio_register_emulated_iommu_dev(struct vfio_device *device);
void vfio_unregister_group_dev(struct vfio_device *device);
-extern struct vfio_device *vfio_device_get_from_dev(struct device *dev);
-extern void vfio_device_put(struct vfio_device *device);
int vfio_assign_device_set(struct vfio_device *device, void *set_id);
@@ -138,56 +140,36 @@ int vfio_mig_get_next_state(struct vfio_device *device,
/*
* External user API
*/
-extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
-extern void vfio_group_put_external_user(struct vfio_group *group);
-extern struct vfio_group *vfio_group_get_external_user_from_dev(struct device
- *dev);
-extern bool vfio_external_group_match_file(struct vfio_group *group,
- struct file *filep);
-extern int vfio_external_user_iommu_id(struct vfio_group *group);
-extern long vfio_external_check_extension(struct vfio_group *group,
- unsigned long arg);
+extern struct iommu_group *vfio_file_iommu_group(struct file *file);
+extern bool vfio_file_enforced_coherent(struct file *file);
+extern void vfio_file_set_kvm(struct file *file, struct kvm *kvm);
+extern bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))
-extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
+extern int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
int npage, int prot, unsigned long *phys_pfn);
-extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
+extern int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
int npage);
-
-extern int vfio_group_pin_pages(struct vfio_group *group,
- unsigned long *user_iova_pfn, int npage,
- int prot, unsigned long *phys_pfn);
-extern int vfio_group_unpin_pages(struct vfio_group *group,
- unsigned long *user_iova_pfn, int npage);
-
-extern int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
+extern int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova,
void *data, size_t len, bool write);
-extern struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group);
-
/* each type has independent events */
enum vfio_notify_type {
VFIO_IOMMU_NOTIFY = 0,
- VFIO_GROUP_NOTIFY = 1,
};
/* events for VFIO_IOMMU_NOTIFY */
#define VFIO_IOMMU_NOTIFY_DMA_UNMAP BIT(0)
-/* events for VFIO_GROUP_NOTIFY */
-#define VFIO_GROUP_NOTIFY_SET_KVM BIT(0)
-
-extern int vfio_register_notifier(struct device *dev,
+extern int vfio_register_notifier(struct vfio_device *device,
enum vfio_notify_type type,
unsigned long *required_events,
struct notifier_block *nb);
-extern int vfio_unregister_notifier(struct device *dev,
+extern int vfio_unregister_notifier(struct vfio_device *device,
enum vfio_notify_type type,
struct notifier_block *nb);
-struct kvm;
-extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
/*
* Sub-module helpers
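For illustration only (not part of the patch): the pinning helpers now take the struct vfio_device being opened rather than a struct device or vfio_group handle. A sketch of a mediated driver pinning a single guest page; the my_mdev layout and gpa value are hypothetical.

	unsigned long user_pfn = gpa >> PAGE_SHIFT;
	unsigned long phys_pfn;
	int ret;

	ret = vfio_pin_pages(&my_mdev->vfio_dev, &user_pfn, 1,
			     IOMMU_READ | IOMMU_WRITE, &phys_pfn);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... DMA to pfn_to_page(phys_pfn) ... */

	vfio_unpin_pages(&my_mdev->vfio_dev, &user_pfn, 1);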
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
index 48f2dd3c568c..23c176d4b073 100644
--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h
@@ -227,8 +227,9 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev);
void vfio_pci_core_uninit_device(struct vfio_pci_core_device *vdev);
void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev);
-int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn);
extern const struct pci_error_handlers vfio_pci_core_err_handlers;
+int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
+ int nr_virtfn);
long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
unsigned long arg);
int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
diff --git a/include/linux/vhost_iotlb.h b/include/linux/vhost_iotlb.h
index 2d0e2f52f938..e79a40838998 100644
--- a/include/linux/vhost_iotlb.h
+++ b/include/linux/vhost_iotlb.h
@@ -36,6 +36,8 @@ int vhost_iotlb_add_range(struct vhost_iotlb *iotlb, u64 start, u64 last,
u64 addr, unsigned int perm);
void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last);
+void vhost_iotlb_init(struct vhost_iotlb *iotlb, unsigned int limit,
+ unsigned int flags);
struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags);
void vhost_iotlb_free(struct vhost_iotlb *iotlb);
void vhost_iotlb_reset(struct vhost_iotlb *iotlb);
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 5464f398912a..d8fdf170637c 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -131,6 +131,7 @@ void unregister_virtio_device(struct virtio_device *dev);
bool is_virtio_device(struct device *dev);
void virtio_break_device(struct virtio_device *dev);
+void __virtio_unbreak_device(struct virtio_device *dev);
void virtio_config_changed(struct virtio_device *dev);
#ifdef CONFIG_PM_SLEEP
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index b341dd62aa4d..9a36051ceb76 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -57,6 +57,11 @@ struct virtio_shm_region {
* include a NULL entry for vqs unused by driver
* Returns 0 on success or error status
* @del_vqs: free virtqueues found by find_vqs().
+ * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
+ * The function guarantees that all memory operations on the
+ * queue before it are visible to the vring_interrupt() that is
+ * called after it.
+ * vdev: the virtio_device
* @get_features: get the array of feature bits for this device.
* vdev: the virtio_device
* Returns the first 64 feature bits (all we currently need).
@@ -89,6 +94,7 @@ struct virtio_config_ops {
const char * const names[], const bool *ctx,
struct irq_affinity *desc);
void (*del_vqs)(struct virtio_device *);
+ void (*synchronize_cbs)(struct virtio_device *);
u64 (*get_features)(struct virtio_device *vdev);
int (*finalize_features)(struct virtio_device *vdev);
const char *(*bus_name)(struct virtio_device *vdev);
@@ -218,6 +224,25 @@ int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
}
/**
+ * virtio_synchronize_cbs - synchronize with virtqueue callbacks
+ * @vdev: the device
+ */
+static inline
+void virtio_synchronize_cbs(struct virtio_device *dev)
+{
+ if (dev->config->synchronize_cbs) {
+ dev->config->synchronize_cbs(dev);
+ } else {
+ /*
+ * A best effort fallback to synchronize with
+ * interrupts, preemption and softirq disabled
+ * regions. See comment above synchronize_rcu().
+ */
+ synchronize_rcu();
+ }
+}
+
+/**
* virtio_device_ready - enable vq use in probe function
* @vdev: the device
*
@@ -230,7 +255,27 @@ void virtio_device_ready(struct virtio_device *dev)
{
unsigned status = dev->config->get_status(dev);
- BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
+ WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
+
+ /*
+ * The virtio_synchronize_cbs() makes sure vring_interrupt()
+ * will see the driver specific setup if it sees vq->broken
+ * as false (even if the notifications come before DRIVER_OK).
+ */
+ virtio_synchronize_cbs(dev);
+ __virtio_unbreak_device(dev);
+ /*
+ * The transport should ensure the visibility of vq->broken
+ * before setting DRIVER_OK. See the comments for the transport
+ * specific set_status() method.
+ *
+ * A well-behaved device will only notify a virtqueue after
+ * DRIVER_OK, which means the device should "see" the coherent
+ * memory write that set vq->broken to false (done by the driver
+ * when it observes DRIVER_OK). The subsequent vring_interrupt()
+ * in the driver will then also see vq->broken as false, so no
+ * notification is lost.
+ */
dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
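For illustration only (not part of the patch): a transport that routes virtqueue callbacks through per-queue interrupts can implement the optional synchronize_cbs() op with synchronize_irq() instead of relying on the synchronize_rcu() fallback above. A rough sketch, assuming a hypothetical transport with one MSI-X vector per virtqueue:

	static void my_transport_synchronize_cbs(struct virtio_device *vdev)
	{
		struct my_transport *vt = to_my_transport(vdev);
		int i;

		/* wait for in-flight vring_interrupt() handlers to complete */
		for (i = 0; i < vt->nvectors; i++)
			synchronize_irq(vt->msix_irqs[i]);
	}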
diff --git a/include/linux/wm97xx.h b/include/linux/wm97xx.h
index 462854f4f286..332d2b0f29b9 100644
--- a/include/linux/wm97xx.h
+++ b/include/linux/wm97xx.h
@@ -254,9 +254,6 @@ struct wm97xx_mach_ops {
int (*acc_startup) (struct wm97xx *);
void (*acc_shutdown) (struct wm97xx *);
- /* interrupt mask control - required for accelerated operation */
- void (*irq_enable) (struct wm97xx *, int enable);
-
/* GPIO pin used for accelerated operation */
int irq_gpio;
@@ -281,7 +278,6 @@ struct wm97xx {
unsigned long ts_reader_min_interval; /* Minimum interval */
unsigned int pen_irq; /* Pen IRQ number in use */
struct workqueue_struct *ts_workq;
- struct work_struct pen_event_work;
u16 acc_slot; /* AC97 slot used for acc touch data */
u16 acc_rate; /* acc touch data rate */
unsigned pen_is_down:1; /* Pen is down */
diff --git a/include/net/amt.h b/include/net/amt.h
index 7a4db8b903ee..0e40c3d64fcf 100644
--- a/include/net/amt.h
+++ b/include/net/amt.h
@@ -15,7 +15,7 @@ enum amt_msg_type {
AMT_MSG_MEMBERSHIP_QUERY,
AMT_MSG_MEMBERSHIP_UPDATE,
AMT_MSG_MULTICAST_DATA,
- AMT_MSG_TEARDOWM,
+ AMT_MSG_TEARDOWN,
__AMT_MSG_MAX,
};
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 0f9790c455bb..a427a05672e2 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -228,6 +228,7 @@ typedef struct ax25_dev {
ax25_dama_info dama;
#endif
refcount_t refcount;
+ bool device_up;
} ax25_dev;
typedef struct ax25_cb {
diff --git a/include/net/bonding.h b/include/net/bonding.h
index b14f4c0b4e9e..cb904d356e31 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -149,7 +149,9 @@ struct bond_params {
struct reciprocal_value reciprocal_packets_per_slave;
u16 ad_actor_sys_prio;
u16 ad_user_port_key;
+#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr ns_targets[BOND_MAX_NS_TARGETS];
+#endif
/* 2 bytes of padding : see ether_addr_equal_64bits() */
u8 ad_actor_system[ETH_ALEN + 2];
@@ -503,12 +505,14 @@ static inline int bond_is_ip_target_ok(__be32 addr)
return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
}
+#if IS_ENABLED(CONFIG_IPV6)
static inline int bond_is_ip6_target_ok(struct in6_addr *addr)
{
return !ipv6_addr_any(addr) &&
!ipv6_addr_loopback(addr) &&
!ipv6_addr_is_multicast(addr);
}
+#endif
/* Get the oldest arp which we've received on this slave for bond's
* arp_targets.
@@ -746,6 +750,7 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
return -1;
}
+#if IS_ENABLED(CONFIG_IPV6)
static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr *ip)
{
int i;
@@ -758,6 +763,7 @@ static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr
return -1;
}
+#endif
/* exported from bond_main.c */
extern unsigned int bond_net_id;
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 6406cfee34c2..37866c8386e2 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -58,8 +58,13 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
int ret = NF_ACCEPT;
if (ct) {
- if (!nf_ct_is_confirmed(ct))
+ if (!nf_ct_is_confirmed(ct)) {
ret = __nf_conntrack_confirm(skb);
+
+ if (ret == NF_ACCEPT)
+ ct = (struct nf_conn *)skb_nfct(skb);
+ }
+
if (ret == NF_ACCEPT && nf_ct_ecache_exist(ct))
nf_ct_deliver_cached_events(ct);
}
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 9bab396c1f3b..d6cf5116b5f9 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -187,37 +187,17 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
if (spin_trylock(&qdisc->seqlock))
return true;
- /* Paired with smp_mb__after_atomic() to make sure
- * STATE_MISSED checking is synchronized with clearing
- * in pfifo_fast_dequeue().
+ /* No need to insist if the MISSED flag was already set.
+ * Note that test_and_set_bit() also gives us memory ordering
+ * guarantees wrt potential earlier enqueue() and below
+ * spin_trylock(), both of which are necessary to prevent races
*/
- smp_mb__before_atomic();
-
- /* If the MISSED flag is set, it means other thread has
- * set the MISSED flag before second spin_trylock(), so
- * we can return false here to avoid multi cpus doing
- * the set_bit() and second spin_trylock() concurrently.
- */
- if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
+ if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
return false;
- /* Set the MISSED flag before the second spin_trylock(),
- * if the second spin_trylock() return false, it means
- * other cpu holding the lock will do dequeuing for us
- * or it will see the MISSED flag set after releasing
- * lock and reschedule the net_tx_action() to do the
- * dequeuing.
- */
- set_bit(__QDISC_STATE_MISSED, &qdisc->state);
-
- /* spin_trylock() only has load-acquire semantic, so use
- * smp_mb__after_atomic() to ensure STATE_MISSED is set
- * before doing the second spin_trylock().
- */
- smp_mb__after_atomic();
-
- /* Retry again in case other CPU may not see the new flag
- * after it releases the lock at the end of qdisc_run_end().
+ /* Try to take the lock again to make sure that we will either
+ * grab it or the CPU that still has it will see MISSED set
+ * when testing it in qdisc_run_end()
*/
return spin_trylock(&qdisc->seqlock);
}
@@ -229,6 +209,12 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
if (qdisc->flags & TCQ_F_NOLOCK) {
spin_unlock(&qdisc->seqlock);
+ /* spin_unlock() only has store-release semantic. The unlock
+ * and test_bit() ordering is a store-load ordering, so a full
+ * memory barrier is needed here.
+ */
+ smp_mb();
+
if (unlikely(test_bit(__QDISC_STATE_MISSED,
&qdisc->state)))
__netif_schedule(qdisc);
diff --git a/include/pcmcia/soc_common.h b/include/pcmcia/soc_common.h
new file mode 100644
index 000000000000..d4f18f4679df
--- /dev/null
+++ b/include/pcmcia/soc_common.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <pcmcia/ss.h>
+
+struct module;
+struct cpufreq_freqs;
+
+struct soc_pcmcia_regulator {
+ struct regulator *reg;
+ bool on;
+};
+
+struct pcmcia_state {
+ unsigned detect: 1,
+ ready: 1,
+ bvd1: 1,
+ bvd2: 1,
+ wrprot: 1,
+ vs_3v: 1,
+ vs_Xv: 1;
+};
+
+/*
+ * This structure encapsulates per-socket state which we might need to
+ * use when responding to a Card Services query of some kind.
+ */
+struct soc_pcmcia_socket {
+ struct pcmcia_socket socket;
+
+ /*
+ * Info from low level handler
+ */
+ unsigned int nr;
+ struct clk *clk;
+
+ /*
+ * Core PCMCIA state
+ */
+ const struct pcmcia_low_level *ops;
+
+ unsigned int status;
+ socket_state_t cs_state;
+
+ unsigned short spd_io[MAX_IO_WIN];
+ unsigned short spd_mem[MAX_WIN];
+ unsigned short spd_attr[MAX_WIN];
+
+ struct resource res_skt;
+ struct resource res_io;
+ struct resource res_io_io;
+ struct resource res_mem;
+ struct resource res_attr;
+
+ struct {
+ int gpio;
+ struct gpio_desc *desc;
+ unsigned int irq;
+ const char *name;
+ } stat[6];
+#define SOC_STAT_CD 0 /* Card detect */
+#define SOC_STAT_BVD1 1 /* BATDEAD / IOSTSCHG */
+#define SOC_STAT_BVD2 2 /* BATWARN / IOSPKR */
+#define SOC_STAT_RDY 3 /* Ready / Interrupt */
+#define SOC_STAT_VS1 4 /* Voltage sense 1 */
+#define SOC_STAT_VS2 5 /* Voltage sense 2 */
+
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_bus_enable;
+ struct soc_pcmcia_regulator vcc;
+ struct soc_pcmcia_regulator vpp;
+
+ unsigned int irq_state;
+
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block cpufreq_nb;
+#endif
+ struct timer_list poll_timer;
+ struct list_head node;
+ void *driver_data;
+};
+
+
+struct pcmcia_low_level {
+ struct module *owner;
+
+ /* first socket in system */
+ int first;
+ /* nr of sockets */
+ int nr;
+
+ int (*hw_init)(struct soc_pcmcia_socket *);
+ void (*hw_shutdown)(struct soc_pcmcia_socket *);
+
+ void (*socket_state)(struct soc_pcmcia_socket *, struct pcmcia_state *);
+ int (*configure_socket)(struct soc_pcmcia_socket *, const socket_state_t *);
+
+ /*
+ * Enable card status IRQs on (re-)initialisation. This can
+ * be called at initialisation, power management event, or
+ * pcmcia event.
+ */
+ void (*socket_init)(struct soc_pcmcia_socket *);
+
+ /*
+ * Disable card status IRQs and PCMCIA bus on suspend.
+ */
+ void (*socket_suspend)(struct soc_pcmcia_socket *);
+
+ /*
+ * Hardware specific timing routines.
+ * If provided, the get_timing routine overrides the SOC default.
+ */
+ unsigned int (*get_timing)(struct soc_pcmcia_socket *, unsigned int, unsigned int);
+ int (*set_timing)(struct soc_pcmcia_socket *);
+ int (*show_timing)(struct soc_pcmcia_socket *, char *);
+
+#ifdef CONFIG_CPU_FREQ
+ /*
+ * CPUFREQ support.
+ */
+ int (*frequency_change)(struct soc_pcmcia_socket *, unsigned long, struct cpufreq_freqs *);
+#endif
+};
+
+
+
diff --git a/include/sound/pxa2xx-lib.h b/include/sound/pxa2xx-lib.h
index 95100cff25d1..0a6f8dabf8c4 100644
--- a/include/sound/pxa2xx-lib.h
+++ b/include/sound/pxa2xx-lib.h
@@ -52,4 +52,8 @@ extern int pxa2xx_ac97_hw_resume(void);
extern int pxa2xx_ac97_hw_probe(struct platform_device *dev);
extern void pxa2xx_ac97_hw_remove(struct platform_device *dev);
+/* modem registers, used by touchscreen driver */
+u32 pxa2xx_ac97_read_modr(void);
+u32 pxa2xx_ac97_read_misr(void);
+
#endif
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index bea654a85e6b..513e889ef8aa 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -15,10 +15,6 @@ TRACE_DEFINE_ENUM(NODE);
TRACE_DEFINE_ENUM(DATA);
TRACE_DEFINE_ENUM(META);
TRACE_DEFINE_ENUM(META_FLUSH);
-TRACE_DEFINE_ENUM(INMEM);
-TRACE_DEFINE_ENUM(INMEM_DROP);
-TRACE_DEFINE_ENUM(INMEM_INVALIDATE);
-TRACE_DEFINE_ENUM(INMEM_REVOKE);
TRACE_DEFINE_ENUM(IPU);
TRACE_DEFINE_ENUM(OPU);
TRACE_DEFINE_ENUM(HOT);
@@ -59,10 +55,6 @@ TRACE_DEFINE_ENUM(CP_RESIZE);
{ DATA, "DATA" }, \
{ META, "META" }, \
{ META_FLUSH, "META_FLUSH" }, \
- { INMEM, "INMEM" }, \
- { INMEM_DROP, "INMEM_DROP" }, \
- { INMEM_INVALIDATE, "INMEM_INVALIDATE" }, \
- { INMEM_REVOKE, "INMEM_REVOKE" }, \
{ IPU, "IN-PLACE" }, \
{ OPU, "OUT-OF-PLACE" })
@@ -652,19 +644,22 @@ TRACE_EVENT(f2fs_background_gc,
TRACE_EVENT(f2fs_gc_begin,
- TP_PROTO(struct super_block *sb, bool sync, bool background,
+ TP_PROTO(struct super_block *sb, int gc_type, bool no_bg_gc,
+ unsigned int nr_free_secs,
long long dirty_nodes, long long dirty_dents,
long long dirty_imeta, unsigned int free_sec,
unsigned int free_seg, int reserved_seg,
unsigned int prefree_seg),
- TP_ARGS(sb, sync, background, dirty_nodes, dirty_dents, dirty_imeta,
+ TP_ARGS(sb, gc_type, no_bg_gc, nr_free_secs, dirty_nodes,
+ dirty_dents, dirty_imeta,
free_sec, free_seg, reserved_seg, prefree_seg),
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(bool, sync)
- __field(bool, background)
+ __field(int, gc_type)
+ __field(bool, no_bg_gc)
+ __field(unsigned int, nr_free_secs)
__field(long long, dirty_nodes)
__field(long long, dirty_dents)
__field(long long, dirty_imeta)
@@ -676,8 +671,9 @@ TRACE_EVENT(f2fs_gc_begin,
TP_fast_assign(
__entry->dev = sb->s_dev;
- __entry->sync = sync;
- __entry->background = background;
+ __entry->gc_type = gc_type;
+ __entry->no_bg_gc = no_bg_gc;
+ __entry->nr_free_secs = nr_free_secs;
__entry->dirty_nodes = dirty_nodes;
__entry->dirty_dents = dirty_dents;
__entry->dirty_imeta = dirty_imeta;
@@ -687,12 +683,13 @@ TRACE_EVENT(f2fs_gc_begin,
__entry->prefree_seg = prefree_seg;
),
- TP_printk("dev = (%d,%d), sync = %d, background = %d, nodes = %lld, "
- "dents = %lld, imeta = %lld, free_sec:%u, free_seg:%u, "
+ TP_printk("dev = (%d,%d), gc_type = %s, no_background_GC = %d, nr_free_secs = %u, "
+ "nodes = %lld, dents = %lld, imeta = %lld, free_sec:%u, free_seg:%u, "
"rsv_seg:%d, prefree_seg:%u",
show_dev(__entry->dev),
- __entry->sync,
- __entry->background,
+ show_gc_type(__entry->gc_type),
+ (__entry->gc_type == BG_GC) ? __entry->no_bg_gc : -1,
+ __entry->nr_free_secs,
__entry->dirty_nodes,
__entry->dirty_dents,
__entry->dirty_imeta,
@@ -1285,20 +1282,6 @@ DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
TP_ARGS(page, type)
);
-DEFINE_EVENT(f2fs__page, f2fs_register_inmem_page,
-
- TP_PROTO(struct page *page, int type),
-
- TP_ARGS(page, type)
-);
-
-DEFINE_EVENT(f2fs__page, f2fs_commit_inmem_page,
-
- TP_PROTO(struct page *page, int type),
-
- TP_ARGS(page, type)
-);
-
TRACE_EVENT(f2fs_filemap_fault,
TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret),
@@ -2063,6 +2046,100 @@ TRACE_EVENT(f2fs_fiemap,
__entry->ret)
);
+DECLARE_EVENT_CLASS(f2fs__rw_start,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+ pid_t pid, char *pathname, char *command),
+
+ TP_ARGS(inode, offset, bytes, pid, pathname, command),
+
+ TP_STRUCT__entry(
+ __string(pathbuf, pathname)
+ __field(loff_t, offset)
+ __field(int, bytes)
+ __field(loff_t, i_size)
+ __string(cmdline, command)
+ __field(pid_t, pid)
+ __field(ino_t, ino)
+ ),
+
+ TP_fast_assign(
+ /*
+ * Replace the spaces in filenames and cmdlines
+ * because this screws up the tooling that parses
+ * the traces.
+ */
+ __assign_str(pathbuf, pathname);
+ (void)strreplace(__get_str(pathbuf), ' ', '_');
+ __entry->offset = offset;
+ __entry->bytes = bytes;
+ __entry->i_size = i_size_read(inode);
+ __assign_str(cmdline, command);
+ (void)strreplace(__get_str(cmdline), ' ', '_');
+ __entry->pid = pid;
+ __entry->ino = inode->i_ino;
+ ),
+
+ TP_printk("entry_name %s, offset %llu, bytes %d, cmdline %s,"
+ " pid %d, i_size %llu, ino %lu",
+ __get_str(pathbuf), __entry->offset, __entry->bytes,
+ __get_str(cmdline), __entry->pid, __entry->i_size,
+ (unsigned long) __entry->ino)
+);
+
+DECLARE_EVENT_CLASS(f2fs__rw_end,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+
+ TP_ARGS(inode, offset, bytes),
+
+ TP_STRUCT__entry(
+ __field(ino_t, ino)
+ __field(loff_t, offset)
+ __field(int, bytes)
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->offset = offset;
+ __entry->bytes = bytes;
+ ),
+
+ TP_printk("ino %lu, offset %llu, bytes %d",
+ (unsigned long) __entry->ino,
+ __entry->offset, __entry->bytes)
+);
+
+DEFINE_EVENT(f2fs__rw_start, f2fs_dataread_start,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+ pid_t pid, char *pathname, char *command),
+
+ TP_ARGS(inode, offset, bytes, pid, pathname, command)
+);
+
+DEFINE_EVENT(f2fs__rw_end, f2fs_dataread_end,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+
+ TP_ARGS(inode, offset, bytes)
+);
+
+DEFINE_EVENT(f2fs__rw_start, f2fs_datawrite_start,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+ pid_t pid, char *pathname, char *command),
+
+ TP_ARGS(inode, offset, bytes, pid, pathname, command)
+);
+
+DEFINE_EVENT(f2fs__rw_end, f2fs_datawrite_end,
+
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+
+ TP_ARGS(inode, offset, bytes)
+);
+
#endif /* _TRACE_F2FS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/thermal_pressure.h b/include/trace/events/thermal_pressure.h
new file mode 100644
index 000000000000..b68680201360
--- /dev/null
+++ b/include/trace/events/thermal_pressure.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thermal_pressure
+
+#if !defined(_TRACE_THERMAL_PRESSURE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_THERMAL_PRESSURE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(thermal_pressure_update,
+ TP_PROTO(int cpu, unsigned long thermal_pressure),
+ TP_ARGS(cpu, thermal_pressure),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, thermal_pressure)
+ __field(int, cpu)
+ ),
+
+ TP_fast_assign(
+ __entry->thermal_pressure = thermal_pressure;
+ __entry->cpu = cpu;
+ ),
+
+ TP_printk("cpu=%d thermal_pressure=%lu", __entry->cpu, __entry->thermal_pressure)
+);
+#endif /* _TRACE_THERMAL_PRESSURE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h
index ecd0f5bdfc1d..f13d37b60775 100644
--- a/include/uapi/asm-generic/fcntl.h
+++ b/include/uapi/asm-generic/fcntl.h
@@ -116,13 +116,13 @@
#define F_GETSIG 11 /* for sockets. */
#endif
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG == 32 || defined(__KERNEL__)
#ifndef F_GETLK64
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
#endif
-#endif
+#endif /* __BITS_PER_LONG == 32 || defined(__KERNEL__) */
#ifndef F_SETOWN_EX
#define F_SETOWN_EX 15
@@ -192,25 +192,19 @@ struct f_owner_ex {
#define F_LINUX_SPECIFIC_BASE 1024
-#ifndef HAVE_ARCH_STRUCT_FLOCK
-#ifndef __ARCH_FLOCK_PAD
-#define __ARCH_FLOCK_PAD
-#endif
-
struct flock {
short l_type;
short l_whence;
__kernel_off_t l_start;
__kernel_off_t l_len;
__kernel_pid_t l_pid;
- __ARCH_FLOCK_PAD
-};
+#ifdef __ARCH_FLOCK_EXTRA_SYSID
+ __ARCH_FLOCK_EXTRA_SYSID
#endif
-
-#ifndef HAVE_ARCH_STRUCT_FLOCK64
-#ifndef __ARCH_FLOCK64_PAD
-#define __ARCH_FLOCK64_PAD
+#ifdef __ARCH_FLOCK_PAD
+ __ARCH_FLOCK_PAD
#endif
+};
struct flock64 {
short l_type;
@@ -218,8 +212,9 @@ struct flock64 {
__kernel_loff_t l_start;
__kernel_loff_t l_len;
__kernel_pid_t l_pid;
+#ifdef __ARCH_FLOCK64_PAD
__ARCH_FLOCK64_PAD
-};
#endif
+};
#endif /* _ASM_GENERIC_FCNTL_H */
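For illustration only (not part of the patch): with the generic struct flock now built unconditionally, an architecture that needs extra padding no longer redefines the whole structure; it only supplies the pad macro before the generic header is pulled in. A hypothetical arch header:

	/* arch/xyz/include/uapi/asm/fcntl.h (hypothetical) */
	#define __ARCH_FLOCK_PAD	long	l_pad[4];

	#include <asm-generic/fcntl.h>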
diff --git a/include/uapi/asm-generic/termbits-common.h b/include/uapi/asm-generic/termbits-common.h
new file mode 100644
index 000000000000..4d084fe8def5
--- /dev/null
+++ b/include/uapi/asm-generic/termbits-common.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_GENERIC_TERMBITS_COMMON_H
+#define __ASM_GENERIC_TERMBITS_COMMON_H
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+
+/* c_iflag bits */
+#define IGNBRK 0x001 /* Ignore break condition */
+#define BRKINT 0x002 /* Signal interrupt on break */
+#define IGNPAR 0x004 /* Ignore characters with parity errors */
+#define PARMRK 0x008 /* Mark parity and framing errors */
+#define INPCK 0x010 /* Enable input parity check */
+#define ISTRIP 0x020 /* Strip 8th bit off characters */
+#define INLCR 0x040 /* Map NL to CR on input */
+#define IGNCR 0x080 /* Ignore CR */
+#define ICRNL 0x100 /* Map CR to NL on input */
+#define IXANY 0x800 /* Any character will restart after stop */
+
+/* c_oflag bits */
+#define OPOST 0x01 /* Perform output processing */
+#define OCRNL 0x08
+#define ONOCR 0x10
+#define ONLRET 0x20
+#define OFILL 0x40
+#define OFDEL 0x80
+
+/* c_cflag bit meaning */
+/* Common CBAUD rates */
+#define B0 0x00000000 /* hang up */
+#define B50 0x00000001
+#define B75 0x00000002
+#define B110 0x00000003
+#define B134 0x00000004
+#define B150 0x00000005
+#define B200 0x00000006
+#define B300 0x00000007
+#define B600 0x00000008
+#define B1200 0x00000009
+#define B1800 0x0000000a
+#define B2400 0x0000000b
+#define B4800 0x0000000c
+#define B9600 0x0000000d
+#define B19200 0x0000000e
+#define B38400 0x0000000f
+#define EXTA B19200
+#define EXTB B38400
+
+#define CMSPAR 0x40000000 /* mark or space (stick) parity */
+#define CRTSCTS 0x80000000 /* flow control */
+
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
+/* tcflow() ACTION argument and TCXONC use these */
+#define TCOOFF 0 /* Suspend output */
+#define TCOON 1 /* Restart suspended output */
+#define TCIOFF 2 /* Send a STOP character */
+#define TCION 3 /* Send a START character */
+
+/* tcflush() QUEUE_SELECTOR argument and TCFLSH use these */
+#define TCIFLUSH 0 /* Discard data received but not yet read */
+#define TCOFLUSH 1 /* Discard data written but not yet sent */
+#define TCIOFLUSH 2 /* Discard all pending data */
+
+#endif /* __ASM_GENERIC_TERMBITS_COMMON_H */
diff --git a/include/uapi/asm-generic/termbits.h b/include/uapi/asm-generic/termbits.h
index 2fbaf9ae89dd..890ef29053e2 100644
--- a/include/uapi/asm-generic/termbits.h
+++ b/include/uapi/asm-generic/termbits.h
@@ -2,10 +2,8 @@
#ifndef __ASM_GENERIC_TERMBITS_H
#define __ASM_GENERIC_TERMBITS_H
-#include <linux/posix_types.h>
+#include <asm-generic/termbits-common.h>
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
typedef unsigned int tcflag_t;
#define NCCS 19
@@ -41,156 +39,107 @@ struct ktermios {
};
/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IUCLC 0001000
-#define IXON 0002000
-#define IXANY 0004000
-#define IXOFF 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
+#define IUCLC 0x0200
+#define IXON 0x0400
+#define IXOFF 0x1000
+#define IMAXBEL 0x2000
+#define IUTF8 0x4000
/* c_oflag bits */
-#define OPOST 0000001
-#define OLCUC 0000002
-#define ONLCR 0000004
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-#define OFILL 0000100
-#define OFDEL 0000200
-#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
-#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
-#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
-#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
-#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
-#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
+#define OLCUC 0x00002
+#define ONLCR 0x00004
+#define NLDLY 0x00100
+#define NL0 0x00000
+#define NL1 0x00100
+#define CRDLY 0x00600
+#define CR0 0x00000
+#define CR1 0x00200
+#define CR2 0x00400
+#define CR3 0x00600
+#define TABDLY 0x01800
+#define TAB0 0x00000
+#define TAB1 0x00800
+#define TAB2 0x01000
+#define TAB3 0x01800
+#define XTABS 0x01800
+#define BSDLY 0x02000
+#define BS0 0x00000
+#define BS1 0x02000
+#define VTDLY 0x04000
+#define VT0 0x00000
+#define VT1 0x04000
+#define FFDLY 0x08000
+#define FF0 0x00000
+#define FF1 0x08000
/* c_cflag bit meaning */
-#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0000060
-#define CS5 0000000
-#define CS6 0000020
-#define CS7 0000040
-#define CS8 0000060
-#define CSTOPB 0000100
-#define CREAD 0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL 0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define BOTHER 0010000
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+#define CBAUD 0x0000100f
+#define CSIZE 0x00000030
+#define CS5 0x00000000
+#define CS6 0x00000010
+#define CS7 0x00000020
+#define CS8 0x00000030
+#define CSTOPB 0x00000040
+#define CREAD 0x00000080
+#define PARENB 0x00000100
+#define PARODD 0x00000200
+#define HUPCL 0x00000400
+#define CLOCAL 0x00000800
+#define CBAUDEX 0x00001000
+#define BOTHER 0x00001000
+#define B57600 0x00001001
+#define B115200 0x00001002
+#define B230400 0x00001003
+#define B460800 0x00001004
+#define B500000 0x00001005
+#define B576000 0x00001006
+#define B921600 0x00001007
+#define B1000000 0x00001008
+#define B1152000 0x00001009
+#define B1500000 0x0000100a
+#define B2000000 0x0000100b
+#define B2500000 0x0000100c
+#define B3000000 0x0000100d
+#define B3500000 0x0000100e
+#define B4000000 0x0000100f
+#define CIBAUD 0x100f0000 /* input baud rate */
/* c_lflag bits */
-#define ISIG 0000001
-#define ICANON 0000002
-#define XCASE 0000004
-#define ECHO 0000010
-#define ECHOE 0000020
-#define ECHOK 0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL 0001000
-#define ECHOPRT 0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-#define EXTPROC 0200000
-
-/* tcflow() and TCXONC use these */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
+#define ISIG 0x00001
+#define ICANON 0x00002
+#define XCASE 0x00004
+#define ECHO 0x00008
+#define ECHOE 0x00010
+#define ECHOK 0x00020
+#define ECHONL 0x00040
+#define NOFLSH 0x00080
+#define TOSTOP 0x00100
+#define ECHOCTL 0x00200
+#define ECHOPRT 0x00400
+#define ECHOKE 0x00800
+#define FLUSHO 0x01000
+#define PENDIN 0x04000
+#define IEXTEN 0x08000
+#define EXTPROC 0x10000
/* tcsetattr uses these */
#define TCSANOW 0
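For illustration only (not part of the patch): the termbits rework only respells the octal constants in hexadecimal (and moves the shared ones into termbits-common.h); the numeric values are unchanged. A quick compile-time spot check of the equivalence:

	/* old octal spelling on the left, new hex spelling on the right */
	_Static_assert(0000400 == 0x100,  "ICRNL unchanged");
	_Static_assert(0010017 == 0x100f, "CBAUD unchanged");
	_Static_assert(0100000 == 0x8000, "IEXTEN unchanged");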
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 1c48b0ae3ba3..45fa180cc56a 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -383,7 +383,7 @@ __SYSCALL(__NR_syslog, sys_syslog)
/* kernel/ptrace.c */
#define __NR_ptrace 117
-__SYSCALL(__NR_ptrace, sys_ptrace)
+__SC_COMP(__NR_ptrace, sys_ptrace, compat_sys_ptrace)
/* kernel/sched/core.c */
#define __NR_sched_setparam 118
@@ -779,7 +779,7 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_kexec_file_load 294
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
-#if __BITS_PER_LONG == 32
+#if defined(__SYSCALL_COMPAT) || __BITS_PER_LONG == 32
#define __NR_clock_gettime64 403
__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
#define __NR_clock_settime64 404
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 9a1d210d135d..18d3246d636e 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -140,6 +140,10 @@ extern "C" {
* not require GTT memory accounting
*/
#define AMDGPU_GEM_CREATE_PREEMPTIBLE (1 << 11)
+/* Flag that BO can be discarded under memory pressure without keeping the
+ * content.
+ */
+#define AMDGPU_GEM_CREATE_DISCARDABLE (1 << 12)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -529,6 +533,8 @@ struct drm_amdgpu_gem_op {
#define AMDGPU_VM_MTYPE_UC (4 << 5)
/* Use Read Write MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_RW (5 << 5)
+/* don't allocate MALL */
+#define AMDGPU_VM_PAGE_NOALLOC (1 << 9)
struct drm_amdgpu_gem_va {
/** GEM object handle */
@@ -988,6 +994,8 @@ struct drm_amdgpu_info_vbios {
#define AMDGPU_VRAM_TYPE_DDR4 8
#define AMDGPU_VRAM_TYPE_GDDR6 9
#define AMDGPU_VRAM_TYPE_DDR5 10
+#define AMDGPU_VRAM_TYPE_LPDDR4 11
+#define AMDGPU_VRAM_TYPE_LPDDR5 12
struct drm_amdgpu_info_device {
/** PCI Device ID */
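For illustration only (not part of the patch): userspace opts into the new discardable behaviour at allocation time through the GEM create ioctl. A sketch using libdrm's drmIoctl(); the size and domain are arbitrary.

	union drm_amdgpu_gem_create args = {0};

	args.in.bo_size      = 4096;
	args.in.alignment    = 4096;
	args.in.domains      = AMDGPU_GEM_DOMAIN_VRAM;
	args.in.domain_flags = AMDGPU_GEM_CREATE_DISCARDABLE;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
		return -errno;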
diff --git a/include/uapi/linux/acct.h b/include/uapi/linux/acct.h
index 985b89068591..0e591152aa8a 100644
--- a/include/uapi/linux/acct.h
+++ b/include/uapi/linux/acct.h
@@ -103,12 +103,13 @@ struct acct_v3
/*
* accounting flags
*/
- /* bit set when the process ... */
+ /* bit set when the process/task ... */
#define AFORK 0x01 /* ... executed fork, but did not exec */
#define ASU 0x02 /* ... used super-user privileges */
#define ACOMPAT 0x04 /* ... used compatibility mode (VAX only not used) */
#define ACORE 0x08 /* ... dumped core */
#define AXSIG 0x10 /* ... was killed by a signal */
+#define AGROUP 0x20 /* ... was the last task of the process (task group) */
#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
#define ACCT_BYTEORDER 0x80 /* accounting file is big endian */
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 11157fae8a8e..986333cf5bbe 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -236,6 +236,21 @@ struct binder_frozen_status_info {
__u32 async_recv;
};
+/* struct binder_extended_error - extended error information
+ * @id: identifier for the failed operation
+ * @command: command as defined by binder_driver_return_protocol
+ * @param: parameter holding a negative errno value
+ *
+ * Used with BINDER_GET_EXTENDED_ERROR. This extends the error information
+ * returned by the driver upon a failed operation. Userspace can pull this
+ * data to properly handle specific error scenarios.
+ */
+struct binder_extended_error {
+ __u32 id;
+ __u32 command;
+ __s32 param;
+};
+
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
@@ -249,6 +264,7 @@ struct binder_frozen_status_info {
#define BINDER_FREEZE _IOW('b', 14, struct binder_freeze_info)
#define BINDER_GET_FROZEN_INFO _IOWR('b', 15, struct binder_frozen_status_info)
#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION _IOW('b', 16, __u32)
+#define BINDER_GET_EXTENDED_ERROR _IOWR('b', 17, struct binder_extended_error)
/*
* NOTE: Two special error codes you should check for when calling
@@ -289,7 +305,7 @@ struct binder_transaction_data {
/* General information about the transaction. */
__u32 flags;
__kernel_pid_t sender_pid;
- __kernel_uid_t sender_euid;
+ __kernel_uid32_t sender_euid;
binder_size_t data_size; /* number of bytes of data */
binder_size_t offsets_size; /* number of bytes of offsets */
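For illustration only (not part of the patch): a minimal userspace sketch of pulling the new extended error record after a failed binder operation (descriptor setup elided):

	struct binder_extended_error ee = {0};

	if (ioctl(binder_fd, BINDER_GET_EXTENDED_ERROR, &ee) == 0)
		fprintf(stderr, "binder op %u failed: cmd %u, err %d\n",
			ee.id, ee.command, ee.param);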
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 8eda133ca4c1..7c1dc818b1d5 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -439,6 +439,8 @@ enum {
#define AUDIT_ARCH_UNICORE (EM_UNICORE|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_XTENSA (EM_XTENSA)
+#define AUDIT_ARCH_LOONGARCH32 (EM_LOONGARCH|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_LOONGARCH64 (EM_LOONGARCH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_PERM_EXEC 1
#define AUDIT_PERM_WRITE 2
diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h
index 8d206f27bb6d..c71021a2a9ed 100644
--- a/include/uapi/linux/cxl_mem.h
+++ b/include/uapi/linux/cxl_mem.h
@@ -68,8 +68,8 @@ static const struct {
* struct cxl_command_info - Command information returned from a query.
* @id: ID number for the command.
* @flags: Flags that specify command behavior.
- * @size_in: Expected input size, or -1 if variable length.
- * @size_out: Expected output size, or -1 if variable length.
+ * @size_in: Expected input size, or ~0 if variable length.
+ * @size_out: Expected output size, or ~0 if variable length.
*
* Represents a single command that is supported by both the driver and the
* hardware. This is returned as part of an array from the query ioctl. The
@@ -78,7 +78,7 @@ static const struct {
*
* - @id = 10
* - @flags = 0
- * - @size_in = -1
+ * - @size_in = ~0
* - @size_out = 0
*
* See struct cxl_mem_query_commands.
@@ -89,8 +89,8 @@ struct cxl_command_info {
__u32 flags;
#define CXL_MEM_COMMAND_FLAG_MASK GENMASK(0, 0)
- __s32 size_in;
- __s32 size_out;
+ __u32 size_in;
+ __u32 size_out;
};
/**
@@ -169,13 +169,13 @@ struct cxl_send_command {
__u32 retval;
struct {
- __s32 size;
+ __u32 size;
__u32 rsvd;
__u64 payload;
} in;
struct {
- __s32 size;
+ __u32 size;
__u32 rsvd;
__u64 payload;
} out;
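For illustration only (not part of the patch): with size_in/size_out now unsigned, tooling that walks the query output checks for the all-ones sentinel rather than -1. A sketch:

	struct cxl_command_info *info = &query->commands[i];

	if (info->size_in == (__u32)~0)
		printf("command %u takes a variable-length input payload\n",
		       info->id);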
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index f47e853546fa..ef38c2bc5ab7 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -51,6 +51,7 @@
#define EM_RISCV 243 /* RISC-V */
#define EM_BPF 247 /* Linux BPF - in-kernel virtual machine */
#define EM_CSKY 252 /* C-SKY */
+#define EM_LOONGARCH 258 /* LoongArch */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
/*
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 429bec8dd70a..2b9f5e9985e5 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -134,7 +134,7 @@ typedef __s64 Elf64_Sxword;
#define STT_TLS 6
#define ELF_ST_BIND(x) ((x) >> 4)
-#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf)
+#define ELF_ST_TYPE(x) ((x) & 0xf)
#define ELF32_ST_BIND(x) ELF_ST_BIND(x)
#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x)
#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
@@ -438,6 +438,11 @@ typedef struct elf64_shdr {
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
+#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */
+#define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */
+#define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */
+#define NT_LOONGARCH_LASX 0xa03 /* LoongArch Loongson Advanced SIMD Extension registers */
+#define NT_LOONGARCH_LBT 0xa04 /* LoongArch Loongson Binary Translation registers */
/* Note types with note name "GNU" */
#define NT_GNU_PROPERTY_TYPE_0 5
diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h
index eaaea3d8e6b4..cb9966d49a16 100644
--- a/include/uapi/linux/gpio.h
+++ b/include/uapi/linux/gpio.h
@@ -66,6 +66,8 @@ struct gpiochip_info {
* @GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN: line has pull-down bias enabled
* @GPIO_V2_LINE_FLAG_BIAS_DISABLED: line has bias disabled
* @GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME: line events contain REALTIME timestamps
+ * @GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE: line events contain timestamps from
+ * hardware timestamp engine
*/
enum gpio_v2_line_flag {
GPIO_V2_LINE_FLAG_USED = _BITULL(0),
@@ -80,6 +82,7 @@ enum gpio_v2_line_flag {
GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN = _BITULL(9),
GPIO_V2_LINE_FLAG_BIAS_DISABLED = _BITULL(10),
GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME = _BITULL(11),
+ GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE = _BITULL(12),
};
/**
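For illustration only (not part of the patch): userspace selects the hardware timestamp engine as the event clock when requesting a line. A sketch against the v2 character-device ABI; the line offset and consumer label are arbitrary.

	struct gpio_v2_line_request req = {0};

	req.offsets[0] = 17;
	req.num_lines  = 1;
	strcpy(req.consumer, "hte-demo");
	req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
			   GPIO_V2_LINE_FLAG_EDGE_RISING |
			   GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;

	if (ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req) < 0)
		perror("GPIO_V2_GET_LINE_IOCTL");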
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index a8f0ff75c430..bce7c43657d5 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -53,6 +53,11 @@ enum idxd_scmd_stat {
/* IAX */
#define IDXD_OP_FLAG_RD_SRC2_AECS 0x010000
+#define IDXD_OP_FLAG_RD_SRC2_2ND 0x020000
+#define IDXD_OP_FLAG_WR_SRC2_AECS_COMP 0x040000
+#define IDXD_OP_FLAG_WR_SRC2_AECS_OVFL 0x080000
+#define IDXD_OP_FLAG_SRC2_STS 0x100000
+#define IDXD_OP_FLAG_CRC_RFC3720 0x200000
/* Opcode */
enum dsa_opcode {
@@ -81,6 +86,18 @@ enum iax_opcode {
IAX_OPCODE_MEMMOVE,
IAX_OPCODE_DECOMPRESS = 0x42,
IAX_OPCODE_COMPRESS,
+ IAX_OPCODE_CRC64,
+ IAX_OPCODE_ZERO_DECOMP_32 = 0x48,
+ IAX_OPCODE_ZERO_DECOMP_16,
+ IAX_OPCODE_DECOMP_32 = 0x4c,
+ IAX_OPCODE_DECOMP_16,
+ IAX_OPCODE_SCAN = 0x50,
+ IAX_OPCODE_SET_MEMBER,
+ IAX_OPCODE_EXTRACT,
+ IAX_OPCODE_SELECT,
+ IAX_OPCODE_RLE_BURST,
+ IAX_OPCDE_FIND_UNIQUE,
+ IAX_OPCODE_EXPAND,
};
/* Completion record status */
@@ -120,6 +137,7 @@ enum iax_completion_status {
IAX_COMP_NONE = 0,
IAX_COMP_SUCCESS,
IAX_COMP_PAGE_FAULT_IR = 0x04,
+ IAX_COMP_ANALYTICS_ERROR = 0x0a,
IAX_COMP_OUTBUF_OVERFLOW,
IAX_COMP_BAD_OPCODE = 0x10,
IAX_COMP_INVALID_FLAGS,
@@ -140,7 +158,10 @@ enum iax_completion_status {
IAX_COMP_WATCHDOG,
IAX_COMP_INVALID_COMP_FLAG = 0x30,
IAX_COMP_INVALID_FILTER_FLAG,
- IAX_COMP_INVALID_NUM_ELEMS = 0x33,
+ IAX_COMP_INVALID_INPUT_SIZE,
+ IAX_COMP_INVALID_NUM_ELEMS,
+ IAX_COMP_INVALID_SRC1_WIDTH,
+ IAX_COMP_INVALID_INVERT_OUT,
};
#define DSA_COMP_STATUS_MASK 0x7f
@@ -319,8 +340,12 @@ struct iax_completion_record {
uint32_t output_size;
uint8_t output_bits;
uint8_t rsvd3;
- uint16_t rsvd4;
- uint64_t rsvd5[4];
+ uint16_t xor_csum;
+ uint32_t crc;
+ uint32_t min;
+ uint32_t max;
+ uint32_t sum;
+ uint64_t rsvd4[2];
} __attribute__((packed));
struct iax_raw_completion_record {
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 53e7dae92e42..776e0278f9dd 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -47,6 +47,7 @@ struct io_uring_sqe {
__u32 unlink_flags;
__u32 hardlink_flags;
__u32 xattr_flags;
+ __u32 close_flags;
};
__u64 user_data; /* data to be passed back at completion time */
/* pack this to avoid bogus arm OABI complaints */
@@ -259,6 +260,11 @@ enum io_uring_op {
#define IORING_ACCEPT_MULTISHOT (1U << 0)
/*
+ * close flags, store in sqe->close_flags
+ */
+#define IORING_CLOSE_FD_AND_FILE_SLOT (1U << 0)
+
+/*
* IO completion data structure (Completion Queue Entry)
*/
struct io_uring_cqe {
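
The hunks above only add the close_flags member and the IORING_CLOSE_FD_AND_FILE_SLOT bit; the sketch below shows nothing more than where the flag is stored on a close SQE. What the kernel does with the flag is not inferred here, and the liburing ring and helpers are assumed to be available:

	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

	io_uring_prep_close(sqe, fd);				/* liburing helper */
	sqe->close_flags = IORING_CLOSE_FD_AND_FILE_SLOT;	/* new in this header */
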
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 549ddeaf788b..03cdbe798fe3 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -194,7 +194,7 @@ enum {
DEVCONF_IOAM6_ID,
DEVCONF_IOAM6_ID_WIDE,
DEVCONF_NDISC_EVICT_NOCARRIER,
- DEVCONF_ACCEPT_UNSOLICITED_NA,
+ DEVCONF_ACCEPT_UNTRACKED_NA,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index fb7e2ef60825..981016e05cfa 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -43,6 +43,7 @@
#define KEXEC_ARCH_MIPS ( 8 << 16)
#define KEXEC_ARCH_AARCH64 (183 << 16)
#define KEXEC_ARCH_RISCV (243 << 16)
+#define KEXEC_ARCH_LOONGARCH (258 << 16)
/* The artificial cap on the number of segments passed to kexec_load. */
#define KEXEC_SEGMENT_MAX 16
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index bee1a9ed6e66..108f8523fa04 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -616,6 +616,7 @@
#define PCI_EXP_SLTCTL_PWR_OFF 0x0400 /* Power Off */
#define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */
#define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */
+#define PCI_EXP_SLTCTL_ASPL_DISABLE 0x2000 /* Auto Slot Power Limit Disable */
#define PCI_EXP_SLTCTL_IBPD_DISABLE 0x4000 /* In-band PD disable */
#define PCI_EXP_SLTSTA 0x1a /* Slot Status */
#define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */
diff --git a/include/uapi/linux/socket.h b/include/uapi/linux/socket.h
index 51d6bb2f6765..d3fcd3b5ec53 100644
--- a/include/uapi/linux/socket.h
+++ b/include/uapi/linux/socket.h
@@ -31,7 +31,7 @@ struct __kernel_sockaddr_storage {
#define SOCK_BUF_LOCK_MASK (SOCK_SNDBUF_LOCK | SOCK_RCVBUF_LOCK)
-#define SOCK_TXREHASH_DEFAULT ((u8)-1)
+#define SOCK_TXREHASH_DEFAULT 255
#define SOCK_TXREHASH_DISABLED 0
#define SOCK_TXREHASH_ENABLED 1
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index 12327d32378f..736154171489 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -34,7 +34,7 @@
*/
-#define TASKSTATS_VERSION 11
+#define TASKSTATS_VERSION 12
#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
* in linux/sched.h */
@@ -48,7 +48,8 @@ struct taskstats {
__u32 ac_exitcode; /* Exit status */
/* The accounting flags of a task as defined in <linux/acct.h>
- * Defined values are AFORK, ASU, ACOMPAT, ACORE, and AXSIG.
+ * Defined values are AFORK, ASU, ACOMPAT, ACORE, AXSIG, and AGROUP.
+ * (AGROUP since version 12).
*/
__u8 ac_flag; /* Record flags */
__u8 ac_nice; /* task_nice */
@@ -173,9 +174,26 @@ struct taskstats {
/* v10: 64-bit btime to avoid overflow */
__u64 ac_btime64; /* 64-bit begin time */
- /* Delay waiting for memory compact */
+ /* v11: Delay waiting for memory compact */
__u64 compact_count;
__u64 compact_delay_total;
+
+ /* v12 begin */
+ __u32 ac_tgid; /* thread group ID */
+ /* Thread group walltime up to now. This is total process walltime if
+ * AGROUP flag is set.
+ */
+ __u64 ac_tgetime __attribute__((aligned(8)));
+ /* Lightweight information to identify process binary files.
+ * This leaves userspace to match this to a file system path, using
+ * MAJOR() and MINOR() macros to identify a device and mount point,
+ * the inode to identify the executable file. This is /proc/self/exe
+ * at the end, so matching the most recent exec(). Values are zero
+ * for kernel threads.
+ */
+ __u64 ac_exe_dev; /* program binary device ID */
+ __u64 ac_exe_inode; /* program binary inode number */
+ /* v12 end */
};
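
A small sketch of consuming the new v12 taskstats fields once a struct taskstats has been received (the netlink plumbing is omitted); major()/minor() come from <sys/sysmacros.h>, and the zero check follows the comment above about kernel threads:

	#include <sys/sysmacros.h>
	#include <stdio.h>

	static void print_exe_id(const struct taskstats *ts)
	{
		if (ts->version < 12 || !ts->ac_exe_dev)
			return;		/* old kernel or kernel thread */
		printf("tgid %u exe dev %u:%u inode %llu\n",
		       ts->ac_tgid,
		       major(ts->ac_exe_dev), minor(ts->ac_exe_dev),
		       (unsigned long long)ts->ac_exe_inode);
	}
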
diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
index 1061d8d2d09d..25c55cab3d7c 100644
--- a/include/uapi/linux/vdpa.h
+++ b/include/uapi/linux/vdpa.h
@@ -18,6 +18,7 @@ enum vdpa_command {
VDPA_CMD_DEV_DEL,
VDPA_CMD_DEV_GET, /* can dump */
VDPA_CMD_DEV_CONFIG_GET, /* can dump */
+ VDPA_CMD_DEV_VSTATS_GET,
};
enum vdpa_attr {
@@ -46,6 +47,11 @@ enum vdpa_attr {
VDPA_ATTR_DEV_NEGOTIATED_FEATURES, /* u64 */
VDPA_ATTR_DEV_MGMTDEV_MAX_VQS, /* u32 */
VDPA_ATTR_DEV_SUPPORTED_FEATURES, /* u64 */
+
+ VDPA_ATTR_DEV_QUEUE_INDEX, /* u32 */
+ VDPA_ATTR_DEV_VENDOR_ATTR_NAME, /* string */
+ VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, /* u64 */
+
/* new attributes must be added above here */
VDPA_ATTR_MAX,
};
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index fea86061b44e..733a1cddde30 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -643,7 +643,7 @@ enum {
};
/**
- * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IORW(VFIO_TYPE, VFIO_BASE + 12,
+ * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
* struct vfio_pci_hot_reset_info)
*
* Return: 0 on success, -errno on failure:
@@ -770,7 +770,7 @@ struct vfio_device_ioeventfd {
#define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16)
/**
- * VFIO_DEVICE_FEATURE - _IORW(VFIO_TYPE, VFIO_BASE + 17,
+ * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
* struct vfio_device_feature)
*
* Get, set, or probe feature data of the device. The feature is selected
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 5d99e7c242a2..cab645d4a645 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -89,11 +89,6 @@
/* Set or get vhost backend capability */
-/* Use message type V2 */
-#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
-/* IOTLB can accept batching hints */
-#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
-
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
@@ -150,11 +145,30 @@
/* Get the valid iova range */
#define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \
struct vhost_vdpa_iova_range)
-
/* Get the config size */
#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32)
/* Get the count of all virtqueues */
#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32)
+/* Get the number of virtqueue groups. */
+#define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32)
+
+/* Get the number of address spaces. */
+#define VHOST_VDPA_GET_AS_NUM _IOR(VHOST_VIRTIO, 0x7A, unsigned int)
+
+/* Get the group for a virtqueue: read index, write group in num.
+ * The virtqueue index is stored in the index field of
+ * vhost_vring_state. The group for this specific virtqueue is
+ * returned via num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_GET_VRING_GROUP _IOWR(VHOST_VIRTIO, 0x7B, \
+ struct vhost_vring_state)
+/* Set the ASID for a virtqueue group. The group index is stored in
+ * the index field of vhost_vring_state, the ASID associated with this
+ * group is stored at num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \
+ struct vhost_vring_state)
+
#endif
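
Following the comments on the two new ioctls, a minimal sketch of the flow: query which group a virtqueue belongs to, then bind that group to an address space. vdpa_fd, vq_index and asid are assumed variables, and error handling is omitted:

	struct vhost_vring_state state;

	state.index = vq_index;				/* in: virtqueue index */
	ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_GROUP, &state);
	group = state.num;				/* out: that vq's group */

	state.index = group;				/* in: group index */
	state.num = asid;				/* in: ASID to bind */
	ioctl(vdpa_fd, VHOST_VDPA_SET_GROUP_ASID, &state);
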
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
index f7f6a3a28977..634cee485abb 100644
--- a/include/uapi/linux/vhost_types.h
+++ b/include/uapi/linux/vhost_types.h
@@ -87,7 +87,7 @@ struct vhost_msg {
struct vhost_msg_v2 {
__u32 type;
- __u32 reserved;
+ __u32 asid;
union {
struct vhost_iotlb_msg iotlb;
__u8 padding[64];
@@ -153,4 +153,13 @@ struct vhost_vdpa_iova_range {
/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
#define VHOST_NET_F_VIRTIO_NET_HDR 27
+/* Use message type V2 */
+#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+/* IOTLB can accept batching hints */
+#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
+/* IOTLB can accept address space identifier through V2 type of IOTLB
+ * message
+ */
+#define VHOST_BACKEND_F_IOTLB_ASID 0x3
+
#endif
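
With the reserved field repurposed as asid and VHOST_BACKEND_F_IOTLB_ASID negotiated, an IOTLB update can target one address space. A hedged sketch, assuming the usual write()-based vhost message path and purely illustrative values:

	struct vhost_msg_v2 msg = {
		.type = VHOST_IOTLB_MSG_V2,
		.asid = asid,			/* target address space */
		.iotlb = {
			.iova  = iova,
			.size  = size,
			.uaddr = (__u64)(uintptr_t)buf,
			.perm  = VHOST_ACCESS_RW,
			.type  = VHOST_IOTLB_UPDATE,
		},
	};

	write(vdpa_fd, &msg, sizeof(msg));
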
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 1d6b4f0c4159..52540d5b4fc9 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -348,33 +348,41 @@ enum hl_server_type {
* The address which accessing it caused the razwi.
* Razwi initiator.
* Razwi cause, was it a page fault or MMU access error.
+ * HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES - Retrieve valid page sizes for device memory allocation
+ * HL_INFO_REGISTER_EVENTFD - Register eventfd for event notifications.
+ * HL_INFO_UNREGISTER_EVENTFD - Unregister eventfd
+ * HL_INFO_GET_EVENTS - Retrieve the last occurred events
*/
-#define HL_INFO_HW_IP_INFO 0
-#define HL_INFO_HW_EVENTS 1
-#define HL_INFO_DRAM_USAGE 2
-#define HL_INFO_HW_IDLE 3
-#define HL_INFO_DEVICE_STATUS 4
-#define HL_INFO_DEVICE_UTILIZATION 6
-#define HL_INFO_HW_EVENTS_AGGREGATE 7
-#define HL_INFO_CLK_RATE 8
-#define HL_INFO_RESET_COUNT 9
-#define HL_INFO_TIME_SYNC 10
-#define HL_INFO_CS_COUNTERS 11
-#define HL_INFO_PCI_COUNTERS 12
-#define HL_INFO_CLK_THROTTLE_REASON 13
-#define HL_INFO_SYNC_MANAGER 14
-#define HL_INFO_TOTAL_ENERGY 15
-#define HL_INFO_PLL_FREQUENCY 16
-#define HL_INFO_POWER 17
-#define HL_INFO_OPEN_STATS 18
-#define HL_INFO_DRAM_REPLACED_ROWS 21
-#define HL_INFO_DRAM_PENDING_ROWS 22
-#define HL_INFO_LAST_ERR_OPEN_DEV_TIME 23
-#define HL_INFO_CS_TIMEOUT_EVENT 24
-#define HL_INFO_RAZWI_EVENT 25
-
-#define HL_INFO_VERSION_MAX_LEN 128
-#define HL_INFO_CARD_NAME_MAX_LEN 16
+#define HL_INFO_HW_IP_INFO 0
+#define HL_INFO_HW_EVENTS 1
+#define HL_INFO_DRAM_USAGE 2
+#define HL_INFO_HW_IDLE 3
+#define HL_INFO_DEVICE_STATUS 4
+#define HL_INFO_DEVICE_UTILIZATION 6
+#define HL_INFO_HW_EVENTS_AGGREGATE 7
+#define HL_INFO_CLK_RATE 8
+#define HL_INFO_RESET_COUNT 9
+#define HL_INFO_TIME_SYNC 10
+#define HL_INFO_CS_COUNTERS 11
+#define HL_INFO_PCI_COUNTERS 12
+#define HL_INFO_CLK_THROTTLE_REASON 13
+#define HL_INFO_SYNC_MANAGER 14
+#define HL_INFO_TOTAL_ENERGY 15
+#define HL_INFO_PLL_FREQUENCY 16
+#define HL_INFO_POWER 17
+#define HL_INFO_OPEN_STATS 18
+#define HL_INFO_DRAM_REPLACED_ROWS 21
+#define HL_INFO_DRAM_PENDING_ROWS 22
+#define HL_INFO_LAST_ERR_OPEN_DEV_TIME 23
+#define HL_INFO_CS_TIMEOUT_EVENT 24
+#define HL_INFO_RAZWI_EVENT 25
+#define HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES 26
+#define HL_INFO_REGISTER_EVENTFD 28
+#define HL_INFO_UNREGISTER_EVENTFD 29
+#define HL_INFO_GET_EVENTS 30
+
+#define HL_INFO_VERSION_MAX_LEN 128
+#define HL_INFO_CARD_NAME_MAX_LEN 16
/**
* struct hl_info_hw_ip_info - hardware information on various IPs in the ASIC
@@ -409,6 +417,7 @@ enum hl_server_type {
* @dram_page_size: The DRAM physical page size.
* @number_of_user_interrupts: The number of interrupts that are available to the userspace
* application to use. Relevant for Gaudi2 and later.
+ * @device_mem_alloc_default_page_size: default page size used in device memory allocation.
*/
struct hl_info_hw_ip_info {
__u64 sram_base_address;
@@ -436,6 +445,8 @@ struct hl_info_hw_ip_info {
__u32 reserved3;
__u16 number_of_user_interrupts;
__u16 pad2;
+ __u64 reserved4;
+ __u64 device_mem_alloc_default_page_size;
};
struct hl_info_dram_usage {
@@ -538,10 +549,15 @@ struct hl_pll_frequency_info {
* struct hl_open_stats_info - device open statistics information
* @open_counter: ever growing counter, increased on each successful dev open
* @last_open_period_ms: duration (ms) device was open last time
+ * @is_compute_ctx_active: Whether there is an active compute context executing
+ * @compute_ctx_in_release: true if the current compute context is being released
*/
struct hl_open_stats_info {
__u64 open_counter;
__u64 last_open_period_ms;
+ __u8 is_compute_ctx_active;
+ __u8 compute_ctx_in_release;
+ __u8 pad[6];
};
/**
@@ -640,6 +656,15 @@ struct hl_info_razwi_event {
__u8 pad[2];
};
+/**
+ * struct hl_info_dev_memalloc_page_sizes - valid page sizes in device mem alloc information.
+ * @page_order_bitmask: bitmap in which a set bit represents the order of the supported page size
+ * (e.g. 0x2100000 means that 1MB and 32MB pages are supported).
+ */
+struct hl_info_dev_memalloc_page_sizes {
+ __u64 page_order_bitmask;
+};
+
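
Decoding page_order_bitmask is a walk over the set bits, where each bit position is the log2 of a supported page size; with the example value 0x2100000 from the comment, bits 20 and 25 give 1 MB and 32 MB. A short sketch:

	static void print_page_sizes(__u64 page_order_bitmask)
	{
		unsigned int order;

		for (order = 0; order < 64; order++)
			if (page_order_bitmask & (1ULL << order))
				printf("supported page size: %llu bytes\n",
				       1ULL << order);
	}
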
enum gaudi_dcores {
HL_GAUDI_WS_DCORE,
HL_GAUDI_WN_DCORE,
@@ -660,6 +685,7 @@ enum gaudi_dcores {
* @period_ms: Period value, in milliseconds, for utilization rate in range 100ms - 1000ms in 100 ms
* resolution. Currently not in use.
* @pll_index: Index as defined in hl_<asic type>_pll_index enumeration.
+ * @eventfd: event file descriptor for event notifications.
* @pad: Padding to 64 bit.
*/
struct hl_info_args {
@@ -672,6 +698,7 @@ struct hl_info_args {
__u32 ctx_id;
__u32 period_ms;
__u32 pll_index;
+ __u32 eventfd;
};
__u32 pad;
@@ -1115,6 +1142,7 @@ union hl_wait_cs_args {
#define HL_MEM_SHARED 0x2
#define HL_MEM_USERPTR 0x4
#define HL_MEM_FORCE_HINT 0x8
+#define HL_MEM_PREFETCH 0x40
/**
* structure hl_mem_in - structure that handle input args for memory IOCTL
@@ -1371,6 +1399,13 @@ struct hl_debug_args {
};
/*
+ * Notifier event values - for the notification mechanism and the HL_INFO_GET_EVENTS command
+ *
+ * HL_NOTIFIER_EVENT_TPC_ASSERT - Indicates TPC assert event
+ */
+#define HL_NOTIFIER_EVENT_TPC_ASSERT (1 << 0)
+
+/*
* Various information operations such as:
* - H/W IP information
* - Current dram usage
diff --git a/drivers/scsi/ufs/ufs.h b/include/ufs/ufs.h
index 1bba3fead2ce..1bba3fead2ce 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/include/ufs/ufs.h
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h
index bcb4f004bed5..bcb4f004bed5 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/include/ufs/ufs_quirks.h
diff --git a/drivers/scsi/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 2b0f3441b813..a92271421718 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -18,10 +18,10 @@
#include <linux/devfreq.h>
#include <linux/pm_runtime.h>
#include <scsi/scsi_device.h>
-#include "unipro.h"
-#include "ufs.h"
-#include "ufs_quirks.h"
-#include "ufshci.h"
+#include <ufs/unipro.h>
+#include <ufs/ufs.h>
+#include <ufs/ufs_quirks.h>
+#include <ufs/ufshci.h>
#define UFSHCD "ufshcd"
diff --git a/drivers/scsi/ufs/ufshci.h b/include/ufs/ufshci.h
index f81aa95ffbc4..f81aa95ffbc4 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
diff --git a/drivers/scsi/ufs/unipro.h b/include/ufs/unipro.h
index 0521f887e3ac..0521f887e3ac 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/include/ufs/unipro.h
diff --git a/include/video/radeon.h b/include/video/radeon.h
index 005eae19ec09..72f94ccfa725 100644
--- a/include/video/radeon.h
+++ b/include/video/radeon.h
@@ -750,7 +750,7 @@
#define WAIT_DMA_GUI_IDLE (1 << 9)
#define WAIT_2D_IDLECLEAN (1 << 16)
-/* SURFACE_CNTL bit consants */
+/* SURFACE_CNTL bit constants */
#define SURF_TRANSLATION_DIS (1 << 8)
#define NONSURF_AP0_SWP_16BPP (1 << 20)
#define NONSURF_AP0_SWP_32BPP (1 << 21)
diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h
index 7e199c6656b9..e5c84ff28c8b 100644
--- a/include/xen/arm/page.h
+++ b/include/xen/arm/page.h
@@ -109,9 +109,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
return __set_phys_to_machine(pfn, mfn);
}
-#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
bool xen_arch_need_swiotlb(struct device *dev,
phys_addr_t phys,
dma_addr_t dev_addr);
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 7d0f2f0037b8..527c9907f99c 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -101,10 +101,10 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref);
* Eventually end access through the given grant reference, and once that
* access has been ended, free the given page too. Access will be ended
* immediately iff the grant entry is not in use, otherwise it will happen
- * some time later. page may be 0, in which case no freeing will occur.
+ * some time later. page may be NULL, in which case no freeing will occur.
* Note that the granted page might still be accessed (read or write) by the
* other side after gnttab_end_foreign_access() returns, so even if page was
- * specified as 0 it is not allowed to just reuse the page for other
+ * specified as NULL it is not allowed to just reuse the page for other
* purposes immediately. gnttab_end_foreign_access() will take an additional
* reference to the granted page in this case, which is dropped only after
* the grant is no longer in use.
@@ -112,7 +112,7 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref);
* gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
* via free_pages_exact()) in order to avoid high order pages.
*/
-void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page);
+void gnttab_end_foreign_access(grant_ref_t ref, struct page *page);
/*
* End access through the given grant reference, iff the grant entry is
diff --git a/init/Kconfig b/init/Kconfig
index 8aed339b8e06..c984afc489de 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -378,6 +378,10 @@ config SYSVIPC_SYSCTL
depends on SYSCTL
default y
+config SYSVIPC_COMPAT
+ def_bool y
+ depends on COMPAT && SYSVIPC
+
config POSIX_MQUEUE
bool "POSIX Message Queues"
depends on NET
@@ -423,8 +427,8 @@ config CROSS_MEMORY_ATTACH
See the man page for more details.
config USELIB
- bool "uselib syscall"
- def_bool ALPHA || M68K || SPARC || X86_32 || IA32_EMULATION
+ bool "uselib syscall (for libc5 and earlier)"
+ default ALPHA || M68K || SPARC
help
This option enables the uselib syscall, a system call used in the
dynamic linker from libc5 and earlier. glibc does not use this
@@ -1338,7 +1342,7 @@ endif
config BOOT_CONFIG
bool "Boot config support"
- select BLK_DEV_INITRD
+ select BLK_DEV_INITRD if !BOOT_CONFIG_EMBED
help
Extra boot config allows system admin to pass a config file as
complemental extension of kernel cmdline when booting.
@@ -1348,6 +1352,35 @@ config BOOT_CONFIG
If unsure, say Y.
+config BOOT_CONFIG_EMBED
+ bool "Embed bootconfig file in the kernel"
+ depends on BOOT_CONFIG
+ help
+ Embed a bootconfig file given by BOOT_CONFIG_EMBED_FILE in the
+ kernel. Usually, the bootconfig file is loaded with the initrd
+ image. But if the system doesn't support initrd, this option will
+ help you by embedding a bootconfig file while building the kernel.
+
+ If unsure, say N.
+
+config BOOT_CONFIG_EMBED_FILE
+ string "Embedded bootconfig file path"
+ depends on BOOT_CONFIG_EMBED
+ help
+ Specify a bootconfig file which will be embedded to the kernel.
+ This bootconfig will be used if there is no initrd or no other
+ bootconfig in the initrd.
+
+config INITRAMFS_PRESERVE_MTIME
+ bool "Preserve cpio archive mtimes in initramfs"
+ default y
+ help
+ Each entry in an initramfs cpio archive carries an mtime value. When
+ enabled, extracted cpio items take this mtime, with directory mtime
+ setting deferred until after creation of any child entries.
+
+ If unsure, say Y.
+
choice
prompt "Compiler optimization level"
default CC_OPTIMIZE_FOR_PERFORMANCE
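
For systems without an initrd, the two new bootconfig options above are enabled together; a minimal .config fragment, with the bootconfig file path purely illustrative:

	CONFIG_BOOT_CONFIG=y
	CONFIG_BOOT_CONFIG_EMBED=y
	CONFIG_BOOT_CONFIG_EMBED_FILE="kernel/my.bconf"
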
@@ -1832,60 +1865,6 @@ config DEBUG_PERF_USE_VMALLOC
endmenu
-config VM_EVENT_COUNTERS
- default y
- bool "Enable VM event counters for /proc/vmstat" if EXPERT
- help
- VM event counters are needed for event counts to be shown.
- This option allows the disabling of the VM event counters
- on EXPERT systems. /proc/vmstat will only show page counts
- if VM event counters are disabled.
-
-config SLUB_DEBUG
- default y
- bool "Enable SLUB debugging support" if EXPERT
- depends on SLUB && SYSFS
- select STACKDEPOT if STACKTRACE_SUPPORT
- help
- SLUB has extensive debug support features. Disabling these can
- result in significant savings in code size. This also disables
- SLUB sysfs support. /sys/slab will not exist and there will be
- no support for cache validation etc.
-
-config COMPAT_BRK
- bool "Disable heap randomization"
- default y
- help
- Randomizing heap placement makes heap exploits harder, but it
- also breaks ancient binaries (including anything libc5 based).
- This option changes the bootup default to heap randomization
- disabled, and can be overridden at runtime by setting
- /proc/sys/kernel/randomize_va_space to 2.
-
- On non-ancient distros (post-2000 ones) N is usually a safe choice.
-
-config MMAP_ALLOW_UNINITIALIZED
- bool "Allow mmapped anonymous memory to be uninitialized"
- depends on EXPERT && !MMU
- default n
- help
- Normally, and according to the Linux spec, anonymous memory obtained
- from mmap() has its contents cleared before it is passed to
- userspace. Enabling this config option allows you to request that
- mmap() skip that if it is given an MAP_UNINITIALIZED flag, thus
- providing a huge performance boost. If this option is not enabled,
- then the flag will be ignored.
-
- This is taken advantage of by uClibc's malloc(), and also by
- ELF-FDPIC binfmt's brk and stack allocator.
-
- Because of the obvious security issues, this option should only be
- enabled on embedded devices where you control what is run in
- userspace. Since that isn't generally a problem on no-MMU systems,
- it is normally safe to say Y here.
-
- See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
-
config SYSTEM_DATA_VERIFICATION
def_bool n
select SYSTEM_TRUSTED_KEYRING
diff --git a/init/initramfs.c b/init/initramfs.c
index 2f3d96dc3db6..18229cfe8906 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -15,10 +15,14 @@
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/init_syscalls.h>
+#include <linux/task_work.h>
#include <linux/umh.h>
-static ssize_t __init xwrite(struct file *file, const char *p, size_t count,
- loff_t *pos)
+static __initdata bool csum_present;
+static __initdata u32 io_csum;
+
+static ssize_t __init xwrite(struct file *file, const unsigned char *p,
+ size_t count, loff_t *pos)
{
ssize_t out = 0;
@@ -33,6 +37,13 @@ static ssize_t __init xwrite(struct file *file, const char *p, size_t count,
} else if (rv == 0)
break;
+ if (csum_present) {
+ ssize_t i;
+
+ for (i = 0; i < rv; i++)
+ io_csum += p[i];
+ }
+
p += rv;
out += rv;
count -= rv;
@@ -116,31 +127,36 @@ static void __init free_hash(void)
}
}
-static long __init do_utime(char *filename, time64_t mtime)
+#ifdef CONFIG_INITRAMFS_PRESERVE_MTIME
+static void __init do_utime(char *filename, time64_t mtime)
{
- struct timespec64 t[2];
+ struct timespec64 t[2] = { { .tv_sec = mtime }, { .tv_sec = mtime } };
+ init_utimes(filename, t);
+}
- t[0].tv_sec = mtime;
- t[0].tv_nsec = 0;
- t[1].tv_sec = mtime;
- t[1].tv_nsec = 0;
- return init_utimes(filename, t);
+static void __init do_utime_path(const struct path *path, time64_t mtime)
+{
+ struct timespec64 t[2] = { { .tv_sec = mtime }, { .tv_sec = mtime } };
+ vfs_utimes(path, t);
}
static __initdata LIST_HEAD(dir_list);
struct dir_entry {
struct list_head list;
- char *name;
time64_t mtime;
+ char name[];
};
static void __init dir_add(const char *name, time64_t mtime)
{
- struct dir_entry *de = kmalloc(sizeof(struct dir_entry), GFP_KERNEL);
+ size_t nlen = strlen(name) + 1;
+ struct dir_entry *de;
+
+ de = kmalloc(sizeof(struct dir_entry) + nlen, GFP_KERNEL);
if (!de)
panic_show_mem("can't allocate dir_entry buffer");
INIT_LIST_HEAD(&de->list);
- de->name = kstrdup(name, GFP_KERNEL);
+ strscpy(de->name, name, nlen);
de->mtime = mtime;
list_add(&de->list, &dir_list);
}
@@ -151,10 +167,15 @@ static void __init dir_utime(void)
list_for_each_entry_safe(de, tmp, &dir_list, list) {
list_del(&de->list);
do_utime(de->name, de->mtime);
- kfree(de->name);
kfree(de);
}
}
+#else
+static void __init do_utime(char *filename, time64_t mtime) {}
+static void __init do_utime_path(const struct path *path, time64_t mtime) {}
+static void __init dir_add(const char *name, time64_t mtime) {}
+static void __init dir_utime(void) {}
+#endif
static __initdata time64_t mtime;
@@ -166,15 +187,16 @@ static __initdata unsigned long body_len, name_len;
static __initdata uid_t uid;
static __initdata gid_t gid;
static __initdata unsigned rdev;
+static __initdata u32 hdr_csum;
static void __init parse_header(char *s)
{
- unsigned long parsed[12];
+ unsigned long parsed[13];
char buf[9];
int i;
buf[8] = '\0';
- for (i = 0, s += 6; i < 12; i++, s += 8) {
+ for (i = 0, s += 6; i < 13; i++, s += 8) {
memcpy(buf, s, 8);
parsed[i] = simple_strtoul(buf, NULL, 16);
}
@@ -189,6 +211,7 @@ static void __init parse_header(char *s)
minor = parsed[8];
rdev = new_encode_dev(MKDEV(parsed[9], parsed[10]));
name_len = parsed[11];
+ hdr_csum = parsed[12];
}
/* FSM */
@@ -257,12 +280,15 @@ static int __init do_collect(void)
static int __init do_header(void)
{
- if (memcmp(collected, "070707", 6)==0) {
- error("incorrect cpio method used: use -H newc option");
- return 1;
- }
- if (memcmp(collected, "070701", 6)) {
- error("no cpio magic");
+ if (!memcmp(collected, "070701", 6)) {
+ csum_present = false;
+ } else if (!memcmp(collected, "070702", 6)) {
+ csum_present = true;
+ } else {
+ if (memcmp(collected, "070707", 6) == 0)
+ error("incorrect cpio method used: use -H newc option");
+ else
+ error("no cpio magic");
return 1;
}
parse_header(collected);
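
The "070702" magic selects the checksumming newc variant: the 13th header field carries a 32-bit sum of the file's data bytes, which is exactly what io_csum accumulates in xwrite() above. A standalone sketch of the same checksum:

	static uint32_t cpio_crc_csum(const unsigned char *data, size_t len)
	{
		uint32_t csum = 0;

		while (len--)
			csum += *data++;
		return csum;
	}

Archives in this format can be produced with cpio's crc output format (for GNU cpio, -H crc instead of -H newc).
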
@@ -353,6 +379,7 @@ static int __init do_name(void)
if (IS_ERR(wfile))
return 0;
wfile_pos = 0;
+ io_csum = 0;
vfs_fchown(wfile, uid, gid);
vfs_fchmod(wfile, mode);
@@ -380,15 +407,13 @@ static int __init do_name(void)
static int __init do_copy(void)
{
if (byte_count >= body_len) {
- struct timespec64 t[2] = { };
if (xwrite(wfile, victim, body_len, &wfile_pos) != body_len)
error("write error");
- t[0].tv_sec = mtime;
- t[1].tv_sec = mtime;
- vfs_utimes(&wfile->f_path, t);
-
+ do_utime_path(&wfile->f_path, mtime);
fput(wfile);
+ if (csum_present && io_csum != hdr_csum)
+ error("bad data checksum");
eat(body_len);
state = SkipIt;
return 0;
@@ -703,6 +728,7 @@ done:
initrd_end = 0;
flush_delayed_fput();
+ task_work_run();
}
static ASYNC_DOMAIN_EXCLUSIVE(initramfs_domain);
diff --git a/init/main.c b/init/main.c
index f057c49f1d9d..0ee39cdcfcac 100644
--- a/init/main.c
+++ b/init/main.c
@@ -266,7 +266,7 @@ static int __init loglevel(char *str)
early_param("loglevel", loglevel);
#ifdef CONFIG_BLK_DEV_INITRD
-static void * __init get_boot_config_from_initrd(u32 *_size, u32 *_csum)
+static void * __init get_boot_config_from_initrd(size_t *_size)
{
u32 size, csum;
char *data;
@@ -300,17 +300,20 @@ found:
return NULL;
}
+ if (xbc_calc_checksum(data, size) != csum) {
+ pr_err("bootconfig checksum failed\n");
+ return NULL;
+ }
+
/* Remove bootconfig from initramfs/initrd */
initrd_end = (unsigned long)data;
if (_size)
*_size = size;
- if (_csum)
- *_csum = csum;
return data;
}
#else
-static void * __init get_boot_config_from_initrd(u32 *_size, u32 *_csum)
+static void * __init get_boot_config_from_initrd(size_t *_size)
{
return NULL;
}
@@ -407,14 +410,16 @@ static int __init warn_bootconfig(char *str)
static void __init setup_boot_config(void)
{
static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
- const char *msg;
- int pos;
- u32 size, csum;
- char *data, *err;
- int ret;
+ const char *msg, *data;
+ int pos, ret;
+ size_t size;
+ char *err;
/* Cut out the bootconfig data even if we have no bootconfig option */
- data = get_boot_config_from_initrd(&size, &csum);
+ data = get_boot_config_from_initrd(&size);
+ /* If there is no bootconfig in initrd, try embedded one. */
+ if (!data)
+ data = xbc_get_embedded_bootconfig(&size);
strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
err = parse_args("bootconfig", tmp_cmdline, NULL, 0, 0, 0, NULL,
@@ -433,13 +438,8 @@ static void __init setup_boot_config(void)
}
if (size >= XBC_DATA_MAX) {
- pr_err("bootconfig size %d greater than max size %d\n",
- size, XBC_DATA_MAX);
- return;
- }
-
- if (xbc_calc_checksum(data, size) != csum) {
- pr_err("bootconfig checksum failed\n");
+ pr_err("bootconfig size %ld greater than max size %d\n",
+ (long)size, XBC_DATA_MAX);
return;
}
@@ -452,7 +452,7 @@ static void __init setup_boot_config(void)
msg, pos);
} else {
xbc_get_info(&ret, NULL);
- pr_info("Load bootconfig: %d bytes %d nodes\n", size, ret);
+ pr_info("Load bootconfig: %ld bytes %d nodes\n", (long)size, ret);
/* keys starting with "kernel." are passed via cmdline */
extra_command_line = xbc_make_cmdline("kernel");
/* Also, "init." keys are init arguments */
@@ -471,7 +471,7 @@ static void __init exit_boot_config(void)
static void __init setup_boot_config(void)
{
/* Remove bootconfig data from initrd */
- get_boot_config_from_initrd(NULL, NULL);
+ get_boot_config_from_initrd(NULL);
}
static int __init warn_bootconfig(char *str)
@@ -688,7 +688,7 @@ noinline void __ref rest_init(void)
* the init task will end up wanting to create kthreads, which, if
* we schedule it before we create kthreadd, will OOPS.
*/
- pid = kernel_thread(kernel_init, NULL, CLONE_FS);
+ pid = user_mode_thread(kernel_init, NULL, CLONE_FS);
/*
* Pin init on the boot CPU. Task migration is not properly working
* until sched_init_smp() has been run. It will set the allowed
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index f101c171753f..ef313ecfb53a 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -13,43 +13,17 @@
#include <linux/capability.h>
#include <linux/ipc_namespace.h>
#include <linux/msg.h>
+#include <linux/slab.h>
#include "util.h"
-static void *get_ipc(struct ctl_table *table)
-{
- char *which = table->data;
- struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
- which = (which - (char *)&init_ipc_ns) + (char *)ipc_ns;
- return which;
-}
-
-static int proc_ipc_dointvec(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- struct ctl_table ipc_table;
-
- memcpy(&ipc_table, table, sizeof(ipc_table));
- ipc_table.data = get_ipc(table);
-
- return proc_dointvec(&ipc_table, write, buffer, lenp, ppos);
-}
-
-static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- struct ctl_table ipc_table;
-
- memcpy(&ipc_table, table, sizeof(ipc_table));
- ipc_table.data = get_ipc(table);
-
- return proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
-}
-
static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- struct ipc_namespace *ns = current->nsproxy->ipc_ns;
- int err = proc_ipc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ struct ipc_namespace *ns =
+ container_of(table->data, struct ipc_namespace, shm_rmid_forced);
+ int err;
+
+ err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (err < 0)
return err;
@@ -58,17 +32,6 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
return err;
}
-static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- struct ctl_table ipc_table;
- memcpy(&ipc_table, table, sizeof(ipc_table));
- ipc_table.data = get_ipc(table);
-
- return proc_doulongvec_minmax(&ipc_table, write, buffer,
- lenp, ppos);
-}
-
static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
@@ -87,14 +50,15 @@ static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
static int proc_ipc_sem_dointvec(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
+ struct ipc_namespace *ns =
+ container_of(table->data, struct ipc_namespace, sem_ctls);
int ret, semmni;
- struct ipc_namespace *ns = current->nsproxy->ipc_ns;
semmni = ns->sem_ctls[3];
- ret = proc_ipc_dointvec(table, write, buffer, lenp, ppos);
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (!ret)
- ret = sem_check_semmni(current->nsproxy->ipc_ns);
+ ret = sem_check_semmni(ns);
/*
* Reset the semmni value if an error happens.
@@ -104,44 +68,31 @@ static int proc_ipc_sem_dointvec(struct ctl_table *table, int write,
return ret;
}
-#ifdef CONFIG_CHECKPOINT_RESTORE
-static int proc_ipc_dointvec_minmax_checkpoint_restore(struct ctl_table *table,
- int write, void *buffer, size_t *lenp, loff_t *ppos)
-{
- struct user_namespace *user_ns = current->nsproxy->ipc_ns->user_ns;
-
- if (write && !checkpoint_restore_ns_capable(user_ns))
- return -EPERM;
-
- return proc_ipc_dointvec_minmax(table, write, buffer, lenp, ppos);
-}
-#endif
-
int ipc_mni = IPCMNI;
int ipc_mni_shift = IPCMNI_SHIFT;
int ipc_min_cycle = RADIX_TREE_MAP_SIZE;
-static struct ctl_table ipc_kern_table[] = {
+static struct ctl_table ipc_sysctls[] = {
{
.procname = "shmmax",
.data = &init_ipc_ns.shm_ctlmax,
.maxlen = sizeof(init_ipc_ns.shm_ctlmax),
.mode = 0644,
- .proc_handler = proc_ipc_doulongvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
},
{
.procname = "shmall",
.data = &init_ipc_ns.shm_ctlall,
.maxlen = sizeof(init_ipc_ns.shm_ctlall),
.mode = 0644,
- .proc_handler = proc_ipc_doulongvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
},
{
.procname = "shmmni",
.data = &init_ipc_ns.shm_ctlmni,
.maxlen = sizeof(init_ipc_ns.shm_ctlmni),
.mode = 0644,
- .proc_handler = proc_ipc_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = &ipc_mni,
},
@@ -159,7 +110,7 @@ static struct ctl_table ipc_kern_table[] = {
.data = &init_ipc_ns.msg_ctlmax,
.maxlen = sizeof(init_ipc_ns.msg_ctlmax),
.mode = 0644,
- .proc_handler = proc_ipc_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_INT_MAX,
},
@@ -168,7 +119,7 @@ static struct ctl_table ipc_kern_table[] = {
.data = &init_ipc_ns.msg_ctlmni,
.maxlen = sizeof(init_ipc_ns.msg_ctlmni),
.mode = 0644,
- .proc_handler = proc_ipc_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = &ipc_mni,
},
@@ -186,7 +137,7 @@ static struct ctl_table ipc_kern_table[] = {
.data = &init_ipc_ns.msg_ctlmnb,
.maxlen = sizeof(init_ipc_ns.msg_ctlmnb),
.mode = 0644,
- .proc_handler = proc_ipc_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_INT_MAX,
},
@@ -202,8 +153,8 @@ static struct ctl_table ipc_kern_table[] = {
.procname = "sem_next_id",
.data = &init_ipc_ns.ids[IPC_SEM_IDS].next_id,
.maxlen = sizeof(init_ipc_ns.ids[IPC_SEM_IDS].next_id),
- .mode = 0666,
- .proc_handler = proc_ipc_dointvec_minmax_checkpoint_restore,
+ .mode = 0444,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_INT_MAX,
},
@@ -211,8 +162,8 @@ static struct ctl_table ipc_kern_table[] = {
.procname = "msg_next_id",
.data = &init_ipc_ns.ids[IPC_MSG_IDS].next_id,
.maxlen = sizeof(init_ipc_ns.ids[IPC_MSG_IDS].next_id),
- .mode = 0666,
- .proc_handler = proc_ipc_dointvec_minmax_checkpoint_restore,
+ .mode = 0444,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_INT_MAX,
},
@@ -220,8 +171,8 @@ static struct ctl_table ipc_kern_table[] = {
.procname = "shm_next_id",
.data = &init_ipc_ns.ids[IPC_SHM_IDS].next_id,
.maxlen = sizeof(init_ipc_ns.ids[IPC_SHM_IDS].next_id),
- .mode = 0666,
- .proc_handler = proc_ipc_dointvec_minmax_checkpoint_restore,
+ .mode = 0444,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_INT_MAX,
},
@@ -229,18 +180,112 @@ static struct ctl_table ipc_kern_table[] = {
{}
};
-static struct ctl_table ipc_root_table[] = {
- {
- .procname = "kernel",
- .mode = 0555,
- .child = ipc_kern_table,
- },
- {}
+static struct ctl_table_set *set_lookup(struct ctl_table_root *root)
+{
+ return &current->nsproxy->ipc_ns->ipc_set;
+}
+
+static int set_is_seen(struct ctl_table_set *set)
+{
+ return &current->nsproxy->ipc_ns->ipc_set == set;
+}
+
+static int ipc_permissions(struct ctl_table_header *head, struct ctl_table *table)
+{
+ int mode = table->mode;
+
+#ifdef CONFIG_CHECKPOINT_RESTORE
+ struct ipc_namespace *ns = current->nsproxy->ipc_ns;
+
+ if (((table->data == &ns->ids[IPC_SEM_IDS].next_id) ||
+ (table->data == &ns->ids[IPC_MSG_IDS].next_id) ||
+ (table->data == &ns->ids[IPC_SHM_IDS].next_id)) &&
+ checkpoint_restore_ns_capable(ns->user_ns))
+ mode = 0666;
+#endif
+ return mode;
+}
+
+static struct ctl_table_root set_root = {
+ .lookup = set_lookup,
+ .permissions = ipc_permissions,
};
+bool setup_ipc_sysctls(struct ipc_namespace *ns)
+{
+ struct ctl_table *tbl;
+
+ setup_sysctl_set(&ns->ipc_set, &set_root, set_is_seen);
+
+ tbl = kmemdup(ipc_sysctls, sizeof(ipc_sysctls), GFP_KERNEL);
+ if (tbl) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ipc_sysctls); i++) {
+ if (tbl[i].data == &init_ipc_ns.shm_ctlmax)
+ tbl[i].data = &ns->shm_ctlmax;
+
+ else if (tbl[i].data == &init_ipc_ns.shm_ctlall)
+ tbl[i].data = &ns->shm_ctlall;
+
+ else if (tbl[i].data == &init_ipc_ns.shm_ctlmni)
+ tbl[i].data = &ns->shm_ctlmni;
+
+ else if (tbl[i].data == &init_ipc_ns.shm_rmid_forced)
+ tbl[i].data = &ns->shm_rmid_forced;
+
+ else if (tbl[i].data == &init_ipc_ns.msg_ctlmax)
+ tbl[i].data = &ns->msg_ctlmax;
+
+ else if (tbl[i].data == &init_ipc_ns.msg_ctlmni)
+ tbl[i].data = &ns->msg_ctlmni;
+
+ else if (tbl[i].data == &init_ipc_ns.msg_ctlmnb)
+ tbl[i].data = &ns->msg_ctlmnb;
+
+ else if (tbl[i].data == &init_ipc_ns.sem_ctls)
+ tbl[i].data = &ns->sem_ctls;
+#ifdef CONFIG_CHECKPOINT_RESTORE
+ else if (tbl[i].data == &init_ipc_ns.ids[IPC_SEM_IDS].next_id)
+ tbl[i].data = &ns->ids[IPC_SEM_IDS].next_id;
+
+ else if (tbl[i].data == &init_ipc_ns.ids[IPC_MSG_IDS].next_id)
+ tbl[i].data = &ns->ids[IPC_MSG_IDS].next_id;
+
+ else if (tbl[i].data == &init_ipc_ns.ids[IPC_SHM_IDS].next_id)
+ tbl[i].data = &ns->ids[IPC_SHM_IDS].next_id;
+#endif
+ else
+ tbl[i].data = NULL;
+ }
+
+ ns->ipc_sysctls = __register_sysctl_table(&ns->ipc_set, "kernel", tbl);
+ }
+ if (!ns->ipc_sysctls) {
+ kfree(tbl);
+ retire_sysctl_set(&ns->ipc_set);
+ return false;
+ }
+
+ return true;
+}
+
+void retire_ipc_sysctls(struct ipc_namespace *ns)
+{
+ struct ctl_table *tbl;
+
+ tbl = ns->ipc_sysctls->ctl_table_arg;
+ unregister_sysctl_table(ns->ipc_sysctls);
+ retire_sysctl_set(&ns->ipc_set);
+ kfree(tbl);
+}
+
static int __init ipc_sysctl_init(void)
{
- register_sysctl_table(ipc_root_table);
+ if (!setup_ipc_sysctls(&init_ipc_ns)) {
+ pr_warn("ipc sysctl registration failed\n");
+ return -ENOMEM;
+ }
return 0;
}
diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
index 72a92a08c848..fbf6a8b93a26 100644
--- a/ipc/mq_sysctl.c
+++ b/ipc/mq_sysctl.c
@@ -9,39 +9,9 @@
#include <linux/ipc_namespace.h>
#include <linux/sysctl.h>
-#ifdef CONFIG_PROC_SYSCTL
-static void *get_mq(struct ctl_table *table)
-{
- char *which = table->data;
- struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
- which = (which - (char *)&init_ipc_ns) + (char *)ipc_ns;
- return which;
-}
-
-static int proc_mq_dointvec(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- struct ctl_table mq_table;
- memcpy(&mq_table, table, sizeof(mq_table));
- mq_table.data = get_mq(table);
-
- return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
-}
-
-static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- struct ctl_table mq_table;
- memcpy(&mq_table, table, sizeof(mq_table));
- mq_table.data = get_mq(table);
-
- return proc_dointvec_minmax(&mq_table, write, buffer,
- lenp, ppos);
-}
-#else
-#define proc_mq_dointvec NULL
-#define proc_mq_dointvec_minmax NULL
-#endif
+#include <linux/stat.h>
+#include <linux/capability.h>
+#include <linux/slab.h>
static int msg_max_limit_min = MIN_MSGMAX;
static int msg_max_limit_max = HARD_MSGMAX;
@@ -55,14 +25,14 @@ static struct ctl_table mq_sysctls[] = {
.data = &init_ipc_ns.mq_queues_max,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_mq_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.procname = "msg_max",
.data = &init_ipc_ns.mq_msg_max,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_mq_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &msg_max_limit_min,
.extra2 = &msg_max_limit_max,
},
@@ -71,7 +41,7 @@ static struct ctl_table mq_sysctls[] = {
.data = &init_ipc_ns.mq_msgsize_max,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_mq_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &msg_maxsize_limit_min,
.extra2 = &msg_maxsize_limit_max,
},
@@ -80,7 +50,7 @@ static struct ctl_table mq_sysctls[] = {
.data = &init_ipc_ns.mq_msg_default,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_mq_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &msg_max_limit_min,
.extra2 = &msg_max_limit_max,
},
@@ -89,32 +59,73 @@ static struct ctl_table mq_sysctls[] = {
.data = &init_ipc_ns.mq_msgsize_default,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_mq_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &msg_maxsize_limit_min,
.extra2 = &msg_maxsize_limit_max,
},
{}
};
-static struct ctl_table mq_sysctl_dir[] = {
- {
- .procname = "mqueue",
- .mode = 0555,
- .child = mq_sysctls,
- },
- {}
-};
+static struct ctl_table_set *set_lookup(struct ctl_table_root *root)
+{
+ return &current->nsproxy->ipc_ns->mq_set;
+}
-static struct ctl_table mq_sysctl_root[] = {
- {
- .procname = "fs",
- .mode = 0555,
- .child = mq_sysctl_dir,
- },
- {}
+static int set_is_seen(struct ctl_table_set *set)
+{
+ return &current->nsproxy->ipc_ns->mq_set == set;
+}
+
+static struct ctl_table_root set_root = {
+ .lookup = set_lookup,
};
-struct ctl_table_header *mq_register_sysctl_table(void)
+bool setup_mq_sysctls(struct ipc_namespace *ns)
{
- return register_sysctl_table(mq_sysctl_root);
+ struct ctl_table *tbl;
+
+ setup_sysctl_set(&ns->mq_set, &set_root, set_is_seen);
+
+ tbl = kmemdup(mq_sysctls, sizeof(mq_sysctls), GFP_KERNEL);
+ if (tbl) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mq_sysctls); i++) {
+ if (tbl[i].data == &init_ipc_ns.mq_queues_max)
+ tbl[i].data = &ns->mq_queues_max;
+
+ else if (tbl[i].data == &init_ipc_ns.mq_msg_max)
+ tbl[i].data = &ns->mq_msg_max;
+
+ else if (tbl[i].data == &init_ipc_ns.mq_msgsize_max)
+ tbl[i].data = &ns->mq_msgsize_max;
+
+ else if (tbl[i].data == &init_ipc_ns.mq_msg_default)
+ tbl[i].data = &ns->mq_msg_default;
+
+ else if (tbl[i].data == &init_ipc_ns.mq_msgsize_default)
+ tbl[i].data = &ns->mq_msgsize_default;
+ else
+ tbl[i].data = NULL;
+ }
+
+ ns->mq_sysctls = __register_sysctl_table(&ns->mq_set, "fs/mqueue", tbl);
+ }
+ if (!ns->mq_sysctls) {
+ kfree(tbl);
+ retire_sysctl_set(&ns->mq_set);
+ return false;
+ }
+
+ return true;
+}
+
+void retire_mq_sysctls(struct ipc_namespace *ns)
+{
+ struct ctl_table *tbl;
+
+ tbl = ns->mq_sysctls->ctl_table_arg;
+ unregister_sysctl_table(ns->mq_sysctls);
+ retire_sysctl_set(&ns->mq_set);
+ kfree(tbl);
}
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 7c08eb3c258d..12ad7860bb88 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -45,6 +45,7 @@
struct mqueue_fs_context {
struct ipc_namespace *ipc_ns;
+ bool newns; /* Set if newly created ipc namespace */
};
#define MQUEUE_MAGIC 0x19800202
@@ -163,8 +164,6 @@ static void remove_notification(struct mqueue_inode_info *info);
static struct kmem_cache *mqueue_inode_cachep;
-static struct ctl_table_header *mq_sysctl_table;
-
static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
return container_of(inode, struct mqueue_inode_info, vfs_inode);
@@ -427,6 +426,14 @@ static int mqueue_get_tree(struct fs_context *fc)
{
struct mqueue_fs_context *ctx = fc->fs_private;
+ /*
+ * With a newly created ipc namespace, we don't need to do a search
+ * for an ipc namespace match, but we still need to set s_fs_info.
+ */
+ if (ctx->newns) {
+ fc->s_fs_info = ctx->ipc_ns;
+ return get_tree_nodev(fc, mqueue_fill_super);
+ }
return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}
@@ -454,6 +461,10 @@ static int mqueue_init_fs_context(struct fs_context *fc)
return 0;
}
+/*
+ * mq_init_ns() is currently the only caller of mq_create_mount().
+ * So the ns parameter is always a newly created ipc namespace.
+ */
static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
struct mqueue_fs_context *ctx;
@@ -465,6 +476,7 @@ static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
return ERR_CAST(fc);
ctx = fc->fs_private;
+ ctx->newns = true;
put_ipc_ns(ctx->ipc_ns);
ctx->ipc_ns = get_ipc_ns(ns);
put_user_ns(fc->user_ns);
@@ -1713,8 +1725,10 @@ static int __init init_mqueue_fs(void)
if (mqueue_inode_cachep == NULL)
return -ENOMEM;
- /* ignore failures - they are not fatal */
- mq_sysctl_table = mq_register_sysctl_table();
+ if (!setup_mq_sysctls(&init_ipc_ns)) {
+ pr_warn("sysctl registration failed\n");
+ return -ENOMEM;
+ }
error = register_filesystem(&mqueue_fs_type);
if (error)
@@ -1731,8 +1745,6 @@ static int __init init_mqueue_fs(void)
out_filesystem:
unregister_filesystem(&mqueue_fs_type);
out_sysctl:
- if (mq_sysctl_table)
- unregister_sysctl_table(mq_sysctl_table);
kmem_cache_destroy(mqueue_inode_cachep);
return error;
}
diff --git a/ipc/namespace.c b/ipc/namespace.c
index ae83f0f2651b..754f3237194a 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -59,6 +59,13 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
if (err)
goto fail_put;
+ err = -ENOMEM;
+ if (!setup_mq_sysctls(ns))
+ goto fail_put;
+
+ if (!setup_ipc_sysctls(ns))
+ goto fail_put;
+
sem_init_ns(ns);
msg_init_ns(ns);
shm_init_ns(ns);
@@ -125,6 +132,9 @@ static void free_ipc_ns(struct ipc_namespace *ns)
msg_exit_ns(ns);
shm_exit_ns(ns);
+ retire_mq_sysctls(ns);
+ retire_ipc_sysctls(ns);
+
dec_ipc_namespaces(ns->ucounts);
put_user_ns(ns->user_ns);
ns_free_inum(&ns->ns);
diff --git a/ipc/sem.c b/ipc/sem.c
index 0dbdb98fdf2d..c8496f98b139 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -766,7 +766,6 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
for (sop = sops; sop < sops + nsops; sop++) {
curr = &sma->sems[sop->sem_num];
sem_op = sop->sem_op;
- result = curr->semval;
if (sop->sem_flg & SEM_UNDO) {
int undo = un->semadj[sop->sem_num] - sem_op;
@@ -1430,7 +1429,6 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
if (err)
goto out_rcu_wakeup;
- err = -EACCES;
switch (cmd) {
case GETALL:
{
@@ -1995,7 +1993,9 @@ long __do_semtimedop(int semid, struct sembuf *sops,
int max, locknum;
bool undos = false, alter = false, dupsop = false;
struct sem_queue queue;
- unsigned long dup = 0, jiffies_left = 0;
+ unsigned long dup = 0;
+ ktime_t expires, *exp = NULL;
+ bool timed_out = false;
if (nsops < 1 || semid < 0)
return -EINVAL;
@@ -2003,12 +2003,11 @@ long __do_semtimedop(int semid, struct sembuf *sops,
return -E2BIG;
if (timeout) {
- if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
- timeout->tv_nsec >= 1000000000L) {
- error = -EINVAL;
- goto out;
- }
- jiffies_left = timespec64_to_jiffies(timeout);
+ if (!timespec64_valid(timeout))
+ return -EINVAL;
+ expires = ktime_add_safe(ktime_get(),
+ timespec64_to_ktime(*timeout));
+ exp = &expires;
}
@@ -2166,10 +2165,8 @@ long __do_semtimedop(int semid, struct sembuf *sops,
sem_unlock(sma, locknum);
rcu_read_unlock();
- if (timeout)
- jiffies_left = schedule_timeout(jiffies_left);
- else
- schedule();
+ timed_out = !schedule_hrtimeout_range(exp,
+ current->timer_slack_ns, HRTIMER_MODE_ABS);
/*
* fastpath: the semop has completed, either successfully or
@@ -2210,7 +2207,7 @@ long __do_semtimedop(int semid, struct sembuf *sops,
/*
* If an interrupt occurred we have to clean up the queue.
*/
- if (timeout && jiffies_left == 0)
+ if (timed_out)
error = -EAGAIN;
} while (error == -EINTR && !signal_pending(current)); /* spurious */
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index cacd8684c3c4..5f6f3f829b36 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1953,6 +1953,11 @@ out:
CONT; \
LDX_MEM_##SIZEOP: \
DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
+ CONT; \
+ LDX_PROBE_MEM_##SIZEOP: \
+ bpf_probe_read_kernel(&DST, sizeof(SIZE), \
+ (const void *)(long) (SRC + insn->off)); \
+ DST = *((SIZE *)&DST); \
CONT;
LDST(B, u8)
@@ -1960,15 +1965,6 @@ out:
LDST(W, u32)
LDST(DW, u64)
#undef LDST
-#define LDX_PROBE(SIZEOP, SIZE) \
- LDX_PROBE_MEM_##SIZEOP: \
- bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off)); \
- CONT;
- LDX_PROBE(B, 1)
- LDX_PROBE(H, 2)
- LDX_PROBE(W, 4)
- LDX_PROBE(DW, 8)
-#undef LDX_PROBE
#define ATOMIC_ALU_OP(BOP, KOP) \
case BOP: \
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 4d57c03714f4..71122e01623c 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -222,9 +222,6 @@ next:
p = strstr(p+1, name);
}
- if (!ck_cmdline)
- return NULL;
-
return ck_cmdline;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 950b25c3f210..80782cddb1da 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4257,7 +4257,6 @@ static void perf_event_remove_on_exec(int ctxn)
{
struct perf_event_context *ctx, *clone_ctx = NULL;
struct perf_event *event, *next;
- LIST_HEAD(free_list);
unsigned long flags;
bool modified = false;
diff --git a/kernel/fork.c b/kernel/fork.c
index 124829ed0163..9d44f2d46c69 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1982,7 +1982,7 @@ static __latent_entropy struct task_struct *copy_process(
struct task_struct *p;
struct multiprocess_signals delayed;
struct file *pidfile = NULL;
- u64 clone_flags = args->flags;
+ const u64 clone_flags = args->flags;
struct nsproxy *nsp = current->nsproxy;
/*
@@ -2071,6 +2071,9 @@ static __latent_entropy struct task_struct *copy_process(
p = dup_task_struct(current, node);
if (!p)
goto fork_out;
+ p->flags &= ~PF_KTHREAD;
+ if (args->kthread)
+ p->flags |= PF_KTHREAD;
if (args->io_thread) {
/*
* Mark us an IO worker, and block any signal that isn't
@@ -2160,7 +2163,7 @@ static __latent_entropy struct task_struct *copy_process(
p->io_context = NULL;
audit_set_context(p, NULL);
cgroup_fork(p);
- if (p->flags & PF_KTHREAD) {
+ if (args->kthread) {
if (!set_kthread_struct(p))
goto bad_fork_cleanup_delayacct;
}
@@ -2243,7 +2246,7 @@ static __latent_entropy struct task_struct *copy_process(
retval = copy_io(clone_flags, p);
if (retval)
goto bad_fork_cleanup_namespaces;
- retval = copy_thread(clone_flags, args->stack, args->stack_size, p, args->tls);
+ retval = copy_thread(p, args);
if (retval)
goto bad_fork_cleanup_io;
@@ -2547,11 +2550,21 @@ static inline void init_idle_pids(struct task_struct *idle)
}
}
+static int idle_dummy(void *dummy)
+{
+ /* This function is never called */
+ return 0;
+}
+
struct task_struct * __init fork_idle(int cpu)
{
struct task_struct *task;
struct kernel_clone_args args = {
- .flags = CLONE_VM,
+ .flags = CLONE_VM,
+ .fn = &idle_dummy,
+ .fn_arg = NULL,
+ .kthread = 1,
+ .idle = 1,
};
task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
@@ -2582,8 +2595,8 @@ struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
.flags = ((lower_32_bits(flags) | CLONE_VM |
CLONE_UNTRACED) & ~CSIGNAL),
.exit_signal = (lower_32_bits(flags) & CSIGNAL),
- .stack = (unsigned long)fn,
- .stack_size = (unsigned long)arg,
+ .fn = fn,
+ .fn_arg = arg,
.io_thread = 1,
};
@@ -2687,8 +2700,25 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
.flags = ((lower_32_bits(flags) | CLONE_VM |
CLONE_UNTRACED) & ~CSIGNAL),
.exit_signal = (lower_32_bits(flags) & CSIGNAL),
- .stack = (unsigned long)fn,
- .stack_size = (unsigned long)arg,
+ .fn = fn,
+ .fn_arg = arg,
+ .kthread = 1,
+ };
+
+ return kernel_clone(&args);
+}
+
+/*
+ * Create a user mode thread.
+ */
+pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags)
+{
+ struct kernel_clone_args args = {
+ .flags = ((lower_32_bits(flags) | CLONE_VM |
+ CLONE_UNTRACED) & ~CSIGNAL),
+ .exit_signal = (lower_32_bits(flags) & CSIGNAL),
+ .fn = fn,
+ .fn_arg = arg,
};
return kernel_clone(&args);
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 02a65d554340..80bfea5dd5c4 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -73,7 +73,7 @@ static unsigned int __read_mostly sysctl_hung_task_all_cpu_backtrace;
* hung task is detected:
*/
unsigned int __read_mostly sysctl_hung_task_panic =
- CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE;
+ IS_ENABLED(CONFIG_BOOTPARAM_HUNG_TASK_PANIC);
static int
hung_task_panic(struct notifier_block *this, unsigned long event, void *ptr)
diff --git a/kernel/kcov.c b/kernel/kcov.c
index b3732b210593..e19c84b02452 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -204,8 +204,16 @@ void notrace __sanitizer_cov_trace_pc(void)
/* The first 64-bit word is the number of subsequent PCs. */
pos = READ_ONCE(area[0]) + 1;
if (likely(pos < t->kcov_size)) {
- area[pos] = ip;
+ /* Previously we write pc before updating pos. However, some
+ * early interrupt code could bypass check_kcov_mode() check
+ * and invoke __sanitizer_cov_trace_pc(). If such interrupt is
+ * raised between writing pc and updating pos, the pc could be
+ * overitten by the recursive __sanitizer_cov_trace_pc().
+ * Update pos before writing pc to avoid such interleaving.
+ */
WRITE_ONCE(area[0], pos);
+ barrier();
+ area[pos] = ip;
}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
@@ -236,11 +244,13 @@ static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
start_index = 1 + count * KCOV_WORDS_PER_CMP;
end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
if (likely(end_pos <= max_pos)) {
+ /* See comment in __sanitizer_cov_trace_pc(). */
+ WRITE_ONCE(area[0], count + 1);
+ barrier();
area[start_index] = type;
area[start_index + 1] = arg1;
area[start_index + 2] = arg2;
area[start_index + 3] = ip;
- WRITE_ONCE(area[0], count + 1);
}
}
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 8b3e9a2014cf..4d34c78334ce 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -768,7 +768,6 @@ static struct page *kimage_alloc_page(struct kimage *image,
kimage_free_pages(old_page);
continue;
}
- addr = old_addr;
page = old_page;
break;
}
@@ -788,7 +787,6 @@ static int kimage_load_normal_segment(struct kimage *image,
unsigned char __user *buf = NULL;
unsigned char *kbuf = NULL;
- result = 0;
if (image->file_mode)
kbuf = segment->kbuf;
else
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 8347fc158d2b..145321a5e798 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -109,40 +109,6 @@ int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
#endif
/*
- * arch_kexec_apply_relocations_add - apply relocations of type RELA
- * @pi: Purgatory to be relocated.
- * @section: Section relocations applying to.
- * @relsec: Section containing RELAs.
- * @symtab: Corresponding symtab.
- *
- * Return: 0 on success, negative errno on error.
- */
-int __weak
-arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
- const Elf_Shdr *relsec, const Elf_Shdr *symtab)
-{
- pr_err("RELA relocation unsupported.\n");
- return -ENOEXEC;
-}
-
-/*
- * arch_kexec_apply_relocations - apply relocations of type REL
- * @pi: Purgatory to be relocated.
- * @section: Section relocations applying to.
- * @relsec: Section containing RELs.
- * @symtab: Corresponding symtab.
- *
- * Return: 0 on success, negative errno on error.
- */
-int __weak
-arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
- const Elf_Shdr *relsec, const Elf_Shdr *symtab)
-{
- pr_err("REL relocation unsupported.\n");
- return -ENOEXEC;
-}
-
-/*
* Free up memory used by kernel, initrd, and command line. This is temporary
* memory allocation which is not needed any more after these buffers have
* been loaded into separate segments and have been copied elsewhere.
@@ -1260,7 +1226,7 @@ int crash_exclude_mem_range(struct crash_mem *mem,
return 0;
}
-int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
+int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
void **addr, unsigned long *sz)
{
Elf64_Ehdr *ehdr;
@@ -1324,7 +1290,7 @@ int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
phdr++;
/* Prepare PT_LOAD type program header for kernel text region */
- if (kernel_map) {
+ if (need_kernel_map) {
phdr->p_type = PT_LOAD;
phdr->p_flags = PF_R|PF_W|PF_X;
phdr->p_vaddr = (unsigned long) _text;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index dd58c0be9ce2..f214f8c088ed 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1257,79 +1257,6 @@ void kprobe_busy_end(void)
preempt_enable();
}
-#if !defined(CONFIG_KRETPROBE_ON_RETHOOK)
-static void free_rp_inst_rcu(struct rcu_head *head)
-{
- struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
-
- if (refcount_dec_and_test(&ri->rph->ref))
- kfree(ri->rph);
- kfree(ri);
-}
-NOKPROBE_SYMBOL(free_rp_inst_rcu);
-
-static void recycle_rp_inst(struct kretprobe_instance *ri)
-{
- struct kretprobe *rp = get_kretprobe(ri);
-
- if (likely(rp))
- freelist_add(&ri->freelist, &rp->freelist);
- else
- call_rcu(&ri->rcu, free_rp_inst_rcu);
-}
-NOKPROBE_SYMBOL(recycle_rp_inst);
-
-/*
- * This function is called from delayed_put_task_struct() when a task is
- * dead and cleaned up to recycle any kretprobe instances associated with
- * this task. These left over instances represent probed functions that
- * have been called but will never return.
- */
-void kprobe_flush_task(struct task_struct *tk)
-{
- struct kretprobe_instance *ri;
- struct llist_node *node;
-
- /* Early boot, not yet initialized. */
- if (unlikely(!kprobes_initialized))
- return;
-
- kprobe_busy_begin();
-
- node = __llist_del_all(&tk->kretprobe_instances);
- while (node) {
- ri = container_of(node, struct kretprobe_instance, llist);
- node = node->next;
-
- recycle_rp_inst(ri);
- }
-
- kprobe_busy_end();
-}
-NOKPROBE_SYMBOL(kprobe_flush_task);
-
-static inline void free_rp_inst(struct kretprobe *rp)
-{
- struct kretprobe_instance *ri;
- struct freelist_node *node;
- int count = 0;
-
- node = rp->freelist.head;
- while (node) {
- ri = container_of(node, struct kretprobe_instance, freelist);
- node = node->next;
-
- kfree(ri);
- count++;
- }
-
- if (refcount_sub_and_test(count, &rp->rph->ref)) {
- kfree(rp->rph);
- rp->rph = NULL;
- }
-}
-#endif /* !CONFIG_KRETPROBE_ON_RETHOOK */
-
/* Add the new probe to 'ap->list'. */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
@@ -1928,6 +1855,77 @@ static struct notifier_block kprobe_exceptions_nb = {
#ifdef CONFIG_KRETPROBES
#if !defined(CONFIG_KRETPROBE_ON_RETHOOK)
+static void free_rp_inst_rcu(struct rcu_head *head)
+{
+ struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
+
+ if (refcount_dec_and_test(&ri->rph->ref))
+ kfree(ri->rph);
+ kfree(ri);
+}
+NOKPROBE_SYMBOL(free_rp_inst_rcu);
+
+static void recycle_rp_inst(struct kretprobe_instance *ri)
+{
+ struct kretprobe *rp = get_kretprobe(ri);
+
+ if (likely(rp))
+ freelist_add(&ri->freelist, &rp->freelist);
+ else
+ call_rcu(&ri->rcu, free_rp_inst_rcu);
+}
+NOKPROBE_SYMBOL(recycle_rp_inst);
+
+/*
+ * This function is called from delayed_put_task_struct() when a task is
+ * dead and cleaned up to recycle any kretprobe instances associated with
+ * this task. These left over instances represent probed functions that
+ * have been called but will never return.
+ */
+void kprobe_flush_task(struct task_struct *tk)
+{
+ struct kretprobe_instance *ri;
+ struct llist_node *node;
+
+ /* Early boot, not yet initialized. */
+ if (unlikely(!kprobes_initialized))
+ return;
+
+ kprobe_busy_begin();
+
+ node = __llist_del_all(&tk->kretprobe_instances);
+ while (node) {
+ ri = container_of(node, struct kretprobe_instance, llist);
+ node = node->next;
+
+ recycle_rp_inst(ri);
+ }
+
+ kprobe_busy_end();
+}
+NOKPROBE_SYMBOL(kprobe_flush_task);
+
+static inline void free_rp_inst(struct kretprobe *rp)
+{
+ struct kretprobe_instance *ri;
+ struct freelist_node *node;
+ int count = 0;
+
+ node = rp->freelist.head;
+ while (node) {
+ ri = container_of(node, struct kretprobe_instance, freelist);
+ node = node->next;
+
+ kfree(ri);
+ count++;
+ }
+
+ if (refcount_sub_and_test(count, &rp->rph->ref)) {
+ kfree(rp->rph);
+ rp->rph = NULL;
+ }
+}
+
/* This assumes the 'tsk' is the current task or the is not running. */
static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk,
struct llist_node **cur)
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index c172bf92b576..4c4f5a776d80 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -118,7 +118,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
if (func->nop)
goto unlock;
- klp_arch_set_pc(fregs, (unsigned long)func->new_func);
+ ftrace_instruction_pointer_set(fregs, (unsigned long)func->new_func);
unlock:
ftrace_test_recursion_unlock(bit);
diff --git a/kernel/module/signing.c b/kernel/module/signing.c
index 85c8999dfecf..a2ff4242e623 100644
--- a/kernel/module/signing.c
+++ b/kernel/module/signing.c
@@ -16,6 +16,9 @@
#include <uapi/linux/module.h>
#include "internal.h"
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "module."
+
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
module_param(sig_enforce, bool_enable_only, 0644);
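Redefining MODULE_PARAM_PREFIX here pins the externally visible parameter name: the module_param() above stays exposed as module.sig_enforce rather than picking up a prefix derived from the object name under kernel/module/ (this reading is an inference from the prefix value, not stated in the hunk). For example, signature enforcement can then still be forced at boot with module.sig_enforce=1 on the kernel command line.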
diff --git a/kernel/notifier.c b/kernel/notifier.c
index ba005ebf4730..0d5bd62c480e 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -20,7 +20,8 @@ BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
*/
static int notifier_chain_register(struct notifier_block **nl,
- struct notifier_block *n)
+ struct notifier_block *n,
+ bool unique_priority)
{
while ((*nl) != NULL) {
if (unlikely((*nl) == n)) {
@@ -30,6 +31,8 @@ static int notifier_chain_register(struct notifier_block **nl,
}
if (n->priority > (*nl)->priority)
break;
+ if (n->priority == (*nl)->priority && unique_priority)
+ return -EBUSY;
nl = &((*nl)->next);
}
n->next = *nl;
@@ -144,13 +147,36 @@ int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
int ret;
spin_lock_irqsave(&nh->lock, flags);
- ret = notifier_chain_register(&nh->head, n);
+ ret = notifier_chain_register(&nh->head, n, false);
spin_unlock_irqrestore(&nh->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
/**
+ * atomic_notifier_chain_register_unique_prio - Add notifier to an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
+ * @n: New entry in notifier chain
+ *
+ * Adds a notifier to an atomic notifier chain if there is no other
+ * notifier registered using the same priority.
+ *
+ * Returns 0 on success, %-EEXIST or %-EBUSY on error.
+ */
+int atomic_notifier_chain_register_unique_prio(struct atomic_notifier_head *nh,
+ struct notifier_block *n)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&nh->lock, flags);
+ ret = notifier_chain_register(&nh->head, n, true);
+ spin_unlock_irqrestore(&nh->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(atomic_notifier_chain_register_unique_prio);
+
+/**
* atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
* @nh: Pointer to head of the atomic notifier chain
* @n: Entry to remove from notifier chain
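A hedged usage sketch of the unique-priority variant added above (hypothetical chain and callback names): the second registration at the same priority is refused with -EBUSY, which the sys-off code later in this patch relies on to keep at most one handler per non-default priority level.

	static ATOMIC_NOTIFIER_HEAD(example_chain);

	static int example_cb(struct notifier_block *nb, unsigned long ev, void *p)
	{
		return NOTIFY_DONE;
	}

	static struct notifier_block nb_a = { .notifier_call = example_cb, .priority = 128 };
	static struct notifier_block nb_b = { .notifier_call = example_cb, .priority = 128 };

	/* the first call returns 0; the second returns -EBUSY because the
	 * priority 128 slot is already taken */
	atomic_notifier_chain_register_unique_prio(&example_chain, &nb_a);
	atomic_notifier_chain_register_unique_prio(&example_chain, &nb_b);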
@@ -204,23 +230,27 @@ int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
NOKPROBE_SYMBOL(atomic_notifier_call_chain);
+/**
+ * atomic_notifier_call_chain_is_empty - Check whether notifier chain is empty
+ * @nh: Pointer to head of the atomic notifier chain
+ *
+ * Checks whether notifier chain is empty.
+ *
+ * Returns true if the notifier chain is empty, false otherwise.
+ */
+bool atomic_notifier_call_chain_is_empty(struct atomic_notifier_head *nh)
+{
+ return !rcu_access_pointer(nh->head);
+}
+
/*
* Blocking notifier chain routines. All access to the chain is
* synchronized by an rwsem.
*/
-/**
- * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
- * @nh: Pointer to head of the blocking notifier chain
- * @n: New entry in notifier chain
- *
- * Adds a notifier to a blocking notifier chain.
- * Must be called in process context.
- *
- * Returns 0 on success, %-EEXIST on error.
- */
-int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
- struct notifier_block *n)
+static int __blocking_notifier_chain_register(struct blocking_notifier_head *nh,
+ struct notifier_block *n,
+ bool unique_priority)
{
int ret;
@@ -230,16 +260,49 @@ int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
* such times we must not call down_write().
*/
if (unlikely(system_state == SYSTEM_BOOTING))
- return notifier_chain_register(&nh->head, n);
+ return notifier_chain_register(&nh->head, n, unique_priority);
down_write(&nh->rwsem);
- ret = notifier_chain_register(&nh->head, n);
+ ret = notifier_chain_register(&nh->head, n, unique_priority);
up_write(&nh->rwsem);
return ret;
}
+
+/**
+ * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
+ * @n: New entry in notifier chain
+ *
+ * Adds a notifier to a blocking notifier chain.
+ * Must be called in process context.
+ *
+ * Returns 0 on success, %-EEXIST on error.
+ */
+int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
+ struct notifier_block *n)
+{
+ return __blocking_notifier_chain_register(nh, n, false);
+}
EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
/**
+ * blocking_notifier_chain_register_unique_prio - Add notifier to a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
+ * @n: New entry in notifier chain
+ *
+ * Adds a notifier to a blocking notifier chain if there is no other
+ * notifier registered using the same priority.
+ *
+ * Returns 0 on success, %-EEXIST or %-EBUSY on error.
+ */
+int blocking_notifier_chain_register_unique_prio(struct blocking_notifier_head *nh,
+ struct notifier_block *n)
+{
+ return __blocking_notifier_chain_register(nh, n, true);
+}
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_register_unique_prio);
+
+/**
* blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
* @nh: Pointer to head of the blocking notifier chain
* @n: Entry to remove from notifier chain
@@ -341,7 +404,7 @@ EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
int raw_notifier_chain_register(struct raw_notifier_head *nh,
struct notifier_block *n)
{
- return notifier_chain_register(&nh->head, n);
+ return notifier_chain_register(&nh->head, n, false);
}
EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
@@ -420,10 +483,10 @@ int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
* such times we must not call mutex_lock().
*/
if (unlikely(system_state == SYSTEM_BOOTING))
- return notifier_chain_register(&nh->head, n);
+ return notifier_chain_register(&nh->head, n, false);
mutex_lock(&nh->mutex);
- ret = notifier_chain_register(&nh->head, n);
+ ret = notifier_chain_register(&nh->head, n, false);
mutex_unlock(&nh->mutex);
return ret;
}
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index a46a3723bc66..f4f8cb0435b4 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -52,7 +52,7 @@ static struct kmem_cache *create_pid_cachep(unsigned int level)
/* Name collision forces to do allocation under mutex. */
if (!*pkc)
*pkc = kmem_cache_create(name, len, 0,
- SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, 0);
+ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
mutex_unlock(&pid_caches_mutex);
/* current can fail, but someone else can succeed. */
return READ_ONCE(*pkc);
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 938d5c78b421..20a66bf9f465 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -83,7 +83,7 @@ bool hibernation_available(void)
{
return nohibernate == 0 &&
!security_locked_down(LOCKDOWN_HIBERNATION) &&
- !secretmem_active();
+ !secretmem_active() && !cxl_mem_active();
}
/**
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 5242bf2ee469..e3694034b753 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -127,7 +127,9 @@ static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,
char *s = buf;
suspend_state_t i;
- for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
+ for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) {
+ if (i >= PM_SUSPEND_MEM && cxl_mem_active())
+ continue;
if (mem_sleep_states[i]) {
const char *label = mem_sleep_states[i];
@@ -136,6 +138,7 @@ static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,
else
s += sprintf(s, "%s ", label);
}
+ }
/* Convert the last space to a newline if needed. */
if (s != buf)
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 6fcdee7e87a5..827075944d28 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -236,7 +236,8 @@ EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
static bool sleep_state_supported(suspend_state_t state)
{
- return state == PM_SUSPEND_TO_IDLE || valid_state(state);
+ return state == PM_SUSPEND_TO_IDLE ||
+ (valid_state(state) && !cxl_mem_active());
}
static int platform_suspend_prepare(suspend_state_t state)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index a3e1035929b0..ea3dd55709e7 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3904,7 +3904,7 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
}
if (pending & PRINTK_PENDING_WAKEUP)
- wake_up_interruptible_all(&log_wait);
+ wake_up_interruptible(&log_wait);
}
static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index ccc4b465775b..156a99283b11 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -185,7 +185,12 @@ static bool looks_like_a_spurious_pid(struct task_struct *task)
return true;
}
-/* Ensure that nothing can wake it up, even SIGKILL */
+/*
+ * Ensure that nothing can wake it up, even SIGKILL
+ *
+ * A task is switched to this state while a ptrace operation is in progress,
+ * so that the ptrace operation is uninterruptible.
+ */
static bool ptrace_freeze_traced(struct task_struct *task)
{
bool ret = false;
@@ -197,7 +202,7 @@ static bool ptrace_freeze_traced(struct task_struct *task)
spin_lock_irq(&task->sighand->siglock);
if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
!__fatal_signal_pending(task)) {
- WRITE_ONCE(task->__state, __TASK_TRACED);
+ task->jobctl |= JOBCTL_PTRACE_FROZEN;
ret = true;
}
spin_unlock_irq(&task->sighand->siglock);
@@ -207,23 +212,21 @@ static bool ptrace_freeze_traced(struct task_struct *task)
static void ptrace_unfreeze_traced(struct task_struct *task)
{
- if (READ_ONCE(task->__state) != __TASK_TRACED)
- return;
-
- WARN_ON(!task->ptrace || task->parent != current);
+ unsigned long flags;
/*
- * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
- * Recheck state under the lock to close this race.
+ * The child may be awake and may have cleared
+ * JOBCTL_PTRACE_FROZEN (see ptrace_resume). The child will
+ * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew.
*/
- spin_lock_irq(&task->sighand->siglock);
- if (READ_ONCE(task->__state) == __TASK_TRACED) {
- if (__fatal_signal_pending(task))
+ if (lock_task_sighand(task, &flags)) {
+ task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
+ if (__fatal_signal_pending(task)) {
+ task->jobctl &= ~TASK_TRACED;
wake_up_state(task, __TASK_TRACED);
- else
- WRITE_ONCE(task->__state, TASK_TRACED);
+ }
+ unlock_task_sighand(task, &flags);
}
- spin_unlock_irq(&task->sighand->siglock);
}
/**
@@ -256,7 +259,6 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
*/
read_lock(&tasklist_lock);
if (child->ptrace && child->parent == current) {
- WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
/*
* child->sighand can't be NULL, release_task()
* does ptrace_unlink() before __exit_signal().
@@ -266,17 +268,9 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
}
read_unlock(&tasklist_lock);
- if (!ret && !ignore_state) {
- if (!wait_task_inactive(child, __TASK_TRACED)) {
- /*
- * This can only happen if may_ptrace_stop() fails and
- * ptrace_stop() changes ->state back to TASK_RUNNING,
- * so we should not worry about leaking __TASK_TRACED.
- */
- WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
- ret = -ESRCH;
- }
- }
+ if (!ret && !ignore_state &&
+ WARN_ON_ONCE(!wait_task_inactive(child, __TASK_TRACED)))
+ ret = -ESRCH;
return ret;
}
@@ -475,8 +469,10 @@ static int ptrace_attach(struct task_struct *task, long request,
* in and out of STOPPED are protected by siglock.
*/
if (task_is_stopped(task) &&
- task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
+ task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) {
+ task->jobctl &= ~JOBCTL_STOPPED;
signal_wake_up_state(task, __TASK_STOPPED);
+ }
spin_unlock(&task->sighand->siglock);
@@ -829,11 +825,7 @@ static long ptrace_get_rseq_configuration(struct task_struct *task,
}
#endif
-#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request) ((request) == PTRACE_SINGLESTEP)
-#else
-#define is_singlestep(request) 0
-#endif
#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request) ((request) == PTRACE_SINGLEBLOCK)
@@ -850,8 +842,6 @@ static long ptrace_get_rseq_configuration(struct task_struct *task,
static int ptrace_resume(struct task_struct *child, long request,
unsigned long data)
{
- bool need_siglock;
-
if (!valid_signal(data))
return -EIO;
@@ -887,18 +877,12 @@ static int ptrace_resume(struct task_struct *child, long request,
* Note that we need siglock even if ->exit_code == data and/or this
* status was not reported yet, the new status must not be cleared by
* wait_task_stopped() after resume.
- *
- * If data == 0 we do not care if wait_task_stopped() reports the old
- * status and clears the code too; this can't race with the tracee, it
- * takes siglock after resume.
*/
- need_siglock = data && !thread_group_empty(current);
- if (need_siglock)
- spin_lock_irq(&child->sighand->siglock);
+ spin_lock_irq(&child->sighand->siglock);
child->exit_code = data;
+ child->jobctl &= ~JOBCTL_TRACED;
wake_up_state(child, __TASK_TRACED);
- if (need_siglock)
- spin_unlock_irq(&child->sighand->siglock);
+ spin_unlock_irq(&child->sighand->siglock);
return 0;
}
@@ -1221,9 +1205,7 @@ int ptrace_request(struct task_struct *child, long request,
}
#endif
-#ifdef PTRACE_SINGLESTEP
case PTRACE_SINGLESTEP:
-#endif
#ifdef PTRACE_SINGLEBLOCK
case PTRACE_SINGLEBLOCK:
#endif
@@ -1236,9 +1218,8 @@ int ptrace_request(struct task_struct *child, long request,
return ptrace_resume(child, request, data);
case PTRACE_KILL:
- if (child->exit_state) /* already dead */
- return 0;
- return ptrace_resume(child, request, SIGKILL);
+ send_sig_info(SIGKILL, SEND_SIG_NOINFO, child);
+ return 0;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
case PTRACE_GETREGSET:
@@ -1285,10 +1266,6 @@ int ptrace_request(struct task_struct *child, long request,
return ret;
}
-#ifndef arch_ptrace_attach
-#define arch_ptrace_attach(child) do { } while (0)
-#endif
-
SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
unsigned long, data)
{
@@ -1297,8 +1274,6 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
if (request == PTRACE_TRACEME) {
ret = ptrace_traceme();
- if (!ret)
- arch_ptrace_attach(current);
goto out;
}
@@ -1310,12 +1285,6 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
ret = ptrace_attach(child, request, addr, data);
- /*
- * Some architectures need to do book-keeping after
- * a ptrace attach.
- */
- if (!ret)
- arch_ptrace_attach(child);
goto out_put_task_struct;
}
@@ -1455,12 +1424,6 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
ret = ptrace_attach(child, request, addr, data);
- /*
- * Some architectures need to do book-keeping after
- * a ptrace attach.
- */
- if (!ret)
- arch_ptrace_attach(child);
goto out_put_task_struct;
}
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 44228a93742b..a091145ee710 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -48,12 +48,20 @@ int reboot_cpu;
enum reboot_type reboot_type = BOOT_ACPI;
int reboot_force;
+struct sys_off_handler {
+ struct notifier_block nb;
+ int (*sys_off_cb)(struct sys_off_data *data);
+ void *cb_data;
+ enum sys_off_mode mode;
+ bool blocking;
+ void *list;
+};
+
/*
- * If set, this is used for preparing the system to power off.
+ * Temporary stub that prevents linkage failure while we're in the process
+ * of removing all uses of legacy pm_power_off() around the kernel.
*/
-
-void (*pm_power_off_prepare)(void);
-EXPORT_SYMBOL_GPL(pm_power_off_prepare);
+void __weak (*pm_power_off)(void);
/**
* emergency_restart - reboot the system
@@ -281,6 +289,316 @@ void kernel_halt(void)
}
EXPORT_SYMBOL_GPL(kernel_halt);
+/*
+ * Notifier list for kernel code which wants to be called
+ * to prepare system for power off.
+ */
+static BLOCKING_NOTIFIER_HEAD(power_off_prep_handler_list);
+
+/*
+ * Notifier list for kernel code which wants to be called
+ * to power off system.
+ */
+static ATOMIC_NOTIFIER_HEAD(power_off_handler_list);
+
+static int sys_off_notify(struct notifier_block *nb,
+ unsigned long mode, void *cmd)
+{
+ struct sys_off_handler *handler;
+ struct sys_off_data data = {};
+
+ handler = container_of(nb, struct sys_off_handler, nb);
+ data.cb_data = handler->cb_data;
+ data.mode = mode;
+ data.cmd = cmd;
+
+ return handler->sys_off_cb(&data);
+}
+
+/**
+ * register_sys_off_handler - Register sys-off handler
+ * @mode: Sys-off mode
+ * @priority: Handler priority
+ * @callback: Callback function
+ * @cb_data: Callback argument
+ *
+ * Registers system power-off or restart handler that will be invoked
+ * at the step corresponding to the given sys-off mode. Handler's callback
+ * should return NOTIFY_DONE to permit execution of the next handler in
+ * the call chain or NOTIFY_STOP to break the chain (in an error case,
+ * for example).
+ *
+ * Multiple handlers can be registered at the default priority level.
+ *
+ * Only one handler can be registered at the non-default priority level,
+ * otherwise ERR_PTR(-EBUSY) is returned.
+ *
+ * Returns a new instance of struct sys_off_handler on success, or
+ * an ERR_PTR()-encoded error code otherwise.
+ */
+struct sys_off_handler *
+register_sys_off_handler(enum sys_off_mode mode,
+ int priority,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data)
+{
+ struct sys_off_handler *handler;
+ int err;
+
+ handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler)
+ return ERR_PTR(-ENOMEM);
+
+ switch (mode) {
+ case SYS_OFF_MODE_POWER_OFF_PREPARE:
+ handler->list = &power_off_prep_handler_list;
+ handler->blocking = true;
+ break;
+
+ case SYS_OFF_MODE_POWER_OFF:
+ handler->list = &power_off_handler_list;
+ break;
+
+ case SYS_OFF_MODE_RESTART:
+ handler->list = &restart_handler_list;
+ break;
+
+ default:
+ kfree(handler);
+ return ERR_PTR(-EINVAL);
+ }
+
+ handler->nb.notifier_call = sys_off_notify;
+ handler->nb.priority = priority;
+ handler->sys_off_cb = callback;
+ handler->cb_data = cb_data;
+ handler->mode = mode;
+
+ if (handler->blocking) {
+ if (priority == SYS_OFF_PRIO_DEFAULT)
+ err = blocking_notifier_chain_register(handler->list,
+ &handler->nb);
+ else
+ err = blocking_notifier_chain_register_unique_prio(handler->list,
+ &handler->nb);
+ } else {
+ if (priority == SYS_OFF_PRIO_DEFAULT)
+ err = atomic_notifier_chain_register(handler->list,
+ &handler->nb);
+ else
+ err = atomic_notifier_chain_register_unique_prio(handler->list,
+ &handler->nb);
+ }
+
+ if (err) {
+ kfree(handler);
+ return ERR_PTR(err);
+ }
+
+ return handler;
+}
+EXPORT_SYMBOL_GPL(register_sys_off_handler);
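A minimal usage sketch for the new API (hypothetical driver names, not taken from the patch): a driver registers a power-off callback at the default priority and lets the notifier machinery above invoke it when the system powers down.

	static int example_power_off(struct sys_off_data *data)
	{
		struct example_pmic *pmic = data->cb_data;

		/* tell the PMIC to cut power; does not return on success */
		example_pmic_shutdown(pmic);

		return NOTIFY_DONE;
	}

	struct sys_off_handler *handler;

	handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
					   SYS_OFF_PRIO_DEFAULT,
					   example_power_off, pmic);
	if (IS_ERR(handler))
		return PTR_ERR(handler);
	/* ... later, on teardown: unregister_sys_off_handler(handler); */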
+
+/**
+ * unregister_sys_off_handler - Unregister sys-off handler
+ * @handler: Sys-off handler
+ *
+ * Unregisters given sys-off handler.
+ */
+void unregister_sys_off_handler(struct sys_off_handler *handler)
+{
+ int err;
+
+ if (!handler)
+ return;
+
+ if (handler->blocking)
+ err = blocking_notifier_chain_unregister(handler->list,
+ &handler->nb);
+ else
+ err = atomic_notifier_chain_unregister(handler->list,
+ &handler->nb);
+
+ /* sanity check, shall never happen */
+ WARN_ON(err);
+
+ kfree(handler);
+}
+EXPORT_SYMBOL_GPL(unregister_sys_off_handler);
+
+static void devm_unregister_sys_off_handler(void *data)
+{
+ struct sys_off_handler *handler = data;
+
+ unregister_sys_off_handler(handler);
+}
+
+/**
+ * devm_register_sys_off_handler - Register sys-off handler
+ * @dev: Device that registers handler
+ * @mode: Sys-off mode
+ * @priority: Handler priority
+ * @callback: Callback function
+ * @cb_data: Callback argument
+ *
+ * Registers resource-managed sys-off handler.
+ *
+ * Returns zero on success, or error code on failure.
+ */
+int devm_register_sys_off_handler(struct device *dev,
+ enum sys_off_mode mode,
+ int priority,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data)
+{
+ struct sys_off_handler *handler;
+
+ handler = register_sys_off_handler(mode, priority, callback, cb_data);
+ if (IS_ERR(handler))
+ return PTR_ERR(handler);
+
+ return devm_add_action_or_reset(dev, devm_unregister_sys_off_handler,
+ handler);
+}
+EXPORT_SYMBOL_GPL(devm_register_sys_off_handler);
+
+/**
+ * devm_register_power_off_handler - Register power-off handler
+ * @dev: Device that registers callback
+ * @callback: Callback function
+ * @cb_data: Callback's argument
+ *
+ * Registers resource-managed sys-off handler with a default priority
+ * and using power-off mode.
+ *
+ * Returns zero on success, or error code on failure.
+ */
+int devm_register_power_off_handler(struct device *dev,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data)
+{
+ return devm_register_sys_off_handler(dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT,
+ callback, cb_data);
+}
+EXPORT_SYMBOL_GPL(devm_register_power_off_handler);
+
+/**
+ * devm_register_restart_handler - Register restart handler
+ * @dev: Device that registers callback
+ * @callback: Callback function
+ * @cb_data: Callback's argument
+ *
+ * Registers resource-managed sys-off handler with a default priority
+ * and using restart mode.
+ *
+ * Returns zero on success, or error code on failure.
+ */
+int devm_register_restart_handler(struct device *dev,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data)
+{
+ return devm_register_sys_off_handler(dev,
+ SYS_OFF_MODE_RESTART,
+ SYS_OFF_PRIO_DEFAULT,
+ callback, cb_data);
+}
+EXPORT_SYMBOL_GPL(devm_register_restart_handler);
+
+static struct sys_off_handler *platform_power_off_handler;
+
+static int platform_power_off_notify(struct sys_off_data *data)
+{
+ void (*platform_power_power_off_cb)(void) = data->cb_data;
+
+ platform_power_power_off_cb();
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * register_platform_power_off - Register platform-level power-off callback
+ * @power_off: Power-off callback
+ *
+ * Registers power-off callback that will be called as last step
+ * of the power-off sequence. This callback is expected to be invoked
+ * as a last resort. Only one platform power-off callback is allowed
+ * to be registered at a time.
+ *
+ * Returns zero on success, or error code on failure.
+ */
+int register_platform_power_off(void (*power_off)(void))
+{
+ struct sys_off_handler *handler;
+
+ handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_PLATFORM,
+ platform_power_off_notify,
+ power_off);
+ if (IS_ERR(handler))
+ return PTR_ERR(handler);
+
+ platform_power_off_handler = handler;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(register_platform_power_off);
+
+/**
+ * unregister_platform_power_off - Unregister platform-level power-off callback
+ * @power_off: Power-off callback
+ *
+ * Unregisters previously registered platform power-off callback.
+ */
+void unregister_platform_power_off(void (*power_off)(void))
+{
+ if (platform_power_off_handler &&
+ platform_power_off_handler->cb_data == power_off) {
+ unregister_sys_off_handler(platform_power_off_handler);
+ platform_power_off_handler = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(unregister_platform_power_off);
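For platform code that used to assign pm_power_off directly, the pair above is the intended drop-in replacement; the callback keeps the plain void (*)(void) signature. A hedged sketch with a hypothetical board function:

	/* setup path */
	ret = register_platform_power_off(board_do_power_off);

	/* teardown path */
	unregister_platform_power_off(board_do_power_off);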
+
+static int legacy_pm_power_off(struct sys_off_data *data)
+{
+ if (pm_power_off)
+ pm_power_off();
+
+ return NOTIFY_DONE;
+}
+
+static void do_kernel_power_off_prepare(void)
+{
+ blocking_notifier_call_chain(&power_off_prep_handler_list, 0, NULL);
+}
+
+/**
+ * do_kernel_power_off - Execute kernel power-off handler call chain
+ *
+ * Expected to be called as last step of the power-off sequence.
+ *
+ * Powers off the system immediately if a power-off handler function has
+ * been registered. Otherwise does nothing.
+ */
+void do_kernel_power_off(void)
+{
+ atomic_notifier_call_chain(&power_off_handler_list, 0, NULL);
+}
+
+/**
+ * kernel_can_power_off - check whether system can be powered off
+ *
+ * Returns true if power-off handler is registered and system can be
+ * powered off, false otherwise.
+ */
+bool kernel_can_power_off(void)
+{
+ return !atomic_notifier_call_chain_is_empty(&power_off_handler_list);
+}
+EXPORT_SYMBOL_GPL(kernel_can_power_off);
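The chain is expected to be driven from the architecture's final power-off path. A hedged sketch of what such a hook might look like (assumed shape, not part of this patch):

	void machine_power_off(void)
	{
		local_irq_disable();
		do_kernel_power_off();	/* run the registered handlers */
		/* only reached if no handler actually powered the machine off */
		while (1)
			cpu_relax();
	}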
+
/**
* kernel_power_off - power_off the system
*
@@ -289,8 +607,7 @@ EXPORT_SYMBOL_GPL(kernel_halt);
void kernel_power_off(void)
{
kernel_shutdown_prepare(SYSTEM_POWER_OFF);
- if (pm_power_off_prepare)
- pm_power_off_prepare();
+ do_kernel_power_off_prepare();
migrate_to_reboot_cpu();
syscore_shutdown();
pr_emerg("Power down\n");
@@ -313,6 +630,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
void __user *, arg)
{
struct pid_namespace *pid_ns = task_active_pid_ns(current);
+ struct sys_off_handler *sys_off = NULL;
char buffer[256];
int ret = 0;
@@ -337,10 +655,25 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
if (ret)
return ret;
+ /*
+ * Register sys-off handlers for legacy PM callback. This allows
+ * legacy PM callbacks temporarily co-exist with the new sys-off API.
+ *
+ * TODO: Remove legacy handlers once all legacy PM users are
+ * switched to the sys-off based APIs.
+ */
+ if (pm_power_off) {
+ sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT,
+ legacy_pm_power_off, NULL);
+ if (IS_ERR(sys_off))
+ return PTR_ERR(sys_off);
+ }
+
/* Instead of trying to make the power_off code look like
* halt when pm_power_off is not set do it the easy way.
*/
- if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
+ if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !kernel_can_power_off())
cmd = LINUX_REBOOT_CMD_HALT;
mutex_lock(&system_transition_mutex);
@@ -394,6 +727,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
break;
}
mutex_unlock(&system_transition_mutex);
+ unregister_sys_off_handler(sys_off);
return ret;
}
diff --git a/kernel/relay.c b/kernel/relay.c
index d1a67fbb819d..6a611e779e95 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -440,7 +440,7 @@ int relay_prepare_cpu(unsigned int cpu)
mutex_lock(&relay_channels_mutex);
list_for_each_entry(chan, &relay_channels, list) {
- if ((buf = *per_cpu_ptr(chan->buf, cpu)))
+ if (*per_cpu_ptr(chan->buf, cpu))
continue;
buf = relay_open_buf(chan, cpu);
if (!buf) {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 696c6490bd5b..bfa7452ca92e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6353,10 +6353,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
/*
* We must load prev->state once (task_struct::state is volatile), such
- * that:
- *
- * - we form a control dependency vs deactivate_task() below.
- * - ptrace_{,un}freeze_traced() can change ->state underneath us.
+ * that we form a control dependency vs deactivate_task() below.
*/
prev_state = READ_ONCE(prev->__state);
if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8c5b74f66bd3..77b2048a9326 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2927,7 +2927,7 @@ static void task_tick_numa(struct rq *rq, struct task_struct *curr)
/*
* We don't care about NUMA placement if we don't have memory.
*/
- if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
+ if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
return;
/*
diff --git a/kernel/signal.c b/kernel/signal.c
index e43bc2a692f5..edb1dc9b00dc 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -762,7 +762,10 @@ still_pending:
*/
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
+ lockdep_assert_held(&t->sighand->siglock);
+
set_tsk_thread_flag(t, TIF_SIGPENDING);
+
/*
* TASK_WAKEKILL also means wake it up in the stopped/traced/killable
* case. We don't check t->state here because there is a race with it
@@ -884,7 +887,7 @@ static int check_kill_permission(int sig, struct kernel_siginfo *info,
static void ptrace_trap_notify(struct task_struct *t)
{
WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
- assert_spin_locked(&t->sighand->siglock);
+ lockdep_assert_held(&t->sighand->siglock);
task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
@@ -930,9 +933,10 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
for_each_thread(p, t) {
flush_sigqueue_mask(&flush, &t->pending);
task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
- if (likely(!(t->ptrace & PT_SEIZED)))
+ if (likely(!(t->ptrace & PT_SEIZED))) {
+ t->jobctl &= ~JOBCTL_STOPPED;
wake_up_state(t, __TASK_STOPPED);
- else
+ } else
ptrace_trap_notify(t);
}
@@ -1071,15 +1075,15 @@ static inline bool legacy_queue(struct sigpending *signals, int sig)
return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
-static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
- enum pid_type type, bool force)
+static int __send_signal_locked(int sig, struct kernel_siginfo *info,
+ struct task_struct *t, enum pid_type type, bool force)
{
struct sigpending *pending;
struct sigqueue *q;
int override_rlimit;
int ret = 0, result;
- assert_spin_locked(&t->sighand->siglock);
+ lockdep_assert_held(&t->sighand->siglock);
result = TRACE_SIGNAL_IGNORED;
if (!prepare_signal(sig, t, force))
@@ -1212,8 +1216,8 @@ static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
return ret;
}
-static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
- enum pid_type type)
+int send_signal_locked(int sig, struct kernel_siginfo *info,
+ struct task_struct *t, enum pid_type type)
{
/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
bool force = false;
@@ -1245,7 +1249,7 @@ static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct
force = true;
}
}
- return __send_signal(sig, info, t, type, force);
+ return __send_signal_locked(sig, info, t, type, force);
}
static void print_fatal_signal(int signr)
@@ -1281,12 +1285,6 @@ static int __init setup_print_fatal_signals(char *str)
__setup("print-fatal-signals=", setup_print_fatal_signals);
-int
-__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
-{
- return send_signal(sig, info, p, PIDTYPE_TGID);
-}
-
int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
enum pid_type type)
{
@@ -1294,7 +1292,7 @@ int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p
int ret = -ESRCH;
if (lock_task_sighand(p, &flags)) {
- ret = send_signal(sig, info, p, type);
+ ret = send_signal_locked(sig, info, p, type);
unlock_task_sighand(p, &flags);
}
@@ -1347,7 +1345,7 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
if (action->sa.sa_handler == SIG_DFL &&
(!t->ptrace || (handler == HANDLER_EXIT)))
t->signal->flags &= ~SIGNAL_UNKILLABLE;
- ret = send_signal(sig, info, t, PIDTYPE_PID);
+ ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
return ret;
@@ -1567,7 +1565,7 @@ int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
if (sig) {
if (lock_task_sighand(p, &flags)) {
- ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
+ ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
unlock_task_sighand(p, &flags);
} else
ret = -ESRCH;
@@ -2114,7 +2112,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
* parent's namespaces.
*/
if (valid_signal(sig) && sig)
- __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
+ __send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
__wake_up_parent(tsk, tsk->parent);
spin_unlock_irqrestore(&psig->siglock, flags);
@@ -2184,7 +2182,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
spin_lock_irqsave(&sighand->siglock, flags);
if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
!(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
- __group_send_sig_info(SIGCHLD, &info, parent);
+ send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
/*
* Even if SIGCHLD is not generated, we must wake up wait4 calls.
*/
@@ -2204,13 +2202,12 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
* with. If the code did not stop because the tracer is gone,
* the stop signal remains unchanged unless clear_code.
*/
-static int ptrace_stop(int exit_code, int why, int clear_code,
- unsigned long message, kernel_siginfo_t *info)
+static int ptrace_stop(int exit_code, int why, unsigned long message,
+ kernel_siginfo_t *info)
__releases(&current->sighand->siglock)
__acquires(&current->sighand->siglock)
{
bool gstop_done = false;
- bool read_code = true;
if (arch_ptrace_stop_needed()) {
/*
@@ -2227,10 +2224,16 @@ static int ptrace_stop(int exit_code, int why, int clear_code,
}
/*
- * schedule() will not sleep if there is a pending signal that
- * can awaken the task.
+ * After this point ptrace_signal_wake_up or signal_wake_up
+ * will clear TASK_TRACED if ptrace_unlink happens or a fatal
+ * signal comes in. Handle previous ptrace_unlinks and fatal
+ * signals here to prevent ptrace_stop sleeping in schedule.
*/
+ if (!current->ptrace || __fatal_signal_pending(current))
+ return exit_code;
+
set_special_state(TASK_TRACED);
+ current->jobctl |= JOBCTL_TRACED;
/*
* We're committing to trapping. TRACED should be visible before
@@ -2276,54 +2279,33 @@ static int ptrace_stop(int exit_code, int why, int clear_code,
spin_unlock_irq(&current->sighand->siglock);
read_lock(&tasklist_lock);
- if (likely(current->ptrace)) {
- /*
- * Notify parents of the stop.
- *
- * While ptraced, there are two parents - the ptracer and
- * the real_parent of the group_leader. The ptracer should
- * know about every stop while the real parent is only
- * interested in the completion of group stop. The states
- * for the two don't interact with each other. Notify
- * separately unless they're gonna be duplicates.
- */
+ /*
+ * Notify parents of the stop.
+ *
+ * While ptraced, there are two parents - the ptracer and
+ * the real_parent of the group_leader. The ptracer should
+ * know about every stop while the real parent is only
+ * interested in the completion of group stop. The states
+ * for the two don't interact with each other. Notify
+ * separately unless they're gonna be duplicates.
+ */
+ if (current->ptrace)
do_notify_parent_cldstop(current, true, why);
- if (gstop_done && ptrace_reparented(current))
- do_notify_parent_cldstop(current, false, why);
+ if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
+ do_notify_parent_cldstop(current, false, why);
- /*
- * Don't want to allow preemption here, because
- * sys_ptrace() needs this task to be inactive.
- *
- * XXX: implement read_unlock_no_resched().
- */
- preempt_disable();
- read_unlock(&tasklist_lock);
- cgroup_enter_frozen();
- preempt_enable_no_resched();
- freezable_schedule();
- cgroup_leave_frozen(true);
- } else {
- /*
- * By the time we got the lock, our tracer went away.
- * Don't drop the lock yet, another tracer may come.
- *
- * If @gstop_done, the ptracer went away between group stop
- * completion and here. During detach, it would have set
- * JOBCTL_STOP_PENDING on us and we'll re-enter
- * TASK_STOPPED in do_signal_stop() on return, so notifying
- * the real parent of the group stop completion is enough.
- */
- if (gstop_done)
- do_notify_parent_cldstop(current, false, why);
-
- /* tasklist protects us from ptrace_freeze_traced() */
- __set_current_state(TASK_RUNNING);
- read_code = false;
- if (clear_code)
- exit_code = 0;
- read_unlock(&tasklist_lock);
- }
+ /*
+ * Don't want to allow preemption here, because
+ * sys_ptrace() needs this task to be inactive.
+ *
+ * XXX: implement read_unlock_no_resched().
+ */
+ preempt_disable();
+ read_unlock(&tasklist_lock);
+ cgroup_enter_frozen();
+ preempt_enable_no_resched();
+ freezable_schedule();
+ cgroup_leave_frozen(true);
/*
* We are back. Now reacquire the siglock before touching
@@ -2331,14 +2313,13 @@ static int ptrace_stop(int exit_code, int why, int clear_code,
* any signal-sending on another CPU that wants to examine it.
*/
spin_lock_irq(&current->sighand->siglock);
- if (read_code)
- exit_code = current->exit_code;
+ exit_code = current->exit_code;
current->last_siginfo = NULL;
current->ptrace_message = 0;
current->exit_code = 0;
/* LISTENING can be set only during STOP traps, clear it */
- current->jobctl &= ~JOBCTL_LISTENING;
+ current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
/*
* Queued signals ignored us while we were stopped for tracing.
@@ -2360,7 +2341,7 @@ static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long mes
info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
/* Let the debugger run. */
- return ptrace_stop(exit_code, why, 1, message, &info);
+ return ptrace_stop(exit_code, why, message, &info);
}
int ptrace_notify(int exit_code, unsigned long message)
@@ -2471,6 +2452,7 @@ static bool do_signal_stop(int signr)
if (task_participate_group_stop(current))
notify = CLD_STOPPED;
+ current->jobctl |= JOBCTL_STOPPED;
set_special_state(TASK_STOPPED);
spin_unlock_irq(&current->sighand->siglock);
@@ -2532,7 +2514,7 @@ static void do_jobctl_trap(void)
CLD_STOPPED, 0);
} else {
WARN_ON_ONCE(!signr);
- ptrace_stop(signr, CLD_STOPPED, 0, 0, NULL);
+ ptrace_stop(signr, CLD_STOPPED, 0, NULL);
}
}
@@ -2585,7 +2567,7 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
* comment in dequeue_signal().
*/
current->jobctl |= JOBCTL_STOP_DEQUEUED;
- signr = ptrace_stop(signr, CLD_TRAPPED, 0, 0, info);
+ signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
/* We're back. Did the debugger cancel the sig? */
if (signr == 0)
@@ -2612,7 +2594,7 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
/* If the (new) signal is now blocked, requeue it. */
if (sigismember(&current->blocked, signr) ||
fatal_signal_pending(current)) {
- send_signal(signr, info, current, type);
+ send_signal_locked(signr, info, current, type);
signr = 0;
}
@@ -4807,7 +4789,7 @@ void kdb_send_sig(struct task_struct *t, int sig)
"the deadlock.\n");
return;
}
- ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
+ ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
spin_unlock(&t->sighand->siglock);
if (ret)
kdb_printf("Fail to deliver Signal %d to process %d.\n",
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index bcac5a9043aa..f7e246336218 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
+#include <linux/acct.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
@@ -153,6 +154,23 @@ static void send_cpu_listeners(struct sk_buff *skb,
up_write(&listeners->sem);
}
+static void exe_add_tsk(struct taskstats *stats, struct task_struct *tsk)
+{
+ /* No idea if I'm allowed to access that here, now. */
+ struct file *exe_file = get_task_exe_file(tsk);
+
+ if (exe_file) {
+ /* Following cp_new_stat64() in stat.c . */
+ stats->ac_exe_dev =
+ huge_encode_dev(exe_file->f_inode->i_sb->s_dev);
+ stats->ac_exe_inode = exe_file->f_inode->i_ino;
+ fput(exe_file);
+ } else {
+ stats->ac_exe_dev = 0;
+ stats->ac_exe_inode = 0;
+ }
+}
+
static void fill_stats(struct user_namespace *user_ns,
struct pid_namespace *pid_ns,
struct task_struct *tsk, struct taskstats *stats)
@@ -175,6 +193,9 @@ static void fill_stats(struct user_namespace *user_ns,
/* fill in extended acct fields */
xacct_add_tsk(stats, tsk);
+
+ /* add executable info */
+ exe_add_tsk(stats, tsk);
}
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
@@ -620,6 +641,8 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
goto err;
fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);
+ if (group_dead)
+ stats->ac_flag |= AGROUP;
/*
* Doesn't matter if tsk is the leader or the last group member leaving
@@ -665,6 +688,7 @@ static struct genl_family family __ro_after_init = {
.module = THIS_MODULE,
.ops = taskstats_ops,
.n_ops = ARRAY_SIZE(taskstats_ops),
+ .netnsok = true,
};
/* Needed early in initialization */
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 0a97193984db..cb925e8ef9a8 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -870,7 +870,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
{
if (tsk->dl.dl_overrun) {
tsk->dl.dl_overrun = 0;
- __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+ send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
}
}
@@ -884,7 +884,7 @@ static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
rt ? "RT" : "CPU", hard ? "hard" : "soft",
current->comm, task_pid_nr(current));
}
- __group_send_sig_info(signo, SEND_SIG_PRIV, current);
+ send_signal_locked(signo, SEND_SIG_PRIV, current, PIDTYPE_TGID);
return true;
}
@@ -958,7 +958,7 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
trace_itimer_expire(signo == SIGPROF ?
ITIMER_PROF : ITIMER_VIRTUAL,
task_tgid(tsk), cur_time);
- __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
+ send_signal_locked(signo, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
}
if (it->expires && it->expires < *expires)
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index d77cd8032213..0d261774d6f3 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -31,6 +31,10 @@ ifdef CONFIG_GCOV_PROFILE_FTRACE
GCOV_PROFILE := y
endif
+# Functions in this file could be invoked from early interrupt
+# code and produce random code coverage.
+KCOV_INSTRUMENT_trace_preemptirq.o := n
+
CFLAGS_bpf_trace.o := -I$(src)
CFLAGS_trace_benchmark.o := -I$(src)
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 3fd5284f6487..218cd95bf8e4 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -30,6 +30,7 @@ int ftrace_graph_active;
/* Both enabled by default (can be cleared by function_graph tracer flags */
static bool fgraph_sleep_time = true;
+#ifdef CONFIG_DYNAMIC_FTRACE
/*
* archs can override this function if they must do something
* to enable hook for graph tracer.
@@ -47,6 +48,7 @@ int __weak ftrace_disable_ftrace_graph_caller(void)
{
return 0;
}
+#endif
/**
* ftrace_graph_stop - set to permanently disable function graph tracing
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f8db59c3a601..e750fe141a60 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -45,6 +45,8 @@
#include "trace_output.h"
#include "trace_stat.h"
+#define FTRACE_INVALID_FUNCTION "__ftrace_invalid_address__"
+
#define FTRACE_WARN_ON(cond) \
({ \
int ___r = cond; \
@@ -119,7 +121,7 @@ struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;
-/* Defined by vmlinux.lds.h see the commment above arch_ftrace_ops_list_func for details */
+/* Defined by vmlinux.lds.h see the comment above arch_ftrace_ops_list_func for details */
void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs);
@@ -952,7 +954,6 @@ static struct tracer_stat function_stats __initdata = {
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
struct ftrace_profile_stat *stat;
- struct dentry *entry;
char *name;
int ret;
int cpu;
@@ -983,11 +984,9 @@ static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
}
}
- entry = tracefs_create_file("function_profile_enabled",
- TRACE_MODE_WRITE, d_tracer, NULL,
- &ftrace_profile_fops);
- if (!entry)
- pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
+ trace_create_file("function_profile_enabled",
+ TRACE_MODE_WRITE, d_tracer, NULL,
+ &ftrace_profile_fops);
}
#else /* CONFIG_FUNCTION_PROFILER */
@@ -2707,18 +2706,16 @@ ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
* archs can override this function if they must do something
* before the modifying code is performed.
*/
-int __weak ftrace_arch_code_modify_prepare(void)
+void __weak ftrace_arch_code_modify_prepare(void)
{
- return 0;
}
/*
* archs can override this function if they must do something
* after the modifying code is performed.
*/
-int __weak ftrace_arch_code_modify_post_process(void)
+void __weak ftrace_arch_code_modify_post_process(void)
{
- return 0;
}
void ftrace_modify_all_code(int command)
@@ -2804,12 +2801,7 @@ void __weak arch_ftrace_update_code(int command)
static void ftrace_run_update_code(int command)
{
- int ret;
-
- ret = ftrace_arch_code_modify_prepare();
- FTRACE_WARN_ON(ret);
- if (ret)
- return;
+ ftrace_arch_code_modify_prepare();
/*
* By default we use stop_machine() to modify the code.
@@ -2819,8 +2811,7 @@ static void ftrace_run_update_code(int command)
*/
arch_ftrace_update_code(command);
- ret = ftrace_arch_code_modify_post_process();
- FTRACE_WARN_ON(ret);
+ ftrace_arch_code_modify_post_process();
}
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
@@ -3631,6 +3622,105 @@ static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
seq_printf(m, " ->%pS", ptr);
}
+#ifdef FTRACE_MCOUNT_MAX_OFFSET
+/*
+ * Weak functions can still have an mcount/fentry that is saved in
+ * the __mcount_loc section. These can be detected by having a
+ * symbol offset greater than FTRACE_MCOUNT_MAX_OFFSET, as the
+ * symbol found by kallsyms is not the function that the mcount/fentry
+ * is part of. The offset is much greater in these cases.
+ *
+ * Test the record to make sure that the ip points to a valid kallsyms
+ * and if not, mark it disabled.
+ */
+static int test_for_valid_rec(struct dyn_ftrace *rec)
+{
+ char str[KSYM_SYMBOL_LEN];
+ unsigned long offset;
+ const char *ret;
+
+ ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
+
+ /* Weak functions can cause invalid addresses */
+ if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
+ rec->flags |= FTRACE_FL_DISABLED;
+ return 0;
+ }
+ return 1;
+}
+
+static struct workqueue_struct *ftrace_check_wq __initdata;
+static struct work_struct ftrace_check_work __initdata;
+
+/*
+ * Scan all the mcount/fentry entries to make sure they are valid.
+ */
+static __init void ftrace_check_work_func(struct work_struct *work)
+{
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
+
+ mutex_lock(&ftrace_lock);
+ do_for_each_ftrace_rec(pg, rec) {
+ test_for_valid_rec(rec);
+ } while_for_each_ftrace_rec();
+ mutex_unlock(&ftrace_lock);
+}
+
+static int __init ftrace_check_for_weak_functions(void)
+{
+ INIT_WORK(&ftrace_check_work, ftrace_check_work_func);
+
+ ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);
+
+ queue_work(ftrace_check_wq, &ftrace_check_work);
+ return 0;
+}
+
+static int __init ftrace_check_sync(void)
+{
+ /* Make sure the ftrace_check updates are finished */
+ if (ftrace_check_wq)
+ destroy_workqueue(ftrace_check_wq);
+ return 0;
+}
+
+late_initcall_sync(ftrace_check_sync);
+subsys_initcall(ftrace_check_for_weak_functions);
+
+static int print_rec(struct seq_file *m, unsigned long ip)
+{
+ unsigned long offset;
+ char str[KSYM_SYMBOL_LEN];
+ char *modname;
+ const char *ret;
+
+ ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
+ /* Weak functions can cause invalid addresses */
+ if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
+ snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
+ FTRACE_INVALID_FUNCTION, offset);
+ ret = NULL;
+ }
+
+ seq_puts(m, str);
+ if (modname)
+ seq_printf(m, " [%s]", modname);
+ return ret == NULL ? -1 : 0;
+}
+#else
+static inline int test_for_valid_rec(struct dyn_ftrace *rec)
+{
+ return 1;
+}
+
+static inline int print_rec(struct seq_file *m, unsigned long ip)
+{
+ seq_printf(m, "%ps", (void *)ip);
+ return 0;
+}
+#endif
+
static int t_show(struct seq_file *m, void *v)
{
struct ftrace_iterator *iter = m->private;
@@ -3655,7 +3745,13 @@ static int t_show(struct seq_file *m, void *v)
if (!rec)
return 0;
- seq_printf(m, "%ps", (void *)rec->ip);
+ if (print_rec(m, rec->ip)) {
+ /* This should only happen when a rec is disabled */
+ WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
+ seq_putc(m, '\n');
+ return 0;
+ }
+
if (iter->flags & FTRACE_ITER_ENABLED) {
struct ftrace_ops *ops;
@@ -3973,6 +4069,24 @@ add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
return 0;
}
+#ifdef FTRACE_MCOUNT_MAX_OFFSET
+static int lookup_ip(unsigned long ip, char **modname, char *str)
+{
+ unsigned long offset;
+
+ kallsyms_lookup(ip, NULL, &offset, modname, str);
+ if (offset > FTRACE_MCOUNT_MAX_OFFSET)
+ return -1;
+ return 0;
+}
+#else
+static int lookup_ip(unsigned long ip, char **modname, char *str)
+{
+ kallsyms_lookup(ip, NULL, NULL, modname, str);
+ return 0;
+}
+#endif
+
static int
ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
struct ftrace_glob *mod_g, int exclude_mod)
@@ -3980,7 +4094,12 @@ ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
char str[KSYM_SYMBOL_LEN];
char *modname;
- kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
+ if (lookup_ip(rec->ip, &modname, str)) {
+ /* This should only happen when a rec is disabled */
+ WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
+ !(rec->flags & FTRACE_FL_DISABLED));
+ return 0;
+ }
if (mod_g) {
int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
@@ -4431,7 +4550,7 @@ int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
* @ip: The instruction pointer address to remove the data from
*
* Returns the data if it is found, otherwise NULL.
- * Note, if the data pointer is used as the data itself, (see
+ * Note, if the data pointer is used as the data itself, (see
* ftrace_func_mapper_find_ip(), then the return value may be meaningless,
* if the data pointer was set to zero.
*/
@@ -4526,8 +4645,8 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
struct ftrace_probe_ops *probe_ops,
void *data)
{
+ struct ftrace_func_probe *probe = NULL, *iter;
struct ftrace_func_entry *entry;
- struct ftrace_func_probe *probe;
struct ftrace_hash **orig_hash;
struct ftrace_hash *old_hash;
struct ftrace_hash *hash;
@@ -4546,11 +4665,13 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
mutex_lock(&ftrace_lock);
/* Check if the probe_ops is already registered */
- list_for_each_entry(probe, &tr->func_probes, list) {
- if (probe->probe_ops == probe_ops)
+ list_for_each_entry(iter, &tr->func_probes, list) {
+ if (iter->probe_ops == probe_ops) {
+ probe = iter;
break;
+ }
}
- if (&probe->list == &tr->func_probes) {
+ if (!probe) {
probe = kzalloc(sizeof(*probe), GFP_KERNEL);
if (!probe) {
mutex_unlock(&ftrace_lock);
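This hunk and the matching one in the unregister path below switch to the safer list-walk idiom: keep a separate "found" pointer instead of testing the iterator variable after the loop, because once the loop runs off the end the iterator no longer points at a real element. A generic sketch of the pattern (hypothetical types):

	struct item *found = NULL, *iter;

	list_for_each_entry(iter, &head, list) {
		if (iter->key == key) {
			found = iter;
			break;
		}
	}
	if (!found)
		return -ENOENT;	/* nothing matched; 'iter' must not be dereferenced */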
@@ -4668,9 +4789,9 @@ int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
struct ftrace_probe_ops *probe_ops)
{
+ struct ftrace_func_probe *probe = NULL, *iter;
struct ftrace_ops_hash old_hash_ops;
struct ftrace_func_entry *entry;
- struct ftrace_func_probe *probe;
struct ftrace_glob func_g;
struct ftrace_hash **orig_hash;
struct ftrace_hash *old_hash;
@@ -4698,11 +4819,13 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
mutex_lock(&ftrace_lock);
/* Check if the probe_ops is already registered */
- list_for_each_entry(probe, &tr->func_probes, list) {
- if (probe->probe_ops == probe_ops)
+ list_for_each_entry(iter, &tr->func_probes, list) {
+ if (iter->probe_ops == probe_ops) {
+ probe = iter;
break;
+ }
}
- if (&probe->list == &tr->func_probes)
+ if (!probe)
goto err_unlock_ftrace;
ret = -EINVAL;
@@ -5161,8 +5284,6 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr)
goto out_unlock;
ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
- if (ret)
- remove_hash_entry(direct_functions, entry);
if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
ret = register_ftrace_function(&direct_ops);
@@ -5171,6 +5292,7 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr)
}
if (ret) {
+ remove_hash_entry(direct_functions, entry);
kfree(entry);
if (!direct->count) {
list_del_rcu(&direct->next);
@@ -6793,6 +6915,13 @@ void ftrace_module_enable(struct module *mod)
!within_module_init(rec->ip, mod))
break;
+ /* Weak functions should still be ignored */
+ if (!test_for_valid_rec(rec)) {
+ /* Clear all other flags. Should not be enabled anyway */
+ rec->flags = FTRACE_FL_DISABLED;
+ continue;
+ }
+
cnt = 0;
/*
@@ -6829,11 +6958,16 @@ void ftrace_module_enable(struct module *mod)
void ftrace_module_init(struct module *mod)
{
+ int ret;
+
if (ftrace_disabled || !mod->num_ftrace_callsites)
return;
- ftrace_process_locs(mod, mod->ftrace_callsites,
- mod->ftrace_callsites + mod->num_ftrace_callsites);
+ ret = ftrace_process_locs(mod, mod->ftrace_callsites,
+ mod->ftrace_callsites + mod->num_ftrace_callsites);
+ if (ret)
+ pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
+ mod->name);
}
static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
@@ -7166,15 +7300,19 @@ void __init ftrace_init(void)
pr_info("ftrace: allocating %ld entries in %ld pages\n",
count, count / ENTRIES_PER_PAGE + 1);
- last_ftrace_enabled = ftrace_enabled = 1;
-
ret = ftrace_process_locs(NULL,
__start_mcount_loc,
__stop_mcount_loc);
+ if (ret) {
+ pr_warn("ftrace: failed to allocate entries for functions\n");
+ goto failed;
+ }
pr_info("ftrace: allocated %ld pages with %ld groups\n",
ftrace_number_of_pages, ftrace_number_of_groups);
+ last_ftrace_enabled = ftrace_enabled = 1;
+
set_ftrace_early_filters();
return;
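
Several of the ftrace.c hunks above (register_ftrace_function_probe, unregister_ftrace_function_probe_func) drop the old idiom of checking whether the list_for_each_entry() cursor walked off the end (&probe->list == &tr->func_probes) in favour of a dedicated iterator plus a NULL-initialized result pointer. A minimal user-space sketch of that lookup pattern, with hypothetical names and a plain singly linked list instead of the kernel's list_head, could look like:

    /*
     * Minimal user-space sketch (hypothetical names, not kernel code) of
     * the lookup idiom the patch converts to: iterate with a dedicated
     * cursor and assign the result pointer only on a match, so "not
     * found" is simply a NULL result.
     */
    #include <stdio.h>

    struct probe {
        int ops_id;                    /* stand-in for probe_ops */
        struct probe *next;
    };

    static struct probe *find_probe(struct probe *head, int ops_id)
    {
        struct probe *found = NULL, *iter;

        for (iter = head; iter; iter = iter->next) {
            if (iter->ops_id == ops_id) {
                found = iter;          /* assign only on match */
                break;
            }
        }
        return found;                  /* NULL means "allocate a new one" */
    }

    int main(void)
    {
        struct probe c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

        printf("found 2: %s\n", find_probe(&a, 2) ? "yes" : "no");
        printf("found 9: %s\n", find_probe(&a, 9) ? "yes" : "no");
        return 0;
    }

The same shape recurs later in this series for eprobe triggers, subsystem directories, and trigger unregistration.
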
diff --git a/kernel/trace/pid_list.c b/kernel/trace/pid_list.c
index a2ef1d18126a..95106d02b32d 100644
--- a/kernel/trace/pid_list.c
+++ b/kernel/trace/pid_list.c
@@ -118,9 +118,9 @@ static inline unsigned int pid_join(unsigned int upper1,
/**
* trace_pid_list_is_set - test if the pid is set in the list
* @pid_list: The pid list to test
- * @pid: The pid to to see if set in the list.
+ * @pid: The pid to see if set in the list.
*
- * Tests if @pid is is set in the @pid_list. This is usually called
+ * Tests if @pid is set in the @pid_list. This is usually called
* from the scheduler when a task is scheduled. Its pid is checked
* if it should be traced or not.
*
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 05dfc7a12d3d..d59b6a328b7f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -29,6 +29,14 @@
#include <asm/local.h>
+/*
+ * The "absolute" timestamp in the buffer is only 59 bits.
+ * If a clock has the 5 MSBs set, it needs to be saved and
+ * reinserted.
+ */
+#define TS_MSB (0xf8ULL << 56)
+#define ABS_TS_MASK (~TS_MSB)
+
static void update_pages_handler(struct work_struct *work);
/*
@@ -468,6 +476,7 @@ struct rb_time_struct {
local_t cnt;
local_t top;
local_t bottom;
+ local_t msb;
};
#else
#include <asm/local64.h>
@@ -569,7 +578,6 @@ struct ring_buffer_iter {
* For the ring buffer, 64 bit required operations for the time is
* the following:
*
- * - Only need 59 bits (uses 60 to make it even).
* - Reads may fail if it interrupted a modification of the time stamp.
* It will succeed if it did not interrupt another write even if
* the read itself is interrupted by a write.
@@ -594,6 +602,7 @@ struct ring_buffer_iter {
*/
#define RB_TIME_SHIFT 30
#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
+#define RB_TIME_MSB_SHIFT 60
static inline int rb_time_cnt(unsigned long val)
{
@@ -613,7 +622,7 @@ static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
{
- unsigned long top, bottom;
+ unsigned long top, bottom, msb;
unsigned long c;
/*
@@ -625,6 +634,7 @@ static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
c = local_read(&t->cnt);
top = local_read(&t->top);
bottom = local_read(&t->bottom);
+ msb = local_read(&t->msb);
} while (c != local_read(&t->cnt));
*cnt = rb_time_cnt(top);
@@ -633,7 +643,8 @@ static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
if (*cnt != rb_time_cnt(bottom))
return false;
- *ret = rb_time_val(top, bottom);
+ /* The shift to msb will lose its cnt bits */
+ *ret = rb_time_val(top, bottom) | ((u64)msb << RB_TIME_MSB_SHIFT);
return true;
}
@@ -649,10 +660,12 @@ static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt
return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
}
-static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom)
+static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom,
+ unsigned long *msb)
{
*top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
*bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
+ *msb = (unsigned long)(val >> RB_TIME_MSB_SHIFT);
}
static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
@@ -663,15 +676,16 @@ static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long
static void rb_time_set(rb_time_t *t, u64 val)
{
- unsigned long cnt, top, bottom;
+ unsigned long cnt, top, bottom, msb;
- rb_time_split(val, &top, &bottom);
+ rb_time_split(val, &top, &bottom, &msb);
/* Writes always succeed with a valid number even if it gets interrupted. */
do {
cnt = local_inc_return(&t->cnt);
rb_time_val_set(&t->top, top, cnt);
rb_time_val_set(&t->bottom, bottom, cnt);
+ rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt);
} while (cnt != local_read(&t->cnt));
}
@@ -686,8 +700,8 @@ rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
- unsigned long cnt, top, bottom;
- unsigned long cnt2, top2, bottom2;
+ unsigned long cnt, top, bottom, msb;
+ unsigned long cnt2, top2, bottom2, msb2;
u64 val;
/* The cmpxchg always fails if it interrupted an update */
@@ -703,16 +717,18 @@ static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
cnt2 = cnt + 1;
- rb_time_split(val, &top, &bottom);
+ rb_time_split(val, &top, &bottom, &msb);
top = rb_time_val_cnt(top, cnt);
bottom = rb_time_val_cnt(bottom, cnt);
- rb_time_split(set, &top2, &bottom2);
+ rb_time_split(set, &top2, &bottom2, &msb2);
top2 = rb_time_val_cnt(top2, cnt2);
bottom2 = rb_time_val_cnt(bottom2, cnt2);
if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
return false;
+ if (!rb_time_read_cmpxchg(&t->msb, msb, msb2))
+ return false;
if (!rb_time_read_cmpxchg(&t->top, top, top2))
return false;
if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
@@ -783,6 +799,24 @@ static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
}
#endif
+/*
+ * The absolute time stamp drops the 5 MSBs and some clocks may
+ * require them. The rb_fix_abs_ts() will take a previous full
+ * time stamp, and add the 5 MSBs of that time stamp onto the
+ * saved absolute time stamp. Then they are compared in case of
+ * the unlikely event that the latest time stamp incremented
+ * the 5 MSB.
+ */
+static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
+{
+ if (save_ts & TS_MSB) {
+ abs |= save_ts & TS_MSB;
+ /* Check for overflow */
+ if (unlikely(abs < save_ts))
+ abs += 1ULL << 59;
+ }
+ return abs;
+}
static inline u64 rb_time_stamp(struct trace_buffer *buffer);
@@ -811,8 +845,10 @@ u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
u64 ts;
/* If the event includes an absolute time, then just use that */
- if (event->type_len == RINGBUF_TYPE_TIME_STAMP)
- return rb_event_time_stamp(event);
+ if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
+ ts = rb_event_time_stamp(event);
+ return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
+ }
nest = local_read(&cpu_buffer->committing);
verify_event(cpu_buffer, event);
@@ -2754,8 +2790,15 @@ static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
if (unlikely(info->delta > (1ULL << 59))) {
+ /*
+ * Some timers can use more than 59 bits, and when a timestamp
+ * is added to the buffer, it will lose those bits.
+ */
+ if (abs && (info->ts & TS_MSB)) {
+ info->delta &= ABS_TS_MASK;
+
/* did the clock go backwards */
- if (info->before == info->after && info->before > info->ts) {
+ } else if (info->before == info->after && info->before > info->ts) {
/* not interrupted */
static int once;
@@ -3304,7 +3347,7 @@ static void dump_buffer_page(struct buffer_data_page *bpage,
case RINGBUF_TYPE_TIME_STAMP:
delta = rb_event_time_stamp(event);
- ts = delta;
+ ts = rb_fix_abs_ts(delta, ts);
pr_warn(" [%lld] absolute:%lld TIME STAMP\n", ts, delta);
break;
@@ -3380,7 +3423,7 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
case RINGBUF_TYPE_TIME_STAMP:
delta = rb_event_time_stamp(event);
- ts = delta;
+ ts = rb_fix_abs_ts(delta, ts);
break;
case RINGBUF_TYPE_PADDING:
@@ -4367,6 +4410,7 @@ rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
case RINGBUF_TYPE_TIME_STAMP:
delta = rb_event_time_stamp(event);
+ delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp);
cpu_buffer->read_stamp = delta;
return;
@@ -4397,6 +4441,7 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
case RINGBUF_TYPE_TIME_STAMP:
delta = rb_event_time_stamp(event);
+ delta = rb_fix_abs_ts(delta, iter->read_stamp);
iter->read_stamp = delta;
return;
@@ -4650,6 +4695,7 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
case RINGBUF_TYPE_TIME_STAMP:
if (ts) {
*ts = rb_event_time_stamp(event);
+ *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp);
ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
cpu_buffer->cpu, ts);
}
@@ -4741,6 +4787,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
case RINGBUF_TYPE_TIME_STAMP:
if (ts) {
*ts = rb_event_time_stamp(event);
+ *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
cpu_buffer->cpu, ts);
}
@@ -6011,10 +6058,10 @@ static __init int test_ringbuffer(void)
pr_info(" total events: %ld\n", total_lost + total_read);
pr_info(" recorded len bytes: %ld\n", total_len);
pr_info(" recorded size bytes: %ld\n", total_size);
- if (total_lost)
+ if (total_lost) {
pr_info(" With dropped events, record len and size may not match\n"
" alloced and written from above\n");
- if (!total_lost) {
+ } else {
if (RB_WARN_ON(buffer, total_len != total_alloc ||
total_size != total_written))
break;
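
The ring_buffer.c changes above carry the top five bits of the clock separately because an absolute timestamp event only stores 59 bits. A stand-alone illustration of that arithmetic, copying the TS_MSB/ABS_TS_MASK definitions and the rb_fix_abs_ts() logic into a user-space program purely to show the round trip:

    /*
     * User-space sketch, not the kernel function itself: split off the
     * top five bits of a 64-bit clock, then re-attach them with the
     * overflow check used by rb_fix_abs_ts() above.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define TS_MSB      (0xf8ULL << 56)    /* bits 59..63 of the clock */
    #define ABS_TS_MASK (~TS_MSB)          /* the 59 bits an event can hold */

    static uint64_t fix_abs_ts(uint64_t abs, uint64_t save_ts)
    {
        if (save_ts & TS_MSB) {
            abs |= save_ts & TS_MSB;       /* re-attach the saved 5 MSBs */
            if (abs < save_ts)             /* the 59-bit part wrapped since save_ts */
                abs += 1ULL << 59;
        }
        return abs;
    }

    int main(void)
    {
        uint64_t full   = 0xabcdef0123456789ULL; /* clock with the MSBs set */
        uint64_t stored = full & ABS_TS_MASK;    /* what the event records */

        printf("round trip ok: %d\n", fix_abs_ts(stored, full) == full);
        return 0;
    }
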
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 124f1897fd56..2c95992e2c71 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -721,13 +721,16 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
pos = 0;
ret = trace_get_user(&parser, ubuf, cnt, &pos);
- if (ret < 0 || !trace_parser_loaded(&parser))
+ if (ret < 0)
break;
read += ret;
ubuf += ret;
cnt -= ret;
+ if (!trace_parser_loaded(&parser))
+ break;
+
ret = -EINVAL;
if (kstrtoul(parser.buffer, 0, &val))
break;
@@ -753,7 +756,6 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
if (!nr_pids) {
/* Cleared the list of pids */
trace_pid_list_free(pid_list);
- read = ret;
pid_list = NULL;
}
@@ -1174,7 +1176,7 @@ void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
/**
- * tracing_snapshot_cond_data - get the user data associated with a snapshot
+ * tracing_cond_snapshot_data - get the user data associated with a snapshot
* @tr: The tracing instance
*
* When the user enables a conditional snapshot using
@@ -1542,6 +1544,7 @@ static struct {
{ ktime_get_mono_fast_ns, "mono", 1 },
{ ktime_get_raw_fast_ns, "mono_raw", 1 },
{ ktime_get_boot_fast_ns, "boot", 1 },
+ { ktime_get_tai_fast_ns, "tai", 1 },
ARCH_TRACE_CLOCKS
};
@@ -2835,7 +2838,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
-static DEFINE_SPINLOCK(tracepoint_iter_lock);
+static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
static DEFINE_MUTEX(tracepoint_printk_mutex);
static void output_printk(struct trace_event_buffer *fbuffer)
@@ -2863,14 +2866,14 @@ static void output_printk(struct trace_event_buffer *fbuffer)
event = &fbuffer->trace_file->event_call->event;
- spin_lock_irqsave(&tracepoint_iter_lock, flags);
+ raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
trace_seq_init(&iter->seq);
iter->ent = fbuffer->entry;
event_call->event.funcs->trace(iter, 0, event);
trace_seq_putc(&iter->seq, 0);
printk("%s", iter->seq.buffer);
- spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
+ raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}
int tracepoint_printk_sysctl(struct ctl_table *table, int write,
@@ -4249,7 +4252,7 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file
unsigned int flags)
{
bool tgid = flags & TRACE_ITER_RECORD_TGID;
- const char *space = " ";
+ static const char space[] = " ";
int prec = tgid ? 12 : 2;
print_event_info(buf, m);
@@ -4273,9 +4276,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
struct tracer *type = iter->trace;
unsigned long entries;
unsigned long total;
- const char *name = "preemption";
-
- name = type->name;
+ const char *name = type->name;
get_total_entries(buf, &total, &entries);
@@ -5469,7 +5470,7 @@ static const char readme_msg[] =
" error_log\t- error log for failed commands (that support it)\n"
" buffer_size_kb\t- view and modify size of per cpu buffer\n"
" buffer_total_size_kb - view total size of all cpu buffers\n\n"
- " trace_clock\t\t-change the clock used to order events\n"
+ " trace_clock\t\t- change the clock used to order events\n"
" local: Per cpu clock but may not be synced across CPUs\n"
" global: Synced across CPUs but slows tracing down.\n"
" counter: Not a clock, but just an increment\n"
@@ -5478,7 +5479,7 @@ static const char readme_msg[] =
#ifdef CONFIG_X86_64
" x86-tsc: TSC cycle counter\n"
#endif
- "\n timestamp_mode\t-view the mode used to timestamp events\n"
+ "\n timestamp_mode\t- view the mode used to timestamp events\n"
" delta: Delta difference against a buffer-wide timestamp\n"
" absolute: Absolute (standalone) timestamp\n"
"\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
@@ -6326,12 +6327,18 @@ static void tracing_set_nop(struct trace_array *tr)
tr->current_trace = &nop_trace;
}
+static bool tracer_options_updated;
+
static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
/* Only enable if the directory has been created already. */
if (!tr->dir)
return;
+ /* Only create trace option files after update_tracer_options finishes */
+ if (!tracer_options_updated)
+ return;
+
create_trace_option_files(tr, t);
}
@@ -6448,7 +6455,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
{
struct trace_array *tr = filp->private_data;
char buf[MAX_TRACER_SIZE+1];
- int i;
+ char *name;
size_t ret;
int err;
@@ -6462,11 +6469,9 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
buf[cnt] = 0;
- /* strip ending whitespace. */
- for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
- buf[i] = 0;
+ name = strim(buf);
- err = tracing_set_tracer(tr, buf);
+ err = tracing_set_tracer(tr, name);
if (err)
return err;
@@ -9170,6 +9175,7 @@ static void __update_tracer_options(struct trace_array *tr)
static void update_tracer_options(struct trace_array *tr)
{
mutex_lock(&trace_types_lock);
+ tracer_options_updated = true;
__update_tracer_options(tr);
mutex_unlock(&trace_types_lock);
}
@@ -9602,6 +9608,7 @@ extern struct trace_eval_map *__stop_ftrace_eval_maps[];
static struct workqueue_struct *eval_map_wq __initdata;
static struct work_struct eval_map_work __initdata;
+static struct work_struct tracerfs_init_work __initdata;
static void __init eval_map_work_func(struct work_struct *work)
{
@@ -9627,6 +9634,8 @@ static int __init trace_eval_init(void)
return 0;
}
+subsys_initcall(trace_eval_init);
+
static int __init trace_eval_sync(void)
{
/* Make sure the eval map updates are finished */
@@ -9709,15 +9718,8 @@ static struct notifier_block trace_module_nb = {
};
#endif /* CONFIG_MODULES */
-static __init int tracer_init_tracefs(void)
+static __init void tracer_init_tracefs_work_func(struct work_struct *work)
{
- int ret;
-
- trace_access_lock_init();
-
- ret = tracing_init_dentry();
- if (ret)
- return 0;
event_trace_init();
@@ -9739,8 +9741,6 @@ static __init int tracer_init_tracefs(void)
trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
NULL, &tracing_saved_tgids_fops);
- trace_eval_init();
-
trace_create_eval_file(NULL);
#ifdef CONFIG_MODULES
@@ -9755,6 +9755,24 @@ static __init int tracer_init_tracefs(void)
create_trace_instances(NULL);
update_tracer_options(&global_trace);
+}
+
+static __init int tracer_init_tracefs(void)
+{
+ int ret;
+
+ trace_access_lock_init();
+
+ ret = tracing_init_dentry();
+ if (ret)
+ return 0;
+
+ if (eval_map_wq) {
+ INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
+ queue_work(eval_map_wq, &tracerfs_init_work);
+ } else {
+ tracer_init_tracefs_work_func(NULL);
+ }
return 0;
}
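
tracer_init_tracefs() above is split so the bulk of the tracefs population can be queued on the eval_map_wq workqueue when it exists, with a synchronous fallback otherwise. A rough user-space analogue of that defer-or-fallback pattern, using a pthread in place of a kernel workqueue (illustration only, not the kernel API):

    /*
     * Sketch: run heavy setup on a worker when one is available,
     * otherwise run it inline. A pthread stands in for eval_map_wq.
     */
    #include <stdio.h>
    #include <stdbool.h>
    #include <pthread.h>

    static void *init_work(void *arg)
    {
        (void)arg;
        puts("populating trace files...");  /* stands in for event_trace_init() etc. */
        return NULL;
    }

    static int tracer_init(bool have_worker)
    {
        pthread_t worker;

        if (have_worker && pthread_create(&worker, NULL, init_work, NULL) == 0)
            return pthread_join(worker, NULL); /* the kernel queues and returns; joined here only for a clean exit */

        init_work(NULL);                       /* no worker available: run synchronously */
        return 0;
    }

    int main(void)
    {
        return tracer_init(true);
    }
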
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 07d990270e2a..ff816fb41e48 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1573,13 +1573,12 @@ struct enable_trigger_data {
};
extern int event_enable_trigger_print(struct seq_file *m,
- struct event_trigger_ops *ops,
- struct event_trigger_data *data);
-extern void event_enable_trigger_free(struct event_trigger_ops *ops,
struct event_trigger_data *data);
+extern void event_enable_trigger_free(struct event_trigger_data *data);
extern int event_enable_trigger_parse(struct event_command *cmd_ops,
struct trace_event_file *file,
- char *glob, char *cmd, char *param);
+ char *glob, char *cmd,
+ char *param_and_filter);
extern int event_enable_register_trigger(char *glob,
struct event_trigger_data *data,
struct trace_event_file *file);
@@ -1587,8 +1586,7 @@ extern void event_enable_unregister_trigger(char *glob,
struct event_trigger_data *test,
struct trace_event_file *file);
extern void trigger_data_free(struct event_trigger_data *data);
-extern int event_trigger_init(struct event_trigger_ops *ops,
- struct event_trigger_data *data);
+extern int event_trigger_init(struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
@@ -1629,10 +1627,11 @@ extern void event_trigger_reset_filter(struct event_command *cmd_ops,
extern int event_trigger_register(struct event_command *cmd_ops,
struct trace_event_file *file,
char *glob,
- char *cmd,
- char *trigger,
- struct event_trigger_data *trigger_data,
- int *n_registered);
+ struct event_trigger_data *trigger_data);
+extern void event_trigger_unregister(struct event_command *cmd_ops,
+ struct trace_event_file *file,
+ char *glob,
+ struct event_trigger_data *trigger_data);
/**
* struct event_trigger_ops - callbacks for trace event triggers
@@ -1686,12 +1685,9 @@ struct event_trigger_ops {
struct trace_buffer *buffer,
void *rec,
struct ring_buffer_event *rbe);
- int (*init)(struct event_trigger_ops *ops,
- struct event_trigger_data *data);
- void (*free)(struct event_trigger_ops *ops,
- struct event_trigger_data *data);
+ int (*init)(struct event_trigger_data *data);
+ void (*free)(struct event_trigger_data *data);
int (*print)(struct seq_file *m,
- struct event_trigger_ops *ops,
struct event_trigger_data *data);
};
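
The trace.h hunks above drop the struct event_trigger_ops * argument from the trigger init/free/print callbacks, since the implementations never used it. A tiny stand-alone sketch of the slimmed-down callback table, with hypothetical user-space types rather than the kernel structures:

    /*
     * Hypothetical types for illustration: the callbacks now take only
     * the trigger data, mirroring the reduced signatures declared above.
     */
    #include <stdio.h>

    struct trigger_data {
        int ref;
    };

    struct trigger_ops {
        int  (*init)(struct trigger_data *data);
        void (*free)(struct trigger_data *data);
    };

    static int my_init(struct trigger_data *data)
    {
        data->ref++;
        return 0;
    }

    static void my_free(struct trigger_data *data)
    {
        if (data->ref > 0)
            data->ref--;
    }

    int main(void)
    {
        struct trigger_data d = { 0 };
        struct trigger_ops ops = { my_init, my_free };

        ops.init(&d);
        printf("ref after init: %d\n", d.ref);
        ops.free(&d);
        printf("ref after free: %d\n", d.ref);
        return 0;
    }
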
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 0580287d7a0d..778200dd8ede 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -300,7 +300,7 @@ trace_boot_hist_add_handlers(struct xbc_node *hnode, char **bufp,
{
struct xbc_node *node;
const char *p, *handler;
- int ret;
+ int ret = 0;
handler = xbc_node_get_data(hnode);
diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
index e34e8182ee4b..076b447a1b88 100644
--- a/kernel/trace/trace_dynevent.c
+++ b/kernel/trace/trace_dynevent.c
@@ -255,19 +255,14 @@ static const struct file_operations dynamic_events_ops = {
/* Make a tracefs interface for controlling dynamic events */
static __init int init_dynamic_event(void)
{
- struct dentry *entry;
int ret;
ret = tracing_init_dentry();
if (ret)
return 0;
- entry = tracefs_create_file("dynamic_events", TRACE_MODE_WRITE, NULL,
- NULL, &dynamic_events_ops);
-
- /* Event list interface */
- if (!entry)
- pr_warn("Could not create tracefs 'dynamic_events' entry\n");
+ trace_create_file("dynamic_events", TRACE_MODE_WRITE, NULL,
+ NULL, &dynamic_events_ops);
return 0;
}
diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
index 541aa13581b9..7d4478525c66 100644
--- a/kernel/trace/trace_eprobe.c
+++ b/kernel/trace/trace_eprobe.c
@@ -511,20 +511,17 @@ __eprobe_trace_func(struct eprobe_data *edata, void *rec)
* functions are just stubs to fulfill what is needed to use the trigger
* infrastructure.
*/
-static int eprobe_trigger_init(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+static int eprobe_trigger_init(struct event_trigger_data *data)
{
return 0;
}
-static void eprobe_trigger_free(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+static void eprobe_trigger_free(struct event_trigger_data *data)
{
}
static int eprobe_trigger_print(struct seq_file *m,
- struct event_trigger_ops *ops,
struct event_trigger_data *data)
{
/* Do not print eprobe event triggers */
@@ -549,7 +546,8 @@ static struct event_trigger_ops eprobe_trigger_ops = {
static int eprobe_trigger_cmd_parse(struct event_command *cmd_ops,
struct trace_event_file *file,
- char *glob, char *cmd, char *param)
+ char *glob, char *cmd,
+ char *param_and_filter)
{
return -1;
}
@@ -650,7 +648,7 @@ static struct trace_event_functions eprobe_funcs = {
static int disable_eprobe(struct trace_eprobe *ep,
struct trace_array *tr)
{
- struct event_trigger_data *trigger;
+ struct event_trigger_data *trigger = NULL, *iter;
struct trace_event_file *file;
struct eprobe_data *edata;
@@ -658,14 +656,16 @@ static int disable_eprobe(struct trace_eprobe *ep,
if (!file)
return -ENOENT;
- list_for_each_entry(trigger, &file->triggers, list) {
- if (!(trigger->flags & EVENT_TRIGGER_FL_PROBE))
+ list_for_each_entry(iter, &file->triggers, list) {
+ if (!(iter->flags & EVENT_TRIGGER_FL_PROBE))
continue;
- edata = trigger->private_data;
- if (edata->ep == ep)
+ edata = iter->private_data;
+ if (edata->ep == ep) {
+ trigger = iter;
break;
+ }
}
- if (list_entry_is_head(trigger, &file->triggers, list))
+ if (!trigger)
return -ENODEV;
list_del_rcu(&trigger->list);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f97de82d1342..181f08186d32 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -392,12 +392,6 @@ static void test_event_printk(struct trace_event_call *call)
if (!(dereference_flags & (1ULL << arg)))
goto next_arg;
- /* Check for __get_sockaddr */;
- if (str_has_prefix(fmt + i, "__get_sockaddr(")) {
- dereference_flags &= ~(1ULL << arg);
- goto next_arg;
- }
-
/* Find the REC-> in the argument */
c = strchr(fmt + i, ',');
r = strstr(fmt + i, "REC->");
@@ -413,7 +407,14 @@ static void test_event_printk(struct trace_event_call *call)
a = strchr(fmt + i, '&');
if ((a && (a < r)) || test_field(r, call))
dereference_flags &= ~(1ULL << arg);
+ } else if ((r = strstr(fmt + i, "__get_dynamic_array(")) &&
+ (!c || r < c)) {
+ dereference_flags &= ~(1ULL << arg);
+ } else if ((r = strstr(fmt + i, "__get_sockaddr(")) &&
+ (!c || r < c)) {
+ dereference_flags &= ~(1ULL << arg);
}
+
next_arg:
i--;
arg++;
@@ -1723,9 +1724,9 @@ static LIST_HEAD(event_subsystems);
static int subsystem_open(struct inode *inode, struct file *filp)
{
+ struct trace_subsystem_dir *dir = NULL, *iter_dir;
+ struct trace_array *tr = NULL, *iter_tr;
struct event_subsystem *system = NULL;
- struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
- struct trace_array *tr;
int ret;
if (tracing_is_disabled())
@@ -1734,10 +1735,12 @@ static int subsystem_open(struct inode *inode, struct file *filp)
/* Make sure the system still exists */
mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock);
- list_for_each_entry(tr, &ftrace_trace_arrays, list) {
- list_for_each_entry(dir, &tr->systems, list) {
- if (dir == inode->i_private) {
+ list_for_each_entry(iter_tr, &ftrace_trace_arrays, list) {
+ list_for_each_entry(iter_dir, &iter_tr->systems, list) {
+ if (iter_dir == inode->i_private) {
/* Don't open systems with no events */
+ tr = iter_tr;
+ dir = iter_dir;
if (dir->nr_events) {
__get_system_dir(dir);
system = dir->subsystem;
@@ -1753,9 +1756,6 @@ static int subsystem_open(struct inode *inode, struct file *filp)
if (!system)
return -ENODEV;
- /* Some versions of gcc think dir can be uninitialized here */
- WARN_ON(!dir);
-
/* Still need to increment the ref count of the system */
if (trace_array_get(tr) < 0) {
put_system(dir);
@@ -2280,8 +2280,8 @@ static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
struct trace_event_file *file, struct dentry *parent)
{
+ struct event_subsystem *system, *iter;
struct trace_subsystem_dir *dir;
- struct event_subsystem *system;
struct dentry *entry;
/* First see if we did not already create this dir */
@@ -2295,13 +2295,13 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
}
/* Now see if the system itself exists. */
- list_for_each_entry(system, &event_subsystems, list) {
- if (strcmp(system->name, name) == 0)
+ system = NULL;
+ list_for_each_entry(iter, &event_subsystems, list) {
+ if (strcmp(iter->name, name) == 0) {
+ system = iter;
break;
+ }
}
- /* Reset system variable when not found */
- if (&system->list == &event_subsystems)
- system = NULL;
dir = kmalloc(sizeof(*dir), GFP_KERNEL);
if (!dir)
@@ -3546,12 +3546,10 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
struct dentry *d_events;
struct dentry *entry;
- entry = tracefs_create_file("set_event", TRACE_MODE_WRITE, parent,
- tr, &ftrace_set_event_fops);
- if (!entry) {
- pr_warn("Could not create tracefs 'set_event' entry\n");
+ entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent,
+ tr, &ftrace_set_event_fops);
+ if (!entry)
return -ENOMEM;
- }
d_events = tracefs_create_dir("events", parent);
if (!d_events) {
@@ -3566,16 +3564,12 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
/* These are not as crucial, just warn if they are not created */
- entry = tracefs_create_file("set_event_pid", TRACE_MODE_WRITE, parent,
- tr, &ftrace_set_event_pid_fops);
- if (!entry)
- pr_warn("Could not create tracefs 'set_event_pid' entry\n");
+ trace_create_file("set_event_pid", TRACE_MODE_WRITE, parent,
+ tr, &ftrace_set_event_pid_fops);
- entry = tracefs_create_file("set_event_notrace_pid",
- TRACE_MODE_WRITE, parent, tr,
- &ftrace_set_event_notrace_pid_fops);
- if (!entry)
- pr_warn("Could not create tracefs 'set_event_notrace_pid' entry\n");
+ trace_create_file("set_event_notrace_pid",
+ TRACE_MODE_WRITE, parent, tr,
+ &ftrace_set_event_notrace_pid_fops);
/* ring buffer internal formats */
trace_create_file("header_page", TRACE_MODE_READ, d_events,
@@ -3790,17 +3784,14 @@ static __init int event_trace_init_fields(void)
__init int event_trace_init(void)
{
struct trace_array *tr;
- struct dentry *entry;
int ret;
tr = top_trace_array();
if (!tr)
return -ENODEV;
- entry = tracefs_create_file("available_events", TRACE_MODE_READ,
- NULL, tr, &ftrace_avail_fops);
- if (!entry)
- pr_warn("Could not create tracefs 'available_events' entry\n");
+ trace_create_file("available_events", TRACE_MODE_READ,
+ NULL, tr, &ftrace_avail_fops);
ret = early_event_add_tracer(NULL, tr);
if (ret)
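
The test_event_printk() change above treats __get_dynamic_array() and __get_sockaddr() as safe dereferences only when they appear before the next comma, i.e. within the current format argument. A compact user-space sketch of that per-argument scan (hypothetical helper and simplified flags, illustration only):

    /*
     * Clear an argument's "needs verification" bit only when a
     * known-safe helper appears before the end of that argument.
     */
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    static int arg_is_safe(const char *arg)
    {
        const char *c = strchr(arg, ',');            /* end of this argument */
        const char *r;

        r = strstr(arg, "__get_dynamic_array(");
        if (r && (!c || r < c))
            return 1;
        r = strstr(arg, "__get_sockaddr(");
        if (r && (!c || r < c))
            return 1;
        return 0;
    }

    int main(void)
    {
        uint64_t deref_flags = 0x3;                  /* args 0 and 1 flagged */

        if (arg_is_safe("__get_sockaddr(REC->addr), REC->len"))
            deref_flags &= ~(1ULL << 0);
        if (arg_is_safe("REC->name"))
            deref_flags &= ~(1ULL << 1);

        printf("remaining flags: %#llx\n", (unsigned long long)deref_flags);
        return 0;
    }
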
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index b458a9afa2c0..4b1057ab9d96 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1816,7 +1816,7 @@ static void create_filter_finish(struct filter_parse_error *pe)
* create_filter - create a filter for a trace_event_call
* @tr: the trace array associated with these events
* @call: trace_event_call to create a filter for
- * @filter_str: filter string
+ * @filter_string: filter string
* @set_str: remember @filter_str and enable detailed error in filter
* @filterp: out param for created filter (always updated on return)
* Must be a pointer that references a NULL pointer.
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 44db5ba9cabb..48e82e141d54 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -2093,8 +2093,11 @@ static int init_var_ref(struct hist_field *ref_field,
return err;
free:
kfree(ref_field->system);
+ ref_field->system = NULL;
kfree(ref_field->event_name);
+ ref_field->event_name = NULL;
kfree(ref_field->name);
+ ref_field->name = NULL;
goto out;
}
@@ -2785,7 +2788,8 @@ static char *find_trigger_filter(struct hist_trigger_data *hist_data,
static struct event_command trigger_hist_cmd;
static int event_hist_trigger_parse(struct event_command *cmd_ops,
struct trace_event_file *file,
- char *glob, char *cmd, char *param);
+ char *glob, char *cmd,
+ char *param_and_filter);
static bool compatible_keys(struct hist_trigger_data *target_hist_data,
struct hist_trigger_data *hist_data,
@@ -4161,7 +4165,7 @@ static int create_val_field(struct hist_trigger_data *hist_data,
return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
}
-static const char *no_comm = "(no comm)";
+static const char no_comm[] = "(no comm)";
static u64 hist_field_execname(struct hist_field *hist_field,
struct tracing_map_elt *elt,
@@ -5252,7 +5256,7 @@ static void hist_trigger_show(struct seq_file *m,
seq_puts(m, "\n\n");
seq_puts(m, "# event histogram\n#\n# trigger info: ");
- data->ops->print(m, data->ops, data);
+ data->ops->print(m, data);
seq_puts(m, "#\n\n");
hist_data = data->private_data;
@@ -5484,7 +5488,7 @@ static void hist_trigger_debug_show(struct seq_file *m,
seq_puts(m, "\n\n");
seq_puts(m, "# event histogram\n#\n# trigger info: ");
- data->ops->print(m, data->ops, data);
+ data->ops->print(m, data);
seq_puts(m, "#\n\n");
hist_data = data->private_data;
@@ -5621,7 +5625,6 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
}
static int event_hist_trigger_print(struct seq_file *m,
- struct event_trigger_ops *ops,
struct event_trigger_data *data)
{
struct hist_trigger_data *hist_data = data->private_data;
@@ -5729,8 +5732,7 @@ static int event_hist_trigger_print(struct seq_file *m,
return 0;
}
-static int event_hist_trigger_init(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+static int event_hist_trigger_init(struct event_trigger_data *data)
{
struct hist_trigger_data *hist_data = data->private_data;
@@ -5758,8 +5760,7 @@ static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
}
}
-static void event_hist_trigger_free(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+static void event_hist_trigger_free(struct event_trigger_data *data)
{
struct hist_trigger_data *hist_data = data->private_data;
@@ -5788,25 +5789,23 @@ static struct event_trigger_ops event_hist_trigger_ops = {
.free = event_hist_trigger_free,
};
-static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+static int event_hist_trigger_named_init(struct event_trigger_data *data)
{
data->ref++;
save_named_trigger(data->named_data->name, data);
- event_hist_trigger_init(ops, data->named_data);
+ event_hist_trigger_init(data->named_data);
return 0;
}
-static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+static void event_hist_trigger_named_free(struct event_trigger_data *data)
{
if (WARN_ON_ONCE(data->ref <= 0))
return;
- event_hist_trigger_free(ops, data->named_data);
+ event_hist_trigger_free(data->named_data);
data->ref--;
if (!data->ref) {
@@ -5933,6 +5932,48 @@ static bool hist_trigger_match(struct event_trigger_data *data,
return true;
}
+static bool existing_hist_update_only(char *glob,
+ struct event_trigger_data *data,
+ struct trace_event_file *file)
+{
+ struct hist_trigger_data *hist_data = data->private_data;
+ struct event_trigger_data *test, *named_data = NULL;
+ bool updated = false;
+
+ if (!hist_data->attrs->pause && !hist_data->attrs->cont &&
+ !hist_data->attrs->clear)
+ goto out;
+
+ if (hist_data->attrs->name) {
+ named_data = find_named_trigger(hist_data->attrs->name);
+ if (named_data) {
+ if (!hist_trigger_match(data, named_data, named_data,
+ true))
+ goto out;
+ }
+ }
+
+ if (hist_data->attrs->name && !named_data)
+ goto out;
+
+ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ if (!hist_trigger_match(data, test, named_data, false))
+ continue;
+ if (hist_data->attrs->pause)
+ test->paused = true;
+ else if (hist_data->attrs->cont)
+ test->paused = false;
+ else if (hist_data->attrs->clear)
+ hist_clear(test);
+ updated = true;
+ goto out;
+ }
+ }
+ out:
+ return updated;
+}
+
static int hist_register_trigger(char *glob,
struct event_trigger_data *data,
struct trace_event_file *file)
@@ -5961,19 +6002,11 @@ static int hist_register_trigger(char *glob,
list_for_each_entry(test, &file->triggers, list) {
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
- if (!hist_trigger_match(data, test, named_data, false))
- continue;
- if (hist_data->attrs->pause)
- test->paused = true;
- else if (hist_data->attrs->cont)
- test->paused = false;
- else if (hist_data->attrs->clear)
- hist_clear(test);
- else {
+ if (hist_trigger_match(data, test, named_data, false)) {
hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
ret = -EEXIST;
+ goto out;
}
- goto out;
}
}
new:
@@ -5993,7 +6026,7 @@ static int hist_register_trigger(char *glob,
}
if (data->ops->init) {
- ret = data->ops->init(data->ops, data);
+ ret = data->ops->init(data);
if (ret < 0)
goto out;
}
@@ -6012,8 +6045,6 @@ static int hist_register_trigger(char *glob,
if (named_data)
destroy_hist_data(hist_data);
-
- ret++;
out:
return ret;
}
@@ -6089,20 +6120,19 @@ static void hist_unregister_trigger(char *glob,
struct event_trigger_data *data,
struct trace_event_file *file)
{
+ struct event_trigger_data *test = NULL, *iter, *named_data = NULL;
struct hist_trigger_data *hist_data = data->private_data;
- struct event_trigger_data *test, *named_data = NULL;
- bool unregistered = false;
lockdep_assert_held(&event_mutex);
if (hist_data->attrs->name)
named_data = find_named_trigger(hist_data->attrs->name);
- list_for_each_entry(test, &file->triggers, list) {
- if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
- if (!hist_trigger_match(data, test, named_data, false))
+ list_for_each_entry(iter, &file->triggers, list) {
+ if (iter->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ if (!hist_trigger_match(data, iter, named_data, false))
continue;
- unregistered = true;
+ test = iter;
list_del_rcu(&test->list);
trace_event_trigger_enable_disable(file, 0);
update_cond_flag(file);
@@ -6110,11 +6140,11 @@ static void hist_unregister_trigger(char *glob,
}
}
- if (unregistered && test->ops->free)
- test->ops->free(test->ops, test);
+ if (test && test->ops->free)
+ test->ops->free(test);
if (hist_data->enable_timestamps) {
- if (!hist_data->remove || unregistered)
+ if (!hist_data->remove || test)
tracing_set_filter_buffering(file->tr, false);
}
}
@@ -6164,57 +6194,57 @@ static void hist_unreg_all(struct trace_event_file *file)
if (hist_data->enable_timestamps)
tracing_set_filter_buffering(file->tr, false);
if (test->ops->free)
- test->ops->free(test->ops, test);
+ test->ops->free(test);
}
}
}
static int event_hist_trigger_parse(struct event_command *cmd_ops,
struct trace_event_file *file,
- char *glob, char *cmd, char *param)
+ char *glob, char *cmd,
+ char *param_and_filter)
{
unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
struct event_trigger_data *trigger_data;
struct hist_trigger_attrs *attrs;
- struct event_trigger_ops *trigger_ops;
struct hist_trigger_data *hist_data;
+ char *param, *filter, *p, *start;
struct synth_event *se;
const char *se_name;
- bool remove = false;
- char *trigger, *p, *start;
+ bool remove;
int ret = 0;
lockdep_assert_held(&event_mutex);
- WARN_ON(!glob);
+ if (WARN_ON(!glob))
+ return -EINVAL;
- if (strlen(glob)) {
+ if (glob[0]) {
hist_err_clear();
- last_cmd_set(file, param);
+ last_cmd_set(file, param_and_filter);
}
- if (!param)
- return -EINVAL;
+ remove = event_trigger_check_remove(glob);
- if (glob[0] == '!')
- remove = true;
+ if (event_trigger_empty_param(param_and_filter))
+ return -EINVAL;
/*
* separate the trigger from the filter (k:v [if filter])
* allowing for whitespace in the trigger
*/
- p = trigger = param;
+ p = param = param_and_filter;
do {
p = strstr(p, "if");
if (!p)
break;
- if (p == param)
+ if (p == param_and_filter)
return -EINVAL;
if (*(p - 1) != ' ' && *(p - 1) != '\t') {
p++;
continue;
}
- if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
+ if (p >= param_and_filter + strlen(param_and_filter) - (sizeof("if") - 1) - 1)
return -EINVAL;
if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
p++;
@@ -6224,24 +6254,24 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops,
} while (1);
if (!p)
- param = NULL;
+ filter = NULL;
else {
*(p - 1) = '\0';
- param = strstrip(p);
- trigger = strstrip(trigger);
+ filter = strstrip(p);
+ param = strstrip(param);
}
/*
* To simplify arithmetic expression parsing, replace occurrences of
* '.sym-offset' modifier with '.symXoffset'
*/
- start = strstr(trigger, ".sym-offset");
+ start = strstr(param, ".sym-offset");
while (start) {
*(start + 4) = 'X';
start = strstr(start + 11, ".sym-offset");
}
- attrs = parse_hist_trigger_attrs(file->tr, trigger);
+ attrs = parse_hist_trigger_attrs(file->tr, param);
if (IS_ERR(attrs))
return PTR_ERR(attrs);
@@ -6254,29 +6284,15 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops,
return PTR_ERR(hist_data);
}
- trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
-
- trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
+ trigger_data = event_trigger_alloc(cmd_ops, cmd, param, hist_data);
if (!trigger_data) {
ret = -ENOMEM;
goto out_free;
}
- trigger_data->count = -1;
- trigger_data->ops = trigger_ops;
- trigger_data->cmd_ops = cmd_ops;
-
- INIT_LIST_HEAD(&trigger_data->list);
- RCU_INIT_POINTER(trigger_data->filter, NULL);
-
- trigger_data->private_data = hist_data;
-
- /* if param is non-empty, it's supposed to be a filter */
- if (param && cmd_ops->set_filter) {
- ret = cmd_ops->set_filter(param, trigger_data, file);
- if (ret < 0)
- goto out_free;
- }
+ ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
+ if (ret < 0)
+ goto out_free;
if (remove) {
if (!have_hist_trigger_match(trigger_data, file))
@@ -6287,7 +6303,7 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops,
goto out_free;
}
- cmd_ops->unreg(glob+1, trigger_data, file);
+ event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
se_name = trace_event_name(file->event_call);
se = find_synth_event(se_name);
if (se)
@@ -6296,17 +6312,11 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops,
goto out_free;
}
- ret = cmd_ops->reg(glob, trigger_data, file);
- /*
- * The above returns on success the # of triggers registered,
- * but if it didn't register any it returns zero. Consider no
- * triggers registered a failure too.
- */
- if (!ret) {
- if (!(attrs->pause || attrs->cont || attrs->clear))
- ret = -ENOENT;
+ if (existing_hist_update_only(glob, trigger_data, file))
goto out_free;
- } else if (ret < 0)
+
+ ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
+ if (ret < 0)
goto out_free;
if (get_named_trigger_data(trigger_data))
@@ -6331,18 +6341,15 @@ enable:
se = find_synth_event(se_name);
if (se)
se->ref++;
- /* Just return zero, not the number of registered triggers */
- ret = 0;
out:
if (ret == 0)
hist_err_clear();
return ret;
out_unreg:
- cmd_ops->unreg(glob+1, trigger_data, file);
+ event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
out_free:
- if (cmd_ops->set_filter)
- cmd_ops->set_filter(NULL, trigger_data, NULL);
+ event_trigger_reset_filter(cmd_ops, trigger_data);
remove_hist_vars(hist_data);
@@ -6463,7 +6470,7 @@ static void hist_enable_unreg_all(struct trace_event_file *file)
update_cond_flag(file);
trace_event_trigger_enable_disable(file, 0);
if (test->ops->free)
- test->ops->free(test->ops, test);
+ test->ops->free(test);
}
}
}
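
event_hist_trigger_parse() above now separates the "update an existing histogram" path (pause/cont/clear) into existing_hist_update_only(), leaving plain registration to the generic event_trigger_register(). A condensed sketch of that decision, with hypothetical user-space types standing in for the hist trigger data:

    /*
     * Illustration only: pause/cont/clear commands mutate an already
     * registered histogram and report "handled"; anything else falls
     * through to registration.
     */
    #include <stdio.h>
    #include <stdbool.h>

    struct hist { bool registered; bool paused; };

    enum cmd { CMD_NEW, CMD_PAUSE, CMD_CONT, CMD_CLEAR };

    static bool existing_update_only(struct hist *h, enum cmd c)
    {
        if (c == CMD_NEW || !h->registered)
            return false;                 /* fall through to registration */
        if (c == CMD_PAUSE)
            h->paused = true;
        else if (c == CMD_CONT)
            h->paused = false;
        /* CMD_CLEAR would reset the accumulated counts here */
        return true;                      /* handled, skip registration */
    }

    int main(void)
    {
        struct hist h = { .registered = true, .paused = false };

        printf("pause handled in place: %d\n", existing_update_only(&h, CMD_PAUSE));
        printf("new trigger needs registration: %d\n", !existing_update_only(&h, CMD_NEW));
        return 0;
    }
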
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 7eb9d04f1c2e..cb866c3141af 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -188,7 +188,7 @@ static int trigger_show(struct seq_file *m, void *v)
}
data = list_entry(v, struct event_trigger_data, list);
- data->ops->print(m, data->ops, data);
+ data->ops->print(m, data);
return 0;
}
@@ -432,7 +432,6 @@ event_trigger_print(const char *name, struct seq_file *m,
/**
* event_trigger_init - Generic event_trigger_ops @init implementation
- * @ops: The trigger ops associated with the trigger
* @data: Trigger-specific data
*
* Common implementation of event trigger initialization.
@@ -442,8 +441,7 @@ event_trigger_print(const char *name, struct seq_file *m,
*
* Return: 0 on success, errno otherwise
*/
-int event_trigger_init(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+int event_trigger_init(struct event_trigger_data *data)
{
data->ref++;
return 0;
@@ -451,7 +449,6 @@ int event_trigger_init(struct event_trigger_ops *ops,
/**
* event_trigger_free - Generic event_trigger_ops @free implementation
- * @ops: The trigger ops associated with the trigger
* @data: Trigger-specific data
*
* Common implementation of event trigger de-initialization.
@@ -460,8 +457,7 @@ int event_trigger_init(struct event_trigger_ops *ops,
* implementations.
*/
static void
-event_trigger_free(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+event_trigger_free(struct event_trigger_data *data)
{
if (WARN_ON_ONCE(data->ref <= 0))
return;
@@ -515,7 +511,7 @@ clear_event_triggers(struct trace_array *tr)
trace_event_trigger_enable_disable(file, 0);
list_del_rcu(&data->list);
if (data->ops->free)
- data->ops->free(data->ops, data);
+ data->ops->free(data);
}
}
}
@@ -581,19 +577,18 @@ static int register_trigger(char *glob,
}
if (data->ops->init) {
- ret = data->ops->init(data->ops, data);
+ ret = data->ops->init(data);
if (ret < 0)
goto out;
}
list_add_rcu(&data->list, &file->triggers);
- ret++;
update_cond_flag(file);
- if (trace_event_trigger_enable_disable(file, 1) < 0) {
+ ret = trace_event_trigger_enable_disable(file, 1);
+ if (ret < 0) {
list_del_rcu(&data->list);
update_cond_flag(file);
- ret--;
}
out:
return ret;
@@ -614,14 +609,13 @@ static void unregister_trigger(char *glob,
struct event_trigger_data *test,
struct trace_event_file *file)
{
- struct event_trigger_data *data;
- bool unregistered = false;
+ struct event_trigger_data *data = NULL, *iter;
lockdep_assert_held(&event_mutex);
- list_for_each_entry(data, &file->triggers, list) {
- if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
- unregistered = true;
+ list_for_each_entry(iter, &file->triggers, list) {
+ if (iter->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
+ data = iter;
list_del_rcu(&data->list);
trace_event_trigger_enable_disable(file, 0);
update_cond_flag(file);
@@ -629,8 +623,8 @@ static void unregister_trigger(char *glob,
}
}
- if (unregistered && data->ops->free)
- data->ops->free(data->ops, data);
+ if (data && data->ops->free)
+ data->ops->free(data);
}
/*
@@ -744,15 +738,15 @@ bool event_trigger_empty_param(const char *param)
/**
* event_trigger_separate_filter - separate an event trigger from a filter
- * @param: The param string containing trigger and possibly filter
- * @trigger: outparam, will be filled with a pointer to the trigger
+ * @param_and_filter: String containing trigger and possibly filter
+ * @param: outparam, will be filled with a pointer to the trigger
* @filter: outparam, will be filled with a pointer to the filter
* @param_required: Specifies whether or not the param string is required
*
* Given a param string of the form '[trigger] [if filter]', this
* function separates the filter from the trigger and returns the
- * trigger in *trigger and the filter in *filter. Either the *trigger
- * or the *filter may be set to NULL by this function - if not set to
+ * trigger in @param and the filter in @filter. Either the @param
+ * or the @filter may be set to NULL by this function - if not set to
* NULL, they will contain strings corresponding to the trigger and
* filter.
*
@@ -927,48 +921,37 @@ void event_trigger_reset_filter(struct event_command *cmd_ops,
* @cmd_ops: The event_command operations for the trigger
* @file: The event file for the trigger's event
* @glob: The trigger command string, with optional remove(!) operator
- * @cmd: The cmd string
- * @param: The param string
* @trigger_data: The trigger_data for the trigger
- * @n_registered: optional outparam, the number of triggers registered
*
* Register an event trigger. The @cmd_ops are used to call the
- * cmd_ops->reg() function which actually does the registration. The
- * cmd_ops->reg() function returns the number of triggers registered,
- * which is assigned to n_registered, if n_registered is non-NULL.
+ * cmd_ops->reg() function which actually does the registration.
*
* Return: 0 on success, errno otherwise
*/
int event_trigger_register(struct event_command *cmd_ops,
struct trace_event_file *file,
char *glob,
- char *cmd,
- char *param,
- struct event_trigger_data *trigger_data,
- int *n_registered)
+ struct event_trigger_data *trigger_data)
{
- int ret;
-
- if (n_registered)
- *n_registered = 0;
-
- ret = cmd_ops->reg(glob, trigger_data, file);
- /*
- * The above returns on success the # of functions enabled,
- * but if it didn't find any functions it returns zero.
- * Consider no functions a failure too.
- */
- if (!ret) {
- cmd_ops->unreg(glob, trigger_data, file);
- ret = -ENOENT;
- } else if (ret > 0) {
- if (n_registered)
- *n_registered = ret;
- /* Just return zero, not the number of enabled functions */
- ret = 0;
- }
+ return cmd_ops->reg(glob, trigger_data, file);
+}
- return ret;
+/**
+ * event_trigger_unregister - unregister an event trigger
+ * @cmd_ops: The event_command operations for the trigger
+ * @file: The event file for the trigger's event
+ * @glob: The trigger command string, with optional remove(!) operator
+ * @trigger_data: The trigger_data for the trigger
+ *
+ * Unregister an event trigger. The @cmd_ops are used to call the
+ * cmd_ops->unreg() function which actually does the unregistration.
+ */
+void event_trigger_unregister(struct event_command *cmd_ops,
+ struct trace_event_file *file,
+ char *glob,
+ struct event_trigger_data *trigger_data)
+{
+ cmd_ops->unreg(glob, trigger_data, file);
}
/*
@@ -981,7 +964,7 @@ int event_trigger_register(struct event_command *cmd_ops,
* @file: The trace_event_file associated with the event
* @glob: The raw string used to register the trigger
* @cmd: The cmd portion of the string used to register the trigger
- * @param: The params portion of the string used to register the trigger
+ * @param_and_filter: The param and filter portion of the string used to register the trigger
*
* Common implementation for event command parsing and trigger
* instantiation.
@@ -994,94 +977,53 @@ int event_trigger_register(struct event_command *cmd_ops,
static int
event_trigger_parse(struct event_command *cmd_ops,
struct trace_event_file *file,
- char *glob, char *cmd, char *param)
+ char *glob, char *cmd, char *param_and_filter)
{
struct event_trigger_data *trigger_data;
- struct event_trigger_ops *trigger_ops;
- char *trigger = NULL;
- char *number;
+ char *param, *filter;
+ bool remove;
int ret;
- /* separate the trigger from the filter (t:n [if filter]) */
- if (param && isdigit(param[0])) {
- trigger = strsep(&param, " \t");
- if (param) {
- param = skip_spaces(param);
- if (!*param)
- param = NULL;
- }
- }
+ remove = event_trigger_check_remove(glob);
- trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
+ ret = event_trigger_separate_filter(param_and_filter, &param, &filter, false);
+ if (ret)
+ return ret;
ret = -ENOMEM;
- trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
+ trigger_data = event_trigger_alloc(cmd_ops, cmd, param, file);
if (!trigger_data)
goto out;
- trigger_data->count = -1;
- trigger_data->ops = trigger_ops;
- trigger_data->cmd_ops = cmd_ops;
- trigger_data->private_data = file;
- INIT_LIST_HEAD(&trigger_data->list);
- INIT_LIST_HEAD(&trigger_data->named_list);
-
- if (glob[0] == '!') {
- cmd_ops->unreg(glob+1, trigger_data, file);
+ if (remove) {
+ event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
kfree(trigger_data);
ret = 0;
goto out;
}
- if (trigger) {
- number = strsep(&trigger, ":");
-
- ret = -EINVAL;
- if (!strlen(number))
- goto out_free;
-
- /*
- * We use the callback data field (which is a pointer)
- * as our counter.
- */
- ret = kstrtoul(number, 0, &trigger_data->count);
- if (ret)
- goto out_free;
- }
-
- if (!param) /* if param is non-empty, it's supposed to be a filter */
- goto out_reg;
-
- if (!cmd_ops->set_filter)
- goto out_reg;
+ ret = event_trigger_parse_num(param, trigger_data);
+ if (ret)
+ goto out_free;
- ret = cmd_ops->set_filter(param, trigger_data, file);
+ ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
if (ret < 0)
goto out_free;
- out_reg:
/* Up the trigger_data count to make sure reg doesn't free it on failure */
- event_trigger_init(trigger_ops, trigger_data);
- ret = cmd_ops->reg(glob, trigger_data, file);
- /*
- * The above returns on success the # of functions enabled,
- * but if it didn't find any functions it returns zero.
- * Consider no functions a failure too.
- */
- if (!ret) {
- cmd_ops->unreg(glob, trigger_data, file);
- ret = -ENOENT;
- } else if (ret > 0)
- ret = 0;
+ event_trigger_init(trigger_data);
+
+ ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
+ if (ret)
+ goto out_free;
/* Down the counter of trigger_data or free it if not used anymore */
- event_trigger_free(trigger_ops, trigger_data);
+ event_trigger_free(trigger_data);
out:
return ret;
out_free:
- if (cmd_ops->set_filter)
- cmd_ops->set_filter(NULL, trigger_data, NULL);
+ event_trigger_reset_filter(cmd_ops, trigger_data);
kfree(trigger_data);
goto out;
}
@@ -1401,16 +1343,14 @@ traceoff_count_trigger(struct event_trigger_data *data,
}
static int
-traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+traceon_trigger_print(struct seq_file *m, struct event_trigger_data *data)
{
return event_trigger_print("traceon", m, (void *)data->count,
data->filter_str);
}
static int
-traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+traceoff_trigger_print(struct seq_file *m, struct event_trigger_data *data)
{
return event_trigger_print("traceoff", m, (void *)data->count,
data->filter_str);
@@ -1521,8 +1461,7 @@ register_snapshot_trigger(char *glob,
}
static int
-snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+snapshot_trigger_print(struct seq_file *m, struct event_trigger_data *data)
{
return event_trigger_print("snapshot", m, (void *)data->count,
data->filter_str);
@@ -1617,8 +1556,7 @@ stacktrace_count_trigger(struct event_trigger_data *data,
}
static int
-stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+stacktrace_trigger_print(struct seq_file *m, struct event_trigger_data *data)
{
return event_trigger_print("stacktrace", m, (void *)data->count,
data->filter_str);
@@ -1708,7 +1646,6 @@ event_enable_count_trigger(struct event_trigger_data *data,
}
int event_enable_trigger_print(struct seq_file *m,
- struct event_trigger_ops *ops,
struct event_trigger_data *data)
{
struct enable_trigger_data *enable_data = data->private_data;
@@ -1733,8 +1670,7 @@ int event_enable_trigger_print(struct seq_file *m,
return 0;
}
-void event_enable_trigger_free(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+void event_enable_trigger_free(struct event_trigger_data *data)
{
struct enable_trigger_data *enable_data = data->private_data;
@@ -1781,39 +1717,33 @@ static struct event_trigger_ops event_disable_count_trigger_ops = {
int event_enable_trigger_parse(struct event_command *cmd_ops,
struct trace_event_file *file,
- char *glob, char *cmd, char *param)
+ char *glob, char *cmd, char *param_and_filter)
{
struct trace_event_file *event_enable_file;
struct enable_trigger_data *enable_data;
struct event_trigger_data *trigger_data;
- struct event_trigger_ops *trigger_ops;
struct trace_array *tr = file->tr;
+ char *param, *filter;
+ bool enable, remove;
const char *system;
const char *event;
bool hist = false;
- char *trigger;
- char *number;
- bool enable;
int ret;
- if (!param)
- return -EINVAL;
+ remove = event_trigger_check_remove(glob);
- /* separate the trigger from the filter (s:e:n [if filter]) */
- trigger = strsep(&param, " \t");
- if (!trigger)
+ if (event_trigger_empty_param(param_and_filter))
return -EINVAL;
- if (param) {
- param = skip_spaces(param);
- if (!*param)
- param = NULL;
- }
- system = strsep(&trigger, ":");
- if (!trigger)
+ ret = event_trigger_separate_filter(param_and_filter, &param, &filter, true);
+ if (ret)
+ return ret;
+
+ system = strsep(&param, ":");
+ if (!param)
return -EINVAL;
- event = strsep(&trigger, ":");
+ event = strsep(&param, ":");
ret = -EINVAL;
event_enable_file = find_event_file(tr, system, event);
@@ -1829,32 +1759,24 @@ int event_enable_trigger_parse(struct event_command *cmd_ops,
#else
enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
- trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
-
ret = -ENOMEM;
- trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
- if (!trigger_data)
- goto out;
enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
- if (!enable_data) {
- kfree(trigger_data);
+ if (!enable_data)
goto out;
- }
-
- trigger_data->count = -1;
- trigger_data->ops = trigger_ops;
- trigger_data->cmd_ops = cmd_ops;
- INIT_LIST_HEAD(&trigger_data->list);
- RCU_INIT_POINTER(trigger_data->filter, NULL);
enable_data->hist = hist;
enable_data->enable = enable;
enable_data->file = event_enable_file;
- trigger_data->private_data = enable_data;
- if (glob[0] == '!') {
- cmd_ops->unreg(glob+1, trigger_data, file);
+ trigger_data = event_trigger_alloc(cmd_ops, cmd, param, enable_data);
+ if (!trigger_data) {
+ kfree(enable_data);
+ goto out;
+ }
+
+ if (remove) {
+ event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
kfree(trigger_data);
kfree(enable_data);
ret = 0;
@@ -1862,35 +1784,16 @@ int event_enable_trigger_parse(struct event_command *cmd_ops,
}
/* Up the trigger_data count to make sure nothing frees it on failure */
- event_trigger_init(trigger_ops, trigger_data);
-
- if (trigger) {
- number = strsep(&trigger, ":");
-
- ret = -EINVAL;
- if (!strlen(number))
- goto out_free;
-
- /*
- * We use the callback data field (which is a pointer)
- * as our counter.
- */
- ret = kstrtoul(number, 0, &trigger_data->count);
- if (ret)
- goto out_free;
- }
+ event_trigger_init(trigger_data);
- if (!param) /* if param is non-empty, it's supposed to be a filter */
- goto out_reg;
-
- if (!cmd_ops->set_filter)
- goto out_reg;
+ ret = event_trigger_parse_num(param, trigger_data);
+ if (ret)
+ goto out_free;
- ret = cmd_ops->set_filter(param, trigger_data, file);
+ ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
if (ret < 0)
goto out_free;
- out_reg:
/* Don't let event modules unload while probe registered */
ret = trace_event_try_get_ref(event_enable_file->event_call);
if (!ret) {
@@ -1901,32 +1804,23 @@ int event_enable_trigger_parse(struct event_command *cmd_ops,
ret = trace_event_enable_disable(event_enable_file, 1, 1);
if (ret < 0)
goto out_put;
- ret = cmd_ops->reg(glob, trigger_data, file);
- /*
- * The above returns on success the # of functions enabled,
- * but if it didn't find any functions it returns zero.
- * Consider no functions a failure too.
- */
- if (!ret) {
- ret = -ENOENT;
- goto out_disable;
- } else if (ret < 0)
+
+ ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
+ if (ret)
goto out_disable;
- /* Just return zero, not the number of enabled functions */
- ret = 0;
- event_trigger_free(trigger_ops, trigger_data);
+
+ event_trigger_free(trigger_data);
out:
return ret;
-
out_disable:
trace_event_enable_disable(event_enable_file, 0, 1);
out_put:
trace_event_put_ref(event_enable_file->event_call);
out_free:
- if (cmd_ops->set_filter)
- cmd_ops->set_filter(NULL, trigger_data, NULL);
- event_trigger_free(trigger_ops, trigger_data);
+ event_trigger_reset_filter(cmd_ops, trigger_data);
+ event_trigger_free(trigger_data);
kfree(enable_data);
+
goto out;
}
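
The rewritten event_enable_trigger_parse() above delegates the "cmd:system:event[:count] [if filter]" splitting to the shared event_trigger_* helpers instead of open-coding strsep() calls. A stand-alone user-space sketch (not part of the series) of the same tokenizing order — the helper names mentioned in comments are the real kernel ones, the string handling here is only an illustration:

/* Sketch: how "enable_event:sys:evt:N if filter" style input is tokenized. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "enable_event:sched:sched_switch:5 if prev_prio < 100";
	char *param_and_filter = buf;
	char *param, *filter, *system, *event, *count;
	int remove;

	/* event_trigger_check_remove(): a leading '!' means "remove trigger" */
	remove = (param_and_filter[0] == '!');

	/* event_trigger_separate_filter(): split trigger part from filter */
	param = strsep(&param_and_filter, " \t");
	filter = NULL;
	if (param_and_filter && !strncmp(param_and_filter, "if ", 3))
		filter = param_and_filter + 3;

	/* the trigger part is <cmd>:<system>:<event>[:<count>] */
	strsep(&param, ":");		/* drop the command itself */
	system = strsep(&param, ":");
	event  = strsep(&param, ":");
	count  = param;			/* may be NULL: trigger fires forever */

	printf("remove=%d system=%s event=%s count=%s filter=%s\n",
	       remove, system, event, count ? count : "-1",
	       filter ? filter : "(none)");
	return 0;
}
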
@@ -1953,19 +1847,18 @@ int event_enable_register_trigger(char *glob,
}
if (data->ops->init) {
- ret = data->ops->init(data->ops, data);
+ ret = data->ops->init(data);
if (ret < 0)
goto out;
}
list_add_rcu(&data->list, &file->triggers);
- ret++;
update_cond_flag(file);
- if (trace_event_trigger_enable_disable(file, 1) < 0) {
+ ret = trace_event_trigger_enable_disable(file, 1);
+ if (ret < 0) {
list_del_rcu(&data->list);
update_cond_flag(file);
- ret--;
}
out:
return ret;
@@ -1976,19 +1869,18 @@ void event_enable_unregister_trigger(char *glob,
struct trace_event_file *file)
{
struct enable_trigger_data *test_enable_data = test->private_data;
+ struct event_trigger_data *data = NULL, *iter;
struct enable_trigger_data *enable_data;
- struct event_trigger_data *data;
- bool unregistered = false;
lockdep_assert_held(&event_mutex);
- list_for_each_entry(data, &file->triggers, list) {
- enable_data = data->private_data;
+ list_for_each_entry(iter, &file->triggers, list) {
+ enable_data = iter->private_data;
if (enable_data &&
- (data->cmd_ops->trigger_type ==
+ (iter->cmd_ops->trigger_type ==
test->cmd_ops->trigger_type) &&
(enable_data->file == test_enable_data->file)) {
- unregistered = true;
+ data = iter;
list_del_rcu(&data->list);
trace_event_trigger_enable_disable(file, 0);
update_cond_flag(file);
@@ -1996,8 +1888,8 @@ void event_enable_unregister_trigger(char *glob,
}
}
- if (unregistered && data->ops->free)
- data->ops->free(data->ops, data);
+ if (data && data->ops->free)
+ data->ops->free(data);
}
static struct event_trigger_ops *
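
The two hunks above replace the pattern of reusing the list_for_each_entry() cursor after the loop with a separate, NULL-initialized result pointer: when the loop finishes without a break, the cursor does not point at a real entry. A tiny self-contained illustration of the rule (plain C, not the kernel list API):

#include <stdio.h>

struct node { int type; struct node *next; };

/* Return the first node matching 'type', or NULL -- never the exhausted cursor. */
static struct node *find_type(struct node *head, int type)
{
	struct node *found = NULL, *iter;

	for (iter = head; iter; iter = iter->next) {
		if (iter->type == type) {
			found = iter;	/* remember the match explicitly */
			break;
		}
	}
	return found;		/* safe to test: NULL means "not found" */
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	printf("%p\n", (void *)find_type(&a, 2));	/* address of b */
	printf("%p\n", (void *)find_type(&a, 9));	/* (nil) */
	return 0;
}
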
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 47cebef78532..93507330462c 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1907,25 +1907,18 @@ core_initcall(init_kprobe_trace_early);
static __init int init_kprobe_trace(void)
{
int ret;
- struct dentry *entry;
ret = tracing_init_dentry();
if (ret)
return 0;
- entry = tracefs_create_file("kprobe_events", TRACE_MODE_WRITE,
- NULL, NULL, &kprobe_events_ops);
-
/* Event list interface */
- if (!entry)
- pr_warn("Could not create tracefs 'kprobe_events' entry\n");
+ trace_create_file("kprobe_events", TRACE_MODE_WRITE,
+ NULL, NULL, &kprobe_events_ops);
/* Profile interface */
- entry = tracefs_create_file("kprobe_profile", TRACE_MODE_READ,
- NULL, NULL, &kprobe_profile_ops);
-
- if (!entry)
- pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
+ trace_create_file("kprobe_profile", TRACE_MODE_READ,
+ NULL, NULL, &kprobe_profile_ops);
setup_boot_kprobe_events();
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index afb92e2f0aea..313439920a8c 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -1578,11 +1578,27 @@ static enum hrtimer_restart timerlat_irq(struct hrtimer *timer)
trace_timerlat_sample(&s);
- notify_new_max_latency(diff);
+ if (osnoise_data.stop_tracing) {
+ if (time_to_us(diff) >= osnoise_data.stop_tracing) {
+
+ /*
+ * At this point, if stop_tracing is set and <= print_stack,
+ * print_stack is set and would be printed in the thread handler.
+ *
+ * Thus, print the stack trace as it is helpful to define the
+ * root cause of an IRQ latency.
+ */
+ if (osnoise_data.stop_tracing <= osnoise_data.print_stack) {
+ timerlat_save_stack(0);
+ timerlat_dump_stack(time_to_us(diff));
+ }
- if (osnoise_data.stop_tracing)
- if (time_to_us(diff) >= osnoise_data.stop_tracing)
osnoise_stop_tracing();
+ notify_new_max_latency(diff);
+
+ return HRTIMER_NORESTART;
+ }
+ }
wake_up_process(tlat->kthread);
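
The new ordering in timerlat_irq() only matters once stop_tracing is armed: the IRQ stack is dumped first (when print_stack covers the threshold), tracing is stopped, the max-latency notifier still runs, and the timer is not re-armed. A stand-alone sketch of that decision tree with hypothetical threshold values, in microseconds as in the tracefs knobs (not kernel code):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical knob values (osnoise_data.stop_tracing / print_stack). */
static unsigned long stop_tracing = 200;
static unsigned long print_stack  = 300;

static bool handle_irq_latency(unsigned long latency_us)
{
	if (stop_tracing && latency_us >= stop_tracing) {
		/*
		 * stop_tracing <= print_stack means the thread handler would
		 * have printed the stack anyway; do it here, since the thread
		 * never runs once tracing stops.
		 */
		if (stop_tracing <= print_stack)
			printf("dump IRQ stack (%lu us)\n", latency_us);
		printf("stop tracing, record new max latency\n");
		return false;	/* HRTIMER_NORESTART: don't wake the thread */
	}
	return true;		/* keep going: wake the timerlat thread */
}

int main(void)
{
	handle_irq_latency(150);	/* below threshold: thread runs */
	handle_irq_latency(250);	/* stop + stack dump */
	return 0;
}
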
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 8aa493d25c73..67f47ea27921 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -692,7 +692,7 @@ static LIST_HEAD(ftrace_event_list);
static int trace_search_list(struct list_head **list)
{
- struct trace_event *e;
+ struct trace_event *e = NULL, *iter;
int next = __TRACE_LAST_TYPE;
if (list_empty(&ftrace_event_list)) {
@@ -704,9 +704,11 @@ static int trace_search_list(struct list_head **list)
* We used up all possible max events,
* lets see if somebody freed one.
*/
- list_for_each_entry(e, &ftrace_event_list, list) {
- if (e->type != next)
+ list_for_each_entry(iter, &ftrace_event_list, list) {
+ if (iter->type != next) {
+ e = iter;
break;
+ }
next++;
}
@@ -714,7 +716,10 @@ static int trace_search_list(struct list_head **list)
if (next > TRACE_EVENT_TYPE_MAX)
return 0;
- *list = &e->list;
+ if (e)
+ *list = &e->list;
+ else
+ *list = &ftrace_event_list;
return next;
}
@@ -778,9 +783,8 @@ int register_trace_event(struct trace_event *event)
list_add_tail(&event->list, list);
- } else if (event->type > __TRACE_LAST_TYPE) {
- printk(KERN_WARNING "Need to add type to trace.h\n");
- WARN_ON(1);
+ } else if (WARN(event->type > __TRACE_LAST_TYPE,
+ "Need to add type to trace.h")) {
goto out;
} else {
/* Is this event already used */
@@ -1571,13 +1575,8 @@ __init static int init_events(void)
for (i = 0; events[i]; i++) {
event = events[i];
-
ret = register_trace_event(event);
- if (!ret) {
- printk(KERN_WARNING "event %d failed to register\n",
- event->type);
- WARN_ON_ONCE(1);
- }
+ WARN_ONCE(!ret, "event %d failed to register", event->type);
}
return 0;
diff --git a/kernel/trace/trace_recursion_record.c b/kernel/trace/trace_recursion_record.c
index 4d4b78c8ca25..a520b11afb0d 100644
--- a/kernel/trace/trace_recursion_record.c
+++ b/kernel/trace/trace_recursion_record.c
@@ -224,12 +224,9 @@ static const struct file_operations recursed_functions_fops = {
__init static int create_recursed_functions(void)
{
- struct dentry *dentry;
- dentry = trace_create_file("recursed_functions", TRACE_MODE_WRITE,
- NULL, NULL, &recursed_functions_fops);
- if (!dentry)
- pr_warn("WARNING: Failed to create recursed_functions\n");
+ trace_create_file("recursed_functions", TRACE_MODE_WRITE,
+ NULL, NULL, &recursed_functions_fops);
return 0;
}
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index abcadbe933bb..a2d301f58ced 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -895,6 +895,9 @@ trace_selftest_startup_function_graph(struct tracer *trace,
ret = -1;
goto out;
}
+
+ /* Enable tracing on all functions again */
+ ftrace_set_global_filter(NULL, 0, 1);
#endif
/* Don't test dynamic tracing, the function tracer already did */
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index f755bde42fd0..b69e207012c9 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -154,7 +154,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags,
goto end;
/* parameter types */
- if (tr->trace_flags & TRACE_ITER_VERBOSE)
+ if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
trace_seq_printf(s, "%s ", entry->types[i]);
/* parameter values */
@@ -296,9 +296,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
struct trace_event_file *trace_file;
struct syscall_trace_enter *entry;
struct syscall_metadata *sys_data;
- struct ring_buffer_event *event;
- struct trace_buffer *buffer;
- unsigned int trace_ctx;
+ struct trace_event_buffer fbuffer;
unsigned long args[6];
int syscall_nr;
int size;
@@ -321,20 +319,16 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
- trace_ctx = tracing_gen_ctx();
-
- event = trace_event_buffer_lock_reserve(&buffer, trace_file,
- sys_data->enter_event->event.type, size, trace_ctx);
- if (!event)
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
+ if (!entry)
return;
- entry = ring_buffer_event_data(event);
+ entry = ring_buffer_event_data(fbuffer.event);
entry->nr = syscall_nr;
syscall_get_arguments(current, regs, args);
memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);
- event_trigger_unlock_commit(trace_file, buffer, event, entry,
- trace_ctx);
+ trace_event_buffer_commit(&fbuffer);
}
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
@@ -343,9 +337,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
struct trace_event_file *trace_file;
struct syscall_trace_exit *entry;
struct syscall_metadata *sys_data;
- struct ring_buffer_event *event;
- struct trace_buffer *buffer;
- unsigned int trace_ctx;
+ struct trace_event_buffer fbuffer;
int syscall_nr;
syscall_nr = trace_get_syscall_nr(current, regs);
@@ -364,20 +356,15 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
if (!sys_data)
return;
- trace_ctx = tracing_gen_ctx();
-
- event = trace_event_buffer_lock_reserve(&buffer, trace_file,
- sys_data->exit_event->event.type, sizeof(*entry),
- trace_ctx);
- if (!event)
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
+ if (!entry)
return;
- entry = ring_buffer_event_data(event);
+ entry = ring_buffer_event_data(fbuffer.event);
entry->nr = syscall_nr;
entry->ret = syscall_get_return_value(current, regs);
- event_trigger_unlock_commit(trace_file, buffer, event, entry,
- trace_ctx);
+ trace_event_buffer_commit(&fbuffer);
}
static int reg_event_syscall_enter(struct trace_event_file *file,
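
The syscall enter/exit probes now use the same reserve/commit helpers as generated trace events, which also honor per-file filters and triggers. A hedged, kernel-style sketch of the pattern (the payload struct is illustrative only; the real layouts are syscall_trace_enter/exit):

/* Sketch: the reserve/commit pattern used by ftrace_syscall_enter() above. */
#include <linux/trace_events.h>

struct my_event_entry {
	struct trace_entry ent;
	unsigned long value;		/* illustrative payload field */
};

static void my_probe(struct trace_event_file *trace_file, unsigned long value)
{
	struct trace_event_buffer fbuffer;
	struct my_event_entry *entry;

	/* Reserves ring-buffer space and fills in the common fields. */
	entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
	if (!entry)
		return;			/* filtered out or buffer full */

	entry->value = value;

	/* Commits and runs any triggers attached to the event file. */
	trace_event_buffer_commit(&fbuffer);
}
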
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index 9628b5571846..9901708ce6b8 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -1045,7 +1045,8 @@ static void sort_secondary(struct tracing_map *map,
/**
* tracing_map_sort_entries - Sort the current set of tracing_map_elts in a map
* @map: The tracing_map
- * @sort_key: The sort key to use for sorting
+ * @sort_keys: The sort keys to use for sorting
+ * @n_sort_keys: The number of sort keys; always at least one (hitcount)
* @sort_entries: outval: pointer to allocated and sorted array of entries
*
* tracing_map_sort_entries() sorts the current set of entries in the
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 1d261fbe367b..4252f0645b9e 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -23,15 +23,20 @@ void bacct_add_tsk(struct user_namespace *user_ns,
{
const struct cred *tcred;
u64 utime, stime, utimescaled, stimescaled;
- u64 delta;
+ u64 now_ns, delta;
time64_t btime;
BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);
/* calculate task elapsed time in nsec */
- delta = ktime_get_ns() - tsk->start_time;
+ now_ns = ktime_get_ns();
+ /* store whole group time first */
+ delta = now_ns - tsk->group_leader->start_time;
/* Convert to micro seconds */
do_div(delta, NSEC_PER_USEC);
+ stats->ac_tgetime = delta;
+ delta = now_ns - tsk->start_time;
+ do_div(delta, NSEC_PER_USEC);
stats->ac_etime = delta;
/* Convert to seconds for btime (note y2106 limit) */
btime = ktime_get_real_seconds() - div_u64(delta, USEC_PER_SEC);
@@ -51,6 +56,7 @@ void bacct_add_tsk(struct user_namespace *user_ns,
stats->ac_nice = task_nice(tsk);
stats->ac_sched = tsk->policy;
stats->ac_pid = task_pid_nr_ns(tsk, pid_ns);
+ stats->ac_tgid = task_tgid_nr_ns(tsk, pid_ns);
rcu_read_lock();
tcred = __task_cred(tsk);
stats->ac_uid = from_kuid_munged(user_ns, tcred->uid);
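
bacct_add_tsk() now derives both the per-task and the whole-thread-group elapsed time from a single ktime_get_ns() sample before converting to microseconds. The arithmetic in isolation, with made-up timestamps (user-space sketch, not kernel code):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ULL

int main(void)
{
	/* Hypothetical monotonic timestamps, in nanoseconds. */
	uint64_t now_ns         = 5000000000ULL;	/* "now" */
	uint64_t group_start_ns = 1000000000ULL;	/* group leader start */
	uint64_t task_start_ns  = 3500000000ULL;	/* this thread's start */

	/* Same conversion bacct_add_tsk() performs with do_div(). */
	uint64_t ac_tgetime = (now_ns - group_start_ns) / NSEC_PER_USEC;
	uint64_t ac_etime   = (now_ns - task_start_ns)  / NSEC_PER_USEC;

	printf("group elapsed: %llu us, task elapsed: %llu us\n",
	       (unsigned long long)ac_tgetime, (unsigned long long)ac_etime);
	return 0;
}
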
diff --git a/kernel/umh.c b/kernel/umh.c
index 36c123360ab8..b989736e8707 100644
--- a/kernel/umh.c
+++ b/kernel/umh.c
@@ -132,7 +132,7 @@ static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
/* If SIGCLD is ignored do_wait won't populate the status. */
kernel_sigaction(SIGCHLD, SIG_DFL);
- pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
+ pid = user_mode_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
if (pid < 0)
sub_info->retval = pid;
else
@@ -171,8 +171,8 @@ static void call_usermodehelper_exec_work(struct work_struct *work)
* want to pollute current->children, and we need a parent
* that always ignores SIGCHLD to ensure auto-reaping.
*/
- pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
- CLONE_PARENT | SIGCHLD);
+ pid = user_mode_thread(call_usermodehelper_exec_async, sub_info,
+ CLONE_PARENT | SIGCHLD);
if (pid < 0) {
sub_info->retval = pid;
umh_complete(sub_info);
diff --git a/kernel/usermode_driver.c b/kernel/usermode_driver.c
index 9dae1f648713..8303f4c7ca71 100644
--- a/kernel/usermode_driver.c
+++ b/kernel/usermode_driver.c
@@ -28,7 +28,7 @@ static struct vfsmount *blob_to_mnt(const void *data, size_t len, const char *na
file = file_open_root_mnt(mnt, name, O_CREAT | O_WRONLY, 0700);
if (IS_ERR(file)) {
- mntput(mnt);
+ kern_unmount(mnt);
return ERR_CAST(file);
}
@@ -38,7 +38,7 @@ static struct vfsmount *blob_to_mnt(const void *data, size_t len, const char *na
if (err >= 0)
err = -ENOMEM;
filp_close(file, NULL);
- mntput(mnt);
+ kern_unmount(mnt);
return ERR_PTR(err);
}
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 40024e03d422..20a7a55e62b6 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -57,7 +57,7 @@ int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
* Should we panic when a soft-lockup or hard-lockup occurs:
*/
unsigned int __read_mostly hardlockup_panic =
- CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
+ IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);
/*
* We may not want to enable hard lockup detection by default in all cases,
* for example when running the kernel as a guest on a hypervisor. In these
@@ -168,7 +168,7 @@ static struct cpumask watchdog_allowed_mask __read_mostly;
/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
- CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
+ IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);
static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;
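
With IS_ENABLED() the separate *_PANIC_VALUE Kconfig symbols (removed further down in lib/Kconfig.debug) become redundant: a bool option that is set expands to 1 and an unset one to 0, directly usable as an initializer. A simplified user-space rendition of the preprocessor trick — the kernel version in include/linux/kconfig.h additionally handles the =m case:

#include <stdio.h>

/* Simplified IS_ENABLED(), modelled on include/linux/kconfig.h. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define IS_ENABLED(option) ___is_defined(option)

#define CONFIG_BOOTPARAM_HARDLOCKUP_PANIC 1	/* pretend "say Y" */
/* CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC left undefined: "say N" */

unsigned int hardlockup_panic = IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);
unsigned int softlockup_panic = IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);

int main(void)
{
	printf("hardlockup_panic=%u softlockup_panic=%u\n",
	       hardlockup_panic, softlockup_panic);	/* prints 1 and 0 */
	return 0;
}
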
diff --git a/lib/.gitignore b/lib/.gitignore
index e5e217b8307b..54596b634ecb 100644
--- a/lib/.gitignore
+++ b/lib/.gitignore
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
/crc32table.h
/crc64table.h
+/default.bconf
/gen_crc32table
/gen_crc64table
/oid_registry_data.c
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b8cc65d22169..2e24db4bff19 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -699,41 +699,6 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
help
Debug objects boot parameter default value
-config DEBUG_SLAB
- bool "Debug slab memory allocations"
- depends on DEBUG_KERNEL && SLAB
- help
- Say Y here to have the kernel do limited verification on memory
- allocation as well as poisoning memory on free to catch use of freed
- memory. This can make kmalloc/kfree-intensive workloads much slower.
-
-config SLUB_DEBUG_ON
- bool "SLUB debugging on by default"
- depends on SLUB && SLUB_DEBUG
- select STACKDEPOT_ALWAYS_INIT if STACKTRACE_SUPPORT
- default n
- help
- Boot with debugging on by default. SLUB boots by default with
- the runtime debug capabilities switched off. Enabling this is
- equivalent to specifying the "slub_debug" parameter on boot.
- There is no support for more fine grained debug control like
- possible with slub_debug=xxx. SLUB debugging may be switched
- off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying
- "slub_debug=-".
-
-config SLUB_STATS
- default n
- bool "Enable SLUB performance statistics"
- depends on SLUB && SYSFS
- help
- SLUB statistics are useful to debug SLUBs allocation behavior in
- order find ways to optimize the allocator. This should never be
- enabled for production use since keeping statistics slows down
- the allocator by a few percentage points. The slabinfo command
- supports the determination of the most active slabs to figure
- out which slabs are relevant to a particular load.
- Try running: slabinfo -DA
-
config HAVE_DEBUG_KMEMLEAK
bool
@@ -1073,13 +1038,6 @@ config BOOTPARAM_SOFTLOCKUP_PANIC
Say N if unsure.
-config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
- int
- depends on SOFTLOCKUP_DETECTOR
- range 0 1
- default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
- default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
-
config HARDLOCKUP_DETECTOR_PERF
bool
select SOFTLOCKUP_DETECTOR
@@ -1121,13 +1079,6 @@ config BOOTPARAM_HARDLOCKUP_PANIC
Say N if unsure.
-config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
- int
- depends on HARDLOCKUP_DETECTOR
- range 0 1
- default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
- default 1 if BOOTPARAM_HARDLOCKUP_PANIC
-
config DETECT_HUNG_TASK
bool "Detect Hung Tasks"
depends on DEBUG_KERNEL
@@ -1175,13 +1126,6 @@ config BOOTPARAM_HUNG_TASK_PANIC
Say N if unsure.
-config BOOTPARAM_HUNG_TASK_PANIC_VALUE
- int
- depends on DETECT_HUNG_TASK
- range 0 1
- default 0 if !BOOTPARAM_HUNG_TASK_PANIC
- default 1 if BOOTPARAM_HUNG_TASK_PANIC
-
config WQ_WATCHDOG
bool "Detect Workqueue Stalls"
depends on DEBUG_KERNEL
@@ -1546,29 +1490,6 @@ config CSD_LOCK_WAIT_DEBUG
include the IPI handler function currently executing (if any)
and relevant stack traces.
-choice
- prompt "Lock debugging: prove subsystem device_lock() correctness"
- depends on PROVE_LOCKING
- help
- For subsystems that have instrumented their usage of the device_lock()
- with nested annotations, enable lock dependency checking. The locking
- hierarchy 'subclass' identifiers are not compatible across
- sub-systems, so only one can be enabled at a time.
-
-config PROVE_NVDIMM_LOCKING
- bool "NVDIMM"
- depends on LIBNVDIMM
- help
- Enable lockdep to validate nd_device_lock() usage.
-
-config PROVE_CXL_LOCKING
- bool "CXL"
- depends on CXL_BUS
- help
- Enable lockdep to validate cxl_device_lock() usage.
-
-endchoice
-
endmenu # lock debugging
config TRACE_IRQFLAGS
diff --git a/lib/Makefile b/lib/Makefile
index 89fcae891361..ea54294d73bf 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -281,7 +281,15 @@ $(foreach file, $(libfdt_files), \
$(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
-lib-$(CONFIG_BOOT_CONFIG) += bootconfig.o
+obj-$(CONFIG_BOOT_CONFIG) += bootconfig.o
+obj-$(CONFIG_BOOT_CONFIG_EMBED) += bootconfig-data.o
+
+$(obj)/bootconfig-data.o: $(obj)/default.bconf
+
+targets += default.bconf
+filechk_defbconf = cat $(or $(real-prereqs), /dev/null)
+$(obj)/default.bconf: $(CONFIG_BOOT_CONFIG_EMBED_FILE) FORCE
+ $(call filechk,defbconf)
obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 079c72e26493..ca0b4f360c1a 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -1461,6 +1461,7 @@ int assoc_array_gc(struct assoc_array *array,
struct assoc_array_ptr *cursor, *ptr;
struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
unsigned long nr_leaves_on_tree;
+ bool retained;
int keylen, slot, nr_free, next_slot, i;
pr_devel("-->%s()\n", __func__);
@@ -1536,6 +1537,7 @@ continue_node:
goto descend;
}
+retry_compress:
pr_devel("-- compress node %p --\n", new_n);
/* Count up the number of empty slots in this node and work out the
@@ -1553,6 +1555,7 @@ continue_node:
pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
/* See what we can fold in */
+ retained = false;
next_slot = 0;
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
struct assoc_array_shortcut *s;
@@ -1602,9 +1605,14 @@ continue_node:
pr_devel("[%d] retain node %lu/%d [nx %d]\n",
slot, child->nr_leaves_on_branch, nr_free + 1,
next_slot);
+ retained = true;
}
}
+ if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
+ pr_devel("internal nodes remain despite enough space, retrying\n");
+ goto retry_compress;
+ }
pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
nr_leaves_on_tree = new_n->nr_leaves_on_branch;
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 0d5c2ece0bcb..b18e31ea6e66 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -45,19 +45,19 @@
* for the best explanations of this ordering.
*/
-int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits)
+bool __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] != bitmap2[k])
- return 0;
+ return false;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
- return 0;
+ return false;
- return 1;
+ return true;
}
EXPORT_SYMBOL(__bitmap_equal);
@@ -303,33 +303,33 @@ void __bitmap_replace(unsigned long *dst,
}
EXPORT_SYMBOL(__bitmap_replace);
-int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits)
+bool __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & bitmap2[k])
- return 1;
+ return true;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
- return 1;
- return 0;
+ return true;
+ return false;
}
EXPORT_SYMBOL(__bitmap_intersects);
-int __bitmap_subset(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits)
+bool __bitmap_subset(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & ~bitmap2[k])
- return 0;
+ return false;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
- return 0;
- return 1;
+ return false;
+ return true;
}
EXPORT_SYMBOL(__bitmap_subset);
@@ -527,33 +527,39 @@ static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
* cpumap_print_to_pagebuf() or directly by drivers to export hexadecimal
* bitmask and decimal list to userspace by sysfs ABI.
* Drivers might be using a normal attribute for this kind of ABIs. A
- * normal attribute typically has show entry as below:
- * static ssize_t example_attribute_show(struct device *dev,
+ * normal attribute typically has show entry as below::
+ *
+ * static ssize_t example_attribute_show(struct device *dev,
* struct device_attribute *attr, char *buf)
- * {
+ * {
* ...
* return bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max);
- * }
+ * }
+ *
* show entry of attribute has no offset and count parameters and this
* means the file is limited to one page only.
* bitmap_print_to_pagebuf() API works terribly well for this kind of
- * normal attribute with buf parameter and without offset, count:
- * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
+ * normal attribute with buf parameter and without offset, count::
+ *
+ * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
* int nmaskbits)
- * {
- * }
+ * {
+ * }
+ *
* The problem is once we have a large bitmap, we have a chance to get a
* bitmask or list more than one page. Especially for list, it could be
 * as complex as 0,3,5,7,9,... We have no simple way to know its exact size.
* It turns out bin_attribute is a way to break this limit. bin_attribute
- * has show entry as below:
- * static ssize_t
- * example_bin_attribute_show(struct file *filp, struct kobject *kobj,
+ * has show entry as below::
+ *
+ * static ssize_t
+ * example_bin_attribute_show(struct file *filp, struct kobject *kobj,
* struct bin_attribute *attr, char *buf,
* loff_t offset, size_t count)
- * {
+ * {
* ...
- * }
+ * }
+ *
* With the new offset and count parameters, this makes sysfs ABI be able
* to support file size more than one page. For example, offset could be
* >= 4096.
@@ -577,6 +583,7 @@ static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
* This function is not a replacement for sprintf() or bitmap_print_to_pagebuf().
* It is intended to workaround sysfs limitations discussed above and should be
* used carefully in general case for the following reasons:
+ *
* - Time complexity is O(nbits^2/count), comparing to O(nbits) for snprintf().
* - Memory complexity is O(nbits), comparing to O(1) for snprintf().
* - @off and @count are NOT offset and number of bits to print.
@@ -1505,5 +1512,59 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31));
}
EXPORT_SYMBOL(bitmap_to_arr32);
+#endif
+
+#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
+/**
+ * bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap
+ * @bitmap: array of unsigned longs, the destination bitmap
+ * @buf: array of u64 (in host byte order), the source bitmap
+ * @nbits: number of bits in @bitmap
+ */
+void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits)
+{
+ int n;
+
+ for (n = nbits; n > 0; n -= 64) {
+ u64 val = *buf++;
+ *bitmap++ = val;
+ if (n > 32)
+ *bitmap++ = val >> 32;
+ }
+
+ /*
+ * Clear tail bits in the last word beyond nbits.
+ *
+ * Negative index is OK because here we point to the word next
+ * to the last word of the bitmap, except for nbits == 0, which
+ * is tested implicitly.
+ */
+ if (nbits % BITS_PER_LONG)
+ bitmap[-1] &= BITMAP_LAST_WORD_MASK(nbits);
+}
+EXPORT_SYMBOL(bitmap_from_arr64);
+
+/**
+ * bitmap_to_arr64 - copy the contents of bitmap to a u64 array of bits
+ * @buf: array of u64 (in host byte order), the dest bitmap
+ * @bitmap: array of unsigned longs, the source bitmap
+ * @nbits: number of bits in @bitmap
+ */
+void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits)
+{
+ const unsigned long *end = bitmap + BITS_TO_LONGS(nbits);
+
+ while (bitmap < end) {
+ *buf = *bitmap++;
+ if (bitmap < end)
+ *buf |= (u64)(*bitmap++) << 32;
+ buf++;
+ }
+
+ /* Clear tail bits in the last element of array beyond nbits. */
+ if (nbits % 64)
+	buf[-1] &= GENMASK_ULL((nbits - 1) % 64, 0);
+}
+EXPORT_SYMBOL(bitmap_to_arr64);
#endif
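
The new helpers give an endianness- and word-size-independent exchange format: on 64-bit and little-endian 32-bit builds they compile down to plain copies, and only the big-endian 32-bit case needs the explicit splitting above. A hedged kernel-style usage sketch, assuming the companion BITS_TO_U64() helper introduced alongside these functions:

/* Sketch: round-tripping a bitmap through a fixed u64[] wire format. */
#include <linux/bitmap.h>

#define MY_NBITS 100

static void example_arr64_roundtrip(void)
{
	DECLARE_BITMAP(mask, MY_NBITS);
	u64 wire[BITS_TO_U64(MY_NBITS)];	/* 2 x u64 for 100 bits */

	bitmap_zero(mask, MY_NBITS);
	bitmap_set(mask, 0, 3);			/* bits 0-2 */
	bitmap_set(mask, 96, 4);		/* bits 96-99, land in wire[1] */

	bitmap_to_arr64(wire, mask, MY_NBITS);	/* bits above 99 are cleared */
	bitmap_from_arr64(mask, wire, MY_NBITS);
}
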
diff --git a/lib/bootconfig-data.S b/lib/bootconfig-data.S
new file mode 100644
index 000000000000..ef85ba1a82f4
--- /dev/null
+++ b/lib/bootconfig-data.S
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Embed default bootconfig in the kernel.
+ */
+ .section .init.rodata, "aw"
+ .global embedded_bootconfig_data
+embedded_bootconfig_data:
+ .incbin "lib/default.bconf"
+ .global embedded_bootconfig_data_end
+embedded_bootconfig_data_end:
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 74f3201ab8e5..c59d26068a64 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -12,6 +12,19 @@
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/string.h>
+
+#ifdef CONFIG_BOOT_CONFIG_EMBED
+/* embedded_bootconfig_data is defined in bootconfig-data.S */
+extern __visible const char embedded_bootconfig_data[];
+extern __visible const char embedded_bootconfig_data_end[];
+
+const char * __init xbc_get_embedded_bootconfig(size_t *size)
+{
+ *size = embedded_bootconfig_data_end - embedded_bootconfig_data;
+ return (*size) ? embedded_bootconfig_data : NULL;
+}
+#endif
+
#else /* !__KERNEL__ */
/*
* NOTE: This is only for tools/bootconfig, because tools/bootconfig will
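
The embedded blob is a fallback source only. A hedged sketch of how early setup code can prefer an initrd-attached bootconfig and fall back to the built-in one (setup_boot_config() in init/main.c follows roughly this shape; the declaration of xbc_get_embedded_bootconfig() is assumed to live in <linux/bootconfig.h>):

/* Sketch: choosing the bootconfig source during early boot. */
#include <linux/bootconfig.h>
#include <linux/init.h>
#include <linux/printk.h>

static void __init example_pick_bootconfig(void)
{
	const char *data;
	size_t size = 0;

	data = NULL;	/* normally: get_boot_config_from_initrd(&size) */
#ifdef CONFIG_BOOT_CONFIG_EMBED
	if (!data)
		/* Fall back to the blob linked in via bootconfig-data.S. */
		data = xbc_get_embedded_bootconfig(&size);
#endif
	if (data)
		pr_info("bootconfig: %zu bytes available\n", size);
}
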
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 379a66d7f504..9856e291f414 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -123,10 +123,4 @@ config CRYPTO_LIB_CHACHA20POLY1305
config CRYPTO_LIB_SHA256
tristate
-config CRYPTO_LIB_SM3
- tristate
-
-config CRYPTO_LIB_SM4
- tristate
-
endmenu
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 6c872d05d1e6..26be2bbe09c5 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -37,12 +37,6 @@ libpoly1305-y += poly1305.o
obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o
libsha256-y := sha256.o
-obj-$(CONFIG_CRYPTO_LIB_SM3) += libsm3.o
-libsm3-y := sm3.o
-
-obj-$(CONFIG_CRYPTO_LIB_SM4) += libsm4.o
-libsm4-y := sm4.o
-
ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y)
libblake2s-y += blake2s-selftest.o
libchacha20poly1305-y += chacha20poly1305-selftest.o
diff --git a/lib/glob.c b/lib/glob.c
index 85ecbda45cd8..15b73f490720 100644
--- a/lib/glob.c
+++ b/lib/glob.c
@@ -45,7 +45,7 @@ bool __pure glob_match(char const *pat, char const *str)
* (no exception for /), it can be easily proved that there's
* never a need to backtrack multiple levels.
*/
- char const *back_pat = NULL, *back_str = back_str;
+ char const *back_pat = NULL, *back_str;
/*
* Loop over each token (character or class) in pat, matching
diff --git a/lib/nodemask.c b/lib/nodemask.c
index 3aa454c54c0d..e22647f5181b 100644
--- a/lib/nodemask.c
+++ b/lib/nodemask.c
@@ -3,9 +3,9 @@
#include <linux/module.h>
#include <linux/random.h>
-int __next_node_in(int node, const nodemask_t *srcp)
+unsigned int __next_node_in(int node, const nodemask_t *srcp)
{
- int ret = __next_node(node, srcp);
+ unsigned int ret = __next_node(node, srcp);
if (ret == MAX_NUMNODES)
ret = __first_node(srcp);
diff --git a/lib/siphash.c b/lib/siphash.c
index 71d315a6ad62..15bc5b6f368c 100644
--- a/lib/siphash.c
+++ b/lib/siphash.c
@@ -1,6 +1,5 @@
-/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * This file is provided under a dual BSD/GPLv2 license.
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* SipHash: a fast short-input PRF
* https://131002.net/siphash/
diff --git a/lib/string.c b/lib/string.c
index 485777c9da83..6f334420f687 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -517,21 +517,13 @@ EXPORT_SYMBOL(strnlen);
size_t strspn(const char *s, const char *accept)
{
const char *p;
- const char *a;
- size_t count = 0;
for (p = s; *p != '\0'; ++p) {
- for (a = accept; *a != '\0'; ++a) {
- if (*p == *a)
- break;
- }
- if (*a == '\0')
- return count;
- ++count;
+ if (!strchr(accept, *p))
+ break;
}
- return count;
+ return p - s;
}
-
EXPORT_SYMBOL(strspn);
#endif
@@ -544,17 +536,12 @@ EXPORT_SYMBOL(strspn);
size_t strcspn(const char *s, const char *reject)
{
const char *p;
- const char *r;
- size_t count = 0;
for (p = s; *p != '\0'; ++p) {
- for (r = reject; *r != '\0'; ++r) {
- if (*p == *r)
- return count;
- }
- ++count;
+ if (strchr(reject, *p))
+ break;
}
- return count;
+ return p - s;
}
EXPORT_SYMBOL(strcspn);
#endif
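
The rewritten strspn()/strcspn() lean on strchr() for the inner scan and return the pointer difference; the observable behavior is unchanged. A quick user-space check mirroring the tuples used by the new strspn_selftest() further down in lib/test_string.c:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Same (str, accept, reject, a, r) values as lib/test_string.c. */
	assert(strspn("foobar", "") == 0 && strcspn("foobar", "") == 6);
	assert(strspn("abba", "abc") == 4 && strcspn("abba", "ABBA") == 4);
	assert(strspn("abba", "a") == 1 && strcspn("abba", "b") == 1);
	assert(strspn("", "abc") == 0 && strcspn("", "abc") == 0);

	puts("strspn/strcspn match the selftest expectations");
	return 0;
}
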
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 4f877e9551d5..5ed3beb066e6 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -757,6 +757,9 @@ char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n)
return ERR_PTR(-ENOMEM);
}
+ ptr->n = n;
+ devres_add(dev, ptr);
+
return ptr->array;
}
EXPORT_SYMBOL_GPL(devm_kasprintf_strarray);
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 0c82f07f74fc..d5923a640457 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -585,6 +585,30 @@ static void __init test_bitmap_arr32(void)
}
}
+static void __init test_bitmap_arr64(void)
+{
+ unsigned int nbits, next_bit;
+ u64 arr[EXP1_IN_BITS / 64];
+ DECLARE_BITMAP(bmap2, EXP1_IN_BITS);
+
+ memset(arr, 0xa5, sizeof(arr));
+
+ for (nbits = 0; nbits < EXP1_IN_BITS; ++nbits) {
+ memset(bmap2, 0xff, sizeof(arr));
+ bitmap_to_arr64(arr, exp1, nbits);
+ bitmap_from_arr64(bmap2, arr, nbits);
+ expect_eq_bitmap(bmap2, exp1, nbits);
+
+ next_bit = find_next_bit(bmap2, round_up(nbits, BITS_PER_LONG), nbits);
+ if (next_bit < round_up(nbits, BITS_PER_LONG))
+ pr_err("bitmap_copy_arr64(nbits == %d:"
+ " tail is not safely cleared: %d\n", nbits, next_bit);
+
+ if (nbits < EXP1_IN_BITS - 64)
+ expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5);
+ }
+}
+
static void noinline __init test_mem_optimisations(void)
{
DECLARE_BITMAP(bmap1, 1024);
@@ -852,6 +876,7 @@ static void __init selftest(void)
test_copy();
test_replace();
test_bitmap_arr32();
+ test_bitmap_arr64();
test_bitmap_parse();
test_bitmap_parselist();
test_bitmap_printlist();
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 1bccd6cd5f48..c82b65947ce6 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -31,9 +31,12 @@ MODULE_IMPORT_NS(TEST_FIRMWARE);
#define TEST_FIRMWARE_NAME "test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS 4
#define TEST_FIRMWARE_BUF_SIZE SZ_1K
+#define TEST_UPLOAD_MAX_SIZE SZ_2K
+#define TEST_UPLOAD_BLK_SIZE 37 /* Avoid powers of two in testing */
static DEFINE_MUTEX(test_fw_mutex);
static const struct firmware *test_firmware;
+static LIST_HEAD(test_upload_list);
struct test_batched_req {
u8 idx;
@@ -63,6 +66,7 @@ struct test_batched_req {
* @reqs: stores all requests information
* @read_fw_idx: index of thread from which we want to read firmware results
* from through the read_fw trigger.
+ * @upload_name: firmware name to be used with upload_read sysfs node
* @test_result: a test may use this to collect the result from the call
* of the request_firmware*() calls used in their tests. In order of
* priority we always keep first any setup error. If no setup errors were
@@ -101,6 +105,7 @@ struct test_config {
bool send_uevent;
u8 num_requests;
u8 read_fw_idx;
+ char *upload_name;
/*
 * These below don't belong here but we'll move them once we create
@@ -112,8 +117,34 @@ struct test_config {
struct device *device);
};
+struct upload_inject_err {
+ const char *prog;
+ enum fw_upload_err err_code;
+};
+
+struct test_firmware_upload {
+ char *name;
+ struct list_head node;
+ char *buf;
+ size_t size;
+ bool cancel_request;
+ struct upload_inject_err inject;
+ struct fw_upload *fwl;
+};
+
static struct test_config *test_fw_config;
+static struct test_firmware_upload *upload_lookup_name(const char *name)
+{
+ struct test_firmware_upload *tst;
+
+ list_for_each_entry(tst, &test_upload_list, node)
+ if (strncmp(name, tst->name, strlen(tst->name)) == 0)
+ return tst;
+
+ return NULL;
+}
+
static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
size_t size, loff_t *offset)
{
@@ -198,6 +229,7 @@ static int __test_firmware_config_init(void)
test_fw_config->req_firmware = request_firmware;
test_fw_config->test_result = 0;
test_fw_config->reqs = NULL;
+ test_fw_config->upload_name = NULL;
return 0;
@@ -277,6 +309,13 @@ static ssize_t config_show(struct device *dev,
test_fw_config->sync_direct ? "true" : "false");
len += scnprintf(buf + len, PAGE_SIZE - len,
"read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
+ if (test_fw_config->upload_name)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "upload_name:\t%s\n",
+ test_fw_config->upload_name);
+ else
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "upload_name:\tEMTPY\n");
mutex_unlock(&test_fw_mutex);
@@ -392,6 +431,32 @@ static ssize_t config_name_show(struct device *dev,
}
static DEVICE_ATTR_RW(config_name);
+static ssize_t config_upload_name_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct test_firmware_upload *tst;
+ int ret = count;
+
+ mutex_lock(&test_fw_mutex);
+ tst = upload_lookup_name(buf);
+ if (tst)
+ test_fw_config->upload_name = tst->name;
+ else
+ ret = -EINVAL;
+ mutex_unlock(&test_fw_mutex);
+
+ return ret;
+}
+
+static ssize_t config_upload_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return config_test_show_str(buf, test_fw_config->upload_name);
+}
+static DEVICE_ATTR_RW(config_upload_name);
+
static ssize_t config_num_requests_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -989,6 +1054,278 @@ out:
}
static DEVICE_ATTR_WO(trigger_batched_requests_async);
+static void upload_release(struct test_firmware_upload *tst)
+{
+ firmware_upload_unregister(tst->fwl);
+ kfree(tst->buf);
+ kfree(tst->name);
+ kfree(tst);
+}
+
+static void upload_release_all(void)
+{
+ struct test_firmware_upload *tst, *tmp;
+
+ list_for_each_entry_safe(tst, tmp, &test_upload_list, node) {
+ list_del(&tst->node);
+ upload_release(tst);
+ }
+ test_fw_config->upload_name = NULL;
+}
+
+/*
+ * This table is replicated from .../firmware_loader/sysfs_upload.c
+ * and needs to be kept in sync.
+ */
+static const char * const fw_upload_err_str[] = {
+ [FW_UPLOAD_ERR_NONE] = "none",
+ [FW_UPLOAD_ERR_HW_ERROR] = "hw-error",
+ [FW_UPLOAD_ERR_TIMEOUT] = "timeout",
+ [FW_UPLOAD_ERR_CANCELED] = "user-abort",
+ [FW_UPLOAD_ERR_BUSY] = "device-busy",
+ [FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size",
+ [FW_UPLOAD_ERR_RW_ERROR] = "read-write-error",
+ [FW_UPLOAD_ERR_WEAROUT] = "flash-wearout",
+};
+
+static void upload_err_inject_error(struct test_firmware_upload *tst,
+ const u8 *p, const char *prog)
+{
+ enum fw_upload_err err;
+
+ for (err = FW_UPLOAD_ERR_NONE + 1; err < FW_UPLOAD_ERR_MAX; err++) {
+ if (strncmp(p, fw_upload_err_str[err],
+ strlen(fw_upload_err_str[err])) == 0) {
+ tst->inject.prog = prog;
+ tst->inject.err_code = err;
+ return;
+ }
+ }
+}
+
+static void upload_err_inject_prog(struct test_firmware_upload *tst,
+ const u8 *p)
+{
+ static const char * const progs[] = {
+ "preparing:", "transferring:", "programming:"
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(progs); i++) {
+ if (strncmp(p, progs[i], strlen(progs[i])) == 0) {
+ upload_err_inject_error(tst, p + strlen(progs[i]),
+ progs[i]);
+ return;
+ }
+ }
+}
+
+#define FIVE_MINUTES_MS (5 * 60 * 1000)
+static enum fw_upload_err
+fw_upload_wait_on_cancel(struct test_firmware_upload *tst)
+{
+ int ms_delay;
+
+ for (ms_delay = 0; ms_delay < FIVE_MINUTES_MS; ms_delay += 100) {
+ msleep(100);
+ if (tst->cancel_request)
+ return FW_UPLOAD_ERR_CANCELED;
+ }
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err test_fw_upload_prepare(struct fw_upload *fwl,
+ const u8 *data, u32 size)
+{
+ struct test_firmware_upload *tst = fwl->dd_handle;
+ enum fw_upload_err ret = FW_UPLOAD_ERR_NONE;
+ const char *progress = "preparing:";
+
+ tst->cancel_request = false;
+
+ if (!size || size > TEST_UPLOAD_MAX_SIZE) {
+ ret = FW_UPLOAD_ERR_INVALID_SIZE;
+ goto err_out;
+ }
+
+ if (strncmp(data, "inject:", strlen("inject:")) == 0)
+ upload_err_inject_prog(tst, data + strlen("inject:"));
+
+ memset(tst->buf, 0, TEST_UPLOAD_MAX_SIZE);
+ tst->size = size;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
+ strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
+ return FW_UPLOAD_ERR_NONE;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
+ ret = fw_upload_wait_on_cancel(tst);
+ else
+ ret = tst->inject.err_code;
+
+err_out:
+ /*
+ * The cleanup op only executes if the prepare op succeeds.
+ * If the prepare op fails, it must do its own clean-up.
+ */
+ tst->inject.err_code = FW_UPLOAD_ERR_NONE;
+ tst->inject.prog = NULL;
+
+ return ret;
+}
+
+static enum fw_upload_err test_fw_upload_write(struct fw_upload *fwl,
+ const u8 *data, u32 offset,
+ u32 size, u32 *written)
+{
+ struct test_firmware_upload *tst = fwl->dd_handle;
+ const char *progress = "transferring:";
+ u32 blk_size;
+
+ if (tst->cancel_request)
+ return FW_UPLOAD_ERR_CANCELED;
+
+ blk_size = min_t(u32, TEST_UPLOAD_BLK_SIZE, size);
+ memcpy(tst->buf + offset, data + offset, blk_size);
+
+ *written = blk_size;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
+ strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
+ return FW_UPLOAD_ERR_NONE;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
+ return fw_upload_wait_on_cancel(tst);
+
+ return tst->inject.err_code;
+}
+
+static enum fw_upload_err test_fw_upload_complete(struct fw_upload *fwl)
+{
+ struct test_firmware_upload *tst = fwl->dd_handle;
+ const char *progress = "programming:";
+
+ if (tst->cancel_request)
+ return FW_UPLOAD_ERR_CANCELED;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
+ strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
+ return FW_UPLOAD_ERR_NONE;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
+ return fw_upload_wait_on_cancel(tst);
+
+ return tst->inject.err_code;
+}
+
+static void test_fw_upload_cancel(struct fw_upload *fwl)
+{
+ struct test_firmware_upload *tst = fwl->dd_handle;
+
+ tst->cancel_request = true;
+}
+
+static void test_fw_cleanup(struct fw_upload *fwl)
+{
+ struct test_firmware_upload *tst = fwl->dd_handle;
+
+ tst->inject.err_code = FW_UPLOAD_ERR_NONE;
+ tst->inject.prog = NULL;
+}
+
+static const struct fw_upload_ops upload_test_ops = {
+ .prepare = test_fw_upload_prepare,
+ .write = test_fw_upload_write,
+ .poll_complete = test_fw_upload_complete,
+ .cancel = test_fw_upload_cancel,
+ .cleanup = test_fw_cleanup
+};
+
+static ssize_t upload_register_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct test_firmware_upload *tst;
+ struct fw_upload *fwl;
+ char *name;
+ int ret;
+
+ name = kstrndup(buf, count, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ mutex_lock(&test_fw_mutex);
+ tst = upload_lookup_name(name);
+ if (tst) {
+ ret = -EEXIST;
+ goto free_name;
+ }
+
+ tst = kzalloc(sizeof(*tst), GFP_KERNEL);
+ if (!tst) {
+ ret = -ENOMEM;
+ goto free_name;
+ }
+
+ tst->name = name;
+ tst->buf = kzalloc(TEST_UPLOAD_MAX_SIZE, GFP_KERNEL);
+ if (!tst->buf) {
+ ret = -ENOMEM;
+ goto free_tst;
+ }
+
+ fwl = firmware_upload_register(THIS_MODULE, dev, tst->name,
+ &upload_test_ops, tst);
+ if (IS_ERR(fwl)) {
+ ret = PTR_ERR(fwl);
+ goto free_buf;
+ }
+
+ tst->fwl = fwl;
+ list_add_tail(&tst->node, &test_upload_list);
+ mutex_unlock(&test_fw_mutex);
+ return count;
+
+free_buf:
+ kfree(tst->buf);
+
+free_tst:
+ kfree(tst);
+
+free_name:
+ mutex_unlock(&test_fw_mutex);
+ kfree(name);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(upload_register);
+
+static ssize_t upload_unregister_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct test_firmware_upload *tst;
+ int ret = count;
+
+ mutex_lock(&test_fw_mutex);
+ tst = upload_lookup_name(buf);
+ if (!tst) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (test_fw_config->upload_name == tst->name)
+ test_fw_config->upload_name = NULL;
+
+ list_del(&tst->node);
+ upload_release(tst);
+
+out:
+ mutex_unlock(&test_fw_mutex);
+ return ret;
+}
+static DEVICE_ATTR_WO(upload_unregister);
+
static ssize_t test_result_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1051,6 +1388,45 @@ out:
}
static DEVICE_ATTR_RO(read_firmware);
+static ssize_t upload_read_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct test_firmware_upload *tst = NULL;
+ struct test_firmware_upload *tst_iter;
+ int ret = -EINVAL;
+
+ if (!test_fw_config->upload_name) {
+ pr_err("Set config_upload_name before using upload_read\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&test_fw_mutex);
+ list_for_each_entry(tst_iter, &test_upload_list, node)
+ if (tst_iter->name == test_fw_config->upload_name) {
+ tst = tst_iter;
+ break;
+ }
+
+ if (!tst) {
+ pr_err("Firmware name not found: %s\n",
+ test_fw_config->upload_name);
+ goto out;
+ }
+
+ if (tst->size > PAGE_SIZE) {
+ pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
+ goto out;
+ }
+
+ memcpy(buf, tst->buf, tst->size);
+ ret = tst->size;
+out:
+ mutex_unlock(&test_fw_mutex);
+ return ret;
+}
+static DEVICE_ATTR_RO(upload_read);
+
#define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr
static struct attribute *test_dev_attrs[] = {
@@ -1066,6 +1442,7 @@ static struct attribute *test_dev_attrs[] = {
TEST_FW_DEV_ATTR(config_sync_direct),
TEST_FW_DEV_ATTR(config_send_uevent),
TEST_FW_DEV_ATTR(config_read_fw_idx),
+ TEST_FW_DEV_ATTR(config_upload_name),
/* These don't use the config at all - they could be ported! */
TEST_FW_DEV_ATTR(trigger_request),
@@ -1082,6 +1459,9 @@ static struct attribute *test_dev_attrs[] = {
TEST_FW_DEV_ATTR(release_all_firmware),
TEST_FW_DEV_ATTR(test_result),
TEST_FW_DEV_ATTR(read_firmware),
+ TEST_FW_DEV_ATTR(upload_read),
+ TEST_FW_DEV_ATTR(upload_register),
+ TEST_FW_DEV_ATTR(upload_unregister),
NULL,
};
@@ -1128,6 +1508,7 @@ static void __exit test_firmware_exit(void)
mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
misc_deregister(&test_fw_misc_device);
+ upload_release_all();
__test_firmware_config_free();
kfree(test_fw_config);
mutex_unlock(&test_fw_mutex);
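
The new upload nodes are driven entirely from user space. A hedged sketch of a test client follows; the sysfs paths are assumptions based on the misc-device name and the firmware-upload class, and the "inject:<phase>:<error>" payload format comes from upload_err_inject_prog() above. The blob itself would be pushed through the firmware-upload class interface (loading/data attributes under /sys/class/firmware/<name>/):

/* Sketch: drive the test_firmware upload nodes from user space. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0) {
		perror(path);
		return -1;
	}
	n = write(fd, s, strlen(s));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* Assumed location of the test_firmware misc device in sysfs. */
	const char *dev = "/sys/devices/virtual/misc/test_firmware";
	char path[256];

	snprintf(path, sizeof(path), "%s/upload_register", dev);
	write_str(path, "fw-demo");	/* registers a fw_upload device */

	snprintf(path, sizeof(path), "%s/config_upload_name", dev);
	write_str(path, "fw-demo");	/* select it for upload_read */

	/* ... upload "inject:transferring:timeout" via the firmware class ... */

	/* Reading <dev>/upload_read returns what test_fw_upload_write() saw. */
	return 0;
}
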
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 3ca717f11397..c95db11a6906 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -279,13 +279,18 @@ static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures)
c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU,
NULL);
buf = kmem_cache_alloc(c, GFP_KERNEL);
+ if (!buf)
+ goto out;
saved_ptr = buf;
fill_with_garbage(buf, size);
buf_contents = kmalloc(size, GFP_KERNEL);
- if (!buf_contents)
+ if (!buf_contents) {
+ kmem_cache_free(c, buf);
goto out;
+ }
used_objects = kmalloc_array(maxiter, sizeof(void *), GFP_KERNEL);
if (!used_objects) {
+ kmem_cache_free(c, buf);
kfree(buf_contents);
goto out;
}
@@ -306,11 +311,14 @@ static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures)
}
}
+ for (iter = 0; iter < maxiter; iter++)
+ kmem_cache_free(c, used_objects[iter]);
+
free_out:
- kmem_cache_destroy(c);
kfree(buf_contents);
kfree(used_objects);
out:
+ kmem_cache_destroy(c);
*total_failures += fail;
return 1;
}
diff --git a/lib/test_siphash.c b/lib/test_siphash.c
index a6d854d933bf..a96788d0141d 100644
--- a/lib/test_siphash.c
+++ b/lib/test_siphash.c
@@ -1,8 +1,7 @@
-/* Test cases for siphash.c
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
- * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * This file is provided under a dual BSD/GPLv2 license.
+ * Test cases for siphash.c
*
* SipHash: a fast short-input PRF
* https://131002.net/siphash/
diff --git a/lib/test_string.c b/lib/test_string.c
index 9dfd6f52de92..c5cb92fb710e 100644
--- a/lib/test_string.c
+++ b/lib/test_string.c
@@ -179,6 +179,34 @@ static __init int strnchr_selftest(void)
return 0;
}
+static __init int strspn_selftest(void)
+{
+ static const struct strspn_test {
+ const char str[16];
+ const char accept[16];
+ const char reject[16];
+ unsigned a;
+ unsigned r;
+ } tests[] __initconst = {
+ { "foobar", "", "", 0, 6 },
+ { "abba", "abc", "ABBA", 4, 4 },
+ { "abba", "a", "b", 1, 1 },
+ { "", "abc", "abc", 0, 0},
+ };
+ const struct strspn_test *s = tests;
+ size_t i, res;
+
+ for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) {
+ res = strspn(s->str, s->accept);
+ if (res != s->a)
+ return 0x100 + 2*i;
+ res = strcspn(s->str, s->reject);
+ if (res != s->r)
+ return 0x100 + 2*i + 1;
+ }
+ return 0;
+}
+
static __exit void string_selftest_remove(void)
{
}
@@ -212,6 +240,11 @@ static __init int string_selftest_init(void)
if (subtest)
goto fail;
+ test = 6;
+ subtest = strspn_selftest();
+ if (subtest)
+ goto fail;
+
pr_info("String selftests succeeded\n");
return 0;
fail:
diff --git a/mm/Kconfig b/mm/Kconfig
index 905c205e14f3..169e64192e48 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -270,6 +270,19 @@ config SLAB_FREELIST_HARDENED
sanity-checking than others. This option is most effective with
CONFIG_SLUB.
+config SLUB_STATS
+ default n
+ bool "Enable SLUB performance statistics"
+ depends on SLUB && SYSFS
+ help
+ SLUB statistics are useful to debug SLUBs allocation behavior in
+ order to find ways to optimize the allocator. This should never be
+ enabled for production use since keeping statistics slows down
+ the allocator by a few percentage points. The slabinfo command
+ supports the determination of the most active slabs to figure
+ out which slabs are relevant to a particular load.
+ Try running: slabinfo -DA
+
config SLUB_CPU_PARTIAL
default y
depends on SLUB && SMP
@@ -307,6 +320,40 @@ config SHUFFLE_PAGE_ALLOCATOR
Say Y if unsure.
+config COMPAT_BRK
+ bool "Disable heap randomization"
+ default y
+ help
+ Randomizing heap placement makes heap exploits harder, but it
+ also breaks ancient binaries (including anything libc5 based).
+ This option changes the bootup default to heap randomization
+ disabled, and can be overridden at runtime by setting
+ /proc/sys/kernel/randomize_va_space to 2.
+
+ On non-ancient distros (post-2000 ones) N is usually a safe choice.
+
+config MMAP_ALLOW_UNINITIALIZED
+ bool "Allow mmapped anonymous memory to be uninitialized"
+ depends on EXPERT && !MMU
+ default n
+ help
+ Normally, and according to the Linux spec, anonymous memory obtained
+ from mmap() has its contents cleared before it is passed to
+ userspace. Enabling this config option allows you to request that
+ mmap() skip that if it is given an MAP_UNINITIALIZED flag, thus
+ providing a huge performance boost. If this option is not enabled,
+ then the flag will be ignored.
+
+ This is taken advantage of by uClibc's malloc(), and also by
+ ELF-FDPIC binfmt's brk and stack allocator.
+
+ Because of the obvious security issues, this option should only be
+ enabled on embedded devices where you control what is run in
+ userspace. Since that isn't generally a problem on no-MMU systems,
+ it is normally safe to say Y here.
+
+ See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
+
config SELECT_MEMORY_MODEL
def_bool y
depends on ARCH_SELECT_MEMORY_MODEL
@@ -964,6 +1011,15 @@ config ARCH_USES_HIGH_VMA_FLAGS
config ARCH_HAS_PKEYS
bool
+config VM_EVENT_COUNTERS
+ default y
+ bool "Enable VM event counters for /proc/vmstat" if EXPERT
+ help
+ VM event counters are needed for event counts to be shown.
+ This option allows the disabling of the VM event counters
+ on EXPERT systems. /proc/vmstat will only show page counts
+ if VM event counters are disabled.
+
config PERCPU_STATS
bool "Collect percpu memory statistics"
help
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 5bd5bb097252..ce8dded36de9 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -45,6 +45,39 @@ config DEBUG_PAGEALLOC_ENABLE_DEFAULT
Enable debug page memory allocations by default? This value
can be overridden by debug_pagealloc=off|on.
+config DEBUG_SLAB
+ bool "Debug slab memory allocations"
+ depends on DEBUG_KERNEL && SLAB
+ help
+ Say Y here to have the kernel do limited verification on memory
+ allocation as well as poisoning memory on free to catch use of freed
+ memory. This can make kmalloc/kfree-intensive workloads much slower.
+
+config SLUB_DEBUG
+ default y
+ bool "Enable SLUB debugging support" if EXPERT
+ depends on SLUB && SYSFS
+ select STACKDEPOT if STACKTRACE_SUPPORT
+ help
+ SLUB has extensive debug support features. Disabling these can
+ result in significant savings in code size. This also disables
+ SLUB sysfs support. /sys/slab will not exist and there will be
+ no support for cache validation etc.
+
+config SLUB_DEBUG_ON
+ bool "SLUB debugging on by default"
+ depends on SLUB && SLUB_DEBUG
+ select STACKDEPOT_ALWAYS_INIT if STACKTRACE_SUPPORT
+ default n
+ help
+ Boot with debugging on by default. SLUB boots by default with
+ the runtime debug capabilities switched off. Enabling this is
+ equivalent to specifying the "slub_debug" parameter on boot.
+ There is no support for more fine grained debug control like
+ possible with slub_debug=xxx. SLUB debugging may be switched
+ off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying
+ "slub_debug=-".
+
config PAGE_OWNER
bool "Track page owner"
depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
diff --git a/mm/cma.c b/mm/cma.c
index eaa4b5c920a2..4a978e09547a 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -37,6 +37,7 @@
struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
+static DEFINE_MUTEX(cma_mutex);
phys_addr_t cma_get_base(const struct cma *cma)
{
@@ -468,9 +469,10 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
spin_unlock_irq(&cma->lock);
pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
+ mutex_lock(&cma_mutex);
ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
-
+ mutex_unlock(&cma_mutex);
if (ret == 0) {
page = pfn_to_page(pfn);
break;
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 338f16022012..c76ee665355a 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -215,4 +215,15 @@ SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
}
#endif
+
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FADVISE64_64)
+
+COMPAT_SYSCALL_DEFINE6(fadvise64_64, int, fd, compat_arg_u64_dual(offset),
+ compat_arg_u64_dual(len), int, advice)
+{
+ return ksys_fadvise64_64(fd, compat_arg_u64_glue(offset),
+ compat_arg_u64_glue(len), advice);
+}
+
+#endif
#endif
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 01f0e2e5ab48..7c468ac1d069 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6755,7 +6755,14 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
pud_clear(pud);
put_page(virt_to_page(ptep));
mm_dec_nr_pmds(mm);
- *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
+ /*
+ * This update of passed address optimizes loops sequentially
+ * processing addresses in increments of huge page size (PMD_SIZE
+ * in this case). By clearing the pud, a PUD_SIZE area is unmapped.
+ * Update address to the 'last page' in the cleared area so that
+ * calling loop can move to first page past this area.
+ */
+ *addr |= PUD_SIZE - PMD_SIZE;
return 1;
}
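
The comment explains why the returned address is bumped to the last PMD-sized page of the just-cleared PUD range. Concrete numbers make it clearer; the sizes below are the x86-64 ones (PMD_SIZE = 2 MiB, PUD_SIZE = 1 GiB) and are purely illustrative:

#include <stdio.h>

#define PMD_SIZE 0x200000UL		/* 2 MiB huge page, x86-64 */
#define PUD_SIZE 0x40000000UL		/* 1 GiB, x86-64 */

int main(void)
{
	unsigned long addr = 0x40200000UL;	/* a PMD inside the 2nd PUD area */

	/* huge_pmd_unshare(): jump to the last PMD page of the cleared PUD. */
	addr |= PUD_SIZE - PMD_SIZE;
	printf("after unshare:  0x%lx\n", addr);	/* 0x7fe00000 */

	/* The unmap loop then advances by one huge page... */
	addr += PMD_SIZE;
	printf("next iteration: 0x%lx\n", addr);	/* 0x80000000: next PUD */
	return 0;
}
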
diff --git a/mm/internal.h b/mm/internal.h
index 64e61b032dac..c0f8fbe0445b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -374,8 +374,8 @@ extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr,
int nid, bool exact_nid);
-void split_free_page(struct page *free_page,
- int order, unsigned long split_pfn_offset);
+int split_free_page(struct page *free_page,
+ unsigned int order, unsigned long split_pfn_offset);
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 199d77cce21a..b341a191651d 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -347,7 +347,7 @@ static void print_address_description(void *addr, u8 tag)
va->addr, va->addr + va->size, va->caller);
pr_err("\n");
- page = vmalloc_to_page(page);
+ page = vmalloc_to_page(addr);
}
}
diff --git a/mm/madvise.c b/mm/madvise.c
index 4d6592488b51..d7b4f2602949 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -248,10 +248,13 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
if (!xa_is_value(page))
continue;
+ swap = radix_to_swp_entry(page);
+ /* There might be swapin error entries in shmem mapping. */
+ if (non_swap_entry(swap))
+ continue;
xas_pause(&xas);
rcu_read_unlock();
- swap = radix_to_swp_entry(page);
page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
NULL, 0, false, &splug);
if (page)
@@ -624,11 +627,14 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
swp_entry_t entry;
entry = pte_to_swp_entry(ptent);
- if (non_swap_entry(entry))
- continue;
- nr_swap--;
- free_swap_and_cache(entry);
- pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
+ if (!non_swap_entry(entry)) {
+ nr_swap--;
+ free_swap_and_cache(entry);
+ pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
+ } else if (is_hwpoison_entry(entry) ||
+ is_swapin_error_entry(entry)) {
+ pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
+ }
continue;
}
diff --git a/mm/memory.c b/mm/memory.c
index 54bcd5327b74..21dadf03f089 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1487,7 +1487,8 @@ again:
/* Only drop the uffd-wp marker if explicitly requested */
if (!zap_drop_file_uffd_wp(details))
continue;
- } else if (is_hwpoison_entry(entry)) {
+ } else if (is_hwpoison_entry(entry) ||
+ is_swapin_error_entry(entry)) {
if (!should_zap_cows(details))
continue;
} else {
@@ -3727,6 +3728,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
+ } else if (is_swapin_error_entry(entry)) {
+ ret = VM_FAULT_SIGBUS;
} else if (is_pte_marker_entry(entry)) {
ret = handle_pte_marker(vmf);
} else {
diff --git a/mm/mmap.c b/mm/mmap.c
index 2b9305ed0dda..61e6135c54ef 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2140,15 +2140,15 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
*
* This function "knows" that -ENOMEM has the bits set.
*/
-#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
-arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+generic_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
struct vm_unmapped_area_info info;
- const unsigned long mmap_end = arch_get_mmap_end(addr);
+ const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
if (len > mmap_end - mmap_min_addr)
return -ENOMEM;
@@ -2173,22 +2173,30 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.align_offset = 0;
return vm_unmapped_area(&info);
}
+
+#ifndef HAVE_ARCH_UNMAPPED_AREA
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
+}
#endif
/*
* This mmap-allocator allocates new areas top-down from below the
* stack's low limit (the base):
*/
-#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags)
+generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
{
struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
struct vm_unmapped_area_info info;
- const unsigned long mmap_end = arch_get_mmap_end(addr);
+ const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
/* requested length too big for entire address space */
if (len > mmap_end - mmap_min_addr)
@@ -2231,6 +2239,15 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
return addr;
}
+
+#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
+}
#endif
unsigned long
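The mm/mmap.c change above splits the allocator into an always-built generic_*() body plus a thin arch_*() wrapper that is only compiled when the architecture does not define HAVE_ARCH_UNMAPPED_AREA and supply its own. A minimal userspace sketch of that override pattern follows; the function names, the alignment policy, and the guard macro usage here are illustrative only, not the kernel's actual logic.

#include <stdio.h>

static unsigned long generic_get_area(unsigned long len, unsigned long align)
{
    /* Trivial placeholder policy: round the length up to the alignment. */
    return (len + align - 1) & ~(align - 1);
}

#ifndef HAVE_ARCH_UNMAPPED_AREA
/* Default: an architecture without a special policy falls back to the generic one. */
static unsigned long arch_get_area(unsigned long len, unsigned long align)
{
    return generic_get_area(len, align);
}
#endif

int main(void)
{
    printf("%lu\n", arch_get_area(5000, 4096));  /* prints 8192 */
    return 0;
}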
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bc93a82e51e6..e008a3df0485 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -482,8 +482,12 @@ unsigned long __get_pfnblock_flags_mask(const struct page *page,
bitidx = pfn_to_bitidx(page, pfn);
word_bitidx = bitidx / BITS_PER_LONG;
bitidx &= (BITS_PER_LONG-1);
-
- word = bitmap[word_bitidx];
+ /*
+ * This races, without locks, with set_pfnblock_flags_mask(). Ensure
+ * a consistent read of the memory array, so that results, even though
+ * racy, are not corrupted.
+ */
+ word = READ_ONCE(bitmap[word_bitidx]);
return (word >> bitidx) & mask;
}
@@ -1100,30 +1104,44 @@ done_merging:
* @order: the order of the page
* @split_pfn_offset: split offset within the page
*
+ * Return -ENOENT if the free page is changed, otherwise 0
+ *
* It is used when the free page crosses two pageblocks with different migratetypes
* at split_pfn_offset within the page. The split free page will be put into
* separate migratetype lists afterwards. Otherwise, the function achieves
* nothing.
*/
-void split_free_page(struct page *free_page,
- int order, unsigned long split_pfn_offset)
+int split_free_page(struct page *free_page,
+ unsigned int order, unsigned long split_pfn_offset)
{
struct zone *zone = page_zone(free_page);
unsigned long free_page_pfn = page_to_pfn(free_page);
unsigned long pfn;
unsigned long flags;
int free_page_order;
+ int mt;
+ int ret = 0;
if (split_pfn_offset == 0)
- return;
+ return ret;
spin_lock_irqsave(&zone->lock, flags);
+
+ if (!PageBuddy(free_page) || buddy_order(free_page) != order) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ mt = get_pageblock_migratetype(free_page);
+ if (likely(!is_migrate_isolate(mt)))
+ __mod_zone_freepage_state(zone, -(1UL << order), mt);
+
del_page_from_free_list(free_page, zone, order);
for (pfn = free_page_pfn;
pfn < free_page_pfn + (1UL << order);) {
int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn);
- free_page_order = min_t(int,
+ free_page_order = min_t(unsigned int,
pfn ? __ffs(pfn) : order,
__fls(split_pfn_offset));
__free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order,
@@ -1134,7 +1152,9 @@ void split_free_page(struct page *free_page,
if (split_pfn_offset == 0)
split_pfn_offset = (1UL << order) - (pfn - free_page_pfn);
}
+out:
spin_unlock_irqrestore(&zone->lock, flags);
+ return ret;
}
/*
* A bad page could be due to a number of fields. Instead of multiple branches,
@@ -5324,8 +5344,8 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
pcp, pcp_list);
if (unlikely(!page)) {
- /* Try and get at least one page */
- if (!nr_populated)
+ /* Try and allocate at least one page */
+ if (!nr_account)
goto failed_irq;
break;
}
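The __get_pfnblock_flags_mask() hunk above wraps the lockless bitmap load in READ_ONCE() so the compiler cannot re-fetch or split the access while set_pfnblock_flags_mask() races with it. The following userspace approximation shows the idea with a volatile-cast macro; the real READ_ONCE() is more involved, and the variable and function names here are invented for illustration.

#include <stdio.h>

#define READ_ONCE_ULONG(x)  (*(const volatile unsigned long *)&(x))

static unsigned long bitmap_word;   /* imagine another thread updates this concurrently */

static unsigned long read_flags(unsigned int bitidx, unsigned long mask)
{
    unsigned long word = READ_ONCE_ULONG(bitmap_word);  /* one consistent load */

    return (word >> bitidx) & mask;
}

int main(void)
{
    bitmap_word = 0xf0;
    printf("%lx\n", read_flags(4, 0x7));   /* prints 7 */
    return 0;
}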
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index c643c8420809..6021f8444b5a 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -300,7 +300,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* the in-use page then splitting the free page.
*/
static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
- gfp_t gfp_flags, bool isolate_before)
+ gfp_t gfp_flags, bool isolate_before, bool skip_isolation)
{
unsigned char saved_mt;
unsigned long start_pfn;
@@ -327,11 +327,16 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
zone->zone_start_pfn);
saved_mt = get_pageblock_migratetype(pfn_to_page(isolate_pageblock));
- ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock), saved_mt, flags,
- isolate_pageblock, isolate_pageblock + pageblock_nr_pages);
- if (ret)
- return ret;
+ if (skip_isolation)
+ VM_BUG_ON(!is_migrate_isolate(saved_mt));
+ else {
+ ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock), saved_mt, flags,
+ isolate_pageblock, isolate_pageblock + pageblock_nr_pages);
+
+ if (ret)
+ return ret;
+ }
/*
* Bail out early when the to-be-isolated pageblock does not form
@@ -366,9 +371,13 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
if (PageBuddy(page)) {
int order = buddy_order(page);
- if (pfn + (1UL << order) > boundary_pfn)
- split_free_page(page, order, boundary_pfn - pfn);
- pfn += (1UL << order);
+ if (pfn + (1UL << order) > boundary_pfn) {
+ /* free page changed before split, check it again */
+ if (split_free_page(page, order, boundary_pfn - pfn))
+ continue;
+ }
+
+ pfn += 1UL << order;
continue;
}
/*
@@ -463,7 +472,8 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
return 0;
failed:
/* restore the original migratetype */
- unset_migratetype_isolate(pfn_to_page(isolate_pageblock), saved_mt);
+ if (!skip_isolation)
+ unset_migratetype_isolate(pfn_to_page(isolate_pageblock), saved_mt);
return -EBUSY;
}
@@ -522,14 +532,18 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned long isolate_start = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
int ret;
+ bool skip_isolation = false;
/* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */
- ret = isolate_single_pageblock(isolate_start, flags, gfp_flags, false);
+ ret = isolate_single_pageblock(isolate_start, flags, gfp_flags, false, skip_isolation);
if (ret)
return ret;
+ if (isolate_start == isolate_end - pageblock_nr_pages)
+ skip_isolation = true;
+
/* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */
- ret = isolate_single_pageblock(isolate_end, flags, gfp_flags, true);
+ ret = isolate_single_pageblock(isolate_end, flags, gfp_flags, true, skip_isolation);
if (ret) {
unset_migratetype_isolate(pfn_to_page(isolate_start), migratetype);
return ret;
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index 3692bea2ea2c..e2062748791a 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -234,11 +234,11 @@ void __page_table_check_pte_clear_range(struct mm_struct *mm,
pte_t *ptep = pte_offset_map(&pmd, addr);
unsigned long i;
- pte_unmap(ptep);
for (i = 0; i < PTRS_PER_PTE; i++) {
__page_table_check_pte_clear(mm, addr, *ptep);
addr += PAGE_SIZE;
ptep++;
}
+ pte_unmap(ptep - PTRS_PER_PTE);
}
}
diff --git a/mm/readahead.c b/mm/readahead.c
index b78921b54754..415c39d764ea 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -749,6 +749,13 @@ SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
return ksys_readahead(fd, offset, count);
}
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_READAHEAD)
+COMPAT_SYSCALL_DEFINE4(readahead, int, fd, compat_arg_u64_dual(offset), size_t, count)
+{
+ return ksys_readahead(fd, compat_arg_u64_glue(offset), count);
+}
+#endif
+
/**
* readahead_expand - Expand a readahead request
* @ractl: The request to be expanded
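The readahead hunk above adds a compat entry point whose 64-bit offset arrives as two 32-bit halves on 32-bit ABIs and is glued back together by compat_arg_u64_dual()/compat_arg_u64_glue(). The sketch below shows that reassembly in plain C; which half is "low" is ABI and endianness dependent, so the (low, high) order, the function names, and the printf stand-in for ksys_readahead() are assumptions made only for illustration.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t glue_u64(uint32_t lo, uint32_t hi)
{
    return ((uint64_t)hi << 32) | lo;
}

/* Stand-in for the real readahead work. */
static long do_readahead(int fd, uint64_t offset, size_t count)
{
    printf("fd=%d offset=%" PRIu64 " count=%zu\n", fd, offset, count);
    return 0;
}

/* What a compat entry point conceptually does with the split argument. */
static long compat_readahead(int fd, uint32_t off_lo, uint32_t off_hi, size_t count)
{
    return do_readahead(fd, glue_u64(off_lo, off_hi), count);
}

int main(void)
{
    return (int)compat_readahead(3, 0x00000000u, 0x00000001u, 4096); /* offset = 4 GiB */
}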
diff --git a/mm/shmem.c b/mm/shmem.c
index da30c769b376..a6f565308133 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1174,6 +1174,10 @@ static int shmem_find_swap_entries(struct address_space *mapping,
continue;
entry = radix_to_swp_entry(folio);
+ /*
+ * swapin error entries can be found in the mapping. But they're
+ * deliberately ignored here as we've done everything we can do.
+ */
if (swp_type(entry) != type)
continue;
@@ -1671,6 +1675,36 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
return error;
}
+static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
+ struct folio *folio, swp_entry_t swap)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct shmem_inode_info *info = SHMEM_I(inode);
+ swp_entry_t swapin_error;
+ void *old;
+
+ swapin_error = make_swapin_error_entry(&folio->page);
+ old = xa_cmpxchg_irq(&mapping->i_pages, index,
+ swp_to_radix_entry(swap),
+ swp_to_radix_entry(swapin_error), 0);
+ if (old != swp_to_radix_entry(swap))
+ return;
+
+ folio_wait_writeback(folio);
+ delete_from_swap_cache(&folio->page);
+ spin_lock_irq(&info->lock);
+ /*
+ * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks won't
+ * be 0 when inode is released and thus trigger WARN_ON(inode->i_blocks) in
+ * shmem_evict_inode.
+ */
+ info->alloced--;
+ info->swapped--;
+ shmem_recalc_inode(inode);
+ spin_unlock_irq(&info->lock);
+ swap_free(swap);
+}
+
/*
* Swap in the page pointed to by *pagep.
* Caller has to make sure that *pagep contains a valid swapped page.
@@ -1694,6 +1728,9 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
swap = radix_to_swp_entry(*foliop);
*foliop = NULL;
+ if (is_swapin_error_entry(swap))
+ return -EIO;
+
/* Look it up and read it in.. */
page = lookup_swap_cache(swap, NULL, 0);
if (!page) {
@@ -1761,6 +1798,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
failed:
if (!shmem_confirm_swap(mapping, index, swap))
error = -EEXIST;
+ if (error == -EIO)
+ shmem_set_folio_swapin_error(inode, index, folio, swap);
unlock:
if (folio) {
folio_unlock(folio);
@@ -1906,7 +1945,7 @@ alloc_nohuge:
spin_lock_irq(&info->lock);
info->alloced += folio_nr_pages(folio);
- inode->i_blocks += BLOCKS_PER_PAGE << folio_order(folio);
+ inode->i_blocks += (blkcnt_t)BLOCKS_PER_PAGE << folio_order(folio);
shmem_recalc_inode(inode);
spin_unlock_irq(&info->lock);
alloced = true;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b9e4ed2e90bf..778d57d2d92d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -410,6 +410,9 @@ struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
return NULL;
swp = radix_to_swp_entry(page);
+ /* There might be swapin error entries in shmem mapping. */
+ if (non_swap_entry(swp))
+ return NULL;
/* Prevent swapoff from happening to us */
si = get_swap_device(swp);
if (!si)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 94b4ff43ead0..a2e66d855b19 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1775,7 +1775,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
{
struct page *swapcache;
spinlock_t *ptl;
- pte_t *pte;
+ pte_t *pte, new_pte;
int ret = 1;
swapcache = page;
@@ -1789,6 +1789,17 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
goto out;
}
+ if (unlikely(!PageUptodate(page))) {
+ pte_t pteval;
+
+ dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
+ pteval = swp_entry_to_pte(make_swapin_error_entry(page));
+ set_pte_at(vma->vm_mm, addr, pte, pteval);
+ swap_free(entry);
+ ret = 0;
+ goto out;
+ }
+
/* See do_swap_page() */
BUG_ON(!PageAnon(page) && PageMappedToDisk(page));
BUG_ON(PageAnon(page) && PageAnonExclusive(page));
@@ -1813,8 +1824,12 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
page_add_new_anon_rmap(page, vma, addr);
lru_cache_add_inactive_or_unevictable(page, vma);
}
- set_pte_at(vma->vm_mm, addr, pte,
- pte_mkold(mk_pte(page, vma->vm_page_prot)));
+ new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
+ if (pte_swp_soft_dirty(*pte))
+ new_pte = pte_mksoft_dirty(new_pte);
+ if (pte_swp_uffd_wp(*pte))
+ new_pte = pte_mkuffd_wp(new_pte);
+ set_pte_at(vma->vm_mm, addr, pte, new_pte);
swap_free(entry);
out:
pte_unmap_unlock(pte, ptl);
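The unuse_pte() hunk above rebuilds the present PTE and then copies the soft-dirty and uffd-wp bits recorded in the old swap PTE onto it, instead of dropping them. Here is a tiny C sketch of that flag-carrying pattern; the bit positions and names below are invented purely for illustration and have nothing to do with the real PTE layout.

#include <stdio.h>

#define OLD_SOFT_DIRTY  (1u << 0)
#define OLD_UFFD_WP     (1u << 1)

#define NEW_SOFT_DIRTY  (1u << 4)
#define NEW_UFFD_WP     (1u << 5)

static unsigned int rebuild_entry(unsigned int base, unsigned int old)
{
    unsigned int new_entry = base;

    if (old & OLD_SOFT_DIRTY)      /* carry soft-dirty across the conversion */
        new_entry |= NEW_SOFT_DIRTY;
    if (old & OLD_UFFD_WP)         /* carry uffd write-protect as well */
        new_entry |= NEW_UFFD_WP;
    return new_entry;
}

int main(void)
{
    printf("%#x\n", rebuild_entry(0x100, OLD_SOFT_DIRTY | OLD_UFFD_WP)); /* 0x130 */
    return 0;
}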
diff --git a/mm/util.c b/mm/util.c
index 29f4f773dc7b..0837570c9225 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -377,7 +377,7 @@ unsigned long randomize_page(unsigned long start, unsigned long range)
}
#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
-unsigned long arch_randomize_brk(struct mm_struct *mm)
+unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
/* Is the current task 32bit ? */
if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
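The mm/util.c change above marks arch_randomize_brk() as __weak so an architecture file can override the common definition at link time. A minimal sketch of weak-symbol overriding follows, using the GCC/Clang attribute directly; built on its own the weak default is used, while a strong definition of the same symbol in another translation unit would win. The function name and the +0x1000 policy are illustrative assumptions.

#include <stdio.h>

__attribute__((weak)) unsigned long arch_randomize_value(unsigned long base)
{
    return base + 0x1000;   /* generic default policy */
}

int main(void)
{
    printf("%#lx\n", arch_randomize_value(0x400000));
    return 0;
}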
diff --git a/mm/vmstat.c b/mm/vmstat.c
index da525bfb6f4a..373d2730fcf2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -2049,7 +2049,7 @@ static void __init init_cpu_node_state(void)
int node;
for_each_online_node(node) {
- if (cpumask_weight(cpumask_of_node(node)) > 0)
+ if (!cpumask_empty(cpumask_of_node(node)))
node_set_state(node, N_CPU);
}
}
@@ -2081,7 +2081,7 @@ static int vmstat_cpu_dead(unsigned int cpu)
refresh_zone_stat_thresholds();
node_cpus = cpumask_of_node(node);
- if (cpumask_weight(node_cpus) > 0)
+ if (!cpumask_empty(node_cpus))
return 0;
node_clear_state(node, N_CPU);
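The vmstat hunks above replace cpumask_weight(x) > 0 with !cpumask_empty(x): an emptiness test can stop at the first non-zero word, whereas a weight (popcount) must scan the whole mask. The helpers below are illustrative userspace stand-ins, not the kernel's cpumask API.

#include <stdbool.h>
#include <stdio.h>

#define WORDS 16

static bool mask_empty(const unsigned long *mask)
{
    for (int i = 0; i < WORDS; i++)
        if (mask[i])
            return false;   /* early exit on the first set word */
    return true;
}

static unsigned int mask_weight(const unsigned long *mask)
{
    unsigned int w = 0;

    for (int i = 0; i < WORDS; i++)
        w += (unsigned int)__builtin_popcountl(mask[i]);  /* always a full scan */
    return w;
}

int main(void)
{
    unsigned long mask[WORDS] = { 1UL };   /* only bit 0 set */

    printf("empty=%d weight=%u\n", mask_empty(mask), mask_weight(mask));
    return 0;
}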
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 83b5a3514427..f41f8b0d9e9a 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -181,6 +181,7 @@ enum z3fold_page_flags {
NEEDS_COMPACTING,
PAGE_STALE,
PAGE_CLAIMED, /* by either reclaim or free */
+ PAGE_MIGRATED, /* page is migrated and soon to be released */
};
/*
@@ -212,10 +213,8 @@ static int size_to_chunks(size_t size)
static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
gfp_t gfp)
{
- struct z3fold_buddy_slots *slots;
-
- slots = kmem_cache_zalloc(pool->c_handle,
- (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));
+ struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
+ gfp);
if (slots) {
/* It will be freed separately in free_handle(). */
@@ -272,8 +271,13 @@ static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
locked = z3fold_page_trylock(zhdr);
read_unlock(&slots->lock);
- if (locked)
- break;
+ if (locked) {
+ struct page *page = virt_to_page(zhdr);
+
+ if (!test_bit(PAGE_MIGRATED, &page->private))
+ break;
+ z3fold_page_unlock(zhdr);
+ }
cpu_relax();
} while (true);
} else {
@@ -391,6 +395,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
clear_bit(NEEDS_COMPACTING, &page->private);
clear_bit(PAGE_STALE, &page->private);
clear_bit(PAGE_CLAIMED, &page->private);
+ clear_bit(PAGE_MIGRATED, &page->private);
if (headless)
return zhdr;
@@ -521,13 +526,6 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
atomic64_dec(&pool->pages_nr);
}
-static void release_z3fold_page(struct kref *ref)
-{
- struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
- refcount);
- __release_z3fold_page(zhdr, false);
-}
-
static void release_z3fold_page_locked(struct kref *ref)
{
struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
@@ -940,10 +938,19 @@ lookup:
}
}
- if (zhdr && !zhdr->slots)
- zhdr->slots = alloc_slots(pool,
- can_sleep ? GFP_NOIO : GFP_ATOMIC);
+ if (zhdr && !zhdr->slots) {
+ zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
+ if (!zhdr->slots)
+ goto out_fail;
+ }
return zhdr;
+
+out_fail:
+ if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
+ add_to_unbuddied(pool, zhdr);
+ z3fold_page_unlock(zhdr);
+ }
+ return NULL;
}
/*
@@ -1066,7 +1073,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
enum buddy bud;
bool can_sleep = gfpflags_allow_blocking(gfp);
- if (!size)
+ if (!size || (gfp & __GFP_HIGHMEM))
return -EINVAL;
if (size > PAGE_SIZE)
@@ -1093,28 +1100,7 @@ retry:
bud = FIRST;
}
- page = NULL;
- if (can_sleep) {
- spin_lock(&pool->stale_lock);
- zhdr = list_first_entry_or_null(&pool->stale,
- struct z3fold_header, buddy);
- /*
- * Before allocating a page, let's see if we can take one from
- * the stale pages list. cancel_work_sync() can sleep so we
- * limit this case to the contexts where we can sleep
- */
- if (zhdr) {
- list_del(&zhdr->buddy);
- spin_unlock(&pool->stale_lock);
- cancel_work_sync(&zhdr->work);
- page = virt_to_page(zhdr);
- } else {
- spin_unlock(&pool->stale_lock);
- }
- }
- if (!page)
- page = alloc_page(gfp);
-
+ page = alloc_page(gfp);
if (!page)
return -ENOMEM;
@@ -1134,10 +1120,9 @@ retry:
__SetPageMovable(page, pool->inode->i_mapping);
unlock_page(page);
} else {
- if (trylock_page(page)) {
- __SetPageMovable(page, pool->inode->i_mapping);
- unlock_page(page);
- }
+ WARN_ON(!trylock_page(page));
+ __SetPageMovable(page, pool->inode->i_mapping);
+ unlock_page(page);
}
z3fold_page_lock(zhdr);
@@ -1236,8 +1221,8 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
return;
}
if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
- put_z3fold_header(zhdr);
clear_bit(PAGE_CLAIMED, &page->private);
+ put_z3fold_header(zhdr);
return;
}
if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
@@ -1332,12 +1317,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
break;
}
- if (kref_get_unless_zero(&zhdr->refcount) == 0) {
- zhdr = NULL;
- break;
- }
if (!z3fold_page_trylock(zhdr)) {
- kref_put(&zhdr->refcount, release_z3fold_page);
zhdr = NULL;
continue; /* can't evict at this point */
}
@@ -1348,14 +1328,14 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
*/
if (zhdr->foreign_handles ||
test_and_set_bit(PAGE_CLAIMED, &page->private)) {
- if (!kref_put(&zhdr->refcount,
- release_z3fold_page_locked))
- z3fold_page_unlock(zhdr);
+ z3fold_page_unlock(zhdr);
zhdr = NULL;
continue; /* can't evict such page */
}
list_del_init(&zhdr->buddy);
zhdr->cpu = -1;
+ /* See comment in __z3fold_alloc. */
+ kref_get(&zhdr->refcount);
break;
}
@@ -1437,8 +1417,10 @@ next:
spin_lock(&pool->lock);
list_add(&page->lru, &pool->lru);
spin_unlock(&pool->lock);
- z3fold_page_unlock(zhdr);
+ if (list_empty(&zhdr->buddy))
+ add_to_unbuddied(pool, zhdr);
clear_bit(PAGE_CLAIMED, &page->private);
+ z3fold_page_unlock(zhdr);
}
/* We started off locked so we need to lock the pool back */
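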
@@ -1590,8 +1572,8 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
if (!z3fold_page_trylock(zhdr))
return -EAGAIN;
if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
- z3fold_page_unlock(zhdr);
clear_bit(PAGE_CLAIMED, &page->private);
+ z3fold_page_unlock(zhdr);
return -EBUSY;
}
if (work_pending(&zhdr->work)) {
@@ -1601,7 +1583,7 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
new_zhdr = page_address(newpage);
memcpy(new_zhdr, zhdr, PAGE_SIZE);
newpage->private = page->private;
- page->private = 0;
+ set_bit(PAGE_MIGRATED, &page->private);
z3fold_page_unlock(zhdr);
spin_lock_init(&new_zhdr->page_lock);
INIT_WORK(&new_zhdr->work, compact_page_work);
@@ -1631,7 +1613,8 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
- clear_bit(PAGE_CLAIMED, &page->private);
+ /* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
+ page->private = 0;
put_page(page);
return 0;
}
@@ -1653,6 +1636,8 @@ static void z3fold_page_putback(struct page *page)
spin_lock(&pool->lock);
list_add(&page->lru, &pool->lru);
spin_unlock(&pool->lock);
+ if (list_empty(&zhdr->buddy))
+ add_to_unbuddied(pool, zhdr);
clear_bit(PAGE_CLAIMED, &page->private);
z3fold_page_unlock(zhdr);
}
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 9152fbde33b5..5d5fc04385b8 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1718,11 +1718,40 @@ static enum fullness_group putback_zspage(struct size_class *class,
*/
static void lock_zspage(struct zspage *zspage)
{
- struct page *page = get_first_page(zspage);
+ struct page *curr_page, *page;
- do {
- lock_page(page);
- } while ((page = get_next_page(page)) != NULL);
+ /*
+ * Pages we haven't locked yet can be migrated off the list while we're
+ * trying to lock them, so we need to be careful and only attempt to
+ * lock each page under migrate_read_lock(). Otherwise, the page we lock
+ * may no longer belong to the zspage. This means that we may wait for
+ * the wrong page to unlock, so we must take a reference to the page
+ * prior to waiting for it to unlock outside migrate_read_lock().
+ */
+ while (1) {
+ migrate_read_lock(zspage);
+ page = get_first_page(zspage);
+ if (trylock_page(page))
+ break;
+ get_page(page);
+ migrate_read_unlock(zspage);
+ wait_on_page_locked(page);
+ put_page(page);
+ }
+
+ curr_page = page;
+ while ((page = get_next_page(curr_page))) {
+ if (trylock_page(page)) {
+ curr_page = page;
+ } else {
+ get_page(page);
+ migrate_read_unlock(zspage);
+ wait_on_page_locked(page);
+ put_page(page);
+ migrate_read_lock(zspage);
+ }
+ }
+ migrate_read_unlock(zspage);
}
static int zs_init_fs_context(struct fs_context *fc)
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 77883b6788cd..833cd3792c51 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -279,13 +279,13 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
grant_ref_t ref;
ref = priv->rings[i].intf->ref[j];
- gnttab_end_foreign_access(ref, 0);
+ gnttab_end_foreign_access(ref, NULL);
}
free_pages_exact(priv->rings[i].data.in,
1UL << (priv->rings[i].intf->ring_order +
XEN_PAGE_SHIFT));
}
- gnttab_end_foreign_access(priv->rings[i].ref, 0);
+ gnttab_end_foreign_access(priv->rings[i].ref, NULL);
free_page((unsigned long)priv->rings[i].intf);
}
kfree(priv->rings);
@@ -353,10 +353,10 @@ static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
out:
if (bytes) {
for (i--; i >= 0; i--)
- gnttab_end_foreign_access(ring->intf->ref[i], 0);
+ gnttab_end_foreign_access(ring->intf->ref[i], NULL);
free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
}
- gnttab_end_foreign_access(ring->ref, 0);
+ gnttab_end_foreign_access(ring->ref, NULL);
free_page((unsigned long)ring->intf);
return ret;
}
diff --git a/net/Kconfig.debug b/net/Kconfig.debug
index a5781cf63b16..e6ae11cc2fb7 100644
--- a/net/Kconfig.debug
+++ b/net/Kconfig.debug
@@ -20,7 +20,7 @@ config NET_NS_REFCNT_TRACKER
config DEBUG_NET
bool "Add generic networking debug"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && NET
help
Enable extra sanity checks in networking.
This is mostly used by fuzzers, but is safe to select.
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 116481e4da82..95393bb2760b 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -62,12 +62,12 @@ static void ax25_free_sock(struct sock *sk)
*/
static void ax25_cb_del(ax25_cb *ax25)
{
+ spin_lock_bh(&ax25_list_lock);
if (!hlist_unhashed(&ax25->ax25_node)) {
- spin_lock_bh(&ax25_list_lock);
hlist_del_init(&ax25->ax25_node);
- spin_unlock_bh(&ax25_list_lock);
ax25_cb_put(ax25);
}
+ spin_unlock_bh(&ax25_list_lock);
}
/*
@@ -81,6 +81,7 @@ static void ax25_kill_by_device(struct net_device *dev)
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
return;
+ ax25_dev->device_up = false;
spin_lock_bh(&ax25_list_lock);
again:
@@ -91,6 +92,7 @@ again:
spin_unlock_bh(&ax25_list_lock);
ax25_disconnect(s, ENETUNREACH);
s->ax25_dev = NULL;
+ ax25_cb_del(s);
spin_lock_bh(&ax25_list_lock);
goto again;
}
@@ -103,6 +105,7 @@ again:
dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
ax25_dev_put(ax25_dev);
}
+ ax25_cb_del(s);
release_sock(sk);
spin_lock_bh(&ax25_list_lock);
sock_put(sk);
@@ -995,9 +998,11 @@ static int ax25_release(struct socket *sock)
if (sk->sk_type == SOCK_SEQPACKET) {
switch (ax25->state) {
case AX25_STATE_0:
- release_sock(sk);
- ax25_disconnect(ax25, 0);
- lock_sock(sk);
+ if (!sock_flag(ax25->sk, SOCK_DEAD)) {
+ release_sock(sk);
+ ax25_disconnect(ax25, 0);
+ lock_sock(sk);
+ }
ax25_destroy_socket(ax25);
break;
@@ -1053,11 +1058,13 @@ static int ax25_release(struct socket *sock)
ax25_destroy_socket(ax25);
}
if (ax25_dev) {
- del_timer_sync(&ax25->timer);
- del_timer_sync(&ax25->t1timer);
- del_timer_sync(&ax25->t2timer);
- del_timer_sync(&ax25->t3timer);
- del_timer_sync(&ax25->idletimer);
+ if (!ax25_dev->device_up) {
+ del_timer_sync(&ax25->timer);
+ del_timer_sync(&ax25->t1timer);
+ del_timer_sync(&ax25->t2timer);
+ del_timer_sync(&ax25->t3timer);
+ del_timer_sync(&ax25->idletimer);
+ }
dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
ax25_dev_put(ax25_dev);
}
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index b80fccbac62a..95a76d571c44 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -62,6 +62,7 @@ void ax25_dev_device_up(struct net_device *dev)
ax25_dev->dev = dev;
dev_hold_track(dev, &ax25_dev->dev_tracker, GFP_ATOMIC);
ax25_dev->forward = NULL;
+ ax25_dev->device_up = true;
ax25_dev->values[AX25_VALUES_IPDEFMODE] = AX25_DEF_IPDEFMODE;
ax25_dev->values[AX25_VALUES_AXDEFMODE] = AX25_DEF_AXDEFMODE;
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 3a476e4f6cd0..9ff98f46dc6b 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -268,7 +268,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
del_timer_sync(&ax25->t3timer);
del_timer_sync(&ax25->idletimer);
} else {
- if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
+ if (ax25->sk && !sock_flag(ax25->sk, SOCK_DESTROY))
ax25_stop_heartbeat(ax25);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 7057f8db4f99..1daf95e17d67 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -906,7 +906,6 @@ int crush_do_rule(const struct crush_map *map,
int recurse_to_leaf;
int wsize = 0;
int osize;
- int *tmp;
const struct crush_rule *rule;
__u32 step;
int i, j;
@@ -1073,9 +1072,7 @@ int crush_do_rule(const struct crush_map *map,
memcpy(o, c, osize*sizeof(*o));
/* swap o and w arrays */
- tmp = o;
- o = w;
- w = tmp;
+ swap(o, w);
wsize = osize;
break;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 47b6c1f0fdbb..54625287ee5b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1579,7 +1579,7 @@ static void neigh_managed_work(struct work_struct *work)
list_for_each_entry(neigh, &tbl->managed_list, managed_list)
neigh_event_send_probe(neigh, NULL, false);
queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
- NEIGH_VAR(&tbl->parms, DELAY_PROBE_TIME));
+ max(NEIGH_VAR(&tbl->parms, DELAY_PROBE_TIME), HZ));
write_unlock_bh(&tbl->lock);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3231af73e430..2e2a9ece9af2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2706,12 +2706,15 @@ static void tcp_mtup_probe_success(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
+ u64 val;
- /* FIXME: breaks with very large cwnd */
tp->prior_ssthresh = tcp_current_ssthresh(sk);
- tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) *
- tcp_mss_to_mtu(sk, tp->mss_cache) /
- icsk->icsk_mtup.probe_size);
+
+ val = (u64)tcp_snd_cwnd(tp) * tcp_mss_to_mtu(sk, tp->mss_cache);
+ do_div(val, icsk->icsk_mtup.probe_size);
+ DEBUG_NET_WARN_ON_ONCE((u32)val != val);
+ tcp_snd_cwnd_set(tp, max_t(u32, 1U, val));
+
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd_stamp = tcp_jiffies32;
tp->snd_ssthresh = tcp_current_ssthresh(sk);
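The tcp_mtup_probe_success() hunk above widens the cwnd * mss_to_mtu product to 64 bits, divides with do_div(), and clamps the result to at least 1 before storing it back into a 32-bit field, so very large windows no longer overflow. The sketch below demonstrates the same widen-then-divide-then-clamp step with invented values and names.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint32_t scale_cwnd(uint32_t cwnd, uint32_t old_mtu, uint32_t new_mtu)
{
    uint64_t val = (uint64_t)cwnd * old_mtu;   /* widen before multiplying */

    val /= new_mtu;                            /* the kernel uses do_div() here */
    if (val < 1)
        val = 1;                               /* never collapse cwnd to 0 */
    return (uint32_t)val;
}

int main(void)
{
    /* 3,000,000 segments * 1460 bytes overflows 32 bits if done naively. */
    printf("%" PRIu32 "\n", scale_cwnd(3000000u, 1460u, 1500u));
    return 0;
}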
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index dac2650f3863..fe8f23b95d32 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1207,8 +1207,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
key->l3index = l3index;
key->flags = flags;
memcpy(&key->addr, addr,
- (family == AF_INET6) ? sizeof(struct in6_addr) :
- sizeof(struct in_addr));
+ (IS_ENABLED(CONFIG_IPV6) && family == AF_INET6) ? sizeof(struct in6_addr) :
+ sizeof(struct in_addr));
hlist_add_head_rcu(&key->node, &md5sig->head);
return 0;
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b4b2284ed4a2..1c054431e358 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -4115,8 +4115,8 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL,
NULL);
if (!res) {
- __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
- __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+ TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
if (unlikely(tcp_passive_fastopen(sk)))
tcp_sk(sk)->total_retrans++;
trace_tcp_retransmit_synack(sk, req);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ca0aa744593e..1b1932502e9e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5586,7 +5586,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_IOAM6_ID] = cnf->ioam6_id;
array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
- array[DEVCONF_ACCEPT_UNSOLICITED_NA] = cnf->accept_unsolicited_na;
+ array[DEVCONF_ACCEPT_UNTRACKED_NA] = cnf->accept_untracked_na;
}
static inline size_t inet6_ifla6_size(void)
@@ -7038,8 +7038,8 @@ static const struct ctl_table addrconf_sysctl[] = {
.extra2 = (void *)SYSCTL_ONE,
},
{
- .procname = "accept_unsolicited_na",
- .data = &ipv6_devconf.accept_unsolicited_na,
+ .procname = "accept_untracked_na",
+ .data = &ipv6_devconf.accept_untracked_na,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 254addad0dd3..b0dfe97ea4ee 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -979,7 +979,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
struct inet6_dev *idev = __in6_dev_get(dev);
struct inet6_ifaddr *ifp;
struct neighbour *neigh;
- bool create_neigh;
+ u8 new_state;
if (skb->len < sizeof(struct nd_msg)) {
ND_PRINTK(2, warn, "NA: packet too short\n");
@@ -1000,7 +1000,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
/* For some 802.11 wireless deployments (and possibly other networks),
* there will be a NA proxy and unsolicited packets are attacks
* and thus should not be accepted.
- * drop_unsolicited_na takes precedence over accept_unsolicited_na
+ * drop_unsolicited_na takes precedence over accept_untracked_na
*/
if (!msg->icmph.icmp6_solicited && idev &&
idev->cnf.drop_unsolicited_na)
@@ -1041,25 +1041,33 @@ static void ndisc_recv_na(struct sk_buff *skb)
in6_ifa_put(ifp);
return;
}
+
+ neigh = neigh_lookup(&nd_tbl, &msg->target, dev);
+
/* RFC 9131 updates original Neighbour Discovery RFC 4861.
- * An unsolicited NA can now create a neighbour cache entry
- * on routers if it has Target LL Address option.
+ * NAs with Target LL Address option without a corresponding
+ * entry in the neighbour cache can now create a STALE neighbour
+ * cache entry on routers.
+ *
+ * entry accept fwding solicited behaviour
+ * ------- ------ ------ --------- ----------------------
+ * present X X 0 Set state to STALE
+ * present X X 1 Set state to REACHABLE
+ * absent 0 X X Do nothing
+ * absent 1 0 X Do nothing
+ * absent 1 1 X Add a new STALE entry
*
- * drop accept fwding behaviour
- * ---- ------ ------ ----------------------------------------------
- * 1 X X Drop NA packet and don't pass up the stack
- * 0 0 X Pass NA packet up the stack, don't update NC
- * 0 1 0 Pass NA packet up the stack, don't update NC
- * 0 1 1 Pass NA packet up the stack, and add a STALE
- * NC entry
* Note that we don't do a (daddr == all-routers-mcast) check.
*/
- create_neigh = !msg->icmph.icmp6_solicited && lladdr &&
- idev && idev->cnf.forwarding &&
- idev->cnf.accept_unsolicited_na;
- neigh = __neigh_lookup(&nd_tbl, &msg->target, dev, create_neigh);
+ new_state = msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE;
+ if (!neigh && lladdr &&
+ idev && idev->cnf.forwarding &&
+ idev->cnf.accept_untracked_na) {
+ neigh = neigh_create(&nd_tbl, &msg->target, dev);
+ new_state = NUD_STALE;
+ }
- if (neigh) {
+ if (neigh && !IS_ERR(neigh)) {
u8 old_flags = neigh->flags;
struct net *net = dev_net(dev);
@@ -1079,7 +1087,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
}
ndisc_update(dev, neigh, lladdr,
- msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE,
+ new_state,
NEIGH_UPDATE_F_WEAK_OVERRIDE|
(msg->icmph.icmp6_override ? NEIGH_UPDATE_F_OVERRIDE : 0)|
NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index ff033d16549e..ecf3a553a0dc 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -101,6 +101,9 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipc6.sockc.tsflags = sk->sk_tsflags;
ipc6.sockc.mark = sk->sk_mark;
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = oif;
+
if (msg->msg_controllen) {
struct ipv6_txoptions opt = {};
@@ -112,17 +115,14 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
return err;
/* Changes to txoptions and flow info are not implemented, yet.
- * Drop the options, fl6 is wiped below.
+ * Drop the options.
*/
ipc6.opt = NULL;
}
- memset(&fl6, 0, sizeof(fl6));
-
fl6.flowi6_proto = IPPROTO_ICMPV6;
fl6.saddr = np->saddr;
fl6.daddr = *daddr;
- fl6.flowi6_oif = oif;
fl6.flowi6_mark = ipc6.sockc.mark;
fl6.flowi6_uid = sk->sk_uid;
fl6.fl6_icmp_type = user_icmph.icmp6_type;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 11e1a3a3e442..fb16d7c4e1b8 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2826,10 +2826,12 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
void *ext_hdrs[SADB_EXT_MAX];
int err;
- err = pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
- BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
- if (err)
- return err;
+ /* Non-zero return value of pfkey_broadcast() does not always signal
+ * an error and even on an actual error we may still want to process
+ * the message so rather ignore the return value.
+ */
+ pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
+ BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
memset(ext_hdrs, 0, sizeof(ext_hdrs));
err = parse_exthdrs(skb, hdr, ext_hdrs);
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index e3452445b363..d8246e00a10b 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -1749,12 +1749,9 @@ int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata)
if (new_ctx->replace_state == IEEE80211_CHANCTX_REPLACE_NONE) {
if (old_ctx)
- err = ieee80211_vif_use_reserved_reassign(sdata);
- else
- err = ieee80211_vif_use_reserved_assign(sdata);
+ return ieee80211_vif_use_reserved_reassign(sdata);
- if (err)
- return err;
+ return ieee80211_vif_use_reserved_assign(sdata);
}
/*
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 12fc9cda4a2c..746be13438ef 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -222,12 +222,18 @@ err_register:
}
static void nft_netdev_unregister_hooks(struct net *net,
- struct list_head *hook_list)
+ struct list_head *hook_list,
+ bool release_netdev)
{
- struct nft_hook *hook;
+ struct nft_hook *hook, *next;
- list_for_each_entry(hook, hook_list, list)
+ list_for_each_entry_safe(hook, next, hook_list, list) {
nf_unregister_net_hook(net, &hook->ops);
+ if (release_netdev) {
+ list_del(&hook->list);
+ kfree_rcu(hook, rcu);
+ }
+ }
}
static int nf_tables_register_hook(struct net *net,
@@ -253,9 +259,10 @@ static int nf_tables_register_hook(struct net *net,
return nf_register_net_hook(net, &basechain->ops);
}
-static void nf_tables_unregister_hook(struct net *net,
- const struct nft_table *table,
- struct nft_chain *chain)
+static void __nf_tables_unregister_hook(struct net *net,
+ const struct nft_table *table,
+ struct nft_chain *chain,
+ bool release_netdev)
{
struct nft_base_chain *basechain;
const struct nf_hook_ops *ops;
@@ -270,11 +277,19 @@ static void nf_tables_unregister_hook(struct net *net,
return basechain->type->ops_unregister(net, ops);
if (nft_base_chain_netdev(table->family, basechain->ops.hooknum))
- nft_netdev_unregister_hooks(net, &basechain->hook_list);
+ nft_netdev_unregister_hooks(net, &basechain->hook_list,
+ release_netdev);
else
nf_unregister_net_hook(net, &basechain->ops);
}
+static void nf_tables_unregister_hook(struct net *net,
+ const struct nft_table *table,
+ struct nft_chain *chain)
+{
+ return __nf_tables_unregister_hook(net, table, chain, false);
+}
+
static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *trans)
{
struct nftables_pernet *nft_net = nft_pernet(net);
@@ -2873,27 +2888,31 @@ static struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
err = nf_tables_expr_parse(ctx, nla, &expr_info);
if (err < 0)
- goto err1;
+ goto err_expr_parse;
+
+ err = -EOPNOTSUPP;
+ if (!(expr_info.ops->type->flags & NFT_EXPR_STATEFUL))
+ goto err_expr_stateful;
err = -ENOMEM;
expr = kzalloc(expr_info.ops->size, GFP_KERNEL_ACCOUNT);
if (expr == NULL)
- goto err2;
+ goto err_expr_stateful;
err = nf_tables_newexpr(ctx, &expr_info, expr);
if (err < 0)
- goto err3;
+ goto err_expr_new;
return expr;
-err3:
+err_expr_new:
kfree(expr);
-err2:
+err_expr_stateful:
owner = expr_info.ops->type->owner;
if (expr_info.ops->type->release_ops)
expr_info.ops->type->release_ops(expr_info.ops);
module_put(owner);
-err1:
+err_expr_parse:
return ERR_PTR(err);
}
@@ -4242,6 +4261,9 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
u32 len;
int err;
+ if (desc->field_count >= ARRAY_SIZE(desc->field_len))
+ return -E2BIG;
+
err = nla_parse_nested_deprecated(tb, NFTA_SET_FIELD_MAX, attr,
nft_concat_policy, NULL);
if (err < 0)
@@ -4251,9 +4273,8 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
return -EINVAL;
len = ntohl(nla_get_be32(tb[NFTA_SET_FIELD_LEN]));
-
- if (len * BITS_PER_BYTE / 32 > NFT_REG32_COUNT)
- return -E2BIG;
+ if (!len || len > U8_MAX)
+ return -EINVAL;
desc->field_len[desc->field_count++] = len;
@@ -4264,7 +4285,8 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
const struct nlattr *nla)
{
struct nlattr *attr;
- int rem, err;
+ u32 num_regs = 0;
+ int rem, err, i;
nla_for_each_nested(attr, nla, rem) {
if (nla_type(attr) != NFTA_LIST_ELEM)
@@ -4275,6 +4297,12 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
return err;
}
+ for (i = 0; i < desc->field_count; i++)
+ num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
+
+ if (num_regs > NFT_REG32_COUNT)
+ return -E2BIG;
+
return 0;
}
@@ -5344,8 +5372,10 @@ static int nf_tables_getsetelem(struct sk_buff *skb,
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
err = nft_get_set_elem(&ctx, set, attr);
- if (err < 0)
+ if (err < 0) {
+ NL_SET_BAD_ATTR(extack, attr);
break;
+ }
}
return err;
@@ -5413,9 +5443,6 @@ struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx,
return expr;
err = -EOPNOTSUPP;
- if (!(expr->ops->type->flags & NFT_EXPR_STATEFUL))
- goto err_set_elem_expr;
-
if (expr->ops->type->flags & NFT_EXPR_GC) {
if (set->flags & NFT_SET_TIMEOUT)
goto err_set_elem_expr;
@@ -6125,8 +6152,10 @@ static int nf_tables_newsetelem(struct sk_buff *skb,
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
err = nft_add_set_elem(&ctx, set, attr, info->nlh->nlmsg_flags);
- if (err < 0)
+ if (err < 0) {
+ NL_SET_BAD_ATTR(extack, attr);
return err;
+ }
}
if (nft_net->validate_state == NFT_VALIDATE_DO)
@@ -6396,8 +6425,10 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
err = nft_del_setelem(&ctx, set, attr);
- if (err < 0)
+ if (err < 0) {
+ NL_SET_BAD_ATTR(extack, attr);
break;
+ }
}
return err;
}
@@ -7291,13 +7322,25 @@ static void nft_unregister_flowtable_hook(struct net *net,
FLOW_BLOCK_UNBIND);
}
-static void nft_unregister_flowtable_net_hooks(struct net *net,
- struct list_head *hook_list)
+static void __nft_unregister_flowtable_net_hooks(struct net *net,
+ struct list_head *hook_list,
+ bool release_netdev)
{
- struct nft_hook *hook;
+ struct nft_hook *hook, *next;
- list_for_each_entry(hook, hook_list, list)
+ list_for_each_entry_safe(hook, next, hook_list, list) {
nf_unregister_net_hook(net, &hook->ops);
+ if (release_netdev) {
+ list_del(&hook->list);
+ kfree_rcu(hook);
+ }
+ }
+}
+
+static void nft_unregister_flowtable_net_hooks(struct net *net,
+ struct list_head *hook_list)
+{
+ __nft_unregister_flowtable_net_hooks(net, hook_list, false);
}
static int nft_register_flowtable_net_hooks(struct net *net,
@@ -9739,9 +9782,10 @@ static void __nft_release_hook(struct net *net, struct nft_table *table)
struct nft_chain *chain;
list_for_each_entry(chain, &table->chains, list)
- nf_tables_unregister_hook(net, table, chain);
+ __nf_tables_unregister_hook(net, table, chain, true);
list_for_each_entry(flowtable, &table->flowtables, list)
- nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list);
+ __nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list,
+ true);
}
static void __nft_release_hooks(struct net *net)
@@ -9880,7 +9924,11 @@ static int __net_init nf_tables_init_net(struct net *net)
static void __net_exit nf_tables_pre_exit_net(struct net *net)
{
+ struct nftables_pernet *nft_net = nft_pernet(net);
+
+ mutex_lock(&nft_net->commit_mutex);
__nft_release_hooks(net);
+ mutex_unlock(&nft_net->commit_mutex);
}
static void __net_exit nf_tables_exit_net(struct net *net)
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index ad3bbe34ca88..2f7c477fc9e7 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -45,7 +45,6 @@ MODULE_DESCRIPTION("Netfilter messages via netlink socket");
static unsigned int nfnetlink_pernet_id __read_mostly;
struct nfnl_net {
- unsigned int ctnetlink_listeners;
struct sock *nfnl;
};
@@ -673,18 +672,8 @@ static int nfnetlink_bind(struct net *net, int group)
#ifdef CONFIG_NF_CONNTRACK_EVENTS
if (type == NFNL_SUBSYS_CTNETLINK) {
- struct nfnl_net *nfnlnet = nfnl_pernet(net);
-
nfnl_lock(NFNL_SUBSYS_CTNETLINK);
-
- if (WARN_ON_ONCE(nfnlnet->ctnetlink_listeners == UINT_MAX)) {
- nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
- return -EOVERFLOW;
- }
-
- nfnlnet->ctnetlink_listeners++;
- if (nfnlnet->ctnetlink_listeners == 1)
- WRITE_ONCE(net->ct.ctnetlink_has_listener, true);
+ WRITE_ONCE(net->ct.ctnetlink_has_listener, true);
nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
}
#endif
@@ -694,15 +683,12 @@ static int nfnetlink_bind(struct net *net, int group)
static void nfnetlink_unbind(struct net *net, int group)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- int type = nfnl_group2type[group];
-
- if (type == NFNL_SUBSYS_CTNETLINK) {
- struct nfnl_net *nfnlnet = nfnl_pernet(net);
+ if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
+ return;
+ if (nfnl_group2type[group] == NFNL_SUBSYS_CTNETLINK) {
nfnl_lock(NFNL_SUBSYS_CTNETLINK);
- WARN_ON_ONCE(nfnlnet->ctnetlink_listeners == 0);
- nfnlnet->ctnetlink_listeners--;
- if (nfnlnet->ctnetlink_listeners == 0)
+ if (!nfnetlink_has_listeners(net, group))
WRITE_ONCE(net->ct.ctnetlink_has_listener, false);
nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
}
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index f069c24c6146..af15102bc696 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -35,12 +35,13 @@ static unsigned int nfct_timeout_id __read_mostly;
struct ctnl_timeout {
struct list_head head;
+ struct list_head free_head;
struct rcu_head rcu_head;
refcount_t refcnt;
char name[CTNL_TIMEOUT_NAME_MAX];
- struct nf_ct_timeout timeout;
- struct list_head free_head;
+ /* must be at the end */
+ struct nf_ct_timeout timeout;
};
struct nfct_timeout_pernet {
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index a16cf47199b7..a25c88bc8b75 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -232,19 +232,21 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
- fl.u.ip4.saddr = ct->tuplehash[dir].tuple.dst.u3.ip;
+ fl.u.ip4.saddr = ct->tuplehash[!dir].tuple.src.u3.ip;
fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
fl.u.ip4.flowi4_iif = this_dst->dev->ifindex;
fl.u.ip4.flowi4_tos = RT_TOS(ip_hdr(pkt->skb)->tos);
fl.u.ip4.flowi4_mark = pkt->skb->mark;
+ fl.u.ip4.flowi4_flags = FLOWI_FLAG_ANYSRC;
break;
case NFPROTO_IPV6:
fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
- fl.u.ip6.saddr = ct->tuplehash[dir].tuple.dst.u3.in6;
+ fl.u.ip6.saddr = ct->tuplehash[!dir].tuple.src.u3.in6;
fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
fl.u.ip6.flowi6_iif = this_dst->dev->ifindex;
fl.u.ip6.flowlabel = ip6_flowinfo(ipv6_hdr(pkt->skb));
fl.u.ip6.flowi6_mark = pkt->skb->mark;
+ fl.u.ip6.flowi6_flags = FLOWI_FLAG_ANYSRC;
break;
}
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index 04ea8b9bf202..981addb2d051 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -213,6 +213,8 @@ static int nft_limit_pkts_clone(struct nft_expr *dst, const struct nft_expr *src
struct nft_limit_priv_pkts *priv_dst = nft_expr_priv(dst);
struct nft_limit_priv_pkts *priv_src = nft_expr_priv(src);
+ priv_dst->cost = priv_src->cost;
+
return nft_limit_clone(&priv_dst->limit, &priv_src->limit);
}
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 6ff3e10ff8e3..eb2c0959e5b6 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -975,7 +975,7 @@ static void nfc_release(struct device *d)
kfree(se);
}
- ida_simple_remove(&nfc_index_ida, dev->idx);
+ ida_free(&nfc_index_ida, dev->idx);
kfree(dev);
}
@@ -1066,7 +1066,7 @@ struct nfc_dev *nfc_allocate_device(const struct nfc_ops *ops,
if (!dev)
return NULL;
- rc = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
+ rc = ida_alloc(&nfc_index_ida, GFP_KERNEL);
if (rc < 0)
goto err_free_dev;
dev->idx = rc;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 677f9cfa9660..ca6e92a22923 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1935,8 +1935,10 @@ static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
/* Move network header to the right position for VLAN tagged packets */
if (likely(skb->dev->type == ARPHRD_ETHER) &&
eth_type_vlan(skb->protocol) &&
- __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
- skb_set_network_header(skb, depth);
+ __vlan_get_protocol(skb, skb->protocol, &depth) != 0) {
+ if (pskb_may_pull(skb, depth))
+ skb_set_network_header(skb, depth);
+ }
skb_probe_transport_header(skb);
}
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 8af9d6e5ba61..e013253b10d1 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -548,7 +548,7 @@ tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
break;
#endif
default:
- return -1;
+ return false;
}
if (ip6h->hop_limit <= 1)
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index a201bf29af98..433bb5a7df31 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -2161,6 +2161,7 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
not_found:
ini->smcr_version &= ~SMC_V2;
+ ini->smcrv2.ib_dev_v2 = NULL;
ini->check_smcrv2 = false;
}
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index 5c731f27996e..53f63bfbaf5f 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -82,7 +82,7 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
/* abnormal termination */
if (!rc)
smc_wr_tx_put_slot(link,
- (struct smc_wr_tx_pend_priv *)pend);
+ (struct smc_wr_tx_pend_priv *)(*pend));
rc = -EPIPE;
}
return rc;
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 281ddb87ac8d..190a4de239c8 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -1121,6 +1121,7 @@ static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
+ struct rpc_xprt *xprt = &r_xprt->rx_xprt;
struct xdr_stream *xdr = &rep->rr_stream;
__be32 *p;
@@ -1144,6 +1145,10 @@ rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
if (*p != cpu_to_be32(RPC_CALL))
return false;
+ /* No bc service. */
+ if (xprt->bc_serv == NULL)
+ return false;
+
/* Now that we are sure this is a backchannel call,
* advance to the RPC header.
*/
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 6d39ca05f249..932c87b98eca 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -259,9 +259,8 @@ static int tipc_enable_bearer(struct net *net, const char *name,
u32 i;
if (!bearer_name_validate(name, &b_names)) {
- errstr = "illegal name";
NL_SET_ERR_MSG(extack, "Illegal name");
- goto rejected;
+ return res;
}
if (prio > TIPC_MAX_LINK_PRI && prio != TIPC_MEDIA_LINK_PRI) {
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index e111e13b6660..fd98229e3db3 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -78,6 +78,9 @@ struct hvs_send_buf {
ALIGN((payload_len), 8) + \
VMBUS_PKT_TRAILER_SIZE)
+/* Upper bound on the size of a VMbus packet for hv_sock */
+#define HVS_MAX_PKT_SIZE HVS_PKT_LEN(HVS_MTU_SIZE)
+
union hvs_service_id {
guid_t srv_id;
@@ -378,6 +381,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
rcvbuf = ALIGN(rcvbuf, HV_HYP_PAGE_SIZE);
}
+ chan->max_pkt_size = HVS_MAX_PKT_SIZE;
+
ret = vmbus_open(chan, sndbuf, rcvbuf, NULL, 0, hvs_channel_cb,
conn_from_host ? new : sk);
if (ret != 0) {
@@ -572,12 +577,18 @@ static bool hvs_dgram_allow(u32 cid, u32 port)
static int hvs_update_recv_data(struct hvsock *hvs)
{
struct hvs_recv_buf *recv_buf;
- u32 payload_len;
+ u32 pkt_len, payload_len;
+
+ pkt_len = hv_pkt_len(hvs->recv_desc);
+
+ if (pkt_len < HVS_HEADER_LEN)
+ return -EIO;
recv_buf = (struct hvs_recv_buf *)(hvs->recv_desc + 1);
payload_len = recv_buf->hdr.data_size;
- if (payload_len > HVS_MTU_SIZE)
+ if (payload_len > pkt_len - HVS_HEADER_LEN ||
+ payload_len > HVS_MTU_SIZE)
return -EIO;
if (payload_len == 0)
@@ -602,7 +613,9 @@ static ssize_t hvs_stream_dequeue(struct vsock_sock *vsk, struct msghdr *msg,
return -EOPNOTSUPP;
if (need_refill) {
- hvs->recv_desc = hv_pkt_iter_first_raw(hvs->chan);
+ hvs->recv_desc = hv_pkt_iter_first(hvs->chan);
+ if (!hvs->recv_desc)
+ return -ENOBUFS;
ret = hvs_update_recv_data(hvs);
if (ret)
return ret;
@@ -616,7 +629,7 @@ static ssize_t hvs_stream_dequeue(struct vsock_sock *vsk, struct msghdr *msg,
hvs->recv_data_len -= to_read;
if (hvs->recv_data_len == 0) {
- hvs->recv_desc = hv_pkt_iter_next_raw(hvs->chan, hvs->recv_desc);
+ hvs->recv_desc = hv_pkt_iter_next(hvs->chan, hvs->recv_desc);
if (hvs->recv_desc) {
ret = hvs_update_recv_data(hvs);
if (ret)
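The hvs_update_recv_data() hunk above stops trusting the peer-supplied data_size field until it has been checked against both the real packet length and the fixed MTU bound. A small C sketch of that defensive validation follows; the header layout, constants, and return conventions are invented for illustration and are not the hv_sock structures.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define HDR_LEN   8u
#define MTU_SIZE  1500u

struct pkt_hdr {
    uint32_t data_size;     /* claimed payload length, peer-controlled */
    uint32_t flags;
};

static int validate_pkt(const struct pkt_hdr *h, size_t pkt_len)
{
    if (pkt_len < HDR_LEN)
        return -1;                        /* too short to even hold a header */
    if (h->data_size > pkt_len - HDR_LEN)
        return -1;                        /* claims more payload than was received */
    if (h->data_size > MTU_SIZE)
        return -1;                        /* exceeds the protocol's fixed bound */
    return 0;
}

int main(void)
{
    struct pkt_hdr h = { .data_size = 16, .flags = 0 };

    printf("ok=%d\n", validate_pkt(&h, HDR_LEN + 16));   /* accepted */
    printf("ok=%d\n", validate_pkt(&h, HDR_LEN + 8));    /* rejected: short packet */
    return 0;
}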
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index d4935b3b9983..555ab35cd119 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -273,6 +273,7 @@ static int xfrm4_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
*/
static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
+ bool small_ipv6 = (skb->protocol == htons(ETH_P_IPV6)) && (skb->len <= IPV6_MIN_MTU);
struct dst_entry *dst = skb_dst(skb);
struct iphdr *top_iph;
int flags;
@@ -303,7 +304,7 @@ static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
if (flags & XFRM_STATE_NOECN)
IP_ECN_clear(top_iph);
- top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
+ top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) || small_ipv6 ?
0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 3514c2149e9d..ece44b735061 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -16,6 +16,10 @@ pound := \#
dot-target = $(dir $@).$(notdir $@)
###
+# Name of target with a '.tmp_' as filename prefix. foo/bar.o => foo/.tmp_bar.o
+tmp-target = $(dir $@).tmp_$(notdir $@)
+
+###
# The temporary file to save gcc -MMD generated dependencies must not
# contain a comma
depfile = $(subst $(comma),_,$(dot-target).d)
@@ -138,9 +142,11 @@ check-FORCE = $(if $(filter FORCE, $^),,$(warning FORCE prerequisite is missing)
if-changed-cond = $(newer-prereqs)$(cmd-check)$(check-FORCE)
# Execute command if command has changed or prerequisite(s) are updated.
-if_changed = $(if $(if-changed-cond), \
+if_changed = $(if $(if-changed-cond),$(cmd_and_savecmd),@:)
+
+cmd_and_savecmd = \
$(cmd); \
- printf '%s\n' 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd, @:)
+ printf '%s\n' 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd
# Execute the command and also postprocess generated .d dependencies file.
if_changed_dep = $(if $(if-changed-cond),$(cmd_and_fixdep),@:)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 06400504150b..1f01ac65c0cd 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -88,10 +88,6 @@ endif
targets-for-modules := $(foreach x, o mod $(if $(CONFIG_TRIM_UNUSED_KSYMS), usyms), \
$(patsubst %.o, %.$x, $(filter %.o, $(obj-m))))
-ifneq ($(CONFIG_LTO_CLANG)$(CONFIG_X86_KERNEL_IBT),)
-targets-for-modules += $(patsubst %.o, %.prelink.o, $(filter %.o, $(obj-m)))
-endif
-
ifdef need-modorder
targets-for-modules += $(obj)/modules.order
endif
@@ -152,8 +148,18 @@ $(obj)/%.ll: $(src)/%.c FORCE
# The C file is compiled and updated dependency information is generated.
# (See cmd_cc_o_c + relevant part of rule_cc_o_c)
+is-single-obj-m = $(and $(part-of-module),$(filter $@, $(obj-m)),y)
+
+# When a module consists of a single object, there is no reason to keep LLVM IR.
+# Make $(LD) convert LLVM IR to ELF here.
+ifdef CONFIG_LTO_CLANG
+cmd_ld_single_m = $(if $(is-single-obj-m), ; $(LD) $(ld_flags) -r -o $(tmp-target) $@; mv $(tmp-target) $@)
+endif
+
quiet_cmd_cc_o_c = CC $(quiet_modtag) $@
- cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $< $(cmd_objtool)
+ cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $< \
+ $(cmd_ld_single_m) \
+ $(cmd_objtool)
ifdef CONFIG_MODVERSIONS
# When module versioning is enabled the following steps are executed:
@@ -204,54 +210,25 @@ cmd_record_mcount = $(if $(findstring $(strip $(CC_FLAGS_FTRACE)),$(_c_flags)),
$(sub_cmd_record_mcount))
endif # CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT
-ifdef CONFIG_OBJTOOL
-
-objtool := $(objtree)/tools/objtool/objtool
-
-objtool_args = \
- $(if $(CONFIG_HAVE_JUMP_LABEL_HACK), --hacks=jump_label) \
- $(if $(CONFIG_HAVE_NOINSTR_HACK), --hacks=noinstr) \
- $(if $(CONFIG_X86_KERNEL_IBT), --ibt) \
- $(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount) \
- $(if $(CONFIG_UNWINDER_ORC), --orc) \
- $(if $(CONFIG_RETPOLINE), --retpoline) \
- $(if $(CONFIG_SLS), --sls) \
- $(if $(CONFIG_STACK_VALIDATION), --stackval) \
- $(if $(CONFIG_HAVE_STATIC_CALL_INLINE), --static-call) \
- --uaccess \
- $(if $(linked-object), --link) \
- $(if $(part-of-module), --module) \
- $(if $(CONFIG_GCOV_KERNEL), --no-unreachable)
-
-cmd_objtool = $(if $(objtool-enabled), ; $(objtool) $(objtool_args) $@)
-cmd_gen_objtooldep = $(if $(objtool-enabled), { echo ; echo '$@: $$(wildcard $(objtool))' ; } >> $(dot-target).cmd)
-
-endif # CONFIG_OBJTOOL
-
-ifneq ($(CONFIG_LTO_CLANG)$(CONFIG_X86_KERNEL_IBT),)
-
-# Skip objtool for LLVM bitcode
-$(obj)/%.o: objtool-enabled :=
-
-else
-
# 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
# 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file
# 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file
-$(obj)/%.o: objtool-enabled = $(if $(filter-out y%, \
- $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n),y)
+is-standard-object = $(if $(filter-out y%, $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n),y)
-endif
+$(obj)/%.o: objtool-enabled = $(if $(is-standard-object),$(if $(delay-objtool),$(is-single-obj-m),y))
ifdef CONFIG_TRIM_UNUSED_KSYMS
cmd_gen_ksymdeps = \
$(CONFIG_SHELL) $(srctree)/scripts/gen_ksymdeps.sh $@ >> $(dot-target).cmd
endif
+cmd_check_local_export = $(srctree)/scripts/check-local-export $@
+
define rule_cc_o_c
$(call cmd_and_fixdep,cc_o_c)
$(call cmd,gen_ksymdeps)
+ $(call cmd,check_local_export)
$(call cmd,checksrc)
$(call cmd,checkdoc)
$(call cmd,gen_objtooldep)
@@ -262,6 +239,7 @@ endef
define rule_as_o_S
$(call cmd_and_fixdep,as_o_S)
$(call cmd,gen_ksymdeps)
+ $(call cmd,check_local_export)
$(call cmd,gen_objtooldep)
$(call cmd,gen_symversions_S)
endef
@@ -271,27 +249,10 @@ $(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE
$(call if_changed_rule,cc_o_c)
$(call cmd,force_checksrc)
-ifneq ($(CONFIG_LTO_CLANG)$(CONFIG_X86_KERNEL_IBT),)
-# Module .o files may contain LLVM bitcode, compile them into native code
-# before ELF processing
-quiet_cmd_cc_prelink_modules = LD [M] $@
- cmd_cc_prelink_modules = \
- $(LD) $(ld_flags) -r -o $@ \
- --whole-archive $(filter-out FORCE,$^) \
- $(cmd_objtool)
-
-# objtool was skipped for LLVM bitcode, run it now that we have compiled
-# modules into native code
-$(obj)/%.prelink.o: objtool-enabled = y
-$(obj)/%.prelink.o: part-of-module := y
-$(obj)/%.prelink.o: linked-object := y
-
-$(obj)/%.prelink.o: $(obj)/%.o FORCE
- $(call if_changed,cc_prelink_modules)
-endif
-
-cmd_mod = echo $(addprefix $(obj)/, $(call real-search, $*.o, .o, -objs -y -m)) | \
- $(AWK) -v RS='( |\n)' '!x[$$0]++' > $@
+# To make this rule robust against the "Argument list too long" error,
+# the $(obj)/ prefix is added by a shell command.
+cmd_mod = echo $(call real-search, $*.o, .o, -objs -y -m) | \
+ $(AWK) -v RS='( |\n)' '!x[$$0]++ { print("$(obj)/"$$0) }' > $@
$(obj)/%.mod: FORCE
$(call if_changed,mod)
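For illustration only (not part of the patch), a minimal shell sketch of what the deduplicating awk pipeline above does; the object names and the drivers/misc/ directory are made up, and GNU awk is assumed for the regex record separator:

  $ echo foo.o bar.o foo.o | awk -v RS='( |\n)' '!x[$0]++ { print("drivers/misc/"$0) }'
  drivers/misc/foo.o
  drivers/misc/bar.o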
@@ -299,7 +260,7 @@ $(obj)/%.mod: FORCE
# List module undefined symbols
cmd_undefined_syms = $(NM) $< | sed -n 's/^ *U //p' > $@
-$(obj)/%.usyms: $(obj)/%$(mod-prelink-ext).o FORCE
+$(obj)/%.usyms: $(obj)/%.o FORCE
$(call if_changed,undefined_syms)
quiet_cmd_cc_lst_c = MKLST $@
@@ -392,9 +353,14 @@ $(subdir-modorder): $(obj)/%/modules.order: $(obj)/% ;
#
# Rule to compile a set of .o files into one .a file (without symbol table)
#
+# To make this rule robust against the "Argument list too long" error,
+# remove the $(obj)/ prefix and restore it with a shell command.
quiet_cmd_ar_builtin = AR $@
- cmd_ar_builtin = rm -f $@; $(AR) cDPrST $@ $(real-prereqs)
+ cmd_ar_builtin = rm -f $@; \
+ echo $(patsubst $(obj)/%,%,$(real-prereqs)) | \
+ sed -E 's:([^ ]+):$(obj)/\1:g' | \
+ xargs $(AR) cDPrST $@
$(obj)/built-in.a: $(real-obj-y) FORCE
$(call if_changed,ar_builtin)
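For illustration only, a sketch of the prefix round trip used above: the recipe echoes the short, prefix-stripped object names, sed re-attaches the $(obj)/ prefix, and xargs hands the result to $(AR), so the command line make spawns stays short. The object names and directory are hypothetical:

  $ echo main.o helper.o | sed -E 's:([^ ]+):drivers/misc/\1:g'
  drivers/misc/main.o drivers/misc/helper.o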
@@ -421,18 +387,18 @@ $(obj)/modules.order: $(obj-m) FORCE
$(obj)/lib.a: $(lib-y) FORCE
$(call if_changed,ar)
-ifneq ($(CONFIG_LTO_CLANG)$(CONFIG_X86_KERNEL_IBT),)
-quiet_cmd_link_multi-m = AR [M] $@
-cmd_link_multi-m = \
- rm -f $@; \
- $(AR) cDPrsT $@ @$(patsubst %.o,%.mod,$@)
-else
-quiet_cmd_link_multi-m = LD [M] $@
- cmd_link_multi-m = $(LD) $(ld_flags) -r -o $@ @$(patsubst %.o,%.mod,$@)
-endif
+quiet_cmd_ld_multi_m = LD [M] $@
+ cmd_ld_multi_m = $(LD) $(ld_flags) -r -o $@ @$(patsubst %.o,%.mod,$@) $(cmd_objtool)
+
+define rule_ld_multi_m
+ $(call cmd_and_savecmd,ld_multi_m)
+ $(call cmd,gen_objtooldep)
+endef
+$(multi-obj-m): objtool-enabled := $(delay-objtool)
+$(multi-obj-m): part-of-module := y
$(multi-obj-m): %.o: %.mod FORCE
- $(call if_changed,link_multi-m)
+ $(call if_changed_rule,ld_multi_m)
$(call multi_depend, $(multi-obj-m), .o, -objs -y -m)
targets := $(filter-out $(PHONY), $(targets))
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 0453a1904646..d1425778664b 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -225,12 +225,31 @@ dtc_cpp_flags = -Wp,-MMD,$(depfile).pre.tmp -nostdinc \
$(addprefix -I,$(DTC_INCLUDE)) \
-undef -D__DTS__
-ifneq ($(CONFIG_LTO_CLANG)$(CONFIG_X86_KERNEL_IBT),)
-# With CONFIG_LTO_CLANG, .o files in modules might be LLVM bitcode, so we
-# need to run LTO to compile them into native code (.lto.o) before further
-# processing.
-mod-prelink-ext := .prelink
-endif
+ifdef CONFIG_OBJTOOL
+
+objtool := $(objtree)/tools/objtool/objtool
+
+objtool_args = \
+ $(if $(CONFIG_HAVE_JUMP_LABEL_HACK), --hacks=jump_label) \
+ $(if $(CONFIG_HAVE_NOINSTR_HACK), --hacks=noinstr) \
+ $(if $(CONFIG_X86_KERNEL_IBT), --ibt) \
+ $(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount) \
+ $(if $(CONFIG_UNWINDER_ORC), --orc) \
+ $(if $(CONFIG_RETPOLINE), --retpoline) \
+ $(if $(CONFIG_SLS), --sls) \
+ $(if $(CONFIG_STACK_VALIDATION), --stackval) \
+ $(if $(CONFIG_HAVE_STATIC_CALL_INLINE), --static-call) \
+ $(if $(CONFIG_HAVE_UACCESS_VALIDATION), --uaccess) \
+ $(if $(delay-objtool), --link) \
+ $(if $(part-of-module), --module) \
+ $(if $(CONFIG_GCOV_KERNEL), --no-unreachable)
+
+delay-objtool := $(or $(CONFIG_LTO_CLANG),$(CONFIG_X86_KERNEL_IBT))
+
+cmd_objtool = $(if $(objtool-enabled), ; $(objtool) $(objtool_args) $@)
+cmd_gen_objtooldep = $(if $(objtool-enabled), { echo ; echo '$@: $$(wildcard $(objtool))' ; } >> $(dot-target).cmd)
+
+endif # CONFIG_OBJTOOL
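For illustration only, one possible expansion of the command that $(cmd_objtool) appends for a module object when objtool runs per translation unit; which flags appear depends entirely on the kernel configuration, so this is a hypothetical combination of the options listed above:

  ./tools/objtool/objtool --orc --retpoline --stackval --static-call --uaccess --module drivers/misc/foo.o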
# Useful for describing the dependency of composite objects
# Usage:
diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
index 7f39599e9fae..35100e981f4a 100644
--- a/scripts/Makefile.modfinal
+++ b/scripts/Makefile.modfinal
@@ -9,7 +9,7 @@ __modfinal:
include include/config/auto.conf
include $(srctree)/scripts/Kbuild.include
-# for c_flags and mod-prelink-ext
+# for c_flags
include $(srctree)/scripts/Makefile.lib
# find all modules listed in modules.order
@@ -54,9 +54,8 @@ if_changed_except = $(if $(call newer_prereqs_except,$(2))$(cmd-check), \
$(cmd); \
printf '%s\n' 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd, @:)
-
# Re-generate module BTFs if either module's .ko or vmlinux changed
-$(modules): %.ko: %$(mod-prelink-ext).o %.mod.o scripts/module.lds $(if $(KBUILD_BUILTIN),vmlinux) FORCE
+$(modules): %.ko: %.o %.mod.o scripts/module.lds $(if $(KBUILD_BUILTIN),vmlinux) FORCE
+$(call if_changed_except,ld_ko_o,vmlinux)
ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+$(if $(newer-prereqs),$(call cmd,btf_ko))
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 48585c4d04ad..911606496341 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -41,9 +41,6 @@ __modpost:
include include/config/auto.conf
include $(srctree)/scripts/Kbuild.include
-# for mod-prelink-ext
-include $(srctree)/scripts/Makefile.lib
-
MODPOST = scripts/mod/modpost \
$(if $(CONFIG_MODVERSIONS),-m) \
$(if $(CONFIG_MODULE_SRCVERSION_ALL),-a) \
@@ -87,8 +84,7 @@ obj := $(KBUILD_EXTMOD)
src := $(obj)
# Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
-include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
- $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
+include $(or $(wildcard $(src)/Kbuild), $(src)/Makefile)
# modpost option for external modules
MODPOST += -e
@@ -118,8 +114,6 @@ $(input-symdump):
@echo >&2 ' Modules may not have dependencies or modversions.'
@echo >&2 ' You may get many unresolved symbol warnings.'
-modules := $(sort $(shell cat $(MODORDER)))
-
# KBUILD_MODPOST_WARN can be set to avoid error out in case of undefined symbols
ifneq ($(KBUILD_MODPOST_WARN)$(filter-out $(existing-input-symdump), $(input-symdump)),)
MODPOST += -w
@@ -128,9 +122,9 @@ endif
# Read out modules.order to pass in modpost.
# Otherwise, allmodconfig would fail with "Argument list too long".
quiet_cmd_modpost = MODPOST $@
- cmd_modpost = sed 's/\.ko$$/$(mod-prelink-ext)\.o/' $< | $(MODPOST) -T -
+ cmd_modpost = sed 's/ko$$/o/' $< | $(MODPOST) -T -
-$(output-symdump): $(MODORDER) $(input-symdump) $(modules:.ko=$(mod-prelink-ext).o) FORCE
+$(output-symdump): $(MODORDER) $(input-symdump) FORCE
$(call if_changed,modpost)
targets += $(output-symdump)
diff --git a/scripts/Makefile.vmlinux_o b/scripts/Makefile.vmlinux_o
new file mode 100644
index 000000000000..3c97a1564947
--- /dev/null
+++ b/scripts/Makefile.vmlinux_o
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+PHONY := __default
+__default: vmlinux.o
+
+include include/config/auto.conf
+include $(srctree)/scripts/Kbuild.include
+
+# for objtool
+include $(srctree)/scripts/Makefile.lib
+
+# Generate a linker script to ensure correct ordering of initcalls for Clang LTO
+# ---------------------------------------------------------------------------
+
+quiet_cmd_gen_initcalls_lds = GEN $@
+ cmd_gen_initcalls_lds = \
+ $(PYTHON3) $(srctree)/scripts/jobserver-exec \
+ $(PERL) $(real-prereqs) > $@
+
+.tmp_initcalls.lds: $(srctree)/scripts/generate_initcall_order.pl \
+ $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS) FORCE
+ $(call if_changed,gen_initcalls_lds)
+
+targets := .tmp_initcalls.lds
+
+ifdef CONFIG_LTO_CLANG
+initcalls-lds := .tmp_initcalls.lds
+endif
+
+# objtool for vmlinux.o
+# ---------------------------------------------------------------------------
+#
+# For LTO and IBT, objtool doesn't run on individual translation units.
+# Run everything on vmlinux instead.
+
+objtool-enabled := $(or $(delay-objtool),$(CONFIG_NOINSTR_VALIDATION))
+
+# Reuse objtool_args defined in scripts/Makefile.lib if LTO or IBT is enabled.
+#
+# Add some more flags as needed.
+# --no-unreachable and --link might be added twice, but it is fine.
+#
+# Expand objtool_args to a simple variable to avoid circular reference.
+
+objtool_args := \
+ $(if $(delay-objtool),$(objtool_args)) \
+ $(if $(CONFIG_NOINSTR_VALIDATION), --noinstr) \
+ $(if $(CONFIG_GCOV_KERNEL), --no-unreachable) \
+ --link
+
+# Link of vmlinux.o used for section mismatch analysis
+# ---------------------------------------------------------------------------
+
+quiet_cmd_ld_vmlinux.o = LD $@
+ cmd_ld_vmlinux.o = \
+ $(LD) ${KBUILD_LDFLAGS} -r -o $@ \
+ $(addprefix -T , $(initcalls-lds)) \
+ --whole-archive $(KBUILD_VMLINUX_OBJS) --no-whole-archive \
+ --start-group $(KBUILD_VMLINUX_LIBS) --end-group \
+ $(cmd_objtool)
+
+define rule_ld_vmlinux.o
+ $(call cmd_and_savecmd,ld_vmlinux.o)
+ $(call cmd,gen_objtooldep)
+endef
+
+vmlinux.o: $(initcalls-lds) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS) FORCE
+ $(call if_changed_rule,ld_vmlinux.o)
+
+targets += vmlinux.o
+
+# Add FORCE to the prerequisites of a target to force it to be always rebuilt.
+# ---------------------------------------------------------------------------
+
+PHONY += FORCE
+FORCE:
+
+# Read all saved command lines and dependencies for the $(targets) we
+# may be building above, using $(if_changed{,_dep}). As an
+# optimization, we don't need to read them if the target does not
+# exist, we will rebuild anyway in that case.
+
+existing-targets := $(wildcard $(sort $(targets)))
+
+-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
+
+.PHONY: $(PHONY)
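As a usage note, this new Makefile is not invoked directly; link-vmlinux.sh (updated later in this section) delegates the vmlinux.o link to it, roughly:

  ${MAKE} -f "${srctree}/scripts/Makefile.vmlinux_o"

With CONFIG_LTO_CLANG, the .tmp_initcalls.lds rule above runs first so the generated linker script can be passed to the link via -T.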
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
index dcd8d8750b8b..4dd6a804ce41 100755
--- a/scripts/bloat-o-meter
+++ b/scripts/bloat-o-meter
@@ -36,6 +36,7 @@ def getsizes(file, format):
if name.startswith("__se_compat_sys"): continue
if name.startswith("__addressable_"): continue
if name == "linux_banner": continue
+ if name == "vermagic": continue
# statics and some other optimizations adds random .NUMBER
name = re_NUMBER.sub('', name)
sym[name] = sym.get(name, 0) + int(size, 16)
diff --git a/scripts/check-local-export b/scripts/check-local-export
new file mode 100755
index 000000000000..da745e2743b7
--- /dev/null
+++ b/scripts/check-local-export
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2022 Masahiro Yamada <masahiroy@kernel.org>
+#
+# Exit with error if a local exported symbol is found.
+# EXPORT_SYMBOL should be used for global symbols.
+
+set -e
+
+declare -A symbol_types
+declare -a export_symbols
+
+exit_code=0
+
+while read value type name
+do
+ # Skip the line if the number of fields is less than 3.
+ #
+ # case 1)
+ # For undefined symbols, the first field (value) is empty.
+ # The output looks like this:
+ # " U _printk"
+ # There is no need to record undefined symbols.
+ #
+ # case 2)
+ # For Clang LTO, llvm-nm outputs a line with type 't' but empty name:
+ # "---------------- t"
+ if [[ -z ${name} ]]; then
+ continue
+ fi
+
+ # save (name, type) in the associative array
+ symbol_types[${name}]=${type}
+
+ # append the exported symbol to the array
+ if [[ ${name} == __ksymtab_* ]]; then
+ export_symbols+=(${name#__ksymtab_})
+ fi
+
+ # If there is no symbol in the object, ${NM} (both GNU nm and llvm-nm)
+ # shows 'no symbols' diagnostic (but exits with 0). It is harmless and
+ # hidden by '2>/dev/null'. However, it suppresses real error messages
+ # as well. Add a hand-crafted error message here.
+ #
+ # Use --quiet instead of 2>/dev/null when we upgrade the minimum version
+ # of binutils to 2.37, llvm to 13.0.0.
+ #
+ # Then, the following line will be really simple:
+ # done < <(${NM} --quiet ${1})
+done < <(${NM} ${1} 2>/dev/null || { echo "${0}: ${NM} failed" >&2; false; } )
+
+# Catch error in the process substitution
+wait $!
+
+for name in "${export_symbols[@]}"
+do
+ # nm(3) says "If lowercase, the symbol is usually local"
+ if [[ ${symbol_types[$name]} =~ [a-z] ]]; then
+ echo "$@: error: local symbol '${name}' was exported" >&2
+ exit_code=1
+ fi
+done
+
+exit ${exit_code}
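For illustration only, a hypothetical run of the script on a module object where a static function was exported; the object name, symbol name and nm values are made up, but the message format matches the echo above:

  $ NM=nm ./scripts/check-local-export drivers/misc/foo.o
  drivers/misc/foo.o: error: local symbol 'my_helper' was exported

  # i.e. nm reported a lowercase (local) type for the exported symbol:
  #   0000000000000000 t my_helper
  #   0000000000000000 r __ksymtab_my_helper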
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index 5fbad61fe490..7075e26ab2c4 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -45,8 +45,13 @@ else
fi
fi
-declare -A cache
-declare -A modcache
+declare aarray_support=true
+declare -A cache 2>/dev/null
+if [[ $? != 0 ]]; then
+ aarray_support=false
+else
+ declare -A modcache
+fi
find_module() {
if [[ -n $debuginfod ]] ; then
@@ -97,7 +102,7 @@ parse_symbol() {
if [[ $module == "" ]] ; then
local objfile=$vmlinux
- elif [[ "${modcache[$module]+isset}" == "isset" ]]; then
+ elif [[ $aarray_support == true && "${modcache[$module]+isset}" == "isset" ]]; then
local objfile=${modcache[$module]}
else
local objfile=$(find_module)
@@ -105,7 +110,9 @@ parse_symbol() {
echo "WARNING! Modules path isn't set, but is needed to parse this symbol" >&2
return
fi
- modcache[$module]=$objfile
+ if [[ $aarray_support == true ]]; then
+ modcache[$module]=$objfile
+ fi
fi
# Remove the englobing parenthesis
@@ -125,7 +132,7 @@ parse_symbol() {
# Use 'nm vmlinux' to figure out the base address of said symbol.
# It's actually faster to call it every time than to load it
# all into bash.
- if [[ "${cache[$module,$name]+isset}" == "isset" ]]; then
+ if [[ $aarray_support == true && "${cache[$module,$name]+isset}" == "isset" ]]; then
local base_addr=${cache[$module,$name]}
else
local base_addr=$(nm "$objfile" 2>/dev/null | awk '$3 == "'$name'" && ($2 == "t" || $2 == "T") {print $1; exit}')
@@ -133,7 +140,9 @@ parse_symbol() {
# address not found
return
fi
- cache[$module,$name]="$base_addr"
+ if [[ $aarray_support == true ]]; then
+ cache[$module,$name]="$base_addr"
+ fi
fi
# Let's start doing the math to get the exact address into the
# symbol. First, strip out the symbol total length.
@@ -149,11 +158,13 @@ parse_symbol() {
# Pass it to addr2line to get filename and line number
# Could get more than one result
- if [[ "${cache[$module,$address]+isset}" == "isset" ]]; then
+ if [[ $aarray_support == true && "${cache[$module,$address]+isset}" == "isset" ]]; then
local code=${cache[$module,$address]}
else
local code=$(${CROSS_COMPILE}addr2line -i -e "$objfile" "$address" 2>/dev/null)
- cache[$module,$address]=$code
+ if [[ $aarray_support == true ]]; then
+ cache[$module,$address]=$code
+ fi
fi
# addr2line doesn't return a proper error code if it fails, so
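For illustration only, the same feature probe can be run on its own to see which path an old bash (without associative array support) takes; this mirrors the lines added above:

  declare aarray_support=true
  declare -A cache 2>/dev/null
  if [[ $? != 0 ]]; then
          aarray_support=false
  fi
  echo "associative arrays supported: $aarray_support"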
diff --git a/scripts/get_abi.pl b/scripts/get_abi.pl
index 1389db76cff3..0ffd5531242a 100755
--- a/scripts/get_abi.pl
+++ b/scripts/get_abi.pl
@@ -981,11 +981,11 @@ __END__
=head1 NAME
-abi_book.pl - parse the Linux ABI files and produce a ReST book.
+get_abi.pl - parse the Linux ABI files and produce a ReST book.
=head1 SYNOPSIS
-B<abi_book.pl> [--debug <level>] [--enable-lineno] [--man] [--help]
+B<get_abi.pl> [--debug <level>] [--enable-lineno] [--man] [--help]
[--(no-)rst-source] [--dir=<dir>] [--show-hints]
[--search-string <regex>]
<COMMAND> [<ARGUMENT>]
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 6bd5221d37b8..ab123b498fd9 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -983,6 +983,7 @@ sub get_maintainers {
}
foreach my $email (@file_emails) {
+ $email = mailmap_email($email);
my ($name, $address) = parse_email($email);
my $tmp_email = format_email($name, $address, $email_usename);
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index e6906f79833d..f18e6dfc68c5 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -70,7 +70,7 @@ static unsigned char best_table_len[256];
static void usage(void)
{
- fprintf(stderr, "Usage: kallsyms [--all-symbols] "
+ fprintf(stderr, "Usage: kallsyms [--all-symbols] [--absolute-percpu] "
"[--base-relative] < in.map > out.S\n");
exit(1);
}
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index 7b371bd7fb36..3ba8b1af390f 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -52,8 +52,8 @@ static const char nconf_global_help[] =
"\n"
"Menu navigation keys\n"
"----------------------------------------------------------------------\n"
-"Linewise up <Up>\n"
-"Linewise down <Down>\n"
+"Linewise up <Up> <k>\n"
+"Linewise down <Down> <j>\n"
"Pagewise up <Page Up>\n"
"Pagewise down <Page Down>\n"
"First entry <Home>\n"
@@ -1105,9 +1105,11 @@ static void conf(struct menu *menu)
break;
switch (res) {
case KEY_DOWN:
+ case 'j':
menu_driver(curses_menu, REQ_DOWN_ITEM);
break;
case KEY_UP:
+ case 'k':
menu_driver(curses_menu, REQ_UP_ITEM);
break;
case KEY_NPAGE:
@@ -1287,9 +1289,11 @@ static void conf_choice(struct menu *menu)
break;
switch (res) {
case KEY_DOWN:
+ case 'j':
menu_driver(curses_menu, REQ_DOWN_ITEM);
break;
case KEY_UP:
+ case 'k':
menu_driver(curses_menu, REQ_UP_ITEM);
break;
case KEY_NPAGE:
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index a7f6196c7e41..eecc1863e556 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -45,115 +45,6 @@ info()
printf " %-7s %s\n" "${1}" "${2}"
}
-# Generate a linker script to ensure correct ordering of initcalls.
-gen_initcalls()
-{
- info GEN .tmp_initcalls.lds
-
- ${PYTHON3} ${srctree}/scripts/jobserver-exec \
- ${PERL} ${srctree}/scripts/generate_initcall_order.pl \
- ${KBUILD_VMLINUX_OBJS} ${KBUILD_VMLINUX_LIBS} \
- > .tmp_initcalls.lds
-}
-
-# Link of vmlinux.o used for section mismatch analysis
-# ${1} output file
-modpost_link()
-{
- local objects
- local lds=""
-
- objects="--whole-archive \
- ${KBUILD_VMLINUX_OBJS} \
- --no-whole-archive \
- --start-group \
- ${KBUILD_VMLINUX_LIBS} \
- --end-group"
-
- if is_enabled CONFIG_LTO_CLANG; then
- gen_initcalls
- lds="-T .tmp_initcalls.lds"
-
- # This might take a while, so indicate that we're doing
- # an LTO link
- info LTO ${1}
- else
- info LD ${1}
- fi
-
- ${LD} ${KBUILD_LDFLAGS} -r -o ${1} ${lds} ${objects}
-}
-
-objtool_link()
-{
- local objtoolcmd;
- local objtoolopt;
-
- if ! is_enabled CONFIG_OBJTOOL; then
- return;
- fi
-
- if is_enabled CONFIG_LTO_CLANG || is_enabled CONFIG_X86_KERNEL_IBT; then
-
- # For LTO and IBT, objtool doesn't run on individual
- # translation units. Run everything on vmlinux instead.
-
- if is_enabled CONFIG_HAVE_JUMP_LABEL_HACK; then
- objtoolopt="${objtoolopt} --hacks=jump_label"
- fi
-
- if is_enabled CONFIG_HAVE_NOINSTR_HACK; then
- objtoolopt="${objtoolopt} --hacks=noinstr"
- fi
-
- if is_enabled CONFIG_X86_KERNEL_IBT; then
- objtoolopt="${objtoolopt} --ibt"
- fi
-
- if is_enabled CONFIG_FTRACE_MCOUNT_USE_OBJTOOL; then
- objtoolopt="${objtoolopt} --mcount"
- fi
-
- if is_enabled CONFIG_UNWINDER_ORC; then
- objtoolopt="${objtoolopt} --orc"
- fi
-
- if is_enabled CONFIG_RETPOLINE; then
- objtoolopt="${objtoolopt} --retpoline"
- fi
-
- if is_enabled CONFIG_SLS; then
- objtoolopt="${objtoolopt} --sls"
- fi
-
- if is_enabled CONFIG_STACK_VALIDATION; then
- objtoolopt="${objtoolopt} --stackval"
- fi
-
- if is_enabled CONFIG_HAVE_STATIC_CALL_INLINE; then
- objtoolopt="${objtoolopt} --static-call"
- fi
-
- objtoolopt="${objtoolopt} --uaccess"
- fi
-
- if is_enabled CONFIG_NOINSTR_VALIDATION; then
- objtoolopt="${objtoolopt} --noinstr"
- fi
-
- if [ -n "${objtoolopt}" ]; then
-
- if is_enabled CONFIG_GCOV_KERNEL; then
- objtoolopt="${objtoolopt} --no-unreachable"
- fi
-
- objtoolopt="${objtoolopt} --link"
-
- info OBJTOOL ${1}
- tools/objtool/objtool ${objtoolopt} ${1}
- fi
-}
-
# Link of vmlinux
# ${1} - output file
# ${2}, ${3}, ... - optional extra .o files
@@ -303,14 +194,9 @@ sorttable()
cleanup()
{
rm -f .btf.*
- rm -f .tmp_System.map
- rm -f .tmp_initcalls.lds
- rm -f .tmp_vmlinux*
rm -f System.map
rm -f vmlinux
rm -f vmlinux.map
- rm -f vmlinux.o
- rm -f .vmlinux.d
rm -f .vmlinux.objs
rm -f .vmlinux.export.c
}
@@ -341,12 +227,18 @@ fi;
${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init need-builtin=1
#link vmlinux.o
-modpost_link vmlinux.o
-objtool_link vmlinux.o
+${MAKE} -f "${srctree}/scripts/Makefile.vmlinux_o"
-# Generate the list of objects in vmlinux
+# Generate the list of in-tree objects in vmlinux
+#
+# This is used to retrieve symbol versions generated by genksyms.
for f in ${KBUILD_VMLINUX_OBJS} ${KBUILD_VMLINUX_LIBS}; do
case ${f} in
+ *libgcc.a)
+ # Some architectures do '$(CC) --print-libgcc-file-name' to
+ # borrow libgcc.a from the toolchain.
+ # There is no EXPORT_SYMBOL in external objects. Ignore this.
+ ;;
*.a)
${AR} t ${f} ;;
*)
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 5258247d78ac..cbd6b0f48b4e 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -734,8 +734,6 @@ static int do_vio_entry(const char *filename, void *symval,
return 1;
}
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-
static void do_input(char *alias,
kernel_ulong_t *arr, unsigned int min, unsigned int max)
{
@@ -1391,6 +1389,15 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias)
return 1;
}
+/* Looks like: mhi_ep:S */
+static int do_mhi_ep_entry(const char *filename, void *symval, char *alias)
+{
+ DEF_FIELD_ADDR(symval, mhi_device_id, chan);
+ sprintf(alias, MHI_EP_DEVICE_MODALIAS_FMT, *chan);
+
+ return 1;
+}
+
/* Looks like: ishtp:{guid} */
static int do_ishtp_entry(const char *filename, void *symval, char *alias)
{
@@ -1519,6 +1526,7 @@ static const struct devtable devtable[] = {
{"tee", SIZE_tee_client_device_id, do_tee_entry},
{"wmi", SIZE_wmi_device_id, do_wmi_entry},
{"mhi", SIZE_mhi_device_id, do_mhi_entry},
+ {"mhi_ep", SIZE_mhi_device_id, do_mhi_ep_entry},
{"auxiliary", SIZE_auxiliary_device_id, do_auxiliary_entry},
{"ssam", SIZE_ssam_device_id, do_ssam_entry},
{"dfl", SIZE_dfl_device_id, do_dfl_entry},
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 42e949cbc255..29d5a841e215 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -13,6 +13,7 @@
#define _GNU_SOURCE
#include <elf.h>
+#include <fnmatch.h>
#include <stdio.h>
#include <ctype.h>
#include <string.h>
@@ -172,11 +173,11 @@ static struct module *find_module(const char *modname)
return NULL;
}
-static struct module *new_module(const char *modname)
+static struct module *new_module(const char *name, size_t namelen)
{
struct module *mod;
- mod = NOFAIL(malloc(sizeof(*mod) + strlen(modname) + 1));
+ mod = NOFAIL(malloc(sizeof(*mod) + namelen + 1));
memset(mod, 0, sizeof(*mod));
INIT_LIST_HEAD(&mod->exported_symbols);
@@ -184,8 +185,9 @@ static struct module *new_module(const char *modname)
INIT_LIST_HEAD(&mod->missing_namespaces);
INIT_LIST_HEAD(&mod->imported_namespaces);
- strcpy(mod->name, modname);
- mod->is_vmlinux = (strcmp(modname, "vmlinux") == 0);
+ memcpy(mod->name, name, namelen);
+ mod->name[namelen] = '\0';
+ mod->is_vmlinux = (strcmp(mod->name, "vmlinux") == 0);
/*
* Set mod->is_gpl_compatible to true by default. If MODULE_LICENSE()
@@ -212,7 +214,6 @@ struct symbol {
unsigned int crc;
bool crc_valid;
bool weak;
- bool is_static; /* true if symbol is not global */
bool is_gpl_only; /* exported by EXPORT_SYMBOL_GPL */
char name[];
};
@@ -242,7 +243,7 @@ static struct symbol *alloc_symbol(const char *name)
memset(s, 0, sizeof(*s));
strcpy(s->name, name);
- s->is_static = true;
+
return s;
}
@@ -710,29 +711,6 @@ static char *get_modinfo(struct elf_info *info, const char *tag)
return get_next_modinfo(info, tag, NULL);
}
-/**
- * Test if string s ends in string sub
- * return 0 if match
- **/
-static int strrcmp(const char *s, const char *sub)
-{
- int slen, sublen;
-
- if (!s || !sub)
- return 1;
-
- slen = strlen(s);
- sublen = strlen(sub);
-
- if ((slen == 0) || (sublen == 0))
- return 1;
-
- if (sublen > slen)
- return 1;
-
- return memcmp(s + slen - sublen, sub, sublen);
-}
-
static const char *sym_name(struct elf_info *elf, Elf_Sym *sym)
{
if (sym)
@@ -741,48 +719,22 @@ static const char *sym_name(struct elf_info *elf, Elf_Sym *sym)
return "(unknown)";
}
-/* The pattern is an array of simple patterns.
- * "foo" will match an exact string equal to "foo"
- * "*foo" will match a string that ends with "foo"
- * "foo*" will match a string that begins with "foo"
- * "*foo*" will match a string that contains "foo"
+/*
+ * Check whether the 'string' argument matches one of the 'patterns',
+ * an array of shell wildcard patterns (glob).
+ *
+ * Return true if there is a match.
*/
-static int match(const char *sym, const char * const pat[])
+static bool match(const char *string, const char *const patterns[])
{
- const char *p;
- while (*pat) {
- const char *endp;
-
- p = *pat++;
- endp = p + strlen(p) - 1;
+ const char *pattern;
- /* "*foo*" */
- if (*p == '*' && *endp == '*') {
- char *bare = NOFAIL(strndup(p + 1, strlen(p) - 2));
- char *here = strstr(sym, bare);
-
- free(bare);
- if (here != NULL)
- return 1;
- }
- /* "*foo" */
- else if (*p == '*') {
- if (strrcmp(sym, p + 1) == 0)
- return 1;
- }
- /* "foo*" */
- else if (*endp == '*') {
- if (strncmp(sym, p, strlen(p) - 1) == 0)
- return 1;
- }
- /* no wildcards */
- else {
- if (strcmp(p, sym) == 0)
- return 1;
- }
+ while ((pattern = *patterns++)) {
+ if (!fnmatch(pattern, string, 0))
+ return true;
}
- /* no match */
- return 0;
+
+ return false;
}
/* sections that we do not want to do full section mismatch check on */
@@ -1049,8 +1001,6 @@ static const struct sectioncheck *section_mismatch(
const char *fromsec, const char *tosec)
{
int i;
- int elems = sizeof(sectioncheck) / sizeof(struct sectioncheck);
- const struct sectioncheck *check = &sectioncheck[0];
/*
* The target section could be the SHT_NUL section when we're
@@ -1061,14 +1011,15 @@ static const struct sectioncheck *section_mismatch(
if (*tosec == '\0')
return NULL;
- for (i = 0; i < elems; i++) {
+ for (i = 0; i < ARRAY_SIZE(sectioncheck); i++) {
+ const struct sectioncheck *check = &sectioncheck[i];
+
if (match(fromsec, check->fromsec)) {
if (check->bad_tosec[0] && match(tosec, check->bad_tosec))
return check;
if (check->good_tosec[0] && !match(tosec, check->good_tosec))
return check;
}
- check++;
}
return NULL;
}
@@ -1180,7 +1131,8 @@ static int secref_whitelist(const struct sectioncheck *mismatch,
static inline int is_arm_mapping_symbol(const char *str)
{
- return str[0] == '$' && strchr("axtd", str[1])
+ return str[0] == '$' &&
+ (str[1] == 'a' || str[1] == 'd' || str[1] == 't' || str[1] == 'x')
&& (str[2] == '\0' || str[2] == '.');
}
@@ -1270,13 +1222,9 @@ static Elf_Sym *find_elf_symbol2(struct elf_info *elf, Elf_Addr addr,
continue;
if (!is_valid_name(elf, sym))
continue;
- if (sym->st_value <= addr) {
- if ((addr - sym->st_value) < distance) {
- distance = addr - sym->st_value;
- near = sym;
- } else if ((addr - sym->st_value) == distance) {
- near = sym;
- }
+ if (sym->st_value <= addr && addr - sym->st_value <= distance) {
+ distance = addr - sym->st_value;
+ near = sym;
}
}
return near;
@@ -1883,8 +1831,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
* to find all references to a section that reference a section that will
* be discarded and warns about it.
**/
-static void check_sec_ref(struct module *mod, const char *modname,
- struct elf_info *elf)
+static void check_sec_ref(const char *modname, struct elf_info *elf)
{
int i;
Elf_Shdr *sechdrs = elf->sechdrs;
@@ -1906,12 +1853,8 @@ static char *remove_dot(char *s)
if (n && s[n]) {
size_t m = strspn(s + n + 1, "0123456789");
- if (m && (s[n + m] == '.' || s[n + m] == 0))
+ if (m && (s[n + m + 1] == '.' || s[n + m + 1] == 0))
s[n] = 0;
-
- /* strip trailing .prelink */
- if (strends(s, ".prelink"))
- s[strlen(s) - 8] = '\0';
}
return s;
}
@@ -2027,19 +1970,14 @@ static void read_symbols(const char *modname)
if (!parse_elf(&info, modname))
return;
- {
- char *tmp;
-
- /* strip trailing .o */
- tmp = NOFAIL(strdup(modname));
- tmp[strlen(tmp) - 2] = '\0';
- /* strip trailing .prelink */
- if (strends(tmp, ".prelink"))
- tmp[strlen(tmp) - 8] = '\0';
- mod = new_module(tmp);
- free(tmp);
+ if (!strends(modname, ".o")) {
+ error("%s: filename must be suffixed with .o\n", modname);
+ return;
}
+ /* strip trailing .o */
+ mod = new_module(modname, strlen(modname) - strlen(".o"));
+
if (!mod->is_vmlinux) {
license = get_modinfo(&info, "license");
if (!license)
@@ -2076,21 +2014,7 @@ static void read_symbols(const char *modname)
sym_get_data(&info, sym));
}
- // check for static EXPORT_SYMBOL_* functions && global vars
- for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
- unsigned char bind = ELF_ST_BIND(sym->st_info);
-
- if (bind == STB_GLOBAL || bind == STB_WEAK) {
- struct symbol *s =
- find_symbol(remove_dot(info.strtab +
- sym->st_name));
-
- if (s)
- s->is_static = false;
- }
- }
-
- check_sec_ref(mod, modname, &info);
+ check_sec_ref(modname, &info);
if (!mod->is_vmlinux) {
version = get_modinfo(&info, "version");
@@ -2515,11 +2439,10 @@ static void read_dump(const char *fname)
mod = find_module(modname);
if (!mod) {
- mod = new_module(modname);
+ mod = new_module(modname, strlen(modname));
mod->from_dump = true;
}
s = sym_add_exported(symname, mod, gpl_only);
- s->is_static = false;
sym_set_crc(s, crc);
sym_update_namespace(symname, namespace);
}
@@ -2584,7 +2507,6 @@ int main(int argc, char **argv)
char *missing_namespace_deps = NULL;
char *dump_write = NULL, *files_source = NULL;
int opt;
- int n;
LIST_HEAD(dump_lists);
struct dump_list *dl, *dl2;
@@ -2660,15 +2582,6 @@ int main(int argc, char **argv)
if (sec_mismatch_count && !sec_mismatch_warn_only)
error("Section mismatches detected.\n"
"Set CONFIG_SECTION_MISMATCH_WARN_ONLY=y to allow them.\n");
- for (n = 0; n < SYMBOL_HASH_SIZE; n++) {
- struct symbol *s;
-
- for (s = symbolhash[n]; s; s = s->next) {
- if (s->is_static)
- error("\"%s\" [%s] is a static EXPORT_SYMBOL\n",
- s->name, s->module->name);
- }
- }
if (nr_unresolved > MAX_UNRESOLVED_REPORTS)
warn("suppressed %u unresolved symbol warnings because there were too many)\n",
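As a side note on the fnmatch() conversion above: the patterns are ordinary shell wildcards, so the forms listed in the removed comment can be sanity-checked with bash's own pattern matching; the strings below are made-up examples, not entries from the section tables:

  [[ .init.text == .init.* ]]        && echo "prefix pattern matches"
  [[ __initcall_foo == *initcall* ]] && echo "substring pattern matches"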
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index d9daeff07b83..044bdfb894b7 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -97,6 +97,9 @@ static inline void __endian(const void *src, void *dest, unsigned int size)
#endif
#define NOFAIL(ptr) do_nofail((ptr), #ptr)
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
void *do_nofail(void *ptr, const char *expr);
struct buffer {
diff --git a/scripts/objdiff b/scripts/objdiff
index 72b0b63c3fe1..0685bc3ce3df 100755
--- a/scripts/objdiff
+++ b/scripts/objdiff
@@ -20,10 +20,10 @@
# $ ./scripts/objdiff diff COMMIT_A COMMIT_B
# $
-# And to clean up (everything is in .tmp_objdiff/*)
+# And to clean up (everything is in .objdiff/*)
# $ ./scripts/objdiff clean all
#
-# Note: 'make mrproper' will also remove .tmp_objdiff
+# Note: 'make mrproper' will also remove .objdiff
SRCTREE=$(cd $(git rev-parse --show-toplevel 2>/dev/null); pwd)
@@ -32,7 +32,7 @@ if [ -z "$SRCTREE" ]; then
exit 1
fi
-TMPD=$SRCTREE/.tmp_objdiff
+TMPD=$SRCTREE/.objdiff
usage() {
echo >&2 "Usage: $0 <command> <args>"
diff --git a/scripts/sorttable.c b/scripts/sorttable.c
index d00504c5f530..fba40e99f354 100644
--- a/scripts/sorttable.c
+++ b/scripts/sorttable.c
@@ -60,6 +60,10 @@
#define EM_RISCV 243
#endif
+#ifndef EM_LOONGARCH
+#define EM_LOONGARCH 258
+#endif
+
static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *);
static uint64_t (*r8)(const uint64_t *);
@@ -313,6 +317,7 @@ static int do_file(char const *const fname, void *addr)
case EM_ARCOMPACT:
case EM_ARCV2:
case EM_ARM:
+ case EM_LOONGARCH:
case EM_MICROBLAZE:
case EM_MIPS:
case EM_XTENSA:
diff --git a/scripts/spdxcheck-test.sh b/scripts/spdxcheck-test.sh
index cb76324756bd..9f6d1a74da6e 100644
--- a/scripts/spdxcheck-test.sh
+++ b/scripts/spdxcheck-test.sh
@@ -1,7 +1,7 @@
#!/bin/sh
# run check on a text and a binary file
-for FILE in Makefile Documentation/logo.gif; do
+for FILE in Makefile Documentation/images/logo.gif; do
python3 scripts/spdxcheck.py $FILE
python3 scripts/spdxcheck.py - < $FILE
done
diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py
index f3be8ed54f6d..18cb9f5b3d3d 100755
--- a/scripts/spdxcheck.py
+++ b/scripts/spdxcheck.py
@@ -6,6 +6,7 @@ from argparse import ArgumentParser
from ply import lex, yacc
import locale
import traceback
+import fnmatch
import sys
import git
import re
@@ -28,6 +29,21 @@ class SPDXdata(object):
self.licenses = [ ]
self.exceptions = { }
+class dirinfo(object):
+ def __init__(self):
+ self.missing = 0
+ self.total = 0
+ self.files = []
+
+ def update(self, fname, basedir, miss):
+ self.total += 1
+ self.missing += miss
+ if miss:
+ fname = './' + fname
+ bdir = os.path.dirname(fname)
+ if bdir == basedir.rstrip('/'):
+ self.files.append(fname)
+
# Read the spdx data from the LICENSES directory
def read_spdxdata(repo):
@@ -91,11 +107,25 @@ class id_parser(object):
self.parser = yacc.yacc(module = self, write_tables = False, debug = False)
self.lines_checked = 0
self.checked = 0
+ self.excluded = 0
self.spdx_valid = 0
self.spdx_errors = 0
+ self.spdx_dirs = {}
+ self.dirdepth = -1
+ self.basedir = '.'
self.curline = 0
self.deepest = 0
+ def set_dirinfo(self, basedir, dirdepth):
+ if dirdepth >= 0:
+ self.basedir = basedir
+ bdir = basedir.lstrip('./').rstrip('/')
+ if bdir != '':
+ parts = bdir.split('/')
+ else:
+ parts = []
+ self.dirdepth = dirdepth + len(parts)
+
# Validate License and Exception IDs
def validate(self, tok):
id = tok.value.upper()
@@ -167,6 +197,7 @@ class id_parser(object):
def parse_lines(self, fd, maxlines, fname):
self.checked += 1
self.curline = 0
+ fail = 1
try:
for line in fd:
line = line.decode(locale.getpreferredencoding(False), errors='ignore')
@@ -192,6 +223,7 @@ class id_parser(object):
# Should we check for more SPDX ids in the same file and
# complain if there are any?
#
+ fail = 0
break
except ParserException as pe:
@@ -203,28 +235,102 @@ class id_parser(object):
sys.stdout.write('%s: %d:0 %s\n' %(fname, self.curline, pe.txt))
self.spdx_errors += 1
-def scan_git_tree(tree):
+ if fname == '-':
+ return
+
+ base = os.path.dirname(fname)
+ if self.dirdepth > 0:
+ parts = base.split('/')
+ i = 0
+ base = '.'
+ while i < self.dirdepth and i < len(parts) and len(parts[i]):
+ base += '/' + parts[i]
+ i += 1
+ elif self.dirdepth == 0:
+ base = self.basedir
+ else:
+ base = './' + base.rstrip('/')
+ base += '/'
+
+ di = self.spdx_dirs.get(base, dirinfo())
+ di.update(fname, base, fail)
+ self.spdx_dirs[base] = di
+
+class pattern(object):
+ def __init__(self, line):
+ self.pattern = line
+ self.match = self.match_file
+ if line == '.*':
+ self.match = self.match_dot
+ elif line.endswith('/'):
+ self.pattern = line[:-1]
+ self.match = self.match_dir
+ elif line.startswith('/'):
+ self.pattern = line[1:]
+ self.match = self.match_fn
+
+ def match_dot(self, fpath):
+ return os.path.basename(fpath).startswith('.')
+
+ def match_file(self, fpath):
+ return os.path.basename(fpath) == self.pattern
+
+ def match_fn(self, fpath):
+ return fnmatch.fnmatchcase(fpath, self.pattern)
+
+ def match_dir(self, fpath):
+ if self.match_fn(os.path.dirname(fpath)):
+ return True
+ return fpath.startswith(self.pattern)
+
+def exclude_file(fpath):
+ for rule in exclude_rules:
+ if rule.match(fpath):
+ return True
+ return False
+
+def scan_git_tree(tree, basedir, dirdepth):
+ parser.set_dirinfo(basedir, dirdepth)
for el in tree.traverse():
- # Exclude stuff which would make pointless noise
- # FIXME: Put this somewhere more sensible
- if el.path.startswith("LICENSES"):
- continue
- if el.path.find("license-rules.rst") >= 0:
- continue
if not os.path.isfile(el.path):
continue
+ if exclude_file(el.path):
+ parser.excluded += 1
+ continue
with open(el.path, 'rb') as fd:
parser.parse_lines(fd, args.maxlines, el.path)
-def scan_git_subtree(tree, path):
+def scan_git_subtree(tree, path, dirdepth):
for p in path.strip('/').split('/'):
tree = tree[p]
- scan_git_tree(tree)
+ scan_git_tree(tree, path.strip('/'), dirdepth)
+
+def read_exclude_file(fname):
+ rules = []
+ if not fname:
+ return rules
+ with open(fname) as fd:
+ for line in fd:
+ line = line.strip()
+ if line.startswith('#'):
+ continue
+ if not len(line):
+ continue
+ rules.append(pattern(line))
+ return rules
if __name__ == '__main__':
ap = ArgumentParser(description='SPDX expression checker')
ap.add_argument('path', nargs='*', help='Check path or file. If not given full git tree scan. For stdin use "-"')
+ ap.add_argument('-d', '--dirs', action='store_true',
+ help='Show [sub]directory statistics.')
+ ap.add_argument('-D', '--depth', type=int, default=-1,
+ help='Directory depth for -d statistics. Default: unlimited')
+ ap.add_argument('-e', '--exclude',
+ help='File containing file patterns to exclude. Default: scripts/spdxexclude')
+ ap.add_argument('-f', '--files', action='store_true',
+ help='Show files without SPDX.')
ap.add_argument('-m', '--maxlines', type=int, default=15,
help='Maximum number of lines to scan in a file. Default 15')
ap.add_argument('-v', '--verbose', action='store_true', help='Verbose statistics output')
@@ -259,6 +365,15 @@ if __name__ == '__main__':
sys.exit(1)
try:
+ fname = args.exclude
+ if not fname:
+ fname = os.path.join(os.path.dirname(__file__), 'spdxexclude')
+ exclude_rules = read_exclude_file(fname)
+ except Exception as ex:
+ sys.stderr.write('FAIL: Reading exclude file %s: %s\n' %(fname, ex))
+ sys.exit(1)
+
+ try:
if len(args.path) and args.path[0] == '-':
stdin = os.fdopen(sys.stdin.fileno(), 'rb')
parser.parse_lines(stdin, args.maxlines, '-')
@@ -268,13 +383,21 @@ if __name__ == '__main__':
if os.path.isfile(p):
parser.parse_lines(open(p, 'rb'), args.maxlines, p)
elif os.path.isdir(p):
- scan_git_subtree(repo.head.reference.commit.tree, p)
+ scan_git_subtree(repo.head.reference.commit.tree, p,
+ args.depth)
else:
sys.stderr.write('path %s does not exist\n' %p)
sys.exit(1)
else:
# Full git tree scan
- scan_git_tree(repo.head.commit.tree)
+ scan_git_tree(repo.head.commit.tree, '.', args.depth)
+
+ ndirs = len(parser.spdx_dirs)
+ dirsok = 0
+ if ndirs:
+ for di in parser.spdx_dirs.values():
+ if not di.missing:
+ dirsok += 1
if args.verbose:
sys.stderr.write('\n')
@@ -283,10 +406,38 @@ if __name__ == '__main__':
sys.stderr.write('License IDs %12d\n' %len(spdx.licenses))
sys.stderr.write('Exception IDs %12d\n' %len(spdx.exceptions))
sys.stderr.write('\n')
+ sys.stderr.write('Files excluded: %12d\n' %parser.excluded)
sys.stderr.write('Files checked: %12d\n' %parser.checked)
sys.stderr.write('Lines checked: %12d\n' %parser.lines_checked)
- sys.stderr.write('Files with SPDX: %12d\n' %parser.spdx_valid)
+ if parser.checked:
+ pc = int(100 * parser.spdx_valid / parser.checked)
+ sys.stderr.write('Files with SPDX: %12d %3d%%\n' %(parser.spdx_valid, pc))
sys.stderr.write('Files with errors: %12d\n' %parser.spdx_errors)
+ if ndirs:
+ sys.stderr.write('\n')
+ sys.stderr.write('Directories accounted: %8d\n' %ndirs)
+ pc = int(100 * dirsok / ndirs)
+ sys.stderr.write('Directories complete: %8d %3d%%\n' %(dirsok, pc))
+
+ if ndirs and ndirs != dirsok and args.dirs:
+ if args.verbose:
+ sys.stderr.write('\n')
+ sys.stderr.write('Incomplete directories: SPDX in Files\n')
+ for f in sorted(parser.spdx_dirs.keys()):
+ di = parser.spdx_dirs[f]
+ if di.missing:
+ valid = di.total - di.missing
+ pc = int(100 * valid / di.total)
+ sys.stderr.write(' %-80s: %5d of %5d %3d%%\n' %(f, valid, di.total, pc))
+
+ if ndirs and ndirs != dirsok and args.files:
+ if args.verbose or args.dirs:
+ sys.stderr.write('\n')
+ sys.stderr.write('Files without SPDX:\n')
+ for f in sorted(parser.spdx_dirs.keys()):
+ di = parser.spdx_dirs[f]
+ for f in sorted(di.files):
+ sys.stderr.write(' %s\n' %f)
sys.exit(0)
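As a usage note, the new statistics and exclude options can be combined; for example (the subtree path is arbitrary):

  $ python3 scripts/spdxcheck.py -v -d -D 1 -f drivers/soc

prints the verbose totals plus per-directory coverage one level below drivers/soc and lists the files there that still lack an SPDX tag.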
diff --git a/scripts/spdxexclude b/scripts/spdxexclude
new file mode 100644
index 000000000000..81bdb13ed789
--- /dev/null
+++ b/scripts/spdxexclude
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Patterns for excluding files and directories
+
+# Ignore the license directory and the licensing documentation which would
+# create lots of noise for no value
+LICENSES/
+license-rules.rst
+
+# Ignore config files and snippets. The majority is generated
+# by the Kconfig tools
+kernel/configs/
+arch/*/configs/
+
+# Other files without copyrightable content
+/CREDITS
+/MAINTAINERS
+/README
diff --git a/scripts/subarch.include b/scripts/subarch.include
index 776849a3c500..4bd327d0ae42 100644
--- a/scripts/subarch.include
+++ b/scripts/subarch.include
@@ -10,4 +10,4 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
-e s/s390x/s390/ \
-e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
- -e s/riscv.*/riscv/)
+ -e s/riscv.*/riscv/ -e s/loongarch.*/loongarch/)
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 16d475b3e203..01fab3d4f90b 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -95,10 +95,13 @@ all_sources()
all_compiled_sources()
{
- realpath -es $([ -z "$KBUILD_ABS_SRCTREE" ] && echo --relative-to=.) \
- include/generated/autoconf.h $(find $ignore -name "*.cmd" -exec \
- grep -Poh '(?(?=^source_.* \K).*|(?=^ \K\S).*(?= \\))' {} \+ |
- awk '!a[$0]++') | sort -u
+ {
+ echo include/generated/autoconf.h
+ find $ignore -name "*.cmd" -exec \
+ grep -Poh '(?(?=^source_.* \K).*|(?=^ \K\S).*(?= \\))' {} \+ |
+ awk '!a[$0]++'
+ } | xargs realpath -es $([ -z "$KBUILD_ABS_SRCTREE" ] && echo --relative-to=.) |
+ sort -u
}
all_target_sources()
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 9e61014073cc..4b58526450d4 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -23,6 +23,7 @@
#include <linux/ctype.h>
#include <linux/audit.h>
#include <linux/magic.h>
+#include <linux/mount.h>
#include <linux/fs_context.h>
#include "smack.h"
diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c
index 58274b4a1f09..e55c0421718b 100644
--- a/sound/arm/pxa2xx-ac97-lib.c
+++ b/sound/arm/pxa2xx-ac97-lib.c
@@ -17,12 +17,13 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/soc/pxa/cpu.h>
#include <sound/pxa2xx-lib.h>
-#include <mach/irqs.h>
-#include <mach/regs-ac97.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
+
+#include "pxa2xx-ac97-regs.h"
static DEFINE_MUTEX(car_mutex);
static DECLARE_WAIT_QUEUE_HEAD(gsr_wq);
@@ -30,6 +31,7 @@ static volatile long gsr_bits;
static struct clk *ac97_clk;
static struct clk *ac97conf_clk;
static int reset_gpio;
+static void __iomem *ac97_reg_base;
extern void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio);
@@ -46,7 +48,7 @@ extern void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio);
int pxa2xx_ac97_read(int slot, unsigned short reg)
{
int val = -ENODEV;
- volatile u32 *reg_addr;
+ u32 __iomem *reg_addr;
if (slot > 0)
return -ENODEV;
@@ -55,31 +57,33 @@ int pxa2xx_ac97_read(int slot, unsigned short reg)
/* set up primary or secondary codec space */
if (cpu_is_pxa25x() && reg == AC97_GPIO_STATUS)
- reg_addr = slot ? &SMC_REG_BASE : &PMC_REG_BASE;
+ reg_addr = ac97_reg_base +
+ (slot ? SMC_REG_BASE : PMC_REG_BASE);
else
- reg_addr = slot ? &SAC_REG_BASE : &PAC_REG_BASE;
+ reg_addr = ac97_reg_base +
+ (slot ? SAC_REG_BASE : PAC_REG_BASE);
reg_addr += (reg >> 1);
/* start read access across the ac97 link */
- GSR = GSR_CDONE | GSR_SDONE;
+ writel(GSR_CDONE | GSR_SDONE, ac97_reg_base + GSR);
gsr_bits = 0;
- val = (*reg_addr & 0xffff);
+ val = (readl(reg_addr) & 0xffff);
if (reg == AC97_GPIO_STATUS)
goto out;
- if (wait_event_timeout(gsr_wq, (GSR | gsr_bits) & GSR_SDONE, 1) <= 0 &&
- !((GSR | gsr_bits) & GSR_SDONE)) {
+ if (wait_event_timeout(gsr_wq, (readl(ac97_reg_base + GSR) | gsr_bits) & GSR_SDONE, 1) <= 0 &&
+ !((readl(ac97_reg_base + GSR) | gsr_bits) & GSR_SDONE)) {
printk(KERN_ERR "%s: read error (ac97_reg=%d GSR=%#lx)\n",
- __func__, reg, GSR | gsr_bits);
+ __func__, reg, readl(ac97_reg_base + GSR) | gsr_bits);
val = -ETIMEDOUT;
goto out;
}
/* valid data now */
- GSR = GSR_CDONE | GSR_SDONE;
+ writel(GSR_CDONE | GSR_SDONE, ac97_reg_base + GSR);
gsr_bits = 0;
- val = (*reg_addr & 0xffff);
+ val = (readl(reg_addr) & 0xffff);
/* but we've just started another cycle... */
- wait_event_timeout(gsr_wq, (GSR | gsr_bits) & GSR_SDONE, 1);
+ wait_event_timeout(gsr_wq, (readl(ac97_reg_base + GSR) | gsr_bits) & GSR_SDONE, 1);
out: mutex_unlock(&car_mutex);
return val;
@@ -88,25 +92,27 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_read);
int pxa2xx_ac97_write(int slot, unsigned short reg, unsigned short val)
{
- volatile u32 *reg_addr;
+ u32 __iomem *reg_addr;
int ret = 0;
mutex_lock(&car_mutex);
/* set up primary or secondary codec space */
if (cpu_is_pxa25x() && reg == AC97_GPIO_STATUS)
- reg_addr = slot ? &SMC_REG_BASE : &PMC_REG_BASE;
+ reg_addr = ac97_reg_base +
+ (slot ? SMC_REG_BASE : PMC_REG_BASE);
else
- reg_addr = slot ? &SAC_REG_BASE : &PAC_REG_BASE;
+ reg_addr = ac97_reg_base +
+ (slot ? SAC_REG_BASE : PAC_REG_BASE);
reg_addr += (reg >> 1);
- GSR = GSR_CDONE | GSR_SDONE;
+ writel(GSR_CDONE | GSR_SDONE, ac97_reg_base + GSR);
gsr_bits = 0;
- *reg_addr = val;
- if (wait_event_timeout(gsr_wq, (GSR | gsr_bits) & GSR_CDONE, 1) <= 0 &&
- !((GSR | gsr_bits) & GSR_CDONE)) {
+ writel(val, reg_addr);
+ if (wait_event_timeout(gsr_wq, (readl(ac97_reg_base + GSR) | gsr_bits) & GSR_CDONE, 1) <= 0 &&
+ !((readl(ac97_reg_base + GSR) | gsr_bits) & GSR_CDONE)) {
printk(KERN_ERR "%s: write error (ac97_reg=%d GSR=%#lx)\n",
- __func__, reg, GSR | gsr_bits);
+ __func__, reg, readl(ac97_reg_base + GSR) | gsr_bits);
ret = -EIO;
}
@@ -120,17 +126,17 @@ static inline void pxa_ac97_warm_pxa25x(void)
{
gsr_bits = 0;
- GCR |= GCR_WARM_RST;
+ writel(readl(ac97_reg_base + GCR) | (GCR_WARM_RST), ac97_reg_base + GCR);
}
static inline void pxa_ac97_cold_pxa25x(void)
{
- GCR &= GCR_COLD_RST; /* clear everything but nCRST */
- GCR &= ~GCR_COLD_RST; /* then assert nCRST */
+ writel(readl(ac97_reg_base + GCR) & ( GCR_COLD_RST), ac97_reg_base + GCR); /* clear everything but nCRST */
+ writel(readl(ac97_reg_base + GCR) & (~GCR_COLD_RST), ac97_reg_base + GCR); /* then assert nCRST */
gsr_bits = 0;
- GCR = GCR_COLD_RST;
+ writel(GCR_COLD_RST, ac97_reg_base + GCR);
}
#endif
@@ -142,15 +148,15 @@ static inline void pxa_ac97_warm_pxa27x(void)
/* warm reset broken on Bulverde, so manually keep AC97 reset high */
pxa27x_configure_ac97reset(reset_gpio, true);
udelay(10);
- GCR |= GCR_WARM_RST;
+ writel(readl(ac97_reg_base + GCR) | (GCR_WARM_RST), ac97_reg_base + GCR);
pxa27x_configure_ac97reset(reset_gpio, false);
udelay(500);
}
static inline void pxa_ac97_cold_pxa27x(void)
{
- GCR &= GCR_COLD_RST; /* clear everything but nCRST */
- GCR &= ~GCR_COLD_RST; /* then assert nCRST */
+ writel(readl(ac97_reg_base + GCR) & ( GCR_COLD_RST), ac97_reg_base + GCR); /* clear everything but nCRST */
+ writel(readl(ac97_reg_base + GCR) & (~GCR_COLD_RST), ac97_reg_base + GCR); /* then assert nCRST */
gsr_bits = 0;
@@ -158,7 +164,7 @@ static inline void pxa_ac97_cold_pxa27x(void)
clk_prepare_enable(ac97conf_clk);
udelay(5);
clk_disable_unprepare(ac97conf_clk);
- GCR = GCR_COLD_RST | GCR_WARM_RST;
+ writel(GCR_COLD_RST | GCR_WARM_RST, ac97_reg_base + GCR);
}
#endif
@@ -168,26 +174,26 @@ static inline void pxa_ac97_warm_pxa3xx(void)
gsr_bits = 0;
/* Can't use interrupts */
- GCR |= GCR_WARM_RST;
+ writel(readl(ac97_reg_base + GCR) | (GCR_WARM_RST), ac97_reg_base + GCR);
}
static inline void pxa_ac97_cold_pxa3xx(void)
{
/* Hold CLKBPB for 100us */
- GCR = 0;
- GCR = GCR_CLKBPB;
+ writel(0, ac97_reg_base + GCR);
+ writel(GCR_CLKBPB, ac97_reg_base + GCR);
udelay(100);
- GCR = 0;
+ writel(0, ac97_reg_base + GCR);
- GCR &= GCR_COLD_RST; /* clear everything but nCRST */
- GCR &= ~GCR_COLD_RST; /* then assert nCRST */
+ writel(readl(ac97_reg_base + GCR) & ( GCR_COLD_RST), ac97_reg_base + GCR); /* clear everything but nCRST */
+ writel(readl(ac97_reg_base + GCR) & (~GCR_COLD_RST), ac97_reg_base + GCR); /* then assert nCRST */
gsr_bits = 0;
/* Can't use interrupts on PXA3xx */
- GCR &= ~(GCR_PRIRDY_IEN|GCR_SECRDY_IEN);
+ writel(readl(ac97_reg_base + GCR) & (~(GCR_PRIRDY_IEN|GCR_SECRDY_IEN)), ac97_reg_base + GCR);
- GCR = GCR_WARM_RST | GCR_COLD_RST;
+ writel(GCR_WARM_RST | GCR_COLD_RST, ac97_reg_base + GCR);
}
#endif
@@ -213,10 +219,10 @@ bool pxa2xx_ac97_try_warm_reset(void)
#endif
snd_BUG();
- while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
+ while (!((readl(ac97_reg_base + GSR) | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
mdelay(1);
- gsr = GSR | gsr_bits;
+ gsr = readl(ac97_reg_base + GSR) | gsr_bits;
if (!(gsr & (GSR_PCR | GSR_SCR))) {
printk(KERN_INFO "%s: warm reset timeout (GSR=%#lx)\n",
__func__, gsr);
@@ -250,10 +256,10 @@ bool pxa2xx_ac97_try_cold_reset(void)
#endif
snd_BUG();
- while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
+ while (!((readl(ac97_reg_base + GSR) | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
mdelay(1);
- gsr = GSR | gsr_bits;
+ gsr = readl(ac97_reg_base + GSR) | gsr_bits;
if (!(gsr & (GSR_PCR | GSR_SCR))) {
printk(KERN_INFO "%s: cold reset timeout (GSR=%#lx)\n",
__func__, gsr);
@@ -268,8 +274,10 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_try_cold_reset);
void pxa2xx_ac97_finish_reset(void)
{
- GCR &= ~(GCR_PRIRDY_IEN|GCR_SECRDY_IEN);
- GCR |= GCR_SDONE_IE|GCR_CDONE_IE;
+ u32 gcr = readl(ac97_reg_base + GCR);
+ gcr &= ~(GCR_PRIRDY_IEN|GCR_SECRDY_IEN);
+ gcr |= GCR_SDONE_IE|GCR_CDONE_IE;
+ writel(gcr, ac97_reg_base + GCR);
}
EXPORT_SYMBOL_GPL(pxa2xx_ac97_finish_reset);
@@ -277,9 +285,9 @@ static irqreturn_t pxa2xx_ac97_irq(int irq, void *dev_id)
{
long status;
- status = GSR;
+ status = readl(ac97_reg_base + GSR);
if (status) {
- GSR = status;
+ writel(status, ac97_reg_base + GSR);
gsr_bits |= status;
wake_up(&gsr_wq);
@@ -287,9 +295,9 @@ static irqreturn_t pxa2xx_ac97_irq(int irq, void *dev_id)
since they tend to spuriously trigger when MMC is used
(hardware bug? go figure)... */
if (cpu_is_pxa27x()) {
- MISR = MISR_EOC;
- PISR = PISR_EOC;
- MCSR = MCSR_EOC;
+ writel(MISR_EOC, ac97_reg_base + MISR);
+ writel(PISR_EOC, ac97_reg_base + PISR);
+ writel(MCSR_EOC, ac97_reg_base + MCSR);
}
return IRQ_HANDLED;
@@ -301,7 +309,7 @@ static irqreturn_t pxa2xx_ac97_irq(int irq, void *dev_id)
#ifdef CONFIG_PM
int pxa2xx_ac97_hw_suspend(void)
{
- GCR |= GCR_ACLINK_OFF;
+ writel(readl(ac97_reg_base + GCR) | (GCR_ACLINK_OFF), ac97_reg_base + GCR);
clk_disable_unprepare(ac97_clk);
return 0;
}
@@ -318,8 +326,15 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_resume);
int pxa2xx_ac97_hw_probe(struct platform_device *dev)
{
int ret;
+ int irq;
pxa2xx_audio_ops_t *pdata = dev->dev.platform_data;
+ ac97_reg_base = devm_platform_ioremap_resource(dev, 0);
+ if (IS_ERR(ac97_reg_base)) {
+ dev_err(&dev->dev, "Missing MMIO resource\n");
+ return PTR_ERR(ac97_reg_base);
+ }
+
if (pdata) {
switch (pdata->reset_gpio) {
case 95:
@@ -386,14 +401,18 @@ int pxa2xx_ac97_hw_probe(struct platform_device *dev)
if (ret)
goto err_clk2;
- ret = request_irq(IRQ_AC97, pxa2xx_ac97_irq, 0, "AC97", NULL);
+ irq = platform_get_irq(dev, 0);
+ if (!irq)
+ goto err_irq;
+
+ ret = request_irq(irq, pxa2xx_ac97_irq, 0, "AC97", NULL);
if (ret < 0)
goto err_irq;
return 0;
err_irq:
- GCR |= GCR_ACLINK_OFF;
+ writel(readl(ac97_reg_base + GCR) | (GCR_ACLINK_OFF), ac97_reg_base + GCR);
err_clk2:
clk_put(ac97_clk);
ac97_clk = NULL;
@@ -411,8 +430,8 @@ void pxa2xx_ac97_hw_remove(struct platform_device *dev)
{
if (cpu_is_pxa27x())
gpio_free(reset_gpio);
- GCR |= GCR_ACLINK_OFF;
- free_irq(IRQ_AC97, NULL);
+ writel(readl(ac97_reg_base + GCR) | (GCR_ACLINK_OFF), ac97_reg_base + GCR);
+ free_irq(platform_get_irq(dev, 0), NULL);
if (ac97conf_clk) {
clk_put(ac97conf_clk);
ac97conf_clk = NULL;
@@ -423,6 +442,24 @@ void pxa2xx_ac97_hw_remove(struct platform_device *dev)
}
EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_remove);
+u32 pxa2xx_ac97_read_modr(void)
+{
+ if (!ac97_reg_base)
+ return 0;
+
+ return readl(ac97_reg_base + MODR);
+}
+EXPORT_SYMBOL_GPL(pxa2xx_ac97_read_modr);
+
+u32 pxa2xx_ac97_read_misr(void)
+{
+ if (!ac97_reg_base)
+ return 0;
+
+ return readl(ac97_reg_base + MISR);
+}
+EXPORT_SYMBOL_GPL(pxa2xx_ac97_read_misr);
+
MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("Intel/Marvell PXA sound library");
MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-pxa/include/mach/regs-ac97.h b/sound/arm/pxa2xx-ac97-regs.h
index 1db96fd4df32..ae638a1b919b 100644
--- a/arch/arm/mach-pxa/include/mach/regs-ac97.h
+++ b/sound/arm/pxa2xx-ac97-regs.h
@@ -2,25 +2,23 @@
#ifndef __ASM_ARCH_REGS_AC97_H
#define __ASM_ARCH_REGS_AC97_H
-#include <mach/hardware.h>
-
/*
* AC97 Controller registers
*/
-#define POCR __REG(0x40500000) /* PCM Out Control Register */
+#define POCR (0x0000) /* PCM Out Control Register */
#define POCR_FEIE (1 << 3) /* FIFO Error Interrupt Enable */
#define POCR_FSRIE (1 << 1) /* FIFO Service Request Interrupt Enable */
-#define PICR __REG(0x40500004) /* PCM In Control Register */
+#define PICR (0x0004) /* PCM In Control Register */
#define PICR_FEIE (1 << 3) /* FIFO Error Interrupt Enable */
#define PICR_FSRIE (1 << 1) /* FIFO Service Request Interrupt Enable */
-#define MCCR __REG(0x40500008) /* Mic In Control Register */
+#define MCCR (0x0008) /* Mic In Control Register */
#define MCCR_FEIE (1 << 3) /* FIFO Error Interrupt Enable */
#define MCCR_FSRIE (1 << 1) /* FIFO Service Request Interrupt Enable */
-#define GCR __REG(0x4050000C) /* Global Control Register */
+#define GCR (0x000C) /* Global Control Register */
#ifdef CONFIG_PXA3xx
#define GCR_CLKBPB (1 << 31) /* Internal clock enable */
#endif
@@ -36,21 +34,21 @@
#define GCR_COLD_RST (1 << 1) /* AC'97 Cold Reset (0 = active) */
#define GCR_GIE (1 << 0) /* Codec GPI Interrupt Enable */
-#define POSR __REG(0x40500010) /* PCM Out Status Register */
+#define POSR (0x0010) /* PCM Out Status Register */
#define POSR_FIFOE (1 << 4) /* FIFO error */
#define POSR_FSR (1 << 2) /* FIFO Service Request */
-#define PISR __REG(0x40500014) /* PCM In Status Register */
+#define PISR (0x0014) /* PCM In Status Register */
#define PISR_FIFOE (1 << 4) /* FIFO error */
#define PISR_EOC (1 << 3) /* DMA End-of-Chain (exclusive clear) */
#define PISR_FSR (1 << 2) /* FIFO Service Request */
-#define MCSR __REG(0x40500018) /* Mic In Status Register */
+#define MCSR (0x0018) /* Mic In Status Register */
#define MCSR_FIFOE (1 << 4) /* FIFO error */
#define MCSR_EOC (1 << 3) /* DMA End-of-Chain (exclusive clear) */
#define MCSR_FSR (1 << 2) /* FIFO Service Request */
-#define GSR __REG(0x4050001C) /* Global Status Register */
+#define GSR (0x001C) /* Global Status Register */
#define GSR_CDONE (1 << 19) /* Command Done */
#define GSR_SDONE (1 << 18) /* Status Done */
#define GSR_RDCS (1 << 15) /* Read Completion Status */
@@ -69,34 +67,34 @@
#define GSR_MIINT (1 << 1) /* Modem In Interrupt */
#define GSR_GSCI (1 << 0) /* Codec GPI Status Change Interrupt */
-#define CAR __REG(0x40500020) /* CODEC Access Register */
+#define CAR (0x0020) /* CODEC Access Register */
#define CAR_CAIP (1 << 0) /* Codec Access In Progress */
-#define PCDR __REG(0x40500040) /* PCM FIFO Data Register */
-#define MCDR __REG(0x40500060) /* Mic-in FIFO Data Register */
+#define PCDR (0x0040) /* PCM FIFO Data Register */
+#define MCDR (0x0060) /* Mic-in FIFO Data Register */
-#define MOCR __REG(0x40500100) /* Modem Out Control Register */
+#define MOCR (0x0100) /* Modem Out Control Register */
#define MOCR_FEIE (1 << 3) /* FIFO Error */
#define MOCR_FSRIE (1 << 1) /* FIFO Service Request Interrupt Enable */
-#define MICR __REG(0x40500108) /* Modem In Control Register */
+#define MICR (0x0108) /* Modem In Control Register */
#define MICR_FEIE (1 << 3) /* FIFO Error */
#define MICR_FSRIE (1 << 1) /* FIFO Service Request Interrupt Enable */
-#define MOSR __REG(0x40500110) /* Modem Out Status Register */
+#define MOSR (0x0110) /* Modem Out Status Register */
#define MOSR_FIFOE (1 << 4) /* FIFO error */
#define MOSR_FSR (1 << 2) /* FIFO Service Request */
-#define MISR __REG(0x40500118) /* Modem In Status Register */
+#define MISR (0x0118) /* Modem In Status Register */
#define MISR_FIFOE (1 << 4) /* FIFO error */
#define MISR_EOC (1 << 3) /* DMA End-of-Chain (exclusive clear) */
#define MISR_FSR (1 << 2) /* FIFO Service Request */
-#define MODR __REG(0x40500140) /* Modem FIFO Data Register */
+#define MODR (0x0140) /* Modem FIFO Data Register */
-#define PAC_REG_BASE __REG(0x40500200) /* Primary Audio Codec */
-#define SAC_REG_BASE __REG(0x40500300) /* Secondary Audio Codec */
-#define PMC_REG_BASE __REG(0x40500400) /* Primary Modem Codec */
-#define SMC_REG_BASE __REG(0x40500500) /* Secondary Modem Codec */
+#define PAC_REG_BASE (0x0200) /* Primary Audio Codec */
+#define SAC_REG_BASE (0x0300) /* Secondary Audio Codec */
+#define PMC_REG_BASE (0x0400) /* Primary Modem Codec */
+#define SMC_REG_BASE (0x0500) /* Secondary Modem Codec */
#endif /* __ASM_ARCH_REGS_AC97_H */
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index c17a19fe59ed..c162086455ad 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -21,8 +21,7 @@
#include <sound/pxa2xx-lib.h>
#include <sound/dmaengine_pcm.h>
-#include <mach/regs-ac97.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
static void pxa2xx_ac97_legacy_reset(struct snd_ac97 *ac97)
{
diff --git a/sound/core/Makefile b/sound/core/Makefile
index 350d704ced98..2762f03d9b7b 100644
--- a/sound/core/Makefile
+++ b/sound/core/Makefile
@@ -9,9 +9,7 @@ ifneq ($(CONFIG_SND_PROC_FS),)
snd-y += info.o
snd-$(CONFIG_SND_OSSEMUL) += info_oss.o
endif
-ifneq ($(CONFIG_M68K),y)
snd-$(CONFIG_ISA_DMA_API) += isadma.o
-endif
snd-$(CONFIG_SND_OSSEMUL) += sound_oss.o
snd-$(CONFIG_SND_VMASTER) += vmaster.o
snd-$(CONFIG_SND_JACK) += ctljack.o jack.o
diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig
index 570b88e0b201..6ffa48dd5983 100644
--- a/sound/isa/Kconfig
+++ b/sound/isa/Kconfig
@@ -22,7 +22,7 @@ config SND_SB16_DSP
menuconfig SND_ISA
bool "ISA sound devices"
depends on ISA || COMPILE_TEST
- depends on ISA_DMA_API && !M68K
+ depends on ISA_DMA_API
default y
help
Support for sound devices connected via the ISA bus.
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 80e4955e8c10..f3ad454b3fbf 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1981,6 +1981,7 @@ enum {
ALC1220_FIXUP_CLEVO_PB51ED_PINS,
ALC887_FIXUP_ASUS_AUDIO,
ALC887_FIXUP_ASUS_HMIC,
+ ALCS1200A_FIXUP_MIC_VREF,
};
static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2526,6 +2527,14 @@ static const struct hda_fixup alc882_fixups[] = {
.chained = true,
.chain_id = ALC887_FIXUP_ASUS_AUDIO,
},
+ [ALCS1200A_FIXUP_MIC_VREF] = {
+ .type = HDA_FIXUP_PINCTLS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, PIN_VREF50 }, /* rear mic */
+ { 0x19, PIN_VREF50 }, /* front mic */
+ {}
+ }
+ },
};
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2563,6 +2572,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
+ SND_PCI_QUIRK(0x1043, 0x8797, "ASUS TUF B550M-PLUS", ALCS1200A_FIXUP_MIC_VREF),
SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
@@ -8905,6 +8915,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0a62, "Dell Precision 5560", ALC289_FIXUP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 773a136161f1..a05304f340df 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -449,8 +449,6 @@ static void vt1708_set_pinconfig_connect(struct hda_codec *codec, hda_nid_t nid)
def_conf = def_conf & (~(AC_JACK_PORT_BOTH << 30));
snd_hda_codec_set_pincfg(codec, nid, def_conf);
}
-
- return;
}
static int vt1708_jack_detect_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/soc/amd/acp/acp-pci.c b/sound/soc/amd/acp/acp-pci.c
index 340e39d7f420..c893963ee2d0 100644
--- a/sound/soc/amd/acp/acp-pci.c
+++ b/sound/soc/amd/acp/acp-pci.c
@@ -16,6 +16,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/module.h>
#include "amd.h"
#include "../mach-config.h"
diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
index 7998fdd3b378..bba73c44c219 100644
--- a/sound/soc/codecs/da7219-aad.c
+++ b/sound/soc/codecs/da7219-aad.c
@@ -60,6 +60,9 @@ static void da7219_aad_btn_det_work(struct work_struct *work)
bool micbias_up = false;
int retries = 0;
+ /* Disable ground switch */
+ snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
+
/* Drive headphones/lineout */
snd_soc_component_update_bits(component, DA7219_HP_L_CTRL,
DA7219_HP_L_AMP_OE_MASK,
@@ -153,6 +156,9 @@ static void da7219_aad_hptest_work(struct work_struct *work)
tonegen_freq_hptest = cpu_to_le16(DA7219_AAD_HPTEST_RAMP_FREQ_INT_OSC);
}
+ /* Disable ground switch */
+ snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
+
/* Ensure gain ramping at fastest rate */
gain_ramp_ctrl = snd_soc_component_read(component, DA7219_GAIN_RAMP_CTRL);
snd_soc_component_write(component, DA7219_GAIN_RAMP_CTRL, DA7219_GAIN_RAMP_RATE_X8);
@@ -428,6 +434,10 @@ static irqreturn_t da7219_aad_irq_thread(int irq, void *data)
mask |= DA7219_AAD_REPORT_ALL_MASK;
da7219_aad->jack_inserted = false;
+ /* Cancel any pending work */
+ cancel_work_sync(&da7219_aad->btn_det_work);
+ cancel_work_sync(&da7219_aad->hptest_work);
+
/* Un-drive headphones/lineout */
snd_soc_component_update_bits(component, DA7219_HP_R_CTRL,
DA7219_HP_R_AMP_OE_MASK, 0);
@@ -444,9 +454,8 @@ static irqreturn_t da7219_aad_irq_thread(int irq, void *data)
snd_soc_dapm_disable_pin(dapm, "Mic Bias");
snd_soc_dapm_sync(dapm);
- /* Cancel any pending work */
- cancel_work_sync(&da7219_aad->btn_det_work);
- cancel_work_sync(&da7219_aad->hptest_work);
+ /* Enable ground switch */
+ snd_soc_component_update_bits(component, 0xFB, 0x01, 0x01);
}
}
@@ -899,6 +908,9 @@ int da7219_aad_init(struct snd_soc_component *component)
snd_soc_component_update_bits(component, DA7219_ACCDET_CONFIG_1,
DA7219_BUTTON_CONFIG_MASK, 0);
+ /* Enable ground switch */
+ snd_soc_component_update_bits(component, 0xFB, 0x01, 0x01);
+
INIT_WORK(&da7219_aad->btn_det_work, da7219_aad_btn_det_work);
INIT_WORK(&da7219_aad->hptest_work, da7219_aad_hptest_work);
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 12da2bea1a7b..69c80d80ed9d 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -2094,12 +2094,14 @@ EXPORT_SYMBOL_GPL(rt5640_sel_asrc_clk_src);
void rt5640_enable_micbias1_for_ovcd(struct snd_soc_component *component)
{
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
+ struct rt5640_priv *rt5640 = snd_soc_component_get_drvdata(component);
snd_soc_dapm_mutex_lock(dapm);
snd_soc_dapm_force_enable_pin_unlocked(dapm, "LDO2");
snd_soc_dapm_force_enable_pin_unlocked(dapm, "MICBIAS1");
/* OVCD is unreliable when used with RCCLK as sysclk-source */
- snd_soc_dapm_force_enable_pin_unlocked(dapm, "Platform Clock");
+ if (rt5640->use_platform_clock)
+ snd_soc_dapm_force_enable_pin_unlocked(dapm, "Platform Clock");
snd_soc_dapm_sync_unlocked(dapm);
snd_soc_dapm_mutex_unlock(dapm);
}
@@ -2108,9 +2110,11 @@ EXPORT_SYMBOL_GPL(rt5640_enable_micbias1_for_ovcd);
void rt5640_disable_micbias1_for_ovcd(struct snd_soc_component *component)
{
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
+ struct rt5640_priv *rt5640 = snd_soc_component_get_drvdata(component);
snd_soc_dapm_mutex_lock(dapm);
- snd_soc_dapm_disable_pin_unlocked(dapm, "Platform Clock");
+ if (rt5640->use_platform_clock)
+ snd_soc_dapm_disable_pin_unlocked(dapm, "Platform Clock");
snd_soc_dapm_disable_pin_unlocked(dapm, "MICBIAS1");
snd_soc_dapm_disable_pin_unlocked(dapm, "LDO2");
snd_soc_dapm_sync_unlocked(dapm);
@@ -2535,6 +2539,9 @@ static void rt5640_enable_jack_detect(struct snd_soc_component *component,
rt5640->jd_gpio_irq_requested = true;
}
+ if (jack_data && jack_data->use_platform_clock)
+ rt5640->use_platform_clock = jack_data->use_platform_clock;
+
ret = request_irq(rt5640->irq, rt5640_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"rt5640", rt5640);
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index 9e49b9a0ccaa..505c93514051 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -2155,11 +2155,13 @@ struct rt5640_priv {
bool jd_inverted;
unsigned int ovcd_th;
unsigned int ovcd_sf;
+ bool use_platform_clock;
};
struct rt5640_set_jack_data {
int codec_irq_override;
struct gpio_desc *jd_gpio;
+ bool use_platform_clock;
};
int rt5640_dmic_enable(struct snd_soc_component *component,
diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
index e4965efe6102..1c8f5ca07f9d 100644
--- a/sound/soc/fsl/fsl_sai.h
+++ b/sound/soc/fsl/fsl_sai.h
@@ -80,8 +80,8 @@
#define FSL_SAI_xCR3(tx, ofs) (tx ? FSL_SAI_TCR3(ofs) : FSL_SAI_RCR3(ofs))
#define FSL_SAI_xCR4(tx, ofs) (tx ? FSL_SAI_TCR4(ofs) : FSL_SAI_RCR4(ofs))
#define FSL_SAI_xCR5(tx, ofs) (tx ? FSL_SAI_TCR5(ofs) : FSL_SAI_RCR5(ofs))
-#define FSL_SAI_xDR(tx, ofs) (tx ? FSL_SAI_TDR(ofs) : FSL_SAI_RDR(ofs))
-#define FSL_SAI_xFR(tx, ofs) (tx ? FSL_SAI_TFR(ofs) : FSL_SAI_RFR(ofs))
+#define FSL_SAI_xDR0(tx) (tx ? FSL_SAI_TDR0 : FSL_SAI_RDR0)
+#define FSL_SAI_xFR0(tx) (tx ? FSL_SAI_TFR0 : FSL_SAI_RFR0)
#define FSL_SAI_xMR(tx) (tx ? FSL_SAI_TMR : FSL_SAI_RMR)
/* SAI Transmit/Receive Control Register */
diff --git a/sound/soc/intel/avs/board_selection.c b/sound/soc/intel/avs/board_selection.c
index 80cb0164678a..87f9c18be238 100644
--- a/sound/soc/intel/avs/board_selection.c
+++ b/sound/soc/intel/avs/board_selection.c
@@ -326,7 +326,8 @@ static int avs_register_i2s_board(struct avs_dev *adev, struct snd_soc_acpi_mach
num_ssps = adev->hw_cfg.i2s_caps.ctrl_count;
if (fls(mach->mach_params.i2s_link_mask) > num_ssps) {
dev_err(adev->dev, "Platform supports %d SSPs but board %s requires SSP%ld\n",
- num_ssps, mach->drv_name, __fls(mach->mach_params.i2s_link_mask));
+ num_ssps, mach->drv_name,
+ (unsigned long)__fls(mach->mach_params.i2s_link_mask));
return -ENODEV;
}
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 7b948a219177..ed9fa1728722 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -1191,12 +1191,14 @@ static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
{
struct snd_soc_card *card = runtime->card;
struct byt_rt5640_private *priv = snd_soc_card_get_drvdata(card);
+ struct rt5640_set_jack_data *jack_data = &priv->jack_data;
struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component;
const struct snd_soc_dapm_route *custom_map = NULL;
int num_routes = 0;
int ret;
card->dapm.idle_bias_off = true;
+ jack_data->use_platform_clock = true;
/* Start with RC clk for jack-detect (we disable MCLK below) */
if (byt_rt5640_quirk & BYT_RT5640_MCLK_EN)
diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
index e6e52c7b6803..c1385161cdc8 100644
--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
@@ -453,7 +453,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
.drv_name = "adl_mx98360a_nau8825",
.machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &adl_max98360a_amp,
- .sof_tplg_filename = "sof-adl-mx98360a-nau8825.tplg",
+ .sof_tplg_filename = "sof-adl-max98360a-nau8825.tplg",
},
{
.id = "RTL5682",
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index 8ee2dea25a8d..4489d2c8b124 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -21,8 +21,7 @@
#include <sound/soc.h>
#include <asm/mach-types.h>
-#include <mach/corgi.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include "../codecs/wm8731.h"
#include "pxa2xx-i2s.h"
@@ -41,6 +40,9 @@
static int corgi_jack_func;
static int corgi_spk_func;
+static struct gpio_desc *gpiod_mute_l, *gpiod_mute_r,
+ *gpiod_apm_on, *gpiod_mic_bias;
+
static void corgi_ext_control(struct snd_soc_dapm_context *dapm)
{
snd_soc_dapm_mutex_lock(dapm);
@@ -49,8 +51,8 @@ static void corgi_ext_control(struct snd_soc_dapm_context *dapm)
switch (corgi_jack_func) {
case CORGI_HP:
/* set = unmute headphone */
- gpio_set_value(CORGI_GPIO_MUTE_L, 1);
- gpio_set_value(CORGI_GPIO_MUTE_R, 1);
+ gpiod_set_value(gpiod_mute_l, 1);
+ gpiod_set_value(gpiod_mute_r, 1);
snd_soc_dapm_disable_pin_unlocked(dapm, "Mic Jack");
snd_soc_dapm_disable_pin_unlocked(dapm, "Line Jack");
snd_soc_dapm_enable_pin_unlocked(dapm, "Headphone Jack");
@@ -58,24 +60,24 @@ static void corgi_ext_control(struct snd_soc_dapm_context *dapm)
break;
case CORGI_MIC:
/* reset = mute headphone */
- gpio_set_value(CORGI_GPIO_MUTE_L, 0);
- gpio_set_value(CORGI_GPIO_MUTE_R, 0);
+ gpiod_set_value(gpiod_mute_l, 0);
+ gpiod_set_value(gpiod_mute_r, 0);
snd_soc_dapm_enable_pin_unlocked(dapm, "Mic Jack");
snd_soc_dapm_disable_pin_unlocked(dapm, "Line Jack");
snd_soc_dapm_disable_pin_unlocked(dapm, "Headphone Jack");
snd_soc_dapm_disable_pin_unlocked(dapm, "Headset Jack");
break;
case CORGI_LINE:
- gpio_set_value(CORGI_GPIO_MUTE_L, 0);
- gpio_set_value(CORGI_GPIO_MUTE_R, 0);
+ gpiod_set_value(gpiod_mute_l, 0);
+ gpiod_set_value(gpiod_mute_r, 0);
snd_soc_dapm_disable_pin_unlocked(dapm, "Mic Jack");
snd_soc_dapm_enable_pin_unlocked(dapm, "Line Jack");
snd_soc_dapm_disable_pin_unlocked(dapm, "Headphone Jack");
snd_soc_dapm_disable_pin_unlocked(dapm, "Headset Jack");
break;
case CORGI_HEADSET:
- gpio_set_value(CORGI_GPIO_MUTE_L, 0);
- gpio_set_value(CORGI_GPIO_MUTE_R, 1);
+ gpiod_set_value(gpiod_mute_l, 0);
+ gpiod_set_value(gpiod_mute_r, 1);
snd_soc_dapm_enable_pin_unlocked(dapm, "Mic Jack");
snd_soc_dapm_disable_pin_unlocked(dapm, "Line Jack");
snd_soc_dapm_disable_pin_unlocked(dapm, "Headphone Jack");
@@ -108,8 +110,8 @@ static int corgi_startup(struct snd_pcm_substream *substream)
static void corgi_shutdown(struct snd_pcm_substream *substream)
{
/* set = unmute headphone */
- gpio_set_value(CORGI_GPIO_MUTE_L, 1);
- gpio_set_value(CORGI_GPIO_MUTE_R, 1);
+ gpiod_set_value(gpiod_mute_l, 1);
+ gpiod_set_value(gpiod_mute_r, 1);
}
static int corgi_hw_params(struct snd_pcm_substream *substream,
@@ -199,14 +201,14 @@ static int corgi_set_spk(struct snd_kcontrol *kcontrol,
static int corgi_amp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
- gpio_set_value(CORGI_GPIO_APM_ON, SND_SOC_DAPM_EVENT_ON(event));
+ gpiod_set_value(gpiod_apm_on, SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
static int corgi_mic_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
- gpio_set_value(CORGI_GPIO_MIC_BIAS, SND_SOC_DAPM_EVENT_ON(event));
+ gpiod_set_value(gpiod_mic_bias, SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
@@ -293,6 +295,19 @@ static int corgi_probe(struct platform_device *pdev)
card->dev = &pdev->dev;
+ gpiod_mute_l = devm_gpiod_get(&pdev->dev, "mute-l", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod_mute_l))
+ return PTR_ERR(gpiod_mute_l);
+ gpiod_mute_r = devm_gpiod_get(&pdev->dev, "mute-r", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod_mute_r))
+ return PTR_ERR(gpiod_mute_r);
+ gpiod_apm_on = devm_gpiod_get(&pdev->dev, "apm-on", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod_apm_on))
+ return PTR_ERR(gpiod_apm_on);
+ gpiod_mic_bias = devm_gpiod_get(&pdev->dev, "mic-bias", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod_mic_bias))
+ return PTR_ERR(gpiod_mic_bias);
+
ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret)
dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
index eafa1482afbe..4e0e9b778d4c 100644
--- a/sound/soc/pxa/e740_wm9705.c
+++ b/sound/soc/pxa/e740_wm9705.c
@@ -7,17 +7,19 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
-#include <mach/audio.h>
-#include <mach/eseries-gpio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include <asm/mach-types.h>
+static struct gpio_desc *gpiod_output_amp, *gpiod_input_amp;
+static struct gpio_desc *gpiod_audio_power;
+
#define E740_AUDIO_OUT 1
#define E740_AUDIO_IN 2
@@ -25,9 +27,9 @@ static int e740_audio_power;
static void e740_sync_audio_power(int status)
{
- gpio_set_value(GPIO_E740_WM9705_nAVDD2, !status);
- gpio_set_value(GPIO_E740_AMP_ON, (status & E740_AUDIO_OUT) ? 1 : 0);
- gpio_set_value(GPIO_E740_MIC_ON, (status & E740_AUDIO_IN) ? 1 : 0);
+ gpiod_set_value(gpiod_audio_power, !status);
+ gpiod_set_value(gpiod_output_amp, (status & E740_AUDIO_OUT) ? 1 : 0);
+ gpiod_set_value(gpiod_input_amp, (status & E740_AUDIO_IN) ? 1 : 0);
}
static int e740_mic_amp_event(struct snd_soc_dapm_widget *w,
@@ -116,36 +118,35 @@ static struct snd_soc_card e740 = {
.fully_routed = true,
};
-static struct gpio e740_audio_gpios[] = {
- { GPIO_E740_MIC_ON, GPIOF_OUT_INIT_LOW, "Mic amp" },
- { GPIO_E740_AMP_ON, GPIOF_OUT_INIT_LOW, "Output amp" },
- { GPIO_E740_WM9705_nAVDD2, GPIOF_OUT_INIT_HIGH, "Audio power" },
-};
-
static int e740_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &e740;
int ret;
- ret = gpio_request_array(e740_audio_gpios,
- ARRAY_SIZE(e740_audio_gpios));
+ gpiod_input_amp = devm_gpiod_get(&pdev->dev, "Mic amp", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(gpiod_input_amp);
+ if (ret)
+ return ret;
+ gpiod_output_amp = devm_gpiod_get(&pdev->dev, "Output amp", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(gpiod_output_amp);
+ if (ret)
+ return ret;
+ gpiod_audio_power = devm_gpiod_get(&pdev->dev, "Audio power", GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(gpiod_audio_power);
if (ret)
return ret;
card->dev = &pdev->dev;
ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret) {
+ if (ret)
dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
ret);
- gpio_free_array(e740_audio_gpios, ARRAY_SIZE(e740_audio_gpios));
- }
return ret;
}
static int e740_remove(struct platform_device *pdev)
{
- gpio_free_array(e740_audio_gpios, ARRAY_SIZE(e740_audio_gpios));
return 0;
}
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index d75510d7b16b..7a1e0d8bfd11 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -7,24 +7,25 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
-#include <mach/audio.h>
-#include <mach/eseries-gpio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include <asm/mach-types.h>
+static struct gpio_desc *gpiod_spk_amp, *gpiod_hp_amp;
+
static int e750_spk_amp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
if (event & SND_SOC_DAPM_PRE_PMU)
- gpio_set_value(GPIO_E750_SPK_AMP_OFF, 0);
+ gpiod_set_value(gpiod_spk_amp, 1);
else if (event & SND_SOC_DAPM_POST_PMD)
- gpio_set_value(GPIO_E750_SPK_AMP_OFF, 1);
+ gpiod_set_value(gpiod_spk_amp, 0);
return 0;
}
@@ -33,9 +34,9 @@ static int e750_hp_amp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
if (event & SND_SOC_DAPM_PRE_PMU)
- gpio_set_value(GPIO_E750_HP_AMP_OFF, 0);
+ gpiod_set_value(gpiod_hp_amp, 1);
else if (event & SND_SOC_DAPM_POST_PMD)
- gpio_set_value(GPIO_E750_HP_AMP_OFF, 1);
+ gpiod_set_value(gpiod_hp_amp, 0);
return 0;
}
@@ -100,35 +101,31 @@ static struct snd_soc_card e750 = {
.fully_routed = true,
};
-static struct gpio e750_audio_gpios[] = {
- { GPIO_E750_HP_AMP_OFF, GPIOF_OUT_INIT_HIGH, "Headphone amp" },
- { GPIO_E750_SPK_AMP_OFF, GPIOF_OUT_INIT_HIGH, "Speaker amp" },
-};
-
static int e750_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &e750;
int ret;
- ret = gpio_request_array(e750_audio_gpios,
- ARRAY_SIZE(e750_audio_gpios));
+ gpiod_hp_amp = devm_gpiod_get(&pdev->dev, "Headphone amp", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(gpiod_hp_amp);
+ if (ret)
+ return ret;
+ gpiod_spk_amp = devm_gpiod_get(&pdev->dev, "Speaker amp", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(gpiod_spk_amp);
if (ret)
return ret;
card->dev = &pdev->dev;
ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret) {
+ if (ret)
dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
ret);
- gpio_free_array(e750_audio_gpios, ARRAY_SIZE(e750_audio_gpios));
- }
return ret;
}
static int e750_remove(struct platform_device *pdev)
{
- gpio_free_array(e750_audio_gpios, ARRAY_SIZE(e750_audio_gpios));
return 0;
}
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
index 56d543da938a..a39c494127cf 100644
--- a/sound/soc/pxa/e800_wm9712.c
+++ b/sound/soc/pxa/e800_wm9712.c
@@ -7,23 +7,24 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-types.h>
-#include <mach/audio.h>
-#include <mach/eseries-gpio.h>
+#include <linux/platform_data/asoc-pxa.h>
+
+static struct gpio_desc *gpiod_spk_amp, *gpiod_hp_amp;
static int e800_spk_amp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
if (event & SND_SOC_DAPM_PRE_PMU)
- gpio_set_value(GPIO_E800_SPK_AMP_ON, 1);
+ gpiod_set_value(gpiod_spk_amp, 1);
else if (event & SND_SOC_DAPM_POST_PMD)
- gpio_set_value(GPIO_E800_SPK_AMP_ON, 0);
+ gpiod_set_value(gpiod_spk_amp, 0);
return 0;
}
@@ -32,9 +33,9 @@ static int e800_hp_amp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
if (event & SND_SOC_DAPM_PRE_PMU)
- gpio_set_value(GPIO_E800_HP_AMP_OFF, 0);
+ gpiod_set_value(gpiod_hp_amp, 1);
else if (event & SND_SOC_DAPM_POST_PMD)
- gpio_set_value(GPIO_E800_HP_AMP_OFF, 1);
+ gpiod_set_value(gpiod_hp_amp, 0);
return 0;
}
@@ -100,35 +101,31 @@ static struct snd_soc_card e800 = {
.num_dapm_routes = ARRAY_SIZE(audio_map),
};
-static struct gpio e800_audio_gpios[] = {
- { GPIO_E800_SPK_AMP_ON, GPIOF_OUT_INIT_HIGH, "Headphone amp" },
- { GPIO_E800_HP_AMP_OFF, GPIOF_OUT_INIT_HIGH, "Speaker amp" },
-};
-
static int e800_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &e800;
int ret;
- ret = gpio_request_array(e800_audio_gpios,
- ARRAY_SIZE(e800_audio_gpios));
+ gpiod_hp_amp = devm_gpiod_get(&pdev->dev, "Headphone amp", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(gpiod_hp_amp);
+ if (ret)
+ return ret;
+ gpiod_spk_amp = devm_gpiod_get(&pdev->dev, "Speaker amp", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(gpiod_spk_amp);
if (ret)
return ret;
card->dev = &pdev->dev;
ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret) {
+ if (ret)
dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
ret);
- gpio_free_array(e800_audio_gpios, ARRAY_SIZE(e800_audio_gpios));
- }
return ret;
}
static int e800_remove(struct platform_device *pdev)
{
- gpio_free_array(e800_audio_gpios, ARRAY_SIZE(e800_audio_gpios));
return 0;
}
diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c
index 9076ea7e9339..b59ec22e1e7e 100644
--- a/sound/soc/pxa/em-x270.c
+++ b/sound/soc/pxa/em-x270.c
@@ -23,7 +23,7 @@
#include <sound/soc.h>
#include <asm/mach-types.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
SND_SOC_DAILINK_DEFS(ac97,
DAILINK_COMP_ARRAY(COMP_CPU("pxa2xx-ac97")),
diff --git a/sound/soc/pxa/hx4700.c b/sound/soc/pxa/hx4700.c
index 9a816156f012..a323ddb8fc3e 100644
--- a/sound/soc/pxa/hx4700.c
+++ b/sound/soc/pxa/hx4700.c
@@ -10,7 +10,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <sound/core.h>
#include <sound/jack.h>
@@ -18,10 +18,10 @@
#include <sound/pcm_params.h>
#include <sound/soc.h>
-#include <mach/hx4700.h>
#include <asm/mach-types.h>
#include "pxa2xx-i2s.h"
+static struct gpio_desc *gpiod_hp_driver, *gpiod_spk_sd;
static struct snd_soc_jack hs_jack;
/* Headphones jack detection DAPM pin */
@@ -29,20 +29,18 @@ static struct snd_soc_jack_pin hs_jack_pin[] = {
{
.pin = "Headphone Jack",
.mask = SND_JACK_HEADPHONE,
+ .invert = 1,
},
{
.pin = "Speaker",
/* disable speaker when hp jack is inserted */
.mask = SND_JACK_HEADPHONE,
- .invert = 1,
},
};
/* Headphones jack detection GPIO */
static struct snd_soc_jack_gpio hs_jack_gpio = {
- .gpio = GPIO75_HX4700_EARPHONE_nDET,
- .invert = true,
- .name = "hp-gpio",
+ .name = "earphone-det",
.report = SND_JACK_HEADPHONE,
.debounce_time = 200,
};
@@ -81,14 +79,14 @@ static const struct snd_soc_ops hx4700_ops = {
static int hx4700_spk_power(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
- gpio_set_value(GPIO107_HX4700_SPK_nSD, !!SND_SOC_DAPM_EVENT_ON(event));
+ gpiod_set_value(gpiod_spk_sd, !SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
static int hx4700_hp_power(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
- gpio_set_value(GPIO92_HX4700_HP_DRIVER, !!SND_SOC_DAPM_EVENT_ON(event));
+ gpiod_set_value(gpiod_hp_driver, !!SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
@@ -162,11 +160,6 @@ static struct snd_soc_card snd_soc_card_hx4700 = {
.fully_routed = true,
};
-static struct gpio hx4700_audio_gpios[] = {
- { GPIO107_HX4700_SPK_nSD, GPIOF_OUT_INIT_HIGH, "SPK_POWER" },
- { GPIO92_HX4700_HP_DRIVER, GPIOF_OUT_INIT_LOW, "EP_POWER" },
-};
-
static int hx4700_audio_probe(struct platform_device *pdev)
{
int ret;
@@ -174,26 +167,26 @@ static int hx4700_audio_probe(struct platform_device *pdev)
if (!machine_is_h4700())
return -ENODEV;
- ret = gpio_request_array(hx4700_audio_gpios,
- ARRAY_SIZE(hx4700_audio_gpios));
+ gpiod_hp_driver = devm_gpiod_get(&pdev->dev, "hp-driver", GPIOD_ASIS);
+ ret = PTR_ERR_OR_ZERO(gpiod_hp_driver);
+ if (ret)
+ return ret;
+ gpiod_spk_sd = devm_gpiod_get(&pdev->dev, "spk-sd", GPIOD_ASIS);
+ ret = PTR_ERR_OR_ZERO(gpiod_spk_sd);
if (ret)
return ret;
+ hs_jack_gpio.gpiod_dev = &pdev->dev;
snd_soc_card_hx4700.dev = &pdev->dev;
ret = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_hx4700);
- if (ret)
- gpio_free_array(hx4700_audio_gpios,
- ARRAY_SIZE(hx4700_audio_gpios));
return ret;
}
static int hx4700_audio_remove(struct platform_device *pdev)
{
- gpio_set_value(GPIO92_HX4700_HP_DRIVER, 0);
- gpio_set_value(GPIO107_HX4700_SPK_nSD, 0);
-
- gpio_free_array(hx4700_audio_gpios, ARRAY_SIZE(hx4700_audio_gpios));
+ gpiod_set_value(gpiod_hp_driver, 0);
+ gpiod_set_value(gpiod_spk_sd, 0);
return 0;
}
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index a5f326c97af2..9433cc927755 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -14,16 +14,14 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
-#include <sound/uda1380.h>
-#include <mach/magician.h>
#include <asm/mach-types.h>
#include "../codecs/uda1380.h"
#include "pxa2xx-i2s.h"
@@ -36,6 +34,9 @@ static int magician_hp_switch;
static int magician_spk_switch = 1;
static int magician_in_sel = MAGICIAN_MIC;
+static struct gpio_desc *gpiod_spk_power, *gpiod_ep_power, *gpiod_mic_power;
+static struct gpio_desc *gpiod_in_sel0, *gpiod_in_sel1;
+
static void magician_ext_control(struct snd_soc_dapm_context *dapm)
{
@@ -215,10 +216,10 @@ static int magician_set_input(struct snd_kcontrol *kcontrol,
switch (magician_in_sel) {
case MAGICIAN_MIC:
- gpio_set_value(EGPIO_MAGICIAN_IN_SEL1, 1);
+ gpiod_set_value(gpiod_in_sel1, 1);
break;
case MAGICIAN_MIC_EXT:
- gpio_set_value(EGPIO_MAGICIAN_IN_SEL1, 0);
+ gpiod_set_value(gpiod_in_sel1, 0);
}
return 1;
@@ -227,21 +228,21 @@ static int magician_set_input(struct snd_kcontrol *kcontrol,
static int magician_spk_power(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
- gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, SND_SOC_DAPM_EVENT_ON(event));
+ gpiod_set_value(gpiod_spk_power, SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
static int magician_hp_power(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
- gpio_set_value(EGPIO_MAGICIAN_EP_POWER, SND_SOC_DAPM_EVENT_ON(event));
+ gpiod_set_value(gpiod_ep_power, SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
static int magician_mic_bias(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
- gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, SND_SOC_DAPM_EVENT_ON(event));
+ gpiod_set_value(gpiod_mic_power, SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
@@ -328,106 +329,38 @@ static struct snd_soc_card snd_soc_card_magician = {
.fully_routed = true,
};
-static struct platform_device *magician_snd_device;
-
-/*
- * FIXME: move into magician board file once merged into the pxa tree
- */
-static struct uda1380_platform_data uda1380_info = {
- .gpio_power = EGPIO_MAGICIAN_CODEC_POWER,
- .gpio_reset = EGPIO_MAGICIAN_CODEC_RESET,
- .dac_clk = UDA1380_DAC_CLK_WSPLL,
-};
-
-static struct i2c_board_info i2c_board_info[] = {
- {
- I2C_BOARD_INFO("uda1380", 0x18),
- .platform_data = &uda1380_info,
- },
-};
-
-static int __init magician_init(void)
-{
- int ret;
- struct i2c_adapter *adapter;
- struct i2c_client *client;
-
- if (!machine_is_magician())
- return -ENODEV;
-
- adapter = i2c_get_adapter(0);
- if (!adapter)
- return -ENODEV;
- client = i2c_new_client_device(adapter, i2c_board_info);
- i2c_put_adapter(adapter);
- if (IS_ERR(client))
- return PTR_ERR(client);
-
- ret = gpio_request(EGPIO_MAGICIAN_SPK_POWER, "SPK_POWER");
- if (ret)
- goto err_request_spk;
- ret = gpio_request(EGPIO_MAGICIAN_EP_POWER, "EP_POWER");
- if (ret)
- goto err_request_ep;
- ret = gpio_request(EGPIO_MAGICIAN_MIC_POWER, "MIC_POWER");
- if (ret)
- goto err_request_mic;
- ret = gpio_request(EGPIO_MAGICIAN_IN_SEL0, "IN_SEL0");
- if (ret)
- goto err_request_in_sel0;
- ret = gpio_request(EGPIO_MAGICIAN_IN_SEL1, "IN_SEL1");
- if (ret)
- goto err_request_in_sel1;
-
- gpio_set_value(EGPIO_MAGICIAN_IN_SEL0, 0);
-
- magician_snd_device = platform_device_alloc("soc-audio", -1);
- if (!magician_snd_device) {
- ret = -ENOMEM;
- goto err_pdev;
- }
-
- platform_set_drvdata(magician_snd_device, &snd_soc_card_magician);
- ret = platform_device_add(magician_snd_device);
- if (ret) {
- platform_device_put(magician_snd_device);
- goto err_pdev;
- }
-
- return 0;
-
-err_pdev:
- gpio_free(EGPIO_MAGICIAN_IN_SEL1);
-err_request_in_sel1:
- gpio_free(EGPIO_MAGICIAN_IN_SEL0);
-err_request_in_sel0:
- gpio_free(EGPIO_MAGICIAN_MIC_POWER);
-err_request_mic:
- gpio_free(EGPIO_MAGICIAN_EP_POWER);
-err_request_ep:
- gpio_free(EGPIO_MAGICIAN_SPK_POWER);
-err_request_spk:
- return ret;
-}
-
-static void __exit magician_exit(void)
+static int magician_audio_probe(struct platform_device *pdev)
{
- platform_device_unregister(magician_snd_device);
-
- gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, 0);
- gpio_set_value(EGPIO_MAGICIAN_EP_POWER, 0);
- gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, 0);
-
- gpio_free(EGPIO_MAGICIAN_IN_SEL1);
- gpio_free(EGPIO_MAGICIAN_IN_SEL0);
- gpio_free(EGPIO_MAGICIAN_MIC_POWER);
- gpio_free(EGPIO_MAGICIAN_EP_POWER);
- gpio_free(EGPIO_MAGICIAN_SPK_POWER);
+ struct device *dev = &pdev->dev;
+
+ gpiod_spk_power = devm_gpiod_get(dev, "SPK_POWER", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod_spk_power))
+ return PTR_ERR(gpiod_spk_power);
+ gpiod_ep_power = devm_gpiod_get(dev, "EP_POWER", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod_ep_power))
+ return PTR_ERR(gpiod_ep_power);
+ gpiod_mic_power = devm_gpiod_get(dev, "MIC_POWER", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod_mic_power))
+ return PTR_ERR(gpiod_mic_power);
+ gpiod_in_sel0 = devm_gpiod_get(dev, "IN_SEL0", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod_in_sel0))
+ return PTR_ERR(gpiod_in_sel0);
+ gpiod_in_sel1 = devm_gpiod_get(dev, "IN_SEL1", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod_in_sel1))
+ return PTR_ERR(gpiod_in_sel1);
+
+ snd_soc_card_magician.dev = &pdev->dev;
+ return devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_magician);
}
-module_init(magician_init);
-module_exit(magician_exit);
+static struct platform_driver magician_audio_driver = {
+ .driver.name = "magician-audio",
+ .driver.pm = &snd_soc_pm_ops,
+ .probe = magician_audio_probe,
+};
+module_platform_driver(magician_audio_driver);
MODULE_AUTHOR("Philipp Zabel");
MODULE_DESCRIPTION("ALSA SoC Magician");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:magician-audio");
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 763db7bbd9bb..0fa37637eca9 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -33,7 +33,7 @@
#include <linux/platform_device.h>
#include <asm/mach-types.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include <sound/core.h>
#include <sound/pcm.h>
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 65257f7fe4c4..a2321c01c160 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -20,7 +20,7 @@
#include <sound/jack.h>
#include <asm/mach-types.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include <linux/platform_data/asoc-palm27x.h>
static struct snd_soc_jack hs_jack;
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index 323ba3e23039..5fdaa477e85d 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -21,8 +21,8 @@
#include <asm/mach-types.h>
#include <asm/hardware/locomo.h>
-#include <mach/poodle.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include <linux/platform_data/asoc-poodle.h>
#include "../codecs/wm8731.h"
#include "pxa2xx-i2s.h"
@@ -38,21 +38,23 @@
static int poodle_jack_func;
static int poodle_spk_func;
+static struct poodle_audio_platform_data *poodle_pdata;
+
static void poodle_ext_control(struct snd_soc_dapm_context *dapm)
{
/* set up jack connection */
if (poodle_jack_func == POODLE_HP) {
/* set = unmute headphone */
- locomo_gpio_write(&poodle_locomo_device.dev,
- POODLE_LOCOMO_GPIO_MUTE_L, 1);
- locomo_gpio_write(&poodle_locomo_device.dev,
- POODLE_LOCOMO_GPIO_MUTE_R, 1);
+ locomo_gpio_write(poodle_pdata->locomo_dev,
+ poodle_pdata->gpio_mute_l, 1);
+ locomo_gpio_write(poodle_pdata->locomo_dev,
+ poodle_pdata->gpio_mute_r, 1);
snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
} else {
- locomo_gpio_write(&poodle_locomo_device.dev,
- POODLE_LOCOMO_GPIO_MUTE_L, 0);
- locomo_gpio_write(&poodle_locomo_device.dev,
- POODLE_LOCOMO_GPIO_MUTE_R, 0);
+ locomo_gpio_write(poodle_pdata->locomo_dev,
+ poodle_pdata->gpio_mute_l, 0);
+ locomo_gpio_write(poodle_pdata->locomo_dev,
+ poodle_pdata->gpio_mute_r, 0);
snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
}
@@ -80,10 +82,10 @@ static int poodle_startup(struct snd_pcm_substream *substream)
static void poodle_shutdown(struct snd_pcm_substream *substream)
{
/* set = unmute headphone */
- locomo_gpio_write(&poodle_locomo_device.dev,
- POODLE_LOCOMO_GPIO_MUTE_L, 1);
- locomo_gpio_write(&poodle_locomo_device.dev,
- POODLE_LOCOMO_GPIO_MUTE_R, 1);
+ locomo_gpio_write(poodle_pdata->locomo_dev,
+ poodle_pdata->gpio_mute_l, 1);
+ locomo_gpio_write(poodle_pdata->locomo_dev,
+ poodle_pdata->gpio_mute_r, 1);
}
static int poodle_hw_params(struct snd_pcm_substream *substream,
@@ -174,11 +176,11 @@ static int poodle_amp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
if (SND_SOC_DAPM_EVENT_ON(event))
- locomo_gpio_write(&poodle_locomo_device.dev,
- POODLE_LOCOMO_GPIO_AMP_ON, 0);
+ locomo_gpio_write(poodle_pdata->locomo_dev,
+ poodle_pdata->gpio_amp_on, 0);
else
- locomo_gpio_write(&poodle_locomo_device.dev,
- POODLE_LOCOMO_GPIO_AMP_ON, 1);
+ locomo_gpio_write(poodle_pdata->locomo_dev,
+ poodle_pdata->gpio_amp_on, 1);
return 0;
}
@@ -254,13 +256,14 @@ static int poodle_probe(struct platform_device *pdev)
struct snd_soc_card *card = &poodle;
int ret;
- locomo_gpio_set_dir(&poodle_locomo_device.dev,
- POODLE_LOCOMO_GPIO_AMP_ON, 0);
+ poodle_pdata = pdev->dev.platform_data;
+ locomo_gpio_set_dir(poodle_pdata->locomo_dev,
+ poodle_pdata->gpio_amp_on, 0);
/* should we mute HP at startup - burning power ?*/
- locomo_gpio_set_dir(&poodle_locomo_device.dev,
- POODLE_LOCOMO_GPIO_MUTE_L, 0);
- locomo_gpio_set_dir(&poodle_locomo_device.dev,
- POODLE_LOCOMO_GPIO_MUTE_R, 0);
+ locomo_gpio_set_dir(poodle_pdata->locomo_dev,
+ poodle_pdata->gpio_mute_l, 0);
+ locomo_gpio_set_dir(poodle_pdata->locomo_dev,
+ poodle_pdata->gpio_mute_r, 0);
card->dev = &pdev->dev;
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index 58f8541ba55c..809ea34736ed 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -21,9 +21,11 @@
#include <sound/pxa2xx-lib.h>
#include <sound/dmaengine_pcm.h>
-#include <mach/hardware.h>
-#include <mach/regs-ac97.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
+
+#define PCDR 0x0040 /* PCM FIFO Data Register */
+#define MODR 0x0140 /* Modem FIFO Data Register */
+#define MCDR 0x0060 /* Mic-in FIFO Data Register */
static void pxa2xx_ac97_warm_reset(struct ac97_controller *adrv)
{
@@ -59,35 +61,30 @@ static struct ac97_controller_ops pxa2xx_ac97_ops = {
};
static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
- .addr = __PREG(PCDR),
.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.chan_name = "pcm_pcm_stereo_in",
.maxburst = 32,
};
static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
- .addr = __PREG(PCDR),
.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.chan_name = "pcm_pcm_stereo_out",
.maxburst = 32,
};
static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_aux_mono_out = {
- .addr = __PREG(MODR),
.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
.chan_name = "pcm_aux_mono_out",
.maxburst = 16,
};
static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_aux_mono_in = {
- .addr = __PREG(MODR),
.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
.chan_name = "pcm_aux_mono_in",
.maxburst = 16,
};
static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_mic_mono_in = {
- .addr = __PREG(MCDR),
.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
.chan_name = "pcm_aux_mic_mono",
.maxburst = 16,
@@ -226,6 +223,7 @@ static int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
int ret;
struct ac97_controller *ctrl;
pxa2xx_audio_ops_t *pdata = pdev->dev.platform_data;
+ struct resource *regs;
void **codecs_pdata;
if (pdev->id != -1) {
@@ -233,6 +231,16 @@ static int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
return -ENXIO;
}
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+
+ pxa2xx_ac97_pcm_stereo_in.addr = regs->start + PCDR;
+ pxa2xx_ac97_pcm_stereo_out.addr = regs->start + PCDR;
+ pxa2xx_ac97_pcm_aux_mono_out.addr = regs->start + MODR;
+ pxa2xx_ac97_pcm_aux_mono_in.addr = regs->start + MODR;
+ pxa2xx_ac97_pcm_mic_mono_in.addr = regs->start + MCDR;
+
ret = pxa2xx_ac97_hw_probe(pdev);
if (ret) {
dev_err(&pdev->dev, "PXA2xx AC97 hw probe error (%d)\n", ret);
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index 5bfc1a966532..746e6ec9198b 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -21,21 +21,20 @@
#include <sound/pxa2xx-lib.h>
#include <sound/dmaengine_pcm.h>
-#include <mach/hardware.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#include "pxa2xx-i2s.h"
/*
* I2S Controller Register and Bit Definitions
*/
-#define SACR0 __REG(0x40400000) /* Global Control Register */
-#define SACR1 __REG(0x40400004) /* Serial Audio I 2 S/MSB-Justified Control Register */
-#define SASR0 __REG(0x4040000C) /* Serial Audio I 2 S/MSB-Justified Interface and FIFO Status Register */
-#define SAIMR __REG(0x40400014) /* Serial Audio Interrupt Mask Register */
-#define SAICR __REG(0x40400018) /* Serial Audio Interrupt Clear Register */
-#define SADIV __REG(0x40400060) /* Audio Clock Divider Register. */
-#define SADR __REG(0x40400080) /* Serial Audio Data Register (TX and RX FIFO access Register). */
+#define SACR0 (0x0000) /* Global Control Register */
+#define SACR1 (0x0004) /* Serial Audio I 2 S/MSB-Justified Control Register */
+#define SASR0 (0x000C) /* Serial Audio I 2 S/MSB-Justified Interface and FIFO Status Register */
+#define SAIMR (0x0014) /* Serial Audio Interrupt Mask Register */
+#define SAICR (0x0018) /* Serial Audio Interrupt Clear Register */
+#define SADIV (0x0060) /* Audio Clock Divider Register. */
+#define SADR (0x0080) /* Serial Audio Data Register (TX and RX FIFO access Register). */
#define SACR0_RFTH(x) ((x) << 12) /* Rx FIFO Interrupt or DMA Trigger Threshold */
#define SACR0_TFTH(x) ((x) << 8) /* Tx FIFO Interrupt or DMA Trigger Threshold */
@@ -77,16 +76,15 @@ struct pxa_i2s_port {
static struct pxa_i2s_port pxa_i2s;
static struct clk *clk_i2s;
static int clk_ena = 0;
+static void __iomem *i2s_reg_base;
static struct snd_dmaengine_dai_dma_data pxa2xx_i2s_pcm_stereo_out = {
- .addr = __PREG(SADR),
.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.chan_name = "tx",
.maxburst = 32,
};
static struct snd_dmaengine_dai_dma_data pxa2xx_i2s_pcm_stereo_in = {
- .addr = __PREG(SADR),
.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.chan_name = "rx",
.maxburst = 32,
@@ -102,7 +100,7 @@ static int pxa2xx_i2s_startup(struct snd_pcm_substream *substream,
return PTR_ERR(clk_i2s);
if (!snd_soc_dai_active(cpu_dai))
- SACR0 = 0;
+ writel(0, i2s_reg_base + SACR0);
return 0;
}
@@ -114,7 +112,7 @@ static int pxa_i2s_wait(void)
/* flush the Rx FIFO */
for (i = 0; i < 16; i++)
- SADR;
+ readl(i2s_reg_base + SADR);
return 0;
}
@@ -174,39 +172,39 @@ static int pxa2xx_i2s_hw_params(struct snd_pcm_substream *substream,
/* is port used by another stream */
if (!(SACR0 & SACR0_ENB)) {
- SACR0 = 0;
+ writel(0, i2s_reg_base + SACR0);
if (pxa_i2s.master)
- SACR0 |= SACR0_BCKD;
+ writel(readl(i2s_reg_base + SACR0) | (SACR0_BCKD), i2s_reg_base + SACR0);
- SACR0 |= SACR0_RFTH(14) | SACR0_TFTH(1);
- SACR1 |= pxa_i2s.fmt;
+ writel(readl(i2s_reg_base + SACR0) | (SACR0_RFTH(14) | SACR0_TFTH(1)), i2s_reg_base + SACR0);
+ writel(readl(i2s_reg_base + SACR1) | (pxa_i2s.fmt), i2s_reg_base + SACR1);
}
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- SAIMR |= SAIMR_TFS;
+ writel(readl(i2s_reg_base + SAIMR) | (SAIMR_TFS), i2s_reg_base + SAIMR);
else
- SAIMR |= SAIMR_RFS;
+ writel(readl(i2s_reg_base + SAIMR) | (SAIMR_RFS), i2s_reg_base + SAIMR);
switch (params_rate(params)) {
case 8000:
- SADIV = 0x48;
+ writel(0x48, i2s_reg_base + SADIV);
break;
case 11025:
- SADIV = 0x34;
+ writel(0x34, i2s_reg_base + SADIV);
break;
case 16000:
- SADIV = 0x24;
+ writel(0x24, i2s_reg_base + SADIV);
break;
case 22050:
- SADIV = 0x1a;
+ writel(0x1a, i2s_reg_base + SADIV);
break;
case 44100:
- SADIV = 0xd;
+ writel(0xd, i2s_reg_base + SADIV);
break;
case 48000:
- SADIV = 0xc;
+ writel(0xc, i2s_reg_base + SADIV);
break;
case 96000: /* not in manual and possibly slightly inaccurate */
- SADIV = 0x6;
+ writel(0x6, i2s_reg_base + SADIV);
break;
}
@@ -221,10 +219,10 @@ static int pxa2xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- SACR1 &= ~SACR1_DRPL;
+ writel(readl(i2s_reg_base + SACR1) & (~SACR1_DRPL), i2s_reg_base + SACR1);
else
- SACR1 &= ~SACR1_DREC;
- SACR0 |= SACR0_ENB;
+ writel(readl(i2s_reg_base + SACR1) & (~SACR1_DREC), i2s_reg_base + SACR1);
+ writel(readl(i2s_reg_base + SACR0) | (SACR0_ENB), i2s_reg_base + SACR0);
break;
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
@@ -243,15 +241,15 @@ static void pxa2xx_i2s_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- SACR1 |= SACR1_DRPL;
- SAIMR &= ~SAIMR_TFS;
+ writel(readl(i2s_reg_base + SACR1) | (SACR1_DRPL), i2s_reg_base + SACR1);
+ writel(readl(i2s_reg_base + SAIMR) & (~SAIMR_TFS), i2s_reg_base + SAIMR);
} else {
- SACR1 |= SACR1_DREC;
- SAIMR &= ~SAIMR_RFS;
+ writel(readl(i2s_reg_base + SACR1) | (SACR1_DREC), i2s_reg_base + SACR1);
+ writel(readl(i2s_reg_base + SAIMR) & (~SAIMR_RFS), i2s_reg_base + SAIMR);
}
- if ((SACR1 & (SACR1_DREC | SACR1_DRPL)) == (SACR1_DREC | SACR1_DRPL)) {
- SACR0 &= ~SACR0_ENB;
+ if ((readl(i2s_reg_base + SACR1) & (SACR1_DREC | SACR1_DRPL)) == (SACR1_DREC | SACR1_DRPL)) {
+ writel(readl(i2s_reg_base + SACR0) & (~SACR0_ENB), i2s_reg_base + SACR0);
pxa_i2s_wait();
if (clk_ena) {
clk_disable_unprepare(clk_i2s);
@@ -264,13 +262,13 @@ static void pxa2xx_i2s_shutdown(struct snd_pcm_substream *substream,
static int pxa2xx_soc_pcm_suspend(struct snd_soc_component *component)
{
/* store registers */
- pxa_i2s.sacr0 = SACR0;
- pxa_i2s.sacr1 = SACR1;
- pxa_i2s.saimr = SAIMR;
- pxa_i2s.sadiv = SADIV;
+ pxa_i2s.sacr0 = readl(i2s_reg_base + SACR0);
+ pxa_i2s.sacr1 = readl(i2s_reg_base + SACR1);
+ pxa_i2s.saimr = readl(i2s_reg_base + SAIMR);
+ pxa_i2s.sadiv = readl(i2s_reg_base + SADIV);
/* deactivate link */
- SACR0 &= ~SACR0_ENB;
+ writel(readl(i2s_reg_base + SACR0) & (~SACR0_ENB), i2s_reg_base + SACR0);
pxa_i2s_wait();
return 0;
}
@@ -279,12 +277,12 @@ static int pxa2xx_soc_pcm_resume(struct snd_soc_component *component)
{
pxa_i2s_wait();
- SACR0 = pxa_i2s.sacr0 & ~SACR0_ENB;
- SACR1 = pxa_i2s.sacr1;
- SAIMR = pxa_i2s.saimr;
- SADIV = pxa_i2s.sadiv;
+ writel(pxa_i2s.sacr0 & ~SACR0_ENB, i2s_reg_base + SACR0);
+ writel(pxa_i2s.sacr1, i2s_reg_base + SACR1);
+ writel(pxa_i2s.saimr, i2s_reg_base + SAIMR);
+ writel(pxa_i2s.sadiv, i2s_reg_base + SADIV);
- SACR0 = pxa_i2s.sacr0;
+ writel(pxa_i2s.sacr0, i2s_reg_base + SACR0);
return 0;
}
@@ -306,12 +304,12 @@ static int pxa2xx_i2s_probe(struct snd_soc_dai *dai)
* the SACR0[RST] bit must also be set and cleared to reset all
* I2S controller registers.
*/
- SACR0 = SACR0_RST;
- SACR0 = 0;
+ writel(SACR0_RST, i2s_reg_base + SACR0);
+ writel(0, i2s_reg_base + SACR0);
/* Make sure RPL and REC are disabled */
- SACR1 = SACR1_DRPL | SACR1_DREC;
+ writel(SACR1_DRPL | SACR1_DREC, i2s_reg_base + SACR1);
/* Along with FIFO servicing */
- SAIMR &= ~(SAIMR_RFS | SAIMR_TFS);
+ writel(readl(i2s_reg_base + SAIMR) & (~(SAIMR_RFS | SAIMR_TFS)), i2s_reg_base + SAIMR);
snd_soc_dai_init_dma_data(dai, &pxa2xx_i2s_pcm_stereo_out,
&pxa2xx_i2s_pcm_stereo_in);
@@ -371,6 +369,22 @@ static const struct snd_soc_component_driver pxa_i2s_component = {
static int pxa2xx_i2s_drv_probe(struct platform_device *pdev)
{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res) {
+ dev_err(&pdev->dev, "missing MMIO resource\n");
+ return -ENXIO;
+ }
+
+ i2s_reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(i2s_reg_base)) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ return PTR_ERR(i2s_reg_base);
+ }
+
+ pxa2xx_i2s_pcm_stereo_out.addr = res->start + SADR;
+ pxa2xx_i2s_pcm_stereo_in.addr = res->start + SADR;
+
return devm_snd_soc_register_component(&pdev->dev, &pxa_i2s_component,
&pxa_i2s_dai, 1);
}
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c
index 7c1384a869ca..44303b6eb228 100644
--- a/sound/soc/pxa/spitz.c
+++ b/sound/soc/pxa/spitz.c
@@ -14,13 +14,12 @@
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-types.h>
-#include <mach/spitz.h>
#include "../codecs/wm8750.h"
#include "pxa2xx-i2s.h"
@@ -37,7 +36,7 @@
static int spitz_jack_func;
static int spitz_spk_func;
-static int spitz_mic_gpio;
+static struct gpio_desc *gpiod_mic, *gpiod_mute_l, *gpiod_mute_r;
static void spitz_ext_control(struct snd_soc_dapm_context *dapm)
{
@@ -56,8 +55,8 @@ static void spitz_ext_control(struct snd_soc_dapm_context *dapm)
snd_soc_dapm_disable_pin_unlocked(dapm, "Mic Jack");
snd_soc_dapm_disable_pin_unlocked(dapm, "Line Jack");
snd_soc_dapm_enable_pin_unlocked(dapm, "Headphone Jack");
- gpio_set_value(SPITZ_GPIO_MUTE_L, 1);
- gpio_set_value(SPITZ_GPIO_MUTE_R, 1);
+ gpiod_set_value(gpiod_mute_l, 1);
+ gpiod_set_value(gpiod_mute_r, 1);
break;
case SPITZ_MIC:
/* enable mic jack and bias, mute hp */
@@ -65,8 +64,8 @@ static void spitz_ext_control(struct snd_soc_dapm_context *dapm)
snd_soc_dapm_disable_pin_unlocked(dapm, "Headset Jack");
snd_soc_dapm_disable_pin_unlocked(dapm, "Line Jack");
snd_soc_dapm_enable_pin_unlocked(dapm, "Mic Jack");
- gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
- gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
+ gpiod_set_value(gpiod_mute_l, 0);
+ gpiod_set_value(gpiod_mute_r, 0);
break;
case SPITZ_LINE:
/* enable line jack, disable mic bias and mute hp */
@@ -74,8 +73,8 @@ static void spitz_ext_control(struct snd_soc_dapm_context *dapm)
snd_soc_dapm_disable_pin_unlocked(dapm, "Headset Jack");
snd_soc_dapm_disable_pin_unlocked(dapm, "Mic Jack");
snd_soc_dapm_enable_pin_unlocked(dapm, "Line Jack");
- gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
- gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
+ gpiod_set_value(gpiod_mute_l, 0);
+ gpiod_set_value(gpiod_mute_r, 0);
break;
case SPITZ_HEADSET:
/* enable and unmute headset jack enable mic bias, mute L hp */
@@ -83,8 +82,8 @@ static void spitz_ext_control(struct snd_soc_dapm_context *dapm)
snd_soc_dapm_enable_pin_unlocked(dapm, "Mic Jack");
snd_soc_dapm_disable_pin_unlocked(dapm, "Line Jack");
snd_soc_dapm_enable_pin_unlocked(dapm, "Headset Jack");
- gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
- gpio_set_value(SPITZ_GPIO_MUTE_R, 1);
+ gpiod_set_value(gpiod_mute_l, 0);
+ gpiod_set_value(gpiod_mute_r, 1);
break;
case SPITZ_HP_OFF:
@@ -93,8 +92,8 @@ static void spitz_ext_control(struct snd_soc_dapm_context *dapm)
snd_soc_dapm_disable_pin_unlocked(dapm, "Headset Jack");
snd_soc_dapm_disable_pin_unlocked(dapm, "Mic Jack");
snd_soc_dapm_disable_pin_unlocked(dapm, "Line Jack");
- gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
- gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
+ gpiod_set_value(gpiod_mute_l, 0);
+ gpiod_set_value(gpiod_mute_r, 0);
break;
}
@@ -199,7 +198,7 @@ static int spitz_set_spk(struct snd_kcontrol *kcontrol,
static int spitz_mic_bias(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
- gpio_set_value_cansleep(spitz_mic_gpio, SND_SOC_DAPM_EVENT_ON(event));
+ gpiod_set_value_cansleep(gpiod_mic, SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
@@ -287,39 +286,28 @@ static int spitz_probe(struct platform_device *pdev)
struct snd_soc_card *card = &snd_soc_spitz;
int ret;
- if (machine_is_akita())
- spitz_mic_gpio = AKITA_GPIO_MIC_BIAS;
- else
- spitz_mic_gpio = SPITZ_GPIO_MIC_BIAS;
-
- ret = gpio_request(spitz_mic_gpio, "MIC GPIO");
- if (ret)
- goto err1;
-
- ret = gpio_direction_output(spitz_mic_gpio, 0);
- if (ret)
- goto err2;
+ gpiod_mic = devm_gpiod_get(&pdev->dev, "mic", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod_mic))
+ return PTR_ERR(gpiod_mic);
+ gpiod_mute_l = devm_gpiod_get(&pdev->dev, "mute-l", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod_mute_l))
+ return PTR_ERR(gpiod_mute_l);
+ gpiod_mute_r = devm_gpiod_get(&pdev->dev, "mute-r", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod_mute_r))
+ return PTR_ERR(gpiod_mute_r);
card->dev = &pdev->dev;
ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret) {
+ if (ret)
dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
ret);
- goto err2;
- }
-
- return 0;
-err2:
- gpio_free(spitz_mic_gpio);
-err1:
return ret;
}
static int spitz_remove(struct platform_device *pdev)
{
- gpio_free(spitz_mic_gpio);
return 0;
}
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index 3b40b5fa5de7..30f83cab0c32 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -16,15 +16,14 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-types.h>
-#include <mach/tosa.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
#define TOSA_HP 0
#define TOSA_MIC_INT 1
@@ -33,6 +32,7 @@
#define TOSA_SPK_ON 0
#define TOSA_SPK_OFF 1
+static struct gpio_desc *tosa_mute;
static int tosa_jack_func;
static int tosa_spk_func;
@@ -128,7 +128,7 @@ static int tosa_set_spk(struct snd_kcontrol *kcontrol,
static int tosa_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
- gpio_set_value(TOSA_GPIO_L_MUTE, SND_SOC_DAPM_EVENT_ON(event) ? 1 : 0);
+ gpiod_set_value(tosa_mute, SND_SOC_DAPM_EVENT_ON(event) ? 1 : 0);
return 0;
}
@@ -222,10 +222,11 @@ static int tosa_probe(struct platform_device *pdev)
struct snd_soc_card *card = &tosa;
int ret;
- ret = gpio_request_one(TOSA_GPIO_L_MUTE, GPIOF_OUT_INIT_LOW,
- "Headphone Jack");
- if (ret)
- return ret;
+ tosa_mute = devm_gpiod_get(&pdev->dev, NULL, GPIOD_OUT_LOW);
+ if (IS_ERR(tosa_mute))
+ return dev_err_probe(&pdev->dev, PTR_ERR(tosa_mute),
+ "failed to get L_MUTE GPIO\n");
+ gpiod_set_consumer_name(tosa_mute, "Headphone Jack");
card->dev = &pdev->dev;
@@ -233,24 +234,16 @@ static int tosa_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
ret);
- gpio_free(TOSA_GPIO_L_MUTE);
}
return ret;
}
-static int tosa_remove(struct platform_device *pdev)
-{
- gpio_free(TOSA_GPIO_L_MUTE);
- return 0;
-}
-
static struct platform_driver tosa_driver = {
.driver = {
.name = "tosa-audio",
.pm = &snd_soc_pm_ops,
},
.probe = tosa_probe,
- .remove = tosa_remove,
};
module_platform_driver(tosa_driver);
diff --git a/sound/soc/pxa/z2.c b/sound/soc/pxa/z2.c
index f4a7cfe22115..020dcce1df1f 100644
--- a/sound/soc/pxa/z2.c
+++ b/sound/soc/pxa/z2.c
@@ -13,7 +13,7 @@
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -21,9 +21,7 @@
#include <sound/jack.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
-#include <mach/audio.h>
-#include <mach/z2.h>
+#include <linux/platform_data/asoc-pxa.h>
#include "../codecs/wm8750.h"
#include "pxa2xx-i2s.h"
@@ -90,7 +88,6 @@ static struct snd_soc_jack_pin hs_jack_pins[] = {
/* Headset jack detection gpios */
static struct snd_soc_jack_gpio hs_jack_gpios[] = {
{
- .gpio = GPIO37_ZIPITZ2_HEADSET_DETECT,
.name = "hsdet-gpio",
.report = SND_JACK_HEADSET,
.debounce_time = 200,
@@ -197,6 +194,7 @@ static int __init z2_init(void)
if (!z2_snd_device)
return -ENOMEM;
+ hs_jack_gpios[0].gpiod_dev = &z2_snd_device->dev;
platform_set_drvdata(z2_snd_device, &snd_soc_z2);
ret = platform_device_add(z2_snd_device);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 6f43db35a5c8..a827cc3c158a 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2128,8 +2128,6 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
else
ret = soc_pcm_trigger(be_substream,
SNDRV_PCM_TRIGGER_START);
-
- ret = soc_pcm_trigger(be_substream, cmd);
if (ret) {
be->dpcm[stream].be_start--;
goto next;
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 3c435d379306..33db334e6556 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -573,10 +573,14 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip,
}
/* FIXME - TEAC devices require the immediate interface setup */
- if (rate != prev_rate && USB_ID_VENDOR(chip->usb_id) == 0x0644) {
- usb_set_interface(chip->dev, fmt->iface, fmt->altsetting);
- if (chip->quirk_flags & QUIRK_FLAG_IFACE_DELAY)
- msleep(50);
+ if (USB_ID_VENDOR(chip->usb_id) == 0x0644) {
+ bool cur_base_48k = (rate % 48000 == 0);
+ bool prev_base_48k = (prev_rate % 48000 == 0);
+ if (cur_base_48k != prev_base_48k) {
+ usb_set_interface(chip->dev, fmt->iface, fmt->altsetting);
+ if (chip->quirk_flags & QUIRK_FLAG_IFACE_DELAY)
+ msleep(50);
+ }
}
validation:
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index fdbdfb7bce92..6a4af725aedd 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -552,10 +552,10 @@ int line6_init_pcm(struct usb_line6 *line6,
line6pcm->max_packet_size_in =
usb_maxpacket(line6->usbdev,
- usb_rcvisocpipe(line6->usbdev, ep_read), 0);
+ usb_rcvisocpipe(line6->usbdev, ep_read));
line6pcm->max_packet_size_out =
usb_maxpacket(line6->usbdev,
- usb_sndisocpipe(line6->usbdev, ep_write), 1);
+ usb_sndisocpipe(line6->usbdev, ep_write));
if (!line6pcm->max_packet_size_in || !line6pcm->max_packet_size_out) {
dev_err(line6pcm->line6->ifcdev,
"cannot get proper max packet size\n");
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 7c6ca2b433a5..bbff0923d264 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1145,6 +1145,9 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
{
+ struct usbmidi_out_port *port = substream->runtime->private_data;
+
+ cancel_work_sync(&port->ep->work);
return substream_open(substream, 0, 0);
}
@@ -1286,7 +1289,7 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi *umidi,
pipe = usb_rcvintpipe(umidi->dev, ep_info->in_ep);
else
pipe = usb_rcvbulkpipe(umidi->dev, ep_info->in_ep);
- length = usb_maxpacket(umidi->dev, pipe, 0);
+ length = usb_maxpacket(umidi->dev, pipe);
for (i = 0; i < INPUT_URBS; ++i) {
buffer = usb_alloc_coherent(umidi->dev, length, GFP_KERNEL,
&ep->urbs[i]->transfer_dma);
@@ -1375,7 +1378,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi,
pipe = usb_sndbulkpipe(umidi->dev, ep_info->out_ep);
switch (umidi->usb_id) {
default:
- ep->max_transfer = usb_maxpacket(umidi->dev, pipe, 1);
+ ep->max_transfer = usb_maxpacket(umidi->dev, pipe);
break;
/*
* Various chips declare a packet size larger than 4 bytes, but
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 7ef7a8abcc2b..3c795675f048 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -439,6 +439,31 @@ static const struct usbmix_name_map msi_mpg_x570s_carbon_max_wifi_alc4080_map[]
{}
};
+/* Gigabyte B450/550 Mobo */
+static const struct usbmix_name_map gigabyte_b450_map[] = {
+ { 24, NULL }, /* OT, IEC958?, disabled */
+ { 21, "Speaker" }, /* OT */
+ { 29, "Speaker Playback" }, /* FU */
+ { 22, "Headphone" }, /* OT */
+ { 30, "Headphone Playback" }, /* FU */
+ { 11, "Line" }, /* IT */
+ { 27, "Line Capture" }, /* FU */
+ { 12, "Mic" }, /* IT */
+ { 28, "Mic Capture" }, /* FU */
+ { 9, "Front Mic" }, /* IT */
+ { 25, "Front Mic Capture" }, /* FU */
+ {}
+};
+
+static const struct usbmix_connector_map gigabyte_b450_connector_map[] = {
+ { 13, 21 }, /* Speaker */
+ { 14, 22 }, /* Headphone */
+ { 19, 11 }, /* Line */
+ { 20, 12 }, /* Mic */
+ { 17, 9 }, /* Front Mic */
+ {}
+};
+
/*
* Control map entries
*/
@@ -581,6 +606,11 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
.map = trx40_mobo_map,
.connector_map = trx40_mobo_connector_map,
},
+ { /* Gigabyte B450/550 Mobo */
+ .id = USB_ID(0x0414, 0xa00d),
+ .map = gigabyte_b450_map,
+ .connector_map = gigabyte_b450_connector_map,
+ },
{ /* ASUS ROG Zenith II */
.id = USB_ID(0x0b05, 0x1916),
.map = asus_rog_map,
diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
index 9d0e44793896..a4d32e8a1d36 100644
--- a/sound/usb/usx2y/usb_stream.c
+++ b/sound/usb/usx2y/usb_stream.c
@@ -51,7 +51,7 @@ static int init_pipe_urbs(struct usb_stream_kernel *sk,
{
int u, p;
int maxpacket = use_packsize ?
- use_packsize : usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+ use_packsize : usb_maxpacket(dev, pipe);
int transfer_length = maxpacket * sk->n_o_ps;
for (u = 0; u < USB_STREAM_NURBS;
@@ -171,7 +171,7 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
out_pipe = usb_sndisocpipe(dev, out_endpoint);
max_packsize = use_packsize ?
- use_packsize : usb_maxpacket(dev, in_pipe, 0);
+ use_packsize : usb_maxpacket(dev, in_pipe);
/*
t_period = period_frames / sample_rate
@@ -187,7 +187,7 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
read_size += packets * USB_STREAM_URBDEPTH *
(max_packsize + sizeof(struct usb_stream_packet));
- max_packsize = usb_maxpacket(dev, out_pipe, 1);
+ max_packsize = usb_maxpacket(dev, out_pipe);
write_size = max_packsize * packets * USB_STREAM_URBDEPTH;
if (read_size >= 256*PAGE_SIZE || write_size >= 256*PAGE_SIZE) {
diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index cfc1ea53978d..9cd5e3aae4f7 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -421,7 +421,7 @@ static int usx2y_urbs_allocate(struct snd_usx2y_substream *subs)
pipe = is_playback ? usb_sndisocpipe(dev, subs->endpoint) :
usb_rcvisocpipe(dev, subs->endpoint);
- subs->maxpacksize = usb_maxpacket(dev, pipe, is_playback);
+ subs->maxpacksize = usb_maxpacket(dev, pipe);
if (!subs->maxpacksize)
return -EINVAL;
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index db83522c1b49..240349b644f3 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -321,7 +321,7 @@ static int usx2y_usbpcm_urbs_allocate(struct snd_usx2y_substream *subs)
pipe = is_playback ? usb_sndisocpipe(dev, subs->endpoint) :
usb_rcvisocpipe(dev, subs->endpoint);
- subs->maxpacksize = usb_maxpacket(dev, pipe, is_playback);
+ subs->maxpacksize = usb_maxpacket(dev, pipe);
if (!subs->maxpacksize)
return -EINVAL;
diff --git a/tools/accounting/.gitignore b/tools/accounting/.gitignore
index c45fb4ed4309..522a690aaf3d 100644
--- a/tools/accounting/.gitignore
+++ b/tools/accounting/.gitignore
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
getdelays
+procacct
diff --git a/tools/accounting/Makefile b/tools/accounting/Makefile
index 03687f19cbb1..11def1ad046c 100644
--- a/tools/accounting/Makefile
+++ b/tools/accounting/Makefile
@@ -2,7 +2,7 @@
CC := $(CROSS_COMPILE)gcc
CFLAGS := -I../../usr/include
-PROGS := getdelays
+PROGS := getdelays procacct
all: $(PROGS)
diff --git a/tools/accounting/procacct.c b/tools/accounting/procacct.c
new file mode 100644
index 000000000000..8353d3237e50
--- /dev/null
+++ b/tools/accounting/procacct.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+/* procacct.c
+ *
+ * Demonstrator of fetching resource data on task exit, as a way
+ * to accumulate accurate program resource usage statistics, without
+ * prior identification of the programs. For that, the fields for
+ * device and inode of the program executable binary file are also
+ * extracted in addition to the command string.
+ *
+ * The TGID together with the PID and the AGROUP flag allow
+ * identification of threads in a process and single-threaded processes.
+ * The ac_tgetime field gives proper whole-process walltime.
+ *
+ * Written (changed) by Thomas Orgis, University of Hamburg in 2022
+ *
+ * This is a cheap derivation (inheriting the style) of getdelays.c:
+ *
+ * Utility to get per-pid and per-tgid delay accounting statistics
+ * Also illustrates usage of the taskstats interface
+ *
+ * Copyright (C) Shailabh Nagar, IBM Corp. 2005
+ * Copyright (C) Balbir Singh, IBM Corp. 2006
+ * Copyright (c) Jay Lan, SGI. 2006
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <unistd.h>
+#include <poll.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <signal.h>
+
+#include <linux/genetlink.h>
+#include <linux/acct.h>
+#include <linux/taskstats.h>
+#include <linux/kdev_t.h>
+
+/*
+ * Generic macros for dealing with netlink sockets. Might be duplicated
+ * elsewhere. It is recommended that commercial grade applications use
+ * libnl or libnetlink and use the interfaces provided by the library
+ */
+#define GENLMSG_DATA(glh) ((void *)(NLMSG_DATA(glh) + GENL_HDRLEN))
+#define GENLMSG_PAYLOAD(glh) (NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN)
+#define NLA_DATA(na) ((void *)((char *)(na) + NLA_HDRLEN))
+#define NLA_PAYLOAD(len) (len - NLA_HDRLEN)
+
+#define err(code, fmt, arg...) \
+ do { \
+ fprintf(stderr, fmt, ##arg); \
+ exit(code); \
+ } while (0)
+
+int rcvbufsz;
+char name[100];
+int dbg;
+int print_delays;
+int print_io_accounting;
+int print_task_context_switch_counts;
+
+#define PRINTF(fmt, arg...) { \
+ if (dbg) { \
+ printf(fmt, ##arg); \
+ } \
+ }
+
+/* Maximum size of response requested or message sent */
+#define MAX_MSG_SIZE 1024
+/* Maximum number of cpus expected to be specified in a cpumask */
+#define MAX_CPUS 32
+
+struct msgtemplate {
+ struct nlmsghdr n;
+ struct genlmsghdr g;
+ char buf[MAX_MSG_SIZE];
+};
+
+char cpumask[100+6*MAX_CPUS];
+
+static void usage(void)
+{
+ fprintf(stderr, "procacct [-v] [-w logfile] [-r bufsize] [-m cpumask]\n");
+ fprintf(stderr, " -v: debug on\n");
+}
+
+/*
+ * Create a raw netlink socket and bind
+ */
+static int create_nl_socket(int protocol)
+{
+ int fd;
+ struct sockaddr_nl local;
+
+ fd = socket(AF_NETLINK, SOCK_RAW, protocol);
+ if (fd < 0)
+ return -1;
+
+ if (rcvbufsz)
+ if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
+ &rcvbufsz, sizeof(rcvbufsz)) < 0) {
+ fprintf(stderr, "Unable to set socket rcv buf size to %d\n",
+ rcvbufsz);
+ goto error;
+ }
+
+ memset(&local, 0, sizeof(local));
+ local.nl_family = AF_NETLINK;
+
+ if (bind(fd, (struct sockaddr *) &local, sizeof(local)) < 0)
+ goto error;
+
+ return fd;
+error:
+ close(fd);
+ return -1;
+}
+
+
+static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
+ __u8 genl_cmd, __u16 nla_type,
+ void *nla_data, int nla_len)
+{
+ struct nlattr *na;
+ struct sockaddr_nl nladdr;
+ int r, buflen;
+ char *buf;
+
+ struct msgtemplate msg;
+
+ msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
+ msg.n.nlmsg_type = nlmsg_type;
+ msg.n.nlmsg_flags = NLM_F_REQUEST;
+ msg.n.nlmsg_seq = 0;
+ msg.n.nlmsg_pid = nlmsg_pid;
+ msg.g.cmd = genl_cmd;
+ msg.g.version = 0x1;
+ na = (struct nlattr *) GENLMSG_DATA(&msg);
+ na->nla_type = nla_type;
+ na->nla_len = nla_len + 1 + NLA_HDRLEN;
+ memcpy(NLA_DATA(na), nla_data, nla_len);
+ msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
+
+ buf = (char *) &msg;
+ buflen = msg.n.nlmsg_len;
+ memset(&nladdr, 0, sizeof(nladdr));
+ nladdr.nl_family = AF_NETLINK;
+ while ((r = sendto(sd, buf, buflen, 0, (struct sockaddr *) &nladdr,
+ sizeof(nladdr))) < buflen) {
+ if (r > 0) {
+ buf += r;
+ buflen -= r;
+ } else if (errno != EAGAIN)
+ return -1;
+ }
+ return 0;
+}
+
+
+/*
+ * Probe the controller in genetlink to find the family id
+ * for the TASKSTATS family
+ */
+static int get_family_id(int sd)
+{
+ struct {
+ struct nlmsghdr n;
+ struct genlmsghdr g;
+ char buf[256];
+ } ans;
+
+ int id = 0, rc;
+ struct nlattr *na;
+ int rep_len;
+
+ strcpy(name, TASKSTATS_GENL_NAME);
+ rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY,
+ CTRL_ATTR_FAMILY_NAME, (void *)name,
+ strlen(TASKSTATS_GENL_NAME)+1);
+ if (rc < 0)
+ return 0; /* sendto() failure? */
+
+ rep_len = recv(sd, &ans, sizeof(ans), 0);
+ if (ans.n.nlmsg_type == NLMSG_ERROR ||
+ (rep_len < 0) || !NLMSG_OK((&ans.n), rep_len))
+ return 0;
+
+ na = (struct nlattr *) GENLMSG_DATA(&ans);
+ na = (struct nlattr *) ((char *) na + NLA_ALIGN(na->nla_len));
+ if (na->nla_type == CTRL_ATTR_FAMILY_ID)
+ id = *(__u16 *) NLA_DATA(na);
+
+ return id;
+}
+
+#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))
+
+static void print_procacct(struct taskstats *t)
+{
+ /* First letter: P for the whole process (last thread in the group, AGROUP set), T for a mere thread, ? if unknown (taskstats version < 12). */
+ printf(
+ "%c pid=%lu tgid=%lu uid=%lu wall=%llu gwall=%llu cpu=%llu vmpeak=%llu rsspeak=%llu dev=%lu:%lu inode=%llu comm=%s\n"
+ , t->version >= 12 ? (t->ac_flag & AGROUP ? 'P' : 'T') : '?'
+ , (unsigned long)t->ac_pid
+ , (unsigned long)(t->version >= 12 ? t->ac_tgid : 0)
+ , (unsigned long)t->ac_uid
+ , (unsigned long long)t->ac_etime
+ , (unsigned long long)(t->version >= 12 ? t->ac_tgetime : 0)
+ , (unsigned long long)(t->ac_utime+t->ac_stime)
+ , (unsigned long long)t->hiwater_vm
+ , (unsigned long long)t->hiwater_rss
+ , (unsigned long)(t->version >= 12 ? MAJOR(t->ac_exe_dev) : 0)
+ , (unsigned long)(t->version >= 12 ? MINOR(t->ac_exe_dev) : 0)
+ , (unsigned long long)(t->version >= 12 ? t->ac_exe_inode : 0)
+ , t->ac_comm
+ );
+}
+
+void handle_aggr(int mother, struct nlattr *na, int fd)
+{
+ int aggr_len = NLA_PAYLOAD(na->nla_len);
+ int len2 = 0;
+ pid_t rtid = 0;
+
+ na = (struct nlattr *) NLA_DATA(na);
+ while (len2 < aggr_len) {
+ switch (na->nla_type) {
+ case TASKSTATS_TYPE_PID:
+ rtid = *(int *) NLA_DATA(na);
+ PRINTF("PID\t%d\n", rtid);
+ break;
+ case TASKSTATS_TYPE_TGID:
+ rtid = *(int *) NLA_DATA(na);
+ PRINTF("TGID\t%d\n", rtid);
+ break;
+ case TASKSTATS_TYPE_STATS:
+ if (mother == TASKSTATS_TYPE_AGGR_PID)
+ print_procacct((struct taskstats *) NLA_DATA(na));
+ if (fd) {
+ if (write(fd, NLA_DATA(na), na->nla_len) < 0)
+ err(1, "write error\n");
+ }
+ break;
+ case TASKSTATS_TYPE_NULL:
+ break;
+ default:
+ fprintf(stderr, "Unknown nested nla_type %d\n",
+ na->nla_type);
+ break;
+ }
+ len2 += NLA_ALIGN(na->nla_len);
+ na = (struct nlattr *)((char *)na +
+ NLA_ALIGN(na->nla_len));
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ int c, rc, rep_len, aggr_len, len2;
+ int cmd_type = TASKSTATS_CMD_ATTR_UNSPEC;
+ __u16 id;
+ __u32 mypid;
+
+ struct nlattr *na;
+ int nl_sd = -1;
+ int len = 0;
+ pid_t tid = 0;
+
+ int fd = 0;
+ int write_file = 0;
+ int maskset = 0;
+ char *logfile = NULL;
+ int containerset = 0;
+ char *containerpath = NULL;
+ int cfd = 0;
+ int forking = 0;
+ sigset_t sigset;
+
+ struct msgtemplate msg;
+
+ while (!forking) {
+ c = getopt(argc, argv, "m:vr:w:");
+ if (c < 0)
+ break;
+
+ switch (c) {
+ case 'w':
+ logfile = strdup(optarg);
+ printf("write to file %s\n", logfile);
+ write_file = 1;
+ break;
+ case 'r':
+ rcvbufsz = atoi(optarg);
+ printf("receive buf size %d\n", rcvbufsz);
+ if (rcvbufsz < 0)
+ err(1, "Invalid rcv buf size\n");
+ break;
+ case 'm':
+ strncpy(cpumask, optarg, sizeof(cpumask));
+ cpumask[sizeof(cpumask) - 1] = '\0';
+ maskset = 1;
+ break;
+ case 'v':
+ printf("debug on\n");
+ dbg = 1;
+ break;
+ default:
+ usage();
+ exit(-1);
+ }
+ }
+ if (!maskset) {
+ maskset = 1;
+ strncpy(cpumask, "1", sizeof(cpumask));
+ cpumask[sizeof(cpumask) - 1] = '\0';
+ }
+ printf("cpumask %s maskset %d\n", cpumask, maskset);
+
+ if (write_file) {
+ fd = open(logfile, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (fd == -1) {
+ perror("Cannot open output file\n");
+ exit(1);
+ }
+ }
+
+ nl_sd = create_nl_socket(NETLINK_GENERIC);
+ if (nl_sd < 0)
+ err(1, "error creating Netlink socket\n");
+
+ mypid = getpid();
+ id = get_family_id(nl_sd);
+ if (!id) {
+ fprintf(stderr, "Error getting family id, errno %d\n", errno);
+ goto err;
+ }
+ PRINTF("family id %d\n", id);
+
+ if (maskset) {
+ rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
+ TASKSTATS_CMD_ATTR_REGISTER_CPUMASK,
+ &cpumask, strlen(cpumask) + 1);
+ PRINTF("Sent register cpumask, retval %d\n", rc);
+ if (rc < 0) {
+ fprintf(stderr, "error sending register cpumask\n");
+ goto err;
+ }
+ }
+
+ do {
+ rep_len = recv(nl_sd, &msg, sizeof(msg), 0);
+ PRINTF("received %d bytes\n", rep_len);
+
+ if (rep_len < 0) {
+ fprintf(stderr, "nonfatal reply error: errno %d\n",
+ errno);
+ continue;
+ }
+ if (msg.n.nlmsg_type == NLMSG_ERROR ||
+ !NLMSG_OK((&msg.n), rep_len)) {
+ struct nlmsgerr *err = NLMSG_DATA(&msg);
+
+ fprintf(stderr, "fatal reply error, errno %d\n",
+ err->error);
+ goto done;
+ }
+
+ PRINTF("nlmsghdr size=%zu, nlmsg_len=%d, rep_len=%d\n",
+ sizeof(struct nlmsghdr), msg.n.nlmsg_len, rep_len);
+
+
+ rep_len = GENLMSG_PAYLOAD(&msg.n);
+
+ na = (struct nlattr *) GENLMSG_DATA(&msg);
+ len = 0;
+ while (len < rep_len) {
+ len += NLA_ALIGN(na->nla_len);
+ int mother = na->nla_type;
+
+ PRINTF("mother=%i\n", mother);
+ switch (na->nla_type) {
+ case TASKSTATS_TYPE_AGGR_PID:
+ case TASKSTATS_TYPE_AGGR_TGID:
+ /* For nested attributes, na follows */
+ handle_aggr(mother, na, fd);
+ break;
+ default:
+ fprintf(stderr, "Unexpected nla_type %d\n",
+ na->nla_type);
+ case TASKSTATS_TYPE_NULL:
+ break;
+ }
+ na = (struct nlattr *) (GENLMSG_DATA(&msg) + len);
+ }
+ } while (1);
+done:
+ if (maskset) {
+ rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
+ TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK,
+ &cpumask, strlen(cpumask) + 1);
+ printf("Sent deregister mask, retval %d\n", rc);
+ if (rc < 0)
+ err(rc, "error sending deregister cpumask\n");
+ }
+err:
+ close(nl_sd);
+ if (fd)
+ close(fd);
+ if (cfd)
+ close(cfd);
+ return 0;
+}
diff --git a/tools/arch/arm64/include/uapi/asm/perf_regs.h b/tools/arch/arm64/include/uapi/asm/perf_regs.h
index d54daafa89e3..fd157f46727e 100644
--- a/tools/arch/arm64/include/uapi/asm/perf_regs.h
+++ b/tools/arch/arm64/include/uapi/asm/perf_regs.h
@@ -36,6 +36,11 @@ enum perf_event_arm_regs {
PERF_REG_ARM64_LR,
PERF_REG_ARM64_SP,
PERF_REG_ARM64_PC,
- PERF_REG_ARM64_MAX,
+
+ /* Extended/pseudo registers */
+ PERF_REG_ARM64_VG = 46, // SVE Vector Granule
+
+ PERF_REG_ARM64_MAX = PERF_REG_ARM64_PC + 1,
+ PERF_REG_ARM64_EXTENDED_MAX = PERF_REG_ARM64_VG + 1
};
#endif /* _ASM_ARM64_PERF_REGS_H */
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index ee15311b6be1..403e83b4adc8 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -76,6 +76,8 @@
/* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */
#define MSR_IA32_CORE_CAPS 0x000000cf
+#define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT 2
+#define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS BIT(MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT)
#define MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT_BIT 5
#define MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT BIT(MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT_BIT)
@@ -154,6 +156,11 @@
#define MSR_IA32_POWER_CTL 0x000001fc
#define MSR_IA32_POWER_CTL_BIT_EE 19
+/* Abbreviated from Intel SDM name IA32_INTEGRITY_CAPABILITIES */
+#define MSR_INTEGRITY_CAPS 0x000002d9
+#define MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT 4
+#define MSR_INTEGRITY_CAPS_PERIODIC_BIST BIT(MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT)
+
#define MSR_LBR_NHM_FROM 0x00000680
#define MSR_LBR_NHM_TO 0x000006c0
#define MSR_LBR_CORE_FROM 0x00000040
@@ -312,6 +319,7 @@
/* Run Time Average Power Limiting (RAPL) Interface */
+#define MSR_VR_CURRENT_CONFIG 0x00000601
#define MSR_RAPL_POWER_UNIT 0x00000606
#define MSR_PKG_POWER_LIMIT 0x00000610
@@ -502,8 +510,10 @@
#define MSR_AMD64_SEV 0xc0010131
#define MSR_AMD64_SEV_ENABLED_BIT 0
#define MSR_AMD64_SEV_ES_ENABLED_BIT 1
+#define MSR_AMD64_SEV_SNP_ENABLED_BIT 2
#define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
#define MSR_AMD64_SEV_ES_ENABLED BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT)
+#define MSR_AMD64_SEV_SNP_ENABLED BIT_ULL(MSR_AMD64_SEV_SNP_ENABLED_BIT)
#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
@@ -524,6 +534,11 @@
#define AMD_CPPC_DES_PERF(x) (((x) & 0xff) << 16)
#define AMD_CPPC_ENERGY_PERF_PREF(x) (((x) & 0xff) << 24)
+/* AMD Performance Counter Global Status and Control MSRs */
+#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS 0xc0000300
+#define MSR_AMD64_PERF_CNTR_GLOBAL_CTL 0xc0000301
+#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR 0xc0000302
+
/* Fam 17h MSRs */
#define MSR_F17H_IRPERF 0xc00000e9
@@ -688,6 +703,10 @@
#define MSR_IA32_PERF_CTL 0x00000199
#define INTEL_PERF_CTL_MASK 0xffff
+/* AMD Branch Sampling configuration */
+#define MSR_AMD_DBG_EXTN_CFG 0xc000010f
+#define MSR_AMD_SAMP_BR_FROM 0xc0010300
+
#define MSR_IA32_MPERF 0x000000e7
#define MSR_IA32_APERF 0x000000e8
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index c6a48d0ef9ff..888a0421d43b 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -99,6 +99,10 @@ FEATURE_TESTS_EXTRA := \
clang \
libbpf \
libbpf-btf__load_from_kernel_by_id \
+ libbpf-bpf_prog_load \
+ libbpf-bpf_object__next_program \
+ libbpf-bpf_object__next_map \
+ libbpf-bpf_create_map \
libpfm4 \
libdebuginfod \
clang-bpf-co-re
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index cb4a2a4fa2e4..7c2a17e23c30 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -58,6 +58,11 @@ FILES= \
test-bpf.bin \
test-libbpf.bin \
test-libbpf-btf__load_from_kernel_by_id.bin \
+ test-libbpf-bpf_prog_load.bin \
+ test-libbpf-bpf_map_create.bin \
+ test-libbpf-bpf_object__next_program.bin \
+ test-libbpf-bpf_object__next_map.bin \
+ test-libbpf-btf__raw_data.bin \
test-get_cpuid.bin \
test-sdt.bin \
test-cxx.bin \
@@ -291,6 +296,21 @@ $(OUTPUT)test-libbpf.bin:
$(OUTPUT)test-libbpf-btf__load_from_kernel_by_id.bin:
$(BUILD) -lbpf
+$(OUTPUT)test-libbpf-bpf_prog_load.bin:
+ $(BUILD) -lbpf
+
+$(OUTPUT)test-libbpf-bpf_map_create.bin:
+ $(BUILD) -lbpf
+
+$(OUTPUT)test-libbpf-bpf_object__next_program.bin:
+ $(BUILD) -lbpf
+
+$(OUTPUT)test-libbpf-bpf_object__next_map.bin:
+ $(BUILD) -lbpf
+
+$(OUTPUT)test-libbpf-btf__raw_data.bin:
+ $(BUILD) -lbpf
+
$(OUTPUT)test-sdt.bin:
$(BUILD)
diff --git a/tools/build/feature/test-libbpf-bpf_map_create.c b/tools/build/feature/test-libbpf-bpf_map_create.c
new file mode 100644
index 000000000000..b9f550e332c8
--- /dev/null
+++ b/tools/build/feature/test-libbpf-bpf_map_create.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/bpf.h>
+
+int main(void)
+{
+ return bpf_map_create(0 /* map_type */, NULL /* map_name */, 0 /* key_size */,
+ 0 /* value_size */, 0 /* max_entries */, NULL /* opts */);
+}
diff --git a/tools/build/feature/test-libbpf-bpf_object__next_map.c b/tools/build/feature/test-libbpf-bpf_object__next_map.c
new file mode 100644
index 000000000000..64adb519e97e
--- /dev/null
+++ b/tools/build/feature/test-libbpf-bpf_object__next_map.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/libbpf.h>
+
+int main(void)
+{
+ bpf_object__next_map(NULL /* obj */, NULL /* prev */);
+ return 0;
+}
diff --git a/tools/build/feature/test-libbpf-bpf_object__next_program.c b/tools/build/feature/test-libbpf-bpf_object__next_program.c
new file mode 100644
index 000000000000..8bf4fd26b545
--- /dev/null
+++ b/tools/build/feature/test-libbpf-bpf_object__next_program.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/libbpf.h>
+
+int main(void)
+{
+ bpf_object__next_program(NULL /* obj */, NULL /* prev */);
+ return 0;
+}
diff --git a/tools/build/feature/test-libbpf-bpf_prog_load.c b/tools/build/feature/test-libbpf-bpf_prog_load.c
new file mode 100644
index 000000000000..47f516d63ebc
--- /dev/null
+++ b/tools/build/feature/test-libbpf-bpf_prog_load.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/bpf.h>
+
+int main(void)
+{
+ return bpf_prog_load(0 /* prog_type */, NULL /* prog_name */,
+ NULL /* license */, NULL /* insns */,
+ 0 /* insn_cnt */, NULL /* opts */);
+}
diff --git a/tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c b/tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c
index f7c084428735..a17647f7d5a4 100644
--- a/tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c
+++ b/tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-#include <bpf/libbpf.h>
+#include <bpf/btf.h>
int main(void)
{
- return btf__load_from_kernel_by_id(20151128, NULL);
+ btf__load_from_kernel_by_id(20151128);
+ return 0;
}
diff --git a/tools/build/feature/test-libbpf-btf__raw_data.c b/tools/build/feature/test-libbpf-btf__raw_data.c
new file mode 100644
index 000000000000..57da31dd7581
--- /dev/null
+++ b/tools/build/feature/test-libbpf-btf__raw_data.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/btf.h>
+
+int main(void)
+{
+ btf__raw_data(NULL /* btf_ro */, NULL /* size */);
+ return 0;
+}
diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
index a2b233fdb572..6c122952c589 100644
--- a/tools/gpio/gpio-event-mon.c
+++ b/tools/gpio/gpio-event-mon.c
@@ -149,6 +149,7 @@ void print_usage(void)
" -r Listen for rising edges\n"
" -f Listen for falling edges\n"
" -w Report the wall-clock time for events\n"
+ " -t Report the hardware timestamp for events\n"
" -b <n> Debounce the line with period n microseconds\n"
" [-c <n>] Do <n> loops (optional, infinite loop if not stated)\n"
" -? This helptext\n"
@@ -174,7 +175,7 @@ int main(int argc, char **argv)
memset(&config, 0, sizeof(config));
config.flags = GPIO_V2_LINE_FLAG_INPUT;
- while ((c = getopt(argc, argv, "c:n:o:b:dsrfw?")) != -1) {
+ while ((c = getopt(argc, argv, "c:n:o:b:dsrfwt?")) != -1) {
switch (c) {
case 'c':
loops = strtoul(optarg, NULL, 10);
@@ -208,6 +209,9 @@ int main(int argc, char **argv)
case 'w':
config.flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
break;
+ case 't':
+ config.flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
+ break;
case '?':
print_usage();
return -1;
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index ea97804d04d4..afdf93bebaaf 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -16,11 +16,11 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
-int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits);
+bool __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits);
void bitmap_clear(unsigned long *map, unsigned int start, int len);
-int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits);
+bool __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
@@ -162,8 +162,8 @@ static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
-static inline int bitmap_equal(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_equal(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@@ -173,8 +173,9 @@ static inline int bitmap_equal(const unsigned long *src1,
return __bitmap_equal(src1, src2, nbits);
}
-static inline int bitmap_intersects(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_intersects(const unsigned long *src1,
+ const unsigned long *src2,
+ unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
diff --git a/tools/include/uapi/asm-generic/fcntl.h b/tools/include/uapi/asm-generic/fcntl.h
index ac190958c981..0197042b7dfb 100644
--- a/tools/include/uapi/asm-generic/fcntl.h
+++ b/tools/include/uapi/asm-generic/fcntl.h
@@ -115,13 +115,11 @@
#define F_GETSIG 11 /* for sockets. */
#endif
-#ifndef CONFIG_64BIT
#ifndef F_GETLK64
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
#endif
-#endif
#ifndef F_SETOWN_EX
#define F_SETOWN_EX 15
@@ -187,25 +185,19 @@ struct f_owner_ex {
#define F_LINUX_SPECIFIC_BASE 1024
-#ifndef HAVE_ARCH_STRUCT_FLOCK
-#ifndef __ARCH_FLOCK_PAD
-#define __ARCH_FLOCK_PAD
-#endif
-
struct flock {
short l_type;
short l_whence;
__kernel_off_t l_start;
__kernel_off_t l_len;
__kernel_pid_t l_pid;
- __ARCH_FLOCK_PAD
-};
+#ifdef __ARCH_FLOCK_EXTRA_SYSID
+ __ARCH_FLOCK_EXTRA_SYSID
#endif
-
-#ifndef HAVE_ARCH_STRUCT_FLOCK64
-#ifndef __ARCH_FLOCK64_PAD
-#define __ARCH_FLOCK64_PAD
+#ifdef __ARCH_FLOCK_PAD
+ __ARCH_FLOCK_PAD
#endif
+};
struct flock64 {
short l_type;
@@ -213,8 +205,9 @@ struct flock64 {
__kernel_loff_t l_start;
__kernel_loff_t l_len;
__kernel_pid_t l_pid;
+#ifdef __ARCH_FLOCK64_PAD
__ARCH_FLOCK64_PAD
-};
#endif
+};
#endif /* _ASM_GENERIC_FCNTL_H */
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 1c48b0ae3ba3..45fa180cc56a 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -383,7 +383,7 @@ __SYSCALL(__NR_syslog, sys_syslog)
/* kernel/ptrace.c */
#define __NR_ptrace 117
-__SYSCALL(__NR_ptrace, sys_ptrace)
+__SC_COMP(__NR_ptrace, sys_ptrace, compat_sys_ptrace)
/* kernel/sched/core.c */
#define __NR_sched_setparam 118
@@ -779,7 +779,7 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_kexec_file_load 294
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
-#if __BITS_PER_LONG == 32
+#if defined(__SYSCALL_COMPAT) || __BITS_PER_LONG == 32
#define __NR_clock_gettime64 403
__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
#define __NR_clock_settime64 404
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
index edba4d93e9e6..da5206517158 100644
--- a/tools/include/uapi/asm/bitsperlong.h
+++ b/tools/include/uapi/asm/bitsperlong.h
@@ -17,6 +17,8 @@
#include "../../../arch/riscv/include/uapi/asm/bitsperlong.h"
#elif defined(__alpha__)
#include "../../../arch/alpha/include/uapi/asm/bitsperlong.h"
+#elif defined(__loongarch__)
+#include "../../../arch/loongarch/include/uapi/asm/bitsperlong.h"
#else
#include <asm-generic/bitsperlong.h>
#endif
diff --git a/tools/lib/bitmap.c b/tools/lib/bitmap.c
index db466ef7be9d..354f8cdc0880 100644
--- a/tools/lib/bitmap.c
+++ b/tools/lib/bitmap.c
@@ -72,31 +72,31 @@ int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
return result != 0;
}
-int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits)
+bool __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] != bitmap2[k])
- return 0;
+ return false;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
- return 0;
+ return false;
- return 1;
+ return true;
}
-int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits)
+bool __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & bitmap2[k])
- return 1;
+ return true;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
- return 1;
- return 0;
+ return true;
+ return false;
}
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index ed66f2e38464..e6c98a6e3908 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -23,6 +23,7 @@
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>
+#include "internal.h"
void perf_evlist__init(struct perf_evlist *evlist)
{
@@ -39,10 +40,11 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
* We already have cpus for evsel (via PMU sysfs) so
* keep it, if there's no target cpu list defined.
*/
- if (!evsel->own_cpus || evlist->has_user_cpus) {
- perf_cpu_map__put(evsel->cpus);
- evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
- } else if (!evsel->system_wide && perf_cpu_map__empty(evlist->user_requested_cpus)) {
+ if (!evsel->own_cpus ||
+ (!evsel->system_wide && evlist->has_user_cpus) ||
+ (!evsel->system_wide &&
+ !evsel->requires_cpu &&
+ perf_cpu_map__empty(evlist->user_requested_cpus))) {
perf_cpu_map__put(evsel->cpus);
evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
} else if (evsel->cpus != evsel->own_cpus) {
@@ -50,8 +52,11 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
}
- perf_thread_map__put(evsel->threads);
- evsel->threads = perf_thread_map__get(evlist->threads);
+ if (!evsel->system_wide) {
+ perf_thread_map__put(evsel->threads);
+ evsel->threads = perf_thread_map__get(evlist->threads);
+ }
+
evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}
@@ -298,7 +303,7 @@ add:
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
- int nr_cpus = perf_cpu_map__nr(evlist->user_requested_cpus);
+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
int nr_threads = perf_thread_map__nr(evlist->threads);
int nfds = 0;
struct perf_evsel *evsel;
@@ -428,9 +433,9 @@ static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_
static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
int idx, struct perf_mmap_param *mp, int cpu_idx,
- int thread, int *_output, int *_output_overwrite)
+ int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
{
- struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->user_requested_cpus, cpu_idx);
+ struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
struct perf_evsel *evsel;
int revent;
@@ -484,6 +489,8 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
return -1;
+ *nr_mmaps += 1;
+
if (!idx)
perf_evlist__set_mmap_first(evlist, map, overwrite);
} else {
@@ -513,34 +520,12 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
}
static int
-mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
- struct perf_mmap_param *mp)
-{
- int thread;
- int nr_threads = perf_thread_map__nr(evlist->threads);
-
- for (thread = 0; thread < nr_threads; thread++) {
- int output = -1;
- int output_overwrite = -1;
-
- if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread,
- &output, &output_overwrite))
- goto out_unmap;
- }
-
- return 0;
-
-out_unmap:
- perf_evlist__munmap(evlist);
- return -1;
-}
-
-static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
struct perf_mmap_param *mp)
{
int nr_threads = perf_thread_map__nr(evlist->threads);
- int nr_cpus = perf_cpu_map__nr(evlist->user_requested_cpus);
+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
+ int nr_mmaps = 0;
int cpu, thread;
for (cpu = 0; cpu < nr_cpus; cpu++) {
@@ -549,11 +534,14 @@ mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
for (thread = 0; thread < nr_threads; thread++) {
if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
- thread, &output, &output_overwrite))
+ thread, &output, &output_overwrite, &nr_mmaps))
goto out_unmap;
}
}
+ if (nr_mmaps != evlist->nr_mmaps)
+ pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);
+
return 0;
out_unmap:
@@ -565,9 +553,14 @@ static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
int nr_mmaps;
- nr_mmaps = perf_cpu_map__nr(evlist->user_requested_cpus);
- if (perf_cpu_map__empty(evlist->user_requested_cpus))
- nr_mmaps = perf_thread_map__nr(evlist->threads);
+ /* One for each CPU */
+ nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
+ if (perf_cpu_map__empty(evlist->all_cpus)) {
+ /* Plus one for each thread */
+ nr_mmaps += perf_thread_map__nr(evlist->threads);
+ /* Minus the per-thread CPU (-1) */
+ nr_mmaps -= 1;
+ }
return nr_mmaps;
}
@@ -577,7 +570,6 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
struct perf_mmap_param *mp)
{
struct perf_evsel *evsel;
- const struct perf_cpu_map *cpus = evlist->user_requested_cpus;
if (!ops || !ops->get || !ops->mmap)
return -EINVAL;
@@ -596,9 +588,6 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
return -ENOMEM;
- if (perf_cpu_map__empty(cpus))
- return mmap_per_thread(evlist, ops, mp);
-
return mmap_per_cpu(evlist, ops, mp);
}
diff --git a/tools/lib/perf/include/internal/evsel.h b/tools/lib/perf/include/internal/evsel.h
index cfc9ebd7968e..2a912a1f1989 100644
--- a/tools/lib/perf/include/internal/evsel.h
+++ b/tools/lib/perf/include/internal/evsel.h
@@ -49,7 +49,18 @@ struct perf_evsel {
/* parse modifier helper */
int nr_members;
+ /*
+ * system_wide is for events that need to be on every CPU, irrespective
+ * of user requested CPUs or threads. Map propagation will set cpus to
+ * this event's own_cpus, whereby they will contribute to evlist
+ * all_cpus.
+ */
bool system_wide;
+ /*
+ * Some events, for example uncore events, require a CPU.
+ * i.e. it cannot be the 'any CPU' value of -1.
+ */
+ bool requires_cpu;
int idx;
};
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 190b2f6e360a..864bb9dd3584 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -185,7 +185,9 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
"do_group_exit",
"stop_this_cpu",
"__invalid_creds",
- "cpu_startup_entry",
+ "cpu_startup_entry",
+ "__ubsan_handle_builtin_unreachable",
+ "ex_handler_msr_mce",
};
if (!func)
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 465be4e62a17..cf8ad50f3de1 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -33,7 +33,7 @@ OPTIONS
- a raw PMU event in the form of rN where N is a hexadecimal value
that represents the raw register encoding with the layout of the
event control registers as described by entries in
- /sys/bus/event_sources/devices/cpu/format/*.
+ /sys/bus/event_source/devices/cpu/format/*.
- a symbolic or raw PMU event followed by an optional colon
and a list of event modifiers, e.g., cpu-cycles:p. See the
@@ -758,6 +758,16 @@ include::intel-hybrid.txt[]
If the URLs are not specified, the value of the DEBUGINFOD_URLS
system environment variable is used.
+--off-cpu::
+ Enable off-cpu profiling with BPF. The BPF program will collect
+ task scheduling information with (user) stacktrace and save them
+ as sample data of a software event named "offcpu-time". The
+ sample period will have the time the task slept in nanoseconds.
+
+ Note that BPF can collect stack traces using frame pointer ("fp")
+ only, as of now. So the applications built without the frame
+ pointer might see bogus addresses.
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1], linkperf:perf-intel-pt[1]
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 8d1cde00b8d6..d8a33f4a47c5 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -39,7 +39,7 @@ report::
- a raw PMU event in the form of rN where N is a hexadecimal value
that represents the raw register encoding with the layout of the
event control registers as described by entries in
- /sys/bus/event_sources/devices/cpu/format/*.
+ /sys/bus/event_source/devices/cpu/format/*.
- a symbolic or raw PMU event followed by an optional colon
and a list of event modifiers, e.g., cpu-cycles:p. See the
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index cac3dfbee7d8..c1fdba26bf53 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -41,7 +41,7 @@ Default is to monitor all CPUS.
(use 'perf list' to list all events) or a raw PMU event in the form
of rN where N is a hexadecimal value that represents the raw register
encoding with the layout of the event control registers as described
- by entries in /sys/bus/event_sources/devices/cpu/format/*.
+ by entries in /sys/bus/event_source/devices/cpu/format/*.
-E <entries>::
--entries=<entries>::
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index e0304e70f182..73e0762092fe 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -573,11 +573,36 @@ ifndef NO_LIBELF
ifeq ($(feature-libbpf-btf__load_from_kernel_by_id), 1)
CFLAGS += -DHAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID
endif
+ $(call feature_check,libbpf-bpf_prog_load)
+ ifeq ($(feature-libbpf-bpf_prog_load), 1)
+ CFLAGS += -DHAVE_LIBBPF_BPF_PROG_LOAD
+ endif
+ $(call feature_check,libbpf-bpf_object__next_program)
+ ifeq ($(feature-libbpf-bpf_object__next_program), 1)
+ CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_PROGRAM
+ endif
+ $(call feature_check,libbpf-bpf_object__next_map)
+ ifeq ($(feature-libbpf-bpf_object__next_map), 1)
+ CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_MAP
+ endif
+ $(call feature_check,libbpf-btf__raw_data)
+ ifeq ($(feature-libbpf-btf__raw_data), 1)
+ CFLAGS += -DHAVE_LIBBPF_BTF__RAW_DATA
+ endif
+ $(call feature_check,libbpf-bpf_map_create)
+ ifeq ($(feature-libbpf-bpf_map_create), 1)
+ CFLAGS += -DHAVE_LIBBPF_BPF_MAP_CREATE
+ endif
else
dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
endif
else
CFLAGS += -DHAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID
+ CFLAGS += -DHAVE_LIBBPF_BPF_PROG_LOAD
+ CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_PROGRAM
+ CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_MAP
+ CFLAGS += -DHAVE_LIBBPF_BTF__RAW_DATA
+ CFLAGS += -DHAVE_LIBBPF_BPF_MAP_CREATE
endif
endif
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 6e5aded855cc..8f738e11356d 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -1038,6 +1038,7 @@ SKEL_TMP_OUT := $(abspath $(SKEL_OUT)/.tmp)
SKELETONS := $(SKEL_OUT)/bpf_prog_profiler.skel.h
SKELETONS += $(SKEL_OUT)/bperf_leader.skel.h $(SKEL_OUT)/bperf_follower.skel.h
SKELETONS += $(SKEL_OUT)/bperf_cgroup.skel.h $(SKEL_OUT)/func_latency.skel.h
+SKELETONS += $(SKEL_OUT)/off_cpu.skel.h
$(SKEL_TMP_OUT) $(LIBBPF_OUTPUT):
$(Q)$(MKDIR) -p $@
diff --git a/tools/perf/arch/arm64/util/mem-events.c b/tools/perf/arch/arm64/util/mem-events.c
index be41721b9aa1..df817d1f9f3e 100644
--- a/tools/perf/arch/arm64/util/mem-events.c
+++ b/tools/perf/arch/arm64/util/mem-events.c
@@ -5,9 +5,9 @@
#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }
static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
- E("spe-load", "arm_spe_0/ts_enable=1,load_filter=1,store_filter=0,min_latency=%u/", "arm_spe_0"),
- E("spe-store", "arm_spe_0/ts_enable=1,load_filter=0,store_filter=1/", "arm_spe_0"),
- E("spe-ldst", "arm_spe_0/ts_enable=1,load_filter=1,store_filter=1,min_latency=%u/", "arm_spe_0"),
+ E("spe-load", "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=1,store_filter=0,min_latency=%u/", "arm_spe_0"),
+ E("spe-store", "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=0,store_filter=1/", "arm_spe_0"),
+ E("spe-ldst", "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=1,store_filter=1,min_latency=%u/", "arm_spe_0"),
};
static char mem_ev_name[100];
diff --git a/tools/perf/arch/arm64/util/perf_regs.c b/tools/perf/arch/arm64/util/perf_regs.c
index 476b037eea1c..006692c9b040 100644
--- a/tools/perf/arch/arm64/util/perf_regs.c
+++ b/tools/perf/arch/arm64/util/perf_regs.c
@@ -2,13 +2,19 @@
#include <errno.h>
#include <regex.h>
#include <string.h>
+#include <sys/auxv.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
+#include "../../../perf-sys.h"
#include "../../../util/debug.h"
#include "../../../util/event.h"
#include "../../../util/perf_regs.h"
+#ifndef HWCAP_SVE
+#define HWCAP_SVE (1 << 22)
+#endif
+
const struct sample_reg sample_reg_masks[] = {
SMPL_REG(x0, PERF_REG_ARM64_X0),
SMPL_REG(x1, PERF_REG_ARM64_X1),
@@ -43,6 +49,7 @@ const struct sample_reg sample_reg_masks[] = {
SMPL_REG(lr, PERF_REG_ARM64_LR),
SMPL_REG(sp, PERF_REG_ARM64_SP),
SMPL_REG(pc, PERF_REG_ARM64_PC),
+ SMPL_REG(vg, PERF_REG_ARM64_VG),
SMPL_REG_END
};
@@ -131,3 +138,34 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
return SDT_ARG_VALID;
}
+
+uint64_t arch__user_reg_mask(void)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .sample_type = PERF_SAMPLE_REGS_USER,
+ .disabled = 1,
+ .exclude_kernel = 1,
+ .sample_period = 1,
+ .sample_regs_user = PERF_REGS_MASK
+ };
+ int fd;
+
+ if (getauxval(AT_HWCAP) & HWCAP_SVE)
+ attr.sample_regs_user |= SMPL_REG_MASK(PERF_REG_ARM64_VG);
+
+ /*
+ * Check if the pmu supports perf extended regs, before
+ * returning the register mask to sample.
+ */
+ if (attr.sample_regs_user != PERF_REGS_MASK) {
+ event_attr_init(&attr);
+ fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+ if (fd != -1) {
+ close(fd);
+ return attr.sample_regs_user;
+ }
+ }
+ return PERF_REGS_MASK;
+}
diff --git a/tools/perf/arch/arm64/util/unwind-libunwind.c b/tools/perf/arch/arm64/util/unwind-libunwind.c
index 5aecf88e3de6..871af5992298 100644
--- a/tools/perf/arch/arm64/util/unwind-libunwind.c
+++ b/tools/perf/arch/arm64/util/unwind-libunwind.c
@@ -10,77 +10,8 @@
int LIBUNWIND__ARCH_REG_ID(int regnum)
{
- switch (regnum) {
- case UNW_AARCH64_X0:
- return PERF_REG_ARM64_X0;
- case UNW_AARCH64_X1:
- return PERF_REG_ARM64_X1;
- case UNW_AARCH64_X2:
- return PERF_REG_ARM64_X2;
- case UNW_AARCH64_X3:
- return PERF_REG_ARM64_X3;
- case UNW_AARCH64_X4:
- return PERF_REG_ARM64_X4;
- case UNW_AARCH64_X5:
- return PERF_REG_ARM64_X5;
- case UNW_AARCH64_X6:
- return PERF_REG_ARM64_X6;
- case UNW_AARCH64_X7:
- return PERF_REG_ARM64_X7;
- case UNW_AARCH64_X8:
- return PERF_REG_ARM64_X8;
- case UNW_AARCH64_X9:
- return PERF_REG_ARM64_X9;
- case UNW_AARCH64_X10:
- return PERF_REG_ARM64_X10;
- case UNW_AARCH64_X11:
- return PERF_REG_ARM64_X11;
- case UNW_AARCH64_X12:
- return PERF_REG_ARM64_X12;
- case UNW_AARCH64_X13:
- return PERF_REG_ARM64_X13;
- case UNW_AARCH64_X14:
- return PERF_REG_ARM64_X14;
- case UNW_AARCH64_X15:
- return PERF_REG_ARM64_X15;
- case UNW_AARCH64_X16:
- return PERF_REG_ARM64_X16;
- case UNW_AARCH64_X17:
- return PERF_REG_ARM64_X17;
- case UNW_AARCH64_X18:
- return PERF_REG_ARM64_X18;
- case UNW_AARCH64_X19:
- return PERF_REG_ARM64_X19;
- case UNW_AARCH64_X20:
- return PERF_REG_ARM64_X20;
- case UNW_AARCH64_X21:
- return PERF_REG_ARM64_X21;
- case UNW_AARCH64_X22:
- return PERF_REG_ARM64_X22;
- case UNW_AARCH64_X23:
- return PERF_REG_ARM64_X23;
- case UNW_AARCH64_X24:
- return PERF_REG_ARM64_X24;
- case UNW_AARCH64_X25:
- return PERF_REG_ARM64_X25;
- case UNW_AARCH64_X26:
- return PERF_REG_ARM64_X26;
- case UNW_AARCH64_X27:
- return PERF_REG_ARM64_X27;
- case UNW_AARCH64_X28:
- return PERF_REG_ARM64_X28;
- case UNW_AARCH64_X29:
- return PERF_REG_ARM64_X29;
- case UNW_AARCH64_X30:
- return PERF_REG_ARM64_LR;
- case UNW_AARCH64_SP:
- return PERF_REG_ARM64_SP;
- case UNW_AARCH64_PC:
- return PERF_REG_ARM64_PC;
- default:
- pr_err("unwind: invalid reg id %d\n", regnum);
+ if (regnum < 0 || regnum >= PERF_REG_ARM64_EXTENDED_MAX)
return -EINVAL;
- }
- return -EINVAL;
+ return regnum;
}
diff --git a/tools/perf/arch/x86/util/evsel.c b/tools/perf/arch/x86/util/evsel.c
index 88306183d629..3501399cef35 100644
--- a/tools/perf/arch/x86/util/evsel.c
+++ b/tools/perf/arch/x86/util/evsel.c
@@ -5,6 +5,7 @@
#include "util/env.h"
#include "util/pmu.h"
#include "linux/string.h"
+#include "evsel.h"
void arch_evsel__set_sample_weight(struct evsel *evsel)
{
@@ -32,7 +33,7 @@ void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr)
}
/* Check whether the evsel's PMU supports the perf metrics */
-static bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
+bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
{
const char *pmu_name = evsel->pmu_name ? evsel->pmu_name : "cpu";
@@ -57,6 +58,6 @@ bool arch_evsel__must_be_in_group(const struct evsel *evsel)
return false;
return evsel->name &&
- (!strcasecmp(evsel->name, "slots") ||
+ (strcasestr(evsel->name, "slots") ||
strcasestr(evsel->name, "topdown"));
}
diff --git a/tools/perf/arch/x86/util/evsel.h b/tools/perf/arch/x86/util/evsel.h
new file mode 100644
index 000000000000..19ad1691374d
--- /dev/null
+++ b/tools/perf/arch/x86/util/evsel.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _EVSEL_H
+#define _EVSEL_H 1
+
+bool evsel__sys_has_perf_metrics(const struct evsel *evsel);
+
+#endif
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 2eaac4638aab..06c2cdfd8f2f 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -811,18 +811,11 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
if (!cpu_wide && perf_can_record_cpu_wide()) {
struct evsel *switch_evsel;
- err = parse_events(evlist, "dummy:u", NULL);
- if (err)
- return err;
+ switch_evsel = evlist__add_dummy_on_all_cpus(evlist);
+ if (!switch_evsel)
+ return -ENOMEM;
- switch_evsel = evlist__last(evlist);
-
- switch_evsel->core.attr.freq = 0;
- switch_evsel->core.attr.sample_period = 1;
switch_evsel->core.attr.context_switch = 1;
-
- switch_evsel->core.system_wide = true;
- switch_evsel->no_aux_samples = true;
switch_evsel->immediate = true;
evsel__set_sample_bit(switch_evsel, TID);
@@ -871,20 +864,22 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
/* Add dummy event to keep tracking */
if (opts->full_auxtrace) {
+ bool need_system_wide_tracking;
struct evsel *tracking_evsel;
- err = parse_events(evlist, "dummy:u", NULL);
- if (err)
- return err;
+ /*
+ * User space tasks can migrate between CPUs, so when tracing
+ * selected CPUs, sideband for all CPUs is still needed.
+ */
+ need_system_wide_tracking = evlist->core.has_user_cpus &&
+ !intel_pt_evsel->core.attr.exclude_user;
- tracking_evsel = evlist__last(evlist);
+ tracking_evsel = evlist__add_aux_dummy(evlist, need_system_wide_tracking);
+ if (!tracking_evsel)
+ return -ENOMEM;
evlist__set_tracking_event(evlist, tracking_evsel);
- tracking_evsel->core.attr.freq = 0;
- tracking_evsel->core.attr.sample_period = 1;
-
- tracking_evsel->no_aux_samples = true;
if (need_immediate)
tracking_evsel->immediate = true;
diff --git a/tools/perf/arch/x86/util/topdown.c b/tools/perf/arch/x86/util/topdown.c
index f4d5422e9960..f81a7cfe4d63 100644
--- a/tools/perf/arch/x86/util/topdown.c
+++ b/tools/perf/arch/x86/util/topdown.c
@@ -4,6 +4,7 @@
#include "util/pmu.h"
#include "util/topdown.h"
#include "topdown.h"
+#include "evsel.h"
/* Check whether there is a PMU which supports the perf metrics. */
bool topdown_sys_has_perf_metrics(void)
@@ -55,33 +56,19 @@ void arch_topdown_group_warn(void)
#define TOPDOWN_SLOTS 0x0400
-static bool is_topdown_slots_event(struct evsel *counter)
-{
- if (!counter->pmu_name)
- return false;
-
- if (strcmp(counter->pmu_name, "cpu"))
- return false;
-
- if (counter->core.attr.config == TOPDOWN_SLOTS)
- return true;
-
- return false;
-}
-
/*
* Check whether a topdown group supports sample-read.
*
- * Only Topdown metic supports sample-read. The slots
+ * Only Topdown metric supports sample-read. The slots
* event must be the leader of the topdown group.
*/
bool arch_topdown_sample_read(struct evsel *leader)
{
- if (!pmu_have_event("cpu", "slots"))
+ if (!evsel__sys_has_perf_metrics(leader))
return false;
- if (is_topdown_slots_event(leader))
+ if (leader->core.attr.config == TOPDOWN_SLOTS)
return true;
return false;
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index c8230c48125f..4898ee57d156 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -928,8 +928,8 @@ percent_rmt_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
double per_left;
double per_right;
- per_left = PERCENT(left, lcl_hitm);
- per_right = PERCENT(right, lcl_hitm);
+ per_left = PERCENT(left, rmt_hitm);
+ per_right = PERCENT(right, rmt_hitm);
return per_left - per_right;
}
@@ -2801,9 +2801,7 @@ static int perf_c2c__report(int argc, const char **argv)
"the input file to process"),
OPT_INCR('N', "node-info", &c2c.node_info,
"show extra node info in report (repeat for more info)"),
-#ifdef HAVE_SLANG_SUPPORT
OPT_BOOLEAN(0, "stdio", &c2c.use_stdio, "Use the stdio interface"),
-#endif
OPT_BOOLEAN(0, "stats", &c2c.stats_only,
"Display only statistic tables (implies --stdio)"),
OPT_BOOLEAN(0, "full-symbols", &c2c.symbol_full,
@@ -2833,6 +2831,10 @@ static int perf_c2c__report(int argc, const char **argv)
if (argc)
usage_with_options(report_c2c_usage, options);
+#ifndef HAVE_SLANG_SUPPORT
+ c2c.use_stdio = true;
+#endif
+
if (c2c.stats_only)
c2c.use_stdio = true;
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index b1200b7340a6..23a33ac15e68 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -1083,7 +1083,7 @@ out_delete:
static int __cmd_record(int argc, const char **argv)
{
const char *record_args[] = {
- "record", "-R", "-m", "1024", "-c", "1", "--synth", "no",
+ "record", "-R", "-m", "1024", "-c", "1", "--synth", "task",
};
unsigned int rec_argc, i, j, ret;
const char **rec_argv;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index a5cf6a99d67f..9a71f0330137 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -49,6 +49,7 @@
#include "util/clockid.h"
#include "util/pmu-hybrid.h"
#include "util/evlist-hybrid.h"
+#include "util/off_cpu.h"
#include "asm/bug.h"
#include "perf.h"
#include "cputopo.h"
@@ -162,6 +163,7 @@ struct record {
bool buildid_mmap;
bool timestamp_filename;
bool timestamp_boundary;
+ bool off_cpu;
struct switch_output switch_output;
unsigned long long samples;
unsigned long output_max_size; /* = 0: unlimited */
@@ -869,7 +871,6 @@ static int record__auxtrace_init(struct record *rec __maybe_unused)
static int record__config_text_poke(struct evlist *evlist)
{
struct evsel *evsel;
- int err;
/* Nothing to do if text poke is already configured */
evlist__for_each_entry(evlist, evsel) {
@@ -877,32 +878,23 @@ static int record__config_text_poke(struct evlist *evlist)
return 0;
}
- err = parse_events(evlist, "dummy:u", NULL);
- if (err)
- return err;
-
- evsel = evlist__last(evlist);
+ evsel = evlist__add_dummy_on_all_cpus(evlist);
+ if (!evsel)
+ return -ENOMEM;
- evsel->core.attr.freq = 0;
- evsel->core.attr.sample_period = 1;
evsel->core.attr.text_poke = 1;
evsel->core.attr.ksymbol = 1;
-
- evsel->core.system_wide = true;
- evsel->no_aux_samples = true;
evsel->immediate = true;
-
- /* Text poke must be collected on all CPUs */
- perf_cpu_map__put(evsel->core.own_cpus);
- evsel->core.own_cpus = perf_cpu_map__new(NULL);
- perf_cpu_map__put(evsel->core.cpus);
- evsel->core.cpus = perf_cpu_map__get(evsel->core.own_cpus);
-
evsel__set_sample_bit(evsel, TIME);
return 0;
}
+static int record__config_off_cpu(struct record *rec)
+{
+ return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts);
+}
+
static bool record__kcore_readable(struct machine *machine)
{
char kcore[PATH_MAX];
@@ -982,14 +974,20 @@ static void record__thread_data_close_pipes(struct record_thread *thread_data)
}
}
+static bool evlist__per_thread(struct evlist *evlist)
+{
+ return cpu_map__is_dummy(evlist->core.user_requested_cpus);
+}
+
static int record__thread_data_init_maps(struct record_thread *thread_data, struct evlist *evlist)
{
int m, tm, nr_mmaps = evlist->core.nr_mmaps;
struct mmap *mmap = evlist->mmap;
struct mmap *overwrite_mmap = evlist->overwrite_mmap;
- struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
+ struct perf_cpu_map *cpus = evlist->core.all_cpus;
+ bool per_thread = evlist__per_thread(evlist);
- if (cpu_map__is_dummy(cpus))
+ if (per_thread)
thread_data->nr_mmaps = nr_mmaps;
else
thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
@@ -1010,7 +1008,7 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps);
for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
- if (cpu_map__is_dummy(cpus) ||
+ if (per_thread ||
test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) {
if (thread_data->maps) {
thread_data->maps[tm] = &mmap[m];
@@ -1885,7 +1883,7 @@ static int record__synthesize(struct record *rec, bool tail)
return err;
}
- err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.user_requested_cpus,
+ err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus,
process_synthesized_event, NULL);
if (err < 0) {
pr_err("Couldn't synthesize cpu map.\n");
@@ -2600,6 +2598,9 @@ out_free_threads:
} else
status = err;
+ if (rec->off_cpu)
+ rec->bytes_written += off_cpu_write(rec->session);
+
record__synthesize(rec, true);
/* this will be recalculated during process_buildids() */
rec->samples = 0;
@@ -3324,6 +3325,7 @@ static struct option __record_options[] = {
OPT_CALLBACK_OPTARG(0, "threads", &record.opts, NULL, "spec",
"write collected trace data into several data files using parallel threads",
record__parse_threads),
+ OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
OPT_END()
};
@@ -3683,12 +3685,12 @@ static int record__init_thread_default_masks(struct record *rec, struct perf_cpu
static int record__init_thread_masks(struct record *rec)
{
int ret = 0;
- struct perf_cpu_map *cpus = rec->evlist->core.user_requested_cpus;
+ struct perf_cpu_map *cpus = rec->evlist->core.all_cpus;
if (!record__threads_enabled(rec))
return record__init_thread_default_masks(rec, cpus);
- if (cpu_map__is_dummy(cpus)) {
+ if (evlist__per_thread(rec->evlist)) {
pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n");
return -EINVAL;
}
@@ -3745,6 +3747,12 @@ int cmd_record(int argc, const char **argv)
# undef REASON
#endif
+#ifndef HAVE_BPF_SKEL
+# define set_nobuild(s, l, m, c) set_option_nobuild(record_options, s, l, m, c)
+ set_nobuild('\0', "off-cpu", "no BUILD_BPF_SKEL=1", true);
+# undef set_nobuild
+#endif
+
rec->opts.affinity = PERF_AFFINITY_SYS;
rec->evlist = evlist__new();
@@ -3981,6 +3989,14 @@ int cmd_record(int argc, const char **argv)
}
}
+ if (rec->off_cpu) {
+ err = record__config_off_cpu(rec);
+ if (err) {
+ pr_err("record__config_off_cpu failed, error %d\n", err);
+ goto out;
+ }
+ }
+
if (record_opts__config(&rec->opts)) {
err = -EINVAL;
goto out;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 7e6cc8bdf061..4ce87a8eb7d7 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -382,9 +382,6 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_
if (!counter->supported)
return -ENOENT;
- if (counter->core.system_wide)
- nthreads = 1;
-
for (thread = 0; thread < nthreads; thread++) {
struct perf_counts_values *count;
@@ -2261,7 +2258,7 @@ static void setup_system_wide(int forks)
struct evsel *counter;
evlist__for_each_entry(evsel_list, counter) {
- if (!counter->core.system_wide &&
+ if (!counter->core.requires_cpu &&
strcmp(counter->name, "duration_time")) {
return;
}
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/basic.json b/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
index 783de7f1aeaa..9bd20a5f47af 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
@@ -3,84 +3,84 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
- "BriefDescription": "Problem-State L1I Directory Writes",
- "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1I Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
- "BriefDescription": "Problem-State L1D Directory Writes",
- "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1D Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
index 3f28007d3892..a8d391ddeb8c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
@@ -3,112 +3,112 @@
"Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/extended.json b/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
index 86bd8ba9391d..bf6a9811e014 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
@@ -4,125 +4,125 @@
"EventCode": "128",
"EventName": "L1I_L2_SOURCED_WRITES",
"BriefDescription": "L1I L2 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 (L1.5) cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from the Level-2 (L1.5) cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "L1D_L2_SOURCED_WRITES",
"BriefDescription": "L1D L2 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from the Level-2 (L1.5) cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was sourced from the Level-2 (L1.5) cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "L1I_L3_LOCAL_WRITES",
"BriefDescription": "L1I L3 Local Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from the Level-3 cache that is on the same book as the Instruction cache (Local L2 cache)"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the installed cache line was sourced from the Level-3 cache that is on the same book as the Instruction cache (Local L2 cache)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "L1D_L3_LOCAL_WRITES",
"BriefDescription": "L1D L3 Local Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installtion cache line was source from the Level-3 cache that is on the same book as the Data cache (Local L2 cache)"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was source from the Level-3 cache that is on the same book as the Data cache (Local L2 cache)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "L1I_L3_REMOTE_WRITES",
"BriefDescription": "L1I L3 Remote Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Instruction cache (Remote L2 cache)"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Instruction cache (Remote L2 cache)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L1D_L3_REMOTE_WRITES",
"BriefDescription": "L1D L3 Remote Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Data cache (Remote L2 cache)"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Data cache (Remote L2 cache)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "L1D_LMEM_SOURCED_WRITES",
"BriefDescription": "L1D Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "L1I_LMEM_SOURCED_WRITES",
"BriefDescription": "L1I Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the s ame book as the Instruction cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache where the installed cache line was sourced from memory that is attached to the s ame book as the Instruction cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "L1I_CACHELINE_INVALIDATES",
"BriefDescription": "L1I Cacheline Invalidates",
- "PublicDescription": "A cache line in the Level-1 I-Cache has been invalidated by a store on the same CPU as the Level-1 I-Cache"
+ "PublicDescription": "A cache line in the Level-1 Instruction Cache has been invalidated by a store on the same CPU as the Level-1 Instruction Cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
- "PublicDescription": "A translation entry has been written into the Level-1 Instruction Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written into the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "142",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "ITLB1_MISSES",
"BriefDescription": "ITLB1 Misses",
- "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress"
+ "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "DTLB1_MISSES",
"BriefDescription": "DTLB1 Misses",
- "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle an DTLB1 miss is in progress"
+ "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle an DTLB1 miss is in progress."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L2C_STORES_SENT",
"BriefDescription": "L2C Stores Sent",
- "PublicDescription": "Incremented by one for every store sent to Level-2 (L1.5) cache"
+ "PublicDescription": "Incremented by one for every store sent to Level-2 (L1.5) cache."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/basic.json b/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
index 783de7f1aeaa..9bd20a5f47af 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
@@ -3,84 +3,84 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
- "BriefDescription": "Problem-State L1I Directory Writes",
- "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1I Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
- "BriefDescription": "Problem-State L1D Directory Writes",
- "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1D Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
index 3f28007d3892..a8d391ddeb8c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
@@ -3,112 +3,112 @@
"Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
index 1a5e4f89c57e..99c1b93a7e36 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
@@ -11,7 +11,7 @@
"EventCode": "129",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
},
{
"Unit": "CPU-M-CF",
@@ -25,7 +25,7 @@
"EventCode": "131",
"EventName": "DTLB1_HPAGE_WRITES",
"BriefDescription": "DTLB1 One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page."
},
{
"Unit": "CPU-M-CF",
@@ -39,63 +39,63 @@
"EventCode": "133",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "ITLB1_MISSES",
"BriefDescription": "ITLB1 Misses",
- "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress"
+ "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress."
},
{
"Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays for a one-megabyte large page translation"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays for a one-megabyte large page translation."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
@@ -109,273 +109,273 @@
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_ONNODE_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Node L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONNODE_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Node L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_ONNODE_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Node L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFDRAWER_SCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Same-Column L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1D_OFFDRAWER_FCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "L1D_ONNODE_MEM_SOURCED_WRITES",
"BriefDescription": "L1D On-Node Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Node memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Node memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "159",
"EventName": "L1D_ONDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "160",
"EventName": "L1D_OFFDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "161",
"EventName": "L1D_ONCHIP_MEM_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "162",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "163",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "164",
"EventName": "L1I_ONNODE_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "165",
"EventName": "L1I_ONNODE_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Node L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "166",
"EventName": "L1I_ONNODE_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Node L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "167",
"EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "168",
"EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "169",
"EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "170",
"EventName": "L1I_OFFDRAWER_SCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Same-Column L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "171",
"EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "172",
"EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "173",
"EventName": "L1I_OFFDRAWER_FCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Far-Column L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "174",
"EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "175",
"EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "176",
"EventName": "L1I_ONNODE_MEM_SOURCED_WRITES",
"BriefDescription": "L1I On-Node Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Node memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Node memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "177",
"EventName": "L1I_ONDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "178",
"EventName": "L1I_OFFDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "179",
"EventName": "L1I_ONCHIP_MEM_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "218",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
- "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
+ "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "219",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
"EventCode": "220",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/basic.json b/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
index fc762e9f1d6e..1023d47028ce 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
@@ -3,56 +3,56 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
index 3f28007d3892..a8d391ddeb8c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
@@ -3,112 +3,112 @@
"Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
index 4942b20a1ea1..ad40cc4f9727 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
@@ -4,357 +4,357 @@
"EventCode": "128",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "DTLB2_WRITES",
"BriefDescription": "DTLB2 Writes",
- "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache"
+ "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache. This is a replacement for what was provided for the DTLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "DTLB2_MISSES",
"BriefDescription": "DTLB2 Misses",
- "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle"
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle. This is a replacement for what was provided for the DTLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "DTLB2_HPAGE_WRITES",
"BriefDescription": "DTLB2 One-Megabyte Page Writes",
- "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done"
+ "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done."
},
{
"Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "DTLB2_GPAGE_WRITES",
"BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
- "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB"
+ "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "ITLB2_WRITES",
"BriefDescription": "ITLB2 Writes",
- "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache"
+ "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache. This is a replacement for what was provided for the ITLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "ITLB2_MISSES",
"BriefDescription": "ITLB2 Misses",
- "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle"
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle. This is a replacement for what was provided for the ITLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB"
+ "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB"
+ "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "TLB2_ENGINES_BUSY",
"BriefDescription": "TLB2 Engines Busy",
- "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle"
+ "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "L1C_TLB2_MISSES",
"BriefDescription": "L1C TLB2 Misses",
- "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress"
+ "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress."
},
{
"Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache withountervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO",
"BriefDescription": "L1D On-Chip L3 Sourced Writes read-only",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "162",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "163",
"EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "164",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "165",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "166",
"EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "167",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "168",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "169",
"EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "170",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "171",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "172",
"EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "173",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "174",
"EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "175",
"EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "224",
"EventName": "BCD_DFP_EXECUTION_SLOTS",
"BriefDescription": "BCD DFP Execution Slots",
- "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT"
+ "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT."
},
{
"Unit": "CPU-M-CF",
"EventCode": "225",
"EventName": "VX_BCD_EXECUTION_SLOTS",
"BriefDescription": "VX BCD Execution Slots",
- "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG"
+ "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG."
},
{
"Unit": "CPU-M-CF",
"EventCode": "226",
"EventName": "DECIMAL_INSTRUCTIONS",
"BriefDescription": "Decimal Instructions",
- "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP"
+ "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP."
},
{
"Unit": "CPU-M-CF",
"EventCode": "232",
"EventName": "LAST_HOST_TRANSLATIONS",
"BriefDescription": "Last host translation done",
- "PublicDescription": "Last Host Translation done"
+ "PublicDescription": "Last Host Translation done."
},
{
"Unit": "CPU-M-CF",
"EventCode": "243",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
- "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
+ "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "244",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
"EventCode": "245",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/basic.json b/tools/perf/pmu-events/arch/s390/cf_z15/basic.json
index fc762e9f1d6e..1023d47028ce 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z15/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/basic.json
@@ -3,56 +3,56 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json
deleted file mode 100644
index 3f28007d3892..000000000000
--- a/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json
+++ /dev/null
@@ -1,114 +0,0 @@
-[
- {
- "Unit": "CPU-M-CF",
- "EventCode": "64",
- "EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "65",
- "EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "66",
- "EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "67",
- "EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "68",
- "EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "69",
- "EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "70",
- "EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "71",
- "EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "72",
- "EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "73",
- "EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "74",
- "EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "75",
- "EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "76",
- "EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "77",
- "EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "78",
- "EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "79",
- "EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- }
-]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json b/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
index ad79189050a0..8b4380b8e489 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
@@ -1,6 +1,118 @@
[
{
"Unit": "CPU-M-CF",
+ "EventCode": "64",
+ "EventName": "PRNG_FUNCTIONS",
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "65",
+ "EventName": "PRNG_CYCLES",
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "66",
+ "EventName": "PRNG_BLOCKED_FUNCTIONS",
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "67",
+ "EventName": "PRNG_BLOCKED_CYCLES",
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "68",
+ "EventName": "SHA_FUNCTIONS",
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "69",
+ "EventName": "SHA_CYCLES",
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "70",
+ "EventName": "SHA_BLOCKED_FUNCTIONS",
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "71",
+ "EventName": "SHA_BLOCKED_CYCLES",
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "72",
+ "EventName": "DEA_FUNCTIONS",
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "73",
+ "EventName": "DEA_CYCLES",
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "74",
+ "EventName": "DEA_BLOCKED_FUNCTIONS",
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "75",
+ "EventName": "DEA_BLOCKED_CYCLES",
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "76",
+ "EventName": "AES_FUNCTIONS",
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "77",
+ "EventName": "AES_CYCLES",
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "78",
+ "EventName": "AES_BLOCKED_FUNCTIONS",
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "79",
+ "EventName": "AES_BLOCKED_CYCLES",
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
"EventCode": "80",
"EventName": "ECC_FUNCTION_COUNT",
"BriefDescription": "ECC Function Count",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/extended.json b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json
index 8ac61f8f286b..9c691c391086 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z15/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json
@@ -4,357 +4,357 @@
"EventCode": "128",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "DTLB2_WRITES",
"BriefDescription": "DTLB2 Writes",
- "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache"
+ "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache. This is a replacement for what was provided for the DTLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "DTLB2_MISSES",
"BriefDescription": "DTLB2 Misses",
- "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle"
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle. This is a replacement for what was provided for the DTLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "DTLB2_HPAGE_WRITES",
"BriefDescription": "DTLB2 One-Megabyte Page Writes",
- "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page"
+ "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page."
},
{
"Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "DTLB2_GPAGE_WRITES",
"BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
- "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB"
+ "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "ITLB2_WRITES",
"BriefDescription": "ITLB2 Writes",
- "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache"
+ "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache. This is a replacement for what was provided for the ITLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "ITLB2_MISSES",
"BriefDescription": "ITLB2 Misses",
- "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle"
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle. This is a replacement for what was provided for the ITLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB"
+ "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB"
+ "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "TLB2_ENGINES_BUSY",
"BriefDescription": "TLB2 Engines Busy",
- "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle"
+ "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "L1C_TLB2_MISSES",
"BriefDescription": "L1C TLB2 Misses",
- "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress"
+ "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress."
},
{
"Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache withountervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO",
"BriefDescription": "L1D On-Chip L3 Sourced Writes read-only",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "162",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "163",
"EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "164",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "165",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "166",
"EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "167",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "168",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "169",
"EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "170",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "171",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "172",
"EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "173",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "174",
"EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "175",
"EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "224",
"EventName": "BCD_DFP_EXECUTION_SLOTS",
"BriefDescription": "BCD DFP Execution Slots",
- "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT"
+ "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT."
},
{
"Unit": "CPU-M-CF",
"EventCode": "225",
"EventName": "VX_BCD_EXECUTION_SLOTS",
"BriefDescription": "VX BCD Execution Slots",
- "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG"
+ "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG."
},
{
"Unit": "CPU-M-CF",
"EventCode": "226",
"EventName": "DECIMAL_INSTRUCTIONS",
"BriefDescription": "Decimal Instructions",
- "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP"
+ "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP."
},
{
"Unit": "CPU-M-CF",
"EventCode": "232",
"EventName": "LAST_HOST_TRANSLATIONS",
"BriefDescription": "Last host translation done",
- "PublicDescription": "Last Host Translation done"
+ "PublicDescription": "Last Host Translation done."
},
{
"Unit": "CPU-M-CF",
"EventCode": "243",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
- "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
+ "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "244",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
"EventCode": "245",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
@@ -374,15 +374,15 @@
"Unit": "CPU-M-CF",
"EventCode": "264",
"EventName": "DFLT_CC",
- "BriefDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed",
+ "BriefDescription": "Increments DEFLATE CONVERSION CALL",
"PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed"
},
{
"Unit": "CPU-M-CF",
"EventCode": "265",
"EventName": "DFLT_CCFINISH",
- "BriefDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2",
- "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2"
+ "BriefDescription": "Increments completed DEFLATE CONVERSION CALL",
+ "PublicDescription": " Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2 complete. "
},
{
"Unit": "CPU-M-CF",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/basic.json b/tools/perf/pmu-events/arch/s390/cf_z16/basic.json
new file mode 100644
index 000000000000..1023d47028ce
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/basic.json
@@ -0,0 +1,58 @@
+[
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "0",
+ "EventName": "CPU_CYCLES",
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "1",
+ "EventName": "INSTRUCTIONS",
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "2",
+ "EventName": "L1I_DIR_WRITES",
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "3",
+ "EventName": "L1I_PENALTY_CYCLES",
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "4",
+ "EventName": "L1D_DIR_WRITES",
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "5",
+ "EventName": "L1D_PENALTY_CYCLES",
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "32",
+ "EventName": "PROBLEM_STATE_CPU_CYCLES",
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "33",
+ "EventName": "PROBLEM_STATE_INSTRUCTIONS",
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/crypto6.json b/tools/perf/pmu-events/arch/s390/cf_z16/crypto6.json
new file mode 100644
index 000000000000..8b4380b8e489
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/crypto6.json
@@ -0,0 +1,142 @@
+[
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "64",
+ "EventName": "PRNG_FUNCTIONS",
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "65",
+ "EventName": "PRNG_CYCLES",
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "66",
+ "EventName": "PRNG_BLOCKED_FUNCTIONS",
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "67",
+ "EventName": "PRNG_BLOCKED_CYCLES",
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "68",
+ "EventName": "SHA_FUNCTIONS",
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "69",
+ "EventName": "SHA_CYCLES",
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "70",
+ "EventName": "SHA_BLOCKED_FUNCTIONS",
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "71",
+ "EventName": "SHA_BLOCKED_CYCLES",
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "72",
+ "EventName": "DEA_FUNCTIONS",
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "73",
+ "EventName": "DEA_CYCLES",
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "74",
+ "EventName": "DEA_BLOCKED_FUNCTIONS",
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "75",
+ "EventName": "DEA_BLOCKED_CYCLES",
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "76",
+ "EventName": "AES_FUNCTIONS",
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "77",
+ "EventName": "AES_CYCLES",
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "78",
+ "EventName": "AES_BLOCKED_FUNCTIONS",
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "79",
+ "EventName": "AES_BLOCKED_CYCLES",
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "80",
+ "EventName": "ECC_FUNCTION_COUNT",
+ "BriefDescription": "ECC Function Count",
+ "PublicDescription": "This counter counts the total number of the elliptic-curve cryptography (ECC) functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "81",
+ "EventName": "ECC_CYCLES_COUNT",
+ "BriefDescription": "ECC Cycles Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the ECC coprocessor is busy performing the elliptic-curve cryptography (ECC) functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "82",
+ "EventName": "ECC_BLOCKED_FUNCTION_COUNT",
+ "BriefDescription": "Ecc Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the elliptic-curve cryptography (ECC) functions that are issued by the CPU and are blocked because the ECC coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "83",
+ "EventName": "ECC_BLOCKED_CYCLES_COUNT",
+ "BriefDescription": "ECC Blocked Cycles Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the elliptic-curve cryptography (ECC) functions issued by the CPU because the ECC coprocessor is busy performing a function issued by another CPU."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/extended.json b/tools/perf/pmu-events/arch/s390/cf_z16/extended.json
new file mode 100644
index 000000000000..c306190fc06f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/extended.json
@@ -0,0 +1,492 @@
+[
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "128",
+ "EventName": "L1D_RO_EXCL_WRITES",
+ "BriefDescription": "L1D Read-only Exclusive Writes",
+ "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "129",
+ "EventName": "DTLB2_WRITES",
+ "BriefDescription": "DTLB2 Writes",
+ "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the Level-1 Data cache. This is a replacement for what was provided for the DTLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "130",
+ "EventName": "DTLB2_MISSES",
+ "BriefDescription": "DTLB2 Misses",
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the Level-1 Data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle. This is a replacement for what was provided for the DTLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "131",
+ "EventName": "CRSTE_1MB_WRITES",
+ "BriefDescription": "One Megabyte CRSTE writes",
+ "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "132",
+ "EventName": "DTLB2_GPAGE_WRITES",
+ "BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
+ "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "134",
+ "EventName": "ITLB2_WRITES",
+ "BriefDescription": "ITLB2 Writes",
+ "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache. This is a replacement for what was provided for the ITLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "135",
+ "EventName": "ITLB2_MISSES",
+ "BriefDescription": "ITLB2 Misses",
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the Level-1 Instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle. This is a replacement for what was provided for the ITLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "137",
+ "EventName": "TLB2_PTE_WRITES",
+ "BriefDescription": "TLB2 Page Table Entry Writes",
+ "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "138",
+ "EventName": "TLB2_CRSTE_WRITES",
+ "BriefDescription": "TLB2 Combined Region and Segment Entry Writes",
+ "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "139",
+ "EventName": "TLB2_ENGINES_BUSY",
+ "BriefDescription": "TLB2 Engines Busy",
+ "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "140",
+ "EventName": "TX_C_TEND",
+ "BriefDescription": "Completed TEND instructions in constrained TX mode",
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "141",
+ "EventName": "TX_NC_TEND",
+ "BriefDescription": "Completed TEND instructions in non-constrained TX mode",
+ "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "143",
+ "EventName": "L1C_TLB2_MISSES",
+ "BriefDescription": "L1C TLB2 Misses",
+ "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "145",
+ "EventName": "DCW_REQ",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "146",
+ "EventName": "DCW_REQ_IV",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "147",
+ "EventName": "DCW_REQ_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "148",
+ "EventName": "DCW_REQ_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache after using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "149",
+ "EventName": "DCW_ON_CHIP",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "150",
+ "EventName": "DCW_ON_CHIP_IV",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "151",
+ "EventName": "DCW_ON_CHIP_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "152",
+ "EventName": "DCW_ON_CHIP_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "153",
+ "EventName": "DCW_ON_MODULE",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Module Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Module Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "154",
+ "EventName": "DCW_ON_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Drawer Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "155",
+ "EventName": "DCW_OFF_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Off-Drawer Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "156",
+ "EventName": "DCW_ON_CHIP_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Memory",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "157",
+ "EventName": "DCW_ON_MODULE_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Module Memory",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Module memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "158",
+ "EventName": "DCW_ON_DRAWER_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Drawer Memory",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "159",
+ "EventName": "DCW_OFF_DRAWER_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Off-Drawer Memory",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "160",
+ "EventName": "IDCW_ON_MODULE_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "161",
+ "EventName": "IDCW_ON_MODULE_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory Cache with Chip Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache using chip horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "162",
+ "EventName": "IDCW_ON_MODULE_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory Cache with Drawer Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "163",
+ "EventName": "IDCW_ON_DRAWER_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "164",
+ "EventName": "IDCW_ON_DRAWER_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer Cache with Chip Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "165",
+ "EventName": "IDCW_ON_DRAWER_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer Cache with Drawer Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "166",
+ "EventName": "IDCW_OFF_DRAWER_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "167",
+ "EventName": "IDCW_OFF_DRAWER_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer Cache with Chip Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "168",
+ "EventName": "IDCW_OFF_DRAWER_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer Cache with Drawer Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "169",
+ "EventName": "ICW_REQ",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced the requestors Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "170",
+ "EventName": "ICW_REQ_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestors Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "171",
+ "EventName": "ICW_REQ_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestors Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "172",
+ "EventName": "ICW_REQ_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestor’s Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "173",
+ "EventName": "ICW_ON_CHIP",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "174",
+ "EventName": "ICW_ON_CHIP_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced an On-Chip Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "175",
+ "EventName": "ICW_ON_CHIP_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "176",
+ "EventName": "ICW_ON_CHIP_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip level 2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "177",
+ "EventName": "ICW_ON_MODULE",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Module Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "178",
+ "EventName": "ICW_ON_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Drawer Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced an On-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "179",
+ "EventName": "ICW_OFF_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Off-Drawer Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced an Off-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "180",
+ "EventName": "ICW_ON_CHIP_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Memory",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "181",
+ "EventName": "ICW_ON_MODULE_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Module Memory",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Module memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "182",
+ "EventName": "ICW_ON_DRAWER_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Drawer Memory",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "183",
+ "EventName": "ICW_OFF_DRAWER_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Off-Drawer Memory",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "224",
+ "EventName": "BCD_DFP_EXECUTION_SLOTS",
+ "BriefDescription": "Binary Coded Decimal to Decimal Floating Point conversions",
+ "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "225",
+ "EventName": "VX_BCD_EXECUTION_SLOTS",
+ "BriefDescription": "Count finished vector arithmetic Binary Coded Decimal instructions",
+ "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMP, VMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOP, VCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVD, VCVDG."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "226",
+ "EventName": "DECIMAL_INSTRUCTIONS",
+ "BriefDescription": "Decimal instruction dispatched",
+ "PublicDescription": "Decimal instruction dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "232",
+ "EventName": "LAST_HOST_TRANSLATIONS",
+ "BriefDescription": "Last host translation done",
+ "PublicDescription": "Last Host Translation done"
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "244",
+ "EventName": "TX_NC_TABORT",
+ "BriefDescription": "Aborted transactions in unconstrained TX mode",
+ "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "245",
+ "EventName": "TX_C_TABORT_NO_SPECIAL",
+ "BriefDescription": "Aborted transactions in constrained TX mode",
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "246",
+ "EventName": "TX_C_TABORT_SPECIAL",
+ "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "248",
+ "EventName": "DFLT_ACCESS",
+ "BriefDescription": "Cycles CPU spent obtaining access to Deflate unit",
+ "PublicDescription": "Cycles CPU spent obtaining access to Deflate unit"
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "253",
+ "EventName": "DFLT_CYCLES",
+ "BriefDescription": "Cycles CPU is using Deflate unit",
+ "PublicDescription": "Cycles CPU is using Deflate unit"
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "256",
+ "EventName": "SORTL",
+ "BriefDescription": "Count SORTL instructions",
+ "PublicDescription": "Increments by one for every SORT LISTS instruction executed."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "265",
+ "EventName": "DFLT_CC",
+ "BriefDescription": "Increments DEFLATE CONVERSION CALL",
+ "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "266",
+ "EventName": "DFLT_CCFINISH",
+ "BriefDescription": "Increments completed DEFLATE CONVERSION CALL",
+ "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "267",
+ "EventName": "NNPA_INVOCATIONS",
+ "BriefDescription": "NNPA Total invocations",
+ "PublicDescription": "Increments by one for every Neural Network Processing Assist instruction executed."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "268",
+ "EventName": "NNPA_COMPLETIONS",
+ "BriefDescription": "NNPA Total completions",
+ "PublicDescription": "Increments by one for every Neural Network Processing Assist instruction executed that ended in Condition Codes 0, 1 or 2."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "269",
+ "EventName": "NNPA_WAIT_LOCK",
+ "BriefDescription": "Cycles spent obtaining NNPA lock",
+ "PublicDescription": "Cycles CPU spent obtaining access to IBM Z Integrated Accelerator for AI."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "270",
+ "EventName": "NNPA_HOLD_LOCK",
+ "BriefDescription": "Cycles spent holding NNPA lock",
+ "PublicDescription": "Cycles CPU is using IBM Z Integrated Accelerator for AI."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "448",
+ "EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE",
+ "BriefDescription": "Cycle count with one thread active",
+ "PublicDescription": "Cycle count with one thread active"
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "449",
+ "EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
+ "BriefDescription": "Cycle count with two threads active",
+ "PublicDescription": "Cycle count with two threads active"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
new file mode 100644
index 000000000000..1a0034f79f73
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
@@ -0,0 +1,7 @@
+[
+ {
+ "BriefDescription": "Transaction count",
+ "MetricName": "transaction",
+ "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/basic.json b/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
index 783de7f1aeaa..9bd20a5f47af 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
@@ -3,84 +3,84 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
- "BriefDescription": "Problem-State L1I Directory Writes",
- "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1I Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
- "BriefDescription": "Problem-State L1D Directory Writes",
- "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1D Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
index 3f28007d3892..a8d391ddeb8c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
@@ -3,112 +3,112 @@
"Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/extended.json b/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
index 86b29fd181cf..6ebbdbaf7951 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
@@ -4,14 +4,14 @@
"EventCode": "128",
"EventName": "L1D_L2_SOURCED_WRITES",
"BriefDescription": "L1D L2 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from the Level-2 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from the Level-2 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "L1I_L2_SOURCED_WRITES",
"BriefDescription": "L1I L2 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from the Level-2 cache."
},
{
"Unit": "CPU-M-CF",
@@ -32,139 +32,139 @@
"EventCode": "133",
"EventName": "L2C_STORES_SENT",
"BriefDescription": "L2C Stores Sent",
- "PublicDescription": "Incremented by one for every store sent to Level-2 cache"
+ "PublicDescription": "Incremented by one for every store sent to Level-2 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an Off Book Level-3 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "L1D_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an On Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an On Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "DTLB1_HPAGE_WRITES",
"BriefDescription": "DTLB1 One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "L1D_LMEM_SOURCED_WRITES",
"BriefDescription": "L1D Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Data Cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "142",
"EventName": "L1I_LMEM_SOURCED_WRITES",
"BriefDescription": "L1I Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an Off Book Level-3 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation."
},
{
"Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an On Chip Level-3 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an On Chip Level-3 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json b/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
index 783de7f1aeaa..9bd20a5f47af 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
@@ -3,84 +3,84 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
- "BriefDescription": "Problem-State L1I Directory Writes",
- "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1I Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
- "BriefDescription": "Problem-State L1D Directory Writes",
- "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1D Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json b/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
index 3f28007d3892..a8d391ddeb8c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
@@ -3,112 +3,112 @@
"Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json b/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
index f40cbed89418..9e765581382b 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
@@ -18,230 +18,230 @@
"EventCode": "130",
"EventName": "L1D_L2I_SOURCED_WRITES",
"BriefDescription": "L1D L2I Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "L1D_LMEM_SOURCED_WRITES",
"BriefDescription": "L1D Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Data cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "L1I_LMEM_SOURCED_WRITES",
"BriefDescription": "L1I Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "DTLB1_HPAGE_WRITES",
"BriefDescription": "DTLB1 One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "142",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation."
},
{
"Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a nonconstrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a nonconstrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from a On Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from a On Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Book L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1I_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "159",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "160",
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "161",
"EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Book L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "177",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
- "PublicDescription": "A transaction abort has occurred in a nonconstrained transactional-execution mode"
+ "PublicDescription": "A transaction abort has occurred in a nonconstrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "178",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
"EventCode": "179",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/mapfile.csv b/tools/perf/pmu-events/arch/s390/mapfile.csv
index 61641a3480e0..a918e1af77a5 100644
--- a/tools/perf/pmu-events/arch/s390/mapfile.csv
+++ b/tools/perf/pmu-events/arch/s390/mapfile.csv
@@ -5,3 +5,4 @@ Family-model,Version,Filename,EventType
^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core
^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_z15,core
+^IBM.393[12].*3\.7.[[:xdigit:]]+$,3,cf_z16,core
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
index 6b24958737b5..f8bdf7812b51 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
@@ -37,7 +37,7 @@
{
"BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
"MetricExpr": "TOPDOWN.SLOTS / ( TOPDOWN.SLOTS / 2 ) if #SMT_on else 1",
- "MetricGroup": "SMT",
+ "MetricGroup": "SMT;TmaL1",
"MetricName": "Slots_Utilization",
"Unit": "cpu_core"
},
@@ -64,28 +64,21 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width)",
- "MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) ) / ( 2 * CPU_CLK_UNHALTED.DISTRIBUTED )",
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricExpr": "( FP_ARITH_DISPATCHED.PORT_0 + FP_ARITH_DISPATCHED.PORT_1 + FP_ARITH_DISPATCHED.PORT_5 ) / ( 2 * CPU_CLK_UNHALTED.DISTRIBUTED )",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "FP_Arith_Utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting.",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common).",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BadSpec;BrMispredicts",
- "MetricName": "IpMispredict",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
"MetricGroup": "SMT",
@@ -182,6 +175,13 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu_core@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "IpSWPF",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Total number of retired Instructions, Sample with: INST_RETIRED.PREC_DIST",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary;TmaL1",
@@ -189,6 +189,27 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
+ "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Strings_Cycles",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu_core@ASSISTS.ANY\\,umask\\=0x1B@",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "IpAssist",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "Execute",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
"MetricExpr": "UOPS_ISSUED.ANY / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
@@ -210,13 +231,27 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Number of Instructions per non-speculative DSB miss",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "DSB_Switch_Cost",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
"MetricGroup": "DSBmiss;Fed",
"MetricName": "IpDSB_Miss_Ret",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Fraction of branches that are non-taken conditionals",
"MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
"MetricGroup": "Bad;Branches;CodeGen;PGO",
@@ -252,11 +287,10 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / MEM_LOAD_COMPLETED.L1_MISS_ANY",
"MetricGroup": "Mem;MemoryBound;MemoryLat",
"MetricName": "Load_Miss_Real_Latency",
- "PublicDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles). Latency may be overestimated for multi-load instructions - e.g. repeat strings.",
"Unit": "cpu_core"
},
{
@@ -267,34 +301,6 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L1D_Cache_Fill_BW",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L2_Cache_Fill_BW",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L3_Cache_Fill_BW",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW;Offcore",
- "MetricName": "L3_Cache_Access_BW",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
@@ -316,14 +322,14 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses;Offcore",
"MetricName": "L2MPKI_All",
"Unit": "cpu_core"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all demand loads (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
"MetricName": "L2MPKI_Load",
@@ -351,7 +357,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Fill Buffer (FB) true hits per kilo instructions for retired demand loads",
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
"MetricName": "FB_HPKI",
@@ -366,6 +372,62 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "HPC;Summary",
@@ -384,6 +446,7 @@
"MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 ) / duration_time",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine.",
"Unit": "cpu_core"
},
{
@@ -461,7 +524,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
- "MetricExpr": "TOPDOWN_BE_BOUND.ALL / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricExpr": "(TOPDOWN_BE_BOUND.ALL / (5 * CPU_CLK_UNHALTED.CORE))",
"MetricGroup": "TopdownL1",
"MetricName": "Backend_Bound_Aux",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that UOPS must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count. All of these subevents count backend stalls, in slots, due to a resource limitation. These are not cycle based events and therefore can not be precisely added or subtracted from the Backend_Bound subevents which are cycle based. These subevents are supplementary to Backend_Bound and can be used to analyze results from a resource perspective at allocation. ",
@@ -608,7 +671,7 @@
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.CORE:k / CPU_CLK_UNHALTED.CORE",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE@k / CPU_CLK_UNHALTED.CORE",
"MetricName": "Kernel_Utilization",
"Unit": "cpu_atom"
},
@@ -620,7 +683,7 @@
},
{
"BriefDescription": "Estimated Pause cost. In percent",
- "MetricExpr": "100 * SERIALIZATION.NON_C01_MS_SCB / ( 5 * CPU_CLK_UNHALTED.CORE )",
+ "MetricExpr": "100 * SERIALIZATION.NON_C01_MS_SCB / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricName": "Estimated_Pause_Cost",
"Unit": "cpu_atom"
},
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
new file mode 100644
index 000000000000..8f9497838bd4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
@@ -0,0 +1,530 @@
+[
+ {
+ "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
+ "MetricExpr": "100 * (( BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) ) / TOPDOWN.SLOTS)",
+ "MetricGroup": "Ret",
+ "MetricName": "Branching_Overhead"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)",
+ "MetricGroup": "Pipeline;Mem",
+ "MetricName": "CPI"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Pipeline",
+ "MetricName": "CLKS"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "TOPDOWN.SLOTS",
+ "MetricGroup": "TmaL1",
+ "MetricName": "SLOTS"
+ },
+ {
+ "BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
+ "MetricExpr": "TOPDOWN.SLOTS / ( TOPDOWN.SLOTS / 2 ) if #SMT_on else 1",
+ "MetricGroup": "SMT;TmaL1",
+ "MetricName": "Slots_Utilization"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "Execute_per_Issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "Ret;SMT;TmaL1",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + FP_ARITH_INST_RETIRED2.SCALAR_HALF ) + 2 * ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF ) + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * ( FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF + 4 * AMX_OPS_RETIRED.BF16 ) / CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "Ret;Flops",
+ "MetricName": "FLOPc"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricExpr": "( FP_ARITH_DISPATCHED.PORT_0 + FP_ARITH_DISPATCHED.PORT_1 + FP_ARITH_DISPATCHED.PORT_5 ) / ( 2 * CPU_CLK_UNHALTED.DISTRIBUTED )",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "FP_Arith_Utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "ILP"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "IpLoad"
+ },
+ {
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "IpStore"
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "IpBranch"
+ },
+ {
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "IpCall"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "BpTkBranch"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + FP_ARITH_INST_RETIRED2.SCALAR_HALF ) + 2 * ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF ) + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * ( FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF + 4 * AMX_OPS_RETIRED.BF16 )",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "IpFLOP"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + FP_ARITH_INST_RETIRED2.SCALAR) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.VECTOR) )",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "IpArith",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "IpArith_Scalar_SP",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "IpArith_Scalar_DP",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.128B_PACKED_HALF )",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "IpArith_AVX128",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.256B_PACKED_HALF )",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "IpArith_AVX256",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.512B_PACKED_HALF )",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "IpArith_AVX512",
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AMX operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / AMX_OPS_RETIRED.BF16",
+ "MetricGroup": "Flops;FpVector;InsType;Server",
+ "MetricName": "IpArith_AMX_F16",
+ "PublicDescription": "Instructions per FP Arithmetic AMX operation (lower number means higher occurrence rate). Operations factored per matrices' sizes of the AMX instructions."
+ },
+ {
+ "BriefDescription": "Instructions per Integer Arithmetic AMX operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / AMX_OPS_RETIRED.INT8",
+ "MetricGroup": "IntVector;InsType;Server",
+ "MetricName": "IpArith_AMX_Int8",
+ "PublicDescription": "Instructions per Integer Arithmetic AMX operation (lower number means higher occurrence rate). Operations factored per matrices' sizes of the AMX instructions."
+ },
+ {
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "IpSWPF"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions, Sample with: INST_RETIRED.PREC_DIST",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary;TmaL1",
+ "MetricName": "Instructions"
+ },
+ {
+ "BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
+ "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Strings_Cycles"
+ },
+ {
+ "BriefDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@ASSISTS.ANY\\,umask\\=0x1B@",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "IpAssist"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "Execute"
+ },
+ {
+ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "Fetch_UpC"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fed;FetchBW",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "DSB_Switch_Cost"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricGroup": "DSBmiss;Fed",
+ "MetricName": "IpDSB_Miss_Ret"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are non-taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "Cond_NT"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "Cond_TK"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are CALL or RET",
+ "MetricExpr": "( BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN ) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "CallRet"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "Jump"
+ },
+ {
+ "BriefDescription": "Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)",
+ "MetricExpr": "1 - ( (BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES) + (BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES) + (( BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN ) / BR_INST_RETIRED.ALL_BRANCHES) + ((BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES) )",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "Other_Branches"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / MEM_LOAD_COMPLETED.L1_MISS_ANY",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "Load_Miss_Real_Latency"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBound;MemoryBW",
+ "MetricName": "MLP"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L1MPKI_Load"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;Backend;CacheMisses",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses;Offcore",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L2MPKI_Load"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L2HPKI_Load"
+ },
+ {
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L3MPKI"
+ },
+ {
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "FB_HPKI"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricConstraint": "NO_NMI_WATCHDOG",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING ) / ( 4 * CPU_CLK_UNHALTED.DISTRIBUTED )",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "Page_Walks_Utilization"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW"
+ },
+ {
+ "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
+ "MetricExpr": "1000 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
+ "MetricGroup": "L2Evicts;Mem;Server",
+ "MetricName": "L2_Evictions_Silent_PKI"
+ },
+ {
+ "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
+ "MetricExpr": "1000 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY",
+ "MetricGroup": "L2Evicts;Mem;Server",
+ "MetricName": "L2_Evictions_NonSilent_PKI"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC) * msr@tsc@ / 1000000000 / duration_time",
+ "MetricGroup": "Summary;Power",
+ "MetricName": "Average_Frequency"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + FP_ARITH_INST_RETIRED2.SCALAR_HALF ) + 2 * ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF ) + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * ( FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF + 4 * AMX_OPS_RETIRED.BF16 ) / 1000000000 ) / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ },
+ {
+ "BriefDescription": "Tera Integer (matrix) Operations Per Second",
+ "MetricExpr": "( 8 * AMX_OPS_RETIRED.INT8 / 1000000000000 ) / duration_time",
+ "MetricGroup": "Cor;HPC;IntVector;Server",
+ "MetricName": "TIOPS"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_DISTRIBUTED if #SMT_on else 0",
+ "MetricGroup": "SMT",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "Kernel_CPI"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+ "MetricExpr": "1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD ) / ( uncore_cha_0@event\\=0x1@ / duration_time )",
+ "MetricGroup": "Mem;MemoryLat;SoC",
+ "MetricName": "MEM_Read_Latency"
+ },
+ {
+ "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / cha@UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD\\,thresh\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "MEM_Parallel_Reads"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
+ "MetricExpr": "( 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM ) / uncore_cha_0@event\\=0x1@ )",
+ "MetricGroup": "Mem;MemoryLat;SoC;Server",
+ "MetricName": "MEM_PMM_Read_Latency"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
+ "MetricExpr": " 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR ) / uncore_cha_0@event\\=0x1@",
+ "MetricGroup": "Mem;MemoryLat;SoC;Server",
+ "MetricName": "MEM_DRAM_Read_Latency"
+ },
+ {
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricExpr": "( ( 64 * UNC_M_PMM_RPQ_INSERTS / 1000000000 ) / duration_time )",
+ "MetricGroup": "Mem;MemoryBW;SoC;Server",
+ "MetricName": "PMM_Read_BW"
+ },
+ {
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "( ( 64 * UNC_M_PMM_WPQ_INSERTS / 1000000000 ) / duration_time )",
+ "MetricGroup": "Mem;MemoryBW;SoC;Server",
+ "MetricName": "PMM_Write_BW"
+ },
+ {
+ "BriefDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR * 64 / 1000000000 / duration_time",
+ "MetricGroup": "IoBW;Mem;SoC;Server",
+ "MetricName": "IO_Write_BW"
+ },
+ {
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "uncore_cha_0@event\\=0x1@",
+ "MetricGroup": "SoC",
+ "MetricName": "Socket_CLKS"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "IpFarBranch"
+ },
+ {
+ "BriefDescription": "C1 residency percent per core",
+ "MetricExpr": "(cstate_core@c1\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C1_Core_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ }
+]
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index cee61c4ed59e..e597e4bac90f 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -605,7 +605,7 @@ static int json_events(const char *fn,
} else if (json_streq(map, field, "ExtSel")) {
char *code = NULL;
addfield(map, &code, "", "", val);
- eventcode |= strtoul(code, NULL, 0) << 21;
+ eventcode |= strtoul(code, NULL, 0) << 8;
free(code);
} else if (json_streq(map, field, "EventName")) {
addfield(map, &je.name, "", "", val);
diff --git a/tools/perf/scripts/python/arm-cs-trace-disasm.py b/tools/perf/scripts/python/arm-cs-trace-disasm.py
new file mode 100755
index 000000000000..5f57d9829956
--- /dev/null
+++ b/tools/perf/scripts/python/arm-cs-trace-disasm.py
@@ -0,0 +1,272 @@
+# SPDX-License-Identifier: GPL-2.0
+# arm-cs-trace-disasm.py: ARM CoreSight Trace Dump With Disassembler
+#
+# Author: Tor Jeremiassen <tor@ti.com>
+# Mathieu Poirier <mathieu.poirier@linaro.org>
+# Leo Yan <leo.yan@linaro.org>
+# Al Grant <Al.Grant@arm.com>
+
+from __future__ import print_function
+import os
+from os import path
+import sys
+import re
+from subprocess import *
+from optparse import OptionParser, make_option
+
+from perf_trace_context import perf_set_itrace_options, \
+ perf_sample_insn, perf_sample_srccode
+
+# Below are some example commands for using this script.
+#
+# Output disassembly with objdump:
+# perf script -s scripts/python/arm-cs-trace-disasm.py \
+# -- -d objdump -k path/to/vmlinux
+# Output disassembly with llvm-objdump:
+# perf script -s scripts/python/arm-cs-trace-disasm.py \
+# -- -d llvm-objdump-11 -k path/to/vmlinux
+# Output only source line and symbols:
+# perf script -s scripts/python/arm-cs-trace-disasm.py
+
+# Command line parsing.
+option_list = [
+ # formatting options for the bottom entry of the stack
+ make_option("-k", "--vmlinux", dest="vmlinux_name",
+ help="Set path to vmlinux file"),
+ make_option("-d", "--objdump", dest="objdump_name",
+ help="Set path to objdump executable file"),
+ make_option("-v", "--verbose", dest="verbose",
+ action="store_true", default=False,
+ help="Enable debugging log")
+]
+
+parser = OptionParser(option_list=option_list)
+(options, args) = parser.parse_args()
+
+# Initialize global dicts and regular expression
+disasm_cache = dict()
+cpu_data = dict()
+disasm_re = re.compile(r"^\s*([0-9a-fA-F]+):")
+disasm_func_re = re.compile(r"^\s*([0-9a-fA-F]+)\s.*:")
+cache_size = 64*1024
+
+glb_source_file_name = None
+glb_line_number = None
+glb_dso = None
+
+def get_optional(perf_dict, field):
+ if field in perf_dict:
+ return perf_dict[field]
+ return "[unknown]"
+
+def get_offset(perf_dict, field):
+ if field in perf_dict:
+ return f"+0x{perf_dict[field]:x}"
+ return ""
+
+def get_dso_file_path(dso_name, dso_build_id):
+ if (dso_name == "[kernel.kallsyms]" or dso_name == "vmlinux"):
+ if (options.vmlinux_name):
+ return options.vmlinux_name;
+ else:
+ return dso_name
+
+ if (dso_name == "[vdso]") :
+ append = "/vdso"
+ else:
+ append = "/elf"
+
+ dso_path = f"{os.environ['PERF_BUILDID_DIR']}/{dso_name}/{dso_build_id}{append}"
+ # Replace the first duplicate slash with a single slash char
+ dso_path = dso_path.replace('//', '/', 1)
+ return dso_path
+
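+# As an illustration of the path built above (build-id and prefix are made-up
+# values): with PERF_BUILDID_DIR=/root/.debug and dso_name=/usr/lib/libc.so.6,
+# the result is /root/.debug/usr/lib/libc.so.6/<build-id>/elf once the
+# duplicate slash at the boundary is collapsed.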
+def read_disam(dso_fname, dso_start, start_addr, stop_addr):
+ addr_range = str(start_addr) + ":" + str(stop_addr) + ":" + dso_fname
+
+ # Don't let the cache get too big, clear it when it hits max size
+ if (len(disasm_cache) > cache_size):
+ disasm_cache.clear();
+
+ if addr_range in disasm_cache:
+ disasm_output = disasm_cache[addr_range];
+ else:
+ start_addr = start_addr - dso_start;
+ stop_addr = stop_addr - dso_start;
+ disasm = [ options.objdump_name, "-d", "-z",
+ f"--start-address=0x{start_addr:x}",
+ f"--stop-address=0x{stop_addr:x}" ]
+ disasm += [ dso_fname ]
+ disasm_output = check_output(disasm).decode('utf-8').split('\n')
+ disasm_cache[addr_range] = disasm_output
+
+ return disasm_output
+
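+# For reference, read_disam() above ends up invoking the configured objdump
+# roughly like (addresses and path below are illustrative only):
+#   objdump -d -z --start-address=0x1040 --stop-address=0x1060 /path/to/dso
+# and caches the decoded output keyed by "start:stop:dso".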
+def print_disam(dso_fname, dso_start, start_addr, stop_addr):
+ for line in read_disam(dso_fname, dso_start, start_addr, stop_addr):
+ m = disasm_func_re.search(line)
+ if m is None:
+ m = disasm_re.search(line)
+ if m is None:
+ continue
+ print(f"\t{line}")
+
+def print_sample(sample):
+ print(f"Sample = {{ cpu: {sample['cpu']:04} addr: 0x{sample['addr']:016x} " \
+ f"phys_addr: 0x{sample['phys_addr']:016x} ip: 0x{sample['ip']:016x} " \
+ f"pid: {sample['pid']} tid: {sample['tid']} period: {sample['period']} time: {sample['time']} }}")
+
+def trace_begin():
+ print('ARM CoreSight Trace Data Assembler Dump')
+
+def trace_end():
+ print('End')
+
+def trace_unhandled(event_name, context, event_fields_dict):
+ print(' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
+
+def common_start_str(comm, sample):
+ sec = int(sample["time"] / 1000000000)
+ ns = sample["time"] % 1000000000
+ cpu = sample["cpu"]
+ pid = sample["pid"]
+ tid = sample["tid"]
+ return f"{comm:>16} {pid:>5}/{tid:<5} [{cpu:04}] {sec:9}.{ns:09} "
+
+# This code is copied from intel-pt-events.py for printing source code
+# line and symbols.
+def print_srccode(comm, param_dict, sample, symbol, dso):
+ ip = sample["ip"]
+ if symbol == "[unknown]":
+ start_str = common_start_str(comm, sample) + ("%x" % ip).rjust(16).ljust(40)
+ else:
+ offs = get_offset(param_dict, "symoff")
+ start_str = common_start_str(comm, sample) + (symbol + offs).ljust(40)
+
+ global glb_source_file_name
+ global glb_line_number
+ global glb_dso
+
+ source_file_name, line_number, source_line = perf_sample_srccode(perf_script_context)
+ if source_file_name:
+ if glb_line_number == line_number and glb_source_file_name == source_file_name:
+ src_str = ""
+ else:
+ if len(source_file_name) > 40:
+ src_file = ("..." + source_file_name[-37:]) + " "
+ else:
+ src_file = source_file_name.ljust(41)
+
+ if source_line is None:
+ src_str = src_file + str(line_number).rjust(4) + " <source not found>"
+ else:
+ src_str = src_file + str(line_number).rjust(4) + " " + source_line
+ glb_dso = None
+ elif dso == glb_dso:
+ src_str = ""
+ else:
+ src_str = dso
+ glb_dso = dso
+
+ glb_line_number = line_number
+ glb_source_file_name = source_file_name
+
+ print(f"{start_str}{src_str}")
+
+def process_event(param_dict):
+ global cache_size
+ global options
+
+ sample = param_dict["sample"]
+ comm = param_dict["comm"]
+
+ name = param_dict["ev_name"]
+ dso = get_optional(param_dict, "dso")
+ dso_bid = get_optional(param_dict, "dso_bid")
+ dso_start = get_optional(param_dict, "dso_map_start")
+ dso_end = get_optional(param_dict, "dso_map_end")
+ symbol = get_optional(param_dict, "symbol")
+
+ if (options.verbose == True):
+ print(f"Event type: {name}")
+ print_sample(sample)
+
+ # If we cannot find the dso we cannot dump the assembly, so bail out
+ if (dso == '[unknown]'):
+ return
+
+ # Validate dso start and end addresses
+ if ((dso_start == '[unknown]') or (dso_end == '[unknown]')):
+ print(f"Failed to find valid dso map for dso {dso}")
+ return
+
+ if (name[0:12] == "instructions"):
+ print_srccode(comm, param_dict, sample, symbol, dso)
+ return
+
+ # Don't proceed if this event is not a branch sample.
+ if (name[0:8] != "branches"):
+ return
+
+ cpu = sample["cpu"]
+ ip = sample["ip"]
+ addr = sample["addr"]
+
+ # Initialize the CPU data if it's empty, and return directly
+ # if this is the first tracing event for this CPU.
+ if (cpu_data.get(str(cpu) + 'addr') == None):
+ cpu_data[str(cpu) + 'addr'] = addr
+ return
+
+ # The format for packet is:
+ #
+ # +------------+------------+------------+
+ # sample_prev: | addr | ip | cpu |
+ # +------------+------------+------------+
+ # sample_next: | addr | ip | cpu |
+ # +------------+------------+------------+
+ #
+ # We need to combine the two continuous packets to get the instruction
+ # range for sample_prev::cpu:
+ #
+ # [ sample_prev::addr .. sample_next::ip ]
+ #
+ # For this purpose, sample_prev::addr is stored in the cpu_data structure
+ # and read back as 'start_addr' when the next packet arrives, and
+ # sample_next::ip plus an extra 4 bytes is used as 'stop_addr'; the extra 4
+ # is for the sake of objdump, so the final assembler dump includes the last
+ # instruction at sample_next::ip.
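+ # A worked example with hypothetical addresses: if the previous sample on
+ # this CPU stored addr=0x4006f0 and the current sample has ip=0x400700, the
+ # dump covers [ 0x4006f0 .. 0x400704 ], i.e. up to and including the
+ # instruction at 0x400700.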
+ start_addr = cpu_data[str(cpu) + 'addr']
+ stop_addr = ip + 4
+
+ # Record for previous sample packet
+ cpu_data[str(cpu) + 'addr'] = addr
+
+ # Handle CS_ETM_TRACE_ON packet if start_addr=0 and stop_addr=4
+ if (start_addr == 0 and stop_addr == 4):
+ print(f"CPU{cpu}: CS_ETM_TRACE_ON packet is inserted")
+ return
+
+ if (start_addr < int(dso_start) or start_addr > int(dso_end)):
+ print(f"Start address 0x{start_addr:x} is out of range [ 0x{dso_start:x} .. 0x{dso_end:x} ] for dso {dso}")
+ return
+
+ if (stop_addr < int(dso_start) or stop_addr > int(dso_end)):
+ print(f"Stop address 0x{stop_addr:x} is out of range [ 0x{dso_start:x} .. 0x{dso_end:x} ] for dso {dso}")
+ return
+
+ if (options.objdump_name != None):
+ # There is no need to subtract the virtual memory offset for disassembly
+ # of the kernel dso, so in this case we set vm_start to zero.
+ if (dso == "[kernel.kallsyms]"):
+ dso_vm_start = 0
+ else:
+ dso_vm_start = int(dso_start)
+
+ dso_fname = get_dso_file_path(dso, dso_bid)
+ if path.exists(dso_fname):
+ print_disam(dso_fname, dso_vm_start, start_addr, stop_addr)
+ else:
+ print(f"Failed to find dso {dso} for address range [ 0x{start_addr:x} .. 0x{stop_addr:x} ]")
+
+ print_srccode(comm, param_dict, sample, symbol, dso)
diff --git a/tools/perf/tests/shell/lib/perf_csv_output_lint.py b/tools/perf/tests/shell/lib/perf_csv_output_lint.py
new file mode 100644
index 000000000000..714f283cfb1b
--- /dev/null
+++ b/tools/perf/tests/shell/lib/perf_csv_output_lint.py
@@ -0,0 +1,48 @@
+#!/usr/bin/python
+# SPDX-License-Identifier: GPL-2.0
+
+import argparse
+import sys
+
+# Basic sanity check of perf CSV output as specified in the man page.
+# Currently just checks the number of fields per line in output.
+
+ap = argparse.ArgumentParser()
+ap.add_argument('--no-args', action='store_true')
+ap.add_argument('--interval', action='store_true')
+ap.add_argument('--system-wide-no-aggr', action='store_true')
+ap.add_argument('--system-wide', action='store_true')
+ap.add_argument('--event', action='store_true')
+ap.add_argument('--per-core', action='store_true')
+ap.add_argument('--per-thread', action='store_true')
+ap.add_argument('--per-die', action='store_true')
+ap.add_argument('--per-node', action='store_true')
+ap.add_argument('--per-socket', action='store_true')
+ap.add_argument('--separator', default=',', nargs='?')
+args = ap.parse_args()
+
+Lines = sys.stdin.readlines()
+
+def check_csv_output(exp):
+ for line in Lines:
+ if 'failed' not in line:
+ count = line.count(args.separator)
+ if count != exp:
+ sys.stdout.write(''.join(Lines))
+ raise RuntimeError(f'wrong number of fields. expected {exp} in {line}')
+
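+# As a rough illustration (field values are made up), a default "perf stat -x,"
+# line looks something like:
+#   1000000,,cpu-clock,4000000,100.00,1.000,CPUs utilized
+# i.e. 6 separators, which matches the --no-args/--system-wide/--event case below.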
+try:
+ if args.no_args or args.system_wide or args.event:
+ expected_items = 6
+ elif args.interval or args.per_thread or args.system_wide_no_aggr:
+ expected_items = 7
+ elif args.per_core or args.per_socket or args.per_node or args.per_die:
+ expected_items = 8
+ else:
+ ap.print_help()
+ raise RuntimeError('No checking option specified')
+ check_csv_output(expected_items)
+
+except:
+ sys.stdout.write('Test failed for input: ' + ''.join(Lines))
+ raise
diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh
new file mode 100755
index 000000000000..96e0739f7478
--- /dev/null
+++ b/tools/perf/tests/shell/record_offcpu.sh
@@ -0,0 +1,60 @@
+#!/bin/sh
+# perf record offcpu profiling tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -f ${perfdata}
+ rm -f ${perfdata}.old
+ trap - exit term int
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup exit term int
+
+test_offcpu() {
+ echo "Basic off-cpu test"
+ if [ `id -u` != 0 ]
+ then
+ echo "Basic off-cpu test [Skipped permission]"
+ err=2
+ return
+ fi
+ if perf record --off-cpu -o ${perfdata} --quiet true 2>&1 | grep BUILD_BPF_SKEL
+ then
+ echo "Basic off-cpu test [Skipped missing BPF support]"
+ err=2
+ return
+ fi
+ if ! perf record --off-cpu -e dummy -o ${perfdata} sleep 1 2> /dev/null
+ then
+ echo "Basic off-cpu test [Failed record]"
+ err=1
+ return
+ fi
+ if ! perf evlist -i ${perfdata} | grep -q "offcpu-time"
+ then
+ echo "Basic off-cpu test [Failed record]"
+ err=1
+ return
+ fi
+ if ! perf report -i ${perfdata} -q --percent-limit=90 | egrep -q sleep
+ then
+ echo "Basic off-cpu test [Failed missing output]"
+ err=1
+ return
+ fi
+ echo "Basic off-cpu test [Success]"
+}
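+# A passing run of this function prints, roughly:
+#   Basic off-cpu test
+#   Basic off-cpu test [Success]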
+
+test_offcpu
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
new file mode 100755
index 000000000000..983220ef3cb4
--- /dev/null
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -0,0 +1,147 @@
+#!/bin/bash
+# perf stat CSV output linter
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Tests various perf stat CSV output commands for the
+# correct number of fields and the CSV separator set to ','.
+
+set -e
+
+pythonchecker=$(dirname $0)/lib/perf_csv_output_lint.py
+if [ "x$PYTHON" == "x" ]
+then
+ if which python3 > /dev/null
+ then
+ PYTHON=python3
+ elif which python > /dev/null
+ then
+ PYTHON=python
+ else
+ echo "Skipping test, python not detected, please set environment variable PYTHON."
+ exit 2
+ fi
+fi
+
+# Return true if perf_event_paranoid is > $1 and not running as root.
+function ParanoidAndNotRoot()
+{
+ [ $(id -u) != 0 ] && [ $(cat /proc/sys/kernel/perf_event_paranoid) -gt $1 ]
+}
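+# For example, "ParanoidAndNotRoot 0" succeeds only when running as a non-root
+# user with kernel.perf_event_paranoid greater than 0, in which case the
+# system-wide and per-core style checks below are skipped.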
+
+check_no_args()
+{
+ echo -n "Checking CSV output: no args "
+ perf stat -x, true 2>&1 | $PYTHON $pythonchecker --no-args
+ echo "[Success]"
+}
+
+check_system_wide()
+{
+ echo -n "Checking CSV output: system wide "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, -a true 2>&1 | $PYTHON $pythonchecker --system-wide
+ echo "[Success]"
+}
+
+check_system_wide_no_aggr()
+{
+ echo -n "Checking CSV output: system wide "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ echo -n "Checking CSV output: system wide no aggregation "
+ perf stat -x, -A -a --no-merge true 2>&1 | $PYTHON $pythonchecker --system-wide-no-aggr
+ echo "[Success]"
+}
+
+check_interval()
+{
+ echo -n "Checking CSV output: interval "
+ perf stat -x, -I 1000 true 2>&1 | $PYTHON $pythonchecker --interval
+ echo "[Success]"
+}
+
+
+check_event()
+{
+ echo -n "Checking CSV output: event "
+ perf stat -x, -e cpu-clock true 2>&1 | $PYTHON $pythonchecker --event
+ echo "[Success]"
+}
+
+check_per_core()
+{
+ echo -n "Checking CSV output: per core "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, --per-core -a true 2>&1 | $PYTHON $pythonchecker --per-core
+ echo "[Success]"
+}
+
+check_per_thread()
+{
+ echo -n "Checking CSV output: per thread "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, --per-thread -a true 2>&1 | $PYTHON $pythonchecker --per-thread
+ echo "[Success]"
+}
+
+check_per_die()
+{
+ echo -n "Checking CSV output: per die "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, --per-die -a true 2>&1 | $PYTHON $pythonchecker --per-die
+ echo "[Success]"
+}
+
+check_per_node()
+{
+ echo -n "Checking CSV output: per node "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, --per-node -a true 2>&1 | $PYTHON $pythonchecker --per-node
+ echo "[Success]"
+}
+
+check_per_socket()
+{
+ echo -n "Checking CSV output: per socket "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, --per-socket -a true 2>&1 | $PYTHON $pythonchecker --per-socket
+ echo "[Success]"
+}
+
+check_no_args
+check_system_wide
+check_system_wide_no_aggr
+check_interval
+check_event
+check_per_core
+check_per_thread
+check_per_die
+check_per_node
+check_per_socket
+exit 0
diff --git a/tools/perf/tests/shell/test_arm_spe_fork.sh b/tools/perf/tests/shell/test_arm_spe_fork.sh
new file mode 100755
index 000000000000..c920d3583d30
--- /dev/null
+++ b/tools/perf/tests/shell/test_arm_spe_fork.sh
@@ -0,0 +1,92 @@
+#!/bin/sh
+# Check Arm SPE doesn't hang when there are forks
+
+# SPDX-License-Identifier: GPL-2.0
+# German Gomez <german.gomez@arm.com>, 2022
+
+skip_if_no_arm_spe_event() {
+ perf list | egrep -q 'arm_spe_[0-9]+//' && return 0
+ return 2
+}
+
+skip_if_no_arm_spe_event || exit 2
+
+# skip if there's no compiler
+if ! [ -x "$(command -v cc)" ]; then
+ echo "failed: no compiler, install gcc"
+ exit 2
+fi
+
+TEST_PROGRAM_SOURCE=$(mktemp /tmp/__perf_test.program.XXXXX.c)
+TEST_PROGRAM=$(mktemp /tmp/__perf_test.program.XXXXX)
+PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+PERF_RECORD_LOG=$(mktemp /tmp/__perf_test.log.XXXXX)
+
+cleanup_files()
+{
+ echo "Cleaning up files..."
+ rm -f ${PERF_RECORD_LOG}
+ rm -f ${PERF_DATA}
+ rm -f ${TEST_PROGRAM_SOURCE}
+ rm -f ${TEST_PROGRAM}
+}
+
+trap cleanup_files exit term int
+
+# compile test program
+cat << EOF > $TEST_PROGRAM_SOURCE
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+int workload() {
+ while (1)
+ sqrt(rand());
+ return 0;
+}
+
+int main() {
+ switch (fork()) {
+ case 0:
+ return workload();
+ case -1:
+ return 1;
+ default:
+ wait(NULL);
+ }
+ return 0;
+}
+EOF
+
+echo "Compiling test program..."
+CFLAGS="-lm"
+cc $TEST_PROGRAM_SOURCE $CFLAGS -o $TEST_PROGRAM || exit 1
+
+echo "Recording workload..."
+perf record -o ${PERF_DATA} -e arm_spe/period=65536/ -vvv -- $TEST_PROGRAM > ${PERF_RECORD_LOG} 2>&1 &
+PERFPID=$!
+
+# Check if perf hangs by checking the perf-record logs.
+sleep 1
+log0=$(wc -l $PERF_RECORD_LOG)
+echo Log lines = $log0
+sleep 1
+log1=$(wc -l $PERF_RECORD_LOG)
+echo Log lines after 1 second = $log1
+
+kill $PERFPID
+wait $PERFPID
+# test program may leave an orphan process running the workload
+killall $(basename $TEST_PROGRAM)
+
+if [ "$log0" = "$log1" ];
+then
+ echo "SPE hang test: FAIL"
+ exit 1
+else
+ echo "SPE hang test: PASS"
+fi
+
+exit 0
diff --git a/tools/perf/tests/shell/test_intel_pt.sh b/tools/perf/tests/shell/test_intel_pt.sh
new file mode 100755
index 000000000000..a3298643884d
--- /dev/null
+++ b/tools/perf/tests/shell/test_intel_pt.sh
@@ -0,0 +1,71 @@
+#!/bin/sh
+# Miscellaneous Intel PT testing
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# Skip if no Intel PT
+perf list | grep -q 'intel_pt//' || exit 2
+
+skip_cnt=0
+ok_cnt=0
+err_cnt=0
+
+tmpfile=`mktemp`
+perfdatafile=`mktemp`
+
+can_cpu_wide()
+{
+ perf record -o ${tmpfile} -B -N --no-bpf-event -e dummy:u -C $1 true 2>&1 >/dev/null || return 2
+ return 0
+}
+
+test_system_wide_side_band()
+{
+ # Need CPU 0 and CPU 1
+ can_cpu_wide 0 || return $?
+ can_cpu_wide 1 || return $?
+
+ # Record on CPU 0 a task running on CPU 1
+ perf record -B -N --no-bpf-event -o ${perfdatafile} -e intel_pt//u -C 0 -- taskset --cpu-list 1 uname
+
+ # Should get MMAP events from CPU 1 because they may be needed for decoding
+ mmap_cnt=`perf script -i ${perfdatafile} --no-itrace --show-mmap-events -C 1 2>/dev/null | grep MMAP | wc -l`
+
+ if [ ${mmap_cnt} -gt 0 ] ; then
+ return 0
+ fi
+
+ echo "Failed to record MMAP events on CPU 1 when tracing CPU 0"
+ return 1
+}
+
+count_result()
+{
+ if [ $1 -eq 2 ] ; then
+ skip_cnt=`expr ${skip_cnt} \+ 1`
+ return
+ fi
+ if [ $1 -eq 0 ] ; then
+ ok_cnt=`expr ${ok_cnt} \+ 1`
+ return
+ fi
+ err_cnt=`expr ${err_cnt} \+ 1`
+}
+
+test_system_wide_side_band
+
+count_result $?
+
+rm -f ${tmpfile}
+rm -f ${perfdatafile}
+
+if [ ${err_cnt} -gt 0 ] ; then
+ exit 1
+fi
+
+if [ ${ok_cnt} -gt 0 ] ; then
+ exit 0
+fi
+
+exit 2
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 9a7209a99e16..a51267d88ca9 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -147,6 +147,7 @@ perf-$(CONFIG_LIBBPF) += bpf_map.o
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter_cgroup.o
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_ftrace.o
+perf-$(CONFIG_PERF_BPF_SKEL) += bpf_off_cpu.o
perf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
perf-$(CONFIG_LIBELF) += symbol-elf.o
perf-$(CONFIG_LIBELF) += probe-file.o
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index b11549ae39df..511dd3caa1bc 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -125,7 +125,7 @@ int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
mm->tid = mp->tid;
mm->cpu = mp->cpu.cpu;
- if (!mp->len) {
+ if (!mp->len || !mp->mmap_needed) {
mm->base = NULL;
return 0;
}
@@ -168,13 +168,20 @@ void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
}
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
- struct evlist *evlist, int idx,
- bool per_cpu)
+ struct evlist *evlist,
+ struct evsel *evsel, int idx)
{
+ bool per_cpu = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
+
+ mp->mmap_needed = evsel->needs_auxtrace_mmap;
+
+ if (!mp->mmap_needed)
+ return;
+
mp->idx = idx;
if (per_cpu) {
- mp->cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, idx);
+ mp->cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
if (evlist->core.threads)
mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
else
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index dc38b6f57232..cd0d25c2751c 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -344,6 +344,10 @@ struct auxtrace_mmap {
* @idx: index of this mmap
* @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
* mmap) otherwise %0
+ * @mmap_needed: set to %false for non-auxtrace events. This is needed because
+ * auxtrace mmapping is done in the same code path as non-auxtrace
+ * mmapping but not every evsel that needs non-auxtrace mmapping
+ * also needs auxtrace mmapping.
* @cpu: cpu number for a per-cpu mmap otherwise %-1
*/
struct auxtrace_mmap_params {
@@ -353,6 +357,7 @@ struct auxtrace_mmap_params {
int prot;
int idx;
pid_t tid;
+ bool mmap_needed;
struct perf_cpu cpu;
};
@@ -490,8 +495,8 @@ void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
unsigned int auxtrace_pages,
bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
- struct evlist *evlist, int idx,
- bool per_cpu);
+ struct evlist *evlist,
+ struct evsel *evsel, int idx);
typedef int (*process_auxtrace_t)(struct perf_tool *tool,
struct mmap *map,
@@ -863,8 +868,8 @@ void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
unsigned int auxtrace_pages,
bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
- struct evlist *evlist, int idx,
- bool per_cpu);
+ struct evlist *evlist,
+ struct evsel *evsel, int idx);
#define ITRACE_HELP ""
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 8271ab764eb5..eee64ddb766d 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -35,11 +35,12 @@ struct btf *btf__load_from_kernel_by_id(__u32 id)
}
#endif
-int __weak bpf_prog_load(enum bpf_prog_type prog_type,
- const char *prog_name __maybe_unused,
- const char *license,
- const struct bpf_insn *insns, size_t insn_cnt,
- const struct bpf_prog_load_opts *opts)
+#ifndef HAVE_LIBBPF_BPF_PROG_LOAD
+int bpf_prog_load(enum bpf_prog_type prog_type,
+ const char *prog_name __maybe_unused,
+ const char *license,
+ const struct bpf_insn *insns, size_t insn_cnt,
+ const struct bpf_prog_load_opts *opts)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
@@ -47,8 +48,10 @@ int __weak bpf_prog_load(enum bpf_prog_type prog_type,
opts->kern_version, opts->log_buf, opts->log_size);
#pragma GCC diagnostic pop
}
+#endif
-struct bpf_program * __weak
+#ifndef HAVE_LIBBPF_BPF_OBJECT__NEXT_PROGRAM
+struct bpf_program *
bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
{
#pragma GCC diagnostic push
@@ -56,8 +59,10 @@ bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
return bpf_program__next(prev, obj);
#pragma GCC diagnostic pop
}
+#endif
-struct bpf_map * __weak
+#ifndef HAVE_LIBBPF_BPF_OBJECT__NEXT_MAP
+struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
{
#pragma GCC diagnostic push
@@ -65,8 +70,10 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
return bpf_map__next(prev, obj);
#pragma GCC diagnostic pop
}
+#endif
-const void * __weak
+#ifndef HAVE_LIBBPF_BTF__RAW_DATA
+const void *
btf__raw_data(const struct btf *btf_ro, __u32 *size)
{
#pragma GCC diagnostic push
@@ -74,6 +81,7 @@ btf__raw_data(const struct btf *btf_ro, __u32 *size)
return btf__get_raw_data(btf_ro, size);
#pragma GCC diagnostic pop
}
+#endif
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index d4931f54e1dd..ef1c15e4aeba 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -312,7 +312,10 @@ static bool bperf_attr_map_compatible(int attr_map_fd)
(map_info.value_size == sizeof(struct perf_event_attr_map_entry));
}
-int __weak
+#ifndef HAVE_LIBBPF_BPF_MAP_CREATE
+LIBBPF_API int bpf_create_map(enum bpf_map_type map_type, int key_size,
+ int value_size, int max_entries, __u32 map_flags);
+int
bpf_map_create(enum bpf_map_type map_type,
const char *map_name __maybe_unused,
__u32 key_size,
@@ -325,6 +328,7 @@ bpf_map_create(enum bpf_map_type map_type,
return bpf_create_map(map_type, key_size, value_size, max_entries, 0);
#pragma GCC diagnostic pop
}
+#endif
static int bperf_lock_attr_map(struct target *target)
{
diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
new file mode 100644
index 000000000000..b73e84a02264
--- /dev/null
+++ b/tools/perf/util/bpf_off_cpu.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "util/bpf_counter.h"
+#include "util/debug.h"
+#include "util/evsel.h"
+#include "util/evlist.h"
+#include "util/off_cpu.h"
+#include "util/perf-hooks.h"
+#include "util/record.h"
+#include "util/session.h"
+#include "util/target.h"
+#include "util/cpumap.h"
+#include "util/thread_map.h"
+#include "util/cgroup.h"
+#include <bpf/bpf.h>
+
+#include "bpf_skel/off_cpu.skel.h"
+
+#define MAX_STACKS 32
+/* we don't need an actual timestamp, we just want to put the samples last */
+#define OFF_CPU_TIMESTAMP (~0ull << 32)
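+/*
+ * Note: the value above works out to 0xffffffff00000000, far larger than any
+ * realistic nanosecond timestamp, so the synthesized off-cpu samples end up
+ * ordered after all regular samples.
+ */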
+
+static struct off_cpu_bpf *skel;
+
+struct off_cpu_key {
+ u32 pid;
+ u32 tgid;
+ u32 stack_id;
+ u32 state;
+ u64 cgroup_id;
+};
+
+union off_cpu_data {
+ struct perf_event_header hdr;
+ u64 array[1024 / sizeof(u64)];
+};
+
+static int off_cpu_config(struct evlist *evlist)
+{
+ struct evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_BPF_OUTPUT,
+ .size = sizeof(attr), /* to capture ABI version */
+ };
+ char *evname = strdup(OFFCPU_EVENT);
+
+ if (evname == NULL)
+ return -ENOMEM;
+
+ evsel = evsel__new(&attr);
+ if (!evsel) {
+ free(evname);
+ return -ENOMEM;
+ }
+
+ evsel->core.attr.freq = 1;
+ evsel->core.attr.sample_period = 1;
+ /* off-cpu analysis depends on stack trace */
+ evsel->core.attr.sample_type = PERF_SAMPLE_CALLCHAIN;
+
+ evlist__add(evlist, evsel);
+
+ free(evsel->name);
+ evsel->name = evname;
+
+ return 0;
+}
+
+static void off_cpu_start(void *arg)
+{
+ struct evlist *evlist = arg;
+
+ /* update task filter for the given workload */
+ if (!skel->bss->has_cpu && !skel->bss->has_task &&
+ perf_thread_map__pid(evlist->core.threads, 0) != -1) {
+ int fd;
+ u32 pid;
+ u8 val = 1;
+
+ skel->bss->has_task = 1;
+ fd = bpf_map__fd(skel->maps.task_filter);
+ pid = perf_thread_map__pid(evlist->core.threads, 0);
+ bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
+ }
+
+ skel->bss->enabled = 1;
+}
+
+static void off_cpu_finish(void *arg __maybe_unused)
+{
+ skel->bss->enabled = 0;
+ off_cpu_bpf__destroy(skel);
+}
+
+/* the v5.18 kernel added a prev_state arg, so we need to check the tracepoint signature */
+static void check_sched_switch_args(void)
+{
+ const struct btf *btf = bpf_object__btf(skel->obj);
+ const struct btf_type *t1, *t2, *t3;
+ u32 type_id;
+
+ type_id = btf__find_by_name_kind(btf, "bpf_trace_sched_switch",
+ BTF_KIND_TYPEDEF);
+ if ((s32)type_id < 0)
+ return;
+
+ t1 = btf__type_by_id(btf, type_id);
+ if (t1 == NULL)
+ return;
+
+ t2 = btf__type_by_id(btf, t1->type);
+ if (t2 == NULL || !btf_is_ptr(t2))
+ return;
+
+ t3 = btf__type_by_id(btf, t2->type);
+ if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
+ /* new format: pass prev_state as 4th arg */
+ skel->rodata->has_prev_state = true;
+ }
+}
+
+int off_cpu_prepare(struct evlist *evlist, struct target *target,
+ struct record_opts *opts)
+{
+ int err, fd, i;
+ int ncpus = 1, ntasks = 1, ncgrps = 1;
+
+ if (off_cpu_config(evlist) < 0) {
+ pr_err("Failed to config off-cpu BPF event\n");
+ return -1;
+ }
+
+ skel = off_cpu_bpf__open();
+ if (!skel) {
+ pr_err("Failed to open off-cpu BPF skeleton\n");
+ return -1;
+ }
+
+ /* don't need to set cpu filter for system-wide mode */
+ if (target->cpu_list) {
+ ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
+ bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
+ }
+
+ if (target__has_task(target)) {
+ ntasks = perf_thread_map__nr(evlist->core.threads);
+ bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
+ }
+
+ if (evlist__first(evlist)->cgrp) {
+ ncgrps = evlist->core.nr_entries - 1; /* excluding a dummy */
+ bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);
+
+ if (!cgroup_is_v2("perf_event"))
+ skel->rodata->uses_cgroup_v1 = true;
+ }
+
+ if (opts->record_cgroup) {
+ skel->rodata->needs_cgroup = true;
+
+ if (!cgroup_is_v2("perf_event"))
+ skel->rodata->uses_cgroup_v1 = true;
+ }
+
+ set_max_rlimit();
+ check_sched_switch_args();
+
+ err = off_cpu_bpf__load(skel);
+ if (err) {
+ pr_err("Failed to load off-cpu skeleton\n");
+ goto out;
+ }
+
+ if (target->cpu_list) {
+ u32 cpu;
+ u8 val = 1;
+
+ skel->bss->has_cpu = 1;
+ fd = bpf_map__fd(skel->maps.cpu_filter);
+
+ for (i = 0; i < ncpus; i++) {
+ cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
+ bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
+ }
+ }
+
+ if (target__has_task(target)) {
+ u32 pid;
+ u8 val = 1;
+
+ skel->bss->has_task = 1;
+ fd = bpf_map__fd(skel->maps.task_filter);
+
+ for (i = 0; i < ntasks; i++) {
+ pid = perf_thread_map__pid(evlist->core.threads, i);
+ bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
+ }
+ }
+
+ if (evlist__first(evlist)->cgrp) {
+ struct evsel *evsel;
+ u8 val = 1;
+
+ skel->bss->has_cgroup = 1;
+ fd = bpf_map__fd(skel->maps.cgroup_filter);
+
+ evlist__for_each_entry(evlist, evsel) {
+ struct cgroup *cgrp = evsel->cgrp;
+
+ if (cgrp == NULL)
+ continue;
+
+ if (!cgrp->id && read_cgroup_id(cgrp) < 0) {
+ pr_err("Failed to read cgroup id of %s\n",
+ cgrp->name);
+ goto out;
+ }
+
+ bpf_map_update_elem(fd, &cgrp->id, &val, BPF_ANY);
+ }
+ }
+
+ err = off_cpu_bpf__attach(skel);
+ if (err) {
+ pr_err("Failed to attach off-cpu BPF skeleton\n");
+ goto out;
+ }
+
+ if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
+ perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
+ pr_err("Failed to attach off-cpu skeleton\n");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ off_cpu_bpf__destroy(skel);
+ return -1;
+}
+
+int off_cpu_write(struct perf_session *session)
+{
+ int bytes = 0, size;
+ int fd, stack;
+ u64 sample_type, val, sid = 0;
+ struct evsel *evsel;
+ struct perf_data_file *file = &session->data->file;
+ struct off_cpu_key prev, key;
+ union off_cpu_data data = {
+ .hdr = {
+ .type = PERF_RECORD_SAMPLE,
+ .misc = PERF_RECORD_MISC_USER,
+ },
+ };
+ u64 tstamp = OFF_CPU_TIMESTAMP;
+
+ skel->bss->enabled = 0;
+
+ evsel = evlist__find_evsel_by_str(session->evlist, OFFCPU_EVENT);
+ if (evsel == NULL) {
+ pr_err("%s evsel not found\n", OFFCPU_EVENT);
+ return 0;
+ }
+
+ sample_type = evsel->core.attr.sample_type;
+
+ if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
+ if (evsel->core.id)
+ sid = evsel->core.id[0];
+ }
+
+ fd = bpf_map__fd(skel->maps.off_cpu);
+ stack = bpf_map__fd(skel->maps.stacks);
+ memset(&prev, 0, sizeof(prev));
+
+ while (!bpf_map_get_next_key(fd, &prev, &key)) {
+ int n = 1; /* start from perf_event_header */
+ int ip_pos = -1;
+
+ bpf_map_lookup_elem(fd, &key, &val);
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ data.array[n++] = sid;
+ if (sample_type & PERF_SAMPLE_IP) {
+ ip_pos = n;
+ data.array[n++] = 0; /* will be updated */
+ }
+ if (sample_type & PERF_SAMPLE_TID)
+ data.array[n++] = (u64)key.pid << 32 | key.tgid;
+ if (sample_type & PERF_SAMPLE_TIME)
+ data.array[n++] = tstamp;
+ if (sample_type & PERF_SAMPLE_ID)
+ data.array[n++] = sid;
+ if (sample_type & PERF_SAMPLE_CPU)
+ data.array[n++] = 0;
+ if (sample_type & PERF_SAMPLE_PERIOD)
+ data.array[n++] = val;
+ if (sample_type & PERF_SAMPLE_CALLCHAIN) {
+ int len = 0;
+
+ /* data.array[n] is callchain->nr (updated later) */
+ data.array[n + 1] = PERF_CONTEXT_USER;
+ data.array[n + 2] = 0;
+
+ bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
+ while (data.array[n + 2 + len])
+ len++;
+
+ /* update length of callchain */
+ data.array[n] = len + 1;
+
+ /* update sample ip with the first callchain entry */
+ if (ip_pos >= 0)
+ data.array[ip_pos] = data.array[n + 2];
+
+ /* calculate sample callchain data array length */
+ n += len + 2;
+ }
+ if (sample_type & PERF_SAMPLE_CGROUP)
+ data.array[n++] = key.cgroup_id;
+ /* TODO: handle more sample types */
+
+ size = n * sizeof(u64);
+ data.hdr.size = size;
+ bytes += size;
+
+ if (perf_data_file__write(file, &data, size) < 0) {
+ pr_err("failed to write perf data, error: %m\n");
+ return bytes;
+ }
+
+ prev = key;
+ /* increase dummy timestamp to sort later samples */
+ tstamp++;
+ }
+ return bytes;
+}
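The off_cpu_write() loop above drains the BPF hash map with the standard bpf_map_get_next_key()/bpf_map_lookup_elem() walk. Below is a minimal, self-contained sketch of that pattern; the pinned-map path and the u64 key/value layout are assumptions for illustration, not part of this patch.

/* Sketch only: walk a u64 -> u64 BPF hash map the same way off_cpu_write() does. */
#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>

static int dump_u64_map(const char *pin_path)
{
	unsigned long long prev, key, val;
	int fd = bpf_obj_get(pin_path);	/* hypothetical pinned map, e.g. "/sys/fs/bpf/off_cpu" */

	if (fd < 0)
		return -1;

	/* a key that is not in the map makes the first call return the first real key */
	memset(&prev, 0, sizeof(prev));
	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		if (!bpf_map_lookup_elem(fd, &key, &val))
			printf("key=%llu value=%llu\n", key, val);
		prev = key;
	}
	return 0;
}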
diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c
new file mode 100644
index 000000000000..792ae2847080
--- /dev/null
+++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2022 Google
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+/* task->flags for off-cpu analysis */
+#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
+
+/* task->state for off-cpu analysis */
+#define TASK_INTERRUPTIBLE 0x0001
+#define TASK_UNINTERRUPTIBLE 0x0002
+
+#define MAX_STACKS 32
+#define MAX_ENTRIES 102400
+
+struct tstamp_data {
+ __u32 stack_id;
+ __u32 state;
+ __u64 timestamp;
+};
+
+struct offcpu_key {
+ __u32 pid;
+ __u32 tgid;
+ __u32 stack_id;
+ __u32 state;
+ __u64 cgroup_id;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, MAX_STACKS * sizeof(__u64));
+ __uint(max_entries, MAX_ENTRIES);
+} stacks SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct tstamp_data);
+} tstamp SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(struct offcpu_key));
+ __uint(value_size, sizeof(__u64));
+ __uint(max_entries, MAX_ENTRIES);
+} off_cpu SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u8));
+ __uint(max_entries, 1);
+} cpu_filter SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u8));
+ __uint(max_entries, 1);
+} task_filter SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u64));
+ __uint(value_size, sizeof(__u8));
+ __uint(max_entries, 1);
+} cgroup_filter SEC(".maps");
+
+/* old kernel task_struct definition */
+struct task_struct___old {
+ long state;
+} __attribute__((preserve_access_index));
+
+int enabled = 0;
+int has_cpu = 0;
+int has_task = 0;
+int has_cgroup = 0;
+
+const volatile bool has_prev_state = false;
+const volatile bool needs_cgroup = false;
+const volatile bool uses_cgroup_v1 = false;
+
+/*
+ * Old kernels used to call it task_struct->state; now it's '__state'.
+ * Use the BPF CO-RE "ignored suffix rule" to handle it, as below:
+ *
+ * https://nakryiko.com/posts/bpf-core-reference-guide/#handling-incompatible-field-and-type-changes
+ */
+static inline int get_task_state(struct task_struct *t)
+{
+ if (bpf_core_field_exists(t->__state))
+ return BPF_CORE_READ(t, __state);
+
+ /* recast pointer to capture task_struct___old type for compiler */
+ struct task_struct___old *t_old = (void *)t;
+
+ /* now use old "state" name of the field */
+ return BPF_CORE_READ(t_old, state);
+}
+
+static inline __u64 get_cgroup_id(struct task_struct *t)
+{
+ struct cgroup *cgrp;
+
+ if (uses_cgroup_v1)
+ cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_event_cgrp_id], cgroup);
+ else
+ cgrp = BPF_CORE_READ(t, cgroups, dfl_cgrp);
+
+ return BPF_CORE_READ(cgrp, kn, id);
+}
+
+static inline int can_record(struct task_struct *t, int state)
+{
+	/* kernel threads don't have a user stack */
+ if (t->flags & PF_KTHREAD)
+ return 0;
+
+ if (state != TASK_INTERRUPTIBLE &&
+ state != TASK_UNINTERRUPTIBLE)
+ return 0;
+
+ if (has_cpu) {
+ __u32 cpu = bpf_get_smp_processor_id();
+ __u8 *ok;
+
+ ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
+ if (!ok)
+ return 0;
+ }
+
+ if (has_task) {
+ __u8 *ok;
+ __u32 pid = t->pid;
+
+ ok = bpf_map_lookup_elem(&task_filter, &pid);
+ if (!ok)
+ return 0;
+ }
+
+ if (has_cgroup) {
+ __u8 *ok;
+ __u64 cgrp_id = get_cgroup_id(t);
+
+ ok = bpf_map_lookup_elem(&cgroup_filter, &cgrp_id);
+ if (!ok)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int off_cpu_stat(u64 *ctx, struct task_struct *prev,
+ struct task_struct *next, int state)
+{
+ __u64 ts;
+ __u32 stack_id;
+ struct tstamp_data *pelem;
+
+ ts = bpf_ktime_get_ns();
+
+ if (!can_record(prev, state))
+ goto next;
+
+ stack_id = bpf_get_stackid(ctx, &stacks,
+ BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK);
+
+ pelem = bpf_task_storage_get(&tstamp, prev, NULL,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!pelem)
+ goto next;
+
+ pelem->timestamp = ts;
+ pelem->state = state;
+ pelem->stack_id = stack_id;
+
+next:
+ pelem = bpf_task_storage_get(&tstamp, next, NULL, 0);
+
+ if (pelem && pelem->timestamp) {
+ struct offcpu_key key = {
+ .pid = next->pid,
+ .tgid = next->tgid,
+ .stack_id = pelem->stack_id,
+ .state = pelem->state,
+ .cgroup_id = needs_cgroup ? get_cgroup_id(next) : 0,
+ };
+ __u64 delta = ts - pelem->timestamp;
+ __u64 *total;
+
+ total = bpf_map_lookup_elem(&off_cpu, &key);
+ if (total)
+ *total += delta;
+ else
+ bpf_map_update_elem(&off_cpu, &key, &delta, BPF_ANY);
+
+		/* prevent the timestamp from being reused later */
+ pelem->timestamp = 0;
+ }
+
+ return 0;
+}
+
+SEC("tp_btf/sched_switch")
+int on_switch(u64 *ctx)
+{
+ struct task_struct *prev, *next;
+ int prev_state;
+
+ if (!enabled)
+ return 0;
+
+ prev = (struct task_struct *)ctx[1];
+ next = (struct task_struct *)ctx[2];
+
+ if (has_prev_state)
+ prev_state = (int)ctx[3];
+ else
+ prev_state = get_task_state(prev);
+
+ return off_cpu_stat(ctx, prev, next, prev_state);
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
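For reference, here is a condensed sketch (not additional patch code) of the skeleton lifecycle that bpf_off_cpu.c drives above: read-only globals such as has_prev_state must be decided between open and load, while .bss globals such as enabled can be toggled after attach.

/* Sketch only: minimal open/load/attach sequence for the generated off_cpu skeleton. */
#include "bpf_skel/off_cpu.skel.h"

static int off_cpu_demo(void)
{
	struct off_cpu_bpf *skel = off_cpu_bpf__open();

	if (!skel)
		return -1;

	skel->rodata->has_prev_state = false;	/* must be set before load */

	if (off_cpu_bpf__load(skel) || off_cpu_bpf__attach(skel))
		goto err;

	skel->bss->enabled = 1;			/* start accounting sched switches */
	/* ... workload runs ... */
	skel->bss->enabled = 0;

	off_cpu_bpf__destroy(skel);
	return 0;

err:
	off_cpu_bpf__destroy(skel);
	return -1;
}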
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 3a9fd4d389b5..97047a11282b 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -196,7 +196,9 @@ struct dso {
u32 status_seen;
u64 file_size;
struct list_head open_entry;
+ u64 elf_base_addr;
u64 debug_frame_offset;
+ u64 eh_frame_hdr_addr;
u64 eh_frame_hdr_offset;
} data;
/* bpf prog information */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 7f9f588e88c6..48af7d379d82 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -242,14 +242,20 @@ int __evlist__add_default(struct evlist *evlist, bool precise)
return 0;
}
-int evlist__add_dummy(struct evlist *evlist)
+static struct evsel *evlist__dummy_event(struct evlist *evlist)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_DUMMY,
.size = sizeof(attr), /* to capture ABI version */
};
- struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);
+
+ return evsel__new_idx(&attr, evlist->core.nr_entries);
+}
+
+int evlist__add_dummy(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__dummy_event(evlist);
if (evsel == NULL)
return -ENOMEM;
@@ -258,6 +264,51 @@ int evlist__add_dummy(struct evlist *evlist)
return 0;
}
+static void evlist__add_on_all_cpus(struct evlist *evlist, struct evsel *evsel)
+{
+ evsel->core.system_wide = true;
+
+ /*
+ * All CPUs.
+ *
+ * Note perf_event_open() does not accept CPUs that are not online, so
+	 * in fact this CPU list will include only the online CPUs.
+ */
+ perf_cpu_map__put(evsel->core.own_cpus);
+ evsel->core.own_cpus = perf_cpu_map__new(NULL);
+ perf_cpu_map__put(evsel->core.cpus);
+ evsel->core.cpus = perf_cpu_map__get(evsel->core.own_cpus);
+
+ /* No threads */
+ perf_thread_map__put(evsel->core.threads);
+ evsel->core.threads = perf_thread_map__new_dummy();
+
+ evlist__add(evlist, evsel);
+}
+
+struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
+{
+ struct evsel *evsel = evlist__dummy_event(evlist);
+
+ if (!evsel)
+ return NULL;
+
+ evsel->core.attr.exclude_kernel = 1;
+ evsel->core.attr.exclude_guest = 1;
+ evsel->core.attr.exclude_hv = 1;
+ evsel->core.attr.freq = 0;
+ evsel->core.attr.sample_period = 1;
+ evsel->no_aux_samples = true;
+ evsel->name = strdup("dummy:u");
+
+ if (system_wide)
+ evlist__add_on_all_cpus(evlist, evsel);
+ else
+ evlist__add(evlist, evsel);
+
+ return evsel;
+}
+
static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
struct evsel *evsel, *n;
@@ -747,15 +798,15 @@ static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
- struct perf_evsel *_evsel __maybe_unused,
+ struct perf_evsel *_evsel,
struct perf_mmap_param *_mp,
int idx)
{
struct evlist *evlist = container_of(_evlist, struct evlist, core);
struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
- bool per_cpu = !perf_cpu_map__empty(_evlist->user_requested_cpus);
+ struct evsel *evsel = container_of(_evsel, struct evsel, core);
- auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
+ auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, evsel, idx);
}
static struct perf_mmap*
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 4062f5aebfc1..1bde9ccf4e7d 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -114,6 +114,11 @@ int arch_evlist__add_default_attrs(struct evlist *evlist);
struct evsel *arch_evlist__leader(struct list_head *list);
int evlist__add_dummy(struct evlist *evlist);
+struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide);
+static inline struct evsel *evlist__add_dummy_on_all_cpus(struct evlist *evlist)
+{
+ return evlist__add_aux_dummy(evlist, true);
+}
int evlist__add_sb_event(struct evlist *evlist, struct perf_event_attr *attr,
evsel__sb_cb_t cb, void *data);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index ef169ad15236..ce499c5da8d7 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -296,8 +296,8 @@ struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
return NULL;
evsel__init(evsel, attr, idx);
- if (evsel__is_bpf_output(evsel)) {
- evsel->core.attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+ if (evsel__is_bpf_output(evsel) && !attr->sample_type) {
+ evsel->core.attr.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
evsel->core.attr.sample_period = 1;
}
@@ -409,6 +409,7 @@ struct evsel *evsel__clone(struct evsel *orig)
evsel->core.threads = perf_thread_map__get(orig->core.threads);
evsel->core.nr_members = orig->core.nr_members;
evsel->core.system_wide = orig->core.system_wide;
+ evsel->core.requires_cpu = orig->core.requires_cpu;
if (orig->name) {
evsel->name = strdup(orig->name);
@@ -896,7 +897,7 @@ static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *o
"specifying a subset with --user-regs may render DWARF unwinding unreliable, "
"so the minimal registers set (IP, SP) is explicitly forced.\n");
} else {
- attr->sample_regs_user |= PERF_REGS_MASK;
+ attr->sample_regs_user |= arch__user_reg_mask();
}
attr->sample_stack_user = param->dump_size;
attr->exclude_callchain_user = 1;
diff --git a/tools/perf/util/libunwind/arm64.c b/tools/perf/util/libunwind/arm64.c
index 15f60fd09424..014d82159656 100644
--- a/tools/perf/util/libunwind/arm64.c
+++ b/tools/perf/util/libunwind/arm64.c
@@ -24,7 +24,7 @@
#include "unwind.h"
#include "libunwind-aarch64.h"
#define perf_event_arm_regs perf_event_arm64_regs
-#include <../../../../arch/arm64/include/uapi/asm/perf_regs.h>
+#include <../../../arch/arm64/include/uapi/asm/perf_regs.h>
#undef perf_event_arm_regs
#include "../../arch/arm64/util/unwind-libunwind.c"
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 50502b4a7ca4..a4dff881be39 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -62,8 +62,8 @@ void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_u
void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
struct evlist *evlist __maybe_unused,
- int idx __maybe_unused,
- bool per_cpu __maybe_unused)
+ struct evsel *evsel __maybe_unused,
+ int idx __maybe_unused)
{
}
diff --git a/tools/perf/util/off_cpu.h b/tools/perf/util/off_cpu.h
new file mode 100644
index 000000000000..548008f74d42
--- /dev/null
+++ b/tools/perf/util/off_cpu.h
@@ -0,0 +1,29 @@
+#ifndef PERF_UTIL_OFF_CPU_H
+#define PERF_UTIL_OFF_CPU_H
+
+struct evlist;
+struct target;
+struct perf_session;
+struct record_opts;
+
+#define OFFCPU_EVENT "offcpu-time"
+
+#ifdef HAVE_BPF_SKEL
+int off_cpu_prepare(struct evlist *evlist, struct target *target,
+ struct record_opts *opts);
+int off_cpu_write(struct perf_session *session);
+#else
+static inline int off_cpu_prepare(struct evlist *evlist __maybe_unused,
+ struct target *target __maybe_unused,
+ struct record_opts *opts __maybe_unused)
+{
+ return -1;
+}
+
+static inline int off_cpu_write(struct perf_session *session __maybe_unused)
+{
+ return -1;
+}
+#endif
+
+#endif /* PERF_UTIL_OFF_CPU_H */
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 30a9d915853d..7ed235740431 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -365,7 +365,7 @@ __add_event(struct list_head *list, int *idx,
(*idx)++;
evsel->core.cpus = cpus;
evsel->core.own_cpus = perf_cpu_map__get(cpus);
- evsel->core.system_wide = pmu ? pmu->is_uncore : false;
+ evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
evsel->auto_merge_stats = auto_merge_stats;
if (name)
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
index a982e40ee5a9..872dd3d38782 100644
--- a/tools/perf/util/perf_regs.c
+++ b/tools/perf/util/perf_regs.c
@@ -103,6 +103,8 @@ static const char *__perf_reg_name_arm64(int id)
return "lr";
case PERF_REG_ARM64_PC:
return "pc";
+ case PERF_REG_ARM64_VG:
+ return "vg";
default:
return NULL;
}
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index a685d20165f7..aa5156c2bcff 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -38,5 +38,6 @@ util/units.c
util/affinity.c
util/rwsem.c
util/hashmap.c
+util/perf_regs.c
util/pmu-hybrid.c
util/fncache.c
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 659eb4e4b34b..adba01b7d9dd 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -755,12 +755,22 @@ static void set_regs_in_dict(PyObject *dict,
}
static void set_sym_in_dict(PyObject *dict, struct addr_location *al,
- const char *dso_field, const char *sym_field,
- const char *symoff_field)
+ const char *dso_field, const char *dso_bid_field,
+ const char *dso_map_start, const char *dso_map_end,
+ const char *sym_field, const char *symoff_field)
{
+ char sbuild_id[SBUILD_ID_SIZE];
+
if (al->map) {
pydict_set_item_string_decref(dict, dso_field,
_PyUnicode_FromString(al->map->dso->name));
+ build_id__sprintf(&al->map->dso->bid, sbuild_id);
+ pydict_set_item_string_decref(dict, dso_bid_field,
+ _PyUnicode_FromString(sbuild_id));
+ pydict_set_item_string_decref(dict, dso_map_start,
+ PyLong_FromUnsignedLong(al->map->start));
+ pydict_set_item_string_decref(dict, dso_map_end,
+ PyLong_FromUnsignedLong(al->map->end));
}
if (al->sym) {
pydict_set_item_string_decref(dict, sym_field,
@@ -840,7 +850,8 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
(const char *)sample->raw_data, sample->raw_size));
pydict_set_item_string_decref(dict, "comm",
_PyUnicode_FromString(thread__comm_str(al->thread)));
- set_sym_in_dict(dict, al, "dso", "symbol", "symoff");
+ set_sym_in_dict(dict, al, "dso", "dso_bid", "dso_map_start", "dso_map_end",
+ "symbol", "symoff");
pydict_set_item_string_decref(dict, "callchain", callchain);
@@ -856,7 +867,9 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
if (addr_al) {
pydict_set_item_string_decref(dict_sample, "addr_correlates_sym",
PyBool_FromLong(1));
- set_sym_in_dict(dict_sample, addr_al, "addr_dso", "addr_symbol", "addr_symoff");
+ set_sym_in_dict(dict_sample, addr_al, "addr_dso", "addr_dso_bid",
+ "addr_dso_map_start", "addr_dso_map_end",
+ "addr_symbol", "addr_symoff");
}
if (sample->flags)
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 41e29fc7648a..37622699c91a 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -169,29 +169,63 @@ static int __dw_read_encoded_value(u8 **p, u8 *end, u64 *val,
__v; \
})
-static u64 elf_section_offset(int fd, const char *name)
+static int elf_section_address_and_offset(int fd, const char *name, u64 *address, u64 *offset)
{
Elf *elf;
GElf_Ehdr ehdr;
GElf_Shdr shdr;
- u64 offset = 0;
+	int ret = -1;
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (elf == NULL)
+ return -1;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL)
+ goto out_err;
+
+ if (!elf_section_by_name(elf, &ehdr, &shdr, name, NULL))
+ goto out_err;
+
+ *address = shdr.sh_addr;
+ *offset = shdr.sh_offset;
+ ret = 0;
+out_err:
+ elf_end(elf);
+ return ret;
+}
+
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+static u64 elf_section_offset(int fd, const char *name)
+{
+ u64 address, offset;
+
+ if (elf_section_address_and_offset(fd, name, &address, &offset))
return 0;
- do {
- if (gelf_getehdr(elf, &ehdr) == NULL)
- break;
+ return offset;
+}
+#endif
- if (!elf_section_by_name(elf, &ehdr, &shdr, name, NULL))
- break;
+static u64 elf_base_address(int fd)
+{
+ Elf *elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ GElf_Phdr phdr;
+ u64 retval = 0;
+ size_t i, phdrnum = 0;
- offset = shdr.sh_offset;
- } while (0);
+ if (elf == NULL)
+ return 0;
+ (void)elf_getphdrnum(elf, &phdrnum);
+ /* PT_LOAD segments are sorted by p_vaddr, so the first has the minimum p_vaddr. */
+ for (i = 0; i < phdrnum; i++) {
+ if (gelf_getphdr(elf, i, &phdr) && phdr.p_type == PT_LOAD) {
+ retval = phdr.p_vaddr & -getpagesize();
+ break;
+ }
+ }
elf_end(elf);
- return offset;
+ return retval;
}
#ifndef NO_LIBUNWIND_DEBUG_FRAME
@@ -248,8 +282,7 @@ struct eh_frame_hdr {
} __packed;
static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
- u64 offset, u64 *table_data, u64 *segbase,
- u64 *fde_count)
+ u64 offset, u64 *table_data_offset, u64 *fde_count)
{
struct eh_frame_hdr hdr;
u8 *enc = (u8 *) &hdr.enc;
@@ -265,35 +298,47 @@ static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
dw_read_encoded_value(enc, end, hdr.eh_frame_ptr_enc);
*fde_count = dw_read_encoded_value(enc, end, hdr.fde_count_enc);
- *segbase = offset;
- *table_data = (enc - (u8 *) &hdr) + offset;
+ *table_data_offset = enc - (u8 *) &hdr;
return 0;
}
-static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
+static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui,
u64 *table_data, u64 *segbase,
u64 *fde_count)
{
- int ret = -EINVAL, fd;
- u64 offset = dso->data.eh_frame_hdr_offset;
+ struct map *map;
+ u64 base_addr = UINT64_MAX;
+ int ret, fd;
- if (offset == 0) {
- fd = dso__data_get_fd(dso, machine);
+ if (dso->data.eh_frame_hdr_offset == 0) {
+ fd = dso__data_get_fd(dso, ui->machine);
if (fd < 0)
return -EINVAL;
/* Check the .eh_frame section for unwinding info */
- offset = elf_section_offset(fd, ".eh_frame_hdr");
- dso->data.eh_frame_hdr_offset = offset;
+ ret = elf_section_address_and_offset(fd, ".eh_frame_hdr",
+ &dso->data.eh_frame_hdr_addr,
+ &dso->data.eh_frame_hdr_offset);
+ dso->data.elf_base_addr = elf_base_address(fd);
dso__data_put_fd(dso);
+ if (ret || dso->data.eh_frame_hdr_offset == 0)
+ return -EINVAL;
}
- if (offset)
- ret = unwind_spec_ehframe(dso, machine, offset,
- table_data, segbase,
- fde_count);
-
- return ret;
+ maps__for_each_entry(ui->thread->maps, map) {
+ if (map->dso == dso && map->start < base_addr)
+ base_addr = map->start;
+ }
+ base_addr -= dso->data.elf_base_addr;
+ /* Address of .eh_frame_hdr */
+ *segbase = base_addr + dso->data.eh_frame_hdr_addr;
+ ret = unwind_spec_ehframe(dso, ui->machine, dso->data.eh_frame_hdr_offset,
+ table_data, fde_count);
+ if (ret)
+ return ret;
+ /* binary_search_table offset plus .eh_frame_hdr address */
+ *table_data += *segbase;
+ return 0;
}
#ifndef NO_LIBUNWIND_DEBUG_FRAME
@@ -388,14 +433,14 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
pr_debug("unwind: find_proc_info dso %s\n", map->dso->name);
/* Check the .eh_frame section for unwinding info */
- if (!read_unwind_spec_eh_frame(map->dso, ui->machine,
+ if (!read_unwind_spec_eh_frame(map->dso, ui,
&table_data, &segbase, &fde_count)) {
memset(&di, 0, sizeof(di));
di.format = UNW_INFO_FORMAT_REMOTE_TABLE;
di.start_ip = map->start;
di.end_ip = map->end;
- di.u.rti.segbase = map->start + segbase - map->pgoff;
- di.u.rti.table_data = map->start + table_data - map->pgoff;
+ di.u.rti.segbase = segbase;
+ di.u.rti.table_data = table_data;
di.u.rti.table_len = fde_count * sizeof(struct table_entry)
/ sizeof(unw_word_t);
ret = dwarf_search_unwind_table(as, ip, &di, pi,
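A small worked example of the segbase/table_data computation introduced above, using made-up addresses (none of these values come from the patch):

/* Sketch only: how read_unwind_spec_eh_frame() derives segbase and table_data. */
static void segbase_example(void)
{
	unsigned long long map_start         = 0x7f0000001000ULL;	/* lowest map->start for the DSO */
	unsigned long long elf_base_addr     = 0x1000ULL;		/* min PT_LOAD p_vaddr, page aligned */
	unsigned long long eh_frame_hdr_addr = 0x2000ULL;		/* sh_addr of .eh_frame_hdr */
	unsigned long long table_data_offset = 12;			/* binary_search_table offset in the header */

	unsigned long long base_addr  = map_start - elf_base_addr;	/* 0x7f0000000000 */
	unsigned long long segbase    = base_addr + eh_frame_hdr_addr;	/* 0x7f0000002000 */
	unsigned long long table_data = segbase + table_data_offset;	/* 0x7f000000200c */

	(void)segbase;
	(void)table_data;
}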
diff --git a/tools/testing/crypto/chacha20-s390/Makefile b/tools/testing/crypto/chacha20-s390/Makefile
new file mode 100644
index 000000000000..db81cd2fb9c5
--- /dev/null
+++ b/tools/testing/crypto/chacha20-s390/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2022 Red Hat, Inc.
+# Author: Vladis Dronov <vdronoff@gmail.com>
+
+obj-m += test_cipher.o
+test_cipher-y := test-cipher.o
+
+all:
+ make -C /lib/modules/$(shell uname -r)/build/ M=$(PWD) modules
+clean:
+ make -C /lib/modules/$(shell uname -r)/build/ M=$(PWD) clean
diff --git a/tools/testing/crypto/chacha20-s390/run-tests.sh b/tools/testing/crypto/chacha20-s390/run-tests.sh
new file mode 100644
index 000000000000..43108794b996
--- /dev/null
+++ b/tools/testing/crypto/chacha20-s390/run-tests.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2022 Red Hat, Inc.
+# Author: Vladis Dronov <vdronoff@gmail.com>
+#
+# This script runs (via insmod) the test-cipher.ko module, which invokes the
+# generic and s390-native ChaCha20 encryption algorithms on different
+# sizes of data. Check 'dmesg' for the results.
+#
+# The insmod error is expected:
+# insmod: ERROR: could not insert module test_cipher.ko: Operation not permitted
+
+lsmod | grep chacha | cut -f1 -d' ' | xargs rmmod
+modprobe chacha_generic
+modprobe chacha_s390
+
+# run encryption for different data sizes, including whole block(s) +/- 1
+insmod test_cipher.ko size=63
+insmod test_cipher.ko size=64
+insmod test_cipher.ko size=65
+insmod test_cipher.ko size=127
+insmod test_cipher.ko size=128
+insmod test_cipher.ko size=129
+insmod test_cipher.ko size=511
+insmod test_cipher.ko size=512
+insmod test_cipher.ko size=513
+insmod test_cipher.ko size=4096
+insmod test_cipher.ko size=65611
+insmod test_cipher.ko size=6291456
+insmod test_cipher.ko size=62914560
+
+# print test logs
+dmesg | tail -170
diff --git a/tools/testing/crypto/chacha20-s390/test-cipher.c b/tools/testing/crypto/chacha20-s390/test-cipher.c
new file mode 100644
index 000000000000..34e8b855266f
--- /dev/null
+++ b/tools/testing/crypto/chacha20-s390/test-cipher.c
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2022 Red Hat, Inc.
+ * Author: Vladis Dronov <vdronoff@gmail.com>
+ */
+
+#include <asm/elf.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <crypto/skcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/acompress.h>
+#include <crypto/rng.h>
+#include <crypto/drbg.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/simd.h>
+#include <crypto/chacha.h>
+#include <crypto/aead.h>
+#include <crypto/hash.h>
+#include <linux/crypto.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/fips.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/time.h>
+#include <linux/vmalloc.h>
+#include <linux/zlib.h>
+#include <linux/once.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+static unsigned int data_size __read_mostly = 256;
+static unsigned int debug __read_mostly = 0;
+
+/* tie all skcipher structures together */
+struct skcipher_def {
+ struct scatterlist sginp, sgout;
+ struct crypto_skcipher *tfm;
+ struct skcipher_request *req;
+ struct crypto_wait wait;
+};
+
+/* Perform cipher operations with the chacha lib */
+static int test_lib_chacha(u8 *revert, u8 *cipher, u8 *plain)
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u8 iv[16], key[32];
+ u64 start, end;
+
+ memset(key, 'X', sizeof(key));
+ memset(iv, 'I', sizeof(iv));
+
+ if (debug) {
+ print_hex_dump(KERN_INFO, "key: ", DUMP_PREFIX_OFFSET,
+ 16, 1, key, 32, 1);
+
+ print_hex_dump(KERN_INFO, "iv: ", DUMP_PREFIX_OFFSET,
+ 16, 1, iv, 16, 1);
+ }
+
+ /* Encrypt */
+ chacha_init_arch(chacha_state, (u32*)key, iv);
+
+ start = ktime_get_ns();
+ chacha_crypt_arch(chacha_state, cipher, plain, data_size, 20);
+ end = ktime_get_ns();
+
+ if (debug)
+ print_hex_dump(KERN_INFO, "encr:", DUMP_PREFIX_OFFSET,
+ 16, 1, cipher,
+ (data_size > 64 ? 64 : data_size), 1);
+
+ pr_info("lib encryption took: %lld nsec", end - start);
+
+ /* Decrypt */
+ chacha_init_arch(chacha_state, (u32 *)key, iv);
+
+ start = ktime_get_ns();
+ chacha_crypt_arch(chacha_state, revert, cipher, data_size, 20);
+ end = ktime_get_ns();
+
+ if (debug)
+ print_hex_dump(KERN_INFO, "decr:", DUMP_PREFIX_OFFSET,
+ 16, 1, revert,
+ (data_size > 64 ? 64 : data_size), 1);
+
+ pr_info("lib decryption took: %lld nsec", end - start);
+
+ return 0;
+}
+
+/* Perform cipher operations with skcipher */
+static unsigned int test_skcipher_encdec(struct skcipher_def *sk,
+ int enc)
+{
+ int rc;
+
+ if (enc) {
+ rc = crypto_wait_req(crypto_skcipher_encrypt(sk->req),
+ &sk->wait);
+ if (rc)
+			pr_info("skcipher encrypt returned with result %d\n", rc);
+ }
+ else
+ {
+ rc = crypto_wait_req(crypto_skcipher_decrypt(sk->req),
+ &sk->wait);
+ if (rc)
+			pr_info("skcipher decrypt returned with result %d\n", rc);
+ }
+
+ return rc;
+}
+
+/* Initialize and trigger cipher operations */
+static int test_skcipher(char *name, u8 *revert, u8 *cipher, u8 *plain)
+{
+ struct skcipher_def sk;
+ struct crypto_skcipher *skcipher = NULL;
+ struct skcipher_request *req = NULL;
+ u8 iv[16], key[32];
+ u64 start, end;
+ int ret = -EFAULT;
+
+ skcipher = crypto_alloc_skcipher(name, 0, 0);
+ if (IS_ERR(skcipher)) {
+ pr_info("could not allocate skcipher %s handle\n", name);
+ return PTR_ERR(skcipher);
+ }
+
+ req = skcipher_request_alloc(skcipher, GFP_KERNEL);
+ if (!req) {
+ pr_info("could not allocate skcipher request\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done,
+ &sk.wait);
+
+ memset(key, 'X', sizeof(key));
+ memset(iv, 'I', sizeof(iv));
+
+ if (crypto_skcipher_setkey(skcipher, key, 32)) {
+ pr_info("key could not be set\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (debug) {
+ print_hex_dump(KERN_INFO, "key: ", DUMP_PREFIX_OFFSET,
+ 16, 1, key, 32, 1);
+
+ print_hex_dump(KERN_INFO, "iv: ", DUMP_PREFIX_OFFSET,
+ 16, 1, iv, 16, 1);
+ }
+
+ sk.tfm = skcipher;
+ sk.req = req;
+
+ /* Encrypt in one pass */
+ sg_init_one(&sk.sginp, plain, data_size);
+ sg_init_one(&sk.sgout, cipher, data_size);
+ skcipher_request_set_crypt(req, &sk.sginp, &sk.sgout,
+ data_size, iv);
+ crypto_init_wait(&sk.wait);
+
+ /* Encrypt data */
+ start = ktime_get_ns();
+ ret = test_skcipher_encdec(&sk, 1);
+ end = ktime_get_ns();
+
+ if (ret)
+ goto out;
+
+ pr_info("%s tfm encryption successful, took %lld nsec\n", name, end - start);
+
+ if (debug)
+ print_hex_dump(KERN_INFO, "encr:", DUMP_PREFIX_OFFSET,
+ 16, 1, cipher,
+ (data_size > 64 ? 64 : data_size), 1);
+
+ /* Prepare for decryption */
+ memset(iv, 'I', sizeof(iv));
+
+ sg_init_one(&sk.sginp, cipher, data_size);
+ sg_init_one(&sk.sgout, revert, data_size);
+ skcipher_request_set_crypt(req, &sk.sginp, &sk.sgout,
+ data_size, iv);
+ crypto_init_wait(&sk.wait);
+
+ /* Decrypt data */
+ start = ktime_get_ns();
+ ret = test_skcipher_encdec(&sk, 0);
+ end = ktime_get_ns();
+
+ if (ret)
+ goto out;
+
+ pr_info("%s tfm decryption successful, took %lld nsec\n", name, end - start);
+
+ if (debug)
+ print_hex_dump(KERN_INFO, "decr:", DUMP_PREFIX_OFFSET,
+ 16, 1, revert,
+ (data_size > 64 ? 64 : data_size), 1);
+
+ /* Dump some internal skcipher data */
+ if (debug)
+ pr_info("skcipher %s: cryptlen %d blksize %d stride %d "
+ "ivsize %d alignmask 0x%x\n",
+ name, sk.req->cryptlen,
+ crypto_skcipher_blocksize(sk.tfm),
+ crypto_skcipher_alg(sk.tfm)->walksize,
+ crypto_skcipher_ivsize(sk.tfm),
+ crypto_skcipher_alignmask(sk.tfm));
+
+out:
+ if (skcipher)
+ crypto_free_skcipher(skcipher);
+ if (req)
+ skcipher_request_free(req);
+ return ret;
+}
+
+static int __init chacha_s390_test_init(void)
+{
+ u8 *plain = NULL, *revert = NULL;
+ u8 *cipher_generic = NULL, *cipher_s390 = NULL;
+ int ret = -1;
+
+ pr_info("s390 ChaCha20 test module: size=%d debug=%d\n",
+ data_size, debug);
+
+ /* Allocate and fill buffers */
+ plain = vmalloc(data_size);
+ if (!plain) {
+ pr_info("could not allocate plain buffer\n");
+ ret = -2;
+ goto out;
+ }
+ memset(plain, 'a', data_size);
+ get_random_bytes(plain, (data_size > 256 ? 256 : data_size));
+
+ cipher_generic = vmalloc(data_size);
+ if (!cipher_generic) {
+ pr_info("could not allocate cipher_generic buffer\n");
+ ret = -2;
+ goto out;
+ }
+ memset(cipher_generic, 0, data_size);
+
+ cipher_s390 = vmalloc(data_size);
+ if (!cipher_s390) {
+ pr_info("could not allocate cipher_s390 buffer\n");
+ ret = -2;
+ goto out;
+ }
+ memset(cipher_s390, 0, data_size);
+
+ revert = vmalloc(data_size);
+ if (!revert) {
+ pr_info("could not allocate revert buffer\n");
+ ret = -2;
+ goto out;
+ }
+ memset(revert, 0, data_size);
+
+ if (debug)
+ print_hex_dump(KERN_INFO, "src: ", DUMP_PREFIX_OFFSET,
+ 16, 1, plain,
+ (data_size > 64 ? 64 : data_size), 1);
+
+ /* Use chacha20 generic */
+ ret = test_skcipher("chacha20-generic", revert, cipher_generic, plain);
+ if (ret)
+ goto out;
+
+ if (memcmp(plain, revert, data_size)) {
+ pr_info("generic en/decryption check FAILED\n");
+ ret = -2;
+ goto out;
+ }
+ else
+ pr_info("generic en/decryption check OK\n");
+
+ memset(revert, 0, data_size);
+
+ /* Use chacha20 s390 */
+ ret = test_skcipher("chacha20-s390", revert, cipher_s390, plain);
+ if (ret)
+ goto out;
+
+ if (memcmp(plain, revert, data_size)) {
+ pr_info("s390 en/decryption check FAILED\n");
+ ret = -2;
+ goto out;
+ }
+ else
+ pr_info("s390 en/decryption check OK\n");
+
+ if (memcmp(cipher_generic, cipher_s390, data_size)) {
+ pr_info("s390 vs generic check FAILED\n");
+ ret = -2;
+ goto out;
+ }
+ else
+ pr_info("s390 vs generic check OK\n");
+
+ memset(cipher_s390, 0, data_size);
+ memset(revert, 0, data_size);
+
+ /* Use chacha20 lib */
+ test_lib_chacha(revert, cipher_s390, plain);
+
+ if (memcmp(plain, revert, data_size)) {
+ pr_info("lib en/decryption check FAILED\n");
+ ret = -2;
+ goto out;
+ }
+ else
+ pr_info("lib en/decryption check OK\n");
+
+ if (memcmp(cipher_generic, cipher_s390, data_size)) {
+ pr_info("lib vs generic check FAILED\n");
+ ret = -2;
+ goto out;
+ }
+ else
+ pr_info("lib vs generic check OK\n");
+
+ pr_info("--- chacha20 s390 test end ---\n");
+
+out:
+ if (plain)
+ vfree(plain);
+ if (cipher_generic)
+ vfree(cipher_generic);
+ if (cipher_s390)
+ vfree(cipher_s390);
+ if (revert)
+ vfree(revert);
+
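+	/* fail init on purpose: -1 (-EPERM) keeps the module from staying loaded, matching the insmod error run-tests.sh expects */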
+ return -1;
+}
+
+static void __exit chacha_s390_test_exit(void)
+{
+ pr_info("s390 ChaCha20 test module exit\n");
+}
+
+module_param_named(size, data_size, uint, 0660);
+module_param(debug, int, 0660);
+MODULE_PARM_DESC(size, "Size of a plaintext");
+MODULE_PARM_DESC(debug, "Debug level (0=off,1=on)");
+
+module_init(chacha_s390_test_init);
+module_exit(chacha_s390_test_exit);
+
+MODULE_DESCRIPTION("s390 ChaCha20 self-test");
+MODULE_AUTHOR("Vladis Dronov <vdronoff@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index 82e49ab0937d..33543231d453 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -8,6 +8,8 @@ ldflags-y += --wrap=devm_cxl_port_enumerate_dports
ldflags-y += --wrap=devm_cxl_setup_hdm
ldflags-y += --wrap=devm_cxl_add_passthrough_decoder
ldflags-y += --wrap=devm_cxl_enumerate_decoders
+ldflags-y += --wrap=cxl_await_media_ready
+ldflags-y += --wrap=cxl_hdm_decode_init
DRIVERS := ../../../drivers
CXL_SRC := $(DRIVERS)/cxl
@@ -34,7 +36,6 @@ cxl_port-y += config_check.o
obj-m += cxl_mem.o
cxl_mem-y := $(CXL_SRC)/mem.o
-cxl_mem-y += mock_mem.o
cxl_mem-y += config_check.o
obj-m += cxl_core.o
diff --git a/tools/testing/cxl/mock_mem.c b/tools/testing/cxl/mock_mem.c
deleted file mode 100644
index d1dec5845139..000000000000
--- a/tools/testing/cxl/mock_mem.c
+++ /dev/null
@@ -1,10 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
-
-#include <linux/types.h>
-
-struct cxl_dev_state;
-bool cxl_dvsec_decode_init(struct cxl_dev_state *cxlds)
-{
- return true;
-}
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index b6b726eff3e2..6b9239b2afd4 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -237,25 +237,11 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *
return rc;
}
-static int cxl_mock_wait_media_ready(struct cxl_dev_state *cxlds)
-{
- msleep(100);
- return 0;
-}
-
static void label_area_release(void *lsa)
{
vfree(lsa);
}
-static void mock_validate_dvsec_ranges(struct cxl_dev_state *cxlds)
-{
- struct cxl_endpoint_dvsec_info *info;
-
- info = &cxlds->info;
- info->mem_enabled = true;
-}
-
static int cxl_mock_mem_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -278,7 +264,6 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
cxlds->serial = pdev->id;
cxlds->mbox_send = cxl_mock_mbox_send;
- cxlds->wait_media_ready = cxl_mock_wait_media_ready;
cxlds->payload_size = SZ_4K;
rc = cxl_enumerate_cmds(cxlds);
@@ -293,8 +278,6 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
if (rc)
return rc;
- mock_validate_dvsec_ranges(cxlds);
-
cxlmd = devm_cxl_add_memdev(cxlds);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index 6e8c9d63c92d..f1f8c40948c5 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -193,6 +193,35 @@ int __wrap_devm_cxl_port_enumerate_dports(struct cxl_port *port)
}
EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_port_enumerate_dports, CXL);
+int __wrap_cxl_await_media_ready(struct cxl_dev_state *cxlds)
+{
+ int rc, index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops && ops->is_mock_dev(cxlds->dev))
+ rc = 0;
+ else
+ rc = cxl_await_media_ready(cxlds);
+ put_cxl_mock_ops(index);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_cxl_await_media_ready, CXL);
+
+bool __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
+ struct cxl_hdm *cxlhdm)
+{
+ int rc = 0, index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (!ops || !ops->is_mock_dev(cxlds->dev))
+ rc = cxl_hdm_decode_init(cxlds, cxlhdm);
+ put_cxl_mock_ops(index);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_cxl_hdm_decode_init, CXL);
+
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(ACPI);
MODULE_IMPORT_NS(CXL);
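The wrappers above work together with the --wrap linker flags added to the Kbuild file earlier in this diff: with --wrap=SYMBOL, calls to SYMBOL() in the wrapped objects are redirected to __wrap_SYMBOL(), while the original definition stays reachable as __real_SYMBOL(). A generic illustration follows; do_thing() is a hypothetical symbol, not a CXL function.

/* Sketch only: generic ld --wrap pattern with a hypothetical symbol. */
long __real_do_thing(int arg);		/* resolved by the linker to the real definition */

long __wrap_do_thing(int arg)
{
	if (arg < 0)			/* take the mocked path */
		return 0;

	return __real_do_thing(arg);	/* otherwise call through to the real implementation */
}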
diff --git a/tools/testing/memblock/TODO b/tools/testing/memblock/TODO
index c25b2fdec45e..cd1a30d5acc9 100644
--- a/tools/testing/memblock/TODO
+++ b/tools/testing/memblock/TODO
@@ -23,6 +23,3 @@ TODO
5. Add tests for memblock_alloc_node() to check if the correct NUMA node is set
for the new region
-
-6. Update comments in tests/basic_api.c to match the style used in
- tests/alloc_*.c
diff --git a/tools/testing/memblock/tests/basic_api.c b/tools/testing/memblock/tests/basic_api.c
index fbc1ce160303..a7bc180316d6 100644
--- a/tools/testing/memblock/tests/basic_api.c
+++ b/tools/testing/memblock/tests/basic_api.c
@@ -26,8 +26,8 @@ static int memblock_initialization_check(void)
/*
* A simple test that adds a memory block of a specified base address
* and size to the collection of available memory regions (memblock.memory).
- * It checks if a new entry was created and if region counter and total memory
- * were correctly updated.
+ * Expect to create a new entry. The region counter and total memory get
+ * updated.
*/
static int memblock_add_simple_check(void)
{
@@ -53,10 +53,10 @@ static int memblock_add_simple_check(void)
}
/*
- * A simple test that adds a memory block of a specified base address, size
+ * A simple test that adds a memory block of a specified base address, size,
* NUMA node and memory flags to the collection of available memory regions.
- * It checks if the new entry, region counter and total memory size have
- * expected values.
+ * Expect to create a new entry. The region counter and total memory get
+ * updated.
*/
static int memblock_add_node_simple_check(void)
{
@@ -87,9 +87,15 @@ static int memblock_add_node_simple_check(void)
/*
* A test that tries to add two memory blocks that don't overlap with one
- * another. It checks if two correctly initialized entries were added to the
- * collection of available memory regions (memblock.memory) and if this
- * change was reflected in memblock.memory's total size and region counter.
+ * another:
+ *
+ * | +--------+ +--------+ |
+ * | | r1 | | r2 | |
+ * +--------+--------+--------+--------+--+
+ *
+ * Expect to add two correctly initialized entries to the collection of
+ * available memory regions (memblock.memory). The total size and
+ * region counter fields get updated.
*/
static int memblock_add_disjoint_check(void)
{
@@ -124,11 +130,21 @@ static int memblock_add_disjoint_check(void)
}
/*
- * A test that tries to add two memory blocks, where the second one overlaps
- * with the beginning of the first entry (that is r1.base < r2.base + r2.size).
- * After this, it checks if two entries are merged into one region that starts
- * at r2.base and has size of two regions minus their intersection. It also
- * verifies the reported total size of the available memory and region counter.
+ * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
+ * with the beginning of r1 (that is r1.base < r2.base + r2.size):
+ *
+ * | +----+----+------------+ |
+ * | | |r2 | r1 | |
+ * +----+----+----+------------+----------+
+ * ^ ^
+ * | |
+ * | r1.base
+ * |
+ * r2.base
+ *
+ * Expect to merge the two entries into one region that starts at r2.base
+ * and has size of two regions minus their intersection. The total size of
+ * the available memory is updated, and the region counter stays the same.
*/
static int memblock_add_overlap_top_check(void)
{
@@ -162,12 +178,21 @@ static int memblock_add_overlap_top_check(void)
}
/*
- * A test that tries to add two memory blocks, where the second one overlaps
- * with the end of the first entry (that is r2.base < r1.base + r1.size).
- * After this, it checks if two entries are merged into one region that starts
- * at r1.base and has size of two regions minus their intersection. It verifies
- * that memblock can still see only one entry and has a correct total size of
- * the available memory.
+ * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
+ * with the end of r1 (that is r2.base < r1.base + r1.size):
+ *
+ * | +--+------+----------+ |
+ * | | | r1 | r2 | |
+ * +--+--+------+----------+--------------+
+ * ^ ^
+ * | |
+ * | r2.base
+ * |
+ * r1.base
+ *
+ * Expect to merge the two entries into one region that starts at r1.base
+ * and has size of two regions minus their intersection. The total size of
+ * the available memory is updated, and the region counter stays the same.
*/
static int memblock_add_overlap_bottom_check(void)
{
@@ -201,11 +226,19 @@ static int memblock_add_overlap_bottom_check(void)
}
/*
- * A test that tries to add two memory blocks, where the second one is
- * within the range of the first entry (that is r1.base < r2.base &&
- * r2.base + r2.size < r1.base + r1.size). It checks if two entries are merged
- * into one region that stays the same. The counter and total size of available
- * memory are expected to not be updated.
+ * A test that tries to add two memory blocks r1 and r2, where r2 is
+ * within the range of r1 (that is r1.base < r2.base &&
+ * r2.base + r2.size < r1.base + r1.size):
+ *
+ * | +-------+--+-----------------------+
+ * | | |r2| r1 |
+ * +---+-------+--+-----------------------+
+ * ^
+ * |
+ * r1.base
+ *
+ * Expect to merge two entries into one region that stays the same.
+ * The counter and total size of available memory are not updated.
*/
static int memblock_add_within_check(void)
{
@@ -236,8 +269,8 @@ static int memblock_add_within_check(void)
}
/*
- * A simple test that tries to add the same memory block twice. The counter
- * and total size of available memory are expected to not be updated.
+ * A simple test that tries to add the same memory block twice. Expect
+ * the counter and total size of available memory to not be updated.
*/
static int memblock_add_twice_check(void)
{
@@ -270,12 +303,12 @@ static int memblock_add_checks(void)
return 0;
}
- /*
- * A simple test that marks a memory block of a specified base address
- * and size as reserved and to the collection of reserved memory regions
- * (memblock.reserved). It checks if a new entry was created and if region
- * counter and total memory size were correctly updated.
- */
+/*
+ * A simple test that marks a memory block of a specified base address
+ * and size as reserved, adding it to the collection of reserved memory regions
+ * (memblock.reserved). Expect to create a new entry. The region counter
+ * and total memory size are updated.
+ */
static int memblock_reserve_simple_check(void)
{
struct memblock_region *rgn;
@@ -297,10 +330,15 @@ static int memblock_reserve_simple_check(void)
}
/*
- * A test that tries to mark two memory blocks that don't overlap as reserved
- * and checks if two entries were correctly added to the collection of reserved
- * memory regions (memblock.reserved) and if this change was reflected in
- * memblock.reserved's total size and region counter.
+ * A test that tries to mark two memory blocks that don't overlap as reserved:
+ *
+ * | +--+ +----------------+ |
+ * | |r1| | r2 | |
+ * +--------+--+------+----------------+--+
+ *
+ * Expect to add two entries to the collection of reserved memory regions
+ * (memblock.reserved). The total size and region counter for
+ * memblock.reserved are updated.
*/
static int memblock_reserve_disjoint_check(void)
{
@@ -335,13 +373,22 @@ static int memblock_reserve_disjoint_check(void)
}
/*
- * A test that tries to mark two memory blocks as reserved, where the
- * second one overlaps with the beginning of the first (that is
- * r1.base < r2.base + r2.size).
- * It checks if two entries are merged into one region that starts at r2.base
- * and has size of two regions minus their intersection. The test also verifies
- * that memblock can still see only one entry and has a correct total size of
- * the reserved memory.
+ * A test that tries to mark two memory blocks r1 and r2 as reserved,
+ * where r2 overlaps with the beginning of r1 (that is
+ * r1.base < r2.base + r2.size):
+ *
+ * | +--------------+--+--------------+ |
+ * | | r2 | | r1 | |
+ * +--+--------------+--+--------------+--+
+ * ^ ^
+ * | |
+ * | r1.base
+ * |
+ * r2.base
+ *
+ * Expect to merge two entries into one region that starts at r2.base and
+ * has size of two regions minus their intersection. The total size of the
+ * reserved memory is updated, and the region counter is not updated.
*/
static int memblock_reserve_overlap_top_check(void)
{
@@ -375,13 +422,22 @@ static int memblock_reserve_overlap_top_check(void)
}
/*
- * A test that tries to mark two memory blocks as reserved, where the
- * second one overlaps with the end of the first entry (that is
- * r2.base < r1.base + r1.size).
- * It checks if two entries are merged into one region that starts at r1.base
- * and has size of two regions minus their intersection. It verifies that
- * memblock can still see only one entry and has a correct total size of the
- * reserved memory.
+ * A test that tries to mark two memory blocks r1 and r2 as reserved,
+ * where r2 overlaps with the end of r1 (that is
+ * r2.base < r1.base + r1.size):
+ *
+ * | +--------------+--+--------------+ |
+ * | | r1 | | r2 | |
+ * +--+--------------+--+--------------+--+
+ * ^ ^
+ * | |
+ * | r2.base
+ * |
+ * r1.base
+ *
+ * Expect to merge two entries into one region that starts at r1.base and
+ * has size of two regions minus their intersection. The total size of the
+ * reserved memory is updated, and the region counter is not updated.
*/
static int memblock_reserve_overlap_bottom_check(void)
{
@@ -415,12 +471,21 @@ static int memblock_reserve_overlap_bottom_check(void)
}
/*
- * A test that tries to mark two memory blocks as reserved, where the second
- * one is within the range of the first entry (that is
- * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)).
- * It checks if two entries are merged into one region that stays the
- * same. The counter and total size of available memory are expected to not be
- * updated.
+ * A test that tries to mark two memory blocks r1 and r2 as reserved,
+ * where r2 is within the range of r1 (that is
+ * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
+ *
+ * | +-----+--+---------------------------|
+ * | | |r2| r1 |
+ * +-+-----+--+---------------------------+
+ * ^ ^
+ * | |
+ * | r2.base
+ * |
+ * r1.base
+ *
+ * Expect to merge two entries into one region that stays the same. The
+ * counter and total size of available memory are not updated.
*/
static int memblock_reserve_within_check(void)
{
@@ -452,7 +517,7 @@ static int memblock_reserve_within_check(void)
/*
* A simple test that tries to reserve the same memory block twice.
- * The region counter and total size of reserved memory are expected to not
+ * Expect the region counter and total size of reserved memory to not
* be updated.
*/
static int memblock_reserve_twice_check(void)
@@ -485,14 +550,22 @@ static int memblock_reserve_checks(void)
return 0;
}
- /*
- * A simple test that tries to remove the first entry of the array of
- * available memory regions. By "removing" a region we mean overwriting it
- * with the next region in memblock.memory. To check this is the case, the
- * test adds two memory blocks and verifies that the value of the latter
- * was used to erase r1 region. It also checks if the region counter and
- * total size were updated to expected values.
- */
+/*
+ * A simple test that tries to remove a region r1 from the array of
+ * available memory regions. By "removing" a region we mean overwriting it
+ * with the next region r2 in memblock.memory:
+ *
+ * | ...... +----------------+ |
+ * | : r1 : | r2 | |
+ * +--+----+----------+----------------+--+
+ * ^
+ * |
+ * rgn.base
+ *
+ * Expect to add two memory blocks r1 and r2 and then remove r1 so that
+ * r2 is the first available region. The region counter and total size
+ * are updated.
+ */
static int memblock_remove_simple_check(void)
{
struct memblock_region *rgn;
@@ -522,11 +595,22 @@ static int memblock_remove_simple_check(void)
return 0;
}
- /*
- * A test that tries to remove a region that was not registered as available
- * memory (i.e. has no corresponding entry in memblock.memory). It verifies
- * that array, regions counter and total size were not modified.
- */
+/*
+ * A test that tries to remove a region r2 that was not registered as
+ * available memory (i.e. has no corresponding entry in memblock.memory):
+ *
+ * +----------------+
+ * | r2 |
+ * +----------------+
+ * | +----+ |
+ * | | r1 | |
+ * +--+----+------------------------------+
+ * ^
+ * |
+ * rgn.base
+ *
+ * Expect the array, regions counter and total size to not be modified.
+ */
static int memblock_remove_absent_check(void)
{
struct memblock_region *rgn;
@@ -556,11 +640,23 @@ static int memblock_remove_absent_check(void)
}
/*
- * A test that tries to remove a region which overlaps with the beginning of
- * the already existing entry r1 (that is r1.base < r2.base + r2.size). It
- * checks if only the intersection of both regions is removed from the available
- * memory pool. The test also checks if the regions counter and total size are
- * updated to expected values.
+ * A test that tries to remove a region r2 that overlaps with the
+ * beginning of the already existing entry r1
+ * (that is r1.base < r2.base + r2.size):
+ *
+ * +-----------------+
+ * | r2 |
+ * +-----------------+
+ * | .........+--------+ |
+ * | : r1 | rgn | |
+ * +-----------------+--------+--------+--+
+ * ^ ^
+ * | |
+ * | rgn.base
+ * r1.base
+ *
+ * Expect that only the intersection of both regions is removed from the
+ * available memory pool. The regions counter and total size are updated.
*/
static int memblock_remove_overlap_top_check(void)
{
@@ -596,11 +692,21 @@ static int memblock_remove_overlap_top_check(void)
}
/*
- * A test that tries to remove a region which overlaps with the end of the
- * first entry (that is r2.base < r1.base + r1.size). It checks if only the
- * intersection of both regions is removed from the available memory pool.
- * The test also checks if the regions counter and total size are updated to
- * expected values.
+ * A test that tries to remove a region r2 that overlaps with the end of
+ * the already existing region r1 (that is r2.base < r1.base + r1.size):
+ *
+ * +--------------------------------+
+ * | r2 |
+ * +--------------------------------+
+ * | +---+..... |
+ * | |rgn| r1 : |
+ * +-+---+----+---------------------------+
+ * ^
+ * |
+ * r1.base
+ *
+ * Expect that only the intersection of both regions is removed from the
+ * available memory pool. The regions counter and total size are updated.
*/
static int memblock_remove_overlap_bottom_check(void)
{
@@ -633,13 +739,23 @@ static int memblock_remove_overlap_bottom_check(void)
}
/*
- * A test that tries to remove a region which is within the range of the
- * already existing entry (that is
- * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)).
- * It checks if the region is split into two - one that ends at r2.base and
- * second that starts at r2.base + size, with appropriate sizes. The test
- * also checks if the region counter and total size were updated to
- * expected values.
+ * A test that tries to remove a region r2 that is within the range of
+ * the already existing entry r1 (that is
+ * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
+ *
+ * +----+
+ * | r2 |
+ * +----+
+ * | +-------------+....+---------------+ |
+ * | | rgn1 | r1 | rgn2 | |
+ * +-+-------------+----+---------------+-+
+ * ^
+ * |
+ * r1.base
+ *
+ * Expect that the region is split into two - one that ends at r2.base and
+ * another that starts at r2.base + r2.size, with appropriate sizes. The
+ * region counter and total size are updated.
*/
static int memblock_remove_within_check(void)
{
@@ -690,12 +806,19 @@ static int memblock_remove_checks(void)
}
/*
- * A simple test that tries to free a memory block that was marked earlier
- * as reserved. By "freeing" a region we mean overwriting it with the next
- * entry in memblock.reserved. To check this is the case, the test reserves
- * two memory regions and verifies that the value of the latter was used to
- * erase r1 region.
- * The test also checks if the region counter and total size were updated.
+ * A simple test that tries to free a memory block r1 that was marked
+ * earlier as reserved. By "freeing" a region we mean overwriting it with
+ * the next entry r2 in memblock.reserved:
+ *
+ * | ...... +----+ |
+ * | : r1 : | r2 | |
+ * +--------------+----+-----------+----+-+
+ * ^
+ * |
+ * rgn.base
+ *
+ * Expect to reserve two memory regions and then erase region r1 with the
+ * value of r2. The region counter and total size are updated.
*/
static int memblock_free_simple_check(void)
{
@@ -726,11 +849,22 @@ static int memblock_free_simple_check(void)
return 0;
}
- /*
- * A test that tries to free a region that was not marked as reserved
- * (i.e. has no corresponding entry in memblock.reserved). It verifies
- * that array, regions counter and total size were not modified.
- */
+/*
+ * A test that tries to free a region r2 that was not marked as reserved
+ * (i.e. has no corresponding entry in memblock.reserved):
+ *
+ * +----------------+
+ * | r2 |
+ * +----------------+
+ * | +----+ |
+ * | | r1 | |
+ * +--+----+------------------------------+
+ * ^
+ * |
+ * rgn.base
+ *
+ * The array, regions counter and total size are not modified.
+ */
static int memblock_free_absent_check(void)
{
struct memblock_region *rgn;
@@ -760,11 +894,23 @@ static int memblock_free_absent_check(void)
}
/*
- * A test that tries to free a region which overlaps with the beginning of
- * the already existing entry r1 (that is r1.base < r2.base + r2.size). It
- * checks if only the intersection of both regions is freed. The test also
- * checks if the regions counter and total size are updated to expected
- * values.
+ * A test that tries to free a region r2 that overlaps with the beginning
+ * of the already existing entry r1 (that is r1.base < r2.base + r2.size):
+ *
+ * +----+
+ * | r2 |
+ * +----+
+ * | ...+--------------+ |
+ * | : | r1 | |
+ * +----+--+--------------+---------------+
+ * ^ ^
+ * | |
+ * | rgn.base
+ * |
+ * r1.base
+ *
+ * Expect that only the intersection of both regions is freed. The
+ * regions counter and total size are updated.
*/
static int memblock_free_overlap_top_check(void)
{
@@ -798,10 +944,18 @@ static int memblock_free_overlap_top_check(void)
}
/*
- * A test that tries to free a region which overlaps with the end of the
- * first entry (that is r2.base < r1.base + r1.size). It checks if only the
- * intersection of both regions is freed. The test also checks if the
- * regions counter and total size are updated to expected values.
+ * A test that tries to free a region r2 that overlaps with the end of
+ * the already existing entry r1 (that is r2.base < r1.base + r1.size):
+ *
+ * +----------------+
+ * | r2 |
+ * +----------------+
+ * | +-----------+..... |
+ * | | r1 | : |
+ * +----+-----------+----+----------------+
+ *
+ * Expect that only the intersection of both regions is freed. The
+ * regions counter and total size are updated.
*/
static int memblock_free_overlap_bottom_check(void)
{
@@ -835,13 +989,23 @@ static int memblock_free_overlap_bottom_check(void)
}
/*
- * A test that tries to free a region which is within the range of the
- * already existing entry (that is
- * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)).
- * It checks if the region is split into two - one that ends at r2.base and
- * second that starts at r2.base + size, with appropriate sizes. It is
- * expected that the region counter and total size fields were updated t
- * reflect that change.
+ * A test that tries to free a region r2 that is within the range of the
+ * already existing entry r1 (that is
+ * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
+ *
+ * +----+
+ * | r2 |
+ * +----+
+ * | +------------+....+---------------+
+ * | | rgn1 | r1 | rgn2 |
+ * +----+------------+----+---------------+
+ * ^
+ * |
+ * r1.base
+ *
+ * Expect that the region is split into two - one that ends at r2.base and
+ * another that starts at r2.base + r2.size, with appropriate sizes. The
+ * region counter and total size fields are updated.
*/
static int memblock_free_within_check(void)
{
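Both within-range cases above (memblock_remove_within_check and memblock_free_within_check) end with the original region split around the removed range. A minimal C sketch of that split arithmetic, using made-up addresses rather than the tests' fixtures, is:

	/* Hedged sketch: removing r2 from inside r1 leaves two regions.
	 * Addresses are illustrative only. */
	#include <stdio.h>

	struct region { unsigned long base, size; };

	int main(void)
	{
		struct region r1 = { .base = 0x1000, .size = 0x8000 };
		struct region r2 = { .base = 0x3000, .size = 0x2000 };

		/* r1.base < r2.base and r2.base + r2.size < r1.base + r1.size */
		struct region rgn1 = { .base = r1.base, .size = r2.base - r1.base };
		struct region rgn2 = {
			.base = r2.base + r2.size,
			.size = (r1.base + r1.size) - (r2.base + r2.size),
		};

		printf("rgn1: base=%#lx size=%#lx\n", rgn1.base, rgn1.size);
		printf("rgn2: base=%#lx size=%#lx\n", rgn2.base, rgn2.size);
		/* total size drops by exactly r2.size */
		return 0;
	}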
diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c
index af19c85558e7..c1ec099a3b1d 100644
--- a/tools/testing/nvdimm/pmem-dax.c
+++ b/tools/testing/nvdimm/pmem-dax.c
@@ -4,11 +4,13 @@
*/
#include "test/nfit_test.h"
#include <linux/blkdev.h>
+#include <linux/dax.h>
#include <pmem.h>
#include <nd.h>
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index b752ce47ead3..ea956082e6a4 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -62,16 +62,14 @@ struct nfit_test_resource *get_nfit_res(resource_size_t resource)
}
EXPORT_SYMBOL(get_nfit_res);
-static void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
- void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
-{
- struct nfit_test_resource *nfit_res = get_nfit_res(offset);
-
- if (nfit_res)
- return (void __iomem *) nfit_res->buf + offset
- - nfit_res->res.start;
- return fallback_fn(offset, size);
-}
+#define __nfit_test_ioremap(offset, size, fallback_fn) ({ \
+ struct nfit_test_resource *nfit_res = get_nfit_res(offset); \
+ nfit_res ? \
+ (void __iomem *) nfit_res->buf + (offset) \
+ - nfit_res->res.start \
+ : \
+ fallback_fn((offset), (size)) ; \
+})
void __iomem *__wrap_devm_ioremap(struct device *dev,
resource_size_t offset, unsigned long size)
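The conversion of __nfit_test_ioremap from a function to a macro above leans on GNU statement expressions, which let the macro yield a value while accepting fallback functions whose prototypes differ per call site. A self-contained sketch of the same pattern, with hypothetical helpers, is:

	#include <stdio.h>

	/* Hypothetical fallbacks with differing signatures */
	static long fb_long(long off, unsigned long size) { return off + size; }
	static int  fb_int(int off, unsigned long size)   { return (int)(off - size); }

	#define pick_or_fallback(off, size, fallback_fn) ({			\
		long __cached = (off) == 0 ? 42 : 0;				\
		__cached ? __cached : (long)fallback_fn((off), (size));	\
	})

	int main(void)
	{
		printf("%ld\n", pick_or_fallback(0, 8UL, fb_long));	/* 42 */
		printf("%ld\n", pick_or_fallback(5, 2UL, fb_int));	/* 3  */
		return 0;
	}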
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 1da76ccde448..c75abb497a1a 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -23,8 +23,6 @@
#include "nfit_test.h"
#include "../watermark.h"
-#include <asm/mce.h>
-
/*
* Generate an NFIT table to describe the following topology:
*
@@ -3375,7 +3373,6 @@ static __exit void nfit_test_exit(void)
{
int i;
- flush_workqueue(nfit_wq);
destroy_workqueue(nfit_wq);
for (i = 0; i < NUM_NFITS; i++)
platform_device_unregister(&instances[i]->pdev);
diff --git a/tools/testing/selftests/alsa/Makefile b/tools/testing/selftests/alsa/Makefile
index f64d9090426d..fd8ddce2b1a6 100644
--- a/tools/testing/selftests/alsa/Makefile
+++ b/tools/testing/selftests/alsa/Makefile
@@ -3,6 +3,9 @@
CFLAGS += $(shell pkg-config --cflags alsa)
LDLIBS += $(shell pkg-config --libs alsa)
+ifeq ($(LDLIBS),)
+LDLIBS += -lasound
+endif
TEST_GEN_PROGS := mixer-test
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c
index bb50b5adbf10..915821375b0a 100644
--- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c
@@ -6,6 +6,7 @@
* supported and is expected to segfault.
*/
+#include <kselftest.h>
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
@@ -40,6 +41,7 @@ static bool sve_get_vls(struct tdescr *td)
/* We need at least two VLs */
if (nvls < 2) {
fprintf(stderr, "Only %d VL supported\n", nvls);
+ td->result = KSFT_SKIP;
return false;
}
diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
index 6c62bfb8bb6f..0c4426592a26 100644
--- a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
@@ -39,7 +39,7 @@ struct {
__type(value, stack_trace_t);
} stack_amap SEC(".maps");
-SEC("kprobe/urandom_read")
+SEC("kprobe/urandom_read_iter")
int oncpu(struct pt_regs *args)
{
__u32 max_len = sizeof(struct bpf_stack_build_id)
diff --git a/tools/testing/selftests/cgroup/memcg_protection.m b/tools/testing/selftests/cgroup/memcg_protection.m
new file mode 100644
index 000000000000..051daa3477b6
--- /dev/null
+++ b/tools/testing/selftests/cgroup/memcg_protection.m
@@ -0,0 +1,89 @@
+% SPDX-License-Identifier: GPL-2.0
+%
+% run as: octave-cli memcg_protection.m
+%
+% This script simulates reclaim protection behavior on a single level of memcg
+% hierarchy to illustrate how overcommitted protection spreads among siblings
+% (as it depends also on their current consumption).
+%
+% Simulation assumes siblings consumed the initial amount of memory (w/out
+% reclaim) and then reclaim starts; all memory is reclaimable, i.e. treated the
+% same. It simulates only non-low reclaim and assumes all memory.min = 0.
+%
+% Input configurations
+% --------------------
+% E number parent effective protection
+% n vector nominal protection of siblings set at the given level (memory.low)
+% c vector current consumption -,,- (memory.current)
+
+% example from testcase (values in GB)
+E = 50 / 1024;
+n = [75 25 0 500 ] / 1024;
+c = [50 50 50 0] / 1024;
+
+% Reclaim parameters
+% ------------------
+
+% Minimal reclaim amount (GB)
+cluster = 32*4 / 2**20;
+
+% Reclaim coefficient (think as 0.5^sc->priority)
+alpha = .1
+
+% Simulation parameters
+% ---------------------
+epsilon = 1e-7;
+timeout = 1000;
+
+% Simulation loop
+% ---------------
+
+ch = [];
+eh = [];
+rh = [];
+
+for t = 1:timeout
+ % low_usage
+ u = min(c, n);
+ siblings = sum(u);
+
+ % effective_protection()
+ protected = min(n, c); % start with nominal
+ e = protected * min(1, E / siblings); % normalize overcommit
+
+ % recursive protection
+ unclaimed = max(0, E - siblings);
+ parent_overuse = sum(c) - siblings;
+ if (unclaimed > 0 && parent_overuse > 0)
+ overuse = max(0, c - protected);
+ e += unclaimed * (overuse / parent_overuse);
+ endif
+
+ % get_scan_count()
+ r = alpha * c; % assume all memory is in a single LRU list
+
+ % commit 1bc63fb1272b ("mm, memcg: make scan aggression always exclude protection")
+ sz = max(e, c);
+ r .*= (1 - (e+epsilon) ./ (sz+epsilon));
+
+ % uncomment for debug prints
+ % e, c, r
+
+ % nothing to reclaim, reached equilibrium
+ if max(r) < epsilon
+ break;
+ endif
+
+ % SWAP_CLUSTER_MAX roundup
+ r = max(r, (r > epsilon) .* cluster);
+ % XXX here I do parallel reclaim of all siblings
+ % in reality reclaim is serialized and each sibling recalculates its own residual
+ c = max(c - r, 0);
+
+ ch = [ch ; c];
+ eh = [eh ; e];
+ rh = [rh ; r];
+endfor
+
+t
+c, e
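As a cross-check of the script's effective_protection() step, the single-pass normalization it models (e = min(n, c) * min(1, E / sum(min(n, c)))) can be written directly in C. A rough sketch with the same example values follows; note the ~29M/~21M figures used by test_memcontrol.c come from iterating the full reclaim loop above, not from this single step:

	#include <stdio.h>

	int main(void)
	{
		/* Example values in MB, mirroring the script's first iteration */
		double E = 50.0;			/* parent effective protection */
		double n[4] = { 75, 25, 0, 500 };	/* memory.low of siblings */
		double c[4] = { 50, 50, 50, 0 };	/* memory.current of siblings */
		double u[4], e[4], siblings = 0;
		int i;

		for (i = 0; i < 4; i++) {
			u[i] = n[i] < c[i] ? n[i] : c[i];	/* low_usage */
			siblings += u[i];
		}
		for (i = 0; i < 4; i++) {
			double scale = E / siblings;

			if (scale > 1)
				scale = 1;
			e[i] = u[i] * scale;		/* normalize overcommit */
			printf("child %d: e = %.1f MB\n", i, e[i]);
		}
		return 0;
	}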
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
index 44a974ec472c..8833359556f3 100644
--- a/tools/testing/selftests/cgroup/test_memcontrol.c
+++ b/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -190,13 +190,6 @@ cleanup:
return ret;
}
-static int alloc_pagecache_50M(const char *cgroup, void *arg)
-{
- int fd = (long)arg;
-
- return alloc_pagecache(fd, MB(50));
-}
-
static int alloc_pagecache_50M_noexit(const char *cgroup, void *arg)
{
int fd = (long)arg;
@@ -247,33 +240,39 @@ static int cg_test_proc_killed(const char *cgroup)
/*
* First, this test creates the following hierarchy:
- * A memory.min = 50M, memory.max = 200M
- * A/B memory.min = 50M, memory.current = 50M
+ * A memory.min = 0, memory.max = 200M
+ * A/B memory.min = 50M
* A/B/C memory.min = 75M, memory.current = 50M
* A/B/D memory.min = 25M, memory.current = 50M
* A/B/E memory.min = 0, memory.current = 50M
* A/B/F memory.min = 500M, memory.current = 0
*
- * Usages are pagecache, but the test keeps a running
+ * (or memory.low if we test soft protection)
+ *
+ * Usages are pagecache and the test keeps a running
* process in every leaf cgroup.
* Then it creates A/G and creates a significant
- * memory pressure in it.
+ * memory pressure in A.
*
+ * Then it checks actual memory usages and expects that:
* A/B memory.current ~= 50M
- * A/B/C memory.current ~= 33M
- * A/B/D memory.current ~= 17M
- * A/B/F memory.current ~= 0
+ * A/B/C memory.current ~= 29M
+ * A/B/D memory.current ~= 21M
+ * A/B/E memory.current ~= 0
+ * A/B/F memory.current = 0
+ * (for the origin of the numbers, see the model in memcg_protection.m.)
*
* After that it tries to allocate more than there is
- * unprotected memory in A available, and checks
- * checks that memory.min protects pagecache even
- * in this case.
+ * unprotected memory in A available, and checks that:
+ * a) memory.min protects pagecache even in this case,
+ * b) memory.low allows reclaiming page cache with low events.
*/
-static int test_memcg_min(const char *root)
+static int test_memcg_protection(const char *root, bool min)
{
- int ret = KSFT_FAIL;
+ int ret = KSFT_FAIL, rc;
char *parent[3] = {NULL};
char *children[4] = {NULL};
+ const char *attribute = min ? "memory.min" : "memory.low";
long c[4];
int i, attempts;
int fd;
@@ -297,8 +296,10 @@ static int test_memcg_min(const char *root)
if (cg_create(parent[0]))
goto cleanup;
- if (cg_read_long(parent[0], "memory.min")) {
- ret = KSFT_SKIP;
+ if (cg_read_long(parent[0], attribute)) {
+ /* No memory.min on older kernels is fine */
+ if (min)
+ ret = KSFT_SKIP;
goto cleanup;
}
@@ -335,17 +336,15 @@ static int test_memcg_min(const char *root)
(void *)(long)fd);
}
- if (cg_write(parent[0], "memory.min", "50M"))
+ if (cg_write(parent[1], attribute, "50M"))
goto cleanup;
- if (cg_write(parent[1], "memory.min", "50M"))
+ if (cg_write(children[0], attribute, "75M"))
goto cleanup;
- if (cg_write(children[0], "memory.min", "75M"))
+ if (cg_write(children[1], attribute, "25M"))
goto cleanup;
- if (cg_write(children[1], "memory.min", "25M"))
+ if (cg_write(children[2], attribute, "0"))
goto cleanup;
- if (cg_write(children[2], "memory.min", "0"))
- goto cleanup;
- if (cg_write(children[3], "memory.min", "500M"))
+ if (cg_write(children[3], attribute, "500M"))
goto cleanup;
attempts = 0;
@@ -365,170 +364,35 @@ static int test_memcg_min(const char *root)
for (i = 0; i < ARRAY_SIZE(children); i++)
c[i] = cg_read_long(children[i], "memory.current");
- if (!values_close(c[0], MB(33), 10))
+ if (!values_close(c[0], MB(29), 10))
goto cleanup;
- if (!values_close(c[1], MB(17), 10))
+ if (!values_close(c[1], MB(21), 10))
goto cleanup;
if (c[3] != 0)
goto cleanup;
- if (!cg_run(parent[2], alloc_anon, (void *)MB(170)))
- goto cleanup;
-
- if (!values_close(cg_read_long(parent[1], "memory.current"), MB(50), 3))
- goto cleanup;
-
- ret = KSFT_PASS;
-
-cleanup:
- for (i = ARRAY_SIZE(children) - 1; i >= 0; i--) {
- if (!children[i])
- continue;
-
- cg_destroy(children[i]);
- free(children[i]);
- }
-
- for (i = ARRAY_SIZE(parent) - 1; i >= 0; i--) {
- if (!parent[i])
- continue;
-
- cg_destroy(parent[i]);
- free(parent[i]);
- }
- close(fd);
- return ret;
-}
-
-/*
- * First, this test creates the following hierarchy:
- * A memory.low = 50M, memory.max = 200M
- * A/B memory.low = 50M, memory.current = 50M
- * A/B/C memory.low = 75M, memory.current = 50M
- * A/B/D memory.low = 25M, memory.current = 50M
- * A/B/E memory.low = 0, memory.current = 50M
- * A/B/F memory.low = 500M, memory.current = 0
- *
- * Usages are pagecache.
- * Then it creates A/G an creates a significant
- * memory pressure in it.
- *
- * Then it checks actual memory usages and expects that:
- * A/B memory.current ~= 50M
- * A/B/ memory.current ~= 33M
- * A/B/D memory.current ~= 17M
- * A/B/F memory.current ~= 0
- *
- * After that it tries to allocate more than there is
- * unprotected memory in A available,
- * and checks low and oom events in memory.events.
- */
-static int test_memcg_low(const char *root)
-{
- int ret = KSFT_FAIL;
- char *parent[3] = {NULL};
- char *children[4] = {NULL};
- long low, oom;
- long c[4];
- int i;
- int fd;
-
- fd = get_temp_fd();
- if (fd < 0)
- goto cleanup;
-
- parent[0] = cg_name(root, "memcg_test_0");
- if (!parent[0])
- goto cleanup;
-
- parent[1] = cg_name(parent[0], "memcg_test_1");
- if (!parent[1])
- goto cleanup;
-
- parent[2] = cg_name(parent[0], "memcg_test_2");
- if (!parent[2])
- goto cleanup;
-
- if (cg_create(parent[0]))
- goto cleanup;
-
- if (cg_read_long(parent[0], "memory.low"))
- goto cleanup;
-
- if (cg_write(parent[0], "cgroup.subtree_control", "+memory"))
- goto cleanup;
-
- if (cg_write(parent[0], "memory.max", "200M"))
- goto cleanup;
-
- if (cg_write(parent[0], "memory.swap.max", "0"))
- goto cleanup;
-
- if (cg_create(parent[1]))
- goto cleanup;
-
- if (cg_write(parent[1], "cgroup.subtree_control", "+memory"))
+ rc = cg_run(parent[2], alloc_anon, (void *)MB(170));
+ if (min && !rc)
goto cleanup;
-
- if (cg_create(parent[2]))
+ else if (!min && rc) {
+ fprintf(stderr,
+ "memory.low prevents from allocating anon memory\n");
goto cleanup;
-
- for (i = 0; i < ARRAY_SIZE(children); i++) {
- children[i] = cg_name_indexed(parent[1], "child_memcg", i);
- if (!children[i])
- goto cleanup;
-
- if (cg_create(children[i]))
- goto cleanup;
-
- if (i > 2)
- continue;
-
- if (cg_run(children[i], alloc_pagecache_50M, (void *)(long)fd))
- goto cleanup;
}
- if (cg_write(parent[0], "memory.low", "50M"))
- goto cleanup;
- if (cg_write(parent[1], "memory.low", "50M"))
- goto cleanup;
- if (cg_write(children[0], "memory.low", "75M"))
- goto cleanup;
- if (cg_write(children[1], "memory.low", "25M"))
- goto cleanup;
- if (cg_write(children[2], "memory.low", "0"))
- goto cleanup;
- if (cg_write(children[3], "memory.low", "500M"))
- goto cleanup;
-
- if (cg_run(parent[2], alloc_anon, (void *)MB(148)))
- goto cleanup;
-
if (!values_close(cg_read_long(parent[1], "memory.current"), MB(50), 3))
goto cleanup;
- for (i = 0; i < ARRAY_SIZE(children); i++)
- c[i] = cg_read_long(children[i], "memory.current");
-
- if (!values_close(c[0], MB(33), 10))
- goto cleanup;
-
- if (!values_close(c[1], MB(17), 10))
- goto cleanup;
-
- if (c[3] != 0)
- goto cleanup;
-
- if (cg_run(parent[2], alloc_anon, (void *)MB(166))) {
- fprintf(stderr,
- "memory.low prevents from allocating anon memory\n");
+ if (min) {
+ ret = KSFT_PASS;
goto cleanup;
}
for (i = 0; i < ARRAY_SIZE(children); i++) {
- int no_low_events_index = has_recursiveprot ? 2 : 1;
+ int no_low_events_index = 1;
+ long low, oom;
oom = cg_read_key_long(children[i], "memory.events", "oom ");
low = cg_read_key_long(children[i], "memory.events", "low ");
@@ -564,6 +428,16 @@ cleanup:
return ret;
}
+static int test_memcg_min(const char *root)
+{
+ return test_memcg_protection(root, true);
+}
+
+static int test_memcg_low(const char *root)
+{
+ return test_memcg_protection(root, false);
+}
+
static int alloc_pagecache_max_30M(const char *cgroup, void *arg)
{
size_t size = MB(50);
@@ -1241,7 +1115,16 @@ static int test_memcg_oom_group_leaf_events(const char *root)
if (cg_read_key_long(child, "memory.events", "oom_kill ") <= 0)
goto cleanup;
- if (cg_read_key_long(parent, "memory.events", "oom_kill ") <= 0)
+ parent_oom_events = cg_read_key_long(
+ parent, "memory.events", "oom_kill ");
+ /*
+ * If memory_localevents is not enabled (the default), the parent should
+ * count OOM events in its children groups. Otherwise, it should not
+ * have observed any events.
+ */
+ if (has_localevents && parent_oom_events != 0)
+ goto cleanup;
+ else if (!has_localevents && parent_oom_events <= 0)
goto cleanup;
ret = KSFT_PASS;
@@ -1349,20 +1232,14 @@ static int test_memcg_oom_group_score_events(const char *root)
if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
goto cleanup;
- parent_oom_events = cg_read_key_long(
- parent, "memory.events", "oom_kill ");
- /*
- * If memory_localevents is not enabled (the default), the parent should
- * count OOM events in its children groups. Otherwise, it should not
- * have observed any events.
- */
- if ((has_localevents && parent_oom_events == 0) ||
- parent_oom_events > 0)
- ret = KSFT_PASS;
+ if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 3)
+ goto cleanup;
if (kill(safe_pid, SIGKILL))
goto cleanup;
+ ret = KSFT_PASS;
+
cleanup:
if (memcg)
cg_destroy(memcg);
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
index bc1c407651fc..5f362c0fd890 100644
--- a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
+++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
@@ -64,6 +64,7 @@ static int __do_binderfs_test(struct __test_metadata *_metadata)
device_path[sizeof(P_tmpdir "/binderfs_XXXXXX/") + BINDERFS_MAX_NAME];
static const char * const binder_features[] = {
"oneway_spam_detection",
+ "extended_error",
};
change_mountns(_metadata);
diff --git a/tools/testing/selftests/firmware/Makefile b/tools/testing/selftests/firmware/Makefile
index 40211cd8f0e6..7992969deaa2 100644
--- a/tools/testing/selftests/firmware/Makefile
+++ b/tools/testing/selftests/firmware/Makefile
@@ -4,7 +4,7 @@ CFLAGS = -Wall \
-O2
TEST_PROGS := fw_run_tests.sh
-TEST_FILES := fw_fallback.sh fw_filesystem.sh fw_lib.sh
+TEST_FILES := fw_fallback.sh fw_filesystem.sh fw_upload.sh fw_lib.sh
TEST_GEN_FILES := fw_namespace
include ../lib.mk
diff --git a/tools/testing/selftests/firmware/config b/tools/testing/selftests/firmware/config
index bf634dda0720..6e402519b117 100644
--- a/tools/testing/selftests/firmware/config
+++ b/tools/testing/selftests/firmware/config
@@ -3,3 +3,4 @@ CONFIG_FW_LOADER=y
CONFIG_FW_LOADER_USER_HELPER=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
+CONFIG_FW_UPLOAD=y
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index c2a2a100114b..1a99aea0549e 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -11,6 +11,9 @@ TEST_REQS_FW_SET_CUSTOM_PATH="yes"
TEST_DIR=$(dirname $0)
source $TEST_DIR/fw_lib.sh
+RUN_XZ="xz -C crc32 --lzma2=dict=2MiB"
+RUN_ZSTD="zstd -q"
+
check_mods
check_setup
verify_reqs
@@ -211,7 +214,7 @@ read_firmwares()
else
fwfile="$FW"
fi
- if [ "$1" = "xzonly" ]; then
+ if [ "$1" = "componly" ]; then
fwfile="${fwfile}-orig"
fi
for i in $(seq 0 3); do
@@ -235,7 +238,7 @@ read_partial_firmwares()
fwfile="${FW}"
fi
- if [ "$1" = "xzonly" ]; then
+ if [ "$1" = "componly" ]; then
fwfile="${fwfile}-orig"
fi
@@ -409,10 +412,8 @@ test_request_firmware_nowait_custom()
config_unset_uevent
RANDOM_FILE_PATH=$(setup_random_file)
RANDOM_FILE="$(basename $RANDOM_FILE_PATH)"
- if [ "$2" = "both" ]; then
- xz -9 -C crc32 -k $RANDOM_FILE_PATH
- elif [ "$2" = "xzonly" ]; then
- xz -9 -C crc32 $RANDOM_FILE_PATH
+ if [ -n "$2" -a "$2" != "normal" ]; then
+ compress_"$2"_"$COMPRESS_FORMAT" $RANDOM_FILE_PATH
fi
config_set_name $RANDOM_FILE
config_trigger_async
@@ -435,6 +436,32 @@ test_request_partial_firmware_into_buf()
echo "OK"
}
+do_tests ()
+{
+ mode="$1"
+ suffix="$2"
+
+ for i in $(seq 1 5); do
+ test_batched_request_firmware$suffix $i $mode
+ done
+
+ for i in $(seq 1 5); do
+ test_batched_request_firmware_into_buf$suffix $i $mode
+ done
+
+ for i in $(seq 1 5); do
+ test_batched_request_firmware_direct$suffix $i $mode
+ done
+
+ for i in $(seq 1 5); do
+ test_request_firmware_nowait_uevent$suffix $i $mode
+ done
+
+ for i in $(seq 1 5); do
+ test_request_firmware_nowait_custom$suffix $i $mode
+ done
+}
+
# Only continue if batched request triggers are present on the
# test-firmware driver
test_config_present
@@ -442,25 +469,7 @@ test_config_present
# test with the file present
echo
echo "Testing with the file present..."
-for i in $(seq 1 5); do
- test_batched_request_firmware $i normal
-done
-
-for i in $(seq 1 5); do
- test_batched_request_firmware_into_buf $i normal
-done
-
-for i in $(seq 1 5); do
- test_batched_request_firmware_direct $i normal
-done
-
-for i in $(seq 1 5); do
- test_request_firmware_nowait_uevent $i normal
-done
-
-for i in $(seq 1 5); do
- test_request_firmware_nowait_custom $i normal
-done
+do_tests normal
# Partial loads cannot use fallback, so do not repeat tests.
test_request_partial_firmware_into_buf 0 10
@@ -472,25 +481,7 @@ test_request_partial_firmware_into_buf 2 10
# a hung task, which would require a hard reset.
echo
echo "Testing with the file missing..."
-for i in $(seq 1 5); do
- test_batched_request_firmware_nofile $i
-done
-
-for i in $(seq 1 5); do
- test_batched_request_firmware_into_buf_nofile $i
-done
-
-for i in $(seq 1 5); do
- test_batched_request_firmware_direct_nofile $i
-done
-
-for i in $(seq 1 5); do
- test_request_firmware_nowait_uevent_nofile $i
-done
-
-for i in $(seq 1 5); do
- test_request_firmware_nowait_custom_nofile $i
-done
+do_tests nofile _nofile
# Partial loads cannot use fallback, so do not repeat tests.
test_request_partial_firmware_into_buf_nofile 0 10
@@ -498,55 +489,58 @@ test_request_partial_firmware_into_buf_nofile 0 5
test_request_partial_firmware_into_buf_nofile 1 6
test_request_partial_firmware_into_buf_nofile 2 10
-test "$HAS_FW_LOADER_COMPRESS" != "yes" && exit 0
+test_request_firmware_compressed ()
+{
+ export COMPRESS_FORMAT="$1"
-# test with both files present
-xz -9 -C crc32 -k $FW
-config_set_name $NAME
-echo
-echo "Testing with both plain and xz files present..."
-for i in $(seq 1 5); do
- test_batched_request_firmware $i both
-done
+ # test with both files present
+ compress_both_"$COMPRESS_FORMAT" $FW
+ compress_both_"$COMPRESS_FORMAT" $FW_INTO_BUF
-for i in $(seq 1 5); do
- test_batched_request_firmware_into_buf $i both
-done
+ config_set_name $NAME
+ echo
+ echo "Testing with both plain and $COMPRESS_FORMAT files present..."
+ do_tests both
-for i in $(seq 1 5); do
- test_batched_request_firmware_direct $i both
-done
+ # test with only compressed file present
+ mv "$FW" "${FW}-orig"
+ mv "$FW_INTO_BUF" "${FW_INTO_BUF}-orig"
-for i in $(seq 1 5); do
- test_request_firmware_nowait_uevent $i both
-done
+ config_set_name $NAME
+ echo
+ echo "Testing with only $COMPRESS_FORMAT file present..."
+ do_tests componly
-for i in $(seq 1 5); do
- test_request_firmware_nowait_custom $i both
-done
+ mv "${FW}-orig" "$FW"
+ mv "${FW_INTO_BUF}-orig" "$FW_INTO_BUF"
+}
-# test with only xz file present
-mv "$FW" "${FW}-orig"
-echo
-echo "Testing with only xz file present..."
-for i in $(seq 1 5); do
- test_batched_request_firmware $i xzonly
-done
-
-for i in $(seq 1 5); do
- test_batched_request_firmware_into_buf $i xzonly
-done
-
-for i in $(seq 1 5); do
- test_batched_request_firmware_direct $i xzonly
-done
-
-for i in $(seq 1 5); do
- test_request_firmware_nowait_uevent $i xzonly
-done
-
-for i in $(seq 1 5); do
- test_request_firmware_nowait_custom $i xzonly
-done
+compress_both_XZ ()
+{
+ $RUN_XZ -k "$@"
+}
+
+compress_componly_XZ ()
+{
+ $RUN_XZ "$@"
+}
+
+compress_both_ZSTD ()
+{
+ $RUN_ZSTD -k "$@"
+}
+
+compress_componly_ZSTD ()
+{
+ $RUN_ZSTD --rm "$@"
+}
+
+if test "$HAS_FW_LOADER_COMPRESS_XZ" = "yes"; then
+ test_request_firmware_compressed XZ
+fi
+
+if test "$HAS_FW_LOADER_COMPRESS_ZSTD" = "yes"; then
+ test_request_firmware_compressed ZSTD
+fi
exit 0
diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
index 5b8c0fedee76..7bffd67800bf 100755
--- a/tools/testing/selftests/firmware/fw_lib.sh
+++ b/tools/testing/selftests/firmware/fw_lib.sh
@@ -62,7 +62,9 @@ check_setup()
{
HAS_FW_LOADER_USER_HELPER="$(kconfig_has CONFIG_FW_LOADER_USER_HELPER=y)"
HAS_FW_LOADER_USER_HELPER_FALLBACK="$(kconfig_has CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y)"
- HAS_FW_LOADER_COMPRESS="$(kconfig_has CONFIG_FW_LOADER_COMPRESS=y)"
+ HAS_FW_LOADER_COMPRESS_XZ="$(kconfig_has CONFIG_FW_LOADER_COMPRESS_XZ=y)"
+ HAS_FW_LOADER_COMPRESS_ZSTD="$(kconfig_has CONFIG_FW_LOADER_COMPRESS_ZSTD=y)"
+ HAS_FW_UPLOAD="$(kconfig_has CONFIG_FW_UPLOAD=y)"
PROC_FW_IGNORE_SYSFS_FALLBACK="0"
PROC_FW_FORCE_SYSFS_FALLBACK="0"
@@ -98,9 +100,14 @@ check_setup()
OLD_FWPATH="$(cat /sys/module/firmware_class/parameters/path)"
- if [ "$HAS_FW_LOADER_COMPRESS" = "yes" ]; then
+ if [ "$HAS_FW_LOADER_COMPRESS_XZ" = "yes" ]; then
if ! which xz 2> /dev/null > /dev/null; then
- HAS_FW_LOADER_COMPRESS=""
+ HAS_FW_LOADER_COMPRESS_XZ=""
+ fi
+ fi
+ if [ "$HAS_FW_LOADER_COMPRESS_ZSTD" = "yes" ]; then
+ if ! which zstd 2> /dev/null > /dev/null; then
+ HAS_FW_LOADER_COMPRESS_ZSTD=""
fi
fi
}
@@ -113,6 +120,12 @@ verify_reqs()
exit 0
fi
fi
+ if [ "$TEST_REQS_FW_UPLOAD" = "yes" ]; then
+ if [ ! "$HAS_FW_UPLOAD" = "yes" ]; then
+ echo "firmware upload disabled so ignoring test"
+ exit 0
+ fi
+ fi
}
setup_tmp_file()
diff --git a/tools/testing/selftests/firmware/fw_run_tests.sh b/tools/testing/selftests/firmware/fw_run_tests.sh
index 777377078d5e..f6d95a2d5124 100755
--- a/tools/testing/selftests/firmware/fw_run_tests.sh
+++ b/tools/testing/selftests/firmware/fw_run_tests.sh
@@ -22,6 +22,10 @@ run_tests()
proc_set_force_sysfs_fallback $1
proc_set_ignore_sysfs_fallback $2
$TEST_DIR/fw_fallback.sh
+
+ proc_set_force_sysfs_fallback $1
+ proc_set_ignore_sysfs_fallback $2
+ $TEST_DIR/fw_upload.sh
}
run_test_config_0001()
diff --git a/tools/testing/selftests/firmware/fw_upload.sh b/tools/testing/selftests/firmware/fw_upload.sh
new file mode 100755
index 000000000000..c7a6f06c9adb
--- /dev/null
+++ b/tools/testing/selftests/firmware/fw_upload.sh
@@ -0,0 +1,214 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# This validates the user-initiated fw upload mechanism of the firmware
+# loader. It verifies that one or more firmware devices can be created
+# for a device driver. It also verifies the data transfer, the
+# cancellation support, and the error flows.
+set -e
+
+TEST_REQS_FW_UPLOAD="yes"
+TEST_DIR=$(dirname $0)
+
+progress_states="preparing transferring programming"
+errors="hw-error
+ timeout
+ device-busy
+ invalid-file-size
+ read-write-error
+ flash-wearout"
+error_abort="user-abort"
+fwname1=fw1
+fwname2=fw2
+fwname3=fw3
+
+source $TEST_DIR/fw_lib.sh
+
+check_mods
+check_setup
+verify_reqs
+
+trap "upload_finish" EXIT
+
+upload_finish() {
+ local fwdevs="$fwname1 $fwname2 $fwname3"
+
+ for name in $fwdevs; do
+ if [ -e "$DIR/$name" ]; then
+ echo -n "$name" > "$DIR"/upload_unregister
+ fi
+ done
+}
+
+upload_fw() {
+ local name="$1"
+ local file="$2"
+
+ echo 1 > "$DIR"/"$name"/loading
+ cat "$file" > "$DIR"/"$name"/data
+ echo 0 > "$DIR"/"$name"/loading
+}
+
+verify_fw() {
+ local name="$1"
+ local file="$2"
+
+ echo -n "$name" > "$DIR"/config_upload_name
+ if ! cmp "$file" "$DIR"/upload_read > /dev/null 2>&1; then
+ echo "$0: firmware compare for $name did not match" >&2
+ exit 1
+ fi
+
+ echo "$0: firmware upload for $name works" >&2
+ return 0
+}
+
+inject_error() {
+ local name="$1"
+ local status="$2"
+ local error="$3"
+
+ echo 1 > "$DIR"/"$name"/loading
+ echo -n "inject":"$status":"$error" > "$DIR"/"$name"/data
+ echo 0 > "$DIR"/"$name"/loading
+}
+
+await_status() {
+ local name="$1"
+ local expected="$2"
+ local status
+ local i
+
+ let i=0
+ while [ $i -lt 50 ]; do
+ status=$(cat "$DIR"/"$name"/status)
+ if [ "$status" = "$expected" ]; then
+ return 0;
+ fi
+ sleep 1e-03
+ let i=$i+1
+ done
+
+ echo "$0: Invalid status: Expected $expected, Actual $status" >&2
+ return 1;
+}
+
+await_idle() {
+ local name="$1"
+
+ await_status "$name" "idle"
+ return $?
+}
+
+expect_error() {
+ local name="$1"
+ local expected="$2"
+ local error=$(cat "$DIR"/"$name"/error)
+
+ if [ "$error" != "$expected" ]; then
+ echo "Invalid error: Expected $expected, Actual $error" >&2
+ return 1
+ fi
+
+ return 0
+}
+
+random_firmware() {
+ local bs="$1"
+ local count="$2"
+ local file=$(mktemp -p /tmp uploadfwXXX.bin)
+
+ dd if=/dev/urandom of="$file" bs="$bs" count="$count" > /dev/null 2>&1
+ echo "$file"
+}
+
+test_upload_cancel() {
+ local name="$1"
+ local status
+
+ for status in $progress_states; do
+ inject_error $name $status $error_abort
+ if ! await_status $name $status; then
+ exit 1
+ fi
+
+ echo 1 > "$DIR"/"$name"/cancel
+
+ if ! await_idle $name; then
+ exit 1
+ fi
+
+ if ! expect_error $name "$status":"$error_abort"; then
+ exit 1
+ fi
+ done
+
+ echo "$0: firmware upload cancellation works"
+ return 0
+}
+
+test_error_handling() {
+ local name=$1
+ local status
+ local error
+
+ for status in $progress_states; do
+ for error in $errors; do
+ inject_error $name $status $error
+
+ if ! await_idle $name; then
+ exit 1
+ fi
+
+ if ! expect_error $name "$status":"$error"; then
+ exit 1
+ fi
+
+ done
+ done
+ echo "$0: firmware upload error handling works"
+}
+
+test_fw_too_big() {
+ local name=$1
+ local fw_too_big=`random_firmware 512 5`
+ local expected="preparing:invalid-file-size"
+
+ upload_fw $name $fw_too_big
+ rm -f $fw_too_big
+
+ if ! await_idle $name; then
+ exit 1
+ fi
+
+ if ! expect_error $name $expected; then
+ exit 1
+ fi
+
+ echo "$0: oversized firmware error handling works"
+}
+
+echo -n "$fwname1" > "$DIR"/upload_register
+echo -n "$fwname2" > "$DIR"/upload_register
+echo -n "$fwname3" > "$DIR"/upload_register
+
+test_upload_cancel $fwname1
+test_error_handling $fwname1
+test_fw_too_big $fwname1
+
+fw_file1=`random_firmware 512 4`
+fw_file2=`random_firmware 512 3`
+fw_file3=`random_firmware 512 2`
+
+upload_fw $fwname1 $fw_file1
+upload_fw $fwname2 $fw_file2
+upload_fw $fwname3 $fw_file3
+
+verify_fw ${fwname1} ${fw_file1}
+verify_fw ${fwname2} ${fw_file2}
+verify_fw ${fwname3} ${fw_file3}
+
+echo -n "$fwname1" > "$DIR"/upload_unregister
+echo -n "$fwname2" > "$DIR"/upload_unregister
+echo -n "$fwname3" > "$DIR"/upload_unregister
+
+exit 0
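The status polling in fw_upload.sh above is a bounded read-compare-sleep loop on the device's status attribute. A rough C equivalent of await_status(), where the sysfs path below is a hypothetical example rather than a documented location, might look like:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Sketch of the script's await_status(): poll up to 50 times, 1 ms apart. */
	static int await_status(const char *path, const char *expected)
	{
		char buf[64];
		int i;

		for (i = 0; i < 50; i++) {
			FILE *f = fopen(path, "r");

			if (f && fgets(buf, sizeof(buf), f)) {
				buf[strcspn(buf, "\n")] = '\0';
				fclose(f);
				if (!strcmp(buf, expected))
					return 0;
			} else if (f) {
				fclose(f);
			}
			usleep(1000);	/* mirrors the script's "sleep 1e-03" */
		}
		return -1;
	}

	int main(void)
	{
		/* Hypothetical path, for illustration only */
		return await_status("/sys/class/firmware/fw1/status", "idle");
	}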
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
index 312d23780096..be754f5bcf79 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
@@ -25,6 +25,8 @@ if [ $L -ne 256 ]; then
exit_fail
fi
+cat kprobe_events >> $testlog
+
echo 1 > events/kprobes/enable
echo 0 > events/kprobes/enable
echo > kprobe_events
diff --git a/tools/testing/selftests/lkdtm/config b/tools/testing/selftests/lkdtm/config
index 46f39ee76208..5d52f64dfb43 100644
--- a/tools/testing/selftests/lkdtm/config
+++ b/tools/testing/selftests/lkdtm/config
@@ -2,10 +2,14 @@ CONFIG_LKDTM=y
CONFIG_DEBUG_LIST=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_FORTIFY_SOURCE=y
+CONFIG_GCC_PLUGIN_STACKLEAK=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
+CONFIG_INIT_ON_FREE_DEFAULT_ON=y
CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
CONFIG_UBSAN=y
CONFIG_UBSAN_BOUNDS=y
CONFIG_UBSAN_TRAP=y
CONFIG_STACKPROTECTOR_STRONG=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_SLUB_DEBUG_ON=y
diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt
index 243c781f0780..65e53eb0840b 100644
--- a/tools/testing/selftests/lkdtm/tests.txt
+++ b/tools/testing/selftests/lkdtm/tests.txt
@@ -64,16 +64,17 @@ REFCOUNT_DEC_AND_TEST_SATURATED Saturation detected: still saturated
REFCOUNT_SUB_AND_TEST_SATURATED Saturation detected: still saturated
#REFCOUNT_TIMING timing only
#ATOMIC_TIMING timing only
-USERCOPY_HEAP_SIZE_TO
-USERCOPY_HEAP_SIZE_FROM
-USERCOPY_HEAP_WHITELIST_TO
-USERCOPY_HEAP_WHITELIST_FROM
+USERCOPY_SLAB_SIZE_TO
+USERCOPY_SLAB_SIZE_FROM
+USERCOPY_SLAB_WHITELIST_TO
+USERCOPY_SLAB_WHITELIST_FROM
USERCOPY_STACK_FRAME_TO
USERCOPY_STACK_FRAME_FROM
USERCOPY_STACK_BEYOND
USERCOPY_KERNEL
STACKLEAK_ERASING OK: the rest of the thread stack is properly erased
CFI_FORWARD_PROTO
+CFI_BACKWARD call trace:|ok: control flow unchanged
FORTIFIED_STRSCPY
FORTIFIED_OBJECT
FORTIFIED_SUBOBJECT
diff --git a/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh b/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh
index f508657ee126..86e621b7b9c7 100755
--- a/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh
+++ b/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh
@@ -1,15 +1,14 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-# This test is for the accept_unsolicited_na feature to
+# This test is for the accept_untracked_na feature to
# enable RFC9131 behaviour. The following is the test-matrix.
# drop accept fwding behaviour
# ---- ------ ------ ----------------------------------------------
-# 1 X X Drop NA packet and don't pass up the stack
-# 0 0 X Pass NA packet up the stack, don't update NC
-# 0 1 0 Pass NA packet up the stack, don't update NC
-# 0 1 1 Pass NA packet up the stack, and add a STALE
-# NC entry
+# 1 X X Don't update NC
+# 0 0 X Don't update NC
+# 0 1 0 Don't update NC
+# 0 1 1 Add a STALE NC entry
ret=0
# Kselftest framework requirement - SKIP code is 4.
@@ -72,7 +71,7 @@ setup()
set -e
local drop_unsolicited_na=$1
- local accept_unsolicited_na=$2
+ local accept_untracked_na=$2
local forwarding=$3
# Setup two namespaces and a veth tunnel across them.
@@ -93,7 +92,7 @@ setup()
${IP_ROUTER_EXEC} sysctl -qw \
${ROUTER_CONF}.drop_unsolicited_na=${drop_unsolicited_na}
${IP_ROUTER_EXEC} sysctl -qw \
- ${ROUTER_CONF}.accept_unsolicited_na=${accept_unsolicited_na}
+ ${ROUTER_CONF}.accept_untracked_na=${accept_untracked_na}
${IP_ROUTER_EXEC} sysctl -qw ${ROUTER_CONF}.disable_ipv6=0
${IP_ROUTER} addr add ${ROUTER_ADDR_WITH_MASK} dev ${ROUTER_INTF}
@@ -144,13 +143,13 @@ link_up() {
verify_ndisc() {
local drop_unsolicited_na=$1
- local accept_unsolicited_na=$2
+ local accept_untracked_na=$2
local forwarding=$3
neigh_show_output=$(${IP_ROUTER} neigh show \
to ${HOST_ADDR} dev ${ROUTER_INTF} nud stale)
if [ ${drop_unsolicited_na} -eq 0 ] && \
- [ ${accept_unsolicited_na} -eq 1 ] && \
+ [ ${accept_untracked_na} -eq 1 ] && \
[ ${forwarding} -eq 1 ]; then
# Neighbour entry expected to be present for 011 case
[[ ${neigh_show_output} ]]
@@ -179,14 +178,14 @@ test_unsolicited_na_combination() {
test_unsolicited_na_common $1 $2 $3
test_msg=("test_unsolicited_na: "
"drop_unsolicited_na=$1 "
- "accept_unsolicited_na=$2 "
+ "accept_untracked_na=$2 "
"forwarding=$3")
log_test $? 0 "${test_msg[*]}"
cleanup
}
test_unsolicited_na_combinations() {
- # Args: drop_unsolicited_na accept_unsolicited_na forwarding
+ # Args: drop_unsolicited_na accept_untracked_na forwarding
# Expect entry
test_unsolicited_na_combination 0 1 1
diff --git a/tools/testing/selftests/net/psock_snd.c b/tools/testing/selftests/net/psock_snd.c
index 7d15e10a9fb6..edf1e6f80d41 100644
--- a/tools/testing/selftests/net/psock_snd.c
+++ b/tools/testing/selftests/net/psock_snd.c
@@ -389,6 +389,8 @@ int main(int argc, char **argv)
error(1, errno, "ip link set mtu");
if (system("ip addr add dev lo 172.17.0.1/24"))
error(1, errno, "ip addr add");
+ if (system("sysctl -w net.ipv4.conf.lo.accept_local=1"))
+ error(1, errno, "sysctl lo.accept_local");
run_test();
diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h
index b7d188fc87c7..b9fa9cd709df 100644
--- a/tools/testing/selftests/powerpc/include/utils.h
+++ b/tools/testing/selftests/powerpc/include/utils.h
@@ -135,6 +135,11 @@ do { \
#define PPC_FEATURE2_ARCH_3_1 0x00040000
#endif
+/* POWER10 features */
+#ifndef PPC_FEATURE2_MMA
+#define PPC_FEATURE2_MMA 0x00020000
+#endif
+
#if defined(__powerpc64__)
#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.gp_regs[PT_NIP]
#define UCONTEXT_MSR(UC) (UC)->uc_mcontext.gp_regs[PT_MSR]
diff --git a/tools/testing/selftests/powerpc/math/Makefile b/tools/testing/selftests/powerpc/math/Makefile
index fcc91c205984..3948f7c510aa 100644
--- a/tools/testing/selftests/powerpc/math/Makefile
+++ b/tools/testing/selftests/powerpc/math/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal fpu_denormal vmx_syscall vmx_preempt vmx_signal vsx_preempt
+TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal fpu_denormal vmx_syscall vmx_preempt vmx_signal vsx_preempt mma
top_srcdir = ../../../../..
include ../../lib.mk
@@ -17,3 +17,5 @@ $(OUTPUT)/vmx_signal: vmx_asm.S ../utils.c
$(OUTPUT)/vsx_preempt: CFLAGS += -mvsx
$(OUTPUT)/vsx_preempt: vsx_asm.S ../utils.c
+
+$(OUTPUT)/mma: mma.c mma.S ../utils.c
diff --git a/tools/testing/selftests/powerpc/math/mma.S b/tools/testing/selftests/powerpc/math/mma.S
new file mode 100644
index 000000000000..8528c9849565
--- /dev/null
+++ b/tools/testing/selftests/powerpc/math/mma.S
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Test basic matrix multiply assist (MMA) functionality if available.
+ *
+ * Copyright 2020, Alistair Popple, IBM Corp.
+ */
+ .global test_mma
+test_mma:
+ /* Load accumulator via VSX registers from image passed in r3 */
+ lxvh8x 4,0,3
+ lxvh8x 5,0,4
+
+ /* Clear and prime the accumulator (xxsetaccz) */
+ .long 0x7c030162
+
+ /* Prime the accumulator with MMA VSX move to accumulator
+ * X-form (xxmtacc) (not needed due to above zeroing) */
+ //.long 0x7c010162
+
+ /* xvi16ger2s */
+ .long 0xec042958
+
+ /* Store result in image passed in r5 */
+ stxvw4x 0,0,5
+ addi 5,5,16
+ stxvw4x 1,0,5
+ addi 5,5,16
+ stxvw4x 2,0,5
+ addi 5,5,16
+ stxvw4x 3,0,5
+ addi 5,5,16
+
+ blr
diff --git a/tools/testing/selftests/powerpc/math/mma.c b/tools/testing/selftests/powerpc/math/mma.c
new file mode 100644
index 000000000000..3a71808c993f
--- /dev/null
+++ b/tools/testing/selftests/powerpc/math/mma.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test basic matrix multiply assist (MMA) functionality if available.
+ *
+ * Copyright 2020, Alistair Popple, IBM Corp.
+ */
+#include <stdio.h>
+#include <stdint.h>
+
+#include "utils.h"
+
+extern void test_mma(uint16_t (*)[8], uint16_t (*)[8], uint32_t (*)[4*4]);
+
+static int mma(void)
+{
+ int i;
+ int rc = 0;
+ uint16_t x[] = {1, 0, 2, 0, 3, 0, 4, 0};
+ uint16_t y[] = {1, 0, 2, 0, 3, 0, 4, 0};
+ uint32_t z[4*4];
+ uint32_t exp[4*4] = {1, 2, 3, 4,
+ 2, 4, 6, 8,
+ 3, 6, 9, 12,
+ 4, 8, 12, 16};
+
+ SKIP_IF_MSG(!have_hwcap2(PPC_FEATURE2_ARCH_3_1), "Need ISAv3.1");
+ SKIP_IF_MSG(!have_hwcap2(PPC_FEATURE2_MMA), "Need MMA");
+
+ test_mma(&x, &y, &z);
+
+ for (i = 0; i < 16; i++) {
+ printf("MMA[%d] = %d ", i, z[i]);
+
+ if (z[i] == exp[i]) {
+ printf(" (Correct)\n");
+ } else {
+ printf(" (Incorrect)\n");
+ rc = 1;
+ }
+ }
+
+ return rc;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(mma, "mma");
+}
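Each row of x and y in mma.c carries a single non-zero halfword, so, assuming the usual reading of the xvi16ger2s rank-2 accumulate, the expected accumulator reduces to an outer product of {1, 2, 3, 4} with itself. A plain C reference that reproduces exp[]:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Non-zero halfwords of the test vectors above */
		uint16_t a[4] = { 1, 2, 3, 4 };
		uint16_t b[4] = { 1, 2, 3, 4 };
		uint32_t z[4][4];
		int i, j;

		for (i = 0; i < 4; i++)
			for (j = 0; j < 4; j++)
				z[i][j] = (uint32_t)a[i] * b[j];	/* matches exp[] */

		for (i = 0; i < 4; i++) {
			for (j = 0; j < 4; j++)
				printf("%3u ", z[i][j]);
			printf("\n");
		}
		return 0;
	}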
diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore
index aac4a59f9e28..4e1a294eec35 100644
--- a/tools/testing/selftests/powerpc/mm/.gitignore
+++ b/tools/testing/selftests/powerpc/mm/.gitignore
@@ -12,3 +12,4 @@ pkey_exec_prot
pkey_siginfo
stack_expansion_ldst
stack_expansion_signal
+large_vm_gpr_corruption
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index 40253abc6208..27dc09d0bfee 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -4,7 +4,8 @@ noarg:
TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \
large_vm_fork_separation bad_accesses pkey_exec_prot \
- pkey_siginfo stack_expansion_signal stack_expansion_ldst
+ pkey_siginfo stack_expansion_signal stack_expansion_ldst \
+ large_vm_gpr_corruption
TEST_PROGS := stress_code_patching.sh
TEST_GEN_PROGS_EXTENDED := tlbie_test
@@ -19,6 +20,7 @@ $(OUTPUT)/prot_sao: ../utils.c
$(OUTPUT)/wild_bctr: CFLAGS += -m64
$(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64
+$(OUTPUT)/large_vm_gpr_corruption: CFLAGS += -m64
$(OUTPUT)/bad_accesses: CFLAGS += -m64
$(OUTPUT)/pkey_exec_prot: CFLAGS += -m64
$(OUTPUT)/pkey_siginfo: CFLAGS += -m64
diff --git a/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c b/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c
new file mode 100644
index 000000000000..927bfae99ed9
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2022, Michael Ellerman, IBM Corp.
+//
+// Test that the 4PB address space SLB handling doesn't corrupt userspace registers
+// (r9-r13) due to an SLB fault while saving the PPR.
+//
+// The bug was introduced in f384796c4 ("powerpc/mm: Add support for handling > 512TB
+// address in SLB miss") and fixed in 4c2de74cc869 ("powerpc/64: Interrupts save PPR on
+// stack rather than thread_struct").
+//
+// To hit the bug requires the task struct and kernel stack to be in different segments.
+// Usually that requires more than 1TB of RAM, or if that's not practical, boot the kernel
+// with "disable_1tb_segments".
+//
+// The test works by creating mappings above 512TB, to trigger the large address space
+// support. It creates 64 mappings, double the size of the SLB, to cause SLB faults on
+// each access (assuming naive replacement). It then loops over those mappings touching
+// each, and checks that r9-r13 aren't corrupted.
+//
+// It then forks another child and tries again, because a new child process will get a new
+// kernel stack and thread struct allocated, which may be more optimally placed to trigger
+// the bug. It would probably be better to leave the previous child processes hanging
+// around, so that kernel stack & thread struct allocations are not reused, but that would
+// amount to a 30 second fork bomb. The current design reliably triggers the bug on
+// unpatched kernels.
+
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+#ifndef MAP_FIXED_NOREPLACE
+#define MAP_FIXED_NOREPLACE MAP_FIXED // "Should be safe" above 512TB
+#endif
+
+#define BASE_ADDRESS (1ul << 50) // 1PB
+#define STRIDE (2ul << 40) // 2TB
+#define SLB_SIZE 32
+#define NR_MAPPINGS (SLB_SIZE * 2)
+
+static volatile sig_atomic_t signaled;
+
+static void signal_handler(int sig)
+{
+ signaled = 1;
+}
+
+#define CHECK_REG(_reg) \
+ if (_reg != _reg##_orig) { \
+ printf(str(_reg) " corrupted! Expected 0x%lx != 0x%lx\n", _reg##_orig, \
+ _reg); \
+ _exit(1); \
+ }
+
+static int touch_mappings(void)
+{
+ unsigned long r9_orig, r10_orig, r11_orig, r12_orig, r13_orig;
+ unsigned long r9, r10, r11, r12, r13;
+ unsigned long addr, *p;
+ int i;
+
+ for (i = 0; i < NR_MAPPINGS; i++) {
+ addr = BASE_ADDRESS + (i * STRIDE);
+ p = (unsigned long *)addr;
+
+ asm volatile("mr %0, %%r9 ;" // Read original GPR values
+ "mr %1, %%r10 ;"
+ "mr %2, %%r11 ;"
+ "mr %3, %%r12 ;"
+ "mr %4, %%r13 ;"
+ "std %10, 0(%11) ;" // Trigger SLB fault
+ "mr %5, %%r9 ;" // Save possibly corrupted values
+ "mr %6, %%r10 ;"
+ "mr %7, %%r11 ;"
+ "mr %8, %%r12 ;"
+ "mr %9, %%r13 ;"
+ "mr %%r9, %0 ;" // Restore original values
+ "mr %%r10, %1 ;"
+ "mr %%r11, %2 ;"
+ "mr %%r12, %3 ;"
+ "mr %%r13, %4 ;"
+ : "=&b"(r9_orig), "=&b"(r10_orig), "=&b"(r11_orig),
+ "=&b"(r12_orig), "=&b"(r13_orig), "=&b"(r9), "=&b"(r10),
+ "=&b"(r11), "=&b"(r12), "=&b"(r13)
+ : "b"(i), "b"(p)
+ : "r9", "r10", "r11", "r12", "r13");
+
+ CHECK_REG(r9);
+ CHECK_REG(r10);
+ CHECK_REG(r11);
+ CHECK_REG(r12);
+ CHECK_REG(r13);
+ }
+
+ return 0;
+}
+
+static int test(void)
+{
+ unsigned long page_size, addr, *p;
+ struct sigaction action;
+ bool hash_mmu;
+ int i, status;
+ pid_t pid;
+
+ // This tests a hash MMU specific bug.
+ FAIL_IF(using_hash_mmu(&hash_mmu));
+ SKIP_IF(!hash_mmu);
+
+ page_size = sysconf(_SC_PAGESIZE);
+
+ for (i = 0; i < NR_MAPPINGS; i++) {
+ addr = BASE_ADDRESS + (i * STRIDE);
+
+ p = mmap((void *)addr, page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
+ if (p == MAP_FAILED) {
+ perror("mmap");
+ printf("Error: couldn't mmap(), confirm kernel has 4PB support?\n");
+ return 1;
+ }
+ }
+
+ action.sa_handler = signal_handler;
+ action.sa_flags = SA_RESTART;
+ FAIL_IF(sigaction(SIGALRM, &action, NULL) < 0);
+
+ // Seen to always crash in under ~10s on affected kernels.
+ alarm(30);
+
+ while (!signaled) {
+ // Fork new processes, to increase the chance that we hit the case where
+ // the kernel stack and task struct are in different segments.
+ pid = fork();
+ if (pid == 0)
+ exit(touch_mappings());
+
+ FAIL_IF(waitpid(-1, &status, 0) == -1);
+ FAIL_IF(WIFSIGNALED(status));
+ FAIL_IF(!WIFEXITED(status));
+ FAIL_IF(WEXITSTATUS(status));
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test, "large_vm_gpr_corruption");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S b/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S
deleted file mode 100644
index 08a7b5f133b9..000000000000
--- a/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2014, Michael Ellerman, IBM Corp.
- */
-
-#include <ppc-asm.h>
-
- .text
-
-FUNC_START(thirty_two_instruction_loop)
- cmpwi r3,0
- beqlr
- addi r4,r3,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1 # 28 addi's
- subi r3,r3,1
- b FUNC_NAME(thirty_two_instruction_loop)
-FUNC_END(thirty_two_instruction_loop)
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c
index fca054bbc094..c01a31d5f4ee 100644
--- a/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c
@@ -274,7 +274,7 @@ u64 *get_intr_regs(struct event *event, void *sample_buff)
return intr_regs;
}
-static const unsigned int __perf_reg_mask(const char *register_name)
+static const int __perf_reg_mask(const char *register_name)
{
if (!strcmp(register_name, "R0"))
return 0;
diff --git a/tools/testing/selftests/powerpc/security/spectre_v2.c b/tools/testing/selftests/powerpc/security/spectre_v2.c
index d42ca8c676c3..5b2abb719ef2 100644
--- a/tools/testing/selftests/powerpc/security/spectre_v2.c
+++ b/tools/testing/selftests/powerpc/security/spectre_v2.c
@@ -182,17 +182,23 @@ int spectre_v2_test(void)
case COUNT_CACHE_FLUSH_HW:
// These should all not affect userspace branch prediction
if (miss_percent > 15) {
- printf("Branch misses > 15%% unexpected in this configuration!\n");
- printf("Possible mis-match between reported & actual mitigation\n");
- /*
- * Such a mismatch may be caused by a guest system
- * reporting as vulnerable when the host is mitigated.
- * Return skip code to avoid detecting this as an error.
- * We are not vulnerable and reporting otherwise, so
- * missing such a mismatch is safe.
- */
- if (miss_percent > 95)
+ if (miss_percent > 95) {
+ /*
+ * Such a mismatch may be caused by a system being unaware
+ * the count cache is disabled. This may be to enable
+ * guest migration between hosts with different settings.
+ * Return skip code to avoid detecting this as an error.
+ * We are not vulnerable and reporting otherwise, so
+ * missing such a mismatch is safe.
+ */
+ printf("Branch misses > 95%% unexpected in this configuration.\n");
+ printf("Count cache likely disabled without Linux knowing.\n");
+ if (state == COUNT_CACHE_FLUSH_SW)
+ printf("WARNING: Kernel performing unnecessary flushes.\n");
return 4;
+ }
+ printf("Branch misses > 15%% unexpected in this configuration!\n");
+ printf("Possible mismatch between reported & actual mitigation\n");
return 1;
}
@@ -201,14 +207,14 @@ int spectre_v2_test(void)
// This seems to affect userspace branch prediction a bit?
if (miss_percent > 25) {
printf("Branch misses > 25%% unexpected in this configuration!\n");
- printf("Possible mis-match between reported & actual mitigation\n");
+ printf("Possible mismatch between reported & actual mitigation\n");
return 1;
}
break;
case COUNT_CACHE_DISABLED:
if (miss_percent < 95) {
- printf("Branch misses < 20%% unexpected in this configuration!\n");
- printf("Possible mis-match between reported & actual mitigation\n");
+ printf("Branch misses < 95%% unexpected in this configuration!\n");
+ printf("Possible mismatch between reported & actual mitigation\n");
return 1;
}
break;
diff --git a/tools/tracing/rtla/Makefile b/tools/tracing/rtla/Makefile
index 11fb417abb42..3822f4ea5f49 100644
--- a/tools/tracing/rtla/Makefile
+++ b/tools/tracing/rtla/Makefile
@@ -23,6 +23,7 @@ $(call allow-override,LD_SO_CONF_PATH,/etc/ld.so.conf.d/)
$(call allow-override,LDCONFIG,ldconfig)
INSTALL = install
+MKDIR = mkdir
FOPTS := -flto=auto -ffat-lto-objects -fexceptions -fstack-protector-strong \
-fasynchronous-unwind-tables -fstack-clash-protection
WOPTS := -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -Wno-maybe-uninitialized
@@ -31,7 +32,7 @@ TRACEFS_HEADERS := $$($(PKG_CONFIG) --cflags libtracefs)
CFLAGS := -O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS)
LDFLAGS := -ggdb
-LIBS := $$($(PKG_CONFIG) --libs libtracefs) -lprocps
+LIBS := $$($(PKG_CONFIG) --libs libtracefs)
SRC := $(wildcard src/*.c)
HDR := $(wildcard src/*.h)
@@ -57,6 +58,41 @@ else
DOCSRC = $(SRCTREE)/../../../Documentation/tools/rtla/
endif
+LIBTRACEEVENT_MIN_VERSION = 1.5
+LIBTRACEFS_MIN_VERSION = 1.3
+
+TEST_LIBTRACEEVENT = $(shell sh -c "$(PKG_CONFIG) --atleast-version $(LIBTRACEEVENT_MIN_VERSION) libtraceevent > /dev/null 2>&1 || echo n")
+ifeq ("$(TEST_LIBTRACEEVENT)", "n")
+.PHONY: warning_traceevent
+warning_traceevent:
+ @echo "********************************************"
+ @echo "** NOTICE: libtraceevent version $(LIBTRACEEVENT_MIN_VERSION) or higher not found"
+ @echo "**"
+ @echo "** Consider installing the latest libtraceevent from your"
+ @echo "** distribution, e.g., 'dnf install libtraceevent' on Fedora,"
+ @echo "** or from source:"
+ @echo "**"
+ @echo "** https://git.kernel.org/pub/scm/libs/libtrace/libtraceevent.git/ "
+ @echo "**"
+ @echo "********************************************"
+endif
+
+TEST_LIBTRACEFS = $(shell sh -c "$(PKG_CONFIG) --atleast-version $(LIBTRACEFS_MIN_VERSION) libtracefs > /dev/null 2>&1 || echo n")
+ifeq ("$(TEST_LIBTRACEFS)", "n")
+.PHONY: warning_tracefs
+warning_tracefs:
+ @echo "********************************************"
+ @echo "** NOTICE: libtracefs version $(LIBTRACEFS_MIN_VERSION) or higher not found"
+ @echo "**"
+ @echo "** Consider installing the latest libtracefs from your"
+ @echo "** distribution, e.g., 'dnf install libtracefs' on Fedora,"
+ @echo "** or from source:"
+ @echo "**"
+ @echo "** https://git.kernel.org/pub/scm/libs/libtrace/libtracefs.git/ "
+ @echo "**"
+ @echo "********************************************"
+endif
+
.PHONY: all
all: rtla
@@ -68,7 +104,7 @@ static: $(OBJ)
.PHONY: install
install: doc_install
- $(INSTALL) -d -m 755 $(DESTDIR)$(BINDIR)
+ $(MKDIR) -p $(DESTDIR)$(BINDIR)
$(INSTALL) rtla -m 755 $(DESTDIR)$(BINDIR)
$(STRIP) $(DESTDIR)$(BINDIR)/rtla
@test ! -f $(DESTDIR)$(BINDIR)/osnoise || rm $(DESTDIR)$(BINDIR)/osnoise
diff --git a/tools/tracing/rtla/README.txt b/tools/tracing/rtla/README.txt
index 6c88446f7e74..4af3fd40f171 100644
--- a/tools/tracing/rtla/README.txt
+++ b/tools/tracing/rtla/README.txt
@@ -1,19 +1,16 @@
RTLA: Real-Time Linux Analysis tools
-The rtla is a meta-tool that includes a set of commands that
-aims to analyze the real-time properties of Linux. But, instead of
-testing Linux as a black box, rtla leverages kernel tracing
-capabilities to provide precise information about the properties
-and root causes of unexpected results.
+The rtla meta-tool includes a set of commands that aims to analyze
+the real-time properties of Linux. Instead of testing Linux as a black box,
+rtla leverages kernel tracing capabilities to provide precise information
+about the properties and root causes of unexpected results.
Installing RTLA
-RTLA depends on some libraries and tools. More precisely, it depends on the
-following libraries:
+RTLA depends on the following libraries and tools:
- libtracefs
- libtraceevent
- - procps
It also depends on python3-docutils to compile man pages.
diff --git a/tools/tracing/rtla/src/osnoise_hist.c b/tools/tracing/rtla/src/osnoise_hist.c
index b4380d45cacd..5d7ea479ac89 100644
--- a/tools/tracing/rtla/src/osnoise_hist.c
+++ b/tools/tracing/rtla/src/osnoise_hist.c
@@ -809,7 +809,7 @@ int osnoise_hist_main(int argc, char *argv[])
retval = set_comm_sched_attr("osnoise/", &params->sched_param);
if (retval) {
err_msg("Failed to set sched parameters\n");
- goto out_hist;
+ goto out_free;
}
}
@@ -819,7 +819,7 @@ int osnoise_hist_main(int argc, char *argv[])
record = osnoise_init_trace_tool("osnoise");
if (!record) {
err_msg("Failed to enable the trace instance\n");
- goto out_hist;
+ goto out_free;
}
if (params->events) {
@@ -869,6 +869,7 @@ int osnoise_hist_main(int argc, char *argv[])
out_hist:
trace_events_destroy(&record->trace, params->events);
params->events = NULL;
+out_free:
osnoise_free_histogram(tool->data);
out_destroy:
osnoise_destroy_tool(record);
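
The new out_free label matters because, before osnoise_init_trace_tool() has succeeded, record is still NULL, so jumping to the old out_hist label would let trace_events_destroy(&record->trace, ...) dereference a NULL pointer; out_free lands below that call so only the already-allocated data is released. A minimal standalone sketch of the same ordered-cleanup idiom (hypothetical names, not rtla code):

    #include <stdlib.h>

    /* Sketch: later failures jump to later labels, so cleanup only
     * ever touches resources that were actually created. */
    static int run(void)
    {
            char *data = NULL, *record = NULL;
            int ret = -1;

            data = malloc(64);
            if (!data)
                    goto out;               /* nothing allocated yet */

            record = malloc(64);
            if (!record)
                    goto out_free;          /* free data, never touch record */

            /* ... use data and record ... */
            ret = 0;

            /* success path falls through the labels in reverse order */
            free(record);
    out_free:
            free(data);
    out:
            return ret;
    }

    int main(void)
    {
            return run() ? EXIT_FAILURE : EXIT_SUCCESS;
    }
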
diff --git a/tools/tracing/rtla/src/osnoise_top.c b/tools/tracing/rtla/src/osnoise_top.c
index 72c2fd6ce005..76479bfb2922 100644
--- a/tools/tracing/rtla/src/osnoise_top.c
+++ b/tools/tracing/rtla/src/osnoise_top.c
@@ -572,7 +572,7 @@ int osnoise_top_main(int argc, char **argv)
retval = osnoise_top_apply_config(tool, params);
if (retval) {
err_msg("Could not apply config\n");
- goto out_top;
+ goto out_free;
}
trace = &tool->trace;
@@ -580,14 +580,14 @@ int osnoise_top_main(int argc, char **argv)
retval = enable_osnoise(trace);
if (retval) {
err_msg("Failed to enable osnoise tracer\n");
- goto out_top;
+ goto out_free;
}
if (params->set_sched) {
retval = set_comm_sched_attr("osnoise/", &params->sched_param);
if (retval) {
err_msg("Failed to set sched parameters\n");
- goto out_top;
+ goto out_free;
}
}
@@ -597,7 +597,7 @@ int osnoise_top_main(int argc, char **argv)
record = osnoise_init_trace_tool("osnoise");
if (!record) {
err_msg("Failed to enable the trace instance\n");
- goto out_top;
+ goto out_free;
}
if (params->events) {
@@ -649,6 +649,7 @@ int osnoise_top_main(int argc, char **argv)
out_top:
trace_events_destroy(&record->trace, params->events);
params->events = NULL;
+out_free:
osnoise_free_top(tool->data);
osnoise_destroy_tool(record);
osnoise_destroy_tool(tool);
diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
index dc908126c610..f3ec628f5e51 100644
--- a/tools/tracing/rtla/src/timerlat_hist.c
+++ b/tools/tracing/rtla/src/timerlat_hist.c
@@ -821,7 +821,7 @@ int timerlat_hist_main(int argc, char *argv[])
retval = timerlat_hist_apply_config(tool, params);
if (retval) {
err_msg("Could not apply config\n");
- goto out_hist;
+ goto out_free;
}
trace = &tool->trace;
@@ -829,14 +829,14 @@ int timerlat_hist_main(int argc, char *argv[])
retval = enable_timerlat(trace);
if (retval) {
err_msg("Failed to enable timerlat tracer\n");
- goto out_hist;
+ goto out_free;
}
if (params->set_sched) {
retval = set_comm_sched_attr("timerlat/", &params->sched_param);
if (retval) {
err_msg("Failed to set sched parameters\n");
- goto out_hist;
+ goto out_free;
}
}
@@ -844,7 +844,7 @@ int timerlat_hist_main(int argc, char *argv[])
dma_latency_fd = set_cpu_dma_latency(params->dma_latency);
if (dma_latency_fd < 0) {
err_msg("Could not set /dev/cpu_dma_latency.\n");
- goto out_hist;
+ goto out_free;
}
}
@@ -854,7 +854,7 @@ int timerlat_hist_main(int argc, char *argv[])
record = osnoise_init_trace_tool("timerlat");
if (!record) {
err_msg("Failed to enable the trace instance\n");
- goto out_hist;
+ goto out_free;
}
if (params->events) {
@@ -904,6 +904,7 @@ out_hist:
close(dma_latency_fd);
trace_events_destroy(&record->trace, params->events);
params->events = NULL;
+out_free:
timerlat_free_histogram(tool->data);
osnoise_destroy_tool(record);
osnoise_destroy_tool(tool);
diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
index 1f754c3df53f..35452a1d45e9 100644
--- a/tools/tracing/rtla/src/timerlat_top.c
+++ b/tools/tracing/rtla/src/timerlat_top.c
@@ -612,7 +612,7 @@ int timerlat_top_main(int argc, char *argv[])
retval = timerlat_top_apply_config(top, params);
if (retval) {
err_msg("Could not apply config\n");
- goto out_top;
+ goto out_free;
}
trace = &top->trace;
@@ -620,14 +620,14 @@ int timerlat_top_main(int argc, char *argv[])
retval = enable_timerlat(trace);
if (retval) {
err_msg("Failed to enable timerlat tracer\n");
- goto out_top;
+ goto out_free;
}
if (params->set_sched) {
retval = set_comm_sched_attr("timerlat/", &params->sched_param);
if (retval) {
err_msg("Failed to set sched parameters\n");
- goto out_top;
+ goto out_free;
}
}
@@ -635,7 +635,7 @@ int timerlat_top_main(int argc, char *argv[])
dma_latency_fd = set_cpu_dma_latency(params->dma_latency);
if (dma_latency_fd < 0) {
err_msg("Could not set /dev/cpu_dma_latency.\n");
- goto out_top;
+ goto out_free;
}
}
@@ -645,7 +645,7 @@ int timerlat_top_main(int argc, char *argv[])
record = osnoise_init_trace_tool("timerlat");
if (!record) {
err_msg("Failed to enable the trace instance\n");
- goto out_top;
+ goto out_free;
}
if (params->events) {
@@ -699,6 +699,7 @@ out_top:
close(dma_latency_fd);
trace_events_destroy(&record->trace, params->events);
params->events = NULL;
+out_free:
timerlat_free_top(top->data);
osnoise_destroy_tool(record);
osnoise_destroy_tool(top);
diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c
index da2b590edaed..5352167a1e75 100644
--- a/tools/tracing/rtla/src/utils.c
+++ b/tools/tracing/rtla/src/utils.c
@@ -3,7 +3,7 @@
* Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <bristot@kernel.org>
*/
-#include <proc/readproc.h>
+#include <dirent.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
@@ -255,50 +255,114 @@ int __set_sched_attr(int pid, struct sched_attr *attr)
retval = sched_setattr(pid, attr, flags);
if (retval < 0) {
- err_msg("boost_with_deadline failed to boost pid %d: %s\n",
+ err_msg("Failed to set sched attributes to the pid %d: %s\n",
pid, strerror(errno));
return 1;
}
return 0;
}
+
+/*
+ * procfs_is_workload_pid - check if a procfs entry contains a comm_prefix* comm
+ *
+ * Check if the procfs entry is a directory of a process, and then check if the
+ * process has a comm with the prefix set in char *comm_prefix. As the
+ * current users of this function only check for kernel threads, there is no
+ * need to check the individual threads of the process.
+ *
+ * Return: True if the proc_entry contains a comm file with comm_prefix*.
+ * Otherwise returns false.
+ */
+static int procfs_is_workload_pid(const char *comm_prefix, struct dirent *proc_entry)
+{
+ char buffer[MAX_PATH];
+ int comm_fd, retval;
+ char *t_name;
+
+ if (proc_entry->d_type != DT_DIR)
+ return 0;
+
+ if (*proc_entry->d_name == '.')
+ return 0;
+
+ /* check if the string is a pid */
+ for (t_name = proc_entry->d_name; *t_name; t_name++) {
+ if (!isdigit(*t_name))
+ break;
+ }
+
+ if (*t_name != '\0')
+ return 0;
+
+ snprintf(buffer, MAX_PATH, "/proc/%s/comm", proc_entry->d_name);
+ comm_fd = open(buffer, O_RDONLY);
+ if (comm_fd < 0)
+ return 0;
+
+ memset(buffer, 0, MAX_PATH);
+ retval = read(comm_fd, buffer, MAX_PATH);
+
+ close(comm_fd);
+
+ if (retval <= 0)
+ return 0;
+
+ retval = strncmp(comm_prefix, buffer, strlen(comm_prefix));
+ if (retval)
+ return 0;
+
+ /* comm already has a trailing \n */
+ debug_msg("Found workload pid:%s comm:%s", proc_entry->d_name, buffer);
+
+ return 1;
+}
+
/*
- * set_comm_sched_attr - set sched params to threads starting with char *comm
+ * set_comm_sched_attr - set sched params to threads starting with char *comm_prefix
*
- * This function uses procps to list the currently running threads and then
- * set the sched_attr *attr to the threads that start with char *comm. It is
+ * This function uses procfs to list the currently running threads and then set the
+ * sched_attr *attr to the threads that start with char *comm_prefix. It is
* mainly used to set the priority to the kernel threads created by the
* tracers.
*/
-int set_comm_sched_attr(const char *comm, struct sched_attr *attr)
+int set_comm_sched_attr(const char *comm_prefix, struct sched_attr *attr)
{
- int flags = PROC_FILLCOM | PROC_FILLSTAT;
- PROCTAB *ptp;
- proc_t task;
+ struct dirent *proc_entry;
+ DIR *procfs;
int retval;
- ptp = openproc(flags);
- if (!ptp) {
- err_msg("error openproc()\n");
- return -ENOENT;
+ if (strlen(comm_prefix) >= MAX_PATH) {
+ err_msg("Command prefix is too long: %d < strlen(%s)\n",
+ MAX_PATH, comm_prefix);
+ return 1;
}
- memset(&task, 0, sizeof(task));
+ procfs = opendir("/proc");
+ if (!procfs) {
+ err_msg("Could not open procfs\n");
+ return 1;
+ }
- while (readproc(ptp, &task)) {
- retval = strncmp(comm, task.cmd, strlen(comm));
- if (retval)
+ while ((proc_entry = readdir(procfs))) {
+
+ retval = procfs_is_workload_pid(comm_prefix, proc_entry);
+ if (!retval)
continue;
- retval = __set_sched_attr(task.tid, attr);
- if (retval)
+
+ /* procfs_is_workload_pid confirmed it is a pid */
+ retval = __set_sched_attr(atoi(proc_entry->d_name), attr);
+ if (retval) {
+ err_msg("Error setting sched attributes for pid:%s\n", proc_entry->d_name);
goto out_err;
- }
+ }
- closeproc(ptp);
+ debug_msg("Set sched attributes for pid:%s\n", proc_entry->d_name);
+ }
return 0;
out_err:
- closeproc(ptp);
+ closedir(procfs);
return 1;
}
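
The rewritten set_comm_sched_attr() above drops the procps dependency and scans /proc directly: entries whose names are purely numeric are treated as PIDs, /proc/<pid>/comm is read, and the comm_prefix is matched before sched_setattr() is applied. As a rough standalone illustration of that scan (a hypothetical demo program using only POSIX interfaces, not rtla code), the following lists matching PIDs:

    #include <ctype.h>
    #include <dirent.h>
    #include <fcntl.h>
    #include <limits.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            const char *prefix = argc > 1 ? argv[1] : "osnoise/";
            struct dirent *entry;
            DIR *procfs = opendir("/proc");

            if (!procfs)
                    return 1;

            while ((entry = readdir(procfs))) {
                    char path[PATH_MAX], comm[64] = { 0 };
                    const char *p = entry->d_name;
                    ssize_t n;
                    int fd;

                    /* only purely numeric directory names are PIDs */
                    while (isdigit((unsigned char)*p))
                            p++;
                    if (*p != '\0' || p == entry->d_name)
                            continue;

                    snprintf(path, sizeof(path), "/proc/%s/comm", entry->d_name);
                    fd = open(path, O_RDONLY);
                    if (fd < 0)
                            continue;
                    n = read(fd, comm, sizeof(comm) - 1);
                    close(fd);
                    if (n <= 0)
                            continue;

                    if (!strncmp(prefix, comm, strlen(prefix)))
                            printf("pid %s matches: %s", entry->d_name, comm); /* comm keeps its '\n' */
            }

            closedir(procfs);
            return 0;
    }
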
diff --git a/tools/tracing/rtla/src/utils.h b/tools/tracing/rtla/src/utils.h
index fa08e374870a..5571afd3b549 100644
--- a/tools/tracing/rtla/src/utils.h
+++ b/tools/tracing/rtla/src/utils.h
@@ -6,6 +6,7 @@
* '18446744073709551615\0'
*/
#define BUFF_U64_STR_SIZE 24
+#define MAX_PATH 1024
#define container_of(ptr, type, member)({ \
const typeof(((type *)0)->member) *__mptr = (ptr); \
@@ -53,5 +54,5 @@ struct sched_attr {
};
int parse_prio(char *arg, struct sched_attr *sched_param);
-int set_comm_sched_attr(const char *comm, struct sched_attr *attr);
+int set_comm_sched_attr(const char *comm_prefix, struct sched_attr *attr);
int set_cpu_dma_latency(int32_t latency);
diff --git a/tools/usb/testusb.c b/tools/usb/testusb.c
index 69c3ead25313..474bae868b35 100644
--- a/tools/usb/testusb.c
+++ b/tools/usb/testusb.c
@@ -482,7 +482,7 @@ usage:
}
if (not)
return 0;
- if (testdevs && testdevs->next == 0 && !device)
+ if (testdevs && !testdevs->next && !device)
device = testdevs->name;
for (entry = testdevs; entry; entry = entry->next) {
int status;
diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
index 0e2c8a5838b1..dc838e26a5b9 100644
--- a/usr/gen_init_cpio.c
+++ b/usr/gen_init_cpio.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
+#include <stdint.h>
+#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
@@ -20,10 +22,12 @@
#define xstr(s) #s
#define str(s) xstr(s)
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
static unsigned int offset;
static unsigned int ino = 721;
static time_t default_mtime;
+static bool do_csum = false;
struct file_handler {
const char *type;
@@ -77,7 +81,7 @@ static void cpio_trailer(void)
sprintf(s, "%s%08X%08X%08lX%08lX%08X%08lX"
"%08X%08X%08X%08X%08X%08X%08X",
- "070701", /* magic */
+ do_csum ? "070702" : "070701", /* magic */
0, /* ino */
0, /* mode */
(long) 0, /* uid */
@@ -109,7 +113,7 @@ static int cpio_mkslink(const char *name, const char *target,
name++;
sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
"%08X%08X%08X%08X%08X%08X%08X",
- "070701", /* magic */
+ do_csum ? "070702" : "070701", /* magic */
ino++, /* ino */
S_IFLNK | mode, /* mode */
(long) uid, /* uid */
@@ -158,7 +162,7 @@ static int cpio_mkgeneric(const char *name, unsigned int mode,
name++;
sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
"%08X%08X%08X%08X%08X%08X%08X",
- "070701", /* magic */
+ do_csum ? "070702" : "070701", /* magic */
ino++, /* ino */
mode, /* mode */
(long) uid, /* uid */
@@ -252,7 +256,7 @@ static int cpio_mknod(const char *name, unsigned int mode,
name++;
sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
"%08X%08X%08X%08X%08X%08X%08X",
- "070701", /* magic */
+ do_csum ? "070702" : "070701", /* magic */
ino++, /* ino */
mode, /* mode */
(long) uid, /* uid */
@@ -292,19 +296,42 @@ static int cpio_mknod_line(const char *line)
return rc;
}
+static int cpio_mkfile_csum(int fd, unsigned long size, uint32_t *csum)
+{
+ while (size) {
+ unsigned char filebuf[65536];
+ ssize_t this_read;
+ size_t i, this_size = MIN(size, sizeof(filebuf));
+
+ this_read = read(fd, filebuf, this_size);
+ if (this_read <= 0 || this_read > this_size)
+ return -1;
+
+ for (i = 0; i < this_read; i++)
+ *csum += filebuf[i];
+
+ size -= this_read;
+ }
+ /* seek back to the start for data segment I/O */
+ if (lseek(fd, 0, SEEK_SET) < 0)
+ return -1;
+
+ return 0;
+}
+
static int cpio_mkfile(const char *name, const char *location,
unsigned int mode, uid_t uid, gid_t gid,
unsigned int nlinks)
{
char s[256];
- char *filebuf = NULL;
struct stat buf;
- long size;
+ unsigned long size;
int file = -1;
int retval;
int rc = -1;
int namesize;
unsigned int i;
+ uint32_t csum = 0;
mode |= S_IFREG;
@@ -326,29 +353,29 @@ static int cpio_mkfile(const char *name, const char *location,
buf.st_mtime = 0xffffffff;
}
- filebuf = malloc(buf.st_size);
- if (!filebuf) {
- fprintf (stderr, "out of memory\n");
+ if (buf.st_size > 0xffffffff) {
+ fprintf(stderr, "%s: Size exceeds maximum cpio file size\n",
+ location);
goto error;
}
- retval = read (file, filebuf, buf.st_size);
- if (retval < 0) {
- fprintf (stderr, "Can not read %s file\n", location);
+ if (do_csum && cpio_mkfile_csum(file, buf.st_size, &csum) < 0) {
+ fprintf(stderr, "Failed to checksum file %s\n", location);
goto error;
}
size = 0;
for (i = 1; i <= nlinks; i++) {
/* data goes on last link */
- if (i == nlinks) size = buf.st_size;
+ if (i == nlinks)
+ size = buf.st_size;
if (name[0] == '/')
name++;
namesize = strlen(name) + 1;
sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
"%08lX%08X%08X%08X%08X%08X%08X",
- "070701", /* magic */
+ do_csum ? "070702" : "070701", /* magic */
ino, /* ino */
mode, /* mode */
(long) uid, /* uid */
@@ -361,28 +388,39 @@ static int cpio_mkfile(const char *name, const char *location,
0, /* rmajor */
0, /* rminor */
namesize, /* namesize */
- 0); /* chksum */
+ size ? csum : 0); /* chksum */
push_hdr(s);
push_string(name);
push_pad();
- if (size) {
- if (fwrite(filebuf, size, 1, stdout) != 1) {
+ while (size) {
+ unsigned char filebuf[65536];
+ ssize_t this_read;
+ size_t this_size = MIN(size, sizeof(filebuf));
+
+ this_read = read(file, filebuf, this_size);
+ if (this_read <= 0 || this_read > this_size) {
+ fprintf(stderr, "Can not read %s file\n", location);
+ goto error;
+ }
+
+ if (fwrite(filebuf, this_read, 1, stdout) != 1) {
fprintf(stderr, "writing filebuf failed\n");
goto error;
}
- offset += size;
- push_pad();
+ offset += this_read;
+ size -= this_read;
}
+ push_pad();
name += namesize;
}
ino++;
rc = 0;
-
+
error:
- if (filebuf) free(filebuf);
- if (file >= 0) close(file);
+ if (file >= 0)
+ close(file);
return rc;
}
@@ -458,7 +496,7 @@ static int cpio_mkfile_line(const char *line)
static void usage(const char *prog)
{
fprintf(stderr, "Usage:\n"
- "\t%s [-t <timestamp>] <cpio_list>\n"
+ "\t%s [-t <timestamp>] [-c] <cpio_list>\n"
"\n"
"<cpio_list> is a file containing newline separated entries that\n"
"describe the files to be included in the initramfs archive:\n"
@@ -493,7 +531,8 @@ static void usage(const char *prog)
"\n"
"<timestamp> is time in seconds since Epoch that will be used\n"
"as mtime for symlinks, special files and directories. The default\n"
- "is to use the current time for these entries.\n",
+ "is to use the current time for these entries.\n"
+ "-c: calculate and store 32-bit checksums for file data.\n",
prog);
}
@@ -535,7 +574,7 @@ int main (int argc, char *argv[])
default_mtime = time(NULL);
while (1) {
- int opt = getopt(argc, argv, "t:h");
+ int opt = getopt(argc, argv, "t:ch");
char *invalid;
if (opt == -1)
@@ -550,6 +589,9 @@ int main (int argc, char *argv[])
exit(1);
}
break;
+ case 'c':
+ do_csum = true;
+ break;
case 'h':
case '?':
usage(argv[0]);
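
With the new -c option, gen_init_cpio switches the header magic to "070702" and stores a plain 32-bit sum of the file's data bytes in the chksum field, as computed by cpio_mkfile_csum() above. A small standalone sketch of the same computation, handy for checking what ends up in a header (a hypothetical helper, not part of the kernel build):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: newc "070702" checksum is just the 32-bit sum of all data bytes. */
    int main(int argc, char **argv)
    {
            unsigned char buf[65536];
            uint32_t csum = 0;
            size_t n;
            FILE *f;

            if (argc < 2) {
                    fprintf(stderr, "usage: %s <file>\n", argv[0]);
                    return 1;
            }

            f = fopen(argv[1], "rb");
            if (!f) {
                    perror(argv[1]);
                    return 1;
            }

            while ((n = fread(buf, 1, sizeof(buf), f)) > 0) {
                    size_t i;

                    for (i = 0; i < n; i++)
                            csum += buf[i];  /* same byte-wise sum as cpio_mkfile_csum() */
            }

            fclose(f);
            printf("%08X\n", (unsigned int)csum);  /* value written into the chksum field */
            return 0;
    }
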
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 8fcbc50221c2..ce1b01d02c51 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -23,7 +23,7 @@
struct kvm_vfio_group {
struct list_head node;
- struct vfio_group *vfio_group;
+ struct file *file;
};
struct kvm_vfio {
@@ -32,118 +32,61 @@ struct kvm_vfio {
bool noncoherent;
};
-static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
+static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
- struct vfio_group *vfio_group;
- struct vfio_group *(*fn)(struct file *);
+ void (*fn)(struct file *file, struct kvm *kvm);
- fn = symbol_get(vfio_group_get_external_user);
- if (!fn)
- return ERR_PTR(-EINVAL);
-
- vfio_group = fn(filep);
-
- symbol_put(vfio_group_get_external_user);
-
- return vfio_group;
-}
-
-static bool kvm_vfio_external_group_match_file(struct vfio_group *group,
- struct file *filep)
-{
- bool ret, (*fn)(struct vfio_group *, struct file *);
-
- fn = symbol_get(vfio_external_group_match_file);
- if (!fn)
- return false;
-
- ret = fn(group, filep);
-
- symbol_put(vfio_external_group_match_file);
-
- return ret;
-}
-
-static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
-{
- void (*fn)(struct vfio_group *);
-
- fn = symbol_get(vfio_group_put_external_user);
- if (!fn)
- return;
-
- fn(vfio_group);
-
- symbol_put(vfio_group_put_external_user);
-}
-
-static void kvm_vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
-{
- void (*fn)(struct vfio_group *, struct kvm *);
-
- fn = symbol_get(vfio_group_set_kvm);
+ fn = symbol_get(vfio_file_set_kvm);
if (!fn)
return;
- fn(group, kvm);
+ fn(file, kvm);
- symbol_put(vfio_group_set_kvm);
+ symbol_put(vfio_file_set_kvm);
}
-static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group)
+static bool kvm_vfio_file_enforced_coherent(struct file *file)
{
- long (*fn)(struct vfio_group *, unsigned long);
- long ret;
+ bool (*fn)(struct file *file);
+ bool ret;
- fn = symbol_get(vfio_external_check_extension);
+ fn = symbol_get(vfio_file_enforced_coherent);
if (!fn)
return false;
- ret = fn(vfio_group, VFIO_DMA_CC_IOMMU);
+ ret = fn(file);
- symbol_put(vfio_external_check_extension);
+ symbol_put(vfio_file_enforced_coherent);
- return ret > 0;
+ return ret;
}
-#ifdef CONFIG_SPAPR_TCE_IOMMU
-static int kvm_vfio_external_user_iommu_id(struct vfio_group *vfio_group)
+static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
{
- int (*fn)(struct vfio_group *);
- int ret = -EINVAL;
+ struct iommu_group *(*fn)(struct file *file);
+ struct iommu_group *ret;
- fn = symbol_get(vfio_external_user_iommu_id);
+ fn = symbol_get(vfio_file_iommu_group);
if (!fn)
- return ret;
+ return NULL;
- ret = fn(vfio_group);
+ ret = fn(file);
- symbol_put(vfio_external_user_iommu_id);
+ symbol_put(vfio_file_iommu_group);
return ret;
}
-static struct iommu_group *kvm_vfio_group_get_iommu_group(
- struct vfio_group *group)
-{
- int group_id = kvm_vfio_external_user_iommu_id(group);
-
- if (group_id < 0)
- return NULL;
-
- return iommu_group_get_by_id(group_id);
-}
-
+#ifdef CONFIG_SPAPR_TCE_IOMMU
static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
- struct vfio_group *vfio_group)
+ struct kvm_vfio_group *kvg)
{
- struct iommu_group *grp = kvm_vfio_group_get_iommu_group(vfio_group);
+ struct iommu_group *grp = kvm_vfio_file_iommu_group(kvg->file);
if (WARN_ON_ONCE(!grp))
return;
kvm_spapr_tce_release_iommu_group(kvm, grp);
- iommu_group_put(grp);
}
#endif
@@ -163,7 +106,7 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
mutex_lock(&kv->lock);
list_for_each_entry(kvg, &kv->group_list, node) {
- if (!kvm_vfio_group_is_coherent(kvg->vfio_group)) {
+ if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
noncoherent = true;
break;
}
@@ -181,149 +124,162 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
mutex_unlock(&kv->lock);
}
-static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
+static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
{
struct kvm_vfio *kv = dev->private;
- struct vfio_group *vfio_group;
struct kvm_vfio_group *kvg;
- int32_t __user *argp = (int32_t __user *)(unsigned long)arg;
- struct fd f;
- int32_t fd;
+ struct file *filp;
int ret;
- switch (attr) {
- case KVM_DEV_VFIO_GROUP_ADD:
- if (get_user(fd, argp))
- return -EFAULT;
-
- f = fdget(fd);
- if (!f.file)
- return -EBADF;
-
- vfio_group = kvm_vfio_group_get_external_user(f.file);
- fdput(f);
+ filp = fget(fd);
+ if (!filp)
+ return -EBADF;
- if (IS_ERR(vfio_group))
- return PTR_ERR(vfio_group);
-
- mutex_lock(&kv->lock);
+ /* Ensure the FD is a vfio group FD. */
+ if (!kvm_vfio_file_iommu_group(filp)) {
+ ret = -EINVAL;
+ goto err_fput;
+ }
- list_for_each_entry(kvg, &kv->group_list, node) {
- if (kvg->vfio_group == vfio_group) {
- mutex_unlock(&kv->lock);
- kvm_vfio_group_put_external_user(vfio_group);
- return -EEXIST;
- }
- }
+ mutex_lock(&kv->lock);
- kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
- if (!kvg) {
- mutex_unlock(&kv->lock);
- kvm_vfio_group_put_external_user(vfio_group);
- return -ENOMEM;
+ list_for_each_entry(kvg, &kv->group_list, node) {
+ if (kvg->file == filp) {
+ ret = -EEXIST;
+ goto err_unlock;
}
+ }
- list_add_tail(&kvg->node, &kv->group_list);
- kvg->vfio_group = vfio_group;
+ kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
+ if (!kvg) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
- kvm_arch_start_assignment(dev->kvm);
+ kvg->file = filp;
+ list_add_tail(&kvg->node, &kv->group_list);
- mutex_unlock(&kv->lock);
+ kvm_arch_start_assignment(dev->kvm);
- kvm_vfio_group_set_kvm(vfio_group, dev->kvm);
+ mutex_unlock(&kv->lock);
- kvm_vfio_update_coherency(dev);
+ kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
+ kvm_vfio_update_coherency(dev);
- return 0;
+ return 0;
+err_unlock:
+ mutex_unlock(&kv->lock);
+err_fput:
+ fput(filp);
+ return ret;
+}
- case KVM_DEV_VFIO_GROUP_DEL:
- if (get_user(fd, argp))
- return -EFAULT;
+static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
+{
+ struct kvm_vfio *kv = dev->private;
+ struct kvm_vfio_group *kvg;
+ struct fd f;
+ int ret;
- f = fdget(fd);
- if (!f.file)
- return -EBADF;
+ f = fdget(fd);
+ if (!f.file)
+ return -EBADF;
- ret = -ENOENT;
+ ret = -ENOENT;
- mutex_lock(&kv->lock);
+ mutex_lock(&kv->lock);
- list_for_each_entry(kvg, &kv->group_list, node) {
- if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
- f.file))
- continue;
+ list_for_each_entry(kvg, &kv->group_list, node) {
+ if (kvg->file != f.file)
+ continue;
- list_del(&kvg->node);
- kvm_arch_end_assignment(dev->kvm);
+ list_del(&kvg->node);
+ kvm_arch_end_assignment(dev->kvm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
- kvm_spapr_tce_release_vfio_group(dev->kvm,
- kvg->vfio_group);
+ kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
#endif
- kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
- kvm_vfio_group_put_external_user(kvg->vfio_group);
- kfree(kvg);
- ret = 0;
- break;
- }
+ kvm_vfio_file_set_kvm(kvg->file, NULL);
+ fput(kvg->file);
+ kfree(kvg);
+ ret = 0;
+ break;
+ }
- mutex_unlock(&kv->lock);
+ mutex_unlock(&kv->lock);
- fdput(f);
+ fdput(f);
- kvm_vfio_update_coherency(dev);
+ kvm_vfio_update_coherency(dev);
- return ret;
+ return ret;
+}
#ifdef CONFIG_SPAPR_TCE_IOMMU
- case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: {
- struct kvm_vfio_spapr_tce param;
- struct kvm_vfio *kv = dev->private;
- struct vfio_group *vfio_group;
- struct kvm_vfio_group *kvg;
- struct fd f;
- struct iommu_group *grp;
+static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
+ void __user *arg)
+{
+ struct kvm_vfio_spapr_tce param;
+ struct kvm_vfio *kv = dev->private;
+ struct kvm_vfio_group *kvg;
+ struct fd f;
+ int ret;
- if (copy_from_user(&param, (void __user *)arg,
- sizeof(struct kvm_vfio_spapr_tce)))
- return -EFAULT;
+ if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce)))
+ return -EFAULT;
- f = fdget(param.groupfd);
- if (!f.file)
- return -EBADF;
+ f = fdget(param.groupfd);
+ if (!f.file)
+ return -EBADF;
- vfio_group = kvm_vfio_group_get_external_user(f.file);
- fdput(f);
+ ret = -ENOENT;
- if (IS_ERR(vfio_group))
- return PTR_ERR(vfio_group);
+ mutex_lock(&kv->lock);
- grp = kvm_vfio_group_get_iommu_group(vfio_group);
+ list_for_each_entry(kvg, &kv->group_list, node) {
+ struct iommu_group *grp;
+
+ if (kvg->file != f.file)
+ continue;
+
+ grp = kvm_vfio_file_iommu_group(kvg->file);
if (WARN_ON_ONCE(!grp)) {
- kvm_vfio_group_put_external_user(vfio_group);
- return -EIO;
+ ret = -EIO;
+ goto err_fdput;
}
- ret = -ENOENT;
-
- mutex_lock(&kv->lock);
+ ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
+ grp);
+ break;
+ }
- list_for_each_entry(kvg, &kv->group_list, node) {
- if (kvg->vfio_group != vfio_group)
- continue;
+err_fdput:
+ mutex_unlock(&kv->lock);
+ fdput(f);
+ return ret;
+}
+#endif
- ret = kvm_spapr_tce_attach_iommu_group(dev->kvm,
- param.tablefd, grp);
- break;
- }
+static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
+ void __user *arg)
+{
+ int32_t __user *argp = arg;
+ int32_t fd;
- mutex_unlock(&kv->lock);
+ switch (attr) {
+ case KVM_DEV_VFIO_GROUP_ADD:
+ if (get_user(fd, argp))
+ return -EFAULT;
+ return kvm_vfio_group_add(dev, fd);
- iommu_group_put(grp);
- kvm_vfio_group_put_external_user(vfio_group);
+ case KVM_DEV_VFIO_GROUP_DEL:
+ if (get_user(fd, argp))
+ return -EFAULT;
+ return kvm_vfio_group_del(dev, fd);
- return ret;
- }
-#endif /* CONFIG_SPAPR_TCE_IOMMU */
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
+ return kvm_vfio_group_set_spapr_tce(dev, arg);
+#endif
}
return -ENXIO;
@@ -334,7 +290,8 @@ static int kvm_vfio_set_attr(struct kvm_device *dev,
{
switch (attr->group) {
case KVM_DEV_VFIO_GROUP:
- return kvm_vfio_set_group(dev, attr->attr, attr->addr);
+ return kvm_vfio_set_group(dev, attr->attr,
+ u64_to_user_ptr(attr->addr));
}
return -ENXIO;
@@ -367,10 +324,10 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
#ifdef CONFIG_SPAPR_TCE_IOMMU
- kvm_spapr_tce_release_vfio_group(dev->kvm, kvg->vfio_group);
+ kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
#endif
- kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
- kvm_vfio_group_put_external_user(kvg->vfio_group);
+ kvm_vfio_file_set_kvm(kvg->file, NULL);
+ fput(kvg->file);
list_del(&kvg->node);
kfree(kvg);
kvm_arch_end_assignment(dev->kvm);